author		Michael Ellerman <mpe@ellerman.id.au>	2016-10-11 05:07:56 -0400
committer	Michael Ellerman <mpe@ellerman.id.au>	2016-10-11 05:07:56 -0400
commit		065397a969a0f80624598c5030c2551abbd986fd (patch)
tree		60a4c453e6b494c8b3973497c577efa2f10102e4 /drivers/soc/fsl
parent		8321564a11bbeadffcc7d6335bcf3c07e5c397a3 (diff)
parent		e0b80f00bb96b925995d53980e0c764430bedb42 (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/scottwood/linux into next
Freescale updates from Scott:
"Highlights include qbman support (a prerequisite for datapath drivers
such as ethernet), a PCI DMA fix+improvement, reset handler changes, more
8xx optimizations, and some cleanups and fixes."
Diffstat (limited to 'drivers/soc/fsl')
23 files changed, 7180 insertions, 8 deletions
diff --git a/drivers/soc/fsl/Makefile b/drivers/soc/fsl/Makefile
index 203307fd92c1..75e1f5334821 100644
--- a/drivers/soc/fsl/Makefile
+++ b/drivers/soc/fsl/Makefile
@@ -2,5 +2,6 @@
 # Makefile for the Linux Kernel SOC fsl specific device drivers
 #
 
+obj-$(CONFIG_FSL_DPAA)			+= qbman/
 obj-$(CONFIG_QUICC_ENGINE)		+= qe/
 obj-$(CONFIG_CPM)			+= qe/
diff --git a/drivers/soc/fsl/qbman/Kconfig b/drivers/soc/fsl/qbman/Kconfig
new file mode 100644
index 000000000000..757033c0586c
--- /dev/null
+++ b/drivers/soc/fsl/qbman/Kconfig
@@ -0,0 +1,67 @@
+menuconfig FSL_DPAA
+	bool "Freescale DPAA 1.x support"
+	depends on FSL_SOC_BOOKE
+	select GENERIC_ALLOCATOR
+	help
+	  The Freescale Data Path Acceleration Architecture (DPAA) is a set of
+	  hardware components on specific QorIQ multicore processors.
+	  This architecture provides the infrastructure to support simplified
+	  sharing of networking interfaces and accelerators by multiple CPUs.
+	  The major h/w blocks composing DPAA are BMan and QMan.
+
+	  The Buffer Manager (BMan) is a hardware buffer pool management block
+	  that allows software and accelerators on the datapath to acquire and
+	  release buffers in order to build frames.
+
+	  The Queue Manager (QMan) is a hardware queue management block
+	  that allows software and accelerators on the datapath to enqueue and
+	  dequeue frames in order to communicate.
+
+if FSL_DPAA
+
+config FSL_DPAA_CHECKING
+	bool "Additional driver checking"
+	help
+	  Compiles in additional checks, to sanity-check the drivers and
+	  any use of the exported API. Not recommended for performance.
+
+config FSL_BMAN_TEST
+	tristate "BMan self-tests"
+	help
+	  Compile the BMan self-test code. These tests will
+	  exercise the BMan APIs to confirm functionality
+	  of both the software drivers and hardware device.
+
+config FSL_BMAN_TEST_API
+	bool "High-level API self-test"
+	depends on FSL_BMAN_TEST
+	default y
+	help
+	  This requires the presence of cpu-affine portals, and performs
+	  high-level API testing with them (whichever portal(s) are affine
+	  to the cpu(s) the test executes on).
+
+config FSL_QMAN_TEST
+	tristate "QMan self-tests"
+	help
+	  Compile self-test code for QMan.
+
+config FSL_QMAN_TEST_API
+	bool "QMan high-level self-test"
+	depends on FSL_QMAN_TEST
+	default y
+	help
+	  This requires the presence of cpu-affine portals, and performs
+	  high-level API testing with them (whichever portal(s) are affine to
+	  the cpu(s) the test executes on).
+
+config FSL_QMAN_TEST_STASH
+	bool "QMan 'hot potato' data-stashing self-test"
+	depends on FSL_QMAN_TEST
+	default y
+	help
+	  This performs a "hot potato" style test enqueuing/dequeuing a frame
+	  across a series of FQs scheduled to different portals (and cpus), with
+	  DQRR, data and context stashing always on.
+
+endif # FSL_DPAA
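
The FSL_DPAA_CHECKING option above gates the DPAA_ASSERT() calls that appear throughout bman.c below. A minimal sketch of what that gating can look like, assuming dpaa_sys.h (which is not part of this diff) defines the macro roughly along these lines:

	/* Assumed shape of DPAA_ASSERT() from dpaa_sys.h; illustrative only. */
	#ifdef CONFIG_FSL_DPAA_CHECKING
	#define DPAA_ASSERT(x)	WARN_ON(!(x))
	#else
	#define DPAA_ASSERT(x)
	#endif

With the option disabled the checks compile away entirely, which is why the help text warns against enabling them where performance matters.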
diff --git a/drivers/soc/fsl/qbman/Makefile b/drivers/soc/fsl/qbman/Makefile
new file mode 100644
index 000000000000..7ae199f1664e
--- /dev/null
+++ b/drivers/soc/fsl/qbman/Makefile
@@ -0,0 +1,12 @@
+obj-$(CONFIG_FSL_DPAA)				+= bman_ccsr.o qman_ccsr.o \
+						   bman_portal.o qman_portal.o \
+						   bman.o qman.o
+
+obj-$(CONFIG_FSL_BMAN_TEST)			+= bman-test.o
+bman-test-y					 = bman_test.o
+bman-test-$(CONFIG_FSL_BMAN_TEST_API)		+= bman_test_api.o
+
+obj-$(CONFIG_FSL_QMAN_TEST)			+= qman-test.o
+qman-test-y					 = qman_test.o
+qman-test-$(CONFIG_FSL_QMAN_TEST_API)		+= qman_test_api.o
+qman-test-$(CONFIG_FSL_QMAN_TEST_STASH)		+= qman_test_stash.o
diff --git a/drivers/soc/fsl/qbman/bman.c b/drivers/soc/fsl/qbman/bman.c
new file mode 100644
index 000000000000..ffa48fdbb1a9
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman.c
@@ -0,0 +1,797 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+#define IRQNAME		"BMan portal %d"
+#define MAX_IRQNAME	16	/* big enough for "BMan portal %d" */
+
+/* Portal register assists */
+
+/* Cache-inhibited register offsets */
+#define BM_REG_RCR_PI_CINH	0x0000
+#define BM_REG_RCR_CI_CINH	0x0004
+#define BM_REG_RCR_ITR		0x0008
+#define BM_REG_CFG		0x0100
+#define BM_REG_SCN(n)		(0x0200 + ((n) << 2))
+#define BM_REG_ISR		0x0e00
+#define BM_REG_IER		0x0e04
+#define BM_REG_ISDR		0x0e08
+#define BM_REG_IIR		0x0e0c
+
+/* Cache-enabled register offsets */
+#define BM_CL_CR		0x0000
+#define BM_CL_RR0		0x0100
+#define BM_CL_RR1		0x0140
+#define BM_CL_RCR		0x1000
+#define BM_CL_RCR_PI_CENA	0x3000
+#define BM_CL_RCR_CI_CENA	0x3100
+
+/*
+ * Portal modes.
+ * Enum types;
+ *   pmode == production mode
+ *   cmode == consumption mode,
+ * Enum values use 3 letter codes. First letter matches the portal mode,
+ * remaining two letters indicate;
+ *   ci == cache-inhibited portal register
+ *   ce == cache-enabled portal register
+ *   vb == in-band valid-bit (cache-enabled)
+ */
+enum bm_rcr_pmode {		/* matches BCSP_CFG::RPM */
+	bm_rcr_pci = 0,		/* PI index, cache-inhibited */
+	bm_rcr_pce = 1,		/* PI index, cache-enabled */
+	bm_rcr_pvb = 2		/* valid-bit */
+};
+enum bm_rcr_cmode {		/* s/w-only */
+	bm_rcr_cci,		/* CI index, cache-inhibited */
+	bm_rcr_cce		/* CI index, cache-enabled */
+};
+
+
+/* --- Portal structures --- */
+
+#define BM_RCR_SIZE	8
+
+/* Release Command */
+struct bm_rcr_entry {
+	union {
+		struct {
+			u8 _ncw_verb; /* writes to this are non-coherent */
+			u8 bpid; /* used with BM_RCR_VERB_CMD_BPID_SINGLE */
+			u8 __reserved1[62];
+		};
+		struct bm_buffer bufs[8];
+	};
+};
+#define BM_RCR_VERB_VBIT		0x80
+#define BM_RCR_VERB_CMD_MASK		0x70	/* one of two values; */
+#define BM_RCR_VERB_CMD_BPID_SINGLE	0x20
+#define BM_RCR_VERB_CMD_BPID_MULTI	0x30
+#define BM_RCR_VERB_BUFCOUNT_MASK	0x0f	/* values 1..8 */
+
+struct bm_rcr {
+	struct bm_rcr_entry *ring, *cursor;
+	u8 ci, available, ithresh, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	u32 busy;
+	enum bm_rcr_pmode pmode;
+	enum bm_rcr_cmode cmode;
+#endif
+};
+
+/* MC (Management Command) command */
+struct bm_mc_command {
+	u8 _ncw_verb; /* writes to this are non-coherent */
+	u8 bpid; /* used by acquire command */
+	u8 __reserved[62];
+};
+#define BM_MCC_VERB_VBIT		0x80
+#define BM_MCC_VERB_CMD_MASK		0x70	/* where the verb contains; */
+#define BM_MCC_VERB_CMD_ACQUIRE		0x10
+#define BM_MCC_VERB_CMD_QUERY		0x40
+#define BM_MCC_VERB_ACQUIRE_BUFCOUNT	0x0f	/* values 1..8 go here */
+
+/* MC result, Acquire and Query Response */
+union bm_mc_result {
+	struct {
+		u8 verb;
+		u8 bpid;
+		u8 __reserved[62];
+	};
+	struct bm_buffer bufs[8];
+};
+#define BM_MCR_VERB_VBIT		0x80
+#define BM_MCR_VERB_CMD_MASK		BM_MCC_VERB_CMD_MASK
+#define BM_MCR_VERB_CMD_ACQUIRE		BM_MCC_VERB_CMD_ACQUIRE
+#define BM_MCR_VERB_CMD_QUERY		BM_MCC_VERB_CMD_QUERY
+#define BM_MCR_VERB_CMD_ERR_INVALID	0x60
+#define BM_MCR_VERB_CMD_ERR_ECC		0x70
+#define BM_MCR_VERB_ACQUIRE_BUFCOUNT	BM_MCC_VERB_ACQUIRE_BUFCOUNT /* 0..8 */
+#define BM_MCR_TIMEOUT			10000 /* us */
+
+struct bm_mc {
+	struct bm_mc_command *cr;
+	union bm_mc_result *rr;
+	u8 rridx, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	enum {
+		/* Can only be _mc_start()ed */
+		mc_idle,
+		/* Can only be _mc_commit()ed or _mc_abort()ed */
+		mc_user,
+		/* Can only be _mc_retry()ed */
+		mc_hw
+	} state;
+#endif
+};
+
+struct bm_addr {
+	void __iomem *ce;	/* cache-enabled */
+	void __iomem *ci;	/* cache-inhibited */
+};
+
+struct bm_portal {
+	struct bm_addr addr;
+	struct bm_rcr rcr;
+	struct bm_mc mc;
+} ____cacheline_aligned;
+
+/* Cache-inhibited register access. */
+static inline u32 bm_in(struct bm_portal *p, u32 offset)
+{
+	return __raw_readl(p->addr.ci + offset);
+}
+
+static inline void bm_out(struct bm_portal *p, u32 offset, u32 val)
+{
+	__raw_writel(val, p->addr.ci + offset);
+}
+
+/* Cache Enabled Portal Access */
+static inline void bm_cl_invalidate(struct bm_portal *p, u32 offset)
+{
+	dpaa_invalidate(p->addr.ce + offset);
+}
+
+static inline void bm_cl_touch_ro(struct bm_portal *p, u32 offset)
+{
+	dpaa_touch_ro(p->addr.ce + offset);
+}
+
+static inline u32 bm_ce_in(struct bm_portal *p, u32 offset)
+{
+	return __raw_readl(p->addr.ce + offset);
+}
+
+struct bman_portal {
+	struct bm_portal p;
+	/* interrupt sources processed by portal_isr(), configurable */
+	unsigned long irq_sources;
+	/* probing time config params for cpu-affine portals */
+	const struct bm_portal_config *config;
+	char irqname[MAX_IRQNAME];
+};
+
+static cpumask_t affine_mask;
+static DEFINE_SPINLOCK(affine_mask_lock);
+static DEFINE_PER_CPU(struct bman_portal, bman_affine_portal);
+
+static inline struct bman_portal *get_affine_portal(void)
+{
+	return &get_cpu_var(bman_affine_portal);
+}
+
+static inline void put_affine_portal(void)
+{
+	put_cpu_var(bman_affine_portal);
+}
+
+/*
+ * This object type refers to a pool, it isn't *the* pool. There may be
+ * more than one such object per BMan buffer pool, eg. if different users of the
+ * pool are operating via different portals.
+ */
+struct bman_pool {
+	/* index of the buffer pool to encapsulate (0-63) */
+	u32 bpid;
+	/* Used for hash-table admin when using depletion notifications. */
+	struct bman_portal *portal;
+	struct bman_pool *next;
+};
+
+static u32 poll_portal_slow(struct bman_portal *p, u32 is);
+
+static irqreturn_t portal_isr(int irq, void *ptr)
+{
+	struct bman_portal *p = ptr;
+	struct bm_portal *portal = &p->p;
+	u32 clear = p->irq_sources;
+	u32 is = bm_in(portal, BM_REG_ISR) & p->irq_sources;
+
+	if (unlikely(!is))
+		return IRQ_NONE;
+
+	clear |= poll_portal_slow(p, is);
+	bm_out(portal, BM_REG_ISR, clear);
+	return IRQ_HANDLED;
+}
+
+/* --- RCR API --- */
+
+#define RCR_SHIFT	ilog2(sizeof(struct bm_rcr_entry))
+#define RCR_CARRY	(uintptr_t)(BM_RCR_SIZE << RCR_SHIFT)
+
+/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
+static struct bm_rcr_entry *rcr_carryclear(struct bm_rcr_entry *p)
+{
+	uintptr_t addr = (uintptr_t)p;
+
+	addr &= ~RCR_CARRY;
+
+	return (struct bm_rcr_entry *)addr;
+}
+
+#ifdef CONFIG_FSL_DPAA_CHECKING
+/* Bit-wise logic to convert a ring pointer to a ring index */
+static int rcr_ptr2idx(struct bm_rcr_entry *e)
+{
+	return ((uintptr_t)e >> RCR_SHIFT) & (BM_RCR_SIZE - 1);
+}
+#endif
+
+/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
+static inline void rcr_inc(struct bm_rcr *rcr)
+{
+	/* increment to the next RCR pointer and handle overflow and 'vbit' */
+	struct bm_rcr_entry *partial = rcr->cursor + 1;
+
+	rcr->cursor = rcr_carryclear(partial);
+	if (partial != rcr->cursor)
+		rcr->vbit ^= BM_RCR_VERB_VBIT;
+}
+
+static int bm_rcr_get_avail(struct bm_portal *portal)
+{
+	struct bm_rcr *rcr = &portal->rcr;
+
+	return rcr->available;
+}
+
+static int bm_rcr_get_fill(struct bm_portal *portal)
+{
+	struct bm_rcr *rcr = &portal->rcr;
+
+	return BM_RCR_SIZE - 1 - rcr->available;
+}
+
+static void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh)
+{
+	struct bm_rcr *rcr = &portal->rcr;
+
+	rcr->ithresh = ithresh;
+	bm_out(portal, BM_REG_RCR_ITR, ithresh);
+}
+
+static void bm_rcr_cce_prefetch(struct bm_portal *portal)
+{
+	__maybe_unused struct bm_rcr *rcr = &portal->rcr;
+
+	DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
+	bm_cl_touch_ro(portal, BM_CL_RCR_CI_CENA);
+}
+
+static u8 bm_rcr_cce_update(struct bm_portal *portal)
+{
+	struct bm_rcr *rcr = &portal->rcr;
+	u8 diff, old_ci = rcr->ci;
+
+	DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
+	rcr->ci = bm_ce_in(portal, BM_CL_RCR_CI_CENA) & (BM_RCR_SIZE - 1);
+	bm_cl_invalidate(portal, BM_CL_RCR_CI_CENA);
+	diff = dpaa_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
+	rcr->available += diff;
+	return diff;
+}
+
+static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal)
+{
+	struct bm_rcr *rcr = &portal->rcr;
+
+	DPAA_ASSERT(!rcr->busy);
+	if (!rcr->available)
+		return NULL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	rcr->busy = 1;
+#endif
+	dpaa_zero(rcr->cursor);
+	return rcr->cursor;
+}
+
+static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb)
+{
+	struct bm_rcr *rcr = &portal->rcr;
+	struct bm_rcr_entry *rcursor;
+
+	DPAA_ASSERT(rcr->busy);
+	DPAA_ASSERT(rcr->pmode == bm_rcr_pvb);
+	DPAA_ASSERT(rcr->available >= 1);
+	dma_wmb();
+	rcursor = rcr->cursor;
+	rcursor->_ncw_verb = myverb | rcr->vbit;
+	dpaa_flush(rcursor);
+	rcr_inc(rcr);
+	rcr->available--;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	rcr->busy = 0;
+#endif
+}
+
+static int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode,
+		       enum bm_rcr_cmode cmode)
+{
+	struct bm_rcr *rcr = &portal->rcr;
+	u32 cfg;
+	u8 pi;
+
+	rcr->ring = portal->addr.ce + BM_CL_RCR;
+	rcr->ci = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+	pi = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
+	rcr->cursor = rcr->ring + pi;
+	rcr->vbit = (bm_in(portal, BM_REG_RCR_PI_CINH) & BM_RCR_SIZE) ?
+		BM_RCR_VERB_VBIT : 0;
+	rcr->available = BM_RCR_SIZE - 1
+		- dpaa_cyc_diff(BM_RCR_SIZE, rcr->ci, pi);
+	rcr->ithresh = bm_in(portal, BM_REG_RCR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	rcr->busy = 0;
+	rcr->pmode = pmode;
+	rcr->cmode = cmode;
+#endif
+	cfg = (bm_in(portal, BM_REG_CFG) & 0xffffffe0)
+		| (pmode & 0x3); /* BCSP_CFG::RPM */
+	bm_out(portal, BM_REG_CFG, cfg);
+	return 0;
+}
+
+static void bm_rcr_finish(struct bm_portal *portal)
+{
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	struct bm_rcr *rcr = &portal->rcr;
+	int i;
+
+	DPAA_ASSERT(!rcr->busy);
+
+	i = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
+	if (i != rcr_ptr2idx(rcr->cursor))
+		pr_crit("losing uncommitted RCR entries\n");
+
+	i = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+	if (i != rcr->ci)
+		pr_crit("missing existing RCR completions\n");
+	if (rcr->ci != rcr_ptr2idx(rcr->cursor))
+		pr_crit("RCR destroyed unquiesced\n");
+#endif
+}
+
+/* --- Management command API --- */
+static int bm_mc_init(struct bm_portal *portal)
+{
+	struct bm_mc *mc = &portal->mc;
+
+	mc->cr = portal->addr.ce + BM_CL_CR;
+	mc->rr = portal->addr.ce + BM_CL_RR0;
+	mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & BM_MCC_VERB_VBIT) ?
+		    0 : 1;
+	mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	mc->state = mc_idle;
+#endif
+	return 0;
+}
+
+static void bm_mc_finish(struct bm_portal *portal)
+{
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	struct bm_mc *mc = &portal->mc;
+
+	DPAA_ASSERT(mc->state == mc_idle);
+	if (mc->state != mc_idle)
+		pr_crit("Losing incomplete MC command\n");
+#endif
+}
+
+static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal)
+{
+	struct bm_mc *mc = &portal->mc;
+
+	DPAA_ASSERT(mc->state == mc_idle);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	mc->state = mc_user;
+#endif
+	dpaa_zero(mc->cr);
+	return mc->cr;
+}
+
+static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb)
+{
+	struct bm_mc *mc = &portal->mc;
+	union bm_mc_result *rr = mc->rr + mc->rridx;
+
+	DPAA_ASSERT(mc->state == mc_user);
+	dma_wmb();
+	mc->cr->_ncw_verb = myverb | mc->vbit;
+	dpaa_flush(mc->cr);
+	dpaa_invalidate_touch_ro(rr);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	mc->state = mc_hw;
+#endif
+}
+
+static inline union bm_mc_result *bm_mc_result(struct bm_portal *portal)
+{
+	struct bm_mc *mc = &portal->mc;
+	union bm_mc_result *rr = mc->rr + mc->rridx;
+
+	DPAA_ASSERT(mc->state == mc_hw);
+	/*
+	 * The inactive response register's verb byte always returns zero until
+	 * its command is submitted and completed. This includes the valid-bit,
+	 * in case you were wondering...
+	 */
+	if (!__raw_readb(&rr->verb)) {
+		dpaa_invalidate_touch_ro(rr);
+		return NULL;
+	}
+	mc->rridx ^= 1;
+	mc->vbit ^= BM_MCC_VERB_VBIT;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	mc->state = mc_idle;
+#endif
+	return rr;
+}
+
+static inline int bm_mc_result_timeout(struct bm_portal *portal,
+				       union bm_mc_result **mcr)
+{
+	int timeout = BM_MCR_TIMEOUT;
+
+	do {
+		*mcr = bm_mc_result(portal);
+		if (*mcr)
+			break;
+		udelay(1);
+	} while (--timeout);
+
+	return timeout;
+}
+
+/* Disable all BSCN interrupts for the portal */
+static void bm_isr_bscn_disable(struct bm_portal *portal)
+{
+	bm_out(portal, BM_REG_SCN(0), 0);
+	bm_out(portal, BM_REG_SCN(1), 0);
+}
+
+static int bman_create_portal(struct bman_portal *portal,
+			      const struct bm_portal_config *c)
+{
+	struct bm_portal *p;
+	int ret;
+
+	p = &portal->p;
+	/*
+	 * prep the low-level portal struct with the mapped addresses from the
+	 * config, everything that follows depends on it and "config" is more
+	 * for (de)reference...
+	 */
+	p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
+	p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
+	if (bm_rcr_init(p, bm_rcr_pvb, bm_rcr_cce)) {
+		dev_err(c->dev, "RCR initialisation failed\n");
+		goto fail_rcr;
+	}
+	if (bm_mc_init(p)) {
+		dev_err(c->dev, "MC initialisation failed\n");
+		goto fail_mc;
+	}
+	/*
+	 * Default to all BPIDs disabled, we enable as required at
+	 * run-time.
+	 */
+	bm_isr_bscn_disable(p);
+
+	/* Write-to-clear any stale interrupt status bits */
+	bm_out(p, BM_REG_ISDR, 0xffffffff);
+	portal->irq_sources = 0;
+	bm_out(p, BM_REG_IER, 0);
+	bm_out(p, BM_REG_ISR, 0xffffffff);
+	snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
+	if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
+		dev_err(c->dev, "request_irq() failed\n");
+		goto fail_irq;
+	}
+	if (c->cpu != -1 && irq_can_set_affinity(c->irq) &&
+	    irq_set_affinity(c->irq, cpumask_of(c->cpu))) {
+		dev_err(c->dev, "irq_set_affinity() failed\n");
+		goto fail_affinity;
+	}
+
+	/* Need RCR to be empty before continuing */
+	ret = bm_rcr_get_fill(p);
+	if (ret) {
+		dev_err(c->dev, "RCR unclean\n");
+		goto fail_rcr_empty;
+	}
+	/* Success */
+	portal->config = c;
+
+	bm_out(p, BM_REG_ISDR, 0);
+	bm_out(p, BM_REG_IIR, 0);
+
+	return 0;
+
+fail_rcr_empty:
+fail_affinity:
+	free_irq(c->irq, portal);
+fail_irq:
+	bm_mc_finish(p);
+fail_mc:
+	bm_rcr_finish(p);
+fail_rcr:
+	return -EIO;
+}
+
+struct bman_portal *bman_create_affine_portal(const struct bm_portal_config *c)
+{
+	struct bman_portal *portal;
+	int err;
+
+	portal = &per_cpu(bman_affine_portal, c->cpu);
+	err = bman_create_portal(portal, c);
+	if (err)
+		return NULL;
+
+	spin_lock(&affine_mask_lock);
+	cpumask_set_cpu(c->cpu, &affine_mask);
+	spin_unlock(&affine_mask_lock);
+
+	return portal;
+}
+
+static u32 poll_portal_slow(struct bman_portal *p, u32 is)
+{
+	u32 ret = is;
+
+	if (is & BM_PIRQ_RCRI) {
+		bm_rcr_cce_update(&p->p);
+		bm_rcr_set_ithresh(&p->p, 0);
+		bm_out(&p->p, BM_REG_ISR, BM_PIRQ_RCRI);
+		is &= ~BM_PIRQ_RCRI;
+	}
+
+	/* There should be no status register bits left undefined */
+	DPAA_ASSERT(!is);
+	return ret;
+}
+
+int bman_p_irqsource_add(struct bman_portal *p, u32 bits)
+{
+	unsigned long irqflags;
+
+	local_irq_save(irqflags);
+	set_bits(bits & BM_PIRQ_VISIBLE, &p->irq_sources);
+	bm_out(&p->p, BM_REG_IER, p->irq_sources);
+	local_irq_restore(irqflags);
+	return 0;
+}
+
+static int bm_shutdown_pool(u32 bpid)
+{
+	struct bm_mc_command *bm_cmd;
+	union bm_mc_result *bm_res;
+
+	while (1) {
+		struct bman_portal *p = get_affine_portal();
+		/* Acquire buffers until empty */
+		bm_cmd = bm_mc_start(&p->p);
+		bm_cmd->bpid = bpid;
+		bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE | 1);
+		if (!bm_mc_result_timeout(&p->p, &bm_res)) {
+			put_affine_portal();
+			pr_crit("BMan Acquire Command timed out\n");
+			return -ETIMEDOUT;
+		}
+		if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) {
+			put_affine_portal();
+			/* Pool is empty */
+			return 0;
+		}
+		put_affine_portal();
+	}
+
+	return 0;
+}
+
+struct gen_pool *bm_bpalloc;
+
+static int bm_alloc_bpid_range(u32 *result, u32 count)
+{
+	unsigned long addr;
+
+	addr = gen_pool_alloc(bm_bpalloc, count);
+	if (!addr)
+		return -ENOMEM;
+
+	*result = addr & ~DPAA_GENALLOC_OFF;
+
+	return 0;
+}
+
+static int bm_release_bpid(u32 bpid)
+{
+	int ret;
+
+	ret = bm_shutdown_pool(bpid);
+	if (ret) {
+		pr_debug("BPID %d leaked\n", bpid);
+		return ret;
+	}
+
+	gen_pool_free(bm_bpalloc, bpid | DPAA_GENALLOC_OFF, 1);
+	return 0;
+}
+
+struct bman_pool *bman_new_pool(void)
+{
+	struct bman_pool *pool = NULL;
+	u32 bpid;
+
+	if (bm_alloc_bpid_range(&bpid, 1))
+		return NULL;
+
+	pool = kmalloc(sizeof(*pool), GFP_KERNEL);
+	if (!pool)
+		goto err;
+
+	pool->bpid = bpid;
+
+	return pool;
+err:
+	bm_release_bpid(bpid);
+	kfree(pool);
+	return NULL;
+}
+EXPORT_SYMBOL(bman_new_pool);
+
+void bman_free_pool(struct bman_pool *pool)
+{
+	bm_release_bpid(pool->bpid);
+
+	kfree(pool);
+}
+EXPORT_SYMBOL(bman_free_pool);
+
+int bman_get_bpid(const struct bman_pool *pool)
+{
+	return pool->bpid;
+}
+EXPORT_SYMBOL(bman_get_bpid);
+
+static void update_rcr_ci(struct bman_portal *p, int avail)
+{
+	if (avail)
+		bm_rcr_cce_prefetch(&p->p);
+	else
+		bm_rcr_cce_update(&p->p);
+}
+
+int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num)
+{
+	struct bman_portal *p;
+	struct bm_rcr_entry *r;
+	unsigned long irqflags;
+	int avail, timeout = 1000; /* 1ms */
+	int i = num - 1;
+
+	DPAA_ASSERT(num > 0 && num <= 8);
+
+	do {
+		p = get_affine_portal();
+		local_irq_save(irqflags);
+		avail = bm_rcr_get_avail(&p->p);
+		if (avail < 2)
+			update_rcr_ci(p, avail);
+		r = bm_rcr_start(&p->p);
+		local_irq_restore(irqflags);
+		put_affine_portal();
+		if (likely(r))
+			break;
+
+		udelay(1);
+	} while (--timeout);
+
+	if (unlikely(!timeout))
+		return -ETIMEDOUT;
+
+	p = get_affine_portal();
+	local_irq_save(irqflags);
+	/*
+	 * we can copy all but the first entry, as this can trigger badness
+	 * with the valid-bit
+	 */
+	bm_buffer_set64(r->bufs, bm_buffer_get64(bufs));
+	bm_buffer_set_bpid(r->bufs, pool->bpid);
+	if (i)
+		memcpy(&r->bufs[1], &bufs[1], i * sizeof(bufs[0]));
+
+	bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE |
+			  (num & BM_RCR_VERB_BUFCOUNT_MASK));
+
+	local_irq_restore(irqflags);
+	put_affine_portal();
+	return 0;
+}
+EXPORT_SYMBOL(bman_release);
+
+int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num)
+{
+	struct bman_portal *p = get_affine_portal();
+	struct bm_mc_command *mcc;
+	union bm_mc_result *mcr;
+	int ret;
+
+	DPAA_ASSERT(num > 0 && num <= 8);
+
+	mcc = bm_mc_start(&p->p);
+	mcc->bpid = pool->bpid;
+	bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE |
+		     (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT));
+	if (!bm_mc_result_timeout(&p->p, &mcr)) {
+		put_affine_portal();
+		pr_crit("BMan Acquire Timeout\n");
+		return -ETIMEDOUT;
+	}
+	ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;
+	if (bufs)
+		memcpy(&bufs[0], &mcr->bufs[0], num * sizeof(bufs[0]));
+
+	put_affine_portal();
+	if (ret != num)
+		ret = -ENOMEM;
+	return ret;
+}
+EXPORT_SYMBOL(bman_acquire);
+
+const struct bm_portal_config *
+bman_get_bm_portal_config(const struct bman_portal *portal)
+{
+	return portal->config;
+}
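
bman.c exports the buffer pool API (bman_new_pool(), bman_release(), bman_acquire(), bman_free_pool(), bman_get_bpid()) that datapath drivers such as ethernet build on. A minimal usage sketch, assuming kernel context on a cpu with an affine portal; the function name and DMA address here are illustrative, not part of the commit:

	#include <soc/fsl/bman.h>

	static int example_pool_roundtrip(dma_addr_t buf_addr)
	{
		struct bm_buffer buf;
		struct bman_pool *pool;
		int ret;

		pool = bman_new_pool();	/* allocates a free BPID from bm_bpalloc */
		if (!pool)
			return -ENODEV;

		/* hand one buffer to the pool via the portal's release ring */
		bm_buffer_set64(&buf, buf_addr);
		ret = bman_release(pool, &buf, 1);
		if (ret)
			goto out;

		/* take it back; bman_acquire() returns the number acquired */
		ret = bman_acquire(pool, &buf, 1);
		if (ret == 1)
			ret = 0;
	out:
		bman_free_pool(pool);	/* drains the pool and frees the BPID */
		return ret;
	}

Note that bman_release() stamps the BPID into the release entry itself, so callers only fill in buffer addresses, and bman_acquire() reports a short count as -ENOMEM.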
diff --git a/drivers/soc/fsl/qbman/bman_ccsr.c b/drivers/soc/fsl/qbman/bman_ccsr.c
new file mode 100644
index 000000000000..9deb0524543f
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_ccsr.c
@@ -0,0 +1,263 @@
+/* Copyright (c) 2009 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+u16 bman_ip_rev;
+EXPORT_SYMBOL(bman_ip_rev);
+
+/* Register offsets */
+#define REG_FBPR_FPC		0x0800
+#define REG_ECSR		0x0a00
+#define REG_ECIR		0x0a04
+#define REG_EADR		0x0a08
+#define REG_EDATA(n)		(0x0a10 + ((n) * 0x04))
+#define REG_SBEC(n)		(0x0a80 + ((n) * 0x04))
+#define REG_IP_REV_1		0x0bf8
+#define REG_IP_REV_2		0x0bfc
+#define REG_FBPR_BARE		0x0c00
+#define REG_FBPR_BAR		0x0c04
+#define REG_FBPR_AR		0x0c10
+#define REG_SRCIDR		0x0d04
+#define REG_LIODNR		0x0d08
+#define REG_ERR_ISR		0x0e00
+#define REG_ERR_IER		0x0e04
+#define REG_ERR_ISDR		0x0e08
+
+/* Used by all error interrupt registers except 'inhibit' */
+#define BM_EIRQ_IVCI	0x00000010	/* Invalid Command Verb */
+#define BM_EIRQ_FLWI	0x00000008	/* FBPR Low Watermark */
+#define BM_EIRQ_MBEI	0x00000004	/* Multi-bit ECC Error */
+#define BM_EIRQ_SBEI	0x00000002	/* Single-bit ECC Error */
+#define BM_EIRQ_BSCN	0x00000001	/* pool State Change Notification */
+
+struct bman_hwerr_txt {
+	u32 mask;
+	const char *txt;
+};
+
+static const struct bman_hwerr_txt bman_hwerr_txts[] = {
+	{ BM_EIRQ_IVCI, "Invalid Command Verb" },
+	{ BM_EIRQ_FLWI, "FBPR Low Watermark" },
+	{ BM_EIRQ_MBEI, "Multi-bit ECC Error" },
+	{ BM_EIRQ_SBEI, "Single-bit ECC Error" },
+	{ BM_EIRQ_BSCN, "Pool State Change Notification" },
+};
+
+/* Only trigger the low watermark interrupt once */
+#define BMAN_ERRS_TO_DISABLE BM_EIRQ_FLWI
+
+/* Pointer to the start of the BMan's CCSR space */
+static u32 __iomem *bm_ccsr_start;
+
+static inline u32 bm_ccsr_in(u32 offset)
+{
+	return ioread32be(bm_ccsr_start + offset/4);
+}
+static inline void bm_ccsr_out(u32 offset, u32 val)
+{
+	iowrite32be(val, bm_ccsr_start + offset/4);
+}
+
+static void bm_get_version(u16 *id, u8 *major, u8 *minor)
+{
+	u32 v = bm_ccsr_in(REG_IP_REV_1);
+	*id = (v >> 16);
+	*major = (v >> 8) & 0xff;
+	*minor = v & 0xff;
+}
+
+/* signal transactions for FBPRs with higher priority */
+#define FBPR_AR_RPRIO_HI BIT(30)
+
+static void bm_set_memory(u64 ba, u32 size)
+{
+	u32 exp = ilog2(size);
+	/* choke if size isn't within range */
+	DPAA_ASSERT(size >= 4096 && size <= 1024*1024*1024 &&
+		    is_power_of_2(size));
+	/* choke if '[e]ba' has lower-alignment than 'size' */
+	DPAA_ASSERT(!(ba & (size - 1)));
+	bm_ccsr_out(REG_FBPR_BARE, upper_32_bits(ba));
+	bm_ccsr_out(REG_FBPR_BAR, lower_32_bits(ba));
+	bm_ccsr_out(REG_FBPR_AR, exp - 1);
+}
+
+/*
+ * Location and size of BMan private memory
+ *
+ * Ideally we would use the DMA API to turn rmem->base into a DMA address
+ * (especially if iommu translations ever get involved). Unfortunately, the
+ * DMA API currently does not allow mapping anything that is not backed with
+ * a struct page.
+ */
+static dma_addr_t fbpr_a;
+static size_t fbpr_sz;
+
+static int bman_fbpr(struct reserved_mem *rmem)
+{
+	fbpr_a = rmem->base;
+	fbpr_sz = rmem->size;
+
+	WARN_ON(!(fbpr_a && fbpr_sz));
+
+	return 0;
+}
+RESERVEDMEM_OF_DECLARE(bman_fbpr, "fsl,bman-fbpr", bman_fbpr);
+
+static irqreturn_t bman_isr(int irq, void *ptr)
+{
+	u32 isr_val, ier_val, ecsr_val, isr_mask, i;
+	struct device *dev = ptr;
+
+	ier_val = bm_ccsr_in(REG_ERR_IER);
+	isr_val = bm_ccsr_in(REG_ERR_ISR);
+	ecsr_val = bm_ccsr_in(REG_ECSR);
+	isr_mask = isr_val & ier_val;
+
+	if (!isr_mask)
+		return IRQ_NONE;
+
+	for (i = 0; i < ARRAY_SIZE(bman_hwerr_txts); i++) {
+		if (bman_hwerr_txts[i].mask & isr_mask) {
+			dev_err_ratelimited(dev, "ErrInt: %s\n",
+					    bman_hwerr_txts[i].txt);
+			if (bman_hwerr_txts[i].mask & ecsr_val) {
+				/* Re-arm error capture registers */
+				bm_ccsr_out(REG_ECSR, ecsr_val);
+			}
+			if (bman_hwerr_txts[i].mask & BMAN_ERRS_TO_DISABLE) {
+				dev_dbg(dev, "Disabling error 0x%x\n",
+					bman_hwerr_txts[i].mask);
+				ier_val &= ~bman_hwerr_txts[i].mask;
+				bm_ccsr_out(REG_ERR_IER, ier_val);
+			}
+		}
+	}
+	bm_ccsr_out(REG_ERR_ISR, isr_val);
+
+	return IRQ_HANDLED;
+}
+
+static int fsl_bman_probe(struct platform_device *pdev)
+{
+	int ret, err_irq;
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct resource *res;
+	u16 id, bm_pool_cnt;
+	u8 major, minor;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(dev, "Can't get %s property 'IORESOURCE_MEM'\n",
+			node->full_name);
+		return -ENXIO;
+	}
+	bm_ccsr_start = devm_ioremap(dev, res->start,
+				     res->end - res->start + 1);
+	if (!bm_ccsr_start)
+		return -ENXIO;
+
+	bm_get_version(&id, &major, &minor);
+	if (major == 1 && minor == 0) {
+		bman_ip_rev = BMAN_REV10;
+		bm_pool_cnt = BM_POOL_MAX;
+	} else if (major == 2 && minor == 0) {
+		bman_ip_rev = BMAN_REV20;
+		bm_pool_cnt = 8;
+	} else if (major == 2 && minor == 1) {
+		bman_ip_rev = BMAN_REV21;
+		bm_pool_cnt = BM_POOL_MAX;
+	} else {
+		dev_err(dev, "Unknown Bman version:%04x,%02x,%02x\n",
+			id, major, minor);
+		return -ENODEV;
+	}
+
+	bm_set_memory(fbpr_a, fbpr_sz);
+
+	err_irq = platform_get_irq(pdev, 0);
+	if (err_irq <= 0) {
+		dev_info(dev, "Can't get %s IRQ\n", node->full_name);
+		return -ENODEV;
+	}
+	ret = devm_request_irq(dev, err_irq, bman_isr, IRQF_SHARED, "bman-err",
+			       dev);
+	if (ret) {
+		dev_err(dev, "devm_request_irq() failed %d for '%s'\n",
+			ret, node->full_name);
+		return ret;
+	}
+	/* Disable Buffer Pool State Change */
+	bm_ccsr_out(REG_ERR_ISDR, BM_EIRQ_BSCN);
+	/*
+	 * Write-to-clear any stale bits, (eg. starvation being asserted prior
+	 * to resource allocation during driver init).
+	 */
+	bm_ccsr_out(REG_ERR_ISR, 0xffffffff);
+	/* Enable Error Interrupts */
+	bm_ccsr_out(REG_ERR_IER, 0xffffffff);
+
+	bm_bpalloc = devm_gen_pool_create(dev, 0, -1, "bman-bpalloc");
+	if (IS_ERR(bm_bpalloc)) {
+		ret = PTR_ERR(bm_bpalloc);
+		dev_err(dev, "bman-bpalloc pool init failed (%d)\n", ret);
+		return ret;
+	}
+
+	/* seed BMan resource pool */
+	ret = gen_pool_add(bm_bpalloc, DPAA_GENALLOC_OFF, bm_pool_cnt, -1);
+	if (ret) {
+		dev_err(dev, "Failed to seed BPID range [%d..%d] (%d)\n",
+			0, bm_pool_cnt - 1, ret);
+		return ret;
+	}
+
+	return 0;
+};
+
+static const struct of_device_id fsl_bman_ids[] = {
+	{
+		.compatible = "fsl,bman",
+	},
+	{}
+};
+
+static struct platform_driver fsl_bman_driver = {
+	.driver = {
+		.name = KBUILD_MODNAME,
+		.of_match_table = fsl_bman_ids,
+		.suppress_bind_attrs = true,
+	},
+	.probe = fsl_bman_probe,
+};
+
+builtin_platform_driver(fsl_bman_driver);
diff --git a/drivers/soc/fsl/qbman/bman_portal.c b/drivers/soc/fsl/qbman/bman_portal.c
new file mode 100644
index 000000000000..6579cc18811a
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_portal.c
@@ -0,0 +1,219 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+static struct bman_portal *affine_bportals[NR_CPUS];
+static struct cpumask portal_cpus;
+/* protect bman global registers and global data shared among portals */
+static DEFINE_SPINLOCK(bman_lock);
+
+static struct bman_portal *init_pcfg(struct bm_portal_config *pcfg)
+{
+	struct bman_portal *p = bman_create_affine_portal(pcfg);
+
+	if (!p) {
+		dev_crit(pcfg->dev, "%s: Portal failure on cpu %d\n",
+			 __func__, pcfg->cpu);
+		return NULL;
+	}
+
+	bman_p_irqsource_add(p, BM_PIRQ_RCRI);
+	affine_bportals[pcfg->cpu] = p;
+
+	dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu);
+
+	return p;
+}
+
+static void bman_offline_cpu(unsigned int cpu)
+{
+	struct bman_portal *p = affine_bportals[cpu];
+	const struct bm_portal_config *pcfg;
+
+	if (!p)
+		return;
+
+	pcfg = bman_get_bm_portal_config(p);
+	if (!pcfg)
+		return;
+
+	irq_set_affinity(pcfg->irq, cpumask_of(0));
+}
+
+static void bman_online_cpu(unsigned int cpu)
+{
+	struct bman_portal *p = affine_bportals[cpu];
+	const struct bm_portal_config *pcfg;
+
+	if (!p)
+		return;
+
+	pcfg = bman_get_bm_portal_config(p);
+	if (!pcfg)
+		return;
+
+	irq_set_affinity(pcfg->irq, cpumask_of(cpu));
+}
+
+static int bman_hotplug_cpu_callback(struct notifier_block *nfb,
+				     unsigned long action, void *hcpu)
+{
+	unsigned int cpu = (unsigned long)hcpu;
+
+	switch (action) {
+	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
+		bman_online_cpu(cpu);
+		break;
+	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
+		bman_offline_cpu(cpu);
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block bman_hotplug_cpu_notifier = {
+	.notifier_call = bman_hotplug_cpu_callback,
+};
+
+static int bman_portal_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct bm_portal_config *pcfg;
+	struct resource *addr_phys[2];
+	void __iomem *va;
+	int irq, cpu;
+
+	pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
+	if (!pcfg)
+		return -ENOMEM;
+
+	pcfg->dev = dev;
+
+	addr_phys[0] = platform_get_resource(pdev, IORESOURCE_MEM,
+					     DPAA_PORTAL_CE);
+	if (!addr_phys[0]) {
+		dev_err(dev, "Can't get %s property 'reg::CE'\n",
+			node->full_name);
+		return -ENXIO;
+	}
+
+	addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
+					     DPAA_PORTAL_CI);
+	if (!addr_phys[1]) {
+		dev_err(dev, "Can't get %s property 'reg::CI'\n",
+			node->full_name);
+		return -ENXIO;
+	}
+
+	pcfg->cpu = -1;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq <= 0) {
+		dev_err(dev, "Can't get %s IRQ\n", node->full_name);
+		return -ENXIO;
+	}
+	pcfg->irq = irq;
+
+	va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
+	if (!va)
+		goto err_ioremap1;
+
+	pcfg->addr_virt[DPAA_PORTAL_CE] = va;
+
+	va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
+			  _PAGE_GUARDED | _PAGE_NO_CACHE);
+	if (!va)
+		goto err_ioremap2;
+
+	pcfg->addr_virt[DPAA_PORTAL_CI] = va;
+
+	spin_lock(&bman_lock);
+	cpu = cpumask_next_zero(-1, &portal_cpus);
+	if (cpu >= nr_cpu_ids) {
+		/* unassigned portal, skip init */
+		spin_unlock(&bman_lock);
+		return 0;
+	}
+
+	cpumask_set_cpu(cpu, &portal_cpus);
+	spin_unlock(&bman_lock);
+	pcfg->cpu = cpu;
+
+	if (!init_pcfg(pcfg))
+		goto err_ioremap2;
+
+	/* clear irq affinity if assigned cpu is offline */
+	if (!cpu_online(cpu))
+		bman_offline_cpu(cpu);
+
+	return 0;
+
+err_ioremap2:
+	iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
+err_ioremap1:
+	dev_err(dev, "ioremap failed\n");
+	return -ENXIO;
+}
+
+static const struct of_device_id bman_portal_ids[] = {
+	{
+		.compatible = "fsl,bman-portal",
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, bman_portal_ids);
+
+static struct platform_driver bman_portal_driver = {
+	.driver = {
+		.name = KBUILD_MODNAME,
+		.of_match_table = bman_portal_ids,
+	},
+	.probe = bman_portal_probe,
+};
+
+static int __init bman_portal_driver_register(struct platform_driver *drv)
+{
+	int ret;
+
+	ret = platform_driver_register(drv);
+	if (ret < 0)
+		return ret;
+
+	register_hotcpu_notifier(&bman_hotplug_cpu_notifier);
+
+	return 0;
+}
+
+module_driver(bman_portal_driver,
+	      bman_portal_driver_register, platform_driver_unregister);
diff --git a/drivers/soc/fsl/qbman/bman_priv.h b/drivers/soc/fsl/qbman/bman_priv.h
new file mode 100644
index 000000000000..f6896a2f6d90
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_priv.h
@@ -0,0 +1,80 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
+
+#include "dpaa_sys.h"
+
+#include <soc/fsl/bman.h>
+
+/* Portal processing (interrupt) sources */
+#define BM_PIRQ_RCRI	0x00000002	/* RCR Ring (below threshold) */
+
+/* Revision info (for errata and feature handling) */
+#define BMAN_REV10 0x0100
+#define BMAN_REV20 0x0200
+#define BMAN_REV21 0x0201
+extern u16 bman_ip_rev;	/* 0 if uninitialised, otherwise BMAN_REVx */
+
+extern struct gen_pool *bm_bpalloc;
+
+struct bm_portal_config {
+	/*
+	 * Corenet portal addresses;
+	 * [0]==cache-enabled, [1]==cache-inhibited.
+	 */
+	void __iomem *addr_virt[2];
+	/* Allow these to be joined in lists */
+	struct list_head list;
+	struct device *dev;
+	/* User-visible portal configuration settings */
+	/* portal is affined to this cpu */
+	int cpu;
+	/* portal interrupt line */
+	int irq;
+};
+
+struct bman_portal *bman_create_affine_portal(
+			const struct bm_portal_config *config);
+/*
+ * The bman_p_***() variant below may be called even when the cpu to which
+ * the portal is affine is not yet online.
+ * @bman_portal specifies which portal the API will use.
+ */
+int bman_p_irqsource_add(struct bman_portal *p, u32 bits);
+
+/*
+ * Used by all portal interrupt registers except 'inhibit'
+ * This mask contains all the "irqsource" bits visible to API users
+ */
+#define BM_PIRQ_VISIBLE	BM_PIRQ_RCRI
+
+const struct bm_portal_config *
+bman_get_bm_portal_config(const struct bman_portal *portal);
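
A condensed sketch of how this header is consumed at portal bring-up, mirroring init_pcfg() in bman_portal.c above; the config is assumed to be already populated by the portal probe, and the function name is illustrative:

	static struct bman_portal *example_bring_up(struct bm_portal_config *pcfg)
	{
		struct bman_portal *p = bman_create_affine_portal(pcfg);

		if (!p)
			return NULL;

		/* enable the only API-visible interrupt source, the RCR threshold */
		bman_p_irqsource_add(p, BM_PIRQ_RCRI);
		return p;
	}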
diff --git a/drivers/soc/fsl/qbman/bman_test.c b/drivers/soc/fsl/qbman/bman_test.c
new file mode 100644
index 000000000000..09b1c960b26a
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_test.c
@@ -0,0 +1,53 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_test.h"
+
+MODULE_AUTHOR("Geoff Thorpe");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("BMan testing");
+
+static int test_init(void)
+{
+#ifdef CONFIG_FSL_BMAN_TEST_API
+	int loop = 1;
+
+	while (loop--)
+		bman_test_api();
+#endif
+	return 0;
+}
+
+static void test_exit(void)
+{
+}
+
+module_init(test_init);
+module_exit(test_exit);
diff --git a/drivers/soc/fsl/qbman/bman_test.h b/drivers/soc/fsl/qbman/bman_test.h new file mode 100644 index 000000000000..037ed342add4 --- /dev/null +++ b/drivers/soc/fsl/qbman/bman_test.h | |||
@@ -0,0 +1,35 @@ | |||
1 | /* Copyright 2008 - 2016 Freescale Semiconductor, Inc. | ||
2 | * | ||
3 | * Redistribution and use in source and binary forms, with or without | ||
4 | * modification, are permitted provided that the following conditions are met: | ||
5 | * * Redistributions of source code must retain the above copyright | ||
6 | * notice, this list of conditions and the following disclaimer. | ||
7 | * * Redistributions in binary form must reproduce the above copyright | ||
8 | * notice, this list of conditions and the following disclaimer in the | ||
9 | * documentation and/or other materials provided with the distribution. | ||
10 | * * Neither the name of Freescale Semiconductor nor the | ||
11 | * names of its contributors may be used to endorse or promote products | ||
12 | * derived from this software without specific prior written permission. | ||
13 | * | ||
14 | * ALTERNATIVELY, this software may be distributed under the terms of the | ||
15 | * GNU General Public License ("GPL") as published by the Free Software | ||
16 | * Foundation, either version 2 of that License or (at your option) any | ||
17 | * later version. | ||
18 | * | ||
19 | * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY | ||
20 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
21 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | ||
22 | * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY | ||
23 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | ||
24 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | ||
25 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | ||
26 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | ||
28 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
29 | */ | ||
30 | |||
31 | #include "bman_priv.h" | ||
32 | |||
33 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
34 | |||
35 | void bman_test_api(void); | ||
diff --git a/drivers/soc/fsl/qbman/bman_test_api.c b/drivers/soc/fsl/qbman/bman_test_api.c new file mode 100644 index 000000000000..6f6bdd154fe3 --- /dev/null +++ b/drivers/soc/fsl/qbman/bman_test_api.c | |||
@@ -0,0 +1,151 @@ | |||
1 | /* Copyright 2008 - 2016 Freescale Semiconductor, Inc. | ||
2 | * | ||
3 | * Redistribution and use in source and binary forms, with or without | ||
4 | * modification, are permitted provided that the following conditions are met: | ||
5 | * * Redistributions of source code must retain the above copyright | ||
6 | * notice, this list of conditions and the following disclaimer. | ||
7 | * * Redistributions in binary form must reproduce the above copyright | ||
8 | * notice, this list of conditions and the following disclaimer in the | ||
9 | * documentation and/or other materials provided with the distribution. | ||
10 | * * Neither the name of Freescale Semiconductor nor the | ||
11 | * names of its contributors may be used to endorse or promote products | ||
12 | * derived from this software without specific prior written permission. | ||
13 | * | ||
14 | * ALTERNATIVELY, this software may be distributed under the terms of the | ||
15 | * GNU General Public License ("GPL") as published by the Free Software | ||
16 | * Foundation, either version 2 of that License or (at your option) any | ||
17 | * later version. | ||
18 | * | ||
19 | * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY | ||
20 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
21 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | ||
22 | * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY | ||
23 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | ||
24 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | ||
25 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | ||
26 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | ||
28 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
29 | */ | ||
30 | |||
31 | #include "bman_test.h" | ||
32 | |||
33 | #define NUM_BUFS 93 | ||
34 | #define LOOPS 3 | ||
35 | #define BMAN_TOKEN_MASK 0x00FFFFFFFFFFLLU | ||
36 | |||
37 | static struct bman_pool *pool; | ||
38 | static struct bm_buffer bufs_in[NUM_BUFS] ____cacheline_aligned; | ||
39 | static struct bm_buffer bufs_out[NUM_BUFS] ____cacheline_aligned; | ||
40 | static int bufs_received; | ||
41 | |||
42 | static void bufs_init(void) | ||
43 | { | ||
44 | int i; | ||
45 | |||
46 | for (i = 0; i < NUM_BUFS; i++) | ||
47 | bm_buffer_set64(&bufs_in[i], 0xfedc01234567LLU * i); | ||
48 | bufs_received = 0; | ||
49 | } | ||
50 | |||
51 | static inline int bufs_cmp(const struct bm_buffer *a, const struct bm_buffer *b) | ||
52 | { | ||
53 | if (bman_ip_rev == BMAN_REV20 || bman_ip_rev == BMAN_REV21) { | ||
54 | |||
55 | /* | ||
56 | * On SoCs with BMan revision 2.0, BMan only respects the 40 | ||
57 | * LS-bits of buffer addresses, masking off the upper 8-bits on | ||
58 | * release commands. The API provides for 48-bit addresses | ||
59 | * because some SoCs support all 48-bits. When generating | ||
60 | * garbage addresses for testing, we either need to zero the | ||
61 | * upper 8-bits when releasing to BMan (otherwise we'll be | ||
62 | * disappointed when the buffers we acquire back from BMan | ||
63 | * don't match), or we need to mask the upper 8-bits off when | ||
64 | * comparing. We do the latter. | ||
65 | */ | ||
66 | if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK) < | ||
67 | (bm_buffer_get64(b) & BMAN_TOKEN_MASK)) | ||
68 | return -1; | ||
69 | if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK) > | ||
70 | (bm_buffer_get64(b) & BMAN_TOKEN_MASK)) | ||
71 | return 1; | ||
72 | } else { | ||
73 | if (bm_buffer_get64(a) < bm_buffer_get64(b)) | ||
74 | return -1; | ||
75 | if (bm_buffer_get64(a) > bm_buffer_get64(b)) | ||
76 | return 1; | ||
77 | } | ||
78 | |||
79 | return 0; | ||
80 | } | ||
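To make the masking above concrete: two 48-bit addresses that differ only in their upper 8 bits compare equal on a rev-2.0/2.1 BMan, because the hardware never stored those bits. Illustrative values only:

    u64 a = 0xfe0000001234ULL;
    u64 b = 0x010000001234ULL;

    /* Distinct as full 48-bit values ... */
    WARN_ON(a == b);                                         /* does not fire */
    /* ... but identical once the upper 8 bits are masked off */
    WARN_ON((a & BMAN_TOKEN_MASK) != (b & BMAN_TOKEN_MASK)); /* does not fire */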
81 | |||
82 | static void bufs_confirm(void) | ||
83 | { | ||
84 | int i, j; | ||
85 | |||
86 | for (i = 0; i < NUM_BUFS; i++) { | ||
87 | int matches = 0; | ||
88 | |||
89 | for (j = 0; j < NUM_BUFS; j++) | ||
90 | if (!bufs_cmp(&bufs_in[i], &bufs_out[j])) | ||
91 | matches++; | ||
92 | WARN_ON(matches != 1); | ||
93 | } | ||
94 | } | ||
95 | |||
96 | /* test */ | ||
97 | void bman_test_api(void) | ||
98 | { | ||
99 | int i, loops = LOOPS; | ||
100 | |||
101 | bufs_init(); | ||
102 | |||
103 | pr_info("%s(): Starting\n", __func__); | ||
104 | |||
105 | pool = bman_new_pool(); | ||
106 | if (!pool) { | ||
107 | pr_crit("bman_new_pool() failed\n"); | ||
108 | goto failed; | ||
109 | } | ||
110 | |||
111 | /* Release buffers */ | ||
112 | do_loop: | ||
113 | i = 0; | ||
114 | while (i < NUM_BUFS) { | ||
115 | int num = 8; | ||
116 | |||
117 | if (i + num > NUM_BUFS) | ||
118 | num = NUM_BUFS - i; | ||
119 | if (bman_release(pool, bufs_in + i, num)) { | ||
120 | pr_crit("bman_release() failed\n"); | ||
121 | goto failed; | ||
122 | } | ||
123 | i += num; | ||
124 | } | ||
125 | |||
126 | /* Acquire buffers */ | ||
127 | while (i > 0) { | ||
128 | int tmp, num = 8; | ||
129 | |||
130 | if (num > i) | ||
131 | num = i; | ||
132 | tmp = bman_acquire(pool, bufs_out + i - num, num); | ||
133 | WARN_ON(tmp != num); | ||
134 | i -= num; | ||
135 | } | ||
136 | i = bman_acquire(pool, NULL, 1); | ||
137 | WARN_ON(i > 0); | ||
138 | |||
139 | bufs_confirm(); | ||
140 | |||
141 | if (--loops) | ||
142 | goto do_loop; | ||
143 | |||
144 | /* Clean up */ | ||
145 | bman_free_pool(pool); | ||
146 | pr_info("%s(): Finished\n", __func__); | ||
147 | return; | ||
148 | |||
149 | failed: | ||
150 | WARN_ON(1); | ||
151 | } | ||
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.h b/drivers/soc/fsl/qbman/dpaa_sys.h new file mode 100644 index 000000000000..b63fd72295c6 --- /dev/null +++ b/drivers/soc/fsl/qbman/dpaa_sys.h | |||
@@ -0,0 +1,103 @@ | |||
1 | /* Copyright 2008 - 2016 Freescale Semiconductor, Inc. | ||
2 | * | ||
3 | * Redistribution and use in source and binary forms, with or without | ||
4 | * modification, are permitted provided that the following conditions are met: | ||
5 | * * Redistributions of source code must retain the above copyright | ||
6 | * notice, this list of conditions and the following disclaimer. | ||
7 | * * Redistributions in binary form must reproduce the above copyright | ||
8 | * notice, this list of conditions and the following disclaimer in the | ||
9 | * documentation and/or other materials provided with the distribution. | ||
10 | * * Neither the name of Freescale Semiconductor nor the | ||
11 | * names of its contributors may be used to endorse or promote products | ||
12 | * derived from this software without specific prior written permission. | ||
13 | * | ||
14 | * ALTERNATIVELY, this software may be distributed under the terms of the | ||
15 | * GNU General Public License ("GPL") as published by the Free Software | ||
16 | * Foundation, either version 2 of that License or (at your option) any | ||
17 | * later version. | ||
18 | * | ||
19 | * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY | ||
20 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
21 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | ||
22 | * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY | ||
23 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | ||
24 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | ||
25 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | ||
26 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | ||
28 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
29 | */ | ||
30 | |||
31 | #ifndef __DPAA_SYS_H | ||
32 | #define __DPAA_SYS_H | ||
33 | |||
34 | #include <linux/cpu.h> | ||
35 | #include <linux/slab.h> | ||
36 | #include <linux/module.h> | ||
37 | #include <linux/interrupt.h> | ||
38 | #include <linux/kthread.h> | ||
39 | #include <linux/vmalloc.h> | ||
40 | #include <linux/platform_device.h> | ||
41 | #include <linux/of_reserved_mem.h> | ||
42 | #include <linux/prefetch.h> | ||
43 | #include <linux/genalloc.h> | ||
44 | #include <asm/cacheflush.h> | ||
45 | |||
46 | /* For 2-element tables related to cache-inhibited and cache-enabled mappings */ | ||
47 | #define DPAA_PORTAL_CE 0 | ||
48 | #define DPAA_PORTAL_CI 1 | ||
49 | |||
50 | #if (L1_CACHE_BYTES != 32) && (L1_CACHE_BYTES != 64) | ||
51 | #error "Unsupported Cacheline Size" | ||
52 | #endif | ||
53 | |||
54 | static inline void dpaa_flush(void *p) | ||
55 | { | ||
56 | #ifdef CONFIG_PPC | ||
57 | flush_dcache_range((unsigned long)p, (unsigned long)p+64); | ||
58 | #elif defined(CONFIG_ARM32) | ||
59 | __cpuc_flush_dcache_area(p, 64); | ||
60 | #elif defined(CONFIG_ARM64) | ||
61 | __flush_dcache_area(p, 64); | ||
62 | #endif | ||
63 | } | ||
64 | |||
65 | #define dpaa_invalidate(p) dpaa_flush(p) | ||
66 | |||
67 | #define dpaa_zero(p) memset(p, 0, 64) | ||
68 | |||
69 | static inline void dpaa_touch_ro(void *p) | ||
70 | { | ||
71 | #if (L1_CACHE_BYTES == 32) | ||
72 | prefetch(p+32); | ||
73 | #endif | ||
74 | prefetch(p); | ||
75 | } | ||
76 | |||
77 | /* Commonly used combo */ | ||
78 | static inline void dpaa_invalidate_touch_ro(void *p) | ||
79 | { | ||
80 | dpaa_invalidate(p); | ||
81 | dpaa_touch_ro(p); | ||
82 | } | ||
83 | |||
84 | |||
85 | #ifdef CONFIG_FSL_DPAA_CHECKING | ||
86 | #define DPAA_ASSERT(x) WARN_ON(!(x)) | ||
87 | #else | ||
88 | #define DPAA_ASSERT(x) | ||
89 | #endif | ||
90 | |||
91 | /* cyclic helper for rings */ | ||
92 | static inline u8 dpaa_cyc_diff(u8 ringsize, u8 first, u8 last) | ||
93 | { | ||
94 | /* 'first' is included, 'last' is excluded */ | ||
95 | if (first <= last) | ||
96 | return last - first; | ||
97 | return ringsize + last - first; | ||
98 | } | ||
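For example, on an 8-entry ring the helper gives the same answer whether or not the interval wraps: dpaa_cyc_diff(8, 2, 6) is 6 - 2 = 4, and dpaa_cyc_diff(8, 6, 2) is 8 + 2 - 6 = 4. The ring-init code in qman.c below relies on this to compute fill levels and free slots, e.g.:

    /* available EQCR slots = ring size - 1 - entries still in flight */
    u8 avail = QM_EQCR_SIZE - 1 - dpaa_cyc_diff(QM_EQCR_SIZE, ci, pi);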
99 | |||
100 | /* Offset applied to genalloc pools due to zero being an error return */ | ||
101 | #define DPAA_GENALLOC_OFF 0x80000000 | ||
102 | |||
103 | #endif /* __DPAA_SYS_H */ | ||
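The offset exists because gen_pool_alloc() returns 0 on failure, so a resource id of 0 would otherwise be indistinguishable from an allocation error. A hedged sketch of the intended pattern (the pool setup shown is an assumption, not code from this series):

    /* Seed the pool with ids shifted away from zero, e.g.
     *   gen_pool_add(pool, DPAA_GENALLOC_OFF + base, count, -1);
     * then allocations decode back to the real id: */
    unsigned long addr = gen_pool_alloc(pool, 1);

    if (!addr)
            return -ENOMEM;                    /* 0 is the error return */
    id = (u32)(addr - DPAA_GENALLOC_OFF);      /* real id, which may be 0 */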
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c new file mode 100644 index 000000000000..119054bc922b --- /dev/null +++ b/drivers/soc/fsl/qbman/qman.c | |||
@@ -0,0 +1,2881 @@ | |||
1 | /* Copyright 2008 - 2016 Freescale Semiconductor, Inc. | ||
2 | * | ||
3 | * Redistribution and use in source and binary forms, with or without | ||
4 | * modification, are permitted provided that the following conditions are met: | ||
5 | * * Redistributions of source code must retain the above copyright | ||
6 | * notice, this list of conditions and the following disclaimer. | ||
7 | * * Redistributions in binary form must reproduce the above copyright | ||
8 | * notice, this list of conditions and the following disclaimer in the | ||
9 | * documentation and/or other materials provided with the distribution. | ||
10 | * * Neither the name of Freescale Semiconductor nor the | ||
11 | * names of its contributors may be used to endorse or promote products | ||
12 | * derived from this software without specific prior written permission. | ||
13 | * | ||
14 | * ALTERNATIVELY, this software may be distributed under the terms of the | ||
15 | * GNU General Public License ("GPL") as published by the Free Software | ||
16 | * Foundation, either version 2 of that License or (at your option) any | ||
17 | * later version. | ||
18 | * | ||
19 | * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY | ||
20 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
21 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | ||
22 | * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY | ||
23 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | ||
24 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | ||
25 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | ||
26 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | ||
28 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
29 | */ | ||
30 | |||
31 | #include "qman_priv.h" | ||
32 | |||
33 | #define DQRR_MAXFILL 15 | ||
34 | #define EQCR_ITHRESH 4 /* if EQCR congests, interrupt threshold */ | ||
35 | #define IRQNAME "QMan portal %d" | ||
36 | #define MAX_IRQNAME 16 /* big enough for "QMan portal %d" */ | ||
37 | #define QMAN_POLL_LIMIT 32 | ||
38 | #define QMAN_PIRQ_DQRR_ITHRESH 12 | ||
39 | #define QMAN_PIRQ_MR_ITHRESH 4 | ||
40 | #define QMAN_PIRQ_IPERIOD 100 | ||
41 | |||
42 | /* Portal register assists */ | ||
43 | |||
44 | /* Cache-inhibited register offsets */ | ||
45 | #define QM_REG_EQCR_PI_CINH 0x0000 | ||
46 | #define QM_REG_EQCR_CI_CINH 0x0004 | ||
47 | #define QM_REG_EQCR_ITR 0x0008 | ||
48 | #define QM_REG_DQRR_PI_CINH 0x0040 | ||
49 | #define QM_REG_DQRR_CI_CINH 0x0044 | ||
50 | #define QM_REG_DQRR_ITR 0x0048 | ||
51 | #define QM_REG_DQRR_DCAP 0x0050 | ||
52 | #define QM_REG_DQRR_SDQCR 0x0054 | ||
53 | #define QM_REG_DQRR_VDQCR 0x0058 | ||
54 | #define QM_REG_DQRR_PDQCR 0x005c | ||
55 | #define QM_REG_MR_PI_CINH 0x0080 | ||
56 | #define QM_REG_MR_CI_CINH 0x0084 | ||
57 | #define QM_REG_MR_ITR 0x0088 | ||
58 | #define QM_REG_CFG 0x0100 | ||
59 | #define QM_REG_ISR 0x0e00 | ||
60 | #define QM_REG_IER 0x0e04 | ||
61 | #define QM_REG_ISDR 0x0e08 | ||
62 | #define QM_REG_IIR 0x0e0c | ||
63 | #define QM_REG_ITPR 0x0e14 | ||
64 | |||
65 | /* Cache-enabled register offsets */ | ||
66 | #define QM_CL_EQCR 0x0000 | ||
67 | #define QM_CL_DQRR 0x1000 | ||
68 | #define QM_CL_MR 0x2000 | ||
69 | #define QM_CL_EQCR_PI_CENA 0x3000 | ||
70 | #define QM_CL_EQCR_CI_CENA 0x3100 | ||
71 | #define QM_CL_DQRR_PI_CENA 0x3200 | ||
72 | #define QM_CL_DQRR_CI_CENA 0x3300 | ||
73 | #define QM_CL_MR_PI_CENA 0x3400 | ||
74 | #define QM_CL_MR_CI_CENA 0x3500 | ||
75 | #define QM_CL_CR 0x3800 | ||
76 | #define QM_CL_RR0 0x3900 | ||
77 | #define QM_CL_RR1 0x3940 | ||
78 | |||
79 | /* | ||
80 | * Note: the drivers (and the h/w programming model) already obtain the | ||
81 | * required synchronisation for portal accesses and data-dependencies. Use of | ||
82 | * barrier()s or other order-preserving primitives simply degrades performance. | ||
83 | * Hence the use of the __raw_*() interfaces, which simply ensure that the | ||
84 | * compiler treats the portal registers as volatile. | ||
85 | */ | ||
86 | |||
87 | /* Cache-enabled ring access */ | ||
88 | #define qm_cl(base, idx) ((void *)base + ((idx) << 6)) | ||
89 | |||
90 | /* | ||
91 | * Portal modes. | ||
92 | * Enum types: | ||
93 | * pmode == production mode | ||
94 | * cmode == consumption mode | ||
95 | * dmode == h/w dequeue mode. | ||
96 | * Enum values use 3 letter codes. First letter matches the portal mode, | ||
97 | * remaining two letters indicate: | ||
98 | * ci == cache-inhibited portal register | ||
99 | * ce == cache-enabled portal register | ||
100 | * vb == in-band valid-bit (cache-enabled) | ||
101 | * dc == DCA (Discrete Consumption Acknowledgment), DQRR-only | ||
102 | * As for "enum qm_dqrr_dmode", it should be self-explanatory. | ||
103 | */ | ||
104 | enum qm_eqcr_pmode { /* matches QCSP_CFG::EPM */ | ||
105 | qm_eqcr_pci = 0, /* PI index, cache-inhibited */ | ||
106 | qm_eqcr_pce = 1, /* PI index, cache-enabled */ | ||
107 | qm_eqcr_pvb = 2 /* valid-bit */ | ||
108 | }; | ||
109 | enum qm_dqrr_dmode { /* matches QCSP_CFG::DP */ | ||
110 | qm_dqrr_dpush = 0, /* SDQCR + VDQCR */ | ||
111 | qm_dqrr_dpull = 1 /* PDQCR */ | ||
112 | }; | ||
113 | enum qm_dqrr_pmode { /* s/w-only */ | ||
114 | qm_dqrr_pci, /* reads DQRR_PI_CINH */ | ||
115 | qm_dqrr_pce, /* reads DQRR_PI_CENA */ | ||
116 | qm_dqrr_pvb /* reads valid-bit */ | ||
117 | }; | ||
118 | enum qm_dqrr_cmode { /* matches QCSP_CFG::DCM */ | ||
119 | qm_dqrr_cci = 0, /* CI index, cache-inhibited */ | ||
120 | qm_dqrr_cce = 1, /* CI index, cache-enabled */ | ||
121 | qm_dqrr_cdc = 2 /* Discrete Consumption Acknowledgment */ | ||
122 | }; | ||
123 | enum qm_mr_pmode { /* s/w-only */ | ||
124 | qm_mr_pci, /* reads MR_PI_CINH */ | ||
125 | qm_mr_pce, /* reads MR_PI_CENA */ | ||
126 | qm_mr_pvb /* reads valid-bit */ | ||
127 | }; | ||
128 | enum qm_mr_cmode { /* matches QCSP_CFG::MM */ | ||
129 | qm_mr_cci = 0, /* CI index, cache-inhibited */ | ||
130 | qm_mr_cce = 1 /* CI index, cache-enabled */ | ||
131 | }; | ||
132 | |||
133 | /* --- Portal structures --- */ | ||
134 | |||
135 | #define QM_EQCR_SIZE 8 | ||
136 | #define QM_DQRR_SIZE 16 | ||
137 | #define QM_MR_SIZE 8 | ||
138 | |||
139 | /* "Enqueue Command" */ | ||
140 | struct qm_eqcr_entry { | ||
141 | u8 _ncw_verb; /* writes to this are non-coherent */ | ||
142 | u8 dca; | ||
143 | u16 seqnum; | ||
144 | u32 orp; /* 24-bit */ | ||
145 | u32 fqid; /* 24-bit */ | ||
146 | u32 tag; | ||
147 | struct qm_fd fd; | ||
148 | u8 __reserved3[32]; | ||
149 | } __packed; | ||
150 | #define QM_EQCR_VERB_VBIT 0x80 | ||
151 | #define QM_EQCR_VERB_CMD_MASK 0x61 /* but only one value is used: */ | ||
152 | #define QM_EQCR_VERB_CMD_ENQUEUE 0x01 | ||
153 | #define QM_EQCR_SEQNUM_NESN 0x8000 /* Advance NESN */ | ||
154 | #define QM_EQCR_SEQNUM_NLIS 0x4000 /* More fragments to come */ | ||
155 | #define QM_EQCR_SEQNUM_SEQMASK 0x3fff /* sequence number goes here */ | ||
156 | |||
157 | struct qm_eqcr { | ||
158 | struct qm_eqcr_entry *ring, *cursor; | ||
159 | u8 ci, available, ithresh, vbit; | ||
160 | #ifdef CONFIG_FSL_DPAA_CHECKING | ||
161 | u32 busy; | ||
162 | enum qm_eqcr_pmode pmode; | ||
163 | #endif | ||
164 | }; | ||
165 | |||
166 | struct qm_dqrr { | ||
167 | const struct qm_dqrr_entry *ring, *cursor; | ||
168 | u8 pi, ci, fill, ithresh, vbit; | ||
169 | #ifdef CONFIG_FSL_DPAA_CHECKING | ||
170 | enum qm_dqrr_dmode dmode; | ||
171 | enum qm_dqrr_pmode pmode; | ||
172 | enum qm_dqrr_cmode cmode; | ||
173 | #endif | ||
174 | }; | ||
175 | |||
176 | struct qm_mr { | ||
177 | union qm_mr_entry *ring, *cursor; | ||
178 | u8 pi, ci, fill, ithresh, vbit; | ||
179 | #ifdef CONFIG_FSL_DPAA_CHECKING | ||
180 | enum qm_mr_pmode pmode; | ||
181 | enum qm_mr_cmode cmode; | ||
182 | #endif | ||
183 | }; | ||
184 | |||
185 | /* MC (Management Command) command */ | ||
186 | /* "Query FQ" */ | ||
187 | struct qm_mcc_queryfq { | ||
188 | u8 _ncw_verb; | ||
189 | u8 __reserved1[3]; | ||
190 | u32 fqid; /* 24-bit */ | ||
191 | u8 __reserved2[56]; | ||
192 | } __packed; | ||
193 | /* "Alter FQ State Commands " */ | ||
194 | struct qm_mcc_alterfq { | ||
195 | u8 _ncw_verb; | ||
196 | u8 __reserved1[3]; | ||
197 | u32 fqid; /* 24-bit */ | ||
198 | u8 __reserved2; | ||
199 | u8 count; /* number of consecutive FQID */ | ||
200 | u8 __reserved3[10]; | ||
201 | u32 context_b; /* frame queue context b */ | ||
202 | u8 __reserved4[40]; | ||
203 | } __packed; | ||
204 | |||
205 | /* "Query CGR" */ | ||
206 | struct qm_mcc_querycgr { | ||
207 | u8 _ncw_verb; | ||
208 | u8 __reserved1[30]; | ||
209 | u8 cgid; | ||
210 | u8 __reserved2[32]; | ||
211 | }; | ||
212 | |||
213 | struct qm_mcc_querywq { | ||
214 | u8 _ncw_verb; | ||
215 | u8 __reserved; | ||
216 | /* select channel if verb != QUERYWQ_DEDICATED */ | ||
217 | u16 channel_wq; /* ignores wq (3 lsbits): _res[0-2] */ | ||
218 | u8 __reserved2[60]; | ||
219 | } __packed; | ||
220 | |||
221 | #define QM_MCC_VERB_VBIT 0x80 | ||
222 | #define QM_MCC_VERB_MASK 0x7f /* the verb contains one of: */ | ||
223 | #define QM_MCC_VERB_INITFQ_PARKED 0x40 | ||
224 | #define QM_MCC_VERB_INITFQ_SCHED 0x41 | ||
225 | #define QM_MCC_VERB_QUERYFQ 0x44 | ||
226 | #define QM_MCC_VERB_QUERYFQ_NP 0x45 /* "non-programmable" fields */ | ||
227 | #define QM_MCC_VERB_QUERYWQ 0x46 | ||
228 | #define QM_MCC_VERB_QUERYWQ_DEDICATED 0x47 | ||
229 | #define QM_MCC_VERB_ALTER_SCHED 0x48 /* Schedule FQ */ | ||
230 | #define QM_MCC_VERB_ALTER_FE 0x49 /* Force Eligible FQ */ | ||
231 | #define QM_MCC_VERB_ALTER_RETIRE 0x4a /* Retire FQ */ | ||
232 | #define QM_MCC_VERB_ALTER_OOS 0x4b /* Take FQ out of service */ | ||
233 | #define QM_MCC_VERB_ALTER_FQXON 0x4d /* FQ XON */ | ||
234 | #define QM_MCC_VERB_ALTER_FQXOFF 0x4e /* FQ XOFF */ | ||
235 | #define QM_MCC_VERB_INITCGR 0x50 | ||
236 | #define QM_MCC_VERB_MODIFYCGR 0x51 | ||
237 | #define QM_MCC_VERB_CGRTESTWRITE 0x52 | ||
238 | #define QM_MCC_VERB_QUERYCGR 0x58 | ||
239 | #define QM_MCC_VERB_QUERYCONGESTION 0x59 | ||
240 | union qm_mc_command { | ||
241 | struct { | ||
242 | u8 _ncw_verb; /* writes to this are non-coherent */ | ||
243 | u8 __reserved[63]; | ||
244 | }; | ||
245 | struct qm_mcc_initfq initfq; | ||
246 | struct qm_mcc_queryfq queryfq; | ||
247 | struct qm_mcc_alterfq alterfq; | ||
248 | struct qm_mcc_initcgr initcgr; | ||
249 | struct qm_mcc_querycgr querycgr; | ||
250 | struct qm_mcc_querywq querywq; | ||
251 | struct qm_mcc_queryfq_np queryfq_np; | ||
252 | }; | ||
253 | |||
254 | /* MC (Management Command) result */ | ||
255 | /* "Query FQ" */ | ||
256 | struct qm_mcr_queryfq { | ||
257 | u8 verb; | ||
258 | u8 result; | ||
259 | u8 __reserved1[8]; | ||
260 | struct qm_fqd fqd; /* the FQD fields are here */ | ||
261 | u8 __reserved2[30]; | ||
262 | } __packed; | ||
263 | |||
264 | /* "Alter FQ State Commands" */ | ||
265 | struct qm_mcr_alterfq { | ||
266 | u8 verb; | ||
267 | u8 result; | ||
268 | u8 fqs; /* Frame Queue Status */ | ||
269 | u8 __reserved1[61]; | ||
270 | }; | ||
271 | #define QM_MCR_VERB_RRID 0x80 | ||
272 | #define QM_MCR_VERB_MASK QM_MCC_VERB_MASK | ||
273 | #define QM_MCR_VERB_INITFQ_PARKED QM_MCC_VERB_INITFQ_PARKED | ||
274 | #define QM_MCR_VERB_INITFQ_SCHED QM_MCC_VERB_INITFQ_SCHED | ||
275 | #define QM_MCR_VERB_QUERYFQ QM_MCC_VERB_QUERYFQ | ||
276 | #define QM_MCR_VERB_QUERYFQ_NP QM_MCC_VERB_QUERYFQ_NP | ||
277 | #define QM_MCR_VERB_QUERYWQ QM_MCC_VERB_QUERYWQ | ||
278 | #define QM_MCR_VERB_QUERYWQ_DEDICATED QM_MCC_VERB_QUERYWQ_DEDICATED | ||
279 | #define QM_MCR_VERB_ALTER_SCHED QM_MCC_VERB_ALTER_SCHED | ||
280 | #define QM_MCR_VERB_ALTER_FE QM_MCC_VERB_ALTER_FE | ||
281 | #define QM_MCR_VERB_ALTER_RETIRE QM_MCC_VERB_ALTER_RETIRE | ||
282 | #define QM_MCR_VERB_ALTER_OOS QM_MCC_VERB_ALTER_OOS | ||
283 | #define QM_MCR_RESULT_NULL 0x00 | ||
284 | #define QM_MCR_RESULT_OK 0xf0 | ||
285 | #define QM_MCR_RESULT_ERR_FQID 0xf1 | ||
286 | #define QM_MCR_RESULT_ERR_FQSTATE 0xf2 | ||
287 | #define QM_MCR_RESULT_ERR_NOTEMPTY 0xf3 /* OOS fails if FQ is !empty */ | ||
288 | #define QM_MCR_RESULT_ERR_BADCHANNEL 0xf4 | ||
289 | #define QM_MCR_RESULT_PENDING 0xf8 | ||
290 | #define QM_MCR_RESULT_ERR_BADCOMMAND 0xff | ||
291 | #define QM_MCR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */ | ||
292 | #define QM_MCR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */ | ||
293 | #define QM_MCR_TIMEOUT 10000 /* us */ | ||
294 | union qm_mc_result { | ||
295 | struct { | ||
296 | u8 verb; | ||
297 | u8 result; | ||
298 | u8 __reserved1[62]; | ||
299 | }; | ||
300 | struct qm_mcr_queryfq queryfq; | ||
301 | struct qm_mcr_alterfq alterfq; | ||
302 | struct qm_mcr_querycgr querycgr; | ||
303 | struct qm_mcr_querycongestion querycongestion; | ||
304 | struct qm_mcr_querywq querywq; | ||
305 | struct qm_mcr_queryfq_np queryfq_np; | ||
306 | }; | ||
307 | |||
308 | struct qm_mc { | ||
309 | union qm_mc_command *cr; | ||
310 | union qm_mc_result *rr; | ||
311 | u8 rridx, vbit; | ||
312 | #ifdef CONFIG_FSL_DPAA_CHECKING | ||
313 | enum { | ||
314 | /* Can be _mc_start()ed */ | ||
315 | qman_mc_idle, | ||
316 | /* Can be _mc_commit()ed or _mc_abort()ed */ | ||
317 | qman_mc_user, | ||
318 | /* Can only be _mc_retry()ed */ | ||
319 | qman_mc_hw | ||
320 | } state; | ||
321 | #endif | ||
322 | }; | ||
323 | |||
324 | struct qm_addr { | ||
325 | void __iomem *ce; /* cache-enabled */ | ||
326 | void __iomem *ci; /* cache-inhibited */ | ||
327 | }; | ||
328 | |||
329 | struct qm_portal { | ||
330 | /* | ||
331 | * In the non-CONFIG_FSL_DPAA_CHECKING case, the following stuff up to | ||
332 | * and including 'mc' fits within a cacheline (yay!). The 'config' part | ||
333 | * is setup-only, so isn't a cause for concern. In other words, don't | ||
334 | * rearrange this structure on a whim, there be dragons ... | ||
335 | */ | ||
336 | struct qm_addr addr; | ||
337 | struct qm_eqcr eqcr; | ||
338 | struct qm_dqrr dqrr; | ||
339 | struct qm_mr mr; | ||
340 | struct qm_mc mc; | ||
341 | } ____cacheline_aligned; | ||
342 | |||
343 | /* Cache-inhibited register access. */ | ||
344 | static inline u32 qm_in(struct qm_portal *p, u32 offset) | ||
345 | { | ||
346 | return __raw_readl(p->addr.ci + offset); | ||
347 | } | ||
348 | |||
349 | static inline void qm_out(struct qm_portal *p, u32 offset, u32 val) | ||
350 | { | ||
351 | __raw_writel(val, p->addr.ci + offset); | ||
352 | } | ||
353 | |||
354 | /* Cache Enabled Portal Access */ | ||
355 | static inline void qm_cl_invalidate(struct qm_portal *p, u32 offset) | ||
356 | { | ||
357 | dpaa_invalidate(p->addr.ce + offset); | ||
358 | } | ||
359 | |||
360 | static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset) | ||
361 | { | ||
362 | dpaa_touch_ro(p->addr.ce + offset); | ||
363 | } | ||
364 | |||
365 | static inline u32 qm_ce_in(struct qm_portal *p, u32 offset) | ||
366 | { | ||
367 | return __raw_readl(p->addr.ce + offset); | ||
368 | } | ||
369 | |||
370 | /* --- EQCR API --- */ | ||
371 | |||
372 | #define EQCR_SHIFT ilog2(sizeof(struct qm_eqcr_entry)) | ||
373 | #define EQCR_CARRY (uintptr_t)(QM_EQCR_SIZE << EQCR_SHIFT) | ||
374 | |||
375 | /* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */ | ||
376 | static struct qm_eqcr_entry *eqcr_carryclear(struct qm_eqcr_entry *p) | ||
377 | { | ||
378 | uintptr_t addr = (uintptr_t)p; | ||
379 | |||
380 | addr &= ~EQCR_CARRY; | ||
381 | |||
382 | return (struct qm_eqcr_entry *)addr; | ||
383 | } | ||
384 | |||
385 | /* Bit-wise logic to convert a ring pointer to a ring index */ | ||
386 | static int eqcr_ptr2idx(struct qm_eqcr_entry *e) | ||
387 | { | ||
388 | return ((uintptr_t)e >> EQCR_SHIFT) & (QM_EQCR_SIZE - 1); | ||
389 | } | ||
390 | |||
391 | /* Increment the 'cursor' ring pointer, taking 'vbit' into account */ | ||
392 | static inline void eqcr_inc(struct qm_eqcr *eqcr) | ||
393 | { | ||
394 | /* increment to the next EQCR pointer and handle overflow and 'vbit' */ | ||
395 | struct qm_eqcr_entry *partial = eqcr->cursor + 1; | ||
396 | |||
397 | eqcr->cursor = eqcr_carryclear(partial); | ||
398 | if (partial != eqcr->cursor) | ||
399 | eqcr->vbit ^= QM_EQCR_VERB_VBIT; | ||
400 | } | ||
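The carry-clear trick above works because each ring is a power-of-two number of 64-byte entries mapped at a size-aligned address, so stepping past the last entry sets exactly one bit (EQCR_CARRY = 8 << 6 = 0x200) above the ring offset. Worked through with an assumed ring base of 0x1000 (addresses are illustrative only):

    /* Entry 7 of an 8-entry EQCR based at 0x1000 sits at 0x11c0.
     * Incrementing past it gives 0x1200; clearing the 0x200 carry bit
     * wraps back to 0x1000, i.e. index 0. */
    struct qm_eqcr_entry *e7 = (struct qm_eqcr_entry *)0x11c0UL;
    struct qm_eqcr_entry *wrap = eqcr_carryclear(e7 + 1); /* == 0x1000 */

    /* And ptr2idx recovers the index: (0x11c0 >> 6) & 7 == 7 */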
401 | |||
402 | static inline int qm_eqcr_init(struct qm_portal *portal, | ||
403 | enum qm_eqcr_pmode pmode, | ||
404 | unsigned int eq_stash_thresh, | ||
405 | int eq_stash_prio) | ||
406 | { | ||
407 | struct qm_eqcr *eqcr = &portal->eqcr; | ||
408 | u32 cfg; | ||
409 | u8 pi; | ||
410 | |||
411 | eqcr->ring = portal->addr.ce + QM_CL_EQCR; | ||
412 | eqcr->ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1); | ||
413 | qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA); | ||
414 | pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1); | ||
415 | eqcr->cursor = eqcr->ring + pi; | ||
416 | eqcr->vbit = (qm_in(portal, QM_REG_EQCR_PI_CINH) & QM_EQCR_SIZE) ? | ||
417 | QM_EQCR_VERB_VBIT : 0; | ||
418 | eqcr->available = QM_EQCR_SIZE - 1 - | ||
419 | dpaa_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi); | ||
420 | eqcr->ithresh = qm_in(portal, QM_REG_EQCR_ITR); | ||
421 | #ifdef CONFIG_FSL_DPAA_CHECKING | ||
422 | eqcr->busy = 0; | ||
423 | eqcr->pmode = pmode; | ||
424 | #endif | ||
425 | cfg = (qm_in(portal, QM_REG_CFG) & 0x00ffffff) | | ||
426 | (eq_stash_thresh << 28) | /* QCSP_CFG: EST */ | ||
427 | (eq_stash_prio << 26) | /* QCSP_CFG: EP */ | ||
428 | ((pmode & 0x3) << 24); /* QCSP_CFG::EPM */ | ||
429 | qm_out(portal, QM_REG_CFG, cfg); | ||
430 | return 0; | ||
431 | } | ||
432 | |||
433 | static inline unsigned int qm_eqcr_get_ci_stashing(struct qm_portal *portal) | ||
434 | { | ||
435 | return (qm_in(portal, QM_REG_CFG) >> 28) & 0x7; | ||
436 | } | ||
437 | |||
438 | static inline void qm_eqcr_finish(struct qm_portal *portal) | ||
439 | { | ||
440 | struct qm_eqcr *eqcr = &portal->eqcr; | ||
441 | u8 pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1); | ||
442 | u8 ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1); | ||
443 | |||
444 | DPAA_ASSERT(!eqcr->busy); | ||
445 | if (pi != eqcr_ptr2idx(eqcr->cursor)) | ||
446 | pr_crit("losing uncommited EQCR entries\n"); | ||
447 | if (ci != eqcr->ci) | ||
448 | pr_crit("missing existing EQCR completions\n"); | ||
449 | if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor)) | ||
450 | pr_crit("EQCR destroyed unquiesced\n"); | ||
451 | } | ||
452 | |||
453 | static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal | ||
454 | *portal) | ||
455 | { | ||
456 | struct qm_eqcr *eqcr = &portal->eqcr; | ||
457 | |||
458 | DPAA_ASSERT(!eqcr->busy); | ||
459 | if (!eqcr->available) | ||
460 | return NULL; | ||
461 | |||
462 | #ifdef CONFIG_FSL_DPAA_CHECKING | ||
463 | eqcr->busy = 1; | ||
464 | #endif | ||
465 | dpaa_zero(eqcr->cursor); | ||
466 | return eqcr->cursor; | ||
467 | } | ||
468 | |||
469 | static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal | ||
470 | *portal) | ||
471 | { | ||
472 | struct qm_eqcr *eqcr = &portal->eqcr; | ||
473 | u8 diff, old_ci; | ||
474 | |||
475 | DPAA_ASSERT(!eqcr->busy); | ||
476 | if (!eqcr->available) { | ||
477 | old_ci = eqcr->ci; | ||
478 | eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) & | ||
479 | (QM_EQCR_SIZE - 1); | ||
480 | diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci); | ||
481 | eqcr->available += diff; | ||
482 | if (!diff) | ||
483 | return NULL; | ||
484 | } | ||
485 | #ifdef CONFIG_FSL_DPAA_CHECKING | ||
486 | eqcr->busy = 1; | ||
487 | #endif | ||
488 | dpaa_zero(eqcr->cursor); | ||
489 | return eqcr->cursor; | ||
490 | } | ||
491 | |||
492 | static inline void eqcr_commit_checks(struct qm_eqcr *eqcr) | ||
493 | { | ||
494 | DPAA_ASSERT(eqcr->busy); | ||
495 | DPAA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & 0x00ffffff)); | ||
496 | DPAA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & 0x00ffffff)); | ||
497 | DPAA_ASSERT(eqcr->available >= 1); | ||
498 | } | ||
499 | |||
500 | static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb) | ||
501 | { | ||
502 | struct qm_eqcr *eqcr = &portal->eqcr; | ||
503 | struct qm_eqcr_entry *eqcursor; | ||
504 | |||
505 | eqcr_commit_checks(eqcr); | ||
506 | DPAA_ASSERT(eqcr->pmode == qm_eqcr_pvb); | ||
507 | dma_wmb(); | ||
508 | eqcursor = eqcr->cursor; | ||
509 | eqcursor->_ncw_verb = myverb | eqcr->vbit; | ||
510 | dpaa_flush(eqcursor); | ||
511 | eqcr_inc(eqcr); | ||
512 | eqcr->available--; | ||
513 | #ifdef CONFIG_FSL_DPAA_CHECKING | ||
514 | eqcr->busy = 0; | ||
515 | #endif | ||
516 | } | ||
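The commit above encodes the PVB ordering contract: the enqueue payload must be globally visible before the verb byte (carrying the valid bit) is written, because the hardware detects new entries by snooping the verb. A hedged sketch of a caller, assuming 'portal', 'fqid' and 'fd' are in scope (this is not a function from this file):

    struct qm_eqcr_entry *eq = qm_eqcr_start_no_stash(portal);

    if (!eq)
            return -EBUSY;          /* EQCR full */
    eq->fqid = fqid;                /* 24-bit frame queue id */
    eq->fd = *fd;                   /* payload first ... */
    /* ... then the verb; the dma_wmb() inside the commit orders the two */
    qm_eqcr_pvb_commit(portal, QM_EQCR_VERB_CMD_ENQUEUE);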
517 | |||
518 | static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal) | ||
519 | { | ||
520 | qm_cl_touch_ro(portal, QM_CL_EQCR_CI_CENA); | ||
521 | } | ||
522 | |||
523 | static inline u8 qm_eqcr_cce_update(struct qm_portal *portal) | ||
524 | { | ||
525 | struct qm_eqcr *eqcr = &portal->eqcr; | ||
526 | u8 diff, old_ci = eqcr->ci; | ||
527 | |||
528 | eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) & (QM_EQCR_SIZE - 1); | ||
529 | qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA); | ||
530 | diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci); | ||
531 | eqcr->available += diff; | ||
532 | return diff; | ||
533 | } | ||
534 | |||
535 | static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh) | ||
536 | { | ||
537 | struct qm_eqcr *eqcr = &portal->eqcr; | ||
538 | |||
539 | eqcr->ithresh = ithresh; | ||
540 | qm_out(portal, QM_REG_EQCR_ITR, ithresh); | ||
541 | } | ||
542 | |||
543 | static inline u8 qm_eqcr_get_avail(struct qm_portal *portal) | ||
544 | { | ||
545 | struct qm_eqcr *eqcr = &portal->eqcr; | ||
546 | |||
547 | return eqcr->available; | ||
548 | } | ||
549 | |||
550 | static inline u8 qm_eqcr_get_fill(struct qm_portal *portal) | ||
551 | { | ||
552 | struct qm_eqcr *eqcr = &portal->eqcr; | ||
553 | |||
554 | return QM_EQCR_SIZE - 1 - eqcr->available; | ||
555 | } | ||
556 | |||
557 | /* --- DQRR API --- */ | ||
558 | |||
559 | #define DQRR_SHIFT ilog2(sizeof(struct qm_dqrr_entry)) | ||
560 | #define DQRR_CARRY (uintptr_t)(QM_DQRR_SIZE << DQRR_SHIFT) | ||
561 | |||
562 | static const struct qm_dqrr_entry *dqrr_carryclear( | ||
563 | const struct qm_dqrr_entry *p) | ||
564 | { | ||
565 | uintptr_t addr = (uintptr_t)p; | ||
566 | |||
567 | addr &= ~DQRR_CARRY; | ||
568 | |||
569 | return (const struct qm_dqrr_entry *)addr; | ||
570 | } | ||
571 | |||
572 | static inline int dqrr_ptr2idx(const struct qm_dqrr_entry *e) | ||
573 | { | ||
574 | return ((uintptr_t)e >> DQRR_SHIFT) & (QM_DQRR_SIZE - 1); | ||
575 | } | ||
576 | |||
577 | static const struct qm_dqrr_entry *dqrr_inc(const struct qm_dqrr_entry *e) | ||
578 | { | ||
579 | return dqrr_carryclear(e + 1); | ||
580 | } | ||
581 | |||
582 | static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf) | ||
583 | { | ||
584 | qm_out(portal, QM_REG_CFG, (qm_in(portal, QM_REG_CFG) & 0xff0fffff) | | ||
585 | ((mf & (QM_DQRR_SIZE - 1)) << 20)); | ||
586 | } | ||
587 | |||
588 | static inline int qm_dqrr_init(struct qm_portal *portal, | ||
589 | const struct qm_portal_config *config, | ||
590 | enum qm_dqrr_dmode dmode, | ||
591 | enum qm_dqrr_pmode pmode, | ||
592 | enum qm_dqrr_cmode cmode, u8 max_fill) | ||
593 | { | ||
594 | struct qm_dqrr *dqrr = &portal->dqrr; | ||
595 | u32 cfg; | ||
596 | |||
597 | /* Make sure the DQRR will be idle when we enable */ | ||
598 | qm_out(portal, QM_REG_DQRR_SDQCR, 0); | ||
599 | qm_out(portal, QM_REG_DQRR_VDQCR, 0); | ||
600 | qm_out(portal, QM_REG_DQRR_PDQCR, 0); | ||
601 | dqrr->ring = portal->addr.ce + QM_CL_DQRR; | ||
602 | dqrr->pi = qm_in(portal, QM_REG_DQRR_PI_CINH) & (QM_DQRR_SIZE - 1); | ||
603 | dqrr->ci = qm_in(portal, QM_REG_DQRR_CI_CINH) & (QM_DQRR_SIZE - 1); | ||
604 | dqrr->cursor = dqrr->ring + dqrr->ci; | ||
605 | dqrr->fill = dpaa_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi); | ||
606 | dqrr->vbit = (qm_in(portal, QM_REG_DQRR_PI_CINH) & QM_DQRR_SIZE) ? | ||
607 | QM_DQRR_VERB_VBIT : 0; | ||
608 | dqrr->ithresh = qm_in(portal, QM_REG_DQRR_ITR); | ||
609 | #ifdef CONFIG_FSL_DPAA_CHECKING | ||
610 | dqrr->dmode = dmode; | ||
611 | dqrr->pmode = pmode; | ||
612 | dqrr->cmode = cmode; | ||
613 | #endif | ||
614 | /* Invalidate every ring entry before beginning */ | ||
615 | for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++) | ||
616 | dpaa_invalidate(qm_cl(dqrr->ring, cfg)); | ||
617 | cfg = (qm_in(portal, QM_REG_CFG) & 0xff000f00) | | ||
618 | ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */ | ||
619 | ((dmode & 1) << 18) | /* DP */ | ||
620 | ((cmode & 3) << 16) | /* DCM */ | ||
621 | 0xa0 | /* RE+SE */ | ||
622 | (0 ? 0x40 : 0) | /* Ignore RP */ | ||
623 | (0 ? 0x10 : 0); /* Ignore SP */ | ||
624 | qm_out(portal, QM_REG_CFG, cfg); | ||
625 | qm_dqrr_set_maxfill(portal, max_fill); | ||
626 | return 0; | ||
627 | } | ||
628 | |||
629 | static inline void qm_dqrr_finish(struct qm_portal *portal) | ||
630 | { | ||
631 | #ifdef CONFIG_FSL_DPAA_CHECKING | ||
632 | struct qm_dqrr *dqrr = &portal->dqrr; | ||
633 | |||
634 | if (dqrr->cmode != qm_dqrr_cdc && | ||
635 | dqrr->ci != dqrr_ptr2idx(dqrr->cursor)) | ||
636 | pr_crit("Ignoring completed DQRR entries\n"); | ||
637 | #endif | ||
638 | } | ||
639 | |||
640 | static inline const struct qm_dqrr_entry *qm_dqrr_current( | ||
641 | struct qm_portal *portal) | ||
642 | { | ||
643 | struct qm_dqrr *dqrr = &portal->dqrr; | ||
644 | |||
645 | if (!dqrr->fill) | ||
646 | return NULL; | ||
647 | return dqrr->cursor; | ||
648 | } | ||
649 | |||
650 | static inline u8 qm_dqrr_next(struct qm_portal *portal) | ||
651 | { | ||
652 | struct qm_dqrr *dqrr = &portal->dqrr; | ||
653 | |||
654 | DPAA_ASSERT(dqrr->fill); | ||
655 | dqrr->cursor = dqrr_inc(dqrr->cursor); | ||
656 | return --dqrr->fill; | ||
657 | } | ||
658 | |||
659 | static inline void qm_dqrr_pvb_update(struct qm_portal *portal) | ||
660 | { | ||
661 | struct qm_dqrr *dqrr = &portal->dqrr; | ||
662 | struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi); | ||
663 | |||
664 | DPAA_ASSERT(dqrr->pmode == qm_dqrr_pvb); | ||
665 | #ifndef CONFIG_FSL_PAMU | ||
666 | /* | ||
667 | * If PAMU is not available we need to invalidate the cache. | ||
668 | * When PAMU is available the cache is updated by stash | ||
669 | */ | ||
670 | dpaa_invalidate_touch_ro(res); | ||
671 | #endif | ||
672 | /* | ||
673 | * when accessing 'verb', use __raw_readb() to ensure that compiler | ||
674 | * inlining doesn't try to optimise out "excess reads". | ||
675 | */ | ||
676 | if ((__raw_readb(&res->verb) & QM_DQRR_VERB_VBIT) == dqrr->vbit) { | ||
677 | dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1); | ||
678 | if (!dqrr->pi) | ||
679 | dqrr->vbit ^= QM_DQRR_VERB_VBIT; | ||
680 | dqrr->fill++; | ||
681 | } | ||
682 | } | ||
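The valid bit is how software discovers new DQRR entries without reading a hardware producer index: on every full pass of the 16-entry ring the hardware inverts the bit it writes, and the driver flips its expected 'vbit' whenever pi wraps to 0. A toy model of one pass boundary (illustration only, not driver code):

    u8 expected = QM_DQRR_VERB_VBIT;    /* what a fresh entry should carry */
    int pi;

    for (pi = 0; pi < 2 * QM_DQRR_SIZE; pi++) {
            /* hardware writes VBIT on the first pass, clears it on the next */
            u8 hw = (pi < QM_DQRR_SIZE) ? QM_DQRR_VERB_VBIT : 0;

            if ((hw & QM_DQRR_VERB_VBIT) != expected)
                    break;                          /* stale entry: stop */
            if (((pi + 1) & (QM_DQRR_SIZE - 1)) == 0)
                    expected ^= QM_DQRR_VERB_VBIT;  /* wrapped: flip expectation */
    }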
683 | |||
684 | static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal, | ||
685 | const struct qm_dqrr_entry *dq, | ||
686 | int park) | ||
687 | { | ||
688 | __maybe_unused struct qm_dqrr *dqrr = &portal->dqrr; | ||
689 | int idx = dqrr_ptr2idx(dq); | ||
690 | |||
691 | DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc); | ||
692 | DPAA_ASSERT((dqrr->ring + idx) == dq); | ||
693 | DPAA_ASSERT(idx < QM_DQRR_SIZE); | ||
694 | qm_out(portal, QM_REG_DQRR_DCAP, (0 << 8) | /* DQRR_DCAP::S */ | ||
695 | ((park ? 1 : 0) << 6) | /* DQRR_DCAP::PK */ | ||
696 | idx); /* DQRR_DCAP::DCAP_CI */ | ||
697 | } | ||
698 | |||
699 | static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u32 bitmask) | ||
700 | { | ||
701 | __maybe_unused struct qm_dqrr *dqrr = &portal->dqrr; | ||
702 | |||
703 | DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc); | ||
704 | qm_out(portal, QM_REG_DQRR_DCAP, (1 << 8) | /* DQRR_DCAP::S */ | ||
705 | (bitmask << 16)); /* DQRR_DCAP::DCAP_CI */ | ||
706 | } | ||
707 | |||
708 | static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr) | ||
709 | { | ||
710 | qm_out(portal, QM_REG_DQRR_SDQCR, sdqcr); | ||
711 | } | ||
712 | |||
713 | static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr) | ||
714 | { | ||
715 | qm_out(portal, QM_REG_DQRR_VDQCR, vdqcr); | ||
716 | } | ||
717 | |||
718 | static inline void qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh) | ||
719 | { | ||
720 | qm_out(portal, QM_REG_DQRR_ITR, ithresh); | ||
721 | } | ||
722 | |||
723 | /* --- MR API --- */ | ||
724 | |||
725 | #define MR_SHIFT ilog2(sizeof(union qm_mr_entry)) | ||
726 | #define MR_CARRY (uintptr_t)(QM_MR_SIZE << MR_SHIFT) | ||
727 | |||
728 | static union qm_mr_entry *mr_carryclear(union qm_mr_entry *p) | ||
729 | { | ||
730 | uintptr_t addr = (uintptr_t)p; | ||
731 | |||
732 | addr &= ~MR_CARRY; | ||
733 | |||
734 | return (union qm_mr_entry *)addr; | ||
735 | } | ||
736 | |||
737 | static inline int mr_ptr2idx(const union qm_mr_entry *e) | ||
738 | { | ||
739 | return ((uintptr_t)e >> MR_SHIFT) & (QM_MR_SIZE - 1); | ||
740 | } | ||
741 | |||
742 | static inline union qm_mr_entry *mr_inc(union qm_mr_entry *e) | ||
743 | { | ||
744 | return mr_carryclear(e + 1); | ||
745 | } | ||
746 | |||
747 | static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode, | ||
748 | enum qm_mr_cmode cmode) | ||
749 | { | ||
750 | struct qm_mr *mr = &portal->mr; | ||
751 | u32 cfg; | ||
752 | |||
753 | mr->ring = portal->addr.ce + QM_CL_MR; | ||
754 | mr->pi = qm_in(portal, QM_REG_MR_PI_CINH) & (QM_MR_SIZE - 1); | ||
755 | mr->ci = qm_in(portal, QM_REG_MR_CI_CINH) & (QM_MR_SIZE - 1); | ||
756 | mr->cursor = mr->ring + mr->ci; | ||
757 | mr->fill = dpaa_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi); | ||
758 | mr->vbit = (qm_in(portal, QM_REG_MR_PI_CINH) & QM_MR_SIZE) | ||
759 | ? QM_MR_VERB_VBIT : 0; | ||
760 | mr->ithresh = qm_in(portal, QM_REG_MR_ITR); | ||
761 | #ifdef CONFIG_FSL_DPAA_CHECKING | ||
762 | mr->pmode = pmode; | ||
763 | mr->cmode = cmode; | ||
764 | #endif | ||
765 | cfg = (qm_in(portal, QM_REG_CFG) & 0xfffff0ff) | | ||
766 | ((cmode & 1) << 8); /* QCSP_CFG:MM */ | ||
767 | qm_out(portal, QM_REG_CFG, cfg); | ||
768 | return 0; | ||
769 | } | ||
770 | |||
771 | static inline void qm_mr_finish(struct qm_portal *portal) | ||
772 | { | ||
773 | struct qm_mr *mr = &portal->mr; | ||
774 | |||
775 | if (mr->ci != mr_ptr2idx(mr->cursor)) | ||
776 | pr_crit("Ignoring completed MR entries\n"); | ||
777 | } | ||
778 | |||
779 | static inline const union qm_mr_entry *qm_mr_current(struct qm_portal *portal) | ||
780 | { | ||
781 | struct qm_mr *mr = &portal->mr; | ||
782 | |||
783 | if (!mr->fill) | ||
784 | return NULL; | ||
785 | return mr->cursor; | ||
786 | } | ||
787 | |||
788 | static inline int qm_mr_next(struct qm_portal *portal) | ||
789 | { | ||
790 | struct qm_mr *mr = &portal->mr; | ||
791 | |||
792 | DPAA_ASSERT(mr->fill); | ||
793 | mr->cursor = mr_inc(mr->cursor); | ||
794 | return --mr->fill; | ||
795 | } | ||
796 | |||
797 | static inline void qm_mr_pvb_update(struct qm_portal *portal) | ||
798 | { | ||
799 | struct qm_mr *mr = &portal->mr; | ||
800 | union qm_mr_entry *res = qm_cl(mr->ring, mr->pi); | ||
801 | |||
802 | DPAA_ASSERT(mr->pmode == qm_mr_pvb); | ||
803 | /* | ||
804 | * when accessing 'verb', use __raw_readb() to ensure that compiler | ||
805 | * inlining doesn't try to optimise out "excess reads". | ||
806 | */ | ||
807 | if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) { | ||
808 | mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1); | ||
809 | if (!mr->pi) | ||
810 | mr->vbit ^= QM_MR_VERB_VBIT; | ||
811 | mr->fill++; | ||
812 | res = mr_inc(res); | ||
813 | } | ||
814 | dpaa_invalidate_touch_ro(res); | ||
815 | } | ||
816 | |||
817 | static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num) | ||
818 | { | ||
819 | struct qm_mr *mr = &portal->mr; | ||
820 | |||
821 | DPAA_ASSERT(mr->cmode == qm_mr_cci); | ||
822 | mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1); | ||
823 | qm_out(portal, QM_REG_MR_CI_CINH, mr->ci); | ||
824 | } | ||
825 | |||
826 | static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal) | ||
827 | { | ||
828 | struct qm_mr *mr = &portal->mr; | ||
829 | |||
830 | DPAA_ASSERT(mr->cmode == qm_mr_cci); | ||
831 | mr->ci = mr_ptr2idx(mr->cursor); | ||
832 | qm_out(portal, QM_REG_MR_CI_CINH, mr->ci); | ||
833 | } | ||
834 | |||
835 | static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh) | ||
836 | { | ||
837 | qm_out(portal, QM_REG_MR_ITR, ithresh); | ||
838 | } | ||
839 | |||
840 | /* --- Management command API --- */ | ||
841 | |||
842 | static inline int qm_mc_init(struct qm_portal *portal) | ||
843 | { | ||
844 | struct qm_mc *mc = &portal->mc; | ||
845 | |||
846 | mc->cr = portal->addr.ce + QM_CL_CR; | ||
847 | mc->rr = portal->addr.ce + QM_CL_RR0; | ||
848 | mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & QM_MCC_VERB_VBIT) | ||
849 | ? 0 : 1; | ||
850 | mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0; | ||
851 | #ifdef CONFIG_FSL_DPAA_CHECKING | ||
852 | mc->state = qman_mc_idle; | ||
853 | #endif | ||
854 | return 0; | ||
855 | } | ||
856 | |||
857 | static inline void qm_mc_finish(struct qm_portal *portal) | ||
858 | { | ||
859 | #ifdef CONFIG_FSL_DPAA_CHECKING | ||
860 | struct qm_mc *mc = &portal->mc; | ||
861 | |||
862 | DPAA_ASSERT(mc->state == qman_mc_idle); | ||
863 | if (mc->state != qman_mc_idle) | ||
864 | pr_crit("Losing incomplete MC command\n"); | ||
865 | #endif | ||
866 | } | ||
867 | |||
868 | static inline union qm_mc_command *qm_mc_start(struct qm_portal *portal) | ||
869 | { | ||
870 | struct qm_mc *mc = &portal->mc; | ||
871 | |||
872 | DPAA_ASSERT(mc->state == qman_mc_idle); | ||
873 | #ifdef CONFIG_FSL_DPAA_CHECKING | ||
874 | mc->state = qman_mc_user; | ||
875 | #endif | ||
876 | dpaa_zero(mc->cr); | ||
877 | return mc->cr; | ||
878 | } | ||
879 | |||
880 | static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb) | ||
881 | { | ||
882 | struct qm_mc *mc = &portal->mc; | ||
883 | union qm_mc_result *rr = mc->rr + mc->rridx; | ||
884 | |||
885 | DPAA_ASSERT(mc->state == qman_mc_user); | ||
886 | dma_wmb(); | ||
887 | mc->cr->_ncw_verb = myverb | mc->vbit; | ||
888 | dpaa_flush(mc->cr); | ||
889 | dpaa_invalidate_touch_ro(rr); | ||
890 | #ifdef CONFIG_FSL_DPAA_CHECKING | ||
891 | mc->state = qman_mc_hw; | ||
892 | #endif | ||
893 | } | ||
894 | |||
895 | static inline union qm_mc_result *qm_mc_result(struct qm_portal *portal) | ||
896 | { | ||
897 | struct qm_mc *mc = &portal->mc; | ||
898 | union qm_mc_result *rr = mc->rr + mc->rridx; | ||
899 | |||
900 | DPAA_ASSERT(mc->state == qman_mc_hw); | ||
901 | /* | ||
902 | * The inactive response register's verb byte always returns zero until | ||
903 | * its command is submitted and completed. This includes the valid-bit, | ||
904 | * in case you were wondering... | ||
905 | */ | ||
906 | if (!__raw_readb(&rr->verb)) { | ||
907 | dpaa_invalidate_touch_ro(rr); | ||
908 | return NULL; | ||
909 | } | ||
910 | mc->rridx ^= 1; | ||
911 | mc->vbit ^= QM_MCC_VERB_VBIT; | ||
912 | #ifdef CONFIG_FSL_DPAA_CHECKING | ||
913 | mc->state = qman_mc_idle; | ||
914 | #endif | ||
915 | return rr; | ||
916 | } | ||
917 | |||
918 | static inline int qm_mc_result_timeout(struct qm_portal *portal, | ||
919 | union qm_mc_result **mcr) | ||
920 | { | ||
921 | int timeout = QM_MCR_TIMEOUT; | ||
922 | |||
923 | do { | ||
924 | *mcr = qm_mc_result(portal); | ||
925 | if (*mcr) | ||
926 | break; | ||
927 | udelay(1); | ||
928 | } while (--timeout); | ||
929 | |||
930 | return timeout; | ||
931 | } | ||
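Taken together, the MC helpers above are always used as a start/fill/commit/poll sequence; qm_mc_result() internally resolves which of RR0/RR1 the valid-bit ping-pong selected. A hedged sketch of querying a frame queue (error handling trimmed; 'portal' and 'fqid' are assumed to be in scope):

    union qm_mc_command *mcc = qm_mc_start(&portal->p);
    union qm_mc_result *mcr;

    mcc->queryfq.fqid = fqid;                  /* fill the command */
    qm_mc_commit(&portal->p, QM_MCC_VERB_QUERYFQ);
    if (!qm_mc_result_timeout(&portal->p, &mcr))
            return -ETIMEDOUT;                 /* h/w never responded */
    if (mcr->result != QM_MCR_RESULT_OK)
            return -EIO;
    /* mcr->queryfq.fqd now holds the frame queue descriptor */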
932 | |||
933 | static inline void fq_set(struct qman_fq *fq, u32 mask) | ||
934 | { | ||
935 | set_bits(mask, &fq->flags); | ||
936 | } | ||
937 | |||
938 | static inline void fq_clear(struct qman_fq *fq, u32 mask) | ||
939 | { | ||
940 | clear_bits(mask, &fq->flags); | ||
941 | } | ||
942 | |||
943 | static inline int fq_isset(struct qman_fq *fq, u32 mask) | ||
944 | { | ||
945 | return fq->flags & mask; | ||
946 | } | ||
947 | |||
948 | static inline int fq_isclear(struct qman_fq *fq, u32 mask) | ||
949 | { | ||
950 | return !(fq->flags & mask); | ||
951 | } | ||
952 | |||
953 | struct qman_portal { | ||
954 | struct qm_portal p; | ||
955 | /* PORTAL_BITS_*** - dynamic, strictly internal */ | ||
956 | unsigned long bits; | ||
957 | /* interrupt sources processed by portal_isr(), configurable */ | ||
958 | unsigned long irq_sources; | ||
959 | u32 use_eqcr_ci_stashing; | ||
960 | /* only 1 volatile dequeue at a time */ | ||
961 | struct qman_fq *vdqcr_owned; | ||
962 | u32 sdqcr; | ||
963 | /* probing time config params for cpu-affine portals */ | ||
964 | const struct qm_portal_config *config; | ||
965 | /* needed for providing a non-NULL device to dma_map_***() */ | ||
966 | struct platform_device *pdev; | ||
967 | /* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */ | ||
968 | struct qman_cgrs *cgrs; | ||
969 | /* linked-list of CSCN handlers. */ | ||
970 | struct list_head cgr_cbs; | ||
971 | /* list lock */ | ||
972 | spinlock_t cgr_lock; | ||
973 | struct work_struct congestion_work; | ||
974 | struct work_struct mr_work; | ||
975 | char irqname[MAX_IRQNAME]; | ||
976 | }; | ||
977 | |||
978 | static cpumask_t affine_mask; | ||
979 | static DEFINE_SPINLOCK(affine_mask_lock); | ||
980 | static u16 affine_channels[NR_CPUS]; | ||
981 | static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal); | ||
982 | struct qman_portal *affine_portals[NR_CPUS]; | ||
983 | |||
984 | static inline struct qman_portal *get_affine_portal(void) | ||
985 | { | ||
986 | return &get_cpu_var(qman_affine_portal); | ||
987 | } | ||
988 | |||
989 | static inline void put_affine_portal(void) | ||
990 | { | ||
991 | put_cpu_var(qman_affine_portal); | ||
992 | } | ||
993 | |||
994 | static struct workqueue_struct *qm_portal_wq; | ||
995 | |||
996 | int qman_wq_alloc(void) | ||
997 | { | ||
998 | qm_portal_wq = alloc_workqueue("qman_portal_wq", 0, 1); | ||
999 | if (!qm_portal_wq) | ||
1000 | return -ENOMEM; | ||
1001 | return 0; | ||
1002 | } | ||
1003 | |||
1004 | /* | ||
1005 | * This is what everything can wait on, even if it migrates to a different cpu | ||
1006 | * from the one whose affine portal it is waiting on. | ||
1007 | */ | ||
1008 | static DECLARE_WAIT_QUEUE_HEAD(affine_queue); | ||
1009 | |||
1010 | static struct qman_fq **fq_table; | ||
1011 | static u32 num_fqids; | ||
1012 | |||
1013 | int qman_alloc_fq_table(u32 _num_fqids) | ||
1014 | { | ||
1015 | num_fqids = _num_fqids; | ||
1016 | |||
1017 | fq_table = vzalloc(num_fqids * 2 * sizeof(struct qman_fq *)); | ||
1018 | if (!fq_table) | ||
1019 | return -ENOMEM; | ||
1020 | |||
1021 | pr_debug("Allocated fq lookup table at %p, entry count %u\n", | ||
1022 | fq_table, num_fqids * 2); | ||
1023 | return 0; | ||
1024 | } | ||
1025 | |||
1026 | static struct qman_fq *idx_to_fq(u32 idx) | ||
1027 | { | ||
1028 | struct qman_fq *fq; | ||
1029 | |||
1030 | #ifdef CONFIG_FSL_DPAA_CHECKING | ||
1031 | if (WARN_ON(idx >= num_fqids * 2)) | ||
1032 | return NULL; | ||
1033 | #endif | ||
1034 | fq = fq_table[idx]; | ||
1035 | DPAA_ASSERT(!fq || idx == fq->idx); | ||
1036 | |||
1037 | return fq; | ||
1038 | } | ||
1039 | |||
1040 | /* | ||
1041 | * Only returns full-service fq objects, not enqueue-only | ||
1042 | * references (QMAN_FQ_FLAG_NO_MODIFY). | ||
1043 | */ | ||
1044 | static struct qman_fq *fqid_to_fq(u32 fqid) | ||
1045 | { | ||
1046 | return idx_to_fq(fqid * 2); | ||
1047 | } | ||
1048 | |||
1049 | static struct qman_fq *tag_to_fq(u32 tag) | ||
1050 | { | ||
1051 | #if BITS_PER_LONG == 64 | ||
1052 | return idx_to_fq(tag); | ||
1053 | #else | ||
1054 | return (struct qman_fq *)tag; | ||
1055 | #endif | ||
1056 | } | ||
1057 | |||
1058 | static u32 fq_to_tag(struct qman_fq *fq) | ||
1059 | { | ||
1060 | #if BITS_PER_LONG == 64 | ||
1061 | return fq->idx; | ||
1062 | #else | ||
1063 | return (u32)fq; | ||
1064 | #endif | ||
1065 | } | ||
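The table holds two slots per FQID so that a full-service object and an enqueue-only reference can coexist; fqid_to_fq() above only ever consults the even slot. The tag carried in DQRR/MR entries is then either a table index (64-bit kernels, where a pointer would not fit in 32 bits) or the pointer itself (32-bit kernels). A hypothetical helper, purely to show the layout:

    /* 'example_idx' is not part of the driver; odd slots are assumed to be
     * reserved for enqueue-only (QMAN_FQ_FLAG_NO_MODIFY) references. */
    static u32 example_idx(u32 fqid, bool enqueue_only)
    {
            return fqid * 2 + (enqueue_only ? 1 : 0);
    }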
1066 | |||
1067 | static u32 __poll_portal_slow(struct qman_portal *p, u32 is); | ||
1068 | static inline unsigned int __poll_portal_fast(struct qman_portal *p, | ||
1069 | unsigned int poll_limit); | ||
1070 | static void qm_congestion_task(struct work_struct *work); | ||
1071 | static void qm_mr_process_task(struct work_struct *work); | ||
1072 | |||
1073 | static irqreturn_t portal_isr(int irq, void *ptr) | ||
1074 | { | ||
1075 | struct qman_portal *p = ptr; | ||
1076 | |||
1077 | u32 clear = QM_DQAVAIL_MASK | p->irq_sources; | ||
1078 | u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources; | ||
1079 | |||
1080 | if (unlikely(!is)) | ||
1081 | return IRQ_NONE; | ||
1082 | |||
1083 | /* DQRR-handling if it's interrupt-driven */ | ||
1084 | if (is & QM_PIRQ_DQRI) | ||
1085 | __poll_portal_fast(p, QMAN_POLL_LIMIT); | ||
1086 | /* Handling of anything else that's interrupt-driven */ | ||
1087 | clear |= __poll_portal_slow(p, is); | ||
1088 | qm_out(&p->p, QM_REG_ISR, clear); | ||
1089 | return IRQ_HANDLED; | ||
1090 | } | ||
1091 | |||
1092 | static int drain_mr_fqrni(struct qm_portal *p) | ||
1093 | { | ||
1094 | const union qm_mr_entry *msg; | ||
1095 | loop: | ||
1096 | msg = qm_mr_current(p); | ||
1097 | if (!msg) { | ||
1098 | /* | ||
1099 | * if MR was full and h/w had other FQRNI entries to produce, we | ||
1100 | * need to allow it time to produce those entries once the | ||
1101 | * existing entries are consumed. A worst-case situation | ||
1102 | * (fully-loaded system) means h/w sequencers may have to do 3-4 | ||
1103 | * other things before servicing the portal's MR pump, each of | ||
1104 | * which (if slow) may take ~50 qman cycles (which is ~200 | ||
1105 | * processor cycles). So rounding up and then multiplying this | ||
1106 | * worst-case estimate by a factor of 10, just to be | ||
1107 | * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume | ||
1108 | * one entry at a time, so h/w has an opportunity to produce new | ||
1109 | * entries well before the ring has been fully consumed, so | ||
1110 | * we're being *really* paranoid here. | ||
1111 | */ | ||
1112 | u64 now, then = jiffies; | ||
1113 | |||
1114 | do { | ||
1115 | now = jiffies; | ||
1116 | } while ((then + 10000) > now); | ||
1117 | msg = qm_mr_current(p); | ||
1118 | if (!msg) | ||
1119 | return 0; | ||
1120 | } | ||
1121 | if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) { | ||
1122 | /* We aren't draining anything but FQRNIs */ | ||
1123 | pr_err("Found verb 0x%x in MR\n", msg->verb); | ||
1124 | return -1; | ||
1125 | } | ||
1126 | qm_mr_next(p); | ||
1127 | qm_mr_cci_consume(p, 1); | ||
1128 | goto loop; | ||
1129 | } | ||
1130 | |||
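Note that the comment above reasons in QMan and processor cycles while the loop actually compares jiffies, a much coarser unit, and the open-coded (then + 10000) > now test is not safe across jiffies wraparound. A minimal sketch of the same bounded busy-wait using the kernel's wraparound-safe time_before() helper, assuming a pure spin remains acceptable here:

    #include <linux/jiffies.h>
    #include <asm/processor.h>      /* cpu_relax() */

    /* Sketch only: wraparound-safe form of the bounded wait above. */
    static void demo_bounded_wait(unsigned long timeout_jiffies)
    {
            unsigned long deadline = jiffies + timeout_jiffies;

            while (time_before(jiffies, deadline))
                    cpu_relax();    /* spin while h/w refills the MR */
    }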
1131 | static int qman_create_portal(struct qman_portal *portal, | ||
1132 | const struct qm_portal_config *c, | ||
1133 | const struct qman_cgrs *cgrs) | ||
1134 | { | ||
1135 | struct qm_portal *p; | ||
1136 | char buf[16]; | ||
1137 | int ret; | ||
1138 | u32 isdr; | ||
1139 | |||
1140 | p = &portal->p; | ||
1141 | |||
1142 | #ifdef CONFIG_FSL_PAMU | ||
1143 | /* PAMU is required for stashing */ | ||
1144 | portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ? 1 : 0); | ||
1145 | #else | ||
1146 | portal->use_eqcr_ci_stashing = 0; | ||
1147 | #endif | ||
1148 | /* | ||
1149 | * Prep the low-level portal struct with the mapped addresses from the | ||
1150 | * config; everything that follows depends on it, and "config" is kept | ||
1151 | * mostly for (de)reference purposes. | ||
1152 | */ | ||
1153 | p->addr.ce = c->addr_virt[DPAA_PORTAL_CE]; | ||
1154 | p->addr.ci = c->addr_virt[DPAA_PORTAL_CI]; | ||
1155 | /* | ||
1156 | * If CI-stashing is used, the current defaults use a threshold of 3, | ||
1157 | * and stash with higher-than-DQRR priority. | ||
1158 | */ | ||
1159 | if (qm_eqcr_init(p, qm_eqcr_pvb, | ||
1160 | portal->use_eqcr_ci_stashing ? 3 : 0, 1)) { | ||
1161 | dev_err(c->dev, "EQCR initialisation failed\n"); | ||
1162 | goto fail_eqcr; | ||
1163 | } | ||
1164 | if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb, | ||
1165 | qm_dqrr_cdc, DQRR_MAXFILL)) { | ||
1166 | dev_err(c->dev, "DQRR initialisation failed\n"); | ||
1167 | goto fail_dqrr; | ||
1168 | } | ||
1169 | if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) { | ||
1170 | dev_err(c->dev, "MR initialisation failed\n"); | ||
1171 | goto fail_mr; | ||
1172 | } | ||
1173 | if (qm_mc_init(p)) { | ||
1174 | dev_err(c->dev, "MC initialisation failed\n"); | ||
1175 | goto fail_mc; | ||
1176 | } | ||
1177 | /* static interrupt-gating controls */ | ||
1178 | qm_dqrr_set_ithresh(p, QMAN_PIRQ_DQRR_ITHRESH); | ||
1179 | qm_mr_set_ithresh(p, QMAN_PIRQ_MR_ITHRESH); | ||
1180 | qm_out(p, QM_REG_ITPR, QMAN_PIRQ_IPERIOD); | ||
1181 | portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL); | ||
1182 | if (!portal->cgrs) | ||
1183 | goto fail_cgrs; | ||
1184 | /* initial snapshot is no-depletion */ | ||
1185 | qman_cgrs_init(&portal->cgrs[1]); | ||
1186 | if (cgrs) | ||
1187 | portal->cgrs[0] = *cgrs; | ||
1188 | else | ||
1189 | /* if the given mask is NULL, assume all CGRs can be seen */ | ||
1190 | qman_cgrs_fill(&portal->cgrs[0]); | ||
1191 | INIT_LIST_HEAD(&portal->cgr_cbs); | ||
1192 | spin_lock_init(&portal->cgr_lock); | ||
1193 | INIT_WORK(&portal->congestion_work, qm_congestion_task); | ||
1194 | INIT_WORK(&portal->mr_work, qm_mr_process_task); | ||
1195 | portal->bits = 0; | ||
1196 | portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 | | ||
1197 | QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS | | ||
1198 | QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED; | ||
1199 | sprintf(buf, "qportal-%d", c->channel); | ||
1200 | portal->pdev = platform_device_alloc(buf, -1); | ||
1201 | if (!portal->pdev) | ||
1202 | goto fail_devalloc; | ||
1203 | if (dma_set_mask(&portal->pdev->dev, DMA_BIT_MASK(40))) | ||
1204 | goto fail_devadd; | ||
1205 | ret = platform_device_add(portal->pdev); | ||
1206 | if (ret) | ||
1207 | goto fail_devadd; | ||
1208 | isdr = 0xffffffff; | ||
1209 | qm_out(p, QM_REG_ISDR, isdr); | ||
1210 | portal->irq_sources = 0; | ||
1211 | qm_out(p, QM_REG_IER, 0); | ||
1212 | qm_out(p, QM_REG_ISR, 0xffffffff); | ||
1213 | snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu); | ||
1214 | if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) { | ||
1215 | dev_err(c->dev, "request_irq() failed\n"); | ||
1216 | goto fail_irq; | ||
1217 | } | ||
1218 | if (c->cpu != -1 && irq_can_set_affinity(c->irq) && | ||
1219 | irq_set_affinity(c->irq, cpumask_of(c->cpu))) { | ||
1220 | dev_err(c->dev, "irq_set_affinity() failed\n"); | ||
1221 | goto fail_affinity; | ||
1222 | } | ||
1223 | |||
1224 | /* Need EQCR to be empty before continuing */ | ||
1225 | isdr &= ~QM_PIRQ_EQCI; | ||
1226 | qm_out(p, QM_REG_ISDR, isdr); | ||
1227 | ret = qm_eqcr_get_fill(p); | ||
1228 | if (ret) { | ||
1229 | dev_err(c->dev, "EQCR unclean\n"); | ||
1230 | goto fail_eqcr_empty; | ||
1231 | } | ||
1232 | isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI); | ||
1233 | qm_out(p, QM_REG_ISDR, isdr); | ||
1234 | if (qm_dqrr_current(p)) { | ||
1235 | dev_err(c->dev, "DQRR unclean\n"); | ||
1236 | qm_dqrr_cdc_consume_n(p, 0xffff); | ||
1237 | } | ||
1238 | /* special handling: drain just in case it's a few FQRNIs */ | ||
1239 | if (qm_mr_current(p) && drain_mr_fqrni(p)) { | ||
1240 | const union qm_mr_entry *e = qm_mr_current(p); | ||
1241 | |||
1242 | dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x, addr 0x%x\n", | ||
1243 | e->verb, e->ern.rc, e->ern.fd.addr_lo); | ||
1244 | goto fail_dqrr_mr_empty; | ||
1245 | } | ||
1246 | /* Success */ | ||
1247 | portal->config = c; | ||
1248 | qm_out(p, QM_REG_ISDR, 0); | ||
1249 | qm_out(p, QM_REG_IIR, 0); | ||
1250 | /* Write a sane SDQCR */ | ||
1251 | qm_dqrr_sdqcr_set(p, portal->sdqcr); | ||
1252 | return 0; | ||
1253 | |||
1254 | fail_dqrr_mr_empty: | ||
1255 | fail_eqcr_empty: | ||
1256 | fail_affinity: | ||
1257 | free_irq(c->irq, portal); | ||
1258 | fail_irq: | ||
1259 | platform_device_del(portal->pdev); | ||
1260 | fail_devadd: | ||
1261 | platform_device_put(portal->pdev); | ||
1262 | fail_devalloc: | ||
1263 | kfree(portal->cgrs); | ||
1264 | fail_cgrs: | ||
1265 | qm_mc_finish(p); | ||
1266 | fail_mc: | ||
1267 | qm_mr_finish(p); | ||
1268 | fail_mr: | ||
1269 | qm_dqrr_finish(p); | ||
1270 | fail_dqrr: | ||
1271 | qm_eqcr_finish(p); | ||
1272 | fail_eqcr: | ||
1273 | return -EIO; | ||
1274 | } | ||
1275 | |||
1276 | struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c, | ||
1277 | const struct qman_cgrs *cgrs) | ||
1278 | { | ||
1279 | struct qman_portal *portal; | ||
1280 | int err; | ||
1281 | |||
1282 | portal = &per_cpu(qman_affine_portal, c->cpu); | ||
1283 | err = qman_create_portal(portal, c, cgrs); | ||
1284 | if (err) | ||
1285 | return NULL; | ||
1286 | |||
1287 | spin_lock(&affine_mask_lock); | ||
1288 | cpumask_set_cpu(c->cpu, &affine_mask); | ||
1289 | affine_channels[c->cpu] = c->channel; | ||
1290 | affine_portals[c->cpu] = portal; | ||
1291 | spin_unlock(&affine_mask_lock); | ||
1292 | |||
1293 | return portal; | ||
1294 | } | ||
1295 | |||
1296 | static void qman_destroy_portal(struct qman_portal *qm) | ||
1297 | { | ||
1298 | const struct qm_portal_config *pcfg; | ||
1299 | |||
1300 | /* Stop dequeues on the portal */ | ||
1301 | qm_dqrr_sdqcr_set(&qm->p, 0); | ||
1302 | |||
1303 | /* | ||
1304 | * NB we do this to "quiesce" EQCR. If we add enqueue-completions or | ||
1305 | * something related to QM_PIRQ_EQCI, this may need fixing. | ||
1306 | * Also, due to the prefetching model used for CI updates in the enqueue | ||
1307 | * path, this update will only invalidate the CI cacheline *after* | ||
1308 | * working on it, so we need to call this twice to ensure a full update | ||
1309 | * irrespective of where the enqueue processing was at when the teardown | ||
1310 | * began. | ||
1311 | */ | ||
1312 | qm_eqcr_cce_update(&qm->p); | ||
1313 | qm_eqcr_cce_update(&qm->p); | ||
1314 | pcfg = qm->config; | ||
1315 | |||
1316 | free_irq(pcfg->irq, qm); | ||
1317 | |||
1318 | kfree(qm->cgrs); | ||
1319 | qm_mc_finish(&qm->p); | ||
1320 | qm_mr_finish(&qm->p); | ||
1321 | qm_dqrr_finish(&qm->p); | ||
1322 | qm_eqcr_finish(&qm->p); | ||
1323 | |||
1324 | platform_device_del(qm->pdev); | ||
1325 | platform_device_put(qm->pdev); | ||
1326 | |||
1327 | qm->config = NULL; | ||
1328 | } | ||
1329 | |||
1330 | const struct qm_portal_config *qman_destroy_affine_portal(void) | ||
1331 | { | ||
1332 | struct qman_portal *qm = get_affine_portal(); | ||
1333 | const struct qm_portal_config *pcfg; | ||
1334 | int cpu; | ||
1335 | |||
1336 | pcfg = qm->config; | ||
1337 | cpu = pcfg->cpu; | ||
1338 | |||
1339 | qman_destroy_portal(qm); | ||
1340 | |||
1341 | spin_lock(&affine_mask_lock); | ||
1342 | cpumask_clear_cpu(cpu, &affine_mask); | ||
1343 | spin_unlock(&affine_mask_lock); | ||
1344 | put_affine_portal(); | ||
1345 | return pcfg; | ||
1346 | } | ||
1347 | |||
1348 | /* Inline helper to reduce nesting in __poll_portal_slow() */ | ||
1349 | static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq, | ||
1350 | const union qm_mr_entry *msg, u8 verb) | ||
1351 | { | ||
1352 | switch (verb) { | ||
1353 | case QM_MR_VERB_FQRL: | ||
1354 | DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL)); | ||
1355 | fq_clear(fq, QMAN_FQ_STATE_ORL); | ||
1356 | break; | ||
1357 | case QM_MR_VERB_FQRN: | ||
1358 | DPAA_ASSERT(fq->state == qman_fq_state_parked || | ||
1359 | fq->state == qman_fq_state_sched); | ||
1360 | DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING)); | ||
1361 | fq_clear(fq, QMAN_FQ_STATE_CHANGING); | ||
1362 | if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY) | ||
1363 | fq_set(fq, QMAN_FQ_STATE_NE); | ||
1364 | if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT) | ||
1365 | fq_set(fq, QMAN_FQ_STATE_ORL); | ||
1366 | fq->state = qman_fq_state_retired; | ||
1367 | break; | ||
1368 | case QM_MR_VERB_FQPN: | ||
1369 | DPAA_ASSERT(fq->state == qman_fq_state_sched); | ||
1370 | DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING)); | ||
1371 | fq->state = qman_fq_state_parked; | ||
1372 | } | ||
1373 | } | ||
1374 | |||
1375 | static void qm_congestion_task(struct work_struct *work) | ||
1376 | { | ||
1377 | struct qman_portal *p = container_of(work, struct qman_portal, | ||
1378 | congestion_work); | ||
1379 | struct qman_cgrs rr, c; | ||
1380 | union qm_mc_result *mcr; | ||
1381 | struct qman_cgr *cgr; | ||
1382 | |||
1383 | spin_lock(&p->cgr_lock); | ||
1384 | qm_mc_start(&p->p); | ||
1385 | qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION); | ||
1386 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | ||
1387 | spin_unlock(&p->cgr_lock); | ||
1388 | dev_crit(p->config->dev, "QUERYCONGESTION timeout\n"); | ||
1389 | return; | ||
1390 | } | ||
1391 | /* mask out the ones I'm not interested in */ | ||
1392 | qman_cgrs_and(&rr, (struct qman_cgrs *)&mcr->querycongestion.state, | ||
1393 | &p->cgrs[0]); | ||
1394 | /* check previous snapshot for delta, enter/exit congestion */ | ||
1395 | qman_cgrs_xor(&c, &rr, &p->cgrs[1]); | ||
1396 | /* update snapshot */ | ||
1397 | qman_cgrs_cp(&p->cgrs[1], &rr); | ||
1398 | /* Invoke callback */ | ||
1399 | list_for_each_entry(cgr, &p->cgr_cbs, node) | ||
1400 | if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid)) | ||
1401 | cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid)); | ||
1402 | spin_unlock(&p->cgr_lock); | ||
1403 | } | ||
1404 | |||
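qm_congestion_task() is a snapshot-and-XOR edge detector: the fresh QUERYCONGESTION state is masked down to the CGRs this portal cares about, XORed against the previous snapshot to find the bits that changed level, and only those CGRs get their callback, with the new congestion state as the last argument. The same idiom on a plain bitmask, as a self-contained sketch:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t prev  = 0x05;          /* old snapshot: CGRs 0 and 2 congested */
            uint32_t curr  = 0x06;          /* new state: CGR 0 exited, CGR 1 entered */
            uint32_t delta = prev ^ curr;   /* bits whose level changed */

            for (int id = 0; id < 32; id++)
                    if (delta & (1u << id))
                            printf("CGR %d %s congestion\n", id,
                                   (curr & (1u << id)) ? "entered" : "exited");
            return 0;
    }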
1405 | static void qm_mr_process_task(struct work_struct *work) | ||
1406 | { | ||
1407 | struct qman_portal *p = container_of(work, struct qman_portal, | ||
1408 | mr_work); | ||
1409 | const union qm_mr_entry *msg; | ||
1410 | struct qman_fq *fq; | ||
1411 | u8 verb, num = 0; | ||
1412 | |||
1413 | preempt_disable(); | ||
1414 | |||
1415 | while (1) { | ||
1416 | qm_mr_pvb_update(&p->p); | ||
1417 | msg = qm_mr_current(&p->p); | ||
1418 | if (!msg) | ||
1419 | break; | ||
1420 | |||
1421 | verb = msg->verb & QM_MR_VERB_TYPE_MASK; | ||
1422 | /* The message is a software ERN iff the 0x20 bit is clear */ | ||
1423 | if (verb & 0x20) { | ||
1424 | switch (verb) { | ||
1425 | case QM_MR_VERB_FQRNI: | ||
1426 | /* nada, we drop FQRNIs on the floor */ | ||
1427 | break; | ||
1428 | case QM_MR_VERB_FQRN: | ||
1429 | case QM_MR_VERB_FQRL: | ||
1430 | /* Lookup in the retirement table */ | ||
1431 | fq = fqid_to_fq(msg->fq.fqid); | ||
1432 | if (WARN_ON(!fq)) | ||
1433 | break; | ||
1434 | fq_state_change(p, fq, msg, verb); | ||
1435 | if (fq->cb.fqs) | ||
1436 | fq->cb.fqs(p, fq, msg); | ||
1437 | break; | ||
1438 | case QM_MR_VERB_FQPN: | ||
1439 | /* Parked */ | ||
1440 | fq = tag_to_fq(msg->fq.contextB); | ||
1441 | fq_state_change(p, fq, msg, verb); | ||
1442 | if (fq->cb.fqs) | ||
1443 | fq->cb.fqs(p, fq, msg); | ||
1444 | break; | ||
1445 | case QM_MR_VERB_DC_ERN: | ||
1446 | /* DCP ERN */ | ||
1447 | pr_crit_once("Leaking DCP ERNs!\n"); | ||
1448 | break; | ||
1449 | default: | ||
1450 | pr_crit("Invalid MR verb 0x%02x\n", verb); | ||
1451 | } | ||
1452 | } else { | ||
1453 | /* It's a software ERN */ | ||
1454 | fq = tag_to_fq(msg->ern.tag); | ||
1455 | fq->cb.ern(p, fq, msg); | ||
1456 | } | ||
1457 | num++; | ||
1458 | qm_mr_next(&p->p); | ||
1459 | } | ||
1460 | |||
1461 | qm_mr_cci_consume(&p->p, num); | ||
1462 | preempt_enable(); | ||
1463 | } | ||
1464 | |||
1465 | static u32 __poll_portal_slow(struct qman_portal *p, u32 is) | ||
1466 | { | ||
1467 | if (is & QM_PIRQ_CSCI) { | ||
1468 | queue_work_on(smp_processor_id(), qm_portal_wq, | ||
1469 | &p->congestion_work); | ||
1470 | } | ||
1471 | |||
1472 | if (is & QM_PIRQ_EQRI) { | ||
1473 | qm_eqcr_cce_update(&p->p); | ||
1474 | qm_eqcr_set_ithresh(&p->p, 0); | ||
1475 | wake_up(&affine_queue); | ||
1476 | } | ||
1477 | |||
1478 | if (is & QM_PIRQ_MRI) { | ||
1479 | queue_work_on(smp_processor_id(), qm_portal_wq, | ||
1480 | &p->mr_work); | ||
1481 | } | ||
1482 | |||
1483 | return is; | ||
1484 | } | ||
1485 | |||
1486 | /* | ||
1487 | * remove some slowish-path stuff from the "fast path" and make sure it isn't | ||
1488 | * inlined. | ||
1489 | */ | ||
1490 | static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq) | ||
1491 | { | ||
1492 | p->vdqcr_owned = NULL; | ||
1493 | fq_clear(fq, QMAN_FQ_STATE_VDQCR); | ||
1494 | wake_up(&affine_queue); | ||
1495 | } | ||
1496 | |||
1497 | /* | ||
1498 | * The only states that would conflict with other things if they ran at the | ||
1499 | * same time on the same cpu are: | ||
1500 | * | ||
1501 | * (i) setting/clearing vdqcr_owned, and | ||
1502 | * (ii) clearing the NE (Not Empty) flag. | ||
1503 | * | ||
1504 | * Both are safe, because: | ||
1505 | * | ||
1506 | * (i) this clearing can only occur after qman_volatile_dequeue() has set the | ||
1507 | * vdqcr_owned field (which it does before setting VDQCR), and | ||
1508 | * qman_volatile_dequeue() blocks interrupts and preemption while this is | ||
1509 | * done so that we can't interfere. | ||
1510 | * (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as | ||
1511 | * with (i) that API prevents us from interfering until it's safe. | ||
1512 | * | ||
1513 | * The good thing is that qman_volatile_dequeue() and qman_retire_fq() run far | ||
1514 | * less frequently (i.e. per-FQ) than __poll_portal_fast() does, so the net | ||
1515 | * advantage comes from this function not having to "lock" anything at all. | ||
1516 | * | ||
1517 | * Note also that the callbacks are invoked at points which are safe against the | ||
1518 | * above potential conflicts, but that this function itself is not re-entrant | ||
1519 | * (this is because the function tracks one end of each FIFO in the portal and | ||
1520 | * we do *not* want to lock that). So the consequence is that it is safe for | ||
1521 | * user callbacks to call into any QMan API. | ||
1522 | */ | ||
1523 | static inline unsigned int __poll_portal_fast(struct qman_portal *p, | ||
1524 | unsigned int poll_limit) | ||
1525 | { | ||
1526 | const struct qm_dqrr_entry *dq; | ||
1527 | struct qman_fq *fq; | ||
1528 | enum qman_cb_dqrr_result res; | ||
1529 | unsigned int limit = 0; | ||
1530 | |||
1531 | do { | ||
1532 | qm_dqrr_pvb_update(&p->p); | ||
1533 | dq = qm_dqrr_current(&p->p); | ||
1534 | if (!dq) | ||
1535 | break; | ||
1536 | |||
1537 | if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) { | ||
1538 | /* | ||
1539 | * VDQCR: don't trust contextB as the FQ may have | ||
1540 | * been configured for h/w consumption and we're | ||
1541 | * draining it post-retirement. | ||
1542 | */ | ||
1543 | fq = p->vdqcr_owned; | ||
1544 | /* | ||
1545 | * We only set QMAN_FQ_STATE_NE when retiring, so we | ||
1546 | * only need to check for clearing it when doing | ||
1547 | * volatile dequeues. It's one less thing to check | ||
1548 | * in the critical path (SDQCR). | ||
1549 | */ | ||
1550 | if (dq->stat & QM_DQRR_STAT_FQ_EMPTY) | ||
1551 | fq_clear(fq, QMAN_FQ_STATE_NE); | ||
1552 | /* | ||
1553 | * This is duplicated from the SDQCR code, but we | ||
1554 | * have stuff to do before *and* after this callback, | ||
1555 | * and we don't want multiple if()s in the critical | ||
1556 | * path (SDQCR). | ||
1557 | */ | ||
1558 | res = fq->cb.dqrr(p, fq, dq); | ||
1559 | if (res == qman_cb_dqrr_stop) | ||
1560 | break; | ||
1561 | /* Check for VDQCR completion */ | ||
1562 | if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED) | ||
1563 | clear_vdqcr(p, fq); | ||
1564 | } else { | ||
1565 | /* SDQCR: contextB points to the FQ */ | ||
1566 | fq = tag_to_fq(dq->contextB); | ||
1567 | /* Now let the callback do its stuff */ | ||
1568 | res = fq->cb.dqrr(p, fq, dq); | ||
1569 | /* | ||
1570 | * The callback can request that we exit without | ||
1571 | * consuming this entry or advancing; | ||
1572 | */ | ||
1573 | if (res == qman_cb_dqrr_stop) | ||
1574 | break; | ||
1575 | } | ||
1576 | /* Interpret 'dq' from a driver perspective. */ | ||
1577 | /* | ||
1578 | * Parking isn't possible unless HELDACTIVE was set. NB, | ||
1579 | * FORCEELIGIBLE implies HELDACTIVE, so we only need to | ||
1580 | * check for HELDACTIVE to cover both. | ||
1581 | */ | ||
1582 | DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) || | ||
1583 | (res != qman_cb_dqrr_park)); | ||
1584 | /* just means "skip it, I'll consume it myself later on" */ | ||
1585 | if (res != qman_cb_dqrr_defer) | ||
1586 | qm_dqrr_cdc_consume_1ptr(&p->p, dq, | ||
1587 | res == qman_cb_dqrr_park); | ||
1588 | /* Move forward */ | ||
1589 | qm_dqrr_next(&p->p); | ||
1590 | /* | ||
1591 | * Entry processed and consumed, increment our counter. The | ||
1592 | * callback can request that we exit after consuming the | ||
1593 | * entry, and we also exit if we reach our processing limit, | ||
1594 | * so loop back only if neither of these conditions is met. | ||
1595 | */ | ||
1596 | } while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop); | ||
1597 | |||
1598 | return limit; | ||
1599 | } | ||
1600 | |||
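Each DQRR entry is handed to fq->cb.dqrr, and the returned enum steers the loop: consume (the normal case), park (legal only when HELDACTIVE is set, per the assertion above), defer (skip consumption and leave the entry to the caller), and the two stop variants. A hypothetical callback written against that contract; handle_frame() is an assumed helper, not part of the driver:

    static enum qman_cb_dqrr_result demo_dqrr_cb(struct qman_portal *p,
                                                 struct qman_fq *fq,
                                                 const struct qm_dqrr_entry *dq)
    {
            if (handle_frame(&dq->fd))              /* assumed frame consumer */
                    return qman_cb_dqrr_consume;    /* done, keep polling */
            /* Downstream backpressure: consume this entry but stop the loop. */
            return qman_cb_dqrr_consume_stop;
    }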
1601 | void qman_p_irqsource_add(struct qman_portal *p, u32 bits) | ||
1602 | { | ||
1603 | unsigned long irqflags; | ||
1604 | |||
1605 | local_irq_save(irqflags); | ||
1606 | set_bits(bits & QM_PIRQ_VISIBLE, &p->irq_sources); | ||
1607 | qm_out(&p->p, QM_REG_IER, p->irq_sources); | ||
1608 | local_irq_restore(irqflags); | ||
1609 | } | ||
1610 | EXPORT_SYMBOL(qman_p_irqsource_add); | ||
1611 | |||
1612 | void qman_p_irqsource_remove(struct qman_portal *p, u32 bits) | ||
1613 | { | ||
1614 | unsigned long irqflags; | ||
1615 | u32 ier; | ||
1616 | |||
1617 | /* | ||
1618 | * Our interrupt handler only processes+clears status register bits that | ||
1619 | * are in p->irq_sources. As we're trimming that mask, if one of them | ||
1620 | * were to assert in the status register just before we remove it from | ||
1621 | * the enable register, there would be an interrupt-storm when we | ||
1622 | * release the IRQ lock. So we wait for the enable register update to | ||
1623 | * take effect in h/w (by reading it back) and then clear all other bits | ||
1624 | * in the status register. I.e. we clear them from ISR once it's certain | ||
1625 | * IER won't allow them to reassert. | ||
1626 | */ | ||
1627 | local_irq_save(irqflags); | ||
1628 | bits &= QM_PIRQ_VISIBLE; | ||
1629 | clear_bits(bits, &p->irq_sources); | ||
1630 | qm_out(&p->p, QM_REG_IER, p->irq_sources); | ||
1631 | ier = qm_in(&p->p, QM_REG_IER); | ||
1632 | /* | ||
1633 | * Using "~ier" (rather than "bits" or "~p->irq_sources") creates a | ||
1634 | * data-dependency, ie. to protect against re-ordering. | ||
1635 | */ | ||
1636 | qm_out(&p->p, QM_REG_ISR, ~ier); | ||
1637 | local_irq_restore(irqflags); | ||
1638 | } | ||
1639 | EXPORT_SYMBOL(qman_p_irqsource_remove); | ||
1640 | |||
1641 | const cpumask_t *qman_affine_cpus(void) | ||
1642 | { | ||
1643 | return &affine_mask; | ||
1644 | } | ||
1645 | EXPORT_SYMBOL(qman_affine_cpus); | ||
1646 | |||
1647 | u16 qman_affine_channel(int cpu) | ||
1648 | { | ||
1649 | if (cpu < 0) { | ||
1650 | struct qman_portal *portal = get_affine_portal(); | ||
1651 | |||
1652 | cpu = portal->config->cpu; | ||
1653 | put_affine_portal(); | ||
1654 | } | ||
1655 | WARN_ON(!cpumask_test_cpu(cpu, &affine_mask)); | ||
1656 | return affine_channels[cpu]; | ||
1657 | } | ||
1658 | EXPORT_SYMBOL(qman_affine_channel); | ||
1659 | |||
1660 | struct qman_portal *qman_get_affine_portal(int cpu) | ||
1661 | { | ||
1662 | return affine_portals[cpu]; | ||
1663 | } | ||
1664 | EXPORT_SYMBOL(qman_get_affine_portal); | ||
1665 | |||
1666 | int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit) | ||
1667 | { | ||
1668 | return __poll_portal_fast(p, limit); | ||
1669 | } | ||
1670 | EXPORT_SYMBOL(qman_p_poll_dqrr); | ||
1671 | |||
1672 | void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools) | ||
1673 | { | ||
1674 | unsigned long irqflags; | ||
1675 | |||
1676 | local_irq_save(irqflags); | ||
1677 | pools &= p->config->pools; | ||
1678 | p->sdqcr |= pools; | ||
1679 | qm_dqrr_sdqcr_set(&p->p, p->sdqcr); | ||
1680 | local_irq_restore(irqflags); | ||
1681 | } | ||
1682 | EXPORT_SYMBOL(qman_p_static_dequeue_add); | ||
1683 | |||
1684 | /* Frame queue API */ | ||
1685 | |||
1686 | static const char *mcr_result_str(u8 result) | ||
1687 | { | ||
1688 | switch (result) { | ||
1689 | case QM_MCR_RESULT_NULL: | ||
1690 | return "QM_MCR_RESULT_NULL"; | ||
1691 | case QM_MCR_RESULT_OK: | ||
1692 | return "QM_MCR_RESULT_OK"; | ||
1693 | case QM_MCR_RESULT_ERR_FQID: | ||
1694 | return "QM_MCR_RESULT_ERR_FQID"; | ||
1695 | case QM_MCR_RESULT_ERR_FQSTATE: | ||
1696 | return "QM_MCR_RESULT_ERR_FQSTATE"; | ||
1697 | case QM_MCR_RESULT_ERR_NOTEMPTY: | ||
1698 | return "QM_MCR_RESULT_ERR_NOTEMPTY"; | ||
1699 | case QM_MCR_RESULT_PENDING: | ||
1700 | return "QM_MCR_RESULT_PENDING"; | ||
1701 | case QM_MCR_RESULT_ERR_BADCOMMAND: | ||
1702 | return "QM_MCR_RESULT_ERR_BADCOMMAND"; | ||
1703 | } | ||
1704 | return "<unknown MCR result>"; | ||
1705 | } | ||
1706 | |||
1707 | int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq) | ||
1708 | { | ||
1709 | if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) { | ||
1710 | int ret = qman_alloc_fqid(&fqid); | ||
1711 | |||
1712 | if (ret) | ||
1713 | return ret; | ||
1714 | } | ||
1715 | fq->fqid = fqid; | ||
1716 | fq->flags = flags; | ||
1717 | fq->state = qman_fq_state_oos; | ||
1718 | fq->cgr_groupid = 0; | ||
1719 | |||
1720 | /* A context_b of 0 is allegedly special, so don't use that fqid */ | ||
1721 | if (fqid == 0 || fqid >= num_fqids) { | ||
1722 | WARN(1, "bad fqid %d\n", fqid); | ||
1723 | return -EINVAL; | ||
1724 | } | ||
1725 | |||
1726 | fq->idx = fqid * 2; | ||
1727 | if (flags & QMAN_FQ_FLAG_NO_MODIFY) | ||
1728 | fq->idx++; | ||
1729 | |||
1730 | WARN_ON(fq_table[fq->idx]); | ||
1731 | fq_table[fq->idx] = fq; | ||
1732 | |||
1733 | return 0; | ||
1734 | } | ||
1735 | EXPORT_SYMBOL(qman_create_fq); | ||
1736 | |||
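A minimal usage sketch for the above: pass fqid 0 with QMAN_FQ_FLAG_DYNAMIC_FQID so the allocator picks the FQID (an explicit 0 would be rejected, since a contextB of 0 is special), and bind the callbacks up front. The demo_* callbacks are assumptions:

    static struct qman_fq demo_fq = {
            .cb = {
                    .dqrr = demo_dqrr_cb,   /* dequeued-frame callback */
                    .ern  = demo_ern_cb,    /* enqueue-rejection callback */
            },
    };

    static int demo_setup_fq(void)
    {
            /* 0 + DYNAMIC_FQID: qman_alloc_fqid() chooses the actual FQID */
            return qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, &demo_fq);
    }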
1737 | void qman_destroy_fq(struct qman_fq *fq) | ||
1738 | { | ||
1739 | /* | ||
1740 | * We don't need to lock the FQ as it is a pre-condition that the FQ be | ||
1741 | * quiesced. Instead, run some checks. | ||
1742 | */ | ||
1743 | switch (fq->state) { | ||
1744 | case qman_fq_state_parked: | ||
1745 | case qman_fq_state_oos: | ||
1746 | if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID)) | ||
1747 | qman_release_fqid(fq->fqid); | ||
1748 | |||
1749 | DPAA_ASSERT(fq_table[fq->idx]); | ||
1750 | fq_table[fq->idx] = NULL; | ||
1751 | return; | ||
1752 | default: | ||
1753 | break; | ||
1754 | } | ||
1755 | DPAA_ASSERT(NULL == "qman_destroy_fq() on unquiesced FQ!"); | ||
1756 | } | ||
1757 | EXPORT_SYMBOL(qman_destroy_fq); | ||
1758 | |||
1759 | u32 qman_fq_fqid(struct qman_fq *fq) | ||
1760 | { | ||
1761 | return fq->fqid; | ||
1762 | } | ||
1763 | EXPORT_SYMBOL(qman_fq_fqid); | ||
1764 | |||
1765 | int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts) | ||
1766 | { | ||
1767 | union qm_mc_command *mcc; | ||
1768 | union qm_mc_result *mcr; | ||
1769 | struct qman_portal *p; | ||
1770 | u8 res, myverb; | ||
1771 | int ret = 0; | ||
1772 | |||
1773 | myverb = (flags & QMAN_INITFQ_FLAG_SCHED) | ||
1774 | ? QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED; | ||
1775 | |||
1776 | if (fq->state != qman_fq_state_oos && | ||
1777 | fq->state != qman_fq_state_parked) | ||
1778 | return -EINVAL; | ||
1779 | #ifdef CONFIG_FSL_DPAA_CHECKING | ||
1780 | if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)) | ||
1781 | return -EINVAL; | ||
1782 | #endif | ||
1783 | if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) { | ||
1784 | /* OAC can't be set at the same time as TDTHRESH */ | ||
1785 | if (opts->we_mask & QM_INITFQ_WE_TDTHRESH) | ||
1786 | return -EINVAL; | ||
1787 | } | ||
1788 | /* Issue an INITFQ_[PARKED|SCHED] management command */ | ||
1789 | p = get_affine_portal(); | ||
1790 | if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) || | ||
1791 | (fq->state != qman_fq_state_oos && | ||
1792 | fq->state != qman_fq_state_parked)) { | ||
1793 | ret = -EBUSY; | ||
1794 | goto out; | ||
1795 | } | ||
1796 | mcc = qm_mc_start(&p->p); | ||
1797 | if (opts) | ||
1798 | mcc->initfq = *opts; | ||
1799 | mcc->initfq.fqid = fq->fqid; | ||
1800 | mcc->initfq.count = 0; | ||
1801 | /* | ||
1802 | * If the FQ does *not* have the TO_DCPORTAL flag, contextB is set as a | ||
1803 | * demux pointer. Otherwise, the caller-provided value is allowed to | ||
1804 | * stand, don't overwrite it. | ||
1805 | */ | ||
1806 | if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) { | ||
1807 | dma_addr_t phys_fq; | ||
1808 | |||
1809 | mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB; | ||
1810 | mcc->initfq.fqd.context_b = fq_to_tag(fq); | ||
1811 | /* | ||
1812 | * and the physical address - NB, if the user wasn't trying to | ||
1813 | * set CONTEXTA, clear the stashing settings. | ||
1814 | */ | ||
1815 | if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) { | ||
1816 | mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA; | ||
1817 | memset(&mcc->initfq.fqd.context_a, 0, | ||
1818 | sizeof(mcc->initfq.fqd.context_a)); | ||
1819 | } else { | ||
1820 | phys_fq = dma_map_single(&p->pdev->dev, fq, sizeof(*fq), | ||
1821 | DMA_TO_DEVICE); | ||
1822 | qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq); | ||
1823 | } | ||
1824 | } | ||
1825 | if (flags & QMAN_INITFQ_FLAG_LOCAL) { | ||
1826 | int wq = 0; | ||
1827 | |||
1828 | if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) { | ||
1829 | mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ; | ||
1830 | wq = 4; | ||
1831 | } | ||
1832 | qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq); | ||
1833 | } | ||
1834 | qm_mc_commit(&p->p, myverb); | ||
1835 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | ||
1836 | dev_err(p->config->dev, "MCR timeout\n"); | ||
1837 | ret = -ETIMEDOUT; | ||
1838 | goto out; | ||
1839 | } | ||
1840 | |||
1841 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb); | ||
1842 | res = mcr->result; | ||
1843 | if (res != QM_MCR_RESULT_OK) { | ||
1844 | ret = -EIO; | ||
1845 | goto out; | ||
1846 | } | ||
1847 | if (opts) { | ||
1848 | if (opts->we_mask & QM_INITFQ_WE_FQCTRL) { | ||
1849 | if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE) | ||
1850 | fq_set(fq, QMAN_FQ_STATE_CGR_EN); | ||
1851 | else | ||
1852 | fq_clear(fq, QMAN_FQ_STATE_CGR_EN); | ||
1853 | } | ||
1854 | if (opts->we_mask & QM_INITFQ_WE_CGID) | ||
1855 | fq->cgr_groupid = opts->fqd.cgid; | ||
1856 | } | ||
1857 | fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ? | ||
1858 | qman_fq_state_sched : qman_fq_state_parked; | ||
1859 | |||
1860 | out: | ||
1861 | put_affine_portal(); | ||
1862 | return ret; | ||
1863 | } | ||
1864 | EXPORT_SYMBOL(qman_init_fq); | ||
1865 | |||
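Continuing the sketch: INITFQ only writes the descriptor fields whose bits are set in we_mask, so a caller sets the mask bit alongside each field it fills in. Here the FQ is pointed at an explicit work queue and scheduled in one step; the channel, the WQ priority and the use of the QM_FQCTRL_HOLDACTIVE control bit are illustrative choices:

    static int demo_init_fq(struct qman_fq *fq, u16 channel)
    {
            struct qm_mcc_initfq opts = {};

            opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL;
            /* HOLDACTIVE lets the dqrr callback legally return _park */
            opts.fqd.fq_ctrl = QM_FQCTRL_HOLDACTIVE;
            qm_fqd_set_destwq(&opts.fqd, channel, 3);       /* WQ priority 3 */

            return qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
    }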
1866 | int qman_schedule_fq(struct qman_fq *fq) | ||
1867 | { | ||
1868 | union qm_mc_command *mcc; | ||
1869 | union qm_mc_result *mcr; | ||
1870 | struct qman_portal *p; | ||
1871 | int ret = 0; | ||
1872 | |||
1873 | if (fq->state != qman_fq_state_parked) | ||
1874 | return -EINVAL; | ||
1875 | #ifdef CONFIG_FSL_DPAA_CHECKING | ||
1876 | if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)) | ||
1877 | return -EINVAL; | ||
1878 | #endif | ||
1879 | /* Issue an ALTERFQ_SCHED management command */ | ||
1880 | p = get_affine_portal(); | ||
1881 | if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) || | ||
1882 | fq->state != qman_fq_state_parked) { | ||
1883 | ret = -EBUSY; | ||
1884 | goto out; | ||
1885 | } | ||
1886 | mcc = qm_mc_start(&p->p); | ||
1887 | mcc->alterfq.fqid = fq->fqid; | ||
1888 | qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED); | ||
1889 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | ||
1890 | dev_err(p->config->dev, "ALTER_SCHED timeout\n"); | ||
1891 | ret = -ETIMEDOUT; | ||
1892 | goto out; | ||
1893 | } | ||
1894 | |||
1895 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED); | ||
1896 | if (mcr->result != QM_MCR_RESULT_OK) { | ||
1897 | ret = -EIO; | ||
1898 | goto out; | ||
1899 | } | ||
1900 | fq->state = qman_fq_state_sched; | ||
1901 | out: | ||
1902 | put_affine_portal(); | ||
1903 | return ret; | ||
1904 | } | ||
1905 | EXPORT_SYMBOL(qman_schedule_fq); | ||
1906 | |||
1907 | int qman_retire_fq(struct qman_fq *fq, u32 *flags) | ||
1908 | { | ||
1909 | union qm_mc_command *mcc; | ||
1910 | union qm_mc_result *mcr; | ||
1911 | struct qman_portal *p; | ||
1912 | int ret; | ||
1913 | u8 res; | ||
1914 | |||
1915 | if (fq->state != qman_fq_state_parked && | ||
1916 | fq->state != qman_fq_state_sched) | ||
1917 | return -EINVAL; | ||
1918 | #ifdef CONFIG_FSL_DPAA_CHECKING | ||
1919 | if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)) | ||
1920 | return -EINVAL; | ||
1921 | #endif | ||
1922 | p = get_affine_portal(); | ||
1923 | if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) || | ||
1924 | fq->state == qman_fq_state_retired || | ||
1925 | fq->state == qman_fq_state_oos) { | ||
1926 | ret = -EBUSY; | ||
1927 | goto out; | ||
1928 | } | ||
1929 | mcc = qm_mc_start(&p->p); | ||
1930 | mcc->alterfq.fqid = fq->fqid; | ||
1931 | qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE); | ||
1932 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | ||
1933 | dev_crit(p->config->dev, "ALTER_RETIRE timeout\n"); | ||
1934 | ret = -ETIMEDOUT; | ||
1935 | goto out; | ||
1936 | } | ||
1937 | |||
1938 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE); | ||
1939 | res = mcr->result; | ||
1940 | /* | ||
1941 | * "Elegant" would be to treat OK/PENDING the same way; set CHANGING, | ||
1942 | * and defer the flags until FQRNI or FQRN (respectively) show up. But | ||
1943 | * "Friendly" is to process OK immediately, and not set CHANGING. We do | ||
1944 | * friendly, otherwise the caller doesn't necessarily have a fully | ||
1945 | * "retired" FQ on return even if the retirement was immediate. However | ||
1946 | * this does mean some code duplication between here and | ||
1947 | * fq_state_change(). | ||
1948 | */ | ||
1949 | if (res == QM_MCR_RESULT_OK) { | ||
1950 | ret = 0; | ||
1951 | /* Process 'fq' right away, we'll ignore FQRNI */ | ||
1952 | if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) | ||
1953 | fq_set(fq, QMAN_FQ_STATE_NE); | ||
1954 | if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT) | ||
1955 | fq_set(fq, QMAN_FQ_STATE_ORL); | ||
1956 | if (flags) | ||
1957 | *flags = fq->flags; | ||
1958 | fq->state = qman_fq_state_retired; | ||
1959 | if (fq->cb.fqs) { | ||
1960 | /* | ||
1961 | * Another issue with supporting "immediate" retirement | ||
1962 | * is that we're forced to drop FQRNIs, because by the | ||
1963 | * time they're seen it may already be "too late" (the | ||
1964 | * fq may have been OOS'd and free()'d already). But if | ||
1965 | * the upper layer wants a callback whether it's | ||
1966 | * immediate or not, we have to fake a "MR" entry to | ||
1967 | * look like an FQRNI... | ||
1968 | */ | ||
1969 | union qm_mr_entry msg; | ||
1970 | |||
1971 | msg.verb = QM_MR_VERB_FQRNI; | ||
1972 | msg.fq.fqs = mcr->alterfq.fqs; | ||
1973 | msg.fq.fqid = fq->fqid; | ||
1974 | msg.fq.contextB = fq_to_tag(fq); | ||
1975 | fq->cb.fqs(p, fq, &msg); | ||
1976 | } | ||
1977 | } else if (res == QM_MCR_RESULT_PENDING) { | ||
1978 | ret = 1; | ||
1979 | fq_set(fq, QMAN_FQ_STATE_CHANGING); | ||
1980 | } else { | ||
1981 | ret = -EIO; | ||
1982 | } | ||
1983 | out: | ||
1984 | put_affine_portal(); | ||
1985 | return ret; | ||
1986 | } | ||
1987 | EXPORT_SYMBOL(qman_retire_fq); | ||
1988 | |||
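Because of the OK-versus-PENDING split above, qman_retire_fq() has a three-way result: 0 means the FQ is already retired, 1 means retirement is pending behind an FQRN, and negative values are errors. A sketch of a caller that waits out the pending case; signalling the completion from the fqs callback is an assumption layered on top of the API:

    static DECLARE_COMPLETION(demo_retired);    /* completed by our fqs callback */

    static int demo_retire_sync(struct qman_fq *fq)
    {
            u32 flags;
            int ret = qman_retire_fq(fq, &flags);

            if (ret < 0)
                    return ret;     /* -EINVAL, -EBUSY, -ETIMEDOUT or -EIO */
            if (ret == 1)
                    /* CHANGING is set; the FQRN will land in the MR and
                     * reach fq->cb.fqs, which we assume completes the
                     * demo_retired completion. */
                    wait_for_completion(&demo_retired);
            return 0;
    }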
1989 | int qman_oos_fq(struct qman_fq *fq) | ||
1990 | { | ||
1991 | union qm_mc_command *mcc; | ||
1992 | union qm_mc_result *mcr; | ||
1993 | struct qman_portal *p; | ||
1994 | int ret = 0; | ||
1995 | |||
1996 | if (fq->state != qman_fq_state_retired) | ||
1997 | return -EINVAL; | ||
1998 | #ifdef CONFIG_FSL_DPAA_CHECKING | ||
1999 | if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)) | ||
2000 | return -EINVAL; | ||
2001 | #endif | ||
2002 | p = get_affine_portal(); | ||
2003 | if (fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS) || | ||
2004 | fq->state != qman_fq_state_retired) { | ||
2005 | ret = -EBUSY; | ||
2006 | goto out; | ||
2007 | } | ||
2008 | mcc = qm_mc_start(&p->p); | ||
2009 | mcc->alterfq.fqid = fq->fqid; | ||
2010 | qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); | ||
2011 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | ||
2012 | ret = -ETIMEDOUT; | ||
2013 | goto out; | ||
2014 | } | ||
2015 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS); | ||
2016 | if (mcr->result != QM_MCR_RESULT_OK) { | ||
2017 | ret = -EIO; | ||
2018 | goto out; | ||
2019 | } | ||
2020 | fq->state = qman_fq_state_oos; | ||
2021 | out: | ||
2022 | put_affine_portal(); | ||
2023 | return ret; | ||
2024 | } | ||
2025 | EXPORT_SYMBOL(qman_oos_fq); | ||
2026 | |||
2027 | int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd) | ||
2028 | { | ||
2029 | union qm_mc_command *mcc; | ||
2030 | union qm_mc_result *mcr; | ||
2031 | struct qman_portal *p = get_affine_portal(); | ||
2032 | int ret = 0; | ||
2033 | |||
2034 | mcc = qm_mc_start(&p->p); | ||
2035 | mcc->queryfq.fqid = fq->fqid; | ||
2036 | qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ); | ||
2037 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | ||
2038 | ret = -ETIMEDOUT; | ||
2039 | goto out; | ||
2040 | } | ||
2041 | |||
2042 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ); | ||
2043 | if (mcr->result == QM_MCR_RESULT_OK) | ||
2044 | *fqd = mcr->queryfq.fqd; | ||
2045 | else | ||
2046 | ret = -EIO; | ||
2047 | out: | ||
2048 | put_affine_portal(); | ||
2049 | return ret; | ||
2050 | } | ||
2051 | |||
2052 | static int qman_query_fq_np(struct qman_fq *fq, | ||
2053 | struct qm_mcr_queryfq_np *np) | ||
2054 | { | ||
2055 | union qm_mc_command *mcc; | ||
2056 | union qm_mc_result *mcr; | ||
2057 | struct qman_portal *p = get_affine_portal(); | ||
2058 | int ret = 0; | ||
2059 | |||
2060 | mcc = qm_mc_start(&p->p); | ||
2061 | mcc->queryfq.fqid = fq->fqid; | ||
2062 | qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP); | ||
2063 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | ||
2064 | ret = -ETIMEDOUT; | ||
2065 | goto out; | ||
2066 | } | ||
2067 | |||
2068 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP); | ||
2069 | if (mcr->result == QM_MCR_RESULT_OK) | ||
2070 | *np = mcr->queryfq_np; | ||
2071 | else if (mcr->result == QM_MCR_RESULT_ERR_FQID) | ||
2072 | ret = -ERANGE; | ||
2073 | else | ||
2074 | ret = -EIO; | ||
2075 | out: | ||
2076 | put_affine_portal(); | ||
2077 | return ret; | ||
2078 | } | ||
2079 | |||
2080 | static int qman_query_cgr(struct qman_cgr *cgr, | ||
2081 | struct qm_mcr_querycgr *cgrd) | ||
2082 | { | ||
2083 | union qm_mc_command *mcc; | ||
2084 | union qm_mc_result *mcr; | ||
2085 | struct qman_portal *p = get_affine_portal(); | ||
2086 | int ret = 0; | ||
2087 | |||
2088 | mcc = qm_mc_start(&p->p); | ||
2089 | mcc->querycgr.cgid = cgr->cgrid; | ||
2090 | qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR); | ||
2091 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | ||
2092 | ret = -ETIMEDOUT; | ||
2093 | goto out; | ||
2094 | } | ||
2095 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR); | ||
2096 | if (mcr->result == QM_MCR_RESULT_OK) | ||
2097 | *cgrd = mcr->querycgr; | ||
2098 | else { | ||
2099 | dev_err(p->config->dev, "QUERY_CGR failed: %s\n", | ||
2100 | mcr_result_str(mcr->result)); | ||
2101 | ret = -EIO; | ||
2102 | } | ||
2103 | out: | ||
2104 | put_affine_portal(); | ||
2105 | return ret; | ||
2106 | } | ||
2107 | |||
2108 | int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result) | ||
2109 | { | ||
2110 | struct qm_mcr_querycgr query_cgr; | ||
2111 | int err; | ||
2112 | |||
2113 | err = qman_query_cgr(cgr, &query_cgr); | ||
2114 | if (err) | ||
2115 | return err; | ||
2116 | |||
2117 | *result = !!query_cgr.cgr.cs; | ||
2118 | return 0; | ||
2119 | } | ||
2120 | EXPORT_SYMBOL(qman_query_cgr_congested); | ||
2121 | |||
2122 | /* internal function used as a wait_event() expression */ | ||
2123 | static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr) | ||
2124 | { | ||
2125 | unsigned long irqflags; | ||
2126 | int ret = -EBUSY; | ||
2127 | |||
2128 | local_irq_save(irqflags); | ||
2129 | if (p->vdqcr_owned) | ||
2130 | goto out; | ||
2131 | if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) | ||
2132 | goto out; | ||
2133 | |||
2134 | fq_set(fq, QMAN_FQ_STATE_VDQCR); | ||
2135 | p->vdqcr_owned = fq; | ||
2136 | qm_dqrr_vdqcr_set(&p->p, vdqcr); | ||
2137 | ret = 0; | ||
2138 | out: | ||
2139 | local_irq_restore(irqflags); | ||
2140 | return ret; | ||
2141 | } | ||
2142 | |||
2143 | static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr) | ||
2144 | { | ||
2145 | int ret; | ||
2146 | |||
2147 | *p = get_affine_portal(); | ||
2148 | ret = set_p_vdqcr(*p, fq, vdqcr); | ||
2149 | put_affine_portal(); | ||
2150 | return ret; | ||
2151 | } | ||
2152 | |||
2153 | static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq, | ||
2154 | u32 vdqcr, u32 flags) | ||
2155 | { | ||
2156 | int ret = 0; | ||
2157 | |||
2158 | if (flags & QMAN_VOLATILE_FLAG_WAIT_INT) | ||
2159 | ret = wait_event_interruptible(affine_queue, | ||
2160 | !set_vdqcr(p, fq, vdqcr)); | ||
2161 | else | ||
2162 | wait_event(affine_queue, !set_vdqcr(p, fq, vdqcr)); | ||
2163 | return ret; | ||
2164 | } | ||
2165 | |||
2166 | int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr) | ||
2167 | { | ||
2168 | struct qman_portal *p; | ||
2169 | int ret; | ||
2170 | |||
2171 | if (fq->state != qman_fq_state_parked && | ||
2172 | fq->state != qman_fq_state_retired) | ||
2173 | return -EINVAL; | ||
2174 | if (vdqcr & QM_VDQCR_FQID_MASK) | ||
2175 | return -EINVAL; | ||
2176 | if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) | ||
2177 | return -EBUSY; | ||
2178 | vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid; | ||
2179 | if (flags & QMAN_VOLATILE_FLAG_WAIT) | ||
2180 | ret = wait_vdqcr_start(&p, fq, vdqcr, flags); | ||
2181 | else | ||
2182 | ret = set_vdqcr(&p, fq, vdqcr); | ||
2183 | if (ret) | ||
2184 | return ret; | ||
2185 | /* VDQCR is set */ | ||
2186 | if (flags & QMAN_VOLATILE_FLAG_FINISH) { | ||
2187 | if (flags & QMAN_VOLATILE_FLAG_WAIT_INT) | ||
2188 | /* | ||
2189 | * NB: don't propagate any error - the caller wouldn't | ||
2190 | * know whether the VDQCR was issued or not. A signal | ||
2191 | * could arrive after returning anyway, so the caller | ||
2192 | * can check signal_pending() if that's an issue. | ||
2193 | */ | ||
2194 | wait_event_interruptible(affine_queue, | ||
2195 | !fq_isset(fq, QMAN_FQ_STATE_VDQCR)); | ||
2196 | else | ||
2197 | wait_event(affine_queue, | ||
2198 | !fq_isset(fq, QMAN_FQ_STATE_VDQCR)); | ||
2199 | } | ||
2200 | return 0; | ||
2201 | } | ||
2202 | EXPORT_SYMBOL(qman_volatile_dequeue); | ||
2203 | |||
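For draining a parked or retired FQ, the WAIT and FINISH flags make the call synchronous end to end: WAIT retries until this portal owns VDQCR, and FINISH blocks until the command expires; the dequeued frames still arrive through fq->cb.dqrr. A sketch, assuming the QM_VDQCR_NUMFRAMES_TILLEMPTY encoding from the public header:

    static int demo_drain_fq(struct qman_fq *fq)
    {
            /* Pull frames until h/w reports the queue empty; each one
             * still passes through fq->cb.dqrr on this cpu's portal. */
            return qman_volatile_dequeue(fq,
                            QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH,
                            QM_VDQCR_NUMFRAMES_TILLEMPTY);
    }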
2204 | static void update_eqcr_ci(struct qman_portal *p, u8 avail) | ||
2205 | { | ||
2206 | if (avail) | ||
2207 | qm_eqcr_cce_prefetch(&p->p); | ||
2208 | else | ||
2209 | qm_eqcr_cce_update(&p->p); | ||
2210 | } | ||
2211 | |||
2212 | int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd) | ||
2213 | { | ||
2214 | struct qman_portal *p; | ||
2215 | struct qm_eqcr_entry *eq; | ||
2216 | unsigned long irqflags; | ||
2217 | u8 avail; | ||
2218 | int ret = 0; | ||
2218 | |||
2219 | p = get_affine_portal(); | ||
2220 | local_irq_save(irqflags); | ||
2221 | |||
2222 | if (p->use_eqcr_ci_stashing) { | ||
2223 | /* | ||
2224 | * The stashing case is easy: only update if we need to, in | ||
2225 | * order to try to liberate ring entries. | ||
2226 | */ | ||
2227 | eq = qm_eqcr_start_stash(&p->p); | ||
2228 | } else { | ||
2229 | /* | ||
2230 | * The non-stashing case is harder: we need to prefetch ahead of | ||
2231 | * time. | ||
2232 | */ | ||
2233 | avail = qm_eqcr_get_avail(&p->p); | ||
2234 | if (avail < 2) | ||
2235 | update_eqcr_ci(p, avail); | ||
2236 | eq = qm_eqcr_start_no_stash(&p->p); | ||
2237 | } | ||
2238 | |||
2239 | if (unlikely(!eq)) { | ||
2240 | /* EQCR full: report -EBUSY instead of silently dropping */ | ||
2241 | ret = -EBUSY; | ||
2242 | goto out; | ||
2243 | } | ||
2241 | |||
2242 | eq->fqid = fq->fqid; | ||
2243 | eq->tag = fq_to_tag(fq); | ||
2244 | eq->fd = *fd; | ||
2245 | |||
2246 | qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE); | ||
2247 | out: | ||
2248 | local_irq_restore(irqflags); | ||
2249 | put_affine_portal(); | ||
2250 | return ret; | ||
2251 | } | ||
2252 | EXPORT_SYMBOL(qman_enqueue); | ||
2253 | |||
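Because a full EQCR surfaces as -EBUSY (the unlikely(!eq) path above) rather than blocking, producers typically wrap the call in a bounded retry. A sketch; the retry bound is illustrative:

    static int demo_enqueue(struct qman_fq *fq, const struct qm_fd *fd)
    {
            int i, ret;

            for (i = 0; i < 100; i++) {             /* illustrative bound */
                    ret = qman_enqueue(fq, fd);
                    if (ret != -EBUSY)
                            return ret;
                    cpu_relax();    /* EQCR full: let h/w consume entries */
            }
            return -EBUSY;
    }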
2254 | static int qm_modify_cgr(struct qman_cgr *cgr, u32 flags, | ||
2255 | struct qm_mcc_initcgr *opts) | ||
2256 | { | ||
2257 | union qm_mc_command *mcc; | ||
2258 | union qm_mc_result *mcr; | ||
2259 | struct qman_portal *p = get_affine_portal(); | ||
2260 | u8 verb = QM_MCC_VERB_MODIFYCGR; | ||
2261 | int ret = 0; | ||
2262 | |||
2263 | mcc = qm_mc_start(&p->p); | ||
2264 | if (opts) | ||
2265 | mcc->initcgr = *opts; | ||
2266 | mcc->initcgr.cgid = cgr->cgrid; | ||
2267 | if (flags & QMAN_CGR_FLAG_USE_INIT) | ||
2268 | verb = QM_MCC_VERB_INITCGR; | ||
2269 | qm_mc_commit(&p->p, verb); | ||
2270 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | ||
2271 | ret = -ETIMEDOUT; | ||
2272 | goto out; | ||
2273 | } | ||
2274 | |||
2275 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb); | ||
2276 | if (mcr->result != QM_MCR_RESULT_OK) | ||
2277 | ret = -EIO; | ||
2278 | |||
2279 | out: | ||
2280 | put_affine_portal(); | ||
2281 | return ret; | ||
2282 | } | ||
2283 | |||
2284 | #define PORTAL_IDX(n) (n->config->channel - QM_CHANNEL_SWPORTAL0) | ||
2285 | #define TARG_MASK(n) (BIT(31) >> PORTAL_IDX(n)) | ||
2286 | |||
2287 | static u8 qman_cgr_cpus[CGR_NUM]; | ||
2288 | |||
2289 | void qman_init_cgr_all(void) | ||
2290 | { | ||
2291 | struct qman_cgr cgr; | ||
2292 | int err_cnt = 0; | ||
2293 | |||
2294 | for (cgr.cgrid = 0; cgr.cgrid < CGR_NUM; cgr.cgrid++) { | ||
2295 | if (qm_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL)) | ||
2296 | err_cnt++; | ||
2297 | } | ||
2298 | |||
2299 | if (err_cnt) | ||
2300 | pr_err("Warning: %d error%s while initialising CGR h/w\n", | ||
2301 | err_cnt, (err_cnt > 1) ? "s" : ""); | ||
2302 | } | ||
2303 | |||
2304 | int qman_create_cgr(struct qman_cgr *cgr, u32 flags, | ||
2305 | struct qm_mcc_initcgr *opts) | ||
2306 | { | ||
2307 | struct qm_mcr_querycgr cgr_state; | ||
2308 | struct qm_mcc_initcgr local_opts = {}; | ||
2309 | int ret; | ||
2310 | struct qman_portal *p; | ||
2311 | |||
2312 | /* | ||
2313 | * We have to check that the provided CGRID is within the limits of the | ||
2314 | * data-structures, for obvious reasons. However we'll let h/w take | ||
2315 | * care of determining whether it's within the limits of what exists on | ||
2316 | * the SoC. | ||
2317 | */ | ||
2318 | if (cgr->cgrid >= CGR_NUM) | ||
2319 | return -EINVAL; | ||
2320 | |||
2321 | preempt_disable(); | ||
2322 | p = get_affine_portal(); | ||
2323 | qman_cgr_cpus[cgr->cgrid] = smp_processor_id(); | ||
2324 | preempt_enable(); | ||
2325 | |||
2326 | cgr->chan = p->config->channel; | ||
2327 | spin_lock(&p->cgr_lock); | ||
2328 | |||
2329 | if (opts) { | ||
2330 | ret = qman_query_cgr(cgr, &cgr_state); | ||
2331 | if (ret) | ||
2332 | goto out; | ||
2333 | local_opts = *opts; | ||
2335 | if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) | ||
2336 | local_opts.cgr.cscn_targ_upd_ctrl = | ||
2337 | QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p); | ||
2338 | else | ||
2339 | /* Overwrite TARG */ | ||
2340 | local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ | | ||
2341 | TARG_MASK(p); | ||
2342 | local_opts.we_mask |= QM_CGR_WE_CSCN_TARG; | ||
2343 | |||
2344 | /* send init if flags indicate so */ | ||
2345 | if (flags & QMAN_CGR_FLAG_USE_INIT) | ||
2346 | ret = qm_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, | ||
2347 | &local_opts); | ||
2348 | else | ||
2349 | ret = qm_modify_cgr(cgr, 0, &local_opts); | ||
2350 | if (ret) | ||
2351 | goto out; | ||
2352 | } | ||
2353 | |||
2354 | list_add(&cgr->node, &p->cgr_cbs); | ||
2355 | |||
2356 | /* Determine if newly added object requires its callback to be called */ | ||
2357 | ret = qman_query_cgr(cgr, &cgr_state); | ||
2358 | if (ret) { | ||
2359 | /* we can't go back, so proceed and return success */ | ||
2360 | dev_err(p->config->dev, "CGR HW state partially modified\n"); | ||
2361 | ret = 0; | ||
2362 | goto out; | ||
2363 | } | ||
2364 | if (cgr->cb && cgr_state.cgr.cscn_en && | ||
2365 | qman_cgrs_get(&p->cgrs[1], cgr->cgrid)) | ||
2366 | cgr->cb(p, cgr, 1); | ||
2367 | out: | ||
2368 | spin_unlock(&p->cgr_lock); | ||
2369 | put_affine_portal(); | ||
2370 | return ret; | ||
2371 | } | ||
2372 | EXPORT_SYMBOL(qman_create_cgr); | ||
2373 | |||
2374 | int qman_delete_cgr(struct qman_cgr *cgr) | ||
2375 | { | ||
2376 | unsigned long irqflags; | ||
2377 | struct qm_mcr_querycgr cgr_state; | ||
2378 | struct qm_mcc_initcgr local_opts; | ||
2379 | int ret = 0; | ||
2380 | struct qman_cgr *i; | ||
2381 | struct qman_portal *p = get_affine_portal(); | ||
2382 | |||
2383 | if (cgr->chan != p->config->channel) { | ||
2384 | /* attempt to delete from other portal than creator */ | ||
2385 | dev_err(p->config->dev, "CGR not owned by current portal\n"); | ||
2386 | dev_dbg(p->config->dev, " create 0x%x, delete 0x%x\n", | ||
2387 | cgr->chan, p->config->channel); | ||
2388 | |||
2389 | ret = -EINVAL; | ||
2390 | goto put_portal; | ||
2391 | } | ||
2392 | memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr)); | ||
2393 | spin_lock_irqsave(&p->cgr_lock, irqflags); | ||
2394 | list_del(&cgr->node); | ||
2395 | /* | ||
2396 | * If there are no other CGR objects for this CGRID in the list, | ||
2397 | * update CSCN_TARG accordingly | ||
2398 | */ | ||
2399 | list_for_each_entry(i, &p->cgr_cbs, node) | ||
2400 | if (i->cgrid == cgr->cgrid && i->cb) | ||
2401 | goto release_lock; | ||
2402 | ret = qman_query_cgr(cgr, &cgr_state); | ||
2403 | if (ret) { | ||
2404 | /* add back to the list */ | ||
2405 | list_add(&cgr->node, &p->cgr_cbs); | ||
2406 | goto release_lock; | ||
2407 | } | ||
2408 | /* Overwrite TARG */ | ||
2409 | local_opts.we_mask = QM_CGR_WE_CSCN_TARG; | ||
2410 | if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) | ||
2411 | local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p); | ||
2412 | else | ||
2413 | local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ & | ||
2414 | ~(TARG_MASK(p)); | ||
2415 | ret = qm_modify_cgr(cgr, 0, &local_opts); | ||
2416 | if (ret) | ||
2417 | /* add back to the list */ | ||
2418 | list_add(&cgr->node, &p->cgr_cbs); | ||
2419 | release_lock: | ||
2420 | spin_unlock_irqrestore(&p->cgr_lock, irqflags); | ||
2421 | put_portal: | ||
2422 | put_affine_portal(); | ||
2423 | return ret; | ||
2424 | } | ||
2425 | EXPORT_SYMBOL(qman_delete_cgr); | ||
2426 | |||
2427 | struct cgr_comp { | ||
2428 | struct qman_cgr *cgr; | ||
2429 | struct completion completion; | ||
2430 | }; | ||
2431 | |||
2432 | static int qman_delete_cgr_thread(void *p) | ||
2433 | { | ||
2434 | struct cgr_comp *cgr_comp = (struct cgr_comp *)p; | ||
2435 | int ret; | ||
2436 | |||
2437 | ret = qman_delete_cgr(cgr_comp->cgr); | ||
2438 | complete(&cgr_comp->completion); | ||
2439 | |||
2440 | return ret; | ||
2441 | } | ||
2442 | |||
2443 | void qman_delete_cgr_safe(struct qman_cgr *cgr) | ||
2444 | { | ||
2445 | struct task_struct *thread; | ||
2446 | struct cgr_comp cgr_comp; | ||
2447 | |||
2448 | preempt_disable(); | ||
2449 | if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) { | ||
2450 | init_completion(&cgr_comp.completion); | ||
2451 | cgr_comp.cgr = cgr; | ||
2452 | thread = kthread_create(qman_delete_cgr_thread, &cgr_comp, | ||
2453 | "cgr_del"); | ||
2454 | |||
2455 | if (IS_ERR(thread)) | ||
2456 | goto out; | ||
2457 | |||
2458 | kthread_bind(thread, qman_cgr_cpus[cgr->cgrid]); | ||
2459 | wake_up_process(thread); | ||
2460 | wait_for_completion(&cgr_comp.completion); | ||
2461 | preempt_enable(); | ||
2462 | return; | ||
2463 | } | ||
2464 | out: | ||
2465 | qman_delete_cgr(cgr); | ||
2466 | preempt_enable(); | ||
2467 | } | ||
2468 | EXPORT_SYMBOL(qman_delete_cgr_safe); | ||
2469 | |||
2470 | /* Cleanup FQs */ | ||
2471 | |||
2472 | static int _qm_mr_consume_and_match_verb(struct qm_portal *p, int v) | ||
2473 | { | ||
2474 | const union qm_mr_entry *msg; | ||
2475 | int found = 0; | ||
2476 | |||
2477 | qm_mr_pvb_update(p); | ||
2478 | msg = qm_mr_current(p); | ||
2479 | while (msg) { | ||
2480 | if ((msg->verb & QM_MR_VERB_TYPE_MASK) == v) | ||
2481 | found = 1; | ||
2482 | qm_mr_next(p); | ||
2483 | qm_mr_cci_consume_to_current(p); | ||
2484 | qm_mr_pvb_update(p); | ||
2485 | msg = qm_mr_current(p); | ||
2486 | } | ||
2487 | return found; | ||
2488 | } | ||
2489 | |||
2490 | static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s, | ||
2491 | bool wait) | ||
2492 | { | ||
2493 | const struct qm_dqrr_entry *dqrr; | ||
2494 | int found = 0; | ||
2495 | |||
2496 | do { | ||
2497 | qm_dqrr_pvb_update(p); | ||
2498 | dqrr = qm_dqrr_current(p); | ||
2499 | if (!dqrr) | ||
2500 | cpu_relax(); | ||
2501 | } while (wait && !dqrr); | ||
2502 | |||
2503 | while (dqrr) { | ||
2504 | if (dqrr->fqid == fqid && (dqrr->stat & s)) | ||
2505 | found = 1; | ||
2506 | qm_dqrr_cdc_consume_1ptr(p, dqrr, 0); | ||
2507 | qm_dqrr_pvb_update(p); | ||
2508 | qm_dqrr_next(p); | ||
2509 | dqrr = qm_dqrr_current(p); | ||
2510 | } | ||
2511 | return found; | ||
2512 | } | ||
2513 | |||
2514 | #define qm_mr_drain(p, V) \ | ||
2515 | _qm_mr_consume_and_match_verb(p, QM_MR_VERB_##V) | ||
2516 | |||
2517 | #define qm_dqrr_drain(p, f, S) \ | ||
2518 | _qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, false) | ||
2519 | |||
2520 | #define qm_dqrr_drain_wait(p, f, S) \ | ||
2521 | _qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, true) | ||
2522 | |||
2523 | #define qm_dqrr_drain_nomatch(p) \ | ||
2524 | _qm_dqrr_consume_and_match(p, 0, 0, false) | ||
2525 | |||
2526 | static int qman_shutdown_fq(u32 fqid) | ||
2527 | { | ||
2528 | struct qman_portal *p; | ||
2529 | struct device *dev; | ||
2530 | union qm_mc_command *mcc; | ||
2531 | union qm_mc_result *mcr; | ||
2532 | int orl_empty, drain = 0, ret = 0; | ||
2533 | u32 channel, wq, res; | ||
2534 | u8 state; | ||
2535 | |||
2536 | p = get_affine_portal(); | ||
2537 | dev = p->config->dev; | ||
2538 | /* Determine the state of the FQID */ | ||
2539 | mcc = qm_mc_start(&p->p); | ||
2540 | mcc->queryfq_np.fqid = fqid; | ||
2541 | qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP); | ||
2542 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | ||
2543 | dev_err(dev, "QUERYFQ_NP timeout\n"); | ||
2544 | ret = -ETIMEDOUT; | ||
2545 | goto out; | ||
2546 | } | ||
2547 | |||
2548 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP); | ||
2549 | state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK; | ||
2550 | if (state == QM_MCR_NP_STATE_OOS) | ||
2551 | goto out; /* Already OOS, no need to do anymore checks */ | ||
2552 | |||
2553 | /* Query which channel the FQ is using */ | ||
2554 | mcc = qm_mc_start(&p->p); | ||
2555 | mcc->queryfq.fqid = fqid; | ||
2556 | qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ); | ||
2557 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | ||
2558 | dev_err(dev, "QUERYFQ timeout\n"); | ||
2559 | ret = -ETIMEDOUT; | ||
2560 | goto out; | ||
2561 | } | ||
2562 | |||
2563 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ); | ||
2564 | /* Need to store these since the MCR gets reused */ | ||
2565 | channel = qm_fqd_get_chan(&mcr->queryfq.fqd); | ||
2566 | wq = qm_fqd_get_wq(&mcr->queryfq.fqd); | ||
2567 | |||
2568 | switch (state) { | ||
2569 | case QM_MCR_NP_STATE_TEN_SCHED: | ||
2570 | case QM_MCR_NP_STATE_TRU_SCHED: | ||
2571 | case QM_MCR_NP_STATE_ACTIVE: | ||
2572 | case QM_MCR_NP_STATE_PARKED: | ||
2573 | orl_empty = 0; | ||
2574 | mcc = qm_mc_start(&p->p); | ||
2575 | mcc->alterfq.fqid = fqid; | ||
2576 | qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE); | ||
2577 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | ||
2578 | dev_err(dev, "QUERYFQ_NP timeout\n"); | ||
2579 | ret = -ETIMEDOUT; | ||
2580 | goto out; | ||
2581 | } | ||
2582 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == | ||
2583 | QM_MCR_VERB_ALTER_RETIRE); | ||
2584 | res = mcr->result; /* Make a copy as we reuse MCR below */ | ||
2585 | |||
2586 | if (res == QM_MCR_RESULT_PENDING) { | ||
2587 | /* | ||
2588 | * Need to wait for the FQRN in the message ring, which | ||
2589 | * will only occur once the FQ has been drained. In | ||
2590 | * order for the FQ to drain the portal needs to be set | ||
2591 | * to dequeue from the channel the FQ is scheduled on | ||
2592 | */ | ||
2593 | int found_fqrn = 0; | ||
2594 | u16 dequeue_wq = 0; | ||
2595 | |||
2596 | /* Flag that we need to drain FQ */ | ||
2597 | drain = 1; | ||
2598 | |||
2599 | if (channel >= qm_channel_pool1 && | ||
2600 | channel < qm_channel_pool1 + 15) { | ||
2601 | /* Pool channel, enable the bit in the portal */ | ||
2602 | dequeue_wq = (channel - | ||
2603 | qm_channel_pool1 + 1)<<4 | wq; | ||
2604 | } else if (channel < qm_channel_pool1) { | ||
2605 | /* Dedicated channel */ | ||
2606 | dequeue_wq = wq; | ||
2607 | } else { | ||
2608 | dev_err(dev, "Can't recover FQ 0x%x, ch: 0x%x", | ||
2609 | fqid, channel); | ||
2610 | ret = -EBUSY; | ||
2611 | goto out; | ||
2612 | } | ||
2613 | /* Set the sdqcr to drain this channel */ | ||
2614 | if (channel < qm_channel_pool1) | ||
2615 | qm_dqrr_sdqcr_set(&p->p, | ||
2616 | QM_SDQCR_TYPE_ACTIVE | | ||
2617 | QM_SDQCR_CHANNELS_DEDICATED); | ||
2618 | else | ||
2619 | qm_dqrr_sdqcr_set(&p->p, | ||
2620 | QM_SDQCR_TYPE_ACTIVE | | ||
2621 | QM_SDQCR_CHANNELS_POOL_CONV | ||
2622 | (channel)); | ||
2623 | do { | ||
2624 | /* Keep draining DQRR while checking the MR */ | ||
2625 | qm_dqrr_drain_nomatch(&p->p); | ||
2626 | /* Process message ring too */ | ||
2627 | found_fqrn = qm_mr_drain(&p->p, FQRN); | ||
2628 | cpu_relax(); | ||
2629 | } while (!found_fqrn); | ||
2630 | |||
2631 | } | ||
2632 | if (res != QM_MCR_RESULT_OK && | ||
2633 | res != QM_MCR_RESULT_PENDING) { | ||
2634 | dev_err(dev, "retire_fq failed: FQ 0x%x, res=0x%x\n", | ||
2635 | fqid, res); | ||
2636 | ret = -EIO; | ||
2637 | goto out; | ||
2638 | } | ||
2639 | if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) { | ||
2640 | /* | ||
2641 | * ORL had no entries, no need to wait until the | ||
2642 | * ERNs come in | ||
2643 | */ | ||
2644 | orl_empty = 1; | ||
2645 | } | ||
2646 | /* | ||
2647 | * Retirement succeeded, check to see if FQ needs | ||
2648 | * to be drained | ||
2649 | */ | ||
2650 | if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) { | ||
2651 | /* FQ is Not Empty, drain using volatile DQ commands */ | ||
2652 | do { | ||
2653 | u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3); | ||
2654 | |||
2655 | qm_dqrr_vdqcr_set(&p->p, vdqcr); | ||
2656 | /* | ||
2657 | * Wait for a dequeue and process the dequeues, | ||
2658 | * making sure to empty the ring completely | ||
2659 | */ | ||
2660 | } while (qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY)); | ||
2661 | } | ||
2662 | qm_dqrr_sdqcr_set(&p->p, 0); | ||
2663 | |||
2664 | while (!orl_empty) { | ||
2665 | /* Wait for the ORL to have been completely drained */ | ||
2666 | orl_empty = qm_mr_drain(&p->p, FQRL); | ||
2667 | cpu_relax(); | ||
2668 | } | ||
2669 | mcc = qm_mc_start(&p->p); | ||
2670 | mcc->alterfq.fqid = fqid; | ||
2671 | qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); | ||
2672 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | ||
2673 | ret = -ETIMEDOUT; | ||
2674 | goto out; | ||
2675 | } | ||
2676 | |||
2677 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == | ||
2678 | QM_MCR_VERB_ALTER_OOS); | ||
2679 | if (mcr->result != QM_MCR_RESULT_OK) { | ||
2680 | dev_err(dev, "OOS after drain fail: FQ 0x%x (0x%x)\n", | ||
2681 | fqid, mcr->result); | ||
2682 | ret = -EIO; | ||
2683 | goto out; | ||
2684 | } | ||
2685 | break; | ||
2686 | |||
2687 | case QM_MCR_NP_STATE_RETIRED: | ||
2688 | /* Send OOS Command */ | ||
2689 | mcc = qm_mc_start(&p->p); | ||
2690 | mcc->alterfq.fqid = fqid; | ||
2691 | qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); | ||
2692 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | ||
2693 | ret = -ETIMEDOUT; | ||
2694 | goto out; | ||
2695 | } | ||
2696 | |||
2697 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == | ||
2698 | QM_MCR_VERB_ALTER_OOS); | ||
2699 | if (mcr->result) { | ||
2700 | dev_err(dev, "OOS fail: FQ 0x%x (0x%x)\n", | ||
2701 | fqid, mcr->result); | ||
2702 | ret = -EIO; | ||
2703 | goto out; | ||
2704 | } | ||
2705 | break; | ||
2706 | |||
2707 | case QM_MCR_NP_STATE_OOS: | ||
2708 | /* Done */ | ||
2709 | break; | ||
2710 | |||
2711 | default: | ||
2712 | ret = -EIO; | ||
2713 | } | ||
2714 | |||
2715 | out: | ||
2716 | put_affine_portal(); | ||
2717 | return ret; | ||
2718 | } | ||
2719 | |||
2720 | const struct qm_portal_config *qman_get_qm_portal_config( | ||
2721 | struct qman_portal *portal) | ||
2722 | { | ||
2723 | return portal->config; | ||
2724 | } | ||
2725 | |||
2726 | struct gen_pool *qm_fqalloc; /* FQID allocator */ | ||
2727 | struct gen_pool *qm_qpalloc; /* pool-channel allocator */ | ||
2728 | struct gen_pool *qm_cgralloc; /* CGR ID allocator */ | ||
2729 | |||
2730 | static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt) | ||
2731 | { | ||
2732 | unsigned long addr; | ||
2733 | |||
2734 | addr = gen_pool_alloc(p, cnt); | ||
2735 | if (!addr) | ||
2736 | return -ENOMEM; | ||
2737 | |||
2738 | *result = addr & ~DPAA_GENALLOC_OFF; | ||
2739 | |||
2740 | return 0; | ||
2741 | } | ||
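gen_pool_alloc() returns 0 to signal failure, so a legitimate ID of 0 would otherwise be ambiguous; the pools are therefore seeded with IDs offset by DPAA_GENALLOC_OFF, the offset is stripped here on allocation, and re-added on free (see qman_release_fqid() below). A minimal sketch of that round-trip, assuming only that DPAA_GENALLOC_OFF (defined outside this hunk) sits above the ID space:

	unsigned long encoded = gen_pool_alloc(pool, 1);	/* 0 == failure */
	if (encoded) {
		u32 id = encoded & ~DPAA_GENALLOC_OFF;		/* usable ID */
		/* ... use the ID ... */
		gen_pool_free(pool, id | DPAA_GENALLOC_OFF, 1);	/* re-encode */
	}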
2742 | |||
2743 | int qman_alloc_fqid_range(u32 *result, u32 count) | ||
2744 | { | ||
2745 | return qman_alloc_range(qm_fqalloc, result, count); | ||
2746 | } | ||
2747 | EXPORT_SYMBOL(qman_alloc_fqid_range); | ||
2748 | |||
2749 | int qman_alloc_pool_range(u32 *result, u32 count) | ||
2750 | { | ||
2751 | return qman_alloc_range(qm_qpalloc, result, count); | ||
2752 | } | ||
2753 | EXPORT_SYMBOL(qman_alloc_pool_range); | ||
2754 | |||
2755 | int qman_alloc_cgrid_range(u32 *result, u32 count) | ||
2756 | { | ||
2757 | return qman_alloc_range(qm_cgralloc, result, count); | ||
2758 | } | ||
2759 | EXPORT_SYMBOL(qman_alloc_cgrid_range); | ||
2760 | |||
2761 | int qman_release_fqid(u32 fqid) | ||
2762 | { | ||
2763 | int ret = qman_shutdown_fq(fqid); | ||
2764 | |||
2765 | if (ret) { | ||
2766 | pr_debug("FQID %d leaked\n", fqid); | ||
2767 | return ret; | ||
2768 | } | ||
2769 | |||
2770 | gen_pool_free(qm_fqalloc, fqid | DPAA_GENALLOC_OFF, 1); | ||
2771 | return 0; | ||
2772 | } | ||
2773 | EXPORT_SYMBOL(qman_release_fqid); | ||
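A hedged sketch of how a datapath driver might pair these exports (hypothetical caller, not from this patch); note that qman_release_fqid() shuts the FQ down first, so a live queue is never handed back to the allocator:

	u32 fqid;

	if (!qman_alloc_fqid_range(&fqid, 1)) {
		/* ... initialise, use and tear down the frame queue ... */
		if (qman_release_fqid(fqid))
			pr_warn("FQID %u could not be recycled\n", fqid);
	}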
2774 | |||
2775 | static int qpool_cleanup(u32 qp) | ||
2776 | { | ||
2777 | /* | ||
2778 | * We query all FQDs starting from FQID 1 until we get an | ||
2779 | * "invalid FQID" error, looking for non-OOS FQDs whose destination | ||
2780 | * channel is the pool-channel being released. When a non-OOS FQD | ||
2781 | * is found we attempt to clean it up. | ||
2782 | */ | ||
2783 | struct qman_fq fq = { | ||
2784 | .fqid = QM_FQID_RANGE_START | ||
2785 | }; | ||
2786 | int err; | ||
2787 | |||
2788 | do { | ||
2789 | struct qm_mcr_queryfq_np np; | ||
2790 | |||
2791 | err = qman_query_fq_np(&fq, &np); | ||
2792 | if (err) | ||
2793 | /* FQID range exceeded, found no problems */ | ||
2794 | return 0; | ||
2795 | if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) { | ||
2796 | struct qm_fqd fqd; | ||
2797 | |||
2798 | err = qman_query_fq(&fq, &fqd); | ||
2799 | if (WARN_ON(err)) | ||
2800 | return 0; | ||
2801 | if (qm_fqd_get_chan(&fqd) == qp) { | ||
2802 | /* The channel is the FQ's target, clean it */ | ||
2803 | err = qman_shutdown_fq(fq.fqid); | ||
2804 | if (err) | ||
2805 | /* | ||
2806 | * Couldn't shut down the FQ | ||
2807 | * so the pool must be leaked | ||
2808 | */ | ||
2809 | return err; | ||
2810 | } | ||
2811 | } | ||
2812 | /* Move to the next FQID */ | ||
2813 | fq.fqid++; | ||
2814 | } while (1); | ||
2815 | } | ||
2816 | |||
2817 | int qman_release_pool(u32 qp) | ||
2818 | { | ||
2819 | int ret; | ||
2820 | |||
2821 | ret = qpool_cleanup(qp); | ||
2822 | if (ret) { | ||
2823 | pr_debug("CHID %d leaked\n", qp); | ||
2824 | return ret; | ||
2825 | } | ||
2826 | |||
2827 | gen_pool_free(qm_qpalloc, qp | DPAA_GENALLOC_OFF, 1); | ||
2828 | return 0; | ||
2829 | } | ||
2830 | EXPORT_SYMBOL(qman_release_pool); | ||
2831 | |||
2832 | static int cgr_cleanup(u32 cgrid) | ||
2833 | { | ||
2834 | /* | ||
2835 | * query all FQDs starting from FQID 1 until we get an "invalid FQID" | ||
2836 | * error, looking for non-OOS FQDs whose CGR is the CGR being released | ||
2837 | */ | ||
2838 | struct qman_fq fq = { | ||
2839 | .fqid = 1 | ||
2840 | }; | ||
2841 | int err; | ||
2842 | |||
2843 | do { | ||
2844 | struct qm_mcr_queryfq_np np; | ||
2845 | |||
2846 | err = qman_query_fq_np(&fq, &np); | ||
2847 | if (err) | ||
2848 | /* FQID range exceeded, found no problems */ | ||
2849 | return 0; | ||
2850 | if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) { | ||
2851 | struct qm_fqd fqd; | ||
2852 | |||
2853 | err = qman_query_fq(&fq, &fqd); | ||
2854 | if (WARN_ON(err)) | ||
2855 | return 0; | ||
2856 | if ((fqd.fq_ctrl & QM_FQCTRL_CGE) && | ||
2857 | fqd.cgid == cgrid) { | ||
2858 | pr_err("CGRID 0x%x is being used by FQID 0x%x, CGR will be leaked\n", | ||
2859 | cgrid, fq.fqid); | ||
2860 | return -EIO; | ||
2861 | } | ||
2862 | } | ||
2863 | /* Move to the next FQID */ | ||
2864 | fq.fqid++; | ||
2865 | } while (1); | ||
2866 | } | ||
2867 | |||
2868 | int qman_release_cgrid(u32 cgrid) | ||
2869 | { | ||
2870 | int ret; | ||
2871 | |||
2872 | ret = cgr_cleanup(cgrid); | ||
2873 | if (ret) { | ||
2874 | pr_debug("CGRID %d leaked\n", cgrid); | ||
2875 | return ret; | ||
2876 | } | ||
2877 | |||
2878 | gen_pool_free(qm_cgralloc, cgrid | DPAA_GENALLOC_OFF, 1); | ||
2879 | return 0; | ||
2880 | } | ||
2881 | EXPORT_SYMBOL(qman_release_cgrid); | ||
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c new file mode 100644 index 000000000000..0cace9e0077e --- /dev/null +++ b/drivers/soc/fsl/qbman/qman_ccsr.c | |||
@@ -0,0 +1,808 @@ | |||
1 | /* Copyright 2008 - 2016 Freescale Semiconductor, Inc. | ||
2 | * | ||
3 | * Redistribution and use in source and binary forms, with or without | ||
4 | * modification, are permitted provided that the following conditions are met: | ||
5 | * * Redistributions of source code must retain the above copyright | ||
6 | * notice, this list of conditions and the following disclaimer. | ||
7 | * * Redistributions in binary form must reproduce the above copyright | ||
8 | * notice, this list of conditions and the following disclaimer in the | ||
9 | * documentation and/or other materials provided with the distribution. | ||
10 | * * Neither the name of Freescale Semiconductor nor the | ||
11 | * names of its contributors may be used to endorse or promote products | ||
12 | * derived from this software without specific prior written permission. | ||
13 | * | ||
14 | * ALTERNATIVELY, this software may be distributed under the terms of the | ||
15 | * GNU General Public License ("GPL") as published by the Free Software | ||
16 | * Foundation, either version 2 of that License or (at your option) any | ||
17 | * later version. | ||
18 | * | ||
19 | * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY | ||
20 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
21 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | ||
22 | * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY | ||
23 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | ||
24 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | ||
25 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | ||
26 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | ||
28 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
29 | */ | ||
30 | |||
31 | #include "qman_priv.h" | ||
32 | |||
33 | u16 qman_ip_rev; | ||
34 | EXPORT_SYMBOL(qman_ip_rev); | ||
35 | u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1; | ||
36 | EXPORT_SYMBOL(qm_channel_pool1); | ||
37 | |||
38 | /* Register offsets */ | ||
39 | #define REG_QCSP_LIO_CFG(n) (0x0000 + ((n) * 0x10)) | ||
40 | #define REG_QCSP_IO_CFG(n) (0x0004 + ((n) * 0x10)) | ||
41 | #define REG_QCSP_DD_CFG(n) (0x000c + ((n) * 0x10)) | ||
42 | #define REG_DD_CFG 0x0200 | ||
43 | #define REG_DCP_CFG(n) (0x0300 + ((n) * 0x10)) | ||
44 | #define REG_DCP_DD_CFG(n) (0x0304 + ((n) * 0x10)) | ||
45 | #define REG_DCP_DLM_AVG(n) (0x030c + ((n) * 0x10)) | ||
46 | #define REG_PFDR_FPC 0x0400 | ||
47 | #define REG_PFDR_FP_HEAD 0x0404 | ||
48 | #define REG_PFDR_FP_TAIL 0x0408 | ||
49 | #define REG_PFDR_FP_LWIT 0x0410 | ||
50 | #define REG_PFDR_CFG 0x0414 | ||
51 | #define REG_SFDR_CFG 0x0500 | ||
52 | #define REG_SFDR_IN_USE 0x0504 | ||
53 | #define REG_WQ_CS_CFG(n) (0x0600 + ((n) * 0x04)) | ||
54 | #define REG_WQ_DEF_ENC_WQID 0x0630 | ||
55 | #define REG_WQ_SC_DD_CFG(n) (0x640 + ((n) * 0x04)) | ||
56 | #define REG_WQ_PC_DD_CFG(n) (0x680 + ((n) * 0x04)) | ||
57 | #define REG_WQ_DC0_DD_CFG(n) (0x6c0 + ((n) * 0x04)) | ||
58 | #define REG_WQ_DC1_DD_CFG(n) (0x700 + ((n) * 0x04)) | ||
59 | #define REG_WQ_DCn_DD_CFG(n) (0x6c0 + ((n) * 0x40)) /* n=2,3 */ | ||
60 | #define REG_CM_CFG 0x0800 | ||
61 | #define REG_ECSR 0x0a00 | ||
62 | #define REG_ECIR 0x0a04 | ||
63 | #define REG_EADR 0x0a08 | ||
64 | #define REG_ECIR2 0x0a0c | ||
65 | #define REG_EDATA(n) (0x0a10 + ((n) * 0x04)) | ||
66 | #define REG_SBEC(n) (0x0a80 + ((n) * 0x04)) | ||
67 | #define REG_MCR 0x0b00 | ||
68 | #define REG_MCP(n) (0x0b04 + ((n) * 0x04)) | ||
69 | #define REG_MISC_CFG 0x0be0 | ||
70 | #define REG_HID_CFG 0x0bf0 | ||
71 | #define REG_IDLE_STAT 0x0bf4 | ||
72 | #define REG_IP_REV_1 0x0bf8 | ||
73 | #define REG_IP_REV_2 0x0bfc | ||
74 | #define REG_FQD_BARE 0x0c00 | ||
75 | #define REG_PFDR_BARE 0x0c20 | ||
76 | #define REG_offset_BAR 0x0004 /* relative to REG_[FQD|PFDR]_BARE */ | ||
77 | #define REG_offset_AR 0x0010 /* relative to REG_[FQD|PFDR]_BARE */ | ||
78 | #define REG_QCSP_BARE 0x0c80 | ||
79 | #define REG_QCSP_BAR 0x0c84 | ||
80 | #define REG_CI_SCHED_CFG 0x0d00 | ||
81 | #define REG_SRCIDR 0x0d04 | ||
82 | #define REG_LIODNR 0x0d08 | ||
83 | #define REG_CI_RLM_AVG 0x0d14 | ||
84 | #define REG_ERR_ISR 0x0e00 | ||
85 | #define REG_ERR_IER 0x0e04 | ||
86 | #define REG_REV3_QCSP_LIO_CFG(n) (0x1000 + ((n) * 0x10)) | ||
87 | #define REG_REV3_QCSP_IO_CFG(n) (0x1004 + ((n) * 0x10)) | ||
88 | #define REG_REV3_QCSP_DD_CFG(n) (0x100c + ((n) * 0x10)) | ||
89 | |||
90 | /* Assists for QMAN_MCR */ | ||
91 | #define MCR_INIT_PFDR 0x01000000 | ||
92 | #define MCR_get_rslt(v) (u8)((v) >> 24) | ||
93 | #define MCR_rslt_idle(r) (!(r) || ((r) >= 0xf0)) | ||
94 | #define MCR_rslt_ok(r) ((r) == 0xf0) | ||
95 | #define MCR_rslt_eaccess(r) ((r) == 0xf8) | ||
96 | #define MCR_rslt_inval(r) ((r) == 0xff) | ||
97 | |||
98 | /* | ||
99 | * Corenet initiator settings. Stash request queues are 4-deep to match the | ||
100 | * cores' ability to snarf. Stash priority is 3, other priorities are 2. | ||
101 | */ | ||
102 | #define QM_CI_SCHED_CFG_SRCCIV 4 | ||
103 | #define QM_CI_SCHED_CFG_SRQ_W 3 | ||
104 | #define QM_CI_SCHED_CFG_RW_W 2 | ||
105 | #define QM_CI_SCHED_CFG_BMAN_W 2 | ||
106 | /* write SRCCIV enable */ | ||
107 | #define QM_CI_SCHED_CFG_SRCCIV_EN BIT(31) | ||
108 | |||
109 | /* Follows WQ_CS_CFG0-5 */ | ||
110 | enum qm_wq_class { | ||
111 | qm_wq_portal = 0, | ||
112 | qm_wq_pool = 1, | ||
113 | qm_wq_fman0 = 2, | ||
114 | qm_wq_fman1 = 3, | ||
115 | qm_wq_caam = 4, | ||
116 | qm_wq_pme = 5, | ||
117 | qm_wq_first = qm_wq_portal, | ||
118 | qm_wq_last = qm_wq_pme | ||
119 | }; | ||
120 | |||
121 | /* Follows FQD_[BARE|BAR|AR] and PFDR_[BARE|BAR|AR] */ | ||
122 | enum qm_memory { | ||
123 | qm_memory_fqd, | ||
124 | qm_memory_pfdr | ||
125 | }; | ||
126 | |||
127 | /* Used by all error interrupt registers except 'inhibit' */ | ||
128 | #define QM_EIRQ_CIDE 0x20000000 /* Corenet Initiator Data Error */ | ||
129 | #define QM_EIRQ_CTDE 0x10000000 /* Corenet Target Data Error */ | ||
130 | #define QM_EIRQ_CITT 0x08000000 /* Corenet Invalid Target Transaction */ | ||
131 | #define QM_EIRQ_PLWI 0x04000000 /* PFDR Low Watermark */ | ||
132 | #define QM_EIRQ_MBEI 0x02000000 /* Multi-bit ECC Error */ | ||
133 | #define QM_EIRQ_SBEI 0x01000000 /* Single-bit ECC Error */ | ||
134 | #define QM_EIRQ_PEBI 0x00800000 /* PFDR Enqueues Blocked Interrupt */ | ||
135 | #define QM_EIRQ_IFSI 0x00020000 /* Invalid FQ Flow Control State */ | ||
136 | #define QM_EIRQ_ICVI 0x00010000 /* Invalid Command Verb */ | ||
137 | #define QM_EIRQ_IDDI 0x00000800 /* Invalid Dequeue (Direct-connect) */ | ||
138 | #define QM_EIRQ_IDFI 0x00000400 /* Invalid Dequeue FQ */ | ||
139 | #define QM_EIRQ_IDSI 0x00000200 /* Invalid Dequeue Source */ | ||
140 | #define QM_EIRQ_IDQI 0x00000100 /* Invalid Dequeue Queue */ | ||
141 | #define QM_EIRQ_IECE 0x00000010 /* Invalid Enqueue Configuration */ | ||
142 | #define QM_EIRQ_IEOI 0x00000008 /* Invalid Enqueue Overflow */ | ||
143 | #define QM_EIRQ_IESI 0x00000004 /* Invalid Enqueue State */ | ||
144 | #define QM_EIRQ_IECI 0x00000002 /* Invalid Enqueue Channel */ | ||
145 | #define QM_EIRQ_IEQI 0x00000001 /* Invalid Enqueue Queue */ | ||
146 | |||
147 | /* QMAN_ECIR valid error bit */ | ||
148 | #define PORTAL_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IESI | QM_EIRQ_IEOI | \ | ||
149 | QM_EIRQ_IDQI | QM_EIRQ_IDSI | QM_EIRQ_IDFI | \ | ||
150 | QM_EIRQ_IDDI | QM_EIRQ_ICVI | QM_EIRQ_IFSI) | ||
151 | #define FQID_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IECI | QM_EIRQ_IESI | \ | ||
152 | QM_EIRQ_IEOI | QM_EIRQ_IDQI | QM_EIRQ_IDFI | \ | ||
153 | QM_EIRQ_IFSI) | ||
154 | |||
155 | struct qm_ecir { | ||
156 | u32 info; /* res[30-31], ptyp[29], pnum[24-28], fqid[0-23] */ | ||
157 | }; | ||
158 | |||
159 | static bool qm_ecir_is_dcp(const struct qm_ecir *p) | ||
160 | { | ||
161 | return p->info & BIT(29); | ||
162 | } | ||
163 | |||
164 | static int qm_ecir_get_pnum(const struct qm_ecir *p) | ||
165 | { | ||
166 | return (p->info >> 24) & 0x1f; | ||
167 | } | ||
168 | |||
169 | static int qm_ecir_get_fqid(const struct qm_ecir *p) | ||
170 | { | ||
171 | return p->info & (BIT(24) - 1); | ||
172 | } | ||
173 | |||
174 | struct qm_ecir2 { | ||
175 | u32 info; /* ptyp[31], res[10-30], pnum[0-9] */ | ||
176 | }; | ||
177 | |||
178 | static bool qm_ecir2_is_dcp(const struct qm_ecir2 *p) | ||
179 | { | ||
180 | return p->info & BIT(31); | ||
181 | } | ||
182 | |||
183 | static int qm_ecir2_get_pnum(const struct qm_ecir2 *p) | ||
184 | { | ||
185 | return p->info & (BIT(10) - 1); | ||
186 | } | ||
187 | |||
188 | struct qm_eadr { | ||
189 | u32 info; /* memid[24-27], eadr[0-11] */ | ||
190 | /* v3: memid[24-28], eadr[0-15] */ | ||
191 | }; | ||
192 | |||
193 | static int qm_eadr_get_memid(const struct qm_eadr *p) | ||
194 | { | ||
195 | return (p->info >> 24) & 0xf; | ||
196 | } | ||
197 | |||
198 | static int qm_eadr_get_eadr(const struct qm_eadr *p) | ||
199 | { | ||
200 | return p->info & (BIT(12) - 1); | ||
201 | } | ||
202 | |||
203 | static int qm_eadr_v3_get_memid(const struct qm_eadr *p) | ||
204 | { | ||
205 | return (p->info >> 24) & 0x1f; | ||
206 | } | ||
207 | |||
208 | static int qm_eadr_v3_get_eadr(const struct qm_eadr *p) | ||
209 | { | ||
210 | return p->info & (BIT(16) - 1); | ||
211 | } | ||
212 | |||
213 | struct qman_hwerr_txt { | ||
214 | u32 mask; | ||
215 | const char *txt; | ||
216 | }; | ||
217 | |||
218 | |||
219 | static const struct qman_hwerr_txt qman_hwerr_txts[] = { | ||
220 | { QM_EIRQ_CIDE, "Corenet Initiator Data Error" }, | ||
221 | { QM_EIRQ_CTDE, "Corenet Target Data Error" }, | ||
222 | { QM_EIRQ_CITT, "Corenet Invalid Target Transaction" }, | ||
223 | { QM_EIRQ_PLWI, "PFDR Low Watermark" }, | ||
224 | { QM_EIRQ_MBEI, "Multi-bit ECC Error" }, | ||
225 | { QM_EIRQ_SBEI, "Single-bit ECC Error" }, | ||
226 | { QM_EIRQ_PEBI, "PFDR Enqueues Blocked Interrupt" }, | ||
227 | { QM_EIRQ_ICVI, "Invalid Command Verb" }, | ||
228 | { QM_EIRQ_IFSI, "Invalid FQ Flow Control State" }, | ||
229 | { QM_EIRQ_IDDI, "Invalid Dequeue (Direct-connect)" }, | ||
230 | { QM_EIRQ_IDFI, "Invalid Dequeue FQ" }, | ||
231 | { QM_EIRQ_IDSI, "Invalid Dequeue Source" }, | ||
232 | { QM_EIRQ_IDQI, "Invalid Dequeue Queue" }, | ||
233 | { QM_EIRQ_IECE, "Invalid Enqueue Configuration" }, | ||
234 | { QM_EIRQ_IEOI, "Invalid Enqueue Overflow" }, | ||
235 | { QM_EIRQ_IESI, "Invalid Enqueue State" }, | ||
236 | { QM_EIRQ_IECI, "Invalid Enqueue Channel" }, | ||
237 | { QM_EIRQ_IEQI, "Invalid Enqueue Queue" }, | ||
238 | }; | ||
239 | |||
240 | struct qman_error_info_mdata { | ||
241 | u16 addr_mask; | ||
242 | u16 bits; | ||
243 | const char *txt; | ||
244 | }; | ||
245 | |||
246 | static const struct qman_error_info_mdata error_mdata[] = { | ||
247 | { 0x01FF, 24, "FQD cache tag memory 0" }, | ||
248 | { 0x01FF, 24, "FQD cache tag memory 1" }, | ||
249 | { 0x01FF, 24, "FQD cache tag memory 2" }, | ||
250 | { 0x01FF, 24, "FQD cache tag memory 3" }, | ||
251 | { 0x0FFF, 512, "FQD cache memory" }, | ||
252 | { 0x07FF, 128, "SFDR memory" }, | ||
253 | { 0x01FF, 72, "WQ context memory" }, | ||
254 | { 0x00FF, 240, "CGR memory" }, | ||
255 | { 0x00FF, 302, "Internal Order Restoration List memory" }, | ||
256 | { 0x01FF, 256, "SW portal ring memory" }, | ||
257 | }; | ||
258 | |||
259 | #define QMAN_ERRS_TO_DISABLE (QM_EIRQ_PLWI | QM_EIRQ_PEBI) | ||
260 | |||
261 | /* | ||
262 | * TODO: unimplemented registers | ||
263 | * | ||
264 | * Keeping a list here of QMan registers not yet covered by this driver: | ||
265 | * QCSP_DD_IHRSR, QCSP_DD_IHRFR, QCSP_DD_HASR, | ||
266 | * DCP_DD_IHRSR, DCP_DD_IHRFR, DCP_DD_HASR, CM_CFG, | ||
267 | * QMAN_EECC, QMAN_SBET, QMAN_EINJ, QMAN_SBEC0-12 | ||
268 | */ | ||
269 | |||
270 | /* Pointer to the start of the QMan's CCSR space */ | ||
271 | static u32 __iomem *qm_ccsr_start; | ||
272 | /* A SDQCR mask comprising all the available/visible pool channels */ | ||
273 | static u32 qm_pools_sdqcr; | ||
274 | |||
275 | static inline u32 qm_ccsr_in(u32 offset) | ||
276 | { | ||
277 | return ioread32be(qm_ccsr_start + offset/4); | ||
278 | } | ||
279 | |||
280 | static inline void qm_ccsr_out(u32 offset, u32 val) | ||
281 | { | ||
282 | iowrite32be(val, qm_ccsr_start + offset/4); | ||
283 | } | ||
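qm_ccsr_start is a u32 pointer, so the byte offsets from the register table above are scaled down by the offset/4 term. Worked example using a register from that table:

	/* REG_MCR is byte offset 0x0b00, so this performs
	 * ioread32be(qm_ccsr_start + 0x0b00/4), i.e. element index 0x2c0. */
	u32 mcr = qm_ccsr_in(REG_MCR);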
284 | |||
285 | u32 qm_get_pools_sdqcr(void) | ||
286 | { | ||
287 | return qm_pools_sdqcr; | ||
288 | } | ||
289 | |||
290 | enum qm_dc_portal { | ||
291 | qm_dc_portal_fman0 = 0, | ||
292 | qm_dc_portal_fman1 = 1 | ||
293 | }; | ||
294 | |||
295 | static void qm_set_dc(enum qm_dc_portal portal, int ed, u8 sernd) | ||
296 | { | ||
297 | DPAA_ASSERT(!ed || portal == qm_dc_portal_fman0 || | ||
298 | portal == qm_dc_portal_fman1); | ||
299 | if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) | ||
300 | qm_ccsr_out(REG_DCP_CFG(portal), | ||
301 | (ed ? 0x1000 : 0) | (sernd & 0x3ff)); | ||
302 | else | ||
303 | qm_ccsr_out(REG_DCP_CFG(portal), | ||
304 | (ed ? 0x100 : 0) | (sernd & 0x1f)); | ||
305 | } | ||
306 | |||
307 | static void qm_set_wq_scheduling(enum qm_wq_class wq_class, | ||
308 | u8 cs_elev, u8 csw2, u8 csw3, u8 csw4, | ||
309 | u8 csw5, u8 csw6, u8 csw7) | ||
310 | { | ||
311 | qm_ccsr_out(REG_WQ_CS_CFG(wq_class), ((cs_elev & 0xff) << 24) | | ||
312 | ((csw2 & 0x7) << 20) | ((csw3 & 0x7) << 16) | | ||
313 | ((csw4 & 0x7) << 12) | ((csw5 & 0x7) << 8) | | ||
314 | ((csw6 & 0x7) << 4) | (csw7 & 0x7)); | ||
315 | } | ||
316 | |||
317 | static void qm_set_hid(void) | ||
318 | { | ||
319 | qm_ccsr_out(REG_HID_CFG, 0); | ||
320 | } | ||
321 | |||
322 | static void qm_set_corenet_initiator(void) | ||
323 | { | ||
324 | qm_ccsr_out(REG_CI_SCHED_CFG, QM_CI_SCHED_CFG_SRCCIV_EN | | ||
325 | (QM_CI_SCHED_CFG_SRCCIV << 24) | | ||
326 | (QM_CI_SCHED_CFG_SRQ_W << 8) | | ||
327 | (QM_CI_SCHED_CFG_RW_W << 4) | | ||
328 | QM_CI_SCHED_CFG_BMAN_W); | ||
329 | } | ||
330 | |||
331 | static void qm_get_version(u16 *id, u8 *major, u8 *minor) | ||
332 | { | ||
333 | u32 v = qm_ccsr_in(REG_IP_REV_1); | ||
334 | *id = (v >> 16); | ||
335 | *major = (v >> 8) & 0xff; | ||
336 | *minor = v & 0xff; | ||
337 | } | ||
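The probe code below folds these fields into the QMAN_REVxx tokens from qman_priv.h, which pack the major number in the high byte and the minor in the low byte. A sketch of that packing (qm_ip_rev_token() is a hypothetical helper, shown only to make the encoding explicit):

	static u16 qm_ip_rev_token(u8 major, u8 minor)
	{
		/* e.g. major == 3, minor == 1 -> 0x0301 == QMAN_REV31 */
		return ((u16)major << 8) | minor;
	}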
338 | |||
339 | #define PFDR_AR_EN BIT(31) | ||
340 | static void qm_set_memory(enum qm_memory memory, u64 ba, u32 size) | ||
341 | { | ||
342 | u32 offset = (memory == qm_memory_fqd) ? REG_FQD_BARE : REG_PFDR_BARE; | ||
343 | u32 exp = ilog2(size); | ||
344 | |||
345 | /* choke if size isn't within range */ | ||
346 | DPAA_ASSERT((size >= 4096) && (size <= 1024*1024*1024) && | ||
347 | is_power_of_2(size)); | ||
348 | /* choke if 'ba' has lower-alignment than 'size' */ | ||
349 | DPAA_ASSERT(!(ba & (size - 1))); | ||
350 | qm_ccsr_out(offset, upper_32_bits(ba)); | ||
351 | qm_ccsr_out(offset + REG_offset_BAR, lower_32_bits(ba)); | ||
352 | qm_ccsr_out(offset + REG_offset_AR, PFDR_AR_EN | (exp - 1)); | ||
353 | } | ||
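The AR register stores the window size as a power-of-two exponent, which is why the asserts demand a power-of-two size in [4 KB, 1 GB] whose base is aligned at least to the size. Worked example with an illustrative size:

	/* A 2 MB FQD area: size = 1 << 21, exp = ilog2(size) = 21, so the
	 * register is written as PFDR_AR_EN | (21 - 1); the alignment assert
	 * requires ba & (size - 1) == 0, i.e. a 2 MB-aligned base address. */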
354 | |||
355 | static void qm_set_pfdr_threshold(u32 th, u8 k) | ||
356 | { | ||
357 | qm_ccsr_out(REG_PFDR_FP_LWIT, th & 0xffffff); | ||
358 | qm_ccsr_out(REG_PFDR_CFG, k); | ||
359 | } | ||
360 | |||
361 | static void qm_set_sfdr_threshold(u16 th) | ||
362 | { | ||
363 | qm_ccsr_out(REG_SFDR_CFG, th & 0x3ff); | ||
364 | } | ||
365 | |||
366 | static int qm_init_pfdr(struct device *dev, u32 pfdr_start, u32 num) | ||
367 | { | ||
368 | u8 rslt = MCR_get_rslt(qm_ccsr_in(REG_MCR)); | ||
369 | |||
370 | DPAA_ASSERT(pfdr_start && !(pfdr_start & 7) && !(num & 7) && num); | ||
371 | /* Make sure the command interface is 'idle' */ | ||
372 | if (!MCR_rslt_idle(rslt)) { | ||
373 | dev_crit(dev, "QMAN_MCR isn't idle"); | ||
374 | WARN_ON(1); | ||
375 | } | ||
376 | |||
377 | /* Write the MCR command params then the verb */ | ||
378 | qm_ccsr_out(REG_MCP(0), pfdr_start); | ||
379 | /* | ||
380 | * TODO: remove this - it's a workaround for a model bug that is | ||
381 | * corrected in more recent versions. We use the workaround until | ||
382 | * everyone has upgraded. | ||
383 | */ | ||
384 | qm_ccsr_out(REG_MCP(1), pfdr_start + num - 16); | ||
385 | dma_wmb(); | ||
386 | qm_ccsr_out(REG_MCR, MCR_INIT_PFDR); | ||
387 | /* Poll for the result */ | ||
388 | do { | ||
389 | rslt = MCR_get_rslt(qm_ccsr_in(REG_MCR)); | ||
390 | } while (!MCR_rslt_idle(rslt)); | ||
391 | if (MCR_rslt_ok(rslt)) | ||
392 | return 0; | ||
393 | if (MCR_rslt_eaccess(rslt)) | ||
394 | return -EACCES; | ||
395 | if (MCR_rslt_inval(rslt)) | ||
396 | return -EINVAL; | ||
397 | dev_crit(dev, "Unexpected result from MCR_INIT_PFDR: %02x\n", rslt); | ||
398 | return -ENODEV; | ||
399 | } | ||
400 | |||
401 | /* | ||
402 | * Ideally we would use the DMA API to turn rmem->base into a DMA address | ||
403 | * (especially if iommu translations ever get involved). Unfortunately, the | ||
404 | * DMA API currently does not allow mapping anything that is not backed with | ||
405 | * a struct page. | ||
406 | */ | ||
407 | static dma_addr_t fqd_a, pfdr_a; | ||
408 | static size_t fqd_sz, pfdr_sz; | ||
409 | |||
410 | static int qman_fqd(struct reserved_mem *rmem) | ||
411 | { | ||
412 | fqd_a = rmem->base; | ||
413 | fqd_sz = rmem->size; | ||
414 | |||
415 | WARN_ON(!(fqd_a && fqd_sz)); | ||
416 | |||
417 | return 0; | ||
418 | } | ||
419 | RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd); | ||
420 | |||
421 | static int qman_pfdr(struct reserved_mem *rmem) | ||
422 | { | ||
423 | pfdr_a = rmem->base; | ||
424 | pfdr_sz = rmem->size; | ||
425 | |||
426 | WARN_ON(!(pfdr_a && pfdr_sz)); | ||
427 | |||
428 | return 0; | ||
429 | } | ||
430 | RESERVEDMEM_OF_DECLARE(qman_pfdr, "fsl,qman-pfdr", qman_pfdr); | ||
431 | |||
432 | static unsigned int qm_get_fqid_maxcnt(void) | ||
433 | { | ||
434 | return fqd_sz / 64; | ||
435 | } | ||
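Each frame-queue descriptor occupies 64 bytes of the fsl,qman-fqd reservation, so, for example, a 2 MB region yields (1 << 21) / 64 = 32768 FQIDs, of which FQID 0 is reserved for internal use (see QM_FQID_RANGE_START in qman_priv.h).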
436 | |||
437 | /* | ||
438 | * Flush this memory range from the data cache so that QMan-originated | ||
439 | * transactions for this memory region can be marked non-coherent. | ||
440 | */ | ||
441 | static int zero_priv_mem(struct device *dev, struct device_node *node, | ||
442 | phys_addr_t addr, size_t sz) | ||
443 | { | ||
444 | /* map as cacheable, non-guarded */ | ||
445 | void __iomem *tmpp = ioremap_prot(addr, sz, 0); | ||
446 | |||
447 | memset_io(tmpp, 0, sz); | ||
448 | flush_dcache_range((unsigned long)tmpp, | ||
449 | (unsigned long)tmpp + sz); | ||
450 | iounmap(tmpp); | ||
451 | |||
452 | return 0; | ||
453 | } | ||
454 | |||
455 | static void log_edata_bits(struct device *dev, u32 bit_count) | ||
456 | { | ||
457 | u32 i, j, mask = 0xffffffff; | ||
458 | |||
459 | dev_warn(dev, "ErrInt, EDATA:\n"); | ||
460 | i = bit_count / 32; | ||
461 | if (bit_count % 32) { | ||
462 | i++; | ||
463 | mask = ~(mask << bit_count % 32); | ||
464 | } | ||
465 | j = 16 - i; | ||
466 | dev_warn(dev, " 0x%08x\n", qm_ccsr_in(REG_EDATA(j)) & mask); | ||
467 | j++; | ||
468 | for (; j < 16; j++) | ||
469 | dev_warn(dev, " 0x%08x\n", qm_ccsr_in(REG_EDATA(j))); | ||
470 | } | ||
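Worked example of this loop for the Internal Order Restoration List entry above (302 bits): 302 / 32 = 9 full words with a 14-bit remainder, so i becomes 10 and mask keeps the low 14 bits; j starts at 16 - 10 = 6, so EDATA6 is printed masked and EDATA7..EDATA15 are printed whole.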
471 | |||
472 | static void log_additional_error_info(struct device *dev, u32 isr_val, | ||
473 | u32 ecsr_val) | ||
474 | { | ||
475 | struct qm_ecir ecir_val; | ||
476 | struct qm_eadr eadr_val; | ||
477 | int memid; | ||
478 | |||
479 | ecir_val.info = qm_ccsr_in(REG_ECIR); | ||
480 | /* Is the portal info valid? */ | ||
481 | if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) { | ||
482 | struct qm_ecir2 ecir2_val; | ||
483 | |||
484 | ecir2_val.info = qm_ccsr_in(REG_ECIR2); | ||
485 | if (ecsr_val & PORTAL_ECSR_ERR) { | ||
486 | dev_warn(dev, "ErrInt: %s id %d\n", | ||
487 | qm_ecir2_is_dcp(&ecir2_val) ? "DCP" : "SWP", | ||
488 | qm_ecir2_get_pnum(&ecir2_val)); | ||
489 | } | ||
490 | if (ecsr_val & (FQID_ECSR_ERR | QM_EIRQ_IECE)) | ||
491 | dev_warn(dev, "ErrInt: ecir.fqid 0x%x\n", | ||
492 | qm_ecir_get_fqid(&ecir_val)); | ||
493 | |||
494 | if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) { | ||
495 | eadr_val.info = qm_ccsr_in(REG_EADR); | ||
496 | memid = qm_eadr_v3_get_memid(&eadr_val); | ||
497 | dev_warn(dev, "ErrInt: EADR Memory: %s, 0x%x\n", | ||
498 | error_mdata[memid].txt, | ||
499 | error_mdata[memid].addr_mask | ||
500 | & qm_eadr_v3_get_eadr(&eadr_val)); | ||
501 | log_edata_bits(dev, error_mdata[memid].bits); | ||
502 | } | ||
503 | } else { | ||
504 | if (ecsr_val & PORTAL_ECSR_ERR) { | ||
505 | dev_warn(dev, "ErrInt: %s id %d\n", | ||
506 | qm_ecir_is_dcp(&ecir_val) ? "DCP" : "SWP", | ||
507 | qm_ecir_get_pnum(&ecir_val)); | ||
508 | } | ||
509 | if (ecsr_val & FQID_ECSR_ERR) | ||
510 | dev_warn(dev, "ErrInt: ecir.fqid 0x%x\n", | ||
511 | qm_ecir_get_fqid(&ecir_val)); | ||
512 | |||
513 | if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) { | ||
514 | eadr_val.info = qm_ccsr_in(REG_EADR); | ||
515 | memid = qm_eadr_get_memid(&eadr_val); | ||
516 | dev_warn(dev, "ErrInt: EADR Memory: %s, 0x%x\n", | ||
517 | error_mdata[memid].txt, | ||
518 | error_mdata[memid].addr_mask | ||
519 | & qm_eadr_get_eadr(&eadr_val)); | ||
520 | log_edata_bits(dev, error_mdata[memid].bits); | ||
521 | } | ||
522 | } | ||
523 | } | ||
524 | |||
525 | static irqreturn_t qman_isr(int irq, void *ptr) | ||
526 | { | ||
527 | u32 isr_val, ier_val, ecsr_val, isr_mask, i; | ||
528 | struct device *dev = ptr; | ||
529 | |||
530 | ier_val = qm_ccsr_in(REG_ERR_IER); | ||
531 | isr_val = qm_ccsr_in(REG_ERR_ISR); | ||
532 | ecsr_val = qm_ccsr_in(REG_ECSR); | ||
533 | isr_mask = isr_val & ier_val; | ||
534 | |||
535 | if (!isr_mask) | ||
536 | return IRQ_NONE; | ||
537 | |||
538 | for (i = 0; i < ARRAY_SIZE(qman_hwerr_txts); i++) { | ||
539 | if (qman_hwerr_txts[i].mask & isr_mask) { | ||
540 | dev_err_ratelimited(dev, "ErrInt: %s\n", | ||
541 | qman_hwerr_txts[i].txt); | ||
542 | if (qman_hwerr_txts[i].mask & ecsr_val) { | ||
543 | log_additional_error_info(dev, isr_mask, | ||
544 | ecsr_val); | ||
545 | /* Re-arm error capture registers */ | ||
546 | qm_ccsr_out(REG_ECSR, ecsr_val); | ||
547 | } | ||
548 | if (qman_hwerr_txts[i].mask & QMAN_ERRS_TO_DISABLE) { | ||
549 | dev_dbg(dev, "Disabling error 0x%x\n", | ||
550 | qman_hwerr_txts[i].mask); | ||
551 | ier_val &= ~qman_hwerr_txts[i].mask; | ||
552 | qm_ccsr_out(REG_ERR_IER, ier_val); | ||
553 | } | ||
554 | } | ||
555 | } | ||
556 | qm_ccsr_out(REG_ERR_ISR, isr_val); | ||
557 | |||
558 | return IRQ_HANDLED; | ||
559 | } | ||
560 | |||
561 | static int qman_init_ccsr(struct device *dev) | ||
562 | { | ||
563 | int i, err; | ||
564 | |||
565 | /* FQD memory */ | ||
566 | qm_set_memory(qm_memory_fqd, fqd_a, fqd_sz); | ||
567 | /* PFDR memory */ | ||
568 | qm_set_memory(qm_memory_pfdr, pfdr_a, pfdr_sz); | ||
569 | err = qm_init_pfdr(dev, 8, pfdr_sz / 64 - 8); | ||
570 | if (err) | ||
571 | return err; | ||
572 | /* thresholds */ | ||
573 | qm_set_pfdr_threshold(512, 64); | ||
574 | qm_set_sfdr_threshold(128); | ||
575 | /* clear stale PEBI bit from interrupt status register */ | ||
576 | qm_ccsr_out(REG_ERR_ISR, QM_EIRQ_PEBI); | ||
577 | /* corenet initiator settings */ | ||
578 | qm_set_corenet_initiator(); | ||
579 | /* HID settings */ | ||
580 | qm_set_hid(); | ||
581 | /* Set scheduling weights to defaults */ | ||
582 | for (i = qm_wq_first; i <= qm_wq_last; i++) | ||
583 | qm_set_wq_scheduling(i, 0, 0, 0, 0, 0, 0, 0); | ||
584 | /* We are not prepared to accept ERNs for hardware enqueues */ | ||
585 | qm_set_dc(qm_dc_portal_fman0, 1, 0); | ||
586 | qm_set_dc(qm_dc_portal_fman1, 1, 0); | ||
587 | return 0; | ||
588 | } | ||
589 | |||
590 | #define LIO_CFG_LIODN_MASK 0x0fff0000 | ||
591 | void qman_liodn_fixup(u16 channel) | ||
592 | { | ||
593 | static int done; | ||
594 | static u32 liodn_offset; | ||
595 | u32 before, after; | ||
596 | int idx = channel - QM_CHANNEL_SWPORTAL0; | ||
597 | |||
598 | if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) | ||
599 | before = qm_ccsr_in(REG_REV3_QCSP_LIO_CFG(idx)); | ||
600 | else | ||
601 | before = qm_ccsr_in(REG_QCSP_LIO_CFG(idx)); | ||
602 | if (!done) { | ||
603 | liodn_offset = before & LIO_CFG_LIODN_MASK; | ||
604 | done = 1; | ||
605 | return; | ||
606 | } | ||
607 | after = (before & (~LIO_CFG_LIODN_MASK)) | liodn_offset; | ||
608 | if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) | ||
609 | qm_ccsr_out(REG_REV3_QCSP_LIO_CFG(idx), after); | ||
610 | else | ||
611 | qm_ccsr_out(REG_QCSP_LIO_CFG(idx), after); | ||
612 | } | ||
613 | |||
614 | #define IO_CFG_SDEST_MASK 0x00ff0000 | ||
615 | void qman_set_sdest(u16 channel, unsigned int cpu_idx) | ||
616 | { | ||
617 | int idx = channel - QM_CHANNEL_SWPORTAL0; | ||
618 | u32 before, after; | ||
619 | |||
620 | if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) { | ||
621 | before = qm_ccsr_in(REG_REV3_QCSP_IO_CFG(idx)); | ||
622 | /* Each pair of vcpus shares the same SRQ (SDEST) */ | ||
623 | cpu_idx /= 2; | ||
624 | after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16); | ||
625 | qm_ccsr_out(REG_REV3_QCSP_IO_CFG(idx), after); | ||
626 | } else { | ||
627 | before = qm_ccsr_in(REG_QCSP_IO_CFG(idx)); | ||
628 | after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16); | ||
629 | qm_ccsr_out(REG_QCSP_IO_CFG(idx), after); | ||
630 | } | ||
631 | } | ||
632 | |||
633 | static int qman_resource_init(struct device *dev) | ||
634 | { | ||
635 | int pool_chan_num, cgrid_num; | ||
636 | int ret, i; | ||
637 | |||
638 | switch (qman_ip_rev >> 8) { | ||
639 | case 1: | ||
640 | pool_chan_num = 15; | ||
641 | cgrid_num = 256; | ||
642 | break; | ||
643 | case 2: | ||
644 | pool_chan_num = 3; | ||
645 | cgrid_num = 64; | ||
646 | break; | ||
647 | case 3: | ||
648 | pool_chan_num = 15; | ||
649 | cgrid_num = 256; | ||
650 | break; | ||
651 | default: | ||
652 | return -ENODEV; | ||
653 | } | ||
654 | |||
655 | ret = gen_pool_add(qm_qpalloc, qm_channel_pool1 | DPAA_GENALLOC_OFF, | ||
656 | pool_chan_num, -1); | ||
657 | if (ret) { | ||
658 | dev_err(dev, "Failed to seed pool channels (%d)\n", ret); | ||
659 | return ret; | ||
660 | } | ||
661 | |||
662 | ret = gen_pool_add(qm_cgralloc, DPAA_GENALLOC_OFF, cgrid_num, -1); | ||
663 | if (ret) { | ||
664 | dev_err(dev, "Failed to seed CGRID range (%d)\n", ret); | ||
665 | return ret; | ||
666 | } | ||
667 | |||
668 | /* parse pool channels into the SDQCR mask */ | ||
669 | for (i = 0; i < cgrid_num; i++) | ||
670 | qm_pools_sdqcr |= QM_SDQCR_CHANNELS_POOL_CONV(i); | ||
671 | |||
672 | ret = gen_pool_add(qm_fqalloc, QM_FQID_RANGE_START | DPAA_GENALLOC_OFF, | ||
673 | qm_get_fqid_maxcnt() - QM_FQID_RANGE_START, -1); | ||
674 | if (ret) { | ||
675 | dev_err(dev, "Failed to seed FQID range (%d)\n", ret); | ||
676 | return ret; | ||
677 | } | ||
678 | |||
679 | return 0; | ||
680 | } | ||
681 | |||
682 | static int fsl_qman_probe(struct platform_device *pdev) | ||
683 | { | ||
684 | struct device *dev = &pdev->dev; | ||
685 | struct device_node *node = dev->of_node; | ||
686 | struct resource *res; | ||
687 | int ret, err_irq; | ||
688 | u16 id; | ||
689 | u8 major, minor; | ||
690 | |||
691 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
692 | if (!res) { | ||
693 | dev_err(dev, "Can't get %s property 'IORESOURCE_MEM'\n", | ||
694 | node->full_name); | ||
695 | return -ENXIO; | ||
696 | } | ||
697 | qm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res)); | ||
698 | if (!qm_ccsr_start) | ||
699 | return -ENXIO; | ||
700 | |||
701 | qm_get_version(&id, &major, &minor); | ||
702 | if (major == 1 && minor == 0) { | ||
703 | dev_err(dev, "Rev1.0 on P4080 rev1 is not supported!\n"); | ||
704 | return -ENODEV; | ||
705 | } else if (major == 1 && minor == 1) | ||
706 | qman_ip_rev = QMAN_REV11; | ||
707 | else if (major == 1 && minor == 2) | ||
708 | qman_ip_rev = QMAN_REV12; | ||
709 | else if (major == 2 && minor == 0) | ||
710 | qman_ip_rev = QMAN_REV20; | ||
711 | else if (major == 3 && minor == 0) | ||
712 | qman_ip_rev = QMAN_REV30; | ||
713 | else if (major == 3 && minor == 1) | ||
714 | qman_ip_rev = QMAN_REV31; | ||
715 | else { | ||
716 | dev_err(dev, "Unknown QMan version\n"); | ||
717 | return -ENODEV; | ||
718 | } | ||
719 | |||
720 | if ((qman_ip_rev & 0xff00) >= QMAN_REV30) | ||
721 | qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3; | ||
722 | |||
723 | ret = zero_priv_mem(dev, node, fqd_a, fqd_sz); | ||
724 | WARN_ON(ret); | ||
725 | if (ret) | ||
726 | return -ENODEV; | ||
727 | |||
728 | ret = qman_init_ccsr(dev); | ||
729 | if (ret) { | ||
730 | dev_err(dev, "CCSR setup failed\n"); | ||
731 | return ret; | ||
732 | } | ||
733 | |||
734 | err_irq = platform_get_irq(pdev, 0); | ||
735 | if (err_irq <= 0) { | ||
736 | dev_info(dev, "Can't get %s property 'interrupts'\n", | ||
737 | node->full_name); | ||
738 | return -ENODEV; | ||
739 | } | ||
740 | ret = devm_request_irq(dev, err_irq, qman_isr, IRQF_SHARED, "qman-err", | ||
741 | dev); | ||
742 | if (ret) { | ||
743 | dev_err(dev, "devm_request_irq() failed %d for '%s'\n", | ||
744 | ret, node->full_name); | ||
745 | return ret; | ||
746 | } | ||
747 | |||
748 | /* | ||
749 | * Write-to-clear any stale bits (e.g. starvation being asserted prior | ||
750 | * to resource allocation during driver init). | ||
751 | */ | ||
752 | qm_ccsr_out(REG_ERR_ISR, 0xffffffff); | ||
753 | /* Enable Error Interrupts */ | ||
754 | qm_ccsr_out(REG_ERR_IER, 0xffffffff); | ||
755 | |||
756 | qm_fqalloc = devm_gen_pool_create(dev, 0, -1, "qman-fqalloc"); | ||
757 | if (IS_ERR(qm_fqalloc)) { | ||
758 | ret = PTR_ERR(qm_fqalloc); | ||
759 | dev_err(dev, "qman-fqalloc pool init failed (%d)\n", ret); | ||
760 | return ret; | ||
761 | } | ||
762 | |||
763 | qm_qpalloc = devm_gen_pool_create(dev, 0, -1, "qman-qpalloc"); | ||
764 | if (IS_ERR(qm_qpalloc)) { | ||
765 | ret = PTR_ERR(qm_qpalloc); | ||
766 | dev_err(dev, "qman-qpalloc pool init failed (%d)\n", ret); | ||
767 | return ret; | ||
768 | } | ||
769 | |||
770 | qm_cgralloc = devm_gen_pool_create(dev, 0, -1, "qman-cgralloc"); | ||
771 | if (IS_ERR(qm_cgralloc)) { | ||
772 | ret = PTR_ERR(qm_cgralloc); | ||
773 | dev_err(dev, "qman-cgralloc pool init failed (%d)\n", ret); | ||
774 | return ret; | ||
775 | } | ||
776 | |||
777 | ret = qman_resource_init(dev); | ||
778 | if (ret) | ||
779 | return ret; | ||
780 | |||
781 | ret = qman_alloc_fq_table(qm_get_fqid_maxcnt()); | ||
782 | if (ret) | ||
783 | return ret; | ||
784 | |||
785 | ret = qman_wq_alloc(); | ||
786 | if (ret) | ||
787 | return ret; | ||
788 | |||
789 | return 0; | ||
790 | } | ||
791 | |||
792 | static const struct of_device_id fsl_qman_ids[] = { | ||
793 | { | ||
794 | .compatible = "fsl,qman", | ||
795 | }, | ||
796 | {} | ||
797 | }; | ||
798 | |||
799 | static struct platform_driver fsl_qman_driver = { | ||
800 | .driver = { | ||
801 | .name = KBUILD_MODNAME, | ||
802 | .of_match_table = fsl_qman_ids, | ||
803 | .suppress_bind_attrs = true, | ||
804 | }, | ||
805 | .probe = fsl_qman_probe, | ||
806 | }; | ||
807 | |||
808 | builtin_platform_driver(fsl_qman_driver); | ||
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c new file mode 100644 index 000000000000..148614388fca --- /dev/null +++ b/drivers/soc/fsl/qbman/qman_portal.c | |||
@@ -0,0 +1,355 @@ | |||
1 | /* Copyright 2008 - 2016 Freescale Semiconductor, Inc. | ||
2 | * | ||
3 | * Redistribution and use in source and binary forms, with or without | ||
4 | * modification, are permitted provided that the following conditions are met: | ||
5 | * * Redistributions of source code must retain the above copyright | ||
6 | * notice, this list of conditions and the following disclaimer. | ||
7 | * * Redistributions in binary form must reproduce the above copyright | ||
8 | * notice, this list of conditions and the following disclaimer in the | ||
9 | * documentation and/or other materials provided with the distribution. | ||
10 | * * Neither the name of Freescale Semiconductor nor the | ||
11 | * names of its contributors may be used to endorse or promote products | ||
12 | * derived from this software without specific prior written permission. | ||
13 | * | ||
14 | * ALTERNATIVELY, this software may be distributed under the terms of the | ||
15 | * GNU General Public License ("GPL") as published by the Free Software | ||
16 | * Foundation, either version 2 of that License or (at your option) any | ||
17 | * later version. | ||
18 | * | ||
19 | * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY | ||
20 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
21 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | ||
22 | * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY | ||
23 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | ||
24 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | ||
25 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | ||
26 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | ||
28 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
29 | */ | ||
30 | |||
31 | #include "qman_priv.h" | ||
32 | |||
33 | /* Enable portal interrupts (as opposed to polling mode) */ | ||
34 | #define CONFIG_FSL_DPA_PIRQ_SLOW 1 | ||
35 | #define CONFIG_FSL_DPA_PIRQ_FAST 1 | ||
36 | |||
37 | static struct cpumask portal_cpus; | ||
38 | /* protect qman global registers and global data shared among portals */ | ||
39 | static DEFINE_SPINLOCK(qman_lock); | ||
40 | |||
41 | static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu) | ||
42 | { | ||
43 | #ifdef CONFIG_FSL_PAMU | ||
44 | struct device *dev = pcfg->dev; | ||
45 | int window_count = 1; | ||
46 | struct iommu_domain_geometry geom_attr; | ||
47 | struct pamu_stash_attribute stash_attr; | ||
48 | int ret; | ||
49 | |||
50 | pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type); | ||
51 | if (!pcfg->iommu_domain) { | ||
52 | dev_err(dev, "%s(): iommu_domain_alloc() failed", __func__); | ||
53 | goto no_iommu; | ||
54 | } | ||
55 | geom_attr.aperture_start = 0; | ||
56 | geom_attr.aperture_end = | ||
57 | ((dma_addr_t)1 << min(8 * sizeof(dma_addr_t), (size_t)36)) - 1; | ||
58 | geom_attr.force_aperture = true; | ||
59 | ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_GEOMETRY, | ||
60 | &geom_attr); | ||
61 | if (ret < 0) { | ||
62 | dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__, | ||
63 | ret); | ||
64 | goto out_domain_free; | ||
65 | } | ||
66 | ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_WINDOWS, | ||
67 | &window_count); | ||
68 | if (ret < 0) { | ||
69 | dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__, | ||
70 | ret); | ||
71 | goto out_domain_free; | ||
72 | } | ||
73 | stash_attr.cpu = cpu; | ||
74 | stash_attr.cache = PAMU_ATTR_CACHE_L1; | ||
75 | ret = iommu_domain_set_attr(pcfg->iommu_domain, | ||
76 | DOMAIN_ATTR_FSL_PAMU_STASH, | ||
77 | &stash_attr); | ||
78 | if (ret < 0) { | ||
79 | dev_err(dev, "%s(): iommu_domain_set_attr() = %d", | ||
80 | __func__, ret); | ||
81 | goto out_domain_free; | ||
82 | } | ||
83 | ret = iommu_domain_window_enable(pcfg->iommu_domain, 0, 0, 1ULL << 36, | ||
84 | IOMMU_READ | IOMMU_WRITE); | ||
85 | if (ret < 0) { | ||
86 | dev_err(dev, "%s(): iommu_domain_window_enable() = %d", | ||
87 | __func__, ret); | ||
88 | goto out_domain_free; | ||
89 | } | ||
90 | ret = iommu_attach_device(pcfg->iommu_domain, dev); | ||
91 | if (ret < 0) { | ||
92 | dev_err(dev, "%s(): iommu_device_attach() = %d", __func__, | ||
93 | ret); | ||
94 | goto out_domain_free; | ||
95 | } | ||
96 | ret = iommu_domain_set_attr(pcfg->iommu_domain, | ||
97 | DOMAIN_ATTR_FSL_PAMU_ENABLE, | ||
98 | &window_count); | ||
99 | if (ret < 0) { | ||
100 | dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__, | ||
101 | ret); | ||
102 | goto out_detach_device; | ||
103 | } | ||
104 | |||
105 | no_iommu: | ||
106 | #endif | ||
107 | qman_set_sdest(pcfg->channel, cpu); | ||
108 | |||
109 | return; | ||
110 | |||
111 | #ifdef CONFIG_FSL_PAMU | ||
112 | out_detach_device: | ||
113 | iommu_detach_device(pcfg->iommu_domain, NULL); | ||
114 | out_domain_free: | ||
115 | iommu_domain_free(pcfg->iommu_domain); | ||
116 | pcfg->iommu_domain = NULL; | ||
117 | #endif | ||
118 | } | ||
119 | |||
120 | static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg) | ||
121 | { | ||
122 | struct qman_portal *p; | ||
123 | u32 irq_sources = 0; | ||
124 | |||
125 | /* We need the same LIODN offset for all portals */ | ||
126 | qman_liodn_fixup(pcfg->channel); | ||
127 | |||
128 | pcfg->iommu_domain = NULL; | ||
129 | portal_set_cpu(pcfg, pcfg->cpu); | ||
130 | |||
131 | p = qman_create_affine_portal(pcfg, NULL); | ||
132 | if (!p) { | ||
133 | dev_crit(pcfg->dev, "%s: Portal failure on cpu %d\n", | ||
134 | __func__, pcfg->cpu); | ||
135 | return NULL; | ||
136 | } | ||
137 | |||
138 | /* Determine what should be interrupt-vs-poll driven */ | ||
139 | #ifdef CONFIG_FSL_DPA_PIRQ_SLOW | ||
140 | irq_sources |= QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI | | ||
141 | QM_PIRQ_CSCI; | ||
142 | #endif | ||
143 | #ifdef CONFIG_FSL_DPA_PIRQ_FAST | ||
144 | irq_sources |= QM_PIRQ_DQRI; | ||
145 | #endif | ||
146 | qman_p_irqsource_add(p, irq_sources); | ||
147 | |||
148 | spin_lock(&qman_lock); | ||
149 | if (cpumask_equal(&portal_cpus, cpu_possible_mask)) { | ||
150 | /* all assigned portals are initialized now */ | ||
151 | qman_init_cgr_all(); | ||
152 | } | ||
153 | spin_unlock(&qman_lock); | ||
154 | |||
155 | dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu); | ||
156 | |||
157 | return p; | ||
158 | } | ||
159 | |||
160 | static void qman_portal_update_sdest(const struct qm_portal_config *pcfg, | ||
161 | unsigned int cpu) | ||
162 | { | ||
163 | #ifdef CONFIG_FSL_PAMU /* TODO */ | ||
164 | struct pamu_stash_attribute stash_attr; | ||
165 | int ret; | ||
166 | |||
167 | if (pcfg->iommu_domain) { | ||
168 | stash_attr.cpu = cpu; | ||
169 | stash_attr.cache = PAMU_ATTR_CACHE_L1; | ||
170 | ret = iommu_domain_set_attr(pcfg->iommu_domain, | ||
171 | DOMAIN_ATTR_FSL_PAMU_STASH, &stash_attr); | ||
172 | if (ret < 0) { | ||
173 | dev_err(pcfg->dev, | ||
174 | "Failed to update pamu stash setting\n"); | ||
175 | return; | ||
176 | } | ||
177 | } | ||
178 | #endif | ||
179 | qman_set_sdest(pcfg->channel, cpu); | ||
180 | } | ||
181 | |||
182 | static void qman_offline_cpu(unsigned int cpu) | ||
183 | { | ||
184 | struct qman_portal *p; | ||
185 | const struct qm_portal_config *pcfg; | ||
186 | |||
187 | p = affine_portals[cpu]; | ||
188 | if (p) { | ||
189 | pcfg = qman_get_qm_portal_config(p); | ||
190 | if (pcfg) { | ||
191 | irq_set_affinity(pcfg->irq, cpumask_of(0)); | ||
192 | qman_portal_update_sdest(pcfg, 0); | ||
193 | } | ||
194 | } | ||
195 | } | ||
196 | |||
197 | static void qman_online_cpu(unsigned int cpu) | ||
198 | { | ||
199 | struct qman_portal *p; | ||
200 | const struct qm_portal_config *pcfg; | ||
201 | |||
202 | p = affine_portals[cpu]; | ||
203 | if (p) { | ||
204 | pcfg = qman_get_qm_portal_config(p); | ||
205 | if (pcfg) { | ||
206 | irq_set_affinity(pcfg->irq, cpumask_of(cpu)); | ||
207 | qman_portal_update_sdest(pcfg, cpu); | ||
208 | } | ||
209 | } | ||
210 | } | ||
211 | |||
212 | static int qman_hotplug_cpu_callback(struct notifier_block *nfb, | ||
213 | unsigned long action, void *hcpu) | ||
214 | { | ||
215 | unsigned int cpu = (unsigned long)hcpu; | ||
216 | |||
217 | switch (action) { | ||
218 | case CPU_ONLINE: | ||
219 | case CPU_ONLINE_FROZEN: | ||
220 | qman_online_cpu(cpu); | ||
221 | break; | ||
222 | case CPU_DOWN_PREPARE: | ||
223 | case CPU_DOWN_PREPARE_FROZEN: | ||
224 | qman_offline_cpu(cpu); | ||
225 | default: | ||
226 | break; | ||
227 | } | ||
228 | return NOTIFY_OK; | ||
229 | } | ||
230 | |||
231 | static struct notifier_block qman_hotplug_cpu_notifier = { | ||
232 | .notifier_call = qman_hotplug_cpu_callback, | ||
233 | }; | ||
234 | |||
235 | static int qman_portal_probe(struct platform_device *pdev) | ||
236 | { | ||
237 | struct device *dev = &pdev->dev; | ||
238 | struct device_node *node = dev->of_node; | ||
239 | struct qm_portal_config *pcfg; | ||
240 | struct resource *addr_phys[2]; | ||
241 | const u32 *channel; | ||
242 | void __iomem *va; | ||
243 | int irq, len, cpu; | ||
244 | |||
245 | pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL); | ||
246 | if (!pcfg) | ||
247 | return -ENOMEM; | ||
248 | |||
249 | pcfg->dev = dev; | ||
250 | |||
251 | addr_phys[0] = platform_get_resource(pdev, IORESOURCE_MEM, | ||
252 | DPAA_PORTAL_CE); | ||
253 | if (!addr_phys[0]) { | ||
254 | dev_err(dev, "Can't get %s property 'reg::CE'\n", | ||
255 | node->full_name); | ||
256 | return -ENXIO; | ||
257 | } | ||
258 | |||
259 | addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM, | ||
260 | DPAA_PORTAL_CI); | ||
261 | if (!addr_phys[1]) { | ||
262 | dev_err(dev, "Can't get %s property 'reg::CI'\n", | ||
263 | node->full_name); | ||
264 | return -ENXIO; | ||
265 | } | ||
266 | |||
267 | channel = of_get_property(node, "cell-index", &len); | ||
268 | if (!channel || (len != 4)) { | ||
269 | dev_err(dev, "Can't get %s property 'cell-index'\n", | ||
270 | node->full_name); | ||
271 | return -ENXIO; | ||
272 | } | ||
273 | pcfg->channel = *channel; | ||
274 | pcfg->cpu = -1; | ||
275 | irq = platform_get_irq(pdev, 0); | ||
276 | if (irq <= 0) { | ||
277 | dev_err(dev, "Can't get %s IRQ\n", node->full_name); | ||
278 | return -ENXIO; | ||
279 | } | ||
280 | pcfg->irq = irq; | ||
281 | |||
282 | va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0); | ||
283 | if (!va) | ||
284 | goto err_ioremap1; | ||
285 | |||
286 | pcfg->addr_virt[DPAA_PORTAL_CE] = va; | ||
287 | |||
288 | va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]), | ||
289 | _PAGE_GUARDED | _PAGE_NO_CACHE); | ||
290 | if (!va) | ||
291 | goto err_ioremap2; | ||
292 | |||
293 | pcfg->addr_virt[DPAA_PORTAL_CI] = va; | ||
294 | |||
295 | pcfg->pools = qm_get_pools_sdqcr(); | ||
296 | |||
297 | spin_lock(&qman_lock); | ||
298 | cpu = cpumask_next_zero(-1, &portal_cpus); | ||
299 | if (cpu >= nr_cpu_ids) { | ||
300 | /* unassigned portal, skip init */ | ||
301 | spin_unlock(&qman_lock); | ||
302 | return 0; | ||
303 | } | ||
304 | |||
305 | cpumask_set_cpu(cpu, &portal_cpus); | ||
306 | spin_unlock(&qman_lock); | ||
307 | pcfg->cpu = cpu; | ||
308 | |||
309 | if (!init_pcfg(pcfg)) | ||
310 | goto err_ioremap2; | ||
311 | |||
312 | /* clear irq affinity if assigned cpu is offline */ | ||
313 | if (!cpu_online(cpu)) | ||
314 | qman_offline_cpu(cpu); | ||
315 | |||
316 | return 0; | ||
317 | |||
318 | err_ioremap2: | ||
319 | iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]); | ||
320 | err_ioremap1: | ||
321 | dev_err(dev, "ioremap failed\n"); | ||
322 | return -ENXIO; | ||
323 | } | ||
324 | |||
325 | static const struct of_device_id qman_portal_ids[] = { | ||
326 | { | ||
327 | .compatible = "fsl,qman-portal", | ||
328 | }, | ||
329 | {} | ||
330 | }; | ||
331 | MODULE_DEVICE_TABLE(of, qman_portal_ids); | ||
332 | |||
333 | static struct platform_driver qman_portal_driver = { | ||
334 | .driver = { | ||
335 | .name = KBUILD_MODNAME, | ||
336 | .of_match_table = qman_portal_ids, | ||
337 | }, | ||
338 | .probe = qman_portal_probe, | ||
339 | }; | ||
340 | |||
341 | static int __init qman_portal_driver_register(struct platform_driver *drv) | ||
342 | { | ||
343 | int ret; | ||
344 | |||
345 | ret = platform_driver_register(drv); | ||
346 | if (ret < 0) | ||
347 | return ret; | ||
348 | |||
349 | register_hotcpu_notifier(&qman_hotplug_cpu_notifier); | ||
350 | |||
351 | return 0; | ||
352 | } | ||
353 | |||
354 | module_driver(qman_portal_driver, | ||
355 | qman_portal_driver_register, platform_driver_unregister); | ||
diff --git a/drivers/soc/fsl/qbman/qman_priv.h b/drivers/soc/fsl/qbman/qman_priv.h new file mode 100644 index 000000000000..5cf821e623a9 --- /dev/null +++ b/drivers/soc/fsl/qbman/qman_priv.h | |||
@@ -0,0 +1,371 @@ | |||
1 | /* Copyright 2008 - 2016 Freescale Semiconductor, Inc. | ||
2 | * | ||
3 | * Redistribution and use in source and binary forms, with or without | ||
4 | * modification, are permitted provided that the following conditions are met: | ||
5 | * * Redistributions of source code must retain the above copyright | ||
6 | * notice, this list of conditions and the following disclaimer. | ||
7 | * * Redistributions in binary form must reproduce the above copyright | ||
8 | * notice, this list of conditions and the following disclaimer in the | ||
9 | * documentation and/or other materials provided with the distribution. | ||
10 | * * Neither the name of Freescale Semiconductor nor the | ||
11 | * names of its contributors may be used to endorse or promote products | ||
12 | * derived from this software without specific prior written permission. | ||
13 | * | ||
14 | * ALTERNATIVELY, this software may be distributed under the terms of the | ||
15 | * GNU General Public License ("GPL") as published by the Free Software | ||
16 | * Foundation, either version 2 of that License or (at your option) any | ||
17 | * later version. | ||
18 | * | ||
19 | * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY | ||
20 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
21 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | ||
22 | * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY | ||
23 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | ||
24 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | ||
25 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | ||
26 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | ||
28 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
29 | */ | ||
30 | |||
31 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
32 | |||
33 | #include "dpaa_sys.h" | ||
34 | |||
35 | #include <soc/fsl/qman.h> | ||
36 | #include <linux/iommu.h> | ||
37 | |||
38 | #if defined(CONFIG_FSL_PAMU) | ||
39 | #include <asm/fsl_pamu_stash.h> | ||
40 | #endif | ||
41 | |||
42 | struct qm_mcr_querywq { | ||
43 | u8 verb; | ||
44 | u8 result; | ||
45 | u16 channel_wq; /* ignores wq (3 lsbits): _res[0-2] */ | ||
46 | u8 __reserved[28]; | ||
47 | u32 wq_len[8]; | ||
48 | } __packed; | ||
49 | |||
50 | static inline u16 qm_mcr_querywq_get_chan(const struct qm_mcr_querywq *wq) | ||
51 | { | ||
52 | return wq->channel_wq >> 3; | ||
53 | } | ||
54 | |||
55 | struct __qm_mcr_querycongestion { | ||
56 | u32 state[8]; | ||
57 | }; | ||
58 | |||
59 | /* "Query Congestion Group State" */ | ||
60 | struct qm_mcr_querycongestion { | ||
61 | u8 verb; | ||
62 | u8 result; | ||
63 | u8 __reserved[30]; | ||
64 | /* Access this struct using qman_cgrs_get() */ | ||
65 | struct __qm_mcr_querycongestion state; | ||
66 | } __packed; | ||
67 | |||
68 | /* "Query CGR" */ | ||
69 | struct qm_mcr_querycgr { | ||
70 | u8 verb; | ||
71 | u8 result; | ||
72 | u16 __reserved1; | ||
73 | struct __qm_mc_cgr cgr; /* CGR fields */ | ||
74 | u8 __reserved2[6]; | ||
75 | u8 i_bcnt_hi; /* high 8-bits of 40-bit "Instant" */ | ||
76 | u32 i_bcnt_lo; /* low 32-bits of 40-bit */ | ||
77 | u8 __reserved3[3]; | ||
78 | u8 a_bcnt_hi; /* high 8-bits of 40-bit "Average" */ | ||
79 | u32 a_bcnt_lo; /* low 32-bits of 40-bit */ | ||
80 | u32 cscn_targ_swp[4]; | ||
81 | } __packed; | ||
82 | |||
83 | static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q) | ||
84 | { | ||
85 | return ((u64)q->i_bcnt_hi << 32) | (u64)q->i_bcnt_lo; | ||
86 | } | ||
87 | static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q) | ||
88 | { | ||
89 | return ((u64)q->a_bcnt_hi << 32) | (u64)q->a_bcnt_lo; | ||
90 | } | ||
91 | |||
92 | /* "Query FQ Non-Programmable Fields" */ | ||
93 | struct qm_mcc_queryfq_np { | ||
94 | u8 _ncw_verb; | ||
95 | u8 __reserved1[3]; | ||
96 | u32 fqid; /* 24-bit */ | ||
97 | u8 __reserved2[56]; | ||
98 | } __packed; | ||
99 | |||
100 | struct qm_mcr_queryfq_np { | ||
101 | u8 verb; | ||
102 | u8 result; | ||
103 | u8 __reserved1; | ||
104 | u8 state; /* QM_MCR_NP_STATE_*** */ | ||
105 | u32 fqd_link; /* 24-bit, _res2[24-31] */ | ||
106 | u16 odp_seq; /* 14-bit, _res3[14-15] */ | ||
107 | u16 orp_nesn; /* 14-bit, _res4[14-15] */ | ||
108 | u16 orp_ea_hseq; /* 15-bit, _res5[15] */ | ||
109 | u16 orp_ea_tseq; /* 15-bit, _res6[15] */ | ||
110 | u32 orp_ea_hptr; /* 24-bit, _res7[24-31] */ | ||
111 | u32 orp_ea_tptr; /* 24-bit, _res8[24-31] */ | ||
112 | u32 pfdr_hptr; /* 24-bit, _res9[24-31] */ | ||
113 | u32 pfdr_tptr; /* 24-bit, _res10[24-31] */ | ||
114 | u8 __reserved2[5]; | ||
115 | u8 is; /* 1-bit, _res12[1-7] */ | ||
116 | u16 ics_surp; | ||
117 | u32 byte_cnt; | ||
118 | u32 frm_cnt; /* 24-bit, _res13[24-31] */ | ||
119 | u32 __reserved3; | ||
120 | u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */ | ||
121 | u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */ | ||
122 | u16 __reserved4; | ||
123 | u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */ | ||
124 | u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */ | ||
125 | u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */ | ||
126 | } __packed; | ||
127 | |||
128 | #define QM_MCR_NP_STATE_FE 0x10 | ||
129 | #define QM_MCR_NP_STATE_R 0x08 | ||
130 | #define QM_MCR_NP_STATE_MASK 0x07 /* Reads FQD::STATE; */ | ||
131 | #define QM_MCR_NP_STATE_OOS 0x00 | ||
132 | #define QM_MCR_NP_STATE_RETIRED 0x01 | ||
133 | #define QM_MCR_NP_STATE_TEN_SCHED 0x02 | ||
134 | #define QM_MCR_NP_STATE_TRU_SCHED 0x03 | ||
135 | #define QM_MCR_NP_STATE_PARKED 0x04 | ||
136 | #define QM_MCR_NP_STATE_ACTIVE 0x05 | ||
137 | #define QM_MCR_NP_PTR_MASK 0x07ff /* for RA[12] & OD[123] */ | ||
138 | #define QM_MCR_NP_RA1_NRA(v) (((v) >> 14) & 0x3) /* FQD::NRA */ | ||
139 | #define QM_MCR_NP_RA2_IT(v) (((v) >> 14) & 0x1) /* FQD::IT */ | ||
140 | #define QM_MCR_NP_OD1_NOD(v) (((v) >> 14) & 0x3) /* FQD::NOD */ | ||
141 | #define QM_MCR_NP_OD3_NPC(v) (((v) >> 14) & 0x3) /* FQD::NPC */ | ||
142 | |||
143 | enum qm_mcr_queryfq_np_masks { | ||
144 | qm_mcr_fqd_link_mask = BIT(24)-1, | ||
145 | qm_mcr_odp_seq_mask = BIT(14)-1, | ||
146 | qm_mcr_orp_nesn_mask = BIT(14)-1, | ||
147 | qm_mcr_orp_ea_hseq_mask = BIT(15)-1, | ||
148 | qm_mcr_orp_ea_tseq_mask = BIT(15)-1, | ||
149 | qm_mcr_orp_ea_hptr_mask = BIT(24)-1, | ||
150 | qm_mcr_orp_ea_tptr_mask = BIT(24)-1, | ||
151 | qm_mcr_pfdr_hptr_mask = BIT(24)-1, | ||
152 | qm_mcr_pfdr_tptr_mask = BIT(24)-1, | ||
153 | qm_mcr_is_mask = BIT(1)-1, | ||
154 | qm_mcr_frm_cnt_mask = BIT(24)-1, | ||
155 | }; | ||
156 | #define qm_mcr_np_get(np, field) \ | ||
157 | ((np)->field & (qm_mcr_##field##_mask)) | ||
158 | |||
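Because the macro pastes the field name into its mask constant, callers never restate a field's width by hand. A minimal usage sketch (assumes np was filled in by a preceding QUERYFQ_NP command):

	struct qm_mcr_queryfq_np np;
	u32 frames, link;

	/* ... np filled in, e.g. by qman_query_fq_np() ... */
	frames = qm_mcr_np_get(&np, frm_cnt);	/* np.frm_cnt & (BIT(24) - 1) */
	link = qm_mcr_np_get(&np, fqd_link);	/* np.fqd_link & (BIT(24) - 1) */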
159 | /* Congestion Groups */ | ||
160 | |||
161 | /* | ||
162 | * This wrapper represents a bit-array for the state of the 256 QMan congestion | ||
163 | * groups. It is also used as a *mask* for congestion groups, e.g. so we ignore | ||
164 | * those that don't concern us. We harness the structure and accessor details | ||
165 | * already used in the management command to query congestion groups. | ||
166 | */ | ||
167 | #define CGR_BITS_PER_WORD 5 /* log2(32): shift from CGR id to word index */ | ||
168 | #define CGR_WORD(x) ((x) >> CGR_BITS_PER_WORD) | ||
169 | #define CGR_BIT(x) (BIT(31) >> ((x) & 0x1f)) | ||
170 | #define CGR_NUM (sizeof(struct __qm_mcr_querycongestion) << 3) | ||
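Note the bit order: the array is big-endian within each 32-bit word, i.e. CGR id 0 maps to the MSB of state word 0. A worked example of the macros:

	/*
	 * For CGR id 27:
	 *   CGR_WORD(27) == 27 >> 5       == 0       (first state word)
	 *   CGR_BIT(27)  == BIT(31) >> 27 == BIT(4)  == 0x00000010
	 * and CGR_NUM == sizeof(struct __qm_mcr_querycongestion) * 8 == 256,
	 * matching the 256 congestion groups described above.
	 */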
171 | |||
172 | struct qman_cgrs { | ||
173 | struct __qm_mcr_querycongestion q; | ||
174 | }; | ||
175 | |||
176 | static inline void qman_cgrs_init(struct qman_cgrs *c) | ||
177 | { | ||
178 | memset(c, 0, sizeof(*c)); | ||
179 | } | ||
180 | |||
181 | static inline void qman_cgrs_fill(struct qman_cgrs *c) | ||
182 | { | ||
183 | memset(c, 0xff, sizeof(*c)); | ||
184 | } | ||
185 | |||
186 | static inline int qman_cgrs_get(struct qman_cgrs *c, u8 cgr) | ||
187 | { | ||
188 | return c->q.state[CGR_WORD(cgr)] & CGR_BIT(cgr); | ||
189 | } | ||
190 | |||
191 | static inline void qman_cgrs_cp(struct qman_cgrs *dest, | ||
192 | const struct qman_cgrs *src) | ||
193 | { | ||
194 | *dest = *src; | ||
195 | } | ||
196 | |||
197 | static inline void qman_cgrs_and(struct qman_cgrs *dest, | ||
198 | const struct qman_cgrs *a, const struct qman_cgrs *b) | ||
199 | { | ||
200 | int ret; | ||
201 | u32 *_d = dest->q.state; | ||
202 | const u32 *_a = a->q.state; | ||
203 | const u32 *_b = b->q.state; | ||
204 | |||
205 | for (ret = 0; ret < 8; ret++) | ||
206 | *_d++ = *_a++ & *_b++; | ||
207 | } | ||
208 | |||
209 | static inline void qman_cgrs_xor(struct qman_cgrs *dest, | ||
210 | const struct qman_cgrs *a, const struct qman_cgrs *b) | ||
211 | { | ||
212 | int ret; | ||
213 | u32 *_d = dest->q.state; | ||
214 | const u32 *_a = a->q.state; | ||
215 | const u32 *_b = b->q.state; | ||
216 | |||
217 | for (ret = 0; ret < 8; ret++) | ||
218 | *_d++ = *_a++ ^ *_b++; | ||
219 | } | ||
220 | |||
221 | void qman_init_cgr_all(void); | ||
222 | |||
223 | struct qm_portal_config { | ||
224 | /* | ||
225 | * Corenet portal addresses; | ||
226 | * [0]==cache-enabled, [1]==cache-inhibited. | ||
227 | */ | ||
228 | void __iomem *addr_virt[2]; | ||
229 | struct device *dev; | ||
230 | struct iommu_domain *iommu_domain; | ||
231 | /* Allow these to be joined in lists */ | ||
232 | struct list_head list; | ||
233 | /* User-visible portal configuration settings */ | ||
234 | /* portal is affined to this cpu */ | ||
235 | int cpu; | ||
236 | /* portal interrupt line */ | ||
237 | int irq; | ||
238 | /* | ||
239 | * the portal's dedicated channel id, used when initialising | ||
240 | * frame queues to target this portal when scheduled | ||
241 | */ | ||
242 | u16 channel; | ||
243 | /* | ||
244 | * mask of pool channels this portal has dequeue access to | ||
245 | * (using QM_SDQCR_CHANNELS_POOL(n) for the bitmask) | ||
246 | */ | ||
247 | u32 pools; | ||
248 | }; | ||
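For illustration only (the values, the device node and the probe context are hypothetical, not taken from this patch), platform code might populate one of these along the following lines; QM_SDQCR_CHANNELS_POOL() is the pool-channel helper from the public qman.h header:

	struct qm_portal_config pcfg = {
		.dev	 = &ofdev->dev,		/* hypothetical probe device */
		.cpu	 = 1,			/* portal affine to CPU 1 */
		.irq	 = irq_of_parse_and_map(node, 0),
		.channel = 0x20,		/* hypothetical dedicated channel id */
		.pools	 = QM_SDQCR_CHANNELS_POOL(2) |
			   QM_SDQCR_CHANNELS_POOL(3),
	};
	/* addr_virt[0]/addr_virt[1] would come from ioremap()ing the portal's
	 * cache-enabled and cache-inhibited regions respectively. */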
249 | |||
250 | /* Revision info (for errata and feature handling) */ | ||
251 | #define QMAN_REV11 0x0101 | ||
252 | #define QMAN_REV12 0x0102 | ||
253 | #define QMAN_REV20 0x0200 | ||
254 | #define QMAN_REV30 0x0300 | ||
255 | #define QMAN_REV31 0x0301 | ||
256 | extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */ | ||
257 | |||
258 | #define QM_FQID_RANGE_START 1 /* FQID 0 reserved for internal use */ | ||
259 | extern struct gen_pool *qm_fqalloc; /* FQID allocator */ | ||
260 | extern struct gen_pool *qm_qpalloc; /* pool-channel allocator */ | ||
261 | extern struct gen_pool *qm_cgralloc; /* CGR ID allocator */ | ||
262 | u32 qm_get_pools_sdqcr(void); | ||
263 | |||
264 | int qman_wq_alloc(void); | ||
265 | void qman_liodn_fixup(u16 channel); | ||
266 | void qman_set_sdest(u16 channel, unsigned int cpu_idx); | ||
267 | |||
268 | struct qman_portal *qman_create_affine_portal( | ||
269 | const struct qm_portal_config *config, | ||
270 | const struct qman_cgrs *cgrs); | ||
271 | const struct qm_portal_config *qman_destroy_affine_portal(void); | ||
272 | |||
273 | /* | ||
274 | * qman_query_fq - Queries FQD fields (via h/w query command) | ||
275 | * @fq: the frame queue object to be queried | ||
276 | * @fqd: storage for the queried FQD fields | ||
277 | */ | ||
278 | int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd); | ||
279 | |||
280 | /* | ||
281 | * For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use | ||
282 | * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use | ||
283 | * FQID(n) to fill in the frame queue ID. | ||
284 | */ | ||
285 | #define QM_VDQCR_PRECEDENCE_VDQCR 0x0 | ||
286 | #define QM_VDQCR_PRECEDENCE_SDQCR 0x80000000 | ||
287 | #define QM_VDQCR_EXACT 0x40000000 | ||
288 | #define QM_VDQCR_NUMFRAMES_MASK 0x3f000000 | ||
289 | #define QM_VDQCR_NUMFRAMES_SET(n) (((n) & 0x3f) << 24) | ||
290 | #define QM_VDQCR_NUMFRAMES_GET(n) (((n) >> 24) & 0x3f) | ||
291 | #define QM_VDQCR_NUMFRAMES_TILLEMPTY QM_VDQCR_NUMFRAMES_SET(0) | ||
292 | |||
293 | #define QMAN_VOLATILE_FLAG_WAIT 0x00000001 /* wait if VDQCR is in use */ | ||
294 | #define QMAN_VOLATILE_FLAG_WAIT_INT 0x00000002 /* if wait, interruptible? */ | ||
295 | #define QMAN_VOLATILE_FLAG_FINISH 0x00000004 /* wait till VDQCR completes */ | ||
296 | |||
297 | /* | ||
298 | * qman_volatile_dequeue - Issue a volatile dequeue command | ||
299 | * @fq: the frame queue object to dequeue from | ||
300 | * @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options | ||
301 | * @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set() | ||
302 | * | ||
303 | * Attempts to lock access to the portal's VDQCR volatile dequeue functionality. | ||
304 | * The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and | ||
305 | * the VDQCR is already in use; otherwise it returns non-zero on failure. If | ||
306 | * QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once | ||
307 | * the VDQCR command has finished executing (i.e. once the callback for the last | ||
308 | * DQRR entry resulting from the VDQCR command has been called). If not using | ||
309 | * the FINISH flag, completion can be determined either by detecting the | ||
310 | * presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits | ||
311 | * in the "stat" parameter passed to the FQ's dequeue callback, or by waiting | ||
312 | * for the QMAN_FQ_STATE_VDQCR bit to disappear. | ||
313 | */ | ||
314 | int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr); | ||
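The API self-test further down uses it essentially like this (a sketch; EXACT and PRECEDENCE options are left at their defaults, as the comment above allows):

	u32 flags = QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH;
	int err = qman_volatile_dequeue(fq, flags,
					QM_VDQCR_NUMFRAMES_SET(4));
	if (err)
		pr_err("volatile dequeue failed: %d\n", err);
	/* with FLAG_FINISH, all resulting DQRR callbacks have already run */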
315 | |||
316 | int qman_alloc_fq_table(u32 num_fqids); | ||
317 | |||
318 | /* QMan s/w corenet portal, low-level i/face */ | ||
319 | |||
320 | /* | ||
321 | * For qm_dqrr_sdqcr_set(); Choose one SOURCE. Choose one COUNT. Choose one | ||
322 | * dequeue TYPE. Choose TOKEN (8-bit). | ||
323 | * If SOURCE == CHANNELS, | ||
324 | * Choose CHANNELS_DEDICATED and/or CHANNELS_POOL(n). | ||
325 | * You can choose DEDICATED_PRECEDENCE if the portal channel should have | ||
326 | * priority. | ||
327 | * If SOURCE == SPECIFICWQ, | ||
328 | * Either select the work-queue ID with SPECIFICWQ_WQ(), or select the | ||
329 | * channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the | ||
330 | * work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the | ||
331 | * same value. | ||
332 | */ | ||
333 | #define QM_SDQCR_SOURCE_CHANNELS 0x0 | ||
334 | #define QM_SDQCR_SOURCE_SPECIFICWQ 0x40000000 | ||
335 | #define QM_SDQCR_COUNT_EXACT1 0x0 | ||
336 | #define QM_SDQCR_COUNT_UPTO3 0x20000000 | ||
337 | #define QM_SDQCR_DEDICATED_PRECEDENCE 0x10000000 | ||
338 | #define QM_SDQCR_TYPE_MASK 0x03000000 | ||
339 | #define QM_SDQCR_TYPE_NULL 0x0 | ||
340 | #define QM_SDQCR_TYPE_PRIO_QOS 0x01000000 | ||
341 | #define QM_SDQCR_TYPE_ACTIVE_QOS 0x02000000 | ||
342 | #define QM_SDQCR_TYPE_ACTIVE 0x03000000 | ||
343 | #define QM_SDQCR_TOKEN_MASK 0x00ff0000 | ||
344 | #define QM_SDQCR_TOKEN_SET(v) (((v) & 0xff) << 16) | ||
345 | #define QM_SDQCR_TOKEN_GET(v) (((v) >> 16) & 0xff) | ||
346 | #define QM_SDQCR_CHANNELS_DEDICATED 0x00008000 | ||
347 | #define QM_SDQCR_SPECIFICWQ_MASK 0x000000f7 | ||
348 | #define QM_SDQCR_SPECIFICWQ_DEDICATED 0x00000000 | ||
349 | #define QM_SDQCR_SPECIFICWQ_POOL(n) ((n) << 4) | ||
350 | #define QM_SDQCR_SPECIFICWQ_WQ(n) (n) | ||
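For instance, the API self-test below builds its portal SDQCR exactly along these lines (QM_SDQCR_CHANNELS_POOL() again comes from the public header):

	u32 sdqcr = QM_SDQCR_SOURCE_CHANNELS |	  /* dequeue from channels */
		    QM_SDQCR_TYPE_PRIO_QOS |	  /* priority/QoS ordering */
		    QM_SDQCR_TOKEN_SET(0x98) |	  /* tags the DQRR entries */
		    QM_SDQCR_CHANNELS_DEDICATED | /* the portal's own channel */
		    QM_SDQCR_CHANNELS_POOL(2);	  /* plus pool channel 2 */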
351 | |||
352 | /* For qm_dqrr_vdqcr_set(): use FQID(n) to fill in the frame queue ID */ | ||
353 | #define QM_VDQCR_FQID_MASK 0x00ffffff | ||
354 | #define QM_VDQCR_FQID(n) ((n) & QM_VDQCR_FQID_MASK) | ||
355 | |||
356 | /* | ||
357 | * Used by all portal interrupt registers except 'inhibit' | ||
358 | * Channels with frame availability | ||
359 | */ | ||
360 | #define QM_PIRQ_DQAVAIL 0x0000ffff | ||
361 | |||
362 | /* The DQAVAIL interrupt fields break down into these bits: */ | ||
363 | #define QM_DQAVAIL_PORTAL 0x8000 /* Portal channel */ | ||
364 | #define QM_DQAVAIL_POOL(n) (0x8000 >> (n)) /* Pool channel, n==[1..15] */ | ||
365 | #define QM_DQAVAIL_MASK 0xffff | ||
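Decoding is a matter of masking, e.g. (a sketch, with 'dqavail' standing for a hypothetical value read out of a portal interrupt register):

	if (dqavail & QM_DQAVAIL_PORTAL)
		pr_info("dedicated channel has frames\n");
	if (dqavail & QM_DQAVAIL_POOL(3))	/* 0x8000 >> 3 == 0x1000 */
		pr_info("pool channel 3 has frames\n");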
366 | /* This mask contains all the "irqsource" bits visible to API users */ | ||
367 | #define QM_PIRQ_VISIBLE (QM_PIRQ_SLOW | QM_PIRQ_DQRI) | ||
368 | |||
369 | extern struct qman_portal *affine_portals[NR_CPUS]; | ||
370 | const struct qm_portal_config *qman_get_qm_portal_config( | ||
371 | struct qman_portal *portal); | ||
diff --git a/drivers/soc/fsl/qbman/qman_test.c b/drivers/soc/fsl/qbman/qman_test.c new file mode 100644 index 000000000000..18f7f0202fa7 --- /dev/null +++ b/drivers/soc/fsl/qbman/qman_test.c | |||
@@ -0,0 +1,62 @@ | |||
1 | /* Copyright 2008 - 2016 Freescale Semiconductor, Inc. | ||
2 | * | ||
3 | * Redistribution and use in source and binary forms, with or without | ||
4 | * modification, are permitted provided that the following conditions are met: | ||
5 | * * Redistributions of source code must retain the above copyright | ||
6 | * notice, this list of conditions and the following disclaimer. | ||
7 | * * Redistributions in binary form must reproduce the above copyright | ||
8 | * notice, this list of conditions and the following disclaimer in the | ||
9 | * documentation and/or other materials provided with the distribution. | ||
10 | * * Neither the name of Freescale Semiconductor nor the | ||
11 | * names of its contributors may be used to endorse or promote products | ||
12 | * derived from this software without specific prior written permission. | ||
13 | * | ||
14 | * ALTERNATIVELY, this software may be distributed under the terms of the | ||
15 | * GNU General Public License ("GPL") as published by the Free Software | ||
16 | * Foundation, either version 2 of that License or (at your option) any | ||
17 | * later version. | ||
18 | * | ||
19 | * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY | ||
20 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
21 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | ||
22 | * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY | ||
23 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | ||
24 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | ||
25 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | ||
26 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | ||
28 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
29 | */ | ||
30 | |||
31 | #include "qman_test.h" | ||
32 | |||
33 | MODULE_AUTHOR("Geoff Thorpe"); | ||
34 | MODULE_LICENSE("Dual BSD/GPL"); | ||
35 | MODULE_DESCRIPTION("QMan testing"); | ||
36 | |||
37 | static int test_init(void) | ||
38 | { | ||
39 | int loop = 1; /* one pass over the enabled tests */ | ||
40 | int err = 0; | ||
41 | |||
42 | while (loop--) { | ||
43 | #ifdef CONFIG_FSL_QMAN_TEST_STASH | ||
44 | err = qman_test_stash(); | ||
45 | if (err) | ||
46 | break; | ||
47 | #endif | ||
48 | #ifdef CONFIG_FSL_QMAN_TEST_API | ||
49 | err = qman_test_api(); | ||
50 | if (err) | ||
51 | break; | ||
52 | #endif | ||
53 | } | ||
54 | return err; | ||
55 | } | ||
56 | |||
57 | static void test_exit(void) | ||
58 | { | ||
59 | } | ||
60 | |||
61 | module_init(test_init); | ||
62 | module_exit(test_exit); | ||
diff --git a/drivers/soc/fsl/qbman/qman_test.h b/drivers/soc/fsl/qbman/qman_test.h new file mode 100644 index 000000000000..d5f8cb2260dc --- /dev/null +++ b/drivers/soc/fsl/qbman/qman_test.h | |||
@@ -0,0 +1,36 @@ | |||
1 | /* Copyright 2008 - 2016 Freescale Semiconductor, Inc. | ||
2 | * | ||
3 | * Redistribution and use in source and binary forms, with or without | ||
4 | * modification, are permitted provided that the following conditions are met: | ||
5 | * * Redistributions of source code must retain the above copyright | ||
6 | * notice, this list of conditions and the following disclaimer. | ||
7 | * * Redistributions in binary form must reproduce the above copyright | ||
8 | * notice, this list of conditions and the following disclaimer in the | ||
9 | * documentation and/or other materials provided with the distribution. | ||
10 | * * Neither the name of Freescale Semiconductor nor the | ||
11 | * names of its contributors may be used to endorse or promote products | ||
12 | * derived from this software without specific prior written permission. | ||
13 | * | ||
14 | * ALTERNATIVELY, this software may be distributed under the terms of the | ||
15 | * GNU General Public License ("GPL") as published by the Free Software | ||
16 | * Foundation, either version 2 of that License or (at your option) any | ||
17 | * later version. | ||
18 | * | ||
19 | * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY | ||
20 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
21 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | ||
22 | * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY | ||
23 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | ||
24 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | ||
25 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | ||
26 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | ||
28 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
29 | */ | ||
30 | |||
31 | #include "qman_priv.h" | ||
32 | |||
33 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
34 | |||
35 | int qman_test_stash(void); | ||
36 | int qman_test_api(void); | ||
diff --git a/drivers/soc/fsl/qbman/qman_test_api.c b/drivers/soc/fsl/qbman/qman_test_api.c new file mode 100644 index 000000000000..6880ff17f45e --- /dev/null +++ b/drivers/soc/fsl/qbman/qman_test_api.c | |||
@@ -0,0 +1,252 @@ | |||
1 | /* Copyright 2008 - 2016 Freescale Semiconductor, Inc. | ||
2 | * | ||
3 | * Redistribution and use in source and binary forms, with or without | ||
4 | * modification, are permitted provided that the following conditions are met: | ||
5 | * * Redistributions of source code must retain the above copyright | ||
6 | * notice, this list of conditions and the following disclaimer. | ||
7 | * * Redistributions in binary form must reproduce the above copyright | ||
8 | * notice, this list of conditions and the following disclaimer in the | ||
9 | * documentation and/or other materials provided with the distribution. | ||
10 | * * Neither the name of Freescale Semiconductor nor the | ||
11 | * names of its contributors may be used to endorse or promote products | ||
12 | * derived from this software without specific prior written permission. | ||
13 | * | ||
14 | * ALTERNATIVELY, this software may be distributed under the terms of the | ||
15 | * GNU General Public License ("GPL") as published by the Free Software | ||
16 | * Foundation, either version 2 of that License or (at your option) any | ||
17 | * later version. | ||
18 | * | ||
19 | * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY | ||
20 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
21 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | ||
22 | * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY | ||
23 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | ||
24 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | ||
25 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | ||
26 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | ||
28 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
29 | */ | ||
30 | |||
31 | #include "qman_test.h" | ||
32 | |||
33 | #define CGR_ID 27 | ||
34 | #define POOL_ID 2 | ||
35 | #define FQ_FLAGS QMAN_FQ_FLAG_DYNAMIC_FQID | ||
36 | #define NUM_ENQUEUES 10 | ||
37 | #define NUM_PARTIAL 4 | ||
38 | #define PORTAL_SDQCR (QM_SDQCR_SOURCE_CHANNELS | \ | ||
39 | QM_SDQCR_TYPE_PRIO_QOS | \ | ||
40 | QM_SDQCR_TOKEN_SET(0x98) | \ | ||
41 | QM_SDQCR_CHANNELS_DEDICATED | \ | ||
42 | QM_SDQCR_CHANNELS_POOL(POOL_ID)) | ||
43 | #define PORTAL_OPAQUE ((void *)0xf00dbeef) | ||
44 | #define VDQCR_FLAGS (QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH) | ||
45 | |||
46 | static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *, | ||
47 | struct qman_fq *, | ||
48 | const struct qm_dqrr_entry *); | ||
49 | static void cb_ern(struct qman_portal *, struct qman_fq *, | ||
50 | const union qm_mr_entry *); | ||
51 | static void cb_fqs(struct qman_portal *, struct qman_fq *, | ||
52 | const union qm_mr_entry *); | ||
53 | |||
54 | static struct qm_fd fd, fd_dq; | ||
55 | static struct qman_fq fq_base = { | ||
56 | .cb.dqrr = cb_dqrr, | ||
57 | .cb.ern = cb_ern, | ||
58 | .cb.fqs = cb_fqs | ||
59 | }; | ||
60 | static DECLARE_WAIT_QUEUE_HEAD(waitqueue); | ||
61 | static int retire_complete, sdqcr_complete; | ||
62 | |||
63 | /* Helpers for initialising and "incrementing" a frame descriptor */ | ||
64 | static void fd_init(struct qm_fd *fd) | ||
65 | { | ||
66 | qm_fd_addr_set64(fd, 0xabdeadbeefLLU); | ||
67 | qm_fd_set_contig_big(fd, 0x0000ffff); | ||
68 | fd->cmd = 0xfeedf00d; | ||
69 | } | ||
70 | |||
71 | static void fd_inc(struct qm_fd *fd) | ||
72 | { | ||
73 | u64 t = qm_fd_addr_get64(fd); | ||
74 | int z = t >> 40; | ||
75 | unsigned int len, off; | ||
76 | enum qm_fd_format fmt; | ||
77 | |||
78 | t <<= 1; | ||
79 | if (z) | ||
80 | t |= 1; | ||
81 | qm_fd_addr_set64(fd, t); | ||
82 | |||
83 | fmt = qm_fd_get_format(fd); | ||
84 | off = qm_fd_get_offset(fd); | ||
85 | len = qm_fd_get_length(fd); | ||
86 | len--; | ||
87 | qm_fd_set_param(fd, fmt, off, len); | ||
88 | |||
89 | fd->cmd++; | ||
90 | } | ||
91 | |||
92 | /* The only part of the 'fd' we can't memcmp() is the ppid */ | ||
93 | static int fd_cmp(const struct qm_fd *a, const struct qm_fd *b) | ||
94 | { | ||
95 | int r = (qm_fd_addr_get64(a) == qm_fd_addr_get64(b)) ? 0 : -1; | ||
96 | |||
97 | if (!r) { | ||
98 | enum qm_fd_format fmt_a, fmt_b; | ||
99 | |||
100 | fmt_a = qm_fd_get_format(a); | ||
101 | fmt_b = qm_fd_get_format(b); | ||
102 | r = fmt_a - fmt_b; | ||
103 | } | ||
104 | if (!r) | ||
105 | r = a->cfg - b->cfg; | ||
106 | if (!r) | ||
107 | r = a->cmd - b->cmd; | ||
108 | return r; | ||
109 | } | ||
110 | |||
111 | /* The test proper */ | ||
112 | static int do_enqueues(struct qman_fq *fq) | ||
113 | { | ||
114 | unsigned int loop; | ||
115 | int err = 0; | ||
116 | |||
117 | for (loop = 0; loop < NUM_ENQUEUES; loop++) { | ||
118 | if (qman_enqueue(fq, &fd)) { | ||
119 | pr_crit("qman_enqueue() failed\n"); | ||
120 | err = -EIO; | ||
121 | } | ||
122 | fd_inc(&fd); | ||
123 | } | ||
124 | |||
125 | return err; | ||
126 | } | ||
127 | |||
128 | int qman_test_api(void) | ||
129 | { | ||
130 | unsigned int flags, frmcnt; | ||
131 | int err; | ||
132 | struct qman_fq *fq = &fq_base; | ||
133 | |||
134 | pr_info("%s(): Starting\n", __func__); | ||
135 | fd_init(&fd); | ||
136 | fd_init(&fd_dq); | ||
137 | |||
138 | /* Initialise (parked) FQ */ | ||
139 | err = qman_create_fq(0, FQ_FLAGS, fq); | ||
140 | if (err) { | ||
141 | pr_crit("qman_create_fq() failed\n"); | ||
142 | goto failed; | ||
143 | } | ||
144 | err = qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL); | ||
145 | if (err) { | ||
146 | pr_crit("qman_init_fq() failed\n"); | ||
147 | goto failed; | ||
148 | } | ||
149 | /* Do enqueues + VDQCR, twice. (Parked FQ) */ | ||
150 | err = do_enqueues(fq); | ||
151 | if (err) | ||
152 | goto failed; | ||
153 | pr_info("VDQCR (till-empty);\n"); | ||
154 | frmcnt = QM_VDQCR_NUMFRAMES_TILLEMPTY; | ||
155 | err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt); | ||
156 | if (err) { | ||
157 | pr_crit("qman_volatile_dequeue() failed\n"); | ||
158 | goto failed; | ||
159 | } | ||
160 | err = do_enqueues(fq); | ||
161 | if (err) | ||
162 | goto failed; | ||
163 | pr_info("VDQCR (%d of %d);\n", NUM_PARTIAL, NUM_ENQUEUES); | ||
164 | frmcnt = QM_VDQCR_NUMFRAMES_SET(NUM_PARTIAL); | ||
165 | err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt); | ||
166 | if (err) { | ||
167 | pr_crit("qman_volatile_dequeue() failed\n"); | ||
168 | goto failed; | ||
169 | } | ||
170 | pr_info("VDQCR (%d of %d);\n", NUM_ENQUEUES - NUM_PARTIAL, | ||
171 | NUM_ENQUEUES); | ||
172 | frmcnt = QM_VDQCR_NUMFRAMES_SET(NUM_ENQUEUES - NUM_PARTIAL); | ||
173 | err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt); | ||
174 | if (err) { | ||
175 | pr_err("qman_volatile_dequeue() failed\n"); | ||
176 | goto failed; | ||
177 | } | ||
178 | |||
179 | err = do_enqueues(fq); | ||
180 | if (err) | ||
181 | goto failed; | ||
182 | pr_info("scheduled dequeue (till-empty)\n"); | ||
183 | err = qman_schedule_fq(fq); | ||
184 | if (err) { | ||
185 | pr_crit("qman_schedule_fq() failed\n"); | ||
186 | goto failed; | ||
187 | } | ||
188 | wait_event(waitqueue, sdqcr_complete); | ||
189 | |||
190 | /* Retire and OOS the FQ */ | ||
191 | err = qman_retire_fq(fq, &flags); | ||
192 | if (err < 0) { | ||
193 | pr_crit("qman_retire_fq() failed\n"); | ||
194 | goto failed; | ||
195 | } | ||
196 | wait_event(waitqueue, retire_complete); | ||
197 | if (flags & QMAN_FQ_STATE_BLOCKOOS) { | ||
198 | err = -EIO; | ||
199 | pr_crit("leaking frames\n"); | ||
200 | goto failed; | ||
201 | } | ||
202 | err = qman_oos_fq(fq); | ||
203 | if (err) { | ||
204 | pr_crit("qman_oos_fq() failed\n"); | ||
205 | goto failed; | ||
206 | } | ||
207 | qman_destroy_fq(fq); | ||
208 | pr_info("%s(): Finished\n", __func__); | ||
209 | return 0; | ||
210 | |||
211 | failed: | ||
212 | WARN_ON(1); | ||
213 | return err; | ||
214 | } | ||
215 | |||
216 | static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p, | ||
217 | struct qman_fq *fq, | ||
218 | const struct qm_dqrr_entry *dq) | ||
219 | { | ||
220 | if (WARN_ON(fd_cmp(&fd_dq, &dq->fd))) { | ||
221 | pr_err("BADNESS: dequeued frame doesn't match;\n"); | ||
222 | return qman_cb_dqrr_consume; | ||
223 | } | ||
224 | fd_inc(&fd_dq); | ||
225 | if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_cmp(&fd_dq, &fd)) { | ||
226 | sdqcr_complete = 1; | ||
227 | wake_up(&waitqueue); | ||
228 | } | ||
229 | return qman_cb_dqrr_consume; | ||
230 | } | ||
231 | |||
232 | static void cb_ern(struct qman_portal *p, struct qman_fq *fq, | ||
233 | const union qm_mr_entry *msg) | ||
234 | { | ||
235 | pr_crit("cb_ern() unimplemented"); | ||
236 | WARN_ON(1); | ||
237 | } | ||
238 | |||
239 | static void cb_fqs(struct qman_portal *p, struct qman_fq *fq, | ||
240 | const union qm_mr_entry *msg) | ||
241 | { | ||
242 | u8 verb = (msg->verb & QM_MR_VERB_TYPE_MASK); | ||
243 | |||
244 | if ((verb != QM_MR_VERB_FQRN) && (verb != QM_MR_VERB_FQRNI)) { | ||
245 | pr_crit("unexpected FQS message"); | ||
246 | WARN_ON(1); | ||
247 | return; | ||
248 | } | ||
249 | pr_info("Retirement message received\n"); | ||
250 | retire_complete = 1; | ||
251 | wake_up(&waitqueue); | ||
252 | } | ||
diff --git a/drivers/soc/fsl/qbman/qman_test_stash.c b/drivers/soc/fsl/qbman/qman_test_stash.c new file mode 100644 index 000000000000..43cf66ba42f5 --- /dev/null +++ b/drivers/soc/fsl/qbman/qman_test_stash.c | |||
@@ -0,0 +1,617 @@ | |||
1 | /* Copyright 2009 - 2016 Freescale Semiconductor, Inc. | ||
2 | * | ||
3 | * Redistribution and use in source and binary forms, with or without | ||
4 | * modification, are permitted provided that the following conditions are met: | ||
5 | * * Redistributions of source code must retain the above copyright | ||
6 | * notice, this list of conditions and the following disclaimer. | ||
7 | * * Redistributions in binary form must reproduce the above copyright | ||
8 | * notice, this list of conditions and the following disclaimer in the | ||
9 | * documentation and/or other materials provided with the distribution. | ||
10 | * * Neither the name of Freescale Semiconductor nor the | ||
11 | * names of its contributors may be used to endorse or promote products | ||
12 | * derived from this software without specific prior written permission. | ||
13 | * | ||
14 | * ALTERNATIVELY, this software may be distributed under the terms of the | ||
15 | * GNU General Public License ("GPL") as published by the Free Software | ||
16 | * Foundation, either version 2 of that License or (at your option) any | ||
17 | * later version. | ||
18 | * | ||
19 | * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY | ||
20 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
21 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | ||
22 | * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY | ||
23 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | ||
24 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | ||
25 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | ||
26 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | ||
28 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
29 | */ | ||
30 | |||
31 | #include "qman_test.h" | ||
32 | |||
33 | #include <linux/dma-mapping.h> | ||
34 | #include <linux/delay.h> | ||
35 | |||
36 | /* | ||
37 | * Algorithm: | ||
38 | * | ||
39 | * Each cpu will have HP_PER_CPU "handlers" set up, each of which incorporates | ||
40 | * an rx/tx pair of FQ objects (both of which are stashed on dequeue). The | ||
41 | * organisation of FQIDs is such that the HP_PER_CPU*NUM_CPUS handlers will | ||
42 | * shuttle a "hot potato" frame around them such that every forwarding action | ||
43 | * moves it from one cpu to another. (The use of more than one handler per cpu | ||
44 | * is to allow enough handlers/FQs to truly test the significance of caching - | ||
45 | * i.e. when cache expiries are occurring.) | ||
46 | * | ||
47 | * The "hot potato" frame content will be HP_NUM_WORDS*4 bytes in size, and the | ||
48 | * first and last words of the frame data will undergo a transformation step on | ||
49 | * each forwarding action. To achieve this, each handler will be assigned a | ||
50 | * 32-bit "mixer", that is produced using a 32-bit LFSR. When a frame is | ||
51 | * received by a handler, the mixer of the expected sender is XOR'd into all | ||
52 | * words of the entire frame, which is then validated against the original | ||
53 | * values. Then, before forwarding, the entire frame is XOR'd with the mixer of | ||
54 | * the current handler. Apart from validating that the frame is taking the | ||
55 | * expected path, this also provides some quasi-realistic overheads to each | ||
56 | * forwarding action - dereferencing *all* the frame data, computation, and | ||
57 | * conditional branching. There is a "special" handler designated to act as the | ||
58 | * instigator of the test by creating and enqueuing the "hot potato" frame, and | ||
59 | * to determine when the test has completed by counting HP_LOOPS iterations. | ||
60 | * | ||
61 | * Init phases: | ||
62 | * | ||
63 | * 1. prepare each cpu's 'hp_cpu' struct using on_each_cpu(,,1) and link them | ||
64 | * into 'hp_cpu_list'. Specifically, set processor_id, allocate HP_PER_CPU | ||
65 | * handlers and link-list them (but do no other handler setup). | ||
66 | * | ||
67 | * 2. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each | ||
68 | * hp_cpu's 'iterator' to point to its first handler. With each loop, | ||
69 | * allocate rx/tx FQIDs and mixer values to the hp_cpu's iterator handler | ||
70 | * and advance the iterator for the next loop. This includes a final fixup, | ||
71 | * which connects the last handler to the first (and which is why phase 2 | ||
72 | * and 3 are separate). | ||
73 | * | ||
74 | * 3. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each | ||
75 | * hp_cpu's 'iterator' to point to its first handler. With each loop, | ||
76 | * initialise FQ objects and advance the iterator for the next loop. | ||
77 | * Moreover, do this initialisation on the cpu it applies to so that Rx FQ | ||
78 | * initialisation targets the correct cpu. | ||
79 | */ | ||
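In code terms, the per-word transform described above reduces to the following (this is what process_frame_data() later in this file implements):

	/*
	 *	*p ^= handler->rx_mixer;    undo the sender's tx_mixer
	 *	if (*p != lfsr)             validate against the LFSR original
	 *		-> corrupt frame;
	 *	*p ^= handler->tx_mixer;    pre-mix for the next hop
	 *
	 * which round-trips because init_phase2() assigns each handler's
	 * rx_mixer from the previous handler's tx_mixer.
	 */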
80 | |||
81 | /* | ||
82 | * helper to run something on all cpus (can't use on_each_cpu(), as that invokes | ||
83 | * the fn from irq context, which is too restrictive). | ||
84 | */ | ||
85 | struct bstrap { | ||
86 | int (*fn)(void); | ||
87 | atomic_t started; | ||
88 | }; | ||
89 | static int bstrap_fn(void *bs) | ||
90 | { | ||
91 | struct bstrap *bstrap = bs; | ||
92 | int err; | ||
93 | |||
94 | atomic_inc(&bstrap->started); | ||
95 | err = bstrap->fn(); | ||
96 | if (err) | ||
97 | return err; | ||
98 | while (!kthread_should_stop()) | ||
99 | msleep(20); | ||
100 | return 0; | ||
101 | } | ||
102 | static int on_all_cpus(int (*fn)(void)) | ||
103 | { | ||
104 | int cpu; | ||
105 | |||
106 | for_each_cpu(cpu, cpu_online_mask) { | ||
107 | struct bstrap bstrap = { | ||
108 | .fn = fn, | ||
109 | .started = ATOMIC_INIT(0) | ||
110 | }; | ||
111 | struct task_struct *k = kthread_create(bstrap_fn, &bstrap, | ||
112 | "hotpotato%d", cpu); | ||
113 | int ret; | ||
114 | |||
115 | if (IS_ERR(k)) | ||
116 | return -ENOMEM; | ||
117 | kthread_bind(k, cpu); | ||
118 | wake_up_process(k); | ||
119 | /* | ||
120 | * If we call kthread_stop() before the "wake up" has had an | ||
121 | * effect, then the thread may exit with -EINTR without ever | ||
122 | * running the function. So poll until it's started before | ||
123 | * requesting it to stop. | ||
124 | */ | ||
125 | while (!atomic_read(&bstrap.started)) | ||
126 | msleep(20); | ||
127 | ret = kthread_stop(k); | ||
128 | if (ret) | ||
129 | return ret; | ||
130 | } | ||
131 | return 0; | ||
132 | } | ||
133 | |||
134 | struct hp_handler { | ||
135 | |||
136 | /* The following data is stashed when 'rx' is dequeued; */ | ||
137 | /* -------------- */ | ||
138 | /* The Rx FQ, dequeues of which will stash the entire hp_handler */ | ||
139 | struct qman_fq rx; | ||
140 | /* The Tx FQ we should forward to */ | ||
141 | struct qman_fq tx; | ||
142 | /* The value we XOR post-dequeue, prior to validating */ | ||
143 | u32 rx_mixer; | ||
144 | /* The value we XOR pre-enqueue, after validating */ | ||
145 | u32 tx_mixer; | ||
146 | /* what the hotpotato address should be on dequeue */ | ||
147 | dma_addr_t addr; | ||
148 | u32 *frame_ptr; | ||
149 | |||
150 | /* The following data isn't (necessarily) stashed on dequeue; */ | ||
151 | /* -------------- */ | ||
152 | u32 fqid_rx, fqid_tx; | ||
153 | /* list node for linking us into 'hp_cpu' */ | ||
154 | struct list_head node; | ||
155 | /* Just to check ... */ | ||
156 | unsigned int processor_id; | ||
157 | } ____cacheline_aligned; | ||
158 | |||
159 | struct hp_cpu { | ||
160 | /* identify the cpu we run on; */ | ||
161 | unsigned int processor_id; | ||
162 | /* root node for the per-cpu list of handlers */ | ||
163 | struct list_head handlers; | ||
164 | /* list node for linking us into 'hp_cpu_list' */ | ||
165 | struct list_head node; | ||
166 | /* | ||
167 | * when repeatedly scanning 'hp_cpu_list', each time linking the n'th | ||
168 | * handlers together, this is used as per-cpu iterator state | ||
169 | */ | ||
170 | struct hp_handler *iterator; | ||
171 | }; | ||
172 | |||
173 | /* Each cpu has one of these */ | ||
174 | static DEFINE_PER_CPU(struct hp_cpu, hp_cpus); | ||
175 | |||
176 | /* links together the hp_cpu structs, in first-come, first-served order. */ | ||
177 | static LIST_HEAD(hp_cpu_list); | ||
178 | static spinlock_t hp_lock = __SPIN_LOCK_UNLOCKED(hp_lock); | ||
179 | |||
180 | static unsigned int hp_cpu_list_length; | ||
181 | |||
182 | /* the "special" handler, that starts and terminates the test. */ | ||
183 | static struct hp_handler *special_handler; | ||
184 | static int loop_counter; | ||
185 | |||
186 | /* handlers are allocated out of this, so they're properly aligned. */ | ||
187 | static struct kmem_cache *hp_handler_slab; | ||
188 | |||
189 | /* this is the frame data */ | ||
190 | static void *__frame_ptr; | ||
191 | static u32 *frame_ptr; | ||
192 | static dma_addr_t frame_dma; | ||
193 | |||
194 | /* the main function waits on this */ | ||
195 | static DECLARE_WAIT_QUEUE_HEAD(queue); | ||
196 | |||
197 | #define HP_PER_CPU 2 | ||
198 | #define HP_LOOPS 8 | ||
199 | /* 80 words (320 bytes): larger than a small ethernet frame, spanning several cachelines */ | ||
200 | #define HP_NUM_WORDS 80 | ||
201 | /* First word of the LFSR-based frame data */ | ||
202 | #define HP_FIRST_WORD 0xabbaf00d | ||
203 | |||
204 | static inline u32 do_lfsr(u32 prev) | ||
205 | { | ||
206 | return (prev >> 1) ^ (-(prev & 1u) & 0xd0000001u); | ||
207 | } | ||
208 | |||
209 | static int allocate_frame_data(void) | ||
210 | { | ||
211 | u32 lfsr = HP_FIRST_WORD; | ||
212 | int loop; | ||
213 | struct platform_device *pdev = platform_device_alloc("foobar", -1); | ||
214 | |||
215 | if (!pdev) { | ||
216 | pr_crit("platform_device_alloc() failed"); | ||
217 | return -EIO; | ||
218 | } | ||
219 | if (platform_device_add(pdev)) { | ||
220 | pr_crit("platform_device_add() failed"); | ||
221 | return -EIO; | ||
222 | } | ||
223 | __frame_ptr = kmalloc(4 * HP_NUM_WORDS, GFP_KERNEL); | ||
224 | if (!__frame_ptr) | ||
225 | return -ENOMEM; | ||
226 | |||
227 | frame_ptr = PTR_ALIGN(__frame_ptr, 64); | ||
228 | for (loop = 0; loop < HP_NUM_WORDS; loop++) { | ||
229 | frame_ptr[loop] = lfsr; | ||
230 | lfsr = do_lfsr(lfsr); | ||
231 | } | ||
232 | frame_dma = dma_map_single(&pdev->dev, frame_ptr, 4 * HP_NUM_WORDS, | ||
233 | DMA_BIDIRECTIONAL); | ||
234 | platform_device_del(pdev); | ||
235 | platform_device_put(pdev); | ||
236 | return 0; | ||
237 | } | ||
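One caveat with allocate_frame_data() as merged: both error paths leak the temporary platform device (a failed platform_device_add() still holds a reference, and a failed kmalloc() leaves the device registered), and the dma_map_single() result is never checked. A leak-free sketch of the same sequence, offered as an illustration rather than as part of this patch (it also keeps the original's assumption that kmalloc() leaves enough slack for the 64-byte PTR_ALIGN):

	static int allocate_frame_data(void)
	{
		u32 lfsr = HP_FIRST_WORD;
		int loop, err = -EIO;
		struct platform_device *pdev = platform_device_alloc("foobar", -1);

		if (!pdev) {
			pr_crit("platform_device_alloc() failed");
			return -EIO;
		}
		if (platform_device_add(pdev)) {
			pr_crit("platform_device_add() failed");
			goto out_put;		/* drop the unadded device */
		}
		err = -ENOMEM;
		__frame_ptr = kmalloc(4 * HP_NUM_WORDS, GFP_KERNEL);
		if (!__frame_ptr)
			goto out_del;
		frame_ptr = PTR_ALIGN(__frame_ptr, 64);
		for (loop = 0; loop < HP_NUM_WORDS; loop++) {
			frame_ptr[loop] = lfsr;
			lfsr = do_lfsr(lfsr);
		}
		frame_dma = dma_map_single(&pdev->dev, frame_ptr, 4 * HP_NUM_WORDS,
					   DMA_BIDIRECTIONAL);
		if (dma_mapping_error(&pdev->dev, frame_dma)) {
			err = -EIO;
			kfree(__frame_ptr);
			goto out_del;
		}
		err = 0;
	out_del:
		platform_device_del(pdev);
	out_put:
		platform_device_put(pdev);
		return err;
	}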
238 | |||
239 | static void deallocate_frame_data(void) | ||
240 | { | ||
241 | kfree(__frame_ptr); | ||
242 | } | ||
243 | |||
244 | static inline int process_frame_data(struct hp_handler *handler, | ||
245 | const struct qm_fd *fd) | ||
246 | { | ||
247 | u32 *p = handler->frame_ptr; | ||
248 | u32 lfsr = HP_FIRST_WORD; | ||
249 | int loop; | ||
250 | |||
251 | if (qm_fd_addr_get64(fd) != handler->addr) { | ||
252 | pr_crit("bad frame address"); | ||
253 | return -EIO; | ||
254 | } | ||
255 | for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) { | ||
256 | *p ^= handler->rx_mixer; | ||
257 | if (*p != lfsr) { | ||
258 | pr_crit("corrupt frame data"); | ||
259 | return -EIO; | ||
260 | } | ||
261 | *p ^= handler->tx_mixer; | ||
262 | lfsr = do_lfsr(lfsr); | ||
263 | } | ||
264 | return 0; | ||
265 | } | ||
266 | |||
267 | static enum qman_cb_dqrr_result normal_dqrr(struct qman_portal *portal, | ||
268 | struct qman_fq *fq, | ||
269 | const struct qm_dqrr_entry *dqrr) | ||
270 | { | ||
271 | struct hp_handler *handler = (struct hp_handler *)fq; | ||
272 | |||
273 | if (process_frame_data(handler, &dqrr->fd)) { | ||
274 | WARN_ON(1); | ||
275 | goto skip; | ||
276 | } | ||
277 | if (qman_enqueue(&handler->tx, &dqrr->fd)) { | ||
278 | pr_crit("qman_enqueue() failed"); | ||
279 | WARN_ON(1); | ||
280 | } | ||
281 | skip: | ||
282 | return qman_cb_dqrr_consume; | ||
283 | } | ||
284 | |||
285 | static enum qman_cb_dqrr_result special_dqrr(struct qman_portal *portal, | ||
286 | struct qman_fq *fq, | ||
287 | const struct qm_dqrr_entry *dqrr) | ||
288 | { | ||
289 | struct hp_handler *handler = (struct hp_handler *)fq; | ||
290 | |||
291 | process_frame_data(handler, &dqrr->fd); | ||
292 | if (++loop_counter < HP_LOOPS) { | ||
293 | if (qman_enqueue(&handler->tx, &dqrr->fd)) { | ||
294 | pr_crit("qman_enqueue() failed"); | ||
295 | WARN_ON(1); | ||
296 | goto skip; | ||
297 | } | ||
298 | } else { | ||
299 | pr_info("Received final (%dth) frame\n", loop_counter); | ||
300 | wake_up(&queue); | ||
301 | } | ||
302 | skip: | ||
303 | return qman_cb_dqrr_consume; | ||
304 | } | ||
305 | |||
306 | static int create_per_cpu_handlers(void) | ||
307 | { | ||
308 | struct hp_handler *handler; | ||
309 | int loop; | ||
310 | struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus); | ||
311 | |||
312 | hp_cpu->processor_id = smp_processor_id(); | ||
313 | spin_lock(&hp_lock); | ||
314 | list_add_tail(&hp_cpu->node, &hp_cpu_list); | ||
315 | hp_cpu_list_length++; | ||
316 | spin_unlock(&hp_lock); | ||
317 | INIT_LIST_HEAD(&hp_cpu->handlers); | ||
318 | for (loop = 0; loop < HP_PER_CPU; loop++) { | ||
319 | handler = kmem_cache_alloc(hp_handler_slab, GFP_KERNEL); | ||
320 | if (!handler) { | ||
321 | pr_crit("kmem_cache_alloc() failed"); | ||
322 | WARN_ON(1); | ||
323 | return -EIO; | ||
324 | } | ||
325 | handler->processor_id = hp_cpu->processor_id; | ||
326 | handler->addr = frame_dma; | ||
327 | handler->frame_ptr = frame_ptr; | ||
328 | list_add_tail(&handler->node, &hp_cpu->handlers); | ||
329 | } | ||
330 | return 0; | ||
331 | } | ||
332 | |||
333 | static int destroy_per_cpu_handlers(void) | ||
334 | { | ||
335 | struct list_head *loop, *tmp; | ||
336 | struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus); | ||
337 | |||
338 | spin_lock(&hp_lock); | ||
339 | list_del(&hp_cpu->node); | ||
340 | spin_unlock(&hp_lock); | ||
341 | list_for_each_safe(loop, tmp, &hp_cpu->handlers) { | ||
342 | u32 flags = 0; | ||
343 | struct hp_handler *handler = list_entry(loop, struct hp_handler, | ||
344 | node); | ||
345 | if (qman_retire_fq(&handler->rx, &flags) || | ||
346 | (flags & QMAN_FQ_STATE_BLOCKOOS)) { | ||
347 | pr_crit("qman_retire_fq(rx) failed, flags: %x", flags); | ||
348 | WARN_ON(1); | ||
349 | return -EIO; | ||
350 | } | ||
351 | if (qman_oos_fq(&handler->rx)) { | ||
352 | pr_crit("qman_oos_fq(rx) failed"); | ||
353 | WARN_ON(1); | ||
354 | return -EIO; | ||
355 | } | ||
356 | qman_destroy_fq(&handler->rx); | ||
357 | qman_destroy_fq(&handler->tx); | ||
358 | qman_release_fqid(handler->fqid_rx); | ||
359 | list_del(&handler->node); | ||
360 | kmem_cache_free(hp_handler_slab, handler); | ||
361 | } | ||
362 | return 0; | ||
363 | } | ||
364 | |||
365 | static inline u8 num_cachelines(u32 offset) | ||
366 | { | ||
367 | u8 res = (offset + (L1_CACHE_BYTES - 1)) | ||
368 | / (L1_CACHE_BYTES); | ||
369 | if (res > 3) | ||
370 | return 3; | ||
371 | return res; | ||
372 | } | ||
373 | #define STASH_DATA_CL \ | ||
374 | num_cachelines(HP_NUM_WORDS * 4) | ||
375 | #define STASH_CTX_CL \ | ||
376 | num_cachelines(offsetof(struct hp_handler, fqid_rx)) | ||
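To make the arithmetic concrete (assuming the 64-byte L1 cachelines of the e500mc-class parts this driver targets; that value is an assumption here, not spelled out in the patch):

	/*
	 * STASH_DATA_CL: HP_NUM_WORDS * 4 = 320 bytes -> ceil(320/64) = 5,
	 *                capped at 3 by num_cachelines() above.
	 * STASH_CTX_CL:  cachelines covering the hp_handler fields up to,
	 *                but excluding, fqid_rx, i.e. exactly the region
	 *                marked "stashed when 'rx' is dequeued" in the struct.
	 */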
377 | |||
378 | static int init_handler(void *h) | ||
379 | { | ||
380 | struct qm_mcc_initfq opts; | ||
381 | struct hp_handler *handler = h; | ||
382 | int err; | ||
383 | |||
384 | if (handler->processor_id != smp_processor_id()) { | ||
385 | err = -EIO; | ||
386 | goto failed; | ||
387 | } | ||
388 | /* Set up rx */ | ||
389 | memset(&handler->rx, 0, sizeof(handler->rx)); | ||
390 | if (handler == special_handler) | ||
391 | handler->rx.cb.dqrr = special_dqrr; | ||
392 | else | ||
393 | handler->rx.cb.dqrr = normal_dqrr; | ||
394 | err = qman_create_fq(handler->fqid_rx, 0, &handler->rx); | ||
395 | if (err) { | ||
396 | pr_crit("qman_create_fq(rx) failed"); | ||
397 | goto failed; | ||
398 | } | ||
399 | memset(&opts, 0, sizeof(opts)); | ||
400 | opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA; | ||
401 | opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING; | ||
402 | qm_fqd_set_stashing(&opts.fqd, 0, STASH_DATA_CL, STASH_CTX_CL); | ||
403 | err = qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED | | ||
404 | QMAN_INITFQ_FLAG_LOCAL, &opts); | ||
405 | if (err) { | ||
406 | pr_crit("qman_init_fq(rx) failed"); | ||
407 | goto failed; | ||
408 | } | ||
409 | /* Set up tx */ | ||
410 | memset(&handler->tx, 0, sizeof(handler->tx)); | ||
411 | err = qman_create_fq(handler->fqid_tx, QMAN_FQ_FLAG_NO_MODIFY, | ||
412 | &handler->tx); | ||
413 | if (err) { | ||
414 | pr_crit("qman_create_fq(tx) failed"); | ||
415 | goto failed; | ||
416 | } | ||
417 | |||
418 | return 0; | ||
419 | failed: | ||
420 | return err; | ||
421 | } | ||
422 | |||
423 | static void init_handler_cb(void *h) | ||
424 | { | ||
425 | if (init_handler(h)) | ||
426 | WARN_ON(1); | ||
427 | } | ||
428 | |||
429 | static int init_phase2(void) | ||
430 | { | ||
431 | int loop; | ||
432 | u32 fqid = 0; | ||
433 | u32 lfsr = 0xdeadbeef; | ||
434 | struct hp_cpu *hp_cpu; | ||
435 | struct hp_handler *handler; | ||
436 | |||
437 | for (loop = 0; loop < HP_PER_CPU; loop++) { | ||
438 | list_for_each_entry(hp_cpu, &hp_cpu_list, node) { | ||
439 | int err; | ||
440 | |||
441 | if (!loop) | ||
442 | hp_cpu->iterator = list_first_entry( | ||
443 | &hp_cpu->handlers, | ||
444 | struct hp_handler, node); | ||
445 | else | ||
446 | hp_cpu->iterator = list_entry( | ||
447 | hp_cpu->iterator->node.next, | ||
448 | struct hp_handler, node); | ||
449 | /* Rx FQID is the previous handler's Tx FQID */ | ||
450 | hp_cpu->iterator->fqid_rx = fqid; | ||
451 | /* Allocate new FQID for Tx */ | ||
452 | err = qman_alloc_fqid(&fqid); | ||
453 | if (err) { | ||
454 | pr_crit("qman_alloc_fqid() failed"); | ||
455 | return err; | ||
456 | } | ||
457 | hp_cpu->iterator->fqid_tx = fqid; | ||
458 | /* Rx mixer is the previous handler's Tx mixer */ | ||
459 | hp_cpu->iterator->rx_mixer = lfsr; | ||
460 | /* Get new mixer for Tx */ | ||
461 | lfsr = do_lfsr(lfsr); | ||
462 | hp_cpu->iterator->tx_mixer = lfsr; | ||
463 | } | ||
464 | } | ||
465 | /* Fix up the first handler (fqid_rx==0, rx_mixer=0xdeadbeef) */ | ||
466 | hp_cpu = list_first_entry(&hp_cpu_list, struct hp_cpu, node); | ||
467 | handler = list_first_entry(&hp_cpu->handlers, struct hp_handler, node); | ||
468 | if (handler->fqid_rx != 0 || handler->rx_mixer != 0xdeadbeef) | ||
469 | return 1; | ||
470 | handler->fqid_rx = fqid; | ||
471 | handler->rx_mixer = lfsr; | ||
472 | /* and tag it as our "special" handler */ | ||
473 | special_handler = handler; | ||
474 | return 0; | ||
475 | } | ||
476 | |||
477 | static int init_phase3(void) | ||
478 | { | ||
479 | int loop, err; | ||
480 | struct hp_cpu *hp_cpu; | ||
481 | |||
482 | for (loop = 0; loop < HP_PER_CPU; loop++) { | ||
483 | list_for_each_entry(hp_cpu, &hp_cpu_list, node) { | ||
484 | if (!loop) | ||
485 | hp_cpu->iterator = list_first_entry( | ||
486 | &hp_cpu->handlers, | ||
487 | struct hp_handler, node); | ||
488 | else | ||
489 | hp_cpu->iterator = list_entry( | ||
490 | hp_cpu->iterator->node.next, | ||
491 | struct hp_handler, node); | ||
492 | preempt_disable(); | ||
493 | if (hp_cpu->processor_id == smp_processor_id()) { | ||
494 | err = init_handler(hp_cpu->iterator); | ||
495 | if (err) | ||
496 | return err; | ||
497 | } else { | ||
498 | smp_call_function_single(hp_cpu->processor_id, | ||
499 | init_handler_cb, hp_cpu->iterator, 1); | ||
500 | } | ||
501 | preempt_enable(); | ||
502 | } | ||
503 | } | ||
504 | return 0; | ||
505 | } | ||
506 | |||
507 | static int send_first_frame(void *ignore) | ||
508 | { | ||
509 | u32 *p = special_handler->frame_ptr; | ||
510 | u32 lfsr = HP_FIRST_WORD; | ||
511 | int loop, err; | ||
512 | struct qm_fd fd; | ||
513 | |||
514 | if (special_handler->processor_id != smp_processor_id()) { | ||
515 | err = -EIO; | ||
516 | goto failed; | ||
517 | } | ||
518 | memset(&fd, 0, sizeof(fd)); | ||
519 | qm_fd_addr_set64(&fd, special_handler->addr); | ||
520 | qm_fd_set_contig_big(&fd, HP_NUM_WORDS * 4); | ||
521 | for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) { | ||
522 | if (*p != lfsr) { | ||
523 | err = -EIO; | ||
524 | pr_crit("corrupt frame data"); | ||
525 | goto failed; | ||
526 | } | ||
527 | *p ^= special_handler->tx_mixer; | ||
528 | lfsr = do_lfsr(lfsr); | ||
529 | } | ||
530 | pr_info("Sending first frame\n"); | ||
531 | err = qman_enqueue(&special_handler->tx, &fd); | ||
532 | if (err) { | ||
533 | pr_crit("qman_enqueue() failed"); | ||
534 | goto failed; | ||
535 | } | ||
536 | |||
537 | return 0; | ||
538 | failed: | ||
539 | return err; | ||
540 | } | ||
541 | |||
542 | static void send_first_frame_cb(void *ignore) | ||
543 | { | ||
544 | if (send_first_frame(NULL)) | ||
545 | WARN_ON(1); | ||
546 | } | ||
547 | |||
548 | int qman_test_stash(void) | ||
549 | { | ||
550 | int err; | ||
551 | |||
552 | if (cpumask_weight(cpu_online_mask) < 2) { | ||
553 | pr_info("%s(): skip - only 1 CPU\n", __func__); | ||
554 | return 0; | ||
555 | } | ||
556 | |||
557 | pr_info("%s(): Starting\n", __func__); | ||
558 | |||
559 | hp_cpu_list_length = 0; | ||
560 | loop_counter = 0; | ||
561 | hp_handler_slab = kmem_cache_create("hp_handler_slab", | ||
562 | sizeof(struct hp_handler), L1_CACHE_BYTES, | ||
563 | SLAB_HWCACHE_ALIGN, NULL); | ||
564 | if (!hp_handler_slab) { | ||
565 | err = -EIO; | ||
566 | pr_crit("kmem_cache_create() failed"); | ||
567 | goto failed; | ||
568 | } | ||
569 | |||
570 | err = allocate_frame_data(); | ||
571 | if (err) | ||
572 | goto failed; | ||
573 | |||
574 | /* Init phase 1 */ | ||
575 | pr_info("Creating %d handlers per cpu...\n", HP_PER_CPU); | ||
576 | if (on_all_cpus(create_per_cpu_handlers)) { | ||
577 | err = -EIO; | ||
578 | pr_crit("on_each_cpu() failed"); | ||
579 | goto failed; | ||
580 | } | ||
581 | pr_info("Number of cpus: %d, total of %d handlers\n", | ||
582 | hp_cpu_list_length, hp_cpu_list_length * HP_PER_CPU); | ||
583 | |||
584 | err = init_phase2(); | ||
585 | if (err) | ||
586 | goto failed; | ||
587 | |||
588 | err = init_phase3(); | ||
589 | if (err) | ||
590 | goto failed; | ||
591 | |||
592 | preempt_disable(); | ||
593 | if (special_handler->processor_id == smp_processor_id()) { | ||
594 | err = send_first_frame(NULL); | ||
595 | if (err) | ||
596 | goto failed; | ||
597 | } else { | ||
598 | smp_call_function_single(special_handler->processor_id, | ||
599 | send_first_frame_cb, NULL, 1); | ||
600 | } | ||
601 | preempt_enable(); | ||
602 | |||
603 | wait_event(queue, loop_counter == HP_LOOPS); | ||
604 | deallocate_frame_data(); | ||
605 | if (on_all_cpus(destroy_per_cpu_handlers)) { | ||
606 | err = -EIO; | ||
607 | pr_crit("on_each_cpu() failed"); | ||
608 | goto failed; | ||
609 | } | ||
610 | kmem_cache_destroy(hp_handler_slab); | ||
611 | pr_info("%s(): Finished\n", __func__); | ||
612 | |||
613 | return 0; | ||
614 | failed: | ||
615 | WARN_ON(1); | ||
616 | return err; | ||
617 | } | ||
diff --git a/drivers/soc/fsl/qe/gpio.c b/drivers/soc/fsl/qe/gpio.c index 333eb2215a57..0aaf429f31d5 100644 --- a/drivers/soc/fsl/qe/gpio.c +++ b/drivers/soc/fsl/qe/gpio.c | |||
@@ -41,7 +41,8 @@ struct qe_gpio_chip { | |||
41 | 41 | ||
42 | static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc) | 42 | static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc) |
43 | { | 43 | { |
44 | struct qe_gpio_chip *qe_gc = gpiochip_get_data(&mm_gc->gc); | 44 | struct qe_gpio_chip *qe_gc = |
45 | container_of(mm_gc, struct qe_gpio_chip, mm_gc); | ||
45 | struct qe_pio_regs __iomem *regs = mm_gc->regs; | 46 | struct qe_pio_regs __iomem *regs = mm_gc->regs; |
46 | 47 | ||
47 | qe_gc->cpdata = in_be32(®s->cpdata); | 48 | qe_gc->cpdata = in_be32(®s->cpdata); |
diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c index 7026507e6f1d..2707a827261b 100644 --- a/drivers/soc/fsl/qe/qe.c +++ b/drivers/soc/fsl/qe/qe.c | |||
@@ -69,8 +69,8 @@ static phys_addr_t qebase = -1; | |||
69 | phys_addr_t get_qe_base(void) | 69 | phys_addr_t get_qe_base(void) |
70 | { | 70 | { |
71 | struct device_node *qe; | 71 | struct device_node *qe; |
72 | int size; | 72 | int ret; |
73 | const u32 *prop; | 73 | struct resource res; |
74 | 74 | ||
75 | if (qebase != -1) | 75 | if (qebase != -1) |
76 | return qebase; | 76 | return qebase; |
@@ -82,9 +82,9 @@ phys_addr_t get_qe_base(void) | |||
82 | return qebase; | 82 | return qebase; |
83 | } | 83 | } |
84 | 84 | ||
85 | prop = of_get_property(qe, "reg", &size); | 85 | ret = of_address_to_resource(qe, 0, &res); |
86 | if (prop && size >= sizeof(*prop)) | 86 | if (!ret) |
87 | qebase = of_translate_address(qe, prop); | 87 | qebase = res.start; |
88 | of_node_put(qe); | 88 | of_node_put(qe); |
89 | 89 | ||
90 | return qebase; | 90 | return qebase; |
diff --git a/drivers/soc/fsl/qe/qe_common.c b/drivers/soc/fsl/qe/qe_common.c index 41eff805a904..104e68d9b84f 100644 --- a/drivers/soc/fsl/qe/qe_common.c +++ b/drivers/soc/fsl/qe/qe_common.c | |||
@@ -70,6 +70,11 @@ int cpm_muram_init(void) | |||
70 | } | 70 | } |
71 | 71 | ||
72 | muram_pool = gen_pool_create(0, -1); | 72 | muram_pool = gen_pool_create(0, -1); |
73 | if (!muram_pool) { | ||
74 | pr_err("Cannot allocate memory pool for CPM/QE muram"); | ||
75 | ret = -ENOMEM; | ||
76 | goto out_muram; | ||
77 | } | ||
73 | muram_pbase = of_translate_address(np, zero); | 78 | muram_pbase = of_translate_address(np, zero); |
74 | if (muram_pbase == (phys_addr_t)OF_BAD_ADDR) { | 79 | if (muram_pbase == (phys_addr_t)OF_BAD_ADDR) { |
75 | pr_err("Cannot translate zero through CPM muram node"); | 80 | pr_err("Cannot translate zero through CPM muram node"); |
@@ -116,6 +121,9 @@ static unsigned long cpm_muram_alloc_common(unsigned long size, | |||
116 | struct muram_block *entry; | 121 | struct muram_block *entry; |
117 | unsigned long start; | 122 | unsigned long start; |
118 | 123 | ||
124 | if (!muram_pool && cpm_muram_init()) | ||
125 | goto out2; | ||
126 | |||
119 | start = gen_pool_alloc_algo(muram_pool, size, algo, data); | 127 | start = gen_pool_alloc_algo(muram_pool, size, algo, data); |
120 | if (!start) | 128 | if (!start) |
121 | goto out2; | 129 | goto out2; |
diff --git a/drivers/soc/fsl/qe/qe_tdm.c b/drivers/soc/fsl/qe/qe_tdm.c index 5e48b1470178..a1048b44e6b9 100644 --- a/drivers/soc/fsl/qe/qe_tdm.c +++ b/drivers/soc/fsl/qe/qe_tdm.c | |||
@@ -99,7 +99,7 @@ int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm, | |||
99 | utdm->tdm_port = val; | 99 | utdm->tdm_port = val; |
100 | ut_info->uf_info.tdm_num = utdm->tdm_port; | 100 | ut_info->uf_info.tdm_num = utdm->tdm_port; |
101 | 101 | ||
102 | if (of_get_property(np, "fsl,tdm-internal-loopback", NULL)) | 102 | if (of_property_read_bool(np, "fsl,tdm-internal-loopback")) |
103 | utdm->tdm_mode = TDM_INTERNAL_LOOPBACK; | 103 | utdm->tdm_mode = TDM_INTERNAL_LOOPBACK; |
104 | else | 104 | else |
105 | utdm->tdm_mode = TDM_NORMAL; | 105 | utdm->tdm_mode = TDM_NORMAL; |
@@ -167,7 +167,7 @@ int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm, | |||
167 | } | 167 | } |
168 | 168 | ||
169 | if (siram_init_flag == 0) { | 169 | if (siram_init_flag == 0) { |
170 | memset_io(utdm->siram, 0, res->end - res->start + 1); | 170 | memset_io(utdm->siram, 0, resource_size(res)); |
171 | siram_init_flag = 1; | 171 | siram_init_flag = 1; |
172 | } | 172 | } |
173 | 173 | ||