author     Michael Ellerman <mpe@ellerman.id.au>  2016-10-11 05:07:56 -0400
committer  Michael Ellerman <mpe@ellerman.id.au>  2016-10-11 05:07:56 -0400
commit     065397a969a0f80624598c5030c2551abbd986fd (patch)
tree       60a4c453e6b494c8b3973497c577efa2f10102e4
parent     8321564a11bbeadffcc7d6335bcf3c07e5c397a3 (diff)
parent     e0b80f00bb96b925995d53980e0c764430bedb42 (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/scottwood/linux into next
Freescale updates from Scott: "Highlights include qbman support (a prerequisite for datapath drivers such as ethernet), a PCI DMA fix+improvement, reset handler changes, more 8xx optimizations, and some cleanups and fixes."
-rw-r--r--  arch/powerpc/Makefile | 4
-rw-r--r--  arch/powerpc/configs/dpaa.config | 1
-rw-r--r--  arch/powerpc/include/asm/cputable.h | 1
-rw-r--r--  arch/powerpc/include/asm/hw_irq.h | 6
-rw-r--r--  arch/powerpc/include/asm/ppc-opcode.h | 1
-rw-r--r--  arch/powerpc/include/asm/reg.h | 2
-rw-r--r--  arch/powerpc/include/asm/reg_8xx.h | 4
-rw-r--r--  arch/powerpc/kernel/cputable.c | 1
-rw-r--r--  arch/powerpc/kernel/head_8xx.S | 136
-rw-r--r--  arch/powerpc/kernel/setup-common.c | 27
-rw-r--r--  arch/powerpc/kernel/traps.c | 45
-rw-r--r--  arch/powerpc/platforms/82xx/Kconfig | 4
-rw-r--r--  arch/powerpc/platforms/82xx/ep8248e.c | 4
-rw-r--r--  arch/powerpc/platforms/83xx/asp834x.c | 4
-rw-r--r--  arch/powerpc/platforms/83xx/km83xx.c | 5
-rw-r--r--  arch/powerpc/platforms/83xx/misc.c | 8
-rw-r--r--  arch/powerpc/platforms/83xx/mpc830x_rdb.c | 5
-rw-r--r--  arch/powerpc/platforms/83xx/mpc831x_rdb.c | 5
-rw-r--r--  arch/powerpc/platforms/83xx/mpc832x_mds.c | 5
-rw-r--r--  arch/powerpc/platforms/83xx/mpc832x_rdb.c | 5
-rw-r--r--  arch/powerpc/platforms/83xx/mpc834x_itx.c | 5
-rw-r--r--  arch/powerpc/platforms/83xx/mpc834x_mds.c | 5
-rw-r--r--  arch/powerpc/platforms/83xx/mpc836x_mds.c | 5
-rw-r--r--  arch/powerpc/platforms/83xx/mpc836x_rdk.c | 5
-rw-r--r--  arch/powerpc/platforms/83xx/mpc837x_mds.c | 5
-rw-r--r--  arch/powerpc/platforms/83xx/mpc837x_rdb.c | 5
-rw-r--r--  arch/powerpc/platforms/83xx/mpc83xx.h | 1
-rw-r--r--  arch/powerpc/platforms/83xx/sbc834x.c | 5
-rw-r--r--  arch/powerpc/platforms/85xx/Kconfig | 2
-rw-r--r--  arch/powerpc/platforms/85xx/bsc913x_qds.c | 1
-rw-r--r--  arch/powerpc/platforms/85xx/bsc913x_rdb.c | 1
-rw-r--r--  arch/powerpc/platforms/85xx/c293pcie.c | 1
-rw-r--r--  arch/powerpc/platforms/85xx/corenet_generic.c | 1
-rw-r--r--  arch/powerpc/platforms/85xx/ge_imp3a.c | 1
-rw-r--r--  arch/powerpc/platforms/85xx/mpc8536_ds.c | 1
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_ads.c | 1
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_cds.c | 25
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_ds.c | 3
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_mds.c | 12
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_rdb.c | 10
-rw-r--r--  arch/powerpc/platforms/85xx/mvme2500.c | 1
-rw-r--r--  arch/powerpc/platforms/85xx/p1010rdb.c | 1
-rw-r--r--  arch/powerpc/platforms/85xx/p1022_ds.c | 1
-rw-r--r--  arch/powerpc/platforms/85xx/p1022_rdk.c | 1
-rw-r--r--  arch/powerpc/platforms/85xx/p1023_rdb.c | 1
-rw-r--r--  arch/powerpc/platforms/85xx/ppa8548.c | 1
-rw-r--r--  arch/powerpc/platforms/85xx/qemu_e500.c | 1
-rw-r--r--  arch/powerpc/platforms/85xx/sbc8548.c | 1
-rw-r--r--  arch/powerpc/platforms/85xx/sgy_cts1000.c | 8
-rw-r--r--  arch/powerpc/platforms/85xx/socrates.c | 1
-rw-r--r--  arch/powerpc/platforms/85xx/stx_gp3.c | 1
-rw-r--r--  arch/powerpc/platforms/85xx/tqm85xx.c | 1
-rw-r--r--  arch/powerpc/platforms/85xx/twr_p102x.c | 1
-rw-r--r--  arch/powerpc/platforms/85xx/xes_mpc85xx.c | 3
-rw-r--r--  arch/powerpc/platforms/86xx/gef_ppc9a.c | 1
-rw-r--r--  arch/powerpc/platforms/86xx/gef_sbc310.c | 1
-rw-r--r--  arch/powerpc/platforms/86xx/gef_sbc610.c | 1
-rw-r--r--  arch/powerpc/platforms/86xx/mpc8610_hpcd.c | 1
-rw-r--r--  arch/powerpc/platforms/86xx/mpc86xx_hpcn.c | 1
-rw-r--r--  arch/powerpc/platforms/86xx/mvme7100.c | 1
-rw-r--r--  arch/powerpc/platforms/86xx/sbc8641d.c | 1
-rw-r--r--  arch/powerpc/sysdev/cpm1.c | 2
-rw-r--r--  arch/powerpc/sysdev/cpm2.c | 4
-rw-r--r--  arch/powerpc/sysdev/cpm_common.c | 15
-rw-r--r--  arch/powerpc/sysdev/fsl_pci.c | 12
-rw-r--r--  arch/powerpc/sysdev/fsl_soc.c | 33
-rw-r--r--  arch/powerpc/sysdev/fsl_soc.h | 2
-rw-r--r--  arch/powerpc/sysdev/mpic.c | 2
-rw-r--r--  drivers/soc/Kconfig | 1
-rw-r--r--  drivers/soc/fsl/Makefile | 1
-rw-r--r--  drivers/soc/fsl/qbman/Kconfig | 67
-rw-r--r--  drivers/soc/fsl/qbman/Makefile | 12
-rw-r--r--  drivers/soc/fsl/qbman/bman.c | 797
-rw-r--r--  drivers/soc/fsl/qbman/bman_ccsr.c | 263
-rw-r--r--  drivers/soc/fsl/qbman/bman_portal.c | 219
-rw-r--r--  drivers/soc/fsl/qbman/bman_priv.h | 80
-rw-r--r--  drivers/soc/fsl/qbman/bman_test.c | 53
-rw-r--r--  drivers/soc/fsl/qbman/bman_test.h | 35
-rw-r--r--  drivers/soc/fsl/qbman/bman_test_api.c | 151
-rw-r--r--  drivers/soc/fsl/qbman/dpaa_sys.h | 103
-rw-r--r--  drivers/soc/fsl/qbman/qman.c | 2881
-rw-r--r--  drivers/soc/fsl/qbman/qman_ccsr.c | 808
-rw-r--r--  drivers/soc/fsl/qbman/qman_portal.c | 355
-rw-r--r--  drivers/soc/fsl/qbman/qman_priv.h | 371
-rw-r--r--  drivers/soc/fsl/qbman/qman_test.c | 62
-rw-r--r--  drivers/soc/fsl/qbman/qman_test.h | 36
-rw-r--r--  drivers/soc/fsl/qbman/qman_test_api.c | 252
-rw-r--r--  drivers/soc/fsl/qbman/qman_test_stash.c | 617
-rw-r--r--  drivers/soc/fsl/qe/gpio.c | 3
-rw-r--r--  drivers/soc/fsl/qe/qe.c | 10
-rw-r--r--  drivers/soc/fsl/qe/qe_common.c | 8
-rw-r--r--  drivers/soc/fsl/qe/qe_tdm.c | 4
-rw-r--r--  include/soc/fsl/bman.h | 129
-rw-r--r--  include/soc/fsl/qman.h | 1074
94 files changed, 8612 insertions, 248 deletions
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 50d020ac0f48..617dece67924 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -318,12 +318,12 @@ mpc85xx_smp_defconfig:
 PHONY += corenet32_smp_defconfig
 corenet32_smp_defconfig:
        $(call merge_into_defconfig,corenet_basic_defconfig,\
-               85xx-32bit 85xx-smp 85xx-hw fsl-emb-nonhw)
+               85xx-32bit 85xx-smp 85xx-hw fsl-emb-nonhw dpaa)
 
 PHONY += corenet64_smp_defconfig
 corenet64_smp_defconfig:
        $(call merge_into_defconfig,corenet_basic_defconfig,\
-               85xx-64bit 85xx-smp altivec 85xx-hw fsl-emb-nonhw)
+               85xx-64bit 85xx-smp altivec 85xx-hw fsl-emb-nonhw dpaa)
 
 PHONY += mpc86xx_defconfig
 mpc86xx_defconfig:
diff --git a/arch/powerpc/configs/dpaa.config b/arch/powerpc/configs/dpaa.config
new file mode 100644
index 000000000000..efa99c048543
--- /dev/null
+++ b/arch/powerpc/configs/dpaa.config
@@ -0,0 +1 @@
+CONFIG_FSL_DPAA=y
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index f752e6f7cfbe..ab68d0ee7725 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -43,6 +43,7 @@ extern int machine_check_e500mc(struct pt_regs *regs);
 extern int machine_check_e500(struct pt_regs *regs);
 extern int machine_check_e200(struct pt_regs *regs);
 extern int machine_check_47x(struct pt_regs *regs);
+int machine_check_8xx(struct pt_regs *regs);
 
 extern void cpu_down_flush_e500v2(void);
 extern void cpu_down_flush_e500mc(void);
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index c7d82ff62a33..eba60416536e 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -155,6 +155,8 @@ static inline unsigned long arch_local_irq_save(void)
        unsigned long flags = arch_local_save_flags();
 #ifdef CONFIG_BOOKE
        asm volatile("wrteei 0" : : : "memory");
+#elif defined(CONFIG_PPC_8xx)
+       wrtspr(SPRN_EID);
 #else
        SET_MSR_EE(flags & ~MSR_EE);
 #endif
@@ -165,6 +167,8 @@ static inline void arch_local_irq_disable(void)
 {
 #ifdef CONFIG_BOOKE
        asm volatile("wrteei 0" : : : "memory");
+#elif defined(CONFIG_PPC_8xx)
+       wrtspr(SPRN_EID);
 #else
        arch_local_irq_save();
 #endif
@@ -174,6 +178,8 @@ static inline void arch_local_irq_enable(void)
 {
 #ifdef CONFIG_BOOKE
        asm volatile("wrteei 1" : : : "memory");
+#elif defined(CONFIG_PPC_8xx)
+       wrtspr(SPRN_EIE);
 #else
        unsigned long msr = mfmsr();
        SET_MSR_EE(msr | MSR_EE);
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 54ff8ce7fa96..0132831b3081 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -152,6 +152,7 @@
 #define PPC_INST_LWSYNC                 0x7c2004ac
 #define PPC_INST_SYNC                   0x7c0004ac
 #define PPC_INST_SYNC_MASK              0xfc0007fe
+#define PPC_INST_ISYNC                  0x4c00012c
 #define PPC_INST_LXVD2X                 0x7c000698
 #define PPC_INST_MCRXR                  0x7c000400
 #define PPC_INST_MCRXR_MASK             0xfc0007fe
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index a8f63bcb71af..cff79885c2fd 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1249,6 +1249,8 @@ static inline void mtmsr_isync(unsigned long val)
                                     : "r" ((unsigned long)(v)) \
                                     : "memory")
 #endif
+#define wrtspr(rn)     asm volatile("mtspr " __stringify(rn) ",0" : \
+                                    : : "memory")
 
 extern unsigned long msr_check_and_set(unsigned long bits);
 extern bool strict_msr_control;
diff --git a/arch/powerpc/include/asm/reg_8xx.h b/arch/powerpc/include/asm/reg_8xx.h
index 94d01f81e668..0197e12f7d48 100644
--- a/arch/powerpc/include/asm/reg_8xx.h
+++ b/arch/powerpc/include/asm/reg_8xx.h
@@ -25,6 +25,10 @@
 #define SPRN_MD_RAM0   825
 #define SPRN_MD_RAM1   826
 
+/* Special MSR manipulation registers */
+#define SPRN_EIE       80      /* External interrupt enable (EE=1, RI=1) */
+#define SPRN_EID       81      /* External interrupt disable (EE=0, RI=1) */
+
 /* Commands.  Only the first few are available to the instruction cache.
 */
 #define IDC_ENABLE     0x02000000      /* Cache enable */
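
Taken together, the three small hunks above give the 8xx a one-instruction interrupt mask/unmask path: hw_irq.h calls wrtspr(), reg.h defines wrtspr() as a bare mtspr with r0 as the (ignored) source operand, and the SPR numbers are the 8xx EIE/EID registers defined here. A minimal sketch of the resulting fast path follows; the helper names are hypothetical, and the behaviour described in the comments is the documented 8xx side effect of accessing these special registers rather than anything added by this patch.

static inline void sketch_8xx_irq_disable(void)        /* hypothetical name */
{
        wrtspr(SPRN_EID);       /* "mtspr 81,r0": value ignored, MSR[EE]=0, MSR[RI]=1 */
}

static inline void sketch_8xx_irq_enable(void)         /* hypothetical name */
{
        wrtspr(SPRN_EIE);       /* "mtspr 80,r0": value ignored, MSR[EE]=1, MSR[RI]=1 */
}

This replaces the generic mfmsr/mtmsr read-modify-write sequence with a single instruction on the interrupt enable/disable path.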
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 6c4646ac9234..6a82ef039c50 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1248,6 +1248,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
                .mmu_features           = MMU_FTR_TYPE_8xx,
                .icache_bsize           = 16,
                .dcache_bsize           = 16,
+               .machine_check          = machine_check_8xx,
                .platform               = "ppc823",
        },
 #endif /* CONFIG_8xx */
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 3a185c51ce8f..033a6b735487 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -226,7 +226,7 @@ i##n: \
                          ret_from_except)
 
 /* System reset */
-       EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)
+       EXCEPTION(0x100, Reset, system_reset_exception, EXC_XFER_STD)
 
 /* Machine check */
        . = 0x200
@@ -321,7 +321,7 @@ SystemCall:
 #endif
 
 InstructionTLBMiss:
-#ifdef CONFIG_8xx_CPU6
+#if defined(CONFIG_8xx_CPU6) || defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC)
        mtspr   SPRN_SPRG_SCRATCH2, r3
 #endif
        EXCEPTION_PROLOG_0
@@ -329,23 +329,20 @@ InstructionTLBMiss:
329 /* If we are faulting a kernel address, we have to use the 329 /* If we are faulting a kernel address, we have to use the
330 * kernel page tables. 330 * kernel page tables.
331 */ 331 */
332 mfspr r10, SPRN_SRR0 /* Get effective address of fault */
333 INVALIDATE_ADJACENT_PAGES_CPU15(r11, r10)
332#if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) 334#if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC)
333 /* Only modules will cause ITLB Misses as we always 335 /* Only modules will cause ITLB Misses as we always
334 * pin the first 8MB of kernel memory */ 336 * pin the first 8MB of kernel memory */
335 mfspr r11, SPRN_SRR0 /* Get effective address of fault */ 337 mfcr r3
336 INVALIDATE_ADJACENT_PAGES_CPU15(r10, r11) 338 IS_KERNEL(r11, r10)
337 mfcr r10 339#endif
338 IS_KERNEL(r11, r11)
339 mfspr r11, SPRN_M_TW /* Get level 1 table */ 340 mfspr r11, SPRN_M_TW /* Get level 1 table */
341#if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC)
340 BRANCH_UNLESS_KERNEL(3f) 342 BRANCH_UNLESS_KERNEL(3f)
341 lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha 343 lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha
3423: 3443:
343 mtcr r10 345 mtcr r3
344 mfspr r10, SPRN_SRR0 /* Get effective address of fault */
345#else
346 mfspr r10, SPRN_SRR0 /* Get effective address of fault */
347 INVALIDATE_ADJACENT_PAGES_CPU15(r11, r10)
348 mfspr r11, SPRN_M_TW /* Get level 1 table base address */
349#endif 346#endif
350 /* Insert level 1 index */ 347 /* Insert level 1 index */
351 rlwimi r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29 348 rlwimi r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
@@ -377,58 +374,39 @@ InstructionTLBMiss:
377 MTSPR_CPU6(SPRN_MI_RPN, r10, r3) /* Update TLB entry */ 374 MTSPR_CPU6(SPRN_MI_RPN, r10, r3) /* Update TLB entry */
378 375
379 /* Restore registers */ 376 /* Restore registers */
380#ifdef CONFIG_8xx_CPU6 377#if defined(CONFIG_8xx_CPU6) || defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC)
381 mfspr r3, SPRN_SPRG_SCRATCH2 378 mfspr r3, SPRN_SPRG_SCRATCH2
382#endif 379#endif
383 EXCEPTION_EPILOG_0 380 EXCEPTION_EPILOG_0
384 rfi 381 rfi
385 382
386/*
387 * Bottom part of DataStoreTLBMiss handler for IMMR area
388 * not enough space in the DataStoreTLBMiss area
389 */
390DTLBMissIMMR:
391 mtcr r10
392 /* Set 512k byte guarded page and mark it valid */
393 li r10, MD_PS512K | MD_GUARDED | MD_SVALID
394 MTSPR_CPU6(SPRN_MD_TWC, r10, r11)
395 mfspr r10, SPRN_IMMR /* Get current IMMR */
396 rlwinm r10, r10, 0, 0xfff80000 /* Get 512 kbytes boundary */
397 ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SHARED | _PAGE_DIRTY | \
398 _PAGE_PRESENT | _PAGE_NO_CACHE
399 MTSPR_CPU6(SPRN_MD_RPN, r10, r11) /* Update TLB entry */
400
401 li r11, RPN_PATTERN
402 mtspr SPRN_DAR, r11 /* Tag DAR */
403 EXCEPTION_EPILOG_0
404 rfi
405
406 . = 0x1200 383 . = 0x1200
407DataStoreTLBMiss: 384DataStoreTLBMiss:
385 mtspr SPRN_SPRG_SCRATCH2, r3
408 EXCEPTION_PROLOG_0 386 EXCEPTION_PROLOG_0
409 mfcr r10 387 mfcr r3
410 388
411 /* If we are faulting a kernel address, we have to use the 389 /* If we are faulting a kernel address, we have to use the
412 * kernel page tables. 390 * kernel page tables.
413 */ 391 */
414 mfspr r11, SPRN_MD_EPN 392 mfspr r10, SPRN_MD_EPN
415 rlwinm r11, r11, 16, 0xfff8 393 rlwinm r10, r10, 16, 0xfff8
394 cmpli cr0, r10, PAGE_OFFSET@h
395 mfspr r11, SPRN_M_TW /* Get level 1 table */
396 blt+ 3f
416#ifndef CONFIG_PIN_TLB_IMMR 397#ifndef CONFIG_PIN_TLB_IMMR
417 cmpli cr0, r11, VIRT_IMMR_BASE@h 398 cmpli cr0, r10, VIRT_IMMR_BASE@h
418#endif 399#endif
419 cmpli cr7, r11, PAGE_OFFSET@h 400_ENTRY(DTLBMiss_cmp)
401 cmpli cr7, r10, (PAGE_OFFSET + 0x1800000)@h
402 lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha
420#ifndef CONFIG_PIN_TLB_IMMR 403#ifndef CONFIG_PIN_TLB_IMMR
421_ENTRY(DTLBMiss_jmp) 404_ENTRY(DTLBMiss_jmp)
422 beq- DTLBMissIMMR 405 beq- DTLBMissIMMR
423#endif 406#endif
424 bge- cr7, 4f 407 blt cr7, DTLBMissLinear
425
426 mfspr r11, SPRN_M_TW /* Get level 1 table */
4273: 4083:
428 mtcr r10 409 mtcr r3
429#ifdef CONFIG_8xx_CPU6
430 mtspr SPRN_SPRG_SCRATCH2, r3
431#endif
432 mfspr r10, SPRN_MD_EPN 410 mfspr r10, SPRN_MD_EPN
433 411
434 /* Insert level 1 index */ 412 /* Insert level 1 index */
@@ -481,30 +459,7 @@ _ENTRY(DTLBMiss_jmp)
481 MTSPR_CPU6(SPRN_MD_RPN, r10, r3) /* Update TLB entry */ 459 MTSPR_CPU6(SPRN_MD_RPN, r10, r3) /* Update TLB entry */
482 460
483 /* Restore registers */ 461 /* Restore registers */
484#ifdef CONFIG_8xx_CPU6
485 mfspr r3, SPRN_SPRG_SCRATCH2 462 mfspr r3, SPRN_SPRG_SCRATCH2
486#endif
487 mtspr SPRN_DAR, r11 /* Tag DAR */
488 EXCEPTION_EPILOG_0
489 rfi
490
4914:
492_ENTRY(DTLBMiss_cmp)
493 cmpli cr0, r11, (PAGE_OFFSET + 0x1800000)@h
494 lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha
495 bge- 3b
496
497 mtcr r10
498 /* Set 8M byte page and mark it valid */
499 li r10, MD_PS8MEG | MD_SVALID
500 MTSPR_CPU6(SPRN_MD_TWC, r10, r11)
501 mfspr r10, SPRN_MD_EPN
502 rlwinm r10, r10, 0, 0x0f800000 /* 8xx supports max 256Mb RAM */
503 ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SHARED | _PAGE_DIRTY | \
504 _PAGE_PRESENT
505 MTSPR_CPU6(SPRN_MD_RPN, r10, r11) /* Update TLB entry */
506
507 li r11, RPN_PATTERN
508 mtspr SPRN_DAR, r11 /* Tag DAR */ 463 mtspr SPRN_DAR, r11 /* Tag DAR */
509 EXCEPTION_EPILOG_0 464 EXCEPTION_EPILOG_0
510 rfi 465 rfi
@@ -570,6 +525,43 @@ DARFixed:/* Return from dcbx instruction bug workaround */
570 525
571 . = 0x2000 526 . = 0x2000
572 527
528/*
529 * Bottom part of DataStoreTLBMiss handlers for IMMR area and linear RAM.
530 * not enough space in the DataStoreTLBMiss area.
531 */
532DTLBMissIMMR:
533 mtcr r3
534 /* Set 512k byte guarded page and mark it valid */
535 li r10, MD_PS512K | MD_GUARDED | MD_SVALID
536 MTSPR_CPU6(SPRN_MD_TWC, r10, r11)
537 mfspr r10, SPRN_IMMR /* Get current IMMR */
538 rlwinm r10, r10, 0, 0xfff80000 /* Get 512 kbytes boundary */
539 ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SHARED | _PAGE_DIRTY | \
540 _PAGE_PRESENT | _PAGE_NO_CACHE
541 MTSPR_CPU6(SPRN_MD_RPN, r10, r11) /* Update TLB entry */
542
543 li r11, RPN_PATTERN
544 mtspr SPRN_DAR, r11 /* Tag DAR */
545 mfspr r3, SPRN_SPRG_SCRATCH2
546 EXCEPTION_EPILOG_0
547 rfi
548
549DTLBMissLinear:
550 mtcr r3
551 /* Set 8M byte page and mark it valid */
552 li r11, MD_PS8MEG | MD_SVALID
553 MTSPR_CPU6(SPRN_MD_TWC, r11, r3)
554 rlwinm r10, r10, 16, 0x0f800000 /* 8xx supports max 256Mb RAM */
555 ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SHARED | _PAGE_DIRTY | \
556 _PAGE_PRESENT
557 MTSPR_CPU6(SPRN_MD_RPN, r10, r11) /* Update TLB entry */
558
559 li r11, RPN_PATTERN
560 mtspr SPRN_DAR, r11 /* Tag DAR */
561 mfspr r3, SPRN_SPRG_SCRATCH2
562 EXCEPTION_EPILOG_0
563 rfi
564
573/* This is the procedure to calculate the data EA for buggy dcbx,dcbi instructions 565/* This is the procedure to calculate the data EA for buggy dcbx,dcbi instructions
574 * by decoding the registers used by the dcbx instruction and adding them. 566 * by decoding the registers used by the dcbx instruction and adding them.
575 * DAR is set to the calculated address. 567 * DAR is set to the calculated address.
@@ -586,7 +578,9 @@ FixupDAR:/* Entry point for dcbx workaround. */
586 rlwinm r11, r10, 16, 0xfff8 578 rlwinm r11, r10, 16, 0xfff8
587_ENTRY(FixupDAR_cmp) 579_ENTRY(FixupDAR_cmp)
588 cmpli cr7, r11, (PAGE_OFFSET + 0x1800000)@h 580 cmpli cr7, r11, (PAGE_OFFSET + 0x1800000)@h
589 blt- cr7, 200f 581 /* create physical page address from effective address */
582 tophys(r11, r10)
583 blt- cr7, 201f
590 lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha 584 lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha
591 /* Insert level 1 index */ 585 /* Insert level 1 index */
5923: rlwimi r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29 5863: rlwimi r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
@@ -616,10 +610,6 @@ _ENTRY(FixupDAR_cmp)
616141: mfspr r10,SPRN_SPRG_SCRATCH2 610141: mfspr r10,SPRN_SPRG_SCRATCH2
617 b DARFixed /* Nope, go back to normal TLB processing */ 611 b DARFixed /* Nope, go back to normal TLB processing */
618 612
619 /* create physical page address from effective address */
620200: tophys(r11, r10)
621 b 201b
622
623144: mfspr r10, SPRN_DSISR 613144: mfspr r10, SPRN_DSISR
624 rlwinm r10, r10,0,7,5 /* Clear store bit for buggy dcbst insn */ 614 rlwinm r10, r10,0,7,5 /* Clear store bit for buggy dcbst insn */
625 mtspr SPRN_DSISR, r10 615 mtspr SPRN_DSISR, r10
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index dba265c586df..270ee30abdcf 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -131,15 +131,26 @@ void machine_shutdown(void)
                ppc_md.machine_shutdown();
 }
 
+static void machine_hang(void)
+{
+       pr_emerg("System Halted, OK to turn off power\n");
+       local_irq_disable();
+       while (1)
+               ;
+}
+
 void machine_restart(char *cmd)
 {
        machine_shutdown();
        if (ppc_md.restart)
                ppc_md.restart(cmd);
+
        smp_send_stop();
-       printk(KERN_EMERG "System Halted, OK to turn off power\n");
-       local_irq_disable();
-       while (1) ;
+
+       do_kernel_restart(cmd);
+       mdelay(1000);
+
+       machine_hang();
 }
 
 void machine_power_off(void)
@@ -147,10 +158,9 @@ void machine_power_off(void)
        machine_shutdown();
        if (pm_power_off)
                pm_power_off();
+
        smp_send_stop();
-       printk(KERN_EMERG "System Halted, OK to turn off power\n");
-       local_irq_disable();
-       while (1) ;
+       machine_hang();
 }
 /* Used by the G5 thermal driver */
 EXPORT_SYMBOL_GPL(machine_power_off);
@@ -163,10 +173,9 @@ void machine_halt(void)
        machine_shutdown();
        if (ppc_md.halt)
                ppc_md.halt();
+
        smp_send_stop();
-       printk(KERN_EMERG "System Halted, OK to turn off power\n");
-       local_irq_disable();
-       while (1) ;
+       machine_hang();
 }
 
 
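
With ppc_md.restart now optional and machine_restart() falling through to do_kernel_restart(), a board can hook the generic restart-notifier chain instead of filling in a machine-description callback; the mpc85xx_cds change further down in this merge does exactly that. A minimal, hypothetical sketch of such a handler follows (the board name and the reset action are placeholders; the notifier API itself is the standard one from linux/reboot.h):

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

/* Hypothetical board reset hook, invoked from do_kernel_restart(). */
static int myboard_restart(struct notifier_block *this,
                           unsigned long mode, void *cmd)
{
        /* poke the board-specific reset register here */
        return NOTIFY_DONE;             /* let lower-priority handlers run too */
}

static struct notifier_block myboard_restart_nb = {
        .notifier_call  = myboard_restart,
        .priority       = 192,          /* above the default priority of 128 */
};

static int __init myboard_restart_register(void)
{
        return register_restart_handler(&myboard_restart_nb);
}

If no registered handler resets the machine, machine_restart() waits a second and then falls back to machine_hang(), as shown in the hunk above.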
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index a1f8f5641e9e..023a462725b5 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -273,7 +273,6 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
        force_sig_info(signr, &info, current);
 }
 
-#ifdef CONFIG_PPC64
 void system_reset_exception(struct pt_regs *regs)
 {
        /* See if any machine dependent calls */
@@ -291,6 +290,7 @@ void system_reset_exception(struct pt_regs *regs)
        /* What should we do here? We could issue a shutdown or hard reset. */
 }
 
+#ifdef CONFIG_PPC64
 /*
  * This function is called in real mode. Strictly no printk's please.
  *
@@ -352,12 +352,11 @@ static inline int check_io_access(struct pt_regs *regs)
                 * For the debug message, we look at the preceding
                 * load or store.
                 */
-               if (*nip == 0x60000000)         /* nop */
+               if (*nip == PPC_INST_NOP)
                        nip -= 2;
-               else if (*nip == 0x4c00012c)    /* isync */
+               else if (*nip == PPC_INST_ISYNC)
                        --nip;
-               if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
-                       /* sync or twi */
+               if (*nip == PPC_INST_SYNC || (*nip >> 26) == OP_TRAP) {
                        unsigned int rb;
 
                        --nip;
@@ -668,6 +667,31 @@ int machine_check_e200(struct pt_regs *regs)
 
        return 0;
 }
+#elif defined(CONFIG_PPC_8xx)
+int machine_check_8xx(struct pt_regs *regs)
+{
+       unsigned long reason = get_mc_reason(regs);
+
+       pr_err("Machine check in kernel mode.\n");
+       pr_err("Caused by (from SRR1=%lx): ", reason);
+       if (reason & 0x40000000)
+               pr_err("Fetch error at address %lx\n", regs->nip);
+       else
+               pr_err("Data access error at address %lx\n", regs->dar);
+
+#ifdef CONFIG_PCI
+       /* the qspan pci read routines can cause machine checks -- Cort
+        *
+        * yuck !!! that totally needs to go away ! There are better ways
+        * to deal with that than having a wart in the mcheck handler.
+        * -- BenH
+        */
+       bad_page_fault(regs, regs->dar, SIGBUS);
+       return 1;
+#else
+       return 0;
+#endif
+}
 #else
 int machine_check_generic(struct pt_regs *regs)
 {
@@ -727,17 +751,6 @@ void machine_check_exception(struct pt_regs *regs)
        if (recover > 0)
                goto bail;
 
-#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
-       /* the qspan pci read routines can cause machine checks -- Cort
-        *
-        * yuck !!! that totally needs to go away ! There are better ways
-        * to deal with that than having a wart in the mcheck handler.
-        * -- BenH
-        */
-       bad_page_fault(regs, regs->dar, SIGBUS);
-       goto bail;
-#endif
-
        if (debugger_fault_handler(regs))
                goto bail;
 
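
The new machine_check_8xx() is reached through the cpu_specs[] entry changed in cputable.c above, and the 8xx-specific PCI wart removed from machine_check_exception() now lives inside that handler. Roughly, paraphrased rather than quoted from the kernel source, the generic handler picks a recovery hook like this:

/* Simplified sketch of the dispatch inside machine_check_exception(). */
static int machine_check_try_recover(struct pt_regs *regs)     /* hypothetical helper */
{
        int recover = 0;

        if (ppc_md.machine_check_exception)             /* board-level hook gets first chance */
                recover = ppc_md.machine_check_exception(regs);
        else if (cur_cpu_spec->machine_check)           /* then the cpu_specs[] hook,          */
                recover = cur_cpu_spec->machine_check(regs); /* machine_check_8xx() on the 8xx */

        return recover;                                 /* > 0 means the fault was recovered */
}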
diff --git a/arch/powerpc/platforms/82xx/Kconfig b/arch/powerpc/platforms/82xx/Kconfig
index 7c7df4003820..994d1a959e20 100644
--- a/arch/powerpc/platforms/82xx/Kconfig
+++ b/arch/powerpc/platforms/82xx/Kconfig
@@ -30,8 +30,8 @@ config EP8248E
        select 8272
        select 8260
        select FSL_SOC
-       select PHYLIB
-       select MDIO_BITBANG
+       select PHYLIB if NETDEVICES
+       select MDIO_BITBANG if PHYLIB
        help
          This enables support for the Embedded Planet EP8248E board.
 
diff --git a/arch/powerpc/platforms/82xx/ep8248e.c b/arch/powerpc/platforms/82xx/ep8248e.c
index cdab847749e6..8fec050f2d5b 100644
--- a/arch/powerpc/platforms/82xx/ep8248e.c
+++ b/arch/powerpc/platforms/82xx/ep8248e.c
@@ -298,7 +298,9 @@ static const struct of_device_id of_bus_ids[] __initconst = {
 static int __init declare_of_platform_devices(void)
 {
        of_platform_bus_probe(NULL, of_bus_ids, NULL);
-       platform_driver_register(&ep8248e_mdio_driver);
+
+       if (IS_ENABLED(CONFIG_MDIO_BITBANG))
+               platform_driver_register(&ep8248e_mdio_driver);
 
        return 0;
 }
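
Because MDIO_BITBANG can now be disabled, the registration is guarded with IS_ENABLED() rather than an #ifdef: both branches are still parsed and type-checked, and the dead call is dropped at compile time. A small illustrative sketch of the idiom, with a hypothetical function name and an else branch added only to show that either path compiles:

static int __init ep8248e_mdio_example_init(void)       /* hypothetical helper */
{
        if (IS_ENABLED(CONFIG_MDIO_BITBANG))    /* 1 when the option is =y or =m, 0 otherwise */
                platform_driver_register(&ep8248e_mdio_driver);
        else
                pr_info("ep8248e: MDIO bitbang support not configured\n");

        return 0;
}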
diff --git a/arch/powerpc/platforms/83xx/asp834x.c b/arch/powerpc/platforms/83xx/asp834x.c
index 17e54339f8d9..575afd6eb36a 100644
--- a/arch/powerpc/platforms/83xx/asp834x.c
+++ b/arch/powerpc/platforms/83xx/asp834x.c
@@ -30,9 +30,7 @@
30 */ 30 */
31static void __init asp834x_setup_arch(void) 31static void __init asp834x_setup_arch(void)
32{ 32{
33 if (ppc_md.progress) 33 mpc83xx_setup_arch();
34 ppc_md.progress("asp834x_setup_arch()", 0);
35
36 mpc834x_usb_cfg(); 34 mpc834x_usb_cfg();
37} 35}
38 36
diff --git a/arch/powerpc/platforms/83xx/km83xx.c b/arch/powerpc/platforms/83xx/km83xx.c
index e7fbd6366abb..d8642a4afc74 100644
--- a/arch/powerpc/platforms/83xx/km83xx.c
+++ b/arch/powerpc/platforms/83xx/km83xx.c
@@ -130,10 +130,7 @@ static void __init mpc83xx_km_setup_arch(void)
130 struct device_node *np; 130 struct device_node *np;
131#endif 131#endif
132 132
133 if (ppc_md.progress) 133 mpc83xx_setup_arch();
134 ppc_md.progress("kmpbec83xx_setup_arch()", 0);
135
136 mpc83xx_setup_pci();
137 134
138#ifdef CONFIG_QUICC_ENGINE 135#ifdef CONFIG_QUICC_ENGINE
139 np = of_find_node_by_name(NULL, "par_io"); 136 np = of_find_node_by_name(NULL, "par_io");
diff --git a/arch/powerpc/platforms/83xx/misc.c b/arch/powerpc/platforms/83xx/misc.c
index 8899aa9d11f5..d75c9816a5c9 100644
--- a/arch/powerpc/platforms/83xx/misc.c
+++ b/arch/powerpc/platforms/83xx/misc.c
@@ -142,3 +142,11 @@ void __init mpc83xx_setup_pci(void)
                mpc83xx_add_bridge(np);
 }
 #endif
+
+void __init mpc83xx_setup_arch(void)
+{
+       if (ppc_md.progress)
+               ppc_md.progress("mpc83xx_setup_arch()", 0);
+
+       mpc83xx_setup_pci();
+}
diff --git a/arch/powerpc/platforms/83xx/mpc830x_rdb.c b/arch/powerpc/platforms/83xx/mpc830x_rdb.c
index 040d5d085467..272c41c387b9 100644
--- a/arch/powerpc/platforms/83xx/mpc830x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc830x_rdb.c
@@ -27,10 +27,7 @@
27 */ 27 */
28static void __init mpc830x_rdb_setup_arch(void) 28static void __init mpc830x_rdb_setup_arch(void)
29{ 29{
30 if (ppc_md.progress) 30 mpc83xx_setup_arch();
31 ppc_md.progress("mpc830x_rdb_setup_arch()", 0);
32
33 mpc83xx_setup_pci();
34 mpc831x_usb_cfg(); 31 mpc831x_usb_cfg();
35} 32}
36 33
diff --git a/arch/powerpc/platforms/83xx/mpc831x_rdb.c b/arch/powerpc/platforms/83xx/mpc831x_rdb.c
index 40e0d8307b59..fd80fd570e67 100644
--- a/arch/powerpc/platforms/83xx/mpc831x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc831x_rdb.c
@@ -28,10 +28,7 @@
28 */ 28 */
29static void __init mpc831x_rdb_setup_arch(void) 29static void __init mpc831x_rdb_setup_arch(void)
30{ 30{
31 if (ppc_md.progress) 31 mpc83xx_setup_arch();
32 ppc_md.progress("mpc831x_rdb_setup_arch()", 0);
33
34 mpc83xx_setup_pci();
35 mpc831x_usb_cfg(); 32 mpc831x_usb_cfg();
36} 33}
37 34
diff --git a/arch/powerpc/platforms/83xx/mpc832x_mds.c b/arch/powerpc/platforms/83xx/mpc832x_mds.c
index cdfa47c4d394..bb7b25acf26f 100644
--- a/arch/powerpc/platforms/83xx/mpc832x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc832x_mds.c
@@ -58,8 +58,7 @@ static void __init mpc832x_sys_setup_arch(void)
58 struct device_node *np; 58 struct device_node *np;
59 u8 __iomem *bcsr_regs = NULL; 59 u8 __iomem *bcsr_regs = NULL;
60 60
61 if (ppc_md.progress) 61 mpc83xx_setup_arch();
62 ppc_md.progress("mpc832x_sys_setup_arch()", 0);
63 62
64 /* Map BCSR area */ 63 /* Map BCSR area */
65 np = of_find_node_by_name(NULL, "bcsr"); 64 np = of_find_node_by_name(NULL, "bcsr");
@@ -71,8 +70,6 @@ static void __init mpc832x_sys_setup_arch(void)
71 of_node_put(np); 70 of_node_put(np);
72 } 71 }
73 72
74 mpc83xx_setup_pci();
75
76#ifdef CONFIG_QUICC_ENGINE 73#ifdef CONFIG_QUICC_ENGINE
77 if ((np = of_find_node_by_name(NULL, "par_io")) != NULL) { 74 if ((np = of_find_node_by_name(NULL, "par_io")) != NULL) {
78 par_io_init(np); 75 par_io_init(np);
diff --git a/arch/powerpc/platforms/83xx/mpc832x_rdb.c b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
index 0d6a62fc5864..d7c9b186954d 100644
--- a/arch/powerpc/platforms/83xx/mpc832x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
@@ -197,10 +197,7 @@ static void __init mpc832x_rdb_setup_arch(void)
197 struct device_node *np; 197 struct device_node *np;
198#endif 198#endif
199 199
200 if (ppc_md.progress) 200 mpc83xx_setup_arch();
201 ppc_md.progress("mpc832x_rdb_setup_arch()", 0);
202
203 mpc83xx_setup_pci();
204 201
205#ifdef CONFIG_QUICC_ENGINE 202#ifdef CONFIG_QUICC_ENGINE
206 if ((np = of_find_node_by_name(NULL, "par_io")) != NULL) { 203 if ((np = of_find_node_by_name(NULL, "par_io")) != NULL) {
diff --git a/arch/powerpc/platforms/83xx/mpc834x_itx.c b/arch/powerpc/platforms/83xx/mpc834x_itx.c
index 8fd0c1e8b182..73a5267df497 100644
--- a/arch/powerpc/platforms/83xx/mpc834x_itx.c
+++ b/arch/powerpc/platforms/83xx/mpc834x_itx.c
@@ -57,10 +57,7 @@ machine_device_initcall(mpc834x_itx, mpc834x_itx_declare_of_platform_devices);
57 */ 57 */
58static void __init mpc834x_itx_setup_arch(void) 58static void __init mpc834x_itx_setup_arch(void)
59{ 59{
60 if (ppc_md.progress) 60 mpc83xx_setup_arch();
61 ppc_md.progress("mpc834x_itx_setup_arch()", 0);
62
63 mpc83xx_setup_pci();
64 61
65 mpc834x_usb_cfg(); 62 mpc834x_usb_cfg();
66} 63}
diff --git a/arch/powerpc/platforms/83xx/mpc834x_mds.c b/arch/powerpc/platforms/83xx/mpc834x_mds.c
index eeaee6123bb3..009cfc18a4ee 100644
--- a/arch/powerpc/platforms/83xx/mpc834x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc834x_mds.c
@@ -76,10 +76,7 @@ static int mpc834xemds_usb_cfg(void)
76 */ 76 */
77static void __init mpc834x_mds_setup_arch(void) 77static void __init mpc834x_mds_setup_arch(void)
78{ 78{
79 if (ppc_md.progress) 79 mpc83xx_setup_arch();
80 ppc_md.progress("mpc834x_mds_setup_arch()", 0);
81
82 mpc83xx_setup_pci();
83 80
84 mpc834xemds_usb_cfg(); 81 mpc834xemds_usb_cfg();
85} 82}
diff --git a/arch/powerpc/platforms/83xx/mpc836x_mds.c b/arch/powerpc/platforms/83xx/mpc836x_mds.c
index dacf4c2df069..4fc3051c2b2e 100644
--- a/arch/powerpc/platforms/83xx/mpc836x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc836x_mds.c
@@ -66,8 +66,7 @@ static void __init mpc836x_mds_setup_arch(void)
66 struct device_node *np; 66 struct device_node *np;
67 u8 __iomem *bcsr_regs = NULL; 67 u8 __iomem *bcsr_regs = NULL;
68 68
69 if (ppc_md.progress) 69 mpc83xx_setup_arch();
70 ppc_md.progress("mpc836x_mds_setup_arch()", 0);
71 70
72 /* Map BCSR area */ 71 /* Map BCSR area */
73 np = of_find_node_by_name(NULL, "bcsr"); 72 np = of_find_node_by_name(NULL, "bcsr");
@@ -79,8 +78,6 @@ static void __init mpc836x_mds_setup_arch(void)
79 of_node_put(np); 78 of_node_put(np);
80 } 79 }
81 80
82 mpc83xx_setup_pci();
83
84#ifdef CONFIG_QUICC_ENGINE 81#ifdef CONFIG_QUICC_ENGINE
85 if ((np = of_find_node_by_name(NULL, "par_io")) != NULL) { 82 if ((np = of_find_node_by_name(NULL, "par_io")) != NULL) {
86 par_io_init(np); 83 par_io_init(np);
diff --git a/arch/powerpc/platforms/83xx/mpc836x_rdk.c b/arch/powerpc/platforms/83xx/mpc836x_rdk.c
index cf67ac93ddcb..93f024fd9b45 100644
--- a/arch/powerpc/platforms/83xx/mpc836x_rdk.c
+++ b/arch/powerpc/platforms/83xx/mpc836x_rdk.c
@@ -31,10 +31,7 @@ machine_device_initcall(mpc836x_rdk, mpc83xx_declare_of_platform_devices);
31 31
32static void __init mpc836x_rdk_setup_arch(void) 32static void __init mpc836x_rdk_setup_arch(void)
33{ 33{
34 if (ppc_md.progress) 34 mpc83xx_setup_arch();
35 ppc_md.progress("mpc836x_rdk_setup_arch()", 0);
36
37 mpc83xx_setup_pci();
38} 35}
39 36
40/* 37/*
diff --git a/arch/powerpc/platforms/83xx/mpc837x_mds.c b/arch/powerpc/platforms/83xx/mpc837x_mds.c
index 652b97d699c9..3b34cc1f626c 100644
--- a/arch/powerpc/platforms/83xx/mpc837x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc837x_mds.c
@@ -79,10 +79,7 @@ out:
79 */ 79 */
80static void __init mpc837x_mds_setup_arch(void) 80static void __init mpc837x_mds_setup_arch(void)
81{ 81{
82 if (ppc_md.progress) 82 mpc83xx_setup_arch();
83 ppc_md.progress("mpc837x_mds_setup_arch()", 0);
84
85 mpc83xx_setup_pci();
86 mpc837xmds_usb_cfg(); 83 mpc837xmds_usb_cfg();
87} 84}
88 85
diff --git a/arch/powerpc/platforms/83xx/mpc837x_rdb.c b/arch/powerpc/platforms/83xx/mpc837x_rdb.c
index 667731d81676..0c55fa6af2d5 100644
--- a/arch/powerpc/platforms/83xx/mpc837x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc837x_rdb.c
@@ -50,10 +50,7 @@ static void mpc837x_rdb_sd_cfg(void)
50 */ 50 */
51static void __init mpc837x_rdb_setup_arch(void) 51static void __init mpc837x_rdb_setup_arch(void)
52{ 52{
53 if (ppc_md.progress) 53 mpc83xx_setup_arch();
54 ppc_md.progress("mpc837x_rdb_setup_arch()", 0);
55
56 mpc83xx_setup_pci();
57 mpc837x_usb_cfg(); 54 mpc837x_usb_cfg();
58 mpc837x_rdb_sd_cfg(); 55 mpc837x_rdb_sd_cfg();
59} 56}
diff --git a/arch/powerpc/platforms/83xx/mpc83xx.h b/arch/powerpc/platforms/83xx/mpc83xx.h
index ad484199eff7..636eb9d0401a 100644
--- a/arch/powerpc/platforms/83xx/mpc83xx.h
+++ b/arch/powerpc/platforms/83xx/mpc83xx.h
@@ -86,5 +86,6 @@ extern void mpc83xx_setup_pci(void);
86#endif 86#endif
87 87
88extern int mpc83xx_declare_of_platform_devices(void); 88extern int mpc83xx_declare_of_platform_devices(void);
89extern void mpc83xx_setup_arch(void);
89 90
90#endif /* __MPC83XX_H__ */ 91#endif /* __MPC83XX_H__ */
diff --git a/arch/powerpc/platforms/83xx/sbc834x.c b/arch/powerpc/platforms/83xx/sbc834x.c
index b867e88dfb0d..cb4bdabfdf1c 100644
--- a/arch/powerpc/platforms/83xx/sbc834x.c
+++ b/arch/powerpc/platforms/83xx/sbc834x.c
@@ -47,10 +47,7 @@
47 */ 47 */
48static void __init sbc834x_setup_arch(void) 48static void __init sbc834x_setup_arch(void)
49{ 49{
50 if (ppc_md.progress) 50 mpc83xx_setup_arch();
51 ppc_md.progress("sbc834x_setup_arch()", 0);
52
53 mpc83xx_setup_pci();
54} 51}
55 52
56machine_device_initcall(sbc834x, mpc83xx_declare_of_platform_devices); 53machine_device_initcall(sbc834x, mpc83xx_declare_of_platform_devices);
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index df25a3ed489d..9dc1d28975b9 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -72,7 +72,7 @@ config MPC85xx_CDS
 config MPC85xx_MDS
        bool "Freescale MPC85xx MDS"
        select DEFAULT_UIMAGE
-       select PHYLIB
+       select PHYLIB if NETDEVICES
        select HAS_RAPIDIO
        select SWIOTLB
        help
diff --git a/arch/powerpc/platforms/85xx/bsc913x_qds.c b/arch/powerpc/platforms/85xx/bsc913x_qds.c
index 07dd6ae3ec52..d2f45569a026 100644
--- a/arch/powerpc/platforms/85xx/bsc913x_qds.c
+++ b/arch/powerpc/platforms/85xx/bsc913x_qds.c
@@ -72,7 +72,6 @@ define_machine(bsc9132_qds) {
72 .pcibios_fixup_bus = fsl_pcibios_fixup_bus, 72 .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
73#endif 73#endif
74 .get_irq = mpic_get_irq, 74 .get_irq = mpic_get_irq,
75 .restart = fsl_rstcr_restart,
76 .calibrate_decr = generic_calibrate_decr, 75 .calibrate_decr = generic_calibrate_decr,
77 .progress = udbg_progress, 76 .progress = udbg_progress,
78}; 77};
diff --git a/arch/powerpc/platforms/85xx/bsc913x_rdb.c b/arch/powerpc/platforms/85xx/bsc913x_rdb.c
index e48f6710e6d5..0ffdb4a80c2a 100644
--- a/arch/powerpc/platforms/85xx/bsc913x_rdb.c
+++ b/arch/powerpc/platforms/85xx/bsc913x_rdb.c
@@ -59,7 +59,6 @@ define_machine(bsc9131_rdb) {
59 .setup_arch = bsc913x_rdb_setup_arch, 59 .setup_arch = bsc913x_rdb_setup_arch,
60 .init_IRQ = bsc913x_rdb_pic_init, 60 .init_IRQ = bsc913x_rdb_pic_init,
61 .get_irq = mpic_get_irq, 61 .get_irq = mpic_get_irq,
62 .restart = fsl_rstcr_restart,
63 .calibrate_decr = generic_calibrate_decr, 62 .calibrate_decr = generic_calibrate_decr,
64 .progress = udbg_progress, 63 .progress = udbg_progress,
65}; 64};
diff --git a/arch/powerpc/platforms/85xx/c293pcie.c b/arch/powerpc/platforms/85xx/c293pcie.c
index 3b9e3f0f9aec..4df1b4026eab 100644
--- a/arch/powerpc/platforms/85xx/c293pcie.c
+++ b/arch/powerpc/platforms/85xx/c293pcie.c
@@ -65,7 +65,6 @@ define_machine(c293_pcie) {
65 .setup_arch = c293_pcie_setup_arch, 65 .setup_arch = c293_pcie_setup_arch,
66 .init_IRQ = c293_pcie_pic_init, 66 .init_IRQ = c293_pcie_pic_init,
67 .get_irq = mpic_get_irq, 67 .get_irq = mpic_get_irq,
68 .restart = fsl_rstcr_restart,
69 .calibrate_decr = generic_calibrate_decr, 68 .calibrate_decr = generic_calibrate_decr,
70 .progress = udbg_progress, 69 .progress = udbg_progress,
71}; 70};
diff --git a/arch/powerpc/platforms/85xx/corenet_generic.c b/arch/powerpc/platforms/85xx/corenet_generic.c
index 3a6a84f07f43..1179115a4b5c 100644
--- a/arch/powerpc/platforms/85xx/corenet_generic.c
+++ b/arch/powerpc/platforms/85xx/corenet_generic.c
@@ -225,7 +225,6 @@ define_machine(corenet_generic) {
225#else 225#else
226 .get_irq = mpic_get_coreint_irq, 226 .get_irq = mpic_get_coreint_irq,
227#endif 227#endif
228 .restart = fsl_rstcr_restart,
229 .calibrate_decr = generic_calibrate_decr, 228 .calibrate_decr = generic_calibrate_decr,
230 .progress = udbg_progress, 229 .progress = udbg_progress,
231#ifdef CONFIG_PPC64 230#ifdef CONFIG_PPC64
diff --git a/arch/powerpc/platforms/85xx/ge_imp3a.c b/arch/powerpc/platforms/85xx/ge_imp3a.c
index 14af36a7fa9c..f29c6f0909f3 100644
--- a/arch/powerpc/platforms/85xx/ge_imp3a.c
+++ b/arch/powerpc/platforms/85xx/ge_imp3a.c
@@ -215,7 +215,6 @@ define_machine(ge_imp3a) {
215 .pcibios_fixup_phb = fsl_pcibios_fixup_phb, 215 .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
216#endif 216#endif
217 .get_irq = mpic_get_irq, 217 .get_irq = mpic_get_irq,
218 .restart = fsl_rstcr_restart,
219 .calibrate_decr = generic_calibrate_decr, 218 .calibrate_decr = generic_calibrate_decr,
220 .progress = udbg_progress, 219 .progress = udbg_progress,
221}; 220};
diff --git a/arch/powerpc/platforms/85xx/mpc8536_ds.c b/arch/powerpc/platforms/85xx/mpc8536_ds.c
index 6ba687f19e45..94a7f92c858f 100644
--- a/arch/powerpc/platforms/85xx/mpc8536_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc8536_ds.c
@@ -77,7 +77,6 @@ define_machine(mpc8536_ds) {
77 .pcibios_fixup_phb = fsl_pcibios_fixup_phb, 77 .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
78#endif 78#endif
79 .get_irq = mpic_get_irq, 79 .get_irq = mpic_get_irq,
80 .restart = fsl_rstcr_restart,
81 .calibrate_decr = generic_calibrate_decr, 80 .calibrate_decr = generic_calibrate_decr,
82 .progress = udbg_progress, 81 .progress = udbg_progress,
83}; 82};
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ads.c b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
index 8756715c7a47..f3e055fdd1de 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ads.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
@@ -170,7 +170,6 @@ define_machine(mpc85xx_ads) {
170 .init_IRQ = mpc85xx_ads_pic_init, 170 .init_IRQ = mpc85xx_ads_pic_init,
171 .show_cpuinfo = mpc85xx_ads_show_cpuinfo, 171 .show_cpuinfo = mpc85xx_ads_show_cpuinfo,
172 .get_irq = mpic_get_irq, 172 .get_irq = mpic_get_irq,
173 .restart = fsl_rstcr_restart,
174 .calibrate_decr = generic_calibrate_decr, 173 .calibrate_decr = generic_calibrate_decr,
175 .progress = udbg_progress, 174 .progress = udbg_progress,
176}; 175};
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
index 86f20156178e..224db30c497b 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
@@ -83,7 +83,8 @@ static int mpc85xx_exclude_device(struct pci_controller *hose,
83 return PCIBIOS_SUCCESSFUL; 83 return PCIBIOS_SUCCESSFUL;
84} 84}
85 85
86static void __noreturn mpc85xx_cds_restart(char *cmd) 86static int mpc85xx_cds_restart(struct notifier_block *this,
87 unsigned long mode, void *cmd)
87{ 88{
88 struct pci_dev *dev; 89 struct pci_dev *dev;
89 u_char tmp; 90 u_char tmp;
@@ -108,12 +109,25 @@ static void __noreturn mpc85xx_cds_restart(char *cmd)
108 } 109 }
109 110
110 /* 111 /*
111 * If we can't find the VIA chip (maybe the P2P bridge is disabled) 112 * If we can't find the VIA chip (maybe the P2P bridge is
112 * or the VIA chip reset didn't work, just use the default reset. 113 * disabled) or the VIA chip reset didn't work, just return
114 * and let default reset sequence happen.
113 */ 115 */
114 fsl_rstcr_restart(NULL); 116 return NOTIFY_DONE;
115} 117}
116 118
119static int mpc85xx_cds_restart_register(void)
120{
121 static struct notifier_block restart_handler;
122
123 restart_handler.notifier_call = mpc85xx_cds_restart;
124 restart_handler.priority = 192;
125
126 return register_restart_handler(&restart_handler);
127}
128machine_arch_initcall(mpc85xx_cds, mpc85xx_cds_restart_register);
129
130
117static void __init mpc85xx_cds_pci_irq_fixup(struct pci_dev *dev) 131static void __init mpc85xx_cds_pci_irq_fixup(struct pci_dev *dev)
118{ 132{
119 u_char c; 133 u_char c;
@@ -380,11 +394,8 @@ define_machine(mpc85xx_cds) {
380 .show_cpuinfo = mpc85xx_cds_show_cpuinfo, 394 .show_cpuinfo = mpc85xx_cds_show_cpuinfo,
381 .get_irq = mpic_get_irq, 395 .get_irq = mpic_get_irq,
382#ifdef CONFIG_PCI 396#ifdef CONFIG_PCI
383 .restart = mpc85xx_cds_restart,
384 .pcibios_fixup_bus = mpc85xx_cds_fixup_bus, 397 .pcibios_fixup_bus = mpc85xx_cds_fixup_bus,
385 .pcibios_fixup_phb = fsl_pcibios_fixup_phb, 398 .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
386#else
387 .restart = fsl_rstcr_restart,
388#endif 399#endif
389 .calibrate_decr = generic_calibrate_decr, 400 .calibrate_decr = generic_calibrate_decr,
390 .progress = udbg_progress, 401 .progress = udbg_progress,
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
index ed69c7ee1829..dc9e035cc637 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
@@ -204,7 +204,6 @@ define_machine(mpc8544_ds) {
204 .pcibios_fixup_phb = fsl_pcibios_fixup_phb, 204 .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
205#endif 205#endif
206 .get_irq = mpic_get_irq, 206 .get_irq = mpic_get_irq,
207 .restart = fsl_rstcr_restart,
208 .calibrate_decr = generic_calibrate_decr, 207 .calibrate_decr = generic_calibrate_decr,
209 .progress = udbg_progress, 208 .progress = udbg_progress,
210}; 209};
@@ -219,7 +218,6 @@ define_machine(mpc8572_ds) {
219 .pcibios_fixup_phb = fsl_pcibios_fixup_phb, 218 .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
220#endif 219#endif
221 .get_irq = mpic_get_irq, 220 .get_irq = mpic_get_irq,
222 .restart = fsl_rstcr_restart,
223 .calibrate_decr = generic_calibrate_decr, 221 .calibrate_decr = generic_calibrate_decr,
224 .progress = udbg_progress, 222 .progress = udbg_progress,
225}; 223};
@@ -234,7 +232,6 @@ define_machine(p2020_ds) {
234 .pcibios_fixup_phb = fsl_pcibios_fixup_phb, 232 .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
235#endif 233#endif
236 .get_irq = mpic_get_irq, 234 .get_irq = mpic_get_irq,
237 .restart = fsl_rstcr_restart,
238 .calibrate_decr = generic_calibrate_decr, 235 .calibrate_decr = generic_calibrate_decr,
239 .progress = udbg_progress, 236 .progress = udbg_progress,
240}; 237};
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index fa9cd710d2ae..d7e440e6dba3 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -63,6 +63,8 @@
63#define DBG(fmt...) 63#define DBG(fmt...)
64#endif 64#endif
65 65
66#if IS_BUILTIN(CONFIG_PHYLIB)
67
66#define MV88E1111_SCR 0x10 68#define MV88E1111_SCR 0x10
67#define MV88E1111_SCR_125CLK 0x0010 69#define MV88E1111_SCR_125CLK 0x0010
68static int mpc8568_fixup_125_clock(struct phy_device *phydev) 70static int mpc8568_fixup_125_clock(struct phy_device *phydev)
@@ -152,6 +154,8 @@ static int mpc8568_mds_phy_fixups(struct phy_device *phydev)
152 return err; 154 return err;
153} 155}
154 156
157#endif
158
155/* ************************************************************************ 159/* ************************************************************************
156 * 160 *
157 * Setup the architecture 161 * Setup the architecture
@@ -313,6 +317,7 @@ static void __init mpc85xx_mds_setup_arch(void)
313 swiotlb_detect_4g(); 317 swiotlb_detect_4g();
314} 318}
315 319
320#if IS_BUILTIN(CONFIG_PHYLIB)
316 321
317static int __init board_fixups(void) 322static int __init board_fixups(void)
318{ 323{
@@ -342,9 +347,12 @@ static int __init board_fixups(void)
342 347
343 return 0; 348 return 0;
344} 349}
350
345machine_arch_initcall(mpc8568_mds, board_fixups); 351machine_arch_initcall(mpc8568_mds, board_fixups);
346machine_arch_initcall(mpc8569_mds, board_fixups); 352machine_arch_initcall(mpc8569_mds, board_fixups);
347 353
354#endif
355
348static int __init mpc85xx_publish_devices(void) 356static int __init mpc85xx_publish_devices(void)
349{ 357{
350 if (machine_is(mpc8568_mds)) 358 if (machine_is(mpc8568_mds))
@@ -385,7 +393,6 @@ define_machine(mpc8568_mds) {
385 .setup_arch = mpc85xx_mds_setup_arch, 393 .setup_arch = mpc85xx_mds_setup_arch,
386 .init_IRQ = mpc85xx_mds_pic_init, 394 .init_IRQ = mpc85xx_mds_pic_init,
387 .get_irq = mpic_get_irq, 395 .get_irq = mpic_get_irq,
388 .restart = fsl_rstcr_restart,
389 .calibrate_decr = generic_calibrate_decr, 396 .calibrate_decr = generic_calibrate_decr,
390 .progress = udbg_progress, 397 .progress = udbg_progress,
391#ifdef CONFIG_PCI 398#ifdef CONFIG_PCI
@@ -405,7 +412,6 @@ define_machine(mpc8569_mds) {
405 .setup_arch = mpc85xx_mds_setup_arch, 412 .setup_arch = mpc85xx_mds_setup_arch,
406 .init_IRQ = mpc85xx_mds_pic_init, 413 .init_IRQ = mpc85xx_mds_pic_init,
407 .get_irq = mpic_get_irq, 414 .get_irq = mpic_get_irq,
408 .restart = fsl_rstcr_restart,
409 .calibrate_decr = generic_calibrate_decr, 415 .calibrate_decr = generic_calibrate_decr,
410 .progress = udbg_progress, 416 .progress = udbg_progress,
411#ifdef CONFIG_PCI 417#ifdef CONFIG_PCI
@@ -426,7 +432,6 @@ define_machine(p1021_mds) {
426 .setup_arch = mpc85xx_mds_setup_arch, 432 .setup_arch = mpc85xx_mds_setup_arch,
427 .init_IRQ = mpc85xx_mds_pic_init, 433 .init_IRQ = mpc85xx_mds_pic_init,
428 .get_irq = mpic_get_irq, 434 .get_irq = mpic_get_irq,
429 .restart = fsl_rstcr_restart,
430 .calibrate_decr = generic_calibrate_decr, 435 .calibrate_decr = generic_calibrate_decr,
431 .progress = udbg_progress, 436 .progress = udbg_progress,
432#ifdef CONFIG_PCI 437#ifdef CONFIG_PCI
@@ -434,4 +439,3 @@ define_machine(p1021_mds) {
434 .pcibios_fixup_phb = fsl_pcibios_fixup_phb, 439 .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
435#endif 440#endif
436}; 441};
437
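
The PHY fixups in the hunk above are wrapped in IS_BUILTIN(CONFIG_PHYLIB) rather than IS_ENABLED(): this board file is always built into the kernel, so it can only reference phylib helpers such as phy_register_fixup_for_uid() when phylib itself is built in, not when it is a module. A short sketch of the distinction, using the real macros from linux/kconfig.h and a hypothetical helper:

#include <linux/kconfig.h>

static bool phy_fixups_available(void)          /* hypothetical helper */
{
        /* IS_ENABLED(CONFIG_PHYLIB) is 1 for =y or =m;
         * IS_BUILTIN(CONFIG_PHYLIB) is 1 only for =y, which is what
         * built-in machine_arch_initcall() code can safely link against. */
        return IS_BUILTIN(CONFIG_PHYLIB);
}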
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
index c1499cbf3786..10069503e39f 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
@@ -213,7 +213,6 @@ define_machine(p2020_rdb) {
213 .pcibios_fixup_phb = fsl_pcibios_fixup_phb, 213 .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
214#endif 214#endif
215 .get_irq = mpic_get_irq, 215 .get_irq = mpic_get_irq,
216 .restart = fsl_rstcr_restart,
217 .calibrate_decr = generic_calibrate_decr, 216 .calibrate_decr = generic_calibrate_decr,
218 .progress = udbg_progress, 217 .progress = udbg_progress,
219}; 218};
@@ -228,7 +227,6 @@ define_machine(p1020_rdb) {
228 .pcibios_fixup_phb = fsl_pcibios_fixup_phb, 227 .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
229#endif 228#endif
230 .get_irq = mpic_get_irq, 229 .get_irq = mpic_get_irq,
231 .restart = fsl_rstcr_restart,
232 .calibrate_decr = generic_calibrate_decr, 230 .calibrate_decr = generic_calibrate_decr,
233 .progress = udbg_progress, 231 .progress = udbg_progress,
234}; 232};
@@ -243,7 +241,6 @@ define_machine(p1021_rdb_pc) {
243 .pcibios_fixup_phb = fsl_pcibios_fixup_phb, 241 .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
244#endif 242#endif
245 .get_irq = mpic_get_irq, 243 .get_irq = mpic_get_irq,
246 .restart = fsl_rstcr_restart,
247 .calibrate_decr = generic_calibrate_decr, 244 .calibrate_decr = generic_calibrate_decr,
248 .progress = udbg_progress, 245 .progress = udbg_progress,
249}; 246};
@@ -258,7 +255,6 @@ define_machine(p2020_rdb_pc) {
258 .pcibios_fixup_phb = fsl_pcibios_fixup_phb, 255 .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
259#endif 256#endif
260 .get_irq = mpic_get_irq, 257 .get_irq = mpic_get_irq,
261 .restart = fsl_rstcr_restart,
262 .calibrate_decr = generic_calibrate_decr, 258 .calibrate_decr = generic_calibrate_decr,
263 .progress = udbg_progress, 259 .progress = udbg_progress,
264}; 260};
@@ -273,7 +269,6 @@ define_machine(p1025_rdb) {
273 .pcibios_fixup_phb = fsl_pcibios_fixup_phb, 269 .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
274#endif 270#endif
275 .get_irq = mpic_get_irq, 271 .get_irq = mpic_get_irq,
276 .restart = fsl_rstcr_restart,
277 .calibrate_decr = generic_calibrate_decr, 272 .calibrate_decr = generic_calibrate_decr,
278 .progress = udbg_progress, 273 .progress = udbg_progress,
279}; 274};
@@ -288,7 +283,6 @@ define_machine(p1020_mbg_pc) {
288 .pcibios_fixup_phb = fsl_pcibios_fixup_phb, 283 .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
289#endif 284#endif
290 .get_irq = mpic_get_irq, 285 .get_irq = mpic_get_irq,
291 .restart = fsl_rstcr_restart,
292 .calibrate_decr = generic_calibrate_decr, 286 .calibrate_decr = generic_calibrate_decr,
293 .progress = udbg_progress, 287 .progress = udbg_progress,
294}; 288};
@@ -303,7 +297,6 @@ define_machine(p1020_utm_pc) {
303 .pcibios_fixup_phb = fsl_pcibios_fixup_phb, 297 .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
304#endif 298#endif
305 .get_irq = mpic_get_irq, 299 .get_irq = mpic_get_irq,
306 .restart = fsl_rstcr_restart,
307 .calibrate_decr = generic_calibrate_decr, 300 .calibrate_decr = generic_calibrate_decr,
308 .progress = udbg_progress, 301 .progress = udbg_progress,
309}; 302};
@@ -318,7 +311,6 @@ define_machine(p1020_rdb_pc) {
318 .pcibios_fixup_phb = fsl_pcibios_fixup_phb, 311 .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
319#endif 312#endif
320 .get_irq = mpic_get_irq, 313 .get_irq = mpic_get_irq,
321 .restart = fsl_rstcr_restart,
322 .calibrate_decr = generic_calibrate_decr, 314 .calibrate_decr = generic_calibrate_decr,
323 .progress = udbg_progress, 315 .progress = udbg_progress,
324}; 316};
@@ -333,7 +325,6 @@ define_machine(p1020_rdb_pd) {
333 .pcibios_fixup_phb = fsl_pcibios_fixup_phb, 325 .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
334#endif 326#endif
335 .get_irq = mpic_get_irq, 327 .get_irq = mpic_get_irq,
336 .restart = fsl_rstcr_restart,
337 .calibrate_decr = generic_calibrate_decr, 328 .calibrate_decr = generic_calibrate_decr,
338 .progress = udbg_progress, 329 .progress = udbg_progress,
339}; 330};
@@ -348,7 +339,6 @@ define_machine(p1024_rdb) {
348 .pcibios_fixup_phb = fsl_pcibios_fixup_phb, 339 .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
349#endif 340#endif
350 .get_irq = mpic_get_irq, 341 .get_irq = mpic_get_irq,
351 .restart = fsl_rstcr_restart,
352 .calibrate_decr = generic_calibrate_decr, 342 .calibrate_decr = generic_calibrate_decr,
353 .progress = udbg_progress, 343 .progress = udbg_progress,
354}; 344};
diff --git a/arch/powerpc/platforms/85xx/mvme2500.c b/arch/powerpc/platforms/85xx/mvme2500.c
index acc3d0d6049d..d5af0723a69e 100644
--- a/arch/powerpc/platforms/85xx/mvme2500.c
+++ b/arch/powerpc/platforms/85xx/mvme2500.c
@@ -66,7 +66,6 @@ define_machine(mvme2500) {
66 .pcibios_fixup_phb = fsl_pcibios_fixup_phb, 66 .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
67#endif 67#endif
68 .get_irq = mpic_get_irq, 68 .get_irq = mpic_get_irq,
69 .restart = fsl_rstcr_restart,
70 .calibrate_decr = generic_calibrate_decr, 69 .calibrate_decr = generic_calibrate_decr,
71 .progress = udbg_progress, 70 .progress = udbg_progress,
72}; 71};
diff --git a/arch/powerpc/platforms/85xx/p1010rdb.c b/arch/powerpc/platforms/85xx/p1010rdb.c
index 661d7b59e413..78d13b364cd6 100644
--- a/arch/powerpc/platforms/85xx/p1010rdb.c
+++ b/arch/powerpc/platforms/85xx/p1010rdb.c
@@ -79,7 +79,6 @@ define_machine(p1010_rdb) {
79 .pcibios_fixup_phb = fsl_pcibios_fixup_phb, 79 .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
80#endif 80#endif
81 .get_irq = mpic_get_irq, 81 .get_irq = mpic_get_irq,
82 .restart = fsl_rstcr_restart,
83 .calibrate_decr = generic_calibrate_decr, 82 .calibrate_decr = generic_calibrate_decr,
84 .progress = udbg_progress, 83 .progress = udbg_progress,
85}; 84};
diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c
index 63568d68c76f..0908abd7e36f 100644
--- a/arch/powerpc/platforms/85xx/p1022_ds.c
+++ b/arch/powerpc/platforms/85xx/p1022_ds.c
@@ -568,7 +568,6 @@ define_machine(p1022_ds) {
568 .pcibios_fixup_phb = fsl_pcibios_fixup_phb, 568 .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
569#endif 569#endif
570 .get_irq = mpic_get_irq, 570 .get_irq = mpic_get_irq,
571 .restart = fsl_rstcr_restart,
572 .calibrate_decr = generic_calibrate_decr, 571 .calibrate_decr = generic_calibrate_decr,
573 .progress = udbg_progress, 572 .progress = udbg_progress,
574}; 573};
diff --git a/arch/powerpc/platforms/85xx/p1022_rdk.c b/arch/powerpc/platforms/85xx/p1022_rdk.c
index 2f2943600301..276e00ab3dde 100644
--- a/arch/powerpc/platforms/85xx/p1022_rdk.c
+++ b/arch/powerpc/platforms/85xx/p1022_rdk.c
@@ -148,7 +148,6 @@ define_machine(p1022_rdk) {
148 .pcibios_fixup_phb = fsl_pcibios_fixup_phb, 148 .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
149#endif 149#endif
150 .get_irq = mpic_get_irq, 150 .get_irq = mpic_get_irq,
151 .restart = fsl_rstcr_restart,
152 .calibrate_decr = generic_calibrate_decr, 151 .calibrate_decr = generic_calibrate_decr,
153 .progress = udbg_progress, 152 .progress = udbg_progress,
154}; 153};
diff --git a/arch/powerpc/platforms/85xx/p1023_rdb.c b/arch/powerpc/platforms/85xx/p1023_rdb.c
index 40d8de57c341..3e8cd0324dfc 100644
--- a/arch/powerpc/platforms/85xx/p1023_rdb.c
+++ b/arch/powerpc/platforms/85xx/p1023_rdb.c
@@ -110,7 +110,6 @@ define_machine(p1023_rdb) {
110 .setup_arch = mpc85xx_rdb_setup_arch, 110 .setup_arch = mpc85xx_rdb_setup_arch,
111 .init_IRQ = mpc85xx_rdb_pic_init, 111 .init_IRQ = mpc85xx_rdb_pic_init,
112 .get_irq = mpic_get_irq, 112 .get_irq = mpic_get_irq,
113 .restart = fsl_rstcr_restart,
114 .calibrate_decr = generic_calibrate_decr, 113 .calibrate_decr = generic_calibrate_decr,
115 .progress = udbg_progress, 114 .progress = udbg_progress,
116#ifdef CONFIG_PCI 115#ifdef CONFIG_PCI
diff --git a/arch/powerpc/platforms/85xx/ppa8548.c b/arch/powerpc/platforms/85xx/ppa8548.c
index 2410167b290a..33c5ba644fa5 100644
--- a/arch/powerpc/platforms/85xx/ppa8548.c
+++ b/arch/powerpc/platforms/85xx/ppa8548.c
@@ -91,7 +91,6 @@ define_machine(ppa8548) {
91 .init_IRQ = ppa8548_pic_init, 91 .init_IRQ = ppa8548_pic_init,
92 .show_cpuinfo = ppa8548_show_cpuinfo, 92 .show_cpuinfo = ppa8548_show_cpuinfo,
93 .get_irq = mpic_get_irq, 93 .get_irq = mpic_get_irq,
94 .restart = fsl_rstcr_restart,
95 .calibrate_decr = generic_calibrate_decr, 94 .calibrate_decr = generic_calibrate_decr,
96 .progress = udbg_progress, 95 .progress = udbg_progress,
97}; 96};
diff --git a/arch/powerpc/platforms/85xx/qemu_e500.c b/arch/powerpc/platforms/85xx/qemu_e500.c
index 50d745809809..b63a8548366f 100644
--- a/arch/powerpc/platforms/85xx/qemu_e500.c
+++ b/arch/powerpc/platforms/85xx/qemu_e500.c
@@ -77,7 +77,6 @@ define_machine(qemu_e500) {
77 .pcibios_fixup_phb = fsl_pcibios_fixup_phb, 77 .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
78#endif 78#endif
79 .get_irq = mpic_get_coreint_irq, 79 .get_irq = mpic_get_coreint_irq,
80 .restart = fsl_rstcr_restart,
81 .calibrate_decr = generic_calibrate_decr, 80 .calibrate_decr = generic_calibrate_decr,
82 .progress = udbg_progress, 81 .progress = udbg_progress,
83}; 82};
diff --git a/arch/powerpc/platforms/85xx/sbc8548.c b/arch/powerpc/platforms/85xx/sbc8548.c
index 62b6c45a5a9b..2c670848ff08 100644
--- a/arch/powerpc/platforms/85xx/sbc8548.c
+++ b/arch/powerpc/platforms/85xx/sbc8548.c
@@ -130,7 +130,6 @@ define_machine(sbc8548) {
130 .init_IRQ = sbc8548_pic_init, 130 .init_IRQ = sbc8548_pic_init,
131 .show_cpuinfo = sbc8548_show_cpuinfo, 131 .show_cpuinfo = sbc8548_show_cpuinfo,
132 .get_irq = mpic_get_irq, 132 .get_irq = mpic_get_irq,
133 .restart = fsl_rstcr_restart,
134#ifdef CONFIG_PCI 133#ifdef CONFIG_PCI
135 .pcibios_fixup_bus = fsl_pcibios_fixup_bus, 134 .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
136 .pcibios_fixup_phb = fsl_pcibios_fixup_phb, 135 .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
diff --git a/arch/powerpc/platforms/85xx/sgy_cts1000.c b/arch/powerpc/platforms/85xx/sgy_cts1000.c
index 79fd0dfd4b82..21d6aaa5c3e4 100644
--- a/arch/powerpc/platforms/85xx/sgy_cts1000.c
+++ b/arch/powerpc/platforms/85xx/sgy_cts1000.c
@@ -38,18 +38,18 @@ static void gpio_halt_wfn(struct work_struct *work)
38} 38}
39static DECLARE_WORK(gpio_halt_wq, gpio_halt_wfn); 39static DECLARE_WORK(gpio_halt_wq, gpio_halt_wfn);
40 40
41static void gpio_halt_cb(void) 41static void __noreturn gpio_halt_cb(void)
42{ 42{
43 enum of_gpio_flags flags; 43 enum of_gpio_flags flags;
44 int trigger, gpio; 44 int trigger, gpio;
45 45
46 if (!halt_node) 46 if (!halt_node)
47 return; 47 panic("No reset GPIO information was provided in DT\n");
48 48
49 gpio = of_get_gpio_flags(halt_node, 0, &flags); 49 gpio = of_get_gpio_flags(halt_node, 0, &flags);
50 50
51 if (!gpio_is_valid(gpio)) 51 if (!gpio_is_valid(gpio))
52 return; 52 panic("Provided GPIO is invalid\n");
53 53
54 trigger = (flags == OF_GPIO_ACTIVE_LOW); 54 trigger = (flags == OF_GPIO_ACTIVE_LOW);
55 55
@@ -57,6 +57,8 @@ static void gpio_halt_cb(void)
57 57
58 /* Probably won't return */ 58 /* Probably won't return */
59 gpio_set_value(gpio, trigger); 59 gpio_set_value(gpio, trigger);
60
61 panic("Halt failed\n");
60} 62}
61 63
62/* This IRQ means someone pressed the power button and it is waiting for us 64/* This IRQ means someone pressed the power button and it is waiting for us
diff --git a/arch/powerpc/platforms/85xx/socrates.c b/arch/powerpc/platforms/85xx/socrates.c
index cd255acde2e2..8da4ed90338d 100644
--- a/arch/powerpc/platforms/85xx/socrates.c
+++ b/arch/powerpc/platforms/85xx/socrates.c
@@ -91,7 +91,6 @@ define_machine(socrates) {
91 .setup_arch = socrates_setup_arch, 91 .setup_arch = socrates_setup_arch,
92 .init_IRQ = socrates_pic_init, 92 .init_IRQ = socrates_pic_init,
93 .get_irq = mpic_get_irq, 93 .get_irq = mpic_get_irq,
94 .restart = fsl_rstcr_restart,
95 .calibrate_decr = generic_calibrate_decr, 94 .calibrate_decr = generic_calibrate_decr,
96 .progress = udbg_progress, 95 .progress = udbg_progress,
97}; 96};
diff --git a/arch/powerpc/platforms/85xx/stx_gp3.c b/arch/powerpc/platforms/85xx/stx_gp3.c
index 91b824c4dc08..1a1d44ea1754 100644
--- a/arch/powerpc/platforms/85xx/stx_gp3.c
+++ b/arch/powerpc/platforms/85xx/stx_gp3.c
@@ -103,7 +103,6 @@ define_machine(stx_gp3) {
103 .init_IRQ = stx_gp3_pic_init, 103 .init_IRQ = stx_gp3_pic_init,
104 .show_cpuinfo = stx_gp3_show_cpuinfo, 104 .show_cpuinfo = stx_gp3_show_cpuinfo,
105 .get_irq = mpic_get_irq, 105 .get_irq = mpic_get_irq,
106 .restart = fsl_rstcr_restart,
107 .calibrate_decr = generic_calibrate_decr, 106 .calibrate_decr = generic_calibrate_decr,
108 .progress = udbg_progress, 107 .progress = udbg_progress,
109}; 108};
diff --git a/arch/powerpc/platforms/85xx/tqm85xx.c b/arch/powerpc/platforms/85xx/tqm85xx.c
index b7c54454d611..9fc20a37835e 100644
--- a/arch/powerpc/platforms/85xx/tqm85xx.c
+++ b/arch/powerpc/platforms/85xx/tqm85xx.c
@@ -132,7 +132,6 @@ define_machine(tqm85xx) {
132 .init_IRQ = tqm85xx_pic_init, 132 .init_IRQ = tqm85xx_pic_init,
133 .show_cpuinfo = tqm85xx_show_cpuinfo, 133 .show_cpuinfo = tqm85xx_show_cpuinfo,
134 .get_irq = mpic_get_irq, 134 .get_irq = mpic_get_irq,
135 .restart = fsl_rstcr_restart,
136 .calibrate_decr = generic_calibrate_decr, 135 .calibrate_decr = generic_calibrate_decr,
137 .progress = udbg_progress, 136 .progress = udbg_progress,
138}; 137};
diff --git a/arch/powerpc/platforms/85xx/twr_p102x.c b/arch/powerpc/platforms/85xx/twr_p102x.c
index 1bc02a87f597..360f6253e9ff 100644
--- a/arch/powerpc/platforms/85xx/twr_p102x.c
+++ b/arch/powerpc/platforms/85xx/twr_p102x.c
@@ -140,7 +140,6 @@ define_machine(twr_p1025) {
140 .pcibios_fixup_bus = fsl_pcibios_fixup_bus, 140 .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
141#endif 141#endif
142 .get_irq = mpic_get_irq, 142 .get_irq = mpic_get_irq,
143 .restart = fsl_rstcr_restart,
144 .calibrate_decr = generic_calibrate_decr, 143 .calibrate_decr = generic_calibrate_decr,
145 .progress = udbg_progress, 144 .progress = udbg_progress,
146}; 145};
diff --git a/arch/powerpc/platforms/85xx/xes_mpc85xx.c b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
index cf0c70ff026e..cd6ce845f398 100644
--- a/arch/powerpc/platforms/85xx/xes_mpc85xx.c
+++ b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
@@ -167,7 +167,6 @@ define_machine(xes_mpc8572) {
167 .pcibios_fixup_phb = fsl_pcibios_fixup_phb, 167 .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
168#endif 168#endif
169 .get_irq = mpic_get_irq, 169 .get_irq = mpic_get_irq,
170 .restart = fsl_rstcr_restart,
171 .calibrate_decr = generic_calibrate_decr, 170 .calibrate_decr = generic_calibrate_decr,
172 .progress = udbg_progress, 171 .progress = udbg_progress,
173}; 172};
@@ -182,7 +181,6 @@ define_machine(xes_mpc8548) {
182 .pcibios_fixup_phb = fsl_pcibios_fixup_phb, 181 .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
183#endif 182#endif
184 .get_irq = mpic_get_irq, 183 .get_irq = mpic_get_irq,
185 .restart = fsl_rstcr_restart,
186 .calibrate_decr = generic_calibrate_decr, 184 .calibrate_decr = generic_calibrate_decr,
187 .progress = udbg_progress, 185 .progress = udbg_progress,
188}; 186};
@@ -197,7 +195,6 @@ define_machine(xes_mpc8540) {
197 .pcibios_fixup_phb = fsl_pcibios_fixup_phb, 195 .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
198#endif 196#endif
199 .get_irq = mpic_get_irq, 197 .get_irq = mpic_get_irq,
200 .restart = fsl_rstcr_restart,
201 .calibrate_decr = generic_calibrate_decr, 198 .calibrate_decr = generic_calibrate_decr,
202 .progress = udbg_progress, 199 .progress = udbg_progress,
203}; 200};
diff --git a/arch/powerpc/platforms/86xx/gef_ppc9a.c b/arch/powerpc/platforms/86xx/gef_ppc9a.c
index ef684afb63c6..6b99300edd36 100644
--- a/arch/powerpc/platforms/86xx/gef_ppc9a.c
+++ b/arch/powerpc/platforms/86xx/gef_ppc9a.c
@@ -204,7 +204,6 @@ define_machine(gef_ppc9a) {
204 .init_IRQ = gef_ppc9a_init_irq, 204 .init_IRQ = gef_ppc9a_init_irq,
205 .show_cpuinfo = gef_ppc9a_show_cpuinfo, 205 .show_cpuinfo = gef_ppc9a_show_cpuinfo,
206 .get_irq = mpic_get_irq, 206 .get_irq = mpic_get_irq,
207 .restart = fsl_rstcr_restart,
208 .time_init = mpc86xx_time_init, 207 .time_init = mpc86xx_time_init,
209 .calibrate_decr = generic_calibrate_decr, 208 .calibrate_decr = generic_calibrate_decr,
210 .progress = udbg_progress, 209 .progress = udbg_progress,
diff --git a/arch/powerpc/platforms/86xx/gef_sbc310.c b/arch/powerpc/platforms/86xx/gef_sbc310.c
index 67dd0c231646..8cdeca061127 100644
--- a/arch/powerpc/platforms/86xx/gef_sbc310.c
+++ b/arch/powerpc/platforms/86xx/gef_sbc310.c
@@ -191,7 +191,6 @@ define_machine(gef_sbc310) {
191 .init_IRQ = gef_sbc310_init_irq, 191 .init_IRQ = gef_sbc310_init_irq,
192 .show_cpuinfo = gef_sbc310_show_cpuinfo, 192 .show_cpuinfo = gef_sbc310_show_cpuinfo,
193 .get_irq = mpic_get_irq, 193 .get_irq = mpic_get_irq,
194 .restart = fsl_rstcr_restart,
195 .time_init = mpc86xx_time_init, 194 .time_init = mpc86xx_time_init,
196 .calibrate_decr = generic_calibrate_decr, 195 .calibrate_decr = generic_calibrate_decr,
197 .progress = udbg_progress, 196 .progress = udbg_progress,
diff --git a/arch/powerpc/platforms/86xx/gef_sbc610.c b/arch/powerpc/platforms/86xx/gef_sbc610.c
index 805026976cac..da8723ae23ec 100644
--- a/arch/powerpc/platforms/86xx/gef_sbc610.c
+++ b/arch/powerpc/platforms/86xx/gef_sbc610.c
@@ -181,7 +181,6 @@ define_machine(gef_sbc610) {
181 .init_IRQ = gef_sbc610_init_irq, 181 .init_IRQ = gef_sbc610_init_irq,
182 .show_cpuinfo = gef_sbc610_show_cpuinfo, 182 .show_cpuinfo = gef_sbc610_show_cpuinfo,
183 .get_irq = mpic_get_irq, 183 .get_irq = mpic_get_irq,
184 .restart = fsl_rstcr_restart,
185 .time_init = mpc86xx_time_init, 184 .time_init = mpc86xx_time_init,
186 .calibrate_decr = generic_calibrate_decr, 185 .calibrate_decr = generic_calibrate_decr,
187 .progress = udbg_progress, 186 .progress = udbg_progress,
diff --git a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
index fef0582eddf1..a5d73fabe4d1 100644
--- a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
+++ b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
@@ -331,7 +331,6 @@ define_machine(mpc86xx_hpcd) {
331 .setup_arch = mpc86xx_hpcd_setup_arch, 331 .setup_arch = mpc86xx_hpcd_setup_arch,
332 .init_IRQ = mpc86xx_init_irq, 332 .init_IRQ = mpc86xx_init_irq,
333 .get_irq = mpic_get_irq, 333 .get_irq = mpic_get_irq,
334 .restart = fsl_rstcr_restart,
335 .time_init = mpc86xx_time_init, 334 .time_init = mpc86xx_time_init,
336 .calibrate_decr = generic_calibrate_decr, 335 .calibrate_decr = generic_calibrate_decr,
337 .progress = udbg_progress, 336 .progress = udbg_progress,
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
index 5ae42a037065..a0e989ed4b6f 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
@@ -130,7 +130,6 @@ define_machine(mpc86xx_hpcn) {
130 .init_IRQ = mpc86xx_init_irq, 130 .init_IRQ = mpc86xx_init_irq,
131 .show_cpuinfo = mpc86xx_hpcn_show_cpuinfo, 131 .show_cpuinfo = mpc86xx_hpcn_show_cpuinfo,
132 .get_irq = mpic_get_irq, 132 .get_irq = mpic_get_irq,
133 .restart = fsl_rstcr_restart,
134 .time_init = mpc86xx_time_init, 133 .time_init = mpc86xx_time_init,
135 .calibrate_decr = generic_calibrate_decr, 134 .calibrate_decr = generic_calibrate_decr,
136 .progress = udbg_progress, 135 .progress = udbg_progress,
diff --git a/arch/powerpc/platforms/86xx/mvme7100.c b/arch/powerpc/platforms/86xx/mvme7100.c
index addb41e7cd14..835352e63dc3 100644
--- a/arch/powerpc/platforms/86xx/mvme7100.c
+++ b/arch/powerpc/platforms/86xx/mvme7100.c
@@ -111,7 +111,6 @@ define_machine(mvme7100) {
111 .setup_arch = mvme7100_setup_arch, 111 .setup_arch = mvme7100_setup_arch,
112 .init_IRQ = mpc86xx_init_irq, 112 .init_IRQ = mpc86xx_init_irq,
113 .get_irq = mpic_get_irq, 113 .get_irq = mpic_get_irq,
114 .restart = fsl_rstcr_restart,
115 .time_init = mpc86xx_time_init, 114 .time_init = mpc86xx_time_init,
116 .calibrate_decr = generic_calibrate_decr, 115 .calibrate_decr = generic_calibrate_decr,
117 .progress = udbg_progress, 116 .progress = udbg_progress,
diff --git a/arch/powerpc/platforms/86xx/sbc8641d.c b/arch/powerpc/platforms/86xx/sbc8641d.c
index 52af5735742e..93db35d4f6eb 100644
--- a/arch/powerpc/platforms/86xx/sbc8641d.c
+++ b/arch/powerpc/platforms/86xx/sbc8641d.c
@@ -82,7 +82,6 @@ define_machine(sbc8641) {
82 .init_IRQ = mpc86xx_init_irq, 82 .init_IRQ = mpc86xx_init_irq,
83 .show_cpuinfo = sbc8641_show_cpuinfo, 83 .show_cpuinfo = sbc8641_show_cpuinfo,
84 .get_irq = mpic_get_irq, 84 .get_irq = mpic_get_irq,
85 .restart = fsl_rstcr_restart,
86 .time_init = mpc86xx_time_init, 85 .time_init = mpc86xx_time_init,
87 .calibrate_decr = generic_calibrate_decr, 86 .calibrate_decr = generic_calibrate_decr,
88 .progress = udbg_progress, 87 .progress = udbg_progress,
diff --git a/arch/powerpc/sysdev/cpm1.c b/arch/powerpc/sysdev/cpm1.c
index 3c0eb9b25535..986cd111d4df 100644
--- a/arch/powerpc/sysdev/cpm1.c
+++ b/arch/powerpc/sysdev/cpm1.c
@@ -233,8 +233,6 @@ void __init cpm_reset(void)
233 else 233 else
234 out_be32(&siu_conf->sc_sdcr, 1); 234 out_be32(&siu_conf->sc_sdcr, 1);
235 immr_unmap(siu_conf); 235 immr_unmap(siu_conf);
236
237 cpm_muram_init();
238} 236}
239 237
240static DEFINE_SPINLOCK(cmd_lock); 238static DEFINE_SPINLOCK(cmd_lock);
diff --git a/arch/powerpc/sysdev/cpm2.c b/arch/powerpc/sysdev/cpm2.c
index 8dc1e24f3c23..f78ff841652c 100644
--- a/arch/powerpc/sysdev/cpm2.c
+++ b/arch/powerpc/sysdev/cpm2.c
@@ -66,10 +66,6 @@ void __init cpm2_reset(void)
66 cpm2_immr = ioremap(get_immrbase(), CPM_MAP_SIZE); 66 cpm2_immr = ioremap(get_immrbase(), CPM_MAP_SIZE);
67#endif 67#endif
68 68
69 /* Reclaim the DP memory for our use.
70 */
71 cpm_muram_init();
72
73 /* Tell everyone where the comm processor resides. 69 /* Tell everyone where the comm processor resides.
74 */ 70 */
75 cpmp = &cpm2_immr->im_cpm; 71 cpmp = &cpm2_immr->im_cpm;
diff --git a/arch/powerpc/sysdev/cpm_common.c b/arch/powerpc/sysdev/cpm_common.c
index 947f42007734..51bf749a4f3a 100644
--- a/arch/powerpc/sysdev/cpm_common.c
+++ b/arch/powerpc/sysdev/cpm_common.c
@@ -37,6 +37,21 @@
37#include <linux/of_gpio.h> 37#include <linux/of_gpio.h>
38#endif 38#endif
39 39
40static int __init cpm_init(void)
41{
42 struct device_node *np;
43
44 np = of_find_compatible_node(NULL, NULL, "fsl,cpm1");
45 if (!np)
46 np = of_find_compatible_node(NULL, NULL, "fsl,cpm2");
47 if (!np)
48 return -ENODEV;
49 cpm_muram_init();
50 of_node_put(np);
51 return 0;
52}
53subsys_initcall(cpm_init);
54
40#ifdef CONFIG_PPC_EARLY_DEBUG_CPM 55#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
41static u32 __iomem *cpm_udbg_txdesc; 56static u32 __iomem *cpm_udbg_txdesc;
42static u8 __iomem *cpm_udbg_txbuf; 57static u8 __iomem *cpm_udbg_txbuf;
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 0ef9df49f0f2..d3a597456b6e 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -111,8 +111,7 @@ static struct pci_ops fsl_indirect_pcie_ops =
111 .write = indirect_write_config, 111 .write = indirect_write_config,
112}; 112};
113 113
114#define MAX_PHYS_ADDR_BITS 40 114static u64 pci64_dma_offset;
115static u64 pci64_dma_offset = 1ull << MAX_PHYS_ADDR_BITS;
116 115
117#ifdef CONFIG_SWIOTLB 116#ifdef CONFIG_SWIOTLB
118static void setup_swiotlb_ops(struct pci_controller *hose) 117static void setup_swiotlb_ops(struct pci_controller *hose)
@@ -132,12 +131,10 @@ static int fsl_pci_dma_set_mask(struct device *dev, u64 dma_mask)
132 return -EIO; 131 return -EIO;
133 132
134 /* 133 /*
135 * Fixup PCI devices that are able to DMA to above the physical 134 * Fix up PCI devices that are able to DMA to the large inbound
136 * address width of the SoC such that we can address any internal 135 * mapping that allows addressing any RAM address from across PCI.
137 * SoC address from across PCI if needed
138 */ 136 */
139 if ((dev_is_pci(dev)) && 137 if (dev_is_pci(dev) && dma_mask >= pci64_dma_offset * 2 - 1) {
140 dma_mask >= DMA_BIT_MASK(MAX_PHYS_ADDR_BITS)) {
141 set_dma_ops(dev, &dma_direct_ops); 138 set_dma_ops(dev, &dma_direct_ops);
142 set_dma_offset(dev, pci64_dma_offset); 139 set_dma_offset(dev, pci64_dma_offset);
143 } 140 }
@@ -387,6 +384,7 @@ static void setup_pci_atmu(struct pci_controller *hose)
387 mem_log++; 384 mem_log++;
388 385
389 piwar = (piwar & ~PIWAR_SZ_MASK) | (mem_log - 1); 386 piwar = (piwar & ~PIWAR_SZ_MASK) | (mem_log - 1);
387 pci64_dma_offset = 1ULL << mem_log;
390 388
391 if (setup_inbound) { 389 if (setup_inbound) {
392 /* Setup inbound memory window */ 390 /* Setup inbound memory window */
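The check above grants a PCI device direct DMA ops (with pci64_dma_offset applied) only when its mask covers both the normal inbound mapping and the high copy placed at the offset, i.e. at least mem_log + 1 addressable bits, where mem_log is the inbound window size computed in setup_pci_atmu(). A stand-alone sketch of that arithmetic, using an assumed 40-bit window purely for illustration:

/* Illustrative only; mem_log is whatever setup_pci_atmu() derived for the
 * inbound window, 40 is just an example value. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        unsigned int mem_log = 40;                         /* assumed 2^40 window */
        uint64_t pci64_dma_offset = 1ULL << mem_log;       /* as set in setup_pci_atmu() */
        uint64_t required_mask = pci64_dma_offset * 2 - 1; /* == DMA_BIT_MASK(mem_log + 1) */

        printf("offset 0x%jx, direct DMA ops need a mask >= 0x%jx (%u bits)\n",
               (uintmax_t)pci64_dma_offset, (uintmax_t)required_mask, mem_log + 1);
        return 0;
}

Devices with narrower masks keep their existing DMA ops; only hardware that can really address the whole high mapping is switched to dma_direct_ops with the offset applied.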
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c
index a09ca704de58..d93056eedcb0 100644
--- a/arch/powerpc/sysdev/fsl_soc.c
+++ b/arch/powerpc/sysdev/fsl_soc.c
@@ -29,6 +29,7 @@
29#include <linux/fsl_devices.h> 29#include <linux/fsl_devices.h>
30#include <linux/fs_enet_pd.h> 30#include <linux/fs_enet_pd.h>
31#include <linux/fs_uart_pd.h> 31#include <linux/fs_uart_pd.h>
32#include <linux/reboot.h>
32 33
33#include <linux/atomic.h> 34#include <linux/atomic.h>
34#include <asm/io.h> 35#include <asm/io.h>
@@ -180,23 +181,38 @@ EXPORT_SYMBOL(get_baudrate);
180#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx) 181#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
181static __be32 __iomem *rstcr; 182static __be32 __iomem *rstcr;
182 183
184static int fsl_rstcr_restart(struct notifier_block *this,
185 unsigned long mode, void *cmd)
186{
187 local_irq_disable();
188 /* set reset control register */
189 out_be32(rstcr, 0x2); /* HRESET_REQ */
190
191 return NOTIFY_DONE;
192}
193
183static int __init setup_rstcr(void) 194static int __init setup_rstcr(void)
184{ 195{
185 struct device_node *np; 196 struct device_node *np;
186 197
198 static struct notifier_block restart_handler = {
199 .notifier_call = fsl_rstcr_restart,
200 .priority = 128,
201 };
202
187 for_each_node_by_name(np, "global-utilities") { 203 for_each_node_by_name(np, "global-utilities") {
188 if ((of_get_property(np, "fsl,has-rstcr", NULL))) { 204 if ((of_get_property(np, "fsl,has-rstcr", NULL))) {
189 rstcr = of_iomap(np, 0) + 0xb0; 205 rstcr = of_iomap(np, 0) + 0xb0;
190 if (!rstcr) 206 if (!rstcr) {
191 printk (KERN_ERR "Error: reset control " 207 printk (KERN_ERR "Error: reset control "
192 "register not mapped!\n"); 208 "register not mapped!\n");
209 } else {
210 register_restart_handler(&restart_handler);
211 }
193 break; 212 break;
194 } 213 }
195 } 214 }
196 215
197 if (!rstcr && ppc_md.restart == fsl_rstcr_restart)
198 printk(KERN_ERR "No RSTCR register, warm reboot won't work\n");
199
200 of_node_put(np); 216 of_node_put(np);
201 217
202 return 0; 218 return 0;
@@ -204,15 +220,6 @@ static int __init setup_rstcr(void)
204 220
205arch_initcall(setup_rstcr); 221arch_initcall(setup_rstcr);
206 222
207void __noreturn fsl_rstcr_restart(char *cmd)
208{
209 local_irq_disable();
210 if (rstcr)
211 /* set reset control register */
212 out_be32(rstcr, 0x2); /* HRESET_REQ */
213
214 while (1) ;
215}
216#endif 223#endif
217 224
218#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE) 225#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
diff --git a/arch/powerpc/sysdev/fsl_soc.h b/arch/powerpc/sysdev/fsl_soc.h
index 433566a5ef19..d73daa4f0ccf 100644
--- a/arch/powerpc/sysdev/fsl_soc.h
+++ b/arch/powerpc/sysdev/fsl_soc.h
@@ -19,8 +19,6 @@ extern u32 fsl_get_sys_freq(void);
19struct spi_board_info; 19struct spi_board_info;
20struct device_node; 20struct device_node;
21 21
22extern void __noreturn fsl_rstcr_restart(char *cmd);
23
24/* The different ports that the DIU can be connected to */ 22/* The different ports that the DIU can be connected to */
25enum fsl_diu_monitor_port { 23enum fsl_diu_monitor_port {
26 FSL_DIU_PORT_DVI, /* DVI */ 24 FSL_DIU_PORT_DVI, /* DVI */
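All of the ".restart = fsl_rstcr_restart" lines dropped from the board files earlier in this diff are covered by the restart-handler notifier that fsl_soc.c now registers from setup_rstcr(), so the common reboot path reaches the RSTCR write without each platform wiring it into ppc_md. A minimal sketch of that pattern for a hypothetical board-specific handler; only register_restart_handler(), struct notifier_block and NOTIFY_DONE are real kernel API here, the my_board_* names are made up:

/* Hypothetical example of the restart-handler pattern; not part of the patch. */
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

static int my_board_restart(struct notifier_block *nb,
                            unsigned long mode, void *cmd)
{
        /* board-specific reset poke would go here */
        return NOTIFY_DONE;
}

static struct notifier_block my_board_restart_nb = {
        .notifier_call = my_board_restart,
        .priority = 128,        /* same priority the RSTCR handler registers with */
};

static int __init my_board_restart_init(void)
{
        return register_restart_handler(&my_board_restart_nb);
}
arch_initcall(my_board_restart_init);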
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 4d48cecfedd1..b9aac951a90f 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -1249,7 +1249,7 @@ struct mpic * __init mpic_alloc(struct device_node *node,
1249 /* Pick the physical address from the device tree if unspecified */ 1249 /* Pick the physical address from the device tree if unspecified */
1250 if (!phys_addr) { 1250 if (!phys_addr) {
1251 /* Check if it is DCR-based */ 1251 /* Check if it is DCR-based */
1252 if (of_get_property(node, "dcr-reg", NULL)) { 1252 if (of_property_read_bool(node, "dcr-reg")) {
1253 flags |= MPIC_USES_DCR; 1253 flags |= MPIC_USES_DCR;
1254 } else { 1254 } else {
1255 struct resource r; 1255 struct resource r;
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index fe42a2fdf351..e6e90e80519a 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -1,6 +1,7 @@
1menu "SOC (System On Chip) specific Drivers" 1menu "SOC (System On Chip) specific Drivers"
2 2
3source "drivers/soc/bcm/Kconfig" 3source "drivers/soc/bcm/Kconfig"
4source "drivers/soc/fsl/qbman/Kconfig"
4source "drivers/soc/fsl/qe/Kconfig" 5source "drivers/soc/fsl/qe/Kconfig"
5source "drivers/soc/mediatek/Kconfig" 6source "drivers/soc/mediatek/Kconfig"
6source "drivers/soc/qcom/Kconfig" 7source "drivers/soc/qcom/Kconfig"
diff --git a/drivers/soc/fsl/Makefile b/drivers/soc/fsl/Makefile
index 203307fd92c1..75e1f5334821 100644
--- a/drivers/soc/fsl/Makefile
+++ b/drivers/soc/fsl/Makefile
@@ -2,5 +2,6 @@
2# Makefile for the Linux Kernel SOC fsl specific device drivers 2# Makefile for the Linux Kernel SOC fsl specific device drivers
3# 3#
4 4
5obj-$(CONFIG_FSL_DPAA) += qbman/
5obj-$(CONFIG_QUICC_ENGINE) += qe/ 6obj-$(CONFIG_QUICC_ENGINE) += qe/
6obj-$(CONFIG_CPM) += qe/ 7obj-$(CONFIG_CPM) += qe/
diff --git a/drivers/soc/fsl/qbman/Kconfig b/drivers/soc/fsl/qbman/Kconfig
new file mode 100644
index 000000000000..757033c0586c
--- /dev/null
+++ b/drivers/soc/fsl/qbman/Kconfig
@@ -0,0 +1,67 @@
1menuconfig FSL_DPAA
2 bool "Freescale DPAA 1.x support"
3 depends on FSL_SOC_BOOKE
4 select GENERIC_ALLOCATOR
5 help
6 The Freescale Data Path Acceleration Architecture (DPAA) is a set of
7 hardware components on specific QorIQ multicore processors.
8 This architecture provides the infrastructure to support simplified
9 sharing of networking interfaces and accelerators by multiple CPUs.
10 The major h/w blocks composing DPAA are BMan and QMan.
11
12 The Buffer Manager (BMan) is a hardware buffer pool management block
13 that allows software and accelerators on the datapath to acquire and
14 release buffers in order to build frames.
15
16 The Queue Manager (QMan) is a hardware queue management block
17 that allows software and accelerators on the datapath to enqueue and
18 dequeue frames in order to communicate.
19
20if FSL_DPAA
21
22config FSL_DPAA_CHECKING
23 bool "Additional driver checking"
24 help
25 Compiles in additional checks, to sanity-check the drivers and
26 any use of the exported API. Not recommended for performance.
27
28config FSL_BMAN_TEST
29 tristate "BMan self-tests"
30 help
31 Compile the BMan self-test code. These tests will
32 exercise the BMan APIs to confirm functionality
33 of both the software drivers and hardware device.
34
35config FSL_BMAN_TEST_API
36 bool "High-level API self-test"
37 depends on FSL_BMAN_TEST
38 default y
39 help
40 This requires the presence of cpu-affine portals, and performs
41 high-level API testing with them (whichever portal(s) are affine
42 to the cpu(s) the test executes on).
43
44config FSL_QMAN_TEST
45 tristate "QMan self-tests"
46 help
47 Compile self-test code for QMan.
48
49config FSL_QMAN_TEST_API
50 bool "QMan high-level self-test"
51 depends on FSL_QMAN_TEST
52 default y
53 help
54 This requires the presence of cpu-affine portals, and performs
55 high-level API testing with them (whichever portal(s) are affine to
56 the cpu(s) the test executes on).
57
58config FSL_QMAN_TEST_STASH
59 bool "QMan 'hot potato' data-stashing self-test"
60 depends on FSL_QMAN_TEST
61 default y
62 help
63 This performs a "hot potato" style test enqueuing/dequeuing a frame
64 across a series of FQs scheduled to different portals (and cpus), with
65 DQRR, data and context stashing always on.
66
67endif # FSL_DPAA
diff --git a/drivers/soc/fsl/qbman/Makefile b/drivers/soc/fsl/qbman/Makefile
new file mode 100644
index 000000000000..7ae199f1664e
--- /dev/null
+++ b/drivers/soc/fsl/qbman/Makefile
@@ -0,0 +1,12 @@
1obj-$(CONFIG_FSL_DPAA) += bman_ccsr.o qman_ccsr.o \
2 bman_portal.o qman_portal.o \
3 bman.o qman.o
4
5obj-$(CONFIG_FSL_BMAN_TEST) += bman-test.o
6bman-test-y = bman_test.o
7bman-test-$(CONFIG_FSL_BMAN_TEST_API) += bman_test_api.o
8
9obj-$(CONFIG_FSL_QMAN_TEST) += qman-test.o
10qman-test-y = qman_test.o
11qman-test-$(CONFIG_FSL_QMAN_TEST_API) += qman_test_api.o
12qman-test-$(CONFIG_FSL_QMAN_TEST_STASH) += qman_test_stash.o
diff --git a/drivers/soc/fsl/qbman/bman.c b/drivers/soc/fsl/qbman/bman.c
new file mode 100644
index 000000000000..ffa48fdbb1a9
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman.c
@@ -0,0 +1,797 @@
1/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
2 *
3 * Redistribution and use in source and binary forms, with or without
4 * modification, are permitted provided that the following conditions are met:
5 * * Redistributions of source code must retain the above copyright
6 * notice, this list of conditions and the following disclaimer.
7 * * Redistributions in binary form must reproduce the above copyright
8 * notice, this list of conditions and the following disclaimer in the
9 * documentation and/or other materials provided with the distribution.
10 * * Neither the name of Freescale Semiconductor nor the
11 * names of its contributors may be used to endorse or promote products
12 * derived from this software without specific prior written permission.
13 *
14 * ALTERNATIVELY, this software may be distributed under the terms of the
15 * GNU General Public License ("GPL") as published by the Free Software
16 * Foundation, either version 2 of that License or (at your option) any
17 * later version.
18 *
19 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
20 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
23 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#include "bman_priv.h"
32
33#define IRQNAME "BMan portal %d"
34#define MAX_IRQNAME 16 /* big enough for "BMan portal %d" */
35
36/* Portal register assists */
37
38/* Cache-inhibited register offsets */
39#define BM_REG_RCR_PI_CINH 0x0000
40#define BM_REG_RCR_CI_CINH 0x0004
41#define BM_REG_RCR_ITR 0x0008
42#define BM_REG_CFG 0x0100
43#define BM_REG_SCN(n) (0x0200 + ((n) << 2))
44#define BM_REG_ISR 0x0e00
45#define BM_REG_IER 0x0e04
46#define BM_REG_ISDR 0x0e08
47#define BM_REG_IIR 0x0e0c
48
49/* Cache-enabled register offsets */
50#define BM_CL_CR 0x0000
51#define BM_CL_RR0 0x0100
52#define BM_CL_RR1 0x0140
53#define BM_CL_RCR 0x1000
54#define BM_CL_RCR_PI_CENA 0x3000
55#define BM_CL_RCR_CI_CENA 0x3100
56
57/*
58 * Portal modes.
59 * Enum types:
60 * pmode == production mode
61 * cmode == consumption mode
62 * Enum values use 3-letter codes. First letter matches the portal mode,
63 * remaining two letters indicate:
64 * ci == cache-inhibited portal register
65 * ce == cache-enabled portal register
66 * vb == in-band valid-bit (cache-enabled)
67 */
68enum bm_rcr_pmode { /* matches BCSP_CFG::RPM */
69 bm_rcr_pci = 0, /* PI index, cache-inhibited */
70 bm_rcr_pce = 1, /* PI index, cache-enabled */
71 bm_rcr_pvb = 2 /* valid-bit */
72};
73enum bm_rcr_cmode { /* s/w-only */
74 bm_rcr_cci, /* CI index, cache-inhibited */
75 bm_rcr_cce /* CI index, cache-enabled */
76};
77
78
79/* --- Portal structures --- */
80
81#define BM_RCR_SIZE 8
82
83/* Release Command */
84struct bm_rcr_entry {
85 union {
86 struct {
87 u8 _ncw_verb; /* writes to this are non-coherent */
88 u8 bpid; /* used with BM_RCR_VERB_CMD_BPID_SINGLE */
89 u8 __reserved1[62];
90 };
91 struct bm_buffer bufs[8];
92 };
93};
94#define BM_RCR_VERB_VBIT 0x80
95#define BM_RCR_VERB_CMD_MASK 0x70 /* one of two values; */
96#define BM_RCR_VERB_CMD_BPID_SINGLE 0x20
97#define BM_RCR_VERB_CMD_BPID_MULTI 0x30
98#define BM_RCR_VERB_BUFCOUNT_MASK 0x0f /* values 1..8 */
99
100struct bm_rcr {
101 struct bm_rcr_entry *ring, *cursor;
102 u8 ci, available, ithresh, vbit;
103#ifdef CONFIG_FSL_DPAA_CHECKING
104 u32 busy;
105 enum bm_rcr_pmode pmode;
106 enum bm_rcr_cmode cmode;
107#endif
108};
109
110/* MC (Management Command) command */
111struct bm_mc_command {
112 u8 _ncw_verb; /* writes to this are non-coherent */
113 u8 bpid; /* used by acquire command */
114 u8 __reserved[62];
115};
116#define BM_MCC_VERB_VBIT 0x80
117#define BM_MCC_VERB_CMD_MASK 0x70 /* where the verb contains; */
118#define BM_MCC_VERB_CMD_ACQUIRE 0x10
119#define BM_MCC_VERB_CMD_QUERY 0x40
120#define BM_MCC_VERB_ACQUIRE_BUFCOUNT 0x0f /* values 1..8 go here */
121
122/* MC result, Acquire and Query Response */
123union bm_mc_result {
124 struct {
125 u8 verb;
126 u8 bpid;
127 u8 __reserved[62];
128 };
129 struct bm_buffer bufs[8];
130};
131#define BM_MCR_VERB_VBIT 0x80
132#define BM_MCR_VERB_CMD_MASK BM_MCC_VERB_CMD_MASK
133#define BM_MCR_VERB_CMD_ACQUIRE BM_MCC_VERB_CMD_ACQUIRE
134#define BM_MCR_VERB_CMD_QUERY BM_MCC_VERB_CMD_QUERY
135#define BM_MCR_VERB_CMD_ERR_INVALID 0x60
136#define BM_MCR_VERB_CMD_ERR_ECC 0x70
137#define BM_MCR_VERB_ACQUIRE_BUFCOUNT BM_MCC_VERB_ACQUIRE_BUFCOUNT /* 0..8 */
138#define BM_MCR_TIMEOUT 10000 /* us */
139
140struct bm_mc {
141 struct bm_mc_command *cr;
142 union bm_mc_result *rr;
143 u8 rridx, vbit;
144#ifdef CONFIG_FSL_DPAA_CHECKING
145 enum {
146 /* Can only be _mc_start()ed */
147 mc_idle,
148 /* Can only be _mc_commit()ed or _mc_abort()ed */
149 mc_user,
150 /* Can only be _mc_retry()ed */
151 mc_hw
152 } state;
153#endif
154};
155
156struct bm_addr {
157 void __iomem *ce; /* cache-enabled */
158 void __iomem *ci; /* cache-inhibited */
159};
160
161struct bm_portal {
162 struct bm_addr addr;
163 struct bm_rcr rcr;
164 struct bm_mc mc;
165} ____cacheline_aligned;
166
167/* Cache-inhibited register access. */
168static inline u32 bm_in(struct bm_portal *p, u32 offset)
169{
170 return __raw_readl(p->addr.ci + offset);
171}
172
173static inline void bm_out(struct bm_portal *p, u32 offset, u32 val)
174{
175 __raw_writel(val, p->addr.ci + offset);
176}
177
178/* Cache Enabled Portal Access */
179static inline void bm_cl_invalidate(struct bm_portal *p, u32 offset)
180{
181 dpaa_invalidate(p->addr.ce + offset);
182}
183
184static inline void bm_cl_touch_ro(struct bm_portal *p, u32 offset)
185{
186 dpaa_touch_ro(p->addr.ce + offset);
187}
188
189static inline u32 bm_ce_in(struct bm_portal *p, u32 offset)
190{
191 return __raw_readl(p->addr.ce + offset);
192}
193
194struct bman_portal {
195 struct bm_portal p;
196 /* interrupt sources processed by portal_isr(), configurable */
197 unsigned long irq_sources;
198 /* probing time config params for cpu-affine portals */
199 const struct bm_portal_config *config;
200 char irqname[MAX_IRQNAME];
201};
202
203static cpumask_t affine_mask;
204static DEFINE_SPINLOCK(affine_mask_lock);
205static DEFINE_PER_CPU(struct bman_portal, bman_affine_portal);
206
207static inline struct bman_portal *get_affine_portal(void)
208{
209 return &get_cpu_var(bman_affine_portal);
210}
211
212static inline void put_affine_portal(void)
213{
214 put_cpu_var(bman_affine_portal);
215}
216
217/*
218 * This object type refers to a pool, it isn't *the* pool. There may be
219 * more than one such object per BMan buffer pool, eg. if different users of the
220 * pool are operating via different portals.
221 */
222struct bman_pool {
223 /* index of the buffer pool to encapsulate (0-63) */
224 u32 bpid;
225 /* Used for hash-table admin when using depletion notifications. */
226 struct bman_portal *portal;
227 struct bman_pool *next;
228};
229
230static u32 poll_portal_slow(struct bman_portal *p, u32 is);
231
232static irqreturn_t portal_isr(int irq, void *ptr)
233{
234 struct bman_portal *p = ptr;
235 struct bm_portal *portal = &p->p;
236 u32 clear = p->irq_sources;
237 u32 is = bm_in(portal, BM_REG_ISR) & p->irq_sources;
238
239 if (unlikely(!is))
240 return IRQ_NONE;
241
242 clear |= poll_portal_slow(p, is);
243 bm_out(portal, BM_REG_ISR, clear);
244 return IRQ_HANDLED;
245}
246
247/* --- RCR API --- */
248
249#define RCR_SHIFT ilog2(sizeof(struct bm_rcr_entry))
250#define RCR_CARRY (uintptr_t)(BM_RCR_SIZE << RCR_SHIFT)
251
252/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
253static struct bm_rcr_entry *rcr_carryclear(struct bm_rcr_entry *p)
254{
255 uintptr_t addr = (uintptr_t)p;
256
257 addr &= ~RCR_CARRY;
258
259 return (struct bm_rcr_entry *)addr;
260}
261
262#ifdef CONFIG_FSL_DPAA_CHECKING
263/* Bit-wise logic to convert a ring pointer to a ring index */
264static int rcr_ptr2idx(struct bm_rcr_entry *e)
265{
266 return ((uintptr_t)e >> RCR_SHIFT) & (BM_RCR_SIZE - 1);
267}
268#endif
269
270/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
271static inline void rcr_inc(struct bm_rcr *rcr)
272{
273 /* increment to the next RCR pointer and handle overflow and 'vbit' */
274 struct bm_rcr_entry *partial = rcr->cursor + 1;
275
276 rcr->cursor = rcr_carryclear(partial);
277 if (partial != rcr->cursor)
278 rcr->vbit ^= BM_RCR_VERB_VBIT;
279}
280
281static int bm_rcr_get_avail(struct bm_portal *portal)
282{
283 struct bm_rcr *rcr = &portal->rcr;
284
285 return rcr->available;
286}
287
288static int bm_rcr_get_fill(struct bm_portal *portal)
289{
290 struct bm_rcr *rcr = &portal->rcr;
291
292 return BM_RCR_SIZE - 1 - rcr->available;
293}
294
295static void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh)
296{
297 struct bm_rcr *rcr = &portal->rcr;
298
299 rcr->ithresh = ithresh;
300 bm_out(portal, BM_REG_RCR_ITR, ithresh);
301}
302
303static void bm_rcr_cce_prefetch(struct bm_portal *portal)
304{
305 __maybe_unused struct bm_rcr *rcr = &portal->rcr;
306
307 DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
308 bm_cl_touch_ro(portal, BM_CL_RCR_CI_CENA);
309}
310
311static u8 bm_rcr_cce_update(struct bm_portal *portal)
312{
313 struct bm_rcr *rcr = &portal->rcr;
314 u8 diff, old_ci = rcr->ci;
315
316 DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
317 rcr->ci = bm_ce_in(portal, BM_CL_RCR_CI_CENA) & (BM_RCR_SIZE - 1);
318 bm_cl_invalidate(portal, BM_CL_RCR_CI_CENA);
319 diff = dpaa_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
320 rcr->available += diff;
321 return diff;
322}
323
324static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal)
325{
326 struct bm_rcr *rcr = &portal->rcr;
327
328 DPAA_ASSERT(!rcr->busy);
329 if (!rcr->available)
330 return NULL;
331#ifdef CONFIG_FSL_DPAA_CHECKING
332 rcr->busy = 1;
333#endif
334 dpaa_zero(rcr->cursor);
335 return rcr->cursor;
336}
337
338static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb)
339{
340 struct bm_rcr *rcr = &portal->rcr;
341 struct bm_rcr_entry *rcursor;
342
343 DPAA_ASSERT(rcr->busy);
344 DPAA_ASSERT(rcr->pmode == bm_rcr_pvb);
345 DPAA_ASSERT(rcr->available >= 1);
346 dma_wmb();
347 rcursor = rcr->cursor;
348 rcursor->_ncw_verb = myverb | rcr->vbit;
349 dpaa_flush(rcursor);
350 rcr_inc(rcr);
351 rcr->available--;
352#ifdef CONFIG_FSL_DPAA_CHECKING
353 rcr->busy = 0;
354#endif
355}
356
357static int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode,
358 enum bm_rcr_cmode cmode)
359{
360 struct bm_rcr *rcr = &portal->rcr;
361 u32 cfg;
362 u8 pi;
363
364 rcr->ring = portal->addr.ce + BM_CL_RCR;
365 rcr->ci = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
366 pi = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
367 rcr->cursor = rcr->ring + pi;
368 rcr->vbit = (bm_in(portal, BM_REG_RCR_PI_CINH) & BM_RCR_SIZE) ?
369 BM_RCR_VERB_VBIT : 0;
370 rcr->available = BM_RCR_SIZE - 1
371 - dpaa_cyc_diff(BM_RCR_SIZE, rcr->ci, pi);
372 rcr->ithresh = bm_in(portal, BM_REG_RCR_ITR);
373#ifdef CONFIG_FSL_DPAA_CHECKING
374 rcr->busy = 0;
375 rcr->pmode = pmode;
376 rcr->cmode = cmode;
377#endif
378 cfg = (bm_in(portal, BM_REG_CFG) & 0xffffffe0)
379 | (pmode & 0x3); /* BCSP_CFG::RPM */
380 bm_out(portal, BM_REG_CFG, cfg);
381 return 0;
382}
383
384static void bm_rcr_finish(struct bm_portal *portal)
385{
386#ifdef CONFIG_FSL_DPAA_CHECKING
387 struct bm_rcr *rcr = &portal->rcr;
388 int i;
389
390 DPAA_ASSERT(!rcr->busy);
391
392 i = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
393 if (i != rcr_ptr2idx(rcr->cursor))
394 pr_crit("losing uncommited RCR entries\n");
395
396 i = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
397 if (i != rcr->ci)
398 pr_crit("missing existing RCR completions\n");
399 if (rcr->ci != rcr_ptr2idx(rcr->cursor))
400 pr_crit("RCR destroyed unquiesced\n");
401#endif
402}
403
404/* --- Management command API --- */
405static int bm_mc_init(struct bm_portal *portal)
406{
407 struct bm_mc *mc = &portal->mc;
408
409 mc->cr = portal->addr.ce + BM_CL_CR;
410 mc->rr = portal->addr.ce + BM_CL_RR0;
411 mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & BM_MCC_VERB_VBIT) ?
412 0 : 1;
413 mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0;
414#ifdef CONFIG_FSL_DPAA_CHECKING
415 mc->state = mc_idle;
416#endif
417 return 0;
418}
419
420static void bm_mc_finish(struct bm_portal *portal)
421{
422#ifdef CONFIG_FSL_DPAA_CHECKING
423 struct bm_mc *mc = &portal->mc;
424
425 DPAA_ASSERT(mc->state == mc_idle);
426 if (mc->state != mc_idle)
427 pr_crit("Losing incomplete MC command\n");
428#endif
429}
430
431static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal)
432{
433 struct bm_mc *mc = &portal->mc;
434
435 DPAA_ASSERT(mc->state == mc_idle);
436#ifdef CONFIG_FSL_DPAA_CHECKING
437 mc->state = mc_user;
438#endif
439 dpaa_zero(mc->cr);
440 return mc->cr;
441}
442
443static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb)
444{
445 struct bm_mc *mc = &portal->mc;
446 union bm_mc_result *rr = mc->rr + mc->rridx;
447
448 DPAA_ASSERT(mc->state == mc_user);
449 dma_wmb();
450 mc->cr->_ncw_verb = myverb | mc->vbit;
451 dpaa_flush(mc->cr);
452 dpaa_invalidate_touch_ro(rr);
453#ifdef CONFIG_FSL_DPAA_CHECKING
454 mc->state = mc_hw;
455#endif
456}
457
458static inline union bm_mc_result *bm_mc_result(struct bm_portal *portal)
459{
460 struct bm_mc *mc = &portal->mc;
461 union bm_mc_result *rr = mc->rr + mc->rridx;
462
463 DPAA_ASSERT(mc->state == mc_hw);
464 /*
465 * The inactive response register's verb byte always returns zero until
466 * its command is submitted and completed. This includes the valid-bit,
467 * in case you were wondering...
468 */
469 if (!__raw_readb(&rr->verb)) {
470 dpaa_invalidate_touch_ro(rr);
471 return NULL;
472 }
473 mc->rridx ^= 1;
474 mc->vbit ^= BM_MCC_VERB_VBIT;
475#ifdef CONFIG_FSL_DPAA_CHECKING
476 mc->state = mc_idle;
477#endif
478 return rr;
479}
480
481static inline int bm_mc_result_timeout(struct bm_portal *portal,
482 union bm_mc_result **mcr)
483{
484 int timeout = BM_MCR_TIMEOUT;
485
486 do {
487 *mcr = bm_mc_result(portal);
488 if (*mcr)
489 break;
490 udelay(1);
491 } while (--timeout);
492
493 return timeout;
494}
495
496/* Disable all BSCN interrupts for the portal */
497static void bm_isr_bscn_disable(struct bm_portal *portal)
498{
499 bm_out(portal, BM_REG_SCN(0), 0);
500 bm_out(portal, BM_REG_SCN(1), 0);
501}
502
503static int bman_create_portal(struct bman_portal *portal,
504 const struct bm_portal_config *c)
505{
506 struct bm_portal *p;
507 int ret;
508
509 p = &portal->p;
510 /*
511 * prep the low-level portal struct with the mapped addresses from the
512 * config, everything that follows depends on it and "config" is more
513 * for (de)reference...
514 */
515 p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
516 p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
517 if (bm_rcr_init(p, bm_rcr_pvb, bm_rcr_cce)) {
518 dev_err(c->dev, "RCR initialisation failed\n");
519 goto fail_rcr;
520 }
521 if (bm_mc_init(p)) {
522 dev_err(c->dev, "MC initialisation failed\n");
523 goto fail_mc;
524 }
525 /*
526 * Default to all BPIDs disabled, we enable as required at
527 * run-time.
528 */
529 bm_isr_bscn_disable(p);
530
531 /* Write-to-clear any stale interrupt status bits */
532 bm_out(p, BM_REG_ISDR, 0xffffffff);
533 portal->irq_sources = 0;
534 bm_out(p, BM_REG_IER, 0);
535 bm_out(p, BM_REG_ISR, 0xffffffff);
536 snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
537 if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
538 dev_err(c->dev, "request_irq() failed\n");
539 goto fail_irq;
540 }
541 if (c->cpu != -1 && irq_can_set_affinity(c->irq) &&
542 irq_set_affinity(c->irq, cpumask_of(c->cpu))) {
543 dev_err(c->dev, "irq_set_affinity() failed\n");
544 goto fail_affinity;
545 }
546
547 /* Need RCR to be empty before continuing */
548 ret = bm_rcr_get_fill(p);
549 if (ret) {
550 dev_err(c->dev, "RCR unclean\n");
551 goto fail_rcr_empty;
552 }
553 /* Success */
554 portal->config = c;
555
556 bm_out(p, BM_REG_ISDR, 0);
557 bm_out(p, BM_REG_IIR, 0);
558
559 return 0;
560
561fail_rcr_empty:
562fail_affinity:
563 free_irq(c->irq, portal);
564fail_irq:
565 bm_mc_finish(p);
566fail_mc:
567 bm_rcr_finish(p);
568fail_rcr:
569 return -EIO;
570}
571
572struct bman_portal *bman_create_affine_portal(const struct bm_portal_config *c)
573{
574 struct bman_portal *portal;
575 int err;
576
577 portal = &per_cpu(bman_affine_portal, c->cpu);
578 err = bman_create_portal(portal, c);
579 if (err)
580 return NULL;
581
582 spin_lock(&affine_mask_lock);
583 cpumask_set_cpu(c->cpu, &affine_mask);
584 spin_unlock(&affine_mask_lock);
585
586 return portal;
587}
588
589static u32 poll_portal_slow(struct bman_portal *p, u32 is)
590{
591 u32 ret = is;
592
593 if (is & BM_PIRQ_RCRI) {
594 bm_rcr_cce_update(&p->p);
595 bm_rcr_set_ithresh(&p->p, 0);
596 bm_out(&p->p, BM_REG_ISR, BM_PIRQ_RCRI);
597 is &= ~BM_PIRQ_RCRI;
598 }
599
600 /* There should be no status register bits left undefined */
601 DPAA_ASSERT(!is);
602 return ret;
603}
604
605int bman_p_irqsource_add(struct bman_portal *p, u32 bits)
606{
607 unsigned long irqflags;
608
609 local_irq_save(irqflags);
610 set_bits(bits & BM_PIRQ_VISIBLE, &p->irq_sources);
611 bm_out(&p->p, BM_REG_IER, p->irq_sources);
612 local_irq_restore(irqflags);
613 return 0;
614}
615
616static int bm_shutdown_pool(u32 bpid)
617{
618 struct bm_mc_command *bm_cmd;
619 union bm_mc_result *bm_res;
620
621 while (1) {
622 struct bman_portal *p = get_affine_portal();
623 /* Acquire buffers until empty */
624 bm_cmd = bm_mc_start(&p->p);
625 bm_cmd->bpid = bpid;
626 bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE | 1);
627 if (!bm_mc_result_timeout(&p->p, &bm_res)) {
628 put_affine_portal();
629 pr_crit("BMan Acquire Command timed out\n");
630 return -ETIMEDOUT;
631 }
632 if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) {
633 put_affine_portal();
634 /* Pool is empty */
635 return 0;
636 }
637 put_affine_portal();
638 }
639
640 return 0;
641}
642
643struct gen_pool *bm_bpalloc;
644
645static int bm_alloc_bpid_range(u32 *result, u32 count)
646{
647 unsigned long addr;
648
649 addr = gen_pool_alloc(bm_bpalloc, count);
650 if (!addr)
651 return -ENOMEM;
652
653 *result = addr & ~DPAA_GENALLOC_OFF;
654
655 return 0;
656}
657
658static int bm_release_bpid(u32 bpid)
659{
660 int ret;
661
662 ret = bm_shutdown_pool(bpid);
663 if (ret) {
664 pr_debug("BPID %d leaked\n", bpid);
665 return ret;
666 }
667
668 gen_pool_free(bm_bpalloc, bpid | DPAA_GENALLOC_OFF, 1);
669 return 0;
670}
671
672struct bman_pool *bman_new_pool(void)
673{
674 struct bman_pool *pool = NULL;
675 u32 bpid;
676
677 if (bm_alloc_bpid_range(&bpid, 1))
678 return NULL;
679
680 pool = kmalloc(sizeof(*pool), GFP_KERNEL);
681 if (!pool)
682 goto err;
683
684 pool->bpid = bpid;
685
686 return pool;
687err:
688 bm_release_bpid(bpid);
689 kfree(pool);
690 return NULL;
691}
692EXPORT_SYMBOL(bman_new_pool);
693
694void bman_free_pool(struct bman_pool *pool)
695{
696 bm_release_bpid(pool->bpid);
697
698 kfree(pool);
699}
700EXPORT_SYMBOL(bman_free_pool);
701
702int bman_get_bpid(const struct bman_pool *pool)
703{
704 return pool->bpid;
705}
706EXPORT_SYMBOL(bman_get_bpid);
707
708static void update_rcr_ci(struct bman_portal *p, int avail)
709{
710 if (avail)
711 bm_rcr_cce_prefetch(&p->p);
712 else
713 bm_rcr_cce_update(&p->p);
714}
715
716int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num)
717{
718 struct bman_portal *p;
719 struct bm_rcr_entry *r;
720 unsigned long irqflags;
721 int avail, timeout = 1000; /* 1ms */
722 int i = num - 1;
723
724 DPAA_ASSERT(num > 0 && num <= 8);
725
726 do {
727 p = get_affine_portal();
728 local_irq_save(irqflags);
729 avail = bm_rcr_get_avail(&p->p);
730 if (avail < 2)
731 update_rcr_ci(p, avail);
732 r = bm_rcr_start(&p->p);
733 local_irq_restore(irqflags);
734 put_affine_portal();
735 if (likely(r))
736 break;
737
738 udelay(1);
739 } while (--timeout);
740
741 if (unlikely(!timeout))
742 return -ETIMEDOUT;
743
744 p = get_affine_portal();
745 local_irq_save(irqflags);
746 /*
747 * we can copy all but the first entry, as this can trigger badness
748 * with the valid-bit
749 */
750 bm_buffer_set64(r->bufs, bm_buffer_get64(bufs));
751 bm_buffer_set_bpid(r->bufs, pool->bpid);
752 if (i)
753 memcpy(&r->bufs[1], &bufs[1], i * sizeof(bufs[0]));
754
755 bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE |
756 (num & BM_RCR_VERB_BUFCOUNT_MASK));
757
758 local_irq_restore(irqflags);
759 put_affine_portal();
760 return 0;
761}
762EXPORT_SYMBOL(bman_release);
763
764int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num)
765{
766 struct bman_portal *p = get_affine_portal();
767 struct bm_mc_command *mcc;
768 union bm_mc_result *mcr;
769 int ret;
770
771 DPAA_ASSERT(num > 0 && num <= 8);
772
773 mcc = bm_mc_start(&p->p);
774 mcc->bpid = pool->bpid;
775 bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE |
776 (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT));
777 if (!bm_mc_result_timeout(&p->p, &mcr)) {
778 put_affine_portal();
779 pr_crit("BMan Acquire Timeout\n");
780 return -ETIMEDOUT;
781 }
782 ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;
783 if (bufs)
784 memcpy(&bufs[0], &mcr->bufs[0], num * sizeof(bufs[0]));
785
786 put_affine_portal();
787 if (ret != num)
788 ret = -ENOMEM;
789 return ret;
790}
791EXPORT_SYMBOL(bman_acquire);
792
793const struct bm_portal_config *
794bman_get_bm_portal_config(const struct bman_portal *portal)
795{
796 return portal->config;
797}
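The exported calls above (bman_new_pool(), bman_release(), bman_acquire(), bman_free_pool()) are the buffer acquire/release interface the qbman Kconfig help text describes. A minimal round-trip through that API, purely as an illustration: the demo function and the header path are assumptions, only the bman_* and bm_buffer_set64() names come from this series.

/* Hypothetical caller; assumes the public header added by this series. */
#include <linux/errno.h>
#include <linux/types.h>
#include <soc/fsl/bman.h>       /* assumed header location */

static int demo_buffer_roundtrip(dma_addr_t buf_addr)
{
        struct bman_pool *pool;
        struct bm_buffer buf = {};
        int ret;

        pool = bman_new_pool();                 /* allocates a free BPID */
        if (!pool)
                return -ENOMEM;

        bm_buffer_set64(&buf, buf_addr);        /* point the entry at our buffer */
        ret = bman_release(pool, &buf, 1);      /* hand it to the hardware pool */
        if (!ret) {
                ret = bman_acquire(pool, &buf, 1);      /* pull it back out */
                if (ret == 1)                   /* bman_acquire() returns the count */
                        ret = 0;
        }

        bman_free_pool(pool);                   /* drains and releases the BPID */
        return ret;
}

bman_release() and bman_acquire() each move at most 8 buffers per call (BM_RCR_VERB_BUFCOUNT_MASK / BM_MCC_VERB_ACQUIRE_BUFCOUNT), and bman_acquire() reports how many buffers it actually got, so callers compare the return value with the count they requested.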
diff --git a/drivers/soc/fsl/qbman/bman_ccsr.c b/drivers/soc/fsl/qbman/bman_ccsr.c
new file mode 100644
index 000000000000..9deb0524543f
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_ccsr.c
@@ -0,0 +1,263 @@
1/* Copyright (c) 2009 - 2016 Freescale Semiconductor, Inc.
2 *
3 * Redistribution and use in source and binary forms, with or without
4 * modification, are permitted provided that the following conditions are met:
5 * * Redistributions of source code must retain the above copyright
6 * notice, this list of conditions and the following disclaimer.
7 * * Redistributions in binary form must reproduce the above copyright
8 * notice, this list of conditions and the following disclaimer in the
9 * documentation and/or other materials provided with the distribution.
10 * * Neither the name of Freescale Semiconductor nor the
11 * names of its contributors may be used to endorse or promote products
12 * derived from this software without specific prior written permission.
13 *
14 * ALTERNATIVELY, this software may be distributed under the terms of the
15 * GNU General Public License ("GPL") as published by the Free Software
16 * Foundation, either version 2 of that License or (at your option) any
17 * later version.
18 *
19 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
20 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
23 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#include "bman_priv.h"
32
33u16 bman_ip_rev;
34EXPORT_SYMBOL(bman_ip_rev);
35
36/* Register offsets */
37#define REG_FBPR_FPC 0x0800
38#define REG_ECSR 0x0a00
39#define REG_ECIR 0x0a04
40#define REG_EADR 0x0a08
41#define REG_EDATA(n) (0x0a10 + ((n) * 0x04))
42#define REG_SBEC(n) (0x0a80 + ((n) * 0x04))
43#define REG_IP_REV_1 0x0bf8
44#define REG_IP_REV_2 0x0bfc
45#define REG_FBPR_BARE 0x0c00
46#define REG_FBPR_BAR 0x0c04
47#define REG_FBPR_AR 0x0c10
48#define REG_SRCIDR 0x0d04
49#define REG_LIODNR 0x0d08
50#define REG_ERR_ISR 0x0e00
51#define REG_ERR_IER 0x0e04
52#define REG_ERR_ISDR 0x0e08
53
54/* Used by all error interrupt registers except 'inhibit' */
55#define BM_EIRQ_IVCI 0x00000010 /* Invalid Command Verb */
56#define BM_EIRQ_FLWI 0x00000008 /* FBPR Low Watermark */
57#define BM_EIRQ_MBEI 0x00000004 /* Multi-bit ECC Error */
58#define BM_EIRQ_SBEI 0x00000002 /* Single-bit ECC Error */
59#define BM_EIRQ_BSCN 0x00000001 /* pool State Change Notification */
60
61struct bman_hwerr_txt {
62 u32 mask;
63 const char *txt;
64};
65
66static const struct bman_hwerr_txt bman_hwerr_txts[] = {
67 { BM_EIRQ_IVCI, "Invalid Command Verb" },
68 { BM_EIRQ_FLWI, "FBPR Low Watermark" },
69 { BM_EIRQ_MBEI, "Multi-bit ECC Error" },
70 { BM_EIRQ_SBEI, "Single-bit ECC Error" },
71 { BM_EIRQ_BSCN, "Pool State Change Notification" },
72};
73
74/* Trigger the low water mark interrupt only once */
75#define BMAN_ERRS_TO_DISABLE BM_EIRQ_FLWI
76
77/* Pointer to the start of the BMan's CCSR space */
78static u32 __iomem *bm_ccsr_start;
79
80static inline u32 bm_ccsr_in(u32 offset)
81{
82 return ioread32be(bm_ccsr_start + offset/4);
83}
84static inline void bm_ccsr_out(u32 offset, u32 val)
85{
86 iowrite32be(val, bm_ccsr_start + offset/4);
87}
88
89static void bm_get_version(u16 *id, u8 *major, u8 *minor)
90{
91 u32 v = bm_ccsr_in(REG_IP_REV_1);
92 *id = (v >> 16);
93 *major = (v >> 8) & 0xff;
94 *minor = v & 0xff;
95}
96
97/* signal transactions for FBPRs with higher priority */
98#define FBPR_AR_RPRIO_HI BIT(30)
99
100static void bm_set_memory(u64 ba, u32 size)
101{
102 u32 exp = ilog2(size);
103 /* choke if size isn't within range */
104 DPAA_ASSERT(size >= 4096 && size <= 1024*1024*1024 &&
105 is_power_of_2(size));
106 /* choke if '[e]ba' has lower-alignment than 'size' */
107 DPAA_ASSERT(!(ba & (size - 1)));
108 bm_ccsr_out(REG_FBPR_BARE, upper_32_bits(ba));
109 bm_ccsr_out(REG_FBPR_BAR, lower_32_bits(ba));
110 bm_ccsr_out(REG_FBPR_AR, exp - 1);
111}
112
113/*
114 * Location and size of BMan private memory
115 *
116 * Ideally we would use the DMA API to turn rmem->base into a DMA address
117 * (especially if iommu translations ever get involved). Unfortunately, the
118 * DMA API currently does not allow mapping anything that is not backed with
119 * a struct page.
120 */
121static dma_addr_t fbpr_a;
122static size_t fbpr_sz;
123
124static int bman_fbpr(struct reserved_mem *rmem)
125{
126 fbpr_a = rmem->base;
127 fbpr_sz = rmem->size;
128
129 WARN_ON(!(fbpr_a && fbpr_sz));
130
131 return 0;
132}
133RESERVEDMEM_OF_DECLARE(bman_fbpr, "fsl,bman-fbpr", bman_fbpr);
134
135static irqreturn_t bman_isr(int irq, void *ptr)
136{
137 u32 isr_val, ier_val, ecsr_val, isr_mask, i;
138 struct device *dev = ptr;
139
140 ier_val = bm_ccsr_in(REG_ERR_IER);
141 isr_val = bm_ccsr_in(REG_ERR_ISR);
142 ecsr_val = bm_ccsr_in(REG_ECSR);
143 isr_mask = isr_val & ier_val;
144
145 if (!isr_mask)
146 return IRQ_NONE;
147
148 for (i = 0; i < ARRAY_SIZE(bman_hwerr_txts); i++) {
149 if (bman_hwerr_txts[i].mask & isr_mask) {
150 dev_err_ratelimited(dev, "ErrInt: %s\n",
151 bman_hwerr_txts[i].txt);
152 if (bman_hwerr_txts[i].mask & ecsr_val) {
153 /* Re-arm error capture registers */
154 bm_ccsr_out(REG_ECSR, ecsr_val);
155 }
156 if (bman_hwerr_txts[i].mask & BMAN_ERRS_TO_DISABLE) {
157 dev_dbg(dev, "Disabling error 0x%x\n",
158 bman_hwerr_txts[i].mask);
159 ier_val &= ~bman_hwerr_txts[i].mask;
160 bm_ccsr_out(REG_ERR_IER, ier_val);
161 }
162 }
163 }
164 bm_ccsr_out(REG_ERR_ISR, isr_val);
165
166 return IRQ_HANDLED;
167}
168
169static int fsl_bman_probe(struct platform_device *pdev)
170{
171 int ret, err_irq;
172 struct device *dev = &pdev->dev;
173 struct device_node *node = dev->of_node;
174 struct resource *res;
175 u16 id, bm_pool_cnt;
176 u8 major, minor;
177
178 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
179 if (!res) {
180 dev_err(dev, "Can't get %s property 'IORESOURCE_MEM'\n",
181 node->full_name);
182 return -ENXIO;
183 }
184 bm_ccsr_start = devm_ioremap(dev, res->start,
185 res->end - res->start + 1);
186 if (!bm_ccsr_start)
187 return -ENXIO;
188
189 bm_get_version(&id, &major, &minor);
190 if (major == 1 && minor == 0) {
191 bman_ip_rev = BMAN_REV10;
192 bm_pool_cnt = BM_POOL_MAX;
193 } else if (major == 2 && minor == 0) {
194 bman_ip_rev = BMAN_REV20;
195 bm_pool_cnt = 8;
196 } else if (major == 2 && minor == 1) {
197 bman_ip_rev = BMAN_REV21;
198 bm_pool_cnt = BM_POOL_MAX;
199 } else {
200 dev_err(dev, "Unknown Bman version:%04x,%02x,%02x\n",
201 id, major, minor);
202 return -ENODEV;
203 }
204
205 bm_set_memory(fbpr_a, fbpr_sz);
206
207 err_irq = platform_get_irq(pdev, 0);
208 if (err_irq <= 0) {
209 dev_info(dev, "Can't get %s IRQ\n", node->full_name);
210 return -ENODEV;
211 }
212 ret = devm_request_irq(dev, err_irq, bman_isr, IRQF_SHARED, "bman-err",
213 dev);
214 if (ret) {
215 dev_err(dev, "devm_request_irq() failed %d for '%s'\n",
216 ret, node->full_name);
217 return ret;
218 }
219 /* Disable Buffer Pool State Change */
220 bm_ccsr_out(REG_ERR_ISDR, BM_EIRQ_BSCN);
221 /*
222 * Write-to-clear any stale bits, (eg. starvation being asserted prior
223 * to resource allocation during driver init).
224 */
225 bm_ccsr_out(REG_ERR_ISR, 0xffffffff);
226 /* Enable Error Interrupts */
227 bm_ccsr_out(REG_ERR_IER, 0xffffffff);
228
229 bm_bpalloc = devm_gen_pool_create(dev, 0, -1, "bman-bpalloc");
230 if (IS_ERR(bm_bpalloc)) {
231 ret = PTR_ERR(bm_bpalloc);
232 dev_err(dev, "bman-bpalloc pool init failed (%d)\n", ret);
233 return ret;
234 }
235
236 /* seed BMan resource pool */
237 ret = gen_pool_add(bm_bpalloc, DPAA_GENALLOC_OFF, bm_pool_cnt, -1);
238 if (ret) {
239 dev_err(dev, "Failed to seed BPID range [%d..%d] (%d)\n",
240 0, bm_pool_cnt - 1, ret);
241 return ret;
242 }
243
244 return 0;
245};
246
247static const struct of_device_id fsl_bman_ids[] = {
248 {
249 .compatible = "fsl,bman",
250 },
251 {}
252};
253
254static struct platform_driver fsl_bman_driver = {
255 .driver = {
256 .name = KBUILD_MODNAME,
257 .of_match_table = fsl_bman_ids,
258 .suppress_bind_attrs = true,
259 },
260 .probe = fsl_bman_probe,
261};
262
263builtin_platform_driver(fsl_bman_driver);
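bm_set_memory() above programs the Free Buffer Proxy Record (FBPR) area recorded at early boot through the "fsl,bman-fbpr" reserved-memory hook: the 64-bit base is split across FBPR_BARE/FBPR_BAR and the size is stored in FBPR_AR as a power-of-two exponent minus one. A tiny stand-alone illustration of that encoding, with made-up base and size values:

/* Illustrative only; the values are assumptions, the encoding mirrors bm_set_memory(). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t ba = 0x7f000000ULL;    /* assumed reserved base, 16 MiB aligned */
        uint32_t size = 16 << 20;       /* assumed reservation size */
        uint32_t s = size;
        unsigned int exp = 0;

        while (s >>= 1)                 /* ilog2(size) */
                exp++;

        printf("FBPR_BARE=0x%x FBPR_BAR=0x%x FBPR_AR=%u\n",
               (unsigned int)(ba >> 32), (unsigned int)(ba & 0xffffffffu),
               exp - 1);
        return 0;
}

With a 16 MiB reservation exp is 24, so FBPR_AR is written as 23; bm_set_memory() also asserts that the size is a power of two between 4 KiB and 1 GiB and that the base is aligned to it.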
diff --git a/drivers/soc/fsl/qbman/bman_portal.c b/drivers/soc/fsl/qbman/bman_portal.c
new file mode 100644
index 000000000000..6579cc18811a
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_portal.c
@@ -0,0 +1,219 @@
1/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
2 *
3 * Redistribution and use in source and binary forms, with or without
4 * modification, are permitted provided that the following conditions are met:
5 * * Redistributions of source code must retain the above copyright
6 * notice, this list of conditions and the following disclaimer.
7 * * Redistributions in binary form must reproduce the above copyright
8 * notice, this list of conditions and the following disclaimer in the
9 * documentation and/or other materials provided with the distribution.
10 * * Neither the name of Freescale Semiconductor nor the
11 * names of its contributors may be used to endorse or promote products
12 * derived from this software without specific prior written permission.
13 *
14 * ALTERNATIVELY, this software may be distributed under the terms of the
15 * GNU General Public License ("GPL") as published by the Free Software
16 * Foundation, either version 2 of that License or (at your option) any
17 * later version.
18 *
19 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
20 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
23 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#include "bman_priv.h"
32
33static struct bman_portal *affine_bportals[NR_CPUS];
34static struct cpumask portal_cpus;
35/* protect bman global registers and global data shared among portals */
36static DEFINE_SPINLOCK(bman_lock);
37
38static struct bman_portal *init_pcfg(struct bm_portal_config *pcfg)
39{
40 struct bman_portal *p = bman_create_affine_portal(pcfg);
41
42 if (!p) {
43 dev_crit(pcfg->dev, "%s: Portal failure on cpu %d\n",
44 __func__, pcfg->cpu);
45 return NULL;
46 }
47
48 bman_p_irqsource_add(p, BM_PIRQ_RCRI);
49 affine_bportals[pcfg->cpu] = p;
50
51 dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu);
52
53 return p;
54}
55
56static void bman_offline_cpu(unsigned int cpu)
57{
58 struct bman_portal *p = affine_bportals[cpu];
59 const struct bm_portal_config *pcfg;
60
61 if (!p)
62 return;
63
64 pcfg = bman_get_bm_portal_config(p);
65 if (!pcfg)
66 return;
67
68 irq_set_affinity(pcfg->irq, cpumask_of(0));
69}
70
71static void bman_online_cpu(unsigned int cpu)
72{
73 struct bman_portal *p = affine_bportals[cpu];
74 const struct bm_portal_config *pcfg;
75
76 if (!p)
77 return;
78
79 pcfg = bman_get_bm_portal_config(p);
80 if (!pcfg)
81 return;
82
83 irq_set_affinity(pcfg->irq, cpumask_of(cpu));
84}
85
86static int bman_hotplug_cpu_callback(struct notifier_block *nfb,
87 unsigned long action, void *hcpu)
88{
89 unsigned int cpu = (unsigned long)hcpu;
90
91 switch (action) {
92 case CPU_ONLINE:
93 case CPU_ONLINE_FROZEN:
94 bman_online_cpu(cpu);
95 break;
96 case CPU_DOWN_PREPARE:
97 case CPU_DOWN_PREPARE_FROZEN:
98 bman_offline_cpu(cpu);
99 }
100
101 return NOTIFY_OK;
102}
103
104static struct notifier_block bman_hotplug_cpu_notifier = {
105 .notifier_call = bman_hotplug_cpu_callback,
106};
107
108static int bman_portal_probe(struct platform_device *pdev)
109{
110 struct device *dev = &pdev->dev;
111 struct device_node *node = dev->of_node;
112 struct bm_portal_config *pcfg;
113 struct resource *addr_phys[2];
114 void __iomem *va;
115 int irq, cpu;
116
117 pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
118 if (!pcfg)
119 return -ENOMEM;
120
121 pcfg->dev = dev;
122
123 addr_phys[0] = platform_get_resource(pdev, IORESOURCE_MEM,
124 DPAA_PORTAL_CE);
125 if (!addr_phys[0]) {
126 dev_err(dev, "Can't get %s property 'reg::CE'\n",
127 node->full_name);
128 return -ENXIO;
129 }
130
131 addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
132 DPAA_PORTAL_CI);
133 if (!addr_phys[1]) {
134 dev_err(dev, "Can't get %s property 'reg::CI'\n",
135 node->full_name);
136 return -ENXIO;
137 }
138
139 pcfg->cpu = -1;
140
141 irq = platform_get_irq(pdev, 0);
142 if (irq <= 0) {
143		dev_err(dev, "Can't get %s IRQ\n", node->full_name);
144 return -ENXIO;
145 }
146 pcfg->irq = irq;
147
148 va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
149 if (!va)
150 goto err_ioremap1;
151
152 pcfg->addr_virt[DPAA_PORTAL_CE] = va;
153
154 va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
155 _PAGE_GUARDED | _PAGE_NO_CACHE);
156 if (!va)
157 goto err_ioremap2;
158
159 pcfg->addr_virt[DPAA_PORTAL_CI] = va;
160
161 spin_lock(&bman_lock);
162 cpu = cpumask_next_zero(-1, &portal_cpus);
163 if (cpu >= nr_cpu_ids) {
164 /* unassigned portal, skip init */
165 spin_unlock(&bman_lock);
166 return 0;
167 }
168
169 cpumask_set_cpu(cpu, &portal_cpus);
170 spin_unlock(&bman_lock);
171 pcfg->cpu = cpu;
172
173 if (!init_pcfg(pcfg))
174 goto err_ioremap2;
175
176 /* clear irq affinity if assigned cpu is offline */
177 if (!cpu_online(cpu))
178 bman_offline_cpu(cpu);
179
180 return 0;
181
182err_ioremap2:
183 iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
184err_ioremap1:
185 dev_err(dev, "ioremap failed\n");
186 return -ENXIO;
187}
188
189static const struct of_device_id bman_portal_ids[] = {
190 {
191 .compatible = "fsl,bman-portal",
192 },
193 {}
194};
195MODULE_DEVICE_TABLE(of, bman_portal_ids);
196
197static struct platform_driver bman_portal_driver = {
198 .driver = {
199 .name = KBUILD_MODNAME,
200 .of_match_table = bman_portal_ids,
201 },
202 .probe = bman_portal_probe,
203};
204
205static int __init bman_portal_driver_register(struct platform_driver *drv)
206{
207 int ret;
208
209 ret = platform_driver_register(drv);
210 if (ret < 0)
211 return ret;
212
213 register_hotcpu_notifier(&bman_hotplug_cpu_notifier);
214
215 return 0;
216}
217
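/*
 * Note: module_driver() below expands to module_init()/module_exit() stubs
 * that call bman_portal_driver_register() and platform_driver_unregister()
 * respectively, so the custom register helper also hooks the cpu notifier
 * registered above.
 */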
218module_driver(bman_portal_driver,
219 bman_portal_driver_register, platform_driver_unregister);
diff --git a/drivers/soc/fsl/qbman/bman_priv.h b/drivers/soc/fsl/qbman/bman_priv.h
new file mode 100644
index 000000000000..f6896a2f6d90
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_priv.h
@@ -0,0 +1,80 @@
1/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
2 *
3 * Redistribution and use in source and binary forms, with or without
4 * modification, are permitted provided that the following conditions are met:
5 * * Redistributions of source code must retain the above copyright
6 * notice, this list of conditions and the following disclaimer.
7 * * Redistributions in binary form must reproduce the above copyright
8 * notice, this list of conditions and the following disclaimer in the
9 * documentation and/or other materials provided with the distribution.
10 * * Neither the name of Freescale Semiconductor nor the
11 * names of its contributors may be used to endorse or promote products
12 * derived from this software without specific prior written permission.
13 *
14 * ALTERNATIVELY, this software may be distributed under the terms of the
15 * GNU General Public License ("GPL") as published by the Free Software
16 * Foundation, either version 2 of that License or (at your option) any
17 * later version.
18 *
19 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
20 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
23 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
32
33#include "dpaa_sys.h"
34
35#include <soc/fsl/bman.h>
36
37/* Portal processing (interrupt) sources */
38#define BM_PIRQ_RCRI 0x00000002 /* RCR Ring (below threshold) */
39
40/* Revision info (for errata and feature handling) */
41#define BMAN_REV10 0x0100
42#define BMAN_REV20 0x0200
43#define BMAN_REV21 0x0201
44extern u16 bman_ip_rev; /* 0 if uninitialised, otherwise BMAN_REVx */
45
46extern struct gen_pool *bm_bpalloc;
47
48struct bm_portal_config {
49 /*
50 * Corenet portal addresses;
51 * [0]==cache-enabled, [1]==cache-inhibited.
52 */
53 void __iomem *addr_virt[2];
54 /* Allow these to be joined in lists */
55 struct list_head list;
56 struct device *dev;
57 /* User-visible portal configuration settings */
58 /* portal is affined to this cpu */
59 int cpu;
60 /* portal interrupt line */
61 int irq;
62};
63
64struct bman_portal *bman_create_affine_portal(
65 const struct bm_portal_config *config);
66/*
67	 * The bman_p_***() variant below might be called in a situation where the cpu
68	 * to which the portal is affine is not yet online.
69 * @bman_portal specifies which portal the API will use.
70 */
71int bman_p_irqsource_add(struct bman_portal *p, u32 bits);
72
73/*
74 * Used by all portal interrupt registers except 'inhibit'
75 * This mask contains all the "irqsource" bits visible to API users
76 */
77#define BM_PIRQ_VISIBLE BM_PIRQ_RCRI
78
79const struct bm_portal_config *
80bman_get_bm_portal_config(const struct bman_portal *portal);
diff --git a/drivers/soc/fsl/qbman/bman_test.c b/drivers/soc/fsl/qbman/bman_test.c
new file mode 100644
index 000000000000..09b1c960b26a
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_test.c
@@ -0,0 +1,53 @@
1/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
2 *
3 * Redistribution and use in source and binary forms, with or without
4 * modification, are permitted provided that the following conditions are met:
5 * * Redistributions of source code must retain the above copyright
6 * notice, this list of conditions and the following disclaimer.
7 * * Redistributions in binary form must reproduce the above copyright
8 * notice, this list of conditions and the following disclaimer in the
9 * documentation and/or other materials provided with the distribution.
10 * * Neither the name of Freescale Semiconductor nor the
11 * names of its contributors may be used to endorse or promote products
12 * derived from this software without specific prior written permission.
13 *
14 * ALTERNATIVELY, this software may be distributed under the terms of the
15 * GNU General Public License ("GPL") as published by the Free Software
16 * Foundation, either version 2 of that License or (at your option) any
17 * later version.
18 *
19 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
20 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
23 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#include "bman_test.h"
32
33MODULE_AUTHOR("Geoff Thorpe");
34MODULE_LICENSE("Dual BSD/GPL");
35MODULE_DESCRIPTION("BMan testing");
36
37static int test_init(void)
38{
39#ifdef CONFIG_FSL_BMAN_TEST_API
40 int loop = 1;
41
42 while (loop--)
43 bman_test_api();
44#endif
45 return 0;
46}
47
48static void test_exit(void)
49{
50}
51
52module_init(test_init);
53module_exit(test_exit);
diff --git a/drivers/soc/fsl/qbman/bman_test.h b/drivers/soc/fsl/qbman/bman_test.h
new file mode 100644
index 000000000000..037ed342add4
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_test.h
@@ -0,0 +1,35 @@
1/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
2 *
3 * Redistribution and use in source and binary forms, with or without
4 * modification, are permitted provided that the following conditions are met:
5 * * Redistributions of source code must retain the above copyright
6 * notice, this list of conditions and the following disclaimer.
7 * * Redistributions in binary form must reproduce the above copyright
8 * notice, this list of conditions and the following disclaimer in the
9 * documentation and/or other materials provided with the distribution.
10 * * Neither the name of Freescale Semiconductor nor the
11 * names of its contributors may be used to endorse or promote products
12 * derived from this software without specific prior written permission.
13 *
14 * ALTERNATIVELY, this software may be distributed under the terms of the
15 * GNU General Public License ("GPL") as published by the Free Software
16 * Foundation, either version 2 of that License or (at your option) any
17 * later version.
18 *
19 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
20 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
23 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#include "bman_priv.h"
32
33#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34
35void bman_test_api(void);
diff --git a/drivers/soc/fsl/qbman/bman_test_api.c b/drivers/soc/fsl/qbman/bman_test_api.c
new file mode 100644
index 000000000000..6f6bdd154fe3
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_test_api.c
@@ -0,0 +1,151 @@
1/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
2 *
3 * Redistribution and use in source and binary forms, with or without
4 * modification, are permitted provided that the following conditions are met:
5 * * Redistributions of source code must retain the above copyright
6 * notice, this list of conditions and the following disclaimer.
7 * * Redistributions in binary form must reproduce the above copyright
8 * notice, this list of conditions and the following disclaimer in the
9 * documentation and/or other materials provided with the distribution.
10 * * Neither the name of Freescale Semiconductor nor the
11 * names of its contributors may be used to endorse or promote products
12 * derived from this software without specific prior written permission.
13 *
14 * ALTERNATIVELY, this software may be distributed under the terms of the
15 * GNU General Public License ("GPL") as published by the Free Software
16 * Foundation, either version 2 of that License or (at your option) any
17 * later version.
18 *
19 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
20 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
23 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#include "bman_test.h"
32
33#define NUM_BUFS 93
34#define LOOPS 3
35#define BMAN_TOKEN_MASK 0x00FFFFFFFFFFLLU
36
37static struct bman_pool *pool;
38static struct bm_buffer bufs_in[NUM_BUFS] ____cacheline_aligned;
39static struct bm_buffer bufs_out[NUM_BUFS] ____cacheline_aligned;
40static int bufs_received;
41
42static void bufs_init(void)
43{
44 int i;
45
46 for (i = 0; i < NUM_BUFS; i++)
47 bm_buffer_set64(&bufs_in[i], 0xfedc01234567LLU * i);
48 bufs_received = 0;
49}
50
51static inline int bufs_cmp(const struct bm_buffer *a, const struct bm_buffer *b)
52{
53 if (bman_ip_rev == BMAN_REV20 || bman_ip_rev == BMAN_REV21) {
54
55 /*
56	 * On SoCs with BMan revision 2.0, BMan only respects the 40
57 * LS-bits of buffer addresses, masking off the upper 8-bits on
58 * release commands. The API provides for 48-bit addresses
59 * because some SoCs support all 48-bits. When generating
60 * garbage addresses for testing, we either need to zero the
61 * upper 8-bits when releasing to BMan (otherwise we'll be
62 * disappointed when the buffers we acquire back from BMan
63 * don't match), or we need to mask the upper 8-bits off when
64 * comparing. We do the latter.
65 */
66 if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK) <
67 (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
68 return -1;
69 if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK) >
70 (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
71 return 1;
72 } else {
73 if (bm_buffer_get64(a) < bm_buffer_get64(b))
74 return -1;
75 if (bm_buffer_get64(a) > bm_buffer_get64(b))
76 return 1;
77 }
78
79 return 0;
80}
81
82static void bufs_confirm(void)
83{
84 int i, j;
85
86 for (i = 0; i < NUM_BUFS; i++) {
87 int matches = 0;
88
89 for (j = 0; j < NUM_BUFS; j++)
90 if (!bufs_cmp(&bufs_in[i], &bufs_out[j]))
91 matches++;
92 WARN_ON(matches != 1);
93 }
94}
95
96/* test */
97void bman_test_api(void)
98{
99 int i, loops = LOOPS;
100
101 bufs_init();
102
103 pr_info("%s(): Starting\n", __func__);
104
105 pool = bman_new_pool();
106 if (!pool) {
107 pr_crit("bman_new_pool() failed\n");
108 goto failed;
109 }
110
111 /* Release buffers */
112do_loop:
113 i = 0;
114 while (i < NUM_BUFS) {
115 int num = 8;
116
117 if (i + num > NUM_BUFS)
118 num = NUM_BUFS - i;
119 if (bman_release(pool, bufs_in + i, num)) {
120 pr_crit("bman_release() failed\n");
121 goto failed;
122 }
123 i += num;
124 }
125
126 /* Acquire buffers */
127 while (i > 0) {
128 int tmp, num = 8;
129
130 if (num > i)
131 num = i;
132 tmp = bman_acquire(pool, bufs_out + i - num, num);
133 WARN_ON(tmp != num);
134 i -= num;
135 }
136 i = bman_acquire(pool, NULL, 1);
137 WARN_ON(i > 0);
138
139 bufs_confirm();
140
141 if (--loops)
142 goto do_loop;
143
144 /* Clean up */
145 bman_free_pool(pool);
146 pr_info("%s(): Finished\n", __func__);
147 return;
148
149failed:
150 WARN_ON(1);
151}
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.h b/drivers/soc/fsl/qbman/dpaa_sys.h
new file mode 100644
index 000000000000..b63fd72295c6
--- /dev/null
+++ b/drivers/soc/fsl/qbman/dpaa_sys.h
@@ -0,0 +1,103 @@
1/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
2 *
3 * Redistribution and use in source and binary forms, with or without
4 * modification, are permitted provided that the following conditions are met:
5 * * Redistributions of source code must retain the above copyright
6 * notice, this list of conditions and the following disclaimer.
7 * * Redistributions in binary form must reproduce the above copyright
8 * notice, this list of conditions and the following disclaimer in the
9 * documentation and/or other materials provided with the distribution.
10 * * Neither the name of Freescale Semiconductor nor the
11 * names of its contributors may be used to endorse or promote products
12 * derived from this software without specific prior written permission.
13 *
14 * ALTERNATIVELY, this software may be distributed under the terms of the
15 * GNU General Public License ("GPL") as published by the Free Software
16 * Foundation, either version 2 of that License or (at your option) any
17 * later version.
18 *
19 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
20 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
23 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#ifndef __DPAA_SYS_H
32#define __DPAA_SYS_H
33
34#include <linux/cpu.h>
35#include <linux/slab.h>
36#include <linux/module.h>
37#include <linux/interrupt.h>
38#include <linux/kthread.h>
39#include <linux/vmalloc.h>
40#include <linux/platform_device.h>
41#include <linux/of_reserved_mem.h>
42#include <linux/prefetch.h>
43#include <linux/genalloc.h>
44#include <asm/cacheflush.h>
45
46/* For 2-element tables related to cache-inhibited and cache-enabled mappings */
47#define DPAA_PORTAL_CE 0
48#define DPAA_PORTAL_CI 1
49
50#if (L1_CACHE_BYTES != 32) && (L1_CACHE_BYTES != 64)
51#error "Unsupported Cacheline Size"
52#endif
53
54static inline void dpaa_flush(void *p)
55{
56#ifdef CONFIG_PPC
57 flush_dcache_range((unsigned long)p, (unsigned long)p+64);
58#elif defined(CONFIG_ARM32)
59 __cpuc_flush_dcache_area(p, 64);
60#elif defined(CONFIG_ARM64)
61 __flush_dcache_area(p, 64);
62#endif
63}
64
65#define dpaa_invalidate(p) dpaa_flush(p)
66
67#define dpaa_zero(p) memset(p, 0, 64)
68
69static inline void dpaa_touch_ro(void *p)
70{
71#if (L1_CACHE_BYTES == 32)
72 prefetch(p+32);
73#endif
74 prefetch(p);
75}
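/*
 * On 32-byte cacheline systems the extra prefetch above covers both halves
 * of a 64-byte portal entry; with 64-byte cachelines a single prefetch is
 * enough (cf. the L1_CACHE_BYTES check earlier in this header).
 */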
76
77/* Commonly used combo */
78static inline void dpaa_invalidate_touch_ro(void *p)
79{
80 dpaa_invalidate(p);
81 dpaa_touch_ro(p);
82}
83
84
85#ifdef CONFIG_FSL_DPAA_CHECKING
86#define DPAA_ASSERT(x) WARN_ON(!(x))
87#else
88#define DPAA_ASSERT(x)
89#endif
90
91/* cyclic helper for rings */
92static inline u8 dpaa_cyc_diff(u8 ringsize, u8 first, u8 last)
93{
94 /* 'first' is included, 'last' is excluded */
95 if (first <= last)
96 return last - first;
97 return ringsize + last - first;
98}
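/*
 * Worked example: with ringsize = 8, first = 6 and last = 2 the occupied
 * span wraps, so dpaa_cyc_diff() returns 8 + 2 - 6 = 4 entries
 * (indices 6, 7, 0, 1).
 */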
99
100/* Offset applied to genalloc pools due to zero being an error return */
101#define DPAA_GENALLOC_OFF 0x80000000
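/*
 * e.g. fsl_bman_probe() seeds the BPID range with
 * gen_pool_add(bm_bpalloc, DPAA_GENALLOC_OFF, bm_pool_cnt, -1); the offset
 * keeps a valid BPID 0 from colliding with genalloc's 0 error return.
 */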
102
103#endif /* __DPAA_SYS_H */
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
new file mode 100644
index 000000000000..119054bc922b
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -0,0 +1,2881 @@
1/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
2 *
3 * Redistribution and use in source and binary forms, with or without
4 * modification, are permitted provided that the following conditions are met:
5 * * Redistributions of source code must retain the above copyright
6 * notice, this list of conditions and the following disclaimer.
7 * * Redistributions in binary form must reproduce the above copyright
8 * notice, this list of conditions and the following disclaimer in the
9 * documentation and/or other materials provided with the distribution.
10 * * Neither the name of Freescale Semiconductor nor the
11 * names of its contributors may be used to endorse or promote products
12 * derived from this software without specific prior written permission.
13 *
14 * ALTERNATIVELY, this software may be distributed under the terms of the
15 * GNU General Public License ("GPL") as published by the Free Software
16 * Foundation, either version 2 of that License or (at your option) any
17 * later version.
18 *
19 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
20 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
23 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#include "qman_priv.h"
32
33#define DQRR_MAXFILL 15
34#define EQCR_ITHRESH 4 /* if EQCR congests, interrupt threshold */
35#define IRQNAME "QMan portal %d"
36#define MAX_IRQNAME 16 /* big enough for "QMan portal %d" */
37#define QMAN_POLL_LIMIT 32
38#define QMAN_PIRQ_DQRR_ITHRESH 12
39#define QMAN_PIRQ_MR_ITHRESH 4
40#define QMAN_PIRQ_IPERIOD 100
41
42/* Portal register assists */
43
44/* Cache-inhibited register offsets */
45#define QM_REG_EQCR_PI_CINH 0x0000
46#define QM_REG_EQCR_CI_CINH 0x0004
47#define QM_REG_EQCR_ITR 0x0008
48#define QM_REG_DQRR_PI_CINH 0x0040
49#define QM_REG_DQRR_CI_CINH 0x0044
50#define QM_REG_DQRR_ITR 0x0048
51#define QM_REG_DQRR_DCAP 0x0050
52#define QM_REG_DQRR_SDQCR 0x0054
53#define QM_REG_DQRR_VDQCR 0x0058
54#define QM_REG_DQRR_PDQCR 0x005c
55#define QM_REG_MR_PI_CINH 0x0080
56#define QM_REG_MR_CI_CINH 0x0084
57#define QM_REG_MR_ITR 0x0088
58#define QM_REG_CFG 0x0100
59#define QM_REG_ISR 0x0e00
60#define QM_REG_IER 0x0e04
61#define QM_REG_ISDR 0x0e08
62#define QM_REG_IIR 0x0e0c
63#define QM_REG_ITPR 0x0e14
64
65/* Cache-enabled register offsets */
66#define QM_CL_EQCR 0x0000
67#define QM_CL_DQRR 0x1000
68#define QM_CL_MR 0x2000
69#define QM_CL_EQCR_PI_CENA 0x3000
70#define QM_CL_EQCR_CI_CENA 0x3100
71#define QM_CL_DQRR_PI_CENA 0x3200
72#define QM_CL_DQRR_CI_CENA 0x3300
73#define QM_CL_MR_PI_CENA 0x3400
74#define QM_CL_MR_CI_CENA 0x3500
75#define QM_CL_CR 0x3800
76#define QM_CL_RR0 0x3900
77#define QM_CL_RR1 0x3940
78
79/*
80 * BTW, the drivers (and h/w programming model) already obtain the required
81 * synchronisation for portal accesses and data-dependencies. Use of barrier()s
82 * or other order-preserving primitives simply degrade performance. Hence the
83 * use of the __raw_*() interfaces, which simply ensure that the compiler treats
84 * the portal registers as volatile
85 */
86
87/* Cache-enabled ring access */
88#define qm_cl(base, idx) ((void *)base + ((idx) << 6))
89
90/*
91 * Portal modes.
92 * Enum types;
93 * pmode == production mode
94 * cmode == consumption mode,
95 * dmode == h/w dequeue mode.
96 * Enum values use 3 letter codes. First letter matches the portal mode,
97 * remaining two letters indicate;
98 * ci == cache-inhibited portal register
99 * ce == cache-enabled portal register
100 * vb == in-band valid-bit (cache-enabled)
101 * dc == DCA (Discrete Consumption Acknowledgment), DQRR-only
102 * As for "enum qm_dqrr_dmode", it should be self-explanatory.
103 */
104enum qm_eqcr_pmode { /* matches QCSP_CFG::EPM */
105 qm_eqcr_pci = 0, /* PI index, cache-inhibited */
106 qm_eqcr_pce = 1, /* PI index, cache-enabled */
107 qm_eqcr_pvb = 2 /* valid-bit */
108};
109enum qm_dqrr_dmode { /* matches QCSP_CFG::DP */
110 qm_dqrr_dpush = 0, /* SDQCR + VDQCR */
111 qm_dqrr_dpull = 1 /* PDQCR */
112};
113enum qm_dqrr_pmode { /* s/w-only */
114 qm_dqrr_pci, /* reads DQRR_PI_CINH */
115 qm_dqrr_pce, /* reads DQRR_PI_CENA */
116 qm_dqrr_pvb /* reads valid-bit */
117};
118enum qm_dqrr_cmode { /* matches QCSP_CFG::DCM */
119 qm_dqrr_cci = 0, /* CI index, cache-inhibited */
120 qm_dqrr_cce = 1, /* CI index, cache-enabled */
121 qm_dqrr_cdc = 2 /* Discrete Consumption Acknowledgment */
122};
123enum qm_mr_pmode { /* s/w-only */
124 qm_mr_pci, /* reads MR_PI_CINH */
125 qm_mr_pce, /* reads MR_PI_CENA */
126 qm_mr_pvb /* reads valid-bit */
127};
128enum qm_mr_cmode { /* matches QCSP_CFG::MM */
129 qm_mr_cci = 0, /* CI index, cache-inhibited */
130 qm_mr_cce = 1 /* CI index, cache-enabled */
131};
132
133/* --- Portal structures --- */
134
135#define QM_EQCR_SIZE 8
136#define QM_DQRR_SIZE 16
137#define QM_MR_SIZE 8
138
139/* "Enqueue Command" */
140struct qm_eqcr_entry {
141 u8 _ncw_verb; /* writes to this are non-coherent */
142 u8 dca;
143 u16 seqnum;
144 u32 orp; /* 24-bit */
145 u32 fqid; /* 24-bit */
146 u32 tag;
147 struct qm_fd fd;
148 u8 __reserved3[32];
149} __packed;
150#define QM_EQCR_VERB_VBIT 0x80
151#define QM_EQCR_VERB_CMD_MASK 0x61 /* but only one value; */
152#define QM_EQCR_VERB_CMD_ENQUEUE 0x01
153#define QM_EQCR_SEQNUM_NESN 0x8000 /* Advance NESN */
154#define QM_EQCR_SEQNUM_NLIS 0x4000 /* More fragments to come */
155#define QM_EQCR_SEQNUM_SEQMASK 0x3fff /* sequence number goes here */
156
157struct qm_eqcr {
158 struct qm_eqcr_entry *ring, *cursor;
159 u8 ci, available, ithresh, vbit;
160#ifdef CONFIG_FSL_DPAA_CHECKING
161 u32 busy;
162 enum qm_eqcr_pmode pmode;
163#endif
164};
165
166struct qm_dqrr {
167 const struct qm_dqrr_entry *ring, *cursor;
168 u8 pi, ci, fill, ithresh, vbit;
169#ifdef CONFIG_FSL_DPAA_CHECKING
170 enum qm_dqrr_dmode dmode;
171 enum qm_dqrr_pmode pmode;
172 enum qm_dqrr_cmode cmode;
173#endif
174};
175
176struct qm_mr {
177 union qm_mr_entry *ring, *cursor;
178 u8 pi, ci, fill, ithresh, vbit;
179#ifdef CONFIG_FSL_DPAA_CHECKING
180 enum qm_mr_pmode pmode;
181 enum qm_mr_cmode cmode;
182#endif
183};
184
185/* MC (Management Command) command */
186/* "Query FQ" */
187struct qm_mcc_queryfq {
188 u8 _ncw_verb;
189 u8 __reserved1[3];
190 u32 fqid; /* 24-bit */
191 u8 __reserved2[56];
192} __packed;
193/* "Alter FQ State Commands" */
194struct qm_mcc_alterfq {
195 u8 _ncw_verb;
196 u8 __reserved1[3];
197 u32 fqid; /* 24-bit */
198 u8 __reserved2;
199 u8 count; /* number of consecutive FQID */
200 u8 __reserved3[10];
201 u32 context_b; /* frame queue context b */
202 u8 __reserved4[40];
203} __packed;
204
205/* "Query CGR" */
206struct qm_mcc_querycgr {
207 u8 _ncw_verb;
208 u8 __reserved1[30];
209 u8 cgid;
210 u8 __reserved2[32];
211};
212
213struct qm_mcc_querywq {
214 u8 _ncw_verb;
215 u8 __reserved;
216 /* select channel if verb != QUERYWQ_DEDICATED */
217 u16 channel_wq; /* ignores wq (3 lsbits): _res[0-2] */
218 u8 __reserved2[60];
219} __packed;
220
221#define QM_MCC_VERB_VBIT 0x80
222#define QM_MCC_VERB_MASK 0x7f /* where the verb contains; */
223#define QM_MCC_VERB_INITFQ_PARKED 0x40
224#define QM_MCC_VERB_INITFQ_SCHED 0x41
225#define QM_MCC_VERB_QUERYFQ 0x44
226#define QM_MCC_VERB_QUERYFQ_NP 0x45 /* "non-programmable" fields */
227#define QM_MCC_VERB_QUERYWQ 0x46
228#define QM_MCC_VERB_QUERYWQ_DEDICATED 0x47
229#define QM_MCC_VERB_ALTER_SCHED 0x48 /* Schedule FQ */
230#define QM_MCC_VERB_ALTER_FE 0x49 /* Force Eligible FQ */
231#define QM_MCC_VERB_ALTER_RETIRE 0x4a /* Retire FQ */
232#define QM_MCC_VERB_ALTER_OOS 0x4b /* Take FQ out of service */
233#define QM_MCC_VERB_ALTER_FQXON 0x4d /* FQ XON */
234#define QM_MCC_VERB_ALTER_FQXOFF 0x4e /* FQ XOFF */
235#define QM_MCC_VERB_INITCGR 0x50
236#define QM_MCC_VERB_MODIFYCGR 0x51
237#define QM_MCC_VERB_CGRTESTWRITE 0x52
238#define QM_MCC_VERB_QUERYCGR 0x58
239#define QM_MCC_VERB_QUERYCONGESTION 0x59
240union qm_mc_command {
241 struct {
242 u8 _ncw_verb; /* writes to this are non-coherent */
243 u8 __reserved[63];
244 };
245 struct qm_mcc_initfq initfq;
246 struct qm_mcc_queryfq queryfq;
247 struct qm_mcc_alterfq alterfq;
248 struct qm_mcc_initcgr initcgr;
249 struct qm_mcc_querycgr querycgr;
250 struct qm_mcc_querywq querywq;
251 struct qm_mcc_queryfq_np queryfq_np;
252};
253
254/* MC (Management Command) result */
255/* "Query FQ" */
256struct qm_mcr_queryfq {
257 u8 verb;
258 u8 result;
259 u8 __reserved1[8];
260 struct qm_fqd fqd; /* the FQD fields are here */
261 u8 __reserved2[30];
262} __packed;
263
264/* "Alter FQ State Commands" */
265struct qm_mcr_alterfq {
266 u8 verb;
267 u8 result;
268 u8 fqs; /* Frame Queue Status */
269 u8 __reserved1[61];
270};
271#define QM_MCR_VERB_RRID 0x80
272#define QM_MCR_VERB_MASK QM_MCC_VERB_MASK
273#define QM_MCR_VERB_INITFQ_PARKED QM_MCC_VERB_INITFQ_PARKED
274#define QM_MCR_VERB_INITFQ_SCHED QM_MCC_VERB_INITFQ_SCHED
275#define QM_MCR_VERB_QUERYFQ QM_MCC_VERB_QUERYFQ
276#define QM_MCR_VERB_QUERYFQ_NP QM_MCC_VERB_QUERYFQ_NP
277#define QM_MCR_VERB_QUERYWQ QM_MCC_VERB_QUERYWQ
278#define QM_MCR_VERB_QUERYWQ_DEDICATED QM_MCC_VERB_QUERYWQ_DEDICATED
279#define QM_MCR_VERB_ALTER_SCHED QM_MCC_VERB_ALTER_SCHED
280#define QM_MCR_VERB_ALTER_FE QM_MCC_VERB_ALTER_FE
281#define QM_MCR_VERB_ALTER_RETIRE QM_MCC_VERB_ALTER_RETIRE
282#define QM_MCR_VERB_ALTER_OOS QM_MCC_VERB_ALTER_OOS
283#define QM_MCR_RESULT_NULL 0x00
284#define QM_MCR_RESULT_OK 0xf0
285#define QM_MCR_RESULT_ERR_FQID 0xf1
286#define QM_MCR_RESULT_ERR_FQSTATE 0xf2
287#define QM_MCR_RESULT_ERR_NOTEMPTY 0xf3 /* OOS fails if FQ is !empty */
288#define QM_MCR_RESULT_ERR_BADCHANNEL 0xf4
289#define QM_MCR_RESULT_PENDING 0xf8
290#define QM_MCR_RESULT_ERR_BADCOMMAND 0xff
291#define QM_MCR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */
292#define QM_MCR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */
293#define QM_MCR_TIMEOUT 10000 /* us */
294union qm_mc_result {
295 struct {
296 u8 verb;
297 u8 result;
298 u8 __reserved1[62];
299 };
300 struct qm_mcr_queryfq queryfq;
301 struct qm_mcr_alterfq alterfq;
302 struct qm_mcr_querycgr querycgr;
303 struct qm_mcr_querycongestion querycongestion;
304 struct qm_mcr_querywq querywq;
305 struct qm_mcr_queryfq_np queryfq_np;
306};
307
308struct qm_mc {
309 union qm_mc_command *cr;
310 union qm_mc_result *rr;
311 u8 rridx, vbit;
312#ifdef CONFIG_FSL_DPAA_CHECKING
313 enum {
314 /* Can be _mc_start()ed */
315 qman_mc_idle,
316 /* Can be _mc_commit()ed or _mc_abort()ed */
317 qman_mc_user,
318 /* Can only be _mc_retry()ed */
319 qman_mc_hw
320 } state;
321#endif
322};
323
324struct qm_addr {
325 void __iomem *ce; /* cache-enabled */
326 void __iomem *ci; /* cache-inhibited */
327};
328
329struct qm_portal {
330 /*
331 * In the non-CONFIG_FSL_DPAA_CHECKING case, the following stuff up to
332 * and including 'mc' fits within a cacheline (yay!). The 'config' part
333	 * is setup-only, so isn't a cause for concern. In other words, don't
334 * rearrange this structure on a whim, there be dragons ...
335 */
336 struct qm_addr addr;
337 struct qm_eqcr eqcr;
338 struct qm_dqrr dqrr;
339 struct qm_mr mr;
340 struct qm_mc mc;
341} ____cacheline_aligned;
342
343/* Cache-inhibited register access. */
344static inline u32 qm_in(struct qm_portal *p, u32 offset)
345{
346 return __raw_readl(p->addr.ci + offset);
347}
348
349static inline void qm_out(struct qm_portal *p, u32 offset, u32 val)
350{
351 __raw_writel(val, p->addr.ci + offset);
352}
353
354/* Cache Enabled Portal Access */
355static inline void qm_cl_invalidate(struct qm_portal *p, u32 offset)
356{
357 dpaa_invalidate(p->addr.ce + offset);
358}
359
360static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset)
361{
362 dpaa_touch_ro(p->addr.ce + offset);
363}
364
365static inline u32 qm_ce_in(struct qm_portal *p, u32 offset)
366{
367 return __raw_readl(p->addr.ce + offset);
368}
369
370/* --- EQCR API --- */
371
372#define EQCR_SHIFT ilog2(sizeof(struct qm_eqcr_entry))
373#define EQCR_CARRY (uintptr_t)(QM_EQCR_SIZE << EQCR_SHIFT)
374
375/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
376static struct qm_eqcr_entry *eqcr_carryclear(struct qm_eqcr_entry *p)
377{
378 uintptr_t addr = (uintptr_t)p;
379
380 addr &= ~EQCR_CARRY;
381
382 return (struct qm_eqcr_entry *)addr;
383}
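/*
 * Example (assuming a 64-byte struct qm_eqcr_entry, i.e. a 16-byte qm_fd):
 * EQCR_SHIFT is 6 and EQCR_CARRY is 8 << 6 = 0x200. Incrementing the cursor
 * off the end of the 512-byte ring sets that carry bit; clearing it wraps
 * the pointer back to the ring base.
 */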
384
385/* Bit-wise logic to convert a ring pointer to a ring index */
386static int eqcr_ptr2idx(struct qm_eqcr_entry *e)
387{
388 return ((uintptr_t)e >> EQCR_SHIFT) & (QM_EQCR_SIZE - 1);
389}
390
391/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
392static inline void eqcr_inc(struct qm_eqcr *eqcr)
393{
394 /* increment to the next EQCR pointer and handle overflow and 'vbit' */
395 struct qm_eqcr_entry *partial = eqcr->cursor + 1;
396
397 eqcr->cursor = eqcr_carryclear(partial);
398 if (partial != eqcr->cursor)
399 eqcr->vbit ^= QM_EQCR_VERB_VBIT;
400}
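/*
 * The valid bit flips each time the cursor wraps, so entries written on the
 * new lap are distinguishable from stale ones left over from the previous
 * lap (the "vb" in-band valid-bit scheme described above).
 */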
401
402static inline int qm_eqcr_init(struct qm_portal *portal,
403 enum qm_eqcr_pmode pmode,
404 unsigned int eq_stash_thresh,
405 int eq_stash_prio)
406{
407 struct qm_eqcr *eqcr = &portal->eqcr;
408 u32 cfg;
409 u8 pi;
410
411 eqcr->ring = portal->addr.ce + QM_CL_EQCR;
412 eqcr->ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
413 qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
414 pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
415 eqcr->cursor = eqcr->ring + pi;
416 eqcr->vbit = (qm_in(portal, QM_REG_EQCR_PI_CINH) & QM_EQCR_SIZE) ?
417 QM_EQCR_VERB_VBIT : 0;
418 eqcr->available = QM_EQCR_SIZE - 1 -
419 dpaa_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
420 eqcr->ithresh = qm_in(portal, QM_REG_EQCR_ITR);
421#ifdef CONFIG_FSL_DPAA_CHECKING
422 eqcr->busy = 0;
423 eqcr->pmode = pmode;
424#endif
425 cfg = (qm_in(portal, QM_REG_CFG) & 0x00ffffff) |
426 (eq_stash_thresh << 28) | /* QCSP_CFG: EST */
427 (eq_stash_prio << 26) | /* QCSP_CFG: EP */
428 ((pmode & 0x3) << 24); /* QCSP_CFG::EPM */
429 qm_out(portal, QM_REG_CFG, cfg);
430 return 0;
431}
432
433static inline unsigned int qm_eqcr_get_ci_stashing(struct qm_portal *portal)
434{
435 return (qm_in(portal, QM_REG_CFG) >> 28) & 0x7;
436}
437
438static inline void qm_eqcr_finish(struct qm_portal *portal)
439{
440 struct qm_eqcr *eqcr = &portal->eqcr;
441 u8 pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
442 u8 ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
443
444 DPAA_ASSERT(!eqcr->busy);
445 if (pi != eqcr_ptr2idx(eqcr->cursor))
446			pr_crit("losing uncommitted EQCR entries\n");
447 if (ci != eqcr->ci)
448 pr_crit("missing existing EQCR completions\n");
449 if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor))
450 pr_crit("EQCR destroyed unquiesced\n");
451}
452
453static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal
454 *portal)
455{
456 struct qm_eqcr *eqcr = &portal->eqcr;
457
458 DPAA_ASSERT(!eqcr->busy);
459 if (!eqcr->available)
460 return NULL;
461
462#ifdef CONFIG_FSL_DPAA_CHECKING
463 eqcr->busy = 1;
464#endif
465 dpaa_zero(eqcr->cursor);
466 return eqcr->cursor;
467}
468
469static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
470 *portal)
471{
472 struct qm_eqcr *eqcr = &portal->eqcr;
473 u8 diff, old_ci;
474
475 DPAA_ASSERT(!eqcr->busy);
476 if (!eqcr->available) {
477 old_ci = eqcr->ci;
478 eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) &
479 (QM_EQCR_SIZE - 1);
480 diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
481 eqcr->available += diff;
482 if (!diff)
483 return NULL;
484 }
485#ifdef CONFIG_FSL_DPAA_CHECKING
486 eqcr->busy = 1;
487#endif
488 dpaa_zero(eqcr->cursor);
489 return eqcr->cursor;
490}
491
492static inline void eqcr_commit_checks(struct qm_eqcr *eqcr)
493{
494 DPAA_ASSERT(eqcr->busy);
495 DPAA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & 0x00ffffff));
496 DPAA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & 0x00ffffff));
497 DPAA_ASSERT(eqcr->available >= 1);
498}
499
500static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb)
501{
502 struct qm_eqcr *eqcr = &portal->eqcr;
503 struct qm_eqcr_entry *eqcursor;
504
505 eqcr_commit_checks(eqcr);
506 DPAA_ASSERT(eqcr->pmode == qm_eqcr_pvb);
507 dma_wmb();
508 eqcursor = eqcr->cursor;
509 eqcursor->_ncw_verb = myverb | eqcr->vbit;
510 dpaa_flush(eqcursor);
511 eqcr_inc(eqcr);
512 eqcr->available--;
513#ifdef CONFIG_FSL_DPAA_CHECKING
514 eqcr->busy = 0;
515#endif
516}
517
518static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal)
519{
520 qm_cl_touch_ro(portal, QM_CL_EQCR_CI_CENA);
521}
522
523static inline u8 qm_eqcr_cce_update(struct qm_portal *portal)
524{
525 struct qm_eqcr *eqcr = &portal->eqcr;
526 u8 diff, old_ci = eqcr->ci;
527
528 eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) & (QM_EQCR_SIZE - 1);
529 qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
530 diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
531 eqcr->available += diff;
532 return diff;
533}
534
535static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh)
536{
537 struct qm_eqcr *eqcr = &portal->eqcr;
538
539 eqcr->ithresh = ithresh;
540 qm_out(portal, QM_REG_EQCR_ITR, ithresh);
541}
542
543static inline u8 qm_eqcr_get_avail(struct qm_portal *portal)
544{
545 struct qm_eqcr *eqcr = &portal->eqcr;
546
547 return eqcr->available;
548}
549
550static inline u8 qm_eqcr_get_fill(struct qm_portal *portal)
551{
552 struct qm_eqcr *eqcr = &portal->eqcr;
553
554 return QM_EQCR_SIZE - 1 - eqcr->available;
555}
556
557/* --- DQRR API --- */
558
559#define DQRR_SHIFT ilog2(sizeof(struct qm_dqrr_entry))
560#define DQRR_CARRY (uintptr_t)(QM_DQRR_SIZE << DQRR_SHIFT)
561
562static const struct qm_dqrr_entry *dqrr_carryclear(
563 const struct qm_dqrr_entry *p)
564{
565 uintptr_t addr = (uintptr_t)p;
566
567 addr &= ~DQRR_CARRY;
568
569 return (const struct qm_dqrr_entry *)addr;
570}
571
572static inline int dqrr_ptr2idx(const struct qm_dqrr_entry *e)
573{
574 return ((uintptr_t)e >> DQRR_SHIFT) & (QM_DQRR_SIZE - 1);
575}
576
577static const struct qm_dqrr_entry *dqrr_inc(const struct qm_dqrr_entry *e)
578{
579 return dqrr_carryclear(e + 1);
580}
581
582static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf)
583{
584 qm_out(portal, QM_REG_CFG, (qm_in(portal, QM_REG_CFG) & 0xff0fffff) |
585 ((mf & (QM_DQRR_SIZE - 1)) << 20));
586}
587
588static inline int qm_dqrr_init(struct qm_portal *portal,
589 const struct qm_portal_config *config,
590 enum qm_dqrr_dmode dmode,
591 enum qm_dqrr_pmode pmode,
592 enum qm_dqrr_cmode cmode, u8 max_fill)
593{
594 struct qm_dqrr *dqrr = &portal->dqrr;
595 u32 cfg;
596
597 /* Make sure the DQRR will be idle when we enable */
598 qm_out(portal, QM_REG_DQRR_SDQCR, 0);
599 qm_out(portal, QM_REG_DQRR_VDQCR, 0);
600 qm_out(portal, QM_REG_DQRR_PDQCR, 0);
601 dqrr->ring = portal->addr.ce + QM_CL_DQRR;
602 dqrr->pi = qm_in(portal, QM_REG_DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
603 dqrr->ci = qm_in(portal, QM_REG_DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
604 dqrr->cursor = dqrr->ring + dqrr->ci;
605 dqrr->fill = dpaa_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
606 dqrr->vbit = (qm_in(portal, QM_REG_DQRR_PI_CINH) & QM_DQRR_SIZE) ?
607 QM_DQRR_VERB_VBIT : 0;
608 dqrr->ithresh = qm_in(portal, QM_REG_DQRR_ITR);
609#ifdef CONFIG_FSL_DPAA_CHECKING
610 dqrr->dmode = dmode;
611 dqrr->pmode = pmode;
612 dqrr->cmode = cmode;
613#endif
614 /* Invalidate every ring entry before beginning */
615 for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
616 dpaa_invalidate(qm_cl(dqrr->ring, cfg));
617 cfg = (qm_in(portal, QM_REG_CFG) & 0xff000f00) |
618 ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
619 ((dmode & 1) << 18) | /* DP */
620 ((cmode & 3) << 16) | /* DCM */
621 0xa0 | /* RE+SE */
622 (0 ? 0x40 : 0) | /* Ignore RP */
623 (0 ? 0x10 : 0); /* Ignore SP */
624 qm_out(portal, QM_REG_CFG, cfg);
625 qm_dqrr_set_maxfill(portal, max_fill);
626 return 0;
627}
628
629static inline void qm_dqrr_finish(struct qm_portal *portal)
630{
631#ifdef CONFIG_FSL_DPAA_CHECKING
632 struct qm_dqrr *dqrr = &portal->dqrr;
633
634 if (dqrr->cmode != qm_dqrr_cdc &&
635 dqrr->ci != dqrr_ptr2idx(dqrr->cursor))
636 pr_crit("Ignoring completed DQRR entries\n");
637#endif
638}
639
640static inline const struct qm_dqrr_entry *qm_dqrr_current(
641 struct qm_portal *portal)
642{
643 struct qm_dqrr *dqrr = &portal->dqrr;
644
645 if (!dqrr->fill)
646 return NULL;
647 return dqrr->cursor;
648}
649
650static inline u8 qm_dqrr_next(struct qm_portal *portal)
651{
652 struct qm_dqrr *dqrr = &portal->dqrr;
653
654 DPAA_ASSERT(dqrr->fill);
655 dqrr->cursor = dqrr_inc(dqrr->cursor);
656 return --dqrr->fill;
657}
658
659static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
660{
661 struct qm_dqrr *dqrr = &portal->dqrr;
662 struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi);
663
664 DPAA_ASSERT(dqrr->pmode == qm_dqrr_pvb);
665#ifndef CONFIG_FSL_PAMU
666 /*
667 * If PAMU is not available we need to invalidate the cache.
668	 * When PAMU is available the cache is updated by stashing
669 */
670 dpaa_invalidate_touch_ro(res);
671#endif
672 /*
673 * when accessing 'verb', use __raw_readb() to ensure that compiler
674 * inlining doesn't try to optimise out "excess reads".
675 */
676 if ((__raw_readb(&res->verb) & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
677 dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
678 if (!dqrr->pi)
679 dqrr->vbit ^= QM_DQRR_VERB_VBIT;
680 dqrr->fill++;
681 }
682}
683
684static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal,
685 const struct qm_dqrr_entry *dq,
686 int park)
687{
688 __maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;
689 int idx = dqrr_ptr2idx(dq);
690
691 DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
692 DPAA_ASSERT((dqrr->ring + idx) == dq);
693 DPAA_ASSERT(idx < QM_DQRR_SIZE);
694 qm_out(portal, QM_REG_DQRR_DCAP, (0 << 8) | /* DQRR_DCAP::S */
695 ((park ? 1 : 0) << 6) | /* DQRR_DCAP::PK */
696 idx); /* DQRR_DCAP::DCAP_CI */
697}
698
699static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u32 bitmask)
700{
701 __maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;
702
703 DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
704 qm_out(portal, QM_REG_DQRR_DCAP, (1 << 8) | /* DQRR_DCAP::S */
705 (bitmask << 16)); /* DQRR_DCAP::DCAP_CI */
706}
707
708static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr)
709{
710 qm_out(portal, QM_REG_DQRR_SDQCR, sdqcr);
711}
712
713static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr)
714{
715 qm_out(portal, QM_REG_DQRR_VDQCR, vdqcr);
716}
717
718static inline void qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh)
719{
720 qm_out(portal, QM_REG_DQRR_ITR, ithresh);
721}
722
723/* --- MR API --- */
724
725#define MR_SHIFT ilog2(sizeof(union qm_mr_entry))
726#define MR_CARRY (uintptr_t)(QM_MR_SIZE << MR_SHIFT)
727
728static union qm_mr_entry *mr_carryclear(union qm_mr_entry *p)
729{
730 uintptr_t addr = (uintptr_t)p;
731
732 addr &= ~MR_CARRY;
733
734 return (union qm_mr_entry *)addr;
735}
736
737static inline int mr_ptr2idx(const union qm_mr_entry *e)
738{
739 return ((uintptr_t)e >> MR_SHIFT) & (QM_MR_SIZE - 1);
740}
741
742static inline union qm_mr_entry *mr_inc(union qm_mr_entry *e)
743{
744 return mr_carryclear(e + 1);
745}
746
747static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode,
748 enum qm_mr_cmode cmode)
749{
750 struct qm_mr *mr = &portal->mr;
751 u32 cfg;
752
753 mr->ring = portal->addr.ce + QM_CL_MR;
754 mr->pi = qm_in(portal, QM_REG_MR_PI_CINH) & (QM_MR_SIZE - 1);
755 mr->ci = qm_in(portal, QM_REG_MR_CI_CINH) & (QM_MR_SIZE - 1);
756 mr->cursor = mr->ring + mr->ci;
757 mr->fill = dpaa_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
758 mr->vbit = (qm_in(portal, QM_REG_MR_PI_CINH) & QM_MR_SIZE)
759 ? QM_MR_VERB_VBIT : 0;
760 mr->ithresh = qm_in(portal, QM_REG_MR_ITR);
761#ifdef CONFIG_FSL_DPAA_CHECKING
762 mr->pmode = pmode;
763 mr->cmode = cmode;
764#endif
765 cfg = (qm_in(portal, QM_REG_CFG) & 0xfffff0ff) |
766 ((cmode & 1) << 8); /* QCSP_CFG:MM */
767 qm_out(portal, QM_REG_CFG, cfg);
768 return 0;
769}
770
771static inline void qm_mr_finish(struct qm_portal *portal)
772{
773 struct qm_mr *mr = &portal->mr;
774
775 if (mr->ci != mr_ptr2idx(mr->cursor))
776 pr_crit("Ignoring completed MR entries\n");
777}
778
779static inline const union qm_mr_entry *qm_mr_current(struct qm_portal *portal)
780{
781 struct qm_mr *mr = &portal->mr;
782
783 if (!mr->fill)
784 return NULL;
785 return mr->cursor;
786}
787
788static inline int qm_mr_next(struct qm_portal *portal)
789{
790 struct qm_mr *mr = &portal->mr;
791
792 DPAA_ASSERT(mr->fill);
793 mr->cursor = mr_inc(mr->cursor);
794 return --mr->fill;
795}
796
797static inline void qm_mr_pvb_update(struct qm_portal *portal)
798{
799 struct qm_mr *mr = &portal->mr;
800 union qm_mr_entry *res = qm_cl(mr->ring, mr->pi);
801
802 DPAA_ASSERT(mr->pmode == qm_mr_pvb);
803 /*
804 * when accessing 'verb', use __raw_readb() to ensure that compiler
805 * inlining doesn't try to optimise out "excess reads".
806 */
807 if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) {
808 mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
809 if (!mr->pi)
810 mr->vbit ^= QM_MR_VERB_VBIT;
811 mr->fill++;
812 res = mr_inc(res);
813 }
814 dpaa_invalidate_touch_ro(res);
815}
816
817static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num)
818{
819 struct qm_mr *mr = &portal->mr;
820
821 DPAA_ASSERT(mr->cmode == qm_mr_cci);
822 mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
823 qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
824}
825
826static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal)
827{
828 struct qm_mr *mr = &portal->mr;
829
830 DPAA_ASSERT(mr->cmode == qm_mr_cci);
831 mr->ci = mr_ptr2idx(mr->cursor);
832 qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
833}
834
835static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh)
836{
837 qm_out(portal, QM_REG_MR_ITR, ithresh);
838}
839
840/* --- Management command API --- */
841
842static inline int qm_mc_init(struct qm_portal *portal)
843{
844 struct qm_mc *mc = &portal->mc;
845
846 mc->cr = portal->addr.ce + QM_CL_CR;
847 mc->rr = portal->addr.ce + QM_CL_RR0;
848 mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & QM_MCC_VERB_VBIT)
849 ? 0 : 1;
850 mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
851#ifdef CONFIG_FSL_DPAA_CHECKING
852 mc->state = qman_mc_idle;
853#endif
854 return 0;
855}
856
857static inline void qm_mc_finish(struct qm_portal *portal)
858{
859#ifdef CONFIG_FSL_DPAA_CHECKING
860 struct qm_mc *mc = &portal->mc;
861
862 DPAA_ASSERT(mc->state == qman_mc_idle);
863 if (mc->state != qman_mc_idle)
864 pr_crit("Losing incomplete MC command\n");
865#endif
866}
867
868static inline union qm_mc_command *qm_mc_start(struct qm_portal *portal)
869{
870 struct qm_mc *mc = &portal->mc;
871
872 DPAA_ASSERT(mc->state == qman_mc_idle);
873#ifdef CONFIG_FSL_DPAA_CHECKING
874 mc->state = qman_mc_user;
875#endif
876 dpaa_zero(mc->cr);
877 return mc->cr;
878}
879
880static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb)
881{
882 struct qm_mc *mc = &portal->mc;
883 union qm_mc_result *rr = mc->rr + mc->rridx;
884
885 DPAA_ASSERT(mc->state == qman_mc_user);
886 dma_wmb();
887 mc->cr->_ncw_verb = myverb | mc->vbit;
888 dpaa_flush(mc->cr);
889 dpaa_invalidate_touch_ro(rr);
890#ifdef CONFIG_FSL_DPAA_CHECKING
891 mc->state = qman_mc_hw;
892#endif
893}
894
895static inline union qm_mc_result *qm_mc_result(struct qm_portal *portal)
896{
897 struct qm_mc *mc = &portal->mc;
898 union qm_mc_result *rr = mc->rr + mc->rridx;
899
900 DPAA_ASSERT(mc->state == qman_mc_hw);
901 /*
902 * The inactive response register's verb byte always returns zero until
903 * its command is submitted and completed. This includes the valid-bit,
904 * in case you were wondering...
905 */
906 if (!__raw_readb(&rr->verb)) {
907 dpaa_invalidate_touch_ro(rr);
908 return NULL;
909 }
910 mc->rridx ^= 1;
911 mc->vbit ^= QM_MCC_VERB_VBIT;
912#ifdef CONFIG_FSL_DPAA_CHECKING
913 mc->state = qman_mc_idle;
914#endif
915 return rr;
916}
917
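/*
 * Poll qm_mc_result() for up to QM_MCR_TIMEOUT microseconds in 1 us steps.
 * Returns the remaining budget (non-zero) once a result appears, or 0 if
 * the management command timed out.
 */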
918static inline int qm_mc_result_timeout(struct qm_portal *portal,
919 union qm_mc_result **mcr)
920{
921 int timeout = QM_MCR_TIMEOUT;
922
923 do {
924 *mcr = qm_mc_result(portal);
925 if (*mcr)
926 break;
927 udelay(1);
928 } while (--timeout);
929
930 return timeout;
931}
932
933static inline void fq_set(struct qman_fq *fq, u32 mask)
934{
935 set_bits(mask, &fq->flags);
936}
937
938static inline void fq_clear(struct qman_fq *fq, u32 mask)
939{
940 clear_bits(mask, &fq->flags);
941}
942
943static inline int fq_isset(struct qman_fq *fq, u32 mask)
944{
945 return fq->flags & mask;
946}
947
948static inline int fq_isclear(struct qman_fq *fq, u32 mask)
949{
950 return !(fq->flags & mask);
951}
952
953struct qman_portal {
954 struct qm_portal p;
955 /* PORTAL_BITS_*** - dynamic, strictly internal */
956 unsigned long bits;
957 /* interrupt sources processed by portal_isr(), configurable */
958 unsigned long irq_sources;
959 u32 use_eqcr_ci_stashing;
960 /* only 1 volatile dequeue at a time */
961 struct qman_fq *vdqcr_owned;
962 u32 sdqcr;
963 /* probing time config params for cpu-affine portals */
964 const struct qm_portal_config *config;
965 /* needed for providing a non-NULL device to dma_map_***() */
966 struct platform_device *pdev;
967 /* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
968 struct qman_cgrs *cgrs;
969 /* linked-list of CSCN handlers. */
970 struct list_head cgr_cbs;
971 /* list lock */
972 spinlock_t cgr_lock;
973 struct work_struct congestion_work;
974 struct work_struct mr_work;
975 char irqname[MAX_IRQNAME];
976};
977
978static cpumask_t affine_mask;
979static DEFINE_SPINLOCK(affine_mask_lock);
980static u16 affine_channels[NR_CPUS];
981static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal);
982struct qman_portal *affine_portals[NR_CPUS];
983
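/*
 * get_affine_portal() returns this cpu's portal and, via get_cpu_var(),
 * disables preemption; callers must balance it with put_affine_portal().
 */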
984static inline struct qman_portal *get_affine_portal(void)
985{
986 return &get_cpu_var(qman_affine_portal);
987}
988
989static inline void put_affine_portal(void)
990{
991 put_cpu_var(qman_affine_portal);
992}
993
994static struct workqueue_struct *qm_portal_wq;
995
996int qman_wq_alloc(void)
997{
998 qm_portal_wq = alloc_workqueue("qman_portal_wq", 0, 1);
999 if (!qm_portal_wq)
1000 return -ENOMEM;
1001 return 0;
1002}
1003
1004/*
1005 * This is what everything can wait on, even if it migrates to a different cpu
1006 * from the one whose affine portal it is waiting on.
1007 */
1008static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
1009
1010static struct qman_fq **fq_table;
1011static u32 num_fqids;
1012
1013int qman_alloc_fq_table(u32 _num_fqids)
1014{
1015 num_fqids = _num_fqids;
1016
1017 fq_table = vzalloc(num_fqids * 2 * sizeof(struct qman_fq *));
1018 if (!fq_table)
1019 return -ENOMEM;
1020
1021 pr_debug("Allocated fq lookup table at %p, entry count %u\n",
1022 fq_table, num_fqids * 2);
1023 return 0;
1024}
1025
1026static struct qman_fq *idx_to_fq(u32 idx)
1027{
1028 struct qman_fq *fq;
1029
1030#ifdef CONFIG_FSL_DPAA_CHECKING
1031 if (WARN_ON(idx >= num_fqids * 2))
1032 return NULL;
1033#endif
1034 fq = fq_table[idx];
1035 DPAA_ASSERT(!fq || idx == fq->idx);
1036
1037 return fq;
1038}
1039
1040/*
1041 * Only returns full-service fq objects, not enqueue-only
1042 * references (QMAN_FQ_FLAG_NO_MODIFY).
1043 */
1044static struct qman_fq *fqid_to_fq(u32 fqid)
1045{
1046 return idx_to_fq(fqid * 2);
1047}
1048
1049static struct qman_fq *tag_to_fq(u32 tag)
1050{
1051#if BITS_PER_LONG == 64
1052 return idx_to_fq(tag);
1053#else
1054 return (struct qman_fq *)tag;
1055#endif
1056}
1057
1058static u32 fq_to_tag(struct qman_fq *fq)
1059{
1060#if BITS_PER_LONG == 64
1061 return fq->idx;
1062#else
1063 return (u32)fq;
1064#endif
1065}
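/*
 * On 64-bit kernels the 32-bit DQRR/EQCR tag cannot hold a pointer, so the
 * fq table index is used as the tag instead; on 32-bit the fq pointer is
 * stored in the tag directly.
 */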
1066
1067static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
1068static inline unsigned int __poll_portal_fast(struct qman_portal *p,
1069 unsigned int poll_limit);
1070static void qm_congestion_task(struct work_struct *work);
1071static void qm_mr_process_task(struct work_struct *work);
1072
1073static irqreturn_t portal_isr(int irq, void *ptr)
1074{
1075 struct qman_portal *p = ptr;
1076
1077 u32 clear = QM_DQAVAIL_MASK | p->irq_sources;
1078 u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources;
1079
1080 if (unlikely(!is))
1081 return IRQ_NONE;
1082
1083 /* DQRR-handling if it's interrupt-driven */
1084 if (is & QM_PIRQ_DQRI)
1085 __poll_portal_fast(p, QMAN_POLL_LIMIT);
1086 /* Handling of anything else that's interrupt-driven */
1087 clear |= __poll_portal_slow(p, is);
1088 qm_out(&p->p, QM_REG_ISR, clear);
1089 return IRQ_HANDLED;
1090}
1091
1092static int drain_mr_fqrni(struct qm_portal *p)
1093{
1094 const union qm_mr_entry *msg;
1095loop:
1096 msg = qm_mr_current(p);
1097 if (!msg) {
1098 /*
1099 * if MR was full and h/w had other FQRNI entries to produce, we
1100 * need to allow it time to produce those entries once the
1101 * existing entries are consumed. A worst-case situation
1102 * (fully-loaded system) means h/w sequencers may have to do 3-4
1103 * other things before servicing the portal's MR pump, each of
1104 * which (if slow) may take ~50 qman cycles (which is ~200
1105 * processor cycles). So rounding up and then multiplying this
1106 * worst-case estimate by a factor of 10, just to be
1107 * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
1108 * one entry at a time, so h/w has an opportunity to produce new
1109 * entries well before the ring has been fully consumed, so
1110 * we're being *really* paranoid here.
1111 */
1112 u64 now, then = jiffies;
1113
1114 do {
1115 now = jiffies;
1116 } while ((then + 10000) > now);
1117 msg = qm_mr_current(p);
1118 if (!msg)
1119 return 0;
1120 }
1121 if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
1122 /* We aren't draining anything but FQRNIs */
1123 pr_err("Found verb 0x%x in MR\n", msg->verb);
1124 return -1;
1125 }
1126 qm_mr_next(p);
1127 qm_mr_cci_consume(p, 1);
1128 goto loop;
1129}
1130
1131static int qman_create_portal(struct qman_portal *portal,
1132 const struct qm_portal_config *c,
1133 const struct qman_cgrs *cgrs)
1134{
1135 struct qm_portal *p;
1136 char buf[16];
1137 int ret;
1138 u32 isdr;
1139
1140 p = &portal->p;
1141
1142#ifdef CONFIG_FSL_PAMU
1143 /* PAMU is required for stashing */
1144 portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ? 1 : 0);
1145#else
1146 portal->use_eqcr_ci_stashing = 0;
1147#endif
1148 /*
1149 * prep the low-level portal struct with the mapped addresses from the
1150 * config, everything that follows depends on it and "config" is more
1151 * for (de)reference
1152 */
1153 p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
1154 p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
1155 /*
1156 * If CI-stashing is used, the current defaults use a threshold of 3,
1157 * and stash with higher-than-DQRR priority.
1158 */
1159 if (qm_eqcr_init(p, qm_eqcr_pvb,
1160 portal->use_eqcr_ci_stashing ? 3 : 0, 1)) {
1161 dev_err(c->dev, "EQCR initialisation failed\n");
1162 goto fail_eqcr;
1163 }
1164 if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
1165 qm_dqrr_cdc, DQRR_MAXFILL)) {
1166 dev_err(c->dev, "DQRR initialisation failed\n");
1167 goto fail_dqrr;
1168 }
1169 if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
1170 dev_err(c->dev, "MR initialisation failed\n");
1171 goto fail_mr;
1172 }
1173 if (qm_mc_init(p)) {
1174 dev_err(c->dev, "MC initialisation failed\n");
1175 goto fail_mc;
1176 }
1177 /* static interrupt-gating controls */
1178 qm_dqrr_set_ithresh(p, QMAN_PIRQ_DQRR_ITHRESH);
1179 qm_mr_set_ithresh(p, QMAN_PIRQ_MR_ITHRESH);
1180 qm_out(p, QM_REG_ITPR, QMAN_PIRQ_IPERIOD);
1181 portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL);
1182 if (!portal->cgrs)
1183 goto fail_cgrs;
1184 /* initial snapshot is no-depletion */
1185 qman_cgrs_init(&portal->cgrs[1]);
1186 if (cgrs)
1187 portal->cgrs[0] = *cgrs;
1188 else
1189 /* if the given mask is NULL, assume all CGRs can be seen */
1190 qman_cgrs_fill(&portal->cgrs[0]);
1191 INIT_LIST_HEAD(&portal->cgr_cbs);
1192 spin_lock_init(&portal->cgr_lock);
1193 INIT_WORK(&portal->congestion_work, qm_congestion_task);
1194 INIT_WORK(&portal->mr_work, qm_mr_process_task);
1195 portal->bits = 0;
1196 portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
1197 QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
1198 QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
1199 sprintf(buf, "qportal-%d", c->channel);
1200 portal->pdev = platform_device_alloc(buf, -1);
1201 if (!portal->pdev)
1202 goto fail_devalloc;
1203 if (dma_set_mask(&portal->pdev->dev, DMA_BIT_MASK(40)))
1204 goto fail_devadd;
1205 ret = platform_device_add(portal->pdev);
1206 if (ret)
1207 goto fail_devadd;
1208 isdr = 0xffffffff;
1209 qm_out(p, QM_REG_ISDR, isdr);
1210 portal->irq_sources = 0;
1211 qm_out(p, QM_REG_IER, 0);
1212 qm_out(p, QM_REG_ISR, 0xffffffff);
1213 snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
1214 if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
1215 dev_err(c->dev, "request_irq() failed\n");
1216 goto fail_irq;
1217 }
1218 if (c->cpu != -1 && irq_can_set_affinity(c->irq) &&
1219 irq_set_affinity(c->irq, cpumask_of(c->cpu))) {
1220 dev_err(c->dev, "irq_set_affinity() failed\n");
1221 goto fail_affinity;
1222 }
1223
1224 /* Need EQCR to be empty before continuing */
1225 isdr &= ~QM_PIRQ_EQCI;
1226 qm_out(p, QM_REG_ISDR, isdr);
1227 ret = qm_eqcr_get_fill(p);
1228 if (ret) {
1229 dev_err(c->dev, "EQCR unclean\n");
1230 goto fail_eqcr_empty;
1231 }
1232 isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
1233 qm_out(p, QM_REG_ISDR, isdr);
1234 if (qm_dqrr_current(p)) {
1235 dev_err(c->dev, "DQRR unclean\n");
1236 qm_dqrr_cdc_consume_n(p, 0xffff);
1237 }
1238 if (qm_mr_current(p) && drain_mr_fqrni(p)) {
1239 /* special handling, drain just in case it's a few FQRNIs */
1240 const union qm_mr_entry *e = qm_mr_current(p);
1241
1242 dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x, addr 0x%x\n",
1243 e->verb, e->ern.rc, e->ern.fd.addr_lo);
1244 goto fail_dqrr_mr_empty;
1245 }
1246 /* Success */
1247 portal->config = c;
1248 qm_out(p, QM_REG_ISDR, 0);
1249 qm_out(p, QM_REG_IIR, 0);
1250 /* Write a sane SDQCR */
1251 qm_dqrr_sdqcr_set(p, portal->sdqcr);
1252 return 0;
1253
1254fail_dqrr_mr_empty:
1255fail_eqcr_empty:
1256fail_affinity:
1257 free_irq(c->irq, portal);
1258fail_irq:
1259 platform_device_del(portal->pdev);
1260fail_devadd:
1261 platform_device_put(portal->pdev);
1262fail_devalloc:
1263 kfree(portal->cgrs);
1264fail_cgrs:
1265 qm_mc_finish(p);
1266fail_mc:
1267 qm_mr_finish(p);
1268fail_mr:
1269 qm_dqrr_finish(p);
1270fail_dqrr:
1271 qm_eqcr_finish(p);
1272fail_eqcr:
1273 return -EIO;
1274}
1275
1276struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
1277 const struct qman_cgrs *cgrs)
1278{
1279 struct qman_portal *portal;
1280 int err;
1281
1282 portal = &per_cpu(qman_affine_portal, c->cpu);
1283 err = qman_create_portal(portal, c, cgrs);
1284 if (err)
1285 return NULL;
1286
1287 spin_lock(&affine_mask_lock);
1288 cpumask_set_cpu(c->cpu, &affine_mask);
1289 affine_channels[c->cpu] = c->channel;
1290 affine_portals[c->cpu] = portal;
1291 spin_unlock(&affine_mask_lock);
1292
1293 return portal;
1294}
1295
1296static void qman_destroy_portal(struct qman_portal *qm)
1297{
1298 const struct qm_portal_config *pcfg;
1299
1300 /* Stop dequeues on the portal */
1301 qm_dqrr_sdqcr_set(&qm->p, 0);
1302
1303 /*
1304 * NB we do this to "quiesce" EQCR. If we add enqueue-completions or
1305 * something related to QM_PIRQ_EQCI, this may need fixing.
1306 * Also, due to the prefetching model used for CI updates in the enqueue
1307 * path, this update will only invalidate the CI cacheline *after*
1308 * working on it, so we need to call this twice to ensure a full update
1309 * irrespective of where the enqueue processing was at when the teardown
1310 * began.
1311 */
1312 qm_eqcr_cce_update(&qm->p);
1313 qm_eqcr_cce_update(&qm->p);
1314 pcfg = qm->config;
1315
1316 free_irq(pcfg->irq, qm);
1317
1318 kfree(qm->cgrs);
1319 qm_mc_finish(&qm->p);
1320 qm_mr_finish(&qm->p);
1321 qm_dqrr_finish(&qm->p);
1322 qm_eqcr_finish(&qm->p);
1323
1324 platform_device_del(qm->pdev);
1325 platform_device_put(qm->pdev);
1326
1327 qm->config = NULL;
1328}
1329
1330const struct qm_portal_config *qman_destroy_affine_portal(void)
1331{
1332 struct qman_portal *qm = get_affine_portal();
1333 const struct qm_portal_config *pcfg;
1334 int cpu;
1335
1336 pcfg = qm->config;
1337 cpu = pcfg->cpu;
1338
1339 qman_destroy_portal(qm);
1340
1341 spin_lock(&affine_mask_lock);
1342 cpumask_clear_cpu(cpu, &affine_mask);
1343 spin_unlock(&affine_mask_lock);
1344 put_affine_portal();
1345 return pcfg;
1346}
1347
1348/* Inline helper to reduce nesting in __poll_portal_slow() */
1349static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
1350 const union qm_mr_entry *msg, u8 verb)
1351{
1352 switch (verb) {
1353 case QM_MR_VERB_FQRL:
1354 DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
1355 fq_clear(fq, QMAN_FQ_STATE_ORL);
1356 break;
1357 case QM_MR_VERB_FQRN:
1358 DPAA_ASSERT(fq->state == qman_fq_state_parked ||
1359 fq->state == qman_fq_state_sched);
1360 DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
1361 fq_clear(fq, QMAN_FQ_STATE_CHANGING);
1362 if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
1363 fq_set(fq, QMAN_FQ_STATE_NE);
1364 if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
1365 fq_set(fq, QMAN_FQ_STATE_ORL);
1366 fq->state = qman_fq_state_retired;
1367 break;
1368 case QM_MR_VERB_FQPN:
1369 DPAA_ASSERT(fq->state == qman_fq_state_sched);
1370 DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
1371 fq->state = qman_fq_state_parked;
1372 }
1373}
1374
1375static void qm_congestion_task(struct work_struct *work)
1376{
1377 struct qman_portal *p = container_of(work, struct qman_portal,
1378 congestion_work);
1379 struct qman_cgrs rr, c;
1380 union qm_mc_result *mcr;
1381 struct qman_cgr *cgr;
1382
1383 spin_lock(&p->cgr_lock);
1384 qm_mc_start(&p->p);
1385 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
1386 if (!qm_mc_result_timeout(&p->p, &mcr)) {
1387 spin_unlock(&p->cgr_lock);
1388 dev_crit(p->config->dev, "QUERYCONGESTION timeout\n");
1389 return;
1390 }
1391 /* mask out the ones I'm not interested in */
1392 qman_cgrs_and(&rr, (struct qman_cgrs *)&mcr->querycongestion.state,
1393 &p->cgrs[0]);
1394 /* check previous snapshot for delta, enter/exit congestion */
1395 qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
1396 /* update snapshot */
1397 qman_cgrs_cp(&p->cgrs[1], &rr);
1398 /* Invoke callback */
1399 list_for_each_entry(cgr, &p->cgr_cbs, node)
1400 if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
1401 cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
1402 spin_unlock(&p->cgr_lock);
1403}
1404
1405static void qm_mr_process_task(struct work_struct *work)
1406{
1407 struct qman_portal *p = container_of(work, struct qman_portal,
1408 mr_work);
1409 const union qm_mr_entry *msg;
1410 struct qman_fq *fq;
1411 u8 verb, num = 0;
1412
1413 preempt_disable();
1414
1415 while (1) {
1416 qm_mr_pvb_update(&p->p);
1417 msg = qm_mr_current(&p->p);
1418 if (!msg)
1419 break;
1420
1421 verb = msg->verb & QM_MR_VERB_TYPE_MASK;
1422 /* The message is a software ERN iff the 0x20 bit is clear */
1423 if (verb & 0x20) {
1424 switch (verb) {
1425 case QM_MR_VERB_FQRNI:
1426 /* nada, we drop FQRNIs on the floor */
1427 break;
1428 case QM_MR_VERB_FQRN:
1429 case QM_MR_VERB_FQRL:
1430 /* Lookup in the retirement table */
1431 fq = fqid_to_fq(msg->fq.fqid);
1432 if (WARN_ON(!fq))
1433 break;
1434 fq_state_change(p, fq, msg, verb);
1435 if (fq->cb.fqs)
1436 fq->cb.fqs(p, fq, msg);
1437 break;
1438 case QM_MR_VERB_FQPN:
1439 /* Parked */
1440 fq = tag_to_fq(msg->fq.contextB);
1441 fq_state_change(p, fq, msg, verb);
1442 if (fq->cb.fqs)
1443 fq->cb.fqs(p, fq, msg);
1444 break;
1445 case QM_MR_VERB_DC_ERN:
1446 /* DCP ERN */
1447 pr_crit_once("Leaking DCP ERNs!\n");
1448 break;
1449 default:
1450 pr_crit("Invalid MR verb 0x%02x\n", verb);
1451 }
1452 } else {
1453 /* It's a software ERN */
1454 fq = tag_to_fq(msg->ern.tag);
1455 fq->cb.ern(p, fq, msg);
1456 }
1457 num++;
1458 qm_mr_next(&p->p);
1459 }
1460
1461 qm_mr_cci_consume(&p->p, num);
1462 preempt_enable();
1463}
1464
1465static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
1466{
1467 if (is & QM_PIRQ_CSCI) {
1468 queue_work_on(smp_processor_id(), qm_portal_wq,
1469 &p->congestion_work);
1470 }
1471
1472 if (is & QM_PIRQ_EQRI) {
1473 qm_eqcr_cce_update(&p->p);
1474 qm_eqcr_set_ithresh(&p->p, 0);
1475 wake_up(&affine_queue);
1476 }
1477
1478 if (is & QM_PIRQ_MRI) {
1479 queue_work_on(smp_processor_id(), qm_portal_wq,
1480 &p->mr_work);
1481 }
1482
1483 return is;
1484}
1485
1486/*
1487 * remove some slowish-path stuff from the "fast path" and make sure it isn't
1488 * inlined.
1489 */
1490static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
1491{
1492 p->vdqcr_owned = NULL;
1493 fq_clear(fq, QMAN_FQ_STATE_VDQCR);
1494 wake_up(&affine_queue);
1495}
1496
1497/*
1498 * The only states that would conflict with other things if they ran at the
1499 * same time on the same cpu are:
1500 *
1501 * (i) setting/clearing vdqcr_owned, and
1502 * (ii) clearing the NE (Not Empty) flag.
1503 *
1504 * Both are safe, because:
1505 *
1506 * (i) this clearing can only occur after qman_volatile_dequeue() has set the
1507 * vdqcr_owned field (which it does before setting VDQCR), and
1508 * qman_volatile_dequeue() blocks interrupts and preemption while this is
1509 * done so that we can't interfere.
1510 * (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
1511 * with (i) that API prevents us from interfering until it's safe.
1512 *
1513 * The good thing is that qman_volatile_dequeue() and qman_retire_fq() run far
1514 * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the net
1515 * advantage comes from this function not having to "lock" anything at all.
1516 *
1517 * Note also that the callbacks are invoked at points which are safe against the
1518 * above potential conflicts, but that this function itself is not re-entrant
1519 * (this is because the function tracks one end of each FIFO in the portal and
1520 * we do *not* want to lock that). So the consequence is that it is safe for
1521 * user callbacks to call into any QMan API.
1522 */
1523static inline unsigned int __poll_portal_fast(struct qman_portal *p,
1524 unsigned int poll_limit)
1525{
1526 const struct qm_dqrr_entry *dq;
1527 struct qman_fq *fq;
1528 enum qman_cb_dqrr_result res;
1529 unsigned int limit = 0;
1530
1531 do {
1532 qm_dqrr_pvb_update(&p->p);
1533 dq = qm_dqrr_current(&p->p);
1534 if (!dq)
1535 break;
1536
1537 if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
1538 /*
1539 * VDQCR: don't trust contextB as the FQ may have
1540 * been configured for h/w consumption and we're
1541 * draining it post-retirement.
1542 */
1543 fq = p->vdqcr_owned;
1544 /*
1545 * We only set QMAN_FQ_STATE_NE when retiring, so we
1546 * only need to check for clearing it when doing
1547 * volatile dequeues. It's one less thing to check
1548 * in the critical path (SDQCR).
1549 */
1550 if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
1551 fq_clear(fq, QMAN_FQ_STATE_NE);
1552 /*
1553 * This is duplicated from the SDQCR code, but we
1554 * have stuff to do before *and* after this callback,
1555 * and we don't want multiple if()s in the critical
1556 * path (SDQCR).
1557 */
1558 res = fq->cb.dqrr(p, fq, dq);
1559 if (res == qman_cb_dqrr_stop)
1560 break;
1561 /* Check for VDQCR completion */
1562 if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
1563 clear_vdqcr(p, fq);
1564 } else {
1565 /* SDQCR: contextB points to the FQ */
1566 fq = tag_to_fq(dq->contextB);
1567 /* Now let the callback do its stuff */
1568 res = fq->cb.dqrr(p, fq, dq);
1569 /*
1570 * The callback can request that we exit without
1571 * consuming this entry or advancing to the next one.
1572 */
1573 if (res == qman_cb_dqrr_stop)
1574 break;
1575 }
1576 /* Interpret 'dq' from a driver perspective. */
1577 /*
1578 * Parking isn't possible unless HELDACTIVE was set. NB,
1579 * FORCEELIGIBLE implies HELDACTIVE, so we only need to
1580 * check for HELDACTIVE to cover both.
1581 */
1582 DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
1583 (res != qman_cb_dqrr_park));
1584 /* just means "skip it, I'll consume it myself later on" */
1585 if (res != qman_cb_dqrr_defer)
1586 qm_dqrr_cdc_consume_1ptr(&p->p, dq,
1587 res == qman_cb_dqrr_park);
1588 /* Move forward */
1589 qm_dqrr_next(&p->p);
1590 /*
1591 * Entry processed and consumed, increment our counter. The
1592 * callback can request that we exit after consuming the
1593 * entry, and we also exit if we reach our processing limit,
1594 * so loop back only if neither of these conditions is met.
1595 */
1596 } while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop);
1597
1598 return limit;
1599}
1600
1601void qman_p_irqsource_add(struct qman_portal *p, u32 bits)
1602{
1603 unsigned long irqflags;
1604
1605 local_irq_save(irqflags);
1606 set_bits(bits & QM_PIRQ_VISIBLE, &p->irq_sources);
1607 qm_out(&p->p, QM_REG_IER, p->irq_sources);
1608 local_irq_restore(irqflags);
1609}
1610EXPORT_SYMBOL(qman_p_irqsource_add);
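
/*
 * Editor's illustrative sketch (not part of this commit): a consumer that
 * wants DQRR processing to be interrupt-driven on a given CPU's affine
 * portal would enable the DQRI source as below; portal_isr() then invokes
 * __poll_portal_fast() when dequeues are available. The helper name
 * enable_irq_driven_dequeue() is hypothetical.
 */
static void enable_irq_driven_dequeue(int cpu)
{
	struct qman_portal *p = qman_get_affine_portal(cpu);

	qman_p_irqsource_add(p, QM_PIRQ_DQRI);
}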
1611
1612void qman_p_irqsource_remove(struct qman_portal *p, u32 bits)
1613{
1614 unsigned long irqflags;
1615 u32 ier;
1616
1617 /*
1618 * Our interrupt handler only processes+clears status register bits that
1619 * are in p->irq_sources. As we're trimming that mask, if one of them
1620 * were to assert in the status register just before we remove it from
1621 * the enable register, there would be an interrupt-storm when we
1622 * release the IRQ lock. So we wait for the enable register update to
1623 * take effect in h/w (by reading it back) and then clear all other bits
1624 * in the status register. Ie. we clear them from ISR once it's certain
1625 * IER won't allow them to reassert.
1626 */
1627 local_irq_save(irqflags);
1628 bits &= QM_PIRQ_VISIBLE;
1629 clear_bits(bits, &p->irq_sources);
1630 qm_out(&p->p, QM_REG_IER, p->irq_sources);
1631 ier = qm_in(&p->p, QM_REG_IER);
1632 /*
1633 * Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
1634 * data-dependency, ie. to protect against re-ordering.
1635 */
1636 qm_out(&p->p, QM_REG_ISR, ~ier);
1637 local_irq_restore(irqflags);
1638}
1639EXPORT_SYMBOL(qman_p_irqsource_remove);
1640
1641const cpumask_t *qman_affine_cpus(void)
1642{
1643 return &affine_mask;
1644}
1645EXPORT_SYMBOL(qman_affine_cpus);
1646
1647u16 qman_affine_channel(int cpu)
1648{
1649 if (cpu < 0) {
1650 struct qman_portal *portal = get_affine_portal();
1651
1652 cpu = portal->config->cpu;
1653 put_affine_portal();
1654 }
1655 WARN_ON(!cpumask_test_cpu(cpu, &affine_mask));
1656 return affine_channels[cpu];
1657}
1658EXPORT_SYMBOL(qman_affine_channel);
1659
1660struct qman_portal *qman_get_affine_portal(int cpu)
1661{
1662 return affine_portals[cpu];
1663}
1664EXPORT_SYMBOL(qman_get_affine_portal);
1665
1666int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
1667{
1668 return __poll_portal_fast(p, limit);
1669}
1670EXPORT_SYMBOL(qman_p_poll_dqrr);
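
/*
 * Editor's illustrative sketch (not part of this commit): with DQRI left
 * out of the portal's irq_sources, dequeue processing can instead be
 * driven from the caller's context by polling. The loop body and the
 * budget of 16 entries are hypothetical; DQRR callbacks run from the
 * polling context.
 */
static void poll_portal_example(struct qman_portal *p)
{
	unsigned int processed = qman_p_poll_dqrr(p, 16);

	if (!processed)
		cpu_relax();
}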
1671
1672void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools)
1673{
1674 unsigned long irqflags;
1675
1676 local_irq_save(irqflags);
1677 pools &= p->config->pools;
1678 p->sdqcr |= pools;
1679 qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
1680 local_irq_restore(irqflags);
1681}
1682EXPORT_SYMBOL(qman_p_static_dequeue_add);
1683
1684/* Frame queue API */
1685
1686static const char *mcr_result_str(u8 result)
1687{
1688 switch (result) {
1689 case QM_MCR_RESULT_NULL:
1690 return "QM_MCR_RESULT_NULL";
1691 case QM_MCR_RESULT_OK:
1692 return "QM_MCR_RESULT_OK";
1693 case QM_MCR_RESULT_ERR_FQID:
1694 return "QM_MCR_RESULT_ERR_FQID";
1695 case QM_MCR_RESULT_ERR_FQSTATE:
1696 return "QM_MCR_RESULT_ERR_FQSTATE";
1697 case QM_MCR_RESULT_ERR_NOTEMPTY:
1698 return "QM_MCR_RESULT_ERR_NOTEMPTY";
1699 case QM_MCR_RESULT_PENDING:
1700 return "QM_MCR_RESULT_PENDING";
1701 case QM_MCR_RESULT_ERR_BADCOMMAND:
1702 return "QM_MCR_RESULT_ERR_BADCOMMAND";
1703 }
1704 return "<unknown MCR result>";
1705}
1706
1707int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
1708{
1709 if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
1710 int ret = qman_alloc_fqid(&fqid);
1711
1712 if (ret)
1713 return ret;
1714 }
1715 fq->fqid = fqid;
1716 fq->flags = flags;
1717 fq->state = qman_fq_state_oos;
1718 fq->cgr_groupid = 0;
1719
1720 /* A context_b of 0 is allegedly special, so don't use that fqid */
1721 if (fqid == 0 || fqid >= num_fqids) {
1722 WARN(1, "bad fqid %d\n", fqid);
1723 return -EINVAL;
1724 }
1725
1726 fq->idx = fqid * 2;
1727 if (flags & QMAN_FQ_FLAG_NO_MODIFY)
1728 fq->idx++;
1729
1730 WARN_ON(fq_table[fq->idx]);
1731 fq_table[fq->idx] = fq;
1732
1733 return 0;
1734}
1735EXPORT_SYMBOL(qman_create_fq);
1736
1737void qman_destroy_fq(struct qman_fq *fq)
1738{
1739 /*
1740 * We don't need to lock the FQ as it is a pre-condition that the FQ be
1741 * quiesced. Instead, run some checks.
1742 */
1743 switch (fq->state) {
1744 case qman_fq_state_parked:
1745 case qman_fq_state_oos:
1746 if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
1747 qman_release_fqid(fq->fqid);
1748
1749 DPAA_ASSERT(fq_table[fq->idx]);
1750 fq_table[fq->idx] = NULL;
1751 return;
1752 default:
1753 break;
1754 }
1755 DPAA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!");
1756}
1757EXPORT_SYMBOL(qman_destroy_fq);
1758
1759u32 qman_fq_fqid(struct qman_fq *fq)
1760{
1761 return fq->fqid;
1762}
1763EXPORT_SYMBOL(qman_fq_fqid);
1764
1765int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
1766{
1767 union qm_mc_command *mcc;
1768 union qm_mc_result *mcr;
1769 struct qman_portal *p;
1770 u8 res, myverb;
1771 int ret = 0;
1772
1773 myverb = (flags & QMAN_INITFQ_FLAG_SCHED)
1774 ? QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;
1775
1776 if (fq->state != qman_fq_state_oos &&
1777 fq->state != qman_fq_state_parked)
1778 return -EINVAL;
1779#ifdef CONFIG_FSL_DPAA_CHECKING
1780 if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
1781 return -EINVAL;
1782#endif
1783 if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) {
1784 /* OAC can't be set at the same time as TDTHRESH */
1785 if (opts->we_mask & QM_INITFQ_WE_TDTHRESH)
1786 return -EINVAL;
1787 }
1788 /* Issue an INITFQ_[PARKED|SCHED] management command */
1789 p = get_affine_portal();
1790 if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
1791 (fq->state != qman_fq_state_oos &&
1792 fq->state != qman_fq_state_parked)) {
1793 ret = -EBUSY;
1794 goto out;
1795 }
1796 mcc = qm_mc_start(&p->p);
1797 if (opts)
1798 mcc->initfq = *opts;
1799 mcc->initfq.fqid = fq->fqid;
1800 mcc->initfq.count = 0;
1801 /*
1802 * If the FQ does *not* have the TO_DCPORTAL flag, contextB is set as a
1803 * demux pointer. Otherwise, the caller-provided value is allowed to
1804 * stand, don't overwrite it.
1805 */
1806 if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
1807 dma_addr_t phys_fq;
1808
1809 mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB;
1810 mcc->initfq.fqd.context_b = fq_to_tag(fq);
1811 /*
1812 * and the physical address - NB, if the user wasn't trying to
1813 * set CONTEXTA, clear the stashing settings.
1814 */
1815 if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) {
1816 mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
1817 memset(&mcc->initfq.fqd.context_a, 0,
1818 sizeof(mcc->initfq.fqd.context_a));
1819 } else {
1820 phys_fq = dma_map_single(&p->pdev->dev, fq, sizeof(*fq),
1821 DMA_TO_DEVICE);
1822 qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
1823 }
1824 }
1825 if (flags & QMAN_INITFQ_FLAG_LOCAL) {
1826 int wq = 0;
1827
1828 if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) {
1829 mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
1830 wq = 4;
1831 }
1832 qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq);
1833 }
1834 qm_mc_commit(&p->p, myverb);
1835 if (!qm_mc_result_timeout(&p->p, &mcr)) {
1836 dev_err(p->config->dev, "MCR timeout\n");
1837 ret = -ETIMEDOUT;
1838 goto out;
1839 }
1840
1841 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
1842 res = mcr->result;
1843 if (res != QM_MCR_RESULT_OK) {
1844 ret = -EIO;
1845 goto out;
1846 }
1847 if (opts) {
1848 if (opts->we_mask & QM_INITFQ_WE_FQCTRL) {
1849 if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE)
1850 fq_set(fq, QMAN_FQ_STATE_CGR_EN);
1851 else
1852 fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
1853 }
1854 if (opts->we_mask & QM_INITFQ_WE_CGID)
1855 fq->cgr_groupid = opts->fqd.cgid;
1856 }
1857 fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
1858 qman_fq_state_sched : qman_fq_state_parked;
1859
1860out:
1861 put_affine_portal();
1862 return ret;
1863}
1864EXPORT_SYMBOL(qman_init_fq);
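
/*
 * Editor's illustrative sketch (not part of this commit): the minimal
 * create-then-initialise sequence a consumer driver would follow. The
 * callback my_dqrr_cb(), the helper setup_rx_fq() and the use of
 * qman_cb_dqrr_consume as the ordinary "entry consumed" result are
 * assumptions; the cb.dqrr signature is inferred from how
 * __poll_portal_fast() invokes it above.
 */
static enum qman_cb_dqrr_result my_dqrr_cb(struct qman_portal *p,
					   struct qman_fq *fq,
					   const struct qm_dqrr_entry *dq)
{
	/* consume the frame carried by this entry here */
	return qman_cb_dqrr_consume;
}

static int setup_rx_fq(struct qman_fq *fq)
{
	int err;

	fq->cb.dqrr = my_dqrr_cb;
	/* Let QMan allocate the FQID; contextB demuxing stays with us */
	err = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
	if (err)
		return err;
	/* Schedule immediately; no optional INITFQ fields are set */
	return qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, NULL);
}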
1865
1866int qman_schedule_fq(struct qman_fq *fq)
1867{
1868 union qm_mc_command *mcc;
1869 union qm_mc_result *mcr;
1870 struct qman_portal *p;
1871 int ret = 0;
1872
1873 if (fq->state != qman_fq_state_parked)
1874 return -EINVAL;
1875#ifdef CONFIG_FSL_DPAA_CHECKING
1876 if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
1877 return -EINVAL;
1878#endif
1879 /* Issue a ALTERFQ_SCHED management command */
1880 p = get_affine_portal();
1881 if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
1882 fq->state != qman_fq_state_parked) {
1883 ret = -EBUSY;
1884 goto out;
1885 }
1886 mcc = qm_mc_start(&p->p);
1887 mcc->alterfq.fqid = fq->fqid;
1888 qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
1889 if (!qm_mc_result_timeout(&p->p, &mcr)) {
1890 dev_err(p->config->dev, "ALTER_SCHED timeout\n");
1891 ret = -ETIMEDOUT;
1892 goto out;
1893 }
1894
1895 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
1896 if (mcr->result != QM_MCR_RESULT_OK) {
1897 ret = -EIO;
1898 goto out;
1899 }
1900 fq->state = qman_fq_state_sched;
1901out:
1902 put_affine_portal();
1903 return ret;
1904}
1905EXPORT_SYMBOL(qman_schedule_fq);
1906
1907int qman_retire_fq(struct qman_fq *fq, u32 *flags)
1908{
1909 union qm_mc_command *mcc;
1910 union qm_mc_result *mcr;
1911 struct qman_portal *p;
1912 int ret;
1913 u8 res;
1914
1915 if (fq->state != qman_fq_state_parked &&
1916 fq->state != qman_fq_state_sched)
1917 return -EINVAL;
1918#ifdef CONFIG_FSL_DPAA_CHECKING
1919 if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
1920 return -EINVAL;
1921#endif
1922 p = get_affine_portal();
1923 if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
1924 fq->state == qman_fq_state_retired ||
1925 fq->state == qman_fq_state_oos) {
1926 ret = -EBUSY;
1927 goto out;
1928 }
1929 mcc = qm_mc_start(&p->p);
1930 mcc->alterfq.fqid = fq->fqid;
1931 qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
1932 if (!qm_mc_result_timeout(&p->p, &mcr)) {
1933 dev_crit(p->config->dev, "ALTER_RETIRE timeout\n");
1934 ret = -ETIMEDOUT;
1935 goto out;
1936 }
1937
1938 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
1939 res = mcr->result;
1940 /*
1941 * "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
1942 * and defer the flags until FQRNI or FQRN (respectively) show up. But
1943 * "Friendly" is to process OK immediately, and not set CHANGING. We do
1944 * friendly, otherwise the caller doesn't necessarily have a fully
1945 * "retired" FQ on return even if the retirement was immediate. However
1946 * this does mean some code duplication between here and
1947 * fq_state_change().
1948 */
1949 if (res == QM_MCR_RESULT_OK) {
1950 ret = 0;
1951 /* Process 'fq' right away, we'll ignore FQRNI */
1952 if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
1953 fq_set(fq, QMAN_FQ_STATE_NE);
1954 if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
1955 fq_set(fq, QMAN_FQ_STATE_ORL);
1956 if (flags)
1957 *flags = fq->flags;
1958 fq->state = qman_fq_state_retired;
1959 if (fq->cb.fqs) {
1960 /*
1961 * Another issue with supporting "immediate" retirement
1962 * is that we're forced to drop FQRNIs, because by the
1963 * time they're seen it may already be "too late" (the
1964 * fq may have been OOS'd and free()'d already). But if
1965 * the upper layer wants a callback whether it's
1966 * immediate or not, we have to fake a "MR" entry to
1967 * look like an FQRNI...
1968 */
1969 union qm_mr_entry msg;
1970
1971 msg.verb = QM_MR_VERB_FQRNI;
1972 msg.fq.fqs = mcr->alterfq.fqs;
1973 msg.fq.fqid = fq->fqid;
1974 msg.fq.contextB = fq_to_tag(fq);
1975 fq->cb.fqs(p, fq, &msg);
1976 }
1977 } else if (res == QM_MCR_RESULT_PENDING) {
1978 ret = 1;
1979 fq_set(fq, QMAN_FQ_STATE_CHANGING);
1980 } else {
1981 ret = -EIO;
1982 }
1983out:
1984 put_affine_portal();
1985 return ret;
1986}
1987EXPORT_SYMBOL(qman_retire_fq);
1988
1989int qman_oos_fq(struct qman_fq *fq)
1990{
1991 union qm_mc_command *mcc;
1992 union qm_mc_result *mcr;
1993 struct qman_portal *p;
1994 int ret = 0;
1995
1996 if (fq->state != qman_fq_state_retired)
1997 return -EINVAL;
1998#ifdef CONFIG_FSL_DPAA_CHECKING
1999 if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
2000 return -EINVAL;
2001#endif
2002 p = get_affine_portal();
2003 if (fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS) ||
2004 fq->state != qman_fq_state_retired) {
2005 ret = -EBUSY;
2006 goto out;
2007 }
2008 mcc = qm_mc_start(&p->p);
2009 mcc->alterfq.fqid = fq->fqid;
2010 qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
2011 if (!qm_mc_result_timeout(&p->p, &mcr)) {
2012 ret = -ETIMEDOUT;
2013 goto out;
2014 }
2015 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
2016 if (mcr->result != QM_MCR_RESULT_OK) {
2017 ret = -EIO;
2018 goto out;
2019 }
2020 fq->state = qman_fq_state_oos;
2021out:
2022 put_affine_portal();
2023 return ret;
2024}
2025EXPORT_SYMBOL(qman_oos_fq);
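
/*
 * Editor's illustrative sketch (not part of this commit): the teardown
 * order implied by the state checks above - retire, wait out a pending
 * retirement, take the FQ out of service, then release the lookup-table
 * slot. The helper name teardown_fq() is hypothetical and the "pending"
 * case is simplified; a real driver would wait for the FQRN message (via
 * cb.fqs) rather than spin on fq->state.
 */
static int teardown_fq(struct qman_fq *fq)
{
	u32 flags;
	int err = qman_retire_fq(fq, &flags);

	if (err < 0)
		return err;
	if (err > 0)	/* QM_MCR_RESULT_PENDING */
		while (fq->state != qman_fq_state_retired)
			cpu_relax();
	err = qman_oos_fq(fq);
	if (err)
		return err;
	qman_destroy_fq(fq);
	return 0;
}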
2026
2027int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
2028{
2029 union qm_mc_command *mcc;
2030 union qm_mc_result *mcr;
2031 struct qman_portal *p = get_affine_portal();
2032 int ret = 0;
2033
2034 mcc = qm_mc_start(&p->p);
2035 mcc->queryfq.fqid = fq->fqid;
2036 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
2037 if (!qm_mc_result_timeout(&p->p, &mcr)) {
2038 ret = -ETIMEDOUT;
2039 goto out;
2040 }
2041
2042 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
2043 if (mcr->result == QM_MCR_RESULT_OK)
2044 *fqd = mcr->queryfq.fqd;
2045 else
2046 ret = -EIO;
2047out:
2048 put_affine_portal();
2049 return ret;
2050}
2051
2052static int qman_query_fq_np(struct qman_fq *fq,
2053 struct qm_mcr_queryfq_np *np)
2054{
2055 union qm_mc_command *mcc;
2056 union qm_mc_result *mcr;
2057 struct qman_portal *p = get_affine_portal();
2058 int ret = 0;
2059
2060 mcc = qm_mc_start(&p->p);
2061 mcc->queryfq.fqid = fq->fqid;
2062 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
2063 if (!qm_mc_result_timeout(&p->p, &mcr)) {
2064 ret = -ETIMEDOUT;
2065 goto out;
2066 }
2067
2068 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
2069 if (mcr->result == QM_MCR_RESULT_OK)
2070 *np = mcr->queryfq_np;
2071 else if (mcr->result == QM_MCR_RESULT_ERR_FQID)
2072 ret = -ERANGE;
2073 else
2074 ret = -EIO;
2075out:
2076 put_affine_portal();
2077 return ret;
2078}
2079
2080static int qman_query_cgr(struct qman_cgr *cgr,
2081 struct qm_mcr_querycgr *cgrd)
2082{
2083 union qm_mc_command *mcc;
2084 union qm_mc_result *mcr;
2085 struct qman_portal *p = get_affine_portal();
2086 int ret = 0;
2087
2088 mcc = qm_mc_start(&p->p);
2089 mcc->querycgr.cgid = cgr->cgrid;
2090 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
2091 if (!qm_mc_result_timeout(&p->p, &mcr)) {
2092 ret = -ETIMEDOUT;
2093 goto out;
2094 }
2095 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
2096 if (mcr->result == QM_MCR_RESULT_OK)
2097 *cgrd = mcr->querycgr;
2098 else {
2099 dev_err(p->config->dev, "QUERY_CGR failed: %s\n",
2100 mcr_result_str(mcr->result));
2101 ret = -EIO;
2102 }
2103out:
2104 put_affine_portal();
2105 return ret;
2106}
2107
2108int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result)
2109{
2110 struct qm_mcr_querycgr query_cgr;
2111 int err;
2112
2113 err = qman_query_cgr(cgr, &query_cgr);
2114 if (err)
2115 return err;
2116
2117 *result = !!query_cgr.cgr.cs;
2118 return 0;
2119}
2120EXPORT_SYMBOL(qman_query_cgr_congested);
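
/*
 * Editor's illustrative sketch (not part of this commit): a transmit path
 * can use the query above to back off while the CGR its FQs belong to is
 * congested. The helper name tx_ok() is hypothetical.
 */
static bool tx_ok(struct qman_cgr *cgr)
{
	bool congested = false;

	/* On query failure, err on the side of transmitting */
	if (qman_query_cgr_congested(cgr, &congested))
		return true;
	return !congested;
}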
2121
2122/* internal function used as a wait_event() expression */
2123static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr)
2124{
2125 unsigned long irqflags;
2126 int ret = -EBUSY;
2127
2128 local_irq_save(irqflags);
2129 if (p->vdqcr_owned)
2130 goto out;
2131 if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
2132 goto out;
2133
2134 fq_set(fq, QMAN_FQ_STATE_VDQCR);
2135 p->vdqcr_owned = fq;
2136 qm_dqrr_vdqcr_set(&p->p, vdqcr);
2137 ret = 0;
2138out:
2139 local_irq_restore(irqflags);
2140 return ret;
2141}
2142
2143static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr)
2144{
2145 int ret;
2146
2147 *p = get_affine_portal();
2148 ret = set_p_vdqcr(*p, fq, vdqcr);
2149 put_affine_portal();
2150 return ret;
2151}
2152
2153static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq,
2154 u32 vdqcr, u32 flags)
2155{
2156 int ret = 0;
2157
2158 if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
2159 ret = wait_event_interruptible(affine_queue,
2160 !set_vdqcr(p, fq, vdqcr));
2161 else
2162 wait_event(affine_queue, !set_vdqcr(p, fq, vdqcr));
2163 return ret;
2164}
2165
2166int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr)
2167{
2168 struct qman_portal *p;
2169 int ret;
2170
2171 if (fq->state != qman_fq_state_parked &&
2172 fq->state != qman_fq_state_retired)
2173 return -EINVAL;
2174 if (vdqcr & QM_VDQCR_FQID_MASK)
2175 return -EINVAL;
2176 if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
2177 return -EBUSY;
2178 vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
2179 if (flags & QMAN_VOLATILE_FLAG_WAIT)
2180 ret = wait_vdqcr_start(&p, fq, vdqcr, flags);
2181 else
2182 ret = set_vdqcr(&p, fq, vdqcr);
2183 if (ret)
2184 return ret;
2185 /* VDQCR is set */
2186 if (flags & QMAN_VOLATILE_FLAG_FINISH) {
2187 if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
2188 /*
2189 * NB: don't propagate any error - the caller wouldn't
2190 * know whether the VDQCR was issued or not. A signal
2191 * could arrive after returning anyway, so the caller
2192 * can check signal_pending() if that's an issue.
2193 */
2194 wait_event_interruptible(affine_queue,
2195 !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
2196 else
2197 wait_event(affine_queue,
2198 !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
2199 }
2200 return 0;
2201}
2202EXPORT_SYMBOL(qman_volatile_dequeue);
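
/*
 * Editor's illustrative sketch (not part of this commit): pulling up to
 * three frames from a parked or retired FQ with a volatile dequeue,
 * blocking until the command slot is free and the command has expired.
 * Fully draining an FQ would repeat this until the FQ reports empty, as
 * qman_shutdown_fq() does below. The helper name pull_frames() and the
 * NUMFRAMES budget of 3 are hypothetical.
 */
static int pull_frames(struct qman_fq *fq)
{
	/* The FQID bits are filled in by qman_volatile_dequeue() itself */
	u32 vdqcr = QM_VDQCR_NUMFRAMES_SET(3);

	return qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT |
				     QMAN_VOLATILE_FLAG_FINISH, vdqcr);
}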
2203
2204static void update_eqcr_ci(struct qman_portal *p, u8 avail)
2205{
2206 if (avail)
2207 qm_eqcr_cce_prefetch(&p->p);
2208 else
2209 qm_eqcr_cce_update(&p->p);
2210}
2211
2212int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd)
2213{
2214 struct qman_portal *p;
2215 struct qm_eqcr_entry *eq;
2216 unsigned long irqflags;
2217 u8 avail;
2218
2219 p = get_affine_portal();
2220 local_irq_save(irqflags);
2221
2222 if (p->use_eqcr_ci_stashing) {
2223 /*
2224 * The stashing case is easy, only update if we need to in
2225 * order to try and liberate ring entries.
2226 */
2227 eq = qm_eqcr_start_stash(&p->p);
2228 } else {
2229 /*
2230 * The non-stashing case is harder, need to prefetch ahead of
2231 * time.
2232 */
2233 avail = qm_eqcr_get_avail(&p->p);
2234 if (avail < 2)
2235 update_eqcr_ci(p, avail);
2236 eq = qm_eqcr_start_no_stash(&p->p);
2237 }
2238
2239 if (unlikely(!eq))
2240 goto out;
2241
2242 eq->fqid = fq->fqid;
2243 eq->tag = fq_to_tag(fq);
2244 eq->fd = *fd;
2245
2246 qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE);
2247out:
2248 local_irq_restore(irqflags);
2249 put_affine_portal();
2250 return 0;
2251}
2252EXPORT_SYMBOL(qman_enqueue);
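
/*
 * Editor's illustrative sketch (not part of this commit): enqueue
 * rejections come back through the message ring as software ERNs and are
 * delivered to fq->cb.ern (see qm_mr_process_task() above), so a driver
 * that enqueues should install that callback. my_ern_cb() is hypothetical;
 * its signature is inferred from the call site above.
 */
static void my_ern_cb(struct qman_portal *p, struct qman_fq *fq,
		      const union qm_mr_entry *msg)
{
	/* e.g. release the buffer carried by the rejected frame */
}

/* Installed before the FQ is used for enqueue: fq->cb.ern = my_ern_cb; */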
2253
2254static int qm_modify_cgr(struct qman_cgr *cgr, u32 flags,
2255 struct qm_mcc_initcgr *opts)
2256{
2257 union qm_mc_command *mcc;
2258 union qm_mc_result *mcr;
2259 struct qman_portal *p = get_affine_portal();
2260 u8 verb = QM_MCC_VERB_MODIFYCGR;
2261 int ret = 0;
2262
2263 mcc = qm_mc_start(&p->p);
2264 if (opts)
2265 mcc->initcgr = *opts;
2266 mcc->initcgr.cgid = cgr->cgrid;
2267 if (flags & QMAN_CGR_FLAG_USE_INIT)
2268 verb = QM_MCC_VERB_INITCGR;
2269 qm_mc_commit(&p->p, verb);
2270 if (!qm_mc_result_timeout(&p->p, &mcr)) {
2271 ret = -ETIMEDOUT;
2272 goto out;
2273 }
2274
2275 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
2276 if (mcr->result != QM_MCR_RESULT_OK)
2277 ret = -EIO;
2278
2279out:
2280 put_affine_portal();
2281 return ret;
2282}
2283
2284#define PORTAL_IDX(n) (n->config->channel - QM_CHANNEL_SWPORTAL0)
2285#define TARG_MASK(n) (BIT(31) >> PORTAL_IDX(n))
2286
2287static u8 qman_cgr_cpus[CGR_NUM];
2288
2289void qman_init_cgr_all(void)
2290{
2291 struct qman_cgr cgr;
2292 int err_cnt = 0;
2293
2294 for (cgr.cgrid = 0; cgr.cgrid < CGR_NUM; cgr.cgrid++) {
2295 if (qm_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL))
2296 err_cnt++;
2297 }
2298
2299 if (err_cnt)
2300 pr_err("Warning: %d error%s while initialising CGR h/w\n",
2301 err_cnt, (err_cnt > 1) ? "s" : "");
2302}
2303
2304int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
2305 struct qm_mcc_initcgr *opts)
2306{
2307 struct qm_mcr_querycgr cgr_state;
2308 struct qm_mcc_initcgr local_opts = {};
2309 int ret;
2310 struct qman_portal *p;
2311
2312 /*
2313 * We have to check that the provided CGRID is within the limits of the
2314 * data-structures, for obvious reasons. However we'll let h/w take
2315 * care of determining whether it's within the limits of what exists on
2316 * the SoC.
2317 */
2318 if (cgr->cgrid >= CGR_NUM)
2319 return -EINVAL;
2320
2321 preempt_disable();
2322 p = get_affine_portal();
2323 qman_cgr_cpus[cgr->cgrid] = smp_processor_id();
2324 preempt_enable();
2325
2326 cgr->chan = p->config->channel;
2327 spin_lock(&p->cgr_lock);
2328
2329 if (opts) {
2330 ret = qman_query_cgr(cgr, &cgr_state);
2331 if (ret)
2332 goto out;
2333 if (opts)
2334 local_opts = *opts;
2335 if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
2336 local_opts.cgr.cscn_targ_upd_ctrl =
2337 QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p);
2338 else
2339 /* Overwrite TARG */
2340 local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
2341 TARG_MASK(p);
2342 local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
2343
2344 /* send init if flags indicate so */
2345 if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
2346 ret = qm_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
2347 &local_opts);
2348 else
2349 ret = qm_modify_cgr(cgr, 0, &local_opts);
2350 if (ret)
2351 goto out;
2352 }
2353
2354 list_add(&cgr->node, &p->cgr_cbs);
2355
2356 /* Determine if newly added object requires its callback to be called */
2357 ret = qman_query_cgr(cgr, &cgr_state);
2358 if (ret) {
2359 /* we can't go back, so proceed and return success */
2360 dev_err(p->config->dev, "CGR HW state partially modified\n");
2361 ret = 0;
2362 goto out;
2363 }
2364 if (cgr->cb && cgr_state.cgr.cscn_en &&
2365 qman_cgrs_get(&p->cgrs[1], cgr->cgrid))
2366 cgr->cb(p, cgr, 1);
2367out:
2368 spin_unlock(&p->cgr_lock);
2369 put_affine_portal();
2370 return ret;
2371}
2372EXPORT_SYMBOL(qman_create_cgr);
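
/*
 * Editor's illustrative sketch (not part of this commit): registering a
 * CGR so that congestion state-change notifications arrive through the
 * callback invoked from qm_congestion_task() above. my_cscn_cb(),
 * setup_cgr() and the pre-allocated cgrid are hypothetical; the callback
 * signature is inferred from the call sites above.
 */
static void my_cscn_cb(struct qman_portal *p, struct qman_cgr *cgr,
		       int congested)
{
	/* pause or resume enqueues against FQs in this congestion group */
}

static int setup_cgr(struct qman_cgr *cgr, u32 cgrid)
{
	cgr->cgrid = cgrid;
	cgr->cb = my_cscn_cb;
	/*
	 * With NULL opts no INITCGR/MODIFYCGR is issued; this only hooks the
	 * callback up to CSCN processing for an already-initialised CGR.
	 */
	return qman_create_cgr(cgr, 0, NULL);
}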
2373
2374int qman_delete_cgr(struct qman_cgr *cgr)
2375{
2376 unsigned long irqflags;
2377 struct qm_mcr_querycgr cgr_state;
2378 struct qm_mcc_initcgr local_opts;
2379 int ret = 0;
2380 struct qman_cgr *i;
2381 struct qman_portal *p = get_affine_portal();
2382
2383 if (cgr->chan != p->config->channel) {
2384 /* attempting to delete from a portal other than the one that created it */
2385 dev_err(p->config->dev, "CGR not owned by current portal");
2386 dev_dbg(p->config->dev, " create 0x%x, delete 0x%x\n",
2387 cgr->chan, p->config->channel);
2388
2389 ret = -EINVAL;
2390 goto put_portal;
2391 }
2392 memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
2393 spin_lock_irqsave(&p->cgr_lock, irqflags);
2394 list_del(&cgr->node);
2395 /*
2396 * If there are no other CGR objects for this CGRID in the list,
2397 * update CSCN_TARG accordingly
2398 */
2399 list_for_each_entry(i, &p->cgr_cbs, node)
2400 if (i->cgrid == cgr->cgrid && i->cb)
2401 goto release_lock;
2402 ret = qman_query_cgr(cgr, &cgr_state);
2403 if (ret) {
2404 /* add back to the list */
2405 list_add(&cgr->node, &p->cgr_cbs);
2406 goto release_lock;
2407 }
2408 /* Overwrite TARG */
2409 local_opts.we_mask = QM_CGR_WE_CSCN_TARG;
2410 if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
2411 local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p);
2412 else
2413 local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ &
2414 ~(TARG_MASK(p));
2415 ret = qm_modify_cgr(cgr, 0, &local_opts);
2416 if (ret)
2417 /* add back to the list */
2418 list_add(&cgr->node, &p->cgr_cbs);
2419release_lock:
2420 spin_unlock_irqrestore(&p->cgr_lock, irqflags);
2421put_portal:
2422 put_affine_portal();
2423 return ret;
2424}
2425EXPORT_SYMBOL(qman_delete_cgr);
2426
2427struct cgr_comp {
2428 struct qman_cgr *cgr;
2429 struct completion completion;
2430};
2431
2432static int qman_delete_cgr_thread(void *p)
2433{
2434 struct cgr_comp *cgr_comp = (struct cgr_comp *)p;
2435 int ret;
2436
2437 ret = qman_delete_cgr(cgr_comp->cgr);
2438 complete(&cgr_comp->completion);
2439
2440 return ret;
2441}
2442
2443void qman_delete_cgr_safe(struct qman_cgr *cgr)
2444{
2445 struct task_struct *thread;
2446 struct cgr_comp cgr_comp;
2447
2448 preempt_disable();
2449 if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
2450 init_completion(&cgr_comp.completion);
2451 cgr_comp.cgr = cgr;
2452 thread = kthread_create(qman_delete_cgr_thread, &cgr_comp,
2453 "cgr_del");
2454
2455 if (IS_ERR(thread))
2456 goto out;
2457
2458 kthread_bind(thread, qman_cgr_cpus[cgr->cgrid]);
2459 wake_up_process(thread);
2460 wait_for_completion(&cgr_comp.completion);
2461 preempt_enable();
2462 return;
2463 }
2464out:
2465 qman_delete_cgr(cgr);
2466 preempt_enable();
2467}
2468EXPORT_SYMBOL(qman_delete_cgr_safe);
2469
2470/* Cleanup FQs */
2471
2472static int _qm_mr_consume_and_match_verb(struct qm_portal *p, int v)
2473{
2474 const union qm_mr_entry *msg;
2475 int found = 0;
2476
2477 qm_mr_pvb_update(p);
2478 msg = qm_mr_current(p);
2479 while (msg) {
2480 if ((msg->verb & QM_MR_VERB_TYPE_MASK) == v)
2481 found = 1;
2482 qm_mr_next(p);
2483 qm_mr_cci_consume_to_current(p);
2484 qm_mr_pvb_update(p);
2485 msg = qm_mr_current(p);
2486 }
2487 return found;
2488}
2489
2490static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s,
2491 bool wait)
2492{
2493 const struct qm_dqrr_entry *dqrr;
2494 int found = 0;
2495
2496 do {
2497 qm_dqrr_pvb_update(p);
2498 dqrr = qm_dqrr_current(p);
2499 if (!dqrr)
2500 cpu_relax();
2501 } while (wait && !dqrr);
2502
2503 while (dqrr) {
2504 if (dqrr->fqid == fqid && (dqrr->stat & s))
2505 found = 1;
2506 qm_dqrr_cdc_consume_1ptr(p, dqrr, 0);
2507 qm_dqrr_pvb_update(p);
2508 qm_dqrr_next(p);
2509 dqrr = qm_dqrr_current(p);
2510 }
2511 return found;
2512}
2513
2514#define qm_mr_drain(p, V) \
2515 _qm_mr_consume_and_match_verb(p, QM_MR_VERB_##V)
2516
2517#define qm_dqrr_drain(p, f, S) \
2518 _qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, false)
2519
2520#define qm_dqrr_drain_wait(p, f, S) \
2521 _qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, true)
2522
2523#define qm_dqrr_drain_nomatch(p) \
2524 _qm_dqrr_consume_and_match(p, 0, 0, false)
2525
2526static int qman_shutdown_fq(u32 fqid)
2527{
2528 struct qman_portal *p;
2529 struct device *dev;
2530 union qm_mc_command *mcc;
2531 union qm_mc_result *mcr;
2532 int orl_empty, drain = 0, ret = 0;
2533 u32 channel, wq, res;
2534 u8 state;
2535
2536 p = get_affine_portal();
2537 dev = p->config->dev;
2538 /* Determine the state of the FQID */
2539 mcc = qm_mc_start(&p->p);
2540 mcc->queryfq_np.fqid = fqid;
2541 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
2542 if (!qm_mc_result_timeout(&p->p, &mcr)) {
2543 dev_err(dev, "QUERYFQ_NP timeout\n");
2544 ret = -ETIMEDOUT;
2545 goto out;
2546 }
2547
2548 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
2549 state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
2550 if (state == QM_MCR_NP_STATE_OOS)
2551 goto out; /* Already OOS, no need to do any more checks */
2552
2553 /* Query which channel the FQ is using */
2554 mcc = qm_mc_start(&p->p);
2555 mcc->queryfq.fqid = fqid;
2556 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
2557 if (!qm_mc_result_timeout(&p->p, &mcr)) {
2558 dev_err(dev, "QUERYFQ timeout\n");
2559 ret = -ETIMEDOUT;
2560 goto out;
2561 }
2562
2563 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
2564 /* Need to store these since the MCR gets reused */
2565 channel = qm_fqd_get_chan(&mcr->queryfq.fqd);
2566 wq = qm_fqd_get_wq(&mcr->queryfq.fqd);
2567
2568 switch (state) {
2569 case QM_MCR_NP_STATE_TEN_SCHED:
2570 case QM_MCR_NP_STATE_TRU_SCHED:
2571 case QM_MCR_NP_STATE_ACTIVE:
2572 case QM_MCR_NP_STATE_PARKED:
2573 orl_empty = 0;
2574 mcc = qm_mc_start(&p->p);
2575 mcc->alterfq.fqid = fqid;
2576 qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
2577 if (!qm_mc_result_timeout(&p->p, &mcr)) {
2578 dev_err(dev, "QUERYFQ_NP timeout\n");
2579 ret = -ETIMEDOUT;
2580 goto out;
2581 }
2582 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
2583 QM_MCR_VERB_ALTER_RETIRE);
2584 res = mcr->result; /* Make a copy as we reuse MCR below */
2585
2586 if (res == QM_MCR_RESULT_PENDING) {
2587 /*
2588 * Need to wait for the FQRN in the message ring, which
2589 * will only occur once the FQ has been drained. In
2590 * order for the FQ to drain, the portal needs to be set
2591 * to dequeue from the channel the FQ is scheduled on.
2592 */
2593 int found_fqrn = 0;
2594 u16 dequeue_wq = 0;
2595
2596 /* Flag that we need to drain FQ */
2597 drain = 1;
2598
2599 if (channel >= qm_channel_pool1 &&
2600 channel < qm_channel_pool1 + 15) {
2601 /* Pool channel, enable the bit in the portal */
2602 dequeue_wq = (channel -
2603 qm_channel_pool1 + 1)<<4 | wq;
2604 } else if (channel < qm_channel_pool1) {
2605 /* Dedicated channel */
2606 dequeue_wq = wq;
2607 } else {
2608 dev_err(dev, "Can't recover FQ 0x%x, ch: 0x%x",
2609 fqid, channel);
2610 ret = -EBUSY;
2611 goto out;
2612 }
2613 /* Set the sdqcr to drain this channel */
2614 if (channel < qm_channel_pool1)
2615 qm_dqrr_sdqcr_set(&p->p,
2616 QM_SDQCR_TYPE_ACTIVE |
2617 QM_SDQCR_CHANNELS_DEDICATED);
2618 else
2619 qm_dqrr_sdqcr_set(&p->p,
2620 QM_SDQCR_TYPE_ACTIVE |
2621 QM_SDQCR_CHANNELS_POOL_CONV
2622 (channel));
2623 do {
2624 /* Keep draining DQRR while checking the MR */
2625 qm_dqrr_drain_nomatch(&p->p);
2626 /* Process message ring too */
2627 found_fqrn = qm_mr_drain(&p->p, FQRN);
2628 cpu_relax();
2629 } while (!found_fqrn);
2630
2631 }
2632 if (res != QM_MCR_RESULT_OK &&
2633 res != QM_MCR_RESULT_PENDING) {
2634 dev_err(dev, "retire_fq failed: FQ 0x%x, res=0x%x\n",
2635 fqid, res);
2636 ret = -EIO;
2637 goto out;
2638 }
2639 if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
2640 /*
2641 * ORL had no entries, no need to wait until the
2642 * ERNs come in
2643 */
2644 orl_empty = 1;
2645 }
2646 /*
2647 * Retirement succeeded, check to see if FQ needs
2648 * to be drained
2649 */
2650 if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
2651 /* FQ is Not Empty, drain using volatile DQ commands */
2652 do {
2653 u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);
2654
2655 qm_dqrr_vdqcr_set(&p->p, vdqcr);
2656 /*
2657 * Wait for a dequeue and process the dequeues,
2658 * making sure to empty the ring completely
2659 */
2660 } while (qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY));
2661 }
2662 qm_dqrr_sdqcr_set(&p->p, 0);
2663
2664 while (!orl_empty) {
2665 /* Wait for the ORL to have been completely drained */
2666 orl_empty = qm_mr_drain(&p->p, FQRL);
2667 cpu_relax();
2668 }
2669 mcc = qm_mc_start(&p->p);
2670 mcc->alterfq.fqid = fqid;
2671 qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
2672 if (!qm_mc_result_timeout(&p->p, &mcr)) {
2673 ret = -ETIMEDOUT;
2674 goto out;
2675 }
2676
2677 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
2678 QM_MCR_VERB_ALTER_OOS);
2679 if (mcr->result != QM_MCR_RESULT_OK) {
2680 dev_err(dev, "OOS after drain fail: FQ 0x%x (0x%x)\n",
2681 fqid, mcr->result);
2682 ret = -EIO;
2683 goto out;
2684 }
2685 break;
2686
2687 case QM_MCR_NP_STATE_RETIRED:
2688 /* Send OOS Command */
2689 mcc = qm_mc_start(&p->p);
2690 mcc->alterfq.fqid = fqid;
2691 qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
2692 if (!qm_mc_result_timeout(&p->p, &mcr)) {
2693 ret = -ETIMEDOUT;
2694 goto out;
2695 }
2696
2697 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
2698 QM_MCR_VERB_ALTER_OOS);
2699 if (mcr->result) {
2700 dev_err(dev, "OOS fail: FQ 0x%x (0x%x)\n",
2701 fqid, mcr->result);
2702 ret = -EIO;
2703 goto out;
2704 }
2705 break;
2706
2707 case QM_MCR_NP_STATE_OOS:
2708 /* Done */
2709 break;
2710
2711 default:
2712 ret = -EIO;
2713 }
2714
2715out:
2716 put_affine_portal();
2717 return ret;
2718}
2719
2720const struct qm_portal_config *qman_get_qm_portal_config(
2721 struct qman_portal *portal)
2722{
2723 return portal->config;
2724}
2725
2726struct gen_pool *qm_fqalloc; /* FQID allocator */
2727struct gen_pool *qm_qpalloc; /* pool-channel allocator */
2728struct gen_pool *qm_cgralloc; /* CGR ID allocator */
2729
2730static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt)
2731{
2732 unsigned long addr;
2733
2734 addr = gen_pool_alloc(p, cnt);
2735 if (!addr)
2736 return -ENOMEM;
2737
2738 *result = addr & ~DPAA_GENALLOC_OFF;
2739
2740 return 0;
2741}
2742
2743int qman_alloc_fqid_range(u32 *result, u32 count)
2744{
2745 return qman_alloc_range(qm_fqalloc, result, count);
2746}
2747EXPORT_SYMBOL(qman_alloc_fqid_range);
2748
2749int qman_alloc_pool_range(u32 *result, u32 count)
2750{
2751 return qman_alloc_range(qm_qpalloc, result, count);
2752}
2753EXPORT_SYMBOL(qman_alloc_pool_range);
2754
2755int qman_alloc_cgrid_range(u32 *result, u32 count)
2756{
2757 return qman_alloc_range(qm_cgralloc, result, count);
2758}
2759EXPORT_SYMBOL(qman_alloc_cgrid_range);
2760
2761int qman_release_fqid(u32 fqid)
2762{
2763 int ret = qman_shutdown_fq(fqid);
2764
2765 if (ret) {
2766 pr_debug("FQID %d leaked\n", fqid);
2767 return ret;
2768 }
2769
2770 gen_pool_free(qm_fqalloc, fqid | DPAA_GENALLOC_OFF, 1);
2771 return 0;
2772}
2773EXPORT_SYMBOL(qman_release_fqid);
2774
2775static int qpool_cleanup(u32 qp)
2776{
2777 /*
2778 * We query all FQDs starting from FQID 1 until we get an "invalid
2779 * FQID" error, looking for non-OOS FQDs whose destination channel
2780 * is the pool-channel being released.
2781 * When a non-OOS FQD is found, we attempt to clean it up.
2782 */
2783 struct qman_fq fq = {
2784 .fqid = QM_FQID_RANGE_START
2785 };
2786 int err;
2787
2788 do {
2789 struct qm_mcr_queryfq_np np;
2790
2791 err = qman_query_fq_np(&fq, &np);
2792 if (err)
2793 /* FQID range exceeded, found no problems */
2794 return 0;
2795 if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
2796 struct qm_fqd fqd;
2797
2798 err = qman_query_fq(&fq, &fqd);
2799 if (WARN_ON(err))
2800 return 0;
2801 if (qm_fqd_get_chan(&fqd) == qp) {
2802 /* The channel is the FQ's target, clean it */
2803 err = qman_shutdown_fq(fq.fqid);
2804 if (err)
2805 /*
2806 * Couldn't shut down the FQ
2807 * so the pool must be leaked
2808 */
2809 return err;
2810 }
2811 }
2812 /* Move to the next FQID */
2813 fq.fqid++;
2814 } while (1);
2815}
2816
2817int qman_release_pool(u32 qp)
2818{
2819 int ret;
2820
2821 ret = qpool_cleanup(qp);
2822 if (ret) {
2823 pr_debug("CHID %d leaked\n", qp);
2824 return ret;
2825 }
2826
2827 gen_pool_free(qm_qpalloc, qp | DPAA_GENALLOC_OFF, 1);
2828 return 0;
2829}
2830EXPORT_SYMBOL(qman_release_pool);
2831
2832static int cgr_cleanup(u32 cgrid)
2833{
2834 /*
2835 * query all FQDs starting from FQID 1 until we get an "invalid FQID"
2836 * error, looking for non-OOS FQDs whose CGR is the CGR being released
2837 */
2838 struct qman_fq fq = {
2839 .fqid = 1
2840 };
2841 int err;
2842
2843 do {
2844 struct qm_mcr_queryfq_np np;
2845
2846 err = qman_query_fq_np(&fq, &np);
2847 if (err)
2848 /* FQID range exceeded, found no problems */
2849 return 0;
2850 if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
2851 struct qm_fqd fqd;
2852
2853 err = qman_query_fq(&fq, &fqd);
2854 if (WARN_ON(err))
2855 return 0;
2856 if ((fqd.fq_ctrl & QM_FQCTRL_CGE) &&
2857 fqd.cgid == cgrid) {
2858 pr_err("CGRID 0x%x is being used by FQID 0x%x, CGR will be leaked\n",
2859 cgrid, fq.fqid);
2860 return -EIO;
2861 }
2862 }
2863 /* Move to the next FQID */
2864 fq.fqid++;
2865 } while (1);
2866}
2867
2868int qman_release_cgrid(u32 cgrid)
2869{
2870 int ret;
2871
2872 ret = cgr_cleanup(cgrid);
2873 if (ret) {
2874 pr_debug("CGRID %d leaked\n", cgrid);
2875 return ret;
2876 }
2877
2878 gen_pool_free(qm_cgralloc, cgrid | DPAA_GENALLOC_OFF, 1);
2879 return 0;
2880}
2881EXPORT_SYMBOL(qman_release_cgrid);
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c
new file mode 100644
index 000000000000..0cace9e0077e
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_ccsr.c
@@ -0,0 +1,808 @@
1/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
2 *
3 * Redistribution and use in source and binary forms, with or without
4 * modification, are permitted provided that the following conditions are met:
5 * * Redistributions of source code must retain the above copyright
6 * notice, this list of conditions and the following disclaimer.
7 * * Redistributions in binary form must reproduce the above copyright
8 * notice, this list of conditions and the following disclaimer in the
9 * documentation and/or other materials provided with the distribution.
10 * * Neither the name of Freescale Semiconductor nor the
11 * names of its contributors may be used to endorse or promote products
12 * derived from this software without specific prior written permission.
13 *
14 * ALTERNATIVELY, this software may be distributed under the terms of the
15 * GNU General Public License ("GPL") as published by the Free Software
16 * Foundation, either version 2 of that License or (at your option) any
17 * later version.
18 *
19 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
20 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
23 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#include "qman_priv.h"
32
33u16 qman_ip_rev;
34EXPORT_SYMBOL(qman_ip_rev);
35u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1;
36EXPORT_SYMBOL(qm_channel_pool1);
37
38/* Register offsets */
39#define REG_QCSP_LIO_CFG(n) (0x0000 + ((n) * 0x10))
40#define REG_QCSP_IO_CFG(n) (0x0004 + ((n) * 0x10))
41#define REG_QCSP_DD_CFG(n) (0x000c + ((n) * 0x10))
42#define REG_DD_CFG 0x0200
43#define REG_DCP_CFG(n) (0x0300 + ((n) * 0x10))
44#define REG_DCP_DD_CFG(n) (0x0304 + ((n) * 0x10))
45#define REG_DCP_DLM_AVG(n) (0x030c + ((n) * 0x10))
46#define REG_PFDR_FPC 0x0400
47#define REG_PFDR_FP_HEAD 0x0404
48#define REG_PFDR_FP_TAIL 0x0408
49#define REG_PFDR_FP_LWIT 0x0410
50#define REG_PFDR_CFG 0x0414
51#define REG_SFDR_CFG 0x0500
52#define REG_SFDR_IN_USE 0x0504
53#define REG_WQ_CS_CFG(n) (0x0600 + ((n) * 0x04))
54#define REG_WQ_DEF_ENC_WQID 0x0630
55#define REG_WQ_SC_DD_CFG(n) (0x640 + ((n) * 0x04))
56#define REG_WQ_PC_DD_CFG(n) (0x680 + ((n) * 0x04))
57#define REG_WQ_DC0_DD_CFG(n) (0x6c0 + ((n) * 0x04))
58#define REG_WQ_DC1_DD_CFG(n) (0x700 + ((n) * 0x04))
59#define REG_WQ_DCn_DD_CFG(n) (0x6c0 + ((n) * 0x40)) /* n=2,3 */
60#define REG_CM_CFG 0x0800
61#define REG_ECSR 0x0a00
62#define REG_ECIR 0x0a04
63#define REG_EADR 0x0a08
64#define REG_ECIR2 0x0a0c
65#define REG_EDATA(n) (0x0a10 + ((n) * 0x04))
66#define REG_SBEC(n) (0x0a80 + ((n) * 0x04))
67#define REG_MCR 0x0b00
68#define REG_MCP(n) (0x0b04 + ((n) * 0x04))
69#define REG_MISC_CFG 0x0be0
70#define REG_HID_CFG 0x0bf0
71#define REG_IDLE_STAT 0x0bf4
72#define REG_IP_REV_1 0x0bf8
73#define REG_IP_REV_2 0x0bfc
74#define REG_FQD_BARE 0x0c00
75#define REG_PFDR_BARE 0x0c20
76#define REG_offset_BAR 0x0004 /* relative to REG_[FQD|PFDR]_BARE */
77#define REG_offset_AR 0x0010 /* relative to REG_[FQD|PFDR]_BARE */
78#define REG_QCSP_BARE 0x0c80
79#define REG_QCSP_BAR 0x0c84
80#define REG_CI_SCHED_CFG 0x0d00
81#define REG_SRCIDR 0x0d04
82#define REG_LIODNR 0x0d08
83#define REG_CI_RLM_AVG 0x0d14
84#define REG_ERR_ISR 0x0e00
85#define REG_ERR_IER 0x0e04
86#define REG_REV3_QCSP_LIO_CFG(n) (0x1000 + ((n) * 0x10))
87#define REG_REV3_QCSP_IO_CFG(n) (0x1004 + ((n) * 0x10))
88#define REG_REV3_QCSP_DD_CFG(n) (0x100c + ((n) * 0x10))
89
90/* Assists for QMAN_MCR */
91#define MCR_INIT_PFDR 0x01000000
92#define MCR_get_rslt(v) (u8)((v) >> 24)
93#define MCR_rslt_idle(r) (!(r) || ((r) >= 0xf0))
94#define MCR_rslt_ok(r) ((r) == 0xf0)
95#define MCR_rslt_eaccess(r) ((r) == 0xf8)
96#define MCR_rslt_inval(r) ((r) == 0xff)
97
98/*
99 * Corenet initiator settings. Stash request queues are 4-deep to match the
100 * cores' ability to snarf. Stash priority is 3, other priorities are 2.
101 */
102#define QM_CI_SCHED_CFG_SRCCIV 4
103#define QM_CI_SCHED_CFG_SRQ_W 3
104#define QM_CI_SCHED_CFG_RW_W 2
105#define QM_CI_SCHED_CFG_BMAN_W 2
106/* write SRCCIV enable */
107#define QM_CI_SCHED_CFG_SRCCIV_EN BIT(31)
108
109/* Follows WQ_CS_CFG0-5 */
110enum qm_wq_class {
111 qm_wq_portal = 0,
112 qm_wq_pool = 1,
113 qm_wq_fman0 = 2,
114 qm_wq_fman1 = 3,
115 qm_wq_caam = 4,
116 qm_wq_pme = 5,
117 qm_wq_first = qm_wq_portal,
118 qm_wq_last = qm_wq_pme
119};
120
121/* Follows FQD_[BARE|BAR|AR] and PFDR_[BARE|BAR|AR] */
122enum qm_memory {
123 qm_memory_fqd,
124 qm_memory_pfdr
125};
126
127/* Used by all error interrupt registers except 'inhibit' */
128#define QM_EIRQ_CIDE 0x20000000 /* Corenet Initiator Data Error */
129#define QM_EIRQ_CTDE 0x10000000 /* Corenet Target Data Error */
130#define QM_EIRQ_CITT 0x08000000 /* Corenet Invalid Target Transaction */
131#define QM_EIRQ_PLWI 0x04000000 /* PFDR Low Watermark */
132#define QM_EIRQ_MBEI 0x02000000 /* Multi-bit ECC Error */
133#define QM_EIRQ_SBEI 0x01000000 /* Single-bit ECC Error */
134#define QM_EIRQ_PEBI 0x00800000 /* PFDR Enqueues Blocked Interrupt */
135#define QM_EIRQ_IFSI 0x00020000 /* Invalid FQ Flow Control State */
136#define QM_EIRQ_ICVI 0x00010000 /* Invalid Command Verb */
137#define QM_EIRQ_IDDI 0x00000800 /* Invalid Dequeue (Direct-connect) */
138#define QM_EIRQ_IDFI 0x00000400 /* Invalid Dequeue FQ */
139#define QM_EIRQ_IDSI 0x00000200 /* Invalid Dequeue Source */
140#define QM_EIRQ_IDQI 0x00000100 /* Invalid Dequeue Queue */
141#define QM_EIRQ_IECE 0x00000010 /* Invalid Enqueue Configuration */
142#define QM_EIRQ_IEOI 0x00000008 /* Invalid Enqueue Overflow */
143#define QM_EIRQ_IESI 0x00000004 /* Invalid Enqueue State */
144#define QM_EIRQ_IECI 0x00000002 /* Invalid Enqueue Channel */
145#define QM_EIRQ_IEQI 0x00000001 /* Invalid Enqueue Queue */
146
147/* QMAN_ECIR valid error bit */
148#define PORTAL_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IESI | QM_EIRQ_IEOI | \
149 QM_EIRQ_IDQI | QM_EIRQ_IDSI | QM_EIRQ_IDFI | \
150 QM_EIRQ_IDDI | QM_EIRQ_ICVI | QM_EIRQ_IFSI)
151#define FQID_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IECI | QM_EIRQ_IESI | \
152 QM_EIRQ_IEOI | QM_EIRQ_IDQI | QM_EIRQ_IDFI | \
153 QM_EIRQ_IFSI)
154
155struct qm_ecir {
156 u32 info; /* res[30-31], ptyp[29], pnum[24-28], fqid[0-23] */
157};
158
159static bool qm_ecir_is_dcp(const struct qm_ecir *p)
160{
161 return p->info & BIT(29);
162}
163
164static int qm_ecir_get_pnum(const struct qm_ecir *p)
165{
166 return (p->info >> 24) & 0x1f;
167}
168
169static int qm_ecir_get_fqid(const struct qm_ecir *p)
170{
171 return p->info & (BIT(24) - 1);
172}
173
174struct qm_ecir2 {
175 u32 info; /* ptyp[31], res[10-30], pnum[0-9] */
176};
177
178static bool qm_ecir2_is_dcp(const struct qm_ecir2 *p)
179{
180 return p->info & BIT(31);
181}
182
183static int qm_ecir2_get_pnum(const struct qm_ecir2 *p)
184{
185 return p->info & (BIT(10) - 1);
186}
187
188struct qm_eadr {
189 u32 info; /* memid[24-27], eadr[0-11] */
190 /* v3: memid[24-28], eadr[0-15] */
191};
192
193static int qm_eadr_get_memid(const struct qm_eadr *p)
194{
195 return (p->info >> 24) & 0xf;
196}
197
198static int qm_eadr_get_eadr(const struct qm_eadr *p)
199{
200 return p->info & (BIT(12) - 1);
201}
202
203static int qm_eadr_v3_get_memid(const struct qm_eadr *p)
204{
205 return (p->info >> 24) & 0x1f;
206}
207
208static int qm_eadr_v3_get_eadr(const struct qm_eadr *p)
209{
210 return p->info & (BIT(16) - 1);
211}
212
213struct qman_hwerr_txt {
214 u32 mask;
215 const char *txt;
216};
217
218
219static const struct qman_hwerr_txt qman_hwerr_txts[] = {
220 { QM_EIRQ_CIDE, "Corenet Initiator Data Error" },
221 { QM_EIRQ_CTDE, "Corenet Target Data Error" },
222 { QM_EIRQ_CITT, "Corenet Invalid Target Transaction" },
223 { QM_EIRQ_PLWI, "PFDR Low Watermark" },
224 { QM_EIRQ_MBEI, "Multi-bit ECC Error" },
225 { QM_EIRQ_SBEI, "Single-bit ECC Error" },
226 { QM_EIRQ_PEBI, "PFDR Enqueues Blocked Interrupt" },
227 { QM_EIRQ_ICVI, "Invalid Command Verb" },
228 { QM_EIRQ_IFSI, "Invalid Flow Control State" },
229 { QM_EIRQ_IDDI, "Invalid Dequeue (Direct-connect)" },
230 { QM_EIRQ_IDFI, "Invalid Dequeue FQ" },
231 { QM_EIRQ_IDSI, "Invalid Dequeue Source" },
232 { QM_EIRQ_IDQI, "Invalid Dequeue Queue" },
233 { QM_EIRQ_IECE, "Invalid Enqueue Configuration" },
234 { QM_EIRQ_IEOI, "Invalid Enqueue Overflow" },
235 { QM_EIRQ_IESI, "Invalid Enqueue State" },
236 { QM_EIRQ_IECI, "Invalid Enqueue Channel" },
237 { QM_EIRQ_IEQI, "Invalid Enqueue Queue" },
238};
239
240struct qman_error_info_mdata {
241 u16 addr_mask;
242 u16 bits;
243 const char *txt;
244};
245
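/*
 * Indexed by the EADR memid field; gives the mask to apply to the captured
 * error address and the data width (in bits) used when dumping EDATA.
 */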
246static const struct qman_error_info_mdata error_mdata[] = {
247 { 0x01FF, 24, "FQD cache tag memory 0" },
248 { 0x01FF, 24, "FQD cache tag memory 1" },
249 { 0x01FF, 24, "FQD cache tag memory 2" },
250 { 0x01FF, 24, "FQD cache tag memory 3" },
251 { 0x0FFF, 512, "FQD cache memory" },
252 { 0x07FF, 128, "SFDR memory" },
253 { 0x01FF, 72, "WQ context memory" },
254 { 0x00FF, 240, "CGR memory" },
255 { 0x00FF, 302, "Internal Order Restoration List memory" },
256 { 0x01FF, 256, "SW portal ring memory" },
257};
258
259#define QMAN_ERRS_TO_DISABLE (QM_EIRQ_PLWI | QM_EIRQ_PEBI)
260
261/*
262 * TODO: unimplemented registers
263 *
264 * Keeping a list here of QMan registers I have not yet covered;
265 * QCSP_DD_IHRSR, QCSP_DD_IHRFR, QCSP_DD_HASR,
266 * DCP_DD_IHRSR, DCP_DD_IHRFR, DCP_DD_HASR, CM_CFG,
267 * QMAN_EECC, QMAN_SBET, QMAN_EINJ, QMAN_SBEC0-12
268 */
269
270/* Pointer to the start of the QMan's CCSR space */
271static u32 __iomem *qm_ccsr_start;
272/* A SDQCR mask comprising all the available/visible pool channels */
273static u32 qm_pools_sdqcr;
274
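/* CCSR accessors: offsets are byte offsets, qm_ccsr_start is a u32 pointer */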
275static inline u32 qm_ccsr_in(u32 offset)
276{
277 return ioread32be(qm_ccsr_start + offset/4);
278}
279
280static inline void qm_ccsr_out(u32 offset, u32 val)
281{
282 iowrite32be(val, qm_ccsr_start + offset/4);
283}
284
285u32 qm_get_pools_sdqcr(void)
286{
287 return qm_pools_sdqcr;
288}
289
290enum qm_dc_portal {
291 qm_dc_portal_fman0 = 0,
292 qm_dc_portal_fman1 = 1
293};
294
295static void qm_set_dc(enum qm_dc_portal portal, int ed, u8 sernd)
296{
297 DPAA_ASSERT(!ed || portal == qm_dc_portal_fman0 ||
298 portal == qm_dc_portal_fman1);
299 if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
300 qm_ccsr_out(REG_DCP_CFG(portal),
301 (ed ? 0x1000 : 0) | (sernd & 0x3ff));
302 else
303 qm_ccsr_out(REG_DCP_CFG(portal),
304 (ed ? 0x100 : 0) | (sernd & 0x1f));
305}
306
307static void qm_set_wq_scheduling(enum qm_wq_class wq_class,
308 u8 cs_elev, u8 csw2, u8 csw3, u8 csw4,
309 u8 csw5, u8 csw6, u8 csw7)
310{
311 qm_ccsr_out(REG_WQ_CS_CFG(wq_class), ((cs_elev & 0xff) << 24) |
312 ((csw2 & 0x7) << 20) | ((csw3 & 0x7) << 16) |
313 ((csw4 & 0x7) << 12) | ((csw5 & 0x7) << 8) |
314 ((csw6 & 0x7) << 4) | (csw7 & 0x7));
315}
316
317static void qm_set_hid(void)
318{
319 qm_ccsr_out(REG_HID_CFG, 0);
320}
321
322static void qm_set_corenet_initiator(void)
323{
324 qm_ccsr_out(REG_CI_SCHED_CFG, QM_CI_SCHED_CFG_SRCCIV_EN |
325 (QM_CI_SCHED_CFG_SRCCIV << 24) |
326 (QM_CI_SCHED_CFG_SRQ_W << 8) |
327 (QM_CI_SCHED_CFG_RW_W << 4) |
328 QM_CI_SCHED_CFG_BMAN_W);
329}
330
331static void qm_get_version(u16 *id, u8 *major, u8 *minor)
332{
333 u32 v = qm_ccsr_in(REG_IP_REV_1);
334 *id = (v >> 16);
335 *major = (v >> 8) & 0xff;
336 *minor = v & 0xff;
337}
338
339#define PFDR_AR_EN BIT(31)
340static void qm_set_memory(enum qm_memory memory, u64 ba, u32 size)
341{
342 u32 offset = (memory == qm_memory_fqd) ? REG_FQD_BARE : REG_PFDR_BARE;
343 u32 exp = ilog2(size);
344
345 /* choke if size isn't within range */
346 DPAA_ASSERT((size >= 4096) && (size <= 1024*1024*1024) &&
347 is_power_of_2(size));
348 /* choke if 'ba' has lower-alignment than 'size' */
349 DPAA_ASSERT(!(ba & (size - 1)));
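	/* program the base (split across BARE/BAR), then enable with log2(size) - 1 */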
350 qm_ccsr_out(offset, upper_32_bits(ba));
351 qm_ccsr_out(offset + REG_offset_BAR, lower_32_bits(ba));
352 qm_ccsr_out(offset + REG_offset_AR, PFDR_AR_EN | (exp - 1));
353}
354
355static void qm_set_pfdr_threshold(u32 th, u8 k)
356{
357 qm_ccsr_out(REG_PFDR_FP_LWIT, th & 0xffffff);
358 qm_ccsr_out(REG_PFDR_CFG, k);
359}
360
361static void qm_set_sfdr_threshold(u16 th)
362{
363 qm_ccsr_out(REG_SFDR_CFG, th & 0x3ff);
364}
365
366static int qm_init_pfdr(struct device *dev, u32 pfdr_start, u32 num)
367{
368 u8 rslt = MCR_get_rslt(qm_ccsr_in(REG_MCR));
369
370 DPAA_ASSERT(pfdr_start && !(pfdr_start & 7) && !(num & 7) && num);
371 /* Make sure the command interface is 'idle' */
372 if (!MCR_rslt_idle(rslt)) {
373 dev_crit(dev, "QMAN_MCR isn't idle");
374 WARN_ON(1);
375 }
376
377 /* Write the MCR command params then the verb */
378 qm_ccsr_out(REG_MCP(0), pfdr_start);
379 /*
380 * TODO: remove this - it's a workaround for a model bug that is
381 * corrected in more recent versions. We use the workaround until
382 * everyone has upgraded.
383 */
384 qm_ccsr_out(REG_MCP(1), pfdr_start + num - 16);
385 dma_wmb();
386 qm_ccsr_out(REG_MCR, MCR_INIT_PFDR);
387 /* Poll for the result */
388 do {
389 rslt = MCR_get_rslt(qm_ccsr_in(REG_MCR));
390 } while (!MCR_rslt_idle(rslt));
391 if (MCR_rslt_ok(rslt))
392 return 0;
393 if (MCR_rslt_eaccess(rslt))
394 return -EACCES;
395 if (MCR_rslt_inval(rslt))
396 return -EINVAL;
397 dev_crit(dev, "Unexpected result from MCR_INIT_PFDR: %02x\n", rslt);
398 return -ENODEV;
399}
400
401/*
402 * Ideally we would use the DMA API to turn rmem->base into a DMA address
403 * (especially if iommu translations ever get involved). Unfortunately, the
404 * DMA API currently does not allow mapping anything that is not backed with
405 * a struct page.
406 */
407static dma_addr_t fqd_a, pfdr_a;
408static size_t fqd_sz, pfdr_sz;
409
410static int qman_fqd(struct reserved_mem *rmem)
411{
412 fqd_a = rmem->base;
413 fqd_sz = rmem->size;
414
415 WARN_ON(!(fqd_a && fqd_sz));
416
417 return 0;
418}
419RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd);
420
421static int qman_pfdr(struct reserved_mem *rmem)
422{
423 pfdr_a = rmem->base;
424 pfdr_sz = rmem->size;
425
426 WARN_ON(!(pfdr_a && pfdr_sz));
427
428 return 0;
429}
430RESERVEDMEM_OF_DECLARE(qman_pfdr, "fsl,qman-pfdr", qman_pfdr);
431
432static unsigned int qm_get_fqid_maxcnt(void)
433{
434 return fqd_sz / 64;
435}
436
437/*
438 * Flush this memory range from the data cache so that QMan-originated
439 * transactions for this memory region can be marked non-coherent.
440 */
441static int zero_priv_mem(struct device *dev, struct device_node *node,
442 phys_addr_t addr, size_t sz)
443{
444 /* map as cacheable, non-guarded */
445 void __iomem *tmpp = ioremap_prot(addr, sz, 0);
446
447 memset_io(tmpp, 0, sz);
448 flush_dcache_range((unsigned long)tmpp,
449 (unsigned long)tmpp + sz);
450 iounmap(tmpp);
451
452 return 0;
453}
454
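/*
 * Dump the EDATA capture registers for a single/multi-bit ECC error.
 * bit_count is the data width of the failing memory; the low bit_count bits
 * live in the tail of the 16-entry EDATA array, so print the partially-used
 * word (masked) first, then the remaining full words.
 */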
455static void log_edata_bits(struct device *dev, u32 bit_count)
456{
457 u32 i, j, mask = 0xffffffff;
458
459 dev_warn(dev, "ErrInt, EDATA:\n");
460 i = bit_count / 32;
461 if (bit_count % 32) {
462 i++;
463 mask = ~(mask << bit_count % 32);
464 }
465 j = 16 - i;
466 dev_warn(dev, " 0x%08x\n", qm_ccsr_in(REG_EDATA(j)) & mask);
467 j++;
468 for (; j < 16; j++)
469 dev_warn(dev, " 0x%08x\n", qm_ccsr_in(REG_EDATA(j)));
470}
471
472static void log_additional_error_info(struct device *dev, u32 isr_val,
473 u32 ecsr_val)
474{
475 struct qm_ecir ecir_val;
476 struct qm_eadr eadr_val;
477 int memid;
478
479 ecir_val.info = qm_ccsr_in(REG_ECIR);
480 /* Is portal info valid */
481 if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
482 struct qm_ecir2 ecir2_val;
483
484 ecir2_val.info = qm_ccsr_in(REG_ECIR2);
485 if (ecsr_val & PORTAL_ECSR_ERR) {
486 dev_warn(dev, "ErrInt: %s id %d\n",
487 qm_ecir2_is_dcp(&ecir2_val) ? "DCP" : "SWP",
488 qm_ecir2_get_pnum(&ecir2_val));
489 }
490 if (ecsr_val & (FQID_ECSR_ERR | QM_EIRQ_IECE))
491 dev_warn(dev, "ErrInt: ecir.fqid 0x%x\n",
492 qm_ecir_get_fqid(&ecir_val));
493
494 if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
495 eadr_val.info = qm_ccsr_in(REG_EADR);
496 memid = qm_eadr_v3_get_memid(&eadr_val);
497 dev_warn(dev, "ErrInt: EADR Memory: %s, 0x%x\n",
498 error_mdata[memid].txt,
499 error_mdata[memid].addr_mask
500 & qm_eadr_v3_get_eadr(&eadr_val));
501 log_edata_bits(dev, error_mdata[memid].bits);
502 }
503 } else {
504 if (ecsr_val & PORTAL_ECSR_ERR) {
505 dev_warn(dev, "ErrInt: %s id %d\n",
506 qm_ecir_is_dcp(&ecir_val) ? "DCP" : "SWP",
507 qm_ecir_get_pnum(&ecir_val));
508 }
509 if (ecsr_val & FQID_ECSR_ERR)
510 dev_warn(dev, "ErrInt: ecir.fqid 0x%x\n",
511 qm_ecir_get_fqid(&ecir_val));
512
513 if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
514 eadr_val.info = qm_ccsr_in(REG_EADR);
515 memid = qm_eadr_get_memid(&eadr_val);
516 dev_warn(dev, "ErrInt: EADR Memory: %s, 0x%x\n",
517 error_mdata[memid].txt,
518 error_mdata[memid].addr_mask
519 & qm_eadr_get_eadr(&eadr_val));
520 log_edata_bits(dev, error_mdata[memid].bits);
521 }
522 }
523}
524
525static irqreturn_t qman_isr(int irq, void *ptr)
526{
527 u32 isr_val, ier_val, ecsr_val, isr_mask, i;
528 struct device *dev = ptr;
529
530 ier_val = qm_ccsr_in(REG_ERR_IER);
531 isr_val = qm_ccsr_in(REG_ERR_ISR);
532 ecsr_val = qm_ccsr_in(REG_ECSR);
533 isr_mask = isr_val & ier_val;
534
535 if (!isr_mask)
536 return IRQ_NONE;
537
538 for (i = 0; i < ARRAY_SIZE(qman_hwerr_txts); i++) {
539 if (qman_hwerr_txts[i].mask & isr_mask) {
540 dev_err_ratelimited(dev, "ErrInt: %s\n",
541 qman_hwerr_txts[i].txt);
542 if (qman_hwerr_txts[i].mask & ecsr_val) {
543 log_additional_error_info(dev, isr_mask,
544 ecsr_val);
545 /* Re-arm error capture registers */
546 qm_ccsr_out(REG_ECSR, ecsr_val);
547 }
548 if (qman_hwerr_txts[i].mask & QMAN_ERRS_TO_DISABLE) {
549 dev_dbg(dev, "Disabling error 0x%x\n",
550 qman_hwerr_txts[i].mask);
551 ier_val &= ~qman_hwerr_txts[i].mask;
552 qm_ccsr_out(REG_ERR_IER, ier_val);
553 }
554 }
555 }
556 qm_ccsr_out(REG_ERR_ISR, isr_val);
557
558 return IRQ_HANDLED;
559}
560
561static int qman_init_ccsr(struct device *dev)
562{
563 int i, err;
564
565 /* FQD memory */
566 qm_set_memory(qm_memory_fqd, fqd_a, fqd_sz);
567 /* PFDR memory */
568 qm_set_memory(qm_memory_pfdr, pfdr_a, pfdr_sz);
569 err = qm_init_pfdr(dev, 8, pfdr_sz / 64 - 8);
570 if (err)
571 return err;
572 /* thresholds */
573 qm_set_pfdr_threshold(512, 64);
574 qm_set_sfdr_threshold(128);
575 /* clear stale PEBI bit from interrupt status register */
576 qm_ccsr_out(REG_ERR_ISR, QM_EIRQ_PEBI);
577 /* corenet initiator settings */
578 qm_set_corenet_initiator();
579 /* HID settings */
580 qm_set_hid();
581 /* Set scheduling weights to defaults */
582 for (i = qm_wq_first; i <= qm_wq_last; i++)
583 qm_set_wq_scheduling(i, 0, 0, 0, 0, 0, 0, 0);
584 /* We are not prepared to accept ERNs for hardware enqueues */
585 qm_set_dc(qm_dc_portal_fman0, 1, 0);
586 qm_set_dc(qm_dc_portal_fman1, 1, 0);
587 return 0;
588}
589
590#define LIO_CFG_LIODN_MASK 0x0fff0000
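/*
 * The first portal processed captures its LIODN offset; every subsequent
 * portal has its QCSP_LIO_CFG rewritten to share that same offset.
 */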
591void qman_liodn_fixup(u16 channel)
592{
593 static int done;
594 static u32 liodn_offset;
595 u32 before, after;
596 int idx = channel - QM_CHANNEL_SWPORTAL0;
597
598 if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
599 before = qm_ccsr_in(REG_REV3_QCSP_LIO_CFG(idx));
600 else
601 before = qm_ccsr_in(REG_QCSP_LIO_CFG(idx));
602 if (!done) {
603 liodn_offset = before & LIO_CFG_LIODN_MASK;
604 done = 1;
605 return;
606 }
607 after = (before & (~LIO_CFG_LIODN_MASK)) | liodn_offset;
608 if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
609 qm_ccsr_out(REG_REV3_QCSP_LIO_CFG(idx), after);
610 else
611 qm_ccsr_out(REG_QCSP_LIO_CFG(idx), after);
612}
613
614#define IO_CFG_SDEST_MASK 0x00ff0000
615void qman_set_sdest(u16 channel, unsigned int cpu_idx)
616{
617 int idx = channel - QM_CHANNEL_SWPORTAL0;
618 u32 before, after;
619
620 if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
621 before = qm_ccsr_in(REG_REV3_QCSP_IO_CFG(idx));
622 /* Each pair of vCPUs shares the same SRQ (SDEST) */
623 cpu_idx /= 2;
624 after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
625 qm_ccsr_out(REG_REV3_QCSP_IO_CFG(idx), after);
626 } else {
627 before = qm_ccsr_in(REG_QCSP_IO_CFG(idx));
628 after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
629 qm_ccsr_out(REG_QCSP_IO_CFG(idx), after);
630 }
631}
632
633static int qman_resource_init(struct device *dev)
634{
635 int pool_chan_num, cgrid_num;
636 int ret, i;
637
638 switch (qman_ip_rev >> 8) {
639 case 1:
640 pool_chan_num = 15;
641 cgrid_num = 256;
642 break;
643 case 2:
644 pool_chan_num = 3;
645 cgrid_num = 64;
646 break;
647 case 3:
648 pool_chan_num = 15;
649 cgrid_num = 256;
650 break;
651 default:
652 return -ENODEV;
653 }
654
655 ret = gen_pool_add(qm_qpalloc, qm_channel_pool1 | DPAA_GENALLOC_OFF,
656 pool_chan_num, -1);
657 if (ret) {
658 dev_err(dev, "Failed to seed pool channels (%d)\n", ret);
659 return ret;
660 }
661
662 ret = gen_pool_add(qm_cgralloc, DPAA_GENALLOC_OFF, cgrid_num, -1);
663 if (ret) {
664 dev_err(dev, "Failed to seed CGRID range (%d)\n", ret);
665 return ret;
666 }
667
668 /* parse pool channels into the SDQCR mask */
669 for (i = 0; i < cgrid_num; i++)
670 qm_pools_sdqcr |= QM_SDQCR_CHANNELS_POOL_CONV(i);
671
672 ret = gen_pool_add(qm_fqalloc, QM_FQID_RANGE_START | DPAA_GENALLOC_OFF,
673 qm_get_fqid_maxcnt() - QM_FQID_RANGE_START, -1);
674 if (ret) {
675 dev_err(dev, "Failed to seed FQID range (%d)\n", ret);
676 return ret;
677 }
678
679 return 0;
680}
681
682static int fsl_qman_probe(struct platform_device *pdev)
683{
684 struct device *dev = &pdev->dev;
685 struct device_node *node = dev->of_node;
686 struct resource *res;
687 int ret, err_irq;
688 u16 id;
689 u8 major, minor;
690
691 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
692 if (!res) {
693 dev_err(dev, "Can't get %s property 'IORESOURCE_MEM'\n",
694 node->full_name);
695 return -ENXIO;
696 }
697 qm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res));
698 if (!qm_ccsr_start)
699 return -ENXIO;
700
701 qm_get_version(&id, &major, &minor);
702 if (major == 1 && minor == 0) {
703 dev_err(dev, "Rev1.0 on P4080 rev1 is not supported!\n");
704 return -ENODEV;
705 } else if (major == 1 && minor == 1)
706 qman_ip_rev = QMAN_REV11;
707 else if (major == 1 && minor == 2)
708 qman_ip_rev = QMAN_REV12;
709 else if (major == 2 && minor == 0)
710 qman_ip_rev = QMAN_REV20;
711 else if (major == 3 && minor == 0)
712 qman_ip_rev = QMAN_REV30;
713 else if (major == 3 && minor == 1)
714 qman_ip_rev = QMAN_REV31;
715 else {
716 dev_err(dev, "Unknown QMan version\n");
717 return -ENODEV;
718 }
719
720 if ((qman_ip_rev & 0xff00) >= QMAN_REV30)
721 qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3;
722
723 ret = zero_priv_mem(dev, node, fqd_a, fqd_sz);
724 WARN_ON(ret);
725 if (ret)
726 return -ENODEV;
727
728 ret = qman_init_ccsr(dev);
729 if (ret) {
730 dev_err(dev, "CCSR setup failed\n");
731 return ret;
732 }
733
734 err_irq = platform_get_irq(pdev, 0);
735 if (err_irq <= 0) {
736 dev_info(dev, "Can't get %s property 'interrupts'\n",
737 node->full_name);
738 return -ENODEV;
739 }
740 ret = devm_request_irq(dev, err_irq, qman_isr, IRQF_SHARED, "qman-err",
741 dev);
742 if (ret) {
743 dev_err(dev, "devm_request_irq() failed %d for '%s'\n",
744 ret, node->full_name);
745 return ret;
746 }
747
748 /*
749 * Write-to-clear any stale bits (e.g. starvation being asserted prior
750 * to resource allocation during driver init).
751 */
752 qm_ccsr_out(REG_ERR_ISR, 0xffffffff);
753 /* Enable Error Interrupts */
754 qm_ccsr_out(REG_ERR_IER, 0xffffffff);
755
756 qm_fqalloc = devm_gen_pool_create(dev, 0, -1, "qman-fqalloc");
757 if (IS_ERR(qm_fqalloc)) {
758 ret = PTR_ERR(qm_fqalloc);
759 dev_err(dev, "qman-fqalloc pool init failed (%d)\n", ret);
760 return ret;
761 }
762
763 qm_qpalloc = devm_gen_pool_create(dev, 0, -1, "qman-qpalloc");
764 if (IS_ERR(qm_qpalloc)) {
765 ret = PTR_ERR(qm_qpalloc);
766 dev_err(dev, "qman-qpalloc pool init failed (%d)\n", ret);
767 return ret;
768 }
769
770 qm_cgralloc = devm_gen_pool_create(dev, 0, -1, "qman-cgralloc");
771 if (IS_ERR(qm_cgralloc)) {
772 ret = PTR_ERR(qm_cgralloc);
773 dev_err(dev, "qman-cgralloc pool init failed (%d)\n", ret);
774 return ret;
775 }
776
777 ret = qman_resource_init(dev);
778 if (ret)
779 return ret;
780
781 ret = qman_alloc_fq_table(qm_get_fqid_maxcnt());
782 if (ret)
783 return ret;
784
785 ret = qman_wq_alloc();
786 if (ret)
787 return ret;
788
789 return 0;
790}
791
792static const struct of_device_id fsl_qman_ids[] = {
793 {
794 .compatible = "fsl,qman",
795 },
796 {}
797};
798
799static struct platform_driver fsl_qman_driver = {
800 .driver = {
801 .name = KBUILD_MODNAME,
802 .of_match_table = fsl_qman_ids,
803 .suppress_bind_attrs = true,
804 },
805 .probe = fsl_qman_probe,
806};
807
808builtin_platform_driver(fsl_qman_driver);
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c
new file mode 100644
index 000000000000..148614388fca
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_portal.c
@@ -0,0 +1,355 @@
1/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
2 *
3 * Redistribution and use in source and binary forms, with or without
4 * modification, are permitted provided that the following conditions are met:
5 * * Redistributions of source code must retain the above copyright
6 * notice, this list of conditions and the following disclaimer.
7 * * Redistributions in binary form must reproduce the above copyright
8 * notice, this list of conditions and the following disclaimer in the
9 * documentation and/or other materials provided with the distribution.
10 * * Neither the name of Freescale Semiconductor nor the
11 * names of its contributors may be used to endorse or promote products
12 * derived from this software without specific prior written permission.
13 *
14 * ALTERNATIVELY, this software may be distributed under the terms of the
15 * GNU General Public License ("GPL") as published by the Free Software
16 * Foundation, either version 2 of that License or (at your option) any
17 * later version.
18 *
19 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
20 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
23 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#include "qman_priv.h"
32
33/* Enable portal interrupts (as opposed to polling mode) */
34#define CONFIG_FSL_DPA_PIRQ_SLOW 1
35#define CONFIG_FSL_DPA_PIRQ_FAST 1
36
37static struct cpumask portal_cpus;
38/* protect qman global registers and global data shared among portals */
39static DEFINE_SPINLOCK(qman_lock);
40
41static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
42{
43#ifdef CONFIG_FSL_PAMU
44 struct device *dev = pcfg->dev;
45 int window_count = 1;
46 struct iommu_domain_geometry geom_attr;
47 struct pamu_stash_attribute stash_attr;
48 int ret;
49
50 pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type);
51 if (!pcfg->iommu_domain) {
52 dev_err(dev, "%s(): iommu_domain_alloc() failed", __func__);
53 goto no_iommu;
54 }
55 geom_attr.aperture_start = 0;
56 geom_attr.aperture_end =
57 ((dma_addr_t)1 << min(8 * sizeof(dma_addr_t), (size_t)36)) - 1;
58 geom_attr.force_aperture = true;
59 ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_GEOMETRY,
60 &geom_attr);
61 if (ret < 0) {
62 dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
63 ret);
64 goto out_domain_free;
65 }
66 ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_WINDOWS,
67 &window_count);
68 if (ret < 0) {
69 dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
70 ret);
71 goto out_domain_free;
72 }
73 stash_attr.cpu = cpu;
74 stash_attr.cache = PAMU_ATTR_CACHE_L1;
75 ret = iommu_domain_set_attr(pcfg->iommu_domain,
76 DOMAIN_ATTR_FSL_PAMU_STASH,
77 &stash_attr);
78 if (ret < 0) {
79 dev_err(dev, "%s(): iommu_domain_set_attr() = %d",
80 __func__, ret);
81 goto out_domain_free;
82 }
83 ret = iommu_domain_window_enable(pcfg->iommu_domain, 0, 0, 1ULL << 36,
84 IOMMU_READ | IOMMU_WRITE);
85 if (ret < 0) {
86 dev_err(dev, "%s(): iommu_domain_window_enable() = %d",
87 __func__, ret);
88 goto out_domain_free;
89 }
90 ret = iommu_attach_device(pcfg->iommu_domain, dev);
91 if (ret < 0) {
92 dev_err(dev, "%s(): iommu_attach_device() = %d", __func__,
93 ret);
94 goto out_domain_free;
95 }
96 ret = iommu_domain_set_attr(pcfg->iommu_domain,
97 DOMAIN_ATTR_FSL_PAMU_ENABLE,
98 &window_count);
99 if (ret < 0) {
100 dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
101 ret);
102 goto out_detach_device;
103 }
104
105no_iommu:
106#endif
107 qman_set_sdest(pcfg->channel, cpu);
108
109 return;
110
111#ifdef CONFIG_FSL_PAMU
112out_detach_device:
113 iommu_detach_device(pcfg->iommu_domain, NULL);
114out_domain_free:
115 iommu_domain_free(pcfg->iommu_domain);
116 pcfg->iommu_domain = NULL;
117#endif
118}
119
120static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg)
121{
122 struct qman_portal *p;
123 u32 irq_sources = 0;
124
125 /* We need the same LIODN offset for all portals */
126 qman_liodn_fixup(pcfg->channel);
127
128 pcfg->iommu_domain = NULL;
129 portal_set_cpu(pcfg, pcfg->cpu);
130
131 p = qman_create_affine_portal(pcfg, NULL);
132 if (!p) {
133 dev_crit(pcfg->dev, "%s: Portal failure on cpu %d\n",
134 __func__, pcfg->cpu);
135 return NULL;
136 }
137
138 /* Determine what should be interrupt-vs-poll driven */
139#ifdef CONFIG_FSL_DPA_PIRQ_SLOW
140 irq_sources |= QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI |
141 QM_PIRQ_CSCI;
142#endif
143#ifdef CONFIG_FSL_DPA_PIRQ_FAST
144 irq_sources |= QM_PIRQ_DQRI;
145#endif
146 qman_p_irqsource_add(p, irq_sources);
147
148 spin_lock(&qman_lock);
149 if (cpumask_equal(&portal_cpus, cpu_possible_mask)) {
150 /* all assigned portals are initialized now */
151 qman_init_cgr_all();
152 }
153 spin_unlock(&qman_lock);
154
155 dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu);
156
157 return p;
158}
159
160static void qman_portal_update_sdest(const struct qm_portal_config *pcfg,
161 unsigned int cpu)
162{
163#ifdef CONFIG_FSL_PAMU /* TODO */
164 struct pamu_stash_attribute stash_attr;
165 int ret;
166
167 if (pcfg->iommu_domain) {
168 stash_attr.cpu = cpu;
169 stash_attr.cache = PAMU_ATTR_CACHE_L1;
170 ret = iommu_domain_set_attr(pcfg->iommu_domain,
171 DOMAIN_ATTR_FSL_PAMU_STASH, &stash_attr);
172 if (ret < 0) {
173 dev_err(pcfg->dev,
174 "Failed to update pamu stash setting\n");
175 return;
176 }
177 }
178#endif
179 qman_set_sdest(pcfg->channel, cpu);
180}
181
182static void qman_offline_cpu(unsigned int cpu)
183{
184 struct qman_portal *p;
185 const struct qm_portal_config *pcfg;
186
187 p = affine_portals[cpu];
188 if (p) {
189 pcfg = qman_get_qm_portal_config(p);
190 if (pcfg) {
191 irq_set_affinity(pcfg->irq, cpumask_of(0));
192 qman_portal_update_sdest(pcfg, 0);
193 }
194 }
195}
196
197static void qman_online_cpu(unsigned int cpu)
198{
199 struct qman_portal *p;
200 const struct qm_portal_config *pcfg;
201
202 p = affine_portals[cpu];
203 if (p) {
204 pcfg = qman_get_qm_portal_config(p);
205 if (pcfg) {
206 irq_set_affinity(pcfg->irq, cpumask_of(cpu));
207 qman_portal_update_sdest(pcfg, cpu);
208 }
209 }
210}
211
212static int qman_hotplug_cpu_callback(struct notifier_block *nfb,
213 unsigned long action, void *hcpu)
214{
215 unsigned int cpu = (unsigned long)hcpu;
216
217 switch (action) {
218 case CPU_ONLINE:
219 case CPU_ONLINE_FROZEN:
220 qman_online_cpu(cpu);
221 break;
222 case CPU_DOWN_PREPARE:
223 case CPU_DOWN_PREPARE_FROZEN:
224 qman_offline_cpu(cpu);
225 default:
226 break;
227 }
228 return NOTIFY_OK;
229}
230
231static struct notifier_block qman_hotplug_cpu_notifier = {
232 .notifier_call = qman_hotplug_cpu_callback,
233};
234
235static int qman_portal_probe(struct platform_device *pdev)
236{
237 struct device *dev = &pdev->dev;
238 struct device_node *node = dev->of_node;
239 struct qm_portal_config *pcfg;
240 struct resource *addr_phys[2];
241 const u32 *channel;
242 void __iomem *va;
243 int irq, len, cpu;
244
245 pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
246 if (!pcfg)
247 return -ENOMEM;
248
249 pcfg->dev = dev;
250
251 addr_phys[0] = platform_get_resource(pdev, IORESOURCE_MEM,
252 DPAA_PORTAL_CE);
253 if (!addr_phys[0]) {
254 dev_err(dev, "Can't get %s property 'reg::CE'\n",
255 node->full_name);
256 return -ENXIO;
257 }
258
259 addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
260 DPAA_PORTAL_CI);
261 if (!addr_phys[1]) {
262 dev_err(dev, "Can't get %s property 'reg::CI'\n",
263 node->full_name);
264 return -ENXIO;
265 }
266
267 channel = of_get_property(node, "cell-index", &len);
268 if (!channel || (len != 4)) {
269 dev_err(dev, "Can't get %s property 'cell-index'\n",
270 node->full_name);
271 return -ENXIO;
272 }
273 pcfg->channel = *channel;
274 pcfg->cpu = -1;
275 irq = platform_get_irq(pdev, 0);
276 if (irq <= 0) {
277 dev_err(dev, "Can't get %s IRQ\n", node->full_name);
278 return -ENXIO;
279 }
280 pcfg->irq = irq;
281
282 va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
283 if (!va)
284 goto err_ioremap1;
285
286 pcfg->addr_virt[DPAA_PORTAL_CE] = va;
287
288 va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
289 _PAGE_GUARDED | _PAGE_NO_CACHE);
290 if (!va)
291 goto err_ioremap2;
292
293 pcfg->addr_virt[DPAA_PORTAL_CI] = va;
294
295 pcfg->pools = qm_get_pools_sdqcr();
296
297 spin_lock(&qman_lock);
298 cpu = cpumask_next_zero(-1, &portal_cpus);
299 if (cpu >= nr_cpu_ids) {
300 /* unassigned portal, skip init */
301 spin_unlock(&qman_lock);
302 return 0;
303 }
304
305 cpumask_set_cpu(cpu, &portal_cpus);
306 spin_unlock(&qman_lock);
307 pcfg->cpu = cpu;
308
309 if (!init_pcfg(pcfg))
310 goto err_ioremap2;
311
312 /* clear irq affinity if assigned cpu is offline */
313 if (!cpu_online(cpu))
314 qman_offline_cpu(cpu);
315
316 return 0;
317
318err_ioremap2:
319 iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
320err_ioremap1:
321 dev_err(dev, "ioremap failed\n");
322 return -ENXIO;
323}
324
325static const struct of_device_id qman_portal_ids[] = {
326 {
327 .compatible = "fsl,qman-portal",
328 },
329 {}
330};
331MODULE_DEVICE_TABLE(of, qman_portal_ids);
332
333static struct platform_driver qman_portal_driver = {
334 .driver = {
335 .name = KBUILD_MODNAME,
336 .of_match_table = qman_portal_ids,
337 },
338 .probe = qman_portal_probe,
339};
340
341static int __init qman_portal_driver_register(struct platform_driver *drv)
342{
343 int ret;
344
345 ret = platform_driver_register(drv);
346 if (ret < 0)
347 return ret;
348
349 register_hotcpu_notifier(&qman_hotplug_cpu_notifier);
350
351 return 0;
352}
353
354module_driver(qman_portal_driver,
355 qman_portal_driver_register, platform_driver_unregister);
diff --git a/drivers/soc/fsl/qbman/qman_priv.h b/drivers/soc/fsl/qbman/qman_priv.h
new file mode 100644
index 000000000000..5cf821e623a9
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_priv.h
@@ -0,0 +1,371 @@
1/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
2 *
3 * Redistribution and use in source and binary forms, with or without
4 * modification, are permitted provided that the following conditions are met:
5 * * Redistributions of source code must retain the above copyright
6 * notice, this list of conditions and the following disclaimer.
7 * * Redistributions in binary form must reproduce the above copyright
8 * notice, this list of conditions and the following disclaimer in the
9 * documentation and/or other materials provided with the distribution.
10 * * Neither the name of Freescale Semiconductor nor the
11 * names of its contributors may be used to endorse or promote products
12 * derived from this software without specific prior written permission.
13 *
14 * ALTERNATIVELY, this software may be distributed under the terms of the
15 * GNU General Public License ("GPL") as published by the Free Software
16 * Foundation, either version 2 of that License or (at your option) any
17 * later version.
18 *
19 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
20 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
23 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
32
33#include "dpaa_sys.h"
34
35#include <soc/fsl/qman.h>
36#include <linux/iommu.h>
37
38#if defined(CONFIG_FSL_PAMU)
39#include <asm/fsl_pamu_stash.h>
40#endif
41
42struct qm_mcr_querywq {
43 u8 verb;
44 u8 result;
45 u16 channel_wq; /* ignores wq (3 lsbits): _res[0-2] */
46 u8 __reserved[28];
47 u32 wq_len[8];
48} __packed;
49
50static inline u16 qm_mcr_querywq_get_chan(const struct qm_mcr_querywq *wq)
51{
52 return wq->channel_wq >> 3;
53}
54
55struct __qm_mcr_querycongestion {
56 u32 state[8];
57};
58
59/* "Query Congestion Group State" */
60struct qm_mcr_querycongestion {
61 u8 verb;
62 u8 result;
63 u8 __reserved[30];
64 /* Access this struct using qman_cgrs_get() */
65 struct __qm_mcr_querycongestion state;
66} __packed;
67
68/* "Query CGR" */
69struct qm_mcr_querycgr {
70 u8 verb;
71 u8 result;
72 u16 __reserved1;
73 struct __qm_mc_cgr cgr; /* CGR fields */
74 u8 __reserved2[6];
75 u8 i_bcnt_hi; /* high 8-bits of 40-bit "Instant" */
76 u32 i_bcnt_lo; /* low 32-bits of 40-bit */
77 u8 __reserved3[3];
78 u8 a_bcnt_hi; /* high 8-bits of 40-bit "Average" */
79 u32 a_bcnt_lo; /* low 32-bits of 40-bit */
80 u32 cscn_targ_swp[4];
81} __packed;
82
83static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q)
84{
85 return ((u64)q->i_bcnt_hi << 32) | (u64)q->i_bcnt_lo;
86}
87static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q)
88{
89 return ((u64)q->a_bcnt_hi << 32) | (u64)q->a_bcnt_lo;
90}
91
92/* "Query FQ Non-Programmable Fields" */
93struct qm_mcc_queryfq_np {
94 u8 _ncw_verb;
95 u8 __reserved1[3];
96 u32 fqid; /* 24-bit */
97 u8 __reserved2[56];
98} __packed;
99
100struct qm_mcr_queryfq_np {
101 u8 verb;
102 u8 result;
103 u8 __reserved1;
104 u8 state; /* QM_MCR_NP_STATE_*** */
105 u32 fqd_link; /* 24-bit, _res2[24-31] */
106 u16 odp_seq; /* 14-bit, _res3[14-15] */
107 u16 orp_nesn; /* 14-bit, _res4[14-15] */
108 u16 orp_ea_hseq; /* 15-bit, _res5[15] */
109 u16 orp_ea_tseq; /* 15-bit, _res6[15] */
110 u32 orp_ea_hptr; /* 24-bit, _res7[24-31] */
111 u32 orp_ea_tptr; /* 24-bit, _res8[24-31] */
112 u32 pfdr_hptr; /* 24-bit, _res9[24-31] */
113 u32 pfdr_tptr; /* 24-bit, _res10[24-31] */
114 u8 __reserved2[5];
115 u8 is; /* 1-bit, _res12[1-7] */
116 u16 ics_surp;
117 u32 byte_cnt;
118 u32 frm_cnt; /* 24-bit, _res13[24-31] */
119 u32 __reserved3;
120 u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */
121 u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */
122 u16 __reserved4;
123 u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */
124 u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */
125 u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */
126} __packed;
127
128#define QM_MCR_NP_STATE_FE 0x10
129#define QM_MCR_NP_STATE_R 0x08
130#define QM_MCR_NP_STATE_MASK 0x07 /* Reads FQD::STATE; */
131#define QM_MCR_NP_STATE_OOS 0x00
132#define QM_MCR_NP_STATE_RETIRED 0x01
133#define QM_MCR_NP_STATE_TEN_SCHED 0x02
134#define QM_MCR_NP_STATE_TRU_SCHED 0x03
135#define QM_MCR_NP_STATE_PARKED 0x04
136#define QM_MCR_NP_STATE_ACTIVE 0x05
137#define QM_MCR_NP_PTR_MASK 0x07ff /* for RA[12] & OD[123] */
138#define QM_MCR_NP_RA1_NRA(v) (((v) >> 14) & 0x3) /* FQD::NRA */
139#define QM_MCR_NP_RA2_IT(v) (((v) >> 14) & 0x1) /* FQD::IT */
140#define QM_MCR_NP_OD1_NOD(v) (((v) >> 14) & 0x3) /* FQD::NOD */
141#define QM_MCR_NP_OD3_NPC(v) (((v) >> 14) & 0x3) /* FQD::NPC */
142
143enum qm_mcr_queryfq_np_masks {
144 qm_mcr_fqd_link_mask = BIT(24)-1,
145 qm_mcr_odp_seq_mask = BIT(14)-1,
146 qm_mcr_orp_nesn_mask = BIT(14)-1,
147 qm_mcr_orp_ea_hseq_mask = BIT(15)-1,
148 qm_mcr_orp_ea_tseq_mask = BIT(15)-1,
149 qm_mcr_orp_ea_hptr_mask = BIT(24)-1,
150 qm_mcr_orp_ea_tptr_mask = BIT(24)-1,
151 qm_mcr_pfdr_hptr_mask = BIT(24)-1,
152 qm_mcr_pfdr_tptr_mask = BIT(24)-1,
153 qm_mcr_is_mask = BIT(1)-1,
154 qm_mcr_frm_cnt_mask = BIT(24)-1,
155};
156#define qm_mcr_np_get(np, field) \
157 ((np)->field & (qm_mcr_##field##_mask))
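/*
 * Illustrative sketch: given a struct qm_mcr_queryfq_np 'np' returned by the
 * query command, the masked non-programmable fields are read as, e.g.:
 *
 *	u32 frm_cnt = qm_mcr_np_get(&np, frm_cnt);
 *	u32 hptr = qm_mcr_np_get(&np, pfdr_hptr);
 */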
158
159/* Congestion Groups */
160
161/*
162 * This wrapper represents a bit-array for the state of the 256 QMan congestion
163 * groups. It is also used as a *mask* for congestion groups, e.g. so we ignore
164 * those that don't concern us. We harness the structure and accessor details
165 * already used in the management command to query congestion groups.
166 */
167#define CGR_BITS_PER_WORD 5
168#define CGR_WORD(x) ((x) >> CGR_BITS_PER_WORD)
169#define CGR_BIT(x) (BIT(31) >> ((x) & 0x1f))
170#define CGR_NUM (sizeof(struct __qm_mcr_querycongestion) << 3)
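/*
 * Illustrative mapping sketch: congestion group 37 sits in state word
 * CGR_WORD(37) == 1 at bit CGR_BIT(37) == BIT(31) >> 5, i.e. groups are
 * packed 32 per word, most-significant bit first.
 */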
171
172struct qman_cgrs {
173 struct __qm_mcr_querycongestion q;
174};
175
176static inline void qman_cgrs_init(struct qman_cgrs *c)
177{
178 memset(c, 0, sizeof(*c));
179}
180
181static inline void qman_cgrs_fill(struct qman_cgrs *c)
182{
183 memset(c, 0xff, sizeof(*c));
184}
185
186static inline int qman_cgrs_get(struct qman_cgrs *c, u8 cgr)
187{
188 return c->q.state[CGR_WORD(cgr)] & CGR_BIT(cgr);
189}
190
191static inline void qman_cgrs_cp(struct qman_cgrs *dest,
192 const struct qman_cgrs *src)
193{
194 *dest = *src;
195}
196
197static inline void qman_cgrs_and(struct qman_cgrs *dest,
198 const struct qman_cgrs *a, const struct qman_cgrs *b)
199{
200 int ret;
201 u32 *_d = dest->q.state;
202 const u32 *_a = a->q.state;
203 const u32 *_b = b->q.state;
204
205 for (ret = 0; ret < 8; ret++)
206 *_d++ = *_a++ & *_b++;
207}
208
209static inline void qman_cgrs_xor(struct qman_cgrs *dest,
210 const struct qman_cgrs *a, const struct qman_cgrs *b)
211{
212 int ret;
213 u32 *_d = dest->q.state;
214 const u32 *_a = a->q.state;
215 const u32 *_b = b->q.state;
216
217 for (ret = 0; ret < 8; ret++)
218 *_d++ = *_a++ ^ *_b++;
219}
220
221void qman_init_cgr_all(void);
222
223struct qm_portal_config {
224 /*
225 * Corenet portal addresses;
226 * [0]==cache-enabled, [1]==cache-inhibited.
227 */
228 void __iomem *addr_virt[2];
229 struct device *dev;
230 struct iommu_domain *iommu_domain;
231 /* Allow these to be joined in lists */
232 struct list_head list;
233 /* User-visible portal configuration settings */
234 /* portal is affined to this cpu */
235 int cpu;
236 /* portal interrupt line */
237 int irq;
238 /*
239 * the portal's dedicated channel id, used when initialising
240 * frame queues to target this portal when scheduled
241 */
242 u16 channel;
243 /*
244 * mask of pool channels this portal has dequeue access to
245 * (using QM_SDQCR_CHANNELS_POOL(n) for the bitmask)
246 */
247 u32 pools;
248};
249
250/* Revision info (for errata and feature handling) */
251#define QMAN_REV11 0x0101
252#define QMAN_REV12 0x0102
253#define QMAN_REV20 0x0200
254#define QMAN_REV30 0x0300
255#define QMAN_REV31 0x0301
256extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */
257
258#define QM_FQID_RANGE_START 1 /* FQID 0 reserved for internal use */
259extern struct gen_pool *qm_fqalloc; /* FQID allocator */
260extern struct gen_pool *qm_qpalloc; /* pool-channel allocator */
261extern struct gen_pool *qm_cgralloc; /* CGR ID allocator */
262u32 qm_get_pools_sdqcr(void);
263
264int qman_wq_alloc(void);
265void qman_liodn_fixup(u16 channel);
266void qman_set_sdest(u16 channel, unsigned int cpu_idx);
267
268struct qman_portal *qman_create_affine_portal(
269 const struct qm_portal_config *config,
270 const struct qman_cgrs *cgrs);
271const struct qm_portal_config *qman_destroy_affine_portal(void);
272
273/*
274 * qman_query_fq - Queries FQD fields (via h/w query command)
275 * @fq: the frame queue object to be queried
276 * @fqd: storage for the queried FQD fields
277 */
278int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd);
279
280/*
281 * For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use
282 * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use
283 * FQID(n) to fill in the frame queue ID.
284 */
285#define QM_VDQCR_PRECEDENCE_VDQCR 0x0
286#define QM_VDQCR_PRECEDENCE_SDQCR 0x80000000
287#define QM_VDQCR_EXACT 0x40000000
288#define QM_VDQCR_NUMFRAMES_MASK 0x3f000000
289#define QM_VDQCR_NUMFRAMES_SET(n) (((n) & 0x3f) << 24)
290#define QM_VDQCR_NUMFRAMES_GET(n) (((n) >> 24) & 0x3f)
291#define QM_VDQCR_NUMFRAMES_TILLEMPTY QM_VDQCR_NUMFRAMES_SET(0)
292
293#define QMAN_VOLATILE_FLAG_WAIT 0x00000001 /* wait if VDQCR is in use */
294#define QMAN_VOLATILE_FLAG_WAIT_INT 0x00000002 /* if wait, interruptible? */
295#define QMAN_VOLATILE_FLAG_FINISH 0x00000004 /* wait till VDQCR completes */
296
297/*
298 * qman_volatile_dequeue - Issue a volatile dequeue command
299 * @fq: the frame queue object to dequeue from
300 * @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options
301 * @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set()
302 *
303 * Attempts to lock access to the portal's VDQCR volatile dequeue functionality.
304 * The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and
305 * the VDQCR is already in use, otherwise returns non-zero for failure. If
306 * QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once
307 * the VDQCR command has finished executing (ie. once the callback for the last
308 * DQRR entry resulting from the VDQCR command has been called). If not using
309 * the FINISH flag, completion can be determined either by detecting the
310 * presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits
311 * in the "stat" parameter passed to the FQ's dequeue callback, or by waiting
312 * for the QMAN_FQ_STATE_VDQCR bit to disappear.
313 */
314int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
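/*
 * Illustrative usage sketch, assuming 'fq' is a parked/retired FQ owned by
 * the caller (mirrors the driver's self-test usage):
 *
 *	u32 vdqcr = QM_VDQCR_PRECEDENCE_SDQCR | QM_VDQCR_NUMFRAMES_SET(4);
 *	int err = qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT |
 *					QMAN_VOLATILE_FLAG_FINISH, vdqcr);
 */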
315
316int qman_alloc_fq_table(u32 num_fqids);
317
318/* QMan s/w corenet portal, low-level i/face */
319
320/*
321 * For qm_dqrr_sdqcr_set(); Choose one SOURCE. Choose one COUNT. Choose one
322 * dequeue TYPE. Choose TOKEN (8-bit).
323 * If SOURCE == CHANNELS,
324 * Choose CHANNELS_DEDICATED and/or CHANNELS_POOL(n).
325 * You can choose DEDICATED_PRECEDENCE if the portal channel should have
326 * priority.
327 * If SOURCE == SPECIFICWQ,
328 * Either select the work-queue ID with SPECIFICWQ_WQ(), or select the
329 * channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the
330 * work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the
331 * same value.
332 */
333#define QM_SDQCR_SOURCE_CHANNELS 0x0
334#define QM_SDQCR_SOURCE_SPECIFICWQ 0x40000000
335#define QM_SDQCR_COUNT_EXACT1 0x0
336#define QM_SDQCR_COUNT_UPTO3 0x20000000
337#define QM_SDQCR_DEDICATED_PRECEDENCE 0x10000000
338#define QM_SDQCR_TYPE_MASK 0x03000000
339#define QM_SDQCR_TYPE_NULL 0x0
340#define QM_SDQCR_TYPE_PRIO_QOS 0x01000000
341#define QM_SDQCR_TYPE_ACTIVE_QOS 0x02000000
342#define QM_SDQCR_TYPE_ACTIVE 0x03000000
343#define QM_SDQCR_TOKEN_MASK 0x00ff0000
344#define QM_SDQCR_TOKEN_SET(v) (((v) & 0xff) << 16)
345#define QM_SDQCR_TOKEN_GET(v) (((v) >> 16) & 0xff)
346#define QM_SDQCR_CHANNELS_DEDICATED 0x00008000
347#define QM_SDQCR_SPECIFICWQ_MASK 0x000000f7
348#define QM_SDQCR_SPECIFICWQ_DEDICATED 0x00000000
349#define QM_SDQCR_SPECIFICWQ_POOL(n) ((n) << 4)
350#define QM_SDQCR_SPECIFICWQ_WQ(n) (n)
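/*
 * Illustrative sketch of composing an SDQCR value for the dedicated channel
 * plus pool channel 2, active-QoS type, with an 8-bit token of 0x98:
 *
 *	u32 sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_TYPE_ACTIVE_QOS |
 *		    QM_SDQCR_TOKEN_SET(0x98) | QM_SDQCR_CHANNELS_DEDICATED |
 *		    QM_SDQCR_CHANNELS_POOL(2);
 */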
351
352/* For qm_dqrr_vdqcr_set(): use FQID(n) to fill in the frame queue ID */
353#define QM_VDQCR_FQID_MASK 0x00ffffff
354#define QM_VDQCR_FQID(n) ((n) & QM_VDQCR_FQID_MASK)
355
356/*
357 * Used by all portal interrupt registers except 'inhibit'
358 * Channels with frame availability
359 */
360#define QM_PIRQ_DQAVAIL 0x0000ffff
361
362/* The DQAVAIL interrupt fields break down into these bits; */
363#define QM_DQAVAIL_PORTAL 0x8000 /* Portal channel */
364#define QM_DQAVAIL_POOL(n) (0x8000 >> (n)) /* Pool channel, n==[1..15] */
365#define QM_DQAVAIL_MASK 0xffff
366/* This mask contains all the "irqsource" bits visible to API users */
367#define QM_PIRQ_VISIBLE (QM_PIRQ_SLOW | QM_PIRQ_DQRI)
368
369extern struct qman_portal *affine_portals[NR_CPUS];
370const struct qm_portal_config *qman_get_qm_portal_config(
371 struct qman_portal *portal);
diff --git a/drivers/soc/fsl/qbman/qman_test.c b/drivers/soc/fsl/qbman/qman_test.c
new file mode 100644
index 000000000000..18f7f0202fa7
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_test.c
@@ -0,0 +1,62 @@
1/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
2 *
3 * Redistribution and use in source and binary forms, with or without
4 * modification, are permitted provided that the following conditions are met:
5 * * Redistributions of source code must retain the above copyright
6 * notice, this list of conditions and the following disclaimer.
7 * * Redistributions in binary form must reproduce the above copyright
8 * notice, this list of conditions and the following disclaimer in the
9 * documentation and/or other materials provided with the distribution.
10 * * Neither the name of Freescale Semiconductor nor the
11 * names of its contributors may be used to endorse or promote products
12 * derived from this software without specific prior written permission.
13 *
14 * ALTERNATIVELY, this software may be distributed under the terms of the
15 * GNU General Public License ("GPL") as published by the Free Software
16 * Foundation, either version 2 of that License or (at your option) any
17 * later version.
18 *
19 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
20 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
23 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#include "qman_test.h"
32
33MODULE_AUTHOR("Geoff Thorpe");
34MODULE_LICENSE("Dual BSD/GPL");
35MODULE_DESCRIPTION("QMan testing");
36
37static int test_init(void)
38{
39 int loop = 1;
40 int err = 0;
41
42 while (loop--) {
43#ifdef CONFIG_FSL_QMAN_TEST_STASH
44 err = qman_test_stash();
45 if (err)
46 break;
47#endif
48#ifdef CONFIG_FSL_QMAN_TEST_API
49 err = qman_test_api();
50 if (err)
51 break;
52#endif
53 }
54 return err;
55}
56
57static void test_exit(void)
58{
59}
60
61module_init(test_init);
62module_exit(test_exit);
diff --git a/drivers/soc/fsl/qbman/qman_test.h b/drivers/soc/fsl/qbman/qman_test.h
new file mode 100644
index 000000000000..d5f8cb2260dc
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_test.h
@@ -0,0 +1,36 @@
1/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
2 *
3 * Redistribution and use in source and binary forms, with or without
4 * modification, are permitted provided that the following conditions are met:
5 * * Redistributions of source code must retain the above copyright
6 * notice, this list of conditions and the following disclaimer.
7 * * Redistributions in binary form must reproduce the above copyright
8 * notice, this list of conditions and the following disclaimer in the
9 * documentation and/or other materials provided with the distribution.
10 * * Neither the name of Freescale Semiconductor nor the
11 * names of its contributors may be used to endorse or promote products
12 * derived from this software without specific prior written permission.
13 *
14 * ALTERNATIVELY, this software may be distributed under the terms of the
15 * GNU General Public License ("GPL") as published by the Free Software
16 * Foundation, either version 2 of that License or (at your option) any
17 * later version.
18 *
19 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
20 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
23 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#include "qman_priv.h"
32
33#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34
35int qman_test_stash(void);
36int qman_test_api(void);
diff --git a/drivers/soc/fsl/qbman/qman_test_api.c b/drivers/soc/fsl/qbman/qman_test_api.c
new file mode 100644
index 000000000000..6880ff17f45e
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_test_api.c
@@ -0,0 +1,252 @@
1/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
2 *
3 * Redistribution and use in source and binary forms, with or without
4 * modification, are permitted provided that the following conditions are met:
5 * * Redistributions of source code must retain the above copyright
6 * notice, this list of conditions and the following disclaimer.
7 * * Redistributions in binary form must reproduce the above copyright
8 * notice, this list of conditions and the following disclaimer in the
9 * documentation and/or other materials provided with the distribution.
10 * * Neither the name of Freescale Semiconductor nor the
11 * names of its contributors may be used to endorse or promote products
12 * derived from this software without specific prior written permission.
13 *
14 * ALTERNATIVELY, this software may be distributed under the terms of the
15 * GNU General Public License ("GPL") as published by the Free Software
16 * Foundation, either version 2 of that License or (at your option) any
17 * later version.
18 *
19 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
20 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
23 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#include "qman_test.h"
32
33#define CGR_ID 27
34#define POOL_ID 2
35#define FQ_FLAGS QMAN_FQ_FLAG_DYNAMIC_FQID
36#define NUM_ENQUEUES 10
37#define NUM_PARTIAL 4
38#define PORTAL_SDQCR (QM_SDQCR_SOURCE_CHANNELS | \
39 QM_SDQCR_TYPE_PRIO_QOS | \
40 QM_SDQCR_TOKEN_SET(0x98) | \
41 QM_SDQCR_CHANNELS_DEDICATED | \
42 QM_SDQCR_CHANNELS_POOL(POOL_ID))
43#define PORTAL_OPAQUE ((void *)0xf00dbeef)
44#define VDQCR_FLAGS (QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH)
45
46static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *,
47 struct qman_fq *,
48 const struct qm_dqrr_entry *);
49static void cb_ern(struct qman_portal *, struct qman_fq *,
50 const union qm_mr_entry *);
51static void cb_fqs(struct qman_portal *, struct qman_fq *,
52 const union qm_mr_entry *);
53
54static struct qm_fd fd, fd_dq;
55static struct qman_fq fq_base = {
56 .cb.dqrr = cb_dqrr,
57 .cb.ern = cb_ern,
58 .cb.fqs = cb_fqs
59};
60static DECLARE_WAIT_QUEUE_HEAD(waitqueue);
61static int retire_complete, sdqcr_complete;
62
63/* Helpers for initialising and "incrementing" a frame descriptor */
64static void fd_init(struct qm_fd *fd)
65{
66 qm_fd_addr_set64(fd, 0xabdeadbeefLLU);
67 qm_fd_set_contig_big(fd, 0x0000ffff);
68 fd->cmd = 0xfeedf00d;
69}
70
71static void fd_inc(struct qm_fd *fd)
72{
73 u64 t = qm_fd_addr_get64(fd);
74 int z = t >> 40;
75 unsigned int len, off;
76 enum qm_fd_format fmt;
77
78 t <<= 1;
79 if (z)
80 t |= 1;
81 qm_fd_addr_set64(fd, t);
82
83 fmt = qm_fd_get_format(fd);
84 off = qm_fd_get_offset(fd);
85 len = qm_fd_get_length(fd);
86 len--;
87 qm_fd_set_param(fd, fmt, off, len);
88
89 fd->cmd++;
90}
91
92/* The only part of the 'fd' we can't memcmp() is the ppid */
93static int fd_cmp(const struct qm_fd *a, const struct qm_fd *b)
94{
95 int r = (qm_fd_addr_get64(a) == qm_fd_addr_get64(b)) ? 0 : -1;
96
97 if (!r) {
98 enum qm_fd_format fmt_a, fmt_b;
99
100 fmt_a = qm_fd_get_format(a);
101 fmt_b = qm_fd_get_format(b);
102 r = fmt_a - fmt_b;
103 }
104 if (!r)
105 r = a->cfg - b->cfg;
106 if (!r)
107 r = a->cmd - b->cmd;
108 return r;
109}
110
111/* test */
112static int do_enqueues(struct qman_fq *fq)
113{
114 unsigned int loop;
115 int err = 0;
116
117 for (loop = 0; loop < NUM_ENQUEUES; loop++) {
118 if (qman_enqueue(fq, &fd)) {
119 pr_crit("qman_enqueue() failed\n");
120 err = -EIO;
121 }
122 fd_inc(&fd);
123 }
124
125 return err;
126}
127
128int qman_test_api(void)
129{
130 unsigned int flags, frmcnt;
131 int err;
132 struct qman_fq *fq = &fq_base;
133
134 pr_info("%s(): Starting\n", __func__);
135 fd_init(&fd);
136 fd_init(&fd_dq);
137
138 /* Initialise (parked) FQ */
139 err = qman_create_fq(0, FQ_FLAGS, fq);
140 if (err) {
141 pr_crit("qman_create_fq() failed\n");
142 goto failed;
143 }
144 err = qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL);
145 if (err) {
146 pr_crit("qman_init_fq() failed\n");
147 goto failed;
148 }
149 /* Do enqueues + VDQCR, twice. (Parked FQ) */
150 err = do_enqueues(fq);
151 if (err)
152 goto failed;
153 pr_info("VDQCR (till-empty);\n");
154 frmcnt = QM_VDQCR_NUMFRAMES_TILLEMPTY;
155 err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
156 if (err) {
157 pr_crit("qman_volatile_dequeue() failed\n");
158 goto failed;
159 }
160 err = do_enqueues(fq);
161 if (err)
162 goto failed;
163 pr_info("VDQCR (%d of %d);\n", NUM_PARTIAL, NUM_ENQUEUES);
164 frmcnt = QM_VDQCR_NUMFRAMES_SET(NUM_PARTIAL);
165 err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
166 if (err) {
167 pr_crit("qman_volatile_dequeue() failed\n");
168 goto failed;
169 }
170 pr_info("VDQCR (%d of %d);\n", NUM_ENQUEUES - NUM_PARTIAL,
171 NUM_ENQUEUES);
172 frmcnt = QM_VDQCR_NUMFRAMES_SET(NUM_ENQUEUES - NUM_PARTIAL);
173 err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
174 if (err) {
175 pr_err("qman_volatile_dequeue() failed\n");
176 goto failed;
177 }
178
179 err = do_enqueues(fq);
180 if (err)
181 goto failed;
182 pr_info("scheduled dequeue (till-empty)\n");
183 err = qman_schedule_fq(fq);
184 if (err) {
185 pr_crit("qman_schedule_fq() failed\n");
186 goto failed;
187 }
188 wait_event(waitqueue, sdqcr_complete);
189
190 /* Retire and OOS the FQ */
191 err = qman_retire_fq(fq, &flags);
192 if (err < 0) {
193 pr_crit("qman_retire_fq() failed\n");
194 goto failed;
195 }
196 wait_event(waitqueue, retire_complete);
197 if (flags & QMAN_FQ_STATE_BLOCKOOS) {
198 err = -EIO;
199 pr_crit("leaking frames\n");
200 goto failed;
201 }
202 err = qman_oos_fq(fq);
203 if (err) {
204 pr_crit("qman_oos_fq() failed\n");
205 goto failed;
206 }
207 qman_destroy_fq(fq);
208 pr_info("%s(): Finished\n", __func__);
209 return 0;
210
211failed:
212 WARN_ON(1);
213 return err;
214}
215
216static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p,
217 struct qman_fq *fq,
218 const struct qm_dqrr_entry *dq)
219{
220 if (WARN_ON(fd_cmp(&fd_dq, &dq->fd))) {
221 pr_err("BADNESS: dequeued frame doesn't match;\n");
222 return qman_cb_dqrr_consume;
223 }
224 fd_inc(&fd_dq);
225 if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_cmp(&fd_dq, &fd)) {
226 sdqcr_complete = 1;
227 wake_up(&waitqueue);
228 }
229 return qman_cb_dqrr_consume;
230}
231
232static void cb_ern(struct qman_portal *p, struct qman_fq *fq,
233 const union qm_mr_entry *msg)
234{
235 pr_crit("cb_ern() unimplemented");
236 WARN_ON(1);
237}
238
239static void cb_fqs(struct qman_portal *p, struct qman_fq *fq,
240 const union qm_mr_entry *msg)
241{
242 u8 verb = (msg->verb & QM_MR_VERB_TYPE_MASK);
243
244 if ((verb != QM_MR_VERB_FQRN) && (verb != QM_MR_VERB_FQRNI)) {
245 pr_crit("unexpected FQS message");
246 WARN_ON(1);
247 return;
248 }
249 pr_info("Retirement message received\n");
250 retire_complete = 1;
251 wake_up(&waitqueue);
252}
diff --git a/drivers/soc/fsl/qbman/qman_test_stash.c b/drivers/soc/fsl/qbman/qman_test_stash.c
new file mode 100644
index 000000000000..43cf66ba42f5
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_test_stash.c
@@ -0,0 +1,617 @@
1/* Copyright 2009 - 2016 Freescale Semiconductor, Inc.
2 *
3 * Redistribution and use in source and binary forms, with or without
4 * modification, are permitted provided that the following conditions are met:
5 * * Redistributions of source code must retain the above copyright
6 * notice, this list of conditions and the following disclaimer.
7 * * Redistributions in binary form must reproduce the above copyright
8 * notice, this list of conditions and the following disclaimer in the
9 * documentation and/or other materials provided with the distribution.
10 * * Neither the name of Freescale Semiconductor nor the
11 * names of its contributors may be used to endorse or promote products
12 * derived from this software without specific prior written permission.
13 *
14 * ALTERNATIVELY, this software may be distributed under the terms of the
15 * GNU General Public License ("GPL") as published by the Free Software
16 * Foundation, either version 2 of that License or (at your option) any
17 * later version.
18 *
19 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
20 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
23 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#include "qman_test.h"
32
33#include <linux/dma-mapping.h>
34#include <linux/delay.h>
35
36/*
37 * Algorithm:
38 *
39 * Each cpu will have HP_PER_CPU "handlers" set up, each of which incorporates
40 * an rx/tx pair of FQ objects (both of which are stashed on dequeue). The
41 * organisation of FQIDs is such that the HP_PER_CPU*NUM_CPUS handlers will
42 * shuttle a "hot potato" frame around them such that every forwarding action
43 * moves it from one cpu to another. (The use of more than one handler per cpu
44 * is to allow enough handlers/FQs to truly test the significance of caching -
45 * ie. when cache-expiries are occurring.)
46 *
47 * The "hot potato" frame content will be HP_NUM_WORDS*4 bytes in size, and the
48 * first and last words of the frame data will undergo a transformation step on
49 * each forwarding action. To achieve this, each handler will be assigned a
50 * 32-bit "mixer", that is produced using a 32-bit LFSR. When a frame is
51 * received by a handler, the mixer of the expected sender is XOR'd into all
52 * words of the entire frame, which is then validated against the original
53 * values. Then, before forwarding, the entire frame is XOR'd with the mixer of
54 * the current handler. Apart from validating that the frame is taking the
55 * expected path, this also provides some quasi-realistic overheads to each
56 * forwarding action - dereferencing *all* the frame data, computation, and
57 * conditional branching. There is a "special" handler designated to act as the
58 * instigator of the test by creating and enqueuing the "hot potato" frame, and
59 * to determine when the test has completed by counting HP_LOOPS iterations.
60 *
61 * Init phases:
62 *
63 * 1. prepare each cpu's 'hp_cpu' struct using on_each_cpu(,,1) and link them
64 * into 'hp_cpu_list'. Specifically, set processor_id, allocate HP_PER_CPU
65 * handlers and link-list them (but do no other handler setup).
66 *
67 * 2. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each
68 * hp_cpu's 'iterator' to point to its first handler. With each loop,
69 * allocate rx/tx FQIDs and mixer values to the hp_cpu's iterator handler
70 * and advance the iterator for the next loop. This includes a final fixup,
71 * which connects the last handler to the first (and which is why phase 2
72 * and 3 are separate).
73 *
74 * 3. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each
75 * hp_cpu's 'iterator' to point to its first handler. With each loop,
76 * initialise FQ objects and advance the iterator for the next loop.
77 * Moreover, do this initialisation on the cpu it applies to so that Rx FQ
78 * initialisation targets the correct cpu.
79 */
80
81/*
82 * helper to run something on all cpus (can't use on_each_cpu(), as that invokes
83 * the fn from irq context, which is too restrictive).
84 */
85struct bstrap {
86 int (*fn)(void);
87 atomic_t started;
88};
89static int bstrap_fn(void *bs)
90{
91 struct bstrap *bstrap = bs;
92 int err;
93
94 atomic_inc(&bstrap->started);
95 err = bstrap->fn();
96 if (err)
97 return err;
98 while (!kthread_should_stop())
99 msleep(20);
100 return 0;
101}
102static int on_all_cpus(int (*fn)(void))
103{
104 int cpu;
105
106 for_each_cpu(cpu, cpu_online_mask) {
107 struct bstrap bstrap = {
108 .fn = fn,
109 .started = ATOMIC_INIT(0)
110 };
111 struct task_struct *k = kthread_create(bstrap_fn, &bstrap,
112 "hotpotato%d", cpu);
113 int ret;
114
115 if (IS_ERR(k))
116 return -ENOMEM;
117 kthread_bind(k, cpu);
118 wake_up_process(k);
119 /*
120 * If we call kthread_stop() before the "wake up" has had an
121 * effect, then the thread may exit with -EINTR without ever
122 * running the function. So poll until it's started before
123 * requesting it to stop.
124 */
125 while (!atomic_read(&bstrap.started))
126 msleep(20);
127 ret = kthread_stop(k);
128 if (ret)
129 return ret;
130 }
131 return 0;
132}
133
134struct hp_handler {
135
136 /* The following data is stashed when 'rx' is dequeued; */
137 /* -------------- */
138 /* The Rx FQ, dequeues of which will stash the entire hp_handler */
139 struct qman_fq rx;
140 /* The Tx FQ we should forward to */
141 struct qman_fq tx;
142 /* The value we XOR post-dequeue, prior to validating */
143 u32 rx_mixer;
144 /* The value we XOR pre-enqueue, after validating */
145 u32 tx_mixer;
146 /* what the hotpotato address should be on dequeue */
147 dma_addr_t addr;
148 u32 *frame_ptr;
149
150 /* The following data isn't (necessarily) stashed on dequeue; */
151 /* -------------- */
152 u32 fqid_rx, fqid_tx;
153 /* list node for linking us into 'hp_cpu' */
154 struct list_head node;
155 /* Just to check ... */
156 unsigned int processor_id;
157} ____cacheline_aligned;
158
159struct hp_cpu {
160 /* identify the cpu we run on; */
161 unsigned int processor_id;
162 /* root node for the per-cpu list of handlers */
163 struct list_head handlers;
164 /* list node for linking us into 'hp_cpu_list' */
165 struct list_head node;
166 /*
167 * when repeatedly scanning 'hp_cpu_list', each time linking the n'th
168 * handlers together, this is used as per-cpu iterator state
169 */
170 struct hp_handler *iterator;
171};
172
173/* Each cpu has one of these */
174static DEFINE_PER_CPU(struct hp_cpu, hp_cpus);
175
176/* links together the hp_cpu structs, in first-come first-serve order. */
177static LIST_HEAD(hp_cpu_list);
178static spinlock_t hp_lock = __SPIN_LOCK_UNLOCKED(hp_lock);
179
180static unsigned int hp_cpu_list_length;
181
182/* the "special" handler, that starts and terminates the test. */
183static struct hp_handler *special_handler;
184static int loop_counter;
185
186/* handlers are allocated out of this, so they're properly aligned. */
187static struct kmem_cache *hp_handler_slab;
188
189/* this is the frame data */
190static void *__frame_ptr;
191static u32 *frame_ptr;
192static dma_addr_t frame_dma;
193
194/* the main function waits on this */
195static DECLARE_WAIT_QUEUE_HEAD(queue);
196
197#define HP_PER_CPU 2
198#define HP_LOOPS 8
199/* 80 words (320 bytes), like a smallish ethernet frame, bleeding into further cachelines */
200#define HP_NUM_WORDS 80
201/* First word of the LFSR-based frame data */
202#define HP_FIRST_WORD 0xabbaf00d
203
204static inline u32 do_lfsr(u32 prev)
205{
206 return (prev >> 1) ^ (-(prev & 1u) & 0xd0000001u);
207}
208
209static int allocate_frame_data(void)
210{
211 u32 lfsr = HP_FIRST_WORD;
212 int loop;
213 struct platform_device *pdev = platform_device_alloc("foobar", -1);
214
215 if (!pdev) {
216 pr_crit("platform_device_alloc() failed");
217 return -EIO;
218 }
219 if (platform_device_add(pdev)) {
220 pr_crit("platform_device_add() failed");
221 return -EIO;
222 }
223 __frame_ptr = kmalloc(4 * HP_NUM_WORDS, GFP_KERNEL);
224 if (!__frame_ptr)
225 return -ENOMEM;
226
227 frame_ptr = PTR_ALIGN(__frame_ptr, 64);
228 for (loop = 0; loop < HP_NUM_WORDS; loop++) {
229 frame_ptr[loop] = lfsr;
230 lfsr = do_lfsr(lfsr);
231 }
232 frame_dma = dma_map_single(&pdev->dev, frame_ptr, 4 * HP_NUM_WORDS,
233 DMA_BIDIRECTIONAL);
234 platform_device_del(pdev);
235 platform_device_put(pdev);
236 return 0;
237}
238
239static void deallocate_frame_data(void)
240{
241 kfree(__frame_ptr);
242}
243
244static inline int process_frame_data(struct hp_handler *handler,
245 const struct qm_fd *fd)
246{
247 u32 *p = handler->frame_ptr;
248 u32 lfsr = HP_FIRST_WORD;
249 int loop;
250
251 if (qm_fd_addr_get64(fd) != handler->addr) {
252 pr_crit("bad frame address");
253 return -EIO;
254 }
255 for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
256 *p ^= handler->rx_mixer;
257 if (*p != lfsr) {
258 pr_crit("corrupt frame data");
259 return -EIO;
260 }
261 *p ^= handler->tx_mixer;
262 lfsr = do_lfsr(lfsr);
263 }
264 return 0;
265}
266
267static enum qman_cb_dqrr_result normal_dqrr(struct qman_portal *portal,
268 struct qman_fq *fq,
269 const struct qm_dqrr_entry *dqrr)
270{
271 struct hp_handler *handler = (struct hp_handler *)fq;
272
273 if (process_frame_data(handler, &dqrr->fd)) {
274 WARN_ON(1);
275 goto skip;
276 }
277 if (qman_enqueue(&handler->tx, &dqrr->fd)) {
278 pr_crit("qman_enqueue() failed");
279 WARN_ON(1);
280 }
281skip:
282 return qman_cb_dqrr_consume;
283}
284
285static enum qman_cb_dqrr_result special_dqrr(struct qman_portal *portal,
286 struct qman_fq *fq,
287 const struct qm_dqrr_entry *dqrr)
288{
289 struct hp_handler *handler = (struct hp_handler *)fq;
290
291 process_frame_data(handler, &dqrr->fd);
292 if (++loop_counter < HP_LOOPS) {
293 if (qman_enqueue(&handler->tx, &dqrr->fd)) {
294 pr_crit("qman_enqueue() failed");
295 WARN_ON(1);
296 goto skip;
297 }
298 } else {
299 pr_info("Received final (%dth) frame\n", loop_counter);
300 wake_up(&queue);
301 }
302skip:
303 return qman_cb_dqrr_consume;
304}
305
306static int create_per_cpu_handlers(void)
307{
308 struct hp_handler *handler;
309 int loop;
310 struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus);
311
312 hp_cpu->processor_id = smp_processor_id();
313 spin_lock(&hp_lock);
314 list_add_tail(&hp_cpu->node, &hp_cpu_list);
315 hp_cpu_list_length++;
316 spin_unlock(&hp_lock);
317 INIT_LIST_HEAD(&hp_cpu->handlers);
318 for (loop = 0; loop < HP_PER_CPU; loop++) {
319 handler = kmem_cache_alloc(hp_handler_slab, GFP_KERNEL);
320 if (!handler) {
321 pr_crit("kmem_cache_alloc() failed");
322 WARN_ON(1);
323 return -EIO;
324 }
325 handler->processor_id = hp_cpu->processor_id;
326 handler->addr = frame_dma;
327 handler->frame_ptr = frame_ptr;
328 list_add_tail(&handler->node, &hp_cpu->handlers);
329 }
330 return 0;
331}
332
333static int destroy_per_cpu_handlers(void)
334{
335 struct list_head *loop, *tmp;
336 struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus);
337
338 spin_lock(&hp_lock);
339 list_del(&hp_cpu->node);
340 spin_unlock(&hp_lock);
341 list_for_each_safe(loop, tmp, &hp_cpu->handlers) {
342 u32 flags = 0;
343 struct hp_handler *handler = list_entry(loop, struct hp_handler,
344 node);
345 if (qman_retire_fq(&handler->rx, &flags) ||
346 (flags & QMAN_FQ_STATE_BLOCKOOS)) {
347 pr_crit("qman_retire_fq(rx) failed, flags: %x", flags);
348 WARN_ON(1);
349 return -EIO;
350 }
351 if (qman_oos_fq(&handler->rx)) {
352 pr_crit("qman_oos_fq(rx) failed");
353 WARN_ON(1);
354 return -EIO;
355 }
356 qman_destroy_fq(&handler->rx);
357 qman_destroy_fq(&handler->tx);
358 qman_release_fqid(handler->fqid_rx);
359 list_del(&handler->node);
360 kmem_cache_free(hp_handler_slab, handler);
361 }
362 return 0;
363}
364
365static inline u8 num_cachelines(u32 offset)
366{
367 u8 res = (offset + (L1_CACHE_BYTES - 1))
368 / (L1_CACHE_BYTES);
369 if (res > 3)
370 return 3;
371 return res;
372}
373#define STASH_DATA_CL \
374 num_cachelines(HP_NUM_WORDS * 4)
375#define STASH_CTX_CL \
376 num_cachelines(offsetof(struct hp_handler, fqid_rx))
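/*
 * Editorial note, for illustration only: assuming 64-byte cachelines,
 * STASH_DATA_CL = num_cachelines(HP_NUM_WORDS * 4 = 320) = 5, which
 * num_cachelines() caps at 3 (the most the 2-bit FQD stashing fields can
 * express); STASH_CTX_CL covers struct hp_handler up to 'fqid_rx', i.e. the
 * part annotated above as stashed on dequeue.
 */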
377
378static int init_handler(void *h)
379{
380 struct qm_mcc_initfq opts;
381 struct hp_handler *handler = h;
382 int err;
383
384 if (handler->processor_id != smp_processor_id()) {
385 err = -EIO;
386 goto failed;
387 }
388 /* Set up rx */
389 memset(&handler->rx, 0, sizeof(handler->rx));
390 if (handler == special_handler)
391 handler->rx.cb.dqrr = special_dqrr;
392 else
393 handler->rx.cb.dqrr = normal_dqrr;
394 err = qman_create_fq(handler->fqid_rx, 0, &handler->rx);
395 if (err) {
396 pr_crit("qman_create_fq(rx) failed");
397 goto failed;
398 }
399 memset(&opts, 0, sizeof(opts));
400 opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
401 opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING;
402 qm_fqd_set_stashing(&opts.fqd, 0, STASH_DATA_CL, STASH_CTX_CL);
403 err = qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED |
404 QMAN_INITFQ_FLAG_LOCAL, &opts);
405 if (err) {
406 pr_crit("qman_init_fq(rx) failed");
407 goto failed;
408 }
409 /* Set up tx */
410 memset(&handler->tx, 0, sizeof(handler->tx));
411 err = qman_create_fq(handler->fqid_tx, QMAN_FQ_FLAG_NO_MODIFY,
412 &handler->tx);
413 if (err) {
414 pr_crit("qman_create_fq(tx) failed");
415 goto failed;
416 }
417
418 return 0;
419failed:
420 return err;
421}
422
423static void init_handler_cb(void *h)
424{
425 if (init_handler(h))
426 WARN_ON(1);
427}
428
429static int init_phase2(void)
430{
431 int loop;
432 u32 fqid = 0;
433 u32 lfsr = 0xdeadbeef;
434 struct hp_cpu *hp_cpu;
435 struct hp_handler *handler;
436
437 for (loop = 0; loop < HP_PER_CPU; loop++) {
438 list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
439 int err;
440
441 if (!loop)
442 hp_cpu->iterator = list_first_entry(
443 &hp_cpu->handlers,
444 struct hp_handler, node);
445 else
446 hp_cpu->iterator = list_entry(
447 hp_cpu->iterator->node.next,
448 struct hp_handler, node);
449 /* Rx FQID is the previous handler's Tx FQID */
450 hp_cpu->iterator->fqid_rx = fqid;
451 /* Allocate new FQID for Tx */
452 err = qman_alloc_fqid(&fqid);
453 if (err) {
454 pr_crit("qman_alloc_fqid() failed");
455 return err;
456 }
457 hp_cpu->iterator->fqid_tx = fqid;
458 /* Rx mixer is the previous handler's Tx mixer */
459 hp_cpu->iterator->rx_mixer = lfsr;
460 /* Get new mixer for Tx */
461 lfsr = do_lfsr(lfsr);
462 hp_cpu->iterator->tx_mixer = lfsr;
463 }
464 }
465 /* Fix up the first handler (fqid_rx==0, rx_mixer=0xdeadbeef) */
466 hp_cpu = list_first_entry(&hp_cpu_list, struct hp_cpu, node);
467 handler = list_first_entry(&hp_cpu->handlers, struct hp_handler, node);
468 if (handler->fqid_rx != 0 || handler->rx_mixer != 0xdeadbeef)
469 return 1;
470 handler->fqid_rx = fqid;
471 handler->rx_mixer = lfsr;
472 /* and tag it as our "special" handler */
473 special_handler = handler;
474 return 0;
475}
476
477static int init_phase3(void)
478{
479 int loop, err;
480 struct hp_cpu *hp_cpu;
481
482 for (loop = 0; loop < HP_PER_CPU; loop++) {
483 list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
484 if (!loop)
485 hp_cpu->iterator = list_first_entry(
486 &hp_cpu->handlers,
487 struct hp_handler, node);
488 else
489 hp_cpu->iterator = list_entry(
490 hp_cpu->iterator->node.next,
491 struct hp_handler, node);
492 preempt_disable();
493 if (hp_cpu->processor_id == smp_processor_id()) {
494 err = init_handler(hp_cpu->iterator);
495 if (err)
496 return err;
497 } else {
498 smp_call_function_single(hp_cpu->processor_id,
499 init_handler_cb, hp_cpu->iterator, 1);
500 }
501 preempt_enable();
502 }
503 }
504 return 0;
505}
506
507static int send_first_frame(void *ignore)
508{
509 u32 *p = special_handler->frame_ptr;
510 u32 lfsr = HP_FIRST_WORD;
511 int loop, err;
512 struct qm_fd fd;
513
514 if (special_handler->processor_id != smp_processor_id()) {
515 err = -EIO;
516 goto failed;
517 }
518 memset(&fd, 0, sizeof(fd));
519 qm_fd_addr_set64(&fd, special_handler->addr);
520 qm_fd_set_contig_big(&fd, HP_NUM_WORDS * 4);
521 for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
522 if (*p != lfsr) {
523 err = -EIO;
524 pr_crit("corrupt frame data");
525 goto failed;
526 }
527 *p ^= special_handler->tx_mixer;
528 lfsr = do_lfsr(lfsr);
529 }
530 pr_info("Sending first frame\n");
531 err = qman_enqueue(&special_handler->tx, &fd);
532 if (err) {
533 pr_crit("qman_enqueue() failed");
534 goto failed;
535 }
536
537 return 0;
538failed:
539 return err;
540}
541
542static void send_first_frame_cb(void *ignore)
543{
544 if (send_first_frame(NULL))
545 WARN_ON(1);
546}
547
548int qman_test_stash(void)
549{
550 int err;
551
552 if (cpumask_weight(cpu_online_mask) < 2) {
553 pr_info("%s(): skip - only 1 CPU\n", __func__);
554 return 0;
555 }
556
557 pr_info("%s(): Starting\n", __func__);
558
559 hp_cpu_list_length = 0;
560 loop_counter = 0;
561 hp_handler_slab = kmem_cache_create("hp_handler_slab",
562 sizeof(struct hp_handler), L1_CACHE_BYTES,
563 SLAB_HWCACHE_ALIGN, NULL);
564 if (!hp_handler_slab) {
565 err = -EIO;
566 pr_crit("kmem_cache_create() failed");
567 goto failed;
568 }
569
570 err = allocate_frame_data();
571 if (err)
572 goto failed;
573
574 /* Init phase 1 */
575 pr_info("Creating %d handlers per cpu...\n", HP_PER_CPU);
576 if (on_all_cpus(create_per_cpu_handlers)) {
577 err = -EIO;
578 pr_crit("on_each_cpu() failed");
579 goto failed;
580 }
581 pr_info("Number of cpus: %d, total of %d handlers\n",
582 hp_cpu_list_length, hp_cpu_list_length * HP_PER_CPU);
583
584 err = init_phase2();
585 if (err)
586 goto failed;
587
588 err = init_phase3();
589 if (err)
590 goto failed;
591
592 preempt_disable();
593 if (special_handler->processor_id == smp_processor_id()) {
594 err = send_first_frame(NULL);
595 if (err)
596 goto failed;
597 } else {
598 smp_call_function_single(special_handler->processor_id,
599 send_first_frame_cb, NULL, 1);
600 }
601 preempt_enable();
602
603 wait_event(queue, loop_counter == HP_LOOPS);
604 deallocate_frame_data();
605 if (on_all_cpus(destroy_per_cpu_handlers)) {
606 err = -EIO;
607 pr_crit("on_each_cpu() failed");
608 goto failed;
609 }
610 kmem_cache_destroy(hp_handler_slab);
611 pr_info("%s(): Finished\n", __func__);
612
613 return 0;
614failed:
615 WARN_ON(1);
616 return err;
617}
diff --git a/drivers/soc/fsl/qe/gpio.c b/drivers/soc/fsl/qe/gpio.c
index 333eb2215a57..0aaf429f31d5 100644
--- a/drivers/soc/fsl/qe/gpio.c
+++ b/drivers/soc/fsl/qe/gpio.c
@@ -41,7 +41,8 @@ struct qe_gpio_chip {
41 41
42static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc) 42static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
43{ 43{
44 struct qe_gpio_chip *qe_gc = gpiochip_get_data(&mm_gc->gc); 44 struct qe_gpio_chip *qe_gc =
45 container_of(mm_gc, struct qe_gpio_chip, mm_gc);
45 struct qe_pio_regs __iomem *regs = mm_gc->regs; 46 struct qe_pio_regs __iomem *regs = mm_gc->regs;
46 47
47 qe_gc->cpdata = in_be32(&regs->cpdata); 48 qe_gc->cpdata = in_be32(&regs->cpdata);
diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c
index 7026507e6f1d..2707a827261b 100644
--- a/drivers/soc/fsl/qe/qe.c
+++ b/drivers/soc/fsl/qe/qe.c
@@ -69,8 +69,8 @@ static phys_addr_t qebase = -1;
69phys_addr_t get_qe_base(void) 69phys_addr_t get_qe_base(void)
70{ 70{
71 struct device_node *qe; 71 struct device_node *qe;
72 int size; 72 int ret;
73 const u32 *prop; 73 struct resource res;
74 74
75 if (qebase != -1) 75 if (qebase != -1)
76 return qebase; 76 return qebase;
@@ -82,9 +82,9 @@ phys_addr_t get_qe_base(void)
82 return qebase; 82 return qebase;
83 } 83 }
84 84
85 prop = of_get_property(qe, "reg", &size); 85 ret = of_address_to_resource(qe, 0, &res);
86 if (prop && size >= sizeof(*prop)) 86 if (!ret)
87 qebase = of_translate_address(qe, prop); 87 qebase = res.start;
88 of_node_put(qe); 88 of_node_put(qe);
89 89
90 return qebase; 90 return qebase;
diff --git a/drivers/soc/fsl/qe/qe_common.c b/drivers/soc/fsl/qe/qe_common.c
index 41eff805a904..104e68d9b84f 100644
--- a/drivers/soc/fsl/qe/qe_common.c
+++ b/drivers/soc/fsl/qe/qe_common.c
@@ -70,6 +70,11 @@ int cpm_muram_init(void)
70 } 70 }
71 71
72 muram_pool = gen_pool_create(0, -1); 72 muram_pool = gen_pool_create(0, -1);
73 if (!muram_pool) {
74 pr_err("Cannot allocate memory pool for CPM/QE muram");
75 ret = -ENOMEM;
76 goto out_muram;
77 }
73 muram_pbase = of_translate_address(np, zero); 78 muram_pbase = of_translate_address(np, zero);
74 if (muram_pbase == (phys_addr_t)OF_BAD_ADDR) { 79 if (muram_pbase == (phys_addr_t)OF_BAD_ADDR) {
75 pr_err("Cannot translate zero through CPM muram node"); 80 pr_err("Cannot translate zero through CPM muram node");
@@ -116,6 +121,9 @@ static unsigned long cpm_muram_alloc_common(unsigned long size,
116 struct muram_block *entry; 121 struct muram_block *entry;
117 unsigned long start; 122 unsigned long start;
118 123
124 if (!muram_pool && cpm_muram_init())
125 goto out2;
126
119 start = gen_pool_alloc_algo(muram_pool, size, algo, data); 127 start = gen_pool_alloc_algo(muram_pool, size, algo, data);
120 if (!start) 128 if (!start)
121 goto out2; 129 goto out2;
diff --git a/drivers/soc/fsl/qe/qe_tdm.c b/drivers/soc/fsl/qe/qe_tdm.c
index 5e48b1470178..a1048b44e6b9 100644
--- a/drivers/soc/fsl/qe/qe_tdm.c
+++ b/drivers/soc/fsl/qe/qe_tdm.c
@@ -99,7 +99,7 @@ int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm,
99 utdm->tdm_port = val; 99 utdm->tdm_port = val;
100 ut_info->uf_info.tdm_num = utdm->tdm_port; 100 ut_info->uf_info.tdm_num = utdm->tdm_port;
101 101
102 if (of_get_property(np, "fsl,tdm-internal-loopback", NULL)) 102 if (of_property_read_bool(np, "fsl,tdm-internal-loopback"))
103 utdm->tdm_mode = TDM_INTERNAL_LOOPBACK; 103 utdm->tdm_mode = TDM_INTERNAL_LOOPBACK;
104 else 104 else
105 utdm->tdm_mode = TDM_NORMAL; 105 utdm->tdm_mode = TDM_NORMAL;
@@ -167,7 +167,7 @@ int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm,
167 } 167 }
168 168
169 if (siram_init_flag == 0) { 169 if (siram_init_flag == 0) {
170 memset_io(utdm->siram, 0, res->end - res->start + 1); 170 memset_io(utdm->siram, 0, resource_size(res));
171 siram_init_flag = 1; 171 siram_init_flag = 1;
172 } 172 }
173 173
diff --git a/include/soc/fsl/bman.h b/include/soc/fsl/bman.h
new file mode 100644
index 000000000000..eaaf56df4086
--- /dev/null
+++ b/include/soc/fsl/bman.h
@@ -0,0 +1,129 @@
1/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
2 *
3 * Redistribution and use in source and binary forms, with or without
4 * modification, are permitted provided that the following conditions are met:
5 * * Redistributions of source code must retain the above copyright
6 * notice, this list of conditions and the following disclaimer.
7 * * Redistributions in binary form must reproduce the above copyright
8 * notice, this list of conditions and the following disclaimer in the
9 * documentation and/or other materials provided with the distribution.
10 * * Neither the name of Freescale Semiconductor nor the
11 * names of its contributors may be used to endorse or promote products
12 * derived from this software without specific prior written permission.
13 *
14 * ALTERNATIVELY, this software may be distributed under the terms of the
15 * GNU General Public License ("GPL") as published by the Free Software
16 * Foundation, either version 2 of that License or (at your option) any
17 * later version.
18 *
19 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
20 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
23 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#ifndef __FSL_BMAN_H
32#define __FSL_BMAN_H
33
34/* wrapper for 48-bit buffers */
35struct bm_buffer {
36 union {
37 struct {
38 __be16 bpid; /* hi 8-bits reserved */
39 __be16 hi; /* High 16-bits of 48-bit address */
40 __be32 lo; /* Low 32-bits of 48-bit address */
41 };
42 __be64 data;
43 };
44} __aligned(8);
45/*
46 * Restore the 48 bit address previously stored in BMan
47 * hardware pools as a dma_addr_t
48 */
49static inline dma_addr_t bm_buf_addr(const struct bm_buffer *buf)
50{
51 return be64_to_cpu(buf->data) & 0xffffffffffffLLU;
52}
53
54static inline u64 bm_buffer_get64(const struct bm_buffer *buf)
55{
56 return be64_to_cpu(buf->data) & 0xffffffffffffLLU;
57}
58
59static inline void bm_buffer_set64(struct bm_buffer *buf, u64 addr)
60{
61 buf->hi = cpu_to_be16(upper_32_bits(addr));
62 buf->lo = cpu_to_be32(lower_32_bits(addr));
63}
64
65static inline u8 bm_buffer_get_bpid(const struct bm_buffer *buf)
66{
67 return be16_to_cpu(buf->bpid) & 0xff;
68}
69
70static inline void bm_buffer_set_bpid(struct bm_buffer *buf, int bpid)
71{
72 buf->bpid = cpu_to_be16(bpid & 0xff);
73}
74
75/* Managed portal, high-level i/face */
76
77/* Portal and Buffer Pools */
78struct bman_portal;
79struct bman_pool;
80
81#define BM_POOL_MAX 64 /* max # of buffer pools */
82
83/**
84 * bman_new_pool - Allocates a Buffer Pool object
85 *
86 * Creates a pool object, and returns a reference to it or NULL on error.
87 */
88struct bman_pool *bman_new_pool(void);
89
90/**
91 * bman_free_pool - Deallocates a Buffer Pool object
92 * @pool: the pool object to release
93 */
94void bman_free_pool(struct bman_pool *pool);
95
96/**
97 * bman_get_bpid - Returns a pool object's BPID.
98 * @pool: the pool object
99 *
100 * The returned value is the index of the encapsulated buffer pool,
101 * in the range of [0, @BM_POOL_MAX-1].
102 */
103int bman_get_bpid(const struct bman_pool *pool);
104
105/**
106 * bman_release - Release buffer(s) to the buffer pool
107 * @pool: the buffer pool object to release to
108 * @bufs: an array of buffers to release
109 * @num: the number of buffers in @bufs (1-8)
110 *
111 * Adds the given buffers to RCR entries. If the RCR ring is unresponsive,
112 * the function will return -ETIMEDOUT. Otherwise, it returns zero.
113 */
114int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num);
115
116/**
117 * bman_acquire - Acquire buffer(s) from a buffer pool
118 * @pool: the buffer pool object to acquire from
119 * @bufs: array for storing the acquired buffers
120 * @num: the number of buffers desired (@bufs is at least this big)
121 *
122 * Issues an "Acquire" command via the portal's management command interface.
123 * The return value will be the number of buffers obtained from the pool, or a
124 * negative error code if a h/w error or pool starvation was encountered. In
125 * the latter case, the content of @bufs is undefined.
126 */
127int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num);
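/*
 * Illustrative usage only (editorial sketch, not part of this interface):
 * seed a pool with a single DMA-mapped buffer and take it back out again.
 * 'dma' is assumed to be a valid 48-bit bus address obtained elsewhere.
 */
static inline int bman_example_roundtrip(struct bman_pool *pool, dma_addr_t dma)
{
	struct bm_buffer buf = {};
	int ret;

	bm_buffer_set64(&buf, dma);
	ret = bman_release(pool, &buf, 1);	/* 0 on success, -ETIMEDOUT otherwise */
	if (ret)
		return ret;
	ret = bman_acquire(pool, &buf, 1);	/* number of buffers acquired, or < 0 */
	return ret == 1 ? 0 : -EIO;
}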
128
129#endif /* __FSL_BMAN_H */
diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h
new file mode 100644
index 000000000000..37f3eb001a16
--- /dev/null
+++ b/include/soc/fsl/qman.h
@@ -0,0 +1,1074 @@
1/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
2 *
3 * Redistribution and use in source and binary forms, with or without
4 * modification, are permitted provided that the following conditions are met:
5 * * Redistributions of source code must retain the above copyright
6 * notice, this list of conditions and the following disclaimer.
7 * * Redistributions in binary form must reproduce the above copyright
8 * notice, this list of conditions and the following disclaimer in the
9 * documentation and/or other materials provided with the distribution.
10 * * Neither the name of Freescale Semiconductor nor the
11 * names of its contributors may be used to endorse or promote products
12 * derived from this software without specific prior written permission.
13 *
14 * ALTERNATIVELY, this software may be distributed under the terms of the
15 * GNU General Public License ("GPL") as published by the Free Software
16 * Foundation, either version 2 of that License or (at your option) any
17 * later version.
18 *
19 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
20 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
23 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#ifndef __FSL_QMAN_H
32#define __FSL_QMAN_H
33
34#include <linux/bitops.h>
35
36/* Hardware constants */
37#define QM_CHANNEL_SWPORTAL0 0
38#define QMAN_CHANNEL_POOL1 0x21
39#define QMAN_CHANNEL_POOL1_REV3 0x401
40extern u16 qm_channel_pool1;
41
42/* Portal processing (interrupt) sources */
43#define QM_PIRQ_CSCI 0x00100000 /* Congestion State Change */
44#define QM_PIRQ_EQCI 0x00080000 /* Enqueue Command Committed */
45#define QM_PIRQ_EQRI 0x00040000 /* EQCR Ring (below threshold) */
46#define QM_PIRQ_DQRI 0x00020000 /* DQRR Ring (non-empty) */
47#define QM_PIRQ_MRI 0x00010000 /* MR Ring (non-empty) */
48/*
49 * This mask contains all the interrupt sources that need handling except DQRI,
50 * ie. that if present should trigger slow-path processing.
51 */
52#define QM_PIRQ_SLOW (QM_PIRQ_CSCI | QM_PIRQ_EQCI | QM_PIRQ_EQRI | \
53 QM_PIRQ_MRI)
54
55/* For qman_static_dequeue_*** APIs */
56#define QM_SDQCR_CHANNELS_POOL_MASK 0x00007fff
57/* for n in [1,15] */
58#define QM_SDQCR_CHANNELS_POOL(n) (0x00008000 >> (n))
59/* for conversion from n of qm_channel */
60static inline u32 QM_SDQCR_CHANNELS_POOL_CONV(u16 channel)
61{
62 return QM_SDQCR_CHANNELS_POOL(channel + 1 - qm_channel_pool1);
63}
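/*
 * Worked example (editorial): for pool channel qm_channel_pool1 + 3, i.e.
 * n = 4, QM_SDQCR_CHANNELS_POOL_CONV() yields 0x00008000 >> 4 = 0x00000800.
 */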
64
65/* --- QMan data structures (and associated constants) --- */
66
67/* "Frame Descriptor (FD)" */
68struct qm_fd {
69 union {
70 struct {
71 u8 cfg8b_w1;
72 u8 bpid; /* Buffer Pool ID */
73 u8 cfg8b_w3;
74 u8 addr_hi; /* high 8-bits of 40-bit address */
75 __be32 addr_lo; /* low 32-bits of 40-bit address */
76 } __packed;
77 __be64 data;
78 };
79 __be32 cfg; /* format, offset, length / congestion */
80 union {
81 __be32 cmd;
82 __be32 status;
83 };
84} __aligned(8);
85
86#define QM_FD_FORMAT_SG BIT(31)
87#define QM_FD_FORMAT_LONG BIT(30)
88#define QM_FD_FORMAT_COMPOUND BIT(29)
89#define QM_FD_FORMAT_MASK GENMASK(31, 29)
90#define QM_FD_OFF_SHIFT 20
91#define QM_FD_OFF_MASK GENMASK(28, 20)
92#define QM_FD_LEN_MASK GENMASK(19, 0)
93#define QM_FD_LEN_BIG_MASK GENMASK(28, 0)
94
95enum qm_fd_format {
96 /*
97 * 'contig' implies a contiguous buffer, whereas 'sg' implies a
98 * scatter-gather table. 'big' implies a 29-bit length with no offset
99 * field, otherwise length is 20-bit and offset is 9-bit. 'compound'
100 * implies a s/g-like table, where each entry itself represents a frame
101 * (contiguous or scatter-gather) and the 29-bit "length" is
102 * interpreted purely for congestion calculations, ie. a "congestion
103 * weight".
104 */
105 qm_fd_contig = 0,
106 qm_fd_contig_big = QM_FD_FORMAT_LONG,
107 qm_fd_sg = QM_FD_FORMAT_SG,
108 qm_fd_sg_big = QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG,
109 qm_fd_compound = QM_FD_FORMAT_COMPOUND
110};
111
112static inline dma_addr_t qm_fd_addr(const struct qm_fd *fd)
113{
114 return be64_to_cpu(fd->data) & 0xffffffffffLLU;
115}
116
117static inline u64 qm_fd_addr_get64(const struct qm_fd *fd)
118{
119 return be64_to_cpu(fd->data) & 0xffffffffffLLU;
120}
121
122static inline void qm_fd_addr_set64(struct qm_fd *fd, u64 addr)
123{
124 fd->addr_hi = upper_32_bits(addr);
125 fd->addr_lo = cpu_to_be32(lower_32_bits(addr));
126}
127
128/*
129 * The 'format' field indicates the interpretation of the remaining
130 * 29 bits of the 32-bit word.
131 * If 'format' is _contig or _sg, 20b length and 9b offset.
132 * If 'format' is _contig_big or _sg_big, 29b length.
133 * If 'format' is _compound, 29b "congestion weight".
134 */
135static inline enum qm_fd_format qm_fd_get_format(const struct qm_fd *fd)
136{
137 return be32_to_cpu(fd->cfg) & QM_FD_FORMAT_MASK;
138}
139
140static inline int qm_fd_get_offset(const struct qm_fd *fd)
141{
142 return (be32_to_cpu(fd->cfg) & QM_FD_OFF_MASK) >> QM_FD_OFF_SHIFT;
143}
144
145static inline int qm_fd_get_length(const struct qm_fd *fd)
146{
147 return be32_to_cpu(fd->cfg) & QM_FD_LEN_MASK;
148}
149
150static inline int qm_fd_get_len_big(const struct qm_fd *fd)
151{
152 return be32_to_cpu(fd->cfg) & QM_FD_LEN_BIG_MASK;
153}
154
155static inline void qm_fd_set_param(struct qm_fd *fd, enum qm_fd_format fmt,
156 int off, int len)
157{
158 fd->cfg = cpu_to_be32(fmt | (len & QM_FD_LEN_BIG_MASK) |
159 ((off << QM_FD_OFF_SHIFT) & QM_FD_OFF_MASK));
160}
161
162#define qm_fd_set_contig(fd, off, len) \
163 qm_fd_set_param(fd, qm_fd_contig, off, len)
164#define qm_fd_set_sg(fd, off, len) qm_fd_set_param(fd, qm_fd_sg, off, len)
165#define qm_fd_set_contig_big(fd, len) \
166 qm_fd_set_param(fd, qm_fd_contig_big, 0, len)
167#define qm_fd_set_sg_big(fd, len) qm_fd_set_param(fd, qm_fd_sg_big, 0, len)
168
169static inline void qm_fd_clear_fd(struct qm_fd *fd)
170{
171 fd->data = 0;
172 fd->cfg = 0;
173 fd->cmd = 0;
174}
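/*
 * Illustrative usage only (editorial sketch mirroring the qman self-test in
 * this series): describe a contiguous frame of 'len' bytes that has been
 * DMA-mapped to 'addr', using the 29-bit "big" length format.
 */
static inline void qm_fd_example_contig(struct qm_fd *fd, dma_addr_t addr,
					int len)
{
	qm_fd_clear_fd(fd);
	qm_fd_addr_set64(fd, addr);
	qm_fd_set_contig_big(fd, len);
}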
175
176/* Scatter/Gather table entry */
177struct qm_sg_entry {
178 union {
179 struct {
180 u8 __reserved1[3];
181 u8 addr_hi; /* high 8-bits of 40-bit address */
182 __be32 addr_lo; /* low 32-bits of 40-bit address */
183 };
184 __be64 data;
185 };
186 __be32 cfg; /* E bit, F bit, length */
187 u8 __reserved2;
188 u8 bpid;
189 __be16 offset; /* 13-bit, _res[13-15]*/
190} __packed;
191
192#define QM_SG_LEN_MASK GENMASK(29, 0)
193#define QM_SG_OFF_MASK GENMASK(12, 0)
194#define QM_SG_FIN BIT(30)
195#define QM_SG_EXT BIT(31)
196
197static inline dma_addr_t qm_sg_addr(const struct qm_sg_entry *sg)
198{
199 return be64_to_cpu(sg->data) & 0xffffffffffLLU;
200}
201
202static inline u64 qm_sg_entry_get64(const struct qm_sg_entry *sg)
203{
204 return be64_to_cpu(sg->data) & 0xffffffffffLLU;
205}
206
207static inline void qm_sg_entry_set64(struct qm_sg_entry *sg, u64 addr)
208{
209 sg->addr_hi = upper_32_bits(addr);
210 sg->addr_lo = cpu_to_be32(lower_32_bits(addr));
211}
212
213static inline bool qm_sg_entry_is_final(const struct qm_sg_entry *sg)
214{
215 return be32_to_cpu(sg->cfg) & QM_SG_FIN;
216}
217
218static inline bool qm_sg_entry_is_ext(const struct qm_sg_entry *sg)
219{
220 return be32_to_cpu(sg->cfg) & QM_SG_EXT;
221}
222
223static inline int qm_sg_entry_get_len(const struct qm_sg_entry *sg)
224{
225 return be32_to_cpu(sg->cfg) & QM_SG_LEN_MASK;
226}
227
228static inline void qm_sg_entry_set_len(struct qm_sg_entry *sg, int len)
229{
230 sg->cfg = cpu_to_be32(len & QM_SG_LEN_MASK);
231}
232
233static inline void qm_sg_entry_set_f(struct qm_sg_entry *sg, int len)
234{
235 sg->cfg = cpu_to_be32(QM_SG_FIN | (len & QM_SG_LEN_MASK));
236}
237
238static inline int qm_sg_entry_get_off(const struct qm_sg_entry *sg)
239{
240 return be32_to_cpu(sg->offset) & QM_SG_OFF_MASK;
241}
242
243/* "Frame Dequeue Response" */
244struct qm_dqrr_entry {
245 u8 verb;
246 u8 stat;
247 u16 seqnum; /* 15-bit */
248 u8 tok;
249 u8 __reserved2[3];
250 u32 fqid; /* 24-bit */
251 u32 contextB;
252 struct qm_fd fd;
253 u8 __reserved4[32];
254} __packed;
255#define QM_DQRR_VERB_VBIT 0x80
256#define QM_DQRR_VERB_MASK 0x7f /* where the verb contains; */
257#define QM_DQRR_VERB_FRAME_DEQUEUE 0x60 /* "this format" */
258#define QM_DQRR_STAT_FQ_EMPTY 0x80 /* FQ empty */
259#define QM_DQRR_STAT_FQ_HELDACTIVE 0x40 /* FQ held active */
260#define QM_DQRR_STAT_FQ_FORCEELIGIBLE 0x20 /* FQ was force-eligible'd */
261#define QM_DQRR_STAT_FD_VALID 0x10 /* has a non-NULL FD */
262#define QM_DQRR_STAT_UNSCHEDULED 0x02 /* Unscheduled dequeue */
263#define QM_DQRR_STAT_DQCR_EXPIRED 0x01 /* VDQCR or PDQCR expired*/
264
265/* "ERN Message Response" */
266/* "FQ State Change Notification" */
267union qm_mr_entry {
268 struct {
269 u8 verb;
270 u8 __reserved[63];
271 };
272 struct {
273 u8 verb;
274 u8 dca;
275 u16 seqnum;
276 u8 rc; /* Rej Code: 8-bit */
277 u8 orp_hi; /* ORP: 24-bit */
278 u16 orp_lo;
279 u32 fqid; /* 24-bit */
280 u32 tag;
281 struct qm_fd fd;
282 u8 __reserved1[32];
283 } __packed ern;
284 struct {
285 u8 verb;
286 u8 fqs; /* Frame Queue Status */
287 u8 __reserved1[6];
288 u32 fqid; /* 24-bit */
289 u32 contextB;
290 u8 __reserved2[48];
291 } __packed fq; /* FQRN/FQRNI/FQRL/FQPN */
292};
293#define QM_MR_VERB_VBIT 0x80
294/*
295 * ERNs originating from direct-connect portals ("dcern") use 0x20 as a verb
296 * which would be invalid as a s/w enqueue verb. A s/w ERN can be distinguished
297 * from the other MR types by noting if the 0x20 bit is unset.
298 */
299#define QM_MR_VERB_TYPE_MASK 0x27
300#define QM_MR_VERB_DC_ERN 0x20
301#define QM_MR_VERB_FQRN 0x21
302#define QM_MR_VERB_FQRNI 0x22
303#define QM_MR_VERB_FQRL 0x23
304#define QM_MR_VERB_FQPN 0x24
305#define QM_MR_RC_MASK 0xf0 /* contains one of; */
306#define QM_MR_RC_CGR_TAILDROP 0x00
307#define QM_MR_RC_WRED 0x10
308#define QM_MR_RC_ERROR 0x20
309#define QM_MR_RC_ORPWINDOW_EARLY 0x30
310#define QM_MR_RC_ORPWINDOW_LATE 0x40
311#define QM_MR_RC_FQ_TAILDROP 0x50
312#define QM_MR_RC_ORPWINDOW_RETIRED 0x60
313#define QM_MR_RC_ORP_ZERO 0x70
314#define QM_MR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */
315#define QM_MR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */
316
317/*
318 * An identical structure of FQD fields is present in the "Init FQ" command and
319 * the "Query FQ" result; it's suctioned out into the "struct qm_fqd" type.
320 * Within that, the 'stashing' and 'taildrop' pieces are also factored out, the
321 * latter has two inlines to assist with converting to/from the mant+exp
322 * representation.
323 */
324struct qm_fqd_stashing {
325 /* See QM_STASHING_EXCL_<...> */
326 u8 exclusive;
327 /* Numbers of cachelines */
328 u8 cl; /* _res[6-7], as[4-5], ds[2-3], cs[0-1] */
329};
330
331struct qm_fqd_oac {
332 /* "Overhead Accounting Control", see QM_OAC_<...> */
333 u8 oac; /* oac[6-7], _res[0-5] */
334 /* Two's-complement value (-128 to +127) */
335 s8 oal; /* "Overhead Accounting Length" */
336};
337
338struct qm_fqd {
339 /* _res[6-7], orprws[3-5], oa[2], olws[0-1] */
340 u8 orpc;
341 u8 cgid;
342 __be16 fq_ctrl; /* See QM_FQCTRL_<...> */
343 __be16 dest_wq; /* channel[3-15], wq[0-2] */
344 __be16 ics_cred; /* 15-bit */
345 /*
346 * For "Initialize Frame Queue" commands, the write-enable mask
347 * determines whether 'td' or 'oac_init' is observed. For query
348 * commands, this field is always 'td', and 'oac_query' (below) reflects
349 * the Overhead ACcounting values.
350 */
351 union {
352 __be16 td; /* "Taildrop": _res[13-15], mant[5-12], exp[0-4] */
353 struct qm_fqd_oac oac_init;
354 };
355 __be32 context_b;
356 union {
357 /* Treat it as 64-bit opaque */
358 __be64 opaque;
359 struct {
360 __be32 hi;
361 __be32 lo;
362 };
363 /* Treat it as s/w portal stashing config */
364 /* see "FQD Context_A field used for [...]" */
365 struct {
366 struct qm_fqd_stashing stashing;
367 /*
368 * 48-bit address of FQ context to
369 * stash, must be cacheline-aligned
370 */
371 __be16 context_hi;
372 __be32 context_lo;
373 } __packed;
374 } context_a;
375 struct qm_fqd_oac oac_query;
376} __packed;
377
378#define QM_FQD_CHAN_OFF 3
379#define QM_FQD_WQ_MASK GENMASK(2, 0)
380#define QM_FQD_TD_EXP_MASK GENMASK(4, 0)
381#define QM_FQD_TD_MANT_OFF 5
382#define QM_FQD_TD_MANT_MASK GENMASK(12, 5)
383#define QM_FQD_TD_MAX 0xe0000000
384#define QM_FQD_TD_MANT_MAX 0xff
385#define QM_FQD_OAC_OFF 6
386#define QM_FQD_AS_OFF 4
387#define QM_FQD_DS_OFF 2
388#define QM_FQD_XS_MASK 0x3
389
390/* 64-bit converters for context_hi/lo */
391static inline u64 qm_fqd_stashing_get64(const struct qm_fqd *fqd)
392{
393 return be64_to_cpu(fqd->context_a.opaque) & 0xffffffffffffULL;
394}
395
396static inline dma_addr_t qm_fqd_stashing_addr(const struct qm_fqd *fqd)
397{
398 return be64_to_cpu(fqd->context_a.opaque) & 0xffffffffffffULL;
399}
400
401static inline u64 qm_fqd_context_a_get64(const struct qm_fqd *fqd)
402{
403 return qm_fqd_stashing_get64(fqd);
404}
405
406static inline void qm_fqd_stashing_set64(struct qm_fqd *fqd, u64 addr)
407{
408 fqd->context_a.context_hi = upper_32_bits(addr);
409 fqd->context_a.context_lo = lower_32_bits(addr);
410}
411
412static inline void qm_fqd_context_a_set64(struct qm_fqd *fqd, u64 addr)
413{
414 fqd->context_a.hi = cpu_to_be16(upper_32_bits(addr));
415 fqd->context_a.lo = cpu_to_be32(lower_32_bits(addr));
416}
417
418/* convert a threshold value into mant+exp representation */
419static inline int qm_fqd_set_taildrop(struct qm_fqd *fqd, u32 val,
420 int roundup)
421{
422 u32 e = 0;
423 int td, oddbit = 0;
424
425 if (val > QM_FQD_TD_MAX)
426 return -ERANGE;
427
428 while (val > QM_FQD_TD_MANT_MAX) {
429 oddbit = val & 1;
430 val >>= 1;
431 e++;
432 if (roundup && oddbit)
433 val++;
434 }
435
436 td = (val << QM_FQD_TD_MANT_OFF) & QM_FQD_TD_MANT_MASK;
437 td |= (e & QM_FQD_TD_EXP_MASK);
438 fqd->td = cpu_to_be16(td);
439 return 0;
440}
441/* and the other direction */
442static inline int qm_fqd_get_taildrop(const struct qm_fqd *fqd)
443{
444 int td = be16_to_cpu(fqd->td);
445
446 return ((td & QM_FQD_TD_MANT_MASK) >> QM_FQD_TD_MANT_OFF)
447 << (td & QM_FQD_TD_EXP_MASK);
448}
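/*
 * Worked example (editorial): qm_fqd_set_taildrop(fqd, 1000, 0) halves the
 * value twice to fit the 8-bit mantissa, storing mant = 250, exp = 2, and
 * qm_fqd_get_taildrop() then reads back 250 << 2 = 1000.
 */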
449
450static inline void qm_fqd_set_stashing(struct qm_fqd *fqd, u8 as, u8 ds, u8 cs)
451{
452 struct qm_fqd_stashing *st = &fqd->context_a.stashing;
453
454 st->cl = ((as & QM_FQD_XS_MASK) << QM_FQD_AS_OFF) |
455 ((ds & QM_FQD_XS_MASK) << QM_FQD_DS_OFF) |
456 (cs & QM_FQD_XS_MASK);
457}
458
459static inline u8 qm_fqd_get_stashing(const struct qm_fqd *fqd)
460{
461 return fqd->context_a.stashing.cl;
462}
463
464static inline void qm_fqd_set_oac(struct qm_fqd *fqd, u8 val)
465{
466 fqd->oac_init.oac = val << QM_FQD_OAC_OFF;
467}
468
469static inline void qm_fqd_set_oal(struct qm_fqd *fqd, s8 val)
470{
471 fqd->oac_init.oal = val;
472}
473
474static inline void qm_fqd_set_destwq(struct qm_fqd *fqd, int ch, int wq)
475{
476 fqd->dest_wq = cpu_to_be16((ch << QM_FQD_CHAN_OFF) |
477 (wq & QM_FQD_WQ_MASK));
478}
479
480static inline int qm_fqd_get_chan(const struct qm_fqd *fqd)
481{
482 return be16_to_cpu(fqd->dest_wq) >> QM_FQD_CHAN_OFF;
483}
484
485static inline int qm_fqd_get_wq(const struct qm_fqd *fqd)
486{
487 return be16_to_cpu(fqd->dest_wq) & QM_FQD_WQ_MASK;
488}
489
490/* See "Frame Queue Descriptor (FQD)" */
491/* Frame Queue Descriptor (FQD) field 'fq_ctrl' uses these constants */
492#define QM_FQCTRL_MASK 0x07ff /* 'fq_ctrl' flags; */
493#define QM_FQCTRL_CGE 0x0400 /* Congestion Group Enable */
494#define QM_FQCTRL_TDE 0x0200 /* Tail-Drop Enable */
495#define QM_FQCTRL_CTXASTASHING 0x0080 /* Context-A stashing */
496#define QM_FQCTRL_CPCSTASH 0x0040 /* CPC Stash Enable */
497#define QM_FQCTRL_FORCESFDR 0x0008 /* High-priority SFDRs */
498#define QM_FQCTRL_AVOIDBLOCK 0x0004 /* Don't block active */
499#define QM_FQCTRL_HOLDACTIVE 0x0002 /* Hold active in portal */
500#define QM_FQCTRL_PREFERINCACHE 0x0001 /* Aggressively cache FQD */
501#define QM_FQCTRL_LOCKINCACHE QM_FQCTRL_PREFERINCACHE /* older naming */
502
503/* See "FQD Context_A field used for [...] */
504/* Frame Queue Descriptor (FQD) field 'CONTEXT_A' uses these constants */
505#define QM_STASHING_EXCL_ANNOTATION 0x04
506#define QM_STASHING_EXCL_DATA 0x02
507#define QM_STASHING_EXCL_CTX 0x01
508
509/* See "Intra Class Scheduling" */
510/* FQD field 'OAC' (Overhead ACcounting) uses these constants */
511#define QM_OAC_ICS 0x2 /* Accounting for Intra-Class Scheduling */
512#define QM_OAC_CG 0x1 /* Accounting for Congestion Groups */
513
514/*
515 * This struct represents the 32-bit "WR_PARM_[GYR]" parameters in CGR fields
516 * and associated commands/responses. The WRED parameters are calculated from
517 * these fields as follows;
518 * MaxTH = MA * (2 ^ Mn)
519 * Slope = SA / (2 ^ Sn)
520 * MaxP = 4 * (Pn + 1)
521 */
522struct qm_cgr_wr_parm {
523 /* MA[24-31], Mn[19-23], SA[12-18], Sn[6-11], Pn[0-5] */
524 u32 word;
525};
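/*
 * Illustrative helper only (editorial, not part of this interface): given the
 * field layout documented above, MaxTH can be recovered from a WR_PARM word.
 */
static inline u64 qm_cgr_wr_parm_maxth_example(const struct qm_cgr_wr_parm *p)
{
	u32 ma = (p->word >> 24) & 0xff;	/* MA[24-31] */
	u32 mn = (p->word >> 19) & 0x1f;	/* Mn[19-23] */

	return (u64)ma << mn;			/* MaxTH = MA * (2 ^ Mn) */
}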
526/*
527 * This struct represents the 13-bit "CS_THRES" CGR field. In the corresponding
528 * management commands, this is padded to a 16-bit structure field, so that's
529 * how we represent it here. The congestion state threshold is calculated from
530 * these fields as follows;
531 * CS threshold = TA * (2 ^ Tn)
532 */
533struct qm_cgr_cs_thres {
534 /* _res[13-15], TA[5-12], Tn[0-4] */
535 u16 word;
536};
537/*
538 * This identical structure of CGR fields is present in the "Init/Modify CGR"
539 * commands and the "Query CGR" result. It's suctioned out here into its own
540 * struct.
541 */
542struct __qm_mc_cgr {
543 struct qm_cgr_wr_parm wr_parm_g;
544 struct qm_cgr_wr_parm wr_parm_y;
545 struct qm_cgr_wr_parm wr_parm_r;
546 u8 wr_en_g; /* boolean, use QM_CGR_EN */
547 u8 wr_en_y; /* boolean, use QM_CGR_EN */
548 u8 wr_en_r; /* boolean, use QM_CGR_EN */
549 u8 cscn_en; /* boolean, use QM_CGR_EN */
550 union {
551 struct {
552 u16 cscn_targ_upd_ctrl; /* use QM_CSCN_TARG_UDP_ */
553 u16 cscn_targ_dcp_low; /* CSCN_TARG_DCP low-16bits */
554 };
555 u32 cscn_targ; /* use QM_CGR_TARG_* */
556 };
557 u8 cstd_en; /* boolean, use QM_CGR_EN */
558 u8 cs; /* boolean, only used in query response */
559 struct qm_cgr_cs_thres cs_thres; /* use qm_cgr_cs_thres_set64() */
560 u8 mode; /* QMAN_CGR_MODE_FRAME not supported in rev1.0 */
561} __packed;
562#define QM_CGR_EN 0x01 /* For wr_en_*, cscn_en, cstd_en */
563#define QM_CGR_TARG_UDP_CTRL_WRITE_BIT 0x8000 /* value written to portal bit*/
564#define QM_CGR_TARG_UDP_CTRL_DCP 0x4000 /* 0: SWP, 1: DCP */
565#define QM_CGR_TARG_PORTAL(n) (0x80000000 >> (n)) /* s/w portal, 0-9 */
566#define QM_CGR_TARG_FMAN0 0x00200000 /* direct-connect portal: fman0 */
567#define QM_CGR_TARG_FMAN1 0x00100000 /* : fman1 */
568/* Convert CGR thresholds to/from "cs_thres" format */
569static inline u64 qm_cgr_cs_thres_get64(const struct qm_cgr_cs_thres *th)
570{
571 return ((th->word >> 5) & 0xff) << (th->word & 0x1f);
572}
573
574static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val,
575 int roundup)
576{
577 u32 e = 0;
578 int oddbit = 0;
579
580 while (val > 0xff) {
581 oddbit = val & 1;
582 val >>= 1;
583 e++;
584 if (roundup && oddbit)
585 val++;
586 }
587 th->word = ((val & 0xff) << 5) | (e & 0x1f);
588 return 0;
589}
590
591/* "Initialize FQ" */
592struct qm_mcc_initfq {
593 u8 __reserved1[2];
594 u16 we_mask; /* Write Enable Mask */
595 u32 fqid; /* 24-bit */
596 u16 count; /* Initialises 'count+1' FQDs */
597 struct qm_fqd fqd; /* the FQD fields go here */
598 u8 __reserved2[30];
599} __packed;
600/* "Initialize/Modify CGR" */
601struct qm_mcc_initcgr {
602 u8 __reserve1[2];
603 u16 we_mask; /* Write Enable Mask */
604 struct __qm_mc_cgr cgr; /* CGR fields */
605 u8 __reserved2[2];
606 u8 cgid;
607 u8 __reserved3[32];
608} __packed;
609
610/* INITFQ-specific flags */
611#define QM_INITFQ_WE_MASK 0x01ff /* 'Write Enable' flags; */
612#define QM_INITFQ_WE_OAC 0x0100
613#define QM_INITFQ_WE_ORPC 0x0080
614#define QM_INITFQ_WE_CGID 0x0040
615#define QM_INITFQ_WE_FQCTRL 0x0020
616#define QM_INITFQ_WE_DESTWQ 0x0010
617#define QM_INITFQ_WE_ICSCRED 0x0008
618#define QM_INITFQ_WE_TDTHRESH 0x0004
619#define QM_INITFQ_WE_CONTEXTB 0x0002
620#define QM_INITFQ_WE_CONTEXTA 0x0001
621/* INITCGR/MODIFYCGR-specific flags */
622#define QM_CGR_WE_MASK 0x07ff /* 'Write Enable Mask'; */
623#define QM_CGR_WE_WR_PARM_G 0x0400
624#define QM_CGR_WE_WR_PARM_Y 0x0200
625#define QM_CGR_WE_WR_PARM_R 0x0100
626#define QM_CGR_WE_WR_EN_G 0x0080
627#define QM_CGR_WE_WR_EN_Y 0x0040
628#define QM_CGR_WE_WR_EN_R 0x0020
629#define QM_CGR_WE_CSCN_EN 0x0010
630#define QM_CGR_WE_CSCN_TARG 0x0008
631#define QM_CGR_WE_CSTD_EN 0x0004
632#define QM_CGR_WE_CS_THRES 0x0002
633#define QM_CGR_WE_MODE 0x0001
634
635#define QMAN_CGR_FLAG_USE_INIT 0x00000001
636
637 /* Portal and Frame Queues */
638/* Represents a managed portal */
639struct qman_portal;
640
641/*
642 * This object type represents QMan frame queue descriptors (FQD), it is
643 * cacheline-aligned, and initialised by qman_create_fq(). The structure is
644 * defined further down.
645 */
646struct qman_fq;
647
648/*
649 * This object type represents a QMan congestion group, it is defined further
650 * down.
651 */
652struct qman_cgr;
653
654/*
655 * This enum, and the callback type that returns it, are used when handling
656 * dequeued frames via DQRR. Note that for "null" callbacks registered with the
657 * portal object (for handling dequeues that do not demux because contextB is
658 * NULL), the return value *MUST* be qman_cb_dqrr_consume.
659 */
660enum qman_cb_dqrr_result {
661 /* DQRR entry can be consumed */
662 qman_cb_dqrr_consume,
663 /* Like _consume, but requests parking - FQ must be held-active */
664 qman_cb_dqrr_park,
665 /* Does not consume, for DCA mode only. */
666 qman_cb_dqrr_defer,
667 /*
668 * Stop processing without consuming this ring entry. Exits the current
669 * qman_p_poll_dqrr() or interrupt-handling, as appropriate. If within
670 * an interrupt handler, the callback would typically call
671 * qman_irqsource_remove(QM_PIRQ_DQRI) before returning this value,
672 * otherwise the interrupt will reassert immediately.
673 */
674 qman_cb_dqrr_stop,
675 /* Like qman_cb_dqrr_stop, but consumes the current entry. */
676 qman_cb_dqrr_consume_stop
677};
678typedef enum qman_cb_dqrr_result (*qman_cb_dqrr)(struct qman_portal *qm,
679 struct qman_fq *fq,
680 const struct qm_dqrr_entry *dqrr);
681
682/*
683 * This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They
684 * are always consumed after the callback returns.
685 */
686typedef void (*qman_cb_mr)(struct qman_portal *qm, struct qman_fq *fq,
687 const union qm_mr_entry *msg);
688
689/*
690 * s/w-visible states. Ie. tentatively scheduled + truly scheduled + active +
691 * held-active + held-suspended are just "sched". Things like "retired" will not
692 * be assumed until it is complete (ie. QMAN_FQ_STATE_CHANGING is set until
693 * then, to indicate it's completing and to gate attempts to retry the retire
694 * command). Note, park commands do not set QMAN_FQ_STATE_CHANGING because it's
695 * technically impossible in the case of enqueue DCAs (which refer to DQRR ring
696 * index rather than the FQ that ring entry corresponds to), so repeated park
697 * commands are allowed (if you're silly enough to try) but won't change FQ
698 * state, and the resulting park notifications move FQs from "sched" to
699 * "parked".
700 */
701enum qman_fq_state {
702 qman_fq_state_oos,
703 qman_fq_state_parked,
704 qman_fq_state_sched,
705 qman_fq_state_retired
706};
707
708#define QMAN_FQ_STATE_CHANGING 0x80000000 /* 'state' is changing */
709#define QMAN_FQ_STATE_NE 0x40000000 /* retired FQ isn't empty */
710#define QMAN_FQ_STATE_ORL 0x20000000 /* retired FQ has ORL */
711#define QMAN_FQ_STATE_BLOCKOOS 0xe0000000 /* if any are set, no OOS */
712#define QMAN_FQ_STATE_CGR_EN 0x10000000 /* CGR enabled */
713#define QMAN_FQ_STATE_VDQCR 0x08000000 /* being volatile dequeued */
714
715/*
716 * Frame queue objects (struct qman_fq) are stored within memory passed to
717 * qman_create_fq(), as this allows stashing of caller-provided demux callback
718 * pointers at no extra cost to stashing of (driver-internal) FQ state. If the
719 * caller wishes to add per-FQ state and have it benefit from dequeue-stashing,
720 * they should;
721 *
722 * (a) extend the qman_fq structure with their state; eg.
723 *
724 * // myfq is allocated and driver_fq callbacks filled in;
725 * struct my_fq {
726 * struct qman_fq base;
727 * int an_extra_field;
728 * [ ... add other fields to be associated with each FQ ...]
729 * } *myfq = some_my_fq_allocator();
730 * struct qman_fq *fq = qman_create_fq(fqid, flags, &myfq->base);
731 *
732 * // in a dequeue callback, access extra fields from 'fq' via a cast;
733 * struct my_fq *myfq = (struct my_fq *)fq;
734 * do_something_with(myfq->an_extra_field);
735 * [...]
736 *
737 * (b) when and if configuring the FQ for context stashing, specify however
738 * many cachelines are required to stash 'struct my_fq', to accelerate not
739 * only the QMan driver but the callback as well.
740 */
741
742struct qman_fq_cb {
743 qman_cb_dqrr dqrr; /* for dequeued frames */
744 qman_cb_mr ern; /* for s/w ERNs */
745 qman_cb_mr fqs; /* frame-queue state changes*/
746};
747
748struct qman_fq {
749 /* Caller of qman_create_fq() provides these demux callbacks */
750 struct qman_fq_cb cb;
751 /*
752 * These are internal to the driver, don't touch. In particular, they
753 * may change, be removed, or extended (so you shouldn't rely on
754 * sizeof(qman_fq) being a constant).
755 */
756 u32 fqid, idx;
757 unsigned long flags;
758 enum qman_fq_state state;
759 int cgr_groupid;
760};
761
762/*
763 * This callback type is used when handling congestion group entry/exit.
764 * 'congested' is non-zero on congestion-entry, and zero on congestion-exit.
765 */
766typedef void (*qman_cb_cgr)(struct qman_portal *qm,
767 struct qman_cgr *cgr, int congested);
768
769struct qman_cgr {
770 /* Set these prior to qman_create_cgr() */
771 u32 cgrid; /* 0..255, but u32 to allow specials like -1, 256, etc.*/
772 qman_cb_cgr cb;
773 /* These are private to the driver */
774 u16 chan; /* portal channel this object is created on */
775 struct list_head node;
776};
777
778/* Flags to qman_create_fq() */
779#define QMAN_FQ_FLAG_NO_ENQUEUE 0x00000001 /* can't enqueue */
780#define QMAN_FQ_FLAG_NO_MODIFY 0x00000002 /* can only enqueue */
781#define QMAN_FQ_FLAG_TO_DCPORTAL 0x00000004 /* consumed by CAAM/PME/Fman */
782#define QMAN_FQ_FLAG_DYNAMIC_FQID 0x00000020 /* (de)allocate fqid */
783
784/* Flags to qman_init_fq() */
785#define QMAN_INITFQ_FLAG_SCHED 0x00000001 /* schedule rather than park */
786#define QMAN_INITFQ_FLAG_LOCAL 0x00000004 /* set dest portal */
787
788 /* Portal Management */
789/**
790 * qman_p_irqsource_add - add processing sources to be interrupt-driven
791 * @bits: bitmask of QM_PIRQ_**I processing sources
792 *
793 * Adds processing sources that should be interrupt-driven (rather than
794 * processed via qman_poll_***() functions).
795 */
796void qman_p_irqsource_add(struct qman_portal *p, u32 bits);
797
798/**
799 * qman_p_irqsource_remove - remove processing sources from being int-driven
800 * @bits: bitmask of QM_PIRQ_**I processing sources
801 *
802 * Removes processing sources from being interrupt-driven, so that they will
803 * instead be processed via qman_poll_***() functions.
804 */
805void qman_p_irqsource_remove(struct qman_portal *p, u32 bits);
806
807/**
808 * qman_affine_cpus - return a mask of cpus that have affine portals
809 */
810const cpumask_t *qman_affine_cpus(void);
811
812/**
813 * qman_affine_channel - return the channel ID of a portal
814 * @cpu: the cpu whose affine portal is the subject of the query
815 *
816 * If @cpu is -1, the affine portal for the current CPU will be used. It is a
817 * bug to call this function for any value of @cpu (other than -1) that is not a
818 * member of the mask returned from qman_affine_cpus().
819 */
820u16 qman_affine_channel(int cpu);
821
822/**
823 * qman_get_affine_portal - return the portal pointer affine to cpu
824 * @cpu: the cpu whose affine portal is the subject of the query
825 */
826struct qman_portal *qman_get_affine_portal(int cpu);
827
828/**
829 * qman_p_poll_dqrr - process DQRR (fast-path) entries
830 * @limit: the maximum number of DQRR entries to process
831 *
832 * Use of this function requires that DQRR processing not be interrupt-driven.
833 * The return value represents the number of DQRR entries processed.
834 */
835int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit);
836
837/**
838 * qman_p_static_dequeue_add - Add pool channels to the portal SDQCR
839 * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
840 *
841 * Adds a set of pool channels to the portal's static dequeue command register
842 * (SDQCR). The requested pools are limited to those the portal has dequeue
843 * access to.
844 */
845void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools);
846
847 /* FQ management */
848/**
849 * qman_create_fq - Allocates a FQ
850 * @fqid: the index of the FQD to encapsulate, must be "Out of Service"
851 * @flags: bit-mask of QMAN_FQ_FLAG_*** options
852 * @fq: memory for storing the 'fq', with callbacks filled in
853 *
854 * Creates a frame queue object for the given @fqid, unless the
855 * QMAN_FQ_FLAG_DYNAMIC_FQID flag is set in @flags, in which case a FQID is
856 * dynamically allocated (or the function fails if none are available). Once
857 * created, the caller should not touch the memory at 'fq' except as extended to
858 * adjacent memory for user-defined fields (see the definition of "struct
859 * qman_fq" for more info). NO_MODIFY is only intended for enqueuing to
860 * pre-existing frame-queues that aren't to be otherwise interfered with, it
861 * prevents all other modifications to the frame queue. The TO_DCPORTAL flag
862 * causes the driver to honour any contextB modifications requested in the
863 * qm_init_fq() API, as this indicates the frame queue will be consumed by a
864 * direct-connect portal (PME, CAAM, or Fman). When frame queues are consumed by
865 * software portals, the contextB field is controlled by the driver and can't be
866 * modified by the caller.
867 */
868int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);
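
/*
 * Illustrative sketch (editorial addition): creates a frame queue object with
 * a dynamically allocated FQID. @fq is assumed to have its callbacks filled
 * in beforehand, as required above; the example_* name is hypothetical.
 */
static inline int example_create_dynamic_fq(struct qman_fq *fq)
{
        /* the fqid argument is ignored when QMAN_FQ_FLAG_DYNAMIC_FQID is set */
        return qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
}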
869
870/**
871 * qman_destroy_fq - Deallocates a FQ
872 * @fq: the frame queue object to release
873 *
874 * The memory for this frame queue object ('fq' provided in qman_create_fq()) is
875 * not deallocated but the caller regains ownership, to do with as desired. The
876 * FQ must be in the 'out-of-service' or 'parked' state.
877 */
878void qman_destroy_fq(struct qman_fq *fq);
879
880/**
881 * qman_fq_fqid - Queries the frame queue ID of a FQ object
882 * @fq: the frame queue object to query
883 */
884u32 qman_fq_fqid(struct qman_fq *fq);
885
886/**
887 * qman_init_fq - Initialises FQ fields, leaves the FQ "parked" or "scheduled"
888 * @fq: the frame queue object to modify, must be 'parked' or new.
889 * @flags: bit-mask of QMAN_INITFQ_FLAG_*** options
890 * @opts: the FQ-modification settings, as defined in the low-level API
891 *
892 * The @opts parameter comes from the low-level portal API. Select
893 * QMAN_INITFQ_FLAG_SCHED in @flags to cause the frame queue to be scheduled
894 * rather than parked. NB, @opts can be NULL.
895 *
896 * Note that some fields and options within @opts may be ignored or overwritten
897 * by the driver:
898 * 1. the 'count' and 'fqid' fields are always ignored (this operation only
899 * affects one frame queue: @fq).
900 * 2. the QM_INITFQ_WE_CONTEXTB option of the 'we_mask' field and the associated
901 * 'fqd' structure's 'context_b' field are sometimes overwritten;
902 * - if @fq was not created with QMAN_FQ_FLAG_TO_DCPORTAL, then context_b is
903 * initialised to a value used by the driver for demux.
904 * - if context_b is initialised for demux, so is context_a in case stashing
905 * is requested (see item 4).
906 * (So caller control of context_b is only possible for TO_DCPORTAL frame queue
907 * objects.)
908 * 3. if @flags contains QMAN_INITFQ_FLAG_LOCAL, the 'fqd' structure's
909 * 'dest::channel' field will be overwritten to match the portal used to issue
910 * the command. If the WE_DESTWQ write-enable bit had already been set by the
911 * caller, the channel workqueue will be left as-is, otherwise the write-enable
912 * bit is set and the workqueue is set to a default of 4. If the "LOCAL" flag
913 * isn't set, the destination channel/workqueue fields and the write-enable bit
914 * are left as-is.
915 * 4. if the driver overwrites context_a/b for demux, then if
916 * QM_INITFQ_WE_CONTEXTA is set, the driver will only overwrite
917 * context_a.address fields and will leave the stashing fields provided by the
918 * user alone, otherwise it will zero out the context_a.stashing fields.
919 */
920int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts);
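
/*
 * Illustrative sketch (editorial addition): initialises a newly created FQ
 * and schedules it on the issuing portal's channel. A NULL @opts is allowed,
 * per the notes above; the example_* name is hypothetical.
 */
static inline int example_init_and_schedule(struct qman_fq *fq)
{
        return qman_init_fq(fq,
                            QMAN_INITFQ_FLAG_SCHED | QMAN_INITFQ_FLAG_LOCAL,
                            NULL);
}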
921
922/**
923 * qman_schedule_fq - Schedules a FQ
924 * @fq: the frame queue object to schedule, must be 'parked'
925 *
926 * Schedules the frame queue, which must be Parked; this takes it to the
927 * Tentatively-Scheduled or Truly-Scheduled state depending on its fill-level.
928 */
929int qman_schedule_fq(struct qman_fq *fq);
930
931/**
932 * qman_retire_fq - Retires a FQ
933 * @fq: the frame queue object to retire
934 * @flags: FQ flags (QMAN_FQ_STATE*) if retirement completes immediately
935 *
936 * Retires the frame queue. This returns zero if it succeeds immediately, +1 if
937 * the retirement was started asynchronously, or a negative error code on
938 * failure. When this function returns zero, @flags is set to indicate whether
939 * the retired FQ is empty and/or whether it has any ORL fragments (to show up
940 * as ERNs). Otherwise the corresponding flags will be known when a subsequent
941 * FQRN message shows up on the portal's message ring.
942 *
943 * NB, if the retirement is asynchronous (the FQ was in the Truly Scheduled or
944 * Active state), the completion will be via the message ring as a FQRN - but
945 * the corresponding callback may occur before this function returns. I.e. the
946 * caller should be prepared to accept the callback while the function is still
947 * executing, not only once it has returned.
948 */
949int qman_retire_fq(struct qman_fq *fq, u32 *flags);
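
/*
 * Illustrative sketch (editorial addition): handles the three possible
 * outcomes of retirement described above. The example_* name is hypothetical;
 * reacting to the eventual FQRN is left to the fq's state-change callback.
 */
static inline int example_retire(struct qman_fq *fq)
{
        u32 flags = 0;
        int ret = qman_retire_fq(fq, &flags);

        if (ret < 0)
                return ret;     /* retirement could not be started */
        if (ret > 0)
                return ret;     /* started; completion arrives as an FQRN */
        /* ret == 0: retired immediately, QMAN_FQ_STATE* bits are in 'flags' */
        return 0;
}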
950
951/**
952 * qman_oos_fq - Puts a FQ "out of service"
953 * @fq: the frame queue object to be put out-of-service, must be 'retired'
954 *
955 * The frame queue must be retired and empty; any order restoration list
956 * fragments released as ERNs at retirement must all have been consumed.
957 */
958int qman_oos_fq(struct qman_fq *fq);
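
/*
 * Illustrative sketch (editorial addition): final teardown of a frame queue
 * whose retirement has already completed (and whose ORL, if any, has been
 * drained), per the requirement above. The example_* name is hypothetical.
 */
static inline void example_teardown_fq(struct qman_fq *fq)
{
        if (!qman_oos_fq(fq))
                qman_destroy_fq(fq);
}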
959
960/**
961 * qman_enqueue - Enqueue a frame to a frame queue
962 * @fq: the frame queue object to enqueue to
963 * @fd: a descriptor of the frame to be enqueued
964 *
965 * Fills an entry in the affine portal's EQCR to enqueue the frame described by
966 * @fd. The descriptor details are copied from @fd to the EQCR entry; the 'pid'
967 * field is ignored. The return value is non-zero on error, such as ring full.
968 */
969int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd);
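
/*
 * Illustrative sketch (editorial addition): a bounded retry loop around
 * qman_enqueue(). Since the API only distinguishes zero from non-zero here,
 * the loop retries on any error (e.g. a full EQCR). How @fd is built is not
 * shown; cpu_relax() is assumed available, and example_* is hypothetical.
 */
static inline int example_enqueue_retry(struct qman_fq *fq,
                                        const struct qm_fd *fd)
{
        int retries = 100;
        int ret;

        do {
                ret = qman_enqueue(fq, fd);
                if (!ret)
                        break;
                cpu_relax();
        } while (--retries);

        return ret;
}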
970
971/**
972 * qman_alloc_fqid_range - Allocate a contiguous range of FQIDs
973 * @result: is set by the API to the base FQID of the allocated range
974 * @count: the number of FQIDs required
975 *
976 * Returns 0 on success, or a negative error code.
977 */
978int qman_alloc_fqid_range(u32 *result, u32 count);
979#define qman_alloc_fqid(result) qman_alloc_fqid_range(result, 1)
980
981/**
982 * qman_release_fqid - Release the specified frame queue ID
983 * @fqid: the FQID to be released back to the resource pool
984 *
985 * This function can also be used to seed the allocator with
986 * FQID ranges that it can subsequently allocate from.
987 * Returns 0 on success, or a negative error code.
988 */
989int qman_release_fqid(u32 fqid);
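
/*
 * Illustrative sketch (editorial addition): allocates a block of eight FQIDs
 * and hands one back. As noted above, qman_release_fqid() on IDs that were
 * never allocated is also how a platform can seed the allocator. The
 * example_* name is hypothetical.
 */
static inline int example_fqid_block(u32 *base)
{
        int ret = qman_alloc_fqid_range(base, 8);

        if (ret)
                return ret;
        /* ... create FQs on *base .. *base + 6, keep the last one spare ... */
        qman_release_fqid(*base + 7);
        return 0;
}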
990
991 /* Pool-channel management */
992/**
993 * qman_alloc_pool_range - Allocate a contiguous range of pool-channel IDs
994 * @result: is set by the API to the base pool-channel ID of the allocated range
995 * @count: the number of pool-channel IDs required
996 *
997 * Returns 0 on success, or a negative error code.
998 */
999int qman_alloc_pool_range(u32 *result, u32 count);
1000#define qman_alloc_pool(result) qman_alloc_pool_range(result, 1)
1001
1002/**
1003 * qman_release_pool - Release the specified pool-channel ID
1004 * @id: the pool-chan ID to be released back to the resource pool
1005 *
1006 * This function can also be used to seed the allocator with
1007 * pool-channel ID ranges that it can subsequently allocate from.
1008 * Returns 0 on success, or a negative error code.
1009 */
1010int qman_release_pool(u32 id);
1011
1012 /* CGR management */
1013/**
1014 * qman_create_cgr - Register a congestion group object
1015 * @cgr: the 'cgr' object, with fields filled in
1016 * @flags: QMAN_CGR_FLAG_* values
1017 * @opts: optional state of CGR settings
1018 *
1019 * Registers this object to receive congestion entry/exit callbacks on the
1020 * portal affine to the cpu on which this API is executed. If opts is
1021 * NULL then only the callback (cgr->cb) function is registered. If @flags
1022 * contains QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset
1023 * any unspecified parameters) will be used rather than a modify hw command
1024 * (which only modifies the specified parameters).
1025 */
1026int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
1027 struct qm_mcc_initcgr *opts);
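
/*
 * Illustrative sketch (editorial addition): registers a congestion group on
 * the current CPU's affine portal. The caller is assumed to have filled in
 * cgr->cb (and the CGR's ID) beforehand; passing a NULL @opts leaves the
 * hardware CGR settings untouched, whereas a QMAN_CGR_FLAG_USE_INIT call with
 * a populated qm_mcc_initcgr would reset them. example_* is hypothetical.
 */
static inline int example_register_cgr(struct qman_cgr *cgr)
{
        return qman_create_cgr(cgr, 0, NULL);
}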
1028
1029/**
1030 * qman_delete_cgr - Deregisters a congestion group object
1031 * @cgr: the 'cgr' object to deregister
1032 *
1033 * "Unplugs" this CGR object from the portal affine to the cpu on which this API
1034 * is executed. This must be executed on the same affine portal on which it was
1035 * created.
1036 */
1037int qman_delete_cgr(struct qman_cgr *cgr);
1038
1039/**
1040 * qman_delete_cgr_safe - Deregisters a congestion group object from any CPU
1041 * @cgr: the 'cgr' object to deregister
1042 *
1043 * This will select the proper CPU and run qman_delete_cgr() there.
1044 */
1045void qman_delete_cgr_safe(struct qman_cgr *cgr);
1046
1047/**
1048 * qman_query_cgr_congested - Queries CGR's congestion status
1049 * @cgr: the 'cgr' object to query
1050 * @result: returns 'cgr's congestion status, 1 (true) if congested
1051 */
1052int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result);
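
/*
 * Illustrative sketch (editorial addition): polls a CGR's congestion state,
 * treating a failed query as "not congested". The example_* name is
 * hypothetical.
 */
static inline bool example_cgr_is_congested(struct qman_cgr *cgr)
{
        bool congested = false;

        if (qman_query_cgr_congested(cgr, &congested))
                return false;
        return congested;
}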
1053
1054/**
1055 * qman_alloc_cgrid_range - Allocate a contiguous range of CGR IDs
1056 * @result: is set by the API to the base CGR ID of the allocated range
1057 * @count: the number of CGR IDs required
1058 *
1059 * Returns 0 on success, or a negative error code.
1060 */
1061int qman_alloc_cgrid_range(u32 *result, u32 count);
1062#define qman_alloc_cgrid(result) qman_alloc_cgrid_range(result, 1)
1063
1064/**
1065 * qman_release_cgrid - Release the specified CGR ID
1066 * @id: the CGR ID to be released back to the resource pool
1067 *
1068 * This function can also be used to seed the allocator with
1069 * CGR ID ranges that it can subsequently allocate from.
1070 * Returns 0 on success, or a negative error code.
1071 */
1072int qman_release_cgrid(u32 id);
1073
1074#endif /* __FSL_QMAN_H */