summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorOlof Johansson <olof@lixom.net>2019-06-17 07:38:06 -0400
committerOlof Johansson <olof@lixom.net>2019-06-17 07:38:06 -0400
commit112603739338346eb2f34f3d3ab94b04731ab83d (patch)
tree0346312de6502fcd2882d5373c1018a4eee15267
parent9e0babf2c06c73cda2c0cd37a1653d823adb40ec (diff)
parent5d1d046e2868fc876a69231eb2f24f000b521f1c (diff)
Merge tag 'soc-fsl-next-v5.3' of git://git.kernel.org/pub/scm/linux/kernel/git/leo/linux into arm/drivers
NXP/FSL SoC driver updates for v5.3 DPAA2 Console driver - Add driver to export two char devices to dump logs for MC and AIOP DPAA2 DPIO driver - Add support for memory backed QBMan portals - Increase the timeout period to prevent false error - Add APIs to retrieve QBMan portal probing status DPAA Qman driver - Only make liodn fixup on powerpc SoCs with PAMU iommu * tag 'soc-fsl-next-v5.3' of git://git.kernel.org/pub/scm/linux/kernel/git/leo/linux: soc: fsl: qbman_portals: add APIs to retrieve the probing status soc: fsl: qman: fixup liodns only on ppc targets soc: fsl: dpio: Add support for memory backed QBMan portals bus: mc-bus: Add support for mapping shareable portals soc: fsl: dpio: Increase timeout for QBMan Management Commands soc: fsl: add DPAA2 console support Documentation: DT: Add entry for DPAA2 console soc: fsl: guts: Add definition for LX2160A Signed-off-by: Olof Johansson <olof@lixom.net>
-rw-r--r--Documentation/devicetree/bindings/misc/fsl,dpaa2-console.txt11
-rw-r--r--MAINTAINERS1
-rw-r--r--drivers/bus/fsl-mc/dprc.c30
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-bus.c15
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-private.h17
-rw-r--r--drivers/soc/fsl/Kconfig10
-rw-r--r--drivers/soc/fsl/Makefile1
-rw-r--r--drivers/soc/fsl/dpaa2-console.c329
-rw-r--r--drivers/soc/fsl/dpio/dpio-driver.c23
-rw-r--r--drivers/soc/fsl/dpio/qbman-portal.c148
-rw-r--r--drivers/soc/fsl/dpio/qbman-portal.h9
-rw-r--r--drivers/soc/fsl/guts.c6
-rw-r--r--drivers/soc/fsl/qbman/bman_portal.c20
-rw-r--r--drivers/soc/fsl/qbman/qman_ccsr.c2
-rw-r--r--drivers/soc/fsl/qbman/qman_portal.c21
-rw-r--r--drivers/soc/fsl/qbman/qman_priv.h9
-rw-r--r--include/soc/fsl/bman.h8
-rw-r--r--include/soc/fsl/qman.h9
18 files changed, 618 insertions, 51 deletions
diff --git a/Documentation/devicetree/bindings/misc/fsl,dpaa2-console.txt b/Documentation/devicetree/bindings/misc/fsl,dpaa2-console.txt
new file mode 100644
index 000000000000..1442ba5d2d98
--- /dev/null
+++ b/Documentation/devicetree/bindings/misc/fsl,dpaa2-console.txt
@@ -0,0 +1,11 @@
1DPAA2 console support
2
3Required properties:
4
5 - compatible
6 Value type: <string>
7 Definition: Must be "fsl,dpaa2-console".
8 - reg
9 Value type: <prop-encoded-array>
10 Definition: A standard property. Specifies the region where the MCFBA
11 (MC firmware base address) register can be found.
diff --git a/MAINTAINERS b/MAINTAINERS
index 57f496cff999..70e9c0afbffb 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6416,6 +6416,7 @@ M: Li Yang <leoyang.li@nxp.com>
6416L: linuxppc-dev@lists.ozlabs.org 6416L: linuxppc-dev@lists.ozlabs.org
6417L: linux-arm-kernel@lists.infradead.org 6417L: linux-arm-kernel@lists.infradead.org
6418S: Maintained 6418S: Maintained
6419F: Documentation/devicetree/bindings/misc/fsl,dpaa2-console.txt
6419F: Documentation/devicetree/bindings/soc/fsl/ 6420F: Documentation/devicetree/bindings/soc/fsl/
6420F: drivers/soc/fsl/ 6421F: drivers/soc/fsl/
6421F: include/linux/fsl/ 6422F: include/linux/fsl/
diff --git a/drivers/bus/fsl-mc/dprc.c b/drivers/bus/fsl-mc/dprc.c
index 1c3f62182266..0fe3f52ae0de 100644
--- a/drivers/bus/fsl-mc/dprc.c
+++ b/drivers/bus/fsl-mc/dprc.c
@@ -443,11 +443,31 @@ int dprc_get_obj_region(struct fsl_mc_io *mc_io,
443 struct fsl_mc_command cmd = { 0 }; 443 struct fsl_mc_command cmd = { 0 };
444 struct dprc_cmd_get_obj_region *cmd_params; 444 struct dprc_cmd_get_obj_region *cmd_params;
445 struct dprc_rsp_get_obj_region *rsp_params; 445 struct dprc_rsp_get_obj_region *rsp_params;
446 u16 major_ver, minor_ver;
446 int err; 447 int err;
447 448
448 /* prepare command */ 449 /* prepare command */
449 cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG, 450 err = dprc_get_api_version(mc_io, 0,
450 cmd_flags, token); 451 &major_ver,
452 &minor_ver);
453 if (err)
454 return err;
455
456 /**
457 * MC API version 6.3 introduced a new field to the region
458 * descriptor: base_address. If the older API is in use then the base
459 * address is set to zero to indicate it needs to be obtained elsewhere
460 * (typically the device tree).
461 */
462 if (major_ver > 6 || (major_ver == 6 && minor_ver >= 3))
463 cmd.header =
464 mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG_V2,
465 cmd_flags, token);
466 else
467 cmd.header =
468 mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG,
469 cmd_flags, token);
470
451 cmd_params = (struct dprc_cmd_get_obj_region *)cmd.params; 471 cmd_params = (struct dprc_cmd_get_obj_region *)cmd.params;
452 cmd_params->obj_id = cpu_to_le32(obj_id); 472 cmd_params->obj_id = cpu_to_le32(obj_id);
453 cmd_params->region_index = region_index; 473 cmd_params->region_index = region_index;
@@ -461,8 +481,12 @@ int dprc_get_obj_region(struct fsl_mc_io *mc_io,
461 481
462 /* retrieve response parameters */ 482 /* retrieve response parameters */
463 rsp_params = (struct dprc_rsp_get_obj_region *)cmd.params; 483 rsp_params = (struct dprc_rsp_get_obj_region *)cmd.params;
464 region_desc->base_offset = le64_to_cpu(rsp_params->base_addr); 484 region_desc->base_offset = le64_to_cpu(rsp_params->base_offset);
465 region_desc->size = le32_to_cpu(rsp_params->size); 485 region_desc->size = le32_to_cpu(rsp_params->size);
486 if (major_ver > 6 || (major_ver == 6 && minor_ver >= 3))
487 region_desc->base_address = le64_to_cpu(rsp_params->base_addr);
488 else
489 region_desc->base_address = 0;
466 490
467 return 0; 491 return 0;
468} 492}
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index f0404c6d1ff4..5c9bf2e06552 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -487,10 +487,19 @@ static int fsl_mc_device_get_mmio_regions(struct fsl_mc_device *mc_dev,
487 "dprc_get_obj_region() failed: %d\n", error); 487 "dprc_get_obj_region() failed: %d\n", error);
488 goto error_cleanup_regions; 488 goto error_cleanup_regions;
489 } 489 }
490 490 /*
491 error = translate_mc_addr(mc_dev, mc_region_type, 491 * Older MC only returned region offset and no base address
492 * If base address is in the region_desc use it otherwise
493 * revert to old mechanism
494 */
495 if (region_desc.base_address)
496 regions[i].start = region_desc.base_address +
497 region_desc.base_offset;
498 else
499 error = translate_mc_addr(mc_dev, mc_region_type,
492 region_desc.base_offset, 500 region_desc.base_offset,
493 &regions[i].start); 501 &regions[i].start);
502
494 if (error < 0) { 503 if (error < 0) {
495 dev_err(parent_dev, 504 dev_err(parent_dev,
496 "Invalid MC offset: %#x (for %s.%d\'s region %d)\n", 505 "Invalid MC offset: %#x (for %s.%d\'s region %d)\n",
@@ -504,6 +513,8 @@ static int fsl_mc_device_get_mmio_regions(struct fsl_mc_device *mc_dev,
504 regions[i].flags = IORESOURCE_IO; 513 regions[i].flags = IORESOURCE_IO;
505 if (region_desc.flags & DPRC_REGION_CACHEABLE) 514 if (region_desc.flags & DPRC_REGION_CACHEABLE)
506 regions[i].flags |= IORESOURCE_CACHEABLE; 515 regions[i].flags |= IORESOURCE_CACHEABLE;
516 if (region_desc.flags & DPRC_REGION_SHAREABLE)
517 regions[i].flags |= IORESOURCE_MEM;
507 } 518 }
508 519
509 mc_dev->regions = regions; 520 mc_dev->regions = regions;
diff --git a/drivers/bus/fsl-mc/fsl-mc-private.h b/drivers/bus/fsl-mc/fsl-mc-private.h
index ea11b4fe59f7..020fcc04ec8b 100644
--- a/drivers/bus/fsl-mc/fsl-mc-private.h
+++ b/drivers/bus/fsl-mc/fsl-mc-private.h
@@ -79,9 +79,11 @@ int dpmcp_reset(struct fsl_mc_io *mc_io,
79 79
80/* DPRC command versioning */ 80/* DPRC command versioning */
81#define DPRC_CMD_BASE_VERSION 1 81#define DPRC_CMD_BASE_VERSION 1
82#define DPRC_CMD_2ND_VERSION 2
82#define DPRC_CMD_ID_OFFSET 4 83#define DPRC_CMD_ID_OFFSET 4
83 84
84#define DPRC_CMD(id) (((id) << DPRC_CMD_ID_OFFSET) | DPRC_CMD_BASE_VERSION) 85#define DPRC_CMD(id) (((id) << DPRC_CMD_ID_OFFSET) | DPRC_CMD_BASE_VERSION)
86#define DPRC_CMD_V2(id) (((id) << DPRC_CMD_ID_OFFSET) | DPRC_CMD_2ND_VERSION)
85 87
86/* DPRC command IDs */ 88/* DPRC command IDs */
87#define DPRC_CMDID_CLOSE DPRC_CMD(0x800) 89#define DPRC_CMDID_CLOSE DPRC_CMD(0x800)
@@ -100,6 +102,7 @@ int dpmcp_reset(struct fsl_mc_io *mc_io,
100#define DPRC_CMDID_GET_OBJ_COUNT DPRC_CMD(0x159) 102#define DPRC_CMDID_GET_OBJ_COUNT DPRC_CMD(0x159)
101#define DPRC_CMDID_GET_OBJ DPRC_CMD(0x15A) 103#define DPRC_CMDID_GET_OBJ DPRC_CMD(0x15A)
102#define DPRC_CMDID_GET_OBJ_REG DPRC_CMD(0x15E) 104#define DPRC_CMDID_GET_OBJ_REG DPRC_CMD(0x15E)
105#define DPRC_CMDID_GET_OBJ_REG_V2 DPRC_CMD_V2(0x15E)
103#define DPRC_CMDID_SET_OBJ_IRQ DPRC_CMD(0x15F) 106#define DPRC_CMDID_SET_OBJ_IRQ DPRC_CMD(0x15F)
104 107
105struct dprc_cmd_open { 108struct dprc_cmd_open {
@@ -199,9 +202,16 @@ struct dprc_rsp_get_obj_region {
199 /* response word 0 */ 202 /* response word 0 */
200 __le64 pad; 203 __le64 pad;
201 /* response word 1 */ 204 /* response word 1 */
202 __le64 base_addr; 205 __le64 base_offset;
203 /* response word 2 */ 206 /* response word 2 */
204 __le32 size; 207 __le32 size;
208 __le32 pad2;
209 /* response word 3 */
210 __le32 flags;
211 __le32 pad3;
212 /* response word 4 */
213 /* base_addr may be zero if older MC firmware is used */
214 __le64 base_addr;
205}; 215};
206 216
207struct dprc_cmd_set_obj_irq { 217struct dprc_cmd_set_obj_irq {
@@ -334,6 +344,7 @@ int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
334/* Region flags */ 344/* Region flags */
335/* Cacheable - Indicates that region should be mapped as cacheable */ 345/* Cacheable - Indicates that region should be mapped as cacheable */
336#define DPRC_REGION_CACHEABLE 0x00000001 346#define DPRC_REGION_CACHEABLE 0x00000001
347#define DPRC_REGION_SHAREABLE 0x00000002
337 348
338/** 349/**
339 * enum dprc_region_type - Region type 350 * enum dprc_region_type - Region type
@@ -342,7 +353,8 @@ int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
342 */ 353 */
343enum dprc_region_type { 354enum dprc_region_type {
344 DPRC_REGION_TYPE_MC_PORTAL, 355 DPRC_REGION_TYPE_MC_PORTAL,
345 DPRC_REGION_TYPE_QBMAN_PORTAL 356 DPRC_REGION_TYPE_QBMAN_PORTAL,
357 DPRC_REGION_TYPE_QBMAN_MEM_BACKED_PORTAL
346}; 358};
347 359
348/** 360/**
@@ -360,6 +372,7 @@ struct dprc_region_desc {
360 u32 size; 372 u32 size;
361 u32 flags; 373 u32 flags;
362 enum dprc_region_type type; 374 enum dprc_region_type type;
375 u64 base_address;
363}; 376};
364 377
365int dprc_get_obj_region(struct fsl_mc_io *mc_io, 378int dprc_get_obj_region(struct fsl_mc_io *mc_io,
diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig
index 217f7752cf2c..f9ad8ad54a7d 100644
--- a/drivers/soc/fsl/Kconfig
+++ b/drivers/soc/fsl/Kconfig
@@ -30,4 +30,14 @@ config FSL_MC_DPIO
30 other DPAA2 objects. This driver does not expose the DPIO 30 other DPAA2 objects. This driver does not expose the DPIO
31 objects individually, but groups them under a service layer 31 objects individually, but groups them under a service layer
32 API. 32 API.
33
34config DPAA2_CONSOLE
35 tristate "QorIQ DPAA2 console driver"
36 depends on OF && (ARCH_LAYERSCAPE || COMPILE_TEST)
37 default y
38 help
39 Console driver for DPAA2 platforms. Exports 2 char devices,
40 /dev/dpaa2_mc_console and /dev/dpaa2_aiop_console,
41 which can be used to dump the Management Complex and AIOP
42 firmware logs.
33endmenu 43endmenu
diff --git a/drivers/soc/fsl/Makefile b/drivers/soc/fsl/Makefile
index 158541a83d26..71dee8d0d1f0 100644
--- a/drivers/soc/fsl/Makefile
+++ b/drivers/soc/fsl/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_QUICC_ENGINE) += qe/
8obj-$(CONFIG_CPM) += qe/ 8obj-$(CONFIG_CPM) += qe/
9obj-$(CONFIG_FSL_GUTS) += guts.o 9obj-$(CONFIG_FSL_GUTS) += guts.o
10obj-$(CONFIG_FSL_MC_DPIO) += dpio/ 10obj-$(CONFIG_FSL_MC_DPIO) += dpio/
11obj-$(CONFIG_DPAA2_CONSOLE) += dpaa2-console.o
diff --git a/drivers/soc/fsl/dpaa2-console.c b/drivers/soc/fsl/dpaa2-console.c
new file mode 100644
index 000000000000..9168d8ddc932
--- /dev/null
+++ b/drivers/soc/fsl/dpaa2-console.c
@@ -0,0 +1,329 @@
1// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2/*
3 * Freescale DPAA2 Platforms Console Driver
4 *
5 * Copyright 2015-2016 Freescale Semiconductor Inc.
6 * Copyright 2018 NXP
7 */
8
9#define pr_fmt(fmt) "dpaa2-console: " fmt
10
11#include <linux/module.h>
12#include <linux/of_device.h>
13#include <linux/of_address.h>
14#include <linux/miscdevice.h>
15#include <linux/uaccess.h>
16#include <linux/slab.h>
17#include <linux/fs.h>
18#include <linux/io.h>
19
20/* MC firmware base low/high registers indexes */
21#define MCFBALR_OFFSET 0
22#define MCFBAHR_OFFSET 1
23
24/* Bit masks used to get the most/least significant part of the MC base addr */
25#define MC_FW_ADDR_MASK_HIGH 0x1FFFF
26#define MC_FW_ADDR_MASK_LOW 0xE0000000
27
28#define MC_BUFFER_OFFSET 0x01000000
29#define MC_BUFFER_SIZE (1024 * 1024 * 16)
30#define MC_OFFSET_DELTA MC_BUFFER_OFFSET
31
32#define AIOP_BUFFER_OFFSET 0x06000000
33#define AIOP_BUFFER_SIZE (1024 * 1024 * 16)
34#define AIOP_OFFSET_DELTA 0
35
36#define LOG_HEADER_FLAG_BUFFER_WRAPAROUND 0x80000000
37#define LAST_BYTE(a) ((a) & ~(LOG_HEADER_FLAG_BUFFER_WRAPAROUND))
38
39/* MC and AIOP Magic words */
40#define MAGIC_MC 0x4d430100
41#define MAGIC_AIOP 0x41494F50
42
43struct log_header {
44 __le32 magic_word;
45 char reserved[4];
46 __le32 buf_start;
47 __le32 buf_length;
48 __le32 last_byte;
49};
50
51struct console_data {
52 void __iomem *map_addr;
53 struct log_header __iomem *hdr;
54 void __iomem *start_addr;
55 void __iomem *end_addr;
56 void __iomem *end_of_data;
57 void __iomem *cur_ptr;
58};
59
60static struct resource mc_base_addr;
61
62static inline void adjust_end(struct console_data *cd)
63{
64 u32 last_byte = readl(&cd->hdr->last_byte);
65
66 cd->end_of_data = cd->start_addr + LAST_BYTE(last_byte);
67}
68
69static u64 get_mc_fw_base_address(void)
70{
71 u64 mcfwbase = 0ULL;
72 u32 __iomem *mcfbaregs;
73
74 mcfbaregs = ioremap(mc_base_addr.start, resource_size(&mc_base_addr));
75 if (!mcfbaregs) {
 76 pr_err("could not map MC Firmware Base registers\n");
77 return 0;
78 }
79
80 mcfwbase = readl(mcfbaregs + MCFBAHR_OFFSET) &
81 MC_FW_ADDR_MASK_HIGH;
82 mcfwbase <<= 32;
83 mcfwbase |= readl(mcfbaregs + MCFBALR_OFFSET) & MC_FW_ADDR_MASK_LOW;
84 iounmap(mcfbaregs);
85
86 pr_debug("MC base address at 0x%016llx\n", mcfwbase);
87 return mcfwbase;
88}
89
90static ssize_t dpaa2_console_size(struct console_data *cd)
91{
92 ssize_t size;
93
94 if (cd->cur_ptr <= cd->end_of_data)
95 size = cd->end_of_data - cd->cur_ptr;
96 else
97 size = (cd->end_addr - cd->cur_ptr) +
98 (cd->end_of_data - cd->start_addr);
99
100 return size;
101}
102
103static int dpaa2_generic_console_open(struct inode *node, struct file *fp,
104 u64 offset, u64 size,
105 u32 expected_magic,
106 u32 offset_delta)
107{
108 u32 read_magic, wrapped, last_byte, buf_start, buf_length;
109 struct console_data *cd;
110 u64 base_addr;
111 int err;
112
113 cd = kmalloc(sizeof(*cd), GFP_KERNEL);
114 if (!cd)
115 return -ENOMEM;
116
117 base_addr = get_mc_fw_base_address();
118 if (!base_addr) {
119 err = -EIO;
120 goto err_fwba;
121 }
122
123 cd->map_addr = ioremap(base_addr + offset, size);
124 if (!cd->map_addr) {
125 pr_err("cannot map console log memory\n");
126 err = -EIO;
127 goto err_ioremap;
128 }
129
130 cd->hdr = (struct log_header __iomem *)cd->map_addr;
131 read_magic = readl(&cd->hdr->magic_word);
132 last_byte = readl(&cd->hdr->last_byte);
133 buf_start = readl(&cd->hdr->buf_start);
134 buf_length = readl(&cd->hdr->buf_length);
135
136 if (read_magic != expected_magic) {
137 pr_warn("expected = %08x, read = %08x\n",
138 expected_magic, read_magic);
139 err = -EIO;
140 goto err_magic;
141 }
142
143 cd->start_addr = cd->map_addr + buf_start - offset_delta;
144 cd->end_addr = cd->start_addr + buf_length;
145
146 wrapped = last_byte & LOG_HEADER_FLAG_BUFFER_WRAPAROUND;
147
148 adjust_end(cd);
149 if (wrapped && cd->end_of_data != cd->end_addr)
150 cd->cur_ptr = cd->end_of_data + 1;
151 else
152 cd->cur_ptr = cd->start_addr;
153
154 fp->private_data = cd;
155
156 return 0;
157
158err_magic:
159 iounmap(cd->map_addr);
160
161err_ioremap:
162err_fwba:
163 kfree(cd);
164
165 return err;
166}
167
168static int dpaa2_mc_console_open(struct inode *node, struct file *fp)
169{
170 return dpaa2_generic_console_open(node, fp,
171 MC_BUFFER_OFFSET, MC_BUFFER_SIZE,
172 MAGIC_MC, MC_OFFSET_DELTA);
173}
174
175static int dpaa2_aiop_console_open(struct inode *node, struct file *fp)
176{
177 return dpaa2_generic_console_open(node, fp,
178 AIOP_BUFFER_OFFSET, AIOP_BUFFER_SIZE,
179 MAGIC_AIOP, AIOP_OFFSET_DELTA);
180}
181
182static int dpaa2_console_close(struct inode *node, struct file *fp)
183{
184 struct console_data *cd = fp->private_data;
185
186 iounmap(cd->map_addr);
187 kfree(cd);
188 return 0;
189}
190
191static ssize_t dpaa2_console_read(struct file *fp, char __user *buf,
192 size_t count, loff_t *f_pos)
193{
194 struct console_data *cd = fp->private_data;
195 size_t bytes = dpaa2_console_size(cd);
196 size_t bytes_end = cd->end_addr - cd->cur_ptr;
197 size_t written = 0;
198 void *kbuf;
199 int err;
200
201 /* Check if we need to adjust the end of data addr */
202 adjust_end(cd);
203
204 if (cd->end_of_data == cd->cur_ptr)
205 return 0;
206
207 if (count < bytes)
208 bytes = count;
209
210 kbuf = kmalloc(bytes, GFP_KERNEL);
211 if (!kbuf)
212 return -ENOMEM;
213
214 if (bytes > bytes_end) {
215 memcpy_fromio(kbuf, cd->cur_ptr, bytes_end);
216 if (copy_to_user(buf, kbuf, bytes_end)) {
217 err = -EFAULT;
218 goto err_free_buf;
219 }
220 buf += bytes_end;
221 cd->cur_ptr = cd->start_addr;
222 bytes -= bytes_end;
223 written += bytes_end;
224 }
225
226 memcpy_fromio(kbuf, cd->cur_ptr, bytes);
227 if (copy_to_user(buf, kbuf, bytes)) {
228 err = -EFAULT;
229 goto err_free_buf;
230 }
231 cd->cur_ptr += bytes;
232 written += bytes;
233
234 return written;
235
236err_free_buf:
237 kfree(kbuf);
238
239 return err;
240}
241
242static const struct file_operations dpaa2_mc_console_fops = {
243 .owner = THIS_MODULE,
244 .open = dpaa2_mc_console_open,
245 .release = dpaa2_console_close,
246 .read = dpaa2_console_read,
247};
248
249static struct miscdevice dpaa2_mc_console_dev = {
250 .minor = MISC_DYNAMIC_MINOR,
251 .name = "dpaa2_mc_console",
252 .fops = &dpaa2_mc_console_fops
253};
254
255static const struct file_operations dpaa2_aiop_console_fops = {
256 .owner = THIS_MODULE,
257 .open = dpaa2_aiop_console_open,
258 .release = dpaa2_console_close,
259 .read = dpaa2_console_read,
260};
261
262static struct miscdevice dpaa2_aiop_console_dev = {
263 .minor = MISC_DYNAMIC_MINOR,
264 .name = "dpaa2_aiop_console",
265 .fops = &dpaa2_aiop_console_fops
266};
267
268static int dpaa2_console_probe(struct platform_device *pdev)
269{
270 int error;
271
272 error = of_address_to_resource(pdev->dev.of_node, 0, &mc_base_addr);
273 if (error < 0) {
274 pr_err("of_address_to_resource() failed for %pOF with %d\n",
275 pdev->dev.of_node, error);
276 return error;
277 }
278
279 error = misc_register(&dpaa2_mc_console_dev);
280 if (error) {
281 pr_err("cannot register device %s\n",
282 dpaa2_mc_console_dev.name);
283 goto err_register_mc;
284 }
285
286 error = misc_register(&dpaa2_aiop_console_dev);
287 if (error) {
288 pr_err("cannot register device %s\n",
289 dpaa2_aiop_console_dev.name);
290 goto err_register_aiop;
291 }
292
293 return 0;
294
295err_register_aiop:
296 misc_deregister(&dpaa2_mc_console_dev);
297err_register_mc:
298 return error;
299}
300
301static int dpaa2_console_remove(struct platform_device *pdev)
302{
303 misc_deregister(&dpaa2_mc_console_dev);
304 misc_deregister(&dpaa2_aiop_console_dev);
305
306 return 0;
307}
308
309static const struct of_device_id dpaa2_console_match_table[] = {
310 { .compatible = "fsl,dpaa2-console",},
311 {},
312};
313
314MODULE_DEVICE_TABLE(of, dpaa2_console_match_table);
315
316static struct platform_driver dpaa2_console_driver = {
317 .driver = {
318 .name = "dpaa2-console",
319 .pm = NULL,
320 .of_match_table = dpaa2_console_match_table,
321 },
322 .probe = dpaa2_console_probe,
323 .remove = dpaa2_console_remove,
324};
325module_platform_driver(dpaa2_console_driver);
326
327MODULE_LICENSE("Dual BSD/GPL");
328MODULE_AUTHOR("Roy Pledge <roy.pledge@nxp.com>");
329MODULE_DESCRIPTION("DPAA2 console driver");
diff --git a/drivers/soc/fsl/dpio/dpio-driver.c b/drivers/soc/fsl/dpio/dpio-driver.c
index c0cdc8946031..70014ecce2a7 100644
--- a/drivers/soc/fsl/dpio/dpio-driver.c
+++ b/drivers/soc/fsl/dpio/dpio-driver.c
@@ -197,13 +197,22 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
197 desc.cpu); 197 desc.cpu);
198 } 198 }
199 199
200 /* 200 if (dpio_dev->obj_desc.region_count < 3) {
201 * Set the CENA regs to be the cache inhibited area of the portal to 201 /* No support for DDR backed portals, use classic mapping */
202 * avoid coherency issues if a user migrates to another core. 202 /*
203 */ 203 * Set the CENA regs to be the cache inhibited area of the
204 desc.regs_cena = devm_memremap(dev, dpio_dev->regions[1].start, 204 * portal to avoid coherency issues if a user migrates to
205 resource_size(&dpio_dev->regions[1]), 205 * another core.
206 MEMREMAP_WC); 206 */
207 desc.regs_cena = devm_memremap(dev, dpio_dev->regions[1].start,
208 resource_size(&dpio_dev->regions[1]),
209 MEMREMAP_WC);
210 } else {
211 desc.regs_cena = devm_memremap(dev, dpio_dev->regions[2].start,
212 resource_size(&dpio_dev->regions[2]),
213 MEMREMAP_WB);
214 }
215
207 if (IS_ERR(desc.regs_cena)) { 216 if (IS_ERR(desc.regs_cena)) {
208 dev_err(dev, "devm_memremap failed\n"); 217 dev_err(dev, "devm_memremap failed\n");
209 err = PTR_ERR(desc.regs_cena); 218 err = PTR_ERR(desc.regs_cena);
diff --git a/drivers/soc/fsl/dpio/qbman-portal.c b/drivers/soc/fsl/dpio/qbman-portal.c
index d02013556a1b..c66f5b73777c 100644
--- a/drivers/soc/fsl/dpio/qbman-portal.c
+++ b/drivers/soc/fsl/dpio/qbman-portal.c
@@ -15,6 +15,8 @@
15#define QMAN_REV_4000 0x04000000 15#define QMAN_REV_4000 0x04000000
16#define QMAN_REV_4100 0x04010000 16#define QMAN_REV_4100 0x04010000
17#define QMAN_REV_4101 0x04010001 17#define QMAN_REV_4101 0x04010001
18#define QMAN_REV_5000 0x05000000
19
18#define QMAN_REV_MASK 0xffff0000 20#define QMAN_REV_MASK 0xffff0000
19 21
20/* All QBMan command and result structures use this "valid bit" encoding */ 22/* All QBMan command and result structures use this "valid bit" encoding */
@@ -25,10 +27,17 @@
25#define QBMAN_WQCHAN_CONFIGURE 0x46 27#define QBMAN_WQCHAN_CONFIGURE 0x46
26 28
27/* CINH register offsets */ 29/* CINH register offsets */
30#define QBMAN_CINH_SWP_EQCR_PI 0x800
28#define QBMAN_CINH_SWP_EQAR 0x8c0 31#define QBMAN_CINH_SWP_EQAR 0x8c0
32#define QBMAN_CINH_SWP_CR_RT 0x900
33#define QBMAN_CINH_SWP_VDQCR_RT 0x940
34#define QBMAN_CINH_SWP_EQCR_AM_RT 0x980
35#define QBMAN_CINH_SWP_RCR_AM_RT 0x9c0
29#define QBMAN_CINH_SWP_DQPI 0xa00 36#define QBMAN_CINH_SWP_DQPI 0xa00
30#define QBMAN_CINH_SWP_DCAP 0xac0 37#define QBMAN_CINH_SWP_DCAP 0xac0
31#define QBMAN_CINH_SWP_SDQCR 0xb00 38#define QBMAN_CINH_SWP_SDQCR 0xb00
39#define QBMAN_CINH_SWP_EQCR_AM_RT2 0xb40
40#define QBMAN_CINH_SWP_RCR_PI 0xc00
32#define QBMAN_CINH_SWP_RAR 0xcc0 41#define QBMAN_CINH_SWP_RAR 0xcc0
33#define QBMAN_CINH_SWP_ISR 0xe00 42#define QBMAN_CINH_SWP_ISR 0xe00
34#define QBMAN_CINH_SWP_IER 0xe40 43#define QBMAN_CINH_SWP_IER 0xe40
@@ -43,6 +52,13 @@
43#define QBMAN_CENA_SWP_RR(vb) (0x700 + ((u32)(vb) >> 1)) 52#define QBMAN_CENA_SWP_RR(vb) (0x700 + ((u32)(vb) >> 1))
44#define QBMAN_CENA_SWP_VDQCR 0x780 53#define QBMAN_CENA_SWP_VDQCR 0x780
45 54
55/* CENA register offsets in memory-backed mode */
56#define QBMAN_CENA_SWP_DQRR_MEM(n) (0x800 + ((u32)(n) << 6))
57#define QBMAN_CENA_SWP_RCR_MEM(n) (0x1400 + ((u32)(n) << 6))
58#define QBMAN_CENA_SWP_CR_MEM 0x1600
59#define QBMAN_CENA_SWP_RR_MEM 0x1680
60#define QBMAN_CENA_SWP_VDQCR_MEM 0x1780
61
46/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */ 62/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
47#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6) 63#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6)
48 64
@@ -96,10 +112,13 @@ static inline void *qbman_get_cmd(struct qbman_swp *p, u32 offset)
96 112
97#define SWP_CFG_DQRR_MF_SHIFT 20 113#define SWP_CFG_DQRR_MF_SHIFT 20
98#define SWP_CFG_EST_SHIFT 16 114#define SWP_CFG_EST_SHIFT 16
115#define SWP_CFG_CPBS_SHIFT 15
99#define SWP_CFG_WN_SHIFT 14 116#define SWP_CFG_WN_SHIFT 14
100#define SWP_CFG_RPM_SHIFT 12 117#define SWP_CFG_RPM_SHIFT 12
101#define SWP_CFG_DCM_SHIFT 10 118#define SWP_CFG_DCM_SHIFT 10
102#define SWP_CFG_EPM_SHIFT 8 119#define SWP_CFG_EPM_SHIFT 8
120#define SWP_CFG_VPM_SHIFT 7
121#define SWP_CFG_CPM_SHIFT 6
103#define SWP_CFG_SD_SHIFT 5 122#define SWP_CFG_SD_SHIFT 5
104#define SWP_CFG_SP_SHIFT 4 123#define SWP_CFG_SP_SHIFT 4
105#define SWP_CFG_SE_SHIFT 3 124#define SWP_CFG_SE_SHIFT 3
@@ -125,6 +144,8 @@ static inline u32 qbman_set_swp_cfg(u8 max_fill, u8 wn, u8 est, u8 rpm, u8 dcm,
125 ep << SWP_CFG_EP_SHIFT); 144 ep << SWP_CFG_EP_SHIFT);
126} 145}
127 146
147#define QMAN_RT_MODE 0x00000100
148
128/** 149/**
129 * qbman_swp_init() - Create a functional object representing the given 150 * qbman_swp_init() - Create a functional object representing the given
130 * QBMan portal descriptor. 151 * QBMan portal descriptor.
@@ -146,6 +167,8 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
146 p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT; 167 p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
147 p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT; 168 p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
148 p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT; 169 p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
170 if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
171 p->mr.valid_bit = QB_VALID_BIT;
149 172
150 atomic_set(&p->vdq.available, 1); 173 atomic_set(&p->vdq.available, 1);
151 p->vdq.valid_bit = QB_VALID_BIT; 174 p->vdq.valid_bit = QB_VALID_BIT;
@@ -163,6 +186,9 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
163 p->addr_cena = d->cena_bar; 186 p->addr_cena = d->cena_bar;
164 p->addr_cinh = d->cinh_bar; 187 p->addr_cinh = d->cinh_bar;
165 188
189 if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
190 memset(p->addr_cena, 0, 64 * 1024);
191
166 reg = qbman_set_swp_cfg(p->dqrr.dqrr_size, 192 reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
167 1, /* Writes Non-cacheable */ 193 1, /* Writes Non-cacheable */
168 0, /* EQCR_CI stashing threshold */ 194 0, /* EQCR_CI stashing threshold */
@@ -175,6 +201,10 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
175 1, /* dequeue stashing priority == TRUE */ 201 1, /* dequeue stashing priority == TRUE */
176 0, /* dequeue stashing enable == FALSE */ 202 0, /* dequeue stashing enable == FALSE */
177 0); /* EQCR_CI stashing priority == FALSE */ 203 0); /* EQCR_CI stashing priority == FALSE */
204 if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
205 reg |= 1 << SWP_CFG_CPBS_SHIFT | /* memory-backed mode */
206 1 << SWP_CFG_VPM_SHIFT | /* VDQCR read triggered mode */
207 1 << SWP_CFG_CPM_SHIFT; /* CR read triggered mode */
178 208
179 qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg); 209 qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg);
180 reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG); 210 reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG);
@@ -184,6 +214,10 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
184 return NULL; 214 return NULL;
185 } 215 }
186 216
217 if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
218 qbman_write_register(p, QBMAN_CINH_SWP_EQCR_PI, QMAN_RT_MODE);
219 qbman_write_register(p, QBMAN_CINH_SWP_RCR_PI, QMAN_RT_MODE);
220 }
187 /* 221 /*
188 * SDQCR needs to be initialized to 0 when no channels are 222 * SDQCR needs to be initialized to 0 when no channels are
189 * being dequeued from or else the QMan HW will indicate an 223 * being dequeued from or else the QMan HW will indicate an
@@ -278,7 +312,10 @@ void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
278 */ 312 */
279void *qbman_swp_mc_start(struct qbman_swp *p) 313void *qbman_swp_mc_start(struct qbman_swp *p)
280{ 314{
281 return qbman_get_cmd(p, QBMAN_CENA_SWP_CR); 315 if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
316 return qbman_get_cmd(p, QBMAN_CENA_SWP_CR);
317 else
318 return qbman_get_cmd(p, QBMAN_CENA_SWP_CR_MEM);
282} 319}
283 320
284/* 321/*
@@ -289,8 +326,14 @@ void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb)
289{ 326{
290 u8 *v = cmd; 327 u8 *v = cmd;
291 328
292 dma_wmb(); 329 if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
293 *v = cmd_verb | p->mc.valid_bit; 330 dma_wmb();
331 *v = cmd_verb | p->mc.valid_bit;
332 } else {
333 *v = cmd_verb | p->mc.valid_bit;
334 dma_wmb();
335 qbman_write_register(p, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE);
336 }
294} 337}
295 338
296/* 339/*
@@ -301,13 +344,27 @@ void *qbman_swp_mc_result(struct qbman_swp *p)
301{ 344{
302 u32 *ret, verb; 345 u32 *ret, verb;
303 346
304 ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit)); 347 if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
348 ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
349 /* Remove the valid-bit - command completed if the rest
350 * is non-zero.
351 */
352 verb = ret[0] & ~QB_VALID_BIT;
353 if (!verb)
354 return NULL;
355 p->mc.valid_bit ^= QB_VALID_BIT;
356 } else {
357 ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR_MEM);
358 /* Command completed if the valid bit is toggled */
359 if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
360 return NULL;
361 /* Command completed if the rest is non-zero */
362 verb = ret[0] & ~QB_VALID_BIT;
363 if (!verb)
364 return NULL;
365 p->mr.valid_bit ^= QB_VALID_BIT;
366 }
305 367
306 /* Remove the valid-bit - command completed if the rest is non-zero */
307 verb = ret[0] & ~QB_VALID_BIT;
308 if (!verb)
309 return NULL;
310 p->mc.valid_bit ^= QB_VALID_BIT;
311 return ret; 368 return ret;
312} 369}
313 370
@@ -384,6 +441,18 @@ void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
384#define EQAR_VB(eqar) ((eqar) & 0x80) 441#define EQAR_VB(eqar) ((eqar) & 0x80)
385#define EQAR_SUCCESS(eqar) ((eqar) & 0x100) 442#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
386 443
444static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p,
445 u8 idx)
446{
447 if (idx < 16)
448 qbman_write_register(p, QBMAN_CINH_SWP_EQCR_AM_RT + idx * 4,
449 QMAN_RT_MODE);
450 else
451 qbman_write_register(p, QBMAN_CINH_SWP_EQCR_AM_RT2 +
452 (idx - 16) * 4,
453 QMAN_RT_MODE);
454}
455
387/** 456/**
388 * qbman_swp_enqueue() - Issue an enqueue command 457 * qbman_swp_enqueue() - Issue an enqueue command
389 * @s: the software portal used for enqueue 458 * @s: the software portal used for enqueue
@@ -408,9 +477,15 @@ int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
408 memcpy(&p->dca, &d->dca, 31); 477 memcpy(&p->dca, &d->dca, 31);
409 memcpy(&p->fd, fd, sizeof(*fd)); 478 memcpy(&p->fd, fd, sizeof(*fd));
410 479
411 /* Set the verb byte, have to substitute in the valid-bit */ 480 if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
412 dma_wmb(); 481 /* Set the verb byte, have to substitute in the valid-bit */
413 p->verb = d->verb | EQAR_VB(eqar); 482 dma_wmb();
483 p->verb = d->verb | EQAR_VB(eqar);
484 } else {
485 p->verb = d->verb | EQAR_VB(eqar);
486 dma_wmb();
487 qbman_write_eqcr_am_rt_register(s, EQAR_IDX(eqar));
488 }
414 489
415 return 0; 490 return 0;
416} 491}
@@ -587,17 +662,27 @@ int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
587 return -EBUSY; 662 return -EBUSY;
588 } 663 }
589 s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt; 664 s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
590 p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR); 665 if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
666 p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
667 else
668 p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
591 p->numf = d->numf; 669 p->numf = d->numf;
592 p->tok = QMAN_DQ_TOKEN_VALID; 670 p->tok = QMAN_DQ_TOKEN_VALID;
593 p->dq_src = d->dq_src; 671 p->dq_src = d->dq_src;
594 p->rsp_addr = d->rsp_addr; 672 p->rsp_addr = d->rsp_addr;
595 p->rsp_addr_virt = d->rsp_addr_virt; 673 p->rsp_addr_virt = d->rsp_addr_virt;
596 dma_wmb();
597 674
598 /* Set the verb byte, have to substitute in the valid-bit */ 675 if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
599 p->verb = d->verb | s->vdq.valid_bit; 676 dma_wmb();
600 s->vdq.valid_bit ^= QB_VALID_BIT; 677 /* Set the verb byte, have to substitute in the valid-bit */
678 p->verb = d->verb | s->vdq.valid_bit;
679 s->vdq.valid_bit ^= QB_VALID_BIT;
680 } else {
681 p->verb = d->verb | s->vdq.valid_bit;
682 s->vdq.valid_bit ^= QB_VALID_BIT;
683 dma_wmb();
684 qbman_write_register(s, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);
685 }
601 686
602 return 0; 687 return 0;
603} 688}
@@ -655,7 +740,10 @@ const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
655 QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx))); 740 QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
656 } 741 }
657 742
658 p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)); 743 if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
744 p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
745 else
746 p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
659 verb = p->dq.verb; 747 verb = p->dq.verb;
660 748
661 /* 749 /*
@@ -807,18 +895,28 @@ int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
807 return -EBUSY; 895 return -EBUSY;
808 896
809 /* Start the release command */ 897 /* Start the release command */
810 p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar))); 898 if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
899 p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
900 else
901 p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));
811 /* Copy the caller's buffer pointers to the command */ 902 /* Copy the caller's buffer pointers to the command */
812 for (i = 0; i < num_buffers; i++) 903 for (i = 0; i < num_buffers; i++)
813 p->buf[i] = cpu_to_le64(buffers[i]); 904 p->buf[i] = cpu_to_le64(buffers[i]);
814 p->bpid = d->bpid; 905 p->bpid = d->bpid;
815 906
816 /* 907 if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
817 * Set the verb byte, have to substitute in the valid-bit and the number 908 /*
818 * of buffers. 909 * Set the verb byte, have to substitute in the valid-bit
819 */ 910 * and the number of buffers.
820 dma_wmb(); 911 */
821 p->verb = d->verb | RAR_VB(rar) | num_buffers; 912 dma_wmb();
913 p->verb = d->verb | RAR_VB(rar) | num_buffers;
914 } else {
915 p->verb = d->verb | RAR_VB(rar) | num_buffers;
916 dma_wmb();
917 qbman_write_register(s, QBMAN_CINH_SWP_RCR_AM_RT +
918 RAR_IDX(rar) * 4, QMAN_RT_MODE);
919 }
822 920
823 return 0; 921 return 0;
824} 922}
diff --git a/drivers/soc/fsl/dpio/qbman-portal.h b/drivers/soc/fsl/dpio/qbman-portal.h
index fa35fc1afeaa..f3ec5d2044fb 100644
--- a/drivers/soc/fsl/dpio/qbman-portal.h
+++ b/drivers/soc/fsl/dpio/qbman-portal.h
@@ -1,7 +1,7 @@
1/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ 1/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
2/* 2/*
3 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc. 3 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
4 * Copyright 2016 NXP 4 * Copyright 2016-2019 NXP
5 * 5 *
6 */ 6 */
7#ifndef __FSL_QBMAN_PORTAL_H 7#ifndef __FSL_QBMAN_PORTAL_H
@@ -110,6 +110,11 @@ struct qbman_swp {
110 u32 valid_bit; /* 0x00 or 0x80 */ 110 u32 valid_bit; /* 0x00 or 0x80 */
111 } mc; 111 } mc;
112 112
113 /* Management response */
114 struct {
115 u32 valid_bit; /* 0x00 or 0x80 */
116 } mr;
117
113 /* Push dequeues */ 118 /* Push dequeues */
114 u32 sdq; 119 u32 sdq;
115 120
@@ -428,7 +433,7 @@ static inline int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s,
428static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd, 433static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
429 u8 cmd_verb) 434 u8 cmd_verb)
430{ 435{
431 int loopvar = 1000; 436 int loopvar = 2000;
432 437
433 qbman_swp_mc_submit(swp, cmd, cmd_verb); 438 qbman_swp_mc_submit(swp, cmd, cmd_verb);
434 439
diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c
index 78607da7320e..1ef8068c8dd3 100644
--- a/drivers/soc/fsl/guts.c
+++ b/drivers/soc/fsl/guts.c
@@ -97,6 +97,11 @@ static const struct fsl_soc_die_attr fsl_soc_die[] = {
97 .svr = 0x87000000, 97 .svr = 0x87000000,
98 .mask = 0xfff70000, 98 .mask = 0xfff70000,
99 }, 99 },
100 /* Die: LX2160A, SoC: LX2160A/LX2120A/LX2080A */
101 { .die = "LX2160A",
102 .svr = 0x87360000,
103 .mask = 0xff3f0000,
104 },
100 { }, 105 { },
101}; 106};
102 107
@@ -218,6 +223,7 @@ static const struct of_device_id fsl_guts_of_match[] = {
218 { .compatible = "fsl,ls1088a-dcfg", }, 223 { .compatible = "fsl,ls1088a-dcfg", },
219 { .compatible = "fsl,ls1012a-dcfg", }, 224 { .compatible = "fsl,ls1012a-dcfg", },
220 { .compatible = "fsl,ls1046a-dcfg", }, 225 { .compatible = "fsl,ls1046a-dcfg", },
226 { .compatible = "fsl,lx2160a-dcfg", },
221 {} 227 {}
222}; 228};
223MODULE_DEVICE_TABLE(of, fsl_guts_of_match); 229MODULE_DEVICE_TABLE(of, fsl_guts_of_match);
diff --git a/drivers/soc/fsl/qbman/bman_portal.c b/drivers/soc/fsl/qbman/bman_portal.c
index 2c95cf59f3e7..cf4f10d6f590 100644
--- a/drivers/soc/fsl/qbman/bman_portal.c
+++ b/drivers/soc/fsl/qbman/bman_portal.c
@@ -32,6 +32,7 @@
32 32
33static struct bman_portal *affine_bportals[NR_CPUS]; 33static struct bman_portal *affine_bportals[NR_CPUS];
34static struct cpumask portal_cpus; 34static struct cpumask portal_cpus;
35static int __bman_portals_probed;
35/* protect bman global registers and global data shared among portals */ 36/* protect bman global registers and global data shared among portals */
36static DEFINE_SPINLOCK(bman_lock); 37static DEFINE_SPINLOCK(bman_lock);
37 38
@@ -87,6 +88,12 @@ static int bman_online_cpu(unsigned int cpu)
87 return 0; 88 return 0;
88} 89}
89 90
91int bman_portals_probed(void)
92{
93 return __bman_portals_probed;
94}
95EXPORT_SYMBOL_GPL(bman_portals_probed);
96
90static int bman_portal_probe(struct platform_device *pdev) 97static int bman_portal_probe(struct platform_device *pdev)
91{ 98{
92 struct device *dev = &pdev->dev; 99 struct device *dev = &pdev->dev;
@@ -104,8 +111,10 @@ static int bman_portal_probe(struct platform_device *pdev)
104 } 111 }
105 112
106 pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL); 113 pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
107 if (!pcfg) 114 if (!pcfg) {
115 __bman_portals_probed = -1;
108 return -ENOMEM; 116 return -ENOMEM;
117 }
109 118
110 pcfg->dev = dev; 119 pcfg->dev = dev;
111 120
@@ -113,14 +122,14 @@ static int bman_portal_probe(struct platform_device *pdev)
113 DPAA_PORTAL_CE); 122 DPAA_PORTAL_CE);
114 if (!addr_phys[0]) { 123 if (!addr_phys[0]) {
115 dev_err(dev, "Can't get %pOF property 'reg::CE'\n", node); 124 dev_err(dev, "Can't get %pOF property 'reg::CE'\n", node);
116 return -ENXIO; 125 goto err_ioremap1;
117 } 126 }
118 127
119 addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM, 128 addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
120 DPAA_PORTAL_CI); 129 DPAA_PORTAL_CI);
121 if (!addr_phys[1]) { 130 if (!addr_phys[1]) {
122 dev_err(dev, "Can't get %pOF property 'reg::CI'\n", node); 131 dev_err(dev, "Can't get %pOF property 'reg::CI'\n", node);
123 return -ENXIO; 132 goto err_ioremap1;
124 } 133 }
125 134
126 pcfg->cpu = -1; 135 pcfg->cpu = -1;
@@ -128,7 +137,7 @@ static int bman_portal_probe(struct platform_device *pdev)
128 irq = platform_get_irq(pdev, 0); 137 irq = platform_get_irq(pdev, 0);
129 if (irq <= 0) { 138 if (irq <= 0) {
130 dev_err(dev, "Can't get %pOF IRQ'\n", node); 139 dev_err(dev, "Can't get %pOF IRQ'\n", node);
131 return -ENXIO; 140 goto err_ioremap1;
132 } 141 }
133 pcfg->irq = irq; 142 pcfg->irq = irq;
134 143
@@ -150,6 +159,7 @@ static int bman_portal_probe(struct platform_device *pdev)
150 spin_lock(&bman_lock); 159 spin_lock(&bman_lock);
151 cpu = cpumask_next_zero(-1, &portal_cpus); 160 cpu = cpumask_next_zero(-1, &portal_cpus);
152 if (cpu >= nr_cpu_ids) { 161 if (cpu >= nr_cpu_ids) {
162 __bman_portals_probed = 1;
153 /* unassigned portal, skip init */ 163 /* unassigned portal, skip init */
154 spin_unlock(&bman_lock); 164 spin_unlock(&bman_lock);
155 return 0; 165 return 0;
@@ -175,6 +185,8 @@ err_portal_init:
175err_ioremap2: 185err_ioremap2:
176 memunmap(pcfg->addr_virt_ce); 186 memunmap(pcfg->addr_virt_ce);
177err_ioremap1: 187err_ioremap1:
188 __bman_portals_probed = -1;
189
178 return -ENXIO; 190 return -ENXIO;
179} 191}
180 192
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c
index 109b38de3176..a6bb43007d03 100644
--- a/drivers/soc/fsl/qbman/qman_ccsr.c
+++ b/drivers/soc/fsl/qbman/qman_ccsr.c
@@ -596,7 +596,7 @@ static int qman_init_ccsr(struct device *dev)
596} 596}
597 597
598#define LIO_CFG_LIODN_MASK 0x0fff0000 598#define LIO_CFG_LIODN_MASK 0x0fff0000
599void qman_liodn_fixup(u16 channel) 599void __qman_liodn_fixup(u16 channel)
600{ 600{
601 static int done; 601 static int done;
602 static u32 liodn_offset; 602 static u32 liodn_offset;
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c
index 661c9b234d32..e2186b681d87 100644
--- a/drivers/soc/fsl/qbman/qman_portal.c
+++ b/drivers/soc/fsl/qbman/qman_portal.c
@@ -38,6 +38,7 @@ EXPORT_SYMBOL(qman_dma_portal);
38#define CONFIG_FSL_DPA_PIRQ_FAST 1 38#define CONFIG_FSL_DPA_PIRQ_FAST 1
39 39
40static struct cpumask portal_cpus; 40static struct cpumask portal_cpus;
41static int __qman_portals_probed;
41/* protect qman global registers and global data shared among portals */ 42/* protect qman global registers and global data shared among portals */
42static DEFINE_SPINLOCK(qman_lock); 43static DEFINE_SPINLOCK(qman_lock);
43 44
@@ -220,6 +221,12 @@ static int qman_online_cpu(unsigned int cpu)
220 return 0; 221 return 0;
221} 222}
222 223
224int qman_portals_probed(void)
225{
226 return __qman_portals_probed;
227}
228EXPORT_SYMBOL_GPL(qman_portals_probed);
229
223static int qman_portal_probe(struct platform_device *pdev) 230static int qman_portal_probe(struct platform_device *pdev)
224{ 231{
225 struct device *dev = &pdev->dev; 232 struct device *dev = &pdev->dev;
@@ -238,8 +245,10 @@ static int qman_portal_probe(struct platform_device *pdev)
238 } 245 }
239 246
240 pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL); 247 pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
241 if (!pcfg) 248 if (!pcfg) {
249 __qman_portals_probed = -1;
242 return -ENOMEM; 250 return -ENOMEM;
251 }
243 252
244 pcfg->dev = dev; 253 pcfg->dev = dev;
245 254
@@ -247,19 +256,20 @@ static int qman_portal_probe(struct platform_device *pdev)
247 DPAA_PORTAL_CE); 256 DPAA_PORTAL_CE);
248 if (!addr_phys[0]) { 257 if (!addr_phys[0]) {
249 dev_err(dev, "Can't get %pOF property 'reg::CE'\n", node); 258 dev_err(dev, "Can't get %pOF property 'reg::CE'\n", node);
250 return -ENXIO; 259 goto err_ioremap1;
251 } 260 }
252 261
253 addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM, 262 addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
254 DPAA_PORTAL_CI); 263 DPAA_PORTAL_CI);
255 if (!addr_phys[1]) { 264 if (!addr_phys[1]) {
256 dev_err(dev, "Can't get %pOF property 'reg::CI'\n", node); 265 dev_err(dev, "Can't get %pOF property 'reg::CI'\n", node);
257 return -ENXIO; 266 goto err_ioremap1;
258 } 267 }
259 268
260 err = of_property_read_u32(node, "cell-index", &val); 269 err = of_property_read_u32(node, "cell-index", &val);
261 if (err) { 270 if (err) {
262 dev_err(dev, "Can't get %pOF property 'cell-index'\n", node); 271 dev_err(dev, "Can't get %pOF property 'cell-index'\n", node);
272 __qman_portals_probed = -1;
263 return err; 273 return err;
264 } 274 }
265 pcfg->channel = val; 275 pcfg->channel = val;
@@ -267,7 +277,7 @@ static int qman_portal_probe(struct platform_device *pdev)
267 irq = platform_get_irq(pdev, 0); 277 irq = platform_get_irq(pdev, 0);
268 if (irq <= 0) { 278 if (irq <= 0) {
269 dev_err(dev, "Can't get %pOF IRQ\n", node); 279 dev_err(dev, "Can't get %pOF IRQ\n", node);
270 return -ENXIO; 280 goto err_ioremap1;
271 } 281 }
272 pcfg->irq = irq; 282 pcfg->irq = irq;
273 283
@@ -291,6 +301,7 @@ static int qman_portal_probe(struct platform_device *pdev)
291 spin_lock(&qman_lock); 301 spin_lock(&qman_lock);
292 cpu = cpumask_next_zero(-1, &portal_cpus); 302 cpu = cpumask_next_zero(-1, &portal_cpus);
293 if (cpu >= nr_cpu_ids) { 303 if (cpu >= nr_cpu_ids) {
304 __qman_portals_probed = 1;
294 /* unassigned portal, skip init */ 305 /* unassigned portal, skip init */
295 spin_unlock(&qman_lock); 306 spin_unlock(&qman_lock);
296 return 0; 307 return 0;
@@ -321,6 +332,8 @@ err_portal_init:
321err_ioremap2: 332err_ioremap2:
322 memunmap(pcfg->addr_virt_ce); 333 memunmap(pcfg->addr_virt_ce);
323err_ioremap1: 334err_ioremap1:
335 __qman_portals_probed = -1;
336
324 return -ENXIO; 337 return -ENXIO;
325} 338}
326 339
diff --git a/drivers/soc/fsl/qbman/qman_priv.h b/drivers/soc/fsl/qbman/qman_priv.h
index 75a8f905f8f7..04515718cfd9 100644
--- a/drivers/soc/fsl/qbman/qman_priv.h
+++ b/drivers/soc/fsl/qbman/qman_priv.h
@@ -193,7 +193,14 @@ extern struct gen_pool *qm_cgralloc; /* CGR ID allocator */
193u32 qm_get_pools_sdqcr(void); 193u32 qm_get_pools_sdqcr(void);
194 194
195int qman_wq_alloc(void); 195int qman_wq_alloc(void);
196void qman_liodn_fixup(u16 channel); 196#ifdef CONFIG_FSL_PAMU
197#define qman_liodn_fixup __qman_liodn_fixup
198#else
199static inline void qman_liodn_fixup(u16 channel)
200{
201}
202#endif
203void __qman_liodn_fixup(u16 channel);
197void qman_set_sdest(u16 channel, unsigned int cpu_idx); 204void qman_set_sdest(u16 channel, unsigned int cpu_idx);
198 205
199struct qman_portal *qman_create_affine_portal( 206struct qman_portal *qman_create_affine_portal(
diff --git a/include/soc/fsl/bman.h b/include/soc/fsl/bman.h
index 5b99cb2ea5ef..173e4049d963 100644
--- a/include/soc/fsl/bman.h
+++ b/include/soc/fsl/bman.h
@@ -133,5 +133,13 @@ int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num);
133 * failed to probe or 0 if the bman driver did not probed yet. 133 * failed to probe or 0 if the bman driver did not probed yet.
134 */ 134 */
135int bman_is_probed(void); 135int bman_is_probed(void);
136/**
137 * bman_portals_probed - Check if all cpu bound bman portals are probed
138 *
139 * Returns 1 if all the required cpu bound bman portals successfully probed,
140 * -1 if probe errors appeared or 0 if the bman portals did not yet finished
141 * probing.
142 */
143int bman_portals_probed(void);
136 144
137#endif /* __FSL_BMAN_H */ 145#endif /* __FSL_BMAN_H */
diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h
index 5cc7af06c1ba..aa31c05a103a 100644
--- a/include/soc/fsl/qman.h
+++ b/include/soc/fsl/qman.h
@@ -1195,6 +1195,15 @@ int qman_release_cgrid(u32 id);
1195int qman_is_probed(void); 1195int qman_is_probed(void);
1196 1196
1197/** 1197/**
1198 * qman_portals_probed - Check if all cpu bound qman portals are probed
1199 *
1200 * Returns 1 if all the required cpu bound qman portals successfully probed,
1201 * -1 if probe errors appeared or 0 if the qman portals did not yet finished
1202 * probing.
1203 */
1204int qman_portals_probed(void);
1205
1206/**
1198 * qman_dqrr_get_ithresh - Get coalesce interrupt threshold 1207 * qman_dqrr_get_ithresh - Get coalesce interrupt threshold
1199 * @portal: portal to get the value for 1208 * @portal: portal to get the value for
1200 * @ithresh: threshold pointer 1209 * @ithresh: threshold pointer