-rw-r--r--  arch/arm/mach-ixp4xx/Kconfig        |  14
-rw-r--r--  arch/arm/mach-ixp4xx/Makefile       |   2
-rw-r--r--  arch/arm/mach-ixp4xx/ixp4xx_npe.c   | 741
-rw-r--r--  arch/arm/mach-ixp4xx/ixp4xx_qmgr.c  | 274
-rw-r--r--  include/asm-arm/arch-ixp4xx/npe.h   |  39
-rw-r--r--  include/asm-arm/arch-ixp4xx/qmgr.h  | 126
6 files changed, 1196 insertions, 0 deletions
diff --git a/arch/arm/mach-ixp4xx/Kconfig b/arch/arm/mach-ixp4xx/Kconfig
index 61b2dfcb89d6..e774447c0592 100644
--- a/arch/arm/mach-ixp4xx/Kconfig
+++ b/arch/arm/mach-ixp4xx/Kconfig
@@ -189,6 +189,20 @@ config IXP4XX_INDIRECT_PCI
189 | need to use the indirect method instead. If you don't know | 189 | need to use the indirect method instead. If you don't know |
190 | what you need, leave this option unselected. | 190 | what you need, leave this option unselected. |
191 | 191 | ||
192 | config IXP4XX_QMGR | ||
193 | tristate "IXP4xx Queue Manager support" | ||
194 | help | ||
195 | This driver supports the IXP4xx built-in hardware queue manager | ||
196 | and is automatically selected by Ethernet and HSS drivers. | ||
197 | |||
198 | config IXP4XX_NPE | ||
199 | tristate "IXP4xx Network Processor Engine support" | ||
200 | select HOTPLUG | ||
201 | select FW_LOADER | ||
202 | help | ||
203 | This driver supports the IXP4xx built-in network coprocessors | ||
204 | and is automatically selected by Ethernet and HSS drivers. | ||
205 | |||
192 | endmenu | 206 | endmenu |
193 | 207 | ||
194 | endif | 208 | endif |
diff --git a/arch/arm/mach-ixp4xx/Makefile b/arch/arm/mach-ixp4xx/Makefile
index 77e00ade5585..4bb97e13f957 100644
--- a/arch/arm/mach-ixp4xx/Makefile
+++ b/arch/arm/mach-ixp4xx/Makefile
@@ -30,3 +30,5 @@ obj-$(CONFIG_MACH_GATEWAY7001) += gateway7001-setup.o
30 | obj-$(CONFIG_MACH_WG302V2) += wg302v2-setup.o | 30 | obj-$(CONFIG_MACH_WG302V2) += wg302v2-setup.o |
31 | 31 | ||
32 | obj-$(CONFIG_PCI) += $(obj-pci-$(CONFIG_PCI)) common-pci.o | 32 | obj-$(CONFIG_PCI) += $(obj-pci-$(CONFIG_PCI)) common-pci.o |
33 | obj-$(CONFIG_IXP4XX_QMGR) += ixp4xx_qmgr.o | ||
34 | obj-$(CONFIG_IXP4XX_NPE) += ixp4xx_npe.o | ||
diff --git a/arch/arm/mach-ixp4xx/ixp4xx_npe.c b/arch/arm/mach-ixp4xx/ixp4xx_npe.c
new file mode 100644
index 000000000000..83c137ec582c
--- /dev/null
+++ b/arch/arm/mach-ixp4xx/ixp4xx_npe.c
@@ -0,0 +1,741 @@
1 | /* | ||
2 | * Intel IXP4xx Network Processor Engine driver for Linux | ||
3 | * | ||
4 | * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of version 2 of the GNU General Public License | ||
8 | * as published by the Free Software Foundation. | ||
9 | * | ||
10 | * The code is based on publicly available information: | ||
11 | * - Intel IXP4xx Developer's Manual and other e-papers | ||
12 | * - Intel IXP400 Access Library Software (BSD license) | ||
13 | * - previous works by Christian Hohnstaedt <chohnstaedt@innominate.com> | ||
14 | * Thanks, Christian. | ||
15 | */ | ||
16 | |||
17 | #include <linux/delay.h> | ||
18 | #include <linux/dma-mapping.h> | ||
19 | #include <linux/firmware.h> | ||
20 | #include <linux/io.h> | ||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/slab.h> | ||
24 | #include <asm/arch/npe.h> | ||
25 | |||
26 | #define DEBUG_MSG 0 | ||
27 | #define DEBUG_FW 0 | ||
28 | |||
29 | #define NPE_COUNT 3 | ||
30 | #define MAX_RETRIES 1000 /* microseconds */ | ||
31 | #define NPE_42X_DATA_SIZE 0x800 /* in dwords */ | ||
32 | #define NPE_46X_DATA_SIZE 0x1000 | ||
33 | #define NPE_A_42X_INSTR_SIZE 0x1000 | ||
34 | #define NPE_B_AND_C_42X_INSTR_SIZE 0x800 | ||
35 | #define NPE_46X_INSTR_SIZE 0x1000 | ||
36 | #define REGS_SIZE 0x1000 | ||
37 | |||
38 | #define NPE_PHYS_REG 32 | ||
39 | |||
40 | #define FW_MAGIC 0xFEEDF00D | ||
41 | #define FW_BLOCK_TYPE_INSTR 0x0 | ||
42 | #define FW_BLOCK_TYPE_DATA 0x1 | ||
43 | #define FW_BLOCK_TYPE_EOF 0xF | ||
44 | |||
45 | /* NPE exec status (read) and command (write) */ | ||
46 | #define CMD_NPE_STEP 0x01 | ||
47 | #define CMD_NPE_START 0x02 | ||
48 | #define CMD_NPE_STOP 0x03 | ||
49 | #define CMD_NPE_CLR_PIPE 0x04 | ||
50 | #define CMD_CLR_PROFILE_CNT 0x0C | ||
51 | #define CMD_RD_INS_MEM 0x10 /* instruction memory */ | ||
52 | #define CMD_WR_INS_MEM 0x11 | ||
53 | #define CMD_RD_DATA_MEM 0x12 /* data memory */ | ||
54 | #define CMD_WR_DATA_MEM 0x13 | ||
55 | #define CMD_RD_ECS_REG 0x14 /* exec access register */ | ||
56 | #define CMD_WR_ECS_REG 0x15 | ||
57 | |||
58 | #define STAT_RUN 0x80000000 | ||
59 | #define STAT_STOP 0x40000000 | ||
60 | #define STAT_CLEAR 0x20000000 | ||
61 | #define STAT_ECS_K 0x00800000 /* pipeline clean */ | ||
62 | |||
63 | #define NPE_STEVT 0x1B | ||
64 | #define NPE_STARTPC 0x1C | ||
65 | #define NPE_REGMAP 0x1E | ||
66 | #define NPE_CINDEX 0x1F | ||
67 | |||
68 | #define INSTR_WR_REG_SHORT 0x0000C000 | ||
69 | #define INSTR_WR_REG_BYTE 0x00004000 | ||
70 | #define INSTR_RD_FIFO 0x0F888220 | ||
71 | #define INSTR_RESET_MBOX 0x0FAC8210 | ||
72 | |||
73 | #define ECS_BG_CTXT_REG_0 0x00 /* Background Executing Context */ | ||
74 | #define ECS_BG_CTXT_REG_1 0x01 /* Stack level */ | ||
75 | #define ECS_BG_CTXT_REG_2 0x02 | ||
76 | #define ECS_PRI_1_CTXT_REG_0 0x04 /* Priority 1 Executing Context */ | ||
77 | #define ECS_PRI_1_CTXT_REG_1 0x05 /* Stack level */ | ||
78 | #define ECS_PRI_1_CTXT_REG_2 0x06 | ||
79 | #define ECS_PRI_2_CTXT_REG_0 0x08 /* Priority 2 Executing Context */ | ||
80 | #define ECS_PRI_2_CTXT_REG_1 0x09 /* Stack level */ | ||
81 | #define ECS_PRI_2_CTXT_REG_2 0x0A | ||
82 | #define ECS_DBG_CTXT_REG_0 0x0C /* Debug Executing Context */ | ||
83 | #define ECS_DBG_CTXT_REG_1 0x0D /* Stack level */ | ||
84 | #define ECS_DBG_CTXT_REG_2 0x0E | ||
85 | #define ECS_INSTRUCT_REG 0x11 /* NPE Instruction Register */ | ||
86 | |||
87 | #define ECS_REG_0_ACTIVE 0x80000000 /* all levels */ | ||
88 | #define ECS_REG_0_NEXTPC_MASK 0x1FFF0000 /* BG/PRI1/PRI2 levels */ | ||
89 | #define ECS_REG_0_LDUR_BITS 8 | ||
90 | #define ECS_REG_0_LDUR_MASK 0x00000700 /* all levels */ | ||
91 | #define ECS_REG_1_CCTXT_BITS 16 | ||
92 | #define ECS_REG_1_CCTXT_MASK 0x000F0000 /* all levels */ | ||
93 | #define ECS_REG_1_SELCTXT_BITS 0 | ||
94 | #define ECS_REG_1_SELCTXT_MASK 0x0000000F /* all levels */ | ||
95 | #define ECS_DBG_REG_2_IF 0x00100000 /* debug level */ | ||
96 | #define ECS_DBG_REG_2_IE 0x00080000 /* debug level */ | ||
97 | |||
98 | /* NPE watchpoint_fifo register bit */ | ||
99 | #define WFIFO_VALID 0x80000000 | ||
100 | |||
101 | /* NPE messaging_status register bit definitions */ | ||
102 | #define MSGSTAT_OFNE 0x00010000 /* OutFifoNotEmpty */ | ||
103 | #define MSGSTAT_IFNF 0x00020000 /* InFifoNotFull */ | ||
104 | #define MSGSTAT_OFNF 0x00040000 /* OutFifoNotFull */ | ||
105 | #define MSGSTAT_IFNE 0x00080000 /* InFifoNotEmpty */ | ||
106 | #define MSGSTAT_MBINT 0x00100000 /* Mailbox interrupt */ | ||
107 | #define MSGSTAT_IFINT 0x00200000 /* InFifo interrupt */ | ||
108 | #define MSGSTAT_OFINT 0x00400000 /* OutFifo interrupt */ | ||
109 | #define MSGSTAT_WFINT 0x00800000 /* WatchFifo interrupt */ | ||
110 | |||
111 | /* NPE messaging_control register bit definitions */ | ||
112 | #define MSGCTL_OUT_FIFO 0x00010000 /* enable output FIFO */ | ||
113 | #define MSGCTL_IN_FIFO 0x00020000 /* enable input FIFO */ | ||
114 | #define MSGCTL_OUT_FIFO_WRITE 0x01000000 /* enable FIFO + WRITE */ | ||
115 | #define MSGCTL_IN_FIFO_WRITE 0x02000000 | ||
116 | |||
117 | /* NPE mailbox_status value for reset */ | ||
118 | #define RESET_MBOX_STAT 0x0000F0F0 | ||
119 | |||
120 | const char *npe_names[] = { "NPE-A", "NPE-B", "NPE-C" }; | ||
121 | |||
122 | #define print_npe(pri, npe, fmt, ...) \ | ||
123 | printk(pri "%s: " fmt, npe_name(npe), ## __VA_ARGS__) | ||
124 | |||
125 | #if DEBUG_MSG | ||
126 | #define debug_msg(npe, fmt, ...) \ | ||
127 | print_npe(KERN_DEBUG, npe, fmt, ## __VA_ARGS__) | ||
128 | #else | ||
129 | #define debug_msg(npe, fmt, ...) | ||
130 | #endif | ||
131 | |||
132 | static struct { | ||
133 | u32 reg, val; | ||
134 | } ecs_reset[] = { | ||
135 | { ECS_BG_CTXT_REG_0, 0xA0000000 }, | ||
136 | { ECS_BG_CTXT_REG_1, 0x01000000 }, | ||
137 | { ECS_BG_CTXT_REG_2, 0x00008000 }, | ||
138 | { ECS_PRI_1_CTXT_REG_0, 0x20000080 }, | ||
139 | { ECS_PRI_1_CTXT_REG_1, 0x01000000 }, | ||
140 | { ECS_PRI_1_CTXT_REG_2, 0x00008000 }, | ||
141 | { ECS_PRI_2_CTXT_REG_0, 0x20000080 }, | ||
142 | { ECS_PRI_2_CTXT_REG_1, 0x01000000 }, | ||
143 | { ECS_PRI_2_CTXT_REG_2, 0x00008000 }, | ||
144 | { ECS_DBG_CTXT_REG_0, 0x20000000 }, | ||
145 | { ECS_DBG_CTXT_REG_1, 0x00000000 }, | ||
146 | { ECS_DBG_CTXT_REG_2, 0x001E0000 }, | ||
147 | { ECS_INSTRUCT_REG, 0x1003C00F }, | ||
148 | }; | ||
149 | |||
150 | static struct npe npe_tab[NPE_COUNT] = { | ||
151 | { | ||
152 | .id = 0, | ||
153 | .regs = (struct npe_regs __iomem *)IXP4XX_NPEA_BASE_VIRT, | ||
154 | .regs_phys = IXP4XX_NPEA_BASE_PHYS, | ||
155 | }, { | ||
156 | .id = 1, | ||
157 | .regs = (struct npe_regs __iomem *)IXP4XX_NPEB_BASE_VIRT, | ||
158 | .regs_phys = IXP4XX_NPEB_BASE_PHYS, | ||
159 | }, { | ||
160 | .id = 2, | ||
161 | .regs = (struct npe_regs __iomem *)IXP4XX_NPEC_BASE_VIRT, | ||
162 | .regs_phys = IXP4XX_NPEC_BASE_PHYS, | ||
163 | } | ||
164 | }; | ||
165 | |||
166 | int npe_running(struct npe *npe) | ||
167 | { | ||
168 | return (__raw_readl(&npe->regs->exec_status_cmd) & STAT_RUN) != 0; | ||
169 | } | ||
170 | |||
171 | static void npe_cmd_write(struct npe *npe, u32 addr, int cmd, u32 data) | ||
172 | { | ||
173 | __raw_writel(data, &npe->regs->exec_data); | ||
174 | __raw_writel(addr, &npe->regs->exec_addr); | ||
175 | __raw_writel(cmd, &npe->regs->exec_status_cmd); | ||
176 | } | ||
177 | |||
178 | static u32 npe_cmd_read(struct npe *npe, u32 addr, int cmd) | ||
179 | { | ||
180 | __raw_writel(addr, &npe->regs->exec_addr); | ||
181 | __raw_writel(cmd, &npe->regs->exec_status_cmd); | ||
182 | /* Introduce extra read cycles after issuing the read command to the NPE | ||
183 | so that we read the register after the NPE has updated it. | ||
184 | This is to overcome a race condition between XScale and NPE */ | ||
185 | __raw_readl(&npe->regs->exec_data); | ||
186 | __raw_readl(&npe->regs->exec_data); | ||
187 | return __raw_readl(&npe->regs->exec_data); | ||
188 | } | ||
189 | |||
190 | static void npe_clear_active(struct npe *npe, u32 reg) | ||
191 | { | ||
192 | u32 val = npe_cmd_read(npe, reg, CMD_RD_ECS_REG); | ||
193 | npe_cmd_write(npe, reg, CMD_WR_ECS_REG, val & ~ECS_REG_0_ACTIVE); | ||
194 | } | ||
195 | |||
196 | static void npe_start(struct npe *npe) | ||
197 | { | ||
198 | /* ensure only Background Context Stack Level is active */ | ||
199 | npe_clear_active(npe, ECS_PRI_1_CTXT_REG_0); | ||
200 | npe_clear_active(npe, ECS_PRI_2_CTXT_REG_0); | ||
201 | npe_clear_active(npe, ECS_DBG_CTXT_REG_0); | ||
202 | |||
203 | __raw_writel(CMD_NPE_CLR_PIPE, &npe->regs->exec_status_cmd); | ||
204 | __raw_writel(CMD_NPE_START, &npe->regs->exec_status_cmd); | ||
205 | } | ||
206 | |||
207 | static void npe_stop(struct npe *npe) | ||
208 | { | ||
209 | __raw_writel(CMD_NPE_STOP, &npe->regs->exec_status_cmd); | ||
210 | __raw_writel(CMD_NPE_CLR_PIPE, &npe->regs->exec_status_cmd); /*FIXME?*/ | ||
211 | } | ||
212 | |||
213 | static int __must_check npe_debug_instr(struct npe *npe, u32 instr, u32 ctx, | ||
214 | u32 ldur) | ||
215 | { | ||
216 | u32 wc; | ||
217 | int i; | ||
218 | |||
219 | /* set the Active bit, and the LDUR, in the debug level */ | ||
220 | npe_cmd_write(npe, ECS_DBG_CTXT_REG_0, CMD_WR_ECS_REG, | ||
221 | ECS_REG_0_ACTIVE | (ldur << ECS_REG_0_LDUR_BITS)); | ||
222 | |||
223 | /* set CCTXT at ECS DEBUG L3 to specify in which context to execute | ||
224 | the instruction, and set SELCTXT at ECS DEBUG Level to specify | ||
225 | which context store to access. | ||
226 | Debug ECS Level Reg 1 has form 0x000n000n, where n = context number | ||
227 | */ | ||
228 | npe_cmd_write(npe, ECS_DBG_CTXT_REG_1, CMD_WR_ECS_REG, | ||
229 | (ctx << ECS_REG_1_CCTXT_BITS) | | ||
230 | (ctx << ECS_REG_1_SELCTXT_BITS)); | ||
231 | |||
232 | /* clear the pipeline */ | ||
233 | __raw_writel(CMD_NPE_CLR_PIPE, &npe->regs->exec_status_cmd); | ||
234 | |||
235 | /* load NPE instruction into the instruction register */ | ||
236 | npe_cmd_write(npe, ECS_INSTRUCT_REG, CMD_WR_ECS_REG, instr); | ||
237 | |||
238 | /* we need this value later to wait for completion of NPE execution | ||
239 | step */ | ||
240 | wc = __raw_readl(&npe->regs->watch_count); | ||
241 | |||
242 | /* issue a Step One command via the Execution Control register */ | ||
243 | __raw_writel(CMD_NPE_STEP, &npe->regs->exec_status_cmd); | ||
244 | |||
245 | /* Watch Count register increments when NPE completes an instruction */ | ||
246 | for (i = 0; i < MAX_RETRIES; i++) { | ||
247 | if (wc != __raw_readl(&npe->regs->watch_count)) | ||
248 | return 0; | ||
249 | udelay(1); | ||
250 | } | ||
251 | |||
252 | print_npe(KERN_ERR, npe, "reset: npe_debug_instr(): timeout\n"); | ||
253 | return -ETIMEDOUT; | ||
254 | } | ||
255 | |||
256 | static int __must_check npe_logical_reg_write8(struct npe *npe, u32 addr, | ||
257 | u8 val, u32 ctx) | ||
258 | { | ||
259 | /* here we build the NPE assembler instruction: mov8 d0, #0 */ | ||
260 | u32 instr = INSTR_WR_REG_BYTE | /* OpCode */ | ||
261 | addr << 9 | /* base Operand */ | ||
262 | (val & 0x1F) << 4 | /* lower 5 bits to immediate data */ | ||
263 | (val & ~0x1F) << (18 - 5);/* higher 3 bits to CoProc instr. */ | ||
264 | return npe_debug_instr(npe, instr, ctx, 1); /* execute it */ | ||
265 | } | ||
266 | |||
267 | static int __must_check npe_logical_reg_write16(struct npe *npe, u32 addr, | ||
268 | u16 val, u32 ctx) | ||
269 | { | ||
270 | /* here we build the NPE assembler instruction: mov16 d0, #0 */ | ||
271 | u32 instr = INSTR_WR_REG_SHORT | /* OpCode */ | ||
272 | addr << 9 | /* base Operand */ | ||
273 | (val & 0x1F) << 4 | /* lower 5 bits to immediate data */ | ||
274 | (val & ~0x1F) << (18 - 5);/* higher 11 bits to CoProc instr. */ | ||
275 | return npe_debug_instr(npe, instr, ctx, 1); /* execute it */ | ||
276 | } | ||
277 | |||
278 | static int __must_check npe_logical_reg_write32(struct npe *npe, u32 addr, | ||
279 | u32 val, u32 ctx) | ||
280 | { | ||
281 | /* write in 16 bit steps first the high and then the low value */ | ||
282 | if (npe_logical_reg_write16(npe, addr, val >> 16, ctx)) | ||
283 | return -ETIMEDOUT; | ||
284 | return npe_logical_reg_write16(npe, addr + 2, val & 0xFFFF, ctx); | ||
285 | } | ||
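
For readers unfamiliar with the mov8/mov16 encoding used above, here is a worked example of the bit packing. It is a sketch only: the helper name is hypothetical and not part of the patch, and it simply mirrors the shifts in npe_logical_reg_write8() for the NPE_STEVT write performed later in npe_reset().

/* Worked example (illustration only): the instruction word that
 * npe_logical_reg_write8(npe, NPE_STEVT, 0x80, ctx) builds, using the
 * constants defined above.  The function name is hypothetical. */
static u32 example_mov8_encoding(void)
{
	u32 addr = NPE_STEVT;			/* 0x1B */
	u32 val = 0x80;

	return INSTR_WR_REG_BYTE		/* opcode          0x00004000 */
		| addr << 9			/* base operand    0x00003600 */
		| (val & 0x1F) << 4		/* low 5 bits      0x00000000 */
		| (val & ~0x1F) << (18 - 5);	/* high 3 bits     0x00100000 */
	/* = 0x00107600, then single-stepped via npe_debug_instr() */
}
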
286 | |||
287 | static int npe_reset(struct npe *npe) | ||
288 | { | ||
289 | u32 val, ctl, exec_count, ctx_reg2; | ||
290 | int i; | ||
291 | |||
292 | ctl = (__raw_readl(&npe->regs->messaging_control) | 0x3F000000) & | ||
293 | 0x3F3FFFFF; | ||
294 | |||
295 | /* disable parity interrupt */ | ||
296 | __raw_writel(ctl & 0x3F00FFFF, &npe->regs->messaging_control); | ||
297 | |||
298 | /* pre exec - debug instruction */ | ||
299 | /* turn off the halt bit by clearing Execution Count register. */ | ||
300 | exec_count = __raw_readl(&npe->regs->exec_count); | ||
301 | __raw_writel(0, &npe->regs->exec_count); | ||
302 | /* ensure that IF and IE are on (temporarily), so that we don't end up | ||
303 | stepping forever */ | ||
304 | ctx_reg2 = npe_cmd_read(npe, ECS_DBG_CTXT_REG_2, CMD_RD_ECS_REG); | ||
305 | npe_cmd_write(npe, ECS_DBG_CTXT_REG_2, CMD_WR_ECS_REG, ctx_reg2 | | ||
306 | ECS_DBG_REG_2_IF | ECS_DBG_REG_2_IE); | ||
307 | |||
308 | /* clear the FIFOs */ | ||
309 | while (__raw_readl(&npe->regs->watchpoint_fifo) & WFIFO_VALID) | ||
310 | ; | ||
311 | while (__raw_readl(&npe->regs->messaging_status) & MSGSTAT_OFNE) | ||
312 | /* read from the outFIFO until empty */ | ||
313 | print_npe(KERN_DEBUG, npe, "npe_reset: read FIFO = 0x%X\n", | ||
314 | __raw_readl(&npe->regs->in_out_fifo)); | ||
315 | |||
316 | while (__raw_readl(&npe->regs->messaging_status) & MSGSTAT_IFNE) | ||
317 | /* step execution of the NPE instruction to read inFIFO using | ||
318 | the Debug Executing Context stack */ | ||
319 | if (npe_debug_instr(npe, INSTR_RD_FIFO, 0, 0)) | ||
320 | return -ETIMEDOUT; | ||
321 | |||
322 | /* reset the mailbox reg from the XScale side */ | ||
323 | __raw_writel(RESET_MBOX_STAT, &npe->regs->mailbox_status); | ||
324 | /* from NPE side */ | ||
325 | if (npe_debug_instr(npe, INSTR_RESET_MBOX, 0, 0)) | ||
326 | return -ETIMEDOUT; | ||
327 | |||
328 | /* Reset the physical registers in the NPE register file */ | ||
329 | for (val = 0; val < NPE_PHYS_REG; val++) { | ||
330 | if (npe_logical_reg_write16(npe, NPE_REGMAP, val >> 1, 0)) | ||
331 | return -ETIMEDOUT; | ||
332 | /* address is either 0 or 4 */ | ||
333 | if (npe_logical_reg_write32(npe, (val & 1) * 4, 0, 0)) | ||
334 | return -ETIMEDOUT; | ||
335 | } | ||
336 | |||
337 | /* Reset the context store, i.e. each context's Context Store registers */ | ||
338 | |||
339 | /* Context 0 has no STARTPC. Instead, this value is used to set NextPC | ||
340 | for Background ECS, to set where NPE starts executing code */ | ||
341 | val = npe_cmd_read(npe, ECS_BG_CTXT_REG_0, CMD_RD_ECS_REG); | ||
342 | val &= ~ECS_REG_0_NEXTPC_MASK; | ||
343 | val |= (0 /* NextPC */ << 16) & ECS_REG_0_NEXTPC_MASK; | ||
344 | npe_cmd_write(npe, ECS_BG_CTXT_REG_0, CMD_WR_ECS_REG, val); | ||
345 | |||
346 | for (i = 0; i < 16; i++) { | ||
347 | if (i) { /* Context 0 has no STEVT nor STARTPC */ | ||
348 | /* STEVT = off, 0x80 */ | ||
349 | if (npe_logical_reg_write8(npe, NPE_STEVT, 0x80, i)) | ||
350 | return -ETIMEDOUT; | ||
351 | if (npe_logical_reg_write16(npe, NPE_STARTPC, 0, i)) | ||
352 | return -ETIMEDOUT; | ||
353 | } | ||
354 | /* REGMAP = d0->p0, d8->p2, d16->p4 */ | ||
355 | if (npe_logical_reg_write16(npe, NPE_REGMAP, 0x820, i)) | ||
356 | return -ETIMEDOUT; | ||
357 | if (npe_logical_reg_write8(npe, NPE_CINDEX, 0, i)) | ||
358 | return -ETIMEDOUT; | ||
359 | } | ||
360 | |||
361 | /* post exec */ | ||
362 | /* clear active bit in debug level */ | ||
363 | npe_cmd_write(npe, ECS_DBG_CTXT_REG_0, CMD_WR_ECS_REG, 0); | ||
364 | /* clear the pipeline */ | ||
365 | __raw_writel(CMD_NPE_CLR_PIPE, &npe->regs->exec_status_cmd); | ||
366 | /* restore previous values */ | ||
367 | __raw_writel(exec_count, &npe->regs->exec_count); | ||
368 | npe_cmd_write(npe, ECS_DBG_CTXT_REG_2, CMD_WR_ECS_REG, ctx_reg2); | ||
369 | |||
370 | /* write reset values to Execution Context Stack registers */ | ||
371 | for (val = 0; val < ARRAY_SIZE(ecs_reset); val++) | ||
372 | npe_cmd_write(npe, ecs_reset[val].reg, CMD_WR_ECS_REG, | ||
373 | ecs_reset[val].val); | ||
374 | |||
375 | /* clear the profile counter */ | ||
376 | __raw_writel(CMD_CLR_PROFILE_CNT, &npe->regs->exec_status_cmd); | ||
377 | |||
378 | __raw_writel(0, &npe->regs->exec_count); | ||
379 | __raw_writel(0, &npe->regs->action_points[0]); | ||
380 | __raw_writel(0, &npe->regs->action_points[1]); | ||
381 | __raw_writel(0, &npe->regs->action_points[2]); | ||
382 | __raw_writel(0, &npe->regs->action_points[3]); | ||
383 | __raw_writel(0, &npe->regs->watch_count); | ||
384 | |||
385 | val = ixp4xx_read_feature_bits(); | ||
386 | /* reset the NPE */ | ||
387 | ixp4xx_write_feature_bits(val & | ||
388 | ~(IXP4XX_FEATURE_RESET_NPEA << npe->id)); | ||
389 | for (i = 0; i < MAX_RETRIES; i++) { | ||
390 | if (!(ixp4xx_read_feature_bits() & | ||
391 | (IXP4XX_FEATURE_RESET_NPEA << npe->id))) | ||
392 | break; /* reset completed */ | ||
393 | udelay(1); | ||
394 | } | ||
395 | if (i == MAX_RETRIES) | ||
396 | return -ETIMEDOUT; | ||
397 | |||
398 | /* deassert reset */ | ||
399 | ixp4xx_write_feature_bits(val | | ||
400 | (IXP4XX_FEATURE_RESET_NPEA << npe->id)); | ||
401 | for (i = 0; i < MAX_RETRIES; i++) { | ||
402 | if (ixp4xx_read_feature_bits() & | ||
403 | (IXP4XX_FEATURE_RESET_NPEA << npe->id)) | ||
404 | break; /* NPE is back alive */ | ||
405 | udelay(1); | ||
406 | } | ||
407 | if (i == MAX_RETRIES) | ||
408 | return -ETIMEDOUT; | ||
409 | |||
410 | npe_stop(npe); | ||
411 | |||
412 | /* restore NPE configuration bus Control Register - parity settings */ | ||
413 | __raw_writel(ctl, &npe->regs->messaging_control); | ||
414 | return 0; | ||
415 | } | ||
416 | |||
417 | |||
418 | int npe_send_message(struct npe *npe, const void *msg, const char *what) | ||
419 | { | ||
420 | const u32 *send = msg; | ||
421 | int cycles = 0; | ||
422 | |||
423 | debug_msg(npe, "Trying to send message %s [%08X:%08X]\n", | ||
424 | what, send[0], send[1]); | ||
425 | |||
426 | if (__raw_readl(&npe->regs->messaging_status) & MSGSTAT_IFNE) { | ||
427 | debug_msg(npe, "NPE input FIFO not empty\n"); | ||
428 | return -EIO; | ||
429 | } | ||
430 | |||
431 | __raw_writel(send[0], &npe->regs->in_out_fifo); | ||
432 | |||
433 | if (!(__raw_readl(&npe->regs->messaging_status) & MSGSTAT_IFNF)) { | ||
434 | debug_msg(npe, "NPE input FIFO full\n"); | ||
435 | return -EIO; | ||
436 | } | ||
437 | |||
438 | __raw_writel(send[1], &npe->regs->in_out_fifo); | ||
439 | |||
440 | while ((cycles < MAX_RETRIES) && | ||
441 | (__raw_readl(&npe->regs->messaging_status) & MSGSTAT_IFNE)) { | ||
442 | udelay(1); | ||
443 | cycles++; | ||
444 | } | ||
445 | |||
446 | if (cycles == MAX_RETRIES) { | ||
447 | debug_msg(npe, "Timeout sending message\n"); | ||
448 | return -ETIMEDOUT; | ||
449 | } | ||
450 | |||
451 | debug_msg(npe, "Sending a message took %i cycles\n", cycles); | ||
452 | return 0; | ||
453 | } | ||
454 | |||
455 | int npe_recv_message(struct npe *npe, void *msg, const char *what) | ||
456 | { | ||
457 | u32 *recv = msg; | ||
458 | int cycles = 0, cnt = 0; | ||
459 | |||
460 | debug_msg(npe, "Trying to receive message %s\n", what); | ||
461 | |||
462 | while (cycles < MAX_RETRIES) { | ||
463 | if (__raw_readl(&npe->regs->messaging_status) & MSGSTAT_OFNE) { | ||
464 | recv[cnt++] = __raw_readl(&npe->regs->in_out_fifo); | ||
465 | if (cnt == 2) | ||
466 | break; | ||
467 | } else { | ||
468 | udelay(1); | ||
469 | cycles++; | ||
470 | } | ||
471 | } | ||
472 | |||
473 | switch(cnt) { | ||
474 | case 1: | ||
475 | debug_msg(npe, "Received [%08X]\n", recv[0]); | ||
476 | break; | ||
477 | case 2: | ||
478 | debug_msg(npe, "Received [%08X:%08X]\n", recv[0], recv[1]); | ||
479 | break; | ||
480 | } | ||
481 | |||
482 | if (cycles == MAX_RETRIES) { | ||
483 | debug_msg(npe, "Timeout waiting for message\n"); | ||
484 | return -ETIMEDOUT; | ||
485 | } | ||
486 | |||
487 | debug_msg(npe, "Receiving a message took %i cycles\n", cycles); | ||
488 | return 0; | ||
489 | } | ||
490 | |||
491 | int npe_send_recv_message(struct npe *npe, void *msg, const char *what) | ||
492 | { | ||
493 | int result; | ||
494 | u32 *send = msg, recv[2]; | ||
495 | |||
496 | if ((result = npe_send_message(npe, msg, what)) != 0) | ||
497 | return result; | ||
498 | if ((result = npe_recv_message(npe, recv, what)) != 0) | ||
499 | return result; | ||
500 | |||
501 | if ((recv[0] != send[0]) || (recv[1] != send[1])) { | ||
502 | debug_msg(npe, "Message %s: unexpected message received\n", | ||
503 | what); | ||
504 | return -EIO; | ||
505 | } | ||
506 | return 0; | ||
507 | } | ||
508 | |||
509 | |||
510 | int npe_load_firmware(struct npe *npe, const char *name, struct device *dev) | ||
511 | { | ||
512 | const struct firmware *fw_entry; | ||
513 | |||
514 | struct dl_block { | ||
515 | u32 type; | ||
516 | u32 offset; | ||
517 | } *blk; | ||
518 | |||
519 | struct dl_image { | ||
520 | u32 magic; | ||
521 | u32 id; | ||
522 | u32 size; | ||
523 | union { | ||
524 | u32 data[0]; | ||
525 | struct dl_block blocks[0]; | ||
526 | }; | ||
527 | } *image; | ||
528 | |||
529 | struct dl_codeblock { | ||
530 | u32 npe_addr; | ||
531 | u32 size; | ||
532 | u32 data[0]; | ||
533 | } *cb; | ||
534 | |||
535 | int i, j, err, data_size, instr_size, blocks, table_end; | ||
536 | u32 cmd; | ||
537 | |||
538 | if ((err = request_firmware(&fw_entry, name, dev)) != 0) | ||
539 | return err; | ||
540 | |||
541 | err = -EINVAL; | ||
542 | if (fw_entry->size < sizeof(struct dl_image)) { | ||
543 | print_npe(KERN_ERR, npe, "incomplete firmware file\n"); | ||
544 | goto err; | ||
545 | } | ||
546 | image = (struct dl_image*)fw_entry->data; | ||
547 | |||
548 | #if DEBUG_FW | ||
549 | print_npe(KERN_DEBUG, npe, "firmware: %08X %08X %08X (0x%X bytes)\n", | ||
550 | image->magic, image->id, image->size, image->size * 4); | ||
551 | #endif | ||
552 | |||
553 | if (image->magic == swab32(FW_MAGIC)) { /* swapped file */ | ||
554 | image->id = swab32(image->id); | ||
555 | image->size = swab32(image->size); | ||
556 | } else if (image->magic != FW_MAGIC) { | ||
557 | print_npe(KERN_ERR, npe, "bad firmware file magic: 0x%X\n", | ||
558 | image->magic); | ||
559 | goto err; | ||
560 | } | ||
561 | if ((image->size * 4 + sizeof(struct dl_image)) != fw_entry->size) { | ||
562 | print_npe(KERN_ERR, npe, | ||
563 | "inconsistent size of firmware file\n"); | ||
564 | goto err; | ||
565 | } | ||
566 | if (((image->id >> 24) & 0xF /* NPE ID */) != npe->id) { | ||
567 | print_npe(KERN_ERR, npe, "firmware file NPE ID mismatch\n"); | ||
568 | goto err; | ||
569 | } | ||
570 | if (image->magic == swab32(FW_MAGIC)) | ||
571 | for (i = 0; i < image->size; i++) | ||
572 | image->data[i] = swab32(image->data[i]); | ||
573 | |||
574 | if (!cpu_is_ixp46x() && ((image->id >> 28) & 0xF /* device ID */)) { | ||
575 | print_npe(KERN_INFO, npe, "IXP46x firmware ignored on " | ||
576 | "IXP42x\n"); | ||
577 | goto err; | ||
578 | } | ||
579 | |||
580 | if (npe_running(npe)) { | ||
581 | print_npe(KERN_INFO, npe, "unable to load firmware, NPE is " | ||
582 | "already running\n"); | ||
583 | err = -EBUSY; | ||
584 | goto err; | ||
585 | } | ||
586 | #if 0 | ||
587 | npe_stop(npe); | ||
588 | npe_reset(npe); | ||
589 | #endif | ||
590 | |||
591 | print_npe(KERN_INFO, npe, "firmware functionality 0x%X, " | ||
592 | "revision 0x%X:%X\n", (image->id >> 16) & 0xFF, | ||
593 | (image->id >> 8) & 0xFF, image->id & 0xFF); | ||
594 | |||
595 | if (!cpu_is_ixp46x()) { | ||
596 | if (!npe->id) | ||
597 | instr_size = NPE_A_42X_INSTR_SIZE; | ||
598 | else | ||
599 | instr_size = NPE_B_AND_C_42X_INSTR_SIZE; | ||
600 | data_size = NPE_42X_DATA_SIZE; | ||
601 | } else { | ||
602 | instr_size = NPE_46X_INSTR_SIZE; | ||
603 | data_size = NPE_46X_DATA_SIZE; | ||
604 | } | ||
605 | |||
606 | for (blocks = 0; blocks * sizeof(struct dl_block) / 4 < image->size; | ||
607 | blocks++) | ||
608 | if (image->blocks[blocks].type == FW_BLOCK_TYPE_EOF) | ||
609 | break; | ||
610 | if (blocks * sizeof(struct dl_block) / 4 >= image->size) { | ||
611 | print_npe(KERN_INFO, npe, "firmware EOF block marker not " | ||
612 | "found\n"); | ||
613 | goto err; | ||
614 | } | ||
615 | |||
616 | #if DEBUG_FW | ||
617 | print_npe(KERN_DEBUG, npe, "%i firmware blocks found\n", blocks); | ||
618 | #endif | ||
619 | |||
620 | table_end = blocks * sizeof(struct dl_block) / 4 + 1 /* EOF marker */; | ||
621 | for (i = 0, blk = image->blocks; i < blocks; i++, blk++) { | ||
622 | if (blk->offset > image->size - sizeof(struct dl_codeblock) / 4 | ||
623 | || blk->offset < table_end) { | ||
624 | print_npe(KERN_INFO, npe, "invalid offset 0x%X of " | ||
625 | "firmware block #%i\n", blk->offset, i); | ||
626 | goto err; | ||
627 | } | ||
628 | |||
629 | cb = (struct dl_codeblock*)&image->data[blk->offset]; | ||
630 | if (blk->type == FW_BLOCK_TYPE_INSTR) { | ||
631 | if (cb->npe_addr + cb->size > instr_size) | ||
632 | goto too_big; | ||
633 | cmd = CMD_WR_INS_MEM; | ||
634 | } else if (blk->type == FW_BLOCK_TYPE_DATA) { | ||
635 | if (cb->npe_addr + cb->size > data_size) | ||
636 | goto too_big; | ||
637 | cmd = CMD_WR_DATA_MEM; | ||
638 | } else { | ||
639 | print_npe(KERN_INFO, npe, "invalid firmware block #%i " | ||
640 | "type 0x%X\n", i, blk->type); | ||
641 | goto err; | ||
642 | } | ||
643 | if (blk->offset + sizeof(*cb) / 4 + cb->size > image->size) { | ||
644 | print_npe(KERN_INFO, npe, "firmware block #%i doesn't " | ||
645 | "fit in firmware image: type %c, start 0x%X," | ||
646 | " length 0x%X\n", i, | ||
647 | blk->type == FW_BLOCK_TYPE_INSTR ? 'I' : 'D', | ||
648 | cb->npe_addr, cb->size); | ||
649 | goto err; | ||
650 | } | ||
651 | |||
652 | for (j = 0; j < cb->size; j++) | ||
653 | npe_cmd_write(npe, cb->npe_addr + j, cmd, cb->data[j]); | ||
654 | } | ||
655 | |||
656 | npe_start(npe); | ||
657 | if (!npe_running(npe)) | ||
658 | print_npe(KERN_ERR, npe, "unable to start\n"); | ||
659 | release_firmware(fw_entry); | ||
660 | return 0; | ||
661 | |||
662 | too_big: | ||
663 | print_npe(KERN_INFO, npe, "firmware block #%i doesn't fit in NPE " | ||
664 | "memory: type %c, start 0x%X, length 0x%X\n", i, | ||
665 | blk->type == FW_BLOCK_TYPE_INSTR ? 'I' : 'D', | ||
666 | cb->npe_addr, cb->size); | ||
667 | err: | ||
668 | release_firmware(fw_entry); | ||
669 | return err; | ||
670 | } | ||
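
To make the loader's expectations concrete, the sketch below lays out a minimal firmware image in the order the parser above reads it. All numeric values (NPE id, functionality, revision, the single instruction word) are illustrative placeholders; only the field order and the FW_* constants come from the code above.

/* Minimal image accepted by npe_load_firmware(): dl_image header, a block
 * table terminated by an EOF entry, then one dl_codeblock.  Word values
 * are illustrative placeholders, not real firmware. */
static const u32 example_fw_image[] = {
	0xFEEDF00D,		/* dl_image.magic = FW_MAGIC                  */
	0x01012345,		/* dl_image.id: IXP42x, NPE-B, functionality
				   0x01, revision 0x23:0x45                   */
	7,			/* dl_image.size: data words after the header */
	FW_BLOCK_TYPE_INSTR,	/* blocks[0].type                             */
	4,			/* blocks[0].offset: dl_codeblock at data[4]  */
	FW_BLOCK_TYPE_EOF,	/* end-of-table marker                        */
	0,			/* (unused offset field of the EOF entry)     */
	0x0,			/* dl_codeblock.npe_addr                      */
	1,			/* dl_codeblock.size: one code word follows   */
	0x00000000,		/* the instruction word itself (placeholder)  */
};
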
671 | |||
672 | |||
673 | struct npe *npe_request(int id) | ||
674 | { | ||
675 | if (id < NPE_COUNT) | ||
676 | if (npe_tab[id].valid) | ||
677 | if (try_module_get(THIS_MODULE)) | ||
678 | return &npe_tab[id]; | ||
679 | return NULL; | ||
680 | } | ||
681 | |||
682 | void npe_release(struct npe *npe) | ||
683 | { | ||
684 | module_put(THIS_MODULE); | ||
685 | } | ||
686 | |||
687 | |||
688 | static int __init npe_init_module(void) | ||
689 | { | ||
690 | |||
691 | int i, found = 0; | ||
692 | |||
693 | for (i = 0; i < NPE_COUNT; i++) { | ||
694 | struct npe *npe = &npe_tab[i]; | ||
695 | if (!(ixp4xx_read_feature_bits() & | ||
696 | (IXP4XX_FEATURE_RESET_NPEA << i))) | ||
697 | continue; /* NPE already disabled or not present */ | ||
698 | if (!(npe->mem_res = request_mem_region(npe->regs_phys, | ||
699 | REGS_SIZE, | ||
700 | npe_name(npe)))) { | ||
701 | print_npe(KERN_ERR, npe, | ||
702 | "failed to request memory region\n"); | ||
703 | continue; | ||
704 | } | ||
705 | |||
706 | if (npe_reset(npe)) | ||
707 | continue; | ||
708 | npe->valid = 1; | ||
709 | found++; | ||
710 | } | ||
711 | |||
712 | if (!found) | ||
713 | return -ENOSYS; | ||
714 | return 0; | ||
715 | } | ||
716 | |||
717 | static void __exit npe_cleanup_module(void) | ||
718 | { | ||
719 | int i; | ||
720 | |||
721 | for (i = 0; i < NPE_COUNT; i++) | ||
722 | if (npe_tab[i].mem_res) { | ||
723 | npe_reset(&npe_tab[i]); | ||
724 | release_resource(npe_tab[i].mem_res); | ||
725 | } | ||
726 | } | ||
727 | |||
728 | module_init(npe_init_module); | ||
729 | module_exit(npe_cleanup_module); | ||
730 | |||
731 | MODULE_AUTHOR("Krzysztof Halasa"); | ||
732 | MODULE_LICENSE("GPL v2"); | ||
733 | |||
734 | EXPORT_SYMBOL(npe_names); | ||
735 | EXPORT_SYMBOL(npe_running); | ||
736 | EXPORT_SYMBOL(npe_request); | ||
737 | EXPORT_SYMBOL(npe_release); | ||
738 | EXPORT_SYMBOL(npe_load_firmware); | ||
739 | EXPORT_SYMBOL(npe_send_message); | ||
740 | EXPORT_SYMBOL(npe_recv_message); | ||
741 | EXPORT_SYMBOL(npe_send_recv_message); | ||
diff --git a/arch/arm/mach-ixp4xx/ixp4xx_qmgr.c b/arch/arm/mach-ixp4xx/ixp4xx_qmgr.c
new file mode 100644
index 000000000000..e83301325301
--- /dev/null
+++ b/arch/arm/mach-ixp4xx/ixp4xx_qmgr.c
@@ -0,0 +1,274 @@
1 | /* | ||
2 | * Intel IXP4xx Queue Manager driver for Linux | ||
3 | * | ||
4 | * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of version 2 of the GNU General Public License | ||
8 | * as published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #include <linux/ioport.h> | ||
12 | #include <linux/interrupt.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <asm/arch/qmgr.h> | ||
16 | |||
17 | #define DEBUG 0 | ||
18 | |||
19 | struct qmgr_regs __iomem *qmgr_regs; | ||
20 | static struct resource *mem_res; | ||
21 | static spinlock_t qmgr_lock; | ||
22 | static u32 used_sram_bitmap[4]; /* 128 16-dword pages */ | ||
23 | static void (*irq_handlers[HALF_QUEUES])(void *pdev); | ||
24 | static void *irq_pdevs[HALF_QUEUES]; | ||
25 | |||
26 | void qmgr_set_irq(unsigned int queue, int src, | ||
27 | void (*handler)(void *pdev), void *pdev) | ||
28 | { | ||
29 | u32 __iomem *reg = &qmgr_regs->irqsrc[queue / 8]; /* 8 queues / u32 */ | ||
30 | int bit = (queue % 8) * 4; /* 3 bits + 1 reserved bit per queue */ | ||
31 | unsigned long flags; | ||
32 | |||
33 | src &= 7; | ||
34 | spin_lock_irqsave(&qmgr_lock, flags); | ||
35 | __raw_writel((__raw_readl(reg) & ~(7 << bit)) | (src << bit), reg); | ||
36 | irq_handlers[queue] = handler; | ||
37 | irq_pdevs[queue] = pdev; | ||
38 | spin_unlock_irqrestore(&qmgr_lock, flags); | ||
39 | } | ||
40 | |||
41 | |||
42 | static irqreturn_t qmgr_irq1(int irq, void *pdev) | ||
43 | { | ||
44 | int i; | ||
45 | u32 val = __raw_readl(&qmgr_regs->irqstat[0]); | ||
46 | __raw_writel(val, &qmgr_regs->irqstat[0]); /* ACK */ | ||
47 | |||
48 | for (i = 0; i < HALF_QUEUES; i++) | ||
49 | if (val & (1 << i)) | ||
50 | irq_handlers[i](irq_pdevs[i]); | ||
51 | |||
52 | return val ? IRQ_HANDLED : 0; | ||
53 | } | ||
54 | |||
55 | |||
56 | void qmgr_enable_irq(unsigned int queue) | ||
57 | { | ||
58 | unsigned long flags; | ||
59 | |||
60 | spin_lock_irqsave(&qmgr_lock, flags); | ||
61 | __raw_writel(__raw_readl(&qmgr_regs->irqen[0]) | (1 << queue), | ||
62 | &qmgr_regs->irqen[0]); | ||
63 | spin_unlock_irqrestore(&qmgr_lock, flags); | ||
64 | } | ||
65 | |||
66 | void qmgr_disable_irq(unsigned int queue) | ||
67 | { | ||
68 | unsigned long flags; | ||
69 | |||
70 | spin_lock_irqsave(&qmgr_lock, flags); | ||
71 | __raw_writel(__raw_readl(&qmgr_regs->irqen[0]) & ~(1 << queue), | ||
72 | &qmgr_regs->irqen[0]); | ||
73 | spin_unlock_irqrestore(&qmgr_lock, flags); | ||
74 | } | ||
75 | |||
76 | static inline void shift_mask(u32 *mask) | ||
77 | { | ||
78 | mask[3] = mask[3] << 1 | mask[2] >> 31; | ||
79 | mask[2] = mask[2] << 1 | mask[1] >> 31; | ||
80 | mask[1] = mask[1] << 1 | mask[0] >> 31; | ||
81 | mask[0] <<= 1; | ||
82 | } | ||
83 | |||
84 | int qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */, | ||
85 | unsigned int nearly_empty_watermark, | ||
86 | unsigned int nearly_full_watermark) | ||
87 | { | ||
88 | u32 cfg, addr = 0, mask[4]; /* in 16-dwords */ | ||
89 | int err; | ||
90 | |||
91 | if (queue >= HALF_QUEUES) | ||
92 | return -ERANGE; | ||
93 | |||
94 | if ((nearly_empty_watermark | nearly_full_watermark) & ~7) | ||
95 | return -EINVAL; | ||
96 | |||
97 | switch (len) { | ||
98 | case 16: | ||
99 | cfg = 0 << 24; | ||
100 | mask[0] = 0x1; | ||
101 | break; | ||
102 | case 32: | ||
103 | cfg = 1 << 24; | ||
104 | mask[0] = 0x3; | ||
105 | break; | ||
106 | case 64: | ||
107 | cfg = 2 << 24; | ||
108 | mask[0] = 0xF; | ||
109 | break; | ||
110 | case 128: | ||
111 | cfg = 3 << 24; | ||
112 | mask[0] = 0xFF; | ||
113 | break; | ||
114 | default: | ||
115 | return -EINVAL; | ||
116 | } | ||
117 | |||
118 | cfg |= nearly_empty_watermark << 26; | ||
119 | cfg |= nearly_full_watermark << 29; | ||
120 | len /= 16; /* in 16-dwords: 1, 2, 4 or 8 */ | ||
121 | mask[1] = mask[2] = mask[3] = 0; | ||
122 | |||
123 | if (!try_module_get(THIS_MODULE)) | ||
124 | return -ENODEV; | ||
125 | |||
126 | spin_lock_irq(&qmgr_lock); | ||
127 | if (__raw_readl(&qmgr_regs->sram[queue])) { | ||
128 | err = -EBUSY; | ||
129 | goto err; | ||
130 | } | ||
131 | |||
132 | while (1) { | ||
133 | if (!(used_sram_bitmap[0] & mask[0]) && | ||
134 | !(used_sram_bitmap[1] & mask[1]) && | ||
135 | !(used_sram_bitmap[2] & mask[2]) && | ||
136 | !(used_sram_bitmap[3] & mask[3])) | ||
137 | break; /* found free space */ | ||
138 | |||
139 | addr++; | ||
140 | shift_mask(mask); | ||
141 | if (addr + len > ARRAY_SIZE(qmgr_regs->sram)) { | ||
142 | printk(KERN_ERR "qmgr: no free SRAM space for" | ||
143 | " queue %i\n", queue); | ||
144 | err = -ENOMEM; | ||
145 | goto err; | ||
146 | } | ||
147 | } | ||
148 | |||
149 | used_sram_bitmap[0] |= mask[0]; | ||
150 | used_sram_bitmap[1] |= mask[1]; | ||
151 | used_sram_bitmap[2] |= mask[2]; | ||
152 | used_sram_bitmap[3] |= mask[3]; | ||
153 | __raw_writel(cfg | (addr << 14), &qmgr_regs->sram[queue]); | ||
154 | spin_unlock_irq(&qmgr_lock); | ||
155 | |||
156 | #if DEBUG | ||
157 | printk(KERN_DEBUG "qmgr: requested queue %i, addr = 0x%02X\n", | ||
158 | queue, addr); | ||
159 | #endif | ||
160 | return 0; | ||
161 | |||
162 | err: | ||
163 | spin_unlock_irq(&qmgr_lock); | ||
164 | module_put(THIS_MODULE); | ||
165 | return err; | ||
166 | } | ||
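
The first-fit search above can also be read in isolation. Below is a simplified model of it (a hypothetical helper, not part of the patch) that ignores the per-queue configuration word and locking; right after qmgr_init() it would return 4 for a 64-dword queue, since pages 0-3 are reserved for configuration.

/* Simplified model of the SRAM search in qmgr_request_queue() (hypothetical
 * helper, illustration only).  'pages' is the queue length in 16-dword
 * pages (1, 2, 4 or 8); returns the first free page or -1 if none fits. */
static int example_find_free_pages(const u32 bitmap[4], unsigned int pages)
{
	u32 mask[4] = { (1 << pages) - 1, 0, 0, 0 };
	unsigned int addr;

	for (addr = 0; addr + pages <= 128; addr++) {
		if (!(bitmap[0] & mask[0]) && !(bitmap[1] & mask[1]) &&
		    !(bitmap[2] & mask[2]) && !(bitmap[3] & mask[3]))
			return addr;	/* first free run of pages */
		shift_mask(mask);
	}
	return -1;
}
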
167 | |||
168 | void qmgr_release_queue(unsigned int queue) | ||
169 | { | ||
170 | u32 cfg, addr, mask[4]; | ||
171 | |||
172 | BUG_ON(queue >= HALF_QUEUES); /* not in valid range */ | ||
173 | |||
174 | spin_lock_irq(&qmgr_lock); | ||
175 | cfg = __raw_readl(&qmgr_regs->sram[queue]); | ||
176 | addr = (cfg >> 14) & 0xFF; | ||
177 | |||
178 | BUG_ON(!addr); /* not requested */ | ||
179 | |||
180 | switch ((cfg >> 24) & 3) { | ||
181 | case 0: mask[0] = 0x1; break; | ||
182 | case 1: mask[0] = 0x3; break; | ||
183 | case 2: mask[0] = 0xF; break; | ||
184 | case 3: mask[0] = 0xFF; break; | ||
185 | } | ||
186 | |||
187 | while (addr--) | ||
188 | shift_mask(mask); | ||
189 | |||
190 | __raw_writel(0, &qmgr_regs->sram[queue]); | ||
191 | |||
192 | used_sram_bitmap[0] &= ~mask[0]; | ||
193 | used_sram_bitmap[1] &= ~mask[1]; | ||
194 | used_sram_bitmap[2] &= ~mask[2]; | ||
195 | used_sram_bitmap[3] &= ~mask[3]; | ||
196 | irq_handlers[queue] = NULL; /* catch IRQ bugs */ | ||
197 | spin_unlock_irq(&qmgr_lock); | ||
198 | |||
199 | module_put(THIS_MODULE); | ||
200 | #if DEBUG | ||
201 | printk(KERN_DEBUG "qmgr: released queue %i\n", queue); | ||
202 | #endif | ||
203 | } | ||
204 | |||
205 | static int qmgr_init(void) | ||
206 | { | ||
207 | int i, err; | ||
208 | mem_res = request_mem_region(IXP4XX_QMGR_BASE_PHYS, | ||
209 | IXP4XX_QMGR_REGION_SIZE, | ||
210 | "IXP4xx Queue Manager"); | ||
211 | if (mem_res == NULL) | ||
212 | return -EBUSY; | ||
213 | |||
214 | qmgr_regs = ioremap(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE); | ||
215 | if (qmgr_regs == NULL) { | ||
216 | err = -ENOMEM; | ||
217 | goto error_map; | ||
218 | } | ||
219 | |||
220 | /* reset qmgr registers */ | ||
221 | for (i = 0; i < 4; i++) { | ||
222 | __raw_writel(0x33333333, &qmgr_regs->stat1[i]); | ||
223 | __raw_writel(0, &qmgr_regs->irqsrc[i]); | ||
224 | } | ||
225 | for (i = 0; i < 2; i++) { | ||
226 | __raw_writel(0, &qmgr_regs->stat2[i]); | ||
227 | __raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[i]); /* clear */ | ||
228 | __raw_writel(0, &qmgr_regs->irqen[i]); | ||
229 | } | ||
230 | |||
231 | for (i = 0; i < QUEUES; i++) | ||
232 | __raw_writel(0, &qmgr_regs->sram[i]); | ||
233 | |||
234 | err = request_irq(IRQ_IXP4XX_QM1, qmgr_irq1, 0, | ||
235 | "IXP4xx Queue Manager", NULL); | ||
236 | if (err) { | ||
237 | printk(KERN_ERR "qmgr: failed to request IRQ%i\n", | ||
238 | IRQ_IXP4XX_QM1); | ||
239 | goto error_irq; | ||
240 | } | ||
241 | |||
242 | used_sram_bitmap[0] = 0xF; /* first 4 pages reserved for config */ | ||
243 | spin_lock_init(&qmgr_lock); | ||
244 | |||
245 | printk(KERN_INFO "IXP4xx Queue Manager initialized.\n"); | ||
246 | return 0; | ||
247 | |||
248 | error_irq: | ||
249 | iounmap(qmgr_regs); | ||
250 | error_map: | ||
251 | release_mem_region(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE); | ||
252 | return err; | ||
253 | } | ||
254 | |||
255 | static void qmgr_remove(void) | ||
256 | { | ||
257 | free_irq(IRQ_IXP4XX_QM1, NULL); | ||
258 | synchronize_irq(IRQ_IXP4XX_QM1); | ||
259 | iounmap(qmgr_regs); | ||
260 | release_mem_region(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE); | ||
261 | } | ||
262 | |||
263 | module_init(qmgr_init); | ||
264 | module_exit(qmgr_remove); | ||
265 | |||
266 | MODULE_LICENSE("GPL v2"); | ||
267 | MODULE_AUTHOR("Krzysztof Halasa"); | ||
268 | |||
269 | EXPORT_SYMBOL(qmgr_regs); | ||
270 | EXPORT_SYMBOL(qmgr_set_irq); | ||
271 | EXPORT_SYMBOL(qmgr_enable_irq); | ||
272 | EXPORT_SYMBOL(qmgr_disable_irq); | ||
273 | EXPORT_SYMBOL(qmgr_request_queue); | ||
274 | EXPORT_SYMBOL(qmgr_release_queue); | ||
diff --git a/include/asm-arm/arch-ixp4xx/npe.h b/include/asm-arm/arch-ixp4xx/npe.h
new file mode 100644
index 000000000000..37d0511689dc
--- /dev/null
+++ b/include/asm-arm/arch-ixp4xx/npe.h
@@ -0,0 +1,39 @@
1 | #ifndef __IXP4XX_NPE_H | ||
2 | #define __IXP4XX_NPE_H | ||
3 | |||
4 | #include <linux/kernel.h> | ||
5 | |||
6 | extern const char *npe_names[]; | ||
7 | |||
8 | struct npe_regs { | ||
9 | u32 exec_addr, exec_data, exec_status_cmd, exec_count; | ||
10 | u32 action_points[4]; | ||
11 | u32 watchpoint_fifo, watch_count; | ||
12 | u32 profile_count; | ||
13 | u32 messaging_status, messaging_control; | ||
14 | u32 mailbox_status, /*messaging_*/ in_out_fifo; | ||
15 | }; | ||
16 | |||
17 | struct npe { | ||
18 | struct resource *mem_res; | ||
19 | struct npe_regs __iomem *regs; | ||
20 | u32 regs_phys; | ||
21 | int id; | ||
22 | int valid; | ||
23 | }; | ||
24 | |||
25 | |||
26 | static inline const char *npe_name(struct npe *npe) | ||
27 | { | ||
28 | return npe_names[npe->id]; | ||
29 | } | ||
30 | |||
31 | int npe_running(struct npe *npe); | ||
32 | int npe_send_message(struct npe *npe, const void *msg, const char *what); | ||
33 | int npe_recv_message(struct npe *npe, void *msg, const char *what); | ||
34 | int npe_send_recv_message(struct npe *npe, void *msg, const char *what); | ||
35 | int npe_load_firmware(struct npe *npe, const char *name, struct device *dev); | ||
36 | struct npe *npe_request(int id); | ||
37 | void npe_release(struct npe *npe); | ||
38 | |||
39 | #endif /* __IXP4XX_NPE_H */ | ||
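
A hedged usage sketch of this interface follows, roughly how a client driver (Ethernet, HSS) might drive it, assuming <asm/arch/npe.h> is included. The NPE id, firmware name, message contents and function name are illustrative assumptions, not part of the patch.

/* Illustrative consumer of the NPE API; all specifics are placeholders. */
static int example_bring_up_npe(struct device *dev)
{
	struct npe *npe = npe_request(1);		/* NPE-B */
	u32 msg[2] = { 0x12345678, 0x9ABCDEF0 };	/* two-word message */
	int err;

	if (!npe)
		return -ENODEV;

	if (!npe_running(npe)) {
		err = npe_load_firmware(npe, "NPE-B", dev); /* fw name assumed */
		if (err) {
			npe_release(npe);
			return err;
		}
	}

	/* the NPE is expected to echo the two words back on success */
	err = npe_send_recv_message(npe, msg, "example request");
	if (err)
		npe_release(npe);
	return err;
}
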
diff --git a/include/asm-arm/arch-ixp4xx/qmgr.h b/include/asm-arm/arch-ixp4xx/qmgr.h
new file mode 100644
index 000000000000..1e52b95cede5
--- /dev/null
+++ b/include/asm-arm/arch-ixp4xx/qmgr.h
@@ -0,0 +1,126 @@
1 | /* | ||
2 | * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl> | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify it | ||
5 | * under the terms of version 2 of the GNU General Public License | ||
6 | * as published by the Free Software Foundation. | ||
7 | */ | ||
8 | |||
9 | #ifndef IXP4XX_QMGR_H | ||
10 | #define IXP4XX_QMGR_H | ||
11 | |||
12 | #include <linux/io.h> | ||
13 | #include <linux/kernel.h> | ||
14 | |||
15 | #define HALF_QUEUES 32 | ||
16 | #define QUEUES 64 /* only the lower 32 queues are currently supported */ | ||
17 | #define MAX_QUEUE_LENGTH 4 /* in dwords */ | ||
18 | |||
19 | #define QUEUE_STAT1_EMPTY 1 /* queue status bits */ | ||
20 | #define QUEUE_STAT1_NEARLY_EMPTY 2 | ||
21 | #define QUEUE_STAT1_NEARLY_FULL 4 | ||
22 | #define QUEUE_STAT1_FULL 8 | ||
23 | #define QUEUE_STAT2_UNDERFLOW 1 | ||
24 | #define QUEUE_STAT2_OVERFLOW 2 | ||
25 | |||
26 | #define QUEUE_WATERMARK_0_ENTRIES 0 | ||
27 | #define QUEUE_WATERMARK_1_ENTRY 1 | ||
28 | #define QUEUE_WATERMARK_2_ENTRIES 2 | ||
29 | #define QUEUE_WATERMARK_4_ENTRIES 3 | ||
30 | #define QUEUE_WATERMARK_8_ENTRIES 4 | ||
31 | #define QUEUE_WATERMARK_16_ENTRIES 5 | ||
32 | #define QUEUE_WATERMARK_32_ENTRIES 6 | ||
33 | #define QUEUE_WATERMARK_64_ENTRIES 7 | ||
34 | |||
35 | /* queue interrupt request conditions */ | ||
36 | #define QUEUE_IRQ_SRC_EMPTY 0 | ||
37 | #define QUEUE_IRQ_SRC_NEARLY_EMPTY 1 | ||
38 | #define QUEUE_IRQ_SRC_NEARLY_FULL 2 | ||
39 | #define QUEUE_IRQ_SRC_FULL 3 | ||
40 | #define QUEUE_IRQ_SRC_NOT_EMPTY 4 | ||
41 | #define QUEUE_IRQ_SRC_NOT_NEARLY_EMPTY 5 | ||
42 | #define QUEUE_IRQ_SRC_NOT_NEARLY_FULL 6 | ||
43 | #define QUEUE_IRQ_SRC_NOT_FULL 7 | ||
44 | |||
45 | struct qmgr_regs { | ||
46 | u32 acc[QUEUES][MAX_QUEUE_LENGTH]; /* 0x000 - 0x3FF */ | ||
47 | u32 stat1[4]; /* 0x400 - 0x40F */ | ||
48 | u32 stat2[2]; /* 0x410 - 0x417 */ | ||
49 | u32 statne_h; /* 0x418 - queue nearly empty */ | ||
50 | u32 statf_h; /* 0x41C - queue full */ | ||
51 | u32 irqsrc[4]; /* 0x420 - 0x42F IRQ source */ | ||
52 | u32 irqen[2]; /* 0x430 - 0x437 IRQ enabled */ | ||
53 | u32 irqstat[2]; /* 0x438 - 0x43F - IRQ access only */ | ||
54 | u32 reserved[1776]; | ||
55 | u32 sram[2048]; /* 0x2000 - 0x3FFF - config and buffer */ | ||
56 | }; | ||
57 | |||
58 | void qmgr_set_irq(unsigned int queue, int src, | ||
59 | void (*handler)(void *pdev), void *pdev); | ||
60 | void qmgr_enable_irq(unsigned int queue); | ||
61 | void qmgr_disable_irq(unsigned int queue); | ||
62 | |||
63 | /* request_ and release_queue() must be called from non-IRQ context */ | ||
64 | int qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */, | ||
65 | unsigned int nearly_empty_watermark, | ||
66 | unsigned int nearly_full_watermark); | ||
67 | void qmgr_release_queue(unsigned int queue); | ||
68 | |||
69 | |||
70 | static inline void qmgr_put_entry(unsigned int queue, u32 val) | ||
71 | { | ||
72 | extern struct qmgr_regs __iomem *qmgr_regs; | ||
73 | __raw_writel(val, &qmgr_regs->acc[queue][0]); | ||
74 | } | ||
75 | |||
76 | static inline u32 qmgr_get_entry(unsigned int queue) | ||
77 | { | ||
78 | extern struct qmgr_regs __iomem *qmgr_regs; | ||
79 | return __raw_readl(&qmgr_regs->acc[queue][0]); | ||
80 | } | ||
81 | |||
82 | static inline int qmgr_get_stat1(unsigned int queue) | ||
83 | { | ||
84 | extern struct qmgr_regs __iomem *qmgr_regs; | ||
85 | return (__raw_readl(&qmgr_regs->stat1[queue >> 3]) | ||
86 | >> ((queue & 7) << 2)) & 0xF; | ||
87 | } | ||
88 | |||
89 | static inline int qmgr_get_stat2(unsigned int queue) | ||
90 | { | ||
91 | extern struct qmgr_regs __iomem *qmgr_regs; | ||
92 | return (__raw_readl(&qmgr_regs->stat2[queue >> 4]) | ||
93 | >> ((queue & 0xF) << 1)) & 0x3; | ||
94 | } | ||
95 | |||
96 | static inline int qmgr_stat_empty(unsigned int queue) | ||
97 | { | ||
98 | return !!(qmgr_get_stat1(queue) & QUEUE_STAT1_EMPTY); | ||
99 | } | ||
100 | |||
101 | static inline int qmgr_stat_nearly_empty(unsigned int queue) | ||
102 | { | ||
103 | return !!(qmgr_get_stat1(queue) & QUEUE_STAT1_NEARLY_EMPTY); | ||
104 | } | ||
105 | |||
106 | static inline int qmgr_stat_nearly_full(unsigned int queue) | ||
107 | { | ||
108 | return !!(qmgr_get_stat1(queue) & QUEUE_STAT1_NEARLY_FULL); | ||
109 | } | ||
110 | |||
111 | static inline int qmgr_stat_full(unsigned int queue) | ||
112 | { | ||
113 | return !!(qmgr_get_stat1(queue) & QUEUE_STAT1_FULL); | ||
114 | } | ||
115 | |||
116 | static inline int qmgr_stat_underflow(unsigned int queue) | ||
117 | { | ||
118 | return !!(qmgr_get_stat2(queue) & QUEUE_STAT2_UNDERFLOW); | ||
119 | } | ||
120 | |||
121 | static inline int qmgr_stat_overflow(unsigned int queue) | ||
122 | { | ||
123 | return !!(qmgr_get_stat2(queue) & QUEUE_STAT2_OVERFLOW); | ||
124 | } | ||
125 | |||
126 | #endif | ||
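
And a similar sketch for the queue manager side: how a driver might claim a queue, attach an interrupt handler and drain entries. The queue number, queue length and function names are arbitrary choices for the example; only the functions and constants come from this header.

/* Illustrative queue consumer; queue 3 and the 64-dword length are
 * placeholders chosen for the example. */
static void example_rx_ready(void *pdev)
{
	const unsigned int queue = 3;

	while (!qmgr_stat_empty(queue)) {
		u32 entry = qmgr_get_entry(queue);
		/* hand 'entry' (e.g. a buffer handle) to the driver owning pdev */
		(void)entry;
	}
}

static int example_open_queue(void *pdev)
{
	const unsigned int queue = 3;
	int err;

	err = qmgr_request_queue(queue, 64, QUEUE_WATERMARK_0_ENTRIES,
				 QUEUE_WATERMARK_0_ENTRIES);
	if (err)
		return err;

	/* interrupt whenever the queue becomes non-empty */
	qmgr_set_irq(queue, QUEUE_IRQ_SRC_NOT_EMPTY, example_rx_ready, pdev);
	qmgr_enable_irq(queue);
	return 0;
}
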