Diffstat (limited to 'arch/powerpc/sysdev')
-rw-r--r--  arch/powerpc/sysdev/Makefile            |    3
-rw-r--r--  arch/powerpc/sysdev/axonram.c           |    1
-rw-r--r--  arch/powerpc/sysdev/fsl_ifc.c           |  310
-rw-r--r--  arch/powerpc/sysdev/fsl_lbc.c           |   36
-rw-r--r--  arch/powerpc/sysdev/fsl_msi.c           |   99
-rw-r--r--  arch/powerpc/sysdev/fsl_msi.h           |   10
-rw-r--r--  arch/powerpc/sysdev/fsl_pci.c           |   84
-rw-r--r--  arch/powerpc/sysdev/fsl_rio.c           | 1519
-rw-r--r--  arch/powerpc/sysdev/fsl_rio.h           |  135
-rw-r--r--  arch/powerpc/sysdev/fsl_rmu.c           | 1104
-rw-r--r--  arch/powerpc/sysdev/mpic.c              |  201
-rw-r--r--  arch/powerpc/sysdev/ppc4xx_cpm.c        |    6
-rw-r--r--  arch/powerpc/sysdev/ppc4xx_pci.c        |   85
-rw-r--r--  arch/powerpc/sysdev/ppc4xx_pci.h        |    7
-rw-r--r--  arch/powerpc/sysdev/qe_lib/gpio.c       |   42
-rw-r--r--  arch/powerpc/sysdev/qe_lib/qe_ic.c      |   12
-rw-r--r--  arch/powerpc/sysdev/uic.c               |    1
-rw-r--r--  arch/powerpc/sysdev/xics/icp-hv.c       |   47
-rw-r--r--  arch/powerpc/sysdev/xics/xics-common.c  |    2
19 files changed, 2288 insertions, 1416 deletions
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index 84e13253aec5..5e37b4717864 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -17,10 +17,11 @@ obj-$(CONFIG_FSL_SOC) += fsl_soc.o
17 | obj-$(CONFIG_FSL_PCI) += fsl_pci.o $(fsl-msi-obj-y) | 17 | obj-$(CONFIG_FSL_PCI) += fsl_pci.o $(fsl-msi-obj-y) |
18 | obj-$(CONFIG_FSL_PMC) += fsl_pmc.o | 18 | obj-$(CONFIG_FSL_PMC) += fsl_pmc.o |
19 | obj-$(CONFIG_FSL_LBC) += fsl_lbc.o | 19 | obj-$(CONFIG_FSL_LBC) += fsl_lbc.o |
20 | obj-$(CONFIG_FSL_IFC) += fsl_ifc.o | ||
20 | obj-$(CONFIG_FSL_GTM) += fsl_gtm.o | 21 | obj-$(CONFIG_FSL_GTM) += fsl_gtm.o |
21 | obj-$(CONFIG_FSL_85XX_CACHE_SRAM) += fsl_85xx_l2ctlr.o fsl_85xx_cache_sram.o | 22 | obj-$(CONFIG_FSL_85XX_CACHE_SRAM) += fsl_85xx_l2ctlr.o fsl_85xx_cache_sram.o |
22 | obj-$(CONFIG_SIMPLE_GPIO) += simple_gpio.o | 23 | obj-$(CONFIG_SIMPLE_GPIO) += simple_gpio.o |
23 | obj-$(CONFIG_FSL_RIO) += fsl_rio.o | 24 | obj-$(CONFIG_FSL_RIO) += fsl_rio.o fsl_rmu.o |
24 | obj-$(CONFIG_TSI108_BRIDGE) += tsi108_pci.o tsi108_dev.o | 25 | obj-$(CONFIG_TSI108_BRIDGE) += tsi108_pci.o tsi108_dev.o |
25 | obj-$(CONFIG_QUICC_ENGINE) += qe_lib/ | 26 | obj-$(CONFIG_QUICC_ENGINE) += qe_lib/ |
26 | obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/ | 27 | obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/ |
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index ba4271919062..1c16141c031c 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -25,7 +25,6 @@
25 | 25 | ||
26 | #include <linux/bio.h> | 26 | #include <linux/bio.h> |
27 | #include <linux/blkdev.h> | 27 | #include <linux/blkdev.h> |
28 | #include <linux/buffer_head.h> | ||
29 | #include <linux/device.h> | 28 | #include <linux/device.h> |
30 | #include <linux/errno.h> | 29 | #include <linux/errno.h> |
31 | #include <linux/fs.h> | 30 | #include <linux/fs.h> |
diff --git a/arch/powerpc/sysdev/fsl_ifc.c b/arch/powerpc/sysdev/fsl_ifc.c
new file mode 100644
index 000000000000..b31f19f61031
--- /dev/null
+++ b/arch/powerpc/sysdev/fsl_ifc.c
@@ -0,0 +1,310 @@
1 | /* | ||
2 | * Copyright 2011 Freescale Semiconductor, Inc | ||
3 | * | ||
4 | * Freescale Integrated Flash Controller | ||
5 | * | ||
6 | * Author: Dipen Dudhat <Dipen.Dudhat@freescale.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the | ||
10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
11 | * option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
21 | */ | ||
22 | #include <linux/init.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/kernel.h> | ||
25 | #include <linux/compiler.h> | ||
26 | #include <linux/spinlock.h> | ||
27 | #include <linux/types.h> | ||
28 | #include <linux/slab.h> | ||
29 | #include <linux/io.h> | ||
30 | #include <linux/of.h> | ||
31 | #include <linux/of_device.h> | ||
32 | #include <linux/platform_device.h> | ||
33 | #include <asm/prom.h> | ||
34 | #include <asm/fsl_ifc.h> | ||
35 | |||
36 | struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev; | ||
37 | EXPORT_SYMBOL(fsl_ifc_ctrl_dev); | ||
38 | |||
39 | /* | ||
40 | * convert_ifc_address - convert the base address | ||
41 | * @addr_base: base address of the memory bank | ||
42 | */ | ||
43 | unsigned int convert_ifc_address(phys_addr_t addr_base) | ||
44 | { | ||
45 | return addr_base & CSPR_BA; | ||
46 | } | ||
47 | EXPORT_SYMBOL(convert_ifc_address); | ||
48 | |||
49 | /* | ||
50 | * fsl_ifc_find - find IFC bank | ||
51 | * @addr_base: base address of the memory bank | ||
52 | * | ||
53 | * This function walks IFC banks comparing "Base address" field of the CSPR | ||
54 | * registers with the supplied addr_base argument. When bases match this | ||
55 | * function returns bank number (starting with 0), otherwise it returns | ||
56 | * appropriate errno value. | ||
57 | */ | ||
58 | int fsl_ifc_find(phys_addr_t addr_base) | ||
59 | { | ||
60 | int i = 0; | ||
61 | |||
62 | if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->regs) | ||
63 | return -ENODEV; | ||
64 | |||
65 | for (i = 0; i < ARRAY_SIZE(fsl_ifc_ctrl_dev->regs->cspr_cs); i++) { | ||
66 | __be32 cspr = in_be32(&fsl_ifc_ctrl_dev->regs->cspr_cs[i].cspr); | ||
67 | if (cspr & CSPR_V && (cspr & CSPR_BA) == | ||
68 | convert_ifc_address(addr_base)) | ||
69 | return i; | ||
70 | } | ||
71 | |||
72 | return -ENOENT; | ||
73 | } | ||
74 | EXPORT_SYMBOL(fsl_ifc_find); | ||
75 | |||
76 | static int __devinit fsl_ifc_ctrl_init(struct fsl_ifc_ctrl *ctrl) | ||
77 | { | ||
78 | struct fsl_ifc_regs __iomem *ifc = ctrl->regs; | ||
79 | |||
80 | /* | ||
81 | * Clear all the common status and event registers | ||
82 | */ | ||
83 | if (in_be32(&ifc->cm_evter_stat) & IFC_CM_EVTER_STAT_CSER) | ||
84 | out_be32(&ifc->cm_evter_stat, IFC_CM_EVTER_STAT_CSER); | ||
85 | |||
86 | /* enable all error and events */ | ||
87 | out_be32(&ifc->cm_evter_en, IFC_CM_EVTER_EN_CSEREN); | ||
88 | |||
89 | /* enable all error and event interrupts */ | ||
90 | out_be32(&ifc->cm_evter_intr_en, IFC_CM_EVTER_INTR_EN_CSERIREN); | ||
91 | out_be32(&ifc->cm_erattr0, 0x0); | ||
92 | out_be32(&ifc->cm_erattr1, 0x0); | ||
93 | |||
94 | return 0; | ||
95 | } | ||
96 | |||
97 | static int fsl_ifc_ctrl_remove(struct platform_device *dev) | ||
98 | { | ||
99 | struct fsl_ifc_ctrl *ctrl = dev_get_drvdata(&dev->dev); | ||
100 | |||
101 | free_irq(ctrl->nand_irq, ctrl); | ||
102 | free_irq(ctrl->irq, ctrl); | ||
103 | |||
104 | irq_dispose_mapping(ctrl->nand_irq); | ||
105 | irq_dispose_mapping(ctrl->irq); | ||
106 | |||
107 | iounmap(ctrl->regs); | ||
108 | |||
109 | dev_set_drvdata(&dev->dev, NULL); | ||
110 | kfree(ctrl); | ||
111 | |||
112 | return 0; | ||
113 | } | ||
114 | |||
115 | /* | ||
116 | * NAND events are split between an operational interrupt which only | ||
117 | * receives OPC, and an error interrupt that receives everything else, | ||
118 | * including non-NAND errors. Whichever interrupt gets to it first | ||
119 | * records the status and wakes the wait queue. | ||
120 | */ | ||
121 | static DEFINE_SPINLOCK(nand_irq_lock); | ||
122 | |||
123 | static u32 check_nand_stat(struct fsl_ifc_ctrl *ctrl) | ||
124 | { | ||
125 | struct fsl_ifc_regs __iomem *ifc = ctrl->regs; | ||
126 | unsigned long flags; | ||
127 | u32 stat; | ||
128 | |||
129 | spin_lock_irqsave(&nand_irq_lock, flags); | ||
130 | |||
131 | stat = in_be32(&ifc->ifc_nand.nand_evter_stat); | ||
132 | if (stat) { | ||
133 | out_be32(&ifc->ifc_nand.nand_evter_stat, stat); | ||
134 | ctrl->nand_stat = stat; | ||
135 | wake_up(&ctrl->nand_wait); | ||
136 | } | ||
137 | |||
138 | spin_unlock_irqrestore(&nand_irq_lock, flags); | ||
139 | |||
140 | return stat; | ||
141 | } | ||
142 | |||
143 | static irqreturn_t fsl_ifc_nand_irq(int irqno, void *data) | ||
144 | { | ||
145 | struct fsl_ifc_ctrl *ctrl = data; | ||
146 | |||
147 | if (check_nand_stat(ctrl)) | ||
148 | return IRQ_HANDLED; | ||
149 | |||
150 | return IRQ_NONE; | ||
151 | } | ||
152 | |||
153 | /* | ||
154 | * NOTE: This interrupt is used to report ifc events of various kinds, | ||
155 | * such as transaction errors on the chipselects. | ||
156 | */ | ||
157 | static irqreturn_t fsl_ifc_ctrl_irq(int irqno, void *data) | ||
158 | { | ||
159 | struct fsl_ifc_ctrl *ctrl = data; | ||
160 | struct fsl_ifc_regs __iomem *ifc = ctrl->regs; | ||
161 | u32 err_axiid, err_srcid, status, cs_err, err_addr; | ||
162 | irqreturn_t ret = IRQ_NONE; | ||
163 | |||
164 | /* read for chip select error */ | ||
165 | cs_err = in_be32(&ifc->cm_evter_stat); | ||
166 | if (cs_err) { | ||
167 | dev_err(ctrl->dev, "transaction sent to IFC is not mapped to" | ||
168 | "any memory bank 0x%08X\n", cs_err); | ||
169 | /* clear the chip select error */ | ||
170 | out_be32(&ifc->cm_evter_stat, IFC_CM_EVTER_STAT_CSER); | ||
171 | |||
172 | /* read error attribute registers print the error information */ | ||
173 | status = in_be32(&ifc->cm_erattr0); | ||
174 | err_addr = in_be32(&ifc->cm_erattr1); | ||
175 | |||
176 | if (status & IFC_CM_ERATTR0_ERTYP_READ) | ||
177 | dev_err(ctrl->dev, "Read transaction error" | ||
178 | "CM_ERATTR0 0x%08X\n", status); | ||
179 | else | ||
180 | dev_err(ctrl->dev, "Write transaction error" | ||
181 | "CM_ERATTR0 0x%08X\n", status); | ||
182 | |||
183 | err_axiid = (status & IFC_CM_ERATTR0_ERAID) >> | ||
184 | IFC_CM_ERATTR0_ERAID_SHIFT; | ||
185 | dev_err(ctrl->dev, "AXI ID of the error" | ||
186 | "transaction 0x%08X\n", err_axiid); | ||
187 | |||
188 | err_srcid = (status & IFC_CM_ERATTR0_ESRCID) >> | ||
189 | IFC_CM_ERATTR0_ESRCID_SHIFT; | ||
190 | dev_err(ctrl->dev, "SRC ID of the error" | ||
191 | "transaction 0x%08X\n", err_srcid); | ||
192 | |||
193 | dev_err(ctrl->dev, "Transaction Address corresponding to error" | ||
194 | "ERADDR 0x%08X\n", err_addr); | ||
195 | |||
196 | ret = IRQ_HANDLED; | ||
197 | } | ||
198 | |||
199 | if (check_nand_stat(ctrl)) | ||
200 | ret = IRQ_HANDLED; | ||
201 | |||
202 | return ret; | ||
203 | } | ||
204 | |||
205 | /* | ||
206 | * fsl_ifc_ctrl_probe | ||
207 | * | ||
208 | * called by device layer when it finds a device matching | ||
209 | * one our driver can handled. This code allocates all of | ||
210 | * the resources needed for the controller only. The | ||
211 | * resources for the NAND banks themselves are allocated | ||
212 | * in the chip probe function. | ||
213 | */ | ||
214 | static int __devinit fsl_ifc_ctrl_probe(struct platform_device *dev) | ||
215 | { | ||
216 | int ret = 0; | ||
217 | |||
218 | |||
219 | dev_info(&dev->dev, "Freescale Integrated Flash Controller\n"); | ||
220 | |||
221 | fsl_ifc_ctrl_dev = kzalloc(sizeof(*fsl_ifc_ctrl_dev), GFP_KERNEL); | ||
222 | if (!fsl_ifc_ctrl_dev) | ||
223 | return -ENOMEM; | ||
224 | |||
225 | dev_set_drvdata(&dev->dev, fsl_ifc_ctrl_dev); | ||
226 | |||
227 | /* IOMAP the entire IFC region */ | ||
228 | fsl_ifc_ctrl_dev->regs = of_iomap(dev->dev.of_node, 0); | ||
229 | if (!fsl_ifc_ctrl_dev->regs) { | ||
230 | dev_err(&dev->dev, "failed to get memory region\n"); | ||
231 | ret = -ENODEV; | ||
232 | goto err; | ||
233 | } | ||
234 | |||
235 | /* get the Controller level irq */ | ||
236 | fsl_ifc_ctrl_dev->irq = irq_of_parse_and_map(dev->dev.of_node, 0); | ||
237 | if (fsl_ifc_ctrl_dev->irq == NO_IRQ) { | ||
238 | dev_err(&dev->dev, "failed to get irq resource " | ||
239 | "for IFC\n"); | ||
240 | ret = -ENODEV; | ||
241 | goto err; | ||
242 | } | ||
243 | |||
244 | /* get the nand machine irq */ | ||
245 | fsl_ifc_ctrl_dev->nand_irq = | ||
246 | irq_of_parse_and_map(dev->dev.of_node, 1); | ||
247 | if (fsl_ifc_ctrl_dev->nand_irq == NO_IRQ) { | ||
248 | dev_err(&dev->dev, "failed to get irq resource " | ||
249 | "for NAND Machine\n"); | ||
250 | ret = -ENODEV; | ||
251 | goto err; | ||
252 | } | ||
253 | |||
254 | fsl_ifc_ctrl_dev->dev = &dev->dev; | ||
255 | |||
256 | ret = fsl_ifc_ctrl_init(fsl_ifc_ctrl_dev); | ||
257 | if (ret < 0) | ||
258 | goto err; | ||
259 | |||
260 | init_waitqueue_head(&fsl_ifc_ctrl_dev->nand_wait); | ||
261 | |||
262 | ret = request_irq(fsl_ifc_ctrl_dev->irq, fsl_ifc_ctrl_irq, IRQF_SHARED, | ||
263 | "fsl-ifc", fsl_ifc_ctrl_dev); | ||
264 | if (ret != 0) { | ||
265 | dev_err(&dev->dev, "failed to install irq (%d)\n", | ||
266 | fsl_ifc_ctrl_dev->irq); | ||
267 | goto err_irq; | ||
268 | } | ||
269 | |||
270 | ret = request_irq(fsl_ifc_ctrl_dev->nand_irq, fsl_ifc_nand_irq, 0, | ||
271 | "fsl-ifc-nand", fsl_ifc_ctrl_dev); | ||
272 | if (ret != 0) { | ||
273 | dev_err(&dev->dev, "failed to install irq (%d)\n", | ||
274 | fsl_ifc_ctrl_dev->nand_irq); | ||
275 | goto err_nandirq; | ||
276 | } | ||
277 | |||
278 | return 0; | ||
279 | |||
280 | err_nandirq: | ||
281 | free_irq(fsl_ifc_ctrl_dev->nand_irq, fsl_ifc_ctrl_dev); | ||
282 | irq_dispose_mapping(fsl_ifc_ctrl_dev->nand_irq); | ||
283 | err_irq: | ||
284 | free_irq(fsl_ifc_ctrl_dev->irq, fsl_ifc_ctrl_dev); | ||
285 | irq_dispose_mapping(fsl_ifc_ctrl_dev->irq); | ||
286 | err: | ||
287 | return ret; | ||
288 | } | ||
289 | |||
290 | static const struct of_device_id fsl_ifc_match[] = { | ||
291 | { | ||
292 | .compatible = "fsl,ifc", | ||
293 | }, | ||
294 | {}, | ||
295 | }; | ||
296 | |||
297 | static struct platform_driver fsl_ifc_ctrl_driver = { | ||
298 | .driver = { | ||
299 | .name = "fsl-ifc", | ||
300 | .of_match_table = fsl_ifc_match, | ||
301 | }, | ||
302 | .probe = fsl_ifc_ctrl_probe, | ||
303 | .remove = fsl_ifc_ctrl_remove, | ||
304 | }; | ||
305 | |||
306 | module_platform_driver(fsl_ifc_ctrl_driver); | ||
307 | |||
308 | MODULE_LICENSE("GPL"); | ||
309 | MODULE_AUTHOR("Freescale Semiconductor"); | ||
310 | MODULE_DESCRIPTION("Freescale Integrated Flash Controller driver"); | ||
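
For context, a minimal consumer sketch (not part of this patch) of the interfaces the new file exports: fsl_ifc_ctrl_dev, fsl_ifc_find(), and the nand_wait/nand_stat pair completed by the IRQ handlers above. The helper names, timeout and message text are illustrative assumptions; the declarations are assumed to come from the <asm/fsl_ifc.h> header added elsewhere in this series.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <asm/fsl_ifc.h>

/* Hypothetical helper: locate the IFC chip select backing a flash device. */
static int example_ifc_bank_lookup(phys_addr_t flash_base)
{
	int bank;

	/* The global handle is published by fsl_ifc_ctrl_probe(). */
	if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->regs)
		return -ENODEV;

	/* Match the bank whose CSPR base address covers this device. */
	bank = fsl_ifc_find(flash_base);
	if (bank < 0)
		return bank;

	pr_info("flash at 0x%llx sits behind IFC chip select %d\n",
		(unsigned long long)flash_base, bank);
	return bank;
}

/* Hypothetical helper: wait for the NAND machine interrupt handled above. */
static u32 example_ifc_wait_nand(struct fsl_ifc_ctrl *ctrl)
{
	/* fsl_ifc_nand_irq()/check_nand_stat() set nand_stat and wake us. */
	wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat, HZ);
	return ctrl->nand_stat;
}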
diff --git a/arch/powerpc/sysdev/fsl_lbc.c b/arch/powerpc/sysdev/fsl_lbc.c
index d5c3c90ee698..483126d7b3c0 100644
--- a/arch/powerpc/sysdev/fsl_lbc.c
+++ b/arch/powerpc/sysdev/fsl_lbc.c
@@ -332,6 +332,38 @@ err:
332 | return ret; | 332 | return ret; |
333 | } | 333 | } |
334 | 334 | ||
335 | #ifdef CONFIG_SUSPEND | ||
336 | |||
337 | /* save lbc registers */ | ||
338 | static int fsl_lbc_suspend(struct platform_device *pdev, pm_message_t state) | ||
339 | { | ||
340 | struct fsl_lbc_ctrl *ctrl = dev_get_drvdata(&pdev->dev); | ||
341 | struct fsl_lbc_regs __iomem *lbc = ctrl->regs; | ||
342 | |||
343 | ctrl->saved_regs = kmalloc(sizeof(struct fsl_lbc_regs), GFP_KERNEL); | ||
344 | if (!ctrl->saved_regs) | ||
345 | return -ENOMEM; | ||
346 | |||
347 | _memcpy_fromio(ctrl->saved_regs, lbc, sizeof(struct fsl_lbc_regs)); | ||
348 | return 0; | ||
349 | } | ||
350 | |||
351 | /* restore lbc registers */ | ||
352 | static int fsl_lbc_resume(struct platform_device *pdev) | ||
353 | { | ||
354 | struct fsl_lbc_ctrl *ctrl = dev_get_drvdata(&pdev->dev); | ||
355 | struct fsl_lbc_regs __iomem *lbc = ctrl->regs; | ||
356 | |||
357 | if (ctrl->saved_regs) { | ||
358 | _memcpy_toio(lbc, ctrl->saved_regs, | ||
359 | sizeof(struct fsl_lbc_regs)); | ||
360 | kfree(ctrl->saved_regs); | ||
361 | ctrl->saved_regs = NULL; | ||
362 | } | ||
363 | return 0; | ||
364 | } | ||
365 | #endif /* CONFIG_SUSPEND */ | ||
366 | |||
335 | static const struct of_device_id fsl_lbc_match[] = { | 367 | static const struct of_device_id fsl_lbc_match[] = { |
336 | { .compatible = "fsl,elbc", }, | 368 | { .compatible = "fsl,elbc", }, |
337 | { .compatible = "fsl,pq3-localbus", }, | 369 | { .compatible = "fsl,pq3-localbus", }, |
@@ -346,6 +378,10 @@ static struct platform_driver fsl_lbc_ctrl_driver = {
346 | .of_match_table = fsl_lbc_match, | 378 | .of_match_table = fsl_lbc_match, |
347 | }, | 379 | }, |
348 | .probe = fsl_lbc_ctrl_probe, | 380 | .probe = fsl_lbc_ctrl_probe, |
381 | #ifdef CONFIG_SUSPEND | ||
382 | .suspend = fsl_lbc_suspend, | ||
383 | .resume = fsl_lbc_resume, | ||
384 | #endif | ||
349 | }; | 385 | }; |
350 | 386 | ||
351 | static int __init fsl_lbc_init(void) | 387 | static int __init fsl_lbc_init(void) |
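
One piece of context for the suspend/resume pair added above: the ctrl->saved_regs pointer they use is expected to be a field of struct fsl_lbc_ctrl, added to <asm/fsl_lbc.h> elsewhere in this series. A mirrored sketch, for illustration only:

#include <linux/compiler.h>

struct fsl_lbc_regs;				/* full eLBC register file layout */

/* Illustrative mirror of the relevant struct fsl_lbc_ctrl fields. */
struct example_lbc_ctrl_state {
	struct fsl_lbc_regs __iomem *regs;	/* live MMIO window */
	struct fsl_lbc_regs *saved_regs;	/* RAM snapshot taken in suspend,
						 * replayed and freed in resume */
};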
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index e5c344d336ea..ecb5c1946d22 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -23,6 +23,8 @@
23 | #include <asm/hw_irq.h> | 23 | #include <asm/hw_irq.h> |
24 | #include <asm/ppc-pci.h> | 24 | #include <asm/ppc-pci.h> |
25 | #include <asm/mpic.h> | 25 | #include <asm/mpic.h> |
26 | #include <asm/fsl_hcalls.h> | ||
27 | |||
26 | #include "fsl_msi.h" | 28 | #include "fsl_msi.h" |
27 | #include "fsl_pci.h" | 29 | #include "fsl_pci.h" |
28 | 30 | ||
@@ -148,14 +150,49 @@ static void fsl_compose_msi_msg(struct pci_dev *pdev, int hwirq,
148 | 150 | ||
149 | static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) | 151 | static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) |
150 | { | 152 | { |
153 | struct pci_controller *hose = pci_bus_to_host(pdev->bus); | ||
154 | struct device_node *np; | ||
155 | phandle phandle = 0; | ||
151 | int rc, hwirq = -ENOMEM; | 156 | int rc, hwirq = -ENOMEM; |
152 | unsigned int virq; | 157 | unsigned int virq; |
153 | struct msi_desc *entry; | 158 | struct msi_desc *entry; |
154 | struct msi_msg msg; | 159 | struct msi_msg msg; |
155 | struct fsl_msi *msi_data; | 160 | struct fsl_msi *msi_data; |
156 | 161 | ||
162 | /* | ||
163 | * If the PCI node has an fsl,msi property, then we need to use it | ||
164 | * to find the specific MSI. | ||
165 | */ | ||
166 | np = of_parse_phandle(hose->dn, "fsl,msi", 0); | ||
167 | if (np) { | ||
168 | if (of_device_is_compatible(np, "fsl,mpic-msi") || | ||
169 | of_device_is_compatible(np, "fsl,vmpic-msi")) | ||
170 | phandle = np->phandle; | ||
171 | else { | ||
172 | dev_err(&pdev->dev, | ||
173 | "node %s has an invalid fsl,msi phandle %u\n", | ||
174 | hose->dn->full_name, np->phandle); | ||
175 | return -EINVAL; | ||
176 | } | ||
177 | } | ||
178 | |||
157 | list_for_each_entry(entry, &pdev->msi_list, list) { | 179 | list_for_each_entry(entry, &pdev->msi_list, list) { |
180 | /* | ||
181 | * Loop over all the MSI devices until we find one that has an | ||
182 | * available interrupt. | ||
183 | */ | ||
158 | list_for_each_entry(msi_data, &msi_head, list) { | 184 | list_for_each_entry(msi_data, &msi_head, list) { |
185 | /* | ||
186 | * If the PCI node has an fsl,msi property, then we | ||
187 | * restrict our search to the corresponding MSI node. | ||
188 | * The simplest way is to skip over MSI nodes with the | ||
189 | * wrong phandle. Under the Freescale hypervisor, this | ||
190 | * has the additional benefit of skipping over MSI | ||
191 | * nodes that are not mapped in the PAMU. | ||
192 | */ | ||
193 | if (phandle && (phandle != msi_data->phandle)) | ||
194 | continue; | ||
195 | |||
159 | hwirq = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1); | 196 | hwirq = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1); |
160 | if (hwirq >= 0) | 197 | if (hwirq >= 0) |
161 | break; | 198 | break; |
@@ -163,16 +200,14 @@ static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
163 | 200 | ||
164 | if (hwirq < 0) { | 201 | if (hwirq < 0) { |
165 | rc = hwirq; | 202 | rc = hwirq; |
166 | pr_debug("%s: fail allocating msi interrupt\n", | 203 | dev_err(&pdev->dev, "could not allocate MSI interrupt\n"); |
167 | __func__); | ||
168 | goto out_free; | 204 | goto out_free; |
169 | } | 205 | } |
170 | 206 | ||
171 | virq = irq_create_mapping(msi_data->irqhost, hwirq); | 207 | virq = irq_create_mapping(msi_data->irqhost, hwirq); |
172 | 208 | ||
173 | if (virq == NO_IRQ) { | 209 | if (virq == NO_IRQ) { |
174 | pr_debug("%s: fail mapping hwirq 0x%x\n", | 210 | dev_err(&pdev->dev, "fail mapping hwirq %i\n", hwirq); |
175 | __func__, hwirq); | ||
176 | msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1); | 211 | msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1); |
177 | rc = -ENOSPC; | 212 | rc = -ENOSPC; |
178 | goto out_free; | 213 | goto out_free; |
@@ -201,6 +236,7 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
201 | u32 intr_index; | 236 | u32 intr_index; |
202 | u32 have_shift = 0; | 237 | u32 have_shift = 0; |
203 | struct fsl_msi_cascade_data *cascade_data; | 238 | struct fsl_msi_cascade_data *cascade_data; |
239 | unsigned int ret; | ||
204 | 240 | ||
205 | cascade_data = irq_get_handler_data(irq); | 241 | cascade_data = irq_get_handler_data(irq); |
206 | msi_data = cascade_data->msi_data; | 242 | msi_data = cascade_data->msi_data; |
@@ -232,6 +268,14 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
232 | case FSL_PIC_IP_IPIC: | 268 | case FSL_PIC_IP_IPIC: |
233 | msir_value = fsl_msi_read(msi_data->msi_regs, msir_index * 0x4); | 269 | msir_value = fsl_msi_read(msi_data->msi_regs, msir_index * 0x4); |
234 | break; | 270 | break; |
271 | case FSL_PIC_IP_VMPIC: | ||
272 | ret = fh_vmpic_get_msir(virq_to_hw(irq), &msir_value); | ||
273 | if (ret) { | ||
274 | pr_err("fsl-msi: fh_vmpic_get_msir() failed for " | ||
275 | "irq %u (ret=%u)\n", irq, ret); | ||
276 | msir_value = 0; | ||
277 | } | ||
278 | break; | ||
235 | } | 279 | } |
236 | 280 | ||
237 | while (msir_value) { | 281 | while (msir_value) { |
@@ -249,6 +293,7 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
249 | 293 | ||
250 | switch (msi_data->feature & FSL_PIC_IP_MASK) { | 294 | switch (msi_data->feature & FSL_PIC_IP_MASK) { |
251 | case FSL_PIC_IP_MPIC: | 295 | case FSL_PIC_IP_MPIC: |
296 | case FSL_PIC_IP_VMPIC: | ||
252 | chip->irq_eoi(idata); | 297 | chip->irq_eoi(idata); |
253 | break; | 298 | break; |
254 | case FSL_PIC_IP_IPIC: | 299 | case FSL_PIC_IP_IPIC: |
@@ -278,7 +323,8 @@ static int fsl_of_msi_remove(struct platform_device *ofdev)
278 | } | 323 | } |
279 | if (msi->bitmap.bitmap) | 324 | if (msi->bitmap.bitmap) |
280 | msi_bitmap_free(&msi->bitmap); | 325 | msi_bitmap_free(&msi->bitmap); |
281 | iounmap(msi->msi_regs); | 326 | if ((msi->feature & FSL_PIC_IP_MASK) != FSL_PIC_IP_VMPIC) |
327 | iounmap(msi->msi_regs); | ||
282 | kfree(msi); | 328 | kfree(msi); |
283 | 329 | ||
284 | return 0; | 330 | return 0; |
@@ -350,25 +396,37 @@ static int __devinit fsl_of_msi_probe(struct platform_device *dev)
350 | goto error_out; | 396 | goto error_out; |
351 | } | 397 | } |
352 | 398 | ||
353 | /* Get the MSI reg base */ | 399 | /* |
354 | err = of_address_to_resource(dev->dev.of_node, 0, &res); | 400 | * Under the Freescale hypervisor, the msi nodes don't have a 'reg' |
355 | if (err) { | 401 | * property. Instead, we use hypercalls to access the MSI. |
356 | dev_err(&dev->dev, "%s resource error!\n", | 402 | */ |
403 | if ((features->fsl_pic_ip & FSL_PIC_IP_MASK) != FSL_PIC_IP_VMPIC) { | ||
404 | err = of_address_to_resource(dev->dev.of_node, 0, &res); | ||
405 | if (err) { | ||
406 | dev_err(&dev->dev, "invalid resource for node %s\n", | ||
357 | dev->dev.of_node->full_name); | 407 | dev->dev.of_node->full_name); |
358 | goto error_out; | 408 | goto error_out; |
359 | } | 409 | } |
360 | 410 | ||
361 | msi->msi_regs = ioremap(res.start, resource_size(&res)); | 411 | msi->msi_regs = ioremap(res.start, resource_size(&res)); |
362 | if (!msi->msi_regs) { | 412 | if (!msi->msi_regs) { |
363 | dev_err(&dev->dev, "ioremap problem failed\n"); | 413 | dev_err(&dev->dev, "could not map node %s\n", |
364 | goto error_out; | 414 | dev->dev.of_node->full_name); |
415 | goto error_out; | ||
416 | } | ||
417 | msi->msiir_offset = | ||
418 | features->msiir_offset + (res.start & 0xfffff); | ||
365 | } | 419 | } |
366 | 420 | ||
367 | msi->feature = features->fsl_pic_ip; | 421 | msi->feature = features->fsl_pic_ip; |
368 | 422 | ||
369 | msi->irqhost->host_data = msi; | 423 | msi->irqhost->host_data = msi; |
370 | 424 | ||
371 | msi->msiir_offset = features->msiir_offset + (res.start & 0xfffff); | 425 | /* |
426 | * Remember the phandle, so that we can match with any PCI nodes | ||
427 | * that have an "fsl,msi" property. | ||
428 | */ | ||
429 | msi->phandle = dev->dev.of_node->phandle; | ||
372 | 430 | ||
373 | rc = fsl_msi_init_allocator(msi); | 431 | rc = fsl_msi_init_allocator(msi); |
374 | if (rc) { | 432 | if (rc) { |
@@ -437,6 +495,11 @@ static const struct fsl_msi_feature ipic_msi_feature = {
437 | .msiir_offset = 0x38, | 495 | .msiir_offset = 0x38, |
438 | }; | 496 | }; |
439 | 497 | ||
498 | static const struct fsl_msi_feature vmpic_msi_feature = { | ||
499 | .fsl_pic_ip = FSL_PIC_IP_VMPIC, | ||
500 | .msiir_offset = 0, | ||
501 | }; | ||
502 | |||
440 | static const struct of_device_id fsl_of_msi_ids[] = { | 503 | static const struct of_device_id fsl_of_msi_ids[] = { |
441 | { | 504 | { |
442 | .compatible = "fsl,mpic-msi", | 505 | .compatible = "fsl,mpic-msi", |
@@ -446,6 +509,10 @@ static const struct of_device_id fsl_of_msi_ids[] = {
446 | .compatible = "fsl,ipic-msi", | 509 | .compatible = "fsl,ipic-msi", |
447 | .data = (void *)&ipic_msi_feature, | 510 | .data = (void *)&ipic_msi_feature, |
448 | }, | 511 | }, |
512 | { | ||
513 | .compatible = "fsl,vmpic-msi", | ||
514 | .data = (void *)&vmpic_msi_feature, | ||
515 | }, | ||
449 | {} | 516 | {} |
450 | }; | 517 | }; |
451 | 518 | ||
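
A condensed restatement (illustrative only, not code from this patch) of the bank-selection rule the fsl_setup_msi_irqs() hunks above introduce: an MSI bank is eligible for a PCI host either when the host carries no "fsl,msi" phandle at all, or when the phandle recorded in fsl_of_msi_probe() matches it.

#include <linux/types.h>

#include "fsl_msi.h"

/* Hypothetical predicate equivalent to the skip test in the allocation loop. */
static bool example_msi_bank_usable(const struct fsl_msi *msi_data,
				    phandle wanted)
{
	/* wanted == 0 means the PCI node carried no "fsl,msi" property. */
	return wanted == 0 || wanted == msi_data->phandle;
}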
diff --git a/arch/powerpc/sysdev/fsl_msi.h b/arch/powerpc/sysdev/fsl_msi.h
index 1313abbc5200..f6c646a52541 100644
--- a/arch/powerpc/sysdev/fsl_msi.h
+++ b/arch/powerpc/sysdev/fsl_msi.h
@@ -13,15 +13,17 @@
13 | #ifndef _POWERPC_SYSDEV_FSL_MSI_H | 13 | #ifndef _POWERPC_SYSDEV_FSL_MSI_H |
14 | #define _POWERPC_SYSDEV_FSL_MSI_H | 14 | #define _POWERPC_SYSDEV_FSL_MSI_H |
15 | 15 | ||
16 | #include <linux/of.h> | ||
16 | #include <asm/msi_bitmap.h> | 17 | #include <asm/msi_bitmap.h> |
17 | 18 | ||
18 | #define NR_MSI_REG 8 | 19 | #define NR_MSI_REG 8 |
19 | #define IRQS_PER_MSI_REG 32 | 20 | #define IRQS_PER_MSI_REG 32 |
20 | #define NR_MSI_IRQS (NR_MSI_REG * IRQS_PER_MSI_REG) | 21 | #define NR_MSI_IRQS (NR_MSI_REG * IRQS_PER_MSI_REG) |
21 | 22 | ||
22 | #define FSL_PIC_IP_MASK 0x0000000F | 23 | #define FSL_PIC_IP_MASK 0x0000000F |
23 | #define FSL_PIC_IP_MPIC 0x00000001 | 24 | #define FSL_PIC_IP_MPIC 0x00000001 |
24 | #define FSL_PIC_IP_IPIC 0x00000002 | 25 | #define FSL_PIC_IP_IPIC 0x00000002 |
26 | #define FSL_PIC_IP_VMPIC 0x00000003 | ||
25 | 27 | ||
26 | struct fsl_msi { | 28 | struct fsl_msi { |
27 | struct irq_host *irqhost; | 29 | struct irq_host *irqhost; |
@@ -36,6 +38,8 @@ struct fsl_msi {
36 | struct msi_bitmap bitmap; | 38 | struct msi_bitmap bitmap; |
37 | 39 | ||
38 | struct list_head list; /* support multiple MSI banks */ | 40 | struct list_head list; /* support multiple MSI banks */ |
41 | |||
42 | phandle phandle; | ||
39 | }; | 43 | }; |
40 | 44 | ||
41 | #endif /* _POWERPC_SYSDEV_FSL_MSI_H */ | 45 | #endif /* _POWERPC_SYSDEV_FSL_MSI_H */ |
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 4ce547e00473..3b61e8cf3421 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -65,6 +65,30 @@ static int __init fsl_pcie_check_link(struct pci_controller *hose)
65 | } | 65 | } |
66 | 66 | ||
67 | #if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx) | 67 | #if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx) |
68 | |||
69 | #define MAX_PHYS_ADDR_BITS 40 | ||
70 | static u64 pci64_dma_offset = 1ull << MAX_PHYS_ADDR_BITS; | ||
71 | |||
72 | static int fsl_pci_dma_set_mask(struct device *dev, u64 dma_mask) | ||
73 | { | ||
74 | if (!dev->dma_mask || !dma_supported(dev, dma_mask)) | ||
75 | return -EIO; | ||
76 | |||
77 | /* | ||
78 | * Fixup PCI devices that are able to DMA to above the physical | ||
79 | * address width of the SoC such that we can address any internal | ||
80 | * SoC address from across PCI if needed | ||
81 | */ | ||
82 | if ((dev->bus == &pci_bus_type) && | ||
83 | dma_mask >= DMA_BIT_MASK(MAX_PHYS_ADDR_BITS)) { | ||
84 | set_dma_ops(dev, &dma_direct_ops); | ||
85 | set_dma_offset(dev, pci64_dma_offset); | ||
86 | } | ||
87 | |||
88 | *dev->dma_mask = dma_mask; | ||
89 | return 0; | ||
90 | } | ||
91 | |||
68 | static int __init setup_one_atmu(struct ccsr_pci __iomem *pci, | 92 | static int __init setup_one_atmu(struct ccsr_pci __iomem *pci, |
69 | unsigned int index, const struct resource *res, | 93 | unsigned int index, const struct resource *res, |
70 | resource_size_t offset) | 94 | resource_size_t offset) |
@@ -113,6 +137,8 @@ static void __init setup_pci_atmu(struct pci_controller *hose,
113 | u32 piwar = PIWAR_EN | PIWAR_PF | PIWAR_TGI_LOCAL | | 137 | u32 piwar = PIWAR_EN | PIWAR_PF | PIWAR_TGI_LOCAL | |
114 | PIWAR_READ_SNOOP | PIWAR_WRITE_SNOOP; | 138 | PIWAR_READ_SNOOP | PIWAR_WRITE_SNOOP; |
115 | char *name = hose->dn->full_name; | 139 | char *name = hose->dn->full_name; |
140 | const u64 *reg; | ||
141 | int len; | ||
116 | 142 | ||
117 | pr_debug("PCI memory map start 0x%016llx, size 0x%016llx\n", | 143 | pr_debug("PCI memory map start 0x%016llx, size 0x%016llx\n", |
118 | (u64)rsrc->start, (u64)resource_size(rsrc)); | 144 | (u64)rsrc->start, (u64)resource_size(rsrc)); |
@@ -205,6 +231,33 @@ static void __init setup_pci_atmu(struct pci_controller *hose,
205 | 231 | ||
206 | /* Setup inbound mem window */ | 232 | /* Setup inbound mem window */ |
207 | mem = memblock_end_of_DRAM(); | 233 | mem = memblock_end_of_DRAM(); |
234 | |||
235 | /* | ||
236 | * The msi-address-64 property, if it exists, indicates the physical | ||
237 | * address of the MSIIR register. Normally, this register is located | ||
238 | * inside CCSR, so the ATMU that covers all of CCSR is used. But if | ||
239 | * this property exists, then we normally need to create a new ATMU | ||
240 | * for it. For now, however, we cheat. The only entity that creates | ||
241 | * this property is the Freescale hypervisor, and the address is | ||
242 | * specified in the partition configuration. Typically, the address | ||
243 | * is located in the page immediately after the end of DDR. If so, we | ||
244 | * can avoid allocating a new ATMU by extending the DDR ATMU by one | ||
245 | * page. | ||
246 | */ | ||
247 | reg = of_get_property(hose->dn, "msi-address-64", &len); | ||
248 | if (reg && (len == sizeof(u64))) { | ||
249 | u64 address = be64_to_cpup(reg); | ||
250 | |||
251 | if ((address >= mem) && (address < (mem + PAGE_SIZE))) { | ||
252 | pr_info("%s: extending DDR ATMU to cover MSIIR", name); | ||
253 | mem += PAGE_SIZE; | ||
254 | } else { | ||
255 | /* TODO: Create a new ATMU for MSIIR */ | ||
256 | pr_warn("%s: msi-address-64 address of %llx is " | ||
257 | "unsupported\n", name, address); | ||
258 | } | ||
259 | } | ||
260 | |||
208 | sz = min(mem, paddr_lo); | 261 | sz = min(mem, paddr_lo); |
209 | mem_log = __ilog2_u64(sz); | 262 | mem_log = __ilog2_u64(sz); |
210 | 263 | ||
@@ -228,6 +281,37 @@ static void __init setup_pci_atmu(struct pci_controller *hose,
228 | 281 | ||
229 | hose->dma_window_base_cur = 0x00000000; | 282 | hose->dma_window_base_cur = 0x00000000; |
230 | hose->dma_window_size = (resource_size_t)sz; | 283 | hose->dma_window_size = (resource_size_t)sz; |
284 | |||
285 | /* | ||
286 | * if we have >4G of memory setup second PCI inbound window to | ||
287 | * let devices that are 64-bit address capable to work w/o | ||
288 | * SWIOTLB and access the full range of memory | ||
289 | */ | ||
290 | if (sz != mem) { | ||
291 | mem_log = __ilog2_u64(mem); | ||
292 | |||
293 | /* Size window up if we dont fit in exact power-of-2 */ | ||
294 | if ((1ull << mem_log) != mem) | ||
295 | mem_log++; | ||
296 | |||
297 | piwar = (piwar & ~PIWAR_SZ_MASK) | (mem_log - 1); | ||
298 | |||
299 | /* Setup inbound memory window */ | ||
300 | out_be32(&pci->piw[win_idx].pitar, 0x00000000); | ||
301 | out_be32(&pci->piw[win_idx].piwbear, | ||
302 | pci64_dma_offset >> 44); | ||
303 | out_be32(&pci->piw[win_idx].piwbar, | ||
304 | pci64_dma_offset >> 12); | ||
305 | out_be32(&pci->piw[win_idx].piwar, piwar); | ||
306 | |||
307 | /* | ||
308 | * install our own dma_set_mask handler to fixup dma_ops | ||
309 | * and dma_offset | ||
310 | */ | ||
311 | ppc_md.dma_set_mask = fsl_pci_dma_set_mask; | ||
312 | |||
313 | pr_info("%s: Setup 64-bit PCI DMA window\n", name); | ||
314 | } | ||
231 | } else { | 315 | } else { |
232 | u64 paddr = 0; | 316 | u64 paddr = 0; |
233 | 317 | ||
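
For context, a short sketch (an assumption, not part of this patch) of what the new fsl_pci_dma_set_mask() hook plus the second inbound window mean for a device driver: asking for a 64-bit mask is routed through ppc_md.dma_set_mask, which switches the device to dma_direct_ops with a 1ull << 40 bus offset, so its DMA hits the large ATMU window rather than bouncing through SWIOTLB.

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Hypothetical probe fragment for a 64-bit capable PCI device. */
static int example_enable_dma(struct pci_dev *pdev)
{
	/* Lands in fsl_pci_dma_set_mask() when the 64-bit window was set up. */
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
		return 0;

	/* Otherwise fall back to 32-bit DMA. */
	return dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
}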
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index 22ffccd8bef5..a4c4f4a932d8 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -10,7 +10,7 @@
10 | * - Added Port-Write message handling | 10 | * - Added Port-Write message handling |
11 | * - Added Machine Check exception handling | 11 | * - Added Machine Check exception handling |
12 | * | 12 | * |
13 | * Copyright (C) 2007, 2008, 2010 Freescale Semiconductor, Inc. | 13 | * Copyright (C) 2007, 2008, 2010, 2011 Freescale Semiconductor, Inc. |
14 | * Zhang Wei <wei.zhang@freescale.com> | 14 | * Zhang Wei <wei.zhang@freescale.com> |
15 | * | 15 | * |
16 | * Copyright 2005 MontaVista Software, Inc. | 16 | * Copyright 2005 MontaVista Software, Inc. |
@@ -28,240 +28,33 @@
28 | #include <linux/dma-mapping.h> | 28 | #include <linux/dma-mapping.h> |
29 | #include <linux/interrupt.h> | 29 | #include <linux/interrupt.h> |
30 | #include <linux/device.h> | 30 | #include <linux/device.h> |
31 | #include <linux/rio.h> | ||
32 | #include <linux/rio_drv.h> | ||
33 | #include <linux/of_platform.h> | 31 | #include <linux/of_platform.h> |
34 | #include <linux/delay.h> | 32 | #include <linux/delay.h> |
35 | #include <linux/slab.h> | 33 | #include <linux/slab.h> |
36 | #include <linux/kfifo.h> | ||
37 | 34 | ||
38 | #include <asm/io.h> | 35 | #include <linux/io.h> |
36 | #include <linux/uaccess.h> | ||
39 | #include <asm/machdep.h> | 37 | #include <asm/machdep.h> |
40 | #include <asm/uaccess.h> | ||
41 | 38 | ||
42 | #undef DEBUG_PW /* Port-Write debugging */ | 39 | #include "fsl_rio.h" |
43 | 40 | ||
44 | /* RapidIO definition irq, which read from OF-tree */ | 41 | #undef DEBUG_PW /* Port-Write debugging */ |
45 | #define IRQ_RIO_BELL(m) (((struct rio_priv *)(m->priv))->bellirq) | ||
46 | #define IRQ_RIO_TX(m) (((struct rio_priv *)(m->priv))->txirq) | ||
47 | #define IRQ_RIO_RX(m) (((struct rio_priv *)(m->priv))->rxirq) | ||
48 | #define IRQ_RIO_PW(m) (((struct rio_priv *)(m->priv))->pwirq) | ||
49 | |||
50 | #define IPWSR_CLEAR 0x98 | ||
51 | #define OMSR_CLEAR 0x1cb3 | ||
52 | #define IMSR_CLEAR 0x491 | ||
53 | #define IDSR_CLEAR 0x91 | ||
54 | #define ODSR_CLEAR 0x1c00 | ||
55 | #define LTLEECSR_ENABLE_ALL 0xFFC000FC | ||
56 | #define ESCSR_CLEAR 0x07120204 | ||
57 | #define IECSR_CLEAR 0x80000000 | ||
58 | 42 | ||
59 | #define RIO_PORT1_EDCSR 0x0640 | 43 | #define RIO_PORT1_EDCSR 0x0640 |
60 | #define RIO_PORT2_EDCSR 0x0680 | 44 | #define RIO_PORT2_EDCSR 0x0680 |
61 | #define RIO_PORT1_IECSR 0x10130 | 45 | #define RIO_PORT1_IECSR 0x10130 |
62 | #define RIO_PORT2_IECSR 0x101B0 | 46 | #define RIO_PORT2_IECSR 0x101B0 |
63 | #define RIO_IM0SR 0x13064 | 47 | |
64 | #define RIO_IM1SR 0x13164 | ||
65 | #define RIO_OM0SR 0x13004 | ||
66 | #define RIO_OM1SR 0x13104 | ||
67 | |||
68 | #define RIO_ATMU_REGS_OFFSET 0x10c00 | ||
69 | #define RIO_P_MSG_REGS_OFFSET 0x11000 | ||
70 | #define RIO_S_MSG_REGS_OFFSET 0x13000 | ||
71 | #define RIO_GCCSR 0x13c | 48 | #define RIO_GCCSR 0x13c |
72 | #define RIO_ESCSR 0x158 | 49 | #define RIO_ESCSR 0x158 |
50 | #define ESCSR_CLEAR 0x07120204 | ||
73 | #define RIO_PORT2_ESCSR 0x178 | 51 | #define RIO_PORT2_ESCSR 0x178 |
74 | #define RIO_CCSR 0x15c | 52 | #define RIO_CCSR 0x15c |
75 | #define RIO_LTLEDCSR 0x0608 | ||
76 | #define RIO_LTLEDCSR_IER 0x80000000 | 53 | #define RIO_LTLEDCSR_IER 0x80000000 |
77 | #define RIO_LTLEDCSR_PRT 0x01000000 | 54 | #define RIO_LTLEDCSR_PRT 0x01000000 |
78 | #define RIO_LTLEECSR 0x060c | 55 | #define IECSR_CLEAR 0x80000000 |
79 | #define RIO_EPWISR 0x10010 | ||
80 | #define RIO_ISR_AACR 0x10120 | 56 | #define RIO_ISR_AACR 0x10120 |
81 | #define RIO_ISR_AACR_AA 0x1 /* Accept All ID */ | 57 | #define RIO_ISR_AACR_AA 0x1 /* Accept All ID */ |
82 | #define RIO_MAINT_WIN_SIZE 0x400000 | ||
83 | #define RIO_DBELL_WIN_SIZE 0x1000 | ||
84 | |||
85 | #define RIO_MSG_OMR_MUI 0x00000002 | ||
86 | #define RIO_MSG_OSR_TE 0x00000080 | ||
87 | #define RIO_MSG_OSR_QOI 0x00000020 | ||
88 | #define RIO_MSG_OSR_QFI 0x00000010 | ||
89 | #define RIO_MSG_OSR_MUB 0x00000004 | ||
90 | #define RIO_MSG_OSR_EOMI 0x00000002 | ||
91 | #define RIO_MSG_OSR_QEI 0x00000001 | ||
92 | |||
93 | #define RIO_MSG_IMR_MI 0x00000002 | ||
94 | #define RIO_MSG_ISR_TE 0x00000080 | ||
95 | #define RIO_MSG_ISR_QFI 0x00000010 | ||
96 | #define RIO_MSG_ISR_DIQI 0x00000001 | ||
97 | |||
98 | #define RIO_IPWMR_SEN 0x00100000 | ||
99 | #define RIO_IPWMR_QFIE 0x00000100 | ||
100 | #define RIO_IPWMR_EIE 0x00000020 | ||
101 | #define RIO_IPWMR_CQ 0x00000002 | ||
102 | #define RIO_IPWMR_PWE 0x00000001 | ||
103 | |||
104 | #define RIO_IPWSR_QF 0x00100000 | ||
105 | #define RIO_IPWSR_TE 0x00000080 | ||
106 | #define RIO_IPWSR_QFI 0x00000010 | ||
107 | #define RIO_IPWSR_PWD 0x00000008 | ||
108 | #define RIO_IPWSR_PWB 0x00000004 | ||
109 | |||
110 | /* EPWISR Error match value */ | ||
111 | #define RIO_EPWISR_PINT1 0x80000000 | ||
112 | #define RIO_EPWISR_PINT2 0x40000000 | ||
113 | #define RIO_EPWISR_MU 0x00000002 | ||
114 | #define RIO_EPWISR_PW 0x00000001 | ||
115 | |||
116 | #define RIO_MSG_DESC_SIZE 32 | ||
117 | #define RIO_MSG_BUFFER_SIZE 4096 | ||
118 | #define RIO_MIN_TX_RING_SIZE 2 | ||
119 | #define RIO_MAX_TX_RING_SIZE 2048 | ||
120 | #define RIO_MIN_RX_RING_SIZE 2 | ||
121 | #define RIO_MAX_RX_RING_SIZE 2048 | ||
122 | |||
123 | #define DOORBELL_DMR_DI 0x00000002 | ||
124 | #define DOORBELL_DSR_TE 0x00000080 | ||
125 | #define DOORBELL_DSR_QFI 0x00000010 | ||
126 | #define DOORBELL_DSR_DIQI 0x00000001 | ||
127 | #define DOORBELL_TID_OFFSET 0x02 | ||
128 | #define DOORBELL_SID_OFFSET 0x04 | ||
129 | #define DOORBELL_INFO_OFFSET 0x06 | ||
130 | |||
131 | #define DOORBELL_MESSAGE_SIZE 0x08 | ||
132 | #define DBELL_SID(x) (*(u16 *)(x + DOORBELL_SID_OFFSET)) | ||
133 | #define DBELL_TID(x) (*(u16 *)(x + DOORBELL_TID_OFFSET)) | ||
134 | #define DBELL_INF(x) (*(u16 *)(x + DOORBELL_INFO_OFFSET)) | ||
135 | |||
136 | struct rio_atmu_regs { | ||
137 | u32 rowtar; | ||
138 | u32 rowtear; | ||
139 | u32 rowbar; | ||
140 | u32 pad2; | ||
141 | u32 rowar; | ||
142 | u32 pad3[3]; | ||
143 | }; | ||
144 | |||
145 | struct rio_msg_regs { | ||
146 | u32 omr; /* 0xD_3000 - Outbound message 0 mode register */ | ||
147 | u32 osr; /* 0xD_3004 - Outbound message 0 status register */ | ||
148 | u32 pad1; | ||
149 | u32 odqdpar; /* 0xD_300C - Outbound message 0 descriptor queue | ||
150 | dequeue pointer address register */ | ||
151 | u32 pad2; | ||
152 | u32 osar; /* 0xD_3014 - Outbound message 0 source address | ||
153 | register */ | ||
154 | u32 odpr; /* 0xD_3018 - Outbound message 0 destination port | ||
155 | register */ | ||
156 | u32 odatr; /* 0xD_301C - Outbound message 0 destination attributes | ||
157 | Register*/ | ||
158 | u32 odcr; /* 0xD_3020 - Outbound message 0 double-word count | ||
159 | register */ | ||
160 | u32 pad3; | ||
161 | u32 odqepar; /* 0xD_3028 - Outbound message 0 descriptor queue | ||
162 | enqueue pointer address register */ | ||
163 | u32 pad4[13]; | ||
164 | u32 imr; /* 0xD_3060 - Inbound message 0 mode register */ | ||
165 | u32 isr; /* 0xD_3064 - Inbound message 0 status register */ | ||
166 | u32 pad5; | ||
167 | u32 ifqdpar; /* 0xD_306C - Inbound message 0 frame queue dequeue | ||
168 | pointer address register*/ | ||
169 | u32 pad6; | ||
170 | u32 ifqepar; /* 0xD_3074 - Inbound message 0 frame queue enqueue | ||
171 | pointer address register */ | ||
172 | u32 pad7[226]; | ||
173 | u32 odmr; /* 0xD_3400 - Outbound doorbell mode register */ | ||
174 | u32 odsr; /* 0xD_3404 - Outbound doorbell status register */ | ||
175 | u32 res0[4]; | ||
176 | u32 oddpr; /* 0xD_3418 - Outbound doorbell destination port | ||
177 | register */ | ||
178 | u32 oddatr; /* 0xD_341c - Outbound doorbell destination attributes | ||
179 | register */ | ||
180 | u32 res1[3]; | ||
181 | u32 odretcr; /* 0xD_342C - Outbound doorbell retry error threshold | ||
182 | configuration register */ | ||
183 | u32 res2[12]; | ||
184 | u32 dmr; /* 0xD_3460 - Inbound doorbell mode register */ | ||
185 | u32 dsr; /* 0xD_3464 - Inbound doorbell status register */ | ||
186 | u32 pad8; | ||
187 | u32 dqdpar; /* 0xD_346C - Inbound doorbell queue dequeue Pointer | ||
188 | address register */ | ||
189 | u32 pad9; | ||
190 | u32 dqepar; /* 0xD_3474 - Inbound doorbell Queue enqueue pointer | ||
191 | address register */ | ||
192 | u32 pad10[26]; | ||
193 | u32 pwmr; /* 0xD_34E0 - Inbound port-write mode register */ | ||
194 | u32 pwsr; /* 0xD_34E4 - Inbound port-write status register */ | ||
195 | u32 epwqbar; /* 0xD_34E8 - Extended Port-Write Queue Base Address | ||
196 | register */ | ||
197 | u32 pwqbar; /* 0xD_34EC - Inbound port-write queue base address | ||
198 | register */ | ||
199 | }; | ||
200 | |||
201 | struct rio_tx_desc { | ||
202 | u32 res1; | ||
203 | u32 saddr; | ||
204 | u32 dport; | ||
205 | u32 dattr; | ||
206 | u32 res2; | ||
207 | u32 res3; | ||
208 | u32 dwcnt; | ||
209 | u32 res4; | ||
210 | }; | ||
211 | |||
212 | struct rio_dbell_ring { | ||
213 | void *virt; | ||
214 | dma_addr_t phys; | ||
215 | }; | ||
216 | |||
217 | struct rio_msg_tx_ring { | ||
218 | void *virt; | ||
219 | dma_addr_t phys; | ||
220 | void *virt_buffer[RIO_MAX_TX_RING_SIZE]; | ||
221 | dma_addr_t phys_buffer[RIO_MAX_TX_RING_SIZE]; | ||
222 | int tx_slot; | ||
223 | int size; | ||
224 | void *dev_id; | ||
225 | }; | ||
226 | |||
227 | struct rio_msg_rx_ring { | ||
228 | void *virt; | ||
229 | dma_addr_t phys; | ||
230 | void *virt_buffer[RIO_MAX_RX_RING_SIZE]; | ||
231 | int rx_slot; | ||
232 | int size; | ||
233 | void *dev_id; | ||
234 | }; | ||
235 | |||
236 | struct rio_port_write_msg { | ||
237 | void *virt; | ||
238 | dma_addr_t phys; | ||
239 | u32 msg_count; | ||
240 | u32 err_count; | ||
241 | u32 discard_count; | ||
242 | }; | ||
243 | |||
244 | struct rio_priv { | ||
245 | struct device *dev; | ||
246 | void __iomem *regs_win; | ||
247 | struct rio_atmu_regs __iomem *atmu_regs; | ||
248 | struct rio_atmu_regs __iomem *maint_atmu_regs; | ||
249 | struct rio_atmu_regs __iomem *dbell_atmu_regs; | ||
250 | void __iomem *dbell_win; | ||
251 | void __iomem *maint_win; | ||
252 | struct rio_msg_regs __iomem *msg_regs; | ||
253 | struct rio_dbell_ring dbell_ring; | ||
254 | struct rio_msg_tx_ring msg_tx_ring; | ||
255 | struct rio_msg_rx_ring msg_rx_ring; | ||
256 | struct rio_port_write_msg port_write_msg; | ||
257 | int bellirq; | ||
258 | int txirq; | ||
259 | int rxirq; | ||
260 | int pwirq; | ||
261 | struct work_struct pw_work; | ||
262 | struct kfifo pw_fifo; | ||
263 | spinlock_t pw_fifo_lock; | ||
264 | }; | ||
265 | 58 | ||
266 | #define __fsl_read_rio_config(x, addr, err, op) \ | 59 | #define __fsl_read_rio_config(x, addr, err, op) \ |
267 | __asm__ __volatile__( \ | 60 | __asm__ __volatile__( \ |
@@ -279,7 +72,12 @@ struct rio_priv {
279 | : "=r" (err), "=r" (x) \ | 72 | : "=r" (err), "=r" (x) \ |
280 | : "b" (addr), "i" (-EFAULT), "0" (err)) | 73 | : "b" (addr), "i" (-EFAULT), "0" (err)) |
281 | 74 | ||
282 | static void __iomem *rio_regs_win; | 75 | void __iomem *rio_regs_win; |
76 | void __iomem *rmu_regs_win; | ||
77 | resource_size_t rio_law_start; | ||
78 | |||
79 | struct fsl_rio_dbell *dbell; | ||
80 | struct fsl_rio_pw *pw; | ||
283 | 81 | ||
284 | #ifdef CONFIG_E500 | 82 | #ifdef CONFIG_E500 |
285 | int fsl_rio_mcheck_exception(struct pt_regs *regs) | 83 | int fsl_rio_mcheck_exception(struct pt_regs *regs) |
@@ -311,42 +109,6 @@ EXPORT_SYMBOL_GPL(fsl_rio_mcheck_exception);
311 | #endif | 109 | #endif |
312 | 110 | ||
313 | /** | 111 | /** |
314 | * fsl_rio_doorbell_send - Send a MPC85xx doorbell message | ||
315 | * @mport: RapidIO master port info | ||
316 | * @index: ID of RapidIO interface | ||
317 | * @destid: Destination ID of target device | ||
318 | * @data: 16-bit info field of RapidIO doorbell message | ||
319 | * | ||
320 | * Sends a MPC85xx doorbell message. Returns %0 on success or | ||
321 | * %-EINVAL on failure. | ||
322 | */ | ||
323 | static int fsl_rio_doorbell_send(struct rio_mport *mport, | ||
324 | int index, u16 destid, u16 data) | ||
325 | { | ||
326 | struct rio_priv *priv = mport->priv; | ||
327 | pr_debug("fsl_doorbell_send: index %d destid %4.4x data %4.4x\n", | ||
328 | index, destid, data); | ||
329 | switch (mport->phy_type) { | ||
330 | case RIO_PHY_PARALLEL: | ||
331 | out_be32(&priv->dbell_atmu_regs->rowtar, destid << 22); | ||
332 | out_be16(priv->dbell_win, data); | ||
333 | break; | ||
334 | case RIO_PHY_SERIAL: | ||
335 | /* In the serial version silicons, such as MPC8548, MPC8641, | ||
336 | * below operations is must be. | ||
337 | */ | ||
338 | out_be32(&priv->msg_regs->odmr, 0x00000000); | ||
339 | out_be32(&priv->msg_regs->odretcr, 0x00000004); | ||
340 | out_be32(&priv->msg_regs->oddpr, destid << 16); | ||
341 | out_be32(&priv->msg_regs->oddatr, data); | ||
342 | out_be32(&priv->msg_regs->odmr, 0x00000001); | ||
343 | break; | ||
344 | } | ||
345 | |||
346 | return 0; | ||
347 | } | ||
348 | |||
349 | /** | ||
350 | * fsl_local_config_read - Generate a MPC85xx local config space read | 112 | * fsl_local_config_read - Generate a MPC85xx local config space read |
351 | * @mport: RapidIO master port info | 113 | * @mport: RapidIO master port info |
352 | * @index: ID of RapdiIO interface | 114 | * @index: ID of RapdiIO interface |
@@ -384,8 +146,8 @@ static int fsl_local_config_write(struct rio_mport *mport,
384 | { | 146 | { |
385 | struct rio_priv *priv = mport->priv; | 147 | struct rio_priv *priv = mport->priv; |
386 | pr_debug | 148 | pr_debug |
387 | ("fsl_local_config_write: index %d offset %8.8x data %8.8x\n", | 149 | ("fsl_local_config_write: index %d offset %8.8x data %8.8x\n", |
388 | index, offset, data); | 150 | index, offset, data); |
389 | out_be32(priv->regs_win + offset, data); | 151 | out_be32(priv->regs_win + offset, data); |
390 | 152 | ||
391 | return 0; | 153 | return 0; |
@@ -413,8 +175,9 @@ fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid,
413 | u32 rval, err = 0; | 175 | u32 rval, err = 0; |
414 | 176 | ||
415 | pr_debug | 177 | pr_debug |
416 | ("fsl_rio_config_read: index %d destid %d hopcount %d offset %8.8x len %d\n", | 178 | ("fsl_rio_config_read:" |
417 | index, destid, hopcount, offset, len); | 179 | " index %d destid %d hopcount %d offset %8.8x len %d\n", |
180 | index, destid, hopcount, offset, len); | ||
418 | 181 | ||
419 | /* 16MB maintenance window possible */ | 182 | /* 16MB maintenance window possible */ |
420 | /* allow only aligned access to maintenance registers */ | 183 | /* allow only aligned access to maintenance registers */ |
@@ -423,7 +186,7 @@ fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid,
423 | 186 | ||
424 | out_be32(&priv->maint_atmu_regs->rowtar, | 187 | out_be32(&priv->maint_atmu_regs->rowtar, |
425 | (destid << 22) | (hopcount << 12) | (offset >> 12)); | 188 | (destid << 22) | (hopcount << 12) | (offset >> 12)); |
426 | out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10)); | 189 | out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10)); |
427 | 190 | ||
428 | data = (u8 *) priv->maint_win + (offset & (RIO_MAINT_WIN_SIZE - 1)); | 191 | data = (u8 *) priv->maint_win + (offset & (RIO_MAINT_WIN_SIZE - 1)); |
429 | switch (len) { | 192 | switch (len) { |
@@ -470,8 +233,9 @@ fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid,
470 | struct rio_priv *priv = mport->priv; | 233 | struct rio_priv *priv = mport->priv; |
471 | u8 *data; | 234 | u8 *data; |
472 | pr_debug | 235 | pr_debug |
473 | ("fsl_rio_config_write: index %d destid %d hopcount %d offset %8.8x len %d val %8.8x\n", | 236 | ("fsl_rio_config_write:" |
474 | index, destid, hopcount, offset, len, val); | 237 | " index %d destid %d hopcount %d offset %8.8x len %d val %8.8x\n", |
238 | index, destid, hopcount, offset, len, val); | ||
475 | 239 | ||
476 | /* 16MB maintenance windows possible */ | 240 | /* 16MB maintenance windows possible */ |
477 | /* allow only aligned access to maintenance registers */ | 241 | /* allow only aligned access to maintenance registers */ |
@@ -480,7 +244,7 @@ fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid,
480 | 244 | ||
481 | out_be32(&priv->maint_atmu_regs->rowtar, | 245 | out_be32(&priv->maint_atmu_regs->rowtar, |
482 | (destid << 22) | (hopcount << 12) | (offset >> 12)); | 246 | (destid << 22) | (hopcount << 12) | (offset >> 12)); |
483 | out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10)); | 247 | out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10)); |
484 | 248 | ||
485 | data = (u8 *) priv->maint_win + (offset & (RIO_MAINT_WIN_SIZE - 1)); | 249 | data = (u8 *) priv->maint_win + (offset & (RIO_MAINT_WIN_SIZE - 1)); |
486 | switch (len) { | 250 | switch (len) { |
@@ -500,590 +264,7 @@ fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid,
500 | return 0; | 264 | return 0; |
501 | } | 265 | } |
502 | 266 | ||
503 | /** | 267 | void fsl_rio_port_error_handler(int offset) |
504 | * fsl_add_outb_message - Add message to the MPC85xx outbound message queue | ||
505 | * @mport: Master port with outbound message queue | ||
506 | * @rdev: Target of outbound message | ||
507 | * @mbox: Outbound mailbox | ||
508 | * @buffer: Message to add to outbound queue | ||
509 | * @len: Length of message | ||
510 | * | ||
511 | * Adds the @buffer message to the MPC85xx outbound message queue. Returns | ||
512 | * %0 on success or %-EINVAL on failure. | ||
513 | */ | ||
514 | static int | ||
515 | fsl_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox, | ||
516 | void *buffer, size_t len) | ||
517 | { | ||
518 | struct rio_priv *priv = mport->priv; | ||
519 | u32 omr; | ||
520 | struct rio_tx_desc *desc = (struct rio_tx_desc *)priv->msg_tx_ring.virt | ||
521 | + priv->msg_tx_ring.tx_slot; | ||
522 | int ret = 0; | ||
523 | |||
524 | pr_debug("RIO: fsl_add_outb_message(): destid %4.4x mbox %d buffer " \ | ||
525 | "%8.8x len %8.8x\n", rdev->destid, mbox, (int)buffer, len); | ||
526 | |||
527 | if ((len < 8) || (len > RIO_MAX_MSG_SIZE)) { | ||
528 | ret = -EINVAL; | ||
529 | goto out; | ||
530 | } | ||
531 | |||
532 | /* Copy and clear rest of buffer */ | ||
533 | memcpy(priv->msg_tx_ring.virt_buffer[priv->msg_tx_ring.tx_slot], buffer, | ||
534 | len); | ||
535 | if (len < (RIO_MAX_MSG_SIZE - 4)) | ||
536 | memset(priv->msg_tx_ring.virt_buffer[priv->msg_tx_ring.tx_slot] | ||
537 | + len, 0, RIO_MAX_MSG_SIZE - len); | ||
538 | |||
539 | switch (mport->phy_type) { | ||
540 | case RIO_PHY_PARALLEL: | ||
541 | /* Set mbox field for message */ | ||
542 | desc->dport = mbox & 0x3; | ||
543 | |||
544 | /* Enable EOMI interrupt, set priority, and set destid */ | ||
545 | desc->dattr = 0x28000000 | (rdev->destid << 2); | ||
546 | break; | ||
547 | case RIO_PHY_SERIAL: | ||
548 | /* Set mbox field for message, and set destid */ | ||
549 | desc->dport = (rdev->destid << 16) | (mbox & 0x3); | ||
550 | |||
551 | /* Enable EOMI interrupt and priority */ | ||
552 | desc->dattr = 0x28000000; | ||
553 | break; | ||
554 | } | ||
555 | |||
556 | /* Set transfer size aligned to next power of 2 (in double words) */ | ||
557 | desc->dwcnt = is_power_of_2(len) ? len : 1 << get_bitmask_order(len); | ||
558 | |||
559 | /* Set snooping and source buffer address */ | ||
560 | desc->saddr = 0x00000004 | ||
561 | | priv->msg_tx_ring.phys_buffer[priv->msg_tx_ring.tx_slot]; | ||
562 | |||
563 | /* Increment enqueue pointer */ | ||
564 | omr = in_be32(&priv->msg_regs->omr); | ||
565 | out_be32(&priv->msg_regs->omr, omr | RIO_MSG_OMR_MUI); | ||
566 | |||
567 | /* Go to next descriptor */ | ||
568 | if (++priv->msg_tx_ring.tx_slot == priv->msg_tx_ring.size) | ||
569 | priv->msg_tx_ring.tx_slot = 0; | ||
570 | |||
571 | out: | ||
572 | return ret; | ||
573 | } | ||
574 | |||
575 | /** | ||
576 | * fsl_rio_tx_handler - MPC85xx outbound message interrupt handler | ||
577 | * @irq: Linux interrupt number | ||
578 | * @dev_instance: Pointer to interrupt-specific data | ||
579 | * | ||
580 | * Handles outbound message interrupts. Executes a register outbound | ||
581 | * mailbox event handler and acks the interrupt occurrence. | ||
582 | */ | ||
583 | static irqreturn_t | ||
584 | fsl_rio_tx_handler(int irq, void *dev_instance) | ||
585 | { | ||
586 | int osr; | ||
587 | struct rio_mport *port = (struct rio_mport *)dev_instance; | ||
588 | struct rio_priv *priv = port->priv; | ||
589 | |||
590 | osr = in_be32(&priv->msg_regs->osr); | ||
591 | |||
592 | if (osr & RIO_MSG_OSR_TE) { | ||
593 | pr_info("RIO: outbound message transmission error\n"); | ||
594 | out_be32(&priv->msg_regs->osr, RIO_MSG_OSR_TE); | ||
595 | goto out; | ||
596 | } | ||
597 | |||
598 | if (osr & RIO_MSG_OSR_QOI) { | ||
599 | pr_info("RIO: outbound message queue overflow\n"); | ||
600 | out_be32(&priv->msg_regs->osr, RIO_MSG_OSR_QOI); | ||
601 | goto out; | ||
602 | } | ||
603 | |||
604 | if (osr & RIO_MSG_OSR_EOMI) { | ||
605 | u32 dqp = in_be32(&priv->msg_regs->odqdpar); | ||
606 | int slot = (dqp - priv->msg_tx_ring.phys) >> 5; | ||
607 | port->outb_msg[0].mcback(port, priv->msg_tx_ring.dev_id, -1, | ||
608 | slot); | ||
609 | |||
610 | /* Ack the end-of-message interrupt */ | ||
611 | out_be32(&priv->msg_regs->osr, RIO_MSG_OSR_EOMI); | ||
612 | } | ||
613 | |||
614 | out: | ||
615 | return IRQ_HANDLED; | ||
616 | } | ||
617 | |||
618 | /** | ||
619 | * fsl_open_outb_mbox - Initialize MPC85xx outbound mailbox | ||
620 | * @mport: Master port implementing the outbound message unit | ||
621 | * @dev_id: Device specific pointer to pass on event | ||
622 | * @mbox: Mailbox to open | ||
623 | * @entries: Number of entries in the outbound mailbox ring | ||
624 | * | ||
625 | * Initializes buffer ring, request the outbound message interrupt, | ||
626 | * and enables the outbound message unit. Returns %0 on success and | ||
627 | * %-EINVAL or %-ENOMEM on failure. | ||
628 | */ | ||
629 | static int | ||
630 | fsl_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries) | ||
631 | { | ||
632 | int i, j, rc = 0; | ||
633 | struct rio_priv *priv = mport->priv; | ||
634 | |||
635 | if ((entries < RIO_MIN_TX_RING_SIZE) || | ||
636 | (entries > RIO_MAX_TX_RING_SIZE) || (!is_power_of_2(entries))) { | ||
637 | rc = -EINVAL; | ||
638 | goto out; | ||
639 | } | ||
640 | |||
641 | /* Initialize shadow copy ring */ | ||
642 | priv->msg_tx_ring.dev_id = dev_id; | ||
643 | priv->msg_tx_ring.size = entries; | ||
644 | |||
645 | for (i = 0; i < priv->msg_tx_ring.size; i++) { | ||
646 | priv->msg_tx_ring.virt_buffer[i] = | ||
647 | dma_alloc_coherent(priv->dev, RIO_MSG_BUFFER_SIZE, | ||
648 | &priv->msg_tx_ring.phys_buffer[i], GFP_KERNEL); | ||
649 | if (!priv->msg_tx_ring.virt_buffer[i]) { | ||
650 | rc = -ENOMEM; | ||
651 | for (j = 0; j < priv->msg_tx_ring.size; j++) | ||
652 | if (priv->msg_tx_ring.virt_buffer[j]) | ||
653 | dma_free_coherent(priv->dev, | ||
654 | RIO_MSG_BUFFER_SIZE, | ||
655 | priv->msg_tx_ring. | ||
656 | virt_buffer[j], | ||
657 | priv->msg_tx_ring. | ||
658 | phys_buffer[j]); | ||
659 | goto out; | ||
660 | } | ||
661 | } | ||
662 | |||
663 | /* Initialize outbound message descriptor ring */ | ||
664 | priv->msg_tx_ring.virt = dma_alloc_coherent(priv->dev, | ||
665 | priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE, | ||
666 | &priv->msg_tx_ring.phys, GFP_KERNEL); | ||
667 | if (!priv->msg_tx_ring.virt) { | ||
668 | rc = -ENOMEM; | ||
669 | goto out_dma; | ||
670 | } | ||
671 | memset(priv->msg_tx_ring.virt, 0, | ||
672 | priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE); | ||
673 | priv->msg_tx_ring.tx_slot = 0; | ||
674 | |||
675 | /* Point dequeue/enqueue pointers at first entry in ring */ | ||
676 | out_be32(&priv->msg_regs->odqdpar, priv->msg_tx_ring.phys); | ||
677 | out_be32(&priv->msg_regs->odqepar, priv->msg_tx_ring.phys); | ||
678 | |||
679 | /* Configure for snooping */ | ||
680 | out_be32(&priv->msg_regs->osar, 0x00000004); | ||
681 | |||
682 | /* Clear interrupt status */ | ||
683 | out_be32(&priv->msg_regs->osr, 0x000000b3); | ||
684 | |||
685 | /* Hook up outbound message handler */ | ||
686 | rc = request_irq(IRQ_RIO_TX(mport), fsl_rio_tx_handler, 0, | ||
687 | "msg_tx", (void *)mport); | ||
688 | if (rc < 0) | ||
689 | goto out_irq; | ||
690 | |||
691 | /* | ||
692 | * Configure outbound message unit | ||
693 | * Snooping | ||
694 | * Interrupts (all enabled, except QEIE) | ||
695 | * Chaining mode | ||
696 | * Disable | ||
697 | */ | ||
698 | out_be32(&priv->msg_regs->omr, 0x00100220); | ||
699 | |||
700 | /* Set number of entries */ | ||
701 | out_be32(&priv->msg_regs->omr, | ||
702 | in_be32(&priv->msg_regs->omr) | | ||
703 | ((get_bitmask_order(entries) - 2) << 12)); | ||
704 | |||
705 | /* Now enable the unit */ | ||
706 | out_be32(&priv->msg_regs->omr, in_be32(&priv->msg_regs->omr) | 0x1); | ||
707 | |||
708 | out: | ||
709 | return rc; | ||
710 | |||
711 | out_irq: | ||
712 | dma_free_coherent(priv->dev, | ||
713 | priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE, | ||
714 | priv->msg_tx_ring.virt, priv->msg_tx_ring.phys); | ||
715 | |||
716 | out_dma: | ||
717 | for (i = 0; i < priv->msg_tx_ring.size; i++) | ||
718 | dma_free_coherent(priv->dev, RIO_MSG_BUFFER_SIZE, | ||
719 | priv->msg_tx_ring.virt_buffer[i], | ||
720 | priv->msg_tx_ring.phys_buffer[i]); | ||
721 | |||
722 | return rc; | ||
723 | } | ||
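For illustration only (not from this patch): a client driver would normally reach this op through the generic RapidIO core rather than calling it directly. A minimal sketch with invented names (demo_outb_done(), demo_send()); 128 is one legal power-of-two ring size, which the code above encodes into OMR as get_bitmask_order(128) - 2 = 6:

#include <linux/rio.h>
#include <linux/rio_drv.h>

/* Completion callback: the core invokes this from fsl_rio_tx_handler() */
static void demo_outb_done(struct rio_mport *mport, void *dev_id,
			   int mbox, int slot)
{
	pr_debug("outbound mbox %d: descriptor slot %d completed\n", mbox, slot);
}

static int demo_send(struct rio_mport *mport, struct rio_dev *rdev,
		     void *buf, size_t len)
{
	/* Open mailbox 0 with a 128-entry ring; ends up in fsl_open_outb_mbox() */
	int rc = rio_request_outb_mbox(mport, NULL, 0, 128, demo_outb_done);

	if (rc)
		return rc;
	/* Queue one message (len <= RIO_MAX_MSG_SIZE) for transmission */
	return rio_add_outb_message(mport, rdev, 0, buf, len);
}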
724 | |||
725 | /** | ||
726 | * fsl_close_outb_mbox - Shut down MPC85xx outbound mailbox | ||
727 | * @mport: Master port implementing the outbound message unit | ||
728 | * @mbox: Mailbox to close | ||
729 | * | ||
730 | * Disables the outbound message unit, frees all buffers, and | ||
731 | * frees the outbound message interrupt. | ||
732 | */ | ||
733 | static void fsl_close_outb_mbox(struct rio_mport *mport, int mbox) | ||
734 | { | ||
735 | struct rio_priv *priv = mport->priv; | ||
736 | /* Disable outbound message unit */ | ||
737 | out_be32(&priv->msg_regs->omr, 0); | ||
738 | |||
739 | /* Free ring */ | ||
740 | dma_free_coherent(priv->dev, | ||
741 | priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE, | ||
742 | priv->msg_tx_ring.virt, priv->msg_tx_ring.phys); | ||
743 | |||
744 | /* Free interrupt */ | ||
745 | free_irq(IRQ_RIO_TX(mport), (void *)mport); | ||
746 | } | ||
747 | |||
748 | /** | ||
749 | * fsl_rio_rx_handler - MPC85xx inbound message interrupt handler | ||
750 | * @irq: Linux interrupt number | ||
751 | * @dev_instance: Pointer to interrupt-specific data | ||
752 | * | ||
753 | * Handles inbound message interrupts. Executes a registered inbound | ||
754 | * mailbox event handler and acks the interrupt occurrence. | ||
755 | */ | ||
756 | static irqreturn_t | ||
757 | fsl_rio_rx_handler(int irq, void *dev_instance) | ||
758 | { | ||
759 | int isr; | ||
760 | struct rio_mport *port = (struct rio_mport *)dev_instance; | ||
761 | struct rio_priv *priv = port->priv; | ||
762 | |||
763 | isr = in_be32(&priv->msg_regs->isr); | ||
764 | |||
765 | if (isr & RIO_MSG_ISR_TE) { | ||
766 | pr_info("RIO: inbound message reception error\n"); | ||
767 | out_be32((void *)&priv->msg_regs->isr, RIO_MSG_ISR_TE); | ||
768 | goto out; | ||
769 | } | ||
770 | |||
771 | /* XXX Need to check/dispatch until queue empty */ | ||
772 | if (isr & RIO_MSG_ISR_DIQI) { | ||
773 | /* | ||
774 | * We implement *only* mailbox 0, but can receive messages | ||
775 | * for any mailbox/letter to that mailbox destination. So, | ||
776 | * make the callback with an unknown/invalid mailbox number | ||
777 | * argument. | ||
778 | */ | ||
779 | port->inb_msg[0].mcback(port, priv->msg_rx_ring.dev_id, -1, -1); | ||
780 | |||
781 | /* Ack the queueing interrupt */ | ||
782 | out_be32(&priv->msg_regs->isr, RIO_MSG_ISR_DIQI); | ||
783 | } | ||
784 | |||
785 | out: | ||
786 | return IRQ_HANDLED; | ||
787 | } | ||
788 | |||
789 | /** | ||
790 | * fsl_open_inb_mbox - Initialize MPC85xx inbound mailbox | ||
791 | * @mport: Master port implementing the inbound message unit | ||
792 | * @dev_id: Device specific pointer to pass on event | ||
793 | * @mbox: Mailbox to open | ||
794 | * @entries: Number of entries in the inbound mailbox ring | ||
795 | * | ||
796 | * Initializes the buffer ring, requests the inbound message interrupt, | ||
797 | * and enables the inbound message unit. Returns %0 on success | ||
798 | * and %-EINVAL or %-ENOMEM on failure. | ||
799 | */ | ||
800 | static int | ||
801 | fsl_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries) | ||
802 | { | ||
803 | int i, rc = 0; | ||
804 | struct rio_priv *priv = mport->priv; | ||
805 | |||
806 | if ((entries < RIO_MIN_RX_RING_SIZE) || | ||
807 | (entries > RIO_MAX_RX_RING_SIZE) || (!is_power_of_2(entries))) { | ||
808 | rc = -EINVAL; | ||
809 | goto out; | ||
810 | } | ||
811 | |||
812 | /* Initialize client buffer ring */ | ||
813 | priv->msg_rx_ring.dev_id = dev_id; | ||
814 | priv->msg_rx_ring.size = entries; | ||
815 | priv->msg_rx_ring.rx_slot = 0; | ||
816 | for (i = 0; i < priv->msg_rx_ring.size; i++) | ||
817 | priv->msg_rx_ring.virt_buffer[i] = NULL; | ||
818 | |||
819 | /* Initialize inbound message ring */ | ||
820 | priv->msg_rx_ring.virt = dma_alloc_coherent(priv->dev, | ||
821 | priv->msg_rx_ring.size * RIO_MAX_MSG_SIZE, | ||
822 | &priv->msg_rx_ring.phys, GFP_KERNEL); | ||
823 | if (!priv->msg_rx_ring.virt) { | ||
824 | rc = -ENOMEM; | ||
825 | goto out; | ||
826 | } | ||
827 | |||
828 | /* Point dequeue/enqueue pointers at first entry in ring */ | ||
829 | out_be32(&priv->msg_regs->ifqdpar, (u32) priv->msg_rx_ring.phys); | ||
830 | out_be32(&priv->msg_regs->ifqepar, (u32) priv->msg_rx_ring.phys); | ||
831 | |||
832 | /* Clear interrupt status */ | ||
833 | out_be32(&priv->msg_regs->isr, 0x00000091); | ||
834 | |||
835 | /* Hook up inbound message handler */ | ||
836 | rc = request_irq(IRQ_RIO_RX(mport), fsl_rio_rx_handler, 0, | ||
837 | "msg_rx", (void *)mport); | ||
838 | if (rc < 0) { | ||
839 | dma_free_coherent(priv->dev, RIO_MSG_BUFFER_SIZE, | ||
840 | priv->msg_tx_ring.virt_buffer[i], | ||
841 | priv->msg_tx_ring.phys_buffer[i]); | ||
842 | goto out; | ||
843 | } | ||
844 | |||
845 | /* | ||
846 | * Configure inbound message unit: | ||
847 | * Snooping | ||
848 | * 4KB max message size | ||
849 | * Unmask all interrupt sources | ||
850 | * Disable | ||
851 | */ | ||
852 | out_be32(&priv->msg_regs->imr, 0x001b0060); | ||
853 | |||
854 | /* Set number of queue entries */ | ||
855 | setbits32(&priv->msg_regs->imr, (get_bitmask_order(entries) - 2) << 12); | ||
856 | |||
857 | /* Now enable the unit */ | ||
858 | setbits32(&priv->msg_regs->imr, 0x1); | ||
859 | |||
860 | out: | ||
861 | return rc; | ||
862 | } | ||
863 | |||
864 | /** | ||
865 | * fsl_close_inb_mbox - Shut down MPC85xx inbound mailbox | ||
866 | * @mport: Master port implementing the inbound message unit | ||
867 | * @mbox: Mailbox to close | ||
868 | * | ||
869 | * Disables the inbound message unit, frees all buffers, and | ||
870 | * frees the inbound message interrupt. | ||
871 | */ | ||
872 | static void fsl_close_inb_mbox(struct rio_mport *mport, int mbox) | ||
873 | { | ||
874 | struct rio_priv *priv = mport->priv; | ||
875 | /* Disable inbound message unit */ | ||
876 | out_be32(&priv->msg_regs->imr, 0); | ||
877 | |||
878 | /* Free ring */ | ||
879 | dma_free_coherent(priv->dev, priv->msg_rx_ring.size * RIO_MAX_MSG_SIZE, | ||
880 | priv->msg_rx_ring.virt, priv->msg_rx_ring.phys); | ||
881 | |||
882 | /* Free interrupt */ | ||
883 | free_irq(IRQ_RIO_RX(mport), (void *)mport); | ||
884 | } | ||
885 | |||
886 | /** | ||
887 | * fsl_add_inb_buffer - Add buffer to the MPC85xx inbound message queue | ||
888 | * @mport: Master port implementing the inbound message unit | ||
889 | * @mbox: Inbound mailbox number | ||
890 | * @buf: Buffer to add to inbound queue | ||
891 | * | ||
892 | * Adds the @buf buffer to the MPC85xx inbound message queue. Returns | ||
893 | * %0 on success or %-EINVAL on failure. | ||
894 | */ | ||
895 | static int fsl_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf) | ||
896 | { | ||
897 | int rc = 0; | ||
898 | struct rio_priv *priv = mport->priv; | ||
899 | |||
900 | pr_debug("RIO: fsl_add_inb_buffer(), msg_rx_ring.rx_slot %d\n", | ||
901 | priv->msg_rx_ring.rx_slot); | ||
902 | |||
903 | if (priv->msg_rx_ring.virt_buffer[priv->msg_rx_ring.rx_slot]) { | ||
904 | printk(KERN_ERR | ||
905 | "RIO: error adding inbound buffer %d, buffer exists\n", | ||
906 | priv->msg_rx_ring.rx_slot); | ||
907 | rc = -EINVAL; | ||
908 | goto out; | ||
909 | } | ||
910 | |||
911 | priv->msg_rx_ring.virt_buffer[priv->msg_rx_ring.rx_slot] = buf; | ||
912 | if (++priv->msg_rx_ring.rx_slot == priv->msg_rx_ring.size) | ||
913 | priv->msg_rx_ring.rx_slot = 0; | ||
914 | |||
915 | out: | ||
916 | return rc; | ||
917 | } | ||
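For illustration only: from a client driver, the inbound side is set up through the core, which routes rio_request_inb_mbox() to fsl_open_inb_mbox() and rio_add_inb_buffer() to fsl_add_inb_buffer() above. Names are invented; the callback body is sketched after fsl_get_inb_message() below.

#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/slab.h>

#define DEMO_RING_ENTRIES 32	/* power of two, within the RX ring-size limits */

static void demo_inb_event(struct rio_mport *mport, void *dev_id,
			   int mbox, int slot);	/* sketched further below */

static int demo_open_rx(struct rio_mport *mport)
{
	int i, rc;

	rc = rio_request_inb_mbox(mport, NULL, 0, DEMO_RING_ENTRIES,
				  demo_inb_event);
	if (rc)
		return rc;

	/* Pre-post one RIO_MAX_MSG_SIZE buffer per ring slot */
	for (i = 0; i < DEMO_RING_ENTRIES; i++) {
		void *buf = kmalloc(RIO_MAX_MSG_SIZE, GFP_KERNEL);

		if (!buf)
			return -ENOMEM;
		rio_add_inb_buffer(mport, 0, buf);
	}
	return 0;
}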
918 | |||
919 | /** | ||
920 | * fsl_get_inb_message - Fetch inbound message from the MPC85xx message unit | ||
921 | * @mport: Master port implementing the inbound message unit | ||
922 | * @mbox: Inbound mailbox number | ||
923 | * | ||
924 | * Gets the next available inbound message from the inbound message queue. | ||
925 | * A pointer to the message is returned on success or NULL on failure. | ||
926 | */ | ||
927 | static void *fsl_get_inb_message(struct rio_mport *mport, int mbox) | ||
928 | { | ||
929 | struct rio_priv *priv = mport->priv; | ||
930 | u32 phys_buf, virt_buf; | ||
931 | void *buf = NULL; | ||
932 | int buf_idx; | ||
933 | |||
934 | phys_buf = in_be32(&priv->msg_regs->ifqdpar); | ||
935 | |||
936 | /* If no more messages, then bail out */ | ||
937 | if (phys_buf == in_be32(&priv->msg_regs->ifqepar)) | ||
938 | goto out2; | ||
939 | |||
940 | virt_buf = (u32) priv->msg_rx_ring.virt + (phys_buf | ||
941 | - priv->msg_rx_ring.phys); | ||
942 | buf_idx = (phys_buf - priv->msg_rx_ring.phys) / RIO_MAX_MSG_SIZE; | ||
943 | buf = priv->msg_rx_ring.virt_buffer[buf_idx]; | ||
944 | |||
945 | if (!buf) { | ||
946 | printk(KERN_ERR | ||
947 | "RIO: inbound message copy failed, no buffers\n"); | ||
948 | goto out1; | ||
949 | } | ||
950 | |||
951 | /* Copy max message size, caller is expected to allocate that big */ | ||
952 | memcpy(buf, (void *)virt_buf, RIO_MAX_MSG_SIZE); | ||
953 | |||
954 | /* Clear the available buffer */ | ||
955 | priv->msg_rx_ring.virt_buffer[buf_idx] = NULL; | ||
956 | |||
957 | out1: | ||
958 | setbits32(&priv->msg_regs->imr, RIO_MSG_IMR_MI); | ||
959 | |||
960 | out2: | ||
961 | return buf; | ||
962 | } | ||
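Continuing the illustrative sketch above, the inbound callback drains the queue with rio_get_inb_message() until it returns NULL and re-posts each consumed buffer; mailbox 0 is hard-coded because fsl_rio_rx_handler() reports -1 for the mailbox argument:

static void demo_inb_event(struct rio_mport *mport, void *dev_id,
			   int mbox, int slot)
{
	void *msg;

	/* NULL once ifqdpar catches up with ifqepar, i.e. the queue is empty */
	while ((msg = rio_get_inb_message(mport, 0)) != NULL) {
		/* ... consume the RIO_MAX_MSG_SIZE message in msg ... */
		rio_add_inb_buffer(mport, 0, msg);	/* recycle the buffer */
	}
}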
963 | |||
964 | /** | ||
965 | * fsl_rio_dbell_handler - MPC85xx doorbell interrupt handler | ||
966 | * @irq: Linux interrupt number | ||
967 | * @dev_instance: Pointer to interrupt-specific data | ||
968 | * | ||
969 | * Handles doorbell interrupts. Parses a list of registered | ||
970 | * doorbell event handlers and executes a matching event handler. | ||
971 | */ | ||
972 | static irqreturn_t | ||
973 | fsl_rio_dbell_handler(int irq, void *dev_instance) | ||
974 | { | ||
975 | int dsr; | ||
976 | struct rio_mport *port = (struct rio_mport *)dev_instance; | ||
977 | struct rio_priv *priv = port->priv; | ||
978 | |||
979 | dsr = in_be32(&priv->msg_regs->dsr); | ||
980 | |||
981 | if (dsr & DOORBELL_DSR_TE) { | ||
982 | pr_info("RIO: doorbell reception error\n"); | ||
983 | out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_TE); | ||
984 | goto out; | ||
985 | } | ||
986 | |||
987 | if (dsr & DOORBELL_DSR_QFI) { | ||
988 | pr_info("RIO: doorbell queue full\n"); | ||
989 | out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_QFI); | ||
990 | } | ||
991 | |||
992 | /* XXX Need to check/dispatch until queue empty */ | ||
993 | if (dsr & DOORBELL_DSR_DIQI) { | ||
994 | u32 dmsg = | ||
995 | (u32) priv->dbell_ring.virt + | ||
996 | (in_be32(&priv->msg_regs->dqdpar) & 0xfff); | ||
997 | struct rio_dbell *dbell; | ||
998 | int found = 0; | ||
999 | |||
1000 | pr_debug | ||
1001 | ("RIO: processing doorbell, sid %2.2x tid %2.2x info %4.4x\n", | ||
1002 | DBELL_SID(dmsg), DBELL_TID(dmsg), DBELL_INF(dmsg)); | ||
1003 | |||
1004 | list_for_each_entry(dbell, &port->dbells, node) { | ||
1005 | if ((dbell->res->start <= DBELL_INF(dmsg)) && | ||
1006 | (dbell->res->end >= DBELL_INF(dmsg))) { | ||
1007 | found = 1; | ||
1008 | break; | ||
1009 | } | ||
1010 | } | ||
1011 | if (found) { | ||
1012 | dbell->dinb(port, dbell->dev_id, DBELL_SID(dmsg), DBELL_TID(dmsg), | ||
1013 | DBELL_INF(dmsg)); | ||
1014 | } else { | ||
1015 | pr_debug | ||
1016 | ("RIO: spurious doorbell, sid %2.2x tid %2.2x info %4.4x\n", | ||
1017 | DBELL_SID(dmsg), DBELL_TID(dmsg), DBELL_INF(dmsg)); | ||
1018 | } | ||
1019 | setbits32(&priv->msg_regs->dmr, DOORBELL_DMR_DI); | ||
1020 | out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_DIQI); | ||
1021 | } | ||
1022 | |||
1023 | out: | ||
1024 | return IRQ_HANDLED; | ||
1025 | } | ||
1026 | |||
1027 | /** | ||
1028 | * fsl_rio_doorbell_init - MPC85xx doorbell interface init | ||
1029 | * @mport: Master port implementing the inbound doorbell unit | ||
1030 | * | ||
1031 | * Initializes doorbell unit hardware and inbound DMA buffer | ||
1032 | * ring. Called from fsl_rio_setup(). Returns %0 on success | ||
1033 | * or %-ENOMEM on failure. | ||
1034 | */ | ||
1035 | static int fsl_rio_doorbell_init(struct rio_mport *mport) | ||
1036 | { | ||
1037 | struct rio_priv *priv = mport->priv; | ||
1038 | int rc = 0; | ||
1039 | |||
1040 | /* Map outbound doorbell window immediately after maintenance window */ | ||
1041 | priv->dbell_win = ioremap(mport->iores.start + RIO_MAINT_WIN_SIZE, | ||
1042 | RIO_DBELL_WIN_SIZE); | ||
1043 | if (!priv->dbell_win) { | ||
1044 | printk(KERN_ERR | ||
1045 | "RIO: unable to map outbound doorbell window\n"); | ||
1046 | rc = -ENOMEM; | ||
1047 | goto out; | ||
1048 | } | ||
1049 | |||
1050 | /* Initialize inbound doorbells */ | ||
1051 | priv->dbell_ring.virt = dma_alloc_coherent(priv->dev, 512 * | ||
1052 | DOORBELL_MESSAGE_SIZE, &priv->dbell_ring.phys, GFP_KERNEL); | ||
1053 | if (!priv->dbell_ring.virt) { | ||
1054 | printk(KERN_ERR "RIO: unable allocate inbound doorbell ring\n"); | ||
1055 | rc = -ENOMEM; | ||
1056 | iounmap(priv->dbell_win); | ||
1057 | goto out; | ||
1058 | } | ||
1059 | |||
1060 | /* Point dequeue/enqueue pointers at first entry in ring */ | ||
1061 | out_be32(&priv->msg_regs->dqdpar, (u32) priv->dbell_ring.phys); | ||
1062 | out_be32(&priv->msg_regs->dqepar, (u32) priv->dbell_ring.phys); | ||
1063 | |||
1064 | /* Clear interrupt status */ | ||
1065 | out_be32(&priv->msg_regs->dsr, 0x00000091); | ||
1066 | |||
1067 | /* Hook up doorbell handler */ | ||
1068 | rc = request_irq(IRQ_RIO_BELL(mport), fsl_rio_dbell_handler, 0, | ||
1069 | "dbell_rx", (void *)mport); | ||
1070 | if (rc < 0) { | ||
1071 | iounmap(priv->dbell_win); | ||
1072 | dma_free_coherent(priv->dev, 512 * DOORBELL_MESSAGE_SIZE, | ||
1073 | priv->dbell_ring.virt, priv->dbell_ring.phys); | ||
1074 | printk(KERN_ERR | ||
1075 | "MPC85xx RIO: unable to request inbound doorbell irq"); | ||
1076 | goto out; | ||
1077 | } | ||
1078 | |||
1079 | /* Configure doorbells for snooping, 512 entries, and enable */ | ||
1080 | out_be32(&priv->msg_regs->dmr, 0x00108161); | ||
1081 | |||
1082 | out: | ||
1083 | return rc; | ||
1084 | } | ||
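For illustration only (invented names): the doorbell path uses the same split. rio_request_inb_dbell() adds the range entry that fsl_rio_dbell_handler() matches against port->dbells, and rio_send_doorbell() is what ultimately writes through the outbound doorbell window mapped above.

#include <linux/rio.h>
#include <linux/rio_drv.h>

static void demo_dbell_event(struct rio_mport *mport, void *dev_id,
			     u16 src, u16 dst, u16 info)
{
	pr_debug("doorbell info 0x%04x from destid 0x%04x\n", info, src);
}

static int demo_dbell(struct rio_mport *mport, struct rio_dev *rdev)
{
	/* Claim doorbell info values 0x0000-0x000f on this mport */
	int rc = rio_request_inb_dbell(mport, NULL, 0x0000, 0x000f,
				       demo_dbell_event);

	if (rc)
		return rc;
	/* Ring doorbell 0x0001 on the remote device */
	return rio_send_doorbell(rdev, 0x0001);
}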
1085 | |||
1086 | static void port_error_handler(struct rio_mport *port, int offset) | ||
1087 | { | 268 | { |
1088 | /*XXX: Error recovery is not implemented, we just clear errors */ | 269 | /*XXX: Error recovery is not implemented, we just clear errors */ |
1089 | out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0); | 270 | out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0); |
@@ -1098,263 +279,6 @@ static void port_error_handler(struct rio_mport *port, int offset) | |||
1098 | out_be32((u32 *)(rio_regs_win + RIO_PORT2_ESCSR), ESCSR_CLEAR); | 279 | out_be32((u32 *)(rio_regs_win + RIO_PORT2_ESCSR), ESCSR_CLEAR); |
1099 | } | 280 | } |
1100 | } | 281 | } |
1101 | |||
1102 | static void msg_unit_error_handler(struct rio_mport *port) | ||
1103 | { | ||
1104 | struct rio_priv *priv = port->priv; | ||
1105 | |||
1106 | /*XXX: Error recovery is not implemented, we just clear errors */ | ||
1107 | out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0); | ||
1108 | |||
1109 | out_be32((u32 *)(rio_regs_win + RIO_IM0SR), IMSR_CLEAR); | ||
1110 | out_be32((u32 *)(rio_regs_win + RIO_IM1SR), IMSR_CLEAR); | ||
1111 | out_be32((u32 *)(rio_regs_win + RIO_OM0SR), OMSR_CLEAR); | ||
1112 | out_be32((u32 *)(rio_regs_win + RIO_OM1SR), OMSR_CLEAR); | ||
1113 | |||
1114 | out_be32(&priv->msg_regs->odsr, ODSR_CLEAR); | ||
1115 | out_be32(&priv->msg_regs->dsr, IDSR_CLEAR); | ||
1116 | |||
1117 | out_be32(&priv->msg_regs->pwsr, IPWSR_CLEAR); | ||
1118 | } | ||
1119 | |||
1120 | /** | ||
1121 | * fsl_rio_port_write_handler - MPC85xx port write interrupt handler | ||
1122 | * @irq: Linux interrupt number | ||
1123 | * @dev_instance: Pointer to interrupt-specific data | ||
1124 | * | ||
1125 | * Handles port write interrupts. Parses a list of registered | ||
1126 | * port write event handlers and executes a matching event handler. | ||
1127 | */ | ||
1128 | static irqreturn_t | ||
1129 | fsl_rio_port_write_handler(int irq, void *dev_instance) | ||
1130 | { | ||
1131 | u32 ipwmr, ipwsr; | ||
1132 | struct rio_mport *port = (struct rio_mport *)dev_instance; | ||
1133 | struct rio_priv *priv = port->priv; | ||
1134 | u32 epwisr, tmp; | ||
1135 | |||
1136 | epwisr = in_be32(priv->regs_win + RIO_EPWISR); | ||
1137 | if (!(epwisr & RIO_EPWISR_PW)) | ||
1138 | goto pw_done; | ||
1139 | |||
1140 | ipwmr = in_be32(&priv->msg_regs->pwmr); | ||
1141 | ipwsr = in_be32(&priv->msg_regs->pwsr); | ||
1142 | |||
1143 | #ifdef DEBUG_PW | ||
1144 | pr_debug("PW Int->IPWMR: 0x%08x IPWSR: 0x%08x (", ipwmr, ipwsr); | ||
1145 | if (ipwsr & RIO_IPWSR_QF) | ||
1146 | pr_debug(" QF"); | ||
1147 | if (ipwsr & RIO_IPWSR_TE) | ||
1148 | pr_debug(" TE"); | ||
1149 | if (ipwsr & RIO_IPWSR_QFI) | ||
1150 | pr_debug(" QFI"); | ||
1151 | if (ipwsr & RIO_IPWSR_PWD) | ||
1152 | pr_debug(" PWD"); | ||
1153 | if (ipwsr & RIO_IPWSR_PWB) | ||
1154 | pr_debug(" PWB"); | ||
1155 | pr_debug(" )\n"); | ||
1156 | #endif | ||
1157 | /* Schedule deferred processing if PW was received */ | ||
1158 | if (ipwsr & RIO_IPWSR_QFI) { | ||
1159 | /* Save PW message (if there is room in FIFO), | ||
1160 | * otherwise discard it. | ||
1161 | */ | ||
1162 | if (kfifo_avail(&priv->pw_fifo) >= RIO_PW_MSG_SIZE) { | ||
1163 | priv->port_write_msg.msg_count++; | ||
1164 | kfifo_in(&priv->pw_fifo, priv->port_write_msg.virt, | ||
1165 | RIO_PW_MSG_SIZE); | ||
1166 | } else { | ||
1167 | priv->port_write_msg.discard_count++; | ||
1168 | pr_debug("RIO: ISR Discarded Port-Write Msg(s) (%d)\n", | ||
1169 | priv->port_write_msg.discard_count); | ||
1170 | } | ||
1171 | /* Clear interrupt and issue Clear Queue command. This allows | ||
1172 | * another port-write to be received. | ||
1173 | */ | ||
1174 | out_be32(&priv->msg_regs->pwsr, RIO_IPWSR_QFI); | ||
1175 | out_be32(&priv->msg_regs->pwmr, ipwmr | RIO_IPWMR_CQ); | ||
1176 | |||
1177 | schedule_work(&priv->pw_work); | ||
1178 | } | ||
1179 | |||
1180 | if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) { | ||
1181 | priv->port_write_msg.err_count++; | ||
1182 | pr_debug("RIO: Port-Write Transaction Err (%d)\n", | ||
1183 | priv->port_write_msg.err_count); | ||
1184 | /* Clear Transaction Error: port-write controller should be | ||
1185 | * disabled when clearing this error | ||
1186 | */ | ||
1187 | out_be32(&priv->msg_regs->pwmr, ipwmr & ~RIO_IPWMR_PWE); | ||
1188 | out_be32(&priv->msg_regs->pwsr, RIO_IPWSR_TE); | ||
1189 | out_be32(&priv->msg_regs->pwmr, ipwmr); | ||
1190 | } | ||
1191 | |||
1192 | if (ipwsr & RIO_IPWSR_PWD) { | ||
1193 | priv->port_write_msg.discard_count++; | ||
1194 | pr_debug("RIO: Port Discarded Port-Write Msg(s) (%d)\n", | ||
1195 | priv->port_write_msg.discard_count); | ||
1196 | out_be32(&priv->msg_regs->pwsr, RIO_IPWSR_PWD); | ||
1197 | } | ||
1198 | |||
1199 | pw_done: | ||
1200 | if (epwisr & RIO_EPWISR_PINT1) { | ||
1201 | tmp = in_be32(priv->regs_win + RIO_LTLEDCSR); | ||
1202 | pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp); | ||
1203 | port_error_handler(port, 0); | ||
1204 | } | ||
1205 | |||
1206 | if (epwisr & RIO_EPWISR_PINT2) { | ||
1207 | tmp = in_be32(priv->regs_win + RIO_LTLEDCSR); | ||
1208 | pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp); | ||
1209 | port_error_handler(port, 1); | ||
1210 | } | ||
1211 | |||
1212 | if (epwisr & RIO_EPWISR_MU) { | ||
1213 | tmp = in_be32(priv->regs_win + RIO_LTLEDCSR); | ||
1214 | pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp); | ||
1215 | msg_unit_error_handler(port); | ||
1216 | } | ||
1217 | |||
1218 | return IRQ_HANDLED; | ||
1219 | } | ||
1220 | |||
1221 | static void fsl_pw_dpc(struct work_struct *work) | ||
1222 | { | ||
1223 | struct rio_priv *priv = container_of(work, struct rio_priv, pw_work); | ||
1224 | unsigned long flags; | ||
1225 | u32 msg_buffer[RIO_PW_MSG_SIZE/sizeof(u32)]; | ||
1226 | |||
1227 | /* | ||
1228 | * Process port-write messages | ||
1229 | */ | ||
1230 | spin_lock_irqsave(&priv->pw_fifo_lock, flags); | ||
1231 | while (kfifo_out(&priv->pw_fifo, (unsigned char *)msg_buffer, | ||
1232 | RIO_PW_MSG_SIZE)) { | ||
1233 | /* Process one message */ | ||
1234 | spin_unlock_irqrestore(&priv->pw_fifo_lock, flags); | ||
1235 | #ifdef DEBUG_PW | ||
1236 | { | ||
1237 | u32 i; | ||
1238 | pr_debug("%s : Port-Write Message:", __func__); | ||
1239 | for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32); i++) { | ||
1240 | if ((i%4) == 0) | ||
1241 | pr_debug("\n0x%02x: 0x%08x", i*4, | ||
1242 | msg_buffer[i]); | ||
1243 | else | ||
1244 | pr_debug(" 0x%08x", msg_buffer[i]); | ||
1245 | } | ||
1246 | pr_debug("\n"); | ||
1247 | } | ||
1248 | #endif | ||
1249 | /* Pass the port-write message to RIO core for processing */ | ||
1250 | rio_inb_pwrite_handler((union rio_pw_msg *)msg_buffer); | ||
1251 | spin_lock_irqsave(&priv->pw_fifo_lock, flags); | ||
1252 | } | ||
1253 | spin_unlock_irqrestore(&priv->pw_fifo_lock, flags); | ||
1254 | } | ||
1255 | |||
1256 | /** | ||
1257 | * fsl_rio_pw_enable - enable/disable port-write interface | ||
1258 | * @mport: Master port implementing the port write unit | ||
1259 | * @enable: 1=enable; 0=disable port-write message handling | ||
1260 | */ | ||
1261 | static int fsl_rio_pw_enable(struct rio_mport *mport, int enable) | ||
1262 | { | ||
1263 | struct rio_priv *priv = mport->priv; | ||
1264 | u32 rval; | ||
1265 | |||
1266 | rval = in_be32(&priv->msg_regs->pwmr); | ||
1267 | |||
1268 | if (enable) | ||
1269 | rval |= RIO_IPWMR_PWE; | ||
1270 | else | ||
1271 | rval &= ~RIO_IPWMR_PWE; | ||
1272 | |||
1273 | out_be32(&priv->msg_regs->pwmr, rval); | ||
1274 | |||
1275 | return 0; | ||
1276 | } | ||
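For illustration only (invented names): port-writes reach drivers through the core as well. fsl_pw_dpc() hands each queued message to rio_inb_pwrite_handler(), which invokes the callback a driver registered with rio_request_inb_pwrite():

#include <linux/rio.h>
#include <linux/rio_drv.h>

static int demo_pwrite(struct rio_dev *rdev, union rio_pw_msg *msg, int step)
{
	pr_debug("port-write from destid 0x%04x, comptag 0x%08x\n",
		 rdev->destid, msg->em.comptag);
	return 0;
}

static int demo_enable_pw(struct rio_dev *rdev)
{
	return rio_request_inb_pwrite(rdev, demo_pwrite);
}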
1277 | |||
1278 | /** | ||
1279 | * fsl_rio_port_write_init - MPC85xx port write interface init | ||
1280 | * @mport: Master port implementing the port write unit | ||
1281 | * | ||
1282 | * Initializes port write unit hardware and DMA buffer | ||
1283 | * ring. Called from fsl_rio_setup(). Returns %0 on success | ||
1284 | * or %-ENOMEM on failure. | ||
1285 | */ | ||
1286 | static int fsl_rio_port_write_init(struct rio_mport *mport) | ||
1287 | { | ||
1288 | struct rio_priv *priv = mport->priv; | ||
1289 | int rc = 0; | ||
1290 | |||
1291 | /* Following configurations require a disabled port write controller */ | ||
1292 | out_be32(&priv->msg_regs->pwmr, | ||
1293 | in_be32(&priv->msg_regs->pwmr) & ~RIO_IPWMR_PWE); | ||
1294 | |||
1295 | /* Initialize port write */ | ||
1296 | priv->port_write_msg.virt = dma_alloc_coherent(priv->dev, | ||
1297 | RIO_PW_MSG_SIZE, | ||
1298 | &priv->port_write_msg.phys, GFP_KERNEL); | ||
1299 | if (!priv->port_write_msg.virt) { | ||
1300 | pr_err("RIO: unable allocate port write queue\n"); | ||
1301 | return -ENOMEM; | ||
1302 | } | ||
1303 | |||
1304 | priv->port_write_msg.err_count = 0; | ||
1305 | priv->port_write_msg.discard_count = 0; | ||
1306 | |||
1307 | /* Point dequeue/enqueue pointers at first entry */ | ||
1308 | out_be32(&priv->msg_regs->epwqbar, 0); | ||
1309 | out_be32(&priv->msg_regs->pwqbar, (u32) priv->port_write_msg.phys); | ||
1310 | |||
1311 | pr_debug("EIPWQBAR: 0x%08x IPWQBAR: 0x%08x\n", | ||
1312 | in_be32(&priv->msg_regs->epwqbar), | ||
1313 | in_be32(&priv->msg_regs->pwqbar)); | ||
1314 | |||
1315 | /* Clear interrupt status IPWSR */ | ||
1316 | out_be32(&priv->msg_regs->pwsr, | ||
1317 | (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD)); | ||
1318 | |||
1319 | /* Configure port write controller for snooping, enable all reporting, | ||
1320 | clear queue full */ | ||
1321 | out_be32(&priv->msg_regs->pwmr, | ||
1322 | RIO_IPWMR_SEN | RIO_IPWMR_QFIE | RIO_IPWMR_EIE | RIO_IPWMR_CQ); | ||
1323 | |||
1324 | |||
1325 | /* Hook up port-write handler */ | ||
1326 | rc = request_irq(IRQ_RIO_PW(mport), fsl_rio_port_write_handler, | ||
1327 | IRQF_SHARED, "port-write", (void *)mport); | ||
1328 | if (rc < 0) { | ||
1329 | pr_err("MPC85xx RIO: unable to request inbound doorbell irq"); | ||
1330 | goto err_out; | ||
1331 | } | ||
1332 | /* Enable Error Interrupt */ | ||
1333 | out_be32((u32 *)(rio_regs_win + RIO_LTLEECSR), LTLEECSR_ENABLE_ALL); | ||
1334 | |||
1335 | INIT_WORK(&priv->pw_work, fsl_pw_dpc); | ||
1336 | spin_lock_init(&priv->pw_fifo_lock); | ||
1337 | if (kfifo_alloc(&priv->pw_fifo, RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) { | ||
1338 | pr_err("FIFO allocation failed\n"); | ||
1339 | rc = -ENOMEM; | ||
1340 | goto err_out_irq; | ||
1341 | } | ||
1342 | |||
1343 | pr_debug("IPWMR: 0x%08x IPWSR: 0x%08x\n", | ||
1344 | in_be32(&priv->msg_regs->pwmr), | ||
1345 | in_be32(&priv->msg_regs->pwsr)); | ||
1346 | |||
1347 | return rc; | ||
1348 | |||
1349 | err_out_irq: | ||
1350 | free_irq(IRQ_RIO_PW(mport), (void *)mport); | ||
1351 | err_out: | ||
1352 | dma_free_coherent(priv->dev, RIO_PW_MSG_SIZE, | ||
1353 | priv->port_write_msg.virt, | ||
1354 | priv->port_write_msg.phys); | ||
1355 | return rc; | ||
1356 | } | ||
1357 | |||
1358 | static inline void fsl_rio_info(struct device *dev, u32 ccsr) | 282 | static inline void fsl_rio_info(struct device *dev, u32 ccsr) |
1359 | { | 283 | { |
1360 | const char *str; | 284 | const char *str; |
@@ -1411,16 +335,21 @@ int fsl_rio_setup(struct platform_device *dev) | |||
1411 | struct rio_mport *port; | 335 | struct rio_mport *port; |
1412 | struct rio_priv *priv; | 336 | struct rio_priv *priv; |
1413 | int rc = 0; | 337 | int rc = 0; |
1414 | const u32 *dt_range, *cell; | 338 | const u32 *dt_range, *cell, *port_index; |
1415 | struct resource regs; | 339 | u32 active_ports = 0; |
340 | struct resource regs, rmu_regs; | ||
341 | struct device_node *np, *rmu_node; | ||
1416 | int rlen; | 342 | int rlen; |
1417 | u32 ccsr; | 343 | u32 ccsr; |
1418 | u64 law_start, law_size; | 344 | u64 range_start, range_size; |
1419 | int paw, aw, sw; | 345 | int paw, aw, sw; |
346 | u32 i; | ||
347 | static int tmp; | ||
348 | struct device_node *rmu_np[MAX_MSG_UNIT_NUM] = {NULL}; | ||
1420 | 349 | ||
1421 | if (!dev->dev.of_node) { | 350 | if (!dev->dev.of_node) { |
1422 | dev_err(&dev->dev, "Device OF-Node is NULL"); | 351 | dev_err(&dev->dev, "Device OF-Node is NULL"); |
1423 | return -EFAULT; | 352 | return -ENODEV; |
1424 | } | 353 | } |
1425 | 354 | ||
1426 | rc = of_address_to_resource(dev->dev.of_node, 0, ®s); | 355 | rc = of_address_to_resource(dev->dev.of_node, 0, ®s); |
@@ -1429,37 +358,17 @@ int fsl_rio_setup(struct platform_device *dev) | |||
1429 | dev->dev.of_node->full_name); | 358 | dev->dev.of_node->full_name); |
1430 | return -EFAULT; | 359 | return -EFAULT; |
1431 | } | 360 | } |
1432 | dev_info(&dev->dev, "Of-device full name %s\n", dev->dev.of_node->full_name); | 361 | dev_info(&dev->dev, "Of-device full name %s\n", |
362 | dev->dev.of_node->full_name); | ||
1433 | dev_info(&dev->dev, "Regs: %pR\n", ®s); | 363 | dev_info(&dev->dev, "Regs: %pR\n", ®s); |
1434 | 364 | ||
1435 | dt_range = of_get_property(dev->dev.of_node, "ranges", &rlen); | 365 | rio_regs_win = ioremap(regs.start, resource_size(®s)); |
1436 | if (!dt_range) { | 366 | if (!rio_regs_win) { |
1437 | dev_err(&dev->dev, "Can't get %s property 'ranges'\n", | 367 | dev_err(&dev->dev, "Unable to map rio register window\n"); |
1438 | dev->dev.of_node->full_name); | 368 | rc = -ENOMEM; |
1439 | return -EFAULT; | 369 | goto err_rio_regs; |
1440 | } | 370 | } |
1441 | 371 | ||
1442 | /* Get node address width */ | ||
1443 | cell = of_get_property(dev->dev.of_node, "#address-cells", NULL); | ||
1444 | if (cell) | ||
1445 | aw = *cell; | ||
1446 | else | ||
1447 | aw = of_n_addr_cells(dev->dev.of_node); | ||
1448 | /* Get node size width */ | ||
1449 | cell = of_get_property(dev->dev.of_node, "#size-cells", NULL); | ||
1450 | if (cell) | ||
1451 | sw = *cell; | ||
1452 | else | ||
1453 | sw = of_n_size_cells(dev->dev.of_node); | ||
1454 | /* Get parent address width */ | ||
1455 | paw = of_n_addr_cells(dev->dev.of_node); | ||
1456 | |||
1457 | law_start = of_read_number(dt_range + aw, paw); | ||
1458 | law_size = of_read_number(dt_range + aw + paw, sw); | ||
1459 | |||
1460 | dev_info(&dev->dev, "LAW start 0x%016llx, size 0x%016llx.\n", | ||
1461 | law_start, law_size); | ||
1462 | |||
1463 | ops = kzalloc(sizeof(struct rio_ops), GFP_KERNEL); | 372 | ops = kzalloc(sizeof(struct rio_ops), GFP_KERNEL); |
1464 | if (!ops) { | 373 | if (!ops) { |
1465 | rc = -ENOMEM; | 374 | rc = -ENOMEM; |
@@ -1479,143 +388,257 @@ int fsl_rio_setup(struct platform_device *dev) | |||
1479 | ops->add_inb_buffer = fsl_add_inb_buffer; | 388 | ops->add_inb_buffer = fsl_add_inb_buffer; |
1480 | ops->get_inb_message = fsl_get_inb_message; | 389 | ops->get_inb_message = fsl_get_inb_message; |
1481 | 390 | ||
1482 | port = kzalloc(sizeof(struct rio_mport), GFP_KERNEL); | 391 | rmu_node = of_parse_phandle(dev->dev.of_node, "fsl,srio-rmu-handle", 0); |
1483 | if (!port) { | 392 | if (!rmu_node) |
393 | goto err_rmu; | ||
394 | rc = of_address_to_resource(rmu_node, 0, &rmu_regs); | ||
395 | if (rc) { | ||
396 | dev_err(&dev->dev, "Can't get %s property 'reg'\n", | ||
397 | rmu_node->full_name); | ||
398 | goto err_rmu; | ||
399 | } | ||
400 | rmu_regs_win = ioremap(rmu_regs.start, resource_size(&rmu_regs)); | ||
401 | if (!rmu_regs_win) { | ||
402 | dev_err(&dev->dev, "Unable to map rmu register window\n"); | ||
1484 | rc = -ENOMEM; | 403 | rc = -ENOMEM; |
1485 | goto err_port; | 404 | goto err_rmu; |
405 | } | ||
406 | for_each_compatible_node(np, NULL, "fsl,srio-msg-unit") { | ||
407 | rmu_np[tmp] = np; | ||
408 | tmp++; | ||
1486 | } | 409 | } |
1487 | port->index = 0; | ||
1488 | 410 | ||
1489 | priv = kzalloc(sizeof(struct rio_priv), GFP_KERNEL); | 411 | /* set up doorbell node */ |
1490 | if (!priv) { | 412 | np = of_find_compatible_node(NULL, NULL, "fsl,srio-dbell-unit"); |
1491 | printk(KERN_ERR "Can't alloc memory for 'priv'\n"); | 413 | if (!np) { |
414 | rc = -ENODEV; | ||
415 | goto err_dbell; | ||
416 | } | ||
417 | dbell = kzalloc(sizeof(struct fsl_rio_dbell), GFP_KERNEL); | ||
418 | if (!(dbell)) { | ||
419 | dev_err(&dev->dev, "Can't alloc memory for 'fsl_rio_dbell'\n"); | ||
1492 | rc = -ENOMEM; | 420 | rc = -ENOMEM; |
1493 | goto err_priv; | 421 | goto err_dbell; |
1494 | } | 422 | } |
423 | dbell->dev = &dev->dev; | ||
424 | dbell->bellirq = irq_of_parse_and_map(np, 1); | ||
425 | dev_info(&dev->dev, "bellirq: %d\n", dbell->bellirq); | ||
1495 | 426 | ||
1496 | INIT_LIST_HEAD(&port->dbells); | 427 | aw = of_n_addr_cells(np); |
1497 | port->iores.start = law_start; | 428 | dt_range = of_get_property(np, "reg", &rlen); |
1498 | port->iores.end = law_start + law_size - 1; | 429 | if (!dt_range) { |
1499 | port->iores.flags = IORESOURCE_MEM; | 430 | pr_err("%s: unable to find 'reg' property\n", |
1500 | port->iores.name = "rio_io_win"; | 431 | np->full_name); |
1501 | 432 | rc = -ENOMEM; | |
1502 | if (request_resource(&iomem_resource, &port->iores) < 0) { | 433 | goto err_pw; |
1503 | dev_err(&dev->dev, "RIO: Error requesting master port region" | ||
1504 | " 0x%016llx-0x%016llx\n", | ||
1505 | (u64)port->iores.start, (u64)port->iores.end); | ||
1506 | rc = -ENOMEM; | ||
1507 | goto err_res; | ||
1508 | } | 434 | } |
435 | range_start = of_read_number(dt_range, aw); | ||
436 | dbell->dbell_regs = (struct rio_dbell_regs *)(rmu_regs_win + | ||
437 | (u32)range_start); | ||
1509 | 438 | ||
1510 | priv->pwirq = irq_of_parse_and_map(dev->dev.of_node, 0); | 439 | /*set up port write node*/ |
1511 | priv->bellirq = irq_of_parse_and_map(dev->dev.of_node, 2); | 440 | np = of_find_compatible_node(NULL, NULL, "fsl,srio-port-write-unit"); |
1512 | priv->txirq = irq_of_parse_and_map(dev->dev.of_node, 3); | 441 | if (!np) { |
1513 | priv->rxirq = irq_of_parse_and_map(dev->dev.of_node, 4); | 442 | rc = -ENODEV; |
1514 | dev_info(&dev->dev, "pwirq: %d, bellirq: %d, txirq: %d, rxirq %d\n", | 443 | goto err_pw; |
1515 | priv->pwirq, priv->bellirq, priv->txirq, priv->rxirq); | 444 | } |
1516 | 445 | pw = kzalloc(sizeof(struct fsl_rio_pw), GFP_KERNEL); | |
1517 | rio_init_dbell_res(&port->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff); | 446 | if (!(pw)) { |
1518 | rio_init_mbox_res(&port->riores[RIO_INB_MBOX_RESOURCE], 0, 0); | 447 | dev_err(&dev->dev, "Can't alloc memory for 'fsl_rio_pw'\n"); |
1519 | rio_init_mbox_res(&port->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0); | 448 | rc = -ENOMEM; |
1520 | strcpy(port->name, "RIO0 mport"); | 449 | goto err_pw; |
1521 | 450 | } | |
1522 | priv->dev = &dev->dev; | 451 | pw->dev = &dev->dev; |
1523 | 452 | pw->pwirq = irq_of_parse_and_map(np, 0); | |
1524 | port->ops = ops; | 453 | dev_info(&dev->dev, "pwirq: %d\n", pw->pwirq); |
1525 | port->priv = priv; | 454 | aw = of_n_addr_cells(np); |
1526 | port->phys_efptr = 0x100; | 455 | dt_range = of_get_property(np, "reg", &rlen); |
1527 | 456 | if (!dt_range) { | |
1528 | priv->regs_win = ioremap(regs.start, resource_size(®s)); | 457 | pr_err("%s: unable to find 'reg' property\n", |
1529 | rio_regs_win = priv->regs_win; | 458 | np->full_name); |
1530 | 459 | rc = -ENOMEM; | |
1531 | /* Probe the master port phy type */ | 460 | goto err; |
1532 | ccsr = in_be32(priv->regs_win + RIO_CCSR); | 461 | } |
1533 | port->phy_type = (ccsr & 1) ? RIO_PHY_SERIAL : RIO_PHY_PARALLEL; | 462 | range_start = of_read_number(dt_range, aw); |
1534 | dev_info(&dev->dev, "RapidIO PHY type: %s\n", | 463 | pw->pw_regs = (struct rio_pw_regs *)(rmu_regs_win + (u32)range_start); |
1535 | (port->phy_type == RIO_PHY_PARALLEL) ? "parallel" : | 464 | |
1536 | ((port->phy_type == RIO_PHY_SERIAL) ? "serial" : | 465 | /*set up ports node*/ |
1537 | "unknown")); | 466 | for_each_child_of_node(dev->dev.of_node, np) { |
1538 | /* Checking the port training status */ | 467 | port_index = of_get_property(np, "cell-index", NULL); |
1539 | if (in_be32((priv->regs_win + RIO_ESCSR)) & 1) { | 468 | if (!port_index) { |
1540 | dev_err(&dev->dev, "Port is not ready. " | 469 | dev_err(&dev->dev, "Can't get %s property 'cell-index'\n", |
1541 | "Try to restart connection...\n"); | 470 | np->full_name); |
1542 | switch (port->phy_type) { | 471 | continue; |
1543 | case RIO_PHY_SERIAL: | 472 | } |
473 | |||
474 | dt_range = of_get_property(np, "ranges", &rlen); | ||
475 | if (!dt_range) { | ||
476 | dev_err(&dev->dev, "Can't get %s property 'ranges'\n", | ||
477 | np->full_name); | ||
478 | continue; | ||
479 | } | ||
480 | |||
481 | /* Get node address width */ | ||
482 | cell = of_get_property(np, "#address-cells", NULL); | ||
483 | if (cell) | ||
484 | aw = *cell; | ||
485 | else | ||
486 | aw = of_n_addr_cells(np); | ||
487 | /* Get node size width */ | ||
488 | cell = of_get_property(np, "#size-cells", NULL); | ||
489 | if (cell) | ||
490 | sw = *cell; | ||
491 | else | ||
492 | sw = of_n_size_cells(np); | ||
493 | /* Get parent address width */ | ||
494 | paw = of_n_addr_cells(np); | ||
495 | range_start = of_read_number(dt_range + aw, paw); | ||
496 | range_size = of_read_number(dt_range + aw + paw, sw); | ||
497 | |||
498 | dev_info(&dev->dev, "%s: LAW start 0x%016llx, size 0x%016llx.\n", | ||
499 | np->full_name, range_start, range_size); | ||
500 | |||
501 | port = kzalloc(sizeof(struct rio_mport), GFP_KERNEL); | ||
502 | if (!port) | ||
503 | continue; | ||
504 | |||
505 | i = *port_index - 1; | ||
506 | port->index = (unsigned char)i; | ||
507 | |||
508 | priv = kzalloc(sizeof(struct rio_priv), GFP_KERNEL); | ||
509 | if (!priv) { | ||
510 | dev_err(&dev->dev, "Can't alloc memory for 'priv'\n"); | ||
511 | kfree(port); | ||
512 | continue; | ||
513 | } | ||
514 | |||
515 | INIT_LIST_HEAD(&port->dbells); | ||
516 | port->iores.start = range_start; | ||
517 | port->iores.end = port->iores.start + range_size - 1; | ||
518 | port->iores.flags = IORESOURCE_MEM; | ||
519 | port->iores.name = "rio_io_win"; | ||
520 | |||
521 | if (request_resource(&iomem_resource, &port->iores) < 0) { | ||
522 | dev_err(&dev->dev, "RIO: Error requesting master port region" | ||
523 | " 0x%016llx-0x%016llx\n", | ||
524 | (u64)port->iores.start, (u64)port->iores.end); | ||
525 | kfree(priv); | ||
526 | kfree(port); | ||
527 | continue; | ||
528 | } | ||
529 | sprintf(port->name, "RIO mport %d", i); | ||
530 | |||
531 | priv->dev = &dev->dev; | ||
532 | port->ops = ops; | ||
533 | port->priv = priv; | ||
534 | port->phys_efptr = 0x100; | ||
535 | priv->regs_win = rio_regs_win; | ||
536 | |||
537 | /* Probe the master port phy type */ | ||
538 | ccsr = in_be32(priv->regs_win + RIO_CCSR + i*0x20); | ||
539 | port->phy_type = (ccsr & 1) ? RIO_PHY_SERIAL : RIO_PHY_PARALLEL; | ||
540 | if (port->phy_type == RIO_PHY_PARALLEL) { | ||
541 | dev_err(&dev->dev, "RIO: Parallel PHY type, unsupported port type!\n"); | ||
542 | release_resource(&port->iores); | ||
543 | kfree(priv); | ||
544 | kfree(port); | ||
545 | continue; | ||
546 | } | ||
547 | dev_info(&dev->dev, "RapidIO PHY type: Serial\n"); | ||
548 | /* Checking the port training status */ | ||
549 | if (in_be32((priv->regs_win + RIO_ESCSR + i*0x20)) & 1) { | ||
550 | dev_err(&dev->dev, "Port %d is not ready. " | ||
551 | "Try to restart connection...\n", i); | ||
1544 | /* Disable ports */ | 552 | /* Disable ports */ |
1545 | out_be32(priv->regs_win + RIO_CCSR, 0); | 553 | out_be32(priv->regs_win |
554 | + RIO_CCSR + i*0x20, 0); | ||
1546 | /* Set 1x lane */ | 555 | /* Set 1x lane */ |
1547 | setbits32(priv->regs_win + RIO_CCSR, 0x02000000); | 556 | setbits32(priv->regs_win |
557 | + RIO_CCSR + i*0x20, 0x02000000); | ||
1548 | /* Enable ports */ | 558 | /* Enable ports */ |
1549 | setbits32(priv->regs_win + RIO_CCSR, 0x00600000); | 559 | setbits32(priv->regs_win |
1550 | break; | 560 | + RIO_CCSR + i*0x20, 0x00600000); |
1551 | case RIO_PHY_PARALLEL: | 561 | msleep(100); |
1552 | /* Disable ports */ | 562 | if (in_be32((priv->regs_win |
1553 | out_be32(priv->regs_win + RIO_CCSR, 0x22000000); | 563 | + RIO_ESCSR + i*0x20)) & 1) { |
1554 | /* Enable ports */ | 564 | dev_err(&dev->dev, |
1555 | out_be32(priv->regs_win + RIO_CCSR, 0x44000000); | 565 | "Port %d restart failed.\n", i); |
1556 | break; | 566 | release_resource(&port->iores); |
1557 | } | 567 | kfree(priv); |
1558 | msleep(100); | 568 | kfree(port); |
1559 | if (in_be32((priv->regs_win + RIO_ESCSR)) & 1) { | 569 | continue; |
1560 | dev_err(&dev->dev, "Port restart failed.\n"); | 570 | } |
1561 | rc = -ENOLINK; | 571 | dev_info(&dev->dev, "Port %d restart success!\n", i); |
1562 | goto err; | ||
1563 | } | 572 | } |
1564 | dev_info(&dev->dev, "Port restart success!\n"); | 573 | fsl_rio_info(&dev->dev, ccsr); |
1565 | } | ||
1566 | fsl_rio_info(&dev->dev, ccsr); | ||
1567 | 574 | ||
1568 | port->sys_size = (in_be32((priv->regs_win + RIO_PEF_CAR)) | 575 | port->sys_size = (in_be32((priv->regs_win + RIO_PEF_CAR)) |
1569 | & RIO_PEF_CTLS) >> 4; | 576 | & RIO_PEF_CTLS) >> 4; |
1570 | dev_info(&dev->dev, "RapidIO Common Transport System size: %d\n", | 577 | dev_info(&dev->dev, "RapidIO Common Transport System size: %d\n", |
1571 | port->sys_size ? 65536 : 256); | 578 | port->sys_size ? 65536 : 256); |
579 | |||
580 | if (rio_register_mport(port)) { | ||
581 | release_resource(&port->iores); | ||
582 | kfree(priv); | ||
583 | kfree(port); | ||
584 | continue; | ||
585 | } | ||
586 | if (port->host_deviceid >= 0) | ||
587 | out_be32(priv->regs_win + RIO_GCCSR, RIO_PORT_GEN_HOST | | ||
588 | RIO_PORT_GEN_MASTER | RIO_PORT_GEN_DISCOVERED); | ||
589 | else | ||
590 | out_be32(priv->regs_win + RIO_GCCSR, | ||
591 | RIO_PORT_GEN_MASTER); | ||
592 | |||
593 | priv->atmu_regs = (struct rio_atmu_regs *)(priv->regs_win | ||
594 | + ((i == 0) ? RIO_ATMU_REGS_PORT1_OFFSET : | ||
595 | RIO_ATMU_REGS_PORT2_OFFSET)); | ||
1572 | 596 | ||
1573 | if (rio_register_mport(port)) | 597 | priv->maint_atmu_regs = priv->atmu_regs + 1; |
598 | |||
599 | /* Set to receive any dist ID for serial RapidIO controller. */ | ||
600 | if (port->phy_type == RIO_PHY_SERIAL) | ||
601 | out_be32((priv->regs_win | ||
602 | + RIO_ISR_AACR + i*0x80), RIO_ISR_AACR_AA); | ||
603 | |||
604 | /* Configure maintenance transaction window */ | ||
605 | out_be32(&priv->maint_atmu_regs->rowbar, | ||
606 | port->iores.start >> 12); | ||
607 | out_be32(&priv->maint_atmu_regs->rowar, | ||
608 | 0x80077000 | (ilog2(RIO_MAINT_WIN_SIZE) - 1)); | ||
609 | |||
610 | priv->maint_win = ioremap(port->iores.start, | ||
611 | RIO_MAINT_WIN_SIZE); | ||
612 | |||
613 | rio_law_start = range_start; | ||
614 | |||
615 | fsl_rio_setup_rmu(port, rmu_np[i]); | ||
616 | |||
617 | dbell->mport[i] = port; | ||
618 | |||
619 | active_ports++; | ||
620 | } | ||
621 | |||
622 | if (!active_ports) { | ||
623 | rc = -ENOLINK; | ||
1574 | goto err; | 624 | goto err; |
625 | } | ||
1575 | 626 | ||
1576 | if (port->host_deviceid >= 0) | 627 | fsl_rio_doorbell_init(dbell); |
1577 | out_be32(priv->regs_win + RIO_GCCSR, RIO_PORT_GEN_HOST | | 628 | fsl_rio_port_write_init(pw); |
1578 | RIO_PORT_GEN_MASTER | RIO_PORT_GEN_DISCOVERED); | ||
1579 | else | ||
1580 | out_be32(priv->regs_win + RIO_GCCSR, 0x00000000); | ||
1581 | |||
1582 | priv->atmu_regs = (struct rio_atmu_regs *)(priv->regs_win | ||
1583 | + RIO_ATMU_REGS_OFFSET); | ||
1584 | priv->maint_atmu_regs = priv->atmu_regs + 1; | ||
1585 | priv->dbell_atmu_regs = priv->atmu_regs + 2; | ||
1586 | priv->msg_regs = (struct rio_msg_regs *)(priv->regs_win + | ||
1587 | ((port->phy_type == RIO_PHY_SERIAL) ? | ||
1588 | RIO_S_MSG_REGS_OFFSET : RIO_P_MSG_REGS_OFFSET)); | ||
1589 | |||
1590 | /* Set to receive any dist ID for serial RapidIO controller. */ | ||
1591 | if (port->phy_type == RIO_PHY_SERIAL) | ||
1592 | out_be32((priv->regs_win + RIO_ISR_AACR), RIO_ISR_AACR_AA); | ||
1593 | |||
1594 | /* Configure maintenance transaction window */ | ||
1595 | out_be32(&priv->maint_atmu_regs->rowbar, law_start >> 12); | ||
1596 | out_be32(&priv->maint_atmu_regs->rowar, | ||
1597 | 0x80077000 | (ilog2(RIO_MAINT_WIN_SIZE) - 1)); | ||
1598 | |||
1599 | priv->maint_win = ioremap(law_start, RIO_MAINT_WIN_SIZE); | ||
1600 | |||
1601 | /* Configure outbound doorbell window */ | ||
1602 | out_be32(&priv->dbell_atmu_regs->rowbar, | ||
1603 | (law_start + RIO_MAINT_WIN_SIZE) >> 12); | ||
1604 | out_be32(&priv->dbell_atmu_regs->rowar, 0x8004200b); /* 4k */ | ||
1605 | fsl_rio_doorbell_init(port); | ||
1606 | fsl_rio_port_write_init(port); | ||
1607 | 629 | ||
1608 | return 0; | 630 | return 0; |
1609 | err: | 631 | err: |
1610 | iounmap(priv->regs_win); | 632 | kfree(pw); |
1611 | release_resource(&port->iores); | 633 | err_pw: |
1612 | err_res: | 634 | kfree(dbell); |
1613 | kfree(priv); | 635 | err_dbell: |
1614 | err_priv: | 636 | iounmap(rmu_regs_win); |
1615 | kfree(port); | 637 | err_rmu: |
1616 | err_port: | ||
1617 | kfree(ops); | 638 | kfree(ops); |
1618 | err_ops: | 639 | err_ops: |
640 | iounmap(rio_regs_win); | ||
641 | err_rio_regs: | ||
1619 | return rc; | 642 | return rc; |
1620 | } | 643 | } |
1621 | 644 | ||
@@ -1631,7 +654,7 @@ static int __devinit fsl_of_rio_rpn_probe(struct platform_device *dev) | |||
1631 | 654 | ||
1632 | static const struct of_device_id fsl_of_rio_rpn_ids[] = { | 655 | static const struct of_device_id fsl_of_rio_rpn_ids[] = { |
1633 | { | 656 | { |
1634 | .compatible = "fsl,rapidio-delta", | 657 | .compatible = "fsl,srio", |
1635 | }, | 658 | }, |
1636 | {}, | 659 | {}, |
1637 | }; | 660 | }; |
diff --git a/arch/powerpc/sysdev/fsl_rio.h b/arch/powerpc/sysdev/fsl_rio.h new file mode 100644 index 000000000000..ae8e27405a0d --- /dev/null +++ b/arch/powerpc/sysdev/fsl_rio.h | |||
@@ -0,0 +1,135 @@ | |||
1 | /* | ||
2 | * Freescale MPC85xx/MPC86xx RapidIO support | ||
3 | * | ||
4 | * Copyright 2009 Sysgo AG | ||
5 | * Thomas Moll <thomas.moll@sysgo.com> | ||
6 | * - fixed maintenance access routines, check for aligned access | ||
7 | * | ||
8 | * Copyright 2009 Integrated Device Technology, Inc. | ||
9 | * Alex Bounine <alexandre.bounine@idt.com> | ||
10 | * - Added Port-Write message handling | ||
11 | * - Added Machine Check exception handling | ||
12 | * | ||
13 | * Copyright (C) 2007, 2008, 2010, 2011 Freescale Semiconductor, Inc. | ||
14 | * Zhang Wei <wei.zhang@freescale.com> | ||
15 | * Lian Minghuan-B31939 <Minghuan.Lian@freescale.com> | ||
16 | * Liu Gang <Gang.Liu@freescale.com> | ||
17 | * | ||
18 | * Copyright 2005 MontaVista Software, Inc. | ||
19 | * Matt Porter <mporter@kernel.crashing.org> | ||
20 | * | ||
21 | * This program is free software; you can redistribute it and/or modify it | ||
22 | * under the terms of the GNU General Public License as published by the | ||
23 | * Free Software Foundation; either version 2 of the License, or (at your | ||
24 | * option) any later version. | ||
25 | */ | ||
26 | |||
27 | #ifndef __FSL_RIO_H | ||
28 | #define __FSL_RIO_H | ||
29 | |||
30 | #include <linux/rio.h> | ||
31 | #include <linux/rio_drv.h> | ||
32 | #include <linux/kfifo.h> | ||
33 | |||
34 | #define RIO_REGS_WIN(mport) (((struct rio_priv *)(mport->priv))->regs_win) | ||
35 | |||
36 | #define RIO_MAINT_WIN_SIZE 0x400000 | ||
37 | #define RIO_LTLEDCSR 0x0608 | ||
38 | |||
39 | #define DOORBELL_ROWAR_EN 0x80000000 | ||
40 | #define DOORBELL_ROWAR_TFLOWLV 0x08000000 /* highest priority level */ | ||
41 | #define DOORBELL_ROWAR_PCI 0x02000000 /* PCI window */ | ||
42 | #define DOORBELL_ROWAR_NREAD 0x00040000 /* NREAD */ | ||
43 | #define DOORBELL_ROWAR_MAINTRD 0x00070000 /* maintenance read */ | ||
44 | #define DOORBELL_ROWAR_RES 0x00002000 /* wrtpy: reserved */ | ||
45 | #define DOORBELL_ROWAR_MAINTWD 0x00007000 | ||
46 | #define DOORBELL_ROWAR_SIZE 0x0000000b /* window size is 4k */ | ||
47 | |||
48 | #define RIO_ATMU_REGS_PORT1_OFFSET 0x10c00 | ||
49 | #define RIO_ATMU_REGS_PORT2_OFFSET 0x10e00 | ||
50 | #define RIO_S_DBELL_REGS_OFFSET 0x13400 | ||
51 | #define RIO_S_PW_REGS_OFFSET 0x134e0 | ||
52 | #define RIO_ATMU_REGS_DBELL_OFFSET 0x10C40 | ||
53 | |||
54 | #define MAX_MSG_UNIT_NUM 2 | ||
55 | #define MAX_PORT_NUM 4 | ||
56 | |||
57 | struct rio_atmu_regs { | ||
58 | u32 rowtar; | ||
59 | u32 rowtear; | ||
60 | u32 rowbar; | ||
61 | u32 pad1; | ||
62 | u32 rowar; | ||
63 | u32 pad2[3]; | ||
64 | }; | ||
65 | |||
66 | struct rio_dbell_ring { | ||
67 | void *virt; | ||
68 | dma_addr_t phys; | ||
69 | }; | ||
70 | |||
71 | struct rio_port_write_msg { | ||
72 | void *virt; | ||
73 | dma_addr_t phys; | ||
74 | u32 msg_count; | ||
75 | u32 err_count; | ||
76 | u32 discard_count; | ||
77 | }; | ||
78 | |||
79 | struct fsl_rio_dbell { | ||
80 | struct rio_mport *mport[MAX_PORT_NUM]; | ||
81 | struct device *dev; | ||
82 | struct rio_dbell_regs __iomem *dbell_regs; | ||
83 | struct rio_dbell_ring dbell_ring; | ||
84 | int bellirq; | ||
85 | }; | ||
86 | |||
87 | struct fsl_rio_pw { | ||
88 | struct device *dev; | ||
89 | struct rio_pw_regs __iomem *pw_regs; | ||
90 | struct rio_port_write_msg port_write_msg; | ||
91 | int pwirq; | ||
92 | struct work_struct pw_work; | ||
93 | struct kfifo pw_fifo; | ||
94 | spinlock_t pw_fifo_lock; | ||
95 | }; | ||
96 | |||
97 | struct rio_priv { | ||
98 | struct device *dev; | ||
99 | void __iomem *regs_win; | ||
100 | struct rio_atmu_regs __iomem *atmu_regs; | ||
101 | struct rio_atmu_regs __iomem *maint_atmu_regs; | ||
102 | void __iomem *maint_win; | ||
103 | void *rmm_handle; /* RapidIO message manager (unit) handle */ | ||
104 | }; | ||
105 | |||
106 | extern void __iomem *rio_regs_win; | ||
107 | extern void __iomem *rmu_regs_win; | ||
108 | |||
109 | extern resource_size_t rio_law_start; | ||
110 | |||
111 | extern struct fsl_rio_dbell *dbell; | ||
112 | extern struct fsl_rio_pw *pw; | ||
113 | |||
114 | extern int fsl_rio_setup_rmu(struct rio_mport *mport, | ||
115 | struct device_node *node); | ||
116 | extern int fsl_rio_port_write_init(struct fsl_rio_pw *pw); | ||
117 | extern int fsl_rio_pw_enable(struct rio_mport *mport, int enable); | ||
118 | extern void fsl_rio_port_error_handler(int offset); | ||
119 | extern int fsl_rio_doorbell_init(struct fsl_rio_dbell *dbell); | ||
120 | |||
121 | extern int fsl_rio_doorbell_send(struct rio_mport *mport, | ||
122 | int index, u16 destid, u16 data); | ||
123 | extern int fsl_add_outb_message(struct rio_mport *mport, | ||
124 | struct rio_dev *rdev, | ||
125 | int mbox, void *buffer, size_t len); | ||
126 | extern int fsl_open_outb_mbox(struct rio_mport *mport, | ||
127 | void *dev_id, int mbox, int entries); | ||
128 | extern void fsl_close_outb_mbox(struct rio_mport *mport, int mbox); | ||
129 | extern int fsl_open_inb_mbox(struct rio_mport *mport, | ||
130 | void *dev_id, int mbox, int entries); | ||
131 | extern void fsl_close_inb_mbox(struct rio_mport *mport, int mbox); | ||
132 | extern int fsl_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf); | ||
133 | extern void *fsl_get_inb_message(struct rio_mport *mport, int mbox); | ||
134 | |||
135 | #endif | ||
diff --git a/arch/powerpc/sysdev/fsl_rmu.c b/arch/powerpc/sysdev/fsl_rmu.c new file mode 100644 index 000000000000..15485789e9db --- /dev/null +++ b/arch/powerpc/sysdev/fsl_rmu.c | |||
@@ -0,0 +1,1104 @@ | |||
1 | /* | ||
2 | * Freescale MPC85xx/MPC86xx RapidIO RMU support | ||
3 | * | ||
4 | * Copyright 2009 Sysgo AG | ||
5 | * Thomas Moll <thomas.moll@sysgo.com> | ||
6 | * - fixed maintenance access routines, check for aligned access | ||
7 | * | ||
8 | * Copyright 2009 Integrated Device Technology, Inc. | ||
9 | * Alex Bounine <alexandre.bounine@idt.com> | ||
10 | * - Added Port-Write message handling | ||
11 | * - Added Machine Check exception handling | ||
12 | * | ||
13 | * Copyright (C) 2007, 2008, 2010, 2011 Freescale Semiconductor, Inc. | ||
14 | * Zhang Wei <wei.zhang@freescale.com> | ||
15 | * Lian Minghuan-B31939 <Minghuan.Lian@freescale.com> | ||
16 | * Liu Gang <Gang.Liu@freescale.com> | ||
17 | * | ||
18 | * Copyright 2005 MontaVista Software, Inc. | ||
19 | * Matt Porter <mporter@kernel.crashing.org> | ||
20 | * | ||
21 | * This program is free software; you can redistribute it and/or modify it | ||
22 | * under the terms of the GNU General Public License as published by the | ||
23 | * Free Software Foundation; either version 2 of the License, or (at your | ||
24 | * option) any later version. | ||
25 | */ | ||
26 | |||
27 | #include <linux/types.h> | ||
28 | #include <linux/dma-mapping.h> | ||
29 | #include <linux/interrupt.h> | ||
30 | #include <linux/of_platform.h> | ||
31 | #include <linux/slab.h> | ||
32 | |||
33 | #include "fsl_rio.h" | ||
34 | |||
35 | #define GET_RMM_HANDLE(mport) \ | ||
36 | (((struct rio_priv *)(mport->priv))->rmm_handle) | ||
37 | |||
38 | /* RapidIO IRQ definitions, read from the OF tree */ | ||
39 | #define IRQ_RIO_PW(m) (((struct fsl_rio_pw *)(m))->pwirq) | ||
40 | #define IRQ_RIO_BELL(m) (((struct fsl_rio_dbell *)(m))->bellirq) | ||
41 | #define IRQ_RIO_TX(m) (((struct fsl_rmu *)(GET_RMM_HANDLE(m)))->txirq) | ||
42 | #define IRQ_RIO_RX(m) (((struct fsl_rmu *)(GET_RMM_HANDLE(m)))->rxirq) | ||
43 | |||
44 | #define RIO_MIN_TX_RING_SIZE 2 | ||
45 | #define RIO_MAX_TX_RING_SIZE 2048 | ||
46 | #define RIO_MIN_RX_RING_SIZE 2 | ||
47 | #define RIO_MAX_RX_RING_SIZE 2048 | ||
48 | |||
49 | #define RIO_IPWMR_SEN 0x00100000 | ||
50 | #define RIO_IPWMR_QFIE 0x00000100 | ||
51 | #define RIO_IPWMR_EIE 0x00000020 | ||
52 | #define RIO_IPWMR_CQ 0x00000002 | ||
53 | #define RIO_IPWMR_PWE 0x00000001 | ||
54 | |||
55 | #define RIO_IPWSR_QF 0x00100000 | ||
56 | #define RIO_IPWSR_TE 0x00000080 | ||
57 | #define RIO_IPWSR_QFI 0x00000010 | ||
58 | #define RIO_IPWSR_PWD 0x00000008 | ||
59 | #define RIO_IPWSR_PWB 0x00000004 | ||
60 | |||
61 | #define RIO_EPWISR 0x10010 | ||
62 | /* EPWISR Error match value */ | ||
63 | #define RIO_EPWISR_PINT1 0x80000000 | ||
64 | #define RIO_EPWISR_PINT2 0x40000000 | ||
65 | #define RIO_EPWISR_MU 0x00000002 | ||
66 | #define RIO_EPWISR_PW 0x00000001 | ||
67 | |||
68 | #define IPWSR_CLEAR 0x98 | ||
69 | #define OMSR_CLEAR 0x1cb3 | ||
70 | #define IMSR_CLEAR 0x491 | ||
71 | #define IDSR_CLEAR 0x91 | ||
72 | #define ODSR_CLEAR 0x1c00 | ||
73 | #define LTLEECSR_ENABLE_ALL 0xFFC000FC | ||
74 | #define RIO_LTLEECSR 0x060c | ||
75 | |||
76 | #define RIO_IM0SR 0x64 | ||
77 | #define RIO_IM1SR 0x164 | ||
78 | #define RIO_OM0SR 0x4 | ||
79 | #define RIO_OM1SR 0x104 | ||
80 | |||
81 | #define RIO_DBELL_WIN_SIZE 0x1000 | ||
82 | |||
83 | #define RIO_MSG_OMR_MUI 0x00000002 | ||
84 | #define RIO_MSG_OSR_TE 0x00000080 | ||
85 | #define RIO_MSG_OSR_QOI 0x00000020 | ||
86 | #define RIO_MSG_OSR_QFI 0x00000010 | ||
87 | #define RIO_MSG_OSR_MUB 0x00000004 | ||
88 | #define RIO_MSG_OSR_EOMI 0x00000002 | ||
89 | #define RIO_MSG_OSR_QEI 0x00000001 | ||
90 | |||
91 | #define RIO_MSG_IMR_MI 0x00000002 | ||
92 | #define RIO_MSG_ISR_TE 0x00000080 | ||
93 | #define RIO_MSG_ISR_QFI 0x00000010 | ||
94 | #define RIO_MSG_ISR_DIQI 0x00000001 | ||
95 | |||
96 | #define RIO_MSG_DESC_SIZE 32 | ||
97 | #define RIO_MSG_BUFFER_SIZE 4096 | ||
98 | |||
99 | #define DOORBELL_DMR_DI 0x00000002 | ||
100 | #define DOORBELL_DSR_TE 0x00000080 | ||
101 | #define DOORBELL_DSR_QFI 0x00000010 | ||
102 | #define DOORBELL_DSR_DIQI 0x00000001 | ||
103 | #define DOORBELL_TID_OFFSET 0x02 | ||
104 | #define DOORBELL_SID_OFFSET 0x04 | ||
105 | #define DOORBELL_INFO_OFFSET 0x06 | ||
106 | |||
107 | #define DOORBELL_MESSAGE_SIZE 0x08 | ||
108 | #define DBELL_SID(x) (*(u16 *)(x + DOORBELL_SID_OFFSET)) | ||
109 | #define DBELL_TID(x) (*(u16 *)(x + DOORBELL_TID_OFFSET)) | ||
110 | #define DBELL_INF(x) (*(u16 *)(x + DOORBELL_INFO_OFFSET)) | ||
111 | |||
112 | struct rio_msg_regs { | ||
113 | u32 omr; | ||
114 | u32 osr; | ||
115 | u32 pad1; | ||
116 | u32 odqdpar; | ||
117 | u32 pad2; | ||
118 | u32 osar; | ||
119 | u32 odpr; | ||
120 | u32 odatr; | ||
121 | u32 odcr; | ||
122 | u32 pad3; | ||
123 | u32 odqepar; | ||
124 | u32 pad4[13]; | ||
125 | u32 imr; | ||
126 | u32 isr; | ||
127 | u32 pad5; | ||
128 | u32 ifqdpar; | ||
129 | u32 pad6; | ||
130 | u32 ifqepar; | ||
131 | }; | ||
132 | |||
133 | struct rio_dbell_regs { | ||
134 | u32 odmr; | ||
135 | u32 odsr; | ||
136 | u32 pad1[4]; | ||
137 | u32 oddpr; | ||
138 | u32 oddatr; | ||
139 | u32 pad2[3]; | ||
140 | u32 odretcr; | ||
141 | u32 pad3[12]; | ||
142 | u32 dmr; | ||
143 | u32 dsr; | ||
144 | u32 pad4; | ||
145 | u32 dqdpar; | ||
146 | u32 pad5; | ||
147 | u32 dqepar; | ||
148 | }; | ||
149 | |||
150 | struct rio_pw_regs { | ||
151 | u32 pwmr; | ||
152 | u32 pwsr; | ||
153 | u32 epwqbar; | ||
154 | u32 pwqbar; | ||
155 | }; | ||
156 | |||
157 | |||
158 | struct rio_tx_desc { | ||
159 | u32 pad1; | ||
160 | u32 saddr; | ||
161 | u32 dport; | ||
162 | u32 dattr; | ||
163 | u32 pad2; | ||
164 | u32 pad3; | ||
165 | u32 dwcnt; | ||
166 | u32 pad4; | ||
167 | }; | ||
168 | |||
169 | struct rio_msg_tx_ring { | ||
170 | void *virt; | ||
171 | dma_addr_t phys; | ||
172 | void *virt_buffer[RIO_MAX_TX_RING_SIZE]; | ||
173 | dma_addr_t phys_buffer[RIO_MAX_TX_RING_SIZE]; | ||
174 | int tx_slot; | ||
175 | int size; | ||
176 | void *dev_id; | ||
177 | }; | ||
178 | |||
179 | struct rio_msg_rx_ring { | ||
180 | void *virt; | ||
181 | dma_addr_t phys; | ||
182 | void *virt_buffer[RIO_MAX_RX_RING_SIZE]; | ||
183 | int rx_slot; | ||
184 | int size; | ||
185 | void *dev_id; | ||
186 | }; | ||
187 | |||
188 | struct fsl_rmu { | ||
189 | struct rio_msg_regs __iomem *msg_regs; | ||
190 | struct rio_msg_tx_ring msg_tx_ring; | ||
191 | struct rio_msg_rx_ring msg_rx_ring; | ||
192 | int txirq; | ||
193 | int rxirq; | ||
194 | }; | ||
195 | |||
196 | /** | ||
197 | * fsl_rio_tx_handler - MPC85xx outbound message interrupt handler | ||
198 | * @irq: Linux interrupt number | ||
199 | * @dev_instance: Pointer to interrupt-specific data | ||
200 | * | ||
201 | * Handles outbound message interrupts. Executes a registered outbound | ||
202 | * mailbox event handler and acks the interrupt occurrence. | ||
203 | */ | ||
204 | static irqreturn_t | ||
205 | fsl_rio_tx_handler(int irq, void *dev_instance) | ||
206 | { | ||
207 | int osr; | ||
208 | struct rio_mport *port = (struct rio_mport *)dev_instance; | ||
209 | struct fsl_rmu *rmu = GET_RMM_HANDLE(port); | ||
210 | |||
211 | osr = in_be32(&rmu->msg_regs->osr); | ||
212 | |||
213 | if (osr & RIO_MSG_OSR_TE) { | ||
214 | pr_info("RIO: outbound message transmission error\n"); | ||
215 | out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_TE); | ||
216 | goto out; | ||
217 | } | ||
218 | |||
219 | if (osr & RIO_MSG_OSR_QOI) { | ||
220 | pr_info("RIO: outbound message queue overflow\n"); | ||
221 | out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_QOI); | ||
222 | goto out; | ||
223 | } | ||
224 | |||
225 | if (osr & RIO_MSG_OSR_EOMI) { | ||
226 | u32 dqp = in_be32(&rmu->msg_regs->odqdpar); | ||
227 | int slot = (dqp - rmu->msg_tx_ring.phys) >> 5; | ||
228 | if (port->outb_msg[0].mcback != NULL) { | ||
229 | port->outb_msg[0].mcback(port, rmu->msg_tx_ring.dev_id, | ||
230 | -1, | ||
231 | slot); | ||
232 | } | ||
233 | /* Ack the end-of-message interrupt */ | ||
234 | out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_EOMI); | ||
235 | } | ||
236 | |||
237 | out: | ||
238 | return IRQ_HANDLED; | ||
239 | } | ||
240 | |||
241 | /** | ||
242 | * fsl_rio_rx_handler - MPC85xx inbound message interrupt handler | ||
243 | * @irq: Linux interrupt number | ||
244 | * @dev_instance: Pointer to interrupt-specific data | ||
245 | * | ||
246 | * Handles inbound message interrupts. Executes a registered inbound | ||
247 | * mailbox event handler and acks the interrupt occurrence. | ||
248 | */ | ||
249 | static irqreturn_t | ||
250 | fsl_rio_rx_handler(int irq, void *dev_instance) | ||
251 | { | ||
252 | int isr; | ||
253 | struct rio_mport *port = (struct rio_mport *)dev_instance; | ||
254 | struct fsl_rmu *rmu = GET_RMM_HANDLE(port); | ||
255 | |||
256 | isr = in_be32(&rmu->msg_regs->isr); | ||
257 | |||
258 | if (isr & RIO_MSG_ISR_TE) { | ||
259 | pr_info("RIO: inbound message reception error\n"); | ||
260 | out_be32((void *)&rmu->msg_regs->isr, RIO_MSG_ISR_TE); | ||
261 | goto out; | ||
262 | } | ||
263 | |||
264 | /* XXX Need to check/dispatch until queue empty */ | ||
265 | if (isr & RIO_MSG_ISR_DIQI) { | ||
266 | /* | ||
267 | * Messages for any mailbox/letter to this destination can | ||
268 | * arrive on this queue, so make the callback with an | ||
269 | * unknown/invalid mailbox number argument. | ||
270 | */ | ||
271 | if (port->inb_msg[0].mcback != NULL) | ||
272 | port->inb_msg[0].mcback(port, rmu->msg_rx_ring.dev_id, | ||
273 | -1, | ||
274 | -1); | ||
275 | |||
276 | /* Ack the queueing interrupt */ | ||
277 | out_be32(&rmu->msg_regs->isr, RIO_MSG_ISR_DIQI); | ||
278 | } | ||
279 | |||
280 | out: | ||
281 | return IRQ_HANDLED; | ||
282 | } | ||
283 | |||
284 | /** | ||
285 | * fsl_rio_dbell_handler - MPC85xx doorbell interrupt handler | ||
286 | * @irq: Linux interrupt number | ||
287 | * @dev_instance: Pointer to interrupt-specific data | ||
288 | * | ||
289 | * Handles doorbell interrupts. Parses a list of registered | ||
290 | * doorbell event handlers and executes a matching event handler. | ||
291 | */ | ||
292 | static irqreturn_t | ||
293 | fsl_rio_dbell_handler(int irq, void *dev_instance) | ||
294 | { | ||
295 | int dsr; | ||
296 | struct fsl_rio_dbell *fsl_dbell = (struct fsl_rio_dbell *)dev_instance; | ||
297 | int i; | ||
298 | |||
299 | dsr = in_be32(&fsl_dbell->dbell_regs->dsr); | ||
300 | |||
301 | if (dsr & DOORBELL_DSR_TE) { | ||
302 | pr_info("RIO: doorbell reception error\n"); | ||
303 | out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_TE); | ||
304 | goto out; | ||
305 | } | ||
306 | |||
307 | if (dsr & DOORBELL_DSR_QFI) { | ||
308 | pr_info("RIO: doorbell queue full\n"); | ||
309 | out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_QFI); | ||
310 | } | ||
311 | |||
312 | /* XXX Need to check/dispatch until queue empty */ | ||
313 | if (dsr & DOORBELL_DSR_DIQI) { | ||
314 | u32 dmsg = | ||
315 | (u32) fsl_dbell->dbell_ring.virt + | ||
316 | (in_be32(&fsl_dbell->dbell_regs->dqdpar) & 0xfff); | ||
317 | struct rio_dbell *dbell; | ||
318 | int found = 0; | ||
319 | |||
320 | pr_debug("RIO: processing doorbell," | ||
321 | " sid %2.2x tid %2.2x info %4.4x\n", | ||
322 | DBELL_SID(dmsg), DBELL_TID(dmsg), | ||
323 | DBELL_INF(dmsg)); | ||
324 | |||
325 | for (i = 0; i < MAX_PORT_NUM; i++) { | ||
326 | if (fsl_dbell->mport[i]) { | ||
327 | list_for_each_entry(dbell, | ||
328 | &fsl_dbell->mport[i]->dbells, node) { | ||
329 | if ((dbell->res->start | ||
330 | <= DBELL_INF(dmsg)) | ||
331 | && (dbell->res->end | ||
332 | >= DBELL_INF(dmsg))) { | ||
333 | found = 1; | ||
334 | break; | ||
335 | } | ||
336 | } | ||
337 | if (found && dbell->dinb) { | ||
338 | dbell->dinb(fsl_dbell->mport[i], | ||
339 | dbell->dev_id, DBELL_SID(dmsg), | ||
340 | DBELL_TID(dmsg), | ||
341 | DBELL_INF(dmsg)); | ||
342 | break; | ||
343 | } | ||
344 | } | ||
345 | } | ||
346 | |||
347 | if (!found) { | ||
348 | pr_debug("RIO: spurious doorbell," | ||
349 | " sid %2.2x tid %2.2x info %4.4x\n", | ||
350 | DBELL_SID(dmsg), | ||
351 | DBELL_TID(dmsg), | ||
352 | DBELL_INF(dmsg)); | ||
353 | } | ||
354 | setbits32(&fsl_dbell->dbell_regs->dmr, DOORBELL_DMR_DI); | ||
355 | out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_DIQI); | ||
356 | } | ||
357 | |||
358 | out: | ||
359 | return IRQ_HANDLED; | ||
360 | } | ||
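
The dispatch loop above only invokes a callback whose registered doorbell resource range covers the incoming info value. A minimal sketch, not part of this patch, of how a RapidIO client driver would register such a range through the generic core API (rio_request_inb_dbell()); the callback name and info range are illustrative:

    #include <linux/rio.h>
    #include <linux/rio_drv.h>

    /* Illustrative inbound doorbell callback. */
    static void my_dbell_cb(struct rio_mport *mport, void *dev_id,
                            u16 src, u16 dst, u16 info)
    {
            pr_info("RIO: doorbell from %4.4x, info %4.4x\n", src, info);
    }

    static int my_register_dbell_range(struct rio_mport *mport, void *dev_id)
    {
            /* Claim info values 0x0000-0x00ff; fsl_rio_dbell_handler()
             * matches incoming doorbells against this range. */
            return rio_request_inb_dbell(mport, dev_id, 0x0000, 0x00ff,
                                         my_dbell_cb);
    }
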
361 | |||
362 | void msg_unit_error_handler(void) | ||
363 | { | ||
364 | |||
365 | /* XXX: Error recovery is not implemented; we just clear errors */ | ||
366 | out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0); | ||
367 | |||
368 | out_be32((u32 *)(rmu_regs_win + RIO_IM0SR), IMSR_CLEAR); | ||
369 | out_be32((u32 *)(rmu_regs_win + RIO_IM1SR), IMSR_CLEAR); | ||
370 | out_be32((u32 *)(rmu_regs_win + RIO_OM0SR), OMSR_CLEAR); | ||
371 | out_be32((u32 *)(rmu_regs_win + RIO_OM1SR), OMSR_CLEAR); | ||
372 | |||
373 | out_be32(&dbell->dbell_regs->odsr, ODSR_CLEAR); | ||
374 | out_be32(&dbell->dbell_regs->dsr, IDSR_CLEAR); | ||
375 | |||
376 | out_be32(&pw->pw_regs->pwsr, IPWSR_CLEAR); | ||
377 | } | ||
378 | |||
379 | /** | ||
380 | * fsl_rio_port_write_handler - MPC85xx port write interrupt handler | ||
381 | * @irq: Linux interrupt number | ||
382 | * @dev_instance: Pointer to interrupt-specific data | ||
383 | * | ||
384 | * Handles port write interrupts. Parses a list of registered | ||
385 | * port write event handlers and executes a matching event handler. | ||
386 | */ | ||
387 | static irqreturn_t | ||
388 | fsl_rio_port_write_handler(int irq, void *dev_instance) | ||
389 | { | ||
390 | u32 ipwmr, ipwsr; | ||
391 | struct fsl_rio_pw *pw = (struct fsl_rio_pw *)dev_instance; | ||
392 | u32 epwisr, tmp; | ||
393 | |||
394 | epwisr = in_be32(rio_regs_win + RIO_EPWISR); | ||
395 | if (!(epwisr & RIO_EPWISR_PW)) | ||
396 | goto pw_done; | ||
397 | |||
398 | ipwmr = in_be32(&pw->pw_regs->pwmr); | ||
399 | ipwsr = in_be32(&pw->pw_regs->pwsr); | ||
400 | |||
401 | #ifdef DEBUG_PW | ||
402 | pr_debug("PW Int->IPWMR: 0x%08x IPWSR: 0x%08x (", ipwmr, ipwsr); | ||
403 | if (ipwsr & RIO_IPWSR_QF) | ||
404 | pr_debug(" QF"); | ||
405 | if (ipwsr & RIO_IPWSR_TE) | ||
406 | pr_debug(" TE"); | ||
407 | if (ipwsr & RIO_IPWSR_QFI) | ||
408 | pr_debug(" QFI"); | ||
409 | if (ipwsr & RIO_IPWSR_PWD) | ||
410 | pr_debug(" PWD"); | ||
411 | if (ipwsr & RIO_IPWSR_PWB) | ||
412 | pr_debug(" PWB"); | ||
413 | pr_debug(" )\n"); | ||
414 | #endif | ||
415 | /* Schedule deferred processing if PW was received */ | ||
416 | if (ipwsr & RIO_IPWSR_QFI) { | ||
417 | /* Save PW message (if there is room in FIFO), | ||
418 | * otherwise discard it. | ||
419 | */ | ||
420 | if (kfifo_avail(&pw->pw_fifo) >= RIO_PW_MSG_SIZE) { | ||
421 | pw->port_write_msg.msg_count++; | ||
422 | kfifo_in(&pw->pw_fifo, pw->port_write_msg.virt, | ||
423 | RIO_PW_MSG_SIZE); | ||
424 | } else { | ||
425 | pw->port_write_msg.discard_count++; | ||
426 | pr_debug("RIO: ISR Discarded Port-Write Msg(s) (%d)\n", | ||
427 | pw->port_write_msg.discard_count); | ||
428 | } | ||
429 | /* Clear interrupt and issue Clear Queue command. This allows | ||
430 | * another port-write to be received. | ||
431 | */ | ||
432 | out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_QFI); | ||
433 | out_be32(&pw->pw_regs->pwmr, ipwmr | RIO_IPWMR_CQ); | ||
434 | |||
435 | schedule_work(&pw->pw_work); | ||
436 | } | ||
437 | |||
438 | if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) { | ||
439 | pw->port_write_msg.err_count++; | ||
440 | pr_debug("RIO: Port-Write Transaction Err (%d)\n", | ||
441 | pw->port_write_msg.err_count); | ||
442 | /* Clear Transaction Error: port-write controller should be | ||
443 | * disabled when clearing this error | ||
444 | */ | ||
445 | out_be32(&pw->pw_regs->pwmr, ipwmr & ~RIO_IPWMR_PWE); | ||
446 | out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_TE); | ||
447 | out_be32(&pw->pw_regs->pwmr, ipwmr); | ||
448 | } | ||
449 | |||
450 | if (ipwsr & RIO_IPWSR_PWD) { | ||
451 | pw->port_write_msg.discard_count++; | ||
452 | pr_debug("RIO: Port Discarded Port-Write Msg(s) (%d)\n", | ||
453 | pw->port_write_msg.discard_count); | ||
454 | out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_PWD); | ||
455 | } | ||
456 | |||
457 | pw_done: | ||
458 | if (epwisr & RIO_EPWISR_PINT1) { | ||
459 | tmp = in_be32(rio_regs_win + RIO_LTLEDCSR); | ||
460 | pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp); | ||
461 | fsl_rio_port_error_handler(0); | ||
462 | } | ||
463 | |||
464 | if (epwisr & RIO_EPWISR_PINT2) { | ||
465 | tmp = in_be32(rio_regs_win + RIO_LTLEDCSR); | ||
466 | pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp); | ||
467 | fsl_rio_port_error_handler(1); | ||
468 | } | ||
469 | |||
470 | if (epwisr & RIO_EPWISR_MU) { | ||
471 | tmp = in_be32(rio_regs_win + RIO_LTLEDCSR); | ||
472 | pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp); | ||
473 | msg_unit_error_handler(); | ||
474 | } | ||
475 | |||
476 | return IRQ_HANDLED; | ||
477 | } | ||
478 | |||
479 | static void fsl_pw_dpc(struct work_struct *work) | ||
480 | { | ||
481 | struct fsl_rio_pw *pw = container_of(work, struct fsl_rio_pw, pw_work); | ||
482 | u32 msg_buffer[RIO_PW_MSG_SIZE/sizeof(u32)]; | ||
483 | |||
484 | /* | ||
485 | * Process port-write messages | ||
486 | */ | ||
487 | while (kfifo_out_spinlocked(&pw->pw_fifo, (unsigned char *)msg_buffer, | ||
488 | RIO_PW_MSG_SIZE, &pw->pw_fifo_lock)) { | ||
489 | /* Process one message */ | ||
490 | #ifdef DEBUG_PW | ||
491 | { | ||
492 | u32 i; | ||
493 | pr_debug("%s : Port-Write Message:", __func__); | ||
494 | for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32); i++) { | ||
495 | if ((i%4) == 0) | ||
496 | pr_debug("\n0x%02x: 0x%08x", i*4, | ||
497 | msg_buffer[i]); | ||
498 | else | ||
499 | pr_debug(" 0x%08x", msg_buffer[i]); | ||
500 | } | ||
501 | pr_debug("\n"); | ||
502 | } | ||
503 | #endif | ||
504 | /* Pass the port-write message to RIO core for processing */ | ||
505 | rio_inb_pwrite_handler((union rio_pw_msg *)msg_buffer); | ||
506 | } | ||
507 | } | ||
508 | |||
509 | /** | ||
510 | * fsl_rio_pw_enable - enable/disable port-write interface | ||
511 | * @mport: Master port implementing the port write unit | ||
512 | * @enable: 1=enable; 0=disable port-write message handling | ||
513 | */ | ||
514 | int fsl_rio_pw_enable(struct rio_mport *mport, int enable) | ||
515 | { | ||
516 | u32 rval; | ||
517 | |||
518 | rval = in_be32(&pw->pw_regs->pwmr); | ||
519 | |||
520 | if (enable) | ||
521 | rval |= RIO_IPWMR_PWE; | ||
522 | else | ||
523 | rval &= ~RIO_IPWMR_PWE; | ||
524 | |||
525 | out_be32(&pw->pw_regs->pwmr, rval); | ||
526 | |||
527 | return 0; | ||
528 | } | ||
529 | |||
530 | /** | ||
531 | * fsl_rio_port_write_init - MPC85xx port write interface init | ||
532 | * @pw: fsl_rio_pw structure of the port write unit | ||
533 | * | ||
534 | * Initializes port write unit hardware and DMA buffer | ||
535 | * ring. Called from fsl_rio_setup(). Returns %0 on success | ||
536 | * or %-ENOMEM on failure. | ||
537 | */ | ||
538 | |||
539 | int fsl_rio_port_write_init(struct fsl_rio_pw *pw) | ||
540 | { | ||
541 | int rc = 0; | ||
542 | |||
543 | /* Following configurations require a disabled port write controller */ | ||
544 | out_be32(&pw->pw_regs->pwmr, | ||
545 | in_be32(&pw->pw_regs->pwmr) & ~RIO_IPWMR_PWE); | ||
546 | |||
547 | /* Initialize port write */ | ||
548 | pw->port_write_msg.virt = dma_alloc_coherent(pw->dev, | ||
549 | RIO_PW_MSG_SIZE, | ||
550 | &pw->port_write_msg.phys, GFP_KERNEL); | ||
551 | if (!pw->port_write_msg.virt) { | ||
552 | pr_err("RIO: unable to allocate port write queue\n"); | ||
553 | return -ENOMEM; | ||
554 | } | ||
555 | |||
556 | pw->port_write_msg.err_count = 0; | ||
557 | pw->port_write_msg.discard_count = 0; | ||
558 | |||
559 | /* Point dequeue/enqueue pointers at first entry */ | ||
560 | out_be32(&pw->pw_regs->epwqbar, 0); | ||
561 | out_be32(&pw->pw_regs->pwqbar, (u32) pw->port_write_msg.phys); | ||
562 | |||
563 | pr_debug("EIPWQBAR: 0x%08x IPWQBAR: 0x%08x\n", | ||
564 | in_be32(&pw->pw_regs->epwqbar), | ||
565 | in_be32(&pw->pw_regs->pwqbar)); | ||
566 | |||
567 | /* Clear interrupt status IPWSR */ | ||
568 | out_be32(&pw->pw_regs->pwsr, | ||
569 | (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD)); | ||
570 | |||
571 | /* Configure the port write controller: enable snooping and all | ||
572 | * reporting, and clear the queue full condition */ | ||
573 | out_be32(&pw->pw_regs->pwmr, | ||
574 | RIO_IPWMR_SEN | RIO_IPWMR_QFIE | RIO_IPWMR_EIE | RIO_IPWMR_CQ); | ||
575 | |||
576 | |||
577 | /* Hook up port-write handler */ | ||
578 | rc = request_irq(IRQ_RIO_PW(pw), fsl_rio_port_write_handler, | ||
579 | IRQF_SHARED, "port-write", (void *)pw); | ||
580 | if (rc < 0) { | ||
581 | pr_err("MPC85xx RIO: unable to request port-write irq"); | ||
582 | goto err_out; | ||
583 | } | ||
584 | /* Enable Error Interrupt */ | ||
585 | out_be32((u32 *)(rio_regs_win + RIO_LTLEECSR), LTLEECSR_ENABLE_ALL); | ||
586 | |||
587 | INIT_WORK(&pw->pw_work, fsl_pw_dpc); | ||
588 | spin_lock_init(&pw->pw_fifo_lock); | ||
589 | if (kfifo_alloc(&pw->pw_fifo, RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) { | ||
590 | pr_err("FIFO allocation failed\n"); | ||
591 | rc = -ENOMEM; | ||
592 | goto err_out_irq; | ||
593 | } | ||
594 | |||
595 | pr_debug("IPWMR: 0x%08x IPWSR: 0x%08x\n", | ||
596 | in_be32(&pw->pw_regs->pwmr), | ||
597 | in_be32(&pw->pw_regs->pwsr)); | ||
598 | |||
599 | return rc; | ||
600 | |||
601 | err_out_irq: | ||
602 | free_irq(IRQ_RIO_PW(pw), (void *)pw); | ||
603 | err_out: | ||
604 | dma_free_coherent(pw->dev, RIO_PW_MSG_SIZE, | ||
605 | pw->port_write_msg.virt, | ||
606 | pw->port_write_msg.phys); | ||
607 | return rc; | ||
608 | } | ||
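
Port-write messages saved to the FIFO here are drained by fsl_pw_dpc() and handed to rio_inb_pwrite_handler(), which forwards them to callbacks registered per device. A hedged sketch, not part of this patch, of how a client would register for port-writes through the generic core; the callback is illustrative:

    #include <linux/rio.h>
    #include <linux/rio_drv.h>

    /* Illustrative port-write callback; a real handler would inspect the
     * error-management fields carried in the message. */
    static int my_pwrite_cb(struct rio_dev *rdev, union rio_pw_msg *msg,
                            int step)
    {
            pr_info("RIO: port-write from %s, comptag %8.8x\n",
                    rio_name(rdev), msg->em.comptag);
            return 0;
    }

    static int my_enable_pwrites(struct rio_dev *rdev)
    {
            /* fsl_rio_pw_enable() is reached separately through the
             * mport operations; this only hooks the inbound callback. */
            return rio_request_inb_pwrite(rdev, my_pwrite_cb);
    }
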
609 | |||
610 | /** | ||
611 | * fsl_rio_doorbell_send - Send an MPC85xx doorbell message | ||
612 | * @mport: RapidIO master port info | ||
613 | * @index: ID of RapidIO interface | ||
614 | * @destid: Destination ID of target device | ||
615 | * @data: 16-bit info field of RapidIO doorbell message | ||
616 | * | ||
617 | * Sends an MPC85xx doorbell message. Returns %0 on success or | ||
618 | * %-EINVAL on failure. | ||
619 | */ | ||
620 | int fsl_rio_doorbell_send(struct rio_mport *mport, | ||
621 | int index, u16 destid, u16 data) | ||
622 | { | ||
623 | pr_debug("fsl_doorbell_send: index %d destid %4.4x data %4.4x\n", | ||
624 | index, destid, data); | ||
625 | |||
626 | /* On serial RapidIO silicon, such as the MPC8548 and MPC8641, | ||
627 | * the operations below are required. | ||
628 | */ | ||
629 | out_be32(&dbell->dbell_regs->odmr, 0x00000000); | ||
630 | out_be32(&dbell->dbell_regs->odretcr, 0x00000004); | ||
631 | out_be32(&dbell->dbell_regs->oddpr, destid << 16); | ||
632 | out_be32(&dbell->dbell_regs->oddatr, (index << 20) | data); | ||
633 | out_be32(&dbell->dbell_regs->odmr, 0x00000001); | ||
634 | |||
635 | return 0; | ||
636 | } | ||
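
For reference, the write sequence above places the destination ID in the upper half of ODDPR and packs the interface index plus the 16-bit info field into ODDATR before re-enabling the unit. A minimal sketch, not part of this patch, of the client-side path that reaches this function through the generic core (rio_send_doorbell()); names are illustrative:

    #include <linux/rio.h>
    #include <linux/rio_drv.h>

    /* Ring a doorbell on a remote device; the core routes this to the
     * mport's doorbell-send operation, i.e. fsl_rio_doorbell_send(). */
    static int my_ring_doorbell(struct rio_dev *rdev, u16 info)
    {
            return rio_send_doorbell(rdev, info);
    }
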
637 | |||
638 | /** | ||
639 | * fsl_add_outb_message - Add message to the MPC85xx outbound message queue | ||
640 | * @mport: Master port with outbound message queue | ||
641 | * @rdev: Target of outbound message | ||
642 | * @mbox: Outbound mailbox | ||
643 | * @buffer: Message to add to outbound queue | ||
644 | * @len: Length of message | ||
645 | * | ||
646 | * Adds the @buffer message to the MPC85xx outbound message queue. Returns | ||
647 | * %0 on success or %-EINVAL on failure. | ||
648 | */ | ||
649 | int | ||
650 | fsl_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox, | ||
651 | void *buffer, size_t len) | ||
652 | { | ||
653 | struct fsl_rmu *rmu = GET_RMM_HANDLE(mport); | ||
654 | u32 omr; | ||
655 | struct rio_tx_desc *desc = (struct rio_tx_desc *)rmu->msg_tx_ring.virt | ||
656 | + rmu->msg_tx_ring.tx_slot; | ||
657 | int ret = 0; | ||
658 | |||
659 | pr_debug("RIO: fsl_add_outb_message(): destid %4.4x mbox %d buffer " \ | ||
660 | "%8.8x len %8.8x\n", rdev->destid, mbox, (int)buffer, len); | ||
661 | if ((len < 8) || (len > RIO_MAX_MSG_SIZE)) { | ||
662 | ret = -EINVAL; | ||
663 | goto out; | ||
664 | } | ||
665 | |||
666 | /* Copy and clear rest of buffer */ | ||
667 | memcpy(rmu->msg_tx_ring.virt_buffer[rmu->msg_tx_ring.tx_slot], buffer, | ||
668 | len); | ||
669 | if (len < (RIO_MAX_MSG_SIZE - 4)) | ||
670 | memset(rmu->msg_tx_ring.virt_buffer[rmu->msg_tx_ring.tx_slot] | ||
671 | + len, 0, RIO_MAX_MSG_SIZE - len); | ||
672 | |||
673 | /* Set mbox field for message, and set destid */ | ||
674 | desc->dport = (rdev->destid << 16) | (mbox & 0x3); | ||
675 | |||
676 | /* Enable EOMI interrupt and priority */ | ||
677 | desc->dattr = 0x28000000 | ((mport->index) << 20); | ||
678 | |||
679 | /* Set transfer size aligned to next power of 2 (in double words) */ | ||
680 | desc->dwcnt = is_power_of_2(len) ? len : 1 << get_bitmask_order(len); | ||
681 | |||
682 | /* Set snooping and source buffer address */ | ||
683 | desc->saddr = 0x00000004 | ||
684 | | rmu->msg_tx_ring.phys_buffer[rmu->msg_tx_ring.tx_slot]; | ||
685 | |||
686 | /* Increment enqueue pointer */ | ||
687 | omr = in_be32(&rmu->msg_regs->omr); | ||
688 | out_be32(&rmu->msg_regs->omr, omr | RIO_MSG_OMR_MUI); | ||
689 | |||
690 | /* Go to next descriptor */ | ||
691 | if (++rmu->msg_tx_ring.tx_slot == rmu->msg_tx_ring.size) | ||
692 | rmu->msg_tx_ring.tx_slot = 0; | ||
693 | |||
694 | out: | ||
695 | return ret; | ||
696 | } | ||
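
Note that the descriptor setup rounds the transfer size up to the next power of two, so for example a 100-byte payload goes out as a 128-byte transfer with the tail zeroed by the memset above. A minimal sketch, not part of this patch, of the client-side call that reaches this function through the generic core (rio_add_outb_message()); the mailbox number and buffer are illustrative:

    #include <linux/rio.h>
    #include <linux/rio_drv.h>

    /* Queue one message on outbound mailbox 0; the core forwards this
     * to fsl_add_outb_message(). len must be 8..RIO_MAX_MSG_SIZE bytes. */
    static int my_send_msg(struct rio_mport *mport, struct rio_dev *rdev,
                           void *payload, size_t len)
    {
            return rio_add_outb_message(mport, rdev, 0, payload, len);
    }
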
697 | |||
698 | /** | ||
699 | * fsl_open_outb_mbox - Initialize MPC85xx outbound mailbox | ||
700 | * @mport: Master port implementing the outbound message unit | ||
701 | * @dev_id: Device specific pointer to pass on event | ||
702 | * @mbox: Mailbox to open | ||
703 | * @entries: Number of entries in the outbound mailbox ring | ||
704 | * | ||
705 | * Initializes the buffer ring, requests the outbound message interrupt, | ||
706 | * and enables the outbound message unit. Returns %0 on success and | ||
707 | * %-EINVAL or %-ENOMEM on failure. | ||
708 | */ | ||
709 | int | ||
710 | fsl_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries) | ||
711 | { | ||
712 | int i, j, rc = 0; | ||
713 | struct rio_priv *priv = mport->priv; | ||
714 | struct fsl_rmu *rmu = GET_RMM_HANDLE(mport); | ||
715 | |||
716 | if ((entries < RIO_MIN_TX_RING_SIZE) || | ||
717 | (entries > RIO_MAX_TX_RING_SIZE) || (!is_power_of_2(entries))) { | ||
718 | rc = -EINVAL; | ||
719 | goto out; | ||
720 | } | ||
721 | |||
722 | /* Initialize shadow copy ring */ | ||
723 | rmu->msg_tx_ring.dev_id = dev_id; | ||
724 | rmu->msg_tx_ring.size = entries; | ||
725 | |||
726 | for (i = 0; i < rmu->msg_tx_ring.size; i++) { | ||
727 | rmu->msg_tx_ring.virt_buffer[i] = | ||
728 | dma_alloc_coherent(priv->dev, RIO_MSG_BUFFER_SIZE, | ||
729 | &rmu->msg_tx_ring.phys_buffer[i], GFP_KERNEL); | ||
730 | if (!rmu->msg_tx_ring.virt_buffer[i]) { | ||
731 | rc = -ENOMEM; | ||
732 | for (j = 0; j < rmu->msg_tx_ring.size; j++) | ||
733 | if (rmu->msg_tx_ring.virt_buffer[j]) | ||
734 | dma_free_coherent(priv->dev, | ||
735 | RIO_MSG_BUFFER_SIZE, | ||
736 | rmu->msg_tx_ring. | ||
737 | virt_buffer[j], | ||
738 | rmu->msg_tx_ring. | ||
739 | phys_buffer[j]); | ||
740 | goto out; | ||
741 | } | ||
742 | } | ||
743 | |||
744 | /* Initialize outbound message descriptor ring */ | ||
745 | rmu->msg_tx_ring.virt = dma_alloc_coherent(priv->dev, | ||
746 | rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE, | ||
747 | &rmu->msg_tx_ring.phys, GFP_KERNEL); | ||
748 | if (!rmu->msg_tx_ring.virt) { | ||
749 | rc = -ENOMEM; | ||
750 | goto out_dma; | ||
751 | } | ||
752 | memset(rmu->msg_tx_ring.virt, 0, | ||
753 | rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE); | ||
754 | rmu->msg_tx_ring.tx_slot = 0; | ||
755 | |||
756 | /* Point dequeue/enqueue pointers at first entry in ring */ | ||
757 | out_be32(&rmu->msg_regs->odqdpar, rmu->msg_tx_ring.phys); | ||
758 | out_be32(&rmu->msg_regs->odqepar, rmu->msg_tx_ring.phys); | ||
759 | |||
760 | /* Configure for snooping */ | ||
761 | out_be32(&rmu->msg_regs->osar, 0x00000004); | ||
762 | |||
763 | /* Clear interrupt status */ | ||
764 | out_be32(&rmu->msg_regs->osr, 0x000000b3); | ||
765 | |||
766 | /* Hook up outbound message handler */ | ||
767 | rc = request_irq(IRQ_RIO_TX(mport), fsl_rio_tx_handler, 0, | ||
768 | "msg_tx", (void *)mport); | ||
769 | if (rc < 0) | ||
770 | goto out_irq; | ||
771 | |||
772 | /* | ||
773 | * Configure outbound message unit | ||
774 | * Snooping | ||
775 | * Interrupts (all enabled, except QEIE) | ||
776 | * Chaining mode | ||
777 | * Disable | ||
778 | */ | ||
779 | out_be32(&rmu->msg_regs->omr, 0x00100220); | ||
780 | |||
781 | /* Set number of entries */ | ||
782 | out_be32(&rmu->msg_regs->omr, | ||
783 | in_be32(&rmu->msg_regs->omr) | | ||
784 | ((get_bitmask_order(entries) - 2) << 12)); | ||
785 | |||
786 | /* Now enable the unit */ | ||
787 | out_be32(&rmu->msg_regs->omr, in_be32(&rmu->msg_regs->omr) | 0x1); | ||
788 | |||
789 | out: | ||
790 | return rc; | ||
791 | |||
792 | out_irq: | ||
793 | dma_free_coherent(priv->dev, | ||
794 | rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE, | ||
795 | rmu->msg_tx_ring.virt, rmu->msg_tx_ring.phys); | ||
796 | |||
797 | out_dma: | ||
798 | for (i = 0; i < rmu->msg_tx_ring.size; i++) | ||
799 | dma_free_coherent(priv->dev, RIO_MSG_BUFFER_SIZE, | ||
800 | rmu->msg_tx_ring.virt_buffer[i], | ||
801 | rmu->msg_tx_ring.phys_buffer[i]); | ||
802 | |||
803 | return rc; | ||
804 | } | ||
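
A client reaches this initialization through rio_request_outb_mbox(); the entries argument must be a power of two inside the RIO_MIN_TX_RING_SIZE..RIO_MAX_TX_RING_SIZE window checked above. A minimal sketch, not part of this patch, with an illustrative completion callback and ring size:

    #include <linux/rio.h>
    #include <linux/rio_drv.h>

    /* Completion callback invoked from fsl_rio_tx_handler() on the
     * end-of-message interrupt, with the finished descriptor slot. */
    static void my_outb_done(struct rio_mport *mport, void *dev_id,
                             int mbox, int slot)
    {
            pr_debug("RIO: mbox %d slot %d sent\n", mbox, slot);
    }

    static int my_open_outb(struct rio_mport *mport, void *dev_id)
    {
            /* 64 entries, assumed to lie within the supported range. */
            return rio_request_outb_mbox(mport, dev_id, 0, 64, my_outb_done);
    }
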
805 | |||
806 | /** | ||
807 | * fsl_close_outb_mbox - Shut down MPC85xx outbound mailbox | ||
808 | * @mport: Master port implementing the outbound message unit | ||
809 | * @mbox: Mailbox to close | ||
810 | * | ||
811 | * Disables the outbound message unit, frees all buffers, and | ||
812 | * frees the outbound message interrupt. | ||
813 | */ | ||
814 | void fsl_close_outb_mbox(struct rio_mport *mport, int mbox) | ||
815 | { | ||
816 | struct rio_priv *priv = mport->priv; | ||
817 | struct fsl_rmu *rmu = GET_RMM_HANDLE(mport); | ||
818 | |||
819 | /* Disable outbound message unit */ | ||
820 | out_be32(&rmu->msg_regs->omr, 0); | ||
821 | |||
822 | /* Free ring */ | ||
823 | dma_free_coherent(priv->dev, | ||
824 | rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE, | ||
825 | rmu->msg_tx_ring.virt, rmu->msg_tx_ring.phys); | ||
826 | |||
827 | /* Free interrupt */ | ||
828 | free_irq(IRQ_RIO_TX(mport), (void *)mport); | ||
829 | } | ||
830 | |||
831 | /** | ||
832 | * fsl_open_inb_mbox - Initialize MPC85xx inbound mailbox | ||
833 | * @mport: Master port implementing the inbound message unit | ||
834 | * @dev_id: Device specific pointer to pass on event | ||
835 | * @mbox: Mailbox to open | ||
836 | * @entries: Number of entries in the inbound mailbox ring | ||
837 | * | ||
838 | * Initializes the buffer ring, requests the inbound message interrupt, | ||
839 | * and enables the inbound message unit. Returns %0 on success | ||
840 | * and %-EINVAL or %-ENOMEM on failure. | ||
841 | */ | ||
842 | int | ||
843 | fsl_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries) | ||
844 | { | ||
845 | int i, rc = 0; | ||
846 | struct rio_priv *priv = mport->priv; | ||
847 | struct fsl_rmu *rmu = GET_RMM_HANDLE(mport); | ||
848 | |||
849 | if ((entries < RIO_MIN_RX_RING_SIZE) || | ||
850 | (entries > RIO_MAX_RX_RING_SIZE) || (!is_power_of_2(entries))) { | ||
851 | rc = -EINVAL; | ||
852 | goto out; | ||
853 | } | ||
854 | |||
855 | /* Initialize client buffer ring */ | ||
856 | rmu->msg_rx_ring.dev_id = dev_id; | ||
857 | rmu->msg_rx_ring.size = entries; | ||
858 | rmu->msg_rx_ring.rx_slot = 0; | ||
859 | for (i = 0; i < rmu->msg_rx_ring.size; i++) | ||
860 | rmu->msg_rx_ring.virt_buffer[i] = NULL; | ||
861 | |||
862 | /* Initialize inbound message ring */ | ||
863 | rmu->msg_rx_ring.virt = dma_alloc_coherent(priv->dev, | ||
864 | rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE, | ||
865 | &rmu->msg_rx_ring.phys, GFP_KERNEL); | ||
866 | if (!rmu->msg_rx_ring.virt) { | ||
867 | rc = -ENOMEM; | ||
868 | goto out; | ||
869 | } | ||
870 | |||
871 | /* Point dequeue/enqueue pointers at first entry in ring */ | ||
872 | out_be32(&rmu->msg_regs->ifqdpar, (u32) rmu->msg_rx_ring.phys); | ||
873 | out_be32(&rmu->msg_regs->ifqepar, (u32) rmu->msg_rx_ring.phys); | ||
874 | |||
875 | /* Clear interrupt status */ | ||
876 | out_be32(&rmu->msg_regs->isr, 0x00000091); | ||
877 | |||
878 | /* Hook up inbound message handler */ | ||
879 | rc = request_irq(IRQ_RIO_RX(mport), fsl_rio_rx_handler, 0, | ||
880 | "msg_rx", (void *)mport); | ||
881 | if (rc < 0) { | ||
882 | dma_free_coherent(priv->dev, | ||
883 | rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE, | ||
884 | rmu->msg_rx_ring.virt, rmu->msg_rx_ring.phys); | ||
885 | goto out; | ||
886 | } | ||
887 | |||
888 | /* | ||
889 | * Configure inbound message unit: | ||
890 | * Snooping | ||
891 | * 4KB max message size | ||
892 | * Unmask all interrupt sources | ||
893 | * Disable | ||
894 | */ | ||
895 | out_be32(&rmu->msg_regs->imr, 0x001b0060); | ||
896 | |||
897 | /* Set number of queue entries */ | ||
898 | setbits32(&rmu->msg_regs->imr, (get_bitmask_order(entries) - 2) << 12); | ||
899 | |||
900 | /* Now enable the unit */ | ||
901 | setbits32(&rmu->msg_regs->imr, 0x1); | ||
902 | |||
903 | out: | ||
904 | return rc; | ||
905 | } | ||
906 | |||
907 | /** | ||
908 | * fsl_close_inb_mbox - Shut down MPC85xx inbound mailbox | ||
909 | * @mport: Master port implementing the inbound message unit | ||
910 | * @mbox: Mailbox to close | ||
911 | * | ||
912 | * Disables the inbound message unit, frees all buffers, and | ||
913 | * frees the inbound message interrupt. | ||
914 | */ | ||
915 | void fsl_close_inb_mbox(struct rio_mport *mport, int mbox) | ||
916 | { | ||
917 | struct rio_priv *priv = mport->priv; | ||
918 | struct fsl_rmu *rmu = GET_RMM_HANDLE(mport); | ||
919 | |||
920 | /* Disable inbound message unit */ | ||
921 | out_be32(&rmu->msg_regs->imr, 0); | ||
922 | |||
923 | /* Free ring */ | ||
924 | dma_free_coherent(priv->dev, rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE, | ||
925 | rmu->msg_rx_ring.virt, rmu->msg_rx_ring.phys); | ||
926 | |||
927 | /* Free interrupt */ | ||
928 | free_irq(IRQ_RIO_RX(mport), (void *)mport); | ||
929 | } | ||
930 | |||
931 | /** | ||
932 | * fsl_add_inb_buffer - Add buffer to the MPC85xx inbound message queue | ||
933 | * @mport: Master port implementing the inbound message unit | ||
934 | * @mbox: Inbound mailbox number | ||
935 | * @buf: Buffer to add to inbound queue | ||
936 | * | ||
937 | * Adds the @buf buffer to the MPC85xx inbound message queue. Returns | ||
938 | * %0 on success or %-EINVAL on failure. | ||
939 | */ | ||
940 | int fsl_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf) | ||
941 | { | ||
942 | int rc = 0; | ||
943 | struct fsl_rmu *rmu = GET_RMM_HANDLE(mport); | ||
944 | |||
945 | pr_debug("RIO: fsl_add_inb_buffer(), msg_rx_ring.rx_slot %d\n", | ||
946 | rmu->msg_rx_ring.rx_slot); | ||
947 | |||
948 | if (rmu->msg_rx_ring.virt_buffer[rmu->msg_rx_ring.rx_slot]) { | ||
949 | printk(KERN_ERR | ||
950 | "RIO: error adding inbound buffer %d, buffer exists\n", | ||
951 | rmu->msg_rx_ring.rx_slot); | ||
952 | rc = -EINVAL; | ||
953 | goto out; | ||
954 | } | ||
955 | |||
956 | rmu->msg_rx_ring.virt_buffer[rmu->msg_rx_ring.rx_slot] = buf; | ||
957 | if (++rmu->msg_rx_ring.rx_slot == rmu->msg_rx_ring.size) | ||
958 | rmu->msg_rx_ring.rx_slot = 0; | ||
959 | |||
960 | out: | ||
961 | return rc; | ||
962 | } | ||
963 | |||
964 | /** | ||
965 | * fsl_get_inb_message - Fetch inbound message from the MPC85xx message unit | ||
966 | * @mport: Master port implementing the inbound message unit | ||
967 | * @mbox: Inbound mailbox number | ||
968 | * | ||
969 | * Gets the next available inbound message from the inbound message queue. | ||
970 | * A pointer to the message is returned on success or NULL on failure. | ||
971 | */ | ||
972 | void *fsl_get_inb_message(struct rio_mport *mport, int mbox) | ||
973 | { | ||
974 | struct fsl_rmu *rmu = GET_RMM_HANDLE(mport); | ||
975 | u32 phys_buf, virt_buf; | ||
976 | void *buf = NULL; | ||
977 | int buf_idx; | ||
978 | |||
979 | phys_buf = in_be32(&rmu->msg_regs->ifqdpar); | ||
980 | |||
981 | /* If no more messages, then bail out */ | ||
982 | if (phys_buf == in_be32(&rmu->msg_regs->ifqepar)) | ||
983 | goto out2; | ||
984 | |||
985 | virt_buf = (u32) rmu->msg_rx_ring.virt + (phys_buf | ||
986 | - rmu->msg_rx_ring.phys); | ||
987 | buf_idx = (phys_buf - rmu->msg_rx_ring.phys) / RIO_MAX_MSG_SIZE; | ||
988 | buf = rmu->msg_rx_ring.virt_buffer[buf_idx]; | ||
989 | |||
990 | if (!buf) { | ||
991 | printk(KERN_ERR | ||
992 | "RIO: inbound message copy failed, no buffers\n"); | ||
993 | goto out1; | ||
994 | } | ||
995 | |||
996 | /* Copy max message size; caller must provide a buffer that large */ | ||
997 | memcpy(buf, (void *)virt_buf, RIO_MAX_MSG_SIZE); | ||
998 | |||
999 | /* Clear the available buffer */ | ||
1000 | rmu->msg_rx_ring.virt_buffer[buf_idx] = NULL; | ||
1001 | |||
1002 | out1: | ||
1003 | setbits32(&rmu->msg_regs->imr, RIO_MSG_IMR_MI); | ||
1004 | |||
1005 | out2: | ||
1006 | return buf; | ||
1007 | } | ||
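
The inbound callback fired from fsl_rio_rx_handler() only signals that the queue is non-empty; a client is expected to drain it with rio_get_inb_message() and hand buffers back with rio_add_inb_buffer(), which land in the two functions above. A minimal sketch, not part of this patch; the callback name is illustrative and buffers are assumed to be RIO_MAX_MSG_SIZE bytes:

    #include <linux/rio.h>
    #include <linux/rio_drv.h>

    /* Mailbox callback registered with rio_request_inb_mbox(). */
    static void my_inb_cb(struct rio_mport *mport, void *dev_id,
                          int mbox, int slot)
    {
            void *msg;

            while ((msg = rio_get_inb_message(mport, mbox)) != NULL) {
                    /* ... consume the message contents here ... */
                    rio_add_inb_buffer(mport, mbox, msg); /* recycle */
            }
    }
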
1008 | |||
1009 | /** | ||
1010 | * fsl_rio_doorbell_init - MPC85xx doorbell interface init | ||
1011 | * @dbell: fsl_rio_dbell structure of the inbound doorbell unit | ||
1012 | * | ||
1013 | * Initializes doorbell unit hardware and inbound DMA buffer | ||
1014 | * ring. Called from fsl_rio_setup(). Returns %0 on success | ||
1015 | * or %-ENOMEM on failure. | ||
1016 | */ | ||
1017 | int fsl_rio_doorbell_init(struct fsl_rio_dbell *dbell) | ||
1018 | { | ||
1019 | int rc = 0; | ||
1020 | |||
1021 | /* Initialize inbound doorbells */ | ||
1022 | dbell->dbell_ring.virt = dma_alloc_coherent(dbell->dev, 512 * | ||
1023 | DOORBELL_MESSAGE_SIZE, &dbell->dbell_ring.phys, GFP_KERNEL); | ||
1024 | if (!dbell->dbell_ring.virt) { | ||
1025 | printk(KERN_ERR "RIO: unable to allocate inbound doorbell ring\n"); | ||
1026 | rc = -ENOMEM; | ||
1027 | goto out; | ||
1028 | } | ||
1029 | |||
1030 | /* Point dequeue/enqueue pointers at first entry in ring */ | ||
1031 | out_be32(&dbell->dbell_regs->dqdpar, (u32) dbell->dbell_ring.phys); | ||
1032 | out_be32(&dbell->dbell_regs->dqepar, (u32) dbell->dbell_ring.phys); | ||
1033 | |||
1034 | /* Clear interrupt status */ | ||
1035 | out_be32(&dbell->dbell_regs->dsr, 0x00000091); | ||
1036 | |||
1037 | /* Hook up doorbell handler */ | ||
1038 | rc = request_irq(IRQ_RIO_BELL(dbell), fsl_rio_dbell_handler, 0, | ||
1039 | "dbell_rx", (void *)dbell); | ||
1040 | if (rc < 0) { | ||
1041 | dma_free_coherent(dbell->dev, 512 * DOORBELL_MESSAGE_SIZE, | ||
1042 | dbell->dbell_ring.virt, dbell->dbell_ring.phys); | ||
1043 | printk(KERN_ERR | ||
1044 | "MPC85xx RIO: unable to request inbound doorbell irq"); | ||
1045 | goto out; | ||
1046 | } | ||
1047 | |||
1048 | /* Configure doorbells for snooping, 512 entries, and enable */ | ||
1049 | out_be32(&dbell->dbell_regs->dmr, 0x00108161); | ||
1050 | |||
1051 | out: | ||
1052 | return rc; | ||
1053 | } | ||
1054 | |||
1055 | int fsl_rio_setup_rmu(struct rio_mport *mport, struct device_node *node) | ||
1056 | { | ||
1057 | struct rio_priv *priv; | ||
1058 | struct fsl_rmu *rmu; | ||
1059 | u64 msg_start; | ||
1060 | const u32 *msg_addr; | ||
1061 | int mlen; | ||
1062 | int aw; | ||
1063 | |||
1064 | if (!mport || !mport->priv) | ||
1065 | return -EINVAL; | ||
1066 | |||
1067 | priv = mport->priv; | ||
1068 | |||
1069 | if (!node) { | ||
1070 | dev_warn(priv->dev, "Can't get %s property 'fsl,rmu'\n", | ||
1071 | priv->dev->of_node->full_name); | ||
1072 | return -EINVAL; | ||
1073 | } | ||
1074 | |||
1075 | rmu = kzalloc(sizeof(struct fsl_rmu), GFP_KERNEL); | ||
1076 | if (!rmu) | ||
1077 | return -ENOMEM; | ||
1078 | |||
1079 | aw = of_n_addr_cells(node); | ||
1080 | msg_addr = of_get_property(node, "reg", &mlen); | ||
1081 | if (!msg_addr) { | ||
1082 | pr_err("%s: unable to find 'reg' property of message-unit\n", | ||
1083 | node->full_name); | ||
1084 | kfree(rmu); | ||
1085 | return -ENOMEM; | ||
1086 | } | ||
1087 | msg_start = of_read_number(msg_addr, aw); | ||
1088 | |||
1089 | rmu->msg_regs = (struct rio_msg_regs *) | ||
1090 | (rmu_regs_win + (u32)msg_start); | ||
1091 | |||
1092 | rmu->txirq = irq_of_parse_and_map(node, 0); | ||
1093 | rmu->rxirq = irq_of_parse_and_map(node, 1); | ||
1094 | printk(KERN_INFO "%s: txirq: %d, rxirq %d\n", | ||
1095 | node->full_name, rmu->txirq, rmu->rxirq); | ||
1096 | |||
1097 | priv->rmm_handle = rmu; | ||
1098 | |||
1099 | rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff); | ||
1100 | rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 0); | ||
1101 | rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0); | ||
1102 | |||
1103 | return 0; | ||
1104 | } | ||
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index 8c7e8528e7c4..4e9ccb1015de 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c | |||
@@ -154,7 +154,7 @@ static inline unsigned int mpic_processor_id(struct mpic *mpic) | |||
154 | { | 154 | { |
155 | unsigned int cpu = 0; | 155 | unsigned int cpu = 0; |
156 | 156 | ||
157 | if (mpic->flags & MPIC_PRIMARY) | 157 | if (!(mpic->flags & MPIC_SECONDARY)) |
158 | cpu = hard_smp_processor_id(); | 158 | cpu = hard_smp_processor_id(); |
159 | 159 | ||
160 | return cpu; | 160 | return cpu; |
@@ -315,29 +315,25 @@ static void _mpic_map_mmio(struct mpic *mpic, phys_addr_t phys_addr, | |||
315 | } | 315 | } |
316 | 316 | ||
317 | #ifdef CONFIG_PPC_DCR | 317 | #ifdef CONFIG_PPC_DCR |
318 | static void _mpic_map_dcr(struct mpic *mpic, struct device_node *node, | 318 | static void _mpic_map_dcr(struct mpic *mpic, struct mpic_reg_bank *rb, |
319 | struct mpic_reg_bank *rb, | ||
320 | unsigned int offset, unsigned int size) | 319 | unsigned int offset, unsigned int size) |
321 | { | 320 | { |
322 | const u32 *dbasep; | 321 | phys_addr_t phys_addr = dcr_resource_start(mpic->node, 0); |
323 | 322 | rb->dhost = dcr_map(mpic->node, phys_addr + offset, size); | |
324 | dbasep = of_get_property(node, "dcr-reg", NULL); | ||
325 | |||
326 | rb->dhost = dcr_map(node, *dbasep + offset, size); | ||
327 | BUG_ON(!DCR_MAP_OK(rb->dhost)); | 323 | BUG_ON(!DCR_MAP_OK(rb->dhost)); |
328 | } | 324 | } |
329 | 325 | ||
330 | static inline void mpic_map(struct mpic *mpic, struct device_node *node, | 326 | static inline void mpic_map(struct mpic *mpic, |
331 | phys_addr_t phys_addr, struct mpic_reg_bank *rb, | 327 | phys_addr_t phys_addr, struct mpic_reg_bank *rb, |
332 | unsigned int offset, unsigned int size) | 328 | unsigned int offset, unsigned int size) |
333 | { | 329 | { |
334 | if (mpic->flags & MPIC_USES_DCR) | 330 | if (mpic->flags & MPIC_USES_DCR) |
335 | _mpic_map_dcr(mpic, node, rb, offset, size); | 331 | _mpic_map_dcr(mpic, rb, offset, size); |
336 | else | 332 | else |
337 | _mpic_map_mmio(mpic, phys_addr, rb, offset, size); | 333 | _mpic_map_mmio(mpic, phys_addr, rb, offset, size); |
338 | } | 334 | } |
339 | #else /* CONFIG_PPC_DCR */ | 335 | #else /* CONFIG_PPC_DCR */ |
340 | #define mpic_map(m,n,p,b,o,s) _mpic_map_mmio(m,p,b,o,s) | 336 | #define mpic_map(m,p,b,o,s) _mpic_map_mmio(m,p,b,o,s) |
341 | #endif /* !CONFIG_PPC_DCR */ | 337 | #endif /* !CONFIG_PPC_DCR */ |
342 | 338 | ||
343 | 339 | ||
@@ -901,7 +897,7 @@ int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type) | |||
901 | if (vold != vnew) | 897 | if (vold != vnew) |
902 | mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), vnew); | 898 | mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), vnew); |
903 | 899 | ||
904 | return IRQ_SET_MASK_OK_NOCOPY;; | 900 | return IRQ_SET_MASK_OK_NOCOPY; |
905 | } | 901 | } |
906 | 902 | ||
907 | void mpic_set_vector(unsigned int virq, unsigned int vector) | 903 | void mpic_set_vector(unsigned int virq, unsigned int vector) |
@@ -990,7 +986,7 @@ static int mpic_host_map(struct irq_host *h, unsigned int virq, | |||
990 | 986 | ||
991 | #ifdef CONFIG_SMP | 987 | #ifdef CONFIG_SMP |
992 | else if (hw >= mpic->ipi_vecs[0]) { | 988 | else if (hw >= mpic->ipi_vecs[0]) { |
993 | WARN_ON(!(mpic->flags & MPIC_PRIMARY)); | 989 | WARN_ON(mpic->flags & MPIC_SECONDARY); |
994 | 990 | ||
995 | DBG("mpic: mapping as IPI\n"); | 991 | DBG("mpic: mapping as IPI\n"); |
996 | irq_set_chip_data(virq, mpic); | 992 | irq_set_chip_data(virq, mpic); |
@@ -1001,7 +997,7 @@ static int mpic_host_map(struct irq_host *h, unsigned int virq, | |||
1001 | #endif /* CONFIG_SMP */ | 997 | #endif /* CONFIG_SMP */ |
1002 | 998 | ||
1003 | if (hw >= mpic->timer_vecs[0] && hw <= mpic->timer_vecs[7]) { | 999 | if (hw >= mpic->timer_vecs[0] && hw <= mpic->timer_vecs[7]) { |
1004 | WARN_ON(!(mpic->flags & MPIC_PRIMARY)); | 1000 | WARN_ON(mpic->flags & MPIC_SECONDARY); |
1005 | 1001 | ||
1006 | DBG("mpic: mapping as timer\n"); | 1002 | DBG("mpic: mapping as timer\n"); |
1007 | irq_set_chip_data(virq, mpic); | 1003 | irq_set_chip_data(virq, mpic); |
@@ -1115,17 +1111,28 @@ static int mpic_host_xlate(struct irq_host *h, struct device_node *ct, | |||
1115 | return 0; | 1111 | return 0; |
1116 | } | 1112 | } |
1117 | 1113 | ||
1114 | /* IRQ handler for a secondary MPIC cascaded from another IRQ controller */ | ||
1115 | static void mpic_cascade(unsigned int irq, struct irq_desc *desc) | ||
1116 | { | ||
1117 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
1118 | struct mpic *mpic = irq_desc_get_handler_data(desc); | ||
1119 | unsigned int virq; | ||
1120 | |||
1121 | BUG_ON(!(mpic->flags & MPIC_SECONDARY)); | ||
1122 | |||
1123 | virq = mpic_get_one_irq(mpic); | ||
1124 | if (virq != NO_IRQ) | ||
1125 | generic_handle_irq(virq); | ||
1126 | |||
1127 | chip->irq_eoi(&desc->irq_data); | ||
1128 | } | ||
1129 | |||
1118 | static struct irq_host_ops mpic_host_ops = { | 1130 | static struct irq_host_ops mpic_host_ops = { |
1119 | .match = mpic_host_match, | 1131 | .match = mpic_host_match, |
1120 | .map = mpic_host_map, | 1132 | .map = mpic_host_map, |
1121 | .xlate = mpic_host_xlate, | 1133 | .xlate = mpic_host_xlate, |
1122 | }; | 1134 | }; |
1123 | 1135 | ||
1124 | static int mpic_reset_prohibited(struct device_node *node) | ||
1125 | { | ||
1126 | return node && of_get_property(node, "pic-no-reset", NULL); | ||
1127 | } | ||
1128 | |||
1129 | /* | 1136 | /* |
1130 | * Exported functions | 1137 | * Exported functions |
1131 | */ | 1138 | */ |
@@ -1137,27 +1144,60 @@ struct mpic * __init mpic_alloc(struct device_node *node, | |||
1137 | unsigned int irq_count, | 1144 | unsigned int irq_count, |
1138 | const char *name) | 1145 | const char *name) |
1139 | { | 1146 | { |
1140 | struct mpic *mpic; | 1147 | int i, psize, intvec_top; |
1141 | u32 greg_feature; | 1148 | struct mpic *mpic; |
1142 | const char *vers; | 1149 | u32 greg_feature; |
1143 | int i; | 1150 | const char *vers; |
1144 | int intvec_top; | 1151 | const u32 *psrc; |
1145 | u64 paddr = phys_addr; | 1152 | |
1153 | /* Default MPIC search parameters */ | ||
1154 | static const struct of_device_id __initconst mpic_device_id[] = { | ||
1155 | { .type = "open-pic", }, | ||
1156 | { .compatible = "open-pic", }, | ||
1157 | {}, | ||
1158 | }; | ||
1159 | |||
1160 | /* | ||
1161 | * If we were not passed a device-tree node, then perform the default | ||
1162 | * search for a standardized OpenPIC. | ||
1163 | */ | ||
1164 | if (node) { | ||
1165 | node = of_node_get(node); | ||
1166 | } else { | ||
1167 | node = of_find_matching_node(NULL, mpic_device_id); | ||
1168 | if (!node) | ||
1169 | return NULL; | ||
1170 | } | ||
1171 | |||
1172 | /* Pick the physical address from the device tree if unspecified */ | ||
1173 | if (!phys_addr) { | ||
1174 | /* Check if it is DCR-based */ | ||
1175 | if (of_get_property(node, "dcr-reg", NULL)) { | ||
1176 | flags |= MPIC_USES_DCR; | ||
1177 | } else { | ||
1178 | struct resource r; | ||
1179 | if (of_address_to_resource(node, 0, &r)) | ||
1180 | goto err_of_node_put; | ||
1181 | phys_addr = r.start; | ||
1182 | } | ||
1183 | } | ||
1146 | 1184 | ||
1147 | mpic = kzalloc(sizeof(struct mpic), GFP_KERNEL); | 1185 | mpic = kzalloc(sizeof(struct mpic), GFP_KERNEL); |
1148 | if (mpic == NULL) | 1186 | if (mpic == NULL) |
1149 | return NULL; | 1187 | goto err_of_node_put; |
1150 | 1188 | ||
1151 | mpic->name = name; | 1189 | mpic->name = name; |
1190 | mpic->node = node; | ||
1191 | mpic->paddr = phys_addr; | ||
1152 | 1192 | ||
1153 | mpic->hc_irq = mpic_irq_chip; | 1193 | mpic->hc_irq = mpic_irq_chip; |
1154 | mpic->hc_irq.name = name; | 1194 | mpic->hc_irq.name = name; |
1155 | if (flags & MPIC_PRIMARY) | 1195 | if (!(flags & MPIC_SECONDARY)) |
1156 | mpic->hc_irq.irq_set_affinity = mpic_set_affinity; | 1196 | mpic->hc_irq.irq_set_affinity = mpic_set_affinity; |
1157 | #ifdef CONFIG_MPIC_U3_HT_IRQS | 1197 | #ifdef CONFIG_MPIC_U3_HT_IRQS |
1158 | mpic->hc_ht_irq = mpic_irq_ht_chip; | 1198 | mpic->hc_ht_irq = mpic_irq_ht_chip; |
1159 | mpic->hc_ht_irq.name = name; | 1199 | mpic->hc_ht_irq.name = name; |
1160 | if (flags & MPIC_PRIMARY) | 1200 | if (!(flags & MPIC_SECONDARY)) |
1161 | mpic->hc_ht_irq.irq_set_affinity = mpic_set_affinity; | 1201 | mpic->hc_ht_irq.irq_set_affinity = mpic_set_affinity; |
1162 | #endif /* CONFIG_MPIC_U3_HT_IRQS */ | 1202 | #endif /* CONFIG_MPIC_U3_HT_IRQS */ |
1163 | 1203 | ||
@@ -1194,28 +1234,22 @@ struct mpic * __init mpic_alloc(struct device_node *node, | |||
1194 | mpic->spurious_vec = intvec_top; | 1234 | mpic->spurious_vec = intvec_top; |
1195 | 1235 | ||
1196 | /* Check for "big-endian" in device-tree */ | 1236 | /* Check for "big-endian" in device-tree */ |
1197 | if (node && of_get_property(node, "big-endian", NULL) != NULL) | 1237 | if (of_get_property(mpic->node, "big-endian", NULL) != NULL) |
1198 | mpic->flags |= MPIC_BIG_ENDIAN; | 1238 | mpic->flags |= MPIC_BIG_ENDIAN; |
1199 | if (node && of_device_is_compatible(node, "fsl,mpic")) | 1239 | if (of_device_is_compatible(mpic->node, "fsl,mpic")) |
1200 | mpic->flags |= MPIC_FSL; | 1240 | mpic->flags |= MPIC_FSL; |
1201 | 1241 | ||
1202 | /* Look for protected sources */ | 1242 | /* Look for protected sources */ |
1203 | if (node) { | 1243 | psrc = of_get_property(mpic->node, "protected-sources", &psize); |
1204 | int psize; | 1244 | if (psrc) { |
1205 | unsigned int bits, mapsize; | 1245 | /* Allocate a bitmap with one bit per interrupt */ |
1206 | const u32 *psrc = | 1246 | unsigned int mapsize = BITS_TO_LONGS(intvec_top + 1); |
1207 | of_get_property(node, "protected-sources", &psize); | 1247 | mpic->protected = kzalloc(mapsize*sizeof(long), GFP_KERNEL); |
1208 | if (psrc) { | 1248 | BUG_ON(mpic->protected == NULL); |
1209 | psize /= 4; | 1249 | for (i = 0; i < psize/sizeof(u32); i++) { |
1210 | bits = intvec_top + 1; | 1250 | if (psrc[i] > intvec_top) |
1211 | mapsize = BITS_TO_LONGS(bits) * sizeof(unsigned long); | 1251 | continue; |
1212 | mpic->protected = kzalloc(mapsize, GFP_KERNEL); | 1252 | __set_bit(psrc[i], mpic->protected); |
1213 | BUG_ON(mpic->protected == NULL); | ||
1214 | for (i = 0; i < psize; i++) { | ||
1215 | if (psrc[i] > intvec_top) | ||
1216 | continue; | ||
1217 | __set_bit(psrc[i], mpic->protected); | ||
1218 | } | ||
1219 | } | 1253 | } |
1220 | } | 1254 | } |
1221 | 1255 | ||
@@ -1224,42 +1258,32 @@ struct mpic * __init mpic_alloc(struct device_node *node, | |||
1224 | #endif | 1258 | #endif |
1225 | 1259 | ||
1226 | /* default register type */ | 1260 | /* default register type */ |
1227 | mpic->reg_type = (flags & MPIC_BIG_ENDIAN) ? | 1261 | if (flags & MPIC_BIG_ENDIAN) |
1228 | mpic_access_mmio_be : mpic_access_mmio_le; | 1262 | mpic->reg_type = mpic_access_mmio_be; |
1229 | 1263 | else | |
1230 | /* If no physical address is passed in, a device-node is mandatory */ | 1264 | mpic->reg_type = mpic_access_mmio_le; |
1231 | BUG_ON(paddr == 0 && node == NULL); | ||
1232 | 1265 | ||
1233 | /* If no physical address passed in, check if it's dcr based */ | 1266 | /* |
1234 | if (paddr == 0 && of_get_property(node, "dcr-reg", NULL) != NULL) { | 1267 | * An MPIC with a "dcr-reg" property must be accessed that way, but |
1268 | * only if the kernel includes DCR support. | ||
1269 | */ | ||
1235 | #ifdef CONFIG_PPC_DCR | 1270 | #ifdef CONFIG_PPC_DCR |
1236 | mpic->flags |= MPIC_USES_DCR; | 1271 | if (flags & MPIC_USES_DCR) |
1237 | mpic->reg_type = mpic_access_dcr; | 1272 | mpic->reg_type = mpic_access_dcr; |
1238 | #else | 1273 | #else |
1239 | BUG(); | 1274 | BUG_ON(flags & MPIC_USES_DCR); |
1240 | #endif /* CONFIG_PPC_DCR */ | 1275 | #endif |
1241 | } | ||
1242 | |||
1243 | /* If the MPIC is not DCR based, and no physical address was passed | ||
1244 | * in, try to obtain one | ||
1245 | */ | ||
1246 | if (paddr == 0 && !(mpic->flags & MPIC_USES_DCR)) { | ||
1247 | const u32 *reg = of_get_property(node, "reg", NULL); | ||
1248 | BUG_ON(reg == NULL); | ||
1249 | paddr = of_translate_address(node, reg); | ||
1250 | BUG_ON(paddr == OF_BAD_ADDR); | ||
1251 | } | ||
1252 | 1276 | ||
1253 | /* Map the global registers */ | 1277 | /* Map the global registers */ |
1254 | mpic_map(mpic, node, paddr, &mpic->gregs, MPIC_INFO(GREG_BASE), 0x1000); | 1278 | mpic_map(mpic, mpic->paddr, &mpic->gregs, MPIC_INFO(GREG_BASE), 0x1000); |
1255 | mpic_map(mpic, node, paddr, &mpic->tmregs, MPIC_INFO(TIMER_BASE), 0x1000); | 1279 | mpic_map(mpic, mpic->paddr, &mpic->tmregs, MPIC_INFO(TIMER_BASE), 0x1000); |
1256 | 1280 | ||
1257 | /* Reset */ | 1281 | /* Reset */ |
1258 | 1282 | ||
1259 | /* When using a device-node, reset requests are only honored if the MPIC | 1283 | /* When using a device-node, reset requests are only honored if the MPIC |
1260 | * is allowed to reset. | 1284 | * is allowed to reset. |
1261 | */ | 1285 | */ |
1262 | if (mpic_reset_prohibited(node)) | 1286 | if (of_get_property(mpic->node, "pic-no-reset", NULL)) |
1263 | mpic->flags |= MPIC_NO_RESET; | 1287 | mpic->flags |= MPIC_NO_RESET; |
1264 | 1288 | ||
1265 | if ((flags & MPIC_WANTS_RESET) && !(mpic->flags & MPIC_NO_RESET)) { | 1289 | if ((flags & MPIC_WANTS_RESET) && !(mpic->flags & MPIC_NO_RESET)) { |
@@ -1307,7 +1331,7 @@ struct mpic * __init mpic_alloc(struct device_node *node, | |||
1307 | for_each_possible_cpu(i) { | 1331 | for_each_possible_cpu(i) { |
1308 | unsigned int cpu = get_hard_smp_processor_id(i); | 1332 | unsigned int cpu = get_hard_smp_processor_id(i); |
1309 | 1333 | ||
1310 | mpic_map(mpic, node, paddr, &mpic->cpuregs[cpu], | 1334 | mpic_map(mpic, mpic->paddr, &mpic->cpuregs[cpu], |
1311 | MPIC_INFO(CPU_BASE) + cpu * MPIC_INFO(CPU_STRIDE), | 1335 | MPIC_INFO(CPU_BASE) + cpu * MPIC_INFO(CPU_STRIDE), |
1312 | 0x1000); | 1336 | 0x1000); |
1313 | } | 1337 | } |
@@ -1315,16 +1339,21 @@ struct mpic * __init mpic_alloc(struct device_node *node, | |||
1315 | /* Initialize main ISU if none provided */ | 1339 | /* Initialize main ISU if none provided */ |
1316 | if (mpic->isu_size == 0) { | 1340 | if (mpic->isu_size == 0) { |
1317 | mpic->isu_size = mpic->num_sources; | 1341 | mpic->isu_size = mpic->num_sources; |
1318 | mpic_map(mpic, node, paddr, &mpic->isus[0], | 1342 | mpic_map(mpic, mpic->paddr, &mpic->isus[0], |
1319 | MPIC_INFO(IRQ_BASE), MPIC_INFO(IRQ_STRIDE) * mpic->isu_size); | 1343 | MPIC_INFO(IRQ_BASE), MPIC_INFO(IRQ_STRIDE) * mpic->isu_size); |
1320 | } | 1344 | } |
1321 | mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1); | 1345 | mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1); |
1322 | mpic->isu_mask = (1 << mpic->isu_shift) - 1; | 1346 | mpic->isu_mask = (1 << mpic->isu_shift) - 1; |
1323 | 1347 | ||
1324 | mpic->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR, | 1348 | mpic->irqhost = irq_alloc_host(mpic->node, IRQ_HOST_MAP_LINEAR, |
1325 | isu_size ? isu_size : mpic->num_sources, | 1349 | isu_size ? isu_size : mpic->num_sources, |
1326 | &mpic_host_ops, | 1350 | &mpic_host_ops, |
1327 | flags & MPIC_LARGE_VECTORS ? 2048 : 256); | 1351 | flags & MPIC_LARGE_VECTORS ? 2048 : 256); |
1352 | |||
1353 | /* | ||
1354 | * FIXME: The code leaks the MPIC object and mappings here; this | ||
1355 | * is very unlikely to fail but it ought to be fixed anyway. | ||
1356 | */ | ||
1328 | if (mpic->irqhost == NULL) | 1357 | if (mpic->irqhost == NULL) |
1329 | return NULL; | 1358 | return NULL; |
1330 | 1359 | ||
@@ -1347,19 +1376,23 @@ struct mpic * __init mpic_alloc(struct device_node *node, | |||
1347 | } | 1376 | } |
1348 | printk(KERN_INFO "mpic: Setting up MPIC \"%s\" version %s at %llx," | 1377 | printk(KERN_INFO "mpic: Setting up MPIC \"%s\" version %s at %llx," |
1349 | " max %d CPUs\n", | 1378 | " max %d CPUs\n", |
1350 | name, vers, (unsigned long long)paddr, num_possible_cpus()); | 1379 | name, vers, (unsigned long long)mpic->paddr, num_possible_cpus()); |
1351 | printk(KERN_INFO "mpic: ISU size: %d, shift: %d, mask: %x\n", | 1380 | printk(KERN_INFO "mpic: ISU size: %d, shift: %d, mask: %x\n", |
1352 | mpic->isu_size, mpic->isu_shift, mpic->isu_mask); | 1381 | mpic->isu_size, mpic->isu_shift, mpic->isu_mask); |
1353 | 1382 | ||
1354 | mpic->next = mpics; | 1383 | mpic->next = mpics; |
1355 | mpics = mpic; | 1384 | mpics = mpic; |
1356 | 1385 | ||
1357 | if (flags & MPIC_PRIMARY) { | 1386 | if (!(flags & MPIC_SECONDARY)) { |
1358 | mpic_primary = mpic; | 1387 | mpic_primary = mpic; |
1359 | irq_set_default_host(mpic->irqhost); | 1388 | irq_set_default_host(mpic->irqhost); |
1360 | } | 1389 | } |
1361 | 1390 | ||
1362 | return mpic; | 1391 | return mpic; |
1392 | |||
1393 | err_of_node_put: | ||
1394 | of_node_put(node); | ||
1395 | return NULL; | ||
1363 | } | 1396 | } |
1364 | 1397 | ||
1365 | void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num, | 1398 | void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num, |
@@ -1369,7 +1402,7 @@ void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num, | |||
1369 | 1402 | ||
1370 | BUG_ON(isu_num >= MPIC_MAX_ISU); | 1403 | BUG_ON(isu_num >= MPIC_MAX_ISU); |
1371 | 1404 | ||
1372 | mpic_map(mpic, mpic->irqhost->of_node, | 1405 | mpic_map(mpic, |
1373 | paddr, &mpic->isus[isu_num], 0, | 1406 | paddr, &mpic->isus[isu_num], 0, |
1374 | MPIC_INFO(IRQ_STRIDE) * mpic->isu_size); | 1407 | MPIC_INFO(IRQ_STRIDE) * mpic->isu_size); |
1375 | 1408 | ||
@@ -1385,8 +1418,7 @@ void __init mpic_set_default_senses(struct mpic *mpic, u8 *senses, int count) | |||
1385 | 1418 | ||
1386 | void __init mpic_init(struct mpic *mpic) | 1419 | void __init mpic_init(struct mpic *mpic) |
1387 | { | 1420 | { |
1388 | int i; | 1421 | int i, cpu; |
1389 | int cpu; | ||
1390 | 1422 | ||
1391 | BUG_ON(mpic->num_sources == 0); | 1423 | BUG_ON(mpic->num_sources == 0); |
1392 | 1424 | ||
@@ -1424,7 +1456,7 @@ void __init mpic_init(struct mpic *mpic) | |||
1424 | 1456 | ||
1425 | /* Do the HT PIC fixups on U3 broken mpic */ | 1457 | /* Do the HT PIC fixups on U3 broken mpic */ |
1426 | DBG("MPIC flags: %x\n", mpic->flags); | 1458 | DBG("MPIC flags: %x\n", mpic->flags); |
1427 | if ((mpic->flags & MPIC_U3_HT_IRQS) && (mpic->flags & MPIC_PRIMARY)) { | 1459 | if ((mpic->flags & MPIC_U3_HT_IRQS) && !(mpic->flags & MPIC_SECONDARY)) { |
1428 | mpic_scan_ht_pics(mpic); | 1460 | mpic_scan_ht_pics(mpic); |
1429 | mpic_u3msi_init(mpic); | 1461 | mpic_u3msi_init(mpic); |
1430 | } | 1462 | } |
@@ -1471,6 +1503,17 @@ void __init mpic_init(struct mpic *mpic) | |||
1471 | GFP_KERNEL); | 1503 | GFP_KERNEL); |
1472 | BUG_ON(mpic->save_data == NULL); | 1504 | BUG_ON(mpic->save_data == NULL); |
1473 | #endif | 1505 | #endif |
1506 | |||
1507 | /* Check if this MPIC is chained from a parent interrupt controller */ | ||
1508 | if (mpic->flags & MPIC_SECONDARY) { | ||
1509 | int virq = irq_of_parse_and_map(mpic->node, 0); | ||
1510 | if (virq != NO_IRQ) { | ||
1511 | printk(KERN_INFO "%s: hooking up to IRQ %d\n", | ||
1512 | mpic->node->full_name, virq); | ||
1513 | irq_set_handler_data(virq, mpic); | ||
1514 | irq_set_chained_handler(virq, &mpic_cascade); | ||
1515 | } | ||
1516 | } | ||
1474 | } | 1517 | } |
1475 | 1518 | ||
1476 | void __init mpic_set_clk_ratio(struct mpic *mpic, u32 clock_ratio) | 1519 | void __init mpic_set_clk_ratio(struct mpic *mpic, u32 clock_ratio) |
diff --git a/arch/powerpc/sysdev/ppc4xx_cpm.c b/arch/powerpc/sysdev/ppc4xx_cpm.c index 73b86cc5ea74..82e2cfe35c62 100644 --- a/arch/powerpc/sysdev/ppc4xx_cpm.c +++ b/arch/powerpc/sysdev/ppc4xx_cpm.c | |||
@@ -179,12 +179,12 @@ static struct kobj_attribute cpm_idle_attr = | |||
179 | 179 | ||
180 | static void cpm_idle_config_sysfs(void) | 180 | static void cpm_idle_config_sysfs(void) |
181 | { | 181 | { |
182 | struct sys_device *sys_dev; | 182 | struct device *dev; |
183 | unsigned long ret; | 183 | unsigned long ret; |
184 | 184 | ||
185 | sys_dev = get_cpu_sysdev(0); | 185 | dev = get_cpu_device(0); |
186 | 186 | ||
187 | ret = sysfs_create_file(&sys_dev->kobj, | 187 | ret = sysfs_create_file(&dev->kobj, |
188 | &cpm_idle_attr.attr); | 188 | &cpm_idle_attr.attr); |
189 | if (ret) | 189 | if (ret) |
190 | printk(KERN_WARNING | 190 | printk(KERN_WARNING |
diff --git a/arch/powerpc/sysdev/ppc4xx_pci.c b/arch/powerpc/sysdev/ppc4xx_pci.c index 862f11b3821e..4f05f7542346 100644 --- a/arch/powerpc/sysdev/ppc4xx_pci.c +++ b/arch/powerpc/sysdev/ppc4xx_pci.c | |||
@@ -185,9 +185,15 @@ static int __init ppc4xx_parse_dma_ranges(struct pci_controller *hose, | |||
185 | out: | 185 | out: |
186 | dma_offset_set = 1; | 186 | dma_offset_set = 1; |
187 | pci_dram_offset = res->start; | 187 | pci_dram_offset = res->start; |
188 | hose->dma_window_base_cur = res->start; | ||
189 | hose->dma_window_size = resource_size(res); | ||
188 | 190 | ||
189 | printk(KERN_INFO "4xx PCI DMA offset set to 0x%08lx\n", | 191 | printk(KERN_INFO "4xx PCI DMA offset set to 0x%08lx\n", |
190 | pci_dram_offset); | 192 | pci_dram_offset); |
193 | printk(KERN_INFO "4xx PCI DMA window base set to 0x%016llx\n", | ||
194 | (unsigned long long)hose->dma_window_base_cur); | ||
195 | printk(KERN_INFO "DMA window size 0x%016llx\n", | ||
196 | (unsigned long long)hose->dma_window_size); | ||
191 | return 0; | 197 | return 0; |
192 | } | 198 | } |
193 | 199 | ||
@@ -647,6 +653,7 @@ static unsigned int ppc4xx_pciex_port_count; | |||
647 | 653 | ||
648 | struct ppc4xx_pciex_hwops | 654 | struct ppc4xx_pciex_hwops |
649 | { | 655 | { |
656 | bool want_sdr; | ||
650 | int (*core_init)(struct device_node *np); | 657 | int (*core_init)(struct device_node *np); |
651 | int (*port_init_hw)(struct ppc4xx_pciex_port *port); | 658 | int (*port_init_hw)(struct ppc4xx_pciex_port *port); |
652 | int (*setup_utl)(struct ppc4xx_pciex_port *port); | 659 | int (*setup_utl)(struct ppc4xx_pciex_port *port); |
@@ -916,6 +923,7 @@ static int ppc440speB_pciex_init_utl(struct ppc4xx_pciex_port *port) | |||
916 | 923 | ||
917 | static struct ppc4xx_pciex_hwops ppc440speA_pcie_hwops __initdata = | 924 | static struct ppc4xx_pciex_hwops ppc440speA_pcie_hwops __initdata = |
918 | { | 925 | { |
926 | .want_sdr = true, | ||
919 | .core_init = ppc440spe_pciex_core_init, | 927 | .core_init = ppc440spe_pciex_core_init, |
920 | .port_init_hw = ppc440speA_pciex_init_port_hw, | 928 | .port_init_hw = ppc440speA_pciex_init_port_hw, |
921 | .setup_utl = ppc440speA_pciex_init_utl, | 929 | .setup_utl = ppc440speA_pciex_init_utl, |
@@ -924,6 +932,7 @@ static struct ppc4xx_pciex_hwops ppc440speA_pcie_hwops __initdata = | |||
924 | 932 | ||
925 | static struct ppc4xx_pciex_hwops ppc440speB_pcie_hwops __initdata = | 933 | static struct ppc4xx_pciex_hwops ppc440speB_pcie_hwops __initdata = |
926 | { | 934 | { |
935 | .want_sdr = true, | ||
927 | .core_init = ppc440spe_pciex_core_init, | 936 | .core_init = ppc440spe_pciex_core_init, |
928 | .port_init_hw = ppc440speB_pciex_init_port_hw, | 937 | .port_init_hw = ppc440speB_pciex_init_port_hw, |
929 | .setup_utl = ppc440speB_pciex_init_utl, | 938 | .setup_utl = ppc440speB_pciex_init_utl, |
@@ -1034,6 +1043,7 @@ static int ppc460ex_pciex_init_utl(struct ppc4xx_pciex_port *port) | |||
1034 | 1043 | ||
1035 | static struct ppc4xx_pciex_hwops ppc460ex_pcie_hwops __initdata = | 1044 | static struct ppc4xx_pciex_hwops ppc460ex_pcie_hwops __initdata = |
1036 | { | 1045 | { |
1046 | .want_sdr = true, | ||
1037 | .core_init = ppc460ex_pciex_core_init, | 1047 | .core_init = ppc460ex_pciex_core_init, |
1038 | .port_init_hw = ppc460ex_pciex_init_port_hw, | 1048 | .port_init_hw = ppc460ex_pciex_init_port_hw, |
1039 | .setup_utl = ppc460ex_pciex_init_utl, | 1049 | .setup_utl = ppc460ex_pciex_init_utl, |
@@ -1181,6 +1191,7 @@ done: | |||
1181 | } | 1191 | } |
1182 | 1192 | ||
1183 | static struct ppc4xx_pciex_hwops ppc460sx_pcie_hwops __initdata = { | 1193 | static struct ppc4xx_pciex_hwops ppc460sx_pcie_hwops __initdata = { |
1194 | .want_sdr = true, | ||
1184 | .core_init = ppc460sx_pciex_core_init, | 1195 | .core_init = ppc460sx_pciex_core_init, |
1185 | .port_init_hw = ppc460sx_pciex_init_port_hw, | 1196 | .port_init_hw = ppc460sx_pciex_init_port_hw, |
1186 | .setup_utl = ppc460sx_pciex_init_utl, | 1197 | .setup_utl = ppc460sx_pciex_init_utl, |
@@ -1276,6 +1287,7 @@ static int ppc405ex_pciex_init_utl(struct ppc4xx_pciex_port *port) | |||
1276 | 1287 | ||
1277 | static struct ppc4xx_pciex_hwops ppc405ex_pcie_hwops __initdata = | 1288 | static struct ppc4xx_pciex_hwops ppc405ex_pcie_hwops __initdata = |
1278 | { | 1289 | { |
1290 | .want_sdr = true, | ||
1279 | .core_init = ppc405ex_pciex_core_init, | 1291 | .core_init = ppc405ex_pciex_core_init, |
1280 | .port_init_hw = ppc405ex_pciex_init_port_hw, | 1292 | .port_init_hw = ppc405ex_pciex_init_port_hw, |
1281 | .setup_utl = ppc405ex_pciex_init_utl, | 1293 | .setup_utl = ppc405ex_pciex_init_utl, |
@@ -1284,6 +1296,52 @@ static struct ppc4xx_pciex_hwops ppc405ex_pcie_hwops __initdata = | |||
1284 | 1296 | ||
1285 | #endif /* CONFIG_40x */ | 1297 | #endif /* CONFIG_40x */ |
1286 | 1298 | ||
1299 | #ifdef CONFIG_476FPE | ||
1300 | static int __init ppc_476fpe_pciex_core_init(struct device_node *np) | ||
1301 | { | ||
1302 | return 4; | ||
1303 | } | ||
1304 | |||
1305 | static void __init ppc_476fpe_pciex_check_link(struct ppc4xx_pciex_port *port) | ||
1306 | { | ||
1307 | u32 timeout_ms = 20; | ||
1308 | u32 val = 0, mask = (PECFG_TLDLP_LNKUP|PECFG_TLDLP_PRESENT); | ||
1309 | void __iomem *mbase = ioremap(port->cfg_space.start + 0x10000000, | ||
1310 | 0x1000); | ||
1311 | |||
1312 | printk(KERN_INFO "PCIE%d: Checking link...\n", port->index); | ||
1313 | |||
1314 | if (mbase == NULL) { | ||
1315 | printk(KERN_WARNING "PCIE%d: failed to get cfg space\n", | ||
1316 | port->index); | ||
1317 | return; | ||
1318 | } | ||
1319 | |||
1320 | while (timeout_ms--) { | ||
1321 | val = in_le32(mbase + PECFG_TLDLP); | ||
1322 | |||
1323 | if ((val & mask) == mask) | ||
1324 | break; | ||
1325 | msleep(10); | ||
1326 | } | ||
1327 | |||
1328 | if (val & PECFG_TLDLP_PRESENT) { | ||
1329 | printk(KERN_INFO "PCIE%d: link is up !\n", port->index); | ||
1330 | port->link = 1; | ||
1331 | } else | ||
1332 | printk(KERN_WARNING "PCIE%d: Link up failed\n", port->index); | ||
1333 | |||
1334 | iounmap(mbase); | ||
1335 | return; | ||
1336 | } | ||
1337 | |||
1338 | static struct ppc4xx_pciex_hwops ppc_476fpe_pcie_hwops __initdata = | ||
1339 | { | ||
1340 | .core_init = ppc_476fpe_pciex_core_init, | ||
1341 | .check_link = ppc_476fpe_pciex_check_link, | ||
1342 | }; | ||
1343 | #endif /* CONFIG_476FPE */ | ||
1344 | |||
1287 | /* Check that the core has been initied and if not, do it */ | 1345 | /* Check that the core has been initied and if not, do it */ |
1288 | static int __init ppc4xx_pciex_check_core_init(struct device_node *np) | 1346 | static int __init ppc4xx_pciex_check_core_init(struct device_node *np) |
1289 | { | 1347 | { |
@@ -1309,6 +1367,10 @@ static int __init ppc4xx_pciex_check_core_init(struct device_node *np) | |||
1309 | if (of_device_is_compatible(np, "ibm,plb-pciex-405ex")) | 1367 | if (of_device_is_compatible(np, "ibm,plb-pciex-405ex")) |
1310 | ppc4xx_pciex_hwops = &ppc405ex_pcie_hwops; | 1368 | ppc4xx_pciex_hwops = &ppc405ex_pcie_hwops; |
1311 | #endif | 1369 | #endif |
1370 | #ifdef CONFIG_476FPE | ||
1371 | if (of_device_is_compatible(np, "ibm,plb-pciex-476fpe")) | ||
1372 | ppc4xx_pciex_hwops = &ppc_476fpe_pcie_hwops; | ||
1373 | #endif | ||
1312 | if (ppc4xx_pciex_hwops == NULL) { | 1374 | if (ppc4xx_pciex_hwops == NULL) { |
1313 | printk(KERN_WARNING "PCIE: unknown host type %s\n", | 1375 | printk(KERN_WARNING "PCIE: unknown host type %s\n", |
1314 | np->full_name); | 1376 | np->full_name); |
@@ -1617,6 +1679,10 @@ static int __init ppc4xx_setup_one_pciex_POM(struct ppc4xx_pciex_port *port, | |||
1617 | dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL, | 1679 | dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL, |
1618 | sa | DCRO_PEGPL_460SX_OMR1MSKL_UOT | 1680 | sa | DCRO_PEGPL_460SX_OMR1MSKL_UOT |
1619 | | DCRO_PEGPL_OMRxMSKL_VAL); | 1681 | | DCRO_PEGPL_OMRxMSKL_VAL); |
1682 | else if (of_device_is_compatible(port->node, "ibm,plb-pciex-476fpe")) | ||
1683 | dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL, | ||
1684 | sa | DCRO_PEGPL_476FPE_OMR1MSKL_UOT | ||
1685 | | DCRO_PEGPL_OMRxMSKL_VAL); | ||
1620 | else | 1686 | else |
1621 | dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL, | 1687 | dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL, |
1622 | sa | DCRO_PEGPL_OMR1MSKL_UOT | 1688 | sa | DCRO_PEGPL_OMR1MSKL_UOT |
@@ -1739,9 +1805,10 @@ static void __init ppc4xx_configure_pciex_PIMs(struct ppc4xx_pciex_port *port, | |||
1739 | /* Calculate window size */ | 1805 | /* Calculate window size */ |
1740 | sa = (0xffffffffffffffffull << ilog2(size)); | 1806 | sa = (0xffffffffffffffffull << ilog2(size)); |
1741 | if (res->flags & IORESOURCE_PREFETCH) | 1807 | if (res->flags & IORESOURCE_PREFETCH) |
1742 | sa |= 0x8; | 1808 | sa |= PCI_BASE_ADDRESS_MEM_PREFETCH; |
1743 | 1809 | ||
1744 | if (of_device_is_compatible(port->node, "ibm,plb-pciex-460sx")) | 1810 | if (of_device_is_compatible(port->node, "ibm,plb-pciex-460sx") || |
1811 | of_device_is_compatible(port->node, "ibm,plb-pciex-476fpe")) | ||
1745 | sa |= PCI_BASE_ADDRESS_MEM_TYPE_64; | 1812 | sa |= PCI_BASE_ADDRESS_MEM_TYPE_64; |
1746 | 1813 | ||
1747 | out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa)); | 1814 | out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa)); |
@@ -1972,13 +2039,15 @@ static void __init ppc4xx_probe_pciex_bridge(struct device_node *np) | |||
1972 | } | 2039 | } |
1973 | 2040 | ||
1974 | port->node = of_node_get(np); | 2041 | port->node = of_node_get(np); |
1975 | pval = of_get_property(np, "sdr-base", NULL); | 2042 | if (ppc4xx_pciex_hwops->want_sdr) { |
1976 | if (pval == NULL) { | 2043 | pval = of_get_property(np, "sdr-base", NULL); |
1977 | printk(KERN_ERR "PCIE: missing sdr-base for %s\n", | 2044 | if (pval == NULL) { |
1978 | np->full_name); | 2045 | printk(KERN_ERR "PCIE: missing sdr-base for %s\n", |
1979 | return; | 2046 | np->full_name); |
2047 | return; | ||
2048 | } | ||
2049 | port->sdr_base = *pval; | ||
1980 | } | 2050 | } |
1981 | port->sdr_base = *pval; | ||
1982 | 2051 | ||
1983 | /* Check if device_type property is set to "pci" or "pci-endpoint". | 2052 | /* Check if device_type property is set to "pci" or "pci-endpoint". |
1984 | * Resulting from this setup this PCIe port will be configured | 2053 | * Resulting from this setup this PCIe port will be configured |
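Editor's note: two themes run through the ppc4xx_pci.c changes above: the new want_sdr flag in ppc4xx_pciex_hwops makes the device-tree "sdr-base" property optional (the 476FPE ops leave it unset), and 476FPE gets its own core-init plus a polled link check. A hedged consolidation of the sdr-base gating is sketched below; the function name is illustrative and the driver's local types are assumed.

/* Sketch only: gating the sdr-base lookup on want_sdr.  pciex_probe_sdr()
 * is not a function in the driver; it assumes the driver-local
 * ppc4xx_pciex_port and ppc4xx_pciex_hwops definitions. */
static int __init pciex_probe_sdr(struct ppc4xx_pciex_port *port,
				  struct device_node *np)
{
	const u32 *pval;

	if (!ppc4xx_pciex_hwops->want_sdr)
		return 0;	/* e.g. ibm,plb-pciex-476fpe has no SDR block */

	pval = of_get_property(np, "sdr-base", NULL);
	if (pval == NULL) {
		printk(KERN_ERR "PCIE: missing sdr-base for %s\n",
		       np->full_name);
		return -ENODEV;
	}
	port->sdr_base = *pval;
	return 0;
}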
diff --git a/arch/powerpc/sysdev/ppc4xx_pci.h b/arch/powerpc/sysdev/ppc4xx_pci.h index 32ce763a375a..bb4821938ab1 100644 --- a/arch/powerpc/sysdev/ppc4xx_pci.h +++ b/arch/powerpc/sysdev/ppc4xx_pci.h | |||
@@ -476,6 +476,13 @@ | |||
476 | #define DCRO_PEGPL_OMR1MSKL_UOT 0x00000002 | 476 | #define DCRO_PEGPL_OMR1MSKL_UOT 0x00000002 |
477 | #define DCRO_PEGPL_OMR3MSKL_IO 0x00000002 | 477 | #define DCRO_PEGPL_OMR3MSKL_IO 0x00000002 |
478 | 478 | ||
479 | /* 476FPE */ | ||
480 | #define PCCFG_LCPA 0x270 | ||
481 | #define PECFG_TLDLP 0x3F8 | ||
482 | #define PECFG_TLDLP_LNKUP 0x00000008 | ||
483 | #define PECFG_TLDLP_PRESENT 0x00000010 | ||
484 | #define DCRO_PEGPL_476FPE_OMR1MSKL_UOT 0x00000004 | ||
485 | |||
479 | /* SDR Bit Mappings */ | 486 | /* SDR Bit Mappings */ |
480 | #define PESDRx_RCSSET_HLDPLB 0x10000000 | 487 | #define PESDRx_RCSSET_HLDPLB 0x10000000 |
481 | #define PESDRx_RCSSET_RSTGU 0x01000000 | 488 | #define PESDRx_RCSSET_RSTGU 0x01000000 |
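Editor's note: the new PECFG_TLDLP bits are what ppc_476fpe_pciex_check_link() polls. One possible way to read the two bits together is sketched below, purely as an illustration; the driver itself only tests PRESENT after polling for both bits, and the helper is not part of the code.

#include <linux/types.h>

/* Illustrative decode of the 476FPE PECFG_TLDLP status word, assuming the
 * masks defined in ppc4xx_pci.h above; not part of the driver. */
static const char *tldlp_state(u32 val)
{
	u32 up = PECFG_TLDLP_LNKUP | PECFG_TLDLP_PRESENT;

	if ((val & up) == up)
		return "device present, link trained";
	if (val & PECFG_TLDLP_PRESENT)
		return "device present, link not (yet) up";
	return "no device detected";
}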
diff --git a/arch/powerpc/sysdev/qe_lib/gpio.c b/arch/powerpc/sysdev/qe_lib/gpio.c index e23f23cf9f5c..521e67a49dc4 100644 --- a/arch/powerpc/sysdev/qe_lib/gpio.c +++ b/arch/powerpc/sysdev/qe_lib/gpio.c | |||
@@ -139,14 +139,10 @@ struct qe_pin { | |||
139 | struct qe_pin *qe_pin_request(struct device_node *np, int index) | 139 | struct qe_pin *qe_pin_request(struct device_node *np, int index) |
140 | { | 140 | { |
141 | struct qe_pin *qe_pin; | 141 | struct qe_pin *qe_pin; |
142 | struct device_node *gpio_np; | ||
143 | struct gpio_chip *gc; | 142 | struct gpio_chip *gc; |
144 | struct of_mm_gpio_chip *mm_gc; | 143 | struct of_mm_gpio_chip *mm_gc; |
145 | struct qe_gpio_chip *qe_gc; | 144 | struct qe_gpio_chip *qe_gc; |
146 | int err; | 145 | int err; |
147 | int size; | ||
148 | const void *gpio_spec; | ||
149 | const u32 *gpio_cells; | ||
150 | unsigned long flags; | 146 | unsigned long flags; |
151 | 147 | ||
152 | qe_pin = kzalloc(sizeof(*qe_pin), GFP_KERNEL); | 148 | qe_pin = kzalloc(sizeof(*qe_pin), GFP_KERNEL); |
@@ -155,45 +151,25 @@ struct qe_pin *qe_pin_request(struct device_node *np, int index) | |||
155 | return ERR_PTR(-ENOMEM); | 151 | return ERR_PTR(-ENOMEM); |
156 | } | 152 | } |
157 | 153 | ||
158 | err = of_parse_phandles_with_args(np, "gpios", "#gpio-cells", index, | 154 | err = of_get_gpio(np, index); |
159 | &gpio_np, &gpio_spec); | 155 | if (err < 0) |
160 | if (err) { | 156 | goto err0; |
161 | pr_debug("%s: can't parse gpios property\n", __func__); | 157 | gc = gpio_to_chip(err); |
158 | if (WARN_ON(!gc)) | ||
162 | goto err0; | 159 | goto err0; |
163 | } | ||
164 | 160 | ||
165 | if (!of_device_is_compatible(gpio_np, "fsl,mpc8323-qe-pario-bank")) { | 161 | if (!of_device_is_compatible(gc->of_node, "fsl,mpc8323-qe-pario-bank")) { |
166 | pr_debug("%s: tried to get a non-qe pin\n", __func__); | 162 | pr_debug("%s: tried to get a non-qe pin\n", __func__); |
167 | err = -EINVAL; | 163 | err = -EINVAL; |
168 | goto err1; | 164 | goto err0; |
169 | } | ||
170 | |||
171 | gc = of_node_to_gpiochip(gpio_np); | ||
172 | if (!gc) { | ||
173 | pr_debug("%s: gpio controller %s isn't registered\n", | ||
174 | np->full_name, gpio_np->full_name); | ||
175 | err = -ENODEV; | ||
176 | goto err1; | ||
177 | } | ||
178 | |||
179 | gpio_cells = of_get_property(gpio_np, "#gpio-cells", &size); | ||
180 | if (!gpio_cells || size != sizeof(*gpio_cells) || | ||
181 | *gpio_cells != gc->of_gpio_n_cells) { | ||
182 | pr_debug("%s: wrong #gpio-cells for %s\n", | ||
183 | np->full_name, gpio_np->full_name); | ||
184 | err = -EINVAL; | ||
185 | goto err1; | ||
186 | } | 165 | } |
187 | 166 | ||
188 | err = gc->of_xlate(gc, np, gpio_spec, NULL); | ||
189 | if (err < 0) | ||
190 | goto err1; | ||
191 | |||
192 | mm_gc = to_of_mm_gpio_chip(gc); | 167 | mm_gc = to_of_mm_gpio_chip(gc); |
193 | qe_gc = to_qe_gpio_chip(mm_gc); | 168 | qe_gc = to_qe_gpio_chip(mm_gc); |
194 | 169 | ||
195 | spin_lock_irqsave(&qe_gc->lock, flags); | 170 | spin_lock_irqsave(&qe_gc->lock, flags); |
196 | 171 | ||
172 | err -= gc->base; | ||
197 | if (test_and_set_bit(QE_PIN_REQUESTED, &qe_gc->pin_flags[err]) == 0) { | 173 | if (test_and_set_bit(QE_PIN_REQUESTED, &qe_gc->pin_flags[err]) == 0) { |
198 | qe_pin->controller = qe_gc; | 174 | qe_pin->controller = qe_gc; |
199 | qe_pin->num = err; | 175 | qe_pin->num = err; |
@@ -206,8 +182,6 @@ struct qe_pin *qe_pin_request(struct device_node *np, int index) | |||
206 | 182 | ||
207 | if (!err) | 183 | if (!err) |
208 | return qe_pin; | 184 | return qe_pin; |
209 | err1: | ||
210 | of_node_put(gpio_np); | ||
211 | err0: | 185 | err0: |
212 | kfree(qe_pin); | 186 | kfree(qe_pin); |
213 | pr_debug("%s failed with status %d\n", __func__, err); | 187 | pr_debug("%s failed with status %d\n", __func__, err); |
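Editor's note: the qe_pin_request() rework above replaces the hand-rolled phandle parsing with the generic GPIO helpers: of_get_gpio() yields a global GPIO number, gpio_to_chip() finds its controller, and subtracting gc->base gives the bank-relative index used for pin_flags[]. That lookup in isolation might look like the sketch below; the function name is illustrative.

#include <linux/gpio.h>
#include <linux/of.h>
#include <linux/of_gpio.h>

/* Sketch of the simplified lookup flow; not a function in the driver. */
static int qe_lookup_pin(struct device_node *np, int index,
			 struct gpio_chip **chipp)
{
	int gpio = of_get_gpio(np, index);	/* global GPIO number */
	struct gpio_chip *gc;

	if (gpio < 0)
		return gpio;

	gc = gpio_to_chip(gpio);
	if (!gc)
		return -ENODEV;

	if (!of_device_is_compatible(gc->of_node,
				     "fsl,mpc8323-qe-pario-bank"))
		return -EINVAL;		/* not a QE par-I/O bank */

	*chipp = gc;
	return gpio - gc->base;		/* bank-relative pin number */
}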
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c index 18e75ca19fe6..73034bd203c4 100644 --- a/arch/powerpc/sysdev/qe_lib/qe_ic.c +++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c | |||
@@ -22,7 +22,6 @@ | |||
22 | #include <linux/stddef.h> | 22 | #include <linux/stddef.h> |
23 | #include <linux/sched.h> | 23 | #include <linux/sched.h> |
24 | #include <linux/signal.h> | 24 | #include <linux/signal.h> |
25 | #include <linux/sysdev.h> | ||
26 | #include <linux/device.h> | 25 | #include <linux/device.h> |
27 | #include <linux/bootmem.h> | 26 | #include <linux/bootmem.h> |
28 | #include <linux/spinlock.h> | 27 | #include <linux/spinlock.h> |
@@ -484,13 +483,14 @@ int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high) | |||
484 | return 0; | 483 | return 0; |
485 | } | 484 | } |
486 | 485 | ||
487 | static struct sysdev_class qe_ic_sysclass = { | 486 | static struct bus_type qe_ic_subsys = { |
488 | .name = "qe_ic", | 487 | .name = "qe_ic", |
488 | .dev_name = "qe_ic", | ||
489 | }; | 489 | }; |
490 | 490 | ||
491 | static struct sys_device device_qe_ic = { | 491 | static struct device device_qe_ic = { |
492 | .id = 0, | 492 | .id = 0, |
493 | .cls = &qe_ic_sysclass, | 493 | .bus = &qe_ic_subsys, |
494 | }; | 494 | }; |
495 | 495 | ||
496 | static int __init init_qe_ic_sysfs(void) | 496 | static int __init init_qe_ic_sysfs(void) |
@@ -499,12 +499,12 @@ static int __init init_qe_ic_sysfs(void) | |||
499 | 499 | ||
500 | printk(KERN_DEBUG "Registering qe_ic with sysfs...\n"); | 500 | printk(KERN_DEBUG "Registering qe_ic with sysfs...\n"); |
501 | 501 | ||
502 | rc = sysdev_class_register(&qe_ic_sysclass); | 502 | rc = subsys_system_register(&qe_ic_subsys, NULL); |
503 | if (rc) { | 503 | if (rc) { |
504 | printk(KERN_ERR "Failed registering qe_ic sys class\n"); | 504 | printk(KERN_ERR "Failed registering qe_ic sys class\n"); |
505 | return -ENODEV; | 505 | return -ENODEV; |
506 | } | 506 | } |
507 | rc = sysdev_register(&device_qe_ic); | 507 | rc = device_register(&device_qe_ic); |
508 | if (rc) { | 508 | if (rc) { |
509 | printk(KERN_ERR "Failed registering qe_ic sys device\n"); | 509 | printk(KERN_ERR "Failed registering qe_ic sys device\n"); |
510 | return -ENODEV; | 510 | return -ENODEV; |
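Editor's note: the qe_ic change is the standard sysdev-to-device conversion: the old sysdev class becomes a system subsystem (a bus_type registered with subsys_system_register()) and the old sys_device becomes a plain device on that bus. The same skeleton, with placeholder names, should apply to any remaining sysdev user.

#include <linux/device.h>

/* Conversion skeleton with placeholder names; mirrors the qe_ic change. */
static struct bus_type foo_subsys = {
	.name = "foo",
	.dev_name = "foo",
};

static struct device foo_device = {
	.id = 0,
	.bus = &foo_subsys,
};

static int __init foo_sysfs_init(void)
{
	int rc;

	rc = subsys_system_register(&foo_subsys, NULL);
	if (rc)
		return rc;

	return device_register(&foo_device);
}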
diff --git a/arch/powerpc/sysdev/uic.c b/arch/powerpc/sysdev/uic.c index 3330feca7502..063c901b1265 100644 --- a/arch/powerpc/sysdev/uic.c +++ b/arch/powerpc/sysdev/uic.c | |||
@@ -18,7 +18,6 @@ | |||
18 | #include <linux/stddef.h> | 18 | #include <linux/stddef.h> |
19 | #include <linux/sched.h> | 19 | #include <linux/sched.h> |
20 | #include <linux/signal.h> | 20 | #include <linux/signal.h> |
21 | #include <linux/sysdev.h> | ||
22 | #include <linux/device.h> | 21 | #include <linux/device.h> |
23 | #include <linux/bootmem.h> | 22 | #include <linux/bootmem.h> |
24 | #include <linux/spinlock.h> | 23 | #include <linux/spinlock.h> |
diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c index 9518d367a64f..253dce98c16e 100644 --- a/arch/powerpc/sysdev/xics/icp-hv.c +++ b/arch/powerpc/sysdev/xics/icp-hv.c | |||
@@ -27,33 +27,50 @@ static inline unsigned int icp_hv_get_xirr(unsigned char cppr) | |||
27 | { | 27 | { |
28 | unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; | 28 | unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; |
29 | long rc; | 29 | long rc; |
30 | unsigned int ret = XICS_IRQ_SPURIOUS; | ||
30 | 31 | ||
31 | rc = plpar_hcall(H_XIRR, retbuf, cppr); | 32 | rc = plpar_hcall(H_XIRR, retbuf, cppr); |
32 | if (rc != H_SUCCESS) | 33 | if (rc == H_SUCCESS) { |
33 | panic(" bad return code xirr - rc = %lx\n", rc); | 34 | ret = (unsigned int)retbuf[0]; |
34 | return (unsigned int)retbuf[0]; | 35 | } else { |
35 | } | 36 | pr_err("%s: bad return code xirr cppr=0x%x returned %ld\n", |
37 | __func__, cppr, rc); | ||
38 | WARN_ON_ONCE(1); | ||
39 | } | ||
36 | 40 | ||
37 | static inline void icp_hv_set_xirr(unsigned int value) | 41 | return ret; |
38 | { | ||
39 | long rc = plpar_hcall_norets(H_EOI, value); | ||
40 | if (rc != H_SUCCESS) | ||
41 | panic("bad return code EOI - rc = %ld, value=%x\n", rc, value); | ||
42 | } | 42 | } |
43 | 43 | ||
44 | static inline void icp_hv_set_cppr(u8 value) | 44 | static inline void icp_hv_set_cppr(u8 value) |
45 | { | 45 | { |
46 | long rc = plpar_hcall_norets(H_CPPR, value); | 46 | long rc = plpar_hcall_norets(H_CPPR, value); |
47 | if (rc != H_SUCCESS) | 47 | if (rc != H_SUCCESS) { |
48 | panic("bad return code cppr - rc = %lx\n", rc); | 48 | pr_err("%s: bad return code cppr cppr=0x%x returned %ld\n", |
49 | __func__, value, rc); | ||
50 | WARN_ON_ONCE(1); | ||
51 | } | ||
52 | } | ||
53 | |||
54 | static inline void icp_hv_set_xirr(unsigned int value) | ||
55 | { | ||
56 | long rc = plpar_hcall_norets(H_EOI, value); | ||
57 | if (rc != H_SUCCESS) { | ||
58 | pr_err("%s: bad return code eoi xirr=0x%x returned %ld\n", | ||
59 | __func__, value, rc); | ||
60 | WARN_ON_ONCE(1); | ||
61 | icp_hv_set_cppr(value >> 24); | ||
62 | } | ||
49 | } | 63 | } |
50 | 64 | ||
51 | static inline void icp_hv_set_qirr(int n_cpu , u8 value) | 65 | static inline void icp_hv_set_qirr(int n_cpu , u8 value) |
52 | { | 66 | { |
53 | long rc = plpar_hcall_norets(H_IPI, get_hard_smp_processor_id(n_cpu), | 67 | int hw_cpu = get_hard_smp_processor_id(n_cpu); |
54 | value); | 68 | long rc = plpar_hcall_norets(H_IPI, hw_cpu, value); |
55 | if (rc != H_SUCCESS) | 69 | if (rc != H_SUCCESS) { |
56 | panic("bad return code qirr - rc = %lx\n", rc); | 70 | pr_err("%s: bad return code qirr cpu=%d hw_cpu=%d mfrr=0x%x " |
71 | "returned %ld\n", __func__, n_cpu, hw_cpu, value, rc); | ||
72 | WARN_ON_ONCE(1); | ||
73 | } | ||
57 | } | 74 | } |
58 | 75 | ||
59 | static void icp_hv_eoi(struct irq_data *d) | 76 | static void icp_hv_eoi(struct irq_data *d) |
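Editor's note: the icp-hv.c hunk replaces the old panic() calls with pr_err() plus WARN_ON_ONCE(), so a failing hypervisor call is logged rather than taking the machine down; on a failed H_EOI the code additionally restores the CPPR from the top byte of the XIRR value. The shared pattern, expressed as one illustrative wrapper (not part of the driver):

#include <linux/bug.h>
#include <linux/printk.h>
#include <asm/hvcall.h>

/* Illustrative "log and warn, don't panic" wrapper; not part of the driver. */
static inline long icp_hv_call_norets(unsigned long opcode, unsigned long arg,
				      const char *what)
{
	long rc = plpar_hcall_norets(opcode, arg);

	if (rc != H_SUCCESS) {
		pr_err("%s: bad return code %s arg=0x%lx returned %ld\n",
		       __func__, what, arg, rc);
		WARN_ON_ONCE(1);
	}
	return rc;
}

With such a helper, icp_hv_set_cppr() would reduce to icp_hv_call_norets(H_CPPR, value, "cppr"), while the H_EOI path keeps its extra CPPR-restore step on failure.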
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c index 63762c672a03..d72eda6a4c05 100644 --- a/arch/powerpc/sysdev/xics/xics-common.c +++ b/arch/powerpc/sysdev/xics/xics-common.c | |||
@@ -137,7 +137,7 @@ static void xics_request_ipi(void) | |||
137 | * IPIs are marked IRQF_PERCPU. The handler was set in map. | 137 | * IPIs are marked IRQF_PERCPU. The handler was set in map. |
138 | */ | 138 | */ |
139 | BUG_ON(request_irq(ipi, icp_ops->ipi_action, | 139 | BUG_ON(request_irq(ipi, icp_ops->ipi_action, |
140 | IRQF_PERCPU, "IPI", NULL)); | 140 | IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL)); |
141 | } | 141 | } |
142 | 142 | ||
143 | int __init xics_smp_probe(void) | 143 | int __init xics_smp_probe(void) |
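Editor's note: the XICS IPI is now requested with IRQF_NO_THREAD in addition to IRQF_PERCPU, so forced interrupt threading (e.g. the threadirqs option or PREEMPT_RT) cannot move the IPI handler out of hard interrupt context. A minimal sketch with placeholder names:

#include <linux/interrupt.h>

/* Sketch: requesting a per-CPU, never-threaded IPI; names are placeholders. */
static irqreturn_t my_ipi_action(int irq, void *dev_id)
{
	/* IPI work must run in hard interrupt context */
	return IRQ_HANDLED;
}

static int __init setup_ipi(unsigned int ipi_virq)
{
	return request_irq(ipi_virq, my_ipi_action,
			   IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL);
}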