author		Manuel Lauss <manuel.lauss@googlemail.com>	2011-08-12 05:39:44 -0400
committer	Ralf Baechle <ralf@linux-mips.org>	2011-10-24 18:34:24 -0400
commit		7517de348663b08a808aff44b5300e817157a568 (patch)
tree		b70b0bdbb30f2ef796c4ca319ad922e7686ca51c /arch/mips/pci/pci-alchemy.c
parent		7cc2e272da3d88c0de9e05b32729402785bd9206 (diff)
MIPS: Alchemy: Redo PCI as platform driver
- Rewrite Alchemy PCI support as a platform driver.
- Fix up boards which have PCI.
Run-tested on DB1500 and DB1550.
Signed-off-by: Manuel Lauss <manuel.lauss@googlemail.com>
To: Linux-MIPS <linux-mips@linux-mips.org>
Patchwork: https://patchwork.linux-mips.org/patch/2706/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
delete mode 100644 arch/mips/alchemy/common/pci.c
delete mode 100644 arch/mips/pci/fixup-au1000.c
delete mode 100644 arch/mips/pci/ops-au1000.c
create mode 100644 arch/mips/pci/pci-alchemy.c
Diffstat (limited to 'arch/mips/pci/pci-alchemy.c')
-rw-r--r--	arch/mips/pci/pci-alchemy.c	516
1 file changed, 516 insertions, 0 deletions
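
With the conversion to a platform driver, a board that has PCI now hands the controller's register resource and an alchemy_pci_platdata to a platform device named "alchemy-pci", which the probe routine below consumes. The following board-side sketch is illustrative only and not part of this patch: the platdata fields and the driver name come from pci-alchemy.c, while the device/function names, the register window size, and the AU1500_PCI_PHYS_ADDR base are assumptions.

#include <linux/platform_device.h>
#include <asm/mach-au1x00/au1000.h>

/* hypothetical slot/pin to IRQ mapping supplied by the board */
static int board_pci_map_irq(const struct pci_dev *d, u8 slot, u8 pin)
{
	return -1;	/* board-specific lookup goes here */
}

static struct alchemy_pci_platdata board_pci_pd = {
	.board_map_irq	= board_pci_map_irq,
	/* .board_pci_idsel and .pci_cfg_set/.pci_cfg_clr are optional */
};

static struct resource board_pci_res[] = {
	{
		/* PCI controller register block; base/size assumed here */
		.start	= AU1500_PCI_PHYS_ADDR,
		.end	= AU1500_PCI_PHYS_ADDR + 0xfff,
		.flags	= IORESOURCE_MEM,
	},
};

static struct platform_device board_pci_dev = {
	.name			= "alchemy-pci",
	.id			= -1,
	.dev.platform_data	= &board_pci_pd,
	.num_resources		= ARRAY_SIZE(board_pci_res),
	.resource		= board_pci_res,
};

/* registered from the board's setup code */
static int __init board_register_pci(void)
{
	return platform_device_register(&board_pci_dev);
}
arch_initcall(board_register_pci);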
diff --git a/arch/mips/pci/pci-alchemy.c b/arch/mips/pci/pci-alchemy.c
new file mode 100644
index 000000000000..4ee57104e47b
--- /dev/null
+++ b/arch/mips/pci/pci-alchemy.c
@@ -0,0 +1,516 @@
/*
 * Alchemy PCI host mode support.
 *
 * Copyright 2001-2003, 2007-2008 MontaVista Software Inc.
 * Author: MontaVista Software, Inc. <source@mvista.com>
 *
 * Support for all devices (greater than 16) added by David Gathright.
 */

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/vmalloc.h>

#include <asm/mach-au1x00/au1000.h>

#ifdef CONFIG_DEBUG_PCI
#define DBG(x...) printk(KERN_DEBUG x)
#else
#define DBG(x...) do {} while (0)
#endif

#define PCI_ACCESS_READ		0
#define PCI_ACCESS_WRITE	1

struct alchemy_pci_context {
	struct pci_controller alchemy_pci_ctrl; /* leave as first member! */
	void __iomem *regs;		/* ctrl base */
	/* tools for wired entry for config space access */
	unsigned long last_elo0;
	unsigned long last_elo1;
	int wired_entry;
	struct vm_struct *pci_cfg_vm;

	unsigned long pm[12];

	int (*board_map_irq)(const struct pci_dev *d, u8 slot, u8 pin);
	int (*board_pci_idsel)(unsigned int devsel, int assert);
};

/* IO/MEM resources for PCI. Keep the memres in sync with __fixup_bigphys_addr
 * in arch/mips/alchemy/common/setup.c
 */
static struct resource alchemy_pci_def_memres = {
	.start	= ALCHEMY_PCI_MEMWIN_START,
	.end	= ALCHEMY_PCI_MEMWIN_END,
	.name	= "PCI memory space",
	.flags	= IORESOURCE_MEM
};

static struct resource alchemy_pci_def_iores = {
	.start	= ALCHEMY_PCI_IOWIN_START,
	.end	= ALCHEMY_PCI_IOWIN_END,
	.name	= "PCI IO space",
	.flags	= IORESOURCE_IO
};

static void mod_wired_entry(int entry, unsigned long entrylo0,
		unsigned long entrylo1, unsigned long entryhi,
		unsigned long pagemask)
{
	unsigned long old_pagemask;
	unsigned long old_ctx;

	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi() & 0xff;
	old_pagemask = read_c0_pagemask();
	write_c0_index(entry);
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	tlb_write_indexed();
	write_c0_entryhi(old_ctx);
	write_c0_pagemask(old_pagemask);
}

static void alchemy_pci_wired_entry(struct alchemy_pci_context *ctx)
{
	ctx->wired_entry = read_c0_wired();
	add_wired_entry(0, 0, (unsigned long)ctx->pci_cfg_vm->addr, PM_4K);
	ctx->last_elo0 = ctx->last_elo1 = ~0;
}

static int config_access(unsigned char access_type, struct pci_bus *bus,
			 unsigned int dev_fn, unsigned char where, u32 *data)
{
	struct alchemy_pci_context *ctx = bus->sysdata;
	unsigned int device = PCI_SLOT(dev_fn);
	unsigned int function = PCI_FUNC(dev_fn);
	unsigned long offset, status, cfg_base, flags, entryLo0, entryLo1, r;
	int error = PCIBIOS_SUCCESSFUL;

	if (device > 19) {
		*data = 0xffffffff;
		return -1;
	}

	/* YAMON on all db1xxx boards wipes the TLB and writes zero to C0_wired
	 * on resume, clearing our wired entry. Unfortunately the ->resume()
	 * callback is called way way way too late (and ->suspend() too early)
	 * to have them destroy and recreate it. Instead just test if c0_wired
	 * is now lower than the index we retrieved before suspending and then
	 * recreate the entry if necessary. Of course this is totally bonkers
	 * and breaks as soon as someone else adds another wired entry somewhere
	 * else. Anyone have any ideas how to handle this better?
	 */
	if (unlikely(read_c0_wired() < ctx->wired_entry))
		alchemy_pci_wired_entry(ctx);

	local_irq_save(flags);
	r = __raw_readl(ctx->regs + PCI_REG_STATCMD) & 0x0000ffff;
	r |= PCI_STATCMD_STATUS(0x2000);
	__raw_writel(r, ctx->regs + PCI_REG_STATCMD);
	wmb();

	/* Allow board vendors to implement their own off-chip IDSEL.
	 * If it doesn't succeed, may as well bail out at this point.
	 */
	if (ctx->board_pci_idsel(device, 1) == 0) {
		*data = 0xffffffff;
		local_irq_restore(flags);
		return -1;
	}

	/* Setup the config window */
	if (bus->number == 0)
		cfg_base = (1 << device) << 11;
	else
		cfg_base = 0x80000000 | (bus->number << 16) | (device << 11);

	/* Setup the lower bits of the 36-bit address */
	offset = (function << 8) | (where & ~0x3);
	/* Pick up any address that falls below the page mask */
	offset |= cfg_base & ~PAGE_MASK;

	/* Page boundary */
	cfg_base = cfg_base & PAGE_MASK;

	/* To improve performance, if the current device is the same as
	 * the last device accessed, we don't touch the TLB.
	 */
	entryLo0 = (6 << 26) | (cfg_base >> 6) | (2 << 3) | 7;
	entryLo1 = (6 << 26) | (cfg_base >> 6) | (0x1000 >> 6) | (2 << 3) | 7;
	if ((entryLo0 != ctx->last_elo0) || (entryLo1 != ctx->last_elo1)) {
		mod_wired_entry(ctx->wired_entry, entryLo0, entryLo1,
				(unsigned long)ctx->pci_cfg_vm->addr, PM_4K);
		ctx->last_elo0 = entryLo0;
		ctx->last_elo1 = entryLo1;
	}

	if (access_type == PCI_ACCESS_WRITE)
		__raw_writel(*data, ctx->pci_cfg_vm->addr + offset);
	else
		*data = __raw_readl(ctx->pci_cfg_vm->addr + offset);
	wmb();

	DBG("alchemy-pci: cfg access %d bus %u dev %u at %x dat %x conf %lx\n",
	    access_type, bus->number, device, where, *data, offset);

	/* check for errors, master abort */
	status = __raw_readl(ctx->regs + PCI_REG_STATCMD);
	if (status & (1 << 29)) {
		*data = 0xffffffff;
		error = -1;
		DBG("alchemy-pci: master abort on cfg access %d bus %d dev %d",
		    access_type, bus->number, device);
	} else if ((status >> 28) & 0xf) {
		DBG("alchemy-pci: PCI ERR detected: dev %d, status %lx\n",
		    device, (status >> 28) & 0xf);

		/* clear errors */
		__raw_writel(status & 0xf000ffff, ctx->regs + PCI_REG_STATCMD);

		*data = 0xffffffff;
		error = -1;
	}

	/* Take away the IDSEL. */
	(void)ctx->board_pci_idsel(device, 0);

	local_irq_restore(flags);
	return error;
}

static int read_config_byte(struct pci_bus *bus, unsigned int devfn,
			    int where, u8 *val)
{
	u32 data;
	int ret = config_access(PCI_ACCESS_READ, bus, devfn, where, &data);

	if (where & 1)
		data >>= 8;
	if (where & 2)
		data >>= 16;
	*val = data & 0xff;
	return ret;
}

static int read_config_word(struct pci_bus *bus, unsigned int devfn,
			    int where, u16 *val)
{
	u32 data;
	int ret = config_access(PCI_ACCESS_READ, bus, devfn, where, &data);

	if (where & 2)
		data >>= 16;
	*val = data & 0xffff;
	return ret;
}

static int read_config_dword(struct pci_bus *bus, unsigned int devfn,
			     int where, u32 *val)
{
	return config_access(PCI_ACCESS_READ, bus, devfn, where, val);
}

static int write_config_byte(struct pci_bus *bus, unsigned int devfn,
			     int where, u8 val)
{
	u32 data = 0;

	if (config_access(PCI_ACCESS_READ, bus, devfn, where, &data))
		return -1;

	data = (data & ~(0xff << ((where & 3) << 3))) |
	       (val << ((where & 3) << 3));

	if (config_access(PCI_ACCESS_WRITE, bus, devfn, where, &data))
		return -1;

	return PCIBIOS_SUCCESSFUL;
}

static int write_config_word(struct pci_bus *bus, unsigned int devfn,
			     int where, u16 val)
{
	u32 data = 0;

	if (config_access(PCI_ACCESS_READ, bus, devfn, where, &data))
		return -1;

	data = (data & ~(0xffff << ((where & 3) << 3))) |
	       (val << ((where & 3) << 3));

	if (config_access(PCI_ACCESS_WRITE, bus, devfn, where, &data))
		return -1;

	return PCIBIOS_SUCCESSFUL;
}

static int write_config_dword(struct pci_bus *bus, unsigned int devfn,
			      int where, u32 val)
{
	return config_access(PCI_ACCESS_WRITE, bus, devfn, where, &val);
}

static int alchemy_pci_read(struct pci_bus *bus, unsigned int devfn,
			    int where, int size, u32 *val)
{
	switch (size) {
	case 1: {
			u8 _val;
			int rc = read_config_byte(bus, devfn, where, &_val);

			*val = _val;
			return rc;
		}
	case 2: {
			u16 _val;
			int rc = read_config_word(bus, devfn, where, &_val);

			*val = _val;
			return rc;
		}
	default:
		return read_config_dword(bus, devfn, where, val);
	}
}

static int alchemy_pci_write(struct pci_bus *bus, unsigned int devfn,
			     int where, int size, u32 val)
{
	switch (size) {
	case 1:
		return write_config_byte(bus, devfn, where, (u8) val);
	case 2:
		return write_config_word(bus, devfn, where, (u16) val);
	default:
		return write_config_dword(bus, devfn, where, val);
	}
}

static struct pci_ops alchemy_pci_ops = {
	.read	= alchemy_pci_read,
	.write	= alchemy_pci_write,
};

static int alchemy_pci_def_idsel(unsigned int devsel, int assert)
{
	return 1;	/* success */
}

static int __devinit alchemy_pci_probe(struct platform_device *pdev)
{
	struct alchemy_pci_platdata *pd = pdev->dev.platform_data;
	struct alchemy_pci_context *ctx;
	void __iomem *virt_io;
	unsigned long val;
	struct resource *r;
	int ret;

	/* need at least PCI IRQ mapping table */
	if (!pd) {
		dev_err(&pdev->dev, "need platform data for PCI setup\n");
		ret = -ENODEV;
		goto out;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		dev_err(&pdev->dev, "no memory for pcictl context\n");
		ret = -ENOMEM;
		goto out;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(&pdev->dev, "no pcictl ctrl regs resource\n");
		ret = -ENODEV;
		goto out1;
	}

	if (!request_mem_region(r->start, resource_size(r), pdev->name)) {
		dev_err(&pdev->dev, "cannot claim pci regs\n");
		ret = -ENODEV;
		goto out1;
	}

	ctx->regs = ioremap_nocache(r->start, resource_size(r));
	if (!ctx->regs) {
		dev_err(&pdev->dev, "cannot map pci regs\n");
		ret = -ENODEV;
		goto out2;
	}

	/* map parts of the PCI IO area */
	/* REVISIT: if this changes with a newer variant (doubt it) make this
	 * a platform resource.
	 */
	virt_io = ioremap(AU1500_PCI_IO_PHYS_ADDR, 0x00100000);
	if (!virt_io) {
		dev_err(&pdev->dev, "cannot remap pci io space\n");
		ret = -ENODEV;
		goto out3;
	}
	ctx->alchemy_pci_ctrl.io_map_base = (unsigned long)virt_io;

#ifdef CONFIG_DMA_NONCOHERENT
	/* Au1500 revisions older than AD have borked coherent PCI */
	if ((alchemy_get_cputype() == ALCHEMY_CPU_AU1500) &&
	    (read_c0_prid() < 0x01030202)) {
		val = __raw_readl(ctx->regs + PCI_REG_CONFIG);
		val |= PCI_CONFIG_NC;
		__raw_writel(val, ctx->regs + PCI_REG_CONFIG);
		wmb();
		dev_info(&pdev->dev, "non-coherent PCI on Au1500 AA/AB/AC\n");
	}
#endif

	if (pd->board_map_irq)
		ctx->board_map_irq = pd->board_map_irq;

	if (pd->board_pci_idsel)
		ctx->board_pci_idsel = pd->board_pci_idsel;
	else
		ctx->board_pci_idsel = alchemy_pci_def_idsel;

	/* fill in relevant pci_controller members */
	ctx->alchemy_pci_ctrl.pci_ops = &alchemy_pci_ops;
	ctx->alchemy_pci_ctrl.mem_resource = &alchemy_pci_def_memres;
	ctx->alchemy_pci_ctrl.io_resource = &alchemy_pci_def_iores;

	/* we can't ioremap the entire pci config space because it's too large,
	 * nor can we dynamically ioremap it because some drivers use the
	 * PCI config routines from within atomic context and that becomes a
	 * problem in get_vm_area(). Instead we use one wired TLB entry to
	 * handle all config accesses for all busses.
	 */
	ctx->pci_cfg_vm = get_vm_area(0x2000, VM_IOREMAP);
	if (!ctx->pci_cfg_vm) {
		dev_err(&pdev->dev, "unable to get vm area\n");
		ret = -ENOMEM;
		goto out4;
	}
	ctx->wired_entry = 8192;	/* impossibly high value */

	set_io_port_base((unsigned long)ctx->alchemy_pci_ctrl.io_map_base);

	/* board may want to modify bits in the config register, do it now */
	val = __raw_readl(ctx->regs + PCI_REG_CONFIG);
	val &= ~pd->pci_cfg_clr;
	val |= pd->pci_cfg_set;
	val &= ~PCI_CONFIG_PD;		/* clear disable bit */
	__raw_writel(val, ctx->regs + PCI_REG_CONFIG);
	wmb();

	platform_set_drvdata(pdev, ctx);
	register_pci_controller(&ctx->alchemy_pci_ctrl);

	return 0;

out4:
	iounmap(virt_io);
out3:
	iounmap(ctx->regs);
out2:
	release_mem_region(r->start, resource_size(r));
out1:
	kfree(ctx);
out:
	return ret;
}


#ifdef CONFIG_PM
/* save PCI controller register contents. */
static int alchemy_pci_suspend(struct device *dev)
{
	struct alchemy_pci_context *ctx = dev_get_drvdata(dev);

	ctx->pm[0]  = __raw_readl(ctx->regs + PCI_REG_CMEM);
	ctx->pm[1]  = __raw_readl(ctx->regs + PCI_REG_CONFIG) & 0x0009ffff;
	ctx->pm[2]  = __raw_readl(ctx->regs + PCI_REG_B2BMASK_CCH);
	ctx->pm[3]  = __raw_readl(ctx->regs + PCI_REG_B2BBASE0_VID);
	ctx->pm[4]  = __raw_readl(ctx->regs + PCI_REG_B2BBASE1_SID);
	ctx->pm[5]  = __raw_readl(ctx->regs + PCI_REG_MWMASK_DEV);
	ctx->pm[6]  = __raw_readl(ctx->regs + PCI_REG_MWBASE_REV_CCL);
	ctx->pm[7]  = __raw_readl(ctx->regs + PCI_REG_ID);
	ctx->pm[8]  = __raw_readl(ctx->regs + PCI_REG_CLASSREV);
	ctx->pm[9]  = __raw_readl(ctx->regs + PCI_REG_PARAM);
	ctx->pm[10] = __raw_readl(ctx->regs + PCI_REG_MBAR);
	ctx->pm[11] = __raw_readl(ctx->regs + PCI_REG_TIMEOUT);

	return 0;
}

static int alchemy_pci_resume(struct device *dev)
{
	struct alchemy_pci_context *ctx = dev_get_drvdata(dev);

	__raw_writel(ctx->pm[0],  ctx->regs + PCI_REG_CMEM);
	__raw_writel(ctx->pm[2],  ctx->regs + PCI_REG_B2BMASK_CCH);
	__raw_writel(ctx->pm[3],  ctx->regs + PCI_REG_B2BBASE0_VID);
	__raw_writel(ctx->pm[4],  ctx->regs + PCI_REG_B2BBASE1_SID);
	__raw_writel(ctx->pm[5],  ctx->regs + PCI_REG_MWMASK_DEV);
	__raw_writel(ctx->pm[6],  ctx->regs + PCI_REG_MWBASE_REV_CCL);
	__raw_writel(ctx->pm[7],  ctx->regs + PCI_REG_ID);
	__raw_writel(ctx->pm[8],  ctx->regs + PCI_REG_CLASSREV);
	__raw_writel(ctx->pm[9],  ctx->regs + PCI_REG_PARAM);
	__raw_writel(ctx->pm[10], ctx->regs + PCI_REG_MBAR);
	__raw_writel(ctx->pm[11], ctx->regs + PCI_REG_TIMEOUT);
	wmb();
	__raw_writel(ctx->pm[1],  ctx->regs + PCI_REG_CONFIG);
	wmb();

	return 0;
}

static const struct dev_pm_ops alchemy_pci_pmops = {
	.suspend	= alchemy_pci_suspend,
	.resume		= alchemy_pci_resume,
};

#define ALCHEMY_PCICTL_PM	(&alchemy_pci_pmops)

#else
#define ALCHEMY_PCICTL_PM	NULL
#endif

static struct platform_driver alchemy_pcictl_driver = {
	.probe		= alchemy_pci_probe,
	.driver	= {
		.name	= "alchemy-pci",
		.owner	= THIS_MODULE,
		.pm	= ALCHEMY_PCICTL_PM,
	},
};

static int __init alchemy_pci_init(void)
{
	/* Au1500/Au1550 have PCI */
	switch (alchemy_get_cputype()) {
	case ALCHEMY_CPU_AU1500:
	case ALCHEMY_CPU_AU1550:
		return platform_driver_register(&alchemy_pcictl_driver);
	}
	return 0;
}
arch_initcall(alchemy_pci_init);


int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	struct alchemy_pci_context *ctx = dev->sysdata;
	if (ctx && ctx->board_map_irq)
		return ctx->board_map_irq(dev, slot, pin);
	return -1;
}

int pcibios_plat_dev_init(struct pci_dev *dev)
{
	return 0;
}
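
pcibios_map_irq() above simply defers to the board_map_irq callback supplied via platform data. A hypothetical board-side callback might look like the sketch below; the IDSEL numbers, slot layout and IRQ lines are invented for illustration only, while the callback signature matches the one used by this driver.

/* hypothetical board callback: map IDSEL/slot and INTA..INTD pin to an IRQ */
static int board_map_pci_irq(const struct pci_dev *d, u8 slot, u8 pin)
{
	/* one row per slot (IDSEL 12 and 13), one column per pin 1..4 */
	static const char irqmap[2][4] = {
		{ 3, 4, 5, 6 },		/* IDSEL 12 - PCI slot 0 */
		{ 4, 5, 6, 3 },		/* IDSEL 13 - PCI slot 1 */
	};

	if ((slot < 12) || (slot > 13) || (pin == 0) || (pin > 4))
		return -1;

	return irqmap[slot - 12][pin - 1];
}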