Diffstat (limited to 'arch/s390/pci/pci.c')
-rw-r--r--  arch/s390/pci/pci.c | 1103
1 file changed, 1103 insertions, 0 deletions
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
new file mode 100644
index 000000000000..7ed38e5e3028
--- /dev/null
+++ b/arch/s390/pci/pci.c
@@ -0,0 +1,1103 @@
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 *
 * The System z PCI code is a rewrite from a prototype by
 * the following people (Kudoz!):
 *   Alexander Schmidt
 *   Christoph Raisch
 *   Hannes Hering
 *   Hoang-Nam Nguyen
 *   Jan-Bernd Themann
 *   Stefan Roscher
 *   Thomas Klein
 */

#define COMPONENT "zPCI"
#define pr_fmt(fmt) COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/pci.h>
#include <linux/msi.h>

#include <asm/isc.h>
#include <asm/airq.h>
#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_clp.h>
#include <asm/pci_dma.h>

#define DEBUG                           /* enable pr_debug */

#define SIC_IRQ_MODE_ALL                0
#define SIC_IRQ_MODE_SINGLE             1

#define ZPCI_NR_DMA_SPACES              1
#define ZPCI_MSI_VEC_BITS               6
#define ZPCI_NR_DEVICES                 CONFIG_PCI_NR_FUNCTIONS

/* list of all detected zpci devices */
LIST_HEAD(zpci_list);
EXPORT_SYMBOL_GPL(zpci_list);
DEFINE_MUTEX(zpci_list_lock);
EXPORT_SYMBOL_GPL(zpci_list_lock);

struct pci_hp_callback_ops hotplug_ops;
EXPORT_SYMBOL_GPL(hotplug_ops);

static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
static DEFINE_SPINLOCK(zpci_domain_lock);

struct callback {
        irq_handler_t handler;
        void *data;
};

struct zdev_irq_map {
        unsigned long aibv;             /* AI bit vector */
        int msi_vecs;                   /* consecutive MSI-vectors used */
        int __unused;
        struct callback cb[ZPCI_NR_MSI_VECS]; /* callback handler array */
        spinlock_t lock;                /* protect callbacks against de-reg */
};

struct intr_bucket {
        /* amap of adapters, one bit per dev, corresponds to one irq nr */
        unsigned long *alloc;
        /* AI summary bit, global page for all devices */
        unsigned long *aisb;
        /* pointer to aibv and callback data in zdev */
        struct zdev_irq_map *imap[ZPCI_NR_DEVICES];
        /* protects the whole bucket struct */
        spinlock_t lock;
};

static struct intr_bucket *bucket;

/* Adapter local summary indicator */
static u8 *zpci_irq_si;

static atomic_t irq_retries = ATOMIC_INIT(0);

/* I/O Map */
static DEFINE_SPINLOCK(zpci_iomap_lock);
static DECLARE_BITMAP(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES);
struct zpci_iomap_entry *zpci_iomap_start;
EXPORT_SYMBOL_GPL(zpci_iomap_start);

/* highest irq summary bit */
static int __read_mostly aisb_max;

static struct kmem_cache *zdev_irq_cache;

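/*
 * Note on the IRQ number layout used by the helpers below: an interrupt
 * number encodes the device's adapter summary bit index in its upper bits
 * and the MSI vector number in its lower ZPCI_MSI_VEC_BITS bits, cf.
 * zpci_setup_msi() which assigns msi_nr = aisb << ZPCI_MSI_VEC_BITS and
 * counts up from there.
 */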
static inline int irq_to_msi_nr(unsigned int irq)
{
        return irq & ZPCI_MSI_MASK;
}

static inline int irq_to_dev_nr(unsigned int irq)
{
        return irq >> ZPCI_MSI_VEC_BITS;
}

static inline struct zdev_irq_map *get_imap(unsigned int irq)
{
        return bucket->imap[irq_to_dev_nr(irq)];
}

struct zpci_dev *get_zdev(struct pci_dev *pdev)
{
        return (struct zpci_dev *) pdev->sysdata;
}

struct zpci_dev *get_zdev_by_fid(u32 fid)
{
        struct zpci_dev *tmp, *zdev = NULL;

        mutex_lock(&zpci_list_lock);
        list_for_each_entry(tmp, &zpci_list, entry) {
                if (tmp->fid == fid) {
                        zdev = tmp;
                        break;
                }
        }
        mutex_unlock(&zpci_list_lock);
        return zdev;
}

bool zpci_fid_present(u32 fid)
{
        return (get_zdev_by_fid(fid) != NULL) ? true : false;
}

static struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus)
{
        return (bus && bus->sysdata) ? (struct zpci_dev *) bus->sysdata : NULL;
}

int pci_domain_nr(struct pci_bus *bus)
{
        return ((struct zpci_dev *) bus->sysdata)->domain;
}
EXPORT_SYMBOL_GPL(pci_domain_nr);

int pci_proc_domain(struct pci_bus *bus)
{
        return pci_domain_nr(bus);
}
EXPORT_SYMBOL_GPL(pci_proc_domain);

/* Store PCI function information block */
static int zpci_store_fib(struct zpci_dev *zdev, u8 *fc)
{
        struct zpci_fib *fib;
        u8 status, cc;

        fib = (void *) get_zeroed_page(GFP_KERNEL);
        if (!fib)
                return -ENOMEM;

        do {
                cc = __stpcifc(zdev->fh, 0, fib, &status);
                if (cc == 2) {
                        msleep(ZPCI_INSN_BUSY_DELAY);
                        memset(fib, 0, PAGE_SIZE);
                }
        } while (cc == 2);

        if (cc)
                pr_err_once("%s: cc: %u status: %u\n",
                            __func__, cc, status);

        /* Return PCI function controls */
        *fc = fib->fc;

        free_page((unsigned long) fib);
        return (cc) ? -EIO : 0;
}

/* Modify PCI: Register adapter interruptions */
static int zpci_register_airq(struct zpci_dev *zdev, unsigned int aisb,
                              u64 aibv)
{
        u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT);
        struct zpci_fib *fib;
        int rc;

        fib = (void *) get_zeroed_page(GFP_KERNEL);
        if (!fib)
                return -ENOMEM;

        fib->isc = PCI_ISC;
        fib->noi = zdev->irq_map->msi_vecs;
        fib->sum = 1;           /* enable summary notifications */
        fib->aibv = aibv;
        fib->aibvo = 0;         /* every function has its own page */
        fib->aisb = (u64) bucket->aisb + aisb / 8;
        fib->aisbo = aisb & ZPCI_MSI_MASK;

        rc = mpcifc_instr(req, fib);
        pr_debug("%s mpcifc returned noi: %d\n", __func__, fib->noi);

        free_page((unsigned long) fib);
        return rc;
}

struct mod_pci_args {
        u64 base;
        u64 limit;
        u64 iota;
};

static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args *args)
{
        u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, fn);
        struct zpci_fib *fib;
        int rc;

        /* The FIB must be available even if it's not used */
        fib = (void *) get_zeroed_page(GFP_KERNEL);
        if (!fib)
                return -ENOMEM;

        fib->pba = args->base;
        fib->pal = args->limit;
        fib->iota = args->iota;

        rc = mpcifc_instr(req, fib);
        free_page((unsigned long) fib);
        return rc;
}

/* Modify PCI: Register I/O address translation parameters */
int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
                       u64 base, u64 limit, u64 iota)
{
        struct mod_pci_args args = { base, limit, iota };

        WARN_ON_ONCE(iota & 0x3fff);
        args.iota |= ZPCI_IOTA_RTTO_FLAG;
        return mod_pci(zdev, ZPCI_MOD_FC_REG_IOAT, dmaas, &args);
}

/* Modify PCI: Unregister I/O address translation parameters */
int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
{
        struct mod_pci_args args = { 0, 0, 0 };

        return mod_pci(zdev, ZPCI_MOD_FC_DEREG_IOAT, dmaas, &args);
}

/* Modify PCI: Unregister adapter interruptions */
static int zpci_unregister_airq(struct zpci_dev *zdev)
{
        struct mod_pci_args args = { 0, 0, 0 };

        return mod_pci(zdev, ZPCI_MOD_FC_DEREG_INT, 0, &args);
}

#define ZPCI_PCIAS_CFGSPC       15

static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
{
        u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
        u64 data;
        int rc;

        rc = pcilg_instr(&data, req, offset);
        data = data << ((8 - len) * 8);
        data = le64_to_cpu(data);
        if (!rc)
                *val = (u32) data;
        else
                *val = 0xffffffff;
        return rc;
}

static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
{
        u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
        u64 data = val;
        int rc;

        data = cpu_to_le64(data);
        data = data >> ((8 - len) * 8);
        rc = pcistg_instr(data, req, offset);
        return rc;
}

void synchronize_irq(unsigned int irq)
{
        /*
         * Not needed, the handler is protected by a lock and IRQs that occur
         * after the handler is deleted are just NOPs.
         */
}
EXPORT_SYMBOL_GPL(synchronize_irq);

void enable_irq(unsigned int irq)
{
        struct msi_desc *msi = irq_get_msi_desc(irq);

        zpci_msi_set_mask_bits(msi, 1, 0);
}
EXPORT_SYMBOL_GPL(enable_irq);

void disable_irq(unsigned int irq)
{
        struct msi_desc *msi = irq_get_msi_desc(irq);

        zpci_msi_set_mask_bits(msi, 1, 1);
}
EXPORT_SYMBOL_GPL(disable_irq);

void disable_irq_nosync(unsigned int irq)
{
        disable_irq(irq);
}
EXPORT_SYMBOL_GPL(disable_irq_nosync);

unsigned long probe_irq_on(void)
{
        return 0;
}
EXPORT_SYMBOL_GPL(probe_irq_on);

int probe_irq_off(unsigned long val)
{
        return 0;
}
EXPORT_SYMBOL_GPL(probe_irq_off);

unsigned int probe_irq_mask(unsigned long val)
{
        return val;
}
EXPORT_SYMBOL_GPL(probe_irq_mask);

void __devinit pcibios_fixup_bus(struct pci_bus *bus)
{
}

resource_size_t pcibios_align_resource(void *data, const struct resource *res,
                                       resource_size_t size,
                                       resource_size_t align)
{
        return 0;
}

/* combine single writes by using store-block insn */
void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
        zpci_memcpy_toio(to, from, count);
}

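/*
 * PCI BARs are not memory mapped on s390. The cookie returned by pci_iomap()
 * below encodes an index into zpci_iomap_start[] in the bits above
 * ZPCI_IOMAP_ADDR_BASE; the entry records the function handle and BAR number
 * used by the s390 PCI access helpers.
 */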
/* Create a virtual mapping cookie for a PCI BAR */
void __iomem *pci_iomap(struct pci_dev *pdev, int bar, unsigned long max)
{
        struct zpci_dev *zdev = get_zdev(pdev);
        u64 addr;
        int idx;

        if ((bar & 7) != bar)
                return NULL;

        idx = zdev->bars[bar].map_idx;
        spin_lock(&zpci_iomap_lock);
        zpci_iomap_start[idx].fh = zdev->fh;
        zpci_iomap_start[idx].bar = bar;
        spin_unlock(&zpci_iomap_lock);

        addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48);
        return (void __iomem *) addr;
}
EXPORT_SYMBOL_GPL(pci_iomap);

void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
        unsigned int idx;

        idx = (((__force u64) addr) & ~ZPCI_IOMAP_ADDR_BASE) >> 48;
        spin_lock(&zpci_iomap_lock);
        zpci_iomap_start[idx].fh = 0;
        zpci_iomap_start[idx].bar = 0;
        spin_unlock(&zpci_iomap_lock);
}
EXPORT_SYMBOL_GPL(pci_iounmap);

static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
                    int size, u32 *val)
{
        struct zpci_dev *zdev = get_zdev_by_bus(bus);

        if (!zdev || devfn != ZPCI_DEVFN)
                return 0;
        return zpci_cfg_load(zdev, where, val, size);
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
                     int size, u32 val)
{
        struct zpci_dev *zdev = get_zdev_by_bus(bus);

        if (!zdev || devfn != ZPCI_DEVFN)
                return 0;
        return zpci_cfg_store(zdev, where, val, size);
}

static struct pci_ops pci_root_ops = {
        .read = pci_read,
        .write = pci_write,
};

/* store the last handled bit to implement fair scheduling of devices */
static DEFINE_PER_CPU(unsigned long, next_sbit);

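/*
 * Adapter interrupt handler: walk the global summary bit vector (aisb),
 * clear each pending summary bit and then walk that function's adapter
 * interruption bit vector (aibv), invoking the registered callback for
 * every pending MSI vector. Scanning starts at the per-cpu next_sbit for
 * fairness and is repeated once after re-enabling the ISC so that no
 * interrupt initiative is lost.
 */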
static void zpci_irq_handler(void *dont, void *need)
{
        unsigned long sbit, mbit, last = 0, start = __get_cpu_var(next_sbit);
        int rescan = 0, max = aisb_max;
        struct zdev_irq_map *imap;

        kstat_cpu(smp_processor_id()).irqs[IOINT_PCI]++;
        sbit = start;

scan:
        /* find summary_bit */
        for_each_set_bit_left_cont(sbit, bucket->aisb, max) {
                clear_bit(63 - (sbit & 63), bucket->aisb + (sbit >> 6));
                last = sbit;

                /* find vector bit */
                imap = bucket->imap[sbit];
                for_each_set_bit_left(mbit, &imap->aibv, imap->msi_vecs) {
                        kstat_cpu(smp_processor_id()).irqs[IOINT_MSI]++;
                        clear_bit(63 - mbit, &imap->aibv);

                        spin_lock(&imap->lock);
                        if (imap->cb[mbit].handler)
                                imap->cb[mbit].handler(mbit,
                                        imap->cb[mbit].data);
                        spin_unlock(&imap->lock);
                }
        }

        if (rescan)
                goto out;

        /* scan the skipped bits */
        if (start > 0) {
                sbit = 0;
                max = start;
                start = 0;
                goto scan;
        }

        /* enable interrupts again */
        sic_instr(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);

        /* check again to not lose initiative */
        rmb();
        max = aisb_max;
        sbit = find_first_bit_left(bucket->aisb, max);
        if (sbit != max) {
                atomic_inc(&irq_retries);
                rescan++;
                goto scan;
        }
out:
        /* store next device bit to scan */
        __get_cpu_var(next_sbit) = (++last >= aisb_max) ? 0 : last;
}

/* msi_vecs - number of requested interrupts, 0 place function to error state */
static int zpci_setup_msi(struct pci_dev *pdev, int msi_vecs)
{
        struct zpci_dev *zdev = get_zdev(pdev);
        unsigned int aisb, msi_nr;
        struct msi_desc *msi;
        int rc;

        /* store the number of used MSI vectors */
        zdev->irq_map->msi_vecs = min(msi_vecs, ZPCI_NR_MSI_VECS);

        spin_lock(&bucket->lock);
        aisb = find_first_zero_bit(bucket->alloc, PAGE_SIZE);
        /* alloc map exhausted? */
        if (aisb == PAGE_SIZE) {
                spin_unlock(&bucket->lock);
                return -EIO;
        }
        set_bit(aisb, bucket->alloc);
        spin_unlock(&bucket->lock);

        zdev->aisb = aisb;
        if (aisb + 1 > aisb_max)
                aisb_max = aisb + 1;

        /* wire up IRQ shortcut pointer */
        bucket->imap[zdev->aisb] = zdev->irq_map;
        pr_debug("%s: imap[%u] linked to %p\n", __func__, zdev->aisb, zdev->irq_map);

        /* TODO: irq number 0 wont be found if we return less than requested MSIs.
         * ignore it for now and fix in common code.
         */
        msi_nr = aisb << ZPCI_MSI_VEC_BITS;

        list_for_each_entry(msi, &pdev->msi_list, list) {
                rc = zpci_setup_msi_irq(zdev, msi, msi_nr,
                                        aisb << ZPCI_MSI_VEC_BITS);
                if (rc)
                        return rc;
                msi_nr++;
        }

        rc = zpci_register_airq(zdev, aisb, (u64) &zdev->irq_map->aibv);
        if (rc) {
                clear_bit(aisb, bucket->alloc);
                dev_err(&pdev->dev, "register MSI failed with: %d\n", rc);
                return rc;
        }
        return (zdev->irq_map->msi_vecs == msi_vecs) ?
                0 : zdev->irq_map->msi_vecs;
}

static void zpci_teardown_msi(struct pci_dev *pdev)
{
        struct zpci_dev *zdev = get_zdev(pdev);
        struct msi_desc *msi;
        int aisb, rc;

        rc = zpci_unregister_airq(zdev);
        if (rc) {
                dev_err(&pdev->dev, "deregister MSI failed with: %d\n", rc);
                return;
        }

        msi = list_first_entry(&pdev->msi_list, struct msi_desc, list);
        aisb = irq_to_dev_nr(msi->irq);

        list_for_each_entry(msi, &pdev->msi_list, list)
                zpci_teardown_msi_irq(zdev, msi);

        clear_bit(aisb, bucket->alloc);
        if (aisb + 1 == aisb_max)
                aisb_max--;
}

int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
        pr_debug("%s: requesting %d MSI-X interrupts...", __func__, nvec);
        if (type != PCI_CAP_ID_MSIX && type != PCI_CAP_ID_MSI)
                return -EINVAL;
        return zpci_setup_msi(pdev, nvec);
}

void arch_teardown_msi_irqs(struct pci_dev *pdev)
{
        pr_info("%s: on pdev: %p\n", __func__, pdev);
        zpci_teardown_msi(pdev);
}

static void zpci_map_resources(struct zpci_dev *zdev)
{
        struct pci_dev *pdev = zdev->pdev;
        resource_size_t len;
        int i;

        for (i = 0; i < PCI_BAR_COUNT; i++) {
                len = pci_resource_len(pdev, i);
                if (!len)
                        continue;
                pdev->resource[i].start = (resource_size_t) pci_iomap(pdev, i, 0);
                pdev->resource[i].end = pdev->resource[i].start + len - 1;
                pr_debug("BAR%i: -> start: %Lx end: %Lx\n",
                         i, pdev->resource[i].start, pdev->resource[i].end);
        }
};

static void zpci_unmap_resources(struct pci_dev *pdev)
{
        resource_size_t len;
        int i;

        for (i = 0; i < PCI_BAR_COUNT; i++) {
                len = pci_resource_len(pdev, i);
                if (!len)
                        continue;
                pci_iounmap(pdev, (void *) pdev->resource[i].start);
        }
};

struct zpci_dev *zpci_alloc_device(void)
{
        struct zpci_dev *zdev;

        /* Alloc memory for our private pci device data */
        zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
        if (!zdev)
                return ERR_PTR(-ENOMEM);

        /* Alloc aibv & callback space */
        zdev->irq_map = kmem_cache_zalloc(zdev_irq_cache, GFP_KERNEL);
        if (!zdev->irq_map)
                goto error;
        WARN_ON((u64) zdev->irq_map & 0xff);
        return zdev;

error:
        kfree(zdev);
        return ERR_PTR(-ENOMEM);
}

void zpci_free_device(struct zpci_dev *zdev)
{
        kmem_cache_free(zdev_irq_cache, zdev->irq_map);
        kfree(zdev);
}

/* Called on removal of pci_dev, leaves zpci and bus device */
static void zpci_remove_device(struct pci_dev *pdev)
{
        struct zpci_dev *zdev = get_zdev(pdev);

        dev_info(&pdev->dev, "Removing device %u\n", zdev->domain);
        zdev->state = ZPCI_FN_STATE_CONFIGURED;
        zpci_dma_exit_device(zdev);
        zpci_sysfs_remove_device(&pdev->dev);
        zpci_unmap_resources(pdev);
        list_del(&zdev->entry);         /* can be called from init */
        zdev->pdev = NULL;
}

static void zpci_scan_devices(void)
{
        struct zpci_dev *zdev;

        mutex_lock(&zpci_list_lock);
        list_for_each_entry(zdev, &zpci_list, entry)
                if (zdev->state == ZPCI_FN_STATE_CONFIGURED)
                        zpci_scan_device(zdev);
        mutex_unlock(&zpci_list_lock);
}

/*
 * Too late for any s390 specific setup, since interrupts must be set up
 * already which requires DMA setup too and the pci scan will access the
 * config space, which only works if the function handle is enabled.
 */
int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
        struct resource *res;
        u16 cmd;
        int i;

        pci_read_config_word(pdev, PCI_COMMAND, &cmd);

        for (i = 0; i < PCI_BAR_COUNT; i++) {
                res = &pdev->resource[i];

                if (res->flags & IORESOURCE_IO)
                        return -EINVAL;

                if (res->flags & IORESOURCE_MEM)
                        cmd |= PCI_COMMAND_MEMORY;
        }
        pci_write_config_word(pdev, PCI_COMMAND, cmd);
        return 0;
}

void pcibios_disable_device(struct pci_dev *pdev)
{
        zpci_remove_device(pdev);
        pdev->sysdata = NULL;
}

int pcibios_add_platform_entries(struct pci_dev *pdev)
{
        return zpci_sysfs_add_device(&pdev->dev);
}

int zpci_request_irq(unsigned int irq, irq_handler_t handler, void *data)
{
        int msi_nr = irq_to_msi_nr(irq);
        struct zdev_irq_map *imap;
        struct msi_desc *msi;

        msi = irq_get_msi_desc(irq);
        if (!msi)
                return -EIO;

        imap = get_imap(irq);
        spin_lock_init(&imap->lock);

        pr_debug("%s: register handler for IRQ:MSI %d:%d\n", __func__, irq >> 6, msi_nr);
        imap->cb[msi_nr].handler = handler;
        imap->cb[msi_nr].data = data;

        /*
         * The generic MSI code returns with the interrupt disabled on the
         * card, using the MSI mask bits. Firmware doesn't appear to unmask
         * at that level, so we do it here by hand.
         */
        zpci_msi_set_mask_bits(msi, 1, 0);
        return 0;
}

void zpci_free_irq(unsigned int irq)
{
        struct zdev_irq_map *imap = get_imap(irq);
        int msi_nr = irq_to_msi_nr(irq);
        unsigned long flags;

        pr_debug("%s: for irq: %d\n", __func__, irq);

        spin_lock_irqsave(&imap->lock, flags);
        imap->cb[msi_nr].handler = NULL;
        imap->cb[msi_nr].data = NULL;
        spin_unlock_irqrestore(&imap->lock, flags);
}

int request_irq(unsigned int irq, irq_handler_t handler,
                unsigned long irqflags, const char *devname, void *dev_id)
{
        pr_debug("%s: irq: %d handler: %p flags: %lx dev: %s\n",
                 __func__, irq, handler, irqflags, devname);

        return zpci_request_irq(irq, handler, dev_id);
}
EXPORT_SYMBOL_GPL(request_irq);

void free_irq(unsigned int irq, void *dev_id)
{
        zpci_free_irq(irq);
}
EXPORT_SYMBOL_GPL(free_irq);

static int __init zpci_irq_init(void)
{
        int cpu, rc;

        bucket = kzalloc(sizeof(*bucket), GFP_KERNEL);
        if (!bucket)
                return -ENOMEM;

        bucket->aisb = (unsigned long *) get_zeroed_page(GFP_KERNEL);
        if (!bucket->aisb) {
                rc = -ENOMEM;
                goto out_aisb;
        }

        bucket->alloc = (unsigned long *) get_zeroed_page(GFP_KERNEL);
        if (!bucket->alloc) {
                rc = -ENOMEM;
                goto out_alloc;
        }

        isc_register(PCI_ISC);
        zpci_irq_si = s390_register_adapter_interrupt(&zpci_irq_handler, NULL, PCI_ISC);
        if (IS_ERR(zpci_irq_si)) {
                rc = PTR_ERR(zpci_irq_si);
                zpci_irq_si = NULL;
                goto out_ai;
        }

        for_each_online_cpu(cpu)
                per_cpu(next_sbit, cpu) = 0;

        spin_lock_init(&bucket->lock);
        /* set summary to 1 to be called every time for the ISC */
        *zpci_irq_si = 1;
        sic_instr(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
        return 0;

out_ai:
        isc_unregister(PCI_ISC);
        free_page((unsigned long) bucket->alloc);
out_alloc:
        free_page((unsigned long) bucket->aisb);
out_aisb:
        kfree(bucket);
        return rc;
}

static void zpci_irq_exit(void)
{
        free_page((unsigned long) bucket->alloc);
        free_page((unsigned long) bucket->aisb);
        s390_unregister_adapter_interrupt(zpci_irq_si, PCI_ISC);
        isc_unregister(PCI_ISC);
        kfree(bucket);
}

static struct resource *zpci_alloc_bus_resource(unsigned long start, unsigned long size,
                                                unsigned long flags, int domain)
{
        struct resource *r;
        char *name;
        int rc;

        r = kzalloc(sizeof(*r), GFP_KERNEL);
        if (!r)
                return ERR_PTR(-ENOMEM);
        r->start = start;
        r->end = r->start + size - 1;
        r->flags = flags;
        r->parent = &iomem_resource;
        name = kmalloc(18, GFP_KERNEL);
        if (!name) {
                kfree(r);
                return ERR_PTR(-ENOMEM);
        }
        sprintf(name, "PCI Bus: %04x:%02x", domain, ZPCI_BUS_NR);
        r->name = name;

        rc = request_resource(&iomem_resource, r);
        if (rc)
                pr_debug("request resource %pR failed\n", r);
        return r;
}

static int zpci_alloc_iomap(struct zpci_dev *zdev)
{
        int entry;

        spin_lock(&zpci_iomap_lock);
        entry = find_first_zero_bit(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES);
        if (entry == ZPCI_IOMAP_MAX_ENTRIES) {
                spin_unlock(&zpci_iomap_lock);
                return -ENOSPC;
        }
        set_bit(entry, zpci_iomap);
        spin_unlock(&zpci_iomap_lock);
        return entry;
}

static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
{
        spin_lock(&zpci_iomap_lock);
        memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
        clear_bit(entry, zpci_iomap);
        spin_unlock(&zpci_iomap_lock);
}

static int zpci_create_device_bus(struct zpci_dev *zdev)
{
        struct resource *res;
        LIST_HEAD(resources);
        int i;

        /* allocate mapping entry for each used bar */
        for (i = 0; i < PCI_BAR_COUNT; i++) {
                unsigned long addr, size, flags;
                int entry;

                if (!zdev->bars[i].size)
                        continue;
                entry = zpci_alloc_iomap(zdev);
                if (entry < 0)
                        return entry;
                zdev->bars[i].map_idx = entry;

                /* only MMIO is supported */
                flags = IORESOURCE_MEM;
                if (zdev->bars[i].val & 8)
                        flags |= IORESOURCE_PREFETCH;
                if (zdev->bars[i].val & 4)
                        flags |= IORESOURCE_MEM_64;

                addr = ZPCI_IOMAP_ADDR_BASE + ((u64) entry << 48);

                size = 1UL << zdev->bars[i].size;

                res = zpci_alloc_bus_resource(addr, size, flags, zdev->domain);
                if (IS_ERR(res)) {
                        zpci_free_iomap(zdev, entry);
                        return PTR_ERR(res);
                }
                pci_add_resource(&resources, res);
        }

        zdev->bus = pci_create_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops,
                                        zdev, &resources);
        if (!zdev->bus)
                return -EIO;

        zdev->bus->max_bus_speed = zdev->max_bus_speed;
        return 0;
}

static int zpci_alloc_domain(struct zpci_dev *zdev)
{
        spin_lock(&zpci_domain_lock);
        zdev->domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
        if (zdev->domain == ZPCI_NR_DEVICES) {
                spin_unlock(&zpci_domain_lock);
                return -ENOSPC;
        }
        set_bit(zdev->domain, zpci_domain);
        spin_unlock(&zpci_domain_lock);
        return 0;
}

static void zpci_free_domain(struct zpci_dev *zdev)
{
        spin_lock(&zpci_domain_lock);
        clear_bit(zdev->domain, zpci_domain);
        spin_unlock(&zpci_domain_lock);
}

int zpci_enable_device(struct zpci_dev *zdev)
{
        int rc;

        rc = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
        if (rc)
                goto out;
        pr_info("Enabled fh: 0x%x fid: 0x%x\n", zdev->fh, zdev->fid);

        rc = zpci_dma_init_device(zdev);
        if (rc)
                goto out_dma;
        return 0;

out_dma:
        clp_disable_fh(zdev);
out:
        return rc;
}
EXPORT_SYMBOL_GPL(zpci_enable_device);

int zpci_create_device(struct zpci_dev *zdev)
{
        int rc;

        rc = zpci_alloc_domain(zdev);
        if (rc)
                goto out;

        rc = zpci_create_device_bus(zdev);
        if (rc)
                goto out_bus;

        mutex_lock(&zpci_list_lock);
        list_add_tail(&zdev->entry, &zpci_list);
        if (hotplug_ops.create_slot)
                hotplug_ops.create_slot(zdev);
        mutex_unlock(&zpci_list_lock);

        if (zdev->state == ZPCI_FN_STATE_STANDBY)
                return 0;

        rc = zpci_enable_device(zdev);
        if (rc)
                goto out_start;
        return 0;

out_start:
        mutex_lock(&zpci_list_lock);
        list_del(&zdev->entry);
        if (hotplug_ops.remove_slot)
                hotplug_ops.remove_slot(zdev);
        mutex_unlock(&zpci_list_lock);
out_bus:
        zpci_free_domain(zdev);
out:
        return rc;
}

void zpci_stop_device(struct zpci_dev *zdev)
{
        zpci_dma_exit_device(zdev);
        /*
         * Note: SCLP disables fh via set-pci-fn so don't
         * do that here.
         */
}
EXPORT_SYMBOL_GPL(zpci_stop_device);

int zpci_scan_device(struct zpci_dev *zdev)
{
        zdev->pdev = pci_scan_single_device(zdev->bus, ZPCI_DEVFN);
        if (!zdev->pdev) {
                pr_err("pci_scan_single_device failed for fid: 0x%x\n",
                       zdev->fid);
                goto out;
        }

        zpci_map_resources(zdev);
        pci_bus_add_devices(zdev->bus);

        /* now that pdev was added to the bus mark it as used */
        zdev->state = ZPCI_FN_STATE_ONLINE;
        return 0;

out:
        zpci_dma_exit_device(zdev);
        clp_disable_fh(zdev);
        return -EIO;
}
EXPORT_SYMBOL_GPL(zpci_scan_device);

static inline int barsize(u8 size)
{
        return (size) ? (1 << size) >> 10 : 0;
}

static int zpci_mem_init(void)
{
        zdev_irq_cache = kmem_cache_create("PCI_IRQ_cache", sizeof(struct zdev_irq_map),
                                           L1_CACHE_BYTES, SLAB_HWCACHE_ALIGN, NULL);
        if (!zdev_irq_cache)
                goto error_zdev;

        /* TODO: use realloc */
        zpci_iomap_start = kzalloc(ZPCI_IOMAP_MAX_ENTRIES * sizeof(*zpci_iomap_start),
                                   GFP_KERNEL);
        if (!zpci_iomap_start)
                goto error_iomap;
        return 0;

error_iomap:
        kmem_cache_destroy(zdev_irq_cache);
error_zdev:
        return -ENOMEM;
}

static void zpci_mem_exit(void)
{
        kfree(zpci_iomap_start);
        kmem_cache_destroy(zdev_irq_cache);
}

unsigned int pci_probe = 1;
EXPORT_SYMBOL_GPL(pci_probe);

char * __init pcibios_setup(char *str)
{
        if (!strcmp(str, "off")) {
                pci_probe = 0;
                return NULL;
        }
        return str;
}

static int __init pci_base_init(void)
{
        int rc;

        if (!pci_probe)
                return 0;

        if (!test_facility(2) || !test_facility(69)
            || !test_facility(71) || !test_facility(72))
                return 0;

        pr_info("Probing PCI hardware: PCI:%d SID:%d AEN:%d\n",
                test_facility(69), test_facility(70),
                test_facility(71));

        rc = zpci_mem_init();
        if (rc)
                goto out_mem;

        rc = zpci_msihash_init();
        if (rc)
                goto out_hash;

        rc = zpci_irq_init();
        if (rc)
                goto out_irq;

        rc = zpci_dma_init();
        if (rc)
                goto out_dma;

        rc = clp_find_pci_devices();
        if (rc)
                goto out_find;

        zpci_scan_devices();
        return 0;

out_find:
        zpci_dma_exit();
out_dma:
        zpci_irq_exit();
out_irq:
        zpci_msihash_exit();
out_hash:
        zpci_mem_exit();
out_mem:
        return rc;
}
subsys_initcall(pci_base_init);