Diffstat (limited to 'drivers/pci/host/pcie-designware.c')
-rw-r--r--	drivers/pci/host/pcie-designware.c	268
1 file changed, 100 insertions(+), 168 deletions(-)
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index 52bd3a143563..dfed00aa3ac0 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -73,6 +73,8 @@ static unsigned long global_io_offset;
 
 static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys)
 {
+	BUG_ON(!sys->private_data);
+
 	return sys->private_data;
 }
 
@@ -194,30 +196,6 @@ void dw_pcie_msi_init(struct pcie_port *pp)
 	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4, 0);
 }
 
-static int find_valid_pos0(struct pcie_port *pp, int msgvec, int pos, int *pos0)
-{
-	int flag = 1;
-
-	do {
-		pos = find_next_zero_bit(pp->msi_irq_in_use,
-				MAX_MSI_IRQS, pos);
-		/*if you have reached to the end then get out from here.*/
-		if (pos == MAX_MSI_IRQS)
-			return -ENOSPC;
-		/*
-		 * Check if this position is at correct offset.nvec is always a
-		 * power of two. pos0 must be nvec bit aligned.
-		 */
-		if (pos % msgvec)
-			pos += msgvec - (pos % msgvec);
-		else
-			flag = 0;
-	} while (flag);
-
-	*pos0 = pos;
-	return 0;
-}
-
 static void dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
 {
 	unsigned int res, bit, val;
@@ -236,13 +214,14 @@ static void clear_irq_range(struct pcie_port *pp, unsigned int irq_base,
 
 	for (i = 0; i < nvec; i++) {
 		irq_set_msi_desc_off(irq_base, i, NULL);
-		clear_bit(pos + i, pp->msi_irq_in_use);
 		/* Disable corresponding interrupt on MSI controller */
 		if (pp->ops->msi_clear_irq)
 			pp->ops->msi_clear_irq(pp, pos + i);
 		else
 			dw_pcie_msi_clear_irq(pp, pos + i);
 	}
+
+	bitmap_release_region(pp->msi_irq_in_use, pos, order_base_2(nvec));
 }
 
 static void dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
@@ -258,31 +237,13 @@ static void dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
 
 static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
 {
-	int irq, pos0, pos1, i;
+	int irq, pos0, i;
 	struct pcie_port *pp = sys_to_pcie(desc->dev->bus->sysdata);
 
-	if (!pp) {
-		BUG();
-		return -EINVAL;
-	}
-
-	pos0 = find_first_zero_bit(pp->msi_irq_in_use,
-			MAX_MSI_IRQS);
-	if (pos0 % no_irqs) {
-		if (find_valid_pos0(pp, no_irqs, pos0, &pos0))
-			goto no_valid_irq;
-	}
-	if (no_irqs > 1) {
-		pos1 = find_next_bit(pp->msi_irq_in_use,
-				MAX_MSI_IRQS, pos0);
-		/* there must be nvec number of consecutive free bits */
-		while ((pos1 - pos0) < no_irqs) {
-			if (find_valid_pos0(pp, no_irqs, pos1, &pos0))
-				goto no_valid_irq;
-			pos1 = find_next_bit(pp->msi_irq_in_use,
-					MAX_MSI_IRQS, pos0);
-		}
-	}
+	pos0 = bitmap_find_free_region(pp->msi_irq_in_use, MAX_MSI_IRQS,
+				       order_base_2(no_irqs));
+	if (pos0 < 0)
+		goto no_valid_irq;
 
 	irq = irq_find_mapping(pp->irq_domain, pos0);
 	if (!irq)
@@ -300,7 +261,6 @@ static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
 			clear_irq_range(pp, irq, i, pos0);
 			goto no_valid_irq;
 		}
-		set_bit(pos0 + i, pp->msi_irq_in_use);
 		/*Enable corresponding interrupt in MSI interrupt controller */
 		if (pp->ops->msi_set_irq)
 			pp->ops->msi_set_irq(pp, pos0 + i);
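The hunks above, together with the clear_irq_range() change earlier, replace the driver's hand-rolled search for a naturally aligned run of free MSI vectors (find_valid_pos0() plus find_first_zero_bit()/find_next_bit()/set_bit()/clear_bit()) with the generic bitmap_find_free_region()/bitmap_release_region() helpers, which reserve and release a power-of-two sized, naturally aligned region in one call. The userspace sketch below only models that alignment rule so the change is easier to follow; find_free_region() here is a hypothetical stand-in, not the kernel implementation.

#include <stdio.h>

#define MAX_MSI_IRQS 32

/*
 * Hypothetical userspace model of bitmap_find_free_region(): find a free,
 * naturally aligned run of (1 << order) bits in a 32-bit "in use" mask and
 * mark it busy.  Returns the starting bit, or -1 if no such region exists.
 */
static int find_free_region(unsigned int *in_use, int order)
{
	unsigned int nvec = 1u << order;
	unsigned int mask = (nvec >= 32) ? ~0u : ((1u << nvec) - 1u);
	unsigned int pos;

	for (pos = 0; pos + nvec <= MAX_MSI_IRQS; pos += nvec) {
		if (!(*in_use & (mask << pos))) {
			*in_use |= mask << pos;	/* reserve the region */
			return (int)pos;
		}
	}
	return -1;
}

int main(void)
{
	unsigned int in_use = 0;

	printf("%d\n", find_free_region(&in_use, 0));	/* 0: one vector */
	printf("%d\n", find_free_region(&in_use, 2));	/* 4: four vectors, 4-aligned */
	printf("%d\n", find_free_region(&in_use, 0));	/* 1: fills the gap */
	return 0;
}

It prints 0, 4, 1: the four-vector request skips the partially used first naturally aligned slot and lands on the next 4-aligned boundary, which is the guarantee assign_irq() now gets from order_base_2(no_irqs).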
@@ -316,69 +276,28 @@ no_valid_irq:
 	return -ENOSPC;
 }
 
-static void clear_irq(unsigned int irq)
-{
-	unsigned int pos, nvec;
-	struct msi_desc *msi;
-	struct pcie_port *pp;
-	struct irq_data *data = irq_get_irq_data(irq);
-
-	/* get the port structure */
-	msi = irq_data_get_msi(data);
-	pp = sys_to_pcie(msi->dev->bus->sysdata);
-	if (!pp) {
-		BUG();
-		return;
-	}
-
-	/* undo what was done in assign_irq */
-	pos = data->hwirq;
-	nvec = 1 << msi->msi_attrib.multiple;
-
-	clear_irq_range(pp, irq, nvec, pos);
-
-	/* all irqs cleared; reset attributes */
-	msi->irq = 0;
-	msi->msi_attrib.multiple = 0;
-}
-
 static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
 			struct msi_desc *desc)
 {
-	int irq, pos, msgvec;
-	u16 msg_ctr;
+	int irq, pos;
 	struct msi_msg msg;
 	struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata);
 
-	if (!pp) {
-		BUG();
-		return -EINVAL;
-	}
-
-	pci_read_config_word(pdev, desc->msi_attrib.pos+PCI_MSI_FLAGS,
-				&msg_ctr);
-	msgvec = (msg_ctr&PCI_MSI_FLAGS_QSIZE) >> 4;
-	if (msgvec == 0)
-		msgvec = (msg_ctr & PCI_MSI_FLAGS_QMASK) >> 1;
-	if (msgvec > 5)
-		msgvec = 0;
-
-	irq = assign_irq((1 << msgvec), desc, &pos);
+	irq = assign_irq(1, desc, &pos);
 	if (irq < 0)
 		return irq;
 
-	/*
-	 * write_msi_msg() will update PCI_MSI_FLAGS so there is
-	 * no need to explicitly call pci_write_config_word().
-	 */
-	desc->msi_attrib.multiple = msgvec;
-
-	if (pp->ops->get_msi_data)
-		msg.address_lo = pp->ops->get_msi_data(pp);
+	if (pp->ops->get_msi_addr)
+		msg.address_lo = pp->ops->get_msi_addr(pp);
 	else
 		msg.address_lo = virt_to_phys((void *)pp->msi_data);
 	msg.address_hi = 0x0;
-	msg.data = pos;
+
+	if (pp->ops->get_msi_data)
+		msg.data = pp->ops->get_msi_data(pp, pos);
+	else
+		msg.data = pos;
+
 	write_msi_msg(irq, &msg);
 
 	return 0;
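The rewritten dw_msi_setup_irq() above lets the platform driver decide, through optional callbacks on pp->ops, both the address an endpoint should write its MSI to (get_msi_addr) and the data payload that encodes the vector (get_msi_data), falling back to the generic msi_data page and the raw position otherwise. A hedged sketch of how a platform driver might wire those hooks follows, assuming the ops structure is the driver's struct pcie_host_ops and that both callbacks return u32 as the assignments above imply; the foo_* names and constants are invented for illustration.

/*
 * Hypothetical platform glue: only the .get_msi_addr/.get_msi_data fields
 * come from the hunk above, everything named foo_* is an invented example.
 */
static u32 foo_pcie_get_msi_addr(struct pcie_port *pp)
{
	/* bus address the endpoint writes its MSI to (made-up doorbell) */
	return 0x40000000;
}

static u32 foo_pcie_get_msi_data(struct pcie_port *pp, int pos)
{
	/* payload the controller decodes back into the vector number */
	return 0x100 + pos;
}

static struct pcie_host_ops foo_pcie_host_ops = {
	.get_msi_addr	= foo_pcie_get_msi_addr,
	.get_msi_data	= foo_pcie_get_msi_data,
};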
@@ -386,7 +305,11 @@ static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
 
 static void dw_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
 {
-	clear_irq(irq);
+	struct irq_data *data = irq_get_irq_data(irq);
+	struct msi_desc *msi = irq_data_get_msi(data);
+	struct pcie_port *pp = sys_to_pcie(msi->dev->bus->sysdata);
+
+	clear_irq_range(pp, irq, 1, data->hwirq);
 }
 
 static struct msi_chip dw_pcie_msi_chip = {
@@ -425,7 +348,7 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
 	struct resource *cfg_res;
 	u32 val, na, ns;
 	const __be32 *addrp;
-	int i, index;
+	int i, index, ret;
 
 	/* Find the address cell size and the number of cells in order to get
 	 * the untranslated address.
@@ -435,16 +358,16 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
 
 	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
 	if (cfg_res) {
-		pp->config.cfg0_size = resource_size(cfg_res)/2;
-		pp->config.cfg1_size = resource_size(cfg_res)/2;
+		pp->cfg0_size = resource_size(cfg_res)/2;
+		pp->cfg1_size = resource_size(cfg_res)/2;
 		pp->cfg0_base = cfg_res->start;
-		pp->cfg1_base = cfg_res->start + pp->config.cfg0_size;
+		pp->cfg1_base = cfg_res->start + pp->cfg0_size;
 
 		/* Find the untranslated configuration space address */
 		index = of_property_match_string(np, "reg-names", "config");
-		addrp = of_get_address(np, index, false, false);
+		addrp = of_get_address(np, index, NULL, NULL);
 		pp->cfg0_mod_base = of_read_number(addrp, ns);
-		pp->cfg1_mod_base = pp->cfg0_mod_base + pp->config.cfg0_size;
+		pp->cfg1_mod_base = pp->cfg0_mod_base + pp->cfg0_size;
 	} else {
 		dev_err(pp->dev, "missing *config* reg space\n");
 	}
@@ -466,9 +389,9 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
 			pp->io.end = min_t(resource_size_t,
 					   IO_SPACE_LIMIT,
 					   range.pci_addr + range.size
-					   + global_io_offset);
-			pp->config.io_size = resource_size(&pp->io);
-			pp->config.io_bus_addr = range.pci_addr;
+					   + global_io_offset - 1);
+			pp->io_size = resource_size(&pp->io);
+			pp->io_bus_addr = range.pci_addr;
 			pp->io_base = range.cpu_addr;
 
 			/* Find the untranslated IO space address */
@@ -478,8 +401,8 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
 		if (restype == IORESOURCE_MEM) {
 			of_pci_range_to_resource(&range, np, &pp->mem);
 			pp->mem.name = "MEM";
-			pp->config.mem_size = resource_size(&pp->mem);
-			pp->config.mem_bus_addr = range.pci_addr;
+			pp->mem_size = resource_size(&pp->mem);
+			pp->mem_bus_addr = range.pci_addr;
 
 			/* Find the untranslated MEM space address */
 			pp->mem_mod_base = of_read_number(parser.range -
@@ -487,19 +410,29 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
 		}
 		if (restype == 0) {
 			of_pci_range_to_resource(&range, np, &pp->cfg);
-			pp->config.cfg0_size = resource_size(&pp->cfg)/2;
-			pp->config.cfg1_size = resource_size(&pp->cfg)/2;
+			pp->cfg0_size = resource_size(&pp->cfg)/2;
+			pp->cfg1_size = resource_size(&pp->cfg)/2;
 			pp->cfg0_base = pp->cfg.start;
-			pp->cfg1_base = pp->cfg.start + pp->config.cfg0_size;
+			pp->cfg1_base = pp->cfg.start + pp->cfg0_size;
 
 			/* Find the untranslated configuration space address */
 			pp->cfg0_mod_base = of_read_number(parser.range -
 					parser.np + na, ns);
 			pp->cfg1_mod_base = pp->cfg0_mod_base +
-					pp->config.cfg0_size;
+					pp->cfg0_size;
 		}
 	}
 
+	ret = of_pci_parse_bus_range(np, &pp->busn);
+	if (ret < 0) {
+		pp->busn.name = np->name;
+		pp->busn.start = 0;
+		pp->busn.end = 0xff;
+		pp->busn.flags = IORESOURCE_BUS;
+		dev_dbg(pp->dev, "failed to parse bus-range property: %d, using default %pR\n",
+			ret, &pp->busn);
+	}
+
 	if (!pp->dbi_base) {
 		pp->dbi_base = devm_ioremap(pp->dev, pp->cfg.start,
 					resource_size(&pp->cfg));
@@ -511,17 +444,22 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
 
 	pp->mem_base = pp->mem.start;
 
-	pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base,
-					pp->config.cfg0_size);
 	if (!pp->va_cfg0_base) {
-		dev_err(pp->dev, "error with ioremap in function\n");
-		return -ENOMEM;
+		pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base,
+						pp->cfg0_size);
+		if (!pp->va_cfg0_base) {
+			dev_err(pp->dev, "error with ioremap in function\n");
+			return -ENOMEM;
+		}
 	}
-	pp->va_cfg1_base = devm_ioremap(pp->dev, pp->cfg1_base,
-					pp->config.cfg1_size);
+
 	if (!pp->va_cfg1_base) {
-		dev_err(pp->dev, "error with ioremap\n");
-		return -ENOMEM;
+		pp->va_cfg1_base = devm_ioremap(pp->dev, pp->cfg1_base,
+						pp->cfg1_size);
+		if (!pp->va_cfg1_base) {
+			dev_err(pp->dev, "error with ioremap\n");
+			return -ENOMEM;
+		}
 	}
 
 	if (of_property_read_u32(np, "num-lanes", &pp->lanes)) {
@@ -530,16 +468,22 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
 	}
 
 	if (IS_ENABLED(CONFIG_PCI_MSI)) {
-		pp->irq_domain = irq_domain_add_linear(pp->dev->of_node,
-					MAX_MSI_IRQS, &msi_domain_ops,
-					&dw_pcie_msi_chip);
-		if (!pp->irq_domain) {
-			dev_err(pp->dev, "irq domain init failed\n");
-			return -ENXIO;
-		}
+		if (!pp->ops->msi_host_init) {
+			pp->irq_domain = irq_domain_add_linear(pp->dev->of_node,
+						MAX_MSI_IRQS, &msi_domain_ops,
+						&dw_pcie_msi_chip);
+			if (!pp->irq_domain) {
+				dev_err(pp->dev, "irq domain init failed\n");
+				return -ENXIO;
+			}
 
-		for (i = 0; i < MAX_MSI_IRQS; i++)
-			irq_create_mapping(pp->irq_domain, i);
+			for (i = 0; i < MAX_MSI_IRQS; i++)
+				irq_create_mapping(pp->irq_domain, i);
+		} else {
+			ret = pp->ops->msi_host_init(pp, &dw_pcie_msi_chip);
+			if (ret < 0)
+				return ret;
+		}
 	}
 
 	if (pp->ops->host_init)
@@ -558,7 +502,6 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
 	dw_pci.private_data = (void **)&pp;
 
 	pci_common_init_dev(pp->dev, &dw_pci);
-	pci_assign_unassigned_resources();
 #ifdef CONFIG_PCI_DOMAINS
 	dw_pci.domain++;
 #endif
@@ -573,7 +516,7 @@ static void dw_pcie_prog_viewport_cfg0(struct pcie_port *pp, u32 busdev)
 			  PCIE_ATU_VIEWPORT);
 	dw_pcie_writel_rc(pp, pp->cfg0_mod_base, PCIE_ATU_LOWER_BASE);
 	dw_pcie_writel_rc(pp, (pp->cfg0_mod_base >> 32), PCIE_ATU_UPPER_BASE);
-	dw_pcie_writel_rc(pp, pp->cfg0_mod_base + pp->config.cfg0_size - 1,
+	dw_pcie_writel_rc(pp, pp->cfg0_mod_base + pp->cfg0_size - 1,
 			  PCIE_ATU_LIMIT);
 	dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET);
 	dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET);
@@ -589,7 +532,7 @@ static void dw_pcie_prog_viewport_cfg1(struct pcie_port *pp, u32 busdev)
 	dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG1, PCIE_ATU_CR1);
 	dw_pcie_writel_rc(pp, pp->cfg1_mod_base, PCIE_ATU_LOWER_BASE);
 	dw_pcie_writel_rc(pp, (pp->cfg1_mod_base >> 32), PCIE_ATU_UPPER_BASE);
-	dw_pcie_writel_rc(pp, pp->cfg1_mod_base + pp->config.cfg1_size - 1,
+	dw_pcie_writel_rc(pp, pp->cfg1_mod_base + pp->cfg1_size - 1,
 			  PCIE_ATU_LIMIT);
 	dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET);
 	dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET);
@@ -604,10 +547,10 @@ static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp)
 	dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_MEM, PCIE_ATU_CR1);
 	dw_pcie_writel_rc(pp, pp->mem_mod_base, PCIE_ATU_LOWER_BASE);
 	dw_pcie_writel_rc(pp, (pp->mem_mod_base >> 32), PCIE_ATU_UPPER_BASE);
-	dw_pcie_writel_rc(pp, pp->mem_mod_base + pp->config.mem_size - 1,
+	dw_pcie_writel_rc(pp, pp->mem_mod_base + pp->mem_size - 1,
 			  PCIE_ATU_LIMIT);
-	dw_pcie_writel_rc(pp, pp->config.mem_bus_addr, PCIE_ATU_LOWER_TARGET);
-	dw_pcie_writel_rc(pp, upper_32_bits(pp->config.mem_bus_addr),
+	dw_pcie_writel_rc(pp, pp->mem_bus_addr, PCIE_ATU_LOWER_TARGET);
+	dw_pcie_writel_rc(pp, upper_32_bits(pp->mem_bus_addr),
 			  PCIE_ATU_UPPER_TARGET);
 	dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
 }
@@ -620,10 +563,10 @@ static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp)
 	dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_IO, PCIE_ATU_CR1);
 	dw_pcie_writel_rc(pp, pp->io_mod_base, PCIE_ATU_LOWER_BASE);
 	dw_pcie_writel_rc(pp, (pp->io_mod_base >> 32), PCIE_ATU_UPPER_BASE);
-	dw_pcie_writel_rc(pp, pp->io_mod_base + pp->config.io_size - 1,
+	dw_pcie_writel_rc(pp, pp->io_mod_base + pp->io_size - 1,
 			  PCIE_ATU_LIMIT);
-	dw_pcie_writel_rc(pp, pp->config.io_bus_addr, PCIE_ATU_LOWER_TARGET);
-	dw_pcie_writel_rc(pp, upper_32_bits(pp->config.io_bus_addr),
+	dw_pcie_writel_rc(pp, pp->io_bus_addr, PCIE_ATU_LOWER_TARGET);
+	dw_pcie_writel_rc(pp, upper_32_bits(pp->io_bus_addr),
 			  PCIE_ATU_UPPER_TARGET);
 	dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
 }
@@ -707,11 +650,6 @@ static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
 	struct pcie_port *pp = sys_to_pcie(bus->sysdata);
 	int ret;
 
-	if (!pp) {
-		BUG();
-		return -EINVAL;
-	}
-
 	if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) {
 		*val = 0xffffffff;
 		return PCIBIOS_DEVICE_NOT_FOUND;
@@ -736,11 +674,6 @@ static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
 	struct pcie_port *pp = sys_to_pcie(bus->sysdata);
 	int ret;
 
-	if (!pp) {
-		BUG();
-		return -EINVAL;
-	}
-
 	if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0)
 		return PCIBIOS_DEVICE_NOT_FOUND;
 
@@ -768,19 +701,17 @@ static int dw_pcie_setup(int nr, struct pci_sys_data *sys)
 
 	pp = sys_to_pcie(sys);
 
-	if (!pp)
-		return 0;
-
-	if (global_io_offset < SZ_1M && pp->config.io_size > 0) {
-		sys->io_offset = global_io_offset - pp->config.io_bus_addr;
+	if (global_io_offset < SZ_1M && pp->io_size > 0) {
+		sys->io_offset = global_io_offset - pp->io_bus_addr;
 		pci_ioremap_io(global_io_offset, pp->io_base);
 		global_io_offset += SZ_64K;
 		pci_add_resource_offset(&sys->resources, &pp->io,
 					sys->io_offset);
 	}
 
-	sys->mem_offset = pp->mem.start - pp->config.mem_bus_addr;
+	sys->mem_offset = pp->mem.start - pp->mem_bus_addr;
 	pci_add_resource_offset(&sys->resources, &pp->mem, sys->mem_offset);
+	pci_add_resource(&sys->resources, &pp->busn);
 
 	return 1;
 }
@@ -790,14 +721,16 @@ static struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys)
 	struct pci_bus *bus;
 	struct pcie_port *pp = sys_to_pcie(sys);
 
-	if (pp) {
-		pp->root_bus_nr = sys->busnr;
-		bus = pci_scan_root_bus(pp->dev, sys->busnr, &dw_pcie_ops,
-					sys, &sys->resources);
-	} else {
-		bus = NULL;
-		BUG();
-	}
+	pp->root_bus_nr = sys->busnr;
+	bus = pci_create_root_bus(pp->dev, sys->busnr,
+				  &dw_pcie_ops, sys, &sys->resources);
+	if (!bus)
+		return NULL;
+
+	pci_scan_child_bus(bus);
+
+	if (bus && pp->ops->scan_bus)
+		pp->ops->scan_bus(pp);
 
 	return bus;
 }
@@ -833,7 +766,6 @@ static struct hw_pci dw_pci = {
 
 void dw_pcie_setup_rc(struct pcie_port *pp)
 {
-	struct pcie_port_info *config = &pp->config;
 	u32 val;
 	u32 membase;
 	u32 memlimit;
@@ -888,7 +820,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
 
 	/* setup memory base, memory limit */
 	membase = ((u32)pp->mem_base & 0xfff00000) >> 16;
-	memlimit = (config->mem_size + (u32)pp->mem_base) & 0xfff00000;
+	memlimit = (pp->mem_size + (u32)pp->mem_base) & 0xfff00000;
 	val = memlimit | membase;
 	dw_pcie_writel_rc(pp, val, PCI_MEMORY_BASE);
 