Diffstat (limited to 'drivers'): 102 files changed, 864 insertions, 518 deletions
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index b8bea100a160..b34390347c16 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -2868,6 +2868,21 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
         },
         .driver_data = "F.23",  /* cutoff BIOS version */
     },
+    /*
+     * Acer eMachines G725 has the same problem. BIOS
+     * V1.03 is known to be broken. V3.04 is known to
+     * work. Inbetween, there are V1.06, V2.06 and V3.03
+     * that we don't have much idea about. For now,
+     * blacklist anything older than V3.04.
+     */
+    {
+        .ident = "G725",
+        .matches = {
+            DMI_MATCH(DMI_SYS_VENDOR, "eMachines"),
+            DMI_MATCH(DMI_PRODUCT_NAME, "eMachines G725"),
+        },
+        .driver_data = "V3.04", /* cutoff BIOS version */
+    },
     { } /* terminate list */
     };
     const struct dmi_system_id *dmi = dmi_first_match(sysids);
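For context, the .driver_data string above acts as a cutoff BIOS version: ahci_broken_suspend() looks the machine up with dmi_first_match() and treats it as broken only when the reported firmware version sorts below the cutoff. A rough sketch of that check (simplified, not the exact driver code):

    /* Sketch only: how a cutoff version stored in .driver_data is evaluated. */
    static bool bios_is_older_than_cutoff(const struct dmi_system_id *sysids)
    {
        const struct dmi_system_id *dmi = dmi_first_match(sysids);
        const char *ver;

        if (!dmi)
            return false;                   /* machine is not on the list */

        ver = dmi_get_system_info(DMI_BIOS_VERSION);
        /* unknown version, or anything sorting before the cutoff, is suspect */
        return !ver || strcmp(ver, dmi->driver_data) < 0;
    }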
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index f4ea5a8c325b..d096fbcbc771 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2875,7 +2875,7 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
      * write indication (used for PIO/DMA setup), result TF is
      * copied back and we don't whine too much about its failure.
      */
-    tf->flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+    tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
     if (scmd->sc_data_direction == DMA_TO_DEVICE)
         tf->flags |= ATA_TFLAG_WRITE;
 
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 741065c9da67..730ef3c384ca 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -893,6 +893,9 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
                        do_write);
     }
 
+    if (!do_write)
+        flush_dcache_page(page);
+
     qc->curbytes += qc->sect_size;
     qc->cursg_ofs += qc->sect_size;
 
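The added flush_dcache_page() matters because the PIO read path has just written sector data into a page-cache page through a kernel mapping; on architectures with aliasing data caches a user mapping of that page could otherwise see stale data (on x86 the call is a no-op). A simplified sketch of the pattern, using a hypothetical helper name:

    /* Sketch: device -> memory PIO transfer into a page-cache page. */
    static void pio_read_into_page(struct page *page, unsigned int offset,
                                   const void *devbuf, size_t len)
    {
        unsigned char *vaddr = kmap(page);

        memcpy(vaddr + offset, devbuf, len);    /* CPU writes via a kernel alias */
        kunmap(page);

        /* Make the new data visible to differently-aliased (user) mappings. */
        flush_dcache_page(page);
    }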
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index f36defa37764..57d965b7f521 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -808,6 +808,7 @@ static int btmrvl_sdio_host_to_card(struct btmrvl_private *priv,
 
 exit:
     sdio_release_host(card->func);
+    kfree(tmpbuf);
 
     return ret;
 }
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 34cf04e21795..fd50ead59c79 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -767,16 +767,19 @@ int __init agp_amd64_init(void)
 
 static int __init agp_amd64_mod_init(void)
 {
+#ifndef MODULE
     if (gart_iommu_aperture)
         return agp_bridges_found ? 0 : -ENODEV;
-
+#endif
     return agp_amd64_init();
 }
 
 static void __exit agp_amd64_cleanup(void)
 {
+#ifndef MODULE
     if (gart_iommu_aperture)
         return;
+#endif
     if (aperture_resource)
         release_resource(aperture_resource);
     pci_unregister_driver(&agp_amd64_pci_driver);
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
index ecba4942fc8e..f58440791e65 100644
--- a/drivers/char/tpm/tpm_infineon.c
+++ b/drivers/char/tpm/tpm_infineon.c
@@ -39,12 +39,12 @@
 struct tpm_inf_dev {
     int iotype;
 
     void __iomem *mem_base;     /* MMIO ioremap'd addr */
     unsigned long map_base;     /* phys MMIO base */
     unsigned long map_size;     /* MMIO region size */
     unsigned int index_off;     /* index register offset */
 
     unsigned int data_regs;     /* Data registers */
     unsigned int data_size;
 
     unsigned int config_port;   /* IO Port config index reg */
@@ -406,14 +406,14 @@ static const struct tpm_vendor_specific tpm_inf = {
     .miscdev = {.fops = &inf_ops,},
 };
 
-static const struct pnp_device_id tpm_pnp_tbl[] = {
+static const struct pnp_device_id tpm_inf_pnp_tbl[] = {
     /* Infineon TPMs */
     {"IFX0101", 0},
     {"IFX0102", 0},
     {"", 0}
 };
 
-MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);
+MODULE_DEVICE_TABLE(pnp, tpm_inf_pnp_tbl);
 
 static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
                                        const struct pnp_device_id *dev_id)
@@ -430,7 +430,7 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
     if (pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) &&
         !(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED)) {
 
         tpm_dev.iotype = TPM_INF_IO_PORT;
 
         tpm_dev.config_port = pnp_port_start(dev, 0);
         tpm_dev.config_size = pnp_port_len(dev, 0);
@@ -459,9 +459,9 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
             goto err_last;
         }
     } else if (pnp_mem_valid(dev, 0) &&
               !(pnp_mem_flags(dev, 0) & IORESOURCE_DISABLED)) {
 
         tpm_dev.iotype = TPM_INF_IO_MEM;
 
         tpm_dev.map_base = pnp_mem_start(dev, 0);
         tpm_dev.map_size = pnp_mem_len(dev, 0);
@@ -563,11 +563,11 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
          "product id 0x%02x%02x"
          "%s\n",
          tpm_dev.iotype == TPM_INF_IO_PORT ?
          tpm_dev.config_port :
          tpm_dev.map_base + tpm_dev.index_off,
          tpm_dev.iotype == TPM_INF_IO_PORT ?
          tpm_dev.data_regs :
          tpm_dev.map_base + tpm_dev.data_regs,
          version[0], version[1],
          vendorid[0], vendorid[1],
          productid[0], productid[1], chipname);
@@ -607,20 +607,55 @@ static __devexit void tpm_inf_pnp_remove(struct pnp_dev *dev)
             iounmap(tpm_dev.mem_base);
             release_mem_region(tpm_dev.map_base, tpm_dev.map_size);
         }
+        tpm_dev_vendor_release(chip);
         tpm_remove_hardware(chip->dev);
     }
 }
 
+static int tpm_inf_pnp_suspend(struct pnp_dev *dev, pm_message_t pm_state)
+{
+    struct tpm_chip *chip = pnp_get_drvdata(dev);
+    int rc;
+    if (chip) {
+        u8 savestate[] = {
+            0, 193,       /* TPM_TAG_RQU_COMMAND */
+            0, 0, 0, 10,  /* blob length (in bytes) */
+            0, 0, 0, 152  /* TPM_ORD_SaveState */
+        };
+        dev_info(&dev->dev, "saving TPM state\n");
+        rc = tpm_inf_send(chip, savestate, sizeof(savestate));
+        if (rc < 0) {
+            dev_err(&dev->dev, "error while saving TPM state\n");
+            return rc;
+        }
+    }
+    return 0;
+}
+
+static int tpm_inf_pnp_resume(struct pnp_dev *dev)
+{
+    /* Re-configure TPM after suspending */
+    tpm_config_out(ENABLE_REGISTER_PAIR, TPM_INF_ADDR);
+    tpm_config_out(IOLIMH, TPM_INF_ADDR);
+    tpm_config_out((tpm_dev.data_regs >> 8) & 0xff, TPM_INF_DATA);
+    tpm_config_out(IOLIML, TPM_INF_ADDR);
+    tpm_config_out((tpm_dev.data_regs & 0xff), TPM_INF_DATA);
+    /* activate register */
+    tpm_config_out(TPM_DAR, TPM_INF_ADDR);
+    tpm_config_out(0x01, TPM_INF_DATA);
+    tpm_config_out(DISABLE_REGISTER_PAIR, TPM_INF_ADDR);
+    /* disable RESET, LP and IRQC */
+    tpm_data_out(RESET_LP_IRQC_DISABLE, CMD);
+    return tpm_pm_resume(&dev->dev);
+}
+
 static struct pnp_driver tpm_inf_pnp_driver = {
     .name = "tpm_inf_pnp",
-    .driver = {
-           .owner = THIS_MODULE,
-           .suspend = tpm_pm_suspend,
-           .resume = tpm_pm_resume,
-    },
-    .id_table = tpm_pnp_tbl,
+    .id_table = tpm_inf_pnp_tbl,
     .probe = tpm_inf_pnp_probe,
-    .remove = __devexit_p(tpm_inf_pnp_remove),
+    .suspend = tpm_inf_pnp_suspend,
+    .resume = tpm_inf_pnp_resume,
+    .remove = __devexit_p(tpm_inf_pnp_remove)
 };
 
 static int __init init_inf(void)
@@ -638,5 +673,5 @@ module_exit(cleanup_inf);
 
 MODULE_AUTHOR("Marcel Selhorst <m.selhorst@sirrix.com>");
 MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2");
-MODULE_VERSION("1.9");
+MODULE_VERSION("1.9.2");
 MODULE_LICENSE("GPL");
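The new tpm_inf_pnp_suspend() hands the chip a raw TPM 1.1 command before sleep; the savestate[] bytes are simply a big-endian command header. As a sketch, the ten bytes in the patch decode as:

    /* Sketch: field layout encoded by the savestate[] byte array (big-endian). */
    struct tpm_save_state_cmd {
        __be16 tag;      /* 0x00C1 TPM_TAG_RQU_COMMAND -> bytes 0, 193      */
        __be32 length;   /* 10     total blob length   -> bytes 0, 0, 0, 10 */
        __be32 ordinal;  /* 0x0098 TPM_ORD_SaveState   -> bytes 0, 0, 0, 152 */
    } __packed;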
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index c6f3b48be9dd..dcb9083ecde0 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -1951,8 +1951,10 @@ static int tty_fasync(int fd, struct file *filp, int on)
             pid = task_pid(current);
             type = PIDTYPE_PID;
         }
-        retval = __f_setown(filp, pid, type, 0);
+        get_pid(pid);
         spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+        retval = __f_setown(filp, pid, type, 0);
+        put_pid(pid);
         if (retval)
             goto out;
     } else {
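The tty change follows the usual rule for lock-protected pointers: pin the object while the lock is held, then drop the lock before calling __f_setown(), which may block. Roughly, the pattern being applied is:

    spin_lock_irqsave(&tty->ctrl_lock, flags);
    pid = task_pid(current);    /* pointer only guaranteed valid under the lock */
    get_pid(pid);               /* take a reference before releasing the lock */
    spin_unlock_irqrestore(&tty->ctrl_lock, flags);

    retval = __f_setown(filp, pid, PIDTYPE_PID, 0);  /* may block; lock not held */
    put_pid(pid);               /* drop the temporary reference */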
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 4b34ade2332b..bd444dc93cf2 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -554,6 +554,9 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
                 (dbs_tuners_ins.up_threshold -
                  dbs_tuners_ins.down_differential);
 
+        if (freq_next < policy->min)
+            freq_next = policy->min;
+
         if (!dbs_tuners_ins.powersave_bias) {
             __cpufreq_driver_target(policy, freq_next,
                                     CPUFREQ_RELATION_L);
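The governor change clamps the computed target so a deep down-scaling step can never request a frequency below the policy floor. Conceptually (a sketch with a hypothetical helper, not the governor's exact code):

    /* Sketch: keep the proposed frequency inside the policy limits. */
    static unsigned int clamp_target_freq(unsigned int freq_next,
                                          const struct cpufreq_policy *policy)
    {
        if (freq_next < policy->min)
            freq_next = policy->min;
        if (freq_next > policy->max)
            freq_next = policy->max;
        return freq_next;
    }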
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index b5f2ee0f8e2c..64a937262a40 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -613,8 +613,6 @@ static void dma_tasklet(unsigned long data)
     cohd_fin->pending_irqs--;
     cohc->completed = cohd_fin->desc.cookie;
 
-    BUG_ON(cohc->nbr_active_done && cohd_fin == NULL);
-
     if (cohc->nbr_active_done == 0)
         return;
 
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 6f51a0a7a8bb..e7a3230fb7d5 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -826,6 +826,7 @@ void dma_async_device_unregister(struct dma_device *device)
         chan->dev->chan = NULL;
         mutex_unlock(&dma_list_mutex);
         device_unregister(&chan->dev->device);
+        free_percpu(chan->local);
     }
 }
 EXPORT_SYMBOL(dma_async_device_unregister);
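chan->local is per-CPU state set up with alloc_percpu() when the DMA device is registered; the added free_percpu() releases it on unregister so the allocation no longer leaks. The pairing looks roughly like:

    /* Sketch: the allocation this free_percpu() now balances. */
    chan->local = alloc_percpu(struct dma_chan_percpu);  /* at register time */
    if (!chan->local)
        return -ENOMEM;
    /* ... channel lifetime ... */
    free_percpu(chan->local);                            /* at unregister time */
    chan->local = NULL;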
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 8b905161fbf4..948d563941c9 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -467,7 +467,7 @@ err_srcs:
 
     if (iterations > 0)
         while (!kthread_should_stop()) {
-            DECLARE_WAIT_QUEUE_HEAD(wait_dmatest_exit);
+            DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
             interruptible_sleep_on(&wait_dmatest_exit);
         }
 
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 5f7a500e18d0..5cc37afe2bc1 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -249,7 +249,7 @@ int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo)
     if (is_ioat_active(status) || is_ioat_idle(status))
         ioat_suspend(chan);
     while (is_ioat_active(status) || is_ioat_idle(status)) {
-        if (end && time_after(jiffies, end)) {
+        if (tmo && time_after(jiffies, end)) {
             err = -ETIMEDOUT;
             break;
         }
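The ioat2_quiesce() fix corrects an inverted sentinel: end is computed as jiffies + tmo and is therefore almost never zero, so a tmo of 0 (meaning "wait forever") could still time out. The intended loop shape, sketched with a hypothetical completion predicate:

    /* Sketch: poll with an optional timeout, where tmo == 0 means wait forever. */
    unsigned long end = jiffies + tmo;
    int err = 0;

    while (!channel_is_quiesced(chan)) {        /* hypothetical predicate */
        if (tmo && time_after(jiffies, end)) {  /* only bound the wait if asked to */
            err = -ETIMEDOUT;
            break;
        }
        cpu_relax();
    }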
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 9a5bc1a7389e..e80bae1673fa 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -761,12 +761,10 @@ static void ipu_select_buffer(enum ipu_channel channel, int buffer_n)
  * @buffer_n:   buffer number to update.
  *              0 or 1 are the only valid values.
  * @phyaddr:    buffer physical address.
- * @return:     Returns 0 on success or negative error code on failure. This
- *              function will fail if the buffer is set to ready.
  */
 /* Called under spin_lock(_irqsave)(&ichan->lock) */
-static int ipu_update_channel_buffer(struct idmac_channel *ichan,
+static void ipu_update_channel_buffer(struct idmac_channel *ichan,
                                       int buffer_n, dma_addr_t phyaddr)
 {
     enum ipu_channel channel = ichan->dma_chan.chan_id;
     uint32_t reg;
@@ -806,8 +804,6 @@ static int ipu_update_channel_buffer(struct idmac_channel *ichan,
     }
 
     spin_unlock_irqrestore(&ipu_data.lock, flags);
-
-    return 0;
 }
 
 /* Called under spin_lock_irqsave(&ichan->lock) */
@@ -816,7 +812,6 @@ static int ipu_submit_buffer(struct idmac_channel *ichan,
 {
     unsigned int chan_id = ichan->dma_chan.chan_id;
     struct device *dev = &ichan->dma_chan.dev->device;
-    int ret;
 
     if (async_tx_test_ack(&desc->txd))
         return -EINTR;
@@ -827,14 +822,7 @@ static int ipu_submit_buffer(struct idmac_channel *ichan,
      * could make it conditional on status >= IPU_CHANNEL_ENABLED, but
      * doing it again shouldn't hurt either.
      */
-    ret = ipu_update_channel_buffer(ichan, buf_idx,
-                                    sg_dma_address(sg));
-
-    if (ret < 0) {
-        dev_err(dev, "Updating sg %p on channel 0x%x buffer %d failed!\n",
-                sg, chan_id, buf_idx);
-        return ret;
-    }
+    ipu_update_channel_buffer(ichan, buf_idx, sg_dma_address(sg));
 
     ipu_select_buffer(chan_id, buf_idx);
     dev_dbg(dev, "Updated sg %p on channel 0x%x buffer %d\n",
@@ -1379,10 +1367,11 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
 
     if (likely(sgnew) &&
         ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) {
-        callback = desc->txd.callback;
-        callback_param = desc->txd.callback_param;
+        callback = descnew->txd.callback;
+        callback_param = descnew->txd.callback_param;
         spin_unlock(&ichan->lock);
-        callback(callback_param);
+        if (callback)
+            callback(callback_param);
         spin_lock(&ichan->lock);
     }
 
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index cf27402af97b..ecd5928d7110 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -804,8 +804,8 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
         end <<= (24 - PAGE_SHIFT);
         end |= (1 << (24 - PAGE_SHIFT)) - 1;
 
-        csrow->first_page = start >> PAGE_SHIFT;
-        csrow->last_page = end >> PAGE_SHIFT;
+        csrow->first_page = start;
+        csrow->last_page = end;
         csrow->nr_pages = end + 1 - start;
         csrow->grain = 8;
         csrow->mtype = mtype;
@@ -892,10 +892,6 @@ static int __devinit mpc85xx_mc_err_probe(struct of_device *op,
 
     mpc85xx_init_csrows(mci);
 
-#ifdef CONFIG_EDAC_DEBUG
-    edac_mc_register_mcidev_debug((struct attribute **)debug_attr);
-#endif
-
     /* store the original error disable bits */
     orig_ddr_err_disable =
         in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE);
diff --git a/drivers/gpu/drm/ati_pcigart.c b/drivers/gpu/drm/ati_pcigart.c
index a1fce68e3bbe..17be051b7aa3 100644
--- a/drivers/gpu/drm/ati_pcigart.c
+++ b/drivers/gpu/drm/ati_pcigart.c
@@ -113,7 +113,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
 
     if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) {
         DRM_ERROR("fail to set dma mask to 0x%Lx\n",
-                  gart_info->table_mask);
+                  (unsigned long long)gart_info->table_mask);
         ret = 1;
         goto done;
     }
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 46d88965852a..ecac882e1d54 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -120,7 +120,7 @@ const static struct intel_device_info intel_gm45_info = {
 
 const static struct intel_device_info intel_pineview_info = {
     .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
-    .has_pipe_cxsr = 1,
+    .need_gfx_hws = 1,
     .has_hotplug = 1,
 };
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index dda787aafcc6..b4c8c0230689 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3564,6 +3564,9 @@ i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
     uint32_t reloc_count = 0, i;
     int ret = 0;
 
+    if (relocs == NULL)
+        return 0;
+
     for (i = 0; i < buffer_count; i++) {
         struct drm_i915_gem_relocation_entry __user *user_relocs;
         int unwritten;
@@ -3653,7 +3656,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
     struct drm_gem_object *batch_obj;
     struct drm_i915_gem_object *obj_priv;
     struct drm_clip_rect *cliprects = NULL;
-    struct drm_i915_gem_relocation_entry *relocs;
+    struct drm_i915_gem_relocation_entry *relocs = NULL;
     int ret = 0, ret2, i, pinned = 0;
     uint64_t exec_offset;
     uint32_t seqno, flush_domains, reloc_index;
@@ -3722,6 +3725,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
         if (object_list[i] == NULL) {
             DRM_ERROR("Invalid object handle %d at index %d\n",
                       exec_list[i].handle, i);
+            /* prevent error path from reading uninitialized data */
+            args->buffer_count = i + 1;
             ret = -EBADF;
             goto err;
         }
@@ -3730,6 +3735,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
         if (obj_priv->in_execbuffer) {
             DRM_ERROR("Object %p appears more than once in object list\n",
                       object_list[i]);
+            /* prevent error path from reading uninitialized data */
+            args->buffer_count = i + 1;
             ret = -EBADF;
             goto err;
         }
@@ -3926,6 +3933,7 @@ err:
 
     mutex_unlock(&dev->struct_mutex);
 
+pre_mutex_err:
     /* Copy the updated relocations out regardless of current error
      * state. Failure to update the relocs would mean that the next
      * time userland calls execbuf, it would do so with presumed offset
@@ -3940,7 +3948,6 @@ err:
         ret = ret2;
     }
 
-pre_mutex_err:
     drm_free_large(object_list);
     kfree(cliprects);
 
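Initialising relocs to NULL and returning early from i915_gem_put_relocs_to_user() when it is NULL lets the copy-back cleanup run unconditionally from the relocated pre_mutex_err label, even for failures that occur before the relocation array exists. The defensive shape, as a sketch with hypothetical types:

    /* Sketch: a cleanup helper that is safe to call even when the failure
     * happened before the array was ever allocated. */
    static int put_relocs(struct reloc_entry *relocs, unsigned int count)
    {
        if (relocs == NULL)
            return 0;           /* nothing was handed out yet */
        /* ... copy entries back to user space ... */
        return 0;
    }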
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 89a071a3e6fb..50ddf4a95c5e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -309,6 +309,22 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
     if (de_iir & DE_GSE)
         ironlake_opregion_gse_intr(dev);
 
+    if (de_iir & DE_PLANEA_FLIP_DONE)
+        intel_prepare_page_flip(dev, 0);
+
+    if (de_iir & DE_PLANEB_FLIP_DONE)
+        intel_prepare_page_flip(dev, 1);
+
+    if (de_iir & DE_PIPEA_VBLANK) {
+        drm_handle_vblank(dev, 0);
+        intel_finish_page_flip(dev, 0);
+    }
+
+    if (de_iir & DE_PIPEB_VBLANK) {
+        drm_handle_vblank(dev, 1);
+        intel_finish_page_flip(dev, 1);
+    }
+
     /* check event from PCH */
     if ((de_iir & DE_PCH_EVENT) &&
         (pch_iir & SDE_HOTPLUG_MASK)) {
@@ -844,11 +860,11 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
     if (!(pipeconf & PIPEACONF_ENABLE))
         return -EINVAL;
 
-    if (IS_IRONLAKE(dev))
-        return 0;
-
     spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-    if (IS_I965G(dev))
+    if (IS_IRONLAKE(dev))
+        ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
+                                    DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
+    else if (IS_I965G(dev))
         i915_enable_pipestat(dev_priv, pipe,
                              PIPE_START_VBLANK_INTERRUPT_ENABLE);
     else
@@ -866,13 +882,14 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
     drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
     unsigned long irqflags;
 
-    if (IS_IRONLAKE(dev))
-        return;
-
     spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-    i915_disable_pipestat(dev_priv, pipe,
-                          PIPE_VBLANK_INTERRUPT_ENABLE |
-                          PIPE_START_VBLANK_INTERRUPT_ENABLE);
+    if (IS_IRONLAKE(dev))
+        ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
+                                     DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
+    else
+        i915_disable_pipestat(dev_priv, pipe,
+                              PIPE_VBLANK_INTERRUPT_ENABLE |
+                              PIPE_START_VBLANK_INTERRUPT_ENABLE);
     spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
 }
 
@@ -1015,13 +1032,14 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 {
     drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
     /* enable kind of interrupts always enabled */
-    u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT;
+    u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
+               DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
     u32 render_mask = GT_USER_INTERRUPT;
     u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
                SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
 
     dev_priv->irq_mask_reg = ~display_mask;
-    dev_priv->de_irq_enable_reg = display_mask;
+    dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;
 
     /* should always can generate irq */
     I915_WRITE(DEIIR, I915_READ(DEIIR));
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index ddefc871edfe..79dd4026586f 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -157,6 +157,9 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
     adpa = I915_READ(PCH_ADPA);
 
     adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+    /* disable HPD first */
+    I915_WRITE(PCH_ADPA, adpa);
+    (void)I915_READ(PCH_ADPA);
 
     adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
              ADPA_CRT_HOTPLUG_WARMUP_10MS |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 45da78ef4a92..12775df1bbfd 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1638,6 +1638,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
     case DRM_MODE_DPMS_OFF:
         DRM_DEBUG_KMS("crtc %d dpms off\n", pipe);
 
+        drm_vblank_off(dev, pipe);
         /* Disable display plane */
         temp = I915_READ(dspcntr_reg);
         if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
@@ -2519,6 +2520,10 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
         sr_entries = roundup(sr_entries / cacheline_size, 1);
         DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
         I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+    } else {
+        /* Turn off self refresh if both pipes are enabled */
+        I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+                   & ~FW_BLC_SELF_EN);
     }
 
     DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n",
@@ -2562,6 +2567,10 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
             srwm = 1;
         srwm &= 0x3f;
         I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+    } else {
+        /* Turn off self refresh if both pipes are enabled */
+        I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+                   & ~FW_BLC_SELF_EN);
     }
 
     DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
@@ -2630,6 +2639,10 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
         if (srwm < 0)
             srwm = 1;
         I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f));
+    } else {
+        /* Turn off self refresh if both pipes are enabled */
+        I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+                   & ~FW_BLC_SELF_EN);
     }
 
     DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
@@ -3984,6 +3997,12 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
     spin_lock_irqsave(&dev->event_lock, flags);
     work = intel_crtc->unpin_work;
     if (work == NULL || !work->pending) {
+        if (work && !work->pending) {
+            obj_priv = work->obj->driver_private;
+            DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n",
+                             obj_priv,
+                             atomic_read(&obj_priv->pending_flip));
+        }
         spin_unlock_irqrestore(&dev->event_lock, flags);
         return;
     }
@@ -4005,7 +4024,10 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
     spin_unlock_irqrestore(&dev->event_lock, flags);
 
     obj_priv = work->obj->driver_private;
-    if (atomic_dec_and_test(&obj_priv->pending_flip))
+
+    /* Initial scanout buffer will have a 0 pending flip count */
+    if ((atomic_read(&obj_priv->pending_flip) == 0) ||
+        atomic_dec_and_test(&obj_priv->pending_flip))
         DRM_WAKEUP(&dev_priv->pending_flip_queue);
     schedule_work(&work->work);
 }
@@ -4018,8 +4040,11 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
     unsigned long flags;
 
     spin_lock_irqsave(&dev->event_lock, flags);
-    if (intel_crtc->unpin_work)
+    if (intel_crtc->unpin_work) {
         intel_crtc->unpin_work->pending = 1;
+    } else {
+        DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
+    }
     spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
@@ -4053,6 +4078,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
     /* We borrow the event spin lock for protecting unpin_work */
     spin_lock_irqsave(&dev->event_lock, flags);
     if (intel_crtc->unpin_work) {
+        DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
         spin_unlock_irqrestore(&dev->event_lock, flags);
         kfree(work);
         mutex_unlock(&dev->struct_mutex);
@@ -4066,7 +4092,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
     ret = intel_pin_and_fence_fb_obj(dev, obj);
     if (ret != 0) {
+        DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
+                         obj->driver_private);
         kfree(work);
+        intel_crtc->unpin_work = NULL;
         mutex_unlock(&dev->struct_mutex);
         return ret;
     }
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index aa74e59bec61..b1d0acbae4e4 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -611,7 +611,7 @@ static const struct dmi_system_id bad_lid_status[] = {
     {
         .ident = "Samsung SX20S",
         .matches = {
-            DMI_MATCH(DMI_SYS_VENDOR, "Phoenix Technologies LTD"),
+            DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"),
             DMI_MATCH(DMI_BOARD_NAME, "SX20S"),
         },
     },
@@ -623,6 +623,13 @@ static const struct dmi_system_id bad_lid_status[] = {
         },
     },
     {
+        .ident = "Aspire 1810T",
+        .matches = {
+            DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+            DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1810T"),
+        },
+    },
+    {
         .ident = "PC-81005",
         .matches = {
             DMI_MATCH(DMI_SYS_VENDOR, "MALATA"),
@@ -643,7 +650,7 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect
 {
     enum drm_connector_status status = connector_status_connected;
 
-    if (!acpi_lid_open() && !dmi_check_system(bad_lid_status))
+    if (!dmi_check_system(bad_lid_status) && !acpi_lid_open())
         status = connector_status_disconnected;
 
     return status;
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index eaacfd0920df..82678d30ab06 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2345,6 +2345,14 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
         connector->connector_type = DRM_MODE_CONNECTOR_VGA;
         intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
                                    (1 << INTEL_ANALOG_CLONE_BIT);
+    } else if (flags & SDVO_OUTPUT_CVBS0) {
+
+        sdvo_priv->controlled_output = SDVO_OUTPUT_CVBS0;
+        encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
+        connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
+        sdvo_priv->is_tv = true;
+        intel_output->needs_tv_clock = true;
+        intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
     } else if (flags & SDVO_OUTPUT_LVDS0) {
 
         sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 1cf488247a16..48227e744753 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -90,21 +90,21 @@ int nouveau_hybrid_setup(struct drm_device *dev)
 {
     int result;
 
-    if (nouveau_dsm(dev, NOUVEAU_DSM_ACTIVE, NOUVEAU_DSM_ACTIVE_QUERY,
+    if (nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STATE,
                     &result))
         return -ENODEV;
 
     NV_INFO(dev, "_DSM hardware status gave 0x%x\n", result);
 
-    if (result & 0x1) { /* Stamina mode - disable the external GPU */
+    if (result) { /* Ensure that the external GPU is enabled */
+        nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL);
+        nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED,
+                    NULL);
+    } else { /* Stamina mode - disable the external GPU */
         nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_STAMINA,
                     NULL);
         nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STAMINA,
                     NULL);
-    } else { /* Ensure that the external GPU is enabled */
-        nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL);
-        nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED,
-                    NULL);
     }
 
     return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index d7f8d8b4a4b8..2cd0fad17dac 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -1865,7 +1865,7 @@ init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
 
     struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
 
-    if (dev_priv->card_type >= NV_50)
+    if (dev_priv->card_type >= NV_40)
         return 1;
 
     /*
@@ -3765,7 +3765,6 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
      */
 
     struct drm_nouveau_private *dev_priv = dev->dev_private;
-    struct init_exec iexec = {true, false};
     struct nvbios *bios = &dev_priv->VBIOS;
     uint8_t *table = &bios->data[bios->display.script_table_ptr];
     uint8_t *otable = NULL;
@@ -3845,8 +3844,6 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
         }
     }
 
-    bios->display.output = dcbent;
-
     if (pxclk == 0) {
         script = ROM16(otable[6]);
         if (!script) {
@@ -3855,7 +3852,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
         }
 
         NV_TRACE(dev, "0x%04X: parsing output script 0\n", script);
-        parse_init_table(bios, script, &iexec);
+        nouveau_bios_run_init_table(dev, script, dcbent);
     } else
     if (pxclk == -1) {
         script = ROM16(otable[8]);
@@ -3865,7 +3862,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
         }
 
         NV_TRACE(dev, "0x%04X: parsing output script 1\n", script);
-        parse_init_table(bios, script, &iexec);
+        nouveau_bios_run_init_table(dev, script, dcbent);
     } else
     if (pxclk == -2) {
         if (table[4] >= 12)
@@ -3878,7 +3875,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
         }
 
         NV_TRACE(dev, "0x%04X: parsing output script 2\n", script);
-        parse_init_table(bios, script, &iexec);
+        nouveau_bios_run_init_table(dev, script, dcbent);
     } else
     if (pxclk > 0) {
         script = ROM16(otable[table[4] + i*6 + 2]);
@@ -3890,7 +3887,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
         }
 
         NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script);
-        parse_init_table(bios, script, &iexec);
+        nouveau_bios_run_init_table(dev, script, dcbent);
     } else
     if (pxclk < 0) {
         script = ROM16(otable[table[4] + i*6 + 4]);
@@ -3902,7 +3899,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
         }
 
         NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script);
-        parse_init_table(bios, script, &iexec);
+        nouveau_bios_run_init_table(dev, script, dcbent);
     }
 
     return 0;
@@ -5864,10 +5861,13 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
     struct drm_nouveau_private *dev_priv = dev->dev_private;
     struct nvbios *bios = &dev_priv->VBIOS;
     struct init_exec iexec = { true, false };
+    unsigned long flags;
 
+    spin_lock_irqsave(&bios->lock, flags);
     bios->display.output = dcbent;
     parse_init_table(bios, table, &iexec);
     bios->display.output = NULL;
+    spin_unlock_irqrestore(&bios->lock, flags);
 }
 
 static bool NVInitVBIOS(struct drm_device *dev)
@@ -5876,6 +5876,7 @@ static bool NVInitVBIOS(struct drm_device *dev)
     struct nvbios *bios = &dev_priv->VBIOS;
 
     memset(bios, 0, sizeof(struct nvbios));
+    spin_lock_init(&bios->lock);
     bios->dev = dev;
 
     if (!NVShadowVBIOS(dev, bios->data))
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 058e98c76d89..68446fd4146b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -205,6 +205,8 @@ struct nvbios {
     struct drm_device *dev;
     struct nouveau_bios_info pub;
 
+    spinlock_t lock;
+
     uint8_t data[NV_PROM_SIZE];
     unsigned int length;
     bool execute;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index db0ed4c13f98..028719fddf76 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -65,8 +65,10 @@ nouveau_bo_fixup_align(struct drm_device *dev,
 
     /*
      * Some of the tile_flags have a periodic structure of N*4096 bytes,
-     * align to to that as well as the page size. Overallocate memory to
-     * avoid corruption of other buffer objects.
+     * align to to that as well as the page size. Align the size to the
+     * appropriate boundaries. This does imply that sizes are rounded up
+     * 3-7 pages, so be aware of this and do not waste memory by allocating
+     * many small buffers.
      */
     if (dev_priv->card_type == NV_50) {
         uint32_t block_size = nouveau_mem_fb_amount(dev) >> 15;
@@ -77,22 +79,20 @@ nouveau_bo_fixup_align(struct drm_device *dev,
         case 0x2800:
         case 0x4800:
         case 0x7a00:
-            *size = roundup(*size, block_size);
             if (is_power_of_2(block_size)) {
-                *size += 3 * block_size;
                 for (i = 1; i < 10; i++) {
                     *align = 12 * i * block_size;
                     if (!(*align % 65536))
                         break;
                 }
             } else {
-                *size += 6 * block_size;
                 for (i = 1; i < 10; i++) {
                     *align = 8 * i * block_size;
                     if (!(*align % 65536))
                         break;
                 }
             }
+            *size = roundup(*size, *align);
             break;
         default:
             break;
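Rather than over-allocating a few block_size chunks and hoping, the reworked nouveau_bo_fixup_align() logic first picks an alignment that is a multiple of both the tiling period and 64 KiB, then rounds the buffer size up to it, so adjacent objects can no longer overlap. Roughly:

    /* Sketch: pick an alignment satisfying both constraints, then round up. */
    uint32_t align = block_size;
    int i;

    for (i = 1; i < 10; i++) {
        align = 12 * i * block_size;   /* 8 * i when block_size is not a power of two */
        if (!(align % 65536))          /* must also be a multiple of 64 KiB */
            break;
    }
    size = roundup(size, align);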
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 343d718a9667..2281f99da7fc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -278,12 +278,11 @@ nouveau_channel_free(struct nouveau_channel *chan)
     /* Ensure the channel is no longer active on the GPU */
     pfifo->reassign(dev, false);
 
-    if (pgraph->channel(dev) == chan) {
-        pgraph->fifo_access(dev, false);
+    pgraph->fifo_access(dev, false);
+    if (pgraph->channel(dev) == chan)
         pgraph->unload_context(dev);
-        pgraph->fifo_access(dev, true);
-    }
     pgraph->destroy_context(chan);
+    pgraph->fifo_access(dev, true);
 
     if (pfifo->channel_id(dev) == chan->id) {
         pfifo->disable(dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 7e6d673f3a23..d2f63353ea97 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -88,13 +88,14 @@ nouveau_connector_destroy(struct drm_connector *drm_connector)
 {
     struct nouveau_connector *nv_connector =
         nouveau_connector(drm_connector);
-    struct drm_device *dev = nv_connector->base.dev;
-
-    NV_DEBUG_KMS(dev, "\n");
+    struct drm_device *dev;
 
     if (!nv_connector)
         return;
 
+    dev = nv_connector->base.dev;
+    NV_DEBUG_KMS(dev, "\n");
+
     kfree(nv_connector->edid);
     drm_sysfs_connector_remove(drm_connector);
     drm_connector_cleanup(drm_connector);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index dd4937224220..f954ad93e81f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -502,12 +502,12 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
             break;
     }
 
-    if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) {
-        ret = -EREMOTEIO;
-        goto out;
-    }
-
     if (cmd & 1) {
+        if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) {
+            ret = -EREMOTEIO;
+            goto out;
+        }
+
         for (i = 0; i < 4; i++) {
             data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i));
             NV_DEBUG_KMS(dev, "rd %d: 0x%08x\n", i, data32[i]);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index 343ab7f17ccc..da3b93b84502 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c | |||
| @@ -56,7 +56,7 @@ int nouveau_vram_pushbuf; | |||
| 56 | module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400); | 56 | module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400); |
| 57 | 57 | ||
| 58 | MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM"); | 58 | MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM"); |
| 59 | int nouveau_vram_notify; | 59 | int nouveau_vram_notify = 1; |
| 60 | module_param_named(vram_notify, nouveau_vram_notify, int, 0400); | 60 | module_param_named(vram_notify, nouveau_vram_notify, int, 0400); |
| 61 | 61 | ||
| 62 | MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)"); | 62 | MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)"); |
| @@ -75,6 +75,14 @@ MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status"); | |||
| 75 | int nouveau_ignorelid = 0; | 75 | int nouveau_ignorelid = 0; |
| 76 | module_param_named(ignorelid, nouveau_ignorelid, int, 0400); | 76 | module_param_named(ignorelid, nouveau_ignorelid, int, 0400); |
| 77 | 77 | ||
| 78 | MODULE_PARM_DESC(noagp, "Disable all acceleration"); | ||
| 79 | int nouveau_noaccel = 0; | ||
| 80 | module_param_named(noaccel, nouveau_noaccel, int, 0400); | ||
| 81 | |||
| 82 | MODULE_PARM_DESC(noagp, "Disable fbcon acceleration"); | ||
| 83 | int nouveau_nofbaccel = 0; | ||
| 84 | module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400); | ||
| 85 | |||
| 78 | MODULE_PARM_DESC(tv_norm, "Default TV norm.\n" | 86 | MODULE_PARM_DESC(tv_norm, "Default TV norm.\n" |
| 79 | "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n" | 87 | "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n" |
| 80 | "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n" | 88 | "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n" |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 6b9690418bc7..5445cefdd03e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h | |||
| @@ -678,6 +678,8 @@ extern int nouveau_reg_debug; | |||
| 678 | extern char *nouveau_vbios; | 678 | extern char *nouveau_vbios; |
| 679 | extern int nouveau_ctxfw; | 679 | extern int nouveau_ctxfw; |
| 680 | extern int nouveau_ignorelid; | 680 | extern int nouveau_ignorelid; |
| 681 | extern int nouveau_nofbaccel; | ||
| 682 | extern int nouveau_noaccel; | ||
| 681 | 683 | ||
| 682 | /* nouveau_state.c */ | 684 | /* nouveau_state.c */ |
| 683 | extern void nouveau_preclose(struct drm_device *dev, struct drm_file *); | 685 | extern void nouveau_preclose(struct drm_device *dev, struct drm_file *); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 0b05c869e0e7..ea879a2efef3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c | |||
| @@ -107,6 +107,34 @@ static struct fb_ops nouveau_fbcon_ops = { | |||
| 107 | .fb_setcmap = drm_fb_helper_setcmap, | 107 | .fb_setcmap = drm_fb_helper_setcmap, |
| 108 | }; | 108 | }; |
| 109 | 109 | ||
| 110 | static struct fb_ops nv04_fbcon_ops = { | ||
| 111 | .owner = THIS_MODULE, | ||
| 112 | .fb_check_var = drm_fb_helper_check_var, | ||
| 113 | .fb_set_par = drm_fb_helper_set_par, | ||
| 114 | .fb_setcolreg = drm_fb_helper_setcolreg, | ||
| 115 | .fb_fillrect = nv04_fbcon_fillrect, | ||
| 116 | .fb_copyarea = nv04_fbcon_copyarea, | ||
| 117 | .fb_imageblit = nv04_fbcon_imageblit, | ||
| 118 | .fb_sync = nouveau_fbcon_sync, | ||
| 119 | .fb_pan_display = drm_fb_helper_pan_display, | ||
| 120 | .fb_blank = drm_fb_helper_blank, | ||
| 121 | .fb_setcmap = drm_fb_helper_setcmap, | ||
| 122 | }; | ||
| 123 | |||
| 124 | static struct fb_ops nv50_fbcon_ops = { | ||
| 125 | .owner = THIS_MODULE, | ||
| 126 | .fb_check_var = drm_fb_helper_check_var, | ||
| 127 | .fb_set_par = drm_fb_helper_set_par, | ||
| 128 | .fb_setcolreg = drm_fb_helper_setcolreg, | ||
| 129 | .fb_fillrect = nv50_fbcon_fillrect, | ||
| 130 | .fb_copyarea = nv50_fbcon_copyarea, | ||
| 131 | .fb_imageblit = nv50_fbcon_imageblit, | ||
| 132 | .fb_sync = nouveau_fbcon_sync, | ||
| 133 | .fb_pan_display = drm_fb_helper_pan_display, | ||
| 134 | .fb_blank = drm_fb_helper_blank, | ||
| 135 | .fb_setcmap = drm_fb_helper_setcmap, | ||
| 136 | }; | ||
| 137 | |||
| 110 | static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, | 138 | static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, |
| 111 | u16 blue, int regno) | 139 | u16 blue, int regno) |
| 112 | { | 140 | { |
| @@ -267,8 +295,12 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width, | |||
| 267 | dev_priv->fbdev_info = info; | 295 | dev_priv->fbdev_info = info; |
| 268 | 296 | ||
| 269 | strcpy(info->fix.id, "nouveaufb"); | 297 | strcpy(info->fix.id, "nouveaufb"); |
| 270 | info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA | | 298 | if (nouveau_nofbaccel) |
| 271 | FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_IMAGEBLIT; | 299 | info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_DISABLED; |
| 300 | else | ||
| 301 | info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA | | ||
| 302 | FBINFO_HWACCEL_FILLRECT | | ||
| 303 | FBINFO_HWACCEL_IMAGEBLIT; | ||
| 272 | info->fbops = &nouveau_fbcon_ops; | 304 | info->fbops = &nouveau_fbcon_ops; |
| 273 | info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset - | 305 | info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset - |
| 274 | dev_priv->vm_vram_base; | 306 | dev_priv->vm_vram_base; |
| @@ -316,13 +348,15 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width, | |||
| 316 | par->nouveau_fb = nouveau_fb; | 348 | par->nouveau_fb = nouveau_fb; |
| 317 | par->dev = dev; | 349 | par->dev = dev; |
| 318 | 350 | ||
| 319 | if (dev_priv->channel) { | 351 | if (dev_priv->channel && !nouveau_nofbaccel) { |
| 320 | switch (dev_priv->card_type) { | 352 | switch (dev_priv->card_type) { |
| 321 | case NV_50: | 353 | case NV_50: |
| 322 | nv50_fbcon_accel_init(info); | 354 | nv50_fbcon_accel_init(info); |
| 355 | info->fbops = &nv50_fbcon_ops; | ||
| 323 | break; | 356 | break; |
| 324 | default: | 357 | default: |
| 325 | nv04_fbcon_accel_init(info); | 358 | nv04_fbcon_accel_init(info); |
| 359 | info->fbops = &nv04_fbcon_ops; | ||
| 326 | break; | 360 | break; |
| 327 | }; | 361 | }; |
| 328 | } | 362 | } |
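Editor's note: instead of patching fb_fillrect/fb_copyarea/fb_imageblit into the shared nouveau_fbcon_ops after accel init (as nv04_fbcon.c and nv50_fbcon.c used to do further down), the patch introduces per-generation fb_ops tables and swaps the whole pointer once acceleration is up; with nofbaccel set, or no channel, the software-only ops stay in place and FBINFO_HWACCEL_DISABLED is reported. A condensed sketch of the new-side selection logic:

    info->fbops = &nouveau_fbcon_ops;               /* software-rendering default   */

    if (dev_priv->channel && !nouveau_nofbaccel) {
        switch (dev_priv->card_type) {
        case NV_50:
            nv50_fbcon_accel_init(info);
            info->fbops = &nv50_fbcon_ops;          /* accelerated NV50 hooks       */
            break;
        default:
            nv04_fbcon_accel_init(info);
            info->fbops = &nv04_fbcon_ops;          /* accelerated NV04..NV4x hooks */
            break;
        }
    }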
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h index 462e0b87b4bd..f9c34e1a8c11 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h | |||
| @@ -40,7 +40,13 @@ int nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb); | |||
| 40 | void nouveau_fbcon_restore(void); | 40 | void nouveau_fbcon_restore(void); |
| 41 | void nouveau_fbcon_zfill(struct drm_device *dev); | 41 | void nouveau_fbcon_zfill(struct drm_device *dev); |
| 42 | 42 | ||
| 43 | void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region); | ||
| 44 | void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect); | ||
| 45 | void nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image); | ||
| 43 | int nv04_fbcon_accel_init(struct fb_info *info); | 46 | int nv04_fbcon_accel_init(struct fb_info *info); |
| 47 | void nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect); | ||
| 48 | void nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region); | ||
| 49 | void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image); | ||
| 44 | int nv50_fbcon_accel_init(struct fb_info *info); | 50 | int nv50_fbcon_accel_init(struct fb_info *info); |
| 45 | 51 | ||
| 46 | void nouveau_fbcon_gpu_lockup(struct fb_info *info); | 52 | void nouveau_fbcon_gpu_lockup(struct fb_info *info); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 6ac804b0c9f9..70cc30803e3b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c | |||
| @@ -925,7 +925,9 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data, | |||
| 925 | } | 925 | } |
| 926 | 926 | ||
| 927 | if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) { | 927 | if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) { |
| 928 | spin_lock(&nvbo->bo.lock); | ||
| 928 | ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait); | 929 | ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait); |
| 930 | spin_unlock(&nvbo->bo.lock); | ||
| 929 | } else { | 931 | } else { |
| 930 | ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait); | 932 | ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait); |
| 931 | if (ret == 0) | 933 | if (ret == 0) |
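Editor's note: as the nouveau_gem.c hunk suggests, ttm_bo_wait() expects the buffer object's spinlock to be held by the caller, and the non-blocking CPU_PREP path was calling it unlocked. The corrected shape:

    if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) {
        spin_lock(&nvbo->bo.lock);              /* ttm_bo_wait() needs bo.lock held */
        ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
        spin_unlock(&nvbo->bo.lock);
    } else {
        ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
    }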
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.c b/drivers/gpu/drm/nouveau/nouveau_grctx.c index 419f4c2b3b89..c7ebec696747 100644 --- a/drivers/gpu/drm/nouveau/nouveau_grctx.c +++ b/drivers/gpu/drm/nouveau/nouveau_grctx.c | |||
| @@ -97,8 +97,8 @@ nouveau_grctx_prog_load(struct drm_device *dev) | |||
| 97 | } | 97 | } |
| 98 | 98 | ||
| 99 | pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL); | 99 | pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL); |
| 100 | if (!pgraph->ctxprog) { | 100 | if (!pgraph->ctxvals) { |
| 101 | NV_ERROR(dev, "OOM copying ctxprog\n"); | 101 | NV_ERROR(dev, "OOM copying ctxvals\n"); |
| 102 | release_firmware(fw); | 102 | release_firmware(fw); |
| 103 | nouveau_grctx_fini(dev); | 103 | nouveau_grctx_fini(dev); |
| 104 | return -ENOMEM; | 104 | return -ENOMEM; |
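Editor's note: the nouveau_grctx.c hunk fixes a copy-paste bug; after allocating pgraph->ctxvals the old code tested pgraph->ctxprog (already known to be valid), so a failed allocation went undetected and the error message named the wrong buffer. The corrected pattern, check the pointer you just allocated:

    pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL);
    if (!pgraph->ctxvals) {                         /* test the new allocation      */
        NV_ERROR(dev, "OOM copying ctxvals\n");
        release_firmware(fw);
        nouveau_grctx_fini(dev);
        return -ENOMEM;
    }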
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c index 3b9bad66162a..447f9f69d6b1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_irq.c +++ b/drivers/gpu/drm/nouveau/nouveau_irq.c | |||
| @@ -211,6 +211,20 @@ nouveau_fifo_irq_handler(struct drm_device *dev) | |||
| 211 | get + 4); | 211 | get + 4); |
| 212 | } | 212 | } |
| 213 | 213 | ||
| 214 | if (status & NV_PFIFO_INTR_SEMAPHORE) { | ||
| 215 | uint32_t sem; | ||
| 216 | |||
| 217 | status &= ~NV_PFIFO_INTR_SEMAPHORE; | ||
| 218 | nv_wr32(dev, NV03_PFIFO_INTR_0, | ||
| 219 | NV_PFIFO_INTR_SEMAPHORE); | ||
| 220 | |||
| 221 | sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE); | ||
| 222 | nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1); | ||
| 223 | |||
| 224 | nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4); | ||
| 225 | nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1); | ||
| 226 | } | ||
| 227 | |||
| 214 | if (status) { | 228 | if (status) { |
| 215 | NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n", | 229 | NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n", |
| 216 | status, chid); | 230 | status, chid); |
| @@ -566,86 +580,99 @@ nouveau_pgraph_irq_handler(struct drm_device *dev) | |||
| 566 | static void | 580 | static void |
| 567 | nv50_pgraph_irq_handler(struct drm_device *dev) | 581 | nv50_pgraph_irq_handler(struct drm_device *dev) |
| 568 | { | 582 | { |
| 569 | uint32_t status, nsource; | 583 | uint32_t status; |
| 570 | 584 | ||
| 571 | status = nv_rd32(dev, NV03_PGRAPH_INTR); | 585 | while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) { |
| 572 | nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); | 586 | uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); |
| 573 | 587 | ||
| 574 | if (status & 0x00000001) { | 588 | if (status & 0x00000001) { |
| 575 | nouveau_pgraph_intr_notify(dev, nsource); | 589 | nouveau_pgraph_intr_notify(dev, nsource); |
| 576 | status &= ~0x00000001; | 590 | status &= ~0x00000001; |
| 577 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001); | 591 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001); |
| 578 | } | 592 | } |
| 579 | 593 | ||
| 580 | if (status & 0x00000010) { | 594 | if (status & 0x00000010) { |
| 581 | nouveau_pgraph_intr_error(dev, nsource | | 595 | nouveau_pgraph_intr_error(dev, nsource | |
| 582 | NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD); | 596 | NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD); |
| 583 | 597 | ||
| 584 | status &= ~0x00000010; | 598 | status &= ~0x00000010; |
| 585 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010); | 599 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010); |
| 586 | } | 600 | } |
| 587 | 601 | ||
| 588 | if (status & 0x00001000) { | 602 | if (status & 0x00001000) { |
| 589 | nv_wr32(dev, 0x400500, 0x00000000); | 603 | nv_wr32(dev, 0x400500, 0x00000000); |
| 590 | nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH); | 604 | nv_wr32(dev, NV03_PGRAPH_INTR, |
| 591 | nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev, | 605 | NV_PGRAPH_INTR_CONTEXT_SWITCH); |
| 592 | NV40_PGRAPH_INTR_EN) & ~NV_PGRAPH_INTR_CONTEXT_SWITCH); | 606 | nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev, |
| 593 | nv_wr32(dev, 0x400500, 0x00010001); | 607 | NV40_PGRAPH_INTR_EN) & |
| 608 | ~NV_PGRAPH_INTR_CONTEXT_SWITCH); | ||
| 609 | nv_wr32(dev, 0x400500, 0x00010001); | ||
| 594 | 610 | ||
| 595 | nv50_graph_context_switch(dev); | 611 | nv50_graph_context_switch(dev); |
| 596 | 612 | ||
| 597 | status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; | 613 | status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; |
| 598 | } | 614 | } |
| 599 | 615 | ||
| 600 | if (status & 0x00100000) { | 616 | if (status & 0x00100000) { |
| 601 | nouveau_pgraph_intr_error(dev, nsource | | 617 | nouveau_pgraph_intr_error(dev, nsource | |
| 602 | NV03_PGRAPH_NSOURCE_DATA_ERROR); | 618 | NV03_PGRAPH_NSOURCE_DATA_ERROR); |
| 603 | 619 | ||
| 604 | status &= ~0x00100000; | 620 | status &= ~0x00100000; |
| 605 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000); | 621 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000); |
| 606 | } | 622 | } |
| 607 | 623 | ||
| 608 | if (status & 0x00200000) { | 624 | if (status & 0x00200000) { |
| 609 | int r; | 625 | int r; |
| 610 | 626 | ||
| 611 | nouveau_pgraph_intr_error(dev, nsource | | 627 | nouveau_pgraph_intr_error(dev, nsource | |
| 612 | NV03_PGRAPH_NSOURCE_PROTECTION_ERROR); | 628 | NV03_PGRAPH_NSOURCE_PROTECTION_ERROR); |
| 613 | 629 | ||
| 614 | NV_ERROR(dev, "magic set 1:\n"); | 630 | NV_ERROR(dev, "magic set 1:\n"); |
| 615 | for (r = 0x408900; r <= 0x408910; r += 4) | 631 | for (r = 0x408900; r <= 0x408910; r += 4) |
| 616 | NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); | 632 | NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, |
| 617 | nv_wr32(dev, 0x408900, nv_rd32(dev, 0x408904) | 0xc0000000); | 633 | nv_rd32(dev, r)); |
| 618 | for (r = 0x408e08; r <= 0x408e24; r += 4) | 634 | nv_wr32(dev, 0x408900, |
| 619 | NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); | 635 | nv_rd32(dev, 0x408904) | 0xc0000000); |
| 620 | nv_wr32(dev, 0x408e08, nv_rd32(dev, 0x408e08) | 0xc0000000); | 636 | for (r = 0x408e08; r <= 0x408e24; r += 4) |
| 621 | 637 | NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, | |
| 622 | NV_ERROR(dev, "magic set 2:\n"); | 638 | nv_rd32(dev, r)); |
| 623 | for (r = 0x409900; r <= 0x409910; r += 4) | 639 | nv_wr32(dev, 0x408e08, |
| 624 | NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); | 640 | nv_rd32(dev, 0x408e08) | 0xc0000000); |
| 625 | nv_wr32(dev, 0x409900, nv_rd32(dev, 0x409904) | 0xc0000000); | 641 | |
| 626 | for (r = 0x409e08; r <= 0x409e24; r += 4) | 642 | NV_ERROR(dev, "magic set 2:\n"); |
| 627 | NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); | 643 | for (r = 0x409900; r <= 0x409910; r += 4) |
| 628 | nv_wr32(dev, 0x409e08, nv_rd32(dev, 0x409e08) | 0xc0000000); | 644 | NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, |
| 629 | 645 | nv_rd32(dev, r)); | |
| 630 | status &= ~0x00200000; | 646 | nv_wr32(dev, 0x409900, |
| 631 | nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource); | 647 | nv_rd32(dev, 0x409904) | 0xc0000000); |
| 632 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000); | 648 | for (r = 0x409e08; r <= 0x409e24; r += 4) |
| 633 | } | 649 | NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, |
| 650 | nv_rd32(dev, r)); | ||
| 651 | nv_wr32(dev, 0x409e08, | ||
| 652 | nv_rd32(dev, 0x409e08) | 0xc0000000); | ||
| 653 | |||
| 654 | status &= ~0x00200000; | ||
| 655 | nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource); | ||
| 656 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000); | ||
| 657 | } | ||
| 634 | 658 | ||
| 635 | if (status) { | 659 | if (status) { |
| 636 | NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status); | 660 | NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", |
| 637 | nv_wr32(dev, NV03_PGRAPH_INTR, status); | 661 | status); |
| 638 | } | 662 | nv_wr32(dev, NV03_PGRAPH_INTR, status); |
| 663 | } | ||
| 639 | 664 | ||
| 640 | { | 665 | { |
| 641 | const int isb = (1 << 16) | (1 << 0); | 666 | const int isb = (1 << 16) | (1 << 0); |
| 642 | 667 | ||
| 643 | if ((nv_rd32(dev, 0x400500) & isb) != isb) | 668 | if ((nv_rd32(dev, 0x400500) & isb) != isb) |
| 644 | nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | isb); | 669 | nv_wr32(dev, 0x400500, |
| 645 | nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31)); | 670 | nv_rd32(dev, 0x400500) | isb); |
| 671 | } | ||
| 646 | } | 672 | } |
| 647 | 673 | ||
| 648 | nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); | 674 | nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); |
| 675 | nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31)); | ||
| 649 | } | 676 | } |
| 650 | 677 | ||
| 651 | static void | 678 | static void |
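Editor's note: the nouveau_irq.c changes are twofold: PFIFO gains a handler for NV_PFIFO_INTR_SEMAPHORE, and the NV50 PGRAPH handler is wrapped in a while loop so interrupts raised while earlier ones are being serviced are not silently dropped. The loop-until-the-status-register-reads-zero pattern, condensed from the new-side column (per-bit handling and the 0x400500 fixup elided):

    while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
        uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);

        /* handle and acknowledge each known bit, clearing it from 'status' */

        if (status) {                               /* anything unrecognised        */
            NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status);
            nv_wr32(dev, NV03_PGRAPH_INTR, status);
        }
    }
    nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);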
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c index 6c66a34b6345..d99dc087f9b1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_notifier.c +++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c | |||
| @@ -34,15 +34,20 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan) | |||
| 34 | { | 34 | { |
| 35 | struct drm_device *dev = chan->dev; | 35 | struct drm_device *dev = chan->dev; |
| 36 | struct nouveau_bo *ntfy = NULL; | 36 | struct nouveau_bo *ntfy = NULL; |
| 37 | uint32_t flags; | ||
| 37 | int ret; | 38 | int ret; |
| 38 | 39 | ||
| 39 | ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, nouveau_vram_notify ? | 40 | if (nouveau_vram_notify) |
| 40 | TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT, | 41 | flags = TTM_PL_FLAG_VRAM; |
| 42 | else | ||
| 43 | flags = TTM_PL_FLAG_TT; | ||
| 44 | |||
| 45 | ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags, | ||
| 41 | 0, 0x0000, false, true, &ntfy); | 46 | 0, 0x0000, false, true, &ntfy); |
| 42 | if (ret) | 47 | if (ret) |
| 43 | return ret; | 48 | return ret; |
| 44 | 49 | ||
| 45 | ret = nouveau_bo_pin(ntfy, TTM_PL_FLAG_VRAM); | 50 | ret = nouveau_bo_pin(ntfy, flags); |
| 46 | if (ret) | 51 | if (ret) |
| 47 | goto out_err; | 52 | goto out_err; |
| 48 | 53 | ||
| @@ -128,6 +133,8 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, | |||
| 128 | target = NV_DMA_TARGET_PCI; | 133 | target = NV_DMA_TARGET_PCI; |
| 129 | } else { | 134 | } else { |
| 130 | target = NV_DMA_TARGET_AGP; | 135 | target = NV_DMA_TARGET_AGP; |
| 136 | if (dev_priv->card_type >= NV_50) | ||
| 137 | offset += dev_priv->vm_gart_base; | ||
| 131 | } | 138 | } |
| 132 | } else { | 139 | } else { |
| 133 | NV_ERROR(dev, "Bad DMA target, mem_type %d!\n", | 140 | NV_ERROR(dev, "Bad DMA target, mem_type %d!\n", |
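Editor's note: the notifier buffer is now pinned with the same placement flags it was created with, rather than always being pinned to VRAM even when vram_notify is off; on NV50+ the GART-backed DMA object offset additionally has vm_gart_base folded in. The flag selection, written compactly (the patch uses an equivalent if/else):

    uint32_t flags = nouveau_vram_notify ? TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT;

    ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags,
                          0, 0x0000, false, true, &ntfy);
    if (ret)
        return ret;

    ret = nouveau_bo_pin(ntfy, flags);              /* pin where it was created     */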
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index 6c2cf81716df..e7c100ba63a1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c | |||
| @@ -885,11 +885,12 @@ int | |||
| 885 | nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class, | 885 | nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class, |
| 886 | struct nouveau_gpuobj **gpuobj_ret) | 886 | struct nouveau_gpuobj **gpuobj_ret) |
| 887 | { | 887 | { |
| 888 | struct drm_nouveau_private *dev_priv = chan->dev->dev_private; | 888 | struct drm_nouveau_private *dev_priv; |
| 889 | struct nouveau_gpuobj *gpuobj; | 889 | struct nouveau_gpuobj *gpuobj; |
| 890 | 890 | ||
| 891 | if (!chan || !gpuobj_ret || *gpuobj_ret != NULL) | 891 | if (!chan || !gpuobj_ret || *gpuobj_ret != NULL) |
| 892 | return -EINVAL; | 892 | return -EINVAL; |
| 893 | dev_priv = chan->dev->dev_private; | ||
| 893 | 894 | ||
| 894 | gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); | 895 | gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); |
| 895 | if (!gpuobj) | 896 | if (!gpuobj) |
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h index 251f1b3b38b9..aa9b310e41be 100644 --- a/drivers/gpu/drm/nouveau/nouveau_reg.h +++ b/drivers/gpu/drm/nouveau/nouveau_reg.h | |||
| @@ -99,6 +99,7 @@ | |||
| 99 | * the card will hang early on in the X init process. | 99 | * the card will hang early on in the X init process. |
| 100 | */ | 100 | */ |
| 101 | # define NV_PMC_ENABLE_UNK13 (1<<13) | 101 | # define NV_PMC_ENABLE_UNK13 (1<<13) |
| 102 | #define NV40_PMC_GRAPH_UNITS 0x00001540 | ||
| 102 | #define NV40_PMC_BACKLIGHT 0x000015f0 | 103 | #define NV40_PMC_BACKLIGHT 0x000015f0 |
| 103 | # define NV40_PMC_BACKLIGHT_MASK 0x001f0000 | 104 | # define NV40_PMC_BACKLIGHT_MASK 0x001f0000 |
| 104 | #define NV40_PMC_1700 0x00001700 | 105 | #define NV40_PMC_1700 0x00001700 |
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index 4c7f1e403e80..ed1590577b6c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c | |||
| @@ -54,11 +54,12 @@ static void | |||
| 54 | nouveau_sgdma_clear(struct ttm_backend *be) | 54 | nouveau_sgdma_clear(struct ttm_backend *be) |
| 55 | { | 55 | { |
| 56 | struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; | 56 | struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; |
| 57 | struct drm_device *dev = nvbe->dev; | 57 | struct drm_device *dev; |
| 58 | |||
| 59 | NV_DEBUG(nvbe->dev, "\n"); | ||
| 60 | 58 | ||
| 61 | if (nvbe && nvbe->pages) { | 59 | if (nvbe && nvbe->pages) { |
| 60 | dev = nvbe->dev; | ||
| 61 | NV_DEBUG(dev, "\n"); | ||
| 62 | |||
| 62 | if (nvbe->bound) | 63 | if (nvbe->bound) |
| 63 | be->func->unbind(be); | 64 | be->func->unbind(be); |
| 64 | 65 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index f2d0187ba152..a4851af5b05e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c | |||
| @@ -310,6 +310,14 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
| 310 | static unsigned int | 310 | static unsigned int |
| 311 | nouveau_vga_set_decode(void *priv, bool state) | 311 | nouveau_vga_set_decode(void *priv, bool state) |
| 312 | { | 312 | { |
| 313 | struct drm_device *dev = priv; | ||
| 314 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
| 315 | |||
| 316 | if (dev_priv->chipset >= 0x40) | ||
| 317 | nv_wr32(dev, 0x88054, state); | ||
| 318 | else | ||
| 319 | nv_wr32(dev, 0x1854, state); | ||
| 320 | |||
| 313 | if (state) | 321 | if (state) |
| 314 | return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | | 322 | return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | |
| 315 | VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; | 323 | VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; |
| @@ -427,15 +435,19 @@ nouveau_card_init(struct drm_device *dev) | |||
| 427 | if (ret) | 435 | if (ret) |
| 428 | goto out_timer; | 436 | goto out_timer; |
| 429 | 437 | ||
| 430 | /* PGRAPH */ | 438 | if (nouveau_noaccel) |
| 431 | ret = engine->graph.init(dev); | 439 | engine->graph.accel_blocked = true; |
| 432 | if (ret) | 440 | else { |
| 433 | goto out_fb; | 441 | /* PGRAPH */ |
| 442 | ret = engine->graph.init(dev); | ||
| 443 | if (ret) | ||
| 444 | goto out_fb; | ||
| 434 | 445 | ||
| 435 | /* PFIFO */ | 446 | /* PFIFO */ |
| 436 | ret = engine->fifo.init(dev); | 447 | ret = engine->fifo.init(dev); |
| 437 | if (ret) | 448 | if (ret) |
| 438 | goto out_graph; | 449 | goto out_graph; |
| 450 | } | ||
| 439 | 451 | ||
| 440 | /* this call irq_preinstall, register irq handler and | 452 | /* this call irq_preinstall, register irq handler and |
| 441 | * call irq_postinstall | 453 | * call irq_postinstall |
| @@ -479,9 +491,11 @@ nouveau_card_init(struct drm_device *dev) | |||
| 479 | out_irq: | 491 | out_irq: |
| 480 | drm_irq_uninstall(dev); | 492 | drm_irq_uninstall(dev); |
| 481 | out_fifo: | 493 | out_fifo: |
| 482 | engine->fifo.takedown(dev); | 494 | if (!nouveau_noaccel) |
| 495 | engine->fifo.takedown(dev); | ||
| 483 | out_graph: | 496 | out_graph: |
| 484 | engine->graph.takedown(dev); | 497 | if (!nouveau_noaccel) |
| 498 | engine->graph.takedown(dev); | ||
| 485 | out_fb: | 499 | out_fb: |
| 486 | engine->fb.takedown(dev); | 500 | engine->fb.takedown(dev); |
| 487 | out_timer: | 501 | out_timer: |
| @@ -518,8 +532,10 @@ static void nouveau_card_takedown(struct drm_device *dev) | |||
| 518 | dev_priv->channel = NULL; | 532 | dev_priv->channel = NULL; |
| 519 | } | 533 | } |
| 520 | 534 | ||
| 521 | engine->fifo.takedown(dev); | 535 | if (!nouveau_noaccel) { |
| 522 | engine->graph.takedown(dev); | 536 | engine->fifo.takedown(dev); |
| 537 | engine->graph.takedown(dev); | ||
| 538 | } | ||
| 523 | engine->fb.takedown(dev); | 539 | engine->fb.takedown(dev); |
| 524 | engine->timer.takedown(dev); | 540 | engine->timer.takedown(dev); |
| 525 | engine->mc.takedown(dev); | 541 | engine->mc.takedown(dev); |
| @@ -817,6 +833,15 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data, | |||
| 817 | case NOUVEAU_GETPARAM_VM_VRAM_BASE: | 833 | case NOUVEAU_GETPARAM_VM_VRAM_BASE: |
| 818 | getparam->value = dev_priv->vm_vram_base; | 834 | getparam->value = dev_priv->vm_vram_base; |
| 819 | break; | 835 | break; |
| 836 | case NOUVEAU_GETPARAM_GRAPH_UNITS: | ||
| 837 | /* NV40 and NV50 versions are quite different, but register | ||
| 838 | * address is the same. User is supposed to know the card | ||
| 839 | * family anyway... */ | ||
| 840 | if (dev_priv->chipset >= 0x40) { | ||
| 841 | getparam->value = nv_rd32(dev, NV40_PMC_GRAPH_UNITS); | ||
| 842 | break; | ||
| 843 | } | ||
| 844 | /* FALLTHRU */ | ||
| 820 | default: | 845 | default: |
| 821 | NV_ERROR(dev, "unknown parameter %lld\n", getparam->param); | 846 | NV_ERROR(dev, "unknown parameter %lld\n", getparam->param); |
| 822 | return -EINVAL; | 847 | return -EINVAL; |
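Editor's note: with noaccel set, nouveau_state.c neither initialises nor tears down PGRAPH and PFIFO and simply reports graph.accel_blocked, so userspace falls back to unaccelerated paths. The GETPARAM hunk adds NOUVEAU_GETPARAM_GRAPH_UNITS, which deliberately falls through to the EINVAL path on pre-NV40 chipsets; condensed:

    case NOUVEAU_GETPARAM_GRAPH_UNITS:
        if (dev_priv->chipset >= 0x40) {
            getparam->value = nv_rd32(dev, NV40_PMC_GRAPH_UNITS);
            break;
        }
        /* FALLTHRU: older chips report "unknown parameter" */
    default:
        NV_ERROR(dev, "unknown parameter %lld\n", getparam->param);
        return -EINVAL;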
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c index d910873c1368..fd01caabd5c3 100644 --- a/drivers/gpu/drm/nouveau/nv04_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c | |||
| @@ -27,7 +27,7 @@ | |||
| 27 | #include "nouveau_dma.h" | 27 | #include "nouveau_dma.h" |
| 28 | #include "nouveau_fbcon.h" | 28 | #include "nouveau_fbcon.h" |
| 29 | 29 | ||
| 30 | static void | 30 | void |
| 31 | nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) | 31 | nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) |
| 32 | { | 32 | { |
| 33 | struct nouveau_fbcon_par *par = info->par; | 33 | struct nouveau_fbcon_par *par = info->par; |
| @@ -54,7 +54,7 @@ nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) | |||
| 54 | FIRE_RING(chan); | 54 | FIRE_RING(chan); |
| 55 | } | 55 | } |
| 56 | 56 | ||
| 57 | static void | 57 | void |
| 58 | nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) | 58 | nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) |
| 59 | { | 59 | { |
| 60 | struct nouveau_fbcon_par *par = info->par; | 60 | struct nouveau_fbcon_par *par = info->par; |
| @@ -88,7 +88,7 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) | |||
| 88 | FIRE_RING(chan); | 88 | FIRE_RING(chan); |
| 89 | } | 89 | } |
| 90 | 90 | ||
| 91 | static void | 91 | void |
| 92 | nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | 92 | nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) |
| 93 | { | 93 | { |
| 94 | struct nouveau_fbcon_par *par = info->par; | 94 | struct nouveau_fbcon_par *par = info->par; |
| @@ -307,9 +307,6 @@ nv04_fbcon_accel_init(struct fb_info *info) | |||
| 307 | 307 | ||
| 308 | FIRE_RING(chan); | 308 | FIRE_RING(chan); |
| 309 | 309 | ||
| 310 | info->fbops->fb_fillrect = nv04_fbcon_fillrect; | ||
| 311 | info->fbops->fb_copyarea = nv04_fbcon_copyarea; | ||
| 312 | info->fbops->fb_imageblit = nv04_fbcon_imageblit; | ||
| 313 | return 0; | 310 | return 0; |
| 314 | } | 311 | } |
| 315 | 312 | ||
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c index 40b7360841f8..d1a651e3400c 100644 --- a/drivers/gpu/drm/nouveau/nv50_crtc.c +++ b/drivers/gpu/drm/nouveau/nv50_crtc.c | |||
| @@ -298,14 +298,17 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk) | |||
| 298 | static void | 298 | static void |
| 299 | nv50_crtc_destroy(struct drm_crtc *crtc) | 299 | nv50_crtc_destroy(struct drm_crtc *crtc) |
| 300 | { | 300 | { |
| 301 | struct drm_device *dev = crtc->dev; | 301 | struct drm_device *dev; |
| 302 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | 302 | struct nouveau_crtc *nv_crtc; |
| 303 | |||
| 304 | NV_DEBUG_KMS(dev, "\n"); | ||
| 305 | 303 | ||
| 306 | if (!crtc) | 304 | if (!crtc) |
| 307 | return; | 305 | return; |
| 308 | 306 | ||
| 307 | dev = crtc->dev; | ||
| 308 | nv_crtc = nouveau_crtc(crtc); | ||
| 309 | |||
| 310 | NV_DEBUG_KMS(dev, "\n"); | ||
| 311 | |||
| 309 | drm_crtc_cleanup(&nv_crtc->base); | 312 | drm_crtc_cleanup(&nv_crtc->base); |
| 310 | 313 | ||
| 311 | nv50_cursor_fini(nv_crtc); | 314 | nv50_cursor_fini(nv_crtc); |
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c index e4f279ee61cf..0f57cdf7ccb2 100644 --- a/drivers/gpu/drm/nouveau/nv50_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c | |||
| @@ -3,7 +3,7 @@ | |||
| 3 | #include "nouveau_dma.h" | 3 | #include "nouveau_dma.h" |
| 4 | #include "nouveau_fbcon.h" | 4 | #include "nouveau_fbcon.h" |
| 5 | 5 | ||
| 6 | static void | 6 | void |
| 7 | nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) | 7 | nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) |
| 8 | { | 8 | { |
| 9 | struct nouveau_fbcon_par *par = info->par; | 9 | struct nouveau_fbcon_par *par = info->par; |
| @@ -46,7 +46,7 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) | |||
| 46 | FIRE_RING(chan); | 46 | FIRE_RING(chan); |
| 47 | } | 47 | } |
| 48 | 48 | ||
| 49 | static void | 49 | void |
| 50 | nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) | 50 | nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) |
| 51 | { | 51 | { |
| 52 | struct nouveau_fbcon_par *par = info->par; | 52 | struct nouveau_fbcon_par *par = info->par; |
| @@ -81,7 +81,7 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) | |||
| 81 | FIRE_RING(chan); | 81 | FIRE_RING(chan); |
| 82 | } | 82 | } |
| 83 | 83 | ||
| 84 | static void | 84 | void |
| 85 | nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | 85 | nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) |
| 86 | { | 86 | { |
| 87 | struct nouveau_fbcon_par *par = info->par; | 87 | struct nouveau_fbcon_par *par = info->par; |
| @@ -262,9 +262,6 @@ nv50_fbcon_accel_init(struct fb_info *info) | |||
| 262 | OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys + | 262 | OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys + |
| 263 | dev_priv->vm_vram_base); | 263 | dev_priv->vm_vram_base); |
| 264 | 264 | ||
| 265 | info->fbops->fb_fillrect = nv50_fbcon_fillrect; | ||
| 266 | info->fbops->fb_copyarea = nv50_fbcon_copyarea; | ||
| 267 | info->fbops->fb_imageblit = nv50_fbcon_imageblit; | ||
| 268 | return 0; | 265 | return 0; |
| 269 | } | 266 | } |
| 270 | 267 | ||
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c index 32b244bcb482..204a79ff10f4 100644 --- a/drivers/gpu/drm/nouveau/nv50_fifo.c +++ b/drivers/gpu/drm/nouveau/nv50_fifo.c | |||
| @@ -317,17 +317,20 @@ void | |||
| 317 | nv50_fifo_destroy_context(struct nouveau_channel *chan) | 317 | nv50_fifo_destroy_context(struct nouveau_channel *chan) |
| 318 | { | 318 | { |
| 319 | struct drm_device *dev = chan->dev; | 319 | struct drm_device *dev = chan->dev; |
| 320 | struct nouveau_gpuobj_ref *ramfc = chan->ramfc; | ||
| 320 | 321 | ||
| 321 | NV_DEBUG(dev, "ch%d\n", chan->id); | 322 | NV_DEBUG(dev, "ch%d\n", chan->id); |
| 322 | 323 | ||
| 323 | nouveau_gpuobj_ref_del(dev, &chan->ramfc); | 324 | /* This will ensure the channel is seen as disabled. */ |
| 324 | nouveau_gpuobj_ref_del(dev, &chan->cache); | 325 | chan->ramfc = NULL; |
| 325 | |||
| 326 | nv50_fifo_channel_disable(dev, chan->id, false); | 326 | nv50_fifo_channel_disable(dev, chan->id, false); |
| 327 | 327 | ||
| 328 | /* Dummy channel, also used on ch 127 */ | 328 | /* Dummy channel, also used on ch 127 */ |
| 329 | if (chan->id == 0) | 329 | if (chan->id == 0) |
| 330 | nv50_fifo_channel_disable(dev, 127, false); | 330 | nv50_fifo_channel_disable(dev, 127, false); |
| 331 | |||
| 332 | nouveau_gpuobj_ref_del(dev, &ramfc); | ||
| 333 | nouveau_gpuobj_ref_del(dev, &chan->cache); | ||
| 331 | } | 334 | } |
| 332 | 335 | ||
| 333 | int | 336 | int |
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index 20319e59d368..6d504801b514 100644 --- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c | |||
| @@ -165,6 +165,12 @@ nv50_graph_channel(struct drm_device *dev) | |||
| 165 | uint32_t inst; | 165 | uint32_t inst; |
| 166 | int i; | 166 | int i; |
| 167 | 167 | ||
| 168 | /* Be sure we're not in the middle of a context switch or bad things | ||
| 169 | * will happen, such as unloading the wrong pgraph context. | ||
| 170 | */ | ||
| 171 | if (!nv_wait(0x400300, 0x00000001, 0x00000000)) | ||
| 172 | NV_ERROR(dev, "Ctxprog is still running\n"); | ||
| 173 | |||
| 168 | inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR); | 174 | inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR); |
| 169 | if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED)) | 175 | if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED)) |
| 170 | return NULL; | 176 | return NULL; |
| @@ -275,7 +281,7 @@ nv50_graph_load_context(struct nouveau_channel *chan) | |||
| 275 | int | 281 | int |
| 276 | nv50_graph_unload_context(struct drm_device *dev) | 282 | nv50_graph_unload_context(struct drm_device *dev) |
| 277 | { | 283 | { |
| 278 | uint32_t inst, fifo = nv_rd32(dev, 0x400500); | 284 | uint32_t inst; |
| 279 | 285 | ||
| 280 | inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR); | 286 | inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR); |
| 281 | if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED)) | 287 | if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED)) |
| @@ -283,12 +289,10 @@ nv50_graph_unload_context(struct drm_device *dev) | |||
| 283 | inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE; | 289 | inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE; |
| 284 | 290 | ||
| 285 | nouveau_wait_for_idle(dev); | 291 | nouveau_wait_for_idle(dev); |
| 286 | nv_wr32(dev, 0x400500, fifo & ~1); | ||
| 287 | nv_wr32(dev, 0x400784, inst); | 292 | nv_wr32(dev, 0x400784, inst); |
| 288 | nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20); | 293 | nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20); |
| 289 | nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01); | 294 | nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01); |
| 290 | nouveau_wait_for_idle(dev); | 295 | nouveau_wait_for_idle(dev); |
| 291 | nv_wr32(dev, 0x400500, fifo); | ||
| 292 | 296 | ||
| 293 | nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst); | 297 | nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst); |
| 294 | return 0; | 298 | return 0; |
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c index ecf1936b8224..c2fff543b06f 100644 --- a/drivers/gpu/drm/nouveau/nv50_sor.c +++ b/drivers/gpu/drm/nouveau/nv50_sor.c | |||
| @@ -101,6 +101,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode) | |||
| 101 | struct nouveau_encoder *nvenc = nouveau_encoder(enc); | 101 | struct nouveau_encoder *nvenc = nouveau_encoder(enc); |
| 102 | 102 | ||
| 103 | if (nvenc == nv_encoder || | 103 | if (nvenc == nv_encoder || |
| 104 | nvenc->disconnect != nv50_sor_disconnect || | ||
| 104 | nvenc->dcb->or != nv_encoder->dcb->or) | 105 | nvenc->dcb->or != nv_encoder->dcb->or) |
| 105 | continue; | 106 | continue; |
| 106 | 107 | ||
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig index 5982321be4d5..1c02d23f6fcc 100644 --- a/drivers/gpu/drm/radeon/Kconfig +++ b/drivers/gpu/drm/radeon/Kconfig | |||
| @@ -1,10 +1,14 @@ | |||
| 1 | config DRM_RADEON_KMS | 1 | config DRM_RADEON_KMS |
| 2 | bool "Enable modesetting on radeon by default" | 2 | bool "Enable modesetting on radeon by default - NEW DRIVER" |
| 3 | depends on DRM_RADEON | 3 | depends on DRM_RADEON |
| 4 | help | 4 | help |
| 5 | Choose this option if you want kernel modesetting enabled by default, | 5 | Choose this option if you want kernel modesetting enabled by default. |
| 6 | and you have a new enough userspace to support this. Running old | 6 | |
| 7 | userspaces with this enabled will cause pain. | 7 | This is a completely new driver. It's only part of the existing drm |
| 8 | for compatibility reasons. It requires an entirely different graphics | ||
| 9 | stack above it and works very differently from the old drm stack. | ||
| 10 | i.e. don't enable this unless you know what you are doing it may | ||
| 11 | cause issues or bugs compared to the previous userspace driver stack. | ||
| 8 | 12 | ||
| 9 | When kernel modesetting is enabled the IOCTL of radeon/drm | 13 | When kernel modesetting is enabled the IOCTL of radeon/drm |
| 10 | driver are considered as invalid and an error message is printed | 14 | driver are considered as invalid and an error message is printed |
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 71060114d5de..b32eeea5bb8b 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c | |||
| @@ -332,11 +332,13 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes, | |||
| 332 | PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args; | 332 | PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args; |
| 333 | int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction); | 333 | int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction); |
| 334 | unsigned char *base; | 334 | unsigned char *base; |
| 335 | int retry_count = 0; | ||
| 335 | 336 | ||
| 336 | memset(&args, 0, sizeof(args)); | 337 | memset(&args, 0, sizeof(args)); |
| 337 | 338 | ||
| 338 | base = (unsigned char *)rdev->mode_info.atom_context->scratch; | 339 | base = (unsigned char *)rdev->mode_info.atom_context->scratch; |
| 339 | 340 | ||
| 341 | retry: | ||
| 340 | memcpy(base, req_bytes, num_bytes); | 342 | memcpy(base, req_bytes, num_bytes); |
| 341 | 343 | ||
| 342 | args.lpAuxRequest = 0; | 344 | args.lpAuxRequest = 0; |
| @@ -347,10 +349,12 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes, | |||
| 347 | 349 | ||
| 348 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 350 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
| 349 | 351 | ||
| 350 | if (args.ucReplyStatus) { | 352 | if (args.ucReplyStatus && !args.ucDataOutLen) { |
| 351 | DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x\n", | 353 | if (args.ucReplyStatus == 0x20 && retry_count < 10) |
| 354 | goto retry; | ||
| 355 | DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n", | ||
| 352 | req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3], | 356 | req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3], |
| 353 | chan->rec.i2c_id, args.ucReplyStatus); | 357 | chan->rec.i2c_id, args.ucReplyStatus, retry_count); |
| 354 | return false; | 358 | return false; |
| 355 | } | 359 | } |
| 356 | 360 | ||
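Editor's note: the atombios_dp.c hunk retries the AUX transaction when the ATOM reply status is 0x20 (an AUX defer) and no data came back. As written, though, retry_count is never incremented, so the "< 10" bound never advances and the "%d retries" message always prints 0. Presumably the intent was a bounded loop along these lines (hypothetical correction, not what the patch applies; message text simplified):

    retry:
        memcpy(base, req_bytes, num_bytes);
        /* ... fill in args ... */
        atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);

        if (args.ucReplyStatus && !args.ucDataOutLen) {
            if (args.ucReplyStatus == 0x20 && retry_count++ < 10)
                goto retry;                     /* AUX defer: retry, but bounded    */
            DRM_DEBUG("auxch failed: status 0x%02x after %d retries\n",
                      args.ucReplyStatus, retry_count);
            return false;
        }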
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 11c9a3fe6810..c0d4650cdb79 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
| @@ -354,11 +354,17 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc) | |||
| 354 | return RREG32(RADEON_CRTC2_CRNT_FRAME); | 354 | return RREG32(RADEON_CRTC2_CRNT_FRAME); |
| 355 | } | 355 | } |
| 356 | 356 | ||
| 357 | /* Who ever call radeon_fence_emit should call ring_lock and ask | ||
| 358 | * for enough space (today caller are ib schedule and buffer move) */ | ||
| 357 | void r100_fence_ring_emit(struct radeon_device *rdev, | 359 | void r100_fence_ring_emit(struct radeon_device *rdev, |
| 358 | struct radeon_fence *fence) | 360 | struct radeon_fence *fence) |
| 359 | { | 361 | { |
| 360 | /* Who ever call radeon_fence_emit should call ring_lock and ask | 362 | /* We have to make sure that caches are flushed before |
| 361 | * for enough space (today caller are ib schedule and buffer move) */ | 363 | * CPU might read something from VRAM. */ |
| 364 | radeon_ring_write(rdev, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); | ||
| 365 | radeon_ring_write(rdev, RADEON_RB3D_DC_FLUSH_ALL); | ||
| 366 | radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); | ||
| 367 | radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL); | ||
| 362 | /* Wait until IDLE & CLEAN */ | 368 | /* Wait until IDLE & CLEAN */ |
| 363 | radeon_ring_write(rdev, PACKET0(0x1720, 0)); | 369 | radeon_ring_write(rdev, PACKET0(0x1720, 0)); |
| 364 | radeon_ring_write(rdev, (1 << 16) | (1 << 17)); | 370 | radeon_ring_write(rdev, (1 << 16) | (1 << 17)); |
| @@ -3369,7 +3375,6 @@ int r100_suspend(struct radeon_device *rdev) | |||
| 3369 | 3375 | ||
| 3370 | void r100_fini(struct radeon_device *rdev) | 3376 | void r100_fini(struct radeon_device *rdev) |
| 3371 | { | 3377 | { |
| 3372 | r100_suspend(rdev); | ||
| 3373 | r100_cp_fini(rdev); | 3378 | r100_cp_fini(rdev); |
| 3374 | r100_wb_fini(rdev); | 3379 | r100_wb_fini(rdev); |
| 3375 | r100_ib_fini(rdev); | 3380 | r100_ib_fini(rdev); |
| @@ -3481,13 +3486,12 @@ int r100_init(struct radeon_device *rdev) | |||
| 3481 | if (r) { | 3486 | if (r) { |
| 3482 | /* Somethings want wront with the accel init stop accel */ | 3487 | /* Somethings want wront with the accel init stop accel */ |
| 3483 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | 3488 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
| 3484 | r100_suspend(rdev); | ||
| 3485 | r100_cp_fini(rdev); | 3489 | r100_cp_fini(rdev); |
| 3486 | r100_wb_fini(rdev); | 3490 | r100_wb_fini(rdev); |
| 3487 | r100_ib_fini(rdev); | 3491 | r100_ib_fini(rdev); |
| 3492 | radeon_irq_kms_fini(rdev); | ||
| 3488 | if (rdev->flags & RADEON_IS_PCI) | 3493 | if (rdev->flags & RADEON_IS_PCI) |
| 3489 | r100_pci_gart_fini(rdev); | 3494 | r100_pci_gart_fini(rdev); |
| 3490 | radeon_irq_kms_fini(rdev); | ||
| 3491 | rdev->accel_working = false; | 3495 | rdev->accel_working = false; |
| 3492 | } | 3496 | } |
| 3493 | return 0; | 3497 | return 0; |
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 0051d11b907c..43b55a030b4d 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
| @@ -506,11 +506,14 @@ void r300_vram_info(struct radeon_device *rdev) | |||
| 506 | 506 | ||
| 507 | /* DDR for all card after R300 & IGP */ | 507 | /* DDR for all card after R300 & IGP */ |
| 508 | rdev->mc.vram_is_ddr = true; | 508 | rdev->mc.vram_is_ddr = true; |
| 509 | |||
| 509 | tmp = RREG32(RADEON_MEM_CNTL); | 510 | tmp = RREG32(RADEON_MEM_CNTL); |
| 510 | if (tmp & R300_MEM_NUM_CHANNELS_MASK) { | 511 | tmp &= R300_MEM_NUM_CHANNELS_MASK; |
| 511 | rdev->mc.vram_width = 128; | 512 | switch (tmp) { |
| 512 | } else { | 513 | case 0: rdev->mc.vram_width = 64; break; |
| 513 | rdev->mc.vram_width = 64; | 514 | case 1: rdev->mc.vram_width = 128; break; |
| 515 | case 2: rdev->mc.vram_width = 256; break; | ||
| 516 | default: rdev->mc.vram_width = 128; break; | ||
| 514 | } | 517 | } |
| 515 | 518 | ||
| 516 | r100_vram_init_sizes(rdev); | 519 | r100_vram_init_sizes(rdev); |
| @@ -1327,7 +1330,6 @@ int r300_suspend(struct radeon_device *rdev) | |||
| 1327 | 1330 | ||
| 1328 | void r300_fini(struct radeon_device *rdev) | 1331 | void r300_fini(struct radeon_device *rdev) |
| 1329 | { | 1332 | { |
| 1330 | r300_suspend(rdev); | ||
| 1331 | r100_cp_fini(rdev); | 1333 | r100_cp_fini(rdev); |
| 1332 | r100_wb_fini(rdev); | 1334 | r100_wb_fini(rdev); |
| 1333 | r100_ib_fini(rdev); | 1335 | r100_ib_fini(rdev); |
| @@ -1418,15 +1420,15 @@ int r300_init(struct radeon_device *rdev) | |||
| 1418 | if (r) { | 1420 | if (r) { |
| 1419 | /* Somethings want wront with the accel init stop accel */ | 1421 | /* Somethings want wront with the accel init stop accel */ |
| 1420 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | 1422 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
| 1421 | r300_suspend(rdev); | ||
| 1422 | r100_cp_fini(rdev); | 1423 | r100_cp_fini(rdev); |
| 1423 | r100_wb_fini(rdev); | 1424 | r100_wb_fini(rdev); |
| 1424 | r100_ib_fini(rdev); | 1425 | r100_ib_fini(rdev); |
| 1426 | radeon_irq_kms_fini(rdev); | ||
| 1425 | if (rdev->flags & RADEON_IS_PCIE) | 1427 | if (rdev->flags & RADEON_IS_PCIE) |
| 1426 | rv370_pcie_gart_fini(rdev); | 1428 | rv370_pcie_gart_fini(rdev); |
| 1427 | if (rdev->flags & RADEON_IS_PCI) | 1429 | if (rdev->flags & RADEON_IS_PCI) |
| 1428 | r100_pci_gart_fini(rdev); | 1430 | r100_pci_gart_fini(rdev); |
| 1429 | radeon_irq_kms_fini(rdev); | 1431 | radeon_agp_fini(rdev); |
| 1430 | rdev->accel_working = false; | 1432 | rdev->accel_working = false; |
| 1431 | } | 1433 | } |
| 1432 | return 0; | 1434 | return 0; |
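Editor's note: r300_vram_info now decodes the channel-count field of RADEON_MEM_CNTL into an actual memory bus width instead of the old 64-vs-128 guess. The mapping, restated from the new-side column:

    tmp = RREG32(RADEON_MEM_CNTL) & R300_MEM_NUM_CHANNELS_MASK;
    switch (tmp) {
    case 0:  rdev->mc.vram_width = 64;  break;      /* one channel                  */
    case 1:  rdev->mc.vram_width = 128; break;      /* two channels                 */
    case 2:  rdev->mc.vram_width = 256; break;      /* four channels                */
    default: rdev->mc.vram_width = 128; break;      /* unknown: conservative value  */
    }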
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index 4526faaacca8..d9373246c97f 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c | |||
| @@ -389,16 +389,15 @@ int r420_init(struct radeon_device *rdev) | |||
| 389 | if (r) { | 389 | if (r) { |
| 390 | /* Somethings want wront with the accel init stop accel */ | 390 | /* Somethings want wront with the accel init stop accel */ |
| 391 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | 391 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
| 392 | r420_suspend(rdev); | ||
| 393 | r100_cp_fini(rdev); | 392 | r100_cp_fini(rdev); |
| 394 | r100_wb_fini(rdev); | 393 | r100_wb_fini(rdev); |
| 395 | r100_ib_fini(rdev); | 394 | r100_ib_fini(rdev); |
| 395 | radeon_irq_kms_fini(rdev); | ||
| 396 | if (rdev->flags & RADEON_IS_PCIE) | 396 | if (rdev->flags & RADEON_IS_PCIE) |
| 397 | rv370_pcie_gart_fini(rdev); | 397 | rv370_pcie_gart_fini(rdev); |
| 398 | if (rdev->flags & RADEON_IS_PCI) | 398 | if (rdev->flags & RADEON_IS_PCI) |
| 399 | r100_pci_gart_fini(rdev); | 399 | r100_pci_gart_fini(rdev); |
| 400 | radeon_agp_fini(rdev); | 400 | radeon_agp_fini(rdev); |
| 401 | radeon_irq_kms_fini(rdev); | ||
| 402 | rdev->accel_working = false; | 401 | rdev->accel_working = false; |
| 403 | } | 402 | } |
| 404 | return 0; | 403 | return 0; |
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index 9a189072f2b9..ddf5731eba0d 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c | |||
| @@ -294,13 +294,12 @@ int r520_init(struct radeon_device *rdev) | |||
| 294 | if (r) { | 294 | if (r) { |
| 295 | /* Somethings want wront with the accel init stop accel */ | 295 | /* Somethings want wront with the accel init stop accel */ |
| 296 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | 296 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
| 297 | rv515_suspend(rdev); | ||
| 298 | r100_cp_fini(rdev); | 297 | r100_cp_fini(rdev); |
| 299 | r100_wb_fini(rdev); | 298 | r100_wb_fini(rdev); |
| 300 | r100_ib_fini(rdev); | 299 | r100_ib_fini(rdev); |
| 300 | radeon_irq_kms_fini(rdev); | ||
| 301 | rv370_pcie_gart_fini(rdev); | 301 | rv370_pcie_gart_fini(rdev); |
| 302 | radeon_agp_fini(rdev); | 302 | radeon_agp_fini(rdev); |
| 303 | radeon_irq_kms_fini(rdev); | ||
| 304 | rdev->accel_working = false; | 303 | rdev->accel_working = false; |
| 305 | } | 304 | } |
| 306 | return 0; | 305 | return 0; |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 1b6d0001b20e..2ffcf5a03551 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
| @@ -1654,6 +1654,12 @@ void r600_ring_init(struct radeon_device *rdev, unsigned ring_size) | |||
| 1654 | rdev->cp.align_mask = 16 - 1; | 1654 | rdev->cp.align_mask = 16 - 1; |
| 1655 | } | 1655 | } |
| 1656 | 1656 | ||
| 1657 | void r600_cp_fini(struct radeon_device *rdev) | ||
| 1658 | { | ||
| 1659 | r600_cp_stop(rdev); | ||
| 1660 | radeon_ring_fini(rdev); | ||
| 1661 | } | ||
| 1662 | |||
| 1657 | 1663 | ||
| 1658 | /* | 1664 | /* |
| 1659 | * GPU scratch registers helpers function. | 1665 | * GPU scratch registers helpers function. |
| @@ -1861,6 +1867,12 @@ int r600_startup(struct radeon_device *rdev) | |||
| 1861 | return r; | 1867 | return r; |
| 1862 | } | 1868 | } |
| 1863 | r600_gpu_init(rdev); | 1869 | r600_gpu_init(rdev); |
| 1870 | r = r600_blit_init(rdev); | ||
| 1871 | if (r) { | ||
| 1872 | r600_blit_fini(rdev); | ||
| 1873 | rdev->asic->copy = NULL; | ||
| 1874 | dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); | ||
| 1875 | } | ||
| 1864 | /* pin copy shader into vram */ | 1876 | /* pin copy shader into vram */ |
| 1865 | if (rdev->r600_blit.shader_obj) { | 1877 | if (rdev->r600_blit.shader_obj) { |
| 1866 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); | 1878 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); |
| @@ -1938,6 +1950,13 @@ int r600_resume(struct radeon_device *rdev) | |||
| 1938 | DRM_ERROR("radeon: failled testing IB (%d).\n", r); | 1950 | DRM_ERROR("radeon: failled testing IB (%d).\n", r); |
| 1939 | return r; | 1951 | return r; |
| 1940 | } | 1952 | } |
| 1953 | |||
| 1954 | r = r600_audio_init(rdev); | ||
| 1955 | if (r) { | ||
| 1956 | DRM_ERROR("radeon: audio resume failed\n"); | ||
| 1957 | return r; | ||
| 1958 | } | ||
| 1959 | |||
| 1941 | return r; | 1960 | return r; |
| 1942 | } | 1961 | } |
| 1943 | 1962 | ||
| @@ -1945,6 +1964,7 @@ int r600_suspend(struct radeon_device *rdev) | |||
| 1945 | { | 1964 | { |
| 1946 | int r; | 1965 | int r; |
| 1947 | 1966 | ||
| 1967 | r600_audio_fini(rdev); | ||
| 1948 | /* FIXME: we should wait for ring to be empty */ | 1968 | /* FIXME: we should wait for ring to be empty */ |
| 1949 | r600_cp_stop(rdev); | 1969 | r600_cp_stop(rdev); |
| 1950 | rdev->cp.ready = false; | 1970 | rdev->cp.ready = false; |
| @@ -2045,19 +2065,15 @@ int r600_init(struct radeon_device *rdev) | |||
| 2045 | r = r600_pcie_gart_init(rdev); | 2065 | r = r600_pcie_gart_init(rdev); |
| 2046 | if (r) | 2066 | if (r) |
| 2047 | return r; | 2067 | return r; |
| 2048 | r = r600_blit_init(rdev); | ||
| 2049 | if (r) { | ||
| 2050 | r600_blit_fini(rdev); | ||
| 2051 | rdev->asic->copy = NULL; | ||
| 2052 | dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); | ||
| 2053 | } | ||
| 2054 | 2068 | ||
| 2055 | rdev->accel_working = true; | 2069 | rdev->accel_working = true; |
| 2056 | r = r600_startup(rdev); | 2070 | r = r600_startup(rdev); |
| 2057 | if (r) { | 2071 | if (r) { |
| 2058 | r600_suspend(rdev); | 2072 | dev_err(rdev->dev, "disabling GPU acceleration\n"); |
| 2073 | r600_cp_fini(rdev); | ||
| 2059 | r600_wb_fini(rdev); | 2074 | r600_wb_fini(rdev); |
| 2060 | radeon_ring_fini(rdev); | 2075 | r600_irq_fini(rdev); |
| 2076 | radeon_irq_kms_fini(rdev); | ||
| 2061 | r600_pcie_gart_fini(rdev); | 2077 | r600_pcie_gart_fini(rdev); |
| 2062 | rdev->accel_working = false; | 2078 | rdev->accel_working = false; |
| 2063 | } | 2079 | } |
| @@ -2083,20 +2099,17 @@ int r600_init(struct radeon_device *rdev) | |||
| 2083 | 2099 | ||
| 2084 | void r600_fini(struct radeon_device *rdev) | 2100 | void r600_fini(struct radeon_device *rdev) |
| 2085 | { | 2101 | { |
| 2086 | /* Suspend operations */ | ||
| 2087 | r600_suspend(rdev); | ||
| 2088 | |||
| 2089 | r600_audio_fini(rdev); | 2102 | r600_audio_fini(rdev); |
| 2090 | r600_blit_fini(rdev); | 2103 | r600_blit_fini(rdev); |
| 2104 | r600_cp_fini(rdev); | ||
| 2105 | r600_wb_fini(rdev); | ||
| 2091 | r600_irq_fini(rdev); | 2106 | r600_irq_fini(rdev); |
| 2092 | radeon_irq_kms_fini(rdev); | 2107 | radeon_irq_kms_fini(rdev); |
| 2093 | radeon_ring_fini(rdev); | ||
| 2094 | r600_wb_fini(rdev); | ||
| 2095 | r600_pcie_gart_fini(rdev); | 2108 | r600_pcie_gart_fini(rdev); |
| 2109 | radeon_agp_fini(rdev); | ||
| 2096 | radeon_gem_fini(rdev); | 2110 | radeon_gem_fini(rdev); |
| 2097 | radeon_fence_driver_fini(rdev); | 2111 | radeon_fence_driver_fini(rdev); |
| 2098 | radeon_clocks_fini(rdev); | 2112 | radeon_clocks_fini(rdev); |
| 2099 | radeon_agp_fini(rdev); | ||
| 2100 | radeon_bo_fini(rdev); | 2113 | radeon_bo_fini(rdev); |
| 2101 | radeon_atombios_fini(rdev); | 2114 | radeon_atombios_fini(rdev); |
| 2102 | kfree(rdev->bios); | 2115 | kfree(rdev->bios); |
| @@ -2900,3 +2913,18 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev) | |||
| 2900 | return 0; | 2913 | return 0; |
| 2901 | #endif | 2914 | #endif |
| 2902 | } | 2915 | } |
| 2916 | |||
| 2917 | /** | ||
| 2918 | * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl | ||
| 2919 | * rdev: radeon device structure | ||
| 2920 | * bo: buffer object struct which userspace is waiting for idle | ||
| 2921 | * | ||
| 2922 | * Some R6XX/R7XX doesn't seems to take into account HDP flush performed | ||
| 2923 | * through ring buffer, this leads to corruption in rendering, see | ||
| 2924 | * http://bugzilla.kernel.org/show_bug.cgi?id=15186 to avoid this we | ||
| 2925 | * directly perform HDP flush by writing register through MMIO. | ||
| 2926 | */ | ||
| 2927 | void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo) | ||
| 2928 | { | ||
| 2929 | WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); | ||
| 2930 | } | ||
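Editor's note: the r600.c changes restructure init/teardown: blit setup moves into r600_startup() so resume re-creates it, the error unwind and r600_fini() now tear things down in reverse order of setup, and two small helpers are added. Restated cleanly from the new-side column:

    void r600_cp_fini(struct radeon_device *rdev)
    {
        r600_cp_stop(rdev);                 /* halt the CP ...                      */
        radeon_ring_fini(rdev);             /* ... then release the ring buffer     */
    }

    /* Some R6xx/R7xx parts ignore an HDP flush queued through the ring
     * (kernel bugzilla #15186), so it is forced through MMIO instead. */
    void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
    {
        WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
    }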
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c index 99e2c3891a7d..0dcb6904c4ff 100644 --- a/drivers/gpu/drm/radeon/r600_audio.c +++ b/drivers/gpu/drm/radeon/r600_audio.c | |||
| @@ -35,7 +35,7 @@ | |||
| 35 | */ | 35 | */ |
| 36 | static int r600_audio_chipset_supported(struct radeon_device *rdev) | 36 | static int r600_audio_chipset_supported(struct radeon_device *rdev) |
| 37 | { | 37 | { |
| 38 | return rdev->family >= CHIP_R600 | 38 | return (rdev->family >= CHIP_R600 && rdev->family < CHIP_RV710) |
| 39 | || rdev->family == CHIP_RS600 | 39 | || rdev->family == CHIP_RS600 |
| 40 | || rdev->family == CHIP_RS690 | 40 | || rdev->family == CHIP_RS690 |
| 41 | || rdev->family == CHIP_RS740; | 41 | || rdev->family == CHIP_RS740; |
| @@ -261,7 +261,6 @@ void r600_audio_fini(struct radeon_device *rdev) | |||
| 261 | if (!r600_audio_chipset_supported(rdev)) | 261 | if (!r600_audio_chipset_supported(rdev)) |
| 262 | return; | 262 | return; |
| 263 | 263 | ||
| 264 | WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000); | ||
| 265 | |||
| 266 | del_timer(&rdev->audio_timer); | 264 | del_timer(&rdev->audio_timer); |
| 265 | WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000); | ||
| 267 | } | 266 | } |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 2d5f2bfa7201..f57480ba1355 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
| @@ -661,6 +661,13 @@ struct radeon_asic { | |||
| 661 | void (*hpd_fini)(struct radeon_device *rdev); | 661 | void (*hpd_fini)(struct radeon_device *rdev); |
| 662 | bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd); | 662 | bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd); |
| 663 | void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd); | 663 | void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd); |
| 664 | /* ioctl hw specific callback. Some hw might want to perform special | ||
| 665 | * operation on specific ioctl. For instance on wait idle some hw | ||
| 666 | * might want to perform and HDP flush through MMIO as it seems that | ||
| 667 | * some R6XX/R7XX hw doesn't take HDP flush into account if programmed | ||
| 668 | * through ring. | ||
| 669 | */ | ||
| 670 | void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo); | ||
| 664 | }; | 671 | }; |
| 665 | 672 | ||
| 666 | /* | 673 | /* |
| @@ -1143,6 +1150,7 @@ extern bool r600_card_posted(struct radeon_device *rdev); | |||
| 1143 | extern void r600_cp_stop(struct radeon_device *rdev); | 1150 | extern void r600_cp_stop(struct radeon_device *rdev); |
| 1144 | extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size); | 1151 | extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size); |
| 1145 | extern int r600_cp_resume(struct radeon_device *rdev); | 1152 | extern int r600_cp_resume(struct radeon_device *rdev); |
| 1153 | extern void r600_cp_fini(struct radeon_device *rdev); | ||
| 1146 | extern int r600_count_pipe_bits(uint32_t val); | 1154 | extern int r600_count_pipe_bits(uint32_t val); |
| 1147 | extern int r600_gart_clear_page(struct radeon_device *rdev, int i); | 1155 | extern int r600_gart_clear_page(struct radeon_device *rdev, int i); |
| 1148 | extern int r600_mc_wait_for_idle(struct radeon_device *rdev); | 1156 | extern int r600_mc_wait_for_idle(struct radeon_device *rdev); |
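The hunks above add an optional per-ASIC ioctl_wait_idle hook; callers must NULL-check it before invoking, which is exactly what the radeon_gem.c change further down does. As a rough, self-contained sketch of that optional-callback pattern — struct chip_ops, struct dev and r6xx_ioctl_wait_idle below are illustrative stand-ins, not the real radeon types:

#include <stddef.h>
#include <stdio.h>

struct dev;

/* per-chip operations; ioctl_wait_idle may be NULL on chips without the workaround */
struct chip_ops {
	void (*wait_idle)(struct dev *d);
	void (*ioctl_wait_idle)(struct dev *d);   /* optional hook */
};

struct dev {
	const struct chip_ops *ops;
};

static void r6xx_ioctl_wait_idle(struct dev *d)
{
	/* stand-in for the MMIO HDP flush done on R6xx/R7xx */
	printf("MMIO HDP flush\n");
}

static const struct chip_ops r6xx_ops = {
	.wait_idle = NULL,
	.ioctl_wait_idle = r6xx_ioctl_wait_idle,
};

static void wait_idle_ioctl(struct dev *d)
{
	/* call the hw-specific hook only if the chip provides one */
	if (d->ops->ioctl_wait_idle)
		d->ops->ioctl_wait_idle(d);
}

int main(void)
{
	struct dev d = { .ops = &r6xx_ops };
	wait_idle_ioctl(&d);
	return 0;
}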
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index f2fbd2e4e9df..05ee1aeac3fd 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
| @@ -117,6 +117,7 @@ static struct radeon_asic r100_asic = { | |||
| 117 | .hpd_fini = &r100_hpd_fini, | 117 | .hpd_fini = &r100_hpd_fini, |
| 118 | .hpd_sense = &r100_hpd_sense, | 118 | .hpd_sense = &r100_hpd_sense, |
| 119 | .hpd_set_polarity = &r100_hpd_set_polarity, | 119 | .hpd_set_polarity = &r100_hpd_set_polarity, |
| 120 | .ioctl_wait_idle = NULL, | ||
| 120 | }; | 121 | }; |
| 121 | 122 | ||
| 122 | 123 | ||
| @@ -176,6 +177,7 @@ static struct radeon_asic r300_asic = { | |||
| 176 | .hpd_fini = &r100_hpd_fini, | 177 | .hpd_fini = &r100_hpd_fini, |
| 177 | .hpd_sense = &r100_hpd_sense, | 178 | .hpd_sense = &r100_hpd_sense, |
| 178 | .hpd_set_polarity = &r100_hpd_set_polarity, | 179 | .hpd_set_polarity = &r100_hpd_set_polarity, |
| 180 | .ioctl_wait_idle = NULL, | ||
| 179 | }; | 181 | }; |
| 180 | 182 | ||
| 181 | /* | 183 | /* |
| @@ -219,6 +221,7 @@ static struct radeon_asic r420_asic = { | |||
| 219 | .hpd_fini = &r100_hpd_fini, | 221 | .hpd_fini = &r100_hpd_fini, |
| 220 | .hpd_sense = &r100_hpd_sense, | 222 | .hpd_sense = &r100_hpd_sense, |
| 221 | .hpd_set_polarity = &r100_hpd_set_polarity, | 223 | .hpd_set_polarity = &r100_hpd_set_polarity, |
| 224 | .ioctl_wait_idle = NULL, | ||
| 222 | }; | 225 | }; |
| 223 | 226 | ||
| 224 | 227 | ||
| @@ -267,6 +270,7 @@ static struct radeon_asic rs400_asic = { | |||
| 267 | .hpd_fini = &r100_hpd_fini, | 270 | .hpd_fini = &r100_hpd_fini, |
| 268 | .hpd_sense = &r100_hpd_sense, | 271 | .hpd_sense = &r100_hpd_sense, |
| 269 | .hpd_set_polarity = &r100_hpd_set_polarity, | 272 | .hpd_set_polarity = &r100_hpd_set_polarity, |
| 273 | .ioctl_wait_idle = NULL, | ||
| 270 | }; | 274 | }; |
| 271 | 275 | ||
| 272 | 276 | ||
| @@ -323,6 +327,7 @@ static struct radeon_asic rs600_asic = { | |||
| 323 | .hpd_fini = &rs600_hpd_fini, | 327 | .hpd_fini = &rs600_hpd_fini, |
| 324 | .hpd_sense = &rs600_hpd_sense, | 328 | .hpd_sense = &rs600_hpd_sense, |
| 325 | .hpd_set_polarity = &rs600_hpd_set_polarity, | 329 | .hpd_set_polarity = &rs600_hpd_set_polarity, |
| 330 | .ioctl_wait_idle = NULL, | ||
| 326 | }; | 331 | }; |
| 327 | 332 | ||
| 328 | 333 | ||
| @@ -370,6 +375,7 @@ static struct radeon_asic rs690_asic = { | |||
| 370 | .hpd_fini = &rs600_hpd_fini, | 375 | .hpd_fini = &rs600_hpd_fini, |
| 371 | .hpd_sense = &rs600_hpd_sense, | 376 | .hpd_sense = &rs600_hpd_sense, |
| 372 | .hpd_set_polarity = &rs600_hpd_set_polarity, | 377 | .hpd_set_polarity = &rs600_hpd_set_polarity, |
| 378 | .ioctl_wait_idle = NULL, | ||
| 373 | }; | 379 | }; |
| 374 | 380 | ||
| 375 | 381 | ||
| @@ -421,6 +427,7 @@ static struct radeon_asic rv515_asic = { | |||
| 421 | .hpd_fini = &rs600_hpd_fini, | 427 | .hpd_fini = &rs600_hpd_fini, |
| 422 | .hpd_sense = &rs600_hpd_sense, | 428 | .hpd_sense = &rs600_hpd_sense, |
| 423 | .hpd_set_polarity = &rs600_hpd_set_polarity, | 429 | .hpd_set_polarity = &rs600_hpd_set_polarity, |
| 430 | .ioctl_wait_idle = NULL, | ||
| 424 | }; | 431 | }; |
| 425 | 432 | ||
| 426 | 433 | ||
| @@ -463,6 +470,7 @@ static struct radeon_asic r520_asic = { | |||
| 463 | .hpd_fini = &rs600_hpd_fini, | 470 | .hpd_fini = &rs600_hpd_fini, |
| 464 | .hpd_sense = &rs600_hpd_sense, | 471 | .hpd_sense = &rs600_hpd_sense, |
| 465 | .hpd_set_polarity = &rs600_hpd_set_polarity, | 472 | .hpd_set_polarity = &rs600_hpd_set_polarity, |
| 473 | .ioctl_wait_idle = NULL, | ||
| 466 | }; | 474 | }; |
| 467 | 475 | ||
| 468 | /* | 476 | /* |
| @@ -504,6 +512,7 @@ void r600_hpd_fini(struct radeon_device *rdev); | |||
| 504 | bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); | 512 | bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); |
| 505 | void r600_hpd_set_polarity(struct radeon_device *rdev, | 513 | void r600_hpd_set_polarity(struct radeon_device *rdev, |
| 506 | enum radeon_hpd_id hpd); | 514 | enum radeon_hpd_id hpd); |
| 515 | extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo); | ||
| 507 | 516 | ||
| 508 | static struct radeon_asic r600_asic = { | 517 | static struct radeon_asic r600_asic = { |
| 509 | .init = &r600_init, | 518 | .init = &r600_init, |
| @@ -538,6 +547,7 @@ static struct radeon_asic r600_asic = { | |||
| 538 | .hpd_fini = &r600_hpd_fini, | 547 | .hpd_fini = &r600_hpd_fini, |
| 539 | .hpd_sense = &r600_hpd_sense, | 548 | .hpd_sense = &r600_hpd_sense, |
| 540 | .hpd_set_polarity = &r600_hpd_set_polarity, | 549 | .hpd_set_polarity = &r600_hpd_set_polarity, |
| 550 | .ioctl_wait_idle = r600_ioctl_wait_idle, | ||
| 541 | }; | 551 | }; |
| 542 | 552 | ||
| 543 | /* | 553 | /* |
| @@ -582,6 +592,7 @@ static struct radeon_asic rv770_asic = { | |||
| 582 | .hpd_fini = &r600_hpd_fini, | 592 | .hpd_fini = &r600_hpd_fini, |
| 583 | .hpd_sense = &r600_hpd_sense, | 593 | .hpd_sense = &r600_hpd_sense, |
| 584 | .hpd_set_polarity = &r600_hpd_set_polarity, | 594 | .hpd_set_polarity = &r600_hpd_set_polarity, |
| 595 | .ioctl_wait_idle = r600_ioctl_wait_idle, | ||
| 585 | }; | 596 | }; |
| 586 | 597 | ||
| 587 | #endif | 598 | #endif |
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index fa82ca74324e..2dcda6115874 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
| @@ -287,6 +287,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, | |||
| 287 | *connector_type = DRM_MODE_CONNECTOR_DVID; | 287 | *connector_type = DRM_MODE_CONNECTOR_DVID; |
| 288 | } | 288 | } |
| 289 | 289 | ||
| 290 | /* XFX Pine Group device rv730 reports no VGA DDC lines | ||
| 291 | * even though they are wired up to record 0x93 | ||
| 292 | */ | ||
| 293 | if ((dev->pdev->device == 0x9498) && | ||
| 294 | (dev->pdev->subsystem_vendor == 0x1682) && | ||
| 295 | (dev->pdev->subsystem_device == 0x2452)) { | ||
| 296 | struct radeon_device *rdev = dev->dev_private; | ||
| 297 | *i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93); | ||
| 298 | } | ||
| 290 | return true; | 299 | return true; |
| 291 | } | 300 | } |
| 292 | 301 | ||
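The quirk above keys off the PCI device ID plus the subsystem vendor/device IDs before overriding the DDC i2c bus. A minimal userspace sketch of that matching idiom, reusing the same three IDs but with a simplified pci_id struct standing in for struct pci_dev:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* simplified stand-in for struct pci_dev; fields mirror the ones the quirk checks */
struct pci_id {
	uint16_t device;
	uint16_t subsystem_vendor;
	uint16_t subsystem_device;
};

/* returns true when the board needs the VGA DDC line override */
static bool needs_vga_ddc_quirk(const struct pci_id *id)
{
	return id->device == 0x9498 &&
	       id->subsystem_vendor == 0x1682 &&
	       id->subsystem_device == 0x2452;
}

int main(void)
{
	struct pci_id xfx_rv730 = { 0x9498, 0x1682, 0x2452 };

	if (needs_vga_ddc_quirk(&xfx_rv730))
		printf("override i2c bus with gpio record 0x93\n");
	return 0;
}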
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c index 4ddfd4b5bc51..7932dc4d6b90 100644 --- a/drivers/gpu/drm/radeon/radeon_benchmark.c +++ b/drivers/gpu/drm/radeon/radeon_benchmark.c | |||
| @@ -65,31 +65,42 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize, | |||
| 65 | if (r) { | 65 | if (r) { |
| 66 | goto out_cleanup; | 66 | goto out_cleanup; |
| 67 | } | 67 | } |
| 68 | start_jiffies = jiffies; | 68 | |
| 69 | for (i = 0; i < n; i++) { | 69 | /* r100 doesn't have a dma engine so skip the test */ |
| 70 | r = radeon_fence_create(rdev, &fence); | 70 | if (rdev->asic->copy_dma) { |
| 71 | if (r) { | 71 | |
| 72 | goto out_cleanup; | 72 | start_jiffies = jiffies; |
| 73 | for (i = 0; i < n; i++) { | ||
| 74 | r = radeon_fence_create(rdev, &fence); | ||
| 75 | if (r) { | ||
| 76 | goto out_cleanup; | ||
| 77 | } | ||
| 78 | |||
| 79 | r = radeon_copy_dma(rdev, saddr, daddr, | ||
| 80 | size / RADEON_GPU_PAGE_SIZE, fence); | ||
| 81 | |||
| 82 | if (r) { | ||
| 83 | goto out_cleanup; | ||
| 84 | } | ||
| 85 | r = radeon_fence_wait(fence, false); | ||
| 86 | if (r) { | ||
| 87 | goto out_cleanup; | ||
| 88 | } | ||
| 89 | radeon_fence_unref(&fence); | ||
| 73 | } | 90 | } |
| 74 | r = radeon_copy_dma(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, fence); | 91 | end_jiffies = jiffies; |
| 75 | if (r) { | 92 | time = end_jiffies - start_jiffies; |
| 76 | goto out_cleanup; | 93 | time = jiffies_to_msecs(time); |
| 94 | if (time > 0) { | ||
| 95 | i = ((n * size) >> 10) / time; | ||
| 96 | printk(KERN_INFO "radeon: dma %u bo moves of %ukb from" | ||
| 97 | " %d to %d in %lums (%ukb/ms %ukb/s %uM/s)\n", | ||
| 98 | n, size >> 10, | ||
| 99 | sdomain, ddomain, time, | ||
| 100 | i, i * 1000, (i * 1000) / 1024); | ||
| 77 | } | 101 | } |
| 78 | r = radeon_fence_wait(fence, false); | ||
| 79 | if (r) { | ||
| 80 | goto out_cleanup; | ||
| 81 | } | ||
| 82 | radeon_fence_unref(&fence); | ||
| 83 | } | ||
| 84 | end_jiffies = jiffies; | ||
| 85 | time = end_jiffies - start_jiffies; | ||
| 86 | time = jiffies_to_msecs(time); | ||
| 87 | if (time > 0) { | ||
| 88 | i = ((n * size) >> 10) / time; | ||
| 89 | printk(KERN_INFO "radeon: dma %u bo moves of %ukb from %d to %d" | ||
| 90 | " in %lums (%ukb/ms %ukb/s %uM/s)\n", n, size >> 10, | ||
| 91 | sdomain, ddomain, time, i, i * 1000, (i * 1000) / 1024); | ||
| 92 | } | 102 | } |
| 103 | |||
| 93 | start_jiffies = jiffies; | 104 | start_jiffies = jiffies; |
| 94 | for (i = 0; i < n; i++) { | 105 | for (i = 0; i < n; i++) { |
| 95 | r = radeon_fence_create(rdev, &fence); | 106 | r = radeon_fence_create(rdev, &fence); |
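The reworked benchmark converts elapsed jiffies to milliseconds and reports throughput as ((n * size) >> 10) / time, i.e. total kilobytes moved divided by elapsed milliseconds, then derives kb/s and MB/s from that. A small standalone illustration of the arithmetic with made-up numbers (no jiffies involved):

#include <stdio.h>

int main(void)
{
	unsigned n = 1024;              /* number of buffer moves */
	unsigned size = 1024 * 1024;    /* bytes per move */
	unsigned long time_ms = 200;    /* measured duration in milliseconds */

	if (time_ms > 0) {
		/* kilobytes per millisecond, as in the driver's printk */
		unsigned kb_per_ms = ((n * (unsigned long long)size) >> 10) / time_ms;

		printf("%u moves of %ukb in %lums (%ukb/ms %ukb/s %uM/s)\n",
		       n, size >> 10, time_ms,
		       kb_per_ms, kb_per_ms * 1000, (kb_per_ms * 1000) / 1024);
	}
	return 0;
}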
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 579c8920e081..e7b19440102e 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c | |||
| @@ -971,8 +971,7 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder | |||
| 971 | lvds->native_mode.vdisplay); | 971 | lvds->native_mode.vdisplay); |
| 972 | 972 | ||
| 973 | lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c); | 973 | lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c); |
| 974 | if (lvds->panel_vcc_delay > 2000 || lvds->panel_vcc_delay < 0) | 974 | lvds->panel_vcc_delay = min_t(u16, lvds->panel_vcc_delay, 2000); |
| 975 | lvds->panel_vcc_delay = 2000; | ||
| 976 | 975 | ||
| 977 | lvds->panel_pwr_delay = RBIOS8(lcd_info + 0x24); | 976 | lvds->panel_pwr_delay = RBIOS8(lcd_info + 0x24); |
| 978 | lvds->panel_digon_delay = RBIOS16(lcd_info + 0x38) & 0xf; | 977 | lvds->panel_digon_delay = RBIOS16(lcd_info + 0x38) & 0xf; |
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 55266416fa47..238188540017 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
| @@ -580,16 +580,18 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect | |||
| 580 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | 580 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
| 581 | struct drm_encoder *encoder; | 581 | struct drm_encoder *encoder; |
| 582 | struct drm_encoder_helper_funcs *encoder_funcs; | 582 | struct drm_encoder_helper_funcs *encoder_funcs; |
| 583 | bool dret; | 583 | bool dret = false; |
| 584 | enum drm_connector_status ret = connector_status_disconnected; | 584 | enum drm_connector_status ret = connector_status_disconnected; |
| 585 | 585 | ||
| 586 | encoder = radeon_best_single_encoder(connector); | 586 | encoder = radeon_best_single_encoder(connector); |
| 587 | if (!encoder) | 587 | if (!encoder) |
| 588 | ret = connector_status_disconnected; | 588 | ret = connector_status_disconnected; |
| 589 | 589 | ||
| 590 | radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); | 590 | if (radeon_connector->ddc_bus) { |
| 591 | dret = radeon_ddc_probe(radeon_connector); | 591 | radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); |
| 592 | radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); | 592 | dret = radeon_ddc_probe(radeon_connector); |
| 593 | radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); | ||
| 594 | } | ||
| 593 | if (dret) { | 595 | if (dret) { |
| 594 | if (radeon_connector->edid) { | 596 | if (radeon_connector->edid) { |
| 595 | kfree(radeon_connector->edid); | 597 | kfree(radeon_connector->edid); |
| @@ -740,11 +742,13 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect | |||
| 740 | struct drm_mode_object *obj; | 742 | struct drm_mode_object *obj; |
| 741 | int i; | 743 | int i; |
| 742 | enum drm_connector_status ret = connector_status_disconnected; | 744 | enum drm_connector_status ret = connector_status_disconnected; |
| 743 | bool dret; | 745 | bool dret = false; |
| 744 | 746 | ||
| 745 | radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); | 747 | if (radeon_connector->ddc_bus) { |
| 746 | dret = radeon_ddc_probe(radeon_connector); | 748 | radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); |
| 747 | radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); | 749 | dret = radeon_ddc_probe(radeon_connector); |
| 750 | radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); | ||
| 751 | } | ||
| 748 | if (dret) { | 752 | if (dret) { |
| 749 | if (radeon_connector->edid) { | 753 | if (radeon_connector->edid) { |
| 750 | kfree(radeon_connector->edid); | 754 | kfree(radeon_connector->edid); |
| @@ -1343,7 +1347,7 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
| 1343 | radeon_connector->dac_load_detect = false; | 1347 | radeon_connector->dac_load_detect = false; |
| 1344 | drm_connector_attach_property(&radeon_connector->base, | 1348 | drm_connector_attach_property(&radeon_connector->base, |
| 1345 | rdev->mode_info.load_detect_property, | 1349 | rdev->mode_info.load_detect_property, |
| 1346 | 1); | 1350 | radeon_connector->dac_load_detect); |
| 1347 | drm_connector_attach_property(&radeon_connector->base, | 1351 | drm_connector_attach_property(&radeon_connector->base, |
| 1348 | rdev->mode_info.tv_std_property, | 1352 | rdev->mode_info.tv_std_property, |
| 1349 | radeon_combios_get_tv_info(rdev)); | 1353 | radeon_combios_get_tv_info(rdev)); |
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 6a92f994cc26..7e17a362b54b 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
| @@ -278,7 +278,7 @@ static void radeon_print_display_setup(struct drm_device *dev) | |||
| 278 | DRM_INFO(" %s\n", connector_names[connector->connector_type]); | 278 | DRM_INFO(" %s\n", connector_names[connector->connector_type]); |
| 279 | if (radeon_connector->hpd.hpd != RADEON_HPD_NONE) | 279 | if (radeon_connector->hpd.hpd != RADEON_HPD_NONE) |
| 280 | DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]); | 280 | DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]); |
| 281 | if (radeon_connector->ddc_bus) | 281 | if (radeon_connector->ddc_bus) { |
| 282 | DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", | 282 | DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", |
| 283 | radeon_connector->ddc_bus->rec.mask_clk_reg, | 283 | radeon_connector->ddc_bus->rec.mask_clk_reg, |
| 284 | radeon_connector->ddc_bus->rec.mask_data_reg, | 284 | radeon_connector->ddc_bus->rec.mask_data_reg, |
| @@ -288,6 +288,15 @@ static void radeon_print_display_setup(struct drm_device *dev) | |||
| 288 | radeon_connector->ddc_bus->rec.en_data_reg, | 288 | radeon_connector->ddc_bus->rec.en_data_reg, |
| 289 | radeon_connector->ddc_bus->rec.y_clk_reg, | 289 | radeon_connector->ddc_bus->rec.y_clk_reg, |
| 290 | radeon_connector->ddc_bus->rec.y_data_reg); | 290 | radeon_connector->ddc_bus->rec.y_data_reg); |
| 291 | } else { | ||
| 292 | if (connector->connector_type == DRM_MODE_CONNECTOR_VGA || | ||
| 293 | connector->connector_type == DRM_MODE_CONNECTOR_DVII || | ||
| 294 | connector->connector_type == DRM_MODE_CONNECTOR_DVID || | ||
| 295 | connector->connector_type == DRM_MODE_CONNECTOR_DVIA || | ||
| 296 | connector->connector_type == DRM_MODE_CONNECTOR_HDMIA || | ||
| 297 | connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) | ||
| 298 | DRM_INFO(" DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n"); | ||
| 299 | } | ||
| 291 | DRM_INFO(" Encoders:\n"); | 300 | DRM_INFO(" Encoders:\n"); |
| 292 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | 301 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
| 293 | radeon_encoder = to_radeon_encoder(encoder); | 302 | radeon_encoder = to_radeon_encoder(encoder); |
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 3ba213d1b06c..d71e346e9ab5 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c | |||
| @@ -248,7 +248,7 @@ int radeonfb_create(struct drm_device *dev, | |||
| 248 | if (ret) | 248 | if (ret) |
| 249 | goto out_unref; | 249 | goto out_unref; |
| 250 | 250 | ||
| 251 | memset_io(fbptr, 0xff, aligned_size); | 251 | memset_io(fbptr, 0x0, aligned_size); |
| 252 | 252 | ||
| 253 | strcpy(info->fix.id, "radeondrmfb"); | 253 | strcpy(info->fix.id, "radeondrmfb"); |
| 254 | 254 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 0e1325e18534..db8e9a355a01 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c | |||
| @@ -308,6 +308,9 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, | |||
| 308 | } | 308 | } |
| 309 | robj = gobj->driver_private; | 309 | robj = gobj->driver_private; |
| 310 | r = radeon_bo_wait(robj, NULL, false); | 310 | r = radeon_bo_wait(robj, NULL, false); |
| 311 | /* callback hw specific functions if any */ | ||
| 312 | if (robj->rdev->asic->ioctl_wait_idle) | ||
| 313 | robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj); | ||
| 311 | mutex_lock(&dev->struct_mutex); | 314 | mutex_lock(&dev->struct_mutex); |
| 312 | drm_gem_object_unreference(gobj); | 315 | drm_gem_object_unreference(gobj); |
| 313 | mutex_unlock(&dev->struct_mutex); | 316 | mutex_unlock(&dev->struct_mutex); |
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index 9f5418983e2a..287fcebfb4e6 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c | |||
| @@ -223,15 +223,31 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) | |||
| 223 | return 0; | 223 | return 0; |
| 224 | } | 224 | } |
| 225 | 225 | ||
| 226 | int rs400_mc_wait_for_idle(struct radeon_device *rdev) | ||
| 227 | { | ||
| 228 | unsigned i; | ||
| 229 | uint32_t tmp; | ||
| 230 | |||
| 231 | for (i = 0; i < rdev->usec_timeout; i++) { | ||
| 232 | /* read MC_STATUS */ | ||
| 233 | tmp = RREG32(0x0150); | ||
| 234 | if (tmp & (1 << 2)) { | ||
| 235 | return 0; | ||
| 236 | } | ||
| 237 | DRM_UDELAY(1); | ||
| 238 | } | ||
| 239 | return -1; | ||
| 240 | } | ||
| 241 | |||
| 226 | void rs400_gpu_init(struct radeon_device *rdev) | 242 | void rs400_gpu_init(struct radeon_device *rdev) |
| 227 | { | 243 | { |
| 228 | /* FIXME: HDP same place on rs400 ? */ | 244 | /* FIXME: HDP same place on rs400 ? */ |
| 229 | r100_hdp_reset(rdev); | 245 | r100_hdp_reset(rdev); |
| 230 | /* FIXME: is this correct ? */ | 246 | /* FIXME: is this correct ? */ |
| 231 | r420_pipes_init(rdev); | 247 | r420_pipes_init(rdev); |
| 232 | if (r300_mc_wait_for_idle(rdev)) { | 248 | if (rs400_mc_wait_for_idle(rdev)) { |
| 233 | printk(KERN_WARNING "Failed to wait MC idle while " | 249 | printk(KERN_WARNING "rs400: Failed to wait MC idle while " |
| 234 | "programming pipes. Bad things might happen.\n"); | 250 | "programming pipes. Bad things might happen. %08x\n", RREG32(0x150)); |
| 235 | } | 251 | } |
| 236 | } | 252 | } |
| 237 | 253 | ||
| @@ -370,8 +386,8 @@ void rs400_mc_program(struct radeon_device *rdev) | |||
| 370 | r100_mc_stop(rdev, &save); | 386 | r100_mc_stop(rdev, &save); |
| 371 | 387 | ||
| 372 | /* Wait for mc idle */ | 388 | /* Wait for mc idle */ |
| 373 | if (r300_mc_wait_for_idle(rdev)) | 389 | if (rs400_mc_wait_for_idle(rdev)) |
| 374 | dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); | 390 | dev_warn(rdev->dev, "rs400: Wait MC idle timeout before updating MC.\n"); |
| 375 | WREG32(R_000148_MC_FB_LOCATION, | 391 | WREG32(R_000148_MC_FB_LOCATION, |
| 376 | S_000148_MC_FB_START(rdev->mc.vram_start >> 16) | | 392 | S_000148_MC_FB_START(rdev->mc.vram_start >> 16) | |
| 377 | S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16)); | 393 | S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16)); |
| @@ -448,7 +464,6 @@ int rs400_suspend(struct radeon_device *rdev) | |||
| 448 | 464 | ||
| 449 | void rs400_fini(struct radeon_device *rdev) | 465 | void rs400_fini(struct radeon_device *rdev) |
| 450 | { | 466 | { |
| 451 | rs400_suspend(rdev); | ||
| 452 | r100_cp_fini(rdev); | 467 | r100_cp_fini(rdev); |
| 453 | r100_wb_fini(rdev); | 468 | r100_wb_fini(rdev); |
| 454 | r100_ib_fini(rdev); | 469 | r100_ib_fini(rdev); |
| @@ -527,7 +542,6 @@ int rs400_init(struct radeon_device *rdev) | |||
| 527 | if (r) { | 542 | if (r) { |
| 528 | /* Something went wrong with the accel init, stop accel */ | 543 | /* Something went wrong with the accel init, stop accel */ |
| 529 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | 544 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
| 530 | rs400_suspend(rdev); | ||
| 531 | r100_cp_fini(rdev); | 545 | r100_cp_fini(rdev); |
| 532 | r100_wb_fini(rdev); | 546 | r100_wb_fini(rdev); |
| 533 | r100_ib_fini(rdev); | 547 | r100_ib_fini(rdev); |
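rs400_mc_wait_for_idle above is the usual poll-a-status-bit-until-set-or-timeout loop: read MC_STATUS (0x0150), test bit 2, delay one microsecond, and give up after usec_timeout iterations. A userspace approximation of that idiom — read_mc_status here is a fake that reports idle after a few polls, and the timeout value is arbitrary:

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MC_IDLE_BIT (1u << 2)

/* stand-in for RREG32(0x0150); pretend the MC goes idle after a few polls */
static uint32_t read_mc_status(void)
{
	static int polls;
	return (++polls > 3) ? MC_IDLE_BIT : 0;
}

/* returns 0 once the idle bit is seen, -1 if the timeout expires */
static int mc_wait_for_idle(unsigned usec_timeout)
{
	unsigned i;

	for (i = 0; i < usec_timeout; i++) {
		if (read_mc_status() & MC_IDLE_BIT)
			return 0;
		usleep(1);             /* DRM_UDELAY(1) in the driver */
	}
	return -1;
}

int main(void)
{
	printf("mc idle: %d\n", mc_wait_for_idle(100000));
	return 0;
}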
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index d5255751e7b3..c3818562a13e 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
| @@ -610,7 +610,6 @@ int rs600_suspend(struct radeon_device *rdev) | |||
| 610 | 610 | ||
| 611 | void rs600_fini(struct radeon_device *rdev) | 611 | void rs600_fini(struct radeon_device *rdev) |
| 612 | { | 612 | { |
| 613 | rs600_suspend(rdev); | ||
| 614 | r100_cp_fini(rdev); | 613 | r100_cp_fini(rdev); |
| 615 | r100_wb_fini(rdev); | 614 | r100_wb_fini(rdev); |
| 616 | r100_ib_fini(rdev); | 615 | r100_ib_fini(rdev); |
| @@ -689,7 +688,6 @@ int rs600_init(struct radeon_device *rdev) | |||
| 689 | if (r) { | 688 | if (r) { |
| 690 | /* Something went wrong with the accel init, stop accel */ | 689 | /* Something went wrong with the accel init, stop accel */ |
| 691 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | 690 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
| 692 | rs600_suspend(rdev); | ||
| 693 | r100_cp_fini(rdev); | 691 | r100_cp_fini(rdev); |
| 694 | r100_wb_fini(rdev); | 692 | r100_wb_fini(rdev); |
| 695 | r100_ib_fini(rdev); | 693 | r100_ib_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index cd31da913771..06e2771aee5a 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c | |||
| @@ -676,7 +676,6 @@ int rs690_suspend(struct radeon_device *rdev) | |||
| 676 | 676 | ||
| 677 | void rs690_fini(struct radeon_device *rdev) | 677 | void rs690_fini(struct radeon_device *rdev) |
| 678 | { | 678 | { |
| 679 | rs690_suspend(rdev); | ||
| 680 | r100_cp_fini(rdev); | 679 | r100_cp_fini(rdev); |
| 681 | r100_wb_fini(rdev); | 680 | r100_wb_fini(rdev); |
| 682 | r100_ib_fini(rdev); | 681 | r100_ib_fini(rdev); |
| @@ -756,7 +755,6 @@ int rs690_init(struct radeon_device *rdev) | |||
| 756 | if (r) { | 755 | if (r) { |
| 757 | /* Something went wrong with the accel init, stop accel */ | 756 | /* Something went wrong with the accel init, stop accel */ |
| 758 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | 757 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
| 759 | rs690_suspend(rdev); | ||
| 760 | r100_cp_fini(rdev); | 758 | r100_cp_fini(rdev); |
| 761 | r100_wb_fini(rdev); | 759 | r100_wb_fini(rdev); |
| 762 | r100_ib_fini(rdev); | 760 | r100_ib_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 62756717b044..0e1e6b8632b8 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c | |||
| @@ -537,7 +537,6 @@ void rv515_set_safe_registers(struct radeon_device *rdev) | |||
| 537 | 537 | ||
| 538 | void rv515_fini(struct radeon_device *rdev) | 538 | void rv515_fini(struct radeon_device *rdev) |
| 539 | { | 539 | { |
| 540 | rv515_suspend(rdev); | ||
| 541 | r100_cp_fini(rdev); | 540 | r100_cp_fini(rdev); |
| 542 | r100_wb_fini(rdev); | 541 | r100_wb_fini(rdev); |
| 543 | r100_ib_fini(rdev); | 542 | r100_ib_fini(rdev); |
| @@ -615,13 +614,12 @@ int rv515_init(struct radeon_device *rdev) | |||
| 615 | if (r) { | 614 | if (r) { |
| 616 | /* Something went wrong with the accel init, stop accel */ | 615 | /* Something went wrong with the accel init, stop accel */ |
| 617 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | 616 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
| 618 | rv515_suspend(rdev); | ||
| 619 | r100_cp_fini(rdev); | 617 | r100_cp_fini(rdev); |
| 620 | r100_wb_fini(rdev); | 618 | r100_wb_fini(rdev); |
| 621 | r100_ib_fini(rdev); | 619 | r100_ib_fini(rdev); |
| 620 | radeon_irq_kms_fini(rdev); | ||
| 622 | rv370_pcie_gart_fini(rdev); | 621 | rv370_pcie_gart_fini(rdev); |
| 623 | radeon_agp_fini(rdev); | 622 | radeon_agp_fini(rdev); |
| 624 | radeon_irq_kms_fini(rdev); | ||
| 625 | rdev->accel_working = false; | 623 | rdev->accel_working = false; |
| 626 | } | 624 | } |
| 627 | return 0; | 625 | return 0; |
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index afd9e8213c29..5943d561fd1e 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
| @@ -887,6 +887,12 @@ static int rv770_startup(struct radeon_device *rdev) | |||
| 887 | return r; | 887 | return r; |
| 888 | } | 888 | } |
| 889 | rv770_gpu_init(rdev); | 889 | rv770_gpu_init(rdev); |
| 890 | r = r600_blit_init(rdev); | ||
| 891 | if (r) { | ||
| 892 | r600_blit_fini(rdev); | ||
| 893 | rdev->asic->copy = NULL; | ||
| 894 | dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); | ||
| 895 | } | ||
| 890 | /* pin copy shader into vram */ | 896 | /* pin copy shader into vram */ |
| 891 | if (rdev->r600_blit.shader_obj) { | 897 | if (rdev->r600_blit.shader_obj) { |
| 892 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); | 898 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); |
| @@ -1055,19 +1061,15 @@ int rv770_init(struct radeon_device *rdev) | |||
| 1055 | r = r600_pcie_gart_init(rdev); | 1061 | r = r600_pcie_gart_init(rdev); |
| 1056 | if (r) | 1062 | if (r) |
| 1057 | return r; | 1063 | return r; |
| 1058 | r = r600_blit_init(rdev); | ||
| 1059 | if (r) { | ||
| 1060 | r600_blit_fini(rdev); | ||
| 1061 | rdev->asic->copy = NULL; | ||
| 1062 | dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); | ||
| 1063 | } | ||
| 1064 | 1064 | ||
| 1065 | rdev->accel_working = true; | 1065 | rdev->accel_working = true; |
| 1066 | r = rv770_startup(rdev); | 1066 | r = rv770_startup(rdev); |
| 1067 | if (r) { | 1067 | if (r) { |
| 1068 | rv770_suspend(rdev); | 1068 | dev_err(rdev->dev, "disabling GPU acceleration\n"); |
| 1069 | r600_cp_fini(rdev); | ||
| 1069 | r600_wb_fini(rdev); | 1070 | r600_wb_fini(rdev); |
| 1070 | radeon_ring_fini(rdev); | 1071 | r600_irq_fini(rdev); |
| 1072 | radeon_irq_kms_fini(rdev); | ||
| 1071 | rv770_pcie_gart_fini(rdev); | 1073 | rv770_pcie_gart_fini(rdev); |
| 1072 | rdev->accel_working = false; | 1074 | rdev->accel_working = false; |
| 1073 | } | 1075 | } |
| @@ -1089,13 +1091,11 @@ int rv770_init(struct radeon_device *rdev) | |||
| 1089 | 1091 | ||
| 1090 | void rv770_fini(struct radeon_device *rdev) | 1092 | void rv770_fini(struct radeon_device *rdev) |
| 1091 | { | 1093 | { |
| 1092 | rv770_suspend(rdev); | ||
| 1093 | |||
| 1094 | r600_blit_fini(rdev); | 1094 | r600_blit_fini(rdev); |
| 1095 | r600_cp_fini(rdev); | ||
| 1096 | r600_wb_fini(rdev); | ||
| 1095 | r600_irq_fini(rdev); | 1097 | r600_irq_fini(rdev); |
| 1096 | radeon_irq_kms_fini(rdev); | 1098 | radeon_irq_kms_fini(rdev); |
| 1097 | radeon_ring_fini(rdev); | ||
| 1098 | r600_wb_fini(rdev); | ||
| 1099 | rv770_pcie_gart_fini(rdev); | 1099 | rv770_pcie_gart_fini(rdev); |
| 1100 | radeon_gem_fini(rdev); | 1100 | radeon_gem_fini(rdev); |
| 1101 | radeon_fence_driver_fini(rdev); | 1101 | radeon_fence_driver_fini(rdev); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 135be9688c90..356dc935ec13 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | |||
| @@ -39,10 +39,10 @@ | |||
| 39 | #include "ttm/ttm_execbuf_util.h" | 39 | #include "ttm/ttm_execbuf_util.h" |
| 40 | #include "ttm/ttm_module.h" | 40 | #include "ttm/ttm_module.h" |
| 41 | 41 | ||
| 42 | #define VMWGFX_DRIVER_DATE "20090724" | 42 | #define VMWGFX_DRIVER_DATE "20100209" |
| 43 | #define VMWGFX_DRIVER_MAJOR 0 | 43 | #define VMWGFX_DRIVER_MAJOR 1 |
| 44 | #define VMWGFX_DRIVER_MINOR 1 | 44 | #define VMWGFX_DRIVER_MINOR 0 |
| 45 | #define VMWGFX_DRIVER_PATCHLEVEL 2 | 45 | #define VMWGFX_DRIVER_PATCHLEVEL 0 |
| 46 | #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 | 46 | #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 |
| 47 | #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) | 47 | #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) |
| 48 | #define VMWGFX_MAX_RELOCATIONS 2048 | 48 | #define VMWGFX_MAX_RELOCATIONS 2048 |
| @@ -113,6 +113,7 @@ struct vmw_fifo_state { | |||
| 113 | unsigned long static_buffer_size; | 113 | unsigned long static_buffer_size; |
| 114 | bool using_bounce_buffer; | 114 | bool using_bounce_buffer; |
| 115 | uint32_t capabilities; | 115 | uint32_t capabilities; |
| 116 | struct mutex fifo_mutex; | ||
| 116 | struct rw_semaphore rwsem; | 117 | struct rw_semaphore rwsem; |
| 117 | }; | 118 | }; |
| 118 | 119 | ||
| @@ -213,7 +214,7 @@ struct vmw_private { | |||
| 213 | * Fencing and IRQs. | 214 | * Fencing and IRQs. |
| 214 | */ | 215 | */ |
| 215 | 216 | ||
| 216 | uint32_t fence_seq; | 217 | atomic_t fence_seq; |
| 217 | wait_queue_head_t fence_queue; | 218 | wait_queue_head_t fence_queue; |
| 218 | wait_queue_head_t fifo_queue; | 219 | wait_queue_head_t fifo_queue; |
| 219 | atomic_t fence_queue_waiters; | 220 | atomic_t fence_queue_waiters; |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c index 4157547cc6e4..39d43a01d846 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | |||
| @@ -74,6 +74,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | |||
| 74 | fifo->reserved_size = 0; | 74 | fifo->reserved_size = 0; |
| 75 | fifo->using_bounce_buffer = false; | 75 | fifo->using_bounce_buffer = false; |
| 76 | 76 | ||
| 77 | mutex_init(&fifo->fifo_mutex); | ||
| 77 | init_rwsem(&fifo->rwsem); | 78 | init_rwsem(&fifo->rwsem); |
| 78 | 79 | ||
| 79 | /* | 80 | /* |
| @@ -117,7 +118,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | |||
| 117 | (unsigned int) min, | 118 | (unsigned int) min, |
| 118 | (unsigned int) fifo->capabilities); | 119 | (unsigned int) fifo->capabilities); |
| 119 | 120 | ||
| 120 | dev_priv->fence_seq = dev_priv->last_read_sequence; | 121 | atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence); |
| 121 | iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE); | 122 | iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE); |
| 122 | 123 | ||
| 123 | return vmw_fifo_send_fence(dev_priv, &dummy); | 124 | return vmw_fifo_send_fence(dev_priv, &dummy); |
| @@ -283,7 +284,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes) | |||
| 283 | uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE; | 284 | uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE; |
| 284 | int ret; | 285 | int ret; |
| 285 | 286 | ||
| 286 | down_write(&fifo_state->rwsem); | 287 | mutex_lock(&fifo_state->fifo_mutex); |
| 287 | max = ioread32(fifo_mem + SVGA_FIFO_MAX); | 288 | max = ioread32(fifo_mem + SVGA_FIFO_MAX); |
| 288 | min = ioread32(fifo_mem + SVGA_FIFO_MIN); | 289 | min = ioread32(fifo_mem + SVGA_FIFO_MIN); |
| 289 | next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD); | 290 | next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD); |
| @@ -351,7 +352,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes) | |||
| 351 | } | 352 | } |
| 352 | out_err: | 353 | out_err: |
| 353 | fifo_state->reserved_size = 0; | 354 | fifo_state->reserved_size = 0; |
| 354 | up_write(&fifo_state->rwsem); | 355 | mutex_unlock(&fifo_state->fifo_mutex); |
| 355 | return NULL; | 356 | return NULL; |
| 356 | } | 357 | } |
| 357 | 358 | ||
| @@ -426,6 +427,7 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes) | |||
| 426 | 427 | ||
| 427 | } | 428 | } |
| 428 | 429 | ||
| 430 | down_write(&fifo_state->rwsem); | ||
| 429 | if (fifo_state->using_bounce_buffer || reserveable) { | 431 | if (fifo_state->using_bounce_buffer || reserveable) { |
| 430 | next_cmd += bytes; | 432 | next_cmd += bytes; |
| 431 | if (next_cmd >= max) | 433 | if (next_cmd >= max) |
| @@ -437,8 +439,9 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes) | |||
| 437 | if (reserveable) | 439 | if (reserveable) |
| 438 | iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED); | 440 | iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED); |
| 439 | mb(); | 441 | mb(); |
| 440 | vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); | ||
| 441 | up_write(&fifo_state->rwsem); | 442 | up_write(&fifo_state->rwsem); |
| 443 | vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); | ||
| 444 | mutex_unlock(&fifo_state->fifo_mutex); | ||
| 442 | } | 445 | } |
| 443 | 446 | ||
| 444 | int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence) | 447 | int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence) |
| @@ -451,9 +454,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence) | |||
| 451 | 454 | ||
| 452 | fm = vmw_fifo_reserve(dev_priv, bytes); | 455 | fm = vmw_fifo_reserve(dev_priv, bytes); |
| 453 | if (unlikely(fm == NULL)) { | 456 | if (unlikely(fm == NULL)) { |
| 454 | down_write(&fifo_state->rwsem); | 457 | *sequence = atomic_read(&dev_priv->fence_seq); |
| 455 | *sequence = dev_priv->fence_seq; | ||
| 456 | up_write(&fifo_state->rwsem); | ||
| 457 | ret = -ENOMEM; | 458 | ret = -ENOMEM; |
| 458 | (void)vmw_fallback_wait(dev_priv, false, true, *sequence, | 459 | (void)vmw_fallback_wait(dev_priv, false, true, *sequence, |
| 459 | false, 3*HZ); | 460 | false, 3*HZ); |
| @@ -461,7 +462,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence) | |||
| 461 | } | 462 | } |
| 462 | 463 | ||
| 463 | do { | 464 | do { |
| 464 | *sequence = dev_priv->fence_seq++; | 465 | *sequence = atomic_add_return(1, &dev_priv->fence_seq); |
| 465 | } while (*sequence == 0); | 466 | } while (*sequence == 0); |
| 466 | 467 | ||
| 467 | if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) { | 468 | if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) { |
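In the vmwgfx changes above, fence_seq moves from a u32 guarded by the FIFO rwsem to an atomic_t, sequence allocation skips the value 0 via atomic_add_return, and staleness is judged by how far a sequence trails the last one emitted. A rough userspace approximation using C11 atomics — FENCE_WRAP below is an arbitrary stand-in for VMW_FENCE_WRAP, whose actual value is not shown in this diff:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static atomic_uint fence_seq;

/* hand out the next fence sequence, never returning 0 */
static uint32_t next_fence(void)
{
	uint32_t seq;

	do {
		/* fetch_add returns the old value; +1 mimics atomic_add_return() */
		seq = atomic_fetch_add(&fence_seq, 1) + 1;
	} while (seq == 0);

	return seq;
}

/* arbitrary wrap window for the sketch */
#define FENCE_WRAP (1u << 24)

/* a fence trailing what we have emitted by more than the wrap window is stale */
static int fence_signaled(uint32_t sequence)
{
	return (atomic_load(&fence_seq) - sequence) > FENCE_WRAP;
}

int main(void)
{
	printf("first fence: %u\n", next_fence());
	printf("stale? %d\n", fence_signaled(1));
	return 0;
}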
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index 778851f9f1d6..1c7a316454d8 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | |||
| @@ -48,6 +48,12 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, | |||
| 48 | case DRM_VMW_PARAM_FIFO_OFFSET: | 48 | case DRM_VMW_PARAM_FIFO_OFFSET: |
| 49 | param->value = dev_priv->mmio_start; | 49 | param->value = dev_priv->mmio_start; |
| 50 | break; | 50 | break; |
| 51 | case DRM_VMW_PARAM_HW_CAPS: | ||
| 52 | param->value = dev_priv->capabilities; | ||
| 53 | break; | ||
| 54 | case DRM_VMW_PARAM_FIFO_CAPS: | ||
| 55 | param->value = dev_priv->fifo.capabilities; | ||
| 56 | break; | ||
| 51 | default: | 57 | default: |
| 52 | DRM_ERROR("Illegal vmwgfx get param request: %d\n", | 58 | DRM_ERROR("Illegal vmwgfx get param request: %d\n", |
| 53 | param->param); | 59 | param->param); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c index d40086fc8647..4d7cb5393860 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | |||
| @@ -85,19 +85,12 @@ bool vmw_fence_signaled(struct vmw_private *dev_priv, | |||
| 85 | return true; | 85 | return true; |
| 86 | 86 | ||
| 87 | /** | 87 | /** |
| 88 | * Below is to signal stale fences that have wrapped. | ||
| 89 | * First, block fence submission. | ||
| 90 | */ | ||
| 91 | |||
| 92 | down_read(&fifo_state->rwsem); | ||
| 93 | |||
| 94 | /** | ||
| 95 | * Then check if the sequence is higher than what we've actually | 88 | * Then check if the sequence is higher than what we've actually |
| 96 | * emitted. Then the fence is stale and signaled. | 89 | * emitted. Then the fence is stale and signaled. |
| 97 | */ | 90 | */ |
| 98 | 91 | ||
| 99 | ret = ((dev_priv->fence_seq - sequence) > VMW_FENCE_WRAP); | 92 | ret = ((atomic_read(&dev_priv->fence_seq) - sequence) |
| 100 | up_read(&fifo_state->rwsem); | 93 | > VMW_FENCE_WRAP); |
| 101 | 94 | ||
| 102 | return ret; | 95 | return ret; |
| 103 | } | 96 | } |
| @@ -127,7 +120,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv, | |||
| 127 | 120 | ||
| 128 | if (fifo_idle) | 121 | if (fifo_idle) |
| 129 | down_read(&fifo_state->rwsem); | 122 | down_read(&fifo_state->rwsem); |
| 130 | signal_seq = dev_priv->fence_seq; | 123 | signal_seq = atomic_read(&dev_priv->fence_seq); |
| 131 | ret = 0; | 124 | ret = 0; |
| 132 | 125 | ||
| 133 | for (;;) { | 126 | for (;;) { |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index eeba6d1d06e4..31f9afed0a63 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
| @@ -769,10 +769,10 @@ int vmw_kms_init(struct vmw_private *dev_priv) | |||
| 769 | 769 | ||
| 770 | drm_mode_config_init(dev); | 770 | drm_mode_config_init(dev); |
| 771 | dev->mode_config.funcs = &vmw_kms_funcs; | 771 | dev->mode_config.funcs = &vmw_kms_funcs; |
| 772 | dev->mode_config.min_width = 640; | 772 | dev->mode_config.min_width = 1; |
| 773 | dev->mode_config.min_height = 480; | 773 | dev->mode_config.min_height = 1; |
| 774 | dev->mode_config.max_width = 2048; | 774 | dev->mode_config.max_width = dev_priv->fb_max_width; |
| 775 | dev->mode_config.max_height = 2048; | 775 | dev->mode_config.max_height = dev_priv->fb_max_height; |
| 776 | 776 | ||
| 777 | ret = vmw_kms_init_legacy_display_system(dev_priv); | 777 | ret = vmw_kms_init_legacy_display_system(dev_priv); |
| 778 | 778 | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index c7efbd47ab84..f8fbbc67a406 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | |||
| @@ -35,11 +35,6 @@ | |||
| 35 | #define VMW_RES_SURFACE ttm_driver_type1 | 35 | #define VMW_RES_SURFACE ttm_driver_type1 |
| 36 | #define VMW_RES_STREAM ttm_driver_type2 | 36 | #define VMW_RES_STREAM ttm_driver_type2 |
| 37 | 37 | ||
| 38 | /* XXX: This isn't a real hardware flag, but just a hack for kernel to | ||
| 39 | * know about primary surfaces. Find a better way to accomplish this. | ||
| 40 | */ | ||
| 41 | #define SVGA3D_SURFACE_HINT_SCANOUT (1 << 9) | ||
| 42 | |||
| 43 | struct vmw_user_context { | 38 | struct vmw_user_context { |
| 44 | struct ttm_base_object base; | 39 | struct ttm_base_object base; |
| 45 | struct vmw_resource res; | 40 | struct vmw_resource res; |
| @@ -579,6 +574,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
| 579 | 574 | ||
| 580 | srf->flags = req->flags; | 575 | srf->flags = req->flags; |
| 581 | srf->format = req->format; | 576 | srf->format = req->format; |
| 577 | srf->scanout = req->scanout; | ||
| 582 | memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels)); | 578 | memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels)); |
| 583 | srf->num_sizes = 0; | 579 | srf->num_sizes = 0; |
| 584 | for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) | 580 | for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) |
| @@ -604,16 +600,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
| 604 | if (unlikely(ret != 0)) | 600 | if (unlikely(ret != 0)) |
| 605 | goto out_err1; | 601 | goto out_err1; |
| 606 | 602 | ||
| 607 | if (srf->flags & SVGA3D_SURFACE_HINT_SCANOUT) { | ||
| 608 | /* we should not send this flag down to hardware since | ||
| 609 | * its not a official one | ||
| 610 | */ | ||
| 611 | srf->flags &= ~SVGA3D_SURFACE_HINT_SCANOUT; | ||
| 612 | srf->scanout = true; | ||
| 613 | } else { | ||
| 614 | srf->scanout = false; | ||
| 615 | } | ||
| 616 | |||
| 617 | if (srf->scanout && | 603 | if (srf->scanout && |
| 618 | srf->num_sizes == 1 && | 604 | srf->num_sizes == 1 && |
| 619 | srf->sizes[0].width == 64 && | 605 | srf->sizes[0].width == 64 && |
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c index 1ac0c93603c9..24b56dc54597 100644 --- a/drivers/gpu/vga/vgaarb.c +++ b/drivers/gpu/vga/vgaarb.c | |||
| @@ -961,7 +961,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, | |||
| 961 | remaining -= 7; | 961 | remaining -= 7; |
| 962 | pr_devel("client 0x%p called 'target'\n", priv); | 962 | pr_devel("client 0x%p called 'target'\n", priv); |
| 963 | /* if target is default */ | 963 | /* if target is default */ |
| 964 | if (!strncmp(buf, "default", 7)) | 964 | if (!strncmp(kbuf, "default", 7)) |
| 965 | pdev = pci_dev_get(vga_default_device()); | 965 | pdev = pci_dev_get(vga_default_device()); |
| 966 | else { | 966 | else { |
| 967 | if (!vga_pci_str_to_vars(curr_pos, remaining, | 967 | if (!vga_pci_str_to_vars(curr_pos, remaining, |
diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c index a31e77c776ae..b8156b4893bb 100644 --- a/drivers/hwmon/adt7462.c +++ b/drivers/hwmon/adt7462.c | |||
| @@ -179,7 +179,7 @@ static const unsigned short normal_i2c[] = { 0x58, 0x5C, I2C_CLIENT_END }; | |||
| 179 | * | 179 | * |
| 180 | * Some, but not all, of these voltages have low/high limits. | 180 | * Some, but not all, of these voltages have low/high limits. |
| 181 | */ | 181 | */ |
| 182 | #define ADT7462_VOLT_COUNT 12 | 182 | #define ADT7462_VOLT_COUNT 13 |
| 183 | 183 | ||
| 184 | #define ADT7462_VENDOR 0x41 | 184 | #define ADT7462_VENDOR 0x41 |
| 185 | #define ADT7462_DEVICE 0x62 | 185 | #define ADT7462_DEVICE 0x62 |
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c index cadcbd90ff3b..72ff2c4e757d 100644 --- a/drivers/hwmon/lm78.c +++ b/drivers/hwmon/lm78.c | |||
| @@ -851,17 +851,16 @@ static struct lm78_data *lm78_update_device(struct device *dev) | |||
| 851 | static int __init lm78_isa_found(unsigned short address) | 851 | static int __init lm78_isa_found(unsigned short address) |
| 852 | { | 852 | { |
| 853 | int val, save, found = 0; | 853 | int val, save, found = 0; |
| 854 | 854 | int port; | |
| 855 | /* We have to request the region in two parts because some | 855 | |
| 856 | boards declare base+4 to base+7 as a PNP device */ | 856 | /* Some boards declare base+0 to base+7 as a PNP device, some base+4 |
| 857 | if (!request_region(address, 4, "lm78")) { | 857 | * to base+7 and some base+5 to base+6. So we better request each port |
| 858 | pr_debug("lm78: Failed to request low part of region\n"); | 858 | * individually for the probing phase. */ |
| 859 | return 0; | 859 | for (port = address; port < address + LM78_EXTENT; port++) { |
| 860 | } | 860 | if (!request_region(port, 1, "lm78")) { |
| 861 | if (!request_region(address + 4, 4, "lm78")) { | 861 | pr_debug("lm78: Failed to request port 0x%x\n", port); |
| 862 | pr_debug("lm78: Failed to request high part of region\n"); | 862 | goto release; |
| 863 | release_region(address, 4); | 863 | } |
| 864 | return 0; | ||
| 865 | } | 864 | } |
| 866 | 865 | ||
| 867 | #define REALLY_SLOW_IO | 866 | #define REALLY_SLOW_IO |
| @@ -925,8 +924,8 @@ static int __init lm78_isa_found(unsigned short address) | |||
| 925 | val & 0x80 ? "LM79" : "LM78", (int)address); | 924 | val & 0x80 ? "LM79" : "LM78", (int)address); |
| 926 | 925 | ||
| 927 | release: | 926 | release: |
| 928 | release_region(address + 4, 4); | 927 | for (port--; port >= address; port--) |
| 929 | release_region(address, 4); | 928 | release_region(port, 1); |
| 930 | return found; | 929 | return found; |
| 931 | } | 930 | } |
| 932 | 931 | ||
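Both the lm78 probe above and the w83781d probe in the next file now request each I/O port of the region individually and, on failure or once probing is done, release whatever was requested in reverse order. A generic sketch of that acquire-loop-with-rollback pattern; request_port/release_port are fake stand-ins for request_region()/release_region(), and the base address and extent are only examples:

#include <stdbool.h>
#include <stdio.h>

/* fake request_region(): succeeds except for one port, simulating a conflict */
static bool request_port(unsigned port)
{
	printf("request 0x%x\n", port);
	return port != 0x295 + 5;
}

/* fake release_region() */
static void release_port(unsigned port)
{
	printf("release 0x%x\n", port);
}

#define EXTENT 8   /* example region size */

static int probe_region(unsigned base)
{
	unsigned port;

	for (port = base; port < base + EXTENT; port++) {
		if (!request_port(port))
			goto release;    /* another driver owns this port */
	}

	/* ... probing would happen here ... */

release:
	/* release everything requested so far, last first */
	for (port--; port >= base; port--)
		release_port(port);
	return 0;
}

int main(void)
{
	probe_region(0x295);
	return 0;
}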
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c index 05f9225b6f94..32d4adee73db 100644 --- a/drivers/hwmon/w83781d.c +++ b/drivers/hwmon/w83781d.c | |||
| @@ -1793,17 +1793,17 @@ static int __init | |||
| 1793 | w83781d_isa_found(unsigned short address) | 1793 | w83781d_isa_found(unsigned short address) |
| 1794 | { | 1794 | { |
| 1795 | int val, save, found = 0; | 1795 | int val, save, found = 0; |
| 1796 | 1796 | int port; | |
| 1797 | /* We have to request the region in two parts because some | 1797 | |
| 1798 | boards declare base+4 to base+7 as a PNP device */ | 1798 | /* Some boards declare base+0 to base+7 as a PNP device, some base+4 |
| 1799 | if (!request_region(address, 4, "w83781d")) { | 1799 | * to base+7 and some base+5 to base+6. So we better request each port |
| 1800 | pr_debug("w83781d: Failed to request low part of region\n"); | 1800 | * individually for the probing phase. */ |
| 1801 | return 0; | 1801 | for (port = address; port < address + W83781D_EXTENT; port++) { |
| 1802 | } | 1802 | if (!request_region(port, 1, "w83781d")) { |
| 1803 | if (!request_region(address + 4, 4, "w83781d")) { | 1803 | pr_debug("w83781d: Failed to request port 0x%x\n", |
| 1804 | pr_debug("w83781d: Failed to request high part of region\n"); | 1804 | port); |
| 1805 | release_region(address, 4); | 1805 | goto release; |
| 1806 | return 0; | 1806 | } |
| 1807 | } | 1807 | } |
| 1808 | 1808 | ||
| 1809 | #define REALLY_SLOW_IO | 1809 | #define REALLY_SLOW_IO |
| @@ -1877,8 +1877,8 @@ w83781d_isa_found(unsigned short address) | |||
| 1877 | val == 0x30 ? "W83782D" : "W83781D", (int)address); | 1877 | val == 0x30 ? "W83782D" : "W83781D", (int)address); |
| 1878 | 1878 | ||
| 1879 | release: | 1879 | release: |
| 1880 | release_region(address + 4, 4); | 1880 | for (port--; port >= address; port--) |
| 1881 | release_region(address, 4); | 1881 | release_region(port, 1); |
| 1882 | return found; | 1882 | return found; |
| 1883 | } | 1883 | } |
| 1884 | 1884 | ||
diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c index b1c050ff311d..e29b6d5ba8ef 100644 --- a/drivers/i2c/busses/i2c-tiny-usb.c +++ b/drivers/i2c/busses/i2c-tiny-usb.c | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
| 14 | #include <linux/errno.h> | 14 | #include <linux/errno.h> |
| 15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
| 16 | #include <linux/types.h> | ||
| 16 | 17 | ||
| 17 | /* include interfaces to usb layer */ | 18 | /* include interfaces to usb layer */ |
| 18 | #include <linux/usb.h> | 19 | #include <linux/usb.h> |
| @@ -31,8 +32,8 @@ | |||
| 31 | #define CMD_I2C_IO_END (1<<1) | 32 | #define CMD_I2C_IO_END (1<<1) |
| 32 | 33 | ||
| 33 | /* i2c bit delay, default is 10us -> 100kHz */ | 34 | /* i2c bit delay, default is 10us -> 100kHz */ |
| 34 | static int delay = 10; | 35 | static unsigned short delay = 10; |
| 35 | module_param(delay, int, 0); | 36 | module_param(delay, ushort, 0); |
| 36 | MODULE_PARM_DESC(delay, "bit delay in microseconds, " | 37 | MODULE_PARM_DESC(delay, "bit delay in microseconds, " |
| 37 | "e.g. 10 for 100kHz (default is 100kHz)"); | 38 | "e.g. 10 for 100kHz (default is 100kHz)"); |
| 38 | 39 | ||
| @@ -109,7 +110,7 @@ static int usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) | |||
| 109 | 110 | ||
| 110 | static u32 usb_func(struct i2c_adapter *adapter) | 111 | static u32 usb_func(struct i2c_adapter *adapter) |
| 111 | { | 112 | { |
| 112 | u32 func; | 113 | __le32 func; |
| 113 | 114 | ||
| 114 | /* get functionality from adapter */ | 115 | /* get functionality from adapter */ |
| 115 | if (usb_read(adapter, CMD_GET_FUNC, 0, 0, &func, sizeof(func)) != | 116 | if (usb_read(adapter, CMD_GET_FUNC, 0, 0, &func, sizeof(func)) != |
| @@ -118,7 +119,7 @@ static u32 usb_func(struct i2c_adapter *adapter) | |||
| 118 | return 0; | 119 | return 0; |
| 119 | } | 120 | } |
| 120 | 121 | ||
| 121 | return func; | 122 | return le32_to_cpu(func); |
| 122 | } | 123 | } |
| 123 | 124 | ||
| 124 | /* This is the actual algorithm we define */ | 125 | /* This is the actual algorithm we define */ |
| @@ -216,8 +217,7 @@ static int i2c_tiny_usb_probe(struct usb_interface *interface, | |||
| 216 | "i2c-tiny-usb at bus %03d device %03d", | 217 | "i2c-tiny-usb at bus %03d device %03d", |
| 217 | dev->usb_dev->bus->busnum, dev->usb_dev->devnum); | 218 | dev->usb_dev->bus->busnum, dev->usb_dev->devnum); |
| 218 | 219 | ||
| 219 | if (usb_write(&dev->adapter, CMD_SET_DELAY, | 220 | if (usb_write(&dev->adapter, CMD_SET_DELAY, delay, 0, NULL, 0) != 0) { |
| 220 | cpu_to_le16(delay), 0, NULL, 0) != 0) { | ||
| 221 | dev_err(&dev->adapter.dev, | 221 | dev_err(&dev->adapter.dev, |
| 222 | "failure setting delay to %dus\n", delay); | 222 | "failure setting delay to %dus\n", delay); |
| 223 | retval = -EIO; | 223 | retval = -EIO; |
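The i2c-tiny-usb fix declares the functionality word read from the device as __le32 and converts it with le32_to_cpu(), since the adapter firmware reports it little-endian regardless of host byte order. The same conversion written out by hand in portable userspace C, with an arbitrary example value on the wire:

#include <stdint.h>
#include <stdio.h>

/* convert a little-endian 32-bit value read from the device to host order;
 * the kernel driver uses __le32 plus le32_to_cpu() for the same job */
static uint32_t le32_to_host(const uint8_t b[4])
{
	return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
	       ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

int main(void)
{
	/* bytes as they would arrive over USB for the value 0x0eff0001 */
	uint8_t wire[4] = { 0x01, 0x00, 0xff, 0x0e };

	printf("functionality: 0x%08x\n", le32_to_host(wire));
	return 0;
}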
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index cc9b5940fa97..875e34e0b235 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c | |||
| @@ -2115,9 +2115,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) | |||
| 2115 | if (ret) | 2115 | if (ret) |
| 2116 | goto err1; | 2116 | goto err1; |
| 2117 | 2117 | ||
| 2118 | if (cma_loopback_addr(addr)) { | 2118 | if (!cma_any_addr(addr)) { |
| 2119 | ret = cma_bind_loopback(id_priv); | ||
| 2120 | } else if (!cma_zero_addr(addr)) { | ||
| 2121 | ret = rdma_translate_ip(addr, &id->route.addr.dev_addr); | 2119 | ret = rdma_translate_ip(addr, &id->route.addr.dev_addr); |
| 2122 | if (ret) | 2120 | if (ret) |
| 2123 | goto err1; | 2121 | goto err1; |
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c index 9774bdfaa482..d8c0c8d6992c 100644 --- a/drivers/input/mouse/psmouse-base.c +++ b/drivers/input/mouse/psmouse-base.c | |||
| @@ -1141,7 +1141,14 @@ static void psmouse_cleanup(struct serio *serio) | |||
| 1141 | psmouse_deactivate(parent); | 1141 | psmouse_deactivate(parent); |
| 1142 | } | 1142 | } |
| 1143 | 1143 | ||
| 1144 | psmouse_deactivate(psmouse); | 1144 | psmouse_set_state(psmouse, PSMOUSE_INITIALIZING); |
| 1145 | |||
| 1146 | /* | ||
| 1147 | * Disable stream mode so cleanup routine can proceed undisturbed. | ||
| 1148 | */ | ||
| 1149 | if (ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_DISABLE)) | ||
| 1150 | printk(KERN_WARNING "psmouse.c: Failed to disable mouse on %s\n", | ||
| 1151 | psmouse->ps2dev.serio->phys); | ||
| 1145 | 1152 | ||
| 1146 | if (psmouse->cleanup) | 1153 | if (psmouse->cleanup) |
| 1147 | psmouse->cleanup(psmouse); | 1154 | psmouse->cleanup(psmouse); |
diff --git a/drivers/md/md.c b/drivers/md/md.c index dd3dfe42d5a9..a20a71e5efd3 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
| @@ -4075,8 +4075,10 @@ static void mddev_delayed_delete(struct work_struct *ws) | |||
| 4075 | { | 4075 | { |
| 4076 | mddev_t *mddev = container_of(ws, mddev_t, del_work); | 4076 | mddev_t *mddev = container_of(ws, mddev_t, del_work); |
| 4077 | 4077 | ||
| 4078 | if (mddev->private == &md_redundancy_group) { | 4078 | if (mddev->private) { |
| 4079 | sysfs_remove_group(&mddev->kobj, &md_redundancy_group); | 4079 | sysfs_remove_group(&mddev->kobj, &md_redundancy_group); |
| 4080 | if (mddev->private != (void*)1) | ||
| 4081 | sysfs_remove_group(&mddev->kobj, mddev->private); | ||
| 4080 | if (mddev->sysfs_action) | 4082 | if (mddev->sysfs_action) |
| 4081 | sysfs_put(mddev->sysfs_action); | 4083 | sysfs_put(mddev->sysfs_action); |
| 4082 | mddev->sysfs_action = NULL; | 4084 | mddev->sysfs_action = NULL; |
| @@ -4287,10 +4289,7 @@ static int do_md_run(mddev_t * mddev) | |||
| 4287 | sysfs_notify_dirent(rdev->sysfs_state); | 4289 | sysfs_notify_dirent(rdev->sysfs_state); |
| 4288 | } | 4290 | } |
| 4289 | 4291 | ||
| 4290 | md_probe(mddev->unit, NULL, NULL); | ||
| 4291 | disk = mddev->gendisk; | 4292 | disk = mddev->gendisk; |
| 4292 | if (!disk) | ||
| 4293 | return -ENOMEM; | ||
| 4294 | 4293 | ||
| 4295 | spin_lock(&pers_lock); | 4294 | spin_lock(&pers_lock); |
| 4296 | pers = find_pers(mddev->level, mddev->clevel); | 4295 | pers = find_pers(mddev->level, mddev->clevel); |
| @@ -4530,8 +4529,8 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open) | |||
| 4530 | mddev->queue->unplug_fn = NULL; | 4529 | mddev->queue->unplug_fn = NULL; |
| 4531 | mddev->queue->backing_dev_info.congested_fn = NULL; | 4530 | mddev->queue->backing_dev_info.congested_fn = NULL; |
| 4532 | module_put(mddev->pers->owner); | 4531 | module_put(mddev->pers->owner); |
| 4533 | if (mddev->pers->sync_request) | 4532 | if (mddev->pers->sync_request && mddev->private == NULL) |
| 4534 | mddev->private = &md_redundancy_group; | 4533 | mddev->private = (void*)1; |
| 4535 | mddev->pers = NULL; | 4534 | mddev->pers = NULL; |
| 4536 | /* tell userspace to handle 'inactive' */ | 4535 | /* tell userspace to handle 'inactive' */ |
| 4537 | sysfs_notify_dirent(mddev->sysfs_state); | 4536 | sysfs_notify_dirent(mddev->sysfs_state); |
| @@ -4578,9 +4577,6 @@ out: | |||
| 4578 | } | 4577 | } |
| 4579 | mddev->bitmap_info.offset = 0; | 4578 | mddev->bitmap_info.offset = 0; |
| 4580 | 4579 | ||
| 4581 | /* make sure all md_delayed_delete calls have finished */ | ||
| 4582 | flush_scheduled_work(); | ||
| 4583 | |||
| 4584 | export_array(mddev); | 4580 | export_array(mddev); |
| 4585 | 4581 | ||
| 4586 | mddev->array_sectors = 0; | 4582 | mddev->array_sectors = 0; |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index e84204eb12df..ceb24afdc147 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
| @@ -5136,9 +5136,8 @@ static int stop(mddev_t *mddev) | |||
| 5136 | mddev->thread = NULL; | 5136 | mddev->thread = NULL; |
| 5137 | mddev->queue->backing_dev_info.congested_fn = NULL; | 5137 | mddev->queue->backing_dev_info.congested_fn = NULL; |
| 5138 | blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ | 5138 | blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ |
| 5139 | sysfs_remove_group(&mddev->kobj, &raid5_attrs_group); | ||
| 5140 | free_conf(conf); | 5139 | free_conf(conf); |
| 5141 | mddev->private = NULL; | 5140 | mddev->private = &raid5_attrs_group; |
| 5142 | return 0; | 5141 | return 0; |
| 5143 | } | 5142 | } |
| 5144 | 5143 | ||
| @@ -5464,11 +5463,11 @@ static int raid5_start_reshape(mddev_t *mddev) | |||
| 5464 | !test_bit(Faulty, &rdev->flags)) { | 5463 | !test_bit(Faulty, &rdev->flags)) { |
| 5465 | if (raid5_add_disk(mddev, rdev) == 0) { | 5464 | if (raid5_add_disk(mddev, rdev) == 0) { |
| 5466 | char nm[20]; | 5465 | char nm[20]; |
| 5467 | if (rdev->raid_disk >= conf->previous_raid_disks) | 5466 | if (rdev->raid_disk >= conf->previous_raid_disks) { |
| 5468 | set_bit(In_sync, &rdev->flags); | 5467 | set_bit(In_sync, &rdev->flags); |
| 5469 | else | 5468 | added_devices++; |
| 5469 | } else | ||
| 5470 | rdev->recovery_offset = 0; | 5470 | rdev->recovery_offset = 0; |
| 5471 | added_devices++; | ||
| 5472 | sprintf(nm, "rd%d", rdev->raid_disk); | 5471 | sprintf(nm, "rd%d", rdev->raid_disk); |
| 5473 | if (sysfs_create_link(&mddev->kobj, | 5472 | if (sysfs_create_link(&mddev->kobj, |
| 5474 | &rdev->kobj, nm)) | 5473 | &rdev->kobj, nm)) |
| @@ -5480,9 +5479,12 @@ static int raid5_start_reshape(mddev_t *mddev) | |||
| 5480 | break; | 5479 | break; |
| 5481 | } | 5480 | } |
| 5482 | 5481 | ||
| 5482 | /* When a reshape changes the number of devices, ->degraded | ||
| 5483 | * is measured against the larger of the pre and post number of | ||
| 5484 | * devices. */ | ||
| 5483 | if (mddev->delta_disks > 0) { | 5485 | if (mddev->delta_disks > 0) { |
| 5484 | spin_lock_irqsave(&conf->device_lock, flags); | 5486 | spin_lock_irqsave(&conf->device_lock, flags); |
| 5485 | mddev->degraded = (conf->raid_disks - conf->previous_raid_disks) | 5487 | mddev->degraded += (conf->raid_disks - conf->previous_raid_disks) |
| 5486 | - added_devices; | 5488 | - added_devices; |
| 5487 | spin_unlock_irqrestore(&conf->device_lock, flags); | 5489 | spin_unlock_irqrestore(&conf->device_lock, flags); |
| 5488 | } | 5490 | } |
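Per the comment added above, when a reshape grows the array the degraded count is bumped by the number of new slots not yet covered by an in-sync device. In isolation the bookkeeping is just this, with illustrative numbers:

#include <stdio.h>

int main(void)
{
	int previous_raid_disks = 4;   /* devices before the reshape */
	int raid_disks = 6;            /* devices after the reshape */
	int added_devices = 1;         /* spares brought in-sync during the grow */
	int degraded = 0;

	/* new slots that no in-sync device covers yet count as degraded */
	degraded += (raid_disks - previous_raid_disks) - added_devices;

	printf("degraded = %d\n", degraded);   /* prints 1 */
	return 0;
}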
diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c index c37790ad92d0..9ddc57909d49 100644 --- a/drivers/media/dvb/dvb-core/dmxdev.c +++ b/drivers/media/dvb/dvb-core/dmxdev.c | |||
| @@ -761,7 +761,6 @@ static int dvb_demux_open(struct inode *inode, struct file *file) | |||
| 761 | dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192); | 761 | dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192); |
| 762 | dmxdevfilter->type = DMXDEV_TYPE_NONE; | 762 | dmxdevfilter->type = DMXDEV_TYPE_NONE; |
| 763 | dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED); | 763 | dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED); |
| 764 | INIT_LIST_HEAD(&dmxdevfilter->feed.ts); | ||
| 765 | init_timer(&dmxdevfilter->timer); | 764 | init_timer(&dmxdevfilter->timer); |
| 766 | 765 | ||
| 767 | dvbdev->users++; | 766 | dvbdev->users++; |
| @@ -887,6 +886,7 @@ static int dvb_dmxdev_pes_filter_set(struct dmxdev *dmxdev, | |||
| 887 | dmxdevfilter->type = DMXDEV_TYPE_PES; | 886 | dmxdevfilter->type = DMXDEV_TYPE_PES; |
| 888 | memcpy(&dmxdevfilter->params, params, | 887 | memcpy(&dmxdevfilter->params, params, |
| 889 | sizeof(struct dmx_pes_filter_params)); | 888 | sizeof(struct dmx_pes_filter_params)); |
| 889 | INIT_LIST_HEAD(&dmxdevfilter->feed.ts); | ||
| 890 | 890 | ||
| 891 | dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET); | 891 | dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET); |
| 892 | 892 | ||
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c index b78cfb7d1897..67f189b7aa1f 100644 --- a/drivers/media/dvb/dvb-core/dvb_demux.c +++ b/drivers/media/dvb/dvb-core/dvb_demux.c | |||
| @@ -426,16 +426,7 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf) | |||
| 426 | }; | 426 | }; |
| 427 | }; | 427 | }; |
| 428 | 428 | ||
| 429 | if (dvb_demux_tscheck) { | 429 | if (demux->cnt_storage) { |
| 430 | if (!demux->cnt_storage) | ||
| 431 | demux->cnt_storage = vmalloc(MAX_PID + 1); | ||
| 432 | |||
| 433 | if (!demux->cnt_storage) { | ||
| 434 | printk(KERN_WARNING "Couldn't allocate memory for TS/TEI check. Disabling it\n"); | ||
| 435 | dvb_demux_tscheck = 0; | ||
| 436 | goto no_dvb_demux_tscheck; | ||
| 437 | } | ||
| 438 | |||
| 439 | /* check pkt counter */ | 430 | /* check pkt counter */ |
| 440 | if (pid < MAX_PID) { | 431 | if (pid < MAX_PID) { |
| 441 | if (buf[1] & 0x80) | 432 | if (buf[1] & 0x80) |
| @@ -454,7 +445,6 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf) | |||
| 454 | }; | 445 | }; |
| 455 | /* end check */ | 446 | /* end check */ |
| 456 | }; | 447 | }; |
| 457 | no_dvb_demux_tscheck: | ||
| 458 | 448 | ||
| 459 | list_for_each_entry(feed, &demux->feed_list, list_head) { | 449 | list_for_each_entry(feed, &demux->feed_list, list_head) { |
| 460 | if ((feed->pid != pid) && (feed->pid != 0x2000)) | 450 | if ((feed->pid != pid) && (feed->pid != 0x2000)) |
| @@ -1246,6 +1236,7 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux) | |||
| 1246 | dvbdemux->feed = vmalloc(dvbdemux->feednum * sizeof(struct dvb_demux_feed)); | 1236 | dvbdemux->feed = vmalloc(dvbdemux->feednum * sizeof(struct dvb_demux_feed)); |
| 1247 | if (!dvbdemux->feed) { | 1237 | if (!dvbdemux->feed) { |
| 1248 | vfree(dvbdemux->filter); | 1238 | vfree(dvbdemux->filter); |
| 1239 | dvbdemux->filter = NULL; | ||
| 1249 | return -ENOMEM; | 1240 | return -ENOMEM; |
| 1250 | } | 1241 | } |
| 1251 | for (i = 0; i < dvbdemux->filternum; i++) { | 1242 | for (i = 0; i < dvbdemux->filternum; i++) { |
| @@ -1257,6 +1248,13 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux) | |||
| 1257 | dvbdemux->feed[i].index = i; | 1248 | dvbdemux->feed[i].index = i; |
| 1258 | } | 1249 | } |
| 1259 | 1250 | ||
| 1251 | if (dvb_demux_tscheck) { | ||
| 1252 | dvbdemux->cnt_storage = vmalloc(MAX_PID + 1); | ||
| 1253 | |||
| 1254 | if (!dvbdemux->cnt_storage) | ||
| 1255 | printk(KERN_WARNING "Couldn't allocate memory for TS/TEI check. Disabling it\n"); | ||
| 1256 | } | ||
| 1257 | |||
| 1260 | INIT_LIST_HEAD(&dvbdemux->frontend_list); | 1258 | INIT_LIST_HEAD(&dvbdemux->frontend_list); |
| 1261 | 1259 | ||
| 1262 | for (i = 0; i < DMX_TS_PES_OTHER; i++) { | 1260 | for (i = 0; i < DMX_TS_PES_OTHER; i++) { |
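
The dvb_demux hunks above move the TS/TEI counter allocation out of the per-packet software filter and into dvb_dmx_init(), so the hot path only tests whether the buffer exists. A minimal user-space sketch of that allocate-once-at-init, degrade-gracefully-on-failure shape (names and sizes are illustrative, not the dvb-core API):

    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_PID 0x1fff

    struct demux {
            unsigned char *cnt_storage; /* NULL means the check stays disabled */
    };

    static int tscheck_enabled = 1;     /* stand-in for the module parameter */

    static void demux_init(struct demux *d)
    {
            d->cnt_storage = NULL;
            if (tscheck_enabled) {
                    d->cnt_storage = calloc(1, MAX_PID + 1);
                    if (!d->cnt_storage)
                            fprintf(stderr, "TS/TEI check disabled: no memory\n");
            }
    }

    static void filter_packet(struct demux *d, int pid)
    {
            if (d->cnt_storage && pid < MAX_PID) /* hot path: a single test */
                    d->cnt_storage[pid]++;
    }

    int main(void)
    {
            struct demux d;

            demux_init(&d);
            filter_packet(&d, 0x100);
            free(d.cnt_storage);
            return 0;
    }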
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index 57752751712b..81279b3d694c 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c | |||
| @@ -1796,7 +1796,7 @@ mptscsih_abort(struct scsi_cmnd * SCpnt) | |||
| 1796 | dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "task abort: " | 1796 | dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "task abort: " |
| 1797 | "Command not in the active list! (sc=%p)\n", ioc->name, | 1797 | "Command not in the active list! (sc=%p)\n", ioc->name, |
| 1798 | SCpnt)); | 1798 | SCpnt)); |
| 1799 | retval = 0; | 1799 | retval = SUCCESS; |
| 1800 | goto out; | 1800 | goto out; |
| 1801 | } | 1801 | } |
| 1802 | 1802 | ||
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c index b9f1e84897cc..e7f8027165e6 100644 --- a/drivers/mmc/card/mmc_test.c +++ b/drivers/mmc/card/mmc_test.c | |||
| @@ -74,6 +74,9 @@ static void mmc_test_prepare_mrq(struct mmc_test_card *test, | |||
| 74 | } | 74 | } |
| 75 | 75 | ||
| 76 | mrq->cmd->arg = dev_addr; | 76 | mrq->cmd->arg = dev_addr; |
| 77 | if (!mmc_card_blockaddr(test->card)) | ||
| 78 | mrq->cmd->arg <<= 9; | ||
| 79 | |||
| 77 | mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC; | 80 | mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC; |
| 78 | 81 | ||
| 79 | if (blocks == 1) | 82 | if (blocks == 1) |
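
The first mmc_test hunk above scales the command argument for byte-addressed cards: block-addressed (high-capacity) cards take the sector number directly, while standard-capacity cards expect a byte offset, hence the shift by 9 for 512-byte sectors. A small stand-alone illustration of that translation (plain C, hypothetical values):

    #include <stdio.h>

    /* Block-addressed cards take the sector number as-is; byte-addressed
     * cards want it multiplied by the 512-byte sector size (<< 9). */
    static unsigned int cmd_arg(unsigned int sector, int card_is_blockaddr)
    {
            unsigned int arg = sector;

            if (!card_is_blockaddr)
                    arg <<= 9;
            return arg;
    }

    int main(void)
    {
            printf("sector 8, block-addressed: arg=%u\n", cmd_arg(8, 1)); /* 8    */
            printf("sector 8, byte-addressed : arg=%u\n", cmd_arg(8, 0)); /* 4096 */
            return 0;
    }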
| @@ -190,7 +193,7 @@ static int __mmc_test_prepare(struct mmc_test_card *test, int write) | |||
| 190 | } | 193 | } |
| 191 | 194 | ||
| 192 | for (i = 0;i < BUFFER_SIZE / 512;i++) { | 195 | for (i = 0;i < BUFFER_SIZE / 512;i++) { |
| 193 | ret = mmc_test_buffer_transfer(test, test->buffer, i * 512, 512, 1); | 196 | ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1); |
| 194 | if (ret) | 197 | if (ret) |
| 195 | return ret; | 198 | return ret; |
| 196 | } | 199 | } |
| @@ -219,7 +222,7 @@ static int mmc_test_cleanup(struct mmc_test_card *test) | |||
| 219 | memset(test->buffer, 0, 512); | 222 | memset(test->buffer, 0, 512); |
| 220 | 223 | ||
| 221 | for (i = 0;i < BUFFER_SIZE / 512;i++) { | 224 | for (i = 0;i < BUFFER_SIZE / 512;i++) { |
| 222 | ret = mmc_test_buffer_transfer(test, test->buffer, i * 512, 512, 1); | 225 | ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1); |
| 223 | if (ret) | 226 | if (ret) |
| 224 | return ret; | 227 | return ret; |
| 225 | } | 228 | } |
| @@ -426,7 +429,7 @@ static int mmc_test_transfer(struct mmc_test_card *test, | |||
| 426 | for (i = 0;i < sectors;i++) { | 429 | for (i = 0;i < sectors;i++) { |
| 427 | ret = mmc_test_buffer_transfer(test, | 430 | ret = mmc_test_buffer_transfer(test, |
| 428 | test->buffer + i * 512, | 431 | test->buffer + i * 512, |
| 429 | dev_addr + i * 512, 512, 0); | 432 | dev_addr + i, 512, 0); |
| 430 | if (ret) | 433 | if (ret) |
| 431 | return ret; | 434 | return ret; |
| 432 | } | 435 | } |
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c index 62d9c9cc5671..1dd4403247ca 100644 --- a/drivers/net/ax88796.c +++ b/drivers/net/ax88796.c | |||
| @@ -921,7 +921,7 @@ static int ax_probe(struct platform_device *pdev) | |||
| 921 | size = (res->end - res->start) + 1; | 921 | size = (res->end - res->start) + 1; |
| 922 | 922 | ||
| 923 | ax->mem2 = request_mem_region(res->start, size, pdev->name); | 923 | ax->mem2 = request_mem_region(res->start, size, pdev->name); |
| 924 | if (ax->mem == NULL) { | 924 | if (ax->mem2 == NULL) { |
| 925 | dev_err(&pdev->dev, "cannot reserve registers\n"); | 925 | dev_err(&pdev->dev, "cannot reserve registers\n"); |
| 926 | ret = -ENXIO; | 926 | ret = -ENXIO; |
| 927 | goto exit_mem1; | 927 | goto exit_mem1; |
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c index bdbd14727e4b..318a018ca7c5 100644 --- a/drivers/net/cxgb3/sge.c +++ b/drivers/net/cxgb3/sge.c | |||
| @@ -2079,6 +2079,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs, | |||
| 2079 | struct sge_fl *fl, int len, int complete) | 2079 | struct sge_fl *fl, int len, int complete) |
| 2080 | { | 2080 | { |
| 2081 | struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; | 2081 | struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; |
| 2082 | struct port_info *pi = netdev_priv(qs->netdev); | ||
| 2082 | struct sk_buff *skb = NULL; | 2083 | struct sk_buff *skb = NULL; |
| 2083 | struct cpl_rx_pkt *cpl; | 2084 | struct cpl_rx_pkt *cpl; |
| 2084 | struct skb_frag_struct *rx_frag; | 2085 | struct skb_frag_struct *rx_frag; |
| @@ -2116,11 +2117,18 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs, | |||
| 2116 | 2117 | ||
| 2117 | if (!nr_frags) { | 2118 | if (!nr_frags) { |
| 2118 | offset = 2 + sizeof(struct cpl_rx_pkt); | 2119 | offset = 2 + sizeof(struct cpl_rx_pkt); |
| 2119 | qs->lro_va = sd->pg_chunk.va + 2; | 2120 | cpl = qs->lro_va = sd->pg_chunk.va + 2; |
| 2120 | } | ||
| 2121 | len -= offset; | ||
| 2122 | 2121 | ||
| 2123 | prefetch(qs->lro_va); | 2122 | if ((pi->rx_offload & T3_RX_CSUM) && |
| 2123 | cpl->csum_valid && cpl->csum == htons(0xffff)) { | ||
| 2124 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
| 2125 | qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++; | ||
| 2126 | } else | ||
| 2127 | skb->ip_summed = CHECKSUM_NONE; | ||
| 2128 | } else | ||
| 2129 | cpl = qs->lro_va; | ||
| 2130 | |||
| 2131 | len -= offset; | ||
| 2124 | 2132 | ||
| 2125 | rx_frag += nr_frags; | 2133 | rx_frag += nr_frags; |
| 2126 | rx_frag->page = sd->pg_chunk.page; | 2134 | rx_frag->page = sd->pg_chunk.page; |
| @@ -2136,12 +2144,8 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs, | |||
| 2136 | return; | 2144 | return; |
| 2137 | 2145 | ||
| 2138 | skb_record_rx_queue(skb, qs - &adap->sge.qs[0]); | 2146 | skb_record_rx_queue(skb, qs - &adap->sge.qs[0]); |
| 2139 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
| 2140 | cpl = qs->lro_va; | ||
| 2141 | 2147 | ||
| 2142 | if (unlikely(cpl->vlan_valid)) { | 2148 | if (unlikely(cpl->vlan_valid)) { |
| 2143 | struct net_device *dev = qs->netdev; | ||
| 2144 | struct port_info *pi = netdev_priv(dev); | ||
| 2145 | struct vlan_group *grp = pi->vlan_grp; | 2149 | struct vlan_group *grp = pi->vlan_grp; |
| 2146 | 2150 | ||
| 2147 | if (likely(grp != NULL)) { | 2151 | if (likely(grp != NULL)) { |
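
The cxgb3 hunks above stop marking every LRO page as CHECKSUM_UNNECESSARY: the skb is only trusted when the port has RX checksum offload enabled and the hardware reports a valid checksum of 0xffff; otherwise it falls back to CHECKSUM_NONE. A hedged user-space sketch of that decision (the constants and fields are stand-ins, not the cxgb3 structures):

    #include <stdio.h>

    #define CHECKSUM_NONE        0
    #define CHECKSUM_UNNECESSARY 1

    /* Trust the hardware checksum only when offload is enabled and the
     * device reports that the checksum verified correctly. */
    static int rx_csum_label(int offload_enabled, int csum_valid,
                             unsigned short csum)
    {
            if (offload_enabled && csum_valid && csum == 0xffff)
                    return CHECKSUM_UNNECESSARY;
            return CHECKSUM_NONE;
    }

    int main(void)
    {
            printf("%d\n", rx_csum_label(1, 1, 0xffff)); /* 1: skip sw check */
            printf("%d\n", rx_csum_label(1, 0, 0xffff)); /* 0: verify in sw  */
            return 0;
    }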
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index 997124d2992a..c881347cb26d 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c | |||
| @@ -421,6 +421,8 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector) | |||
| 421 | msixbm = E1000_EICR_RX_QUEUE0 << rx_queue; | 421 | msixbm = E1000_EICR_RX_QUEUE0 << rx_queue; |
| 422 | if (tx_queue > IGB_N0_QUEUE) | 422 | if (tx_queue > IGB_N0_QUEUE) |
| 423 | msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue; | 423 | msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue; |
| 424 | if (!adapter->msix_entries && msix_vector == 0) | ||
| 425 | msixbm |= E1000_EIMS_OTHER; | ||
| 424 | array_wr32(E1000_MSIXBM(0), msix_vector, msixbm); | 426 | array_wr32(E1000_MSIXBM(0), msix_vector, msixbm); |
| 425 | q_vector->eims_value = msixbm; | 427 | q_vector->eims_value = msixbm; |
| 426 | break; | 428 | break; |
| @@ -877,7 +879,6 @@ static int igb_request_irq(struct igb_adapter *adapter) | |||
| 877 | { | 879 | { |
| 878 | struct net_device *netdev = adapter->netdev; | 880 | struct net_device *netdev = adapter->netdev; |
| 879 | struct pci_dev *pdev = adapter->pdev; | 881 | struct pci_dev *pdev = adapter->pdev; |
| 880 | struct e1000_hw *hw = &adapter->hw; | ||
| 881 | int err = 0; | 882 | int err = 0; |
| 882 | 883 | ||
| 883 | if (adapter->msix_entries) { | 884 | if (adapter->msix_entries) { |
| @@ -909,20 +910,7 @@ static int igb_request_irq(struct igb_adapter *adapter) | |||
| 909 | igb_setup_all_tx_resources(adapter); | 910 | igb_setup_all_tx_resources(adapter); |
| 910 | igb_setup_all_rx_resources(adapter); | 911 | igb_setup_all_rx_resources(adapter); |
| 911 | } else { | 912 | } else { |
| 912 | switch (hw->mac.type) { | 913 | igb_assign_vector(adapter->q_vector[0], 0); |
| 913 | case e1000_82575: | ||
| 914 | wr32(E1000_MSIXBM(0), | ||
| 915 | (E1000_EICR_RX_QUEUE0 | | ||
| 916 | E1000_EICR_TX_QUEUE0 | | ||
| 917 | E1000_EIMS_OTHER)); | ||
| 918 | break; | ||
| 919 | case e1000_82580: | ||
| 920 | case e1000_82576: | ||
| 921 | wr32(E1000_IVAR0, E1000_IVAR_VALID); | ||
| 922 | break; | ||
| 923 | default: | ||
| 924 | break; | ||
| 925 | } | ||
| 926 | } | 914 | } |
| 927 | 915 | ||
| 928 | if (adapter->flags & IGB_FLAG_HAS_MSI) { | 916 | if (adapter->flags & IGB_FLAG_HAS_MSI) { |
| @@ -1140,6 +1128,8 @@ int igb_up(struct igb_adapter *adapter) | |||
| 1140 | } | 1128 | } |
| 1141 | if (adapter->msix_entries) | 1129 | if (adapter->msix_entries) |
| 1142 | igb_configure_msix(adapter); | 1130 | igb_configure_msix(adapter); |
| 1131 | else | ||
| 1132 | igb_assign_vector(adapter->q_vector[0], 0); | ||
| 1143 | 1133 | ||
| 1144 | /* Clear any pending interrupts. */ | 1134 | /* Clear any pending interrupts. */ |
| 1145 | rd32(E1000_ICR); | 1135 | rd32(E1000_ICR); |
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index b5f64ad67975..7b7c8486c0bf 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
| @@ -5179,7 +5179,7 @@ dma_error: | |||
| 5179 | ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); | 5179 | ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); |
| 5180 | } | 5180 | } |
| 5181 | 5181 | ||
| 5182 | return count; | 5182 | return 0; |
| 5183 | } | 5183 | } |
| 5184 | 5184 | ||
| 5185 | static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, | 5185 | static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, |
| @@ -5329,8 +5329,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) | |||
| 5329 | struct ixgbe_adapter *adapter = netdev_priv(dev); | 5329 | struct ixgbe_adapter *adapter = netdev_priv(dev); |
| 5330 | int txq = smp_processor_id(); | 5330 | int txq = smp_processor_id(); |
| 5331 | 5331 | ||
| 5332 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) | 5332 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { |
| 5333 | while (unlikely(txq >= dev->real_num_tx_queues)) | ||
| 5334 | txq -= dev->real_num_tx_queues; | ||
| 5333 | return txq; | 5335 | return txq; |
| 5336 | } | ||
| 5334 | 5337 | ||
| 5335 | #ifdef IXGBE_FCOE | 5338 | #ifdef IXGBE_FCOE |
| 5336 | if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && | 5339 | if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && |
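
In the ixgbe hunk above, smp_processor_id() can exceed the number of real TX queues, so the driver folds it back into range by repeated subtraction before using it as a queue index. A stand-alone sketch of that wrap, equivalent to a modulo (queue counts are hypothetical):

    #include <stdio.h>

    /* Fold an arbitrary CPU id into [0, num_tx_queues), the same way the
     * driver does: subtract the queue count until the index is in range. */
    static int select_queue(int cpu, int num_tx_queues)
    {
            int txq = cpu;

            while (txq >= num_tx_queues)
                    txq -= num_tx_queues;
            return txq;
    }

    int main(void)
    {
            printf("cpu 5, 4 queues  -> txq %d\n", select_queue(5, 4));  /* 1 */
            printf("cpu 11, 4 queues -> txq %d\n", select_queue(11, 4)); /* 3 */
            return 0;
    }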
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index 9f9d6081959b..24279e6e55f5 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c | |||
| @@ -1941,7 +1941,7 @@ static void netxen_tx_timeout_task(struct work_struct *work) | |||
| 1941 | netif_wake_queue(adapter->netdev); | 1941 | netif_wake_queue(adapter->netdev); |
| 1942 | 1942 | ||
| 1943 | clear_bit(__NX_RESETTING, &adapter->state); | 1943 | clear_bit(__NX_RESETTING, &adapter->state); |
| 1944 | 1944 | return; | |
| 1945 | } else { | 1945 | } else { |
| 1946 | clear_bit(__NX_RESETTING, &adapter->state); | 1946 | clear_bit(__NX_RESETTING, &adapter->state); |
| 1947 | if (!netxen_nic_reset_context(adapter)) { | 1947 | if (!netxen_nic_reset_context(adapter)) { |
| @@ -2240,7 +2240,9 @@ netxen_detach_work(struct work_struct *work) | |||
| 2240 | 2240 | ||
| 2241 | netxen_nic_down(adapter, netdev); | 2241 | netxen_nic_down(adapter, netdev); |
| 2242 | 2242 | ||
| 2243 | rtnl_lock(); | ||
| 2243 | netxen_nic_detach(adapter); | 2244 | netxen_nic_detach(adapter); |
| 2245 | rtnl_unlock(); | ||
| 2244 | 2246 | ||
| 2245 | status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1); | 2247 | status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1); |
| 2246 | 2248 | ||
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index d760650c5c04..67249c3c9f50 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c | |||
| @@ -1025,11 +1025,8 @@ static void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr, | |||
| 1025 | static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2, u16 *slot) | 1025 | static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2, u16 *slot) |
| 1026 | { | 1026 | { |
| 1027 | struct sky2_tx_le *le = sky2->tx_le + *slot; | 1027 | struct sky2_tx_le *le = sky2->tx_le + *slot; |
| 1028 | struct tx_ring_info *re = sky2->tx_ring + *slot; | ||
| 1029 | 1028 | ||
| 1030 | *slot = RING_NEXT(*slot, sky2->tx_ring_size); | 1029 | *slot = RING_NEXT(*slot, sky2->tx_ring_size); |
| 1031 | re->flags = 0; | ||
| 1032 | re->skb = NULL; | ||
| 1033 | le->ctrl = 0; | 1030 | le->ctrl = 0; |
| 1034 | return le; | 1031 | return le; |
| 1035 | } | 1032 | } |
| @@ -1622,8 +1619,7 @@ static unsigned tx_le_req(const struct sk_buff *skb) | |||
| 1622 | return count; | 1619 | return count; |
| 1623 | } | 1620 | } |
| 1624 | 1621 | ||
| 1625 | static void sky2_tx_unmap(struct pci_dev *pdev, | 1622 | static void sky2_tx_unmap(struct pci_dev *pdev, struct tx_ring_info *re) |
| 1626 | const struct tx_ring_info *re) | ||
| 1627 | { | 1623 | { |
| 1628 | if (re->flags & TX_MAP_SINGLE) | 1624 | if (re->flags & TX_MAP_SINGLE) |
| 1629 | pci_unmap_single(pdev, pci_unmap_addr(re, mapaddr), | 1625 | pci_unmap_single(pdev, pci_unmap_addr(re, mapaddr), |
| @@ -1633,6 +1629,7 @@ static void sky2_tx_unmap(struct pci_dev *pdev, | |||
| 1633 | pci_unmap_page(pdev, pci_unmap_addr(re, mapaddr), | 1629 | pci_unmap_page(pdev, pci_unmap_addr(re, mapaddr), |
| 1634 | pci_unmap_len(re, maplen), | 1630 | pci_unmap_len(re, maplen), |
| 1635 | PCI_DMA_TODEVICE); | 1631 | PCI_DMA_TODEVICE); |
| 1632 | re->flags = 0; | ||
| 1636 | } | 1633 | } |
| 1637 | 1634 | ||
| 1638 | /* | 1635 | /* |
| @@ -1839,6 +1836,7 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done) | |||
| 1839 | dev->stats.tx_packets++; | 1836 | dev->stats.tx_packets++; |
| 1840 | dev->stats.tx_bytes += skb->len; | 1837 | dev->stats.tx_bytes += skb->len; |
| 1841 | 1838 | ||
| 1839 | re->skb = NULL; | ||
| 1842 | dev_kfree_skb_any(skb); | 1840 | dev_kfree_skb_any(skb); |
| 1843 | 1841 | ||
| 1844 | sky2->tx_next = RING_NEXT(idx, sky2->tx_ring_size); | 1842 | sky2->tx_next = RING_NEXT(idx, sky2->tx_ring_size); |
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index c74694345b6e..d58b94030ef3 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
| @@ -338,6 +338,23 @@ static void __devinit quirk_s3_64M(struct pci_dev *dev) | |||
| 338 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M); | 338 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M); |
| 339 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M); | 339 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M); |
| 340 | 340 | ||
| 341 | /* | ||
| 342 | * Some CS5536 BIOSes (for example, the Soekris NET5501 board w/ comBIOS | ||
| 343 | * ver. 1.33 20070103) don't set the correct ISA PCI region header info. | ||
| 344 | * BAR0 should be 8 bytes; instead, it may be set to something like 8k | ||
| 345 | * (which conflicts w/ BAR1's memory range). | ||
| 346 | */ | ||
| 347 | static void __devinit quirk_cs5536_vsa(struct pci_dev *dev) | ||
| 348 | { | ||
| 349 | if (pci_resource_len(dev, 0) != 8) { | ||
| 350 | struct resource *res = &dev->resource[0]; | ||
| 351 | res->end = res->start + 8 - 1; | ||
| 352 | dev_info(&dev->dev, "CS5536 ISA bridge bug detected " | ||
| 353 | "(incorrect header); workaround applied.\n"); | ||
| 354 | } | ||
| 355 | } | ||
| 356 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa); | ||
| 357 | |||
| 341 | static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region, | 358 | static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region, |
| 342 | unsigned size, int nr, const char *name) | 359 | unsigned size, int nr, const char *name) |
| 343 | { | 360 | { |
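
The quirk above shrinks an over-sized BAR0 back to its real 8-byte window; resource ends are inclusive, so an 8-byte region ends at start + 8 - 1. A small stand-alone illustration of that inclusive-end clamp (plain C with made-up addresses, not the kernel's struct resource API):

    #include <stdio.h>

    struct res {
            unsigned long start;
            unsigned long end; /* inclusive, like the kernel's struct resource */
    };

    /* Clamp a resource to 'size' bytes; 'end' is the last valid address. */
    static void clamp_resource(struct res *r, unsigned long size)
    {
            if (r->end - r->start + 1 != size)
                    r->end = r->start + size - 1;
    }

    int main(void)
    {
            struct res bar0 = { 0x6000, 0x7fff }; /* bogus 8 KiB window */

            clamp_resource(&bar0, 8);
            printf("BAR0: 0x%lx-0x%lx\n", bar0.start, bar0.end); /* 0x6000-0x6007 */
            return 0;
    }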
diff --git a/drivers/power/wm97xx_battery.c b/drivers/power/wm97xx_battery.c index fa39e759a275..6ea3cb5837c7 100644 --- a/drivers/power/wm97xx_battery.c +++ b/drivers/power/wm97xx_battery.c | |||
| @@ -175,8 +175,14 @@ static int __devinit wm97xx_bat_probe(struct platform_device *dev) | |||
| 175 | dev_err(&dev->dev, "Do not pass platform_data through " | 175 | dev_err(&dev->dev, "Do not pass platform_data through " |
| 176 | "wm97xx_bat_set_pdata!\n"); | 176 | "wm97xx_bat_set_pdata!\n"); |
| 177 | return -EINVAL; | 177 | return -EINVAL; |
| 178 | } else | 178 | } |
| 179 | pdata = wmdata->batt_pdata; | 179 | |
| 180 | if (!wmdata) { | ||
| 181 | dev_err(&dev->dev, "No platform data supplied\n"); | ||
| 182 | return -EINVAL; | ||
| 183 | } | ||
| 184 | |||
| 185 | pdata = wmdata->batt_pdata; | ||
| 180 | 186 | ||
| 181 | if (dev->id != -1) | 187 | if (dev->id != -1) |
| 182 | return -EINVAL; | 188 | return -EINVAL; |
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 999fe80c4051..62b654af9237 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c | |||
| @@ -531,7 +531,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q) | |||
| 531 | qdio_siga_sync_q(q); | 531 | qdio_siga_sync_q(q); |
| 532 | get_buf_state(q, q->first_to_check, &state, 0); | 532 | get_buf_state(q, q->first_to_check, &state, 0); |
| 533 | 533 | ||
| 534 | if (state == SLSB_P_INPUT_PRIMED) | 534 | if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR) |
| 535 | /* more work coming */ | 535 | /* more work coming */ |
| 536 | return 0; | 536 | return 0; |
| 537 | 537 | ||
| @@ -960,6 +960,8 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
| 960 | qdio_handle_activate_check(cdev, intparm, cstat, | 960 | qdio_handle_activate_check(cdev, intparm, cstat, |
| 961 | dstat); | 961 | dstat); |
| 962 | break; | 962 | break; |
| 963 | case QDIO_IRQ_STATE_STOPPED: | ||
| 964 | break; | ||
| 963 | default: | 965 | default: |
| 964 | WARN_ON(1); | 966 | WARN_ON(1); |
| 965 | } | 967 | } |
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 0f7b493fb105..271399f62f1b 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c | |||
| @@ -671,12 +671,11 @@ static void zfcp_fc_ct_els_job_handler(void *data) | |||
| 671 | { | 671 | { |
| 672 | struct fc_bsg_job *job = data; | 672 | struct fc_bsg_job *job = data; |
| 673 | struct zfcp_fsf_ct_els *zfcp_ct_els = job->dd_data; | 673 | struct zfcp_fsf_ct_els *zfcp_ct_els = job->dd_data; |
| 674 | int status = zfcp_ct_els->status; | 674 | struct fc_bsg_reply *jr = job->reply; |
| 675 | int reply_status; | ||
| 676 | 675 | ||
| 677 | reply_status = status ? FC_CTELS_STATUS_REJECT : FC_CTELS_STATUS_OK; | 676 | jr->reply_payload_rcv_len = job->reply_payload.payload_len; |
| 678 | job->reply->reply_data.ctels_reply.status = reply_status; | 677 | jr->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; |
| 679 | job->reply->reply_payload_rcv_len = job->reply_payload.payload_len; | 678 | jr->result = zfcp_ct_els->status ? -EIO : 0; |
| 680 | job->job_done(job); | 679 | job->job_done(job); |
| 681 | } | 680 | } |
| 682 | 681 | ||
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index f61fb8d01330..8bc6f53691e9 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h | |||
| @@ -453,6 +453,5 @@ extern void qla24xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t); | |||
| 453 | extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t); | 453 | extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t); |
| 454 | extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); | 454 | extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); |
| 455 | extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); | 455 | extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); |
| 456 | extern struct scsi_qla_host * qla25xx_get_host(struct rsp_que *); | ||
| 457 | 456 | ||
| 458 | #endif /* _QLA_GBL_H */ | 457 | #endif /* _QLA_GBL_H */ |
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index ffd0efdff40e..6fc63b98818c 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c | |||
| @@ -1917,6 +1917,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id) | |||
| 1917 | struct rsp_que *rsp; | 1917 | struct rsp_que *rsp; |
| 1918 | struct device_reg_24xx __iomem *reg; | 1918 | struct device_reg_24xx __iomem *reg; |
| 1919 | struct scsi_qla_host *vha; | 1919 | struct scsi_qla_host *vha; |
| 1920 | unsigned long flags; | ||
| 1920 | 1921 | ||
| 1921 | rsp = (struct rsp_que *) dev_id; | 1922 | rsp = (struct rsp_que *) dev_id; |
| 1922 | if (!rsp) { | 1923 | if (!rsp) { |
| @@ -1927,15 +1928,15 @@ qla24xx_msix_rsp_q(int irq, void *dev_id) | |||
| 1927 | ha = rsp->hw; | 1928 | ha = rsp->hw; |
| 1928 | reg = &ha->iobase->isp24; | 1929 | reg = &ha->iobase->isp24; |
| 1929 | 1930 | ||
| 1930 | spin_lock_irq(&ha->hardware_lock); | 1931 | spin_lock_irqsave(&ha->hardware_lock, flags); |
| 1931 | 1932 | ||
| 1932 | vha = qla25xx_get_host(rsp); | 1933 | vha = pci_get_drvdata(ha->pdev); |
| 1933 | qla24xx_process_response_queue(vha, rsp); | 1934 | qla24xx_process_response_queue(vha, rsp); |
| 1934 | if (!ha->flags.disable_msix_handshake) { | 1935 | if (!ha->flags.disable_msix_handshake) { |
| 1935 | WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); | 1936 | WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); |
| 1936 | RD_REG_DWORD_RELAXED(®->hccr); | 1937 | RD_REG_DWORD_RELAXED(®->hccr); |
| 1937 | } | 1938 | } |
| 1938 | spin_unlock_irq(&ha->hardware_lock); | 1939 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
| 1939 | 1940 | ||
| 1940 | return IRQ_HANDLED; | 1941 | return IRQ_HANDLED; |
| 1941 | } | 1942 | } |
| @@ -1946,6 +1947,7 @@ qla25xx_msix_rsp_q(int irq, void *dev_id) | |||
| 1946 | struct qla_hw_data *ha; | 1947 | struct qla_hw_data *ha; |
| 1947 | struct rsp_que *rsp; | 1948 | struct rsp_que *rsp; |
| 1948 | struct device_reg_24xx __iomem *reg; | 1949 | struct device_reg_24xx __iomem *reg; |
| 1950 | unsigned long flags; | ||
| 1949 | 1951 | ||
| 1950 | rsp = (struct rsp_que *) dev_id; | 1952 | rsp = (struct rsp_que *) dev_id; |
| 1951 | if (!rsp) { | 1953 | if (!rsp) { |
| @@ -1958,10 +1960,10 @@ qla25xx_msix_rsp_q(int irq, void *dev_id) | |||
| 1958 | /* Clear the interrupt, if enabled, for this response queue */ | 1960 | /* Clear the interrupt, if enabled, for this response queue */ |
| 1959 | if (rsp->options & ~BIT_6) { | 1961 | if (rsp->options & ~BIT_6) { |
| 1960 | reg = &ha->iobase->isp24; | 1962 | reg = &ha->iobase->isp24; |
| 1961 | spin_lock_irq(&ha->hardware_lock); | 1963 | spin_lock_irqsave(&ha->hardware_lock, flags); |
| 1962 | WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); | 1964 | WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); |
| 1963 | RD_REG_DWORD_RELAXED(®->hccr); | 1965 | RD_REG_DWORD_RELAXED(®->hccr); |
| 1964 | spin_unlock_irq(&ha->hardware_lock); | 1966 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
| 1965 | } | 1967 | } |
| 1966 | queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work); | 1968 | queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work); |
| 1967 | 1969 | ||
| @@ -1979,6 +1981,7 @@ qla24xx_msix_default(int irq, void *dev_id) | |||
| 1979 | uint32_t stat; | 1981 | uint32_t stat; |
| 1980 | uint32_t hccr; | 1982 | uint32_t hccr; |
| 1981 | uint16_t mb[4]; | 1983 | uint16_t mb[4]; |
| 1984 | unsigned long flags; | ||
| 1982 | 1985 | ||
| 1983 | rsp = (struct rsp_que *) dev_id; | 1986 | rsp = (struct rsp_que *) dev_id; |
| 1984 | if (!rsp) { | 1987 | if (!rsp) { |
| @@ -1990,7 +1993,7 @@ qla24xx_msix_default(int irq, void *dev_id) | |||
| 1990 | reg = &ha->iobase->isp24; | 1993 | reg = &ha->iobase->isp24; |
| 1991 | status = 0; | 1994 | status = 0; |
| 1992 | 1995 | ||
| 1993 | spin_lock_irq(&ha->hardware_lock); | 1996 | spin_lock_irqsave(&ha->hardware_lock, flags); |
| 1994 | vha = pci_get_drvdata(ha->pdev); | 1997 | vha = pci_get_drvdata(ha->pdev); |
| 1995 | do { | 1998 | do { |
| 1996 | stat = RD_REG_DWORD(®->host_status); | 1999 | stat = RD_REG_DWORD(®->host_status); |
| @@ -2039,7 +2042,7 @@ qla24xx_msix_default(int irq, void *dev_id) | |||
| 2039 | } | 2042 | } |
| 2040 | WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); | 2043 | WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); |
| 2041 | } while (0); | 2044 | } while (0); |
| 2042 | spin_unlock_irq(&ha->hardware_lock); | 2045 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
| 2043 | 2046 | ||
| 2044 | if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && | 2047 | if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && |
| 2045 | (status & MBX_INTERRUPT) && ha->flags.mbox_int) { | 2048 | (status & MBX_INTERRUPT) && ha->flags.mbox_int) { |
| @@ -2277,30 +2280,3 @@ int qla25xx_request_irq(struct rsp_que *rsp) | |||
| 2277 | msix->rsp = rsp; | 2280 | msix->rsp = rsp; |
| 2278 | return ret; | 2281 | return ret; |
| 2279 | } | 2282 | } |
| 2280 | |||
| 2281 | struct scsi_qla_host * | ||
| 2282 | qla25xx_get_host(struct rsp_que *rsp) | ||
| 2283 | { | ||
| 2284 | srb_t *sp; | ||
| 2285 | struct qla_hw_data *ha = rsp->hw; | ||
| 2286 | struct scsi_qla_host *vha = NULL; | ||
| 2287 | struct sts_entry_24xx *pkt; | ||
| 2288 | struct req_que *req; | ||
| 2289 | uint16_t que; | ||
| 2290 | uint32_t handle; | ||
| 2291 | |||
| 2292 | pkt = (struct sts_entry_24xx *) rsp->ring_ptr; | ||
| 2293 | que = MSW(pkt->handle); | ||
| 2294 | handle = (uint32_t) LSW(pkt->handle); | ||
| 2295 | req = ha->req_q_map[que]; | ||
| 2296 | if (handle < MAX_OUTSTANDING_COMMANDS) { | ||
| 2297 | sp = req->outstanding_cmds[handle]; | ||
| 2298 | if (sp) | ||
| 2299 | return sp->fcport->vha; | ||
| 2300 | else | ||
| 2301 | goto base_que; | ||
| 2302 | } | ||
| 2303 | base_que: | ||
| 2304 | vha = pci_get_drvdata(ha->pdev); | ||
| 2305 | return vha; | ||
| 2306 | } | ||
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index b901aa267e7d..ff17dee28613 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c | |||
| @@ -636,13 +636,15 @@ failed: | |||
| 636 | 636 | ||
| 637 | static void qla_do_work(struct work_struct *work) | 637 | static void qla_do_work(struct work_struct *work) |
| 638 | { | 638 | { |
| 639 | unsigned long flags; | ||
| 639 | struct rsp_que *rsp = container_of(work, struct rsp_que, q_work); | 640 | struct rsp_que *rsp = container_of(work, struct rsp_que, q_work); |
| 640 | struct scsi_qla_host *vha; | 641 | struct scsi_qla_host *vha; |
| 642 | struct qla_hw_data *ha = rsp->hw; | ||
| 641 | 643 | ||
| 642 | spin_lock_irq(&rsp->hw->hardware_lock); | 644 | spin_lock_irqsave(&rsp->hw->hardware_lock, flags); |
| 643 | vha = qla25xx_get_host(rsp); | 645 | vha = pci_get_drvdata(ha->pdev); |
| 644 | qla24xx_process_response_queue(vha, rsp); | 646 | qla24xx_process_response_queue(vha, rsp); |
| 645 | spin_unlock_irq(&rsp->hw->hardware_lock); | 647 | spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags); |
| 646 | } | 648 | } |
| 647 | 649 | ||
| 648 | /* create response queue */ | 650 | /* create response queue */ |
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c index 0ceec123ddfd..bee558aed427 100644 --- a/drivers/usb/host/r8a66597-hcd.c +++ b/drivers/usb/host/r8a66597-hcd.c | |||
| @@ -35,7 +35,9 @@ | |||
| 35 | #include <linux/usb.h> | 35 | #include <linux/usb.h> |
| 36 | #include <linux/platform_device.h> | 36 | #include <linux/platform_device.h> |
| 37 | #include <linux/io.h> | 37 | #include <linux/io.h> |
| 38 | #include <linux/mm.h> | ||
| 38 | #include <linux/irq.h> | 39 | #include <linux/irq.h> |
| 40 | #include <asm/cacheflush.h> | ||
| 39 | 41 | ||
| 40 | #include "../core/hcd.h" | 42 | #include "../core/hcd.h" |
| 41 | #include "r8a66597.h" | 43 | #include "r8a66597.h" |
| @@ -820,6 +822,26 @@ static void enable_r8a66597_pipe(struct r8a66597 *r8a66597, struct urb *urb, | |||
| 820 | enable_r8a66597_pipe_dma(r8a66597, dev, pipe, urb); | 822 | enable_r8a66597_pipe_dma(r8a66597, dev, pipe, urb); |
| 821 | } | 823 | } |
| 822 | 824 | ||
| 825 | static void r8a66597_urb_done(struct r8a66597 *r8a66597, struct urb *urb, | ||
| 826 | int status) | ||
| 827 | __releases(r8a66597->lock) | ||
| 828 | __acquires(r8a66597->lock) | ||
| 829 | { | ||
| 830 | if (usb_pipein(urb->pipe) && usb_pipetype(urb->pipe) != PIPE_CONTROL) { | ||
| 831 | void *ptr; | ||
| 832 | |||
| 833 | for (ptr = urb->transfer_buffer; | ||
| 834 | ptr < urb->transfer_buffer + urb->transfer_buffer_length; | ||
| 835 | ptr += PAGE_SIZE) | ||
| 836 | flush_dcache_page(virt_to_page(ptr)); | ||
| 837 | } | ||
| 838 | |||
| 839 | usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb); | ||
| 840 | spin_unlock(&r8a66597->lock); | ||
| 841 | usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb, status); | ||
| 842 | spin_lock(&r8a66597->lock); | ||
| 843 | } | ||
| 844 | |||
| 823 | /* this function must be called with interrupt disabled */ | 845 | /* this function must be called with interrupt disabled */ |
| 824 | static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address) | 846 | static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address) |
| 825 | { | 847 | { |
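
The new r8a66597_urb_done() helper above walks the whole inbound transfer buffer in PAGE_SIZE steps and flushes the data cache for each page before handing the URB back. A user-space sketch of that page-granular walk (PAGE_SIZE and the per-page action are stand-ins; the real code calls flush_dcache_page() on virt_to_page(ptr)):

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096

    /* Visit every PAGE_SIZE step that a buffer spans, as the helper above
     * does when flushing the dcache for a device-to-memory transfer. */
    static void for_each_buffer_page(void *buf, size_t len,
                                     void (*action)(void *addr))
    {
            char *ptr;

            for (ptr = buf; ptr < (char *)buf + len; ptr += PAGE_SIZE)
                    action(ptr);
    }

    static void show_page(void *addr)
    {
            printf("would flush the page containing %p\n", addr);
    }

    int main(void)
    {
            size_t len = 3 * PAGE_SIZE + 100;
            char *buf = malloc(len);

            if (!buf)
                    return 1;
            for_each_buffer_page(buf, len, show_page); /* visits 4 pages */
            free(buf);
            return 0;
    }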
| @@ -838,15 +860,9 @@ static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address) | |||
| 838 | list_del(&td->queue); | 860 | list_del(&td->queue); |
| 839 | kfree(td); | 861 | kfree(td); |
| 840 | 862 | ||
| 841 | if (urb) { | 863 | if (urb) |
| 842 | usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), | 864 | r8a66597_urb_done(r8a66597, urb, -ENODEV); |
| 843 | urb); | ||
| 844 | 865 | ||
| 845 | spin_unlock(&r8a66597->lock); | ||
| 846 | usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb, | ||
| 847 | -ENODEV); | ||
| 848 | spin_lock(&r8a66597->lock); | ||
| 849 | } | ||
| 850 | break; | 866 | break; |
| 851 | } | 867 | } |
| 852 | } | 868 | } |
| @@ -1006,6 +1022,8 @@ static void start_root_hub_sampling(struct r8a66597 *r8a66597, int port, | |||
| 1006 | /* this function must be called with interrupt disabled */ | 1022 | /* this function must be called with interrupt disabled */ |
| 1007 | static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port, | 1023 | static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port, |
| 1008 | u16 syssts) | 1024 | u16 syssts) |
| 1025 | __releases(r8a66597->lock) | ||
| 1026 | __acquires(r8a66597->lock) | ||
| 1009 | { | 1027 | { |
| 1010 | if (syssts == SE0) { | 1028 | if (syssts == SE0) { |
| 1011 | r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port)); | 1029 | r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port)); |
| @@ -1023,7 +1041,9 @@ static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port, | |||
| 1023 | usb_hcd_resume_root_hub(r8a66597_to_hcd(r8a66597)); | 1041 | usb_hcd_resume_root_hub(r8a66597_to_hcd(r8a66597)); |
| 1024 | } | 1042 | } |
| 1025 | 1043 | ||
| 1044 | spin_unlock(&r8a66597->lock); | ||
| 1026 | usb_hcd_poll_rh_status(r8a66597_to_hcd(r8a66597)); | 1045 | usb_hcd_poll_rh_status(r8a66597_to_hcd(r8a66597)); |
| 1046 | spin_lock(&r8a66597->lock); | ||
| 1027 | } | 1047 | } |
| 1028 | 1048 | ||
| 1029 | /* this function must be called with interrupt disabled */ | 1049 | /* this function must be called with interrupt disabled */ |
| @@ -1283,10 +1303,7 @@ __releases(r8a66597->lock) __acquires(r8a66597->lock) | |||
| 1283 | if (usb_pipeisoc(urb->pipe)) | 1303 | if (usb_pipeisoc(urb->pipe)) |
| 1284 | urb->start_frame = r8a66597_get_frame(hcd); | 1304 | urb->start_frame = r8a66597_get_frame(hcd); |
| 1285 | 1305 | ||
| 1286 | usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb); | 1306 | r8a66597_urb_done(r8a66597, urb, status); |
| 1287 | spin_unlock(&r8a66597->lock); | ||
| 1288 | usb_hcd_giveback_urb(hcd, urb, status); | ||
| 1289 | spin_lock(&r8a66597->lock); | ||
| 1290 | } | 1307 | } |
| 1291 | 1308 | ||
| 1292 | if (restart) { | 1309 | if (restart) { |
