Diffstat (limited to 'drivers')
-rw-r--r--  drivers/base/class.c | 2
-rw-r--r--  drivers/block/cciss.c | 3
-rw-r--r--  drivers/char/agp/amd64-agp.c | 5
-rw-r--r--  drivers/char/tpm/tpm_infineon.c | 79
-rw-r--r--  drivers/dma/coh901318.c | 2
-rw-r--r--  drivers/dma/dmaengine.c | 1
-rw-r--r--  drivers/dma/dmatest.c | 2
-rw-r--r--  drivers/dma/ioat/dma_v2.c | 2
-rw-r--r--  drivers/dma/ipu/ipu_idmac.c | 25
-rw-r--r--  drivers/edac/amd64_edac.c | 15
-rw-r--r--  drivers/edac/mpc85xx_edac.c | 8
-rw-r--r--  drivers/firewire/net.c | 53
-rw-r--r--  drivers/firewire/ohci.c | 13
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 168
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 16
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 245
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_channel.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dp.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 40
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_grctx.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_irq.c | 155
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_notifier.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_object.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_reg.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c | 49
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fbcon.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_crtc.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fbcon.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fifo.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_graph.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_sor.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/Kconfig | 12
-rw-r--r--  drivers/gpu/drm/radeon/atom.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/r600_audio.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_benchmark.c | 55
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 11
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 17
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | 13
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 16
-rw-r--r--  drivers/gpu/vga/vgaarb.c | 2
-rw-r--r--  drivers/infiniband/core/cma.c | 4
-rw-r--r--  drivers/input/mouse/psmouse-base.c | 9
-rw-r--r--  drivers/md/dm-log-userspace-transfer.c | 10
-rw-r--r--  drivers/md/dm-raid1.c | 2
-rw-r--r--  drivers/md/dm-region-hash.c | 5
-rw-r--r--  drivers/md/dm-snap-persistent.c | 2
-rw-r--r--  drivers/md/dm-stripe.c | 2
-rw-r--r--  drivers/md/dm-sysfs.c | 8
-rw-r--r--  drivers/md/dm.c | 21
-rw-r--r--  drivers/message/fusion/mptscsih.c | 2
-rw-r--r--  drivers/mmc/card/mmc_test.c | 9
-rw-r--r--  drivers/net/benet/be_cmds.c | 1
-rw-r--r--  drivers/net/ixgbe/ixgbe_82598.c | 22
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 4
-rw-r--r--  drivers/net/sfc/efx.c | 1
-rw-r--r--  drivers/net/sfc/qt202x_phy.c | 2
-rw-r--r--  drivers/net/via-velocity.c | 41
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c | 4
-rw-r--r--  drivers/net/wireless/b43/b43.h | 1
-rw-r--r--  drivers/net/wireless/b43/main.c | 13
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-core.c | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-rx.c | 5
-rw-r--r--  drivers/net/wireless/iwmc3200wifi/rx.c | 2
-rw-r--r--  drivers/net/wireless/rtl818x/rtl8187_dev.c | 1
-rw-r--r--  drivers/power/wm97xx_battery.c | 10
-rw-r--r--  drivers/regulator/core.c | 2
-rw-r--r--  drivers/regulator/lp3971.c | 4
-rw-r--r--  drivers/s390/scsi/zfcp_fc.c | 9
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 44
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c | 8
-rw-r--r--  drivers/serial/8250.c | 7
-rw-r--r--  drivers/ssb/main.c | 3
-rw-r--r--  drivers/usb/core/devio.c | 48
-rw-r--r--  drivers/usb/gadget/f_eem.c | 3
-rw-r--r--  drivers/usb/gadget/multi.c | 2
-rw-r--r--  drivers/usb/gadget/r8a66597-udc.c | 1
-rw-r--r--  drivers/usb/gadget/s3c-hsotg.c | 1
-rw-r--r--  drivers/usb/host/ehci-hub.c | 13
-rw-r--r--  drivers/usb/host/fhci-tds.c | 6
-rw-r--r--  drivers/usb/misc/sisusbvga/sisusb.c | 1
-rw-r--r--  drivers/usb/otg/Kconfig | 1
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 25
-rw-r--r--  drivers/usb/serial/ftdi_sio_ids.h | 18
-rw-r--r--  drivers/usb/serial/sierra.c | 1
-rw-r--r--  drivers/usb/storage/unusual_devs.h | 2
111 files changed, 1099 insertions(+), 592 deletions(-)
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 161746deab4b..6e2c3b064f53 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -59,6 +59,8 @@ static void class_release(struct kobject *kobj)
 	else
 		pr_debug("class '%s' does not have a release() function, "
 			 "be careful\n", class->name);
+
+	kfree(cp);
 }
 
 static struct sysfs_ops class_sysfs_ops = {
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 873e594860d3..9291614ac6b7 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -337,6 +337,9 @@ static int cciss_seq_show(struct seq_file *seq, void *v)
 	if (*pos > h->highest_lun)
 		return 0;
 
+	if (drv == NULL) /* it's possible for h->drv[] to have holes. */
+		return 0;
+
 	if (drv->heads == 0)
 		return 0;
 
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 34cf04e21795..fd50ead59c79 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -767,16 +767,19 @@ int __init agp_amd64_init(void)
 
 static int __init agp_amd64_mod_init(void)
 {
+#ifndef MODULE
 	if (gart_iommu_aperture)
 		return agp_bridges_found ? 0 : -ENODEV;
-
+#endif
 	return agp_amd64_init();
 }
 
 static void __exit agp_amd64_cleanup(void)
 {
+#ifndef MODULE
 	if (gart_iommu_aperture)
 		return;
+#endif
 	if (aperture_resource)
 		release_resource(aperture_resource);
 	pci_unregister_driver(&agp_amd64_pci_driver);
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
index ecba4942fc8e..f58440791e65 100644
--- a/drivers/char/tpm/tpm_infineon.c
+++ b/drivers/char/tpm/tpm_infineon.c
@@ -39,12 +39,12 @@
 struct tpm_inf_dev {
 	int iotype;
 
 	void __iomem *mem_base;		/* MMIO ioremap'd addr */
 	unsigned long map_base;		/* phys MMIO base */
 	unsigned long map_size;		/* MMIO region size */
 	unsigned int index_off;		/* index register offset */
 
 	unsigned int data_regs;		/* Data registers */
 	unsigned int data_size;
 
 	unsigned int config_port;	/* IO Port config index reg */
@@ -406,14 +406,14 @@ static const struct tpm_vendor_specific tpm_inf = {
 	.miscdev = {.fops = &inf_ops,},
 };
 
-static const struct pnp_device_id tpm_pnp_tbl[] = {
+static const struct pnp_device_id tpm_inf_pnp_tbl[] = {
 	/* Infineon TPMs */
 	{"IFX0101", 0},
 	{"IFX0102", 0},
 	{"", 0}
 };
 
-MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);
+MODULE_DEVICE_TABLE(pnp, tpm_inf_pnp_tbl);
 
 static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
 				       const struct pnp_device_id *dev_id)
@@ -430,7 +430,7 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
 	if (pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) &&
 	    !(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED)) {
 
 		tpm_dev.iotype = TPM_INF_IO_PORT;
 
 		tpm_dev.config_port = pnp_port_start(dev, 0);
 		tpm_dev.config_size = pnp_port_len(dev, 0);
@@ -459,9 +459,9 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
 			goto err_last;
 		}
 	} else if (pnp_mem_valid(dev, 0) &&
 		   !(pnp_mem_flags(dev, 0) & IORESOURCE_DISABLED)) {
 
 		tpm_dev.iotype = TPM_INF_IO_MEM;
 
 		tpm_dev.map_base = pnp_mem_start(dev, 0);
 		tpm_dev.map_size = pnp_mem_len(dev, 0);
@@ -563,11 +563,11 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
563 "product id 0x%02x%02x" 563 "product id 0x%02x%02x"
564 "%s\n", 564 "%s\n",
565 tpm_dev.iotype == TPM_INF_IO_PORT ? 565 tpm_dev.iotype == TPM_INF_IO_PORT ?
566 tpm_dev.config_port : 566 tpm_dev.config_port :
567 tpm_dev.map_base + tpm_dev.index_off, 567 tpm_dev.map_base + tpm_dev.index_off,
568 tpm_dev.iotype == TPM_INF_IO_PORT ? 568 tpm_dev.iotype == TPM_INF_IO_PORT ?
569 tpm_dev.data_regs : 569 tpm_dev.data_regs :
570 tpm_dev.map_base + tpm_dev.data_regs, 570 tpm_dev.map_base + tpm_dev.data_regs,
571 version[0], version[1], 571 version[0], version[1],
572 vendorid[0], vendorid[1], 572 vendorid[0], vendorid[1],
573 productid[0], productid[1], chipname); 573 productid[0], productid[1], chipname);
@@ -607,20 +607,55 @@ static __devexit void tpm_inf_pnp_remove(struct pnp_dev *dev)
 		iounmap(tpm_dev.mem_base);
 		release_mem_region(tpm_dev.map_base, tpm_dev.map_size);
 	}
+	tpm_dev_vendor_release(chip);
 	tpm_remove_hardware(chip->dev);
 	}
 }
 
+static int tpm_inf_pnp_suspend(struct pnp_dev *dev, pm_message_t pm_state)
+{
+	struct tpm_chip *chip = pnp_get_drvdata(dev);
+	int rc;
+	if (chip) {
+		u8 savestate[] = {
+			0, 193,		/* TPM_TAG_RQU_COMMAND */
+			0, 0, 0, 10,	/* blob length (in bytes) */
+			0, 0, 0, 152	/* TPM_ORD_SaveState */
+		};
+		dev_info(&dev->dev, "saving TPM state\n");
+		rc = tpm_inf_send(chip, savestate, sizeof(savestate));
+		if (rc < 0) {
+			dev_err(&dev->dev, "error while saving TPM state\n");
+			return rc;
+		}
+	}
+	return 0;
+}
+
+static int tpm_inf_pnp_resume(struct pnp_dev *dev)
+{
+	/* Re-configure TPM after suspending */
+	tpm_config_out(ENABLE_REGISTER_PAIR, TPM_INF_ADDR);
+	tpm_config_out(IOLIMH, TPM_INF_ADDR);
+	tpm_config_out((tpm_dev.data_regs >> 8) & 0xff, TPM_INF_DATA);
+	tpm_config_out(IOLIML, TPM_INF_ADDR);
+	tpm_config_out((tpm_dev.data_regs & 0xff), TPM_INF_DATA);
+	/* activate register */
+	tpm_config_out(TPM_DAR, TPM_INF_ADDR);
+	tpm_config_out(0x01, TPM_INF_DATA);
+	tpm_config_out(DISABLE_REGISTER_PAIR, TPM_INF_ADDR);
+	/* disable RESET, LP and IRQC */
+	tpm_data_out(RESET_LP_IRQC_DISABLE, CMD);
+	return tpm_pm_resume(&dev->dev);
+}
+
 static struct pnp_driver tpm_inf_pnp_driver = {
 	.name = "tpm_inf_pnp",
-	.driver = {
-		   .owner = THIS_MODULE,
-		   .suspend = tpm_pm_suspend,
-		   .resume = tpm_pm_resume,
-		   },
-	.id_table = tpm_pnp_tbl,
+	.id_table = tpm_inf_pnp_tbl,
 	.probe = tpm_inf_pnp_probe,
-	.remove = __devexit_p(tpm_inf_pnp_remove),
+	.suspend = tpm_inf_pnp_suspend,
+	.resume = tpm_inf_pnp_resume,
+	.remove = __devexit_p(tpm_inf_pnp_remove)
 };
 
 static int __init init_inf(void)
@@ -638,5 +673,5 @@ module_exit(cleanup_inf);
 
 MODULE_AUTHOR("Marcel Selhorst <m.selhorst@sirrix.com>");
 MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2");
-MODULE_VERSION("1.9");
+MODULE_VERSION("1.9.2");
 MODULE_LICENSE("GPL");
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index b5f2ee0f8e2c..64a937262a40 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -613,8 +613,6 @@ static void dma_tasklet(unsigned long data)
 	cohd_fin->pending_irqs--;
 	cohc->completed = cohd_fin->desc.cookie;
 
-	BUG_ON(cohc->nbr_active_done && cohd_fin == NULL);
-
 	if (cohc->nbr_active_done == 0)
 		return;
 
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 6f51a0a7a8bb..e7a3230fb7d5 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -826,6 +826,7 @@ void dma_async_device_unregister(struct dma_device *device)
 		chan->dev->chan = NULL;
 		mutex_unlock(&dma_list_mutex);
 		device_unregister(&chan->dev->device);
+		free_percpu(chan->local);
 	}
 }
 EXPORT_SYMBOL(dma_async_device_unregister);
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 8b905161fbf4..948d563941c9 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -467,7 +467,7 @@ err_srcs:
 
 	if (iterations > 0)
 		while (!kthread_should_stop()) {
-			DECLARE_WAIT_QUEUE_HEAD(wait_dmatest_exit);
+			DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
 			interruptible_sleep_on(&wait_dmatest_exit);
 		}
 
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 5f7a500e18d0..5cc37afe2bc1 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -249,7 +249,7 @@ int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo)
 	if (is_ioat_active(status) || is_ioat_idle(status))
 		ioat_suspend(chan);
 	while (is_ioat_active(status) || is_ioat_idle(status)) {
-		if (end && time_after(jiffies, end)) {
+		if (tmo && time_after(jiffies, end)) {
 			err = -ETIMEDOUT;
 			break;
 		}
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 9a5bc1a7389e..e80bae1673fa 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -761,12 +761,10 @@ static void ipu_select_buffer(enum ipu_channel channel, int buffer_n)
  * @buffer_n:	buffer number to update.
  *		0 or 1 are the only valid values.
  * @phyaddr:	buffer physical address.
- * @return:	Returns 0 on success or negative error code on failure. This
- *		function will fail if the buffer is set to ready.
  */
 /* Called under spin_lock(_irqsave)(&ichan->lock) */
-static int ipu_update_channel_buffer(struct idmac_channel *ichan,
-				     int buffer_n, dma_addr_t phyaddr)
+static void ipu_update_channel_buffer(struct idmac_channel *ichan,
+				      int buffer_n, dma_addr_t phyaddr)
 {
 	enum ipu_channel channel = ichan->dma_chan.chan_id;
 	uint32_t reg;
@@ -806,8 +804,6 @@ static int ipu_update_channel_buffer(struct idmac_channel *ichan,
 	}
 
 	spin_unlock_irqrestore(&ipu_data.lock, flags);
-
-	return 0;
 }
 
 /* Called under spin_lock_irqsave(&ichan->lock) */
@@ -816,7 +812,6 @@ static int ipu_submit_buffer(struct idmac_channel *ichan,
 {
 	unsigned int chan_id = ichan->dma_chan.chan_id;
 	struct device *dev = &ichan->dma_chan.dev->device;
-	int ret;
 
 	if (async_tx_test_ack(&desc->txd))
 		return -EINTR;
@@ -827,14 +822,7 @@ static int ipu_submit_buffer(struct idmac_channel *ichan,
 	 * could make it conditional on status >= IPU_CHANNEL_ENABLED, but
 	 * doing it again shouldn't hurt either.
 	 */
-	ret = ipu_update_channel_buffer(ichan, buf_idx,
-					sg_dma_address(sg));
-
-	if (ret < 0) {
-		dev_err(dev, "Updating sg %p on channel 0x%x buffer %d failed!\n",
-			sg, chan_id, buf_idx);
-		return ret;
-	}
+	ipu_update_channel_buffer(ichan, buf_idx, sg_dma_address(sg));
 
 	ipu_select_buffer(chan_id, buf_idx);
 	dev_dbg(dev, "Updated sg %p on channel 0x%x buffer %d\n",
@@ -1379,10 +1367,11 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
 
 	if (likely(sgnew) &&
 	    ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) {
-		callback = desc->txd.callback;
-		callback_param = desc->txd.callback_param;
+		callback = descnew->txd.callback;
+		callback_param = descnew->txd.callback_param;
 		spin_unlock(&ichan->lock);
-		callback(callback_param);
+		if (callback)
+			callback(callback_param);
 		spin_lock(&ichan->lock);
 	}
 
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 000dc67b85b7..3391e6739d06 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -2658,10 +2658,11 @@ static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
  * the memory system completely. A command line option allows to force-enable
  * hardware ECC later in amd64_enable_ecc_error_reporting().
  */
-static const char *ecc_warning =
-	"WARNING: ECC is disabled by BIOS. Module will NOT be loaded.\n"
-	" Either Enable ECC in the BIOS, or set 'ecc_enable_override'.\n"
-	" Also, use of the override can cause unknown side effects.\n";
+static const char *ecc_msg =
+	"ECC disabled in the BIOS or no ECC capability, module will not load.\n"
+	" Either enable ECC checking or force module loading by setting "
+	"'ecc_enable_override'.\n"
+	" (Note that use of the override may cause unknown side effects.)\n";
 
 static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
 {
@@ -2673,7 +2674,7 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
 
 	ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE);
 	if (!ecc_enabled)
-		amd64_printk(KERN_WARNING, "This node reports that Memory ECC "
+		amd64_printk(KERN_NOTICE, "This node reports that Memory ECC "
 			     "is currently disabled, set F3x%x[22] (%s).\n",
 			     K8_NBCFG, pci_name(pvt->misc_f3_ctl));
 	else
@@ -2681,13 +2682,13 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
 
 	nb_mce_en = amd64_nb_mce_bank_enabled_on_node(pvt->mc_node_id);
 	if (!nb_mce_en)
-		amd64_printk(KERN_WARNING, "NB MCE bank disabled, set MSR "
+		amd64_printk(KERN_NOTICE, "NB MCE bank disabled, set MSR "
 			     "0x%08x[4] on node %d to enable.\n",
 			     MSR_IA32_MCG_CTL, pvt->mc_node_id);
 
 	if (!ecc_enabled || !nb_mce_en) {
 		if (!ecc_enable_override) {
-			amd64_printk(KERN_WARNING, "%s", ecc_warning);
+			amd64_printk(KERN_NOTICE, "%s", ecc_msg);
 			return -ENODEV;
 		}
 		ecc_enable_override = 0;
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index cf27402af97b..ecd5928d7110 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -804,8 +804,8 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
 	end <<= (24 - PAGE_SHIFT);
 	end |= (1 << (24 - PAGE_SHIFT)) - 1;
 
-	csrow->first_page = start >> PAGE_SHIFT;
-	csrow->last_page = end >> PAGE_SHIFT;
+	csrow->first_page = start;
+	csrow->last_page = end;
 	csrow->nr_pages = end + 1 - start;
 	csrow->grain = 8;
 	csrow->mtype = mtype;
@@ -892,10 +892,6 @@ static int __devinit mpc85xx_mc_err_probe(struct of_device *op,
 
 	mpc85xx_init_csrows(mci);
 
-#ifdef CONFIG_EDAC_DEBUG
-	edac_mc_register_mcidev_debug((struct attribute **)debug_attr);
-#endif
-
 	/* store the original error disable bits */
 	orig_ddr_err_disable =
 	    in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE);
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index cbaf420c36c5..2d3dc7ded0a9 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -893,20 +893,31 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
 
 static struct kmem_cache *fwnet_packet_task_cache;
 
+static void fwnet_free_ptask(struct fwnet_packet_task *ptask)
+{
+	dev_kfree_skb_any(ptask->skb);
+	kmem_cache_free(fwnet_packet_task_cache, ptask);
+}
+
 static int fwnet_send_packet(struct fwnet_packet_task *ptask);
 
 static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask)
 {
-	struct fwnet_device *dev;
+	struct fwnet_device *dev = ptask->dev;
 	unsigned long flags;
-
-	dev = ptask->dev;
+	bool free;
 
 	spin_lock_irqsave(&dev->lock, flags);
-	list_del(&ptask->pt_link);
-	spin_unlock_irqrestore(&dev->lock, flags);
 
-	ptask->outstanding_pkts--;	/* FIXME access inside lock */
+	ptask->outstanding_pkts--;
+
+	/* Check whether we or the networking TX soft-IRQ is last user. */
+	free = (ptask->outstanding_pkts == 0 && !list_empty(&ptask->pt_link));
+
+	if (ptask->outstanding_pkts == 0)
+		list_del(&ptask->pt_link);
+
+	spin_unlock_irqrestore(&dev->lock, flags);
 
 	if (ptask->outstanding_pkts > 0) {
 		u16 dg_size;
@@ -951,10 +962,10 @@ static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask)
 			ptask->max_payload = skb->len + RFC2374_FRAG_HDR_SIZE;
 		}
 		fwnet_send_packet(ptask);
-	} else {
-		dev_kfree_skb_any(ptask->skb);
-		kmem_cache_free(fwnet_packet_task_cache, ptask);
 	}
+
+	if (free)
+		fwnet_free_ptask(ptask);
 }
 
 static void fwnet_write_complete(struct fw_card *card, int rcode,
@@ -977,6 +988,7 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
 	unsigned tx_len;
 	struct rfc2734_header *bufhdr;
 	unsigned long flags;
+	bool free;
 
 	dev = ptask->dev;
 	tx_len = ptask->max_payload;
@@ -1022,12 +1034,16 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
 				generation, SCODE_100, 0ULL, ptask->skb->data,
 				tx_len + 8, fwnet_write_complete, ptask);
 
-		/* FIXME race? */
 		spin_lock_irqsave(&dev->lock, flags);
-		list_add_tail(&ptask->pt_link, &dev->broadcasted_list);
+
+		/* If the AT tasklet already ran, we may be last user. */
+		free = (ptask->outstanding_pkts == 0 && list_empty(&ptask->pt_link));
+		if (!free)
+			list_add_tail(&ptask->pt_link, &dev->broadcasted_list);
+
 		spin_unlock_irqrestore(&dev->lock, flags);
 
-		return 0;
+		goto out;
 	}
 
 	fw_send_request(dev->card, &ptask->transaction,
@@ -1035,12 +1051,19 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
 			ptask->generation, ptask->speed, ptask->fifo_addr,
 			ptask->skb->data, tx_len, fwnet_write_complete, ptask);
 
-	/* FIXME race? */
 	spin_lock_irqsave(&dev->lock, flags);
-	list_add_tail(&ptask->pt_link, &dev->sent_list);
+
+	/* If the AT tasklet already ran, we may be last user. */
+	free = (ptask->outstanding_pkts == 0 && list_empty(&ptask->pt_link));
+	if (!free)
+		list_add_tail(&ptask->pt_link, &dev->sent_list);
+
 	spin_unlock_irqrestore(&dev->lock, flags);
 
 	dev->netdev->trans_start = jiffies;
+ out:
+	if (free)
+		fwnet_free_ptask(ptask);
 
 	return 0;
 }
@@ -1298,6 +1321,8 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net)
 	spin_unlock_irqrestore(&dev->lock, flags);
 
 	ptask->max_payload = max_payload;
+	INIT_LIST_HEAD(&ptask->pt_link);
+
 	fwnet_send_packet(ptask);
 
 	return NETDEV_TX_OK;
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 2345d4103fe6..43ebf337b131 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -2101,11 +2101,6 @@ static int ohci_queue_iso_transmit(struct fw_iso_context *base,
 	u32 payload_index, payload_end_index, next_page_index;
 	int page, end_page, i, length, offset;
 
-	/*
-	 * FIXME: Cycle lost behavior should be configurable: lose
-	 * packet, retransmit or terminate..
-	 */
-
 	p = packet;
 	payload_index = payload;
 
@@ -2135,6 +2130,14 @@ static int ohci_queue_iso_transmit(struct fw_iso_context *base,
 	if (!p->skip) {
 		d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
 		d[0].req_count = cpu_to_le16(8);
+		/*
+		 * Link the skip address to this descriptor itself. This causes
+		 * a context to skip a cycle whenever lost cycles or FIFO
+		 * overruns occur, without dropping the data. The application
+		 * should then decide whether this is an error condition or not.
+		 * FIXME: Make the context's cycle-lost behaviour configurable?
+		 */
+		d[0].branch_address = cpu_to_le32(d_bus | z);
 
 		header = (__le32 *) &d[1];
 		header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index cdec32977129..2ac074c8f5d2 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -405,7 +405,8 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
 			wasted += alignment - tmp;
 		}
 
-		if (entry->size >= size + wasted) {
+		if (entry->size >= size + wasted &&
+		    (entry->start + wasted + size) <= end) {
 			if (!best_match)
 				return entry;
 			if (entry->size < best_size) {
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index e660ac07f3b2..2307f98349f7 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -735,8 +735,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
 	if (cmdbuf->num_cliprects) {
 		cliprects = kcalloc(cmdbuf->num_cliprects,
 				    sizeof(struct drm_clip_rect), GFP_KERNEL);
-		if (cliprects == NULL)
+		if (cliprects == NULL) {
+			ret = -ENOMEM;
 			goto fail_batch_free;
+		}
 
 		ret = copy_from_user(cliprects, cmdbuf->cliprects,
 				     cmdbuf->num_cliprects *
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ecac882e1d54..79beffcf5936 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -174,78 +174,100 @@ const static struct pci_device_id pciidlist[] = {
 MODULE_DEVICE_TABLE(pci, pciidlist);
 #endif
 
-static int i915_suspend(struct drm_device *dev, pm_message_t state)
+static int i915_drm_freeze(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (!dev || !dev_priv) {
-		DRM_ERROR("dev: %p, dev_priv: %p\n", dev, dev_priv);
-		DRM_ERROR("DRM not initialized, aborting suspend.\n");
-		return -ENODEV;
-	}
-
-	if (state.event == PM_EVENT_PRETHAW)
-		return 0;
-
 	pci_save_state(dev->pdev);
 
 	/* If KMS is active, we do the leavevt stuff here */
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		if (i915_gem_idle(dev))
+		int error = i915_gem_idle(dev);
+		if (error) {
 			dev_err(&dev->pdev->dev,
-				"GEM idle failed, resume may fail\n");
+				"GEM idle failed, resume might fail\n");
+			return error;
+		}
 		drm_irq_uninstall(dev);
 	}
 
 	i915_save_state(dev);
 
+	return 0;
+}
+
+static void i915_drm_suspend(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
 	intel_opregion_free(dev, 1);
 
+	/* Modeset on resume, not lid events */
+	dev_priv->modeset_on_lid = 0;
+}
+
+static int i915_suspend(struct drm_device *dev, pm_message_t state)
+{
+	int error;
+
+	if (!dev || !dev->dev_private) {
+		DRM_ERROR("dev: %p\n", dev);
+		DRM_ERROR("DRM not initialized, aborting suspend.\n");
+		return -ENODEV;
+	}
+
+	if (state.event == PM_EVENT_PRETHAW)
+		return 0;
+
+	error = i915_drm_freeze(dev);
+	if (error)
+		return error;
+
+	i915_drm_suspend(dev);
+
 	if (state.event == PM_EVENT_SUSPEND) {
 		/* Shut down the device */
 		pci_disable_device(dev->pdev);
 		pci_set_power_state(dev->pdev, PCI_D3hot);
 	}
 
-	/* Modeset on resume, not lid events */
-	dev_priv->modeset_on_lid = 0;
-
 	return 0;
 }
 
-static int i915_resume(struct drm_device *dev)
+static int i915_drm_thaw(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret = 0;
-
-	if (pci_enable_device(dev->pdev))
-		return -1;
-	pci_set_master(dev->pdev);
-
-	i915_restore_state(dev);
-
-	intel_opregion_init(dev, 1);
+	int error = 0;
 
 	/* KMS EnterVT equivalent */
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		mutex_lock(&dev->struct_mutex);
 		dev_priv->mm.suspended = 0;
 
-		ret = i915_gem_init_ringbuffer(dev);
-		if (ret != 0)
-			ret = -1;
+		error = i915_gem_init_ringbuffer(dev);
 		mutex_unlock(&dev->struct_mutex);
 
 		drm_irq_install(dev);
-	}
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+
 		/* Resume the modeset for every activated CRTC */
 		drm_helper_resume_force_mode(dev);
 	}
 
 	dev_priv->modeset_on_lid = 0;
 
-	return ret;
+	return error;
+}
+
+static int i915_resume(struct drm_device *dev)
+{
+	if (pci_enable_device(dev->pdev))
+		return -EIO;
+
+	pci_set_master(dev->pdev);
+
+	i915_restore_state(dev);
+
+	intel_opregion_init(dev, 1);
+
+	return i915_drm_thaw(dev);
 }
 
 /**
@@ -386,57 +408,69 @@ i915_pci_remove(struct pci_dev *pdev)
 	drm_put_dev(dev);
 }
 
-static int
-i915_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int i915_pm_suspend(struct device *dev)
 {
-	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	int error;
 
-	return i915_suspend(dev, state);
-}
+	if (!drm_dev || !drm_dev->dev_private) {
+		dev_err(dev, "DRM not initialized, aborting suspend.\n");
+		return -ENODEV;
+	}
 
-static int
-i915_pci_resume(struct pci_dev *pdev)
-{
-	struct drm_device *dev = pci_get_drvdata(pdev);
+	error = i915_drm_freeze(drm_dev);
+	if (error)
+		return error;
 
-	return i915_resume(dev);
-}
+	i915_drm_suspend(drm_dev);
 
-static int
-i915_pm_suspend(struct device *dev)
-{
-	return i915_pci_suspend(to_pci_dev(dev), PMSG_SUSPEND);
-}
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
 
-static int
-i915_pm_resume(struct device *dev)
-{
-	return i915_pci_resume(to_pci_dev(dev));
+	return 0;
 }
 
-static int
-i915_pm_freeze(struct device *dev)
+static int i915_pm_resume(struct device *dev)
 {
-	return i915_pci_suspend(to_pci_dev(dev), PMSG_FREEZE);
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+	return i915_resume(drm_dev);
 }
 
-static int
-i915_pm_thaw(struct device *dev)
+static int i915_pm_freeze(struct device *dev)
 {
-	/* thaw during hibernate, do nothing! */
-	return 0;
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+	if (!drm_dev || !drm_dev->dev_private) {
+		dev_err(dev, "DRM not initialized, aborting suspend.\n");
+		return -ENODEV;
+	}
+
+	return i915_drm_freeze(drm_dev);
 }
 
-static int
-i915_pm_poweroff(struct device *dev)
+static int i915_pm_thaw(struct device *dev)
 {
-	return i915_pci_suspend(to_pci_dev(dev), PMSG_HIBERNATE);
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+	return i915_drm_thaw(drm_dev);
 }
 
-static int
-i915_pm_restore(struct device *dev)
+static int i915_pm_poweroff(struct device *dev)
 {
-	return i915_pci_resume(to_pci_dev(dev));
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	int error;
+
+	error = i915_drm_freeze(drm_dev);
+	if (!error)
+		i915_drm_suspend(drm_dev);
+
+	return error;
 }
 
 const struct dev_pm_ops i915_pm_ops = {
@@ -445,7 +479,7 @@ const struct dev_pm_ops i915_pm_ops = {
 	.freeze = i915_pm_freeze,
 	.thaw = i915_pm_thaw,
 	.poweroff = i915_pm_poweroff,
-	.restore = i915_pm_restore,
+	.restore = i915_pm_resume,
 };
 
 static struct vm_operations_struct i915_gem_vm_ops = {
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index aaf934d96f21..b99b6a841d95 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -493,6 +493,15 @@ typedef struct drm_i915_private {
 	struct list_head flushing_list;
 
 	/**
+	 * List of objects currently pending a GPU write flush.
+	 *
+	 * All elements on this list will belong to either the
+	 * active_list or flushing_list, last_rendering_seqno can
+	 * be used to differentiate between the two elements.
+	 */
+	struct list_head gpu_write_list;
+
+	/**
 	 * LRU list of objects which are not in the ringbuffer and
 	 * are ready to unbind, but are still in the GTT.
 	 *
@@ -592,6 +601,8 @@ struct drm_i915_gem_object {
 
 	/** This object's place on the active/flushing/inactive lists */
 	struct list_head list;
+	/** This object's place on GPU write list */
+	struct list_head gpu_write_list;
 
 	/** This object's place on the fenced object LRU */
 	struct list_head fence_list;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b4c8c0230689..ec8a0d7ffa39 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1552,6 +1552,8 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
 	else
 		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
 
+	BUG_ON(!list_empty(&obj_priv->gpu_write_list));
+
 	obj_priv->last_rendering_seqno = 0;
 	if (obj_priv->active) {
 		obj_priv->active = 0;
@@ -1622,7 +1624,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 		struct drm_i915_gem_object *obj_priv, *next;
 
 		list_for_each_entry_safe(obj_priv, next,
-					 &dev_priv->mm.flushing_list, list) {
+					 &dev_priv->mm.gpu_write_list,
+					 gpu_write_list) {
 			struct drm_gem_object *obj = obj_priv->obj;
 
 			if ((obj->write_domain & flush_domains) ==
@@ -1630,6 +1633,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 				uint32_t old_write_domain = obj->write_domain;
 
 				obj->write_domain = 0;
+				list_del_init(&obj_priv->gpu_write_list);
 				i915_gem_object_move_to_active(obj, seqno);
 
 				trace_i915_gem_object_change_domain(obj,
@@ -2084,8 +2088,8 @@ static int
 i915_gem_evict_everything(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	uint32_t seqno;
 	int ret;
+	uint32_t seqno;
 	bool lists_empty;
 
 	spin_lock(&dev_priv->mm.active_list_lock);
@@ -2107,6 +2111,8 @@ i915_gem_evict_everything(struct drm_device *dev)
 	if (ret)
 		return ret;
 
+	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+
 	ret = i915_gem_evict_from_inactive_list(dev);
 	if (ret)
 		return ret;
@@ -2701,7 +2707,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
 	old_write_domain = obj->write_domain;
 	i915_gem_flush(dev, 0, obj->write_domain);
 	seqno = i915_add_request(dev, NULL, obj->write_domain);
-	obj->write_domain = 0;
+	BUG_ON(obj->write_domain);
 	i915_gem_object_move_to_active(obj, seqno);
 
 	trace_i915_gem_object_change_domain(obj,
@@ -3682,8 +3688,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	if (args->num_cliprects != 0) {
 		cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
 				    GFP_KERNEL);
-		if (cliprects == NULL)
+		if (cliprects == NULL) {
+			ret = -ENOMEM;
 			goto pre_mutex_err;
+		}
 
 		ret = copy_from_user(cliprects,
 				     (struct drm_clip_rect __user *)
@@ -3850,16 +3858,23 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		i915_gem_flush(dev,
 			       dev->invalidate_domains,
 			       dev->flush_domains);
-		if (dev->flush_domains)
+		if (dev->flush_domains & I915_GEM_GPU_DOMAINS)
 			(void)i915_add_request(dev, file_priv,
 					       dev->flush_domains);
 	}
 
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
+		struct drm_i915_gem_object *obj_priv = obj->driver_private;
 		uint32_t old_write_domain = obj->write_domain;
 
 		obj->write_domain = obj->pending_write_domain;
+		if (obj->write_domain)
+			list_move_tail(&obj_priv->gpu_write_list,
+				       &dev_priv->mm.gpu_write_list);
+		else
+			list_del_init(&obj_priv->gpu_write_list);
+
 		trace_i915_gem_object_change_domain(obj,
 						    obj->read_domains,
 						    old_write_domain);
@@ -4370,6 +4385,7 @@ int i915_gem_init_object(struct drm_gem_object *obj)
 	obj_priv->obj = obj;
 	obj_priv->fence_reg = I915_FENCE_REG_NONE;
 	INIT_LIST_HEAD(&obj_priv->list);
+	INIT_LIST_HEAD(&obj_priv->gpu_write_list);
 	INIT_LIST_HEAD(&obj_priv->fence_list);
 	obj_priv->madv = I915_MADV_WILLNEED;
 
@@ -4821,6 +4837,7 @@ i915_gem_load(struct drm_device *dev)
 	spin_lock_init(&dev_priv->mm.active_list_lock);
 	INIT_LIST_HEAD(&dev_priv->mm.active_list);
 	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
+	INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
 	INIT_LIST_HEAD(&dev_priv->mm.request_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 50ddf4a95c5e..a17d6bdfe63e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -309,21 +309,21 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
 	if (de_iir & DE_GSE)
 		ironlake_opregion_gse_intr(dev);
 
-	if (de_iir & DE_PLANEA_FLIP_DONE)
+	if (de_iir & DE_PLANEA_FLIP_DONE) {
 		intel_prepare_page_flip(dev, 0);
+		intel_finish_page_flip(dev, 0);
+	}
 
-	if (de_iir & DE_PLANEB_FLIP_DONE)
+	if (de_iir & DE_PLANEB_FLIP_DONE) {
 		intel_prepare_page_flip(dev, 1);
+		intel_finish_page_flip(dev, 1);
+	}
 
-	if (de_iir & DE_PIPEA_VBLANK) {
+	if (de_iir & DE_PIPEA_VBLANK)
 		drm_handle_vblank(dev, 0);
-		intel_finish_page_flip(dev, 0);
-	}
 
-	if (de_iir & DE_PIPEB_VBLANK) {
+	if (de_iir & DE_PIPEB_VBLANK)
 		drm_handle_vblank(dev, 1);
-		intel_finish_page_flip(dev, 1);
-	}
 
 	/* check event from PCH */
 	if ((de_iir & DE_PCH_EVENT) &&
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 847006c5218e..ab1bd2d3d3b6 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -338,6 +338,7 @@
 #define FBC_CTL_PERIODIC	(1<<30)
 #define FBC_CTL_INTERVAL_SHIFT	(16)
 #define FBC_CTL_UNCOMPRESSIBLE	(1<<14)
+#define FBC_C3_IDLE		(1<<13)
 #define FBC_CTL_STRIDE_SHIFT	(5)
 #define FBC_CTL_FENCENO		(1<<0)
 #define FBC_COMMAND		0x0320c
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 12775df1bbfd..b27202d23ebc 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -240,33 +240,86 @@ struct intel_limit {
 #define IRONLAKE_DOT_MAX	350000
 #define IRONLAKE_VCO_MIN	1760000
 #define IRONLAKE_VCO_MAX	3510000
-#define IRONLAKE_N_MIN		1
-#define IRONLAKE_N_MAX		6
-#define IRONLAKE_M_MIN		79
-#define IRONLAKE_M_MAX		127
 #define IRONLAKE_M1_MIN		12
 #define IRONLAKE_M1_MAX		22
 #define IRONLAKE_M2_MIN		5
 #define IRONLAKE_M2_MAX		9
-#define IRONLAKE_P_SDVO_DAC_MIN	5
-#define IRONLAKE_P_SDVO_DAC_MAX	80
-#define IRONLAKE_P_LVDS_MIN	28
-#define IRONLAKE_P_LVDS_MAX	112
-#define IRONLAKE_P1_MIN		1
-#define IRONLAKE_P1_MAX		8
-#define IRONLAKE_P2_SDVO_DAC_SLOW 10
-#define IRONLAKE_P2_SDVO_DAC_FAST 5
-#define IRONLAKE_P2_LVDS_SLOW	14 /* single channel */
-#define IRONLAKE_P2_LVDS_FAST	7  /* double channel */
 #define IRONLAKE_P2_DOT_LIMIT	225000 /* 225Mhz */
 
-#define IRONLAKE_P_DISPLAY_PORT_MIN	10
-#define IRONLAKE_P_DISPLAY_PORT_MAX	20
-#define IRONLAKE_P2_DISPLAY_PORT_FAST	10
-#define IRONLAKE_P2_DISPLAY_PORT_SLOW	10
-#define IRONLAKE_P2_DISPLAY_PORT_LIMIT	0
-#define IRONLAKE_P1_DISPLAY_PORT_MIN	1
-#define IRONLAKE_P1_DISPLAY_PORT_MAX	2
+/* We have parameter ranges for different type of outputs. */
+
+/* DAC & HDMI Refclk 120Mhz */
+#define IRONLAKE_DAC_N_MIN	1
+#define IRONLAKE_DAC_N_MAX	5
+#define IRONLAKE_DAC_M_MIN	79
+#define IRONLAKE_DAC_M_MAX	127
+#define IRONLAKE_DAC_P_MIN	5
+#define IRONLAKE_DAC_P_MAX	80
+#define IRONLAKE_DAC_P1_MIN	1
+#define IRONLAKE_DAC_P1_MAX	8
+#define IRONLAKE_DAC_P2_SLOW	10
+#define IRONLAKE_DAC_P2_FAST	5
+
+/* LVDS single-channel 120Mhz refclk */
+#define IRONLAKE_LVDS_S_N_MIN	1
+#define IRONLAKE_LVDS_S_N_MAX	3
+#define IRONLAKE_LVDS_S_M_MIN	79
+#define IRONLAKE_LVDS_S_M_MAX	118
+#define IRONLAKE_LVDS_S_P_MIN	28
+#define IRONLAKE_LVDS_S_P_MAX	112
+#define IRONLAKE_LVDS_S_P1_MIN	2
+#define IRONLAKE_LVDS_S_P1_MAX	8
+#define IRONLAKE_LVDS_S_P2_SLOW	14
+#define IRONLAKE_LVDS_S_P2_FAST	14
+
+/* LVDS dual-channel 120Mhz refclk */
+#define IRONLAKE_LVDS_D_N_MIN	1
+#define IRONLAKE_LVDS_D_N_MAX	3
+#define IRONLAKE_LVDS_D_M_MIN	79
+#define IRONLAKE_LVDS_D_M_MAX	127
+#define IRONLAKE_LVDS_D_P_MIN	14
+#define IRONLAKE_LVDS_D_P_MAX	56
+#define IRONLAKE_LVDS_D_P1_MIN	2
+#define IRONLAKE_LVDS_D_P1_MAX	8
+#define IRONLAKE_LVDS_D_P2_SLOW	7
+#define IRONLAKE_LVDS_D_P2_FAST	7
+
+/* LVDS single-channel 100Mhz refclk */
+#define IRONLAKE_LVDS_S_SSC_N_MIN	1
+#define IRONLAKE_LVDS_S_SSC_N_MAX	2
+#define IRONLAKE_LVDS_S_SSC_M_MIN	79
+#define IRONLAKE_LVDS_S_SSC_M_MAX	126
+#define IRONLAKE_LVDS_S_SSC_P_MIN	28
+#define IRONLAKE_LVDS_S_SSC_P_MAX	112
+#define IRONLAKE_LVDS_S_SSC_P1_MIN	2
+#define IRONLAKE_LVDS_S_SSC_P1_MAX	8
+#define IRONLAKE_LVDS_S_SSC_P2_SLOW	14
+#define IRONLAKE_LVDS_S_SSC_P2_FAST	14
+
+/* LVDS dual-channel 100Mhz refclk */
+#define IRONLAKE_LVDS_D_SSC_N_MIN	1
+#define IRONLAKE_LVDS_D_SSC_N_MAX	3
+#define IRONLAKE_LVDS_D_SSC_M_MIN	79
+#define IRONLAKE_LVDS_D_SSC_M_MAX	126
+#define IRONLAKE_LVDS_D_SSC_P_MIN	14
+#define IRONLAKE_LVDS_D_SSC_P_MAX	42
+#define IRONLAKE_LVDS_D_SSC_P1_MIN	2
+#define IRONLAKE_LVDS_D_SSC_P1_MAX	6
+#define IRONLAKE_LVDS_D_SSC_P2_SLOW	7
+#define IRONLAKE_LVDS_D_SSC_P2_FAST	7
+
+/* DisplayPort */
+#define IRONLAKE_DP_N_MIN	1
+#define IRONLAKE_DP_N_MAX	2
+#define IRONLAKE_DP_M_MIN	81
+#define IRONLAKE_DP_M_MAX	90
+#define IRONLAKE_DP_P_MIN	10
+#define IRONLAKE_DP_P_MAX	20
+#define IRONLAKE_DP_P2_FAST	10
+#define IRONLAKE_DP_P2_SLOW	10
+#define IRONLAKE_DP_P2_LIMIT	0
+#define IRONLAKE_DP_P1_MIN	1
+#define IRONLAKE_DP_P1_MAX	2
 
 static bool
 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
@@ -474,33 +527,78 @@ static const intel_limit_t intel_limits_pineview_lvds = {
 	.find_pll = intel_find_best_PLL,
 };
 
-static const intel_limit_t intel_limits_ironlake_sdvo = {
+static const intel_limit_t intel_limits_ironlake_dac = {
 	.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
 	.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
-	.n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX },
-	.m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX },
+	.n = { .min = IRONLAKE_DAC_N_MIN, .max = IRONLAKE_DAC_N_MAX },
+	.m = { .min = IRONLAKE_DAC_M_MIN, .max = IRONLAKE_DAC_M_MAX },
 	.m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
 	.m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
-	.p = { .min = IRONLAKE_P_SDVO_DAC_MIN, .max = IRONLAKE_P_SDVO_DAC_MAX },
-	.p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX },
+	.p = { .min = IRONLAKE_DAC_P_MIN, .max = IRONLAKE_DAC_P_MAX },
+	.p1 = { .min = IRONLAKE_DAC_P1_MIN, .max = IRONLAKE_DAC_P1_MAX },
 	.p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
-		.p2_slow = IRONLAKE_P2_SDVO_DAC_SLOW,
-		.p2_fast = IRONLAKE_P2_SDVO_DAC_FAST },
+		.p2_slow = IRONLAKE_DAC_P2_SLOW,
+		.p2_fast = IRONLAKE_DAC_P2_FAST },
 	.find_pll = intel_g4x_find_best_PLL,
 };
 
-static const intel_limit_t intel_limits_ironlake_lvds = {
+static const intel_limit_t intel_limits_ironlake_single_lvds = {
 	.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
 	.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
-	.n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX },
-	.m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX },
+	.n = { .min = IRONLAKE_LVDS_S_N_MIN, .max = IRONLAKE_LVDS_S_N_MAX },
+	.m = { .min = IRONLAKE_LVDS_S_M_MIN, .max = IRONLAKE_LVDS_S_M_MAX },
 	.m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
 	.m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
-	.p = { .min = IRONLAKE_P_LVDS_MIN, .max = IRONLAKE_P_LVDS_MAX },
-	.p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX },
+	.p = { .min = IRONLAKE_LVDS_S_P_MIN, .max = IRONLAKE_LVDS_S_P_MAX },
+	.p1 = { .min = IRONLAKE_LVDS_S_P1_MIN, .max = IRONLAKE_LVDS_S_P1_MAX },
 	.p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
-		.p2_slow = IRONLAKE_P2_LVDS_SLOW,
-		.p2_fast = IRONLAKE_P2_LVDS_FAST },
+		.p2_slow = IRONLAKE_LVDS_S_P2_SLOW,
+		.p2_fast = IRONLAKE_LVDS_S_P2_FAST },
+	.find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_dual_lvds = {
+	.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
+	.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
+	.n = { .min = IRONLAKE_LVDS_D_N_MIN, .max = IRONLAKE_LVDS_D_N_MAX },
+	.m = { .min = IRONLAKE_LVDS_D_M_MIN, .max = IRONLAKE_LVDS_D_M_MAX },
+	.m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
+	.m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
+	.p = { .min = IRONLAKE_LVDS_D_P_MIN, .max = IRONLAKE_LVDS_D_P_MAX },
+	.p1 = { .min = IRONLAKE_LVDS_D_P1_MIN, .max = IRONLAKE_LVDS_D_P1_MAX },
+	.p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
+		.p2_slow = IRONLAKE_LVDS_D_P2_SLOW,
+		.p2_fast = IRONLAKE_LVDS_D_P2_FAST },
+	.find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
+	.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
+	.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
+	.n = { .min = IRONLAKE_LVDS_S_SSC_N_MIN, .max = IRONLAKE_LVDS_S_SSC_N_MAX },
+	.m = { .min = IRONLAKE_LVDS_S_SSC_M_MIN, .max = IRONLAKE_LVDS_S_SSC_M_MAX },
+	.m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
+	.m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
+	.p = { .min = IRONLAKE_LVDS_S_SSC_P_MIN, .max = IRONLAKE_LVDS_S_SSC_P_MAX },
+	.p1 = { .min = IRONLAKE_LVDS_S_SSC_P1_MIN, .max = IRONLAKE_LVDS_S_SSC_P1_MAX },
+	.p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
+		.p2_slow = IRONLAKE_LVDS_S_SSC_P2_SLOW,
+		.p2_fast = IRONLAKE_LVDS_S_SSC_P2_FAST },
+	.find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
+	.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
+	.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
+	.n = { .min = IRONLAKE_LVDS_D_SSC_N_MIN, .max = IRONLAKE_LVDS_D_SSC_N_MAX },
+	.m = { .min = IRONLAKE_LVDS_D_SSC_M_MIN, .max = IRONLAKE_LVDS_D_SSC_M_MAX },
+	.m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
+	.m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
+	.p = { .min = IRONLAKE_LVDS_D_SSC_P_MIN, .max = IRONLAKE_LVDS_D_SSC_P_MAX },
+	.p1 = { .min = IRONLAKE_LVDS_D_SSC_P1_MIN, .max = IRONLAKE_LVDS_D_SSC_P1_MAX },
+	.p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
+		.p2_slow = IRONLAKE_LVDS_D_SSC_P2_SLOW,
+		.p2_fast = IRONLAKE_LVDS_D_SSC_P2_FAST },
 	.find_pll = intel_g4x_find_best_PLL,
 };
 
@@ -509,34 +607,53 @@ static const intel_limit_t intel_limits_ironlake_display_port = {
509 .max = IRONLAKE_DOT_MAX }, 607 .max = IRONLAKE_DOT_MAX },
510 .vco = { .min = IRONLAKE_VCO_MIN, 608 .vco = { .min = IRONLAKE_VCO_MIN,
511 .max = IRONLAKE_VCO_MAX}, 609 .max = IRONLAKE_VCO_MAX},
512 .n = { .min = IRONLAKE_N_MIN, 610 .n = { .min = IRONLAKE_DP_N_MIN,
513 .max = IRONLAKE_N_MAX }, 611 .max = IRONLAKE_DP_N_MAX },
514 .m = { .min = IRONLAKE_M_MIN, 612 .m = { .min = IRONLAKE_DP_M_MIN,
515 .max = IRONLAKE_M_MAX }, 613 .max = IRONLAKE_DP_M_MAX },
516 .m1 = { .min = IRONLAKE_M1_MIN, 614 .m1 = { .min = IRONLAKE_M1_MIN,
517 .max = IRONLAKE_M1_MAX }, 615 .max = IRONLAKE_M1_MAX },
518 .m2 = { .min = IRONLAKE_M2_MIN, 616 .m2 = { .min = IRONLAKE_M2_MIN,
519 .max = IRONLAKE_M2_MAX }, 617 .max = IRONLAKE_M2_MAX },
520 .p = { .min = IRONLAKE_P_DISPLAY_PORT_MIN, 618 .p = { .min = IRONLAKE_DP_P_MIN,
521 .max = IRONLAKE_P_DISPLAY_PORT_MAX }, 619 .max = IRONLAKE_DP_P_MAX },
522 .p1 = { .min = IRONLAKE_P1_DISPLAY_PORT_MIN, 620 .p1 = { .min = IRONLAKE_DP_P1_MIN,
523 .max = IRONLAKE_P1_DISPLAY_PORT_MAX}, 621 .max = IRONLAKE_DP_P1_MAX},
524 .p2 = { .dot_limit = IRONLAKE_P2_DISPLAY_PORT_LIMIT, 622 .p2 = { .dot_limit = IRONLAKE_DP_P2_LIMIT,
525 .p2_slow = IRONLAKE_P2_DISPLAY_PORT_SLOW, 623 .p2_slow = IRONLAKE_DP_P2_SLOW,
526 .p2_fast = IRONLAKE_P2_DISPLAY_PORT_FAST }, 624 .p2_fast = IRONLAKE_DP_P2_FAST },
527 .find_pll = intel_find_pll_ironlake_dp, 625 .find_pll = intel_find_pll_ironlake_dp,
528}; 626};
529 627
530static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc) 628static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
531{ 629{
630 struct drm_device *dev = crtc->dev;
631 struct drm_i915_private *dev_priv = dev->dev_private;
532 const intel_limit_t *limit; 632 const intel_limit_t *limit;
533 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 633 int refclk = 120;
534 limit = &intel_limits_ironlake_lvds; 634
535 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || 635 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
636 if (dev_priv->lvds_use_ssc && dev_priv->lvds_ssc_freq == 100)
637 refclk = 100;
638
639 if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
640 LVDS_CLKB_POWER_UP) {
641 /* LVDS dual channel */
642 if (refclk == 100)
643 limit = &intel_limits_ironlake_dual_lvds_100m;
644 else
645 limit = &intel_limits_ironlake_dual_lvds;
646 } else {
647 if (refclk == 100)
648 limit = &intel_limits_ironlake_single_lvds_100m;
649 else
650 limit = &intel_limits_ironlake_single_lvds;
651 }
652 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
536 HAS_eDP) 653 HAS_eDP)
537 limit = &intel_limits_ironlake_display_port; 654 limit = &intel_limits_ironlake_display_port;
538 else 655 else
539 limit = &intel_limits_ironlake_sdvo; 656 limit = &intel_limits_ironlake_dac;
540 657
541 return limit; 658 return limit;
542} 659}
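The reworked intel_ironlake_limit() above picks a limit table from two conditions: whether the LVDS link is dual-channel (PCH_LVDS reports LVDS_CLKB_POWER_UP) and whether the SSC reference clock runs at 100 MHz. A minimal stand-alone sketch of that selection logic, with hypothetical table names and plain booleans standing in for the register read and the dev_priv fields:

#include <stdbool.h>
#include <stdio.h>

struct limit_table { const char *name; };

static const struct limit_table single_lvds      = { "single_lvds" };
static const struct limit_table single_lvds_100m = { "single_lvds_100m" };
static const struct limit_table dual_lvds        = { "dual_lvds" };
static const struct limit_table dual_lvds_100m   = { "dual_lvds_100m" };

/* Mirrors the two-step decision in the hunk: channel width first,
 * then the 100 MHz SSC reference clock. */
static const struct limit_table *
pick_lvds_limit(bool dual_channel, bool refclk_100mhz)
{
	if (dual_channel)
		return refclk_100mhz ? &dual_lvds_100m : &dual_lvds;
	return refclk_100mhz ? &single_lvds_100m : &single_lvds;
}

int main(void)
{
	printf("%s\n", pick_lvds_limit(true, false)->name);  /* dual_lvds */
	return 0;
}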
@@ -914,6 +1031,8 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
914 1031
915 /* enable it... */ 1032 /* enable it... */
916 fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC; 1033 fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
1034 if (IS_I945GM(dev))
1035 fbc_ctl |= FBC_C3_IDLE; /* 945 needs special SR handling */
917 fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; 1036 fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
918 fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; 1037 fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
919 if (obj_priv->tiling_mode != I915_TILING_NONE) 1038 if (obj_priv->tiling_mode != I915_TILING_NONE)
@@ -3962,7 +4081,8 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
3962struct intel_unpin_work { 4081struct intel_unpin_work {
3963 struct work_struct work; 4082 struct work_struct work;
3964 struct drm_device *dev; 4083 struct drm_device *dev;
3965 struct drm_gem_object *obj; 4084 struct drm_gem_object *old_fb_obj;
4085 struct drm_gem_object *pending_flip_obj;
3966 struct drm_pending_vblank_event *event; 4086 struct drm_pending_vblank_event *event;
3967 int pending; 4087 int pending;
3968}; 4088};
@@ -3973,8 +4093,9 @@ static void intel_unpin_work_fn(struct work_struct *__work)
3973 container_of(__work, struct intel_unpin_work, work); 4093 container_of(__work, struct intel_unpin_work, work);
3974 4094
3975 mutex_lock(&work->dev->struct_mutex); 4095 mutex_lock(&work->dev->struct_mutex);
3976 i915_gem_object_unpin(work->obj); 4096 i915_gem_object_unpin(work->old_fb_obj);
3977 drm_gem_object_unreference(work->obj); 4097 drm_gem_object_unreference(work->pending_flip_obj);
4098 drm_gem_object_unreference(work->old_fb_obj);
3978 mutex_unlock(&work->dev->struct_mutex); 4099 mutex_unlock(&work->dev->struct_mutex);
3979 kfree(work); 4100 kfree(work);
3980} 4101}
@@ -3998,7 +4119,7 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
3998 work = intel_crtc->unpin_work; 4119 work = intel_crtc->unpin_work;
3999 if (work == NULL || !work->pending) { 4120 if (work == NULL || !work->pending) {
4000 if (work && !work->pending) { 4121 if (work && !work->pending) {
4001 obj_priv = work->obj->driver_private; 4122 obj_priv = work->pending_flip_obj->driver_private;
4002 DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n", 4123 DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n",
4003 obj_priv, 4124 obj_priv,
4004 atomic_read(&obj_priv->pending_flip)); 4125 atomic_read(&obj_priv->pending_flip));
@@ -4023,7 +4144,7 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
4023 4144
4024 spin_unlock_irqrestore(&dev->event_lock, flags); 4145 spin_unlock_irqrestore(&dev->event_lock, flags);
4025 4146
4026 obj_priv = work->obj->driver_private; 4147 obj_priv = work->pending_flip_obj->driver_private;
4027 4148
4028 /* Initial scanout buffer will have a 0 pending flip count */ 4149 /* Initial scanout buffer will have a 0 pending flip count */
4029 if ((atomic_read(&obj_priv->pending_flip) == 0) || 4150 if ((atomic_read(&obj_priv->pending_flip) == 0) ||
@@ -4060,7 +4181,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4060 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4181 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4061 struct intel_unpin_work *work; 4182 struct intel_unpin_work *work;
4062 unsigned long flags; 4183 unsigned long flags;
4063 int ret; 4184 int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
4185 int ret, pipesrc;
4064 RING_LOCALS; 4186 RING_LOCALS;
4065 4187
4066 work = kzalloc(sizeof *work, GFP_KERNEL); 4188 work = kzalloc(sizeof *work, GFP_KERNEL);
@@ -4072,7 +4194,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4072 work->event = event; 4194 work->event = event;
4073 work->dev = crtc->dev; 4195 work->dev = crtc->dev;
4074 intel_fb = to_intel_framebuffer(crtc->fb); 4196 intel_fb = to_intel_framebuffer(crtc->fb);
4075 work->obj = intel_fb->obj; 4197 work->old_fb_obj = intel_fb->obj;
4076 INIT_WORK(&work->work, intel_unpin_work_fn); 4198 INIT_WORK(&work->work, intel_unpin_work_fn);
4077 4199
4078 /* We borrow the event spin lock for protecting unpin_work */ 4200 /* We borrow the event spin lock for protecting unpin_work */
@@ -4100,14 +4222,16 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4100 return ret; 4222 return ret;
4101 } 4223 }
4102 4224
4103 /* Reference the old fb object for the scheduled work. */ 4225 /* Reference the objects for the scheduled work. */
4104 drm_gem_object_reference(work->obj); 4226 drm_gem_object_reference(work->old_fb_obj);
4227 drm_gem_object_reference(obj);
4105 4228
4106 crtc->fb = fb; 4229 crtc->fb = fb;
4107 i915_gem_object_flush_write_domain(obj); 4230 i915_gem_object_flush_write_domain(obj);
4108 drm_vblank_get(dev, intel_crtc->pipe); 4231 drm_vblank_get(dev, intel_crtc->pipe);
4109 obj_priv = obj->driver_private; 4232 obj_priv = obj->driver_private;
4110 atomic_inc(&obj_priv->pending_flip); 4233 atomic_inc(&obj_priv->pending_flip);
4234 work->pending_flip_obj = obj;
4111 4235
4112 BEGIN_LP_RING(4); 4236 BEGIN_LP_RING(4);
4113 OUT_RING(MI_DISPLAY_FLIP | 4237 OUT_RING(MI_DISPLAY_FLIP |
@@ -4115,7 +4239,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4115 OUT_RING(fb->pitch); 4239 OUT_RING(fb->pitch);
4116 if (IS_I965G(dev)) { 4240 if (IS_I965G(dev)) {
4117 OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode); 4241 OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
4118 OUT_RING((fb->width << 16) | fb->height); 4242 pipesrc = I915_READ(pipesrc_reg);
4243 OUT_RING(pipesrc & 0x0fff0fff);
4119 } else { 4244 } else {
4120 OUT_RING(obj_priv->gtt_offset); 4245 OUT_RING(obj_priv->gtt_offset);
4121 OUT_RING(MI_NOOP); 4246 OUT_RING(MI_NOOP);
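The page-flip hunks above split the single work->obj into old_fb_obj (unpinned and unreferenced once the flip completes) and pending_flip_obj (kept pinned as the new scanout; only its reference is dropped). A toy model of that bookkeeping, assuming nothing beyond plain counters in place of GEM objects:

#include <assert.h>

struct toy_obj { int refs; int pinned; };

static void ref(struct toy_obj *o)   { o->refs++; }
static void unref(struct toy_obj *o) { o->refs--; }

struct unpin_work { struct toy_obj *old_fb_obj, *pending_flip_obj; };

/* queue: take a reference on both objects involved in the flip */
static void queue_flip(struct unpin_work *w, struct toy_obj *old_fb,
		       struct toy_obj *new_fb)
{
	w->old_fb_obj = old_fb;
	w->pending_flip_obj = new_fb;
	ref(old_fb);
	ref(new_fb);
}

/* completion: only the *old* framebuffer is unpinned, both refs drop */
static void finish_flip(struct unpin_work *w)
{
	w->old_fb_obj->pinned--;
	unref(w->old_fb_obj);
	unref(w->pending_flip_obj);
}

int main(void)
{
	struct toy_obj old_fb = { .refs = 1, .pinned = 1 };
	struct toy_obj new_fb = { .refs = 1, .pinned = 1 };
	struct unpin_work w;

	queue_flip(&w, &old_fb, &new_fb);
	finish_flip(&w);
	assert(old_fb.refs == 1 && old_fb.pinned == 0);
	assert(new_fb.refs == 1 && new_fb.pinned == 1);
	return 0;
}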
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 371d753e362b..aaabbcbe5905 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -148,7 +148,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
148 148
149 mutex_lock(&dev->struct_mutex); 149 mutex_lock(&dev->struct_mutex);
150 150
151 ret = i915_gem_object_pin(fbo, PAGE_SIZE); 151 ret = i915_gem_object_pin(fbo, 64*1024);
152 if (ret) { 152 if (ret) {
153 DRM_ERROR("failed to pin fb: %d\n", ret); 153 DRM_ERROR("failed to pin fb: %d\n", ret);
154 goto out_unref; 154 goto out_unref;
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 1cf488247a16..48227e744753 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -90,21 +90,21 @@ int nouveau_hybrid_setup(struct drm_device *dev)
90{ 90{
91 int result; 91 int result;
92 92
93 if (nouveau_dsm(dev, NOUVEAU_DSM_ACTIVE, NOUVEAU_DSM_ACTIVE_QUERY, 93 if (nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STATE,
94 &result)) 94 &result))
95 return -ENODEV; 95 return -ENODEV;
96 96
97 NV_INFO(dev, "_DSM hardware status gave 0x%x\n", result); 97 NV_INFO(dev, "_DSM hardware status gave 0x%x\n", result);
98 98
99 if (result & 0x1) { /* Stamina mode - disable the external GPU */ 99 if (result) { /* Ensure that the external GPU is enabled */
100 nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL);
101 nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED,
102 NULL);
103 } else { /* Stamina mode - disable the external GPU */
100 nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_STAMINA, 104 nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_STAMINA,
101 NULL); 105 NULL);
102 nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STAMINA, 106 nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STAMINA,
103 NULL); 107 NULL);
104 } else { /* Ensure that the external GPU is enabled */
105 nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL);
106 nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED,
107 NULL);
108 } 108 }
109 109
110 return 0; 110 return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index d7f8d8b4a4b8..2cd0fad17dac 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -1865,7 +1865,7 @@ init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1865 1865
1866 struct drm_nouveau_private *dev_priv = bios->dev->dev_private; 1866 struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
1867 1867
1868 if (dev_priv->card_type >= NV_50) 1868 if (dev_priv->card_type >= NV_40)
1869 return 1; 1869 return 1;
1870 1870
1871 /* 1871 /*
@@ -3765,7 +3765,6 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3765 */ 3765 */
3766 3766
3767 struct drm_nouveau_private *dev_priv = dev->dev_private; 3767 struct drm_nouveau_private *dev_priv = dev->dev_private;
3768 struct init_exec iexec = {true, false};
3769 struct nvbios *bios = &dev_priv->VBIOS; 3768 struct nvbios *bios = &dev_priv->VBIOS;
3770 uint8_t *table = &bios->data[bios->display.script_table_ptr]; 3769 uint8_t *table = &bios->data[bios->display.script_table_ptr];
3771 uint8_t *otable = NULL; 3770 uint8_t *otable = NULL;
@@ -3845,8 +3844,6 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3845 } 3844 }
3846 } 3845 }
3847 3846
3848 bios->display.output = dcbent;
3849
3850 if (pxclk == 0) { 3847 if (pxclk == 0) {
3851 script = ROM16(otable[6]); 3848 script = ROM16(otable[6]);
3852 if (!script) { 3849 if (!script) {
@@ -3855,7 +3852,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3855 } 3852 }
3856 3853
3857 NV_TRACE(dev, "0x%04X: parsing output script 0\n", script); 3854 NV_TRACE(dev, "0x%04X: parsing output script 0\n", script);
3858 parse_init_table(bios, script, &iexec); 3855 nouveau_bios_run_init_table(dev, script, dcbent);
3859 } else 3856 } else
3860 if (pxclk == -1) { 3857 if (pxclk == -1) {
3861 script = ROM16(otable[8]); 3858 script = ROM16(otable[8]);
@@ -3865,7 +3862,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3865 } 3862 }
3866 3863
3867 NV_TRACE(dev, "0x%04X: parsing output script 1\n", script); 3864 NV_TRACE(dev, "0x%04X: parsing output script 1\n", script);
3868 parse_init_table(bios, script, &iexec); 3865 nouveau_bios_run_init_table(dev, script, dcbent);
3869 } else 3866 } else
3870 if (pxclk == -2) { 3867 if (pxclk == -2) {
3871 if (table[4] >= 12) 3868 if (table[4] >= 12)
@@ -3878,7 +3875,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3878 } 3875 }
3879 3876
3880 NV_TRACE(dev, "0x%04X: parsing output script 2\n", script); 3877 NV_TRACE(dev, "0x%04X: parsing output script 2\n", script);
3881 parse_init_table(bios, script, &iexec); 3878 nouveau_bios_run_init_table(dev, script, dcbent);
3882 } else 3879 } else
3883 if (pxclk > 0) { 3880 if (pxclk > 0) {
3884 script = ROM16(otable[table[4] + i*6 + 2]); 3881 script = ROM16(otable[table[4] + i*6 + 2]);
@@ -3890,7 +3887,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3890 } 3887 }
3891 3888
3892 NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script); 3889 NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script);
3893 parse_init_table(bios, script, &iexec); 3890 nouveau_bios_run_init_table(dev, script, dcbent);
3894 } else 3891 } else
3895 if (pxclk < 0) { 3892 if (pxclk < 0) {
3896 script = ROM16(otable[table[4] + i*6 + 4]); 3893 script = ROM16(otable[table[4] + i*6 + 4]);
@@ -3902,7 +3899,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3902 } 3899 }
3903 3900
3904 NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script); 3901 NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script);
3905 parse_init_table(bios, script, &iexec); 3902 nouveau_bios_run_init_table(dev, script, dcbent);
3906 } 3903 }
3907 3904
3908 return 0; 3905 return 0;
@@ -5864,10 +5861,13 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
5864 struct drm_nouveau_private *dev_priv = dev->dev_private; 5861 struct drm_nouveau_private *dev_priv = dev->dev_private;
5865 struct nvbios *bios = &dev_priv->VBIOS; 5862 struct nvbios *bios = &dev_priv->VBIOS;
5866 struct init_exec iexec = { true, false }; 5863 struct init_exec iexec = { true, false };
5864 unsigned long flags;
5867 5865
5866 spin_lock_irqsave(&bios->lock, flags);
5868 bios->display.output = dcbent; 5867 bios->display.output = dcbent;
5869 parse_init_table(bios, table, &iexec); 5868 parse_init_table(bios, table, &iexec);
5870 bios->display.output = NULL; 5869 bios->display.output = NULL;
5870 spin_unlock_irqrestore(&bios->lock, flags);
5871} 5871}
5872 5872
5873static bool NVInitVBIOS(struct drm_device *dev) 5873static bool NVInitVBIOS(struct drm_device *dev)
@@ -5876,6 +5876,7 @@ static bool NVInitVBIOS(struct drm_device *dev)
5876 struct nvbios *bios = &dev_priv->VBIOS; 5876 struct nvbios *bios = &dev_priv->VBIOS;
5877 5877
5878 memset(bios, 0, sizeof(struct nvbios)); 5878 memset(bios, 0, sizeof(struct nvbios));
5879 spin_lock_init(&bios->lock);
5879 bios->dev = dev; 5880 bios->dev = dev;
5880 5881
5881 if (!NVShadowVBIOS(dev, bios->data)) 5882 if (!NVShadowVBIOS(dev, bios->data))
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 058e98c76d89..68446fd4146b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -205,6 +205,8 @@ struct nvbios {
205 struct drm_device *dev; 205 struct drm_device *dev;
206 struct nouveau_bios_info pub; 206 struct nouveau_bios_info pub;
207 207
208 spinlock_t lock;
209
208 uint8_t data[NV_PROM_SIZE]; 210 uint8_t data[NV_PROM_SIZE];
209 unsigned int length; 211 unsigned int length;
210 bool execute; 212 bool execute;
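nouveau_bios_run_init_table() is now the single entry point for running init tables, and the new bios->lock serialises callers (including the display-table paths rewritten above) so bios->display.output cannot change underneath a running parse. A minimal kernel-context sketch of the same discipline, using hypothetical toy_* names; the irqsave variant keeps it safe if a caller ever runs in interrupt context:

#include <linux/spinlock.h>

/* Hypothetical shared parser state, modeled on struct nvbios in the patch.
 * The lock would be set up once with spin_lock_init(), as NVInitVBIOS()
 * now does for bios->lock. */
struct toy_bios {
	spinlock_t lock;
	void *current_output;
};

static void toy_run_init_table(struct toy_bios *bios, void *dcbent)
{
	unsigned long flags;

	spin_lock_irqsave(&bios->lock, flags);
	bios->current_output = dcbent;
	/* ... the parse_init_table() equivalent would run here ... */
	bios->current_output = NULL;
	spin_unlock_irqrestore(&bios->lock, flags);
}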
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index db0ed4c13f98..028719fddf76 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -65,8 +65,10 @@ nouveau_bo_fixup_align(struct drm_device *dev,
65 65
66 /* 66 /*
67 * Some of the tile_flags have a periodic structure of N*4096 bytes, 67 * Some of the tile_flags have a periodic structure of N*4096 bytes,
68 * align to that as well as the page size. Overallocate memory to 68 * align to that as well as the page size. Align the size to the
69 * avoid corruption of other buffer objects. 69 * appropriate boundaries. This does imply that sizes are rounded up
70 * 3-7 pages, so be aware of this and do not waste memory by allocating
71 * many small buffers.
70 */ 72 */
71 if (dev_priv->card_type == NV_50) { 73 if (dev_priv->card_type == NV_50) {
72 uint32_t block_size = nouveau_mem_fb_amount(dev) >> 15; 74 uint32_t block_size = nouveau_mem_fb_amount(dev) >> 15;
@@ -77,22 +79,20 @@ nouveau_bo_fixup_align(struct drm_device *dev,
77 case 0x2800: 79 case 0x2800:
78 case 0x4800: 80 case 0x4800:
79 case 0x7a00: 81 case 0x7a00:
80 *size = roundup(*size, block_size);
81 if (is_power_of_2(block_size)) { 82 if (is_power_of_2(block_size)) {
82 *size += 3 * block_size;
83 for (i = 1; i < 10; i++) { 83 for (i = 1; i < 10; i++) {
84 *align = 12 * i * block_size; 84 *align = 12 * i * block_size;
85 if (!(*align % 65536)) 85 if (!(*align % 65536))
86 break; 86 break;
87 } 87 }
88 } else { 88 } else {
89 *size += 6 * block_size;
90 for (i = 1; i < 10; i++) { 89 for (i = 1; i < 10; i++) {
91 *align = 8 * i * block_size; 90 *align = 8 * i * block_size;
92 if (!(*align % 65536)) 91 if (!(*align % 65536))
93 break; 92 break;
94 } 93 }
95 } 94 }
95 *size = roundup(*size, *align);
96 break; 96 break;
97 default: 97 default:
98 break; 98 break;
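The nouveau_bo_fixup_align() change computes the alignment first and then rounds the buffer size up to it, which is why the new comment warns that small allocations can grow by several pages. A small sketch of that rounding, with a hypothetical alignment value standing in for the block-size calculation:

#include <stdio.h>

/* roundup(x, y) as used in the hunk above: smallest multiple of y >= x. */
static unsigned long round_up_to(unsigned long size, unsigned long align)
{
	return ((size + align - 1) / align) * align;
}

int main(void)
{
	unsigned long align = 12 * 4096;  /* hypothetical block-derived alignment */

	printf("%lu -> %lu\n", 4096UL, round_up_to(4096, align));
	return 0;
}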
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 343d718a9667..2281f99da7fc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -278,12 +278,11 @@ nouveau_channel_free(struct nouveau_channel *chan)
278 /* Ensure the channel is no longer active on the GPU */ 278 /* Ensure the channel is no longer active on the GPU */
279 pfifo->reassign(dev, false); 279 pfifo->reassign(dev, false);
280 280
281 if (pgraph->channel(dev) == chan) { 281 pgraph->fifo_access(dev, false);
282 pgraph->fifo_access(dev, false); 282 if (pgraph->channel(dev) == chan)
283 pgraph->unload_context(dev); 283 pgraph->unload_context(dev);
284 pgraph->fifo_access(dev, true);
285 }
286 pgraph->destroy_context(chan); 284 pgraph->destroy_context(chan);
285 pgraph->fifo_access(dev, true);
287 286
288 if (pfifo->channel_id(dev) == chan->id) { 287 if (pfifo->channel_id(dev) == chan->id) {
289 pfifo->disable(dev); 288 pfifo->disable(dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 7e6d673f3a23..d2f63353ea97 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -88,13 +88,14 @@ nouveau_connector_destroy(struct drm_connector *drm_connector)
88{ 88{
89 struct nouveau_connector *nv_connector = 89 struct nouveau_connector *nv_connector =
90 nouveau_connector(drm_connector); 90 nouveau_connector(drm_connector);
91 struct drm_device *dev = nv_connector->base.dev; 91 struct drm_device *dev;
92
93 NV_DEBUG_KMS(dev, "\n");
94 92
95 if (!nv_connector) 93 if (!nv_connector)
96 return; 94 return;
97 95
96 dev = nv_connector->base.dev;
97 NV_DEBUG_KMS(dev, "\n");
98
98 kfree(nv_connector->edid); 99 kfree(nv_connector->edid);
99 drm_sysfs_connector_remove(drm_connector); 100 drm_sysfs_connector_remove(drm_connector);
100 drm_connector_cleanup(drm_connector); 101 drm_connector_cleanup(drm_connector);
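The nouveau_connector_destroy() fix is purely an ordering one: the old code read nv_connector->base.dev before the !nv_connector check, so the check could never protect anything. The same pattern is applied in the nouveau_sgdma, nouveau_object and nv50_crtc hunks later in this diff. A tiny illustration, with hypothetical names:

#include <stddef.h>
#include <stdio.h>

struct conn { const char *name; };

/* Patched order: check the pointer first, only then dereference it. */
static void destroy_fixed(struct conn *c)
{
	const char *name;

	if (!c)
		return;
	name = c->name;
	printf("destroying %s\n", name);
}

int main(void)
{
	destroy_fixed(NULL);  /* safe no-op; the pre-patch order would crash */
	return 0;
}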
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index dd4937224220..f954ad93e81f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -502,12 +502,12 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
502 break; 502 break;
503 } 503 }
504 504
505 if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) {
506 ret = -EREMOTEIO;
507 goto out;
508 }
509
510 if (cmd & 1) { 505 if (cmd & 1) {
506 if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) {
507 ret = -EREMOTEIO;
508 goto out;
509 }
510
511 for (i = 0; i < 4; i++) { 511 for (i = 0; i < 4; i++) {
512 data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i)); 512 data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i));
513 NV_DEBUG_KMS(dev, "rd %d: 0x%08x\n", i, data32[i]); 513 NV_DEBUG_KMS(dev, "rd %d: 0x%08x\n", i, data32[i]);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 343ab7f17ccc..da3b93b84502 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -56,7 +56,7 @@ int nouveau_vram_pushbuf;
56module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400); 56module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
57 57
58MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM"); 58MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM");
59int nouveau_vram_notify; 59int nouveau_vram_notify = 1;
60module_param_named(vram_notify, nouveau_vram_notify, int, 0400); 60module_param_named(vram_notify, nouveau_vram_notify, int, 0400);
61 61
62MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)"); 62MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)");
@@ -75,6 +75,14 @@ MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status");
75int nouveau_ignorelid = 0; 75int nouveau_ignorelid = 0;
76module_param_named(ignorelid, nouveau_ignorelid, int, 0400); 76module_param_named(ignorelid, nouveau_ignorelid, int, 0400);
77 77
78MODULE_PARM_DESC(noaccel, "Disable all acceleration");
79int nouveau_noaccel = 0;
80module_param_named(noaccel, nouveau_noaccel, int, 0400);
81
82MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
83int nouveau_nofbaccel = 0;
84module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
85
78MODULE_PARM_DESC(tv_norm, "Default TV norm.\n" 86MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
79 "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n" 87 "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n"
80 "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n" 88 "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n"
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 6b9690418bc7..5445cefdd03e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -678,6 +678,8 @@ extern int nouveau_reg_debug;
678extern char *nouveau_vbios; 678extern char *nouveau_vbios;
679extern int nouveau_ctxfw; 679extern int nouveau_ctxfw;
680extern int nouveau_ignorelid; 680extern int nouveau_ignorelid;
681extern int nouveau_nofbaccel;
682extern int nouveau_noaccel;
681 683
682/* nouveau_state.c */ 684/* nouveau_state.c */
683extern void nouveau_preclose(struct drm_device *dev, struct drm_file *); 685extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 0b05c869e0e7..ea879a2efef3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -107,6 +107,34 @@ static struct fb_ops nouveau_fbcon_ops = {
107 .fb_setcmap = drm_fb_helper_setcmap, 107 .fb_setcmap = drm_fb_helper_setcmap,
108}; 108};
109 109
110static struct fb_ops nv04_fbcon_ops = {
111 .owner = THIS_MODULE,
112 .fb_check_var = drm_fb_helper_check_var,
113 .fb_set_par = drm_fb_helper_set_par,
114 .fb_setcolreg = drm_fb_helper_setcolreg,
115 .fb_fillrect = nv04_fbcon_fillrect,
116 .fb_copyarea = nv04_fbcon_copyarea,
117 .fb_imageblit = nv04_fbcon_imageblit,
118 .fb_sync = nouveau_fbcon_sync,
119 .fb_pan_display = drm_fb_helper_pan_display,
120 .fb_blank = drm_fb_helper_blank,
121 .fb_setcmap = drm_fb_helper_setcmap,
122};
123
124static struct fb_ops nv50_fbcon_ops = {
125 .owner = THIS_MODULE,
126 .fb_check_var = drm_fb_helper_check_var,
127 .fb_set_par = drm_fb_helper_set_par,
128 .fb_setcolreg = drm_fb_helper_setcolreg,
129 .fb_fillrect = nv50_fbcon_fillrect,
130 .fb_copyarea = nv50_fbcon_copyarea,
131 .fb_imageblit = nv50_fbcon_imageblit,
132 .fb_sync = nouveau_fbcon_sync,
133 .fb_pan_display = drm_fb_helper_pan_display,
134 .fb_blank = drm_fb_helper_blank,
135 .fb_setcmap = drm_fb_helper_setcmap,
136};
137
110static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, 138static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
111 u16 blue, int regno) 139 u16 blue, int regno)
112{ 140{
@@ -267,8 +295,12 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
267 dev_priv->fbdev_info = info; 295 dev_priv->fbdev_info = info;
268 296
269 strcpy(info->fix.id, "nouveaufb"); 297 strcpy(info->fix.id, "nouveaufb");
270 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA | 298 if (nouveau_nofbaccel)
271 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_IMAGEBLIT; 299 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_DISABLED;
300 else
301 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
302 FBINFO_HWACCEL_FILLRECT |
303 FBINFO_HWACCEL_IMAGEBLIT;
272 info->fbops = &nouveau_fbcon_ops; 304 info->fbops = &nouveau_fbcon_ops;
273 info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset - 305 info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset -
274 dev_priv->vm_vram_base; 306 dev_priv->vm_vram_base;
@@ -316,13 +348,15 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
316 par->nouveau_fb = nouveau_fb; 348 par->nouveau_fb = nouveau_fb;
317 par->dev = dev; 349 par->dev = dev;
318 350
319 if (dev_priv->channel) { 351 if (dev_priv->channel && !nouveau_nofbaccel) {
320 switch (dev_priv->card_type) { 352 switch (dev_priv->card_type) {
321 case NV_50: 353 case NV_50:
322 nv50_fbcon_accel_init(info); 354 nv50_fbcon_accel_init(info);
355 info->fbops = &nv50_fbcon_ops;
323 break; 356 break;
324 default: 357 default:
325 nv04_fbcon_accel_init(info); 358 nv04_fbcon_accel_init(info);
359 info->fbops = &nv04_fbcon_ops;
326 break; 360 break;
327 }; 361 };
328 } 362 }
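Rather than patching individual fb_ops function pointers after framebuffer registration (the lines removed from nv04_fbcon_accel_init()/nv50_fbcon_accel_init() further down), the fbcon code now selects a complete per-generation fb_ops table up front and keeps the plain software ops when nofbaccel is set. A stand-alone sketch of the pick-the-whole-table approach, with hypothetical sw_ops/hw_ops:

#include <stdio.h>

struct ops { void (*fillrect)(void); };

static void sw_fillrect(void) { puts("software fill"); }
static void hw_fillrect(void) { puts("accelerated fill"); }

static const struct ops sw_ops = { .fillrect = sw_fillrect };
static const struct ops hw_ops = { .fillrect = hw_fillrect };

/* Choose the whole ops table once, instead of overwriting pointers
 * inside an already-registered table at runtime. */
int main(void)
{
	int accel_ok = 1;
	const struct ops *ops = accel_ok ? &hw_ops : &sw_ops;

	ops->fillrect();
	return 0;
}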
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index 462e0b87b4bd..f9c34e1a8c11 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -40,7 +40,13 @@ int nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb);
40void nouveau_fbcon_restore(void); 40void nouveau_fbcon_restore(void);
41void nouveau_fbcon_zfill(struct drm_device *dev); 41void nouveau_fbcon_zfill(struct drm_device *dev);
42 42
43void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
44void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
45void nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
43int nv04_fbcon_accel_init(struct fb_info *info); 46int nv04_fbcon_accel_init(struct fb_info *info);
47void nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
48void nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
49void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
44int nv50_fbcon_accel_init(struct fb_info *info); 50int nv50_fbcon_accel_init(struct fb_info *info);
45 51
46void nouveau_fbcon_gpu_lockup(struct fb_info *info); 52void nouveau_fbcon_gpu_lockup(struct fb_info *info);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 6ac804b0c9f9..70cc30803e3b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -925,7 +925,9 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
925 } 925 }
926 926
927 if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) { 927 if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) {
928 spin_lock(&nvbo->bo.lock);
928 ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait); 929 ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
930 spin_unlock(&nvbo->bo.lock);
929 } else { 931 } else {
930 ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait); 932 ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
931 if (ret == 0) 933 if (ret == 0)
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.c b/drivers/gpu/drm/nouveau/nouveau_grctx.c
index 419f4c2b3b89..c7ebec696747 100644
--- a/drivers/gpu/drm/nouveau/nouveau_grctx.c
+++ b/drivers/gpu/drm/nouveau/nouveau_grctx.c
@@ -97,8 +97,8 @@ nouveau_grctx_prog_load(struct drm_device *dev)
97 } 97 }
98 98
99 pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL); 99 pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL);
100 if (!pgraph->ctxprog) { 100 if (!pgraph->ctxvals) {
101 NV_ERROR(dev, "OOM copying ctxprog\n"); 101 NV_ERROR(dev, "OOM copying ctxvals\n");
102 release_firmware(fw); 102 release_firmware(fw);
103 nouveau_grctx_fini(dev); 103 nouveau_grctx_fini(dev);
104 return -ENOMEM; 104 return -ENOMEM;
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 3b9bad66162a..447f9f69d6b1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -211,6 +211,20 @@ nouveau_fifo_irq_handler(struct drm_device *dev)
211 get + 4); 211 get + 4);
212 } 212 }
213 213
214 if (status & NV_PFIFO_INTR_SEMAPHORE) {
215 uint32_t sem;
216
217 status &= ~NV_PFIFO_INTR_SEMAPHORE;
218 nv_wr32(dev, NV03_PFIFO_INTR_0,
219 NV_PFIFO_INTR_SEMAPHORE);
220
221 sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
222 nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
223
224 nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
225 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
226 }
227
214 if (status) { 228 if (status) {
215 NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n", 229 NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
216 status, chid); 230 status, chid);
@@ -566,86 +580,99 @@ nouveau_pgraph_irq_handler(struct drm_device *dev)
566static void 580static void
567nv50_pgraph_irq_handler(struct drm_device *dev) 581nv50_pgraph_irq_handler(struct drm_device *dev)
568{ 582{
569 uint32_t status, nsource; 583 uint32_t status;
570 584
571 status = nv_rd32(dev, NV03_PGRAPH_INTR); 585 while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
572 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); 586 uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
573 587
574 if (status & 0x00000001) { 588 if (status & 0x00000001) {
575 nouveau_pgraph_intr_notify(dev, nsource); 589 nouveau_pgraph_intr_notify(dev, nsource);
576 status &= ~0x00000001; 590 status &= ~0x00000001;
577 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001); 591 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
578 } 592 }
579 593
580 if (status & 0x00000010) { 594 if (status & 0x00000010) {
581 nouveau_pgraph_intr_error(dev, nsource | 595 nouveau_pgraph_intr_error(dev, nsource |
582 NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD); 596 NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD);
583 597
584 status &= ~0x00000010; 598 status &= ~0x00000010;
585 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010); 599 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
586 } 600 }
587 601
588 if (status & 0x00001000) { 602 if (status & 0x00001000) {
589 nv_wr32(dev, 0x400500, 0x00000000); 603 nv_wr32(dev, 0x400500, 0x00000000);
590 nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH); 604 nv_wr32(dev, NV03_PGRAPH_INTR,
591 nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev, 605 NV_PGRAPH_INTR_CONTEXT_SWITCH);
592 NV40_PGRAPH_INTR_EN) & ~NV_PGRAPH_INTR_CONTEXT_SWITCH); 606 nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
593 nv_wr32(dev, 0x400500, 0x00010001); 607 NV40_PGRAPH_INTR_EN) &
608 ~NV_PGRAPH_INTR_CONTEXT_SWITCH);
609 nv_wr32(dev, 0x400500, 0x00010001);
594 610
595 nv50_graph_context_switch(dev); 611 nv50_graph_context_switch(dev);
596 612
597 status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; 613 status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
598 } 614 }
599 615
600 if (status & 0x00100000) { 616 if (status & 0x00100000) {
601 nouveau_pgraph_intr_error(dev, nsource | 617 nouveau_pgraph_intr_error(dev, nsource |
602 NV03_PGRAPH_NSOURCE_DATA_ERROR); 618 NV03_PGRAPH_NSOURCE_DATA_ERROR);
603 619
604 status &= ~0x00100000; 620 status &= ~0x00100000;
605 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000); 621 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
606 } 622 }
607 623
608 if (status & 0x00200000) { 624 if (status & 0x00200000) {
609 int r; 625 int r;
610 626
611 nouveau_pgraph_intr_error(dev, nsource | 627 nouveau_pgraph_intr_error(dev, nsource |
612 NV03_PGRAPH_NSOURCE_PROTECTION_ERROR); 628 NV03_PGRAPH_NSOURCE_PROTECTION_ERROR);
613 629
614 NV_ERROR(dev, "magic set 1:\n"); 630 NV_ERROR(dev, "magic set 1:\n");
615 for (r = 0x408900; r <= 0x408910; r += 4) 631 for (r = 0x408900; r <= 0x408910; r += 4)
616 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); 632 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
617 nv_wr32(dev, 0x408900, nv_rd32(dev, 0x408904) | 0xc0000000); 633 nv_rd32(dev, r));
618 for (r = 0x408e08; r <= 0x408e24; r += 4) 634 nv_wr32(dev, 0x408900,
619 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); 635 nv_rd32(dev, 0x408904) | 0xc0000000);
620 nv_wr32(dev, 0x408e08, nv_rd32(dev, 0x408e08) | 0xc0000000); 636 for (r = 0x408e08; r <= 0x408e24; r += 4)
621 637 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
622 NV_ERROR(dev, "magic set 2:\n"); 638 nv_rd32(dev, r));
623 for (r = 0x409900; r <= 0x409910; r += 4) 639 nv_wr32(dev, 0x408e08,
624 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); 640 nv_rd32(dev, 0x408e08) | 0xc0000000);
625 nv_wr32(dev, 0x409900, nv_rd32(dev, 0x409904) | 0xc0000000); 641
626 for (r = 0x409e08; r <= 0x409e24; r += 4) 642 NV_ERROR(dev, "magic set 2:\n");
627 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); 643 for (r = 0x409900; r <= 0x409910; r += 4)
628 nv_wr32(dev, 0x409e08, nv_rd32(dev, 0x409e08) | 0xc0000000); 644 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
629 645 nv_rd32(dev, r));
630 status &= ~0x00200000; 646 nv_wr32(dev, 0x409900,
631 nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource); 647 nv_rd32(dev, 0x409904) | 0xc0000000);
632 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000); 648 for (r = 0x409e08; r <= 0x409e24; r += 4)
633 } 649 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
650 nv_rd32(dev, r));
651 nv_wr32(dev, 0x409e08,
652 nv_rd32(dev, 0x409e08) | 0xc0000000);
653
654 status &= ~0x00200000;
655 nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource);
656 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
657 }
634 658
635 if (status) { 659 if (status) {
636 NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status); 660 NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n",
637 nv_wr32(dev, NV03_PGRAPH_INTR, status); 661 status);
638 } 662 nv_wr32(dev, NV03_PGRAPH_INTR, status);
663 }
639 664
640 { 665 {
641 const int isb = (1 << 16) | (1 << 0); 666 const int isb = (1 << 16) | (1 << 0);
642 667
643 if ((nv_rd32(dev, 0x400500) & isb) != isb) 668 if ((nv_rd32(dev, 0x400500) & isb) != isb)
644 nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | isb); 669 nv_wr32(dev, 0x400500,
645 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31)); 670 nv_rd32(dev, 0x400500) | isb);
671 }
646 } 672 }
647 673
648 nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); 674 nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
675 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
649} 676}
650 677
651static void 678static void
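The nv50 PGRAPH handler is restructured from a single read of NV03_PGRAPH_INTR into a loop that keeps re-reading the register and acknowledging bits until nothing is pending, so interrupts raised while earlier ones are being serviced are not dropped. A stand-alone sketch of that drain-the-status-register pattern, with a fake status word in place of MMIO:

#include <stdio.h>

static unsigned int fake_status = 0x11;  /* pretend two bits are pending */

static unsigned int read_status(void)   { return fake_status; }
static void ack_bits(unsigned int bits) { fake_status &= ~bits; }

static void irq_handler(void)
{
	unsigned int status;

	/* Loop until the status register reads back as zero. */
	while ((status = read_status())) {
		if (status & 0x01) { puts("notify");       ack_bits(0x01); status &= ~0x01; }
		if (status & 0x10) { puts("illegal mthd"); ack_bits(0x10); status &= ~0x10; }
		if (status) { printf("unhandled 0x%x\n", status); ack_bits(status); }
	}
}

int main(void) { irq_handler(); return 0; }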
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 6c66a34b6345..d99dc087f9b1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -34,15 +34,20 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
34{ 34{
35 struct drm_device *dev = chan->dev; 35 struct drm_device *dev = chan->dev;
36 struct nouveau_bo *ntfy = NULL; 36 struct nouveau_bo *ntfy = NULL;
37 uint32_t flags;
37 int ret; 38 int ret;
38 39
39 ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, nouveau_vram_notify ? 40 if (nouveau_vram_notify)
40 TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT, 41 flags = TTM_PL_FLAG_VRAM;
42 else
43 flags = TTM_PL_FLAG_TT;
44
45 ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags,
41 0, 0x0000, false, true, &ntfy); 46 0, 0x0000, false, true, &ntfy);
42 if (ret) 47 if (ret)
43 return ret; 48 return ret;
44 49
45 ret = nouveau_bo_pin(ntfy, TTM_PL_FLAG_VRAM); 50 ret = nouveau_bo_pin(ntfy, flags);
46 if (ret) 51 if (ret)
47 goto out_err; 52 goto out_err;
48 53
@@ -128,6 +133,8 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
128 target = NV_DMA_TARGET_PCI; 133 target = NV_DMA_TARGET_PCI;
129 } else { 134 } else {
130 target = NV_DMA_TARGET_AGP; 135 target = NV_DMA_TARGET_AGP;
136 if (dev_priv->card_type >= NV_50)
137 offset += dev_priv->vm_gart_base;
131 } 138 }
132 } else { 139 } else {
133 NV_ERROR(dev, "Bad DMA target, mem_type %d!\n", 140 NV_ERROR(dev, "Bad DMA target, mem_type %d!\n",
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 6c2cf81716df..e7c100ba63a1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -885,11 +885,12 @@ int
885nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class, 885nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
886 struct nouveau_gpuobj **gpuobj_ret) 886 struct nouveau_gpuobj **gpuobj_ret)
887{ 887{
888 struct drm_nouveau_private *dev_priv = chan->dev->dev_private; 888 struct drm_nouveau_private *dev_priv;
889 struct nouveau_gpuobj *gpuobj; 889 struct nouveau_gpuobj *gpuobj;
890 890
891 if (!chan || !gpuobj_ret || *gpuobj_ret != NULL) 891 if (!chan || !gpuobj_ret || *gpuobj_ret != NULL)
892 return -EINVAL; 892 return -EINVAL;
893 dev_priv = chan->dev->dev_private;
893 894
894 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); 895 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
895 if (!gpuobj) 896 if (!gpuobj)
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
index 251f1b3b38b9..aa9b310e41be 100644
--- a/drivers/gpu/drm/nouveau/nouveau_reg.h
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -99,6 +99,7 @@
99 * the card will hang early on in the X init process. 99 * the card will hang early on in the X init process.
100 */ 100 */
101# define NV_PMC_ENABLE_UNK13 (1<<13) 101# define NV_PMC_ENABLE_UNK13 (1<<13)
102#define NV40_PMC_GRAPH_UNITS 0x00001540
102#define NV40_PMC_BACKLIGHT 0x000015f0 103#define NV40_PMC_BACKLIGHT 0x000015f0
103# define NV40_PMC_BACKLIGHT_MASK 0x001f0000 104# define NV40_PMC_BACKLIGHT_MASK 0x001f0000
104#define NV40_PMC_1700 0x00001700 105#define NV40_PMC_1700 0x00001700
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 4c7f1e403e80..ed1590577b6c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -54,11 +54,12 @@ static void
54nouveau_sgdma_clear(struct ttm_backend *be) 54nouveau_sgdma_clear(struct ttm_backend *be)
55{ 55{
56 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; 56 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
57 struct drm_device *dev = nvbe->dev; 57 struct drm_device *dev;
58
59 NV_DEBUG(nvbe->dev, "\n");
60 58
61 if (nvbe && nvbe->pages) { 59 if (nvbe && nvbe->pages) {
60 dev = nvbe->dev;
61 NV_DEBUG(dev, "\n");
62
62 if (nvbe->bound) 63 if (nvbe->bound)
63 be->func->unbind(be); 64 be->func->unbind(be);
64 65
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index f2d0187ba152..a4851af5b05e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -310,6 +310,14 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
310static unsigned int 310static unsigned int
311nouveau_vga_set_decode(void *priv, bool state) 311nouveau_vga_set_decode(void *priv, bool state)
312{ 312{
313 struct drm_device *dev = priv;
314 struct drm_nouveau_private *dev_priv = dev->dev_private;
315
316 if (dev_priv->chipset >= 0x40)
317 nv_wr32(dev, 0x88054, state);
318 else
319 nv_wr32(dev, 0x1854, state);
320
313 if (state) 321 if (state)
314 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | 322 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
315 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; 323 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
@@ -427,15 +435,19 @@ nouveau_card_init(struct drm_device *dev)
427 if (ret) 435 if (ret)
428 goto out_timer; 436 goto out_timer;
429 437
430 /* PGRAPH */ 438 if (nouveau_noaccel)
431 ret = engine->graph.init(dev); 439 engine->graph.accel_blocked = true;
432 if (ret) 440 else {
433 goto out_fb; 441 /* PGRAPH */
442 ret = engine->graph.init(dev);
443 if (ret)
444 goto out_fb;
434 445
435 /* PFIFO */ 446 /* PFIFO */
436 ret = engine->fifo.init(dev); 447 ret = engine->fifo.init(dev);
437 if (ret) 448 if (ret)
438 goto out_graph; 449 goto out_graph;
450 }
439 451
440 /* this calls irq_preinstall, registers the irq handler and 452 /* this calls irq_preinstall, registers the irq handler and
441 * calls irq_postinstall 453 * calls irq_postinstall
@@ -479,9 +491,11 @@ nouveau_card_init(struct drm_device *dev)
479out_irq: 491out_irq:
480 drm_irq_uninstall(dev); 492 drm_irq_uninstall(dev);
481out_fifo: 493out_fifo:
482 engine->fifo.takedown(dev); 494 if (!nouveau_noaccel)
495 engine->fifo.takedown(dev);
483out_graph: 496out_graph:
484 engine->graph.takedown(dev); 497 if (!nouveau_noaccel)
498 engine->graph.takedown(dev);
485out_fb: 499out_fb:
486 engine->fb.takedown(dev); 500 engine->fb.takedown(dev);
487out_timer: 501out_timer:
@@ -518,8 +532,10 @@ static void nouveau_card_takedown(struct drm_device *dev)
518 dev_priv->channel = NULL; 532 dev_priv->channel = NULL;
519 } 533 }
520 534
521 engine->fifo.takedown(dev); 535 if (!nouveau_noaccel) {
522 engine->graph.takedown(dev); 536 engine->fifo.takedown(dev);
537 engine->graph.takedown(dev);
538 }
523 engine->fb.takedown(dev); 539 engine->fb.takedown(dev);
524 engine->timer.takedown(dev); 540 engine->timer.takedown(dev);
525 engine->mc.takedown(dev); 541 engine->mc.takedown(dev);
@@ -817,6 +833,15 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
817 case NOUVEAU_GETPARAM_VM_VRAM_BASE: 833 case NOUVEAU_GETPARAM_VM_VRAM_BASE:
818 getparam->value = dev_priv->vm_vram_base; 834 getparam->value = dev_priv->vm_vram_base;
819 break; 835 break;
836 case NOUVEAU_GETPARAM_GRAPH_UNITS:
837 /* NV40 and NV50 versions are quite different, but register
838 * address is the same. User is supposed to know the card
839 * family anyway... */
840 if (dev_priv->chipset >= 0x40) {
841 getparam->value = nv_rd32(dev, NV40_PMC_GRAPH_UNITS);
842 break;
843 }
844 /* FALLTHRU */
820 default: 845 default:
821 NV_ERROR(dev, "unknown parameter %lld\n", getparam->param); 846 NV_ERROR(dev, "unknown parameter %lld\n", getparam->param);
822 return -EINVAL; 847 return -EINVAL;
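The new NOUVEAU_GETPARAM_GRAPH_UNITS case only breaks out when the chipset is recent enough to have the register; otherwise it deliberately drops into the default error path, which is what the /* FALLTHRU */ comment documents. A small sketch of that conditional fall-through, with hypothetical names and a stand-in value for the register read:

#include <stdio.h>

#define TOY_GETPARAM_GRAPH_UNITS 1

static int toy_getparam(int param, int chipset, long *value)
{
	switch (param) {
	case TOY_GETPARAM_GRAPH_UNITS:
		if (chipset >= 0x40) {
			*value = 0x12345;  /* stand-in for the register read */
			break;
		}
		/* FALLTHRU: older chipsets don't have the register */
	default:
		fprintf(stderr, "unknown parameter %d\n", param);
		return -1;
	}
	return 0;
}

int main(void)
{
	long v;

	return toy_getparam(TOY_GETPARAM_GRAPH_UNITS, 0x50, &v);
}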
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index d910873c1368..fd01caabd5c3 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -27,7 +27,7 @@
27#include "nouveau_dma.h" 27#include "nouveau_dma.h"
28#include "nouveau_fbcon.h" 28#include "nouveau_fbcon.h"
29 29
30static void 30void
31nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) 31nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
32{ 32{
33 struct nouveau_fbcon_par *par = info->par; 33 struct nouveau_fbcon_par *par = info->par;
@@ -54,7 +54,7 @@ nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
54 FIRE_RING(chan); 54 FIRE_RING(chan);
55} 55}
56 56
57static void 57void
58nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) 58nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
59{ 59{
60 struct nouveau_fbcon_par *par = info->par; 60 struct nouveau_fbcon_par *par = info->par;
@@ -88,7 +88,7 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
88 FIRE_RING(chan); 88 FIRE_RING(chan);
89} 89}
90 90
91static void 91void
92nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) 92nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
93{ 93{
94 struct nouveau_fbcon_par *par = info->par; 94 struct nouveau_fbcon_par *par = info->par;
@@ -307,9 +307,6 @@ nv04_fbcon_accel_init(struct fb_info *info)
307 307
308 FIRE_RING(chan); 308 FIRE_RING(chan);
309 309
310 info->fbops->fb_fillrect = nv04_fbcon_fillrect;
311 info->fbops->fb_copyarea = nv04_fbcon_copyarea;
312 info->fbops->fb_imageblit = nv04_fbcon_imageblit;
313 return 0; 310 return 0;
314} 311}
315 312
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 40b7360841f8..d1a651e3400c 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -298,14 +298,17 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
298static void 298static void
299nv50_crtc_destroy(struct drm_crtc *crtc) 299nv50_crtc_destroy(struct drm_crtc *crtc)
300{ 300{
301 struct drm_device *dev = crtc->dev; 301 struct drm_device *dev;
302 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 302 struct nouveau_crtc *nv_crtc;
303
304 NV_DEBUG_KMS(dev, "\n");
305 303
306 if (!crtc) 304 if (!crtc)
307 return; 305 return;
308 306
307 dev = crtc->dev;
308 nv_crtc = nouveau_crtc(crtc);
309
310 NV_DEBUG_KMS(dev, "\n");
311
309 drm_crtc_cleanup(&nv_crtc->base); 312 drm_crtc_cleanup(&nv_crtc->base);
310 313
311 nv50_cursor_fini(nv_crtc); 314 nv50_cursor_fini(nv_crtc);
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index e4f279ee61cf..0f57cdf7ccb2 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -3,7 +3,7 @@
3#include "nouveau_dma.h" 3#include "nouveau_dma.h"
4#include "nouveau_fbcon.h" 4#include "nouveau_fbcon.h"
5 5
6static void 6void
7nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) 7nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
8{ 8{
9 struct nouveau_fbcon_par *par = info->par; 9 struct nouveau_fbcon_par *par = info->par;
@@ -46,7 +46,7 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
46 FIRE_RING(chan); 46 FIRE_RING(chan);
47} 47}
48 48
49static void 49void
50nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) 50nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
51{ 51{
52 struct nouveau_fbcon_par *par = info->par; 52 struct nouveau_fbcon_par *par = info->par;
@@ -81,7 +81,7 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
81 FIRE_RING(chan); 81 FIRE_RING(chan);
82} 82}
83 83
84static void 84void
85nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) 85nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
86{ 86{
87 struct nouveau_fbcon_par *par = info->par; 87 struct nouveau_fbcon_par *par = info->par;
@@ -262,9 +262,6 @@ nv50_fbcon_accel_init(struct fb_info *info)
262 OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys + 262 OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys +
263 dev_priv->vm_vram_base); 263 dev_priv->vm_vram_base);
264 264
265 info->fbops->fb_fillrect = nv50_fbcon_fillrect;
266 info->fbops->fb_copyarea = nv50_fbcon_copyarea;
267 info->fbops->fb_imageblit = nv50_fbcon_imageblit;
268 return 0; 265 return 0;
269} 266}
270 267
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index 32b244bcb482..204a79ff10f4 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -317,17 +317,20 @@ void
317nv50_fifo_destroy_context(struct nouveau_channel *chan) 317nv50_fifo_destroy_context(struct nouveau_channel *chan)
318{ 318{
319 struct drm_device *dev = chan->dev; 319 struct drm_device *dev = chan->dev;
320 struct nouveau_gpuobj_ref *ramfc = chan->ramfc;
320 321
321 NV_DEBUG(dev, "ch%d\n", chan->id); 322 NV_DEBUG(dev, "ch%d\n", chan->id);
322 323
323 nouveau_gpuobj_ref_del(dev, &chan->ramfc); 324 /* This will ensure the channel is seen as disabled. */
324 nouveau_gpuobj_ref_del(dev, &chan->cache); 325 chan->ramfc = NULL;
325
326 nv50_fifo_channel_disable(dev, chan->id, false); 326 nv50_fifo_channel_disable(dev, chan->id, false);
327 327
328 /* Dummy channel, also used on ch 127 */ 328 /* Dummy channel, also used on ch 127 */
329 if (chan->id == 0) 329 if (chan->id == 0)
330 nv50_fifo_channel_disable(dev, 127, false); 330 nv50_fifo_channel_disable(dev, 127, false);
331
332 nouveau_gpuobj_ref_del(dev, &ramfc);
333 nouveau_gpuobj_ref_del(dev, &chan->cache);
331} 334}
332 335
333int 336int
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 20319e59d368..6d504801b514 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -165,6 +165,12 @@ nv50_graph_channel(struct drm_device *dev)
165 uint32_t inst; 165 uint32_t inst;
166 int i; 166 int i;
167 167
168 /* Be sure we're not in the middle of a context switch or bad things
169 * will happen, such as unloading the wrong pgraph context.
170 */
171 if (!nv_wait(0x400300, 0x00000001, 0x00000000))
172 NV_ERROR(dev, "Ctxprog is still running\n");
173
168 inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR); 174 inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
169 if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED)) 175 if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
170 return NULL; 176 return NULL;
@@ -275,7 +281,7 @@ nv50_graph_load_context(struct nouveau_channel *chan)
275int 281int
276nv50_graph_unload_context(struct drm_device *dev) 282nv50_graph_unload_context(struct drm_device *dev)
277{ 283{
278 uint32_t inst, fifo = nv_rd32(dev, 0x400500); 284 uint32_t inst;
279 285
280 inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR); 286 inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
281 if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED)) 287 if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
@@ -283,12 +289,10 @@ nv50_graph_unload_context(struct drm_device *dev)
283 inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE; 289 inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE;
284 290
285 nouveau_wait_for_idle(dev); 291 nouveau_wait_for_idle(dev);
286 nv_wr32(dev, 0x400500, fifo & ~1);
287 nv_wr32(dev, 0x400784, inst); 292 nv_wr32(dev, 0x400784, inst);
288 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20); 293 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20);
289 nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01); 294 nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01);
290 nouveau_wait_for_idle(dev); 295 nouveau_wait_for_idle(dev);
291 nv_wr32(dev, 0x400500, fifo);
292 296
293 nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst); 297 nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst);
294 return 0; 298 return 0;
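nv50_graph_channel() now refuses to trust NV50_PGRAPH_CTXCTL_CUR until nv_wait() confirms the ctxprog has gone idle, since reading it in the middle of a context switch could unload the wrong context. A stand-alone sketch of that poll-until-idle guard, with a fake register in place of MMIO and a simple retry counter instead of a timed wait:

#include <stdio.h>

static unsigned int reg_ctxctl;  /* pretend MMIO register; 0 == idle */

static unsigned int fake_read(void) { return reg_ctxctl; }

/* nv_wait()-style helper: poll until (reg & mask) == value or give up. */
static int wait_mask(unsigned int mask, unsigned int value, int tries)
{
	while (tries--) {
		if ((fake_read() & mask) == value)
			return 1;
	}
	return 0;
}

int main(void)
{
	if (!wait_mask(0x1, 0x0, 1000))
		fprintf(stderr, "Ctxprog is still running\n");
	else
		puts("safe to read the current-channel register");
	return 0;
}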
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index ecf1936b8224..c2fff543b06f 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -101,6 +101,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
101 struct nouveau_encoder *nvenc = nouveau_encoder(enc); 101 struct nouveau_encoder *nvenc = nouveau_encoder(enc);
102 102
103 if (nvenc == nv_encoder || 103 if (nvenc == nv_encoder ||
104 nvenc->disconnect != nv50_sor_disconnect ||
104 nvenc->dcb->or != nv_encoder->dcb->or) 105 nvenc->dcb->or != nv_encoder->dcb->or)
105 continue; 106 continue;
106 107
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
index 5982321be4d5..1c02d23f6fcc 100644
--- a/drivers/gpu/drm/radeon/Kconfig
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -1,10 +1,14 @@
1config DRM_RADEON_KMS 1config DRM_RADEON_KMS
2 bool "Enable modesetting on radeon by default" 2 bool "Enable modesetting on radeon by default - NEW DRIVER"
3 depends on DRM_RADEON 3 depends on DRM_RADEON
4 help 4 help
5 Choose this option if you want kernel modesetting enabled by default, 5 Choose this option if you want kernel modesetting enabled by default.
6 and you have a new enough userspace to support this. Running old 6
7 userspaces with this enabled will cause pain. 7 This is a completely new driver. It's only part of the existing drm
8 for compatibility reasons. It requires an entirely different graphics
9 stack above it and works very differently from the old drm stack.
10 i.e. don't enable this unless you know what you are doing; it may
11 cause issues or bugs compared to the previous userspace driver stack.
8 12
9 When kernel modesetting is enabled the IOCTL of radeon/drm 13 When kernel modesetting is enabled the IOCTL of radeon/drm
10 driver are considered as invalid and an error message is printed 14 driver are considered as invalid and an error message is printed
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index e3b44562d265..2a3df5599ab4 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -24,6 +24,7 @@
24 24
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/sched.h> 26#include <linux/sched.h>
27#include <asm/unaligned.h>
27 28
28#define ATOM_DEBUG 29#define ATOM_DEBUG
29 30
@@ -212,7 +213,9 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
212 case ATOM_ARG_PS: 213 case ATOM_ARG_PS:
213 idx = U8(*ptr); 214 idx = U8(*ptr);
214 (*ptr)++; 215 (*ptr)++;
215 val = le32_to_cpu(ctx->ps[idx]); 216 /* get_unaligned_le32 avoids unaligned accesses from atombios
217 * tables, noticed on a DEC Alpha. */
218 val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
216 if (print) 219 if (print)
217 DEBUG("PS[0x%02X,0x%04X]", idx, val); 220 DEBUG("PS[0x%02X,0x%04X]", idx, val);
218 break; 221 break;
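The atom.c change replaces a direct le32_to_cpu() load of a parameter-space entry with get_unaligned_le32(), because atombios tables are byte-packed and a raw 32-bit load can fault on strict-alignment CPUs (the DEC Alpha noted in the comment). A user-space sketch of what an unaligned-safe little-endian read looks like, assembled byte by byte:

#include <stdint.h>
#include <stdio.h>

/* Builds the 32-bit value from individual bytes, so no 32-bit load is
 * ever issued at an unaligned address. */
static uint32_t read_le32_unaligned(const void *p)
{
	const uint8_t *b = p;

	return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
	       ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

int main(void)
{
	uint8_t buf[8] = { 0, 0x78, 0x56, 0x34, 0x12 };

	printf("0x%08x\n", read_le32_unaligned(buf + 1));  /* 0x12345678 */
	return 0;
}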
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 71060114d5de..99915a682d59 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -332,11 +332,13 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
332 PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args; 332 PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args;
333 int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction); 333 int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
334 unsigned char *base; 334 unsigned char *base;
335 int retry_count = 0;
335 336
336 memset(&args, 0, sizeof(args)); 337 memset(&args, 0, sizeof(args));
337 338
338 base = (unsigned char *)rdev->mode_info.atom_context->scratch; 339 base = (unsigned char *)rdev->mode_info.atom_context->scratch;
339 340
341retry:
340 memcpy(base, req_bytes, num_bytes); 342 memcpy(base, req_bytes, num_bytes);
341 343
342 args.lpAuxRequest = 0; 344 args.lpAuxRequest = 0;
@@ -347,10 +349,12 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
347 349
348 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 350 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
349 351
350 if (args.ucReplyStatus) { 352 if (args.ucReplyStatus && !args.ucDataOutLen) {
351 DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x\n", 353 if (args.ucReplyStatus == 0x20 && retry_count++ < 10)
354 goto retry;
355 DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n",
352 req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3], 356 req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3],
353 chan->rec.i2c_id, args.ucReplyStatus); 357 chan->rec.i2c_id, args.ucReplyStatus, retry_count);
354 return false; 358 return false;
355 } 359 }
356 360
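The retry added above resends the AUX channel transaction when the reply status is 0x20 (deferred/busy), giving up after ten attempts. A rough userspace C sketch of that retry pattern; AUX_STATUS_DEFER, AUX_MAX_RETRIES and do_aux_transaction() are illustrative stand-ins, not radeon symbols:

#include <stdbool.h>
#include <stdio.h>

#define AUX_STATUS_DEFER 0x20   /* "try again later" reply status */
#define AUX_MAX_RETRIES  10

/* Stand-in for the atombios table call; returns 0 on success. */
static unsigned char do_aux_transaction(void)
{
    return 0;                   /* stub so the sketch links */
}

static bool aux_transfer_with_retry(void)
{
    int retry_count = 0;
    unsigned char status;

    do {
        status = do_aux_transaction();
        if (status == 0)
            return true;        /* transaction completed */
    } while (status == AUX_STATUS_DEFER && retry_count++ < AUX_MAX_RETRIES);

    fprintf(stderr, "aux transfer failed (status 0x%02x) after %d retries\n",
            status, retry_count);
    return false;
}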
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index a1198d99cdf9..2ffcf5a03551 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1950,6 +1950,13 @@ int r600_resume(struct radeon_device *rdev)
1950 DRM_ERROR("radeon: failled testing IB (%d).\n", r); 1950 DRM_ERROR("radeon: failled testing IB (%d).\n", r);
1951 return r; 1951 return r;
1952 } 1952 }
1953
1954 r = r600_audio_init(rdev);
1955 if (r) {
1956 DRM_ERROR("radeon: audio resume failed\n");
1957 return r;
1958 }
1959
1953 return r; 1960 return r;
1954} 1961}
1955 1962
@@ -1957,6 +1964,7 @@ int r600_suspend(struct radeon_device *rdev)
1957{ 1964{
1958 int r; 1965 int r;
1959 1966
1967 r600_audio_fini(rdev);
1960 /* FIXME: we should wait for ring to be empty */ 1968 /* FIXME: we should wait for ring to be empty */
1961 r600_cp_stop(rdev); 1969 r600_cp_stop(rdev);
1962 rdev->cp.ready = false; 1970 rdev->cp.ready = false;
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index b1c1d3433454..0dcb6904c4ff 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -261,7 +261,6 @@ void r600_audio_fini(struct radeon_device *rdev)
261 if (!r600_audio_chipset_supported(rdev)) 261 if (!r600_audio_chipset_supported(rdev))
262 return; 262 return;
263 263
264 WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000);
265
266 del_timer(&rdev->audio_timer); 264 del_timer(&rdev->audio_timer);
265 WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000);
267} 266}
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index fa82ca74324e..2dcda6115874 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -287,6 +287,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
287 *connector_type = DRM_MODE_CONNECTOR_DVID; 287 *connector_type = DRM_MODE_CONNECTOR_DVID;
288 } 288 }
289 289
290 /* XFX Pine Group device rv730 reports no VGA DDC lines
291 * even though they are wired up to record 0x93
292 */
293 if ((dev->pdev->device == 0x9498) &&
294 (dev->pdev->subsystem_vendor == 0x1682) &&
295 (dev->pdev->subsystem_device == 0x2452)) {
296 struct radeon_device *rdev = dev->dev_private;
297 *i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93);
298 }
290 return true; 299 return true;
291} 300}
292 301
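The quirk above keys on PCI device 0x9498 with subsystem 1682:2452 before overriding the DDC record. A small C sketch of that ID-matching test, with a simplified stand-in for the pci_dev fields:

#include <stdbool.h>
#include <stdint.h>

struct pci_ids {
    uint16_t device;
    uint16_t subsystem_vendor;
    uint16_t subsystem_device;
};

/* True when the card matches the quirked XFX Pine Group rv730 board. */
static bool is_quirked_board(const struct pci_ids *id)
{
    return id->device == 0x9498 &&
           id->subsystem_vendor == 0x1682 &&
           id->subsystem_device == 0x2452;
}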
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 4ddfd4b5bc51..7932dc4d6b90 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -65,31 +65,42 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
65 if (r) { 65 if (r) {
66 goto out_cleanup; 66 goto out_cleanup;
67 } 67 }
68 start_jiffies = jiffies; 68
69 for (i = 0; i < n; i++) { 69 /* r100 doesn't have dma engine so skip the test */
70 r = radeon_fence_create(rdev, &fence); 70 if (rdev->asic->copy_dma) {
71 if (r) { 71
72 goto out_cleanup; 72 start_jiffies = jiffies;
73 for (i = 0; i < n; i++) {
74 r = radeon_fence_create(rdev, &fence);
75 if (r) {
76 goto out_cleanup;
77 }
78
79 r = radeon_copy_dma(rdev, saddr, daddr,
80 size / RADEON_GPU_PAGE_SIZE, fence);
81
82 if (r) {
83 goto out_cleanup;
84 }
85 r = radeon_fence_wait(fence, false);
86 if (r) {
87 goto out_cleanup;
88 }
89 radeon_fence_unref(&fence);
73 } 90 }
74 r = radeon_copy_dma(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, fence); 91 end_jiffies = jiffies;
75 if (r) { 92 time = end_jiffies - start_jiffies;
76 goto out_cleanup; 93 time = jiffies_to_msecs(time);
94 if (time > 0) {
95 i = ((n * size) >> 10) / time;
96 printk(KERN_INFO "radeon: dma %u bo moves of %ukb from"
97 " %d to %d in %lums (%ukb/ms %ukb/s %uM/s)\n",
98 n, size >> 10,
99 sdomain, ddomain, time,
100 i, i * 1000, (i * 1000) / 1024);
77 } 101 }
78 r = radeon_fence_wait(fence, false);
79 if (r) {
80 goto out_cleanup;
81 }
82 radeon_fence_unref(&fence);
83 }
84 end_jiffies = jiffies;
85 time = end_jiffies - start_jiffies;
86 time = jiffies_to_msecs(time);
87 if (time > 0) {
88 i = ((n * size) >> 10) / time;
89 printk(KERN_INFO "radeon: dma %u bo moves of %ukb from %d to %d"
90 " in %lums (%ukb/ms %ukb/s %uM/s)\n", n, size >> 10,
91 sdomain, ddomain, time, i, i * 1000, (i * 1000) / 1024);
92 } 102 }
103
93 start_jiffies = jiffies; 104 start_jiffies = jiffies;
94 for (i = 0; i < n; i++) { 105 for (i = 0; i < n; i++) {
95 r = radeon_fence_create(rdev, &fence); 106 r = radeon_fence_create(rdev, &fence);
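After the elapsed jiffies are converted to milliseconds, throughput is computed as ((n * size) >> 10) / time, i.e. total kilobytes moved per millisecond, then scaled to kB/s and MB/s for the log line. A small standalone C sketch of that arithmetic with illustrative values:

#include <stdio.h>

int main(void)
{
    unsigned int n = 1024;            /* number of buffer moves */
    unsigned int size = 1024 * 1024;  /* bytes per move */
    unsigned long time_ms = 250;      /* elapsed milliseconds */

    if (time_ms > 0) {
        unsigned int kb_per_ms = ((n * size) >> 10) / time_ms;

        printf("%u moves of %ukb in %lums (%ukb/ms %ukb/s %uM/s)\n",
               n, size >> 10, time_ms,
               kb_per_ms, kb_per_ms * 1000, (kb_per_ms * 1000) / 1024);
    }
    return 0;
}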
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 2d8e5a70f284..238188540017 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -580,16 +580,18 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
580 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 580 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
581 struct drm_encoder *encoder; 581 struct drm_encoder *encoder;
582 struct drm_encoder_helper_funcs *encoder_funcs; 582 struct drm_encoder_helper_funcs *encoder_funcs;
583 bool dret; 583 bool dret = false;
584 enum drm_connector_status ret = connector_status_disconnected; 584 enum drm_connector_status ret = connector_status_disconnected;
585 585
586 encoder = radeon_best_single_encoder(connector); 586 encoder = radeon_best_single_encoder(connector);
587 if (!encoder) 587 if (!encoder)
588 ret = connector_status_disconnected; 588 ret = connector_status_disconnected;
589 589
590 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); 590 if (radeon_connector->ddc_bus) {
591 dret = radeon_ddc_probe(radeon_connector); 591 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
592 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); 592 dret = radeon_ddc_probe(radeon_connector);
593 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
594 }
593 if (dret) { 595 if (dret) {
594 if (radeon_connector->edid) { 596 if (radeon_connector->edid) {
595 kfree(radeon_connector->edid); 597 kfree(radeon_connector->edid);
@@ -740,11 +742,13 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
740 struct drm_mode_object *obj; 742 struct drm_mode_object *obj;
741 int i; 743 int i;
742 enum drm_connector_status ret = connector_status_disconnected; 744 enum drm_connector_status ret = connector_status_disconnected;
743 bool dret; 745 bool dret = false;
744 746
745 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); 747 if (radeon_connector->ddc_bus) {
746 dret = radeon_ddc_probe(radeon_connector); 748 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
747 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); 749 dret = radeon_ddc_probe(radeon_connector);
750 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
751 }
748 if (dret) { 752 if (dret) {
749 if (radeon_connector->edid) { 753 if (radeon_connector->edid) {
750 kfree(radeon_connector->edid); 754 kfree(radeon_connector->edid);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 6a92f994cc26..7e17a362b54b 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -278,7 +278,7 @@ static void radeon_print_display_setup(struct drm_device *dev)
278 DRM_INFO(" %s\n", connector_names[connector->connector_type]); 278 DRM_INFO(" %s\n", connector_names[connector->connector_type]);
279 if (radeon_connector->hpd.hpd != RADEON_HPD_NONE) 279 if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
280 DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]); 280 DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]);
281 if (radeon_connector->ddc_bus) 281 if (radeon_connector->ddc_bus) {
282 DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", 282 DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
283 radeon_connector->ddc_bus->rec.mask_clk_reg, 283 radeon_connector->ddc_bus->rec.mask_clk_reg,
284 radeon_connector->ddc_bus->rec.mask_data_reg, 284 radeon_connector->ddc_bus->rec.mask_data_reg,
@@ -288,6 +288,15 @@ static void radeon_print_display_setup(struct drm_device *dev)
288 radeon_connector->ddc_bus->rec.en_data_reg, 288 radeon_connector->ddc_bus->rec.en_data_reg,
289 radeon_connector->ddc_bus->rec.y_clk_reg, 289 radeon_connector->ddc_bus->rec.y_clk_reg,
290 radeon_connector->ddc_bus->rec.y_data_reg); 290 radeon_connector->ddc_bus->rec.y_data_reg);
291 } else {
292 if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
293 connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
294 connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
295 connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
296 connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
297 connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
298 DRM_INFO(" DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
299 }
291 DRM_INFO(" Encoders:\n"); 300 DRM_INFO(" Encoders:\n");
292 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 301 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
293 radeon_encoder = to_radeon_encoder(encoder); 302 radeon_encoder = to_radeon_encoder(encoder);
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 3ba213d1b06c..d71e346e9ab5 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -248,7 +248,7 @@ int radeonfb_create(struct drm_device *dev,
248 if (ret) 248 if (ret)
249 goto out_unref; 249 goto out_unref;
250 250
251 memset_io(fbptr, 0xff, aligned_size); 251 memset_io(fbptr, 0x0, aligned_size);
252 252
253 strcpy(info->fix.id, "radeondrmfb"); 253 strcpy(info->fix.id, "radeondrmfb");
254 254
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 1a3e909b7bba..c7320ce4567d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1020,6 +1020,12 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
1020 struct ttm_mem_reg *mem) 1020 struct ttm_mem_reg *mem)
1021{ 1021{
1022 int i; 1022 int i;
1023 struct drm_mm_node *node = mem->mm_node;
1024
1025 if (node && placement->lpfn != 0 &&
1026 (node->start < placement->fpfn ||
1027 node->start + node->size > placement->lpfn))
1028 return -1;
1023 1029
1024 for (i = 0; i < placement->num_placement; i++) { 1030 for (i = 0; i < placement->num_placement; i++) {
1025 if ((placement->placement[i] & mem->placement & 1031 if ((placement->placement[i] & mem->placement &
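The added check in ttm_bo_mem_compat() rejects a placement whose node lies outside the caller's requested page-frame window [fpfn, lpfn], with lpfn == 0 meaning unrestricted. A minimal C sketch of that interval test; the struct names here are simplified stand-ins for the TTM types:

#include <stdbool.h>
#include <stdint.h>

struct node  { uint64_t start, size; };  /* allocated range, in pages */
struct place { uint64_t fpfn, lpfn; };   /* allowed range; lpfn == 0 means no limit */

static bool node_fits_placement(const struct node *n, const struct place *p)
{
    if (p->lpfn == 0)
        return true;                     /* no upper bound requested */
    return n->start >= p->fpfn &&
           n->start + n->size <= p->lpfn;
}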
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 135be9688c90..356dc935ec13 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -39,10 +39,10 @@
39#include "ttm/ttm_execbuf_util.h" 39#include "ttm/ttm_execbuf_util.h"
40#include "ttm/ttm_module.h" 40#include "ttm/ttm_module.h"
41 41
42#define VMWGFX_DRIVER_DATE "20090724" 42#define VMWGFX_DRIVER_DATE "20100209"
43#define VMWGFX_DRIVER_MAJOR 0 43#define VMWGFX_DRIVER_MAJOR 1
44#define VMWGFX_DRIVER_MINOR 1 44#define VMWGFX_DRIVER_MINOR 0
45#define VMWGFX_DRIVER_PATCHLEVEL 2 45#define VMWGFX_DRIVER_PATCHLEVEL 0
46#define VMWGFX_FILE_PAGE_OFFSET 0x00100000 46#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
47#define VMWGFX_FIFO_STATIC_SIZE (1024*1024) 47#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
48#define VMWGFX_MAX_RELOCATIONS 2048 48#define VMWGFX_MAX_RELOCATIONS 2048
@@ -113,6 +113,7 @@ struct vmw_fifo_state {
113 unsigned long static_buffer_size; 113 unsigned long static_buffer_size;
114 bool using_bounce_buffer; 114 bool using_bounce_buffer;
115 uint32_t capabilities; 115 uint32_t capabilities;
116 struct mutex fifo_mutex;
116 struct rw_semaphore rwsem; 117 struct rw_semaphore rwsem;
117}; 118};
118 119
@@ -213,7 +214,7 @@ struct vmw_private {
213 * Fencing and IRQs. 214 * Fencing and IRQs.
214 */ 215 */
215 216
216 uint32_t fence_seq; 217 atomic_t fence_seq;
217 wait_queue_head_t fence_queue; 218 wait_queue_head_t fence_queue;
218 wait_queue_head_t fifo_queue; 219 wait_queue_head_t fifo_queue;
219 atomic_t fence_queue_waiters; 220 atomic_t fence_queue_waiters;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 4157547cc6e4..39d43a01d846 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -74,6 +74,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
74 fifo->reserved_size = 0; 74 fifo->reserved_size = 0;
75 fifo->using_bounce_buffer = false; 75 fifo->using_bounce_buffer = false;
76 76
77 mutex_init(&fifo->fifo_mutex);
77 init_rwsem(&fifo->rwsem); 78 init_rwsem(&fifo->rwsem);
78 79
79 /* 80 /*
@@ -117,7 +118,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
117 (unsigned int) min, 118 (unsigned int) min,
118 (unsigned int) fifo->capabilities); 119 (unsigned int) fifo->capabilities);
119 120
120 dev_priv->fence_seq = dev_priv->last_read_sequence; 121 atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
121 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE); 122 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
122 123
123 return vmw_fifo_send_fence(dev_priv, &dummy); 124 return vmw_fifo_send_fence(dev_priv, &dummy);
@@ -283,7 +284,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
283 uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE; 284 uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
284 int ret; 285 int ret;
285 286
286 down_write(&fifo_state->rwsem); 287 mutex_lock(&fifo_state->fifo_mutex);
287 max = ioread32(fifo_mem + SVGA_FIFO_MAX); 288 max = ioread32(fifo_mem + SVGA_FIFO_MAX);
288 min = ioread32(fifo_mem + SVGA_FIFO_MIN); 289 min = ioread32(fifo_mem + SVGA_FIFO_MIN);
289 next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD); 290 next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
@@ -351,7 +352,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
351 } 352 }
352out_err: 353out_err:
353 fifo_state->reserved_size = 0; 354 fifo_state->reserved_size = 0;
354 up_write(&fifo_state->rwsem); 355 mutex_unlock(&fifo_state->fifo_mutex);
355 return NULL; 356 return NULL;
356} 357}
357 358
@@ -426,6 +427,7 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
426 427
427 } 428 }
428 429
430 down_write(&fifo_state->rwsem);
429 if (fifo_state->using_bounce_buffer || reserveable) { 431 if (fifo_state->using_bounce_buffer || reserveable) {
430 next_cmd += bytes; 432 next_cmd += bytes;
431 if (next_cmd >= max) 433 if (next_cmd >= max)
@@ -437,8 +439,9 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
437 if (reserveable) 439 if (reserveable)
438 iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED); 440 iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
439 mb(); 441 mb();
440 vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
441 up_write(&fifo_state->rwsem); 442 up_write(&fifo_state->rwsem);
443 vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
444 mutex_unlock(&fifo_state->fifo_mutex);
442} 445}
443 446
444int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence) 447int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
@@ -451,9 +454,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
451 454
452 fm = vmw_fifo_reserve(dev_priv, bytes); 455 fm = vmw_fifo_reserve(dev_priv, bytes);
453 if (unlikely(fm == NULL)) { 456 if (unlikely(fm == NULL)) {
454 down_write(&fifo_state->rwsem); 457 *sequence = atomic_read(&dev_priv->fence_seq);
455 *sequence = dev_priv->fence_seq;
456 up_write(&fifo_state->rwsem);
457 ret = -ENOMEM; 458 ret = -ENOMEM;
458 (void)vmw_fallback_wait(dev_priv, false, true, *sequence, 459 (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
459 false, 3*HZ); 460 false, 3*HZ);
@@ -461,7 +462,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
461 } 462 }
462 463
463 do { 464 do {
464 *sequence = dev_priv->fence_seq++; 465 *sequence = atomic_add_return(1, &dev_priv->fence_seq);
465 } while (*sequence == 0); 466 } while (*sequence == 0);
466 467
467 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) { 468 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
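With fence_seq converted to an atomic_t, vmw_fifo_send_fence() takes the next value with atomic_add_return() and skips zero so that 0 stays reserved for "no fence". A userspace C11 sketch of the same allocation pattern using <stdatomic.h> instead of the kernel atomic API:

#include <stdatomic.h>
#include <stdint.h>

static atomic_uint fence_seq;   /* monotonically increasing fence counter */

/* Return the next non-zero fence sequence number. */
static uint32_t next_fence_sequence(void)
{
    uint32_t seq;

    do {
        /* fetch_add returns the old value; +1 gives the new one,
         * mirroring the kernel's atomic_add_return(). */
        seq = atomic_fetch_add(&fence_seq, 1) + 1;
    } while (seq == 0);         /* 0 is reserved for "no fence" */

    return seq;
}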
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 778851f9f1d6..1c7a316454d8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -48,6 +48,12 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
48 case DRM_VMW_PARAM_FIFO_OFFSET: 48 case DRM_VMW_PARAM_FIFO_OFFSET:
49 param->value = dev_priv->mmio_start; 49 param->value = dev_priv->mmio_start;
50 break; 50 break;
51 case DRM_VMW_PARAM_HW_CAPS:
52 param->value = dev_priv->capabilities;
53 break;
54 case DRM_VMW_PARAM_FIFO_CAPS:
55 param->value = dev_priv->fifo.capabilities;
56 break;
51 default: 57 default:
52 DRM_ERROR("Illegal vmwgfx get param request: %d\n", 58 DRM_ERROR("Illegal vmwgfx get param request: %d\n",
53 param->param); 59 param->param);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index d40086fc8647..4d7cb5393860 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -85,19 +85,12 @@ bool vmw_fence_signaled(struct vmw_private *dev_priv,
85 return true; 85 return true;
86 86
87 /** 87 /**
88 * Below is to signal stale fences that have wrapped.
89 * First, block fence submission.
90 */
91
92 down_read(&fifo_state->rwsem);
93
94 /**
95 * Then check if the sequence is higher than what we've actually 88 * Then check if the sequence is higher than what we've actually
96 * emitted. Then the fence is stale and signaled. 89 * emitted. Then the fence is stale and signaled.
97 */ 90 */
98 91
99 ret = ((dev_priv->fence_seq - sequence) > VMW_FENCE_WRAP); 92 ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
100 up_read(&fifo_state->rwsem); 93 > VMW_FENCE_WRAP);
101 94
102 return ret; 95 return ret;
103} 96}
@@ -127,7 +120,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
127 120
128 if (fifo_idle) 121 if (fifo_idle)
129 down_read(&fifo_state->rwsem); 122 down_read(&fifo_state->rwsem);
130 signal_seq = dev_priv->fence_seq; 123 signal_seq = atomic_read(&dev_priv->fence_seq);
131 ret = 0; 124 ret = 0;
132 125
133 for (;;) { 126 for (;;) {
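The stale-fence test above now reads the emitted counter atomically and relies on unsigned wrap-around arithmetic: if the emitted value minus the queried sequence exceeds VMW_FENCE_WRAP, the fence is treated as already signaled. A small C sketch of that comparison; the threshold below is illustrative, not the driver's constant:

#include <stdbool.h>
#include <stdint.h>

#define FENCE_WRAP (1u << 24)   /* illustrative stale-fence threshold */

/* True if 'sequence' is so far behind 'emitted' that it must already
 * have completed (or was emitted before the counter wrapped). */
static bool fence_is_stale(uint32_t emitted, uint32_t sequence)
{
    return (uint32_t)(emitted - sequence) > FENCE_WRAP;
}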
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index eeba6d1d06e4..31f9afed0a63 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -769,10 +769,10 @@ int vmw_kms_init(struct vmw_private *dev_priv)
769 769
770 drm_mode_config_init(dev); 770 drm_mode_config_init(dev);
771 dev->mode_config.funcs = &vmw_kms_funcs; 771 dev->mode_config.funcs = &vmw_kms_funcs;
772 dev->mode_config.min_width = 640; 772 dev->mode_config.min_width = 1;
773 dev->mode_config.min_height = 480; 773 dev->mode_config.min_height = 1;
774 dev->mode_config.max_width = 2048; 774 dev->mode_config.max_width = dev_priv->fb_max_width;
775 dev->mode_config.max_height = 2048; 775 dev->mode_config.max_height = dev_priv->fb_max_height;
776 776
777 ret = vmw_kms_init_legacy_display_system(dev_priv); 777 ret = vmw_kms_init_legacy_display_system(dev_priv);
778 778
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index c7efbd47ab84..f8fbbc67a406 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -35,11 +35,6 @@
35#define VMW_RES_SURFACE ttm_driver_type1 35#define VMW_RES_SURFACE ttm_driver_type1
36#define VMW_RES_STREAM ttm_driver_type2 36#define VMW_RES_STREAM ttm_driver_type2
37 37
38/* XXX: This isn't a real hardware flag, but just a hack for kernel to
39 * know about primary surfaces. Find a better way to accomplish this.
40 */
41#define SVGA3D_SURFACE_HINT_SCANOUT (1 << 9)
42
43struct vmw_user_context { 38struct vmw_user_context {
44 struct ttm_base_object base; 39 struct ttm_base_object base;
45 struct vmw_resource res; 40 struct vmw_resource res;
@@ -579,6 +574,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
579 574
580 srf->flags = req->flags; 575 srf->flags = req->flags;
581 srf->format = req->format; 576 srf->format = req->format;
577 srf->scanout = req->scanout;
582 memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels)); 578 memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
583 srf->num_sizes = 0; 579 srf->num_sizes = 0;
584 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) 580 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
@@ -604,16 +600,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
604 if (unlikely(ret != 0)) 600 if (unlikely(ret != 0))
605 goto out_err1; 601 goto out_err1;
606 602
607 if (srf->flags & SVGA3D_SURFACE_HINT_SCANOUT) {
608 /* we should not send this flag down to hardware since
609 * its not a official one
610 */
611 srf->flags &= ~SVGA3D_SURFACE_HINT_SCANOUT;
612 srf->scanout = true;
613 } else {
614 srf->scanout = false;
615 }
616
617 if (srf->scanout && 603 if (srf->scanout &&
618 srf->num_sizes == 1 && 604 srf->num_sizes == 1 &&
619 srf->sizes[0].width == 64 && 605 srf->sizes[0].width == 64 &&
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index 1ac0c93603c9..24b56dc54597 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -961,7 +961,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
961 remaining -= 7; 961 remaining -= 7;
962 pr_devel("client 0x%p called 'target'\n", priv); 962 pr_devel("client 0x%p called 'target'\n", priv);
963 /* if target is default */ 963 /* if target is default */
964 if (!strncmp(buf, "default", 7)) 964 if (!strncmp(kbuf, "default", 7))
965 pdev = pci_dev_get(vga_default_device()); 965 pdev = pci_dev_get(vga_default_device());
966 else { 966 else {
967 if (!vga_pci_str_to_vars(curr_pos, remaining, 967 if (!vga_pci_str_to_vars(curr_pos, remaining,
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index cc9b5940fa97..875e34e0b235 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2115,9 +2115,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
2115 if (ret) 2115 if (ret)
2116 goto err1; 2116 goto err1;
2117 2117
2118 if (cma_loopback_addr(addr)) { 2118 if (!cma_any_addr(addr)) {
2119 ret = cma_bind_loopback(id_priv);
2120 } else if (!cma_zero_addr(addr)) {
2121 ret = rdma_translate_ip(addr, &id->route.addr.dev_addr); 2119 ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
2122 if (ret) 2120 if (ret)
2123 goto err1; 2121 goto err1;
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 9774bdfaa482..d8c0c8d6992c 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -1141,7 +1141,14 @@ static void psmouse_cleanup(struct serio *serio)
1141 psmouse_deactivate(parent); 1141 psmouse_deactivate(parent);
1142 } 1142 }
1143 1143
1144 psmouse_deactivate(psmouse); 1144 psmouse_set_state(psmouse, PSMOUSE_INITIALIZING);
1145
1146 /*
1147 * Disable stream mode so cleanup routine can proceed undisturbed.
1148 */
1149 if (ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_DISABLE))
1150 printk(KERN_WARNING "psmouse.c: Failed to disable mouse on %s\n",
1151 psmouse->ps2dev.serio->phys);
1145 1152
1146 if (psmouse->cleanup) 1153 if (psmouse->cleanup)
1147 psmouse->cleanup(psmouse); 1154 psmouse->cleanup(psmouse);
diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
index 54abf9e303b7..f1c8cae70b4b 100644
--- a/drivers/md/dm-log-userspace-transfer.c
+++ b/drivers/md/dm-log-userspace-transfer.c
@@ -172,11 +172,15 @@ int dm_consult_userspace(const char *uuid, uint64_t luid, int request_type,
172{ 172{
173 int r = 0; 173 int r = 0;
174 size_t dummy = 0; 174 size_t dummy = 0;
175 int overhead_size = 175 int overhead_size = sizeof(struct dm_ulog_request) + sizeof(struct cn_msg);
176 sizeof(struct dm_ulog_request *) + sizeof(struct cn_msg);
177 struct dm_ulog_request *tfr = prealloced_ulog_tfr; 176 struct dm_ulog_request *tfr = prealloced_ulog_tfr;
178 struct receiving_pkg pkg; 177 struct receiving_pkg pkg;
179 178
179 /*
180 * Given the space needed to hold the 'struct cn_msg' and
181 * 'struct dm_ulog_request' - do we have enough payload
182 * space remaining?
183 */
180 if (data_size > (DM_ULOG_PREALLOCED_SIZE - overhead_size)) { 184 if (data_size > (DM_ULOG_PREALLOCED_SIZE - overhead_size)) {
181 DMINFO("Size of tfr exceeds preallocated size"); 185 DMINFO("Size of tfr exceeds preallocated size");
182 return -EINVAL; 186 return -EINVAL;
@@ -191,7 +195,7 @@ resend:
191 */ 195 */
192 mutex_lock(&dm_ulog_lock); 196 mutex_lock(&dm_ulog_lock);
193 197
194 memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - overhead_size); 198 memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - sizeof(struct cn_msg));
195 memcpy(tfr->uuid, uuid, DM_UUID_LEN); 199 memcpy(tfr->uuid, uuid, DM_UUID_LEN);
196 tfr->luid = luid; 200 tfr->luid = luid;
197 tfr->seq = dm_ulog_seq++; 201 tfr->seq = dm_ulog_seq++;
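The overhead fix replaces sizeof(struct dm_ulog_request *), which is just the size of a pointer, with sizeof(struct dm_ulog_request), so the preallocated-buffer check accounts for the real header. A tiny C sketch showing the difference with a made-up header struct:

#include <stdio.h>

struct header {
    unsigned long long seq;
    char uuid[129];
    unsigned int request_type;
};

int main(void)
{
    /* A pointer is 4 or 8 bytes regardless of what it points to. */
    printf("sizeof(struct header *) = %zu\n", sizeof(struct header *));
    /* The struct itself is much larger; this is what the check needs. */
    printf("sizeof(struct header)   = %zu\n", sizeof(struct header));
    return 0;
}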
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index ad779bd13aec..6c1046df81f6 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -724,7 +724,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
724 /* 724 /*
725 * Dispatch io. 725 * Dispatch io.
726 */ 726 */
727 if (unlikely(ms->log_failure)) { 727 if (unlikely(ms->log_failure) && errors_handled(ms)) {
728 spin_lock_irq(&ms->lock); 728 spin_lock_irq(&ms->lock);
729 bio_list_merge(&ms->failures, &sync); 729 bio_list_merge(&ms->failures, &sync);
730 spin_unlock_irq(&ms->lock); 730 spin_unlock_irq(&ms->lock);
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index 5f19ceb6fe91..168bd38f5006 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -660,10 +660,9 @@ void dm_rh_recovery_end(struct dm_region *reg, int success)
660 spin_lock_irq(&rh->region_lock); 660 spin_lock_irq(&rh->region_lock);
661 if (success) 661 if (success)
662 list_add(&reg->list, &reg->rh->recovered_regions); 662 list_add(&reg->list, &reg->rh->recovered_regions);
663 else { 663 else
664 reg->state = DM_RH_NOSYNC;
665 list_add(&reg->list, &reg->rh->failed_recovered_regions); 664 list_add(&reg->list, &reg->rh->failed_recovered_regions);
666 } 665
667 spin_unlock_irq(&rh->region_lock); 666 spin_unlock_irq(&rh->region_lock);
668 667
669 rh->wakeup_workers(rh->context); 668 rh->wakeup_workers(rh->context);
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 7d08879689ac..c097d8a4823d 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -254,7 +254,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
254 * Issue the synchronous I/O from a different thread 254 * Issue the synchronous I/O from a different thread
255 * to avoid generic_make_request recursion. 255 * to avoid generic_make_request recursion.
256 */ 256 */
257 INIT_WORK(&req.work, do_metadata); 257 INIT_WORK_ON_STACK(&req.work, do_metadata);
258 queue_work(ps->metadata_wq, &req.work); 258 queue_work(ps->metadata_wq, &req.work);
259 flush_workqueue(ps->metadata_wq); 259 flush_workqueue(ps->metadata_wq);
260 260
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index e0efc1adcaff..bd58703ee8f6 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -110,7 +110,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
110 } 110 }
111 111
112 stripes = simple_strtoul(argv[0], &end, 10); 112 stripes = simple_strtoul(argv[0], &end, 10);
113 if (*end) { 113 if (!stripes || *end) {
114 ti->error = "Invalid stripe count"; 114 ti->error = "Invalid stripe count";
115 return -EINVAL; 115 return -EINVAL;
116 } 116 }
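The stricter stripe_ctr() check now rejects both trailing junk after the number and a zero stripe count. A userspace C sketch of the same parse-and-validate step using strtoul (parse_stripe_count is an illustrative name):

#include <stdio.h>
#include <stdlib.h>

/* Parse a positive stripe count; returns -1 on any malformed input. */
static long parse_stripe_count(const char *arg)
{
    char *end;
    unsigned long stripes = strtoul(arg, &end, 10);

    if (end == arg || *end != '\0' || stripes == 0) {
        fprintf(stderr, "Invalid stripe count: '%s'\n", arg);
        return -1;
    }
    return (long)stripes;
}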
diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
index f53392df7b97..f91b40942e07 100644
--- a/drivers/md/dm-sysfs.c
+++ b/drivers/md/dm-sysfs.c
@@ -80,20 +80,12 @@ static struct sysfs_ops dm_sysfs_ops = {
80}; 80};
81 81
82/* 82/*
83 * The sysfs structure is embedded in md struct, nothing to do here
84 */
85static void dm_sysfs_release(struct kobject *kobj)
86{
87}
88
89/*
90 * dm kobject is embedded in mapped_device structure 83 * dm kobject is embedded in mapped_device structure
91 * no need to define release function here 84 * no need to define release function here
92 */ 85 */
93static struct kobj_type dm_ktype = { 86static struct kobj_type dm_ktype = {
94 .sysfs_ops = &dm_sysfs_ops, 87 .sysfs_ops = &dm_sysfs_ops,
95 .default_attrs = dm_attrs, 88 .default_attrs = dm_attrs,
96 .release = dm_sysfs_release
97}; 89};
98 90
99/* 91/*
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 3167480b532c..aa4e2aa86d49 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1595,10 +1595,15 @@ static int dm_prep_fn(struct request_queue *q, struct request *rq)
1595 return BLKPREP_OK; 1595 return BLKPREP_OK;
1596} 1596}
1597 1597
1598static void map_request(struct dm_target *ti, struct request *clone, 1598/*
1599 struct mapped_device *md) 1599 * Returns:
1600 * 0 : the request has been processed (not requeued)
1601 * !0 : the request has been requeued
1602 */
1603static int map_request(struct dm_target *ti, struct request *clone,
1604 struct mapped_device *md)
1600{ 1605{
1601 int r; 1606 int r, requeued = 0;
1602 struct dm_rq_target_io *tio = clone->end_io_data; 1607 struct dm_rq_target_io *tio = clone->end_io_data;
1603 1608
1604 /* 1609 /*
@@ -1625,6 +1630,7 @@ static void map_request(struct dm_target *ti, struct request *clone,
1625 case DM_MAPIO_REQUEUE: 1630 case DM_MAPIO_REQUEUE:
1626 /* The target wants to requeue the I/O */ 1631 /* The target wants to requeue the I/O */
1627 dm_requeue_unmapped_request(clone); 1632 dm_requeue_unmapped_request(clone);
1633 requeued = 1;
1628 break; 1634 break;
1629 default: 1635 default:
1630 if (r > 0) { 1636 if (r > 0) {
@@ -1636,6 +1642,8 @@ static void map_request(struct dm_target *ti, struct request *clone,
1636 dm_kill_unmapped_request(clone, r); 1642 dm_kill_unmapped_request(clone, r);
1637 break; 1643 break;
1638 } 1644 }
1645
1646 return requeued;
1639} 1647}
1640 1648
1641/* 1649/*
@@ -1677,12 +1685,17 @@ static void dm_request_fn(struct request_queue *q)
1677 atomic_inc(&md->pending[rq_data_dir(clone)]); 1685 atomic_inc(&md->pending[rq_data_dir(clone)]);
1678 1686
1679 spin_unlock(q->queue_lock); 1687 spin_unlock(q->queue_lock);
1680 map_request(ti, clone, md); 1688 if (map_request(ti, clone, md))
1689 goto requeued;
1690
1681 spin_lock_irq(q->queue_lock); 1691 spin_lock_irq(q->queue_lock);
1682 } 1692 }
1683 1693
1684 goto out; 1694 goto out;
1685 1695
1696requeued:
1697 spin_lock_irq(q->queue_lock);
1698
1686plug_and_out: 1699plug_and_out:
1687 if (!elv_queue_empty(q)) 1700 if (!elv_queue_empty(q))
1688 /* Some requests still remain, retry later */ 1701 /* Some requests still remain, retry later */
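map_request() now reports whether the clone was requeued, and dm_request_fn() re-acquires the queue lock on that path instead of falling through. A rough C sketch of that dispatch-and-requeue control flow; the enum and function names are illustrative, not device-mapper symbols:

enum map_result { MAP_SUBMITTED, MAP_REQUEUE };

/* Stand-in for the target's map callback. */
static enum map_result target_map(void *clone)
{
    (void)clone;
    return MAP_SUBMITTED;
}

/* Returns 0 if the request was dispatched, nonzero if it was requeued,
 * so the caller knows whether to restart its queue-lock/plug handling. */
static int map_request(void *clone)
{
    int requeued = 0;

    switch (target_map(clone)) {
    case MAP_REQUEUE:
        /* put the request back on the queue for a later retry */
        requeued = 1;
        break;
    case MAP_SUBMITTED:
    default:
        break;
    }
    return requeued;
}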
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 57752751712b..81279b3d694c 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1796,7 +1796,7 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
1796 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "task abort: " 1796 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "task abort: "
1797 "Command not in the active list! (sc=%p)\n", ioc->name, 1797 "Command not in the active list! (sc=%p)\n", ioc->name,
1798 SCpnt)); 1798 SCpnt));
1799 retval = 0; 1799 retval = SUCCESS;
1800 goto out; 1800 goto out;
1801 } 1801 }
1802 1802
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index b9f1e84897cc..e7f8027165e6 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -74,6 +74,9 @@ static void mmc_test_prepare_mrq(struct mmc_test_card *test,
74 } 74 }
75 75
76 mrq->cmd->arg = dev_addr; 76 mrq->cmd->arg = dev_addr;
77 if (!mmc_card_blockaddr(test->card))
78 mrq->cmd->arg <<= 9;
79
77 mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC; 80 mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;
78 81
79 if (blocks == 1) 82 if (blocks == 1)
@@ -190,7 +193,7 @@ static int __mmc_test_prepare(struct mmc_test_card *test, int write)
190 } 193 }
191 194
192 for (i = 0;i < BUFFER_SIZE / 512;i++) { 195 for (i = 0;i < BUFFER_SIZE / 512;i++) {
193 ret = mmc_test_buffer_transfer(test, test->buffer, i * 512, 512, 1); 196 ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
194 if (ret) 197 if (ret)
195 return ret; 198 return ret;
196 } 199 }
@@ -219,7 +222,7 @@ static int mmc_test_cleanup(struct mmc_test_card *test)
219 memset(test->buffer, 0, 512); 222 memset(test->buffer, 0, 512);
220 223
221 for (i = 0;i < BUFFER_SIZE / 512;i++) { 224 for (i = 0;i < BUFFER_SIZE / 512;i++) {
222 ret = mmc_test_buffer_transfer(test, test->buffer, i * 512, 512, 1); 225 ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
223 if (ret) 226 if (ret)
224 return ret; 227 return ret;
225 } 228 }
@@ -426,7 +429,7 @@ static int mmc_test_transfer(struct mmc_test_card *test,
426 for (i = 0;i < sectors;i++) { 429 for (i = 0;i < sectors;i++) {
427 ret = mmc_test_buffer_transfer(test, 430 ret = mmc_test_buffer_transfer(test,
428 test->buffer + i * 512, 431 test->buffer + i * 512,
429 dev_addr + i * 512, 512, 0); 432 dev_addr + i, 512, 0);
430 if (ret) 433 if (ret)
431 return ret; 434 return ret;
432 } 435 }
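Legacy MMC cards are byte-addressed, so the test harness must shift the sector index left by 9 to get a byte offset, while high-capacity (block-addressed) cards take the sector number directly; that is what the added mmc_card_blockaddr() check selects between. A one-function C sketch of the conversion:

#include <stdbool.h>
#include <stdint.h>

/* Convert a sector number into the read/write command argument for a
 * card that is either block-addressed (high capacity) or byte-addressed. */
static uint32_t mmc_cmd_arg(uint32_t sector, bool block_addressed)
{
    return block_addressed ? sector : sector << 9;  /* 512 bytes per sector */
}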
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index fee6eee7ae5b..006cb2efcd22 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -296,6 +296,7 @@ static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
296 req_hdr->opcode = opcode; 296 req_hdr->opcode = opcode;
297 req_hdr->subsystem = subsystem; 297 req_hdr->subsystem = subsystem;
298 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr)); 298 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
299 req_hdr->version = 0;
299} 300}
300 301
301static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages, 302static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index 3103f4165311..35a06b47587b 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -357,12 +357,34 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
357 u32 fctrl_reg; 357 u32 fctrl_reg;
358 u32 rmcs_reg; 358 u32 rmcs_reg;
359 u32 reg; 359 u32 reg;
360 u32 link_speed = 0;
361 bool link_up;
360 362
361#ifdef CONFIG_DCB 363#ifdef CONFIG_DCB
362 if (hw->fc.requested_mode == ixgbe_fc_pfc) 364 if (hw->fc.requested_mode == ixgbe_fc_pfc)
363 goto out; 365 goto out;
364 366
365#endif /* CONFIG_DCB */ 367#endif /* CONFIG_DCB */
368 /*
369 * On 82598 having Rx FC on causes resets while doing 1G
370 * so if it's on turn it off once we know link_speed. For
371 * more details see 82598 Specification update.
372 */
373 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
374 if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
375 switch (hw->fc.requested_mode) {
376 case ixgbe_fc_full:
377 hw->fc.requested_mode = ixgbe_fc_tx_pause;
378 break;
379 case ixgbe_fc_rx_pause:
380 hw->fc.requested_mode = ixgbe_fc_none;
381 break;
382 default:
383 /* no change */
384 break;
385 }
386 }
387
366 /* Negotiate the fc mode to use */ 388 /* Negotiate the fc mode to use */
367 ret_val = ixgbe_fc_autoneg(hw); 389 ret_val = ixgbe_fc_autoneg(hw);
368 if (ret_val) 390 if (ret_val)
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 7b7c8486c0bf..951b73cf5ca2 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -5763,6 +5763,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
5763 if (err) 5763 if (err)
5764 goto err_sw_init; 5764 goto err_sw_init;
5765 5765
5766 /* Make it possible the adapter to be woken up via WOL */
5767 if (adapter->hw.mac.type == ixgbe_mac_82599EB)
5768 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
5769
5766 /* 5770 /*
5767 * If there is a fan on this device and it has failed log the 5771 * If there is a fan on this device and it has failed log the
5768 * failure. 5772 * failure.
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 103e8b0e2a0d..46997e177ee3 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -2284,6 +2284,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2284 fail2: 2284 fail2:
2285 efx_fini_struct(efx); 2285 efx_fini_struct(efx);
2286 fail1: 2286 fail1:
2287 WARN_ON(rc > 0);
2287 EFX_LOG(efx, "initialisation failed. rc=%d\n", rc); 2288 EFX_LOG(efx, "initialisation failed. rc=%d\n", rc);
2288 free_netdev(net_dev); 2289 free_netdev(net_dev);
2289 return rc; 2290 return rc;
diff --git a/drivers/net/sfc/qt202x_phy.c b/drivers/net/sfc/qt202x_phy.c
index e0d13a451019..67eec7a6e487 100644
--- a/drivers/net/sfc/qt202x_phy.c
+++ b/drivers/net/sfc/qt202x_phy.c
@@ -320,7 +320,7 @@ static int qt202x_reset_phy(struct efx_nic *efx)
320 320
321 falcon_board(efx)->type->init_phy(efx); 321 falcon_board(efx)->type->init_phy(efx);
322 322
323 return rc; 323 return 0;
324 324
325 fail: 325 fail:
326 EFX_ERR(efx, "PHY reset timed out\n"); 326 EFX_ERR(efx, "PHY reset timed out\n");
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index c93f58f5c6f2..317aa34b21cf 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -1877,13 +1877,12 @@ static void velocity_error(struct velocity_info *vptr, int status)
1877/** 1877/**
1878 * tx_srv - transmit interrupt service 1878 * tx_srv - transmit interrupt service
1879 * @vptr; Velocity 1879 * @vptr; Velocity
1880 * @status:
1881 * 1880 *
1882 * Scan the queues looking for transmitted packets that 1881 * Scan the queues looking for transmitted packets that
1883 * we can complete and clean up. Update any statistics as 1882 * we can complete and clean up. Update any statistics as
1884 * necessary/ 1883 * necessary/
1885 */ 1884 */
1886static int velocity_tx_srv(struct velocity_info *vptr, u32 status) 1885static int velocity_tx_srv(struct velocity_info *vptr)
1887{ 1886{
1888 struct tx_desc *td; 1887 struct tx_desc *td;
1889 int qnum; 1888 int qnum;
@@ -2090,14 +2089,12 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
2090/** 2089/**
2091 * velocity_rx_srv - service RX interrupt 2090 * velocity_rx_srv - service RX interrupt
2092 * @vptr: velocity 2091 * @vptr: velocity
2093 * @status: adapter status (unused)
2094 * 2092 *
2095 * Walk the receive ring of the velocity adapter and remove 2093 * Walk the receive ring of the velocity adapter and remove
2096 * any received packets from the receive queue. Hand the ring 2094 * any received packets from the receive queue. Hand the ring
2097 * slots back to the adapter for reuse. 2095 * slots back to the adapter for reuse.
2098 */ 2096 */
2099static int velocity_rx_srv(struct velocity_info *vptr, int status, 2097static int velocity_rx_srv(struct velocity_info *vptr, int budget_left)
2100 int budget_left)
2101{ 2098{
2102 struct net_device_stats *stats = &vptr->dev->stats; 2099 struct net_device_stats *stats = &vptr->dev->stats;
2103 int rd_curr = vptr->rx.curr; 2100 int rd_curr = vptr->rx.curr;
@@ -2151,32 +2148,24 @@ static int velocity_poll(struct napi_struct *napi, int budget)
2151 struct velocity_info *vptr = container_of(napi, 2148 struct velocity_info *vptr = container_of(napi,
2152 struct velocity_info, napi); 2149 struct velocity_info, napi);
2153 unsigned int rx_done; 2150 unsigned int rx_done;
2154 u32 isr_status; 2151 unsigned long flags;
2155
2156 spin_lock(&vptr->lock);
2157 isr_status = mac_read_isr(vptr->mac_regs);
2158
2159 /* Ack the interrupt */
2160 mac_write_isr(vptr->mac_regs, isr_status);
2161 if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
2162 velocity_error(vptr, isr_status);
2163 2152
2153 spin_lock_irqsave(&vptr->lock, flags);
2164 /* 2154 /*
2165 * Do rx and tx twice for performance (taken from the VIA 2155 * Do rx and tx twice for performance (taken from the VIA
2166 * out-of-tree driver). 2156 * out-of-tree driver).
2167 */ 2157 */
2168 rx_done = velocity_rx_srv(vptr, isr_status, budget / 2); 2158 rx_done = velocity_rx_srv(vptr, budget / 2);
2169 velocity_tx_srv(vptr, isr_status); 2159 velocity_tx_srv(vptr);
2170 rx_done += velocity_rx_srv(vptr, isr_status, budget - rx_done); 2160 rx_done += velocity_rx_srv(vptr, budget - rx_done);
2171 velocity_tx_srv(vptr, isr_status); 2161 velocity_tx_srv(vptr);
2172
2173 spin_unlock(&vptr->lock);
2174 2162
2175 /* If budget not fully consumed, exit the polling mode */ 2163 /* If budget not fully consumed, exit the polling mode */
2176 if (rx_done < budget) { 2164 if (rx_done < budget) {
2177 napi_complete(napi); 2165 napi_complete(napi);
2178 mac_enable_int(vptr->mac_regs); 2166 mac_enable_int(vptr->mac_regs);
2179 } 2167 }
2168 spin_unlock_irqrestore(&vptr->lock, flags);
2180 2169
2181 return rx_done; 2170 return rx_done;
2182} 2171}
@@ -2206,10 +2195,17 @@ static irqreturn_t velocity_intr(int irq, void *dev_instance)
2206 return IRQ_NONE; 2195 return IRQ_NONE;
2207 } 2196 }
2208 2197
2198 /* Ack the interrupt */
2199 mac_write_isr(vptr->mac_regs, isr_status);
2200
2209 if (likely(napi_schedule_prep(&vptr->napi))) { 2201 if (likely(napi_schedule_prep(&vptr->napi))) {
2210 mac_disable_int(vptr->mac_regs); 2202 mac_disable_int(vptr->mac_regs);
2211 __napi_schedule(&vptr->napi); 2203 __napi_schedule(&vptr->napi);
2212 } 2204 }
2205
2206 if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
2207 velocity_error(vptr, isr_status);
2208
2213 spin_unlock(&vptr->lock); 2209 spin_unlock(&vptr->lock);
2214 2210
2215 return IRQ_HANDLED; 2211 return IRQ_HANDLED;
@@ -3100,7 +3096,7 @@ static int velocity_resume(struct pci_dev *pdev)
3100 velocity_init_registers(vptr, VELOCITY_INIT_WOL); 3096 velocity_init_registers(vptr, VELOCITY_INIT_WOL);
3101 mac_disable_int(vptr->mac_regs); 3097 mac_disable_int(vptr->mac_regs);
3102 3098
3103 velocity_tx_srv(vptr, 0); 3099 velocity_tx_srv(vptr);
3104 3100
3105 for (i = 0; i < vptr->tx.numq; i++) { 3101 for (i = 0; i < vptr->tx.numq; i++) {
3106 if (vptr->tx.used[i]) 3102 if (vptr->tx.used[i])
@@ -3344,6 +3340,7 @@ static int velocity_set_coalesce(struct net_device *dev,
3344{ 3340{
3345 struct velocity_info *vptr = netdev_priv(dev); 3341 struct velocity_info *vptr = netdev_priv(dev);
3346 int max_us = 0x3f * 64; 3342 int max_us = 0x3f * 64;
3343 unsigned long flags;
3347 3344
3348 /* 6 bits of */ 3345 /* 6 bits of */
3349 if (ecmd->tx_coalesce_usecs > max_us) 3346 if (ecmd->tx_coalesce_usecs > max_us)
@@ -3365,6 +3362,7 @@ static int velocity_set_coalesce(struct net_device *dev,
3365 ecmd->tx_coalesce_usecs); 3362 ecmd->tx_coalesce_usecs);
3366 3363
3367 /* Setup the interrupt suppression and queue timers */ 3364 /* Setup the interrupt suppression and queue timers */
3365 spin_lock_irqsave(&vptr->lock, flags);
3368 mac_disable_int(vptr->mac_regs); 3366 mac_disable_int(vptr->mac_regs);
3369 setup_adaptive_interrupts(vptr); 3367 setup_adaptive_interrupts(vptr);
3370 setup_queue_timers(vptr); 3368 setup_queue_timers(vptr);
@@ -3372,6 +3370,7 @@ static int velocity_set_coalesce(struct net_device *dev,
3372 mac_write_int_mask(vptr->int_mask, vptr->mac_regs); 3370 mac_write_int_mask(vptr->int_mask, vptr->mac_regs);
3373 mac_clear_isr(vptr->mac_regs); 3371 mac_clear_isr(vptr->mac_regs);
3374 mac_enable_int(vptr->mac_regs); 3372 mac_enable_int(vptr->mac_regs);
3373 spin_unlock_irqrestore(&vptr->lock, flags);
3375 3374
3376 return 0; 3375 return 0;
3377} 3376}
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index fa12b9060b0b..29bf33692f71 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1615,7 +1615,7 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
1615 bf->bf_frmlen -= padsize; 1615 bf->bf_frmlen -= padsize;
1616 } 1616 }
1617 1617
1618 if (conf_is_ht(&hw->conf) && !is_pae(skb)) 1618 if (conf_is_ht(&hw->conf))
1619 bf->bf_state.bf_type |= BUF_HT; 1619 bf->bf_state.bf_type |= BUF_HT;
1620 1620
1621 bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq); 1621 bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq);
@@ -1701,7 +1701,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1701 goto tx_done; 1701 goto tx_done;
1702 } 1702 }
1703 1703
1704 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) { 1704 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && !is_pae(skb)) {
1705 /* 1705 /*
1706 * Try aggregation if it's a unicast data frame 1706 * Try aggregation if it's a unicast data frame
1707 * and the destination is HT capable. 1707 * and the destination is HT capable.
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index fe3bf9491997..c484cc253892 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -115,6 +115,7 @@
115#define B43_MMIO_TSF_2 0x636 /* core rev < 3 only */ 115#define B43_MMIO_TSF_2 0x636 /* core rev < 3 only */
116#define B43_MMIO_TSF_3 0x638 /* core rev < 3 only */ 116#define B43_MMIO_TSF_3 0x638 /* core rev < 3 only */
117#define B43_MMIO_RNG 0x65A 117#define B43_MMIO_RNG 0x65A
118#define B43_MMIO_IFSSLOT 0x684 /* Interframe slot time */
118#define B43_MMIO_IFSCTL 0x688 /* Interframe space control */ 119#define B43_MMIO_IFSCTL 0x688 /* Interframe space control */
119#define B43_MMIO_IFSCTL_USE_EDCF 0x0004 120#define B43_MMIO_IFSCTL_USE_EDCF 0x0004
120#define B43_MMIO_POWERUP_DELAY 0x6A8 121#define B43_MMIO_POWERUP_DELAY 0x6A8
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 4c41cfe44f26..490fb45d1d05 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -628,10 +628,17 @@ static void b43_upload_card_macaddress(struct b43_wldev *dev)
628static void b43_set_slot_time(struct b43_wldev *dev, u16 slot_time) 628static void b43_set_slot_time(struct b43_wldev *dev, u16 slot_time)
629{ 629{
630 /* slot_time is in usec. */ 630 /* slot_time is in usec. */
631 if (dev->phy.type != B43_PHYTYPE_G) 631 /* This test used to exit for all but a G PHY. */
632 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
632 return; 633 return;
633 b43_write16(dev, 0x684, 510 + slot_time); 634 b43_write16(dev, B43_MMIO_IFSSLOT, 510 + slot_time);
634 b43_shm_write16(dev, B43_SHM_SHARED, 0x0010, slot_time); 635 /* Shared memory location 0x0010 is the slot time and should be
636 * set to slot_time; however, this register is initially 0 and changing
637 * the value adversely affects the transmit rate for BCM4311
 638 * devices. Until this behavior is understood, delete this step
639 *
640 * b43_shm_write16(dev, B43_SHM_SHARED, 0x0010, slot_time);
641 */
635} 642}
636 643
637static void b43_short_slot_timing_enable(struct b43_wldev *dev) 644static void b43_short_slot_timing_enable(struct b43_wldev *dev)
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 5461f105bd2d..d10bea64fce3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -2744,6 +2744,7 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
2744 if ((le16_to_cpu(priv->staging_rxon.channel) != ch)) 2744 if ((le16_to_cpu(priv->staging_rxon.channel) != ch))
2745 priv->staging_rxon.flags = 0; 2745 priv->staging_rxon.flags = 0;
2746 2746
2747 iwl_set_rxon_ht(priv, ht_conf);
2747 iwl_set_rxon_channel(priv, conf->channel); 2748 iwl_set_rxon_channel(priv, conf->channel);
2748 2749
2749 iwl_set_flags_for_band(priv, conf->channel->band); 2750 iwl_set_flags_for_band(priv, conf->channel->band);
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index 6f36b6e79f5e..2dbce85404aa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -928,7 +928,10 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
928 if (ieee80211_is_mgmt(fc) || 928 if (ieee80211_is_mgmt(fc) ||
929 ieee80211_has_protected(fc) || 929 ieee80211_has_protected(fc) ||
930 ieee80211_has_morefrags(fc) || 930 ieee80211_has_morefrags(fc) ||
931 le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) 931 le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG ||
932 (ieee80211_is_data_qos(fc) &&
933 *ieee80211_get_qos_ctl(hdr) &
934 IEEE80211_QOS_CONTROL_A_MSDU_PRESENT))
932 ret = skb_linearize(skb); 935 ret = skb_linearize(skb);
933 else 936 else
934 ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ? 937 ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ?
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index 6d6ed7485175..f727b4a83196 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -794,7 +794,7 @@ static int iwm_mlme_update_bss_table(struct iwm_priv *iwm, u8 *buf,
794 } 794 }
795 795
796 bss->bss = kzalloc(bss_len, GFP_KERNEL); 796 bss->bss = kzalloc(bss_len, GFP_KERNEL);
797 if (!bss) { 797 if (!bss->bss) {
798 kfree(bss); 798 kfree(bss);
799 IWM_ERR(iwm, "Couldn't allocate bss\n"); 799 IWM_ERR(iwm, "Couldn't allocate bss\n");
800 return -ENOMEM; 800 return -ENOMEM;
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index bc5726dd5fe4..7ba3052b0708 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -65,6 +65,7 @@ static struct usb_device_id rtl8187_table[] __devinitdata = {
65 /* Sitecom */ 65 /* Sitecom */
66 {USB_DEVICE(0x0df6, 0x000d), .driver_info = DEVICE_RTL8187}, 66 {USB_DEVICE(0x0df6, 0x000d), .driver_info = DEVICE_RTL8187},
67 {USB_DEVICE(0x0df6, 0x0028), .driver_info = DEVICE_RTL8187B}, 67 {USB_DEVICE(0x0df6, 0x0028), .driver_info = DEVICE_RTL8187B},
68 {USB_DEVICE(0x0df6, 0x0029), .driver_info = DEVICE_RTL8187B},
68 /* Sphairon Access Systems GmbH */ 69 /* Sphairon Access Systems GmbH */
69 {USB_DEVICE(0x114B, 0x0150), .driver_info = DEVICE_RTL8187}, 70 {USB_DEVICE(0x114B, 0x0150), .driver_info = DEVICE_RTL8187},
70 /* Dick Smith Electronics */ 71 /* Dick Smith Electronics */
diff --git a/drivers/power/wm97xx_battery.c b/drivers/power/wm97xx_battery.c
index fa39e759a275..6ea3cb5837c7 100644
--- a/drivers/power/wm97xx_battery.c
+++ b/drivers/power/wm97xx_battery.c
@@ -175,8 +175,14 @@ static int __devinit wm97xx_bat_probe(struct platform_device *dev)
175 dev_err(&dev->dev, "Do not pass platform_data through " 175 dev_err(&dev->dev, "Do not pass platform_data through "
176 "wm97xx_bat_set_pdata!\n"); 176 "wm97xx_bat_set_pdata!\n");
177 return -EINVAL; 177 return -EINVAL;
178 } else 178 }
179 pdata = wmdata->batt_pdata; 179
180 if (!wmdata) {
181 dev_err(&dev->dev, "No platform data supplied\n");
182 return -EINVAL;
183 }
184
185 pdata = wmdata->batt_pdata;
180 186
181 if (dev->id != -1) 187 if (dev->id != -1)
182 return -EINVAL; 188 return -EINVAL;
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 686ef270ecf7..b60a4c9f8f16 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -661,7 +661,7 @@ static int suspend_prepare(struct regulator_dev *rdev, suspend_state_t state)
661static void print_constraints(struct regulator_dev *rdev) 661static void print_constraints(struct regulator_dev *rdev)
662{ 662{
663 struct regulation_constraints *constraints = rdev->constraints; 663 struct regulation_constraints *constraints = rdev->constraints;
664 char buf[80]; 664 char buf[80] = "";
665 int count = 0; 665 int count = 0;
666 int ret; 666 int ret;
667 667
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index 76d08c282f9c..4f33a0f4a179 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -183,7 +183,7 @@ static int lp3971_ldo_set_voltage(struct regulator_dev *dev,
183 if (vol_map[val] >= min_vol) 183 if (vol_map[val] >= min_vol)
184 break; 184 break;
185 185
186 if (vol_map[val] > max_vol) 186 if (val > LDO_VOL_MAX_IDX || vol_map[val] > max_vol)
187 return -EINVAL; 187 return -EINVAL;
188 188
189 return lp3971_set_bits(lp3971, LP3971_LDO_VOL_CONTR_REG(ldo), 189 return lp3971_set_bits(lp3971, LP3971_LDO_VOL_CONTR_REG(ldo),
@@ -272,7 +272,7 @@ static int lp3971_dcdc_set_voltage(struct regulator_dev *dev,
272 if (vol_map[val] >= min_vol) 272 if (vol_map[val] >= min_vol)
273 break; 273 break;
274 274
275 if (vol_map[val] > max_vol) 275 if (val > BUCK_TARGET_VOL_MAX_IDX || vol_map[val] > max_vol)
276 return -EINVAL; 276 return -EINVAL;
277 277
278 ret = lp3971_set_bits(lp3971, LP3971_BUCK_TARGET_VOL1_REG(buck), 278 ret = lp3971_set_bits(lp3971, LP3971_BUCK_TARGET_VOL1_REG(buck),
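Both lp3971 hunks add an index bound check before the final vol_map[] dereference, so a search that runs off the end of the table can no longer read past it. A tiny C sketch of the guarded lookup; the table size and contents are illustrative:

#define VOL_MAX_IDX 15
static const int vol_map[VOL_MAX_IDX + 1] = { 0 /* millivolt steps elided */ };

/* Find the first table entry >= min; fail if the search ran off the end
 * or the chosen entry exceeds max. */
static int pick_voltage(int min, int max)
{
    int val;

    for (val = 0; val <= VOL_MAX_IDX; val++)
        if (vol_map[val] >= min)
            break;

    /* The bound check comes first, so vol_map[val] is never read
     * with an out-of-range index. */
    if (val > VOL_MAX_IDX || vol_map[val] > max)
        return -1;
    return val;
}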
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 0f7b493fb105..271399f62f1b 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -671,12 +671,11 @@ static void zfcp_fc_ct_els_job_handler(void *data)
671{ 671{
672 struct fc_bsg_job *job = data; 672 struct fc_bsg_job *job = data;
673 struct zfcp_fsf_ct_els *zfcp_ct_els = job->dd_data; 673 struct zfcp_fsf_ct_els *zfcp_ct_els = job->dd_data;
674 int status = zfcp_ct_els->status; 674 struct fc_bsg_reply *jr = job->reply;
675 int reply_status;
676 675
677 reply_status = status ? FC_CTELS_STATUS_REJECT : FC_CTELS_STATUS_OK; 676 jr->reply_payload_rcv_len = job->reply_payload.payload_len;
678 job->reply->reply_data.ctels_reply.status = reply_status; 677 jr->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
679 job->reply->reply_payload_rcv_len = job->reply_payload.payload_len; 678 jr->result = zfcp_ct_els->status ? -EIO : 0;
680 job->job_done(job); 679 job->job_done(job);
681} 680}
682 681
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index f61fb8d01330..8bc6f53691e9 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -453,6 +453,5 @@ extern void qla24xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
453extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t); 453extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
454extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); 454extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
455extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); 455extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
456extern struct scsi_qla_host * qla25xx_get_host(struct rsp_que *);
457 456
458#endif /* _QLA_GBL_H */ 457#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index ffd0efdff40e..6fc63b98818c 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1917,6 +1917,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
 	struct rsp_que *rsp;
 	struct device_reg_24xx __iomem *reg;
 	struct scsi_qla_host *vha;
+	unsigned long flags;
 
 	rsp = (struct rsp_que *) dev_id;
 	if (!rsp) {
@@ -1927,15 +1928,15 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
 	ha = rsp->hw;
 	reg = &ha->iobase->isp24;
 
-	spin_lock_irq(&ha->hardware_lock);
+	spin_lock_irqsave(&ha->hardware_lock, flags);
 
-	vha = qla25xx_get_host(rsp);
+	vha = pci_get_drvdata(ha->pdev);
 	qla24xx_process_response_queue(vha, rsp);
 	if (!ha->flags.disable_msix_handshake) {
 		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
 		RD_REG_DWORD_RELAXED(&reg->hccr);
 	}
-	spin_unlock_irq(&ha->hardware_lock);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 	return IRQ_HANDLED;
 }
@@ -1946,6 +1947,7 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
 	struct qla_hw_data *ha;
 	struct rsp_que *rsp;
 	struct device_reg_24xx __iomem *reg;
+	unsigned long flags;
 
 	rsp = (struct rsp_que *) dev_id;
 	if (!rsp) {
@@ -1958,10 +1960,10 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
 	/* Clear the interrupt, if enabled, for this response queue */
 	if (rsp->options & ~BIT_6) {
 		reg = &ha->iobase->isp24;
-		spin_lock_irq(&ha->hardware_lock);
+		spin_lock_irqsave(&ha->hardware_lock, flags);
 		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
 		RD_REG_DWORD_RELAXED(&reg->hccr);
-		spin_unlock_irq(&ha->hardware_lock);
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 	}
 	queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
 
@@ -1979,6 +1981,7 @@ qla24xx_msix_default(int irq, void *dev_id)
 	uint32_t stat;
 	uint32_t hccr;
 	uint16_t mb[4];
+	unsigned long flags;
 
 	rsp = (struct rsp_que *) dev_id;
 	if (!rsp) {
@@ -1990,7 +1993,7 @@ qla24xx_msix_default(int irq, void *dev_id)
 	reg = &ha->iobase->isp24;
 	status = 0;
 
-	spin_lock_irq(&ha->hardware_lock);
+	spin_lock_irqsave(&ha->hardware_lock, flags);
 	vha = pci_get_drvdata(ha->pdev);
 	do {
 		stat = RD_REG_DWORD(&reg->host_status);
@@ -2039,7 +2042,7 @@ qla24xx_msix_default(int irq, void *dev_id)
 		}
 		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
 	} while (0);
-	spin_unlock_irq(&ha->hardware_lock);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
 	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
@@ -2277,30 +2280,3 @@ int qla25xx_request_irq(struct rsp_que *rsp)
 	msix->rsp = rsp;
 	return ret;
 }
-
-struct scsi_qla_host *
-qla25xx_get_host(struct rsp_que *rsp)
-{
-	srb_t *sp;
-	struct qla_hw_data *ha = rsp->hw;
-	struct scsi_qla_host *vha = NULL;
-	struct sts_entry_24xx *pkt;
-	struct req_que *req;
-	uint16_t que;
-	uint32_t handle;
-
-	pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
-	que = MSW(pkt->handle);
-	handle = (uint32_t) LSW(pkt->handle);
-	req = ha->req_q_map[que];
-	if (handle < MAX_OUTSTANDING_COMMANDS) {
-		sp = req->outstanding_cmds[handle];
-		if (sp)
-			return sp->fcport->vha;
-		else
-			goto base_que;
-	}
-base_que:
-	vha = pci_get_drvdata(ha->pdev);
-	return vha;
-}
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index b901aa267e7d..ff17dee28613 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -636,13 +636,15 @@ failed:
 
 static void qla_do_work(struct work_struct *work)
 {
+	unsigned long flags;
 	struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
 	struct scsi_qla_host *vha;
+	struct qla_hw_data *ha = rsp->hw;
 
-	spin_lock_irq(&rsp->hw->hardware_lock);
-	vha = qla25xx_get_host(rsp);
+	spin_lock_irqsave(&rsp->hw->hardware_lock, flags);
+	vha = pci_get_drvdata(ha->pdev);
 	qla24xx_process_response_queue(vha, rsp);
-	spin_unlock_irq(&rsp->hw->hardware_lock);
+	spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags);
 }
 
 /* create response queue */
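
The qla2xxx hunks above convert spin_lock_irq()/spin_unlock_irq() to spin_lock_irqsave()/spin_unlock_irqrestore() and add the flags variable those need, so the previous interrupt state is restored rather than unconditionally re-enabled. A minimal sketch of the pattern in an MSI-X-style handler (kernel-flavoured C; the structure and handler names are illustrative, not the driver's):

#include <linux/spinlock.h>
#include <linux/interrupt.h>

struct demo_hw {
	spinlock_t lock;
	/* ... device state ... */
};

/* spin_lock_irqsave() records the current IRQ state in 'flags';
 * spin_unlock_irqrestore() puts that exact state back, which is safe
 * whether or not the caller already had interrupts disabled. */
static irqreturn_t demo_msix_handler(int irq, void *dev_id)
{
	struct demo_hw *hw = dev_id;
	unsigned long flags;

	spin_lock_irqsave(&hw->lock, flags);
	/* touch registers / drain the response queue here */
	spin_unlock_irqrestore(&hw->lock, flags);

	return IRQ_HANDLED;
}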
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index c3e37c8e7e26..e9b15c3746fa 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -83,6 +83,9 @@ static unsigned int skip_txen_test; /* force skip of txen test at init time */
 
 #define PASS_LIMIT	256
 
+#define BOTH_EMPTY	(UART_LSR_TEMT | UART_LSR_THRE)
+
+
 /*
  * We default to IRQ0 for the "no irq" hack. Some
  * machine types want others as well - they're free
@@ -1792,7 +1795,7 @@ static unsigned int serial8250_tx_empty(struct uart_port *port)
 	up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
 	spin_unlock_irqrestore(&up->port.lock, flags);
 
-	return lsr & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
+	return (lsr & BOTH_EMPTY) == BOTH_EMPTY ? TIOCSER_TEMT : 0;
 }
 
 static unsigned int serial8250_get_mctrl(struct uart_port *port)
@@ -1850,8 +1853,6 @@ static void serial8250_break_ctl(struct uart_port *port, int break_state)
 	spin_unlock_irqrestore(&up->port.lock, flags);
 }
 
-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-
 /*
  *	Wait for transmitter & holding register to empty
  */
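
The 8250 hunks above hoist BOTH_EMPTY so serial8250_tx_empty() reports an idle transmitter only when both the transmit holding register (THRE) and the transmitter shift register (TEMT) have drained. A small sketch of that LSR test (illustrative helper name; the bit definitions are the standard ones from <linux/serial_reg.h>):

#include <linux/serial_reg.h>

#define BOTH_EMPTY	(UART_LSR_TEMT | UART_LSR_THRE)

/* THRE alone only says the holding register moved into the shifter;
 * TEMT must also be set before the last frame has really left the wire. */
static int demo_tx_drained(unsigned char lsr)
{
	return (lsr & BOTH_EMPTY) == BOTH_EMPTY;
}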
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index 5681ebed9c65..03dfd27c4bfb 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -494,8 +494,7 @@ static int ssb_devices_register(struct ssb_bus *bus)
 #endif
 			break;
 		case SSB_BUSTYPE_SDIO:
-#ifdef CONFIG_SSB_SDIO
-			sdev->irq = bus->host_sdio->dev.irq;
+#ifdef CONFIG_SSB_SDIOHOST
 			dev->parent = &bus->host_sdio->dev;
 #endif
 			break;
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 6e8bcdfd23b4..a678186f218f 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1312,9 +1312,9 @@ static int processcompl(struct async *as, void __user * __user *arg)
 	void __user *addr = as->userurb;
 	unsigned int i;
 
-	if (as->userbuffer)
+	if (as->userbuffer && urb->actual_length)
 		if (copy_to_user(as->userbuffer, urb->transfer_buffer,
-				urb->transfer_buffer_length))
+				urb->actual_length))
 			goto err_out;
 	if (put_user(as->status, &userurb->status))
 		goto err_out;
@@ -1334,14 +1334,11 @@ static int processcompl(struct async *as, void __user * __user *arg)
 		}
 	}
 
-	free_async(as);
-
 	if (put_user(addr, (void __user * __user *)arg))
 		return -EFAULT;
 	return 0;
 
 err_out:
-	free_async(as);
 	return -EFAULT;
 }
 
@@ -1371,8 +1368,11 @@ static struct async *reap_as(struct dev_state *ps)
 static int proc_reapurb(struct dev_state *ps, void __user *arg)
 {
 	struct async *as = reap_as(ps);
-	if (as)
-		return processcompl(as, (void __user * __user *)arg);
+	if (as) {
+		int retval = processcompl(as, (void __user * __user *)arg);
+		free_async(as);
+		return retval;
+	}
 	if (signal_pending(current))
 		return -EINTR;
 	return -EIO;
@@ -1380,11 +1380,16 @@ static int proc_reapurb(struct dev_state *ps, void __user *arg)
 
 static int proc_reapurbnonblock(struct dev_state *ps, void __user *arg)
 {
+	int retval;
 	struct async *as;
 
-	if (!(as = async_getcompleted(ps)))
-		return -EAGAIN;
-	return processcompl(as, (void __user * __user *)arg);
+	as = async_getcompleted(ps);
+	retval = -EAGAIN;
+	if (as) {
+		retval = processcompl(as, (void __user * __user *)arg);
+		free_async(as);
+	}
+	return retval;
 }
 
 #ifdef CONFIG_COMPAT
@@ -1475,9 +1480,9 @@ static int processcompl_compat(struct async *as, void __user * __user *arg)
 	void __user *addr = as->userurb;
 	unsigned int i;
 
-	if (as->userbuffer)
+	if (as->userbuffer && urb->actual_length)
 		if (copy_to_user(as->userbuffer, urb->transfer_buffer,
-				urb->transfer_buffer_length))
+				urb->actual_length))
 			return -EFAULT;
 	if (put_user(as->status, &userurb->status))
 		return -EFAULT;
@@ -1497,7 +1502,6 @@ static int processcompl_compat(struct async *as, void __user * __user *arg)
 		}
 	}
 
-	free_async(as);
 	if (put_user(ptr_to_compat(addr), (u32 __user *)arg))
 		return -EFAULT;
 	return 0;
@@ -1506,8 +1510,11 @@ static int processcompl_compat(struct async *as, void __user * __user *arg)
 static int proc_reapurb_compat(struct dev_state *ps, void __user *arg)
 {
 	struct async *as = reap_as(ps);
-	if (as)
-		return processcompl_compat(as, (void __user * __user *)arg);
+	if (as) {
+		int retval = processcompl_compat(as, (void __user * __user *)arg);
+		free_async(as);
+		return retval;
+	}
 	if (signal_pending(current))
 		return -EINTR;
 	return -EIO;
@@ -1515,11 +1522,16 @@ static int proc_reapurb_compat(struct dev_state *ps, void __user *arg)
 
 static int proc_reapurbnonblock_compat(struct dev_state *ps, void __user *arg)
 {
+	int retval;
 	struct async *as;
 
-	if (!(as = async_getcompleted(ps)))
-		return -EAGAIN;
-	return processcompl_compat(as, (void __user * __user *)arg);
+	retval = -EAGAIN;
+	as = async_getcompleted(ps);
+	if (as) {
+		retval = processcompl_compat(as, (void __user * __user *)arg);
+		free_async(as);
+	}
+	return retval;
 }
 
 
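
The devio.c hunks above do two related things: copy back only urb->actual_length bytes instead of the full transfer_buffer_length, and move free_async() out of processcompl()/processcompl_compat() so the reap functions that obtained the async record are the ones that free it, exactly once, on every path. A minimal sketch of that ownership rule (standalone C; the helper names are stand-ins, not the usbfs API):

#include <stdlib.h>

struct async {
	int status;	/* ... URB bookkeeping ... */
};

/* Copies results to the caller; never frees 'as'. */
static int process_completion(struct async *as, void *user_arg)
{
	(void)user_arg;
	return as->status;	/* 0 on success, negative error otherwise */
}

static struct async *reap_completed(void) { return NULL; /* stub */ }
static void free_async(struct async *as) { free(as); }

/* The reaper owns 'as': it is freed exactly once, whatever
 * process_completion() returned. */
int reap_one(void *user_arg)
{
	struct async *as = reap_completed();
	int retval = -11;	/* -EAGAIN: nothing has completed yet */

	if (as) {
		retval = process_completion(as, user_arg);
		free_async(as);
	}
	return retval;
}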
diff --git a/drivers/usb/gadget/f_eem.c b/drivers/usb/gadget/f_eem.c
index 0a577d5694fd..d4f0db58a8ad 100644
--- a/drivers/usb/gadget/f_eem.c
+++ b/drivers/usb/gadget/f_eem.c
@@ -358,7 +358,7 @@ done:
 	 * b15:		bmType (0 == data)
 	 */
 	len = skb->len;
-	put_unaligned_le16((len & 0x3FFF) | BIT(14), skb_push(skb, 2));
+	put_unaligned_le16(len & 0x3FFF, skb_push(skb, 2));
 
 	/* add a zero-length EEM packet, if needed */
 	if (padlen)
@@ -464,7 +464,6 @@ static int eem_unwrap(struct gether *port,
 		}
 
 		/* validate CRC */
-		crc = get_unaligned_le32(skb->data + len - ETH_FCS_LEN);
 		if (header & BIT(14)) {
 			crc = get_unaligned_le32(skb->data + len
 						- ETH_FCS_LEN);
diff --git a/drivers/usb/gadget/multi.c b/drivers/usb/gadget/multi.c
index 429560100b10..76496f5d272c 100644
--- a/drivers/usb/gadget/multi.c
+++ b/drivers/usb/gadget/multi.c
@@ -29,7 +29,7 @@
 #if defined USB_ETH_RNDIS
 # undef USB_ETH_RNDIS
 #endif
-#ifdef CONFIG_USB_ETH_RNDIS
+#ifdef CONFIG_USB_G_MULTI_RNDIS
 # define USB_ETH_RNDIS y
 #endif
 
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c
index e220fb8091a3..8b45145b9136 100644
--- a/drivers/usb/gadget/r8a66597-udc.c
+++ b/drivers/usb/gadget/r8a66597-udc.c
@@ -26,6 +26,7 @@
 #include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
+#include <linux/err.h>
 
 #include <linux/usb/ch9.h>
 #include <linux/usb/gadget.h>
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index 4b5dbd0127f5..5fc80a104150 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -2582,6 +2582,7 @@ err:
 	hsotg->gadget.dev.driver = NULL;
 	return ret;
 }
+EXPORT_SYMBOL(usb_gadget_register_driver);
 
 int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
 {
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index c75d9270c752..19372673bf09 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -196,7 +196,9 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
 		if (hostpc_reg) {
 			u32	t3;
 
+			spin_unlock_irq(&ehci->lock);
 			msleep(5);/* 5ms for HCD enter low pwr mode */
+			spin_lock_irq(&ehci->lock);
 			t3 = ehci_readl(ehci, hostpc_reg);
 			ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg);
 			t3 = ehci_readl(ehci, hostpc_reg);
@@ -904,17 +906,18 @@ static int ehci_hub_control (
 			if ((temp & PORT_PE) == 0
 					|| (temp & PORT_RESET) != 0)
 				goto error;
-			ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
+
 			/* After above check the port must be connected.
 			 * Set appropriate bit thus could put phy into low power
 			 * mode if we have hostpc feature
 			 */
+			temp &= ~PORT_WKCONN_E;
+			temp |= PORT_WKDISC_E | PORT_WKOC_E;
+			ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
 			if (hostpc_reg) {
-				temp &= ~PORT_WKCONN_E;
-				temp |= (PORT_WKDISC_E | PORT_WKOC_E);
-				ehci_writel(ehci, temp | PORT_SUSPEND,
-						status_reg);
+				spin_unlock_irqrestore(&ehci->lock, flags);
 				msleep(5);/* 5ms for HCD enter low pwr mode */
+				spin_lock_irqsave(&ehci->lock, flags);
 				temp1 = ehci_readl(ehci, hostpc_reg);
 				ehci_writel(ehci, temp1 | HOSTPC_PHCD,
 						hostpc_reg);
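
Both ehci-hub.c hunks above deal with the same constraint: msleep() may sleep, and ehci->lock is a spinlock held with interrupts off, so the lock has to be dropped around the 5 ms wait and retaken afterwards. A sketch of the pattern (illustrative names; anything guarded by the lock must be re-read after it is reacquired, since it can change while the lock is dropped):

#include <linux/spinlock.h>
#include <linux/delay.h>

static DEFINE_SPINLOCK(demo_lock);

static void demo_enter_low_power(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	/* ... program the controller ... */

	/* msleep() schedules, so it cannot run under a spinlock. */
	spin_unlock_irqrestore(&demo_lock, flags);
	msleep(5);
	spin_lock_irqsave(&demo_lock, flags);

	/* ... re-check state and finish ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}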
diff --git a/drivers/usb/host/fhci-tds.c b/drivers/usb/host/fhci-tds.c
index d224ab467a40..e1232890c78b 100644
--- a/drivers/usb/host/fhci-tds.c
+++ b/drivers/usb/host/fhci-tds.c
@@ -105,7 +105,7 @@ void fhci_ep0_free(struct fhci_usb *usb)
 	if (ep->td_base)
 		cpm_muram_free(cpm_muram_offset(ep->td_base));
 
-	if (ep->conf_frame_Q) {
+	if (kfifo_initialized(&ep->conf_frame_Q)) {
 		size = cq_howmany(&ep->conf_frame_Q);
 		for (; size; size--) {
 			struct packet *pkt = cq_get(&ep->conf_frame_Q);
@@ -115,7 +115,7 @@ void fhci_ep0_free(struct fhci_usb *usb)
 		cq_delete(&ep->conf_frame_Q);
 	}
 
-	if (ep->empty_frame_Q) {
+	if (kfifo_initialized(&ep->empty_frame_Q)) {
 		size = cq_howmany(&ep->empty_frame_Q);
 		for (; size; size--) {
 			struct packet *pkt = cq_get(&ep->empty_frame_Q);
@@ -125,7 +125,7 @@ void fhci_ep0_free(struct fhci_usb *usb)
 		cq_delete(&ep->empty_frame_Q);
 	}
 
-	if (ep->dummy_packets_Q) {
+	if (kfifo_initialized(&ep->dummy_packets_Q)) {
 		size = cq_howmany(&ep->dummy_packets_Q);
 		for (; size; size--) {
 			u8 *buff = cq_get(&ep->dummy_packets_Q);
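
The fhci-tds.c hunks above follow from the frame queues becoming embedded struct kfifo objects rather than pointers, so "was this queue ever set up?" is asked with kfifo_initialized() instead of a NULL test. A minimal sketch, assuming the reworked in-kernel kfifo API of this period (the struct and function names here are invented for illustration):

#include <linux/kfifo.h>

struct demo_ep {
	struct kfifo frame_q;	/* embedded object, not a pointer */
};

static void demo_ep_free(struct demo_ep *ep)
{
	/* No pointer to compare against NULL any more;
	 * kfifo_initialized() reports whether kfifo_alloc()/kfifo_init()
	 * succeeded for this queue. */
	if (kfifo_initialized(&ep->frame_q))
		kfifo_free(&ep->frame_q);
}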
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index 0025847743f3..8b37a4b9839e 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -3245,6 +3245,7 @@ static struct usb_device_id sisusb_table [] = {
 	{ USB_DEVICE(0x0711, 0x0902) },
 	{ USB_DEVICE(0x0711, 0x0903) },
 	{ USB_DEVICE(0x0711, 0x0918) },
+	{ USB_DEVICE(0x0711, 0x0920) },
 	{ USB_DEVICE(0x182d, 0x021c) },
 	{ USB_DEVICE(0x182d, 0x0269) },
 	{ }
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig
index de56b3d743d7..3d2d3e549bd1 100644
--- a/drivers/usb/otg/Kconfig
+++ b/drivers/usb/otg/Kconfig
@@ -44,6 +44,7 @@ config ISP1301_OMAP
 config USB_ULPI
 	bool "Generic ULPI Transceiver Driver"
 	depends on ARM
+	select USB_OTG_UTILS
 	help
 	  Enable this to support ULPI connected USB OTG transceivers which
 	  are likely found on embedded boards.
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 216f187582ab..7638828e7317 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -50,7 +50,7 @@
  * Version Information
  */
 #define DRIVER_VERSION "v1.5.0"
-#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Bill Ryder <bryder@sgi.com>, Kuba Ober <kuba@mareimbrium.org>"
+#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Bill Ryder <bryder@sgi.com>, Kuba Ober <kuba@mareimbrium.org>, Andreas Mohr"
 #define DRIVER_DESC "USB FTDI Serial Converters Driver"
 
 static int debug;
@@ -145,10 +145,15 @@ static struct ftdi_sio_quirk ftdi_HE_TIRA1_quirk = {
 
 
 
+/*
+ * Device ID not listed? Test via module params product/vendor or
+ * /sys/bus/usb/ftdi_sio/new_id, then send patch/report!
+ */
 static struct usb_device_id id_table_combined [] = {
 	{ USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) },
+	{ USB_DEVICE(FTDI_VID, FTDI_NXTCAM_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) },
@@ -552,9 +557,16 @@ static struct usb_device_id id_table_combined [] = {
 	{ USB_DEVICE(FTDI_VID, FTDI_IBS_PEDO_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_IBS_PROD_PID) },
 	/*
-	 * Due to many user requests for multiple ELV devices we enable
-	 * them by default.
+	 * ELV devices:
 	 */
+	{ USB_DEVICE(FTDI_VID, FTDI_ELV_USR_PID) },
+	{ USB_DEVICE(FTDI_VID, FTDI_ELV_MSM1_PID) },
+	{ USB_DEVICE(FTDI_VID, FTDI_ELV_KL100_PID) },
+	{ USB_DEVICE(FTDI_VID, FTDI_ELV_WS550_PID) },
+	{ USB_DEVICE(FTDI_VID, FTDI_ELV_EC3000_PID) },
+	{ USB_DEVICE(FTDI_VID, FTDI_ELV_WS888_PID) },
+	{ USB_DEVICE(FTDI_VID, FTDI_ELV_TWS550_PID) },
+	{ USB_DEVICE(FTDI_VID, FTDI_ELV_FEM_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_ELV_CLI7000_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_ELV_PPS7330_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_ELV_TFM100_PID) },
@@ -571,11 +583,17 @@ static struct usb_device_id id_table_combined [] = {
 	{ USB_DEVICE(FTDI_VID, FTDI_ELV_PCK100_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_ELV_RFP500_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_ELV_FS20SIG_PID) },
+	{ USB_DEVICE(FTDI_VID, FTDI_ELV_UTP8_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_ELV_WS300PC_PID) },
+	{ USB_DEVICE(FTDI_VID, FTDI_ELV_WS444PC_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_ELV_EM1010PC_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_ELV_HS485_PID) },
+	{ USB_DEVICE(FTDI_VID, FTDI_ELV_UMS100_PID) },
+	{ USB_DEVICE(FTDI_VID, FTDI_ELV_TFD128_PID) },
+	{ USB_DEVICE(FTDI_VID, FTDI_ELV_FM3RX_PID) },
+	{ USB_DEVICE(FTDI_VID, FTDI_ELV_WS777_PID) },
 	{ USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) },
 	{ USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) },
 	{ USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) },
@@ -697,6 +715,7 @@ static struct usb_device_id id_table_combined [] = {
 	{ USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) },
 	{ USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) },
 	{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO4x4_PID) },
+	{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_AD4USB_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DGQG_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DUSB_PID) },
 	{ USB_DEVICE(ALTI2_VID, ALTI2_N3_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index da92b4952ffb..c8951aeed983 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -38,6 +38,8 @@
 /* www.candapter.com Ewert Energy Systems CANdapter device */
 #define FTDI_CANDAPTER_PID	0x9F80	/* Product Id */
 
+#define FTDI_NXTCAM_PID		0xABB8	/* NXTCam for Mindstorms NXT */
+
 /* OOCDlink by Joern Kaipf <joernk@web.de>
  * (http://www.joernonline.de/dw/doku.php?id=start&idx=projects:oocdlink) */
 #define FTDI_OOCDLINK_PID	0xbaf8	/* Amontec JTAGkey */
@@ -161,22 +163,37 @@
 /*
  * ELV USB devices submitted by Christian Abt of ELV (www.elv.de).
  * All of these devices use FTDI's vendor ID (0x0403).
+ * Further IDs taken from ELV Windows .inf file.
  *
  * The previously included PID for the UO 100 module was incorrect.
  * In fact, that PID was for ELV's UR 100 USB-RS232 converter (0xFB58).
  *
  * Armin Laeuger originally sent the PID for the UM 100 module.
  */
+#define FTDI_ELV_USR_PID	0xE000	/* ELV Universal-Sound-Recorder */
+#define FTDI_ELV_MSM1_PID	0xE001	/* ELV Mini-Sound-Modul */
+#define FTDI_ELV_KL100_PID	0xE002	/* ELV Kfz-Leistungsmesser KL 100 */
+#define FTDI_ELV_WS550_PID	0xE004	/* WS 550 */
+#define FTDI_ELV_EC3000_PID	0xE006	/* ENERGY CONTROL 3000 USB */
+#define FTDI_ELV_WS888_PID	0xE008	/* WS 888 */
+#define FTDI_ELV_TWS550_PID	0xE009	/* Technoline WS 550 */
+#define FTDI_ELV_FEM_PID	0xE00A	/* Funk Energie Monitor */
 #define FTDI_ELV_FHZ1300PC_PID	0xE0E8	/* FHZ 1300 PC */
 #define FTDI_ELV_WS500_PID	0xE0E9	/* PC-Wetterstation (WS 500) */
 #define FTDI_ELV_HS485_PID	0xE0EA	/* USB to RS-485 adapter */
+#define FTDI_ELV_UMS100_PID	0xE0EB	/* ELV USB Master-Slave Schaltsteckdose UMS 100 */
+#define FTDI_ELV_TFD128_PID	0xE0EC	/* ELV Temperatur-Feuchte-Datenlogger TFD 128 */
+#define FTDI_ELV_FM3RX_PID	0xE0ED	/* ELV Messwertuebertragung FM3 RX */
+#define FTDI_ELV_WS777_PID	0xE0EE	/* Conrad WS 777 */
 #define FTDI_ELV_EM1010PC_PID	0xE0EF	/* Engery monitor EM 1010 PC */
 #define FTDI_ELV_CSI8_PID	0xE0F0	/* Computer-Schalt-Interface (CSI 8) */
 #define FTDI_ELV_EM1000DL_PID	0xE0F1	/* PC-Datenlogger fuer Energiemonitor (EM 1000 DL) */
 #define FTDI_ELV_PCK100_PID	0xE0F2	/* PC-Kabeltester (PCK 100) */
 #define FTDI_ELV_RFP500_PID	0xE0F3	/* HF-Leistungsmesser (RFP 500) */
 #define FTDI_ELV_FS20SIG_PID	0xE0F4	/* Signalgeber (FS 20 SIG) */
+#define FTDI_ELV_UTP8_PID	0xE0F5	/* ELV UTP 8 */
 #define FTDI_ELV_WS300PC_PID	0xE0F6	/* PC-Wetterstation (WS 300 PC) */
+#define FTDI_ELV_WS444PC_PID	0xE0F7	/* Conrad WS 444 PC */
 #define FTDI_PHI_FISCO_PID	0xE40B	/* PHI Fisco USB to Serial cable */
 #define FTDI_ELV_UAD8_PID	0xF068	/* USB-AD-Wandler (UAD 8) */
 #define FTDI_ELV_UDA7_PID	0xF069	/* USB-DA-Wandler (UDA 7) */
@@ -968,6 +985,7 @@
 #define PAPOUCH_VID		0x5050	/* Vendor ID */
 #define PAPOUCH_TMU_PID		0x0400	/* TMU USB Thermometer */
 #define PAPOUCH_QUIDO4x4_PID	0x0900	/* Quido 4/4 Module */
+#define PAPOUCH_AD4USB_PID	0x8003	/* AD4USB Measurement Module */
 
 /*
  * Marvell SheevaPlug
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index ac1b6449fb6a..3eb6143bb646 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -298,6 +298,7 @@ static struct usb_device_id id_table [] = {
 	{ USB_DEVICE(0x1199, 0x68A3), 	/* Sierra Wireless Direct IP modems */
 	  .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
 	},
+	{ USB_DEVICE(0x413C, 0x08133) }, /* Dell Computer Corp. Wireless 5720 VZW Mobile Broadband (EVDO Rev-A) Minicard GPS Port */
 
 	{ }
 };
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index c932f9053188..49575fba3756 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -941,7 +941,7 @@ UNUSUAL_DEV( 0x07ab, 0xfccd, 0x0000, 0x9999,
 UNUSUAL_DEV( 0x07af, 0x0004, 0x0100, 0x0133,
 		"Microtech",
 		"USB-SCSI-DB25",
-		US_SC_SCSI, US_PR_BULK, usb_stor_euscsi_init,
+		US_SC_DEVICE, US_PR_DEVICE, usb_stor_euscsi_init,
 		US_FL_SCM_MULT_TARG ),
 
 UNUSUAL_DEV( 0x07af, 0x0005, 0x0100, 0x0100,