Diffstat (limited to 'drivers')
 drivers/ata/libata-core.c           |   2
 drivers/ata/libata-eh.c             |  21
 drivers/ata/pata_cs5535.c           |   1
 drivers/ata/pata_cs5536.c           |   1
 drivers/ata/pata_pcmcia.c           |   1
 drivers/ata/pata_sch.c              |   2
 drivers/dca/dca-core.c              |   2
 drivers/dma/ioat_dma.c              |  11
 drivers/dma/iovlock.c               |  17
 drivers/gpu/drm/drm_drv.c           |  10
 drivers/gpu/drm/drm_irq.c           |  80
 drivers/gpu/drm/drm_lock.c          |   9
 drivers/gpu/drm/drm_stub.c          |   1
 drivers/gpu/drm/i915/i915_dma.c     |  16
 drivers/gpu/drm/i915/i915_drv.h     |  19
 drivers/gpu/drm/i915/i915_gem.c     |   8
 drivers/gpu/drm/i915/i915_irq.c     | 383
 drivers/gpu/drm/i915/i915_reg.h     |   3
 drivers/gpu/drm/i915/i915_suspend.c |   9
 drivers/gpu/drm/radeon/radeon_cp.c  |  15
 drivers/gpu/drm/radeon/radeon_drv.h |   2
 drivers/net/mlx4/en_netdev.c        |   2
 drivers/net/sfc/ethtool.c           |   4
 drivers/net/usb/hso.c               |  12
 drivers/ssb/Kconfig                 |   5
 drivers/watchdog/booke_wdt.c        |   5
 26 files changed, 108 insertions(+), 533 deletions(-)
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 0cd3ad49713..4214bfb13bb 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -612,7 +612,7 @@ u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
 	if (tf->flags & ATA_TFLAG_LBA48) {
 		block |= (u64)tf->hob_lbah << 40;
 		block |= (u64)tf->hob_lbam << 32;
-		block |= tf->hob_lbal << 24;
+		block |= (u64)tf->hob_lbal << 24;
 	} else
 		block |= (tf->device & 0xf) << 24;
 
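The one-line fix above addresses a C integer-promotion bug: hob_lbal is a u8, so the uncast shift is evaluated in 32-bit signed arithmetic before being OR-ed into the 64-bit block, and when its top bit is set the sign bit smears across the high LBA bits. A standalone userspace sketch of the failure mode (toy values, not part of the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t hob_lbal = 0x80;	/* any value with the top bit set */
	uint64_t block = ((uint64_t)0x12 << 40) | ((uint64_t)0x34 << 32);

	/* what the uncast expression effectively does on common compilers:
	 * the u8 promotes to a 32-bit int, the shifted value has the sign
	 * bit set, and converting that int to u64 sign-extends it */
	int32_t promoted = (int32_t)((uint32_t)hob_lbal << 24);
	block |= (uint64_t)(int64_t)promoted;	/* smears 1s over bits 63:31 */
	printf("no cast:   0x%016llx\n", (unsigned long long)block);

	/* the fix: widen to 64 bits before shifting */
	block = ((uint64_t)0x12 << 40) | ((uint64_t)0x34 << 32)
	      | ((uint64_t)hob_lbal << 24);
	printf("with cast: 0x%016llx\n", (unsigned long long)block);
	return 0;
}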
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 8077bdf5d30..32da9a93ce4 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -610,9 +610,6 @@ void ata_scsi_error(struct Scsi_Host *host)
 			if (ata_ncq_enabled(dev))
 				ehc->saved_ncq_enabled |= 1 << devno;
 		}
-
-		/* set last reset timestamp to some time in the past */
-		ehc->last_reset = jiffies - 60 * HZ;
 	}
 
 	ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
@@ -2281,17 +2278,21 @@ int ata_eh_reset(struct ata_link *link, int classify,
 	if (link->flags & ATA_LFLAG_NO_SRST)
 		softreset = NULL;
 
-	now = jiffies;
-	deadline = ata_deadline(ehc->last_reset, ATA_EH_RESET_COOL_DOWN);
-	if (time_before(now, deadline))
-		schedule_timeout_uninterruptible(deadline - now);
+	/* make sure each reset attempt is at least COOL_DOWN apart */
+	if (ehc->i.flags & ATA_EHI_DID_RESET) {
+		now = jiffies;
+		WARN_ON(time_after(ehc->last_reset, now));
+		deadline = ata_deadline(ehc->last_reset,
+					ATA_EH_RESET_COOL_DOWN);
+		if (time_before(now, deadline))
+			schedule_timeout_uninterruptible(deadline - now);
+	}
 
 	spin_lock_irqsave(ap->lock, flags);
 	ap->pflags |= ATA_PFLAG_RESETTING;
 	spin_unlock_irqrestore(ap->lock, flags);
 
 	ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
-	ehc->last_reset = jiffies;
 
 	ata_link_for_each_dev(dev, link) {
 		/* If we issue an SRST then an ATA drive (not ATAPI)
@@ -2379,7 +2380,6 @@ int ata_eh_reset(struct ata_link *link, int classify,
 	/*
 	 * Perform reset
 	 */
-	ehc->last_reset = jiffies;
 	if (ata_is_host_link(link))
 		ata_eh_freeze_port(ap);
 
@@ -2391,6 +2391,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
 			    reset == softreset ? "soft" : "hard");
 
 	/* mark that this EH session started with reset */
+	ehc->last_reset = jiffies;
 	if (reset == hardreset)
 		ehc->i.flags |= ATA_EHI_DID_HARDRESET;
 	else
@@ -2535,7 +2536,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
 	ata_eh_done(link, NULL, ATA_EH_RESET);
 	if (slave)
 		ata_eh_done(slave, NULL, ATA_EH_RESET);
-	ehc->last_reset = jiffies;
+	ehc->last_reset = jiffies; /* update to completion time */
 	ehc->i.action |= ATA_EH_REVALIDATE;
 
 	rc = 0;
diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
index 1b2d4a0f5f7..8b236af84c2 100644
--- a/drivers/ata/pata_cs5535.c
+++ b/drivers/ata/pata_cs5535.c
@@ -72,7 +72,6 @@
 /**
  *	cs5535_cable_detect	-	detect cable type
  *	@ap: Port to detect on
- *	@deadline: deadline jiffies for the operation
  *
  *	Perform cable detection for ATA66 capable cable. Return a libata
  *	cable type.
diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
index 73f8332cb67..afed9297619 100644
--- a/drivers/ata/pata_cs5536.c
+++ b/drivers/ata/pata_cs5536.c
@@ -110,7 +110,6 @@ static inline int cs5536_write(struct pci_dev *pdev, int reg, int val)
 /**
  *	cs5536_cable_detect	-	detect cable type
  *	@ap: Port to detect on
- *	@deadline: deadline jiffies for the operation
  *
  *	Perform cable detection for ATA66 capable cable. Return a libata
  *	cable type.
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index 271cb64d429..64b2e2281ee 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -416,6 +416,7 @@ static struct pcmcia_device_id pcmcia_devices[] = {
 	PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209),
 	PCMCIA_DEVICE_PROD_ID12("STI", "Flash 5.0", 0xbf2df18d, 0x8cb57a0e),
 	PCMCIA_MFC_DEVICE_PROD_ID12(1, "SanDisk", "ConnectPlus", 0x7a954bd9, 0x74be00c6),
+	PCMCIA_DEVICE_PROD_ID2("Flash Card", 0x5a362506),
 	PCMCIA_DEVICE_NULL,
 };
 
diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c
index c8cc027789f..6aeeeeb3412 100644
--- a/drivers/ata/pata_sch.c
+++ b/drivers/ata/pata_sch.c
@@ -83,7 +83,7 @@ static struct ata_port_operations sch_pata_ops = {
 };
 
 static struct ata_port_info sch_port_info = {
-	.flags		= 0,
+	.flags		= ATA_FLAG_SLAVE_POSS,
 	.pio_mask	= ATA_PIO4,   /* pio0-4 */
 	.mwdma_mask	= ATA_MWDMA2, /* mwdma0-2 */
 	.udma_mask	= ATA_UDMA5,  /* udma0-5 */
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
index ec249d2db63..d883e1b8bb8 100644
--- a/drivers/dca/dca-core.c
+++ b/drivers/dca/dca-core.c
@@ -270,6 +270,6 @@ static void __exit dca_exit(void)
 	dca_sysfs_exit();
 }
 
-module_init(dca_init);
+subsys_initcall(dca_init);
 module_exit(dca_exit);
 
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index b0438c4f0c3..ecd743f7cc6 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -525,7 +525,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
 	}
 
 	hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
-	if (new->async_tx.callback) {
+	if (first->async_tx.callback) {
 		hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
 		if (first != new) {
 			/* move callback into to last desc */
@@ -617,7 +617,7 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
 	}
 
 	hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
-	if (new->async_tx.callback) {
+	if (first->async_tx.callback) {
 		hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
 		if (first != new) {
 			/* move callback into to last desc */
@@ -807,6 +807,12 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
 	struct ioat_desc_sw *desc, *_desc;
 	int in_use_descs = 0;
 
+	/* Before freeing channel resources first check
+	 * if they have been previously allocated for this channel.
+	 */
+	if (ioat_chan->desccount == 0)
+		return;
+
 	tasklet_disable(&ioat_chan->cleanup_task);
 	ioat_dma_memcpy_cleanup(ioat_chan);
 
@@ -869,6 +875,7 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
 	ioat_chan->last_completion = ioat_chan->completion_addr = 0;
 	ioat_chan->pending = 0;
 	ioat_chan->dmacount = 0;
+	ioat_chan->desccount = 0;
 	ioat_chan->watchdog_completion = 0;
 	ioat_chan->last_compl_desc_addr_hw = 0;
 	ioat_chan->watchdog_tcp_cookie =
diff --git a/drivers/dma/iovlock.c b/drivers/dma/iovlock.c
index e763d723e4c..9f6fe46a9b8 100644
--- a/drivers/dma/iovlock.c
+++ b/drivers/dma/iovlock.c
@@ -55,7 +55,6 @@ struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len)
 	int nr_iovecs = 0;
 	int iovec_len_used = 0;
 	int iovec_pages_used = 0;
-	long err;
 
 	/* don't pin down non-user-based iovecs */
 	if (segment_eq(get_fs(), KERNEL_DS))
@@ -72,23 +71,21 @@ struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len)
 	local_list = kmalloc(sizeof(*local_list)
 		+ (nr_iovecs * sizeof (struct dma_page_list))
 		+ (iovec_pages_used * sizeof (struct page*)), GFP_KERNEL);
-	if (!local_list) {
-		err = -ENOMEM;
+	if (!local_list)
 		goto out;
-	}
 
 	/* list of pages starts right after the page list array */
 	pages = (struct page **) &local_list->page_list[nr_iovecs];
 
+	local_list->nr_iovecs = 0;
+
 	for (i = 0; i < nr_iovecs; i++) {
 		struct dma_page_list *page_list = &local_list->page_list[i];
 
 		len -= iov[i].iov_len;
 
-		if (!access_ok(VERIFY_WRITE, iov[i].iov_base, iov[i].iov_len)) {
-			err = -EFAULT;
+		if (!access_ok(VERIFY_WRITE, iov[i].iov_base, iov[i].iov_len))
 			goto unpin;
-		}
 
 		page_list->nr_pages = num_pages_spanned(&iov[i]);
 		page_list->base_address = iov[i].iov_base;
@@ -109,10 +106,8 @@ struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len)
 					NULL);
 		up_read(&current->mm->mmap_sem);
 
-		if (ret != page_list->nr_pages) {
-			err = -ENOMEM;
+		if (ret != page_list->nr_pages)
 			goto unpin;
-		}
 
 		local_list->nr_iovecs = i + 1;
 	}
@@ -122,7 +117,7 @@ struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len)
 unpin:
 	dma_unpin_iovec_pages(local_list);
 out:
-	return ERR_PTR(err);
+	return NULL;
 }
 
 void dma_unpin_iovec_pages(struct dma_pinned_list *pinned_list)
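All four iovlock hunks serve one change: dma_pin_iovec_pages() now returns plain NULL on every failure path instead of ERR_PTR(err). Presumably the motivation (inferred from the diff, not stated in it) is that callers test the result with a simple NULL check, which an ERR_PTR() value would pass. A userspace mock of the kernel's ERR_PTR()/IS_ERR() convention illustrating that hazard (macros simplified from include/linux/err.h):

#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

int main(void)
{
	void *p = ERR_PTR(-12);	/* -ENOMEM encoded as a pointer */

	/* a caller that only tests for NULL would happily dereference p */
	if (p != NULL)
		printf("NULL check passes, but IS_ERR(p)=%d\n", IS_ERR(p));
	return 0;
}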
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 96f416afc3f..3ab1e9cc469 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -266,11 +266,19 @@ int drm_init(struct drm_driver *driver)
 	for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) {
 		pid = (struct pci_device_id *)&driver->pci_driver.id_table[i];
 
+		/* Loop around setting up a DRM device for each PCI device
+		 * matching our ID and device class.  If we had the internal
+		 * function that pci_get_subsys and pci_get_class used, we'd
+		 * be able to just pass pid in instead of doing a two-stage
+		 * thing.
+		 */
 		pdev = NULL;
-		/* pass back in pdev to account for multiple identical cards */
 		while ((pdev =
 			pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
 				       pid->subdevice, pdev)) != NULL) {
+			if ((pdev->class & pid->class_mask) != pid->class)
+				continue;
+
 			/* stealth mode requires a manual probe */
 			pci_dev_get(pdev);
 			drm_get_dev(pdev, pid, driver);
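The added class test reproduces the device-class filtering that pci_get_class() performs internally, as the new comment in the hunk explains. A standalone sketch of the mask semantics (the class codes below are illustrative examples, not taken from the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t class      = 0x030000;	/* base class 0x03: display */
	uint32_t class_mask = 0xff0000;	/* match on the base class only */
	uint32_t pdev_class = 0x030200;	/* a 3D controller */

	/* keep the device iff the masked class matches the id-table entry */
	if ((pdev_class & class_mask) == class)
		printf("matches: device is handled\n");
	else
		printf("skipped\n");
	return 0;
}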
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 212a94f715b..15c8dabc3e9 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -280,8 +280,6 @@ int drm_irq_uninstall(struct drm_device * dev)
 
 	drm_vblank_cleanup(dev);
 
-	dev->locked_tasklet_func = NULL;
-
 	return 0;
 }
 EXPORT_SYMBOL(drm_irq_uninstall);
@@ -699,81 +697,3 @@ void drm_handle_vblank(struct drm_device *dev, int crtc)
 	drm_vbl_send_signals(dev, crtc);
 }
 EXPORT_SYMBOL(drm_handle_vblank);
-
-/**
- * Tasklet wrapper function.
- *
- * \param data DRM device in disguise.
- *
- * Attempts to grab the HW lock and calls the driver callback on success. On
- * failure, leave the lock marked as contended so the callback can be called
- * from drm_unlock().
- */
-static void drm_locked_tasklet_func(unsigned long data)
-{
-	struct drm_device *dev = (struct drm_device *)data;
-	unsigned long irqflags;
-	void (*tasklet_func)(struct drm_device *);
-
-	spin_lock_irqsave(&dev->tasklet_lock, irqflags);
-	tasklet_func = dev->locked_tasklet_func;
-	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
-
-	if (!tasklet_func ||
-	    !drm_lock_take(&dev->lock,
-			   DRM_KERNEL_CONTEXT)) {
-		return;
-	}
-
-	dev->lock.lock_time = jiffies;
-	atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
-
-	spin_lock_irqsave(&dev->tasklet_lock, irqflags);
-	tasklet_func = dev->locked_tasklet_func;
-	dev->locked_tasklet_func = NULL;
-	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
-
-	if (tasklet_func != NULL)
-		tasklet_func(dev);
-
-	drm_lock_free(&dev->lock,
-		      DRM_KERNEL_CONTEXT);
-}
-
-/**
- * Schedule a tasklet to call back a driver hook with the HW lock held.
- *
- * \param dev DRM device.
- * \param func Driver callback.
- *
- * This is intended for triggering actions that require the HW lock from an
- * interrupt handler. The lock will be grabbed ASAP after the interrupt handler
- * completes. Note that the callback may be called from interrupt or process
- * context, it must not make any assumptions about this. Also, the HW lock will
- * be held with the kernel context or any client context.
- */
-void drm_locked_tasklet(struct drm_device *dev, void (*func)(struct drm_device *))
-{
-	unsigned long irqflags;
-	static DECLARE_TASKLET(drm_tasklet, drm_locked_tasklet_func, 0);
-
-	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ) ||
-	    test_bit(TASKLET_STATE_SCHED, &drm_tasklet.state))
-		return;
-
-	spin_lock_irqsave(&dev->tasklet_lock, irqflags);
-
-	if (dev->locked_tasklet_func) {
-		spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
-		return;
-	}
-
-	dev->locked_tasklet_func = func;
-
-	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
-
-	drm_tasklet.data = (unsigned long)dev;
-
-	tasklet_hi_schedule(&drm_tasklet);
-}
-EXPORT_SYMBOL(drm_locked_tasklet);
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index 888159e03d2..1cfa72031f8 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -154,8 +154,6 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
 int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
 	struct drm_lock *lock = data;
-	unsigned long irqflags;
-	void (*tasklet_func)(struct drm_device *);
 
 	if (lock->context == DRM_KERNEL_CONTEXT) {
 		DRM_ERROR("Process %d using kernel context %d\n",
@@ -163,13 +161,6 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
 		return -EINVAL;
 	}
 
-	spin_lock_irqsave(&dev->tasklet_lock, irqflags);
-	tasklet_func = dev->locked_tasklet_func;
-	dev->locked_tasklet_func = NULL;
-	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
-	if (tasklet_func != NULL)
-		tasklet_func(dev);
-
 	atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
 
 	/* kernel_context_switch isn't used by any of the x86 drm
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 141e33004a7..66c96ec6667 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -92,7 +92,6 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
 
 	spin_lock_init(&dev->count_lock);
 	spin_lock_init(&dev->drw_lock);
-	spin_lock_init(&dev->tasklet_lock);
 	spin_lock_init(&dev->lock.spinlock);
 	init_timer(&dev->timer);
 	mutex_init(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 256e22963ae..0d215e38606 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -154,6 +154,9 @@ static int i915_dma_cleanup(struct drm_device * dev)
 	if (I915_NEED_GFX_HWS(dev))
 		i915_free_hws(dev);
 
+	dev_priv->sarea = NULL;
+	dev_priv->sarea_priv = NULL;
+
 	return 0;
 }
 
@@ -442,7 +445,7 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
 
 	BEGIN_LP_RING(4);
 	OUT_RING(MI_STORE_DWORD_INDEX);
-	OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
+	OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
 	OUT_RING(dev_priv->counter);
 	OUT_RING(0);
 	ADVANCE_LP_RING();
@@ -573,7 +576,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
 
 	BEGIN_LP_RING(4);
 	OUT_RING(MI_STORE_DWORD_INDEX);
-	OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
+	OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
 	OUT_RING(dev_priv->counter);
 	OUT_RING(0);
 	ADVANCE_LP_RING();
@@ -608,7 +611,6 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
 			    struct drm_file *file_priv)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	u32 *hw_status = dev_priv->hw_status_page;
 	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
 	    dev_priv->sarea_priv;
 	drm_i915_batchbuffer_t *batch = data;
@@ -634,7 +636,7 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
 	mutex_unlock(&dev->struct_mutex);
 
 	if (sarea_priv)
-		sarea_priv->last_dispatch = (int)hw_status[5];
+		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
 	return ret;
 }
 
@@ -642,7 +644,6 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
 			  struct drm_file *file_priv)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	u32 *hw_status = dev_priv->hw_status_page;
 	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
 	    dev_priv->sarea_priv;
 	drm_i915_cmdbuffer_t *cmdbuf = data;
@@ -670,7 +671,7 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
 	}
 
 	if (sarea_priv)
-		sarea_priv->last_dispatch = (int)hw_status[5];
+		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
 	return 0;
 }
 
@@ -849,8 +850,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	 * be lost or delayed
 	 */
 	if (!IS_I945G(dev) && !IS_I945GM(dev) && !IS_I965GM(dev))
-		if (pci_enable_msi(dev->pdev))
-			DRM_ERROR("failed to enable MSI\n");
+		pci_enable_msi(dev->pdev);
 
 	intel_opregion_init(dev);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 572dcd0e3e0..ef1c0b8f8d0 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -88,13 +88,6 @@ struct mem_block {
 	struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
 };
 
-typedef struct _drm_i915_vbl_swap {
-	struct list_head head;
-	drm_drawable_t drw_id;
-	unsigned int pipe;
-	unsigned int sequence;
-} drm_i915_vbl_swap_t;
-
 struct opregion_header;
 struct opregion_acpi;
 struct opregion_swsci;
@@ -146,10 +139,6 @@ typedef struct drm_i915_private {
 	unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
 	int vblank_pipe;
 
-	spinlock_t swaps_lock;
-	drm_i915_vbl_swap_t vbl_swaps;
-	unsigned int swaps_pending;
-
 	struct intel_opregion opregion;
 
 	/* Register state */
@@ -157,6 +146,7 @@ typedef struct drm_i915_private {
 	u32 saveDSPACNTR;
 	u32 saveDSPBCNTR;
 	u32 saveDSPARB;
+	u32 saveRENDERSTANDBY;
 	u32 savePIPEACONF;
 	u32 savePIPEBCONF;
 	u32 savePIPEASRC;
@@ -241,9 +231,6 @@ typedef struct drm_i915_private {
 	u8 saveDACDATA[256*3]; /* 256 3-byte colors */
 	u8 saveCR[37];
 
-	/** Work task for vblank-related ring access */
-	struct work_struct vblank_work;
-
 	struct {
 		struct drm_mm gtt_space;
 
@@ -444,7 +431,6 @@ extern int i915_irq_wait(struct drm_device *dev, void *data,
 void i915_user_irq_get(struct drm_device *dev);
 void i915_user_irq_put(struct drm_device *dev);
 
-extern void i915_vblank_work_handler(struct work_struct *work);
 extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
 extern void i915_driver_irq_preinstall(struct drm_device * dev);
 extern int i915_driver_irq_postinstall(struct drm_device *dev);
@@ -622,8 +608,9 @@ static inline void opregion_enable_asle(struct drm_device *dev) { return; }
  * The area from dword 0x20 to 0x3ff is available for driver usage.
  */
 #define READ_HWSP(dev_priv, reg)  (((volatile u32*)(dev_priv->hw_status_page))[reg])
-#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, 5)
+#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
 #define I915_GEM_HWS_INDEX	0x20
+#define I915_BREADCRUMB_INDEX	0x21
 
 extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b0ec73fa6a9..6b4a2bd2064 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1455,11 +1455,9 @@ i915_gem_object_set_domain_range(struct drm_gem_object *obj,
 					  read_domains, write_domain);
 
 	/* Wait on any GPU rendering to the object to be flushed. */
-	if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) {
-		ret = i915_gem_object_wait_rendering(obj);
-		if (ret)
-			return ret;
-	}
+	ret = i915_gem_object_wait_rendering(obj);
+	if (ret)
+		return ret;
 
 	if (obj_priv->page_cpu_valid == NULL) {
 		obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 26f48932a51..82752d6177a 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -80,211 +80,6 @@ i915_pipe_enabled(struct drm_device *dev, int pipe)
 	return 0;
 }
 
-/**
- * Emit blits for scheduled buffer swaps.
- *
- * This function will be called with the HW lock held.
- * Because this function must grab the ring mutex (dev->struct_mutex),
- * it can no longer run at soft irq time. We'll fix this when we do
- * the DRI2 swap buffer work.
- */
-static void i915_vblank_tasklet(struct drm_device *dev)
-{
-	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	unsigned long irqflags;
-	struct list_head *list, *tmp, hits, *hit;
-	int nhits, nrects, slice[2], upper[2], lower[2], i;
-	unsigned counter[2];
-	struct drm_drawable_info *drw;
-	drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
-	u32 cpp = dev_priv->cpp;
-	u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD |
-				XY_SRC_COPY_BLT_WRITE_ALPHA |
-				XY_SRC_COPY_BLT_WRITE_RGB)
-			     : XY_SRC_COPY_BLT_CMD;
-	u32 src_pitch = sarea_priv->pitch * cpp;
-	u32 dst_pitch = sarea_priv->pitch * cpp;
-	u32 ropcpp = (0xcc << 16) | ((cpp - 1) << 24);
-	RING_LOCALS;
-
-	mutex_lock(&dev->struct_mutex);
-
-	if (IS_I965G(dev) && sarea_priv->front_tiled) {
-		cmd |= XY_SRC_COPY_BLT_DST_TILED;
-		dst_pitch >>= 2;
-	}
-	if (IS_I965G(dev) && sarea_priv->back_tiled) {
-		cmd |= XY_SRC_COPY_BLT_SRC_TILED;
-		src_pitch >>= 2;
-	}
-
-	counter[0] = drm_vblank_count(dev, 0);
-	counter[1] = drm_vblank_count(dev, 1);
-
-	DRM_DEBUG("\n");
-
-	INIT_LIST_HEAD(&hits);
-
-	nhits = nrects = 0;
-
-	spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
-
-	/* Find buffer swaps scheduled for this vertical blank */
-	list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
-		drm_i915_vbl_swap_t *vbl_swap =
-			list_entry(list, drm_i915_vbl_swap_t, head);
-		int pipe = vbl_swap->pipe;
-
-		if ((counter[pipe] - vbl_swap->sequence) > (1<<23))
-			continue;
-
-		list_del(list);
-		dev_priv->swaps_pending--;
-		drm_vblank_put(dev, pipe);
-
-		spin_unlock(&dev_priv->swaps_lock);
-		spin_lock(&dev->drw_lock);
-
-		drw = drm_get_drawable_info(dev, vbl_swap->drw_id);
-
-		list_for_each(hit, &hits) {
-			drm_i915_vbl_swap_t *swap_cmp =
-				list_entry(hit, drm_i915_vbl_swap_t, head);
-			struct drm_drawable_info *drw_cmp =
-				drm_get_drawable_info(dev, swap_cmp->drw_id);
-
-			/* Make sure both drawables are still
-			 * around and have some rectangles before
-			 * we look inside to order them for the
-			 * blts below.
-			 */
-			if (drw_cmp && drw_cmp->num_rects > 0 &&
-			    drw && drw->num_rects > 0 &&
-			    drw_cmp->rects[0].y1 > drw->rects[0].y1) {
-				list_add_tail(list, hit);
-				break;
-			}
-		}
-
-		spin_unlock(&dev->drw_lock);
-
-		/* List of hits was empty, or we reached the end of it */
-		if (hit == &hits)
-			list_add_tail(list, hits.prev);
-
-		nhits++;
-
-		spin_lock(&dev_priv->swaps_lock);
-	}
-
-	if (nhits == 0) {
-		spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
-		mutex_unlock(&dev->struct_mutex);
-		return;
-	}
-
-	spin_unlock(&dev_priv->swaps_lock);
-
-	i915_kernel_lost_context(dev);
-
-	if (IS_I965G(dev)) {
-		BEGIN_LP_RING(4);
-
-		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
-		OUT_RING(0);
-		OUT_RING(((sarea_priv->width - 1) & 0xffff) | ((sarea_priv->height - 1) << 16));
-		OUT_RING(0);
-		ADVANCE_LP_RING();
-	} else {
-		BEGIN_LP_RING(6);
-
-		OUT_RING(GFX_OP_DRAWRECT_INFO);
-		OUT_RING(0);
-		OUT_RING(0);
-		OUT_RING(sarea_priv->width | sarea_priv->height << 16);
-		OUT_RING(sarea_priv->width | sarea_priv->height << 16);
-		OUT_RING(0);
-
-		ADVANCE_LP_RING();
-	}
-
-	sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT;
-
-	upper[0] = upper[1] = 0;
-	slice[0] = max(sarea_priv->pipeA_h / nhits, 1);
-	slice[1] = max(sarea_priv->pipeB_h / nhits, 1);
-	lower[0] = sarea_priv->pipeA_y + slice[0];
-	lower[1] = sarea_priv->pipeB_y + slice[0];
-
-	spin_lock(&dev->drw_lock);
-
-	/* Emit blits for buffer swaps, partitioning both outputs into as many
-	 * slices as there are buffer swaps scheduled in order to avoid tearing
-	 * (based on the assumption that a single buffer swap would always
-	 * complete before scanout starts).
-	 */
-	for (i = 0; i++ < nhits;
-	     upper[0] = lower[0], lower[0] += slice[0],
-	     upper[1] = lower[1], lower[1] += slice[1]) {
-		if (i == nhits)
-			lower[0] = lower[1] = sarea_priv->height;
-
-		list_for_each(hit, &hits) {
-			drm_i915_vbl_swap_t *swap_hit =
-				list_entry(hit, drm_i915_vbl_swap_t, head);
-			struct drm_clip_rect *rect;
-			int num_rects, pipe;
-			unsigned short top, bottom;
-
-			drw = drm_get_drawable_info(dev, swap_hit->drw_id);
-
-			/* The drawable may have been destroyed since
-			 * the vblank swap was queued
-			 */
-			if (!drw)
-				continue;
-
-			rect = drw->rects;
-			pipe = swap_hit->pipe;
-			top = upper[pipe];
-			bottom = lower[pipe];
-
-			for (num_rects = drw->num_rects; num_rects--; rect++) {
-				int y1 = max(rect->y1, top);
-				int y2 = min(rect->y2, bottom);
-
-				if (y1 >= y2)
-					continue;
-
-				BEGIN_LP_RING(8);
-
-				OUT_RING(cmd);
-				OUT_RING(ropcpp | dst_pitch);
-				OUT_RING((y1 << 16) | rect->x1);
-				OUT_RING((y2 << 16) | rect->x2);
-				OUT_RING(sarea_priv->front_offset);
-				OUT_RING((y1 << 16) | rect->x1);
-				OUT_RING(src_pitch);
-				OUT_RING(sarea_priv->back_offset);
-
-				ADVANCE_LP_RING();
-			}
-		}
-	}
-
-	spin_unlock_irqrestore(&dev->drw_lock, irqflags);
-	mutex_unlock(&dev->struct_mutex);
-
-	list_for_each_safe(hit, tmp, &hits) {
-		drm_i915_vbl_swap_t *swap_hit =
-			list_entry(hit, drm_i915_vbl_swap_t, head);
-
-		list_del(hit);
-
-		drm_free(swap_hit, sizeof(*swap_hit), DRM_MEM_DRIVER);
-	}
-}
-
 /* Called from drm generic code, passed a 'crtc', which
  * we use as a pipe index
  */
@@ -322,40 +117,6 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
 	return count;
 }
 
-void
-i915_vblank_work_handler(struct work_struct *work)
-{
-	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
-						    vblank_work);
-	struct drm_device *dev = dev_priv->dev;
-	unsigned long irqflags;
-
-	if (dev->lock.hw_lock == NULL) {
-		i915_vblank_tasklet(dev);
-		return;
-	}
-
-	spin_lock_irqsave(&dev->tasklet_lock, irqflags);
-	dev->locked_tasklet_func = i915_vblank_tasklet;
-	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
-
-	/* Try to get the lock now, if this fails, the lock
-	 * holder will execute the tasklet during unlock
-	 */
-	if (!drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT))
-		return;
-
-	dev->lock.lock_time = jiffies;
-	atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
-
-	spin_lock_irqsave(&dev->tasklet_lock, irqflags);
-	dev->locked_tasklet_func = NULL;
-	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
-
-	i915_vblank_tasklet(dev);
-	drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT);
-}
-
 irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 {
 	struct drm_device *dev = (struct drm_device *) arg;
@@ -433,9 +194,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 	if (iir & I915_ASLE_INTERRUPT)
 		opregion_asle_intr(dev);
 
-	if (vblank && dev_priv->swaps_pending > 0)
-		schedule_work(&dev_priv->vblank_work);
-
 	return IRQ_HANDLED;
 }
 
@@ -454,12 +212,10 @@ static int i915_emit_irq(struct drm_device * dev)
 	if (dev_priv->sarea_priv)
 		dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
 
-	BEGIN_LP_RING(6);
+	BEGIN_LP_RING(4);
 	OUT_RING(MI_STORE_DWORD_INDEX);
-	OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
+	OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
 	OUT_RING(dev_priv->counter);
-	OUT_RING(0);
-	OUT_RING(0);
 	OUT_RING(MI_USER_INTERRUPT);
 	ADVANCE_LP_RING();
 
@@ -696,123 +452,21 @@ int i915_vblank_pipe_get(struct drm_device *dev, void *data,
 int i915_vblank_swap(struct drm_device *dev, void *data,
 		     struct drm_file *file_priv)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	drm_i915_vblank_swap_t *swap = data;
-	drm_i915_vbl_swap_t *vbl_swap, *vbl_old;
-	unsigned int pipe, seqtype, curseq;
-	unsigned long irqflags;
-	struct list_head *list;
-	int ret;
-
-	if (!dev_priv || !dev_priv->sarea_priv) {
-		DRM_ERROR("%s called with no initialization\n", __func__);
-		return -EINVAL;
-	}
-
-	if (dev_priv->sarea_priv->rotation) {
-		DRM_DEBUG("Rotation not supported\n");
-		return -EINVAL;
-	}
-
-	if (swap->seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE |
-			     _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)) {
-		DRM_ERROR("Invalid sequence type 0x%x\n", swap->seqtype);
-		return -EINVAL;
-	}
-
-	pipe = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
-
-	seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);
-
-	if (!(dev_priv->vblank_pipe & (1 << pipe))) {
-		DRM_ERROR("Invalid pipe %d\n", pipe);
-		return -EINVAL;
-	}
-
-	spin_lock_irqsave(&dev->drw_lock, irqflags);
-
-	if (!drm_get_drawable_info(dev, swap->drawable)) {
-		spin_unlock_irqrestore(&dev->drw_lock, irqflags);
-		DRM_DEBUG("Invalid drawable ID %d\n", swap->drawable);
-		return -EINVAL;
-	}
-
-	spin_unlock_irqrestore(&dev->drw_lock, irqflags);
-
-	/*
-	 * We take the ref here and put it when the swap actually completes
-	 * in the tasklet.
+	/* The delayed swap mechanism was fundamentally racy, and has been
+	 * removed.  The model was that the client requested a delayed flip/swap
+	 * from the kernel, then waited for vblank before continuing to perform
+	 * rendering.  The problem was that the kernel might wake the client
+	 * up before it dispatched the vblank swap (since the lock has to be
+	 * held while touching the ringbuffer), in which case the client would
+	 * clear and start the next frame before the swap occurred, and
+	 * flicker would occur in addition to likely missing the vblank.
+	 *
+	 * In the absence of this ioctl, userland falls back to a correct path
+	 * of waiting for a vblank, then dispatching the swap on its own.
+	 * Context switching to userland and back is plenty fast enough for
+	 * meeting the requirements of vblank swapping.
 	 */
-	ret = drm_vblank_get(dev, pipe);
-	if (ret)
-		return ret;
-	curseq = drm_vblank_count(dev, pipe);
-
-	if (seqtype == _DRM_VBLANK_RELATIVE)
-		swap->sequence += curseq;
-
-	if ((curseq - swap->sequence) <= (1<<23)) {
-		if (swap->seqtype & _DRM_VBLANK_NEXTONMISS) {
-			swap->sequence = curseq + 1;
-		} else {
-			DRM_DEBUG("Missed target sequence\n");
-			drm_vblank_put(dev, pipe);
-			return -EINVAL;
-		}
-	}
-
-	vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);
-
-	if (!vbl_swap) {
-		DRM_ERROR("Failed to allocate memory to queue swap\n");
-		drm_vblank_put(dev, pipe);
-		return -ENOMEM;
-	}
-
-	vbl_swap->drw_id = swap->drawable;
-	vbl_swap->pipe = pipe;
-	vbl_swap->sequence = swap->sequence;
-
-	spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
-
-	list_for_each(list, &dev_priv->vbl_swaps.head) {
-		vbl_old = list_entry(list, drm_i915_vbl_swap_t, head);
-
-		if (vbl_old->drw_id == swap->drawable &&
-		    vbl_old->pipe == pipe &&
-		    vbl_old->sequence == swap->sequence) {
-			spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
-			drm_vblank_put(dev, pipe);
-			drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
-			DRM_DEBUG("Already scheduled\n");
-			return 0;
-		}
-	}
-
-	if (dev_priv->swaps_pending >= 10) {
-		DRM_DEBUG("Too many swaps queued\n");
-		DRM_DEBUG(" pipe 0: %d pipe 1: %d\n",
-			  drm_vblank_count(dev, 0),
-			  drm_vblank_count(dev, 1));
-
-		list_for_each(list, &dev_priv->vbl_swaps.head) {
-			vbl_old = list_entry(list, drm_i915_vbl_swap_t, head);
-			DRM_DEBUG("\tdrw %x pipe %d seq %x\n",
-				  vbl_old->drw_id, vbl_old->pipe,
-				  vbl_old->sequence);
-		}
-		spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
-		drm_vblank_put(dev, pipe);
-		drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
-		return -EBUSY;
-	}
-
-	list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head);
-	dev_priv->swaps_pending++;
-
-	spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
-
-	return 0;
+	return -EINVAL;
 }
 
 /* drm_dma.h hooks
@@ -831,11 +485,6 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	int ret, num_pipes = 2;
 
-	spin_lock_init(&dev_priv->swaps_lock);
-	INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
-	INIT_WORK(&dev_priv->vblank_work, i915_vblank_work_handler);
-	dev_priv->swaps_pending = 0;
-
 	/* Set initial unmasked IRQs to just the selected vblank pipes. */
 	dev_priv->irq_mask_reg = ~0;
 
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 5c2d9f206d0..0e476eba36e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -527,6 +527,9 @@
 #define C0DRB3			0x10206
 #define C1DRB3			0x10606
 
+/** GM965 GM45 render standby register */
+#define MCHBAR_RENDER_STANDBY	0x111B8
+
 /*
  * Overlay regs
  */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 603fe742ccd..5ddc6e595c0 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -240,6 +240,10 @@ int i915_save_state(struct drm_device *dev)
 
 	pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
 
+	/* Render Standby */
+	if (IS_I965G(dev) && IS_MOBILE(dev))
+		dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY);
+
 	/* Display arbitration control */
 	dev_priv->saveDSPARB = I915_READ(DSPARB);
 
@@ -365,6 +369,11 @@ int i915_restore_state(struct drm_device *dev)
 
 	pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
 
+	/* Render Standby */
+	if (IS_I965G(dev) && IS_MOBILE(dev))
+		I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY);
+
+	/* Display arbitration */
 	I915_WRITE(DSPARB, dev_priv->saveDSPARB);
 
 	/* Pipe & plane A info */
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 073894824e6..abdc1ae3846 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -1751,6 +1751,12 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
 	else
 		dev_priv->flags |= RADEON_IS_PCI;
 
+	ret = drm_addmap(dev, drm_get_resource_start(dev, 2),
+			 drm_get_resource_len(dev, 2), _DRM_REGISTERS,
+			 _DRM_READ_ONLY | _DRM_DRIVER, &dev_priv->mmio);
+	if (ret != 0)
+		return ret;
+
 	DRM_DEBUG("%s card detected\n",
 		  ((dev_priv->flags & RADEON_IS_AGP) ? "AGP" : (((dev_priv->flags & RADEON_IS_PCIE) ? "PCIE" : "PCI"))));
 	return ret;
@@ -1767,12 +1773,6 @@ int radeon_driver_firstopen(struct drm_device *dev)
 
 	dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE;
 
-	ret = drm_addmap(dev, drm_get_resource_start(dev, 2),
-			 drm_get_resource_len(dev, 2), _DRM_REGISTERS,
-			 _DRM_READ_ONLY, &dev_priv->mmio);
-	if (ret != 0)
-		return ret;
-
 	dev_priv->fb_aper_offset = drm_get_resource_start(dev, 0);
 	ret = drm_addmap(dev, dev_priv->fb_aper_offset,
 			 drm_get_resource_len(dev, 0), _DRM_FRAME_BUFFER,
@@ -1788,6 +1788,9 @@ int radeon_driver_unload(struct drm_device *dev)
 	drm_radeon_private_t *dev_priv = dev->dev_private;
 
 	DRM_DEBUG("\n");
+
+	drm_rmmap(dev, dev_priv->mmio);
+
 	drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
 
 	dev->dev_private = NULL;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 02f5575ba39..7a183789be9 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -287,7 +287,6 @@ typedef struct drm_radeon_private {
 	unsigned long gart_textures_offset;
 
 	drm_local_map_t *sarea;
-	drm_local_map_t *mmio;
 	drm_local_map_t *cp_ring;
 	drm_local_map_t *ring_rptr;
 	drm_local_map_t *gart_textures;
@@ -318,6 +317,7 @@ typedef struct drm_radeon_private {
 
 	int num_gb_pipes;
 	int track_flush;
+	drm_local_map_t *mmio;
 } drm_radeon_private_t;
 
 typedef struct drm_radeon_buf_priv {
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index a339afbeed3..a3f732418c4 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -706,7 +706,7 @@ tx_err:
 	mlx4_en_release_rss_steer(priv);
 rx_err:
 	for (i = 0; i < priv->rx_ring_num; i++)
-		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[rx_index]);
+		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
 cq_err:
 	while (rx_index--)
 		mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index fa98af58223..cd0d0873d97 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -174,8 +174,8 @@ static struct efx_ethtool_stat efx_ethtool_stats[] = {
 
 /* EEPROM range with gPXE configuration */
 #define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB
-#define EFX_ETHTOOL_EEPROM_MIN 0x100U
-#define EFX_ETHTOOL_EEPROM_MAX 0x400U
+#define EFX_ETHTOOL_EEPROM_MIN 0x800U
+#define EFX_ETHTOOL_EEPROM_MAX 0x1800U
 
 /**************************************************************************
  *
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 1164c52e2c0..8e90891f0e4 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2184,19 +2184,20 @@ static void hso_create_rfkill(struct hso_device *hso_dev,
 			      struct usb_interface *interface)
 {
 	struct hso_net *hso_net = dev2net(hso_dev);
-	struct device *dev = hso_dev->dev;
+	struct device *dev = &hso_net->net->dev;
 	char *rfkn;
 
 	hso_net->rfkill = rfkill_allocate(&interface_to_usbdev(interface)->dev,
-					  RFKILL_TYPE_WLAN);
+					  RFKILL_TYPE_WWAN);
 	if (!hso_net->rfkill) {
-		dev_err(dev, "%s - Out of memory", __func__);
+		dev_err(dev, "%s - Out of memory\n", __func__);
 		return;
 	}
 	rfkn = kzalloc(20, GFP_KERNEL);
 	if (!rfkn) {
 		rfkill_free(hso_net->rfkill);
-		dev_err(dev, "%s - Out of memory", __func__);
+		hso_net->rfkill = NULL;
+		dev_err(dev, "%s - Out of memory\n", __func__);
 		return;
 	}
 	snprintf(rfkn, 20, "hso-%d",
@@ -2209,7 +2210,8 @@ static void hso_create_rfkill(struct hso_device *hso_dev,
 		kfree(rfkn);
 		hso_net->rfkill->name = NULL;
 		rfkill_free(hso_net->rfkill);
-		dev_err(dev, "%s - Failed to register rfkill", __func__);
+		hso_net->rfkill = NULL;
+		dev_err(dev, "%s - Failed to register rfkill\n", __func__);
 		return;
 	}
 }
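Both hso hunks pair rfkill_free() with resetting hso_net->rfkill to NULL, so no dangling pointer survives the error path. A minimal sketch of the hazard being closed (toy types standing in for the hso structures; illustrative, not the driver's code):

#include <stdlib.h>

struct net_ctx {
	void *rfkill;
};

static void teardown(struct net_ctx *ctx)
{
	/* without the = NULL below, a second teardown() would free
	 * ctx->rfkill again through the stale pointer */
	free(ctx->rfkill);
	ctx->rfkill = NULL;	/* safe: free(NULL) is a no-op */
}

int main(void)
{
	struct net_ctx ctx = { .rfkill = malloc(16) };

	teardown(&ctx);
	teardown(&ctx);		/* now harmless */
	return 0;
}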
diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
index 307b1f62d94..b1b947edcf0 100644
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -1,10 +1,11 @@
1menu "Sonics Silicon Backplane"
2
3config SSB_POSSIBLE 1config SSB_POSSIBLE
4 bool 2 bool
5 depends on HAS_IOMEM && HAS_DMA 3 depends on HAS_IOMEM && HAS_DMA
6 default y 4 default y
7 5
6menu "Sonics Silicon Backplane"
7 depends on SSB_POSSIBLE
8
8config SSB 9config SSB
9 tristate "Sonics Silicon Backplane support" 10 tristate "Sonics Silicon Backplane support"
10 depends on SSB_POSSIBLE 11 depends on SSB_POSSIBLE
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c
index c3b78a76f17..225398fd504 100644
--- a/drivers/watchdog/booke_wdt.c
+++ b/drivers/watchdog/booke_wdt.c
@@ -42,8 +42,10 @@ u32 booke_wdt_period = WDT_PERIOD_DEFAULT;
 
 #ifdef	CONFIG_FSL_BOOKE
 #define WDTP(x)		((((63-x)&0x3)<<30)|(((63-x)&0x3c)<<15))
+#define WDTP_MASK	(WDTP(0))
 #else
 #define WDTP(x)		(TCR_WP(x))
+#define WDTP_MASK	(TCR_WP_MASK)
 #endif
 
 static DEFINE_SPINLOCK(booke_wdt_lock);
@@ -65,6 +67,7 @@ static void __booke_wdt_enable(void *data)
 	/* clear status before enabling watchdog */
 	__booke_wdt_ping(NULL);
 	val = mfspr(SPRN_TCR);
+	val &= ~WDTP_MASK;
 	val |= (TCR_WIE|TCR_WRC(WRC_CHIP)|WDTP(booke_wdt_period));
 
 	mtspr(SPRN_TCR, val);
@@ -114,7 +117,7 @@ static long booke_wdt_ioctl(struct file *file,
 	case WDIOC_SETTIMEOUT:
 		if (get_user(booke_wdt_period, p))
 			return -EFAULT;
-		mtspr(SPRN_TCR, (mfspr(SPRN_TCR) & ~WDTP_MASK) |
+		mtspr(SPRN_TCR, (mfspr(SPRN_TCR) & ~WDTP_MASK) |
 				WDTP(booke_wdt_period));
 		return 0;
 	case WDIOC_GETTIMEOUT:
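The WDTP_MASK additions make the TCR update a proper read-modify-write: OR-ing a new period onto a register that still holds the old one merges the two bit patterns instead of replacing them. A toy model of the difference (a single 2-bit field at bits 31:30; the real WDTP layout is more scattered than this):

#include <stdint.h>
#include <stdio.h>

/* toy 2-bit period field; stands in for the real WDTP()/WDTP_MASK */
#define TCR_WP(x)	((((uint32_t)(x)) & 0x3u) << 30)
#define WDTP_MASK	TCR_WP(0x3)

int main(void)
{
	uint32_t tcr = TCR_WP(2);			/* stale period: 2 */

	uint32_t bad  = tcr | TCR_WP(1);		/* 2|1 = period 3! */
	uint32_t good = (tcr & ~WDTP_MASK) | TCR_WP(1);	/* period 1 */

	printf("bad=0x%08x good=0x%08x\n", bad, good);
	return 0;
}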