Diffstat (limited to 'drivers/gpu/drm')
 drivers/gpu/drm/drm_drawable.c       |  15
 drivers/gpu/drm/drm_ioc32.c          |  34
 drivers/gpu/drm/drm_irq.c            |   5
 drivers/gpu/drm/drm_lock.c           |   2
 drivers/gpu/drm/i915/i915_dma.c      |   5
 drivers/gpu/drm/i915/i915_drv.h      |  10
 drivers/gpu/drm/i915/i915_gem.c      |   2
 drivers/gpu/drm/i915/i915_gem_proc.c |  15
 drivers/gpu/drm/i915/i915_irq.c      | 277
 9 files changed, 221 insertions(+), 144 deletions(-)
diff --git a/drivers/gpu/drm/drm_drawable.c b/drivers/gpu/drm/drm_drawable.c
index 1839c57663c5..80be1cab62af 100644
--- a/drivers/gpu/drm/drm_drawable.c
+++ b/drivers/gpu/drm/drm_drawable.c
@@ -76,11 +76,18 @@ int drm_rmdraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
 	struct drm_draw *draw = data;
 	unsigned long irqflags;
+	struct drm_drawable_info *info;
 
 	spin_lock_irqsave(&dev->drw_lock, irqflags);
 
-	drm_free(drm_get_drawable_info(dev, draw->handle),
-		 sizeof(struct drm_drawable_info), DRM_MEM_BUFS);
+	info = drm_get_drawable_info(dev, draw->handle);
+	if (info == NULL) {
+		spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+		return -EINVAL;
+	}
+	drm_free(info->rects, info->num_rects * sizeof(struct drm_clip_rect),
+		 DRM_MEM_BUFS);
+	drm_free(info, sizeof(struct drm_drawable_info), DRM_MEM_BUFS);
 
 	idr_remove(&dev->drw_idr, draw->handle);
 
@@ -111,7 +118,9 @@ int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file
 
 	switch (update->type) {
 	case DRM_DRAWABLE_CLIPRECTS:
-		if (update->num != info->num_rects) {
+		if (update->num == 0)
+			rects = NULL;
+		else if (update->num != info->num_rects) {
 			rects = drm_alloc(update->num * sizeof(struct drm_clip_rect),
 					  DRM_MEM_BUFS);
 		} else
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 90f5a8d9bdcb..920b72fbc958 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -64,6 +64,8 @@
 #define DRM_IOCTL_SG_ALLOC32		DRM_IOW( 0x38, drm_scatter_gather32_t)
 #define DRM_IOCTL_SG_FREE32		DRM_IOW( 0x39, drm_scatter_gather32_t)
 
+#define DRM_IOCTL_UPDATE_DRAW32		DRM_IOW( 0x3f, drm_update_draw32_t)
+
 #define DRM_IOCTL_WAIT_VBLANK32		DRM_IOWR(0x3a, drm_wait_vblank32_t)
 
 typedef struct drm_version_32 {
@@ -952,6 +954,37 @@ static int compat_drm_sg_free(struct file *file, unsigned int cmd,
 			 DRM_IOCTL_SG_FREE, (unsigned long)request);
 }
 
+typedef struct drm_update_draw32 {
+	drm_drawable_t handle;
+	unsigned int type;
+	unsigned int num;
+	/* 64-bit version has a 32-bit pad here */
+	u64 data;	/**< Pointer */
+} __attribute__((packed)) drm_update_draw32_t;
+
+static int compat_drm_update_draw(struct file *file, unsigned int cmd,
+				  unsigned long arg)
+{
+	drm_update_draw32_t update32;
+	struct drm_update_draw __user *request;
+	int err;
+
+	if (copy_from_user(&update32, (void __user *)arg, sizeof(update32)))
+		return -EFAULT;
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ||
+	    __put_user(update32.handle, &request->handle) ||
+	    __put_user(update32.type, &request->type) ||
+	    __put_user(update32.num, &request->num) ||
+	    __put_user(update32.data, &request->data))
+		return -EFAULT;
+
+	err = drm_ioctl(file->f_path.dentry->d_inode, file,
+			DRM_IOCTL_UPDATE_DRAW, (unsigned long)request);
+	return err;
+}
+
 struct drm_wait_vblank_request32 {
 	enum drm_vblank_seq_type type;
 	unsigned int sequence;
@@ -1033,6 +1066,7 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
 #endif
 	[DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC32)] = compat_drm_sg_alloc,
 	[DRM_IOCTL_NR(DRM_IOCTL_SG_FREE32)] = compat_drm_sg_free,
+	[DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW32)] = compat_drm_update_draw,
 	[DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank,
 };
 
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 4091b9e291f9..212a94f715b2 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -594,11 +594,14 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
 		goto done;
 	}
 
+	/* Get a refcount on the vblank, which will be released by
+	 * drm_vbl_send_signals().
+	 */
 	ret = drm_vblank_get(dev, crtc);
 	if (ret) {
 		drm_free(vbl_sig, sizeof(struct drm_vbl_sig),
 			 DRM_MEM_DRIVER);
-		return ret;
+		goto done;
 	}
 
 	atomic_inc(&dev->vbl_signal_pending);
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index a4caf95485d7..888159e03d26 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -232,6 +232,7 @@ int drm_lock_take(struct drm_lock_data *lock_data,
 	}
 	return 0;
 }
+EXPORT_SYMBOL(drm_lock_take);
 
 /**
  * This takes a lock forcibly and hands it to context. Should ONLY be used
@@ -299,6 +300,7 @@ int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
 	wake_up_interruptible(&lock_data->lock_queue);
 	return 0;
 }
+EXPORT_SYMBOL(drm_lock_free);
 
 /**
  * If we get here, it means that the process has called DRM_IOCTL_LOCK
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index db34780edbb2..01de536e0211 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -844,8 +844,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	 * correctly in testing on 945G.
 	 * This may be a side effect of MSI having been made available for PEG
 	 * and the registers being closely associated.
+	 *
+	 * According to chipset errata, on the 965GM, MSI interrupts may
+	 * be lost or delayed
 	 */
-	if (!IS_I945G(dev) && !IS_I945GM(dev))
+	if (!IS_I945G(dev) && !IS_I945GM(dev) && !IS_I965GM(dev))
 		if (pci_enable_msi(dev->pdev))
 			DRM_ERROR("failed to enable MSI\n");
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index eae4ed3956e0..f20ffe17df71 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -90,7 +90,7 @@ struct mem_block {
 typedef struct _drm_i915_vbl_swap {
 	struct list_head head;
 	drm_drawable_t drw_id;
-	unsigned int plane;
+	unsigned int pipe;
 	unsigned int sequence;
 } drm_i915_vbl_swap_t;
 
@@ -240,6 +240,9 @@ typedef struct drm_i915_private {
 	u8 saveDACDATA[256*3]; /* 256 3-byte colors */
 	u8 saveCR[37];
 
+	/** Work task for vblank-related ring access */
+	struct work_struct vblank_work;
+
 	struct {
 		struct drm_mm gtt_space;
 
@@ -285,9 +288,6 @@ typedef struct drm_i915_private {
 	 */
 	struct delayed_work retire_work;
 
-	/** Work task for vblank-related ring access */
-	struct work_struct vblank_work;
-
 	uint32_t next_gem_seqno;
 
 	/**
@@ -441,7 +441,7 @@ extern int i915_irq_wait(struct drm_device *dev, void *data,
 void i915_user_irq_get(struct drm_device *dev);
 void i915_user_irq_put(struct drm_device *dev);
 
-extern void i915_gem_vblank_work_handler(struct work_struct *work);
+extern void i915_vblank_work_handler(struct work_struct *work);
 extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
 extern void i915_driver_irq_preinstall(struct drm_device * dev);
 extern int i915_driver_irq_postinstall(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index dc2e6fdb6ca3..17ae330ff269 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2564,8 +2564,6 @@ i915_gem_load(struct drm_device *dev)
 	INIT_LIST_HEAD(&dev_priv->mm.request_list);
 	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
 			  i915_gem_retire_work_handler);
-	INIT_WORK(&dev_priv->mm.vblank_work,
-		  i915_gem_vblank_work_handler);
 	dev_priv->mm.next_gem_seqno = 1;
 
 	i915_gem_detect_bit_6_swizzle(dev);
diff --git a/drivers/gpu/drm/i915/i915_gem_proc.c b/drivers/gpu/drm/i915/i915_gem_proc.c
index 15d4160415b0..93de15b4c9a7 100644
--- a/drivers/gpu/drm/i915/i915_gem_proc.c
+++ b/drivers/gpu/drm/i915/i915_gem_proc.c
@@ -192,7 +192,12 @@ static int i915_gem_seqno_info(char *buf, char **start, off_t offset,
 
 	*start = &buf[offset];
 	*eof = 0;
-	DRM_PROC_PRINT("Current sequence: %d\n", i915_get_gem_seqno(dev));
+	if (dev_priv->hw_status_page != NULL) {
+		DRM_PROC_PRINT("Current sequence: %d\n",
+			       i915_get_gem_seqno(dev));
+	} else {
+		DRM_PROC_PRINT("Current sequence: hws uninitialized\n");
+	}
 	DRM_PROC_PRINT("Waiter sequence:  %d\n",
 		       dev_priv->mm.waiting_gem_seqno);
 	DRM_PROC_PRINT("IRQ sequence:     %d\n", dev_priv->mm.irq_gem_seqno);
@@ -230,8 +235,12 @@ static int i915_interrupt_info(char *buf, char **start, off_t offset,
 		   I915_READ(PIPEBSTAT));
 	DRM_PROC_PRINT("Interrupts received: %d\n",
 		       atomic_read(&dev_priv->irq_received));
-	DRM_PROC_PRINT("Current sequence:    %d\n",
-		       i915_get_gem_seqno(dev));
+	if (dev_priv->hw_status_page != NULL) {
+		DRM_PROC_PRINT("Current sequence:    %d\n",
+			       i915_get_gem_seqno(dev));
+	} else {
+		DRM_PROC_PRINT("Current sequence:    hws uninitialized\n");
+	}
 	DRM_PROC_PRINT("Waiter sequence:     %d\n",
 		       dev_priv->mm.waiting_gem_seqno);
 	DRM_PROC_PRINT("IRQ sequence:        %d\n",
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index baae511c785b..26f48932a51e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -60,43 +60,6 @@ i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
 }
 
 /**
- * i915_get_pipe - return the the pipe associated with a given plane
- * @dev: DRM device
- * @plane: plane to look for
- *
- * The Intel Mesa & 2D drivers call the vblank routines with a plane number
- * rather than a pipe number, since they may not always be equal.  This routine
- * maps the given @plane back to a pipe number.
- */
-static int
-i915_get_pipe(struct drm_device *dev, int plane)
-{
-	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	u32 dspcntr;
-
-	dspcntr = plane ? I915_READ(DSPBCNTR) : I915_READ(DSPACNTR);
-
-	return dspcntr & DISPPLANE_SEL_PIPE_MASK ? 1 : 0;
-}
-
-/**
- * i915_get_plane - return the the plane associated with a given pipe
- * @dev: DRM device
- * @pipe: pipe to look for
- *
- * The Intel Mesa & 2D drivers call the vblank routines with a plane number
- * rather than a plane number, since they may not always be equal.  This routine
- * maps the given @pipe back to a plane number.
- */
-static int
-i915_get_plane(struct drm_device *dev, int pipe)
-{
-	if (i915_get_pipe(dev, 0) == pipe)
-		return 0;
-	return 1;
-}
-
-/**
  * i915_pipe_enabled - check if a pipe is enabled
  * @dev: DRM device
  * @pipe: pipe to check
@@ -121,6 +84,9 @@ i915_pipe_enabled(struct drm_device *dev, int pipe)
  * Emit blits for scheduled buffer swaps.
  *
  * This function will be called with the HW lock held.
+ * Because this function must grab the ring mutex (dev->struct_mutex),
+ * it can no longer run at soft irq time.  We'll fix this when we do
+ * the DRI2 swap buffer work.
  */
 static void i915_vblank_tasklet(struct drm_device *dev)
 {
@@ -141,6 +107,8 @@ static void i915_vblank_tasklet(struct drm_device *dev)
 	u32 ropcpp = (0xcc << 16) | ((cpp - 1) << 24);
 	RING_LOCALS;
 
+	mutex_lock(&dev->struct_mutex);
+
 	if (IS_I965G(dev) && sarea_priv->front_tiled) {
 		cmd |= XY_SRC_COPY_BLT_DST_TILED;
 		dst_pitch >>= 2;
@@ -165,7 +133,7 @@ static void i915_vblank_tasklet(struct drm_device *dev)
 	list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
 		drm_i915_vbl_swap_t *vbl_swap =
 			list_entry(list, drm_i915_vbl_swap_t, head);
-		int pipe = i915_get_pipe(dev, vbl_swap->plane);
+		int pipe = vbl_swap->pipe;
 
 		if ((counter[pipe] - vbl_swap->sequence) > (1<<23))
 			continue;
@@ -179,20 +147,19 @@ static void i915_vblank_tasklet(struct drm_device *dev)
 
 		drw = drm_get_drawable_info(dev, vbl_swap->drw_id);
 
-		if (!drw) {
-			spin_unlock(&dev->drw_lock);
-			drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
-			spin_lock(&dev_priv->swaps_lock);
-			continue;
-		}
-
 		list_for_each(hit, &hits) {
 			drm_i915_vbl_swap_t *swap_cmp =
 				list_entry(hit, drm_i915_vbl_swap_t, head);
 			struct drm_drawable_info *drw_cmp =
 				drm_get_drawable_info(dev, swap_cmp->drw_id);
 
-			if (drw_cmp &&
+			/* Make sure both drawables are still
+			 * around and have some rectangles before
+			 * we look inside to order them for the
+			 * blts below.
+			 */
+			if (drw_cmp && drw_cmp->num_rects > 0 &&
+			    drw && drw->num_rects > 0 &&
 			    drw_cmp->rects[0].y1 > drw->rects[0].y1) {
 				list_add_tail(list, hit);
 				break;
@@ -212,6 +179,7 @@ static void i915_vblank_tasklet(struct drm_device *dev)
 
 	if (nhits == 0) {
 		spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
+		mutex_unlock(&dev->struct_mutex);
 		return;
 	}
 
@@ -265,18 +233,21 @@ static void i915_vblank_tasklet(struct drm_device *dev)
 		drm_i915_vbl_swap_t *swap_hit =
 			list_entry(hit, drm_i915_vbl_swap_t, head);
 		struct drm_clip_rect *rect;
-		int num_rects, plane;
+		int num_rects, pipe;
 		unsigned short top, bottom;
 
 		drw = drm_get_drawable_info(dev, swap_hit->drw_id);
 
+		/* The drawable may have been destroyed since
+		 * the vblank swap was queued
+		 */
 		if (!drw)
 			continue;
 
 		rect = drw->rects;
-		plane = swap_hit->plane;
-		top = upper[plane];
-		bottom = lower[plane];
+		pipe = swap_hit->pipe;
+		top = upper[pipe];
+		bottom = lower[pipe];
 
 		for (num_rects = drw->num_rects; num_rects--; rect++) {
 			int y1 = max(rect->y1, top);
@@ -302,6 +273,7 @@ static void i915_vblank_tasklet(struct drm_device *dev)
 	}
 
 	spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+	mutex_unlock(&dev->struct_mutex);
 
 	list_for_each_safe(hit, tmp, &hits) {
 		drm_i915_vbl_swap_t *swap_hit =
@@ -313,15 +285,16 @@ static void i915_vblank_tasklet(struct drm_device *dev)
 	}
 }
 
-u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
+/* Called from drm generic code, passed a 'crtc', which
+ * we use as a pipe index
+ */
+u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long high_frame;
 	unsigned long low_frame;
 	u32 high1, high2, low, count;
-	int pipe;
 
-	pipe = i915_get_pipe(dev, plane);
 	high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
 	low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
 
@@ -350,18 +323,37 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
 }
 
 void
-i915_gem_vblank_work_handler(struct work_struct *work)
+i915_vblank_work_handler(struct work_struct *work)
 {
-	drm_i915_private_t *dev_priv;
-	struct drm_device *dev;
+	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
+						    vblank_work);
+	struct drm_device *dev = dev_priv->dev;
+	unsigned long irqflags;
 
-	dev_priv = container_of(work, drm_i915_private_t,
-				mm.vblank_work);
-	dev = dev_priv->dev;
+	if (dev->lock.hw_lock == NULL) {
+		i915_vblank_tasklet(dev);
+		return;
+	}
+
+	spin_lock_irqsave(&dev->tasklet_lock, irqflags);
+	dev->locked_tasklet_func = i915_vblank_tasklet;
+	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
+
+	/* Try to get the lock now, if this fails, the lock
+	 * holder will execute the tasklet during unlock
+	 */
+	if (!drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT))
+		return;
+
+	dev->lock.lock_time = jiffies;
+	atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
+
+	spin_lock_irqsave(&dev->tasklet_lock, irqflags);
+	dev->locked_tasklet_func = NULL;
+	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
 
-	mutex_lock(&dev->struct_mutex);
 	i915_vblank_tasklet(dev);
-	mutex_unlock(&dev->struct_mutex);
+	drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT);
 }
 
 irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
@@ -398,7 +390,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 		else if (pipea_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
 					PIPE_VBLANK_INTERRUPT_STATUS)) {
 			vblank++;
-			drm_handle_vblank(dev, i915_get_plane(dev, 0));
+			drm_handle_vblank(dev, 0);
 		}
 
 		I915_WRITE(PIPEASTAT, pipea_stats);
@@ -416,7 +408,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 		else if (pipeb_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
 					PIPE_VBLANK_INTERRUPT_STATUS)) {
 			vblank++;
-			drm_handle_vblank(dev, i915_get_plane(dev, 1));
+			drm_handle_vblank(dev, 1);
 		}
 
 		if (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS)
@@ -441,12 +433,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 	if (iir & I915_ASLE_INTERRUPT)
 		opregion_asle_intr(dev);
 
-	if (vblank && dev_priv->swaps_pending > 0) {
-		if (dev_priv->ring.ring_obj == NULL)
-			drm_locked_tasklet(dev, i915_vblank_tasklet);
-		else
-			schedule_work(&dev_priv->mm.vblank_work);
-	}
+	if (vblank && dev_priv->swaps_pending > 0)
+		schedule_work(&dev_priv->vblank_work);
 
 	return IRQ_HANDLED;
 }
@@ -481,22 +469,24 @@ static int i915_emit_irq(struct drm_device * dev)
 void i915_user_irq_get(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	unsigned long irqflags;
 
-	spin_lock(&dev_priv->user_irq_lock);
+	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
 	if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1))
 		i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
-	spin_unlock(&dev_priv->user_irq_lock);
+	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
 }
 
 void i915_user_irq_put(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	unsigned long irqflags;
 
-	spin_lock(&dev_priv->user_irq_lock);
+	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
 	BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
 	if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0))
 		i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
-	spin_unlock(&dev_priv->user_irq_lock);
+	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
 }
 
 static int i915_wait_irq(struct drm_device * dev, int irq_nr)
@@ -578,74 +568,95 @@ int i915_irq_wait(struct drm_device *dev, void *data,
 	return i915_wait_irq(dev, irqwait->irq_seq);
 }
 
-int i915_enable_vblank(struct drm_device *dev, int plane)
+/* Called from drm generic code, passed 'crtc' which
+ * we use as a pipe index
+ */
+int i915_enable_vblank(struct drm_device *dev, int pipe)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	int pipe = i915_get_pipe(dev, plane);
 	u32 pipestat_reg = 0;
 	u32 pipestat;
+	u32 interrupt = 0;
+	unsigned long irqflags;
 
 	switch (pipe) {
 	case 0:
 		pipestat_reg = PIPEASTAT;
-		i915_enable_irq(dev_priv, I915_DISPLAY_PIPE_A_EVENT_INTERRUPT);
+		interrupt = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
 		break;
 	case 1:
 		pipestat_reg = PIPEBSTAT;
-		i915_enable_irq(dev_priv, I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
+		interrupt = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
 		break;
 	default:
 		DRM_ERROR("tried to enable vblank on non-existent pipe %d\n",
 			  pipe);
-		break;
+		return 0;
 	}
 
-	if (pipestat_reg) {
-		pipestat = I915_READ(pipestat_reg);
-		if (IS_I965G(dev))
-			pipestat |= PIPE_START_VBLANK_INTERRUPT_ENABLE;
-		else
-			pipestat |= PIPE_VBLANK_INTERRUPT_ENABLE;
-		/* Clear any stale interrupt status */
-		pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
-			     PIPE_VBLANK_INTERRUPT_STATUS);
-		I915_WRITE(pipestat_reg, pipestat);
-	}
+	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+	/* Enabling vblank events in IMR comes before PIPESTAT write, or
+	 * there's a race where the PIPESTAT vblank bit gets set to 1, so
+	 * the OR of enabled PIPESTAT bits goes to 1, so the PIPExEVENT in
+	 * ISR flashes to 1, but the IIR bit doesn't get set to 1 because
+	 * IMR masks it.  It doesn't ever get set after we clear the masking
+	 * in IMR because the ISR bit is edge, not level-triggered, on the
+	 * OR of PIPESTAT bits.
+	 */
+	i915_enable_irq(dev_priv, interrupt);
+	pipestat = I915_READ(pipestat_reg);
+	if (IS_I965G(dev))
+		pipestat |= PIPE_START_VBLANK_INTERRUPT_ENABLE;
+	else
+		pipestat |= PIPE_VBLANK_INTERRUPT_ENABLE;
+	/* Clear any stale interrupt status */
+	pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
+		     PIPE_VBLANK_INTERRUPT_STATUS);
+	I915_WRITE(pipestat_reg, pipestat);
+	(void) I915_READ(pipestat_reg);	/* Posting read */
+	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
 
 	return 0;
 }
 
-void i915_disable_vblank(struct drm_device *dev, int plane)
+/* Called from drm generic code, passed 'crtc' which
+ * we use as a pipe index
+ */
+void i915_disable_vblank(struct drm_device *dev, int pipe)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	int pipe = i915_get_pipe(dev, plane);
 	u32 pipestat_reg = 0;
 	u32 pipestat;
+	u32 interrupt = 0;
+	unsigned long irqflags;
 
 	switch (pipe) {
 	case 0:
 		pipestat_reg = PIPEASTAT;
-		i915_disable_irq(dev_priv, I915_DISPLAY_PIPE_A_EVENT_INTERRUPT);
+		interrupt = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
 		break;
 	case 1:
 		pipestat_reg = PIPEBSTAT;
-		i915_disable_irq(dev_priv, I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
+		interrupt = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
 		break;
 	default:
 		DRM_ERROR("tried to disable vblank on non-existent pipe %d\n",
 			  pipe);
+		return;
 		break;
 	}
 
-	if (pipestat_reg) {
-		pipestat = I915_READ(pipestat_reg);
-		pipestat &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
-			      PIPE_VBLANK_INTERRUPT_ENABLE);
-		/* Clear any stale interrupt status */
-		pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
-			     PIPE_VBLANK_INTERRUPT_STATUS);
-		I915_WRITE(pipestat_reg, pipestat);
-	}
+	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+	i915_disable_irq(dev_priv, interrupt);
+	pipestat = I915_READ(pipestat_reg);
+	pipestat &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
+		      PIPE_VBLANK_INTERRUPT_ENABLE);
+	/* Clear any stale interrupt status */
+	pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
+		     PIPE_VBLANK_INTERRUPT_STATUS);
+	I915_WRITE(pipestat_reg, pipestat);
+	(void) I915_READ(pipestat_reg);	/* Posting read */
+	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
 }
 
 /* Set the vblank monitor pipe
@@ -687,8 +698,8 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	drm_i915_vblank_swap_t *swap = data;
-	drm_i915_vbl_swap_t *vbl_swap;
-	unsigned int pipe, seqtype, curseq, plane;
+	drm_i915_vbl_swap_t *vbl_swap, *vbl_old;
+	unsigned int pipe, seqtype, curseq;
 	unsigned long irqflags;
 	struct list_head *list;
 	int ret;
@@ -709,8 +720,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
-	plane = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
-	pipe = i915_get_pipe(dev, plane);
+	pipe = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
 
 	seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);
 
@@ -751,44 +761,52 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
 		}
 	}
 
+	vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);
+
+	if (!vbl_swap) {
+		DRM_ERROR("Failed to allocate memory to queue swap\n");
+		drm_vblank_put(dev, pipe);
+		return -ENOMEM;
+	}
+
+	vbl_swap->drw_id = swap->drawable;
+	vbl_swap->pipe = pipe;
+	vbl_swap->sequence = swap->sequence;
+
 	spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
 
 	list_for_each(list, &dev_priv->vbl_swaps.head) {
-		vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head);
+		vbl_old = list_entry(list, drm_i915_vbl_swap_t, head);
 
-		if (vbl_swap->drw_id == swap->drawable &&
-		    vbl_swap->plane == plane &&
-		    vbl_swap->sequence == swap->sequence) {
+		if (vbl_old->drw_id == swap->drawable &&
+		    vbl_old->pipe == pipe &&
+		    vbl_old->sequence == swap->sequence) {
 			spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
+			drm_vblank_put(dev, pipe);
+			drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
 			DRM_DEBUG("Already scheduled\n");
 			return 0;
 		}
 	}
 
-	spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
-
-	if (dev_priv->swaps_pending >= 100) {
+	if (dev_priv->swaps_pending >= 10) {
 		DRM_DEBUG("Too many swaps queued\n");
+		DRM_DEBUG(" pipe 0: %d pipe 1: %d\n",
+			   drm_vblank_count(dev, 0),
+			   drm_vblank_count(dev, 1));
+
+		list_for_each(list, &dev_priv->vbl_swaps.head) {
+			vbl_old = list_entry(list, drm_i915_vbl_swap_t, head);
+			DRM_DEBUG("\tdrw %x pipe %d seq %x\n",
+				  vbl_old->drw_id, vbl_old->pipe,
+				  vbl_old->sequence);
+		}
+		spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
 		drm_vblank_put(dev, pipe);
+		drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
 		return -EBUSY;
 	}
 
-	vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);
-
-	if (!vbl_swap) {
-		DRM_ERROR("Failed to allocate memory to queue swap\n");
-		drm_vblank_put(dev, pipe);
-		return -ENOMEM;
-	}
-
-	DRM_DEBUG("\n");
-
-	vbl_swap->drw_id = swap->drawable;
-	vbl_swap->plane = plane;
-	vbl_swap->sequence = swap->sequence;
-
-	spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
-
 	list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head);
 	dev_priv->swaps_pending++;
 
@@ -815,6 +833,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
 
 	spin_lock_init(&dev_priv->swaps_lock);
 	INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
+	INIT_WORK(&dev_priv->vblank_work, i915_vblank_work_handler);
 	dev_priv->swaps_pending = 0;
 
 	/* Set initial unmasked IRQs to just the selected vblank pipes. */