author     Dave Airlie <airlied@redhat.com>  2013-05-13 21:13:24 -0400
committer  Dave Airlie <airlied@redhat.com>  2013-05-16 21:45:44 -0400
commit     a6ac1bc341e499ad5296f265dfa8eba5afbf4191 (patch)
tree       6fb02643c90909b8167d29a7e8d2efa829091d90
parent     95643359f8b31e74b35901f4e36cd069cd67fd48 (diff)
drm/qxl: fix ioport interactions for kernel submitted commands.
qxl has ioports, but it really, really doesn't want you to write to them twice. If you write and then get a signal before the irq arrives to tell you the command has completed, you have to think ahead and avoid writing another time. That is tolerable for update-area, where multiple writes aren't the end of the world, but with create-primary-surface you can never do multiple writes.

So this stops internal kernel writes from doing interruptible waits, because otherwise we have no idea whether a write is a new one or a continuation of a previous one. Virtual hw sucks more than real hw.

This fixes lockups and VM crashes when resizing and starting/stopping X.

Signed-off-by: Dave Airlie <airlied@redhat.com>
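For reference, the change boils down to the wait pattern sketched below. This is a minimal standalone illustration, not the driver's actual code: the helper name, wait-queue and atomic-counter parameters are stand-ins for the qdev fields visible in the diff. Kernel-internal callers pass intr = false so a signal can never abort the wait and trick the driver into writing the ioport a second time, and both variants bail out after a 5 second timeout.

#include <linux/types.h>
#include <linux/wait.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>

/*
 * Illustrative sketch only (not the driver's wait_for_io_cmd_user()):
 * pick an interruptible or uninterruptible timed wait depending on the
 * caller, treat a timeout as "the hw has gone away", and fold a
 * positive (success) return into 0.
 */
static long qxl_wait_io_ack_sketch(wait_queue_head_t *ev, atomic_t *received,
				   int irq_num, bool intr)
{
	long ret;

	if (intr)
		/* userspace-driven path: may return -ERESTARTSYS on a signal */
		ret = wait_event_interruptible_timeout(*ev,
				atomic_read(received) > irq_num, 5 * HZ);
	else
		/* kernel-internal path: only completes or times out */
		ret = wait_event_timeout(*ev,
				atomic_read(received) > irq_num, 5 * HZ);

	if (ret > 0)
		ret = 0;	/* condition met before the timeout */
	return ret;		/* 0 on timeout, -ERESTARTSYS if interrupted */
}

In the patch itself, wait_for_io_cmd() (the path used for kernel-submitted commands such as the create-primary-surface case the commit message mentions) now passes intr = false, while qxl_io_update_area() keeps an interruptible wait by passing intr = true.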
-rw-r--r--  drivers/gpu/drm/qxl/qxl_cmd.c | 29
1 file changed, 19 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index 08b0823c93d5..f86771481317 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -277,7 +277,7 @@ out_unref:
 	return 0;
 }
 
-static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port)
+static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr)
 {
 	int irq_num;
 	long addr = qdev->io_base + port;
@@ -285,20 +285,29 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port)
 
 	mutex_lock(&qdev->async_io_mutex);
 	irq_num = atomic_read(&qdev->irq_received_io_cmd);
-
-
 	if (qdev->last_sent_io_cmd > irq_num) {
-		ret = wait_event_interruptible(qdev->io_cmd_event,
-					       atomic_read(&qdev->irq_received_io_cmd) > irq_num);
-		if (ret)
+		if (intr)
+			ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
+							       atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
+		else
+			ret = wait_event_timeout(qdev->io_cmd_event,
+						 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
+		/* 0 is timeout, just bail the "hw" has gone away */
+		if (ret <= 0)
 			goto out;
 		irq_num = atomic_read(&qdev->irq_received_io_cmd);
 	}
 	outb(val, addr);
 	qdev->last_sent_io_cmd = irq_num + 1;
-	ret = wait_event_interruptible(qdev->io_cmd_event,
-				       atomic_read(&qdev->irq_received_io_cmd) > irq_num);
+	if (intr)
+		ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
+						       atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
+	else
+		ret = wait_event_timeout(qdev->io_cmd_event,
+					 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
 out:
+	if (ret > 0)
+		ret = 0;
 	mutex_unlock(&qdev->async_io_mutex);
 	return ret;
 }
@@ -308,7 +317,7 @@ static void wait_for_io_cmd(struct qxl_device *qdev, uint8_t val, long port)
 	int ret;
 
 restart:
-	ret = wait_for_io_cmd_user(qdev, val, port);
+	ret = wait_for_io_cmd_user(qdev, val, port, false);
 	if (ret == -ERESTARTSYS)
 		goto restart;
 }
@@ -340,7 +349,7 @@ int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
 	mutex_lock(&qdev->update_area_mutex);
 	qdev->ram_header->update_area = *area;
 	qdev->ram_header->update_surface = surface_id;
-	ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC);
+	ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC, true);
 	mutex_unlock(&qdev->update_area_mutex);
 	return ret;
 }