author    Paulo Zanoni <paulo.r.zanoni@intel.com>    2013-07-12 18:56:30 -0400
committer Daniel Vetter <daniel.vetter@ffwll.ch>    2013-07-19 12:08:55 -0400
commit    f1af8fc10cdb75da7f07f765e9af86dec064f2a8
tree      459a79fcf9b05d4f193cfffccc152748d96944ae /drivers/gpu/drm/i915/i915_irq.c
parent    23a78516081c49398b6bf08d7a40e954048426bf
drm/i915: add ILK/SNB support to ivybridge_irq_handler
And then rename it to ironlake_irq_handler. Also move ilk_gt_irq_handler up to avoid forward declarations.

In the previous patches I did small modifications to both ironlake_irq_handler and ivybridge_irq_handler so they became very similar functions. Now it should be very easy to verify that all we need to add ILK/SNB support is to call ilk_gt_irq_handler, call ilk_display_irq_handler and avoid reading pm_iir on gen 5.

v2: - Rebase due to changes on the previous patches
    - Move pm_iir to a tighter scope (Chris)
    - Change some Gen checks for readability

Signed-off-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
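[Editor's note] To see the resulting control flow at a glance, here is a condensed sketch of the merged handler as it stands after this patch. It is distilled from the diff below, not a verbatim copy: the helpers and registers are the real ones, but the master-interrupt and SDEIER save/restore bracketing and the Haswell power-well check are elided.

    /* Sketch only: condensed from the post-patch ironlake_irq_handler in
     * i915_irq.c; setup/teardown around the three IIR reads is omitted. */
    gt_iir = I915_READ(GTIIR);
    if (gt_iir) {
            if (IS_GEN5(dev))                       /* ILK has no PM interrupts */
                    ilk_gt_irq_handler(dev, dev_priv, gt_iir);
            else                                    /* SNB and newer */
                    snb_gt_irq_handler(dev, dev_priv, gt_iir);
            I915_WRITE(GTIIR, gt_iir);
            ret = IRQ_HANDLED;
    }

    de_iir = I915_READ(DEIIR);
    if (de_iir) {
            if (INTEL_INFO(dev)->gen >= 7)          /* IVB/HSW */
                    ivb_display_irq_handler(dev, de_iir);
            else                                    /* ILK/SNB */
                    ilk_display_irq_handler(dev, de_iir);
            I915_WRITE(DEIIR, de_iir);
            ret = IRQ_HANDLED;
    }

    if (INTEL_INFO(dev)->gen >= 6) {                /* GEN6_PMIIR absent on gen 5 */
            u32 pm_iir = I915_READ(GEN6_PMIIR);
            if (pm_iir) {
                    if (IS_HASWELL(dev))
                            hsw_pm_irq_handler(dev_priv, pm_iir);
                    else if (pm_iir & GEN6_PM_RPS_EVENTS)
                            gen6_rps_irq_handler(dev_priv, pm_iir);
                    I915_WRITE(GEN6_PMIIR, pm_iir);
                    ret = IRQ_HANDLED;
            }
    }

The gen >= 6 guard is the point of the v2 "tighter scope" change: pm_iir now only exists inside the branch that is allowed to read GEN6_PMIIR at all.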
Diffstat (limited to 'drivers/gpu/drm/i915/i915_irq.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 115
1 file changed, 32 insertions(+), 83 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 597a3d5ae7e1..7c201f7906eb 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -844,6 +844,17 @@ static void ivybridge_parity_error_irq_handler(struct drm_device *dev)
 	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
 }
 
+static void ilk_gt_irq_handler(struct drm_device *dev,
+			       struct drm_i915_private *dev_priv,
+			       u32 gt_iir)
+{
+	if (gt_iir &
+	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
+		notify_ring(dev, &dev_priv->ring[RCS]);
+	if (gt_iir & ILK_BSD_USER_INTERRUPT)
+		notify_ring(dev, &dev_priv->ring[VCS]);
+}
+
 static void snb_gt_irq_handler(struct drm_device *dev,
 			       struct drm_i915_private *dev_priv,
 			       u32 gt_iir)
@@ -1285,11 +1296,11 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
 	}
 }
 
-static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
+static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 {
 	struct drm_device *dev = (struct drm_device *) arg;
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier = 0;
+	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
 	irqreturn_t ret = IRQ_NONE;
 
 	atomic_inc(&dev_priv->irq_received);
@@ -1329,27 +1340,34 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
 
 	gt_iir = I915_READ(GTIIR);
 	if (gt_iir) {
-		snb_gt_irq_handler(dev, dev_priv, gt_iir);
+		if (IS_GEN5(dev))
+			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
+		else
+			snb_gt_irq_handler(dev, dev_priv, gt_iir);
 		I915_WRITE(GTIIR, gt_iir);
 		ret = IRQ_HANDLED;
 	}
 
 	de_iir = I915_READ(DEIIR);
 	if (de_iir) {
-		ivb_display_irq_handler(dev, de_iir);
-
+		if (INTEL_INFO(dev)->gen >= 7)
+			ivb_display_irq_handler(dev, de_iir);
+		else
+			ilk_display_irq_handler(dev, de_iir);
 		I915_WRITE(DEIIR, de_iir);
 		ret = IRQ_HANDLED;
 	}
 
-	pm_iir = I915_READ(GEN6_PMIIR);
-	if (pm_iir) {
-		if (IS_HASWELL(dev))
-			hsw_pm_irq_handler(dev_priv, pm_iir);
-		else if (pm_iir & GEN6_PM_RPS_EVENTS)
-			gen6_rps_irq_handler(dev_priv, pm_iir);
-		I915_WRITE(GEN6_PMIIR, pm_iir);
-		ret = IRQ_HANDLED;
+	if (INTEL_INFO(dev)->gen >= 6) {
+		u32 pm_iir = I915_READ(GEN6_PMIIR);
+		if (pm_iir) {
+			if (IS_HASWELL(dev))
+				hsw_pm_irq_handler(dev_priv, pm_iir);
+			else if (pm_iir & GEN6_PM_RPS_EVENTS)
+				gen6_rps_irq_handler(dev_priv, pm_iir);
+			I915_WRITE(GEN6_PMIIR, pm_iir);
+			ret = IRQ_HANDLED;
+		}
 	}
 
 	if (IS_HASWELL(dev)) {
@@ -1369,75 +1387,6 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
 	return ret;
 }
 
-static void ilk_gt_irq_handler(struct drm_device *dev,
-			       struct drm_i915_private *dev_priv,
-			       u32 gt_iir)
-{
-	if (gt_iir &
-	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
-		notify_ring(dev, &dev_priv->ring[RCS]);
-	if (gt_iir & ILK_BSD_USER_INTERRUPT)
-		notify_ring(dev, &dev_priv->ring[VCS]);
-}
-
-static irqreturn_t ironlake_irq_handler(int irq, void *arg)
-{
-	struct drm_device *dev = (struct drm_device *) arg;
-	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	int ret = IRQ_NONE;
-	u32 de_iir, gt_iir, de_ier, sde_ier;
-
-	atomic_inc(&dev_priv->irq_received);
-
-	/* disable master interrupt before clearing iir  */
-	de_ier = I915_READ(DEIER);
-	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
-	POSTING_READ(DEIER);
-
-	/* Disable south interrupts. We'll only write to SDEIIR once, so further
-	 * interrupts will will be stored on its back queue, and then we'll be
-	 * able to process them after we restore SDEIER (as soon as we restore
-	 * it, we'll get an interrupt if SDEIIR still has something to process
-	 * due to its back queue). */
-	sde_ier = I915_READ(SDEIER);
-	I915_WRITE(SDEIER, 0);
-	POSTING_READ(SDEIER);
-
-	gt_iir = I915_READ(GTIIR);
-	if (gt_iir) {
-		if (IS_GEN5(dev))
-			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
-		else
-			snb_gt_irq_handler(dev, dev_priv, gt_iir);
-		I915_WRITE(GTIIR, gt_iir);
-		ret = IRQ_HANDLED;
-	}
-
-	de_iir = I915_READ(DEIIR);
-	if (de_iir) {
-		ilk_display_irq_handler(dev, de_iir);
-		I915_WRITE(DEIIR, de_iir);
-		ret = IRQ_HANDLED;
-	}
-
-	if (IS_GEN6(dev)) {
-		u32 pm_iir = I915_READ(GEN6_PMIIR);
-		if (pm_iir) {
-			if (pm_iir & GEN6_PM_RPS_EVENTS)
-				gen6_rps_irq_handler(dev_priv, pm_iir);
-			I915_WRITE(GEN6_PMIIR, pm_iir);
-			ret = IRQ_HANDLED;
-		}
-	}
-
-	I915_WRITE(DEIER, de_ier);
-	POSTING_READ(DEIER);
-	I915_WRITE(SDEIER, sde_ier);
-	POSTING_READ(SDEIER);
-
-	return ret;
-}
-
 /**
  * i915_error_work_func - do process context error handling work
  * @work: work struct
@@ -3118,7 +3067,7 @@ void intel_irq_init(struct drm_device *dev)
 		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
 	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
 		/* Share uninstall handlers with ILK/SNB */
-		dev->driver->irq_handler = ivybridge_irq_handler;
+		dev->driver->irq_handler = ironlake_irq_handler;
 		dev->driver->irq_preinstall = ironlake_irq_preinstall;
 		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
 		dev->driver->irq_uninstall = ironlake_irq_uninstall;
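
[Editor's note] For reference on the generation checks used in this patch: they are thin wrappers around the per-device info block. The definitions below are paraphrased from drivers/gpu/drm/i915/i915_drv.h of the same era and are illustrative only; consult the tree at this commit for the authoritative versions.

    /* Illustrative; see i915_drv.h at this commit for the real definitions. */
    #define INTEL_INFO(dev)	(((struct drm_i915_private *)(dev)->dev_private)->info)
    #define IS_GEN5(dev)	(INTEL_INFO(dev)->gen == 5)
    #define IS_GEN6(dev)	(INTEL_INFO(dev)->gen == 6)

This is why the patch can trade IS_GENx() tests and gen >= N comparisons for readability: both forms read the same info->gen field.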