author		Dave Airlie <airlied@redhat.com>	2014-03-20 19:27:07 -0400
committer	Dave Airlie <airlied@redhat.com>	2014-03-20 19:27:07 -0400
commit		55004938f445d87b9ddcc29491e7abba1b173be3 (patch)
tree		7884a1f5574beab8a6cfafa8cd48af25a2a020c6
parent		e84c20aff1ce7493bce26b75f1db363bb3f05979 (diff)
parent		75144097014d1bca861b403e7e2093549114d0c9 (diff)
Merge branch 'gma500-next' of git://github.com/patjak/drm-gma500 into drm-next
Summary of what's included:

- SGX MMU support
- SGX IRQ handling (Page faults and blitter fences)
- Minor Cedarview and Poulsbo unification
- Work queue for ASLE interrupt work
- Various cleanups, style fixes and removal of dead code

* 'gma500-next' of git://github.com/patjak/drm-gma500:
  drm/gma500: remove stub .open/postclose
  drm/gma500: Code cleanup - inline documentation
  drm/gma500: Code cleanup - style fixes
  drm/gma500: Code cleanup - removal of centralized exiting of function
  drm/gma500/cdv: Cedarview display cleanups
  drm/gma500: Unify encoder mode fixup
  drm/gma500: Unify _get_core_freq for cdv and psb
  drm/gma500: Move asle interrupt work into a work task
  drm/gma500: Remove dead code
  drm/gma500: Add backing type and base align to psb_gem_create()
  drm/gma500: Remove unused ioctls
  drm/gma500: Always trap MMU page faults
  drm/gma500: Hook up the MMU
  drm/gma500: Add first piece of blitter code
  drm/gma500: Give MMU code it's own header file
  drm/gma500: Add support for SGX interrupts
  drm/gma500: Make SGX MMU driver actually do something
-rw-r--r--	drivers/gpu/drm/gma500/Makefile             |   2
-rw-r--r--	drivers/gpu/drm/gma500/blitter.c            |  51
-rw-r--r--	drivers/gpu/drm/gma500/blitter.h            |  22
-rw-r--r--	drivers/gpu/drm/gma500/cdv_device.c         |  40
-rw-r--r--	drivers/gpu/drm/gma500/cdv_intel_crt.c      |   9
-rw-r--r--	drivers/gpu/drm/gma500/cdv_intel_display.c  |  71
-rw-r--r--	drivers/gpu/drm/gma500/cdv_intel_hdmi.c     |   9
-rw-r--r--	drivers/gpu/drm/gma500/framebuffer.c        |   2
-rw-r--r--	drivers/gpu/drm/gma500/gem.c                |  56
-rw-r--r--	drivers/gpu/drm/gma500/gem.h                |  21
-rw-r--r--	drivers/gpu/drm/gma500/gma_device.c         |  56
-rw-r--r--	drivers/gpu/drm/gma500/gma_device.h         |  21
-rw-r--r--	drivers/gpu/drm/gma500/gma_display.c        |   7
-rw-r--r--	drivers/gpu/drm/gma500/gma_display.h        |   3
-rw-r--r--	drivers/gpu/drm/gma500/gtt.c                |  45
-rw-r--r--	drivers/gpu/drm/gma500/gtt.h                |   3
-rw-r--r--	drivers/gpu/drm/gma500/mmu.c                | 297
-rw-r--r--	drivers/gpu/drm/gma500/mmu.h                |  93
-rw-r--r--	drivers/gpu/drm/gma500/oaktrail_hdmi.c      |   9
-rw-r--r--	drivers/gpu/drm/gma500/opregion.c           |  25
-rw-r--r--	drivers/gpu/drm/gma500/psb_device.c         |  42
-rw-r--r--	drivers/gpu/drm/gma500/psb_drv.c            | 404
-rw-r--r--	drivers/gpu/drm/gma500/psb_drv.h            | 203
-rw-r--r--	drivers/gpu/drm/gma500/psb_intel_display.c  |  30
-rw-r--r--	drivers/gpu/drm/gma500/psb_intel_drv.h      |   2
-rw-r--r--	drivers/gpu/drm/gma500/psb_irq.c            |  81
-rw-r--r--	include/drm/gma_drm.h                       |  70
27 files changed, 722 insertions(+), 952 deletions(-)
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
index e9064dd9045d..b15315576376 100644
--- a/drivers/gpu/drm/gma500/Makefile
+++ b/drivers/gpu/drm/gma500/Makefile
@@ -13,9 +13,11 @@ gma500_gfx-y += \
 	  intel_i2c.o \
 	  intel_gmbus.o \
 	  mmu.o \
+	  blitter.o \
 	  power.o \
 	  psb_drv.o \
 	  gma_display.o \
+	  gma_device.o \
 	  psb_intel_display.o \
 	  psb_intel_lvds.o \
 	  psb_intel_modes.o \
diff --git a/drivers/gpu/drm/gma500/blitter.c b/drivers/gpu/drm/gma500/blitter.c
new file mode 100644
index 000000000000..9cd54a6fb899
--- /dev/null
+++ b/drivers/gpu/drm/gma500/blitter.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2014, Patrik Jakobsson
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * Authors: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
+ */
+
+#include "psb_drv.h"
+
+#include "blitter.h"
+#include "psb_reg.h"
+
+/* Wait for the blitter to be completely idle */
+int gma_blt_wait_idle(struct drm_psb_private *dev_priv)
+{
+	unsigned long stop = jiffies + HZ;
+	int busy = 1;
+
+	/* NOP for Cedarview */
+	if (IS_CDV(dev_priv->dev))
+		return 0;
+
+	/* First do a quick check */
+	if ((PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
+	    ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY) == 0))
+		return 0;
+
+	do {
+		busy = (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
+	} while (busy && !time_after_eq(jiffies, stop));
+
+	if (busy)
+		return -EBUSY;
+
+	do {
+		busy = ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
+			 _PSB_C2B_STATUS_BUSY) != 0);
+	} while (busy && !time_after_eq(jiffies, stop));
+
+	/* If still busy, we probably have a hang */
+	return (busy) ? -EBUSY : 0;
+}
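The first consumer of this helper is the GTT unpin path further down in this merge (see the gtt.c hunk). As a rough sketch of the intended calling pattern -- example_teardown() is a made-up name for illustration, everything else is taken from this diff -- the caller holds gtt_mutex so no new blits can be queued, waits for the 2D core to drain, and only then tears down the mapping:

static int example_teardown(struct drm_device *dev, struct gtt_range *gt)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	int ret;

	/* Holding gtt_mutex keeps new blits from being initiated */
	mutex_lock(&dev_priv->gtt_mutex);

	/* Wait for any outstanding 2D work touching gt to finish */
	ret = gma_blt_wait_idle(dev_priv);
	if (ret) {
		DRM_ERROR("Blitter failed to go idle\n");
		goto out;
	}

	/* ... now it is safe to drop the GTT/MMU mappings for gt ... */
out:
	mutex_unlock(&dev_priv->gtt_mutex);
	return ret;
}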
diff --git a/drivers/gpu/drm/gma500/blitter.h b/drivers/gpu/drm/gma500/blitter.h
new file mode 100644
index 000000000000..b83648df590d
--- /dev/null
+++ b/drivers/gpu/drm/gma500/blitter.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2014, Patrik Jakobsson
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * Authors: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
+ */
+
+#ifndef __BLITTER_H
+#define __BLITTER_H
+
+extern int gma_blt_wait_idle(struct drm_psb_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 5a9a6a3063a8..3531f90e53d0 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -26,6 +26,7 @@
26#include "psb_intel_reg.h" 26#include "psb_intel_reg.h"
27#include "intel_bios.h" 27#include "intel_bios.h"
28#include "cdv_device.h" 28#include "cdv_device.h"
29#include "gma_device.h"
29 30
30#define VGA_SR_INDEX 0x3c4 31#define VGA_SR_INDEX 0x3c4
31#define VGA_SR_DATA 0x3c5 32#define VGA_SR_DATA 0x3c5
@@ -426,43 +427,6 @@ static int cdv_power_up(struct drm_device *dev)
 	return 0;
 }
 
-/* FIXME ? - shared with Poulsbo */
-static void cdv_get_core_freq(struct drm_device *dev)
-{
-	uint32_t clock;
-	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
-	struct drm_psb_private *dev_priv = dev->dev_private;
-
-	pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
-	pci_read_config_dword(pci_root, 0xD4, &clock);
-	pci_dev_put(pci_root);
-
-	switch (clock & 0x07) {
-	case 0:
-		dev_priv->core_freq = 100;
-		break;
-	case 1:
-		dev_priv->core_freq = 133;
-		break;
-	case 2:
-		dev_priv->core_freq = 150;
-		break;
-	case 3:
-		dev_priv->core_freq = 178;
-		break;
-	case 4:
-		dev_priv->core_freq = 200;
-		break;
-	case 5:
-	case 6:
-	case 7:
-		dev_priv->core_freq = 266;
-		break;
-	default:
-		dev_priv->core_freq = 0;
-	}
-}
-
 static void cdv_hotplug_work_func(struct work_struct *work)
 {
 	struct drm_psb_private *dev_priv = container_of(work, struct drm_psb_private,
@@ -618,7 +582,7 @@ static int cdv_chip_setup(struct drm_device *dev)
 	if (pci_enable_msi(dev->pdev))
 		dev_warn(dev->dev, "Enabling MSI failed!\n");
 	dev_priv->regmap = cdv_regmap;
-	cdv_get_core_freq(dev);
+	gma_get_core_freq(dev);
 	psb_intel_opregion_init(dev);
 	psb_intel_init_bios(dev);
 	cdv_hotplug_enable(dev, false);
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
index 661af492173d..c18268cd516e 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_crt.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -81,13 +81,6 @@ static int cdv_intel_crt_mode_valid(struct drm_connector *connector,
 	return MODE_OK;
 }
 
-static bool cdv_intel_crt_mode_fixup(struct drm_encoder *encoder,
-				     const struct drm_display_mode *mode,
-				     struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 static void cdv_intel_crt_mode_set(struct drm_encoder *encoder,
 				   struct drm_display_mode *mode,
 				   struct drm_display_mode *adjusted_mode)
@@ -224,7 +217,7 @@ static int cdv_intel_crt_set_property(struct drm_connector *connector,
 
 static const struct drm_encoder_helper_funcs cdv_intel_crt_helper_funcs = {
 	.dpms = cdv_intel_crt_dpms,
-	.mode_fixup = cdv_intel_crt_mode_fixup,
+	.mode_fixup = gma_encoder_mode_fixup,
 	.prepare = gma_encoder_prepare,
 	.commit = gma_encoder_commit,
 	.mode_set = cdv_intel_crt_mode_set,
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index 8fbfa06da62d..7ff91ce3b12a 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -412,8 +412,11 @@ static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit,
412 int refclk, 412 int refclk,
413 struct gma_clock_t *best_clock) 413 struct gma_clock_t *best_clock)
414{ 414{
415 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
415 struct gma_clock_t clock; 416 struct gma_clock_t clock;
416 if (refclk == 27000) { 417
418 switch (refclk) {
419 case 27000:
417 if (target < 200000) { 420 if (target < 200000) {
418 clock.p1 = 2; 421 clock.p1 = 2;
419 clock.p2 = 10; 422 clock.p2 = 10;
@@ -427,7 +430,9 @@ static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit,
427 clock.m1 = 0; 430 clock.m1 = 0;
428 clock.m2 = 98; 431 clock.m2 = 98;
429 } 432 }
430 } else if (refclk == 100000) { 433 break;
434
435 case 100000:
431 if (target < 200000) { 436 if (target < 200000) {
432 clock.p1 = 2; 437 clock.p1 = 2;
433 clock.p2 = 10; 438 clock.p2 = 10;
@@ -441,12 +446,13 @@ static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit,
441 clock.m1 = 0; 446 clock.m1 = 0;
442 clock.m2 = 133; 447 clock.m2 = 133;
443 } 448 }
444 } else 449 break;
450
451 default:
445 return false; 452 return false;
446 clock.m = clock.m2 + 2; 453 }
447 clock.p = clock.p1 * clock.p2; 454
448 clock.vco = (refclk * clock.m) / clock.n; 455 gma_crtc->clock_funcs->clock(refclk, &clock);
449 clock.dot = clock.vco / clock.p;
450 memcpy(best_clock, &clock, sizeof(struct gma_clock_t)); 456 memcpy(best_clock, &clock, sizeof(struct gma_clock_t));
451 return true; 457 return true;
452} 458}
@@ -468,49 +474,6 @@ static bool cdv_intel_pipe_enabled(struct drm_device *dev, int pipe)
468 return true; 474 return true;
469} 475}
470 476
471static bool cdv_intel_single_pipe_active (struct drm_device *dev)
472{
473 uint32_t pipe_enabled = 0;
474
475 if (cdv_intel_pipe_enabled(dev, 0))
476 pipe_enabled |= FIFO_PIPEA;
477
478 if (cdv_intel_pipe_enabled(dev, 1))
479 pipe_enabled |= FIFO_PIPEB;
480
481
482 DRM_DEBUG_KMS("pipe enabled %x\n", pipe_enabled);
483
484 if (pipe_enabled == FIFO_PIPEA || pipe_enabled == FIFO_PIPEB)
485 return true;
486 else
487 return false;
488}
489
490static bool is_pipeb_lvds(struct drm_device *dev, struct drm_crtc *crtc)
491{
492 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
493 struct drm_mode_config *mode_config = &dev->mode_config;
494 struct drm_connector *connector;
495
496 if (gma_crtc->pipe != 1)
497 return false;
498
499 list_for_each_entry(connector, &mode_config->connector_list, head) {
500 struct gma_encoder *gma_encoder =
501 gma_attached_encoder(connector);
502
503 if (!connector->encoder
504 || connector->encoder->crtc != crtc)
505 continue;
506
507 if (gma_encoder->type == INTEL_OUTPUT_LVDS)
508 return true;
509 }
510
511 return false;
512}
513
514void cdv_disable_sr(struct drm_device *dev) 477void cdv_disable_sr(struct drm_device *dev)
515{ 478{
516 if (REG_READ(FW_BLC_SELF) & FW_BLC_SELF_EN) { 479 if (REG_READ(FW_BLC_SELF) & FW_BLC_SELF_EN) {
@@ -535,8 +498,10 @@ void cdv_disable_sr(struct drm_device *dev)
535void cdv_update_wm(struct drm_device *dev, struct drm_crtc *crtc) 498void cdv_update_wm(struct drm_device *dev, struct drm_crtc *crtc)
536{ 499{
537 struct drm_psb_private *dev_priv = dev->dev_private; 500 struct drm_psb_private *dev_priv = dev->dev_private;
501 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
538 502
539 if (cdv_intel_single_pipe_active(dev)) { 503 /* Is only one pipe enabled? */
504 if (cdv_intel_pipe_enabled(dev, 0) ^ cdv_intel_pipe_enabled(dev, 1)) {
540 u32 fw; 505 u32 fw;
541 506
542 fw = REG_READ(DSPFW1); 507 fw = REG_READ(DSPFW1);
@@ -557,7 +522,9 @@ void cdv_update_wm(struct drm_device *dev, struct drm_crtc *crtc)
557 522
558 /* ignore FW4 */ 523 /* ignore FW4 */
559 524
560 if (is_pipeb_lvds(dev, crtc)) { 525 /* Is pipe b lvds ? */
526 if (gma_crtc->pipe == 1 &&
527 gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
561 REG_WRITE(DSPFW5, 0x00040330); 528 REG_WRITE(DSPFW5, 0x00040330);
562 } else { 529 } else {
563 fw = (3 << DSP_PLANE_B_FIFO_WM1_SHIFT) | 530 fw = (3 << DSP_PLANE_B_FIFO_WM1_SHIFT) |
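The open-coded m/p/vco/dot arithmetic that used to follow the refclk checks in cdv_intel_find_dp_pll() is now delegated to the per-chip clock callback (gma_crtc->clock_funcs->clock()). For Cedarview that callback computes exactly what the removed lines did; a sketch, assuming the callback carries the cdv_intel_clock() name used elsewhere in the driver:

static void cdv_intel_clock(int refclk, struct gma_clock_t *clock)
{
	/* Same derivation as the removed open-coded version */
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	clock->vco = (refclk * clock->m) / clock->n;
	clock->dot = clock->vco / clock->p;
}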
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index 1c0d723b8d24..968b42a5a32b 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -89,13 +89,6 @@ static void cdv_hdmi_mode_set(struct drm_encoder *encoder,
 	REG_READ(hdmi_priv->hdmi_reg);
 }
 
-static bool cdv_hdmi_mode_fixup(struct drm_encoder *encoder,
-				const struct drm_display_mode *mode,
-				struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 static void cdv_hdmi_dpms(struct drm_encoder *encoder, int mode)
 {
 	struct drm_device *dev = encoder->dev;
@@ -262,7 +255,7 @@ static void cdv_hdmi_destroy(struct drm_connector *connector)
 
 static const struct drm_encoder_helper_funcs cdv_hdmi_helper_funcs = {
 	.dpms = cdv_hdmi_dpms,
-	.mode_fixup = cdv_hdmi_mode_fixup,
+	.mode_fixup = gma_encoder_mode_fixup,
 	.prepare = gma_encoder_prepare,
 	.mode_set = cdv_hdmi_mode_set,
 	.commit = gma_encoder_commit,
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 94b3fec22c28..e7fcc148f333 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -319,7 +319,7 @@ static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
 {
 	struct gtt_range *backing;
 	/* Begin by trying to use stolen memory backing */
-	backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1);
+	backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1, PAGE_SIZE);
 	if (backing) {
 		drm_gem_private_object_init(dev, &backing->gem, aligned_size);
 		return backing;
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index e2db48a81ed0..c707fa6fca85 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -62,9 +62,6 @@ int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
62 int ret = 0; 62 int ret = 0;
63 struct drm_gem_object *obj; 63 struct drm_gem_object *obj;
64 64
65 if (!(dev->driver->driver_features & DRIVER_GEM))
66 return -ENODEV;
67
68 mutex_lock(&dev->struct_mutex); 65 mutex_lock(&dev->struct_mutex);
69 66
70 /* GEM does all our handle to object mapping */ 67 /* GEM does all our handle to object mapping */
@@ -98,8 +95,8 @@ unlock:
98 * it so that userspace can speak about it. This does the core work 95 * it so that userspace can speak about it. This does the core work
99 * for the various methods that do/will create GEM objects for things 96 * for the various methods that do/will create GEM objects for things
100 */ 97 */
101static int psb_gem_create(struct drm_file *file, 98int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size,
102 struct drm_device *dev, uint64_t size, uint32_t *handlep) 99 u32 *handlep, int stolen, u32 align)
103{ 100{
104 struct gtt_range *r; 101 struct gtt_range *r;
105 int ret; 102 int ret;
@@ -109,7 +106,7 @@ static int psb_gem_create(struct drm_file *file,
109 106
110 /* Allocate our object - for now a direct gtt range which is not 107 /* Allocate our object - for now a direct gtt range which is not
111 stolen memory backed */ 108 stolen memory backed */
112 r = psb_gtt_alloc_range(dev, size, "gem", 0); 109 r = psb_gtt_alloc_range(dev, size, "gem", 0, PAGE_SIZE);
113 if (r == NULL) { 110 if (r == NULL) {
114 dev_err(dev->dev, "no memory for %lld byte GEM object\n", size); 111 dev_err(dev->dev, "no memory for %lld byte GEM object\n", size);
115 return -ENOSPC; 112 return -ENOSPC;
@@ -153,7 +150,8 @@ int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
153{ 150{
154 args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64); 151 args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
155 args->size = args->pitch * args->height; 152 args->size = args->pitch * args->height;
156 return psb_gem_create(file, dev, args->size, &args->handle); 153 return psb_gem_create(file, dev, args->size, &args->handle, 0,
154 PAGE_SIZE);
157} 155}
158 156
159/** 157/**
@@ -229,47 +227,3 @@ fail:
229 return VM_FAULT_SIGBUS; 227 return VM_FAULT_SIGBUS;
230 } 228 }
231} 229}
232
233static int psb_gem_create_stolen(struct drm_file *file, struct drm_device *dev,
234 int size, u32 *handle)
235{
236 struct gtt_range *gtt = psb_gtt_alloc_range(dev, size, "gem", 1);
237 if (gtt == NULL)
238 return -ENOMEM;
239
240 drm_gem_private_object_init(dev, &gtt->gem, size);
241 if (drm_gem_handle_create(file, &gtt->gem, handle) == 0)
242 return 0;
243
244 drm_gem_object_release(&gtt->gem);
245 psb_gtt_free_range(dev, gtt);
246 return -ENOMEM;
247}
248
249/*
250 * GEM interfaces for our specific client
251 */
252int psb_gem_create_ioctl(struct drm_device *dev, void *data,
253 struct drm_file *file)
254{
255 struct drm_psb_gem_create *args = data;
256 int ret;
257 if (args->flags & GMA_GEM_CREATE_STOLEN) {
258 ret = psb_gem_create_stolen(file, dev, args->size,
259 &args->handle);
260 if (ret == 0)
261 return 0;
262 /* Fall throguh */
263 args->flags &= ~GMA_GEM_CREATE_STOLEN;
264 }
265 return psb_gem_create(file, dev, args->size, &args->handle);
266}
267
268int psb_gem_mmap_ioctl(struct drm_device *dev, void *data,
269 struct drm_file *file)
270{
271 struct drm_psb_gem_mmap *args = data;
272 return dev->driver->dumb_map_offset(file, dev,
273 args->handle, &args->offset);
274}
275
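With the driver-private create/mmap ioctls gone, userspace allocates and maps buffers through the generic dumb-buffer interface instead (which is what the removed psb_gem_mmap_ioctl() already forwarded to). A minimal userspace sketch using libdrm, with an example mode as placeholder values:

	struct drm_mode_create_dumb create = {
		.width = 1024, .height = 768, .bpp = 32,
	};

	if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
		return -errno;
	/* create.handle, create.pitch and create.size are now valid */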
diff --git a/drivers/gpu/drm/gma500/gem.h b/drivers/gpu/drm/gma500/gem.h
new file mode 100644
index 000000000000..1381c5190f46
--- /dev/null
+++ b/drivers/gpu/drm/gma500/gem.h
@@ -0,0 +1,21 @@
+/**************************************************************************
+ * Copyright (c) 2014 Patrik Jakobsson
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ **************************************************************************/
+
+#ifndef _GEM_H
+#define _GEM_H
+
+extern int psb_gem_create(struct drm_file *file, struct drm_device *dev,
+			  u64 size, u32 *handlep, int stolen, u32 align);
+#endif
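psb_gem_create() is no longer static, so in-kernel users can create GEM objects with an explicit backing type and GTT alignment. The dumb-buffer path above passes stolen = 0 and PAGE_SIZE; a sketch of such a call follows, where the surrounding variables are illustrative and the extra arguments are there for later users that need stolen-memory backing or stricter alignment:

	u32 handle;
	int ret;

	/* Shmem-backed GEM object, page aligned in the GTT aperture */
	ret = psb_gem_create(file, dev, size, &handle, 0, PAGE_SIZE);
	if (ret)
		return ret;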
diff --git a/drivers/gpu/drm/gma500/gma_device.c b/drivers/gpu/drm/gma500/gma_device.c
new file mode 100644
index 000000000000..4a295f9ba067
--- /dev/null
+++ b/drivers/gpu/drm/gma500/gma_device.c
@@ -0,0 +1,56 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ **************************************************************************/
+
+#include <drm/drmP.h>
+#include "psb_drv.h"
+
+void gma_get_core_freq(struct drm_device *dev)
+{
+	uint32_t clock;
+	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	/*pci_write_config_dword(pci_root, 0xD4, 0x00C32004);*/
+	/*pci_write_config_dword(pci_root, 0xD0, 0xE0033000);*/
+
+	pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
+	pci_read_config_dword(pci_root, 0xD4, &clock);
+	pci_dev_put(pci_root);
+
+	switch (clock & 0x07) {
+	case 0:
+		dev_priv->core_freq = 100;
+		break;
+	case 1:
+		dev_priv->core_freq = 133;
+		break;
+	case 2:
+		dev_priv->core_freq = 150;
+		break;
+	case 3:
+		dev_priv->core_freq = 178;
+		break;
+	case 4:
+		dev_priv->core_freq = 200;
+		break;
+	case 5:
+	case 6:
+	case 7:
+		dev_priv->core_freq = 266;
+		break;
+	default:
+		dev_priv->core_freq = 0;
+	}
+}
diff --git a/drivers/gpu/drm/gma500/gma_device.h b/drivers/gpu/drm/gma500/gma_device.h
new file mode 100644
index 000000000000..e1dbb007b820
--- /dev/null
+++ b/drivers/gpu/drm/gma500/gma_device.h
@@ -0,0 +1,21 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ **************************************************************************/
+
+#ifndef _GMA_DEVICE_H
+#define _GMA_DEVICE_H
+
+extern void gma_get_core_freq(struct drm_device *dev);
+
+#endif
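gma_get_core_freq() replaces the identical per-chip _get_core_freq copies. The Cedarview call site is visible in the cdv_device.c hunk above; the Poulsbo setup path in psb_device.c (also changed by this merge, but not quoted in this excerpt) presumably ends up with the same shape, roughly as below, where example_chip_setup() is only a stand-in name:

static int example_chip_setup(struct drm_device *dev)
{
	/* ... chip specific setup ... */
	gma_get_core_freq(dev);		/* shared core clock read-out */
	psb_intel_opregion_init(dev);
	psb_intel_init_bios(dev);
	return 0;
}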
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index 386de2c9dc86..d45476b72aad 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -485,6 +485,13 @@ int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 	return 0;
 }
 
+bool gma_encoder_mode_fixup(struct drm_encoder *encoder,
+			    const struct drm_display_mode *mode,
+			    struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
 bool gma_crtc_mode_fixup(struct drm_crtc *crtc,
 			 const struct drm_display_mode *mode,
 			 struct drm_display_mode *adjusted_mode)
diff --git a/drivers/gpu/drm/gma500/gma_display.h b/drivers/gpu/drm/gma500/gma_display.h
index 78b9f986a6e5..ed569d8a6af3 100644
--- a/drivers/gpu/drm/gma500/gma_display.h
+++ b/drivers/gpu/drm/gma500/gma_display.h
@@ -90,6 +90,9 @@ extern void gma_crtc_restore(struct drm_crtc *crtc);
 extern void gma_encoder_prepare(struct drm_encoder *encoder);
 extern void gma_encoder_commit(struct drm_encoder *encoder);
 extern void gma_encoder_destroy(struct drm_encoder *encoder);
+extern bool gma_encoder_mode_fixup(struct drm_encoder *encoder,
+				   const struct drm_display_mode *mode,
+				   struct drm_display_mode *adjusted_mode);
 
 /* Common clock related functions */
 extern const struct gma_limit_t *gma_limit(struct drm_crtc *crtc, int refclk);
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index 2db731f00930..592d205a0089 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -22,6 +22,7 @@
22#include <drm/drmP.h> 22#include <drm/drmP.h>
23#include <linux/shmem_fs.h> 23#include <linux/shmem_fs.h>
24#include "psb_drv.h" 24#include "psb_drv.h"
25#include "blitter.h"
25 26
26 27
27/* 28/*
@@ -105,11 +106,13 @@ static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r,
105 106
106 /* Write our page entries into the GTT itself */ 107 /* Write our page entries into the GTT itself */
107 for (i = r->roll; i < r->npage; i++) { 108 for (i = r->roll; i < r->npage; i++) {
108 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0); 109 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
110 PSB_MMU_CACHED_MEMORY);
109 iowrite32(pte, gtt_slot++); 111 iowrite32(pte, gtt_slot++);
110 } 112 }
111 for (i = 0; i < r->roll; i++) { 113 for (i = 0; i < r->roll; i++) {
112 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0); 114 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
115 PSB_MMU_CACHED_MEMORY);
113 iowrite32(pte, gtt_slot++); 116 iowrite32(pte, gtt_slot++);
114 } 117 }
115 /* Make sure all the entries are set before we return */ 118 /* Make sure all the entries are set before we return */
@@ -127,7 +130,7 @@ static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r,
127 * page table entries with the dummy page. This is protected via the gtt 130 * page table entries with the dummy page. This is protected via the gtt
128 * mutex which the caller must hold. 131 * mutex which the caller must hold.
129 */ 132 */
130static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r) 133void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
131{ 134{
132 struct drm_psb_private *dev_priv = dev->dev_private; 135 struct drm_psb_private *dev_priv = dev->dev_private;
133 u32 __iomem *gtt_slot; 136 u32 __iomem *gtt_slot;
@@ -137,7 +140,8 @@ static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
137 WARN_ON(r->stolen); 140 WARN_ON(r->stolen);
138 141
139 gtt_slot = psb_gtt_entry(dev, r); 142 gtt_slot = psb_gtt_entry(dev, r);
140 pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page), 0); 143 pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page),
144 PSB_MMU_CACHED_MEMORY);
141 145
142 for (i = 0; i < r->npage; i++) 146 for (i = 0; i < r->npage; i++)
143 iowrite32(pte, gtt_slot++); 147 iowrite32(pte, gtt_slot++);
@@ -176,11 +180,13 @@ void psb_gtt_roll(struct drm_device *dev, struct gtt_range *r, int roll)
176 gtt_slot = psb_gtt_entry(dev, r); 180 gtt_slot = psb_gtt_entry(dev, r);
177 181
178 for (i = r->roll; i < r->npage; i++) { 182 for (i = r->roll; i < r->npage; i++) {
179 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0); 183 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
184 PSB_MMU_CACHED_MEMORY);
180 iowrite32(pte, gtt_slot++); 185 iowrite32(pte, gtt_slot++);
181 } 186 }
182 for (i = 0; i < r->roll; i++) { 187 for (i = 0; i < r->roll; i++) {
183 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0); 188 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
189 PSB_MMU_CACHED_MEMORY);
184 iowrite32(pte, gtt_slot++); 190 iowrite32(pte, gtt_slot++);
185 } 191 }
186 ioread32(gtt_slot - 1); 192 ioread32(gtt_slot - 1);
@@ -240,6 +246,7 @@ int psb_gtt_pin(struct gtt_range *gt)
240 int ret = 0; 246 int ret = 0;
241 struct drm_device *dev = gt->gem.dev; 247 struct drm_device *dev = gt->gem.dev;
242 struct drm_psb_private *dev_priv = dev->dev_private; 248 struct drm_psb_private *dev_priv = dev->dev_private;
249 u32 gpu_base = dev_priv->gtt.gatt_start;
243 250
244 mutex_lock(&dev_priv->gtt_mutex); 251 mutex_lock(&dev_priv->gtt_mutex);
245 252
@@ -252,6 +259,9 @@ int psb_gtt_pin(struct gtt_range *gt)
252 psb_gtt_detach_pages(gt); 259 psb_gtt_detach_pages(gt);
253 goto out; 260 goto out;
254 } 261 }
262 psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu),
263 gt->pages, (gpu_base + gt->offset),
264 gt->npage, 0, 0, PSB_MMU_CACHED_MEMORY);
255 } 265 }
256 gt->in_gart++; 266 gt->in_gart++;
257out: 267out:
@@ -274,16 +284,30 @@ void psb_gtt_unpin(struct gtt_range *gt)
274{ 284{
275 struct drm_device *dev = gt->gem.dev; 285 struct drm_device *dev = gt->gem.dev;
276 struct drm_psb_private *dev_priv = dev->dev_private; 286 struct drm_psb_private *dev_priv = dev->dev_private;
287 u32 gpu_base = dev_priv->gtt.gatt_start;
288 int ret;
277 289
290 /* While holding the gtt_mutex no new blits can be initiated */
278 mutex_lock(&dev_priv->gtt_mutex); 291 mutex_lock(&dev_priv->gtt_mutex);
279 292
293 /* Wait for any possible usage of the memory to be finished */
294 ret = gma_blt_wait_idle(dev_priv);
295 if (ret) {
296 DRM_ERROR("Failed to idle the blitter, unpin failed!");
297 goto out;
298 }
299
280 WARN_ON(!gt->in_gart); 300 WARN_ON(!gt->in_gart);
281 301
282 gt->in_gart--; 302 gt->in_gart--;
283 if (gt->in_gart == 0 && gt->stolen == 0) { 303 if (gt->in_gart == 0 && gt->stolen == 0) {
304 psb_mmu_remove_pages(psb_mmu_get_default_pd(dev_priv->mmu),
305 (gpu_base + gt->offset), gt->npage, 0, 0);
284 psb_gtt_remove(dev, gt); 306 psb_gtt_remove(dev, gt);
285 psb_gtt_detach_pages(gt); 307 psb_gtt_detach_pages(gt);
286 } 308 }
309
310out:
287 mutex_unlock(&dev_priv->gtt_mutex); 311 mutex_unlock(&dev_priv->gtt_mutex);
288} 312}
289 313
@@ -306,7 +330,7 @@ void psb_gtt_unpin(struct gtt_range *gt)
306 * as in use. 330 * as in use.
307 */ 331 */
308struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len, 332struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
309 const char *name, int backed) 333 const char *name, int backed, u32 align)
310{ 334{
311 struct drm_psb_private *dev_priv = dev->dev_private; 335 struct drm_psb_private *dev_priv = dev->dev_private;
312 struct gtt_range *gt; 336 struct gtt_range *gt;
@@ -334,7 +358,7 @@ struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
334 /* Ensure this is set for non GEM objects */ 358 /* Ensure this is set for non GEM objects */
335 gt->gem.dev = dev; 359 gt->gem.dev = dev;
336 ret = allocate_resource(dev_priv->gtt_mem, &gt->resource, 360 ret = allocate_resource(dev_priv->gtt_mem, &gt->resource,
337 len, start, end, PAGE_SIZE, NULL, NULL); 361 len, start, end, align, NULL, NULL);
338 if (ret == 0) { 362 if (ret == 0) {
339 gt->offset = gt->resource.start - r->start; 363 gt->offset = gt->resource.start - r->start;
340 return gt; 364 return gt;
@@ -497,6 +521,7 @@ int psb_gtt_init(struct drm_device *dev, int resume)
497 if (!resume) 521 if (!resume)
498 dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base, 522 dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base,
499 stolen_size); 523 stolen_size);
524
500 if (!dev_priv->vram_addr) { 525 if (!dev_priv->vram_addr) {
501 dev_err(dev->dev, "Failure to map stolen base.\n"); 526 dev_err(dev->dev, "Failure to map stolen base.\n");
502 ret = -ENOMEM; 527 ret = -ENOMEM;
@@ -512,7 +537,7 @@ int psb_gtt_init(struct drm_device *dev, int resume)
512 dev_dbg(dev->dev, "Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n", 537 dev_dbg(dev->dev, "Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
513 num_pages, pfn_base << PAGE_SHIFT, 0); 538 num_pages, pfn_base << PAGE_SHIFT, 0);
514 for (i = 0; i < num_pages; ++i) { 539 for (i = 0; i < num_pages; ++i) {
515 pte = psb_gtt_mask_pte(pfn_base + i, 0); 540 pte = psb_gtt_mask_pte(pfn_base + i, PSB_MMU_CACHED_MEMORY);
516 iowrite32(pte, dev_priv->gtt_map + i); 541 iowrite32(pte, dev_priv->gtt_map + i);
517 } 542 }
518 543
@@ -521,7 +546,7 @@ int psb_gtt_init(struct drm_device *dev, int resume)
521 */ 546 */
522 547
523 pfn_base = page_to_pfn(dev_priv->scratch_page); 548 pfn_base = page_to_pfn(dev_priv->scratch_page);
524 pte = psb_gtt_mask_pte(pfn_base, 0); 549 pte = psb_gtt_mask_pte(pfn_base, PSB_MMU_CACHED_MEMORY);
525 for (; i < gtt_pages; ++i) 550 for (; i < gtt_pages; ++i)
526 iowrite32(pte, dev_priv->gtt_map + i); 551 iowrite32(pte, dev_priv->gtt_map + i);
527 552
diff --git a/drivers/gpu/drm/gma500/gtt.h b/drivers/gpu/drm/gma500/gtt.h
index 6191d10acf33..f5860a739bd8 100644
--- a/drivers/gpu/drm/gma500/gtt.h
+++ b/drivers/gpu/drm/gma500/gtt.h
@@ -53,7 +53,8 @@ struct gtt_range {
 };
 
 extern struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
-					     const char *name, int backed);
+					     const char *name, int backed,
+					     u32 align);
 extern void psb_gtt_kref_put(struct gtt_range *gt);
 extern void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt);
 extern int psb_gtt_pin(struct gtt_range *gt);
diff --git a/drivers/gpu/drm/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c
index 49bac41beefb..3e14a9b35252 100644
--- a/drivers/gpu/drm/gma500/mmu.c
+++ b/drivers/gpu/drm/gma500/mmu.c
@@ -18,6 +18,7 @@
18#include <drm/drmP.h> 18#include <drm/drmP.h>
19#include "psb_drv.h" 19#include "psb_drv.h"
20#include "psb_reg.h" 20#include "psb_reg.h"
21#include "mmu.h"
21 22
22/* 23/*
23 * Code for the SGX MMU: 24 * Code for the SGX MMU:
@@ -47,51 +48,6 @@
47 * but on average it should be fast. 48 * but on average it should be fast.
48 */ 49 */
49 50
50struct psb_mmu_driver {
51 /* protects driver- and pd structures. Always take in read mode
52 * before taking the page table spinlock.
53 */
54 struct rw_semaphore sem;
55
56 /* protects page tables, directory tables and pt tables.
57 * and pt structures.
58 */
59 spinlock_t lock;
60
61 atomic_t needs_tlbflush;
62
63 uint8_t __iomem *register_map;
64 struct psb_mmu_pd *default_pd;
65 /*uint32_t bif_ctrl;*/
66 int has_clflush;
67 int clflush_add;
68 unsigned long clflush_mask;
69
70 struct drm_psb_private *dev_priv;
71};
72
73struct psb_mmu_pd;
74
75struct psb_mmu_pt {
76 struct psb_mmu_pd *pd;
77 uint32_t index;
78 uint32_t count;
79 struct page *p;
80 uint32_t *v;
81};
82
83struct psb_mmu_pd {
84 struct psb_mmu_driver *driver;
85 int hw_context;
86 struct psb_mmu_pt **tables;
87 struct page *p;
88 struct page *dummy_pt;
89 struct page *dummy_page;
90 uint32_t pd_mask;
91 uint32_t invalid_pde;
92 uint32_t invalid_pte;
93};
94
95static inline uint32_t psb_mmu_pt_index(uint32_t offset) 51static inline uint32_t psb_mmu_pt_index(uint32_t offset)
96{ 52{
97 return (offset >> PSB_PTE_SHIFT) & 0x3FF; 53 return (offset >> PSB_PTE_SHIFT) & 0x3FF;
@@ -102,13 +58,13 @@ static inline uint32_t psb_mmu_pd_index(uint32_t offset)
102 return offset >> PSB_PDE_SHIFT; 58 return offset >> PSB_PDE_SHIFT;
103} 59}
104 60
61#if defined(CONFIG_X86)
105static inline void psb_clflush(void *addr) 62static inline void psb_clflush(void *addr)
106{ 63{
107 __asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory"); 64 __asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
108} 65}
109 66
110static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, 67static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
111 void *addr)
112{ 68{
113 if (!driver->has_clflush) 69 if (!driver->has_clflush)
114 return; 70 return;
@@ -117,62 +73,77 @@ static inline void psb_mmu_clflush(struct psb_mmu_driver *driver,
117 psb_clflush(addr); 73 psb_clflush(addr);
118 mb(); 74 mb();
119} 75}
76#else
120 77
121static void psb_page_clflush(struct psb_mmu_driver *driver, struct page* page) 78static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
122{ 79{;
123 uint32_t clflush_add = driver->clflush_add >> PAGE_SHIFT;
124 uint32_t clflush_count = PAGE_SIZE / clflush_add;
125 int i;
126 uint8_t *clf;
127
128 clf = kmap_atomic(page);
129 mb();
130 for (i = 0; i < clflush_count; ++i) {
131 psb_clflush(clf);
132 clf += clflush_add;
133 }
134 mb();
135 kunmap_atomic(clf);
136} 80}
137 81
138static void psb_pages_clflush(struct psb_mmu_driver *driver, 82#endif
139 struct page *page[], unsigned long num_pages)
140{
141 int i;
142
143 if (!driver->has_clflush)
144 return ;
145 83
146 for (i = 0; i < num_pages; i++) 84static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver, int force)
147 psb_page_clflush(driver, *page++);
148}
149
150static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver,
151 int force)
152{ 85{
86 struct drm_device *dev = driver->dev;
87 struct drm_psb_private *dev_priv = dev->dev_private;
88
89 if (atomic_read(&driver->needs_tlbflush) || force) {
90 uint32_t val = PSB_RSGX32(PSB_CR_BIF_CTRL);
91 PSB_WSGX32(val | _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
92
93 /* Make sure data cache is turned off before enabling it */
94 wmb();
95 PSB_WSGX32(val & ~_PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
96 (void)PSB_RSGX32(PSB_CR_BIF_CTRL);
97 if (driver->msvdx_mmu_invaldc)
98 atomic_set(driver->msvdx_mmu_invaldc, 1);
99 }
153 atomic_set(&driver->needs_tlbflush, 0); 100 atomic_set(&driver->needs_tlbflush, 0);
154} 101}
155 102
103#if 0
156static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force) 104static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
157{ 105{
158 down_write(&driver->sem); 106 down_write(&driver->sem);
159 psb_mmu_flush_pd_locked(driver, force); 107 psb_mmu_flush_pd_locked(driver, force);
160 up_write(&driver->sem); 108 up_write(&driver->sem);
161} 109}
110#endif
162 111
163void psb_mmu_flush(struct psb_mmu_driver *driver, int rc_prot) 112void psb_mmu_flush(struct psb_mmu_driver *driver)
164{ 113{
165 if (rc_prot) 114 struct drm_device *dev = driver->dev;
166 down_write(&driver->sem); 115 struct drm_psb_private *dev_priv = dev->dev_private;
167 if (rc_prot) 116 uint32_t val;
168 up_write(&driver->sem); 117
118 down_write(&driver->sem);
119 val = PSB_RSGX32(PSB_CR_BIF_CTRL);
120 if (atomic_read(&driver->needs_tlbflush))
121 PSB_WSGX32(val | _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
122 else
123 PSB_WSGX32(val | _PSB_CB_CTRL_FLUSH, PSB_CR_BIF_CTRL);
124
125 /* Make sure data cache is turned off and MMU is flushed before
126 restoring bank interface control register */
127 wmb();
128 PSB_WSGX32(val & ~(_PSB_CB_CTRL_FLUSH | _PSB_CB_CTRL_INVALDC),
129 PSB_CR_BIF_CTRL);
130 (void)PSB_RSGX32(PSB_CR_BIF_CTRL);
131
132 atomic_set(&driver->needs_tlbflush, 0);
133 if (driver->msvdx_mmu_invaldc)
134 atomic_set(driver->msvdx_mmu_invaldc, 1);
135 up_write(&driver->sem);
169} 136}
170 137
171void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context) 138void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
172{ 139{
173 /*ttm_tt_cache_flush(&pd->p, 1);*/ 140 struct drm_device *dev = pd->driver->dev;
174 psb_pages_clflush(pd->driver, &pd->p, 1); 141 struct drm_psb_private *dev_priv = dev->dev_private;
142 uint32_t offset = (hw_context == 0) ? PSB_CR_BIF_DIR_LIST_BASE0 :
143 PSB_CR_BIF_DIR_LIST_BASE1 + hw_context * 4;
144
175 down_write(&pd->driver->sem); 145 down_write(&pd->driver->sem);
146 PSB_WSGX32(page_to_pfn(pd->p) << PAGE_SHIFT, offset);
176 wmb(); 147 wmb();
177 psb_mmu_flush_pd_locked(pd->driver, 1); 148 psb_mmu_flush_pd_locked(pd->driver, 1);
178 pd->hw_context = hw_context; 149 pd->hw_context = hw_context;
@@ -183,7 +154,6 @@ void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
183static inline unsigned long psb_pd_addr_end(unsigned long addr, 154static inline unsigned long psb_pd_addr_end(unsigned long addr,
184 unsigned long end) 155 unsigned long end)
185{ 156{
186
187 addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK; 157 addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
188 return (addr < end) ? addr : end; 158 return (addr < end) ? addr : end;
189} 159}
@@ -223,12 +193,10 @@ struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
223 goto out_err3; 193 goto out_err3;
224 194
225 if (!trap_pagefaults) { 195 if (!trap_pagefaults) {
226 pd->invalid_pde = 196 pd->invalid_pde = psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
227 psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt), 197 invalid_type);
228 invalid_type); 198 pd->invalid_pte = psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
229 pd->invalid_pte = 199 invalid_type);
230 psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
231 invalid_type);
232 } else { 200 } else {
233 pd->invalid_pde = 0; 201 pd->invalid_pde = 0;
234 pd->invalid_pte = 0; 202 pd->invalid_pte = 0;
@@ -279,12 +247,16 @@ static void psb_mmu_free_pt(struct psb_mmu_pt *pt)
279void psb_mmu_free_pagedir(struct psb_mmu_pd *pd) 247void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
280{ 248{
281 struct psb_mmu_driver *driver = pd->driver; 249 struct psb_mmu_driver *driver = pd->driver;
250 struct drm_device *dev = driver->dev;
251 struct drm_psb_private *dev_priv = dev->dev_private;
282 struct psb_mmu_pt *pt; 252 struct psb_mmu_pt *pt;
283 int i; 253 int i;
284 254
285 down_write(&driver->sem); 255 down_write(&driver->sem);
286 if (pd->hw_context != -1) 256 if (pd->hw_context != -1) {
257 PSB_WSGX32(0, PSB_CR_BIF_DIR_LIST_BASE0 + pd->hw_context * 4);
287 psb_mmu_flush_pd_locked(driver, 1); 258 psb_mmu_flush_pd_locked(driver, 1);
259 }
288 260
289 /* Should take the spinlock here, but we don't need to do that 261 /* Should take the spinlock here, but we don't need to do that
290 since we have the semaphore in write mode. */ 262 since we have the semaphore in write mode. */
@@ -331,7 +303,7 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
331 for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i) 303 for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
332 *ptes++ = pd->invalid_pte; 304 *ptes++ = pd->invalid_pte;
333 305
334 306#if defined(CONFIG_X86)
335 if (pd->driver->has_clflush && pd->hw_context != -1) { 307 if (pd->driver->has_clflush && pd->hw_context != -1) {
336 mb(); 308 mb();
337 for (i = 0; i < clflush_count; ++i) { 309 for (i = 0; i < clflush_count; ++i) {
@@ -340,7 +312,7 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
340 } 312 }
341 mb(); 313 mb();
342 } 314 }
343 315#endif
344 kunmap_atomic(v); 316 kunmap_atomic(v);
345 spin_unlock(lock); 317 spin_unlock(lock);
346 318
@@ -351,7 +323,7 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
351 return pt; 323 return pt;
352} 324}
353 325
354static struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd, 326struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
355 unsigned long addr) 327 unsigned long addr)
356{ 328{
357 uint32_t index = psb_mmu_pd_index(addr); 329 uint32_t index = psb_mmu_pd_index(addr);
@@ -383,7 +355,7 @@ static struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
383 kunmap_atomic((void *) v); 355 kunmap_atomic((void *) v);
384 356
385 if (pd->hw_context != -1) { 357 if (pd->hw_context != -1) {
386 psb_mmu_clflush(pd->driver, (void *) &v[index]); 358 psb_mmu_clflush(pd->driver, (void *)&v[index]);
387 atomic_set(&pd->driver->needs_tlbflush, 1); 359 atomic_set(&pd->driver->needs_tlbflush, 1);
388 } 360 }
389 } 361 }
@@ -420,8 +392,7 @@ static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
420 pd->tables[pt->index] = NULL; 392 pd->tables[pt->index] = NULL;
421 393
422 if (pd->hw_context != -1) { 394 if (pd->hw_context != -1) {
423 psb_mmu_clflush(pd->driver, 395 psb_mmu_clflush(pd->driver, (void *)&v[pt->index]);
424 (void *) &v[pt->index]);
425 atomic_set(&pd->driver->needs_tlbflush, 1); 396 atomic_set(&pd->driver->needs_tlbflush, 1);
426 } 397 }
427 kunmap_atomic(pt->v); 398 kunmap_atomic(pt->v);
@@ -432,8 +403,8 @@ static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
432 spin_unlock(&pd->driver->lock); 403 spin_unlock(&pd->driver->lock);
433} 404}
434 405
435static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt, 406static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt, unsigned long addr,
436 unsigned long addr, uint32_t pte) 407 uint32_t pte)
437{ 408{
438 pt->v[psb_mmu_pt_index(addr)] = pte; 409 pt->v[psb_mmu_pt_index(addr)] = pte;
439} 410}
@@ -444,69 +415,50 @@ static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
444 pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte; 415 pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
445} 416}
446 417
447 418struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
448void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
449 uint32_t mmu_offset, uint32_t gtt_start,
450 uint32_t gtt_pages)
451{ 419{
452 uint32_t *v; 420 struct psb_mmu_pd *pd;
453 uint32_t start = psb_mmu_pd_index(mmu_offset);
454 struct psb_mmu_driver *driver = pd->driver;
455 int num_pages = gtt_pages;
456 421
457 down_read(&driver->sem); 422 down_read(&driver->sem);
458 spin_lock(&driver->lock); 423 pd = driver->default_pd;
459 424 up_read(&driver->sem);
460 v = kmap_atomic(pd->p);
461 v += start;
462
463 while (gtt_pages--) {
464 *v++ = gtt_start | pd->pd_mask;
465 gtt_start += PAGE_SIZE;
466 }
467
468 /*ttm_tt_cache_flush(&pd->p, num_pages);*/
469 psb_pages_clflush(pd->driver, &pd->p, num_pages);
470 kunmap_atomic(v);
471 spin_unlock(&driver->lock);
472
473 if (pd->hw_context != -1)
474 atomic_set(&pd->driver->needs_tlbflush, 1);
475 425
476 up_read(&pd->driver->sem); 426 return pd;
477 psb_mmu_flush_pd(pd->driver, 0);
478} 427}
479 428
480struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver) 429/* Returns the physical address of the PD shared by sgx/msvdx */
430uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver)
481{ 431{
482 struct psb_mmu_pd *pd; 432 struct psb_mmu_pd *pd;
483 433
484 /* down_read(&driver->sem); */ 434 pd = psb_mmu_get_default_pd(driver);
485 pd = driver->default_pd; 435 return page_to_pfn(pd->p) << PAGE_SHIFT;
486 /* up_read(&driver->sem); */
487
488 return pd;
489} 436}
490 437
491void psb_mmu_driver_takedown(struct psb_mmu_driver *driver) 438void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
492{ 439{
440 struct drm_device *dev = driver->dev;
441 struct drm_psb_private *dev_priv = dev->dev_private;
442
443 PSB_WSGX32(driver->bif_ctrl, PSB_CR_BIF_CTRL);
493 psb_mmu_free_pagedir(driver->default_pd); 444 psb_mmu_free_pagedir(driver->default_pd);
494 kfree(driver); 445 kfree(driver);
495} 446}
496 447
497struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers, 448struct psb_mmu_driver *psb_mmu_driver_init(struct drm_device *dev,
498 int trap_pagefaults, 449 int trap_pagefaults,
499 int invalid_type, 450 int invalid_type,
500 struct drm_psb_private *dev_priv) 451 atomic_t *msvdx_mmu_invaldc)
501{ 452{
502 struct psb_mmu_driver *driver; 453 struct psb_mmu_driver *driver;
454 struct drm_psb_private *dev_priv = dev->dev_private;
503 455
504 driver = kmalloc(sizeof(*driver), GFP_KERNEL); 456 driver = kmalloc(sizeof(*driver), GFP_KERNEL);
505 457
506 if (!driver) 458 if (!driver)
507 return NULL; 459 return NULL;
508 driver->dev_priv = dev_priv;
509 460
461 driver->dev = dev;
510 driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults, 462 driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
511 invalid_type); 463 invalid_type);
512 if (!driver->default_pd) 464 if (!driver->default_pd)
@@ -515,17 +467,24 @@ struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
515 spin_lock_init(&driver->lock); 467 spin_lock_init(&driver->lock);
516 init_rwsem(&driver->sem); 468 init_rwsem(&driver->sem);
517 down_write(&driver->sem); 469 down_write(&driver->sem);
518 driver->register_map = registers;
519 atomic_set(&driver->needs_tlbflush, 1); 470 atomic_set(&driver->needs_tlbflush, 1);
471 driver->msvdx_mmu_invaldc = msvdx_mmu_invaldc;
472
473 driver->bif_ctrl = PSB_RSGX32(PSB_CR_BIF_CTRL);
474 PSB_WSGX32(driver->bif_ctrl | _PSB_CB_CTRL_CLEAR_FAULT,
475 PSB_CR_BIF_CTRL);
476 PSB_WSGX32(driver->bif_ctrl & ~_PSB_CB_CTRL_CLEAR_FAULT,
477 PSB_CR_BIF_CTRL);
520 478
521 driver->has_clflush = 0; 479 driver->has_clflush = 0;
522 480
481#if defined(CONFIG_X86)
523 if (boot_cpu_has(X86_FEATURE_CLFLSH)) { 482 if (boot_cpu_has(X86_FEATURE_CLFLSH)) {
524 uint32_t tfms, misc, cap0, cap4, clflush_size; 483 uint32_t tfms, misc, cap0, cap4, clflush_size;
525 484
526 /* 485 /*
527 * clflush size is determined at kernel setup for x86_64 486 * clflush size is determined at kernel setup for x86_64 but not
528 * but not for i386. We have to do it here. 487 * for i386. We have to do it here.
529 */ 488 */
530 489
531 cpuid(0x00000001, &tfms, &misc, &cap0, &cap4); 490 cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
@@ -536,6 +495,7 @@ struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
536 driver->clflush_mask = driver->clflush_add - 1; 495 driver->clflush_mask = driver->clflush_add - 1;
537 driver->clflush_mask = ~driver->clflush_mask; 496 driver->clflush_mask = ~driver->clflush_mask;
538 } 497 }
498#endif
539 499
540 up_write(&driver->sem); 500 up_write(&driver->sem);
541 return driver; 501 return driver;
@@ -545,9 +505,9 @@ out_err1:
545 return NULL; 505 return NULL;
546} 506}
547 507
548static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, 508#if defined(CONFIG_X86)
549 unsigned long address, uint32_t num_pages, 509static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
550 uint32_t desired_tile_stride, 510 uint32_t num_pages, uint32_t desired_tile_stride,
551 uint32_t hw_tile_stride) 511 uint32_t hw_tile_stride)
552{ 512{
553 struct psb_mmu_pt *pt; 513 struct psb_mmu_pt *pt;
@@ -561,11 +521,8 @@ static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
561 unsigned long clflush_add = pd->driver->clflush_add; 521 unsigned long clflush_add = pd->driver->clflush_add;
562 unsigned long clflush_mask = pd->driver->clflush_mask; 522 unsigned long clflush_mask = pd->driver->clflush_mask;
563 523
564 if (!pd->driver->has_clflush) { 524 if (!pd->driver->has_clflush)
565 /*ttm_tt_cache_flush(&pd->p, num_pages);*/
566 psb_pages_clflush(pd->driver, &pd->p, num_pages);
567 return; 525 return;
568 }
569 526
570 if (hw_tile_stride) 527 if (hw_tile_stride)
571 rows = num_pages / desired_tile_stride; 528 rows = num_pages / desired_tile_stride;
@@ -586,10 +543,8 @@ static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
586 if (!pt) 543 if (!pt)
587 continue; 544 continue;
588 do { 545 do {
589 psb_clflush(&pt->v 546 psb_clflush(&pt->v[psb_mmu_pt_index(addr)]);
590 [psb_mmu_pt_index(addr)]); 547 } while (addr += clflush_add,
591 } while (addr +=
592 clflush_add,
593 (addr & clflush_mask) < next); 548 (addr & clflush_mask) < next);
594 549
595 psb_mmu_pt_unmap_unlock(pt); 550 psb_mmu_pt_unmap_unlock(pt);
@@ -598,6 +553,14 @@ static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
598 } 553 }
599 mb(); 554 mb();
600} 555}
556#else
557static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
558 uint32_t num_pages, uint32_t desired_tile_stride,
559 uint32_t hw_tile_stride)
560{
561 drm_ttm_cache_flush();
562}
563#endif
601 564
602void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd, 565void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
603 unsigned long address, uint32_t num_pages) 566 unsigned long address, uint32_t num_pages)
@@ -633,7 +596,7 @@ out:
633 up_read(&pd->driver->sem); 596 up_read(&pd->driver->sem);
634 597
635 if (pd->hw_context != -1) 598 if (pd->hw_context != -1)
636 psb_mmu_flush(pd->driver, 0); 599 psb_mmu_flush(pd->driver);
637 600
638 return; 601 return;
639} 602}
@@ -660,7 +623,7 @@ void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
660 add = desired_tile_stride << PAGE_SHIFT; 623 add = desired_tile_stride << PAGE_SHIFT;
661 row_add = hw_tile_stride << PAGE_SHIFT; 624 row_add = hw_tile_stride << PAGE_SHIFT;
662 625
663 /* down_read(&pd->driver->sem); */ 626 down_read(&pd->driver->sem);
664 627
665 /* Make sure we only need to flush this processor's cache */ 628 /* Make sure we only need to flush this processor's cache */
666 629
@@ -688,10 +651,10 @@ void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
688 psb_mmu_flush_ptes(pd, f_address, num_pages, 651 psb_mmu_flush_ptes(pd, f_address, num_pages,
689 desired_tile_stride, hw_tile_stride); 652 desired_tile_stride, hw_tile_stride);
690 653
691 /* up_read(&pd->driver->sem); */ 654 up_read(&pd->driver->sem);
692 655
693 if (pd->hw_context != -1) 656 if (pd->hw_context != -1)
694 psb_mmu_flush(pd->driver, 0); 657 psb_mmu_flush(pd->driver);
695} 658}
696 659
697int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn, 660int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
@@ -704,7 +667,7 @@ int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
704 unsigned long end; 667 unsigned long end;
705 unsigned long next; 668 unsigned long next;
706 unsigned long f_address = address; 669 unsigned long f_address = address;
707 int ret = 0; 670 int ret = -ENOMEM;
708 671
709 down_read(&pd->driver->sem); 672 down_read(&pd->driver->sem);
710 673
@@ -726,6 +689,7 @@ int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
726 psb_mmu_pt_unmap_unlock(pt); 689 psb_mmu_pt_unmap_unlock(pt);
727 690
728 } while (addr = next, next != end); 691 } while (addr = next, next != end);
692 ret = 0;
729 693
730out: 694out:
731 if (pd->hw_context != -1) 695 if (pd->hw_context != -1)
@@ -734,15 +698,15 @@ out:
734 up_read(&pd->driver->sem); 698 up_read(&pd->driver->sem);
735 699
736 if (pd->hw_context != -1) 700 if (pd->hw_context != -1)
737 psb_mmu_flush(pd->driver, 1); 701 psb_mmu_flush(pd->driver);
738 702
739 return ret; 703 return 0;
740} 704}
741 705
742int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages, 706int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
743 unsigned long address, uint32_t num_pages, 707 unsigned long address, uint32_t num_pages,
744 uint32_t desired_tile_stride, 708 uint32_t desired_tile_stride, uint32_t hw_tile_stride,
745 uint32_t hw_tile_stride, int type) 709 int type)
746{ 710{
747 struct psb_mmu_pt *pt; 711 struct psb_mmu_pt *pt;
748 uint32_t rows = 1; 712 uint32_t rows = 1;
@@ -754,7 +718,7 @@ int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
754 unsigned long add; 718 unsigned long add;
755 unsigned long row_add; 719 unsigned long row_add;
756 unsigned long f_address = address; 720 unsigned long f_address = address;
757 int ret = 0; 721 int ret = -ENOMEM;
758 722
759 if (hw_tile_stride) { 723 if (hw_tile_stride) {
760 if (num_pages % desired_tile_stride != 0) 724 if (num_pages % desired_tile_stride != 0)
@@ -777,14 +741,11 @@ int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
777 do { 741 do {
778 next = psb_pd_addr_end(addr, end); 742 next = psb_pd_addr_end(addr, end);
779 pt = psb_mmu_pt_alloc_map_lock(pd, addr); 743 pt = psb_mmu_pt_alloc_map_lock(pd, addr);
780 if (!pt) { 744 if (!pt)
781 ret = -ENOMEM;
782 goto out; 745 goto out;
783 }
784 do { 746 do {
785 pte = 747 pte = psb_mmu_mask_pte(page_to_pfn(*pages++),
786 psb_mmu_mask_pte(page_to_pfn(*pages++), 748 type);
787 type);
788 psb_mmu_set_pte(pt, addr, pte); 749 psb_mmu_set_pte(pt, addr, pte);
789 pt->count++; 750 pt->count++;
790 } while (addr += PAGE_SIZE, addr < next); 751 } while (addr += PAGE_SIZE, addr < next);
@@ -794,6 +755,8 @@ int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
794 755
795 address += row_add; 756 address += row_add;
796 } 757 }
758
759 ret = 0;
797out: 760out:
798 if (pd->hw_context != -1) 761 if (pd->hw_context != -1)
799 psb_mmu_flush_ptes(pd, f_address, num_pages, 762 psb_mmu_flush_ptes(pd, f_address, num_pages,
@@ -802,7 +765,7 @@ out:
802 up_read(&pd->driver->sem); 765 up_read(&pd->driver->sem);
803 766
804 if (pd->hw_context != -1) 767 if (pd->hw_context != -1)
805 psb_mmu_flush(pd->driver, 1); 768 psb_mmu_flush(pd->driver);
806 769
807 return ret; 770 return ret;
808} 771}
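The two hunks above switch psb_mmu_insert_pfn_sequence() and psb_mmu_insert_pages() to a pessimistic-default error convention: ret starts out as -ENOMEM, an allocation failure simply jumps to the out: label, and ret is only cleared to 0 once the whole walk has completed, so the flush/unlock cleanup can run unconditionally on both paths. The standalone sketch below illustrates that shape only; walk_one() and flush_and_unlock() are hypothetical stand-ins, not gma500 functions.

#include <stdio.h>
#include <stdbool.h>

#define PAGE_SIZE 4096UL

/* Stand-ins for the page-table walk step and for the flush/unlock work
 * done at the out: label in the real code; purely illustrative. */
static bool walk_one(unsigned long addr) { return addr < 8 * PAGE_SIZE; }
static void flush_and_unlock(void) { puts("flush + unlock"); }

static int insert_range(unsigned long addr, unsigned long end)
{
	int ret = -12;			/* -ENOMEM: pessimistic default */

	do {
		if (!walk_one(addr))	/* allocation failure bails straight out */
			goto out;
		addr += PAGE_SIZE;
	} while (addr < end);

	ret = 0;			/* only reached after the full walk */
out:
	flush_and_unlock();		/* runs on success and failure alike */
	return ret;
}

int main(void)
{
	printf("ok: %d\n", insert_range(0, 4 * PAGE_SIZE));
	printf("fail: %d\n", insert_range(0, 16 * PAGE_SIZE));
	return 0;
}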
diff --git a/drivers/gpu/drm/gma500/mmu.h b/drivers/gpu/drm/gma500/mmu.h
new file mode 100644
index 000000000000..e89abec6209d
--- /dev/null
+++ b/drivers/gpu/drm/gma500/mmu.h
@@ -0,0 +1,93 @@
1/**************************************************************************
2 * Copyright (c) 2007-2011, Intel Corporation.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 **************************************************************************/
14
15#ifndef __MMU_H
16#define __MMU_H
17
18struct psb_mmu_driver {
19 /* protects driver- and pd structures. Always take in read mode
20 * before taking the page table spinlock.
21 */
22 struct rw_semaphore sem;
23
24 /* protects page tables, directory tables and pt tables.
25 * and pt structures.
26 */
27 spinlock_t lock;
28
29 atomic_t needs_tlbflush;
30 atomic_t *msvdx_mmu_invaldc;
31 struct psb_mmu_pd *default_pd;
32 uint32_t bif_ctrl;
33 int has_clflush;
34 int clflush_add;
35 unsigned long clflush_mask;
36
37 struct drm_device *dev;
38};
39
40struct psb_mmu_pd;
41
42struct psb_mmu_pt {
43 struct psb_mmu_pd *pd;
44 uint32_t index;
45 uint32_t count;
46 struct page *p;
47 uint32_t *v;
48};
49
50struct psb_mmu_pd {
51 struct psb_mmu_driver *driver;
52 int hw_context;
53 struct psb_mmu_pt **tables;
54 struct page *p;
55 struct page *dummy_pt;
56 struct page *dummy_page;
57 uint32_t pd_mask;
58 uint32_t invalid_pde;
59 uint32_t invalid_pte;
60};
61
62extern struct psb_mmu_driver *psb_mmu_driver_init(struct drm_device *dev,
63 int trap_pagefaults,
64 int invalid_type,
65 atomic_t *msvdx_mmu_invaldc);
66extern void psb_mmu_driver_takedown(struct psb_mmu_driver *driver);
67extern struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver
68 *driver);
69extern struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
70 int trap_pagefaults,
71 int invalid_type);
72extern void psb_mmu_free_pagedir(struct psb_mmu_pd *pd);
73extern void psb_mmu_flush(struct psb_mmu_driver *driver);
74extern void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
75 unsigned long address,
76 uint32_t num_pages);
77extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd,
78 uint32_t start_pfn,
79 unsigned long address,
80 uint32_t num_pages, int type);
81extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
82 unsigned long *pfn);
83extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context);
84extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
85 unsigned long address, uint32_t num_pages,
86 uint32_t desired_tile_stride,
87 uint32_t hw_tile_stride, int type);
88extern void psb_mmu_remove_pages(struct psb_mmu_pd *pd,
89 unsigned long address, uint32_t num_pages,
90 uint32_t desired_tile_stride,
91 uint32_t hw_tile_stride);
92
93#endif
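The header above is the whole of the new SGX MMU interface, and psb_drv.c later in this diff consumes it when mapping stolen memory into the SGX address space. The sketch below strings the declared calls together in that order (init the driver, fetch the default page directory, insert the stolen-memory PFN range, bind hardware context 0). It assumes the gma500/kernel headers; example_map_stolen() and its parameters are hypothetical and the error handling is illustrative only.

/* Hypothetical consumer of the API declared above; mirrors what
 * psb_driver_load() does further down in this diff. */
#include <drm/drmP.h>
#include "mmu.h"

static int example_map_stolen(struct drm_device *dev,
			      unsigned long stolen_base,
			      unsigned long gatt_start,
			      unsigned long stolen_size)
{
	struct psb_mmu_driver *mmu;
	struct psb_mmu_pd *pd;
	int ret;

	/* Trap page faults (1), invalid-PTE type 0, no MSVDX invalidation hook */
	mmu = psb_mmu_driver_init(dev, 1, 0, NULL);
	if (!mmu)
		return -ENOMEM;

	pd = psb_mmu_get_default_pd(mmu);

	/* Point the GATT aperture at the stolen memory pages */
	ret = psb_mmu_insert_pfn_sequence(pd, stolen_base >> PAGE_SHIFT,
					  gatt_start,
					  stolen_size >> PAGE_SHIFT, 0);
	if (ret) {
		psb_mmu_driver_takedown(mmu);
		return ret;
	}

	/* Bind the default directory to hardware context 0 */
	psb_mmu_set_pd_context(pd, 0);
	return 0;
}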
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index 38153143ed8c..cf018ddcc5a6 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -523,13 +523,6 @@ static int oaktrail_hdmi_mode_valid(struct drm_connector *connector,
523 return MODE_OK; 523 return MODE_OK;
524} 524}
525 525
526static bool oaktrail_hdmi_mode_fixup(struct drm_encoder *encoder,
527 const struct drm_display_mode *mode,
528 struct drm_display_mode *adjusted_mode)
529{
530 return true;
531}
532
533static enum drm_connector_status 526static enum drm_connector_status
534oaktrail_hdmi_detect(struct drm_connector *connector, bool force) 527oaktrail_hdmi_detect(struct drm_connector *connector, bool force)
535{ 528{
@@ -608,7 +601,7 @@ static void oaktrail_hdmi_destroy(struct drm_connector *connector)
608 601
609static const struct drm_encoder_helper_funcs oaktrail_hdmi_helper_funcs = { 602static const struct drm_encoder_helper_funcs oaktrail_hdmi_helper_funcs = {
610 .dpms = oaktrail_hdmi_dpms, 603 .dpms = oaktrail_hdmi_dpms,
611 .mode_fixup = oaktrail_hdmi_mode_fixup, 604 .mode_fixup = gma_encoder_mode_fixup,
612 .prepare = gma_encoder_prepare, 605 .prepare = gma_encoder_prepare,
613 .mode_set = oaktrail_hdmi_mode_set, 606 .mode_set = oaktrail_hdmi_mode_set,
614 .commit = gma_encoder_commit, 607 .commit = gma_encoder_commit,
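This hunk drops the per-encoder oaktrail_hdmi_mode_fixup() stub in favour of the shared gma_encoder_mode_fixup(). Judging by the removed stub, the shared helper is presumably the same accept-the-mode-unchanged callback; a sketch of that presumed shape follows (the real definition lives in the common gma display code, which is not part of this excerpt, so treat this as an assumption).

/* Presumed shape of the shared helper; the removed per-encoder stubs
 * were identical no-ops, so the unified callback is assumed to be too. */
bool gma_encoder_mode_fixup(struct drm_encoder *encoder,
			    const struct drm_display_mode *mode,
			    struct drm_display_mode *adjusted_mode)
{
	return true;
}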
diff --git a/drivers/gpu/drm/gma500/opregion.c b/drivers/gpu/drm/gma500/opregion.c
index 13ec6283bf59..ab696ca7eeec 100644
--- a/drivers/gpu/drm/gma500/opregion.c
+++ b/drivers/gpu/drm/gma500/opregion.c
@@ -173,10 +173,13 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
173 return 0; 173 return 0;
174} 174}
175 175
176void psb_intel_opregion_asle_intr(struct drm_device *dev) 176static void psb_intel_opregion_asle_work(struct work_struct *work)
177{ 177{
178 struct drm_psb_private *dev_priv = dev->dev_private; 178 struct psb_intel_opregion *opregion =
179 struct opregion_asle *asle = dev_priv->opregion.asle; 179 container_of(work, struct psb_intel_opregion, asle_work);
180 struct drm_psb_private *dev_priv =
181 container_of(opregion, struct drm_psb_private, opregion);
182 struct opregion_asle *asle = opregion->asle;
180 u32 asle_stat = 0; 183 u32 asle_stat = 0;
181 u32 asle_req; 184 u32 asle_req;
182 185
@@ -190,9 +193,18 @@ void psb_intel_opregion_asle_intr(struct drm_device *dev)
190 } 193 }
191 194
192 if (asle_req & ASLE_SET_BACKLIGHT) 195 if (asle_req & ASLE_SET_BACKLIGHT)
193 asle_stat |= asle_set_backlight(dev, asle->bclp); 196 asle_stat |= asle_set_backlight(dev_priv->dev, asle->bclp);
194 197
195 asle->aslc = asle_stat; 198 asle->aslc = asle_stat;
199
200}
201
202void psb_intel_opregion_asle_intr(struct drm_device *dev)
203{
204 struct drm_psb_private *dev_priv = dev->dev_private;
205
206 if (dev_priv->opregion.asle)
207 schedule_work(&dev_priv->opregion.asle_work);
196} 208}
197 209
198#define ASLE_ALS_EN (1<<0) 210#define ASLE_ALS_EN (1<<0)
@@ -282,6 +294,8 @@ void psb_intel_opregion_fini(struct drm_device *dev)
282 unregister_acpi_notifier(&psb_intel_opregion_notifier); 294 unregister_acpi_notifier(&psb_intel_opregion_notifier);
283 } 295 }
284 296
297 cancel_work_sync(&opregion->asle_work);
298
285 /* just clear all opregion memory pointers now */ 299 /* just clear all opregion memory pointers now */
286 iounmap(opregion->header); 300 iounmap(opregion->header);
287 opregion->header = NULL; 301 opregion->header = NULL;
@@ -304,6 +318,9 @@ int psb_intel_opregion_setup(struct drm_device *dev)
304 DRM_DEBUG_DRIVER("ACPI Opregion not supported\n"); 318 DRM_DEBUG_DRIVER("ACPI Opregion not supported\n");
305 return -ENOTSUPP; 319 return -ENOTSUPP;
306 } 320 }
321
322 INIT_WORK(&opregion->asle_work, psb_intel_opregion_asle_work);
323
307 DRM_DEBUG("OpRegion detected at 0x%8x\n", opregion_phy); 324 DRM_DEBUG("OpRegion detected at 0x%8x\n", opregion_phy);
308 base = acpi_os_ioremap(opregion_phy, 8*1024); 325 base = acpi_os_ioremap(opregion_phy, 8*1024);
309 if (!base) 326 if (!base)
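The opregion change above defers ASLE handling to a work item: the interrupt path now only calls schedule_work(), the register poking runs later in process context inside psb_intel_opregion_asle_work(), and teardown cancels the work synchronously. Below is a minimal out-of-tree-module sketch of that same workqueue pattern; the demo_* names are hypothetical and this is not gma500 code.

#include <linux/module.h>
#include <linux/workqueue.h>

static void demo_work_fn(struct work_struct *work)
{
	pr_info("heavy lifting runs here, in process context\n");
}

static DECLARE_WORK(demo_work, demo_work_fn);

/* What an interrupt handler would do: just queue the work and return */
static void demo_irq_path(void)
{
	schedule_work(&demo_work);
}

static int __init demo_init(void)
{
	demo_irq_path();
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_work_sync(&demo_work);	/* mirrors psb_intel_opregion_fini() */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");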
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index 23fb33f1471b..07df7d4eea72 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -26,6 +26,7 @@
26#include "psb_intel_reg.h" 26#include "psb_intel_reg.h"
27#include "intel_bios.h" 27#include "intel_bios.h"
28#include "psb_device.h" 28#include "psb_device.h"
29#include "gma_device.h"
29 30
30static int psb_output_init(struct drm_device *dev) 31static int psb_output_init(struct drm_device *dev)
31{ 32{
@@ -257,45 +258,6 @@ static int psb_power_up(struct drm_device *dev)
257 return 0; 258 return 0;
258} 259}
259 260
260static void psb_get_core_freq(struct drm_device *dev)
261{
262 uint32_t clock;
263 struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
264 struct drm_psb_private *dev_priv = dev->dev_private;
265
266 /*pci_write_config_dword(pci_root, 0xD4, 0x00C32004);*/
267 /*pci_write_config_dword(pci_root, 0xD0, 0xE0033000);*/
268
269 pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
270 pci_read_config_dword(pci_root, 0xD4, &clock);
271 pci_dev_put(pci_root);
272
273 switch (clock & 0x07) {
274 case 0:
275 dev_priv->core_freq = 100;
276 break;
277 case 1:
278 dev_priv->core_freq = 133;
279 break;
280 case 2:
281 dev_priv->core_freq = 150;
282 break;
283 case 3:
284 dev_priv->core_freq = 178;
285 break;
286 case 4:
287 dev_priv->core_freq = 200;
288 break;
289 case 5:
290 case 6:
291 case 7:
292 dev_priv->core_freq = 266;
293 break;
294 default:
295 dev_priv->core_freq = 0;
296 }
297}
298
299/* Poulsbo */ 261/* Poulsbo */
300static const struct psb_offset psb_regmap[2] = { 262static const struct psb_offset psb_regmap[2] = {
301 { 263 {
@@ -352,7 +314,7 @@ static int psb_chip_setup(struct drm_device *dev)
352{ 314{
353 struct drm_psb_private *dev_priv = dev->dev_private; 315 struct drm_psb_private *dev_priv = dev->dev_private;
354 dev_priv->regmap = psb_regmap; 316 dev_priv->regmap = psb_regmap;
355 psb_get_core_freq(dev); 317 gma_get_core_freq(dev);
356 gma_intel_setup_gmbus(dev); 318 gma_intel_setup_gmbus(dev);
357 psb_intel_opregion_init(dev); 319 psb_intel_opregion_init(dev);
358 psb_intel_init_bios(dev); 320 psb_intel_init_bios(dev);
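psb_chip_setup() now calls the shared gma_get_core_freq() instead of the psb_get_core_freq() deleted above. Going by the removed function, the decode maps the low three bits of the host-bridge config read to a core clock in MHz; the standalone sketch below reproduces that mapping. decode_core_freq() is a hypothetical name, and whether the unified helper is byte-for-byte identical to the removed one is an assumption.

#include <stdio.h>
#include <stdint.h>

/* Clock-field decode performed by the removed psb_get_core_freq():
 * the low three bits select the core frequency. */
static unsigned int decode_core_freq(uint32_t clock)
{
	static const unsigned int mhz[8] = {
		100, 133, 150, 178, 200, 266, 266, 266
	};
	return mhz[clock & 0x07];
}

int main(void)
{
	for (uint32_t v = 0; v < 8; v++)
		printf("field %u -> %u MHz\n", v, decode_core_freq(v));
	return 0;
}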
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 1199180667c9..b686e56646eb 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -21,7 +21,6 @@
21 21
22#include <drm/drmP.h> 22#include <drm/drmP.h>
23#include <drm/drm.h> 23#include <drm/drm.h>
24#include <drm/gma_drm.h>
25#include "psb_drv.h" 24#include "psb_drv.h"
26#include "framebuffer.h" 25#include "framebuffer.h"
27#include "psb_reg.h" 26#include "psb_reg.h"
@@ -37,56 +36,65 @@
37#include <acpi/video.h> 36#include <acpi/video.h>
38#include <linux/module.h> 37#include <linux/module.h>
39 38
40static int drm_psb_trap_pagefaults; 39static struct drm_driver driver;
41 40static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
42static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
43
44MODULE_PARM_DESC(trap_pagefaults, "Error and reset on MMU pagefaults");
45module_param_named(trap_pagefaults, drm_psb_trap_pagefaults, int, 0600);
46
47 41
42/*
43 * The table below contains a mapping of the PCI vendor ID and the PCI Device ID
44 * to the different groups of PowerVR 5-series chip designs
45 *
46 * 0x8086 = Intel Corporation
47 *
48 * PowerVR SGX535 - Poulsbo - Intel GMA 500, Intel Atom Z5xx
49 * PowerVR SGX535 - Moorestown - Intel GMA 600
50 * PowerVR SGX535 - Oaktrail - Intel GMA 600, Intel Atom Z6xx, E6xx
51 * PowerVR SGX540 - Medfield - Intel Atom Z2460
52 * PowerVR SGX544MP2 - Medfield -
53 * PowerVR SGX545 - Cedartrail - Intel GMA 3600, Intel Atom D2500, N2600
54 * PowerVR SGX545 - Cedartrail - Intel GMA 3650, Intel Atom D2550, D2700,
55 * N2800
56 */
48static DEFINE_PCI_DEVICE_TABLE(pciidlist) = { 57static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
49 { 0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops }, 58 { 0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
50 { 0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops }, 59 { 0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
51#if defined(CONFIG_DRM_GMA600) 60#if defined(CONFIG_DRM_GMA600)
52 { 0x8086, 0x4100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops}, 61 { 0x8086, 0x4100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
53 { 0x8086, 0x4101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops}, 62 { 0x8086, 0x4101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
54 { 0x8086, 0x4102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops}, 63 { 0x8086, 0x4102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
55 { 0x8086, 0x4103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops}, 64 { 0x8086, 0x4103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
56 { 0x8086, 0x4104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops}, 65 { 0x8086, 0x4104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
57 { 0x8086, 0x4105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops}, 66 { 0x8086, 0x4105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
58 { 0x8086, 0x4106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops}, 67 { 0x8086, 0x4106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
59 { 0x8086, 0x4107, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops}, 68 { 0x8086, 0x4107, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
60 /* Atom E620 */ 69 { 0x8086, 0x4108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
61 { 0x8086, 0x4108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
62#endif 70#endif
63#if defined(CONFIG_DRM_MEDFIELD) 71#if defined(CONFIG_DRM_MEDFIELD)
64 {0x8086, 0x0130, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops}, 72 { 0x8086, 0x0130, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
65 {0x8086, 0x0131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops}, 73 { 0x8086, 0x0131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
66 {0x8086, 0x0132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops}, 74 { 0x8086, 0x0132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
67 {0x8086, 0x0133, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops}, 75 { 0x8086, 0x0133, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
68 {0x8086, 0x0134, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops}, 76 { 0x8086, 0x0134, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
69 {0x8086, 0x0135, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops}, 77 { 0x8086, 0x0135, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
70 {0x8086, 0x0136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops}, 78 { 0x8086, 0x0136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
71 {0x8086, 0x0137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops}, 79 { 0x8086, 0x0137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
72#endif 80#endif
73#if defined(CONFIG_DRM_GMA3600) 81#if defined(CONFIG_DRM_GMA3600)
74 { 0x8086, 0x0be0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, 82 { 0x8086, 0x0be0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
75 { 0x8086, 0x0be1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, 83 { 0x8086, 0x0be1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
76 { 0x8086, 0x0be2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, 84 { 0x8086, 0x0be2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
77 { 0x8086, 0x0be3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, 85 { 0x8086, 0x0be3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
78 { 0x8086, 0x0be4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, 86 { 0x8086, 0x0be4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
79 { 0x8086, 0x0be5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, 87 { 0x8086, 0x0be5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
80 { 0x8086, 0x0be6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, 88 { 0x8086, 0x0be6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
81 { 0x8086, 0x0be7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, 89 { 0x8086, 0x0be7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
82 { 0x8086, 0x0be8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, 90 { 0x8086, 0x0be8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
83 { 0x8086, 0x0be9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, 91 { 0x8086, 0x0be9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
84 { 0x8086, 0x0bea, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, 92 { 0x8086, 0x0bea, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
85 { 0x8086, 0x0beb, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, 93 { 0x8086, 0x0beb, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
86 { 0x8086, 0x0bec, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, 94 { 0x8086, 0x0bec, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
87 { 0x8086, 0x0bed, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, 95 { 0x8086, 0x0bed, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
88 { 0x8086, 0x0bee, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, 96 { 0x8086, 0x0bee, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
89 { 0x8086, 0x0bef, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, 97 { 0x8086, 0x0bef, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
90#endif 98#endif
91 { 0, } 99 { 0, }
92}; 100};
@@ -95,59 +103,10 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
95/* 103/*
96 * Standard IOCTLs. 104 * Standard IOCTLs.
97 */ 105 */
98
99#define DRM_IOCTL_GMA_ADB \
100 DRM_IOWR(DRM_GMA_ADB + DRM_COMMAND_BASE, uint32_t)
101#define DRM_IOCTL_GMA_MODE_OPERATION \
102 DRM_IOWR(DRM_GMA_MODE_OPERATION + DRM_COMMAND_BASE, \
103 struct drm_psb_mode_operation_arg)
104#define DRM_IOCTL_GMA_STOLEN_MEMORY \
105 DRM_IOWR(DRM_GMA_STOLEN_MEMORY + DRM_COMMAND_BASE, \
106 struct drm_psb_stolen_memory_arg)
107#define DRM_IOCTL_GMA_GAMMA \
108 DRM_IOWR(DRM_GMA_GAMMA + DRM_COMMAND_BASE, \
109 struct drm_psb_dpst_lut_arg)
110#define DRM_IOCTL_GMA_DPST_BL \
111 DRM_IOWR(DRM_GMA_DPST_BL + DRM_COMMAND_BASE, \
112 uint32_t)
113#define DRM_IOCTL_GMA_GET_PIPE_FROM_CRTC_ID \
114 DRM_IOWR(DRM_GMA_GET_PIPE_FROM_CRTC_ID + DRM_COMMAND_BASE, \
115 struct drm_psb_get_pipe_from_crtc_id_arg)
116#define DRM_IOCTL_GMA_GEM_CREATE \
117 DRM_IOWR(DRM_GMA_GEM_CREATE + DRM_COMMAND_BASE, \
118 struct drm_psb_gem_create)
119#define DRM_IOCTL_GMA_GEM_MMAP \
120 DRM_IOWR(DRM_GMA_GEM_MMAP + DRM_COMMAND_BASE, \
121 struct drm_psb_gem_mmap)
122
123static int psb_adb_ioctl(struct drm_device *dev, void *data,
124 struct drm_file *file_priv);
125static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
126 struct drm_file *file_priv);
127static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data,
128 struct drm_file *file_priv);
129static int psb_gamma_ioctl(struct drm_device *dev, void *data,
130 struct drm_file *file_priv);
131static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
132 struct drm_file *file_priv);
133
134static const struct drm_ioctl_desc psb_ioctls[] = { 106static const struct drm_ioctl_desc psb_ioctls[] = {
135 DRM_IOCTL_DEF_DRV(GMA_ADB, psb_adb_ioctl, DRM_AUTH),
136 DRM_IOCTL_DEF_DRV(GMA_MODE_OPERATION, psb_mode_operation_ioctl,
137 DRM_AUTH),
138 DRM_IOCTL_DEF_DRV(GMA_STOLEN_MEMORY, psb_stolen_memory_ioctl,
139 DRM_AUTH),
140 DRM_IOCTL_DEF_DRV(GMA_GAMMA, psb_gamma_ioctl, DRM_AUTH),
141 DRM_IOCTL_DEF_DRV(GMA_DPST_BL, psb_dpst_bl_ioctl, DRM_AUTH),
142 DRM_IOCTL_DEF_DRV(GMA_GET_PIPE_FROM_CRTC_ID,
143 psb_intel_get_pipe_from_crtc_id, 0),
144 DRM_IOCTL_DEF_DRV(GMA_GEM_CREATE, psb_gem_create_ioctl,
145 DRM_UNLOCKED | DRM_AUTH),
146 DRM_IOCTL_DEF_DRV(GMA_GEM_MMAP, psb_gem_mmap_ioctl,
147 DRM_UNLOCKED | DRM_AUTH),
148}; 107};
149 108
150static void psb_lastclose(struct drm_device *dev) 109static void psb_driver_lastclose(struct drm_device *dev)
151{ 110{
152 int ret; 111 int ret;
153 struct drm_psb_private *dev_priv = dev->dev_private; 112 struct drm_psb_private *dev_priv = dev->dev_private;
@@ -169,19 +128,14 @@ static int psb_do_init(struct drm_device *dev)
169 128
170 uint32_t stolen_gtt; 129 uint32_t stolen_gtt;
171 130
172 int ret = -ENOMEM;
173
174 if (pg->mmu_gatt_start & 0x0FFFFFFF) { 131 if (pg->mmu_gatt_start & 0x0FFFFFFF) {
175 dev_err(dev->dev, "Gatt must be 256M aligned. This is a bug.\n"); 132 dev_err(dev->dev, "Gatt must be 256M aligned. This is a bug.\n");
176 ret = -EINVAL; 133 return -EINVAL;
177 goto out_err;
178 } 134 }
179 135
180
181 stolen_gtt = (pg->stolen_size >> PAGE_SHIFT) * 4; 136 stolen_gtt = (pg->stolen_size >> PAGE_SHIFT) * 4;
182 stolen_gtt = (stolen_gtt + PAGE_SIZE - 1) >> PAGE_SHIFT; 137 stolen_gtt = (stolen_gtt + PAGE_SIZE - 1) >> PAGE_SHIFT;
183 stolen_gtt = 138 stolen_gtt = (stolen_gtt < pg->gtt_pages) ? stolen_gtt : pg->gtt_pages;
184 (stolen_gtt < pg->gtt_pages) ? stolen_gtt : pg->gtt_pages;
185 139
186 dev_priv->gatt_free_offset = pg->mmu_gatt_start + 140 dev_priv->gatt_free_offset = pg->mmu_gatt_start +
187 (stolen_gtt << PAGE_SHIFT) * 1024; 141 (stolen_gtt << PAGE_SHIFT) * 1024;
@@ -192,23 +146,26 @@ static int psb_do_init(struct drm_device *dev)
192 PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0); 146 PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
193 PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1); 147 PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
194 PSB_RSGX32(PSB_CR_BIF_BANK1); 148 PSB_RSGX32(PSB_CR_BIF_BANK1);
195 PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_MMU_ER_MASK, 149
196 PSB_CR_BIF_CTRL); 150 /* Do not bypass any MMU access, let them pagefault instead */
151 PSB_WSGX32((PSB_RSGX32(PSB_CR_BIF_CTRL) & ~_PSB_MMU_ER_MASK),
152 PSB_CR_BIF_CTRL);
153 PSB_RSGX32(PSB_CR_BIF_CTRL);
154
197 psb_spank(dev_priv); 155 psb_spank(dev_priv);
198 156
199 /* mmu_gatt ?? */ 157 /* mmu_gatt ?? */
200 PSB_WSGX32(pg->gatt_start, PSB_CR_BIF_TWOD_REQ_BASE); 158 PSB_WSGX32(pg->gatt_start, PSB_CR_BIF_TWOD_REQ_BASE);
159 PSB_RSGX32(PSB_CR_BIF_TWOD_REQ_BASE); /* Post */
160
201 return 0; 161 return 0;
202out_err:
203 return ret;
204} 162}
205 163
206static int psb_driver_unload(struct drm_device *dev) 164static int psb_driver_unload(struct drm_device *dev)
207{ 165{
208 struct drm_psb_private *dev_priv = dev->dev_private; 166 struct drm_psb_private *dev_priv = dev->dev_private;
209 167
210 /* Kill vblank etc here */ 168 /* TODO: Kill vblank etc here */
211
212 169
213 if (dev_priv) { 170 if (dev_priv) {
214 if (dev_priv->backlight_device) 171 if (dev_priv->backlight_device)
@@ -268,8 +225,7 @@ static int psb_driver_unload(struct drm_device *dev)
268 return 0; 225 return 0;
269} 226}
270 227
271 228static int psb_driver_load(struct drm_device *dev, unsigned long flags)
272static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
273{ 229{
274 struct drm_psb_private *dev_priv; 230 struct drm_psb_private *dev_priv;
275 unsigned long resource_start, resource_len; 231 unsigned long resource_start, resource_len;
@@ -277,15 +233,19 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
277 int ret = -ENOMEM; 233 int ret = -ENOMEM;
278 struct drm_connector *connector; 234 struct drm_connector *connector;
279 struct gma_encoder *gma_encoder; 235 struct gma_encoder *gma_encoder;
236 struct psb_gtt *pg;
280 237
238 /* allocating and initializing driver private data */
281 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); 239 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
282 if (dev_priv == NULL) 240 if (dev_priv == NULL)
283 return -ENOMEM; 241 return -ENOMEM;
284 242
285 dev_priv->ops = (struct psb_ops *)chipset; 243 dev_priv->ops = (struct psb_ops *)flags;
286 dev_priv->dev = dev; 244 dev_priv->dev = dev;
287 dev->dev_private = (void *) dev_priv; 245 dev->dev_private = (void *) dev_priv;
288 246
247 pg = &dev_priv->gtt;
248
289 pci_set_master(dev->pdev); 249 pci_set_master(dev->pdev);
290 250
291 dev_priv->num_pipe = dev_priv->ops->pipes; 251 dev_priv->num_pipe = dev_priv->ops->pipes;
@@ -347,9 +307,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
347 if (ret) 307 if (ret)
348 goto out_err; 308 goto out_err;
349 309
350 dev_priv->mmu = psb_mmu_driver_init((void *)0, 310 dev_priv->mmu = psb_mmu_driver_init(dev, 1, 0, 0);
351 drm_psb_trap_pagefaults, 0,
352 dev_priv);
353 if (!dev_priv->mmu) 311 if (!dev_priv->mmu)
354 goto out_err; 312 goto out_err;
355 313
@@ -357,18 +315,27 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
357 if (!dev_priv->pf_pd) 315 if (!dev_priv->pf_pd)
358 goto out_err; 316 goto out_err;
359 317
360 psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
361 psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
362
363 ret = psb_do_init(dev); 318 ret = psb_do_init(dev);
364 if (ret) 319 if (ret)
365 return ret; 320 return ret;
366 321
322 /* Add stolen memory to SGX MMU */
323 down_read(&pg->sem);
324 ret = psb_mmu_insert_pfn_sequence(psb_mmu_get_default_pd(dev_priv->mmu),
325 dev_priv->stolen_base >> PAGE_SHIFT,
326 pg->gatt_start,
327 pg->stolen_size >> PAGE_SHIFT, 0);
328 up_read(&pg->sem);
329
330 psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
331 psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
332
367 PSB_WSGX32(0x20000000, PSB_CR_PDS_EXEC_BASE); 333 PSB_WSGX32(0x20000000, PSB_CR_PDS_EXEC_BASE);
368 PSB_WSGX32(0x30000000, PSB_CR_BIF_3D_REQ_BASE); 334 PSB_WSGX32(0x30000000, PSB_CR_BIF_3D_REQ_BASE);
369 335
370 acpi_video_register(); 336 acpi_video_register();
371 337
338 /* Setup vertical blanking handling */
372 ret = drm_vblank_init(dev, dev_priv->num_pipe); 339 ret = drm_vblank_init(dev, dev_priv->num_pipe);
373 if (ret) 340 if (ret)
374 goto out_err; 341 goto out_err;
@@ -390,9 +357,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
390 drm_irq_install(dev); 357 drm_irq_install(dev);
391 358
392 dev->vblank_disable_allowed = true; 359 dev->vblank_disable_allowed = true;
393
394 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 360 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
395
396 dev->driver->get_vblank_counter = psb_get_vblank_counter; 361 dev->driver->get_vblank_counter = psb_get_vblank_counter;
397 362
398 psb_modeset_init(dev); 363 psb_modeset_init(dev);
@@ -416,11 +381,11 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
416 return ret; 381 return ret;
417 psb_intel_opregion_enable_asle(dev); 382 psb_intel_opregion_enable_asle(dev);
418#if 0 383#if 0
419 /*enable runtime pm at last*/ 384 /* Enable runtime pm at last */
420 pm_runtime_enable(&dev->pdev->dev); 385 pm_runtime_enable(&dev->pdev->dev);
421 pm_runtime_set_active(&dev->pdev->dev); 386 pm_runtime_set_active(&dev->pdev->dev);
422#endif 387#endif
423 /*Intel drm driver load is done, continue doing pvr load*/ 388 /* Intel drm driver load is done, continue doing pvr load */
424 return 0; 389 return 0;
425out_err: 390out_err:
426 psb_driver_unload(dev); 391 psb_driver_unload(dev);
@@ -442,161 +407,6 @@ static inline void get_brightness(struct backlight_device *bd)
442#endif 407#endif
443} 408}
444 409
445static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
446 struct drm_file *file_priv)
447{
448 struct drm_psb_private *dev_priv = psb_priv(dev);
449 uint32_t *arg = data;
450
451 dev_priv->blc_adj2 = *arg;
452 get_brightness(dev_priv->backlight_device);
453 return 0;
454}
455
456static int psb_adb_ioctl(struct drm_device *dev, void *data,
457 struct drm_file *file_priv)
458{
459 struct drm_psb_private *dev_priv = psb_priv(dev);
460 uint32_t *arg = data;
461
462 dev_priv->blc_adj1 = *arg;
463 get_brightness(dev_priv->backlight_device);
464 return 0;
465}
466
467static int psb_gamma_ioctl(struct drm_device *dev, void *data,
468 struct drm_file *file_priv)
469{
470 struct drm_psb_dpst_lut_arg *lut_arg = data;
471 struct drm_mode_object *obj;
472 struct drm_crtc *crtc;
473 struct drm_connector *connector;
474 struct gma_crtc *gma_crtc;
475 int i = 0;
476 int32_t obj_id;
477
478 obj_id = lut_arg->output_id;
479 obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_CONNECTOR);
480 if (!obj) {
481 dev_dbg(dev->dev, "Invalid Connector object.\n");
482 return -ENOENT;
483 }
484
485 connector = obj_to_connector(obj);
486 crtc = connector->encoder->crtc;
487 gma_crtc = to_gma_crtc(crtc);
488
489 for (i = 0; i < 256; i++)
490 gma_crtc->lut_adj[i] = lut_arg->lut[i];
491
492 gma_crtc_load_lut(crtc);
493
494 return 0;
495}
496
497static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
498 struct drm_file *file_priv)
499{
500 uint32_t obj_id;
501 uint16_t op;
502 struct drm_mode_modeinfo *umode;
503 struct drm_display_mode *mode = NULL;
504 struct drm_psb_mode_operation_arg *arg;
505 struct drm_mode_object *obj;
506 struct drm_connector *connector;
507 struct drm_connector_helper_funcs *connector_funcs;
508 int ret = 0;
509 int resp = MODE_OK;
510
511 arg = (struct drm_psb_mode_operation_arg *)data;
512 obj_id = arg->obj_id;
513 op = arg->operation;
514
515 switch (op) {
516 case PSB_MODE_OPERATION_MODE_VALID:
517 umode = &arg->mode;
518
519 drm_modeset_lock_all(dev);
520
521 obj = drm_mode_object_find(dev, obj_id,
522 DRM_MODE_OBJECT_CONNECTOR);
523 if (!obj) {
524 ret = -ENOENT;
525 goto mode_op_out;
526 }
527
528 connector = obj_to_connector(obj);
529
530 mode = drm_mode_create(dev);
531 if (!mode) {
532 ret = -ENOMEM;
533 goto mode_op_out;
534 }
535
536 /* drm_crtc_convert_umode(mode, umode); */
537 {
538 mode->clock = umode->clock;
539 mode->hdisplay = umode->hdisplay;
540 mode->hsync_start = umode->hsync_start;
541 mode->hsync_end = umode->hsync_end;
542 mode->htotal = umode->htotal;
543 mode->hskew = umode->hskew;
544 mode->vdisplay = umode->vdisplay;
545 mode->vsync_start = umode->vsync_start;
546 mode->vsync_end = umode->vsync_end;
547 mode->vtotal = umode->vtotal;
548 mode->vscan = umode->vscan;
549 mode->vrefresh = umode->vrefresh;
550 mode->flags = umode->flags;
551 mode->type = umode->type;
552 strncpy(mode->name, umode->name, DRM_DISPLAY_MODE_LEN);
553 mode->name[DRM_DISPLAY_MODE_LEN-1] = 0;
554 }
555
556 connector_funcs = (struct drm_connector_helper_funcs *)
557 connector->helper_private;
558
559 if (connector_funcs->mode_valid) {
560 resp = connector_funcs->mode_valid(connector, mode);
561 arg->data = resp;
562 }
563
564 /*do some clean up work*/
565 if (mode)
566 drm_mode_destroy(dev, mode);
567mode_op_out:
568 drm_modeset_unlock_all(dev);
569 return ret;
570
571 default:
572 dev_dbg(dev->dev, "Unsupported psb mode operation\n");
573 return -EOPNOTSUPP;
574 }
575
576 return 0;
577}
578
579static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data,
580 struct drm_file *file_priv)
581{
582 struct drm_psb_private *dev_priv = psb_priv(dev);
583 struct drm_psb_stolen_memory_arg *arg = data;
584
585 arg->base = dev_priv->stolen_base;
586 arg->size = dev_priv->vram_stolen_size;
587
588 return 0;
589}
590
591static int psb_driver_open(struct drm_device *dev, struct drm_file *priv)
592{
593 return 0;
594}
595
596static void psb_driver_close(struct drm_device *dev, struct drm_file *priv)
597{
598}
599
600static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd, 410static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
601 unsigned long arg) 411 unsigned long arg)
602{ 412{
@@ -614,15 +424,21 @@ static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
614 /* FIXME: do we need to wrap the other side of this */ 424 /* FIXME: do we need to wrap the other side of this */
615} 425}
616 426
617 427/*
618/* When a client dies: 428 * When a client dies:
619 * - Check for and clean up flipped page state 429 * - Check for and clean up flipped page state
620 */ 430 */
621static void psb_driver_preclose(struct drm_device *dev, struct drm_file *priv) 431static void psb_driver_preclose(struct drm_device *dev, struct drm_file *priv)
622{ 432{
623} 433}
624 434
625static void psb_remove(struct pci_dev *pdev) 435static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
436{
437 return drm_get_pci_dev(pdev, ent, &driver);
438}
439
440
441static void psb_pci_remove(struct pci_dev *pdev)
626{ 442{
627 struct drm_device *dev = pci_get_drvdata(pdev); 443 struct drm_device *dev = pci_get_drvdata(pdev);
628 drm_put_dev(dev); 444 drm_put_dev(dev);
@@ -657,11 +473,12 @@ static const struct file_operations psb_gem_fops = {
657 473
658static struct drm_driver driver = { 474static struct drm_driver driver = {
659 .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | \ 475 .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | \
660 DRIVER_MODESET | DRIVER_GEM , 476 DRIVER_MODESET | DRIVER_GEM,
661 .load = psb_driver_load, 477 .load = psb_driver_load,
662 .unload = psb_driver_unload, 478 .unload = psb_driver_unload,
479 .lastclose = psb_driver_lastclose,
480 .preclose = psb_driver_preclose,
663 481
664 .ioctls = psb_ioctls,
665 .num_ioctls = DRM_ARRAY_SIZE(psb_ioctls), 482 .num_ioctls = DRM_ARRAY_SIZE(psb_ioctls),
666 .device_is_agp = psb_driver_device_is_agp, 483 .device_is_agp = psb_driver_device_is_agp,
667 .irq_preinstall = psb_irq_preinstall, 484 .irq_preinstall = psb_irq_preinstall,
@@ -671,40 +488,31 @@ static struct drm_driver driver = {
671 .enable_vblank = psb_enable_vblank, 488 .enable_vblank = psb_enable_vblank,
672 .disable_vblank = psb_disable_vblank, 489 .disable_vblank = psb_disable_vblank,
673 .get_vblank_counter = psb_get_vblank_counter, 490 .get_vblank_counter = psb_get_vblank_counter,
674 .lastclose = psb_lastclose,
675 .open = psb_driver_open,
676 .preclose = psb_driver_preclose,
677 .postclose = psb_driver_close,
678 491
679 .gem_free_object = psb_gem_free_object, 492 .gem_free_object = psb_gem_free_object,
680 .gem_vm_ops = &psb_gem_vm_ops, 493 .gem_vm_ops = &psb_gem_vm_ops,
494
681 .dumb_create = psb_gem_dumb_create, 495 .dumb_create = psb_gem_dumb_create,
682 .dumb_map_offset = psb_gem_dumb_map_gtt, 496 .dumb_map_offset = psb_gem_dumb_map_gtt,
683 .dumb_destroy = drm_gem_dumb_destroy, 497 .dumb_destroy = drm_gem_dumb_destroy,
498 .ioctls = psb_ioctls,
684 .fops = &psb_gem_fops, 499 .fops = &psb_gem_fops,
685 .name = DRIVER_NAME, 500 .name = DRIVER_NAME,
686 .desc = DRIVER_DESC, 501 .desc = DRIVER_DESC,
687 .date = PSB_DRM_DRIVER_DATE, 502 .date = DRIVER_DATE,
688 .major = PSB_DRM_DRIVER_MAJOR, 503 .major = DRIVER_MAJOR,
689 .minor = PSB_DRM_DRIVER_MINOR, 504 .minor = DRIVER_MINOR,
690 .patchlevel = PSB_DRM_DRIVER_PATCHLEVEL 505 .patchlevel = DRIVER_PATCHLEVEL
691}; 506};
692 507
693static struct pci_driver psb_pci_driver = { 508static struct pci_driver psb_pci_driver = {
694 .name = DRIVER_NAME, 509 .name = DRIVER_NAME,
695 .id_table = pciidlist, 510 .id_table = pciidlist,
696 .probe = psb_probe, 511 .probe = psb_pci_probe,
697 .remove = psb_remove, 512 .remove = psb_pci_remove,
698 .driver = { 513 .driver.pm = &psb_pm_ops,
699 .pm = &psb_pm_ops,
700 }
701}; 514};
702 515
703static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
704{
705 return drm_get_pci_dev(pdev, ent, &driver);
706}
707
708static int __init psb_init(void) 516static int __init psb_init(void)
709{ 517{
710 return drm_pci_init(&driver, &psb_pci_driver); 518 return drm_pci_init(&driver, &psb_pci_driver);
@@ -718,6 +526,6 @@ static void __exit psb_exit(void)
718late_initcall(psb_init); 526late_initcall(psb_init);
719module_exit(psb_exit); 527module_exit(psb_exit);
720 528
721MODULE_AUTHOR("Alan Cox <alan@linux.intel.com> and others"); 529MODULE_AUTHOR(DRIVER_AUTHOR);
722MODULE_DESCRIPTION(DRIVER_DESC); 530MODULE_DESCRIPTION(DRIVER_DESC);
723MODULE_LICENSE("GPL"); 531MODULE_LICENSE(DRIVER_LICENSE);
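One detail worth noting in the probe path above: each pci_device_id entry stashes a pointer to its psb_ops in driver_data as a long, and psb_driver_load() casts the flags argument straight back to struct psb_ops *. The standalone snippet below illustrates that round-trip; the demo_* types and names are hypothetical, not gma500 code.

#include <stdio.h>

struct demo_ops { const char *name; };

static const struct demo_ops psb_like_ops = { "Poulsbo" };

/* Stands in for .load: recovers the per-chip ops from the flags value */
static void demo_driver_load(unsigned long flags)
{
	const struct demo_ops *ops = (const struct demo_ops *)flags;
	printf("loaded chip ops: %s\n", ops->name);
}

int main(void)
{
	/* Like the driver_data field of a pci_device_id table entry */
	unsigned long driver_data = (unsigned long)&psb_like_ops;

	demo_driver_load(driver_data);
	return 0;
}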
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 5ad6a03e477e..55ebe2bd88dd 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -33,6 +33,18 @@
33#include "power.h" 33#include "power.h"
34#include "opregion.h" 34#include "opregion.h"
35#include "oaktrail.h" 35#include "oaktrail.h"
36#include "mmu.h"
37
38#define DRIVER_AUTHOR "Alan Cox <alan@linux.intel.com> and others"
39#define DRIVER_LICENSE "GPL"
40
41#define DRIVER_NAME "gma500"
42#define DRIVER_DESC "DRM driver for the Intel GMA500, GMA600, GMA3600, GMA3650"
43#define DRIVER_DATE "20140314"
44
45#define DRIVER_MAJOR 1
46#define DRIVER_MINOR 0
47#define DRIVER_PATCHLEVEL 0
36 48
37/* Append new drm mode definition here, align with libdrm definition */ 49/* Append new drm mode definition here, align with libdrm definition */
38#define DRM_MODE_SCALE_NO_SCALE 2 50#define DRM_MODE_SCALE_NO_SCALE 2
@@ -49,21 +61,7 @@ enum {
49#define IS_MFLD(dev) (((dev)->pdev->device & 0xfff8) == 0x0130) 61#define IS_MFLD(dev) (((dev)->pdev->device & 0xfff8) == 0x0130)
50#define IS_CDV(dev) (((dev)->pdev->device & 0xfff0) == 0x0be0) 62#define IS_CDV(dev) (((dev)->pdev->device & 0xfff0) == 0x0be0)
51 63
52/* 64/* Hardware offsets */
53 * Driver definitions
54 */
55
56#define DRIVER_NAME "gma500"
57#define DRIVER_DESC "DRM driver for the Intel GMA500"
58
59#define PSB_DRM_DRIVER_DATE "2011-06-06"
60#define PSB_DRM_DRIVER_MAJOR 1
61#define PSB_DRM_DRIVER_MINOR 0
62#define PSB_DRM_DRIVER_PATCHLEVEL 0
63
64/*
65 * Hardware offsets
66 */
67#define PSB_VDC_OFFSET 0x00000000 65#define PSB_VDC_OFFSET 0x00000000
68#define PSB_VDC_SIZE 0x000080000 66#define PSB_VDC_SIZE 0x000080000
69#define MRST_MMIO_SIZE 0x0000C0000 67#define MRST_MMIO_SIZE 0x0000C0000
@@ -71,16 +69,14 @@ enum {
71#define PSB_SGX_SIZE 0x8000 69#define PSB_SGX_SIZE 0x8000
72#define PSB_SGX_OFFSET 0x00040000 70#define PSB_SGX_OFFSET 0x00040000
73#define MRST_SGX_OFFSET 0x00080000 71#define MRST_SGX_OFFSET 0x00080000
74/* 72
75 * PCI resource identifiers 73/* PCI resource identifiers */
76 */
77#define PSB_MMIO_RESOURCE 0 74#define PSB_MMIO_RESOURCE 0
78#define PSB_AUX_RESOURCE 0 75#define PSB_AUX_RESOURCE 0
79#define PSB_GATT_RESOURCE 2 76#define PSB_GATT_RESOURCE 2
80#define PSB_GTT_RESOURCE 3 77#define PSB_GTT_RESOURCE 3
81/* 78
82 * PCI configuration 79/* PCI configuration */
83 */
84#define PSB_GMCH_CTRL 0x52 80#define PSB_GMCH_CTRL 0x52
85#define PSB_BSM 0x5C 81#define PSB_BSM 0x5C
86#define _PSB_GMCH_ENABLED 0x4 82#define _PSB_GMCH_ENABLED 0x4
@@ -88,37 +84,29 @@ enum {
88#define _PSB_PGETBL_ENABLED 0x00000001 84#define _PSB_PGETBL_ENABLED 0x00000001
89#define PSB_SGX_2D_SLAVE_PORT 0x4000 85#define PSB_SGX_2D_SLAVE_PORT 0x4000
90 86
91/* To get rid of */ 87/* TODO: To get rid of */
92#define PSB_TT_PRIV0_LIMIT (256*1024*1024) 88#define PSB_TT_PRIV0_LIMIT (256*1024*1024)
93#define PSB_TT_PRIV0_PLIMIT (PSB_TT_PRIV0_LIMIT >> PAGE_SHIFT) 89#define PSB_TT_PRIV0_PLIMIT (PSB_TT_PRIV0_LIMIT >> PAGE_SHIFT)
94 90
95/* 91/* SGX side MMU definitions (these can probably go) */
96 * SGX side MMU definitions (these can probably go)
97 */
98 92
99/* 93/* Flags for external memory type field */
100 * Flags for external memory type field.
101 */
102#define PSB_MMU_CACHED_MEMORY 0x0001 /* Bind to MMU only */ 94#define PSB_MMU_CACHED_MEMORY 0x0001 /* Bind to MMU only */
103#define PSB_MMU_RO_MEMORY 0x0002 /* MMU RO memory */ 95#define PSB_MMU_RO_MEMORY 0x0002 /* MMU RO memory */
104#define PSB_MMU_WO_MEMORY 0x0004 /* MMU WO memory */ 96#define PSB_MMU_WO_MEMORY 0x0004 /* MMU WO memory */
105/* 97
106 * PTE's and PDE's 98/* PTE's and PDE's */
107 */
108#define PSB_PDE_MASK 0x003FFFFF 99#define PSB_PDE_MASK 0x003FFFFF
109#define PSB_PDE_SHIFT 22 100#define PSB_PDE_SHIFT 22
110#define PSB_PTE_SHIFT 12 101#define PSB_PTE_SHIFT 12
111/* 102
112 * Cache control 103/* Cache control */
113 */
114#define PSB_PTE_VALID 0x0001 /* PTE / PDE valid */ 104#define PSB_PTE_VALID 0x0001 /* PTE / PDE valid */
115#define PSB_PTE_WO 0x0002 /* Write only */ 105#define PSB_PTE_WO 0x0002 /* Write only */
116#define PSB_PTE_RO 0x0004 /* Read only */ 106#define PSB_PTE_RO 0x0004 /* Read only */
117#define PSB_PTE_CACHED 0x0008 /* CPU cache coherent */ 107#define PSB_PTE_CACHED 0x0008 /* CPU cache coherent */
118 108
119/* 109/* VDC registers and bits */
120 * VDC registers and bits
121 */
122#define PSB_MSVDX_CLOCKGATING 0x2064 110#define PSB_MSVDX_CLOCKGATING 0x2064
123#define PSB_TOPAZ_CLOCKGATING 0x2068 111#define PSB_TOPAZ_CLOCKGATING 0x2068
124#define PSB_HWSTAM 0x2098 112#define PSB_HWSTAM 0x2098
@@ -265,6 +253,7 @@ struct psb_intel_opregion {
265 struct opregion_asle *asle; 253 struct opregion_asle *asle;
266 void *vbt; 254 void *vbt;
267 u32 __iomem *lid_state; 255 u32 __iomem *lid_state;
256 struct work_struct asle_work;
268}; 257};
269 258
270struct sdvo_device_mapping { 259struct sdvo_device_mapping {
@@ -283,10 +272,7 @@ struct intel_gmbus {
283 u32 reg0; 272 u32 reg0;
284}; 273};
285 274
286/* 275/* Register offset maps */
287 * Register offset maps
288 */
289
290struct psb_offset { 276struct psb_offset {
291 u32 fp0; 277 u32 fp0;
292 u32 fp1; 278 u32 fp1;
@@ -320,9 +306,7 @@ struct psb_offset {
320 * update the register cache instead. 306 * update the register cache instead.
321 */ 307 */
322 308
323/* 309/* Common status for pipes */
324 * Common status for pipes.
325 */
326struct psb_pipe { 310struct psb_pipe {
327 u32 fp0; 311 u32 fp0;
328 u32 fp1; 312 u32 fp1;
@@ -482,35 +466,24 @@ struct drm_psb_private {
482 struct psb_mmu_driver *mmu; 466 struct psb_mmu_driver *mmu;
483 struct psb_mmu_pd *pf_pd; 467 struct psb_mmu_pd *pf_pd;
484 468
485 /* 469 /* Register base */
486 * Register base
487 */
488
489 uint8_t __iomem *sgx_reg; 470 uint8_t __iomem *sgx_reg;
490 uint8_t __iomem *vdc_reg; 471 uint8_t __iomem *vdc_reg;
491 uint8_t __iomem *aux_reg; /* Auxillary vdc pipe regs */ 472 uint8_t __iomem *aux_reg; /* Auxillary vdc pipe regs */
492 uint32_t gatt_free_offset; 473 uint32_t gatt_free_offset;
493 474
494 /* 475 /* Fencing / irq */
495 * Fencing / irq.
496 */
497
498 uint32_t vdc_irq_mask; 476 uint32_t vdc_irq_mask;
499 uint32_t pipestat[PSB_NUM_PIPE]; 477 uint32_t pipestat[PSB_NUM_PIPE];
500 478
501 spinlock_t irqmask_lock; 479 spinlock_t irqmask_lock;
502 480
503 /* 481 /* Power */
504 * Power
505 */
506
507 bool suspended; 482 bool suspended;
508 bool display_power; 483 bool display_power;
509 int display_count; 484 int display_count;
510 485
511 /* 486 /* Modesetting */
512 * Modesetting
513 */
514 struct psb_intel_mode_device mode_dev; 487 struct psb_intel_mode_device mode_dev;
515 bool modeset; /* true if we have done the mode_device setup */ 488 bool modeset; /* true if we have done the mode_device setup */
516 489
@@ -518,15 +491,10 @@ struct drm_psb_private {
518 struct drm_crtc *pipe_to_crtc_mapping[PSB_NUM_PIPE]; 491 struct drm_crtc *pipe_to_crtc_mapping[PSB_NUM_PIPE];
519 uint32_t num_pipe; 492 uint32_t num_pipe;
520 493
521 /* 494 /* OSPM info (Power management base) (TODO: can go ?) */
522 * OSPM info (Power management base) (can go ?)
523 */
524 uint32_t ospm_base; 495 uint32_t ospm_base;
525 496
526 /* 497 /* Sizes info */
527 * Sizes info
528 */
529
530 u32 fuse_reg_value; 498 u32 fuse_reg_value;
531 u32 video_device_fuse; 499 u32 video_device_fuse;
532 500
@@ -546,9 +514,7 @@ struct drm_psb_private {
546 struct drm_property *broadcast_rgb_property; 514 struct drm_property *broadcast_rgb_property;
547 struct drm_property *force_audio_property; 515 struct drm_property *force_audio_property;
548 516
549 /* 517 /* LVDS info */
550 * LVDS info
551 */
552 int backlight_duty_cycle; /* restore backlight to this value */ 518 int backlight_duty_cycle; /* restore backlight to this value */
553 bool panel_wants_dither; 519 bool panel_wants_dither;
554 struct drm_display_mode *panel_fixed_mode; 520 struct drm_display_mode *panel_fixed_mode;
@@ -582,34 +548,23 @@ struct drm_psb_private {
582 /* Oaktrail HDMI state */ 548 /* Oaktrail HDMI state */
583 struct oaktrail_hdmi_dev *hdmi_priv; 549 struct oaktrail_hdmi_dev *hdmi_priv;
584 550
585 /* 551 /* Register state */
586 * Register state
587 */
588
589 struct psb_save_area regs; 552 struct psb_save_area regs;
590 553
591 /* MSI reg save */ 554 /* MSI reg save */
592 uint32_t msi_addr; 555 uint32_t msi_addr;
593 uint32_t msi_data; 556 uint32_t msi_data;
594 557
595 /* 558 /* Hotplug handling */
596 * Hotplug handling
597 */
598
599 struct work_struct hotplug_work; 559 struct work_struct hotplug_work;
600 560
601 /* 561 /* LID-Switch */
602 * LID-Switch
603 */
604 spinlock_t lid_lock; 562 spinlock_t lid_lock;
605 struct timer_list lid_timer; 563 struct timer_list lid_timer;
606 struct psb_intel_opregion opregion; 564 struct psb_intel_opregion opregion;
607 u32 lid_last_state; 565 u32 lid_last_state;
608 566
609 /* 567 /* Watchdog */
610 * Watchdog
611 */
612
613 uint32_t apm_reg; 568 uint32_t apm_reg;
614 uint16_t apm_base; 569 uint16_t apm_base;
615 570
@@ -629,9 +584,7 @@ struct drm_psb_private {
629 /* 2D acceleration */ 584 /* 2D acceleration */
630 spinlock_t lock_2d; 585 spinlock_t lock_2d;
631 586
632 /* 587 /* Panel brightness */
633 * Panel brightness
634 */
635 int brightness; 588 int brightness;
636 int brightness_adjusted; 589 int brightness_adjusted;
637 590
@@ -664,10 +617,7 @@ struct drm_psb_private {
664}; 617};
665 618
666 619
667/* 620/* Operations for each board type */
668 * Operations for each board type
669 */
670
671struct psb_ops { 621struct psb_ops {
672 const char *name; 622 const char *name;
673 unsigned int accel_2d:1; 623 unsigned int accel_2d:1;
@@ -713,8 +663,6 @@ struct psb_ops {
713 663
714 664
715 665
716struct psb_mmu_driver;
717
718extern int drm_crtc_probe_output_modes(struct drm_device *dev, int, int); 666extern int drm_crtc_probe_output_modes(struct drm_device *dev, int, int);
719extern int drm_pick_crtcs(struct drm_device *dev); 667extern int drm_pick_crtcs(struct drm_device *dev);
720 668
@@ -723,52 +671,7 @@ static inline struct drm_psb_private *psb_priv(struct drm_device *dev)
723 return (struct drm_psb_private *) dev->dev_private; 671 return (struct drm_psb_private *) dev->dev_private;
724} 672}
725 673
726/* 674/* psb_irq.c */
727 * MMU stuff.
728 */
729
730extern struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
731 int trap_pagefaults,
732 int invalid_type,
733 struct drm_psb_private *dev_priv);
734extern void psb_mmu_driver_takedown(struct psb_mmu_driver *driver);
735extern struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver
736 *driver);
737extern void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd, uint32_t mmu_offset,
738 uint32_t gtt_start, uint32_t gtt_pages);
739extern struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
740 int trap_pagefaults,
741 int invalid_type);
742extern void psb_mmu_free_pagedir(struct psb_mmu_pd *pd);
743extern void psb_mmu_flush(struct psb_mmu_driver *driver, int rc_prot);
744extern void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
745 unsigned long address,
746 uint32_t num_pages);
747extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd,
748 uint32_t start_pfn,
749 unsigned long address,
750 uint32_t num_pages, int type);
751extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
752 unsigned long *pfn);
753
754/*
755 * Enable / disable MMU for different requestors.
756 */
757
758
759extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context);
760extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
761 unsigned long address, uint32_t num_pages,
762 uint32_t desired_tile_stride,
763 uint32_t hw_tile_stride, int type);
764extern void psb_mmu_remove_pages(struct psb_mmu_pd *pd,
765 unsigned long address, uint32_t num_pages,
766 uint32_t desired_tile_stride,
767 uint32_t hw_tile_stride);
768/*
769 *psb_irq.c
770 */
771
772extern irqreturn_t psb_irq_handler(int irq, void *arg); 675extern irqreturn_t psb_irq_handler(int irq, void *arg);
773extern int psb_irq_enable_dpst(struct drm_device *dev); 676extern int psb_irq_enable_dpst(struct drm_device *dev);
774extern int psb_irq_disable_dpst(struct drm_device *dev); 677extern int psb_irq_disable_dpst(struct drm_device *dev);
@@ -791,24 +694,17 @@ psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
791 694
792extern u32 psb_get_vblank_counter(struct drm_device *dev, int crtc); 695extern u32 psb_get_vblank_counter(struct drm_device *dev, int crtc);
793 696
794/* 697/* framebuffer.c */
795 * framebuffer.c
796 */
797extern int psbfb_probed(struct drm_device *dev); 698extern int psbfb_probed(struct drm_device *dev);
798extern int psbfb_remove(struct drm_device *dev, 699extern int psbfb_remove(struct drm_device *dev,
799 struct drm_framebuffer *fb); 700 struct drm_framebuffer *fb);
800/* 701/* accel_2d.c */
801 * accel_2d.c
802 */
803extern void psbfb_copyarea(struct fb_info *info, 702extern void psbfb_copyarea(struct fb_info *info,
804 const struct fb_copyarea *region); 703 const struct fb_copyarea *region);
805extern int psbfb_sync(struct fb_info *info); 704extern int psbfb_sync(struct fb_info *info);
806extern void psb_spank(struct drm_psb_private *dev_priv); 705extern void psb_spank(struct drm_psb_private *dev_priv);
807 706
808/* 707/* psb_reset.c */
809 * psb_reset.c
810 */
811
812extern void psb_lid_timer_init(struct drm_psb_private *dev_priv); 708extern void psb_lid_timer_init(struct drm_psb_private *dev_priv);
813extern void psb_lid_timer_takedown(struct drm_psb_private *dev_priv); 709extern void psb_lid_timer_takedown(struct drm_psb_private *dev_priv);
814extern void psb_print_pagefault(struct drm_psb_private *dev_priv); 710extern void psb_print_pagefault(struct drm_psb_private *dev_priv);
@@ -867,9 +763,7 @@ extern const struct psb_ops mdfld_chip_ops;
867/* cdv_device.c */ 763/* cdv_device.c */
868extern const struct psb_ops cdv_chip_ops; 764extern const struct psb_ops cdv_chip_ops;
869 765
870/* 766/* Debug print bits setting */
871 * Debug print bits setting
872 */
873#define PSB_D_GENERAL (1 << 0) 767#define PSB_D_GENERAL (1 << 0)
874#define PSB_D_INIT (1 << 1) 768#define PSB_D_INIT (1 << 1)
875#define PSB_D_IRQ (1 << 2) 769#define PSB_D_IRQ (1 << 2)
@@ -885,10 +779,7 @@ extern const struct psb_ops cdv_chip_ops;
885 779
886extern int drm_idle_check_interval; 780extern int drm_idle_check_interval;
887 781
888/* 782/* Utilities */
889 * Utilities
890 */
891
892static inline u32 MRST_MSG_READ32(uint port, uint offset) 783static inline u32 MRST_MSG_READ32(uint port, uint offset)
893{ 784{
894 int mcr = (0xD0<<24) | (port << 16) | (offset << 8); 785 int mcr = (0xD0<<24) | (port << 16) | (offset << 8);
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index c8841ac6c8f1..21aed85eb96e 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -469,7 +469,8 @@ static void psb_intel_cursor_init(struct drm_device *dev,
469 /* Allocate 4 pages of stolen mem for a hardware cursor. That 469 /* Allocate 4 pages of stolen mem for a hardware cursor. That
470 * is enough for the 64 x 64 ARGB cursors we support. 470 * is enough for the 64 x 64 ARGB cursors we support.
471 */ 471 */
472 cursor_gt = psb_gtt_alloc_range(dev, 4 * PAGE_SIZE, "cursor", 1); 472 cursor_gt = psb_gtt_alloc_range(dev, 4 * PAGE_SIZE, "cursor", 1,
473 PAGE_SIZE);
473 if (!cursor_gt) { 474 if (!cursor_gt) {
474 gma_crtc->cursor_gt = NULL; 475 gma_crtc->cursor_gt = NULL;
475 goto out; 476 goto out;
@@ -554,33 +555,6 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe,
554 gma_crtc->active = true; 555 gma_crtc->active = true;
555} 556}
556 557
557int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
558 struct drm_file *file_priv)
559{
560 struct drm_psb_private *dev_priv = dev->dev_private;
561 struct drm_psb_get_pipe_from_crtc_id_arg *pipe_from_crtc_id = data;
562 struct drm_mode_object *drmmode_obj;
563 struct gma_crtc *crtc;
564
565 if (!dev_priv) {
566 dev_err(dev->dev, "called with no initialization\n");
567 return -EINVAL;
568 }
569
570 drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
571 DRM_MODE_OBJECT_CRTC);
572
573 if (!drmmode_obj) {
574 dev_err(dev->dev, "no such CRTC id\n");
575 return -ENOENT;
576 }
577
578 crtc = to_gma_crtc(obj_to_crtc(drmmode_obj));
579 pipe_from_crtc_id->pipe = crtc->pipe;
580
581 return 0;
582}
583
584struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe) 558struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
585{ 559{
586 struct drm_crtc *crtc = NULL; 560 struct drm_crtc *crtc = NULL;
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index dc2c8eb030fa..336bd3aa1a06 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -238,8 +238,6 @@ static inline struct gma_encoder *gma_attached_encoder(
238 238
239extern struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev, 239extern struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
240 struct drm_crtc *crtc); 240 struct drm_crtc *crtc);
241extern int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
242 struct drm_file *file_priv);
243extern struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, 241extern struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev,
244 int pipe); 242 int pipe);
245extern struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev, 243extern struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev,
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index f883f9e4c524..624eb36511c5 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -200,11 +200,64 @@ static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
200 mid_pipe_event_handler(dev, 1); 200 mid_pipe_event_handler(dev, 1);
201} 201}
202 202
203/*
204 * SGX interrupt handler
205 */
206static void psb_sgx_interrupt(struct drm_device *dev, u32 stat_1, u32 stat_2)
207{
208 struct drm_psb_private *dev_priv = dev->dev_private;
209 u32 val, addr;
210 int error = false;
211
212 if (stat_1 & _PSB_CE_TWOD_COMPLETE)
213 val = PSB_RSGX32(PSB_CR_2D_BLIT_STATUS);
214
215 if (stat_2 & _PSB_CE2_BIF_REQUESTER_FAULT) {
216 val = PSB_RSGX32(PSB_CR_BIF_INT_STAT);
217 addr = PSB_RSGX32(PSB_CR_BIF_FAULT);
218 if (val) {
219 if (val & _PSB_CBI_STAT_PF_N_RW)
220 DRM_ERROR("SGX MMU page fault:");
221 else
222 DRM_ERROR("SGX MMU read / write protection fault:");
223
224 if (val & _PSB_CBI_STAT_FAULT_CACHE)
225 DRM_ERROR("\tCache requestor");
226 if (val & _PSB_CBI_STAT_FAULT_TA)
227 DRM_ERROR("\tTA requestor");
228 if (val & _PSB_CBI_STAT_FAULT_VDM)
229 DRM_ERROR("\tVDM requestor");
230 if (val & _PSB_CBI_STAT_FAULT_2D)
231 DRM_ERROR("\t2D requestor");
232 if (val & _PSB_CBI_STAT_FAULT_PBE)
233 DRM_ERROR("\tPBE requestor");
234 if (val & _PSB_CBI_STAT_FAULT_TSP)
235 DRM_ERROR("\tTSP requestor");
236 if (val & _PSB_CBI_STAT_FAULT_ISP)
237 DRM_ERROR("\tISP requestor");
238 if (val & _PSB_CBI_STAT_FAULT_USSEPDS)
239 DRM_ERROR("\tUSSEPDS requestor");
240 if (val & _PSB_CBI_STAT_FAULT_HOST)
241 DRM_ERROR("\tHost requestor");
242
243 DRM_ERROR("\tMMU failing address is 0x%08x.\n",
244 (unsigned int)addr);
245 error = true;
246 }
247 }
248
249 /* Clear bits */
250 PSB_WSGX32(stat_1, PSB_CR_EVENT_HOST_CLEAR);
251 PSB_WSGX32(stat_2, PSB_CR_EVENT_HOST_CLEAR2);
252 PSB_RSGX32(PSB_CR_EVENT_HOST_CLEAR2);
253}
254
203irqreturn_t psb_irq_handler(int irq, void *arg) 255irqreturn_t psb_irq_handler(int irq, void *arg)
204{ 256{
205 struct drm_device *dev = arg; 257 struct drm_device *dev = arg;
206 struct drm_psb_private *dev_priv = dev->dev_private; 258 struct drm_psb_private *dev_priv = dev->dev_private;
207 uint32_t vdc_stat, dsp_int = 0, sgx_int = 0, hotplug_int = 0; 259 uint32_t vdc_stat, dsp_int = 0, sgx_int = 0, hotplug_int = 0;
260 u32 sgx_stat_1, sgx_stat_2;
208 int handled = 0; 261 int handled = 0;
209 262
210 spin_lock(&dev_priv->irqmask_lock); 263 spin_lock(&dev_priv->irqmask_lock);
@@ -233,14 +286,9 @@ irqreturn_t psb_irq_handler(int irq, void *arg)
233 } 286 }
234 287
235 if (sgx_int) { 288 if (sgx_int) {
236 /* Not expected - we have it masked, shut it up */ 289 sgx_stat_1 = PSB_RSGX32(PSB_CR_EVENT_STATUS);
237 u32 s, s2; 290 sgx_stat_2 = PSB_RSGX32(PSB_CR_EVENT_STATUS2);
238 s = PSB_RSGX32(PSB_CR_EVENT_STATUS); 291 psb_sgx_interrupt(dev, sgx_stat_1, sgx_stat_2);
239 s2 = PSB_RSGX32(PSB_CR_EVENT_STATUS2);
240 PSB_WSGX32(s, PSB_CR_EVENT_HOST_CLEAR);
241 PSB_WSGX32(s2, PSB_CR_EVENT_HOST_CLEAR2);
242 /* if s & _PSB_CE_TWOD_COMPLETE we have 2D done but
243 we may as well poll even if we add that ! */
244 handled = 1; 292 handled = 1;
245 } 293 }
246 294
@@ -269,8 +317,13 @@ void psb_irq_preinstall(struct drm_device *dev)
 
 	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
 
-	if (gma_power_is_on(dev))
+	if (gma_power_is_on(dev)) {
 		PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
+		PSB_WVDC32(0x00000000, PSB_INT_MASK_R);
+		PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
+		PSB_WSGX32(0x00000000, PSB_CR_EVENT_HOST_ENABLE);
+		PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
+	}
 	if (dev->vblank[0].enabled)
 		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
 	if (dev->vblank[1].enabled)
@@ -286,7 +339,7 @@ void psb_irq_preinstall(struct drm_device *dev)
 	/* Revisit this area - want per device masks ? */
 	if (dev_priv->ops->hotplug)
 		dev_priv->vdc_irq_mask |= _PSB_IRQ_DISP_HOTSYNC;
-	dev_priv->vdc_irq_mask |= _PSB_IRQ_ASLE;
+	dev_priv->vdc_irq_mask |= _PSB_IRQ_ASLE | _PSB_IRQ_SGX_FLAG;
 
 	/* This register is safe even if display island is off */
 	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
@@ -295,12 +348,16 @@ void psb_irq_preinstall(struct drm_device *dev)
 
 int psb_irq_postinstall(struct drm_device *dev)
 {
-	struct drm_psb_private *dev_priv =
-		(struct drm_psb_private *) dev->dev_private;
+	struct drm_psb_private *dev_priv = dev->dev_private;
 	unsigned long irqflags;
 
 	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
 
+	/* Enable 2D and MMU fault interrupts */
+	PSB_WSGX32(_PSB_CE2_BIF_REQUESTER_FAULT, PSB_CR_EVENT_HOST_ENABLE2);
+	PSB_WSGX32(_PSB_CE_TWOD_COMPLETE, PSB_CR_EVENT_HOST_ENABLE);
+	PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE); /* Post */
+
 	/* This register is safe even if display island is off */
 	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
 	PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
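
Note on the preinstall/postinstall hunks above: the patch programs two parallel VDC interrupt controls from the same mask, writing PSB_INT_MASK_R with the complement of dev_priv->vdc_irq_mask and PSB_INT_ENABLE_R with the mask itself, i.e. the mask register is treated as active-low with respect to the wanted events. A minimal sketch of that pairing, reusing the accessor and register names from the patch; the helper function itself is hypothetical and assumes the usual gma500 driver context (psb_drv.h) for the macros and struct drm_psb_private:

/* Hypothetical helper: how vdc_irq_mask maps onto the two VDC registers. */
static void gma_apply_vdc_irq_mask(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;

	/* PSB_INT_MASK_R is written with the complement of the wanted
	 * events, as psb_irq_preinstall does above... */
	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
	/* ...while PSB_INT_ENABLE_R takes the wanted events directly,
	 * as psb_irq_postinstall does above. */
	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
}

Keeping both writes derived from the single vdc_irq_mask field is what lets the new _PSB_IRQ_SGX_FLAG bit flow into both registers without further plumbing.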
diff --git a/include/drm/gma_drm.h b/include/drm/gma_drm.h
index 884613ee00ad..87ac5e6ca551 100644
--- a/include/drm/gma_drm.h
+++ b/include/drm/gma_drm.h
@@ -19,73 +19,7 @@
  *
  **************************************************************************/
 
-#ifndef _PSB_DRM_H_
-#define _PSB_DRM_H_
-
-/*
- * Manage the LUT for an output
- */
-struct drm_psb_dpst_lut_arg {
-	uint8_t lut[256];
-	int output_id;
-};
-
-/*
- * Validate modes
- */
-struct drm_psb_mode_operation_arg {
-	u32 obj_id;
-	u16 operation;
-	struct drm_mode_modeinfo mode;
-	u64 data;
-};
-
-/*
- * Query the stolen memory for smarter management of
- * memory by the server
- */
-struct drm_psb_stolen_memory_arg {
-	u32 base;
-	u32 size;
-};
-
-struct drm_psb_get_pipe_from_crtc_id_arg {
-	/** ID of CRTC being requested **/
-	u32 crtc_id;
-	/** pipe of requested CRTC **/
-	u32 pipe;
-};
-
-struct drm_psb_gem_create {
-	__u64 size;
-	__u32 handle;
-	__u32 flags;
-#define GMA_GEM_CREATE_STOLEN	1	/* Stolen memory can be used */
-};
-
-struct drm_psb_gem_mmap {
-	__u32 handle;
-	__u32 pad;
-	/**
-	 * Fake offset to use for subsequent mmap call
-	 *
-	 * This is a fixed-size type for 32/64 compatibility.
-	 */
-	__u64 offset;
-};
-
-/* Controlling the kernel modesetting buffers */
-
-#define DRM_GMA_GEM_CREATE	0x00	/* Create a GEM object */
-#define DRM_GMA_GEM_MMAP	0x01	/* Map GEM memory */
-#define DRM_GMA_STOLEN_MEMORY	0x02	/* Report stolen memory */
-#define DRM_GMA_2D_OP		0x03	/* Will be merged later */
-#define DRM_GMA_GAMMA		0x04	/* Set gamma table */
-#define DRM_GMA_ADB		0x05	/* Get backlight */
-#define DRM_GMA_DPST_BL		0x06	/* Set backlight */
-#define DRM_GMA_MODE_OPERATION	0x07	/* Mode validation/DC set */
-#define PSB_MODE_OPERATION_MODE_VALID	0x01
-#define DRM_GMA_GET_PIPE_FROM_CRTC_ID	0x08	/* CRTC to physical pipe# */
-
+#ifndef _GMA_DRM_H_
+#define _GMA_DRM_H_
 
 #endif
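
After this hunk the uapi header carries only the include guard: the driver-private buffer ioctls (DRM_GMA_GEM_CREATE, DRM_GMA_GEM_MMAP, DRM_GMA_STOLEN_MEMORY and the rest) are gone. For KMS scanout buffers the generic dumb-buffer ioctls cover the same ground, and gma500 provides the corresponding dumb-buffer hooks in gem.c. A rough userspace sketch of that generic path, assuming the kernel uapi headers are installed under <drm/>; the device node, 640x480 size and XRGB8888 depth are placeholders, not anything mandated by this patch:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);
	if (fd < 0)
		return 1;

	/* Allocate a 640x480, 32bpp scanout buffer with the generic ioctl. */
	struct drm_mode_create_dumb create = {
		.width = 640,
		.height = 480,
		.bpp = 32,
	};
	if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
		return 1;

	/* Ask the kernel for the fake mmap offset of that buffer... */
	struct drm_mode_map_dumb map = { .handle = create.handle };
	if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
		return 1;

	/* ...and map it through the DRM fd. */
	void *ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
			 MAP_SHARED, fd, map.offset);
	if (ptr == MAP_FAILED)
		return 1;

	printf("mapped %llu bytes, pitch %u\n",
	       (unsigned long long)create.size, create.pitch);
	munmap(ptr, create.size);
	close(fd);
	return 0;
}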