diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-01-29 23:49:12 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-01-29 23:49:12 -0500 |
| commit | 9b0cd304f26b9fca140de15deeac2bf357d1f388 (patch) | |
| tree | 03a0d74614865a5b776b2a98a433232013b1d369 | |
| parent | ca2a650f3dfdc30d71d21bcbb04d2d057779f3f9 (diff) | |
| parent | ef64cf9d06049e4e9df661f3be60b217e476bee1 (diff) | |
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie:
"Been a bit busy, first week of kids school, and waiting on other trees
to go in before I could send this, so its a bit later than I'd
normally like.
Highlights:
- core:
timestamp fixes, lots of misc cleanups
- new drivers:
bochs virtual vga
- vmwgfx:
major overhaul for their nextgen virt gpu.
- i915:
runtime D3 on HSW, watermark fixes, power well work, fbc fixes,
bdw is no longer prelim.
- nouveau:
gk110/208 acceleration, more pm groundwork, old overlay support
- radeon:
dpm rework and clockgating for CIK, pci config reset, big endian
fixes
- tegra:
panel support and DSI support, build as module, prime.
- armada, omap, gma500, rcar, exynos, mgag200, cirrus, ast:
fixes
- msm:
hdmi support for mdp5"
* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (595 commits)
drm/nouveau: resume display if any later suspend bits fail
drm/nouveau: fix lock unbalance in nouveau_crtc_page_flip
drm/nouveau: implement hooks for needed for drm vblank timestamping support
drm/nouveau/disp: add a method to fetch info needed by drm vblank timestamping
drm/nv50: fill in crtc mode struct members from crtc_mode_fixup
drm/radeon/dce8: workaround for atom BlankCrtc table
drm/radeon/DCE4+: clear bios scratch dpms bit (v2)
drm/radeon: set si_notify_smc_display_change properly
drm/radeon: fix DAC interrupt handling on DCE5+
drm/radeon: clean up active vram sizing
drm/radeon: skip async dma init on r6xx
drm/radeon/runpm: don't runtime suspend non-PX cards
drm/radeon: add ring to fence trace functions
drm/radeon: add missing trace point
drm/radeon: fix VMID use tracking
drm: ast,cirrus,mgag200: use drm_can_sleep
drm/gma500: Lock struct_mutex around cursor updates
drm/i915: Fix the offset issue for the stolen GEM objects
DRM: armada: fix missing DRM_KMS_FB_HELPER select
drm/i915: Decouple GPU error reporting from ring initialisation
...
512 files changed, 35535 insertions, 11882 deletions
diff --git a/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt b/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt index ab45c02aa658..efaeec8961b6 100644 --- a/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt +++ b/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt | |||
| @@ -118,6 +118,9 @@ of the following host1x client modules: | |||
| 118 | See ../reset/reset.txt for details. | 118 | See ../reset/reset.txt for details. |
| 119 | - reset-names: Must include the following entries: | 119 | - reset-names: Must include the following entries: |
| 120 | - dc | 120 | - dc |
| 121 | - nvidia,head: The number of the display controller head. This is used to | ||
| 122 | setup the various types of output to receive video data from the given | ||
| 123 | head. | ||
| 121 | 124 | ||
| 122 | Each display controller node has a child node, named "rgb", that represents | 125 | Each display controller node has a child node, named "rgb", that represents |
| 123 | the RGB output associated with the controller. It can take the following | 126 | the RGB output associated with the controller. It can take the following |
| @@ -125,6 +128,7 @@ of the following host1x client modules: | |||
| 125 | - nvidia,ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing | 128 | - nvidia,ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing |
| 126 | - nvidia,hpd-gpio: specifies a GPIO used for hotplug detection | 129 | - nvidia,hpd-gpio: specifies a GPIO used for hotplug detection |
| 127 | - nvidia,edid: supplies a binary EDID blob | 130 | - nvidia,edid: supplies a binary EDID blob |
| 131 | - nvidia,panel: phandle of a display panel | ||
| 128 | 132 | ||
| 129 | - hdmi: High Definition Multimedia Interface | 133 | - hdmi: High Definition Multimedia Interface |
| 130 | 134 | ||
| @@ -149,6 +153,7 @@ of the following host1x client modules: | |||
| 149 | - nvidia,ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing | 153 | - nvidia,ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing |
| 150 | - nvidia,hpd-gpio: specifies a GPIO used for hotplug detection | 154 | - nvidia,hpd-gpio: specifies a GPIO used for hotplug detection |
| 151 | - nvidia,edid: supplies a binary EDID blob | 155 | - nvidia,edid: supplies a binary EDID blob |
| 156 | - nvidia,panel: phandle of a display panel | ||
| 152 | 157 | ||
| 153 | - tvo: TV encoder output | 158 | - tvo: TV encoder output |
| 154 | 159 | ||
| @@ -169,11 +174,21 @@ of the following host1x client modules: | |||
| 169 | - clock-names: Must include the following entries: | 174 | - clock-names: Must include the following entries: |
| 170 | - dsi | 175 | - dsi |
| 171 | This MUST be the first entry. | 176 | This MUST be the first entry. |
| 177 | - lp | ||
| 172 | - parent | 178 | - parent |
| 173 | - resets: Must contain an entry for each entry in reset-names. | 179 | - resets: Must contain an entry for each entry in reset-names. |
| 174 | See ../reset/reset.txt for details. | 180 | See ../reset/reset.txt for details. |
| 175 | - reset-names: Must include the following entries: | 181 | - reset-names: Must include the following entries: |
| 176 | - dsi | 182 | - dsi |
| 183 | - nvidia,mipi-calibrate: Should contain a phandle and a specifier specifying | ||
| 184 | which pads are used by this DSI output and need to be calibrated. See also | ||
| 185 | ../mipi/nvidia,tegra114-mipi.txt. | ||
| 186 | |||
| 187 | Optional properties: | ||
| 188 | - nvidia,ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing | ||
| 189 | - nvidia,hpd-gpio: specifies a GPIO used for hotplug detection | ||
| 190 | - nvidia,edid: supplies a binary EDID blob | ||
| 191 | - nvidia,panel: phandle of a display panel | ||
| 177 | 192 | ||
| 178 | Example: | 193 | Example: |
| 179 | 194 | ||
| @@ -253,7 +268,7 @@ Example: | |||
| 253 | interrupts = <0 73 0x04>; | 268 | interrupts = <0 73 0x04>; |
| 254 | clocks = <&tegra_car TEGRA20_CLK_DISP1>, | 269 | clocks = <&tegra_car TEGRA20_CLK_DISP1>, |
| 255 | <&tegra_car TEGRA20_CLK_PLL_P>; | 270 | <&tegra_car TEGRA20_CLK_PLL_P>; |
| 256 | clock-names = "disp1", "parent"; | 271 | clock-names = "dc", "parent"; |
| 257 | resets = <&tegra_car 27>; | 272 | resets = <&tegra_car 27>; |
| 258 | reset-names = "dc"; | 273 | reset-names = "dc"; |
| 259 | 274 | ||
| @@ -268,7 +283,7 @@ Example: | |||
| 268 | interrupts = <0 74 0x04>; | 283 | interrupts = <0 74 0x04>; |
| 269 | clocks = <&tegra_car TEGRA20_CLK_DISP2>, | 284 | clocks = <&tegra_car TEGRA20_CLK_DISP2>, |
| 270 | <&tegra_car TEGRA20_CLK_PLL_P>; | 285 | <&tegra_car TEGRA20_CLK_PLL_P>; |
| 271 | clock-names = "disp2", "parent"; | 286 | clock-names = "dc", "parent"; |
| 272 | resets = <&tegra_car 26>; | 287 | resets = <&tegra_car 26>; |
| 273 | reset-names = "dc"; | 288 | reset-names = "dc"; |
| 274 | 289 | ||
diff --git a/Documentation/devicetree/bindings/mipi/dsi/mipi-dsi-bus.txt b/Documentation/devicetree/bindings/mipi/dsi/mipi-dsi-bus.txt new file mode 100644 index 000000000000..973c27273772 --- /dev/null +++ b/Documentation/devicetree/bindings/mipi/dsi/mipi-dsi-bus.txt | |||
| @@ -0,0 +1,98 @@ | |||
| 1 | MIPI DSI (Display Serial Interface) busses | ||
| 2 | ========================================== | ||
| 3 | |||
| 4 | The MIPI Display Serial Interface specifies a serial bus and a protocol for | ||
| 5 | communication between a host and up to four peripherals. This document will | ||
| 6 | define the syntax used to represent a DSI bus in a device tree. | ||
| 7 | |||
| 8 | This document describes DSI bus-specific properties only or defines existing | ||
| 9 | standard properties in the context of the DSI bus. | ||
| 10 | |||
| 11 | Each DSI host provides a DSI bus. The DSI host controller's node contains a | ||
| 12 | set of properties that characterize the bus. Child nodes describe individual | ||
| 13 | peripherals on that bus. | ||
| 14 | |||
| 15 | The following assumes that only a single peripheral is connected to a DSI | ||
| 16 | host. Experience shows that this is true for the large majority of setups. | ||
| 17 | |||
| 18 | DSI host | ||
| 19 | -------- | ||
| 20 | |||
| 21 | In addition to the standard properties and those defined by the parent bus of | ||
| 22 | a DSI host, the following properties apply to a node representing a DSI host. | ||
| 23 | |||
| 24 | Required properties: | ||
| 25 | - #address-cells: The number of cells required to represent an address on the | ||
| 26 | bus. DSI peripherals are addressed using a 2-bit virtual channel number, so | ||
| 27 | a maximum of 4 devices can be addressed on a single bus. Hence the value of | ||
| 28 | this property should be 1. | ||
| 29 | - #size-cells: Should be 0. There are cases where it makes sense to use a | ||
| 30 | different value here. See below. | ||
| 31 | |||
| 32 | DSI peripheral | ||
| 33 | -------------- | ||
| 34 | |||
| 35 | Peripherals are represented as child nodes of the DSI host's node. Properties | ||
| 36 | described here apply to all DSI peripherals, but individual bindings may want | ||
| 37 | to define additional, device-specific properties. | ||
| 38 | |||
| 39 | Required properties: | ||
| 40 | - reg: The virtual channel number of a DSI peripheral. Must be in the range | ||
| 41 | from 0 to 3. | ||
| 42 | |||
| 43 | Some DSI peripherals respond to more than a single virtual channel. In that | ||
| 44 | case two alternative representations can be chosen: | ||
| 45 | - The reg property can take multiple entries, one for each virtual channel | ||
| 46 | that the peripheral responds to. | ||
| 47 | - If the virtual channels that a peripheral responds to are consecutive, the | ||
| 48 | #size-cells can be set to 1. The first cell of each entry in the reg | ||
| 49 | property is the number of the first virtual channel and the second cell is | ||
| 50 | the number of consecutive virtual channels. | ||
| 51 | |||
| 52 | Example | ||
| 53 | ------- | ||
| 54 | |||
| 55 | dsi-host { | ||
| 56 | ... | ||
| 57 | |||
| 58 | #address-cells = <1>; | ||
| 59 | #size-cells = <0>; | ||
| 60 | |||
| 61 | /* peripheral responds to virtual channel 0 */ | ||
| 62 | peripheral@0 { | ||
| 63 | compatible = "..."; | ||
| 64 | reg = <0>; | ||
| 65 | }; | ||
| 66 | |||
| 67 | ... | ||
| 68 | }; | ||
| 69 | |||
| 70 | dsi-host { | ||
| 71 | ... | ||
| 72 | |||
| 73 | #address-cells = <1>; | ||
| 74 | #size-cells = <0>; | ||
| 75 | |||
| 76 | /* peripheral responds to virtual channels 0 and 2 */ | ||
| 77 | peripheral@0 { | ||
| 78 | compatible = "..."; | ||
| 79 | reg = <0, 2>; | ||
| 80 | }; | ||
| 81 | |||
| 82 | ... | ||
| 83 | }; | ||
| 84 | |||
| 85 | dsi-host { | ||
| 86 | ... | ||
| 87 | |||
| 88 | #address-cells = <1>; | ||
| 89 | #size-cells = <1>; | ||
| 90 | |||
| 91 | /* peripheral responds to virtual channels 1, 2 and 3 */ | ||
| 92 | peripheral@1 { | ||
| 93 | compatible = "..."; | ||
| 94 | reg = <1 3>; | ||
| 95 | }; | ||
| 96 | |||
| 97 | ... | ||
| 98 | }; | ||
diff --git a/Documentation/devicetree/bindings/mipi/nvidia,tegra114-mipi.txt b/Documentation/devicetree/bindings/mipi/nvidia,tegra114-mipi.txt new file mode 100644 index 000000000000..e4a25cedc5cf --- /dev/null +++ b/Documentation/devicetree/bindings/mipi/nvidia,tegra114-mipi.txt | |||
| @@ -0,0 +1,41 @@ | |||
| 1 | NVIDIA Tegra MIPI pad calibration controller | ||
| 2 | |||
| 3 | Required properties: | ||
| 4 | - compatible: "nvidia,tegra<chip>-mipi" | ||
| 5 | - reg: Physical base address and length of the controller's registers. | ||
| 6 | - clocks: Must contain an entry for each entry in clock-names. | ||
| 7 | See ../clocks/clock-bindings.txt for details. | ||
| 8 | - clock-names: Must include the following entries: | ||
| 9 | - mipi-cal | ||
| 10 | - #nvidia,mipi-calibrate-cells: Should be 1. The cell is a bitmask of the pads | ||
| 11 | that need to be calibrated for a given device. | ||
| 12 | |||
| 13 | User nodes need to contain an nvidia,mipi-calibrate property that has a | ||
| 14 | phandle to refer to the calibration controller node and a bitmask of the pads | ||
| 15 | that need to be calibrated. | ||
| 16 | |||
| 17 | Example: | ||
| 18 | |||
| 19 | mipi: mipi@700e3000 { | ||
| 20 | compatible = "nvidia,tegra114-mipi"; | ||
| 21 | reg = <0x700e3000 0x100>; | ||
| 22 | clocks = <&tegra_car TEGRA114_CLK_MIPI_CAL>; | ||
| 23 | clock-names = "mipi-cal"; | ||
| 24 | #nvidia,mipi-calibrate-cells = <1>; | ||
| 25 | }; | ||
| 26 | |||
| 27 | ... | ||
| 28 | |||
| 29 | host1x@50000000 { | ||
| 30 | ... | ||
| 31 | |||
| 32 | dsi@54300000 { | ||
| 33 | ... | ||
| 34 | |||
| 35 | nvidia,mipi-calibrate = <&mipi 0x060>; | ||
| 36 | |||
| 37 | ... | ||
| 38 | }; | ||
| 39 | |||
| 40 | ... | ||
| 41 | }; | ||
diff --git a/Documentation/devicetree/bindings/panel/auo,b101aw03.txt b/Documentation/devicetree/bindings/panel/auo,b101aw03.txt new file mode 100644 index 000000000000..72e088a4fb3a --- /dev/null +++ b/Documentation/devicetree/bindings/panel/auo,b101aw03.txt | |||
| @@ -0,0 +1,7 @@ | |||
| 1 | AU Optronics Corporation 10.1" WSVGA TFT LCD panel | ||
| 2 | |||
| 3 | Required properties: | ||
| 4 | - compatible: should be "auo,b101aw03" | ||
| 5 | |||
| 6 | This binding is compatible with the simple-panel binding, which is specified | ||
| 7 | in simple-panel.txt in this directory. | ||
diff --git a/Documentation/devicetree/bindings/panel/chunghwa,claa101wa01a.txt b/Documentation/devicetree/bindings/panel/chunghwa,claa101wa01a.txt new file mode 100644 index 000000000000..f24614e4d5ec --- /dev/null +++ b/Documentation/devicetree/bindings/panel/chunghwa,claa101wa01a.txt | |||
| @@ -0,0 +1,7 @@ | |||
| 1 | Chunghwa Picture Tubes Ltd. 10.1" WXGA TFT LCD panel | ||
| 2 | |||
| 3 | Required properties: | ||
| 4 | - compatible: should be "chunghwa,claa101wa01a" | ||
| 5 | |||
| 6 | This binding is compatible with the simple-panel binding, which is specified | ||
| 7 | in simple-panel.txt in this directory. | ||
diff --git a/Documentation/devicetree/bindings/panel/chunghwa,claa101wb03.txt b/Documentation/devicetree/bindings/panel/chunghwa,claa101wb03.txt new file mode 100644 index 000000000000..0ab2c05a4c22 --- /dev/null +++ b/Documentation/devicetree/bindings/panel/chunghwa,claa101wb03.txt | |||
| @@ -0,0 +1,7 @@ | |||
| 1 | Chunghwa Picture Tubes Ltd. 10.1" WXGA TFT LCD panel | ||
| 2 | |||
| 3 | Required properties: | ||
| 4 | - compatible: should be "chunghwa,claa101wb03" | ||
| 5 | |||
| 6 | This binding is compatible with the simple-panel binding, which is specified | ||
| 7 | in simple-panel.txt in this directory. | ||
diff --git a/Documentation/devicetree/bindings/panel/panasonic,vvx10f004b00.txt b/Documentation/devicetree/bindings/panel/panasonic,vvx10f004b00.txt new file mode 100644 index 000000000000..d328b0341bf4 --- /dev/null +++ b/Documentation/devicetree/bindings/panel/panasonic,vvx10f004b00.txt | |||
| @@ -0,0 +1,7 @@ | |||
| 1 | Panasonic Corporation 10.1" WUXGA TFT LCD panel | ||
| 2 | |||
| 3 | Required properties: | ||
| 4 | - compatible: should be "panasonic,vvx10f004b00" | ||
| 5 | |||
| 6 | This binding is compatible with the simple-panel binding, which is specified | ||
| 7 | in simple-panel.txt in this directory. | ||
diff --git a/Documentation/devicetree/bindings/panel/samsung,ltn101nt05.txt b/Documentation/devicetree/bindings/panel/samsung,ltn101nt05.txt new file mode 100644 index 000000000000..ef522c6bb85f --- /dev/null +++ b/Documentation/devicetree/bindings/panel/samsung,ltn101nt05.txt | |||
| @@ -0,0 +1,7 @@ | |||
| 1 | Samsung Electronics 10.1" WSVGA TFT LCD panel | ||
| 2 | |||
| 3 | Required properties: | ||
| 4 | - compatible: should be "samsung,ltn101nt05" | ||
| 5 | |||
| 6 | This binding is compatible with the simple-panel binding, which is specified | ||
| 7 | in simple-panel.txt in this directory. | ||
diff --git a/Documentation/devicetree/bindings/panel/simple-panel.txt b/Documentation/devicetree/bindings/panel/simple-panel.txt new file mode 100644 index 000000000000..1341bbf4aa3d --- /dev/null +++ b/Documentation/devicetree/bindings/panel/simple-panel.txt | |||
| @@ -0,0 +1,21 @@ | |||
| 1 | Simple display panel | ||
| 2 | |||
| 3 | Required properties: | ||
| 4 | - power-supply: regulator to provide the supply voltage | ||
| 5 | |||
| 6 | Optional properties: | ||
| 7 | - ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing | ||
| 8 | - enable-gpios: GPIO pin to enable or disable the panel | ||
| 9 | - backlight: phandle of the backlight device attached to the panel | ||
| 10 | |||
| 11 | Example: | ||
| 12 | |||
| 13 | panel: panel { | ||
| 14 | compatible = "cptt,claa101wb01"; | ||
| 15 | ddc-i2c-bus = <&panelddc>; | ||
| 16 | |||
| 17 | power-supply = <&vdd_pnl_reg>; | ||
| 18 | enable-gpios = <&gpio 90 0>; | ||
| 19 | |||
| 20 | backlight = <&backlight>; | ||
| 21 | }; | ||
diff --git a/drivers/char/Makefile b/drivers/char/Makefile index 290fe5b7fd32..a324f9303e36 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile | |||
| @@ -49,7 +49,7 @@ obj-$(CONFIG_GPIO_TB0219) += tb0219.o | |||
| 49 | obj-$(CONFIG_TELCLOCK) += tlclk.o | 49 | obj-$(CONFIG_TELCLOCK) += tlclk.o |
| 50 | 50 | ||
| 51 | obj-$(CONFIG_MWAVE) += mwave/ | 51 | obj-$(CONFIG_MWAVE) += mwave/ |
| 52 | obj-$(CONFIG_AGP) += agp/ | 52 | obj-y += agp/ |
| 53 | obj-$(CONFIG_PCMCIA) += pcmcia/ | 53 | obj-$(CONFIG_PCMCIA) += pcmcia/ |
| 54 | 54 | ||
| 55 | obj-$(CONFIG_HANGCHECK_TIMER) += hangcheck-timer.o | 55 | obj-$(CONFIG_HANGCHECK_TIMER) += hangcheck-timer.o |
diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig index d8b1b576556c..c528f96ee204 100644 --- a/drivers/char/agp/Kconfig +++ b/drivers/char/agp/Kconfig | |||
| @@ -68,6 +68,7 @@ config AGP_AMD64 | |||
| 68 | config AGP_INTEL | 68 | config AGP_INTEL |
| 69 | tristate "Intel 440LX/BX/GX, I8xx and E7x05 chipset support" | 69 | tristate "Intel 440LX/BX/GX, I8xx and E7x05 chipset support" |
| 70 | depends on AGP && X86 | 70 | depends on AGP && X86 |
| 71 | select INTEL_GTT | ||
| 71 | help | 72 | help |
| 72 | This option gives you AGP support for the GLX component of X | 73 | This option gives you AGP support for the GLX component of X |
| 73 | on Intel 440LX/BX/GX, 815, 820, 830, 840, 845, 850, 860, 875, | 74 | on Intel 440LX/BX/GX, 815, 820, 830, 840, 845, 850, 860, 875, |
| @@ -155,3 +156,7 @@ config AGP_SGI_TIOCA | |||
| 155 | This option gives you AGP GART support for the SGI TIO chipset | 156 | This option gives you AGP GART support for the SGI TIO chipset |
| 156 | for IA64 processors. | 157 | for IA64 processors. |
| 157 | 158 | ||
| 159 | config INTEL_GTT | ||
| 160 | tristate | ||
| 161 | depends on X86 && PCI | ||
| 162 | |||
diff --git a/drivers/char/agp/Makefile b/drivers/char/agp/Makefile index 8eb56e273e75..604489bcdbf9 100644 --- a/drivers/char/agp/Makefile +++ b/drivers/char/agp/Makefile | |||
| @@ -13,7 +13,7 @@ obj-$(CONFIG_AGP_HP_ZX1) += hp-agp.o | |||
| 13 | obj-$(CONFIG_AGP_PARISC) += parisc-agp.o | 13 | obj-$(CONFIG_AGP_PARISC) += parisc-agp.o |
| 14 | obj-$(CONFIG_AGP_I460) += i460-agp.o | 14 | obj-$(CONFIG_AGP_I460) += i460-agp.o |
| 15 | obj-$(CONFIG_AGP_INTEL) += intel-agp.o | 15 | obj-$(CONFIG_AGP_INTEL) += intel-agp.o |
| 16 | obj-$(CONFIG_AGP_INTEL) += intel-gtt.o | 16 | obj-$(CONFIG_INTEL_GTT) += intel-gtt.o |
| 17 | obj-$(CONFIG_AGP_NVIDIA) += nvidia-agp.o | 17 | obj-$(CONFIG_AGP_NVIDIA) += nvidia-agp.o |
| 18 | obj-$(CONFIG_AGP_SGI_TIOCA) += sgi-agp.o | 18 | obj-$(CONFIG_AGP_SGI_TIOCA) += sgi-agp.o |
| 19 | obj-$(CONFIG_AGP_SIS) += sis-agp.o | 19 | obj-$(CONFIG_AGP_SIS) += sis-agp.o |
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index a7c276585a9f..f9b9ca5d31b7 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c | |||
| @@ -14,9 +14,6 @@ | |||
| 14 | #include "intel-agp.h" | 14 | #include "intel-agp.h" |
| 15 | #include <drm/intel-gtt.h> | 15 | #include <drm/intel-gtt.h> |
| 16 | 16 | ||
| 17 | int intel_agp_enabled; | ||
| 18 | EXPORT_SYMBOL(intel_agp_enabled); | ||
| 19 | |||
| 20 | static int intel_fetch_size(void) | 17 | static int intel_fetch_size(void) |
| 21 | { | 18 | { |
| 22 | int i; | 19 | int i; |
| @@ -806,8 +803,6 @@ static int agp_intel_probe(struct pci_dev *pdev, | |||
| 806 | found_gmch: | 803 | found_gmch: |
| 807 | pci_set_drvdata(pdev, bridge); | 804 | pci_set_drvdata(pdev, bridge); |
| 808 | err = agp_add_bridge(bridge); | 805 | err = agp_add_bridge(bridge); |
| 809 | if (!err) | ||
| 810 | intel_agp_enabled = 1; | ||
| 811 | return err; | 806 | return err; |
| 812 | } | 807 | } |
| 813 | 808 | ||
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index ad5da1ffcbe9..5c85350f4c3d 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c | |||
| @@ -94,6 +94,7 @@ static struct _intel_private { | |||
| 94 | #define IS_IRONLAKE intel_private.driver->is_ironlake | 94 | #define IS_IRONLAKE intel_private.driver->is_ironlake |
| 95 | #define HAS_PGTBL_EN intel_private.driver->has_pgtbl_enable | 95 | #define HAS_PGTBL_EN intel_private.driver->has_pgtbl_enable |
| 96 | 96 | ||
| 97 | #if IS_ENABLED(CONFIG_AGP_INTEL) | ||
| 97 | static int intel_gtt_map_memory(struct page **pages, | 98 | static int intel_gtt_map_memory(struct page **pages, |
| 98 | unsigned int num_entries, | 99 | unsigned int num_entries, |
| 99 | struct sg_table *st) | 100 | struct sg_table *st) |
| @@ -168,6 +169,7 @@ static void i8xx_destroy_pages(struct page *page) | |||
| 168 | __free_pages(page, 2); | 169 | __free_pages(page, 2); |
| 169 | atomic_dec(&agp_bridge->current_memory_agp); | 170 | atomic_dec(&agp_bridge->current_memory_agp); |
| 170 | } | 171 | } |
| 172 | #endif | ||
| 171 | 173 | ||
| 172 | #define I810_GTT_ORDER 4 | 174 | #define I810_GTT_ORDER 4 |
| 173 | static int i810_setup(void) | 175 | static int i810_setup(void) |
| @@ -208,6 +210,7 @@ static void i810_cleanup(void) | |||
| 208 | free_gatt_pages(intel_private.i81x_gtt_table, I810_GTT_ORDER); | 210 | free_gatt_pages(intel_private.i81x_gtt_table, I810_GTT_ORDER); |
| 209 | } | 211 | } |
| 210 | 212 | ||
| 213 | #if IS_ENABLED(CONFIG_AGP_INTEL) | ||
| 211 | static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start, | 214 | static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start, |
| 212 | int type) | 215 | int type) |
| 213 | { | 216 | { |
| @@ -288,6 +291,7 @@ static void intel_i810_free_by_type(struct agp_memory *curr) | |||
| 288 | } | 291 | } |
| 289 | kfree(curr); | 292 | kfree(curr); |
| 290 | } | 293 | } |
| 294 | #endif | ||
| 291 | 295 | ||
| 292 | static int intel_gtt_setup_scratch_page(void) | 296 | static int intel_gtt_setup_scratch_page(void) |
| 293 | { | 297 | { |
| @@ -645,7 +649,9 @@ static int intel_gtt_init(void) | |||
| 645 | return -ENOMEM; | 649 | return -ENOMEM; |
| 646 | } | 650 | } |
| 647 | 651 | ||
| 652 | #if IS_ENABLED(CONFIG_AGP_INTEL) | ||
| 648 | global_cache_flush(); /* FIXME: ? */ | 653 | global_cache_flush(); /* FIXME: ? */ |
| 654 | #endif | ||
| 649 | 655 | ||
| 650 | intel_private.stolen_size = intel_gtt_stolen_size(); | 656 | intel_private.stolen_size = intel_gtt_stolen_size(); |
| 651 | 657 | ||
| @@ -666,6 +672,7 @@ static int intel_gtt_init(void) | |||
| 666 | return 0; | 672 | return 0; |
| 667 | } | 673 | } |
| 668 | 674 | ||
| 675 | #if IS_ENABLED(CONFIG_AGP_INTEL) | ||
| 669 | static int intel_fake_agp_fetch_size(void) | 676 | static int intel_fake_agp_fetch_size(void) |
| 670 | { | 677 | { |
| 671 | int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes); | 678 | int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes); |
| @@ -684,6 +691,7 @@ static int intel_fake_agp_fetch_size(void) | |||
| 684 | 691 | ||
| 685 | return 0; | 692 | return 0; |
| 686 | } | 693 | } |
| 694 | #endif | ||
| 687 | 695 | ||
| 688 | static void i830_cleanup(void) | 696 | static void i830_cleanup(void) |
| 689 | { | 697 | { |
| @@ -795,6 +803,7 @@ static int i830_setup(void) | |||
| 795 | return 0; | 803 | return 0; |
| 796 | } | 804 | } |
| 797 | 805 | ||
| 806 | #if IS_ENABLED(CONFIG_AGP_INTEL) | ||
| 798 | static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge) | 807 | static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge) |
| 799 | { | 808 | { |
| 800 | agp_bridge->gatt_table_real = NULL; | 809 | agp_bridge->gatt_table_real = NULL; |
| @@ -819,6 +828,7 @@ static int intel_fake_agp_configure(void) | |||
| 819 | 828 | ||
| 820 | return 0; | 829 | return 0; |
| 821 | } | 830 | } |
| 831 | #endif | ||
| 822 | 832 | ||
| 823 | static bool i830_check_flags(unsigned int flags) | 833 | static bool i830_check_flags(unsigned int flags) |
| 824 | { | 834 | { |
| @@ -857,6 +867,7 @@ void intel_gtt_insert_sg_entries(struct sg_table *st, | |||
| 857 | } | 867 | } |
| 858 | EXPORT_SYMBOL(intel_gtt_insert_sg_entries); | 868 | EXPORT_SYMBOL(intel_gtt_insert_sg_entries); |
| 859 | 869 | ||
| 870 | #if IS_ENABLED(CONFIG_AGP_INTEL) | ||
| 860 | static void intel_gtt_insert_pages(unsigned int first_entry, | 871 | static void intel_gtt_insert_pages(unsigned int first_entry, |
| 861 | unsigned int num_entries, | 872 | unsigned int num_entries, |
| 862 | struct page **pages, | 873 | struct page **pages, |
| @@ -922,6 +933,7 @@ out_err: | |||
| 922 | mem->is_flushed = true; | 933 | mem->is_flushed = true; |
| 923 | return ret; | 934 | return ret; |
| 924 | } | 935 | } |
| 936 | #endif | ||
| 925 | 937 | ||
| 926 | void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries) | 938 | void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries) |
| 927 | { | 939 | { |
| @@ -935,6 +947,7 @@ void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries) | |||
| 935 | } | 947 | } |
| 936 | EXPORT_SYMBOL(intel_gtt_clear_range); | 948 | EXPORT_SYMBOL(intel_gtt_clear_range); |
| 937 | 949 | ||
| 950 | #if IS_ENABLED(CONFIG_AGP_INTEL) | ||
| 938 | static int intel_fake_agp_remove_entries(struct agp_memory *mem, | 951 | static int intel_fake_agp_remove_entries(struct agp_memory *mem, |
| 939 | off_t pg_start, int type) | 952 | off_t pg_start, int type) |
| 940 | { | 953 | { |
| @@ -976,6 +989,7 @@ static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count, | |||
| 976 | /* always return NULL for other allocation types for now */ | 989 | /* always return NULL for other allocation types for now */ |
| 977 | return NULL; | 990 | return NULL; |
| 978 | } | 991 | } |
| 992 | #endif | ||
| 979 | 993 | ||
| 980 | static int intel_alloc_chipset_flush_resource(void) | 994 | static int intel_alloc_chipset_flush_resource(void) |
| 981 | { | 995 | { |
| @@ -1129,6 +1143,7 @@ static int i9xx_setup(void) | |||
| 1129 | return 0; | 1143 | return 0; |
| 1130 | } | 1144 | } |
| 1131 | 1145 | ||
| 1146 | #if IS_ENABLED(CONFIG_AGP_INTEL) | ||
| 1132 | static const struct agp_bridge_driver intel_fake_agp_driver = { | 1147 | static const struct agp_bridge_driver intel_fake_agp_driver = { |
| 1133 | .owner = THIS_MODULE, | 1148 | .owner = THIS_MODULE, |
| 1134 | .size_type = FIXED_APER_SIZE, | 1149 | .size_type = FIXED_APER_SIZE, |
| @@ -1150,6 +1165,7 @@ static const struct agp_bridge_driver intel_fake_agp_driver = { | |||
| 1150 | .agp_destroy_page = agp_generic_destroy_page, | 1165 | .agp_destroy_page = agp_generic_destroy_page, |
| 1151 | .agp_destroy_pages = agp_generic_destroy_pages, | 1166 | .agp_destroy_pages = agp_generic_destroy_pages, |
| 1152 | }; | 1167 | }; |
| 1168 | #endif | ||
| 1153 | 1169 | ||
| 1154 | static const struct intel_gtt_driver i81x_gtt_driver = { | 1170 | static const struct intel_gtt_driver i81x_gtt_driver = { |
| 1155 | .gen = 1, | 1171 | .gen = 1, |
| @@ -1367,11 +1383,13 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev, | |||
| 1367 | 1383 | ||
| 1368 | intel_private.refcount++; | 1384 | intel_private.refcount++; |
| 1369 | 1385 | ||
| 1386 | #if IS_ENABLED(CONFIG_AGP_INTEL) | ||
| 1370 | if (bridge) { | 1387 | if (bridge) { |
| 1371 | bridge->driver = &intel_fake_agp_driver; | 1388 | bridge->driver = &intel_fake_agp_driver; |
| 1372 | bridge->dev_private_data = &intel_private; | 1389 | bridge->dev_private_data = &intel_private; |
| 1373 | bridge->dev = bridge_pdev; | 1390 | bridge->dev = bridge_pdev; |
| 1374 | } | 1391 | } |
| 1392 | #endif | ||
| 1375 | 1393 | ||
| 1376 | intel_private.bridge_dev = pci_dev_get(bridge_pdev); | 1394 | intel_private.bridge_dev = pci_dev_get(bridge_pdev); |
| 1377 | 1395 | ||
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index f86427591167..8e7fa4dbaed8 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig | |||
| @@ -20,6 +20,10 @@ menuconfig DRM | |||
| 20 | details. You should also select and configure AGP | 20 | details. You should also select and configure AGP |
| 21 | (/dev/agpgart) support if it is available for your platform. | 21 | (/dev/agpgart) support if it is available for your platform. |
| 22 | 22 | ||
| 23 | config DRM_MIPI_DSI | ||
| 24 | bool | ||
| 25 | depends on DRM | ||
| 26 | |||
| 23 | config DRM_USB | 27 | config DRM_USB |
| 24 | tristate | 28 | tristate |
| 25 | depends on DRM | 29 | depends on DRM |
| @@ -188,6 +192,10 @@ source "drivers/gpu/drm/tilcdc/Kconfig" | |||
| 188 | 192 | ||
| 189 | source "drivers/gpu/drm/qxl/Kconfig" | 193 | source "drivers/gpu/drm/qxl/Kconfig" |
| 190 | 194 | ||
| 195 | source "drivers/gpu/drm/bochs/Kconfig" | ||
| 196 | |||
| 191 | source "drivers/gpu/drm/msm/Kconfig" | 197 | source "drivers/gpu/drm/msm/Kconfig" |
| 192 | 198 | ||
| 193 | source "drivers/gpu/drm/tegra/Kconfig" | 199 | source "drivers/gpu/drm/tegra/Kconfig" |
| 200 | |||
| 201 | source "drivers/gpu/drm/panel/Kconfig" | ||
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index cc08b845f965..292a79d64146 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile | |||
| @@ -18,6 +18,7 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \ | |||
| 18 | drm-$(CONFIG_COMPAT) += drm_ioc32.o | 18 | drm-$(CONFIG_COMPAT) += drm_ioc32.o |
| 19 | drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o | 19 | drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o |
| 20 | drm-$(CONFIG_PCI) += ati_pcigart.o | 20 | drm-$(CONFIG_PCI) += ati_pcigart.o |
| 21 | drm-$(CONFIG_DRM_PANEL) += drm_panel.o | ||
| 21 | 22 | ||
| 22 | drm-usb-y := drm_usb.o | 23 | drm-usb-y := drm_usb.o |
| 23 | 24 | ||
| @@ -31,6 +32,7 @@ obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o | |||
| 31 | CFLAGS_drm_trace_points.o := -I$(src) | 32 | CFLAGS_drm_trace_points.o := -I$(src) |
| 32 | 33 | ||
| 33 | obj-$(CONFIG_DRM) += drm.o | 34 | obj-$(CONFIG_DRM) += drm.o |
| 35 | obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o | ||
| 34 | obj-$(CONFIG_DRM_USB) += drm_usb.o | 36 | obj-$(CONFIG_DRM_USB) += drm_usb.o |
| 35 | obj-$(CONFIG_DRM_TTM) += ttm/ | 37 | obj-$(CONFIG_DRM_TTM) += ttm/ |
| 36 | obj-$(CONFIG_DRM_TDFX) += tdfx/ | 38 | obj-$(CONFIG_DRM_TDFX) += tdfx/ |
| @@ -56,6 +58,8 @@ obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/ | |||
| 56 | obj-$(CONFIG_DRM_OMAP) += omapdrm/ | 58 | obj-$(CONFIG_DRM_OMAP) += omapdrm/ |
| 57 | obj-$(CONFIG_DRM_TILCDC) += tilcdc/ | 59 | obj-$(CONFIG_DRM_TILCDC) += tilcdc/ |
| 58 | obj-$(CONFIG_DRM_QXL) += qxl/ | 60 | obj-$(CONFIG_DRM_QXL) += qxl/ |
| 61 | obj-$(CONFIG_DRM_BOCHS) += bochs/ | ||
| 59 | obj-$(CONFIG_DRM_MSM) += msm/ | 62 | obj-$(CONFIG_DRM_MSM) += msm/ |
| 60 | obj-$(CONFIG_DRM_TEGRA) += tegra/ | 63 | obj-$(CONFIG_DRM_TEGRA) += tegra/ |
| 61 | obj-y += i2c/ | 64 | obj-y += i2c/ |
| 65 | obj-y += panel/ | ||
diff --git a/drivers/gpu/drm/armada/Kconfig b/drivers/gpu/drm/armada/Kconfig index 40d371521fe1..50ae88ad4d76 100644 --- a/drivers/gpu/drm/armada/Kconfig +++ b/drivers/gpu/drm/armada/Kconfig | |||
| @@ -5,6 +5,7 @@ config DRM_ARMADA | |||
| 5 | select FB_CFB_COPYAREA | 5 | select FB_CFB_COPYAREA |
| 6 | select FB_CFB_IMAGEBLIT | 6 | select FB_CFB_IMAGEBLIT |
| 7 | select DRM_KMS_HELPER | 7 | select DRM_KMS_HELPER |
| 8 | select DRM_KMS_FB_HELPER | ||
| 8 | help | 9 | help |
| 9 | Support the "LCD" controllers found on the Marvell Armada 510 | 10 | Support the "LCD" controllers found on the Marvell Armada 510 |
| 10 | devices. There are two controllers on the device, each controller | 11 | devices. There are two controllers on the device, each controller |
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c index 62d0ff3efddf..acf3a36c9ebc 100644 --- a/drivers/gpu/drm/armada/armada_drv.c +++ b/drivers/gpu/drm/armada/armada_drv.c | |||
| @@ -128,6 +128,7 @@ static int armada_drm_load(struct drm_device *dev, unsigned long flags) | |||
| 128 | return -ENOMEM; | 128 | return -ENOMEM; |
| 129 | } | 129 | } |
| 130 | 130 | ||
| 131 | platform_set_drvdata(dev->platformdev, dev); | ||
| 131 | dev->dev_private = priv; | 132 | dev->dev_private = priv; |
| 132 | 133 | ||
| 133 | /* Get the implementation specific driver data. */ | 134 | /* Get the implementation specific driver data. */ |
| @@ -381,7 +382,7 @@ static int armada_drm_probe(struct platform_device *pdev) | |||
| 381 | 382 | ||
| 382 | static int armada_drm_remove(struct platform_device *pdev) | 383 | static int armada_drm_remove(struct platform_device *pdev) |
| 383 | { | 384 | { |
| 384 | drm_platform_exit(&armada_drm_driver, pdev); | 385 | drm_put_dev(platform_get_drvdata(pdev)); |
| 385 | return 0; | 386 | return 0; |
| 386 | } | 387 | } |
| 387 | 388 | ||
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c index 7b33e14e44aa..3f65dd6676b2 100644 --- a/drivers/gpu/drm/ast/ast_fb.c +++ b/drivers/gpu/drm/ast/ast_fb.c | |||
| @@ -65,7 +65,7 @@ static void ast_dirty_update(struct ast_fbdev *afbdev, | |||
| 65 | * then the BO is being moved and we should | 65 | * then the BO is being moved and we should |
| 66 | * store up the damage until later. | 66 | * store up the damage until later. |
| 67 | */ | 67 | */ |
| 68 | if (!in_interrupt()) | 68 | if (!drm_can_sleep()) |
| 69 | ret = ast_bo_reserve(bo, true); | 69 | ret = ast_bo_reserve(bo, true); |
| 70 | if (ret) { | 70 | if (ret) { |
| 71 | if (ret != -EBUSY) | 71 | if (ret != -EBUSY) |
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index af0b868a9dfd..50535fd5a88d 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c | |||
| @@ -189,53 +189,6 @@ static int ast_get_dram_info(struct drm_device *dev) | |||
| 189 | return 0; | 189 | return 0; |
| 190 | } | 190 | } |
| 191 | 191 | ||
| 192 | uint32_t ast_get_max_dclk(struct drm_device *dev, int bpp) | ||
| 193 | { | ||
| 194 | struct ast_private *ast = dev->dev_private; | ||
| 195 | uint32_t dclk, jreg; | ||
| 196 | uint32_t dram_bus_width, mclk, dram_bandwidth, actual_dram_bandwidth, dram_efficency = 500; | ||
| 197 | |||
| 198 | dram_bus_width = ast->dram_bus_width; | ||
| 199 | mclk = ast->mclk; | ||
| 200 | |||
| 201 | if (ast->chip == AST2100 || | ||
| 202 | ast->chip == AST1100 || | ||
| 203 | ast->chip == AST2200 || | ||
| 204 | ast->chip == AST2150 || | ||
| 205 | ast->dram_bus_width == 16) | ||
| 206 | dram_efficency = 600; | ||
| 207 | else if (ast->chip == AST2300) | ||
| 208 | dram_efficency = 400; | ||
| 209 | |||
| 210 | dram_bandwidth = mclk * dram_bus_width * 2 / 8; | ||
| 211 | actual_dram_bandwidth = dram_bandwidth * dram_efficency / 1000; | ||
| 212 | |||
| 213 | if (ast->chip == AST1180) | ||
| 214 | dclk = actual_dram_bandwidth / ((bpp + 1) / 8); | ||
| 215 | else { | ||
| 216 | jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff); | ||
| 217 | if ((jreg & 0x08) && (ast->chip == AST2000)) | ||
| 218 | dclk = actual_dram_bandwidth / ((bpp + 1 + 16) / 8); | ||
| 219 | else if ((jreg & 0x08) && (bpp == 8)) | ||
| 220 | dclk = actual_dram_bandwidth / ((bpp + 1 + 24) / 8); | ||
| 221 | else | ||
| 222 | dclk = actual_dram_bandwidth / ((bpp + 1) / 8); | ||
| 223 | } | ||
| 224 | |||
| 225 | if (ast->chip == AST2100 || | ||
| 226 | ast->chip == AST2200 || | ||
| 227 | ast->chip == AST2300 || | ||
| 228 | ast->chip == AST1180) { | ||
| 229 | if (dclk > 200) | ||
| 230 | dclk = 200; | ||
| 231 | } else { | ||
| 232 | if (dclk > 165) | ||
| 233 | dclk = 165; | ||
| 234 | } | ||
| 235 | |||
| 236 | return dclk; | ||
| 237 | } | ||
| 238 | |||
| 239 | static void ast_user_framebuffer_destroy(struct drm_framebuffer *fb) | 192 | static void ast_user_framebuffer_destroy(struct drm_framebuffer *fb) |
| 240 | { | 193 | { |
| 241 | struct ast_framebuffer *ast_fb = to_ast_framebuffer(fb); | 194 | struct ast_framebuffer *ast_fb = to_ast_framebuffer(fb); |
| @@ -449,7 +402,7 @@ int ast_dumb_create(struct drm_file *file, | |||
| 449 | return 0; | 402 | return 0; |
| 450 | } | 403 | } |
| 451 | 404 | ||
| 452 | void ast_bo_unref(struct ast_bo **bo) | 405 | static void ast_bo_unref(struct ast_bo **bo) |
| 453 | { | 406 | { |
| 454 | struct ttm_buffer_object *tbo; | 407 | struct ttm_buffer_object *tbo; |
| 455 | 408 | ||
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 7fc9f7272b56..cca063b11083 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c | |||
| @@ -404,7 +404,7 @@ static void ast_set_ext_reg(struct drm_crtc *crtc, struct drm_display_mode *mode | |||
| 404 | } | 404 | } |
| 405 | } | 405 | } |
| 406 | 406 | ||
| 407 | void ast_set_sync_reg(struct drm_device *dev, struct drm_display_mode *mode, | 407 | static void ast_set_sync_reg(struct drm_device *dev, struct drm_display_mode *mode, |
| 408 | struct ast_vbios_mode_info *vbios_mode) | 408 | struct ast_vbios_mode_info *vbios_mode) |
| 409 | { | 409 | { |
| 410 | struct ast_private *ast = dev->dev_private; | 410 | struct ast_private *ast = dev->dev_private; |
| @@ -415,7 +415,7 @@ void ast_set_sync_reg(struct drm_device *dev, struct drm_display_mode *mode, | |||
| 415 | ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, jreg); | 415 | ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, jreg); |
| 416 | } | 416 | } |
| 417 | 417 | ||
| 418 | bool ast_set_dac_reg(struct drm_crtc *crtc, struct drm_display_mode *mode, | 418 | static bool ast_set_dac_reg(struct drm_crtc *crtc, struct drm_display_mode *mode, |
| 419 | struct ast_vbios_mode_info *vbios_mode) | 419 | struct ast_vbios_mode_info *vbios_mode) |
| 420 | { | 420 | { |
| 421 | switch (crtc->fb->bits_per_pixel) { | 421 | switch (crtc->fb->bits_per_pixel) { |
| @@ -427,7 +427,7 @@ bool ast_set_dac_reg(struct drm_crtc *crtc, struct drm_display_mode *mode, | |||
| 427 | return true; | 427 | return true; |
| 428 | } | 428 | } |
| 429 | 429 | ||
| 430 | void ast_set_start_address_crt1(struct drm_crtc *crtc, unsigned offset) | 430 | static void ast_set_start_address_crt1(struct drm_crtc *crtc, unsigned offset) |
| 431 | { | 431 | { |
| 432 | struct ast_private *ast = crtc->dev->dev_private; | 432 | struct ast_private *ast = crtc->dev->dev_private; |
| 433 | u32 addr; | 433 | u32 addr; |
| @@ -623,7 +623,7 @@ static const struct drm_crtc_funcs ast_crtc_funcs = { | |||
| 623 | .destroy = ast_crtc_destroy, | 623 | .destroy = ast_crtc_destroy, |
| 624 | }; | 624 | }; |
| 625 | 625 | ||
| 626 | int ast_crtc_init(struct drm_device *dev) | 626 | static int ast_crtc_init(struct drm_device *dev) |
| 627 | { | 627 | { |
| 628 | struct ast_crtc *crtc; | 628 | struct ast_crtc *crtc; |
| 629 | int i; | 629 | int i; |
| @@ -710,7 +710,7 @@ static const struct drm_encoder_helper_funcs ast_enc_helper_funcs = { | |||
| 710 | .mode_set = ast_encoder_mode_set, | 710 | .mode_set = ast_encoder_mode_set, |
| 711 | }; | 711 | }; |
| 712 | 712 | ||
| 713 | int ast_encoder_init(struct drm_device *dev) | 713 | static int ast_encoder_init(struct drm_device *dev) |
| 714 | { | 714 | { |
| 715 | struct ast_encoder *ast_encoder; | 715 | struct ast_encoder *ast_encoder; |
| 716 | 716 | ||
| @@ -777,7 +777,7 @@ static const struct drm_connector_funcs ast_connector_funcs = { | |||
| 777 | .destroy = ast_connector_destroy, | 777 | .destroy = ast_connector_destroy, |
| 778 | }; | 778 | }; |
| 779 | 779 | ||
| 780 | int ast_connector_init(struct drm_device *dev) | 780 | static int ast_connector_init(struct drm_device *dev) |
| 781 | { | 781 | { |
| 782 | struct ast_connector *ast_connector; | 782 | struct ast_connector *ast_connector; |
| 783 | struct drm_connector *connector; | 783 | struct drm_connector *connector; |
| @@ -810,7 +810,7 @@ int ast_connector_init(struct drm_device *dev) | |||
| 810 | } | 810 | } |
| 811 | 811 | ||
| 812 | /* allocate cursor cache and pin at start of VRAM */ | 812 | /* allocate cursor cache and pin at start of VRAM */ |
| 813 | int ast_cursor_init(struct drm_device *dev) | 813 | static int ast_cursor_init(struct drm_device *dev) |
| 814 | { | 814 | { |
| 815 | struct ast_private *ast = dev->dev_private; | 815 | struct ast_private *ast = dev->dev_private; |
| 816 | int size; | 816 | int size; |
| @@ -847,7 +847,7 @@ fail: | |||
| 847 | return ret; | 847 | return ret; |
| 848 | } | 848 | } |
| 849 | 849 | ||
| 850 | void ast_cursor_fini(struct drm_device *dev) | 850 | static void ast_cursor_fini(struct drm_device *dev) |
| 851 | { | 851 | { |
| 852 | struct ast_private *ast = dev->dev_private; | 852 | struct ast_private *ast = dev->dev_private; |
| 853 | ttm_bo_kunmap(&ast->cache_kmap); | 853 | ttm_bo_kunmap(&ast->cache_kmap); |
| @@ -965,7 +965,7 @@ static void ast_i2c_destroy(struct ast_i2c_chan *i2c) | |||
| 965 | kfree(i2c); | 965 | kfree(i2c); |
| 966 | } | 966 | } |
| 967 | 967 | ||
| 968 | void ast_show_cursor(struct drm_crtc *crtc) | 968 | static void ast_show_cursor(struct drm_crtc *crtc) |
| 969 | { | 969 | { |
| 970 | struct ast_private *ast = crtc->dev->dev_private; | 970 | struct ast_private *ast = crtc->dev->dev_private; |
| 971 | u8 jreg; | 971 | u8 jreg; |
| @@ -976,7 +976,7 @@ void ast_show_cursor(struct drm_crtc *crtc) | |||
| 976 | ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, jreg); | 976 | ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, jreg); |
| 977 | } | 977 | } |
| 978 | 978 | ||
| 979 | void ast_hide_cursor(struct drm_crtc *crtc) | 979 | static void ast_hide_cursor(struct drm_crtc *crtc) |
| 980 | { | 980 | { |
| 981 | struct ast_private *ast = crtc->dev->dev_private; | 981 | struct ast_private *ast = crtc->dev->dev_private; |
| 982 | ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, 0x00); | 982 | ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, 0x00); |
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c index 32aecb34dbce..4ea9b17ac17a 100644 --- a/drivers/gpu/drm/ast/ast_ttm.c +++ b/drivers/gpu/drm/ast/ast_ttm.c | |||
| @@ -80,7 +80,7 @@ static int ast_ttm_global_init(struct ast_private *ast) | |||
| 80 | return 0; | 80 | return 0; |
| 81 | } | 81 | } |
| 82 | 82 | ||
| 83 | void | 83 | static void |
| 84 | ast_ttm_global_release(struct ast_private *ast) | 84 | ast_ttm_global_release(struct ast_private *ast) |
| 85 | { | 85 | { |
| 86 | if (ast->ttm.mem_global_ref.release == NULL) | 86 | if (ast->ttm.mem_global_ref.release == NULL) |
| @@ -102,7 +102,7 @@ static void ast_bo_ttm_destroy(struct ttm_buffer_object *tbo) | |||
| 102 | kfree(bo); | 102 | kfree(bo); |
| 103 | } | 103 | } |
| 104 | 104 | ||
| 105 | bool ast_ttm_bo_is_ast_bo(struct ttm_buffer_object *bo) | 105 | static bool ast_ttm_bo_is_ast_bo(struct ttm_buffer_object *bo) |
| 106 | { | 106 | { |
| 107 | if (bo->destroy == &ast_bo_ttm_destroy) | 107 | if (bo->destroy == &ast_bo_ttm_destroy) |
| 108 | return true; | 108 | return true; |
| @@ -208,7 +208,7 @@ static struct ttm_backend_func ast_tt_backend_func = { | |||
| 208 | }; | 208 | }; |
| 209 | 209 | ||
| 210 | 210 | ||
| 211 | struct ttm_tt *ast_ttm_tt_create(struct ttm_bo_device *bdev, | 211 | static struct ttm_tt *ast_ttm_tt_create(struct ttm_bo_device *bdev, |
| 212 | unsigned long size, uint32_t page_flags, | 212 | unsigned long size, uint32_t page_flags, |
| 213 | struct page *dummy_read_page) | 213 | struct page *dummy_read_page) |
| 214 | { | 214 | { |
diff --git a/drivers/gpu/drm/bochs/Kconfig b/drivers/gpu/drm/bochs/Kconfig new file mode 100644 index 000000000000..c8fcf12019f0 --- /dev/null +++ b/drivers/gpu/drm/bochs/Kconfig | |||
| @@ -0,0 +1,11 @@ | |||
| 1 | config DRM_BOCHS | ||
| 2 | tristate "DRM Support for bochs dispi vga interface (qemu stdvga)" | ||
| 3 | depends on DRM && PCI | ||
| 4 | select DRM_KMS_HELPER | ||
| 5 | select FB_SYS_FILLRECT | ||
| 6 | select FB_SYS_COPYAREA | ||
| 7 | select FB_SYS_IMAGEBLIT | ||
| 8 | select DRM_TTM | ||
| 9 | help | ||
| 10 | Choose this option for qemu. | ||
| 11 | If M is selected the module will be called bochs-drm. | ||
diff --git a/drivers/gpu/drm/bochs/Makefile b/drivers/gpu/drm/bochs/Makefile new file mode 100644 index 000000000000..844a55614920 --- /dev/null +++ b/drivers/gpu/drm/bochs/Makefile | |||
| @@ -0,0 +1,4 @@ | |||
| 1 | ccflags-y := -Iinclude/drm | ||
| 2 | bochs-drm-y := bochs_drv.o bochs_mm.o bochs_kms.o bochs_fbdev.o bochs_hw.o | ||
| 3 | |||
| 4 | obj-$(CONFIG_DRM_BOCHS) += bochs-drm.o | ||
diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h new file mode 100644 index 000000000000..741965c001a6 --- /dev/null +++ b/drivers/gpu/drm/bochs/bochs.h | |||
| @@ -0,0 +1,164 @@ | |||
| 1 | #include <linux/io.h> | ||
| 2 | #include <linux/fb.h> | ||
| 3 | |||
| 4 | #include <drm/drmP.h> | ||
| 5 | #include <drm/drm_crtc.h> | ||
| 6 | #include <drm/drm_crtc_helper.h> | ||
| 7 | #include <drm/drm_fb_helper.h> | ||
| 8 | |||
| 9 | #include <ttm/ttm_bo_driver.h> | ||
| 10 | #include <ttm/ttm_page_alloc.h> | ||
| 11 | |||
| 12 | /* ---------------------------------------------------------------------- */ | ||
| 13 | |||
| 14 | #define VBE_DISPI_IOPORT_INDEX 0x01CE | ||
| 15 | #define VBE_DISPI_IOPORT_DATA 0x01CF | ||
| 16 | |||
| 17 | #define VBE_DISPI_INDEX_ID 0x0 | ||
| 18 | #define VBE_DISPI_INDEX_XRES 0x1 | ||
| 19 | #define VBE_DISPI_INDEX_YRES 0x2 | ||
| 20 | #define VBE_DISPI_INDEX_BPP 0x3 | ||
| 21 | #define VBE_DISPI_INDEX_ENABLE 0x4 | ||
| 22 | #define VBE_DISPI_INDEX_BANK 0x5 | ||
| 23 | #define VBE_DISPI_INDEX_VIRT_WIDTH 0x6 | ||
| 24 | #define VBE_DISPI_INDEX_VIRT_HEIGHT 0x7 | ||
| 25 | #define VBE_DISPI_INDEX_X_OFFSET 0x8 | ||
| 26 | #define VBE_DISPI_INDEX_Y_OFFSET 0x9 | ||
| 27 | #define VBE_DISPI_INDEX_VIDEO_MEMORY_64K 0xa | ||
| 28 | |||
| 29 | #define VBE_DISPI_ID0 0xB0C0 | ||
| 30 | #define VBE_DISPI_ID1 0xB0C1 | ||
| 31 | #define VBE_DISPI_ID2 0xB0C2 | ||
| 32 | #define VBE_DISPI_ID3 0xB0C3 | ||
| 33 | #define VBE_DISPI_ID4 0xB0C4 | ||
| 34 | #define VBE_DISPI_ID5 0xB0C5 | ||
| 35 | |||
| 36 | #define VBE_DISPI_DISABLED 0x00 | ||
| 37 | #define VBE_DISPI_ENABLED 0x01 | ||
| 38 | #define VBE_DISPI_GETCAPS 0x02 | ||
| 39 | #define VBE_DISPI_8BIT_DAC 0x20 | ||
| 40 | #define VBE_DISPI_LFB_ENABLED 0x40 | ||
| 41 | #define VBE_DISPI_NOCLEARMEM 0x80 | ||
| 42 | |||
| 43 | /* ---------------------------------------------------------------------- */ | ||
| 44 | |||
| 45 | enum bochs_types { | ||
| 46 | BOCHS_QEMU_STDVGA, | ||
| 47 | BOCHS_UNKNOWN, | ||
| 48 | }; | ||
| 49 | |||
| 50 | struct bochs_framebuffer { | ||
| 51 | struct drm_framebuffer base; | ||
| 52 | struct drm_gem_object *obj; | ||
| 53 | }; | ||
| 54 | |||
| 55 | struct bochs_device { | ||
| 56 | /* hw */ | ||
| 57 | void __iomem *mmio; | ||
| 58 | int ioports; | ||
| 59 | void __iomem *fb_map; | ||
| 60 | unsigned long fb_base; | ||
| 61 | unsigned long fb_size; | ||
| 62 | |||
| 63 | /* mode */ | ||
| 64 | u16 xres; | ||
| 65 | u16 yres; | ||
| 66 | u16 yres_virtual; | ||
| 67 | u32 stride; | ||
| 68 | u32 bpp; | ||
| 69 | |||
| 70 | /* drm */ | ||
| 71 | struct drm_device *dev; | ||
| 72 | struct drm_crtc crtc; | ||
| 73 | struct drm_encoder encoder; | ||
| 74 | struct drm_connector connector; | ||
| 75 | bool mode_config_initialized; | ||
| 76 | |||
| 77 | /* ttm */ | ||
| 78 | struct { | ||
| 79 | struct drm_global_reference mem_global_ref; | ||
| 80 | struct ttm_bo_global_ref bo_global_ref; | ||
| 81 | struct ttm_bo_device bdev; | ||
| 82 | bool initialized; | ||
| 83 | } ttm; | ||
| 84 | |||
| 85 | /* fbdev */ | ||
| 86 | struct { | ||
| 87 | struct bochs_framebuffer gfb; | ||
| 88 | struct drm_fb_helper helper; | ||
| 89 | int size; | ||
| 90 | int x1, y1, x2, y2; /* dirty rect */ | ||
| 91 | spinlock_t dirty_lock; | ||
| 92 | bool initialized; | ||
| 93 | } fb; | ||
| 94 | }; | ||
| 95 | |||
| 96 | #define to_bochs_framebuffer(x) container_of(x, struct bochs_framebuffer, base) | ||
| 97 | |||
| 98 | struct bochs_bo { | ||
| 99 | struct ttm_buffer_object bo; | ||
| 100 | struct ttm_placement placement; | ||
| 101 | struct ttm_bo_kmap_obj kmap; | ||
| 102 | struct drm_gem_object gem; | ||
| 103 | u32 placements[3]; | ||
| 104 | int pin_count; | ||
| 105 | }; | ||
| 106 | |||
| 107 | static inline struct bochs_bo *bochs_bo(struct ttm_buffer_object *bo) | ||
| 108 | { | ||
| 109 | return container_of(bo, struct bochs_bo, bo); | ||
| 110 | } | ||
| 111 | |||
| 112 | static inline struct bochs_bo *gem_to_bochs_bo(struct drm_gem_object *gem) | ||
| 113 | { | ||
| 114 | return container_of(gem, struct bochs_bo, gem); | ||
| 115 | } | ||
| 116 | |||
| 117 | #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT) | ||
| 118 | |||
| 119 | static inline u64 bochs_bo_mmap_offset(struct bochs_bo *bo) | ||
| 120 | { | ||
| 121 | return drm_vma_node_offset_addr(&bo->bo.vma_node); | ||
| 122 | } | ||
| 123 | |||
| 124 | /* ---------------------------------------------------------------------- */ | ||
| 125 | |||
| 126 | /* bochs_hw.c */ | ||
| 127 | int bochs_hw_init(struct drm_device *dev, uint32_t flags); | ||
| 128 | void bochs_hw_fini(struct drm_device *dev); | ||
| 129 | |||
| 130 | void bochs_hw_setmode(struct bochs_device *bochs, | ||
| 131 | struct drm_display_mode *mode); | ||
| 132 | void bochs_hw_setbase(struct bochs_device *bochs, | ||
| 133 | int x, int y, u64 addr); | ||
| 134 | |||
| 135 | /* bochs_mm.c */ | ||
| 136 | int bochs_mm_init(struct bochs_device *bochs); | ||
| 137 | void bochs_mm_fini(struct bochs_device *bochs); | ||
| 138 | int bochs_mmap(struct file *filp, struct vm_area_struct *vma); | ||
| 139 | |||
| 140 | int bochs_gem_create(struct drm_device *dev, u32 size, bool iskernel, | ||
| 141 | struct drm_gem_object **obj); | ||
| 142 | int bochs_gem_init_object(struct drm_gem_object *obj); | ||
| 143 | void bochs_gem_free_object(struct drm_gem_object *obj); | ||
| 144 | int bochs_dumb_create(struct drm_file *file, struct drm_device *dev, | ||
| 145 | struct drm_mode_create_dumb *args); | ||
| 146 | int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev, | ||
| 147 | uint32_t handle, uint64_t *offset); | ||
| 148 | |||
| 149 | int bochs_framebuffer_init(struct drm_device *dev, | ||
| 150 | struct bochs_framebuffer *gfb, | ||
| 151 | struct drm_mode_fb_cmd2 *mode_cmd, | ||
| 152 | struct drm_gem_object *obj); | ||
| 153 | int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr); | ||
| 154 | int bochs_bo_unpin(struct bochs_bo *bo); | ||
| 155 | |||
| 156 | extern const struct drm_mode_config_funcs bochs_mode_funcs; | ||
| 157 | |||
| 158 | /* bochs_kms.c */ | ||
| 159 | int bochs_kms_init(struct bochs_device *bochs); | ||
| 160 | void bochs_kms_fini(struct bochs_device *bochs); | ||
| 161 | |||
| 162 | /* bochs_fbdev.c */ | ||
| 163 | int bochs_fbdev_init(struct bochs_device *bochs); | ||
| 164 | void bochs_fbdev_fini(struct bochs_device *bochs); | ||
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c new file mode 100644 index 000000000000..395bba261c9a --- /dev/null +++ b/drivers/gpu/drm/bochs/bochs_drv.c | |||
| @@ -0,0 +1,178 @@ | |||
| 1 | /* | ||
| 2 | * This program is free software; you can redistribute it and/or modify | ||
| 3 | * it under the terms of the GNU General Public License as published by | ||
| 4 | * the Free Software Foundation; either version 2 of the License, or | ||
| 5 | * (at your option) any later version. | ||
| 6 | */ | ||
| 7 | |||
| 8 | #include <linux/mm.h> | ||
| 9 | #include <linux/module.h> | ||
| 10 | #include <linux/slab.h> | ||
| 11 | |||
| 12 | #include "bochs.h" | ||
| 13 | |||
| 14 | static bool enable_fbdev = true; | ||
| 15 | module_param_named(fbdev, enable_fbdev, bool, 0444); | ||
| 16 | MODULE_PARM_DESC(fbdev, "register fbdev device"); | ||
| 17 | |||
| 18 | /* ---------------------------------------------------------------------- */ | ||
| 19 | /* drm interface */ | ||
| 20 | |||
| 21 | static int bochs_unload(struct drm_device *dev) | ||
| 22 | { | ||
| 23 | struct bochs_device *bochs = dev->dev_private; | ||
| 24 | |||
| 25 | bochs_fbdev_fini(bochs); | ||
| 26 | bochs_kms_fini(bochs); | ||
| 27 | bochs_mm_fini(bochs); | ||
| 28 | bochs_hw_fini(dev); | ||
| 29 | kfree(bochs); | ||
| 30 | dev->dev_private = NULL; | ||
| 31 | return 0; | ||
| 32 | } | ||
| 33 | |||
| 34 | static int bochs_load(struct drm_device *dev, unsigned long flags) | ||
| 35 | { | ||
| 36 | struct bochs_device *bochs; | ||
| 37 | int ret; | ||
| 38 | |||
| 39 | bochs = kzalloc(sizeof(*bochs), GFP_KERNEL); | ||
| 40 | if (bochs == NULL) | ||
| 41 | return -ENOMEM; | ||
| 42 | dev->dev_private = bochs; | ||
| 43 | bochs->dev = dev; | ||
| 44 | |||
| 45 | ret = bochs_hw_init(dev, flags); | ||
| 46 | if (ret) | ||
| 47 | goto err; | ||
| 48 | |||
| 49 | ret = bochs_mm_init(bochs); | ||
| 50 | if (ret) | ||
| 51 | goto err; | ||
| 52 | |||
| 53 | ret = bochs_kms_init(bochs); | ||
| 54 | if (ret) | ||
| 55 | goto err; | ||
| 56 | |||
| 57 | if (enable_fbdev) | ||
| 58 | bochs_fbdev_init(bochs); | ||
| 59 | |||
| 60 | return 0; | ||
| 61 | |||
| 62 | err: | ||
| 63 | bochs_unload(dev); | ||
| 64 | return ret; | ||
| 65 | } | ||
| 66 | |||
| 67 | static const struct file_operations bochs_fops = { | ||
| 68 | .owner = THIS_MODULE, | ||
| 69 | .open = drm_open, | ||
| 70 | .release = drm_release, | ||
| 71 | .unlocked_ioctl = drm_ioctl, | ||
| 72 | #ifdef CONFIG_COMPAT | ||
| 73 | .compat_ioctl = drm_compat_ioctl, | ||
| 74 | #endif | ||
| 75 | .poll = drm_poll, | ||
| 76 | .read = drm_read, | ||
| 77 | .llseek = no_llseek, | ||
| 78 | .mmap = bochs_mmap, | ||
| 79 | }; | ||
| 80 | |||
| 81 | static struct drm_driver bochs_driver = { | ||
| 82 | .driver_features = DRIVER_GEM | DRIVER_MODESET, | ||
| 83 | .load = bochs_load, | ||
| 84 | .unload = bochs_unload, | ||
| 85 | .fops = &bochs_fops, | ||
| 86 | .name = "bochs-drm", | ||
| 87 | .desc = "bochs dispi vga interface (qemu stdvga)", | ||
| 88 | .date = "20130925", | ||
| 89 | .major = 1, | ||
| 90 | .minor = 0, | ||
| 91 | .gem_free_object = bochs_gem_free_object, | ||
| 92 | .dumb_create = bochs_dumb_create, | ||
| 93 | .dumb_map_offset = bochs_dumb_mmap_offset, | ||
| 94 | .dumb_destroy = drm_gem_dumb_destroy, | ||
| 95 | }; | ||
| 96 | |||
| 97 | /* ---------------------------------------------------------------------- */ | ||
| 98 | /* pci interface */ | ||
| 99 | |||
| 100 | static int bochs_kick_out_firmware_fb(struct pci_dev *pdev) | ||
| 101 | { | ||
| 102 | struct apertures_struct *ap; | ||
| 103 | |||
| 104 | ap = alloc_apertures(1); | ||
| 105 | if (!ap) | ||
| 106 | return -ENOMEM; | ||
| 107 | |||
| 108 | ap->ranges[0].base = pci_resource_start(pdev, 0); | ||
| 109 | ap->ranges[0].size = pci_resource_len(pdev, 0); | ||
| 110 | remove_conflicting_framebuffers(ap, "bochsdrmfb", false); | ||
| 111 | kfree(ap); | ||
| 112 | |||
| 113 | return 0; | ||
| 114 | } | ||
| 115 | |||
| 116 | static int bochs_pci_probe(struct pci_dev *pdev, | ||
| 117 | const struct pci_device_id *ent) | ||
| 118 | { | ||
| 119 | int ret; | ||
| 120 | |||
| 121 | ret = bochs_kick_out_firmware_fb(pdev); | ||
| 122 | if (ret) | ||
| 123 | return ret; | ||
| 124 | |||
| 125 | return drm_get_pci_dev(pdev, ent, &bochs_driver); | ||
| 126 | } | ||
| 127 | |||
| 128 | static void bochs_pci_remove(struct pci_dev *pdev) | ||
| 129 | { | ||
| 130 | struct drm_device *dev = pci_get_drvdata(pdev); | ||
| 131 | |||
| 132 | drm_put_dev(dev); | ||
| 133 | } | ||
| 134 | |||
| 135 | static DEFINE_PCI_DEVICE_TABLE(bochs_pci_tbl) = { | ||
| 136 | { | ||
| 137 | .vendor = 0x1234, | ||
| 138 | .device = 0x1111, | ||
| 139 | .subvendor = 0x1af4, | ||
| 140 | .subdevice = 0x1100, | ||
| 141 | .driver_data = BOCHS_QEMU_STDVGA, | ||
| 142 | }, | ||
| 143 | { | ||
| 144 | .vendor = 0x1234, | ||
| 145 | .device = 0x1111, | ||
| 146 | .subvendor = PCI_ANY_ID, | ||
| 147 | .subdevice = PCI_ANY_ID, | ||
| 148 | .driver_data = BOCHS_UNKNOWN, | ||
| 149 | }, | ||
| 150 | { /* end of list */ } | ||
| 151 | }; | ||
| 152 | |||
| 153 | static struct pci_driver bochs_pci_driver = { | ||
| 154 | .name = "bochs-drm", | ||
| 155 | .id_table = bochs_pci_tbl, | ||
| 156 | .probe = bochs_pci_probe, | ||
| 157 | .remove = bochs_pci_remove, | ||
| 158 | }; | ||
| 159 | |||
| 160 | /* ---------------------------------------------------------------------- */ | ||
| 161 | /* module init/exit */ | ||
| 162 | |||
| 163 | static int __init bochs_init(void) | ||
| 164 | { | ||
| 165 | return drm_pci_init(&bochs_driver, &bochs_pci_driver); | ||
| 166 | } | ||
| 167 | |||
| 168 | static void __exit bochs_exit(void) | ||
| 169 | { | ||
| 170 | drm_pci_exit(&bochs_driver, &bochs_pci_driver); | ||
| 171 | } | ||
| 172 | |||
| 173 | module_init(bochs_init); | ||
| 174 | module_exit(bochs_exit); | ||
| 175 | |||
| 176 | MODULE_DEVICE_TABLE(pci, bochs_pci_tbl); | ||
| 177 | MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>"); | ||
| 178 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c new file mode 100644 index 000000000000..4da5206b7cc9 --- /dev/null +++ b/drivers/gpu/drm/bochs/bochs_fbdev.c | |||
| @@ -0,0 +1,215 @@ | |||
| 1 | /* | ||
| 2 | * This program is free software; you can redistribute it and/or modify | ||
| 3 | * it under the terms of the GNU General Public License as published by | ||
| 4 | * the Free Software Foundation; either version 2 of the License, or | ||
| 5 | * (at your option) any later version. | ||
| 6 | */ | ||
| 7 | |||
| 8 | #include "bochs.h" | ||
| 9 | |||
| 10 | /* ---------------------------------------------------------------------- */ | ||
| 11 | |||
| 12 | static struct fb_ops bochsfb_ops = { | ||
| 13 | .owner = THIS_MODULE, | ||
| 14 | .fb_check_var = drm_fb_helper_check_var, | ||
| 15 | .fb_set_par = drm_fb_helper_set_par, | ||
| 16 | .fb_fillrect = sys_fillrect, | ||
| 17 | .fb_copyarea = sys_copyarea, | ||
| 18 | .fb_imageblit = sys_imageblit, | ||
| 19 | .fb_pan_display = drm_fb_helper_pan_display, | ||
| 20 | .fb_blank = drm_fb_helper_blank, | ||
| 21 | .fb_setcmap = drm_fb_helper_setcmap, | ||
| 22 | }; | ||
| 23 | |||
| 24 | static int bochsfb_create_object(struct bochs_device *bochs, | ||
| 25 | struct drm_mode_fb_cmd2 *mode_cmd, | ||
| 26 | struct drm_gem_object **gobj_p) | ||
| 27 | { | ||
| 28 | struct drm_device *dev = bochs->dev; | ||
| 29 | struct drm_gem_object *gobj; | ||
| 30 | u32 size; | ||
| 31 | int ret = 0; | ||
| 32 | |||
| 33 | size = mode_cmd->pitches[0] * mode_cmd->height; | ||
| 34 | ret = bochs_gem_create(dev, size, true, &gobj); | ||
| 35 | if (ret) | ||
| 36 | return ret; | ||
| 37 | |||
| 38 | *gobj_p = gobj; | ||
| 39 | return ret; | ||
| 40 | } | ||
| 41 | |||
| 42 | static int bochsfb_create(struct drm_fb_helper *helper, | ||
| 43 | struct drm_fb_helper_surface_size *sizes) | ||
| 44 | { | ||
| 45 | struct bochs_device *bochs = | ||
| 46 | container_of(helper, struct bochs_device, fb.helper); | ||
| 47 | struct drm_device *dev = bochs->dev; | ||
| 48 | struct fb_info *info; | ||
| 49 | struct drm_framebuffer *fb; | ||
| 50 | struct drm_mode_fb_cmd2 mode_cmd; | ||
| 51 | struct device *device = &dev->pdev->dev; | ||
| 52 | struct drm_gem_object *gobj = NULL; | ||
| 53 | struct bochs_bo *bo = NULL; | ||
| 54 | int size, ret; | ||
| 55 | |||
| 56 | if (sizes->surface_bpp != 32) | ||
| 57 | return -EINVAL; | ||
| 58 | |||
| 59 | mode_cmd.width = sizes->surface_width; | ||
| 60 | mode_cmd.height = sizes->surface_height; | ||
| 61 | mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8); | ||
| 62 | mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, | ||
| 63 | sizes->surface_depth); | ||
| 64 | size = mode_cmd.pitches[0] * mode_cmd.height; | ||
| 65 | |||
| 66 | /* alloc, pin & map bo */ | ||
| 67 | ret = bochsfb_create_object(bochs, &mode_cmd, &gobj); | ||
| 68 | if (ret) { | ||
| 69 | DRM_ERROR("failed to create fbcon backing object %d\n", ret); | ||
| 70 | return ret; | ||
| 71 | } | ||
| 72 | |||
| 73 | bo = gem_to_bochs_bo(gobj); | ||
| 74 | |||
| 75 | ret = ttm_bo_reserve(&bo->bo, true, false, false, 0); | ||
| 76 | if (ret) | ||
| 77 | return ret; | ||
| 78 | |||
| 79 | ret = bochs_bo_pin(bo, TTM_PL_FLAG_VRAM, NULL); | ||
| 80 | if (ret) { | ||
| 81 | DRM_ERROR("failed to pin fbcon\n"); | ||
| 82 | ttm_bo_unreserve(&bo->bo); | ||
| 83 | return ret; | ||
| 84 | } | ||
| 85 | |||
| 86 | ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, | ||
| 87 | &bo->kmap); | ||
| 88 | if (ret) { | ||
| 89 | DRM_ERROR("failed to kmap fbcon\n"); | ||
| 90 | ttm_bo_unreserve(&bo->bo); | ||
| 91 | return ret; | ||
| 92 | } | ||
| 93 | |||
| 94 | ttm_bo_unreserve(&bo->bo); | ||
| 95 | |||
| 96 | /* init fb device */ | ||
| 97 | info = framebuffer_alloc(0, device); | ||
| 98 | if (info == NULL) | ||
| 99 | return -ENOMEM; | ||
| 100 | |||
| 101 | info->par = &bochs->fb.helper; | ||
| 102 | |||
| 103 | ret = bochs_framebuffer_init(bochs->dev, &bochs->fb.gfb, &mode_cmd, gobj); | ||
| 104 | if (ret) | ||
| 105 | return ret; | ||
| 106 | |||
| 107 | bochs->fb.size = size; | ||
| 108 | |||
| 109 | /* setup helper */ | ||
| 110 | fb = &bochs->fb.gfb.base; | ||
| 111 | bochs->fb.helper.fb = fb; | ||
| 112 | bochs->fb.helper.fbdev = info; | ||
| 113 | |||
| 114 | strcpy(info->fix.id, "bochsdrmfb"); | ||
| 115 | |||
| 116 | info->flags = FBINFO_DEFAULT; | ||
| 117 | info->fbops = &bochsfb_ops; | ||
| 118 | |||
| 119 | drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); | ||
| 120 | drm_fb_helper_fill_var(info, &bochs->fb.helper, sizes->fb_width, | ||
| 121 | sizes->fb_height); | ||
| 122 | |||
| 123 | info->screen_base = bo->kmap.virtual; | ||
| 124 | info->screen_size = size; | ||
| 125 | |||
| 126 | #if 0 | ||
| 127 | /* FIXME: get this right for mmap(/dev/fb0) */ | ||
| 128 | info->fix.smem_start = bochs_bo_mmap_offset(bo); | ||
| 129 | info->fix.smem_len = size; | ||
| 130 | #endif | ||
| 131 | |||
| 132 | ret = fb_alloc_cmap(&info->cmap, 256, 0); | ||
| 133 | if (ret) { | ||
| 134 | DRM_ERROR("%s: can't allocate color map\n", info->fix.id); | ||
| 135 | return -ENOMEM; | ||
| 136 | } | ||
| 137 | |||
| 138 | return 0; | ||
| 139 | } | ||
| 140 | |||
static int bochs_fbdev_destroy(struct bochs_device *bochs)
{
	struct bochs_framebuffer *gfb = &bochs->fb.gfb;
	struct fb_info *info;

	DRM_DEBUG_DRIVER("\n");

	/* Tear down the fbdev emulation: unregister the fb_info first so
	 * userspace can no longer reach the framebuffer, then drop the GEM
	 * object backing it, and finally clean up the helper and the DRM
	 * framebuffer itself. */
	if (bochs->fb.helper.fbdev) {
		info = bochs->fb.helper.fbdev;

		unregister_framebuffer(info);
		if (info->cmap.len)
			fb_dealloc_cmap(&info->cmap);
		framebuffer_release(info);
	}

	if (gfb->obj) {
		drm_gem_object_unreference_unlocked(gfb->obj);
		gfb->obj = NULL;
	}

	drm_fb_helper_fini(&bochs->fb.helper);
	drm_framebuffer_unregister_private(&gfb->base);
	drm_framebuffer_cleanup(&gfb->base);

	/* always succeeds; int return kept for symmetry with init paths */
	return 0;
}
| 168 | |||
/* fbdev gamma hook: the bochs dispi interface has no LUT, so this is a
 * deliberate no-op. */
void bochs_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
			u16 blue, int regno)
{
}
| 173 | |||
/* fbdev gamma readback: no LUT exists, so just echo the register index
 * for all three channels. */
void bochs_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
			u16 *blue, int regno)
{
	*red = regno;
	*green = regno;
	*blue = regno;
}
| 181 | |||
/* fbdev helper callbacks; .fb_probe allocates the framebuffer when the
 * initial config is set up. */
static struct drm_fb_helper_funcs bochs_fb_helper_funcs = {
	.gamma_set = bochs_fb_gamma_set,
	.gamma_get = bochs_fb_gamma_get,
	.fb_probe = bochsfb_create,
};
| 187 | |||
/* Set up fbdev emulation on top of the single crtc/connector pair. */
int bochs_fbdev_init(struct bochs_device *bochs)
{
	int ret;

	bochs->fb.helper.funcs = &bochs_fb_helper_funcs;
	spin_lock_init(&bochs->fb.dirty_lock);

	/* one crtc, one connector */
	ret = drm_fb_helper_init(bochs->dev, &bochs->fb.helper,
				 1, 1);
	if (ret)
		return ret;

	drm_fb_helper_single_add_all_connectors(&bochs->fb.helper);
	drm_helper_disable_unused_functions(bochs->dev);
	/* 32 bpp preferred; this ends up calling bochsfb_create via
	 * the .fb_probe hook */
	drm_fb_helper_initial_config(&bochs->fb.helper, 32);

	bochs->fb.initialized = true;
	return 0;
}
| 207 | |||
| 208 | void bochs_fbdev_fini(struct bochs_device *bochs) | ||
| 209 | { | ||
| 210 | if (!bochs->fb.initialized) | ||
| 211 | return; | ||
| 212 | |||
| 213 | bochs_fbdev_destroy(bochs); | ||
| 214 | bochs->fb.initialized = false; | ||
| 215 | } | ||
diff --git a/drivers/gpu/drm/bochs/bochs_hw.c b/drivers/gpu/drm/bochs/bochs_hw.c new file mode 100644 index 000000000000..dbe619e6aab4 --- /dev/null +++ b/drivers/gpu/drm/bochs/bochs_hw.c | |||
| @@ -0,0 +1,177 @@ | |||
| 1 | /* | ||
| 2 | * This program is free software; you can redistribute it and/or modify | ||
| 3 | * it under the terms of the GNU General Public License as published by | ||
| 4 | * the Free Software Foundation; either version 2 of the License, or | ||
| 5 | * (at your option) any later version. | ||
| 6 | */ | ||
| 7 | |||
| 8 | #include "bochs.h" | ||
| 9 | |||
| 10 | /* ---------------------------------------------------------------------- */ | ||
| 11 | |||
| 12 | static void bochs_vga_writeb(struct bochs_device *bochs, u16 ioport, u8 val) | ||
| 13 | { | ||
| 14 | if (WARN_ON(ioport < 0x3c0 || ioport > 0x3df)) | ||
| 15 | return; | ||
| 16 | |||
| 17 | if (bochs->mmio) { | ||
| 18 | int offset = ioport - 0x3c0 + 0x400; | ||
| 19 | writeb(val, bochs->mmio + offset); | ||
| 20 | } else { | ||
| 21 | outb(val, ioport); | ||
| 22 | } | ||
| 23 | } | ||
| 24 | |||
| 25 | static u16 bochs_dispi_read(struct bochs_device *bochs, u16 reg) | ||
| 26 | { | ||
| 27 | u16 ret = 0; | ||
| 28 | |||
| 29 | if (bochs->mmio) { | ||
| 30 | int offset = 0x500 + (reg << 1); | ||
| 31 | ret = readw(bochs->mmio + offset); | ||
| 32 | } else { | ||
| 33 | outw(reg, VBE_DISPI_IOPORT_INDEX); | ||
| 34 | ret = inw(VBE_DISPI_IOPORT_DATA); | ||
| 35 | } | ||
| 36 | return ret; | ||
| 37 | } | ||
| 38 | |||
| 39 | static void bochs_dispi_write(struct bochs_device *bochs, u16 reg, u16 val) | ||
| 40 | { | ||
| 41 | if (bochs->mmio) { | ||
| 42 | int offset = 0x500 + (reg << 1); | ||
| 43 | writew(val, bochs->mmio + offset); | ||
| 44 | } else { | ||
| 45 | outw(reg, VBE_DISPI_IOPORT_INDEX); | ||
| 46 | outw(val, VBE_DISPI_IOPORT_DATA); | ||
| 47 | } | ||
| 48 | } | ||
| 49 | |||
| 50 | int bochs_hw_init(struct drm_device *dev, uint32_t flags) | ||
| 51 | { | ||
| 52 | struct bochs_device *bochs = dev->dev_private; | ||
| 53 | struct pci_dev *pdev = dev->pdev; | ||
| 54 | unsigned long addr, size, mem, ioaddr, iosize; | ||
| 55 | u16 id; | ||
| 56 | |||
| 57 | if (/* (ent->driver_data == BOCHS_QEMU_STDVGA) && */ | ||
| 58 | (pdev->resource[2].flags & IORESOURCE_MEM)) { | ||
| 59 | /* mmio bar with vga and bochs registers present */ | ||
| 60 | if (pci_request_region(pdev, 2, "bochs-drm") != 0) { | ||
| 61 | DRM_ERROR("Cannot request mmio region\n"); | ||
| 62 | return -EBUSY; | ||
| 63 | } | ||
| 64 | ioaddr = pci_resource_start(pdev, 2); | ||
| 65 | iosize = pci_resource_len(pdev, 2); | ||
| 66 | bochs->mmio = ioremap(ioaddr, iosize); | ||
| 67 | if (bochs->mmio == NULL) { | ||
| 68 | DRM_ERROR("Cannot map mmio region\n"); | ||
| 69 | return -ENOMEM; | ||
| 70 | } | ||
| 71 | } else { | ||
| 72 | ioaddr = VBE_DISPI_IOPORT_INDEX; | ||
| 73 | iosize = 2; | ||
| 74 | if (!request_region(ioaddr, iosize, "bochs-drm")) { | ||
| 75 | DRM_ERROR("Cannot request ioports\n"); | ||
| 76 | return -EBUSY; | ||
| 77 | } | ||
| 78 | bochs->ioports = 1; | ||
| 79 | } | ||
| 80 | |||
| 81 | id = bochs_dispi_read(bochs, VBE_DISPI_INDEX_ID); | ||
| 82 | mem = bochs_dispi_read(bochs, VBE_DISPI_INDEX_VIDEO_MEMORY_64K) | ||
| 83 | * 64 * 1024; | ||
| 84 | if ((id & 0xfff0) != VBE_DISPI_ID0) { | ||
| 85 | DRM_ERROR("ID mismatch\n"); | ||
| 86 | return -ENODEV; | ||
| 87 | } | ||
| 88 | |||
| 89 | if ((pdev->resource[0].flags & IORESOURCE_MEM) == 0) | ||
| 90 | return -ENODEV; | ||
| 91 | addr = pci_resource_start(pdev, 0); | ||
| 92 | size = pci_resource_len(pdev, 0); | ||
| 93 | if (addr == 0) | ||
| 94 | return -ENODEV; | ||
| 95 | if (size != mem) { | ||
| 96 | DRM_ERROR("Size mismatch: pci=%ld, bochs=%ld\n", | ||
| 97 | size, mem); | ||
| 98 | size = min(size, mem); | ||
| 99 | } | ||
| 100 | |||
| 101 | if (pci_request_region(pdev, 0, "bochs-drm") != 0) { | ||
| 102 | DRM_ERROR("Cannot request framebuffer\n"); | ||
| 103 | return -EBUSY; | ||
| 104 | } | ||
| 105 | |||
| 106 | bochs->fb_map = ioremap(addr, size); | ||
| 107 | if (bochs->fb_map == NULL) { | ||
| 108 | DRM_ERROR("Cannot map framebuffer\n"); | ||
| 109 | return -ENOMEM; | ||
| 110 | } | ||
| 111 | bochs->fb_base = addr; | ||
| 112 | bochs->fb_size = size; | ||
| 113 | |||
| 114 | DRM_INFO("Found bochs VGA, ID 0x%x.\n", id); | ||
| 115 | DRM_INFO("Framebuffer size %ld kB @ 0x%lx, %s @ 0x%lx.\n", | ||
| 116 | size / 1024, addr, | ||
| 117 | bochs->ioports ? "ioports" : "mmio", | ||
| 118 | ioaddr); | ||
| 119 | return 0; | ||
| 120 | } | ||
| 121 | |||
/* Undo bochs_hw_init(): drop the mappings and the io/pci regions.
 * Each field is tested individually, so this is safe to call after a
 * partially failed init. */
void bochs_hw_fini(struct drm_device *dev)
{
	struct bochs_device *bochs = dev->dev_private;

	if (bochs->mmio)
		iounmap(bochs->mmio);
	if (bochs->ioports)
		release_region(VBE_DISPI_IOPORT_INDEX, 2);
	if (bochs->fb_map)
		iounmap(bochs->fb_map);
	pci_release_regions(dev->pdev);
}
| 134 | |||
/* Program a display mode into the dispi registers.  Always 32 bpp; the
 * virtual height is sized to cover all of VRAM so panning via
 * bochs_hw_setbase() stays possible. */
void bochs_hw_setmode(struct bochs_device *bochs,
		      struct drm_display_mode *mode)
{
	bochs->xres = mode->hdisplay;
	bochs->yres = mode->vdisplay;
	bochs->bpp = 32;
	bochs->stride = mode->hdisplay * (bochs->bpp / 8);
	bochs->yres_virtual = bochs->fb_size / bochs->stride;

	DRM_DEBUG_DRIVER("%dx%d @ %d bpp, vy %d\n",
			 bochs->xres, bochs->yres, bochs->bpp,
			 bochs->yres_virtual);

	bochs_vga_writeb(bochs, 0x3c0, 0x20); /* unblank */

	/* program geometry first, then flip the enable bit */
	bochs_dispi_write(bochs, VBE_DISPI_INDEX_BPP, bochs->bpp);
	bochs_dispi_write(bochs, VBE_DISPI_INDEX_XRES, bochs->xres);
	bochs_dispi_write(bochs, VBE_DISPI_INDEX_YRES, bochs->yres);
	bochs_dispi_write(bochs, VBE_DISPI_INDEX_BANK, 0);
	bochs_dispi_write(bochs, VBE_DISPI_INDEX_VIRT_WIDTH, bochs->xres);
	bochs_dispi_write(bochs, VBE_DISPI_INDEX_VIRT_HEIGHT,
			  bochs->yres_virtual);
	bochs_dispi_write(bochs, VBE_DISPI_INDEX_X_OFFSET, 0);
	bochs_dispi_write(bochs, VBE_DISPI_INDEX_Y_OFFSET, 0);

	bochs_dispi_write(bochs, VBE_DISPI_INDEX_ENABLE,
			  VBE_DISPI_ENABLED | VBE_DISPI_LFB_ENABLED);
}
| 163 | |||
/* Pan the scanout: translate a byte offset into VRAM (plus an x/y
 * position within the framebuffer) back into the pixel-based dispi
 * x/y offset registers. */
void bochs_hw_setbase(struct bochs_device *bochs,
		      int x, int y, u64 addr)
{
	unsigned long offset = (unsigned long)addr +
		y * bochs->stride +
		x * (bochs->bpp / 8);
	int vy = offset / bochs->stride;
	int vx = (offset % bochs->stride) * 8 / bochs->bpp;

	DRM_DEBUG_DRIVER("x %d, y %d, addr %llx -> offset %lx, vx %d, vy %d\n",
			 x, y, addr, offset, vx, vy);
	bochs_dispi_write(bochs, VBE_DISPI_INDEX_X_OFFSET, vx);
	bochs_dispi_write(bochs, VBE_DISPI_INDEX_Y_OFFSET, vy);
}
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c new file mode 100644 index 000000000000..62ec7d4b3816 --- /dev/null +++ b/drivers/gpu/drm/bochs/bochs_kms.c | |||
| @@ -0,0 +1,294 @@ | |||
| 1 | /* | ||
| 2 | * This program is free software; you can redistribute it and/or modify | ||
| 3 | * it under the terms of the GNU General Public License as published by | ||
| 4 | * the Free Software Foundation; either version 2 of the License, or | ||
| 5 | * (at your option) any later version. | ||
| 6 | */ | ||
| 7 | |||
| 8 | #include "bochs.h" | ||
| 9 | |||
/* Default resolution reported as the preferred mode; read-only module
 * parameters (e.g. bochs-drm.defx=1280 bochs-drm.defy=800). */
static int defx = 1024;
static int defy = 768;

module_param(defx, int, 0444);
module_param(defy, int, 0444);
MODULE_PARM_DESC(defx, "default x resolution");
MODULE_PARM_DESC(defy, "default y resolution");
| 17 | |||
| 18 | /* ---------------------------------------------------------------------- */ | ||
| 19 | |||
/* No palette/LUT on the bochs dispi interface -- nothing to load. */
static void bochs_crtc_load_lut(struct drm_crtc *crtc)
{
}
| 23 | |||
/* The virtual hardware has no power states; every DPMS mode is a
 * deliberate no-op. */
static void bochs_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	switch (mode) {
	case DRM_MODE_DPMS_ON:
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
	case DRM_MODE_DPMS_OFF:
	default:
		return;
	}
}
| 35 | |||
/* No mode fixups needed; accept every mode the helpers pass in. */
static bool bochs_crtc_mode_fixup(struct drm_crtc *crtc,
				  const struct drm_display_mode *mode,
				  struct drm_display_mode *adjusted_mode)
{
	return true;
}
| 42 | |||
/* Switch scanout to crtc->fb: unpin the old framebuffer's bo, pin the
 * new one into VRAM and program its offset into the hardware.  TTM
 * requires the bo to be reserved around unpin/pin, hence the
 * reserve/unreserve brackets. */
static int bochs_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
				    struct drm_framebuffer *old_fb)
{
	struct bochs_device *bochs =
		container_of(crtc, struct bochs_device, crtc);
	struct bochs_framebuffer *bochs_fb;
	struct bochs_bo *bo;
	u64 gpu_addr = 0;
	int ret;

	/* best effort: a failed reserve only leaves the old bo pinned,
	 * it does not abort the flip */
	if (old_fb) {
		bochs_fb = to_bochs_framebuffer(old_fb);
		bo = gem_to_bochs_bo(bochs_fb->obj);
		ret = ttm_bo_reserve(&bo->bo, true, false, false, 0);
		if (ret) {
			DRM_ERROR("failed to reserve old_fb bo\n");
		} else {
			bochs_bo_unpin(bo);
			ttm_bo_unreserve(&bo->bo);
		}
	}

	if (WARN_ON(crtc->fb == NULL))
		return -EINVAL;

	bochs_fb = to_bochs_framebuffer(crtc->fb);
	bo = gem_to_bochs_bo(bochs_fb->obj);
	ret = ttm_bo_reserve(&bo->bo, true, false, false, 0);
	if (ret)
		return ret;

	/* pin into VRAM and learn the scanout offset */
	ret = bochs_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
	if (ret) {
		ttm_bo_unreserve(&bo->bo);
		return ret;
	}

	ttm_bo_unreserve(&bo->bo);
	bochs_hw_setbase(bochs, x, y, gpu_addr);
	return 0;
}
| 84 | |||
| 85 | static int bochs_crtc_mode_set(struct drm_crtc *crtc, | ||
| 86 | struct drm_display_mode *mode, | ||
| 87 | struct drm_display_mode *adjusted_mode, | ||
| 88 | int x, int y, struct drm_framebuffer *old_fb) | ||
| 89 | { | ||
| 90 | struct bochs_device *bochs = | ||
| 91 | container_of(crtc, struct bochs_device, crtc); | ||
| 92 | |||
| 93 | bochs_hw_setmode(bochs, mode); | ||
| 94 | bochs_crtc_mode_set_base(crtc, x, y, old_fb); | ||
| 95 | return 0; | ||
| 96 | } | ||
| 97 | |||
/* Nothing to do around modesets on the virtual crtc. */
static void bochs_crtc_prepare(struct drm_crtc *crtc)
{
}

static void bochs_crtc_commit(struct drm_crtc *crtc)
{
}

/* No LUT on this device; gamma updates are ignored. */
static void bochs_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
				 u16 *blue, uint32_t start, uint32_t size)
{
}
| 110 | |||
/* These provide the minimum set of functions required to handle a CRTC */
static const struct drm_crtc_funcs bochs_crtc_funcs = {
	.gamma_set = bochs_crtc_gamma_set,
	.set_config = drm_crtc_helper_set_config,
	.destroy = drm_crtc_cleanup,
};

/* Helper callbacks invoked from drm_crtc_helper_set_config(). */
static const struct drm_crtc_helper_funcs bochs_helper_funcs = {
	.dpms = bochs_crtc_dpms,
	.mode_fixup = bochs_crtc_mode_fixup,
	.mode_set = bochs_crtc_mode_set,
	.mode_set_base = bochs_crtc_mode_set_base,
	.prepare = bochs_crtc_prepare,
	.commit = bochs_crtc_commit,
	.load_lut = bochs_crtc_load_lut,
};
| 127 | |||
/* Register the single CRTC, with a 256-entry (unused) gamma table. */
static void bochs_crtc_init(struct drm_device *dev)
{
	struct bochs_device *bochs = dev->dev_private;
	struct drm_crtc *crtc = &bochs->crtc;

	drm_crtc_init(dev, crtc, &bochs_crtc_funcs);
	drm_mode_crtc_set_gamma_size(crtc, 256);
	drm_crtc_helper_add(crtc, &bochs_helper_funcs);
}
| 137 | |||
/* The virtual DAC needs no encoder programming; the hooks below exist
 * only to satisfy the drm helper contract. */
static bool bochs_encoder_mode_fixup(struct drm_encoder *encoder,
				     const struct drm_display_mode *mode,
				     struct drm_display_mode *adjusted_mode)
{
	return true;
}

static void bochs_encoder_mode_set(struct drm_encoder *encoder,
				   struct drm_display_mode *mode,
				   struct drm_display_mode *adjusted_mode)
{
}

static void bochs_encoder_dpms(struct drm_encoder *encoder, int state)
{
}

static void bochs_encoder_prepare(struct drm_encoder *encoder)
{
}

static void bochs_encoder_commit(struct drm_encoder *encoder)
{
}
| 162 | |||
/* Encoder helper callbacks; all effectively no-ops for the virtual DAC. */
static const struct drm_encoder_helper_funcs bochs_encoder_helper_funcs = {
	.dpms = bochs_encoder_dpms,
	.mode_fixup = bochs_encoder_mode_fixup,
	.mode_set = bochs_encoder_mode_set,
	.prepare = bochs_encoder_prepare,
	.commit = bochs_encoder_commit,
};

static const struct drm_encoder_funcs bochs_encoder_encoder_funcs = {
	.destroy = drm_encoder_cleanup,
};
| 174 | |||
/* Register the (only) encoder; it can drive crtc 0 only. */
static void bochs_encoder_init(struct drm_device *dev)
{
	struct bochs_device *bochs = dev->dev_private;
	struct drm_encoder *encoder = &bochs->encoder;

	encoder->possible_crtcs = 0x1;	/* bit 0 == crtc 0 */
	drm_encoder_init(dev, encoder, &bochs_encoder_encoder_funcs,
			 DRM_MODE_ENCODER_DAC);
	drm_encoder_helper_add(encoder, &bochs_encoder_helper_funcs);
}
| 185 | |||
| 186 | |||
/* No EDID on the virtual connector: offer the standard no-EDID mode
 * list up to 8192x8192 and mark defx x defy as the preferred mode.
 * Returns the number of modes added.
 * NOTE(review): not static -- presumably declared in bochs.h; verify. */
int bochs_connector_get_modes(struct drm_connector *connector)
{
	int count;

	count = drm_add_modes_noedid(connector, 8192, 8192);
	drm_set_preferred_mode(connector, defx, defy);
	return count;
}
| 195 | |||
/* Reject modes whose 32 bpp framebuffer would not leave room for a
 * second framebuffer in VRAM. */
static int bochs_connector_mode_valid(struct drm_connector *connector,
				      struct drm_display_mode *mode)
{
	struct bochs_device *bochs =
		container_of(connector, struct bochs_device, connector);
	unsigned long size = mode->hdisplay * mode->vdisplay * 4;

	/*
	 * Make sure we can fit two framebuffers into video memory.
	 * This allows up to 1600x1200 with 16 MB (default size).
	 * If you want more try this:
	 *     'qemu -vga std -global VGA.vgamem_mb=32 $otherargs'
	 */
	if (size * 2 > bochs->fb_size)
		return MODE_BAD;

	return MODE_OK;
}
| 214 | |||
| 215 | static struct drm_encoder * | ||
| 216 | bochs_connector_best_encoder(struct drm_connector *connector) | ||
| 217 | { | ||
| 218 | int enc_id = connector->encoder_ids[0]; | ||
| 219 | struct drm_mode_object *obj; | ||
| 220 | struct drm_encoder *encoder; | ||
| 221 | |||
| 222 | /* pick the encoder ids */ | ||
| 223 | if (enc_id) { | ||
| 224 | obj = drm_mode_object_find(connector->dev, enc_id, | ||
| 225 | DRM_MODE_OBJECT_ENCODER); | ||
| 226 | if (!obj) | ||
| 227 | return NULL; | ||
| 228 | encoder = obj_to_encoder(obj); | ||
| 229 | return encoder; | ||
| 230 | } | ||
| 231 | return NULL; | ||
| 232 | } | ||
| 233 | |||
/* The virtual display is always connected. */
static enum drm_connector_status bochs_connector_detect(struct drm_connector
							*connector, bool force)
{
	return connector_status_connected;
}
| 239 | |||
/* NOTE(review): these tables look like candidates for static const;
 * left as-is since they may be referenced from another file. */
struct drm_connector_helper_funcs bochs_connector_connector_helper_funcs = {
	.get_modes = bochs_connector_get_modes,
	.mode_valid = bochs_connector_mode_valid,
	.best_encoder = bochs_connector_best_encoder,
};

struct drm_connector_funcs bochs_connector_connector_funcs = {
	.dpms = drm_helper_connector_dpms,
	.detect = bochs_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = drm_connector_cleanup,
};
| 252 | |||
/* Register the single, always-connected virtual connector. */
static void bochs_connector_init(struct drm_device *dev)
{
	struct bochs_device *bochs = dev->dev_private;
	struct drm_connector *connector = &bochs->connector;

	drm_connector_init(dev, connector, &bochs_connector_connector_funcs,
			   DRM_MODE_CONNECTOR_VIRTUAL);
	drm_connector_helper_add(connector,
				 &bochs_connector_connector_helper_funcs);
}
| 263 | |||
| 264 | |||
/* Set up KMS: mode config limits, then the crtc -> encoder -> connector
 * pipeline (one of each). */
int bochs_kms_init(struct bochs_device *bochs)
{
	drm_mode_config_init(bochs->dev);
	bochs->mode_config_initialized = true;

	bochs->dev->mode_config.max_width = 8192;
	bochs->dev->mode_config.max_height = 8192;

	bochs->dev->mode_config.fb_base = bochs->fb_base;
	bochs->dev->mode_config.preferred_depth = 24;
	bochs->dev->mode_config.prefer_shadow = 0;

	/* NOTE(review): the cast silences a qualifier mismatch on
	 * bochs_mode_funcs (declared elsewhere) -- check whether making
	 * that declaration const would let the cast go away. */
	bochs->dev->mode_config.funcs = (void *)&bochs_mode_funcs;

	bochs_crtc_init(bochs->dev);
	bochs_encoder_init(bochs->dev);
	bochs_connector_init(bochs->dev);
	drm_mode_connector_attach_encoder(&bochs->connector,
					  &bochs->encoder);

	return 0;
}
| 287 | |||
| 288 | void bochs_kms_fini(struct bochs_device *bochs) | ||
| 289 | { | ||
| 290 | if (bochs->mode_config_initialized) { | ||
| 291 | drm_mode_config_cleanup(bochs->dev); | ||
| 292 | bochs->mode_config_initialized = false; | ||
| 293 | } | ||
| 294 | } | ||
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c new file mode 100644 index 000000000000..ce6858765b37 --- /dev/null +++ b/drivers/gpu/drm/bochs/bochs_mm.c | |||
| @@ -0,0 +1,546 @@ | |||
| 1 | /* | ||
| 2 | * This program is free software; you can redistribute it and/or modify | ||
| 3 | * it under the terms of the GNU General Public License as published by | ||
| 4 | * the Free Software Foundation; either version 2 of the License, or | ||
| 5 | * (at your option) any later version. | ||
| 6 | */ | ||
| 7 | |||
| 8 | #include "bochs.h" | ||
| 9 | |||
| 10 | static void bochs_ttm_placement(struct bochs_bo *bo, int domain); | ||
| 11 | |||
| 12 | /* ---------------------------------------------------------------------- */ | ||
| 13 | |||
/* Map a ttm_bo_device back to the bochs_device embedding it. */
static inline struct bochs_device *bochs_bdev(struct ttm_bo_device *bd)
{
	return container_of(bd, struct bochs_device, ttm.bdev);
}
| 18 | |||
/* drm_global_reference init/release trampolines for the TTM memory
 * accounting global. */
static int bochs_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void bochs_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}
| 28 | |||
/* Take references on the two TTM globals (memory accounting and the bo
 * subsystem).  On failure the first reference is dropped again, so the
 * caller needs no cleanup. */
static int bochs_ttm_global_init(struct bochs_device *bochs)
{
	struct drm_global_reference *global_ref;
	int r;

	/* memory accounting global; needed by the bo global below */
	global_ref = &bochs->ttm.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &bochs_ttm_mem_global_init;
	global_ref->release = &bochs_ttm_mem_global_release;
	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM memory accounting "
			  "subsystem.\n");
		return r;
	}

	bochs->ttm.bo_global_ref.mem_glob =
		bochs->ttm.mem_global_ref.object;
	global_ref = &bochs->ttm.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
		drm_global_item_unref(&bochs->ttm.mem_global_ref);
		return r;
	}

	return 0;
}
| 62 | |||
| 63 | static void bochs_ttm_global_release(struct bochs_device *bochs) | ||
| 64 | { | ||
| 65 | if (bochs->ttm.mem_global_ref.release == NULL) | ||
| 66 | return; | ||
| 67 | |||
| 68 | drm_global_item_unref(&bochs->ttm.bo_global_ref.ref); | ||
| 69 | drm_global_item_unref(&bochs->ttm.mem_global_ref); | ||
| 70 | bochs->ttm.mem_global_ref.release = NULL; | ||
| 71 | } | ||
| 72 | |||
| 73 | |||
| 74 | static void bochs_bo_ttm_destroy(struct ttm_buffer_object *tbo) | ||
| 75 | { | ||
| 76 | struct bochs_bo *bo; | ||
| 77 | |||
| 78 | bo = container_of(tbo, struct bochs_bo, bo); | ||
| 79 | drm_gem_object_release(&bo->gem); | ||
| 80 | kfree(bo); | ||
| 81 | } | ||
| 82 | |||
| 83 | static bool bochs_ttm_bo_is_bochs_bo(struct ttm_buffer_object *bo) | ||
| 84 | { | ||
| 85 | if (bo->destroy == &bochs_bo_ttm_destroy) | ||
| 86 | return true; | ||
| 87 | return false; | ||
| 88 | } | ||
| 89 | |||
/* Describe the memory domains TTM may place buffers in: plain system
 * pages, and VRAM as fixed, mappable, write-combined io memory. */
static int bochs_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				  struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		/* VRAM range handled by the generic range manager */
		man->func = &ttm_bo_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}
| 113 | |||
/* On eviction, ask TTM to move the bo to system memory. */
static void
bochs_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct bochs_bo *bochsbo = bochs_bo(bo);

	/* bos not created by this driver are left alone */
	if (!bochs_ttm_bo_is_bochs_bo(bo))
		return;

	bochs_ttm_placement(bochsbo, TTM_PL_FLAG_SYSTEM);
	*pl = bochsbo->placement;
}
| 125 | |||
| 126 | static int bochs_bo_verify_access(struct ttm_buffer_object *bo, | ||
| 127 | struct file *filp) | ||
| 128 | { | ||
| 129 | struct bochs_bo *bochsbo = bochs_bo(bo); | ||
| 130 | |||
| 131 | return drm_vma_node_verify_access(&bochsbo->gem.vma_node, filp); | ||
| 132 | } | ||
| 133 | |||
| 134 | static int bochs_ttm_io_mem_reserve(struct ttm_bo_device *bdev, | ||
| 135 | struct ttm_mem_reg *mem) | ||
| 136 | { | ||
| 137 | struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; | ||
| 138 | struct bochs_device *bochs = bochs_bdev(bdev); | ||
| 139 | |||
| 140 | mem->bus.addr = NULL; | ||
| 141 | mem->bus.offset = 0; | ||
| 142 | mem->bus.size = mem->num_pages << PAGE_SHIFT; | ||
| 143 | mem->bus.base = 0; | ||
| 144 | mem->bus.is_iomem = false; | ||
| 145 | if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) | ||
| 146 | return -EINVAL; | ||
| 147 | switch (mem->mem_type) { | ||
| 148 | case TTM_PL_SYSTEM: | ||
| 149 | /* system memory */ | ||
| 150 | return 0; | ||
| 151 | case TTM_PL_VRAM: | ||
| 152 | mem->bus.offset = mem->start << PAGE_SHIFT; | ||
| 153 | mem->bus.base = bochs->fb_base; | ||
| 154 | mem->bus.is_iomem = true; | ||
| 155 | break; | ||
| 156 | default: | ||
| 157 | return -EINVAL; | ||
| 158 | break; | ||
| 159 | } | ||
| 160 | return 0; | ||
| 161 | } | ||
| 162 | |||
/* Nothing to undo for bochs_ttm_io_mem_reserve(). */
static void bochs_ttm_io_mem_free(struct ttm_bo_device *bdev,
				  struct ttm_mem_reg *mem)
{
}
| 167 | |||
/* No DMA engine on this device: all moves are CPU memcpy. */
static int bochs_bo_move(struct ttm_buffer_object *bo,
			 bool evict, bool interruptible,
			 bool no_wait_gpu,
			 struct ttm_mem_reg *new_mem)
{
	return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
}
| 175 | |||
| 176 | |||
/* ttm_tt teardown: finalize and free the (kzalloc'd) ttm_tt. */
static void bochs_ttm_backend_destroy(struct ttm_tt *tt)
{
	ttm_tt_fini(tt);
	kfree(tt);
}

static struct ttm_backend_func bochs_tt_backend_func = {
	.destroy = &bochs_ttm_backend_destroy,
};
| 186 | |||
| 187 | static struct ttm_tt *bochs_ttm_tt_create(struct ttm_bo_device *bdev, | ||
| 188 | unsigned long size, | ||
| 189 | uint32_t page_flags, | ||
| 190 | struct page *dummy_read_page) | ||
| 191 | { | ||
| 192 | struct ttm_tt *tt; | ||
| 193 | |||
| 194 | tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL); | ||
| 195 | if (tt == NULL) | ||
| 196 | return NULL; | ||
| 197 | tt->func = &bochs_tt_backend_func; | ||
| 198 | if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) { | ||
| 199 | kfree(tt); | ||
| 200 | return NULL; | ||
| 201 | } | ||
| 202 | return tt; | ||
| 203 | } | ||
| 204 | |||
/* TTM driver vtable: generic pool for system pages, memcpy moves, and
 * the io_mem hooks above for CPU access to VRAM. */
struct ttm_bo_driver bochs_bo_driver = {
	.ttm_tt_create = bochs_ttm_tt_create,
	.ttm_tt_populate = ttm_pool_populate,
	.ttm_tt_unpopulate = ttm_pool_unpopulate,
	.init_mem_type = bochs_bo_init_mem_type,
	.evict_flags = bochs_bo_evict_flags,
	.move = bochs_bo_move,
	.verify_access = bochs_bo_verify_access,
	.io_mem_reserve = &bochs_ttm_io_mem_reserve,
	.io_mem_free = &bochs_ttm_io_mem_free,
};
| 216 | |||
| 217 | int bochs_mm_init(struct bochs_device *bochs) | ||
| 218 | { | ||
| 219 | struct ttm_bo_device *bdev = &bochs->ttm.bdev; | ||
| 220 | int ret; | ||
| 221 | |||
| 222 | ret = bochs_ttm_global_init(bochs); | ||
| 223 | if (ret) | ||
| 224 | return ret; | ||
| 225 | |||
| 226 | ret = ttm_bo_device_init(&bochs->ttm.bdev, | ||
| 227 | bochs->ttm.bo_global_ref.ref.object, | ||
| 228 | &bochs_bo_driver, DRM_FILE_PAGE_OFFSET, | ||
| 229 | true); | ||
| 230 | if (ret) { | ||
| 231 | DRM_ERROR("Error initialising bo driver; %d\n", ret); | ||
| 232 | return ret; | ||
| 233 | } | ||
| 234 | |||
| 235 | ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, | ||
| 236 | bochs->fb_size >> PAGE_SHIFT); | ||
| 237 | if (ret) { | ||
| 238 | DRM_ERROR("Failed ttm VRAM init: %d\n", ret); | ||
| 239 | return ret; | ||
| 240 | } | ||
| 241 | |||
| 242 | bochs->ttm.initialized = true; | ||
| 243 | return 0; | ||
| 244 | } | ||
| 245 | |||
| 246 | void bochs_mm_fini(struct bochs_device *bochs) | ||
| 247 | { | ||
| 248 | if (!bochs->ttm.initialized) | ||
| 249 | return; | ||
| 250 | |||
| 251 | ttm_bo_device_release(&bochs->ttm.bdev); | ||
| 252 | bochs_ttm_global_release(bochs); | ||
| 253 | bochs->ttm.initialized = false; | ||
| 254 | } | ||
| 255 | |||
/* Fill in bo->placement for the requested domain mask (VRAM and/or
 * system); falls back to cached system memory when no flag matches. */
static void bochs_ttm_placement(struct bochs_bo *bo, int domain)
{
	u32 c = 0;
	bo->placement.fpfn = 0;
	bo->placement.lpfn = 0;
	bo->placement.placement = bo->placements;
	bo->placement.busy_placement = bo->placements;
	if (domain & TTM_PL_FLAG_VRAM) {
		/* VRAM is io memory: write-combined or uncached only */
		bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED
			| TTM_PL_FLAG_VRAM;
	}
	if (domain & TTM_PL_FLAG_SYSTEM) {
		bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	}
	if (!c) {
		/* no domain requested: default to system memory */
		bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	}
	bo->placement.num_placement = c;
	bo->placement.num_busy_placement = c;
}
| 276 | |||
/* Current placement offset of the bo, used as the scanout base. */
static inline u64 bochs_bo_gpu_offset(struct bochs_bo *bo)
{
	return bo->bo.offset;
}
| 281 | |||
/* Pin a bo into the given placement (refcounted) and optionally return
 * its offset.  Callers reserve the bo around this call (see
 * bochs_crtc_mode_set_base). */
int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr)
{
	int i, ret;

	/* already pinned: just bump the count */
	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = bochs_bo_gpu_offset(bo);
		return 0;
	}

	/* first pin: move to the requested domain and block eviction */
	bochs_ttm_placement(bo, pl_flag);
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
	if (ret)
		return ret;

	bo->pin_count = 1;
	if (gpu_addr)
		*gpu_addr = bochs_bo_gpu_offset(bo);
	return 0;
}
| 305 | |||
/* Drop one pin reference; when the last one goes, make the bo evictable
 * again.  Unbalanced unpins are logged and treated as a no-op. */
int bochs_bo_unpin(struct bochs_bo *bo)
{
	int i, ret;

	if (!bo->pin_count) {
		DRM_ERROR("unpin bad %p\n", bo);
		return 0;
	}
	bo->pin_count--;

	if (bo->pin_count)
		return 0;

	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
	if (ret)
		return ret;

	return 0;
}
| 327 | |||
| 328 | int bochs_mmap(struct file *filp, struct vm_area_struct *vma) | ||
| 329 | { | ||
| 330 | struct drm_file *file_priv; | ||
| 331 | struct bochs_device *bochs; | ||
| 332 | |||
| 333 | if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) | ||
| 334 | return drm_mmap(filp, vma); | ||
| 335 | |||
| 336 | file_priv = filp->private_data; | ||
| 337 | bochs = file_priv->minor->dev->dev_private; | ||
| 338 | return ttm_bo_mmap(filp, vma, &bochs->ttm.bdev); | ||
| 339 | } | ||
| 340 | |||
| 341 | /* ---------------------------------------------------------------------- */ | ||
| 342 | |||
| 343 | static int bochs_bo_create(struct drm_device *dev, int size, int align, | ||
| 344 | uint32_t flags, struct bochs_bo **pbochsbo) | ||
| 345 | { | ||
| 346 | struct bochs_device *bochs = dev->dev_private; | ||
| 347 | struct bochs_bo *bochsbo; | ||
| 348 | size_t acc_size; | ||
| 349 | int ret; | ||
| 350 | |||
| 351 | bochsbo = kzalloc(sizeof(struct bochs_bo), GFP_KERNEL); | ||
| 352 | if (!bochsbo) | ||
| 353 | return -ENOMEM; | ||
| 354 | |||
| 355 | ret = drm_gem_object_init(dev, &bochsbo->gem, size); | ||
| 356 | if (ret) { | ||
| 357 | kfree(bochsbo); | ||
| 358 | return ret; | ||
| 359 | } | ||
| 360 | |||
| 361 | bochsbo->bo.bdev = &bochs->ttm.bdev; | ||
| 362 | bochsbo->bo.bdev->dev_mapping = dev->dev_mapping; | ||
| 363 | |||
| 364 | bochs_ttm_placement(bochsbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); | ||
| 365 | |||
| 366 | acc_size = ttm_bo_dma_acc_size(&bochs->ttm.bdev, size, | ||
| 367 | sizeof(struct bochs_bo)); | ||
| 368 | |||
| 369 | ret = ttm_bo_init(&bochs->ttm.bdev, &bochsbo->bo, size, | ||
| 370 | ttm_bo_type_device, &bochsbo->placement, | ||
| 371 | align >> PAGE_SHIFT, false, NULL, acc_size, | ||
| 372 | NULL, bochs_bo_ttm_destroy); | ||
| 373 | if (ret) | ||
| 374 | return ret; | ||
| 375 | |||
| 376 | *pbochsbo = bochsbo; | ||
| 377 | return 0; | ||
| 378 | } | ||
| 379 | |||
| 380 | int bochs_gem_create(struct drm_device *dev, u32 size, bool iskernel, | ||
| 381 | struct drm_gem_object **obj) | ||
| 382 | { | ||
| 383 | struct bochs_bo *bochsbo; | ||
| 384 | int ret; | ||
| 385 | |||
| 386 | *obj = NULL; | ||
| 387 | |||
| 388 | size = ALIGN(size, PAGE_SIZE); | ||
| 389 | if (size == 0) | ||
| 390 | return -EINVAL; | ||
| 391 | |||
| 392 | ret = bochs_bo_create(dev, size, 0, 0, &bochsbo); | ||
| 393 | if (ret) { | ||
| 394 | if (ret != -ERESTARTSYS) | ||
| 395 | DRM_ERROR("failed to allocate GEM object\n"); | ||
| 396 | return ret; | ||
| 397 | } | ||
| 398 | *obj = &bochsbo->gem; | ||
| 399 | return 0; | ||
| 400 | } | ||
| 401 | |||
| 402 | int bochs_dumb_create(struct drm_file *file, struct drm_device *dev, | ||
| 403 | struct drm_mode_create_dumb *args) | ||
| 404 | { | ||
| 405 | struct drm_gem_object *gobj; | ||
| 406 | u32 handle; | ||
| 407 | int ret; | ||
| 408 | |||
| 409 | args->pitch = args->width * ((args->bpp + 7) / 8); | ||
| 410 | args->size = args->pitch * args->height; | ||
| 411 | |||
| 412 | ret = bochs_gem_create(dev, args->size, false, | ||
| 413 | &gobj); | ||
| 414 | if (ret) | ||
| 415 | return ret; | ||
| 416 | |||
| 417 | ret = drm_gem_handle_create(file, gobj, &handle); | ||
| 418 | drm_gem_object_unreference_unlocked(gobj); | ||
| 419 | if (ret) | ||
| 420 | return ret; | ||
| 421 | |||
| 422 | args->handle = handle; | ||
| 423 | return 0; | ||
| 424 | } | ||
| 425 | |||
| 426 | static void bochs_bo_unref(struct bochs_bo **bo) | ||
| 427 | { | ||
| 428 | struct ttm_buffer_object *tbo; | ||
| 429 | |||
| 430 | if ((*bo) == NULL) | ||
| 431 | return; | ||
| 432 | |||
| 433 | tbo = &((*bo)->bo); | ||
| 434 | ttm_bo_unref(&tbo); | ||
| 435 | if (tbo == NULL) | ||
| 436 | *bo = NULL; | ||
| 437 | |||
| 438 | } | ||
| 439 | |||
/* GEM free hook: drop the final reference on the underlying bochs BO. */
void bochs_gem_free_object(struct drm_gem_object *obj)
{
	struct bochs_bo *bo = gem_to_bochs_bo(obj);

	if (bo)
		bochs_bo_unref(&bo);
}
| 448 | |||
| 449 | int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev, | ||
| 450 | uint32_t handle, uint64_t *offset) | ||
| 451 | { | ||
| 452 | struct drm_gem_object *obj; | ||
| 453 | int ret; | ||
| 454 | struct bochs_bo *bo; | ||
| 455 | |||
| 456 | mutex_lock(&dev->struct_mutex); | ||
| 457 | obj = drm_gem_object_lookup(dev, file, handle); | ||
| 458 | if (obj == NULL) { | ||
| 459 | ret = -ENOENT; | ||
| 460 | goto out_unlock; | ||
| 461 | } | ||
| 462 | |||
| 463 | bo = gem_to_bochs_bo(obj); | ||
| 464 | *offset = bochs_bo_mmap_offset(bo); | ||
| 465 | |||
| 466 | drm_gem_object_unreference(obj); | ||
| 467 | ret = 0; | ||
| 468 | out_unlock: | ||
| 469 | mutex_unlock(&dev->struct_mutex); | ||
| 470 | return ret; | ||
| 471 | |||
| 472 | } | ||
| 473 | |||
| 474 | /* ---------------------------------------------------------------------- */ | ||
| 475 | |||
| 476 | static void bochs_user_framebuffer_destroy(struct drm_framebuffer *fb) | ||
| 477 | { | ||
| 478 | struct bochs_framebuffer *bochs_fb = to_bochs_framebuffer(fb); | ||
| 479 | if (bochs_fb->obj) | ||
| 480 | drm_gem_object_unreference_unlocked(bochs_fb->obj); | ||
| 481 | drm_framebuffer_cleanup(fb); | ||
| 482 | kfree(fb); | ||
| 483 | } | ||
| 484 | |||
/* Framebuffer vtable: this simple driver only needs a destroy hook. */
static const struct drm_framebuffer_funcs bochs_fb_funcs = {
	.destroy = bochs_user_framebuffer_destroy,
};
| 488 | |||
| 489 | int bochs_framebuffer_init(struct drm_device *dev, | ||
| 490 | struct bochs_framebuffer *gfb, | ||
| 491 | struct drm_mode_fb_cmd2 *mode_cmd, | ||
| 492 | struct drm_gem_object *obj) | ||
| 493 | { | ||
| 494 | int ret; | ||
| 495 | |||
| 496 | drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd); | ||
| 497 | gfb->obj = obj; | ||
| 498 | ret = drm_framebuffer_init(dev, &gfb->base, &bochs_fb_funcs); | ||
| 499 | if (ret) { | ||
| 500 | DRM_ERROR("drm_framebuffer_init failed: %d\n", ret); | ||
| 501 | return ret; | ||
| 502 | } | ||
| 503 | return 0; | ||
| 504 | } | ||
| 505 | |||
| 506 | static struct drm_framebuffer * | ||
| 507 | bochs_user_framebuffer_create(struct drm_device *dev, | ||
| 508 | struct drm_file *filp, | ||
| 509 | struct drm_mode_fb_cmd2 *mode_cmd) | ||
| 510 | { | ||
| 511 | struct drm_gem_object *obj; | ||
| 512 | struct bochs_framebuffer *bochs_fb; | ||
| 513 | int ret; | ||
| 514 | |||
| 515 | DRM_DEBUG_DRIVER("%dx%d, format %c%c%c%c\n", | ||
| 516 | mode_cmd->width, mode_cmd->height, | ||
| 517 | (mode_cmd->pixel_format) & 0xff, | ||
| 518 | (mode_cmd->pixel_format >> 8) & 0xff, | ||
| 519 | (mode_cmd->pixel_format >> 16) & 0xff, | ||
| 520 | (mode_cmd->pixel_format >> 24) & 0xff); | ||
| 521 | |||
| 522 | if (mode_cmd->pixel_format != DRM_FORMAT_XRGB8888) | ||
| 523 | return ERR_PTR(-ENOENT); | ||
| 524 | |||
| 525 | obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]); | ||
| 526 | if (obj == NULL) | ||
| 527 | return ERR_PTR(-ENOENT); | ||
| 528 | |||
| 529 | bochs_fb = kzalloc(sizeof(*bochs_fb), GFP_KERNEL); | ||
| 530 | if (!bochs_fb) { | ||
| 531 | drm_gem_object_unreference_unlocked(obj); | ||
| 532 | return ERR_PTR(-ENOMEM); | ||
| 533 | } | ||
| 534 | |||
| 535 | ret = bochs_framebuffer_init(dev, bochs_fb, mode_cmd, obj); | ||
| 536 | if (ret) { | ||
| 537 | drm_gem_object_unreference_unlocked(obj); | ||
| 538 | kfree(bochs_fb); | ||
| 539 | return ERR_PTR(ret); | ||
| 540 | } | ||
| 541 | return &bochs_fb->base; | ||
| 542 | } | ||
| 543 | |||
/* Mode-config vtable: only userspace framebuffer creation is needed. */
const struct drm_mode_config_funcs bochs_mode_funcs = {
	.fb_create = bochs_user_framebuffer_create,
};
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h index b6aded73838b..117d3eca5e37 100644 --- a/drivers/gpu/drm/cirrus/cirrus_drv.h +++ b/drivers/gpu/drm/cirrus/cirrus_drv.h | |||
| @@ -222,7 +222,7 @@ void cirrus_fbdev_fini(struct cirrus_device *cdev); | |||
| 222 | void cirrus_driver_irq_preinstall(struct drm_device *dev); | 222 | void cirrus_driver_irq_preinstall(struct drm_device *dev); |
| 223 | int cirrus_driver_irq_postinstall(struct drm_device *dev); | 223 | int cirrus_driver_irq_postinstall(struct drm_device *dev); |
| 224 | void cirrus_driver_irq_uninstall(struct drm_device *dev); | 224 | void cirrus_driver_irq_uninstall(struct drm_device *dev); |
| 225 | irqreturn_t cirrus_driver_irq_handler(DRM_IRQ_ARGS); | 225 | irqreturn_t cirrus_driver_irq_handler(int irq, void *arg); |
| 226 | 226 | ||
| 227 | /* cirrus_kms.c */ | 227 | /* cirrus_kms.c */ |
| 228 | int cirrus_driver_load(struct drm_device *dev, unsigned long flags); | 228 | int cirrus_driver_load(struct drm_device *dev, unsigned long flags); |
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c index b27e95666fab..2fd4a92162cb 100644 --- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c +++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c | |||
| @@ -39,7 +39,7 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev, | |||
| 39 | * then the BO is being moved and we should | 39 | * then the BO is being moved and we should |
| 40 | * store up the damage until later. | 40 | * store up the damage until later. |
| 41 | */ | 41 | */ |
| 42 | if (!in_interrupt()) | 42 | if (!drm_can_sleep()) |
| 43 | ret = cirrus_bo_reserve(bo, true); | 43 | ret = cirrus_bo_reserve(bo, true); |
| 44 | if (ret) { | 44 | if (ret) { |
| 45 | if (ret != -EBUSY) | 45 | if (ret != -EBUSY) |
| @@ -233,6 +233,9 @@ static int cirrusfb_create(struct drm_fb_helper *helper, | |||
| 233 | info->apertures->ranges[0].base = cdev->dev->mode_config.fb_base; | 233 | info->apertures->ranges[0].base = cdev->dev->mode_config.fb_base; |
| 234 | info->apertures->ranges[0].size = cdev->mc.vram_size; | 234 | info->apertures->ranges[0].size = cdev->mc.vram_size; |
| 235 | 235 | ||
| 236 | info->fix.smem_start = cdev->dev->mode_config.fb_base; | ||
| 237 | info->fix.smem_len = cdev->mc.vram_size; | ||
| 238 | |||
| 236 | info->screen_base = sysram; | 239 | info->screen_base = sysram; |
| 237 | info->screen_size = size; | 240 | info->screen_size = size; |
| 238 | 241 | ||
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c index 78e76f24343d..4b0170cf53fd 100644 --- a/drivers/gpu/drm/cirrus/cirrus_main.c +++ b/drivers/gpu/drm/cirrus/cirrus_main.c | |||
| @@ -255,7 +255,7 @@ int cirrus_dumb_create(struct drm_file *file, | |||
| 255 | return 0; | 255 | return 0; |
| 256 | } | 256 | } |
| 257 | 257 | ||
| 258 | void cirrus_bo_unref(struct cirrus_bo **bo) | 258 | static void cirrus_bo_unref(struct cirrus_bo **bo) |
| 259 | { | 259 | { |
| 260 | struct ttm_buffer_object *tbo; | 260 | struct ttm_buffer_object *tbo; |
| 261 | 261 | ||
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c index adabc3daaa5b..530f78f84dee 100644 --- a/drivers/gpu/drm/cirrus/cirrus_mode.c +++ b/drivers/gpu/drm/cirrus/cirrus_mode.c | |||
| @@ -102,7 +102,7 @@ static bool cirrus_crtc_mode_fixup(struct drm_crtc *crtc, | |||
| 102 | return true; | 102 | return true; |
| 103 | } | 103 | } |
| 104 | 104 | ||
| 105 | void cirrus_set_start_address(struct drm_crtc *crtc, unsigned offset) | 105 | static void cirrus_set_start_address(struct drm_crtc *crtc, unsigned offset) |
| 106 | { | 106 | { |
| 107 | struct cirrus_device *cdev = crtc->dev->dev_private; | 107 | struct cirrus_device *cdev = crtc->dev->dev_private; |
| 108 | u32 addr; | 108 | u32 addr; |
| @@ -273,8 +273,8 @@ static int cirrus_crtc_mode_set(struct drm_crtc *crtc, | |||
| 273 | sr07 |= 0x11; | 273 | sr07 |= 0x11; |
| 274 | break; | 274 | break; |
| 275 | case 16: | 275 | case 16: |
| 276 | sr07 |= 0xc1; | 276 | sr07 |= 0x17; |
| 277 | hdr = 0xc0; | 277 | hdr = 0xc1; |
| 278 | break; | 278 | break; |
| 279 | case 24: | 279 | case 24: |
| 280 | sr07 |= 0x15; | 280 | sr07 |= 0x15; |
| @@ -453,7 +453,7 @@ static void cirrus_encoder_commit(struct drm_encoder *encoder) | |||
| 453 | { | 453 | { |
| 454 | } | 454 | } |
| 455 | 455 | ||
| 456 | void cirrus_encoder_destroy(struct drm_encoder *encoder) | 456 | static void cirrus_encoder_destroy(struct drm_encoder *encoder) |
| 457 | { | 457 | { |
| 458 | struct cirrus_encoder *cirrus_encoder = to_cirrus_encoder(encoder); | 458 | struct cirrus_encoder *cirrus_encoder = to_cirrus_encoder(encoder); |
| 459 | drm_encoder_cleanup(encoder); | 459 | drm_encoder_cleanup(encoder); |
| @@ -492,7 +492,7 @@ static struct drm_encoder *cirrus_encoder_init(struct drm_device *dev) | |||
| 492 | } | 492 | } |
| 493 | 493 | ||
| 494 | 494 | ||
| 495 | int cirrus_vga_get_modes(struct drm_connector *connector) | 495 | static int cirrus_vga_get_modes(struct drm_connector *connector) |
| 496 | { | 496 | { |
| 497 | int count; | 497 | int count; |
| 498 | 498 | ||
| @@ -509,7 +509,7 @@ static int cirrus_vga_mode_valid(struct drm_connector *connector, | |||
| 509 | return MODE_OK; | 509 | return MODE_OK; |
| 510 | } | 510 | } |
| 511 | 511 | ||
| 512 | struct drm_encoder *cirrus_connector_best_encoder(struct drm_connector | 512 | static struct drm_encoder *cirrus_connector_best_encoder(struct drm_connector |
| 513 | *connector) | 513 | *connector) |
| 514 | { | 514 | { |
| 515 | int enc_id = connector->encoder_ids[0]; | 515 | int enc_id = connector->encoder_ids[0]; |
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c index 75becdeac07d..8b37c25ff9bd 100644 --- a/drivers/gpu/drm/cirrus/cirrus_ttm.c +++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c | |||
| @@ -80,7 +80,7 @@ static int cirrus_ttm_global_init(struct cirrus_device *cirrus) | |||
| 80 | return 0; | 80 | return 0; |
| 81 | } | 81 | } |
| 82 | 82 | ||
| 83 | void | 83 | static void |
| 84 | cirrus_ttm_global_release(struct cirrus_device *cirrus) | 84 | cirrus_ttm_global_release(struct cirrus_device *cirrus) |
| 85 | { | 85 | { |
| 86 | if (cirrus->ttm.mem_global_ref.release == NULL) | 86 | if (cirrus->ttm.mem_global_ref.release == NULL) |
| @@ -102,7 +102,7 @@ static void cirrus_bo_ttm_destroy(struct ttm_buffer_object *tbo) | |||
| 102 | kfree(bo); | 102 | kfree(bo); |
| 103 | } | 103 | } |
| 104 | 104 | ||
| 105 | bool cirrus_ttm_bo_is_cirrus_bo(struct ttm_buffer_object *bo) | 105 | static bool cirrus_ttm_bo_is_cirrus_bo(struct ttm_buffer_object *bo) |
| 106 | { | 106 | { |
| 107 | if (bo->destroy == &cirrus_bo_ttm_destroy) | 107 | if (bo->destroy == &cirrus_bo_ttm_destroy) |
| 108 | return true; | 108 | return true; |
| @@ -208,7 +208,7 @@ static struct ttm_backend_func cirrus_tt_backend_func = { | |||
| 208 | }; | 208 | }; |
| 209 | 209 | ||
| 210 | 210 | ||
| 211 | struct ttm_tt *cirrus_ttm_tt_create(struct ttm_bo_device *bdev, | 211 | static struct ttm_tt *cirrus_ttm_tt_create(struct ttm_bo_device *bdev, |
| 212 | unsigned long size, uint32_t page_flags, | 212 | unsigned long size, uint32_t page_flags, |
| 213 | struct page *dummy_read_page) | 213 | struct page *dummy_read_page) |
| 214 | { | 214 | { |
| @@ -375,26 +375,6 @@ int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr) | |||
| 375 | return 0; | 375 | return 0; |
| 376 | } | 376 | } |
| 377 | 377 | ||
| 378 | int cirrus_bo_unpin(struct cirrus_bo *bo) | ||
| 379 | { | ||
| 380 | int i, ret; | ||
| 381 | if (!bo->pin_count) { | ||
| 382 | DRM_ERROR("unpin bad %p\n", bo); | ||
| 383 | return 0; | ||
| 384 | } | ||
| 385 | bo->pin_count--; | ||
| 386 | if (bo->pin_count) | ||
| 387 | return 0; | ||
| 388 | |||
| 389 | for (i = 0; i < bo->placement.num_placement ; i++) | ||
| 390 | bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; | ||
| 391 | ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); | ||
| 392 | if (ret) | ||
| 393 | return ret; | ||
| 394 | |||
| 395 | return 0; | ||
| 396 | } | ||
| 397 | |||
| 398 | int cirrus_bo_push_sysram(struct cirrus_bo *bo) | 378 | int cirrus_bo_push_sysram(struct cirrus_bo *bo) |
| 399 | { | 379 | { |
| 400 | int i, ret; | 380 | int i, ret; |
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c index e301d653d97e..dde205cef384 100644 --- a/drivers/gpu/drm/drm_agpsupport.c +++ b/drivers/gpu/drm/drm_agpsupport.c | |||
| @@ -53,7 +53,7 @@ | |||
| 53 | */ | 53 | */ |
| 54 | int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info) | 54 | int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info) |
| 55 | { | 55 | { |
| 56 | DRM_AGP_KERN *kern; | 56 | struct agp_kern_info *kern; |
| 57 | 57 | ||
| 58 | if (!dev->agp || !dev->agp->acquired) | 58 | if (!dev->agp || !dev->agp->acquired) |
| 59 | return -EINVAL; | 59 | return -EINVAL; |
| @@ -198,17 +198,15 @@ int drm_agp_enable_ioctl(struct drm_device *dev, void *data, | |||
| 198 | int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request) | 198 | int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request) |
| 199 | { | 199 | { |
| 200 | struct drm_agp_mem *entry; | 200 | struct drm_agp_mem *entry; |
| 201 | DRM_AGP_MEM *memory; | 201 | struct agp_memory *memory; |
| 202 | unsigned long pages; | 202 | unsigned long pages; |
| 203 | u32 type; | 203 | u32 type; |
| 204 | 204 | ||
| 205 | if (!dev->agp || !dev->agp->acquired) | 205 | if (!dev->agp || !dev->agp->acquired) |
| 206 | return -EINVAL; | 206 | return -EINVAL; |
| 207 | if (!(entry = kmalloc(sizeof(*entry), GFP_KERNEL))) | 207 | if (!(entry = kzalloc(sizeof(*entry), GFP_KERNEL))) |
| 208 | return -ENOMEM; | 208 | return -ENOMEM; |
| 209 | 209 | ||
| 210 | memset(entry, 0, sizeof(*entry)); | ||
| 211 | |||
| 212 | pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE; | 210 | pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE; |
| 213 | type = (u32) request->type; | 211 | type = (u32) request->type; |
| 214 | if (!(memory = agp_allocate_memory(dev->agp->bridge, pages, type))) { | 212 | if (!(memory = agp_allocate_memory(dev->agp->bridge, pages, type))) { |
| @@ -393,14 +391,16 @@ int drm_agp_free_ioctl(struct drm_device *dev, void *data, | |||
| 393 | * Gets the drm_agp_t structure which is made available by the agpgart module | 391 | * Gets the drm_agp_t structure which is made available by the agpgart module |
| 394 | * via the inter_module_* functions. Creates and initializes a drm_agp_head | 392 | * via the inter_module_* functions. Creates and initializes a drm_agp_head |
| 395 | * structure. | 393 | * structure. |
| 394 | * | ||
| 395 | * Note that final cleanup of the kmalloced structure is directly done in | ||
| 396 | * drm_pci_agp_destroy. | ||
| 396 | */ | 397 | */ |
| 397 | struct drm_agp_head *drm_agp_init(struct drm_device *dev) | 398 | struct drm_agp_head *drm_agp_init(struct drm_device *dev) |
| 398 | { | 399 | { |
| 399 | struct drm_agp_head *head = NULL; | 400 | struct drm_agp_head *head = NULL; |
| 400 | 401 | ||
| 401 | if (!(head = kmalloc(sizeof(*head), GFP_KERNEL))) | 402 | if (!(head = kzalloc(sizeof(*head), GFP_KERNEL))) |
| 402 | return NULL; | 403 | return NULL; |
| 403 | memset((void *)head, 0, sizeof(*head)); | ||
| 404 | head->bridge = agp_find_bridge(dev->pdev); | 404 | head->bridge = agp_find_bridge(dev->pdev); |
| 405 | if (!head->bridge) { | 405 | if (!head->bridge) { |
| 406 | if (!(head->bridge = agp_backend_acquire(dev->pdev))) { | 406 | if (!(head->bridge = agp_backend_acquire(dev->pdev))) { |
| @@ -439,7 +439,7 @@ void drm_agp_clear(struct drm_device *dev) | |||
| 439 | { | 439 | { |
| 440 | struct drm_agp_mem *entry, *tempe; | 440 | struct drm_agp_mem *entry, *tempe; |
| 441 | 441 | ||
| 442 | if (!drm_core_has_AGP(dev) || !dev->agp) | 442 | if (!dev->agp) |
| 443 | return; | 443 | return; |
| 444 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 444 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
| 445 | return; | 445 | return; |
| @@ -460,35 +460,20 @@ void drm_agp_clear(struct drm_device *dev) | |||
| 460 | } | 460 | } |
| 461 | 461 | ||
| 462 | /** | 462 | /** |
| 463 | * drm_agp_destroy - Destroy AGP head | ||
| 464 | * @dev: DRM device | ||
| 465 | * | ||
| 466 | * Destroy resources that were previously allocated via drm_agp_initp. Caller | ||
| 467 | * must ensure to clean up all AGP resources before calling this. See | ||
| 468 | * drm_agp_clear(). | ||
| 469 | * | ||
| 470 | * Call this to destroy AGP heads allocated via drm_agp_init(). | ||
| 471 | */ | ||
| 472 | void drm_agp_destroy(struct drm_agp_head *agp) | ||
| 473 | { | ||
| 474 | kfree(agp); | ||
| 475 | } | ||
| 476 | |||
| 477 | /** | ||
| 478 | * Binds a collection of pages into AGP memory at the given offset, returning | 463 | * Binds a collection of pages into AGP memory at the given offset, returning |
| 479 | * the AGP memory structure containing them. | 464 | * the AGP memory structure containing them. |
| 480 | * | 465 | * |
| 481 | * No reference is held on the pages during this time -- it is up to the | 466 | * No reference is held on the pages during this time -- it is up to the |
| 482 | * caller to handle that. | 467 | * caller to handle that. |
| 483 | */ | 468 | */ |
| 484 | DRM_AGP_MEM * | 469 | struct agp_memory * |
| 485 | drm_agp_bind_pages(struct drm_device *dev, | 470 | drm_agp_bind_pages(struct drm_device *dev, |
| 486 | struct page **pages, | 471 | struct page **pages, |
| 487 | unsigned long num_pages, | 472 | unsigned long num_pages, |
| 488 | uint32_t gtt_offset, | 473 | uint32_t gtt_offset, |
| 489 | u32 type) | 474 | u32 type) |
| 490 | { | 475 | { |
| 491 | DRM_AGP_MEM *mem; | 476 | struct agp_memory *mem; |
| 492 | int ret, i; | 477 | int ret, i; |
| 493 | 478 | ||
| 494 | DRM_DEBUG("\n"); | 479 | DRM_DEBUG("\n"); |
diff --git a/drivers/gpu/drm/drm_buffer.c b/drivers/gpu/drm/drm_buffer.c index 39a718340319..0406110f83ed 100644 --- a/drivers/gpu/drm/drm_buffer.c +++ b/drivers/gpu/drm/drm_buffer.c | |||
| @@ -114,7 +114,7 @@ int drm_buffer_copy_from_user(struct drm_buffer *buf, | |||
| 114 | 114 | ||
| 115 | for (idx = 0; idx < nr_pages; ++idx) { | 115 | for (idx = 0; idx < nr_pages; ++idx) { |
| 116 | 116 | ||
| 117 | if (DRM_COPY_FROM_USER(buf->data[idx], | 117 | if (copy_from_user(buf->data[idx], |
| 118 | user_data + idx * PAGE_SIZE, | 118 | user_data + idx * PAGE_SIZE, |
| 119 | min(PAGE_SIZE, size - idx * PAGE_SIZE))) { | 119 | min(PAGE_SIZE, size - idx * PAGE_SIZE))) { |
| 120 | DRM_ERROR("Failed to copy user data (%p) to drm buffer" | 120 | DRM_ERROR("Failed to copy user data (%p) to drm buffer" |
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c index 471e051d295e..edec31fe3fed 100644 --- a/drivers/gpu/drm/drm_bufs.c +++ b/drivers/gpu/drm/drm_bufs.c | |||
| @@ -261,7 +261,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset, | |||
| 261 | struct drm_agp_mem *entry; | 261 | struct drm_agp_mem *entry; |
| 262 | int valid = 0; | 262 | int valid = 0; |
| 263 | 263 | ||
| 264 | if (!drm_core_has_AGP(dev)) { | 264 | if (!dev->agp) { |
| 265 | kfree(map); | 265 | kfree(map); |
| 266 | return -EINVAL; | 266 | return -EINVAL; |
| 267 | } | 267 | } |
| @@ -303,9 +303,6 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset, | |||
| 303 | 303 | ||
| 304 | break; | 304 | break; |
| 305 | } | 305 | } |
| 306 | case _DRM_GEM: | ||
| 307 | DRM_ERROR("tried to addmap GEM object\n"); | ||
| 308 | break; | ||
| 309 | case _DRM_SCATTER_GATHER: | 306 | case _DRM_SCATTER_GATHER: |
| 310 | if (!dev->sg) { | 307 | if (!dev->sg) { |
| 311 | kfree(map); | 308 | kfree(map); |
| @@ -483,9 +480,6 @@ int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map) | |||
| 483 | dmah.size = map->size; | 480 | dmah.size = map->size; |
| 484 | __drm_pci_free(dev, &dmah); | 481 | __drm_pci_free(dev, &dmah); |
| 485 | break; | 482 | break; |
| 486 | case _DRM_GEM: | ||
| 487 | DRM_ERROR("tried to rmmap GEM object\n"); | ||
| 488 | break; | ||
| 489 | } | 483 | } |
| 490 | kfree(map); | 484 | kfree(map); |
| 491 | 485 | ||
| @@ -1396,7 +1390,7 @@ int drm_mapbufs(struct drm_device *dev, void *data, | |||
| 1396 | spin_unlock(&dev->count_lock); | 1390 | spin_unlock(&dev->count_lock); |
| 1397 | 1391 | ||
| 1398 | if (request->count >= dma->buf_count) { | 1392 | if (request->count >= dma->buf_count) { |
| 1399 | if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) | 1393 | if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP)) |
| 1400 | || (drm_core_check_feature(dev, DRIVER_SG) | 1394 | || (drm_core_check_feature(dev, DRIVER_SG) |
| 1401 | && (dma->flags & _DRM_DMA_USE_SG))) { | 1395 | && (dma->flags & _DRM_DMA_USE_SG))) { |
| 1402 | struct drm_local_map *map = dev->agp_buffer_map; | 1396 | struct drm_local_map *map = dev->agp_buffer_map; |
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index d6cf77c472e7..3b7d32da1604 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c | |||
| @@ -675,6 +675,29 @@ void drm_crtc_cleanup(struct drm_crtc *crtc) | |||
| 675 | EXPORT_SYMBOL(drm_crtc_cleanup); | 675 | EXPORT_SYMBOL(drm_crtc_cleanup); |
| 676 | 676 | ||
| 677 | /** | 677 | /** |
| 678 | * drm_crtc_index - find the index of a registered CRTC | ||
| 679 | * @crtc: CRTC to find index for | ||
| 680 | * | ||
| 681 | * Given a registered CRTC, return the index of that CRTC within a DRM | ||
| 682 | * device's list of CRTCs. | ||
| 683 | */ | ||
| 684 | unsigned int drm_crtc_index(struct drm_crtc *crtc) | ||
| 685 | { | ||
| 686 | unsigned int index = 0; | ||
| 687 | struct drm_crtc *tmp; | ||
| 688 | |||
| 689 | list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) { | ||
| 690 | if (tmp == crtc) | ||
| 691 | return index; | ||
| 692 | |||
| 693 | index++; | ||
| 694 | } | ||
| 695 | |||
| 696 | BUG(); | ||
| 697 | } | ||
| 698 | EXPORT_SYMBOL(drm_crtc_index); | ||
| 699 | |||
| 700 | /** | ||
| 678 | * drm_mode_probed_add - add a mode to a connector's probed mode list | 701 | * drm_mode_probed_add - add a mode to a connector's probed mode list |
| 679 | * @connector: connector the new mode | 702 | * @connector: connector the new mode |
| 680 | * @mode: mode data | 703 | * @mode: mode data |
| @@ -2767,10 +2790,8 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev, | |||
| 2767 | } | 2790 | } |
| 2768 | 2791 | ||
| 2769 | if (fb->funcs->dirty) { | 2792 | if (fb->funcs->dirty) { |
| 2770 | drm_modeset_lock_all(dev); | ||
| 2771 | ret = fb->funcs->dirty(fb, file_priv, flags, r->color, | 2793 | ret = fb->funcs->dirty(fb, file_priv, flags, r->color, |
| 2772 | clips, num_clips); | 2794 | clips, num_clips); |
| 2773 | drm_modeset_unlock_all(dev); | ||
| 2774 | } else { | 2795 | } else { |
| 2775 | ret = -ENOSYS; | 2796 | ret = -ENOSYS; |
| 2776 | } | 2797 | } |
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 01361aba033b..ea92b827e787 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c | |||
| @@ -324,35 +324,6 @@ void drm_helper_disable_unused_functions(struct drm_device *dev) | |||
| 324 | } | 324 | } |
| 325 | EXPORT_SYMBOL(drm_helper_disable_unused_functions); | 325 | EXPORT_SYMBOL(drm_helper_disable_unused_functions); |
| 326 | 326 | ||
| 327 | /** | ||
| 328 | * drm_encoder_crtc_ok - can a given crtc drive a given encoder? | ||
| 329 | * @encoder: encoder to test | ||
| 330 | * @crtc: crtc to test | ||
| 331 | * | ||
| 332 | * Return false if @encoder can't be driven by @crtc, true otherwise. | ||
| 333 | */ | ||
| 334 | static bool drm_encoder_crtc_ok(struct drm_encoder *encoder, | ||
| 335 | struct drm_crtc *crtc) | ||
| 336 | { | ||
| 337 | struct drm_device *dev; | ||
| 338 | struct drm_crtc *tmp; | ||
| 339 | int crtc_mask = 1; | ||
| 340 | |||
| 341 | WARN(!crtc, "checking null crtc?\n"); | ||
| 342 | |||
| 343 | dev = crtc->dev; | ||
| 344 | |||
| 345 | list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) { | ||
| 346 | if (tmp == crtc) | ||
| 347 | break; | ||
| 348 | crtc_mask <<= 1; | ||
| 349 | } | ||
| 350 | |||
| 351 | if (encoder->possible_crtcs & crtc_mask) | ||
| 352 | return true; | ||
| 353 | return false; | ||
| 354 | } | ||
| 355 | |||
| 356 | /* | 327 | /* |
| 357 | * Check the CRTC we're going to map each output to vs. its current | 328 | * Check the CRTC we're going to map each output to vs. its current |
| 358 | * CRTC. If they don't match, we have to disable the output and the CRTC | 329 | * CRTC. If they don't match, we have to disable the output and the CRTC |
| @@ -536,7 +507,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, | |||
| 536 | * are later needed by vblank and swap-completion | 507 | * are later needed by vblank and swap-completion |
| 537 | * timestamping. They are derived from true hwmode. | 508 | * timestamping. They are derived from true hwmode. |
| 538 | */ | 509 | */ |
| 539 | drm_calc_timestamping_constants(crtc); | 510 | drm_calc_timestamping_constants(crtc, &crtc->hwmode); |
| 540 | 511 | ||
| 541 | /* FIXME: add subpixel order */ | 512 | /* FIXME: add subpixel order */ |
| 542 | done: | 513 | done: |
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index d9137e49c4e8..345be03c23db 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c | |||
| @@ -315,9 +315,6 @@ long drm_ioctl(struct file *filp, | |||
| 315 | if (drm_device_is_unplugged(dev)) | 315 | if (drm_device_is_unplugged(dev)) |
| 316 | return -ENODEV; | 316 | return -ENODEV; |
| 317 | 317 | ||
| 318 | atomic_inc(&dev->ioctl_count); | ||
| 319 | ++file_priv->ioctl_count; | ||
| 320 | |||
| 321 | if ((nr >= DRM_CORE_IOCTL_COUNT) && | 318 | if ((nr >= DRM_CORE_IOCTL_COUNT) && |
| 322 | ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END))) | 319 | ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END))) |
| 323 | goto err_i1; | 320 | goto err_i1; |
| @@ -410,7 +407,6 @@ long drm_ioctl(struct file *filp, | |||
| 410 | 407 | ||
| 411 | if (kdata != stack_kdata) | 408 | if (kdata != stack_kdata) |
| 412 | kfree(kdata); | 409 | kfree(kdata); |
| 413 | atomic_dec(&dev->ioctl_count); | ||
| 414 | if (retcode) | 410 | if (retcode) |
| 415 | DRM_DEBUG("ret = %d\n", retcode); | 411 | DRM_DEBUG("ret = %d\n", retcode); |
| 416 | return retcode; | 412 | return retcode; |
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 8835dcddfac3..b924306b8477 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c | |||
| @@ -605,347 +605,347 @@ static const struct drm_display_mode edid_cea_modes[] = { | |||
| 605 | { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656, | 605 | { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656, |
| 606 | 752, 800, 0, 480, 490, 492, 525, 0, | 606 | 752, 800, 0, 480, 490, 492, 525, 0, |
| 607 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), | 607 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), |
| 608 | .vrefresh = 60, }, | 608 | .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, |
| 609 | /* 2 - 720x480@60Hz */ | 609 | /* 2 - 720x480@60Hz */ |
| 610 | { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736, | 610 | { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736, |
| 611 | 798, 858, 0, 480, 489, 495, 525, 0, | 611 | 798, 858, 0, 480, 489, 495, 525, 0, |
| 612 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), | 612 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), |
| 613 | .vrefresh = 60, }, | 613 | .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, |
| 614 | /* 3 - 720x480@60Hz */ | 614 | /* 3 - 720x480@60Hz */ |
| 615 | { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736, | 615 | { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736, |
| 616 | 798, 858, 0, 480, 489, 495, 525, 0, | 616 | 798, 858, 0, 480, 489, 495, 525, 0, |
| 617 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), | 617 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), |
| 618 | .vrefresh = 60, }, | 618 | .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, |
| 619 | /* 4 - 1280x720@60Hz */ | 619 | /* 4 - 1280x720@60Hz */ |
| 620 | { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390, | 620 | { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390, |
| 621 | 1430, 1650, 0, 720, 725, 730, 750, 0, | 621 | 1430, 1650, 0, 720, 725, 730, 750, 0, |
| 622 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), | 622 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), |
| 623 | .vrefresh = 60, }, | 623 | .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, |
| 624 | /* 5 - 1920x1080i@60Hz */ | 624 | /* 5 - 1920x1080i@60Hz */ |
| 625 | { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008, | 625 | { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008, |
| 626 | 2052, 2200, 0, 1080, 1084, 1094, 1125, 0, | 626 | 2052, 2200, 0, 1080, 1084, 1094, 1125, 0, |
| 627 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | | 627 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | |
| 628 | DRM_MODE_FLAG_INTERLACE), | 628 | DRM_MODE_FLAG_INTERLACE), |
| 629 | .vrefresh = 60, }, | 629 | .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, |
| 630 | /* 6 - 1440x480i@60Hz */ | 630 | /* 6 - 1440x480i@60Hz */ |
| 631 | { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478, | 631 | { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478, |
| 632 | 1602, 1716, 0, 480, 488, 494, 525, 0, | 632 | 1602, 1716, 0, 480, 488, 494, 525, 0, |
| 633 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | | 633 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | |
| 634 | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), | 634 | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), |
| 635 | .vrefresh = 60, }, | 635 | .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, |
| 636 | /* 7 - 1440x480i@60Hz */ | 636 | /* 7 - 1440x480i@60Hz */ |
| 637 | { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478, | 637 | { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478, |
| 638 | 1602, 1716, 0, 480, 488, 494, 525, 0, | 638 | 1602, 1716, 0, 480, 488, 494, 525, 0, |
| 639 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | | 639 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | |
| 640 | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), | 640 | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), |
| 641 | .vrefresh = 60, }, | 641 | .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, |
| 642 | /* 8 - 1440x240@60Hz */ | 642 | /* 8 - 1440x240@60Hz */ |
| 643 | { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478, | 643 | { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478, |
| 644 | 1602, 1716, 0, 240, 244, 247, 262, 0, | 644 | 1602, 1716, 0, 240, 244, 247, 262, 0, |
| 645 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | | 645 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | |
| 646 | DRM_MODE_FLAG_DBLCLK), | 646 | DRM_MODE_FLAG_DBLCLK), |
| 647 | .vrefresh = 60, }, | 647 | .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, |
| 648 | /* 9 - 1440x240@60Hz */ | 648 | /* 9 - 1440x240@60Hz */ |
| 649 | { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478, | 649 | { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478, |
| 650 | 1602, 1716, 0, 240, 244, 247, 262, 0, | 650 | 1602, 1716, 0, 240, 244, 247, 262, 0, |
| 651 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | | 651 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | |
| 652 | DRM_MODE_FLAG_DBLCLK), | 652 | DRM_MODE_FLAG_DBLCLK), |
| 653 | .vrefresh = 60, }, | 653 | .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, |
| 654 | /* 10 - 2880x480i@60Hz */ | 654 | /* 10 - 2880x480i@60Hz */ |
| 655 | { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, | 655 | { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, |
| 656 | 3204, 3432, 0, 480, 488, 494, 525, 0, | 656 | 3204, 3432, 0, 480, 488, 494, 525, 0, |
| 657 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | | 657 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | |
| 658 | DRM_MODE_FLAG_INTERLACE), | 658 | DRM_MODE_FLAG_INTERLACE), |
| 659 | .vrefresh = 60, }, | 659 | .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, |
| 660 | /* 11 - 2880x480i@60Hz */ | 660 | /* 11 - 2880x480i@60Hz */ |
| 661 | { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, | 661 | { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, |
| 662 | 3204, 3432, 0, 480, 488, 494, 525, 0, | 662 | 3204, 3432, 0, 480, 488, 494, 525, 0, |
| 663 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | | 663 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | |
| 664 | DRM_MODE_FLAG_INTERLACE), | 664 | DRM_MODE_FLAG_INTERLACE), |
| 665 | .vrefresh = 60, }, | 665 | .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, |
| 666 | /* 12 - 2880x240@60Hz */ | 666 | /* 12 - 2880x240@60Hz */ |
| 667 | { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, | 667 | { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, |
| 668 | 3204, 3432, 0, 240, 244, 247, 262, 0, | 668 | 3204, 3432, 0, 240, 244, 247, 262, 0, |
| 669 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), | 669 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), |
| 670 | .vrefresh = 60, }, | 670 | .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, |
| 671 | /* 13 - 2880x240@60Hz */ | 671 | /* 13 - 2880x240@60Hz */ |
| 672 | { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, | 672 | { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, |
| 673 | 3204, 3432, 0, 240, 244, 247, 262, 0, | 673 | 3204, 3432, 0, 240, 244, 247, 262, 0, |
| 674 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), | 674 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), |
| 675 | .vrefresh = 60, }, | 675 | .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, |
| 676 | /* 14 - 1440x480@60Hz */ | 676 | /* 14 - 1440x480@60Hz */ |
| 677 | { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472, | 677 | { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472, |
| 678 | 1596, 1716, 0, 480, 489, 495, 525, 0, | 678 | 1596, 1716, 0, 480, 489, 495, 525, 0, |
| 679 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), | 679 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), |
| 680 | .vrefresh = 60, }, | 680 | .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, |
| 681 | /* 15 - 1440x480@60Hz */ | 681 | /* 15 - 1440x480@60Hz */ |
| 682 | { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472, | 682 | { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472, |
| 683 | 1596, 1716, 0, 480, 489, 495, 525, 0, | 683 | 1596, 1716, 0, 480, 489, 495, 525, 0, |
| 684 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), | 684 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), |
| 685 | .vrefresh = 60, }, | 685 | .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, |
| 686 | /* 16 - 1920x1080@60Hz */ | 686 | /* 16 - 1920x1080@60Hz */ |
| 687 | { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008, | 687 | { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008, |
| 688 | 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, | 688 | 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, |
| 689 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), | 689 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), |
| 690 | .vrefresh = 60, }, | 690 | .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, |
| 691 | /* 17 - 720x576@50Hz */ | 691 | /* 17 - 720x576@50Hz */ |
| 692 | { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732, | 692 | { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732, |
| 693 | 796, 864, 0, 576, 581, 586, 625, 0, | 693 | 796, 864, 0, 576, 581, 586, 625, 0, |
| 694 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), | 694 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), |
| 695 | .vrefresh = 50, }, | 695 | .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, |
| 696 | /* 18 - 720x576@50Hz */ | 696 | /* 18 - 720x576@50Hz */ |
| 697 | { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732, | 697 | { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732, |
| 698 | 796, 864, 0, 576, 581, 586, 625, 0, | 698 | 796, 864, 0, 576, 581, 586, 625, 0, |
| 699 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), | 699 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), |
| 700 | .vrefresh = 50, }, | 700 | .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, |
| 701 | /* 19 - 1280x720@50Hz */ | 701 | /* 19 - 1280x720@50Hz */ |
| 702 | { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720, | 702 | { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720, |
| 703 | 1760, 1980, 0, 720, 725, 730, 750, 0, | 703 | 1760, 1980, 0, 720, 725, 730, 750, 0, |
| 704 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), | 704 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), |
| 705 | .vrefresh = 50, }, | 705 | .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, |
| 706 | /* 20 - 1920x1080i@50Hz */ | 706 | /* 20 - 1920x1080i@50Hz */ |
| 707 | { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448, | 707 | { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448, |
| 708 | 2492, 2640, 0, 1080, 1084, 1094, 1125, 0, | 708 | 2492, 2640, 0, 1080, 1084, 1094, 1125, 0, |
| 709 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | | 709 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | |
| 710 | DRM_MODE_FLAG_INTERLACE), | 710 | DRM_MODE_FLAG_INTERLACE), |
| 711 | .vrefresh = 50, }, | 711 | .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, |
| 712 | /* 21 - 1440x576i@50Hz */ | 712 | /* 21 - 1440x576i@50Hz */ |
| 713 | { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, | 713 | { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, |
| 714 | 1590, 1728, 0, 576, 580, 586, 625, 0, | 714 | 1590, 1728, 0, 576, 580, 586, 625, 0, |
| 715 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | | 715 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | |
| 716 | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), | 716 | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), |
| 717 | .vrefresh = 50, }, | 717 | .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, |
| 718 | /* 22 - 1440x576i@50Hz */ | 718 | /* 22 - 1440x576i@50Hz */ |
| 719 | { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, | 719 | { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, |
| 720 | 1590, 1728, 0, 576, 580, 586, 625, 0, | 720 | 1590, 1728, 0, 576, 580, 586, 625, 0, |
| 721 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | | 721 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | |
| 722 | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), | 722 | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), |
| 723 | .vrefresh = 50, }, | 723 | .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, |
| 724 | /* 23 - 1440x288@50Hz */ | 724 | /* 23 - 1440x288@50Hz */ |
| 725 | { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, | 725 | { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, |
| 726 | 1590, 1728, 0, 288, 290, 293, 312, 0, | 726 | 1590, 1728, 0, 288, 290, 293, 312, 0, |
| 727 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | | 727 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | |
| 728 | DRM_MODE_FLAG_DBLCLK), | 728 | DRM_MODE_FLAG_DBLCLK), |
| 729 | .vrefresh = 50, }, | 729 | .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, |
| 730 | /* 24 - 1440x288@50Hz */ | 730 | /* 24 - 1440x288@50Hz */ |
| 731 | { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, | 731 | { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, |
| 732 | 1590, 1728, 0, 288, 290, 293, 312, 0, | 732 | 1590, 1728, 0, 288, 290, 293, 312, 0, |
| 733 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | | 733 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | |
| 734 | DRM_MODE_FLAG_DBLCLK), | 734 | DRM_MODE_FLAG_DBLCLK), |
| 735 | .vrefresh = 50, }, | 735 | .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, |
| 736 | /* 25 - 2880x576i@50Hz */ | 736 | /* 25 - 2880x576i@50Hz */ |
| 737 | { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, | 737 | { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, |
| 738 | 3180, 3456, 0, 576, 580, 586, 625, 0, | 738 | 3180, 3456, 0, 576, 580, 586, 625, 0, |
| 739 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | | 739 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | |
| 740 | DRM_MODE_FLAG_INTERLACE), | 740 | DRM_MODE_FLAG_INTERLACE), |
| 741 | .vrefresh = 50, }, | 741 | .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, |
| 742 | /* 26 - 2880x576i@50Hz */ | 742 | /* 26 - 2880x576i@50Hz */ |
| 743 | { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, | 743 | { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, |
| 744 | 3180, 3456, 0, 576, 580, 586, 625, 0, | 744 | 3180, 3456, 0, 576, 580, 586, 625, 0, |
| 745 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | | 745 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | |
| 746 | DRM_MODE_FLAG_INTERLACE), | 746 | DRM_MODE_FLAG_INTERLACE), |
| 747 | .vrefresh = 50, }, | 747 | .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, |
| 748 | /* 27 - 2880x288@50Hz */ | 748 | /* 27 - 2880x288@50Hz */ |
| 749 | { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, | 749 | { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, |
| 750 | 3180, 3456, 0, 288, 290, 293, 312, 0, | 750 | 3180, 3456, 0, 288, 290, 293, 312, 0, |
| 751 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), | 751 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), |
| 752 | .vrefresh = 50, }, | 752 | .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, |
| 753 | /* 28 - 2880x288@50Hz */ | 753 | /* 28 - 2880x288@50Hz */ |
| 754 | { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, | 754 | { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, |
| 755 | 3180, 3456, 0, 288, 290, 293, 312, 0, | 755 | 3180, 3456, 0, 288, 290, 293, 312, 0, |
| 756 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), | 756 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), |
| 757 | .vrefresh = 50, }, | 757 | .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, |
| 758 | /* 29 - 1440x576@50Hz */ | 758 | /* 29 - 1440x576@50Hz */ |
| 759 | { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464, | 759 | { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464, |
| 760 | 1592, 1728, 0, 576, 581, 586, 625, 0, | 760 | 1592, 1728, 0, 576, 581, 586, 625, 0, |
| 761 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), | 761 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), |
| 762 | .vrefresh = 50, }, | 762 | .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, |
| 763 | /* 30 - 1440x576@50Hz */ | 763 | /* 30 - 1440x576@50Hz */ |
| 764 | { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464, | 764 | { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464, |
| 765 | 1592, 1728, 0, 576, 581, 586, 625, 0, | 765 | 1592, 1728, 0, 576, 581, 586, 625, 0, |
| 766 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), | 766 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), |
| 767 | .vrefresh = 50, }, | 767 | .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, |
| 768 | /* 31 - 1920x1080@50Hz */ | 768 | /* 31 - 1920x1080@50Hz */ |
| 769 | { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448, | 769 | { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448, |
| 770 | 2492, 2640, 0, 1080, 1084, 1089, 1125, 0, | 770 | 2492, 2640, 0, 1080, 1084, 1089, 1125, 0, |
| 771 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), | 771 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), |
| 772 | .vrefresh = 50, }, | 772 | .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, |
| 773 | /* 32 - 1920x1080@24Hz */ | 773 | /* 32 - 1920x1080@24Hz */ |
| 774 | { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558, | 774 | { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558, |
| 775 | 2602, 2750, 0, 1080, 1084, 1089, 1125, 0, | 775 | 2602, 2750, 0, 1080, 1084, 1089, 1125, 0, |
| 776 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), | 776 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), |
| 777 | .vrefresh = 24, }, | 777 | .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, |
| 778 | /* 33 - 1920x1080@25Hz */ | 778 | /* 33 - 1920x1080@25Hz */ |
| 779 | { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448, | 779 | { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448, |
| 780 | 2492, 2640, 0, 1080, 1084, 1089, 1125, 0, | 780 | 2492, 2640, 0, 1080, 1084, 1089, 1125, 0, |
| 781 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), | 781 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), |
| 782 | .vrefresh = 25, }, | 782 | .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, |
| 783 | /* 34 - 1920x1080@30Hz */ | 783 | /* 34 - 1920x1080@30Hz */ |
| 784 | { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008, | 784 | { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008, |
| 785 | 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, | 785 | 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, |
| 786 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), | 786 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), |
| 787 | .vrefresh = 30, }, | 787 | .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, |
| 788 | /* 35 - 2880x480@60Hz */ | 788 | /* 35 - 2880x480@60Hz */ |
| 789 | { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944, | 789 | { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944, |
| 790 | 3192, 3432, 0, 480, 489, 495, 525, 0, | 790 | 3192, 3432, 0, 480, 489, 495, 525, 0, |
| 791 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), | 791 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), |
| 792 | .vrefresh = 60, }, | 792 | .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, |
| 793 | /* 36 - 2880x480@60Hz */ | 793 | /* 36 - 2880x480@60Hz */ |
| 794 | { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944, | 794 | { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944, |
| 795 | 3192, 3432, 0, 480, 489, 495, 525, 0, | 795 | 3192, 3432, 0, 480, 489, 495, 525, 0, |
| 796 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), | 796 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), |
| 797 | .vrefresh = 60, }, | 797 | .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, |
| 798 | /* 37 - 2880x576@50Hz */ | 798 | /* 37 - 2880x576@50Hz */ |
| 799 | { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928, | 799 | { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928, |
| 800 | 3184, 3456, 0, 576, 581, 586, 625, 0, | 800 | 3184, 3456, 0, 576, 581, 586, 625, 0, |
| 801 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), | 801 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), |
| 802 | .vrefresh = 50, }, | 802 | .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, |
| 803 | /* 38 - 2880x576@50Hz */ | 803 | /* 38 - 2880x576@50Hz */ |
| 804 | { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928, | 804 | { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928, |
| 805 | 3184, 3456, 0, 576, 581, 586, 625, 0, | 805 | 3184, 3456, 0, 576, 581, 586, 625, 0, |
| 806 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), | 806 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), |
| 807 | .vrefresh = 50, }, | 807 | .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, |
| 808 | /* 39 - 1920x1080i@50Hz */ | 808 | /* 39 - 1920x1080i@50Hz */ |
| 809 | { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952, | 809 | { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952, |
| 810 | 2120, 2304, 0, 1080, 1126, 1136, 1250, 0, | 810 | 2120, 2304, 0, 1080, 1126, 1136, 1250, 0, |
| 811 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC | | 811 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC | |
| 812 | DRM_MODE_FLAG_INTERLACE), | 812 | DRM_MODE_FLAG_INTERLACE), |
| 813 | .vrefresh = 50, }, | 813 | .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, |
| 814 | /* 40 - 1920x1080i@100Hz */ | 814 | /* 40 - 1920x1080i@100Hz */ |
| 815 | { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448, | 815 | { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448, |
| 816 | 2492, 2640, 0, 1080, 1084, 1094, 1125, 0, | 816 | 2492, 2640, 0, 1080, 1084, 1094, 1125, 0, |
| 817 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | | 817 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | |
| 818 | DRM_MODE_FLAG_INTERLACE), | 818 | DRM_MODE_FLAG_INTERLACE), |
| 819 | .vrefresh = 100, }, | 819 | .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, |
| 820 | /* 41 - 1280x720@100Hz */ | 820 | /* 41 - 1280x720@100Hz */ |
| 821 | { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720, | 821 | { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720, |
| 822 | 1760, 1980, 0, 720, 725, 730, 750, 0, | 822 | 1760, 1980, 0, 720, 725, 730, 750, 0, |
| 823 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), | 823 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), |
| 824 | .vrefresh = 100, }, | 824 | .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, |
| 825 | /* 42 - 720x576@100Hz */ | 825 | /* 42 - 720x576@100Hz */ |
| 826 | { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732, | 826 | { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732, |
| 827 | 796, 864, 0, 576, 581, 586, 625, 0, | 827 | 796, 864, 0, 576, 581, 586, 625, 0, |
| 828 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), | 828 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), |
| 829 | .vrefresh = 100, }, | 829 | .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, |
| 830 | /* 43 - 720x576@100Hz */ | 830 | /* 43 - 720x576@100Hz */ |
| 831 | { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732, | 831 | { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732, |
| 832 | 796, 864, 0, 576, 581, 586, 625, 0, | 832 | 796, 864, 0, 576, 581, 586, 625, 0, |
| 833 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), | 833 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), |
| 834 | .vrefresh = 100, }, | 834 | .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, |
| 835 | /* 44 - 1440x576i@100Hz */ | 835 | /* 44 - 1440x576i@100Hz */ |
| 836 | { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464, | 836 | { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464, |
| 837 | 1590, 1728, 0, 576, 580, 586, 625, 0, | 837 | 1590, 1728, 0, 576, 580, 586, 625, 0, |
| 838 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | | 838 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | |
| 839 | DRM_MODE_FLAG_DBLCLK), | 839 | DRM_MODE_FLAG_DBLCLK), |
| 840 | .vrefresh = 100, }, | 840 | .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, |
| 841 | /* 45 - 1440x576i@100Hz */ | 841 | /* 45 - 1440x576i@100Hz */ |
| 842 | { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464, | 842 | { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464, |
| 843 | 1590, 1728, 0, 576, 580, 586, 625, 0, | 843 | 1590, 1728, 0, 576, 580, 586, 625, 0, |
| 844 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | | 844 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | |
| 845 | DRM_MODE_FLAG_DBLCLK), | 845 | DRM_MODE_FLAG_DBLCLK), |
| 846 | .vrefresh = 100, }, | 846 | .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, |
| 847 | /* 46 - 1920x1080i@120Hz */ | 847 | /* 46 - 1920x1080i@120Hz */ |
| 848 | { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008, | 848 | { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008, |
| 849 | 2052, 2200, 0, 1080, 1084, 1094, 1125, 0, | 849 | 2052, 2200, 0, 1080, 1084, 1094, 1125, 0, |
| 850 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | | 850 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | |
| 851 | DRM_MODE_FLAG_INTERLACE), | 851 | DRM_MODE_FLAG_INTERLACE), |
| 852 | .vrefresh = 120, }, | 852 | .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, |
| 853 | /* 47 - 1280x720@120Hz */ | 853 | /* 47 - 1280x720@120Hz */ |
| 854 | { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390, | 854 | { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390, |
| 855 | 1430, 1650, 0, 720, 725, 730, 750, 0, | 855 | 1430, 1650, 0, 720, 725, 730, 750, 0, |
| 856 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), | 856 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), |
| 857 | .vrefresh = 120, }, | 857 | .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, |
| 858 | /* 48 - 720x480@120Hz */ | 858 | /* 48 - 720x480@120Hz */ |
| 859 | { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736, | 859 | { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736, |
| 860 | 798, 858, 0, 480, 489, 495, 525, 0, | 860 | 798, 858, 0, 480, 489, 495, 525, 0, |
| 861 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), | 861 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), |
| 862 | .vrefresh = 120, }, | 862 | .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, |
| 863 | /* 49 - 720x480@120Hz */ | 863 | /* 49 - 720x480@120Hz */ |
| 864 | { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736, | 864 | { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736, |
| 865 | 798, 858, 0, 480, 489, 495, 525, 0, | 865 | 798, 858, 0, 480, 489, 495, 525, 0, |
| 866 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), | 866 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), |
| 867 | .vrefresh = 120, }, | 867 | .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, |
| 868 | /* 50 - 1440x480i@120Hz */ | 868 | /* 50 - 1440x480i@120Hz */ |
| 869 | { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478, | 869 | { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478, |
| 870 | 1602, 1716, 0, 480, 488, 494, 525, 0, | 870 | 1602, 1716, 0, 480, 488, 494, 525, 0, |
| 871 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | | 871 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | |
| 872 | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), | 872 | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), |
| 873 | .vrefresh = 120, }, | 873 | .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, |
| 874 | /* 51 - 1440x480i@120Hz */ | 874 | /* 51 - 1440x480i@120Hz */ |
| 875 | { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478, | 875 | { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478, |
| 876 | 1602, 1716, 0, 480, 488, 494, 525, 0, | 876 | 1602, 1716, 0, 480, 488, 494, 525, 0, |
| 877 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | | 877 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | |
| 878 | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), | 878 | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), |
| 879 | .vrefresh = 120, }, | 879 | .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, |
| 880 | /* 52 - 720x576@200Hz */ | 880 | /* 52 - 720x576@200Hz */ |
| 881 | { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732, | 881 | { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732, |
| 882 | 796, 864, 0, 576, 581, 586, 625, 0, | 882 | 796, 864, 0, 576, 581, 586, 625, 0, |
| 883 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), | 883 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), |
| 884 | .vrefresh = 200, }, | 884 | .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, |
| 885 | /* 53 - 720x576@200Hz */ | 885 | /* 53 - 720x576@200Hz */ |
| 886 | { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732, | 886 | { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732, |
| 887 | 796, 864, 0, 576, 581, 586, 625, 0, | 887 | 796, 864, 0, 576, 581, 586, 625, 0, |
| 888 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), | 888 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), |
| 889 | .vrefresh = 200, }, | 889 | .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, |
| 890 | /* 54 - 1440x576i@200Hz */ | 890 | /* 54 - 1440x576i@200Hz */ |
| 891 | { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464, | 891 | { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464, |
| 892 | 1590, 1728, 0, 576, 580, 586, 625, 0, | 892 | 1590, 1728, 0, 576, 580, 586, 625, 0, |
| 893 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | | 893 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | |
| 894 | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), | 894 | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), |
| 895 | .vrefresh = 200, }, | 895 | .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, |
| 896 | /* 55 - 1440x576i@200Hz */ | 896 | /* 55 - 1440x576i@200Hz */ |
| 897 | { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464, | 897 | { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464, |
| 898 | 1590, 1728, 0, 576, 580, 586, 625, 0, | 898 | 1590, 1728, 0, 576, 580, 586, 625, 0, |
| 899 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | | 899 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | |
| 900 | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), | 900 | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), |
| 901 | .vrefresh = 200, }, | 901 | .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, |
| 902 | /* 56 - 720x480@240Hz */ | 902 | /* 56 - 720x480@240Hz */ |
| 903 | { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736, | 903 | { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736, |
| 904 | 798, 858, 0, 480, 489, 495, 525, 0, | 904 | 798, 858, 0, 480, 489, 495, 525, 0, |
| 905 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), | 905 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), |
| 906 | .vrefresh = 240, }, | 906 | .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, |
| 907 | /* 57 - 720x480@240Hz */ | 907 | /* 57 - 720x480@240Hz */ |
| 908 | { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736, | 908 | { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736, |
| 909 | 798, 858, 0, 480, 489, 495, 525, 0, | 909 | 798, 858, 0, 480, 489, 495, 525, 0, |
| 910 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), | 910 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), |
| 911 | .vrefresh = 240, }, | 911 | .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, |
| 912 | /* 58 - 1440x480i@240 */ | 912 | /* 58 - 1440x480i@240 */ |
| 913 | { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478, | 913 | { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478, |
| 914 | 1602, 1716, 0, 480, 488, 494, 525, 0, | 914 | 1602, 1716, 0, 480, 488, 494, 525, 0, |
| 915 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | | 915 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | |
| 916 | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), | 916 | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), |
| 917 | .vrefresh = 240, }, | 917 | .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, |
| 918 | /* 59 - 1440x480i@240 */ | 918 | /* 59 - 1440x480i@240 */ |
| 919 | { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478, | 919 | { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478, |
| 920 | 1602, 1716, 0, 480, 488, 494, 525, 0, | 920 | 1602, 1716, 0, 480, 488, 494, 525, 0, |
| 921 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | | 921 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | |
| 922 | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), | 922 | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), |
| 923 | .vrefresh = 240, }, | 923 | .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, |
| 924 | /* 60 - 1280x720@24Hz */ | 924 | /* 60 - 1280x720@24Hz */ |
| 925 | { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040, | 925 | { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040, |
| 926 | 3080, 3300, 0, 720, 725, 730, 750, 0, | 926 | 3080, 3300, 0, 720, 725, 730, 750, 0, |
| 927 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), | 927 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), |
| 928 | .vrefresh = 24, }, | 928 | .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, |
| 929 | /* 61 - 1280x720@25Hz */ | 929 | /* 61 - 1280x720@25Hz */ |
| 930 | { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700, | 930 | { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700, |
| 931 | 3740, 3960, 0, 720, 725, 730, 750, 0, | 931 | 3740, 3960, 0, 720, 725, 730, 750, 0, |
| 932 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), | 932 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), |
| 933 | .vrefresh = 25, }, | 933 | .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, |
| 934 | /* 62 - 1280x720@30Hz */ | 934 | /* 62 - 1280x720@30Hz */ |
| 935 | { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040, | 935 | { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040, |
| 936 | 3080, 3300, 0, 720, 725, 730, 750, 0, | 936 | 3080, 3300, 0, 720, 725, 730, 750, 0, |
| 937 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), | 937 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), |
| 938 | .vrefresh = 30, }, | 938 | .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, |
| 939 | /* 63 - 1920x1080@120Hz */ | 939 | /* 63 - 1920x1080@120Hz */ |
| 940 | { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008, | 940 | { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008, |
| 941 | 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, | 941 | 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, |
| 942 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), | 942 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), |
| 943 | .vrefresh = 120, }, | 943 | .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, |
| 944 | /* 64 - 1920x1080@100Hz */ | 944 | /* 64 - 1920x1080@100Hz */ |
| 945 | { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448, | 945 | { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448, |
| 946 | 2492, 2640, 0, 1080, 1084, 1094, 1125, 0, | 946 | 2492, 2640, 0, 1080, 1084, 1094, 1125, 0, |
| 947 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), | 947 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), |
| 948 | .vrefresh = 100, }, | 948 | .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, |
| 949 | }; | 949 | }; |
| 950 | 950 | ||
| 951 | /* | 951 | /* |
| @@ -2562,25 +2562,40 @@ add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid) | |||
| 2562 | return modes; | 2562 | return modes; |
| 2563 | } | 2563 | } |
| 2564 | 2564 | ||
| 2565 | static int | 2565 | static struct drm_display_mode * |
| 2566 | do_cea_modes(struct drm_connector *connector, const u8 *db, u8 len) | 2566 | drm_display_mode_from_vic_index(struct drm_connector *connector, |
| 2567 | const u8 *video_db, u8 video_len, | ||
| 2568 | u8 video_index) | ||
| 2567 | { | 2569 | { |
| 2568 | struct drm_device *dev = connector->dev; | 2570 | struct drm_device *dev = connector->dev; |
| 2569 | const u8 *mode; | 2571 | struct drm_display_mode *newmode; |
| 2570 | u8 cea_mode; | 2572 | u8 cea_mode; |
| 2571 | int modes = 0; | ||
| 2572 | 2573 | ||
| 2573 | for (mode = db; mode < db + len; mode++) { | 2574 | if (video_db == NULL || video_index >= video_len) |
| 2574 | cea_mode = (*mode & 127) - 1; /* CEA modes are numbered 1..127 */ | 2575 | return NULL; |
| 2575 | if (cea_mode < ARRAY_SIZE(edid_cea_modes)) { | 2576 | |
| 2576 | struct drm_display_mode *newmode; | 2577 | /* CEA modes are numbered 1..127 */ |
| 2577 | newmode = drm_mode_duplicate(dev, | 2578 | cea_mode = (video_db[video_index] & 127) - 1; |
| 2578 | &edid_cea_modes[cea_mode]); | 2579 | if (cea_mode >= ARRAY_SIZE(edid_cea_modes)) |
| 2579 | if (newmode) { | 2580 | return NULL; |
| 2580 | newmode->vrefresh = 0; | 2581 | |
| 2581 | drm_mode_probed_add(connector, newmode); | 2582 | newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]); |
| 2582 | modes++; | 2583 | newmode->vrefresh = 0; |
| 2583 | } | 2584 | |
| 2585 | return newmode; | ||
| 2586 | } | ||
| 2587 | |||
| 2588 | static int | ||
| 2589 | do_cea_modes(struct drm_connector *connector, const u8 *db, u8 len) | ||
| 2590 | { | ||
| 2591 | int i, modes = 0; | ||
| 2592 | |||
| 2593 | for (i = 0; i < len; i++) { | ||
| 2594 | struct drm_display_mode *mode; | ||
| 2595 | mode = drm_display_mode_from_vic_index(connector, db, len, i); | ||
| 2596 | if (mode) { | ||
| 2597 | drm_mode_probed_add(connector, mode); | ||
| 2598 | modes++; | ||
| 2584 | } | 2599 | } |
| 2585 | } | 2600 | } |
| 2586 | 2601 | ||
| @@ -2674,21 +2689,13 @@ static int add_hdmi_mode(struct drm_connector *connector, u8 vic) | |||
| 2674 | static int add_3d_struct_modes(struct drm_connector *connector, u16 structure, | 2689 | static int add_3d_struct_modes(struct drm_connector *connector, u16 structure, |
| 2675 | const u8 *video_db, u8 video_len, u8 video_index) | 2690 | const u8 *video_db, u8 video_len, u8 video_index) |
| 2676 | { | 2691 | { |
| 2677 | struct drm_device *dev = connector->dev; | ||
| 2678 | struct drm_display_mode *newmode; | 2692 | struct drm_display_mode *newmode; |
| 2679 | int modes = 0; | 2693 | int modes = 0; |
| 2680 | u8 cea_mode; | ||
| 2681 | |||
| 2682 | if (video_db == NULL || video_index >= video_len) | ||
| 2683 | return 0; | ||
| 2684 | |||
| 2685 | /* CEA modes are numbered 1..127 */ | ||
| 2686 | cea_mode = (video_db[video_index] & 127) - 1; | ||
| 2687 | if (cea_mode >= ARRAY_SIZE(edid_cea_modes)) | ||
| 2688 | return 0; | ||
| 2689 | 2694 | ||
| 2690 | if (structure & (1 << 0)) { | 2695 | if (structure & (1 << 0)) { |
| 2691 | newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]); | 2696 | newmode = drm_display_mode_from_vic_index(connector, video_db, |
| 2697 | video_len, | ||
| 2698 | video_index); | ||
| 2692 | if (newmode) { | 2699 | if (newmode) { |
| 2693 | newmode->flags |= DRM_MODE_FLAG_3D_FRAME_PACKING; | 2700 | newmode->flags |= DRM_MODE_FLAG_3D_FRAME_PACKING; |
| 2694 | drm_mode_probed_add(connector, newmode); | 2701 | drm_mode_probed_add(connector, newmode); |
| @@ -2696,7 +2703,9 @@ static int add_3d_struct_modes(struct drm_connector *connector, u16 structure, | |||
| 2696 | } | 2703 | } |
| 2697 | } | 2704 | } |
| 2698 | if (structure & (1 << 6)) { | 2705 | if (structure & (1 << 6)) { |
| 2699 | newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]); | 2706 | newmode = drm_display_mode_from_vic_index(connector, video_db, |
| 2707 | video_len, | ||
| 2708 | video_index); | ||
| 2700 | if (newmode) { | 2709 | if (newmode) { |
| 2701 | newmode->flags |= DRM_MODE_FLAG_3D_TOP_AND_BOTTOM; | 2710 | newmode->flags |= DRM_MODE_FLAG_3D_TOP_AND_BOTTOM; |
| 2702 | drm_mode_probed_add(connector, newmode); | 2711 | drm_mode_probed_add(connector, newmode); |
| @@ -2704,7 +2713,9 @@ static int add_3d_struct_modes(struct drm_connector *connector, u16 structure, | |||
| 2704 | } | 2713 | } |
| 2705 | } | 2714 | } |
| 2706 | if (structure & (1 << 8)) { | 2715 | if (structure & (1 << 8)) { |
| 2707 | newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]); | 2716 | newmode = drm_display_mode_from_vic_index(connector, video_db, |
| 2717 | video_len, | ||
| 2718 | video_index); | ||
| 2708 | if (newmode) { | 2719 | if (newmode) { |
| 2709 | newmode->flags |= DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF; | 2720 | newmode->flags |= DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF; |
| 2710 | drm_mode_probed_add(connector, newmode); | 2721 | drm_mode_probed_add(connector, newmode); |
| @@ -2728,7 +2739,7 @@ static int | |||
| 2728 | do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len, | 2739 | do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len, |
| 2729 | const u8 *video_db, u8 video_len) | 2740 | const u8 *video_db, u8 video_len) |
| 2730 | { | 2741 | { |
| 2731 | int modes = 0, offset = 0, i, multi_present = 0; | 2742 | int modes = 0, offset = 0, i, multi_present = 0, multi_len; |
| 2732 | u8 vic_len, hdmi_3d_len = 0; | 2743 | u8 vic_len, hdmi_3d_len = 0; |
| 2733 | u16 mask; | 2744 | u16 mask; |
| 2734 | u16 structure_all; | 2745 | u16 structure_all; |
| @@ -2774,32 +2785,84 @@ do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len, | |||
| 2774 | } | 2785 | } |
| 2775 | offset += 1 + vic_len; | 2786 | offset += 1 + vic_len; |
| 2776 | 2787 | ||
| 2777 | if (!(multi_present == 1 || multi_present == 2)) | 2788 | if (multi_present == 1) |
| 2778 | goto out; | 2789 | multi_len = 2; |
| 2790 | else if (multi_present == 2) | ||
| 2791 | multi_len = 4; | ||
| 2792 | else | ||
| 2793 | multi_len = 0; | ||
| 2779 | 2794 | ||
| 2780 | if ((multi_present == 1 && len < (9 + offset)) || | 2795 | if (len < (8 + offset + hdmi_3d_len - 1)) |
| 2781 | (multi_present == 2 && len < (11 + offset))) | ||
| 2782 | goto out; | 2796 | goto out; |
| 2783 | 2797 | ||
| 2784 | if ((multi_present == 1 && hdmi_3d_len < 2) || | 2798 | if (hdmi_3d_len < multi_len) |
| 2785 | (multi_present == 2 && hdmi_3d_len < 4)) | ||
| 2786 | goto out; | 2799 | goto out; |
| 2787 | 2800 | ||
| 2788 | /* 3D_Structure_ALL */ | 2801 | if (multi_present == 1 || multi_present == 2) { |
| 2789 | structure_all = (db[8 + offset] << 8) | db[9 + offset]; | 2802 | /* 3D_Structure_ALL */ |
| 2803 | structure_all = (db[8 + offset] << 8) | db[9 + offset]; | ||
| 2790 | 2804 | ||
| 2791 | /* check if 3D_MASK is present */ | 2805 | /* check if 3D_MASK is present */ |
| 2792 | if (multi_present == 2) | 2806 | if (multi_present == 2) |
| 2793 | mask = (db[10 + offset] << 8) | db[11 + offset]; | 2807 | mask = (db[10 + offset] << 8) | db[11 + offset]; |
| 2794 | else | 2808 | else |
| 2795 | mask = 0xffff; | 2809 | mask = 0xffff; |
| 2796 | 2810 | ||
| 2797 | for (i = 0; i < 16; i++) { | 2811 | for (i = 0; i < 16; i++) { |
| 2798 | if (mask & (1 << i)) | 2812 | if (mask & (1 << i)) |
| 2799 | modes += add_3d_struct_modes(connector, | 2813 | modes += add_3d_struct_modes(connector, |
| 2800 | structure_all, | 2814 | structure_all, |
| 2801 | video_db, | 2815 | video_db, |
| 2802 | video_len, i); | 2816 | video_len, i); |
| 2817 | } | ||
| 2818 | } | ||
| 2819 | |||
| 2820 | offset += multi_len; | ||
| 2821 | |||
| 2822 | for (i = 0; i < (hdmi_3d_len - multi_len); i++) { | ||
| 2823 | int vic_index; | ||
| 2824 | struct drm_display_mode *newmode = NULL; | ||
| 2825 | unsigned int newflag = 0; | ||
| 2826 | bool detail_present; | ||
| 2827 | |||
| 2828 | detail_present = ((db[8 + offset + i] & 0x0f) > 7); | ||
| 2829 | |||
| 2830 | if (detail_present && (i + 1 == hdmi_3d_len - multi_len)) | ||
| 2831 | break; | ||
| 2832 | |||
| 2833 | /* 2D_VIC_order_X */ | ||
| 2834 | vic_index = db[8 + offset + i] >> 4; | ||
| 2835 | |||
| 2836 | /* 3D_Structure_X */ | ||
| 2837 | switch (db[8 + offset + i] & 0x0f) { | ||
| 2838 | case 0: | ||
| 2839 | newflag = DRM_MODE_FLAG_3D_FRAME_PACKING; | ||
| 2840 | break; | ||
| 2841 | case 6: | ||
| 2842 | newflag = DRM_MODE_FLAG_3D_TOP_AND_BOTTOM; | ||
| 2843 | break; | ||
| 2844 | case 8: | ||
| 2845 | /* 3D_Detail_X */ | ||
| 2846 | if ((db[9 + offset + i] >> 4) == 1) | ||
| 2847 | newflag = DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF; | ||
| 2848 | break; | ||
| 2849 | } | ||
| 2850 | |||
| 2851 | if (newflag != 0) { | ||
| 2852 | newmode = drm_display_mode_from_vic_index(connector, | ||
| 2853 | video_db, | ||
| 2854 | video_len, | ||
| 2855 | vic_index); | ||
| 2856 | |||
| 2857 | if (newmode) { | ||
| 2858 | newmode->flags |= newflag; | ||
| 2859 | drm_mode_probed_add(connector, newmode); | ||
| 2860 | modes++; | ||
| 2861 | } | ||
| 2862 | } | ||
| 2863 | |||
| 2864 | if (detail_present) | ||
| 2865 | i++; | ||
| 2803 | } | 2866 | } |
| 2804 | 2867 | ||
| 2805 | out: | 2868 | out: |
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c index 9081172ef057..1b4c7a5442c5 100644 --- a/drivers/gpu/drm/drm_edid_load.c +++ b/drivers/gpu/drm/drm_edid_load.c | |||
| @@ -141,7 +141,7 @@ static int edid_size(const u8 *edid, int data_size) | |||
| 141 | return (edid[0x7e] + 1) * EDID_LENGTH; | 141 | return (edid[0x7e] + 1) * EDID_LENGTH; |
| 142 | } | 142 | } |
| 143 | 143 | ||
| 144 | static u8 *edid_load(struct drm_connector *connector, const char *name, | 144 | static void *edid_load(struct drm_connector *connector, const char *name, |
| 145 | const char *connector_name) | 145 | const char *connector_name) |
| 146 | { | 146 | { |
| 147 | const struct firmware *fw = NULL; | 147 | const struct firmware *fw = NULL; |
| @@ -263,7 +263,7 @@ int drm_load_edid_firmware(struct drm_connector *connector) | |||
| 263 | if (*last == '\n') | 263 | if (*last == '\n') |
| 264 | *last = '\0'; | 264 | *last = '\0'; |
| 265 | 265 | ||
| 266 | edid = (struct edid *) edid_load(connector, edidname, connector_name); | 266 | edid = edid_load(connector, edidname, connector_name); |
| 267 | if (IS_ERR_OR_NULL(edid)) | 267 | if (IS_ERR_OR_NULL(edid)) |
| 268 | return 0; | 268 | return 0; |
| 269 | 269 | ||
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 0a19401aff80..98a03639b413 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
| @@ -359,6 +359,11 @@ static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper) | |||
| 359 | struct drm_crtc *crtc; | 359 | struct drm_crtc *crtc; |
| 360 | int bound = 0, crtcs_bound = 0; | 360 | int bound = 0, crtcs_bound = 0; |
| 361 | 361 | ||
| 362 | /* Sometimes user space wants everything disabled, so don't steal the | ||
| 363 | * display if there's a master. */ | ||
| 364 | if (dev->primary->master) | ||
| 365 | return false; | ||
| 366 | |||
| 362 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 367 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
| 363 | if (crtc->fb) | 368 | if (crtc->fb) |
| 364 | crtcs_bound++; | 369 | crtcs_bound++; |
| @@ -368,6 +373,7 @@ static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper) | |||
| 368 | 373 | ||
| 369 | if (bound < crtcs_bound) | 374 | if (bound < crtcs_bound) |
| 370 | return false; | 375 | return false; |
| 376 | |||
| 371 | return true; | 377 | return true; |
| 372 | } | 378 | } |
| 373 | 379 | ||
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index c5b929c3f77a..7f2af9aca038 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c | |||
| @@ -232,7 +232,6 @@ static int drm_open_helper(struct inode *inode, struct file *filp, | |||
| 232 | goto out_put_pid; | 232 | goto out_put_pid; |
| 233 | } | 233 | } |
| 234 | 234 | ||
| 235 | priv->ioctl_count = 0; | ||
| 236 | /* for compatibility root is always authenticated */ | 235 | /* for compatibility root is always authenticated */ |
| 237 | priv->always_authenticated = capable(CAP_SYS_ADMIN); | 236 | priv->always_authenticated = capable(CAP_SYS_ADMIN); |
| 238 | priv->authenticated = priv->always_authenticated; | 237 | priv->authenticated = priv->always_authenticated; |
| @@ -392,9 +391,6 @@ static void drm_legacy_dev_reinit(struct drm_device *dev) | |||
| 392 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 391 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
| 393 | return; | 392 | return; |
| 394 | 393 | ||
| 395 | atomic_set(&dev->ioctl_count, 0); | ||
| 396 | atomic_set(&dev->vma_count, 0); | ||
| 397 | |||
| 398 | dev->sigdata.lock = NULL; | 394 | dev->sigdata.lock = NULL; |
| 399 | 395 | ||
| 400 | dev->context_flag = 0; | 396 | dev->context_flag = 0; |
| @@ -578,12 +574,7 @@ int drm_release(struct inode *inode, struct file *filp) | |||
| 578 | */ | 574 | */ |
| 579 | 575 | ||
| 580 | if (!--dev->open_count) { | 576 | if (!--dev->open_count) { |
| 581 | if (atomic_read(&dev->ioctl_count)) { | 577 | retcode = drm_lastclose(dev); |
| 582 | DRM_ERROR("Device busy: %d\n", | ||
| 583 | atomic_read(&dev->ioctl_count)); | ||
| 584 | retcode = -EBUSY; | ||
| 585 | } else | ||
| 586 | retcode = drm_lastclose(dev); | ||
| 587 | if (drm_device_is_unplugged(dev)) | 578 | if (drm_device_is_unplugged(dev)) |
| 588 | drm_put_dev(dev); | 579 | drm_put_dev(dev); |
| 589 | } | 580 | } |
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 4761adedad2a..5bbad873c798 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c | |||
| @@ -91,19 +91,19 @@ | |||
| 91 | int | 91 | int |
| 92 | drm_gem_init(struct drm_device *dev) | 92 | drm_gem_init(struct drm_device *dev) |
| 93 | { | 93 | { |
| 94 | struct drm_gem_mm *mm; | 94 | struct drm_vma_offset_manager *vma_offset_manager; |
| 95 | 95 | ||
| 96 | mutex_init(&dev->object_name_lock); | 96 | mutex_init(&dev->object_name_lock); |
| 97 | idr_init(&dev->object_name_idr); | 97 | idr_init(&dev->object_name_idr); |
| 98 | 98 | ||
| 99 | mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL); | 99 | vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL); |
| 100 | if (!mm) { | 100 | if (!vma_offset_manager) { |
| 101 | DRM_ERROR("out of memory\n"); | 101 | DRM_ERROR("out of memory\n"); |
| 102 | return -ENOMEM; | 102 | return -ENOMEM; |
| 103 | } | 103 | } |
| 104 | 104 | ||
| 105 | dev->mm_private = mm; | 105 | dev->vma_offset_manager = vma_offset_manager; |
| 106 | drm_vma_offset_manager_init(&mm->vma_manager, | 106 | drm_vma_offset_manager_init(vma_offset_manager, |
| 107 | DRM_FILE_PAGE_OFFSET_START, | 107 | DRM_FILE_PAGE_OFFSET_START, |
| 108 | DRM_FILE_PAGE_OFFSET_SIZE); | 108 | DRM_FILE_PAGE_OFFSET_SIZE); |
| 109 | 109 | ||
| @@ -113,11 +113,10 @@ drm_gem_init(struct drm_device *dev) | |||
| 113 | void | 113 | void |
| 114 | drm_gem_destroy(struct drm_device *dev) | 114 | drm_gem_destroy(struct drm_device *dev) |
| 115 | { | 115 | { |
| 116 | struct drm_gem_mm *mm = dev->mm_private; | ||
| 117 | 116 | ||
| 118 | drm_vma_offset_manager_destroy(&mm->vma_manager); | 117 | drm_vma_offset_manager_destroy(dev->vma_offset_manager); |
| 119 | kfree(mm); | 118 | kfree(dev->vma_offset_manager); |
| 120 | dev->mm_private = NULL; | 119 | dev->vma_offset_manager = NULL; |
| 121 | } | 120 | } |
| 122 | 121 | ||
| 123 | /** | 122 | /** |
| @@ -129,11 +128,12 @@ int drm_gem_object_init(struct drm_device *dev, | |||
| 129 | { | 128 | { |
| 130 | struct file *filp; | 129 | struct file *filp; |
| 131 | 130 | ||
| 131 | drm_gem_private_object_init(dev, obj, size); | ||
| 132 | |||
| 132 | filp = shmem_file_setup("drm mm object", size, VM_NORESERVE); | 133 | filp = shmem_file_setup("drm mm object", size, VM_NORESERVE); |
| 133 | if (IS_ERR(filp)) | 134 | if (IS_ERR(filp)) |
| 134 | return PTR_ERR(filp); | 135 | return PTR_ERR(filp); |
| 135 | 136 | ||
| 136 | drm_gem_private_object_init(dev, obj, size); | ||
| 137 | obj->filp = filp; | 137 | obj->filp = filp; |
| 138 | 138 | ||
| 139 | return 0; | 139 | return 0; |
| @@ -175,11 +175,6 @@ drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp) | |||
| 175 | mutex_unlock(&filp->prime.lock); | 175 | mutex_unlock(&filp->prime.lock); |
| 176 | } | 176 | } |
| 177 | 177 | ||
| 178 | static void drm_gem_object_ref_bug(struct kref *list_kref) | ||
| 179 | { | ||
| 180 | BUG(); | ||
| 181 | } | ||
| 182 | |||
| 183 | /** | 178 | /** |
| 184 | * Called after the last handle to the object has been closed | 179 | * Called after the last handle to the object has been closed |
| 185 | * | 180 | * |
| @@ -195,13 +190,6 @@ static void drm_gem_object_handle_free(struct drm_gem_object *obj) | |||
| 195 | if (obj->name) { | 190 | if (obj->name) { |
| 196 | idr_remove(&dev->object_name_idr, obj->name); | 191 | idr_remove(&dev->object_name_idr, obj->name); |
| 197 | obj->name = 0; | 192 | obj->name = 0; |
| 198 | /* | ||
| 199 | * The object name held a reference to this object, drop | ||
| 200 | * that now. | ||
| 201 | * | ||
| 202 | * This cannot be the last reference, since the handle holds one too. | ||
| 203 | */ | ||
| 204 | kref_put(&obj->refcount, drm_gem_object_ref_bug); | ||
| 205 | } | 193 | } |
| 206 | } | 194 | } |
| 207 | 195 | ||
| @@ -374,9 +362,8 @@ void | |||
| 374 | drm_gem_free_mmap_offset(struct drm_gem_object *obj) | 362 | drm_gem_free_mmap_offset(struct drm_gem_object *obj) |
| 375 | { | 363 | { |
| 376 | struct drm_device *dev = obj->dev; | 364 | struct drm_device *dev = obj->dev; |
| 377 | struct drm_gem_mm *mm = dev->mm_private; | ||
| 378 | 365 | ||
| 379 | drm_vma_offset_remove(&mm->vma_manager, &obj->vma_node); | 366 | drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node); |
| 380 | } | 367 | } |
| 381 | EXPORT_SYMBOL(drm_gem_free_mmap_offset); | 368 | EXPORT_SYMBOL(drm_gem_free_mmap_offset); |
| 382 | 369 | ||
| @@ -398,9 +385,8 @@ int | |||
| 398 | drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size) | 385 | drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size) |
| 399 | { | 386 | { |
| 400 | struct drm_device *dev = obj->dev; | 387 | struct drm_device *dev = obj->dev; |
| 401 | struct drm_gem_mm *mm = dev->mm_private; | ||
| 402 | 388 | ||
| 403 | return drm_vma_offset_add(&mm->vma_manager, &obj->vma_node, | 389 | return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node, |
| 404 | size / PAGE_SIZE); | 390 | size / PAGE_SIZE); |
| 405 | } | 391 | } |
| 406 | EXPORT_SYMBOL(drm_gem_create_mmap_offset_size); | 392 | EXPORT_SYMBOL(drm_gem_create_mmap_offset_size); |
| @@ -602,9 +588,6 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data, | |||
| 602 | goto err; | 588 | goto err; |
| 603 | 589 | ||
| 604 | obj->name = ret; | 590 | obj->name = ret; |
| 605 | |||
| 606 | /* Allocate a reference for the name table. */ | ||
| 607 | drm_gem_object_reference(obj); | ||
| 608 | } | 591 | } |
| 609 | 592 | ||
| 610 | args->name = (uint64_t) obj->name; | 593 | args->name = (uint64_t) obj->name; |
| @@ -833,7 +816,6 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) | |||
| 833 | { | 816 | { |
| 834 | struct drm_file *priv = filp->private_data; | 817 | struct drm_file *priv = filp->private_data; |
| 835 | struct drm_device *dev = priv->minor->dev; | 818 | struct drm_device *dev = priv->minor->dev; |
| 836 | struct drm_gem_mm *mm = dev->mm_private; | ||
| 837 | struct drm_gem_object *obj; | 819 | struct drm_gem_object *obj; |
| 838 | struct drm_vma_offset_node *node; | 820 | struct drm_vma_offset_node *node; |
| 839 | int ret = 0; | 821 | int ret = 0; |
| @@ -843,7 +825,8 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) | |||
| 843 | 825 | ||
| 844 | mutex_lock(&dev->struct_mutex); | 826 | mutex_lock(&dev->struct_mutex); |
| 845 | 827 | ||
| 846 | node = drm_vma_offset_exact_lookup(&mm->vma_manager, vma->vm_pgoff, | 828 | node = drm_vma_offset_exact_lookup(dev->vma_offset_manager, |
| 829 | vma->vm_pgoff, | ||
| 847 | vma_pages(vma)); | 830 | vma_pages(vma)); |
| 848 | if (!node) { | 831 | if (!node) { |
| 849 | mutex_unlock(&dev->struct_mutex); | 832 | mutex_unlock(&dev->struct_mutex); |
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c index 7d5a152eeb02..7473035dd28b 100644 --- a/drivers/gpu/drm/drm_info.c +++ b/drivers/gpu/drm/drm_info.c | |||
| @@ -186,14 +186,14 @@ int drm_clients_info(struct seq_file *m, void *data) | |||
| 186 | struct drm_file *priv; | 186 | struct drm_file *priv; |
| 187 | 187 | ||
| 188 | mutex_lock(&dev->struct_mutex); | 188 | mutex_lock(&dev->struct_mutex); |
| 189 | seq_printf(m, "a dev pid uid magic ioctls\n\n"); | 189 | seq_printf(m, "a dev pid uid magic\n\n"); |
| 190 | list_for_each_entry(priv, &dev->filelist, lhead) { | 190 | list_for_each_entry(priv, &dev->filelist, lhead) { |
| 191 | seq_printf(m, "%c %3d %5d %5d %10u %10lu\n", | 191 | seq_printf(m, "%c %3d %5d %5d %10u\n", |
| 192 | priv->authenticated ? 'y' : 'n', | 192 | priv->authenticated ? 'y' : 'n', |
| 193 | priv->minor->index, | 193 | priv->minor->index, |
| 194 | pid_vnr(priv->pid), | 194 | pid_vnr(priv->pid), |
| 195 | from_kuid_munged(seq_user_ns(m), priv->uid), | 195 | from_kuid_munged(seq_user_ns(m), priv->uid), |
| 196 | priv->magic, priv->ioctl_count); | 196 | priv->magic); |
| 197 | } | 197 | } |
| 198 | mutex_unlock(&dev->struct_mutex); | 198 | mutex_unlock(&dev->struct_mutex); |
| 199 | return 0; | 199 | return 0; |
| @@ -234,14 +234,18 @@ int drm_vma_info(struct seq_file *m, void *data) | |||
| 234 | struct drm_device *dev = node->minor->dev; | 234 | struct drm_device *dev = node->minor->dev; |
| 235 | struct drm_vma_entry *pt; | 235 | struct drm_vma_entry *pt; |
| 236 | struct vm_area_struct *vma; | 236 | struct vm_area_struct *vma; |
| 237 | unsigned long vma_count = 0; | ||
| 237 | #if defined(__i386__) | 238 | #if defined(__i386__) |
| 238 | unsigned int pgprot; | 239 | unsigned int pgprot; |
| 239 | #endif | 240 | #endif |
| 240 | 241 | ||
| 241 | mutex_lock(&dev->struct_mutex); | 242 | mutex_lock(&dev->struct_mutex); |
| 242 | seq_printf(m, "vma use count: %d, high_memory = %pK, 0x%pK\n", | 243 | list_for_each_entry(pt, &dev->vmalist, head) |
| 243 | atomic_read(&dev->vma_count), | 244 | vma_count++; |
| 244 | high_memory, (void *)(unsigned long)virt_to_phys(high_memory)); | 245 | |
| 246 | seq_printf(m, "vma use count: %lu, high_memory = %pK, 0x%pK\n", | ||
| 247 | vma_count, high_memory, | ||
| 248 | (void *)(unsigned long)virt_to_phys(high_memory)); | ||
| 245 | 249 | ||
| 246 | list_for_each_entry(pt, &dev->vmalist, head) { | 250 | list_for_each_entry(pt, &dev->vmalist, head) { |
| 247 | vma = pt->vma; | 251 | vma = pt->vma; |
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 64c34d5876ff..c2676b5908d9 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c | |||
| @@ -368,7 +368,7 @@ int drm_irq_uninstall(struct drm_device *dev) | |||
| 368 | if (dev->num_crtcs) { | 368 | if (dev->num_crtcs) { |
| 369 | spin_lock_irqsave(&dev->vbl_lock, irqflags); | 369 | spin_lock_irqsave(&dev->vbl_lock, irqflags); |
| 370 | for (i = 0; i < dev->num_crtcs; i++) { | 370 | for (i = 0; i < dev->num_crtcs; i++) { |
| 371 | DRM_WAKEUP(&dev->vblank[i].queue); | 371 | wake_up(&dev->vblank[i].queue); |
| 372 | dev->vblank[i].enabled = false; | 372 | dev->vblank[i].enabled = false; |
| 373 | dev->vblank[i].last = | 373 | dev->vblank[i].last = |
| 374 | dev->driver->get_vblank_counter(dev, i); | 374 | dev->driver->get_vblank_counter(dev, i); |
| @@ -436,45 +436,41 @@ int drm_control(struct drm_device *dev, void *data, | |||
| 436 | } | 436 | } |
| 437 | 437 | ||
| 438 | /** | 438 | /** |
| 439 | * drm_calc_timestamping_constants - Calculate and | 439 | * drm_calc_timestamping_constants - Calculate vblank timestamp constants |
| 440 | * store various constants which are later needed by | ||
| 441 | * vblank and swap-completion timestamping, e.g, by | ||
| 442 | * drm_calc_vbltimestamp_from_scanoutpos(). | ||
| 443 | * They are derived from crtc's true scanout timing, | ||
| 444 | * so they take things like panel scaling or other | ||
| 445 | * adjustments into account. | ||
| 446 | * | 440 | * |
| 447 | * @crtc drm_crtc whose timestamp constants should be updated. | 441 | * @crtc drm_crtc whose timestamp constants should be updated. |
| 442 | * @mode display mode containing the scanout timings | ||
| 448 | * | 443 | * |
| 444 | * Calculate and store various constants which are later | ||
| 445 | * needed by vblank and swap-completion timestamping, e.g, | ||
| 446 | * by drm_calc_vbltimestamp_from_scanoutpos(). They are | ||
| 447 | * derived from crtc's true scanout timing, so they take | ||
| 448 | * things like panel scaling or other adjustments into account. | ||
| 449 | */ | 449 | */ |
| 450 | void drm_calc_timestamping_constants(struct drm_crtc *crtc) | 450 | void drm_calc_timestamping_constants(struct drm_crtc *crtc, |
| 451 | const struct drm_display_mode *mode) | ||
| 451 | { | 452 | { |
| 452 | s64 linedur_ns = 0, pixeldur_ns = 0, framedur_ns = 0; | 453 | int linedur_ns = 0, pixeldur_ns = 0, framedur_ns = 0; |
| 453 | u64 dotclock; | 454 | int dotclock = mode->crtc_clock; |
| 454 | |||
| 455 | /* Dot clock in Hz: */ | ||
| 456 | dotclock = (u64) crtc->hwmode.clock * 1000; | ||
| 457 | |||
| 458 | /* Fields of interlaced scanout modes are only half a frame duration. | ||
| 459 | * Double the dotclock to get half the frame-/line-/pixelduration. | ||
| 460 | */ | ||
| 461 | if (crtc->hwmode.flags & DRM_MODE_FLAG_INTERLACE) | ||
| 462 | dotclock *= 2; | ||
| 463 | 455 | ||
| 464 | /* Valid dotclock? */ | 456 | /* Valid dotclock? */ |
| 465 | if (dotclock > 0) { | 457 | if (dotclock > 0) { |
| 466 | int frame_size; | 458 | int frame_size = mode->crtc_htotal * mode->crtc_vtotal; |
| 467 | /* Convert scanline length in pixels and video dot clock to | 459 | |
| 468 | * line duration, frame duration and pixel duration in | 460 | /* |
| 469 | * nanoseconds: | 461 | * Convert scanline length in pixels and video |
| 462 | * dot clock to line duration, frame duration | ||
| 463 | * and pixel duration in nanoseconds: | ||
| 470 | */ | 464 | */ |
| 471 | pixeldur_ns = (s64) div64_u64(1000000000, dotclock); | 465 | pixeldur_ns = 1000000 / dotclock; |
| 472 | linedur_ns = (s64) div64_u64(((u64) crtc->hwmode.crtc_htotal * | 466 | linedur_ns = div_u64((u64) mode->crtc_htotal * 1000000, dotclock); |
| 473 | 1000000000), dotclock); | 467 | framedur_ns = div_u64((u64) frame_size * 1000000, dotclock); |
| 474 | frame_size = crtc->hwmode.crtc_htotal * | 468 | |
| 475 | crtc->hwmode.crtc_vtotal; | 469 | /* |
| 476 | framedur_ns = (s64) div64_u64((u64) frame_size * 1000000000, | 470 | * Fields of interlaced scanout modes are only half a frame duration. |
| 477 | dotclock); | 471 | */ |
| 472 | if (mode->flags & DRM_MODE_FLAG_INTERLACE) | ||
| 473 | framedur_ns /= 2; | ||
| 478 | } else | 474 | } else |
| 479 | DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n", | 475 | DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n", |
| 480 | crtc->base.id); | 476 | crtc->base.id); |
| @@ -484,11 +480,11 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc) | |||
| 484 | crtc->framedur_ns = framedur_ns; | 480 | crtc->framedur_ns = framedur_ns; |
| 485 | 481 | ||
| 486 | DRM_DEBUG("crtc %d: hwmode: htotal %d, vtotal %d, vdisplay %d\n", | 482 | DRM_DEBUG("crtc %d: hwmode: htotal %d, vtotal %d, vdisplay %d\n", |
| 487 | crtc->base.id, crtc->hwmode.crtc_htotal, | 483 | crtc->base.id, mode->crtc_htotal, |
| 488 | crtc->hwmode.crtc_vtotal, crtc->hwmode.crtc_vdisplay); | 484 | mode->crtc_vtotal, mode->crtc_vdisplay); |
| 489 | DRM_DEBUG("crtc %d: clock %d kHz framedur %d linedur %d, pixeldur %d\n", | 485 | DRM_DEBUG("crtc %d: clock %d kHz framedur %d linedur %d, pixeldur %d\n", |
| 490 | crtc->base.id, (int) dotclock/1000, (int) framedur_ns, | 486 | crtc->base.id, dotclock, framedur_ns, |
| 491 | (int) linedur_ns, (int) pixeldur_ns); | 487 | linedur_ns, pixeldur_ns); |
| 492 | } | 488 | } |
| 493 | EXPORT_SYMBOL(drm_calc_timestamping_constants); | 489 | EXPORT_SYMBOL(drm_calc_timestamping_constants); |
| 494 | 490 | ||
| @@ -521,6 +517,7 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants); | |||
| 521 | * 0 = Default. | 517 | * 0 = Default. |
| 522 | * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler. | 518 | * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler. |
| 523 | * @refcrtc: drm_crtc* of crtc which defines scanout timing. | 519 | * @refcrtc: drm_crtc* of crtc which defines scanout timing. |
| 520 | * @mode: mode which defines the scanout timings | ||
| 524 | * | 521 | * |
| 525 | * Returns negative value on error, failure or if not supported in current | 522 | * Returns negative value on error, failure or if not supported in current |
| 526 | * video mode: | 523 | * video mode: |
| @@ -540,14 +537,14 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, | |||
| 540 | int *max_error, | 537 | int *max_error, |
| 541 | struct timeval *vblank_time, | 538 | struct timeval *vblank_time, |
| 542 | unsigned flags, | 539 | unsigned flags, |
| 543 | struct drm_crtc *refcrtc) | 540 | const struct drm_crtc *refcrtc, |
| 541 | const struct drm_display_mode *mode) | ||
| 544 | { | 542 | { |
| 545 | ktime_t stime, etime, mono_time_offset; | 543 | ktime_t stime, etime, mono_time_offset; |
| 546 | struct timeval tv_etime; | 544 | struct timeval tv_etime; |
| 547 | struct drm_display_mode *mode; | 545 | int vbl_status; |
| 548 | int vbl_status, vtotal, vdisplay; | ||
| 549 | int vpos, hpos, i; | 546 | int vpos, hpos, i; |
| 550 | s64 framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns; | 547 | int framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns; |
| 551 | bool invbl; | 548 | bool invbl; |
| 552 | 549 | ||
| 553 | if (crtc < 0 || crtc >= dev->num_crtcs) { | 550 | if (crtc < 0 || crtc >= dev->num_crtcs) { |
| @@ -561,10 +558,6 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, | |||
| 561 | return -EIO; | 558 | return -EIO; |
| 562 | } | 559 | } |
| 563 | 560 | ||
| 564 | mode = &refcrtc->hwmode; | ||
| 565 | vtotal = mode->crtc_vtotal; | ||
| 566 | vdisplay = mode->crtc_vdisplay; | ||
| 567 | |||
| 568 | /* Durations of frames, lines, pixels in nanoseconds. */ | 561 | /* Durations of frames, lines, pixels in nanoseconds. */ |
| 569 | framedur_ns = refcrtc->framedur_ns; | 562 | framedur_ns = refcrtc->framedur_ns; |
| 570 | linedur_ns = refcrtc->linedur_ns; | 563 | linedur_ns = refcrtc->linedur_ns; |
| @@ -573,7 +566,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, | |||
| 573 | /* If mode timing undefined, just return as no-op: | 566 | /* If mode timing undefined, just return as no-op: |
| 574 | * Happens during initial modesetting of a crtc. | 567 | * Happens during initial modesetting of a crtc. |
| 575 | */ | 568 | */ |
| 576 | if (vtotal <= 0 || vdisplay <= 0 || framedur_ns == 0) { | 569 | if (framedur_ns == 0) { |
| 577 | DRM_DEBUG("crtc %d: Noop due to uninitialized mode.\n", crtc); | 570 | DRM_DEBUG("crtc %d: Noop due to uninitialized mode.\n", crtc); |
| 578 | return -EAGAIN; | 571 | return -EAGAIN; |
| 579 | } | 572 | } |
| @@ -590,7 +583,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, | |||
| 590 | * Get vertical and horizontal scanout position vpos, hpos, | 583 | * Get vertical and horizontal scanout position vpos, hpos, |
| 591 | * and bounding timestamps stime, etime, pre/post query. | 584 | * and bounding timestamps stime, etime, pre/post query. |
| 592 | */ | 585 | */ |
| 593 | vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, | 586 | vbl_status = dev->driver->get_scanout_position(dev, crtc, flags, &vpos, |
| 594 | &hpos, &stime, &etime); | 587 | &hpos, &stime, &etime); |
| 595 | 588 | ||
| 596 | /* | 589 | /* |
| @@ -611,18 +604,18 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, | |||
| 611 | duration_ns = ktime_to_ns(etime) - ktime_to_ns(stime); | 604 | duration_ns = ktime_to_ns(etime) - ktime_to_ns(stime); |
| 612 | 605 | ||
| 613 | /* Accept result with < max_error nsecs timing uncertainty. */ | 606 | /* Accept result with < max_error nsecs timing uncertainty. */ |
| 614 | if (duration_ns <= (s64) *max_error) | 607 | if (duration_ns <= *max_error) |
| 615 | break; | 608 | break; |
| 616 | } | 609 | } |
| 617 | 610 | ||
| 618 | /* Noisy system timing? */ | 611 | /* Noisy system timing? */ |
| 619 | if (i == DRM_TIMESTAMP_MAXRETRIES) { | 612 | if (i == DRM_TIMESTAMP_MAXRETRIES) { |
| 620 | DRM_DEBUG("crtc %d: Noisy timestamp %d us > %d us [%d reps].\n", | 613 | DRM_DEBUG("crtc %d: Noisy timestamp %d us > %d us [%d reps].\n", |
| 621 | crtc, (int) duration_ns/1000, *max_error/1000, i); | 614 | crtc, duration_ns/1000, *max_error/1000, i); |
| 622 | } | 615 | } |
| 623 | 616 | ||
| 624 | /* Return upper bound of timestamp precision error. */ | 617 | /* Return upper bound of timestamp precision error. */ |
| 625 | *max_error = (int) duration_ns; | 618 | *max_error = duration_ns; |
| 626 | 619 | ||
| 627 | /* Check if in vblank area: | 620 | /* Check if in vblank area: |
| 628 | * vpos is >=0 in video scanout area, but negative | 621 | * vpos is >=0 in video scanout area, but negative |
| @@ -635,25 +628,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, | |||
| 635 | * since start of scanout at first display scanline. delta_ns | 628 | * since start of scanout at first display scanline. delta_ns |
| 636 | * can be negative if start of scanout hasn't happened yet. | 629 | * can be negative if start of scanout hasn't happened yet. |
| 637 | */ | 630 | */ |
| 638 | delta_ns = (s64) vpos * linedur_ns + (s64) hpos * pixeldur_ns; | 631 | delta_ns = vpos * linedur_ns + hpos * pixeldur_ns; |
| 639 | |||
| 640 | /* Is vpos outside nominal vblank area, but less than | ||
| 641 | * 1/100 of a frame height away from start of vblank? | ||
| 642 | * If so, assume this isn't a massively delayed vblank | ||
| 643 | * interrupt, but a vblank interrupt that fired a few | ||
| 644 | * microseconds before true start of vblank. Compensate | ||
| 645 | * by adding a full frame duration to the final timestamp. | ||
| 646 | * Happens, e.g., on ATI R500, R600. | ||
| 647 | * | ||
| 648 | * We only do this if DRM_CALLED_FROM_VBLIRQ. | ||
| 649 | */ | ||
| 650 | if ((flags & DRM_CALLED_FROM_VBLIRQ) && !invbl && | ||
| 651 | ((vdisplay - vpos) < vtotal / 100)) { | ||
| 652 | delta_ns = delta_ns - framedur_ns; | ||
| 653 | |||
| 654 | /* Signal this correction as "applied". */ | ||
| 655 | vbl_status |= 0x8; | ||
| 656 | } | ||
| 657 | 632 | ||
| 658 | if (!drm_timestamp_monotonic) | 633 | if (!drm_timestamp_monotonic) |
| 659 | etime = ktime_sub(etime, mono_time_offset); | 634 | etime = ktime_sub(etime, mono_time_offset); |
| @@ -673,7 +648,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, | |||
| 673 | crtc, (int)vbl_status, hpos, vpos, | 648 | crtc, (int)vbl_status, hpos, vpos, |
| 674 | (long)tv_etime.tv_sec, (long)tv_etime.tv_usec, | 649 | (long)tv_etime.tv_sec, (long)tv_etime.tv_usec, |
| 675 | (long)vblank_time->tv_sec, (long)vblank_time->tv_usec, | 650 | (long)vblank_time->tv_sec, (long)vblank_time->tv_usec, |
| 676 | (int)duration_ns/1000, i); | 651 | duration_ns/1000, i); |
| 677 | 652 | ||
| 678 | vbl_status = DRM_VBLANKTIME_SCANOUTPOS_METHOD; | 653 | vbl_status = DRM_VBLANKTIME_SCANOUTPOS_METHOD; |
| 679 | if (invbl) | 654 | if (invbl) |
| @@ -960,7 +935,7 @@ void drm_vblank_put(struct drm_device *dev, int crtc) | |||
| 960 | if (atomic_dec_and_test(&dev->vblank[crtc].refcount) && | 935 | if (atomic_dec_and_test(&dev->vblank[crtc].refcount) && |
| 961 | (drm_vblank_offdelay > 0)) | 936 | (drm_vblank_offdelay > 0)) |
| 962 | mod_timer(&dev->vblank_disable_timer, | 937 | mod_timer(&dev->vblank_disable_timer, |
| 963 | jiffies + ((drm_vblank_offdelay * DRM_HZ)/1000)); | 938 | jiffies + ((drm_vblank_offdelay * HZ)/1000)); |
| 964 | } | 939 | } |
| 965 | EXPORT_SYMBOL(drm_vblank_put); | 940 | EXPORT_SYMBOL(drm_vblank_put); |
| 966 | 941 | ||
| @@ -980,7 +955,7 @@ void drm_vblank_off(struct drm_device *dev, int crtc) | |||
| 980 | 955 | ||
| 981 | spin_lock_irqsave(&dev->vbl_lock, irqflags); | 956 | spin_lock_irqsave(&dev->vbl_lock, irqflags); |
| 982 | vblank_disable_and_save(dev, crtc); | 957 | vblank_disable_and_save(dev, crtc); |
| 983 | DRM_WAKEUP(&dev->vblank[crtc].queue); | 958 | wake_up(&dev->vblank[crtc].queue); |
| 984 | 959 | ||
| 985 | /* Send any queued vblank events, lest the natives grow disquiet */ | 960 | /* Send any queued vblank events, lest the natives grow disquiet */ |
| 986 | seq = drm_vblank_count_and_time(dev, crtc, &now); | 961 | seq = drm_vblank_count_and_time(dev, crtc, &now); |
| @@ -1244,7 +1219,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data, | |||
| 1244 | DRM_DEBUG("waiting on vblank count %d, crtc %d\n", | 1219 | DRM_DEBUG("waiting on vblank count %d, crtc %d\n", |
| 1245 | vblwait->request.sequence, crtc); | 1220 | vblwait->request.sequence, crtc); |
| 1246 | dev->vblank[crtc].last_wait = vblwait->request.sequence; | 1221 | dev->vblank[crtc].last_wait = vblwait->request.sequence; |
| 1247 | DRM_WAIT_ON(ret, dev->vblank[crtc].queue, 3 * DRM_HZ, | 1222 | DRM_WAIT_ON(ret, dev->vblank[crtc].queue, 3 * HZ, |
| 1248 | (((drm_vblank_count(dev, crtc) - | 1223 | (((drm_vblank_count(dev, crtc) - |
| 1249 | vblwait->request.sequence) <= (1 << 23)) || | 1224 | vblwait->request.sequence) <= (1 << 23)) || |
| 1250 | !dev->irq_enabled)); | 1225 | !dev->irq_enabled)); |
| @@ -1363,7 +1338,7 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc) | |||
| 1363 | crtc, (int) diff_ns); | 1338 | crtc, (int) diff_ns); |
| 1364 | } | 1339 | } |
| 1365 | 1340 | ||
| 1366 | DRM_WAKEUP(&dev->vblank[crtc].queue); | 1341 | wake_up(&dev->vblank[crtc].queue); |
| 1367 | drm_handle_vblank_events(dev, crtc); | 1342 | drm_handle_vblank_events(dev, crtc); |
| 1368 | 1343 | ||
| 1369 | spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); | 1344 | spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); |
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c index 64e44fad8ae8..00c67c0f2381 100644 --- a/drivers/gpu/drm/drm_memory.c +++ b/drivers/gpu/drm/drm_memory.c | |||
| @@ -82,19 +82,19 @@ static void *agp_remap(unsigned long offset, unsigned long size, | |||
| 82 | } | 82 | } |
| 83 | 83 | ||
| 84 | /** Wrapper around agp_free_memory() */ | 84 | /** Wrapper around agp_free_memory() */ |
| 85 | void drm_free_agp(DRM_AGP_MEM * handle, int pages) | 85 | void drm_free_agp(struct agp_memory * handle, int pages) |
| 86 | { | 86 | { |
| 87 | agp_free_memory(handle); | 87 | agp_free_memory(handle); |
| 88 | } | 88 | } |
| 89 | 89 | ||
| 90 | /** Wrapper around agp_bind_memory() */ | 90 | /** Wrapper around agp_bind_memory() */ |
| 91 | int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start) | 91 | int drm_bind_agp(struct agp_memory * handle, unsigned int start) |
| 92 | { | 92 | { |
| 93 | return agp_bind_memory(handle, start); | 93 | return agp_bind_memory(handle, start); |
| 94 | } | 94 | } |
| 95 | 95 | ||
| 96 | /** Wrapper around agp_unbind_memory() */ | 96 | /** Wrapper around agp_unbind_memory() */ |
| 97 | int drm_unbind_agp(DRM_AGP_MEM * handle) | 97 | int drm_unbind_agp(struct agp_memory * handle) |
| 98 | { | 98 | { |
| 99 | return agp_unbind_memory(handle); | 99 | return agp_unbind_memory(handle); |
| 100 | } | 100 | } |
| @@ -110,8 +110,7 @@ static inline void *agp_remap(unsigned long offset, unsigned long size, | |||
| 110 | 110 | ||
| 111 | void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev) | 111 | void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev) |
| 112 | { | 112 | { |
| 113 | if (drm_core_has_AGP(dev) && | 113 | if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP) |
| 114 | dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP) | ||
| 115 | map->handle = agp_remap(map->offset, map->size, dev); | 114 | map->handle = agp_remap(map->offset, map->size, dev); |
| 116 | else | 115 | else |
| 117 | map->handle = ioremap(map->offset, map->size); | 116 | map->handle = ioremap(map->offset, map->size); |
| @@ -120,8 +119,7 @@ EXPORT_SYMBOL(drm_core_ioremap); | |||
| 120 | 119 | ||
| 121 | void drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev) | 120 | void drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev) |
| 122 | { | 121 | { |
| 123 | if (drm_core_has_AGP(dev) && | 122 | if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP) |
| 124 | dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP) | ||
| 125 | map->handle = agp_remap(map->offset, map->size, dev); | 123 | map->handle = agp_remap(map->offset, map->size, dev); |
| 126 | else | 124 | else |
| 127 | map->handle = ioremap_wc(map->offset, map->size); | 125 | map->handle = ioremap_wc(map->offset, map->size); |
| @@ -133,8 +131,7 @@ void drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev) | |||
| 133 | if (!map->handle || !map->size) | 131 | if (!map->handle || !map->size) |
| 134 | return; | 132 | return; |
| 135 | 133 | ||
| 136 | if (drm_core_has_AGP(dev) && | 134 | if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP) |
| 137 | dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP) | ||
| 138 | vunmap(map->handle); | 135 | vunmap(map->handle); |
| 139 | else | 136 | else |
| 140 | iounmap(map->handle); | 137 | iounmap(map->handle); |
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c new file mode 100644 index 000000000000..b155ee2ffa17 --- /dev/null +++ b/drivers/gpu/drm/drm_mipi_dsi.c | |||
| @@ -0,0 +1,315 @@ | |||
| 1 | /* | ||
| 2 | * MIPI DSI Bus | ||
| 3 | * | ||
| 4 | * Copyright (C) 2012-2013, Samsung Electronics, Co., Ltd. | ||
| 5 | * Andrzej Hajda <a.hajda@samsung.com> | ||
| 6 | * | ||
| 7 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 8 | * copy of this software and associated documentation files (the | ||
| 9 | * "Software"), to deal in the Software without restriction, including | ||
| 10 | * without limitation the rights to use, copy, modify, merge, publish, | ||
| 11 | * distribute, sub license, and/or sell copies of the Software, and to | ||
| 12 | * permit persons to whom the Software is furnished to do so, subject to | ||
| 13 | * the following conditions: | ||
| 14 | * | ||
| 15 | * The above copyright notice and this permission notice (including the | ||
| 16 | * next paragraph) shall be included in all copies or substantial portions | ||
| 17 | * of the Software. | ||
| 18 | * | ||
| 19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 21 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
| 22 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
| 23 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
| 24 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
| 25 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
| 26 | */ | ||
| 27 | |||
| 28 | #include <drm/drm_mipi_dsi.h> | ||
| 29 | |||
| 30 | #include <linux/device.h> | ||
| 31 | #include <linux/module.h> | ||
| 32 | #include <linux/of_device.h> | ||
| 33 | #include <linux/pm_runtime.h> | ||
| 34 | #include <linux/slab.h> | ||
| 35 | |||
| 36 | #include <video/mipi_display.h> | ||
| 37 | |||
| 38 | static int mipi_dsi_device_match(struct device *dev, struct device_driver *drv) | ||
| 39 | { | ||
| 40 | return of_driver_match_device(dev, drv); | ||
| 41 | } | ||
| 42 | |||
| 43 | static const struct dev_pm_ops mipi_dsi_device_pm_ops = { | ||
| 44 | .runtime_suspend = pm_generic_runtime_suspend, | ||
| 45 | .runtime_resume = pm_generic_runtime_resume, | ||
| 46 | .suspend = pm_generic_suspend, | ||
| 47 | .resume = pm_generic_resume, | ||
| 48 | .freeze = pm_generic_freeze, | ||
| 49 | .thaw = pm_generic_thaw, | ||
| 50 | .poweroff = pm_generic_poweroff, | ||
| 51 | .restore = pm_generic_restore, | ||
| 52 | }; | ||
| 53 | |||
| 54 | static struct bus_type mipi_dsi_bus_type = { | ||
| 55 | .name = "mipi-dsi", | ||
| 56 | .match = mipi_dsi_device_match, | ||
| 57 | .pm = &mipi_dsi_device_pm_ops, | ||
| 58 | }; | ||
| 59 | |||
| 60 | static void mipi_dsi_dev_release(struct device *dev) | ||
| 61 | { | ||
| 62 | struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev); | ||
| 63 | |||
| 64 | of_node_put(dev->of_node); | ||
| 65 | kfree(dsi); | ||
| 66 | } | ||
| 67 | |||
| 68 | static const struct device_type mipi_dsi_device_type = { | ||
| 69 | .release = mipi_dsi_dev_release, | ||
| 70 | }; | ||
| 71 | |||
| 72 | static struct mipi_dsi_device *mipi_dsi_device_alloc(struct mipi_dsi_host *host) | ||
| 73 | { | ||
| 74 | struct mipi_dsi_device *dsi; | ||
| 75 | |||
| 76 | dsi = kzalloc(sizeof(*dsi), GFP_KERNEL); | ||
| 77 | if (!dsi) | ||
| 78 | return ERR_PTR(-ENOMEM); | ||
| 79 | |||
| 80 | dsi->host = host; | ||
| 81 | dsi->dev.bus = &mipi_dsi_bus_type; | ||
| 82 | dsi->dev.parent = host->dev; | ||
| 83 | dsi->dev.type = &mipi_dsi_device_type; | ||
| 84 | |||
| 85 | device_initialize(&dsi->dev); | ||
| 86 | |||
| 87 | return dsi; | ||
| 88 | } | ||
| 89 | |||
| 90 | static int mipi_dsi_device_add(struct mipi_dsi_device *dsi) | ||
| 91 | { | ||
| 92 | struct mipi_dsi_host *host = dsi->host; | ||
| 93 | |||
| 94 | dev_set_name(&dsi->dev, "%s.%d", dev_name(host->dev), dsi->channel); | ||
| 95 | |||
| 96 | return device_add(&dsi->dev); | ||
| 97 | } | ||
| 98 | |||
| 99 | static struct mipi_dsi_device * | ||
| 100 | of_mipi_dsi_device_add(struct mipi_dsi_host *host, struct device_node *node) | ||
| 101 | { | ||
| 102 | struct mipi_dsi_device *dsi; | ||
| 103 | struct device *dev = host->dev; | ||
| 104 | int ret; | ||
| 105 | u32 reg; | ||
| 106 | |||
| 107 | ret = of_property_read_u32(node, "reg", ®); | ||
| 108 | if (ret) { | ||
| 109 | dev_err(dev, "device node %s has no valid reg property: %d\n", | ||
| 110 | node->full_name, ret); | ||
| 111 | return ERR_PTR(-EINVAL); | ||
| 112 | } | ||
| 113 | |||
| 114 | if (reg > 3) { | ||
| 115 | dev_err(dev, "device node %s has invalid reg property: %u\n", | ||
| 116 | node->full_name, reg); | ||
| 117 | return ERR_PTR(-EINVAL); | ||
| 118 | } | ||
| 119 | |||
| 120 | dsi = mipi_dsi_device_alloc(host); | ||
| 121 | if (IS_ERR(dsi)) { | ||
| 122 | dev_err(dev, "failed to allocate DSI device %s: %ld\n", | ||
| 123 | node->full_name, PTR_ERR(dsi)); | ||
| 124 | return dsi; | ||
| 125 | } | ||
| 126 | |||
| 127 | dsi->dev.of_node = of_node_get(node); | ||
| 128 | dsi->channel = reg; | ||
| 129 | |||
| 130 | ret = mipi_dsi_device_add(dsi); | ||
| 131 | if (ret) { | ||
| 132 | dev_err(dev, "failed to add DSI device %s: %d\n", | ||
| 133 | node->full_name, ret); | ||
| 134 | kfree(dsi); | ||
| 135 | return ERR_PTR(ret); | ||
| 136 | } | ||
| 137 | |||
| 138 | return dsi; | ||
| 139 | } | ||
| 140 | |||
| 141 | int mipi_dsi_host_register(struct mipi_dsi_host *host) | ||
| 142 | { | ||
| 143 | struct device_node *node; | ||
| 144 | |||
| 145 | for_each_available_child_of_node(host->dev->of_node, node) | ||
| 146 | of_mipi_dsi_device_add(host, node); | ||
| 147 | |||
| 148 | return 0; | ||
| 149 | } | ||
| 150 | EXPORT_SYMBOL(mipi_dsi_host_register); | ||
| 151 | |||
| 152 | static int mipi_dsi_remove_device_fn(struct device *dev, void *priv) | ||
| 153 | { | ||
| 154 | struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev); | ||
| 155 | |||
| 156 | device_unregister(&dsi->dev); | ||
| 157 | |||
| 158 | return 0; | ||
| 159 | } | ||
| 160 | |||
| 161 | void mipi_dsi_host_unregister(struct mipi_dsi_host *host) | ||
| 162 | { | ||
| 163 | device_for_each_child(host->dev, NULL, mipi_dsi_remove_device_fn); | ||
| 164 | } | ||
| 165 | EXPORT_SYMBOL(mipi_dsi_host_unregister); | ||
| 166 | |||
| 167 | /** | ||
| 168 | * mipi_dsi_attach - attach a DSI device to its DSI host | ||
| 169 | * @dsi: DSI peripheral | ||
| 170 | */ | ||
| 171 | int mipi_dsi_attach(struct mipi_dsi_device *dsi) | ||
| 172 | { | ||
| 173 | const struct mipi_dsi_host_ops *ops = dsi->host->ops; | ||
| 174 | |||
| 175 | if (!ops || !ops->attach) | ||
| 176 | return -ENOSYS; | ||
| 177 | |||
| 178 | return ops->attach(dsi->host, dsi); | ||
| 179 | } | ||
| 180 | EXPORT_SYMBOL(mipi_dsi_attach); | ||
| 181 | |||
| 182 | /** | ||
| 183 | * mipi_dsi_detach - detach a DSI device from its DSI host | ||
| 184 | * @dsi: DSI peripheral | ||
| 185 | */ | ||
| 186 | int mipi_dsi_detach(struct mipi_dsi_device *dsi) | ||
| 187 | { | ||
| 188 | const struct mipi_dsi_host_ops *ops = dsi->host->ops; | ||
| 189 | |||
| 190 | if (!ops || !ops->detach) | ||
| 191 | return -ENOSYS; | ||
| 192 | |||
| 193 | return ops->detach(dsi->host, dsi); | ||
| 194 | } | ||
| 195 | EXPORT_SYMBOL(mipi_dsi_detach); | ||
| 196 | |||
| 197 | /** | ||
| 198 | * mipi_dsi_dcs_write - send DCS write command | ||
| 199 | * @dsi: DSI device | ||
| 200 | * @channel: virtual channel | ||
| 201 | * @data: pointer to the command followed by parameters | ||
| 202 | * @len: length of @data | ||
| 203 | */ | ||
| 204 | int mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, unsigned int channel, | ||
| 205 | const void *data, size_t len) | ||
| 206 | { | ||
| 207 | const struct mipi_dsi_host_ops *ops = dsi->host->ops; | ||
| 208 | struct mipi_dsi_msg msg = { | ||
| 209 | .channel = channel, | ||
| 210 | .tx_buf = data, | ||
| 211 | .tx_len = len | ||
| 212 | }; | ||
| 213 | |||
| 214 | if (!ops || !ops->transfer) | ||
| 215 | return -ENOSYS; | ||
| 216 | |||
| 217 | switch (len) { | ||
| 218 | case 0: | ||
| 219 | return -EINVAL; | ||
| 220 | case 1: | ||
| 221 | msg.type = MIPI_DSI_DCS_SHORT_WRITE; | ||
| 222 | break; | ||
| 223 | case 2: | ||
| 224 | msg.type = MIPI_DSI_DCS_SHORT_WRITE_PARAM; | ||
| 225 | break; | ||
| 226 | default: | ||
| 227 | msg.type = MIPI_DSI_DCS_LONG_WRITE; | ||
| 228 | break; | ||
| 229 | } | ||
| 230 | |||
| 231 | return ops->transfer(dsi->host, &msg); | ||
| 232 | } | ||
| 233 | EXPORT_SYMBOL(mipi_dsi_dcs_write); | ||
| 234 | |||
| 235 | /** | ||
| 236 | * mipi_dsi_dcs_read - send DCS read request command | ||
| 237 | * @dsi: DSI device | ||
| 238 | * @channel: virtual channel | ||
| 239 | * @cmd: DCS read command | ||
| 240 | * @data: pointer to read buffer | ||
| 241 | * @len: length of @data | ||
| 242 | * | ||
| 243 | * Function returns number of read bytes or error code. | ||
| 244 | */ | ||
| 245 | ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, unsigned int channel, | ||
| 246 | u8 cmd, void *data, size_t len) | ||
| 247 | { | ||
| 248 | const struct mipi_dsi_host_ops *ops = dsi->host->ops; | ||
| 249 | struct mipi_dsi_msg msg = { | ||
| 250 | .channel = channel, | ||
| 251 | .type = MIPI_DSI_DCS_READ, | ||
| 252 | .tx_buf = &cmd, | ||
| 253 | .tx_len = 1, | ||
| 254 | .rx_buf = data, | ||
| 255 | .rx_len = len | ||
| 256 | }; | ||
| 257 | |||
| 258 | if (!ops || !ops->transfer) | ||
| 259 | return -ENOSYS; | ||
| 260 | |||
| 261 | return ops->transfer(dsi->host, &msg); | ||
| 262 | } | ||
| 263 | EXPORT_SYMBOL(mipi_dsi_dcs_read); | ||
| 264 | |||
| 265 | static int mipi_dsi_drv_probe(struct device *dev) | ||
| 266 | { | ||
| 267 | struct mipi_dsi_driver *drv = to_mipi_dsi_driver(dev->driver); | ||
| 268 | struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev); | ||
| 269 | |||
| 270 | return drv->probe(dsi); | ||
| 271 | } | ||
| 272 | |||
| 273 | static int mipi_dsi_drv_remove(struct device *dev) | ||
| 274 | { | ||
| 275 | struct mipi_dsi_driver *drv = to_mipi_dsi_driver(dev->driver); | ||
| 276 | struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev); | ||
| 277 | |||
| 278 | return drv->remove(dsi); | ||
| 279 | } | ||
| 280 | |||
| 281 | /** | ||
| 282 | * mipi_dsi_driver_register - register a driver for DSI devices | ||
| 283 | * @drv: DSI driver structure | ||
| 284 | */ | ||
| 285 | int mipi_dsi_driver_register(struct mipi_dsi_driver *drv) | ||
| 286 | { | ||
| 287 | drv->driver.bus = &mipi_dsi_bus_type; | ||
| 288 | if (drv->probe) | ||
| 289 | drv->driver.probe = mipi_dsi_drv_probe; | ||
| 290 | if (drv->remove) | ||
| 291 | drv->driver.remove = mipi_dsi_drv_remove; | ||
| 292 | |||
| 293 | return driver_register(&drv->driver); | ||
| 294 | } | ||
| 295 | EXPORT_SYMBOL(mipi_dsi_driver_register); | ||
| 296 | |||
| 297 | /** | ||
| 298 | * mipi_dsi_driver_unregister - unregister a driver for DSI devices | ||
| 299 | * @drv: DSI driver structure | ||
| 300 | */ | ||
| 301 | void mipi_dsi_driver_unregister(struct mipi_dsi_driver *drv) | ||
| 302 | { | ||
| 303 | driver_unregister(&drv->driver); | ||
| 304 | } | ||
| 305 | EXPORT_SYMBOL(mipi_dsi_driver_unregister); | ||
| 306 | |||
| 307 | static int __init mipi_dsi_bus_init(void) | ||
| 308 | { | ||
| 309 | return bus_register(&mipi_dsi_bus_type); | ||
| 310 | } | ||
| 311 | postcore_initcall(mipi_dsi_bus_init); | ||
| 312 | |||
| 313 | MODULE_AUTHOR("Andrzej Hajda <a.hajda@samsung.com>"); | ||
| 314 | MODULE_DESCRIPTION("MIPI DSI Bus"); | ||
| 315 | MODULE_LICENSE("GPL and additional rights"); | ||
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c new file mode 100644 index 000000000000..2ef988e037b7 --- /dev/null +++ b/drivers/gpu/drm/drm_panel.c | |||
| @@ -0,0 +1,100 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2013, NVIDIA Corporation. All rights reserved. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sub license, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice (including the | ||
| 12 | * next paragraph) shall be included in all copies or substantial portions | ||
| 13 | * of the Software. | ||
| 14 | * | ||
| 15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 17 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
| 18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
| 19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
| 20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
| 21 | * DEALINGS IN THE SOFTWARE. | ||
| 22 | */ | ||
| 23 | |||
| 24 | #include <linux/err.h> | ||
| 25 | #include <linux/module.h> | ||
| 26 | |||
| 27 | #include <drm/drm_crtc.h> | ||
| 28 | #include <drm/drm_panel.h> | ||
| 29 | |||
| 30 | static DEFINE_MUTEX(panel_lock); | ||
| 31 | static LIST_HEAD(panel_list); | ||
| 32 | |||
| 33 | void drm_panel_init(struct drm_panel *panel) | ||
| 34 | { | ||
| 35 | INIT_LIST_HEAD(&panel->list); | ||
| 36 | } | ||
| 37 | EXPORT_SYMBOL(drm_panel_init); | ||
| 38 | |||
| 39 | int drm_panel_add(struct drm_panel *panel) | ||
| 40 | { | ||
| 41 | mutex_lock(&panel_lock); | ||
| 42 | list_add_tail(&panel->list, &panel_list); | ||
| 43 | mutex_unlock(&panel_lock); | ||
| 44 | |||
| 45 | return 0; | ||
| 46 | } | ||
| 47 | EXPORT_SYMBOL(drm_panel_add); | ||
| 48 | |||
| 49 | void drm_panel_remove(struct drm_panel *panel) | ||
| 50 | { | ||
| 51 | mutex_lock(&panel_lock); | ||
| 52 | list_del_init(&panel->list); | ||
| 53 | mutex_unlock(&panel_lock); | ||
| 54 | } | ||
| 55 | EXPORT_SYMBOL(drm_panel_remove); | ||
| 56 | |||
| 57 | int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector) | ||
| 58 | { | ||
| 59 | if (panel->connector) | ||
| 60 | return -EBUSY; | ||
| 61 | |||
| 62 | panel->connector = connector; | ||
| 63 | panel->drm = connector->dev; | ||
| 64 | |||
| 65 | return 0; | ||
| 66 | } | ||
| 67 | EXPORT_SYMBOL(drm_panel_attach); | ||
| 68 | |||
| 69 | int drm_panel_detach(struct drm_panel *panel) | ||
| 70 | { | ||
| 71 | panel->connector = NULL; | ||
| 72 | panel->drm = NULL; | ||
| 73 | |||
| 74 | return 0; | ||
| 75 | } | ||
| 76 | EXPORT_SYMBOL(drm_panel_detach); | ||
| 77 | |||
| 78 | #ifdef CONFIG_OF | ||
| 79 | struct drm_panel *of_drm_find_panel(struct device_node *np) | ||
| 80 | { | ||
| 81 | struct drm_panel *panel; | ||
| 82 | |||
| 83 | mutex_lock(&panel_lock); | ||
| 84 | |||
| 85 | list_for_each_entry(panel, &panel_list, list) { | ||
| 86 | if (panel->dev->of_node == np) { | ||
| 87 | mutex_unlock(&panel_lock); | ||
| 88 | return panel; | ||
| 89 | } | ||
| 90 | } | ||
| 91 | |||
| 92 | mutex_unlock(&panel_lock); | ||
| 93 | return NULL; | ||
| 94 | } | ||
| 95 | EXPORT_SYMBOL(of_drm_find_panel); | ||
| 96 | #endif | ||
| 97 | |||
| 98 | MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>"); | ||
| 99 | MODULE_DESCRIPTION("DRM panel infrastructure"); | ||
| 100 | MODULE_LICENSE("GPL and additional rights"); | ||
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c index 02679793c9e2..5736aaa7e86c 100644 --- a/drivers/gpu/drm/drm_pci.c +++ b/drivers/gpu/drm/drm_pci.c | |||
| @@ -262,16 +262,11 @@ static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p) | |||
| 262 | return 0; | 262 | return 0; |
| 263 | } | 263 | } |
| 264 | 264 | ||
| 265 | static int drm_pci_agp_init(struct drm_device *dev) | 265 | static void drm_pci_agp_init(struct drm_device *dev) |
| 266 | { | 266 | { |
| 267 | if (drm_core_has_AGP(dev)) { | 267 | if (drm_core_check_feature(dev, DRIVER_USE_AGP)) { |
| 268 | if (drm_pci_device_is_agp(dev)) | 268 | if (drm_pci_device_is_agp(dev)) |
| 269 | dev->agp = drm_agp_init(dev); | 269 | dev->agp = drm_agp_init(dev); |
| 270 | if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) | ||
| 271 | && (dev->agp == NULL)) { | ||
| 272 | DRM_ERROR("Cannot initialize the agpgart module.\n"); | ||
| 273 | return -EINVAL; | ||
| 274 | } | ||
| 275 | if (dev->agp) { | 270 | if (dev->agp) { |
| 276 | dev->agp->agp_mtrr = arch_phys_wc_add( | 271 | dev->agp->agp_mtrr = arch_phys_wc_add( |
| 277 | dev->agp->agp_info.aper_base, | 272 | dev->agp->agp_info.aper_base, |
| @@ -279,15 +274,14 @@ static int drm_pci_agp_init(struct drm_device *dev) | |||
| 279 | 1024 * 1024); | 274 | 1024 * 1024); |
| 280 | } | 275 | } |
| 281 | } | 276 | } |
| 282 | return 0; | ||
| 283 | } | 277 | } |
| 284 | 278 | ||
| 285 | static void drm_pci_agp_destroy(struct drm_device *dev) | 279 | void drm_pci_agp_destroy(struct drm_device *dev) |
| 286 | { | 280 | { |
| 287 | if (drm_core_has_AGP(dev) && dev->agp) { | 281 | if (dev->agp) { |
| 288 | arch_phys_wc_del(dev->agp->agp_mtrr); | 282 | arch_phys_wc_del(dev->agp->agp_mtrr); |
| 289 | drm_agp_clear(dev); | 283 | drm_agp_clear(dev); |
| 290 | drm_agp_destroy(dev->agp); | 284 | kfree(dev->agp); |
| 291 | dev->agp = NULL; | 285 | dev->agp = NULL; |
| 292 | } | 286 | } |
| 293 | } | 287 | } |
| @@ -299,8 +293,6 @@ static struct drm_bus drm_pci_bus = { | |||
| 299 | .set_busid = drm_pci_set_busid, | 293 | .set_busid = drm_pci_set_busid, |
| 300 | .set_unique = drm_pci_set_unique, | 294 | .set_unique = drm_pci_set_unique, |
| 301 | .irq_by_busid = drm_pci_irq_by_busid, | 295 | .irq_by_busid = drm_pci_irq_by_busid, |
| 302 | .agp_init = drm_pci_agp_init, | ||
| 303 | .agp_destroy = drm_pci_agp_destroy, | ||
| 304 | }; | 296 | }; |
| 305 | 297 | ||
| 306 | /** | 298 | /** |
| @@ -338,17 +330,25 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent, | |||
| 338 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 330 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
| 339 | pci_set_drvdata(pdev, dev); | 331 | pci_set_drvdata(pdev, dev); |
| 340 | 332 | ||
| 333 | drm_pci_agp_init(dev); | ||
| 334 | |||
| 341 | ret = drm_dev_register(dev, ent->driver_data); | 335 | ret = drm_dev_register(dev, ent->driver_data); |
| 342 | if (ret) | 336 | if (ret) |
| 343 | goto err_pci; | 337 | goto err_agp; |
| 344 | 338 | ||
| 345 | DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n", | 339 | DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n", |
| 346 | driver->name, driver->major, driver->minor, driver->patchlevel, | 340 | driver->name, driver->major, driver->minor, driver->patchlevel, |
| 347 | driver->date, pci_name(pdev), dev->primary->index); | 341 | driver->date, pci_name(pdev), dev->primary->index); |
| 348 | 342 | ||
| 343 | /* No locking needed since shadow-attach is single-threaded since it may | ||
| 344 | * only be called from the per-driver module init hook. */ | ||
| 345 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | ||
| 346 | list_add_tail(&dev->legacy_dev_list, &driver->legacy_dev_list); | ||
| 347 | |||
| 349 | return 0; | 348 | return 0; |
| 350 | 349 | ||
| 351 | err_pci: | 350 | err_agp: |
| 351 | drm_pci_agp_destroy(dev); | ||
| 352 | pci_disable_device(pdev); | 352 | pci_disable_device(pdev); |
| 353 | err_free: | 353 | err_free: |
| 354 | drm_dev_free(dev); | 354 | drm_dev_free(dev); |
| @@ -375,7 +375,6 @@ int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver) | |||
| 375 | 375 | ||
| 376 | DRM_DEBUG("\n"); | 376 | DRM_DEBUG("\n"); |
| 377 | 377 | ||
| 378 | INIT_LIST_HEAD(&driver->device_list); | ||
| 379 | driver->kdriver.pci = pdriver; | 378 | driver->kdriver.pci = pdriver; |
| 380 | driver->bus = &drm_pci_bus; | 379 | driver->bus = &drm_pci_bus; |
| 381 | 380 | ||
| @@ -383,6 +382,7 @@ int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver) | |||
| 383 | return pci_register_driver(pdriver); | 382 | return pci_register_driver(pdriver); |
| 384 | 383 | ||
| 385 | /* If not using KMS, fall back to stealth mode manual scanning. */ | 384 | /* If not using KMS, fall back to stealth mode manual scanning. */ |
| 385 | INIT_LIST_HEAD(&driver->legacy_dev_list); | ||
| 386 | for (i = 0; pdriver->id_table[i].vendor != 0; i++) { | 386 | for (i = 0; pdriver->id_table[i].vendor != 0; i++) { |
| 387 | pid = &pdriver->id_table[i]; | 387 | pid = &pdriver->id_table[i]; |
| 388 | 388 | ||
| @@ -452,6 +452,7 @@ int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver) | |||
| 452 | return -1; | 452 | return -1; |
| 453 | } | 453 | } |
| 454 | 454 | ||
| 455 | void drm_pci_agp_destroy(struct drm_device *dev) {} | ||
| 455 | #endif | 456 | #endif |
| 456 | 457 | ||
| 457 | EXPORT_SYMBOL(drm_pci_init); | 458 | EXPORT_SYMBOL(drm_pci_init); |
| @@ -465,8 +466,11 @@ void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver) | |||
| 465 | if (driver->driver_features & DRIVER_MODESET) { | 466 | if (driver->driver_features & DRIVER_MODESET) { |
| 466 | pci_unregister_driver(pdriver); | 467 | pci_unregister_driver(pdriver); |
| 467 | } else { | 468 | } else { |
| 468 | list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item) | 469 | list_for_each_entry_safe(dev, tmp, &driver->legacy_dev_list, |
| 470 | legacy_dev_list) { | ||
| 469 | drm_put_dev(dev); | 471 | drm_put_dev(dev); |
| 472 | list_del(&dev->legacy_dev_list); | ||
| 473 | } | ||
| 470 | } | 474 | } |
| 471 | DRM_INFO("Module unloaded\n"); | 475 | DRM_INFO("Module unloaded\n"); |
| 472 | } | 476 | } |
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c index fc24fee8ec83..21fc82006b78 100644 --- a/drivers/gpu/drm/drm_platform.c +++ b/drivers/gpu/drm/drm_platform.c | |||
| @@ -147,18 +147,6 @@ int drm_platform_init(struct drm_driver *driver, struct platform_device *platfor | |||
| 147 | 147 | ||
| 148 | driver->kdriver.platform_device = platform_device; | 148 | driver->kdriver.platform_device = platform_device; |
| 149 | driver->bus = &drm_platform_bus; | 149 | driver->bus = &drm_platform_bus; |
| 150 | INIT_LIST_HEAD(&driver->device_list); | ||
| 151 | return drm_get_platform_dev(platform_device, driver); | 150 | return drm_get_platform_dev(platform_device, driver); |
| 152 | } | 151 | } |
| 153 | EXPORT_SYMBOL(drm_platform_init); | 152 | EXPORT_SYMBOL(drm_platform_init); |
| 154 | |||
| 155 | void drm_platform_exit(struct drm_driver *driver, struct platform_device *platform_device) | ||
| 156 | { | ||
| 157 | struct drm_device *dev, *tmp; | ||
| 158 | DRM_DEBUG("\n"); | ||
| 159 | |||
| 160 | list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item) | ||
| 161 | drm_put_dev(dev); | ||
| 162 | DRM_INFO("Module unloaded\n"); | ||
| 163 | } | ||
| 164 | EXPORT_SYMBOL(drm_platform_exit); | ||
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c index 66dd3a001cf1..98a33c580ca1 100644 --- a/drivers/gpu/drm/drm_stub.c +++ b/drivers/gpu/drm/drm_stub.c | |||
| @@ -99,13 +99,19 @@ void drm_ut_debug_printk(unsigned int request_level, | |||
| 99 | const char *function_name, | 99 | const char *function_name, |
| 100 | const char *format, ...) | 100 | const char *format, ...) |
| 101 | { | 101 | { |
| 102 | struct va_format vaf; | ||
| 102 | va_list args; | 103 | va_list args; |
| 103 | 104 | ||
| 104 | if (drm_debug & request_level) { | 105 | if (drm_debug & request_level) { |
| 105 | if (function_name) | ||
| 106 | printk(KERN_DEBUG "[%s:%s], ", prefix, function_name); | ||
| 107 | va_start(args, format); | 106 | va_start(args, format); |
| 108 | vprintk(format, args); | 107 | vaf.fmt = format; |
| 108 | vaf.va = &args; | ||
| 109 | |||
| 110 | if (function_name) | ||
| 111 | printk(KERN_DEBUG "[%s:%s], %pV", prefix, | ||
| 112 | function_name, &vaf); | ||
| 113 | else | ||
| 114 | printk(KERN_DEBUG "%pV", &vaf); | ||
| 109 | va_end(args); | 115 | va_end(args); |
| 110 | } | 116 | } |
| 111 | } | 117 | } |
| @@ -521,16 +527,10 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags) | |||
| 521 | 527 | ||
| 522 | mutex_lock(&drm_global_mutex); | 528 | mutex_lock(&drm_global_mutex); |
| 523 | 529 | ||
| 524 | if (dev->driver->bus->agp_init) { | ||
| 525 | ret = dev->driver->bus->agp_init(dev); | ||
| 526 | if (ret) | ||
| 527 | goto out_unlock; | ||
| 528 | } | ||
| 529 | |||
| 530 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | 530 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
| 531 | ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL); | 531 | ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL); |
| 532 | if (ret) | 532 | if (ret) |
| 533 | goto err_agp; | 533 | goto out_unlock; |
| 534 | } | 534 | } |
| 535 | 535 | ||
| 536 | if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) { | 536 | if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) { |
| @@ -557,8 +557,6 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags) | |||
| 557 | goto err_unload; | 557 | goto err_unload; |
| 558 | } | 558 | } |
| 559 | 559 | ||
| 560 | list_add_tail(&dev->driver_item, &dev->driver->device_list); | ||
| 561 | |||
| 562 | ret = 0; | 560 | ret = 0; |
| 563 | goto out_unlock; | 561 | goto out_unlock; |
| 564 | 562 | ||
| @@ -571,9 +569,6 @@ err_render_node: | |||
| 571 | drm_unplug_minor(dev->render); | 569 | drm_unplug_minor(dev->render); |
| 572 | err_control_node: | 570 | err_control_node: |
| 573 | drm_unplug_minor(dev->control); | 571 | drm_unplug_minor(dev->control); |
| 574 | err_agp: | ||
| 575 | if (dev->driver->bus->agp_destroy) | ||
| 576 | dev->driver->bus->agp_destroy(dev); | ||
| 577 | out_unlock: | 572 | out_unlock: |
| 578 | mutex_unlock(&drm_global_mutex); | 573 | mutex_unlock(&drm_global_mutex); |
| 579 | return ret; | 574 | return ret; |
| @@ -597,8 +592,8 @@ void drm_dev_unregister(struct drm_device *dev) | |||
| 597 | if (dev->driver->unload) | 592 | if (dev->driver->unload) |
| 598 | dev->driver->unload(dev); | 593 | dev->driver->unload(dev); |
| 599 | 594 | ||
| 600 | if (dev->driver->bus->agp_destroy) | 595 | if (dev->agp) |
| 601 | dev->driver->bus->agp_destroy(dev); | 596 | drm_pci_agp_destroy(dev); |
| 602 | 597 | ||
| 603 | drm_vblank_cleanup(dev); | 598 | drm_vblank_cleanup(dev); |
| 604 | 599 | ||
| @@ -608,7 +603,5 @@ void drm_dev_unregister(struct drm_device *dev) | |||
| 608 | drm_unplug_minor(dev->control); | 603 | drm_unplug_minor(dev->control); |
| 609 | drm_unplug_minor(dev->render); | 604 | drm_unplug_minor(dev->render); |
| 610 | drm_unplug_minor(dev->primary); | 605 | drm_unplug_minor(dev->primary); |
| 611 | |||
| 612 | list_del(&dev->driver_item); | ||
| 613 | } | 606 | } |
| 614 | EXPORT_SYMBOL(drm_dev_unregister); | 607 | EXPORT_SYMBOL(drm_dev_unregister); |
diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c index b179b70e7853..0f8cb1ae7607 100644 --- a/drivers/gpu/drm/drm_usb.c +++ b/drivers/gpu/drm/drm_usb.c | |||
| @@ -1,4 +1,5 @@ | |||
| 1 | #include <drm/drmP.h> | 1 | #include <drm/drmP.h> |
| 2 | #include <drm/drm_usb.h> | ||
| 2 | #include <linux/usb.h> | 3 | #include <linux/usb.h> |
| 3 | #include <linux/module.h> | 4 | #include <linux/module.h> |
| 4 | 5 | ||
| @@ -63,7 +64,6 @@ int drm_usb_init(struct drm_driver *driver, struct usb_driver *udriver) | |||
| 63 | int res; | 64 | int res; |
| 64 | DRM_DEBUG("\n"); | 65 | DRM_DEBUG("\n"); |
| 65 | 66 | ||
| 66 | INIT_LIST_HEAD(&driver->device_list); | ||
| 67 | driver->kdriver.usb = udriver; | 67 | driver->kdriver.usb = udriver; |
| 68 | driver->bus = &drm_usb_bus; | 68 | driver->bus = &drm_usb_bus; |
| 69 | 69 | ||
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c index 93e95d7efd57..24e045c4f531 100644 --- a/drivers/gpu/drm/drm_vm.c +++ b/drivers/gpu/drm/drm_vm.c | |||
| @@ -101,7 +101,7 @@ static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
| 101 | /* | 101 | /* |
| 102 | * Find the right map | 102 | * Find the right map |
| 103 | */ | 103 | */ |
| 104 | if (!drm_core_has_AGP(dev)) | 104 | if (!dev->agp) |
| 105 | goto vm_fault_error; | 105 | goto vm_fault_error; |
| 106 | 106 | ||
| 107 | if (!dev->agp || !dev->agp->cant_use_aperture) | 107 | if (!dev->agp || !dev->agp->cant_use_aperture) |
| @@ -220,7 +220,6 @@ static void drm_vm_shm_close(struct vm_area_struct *vma) | |||
| 220 | 220 | ||
| 221 | DRM_DEBUG("0x%08lx,0x%08lx\n", | 221 | DRM_DEBUG("0x%08lx,0x%08lx\n", |
| 222 | vma->vm_start, vma->vm_end - vma->vm_start); | 222 | vma->vm_start, vma->vm_end - vma->vm_start); |
| 223 | atomic_dec(&dev->vma_count); | ||
| 224 | 223 | ||
| 225 | map = vma->vm_private_data; | 224 | map = vma->vm_private_data; |
| 226 | 225 | ||
| @@ -266,9 +265,6 @@ static void drm_vm_shm_close(struct vm_area_struct *vma) | |||
| 266 | dmah.size = map->size; | 265 | dmah.size = map->size; |
| 267 | __drm_pci_free(dev, &dmah); | 266 | __drm_pci_free(dev, &dmah); |
| 268 | break; | 267 | break; |
| 269 | case _DRM_GEM: | ||
| 270 | DRM_ERROR("tried to rmmap GEM object\n"); | ||
| 271 | break; | ||
| 272 | } | 268 | } |
| 273 | kfree(map); | 269 | kfree(map); |
| 274 | } | 270 | } |
| @@ -408,7 +404,6 @@ void drm_vm_open_locked(struct drm_device *dev, | |||
| 408 | 404 | ||
| 409 | DRM_DEBUG("0x%08lx,0x%08lx\n", | 405 | DRM_DEBUG("0x%08lx,0x%08lx\n", |
| 410 | vma->vm_start, vma->vm_end - vma->vm_start); | 406 | vma->vm_start, vma->vm_end - vma->vm_start); |
| 411 | atomic_inc(&dev->vma_count); | ||
| 412 | 407 | ||
| 413 | vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL); | 408 | vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL); |
| 414 | if (vma_entry) { | 409 | if (vma_entry) { |
| @@ -436,7 +431,6 @@ void drm_vm_close_locked(struct drm_device *dev, | |||
| 436 | 431 | ||
| 437 | DRM_DEBUG("0x%08lx,0x%08lx\n", | 432 | DRM_DEBUG("0x%08lx,0x%08lx\n", |
| 438 | vma->vm_start, vma->vm_end - vma->vm_start); | 433 | vma->vm_start, vma->vm_end - vma->vm_start); |
| 439 | atomic_dec(&dev->vma_count); | ||
| 440 | 434 | ||
| 441 | list_for_each_entry_safe(pt, temp, &dev->vmalist, head) { | 435 | list_for_each_entry_safe(pt, temp, &dev->vmalist, head) { |
| 442 | if (pt->vma == vma) { | 436 | if (pt->vma == vma) { |
| @@ -595,7 +589,7 @@ int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) | |||
| 595 | switch (map->type) { | 589 | switch (map->type) { |
| 596 | #if !defined(__arm__) | 590 | #if !defined(__arm__) |
| 597 | case _DRM_AGP: | 591 | case _DRM_AGP: |
| 598 | if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) { | 592 | if (dev->agp && dev->agp->cant_use_aperture) { |
| 599 | /* | 593 | /* |
| 600 | * On some platforms we can't talk to bus dma address from the CPU, so for | 594 | * On some platforms we can't talk to bus dma address from the CPU, so for |
| 601 | * memory of type DRM_AGP, we'll deal with sorting out the real physical | 595 | * memory of type DRM_AGP, we'll deal with sorting out the real physical |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 22b8f5eced80..9d096a0c5f8d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c | |||
| @@ -14,6 +14,8 @@ | |||
| 14 | #include <drm/drmP.h> | 14 | #include <drm/drmP.h> |
| 15 | #include <drm/drm_crtc_helper.h> | 15 | #include <drm/drm_crtc_helper.h> |
| 16 | 16 | ||
| 17 | #include <linux/anon_inodes.h> | ||
| 18 | |||
| 17 | #include <drm/exynos_drm.h> | 19 | #include <drm/exynos_drm.h> |
| 18 | 20 | ||
| 19 | #include "exynos_drm_drv.h" | 21 | #include "exynos_drm_drv.h" |
| @@ -119,6 +121,8 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags) | |||
| 119 | 121 | ||
| 120 | drm_vblank_offdelay = VBLANK_OFF_DELAY; | 122 | drm_vblank_offdelay = VBLANK_OFF_DELAY; |
| 121 | 123 | ||
| 124 | platform_set_drvdata(dev->platformdev, dev); | ||
| 125 | |||
| 122 | return 0; | 126 | return 0; |
| 123 | 127 | ||
| 124 | err_drm_device: | 128 | err_drm_device: |
| @@ -150,9 +154,14 @@ static int exynos_drm_unload(struct drm_device *dev) | |||
| 150 | return 0; | 154 | return 0; |
| 151 | } | 155 | } |
| 152 | 156 | ||
| 157 | static const struct file_operations exynos_drm_gem_fops = { | ||
| 158 | .mmap = exynos_drm_gem_mmap_buffer, | ||
| 159 | }; | ||
| 160 | |||
| 153 | static int exynos_drm_open(struct drm_device *dev, struct drm_file *file) | 161 | static int exynos_drm_open(struct drm_device *dev, struct drm_file *file) |
| 154 | { | 162 | { |
| 155 | struct drm_exynos_file_private *file_priv; | 163 | struct drm_exynos_file_private *file_priv; |
| 164 | struct file *anon_filp; | ||
| 156 | int ret; | 165 | int ret; |
| 157 | 166 | ||
| 158 | file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL); | 167 | file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL); |
| @@ -167,6 +176,16 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file) | |||
| 167 | file->driver_priv = NULL; | 176 | file->driver_priv = NULL; |
| 168 | } | 177 | } |
| 169 | 178 | ||
| 179 | anon_filp = anon_inode_getfile("exynos_gem", &exynos_drm_gem_fops, | ||
| 180 | NULL, 0); | ||
| 181 | if (IS_ERR(anon_filp)) { | ||
| 182 | kfree(file_priv); | ||
| 183 | return PTR_ERR(anon_filp); | ||
| 184 | } | ||
| 185 | |||
| 186 | anon_filp->f_mode = FMODE_READ | FMODE_WRITE; | ||
| 187 | file_priv->anon_filp = anon_filp; | ||
| 188 | |||
| 170 | return ret; | 189 | return ret; |
| 171 | } | 190 | } |
| 172 | 191 | ||
| @@ -179,6 +198,7 @@ static void exynos_drm_preclose(struct drm_device *dev, | |||
| 179 | static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file) | 198 | static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file) |
| 180 | { | 199 | { |
| 181 | struct exynos_drm_private *private = dev->dev_private; | 200 | struct exynos_drm_private *private = dev->dev_private; |
| 201 | struct drm_exynos_file_private *file_priv; | ||
| 182 | struct drm_pending_vblank_event *v, *vt; | 202 | struct drm_pending_vblank_event *v, *vt; |
| 183 | struct drm_pending_event *e, *et; | 203 | struct drm_pending_event *e, *et; |
| 184 | unsigned long flags; | 204 | unsigned long flags; |
| @@ -204,6 +224,9 @@ static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file) | |||
| 204 | } | 224 | } |
| 205 | spin_unlock_irqrestore(&dev->event_lock, flags); | 225 | spin_unlock_irqrestore(&dev->event_lock, flags); |
| 206 | 226 | ||
| 227 | file_priv = file->driver_priv; | ||
| 228 | if (file_priv->anon_filp) | ||
| 229 | fput(file_priv->anon_filp); | ||
| 207 | 230 | ||
| 208 | kfree(file->driver_priv); | 231 | kfree(file->driver_priv); |
| 209 | file->driver_priv = NULL; | 232 | file->driver_priv = NULL; |
| @@ -305,7 +328,7 @@ static int exynos_drm_platform_probe(struct platform_device *pdev) | |||
| 305 | 328 | ||
| 306 | static int exynos_drm_platform_remove(struct platform_device *pdev) | 329 | static int exynos_drm_platform_remove(struct platform_device *pdev) |
| 307 | { | 330 | { |
| 308 | drm_platform_exit(&exynos_drm_driver, pdev); | 331 | drm_put_dev(platform_get_drvdata(pdev)); |
| 309 | 332 | ||
| 310 | return 0; | 333 | return 0; |
| 311 | } | 334 | } |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index eaa19668bf00..0eaf5a27e120 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h | |||
| @@ -226,6 +226,7 @@ struct exynos_drm_ipp_private { | |||
| 226 | struct drm_exynos_file_private { | 226 | struct drm_exynos_file_private { |
| 227 | struct exynos_drm_g2d_private *g2d_priv; | 227 | struct exynos_drm_g2d_private *g2d_priv; |
| 228 | struct exynos_drm_ipp_private *ipp_priv; | 228 | struct exynos_drm_ipp_private *ipp_priv; |
| 229 | struct file *anon_filp; | ||
| 229 | }; | 230 | }; |
| 230 | 231 | ||
| 231 | /* | 232 | /* |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index a61878bf5dcd..a20440ce32e6 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c | |||
| @@ -347,7 +347,7 @@ static void fimd_wait_for_vblank(struct device *dev) | |||
| 347 | */ | 347 | */ |
| 348 | if (!wait_event_timeout(ctx->wait_vsync_queue, | 348 | if (!wait_event_timeout(ctx->wait_vsync_queue, |
| 349 | !atomic_read(&ctx->wait_vsync_event), | 349 | !atomic_read(&ctx->wait_vsync_event), |
| 350 | DRM_HZ/20)) | 350 | HZ/20)) |
| 351 | DRM_DEBUG_KMS("vblank wait timed out.\n"); | 351 | DRM_DEBUG_KMS("vblank wait timed out.\n"); |
| 352 | } | 352 | } |
| 353 | 353 | ||
| @@ -706,7 +706,7 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id) | |||
| 706 | /* set wait vsync event to zero and wake up queue. */ | 706 | /* set wait vsync event to zero and wake up queue. */ |
| 707 | if (atomic_read(&ctx->wait_vsync_event)) { | 707 | if (atomic_read(&ctx->wait_vsync_event)) { |
| 708 | atomic_set(&ctx->wait_vsync_event, 0); | 708 | atomic_set(&ctx->wait_vsync_event, 0); |
| 709 | DRM_WAKEUP(&ctx->wait_vsync_queue); | 709 | wake_up(&ctx->wait_vsync_queue); |
| 710 | } | 710 | } |
| 711 | out: | 711 | out: |
| 712 | return IRQ_HANDLED; | 712 | return IRQ_HANDLED; |
| @@ -954,7 +954,7 @@ static int fimd_probe(struct platform_device *pdev) | |||
| 954 | } | 954 | } |
| 955 | 955 | ||
| 956 | ctx->driver_data = drm_fimd_get_driver_data(pdev); | 956 | ctx->driver_data = drm_fimd_get_driver_data(pdev); |
| 957 | DRM_INIT_WAITQUEUE(&ctx->wait_vsync_queue); | 957 | init_waitqueue_head(&ctx->wait_vsync_queue); |
| 958 | atomic_set(&ctx->wait_vsync_event, 0); | 958 | atomic_set(&ctx->wait_vsync_event, 0); |
| 959 | 959 | ||
| 960 | subdrv = &ctx->subdrv; | 960 | subdrv = &ctx->subdrv; |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index be59d50d8b16..42d2904d88c7 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c | |||
| @@ -338,46 +338,22 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data, | |||
| 338 | &args->offset); | 338 | &args->offset); |
| 339 | } | 339 | } |
| 340 | 340 | ||
| 341 | static struct drm_file *exynos_drm_find_drm_file(struct drm_device *drm_dev, | 341 | int exynos_drm_gem_mmap_buffer(struct file *filp, |
| 342 | struct file *filp) | ||
| 343 | { | ||
| 344 | struct drm_file *file_priv; | ||
| 345 | |||
| 346 | /* find current process's drm_file from filelist. */ | ||
| 347 | list_for_each_entry(file_priv, &drm_dev->filelist, lhead) | ||
| 348 | if (file_priv->filp == filp) | ||
| 349 | return file_priv; | ||
| 350 | |||
| 351 | WARN_ON(1); | ||
| 352 | |||
| 353 | return ERR_PTR(-EFAULT); | ||
| 354 | } | ||
| 355 | |||
| 356 | static int exynos_drm_gem_mmap_buffer(struct file *filp, | ||
| 357 | struct vm_area_struct *vma) | 342 | struct vm_area_struct *vma) |
| 358 | { | 343 | { |
| 359 | struct drm_gem_object *obj = filp->private_data; | 344 | struct drm_gem_object *obj = filp->private_data; |
| 360 | struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); | 345 | struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); |
| 361 | struct drm_device *drm_dev = obj->dev; | 346 | struct drm_device *drm_dev = obj->dev; |
| 362 | struct exynos_drm_gem_buf *buffer; | 347 | struct exynos_drm_gem_buf *buffer; |
| 363 | struct drm_file *file_priv; | ||
| 364 | unsigned long vm_size; | 348 | unsigned long vm_size; |
| 365 | int ret; | 349 | int ret; |
| 366 | 350 | ||
| 351 | WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); | ||
| 352 | |||
| 367 | vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; | 353 | vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; |
| 368 | vma->vm_private_data = obj; | 354 | vma->vm_private_data = obj; |
| 369 | vma->vm_ops = drm_dev->driver->gem_vm_ops; | 355 | vma->vm_ops = drm_dev->driver->gem_vm_ops; |
| 370 | 356 | ||
| 371 | /* restore it to driver's fops. */ | ||
| 372 | filp->f_op = fops_get(drm_dev->driver->fops); | ||
| 373 | |||
| 374 | file_priv = exynos_drm_find_drm_file(drm_dev, filp); | ||
| 375 | if (IS_ERR(file_priv)) | ||
| 376 | return PTR_ERR(file_priv); | ||
| 377 | |||
| 378 | /* restore it to drm_file. */ | ||
| 379 | filp->private_data = file_priv; | ||
| 380 | |||
| 381 | update_vm_cache_attr(exynos_gem_obj, vma); | 357 | update_vm_cache_attr(exynos_gem_obj, vma); |
| 382 | 358 | ||
| 383 | vm_size = vma->vm_end - vma->vm_start; | 359 | vm_size = vma->vm_end - vma->vm_start; |
| @@ -411,15 +387,13 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp, | |||
| 411 | return 0; | 387 | return 0; |
| 412 | } | 388 | } |
| 413 | 389 | ||
| 414 | static const struct file_operations exynos_drm_gem_fops = { | ||
| 415 | .mmap = exynos_drm_gem_mmap_buffer, | ||
| 416 | }; | ||
| 417 | |||
| 418 | int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data, | 390 | int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data, |
| 419 | struct drm_file *file_priv) | 391 | struct drm_file *file_priv) |
| 420 | { | 392 | { |
| 393 | struct drm_exynos_file_private *exynos_file_priv; | ||
| 421 | struct drm_exynos_gem_mmap *args = data; | 394 | struct drm_exynos_gem_mmap *args = data; |
| 422 | struct drm_gem_object *obj; | 395 | struct drm_gem_object *obj; |
| 396 | struct file *anon_filp; | ||
| 423 | unsigned long addr; | 397 | unsigned long addr; |
| 424 | 398 | ||
| 425 | if (!(dev->driver->driver_features & DRIVER_GEM)) { | 399 | if (!(dev->driver->driver_features & DRIVER_GEM)) { |
| @@ -427,47 +401,25 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data, | |||
| 427 | return -ENODEV; | 401 | return -ENODEV; |
| 428 | } | 402 | } |
| 429 | 403 | ||
| 404 | mutex_lock(&dev->struct_mutex); | ||
| 405 | |||
| 430 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 406 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); |
| 431 | if (!obj) { | 407 | if (!obj) { |
| 432 | DRM_ERROR("failed to lookup gem object.\n"); | 408 | DRM_ERROR("failed to lookup gem object.\n"); |
| 409 | mutex_unlock(&dev->struct_mutex); | ||
| 433 | return -EINVAL; | 410 | return -EINVAL; |
| 434 | } | 411 | } |
| 435 | 412 | ||
| 436 | /* | 413 | exynos_file_priv = file_priv->driver_priv; |
| 437 | * We have to use gem object and its fops for specific mmaper, | 414 | anon_filp = exynos_file_priv->anon_filp; |
| 438 | * but vm_mmap() can deliver only filp. So we have to change | 415 | anon_filp->private_data = obj; |
| 439 | * filp->f_op and filp->private_data temporarily, then restore | ||
| 440 | * again. So it is important to keep lock until restoration the | ||
| 441 | * settings to prevent others from misuse of filp->f_op or | ||
| 442 | * filp->private_data. | ||
| 443 | */ | ||
| 444 | mutex_lock(&dev->struct_mutex); | ||
| 445 | |||
| 446 | /* | ||
| 447 | * Set specific mmper's fops. And it will be restored by | ||
| 448 | * exynos_drm_gem_mmap_buffer to dev->driver->fops. | ||
| 449 | * This is used to call specific mapper temporarily. | ||
| 450 | */ | ||
| 451 | file_priv->filp->f_op = &exynos_drm_gem_fops; | ||
| 452 | |||
| 453 | /* | ||
| 454 | * Set gem object to private_data so that specific mmaper | ||
| 455 | * can get the gem object. And it will be restored by | ||
| 456 | * exynos_drm_gem_mmap_buffer to drm_file. | ||
| 457 | */ | ||
| 458 | file_priv->filp->private_data = obj; | ||
| 459 | 416 | ||
| 460 | addr = vm_mmap(file_priv->filp, 0, args->size, | 417 | addr = vm_mmap(anon_filp, 0, args->size, PROT_READ | PROT_WRITE, |
| 461 | PROT_READ | PROT_WRITE, MAP_SHARED, 0); | 418 | MAP_SHARED, 0); |
| 462 | 419 | ||
| 463 | drm_gem_object_unreference(obj); | 420 | drm_gem_object_unreference(obj); |
| 464 | 421 | ||
| 465 | if (IS_ERR_VALUE(addr)) { | 422 | if (IS_ERR_VALUE(addr)) { |
| 466 | /* check filp->f_op, filp->private_data are restored */ | ||
| 467 | if (file_priv->filp->f_op == &exynos_drm_gem_fops) { | ||
| 468 | file_priv->filp->f_op = fops_get(dev->driver->fops); | ||
| 469 | file_priv->filp->private_data = file_priv; | ||
| 470 | } | ||
| 471 | mutex_unlock(&dev->struct_mutex); | 423 | mutex_unlock(&dev->struct_mutex); |
| 472 | return (int)addr; | 424 | return (int)addr; |
| 473 | } | 425 | } |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h index b8c818ba2ff4..1592c0ba7de8 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.h +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h | |||
| @@ -122,6 +122,9 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data, | |||
| 122 | int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data, | 122 | int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data, |
| 123 | struct drm_file *file_priv); | 123 | struct drm_file *file_priv); |
| 124 | 124 | ||
| 125 | int exynos_drm_gem_mmap_buffer(struct file *filp, | ||
| 126 | struct vm_area_struct *vma); | ||
| 127 | |||
| 125 | /* map user space allocated by malloc to pages. */ | 128 | /* map user space allocated by malloc to pages. */ |
| 126 | int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data, | 129 | int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data, |
| 127 | struct drm_file *file_priv); | 130 | struct drm_file *file_priv); |
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index 63bc5f92fbb3..2dfa48c76f54 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c | |||
| @@ -868,7 +868,7 @@ static void mixer_wait_for_vblank(void *ctx) | |||
| 868 | */ | 868 | */ |
| 869 | if (!wait_event_timeout(mixer_ctx->wait_vsync_queue, | 869 | if (!wait_event_timeout(mixer_ctx->wait_vsync_queue, |
| 870 | !atomic_read(&mixer_ctx->wait_vsync_event), | 870 | !atomic_read(&mixer_ctx->wait_vsync_event), |
| 871 | DRM_HZ/20)) | 871 | HZ/20)) |
| 872 | DRM_DEBUG_KMS("vblank wait timed out.\n"); | 872 | DRM_DEBUG_KMS("vblank wait timed out.\n"); |
| 873 | } | 873 | } |
| 874 | 874 | ||
| @@ -1019,7 +1019,7 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg) | |||
| 1019 | /* set wait vsync event to zero and wake up queue. */ | 1019 | /* set wait vsync event to zero and wake up queue. */ |
| 1020 | if (atomic_read(&ctx->wait_vsync_event)) { | 1020 | if (atomic_read(&ctx->wait_vsync_event)) { |
| 1021 | atomic_set(&ctx->wait_vsync_event, 0); | 1021 | atomic_set(&ctx->wait_vsync_event, 0); |
| 1022 | DRM_WAKEUP(&ctx->wait_vsync_queue); | 1022 | wake_up(&ctx->wait_vsync_queue); |
| 1023 | } | 1023 | } |
| 1024 | } | 1024 | } |
| 1025 | 1025 | ||
| @@ -1209,7 +1209,7 @@ static int mixer_probe(struct platform_device *pdev) | |||
| 1209 | drm_hdmi_ctx->ctx = (void *)ctx; | 1209 | drm_hdmi_ctx->ctx = (void *)ctx; |
| 1210 | ctx->vp_enabled = drv->is_vp_enabled; | 1210 | ctx->vp_enabled = drv->is_vp_enabled; |
| 1211 | ctx->mxr_ver = drv->version; | 1211 | ctx->mxr_ver = drv->version; |
| 1212 | DRM_INIT_WAITQUEUE(&ctx->wait_vsync_queue); | 1212 | init_waitqueue_head(&ctx->wait_vsync_queue); |
| 1213 | atomic_set(&ctx->wait_vsync_event, 0); | 1213 | atomic_set(&ctx->wait_vsync_event, 0); |
| 1214 | 1214 | ||
| 1215 | platform_set_drvdata(pdev, drm_hdmi_ctx); | 1215 | platform_set_drvdata(pdev, drm_hdmi_ctx); |
diff --git a/drivers/gpu/drm/gma500/accel_2d.c b/drivers/gpu/drm/gma500/accel_2d.c index d5ef1a5793c8..de6f62a6ceb7 100644 --- a/drivers/gpu/drm/gma500/accel_2d.c +++ b/drivers/gpu/drm/gma500/accel_2d.c | |||
| @@ -326,7 +326,7 @@ int psbfb_sync(struct fb_info *info) | |||
| 326 | struct psb_framebuffer *psbfb = &fbdev->pfb; | 326 | struct psb_framebuffer *psbfb = &fbdev->pfb; |
| 327 | struct drm_device *dev = psbfb->base.dev; | 327 | struct drm_device *dev = psbfb->base.dev; |
| 328 | struct drm_psb_private *dev_priv = dev->dev_private; | 328 | struct drm_psb_private *dev_priv = dev->dev_private; |
| 329 | unsigned long _end = jiffies + DRM_HZ; | 329 | unsigned long _end = jiffies + HZ; |
| 330 | int busy = 0; | 330 | int busy = 0; |
| 331 | unsigned long flags; | 331 | unsigned long flags; |
| 332 | 332 | ||
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c index f88a1815d87c..0490ce36b53f 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_dp.c +++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c | |||
| @@ -483,7 +483,7 @@ cdv_intel_dp_aux_native_write(struct gma_encoder *encoder, | |||
| 483 | 483 | ||
| 484 | if (send_bytes > 16) | 484 | if (send_bytes > 16) |
| 485 | return -1; | 485 | return -1; |
| 486 | msg[0] = AUX_NATIVE_WRITE << 4; | 486 | msg[0] = DP_AUX_NATIVE_WRITE << 4; |
| 487 | msg[1] = address >> 8; | 487 | msg[1] = address >> 8; |
| 488 | msg[2] = address & 0xff; | 488 | msg[2] = address & 0xff; |
| 489 | msg[3] = send_bytes - 1; | 489 | msg[3] = send_bytes - 1; |
| @@ -493,9 +493,10 @@ cdv_intel_dp_aux_native_write(struct gma_encoder *encoder, | |||
| 493 | ret = cdv_intel_dp_aux_ch(encoder, msg, msg_bytes, &ack, 1); | 493 | ret = cdv_intel_dp_aux_ch(encoder, msg, msg_bytes, &ack, 1); |
| 494 | if (ret < 0) | 494 | if (ret < 0) |
| 495 | return ret; | 495 | return ret; |
| 496 | if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) | 496 | ack >>= 4; |
| 497 | if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK) | ||
| 497 | break; | 498 | break; |
| 498 | else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) | 499 | else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER) |
| 499 | udelay(100); | 500 | udelay(100); |
| 500 | else | 501 | else |
| 501 | return -EIO; | 502 | return -EIO; |
| @@ -523,7 +524,7 @@ cdv_intel_dp_aux_native_read(struct gma_encoder *encoder, | |||
| 523 | uint8_t ack; | 524 | uint8_t ack; |
| 524 | int ret; | 525 | int ret; |
| 525 | 526 | ||
| 526 | msg[0] = AUX_NATIVE_READ << 4; | 527 | msg[0] = DP_AUX_NATIVE_READ << 4; |
| 527 | msg[1] = address >> 8; | 528 | msg[1] = address >> 8; |
| 528 | msg[2] = address & 0xff; | 529 | msg[2] = address & 0xff; |
| 529 | msg[3] = recv_bytes - 1; | 530 | msg[3] = recv_bytes - 1; |
| @@ -538,12 +539,12 @@ cdv_intel_dp_aux_native_read(struct gma_encoder *encoder, | |||
| 538 | return -EPROTO; | 539 | return -EPROTO; |
| 539 | if (ret < 0) | 540 | if (ret < 0) |
| 540 | return ret; | 541 | return ret; |
| 541 | ack = reply[0]; | 542 | ack = reply[0] >> 4; |
| 542 | if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) { | 543 | if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK) { |
| 543 | memcpy(recv, reply + 1, ret - 1); | 544 | memcpy(recv, reply + 1, ret - 1); |
| 544 | return ret - 1; | 545 | return ret - 1; |
| 545 | } | 546 | } |
| 546 | else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) | 547 | else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER) |
| 547 | udelay(100); | 548 | udelay(100); |
| 548 | else | 549 | else |
| 549 | return -EIO; | 550 | return -EIO; |
| @@ -569,12 +570,12 @@ cdv_intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | |||
| 569 | 570 | ||
| 570 | /* Set up the command byte */ | 571 | /* Set up the command byte */ |
| 571 | if (mode & MODE_I2C_READ) | 572 | if (mode & MODE_I2C_READ) |
| 572 | msg[0] = AUX_I2C_READ << 4; | 573 | msg[0] = DP_AUX_I2C_READ << 4; |
| 573 | else | 574 | else |
| 574 | msg[0] = AUX_I2C_WRITE << 4; | 575 | msg[0] = DP_AUX_I2C_WRITE << 4; |
| 575 | 576 | ||
| 576 | if (!(mode & MODE_I2C_STOP)) | 577 | if (!(mode & MODE_I2C_STOP)) |
| 577 | msg[0] |= AUX_I2C_MOT << 4; | 578 | msg[0] |= DP_AUX_I2C_MOT << 4; |
| 578 | 579 | ||
| 579 | msg[1] = address >> 8; | 580 | msg[1] = address >> 8; |
| 580 | msg[2] = address; | 581 | msg[2] = address; |
| @@ -606,16 +607,16 @@ cdv_intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | |||
| 606 | return ret; | 607 | return ret; |
| 607 | } | 608 | } |
| 608 | 609 | ||
| 609 | switch (reply[0] & AUX_NATIVE_REPLY_MASK) { | 610 | switch ((reply[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK) { |
| 610 | case AUX_NATIVE_REPLY_ACK: | 611 | case DP_AUX_NATIVE_REPLY_ACK: |
| 611 | /* I2C-over-AUX Reply field is only valid | 612 | /* I2C-over-AUX Reply field is only valid |
| 612 | * when paired with AUX ACK. | 613 | * when paired with AUX ACK. |
| 613 | */ | 614 | */ |
| 614 | break; | 615 | break; |
| 615 | case AUX_NATIVE_REPLY_NACK: | 616 | case DP_AUX_NATIVE_REPLY_NACK: |
| 616 | DRM_DEBUG_KMS("aux_ch native nack\n"); | 617 | DRM_DEBUG_KMS("aux_ch native nack\n"); |
| 617 | return -EREMOTEIO; | 618 | return -EREMOTEIO; |
| 618 | case AUX_NATIVE_REPLY_DEFER: | 619 | case DP_AUX_NATIVE_REPLY_DEFER: |
| 619 | udelay(100); | 620 | udelay(100); |
| 620 | continue; | 621 | continue; |
| 621 | default: | 622 | default: |
| @@ -624,16 +625,16 @@ cdv_intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | |||
| 624 | return -EREMOTEIO; | 625 | return -EREMOTEIO; |
| 625 | } | 626 | } |
| 626 | 627 | ||
| 627 | switch (reply[0] & AUX_I2C_REPLY_MASK) { | 628 | switch ((reply[0] >> 4) & DP_AUX_I2C_REPLY_MASK) { |
| 628 | case AUX_I2C_REPLY_ACK: | 629 | case DP_AUX_I2C_REPLY_ACK: |
| 629 | if (mode == MODE_I2C_READ) { | 630 | if (mode == MODE_I2C_READ) { |
| 630 | *read_byte = reply[1]; | 631 | *read_byte = reply[1]; |
| 631 | } | 632 | } |
| 632 | return reply_bytes - 1; | 633 | return reply_bytes - 1; |
| 633 | case AUX_I2C_REPLY_NACK: | 634 | case DP_AUX_I2C_REPLY_NACK: |
| 634 | DRM_DEBUG_KMS("aux_i2c nack\n"); | 635 | DRM_DEBUG_KMS("aux_i2c nack\n"); |
| 635 | return -EREMOTEIO; | 636 | return -EREMOTEIO; |
| 636 | case AUX_I2C_REPLY_DEFER: | 637 | case DP_AUX_I2C_REPLY_DEFER: |
| 637 | DRM_DEBUG_KMS("aux_i2c defer\n"); | 638 | DRM_DEBUG_KMS("aux_i2c defer\n"); |
| 638 | udelay(100); | 639 | udelay(100); |
| 639 | break; | 640 | break; |
| @@ -677,7 +678,7 @@ cdv_intel_dp_i2c_init(struct gma_connector *connector, | |||
| 677 | return ret; | 678 | return ret; |
| 678 | } | 679 | } |
| 679 | 680 | ||
| 680 | void cdv_intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, | 681 | static void cdv_intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, |
| 681 | struct drm_display_mode *adjusted_mode) | 682 | struct drm_display_mode *adjusted_mode) |
| 682 | { | 683 | { |
| 683 | adjusted_mode->hdisplay = fixed_mode->hdisplay; | 684 | adjusted_mode->hdisplay = fixed_mode->hdisplay; |
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c index 24e8af3d22bf..386de2c9dc86 100644 --- a/drivers/gpu/drm/gma500/gma_display.c +++ b/drivers/gpu/drm/gma500/gma_display.c | |||
| @@ -349,6 +349,7 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc, | |||
| 349 | /* If we didn't get a handle then turn the cursor off */ | 349 | /* If we didn't get a handle then turn the cursor off */ |
| 350 | if (!handle) { | 350 | if (!handle) { |
| 351 | temp = CURSOR_MODE_DISABLE; | 351 | temp = CURSOR_MODE_DISABLE; |
| 352 | mutex_lock(&dev->struct_mutex); | ||
| 352 | 353 | ||
| 353 | if (gma_power_begin(dev, false)) { | 354 | if (gma_power_begin(dev, false)) { |
| 354 | REG_WRITE(control, temp); | 355 | REG_WRITE(control, temp); |
| @@ -365,6 +366,7 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc, | |||
| 365 | gma_crtc->cursor_obj = NULL; | 366 | gma_crtc->cursor_obj = NULL; |
| 366 | } | 367 | } |
| 367 | 368 | ||
| 369 | mutex_unlock(&dev->struct_mutex); | ||
| 368 | return 0; | 370 | return 0; |
| 369 | } | 371 | } |
| 370 | 372 | ||
| @@ -374,9 +376,12 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc, | |||
| 374 | return -EINVAL; | 376 | return -EINVAL; |
| 375 | } | 377 | } |
| 376 | 378 | ||
| 379 | mutex_lock(&dev->struct_mutex); | ||
| 377 | obj = drm_gem_object_lookup(dev, file_priv, handle); | 380 | obj = drm_gem_object_lookup(dev, file_priv, handle); |
| 378 | if (!obj) | 381 | if (!obj) { |
| 379 | return -ENOENT; | 382 | ret = -ENOENT; |
| 383 | goto unlock; | ||
| 384 | } | ||
| 380 | 385 | ||
| 381 | if (obj->size < width * height * 4) { | 386 | if (obj->size < width * height * 4) { |
| 382 | dev_dbg(dev->dev, "Buffer is too small\n"); | 387 | dev_dbg(dev->dev, "Buffer is too small\n"); |
| @@ -440,10 +445,13 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc, | |||
| 440 | } | 445 | } |
| 441 | 446 | ||
| 442 | gma_crtc->cursor_obj = obj; | 447 | gma_crtc->cursor_obj = obj; |
| 448 | unlock: | ||
| 449 | mutex_unlock(&dev->struct_mutex); | ||
| 443 | return ret; | 450 | return ret; |
| 444 | 451 | ||
| 445 | unref_cursor: | 452 | unref_cursor: |
| 446 | drm_gem_object_unreference(obj); | 453 | drm_gem_object_unreference(obj); |
| 454 | mutex_unlock(&dev->struct_mutex); | ||
| 447 | return ret; | 455 | return ret; |
| 448 | } | 456 | } |
| 449 | 457 | ||
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h index b59e6588c343..5ad6a03e477e 100644 --- a/drivers/gpu/drm/gma500/psb_drv.h +++ b/drivers/gpu/drm/gma500/psb_drv.h | |||
| @@ -212,8 +212,8 @@ enum { | |||
| 212 | #define PSB_HIGH_REG_OFFS 0x0600 | 212 | #define PSB_HIGH_REG_OFFS 0x0600 |
| 213 | 213 | ||
| 214 | #define PSB_NUM_VBLANKS 2 | 214 | #define PSB_NUM_VBLANKS 2 |
| 215 | #define PSB_WATCHDOG_DELAY (DRM_HZ * 2) | 215 | #define PSB_WATCHDOG_DELAY (HZ * 2) |
| 216 | #define PSB_LID_DELAY (DRM_HZ / 10) | 216 | #define PSB_LID_DELAY (HZ / 10) |
| 217 | 217 | ||
| 218 | #define MDFLD_PNW_B0 0x04 | 218 | #define MDFLD_PNW_B0 0x04 |
| 219 | #define MDFLD_PNW_C0 0x08 | 219 | #define MDFLD_PNW_C0 0x08 |
| @@ -232,7 +232,7 @@ enum { | |||
| 232 | #define MDFLD_DSR_RR 45 | 232 | #define MDFLD_DSR_RR 45 |
| 233 | #define MDFLD_DPU_ENABLE (1 << 31) | 233 | #define MDFLD_DPU_ENABLE (1 << 31) |
| 234 | #define MDFLD_DSR_FULLSCREEN (1 << 30) | 234 | #define MDFLD_DSR_FULLSCREEN (1 << 30) |
| 235 | #define MDFLD_DSR_DELAY (DRM_HZ / MDFLD_DSR_RR) | 235 | #define MDFLD_DSR_DELAY (HZ / MDFLD_DSR_RR) |
| 236 | 236 | ||
| 237 | #define PSB_PWR_STATE_ON 1 | 237 | #define PSB_PWR_STATE_ON 1 |
| 238 | #define PSB_PWR_STATE_OFF 2 | 238 | #define PSB_PWR_STATE_OFF 2 |
| @@ -769,7 +769,7 @@ extern void psb_mmu_remove_pages(struct psb_mmu_pd *pd, | |||
| 769 | *psb_irq.c | 769 | *psb_irq.c |
| 770 | */ | 770 | */ |
| 771 | 771 | ||
| 772 | extern irqreturn_t psb_irq_handler(DRM_IRQ_ARGS); | 772 | extern irqreturn_t psb_irq_handler(int irq, void *arg); |
| 773 | extern int psb_irq_enable_dpst(struct drm_device *dev); | 773 | extern int psb_irq_enable_dpst(struct drm_device *dev); |
| 774 | extern int psb_irq_disable_dpst(struct drm_device *dev); | 774 | extern int psb_irq_disable_dpst(struct drm_device *dev); |
| 775 | extern void psb_irq_preinstall(struct drm_device *dev); | 775 | extern void psb_irq_preinstall(struct drm_device *dev); |
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h index bde27fdb41bf..dc2c8eb030fa 100644 --- a/drivers/gpu/drm/gma500/psb_intel_drv.h +++ b/drivers/gpu/drm/gma500/psb_intel_drv.h | |||
| @@ -250,11 +250,6 @@ extern void psb_intel_sdvo_set_hotplug(struct drm_connector *connector, | |||
| 250 | extern int intelfb_probe(struct drm_device *dev); | 250 | extern int intelfb_probe(struct drm_device *dev); |
| 251 | extern int intelfb_remove(struct drm_device *dev, | 251 | extern int intelfb_remove(struct drm_device *dev, |
| 252 | struct drm_framebuffer *fb); | 252 | struct drm_framebuffer *fb); |
| 253 | extern struct drm_framebuffer *psb_intel_framebuffer_create(struct drm_device | ||
| 254 | *dev, struct | ||
| 255 | drm_mode_fb_cmd | ||
| 256 | *mode_cmd, | ||
| 257 | void *mm_private); | ||
| 258 | extern bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder, | 253 | extern bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder, |
| 259 | const struct drm_display_mode *mode, | 254 | const struct drm_display_mode *mode, |
| 260 | struct drm_display_mode *adjusted_mode); | 255 | struct drm_display_mode *adjusted_mode); |
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c index ba4830342d34..f883f9e4c524 100644 --- a/drivers/gpu/drm/gma500/psb_irq.c +++ b/drivers/gpu/drm/gma500/psb_irq.c | |||
| @@ -200,7 +200,7 @@ static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat) | |||
| 200 | mid_pipe_event_handler(dev, 1); | 200 | mid_pipe_event_handler(dev, 1); |
| 201 | } | 201 | } |
| 202 | 202 | ||
| 203 | irqreturn_t psb_irq_handler(DRM_IRQ_ARGS) | 203 | irqreturn_t psb_irq_handler(int irq, void *arg) |
| 204 | { | 204 | { |
| 205 | struct drm_device *dev = arg; | 205 | struct drm_device *dev = arg; |
| 206 | struct drm_psb_private *dev_priv = dev->dev_private; | 206 | struct drm_psb_private *dev_priv = dev->dev_private; |
| @@ -253,7 +253,7 @@ irqreturn_t psb_irq_handler(DRM_IRQ_ARGS) | |||
| 253 | 253 | ||
| 254 | PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R); | 254 | PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R); |
| 255 | (void) PSB_RVDC32(PSB_INT_IDENTITY_R); | 255 | (void) PSB_RVDC32(PSB_INT_IDENTITY_R); |
| 256 | DRM_READMEMORYBARRIER(); | 256 | rmb(); |
| 257 | 257 | ||
| 258 | if (!handled) | 258 | if (!handled) |
| 259 | return IRQ_NONE; | 259 | return IRQ_NONE; |
| @@ -450,21 +450,6 @@ int psb_irq_disable_dpst(struct drm_device *dev) | |||
| 450 | return 0; | 450 | return 0; |
| 451 | } | 451 | } |
| 452 | 452 | ||
| 453 | #ifdef PSB_FIXME | ||
| 454 | static int psb_vblank_do_wait(struct drm_device *dev, | ||
| 455 | unsigned int *sequence, atomic_t *counter) | ||
| 456 | { | ||
| 457 | unsigned int cur_vblank; | ||
| 458 | int ret = 0; | ||
| 459 | DRM_WAIT_ON(ret, dev->vblank.queue, 3 * DRM_HZ, | ||
| 460 | (((cur_vblank = atomic_read(counter)) | ||
| 461 | - *sequence) <= (1 << 23))); | ||
| 462 | *sequence = cur_vblank; | ||
| 463 | |||
| 464 | return ret; | ||
| 465 | } | ||
| 466 | #endif | ||
| 467 | |||
| 468 | /* | 453 | /* |
| 469 | * It is used to enable VBLANK interrupt | 454 | * It is used to enable VBLANK interrupt |
| 470 | */ | 455 | */ |
diff --git a/drivers/gpu/drm/gma500/psb_irq.h b/drivers/gpu/drm/gma500/psb_irq.h index debb7f190c06..d0b45ffa1126 100644 --- a/drivers/gpu/drm/gma500/psb_irq.h +++ b/drivers/gpu/drm/gma500/psb_irq.h | |||
| @@ -32,7 +32,7 @@ void sysirq_uninit(struct drm_device *dev); | |||
| 32 | void psb_irq_preinstall(struct drm_device *dev); | 32 | void psb_irq_preinstall(struct drm_device *dev); |
| 33 | int psb_irq_postinstall(struct drm_device *dev); | 33 | int psb_irq_postinstall(struct drm_device *dev); |
| 34 | void psb_irq_uninstall(struct drm_device *dev); | 34 | void psb_irq_uninstall(struct drm_device *dev); |
| 35 | irqreturn_t psb_irq_handler(DRM_IRQ_ARGS); | 35 | irqreturn_t psb_irq_handler(int irq, void *arg); |
| 36 | 36 | ||
| 37 | int psb_irq_enable_dpst(struct drm_device *dev); | 37 | int psb_irq_enable_dpst(struct drm_device *dev); |
| 38 | int psb_irq_disable_dpst(struct drm_device *dev); | 38 | int psb_irq_disable_dpst(struct drm_device *dev); |
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c index 249fdff305c6..aeace37415aa 100644 --- a/drivers/gpu/drm/i810/i810_dma.c +++ b/drivers/gpu/drm/i810/i810_dma.c | |||
| @@ -1193,6 +1193,10 @@ static int i810_flip_bufs(struct drm_device *dev, void *data, | |||
| 1193 | 1193 | ||
| 1194 | int i810_driver_load(struct drm_device *dev, unsigned long flags) | 1194 | int i810_driver_load(struct drm_device *dev, unsigned long flags) |
| 1195 | { | 1195 | { |
| 1196 | /* Our userspace depends upon the agp mapping support. */ | ||
| 1197 | if (!dev->agp) | ||
| 1198 | return -EINVAL; | ||
| 1199 | |||
| 1196 | pci_set_master(dev->pdev); | 1200 | pci_set_master(dev->pdev); |
| 1197 | 1201 | ||
| 1198 | return 0; | 1202 | return 0; |
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c index d8180d22cedd..441ccf8f5bdc 100644 --- a/drivers/gpu/drm/i810/i810_drv.c +++ b/drivers/gpu/drm/i810/i810_drv.c | |||
| @@ -57,7 +57,7 @@ static const struct file_operations i810_driver_fops = { | |||
| 57 | 57 | ||
| 58 | static struct drm_driver driver = { | 58 | static struct drm_driver driver = { |
| 59 | .driver_features = | 59 | .driver_features = |
| 60 | DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | | 60 | DRIVER_USE_AGP | |
| 61 | DRIVER_HAVE_DMA, | 61 | DRIVER_HAVE_DMA, |
| 62 | .dev_priv_size = sizeof(drm_i810_buf_priv_t), | 62 | .dev_priv_size = sizeof(drm_i810_buf_priv_t), |
| 63 | .load = i810_driver_load, | 63 | .load = i810_driver_load, |
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index 6199d0b5b958..73ed59eff139 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig | |||
| @@ -1,8 +1,10 @@ | |||
| 1 | config DRM_I915 | 1 | config DRM_I915 |
| 2 | tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics" | 2 | tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics" |
| 3 | depends on DRM | 3 | depends on DRM |
| 4 | depends on AGP | 4 | depends on X86 && PCI |
| 5 | depends on AGP_INTEL | 5 | depends on (AGP || AGP=n) |
| 6 | select INTEL_GTT | ||
| 7 | select AGP_INTEL if AGP | ||
| 6 | # we need shmfs for the swappable backing store, and in particular | 8 | # we need shmfs for the swappable backing store, and in particular |
| 7 | # the shmem_readpage() which depends upon tmpfs | 9 | # the shmem_readpage() which depends upon tmpfs |
| 8 | select SHMEM | 10 | select SHMEM |
| @@ -35,15 +37,14 @@ config DRM_I915 | |||
| 35 | config DRM_I915_KMS | 37 | config DRM_I915_KMS |
| 36 | bool "Enable modesetting on intel by default" | 38 | bool "Enable modesetting on intel by default" |
| 37 | depends on DRM_I915 | 39 | depends on DRM_I915 |
| 40 | default y | ||
| 38 | help | 41 | help |
| 39 | Choose this option if you want kernel modesetting enabled by default, | 42 | Choose this option if you want kernel modesetting enabled by default. |
| 40 | and you have a new enough userspace to support this. Running old | 43 | |
| 41 | userspaces with this enabled will cause pain. Note that this causes | 44 | If in doubt, say "Y". |
| 42 | the driver to bind to PCI devices, which precludes loading things | ||
| 43 | like intelfb. | ||
| 44 | 45 | ||
| 45 | config DRM_I915_FBDEV | 46 | config DRM_I915_FBDEV |
| 46 | bool "Enable legacy fbdev support for the modesettting intel driver" | 47 | bool "Enable legacy fbdev support for the modesetting intel driver" |
| 47 | depends on DRM_I915 | 48 | depends on DRM_I915 |
| 48 | select DRM_KMS_FB_HELPER | 49 | select DRM_KMS_FB_HELPER |
| 49 | select FB_CFB_FILLRECT | 50 | select FB_CFB_FILLRECT |
| @@ -55,9 +56,12 @@ config DRM_I915_FBDEV | |||
| 55 | support. Note that this support also provide the linux console | 56 | support. Note that this support also provide the linux console |
| 56 | support on top of the intel modesetting driver. | 57 | support on top of the intel modesetting driver. |
| 57 | 58 | ||
| 59 | If in doubt, say "Y". | ||
| 60 | |||
| 58 | config DRM_I915_PRELIMINARY_HW_SUPPORT | 61 | config DRM_I915_PRELIMINARY_HW_SUPPORT |
| 59 | bool "Enable preliminary support for prerelease Intel hardware by default" | 62 | bool "Enable preliminary support for prerelease Intel hardware by default" |
| 60 | depends on DRM_I915 | 63 | depends on DRM_I915 |
| 64 | default n | ||
| 61 | help | 65 | help |
| 62 | Choose this option if you have prerelease Intel hardware and want the | 66 | Choose this option if you have prerelease Intel hardware and want the |
| 63 | i915 driver to support it by default. You can enable such support at | 67 | i915 driver to support it by default. You can enable such support at |
| @@ -65,3 +69,15 @@ config DRM_I915_PRELIMINARY_HW_SUPPORT | |||
| 65 | option changes the default for that module option. | 69 | option changes the default for that module option. |
| 66 | 70 | ||
| 67 | If in doubt, say "N". | 71 | If in doubt, say "N". |
| 72 | |||
| 73 | config DRM_I915_UMS | ||
| 74 | bool "Enable userspace modesetting on Intel hardware (DEPRECATED)" | ||
| 75 | depends on DRM_I915 | ||
| 76 | default n | ||
| 77 | help | ||
| 78 | Choose this option if you still need userspace modesetting. | ||
| 79 | |||
| 80 | Userspace modesetting is deprecated for quite some time now, so | ||
| 81 | enable this only if you have ancient versions of the DDX drivers. | ||
| 82 | |||
| 83 | If in doubt, say "N". | ||
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index d4ae48b04cf2..9fd44f5f3b3b 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile | |||
| @@ -4,7 +4,6 @@ | |||
| 4 | 4 | ||
| 5 | ccflags-y := -Iinclude/drm | 5 | ccflags-y := -Iinclude/drm |
| 6 | i915-y := i915_drv.o i915_dma.o i915_irq.o \ | 6 | i915-y := i915_drv.o i915_dma.o i915_irq.o \ |
| 7 | i915_debugfs.o \ | ||
| 8 | i915_gpu_error.o \ | 7 | i915_gpu_error.o \ |
| 9 | i915_suspend.o \ | 8 | i915_suspend.o \ |
| 10 | i915_gem.o \ | 9 | i915_gem.o \ |
| @@ -54,6 +53,8 @@ i915-$(CONFIG_ACPI) += intel_acpi.o intel_opregion.o | |||
| 54 | 53 | ||
| 55 | i915-$(CONFIG_DRM_I915_FBDEV) += intel_fbdev.o | 54 | i915-$(CONFIG_DRM_I915_FBDEV) += intel_fbdev.o |
| 56 | 55 | ||
| 56 | i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o | ||
| 57 | |||
| 57 | obj-$(CONFIG_DRM_I915) += i915.o | 58 | obj-$(CONFIG_DRM_I915) += i915.o |
| 58 | 59 | ||
| 59 | CFLAGS_i915_trace_points.o := -I$(src) | 60 | CFLAGS_i915_trace_points.o := -I$(src) |
diff --git a/drivers/gpu/drm/i915/dvo_ns2501.c b/drivers/gpu/drm/i915/dvo_ns2501.c index c4a255be6979..954acb2c7021 100644 --- a/drivers/gpu/drm/i915/dvo_ns2501.c +++ b/drivers/gpu/drm/i915/dvo_ns2501.c | |||
| @@ -87,49 +87,6 @@ struct ns2501_priv { | |||
| 87 | * when switching the resolution. | 87 | * when switching the resolution. |
| 88 | */ | 88 | */ |
| 89 | 89 | ||
| 90 | static void enable_dvo(struct intel_dvo_device *dvo) | ||
| 91 | { | ||
| 92 | struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv); | ||
| 93 | struct i2c_adapter *adapter = dvo->i2c_bus; | ||
| 94 | struct intel_gmbus *bus = container_of(adapter, | ||
| 95 | struct intel_gmbus, | ||
| 96 | adapter); | ||
| 97 | struct drm_i915_private *dev_priv = bus->dev_priv; | ||
| 98 | |||
| 99 | DRM_DEBUG_KMS("%s: Trying to re-enable the DVO\n", __FUNCTION__); | ||
| 100 | |||
| 101 | ns->dvoc = I915_READ(DVO_C); | ||
| 102 | ns->pll_a = I915_READ(_DPLL_A); | ||
| 103 | ns->srcdim = I915_READ(DVOC_SRCDIM); | ||
| 104 | ns->fw_blc = I915_READ(FW_BLC); | ||
| 105 | |||
| 106 | I915_WRITE(DVOC, 0x10004084); | ||
| 107 | I915_WRITE(_DPLL_A, 0xd0820000); | ||
| 108 | I915_WRITE(DVOC_SRCDIM, 0x400300); // 1024x768 | ||
| 109 | I915_WRITE(FW_BLC, 0x1080304); | ||
| 110 | |||
| 111 | I915_WRITE(DVOC, 0x90004084); | ||
| 112 | } | ||
| 113 | |||
| 114 | /* | ||
| 115 | * Restore the I915 registers modified by the above | ||
| 116 | * trigger function. | ||
| 117 | */ | ||
| 118 | static void restore_dvo(struct intel_dvo_device *dvo) | ||
| 119 | { | ||
| 120 | struct i2c_adapter *adapter = dvo->i2c_bus; | ||
| 121 | struct intel_gmbus *bus = container_of(adapter, | ||
| 122 | struct intel_gmbus, | ||
| 123 | adapter); | ||
| 124 | struct drm_i915_private *dev_priv = bus->dev_priv; | ||
| 125 | struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv); | ||
| 126 | |||
| 127 | I915_WRITE(DVOC, ns->dvoc); | ||
| 128 | I915_WRITE(_DPLL_A, ns->pll_a); | ||
| 129 | I915_WRITE(DVOC_SRCDIM, ns->srcdim); | ||
| 130 | I915_WRITE(FW_BLC, ns->fw_blc); | ||
| 131 | } | ||
| 132 | |||
| 133 | /* | 90 | /* |
| 134 | ** Read a register from the ns2501. | 91 | ** Read a register from the ns2501. |
| 135 | ** Returns true if successful, false otherwise. | 92 | ** Returns true if successful, false otherwise. |
| @@ -300,7 +257,7 @@ static void ns2501_mode_set(struct intel_dvo_device *dvo, | |||
| 300 | struct drm_display_mode *adjusted_mode) | 257 | struct drm_display_mode *adjusted_mode) |
| 301 | { | 258 | { |
| 302 | bool ok; | 259 | bool ok; |
| 303 | bool restore = false; | 260 | int retries = 10; |
| 304 | struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv); | 261 | struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv); |
| 305 | 262 | ||
| 306 | DRM_DEBUG_KMS | 263 | DRM_DEBUG_KMS |
| @@ -476,20 +433,7 @@ static void ns2501_mode_set(struct intel_dvo_device *dvo, | |||
| 476 | ns->reg_8_shadow |= NS2501_8_BPAS; | 433 | ns->reg_8_shadow |= NS2501_8_BPAS; |
| 477 | } | 434 | } |
| 478 | ok &= ns2501_writeb(dvo, NS2501_REG8, ns->reg_8_shadow); | 435 | ok &= ns2501_writeb(dvo, NS2501_REG8, ns->reg_8_shadow); |
| 479 | 436 | } while (!ok && retries--); | |
| 480 | if (!ok) { | ||
| 481 | if (restore) | ||
| 482 | restore_dvo(dvo); | ||
| 483 | enable_dvo(dvo); | ||
| 484 | restore = true; | ||
| 485 | } | ||
| 486 | } while (!ok); | ||
| 487 | /* | ||
| 488 | * Restore the old i915 registers before | ||
| 489 | * forcing the ns2501 on. | ||
| 490 | */ | ||
| 491 | if (restore) | ||
| 492 | restore_dvo(dvo); | ||
| 493 | } | 437 | } |
| 494 | 438 | ||
| 495 | /* set the NS2501 power state */ | 439 | /* set the NS2501 power state */ |
| @@ -510,7 +454,7 @@ static bool ns2501_get_hw_state(struct intel_dvo_device *dvo) | |||
| 510 | static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable) | 454 | static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable) |
| 511 | { | 455 | { |
| 512 | bool ok; | 456 | bool ok; |
| 513 | bool restore = false; | 457 | int retries = 10; |
| 514 | struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv); | 458 | struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv); |
| 515 | unsigned char ch; | 459 | unsigned char ch; |
| 516 | 460 | ||
| @@ -537,16 +481,7 @@ static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable) | |||
| 537 | ok &= | 481 | ok &= |
| 538 | ns2501_writeb(dvo, 0x35, | 482 | ns2501_writeb(dvo, 0x35, |
| 539 | enable ? 0xff : 0x00); | 483 | enable ? 0xff : 0x00); |
| 540 | if (!ok) { | 484 | } while (!ok && retries--); |
| 541 | if (restore) | ||
| 542 | restore_dvo(dvo); | ||
| 543 | enable_dvo(dvo); | ||
| 544 | restore = true; | ||
| 545 | } | ||
| 546 | } while (!ok); | ||
| 547 | |||
| 548 | if (restore) | ||
| 549 | restore_dvo(dvo); | ||
| 550 | } | 485 | } |
| 551 | } | 486 | } |
| 552 | 487 | ||
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 6ed45a984230..b2b46c52294c 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c | |||
| @@ -40,8 +40,6 @@ | |||
| 40 | #include <drm/i915_drm.h> | 40 | #include <drm/i915_drm.h> |
| 41 | #include "i915_drv.h" | 41 | #include "i915_drv.h" |
| 42 | 42 | ||
| 43 | #if defined(CONFIG_DEBUG_FS) | ||
| 44 | |||
| 45 | enum { | 43 | enum { |
| 46 | ACTIVE_LIST, | 44 | ACTIVE_LIST, |
| 47 | INACTIVE_LIST, | 45 | INACTIVE_LIST, |
| @@ -406,16 +404,26 @@ static int i915_gem_object_info(struct seq_file *m, void* data) | |||
| 406 | seq_putc(m, '\n'); | 404 | seq_putc(m, '\n'); |
| 407 | list_for_each_entry_reverse(file, &dev->filelist, lhead) { | 405 | list_for_each_entry_reverse(file, &dev->filelist, lhead) { |
| 408 | struct file_stats stats; | 406 | struct file_stats stats; |
| 407 | struct task_struct *task; | ||
| 409 | 408 | ||
| 410 | memset(&stats, 0, sizeof(stats)); | 409 | memset(&stats, 0, sizeof(stats)); |
| 411 | idr_for_each(&file->object_idr, per_file_stats, &stats); | 410 | idr_for_each(&file->object_idr, per_file_stats, &stats); |
| 411 | /* | ||
| 412 | * Although we have a valid reference on file->pid, that does | ||
| 413 | * not guarantee that the task_struct who called get_pid() is | ||
| 414 | * still alive (e.g. get_pid(current) => fork() => exit()). | ||
| 415 | * Therefore, we need to protect this ->comm access using RCU. | ||
| 416 | */ | ||
| 417 | rcu_read_lock(); | ||
| 418 | task = pid_task(file->pid, PIDTYPE_PID); | ||
| 412 | seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu unbound)\n", | 419 | seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu unbound)\n", |
| 413 | get_pid_task(file->pid, PIDTYPE_PID)->comm, | 420 | task ? task->comm : "<unknown>", |
| 414 | stats.count, | 421 | stats.count, |
| 415 | stats.total, | 422 | stats.total, |
| 416 | stats.active, | 423 | stats.active, |
| 417 | stats.inactive, | 424 | stats.inactive, |
| 418 | stats.unbound); | 425 | stats.unbound); |
| 426 | rcu_read_unlock(); | ||
| 419 | } | 427 | } |
| 420 | 428 | ||
| 421 | mutex_unlock(&dev->struct_mutex); | 429 | mutex_unlock(&dev->struct_mutex); |
| @@ -564,10 +572,12 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data) | |||
| 564 | ret = mutex_lock_interruptible(&dev->struct_mutex); | 572 | ret = mutex_lock_interruptible(&dev->struct_mutex); |
| 565 | if (ret) | 573 | if (ret) |
| 566 | return ret; | 574 | return ret; |
| 575 | intel_runtime_pm_get(dev_priv); | ||
| 567 | 576 | ||
| 568 | for_each_ring(ring, dev_priv, i) | 577 | for_each_ring(ring, dev_priv, i) |
| 569 | i915_ring_seqno_info(m, ring); | 578 | i915_ring_seqno_info(m, ring); |
| 570 | 579 | ||
| 580 | intel_runtime_pm_put(dev_priv); | ||
| 571 | mutex_unlock(&dev->struct_mutex); | 581 | mutex_unlock(&dev->struct_mutex); |
| 572 | 582 | ||
| 573 | return 0; | 583 | return 0; |
| @@ -585,6 +595,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data) | |||
| 585 | ret = mutex_lock_interruptible(&dev->struct_mutex); | 595 | ret = mutex_lock_interruptible(&dev->struct_mutex); |
| 586 | if (ret) | 596 | if (ret) |
| 587 | return ret; | 597 | return ret; |
| 598 | intel_runtime_pm_get(dev_priv); | ||
| 588 | 599 | ||
| 589 | if (INTEL_INFO(dev)->gen >= 8) { | 600 | if (INTEL_INFO(dev)->gen >= 8) { |
| 590 | int i; | 601 | int i; |
| @@ -711,6 +722,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data) | |||
| 711 | } | 722 | } |
| 712 | i915_ring_seqno_info(m, ring); | 723 | i915_ring_seqno_info(m, ring); |
| 713 | } | 724 | } |
| 725 | intel_runtime_pm_put(dev_priv); | ||
| 714 | mutex_unlock(&dev->struct_mutex); | 726 | mutex_unlock(&dev->struct_mutex); |
| 715 | 727 | ||
| 716 | return 0; | 728 | return 0; |
| @@ -904,9 +916,11 @@ static int i915_rstdby_delays(struct seq_file *m, void *unused) | |||
| 904 | ret = mutex_lock_interruptible(&dev->struct_mutex); | 916 | ret = mutex_lock_interruptible(&dev->struct_mutex); |
| 905 | if (ret) | 917 | if (ret) |
| 906 | return ret; | 918 | return ret; |
| 919 | intel_runtime_pm_get(dev_priv); | ||
| 907 | 920 | ||
| 908 | crstanddelay = I915_READ16(CRSTANDVID); | 921 | crstanddelay = I915_READ16(CRSTANDVID); |
| 909 | 922 | ||
| 923 | intel_runtime_pm_put(dev_priv); | ||
| 910 | mutex_unlock(&dev->struct_mutex); | 924 | mutex_unlock(&dev->struct_mutex); |
| 911 | 925 | ||
| 912 | seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f)); | 926 | seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f)); |
| @@ -919,7 +933,9 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) | |||
| 919 | struct drm_info_node *node = (struct drm_info_node *) m->private; | 933 | struct drm_info_node *node = (struct drm_info_node *) m->private; |
| 920 | struct drm_device *dev = node->minor->dev; | 934 | struct drm_device *dev = node->minor->dev; |
| 921 | drm_i915_private_t *dev_priv = dev->dev_private; | 935 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 922 | int ret; | 936 | int ret = 0; |
| 937 | |||
| 938 | intel_runtime_pm_get(dev_priv); | ||
| 923 | 939 | ||
| 924 | flush_delayed_work(&dev_priv->rps.delayed_resume_work); | 940 | flush_delayed_work(&dev_priv->rps.delayed_resume_work); |
| 925 | 941 | ||
| @@ -945,9 +961,9 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) | |||
| 945 | /* RPSTAT1 is in the GT power well */ | 961 | /* RPSTAT1 is in the GT power well */ |
| 946 | ret = mutex_lock_interruptible(&dev->struct_mutex); | 962 | ret = mutex_lock_interruptible(&dev->struct_mutex); |
| 947 | if (ret) | 963 | if (ret) |
| 948 | return ret; | 964 | goto out; |
| 949 | 965 | ||
| 950 | gen6_gt_force_wake_get(dev_priv); | 966 | gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); |
| 951 | 967 | ||
| 952 | reqf = I915_READ(GEN6_RPNSWREQ); | 968 | reqf = I915_READ(GEN6_RPNSWREQ); |
| 953 | reqf &= ~GEN6_TURBO_DISABLE; | 969 | reqf &= ~GEN6_TURBO_DISABLE; |
| @@ -970,7 +986,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) | |||
| 970 | cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT; | 986 | cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT; |
| 971 | cagf *= GT_FREQUENCY_MULTIPLIER; | 987 | cagf *= GT_FREQUENCY_MULTIPLIER; |
| 972 | 988 | ||
| 973 | gen6_gt_force_wake_put(dev_priv); | 989 | gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); |
| 974 | mutex_unlock(&dev->struct_mutex); | 990 | mutex_unlock(&dev->struct_mutex); |
| 975 | 991 | ||
| 976 | seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); | 992 | seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); |
| @@ -1018,23 +1034,24 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) | |||
| 1018 | seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts); | 1034 | seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts); |
| 1019 | seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq); | 1035 | seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq); |
| 1020 | 1036 | ||
| 1021 | val = vlv_punit_read(dev_priv, PUNIT_FUSE_BUS1); | 1037 | val = valleyview_rps_max_freq(dev_priv); |
| 1022 | seq_printf(m, "max GPU freq: %d MHz\n", | 1038 | seq_printf(m, "max GPU freq: %d MHz\n", |
| 1023 | vlv_gpu_freq(dev_priv->mem_freq, val)); | 1039 | vlv_gpu_freq(dev_priv, val)); |
| 1024 | 1040 | ||
| 1025 | val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM); | 1041 | val = valleyview_rps_min_freq(dev_priv); |
| 1026 | seq_printf(m, "min GPU freq: %d MHz\n", | 1042 | seq_printf(m, "min GPU freq: %d MHz\n", |
| 1027 | vlv_gpu_freq(dev_priv->mem_freq, val)); | 1043 | vlv_gpu_freq(dev_priv, val)); |
| 1028 | 1044 | ||
| 1029 | seq_printf(m, "current GPU freq: %d MHz\n", | 1045 | seq_printf(m, "current GPU freq: %d MHz\n", |
| 1030 | vlv_gpu_freq(dev_priv->mem_freq, | 1046 | vlv_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff)); |
| 1031 | (freq_sts >> 8) & 0xff)); | ||
| 1032 | mutex_unlock(&dev_priv->rps.hw_lock); | 1047 | mutex_unlock(&dev_priv->rps.hw_lock); |
| 1033 | } else { | 1048 | } else { |
| 1034 | seq_puts(m, "no P-state info available\n"); | 1049 | seq_puts(m, "no P-state info available\n"); |
| 1035 | } | 1050 | } |
| 1036 | 1051 | ||
| 1037 | return 0; | 1052 | out: |
| 1053 | intel_runtime_pm_put(dev_priv); | ||
| 1054 | return ret; | ||
| 1038 | } | 1055 | } |
| 1039 | 1056 | ||
| 1040 | static int i915_delayfreq_table(struct seq_file *m, void *unused) | 1057 | static int i915_delayfreq_table(struct seq_file *m, void *unused) |
| @@ -1048,6 +1065,7 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused) | |||
| 1048 | ret = mutex_lock_interruptible(&dev->struct_mutex); | 1065 | ret = mutex_lock_interruptible(&dev->struct_mutex); |
| 1049 | if (ret) | 1066 | if (ret) |
| 1050 | return ret; | 1067 | return ret; |
| 1068 | intel_runtime_pm_get(dev_priv); | ||
| 1051 | 1069 | ||
| 1052 | for (i = 0; i < 16; i++) { | 1070 | for (i = 0; i < 16; i++) { |
| 1053 | delayfreq = I915_READ(PXVFREQ_BASE + i * 4); | 1071 | delayfreq = I915_READ(PXVFREQ_BASE + i * 4); |
| @@ -1055,6 +1073,8 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused) | |||
| 1055 | (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT); | 1073 | (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT); |
| 1056 | } | 1074 | } |
| 1057 | 1075 | ||
| 1076 | intel_runtime_pm_put(dev_priv); | ||
| 1077 | |||
| 1058 | mutex_unlock(&dev->struct_mutex); | 1078 | mutex_unlock(&dev->struct_mutex); |
| 1059 | 1079 | ||
| 1060 | return 0; | 1080 | return 0; |
| @@ -1076,12 +1096,14 @@ static int i915_inttoext_table(struct seq_file *m, void *unused) | |||
| 1076 | ret = mutex_lock_interruptible(&dev->struct_mutex); | 1096 | ret = mutex_lock_interruptible(&dev->struct_mutex); |
| 1077 | if (ret) | 1097 | if (ret) |
| 1078 | return ret; | 1098 | return ret; |
| 1099 | intel_runtime_pm_get(dev_priv); | ||
| 1079 | 1100 | ||
| 1080 | for (i = 1; i <= 32; i++) { | 1101 | for (i = 1; i <= 32; i++) { |
| 1081 | inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4); | 1102 | inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4); |
| 1082 | seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext); | 1103 | seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext); |
| 1083 | } | 1104 | } |
| 1084 | 1105 | ||
| 1106 | intel_runtime_pm_put(dev_priv); | ||
| 1085 | mutex_unlock(&dev->struct_mutex); | 1107 | mutex_unlock(&dev->struct_mutex); |
| 1086 | 1108 | ||
| 1087 | return 0; | 1109 | return 0; |
| @@ -1099,11 +1121,13 @@ static int ironlake_drpc_info(struct seq_file *m) | |||
| 1099 | ret = mutex_lock_interruptible(&dev->struct_mutex); | 1121 | ret = mutex_lock_interruptible(&dev->struct_mutex); |
| 1100 | if (ret) | 1122 | if (ret) |
| 1101 | return ret; | 1123 | return ret; |
| 1124 | intel_runtime_pm_get(dev_priv); | ||
| 1102 | 1125 | ||
| 1103 | rgvmodectl = I915_READ(MEMMODECTL); | 1126 | rgvmodectl = I915_READ(MEMMODECTL); |
| 1104 | rstdbyctl = I915_READ(RSTDBYCTL); | 1127 | rstdbyctl = I915_READ(RSTDBYCTL); |
| 1105 | crstandvid = I915_READ16(CRSTANDVID); | 1128 | crstandvid = I915_READ16(CRSTANDVID); |
| 1106 | 1129 | ||
| 1130 | intel_runtime_pm_put(dev_priv); | ||
| 1107 | mutex_unlock(&dev->struct_mutex); | 1131 | mutex_unlock(&dev->struct_mutex); |
| 1108 | 1132 | ||
| 1109 | seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ? | 1133 | seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ? |
| @@ -1154,6 +1178,50 @@ static int ironlake_drpc_info(struct seq_file *m) | |||
| 1154 | return 0; | 1178 | return 0; |
| 1155 | } | 1179 | } |
| 1156 | 1180 | ||
| 1181 | static int vlv_drpc_info(struct seq_file *m) | ||
| 1182 | { | ||
| 1183 | |||
| 1184 | struct drm_info_node *node = (struct drm_info_node *) m->private; | ||
| 1185 | struct drm_device *dev = node->minor->dev; | ||
| 1186 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 1187 | u32 rpmodectl1, rcctl1; | ||
| 1188 | unsigned fw_rendercount = 0, fw_mediacount = 0; | ||
| 1189 | |||
| 1190 | rpmodectl1 = I915_READ(GEN6_RP_CONTROL); | ||
| 1191 | rcctl1 = I915_READ(GEN6_RC_CONTROL); | ||
| 1192 | |||
| 1193 | seq_printf(m, "Video Turbo Mode: %s\n", | ||
| 1194 | yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO)); | ||
| 1195 | seq_printf(m, "Turbo enabled: %s\n", | ||
| 1196 | yesno(rpmodectl1 & GEN6_RP_ENABLE)); | ||
| 1197 | seq_printf(m, "HW control enabled: %s\n", | ||
| 1198 | yesno(rpmodectl1 & GEN6_RP_ENABLE)); | ||
| 1199 | seq_printf(m, "SW control enabled: %s\n", | ||
| 1200 | yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) == | ||
| 1201 | GEN6_RP_MEDIA_SW_MODE)); | ||
| 1202 | seq_printf(m, "RC6 Enabled: %s\n", | ||
| 1203 | yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE | | ||
| 1204 | GEN6_RC_CTL_EI_MODE(1)))); | ||
| 1205 | seq_printf(m, "Render Power Well: %s\n", | ||
| 1206 | (I915_READ(VLV_GTLC_PW_STATUS) & | ||
| 1207 | VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down"); | ||
| 1208 | seq_printf(m, "Media Power Well: %s\n", | ||
| 1209 | (I915_READ(VLV_GTLC_PW_STATUS) & | ||
| 1210 | VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down"); | ||
| 1211 | |||
| 1212 | spin_lock_irq(&dev_priv->uncore.lock); | ||
| 1213 | fw_rendercount = dev_priv->uncore.fw_rendercount; | ||
| 1214 | fw_mediacount = dev_priv->uncore.fw_mediacount; | ||
| 1215 | spin_unlock_irq(&dev_priv->uncore.lock); | ||
| 1216 | |||
| 1217 | seq_printf(m, "Forcewake Render Count = %u\n", fw_rendercount); | ||
| 1218 | seq_printf(m, "Forcewake Media Count = %u\n", fw_mediacount); | ||
| 1219 | |||
| 1220 | |||
| 1221 | return 0; | ||
| 1222 | } | ||
| 1223 | |||
| 1224 | |||
| 1157 | static int gen6_drpc_info(struct seq_file *m) | 1225 | static int gen6_drpc_info(struct seq_file *m) |
| 1158 | { | 1226 | { |
| 1159 | 1227 | ||
| @@ -1167,6 +1235,7 @@ static int gen6_drpc_info(struct seq_file *m) | |||
| 1167 | ret = mutex_lock_interruptible(&dev->struct_mutex); | 1235 | ret = mutex_lock_interruptible(&dev->struct_mutex); |
| 1168 | if (ret) | 1236 | if (ret) |
| 1169 | return ret; | 1237 | return ret; |
| 1238 | intel_runtime_pm_get(dev_priv); | ||
| 1170 | 1239 | ||
| 1171 | spin_lock_irq(&dev_priv->uncore.lock); | 1240 | spin_lock_irq(&dev_priv->uncore.lock); |
| 1172 | forcewake_count = dev_priv->uncore.forcewake_count; | 1241 | forcewake_count = dev_priv->uncore.forcewake_count; |
| @@ -1192,6 +1261,8 @@ static int gen6_drpc_info(struct seq_file *m) | |||
| 1192 | sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); | 1261 | sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); |
| 1193 | mutex_unlock(&dev_priv->rps.hw_lock); | 1262 | mutex_unlock(&dev_priv->rps.hw_lock); |
| 1194 | 1263 | ||
| 1264 | intel_runtime_pm_put(dev_priv); | ||
| 1265 | |||
| 1195 | seq_printf(m, "Video Turbo Mode: %s\n", | 1266 | seq_printf(m, "Video Turbo Mode: %s\n", |
| 1196 | yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO)); | 1267 | yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO)); |
| 1197 | seq_printf(m, "HW control enabled: %s\n", | 1268 | seq_printf(m, "HW control enabled: %s\n", |
| @@ -1256,7 +1327,9 @@ static int i915_drpc_info(struct seq_file *m, void *unused) | |||
| 1256 | struct drm_info_node *node = (struct drm_info_node *) m->private; | 1327 | struct drm_info_node *node = (struct drm_info_node *) m->private; |
| 1257 | struct drm_device *dev = node->minor->dev; | 1328 | struct drm_device *dev = node->minor->dev; |
| 1258 | 1329 | ||
| 1259 | if (IS_GEN6(dev) || IS_GEN7(dev)) | 1330 | if (IS_VALLEYVIEW(dev)) |
| 1331 | return vlv_drpc_info(m); | ||
| 1332 | else if (IS_GEN6(dev) || IS_GEN7(dev)) | ||
| 1260 | return gen6_drpc_info(m); | 1333 | return gen6_drpc_info(m); |
| 1261 | else | 1334 | else |
| 1262 | return ironlake_drpc_info(m); | 1335 | return ironlake_drpc_info(m); |
| @@ -1268,7 +1341,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused) | |||
| 1268 | struct drm_device *dev = node->minor->dev; | 1341 | struct drm_device *dev = node->minor->dev; |
| 1269 | drm_i915_private_t *dev_priv = dev->dev_private; | 1342 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 1270 | 1343 | ||
| 1271 | if (!I915_HAS_FBC(dev)) { | 1344 | if (!HAS_FBC(dev)) { |
| 1272 | seq_puts(m, "FBC unsupported on this chipset\n"); | 1345 | seq_puts(m, "FBC unsupported on this chipset\n"); |
| 1273 | return 0; | 1346 | return 0; |
| 1274 | } | 1347 | } |
| @@ -1330,7 +1403,7 @@ static int i915_ips_status(struct seq_file *m, void *unused) | |||
| 1330 | return 0; | 1403 | return 0; |
| 1331 | } | 1404 | } |
| 1332 | 1405 | ||
| 1333 | if (I915_READ(IPS_CTL) & IPS_ENABLE) | 1406 | if (IS_BROADWELL(dev) || I915_READ(IPS_CTL) & IPS_ENABLE) |
| 1334 | seq_puts(m, "enabled\n"); | 1407 | seq_puts(m, "enabled\n"); |
| 1335 | else | 1408 | else |
| 1336 | seq_puts(m, "disabled\n"); | 1409 | seq_puts(m, "disabled\n"); |
| @@ -1406,6 +1479,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) | |||
| 1406 | ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); | 1479 | ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); |
| 1407 | if (ret) | 1480 | if (ret) |
| 1408 | return ret; | 1481 | return ret; |
| 1482 | intel_runtime_pm_get(dev_priv); | ||
| 1409 | 1483 | ||
| 1410 | seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n"); | 1484 | seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n"); |
| 1411 | 1485 | ||
| @@ -1422,6 +1496,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) | |||
| 1422 | ((ia_freq >> 8) & 0xff) * 100); | 1496 | ((ia_freq >> 8) & 0xff) * 100); |
| 1423 | } | 1497 | } |
| 1424 | 1498 | ||
| 1499 | intel_runtime_pm_put(dev_priv); | ||
| 1425 | mutex_unlock(&dev_priv->rps.hw_lock); | 1500 | mutex_unlock(&dev_priv->rps.hw_lock); |
| 1426 | 1501 | ||
| 1427 | return 0; | 1502 | return 0; |
| @@ -1437,8 +1512,10 @@ static int i915_gfxec(struct seq_file *m, void *unused) | |||
| 1437 | ret = mutex_lock_interruptible(&dev->struct_mutex); | 1512 | ret = mutex_lock_interruptible(&dev->struct_mutex); |
| 1438 | if (ret) | 1513 | if (ret) |
| 1439 | return ret; | 1514 | return ret; |
| 1515 | intel_runtime_pm_get(dev_priv); | ||
| 1440 | 1516 | ||
| 1441 | seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4)); | 1517 | seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4)); |
| 1518 | intel_runtime_pm_put(dev_priv); | ||
| 1442 | 1519 | ||
| 1443 | mutex_unlock(&dev->struct_mutex); | 1520 | mutex_unlock(&dev->struct_mutex); |
| 1444 | 1521 | ||
| @@ -1565,13 +1642,21 @@ static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data) | |||
| 1565 | struct drm_info_node *node = (struct drm_info_node *) m->private; | 1642 | struct drm_info_node *node = (struct drm_info_node *) m->private; |
| 1566 | struct drm_device *dev = node->minor->dev; | 1643 | struct drm_device *dev = node->minor->dev; |
| 1567 | struct drm_i915_private *dev_priv = dev->dev_private; | 1644 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1568 | unsigned forcewake_count; | 1645 | unsigned forcewake_count = 0, fw_rendercount = 0, fw_mediacount = 0; |
| 1569 | 1646 | ||
| 1570 | spin_lock_irq(&dev_priv->uncore.lock); | 1647 | spin_lock_irq(&dev_priv->uncore.lock); |
| 1571 | forcewake_count = dev_priv->uncore.forcewake_count; | 1648 | if (IS_VALLEYVIEW(dev)) { |
| 1649 | fw_rendercount = dev_priv->uncore.fw_rendercount; | ||
| 1650 | fw_mediacount = dev_priv->uncore.fw_mediacount; | ||
| 1651 | } else | ||
| 1652 | forcewake_count = dev_priv->uncore.forcewake_count; | ||
| 1572 | spin_unlock_irq(&dev_priv->uncore.lock); | 1653 | spin_unlock_irq(&dev_priv->uncore.lock); |
| 1573 | 1654 | ||
| 1574 | seq_printf(m, "forcewake count = %u\n", forcewake_count); | 1655 | if (IS_VALLEYVIEW(dev)) { |
| 1656 | seq_printf(m, "fw_rendercount = %u\n", fw_rendercount); | ||
| 1657 | seq_printf(m, "fw_mediacount = %u\n", fw_mediacount); | ||
| 1658 | } else | ||
| 1659 | seq_printf(m, "forcewake count = %u\n", forcewake_count); | ||
| 1575 | 1660 | ||
| 1576 | return 0; | 1661 | return 0; |
| 1577 | } | 1662 | } |
| @@ -1610,6 +1695,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data) | |||
| 1610 | ret = mutex_lock_interruptible(&dev->struct_mutex); | 1695 | ret = mutex_lock_interruptible(&dev->struct_mutex); |
| 1611 | if (ret) | 1696 | if (ret) |
| 1612 | return ret; | 1697 | return ret; |
| 1698 | intel_runtime_pm_get(dev_priv); | ||
| 1613 | 1699 | ||
| 1614 | seq_printf(m, "bit6 swizzle for X-tiling = %s\n", | 1700 | seq_printf(m, "bit6 swizzle for X-tiling = %s\n", |
| 1615 | swizzle_string(dev_priv->mm.bit_6_swizzle_x)); | 1701 | swizzle_string(dev_priv->mm.bit_6_swizzle_x)); |
| @@ -1641,6 +1727,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data) | |||
| 1641 | seq_printf(m, "DISP_ARB_CTL = 0x%08x\n", | 1727 | seq_printf(m, "DISP_ARB_CTL = 0x%08x\n", |
| 1642 | I915_READ(DISP_ARB_CTL)); | 1728 | I915_READ(DISP_ARB_CTL)); |
| 1643 | } | 1729 | } |
| 1730 | intel_runtime_pm_put(dev_priv); | ||
| 1644 | mutex_unlock(&dev->struct_mutex); | 1731 | mutex_unlock(&dev->struct_mutex); |
| 1645 | 1732 | ||
| 1646 | return 0; | 1733 | return 0; |
| @@ -1701,16 +1788,19 @@ static int i915_ppgtt_info(struct seq_file *m, void *data) | |||
| 1701 | { | 1788 | { |
| 1702 | struct drm_info_node *node = (struct drm_info_node *) m->private; | 1789 | struct drm_info_node *node = (struct drm_info_node *) m->private; |
| 1703 | struct drm_device *dev = node->minor->dev; | 1790 | struct drm_device *dev = node->minor->dev; |
| 1791 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 1704 | 1792 | ||
| 1705 | int ret = mutex_lock_interruptible(&dev->struct_mutex); | 1793 | int ret = mutex_lock_interruptible(&dev->struct_mutex); |
| 1706 | if (ret) | 1794 | if (ret) |
| 1707 | return ret; | 1795 | return ret; |
| 1796 | intel_runtime_pm_get(dev_priv); | ||
| 1708 | 1797 | ||
| 1709 | if (INTEL_INFO(dev)->gen >= 8) | 1798 | if (INTEL_INFO(dev)->gen >= 8) |
| 1710 | gen8_ppgtt_info(m, dev); | 1799 | gen8_ppgtt_info(m, dev); |
| 1711 | else if (INTEL_INFO(dev)->gen >= 6) | 1800 | else if (INTEL_INFO(dev)->gen >= 6) |
| 1712 | gen6_ppgtt_info(m, dev); | 1801 | gen6_ppgtt_info(m, dev); |
| 1713 | 1802 | ||
| 1803 | intel_runtime_pm_put(dev_priv); | ||
| 1714 | mutex_unlock(&dev->struct_mutex); | 1804 | mutex_unlock(&dev->struct_mutex); |
| 1715 | 1805 | ||
| 1716 | return 0; | 1806 | return 0; |
| @@ -1735,28 +1825,28 @@ static int i915_dpio_info(struct seq_file *m, void *data) | |||
| 1735 | 1825 | ||
| 1736 | seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL)); | 1826 | seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL)); |
| 1737 | 1827 | ||
| 1738 | seq_printf(m, "DPIO_DIV_A: 0x%08x\n", | 1828 | seq_printf(m, "DPIO PLL DW3 CH0 : 0x%08x\n", |
| 1739 | vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_A)); | 1829 | vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW3(0))); |
| 1740 | seq_printf(m, "DPIO_DIV_B: 0x%08x\n", | 1830 | seq_printf(m, "DPIO PLL DW3 CH1: 0x%08x\n", |
| 1741 | vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_B)); | 1831 | vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW3(1))); |
| 1742 | 1832 | ||
| 1743 | seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n", | 1833 | seq_printf(m, "DPIO PLL DW5 CH0: 0x%08x\n", |
| 1744 | vlv_dpio_read(dev_priv, PIPE_A, _DPIO_REFSFR_A)); | 1834 | vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW5(0))); |
| 1745 | seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n", | 1835 | seq_printf(m, "DPIO PLL DW5 CH1: 0x%08x\n", |
| 1746 | vlv_dpio_read(dev_priv, PIPE_A, _DPIO_REFSFR_B)); | 1836 | vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW5(1))); |
| 1747 | 1837 | ||
| 1748 | seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n", | 1838 | seq_printf(m, "DPIO PLL DW7 CH0: 0x%08x\n", |
| 1749 | vlv_dpio_read(dev_priv, PIPE_A, _DPIO_CORE_CLK_A)); | 1839 | vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW7(0))); |
| 1750 | seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n", | 1840 | seq_printf(m, "DPIO PLL DW7 CH1: 0x%08x\n", |
| 1751 | vlv_dpio_read(dev_priv, PIPE_A, _DPIO_CORE_CLK_B)); | 1841 | vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW7(1))); |
| 1752 | 1842 | ||
| 1753 | seq_printf(m, "DPIO_LPF_COEFF_A: 0x%08x\n", | 1843 | seq_printf(m, "DPIO PLL DW10 CH0: 0x%08x\n", |
| 1754 | vlv_dpio_read(dev_priv, PIPE_A, _DPIO_LPF_COEFF_A)); | 1844 | vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW10(0))); |
| 1755 | seq_printf(m, "DPIO_LPF_COEFF_B: 0x%08x\n", | 1845 | seq_printf(m, "DPIO PLL DW10 CH1: 0x%08x\n", |
| 1756 | vlv_dpio_read(dev_priv, PIPE_A, _DPIO_LPF_COEFF_B)); | 1846 | vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW10(1))); |
| 1757 | 1847 | ||
| 1758 | seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n", | 1848 | seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n", |
| 1759 | vlv_dpio_read(dev_priv, PIPE_A, DPIO_FASTCLK_DISABLE)); | 1849 | vlv_dpio_read(dev_priv, PIPE_A, VLV_CMN_DW0)); |
| 1760 | 1850 | ||
| 1761 | mutex_unlock(&dev_priv->dpio_lock); | 1851 | mutex_unlock(&dev_priv->dpio_lock); |
| 1762 | 1852 | ||
| @@ -1784,6 +1874,8 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) | |||
| 1784 | u32 psrperf = 0; | 1874 | u32 psrperf = 0; |
| 1785 | bool enabled = false; | 1875 | bool enabled = false; |
| 1786 | 1876 | ||
| 1877 | intel_runtime_pm_get(dev_priv); | ||
| 1878 | |||
| 1787 | seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support)); | 1879 | seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support)); |
| 1788 | seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok)); | 1880 | seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok)); |
| 1789 | 1881 | ||
| @@ -1796,6 +1888,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) | |||
| 1796 | EDP_PSR_PERF_CNT_MASK; | 1888 | EDP_PSR_PERF_CNT_MASK; |
| 1797 | seq_printf(m, "Performance_Counter: %u\n", psrperf); | 1889 | seq_printf(m, "Performance_Counter: %u\n", psrperf); |
| 1798 | 1890 | ||
| 1891 | intel_runtime_pm_put(dev_priv); | ||
| 1799 | return 0; | 1892 | return 0; |
| 1800 | } | 1893 | } |
| 1801 | 1894 | ||
| @@ -1845,6 +1938,76 @@ static int i915_pc8_status(struct seq_file *m, void *unused) | |||
| 1845 | return 0; | 1938 | return 0; |
| 1846 | } | 1939 | } |
| 1847 | 1940 | ||
| 1941 | static const char *power_domain_str(enum intel_display_power_domain domain) | ||
| 1942 | { | ||
| 1943 | switch (domain) { | ||
| 1944 | case POWER_DOMAIN_PIPE_A: | ||
| 1945 | return "PIPE_A"; | ||
| 1946 | case POWER_DOMAIN_PIPE_B: | ||
| 1947 | return "PIPE_B"; | ||
| 1948 | case POWER_DOMAIN_PIPE_C: | ||
| 1949 | return "PIPE_C"; | ||
| 1950 | case POWER_DOMAIN_PIPE_A_PANEL_FITTER: | ||
| 1951 | return "PIPE_A_PANEL_FITTER"; | ||
| 1952 | case POWER_DOMAIN_PIPE_B_PANEL_FITTER: | ||
| 1953 | return "PIPE_B_PANEL_FITTER"; | ||
| 1954 | case POWER_DOMAIN_PIPE_C_PANEL_FITTER: | ||
| 1955 | return "PIPE_C_PANEL_FITTER"; | ||
| 1956 | case POWER_DOMAIN_TRANSCODER_A: | ||
| 1957 | return "TRANSCODER_A"; | ||
| 1958 | case POWER_DOMAIN_TRANSCODER_B: | ||
| 1959 | return "TRANSCODER_B"; | ||
| 1960 | case POWER_DOMAIN_TRANSCODER_C: | ||
| 1961 | return "TRANSCODER_C"; | ||
| 1962 | case POWER_DOMAIN_TRANSCODER_EDP: | ||
| 1963 | return "TRANSCODER_EDP"; | ||
| 1964 | case POWER_DOMAIN_VGA: | ||
| 1965 | return "VGA"; | ||
| 1966 | case POWER_DOMAIN_AUDIO: | ||
| 1967 | return "AUDIO"; | ||
| 1968 | case POWER_DOMAIN_INIT: | ||
| 1969 | return "INIT"; | ||
| 1970 | default: | ||
| 1971 | WARN_ON(1); | ||
| 1972 | return "?"; | ||
| 1973 | } | ||
| 1974 | } | ||
| 1975 | |||
| 1976 | static int i915_power_domain_info(struct seq_file *m, void *unused) | ||
| 1977 | { | ||
| 1978 | struct drm_info_node *node = (struct drm_info_node *) m->private; | ||
| 1979 | struct drm_device *dev = node->minor->dev; | ||
| 1980 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 1981 | struct i915_power_domains *power_domains = &dev_priv->power_domains; | ||
| 1982 | int i; | ||
| 1983 | |||
| 1984 | mutex_lock(&power_domains->lock); | ||
| 1985 | |||
| 1986 | seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count"); | ||
| 1987 | for (i = 0; i < power_domains->power_well_count; i++) { | ||
| 1988 | struct i915_power_well *power_well; | ||
| 1989 | enum intel_display_power_domain power_domain; | ||
| 1990 | |||
| 1991 | power_well = &power_domains->power_wells[i]; | ||
| 1992 | seq_printf(m, "%-25s %d\n", power_well->name, | ||
| 1993 | power_well->count); | ||
| 1994 | |||
| 1995 | for (power_domain = 0; power_domain < POWER_DOMAIN_NUM; | ||
| 1996 | power_domain++) { | ||
| 1997 | if (!(BIT(power_domain) & power_well->domains)) | ||
| 1998 | continue; | ||
| 1999 | |||
| 2000 | seq_printf(m, " %-23s %d\n", | ||
| 2001 | power_domain_str(power_domain), | ||
| 2002 | power_domains->domain_use_count[power_domain]); | ||
| 2003 | } | ||
| 2004 | } | ||
| 2005 | |||
| 2006 | mutex_unlock(&power_domains->lock); | ||
| 2007 | |||
| 2008 | return 0; | ||
| 2009 | } | ||
| 2010 | |||
| 1848 | struct pipe_crc_info { | 2011 | struct pipe_crc_info { |
| 1849 | const char *name; | 2012 | const char *name; |
| 1850 | struct drm_device *dev; | 2013 | struct drm_device *dev; |
| @@ -1857,6 +2020,9 @@ static int i915_pipe_crc_open(struct inode *inode, struct file *filep) | |||
| 1857 | struct drm_i915_private *dev_priv = info->dev->dev_private; | 2020 | struct drm_i915_private *dev_priv = info->dev->dev_private; |
| 1858 | struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; | 2021 | struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; |
| 1859 | 2022 | ||
| 2023 | if (info->pipe >= INTEL_INFO(info->dev)->num_pipes) | ||
| 2024 | return -ENODEV; | ||
| 2025 | |||
| 1860 | spin_lock_irq(&pipe_crc->lock); | 2026 | spin_lock_irq(&pipe_crc->lock); |
| 1861 | 2027 | ||
| 1862 | if (pipe_crc->opened) { | 2028 | if (pipe_crc->opened) { |
| @@ -2005,8 +2171,8 @@ static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor, | |||
| 2005 | info->dev = dev; | 2171 | info->dev = dev; |
| 2006 | ent = debugfs_create_file(info->name, S_IRUGO, root, info, | 2172 | ent = debugfs_create_file(info->name, S_IRUGO, root, info, |
| 2007 | &i915_pipe_crc_fops); | 2173 | &i915_pipe_crc_fops); |
| 2008 | if (IS_ERR(ent)) | 2174 | if (!ent) |
| 2009 | return PTR_ERR(ent); | 2175 | return -ENOMEM; |
| 2010 | 2176 | ||
| 2011 | return drm_add_fake_info_node(minor, ent, info); | 2177 | return drm_add_fake_info_node(minor, ent, info); |
| 2012 | } | 2178 | } |
| @@ -2347,7 +2513,7 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe, | |||
| 2347 | { | 2513 | { |
| 2348 | struct drm_i915_private *dev_priv = dev->dev_private; | 2514 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 2349 | struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; | 2515 | struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; |
| 2350 | u32 val; | 2516 | u32 val = 0; /* shut up gcc */ |
| 2351 | int ret; | 2517 | int ret; |
| 2352 | 2518 | ||
| 2353 | if (pipe_crc->source == source) | 2519 | if (pipe_crc->source == source) |
| @@ -2742,7 +2908,7 @@ i915_drop_caches_set(void *data, u64 val) | |||
| 2742 | struct i915_vma *vma, *x; | 2908 | struct i915_vma *vma, *x; |
| 2743 | int ret; | 2909 | int ret; |
| 2744 | 2910 | ||
| 2745 | DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val); | 2911 | DRM_DEBUG("Dropping caches: 0x%08llx\n", val); |
| 2746 | 2912 | ||
| 2747 | /* No need to check and wait for gpu resets, only libdrm auto-restarts | 2913 | /* No need to check and wait for gpu resets, only libdrm auto-restarts |
| 2748 | * on ioctls on -EAGAIN. */ | 2914 | * on ioctls on -EAGAIN. */ |
| @@ -2810,8 +2976,7 @@ i915_max_freq_get(void *data, u64 *val) | |||
| 2810 | return ret; | 2976 | return ret; |
| 2811 | 2977 | ||
| 2812 | if (IS_VALLEYVIEW(dev)) | 2978 | if (IS_VALLEYVIEW(dev)) |
| 2813 | *val = vlv_gpu_freq(dev_priv->mem_freq, | 2979 | *val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay); |
| 2814 | dev_priv->rps.max_delay); | ||
| 2815 | else | 2980 | else |
| 2816 | *val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER; | 2981 | *val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER; |
| 2817 | mutex_unlock(&dev_priv->rps.hw_lock); | 2982 | mutex_unlock(&dev_priv->rps.hw_lock); |
| @@ -2841,9 +3006,9 @@ i915_max_freq_set(void *data, u64 val) | |||
| 2841 | * Turbo will still be enabled, but won't go above the set value. | 3006 | * Turbo will still be enabled, but won't go above the set value. |
| 2842 | */ | 3007 | */ |
| 2843 | if (IS_VALLEYVIEW(dev)) { | 3008 | if (IS_VALLEYVIEW(dev)) { |
| 2844 | val = vlv_freq_opcode(dev_priv->mem_freq, val); | 3009 | val = vlv_freq_opcode(dev_priv, val); |
| 2845 | dev_priv->rps.max_delay = val; | 3010 | dev_priv->rps.max_delay = val; |
| 2846 | gen6_set_rps(dev, val); | 3011 | valleyview_set_rps(dev, val); |
| 2847 | } else { | 3012 | } else { |
| 2848 | do_div(val, GT_FREQUENCY_MULTIPLIER); | 3013 | do_div(val, GT_FREQUENCY_MULTIPLIER); |
| 2849 | dev_priv->rps.max_delay = val; | 3014 | dev_priv->rps.max_delay = val; |
| @@ -2876,8 +3041,7 @@ i915_min_freq_get(void *data, u64 *val) | |||
| 2876 | return ret; | 3041 | return ret; |
| 2877 | 3042 | ||
| 2878 | if (IS_VALLEYVIEW(dev)) | 3043 | if (IS_VALLEYVIEW(dev)) |
| 2879 | *val = vlv_gpu_freq(dev_priv->mem_freq, | 3044 | *val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay); |
| 2880 | dev_priv->rps.min_delay); | ||
| 2881 | else | 3045 | else |
| 2882 | *val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER; | 3046 | *val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER; |
| 2883 | mutex_unlock(&dev_priv->rps.hw_lock); | 3047 | mutex_unlock(&dev_priv->rps.hw_lock); |
| @@ -2907,7 +3071,7 @@ i915_min_freq_set(void *data, u64 val) | |||
| 2907 | * Turbo will still be enabled, but won't go below the set value. | 3071 | * Turbo will still be enabled, but won't go below the set value. |
| 2908 | */ | 3072 | */ |
| 2909 | if (IS_VALLEYVIEW(dev)) { | 3073 | if (IS_VALLEYVIEW(dev)) { |
| 2910 | val = vlv_freq_opcode(dev_priv->mem_freq, val); | 3074 | val = vlv_freq_opcode(dev_priv, val); |
| 2911 | dev_priv->rps.min_delay = val; | 3075 | dev_priv->rps.min_delay = val; |
| 2912 | valleyview_set_rps(dev, val); | 3076 | valleyview_set_rps(dev, val); |
| 2913 | } else { | 3077 | } else { |
| @@ -2938,8 +3102,11 @@ i915_cache_sharing_get(void *data, u64 *val) | |||
| 2938 | ret = mutex_lock_interruptible(&dev->struct_mutex); | 3102 | ret = mutex_lock_interruptible(&dev->struct_mutex); |
| 2939 | if (ret) | 3103 | if (ret) |
| 2940 | return ret; | 3104 | return ret; |
| 3105 | intel_runtime_pm_get(dev_priv); | ||
| 2941 | 3106 | ||
| 2942 | snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); | 3107 | snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); |
| 3108 | |||
| 3109 | intel_runtime_pm_put(dev_priv); | ||
| 2943 | mutex_unlock(&dev_priv->dev->struct_mutex); | 3110 | mutex_unlock(&dev_priv->dev->struct_mutex); |
| 2944 | 3111 | ||
| 2945 | *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT; | 3112 | *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT; |
| @@ -2960,6 +3127,7 @@ i915_cache_sharing_set(void *data, u64 val) | |||
| 2960 | if (val > 3) | 3127 | if (val > 3) |
| 2961 | return -EINVAL; | 3128 | return -EINVAL; |
| 2962 | 3129 | ||
| 3130 | intel_runtime_pm_get(dev_priv); | ||
| 2963 | DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val); | 3131 | DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val); |
| 2964 | 3132 | ||
| 2965 | /* Update the cache sharing policy here as well */ | 3133 | /* Update the cache sharing policy here as well */ |
| @@ -2968,6 +3136,7 @@ i915_cache_sharing_set(void *data, u64 val) | |||
| 2968 | snpcr |= (val << GEN6_MBC_SNPCR_SHIFT); | 3136 | snpcr |= (val << GEN6_MBC_SNPCR_SHIFT); |
| 2969 | I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); | 3137 | I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); |
| 2970 | 3138 | ||
| 3139 | intel_runtime_pm_put(dev_priv); | ||
| 2971 | return 0; | 3140 | return 0; |
| 2972 | } | 3141 | } |
| 2973 | 3142 | ||
| @@ -2983,7 +3152,8 @@ static int i915_forcewake_open(struct inode *inode, struct file *file) | |||
| 2983 | if (INTEL_INFO(dev)->gen < 6) | 3152 | if (INTEL_INFO(dev)->gen < 6) |
| 2984 | return 0; | 3153 | return 0; |
| 2985 | 3154 | ||
| 2986 | gen6_gt_force_wake_get(dev_priv); | 3155 | intel_runtime_pm_get(dev_priv); |
| 3156 | gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); | ||
| 2987 | 3157 | ||
| 2988 | return 0; | 3158 | return 0; |
| 2989 | } | 3159 | } |
| @@ -2996,7 +3166,8 @@ static int i915_forcewake_release(struct inode *inode, struct file *file) | |||
| 2996 | if (INTEL_INFO(dev)->gen < 6) | 3166 | if (INTEL_INFO(dev)->gen < 6) |
| 2997 | return 0; | 3167 | return 0; |
| 2998 | 3168 | ||
| 2999 | gen6_gt_force_wake_put(dev_priv); | 3169 | gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); |
| 3170 | intel_runtime_pm_put(dev_priv); | ||
| 3000 | 3171 | ||
| 3001 | return 0; | 3172 | return 0; |
| 3002 | } | 3173 | } |
| @@ -3016,8 +3187,8 @@ static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor) | |||
| 3016 | S_IRUSR, | 3187 | S_IRUSR, |
| 3017 | root, dev, | 3188 | root, dev, |
| 3018 | &i915_forcewake_fops); | 3189 | &i915_forcewake_fops); |
| 3019 | if (IS_ERR(ent)) | 3190 | if (!ent) |
| 3020 | return PTR_ERR(ent); | 3191 | return -ENOMEM; |
| 3021 | 3192 | ||
| 3022 | return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops); | 3193 | return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops); |
| 3023 | } | 3194 | } |
| @@ -3034,8 +3205,8 @@ static int i915_debugfs_create(struct dentry *root, | |||
| 3034 | S_IRUGO | S_IWUSR, | 3205 | S_IRUGO | S_IWUSR, |
| 3035 | root, dev, | 3206 | root, dev, |
| 3036 | fops); | 3207 | fops); |
| 3037 | if (IS_ERR(ent)) | 3208 | if (!ent) |
| 3038 | return PTR_ERR(ent); | 3209 | return -ENOMEM; |
| 3039 | 3210 | ||
| 3040 | return drm_add_fake_info_node(minor, ent, fops); | 3211 | return drm_add_fake_info_node(minor, ent, fops); |
| 3041 | } | 3212 | } |
| @@ -3079,6 +3250,7 @@ static const struct drm_info_list i915_debugfs_list[] = { | |||
| 3079 | {"i915_edp_psr_status", i915_edp_psr_status, 0}, | 3250 | {"i915_edp_psr_status", i915_edp_psr_status, 0}, |
| 3080 | {"i915_energy_uJ", i915_energy_uJ, 0}, | 3251 | {"i915_energy_uJ", i915_energy_uJ, 0}, |
| 3081 | {"i915_pc8_status", i915_pc8_status, 0}, | 3252 | {"i915_pc8_status", i915_pc8_status, 0}, |
| 3253 | {"i915_power_domain_info", i915_power_domain_info, 0}, | ||
| 3082 | }; | 3254 | }; |
| 3083 | #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) | 3255 | #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) |
| 3084 | 3256 | ||
| @@ -3102,10 +3274,10 @@ static const struct i915_debugfs_files { | |||
| 3102 | void intel_display_crc_init(struct drm_device *dev) | 3274 | void intel_display_crc_init(struct drm_device *dev) |
| 3103 | { | 3275 | { |
| 3104 | struct drm_i915_private *dev_priv = dev->dev_private; | 3276 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 3105 | int i; | 3277 | enum pipe pipe; |
| 3106 | 3278 | ||
| 3107 | for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) { | 3279 | for_each_pipe(pipe) { |
| 3108 | struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[i]; | 3280 | struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; |
| 3109 | 3281 | ||
| 3110 | pipe_crc->opened = false; | 3282 | pipe_crc->opened = false; |
| 3111 | spin_lock_init(&pipe_crc->lock); | 3283 | spin_lock_init(&pipe_crc->lock); |
| @@ -3164,5 +3336,3 @@ void i915_debugfs_cleanup(struct drm_minor *minor) | |||
| 3164 | drm_debugfs_remove_files(info_list, 1, minor); | 3336 | drm_debugfs_remove_files(info_list, 1, minor); |
| 3165 | } | 3337 | } |
| 3166 | } | 3338 | } |
| 3167 | |||
| 3168 | #endif /* CONFIG_DEBUG_FS */ | ||
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 5c648425c1e0..15a74f979b4b 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
| @@ -42,6 +42,8 @@ | |||
| 42 | #include <linux/vga_switcheroo.h> | 42 | #include <linux/vga_switcheroo.h> |
| 43 | #include <linux/slab.h> | 43 | #include <linux/slab.h> |
| 44 | #include <acpi/video.h> | 44 | #include <acpi/video.h> |
| 45 | #include <linux/pm.h> | ||
| 46 | #include <linux/pm_runtime.h> | ||
| 45 | 47 | ||
| 46 | #define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS]) | 48 | #define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS]) |
| 47 | 49 | ||
| @@ -791,7 +793,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) | |||
| 791 | master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; | 793 | master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; |
| 792 | 794 | ||
| 793 | if (ring->irq_get(ring)) { | 795 | if (ring->irq_get(ring)) { |
| 794 | DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ, | 796 | DRM_WAIT_ON(ret, ring->irq_queue, 3 * HZ, |
| 795 | READ_BREADCRUMB(dev_priv) >= irq_nr); | 797 | READ_BREADCRUMB(dev_priv) >= irq_nr); |
| 796 | ring->irq_put(ring); | 798 | ring->irq_put(ring); |
| 797 | } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000)) | 799 | } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000)) |
| @@ -828,7 +830,7 @@ static int i915_irq_emit(struct drm_device *dev, void *data, | |||
| 828 | result = i915_emit_irq(dev); | 830 | result = i915_emit_irq(dev); |
| 829 | mutex_unlock(&dev->struct_mutex); | 831 | mutex_unlock(&dev->struct_mutex); |
| 830 | 832 | ||
| 831 | if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) { | 833 | if (copy_to_user(emit->irq_seq, &result, sizeof(int))) { |
| 832 | DRM_ERROR("copy_to_user\n"); | 834 | DRM_ERROR("copy_to_user\n"); |
| 833 | return -EFAULT; | 835 | return -EFAULT; |
| 834 | } | 836 | } |
| @@ -1016,8 +1018,8 @@ static int i915_getparam(struct drm_device *dev, void *data, | |||
| 1016 | return -EINVAL; | 1018 | return -EINVAL; |
| 1017 | } | 1019 | } |
| 1018 | 1020 | ||
| 1019 | if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) { | 1021 | if (copy_to_user(param->value, &value, sizeof(int))) { |
| 1020 | DRM_ERROR("DRM_COPY_TO_USER failed\n"); | 1022 | DRM_ERROR("copy_to_user failed\n"); |
| 1021 | return -EFAULT; | 1023 | return -EFAULT; |
| 1022 | } | 1024 | } |
| 1023 | 1025 | ||
| @@ -1411,7 +1413,7 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master) | |||
| 1411 | master->driver_priv = NULL; | 1413 | master->driver_priv = NULL; |
| 1412 | } | 1414 | } |
| 1413 | 1415 | ||
| 1414 | #ifdef CONFIG_DRM_I915_FBDEV | 1416 | #if IS_ENABLED(CONFIG_FB) |
| 1415 | static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) | 1417 | static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) |
| 1416 | { | 1418 | { |
| 1417 | struct apertures_struct *ap; | 1419 | struct apertures_struct *ap; |
| @@ -1484,6 +1486,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
| 1484 | return -ENODEV; | 1486 | return -ENODEV; |
| 1485 | } | 1487 | } |
| 1486 | 1488 | ||
| 1489 | /* UMS needs agp support. */ | ||
| 1490 | if (!drm_core_check_feature(dev, DRIVER_MODESET) && !dev->agp) | ||
| 1491 | return -EINVAL; | ||
| 1492 | |||
| 1487 | dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); | 1493 | dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); |
| 1488 | if (dev_priv == NULL) | 1494 | if (dev_priv == NULL) |
| 1489 | return -ENOMEM; | 1495 | return -ENOMEM; |
| @@ -1494,7 +1500,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
| 1494 | 1500 | ||
| 1495 | spin_lock_init(&dev_priv->irq_lock); | 1501 | spin_lock_init(&dev_priv->irq_lock); |
| 1496 | spin_lock_init(&dev_priv->gpu_error.lock); | 1502 | spin_lock_init(&dev_priv->gpu_error.lock); |
| 1497 | spin_lock_init(&dev_priv->backlight.lock); | 1503 | spin_lock_init(&dev_priv->backlight_lock); |
| 1498 | spin_lock_init(&dev_priv->uncore.lock); | 1504 | spin_lock_init(&dev_priv->uncore.lock); |
| 1499 | spin_lock_init(&dev_priv->mm.object_stat_lock); | 1505 | spin_lock_init(&dev_priv->mm.object_stat_lock); |
| 1500 | mutex_init(&dev_priv->dpio_lock); | 1506 | mutex_init(&dev_priv->dpio_lock); |
| @@ -1639,8 +1645,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
| 1639 | goto out_gem_unload; | 1645 | goto out_gem_unload; |
| 1640 | } | 1646 | } |
| 1641 | 1647 | ||
| 1642 | if (HAS_POWER_WELL(dev)) | 1648 | intel_power_domains_init(dev); |
| 1643 | intel_power_domains_init(dev); | ||
| 1644 | 1649 | ||
| 1645 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | 1650 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
| 1646 | ret = i915_load_modeset_init(dev); | 1651 | ret = i915_load_modeset_init(dev); |
| @@ -1664,11 +1669,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
| 1664 | if (IS_GEN5(dev)) | 1669 | if (IS_GEN5(dev)) |
| 1665 | intel_gpu_ips_init(dev_priv); | 1670 | intel_gpu_ips_init(dev_priv); |
| 1666 | 1671 | ||
| 1672 | intel_init_runtime_pm(dev_priv); | ||
| 1673 | |||
| 1667 | return 0; | 1674 | return 0; |
| 1668 | 1675 | ||
| 1669 | out_power_well: | 1676 | out_power_well: |
| 1670 | if (HAS_POWER_WELL(dev)) | 1677 | intel_power_domains_remove(dev); |
| 1671 | intel_power_domains_remove(dev); | ||
| 1672 | drm_vblank_cleanup(dev); | 1678 | drm_vblank_cleanup(dev); |
| 1673 | out_gem_unload: | 1679 | out_gem_unload: |
| 1674 | if (dev_priv->mm.inactive_shrinker.scan_objects) | 1680 | if (dev_priv->mm.inactive_shrinker.scan_objects) |
| @@ -1679,6 +1685,7 @@ out_gem_unload: | |||
| 1679 | 1685 | ||
| 1680 | intel_teardown_gmbus(dev); | 1686 | intel_teardown_gmbus(dev); |
| 1681 | intel_teardown_mchbar(dev); | 1687 | intel_teardown_mchbar(dev); |
| 1688 | pm_qos_remove_request(&dev_priv->pm_qos); | ||
| 1682 | destroy_workqueue(dev_priv->wq); | 1689 | destroy_workqueue(dev_priv->wq); |
| 1683 | out_mtrrfree: | 1690 | out_mtrrfree: |
| 1684 | arch_phys_wc_del(dev_priv->gtt.mtrr); | 1691 | arch_phys_wc_del(dev_priv->gtt.mtrr); |
| @@ -1704,25 +1711,27 @@ int i915_driver_unload(struct drm_device *dev) | |||
| 1704 | struct drm_i915_private *dev_priv = dev->dev_private; | 1711 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1705 | int ret; | 1712 | int ret; |
| 1706 | 1713 | ||
| 1714 | ret = i915_gem_suspend(dev); | ||
| 1715 | if (ret) { | ||
| 1716 | DRM_ERROR("failed to idle hardware: %d\n", ret); | ||
| 1717 | return ret; | ||
| 1718 | } | ||
| 1719 | |||
| 1720 | intel_fini_runtime_pm(dev_priv); | ||
| 1721 | |||
| 1707 | intel_gpu_ips_teardown(); | 1722 | intel_gpu_ips_teardown(); |
| 1708 | 1723 | ||
| 1709 | if (HAS_POWER_WELL(dev)) { | 1724 | /* The i915.ko module is still not prepared to be loaded when |
| 1710 | /* The i915.ko module is still not prepared to be loaded when | 1725 | * the power well is not enabled, so just enable it in case |
| 1711 | * the power well is not enabled, so just enable it in case | 1726 | * we're going to unload/reload. */ |
| 1712 | * we're going to unload/reload. */ | 1727 | intel_display_set_init_power(dev, true); |
| 1713 | intel_display_set_init_power(dev, true); | 1728 | intel_power_domains_remove(dev); |
| 1714 | intel_power_domains_remove(dev); | ||
| 1715 | } | ||
| 1716 | 1729 | ||
| 1717 | i915_teardown_sysfs(dev); | 1730 | i915_teardown_sysfs(dev); |
| 1718 | 1731 | ||
| 1719 | if (dev_priv->mm.inactive_shrinker.scan_objects) | 1732 | if (dev_priv->mm.inactive_shrinker.scan_objects) |
| 1720 | unregister_shrinker(&dev_priv->mm.inactive_shrinker); | 1733 | unregister_shrinker(&dev_priv->mm.inactive_shrinker); |
| 1721 | 1734 | ||
| 1722 | ret = i915_gem_suspend(dev); | ||
| 1723 | if (ret) | ||
| 1724 | DRM_ERROR("failed to idle hardware: %d\n", ret); | ||
| 1725 | |||
| 1726 | io_mapping_free(dev_priv->gtt.mappable); | 1735 | io_mapping_free(dev_priv->gtt.mappable); |
| 1727 | arch_phys_wc_del(dev_priv->gtt.mtrr); | 1736 | arch_phys_wc_del(dev_priv->gtt.mtrr); |
| 1728 | 1737 | ||
| @@ -1777,7 +1786,6 @@ int i915_driver_unload(struct drm_device *dev) | |||
| 1777 | 1786 | ||
| 1778 | list_del(&dev_priv->gtt.base.global_link); | 1787 | list_del(&dev_priv->gtt.base.global_link); |
| 1779 | WARN_ON(!list_empty(&dev_priv->vm_list)); | 1788 | WARN_ON(!list_empty(&dev_priv->vm_list)); |
| 1780 | drm_mm_takedown(&dev_priv->gtt.base.mm); | ||
| 1781 | 1789 | ||
| 1782 | drm_vblank_cleanup(dev); | 1790 | drm_vblank_cleanup(dev); |
| 1783 | 1791 | ||
| @@ -1910,6 +1918,7 @@ const struct drm_ioctl_desc i915_ioctls[] = { | |||
| 1910 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), | 1918 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
| 1911 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), | 1919 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
| 1912 | DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), | 1920 | DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
| 1921 | DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), | ||
| 1913 | }; | 1922 | }; |
| 1914 | 1923 | ||
| 1915 | int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); | 1924 | int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 5b7b7e06cb3a..04f1f02c4019 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
| @@ -59,7 +59,7 @@ MODULE_PARM_DESC(powersave, | |||
| 59 | "Enable powersavings, fbc, downclocking, etc. (default: true)"); | 59 | "Enable powersavings, fbc, downclocking, etc. (default: true)"); |
| 60 | 60 | ||
| 61 | int i915_semaphores __read_mostly = -1; | 61 | int i915_semaphores __read_mostly = -1; |
| 62 | module_param_named(semaphores, i915_semaphores, int, 0600); | 62 | module_param_named(semaphores, i915_semaphores, int, 0400); |
| 63 | MODULE_PARM_DESC(semaphores, | 63 | MODULE_PARM_DESC(semaphores, |
| 64 | "Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))"); | 64 | "Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))"); |
| 65 | 65 | ||
| @@ -114,7 +114,7 @@ MODULE_PARM_DESC(enable_hangcheck, | |||
| 114 | "(default: true)"); | 114 | "(default: true)"); |
| 115 | 115 | ||
| 116 | int i915_enable_ppgtt __read_mostly = -1; | 116 | int i915_enable_ppgtt __read_mostly = -1; |
| 117 | module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600); | 117 | module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0400); |
| 118 | MODULE_PARM_DESC(i915_enable_ppgtt, | 118 | MODULE_PARM_DESC(i915_enable_ppgtt, |
| 119 | "Enable PPGTT (default: true)"); | 119 | "Enable PPGTT (default: true)"); |
| 120 | 120 | ||
| @@ -155,7 +155,6 @@ MODULE_PARM_DESC(prefault_disable, | |||
| 155 | "Disable page prefaulting for pread/pwrite/reloc (default:false). For developers only."); | 155 | "Disable page prefaulting for pread/pwrite/reloc (default:false). For developers only."); |
| 156 | 156 | ||
| 157 | static struct drm_driver driver; | 157 | static struct drm_driver driver; |
| 158 | extern int intel_agp_enabled; | ||
| 159 | 158 | ||
| 160 | static const struct intel_device_info intel_i830_info = { | 159 | static const struct intel_device_info intel_i830_info = { |
| 161 | .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2, | 160 | .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2, |
| @@ -173,6 +172,7 @@ static const struct intel_device_info intel_i85x_info = { | |||
| 173 | .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2, | 172 | .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2, |
| 174 | .cursor_needs_physical = 1, | 173 | .cursor_needs_physical = 1, |
| 175 | .has_overlay = 1, .overlay_needs_physical = 1, | 174 | .has_overlay = 1, .overlay_needs_physical = 1, |
| 175 | .has_fbc = 1, | ||
| 176 | .ring_mask = RENDER_RING, | 176 | .ring_mask = RENDER_RING, |
| 177 | }; | 177 | }; |
| 178 | 178 | ||
| @@ -192,6 +192,7 @@ static const struct intel_device_info intel_i915gm_info = { | |||
| 192 | .cursor_needs_physical = 1, | 192 | .cursor_needs_physical = 1, |
| 193 | .has_overlay = 1, .overlay_needs_physical = 1, | 193 | .has_overlay = 1, .overlay_needs_physical = 1, |
| 194 | .supports_tv = 1, | 194 | .supports_tv = 1, |
| 195 | .has_fbc = 1, | ||
| 195 | .ring_mask = RENDER_RING, | 196 | .ring_mask = RENDER_RING, |
| 196 | }; | 197 | }; |
| 197 | static const struct intel_device_info intel_i945g_info = { | 198 | static const struct intel_device_info intel_i945g_info = { |
| @@ -204,6 +205,7 @@ static const struct intel_device_info intel_i945gm_info = { | |||
| 204 | .has_hotplug = 1, .cursor_needs_physical = 1, | 205 | .has_hotplug = 1, .cursor_needs_physical = 1, |
| 205 | .has_overlay = 1, .overlay_needs_physical = 1, | 206 | .has_overlay = 1, .overlay_needs_physical = 1, |
| 206 | .supports_tv = 1, | 207 | .supports_tv = 1, |
| 208 | .has_fbc = 1, | ||
| 207 | .ring_mask = RENDER_RING, | 209 | .ring_mask = RENDER_RING, |
| 208 | }; | 210 | }; |
| 209 | 211 | ||
| @@ -265,6 +267,7 @@ static const struct intel_device_info intel_ironlake_m_info = { | |||
| 265 | static const struct intel_device_info intel_sandybridge_d_info = { | 267 | static const struct intel_device_info intel_sandybridge_d_info = { |
| 266 | .gen = 6, .num_pipes = 2, | 268 | .gen = 6, .num_pipes = 2, |
| 267 | .need_gfx_hws = 1, .has_hotplug = 1, | 269 | .need_gfx_hws = 1, .has_hotplug = 1, |
| 270 | .has_fbc = 1, | ||
| 268 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING, | 271 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING, |
| 269 | .has_llc = 1, | 272 | .has_llc = 1, |
| 270 | }; | 273 | }; |
| @@ -280,6 +283,7 @@ static const struct intel_device_info intel_sandybridge_m_info = { | |||
| 280 | #define GEN7_FEATURES \ | 283 | #define GEN7_FEATURES \ |
| 281 | .gen = 7, .num_pipes = 3, \ | 284 | .gen = 7, .num_pipes = 3, \ |
| 282 | .need_gfx_hws = 1, .has_hotplug = 1, \ | 285 | .need_gfx_hws = 1, .has_hotplug = 1, \ |
| 286 | .has_fbc = 1, \ | ||
| 283 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \ | 287 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \ |
| 284 | .has_llc = 1 | 288 | .has_llc = 1 |
| 285 | 289 | ||
| @@ -292,7 +296,6 @@ static const struct intel_device_info intel_ivybridge_m_info = { | |||
| 292 | GEN7_FEATURES, | 296 | GEN7_FEATURES, |
| 293 | .is_ivybridge = 1, | 297 | .is_ivybridge = 1, |
| 294 | .is_mobile = 1, | 298 | .is_mobile = 1, |
| 295 | .has_fbc = 1, | ||
| 296 | }; | 299 | }; |
| 297 | 300 | ||
| 298 | static const struct intel_device_info intel_ivybridge_q_info = { | 301 | static const struct intel_device_info intel_ivybridge_q_info = { |
| @@ -307,6 +310,7 @@ static const struct intel_device_info intel_valleyview_m_info = { | |||
| 307 | .num_pipes = 2, | 310 | .num_pipes = 2, |
| 308 | .is_valleyview = 1, | 311 | .is_valleyview = 1, |
| 309 | .display_mmio_offset = VLV_DISPLAY_BASE, | 312 | .display_mmio_offset = VLV_DISPLAY_BASE, |
| 313 | .has_fbc = 0, /* legal, last one wins */ | ||
| 310 | .has_llc = 0, /* legal, last one wins */ | 314 | .has_llc = 0, /* legal, last one wins */ |
| 311 | }; | 315 | }; |
| 312 | 316 | ||
| @@ -315,6 +319,7 @@ static const struct intel_device_info intel_valleyview_d_info = { | |||
| 315 | .num_pipes = 2, | 319 | .num_pipes = 2, |
| 316 | .is_valleyview = 1, | 320 | .is_valleyview = 1, |
| 317 | .display_mmio_offset = VLV_DISPLAY_BASE, | 321 | .display_mmio_offset = VLV_DISPLAY_BASE, |
| 322 | .has_fbc = 0, /* legal, last one wins */ | ||
| 318 | .has_llc = 0, /* legal, last one wins */ | 323 | .has_llc = 0, /* legal, last one wins */ |
| 319 | }; | 324 | }; |
| 320 | 325 | ||
| @@ -332,12 +337,10 @@ static const struct intel_device_info intel_haswell_m_info = { | |||
| 332 | .is_mobile = 1, | 337 | .is_mobile = 1, |
| 333 | .has_ddi = 1, | 338 | .has_ddi = 1, |
| 334 | .has_fpga_dbg = 1, | 339 | .has_fpga_dbg = 1, |
| 335 | .has_fbc = 1, | ||
| 336 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, | 340 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, |
| 337 | }; | 341 | }; |
| 338 | 342 | ||
| 339 | static const struct intel_device_info intel_broadwell_d_info = { | 343 | static const struct intel_device_info intel_broadwell_d_info = { |
| 340 | .is_preliminary = 1, | ||
| 341 | .gen = 8, .num_pipes = 3, | 344 | .gen = 8, .num_pipes = 3, |
| 342 | .need_gfx_hws = 1, .has_hotplug = 1, | 345 | .need_gfx_hws = 1, .has_hotplug = 1, |
| 343 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, | 346 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, |
| @@ -346,7 +349,6 @@ static const struct intel_device_info intel_broadwell_d_info = { | |||
| 346 | }; | 349 | }; |
| 347 | 350 | ||
| 348 | static const struct intel_device_info intel_broadwell_m_info = { | 351 | static const struct intel_device_info intel_broadwell_m_info = { |
| 349 | .is_preliminary = 1, | ||
| 350 | .gen = 8, .is_mobile = 1, .num_pipes = 3, | 352 | .gen = 8, .is_mobile = 1, .num_pipes = 3, |
| 351 | .need_gfx_hws = 1, .has_hotplug = 1, | 353 | .need_gfx_hws = 1, .has_hotplug = 1, |
| 352 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, | 354 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, |
| @@ -476,12 +478,12 @@ check_next: | |||
| 476 | bool i915_semaphore_is_enabled(struct drm_device *dev) | 478 | bool i915_semaphore_is_enabled(struct drm_device *dev) |
| 477 | { | 479 | { |
| 478 | if (INTEL_INFO(dev)->gen < 6) | 480 | if (INTEL_INFO(dev)->gen < 6) |
| 479 | return 0; | 481 | return false; |
| 480 | 482 | ||
| 481 | /* Until we get further testing... */ | 483 | /* Until we get further testing... */ |
| 482 | if (IS_GEN8(dev)) { | 484 | if (IS_GEN8(dev)) { |
| 483 | WARN_ON(!i915_preliminary_hw_support); | 485 | WARN_ON(!i915_preliminary_hw_support); |
| 484 | return 0; | 486 | return false; |
| 485 | } | 487 | } |
| 486 | 488 | ||
| 487 | if (i915_semaphores >= 0) | 489 | if (i915_semaphores >= 0) |
| @@ -493,7 +495,7 @@ bool i915_semaphore_is_enabled(struct drm_device *dev) | |||
| 493 | return false; | 495 | return false; |
| 494 | #endif | 496 | #endif |
| 495 | 497 | ||
| 496 | return 1; | 498 | return true; |
| 497 | } | 499 | } |
| 498 | 500 | ||
| 499 | static int i915_drm_freeze(struct drm_device *dev) | 501 | static int i915_drm_freeze(struct drm_device *dev) |
| @@ -501,6 +503,8 @@ static int i915_drm_freeze(struct drm_device *dev) | |||
| 501 | struct drm_i915_private *dev_priv = dev->dev_private; | 503 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 502 | struct drm_crtc *crtc; | 504 | struct drm_crtc *crtc; |
| 503 | 505 | ||
| 506 | intel_runtime_pm_get(dev_priv); | ||
| 507 | |||
| 504 | /* ignore lid events during suspend */ | 508 | /* ignore lid events during suspend */ |
| 505 | mutex_lock(&dev_priv->modeset_restore_lock); | 509 | mutex_lock(&dev_priv->modeset_restore_lock); |
| 506 | dev_priv->modeset_restore = MODESET_SUSPENDED; | 510 | dev_priv->modeset_restore = MODESET_SUSPENDED; |
| @@ -688,6 +692,8 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings) | |||
| 688 | mutex_lock(&dev_priv->modeset_restore_lock); | 692 | mutex_lock(&dev_priv->modeset_restore_lock); |
| 689 | dev_priv->modeset_restore = MODESET_DONE; | 693 | dev_priv->modeset_restore = MODESET_DONE; |
| 690 | mutex_unlock(&dev_priv->modeset_restore_lock); | 694 | mutex_unlock(&dev_priv->modeset_restore_lock); |
| 695 | |||
| 696 | intel_runtime_pm_put(dev_priv); | ||
| 691 | return error; | 697 | return error; |
| 692 | } | 698 | } |
| 693 | 699 | ||
| @@ -762,14 +768,14 @@ int i915_reset(struct drm_device *dev) | |||
| 762 | DRM_INFO("Simulated gpu hang, resetting stop_rings\n"); | 768 | DRM_INFO("Simulated gpu hang, resetting stop_rings\n"); |
| 763 | dev_priv->gpu_error.stop_rings = 0; | 769 | dev_priv->gpu_error.stop_rings = 0; |
| 764 | if (ret == -ENODEV) { | 770 | if (ret == -ENODEV) { |
| 765 | DRM_ERROR("Reset not implemented, but ignoring " | 771 | DRM_INFO("Reset not implemented, but ignoring " |
| 766 | "error for simulated gpu hangs\n"); | 772 | "error for simulated gpu hangs\n"); |
| 767 | ret = 0; | 773 | ret = 0; |
| 768 | } | 774 | } |
| 769 | } | 775 | } |
| 770 | 776 | ||
| 771 | if (ret) { | 777 | if (ret) { |
| 772 | DRM_ERROR("Failed to reset chip.\n"); | 778 | DRM_ERROR("Failed to reset chip: %i\n", ret); |
| 773 | mutex_unlock(&dev->struct_mutex); | 779 | mutex_unlock(&dev->struct_mutex); |
| 774 | return ret; | 780 | return ret; |
| 775 | } | 781 | } |
| @@ -790,12 +796,9 @@ int i915_reset(struct drm_device *dev) | |||
| 790 | */ | 796 | */ |
| 791 | if (drm_core_check_feature(dev, DRIVER_MODESET) || | 797 | if (drm_core_check_feature(dev, DRIVER_MODESET) || |
| 792 | !dev_priv->ums.mm_suspended) { | 798 | !dev_priv->ums.mm_suspended) { |
| 793 | bool hw_contexts_disabled = dev_priv->hw_contexts_disabled; | ||
| 794 | dev_priv->ums.mm_suspended = 0; | 799 | dev_priv->ums.mm_suspended = 0; |
| 795 | 800 | ||
| 796 | ret = i915_gem_init_hw(dev); | 801 | ret = i915_gem_init_hw(dev); |
| 797 | if (!hw_contexts_disabled && dev_priv->hw_contexts_disabled) | ||
| 798 | DRM_ERROR("HW contexts didn't survive reset\n"); | ||
| 799 | mutex_unlock(&dev->struct_mutex); | 802 | mutex_unlock(&dev->struct_mutex); |
| 800 | if (ret) { | 803 | if (ret) { |
| 801 | DRM_ERROR("Failed hw init on reset %d\n", ret); | 804 | DRM_ERROR("Failed hw init on reset %d\n", ret); |
| @@ -831,17 +834,7 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 831 | if (PCI_FUNC(pdev->devfn)) | 834 | if (PCI_FUNC(pdev->devfn)) |
| 832 | return -ENODEV; | 835 | return -ENODEV; |
| 833 | 836 | ||
| 834 | /* We've managed to ship a kms-enabled ddx that shipped with an XvMC | 837 | driver.driver_features &= ~(DRIVER_USE_AGP); |
| 835 | * implementation for gen3 (and only gen3) that used legacy drm maps | ||
| 836 | * (gasp!) to share buffers between X and the client. Hence we need to | ||
| 837 | * keep around the fake agp stuff for gen3, even when kms is enabled. */ | ||
| 838 | if (intel_info->gen != 3) { | ||
| 839 | driver.driver_features &= | ||
| 840 | ~(DRIVER_USE_AGP | DRIVER_REQUIRE_AGP); | ||
| 841 | } else if (!intel_agp_enabled) { | ||
| 842 | DRM_ERROR("drm/i915 can't work without intel_agp module!\n"); | ||
| 843 | return -ENODEV; | ||
| 844 | } | ||
| 845 | 838 | ||
| 846 | return drm_get_pci_dev(pdev, ent, &driver); | 839 | return drm_get_pci_dev(pdev, ent, &driver); |
| 847 | } | 840 | } |
| @@ -915,6 +908,49 @@ static int i915_pm_poweroff(struct device *dev) | |||
| 915 | return i915_drm_freeze(drm_dev); | 908 | return i915_drm_freeze(drm_dev); |
| 916 | } | 909 | } |
| 917 | 910 | ||
| 911 | static int i915_runtime_suspend(struct device *device) | ||
| 912 | { | ||
| 913 | struct pci_dev *pdev = to_pci_dev(device); | ||
| 914 | struct drm_device *dev = pci_get_drvdata(pdev); | ||
| 915 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 916 | |||
| 917 | WARN_ON(!HAS_RUNTIME_PM(dev)); | ||
| 918 | |||
| 919 | DRM_DEBUG_KMS("Suspending device\n"); | ||
| 920 | |||
| 921 | i915_gem_release_all_mmaps(dev_priv); | ||
| 922 | |||
| 923 | del_timer_sync(&dev_priv->gpu_error.hangcheck_timer); | ||
| 924 | dev_priv->pm.suspended = true; | ||
| 925 | |||
| 926 | /* | ||
| 927 | * current versions of firmware which depend on this opregion | ||
| 928 | * notification have repurposed the D1 definition to mean | ||
| 929 | * "runtime suspended" vs. what you would normally expect (D3) | ||
| 930 | * to distinguish it from notifications that might be sent | ||
| 931 | * via the suspend path. | ||
| 932 | */ | ||
| 933 | intel_opregion_notify_adapter(dev, PCI_D1); | ||
| 934 | |||
| 935 | return 0; | ||
| 936 | } | ||
| 937 | |||
| 938 | static int i915_runtime_resume(struct device *device) | ||
| 939 | { | ||
| 940 | struct pci_dev *pdev = to_pci_dev(device); | ||
| 941 | struct drm_device *dev = pci_get_drvdata(pdev); | ||
| 942 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 943 | |||
| 944 | WARN_ON(!HAS_RUNTIME_PM(dev)); | ||
| 945 | |||
| 946 | DRM_DEBUG_KMS("Resuming device\n"); | ||
| 947 | |||
| 948 | intel_opregion_notify_adapter(dev, PCI_D0); | ||
| 949 | dev_priv->pm.suspended = false; | ||
| 950 | |||
| 951 | return 0; | ||
| 952 | } | ||
| 953 | |||
| 918 | static const struct dev_pm_ops i915_pm_ops = { | 954 | static const struct dev_pm_ops i915_pm_ops = { |
| 919 | .suspend = i915_pm_suspend, | 955 | .suspend = i915_pm_suspend, |
| 920 | .resume = i915_pm_resume, | 956 | .resume = i915_pm_resume, |
| @@ -922,6 +958,8 @@ static const struct dev_pm_ops i915_pm_ops = { | |||
| 922 | .thaw = i915_pm_thaw, | 958 | .thaw = i915_pm_thaw, |
| 923 | .poweroff = i915_pm_poweroff, | 959 | .poweroff = i915_pm_poweroff, |
| 924 | .restore = i915_pm_resume, | 960 | .restore = i915_pm_resume, |
| 961 | .runtime_suspend = i915_runtime_suspend, | ||
| 962 | .runtime_resume = i915_runtime_resume, | ||
| 925 | }; | 963 | }; |
| 926 | 964 | ||
| 927 | static const struct vm_operations_struct i915_gem_vm_ops = { | 965 | static const struct vm_operations_struct i915_gem_vm_ops = { |
| @@ -949,7 +987,7 @@ static struct drm_driver driver = { | |||
| 949 | * deal with them for Intel hardware. | 987 | * deal with them for Intel hardware. |
| 950 | */ | 988 | */ |
| 951 | .driver_features = | 989 | .driver_features = |
| 952 | DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | | 990 | DRIVER_USE_AGP | |
| 953 | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME | | 991 | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME | |
| 954 | DRIVER_RENDER, | 992 | DRIVER_RENDER, |
| 955 | .load = i915_driver_load, | 993 | .load = i915_driver_load, |
| @@ -1024,14 +1062,24 @@ static int __init i915_init(void) | |||
| 1024 | driver.driver_features &= ~DRIVER_MODESET; | 1062 | driver.driver_features &= ~DRIVER_MODESET; |
| 1025 | #endif | 1063 | #endif |
| 1026 | 1064 | ||
| 1027 | if (!(driver.driver_features & DRIVER_MODESET)) | 1065 | if (!(driver.driver_features & DRIVER_MODESET)) { |
| 1028 | driver.get_vblank_timestamp = NULL; | 1066 | driver.get_vblank_timestamp = NULL; |
| 1067 | #ifndef CONFIG_DRM_I915_UMS | ||
| 1068 | /* Silently fail loading to not upset userspace. */ | ||
| 1069 | return 0; | ||
| 1070 | #endif | ||
| 1071 | } | ||
| 1029 | 1072 | ||
| 1030 | return drm_pci_init(&driver, &i915_pci_driver); | 1073 | return drm_pci_init(&driver, &i915_pci_driver); |
| 1031 | } | 1074 | } |
| 1032 | 1075 | ||
| 1033 | static void __exit i915_exit(void) | 1076 | static void __exit i915_exit(void) |
| 1034 | { | 1077 | { |
| 1078 | #ifndef CONFIG_DRM_I915_UMS | ||
| 1079 | if (!(driver.driver_features & DRIVER_MODESET)) | ||
| 1080 | return; /* Never loaded a driver. */ | ||
| 1081 | #endif | ||
| 1082 | |||
| 1035 | drm_pci_exit(&driver, &i915_pci_driver); | 1083 | drm_pci_exit(&driver, &i915_pci_driver); |
| 1036 | } | 1084 | } |
| 1037 | 1085 | ||
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 1caa5e34fbe3..4a2bf8e3f739 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
| @@ -89,6 +89,18 @@ enum port { | |||
| 89 | }; | 89 | }; |
| 90 | #define port_name(p) ((p) + 'A') | 90 | #define port_name(p) ((p) + 'A') |
| 91 | 91 | ||
| 92 | #define I915_NUM_PHYS_VLV 1 | ||
| 93 | |||
| 94 | enum dpio_channel { | ||
| 95 | DPIO_CH0, | ||
| 96 | DPIO_CH1 | ||
| 97 | }; | ||
| 98 | |||
| 99 | enum dpio_phy { | ||
| 100 | DPIO_PHY0, | ||
| 101 | DPIO_PHY1 | ||
| 102 | }; | ||
| 103 | |||
| 92 | enum intel_display_power_domain { | 104 | enum intel_display_power_domain { |
| 93 | POWER_DOMAIN_PIPE_A, | 105 | POWER_DOMAIN_PIPE_A, |
| 94 | POWER_DOMAIN_PIPE_B, | 106 | POWER_DOMAIN_PIPE_B, |
| @@ -101,6 +113,7 @@ enum intel_display_power_domain { | |||
| 101 | POWER_DOMAIN_TRANSCODER_C, | 113 | POWER_DOMAIN_TRANSCODER_C, |
| 102 | POWER_DOMAIN_TRANSCODER_EDP, | 114 | POWER_DOMAIN_TRANSCODER_EDP, |
| 103 | POWER_DOMAIN_VGA, | 115 | POWER_DOMAIN_VGA, |
| 116 | POWER_DOMAIN_AUDIO, | ||
| 104 | POWER_DOMAIN_INIT, | 117 | POWER_DOMAIN_INIT, |
| 105 | 118 | ||
| 106 | POWER_DOMAIN_NUM, | 119 | POWER_DOMAIN_NUM, |
| @@ -310,13 +323,14 @@ struct drm_i915_error_state { | |||
| 310 | u32 instps[I915_NUM_RINGS]; | 323 | u32 instps[I915_NUM_RINGS]; |
| 311 | u32 extra_instdone[I915_NUM_INSTDONE_REG]; | 324 | u32 extra_instdone[I915_NUM_INSTDONE_REG]; |
| 312 | u32 seqno[I915_NUM_RINGS]; | 325 | u32 seqno[I915_NUM_RINGS]; |
| 313 | u64 bbaddr; | 326 | u64 bbaddr[I915_NUM_RINGS]; |
| 314 | u32 fault_reg[I915_NUM_RINGS]; | 327 | u32 fault_reg[I915_NUM_RINGS]; |
| 315 | u32 done_reg; | 328 | u32 done_reg; |
| 316 | u32 faddr[I915_NUM_RINGS]; | 329 | u32 faddr[I915_NUM_RINGS]; |
| 317 | u64 fence[I915_MAX_NUM_FENCES]; | 330 | u64 fence[I915_MAX_NUM_FENCES]; |
| 318 | struct timeval time; | 331 | struct timeval time; |
| 319 | struct drm_i915_error_ring { | 332 | struct drm_i915_error_ring { |
| 333 | bool valid; | ||
| 320 | struct drm_i915_error_object { | 334 | struct drm_i915_error_object { |
| 321 | int page_count; | 335 | int page_count; |
| 322 | u32 gtt_offset; | 336 | u32 gtt_offset; |
| @@ -351,6 +365,7 @@ struct drm_i915_error_state { | |||
| 351 | enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS]; | 365 | enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS]; |
| 352 | }; | 366 | }; |
| 353 | 367 | ||
| 368 | struct intel_connector; | ||
| 354 | struct intel_crtc_config; | 369 | struct intel_crtc_config; |
| 355 | struct intel_crtc; | 370 | struct intel_crtc; |
| 356 | struct intel_limit; | 371 | struct intel_limit; |
| @@ -358,7 +373,7 @@ struct dpll; | |||
| 358 | 373 | ||
| 359 | struct drm_i915_display_funcs { | 374 | struct drm_i915_display_funcs { |
| 360 | bool (*fbc_enabled)(struct drm_device *dev); | 375 | bool (*fbc_enabled)(struct drm_device *dev); |
| 361 | void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval); | 376 | void (*enable_fbc)(struct drm_crtc *crtc); |
| 362 | void (*disable_fbc)(struct drm_device *dev); | 377 | void (*disable_fbc)(struct drm_device *dev); |
| 363 | int (*get_display_clock_speed)(struct drm_device *dev); | 378 | int (*get_display_clock_speed)(struct drm_device *dev); |
| 364 | int (*get_fifo_size)(struct drm_device *dev, int plane); | 379 | int (*get_fifo_size)(struct drm_device *dev, int plane); |
| @@ -413,11 +428,20 @@ struct drm_i915_display_funcs { | |||
| 413 | /* render clock increase/decrease */ | 428 | /* render clock increase/decrease */ |
| 414 | /* display clock increase/decrease */ | 429 | /* display clock increase/decrease */ |
| 415 | /* pll clock increase/decrease */ | 430 | /* pll clock increase/decrease */ |
| 431 | |||
| 432 | int (*setup_backlight)(struct intel_connector *connector); | ||
| 433 | uint32_t (*get_backlight)(struct intel_connector *connector); | ||
| 434 | void (*set_backlight)(struct intel_connector *connector, | ||
| 435 | uint32_t level); | ||
| 436 | void (*disable_backlight)(struct intel_connector *connector); | ||
| 437 | void (*enable_backlight)(struct intel_connector *connector); | ||
| 416 | }; | 438 | }; |
| 417 | 439 | ||
| 418 | struct intel_uncore_funcs { | 440 | struct intel_uncore_funcs { |
| 419 | void (*force_wake_get)(struct drm_i915_private *dev_priv); | 441 | void (*force_wake_get)(struct drm_i915_private *dev_priv, |
| 420 | void (*force_wake_put)(struct drm_i915_private *dev_priv); | 442 | int fw_engine); |
| 443 | void (*force_wake_put)(struct drm_i915_private *dev_priv, | ||
| 444 | int fw_engine); | ||
| 421 | 445 | ||
| 422 | uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace); | 446 | uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace); |
| 423 | uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace); | 447 | uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace); |
| @@ -442,6 +466,9 @@ struct intel_uncore { | |||
| 442 | unsigned fifo_count; | 466 | unsigned fifo_count; |
| 443 | unsigned forcewake_count; | 467 | unsigned forcewake_count; |
| 444 | 468 | ||
| 469 | unsigned fw_rendercount; | ||
| 470 | unsigned fw_mediacount; | ||
| 471 | |||
| 445 | struct delayed_work force_wake_work; | 472 | struct delayed_work force_wake_work; |
| 446 | }; | 473 | }; |
| 447 | 474 | ||
| @@ -669,7 +696,6 @@ struct i915_fbc { | |||
| 669 | struct delayed_work work; | 696 | struct delayed_work work; |
| 670 | struct drm_crtc *crtc; | 697 | struct drm_crtc *crtc; |
| 671 | struct drm_framebuffer *fb; | 698 | struct drm_framebuffer *fb; |
| 672 | int interval; | ||
| 673 | } *fbc_work; | 699 | } *fbc_work; |
| 674 | 700 | ||
| 675 | enum no_fbc_reason { | 701 | enum no_fbc_reason { |
| @@ -708,7 +734,6 @@ enum intel_sbi_destination { | |||
| 708 | #define QUIRK_PIPEA_FORCE (1<<0) | 734 | #define QUIRK_PIPEA_FORCE (1<<0) |
| 709 | #define QUIRK_LVDS_SSC_DISABLE (1<<1) | 735 | #define QUIRK_LVDS_SSC_DISABLE (1<<1) |
| 710 | #define QUIRK_INVERT_BRIGHTNESS (1<<2) | 736 | #define QUIRK_INVERT_BRIGHTNESS (1<<2) |
| 711 | #define QUIRK_NO_PCH_PWM_ENABLE (1<<3) | ||
| 712 | 737 | ||
| 713 | struct intel_fbdev; | 738 | struct intel_fbdev; |
| 714 | struct intel_fbc_work; | 739 | struct intel_fbc_work; |
| @@ -761,8 +786,6 @@ struct i915_suspend_saved_registers { | |||
| 761 | u32 saveBLC_PWM_CTL; | 786 | u32 saveBLC_PWM_CTL; |
| 762 | u32 saveBLC_PWM_CTL2; | 787 | u32 saveBLC_PWM_CTL2; |
| 763 | u32 saveBLC_HIST_CTL_B; | 788 | u32 saveBLC_HIST_CTL_B; |
| 764 | u32 saveBLC_PWM_CTL_B; | ||
| 765 | u32 saveBLC_PWM_CTL2_B; | ||
| 766 | u32 saveBLC_CPU_PWM_CTL; | 789 | u32 saveBLC_CPU_PWM_CTL; |
| 767 | u32 saveBLC_CPU_PWM_CTL2; | 790 | u32 saveBLC_CPU_PWM_CTL2; |
| 768 | u32 saveFPB0; | 791 | u32 saveFPB0; |
| @@ -932,21 +955,29 @@ struct intel_ilk_power_mgmt { | |||
| 932 | 955 | ||
| 933 | /* Power well structure for haswell */ | 956 | /* Power well structure for haswell */ |
| 934 | struct i915_power_well { | 957 | struct i915_power_well { |
| 958 | const char *name; | ||
| 959 | bool always_on; | ||
| 935 | /* power well enable/disable usage count */ | 960 | /* power well enable/disable usage count */ |
| 936 | int count; | 961 | int count; |
| 962 | unsigned long domains; | ||
| 963 | void *data; | ||
| 964 | void (*set)(struct drm_device *dev, struct i915_power_well *power_well, | ||
| 965 | bool enable); | ||
| 966 | bool (*is_enabled)(struct drm_device *dev, | ||
| 967 | struct i915_power_well *power_well); | ||
| 937 | }; | 968 | }; |
| 938 | 969 | ||
| 939 | #define I915_MAX_POWER_WELLS 1 | ||
| 940 | |||
| 941 | struct i915_power_domains { | 970 | struct i915_power_domains { |
| 942 | /* | 971 | /* |
| 943 | * Power wells needed for initialization at driver init and suspend | 972 | * Power wells needed for initialization at driver init and suspend |
| 944 | * time are on. They are kept on until after the first modeset. | 973 | * time are on. They are kept on until after the first modeset. |
| 945 | */ | 974 | */ |
| 946 | bool init_power_on; | 975 | bool init_power_on; |
| 976 | int power_well_count; | ||
| 947 | 977 | ||
| 948 | struct mutex lock; | 978 | struct mutex lock; |
| 949 | struct i915_power_well power_wells[I915_MAX_POWER_WELLS]; | 979 | int domain_use_count[POWER_DOMAIN_NUM]; |
| 980 | struct i915_power_well *power_wells; | ||
| 950 | }; | 981 | }; |
| 951 | 982 | ||
| 952 | struct i915_dri1_state { | 983 | struct i915_dri1_state { |
| @@ -1077,34 +1108,30 @@ struct i915_gpu_error { | |||
| 1077 | unsigned long missed_irq_rings; | 1108 | unsigned long missed_irq_rings; |
| 1078 | 1109 | ||
| 1079 | /** | 1110 | /** |
| 1080 | * State variable and reset counter controlling the reset flow | 1111 | * State variable controlling the reset flow and count |
| 1081 | * | 1112 | * |
| 1082 | * Upper bits are for the reset counter. This counter is used by the | 1113 | * This is a counter which gets incremented when reset is triggered, |
| 1083 | * wait_seqno code to race-free noticed that a reset event happened and | 1114 | * and again when reset has been handled. So odd values (lowest bit set) |
| 1084 | * that it needs to restart the entire ioctl (since most likely the | 1115 | * means that reset is in progress and even values that |
| 1085 | * seqno it waited for won't ever signal anytime soon). | 1116 | * (reset_counter >> 1):th reset was successfully completed. |
| 1117 | * | ||
| 1118 | * If reset is not completed succesfully, the I915_WEDGE bit is | ||
| 1119 | * set meaning that hardware is terminally sour and there is no | ||
| 1120 | * recovery. All waiters on the reset_queue will be woken when | ||
| 1121 | * that happens. | ||
| 1122 | * | ||
| 1123 | * This counter is used by the wait_seqno code to notice that reset | ||
| 1124 | * event happened and it needs to restart the entire ioctl (since most | ||
| 1125 | * likely the seqno it waited for won't ever signal anytime soon). | ||
| 1086 | * | 1126 | * |
| 1087 | * This is important for lock-free wait paths, where no contended lock | 1127 | * This is important for lock-free wait paths, where no contended lock |
| 1088 | * naturally enforces the correct ordering between the bail-out of the | 1128 | * naturally enforces the correct ordering between the bail-out of the |
| 1089 | * waiter and the gpu reset work code. | 1129 | * waiter and the gpu reset work code. |
| 1090 | * | ||
| 1091 | * Lowest bit controls the reset state machine: Set means a reset is in | ||
| 1092 | * progress. This state will (presuming we don't have any bugs) decay | ||
| 1093 | * into either unset (successful reset) or the special WEDGED value (hw | ||
| 1094 | * terminally sour). All waiters on the reset_queue will be woken when | ||
| 1095 | * that happens. | ||
| 1096 | */ | 1130 | */ |
| 1097 | atomic_t reset_counter; | 1131 | atomic_t reset_counter; |
| 1098 | 1132 | ||
| 1099 | /** | ||
| 1100 | * Special values/flags for reset_counter | ||
| 1101 | * | ||
| 1102 | * Note that the code relies on | ||
| 1103 | * I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG | ||
| 1104 | * being true. | ||
| 1105 | */ | ||
| 1106 | #define I915_RESET_IN_PROGRESS_FLAG 1 | 1133 | #define I915_RESET_IN_PROGRESS_FLAG 1 |
| 1107 | #define I915_WEDGED 0xffffffff | 1134 | #define I915_WEDGED (1 << 31) |
| 1108 | 1135 | ||
| 1109 | /** | 1136 | /** |
| 1110 | * Waitqueue to signal when the reset has completed. Used by clients | 1137 | * Waitqueue to signal when the reset has completed. Used by clients |
| @@ -1158,6 +1185,11 @@ struct intel_vbt_data { | |||
| 1158 | int edp_bpp; | 1185 | int edp_bpp; |
| 1159 | struct edp_power_seq edp_pps; | 1186 | struct edp_power_seq edp_pps; |
| 1160 | 1187 | ||
| 1188 | struct { | ||
| 1189 | u16 pwm_freq_hz; | ||
| 1190 | bool active_low_pwm; | ||
| 1191 | } backlight; | ||
| 1192 | |||
| 1161 | /* MIPI DSI */ | 1193 | /* MIPI DSI */ |
| 1162 | struct { | 1194 | struct { |
| 1163 | u16 panel_id; | 1195 | u16 panel_id; |
| @@ -1184,7 +1216,7 @@ struct intel_wm_level { | |||
| 1184 | uint32_t fbc_val; | 1216 | uint32_t fbc_val; |
| 1185 | }; | 1217 | }; |
| 1186 | 1218 | ||
| 1187 | struct hsw_wm_values { | 1219 | struct ilk_wm_values { |
| 1188 | uint32_t wm_pipe[3]; | 1220 | uint32_t wm_pipe[3]; |
| 1189 | uint32_t wm_lp[3]; | 1221 | uint32_t wm_lp[3]; |
| 1190 | uint32_t wm_lp_spr[3]; | 1222 | uint32_t wm_lp_spr[3]; |
| @@ -1262,6 +1294,10 @@ struct i915_package_c8 { | |||
| 1262 | } regsave; | 1294 | } regsave; |
| 1263 | }; | 1295 | }; |
| 1264 | 1296 | ||
| 1297 | struct i915_runtime_pm { | ||
| 1298 | bool suspended; | ||
| 1299 | }; | ||
| 1300 | |||
| 1265 | enum intel_pipe_crc_source { | 1301 | enum intel_pipe_crc_source { |
| 1266 | INTEL_PIPE_CRC_SOURCE_NONE, | 1302 | INTEL_PIPE_CRC_SOURCE_NONE, |
| 1267 | INTEL_PIPE_CRC_SOURCE_PLANE1, | 1303 | INTEL_PIPE_CRC_SOURCE_PLANE1, |
| @@ -1366,15 +1402,9 @@ typedef struct drm_i915_private { | |||
| 1366 | 1402 | ||
| 1367 | /* overlay */ | 1403 | /* overlay */ |
| 1368 | struct intel_overlay *overlay; | 1404 | struct intel_overlay *overlay; |
| 1369 | unsigned int sprite_scaling_enabled; | ||
| 1370 | 1405 | ||
| 1371 | /* backlight */ | 1406 | /* backlight registers and fields in struct intel_panel */ |
| 1372 | struct { | 1407 | spinlock_t backlight_lock; |
| 1373 | int level; | ||
| 1374 | bool enabled; | ||
| 1375 | spinlock_t lock; /* bl registers and the above bl fields */ | ||
| 1376 | struct backlight_device *device; | ||
| 1377 | } backlight; | ||
| 1378 | 1408 | ||
| 1379 | /* LVDS info */ | 1409 | /* LVDS info */ |
| 1380 | bool no_aux_handshake; | 1410 | bool no_aux_handshake; |
| @@ -1426,6 +1456,7 @@ typedef struct drm_i915_private { | |||
| 1426 | int num_shared_dpll; | 1456 | int num_shared_dpll; |
| 1427 | struct intel_shared_dpll shared_dplls[I915_NUM_PLLS]; | 1457 | struct intel_shared_dpll shared_dplls[I915_NUM_PLLS]; |
| 1428 | struct intel_ddi_plls ddi_plls; | 1458 | struct intel_ddi_plls ddi_plls; |
| 1459 | int dpio_phy_iosf_port[I915_NUM_PHYS_VLV]; | ||
| 1429 | 1460 | ||
| 1430 | /* Reclocking support */ | 1461 | /* Reclocking support */ |
| 1431 | bool render_reclock_avail; | 1462 | bool render_reclock_avail; |
| @@ -1470,7 +1501,6 @@ typedef struct drm_i915_private { | |||
| 1470 | struct drm_property *broadcast_rgb_property; | 1501 | struct drm_property *broadcast_rgb_property; |
| 1471 | struct drm_property *force_audio_property; | 1502 | struct drm_property *force_audio_property; |
| 1472 | 1503 | ||
| 1473 | bool hw_contexts_disabled; | ||
| 1474 | uint32_t hw_context_size; | 1504 | uint32_t hw_context_size; |
| 1475 | struct list_head context_list; | 1505 | struct list_head context_list; |
| 1476 | 1506 | ||
| @@ -1492,11 +1522,13 @@ typedef struct drm_i915_private { | |||
| 1492 | uint16_t cur_latency[5]; | 1522 | uint16_t cur_latency[5]; |
| 1493 | 1523 | ||
| 1494 | /* current hardware state */ | 1524 | /* current hardware state */ |
| 1495 | struct hsw_wm_values hw; | 1525 | struct ilk_wm_values hw; |
| 1496 | } wm; | 1526 | } wm; |
| 1497 | 1527 | ||
| 1498 | struct i915_package_c8 pc8; | 1528 | struct i915_package_c8 pc8; |
| 1499 | 1529 | ||
| 1530 | struct i915_runtime_pm pm; | ||
| 1531 | |||
| 1500 | /* Old dri1 support infrastructure, beware the dragons ya fools entering | 1532 | /* Old dri1 support infrastructure, beware the dragons ya fools entering |
| 1501 | * here! */ | 1533 | * here! */ |
| 1502 | struct i915_dri1_state dri1; | 1534 | struct i915_dri1_state dri1; |
| @@ -1813,15 +1845,15 @@ struct drm_i915_file_private { | |||
| 1813 | 1845 | ||
| 1814 | #define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2) | 1846 | #define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2) |
| 1815 | #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) | 1847 | #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) |
| 1816 | #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) | 1848 | #define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) |
| 1817 | 1849 | ||
| 1818 | #define HAS_IPS(dev) (IS_ULT(dev) || IS_BROADWELL(dev)) | 1850 | #define HAS_IPS(dev) (IS_ULT(dev) || IS_BROADWELL(dev)) |
| 1819 | 1851 | ||
| 1820 | #define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi) | 1852 | #define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi) |
| 1821 | #define HAS_POWER_WELL(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev)) | ||
| 1822 | #define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg) | 1853 | #define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg) |
| 1823 | #define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev)) | 1854 | #define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev)) |
| 1824 | #define HAS_PC8(dev) (IS_HASWELL(dev)) /* XXX HSW:ULX */ | 1855 | #define HAS_PC8(dev) (IS_HASWELL(dev)) /* XXX HSW:ULX */ |
| 1856 | #define HAS_RUNTIME_PM(dev) (IS_HASWELL(dev)) | ||
| 1825 | 1857 | ||
| 1826 | #define INTEL_PCH_DEVICE_ID_MASK 0xff00 | 1858 | #define INTEL_PCH_DEVICE_ID_MASK 0xff00 |
| 1827 | #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 | 1859 | #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 |
| @@ -1911,7 +1943,6 @@ extern void intel_hpd_init(struct drm_device *dev); | |||
| 1911 | extern void intel_uncore_sanitize(struct drm_device *dev); | 1943 | extern void intel_uncore_sanitize(struct drm_device *dev); |
| 1912 | extern void intel_uncore_early_sanitize(struct drm_device *dev); | 1944 | extern void intel_uncore_early_sanitize(struct drm_device *dev); |
| 1913 | extern void intel_uncore_init(struct drm_device *dev); | 1945 | extern void intel_uncore_init(struct drm_device *dev); |
| 1914 | extern void intel_uncore_clear_errors(struct drm_device *dev); | ||
| 1915 | extern void intel_uncore_check_errors(struct drm_device *dev); | 1946 | extern void intel_uncore_check_errors(struct drm_device *dev); |
| 1916 | extern void intel_uncore_fini(struct drm_device *dev); | 1947 | extern void intel_uncore_fini(struct drm_device *dev); |
| 1917 | 1948 | ||
| @@ -1987,6 +2018,7 @@ void i915_gem_object_unpin(struct drm_i915_gem_object *obj); | |||
| 1987 | int __must_check i915_vma_unbind(struct i915_vma *vma); | 2018 | int __must_check i915_vma_unbind(struct i915_vma *vma); |
| 1988 | int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj); | 2019 | int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj); |
| 1989 | int i915_gem_object_put_pages(struct drm_i915_gem_object *obj); | 2020 | int i915_gem_object_put_pages(struct drm_i915_gem_object *obj); |
| 2021 | void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv); | ||
| 1990 | void i915_gem_release_mmap(struct drm_i915_gem_object *obj); | 2022 | void i915_gem_release_mmap(struct drm_i915_gem_object *obj); |
| 1991 | void i915_gem_lastclose(struct drm_device *dev); | 2023 | void i915_gem_lastclose(struct drm_device *dev); |
| 1992 | 2024 | ||
| @@ -2063,12 +2095,17 @@ int __must_check i915_gem_check_wedge(struct i915_gpu_error *error, | |||
| 2063 | static inline bool i915_reset_in_progress(struct i915_gpu_error *error) | 2095 | static inline bool i915_reset_in_progress(struct i915_gpu_error *error) |
| 2064 | { | 2096 | { |
| 2065 | return unlikely(atomic_read(&error->reset_counter) | 2097 | return unlikely(atomic_read(&error->reset_counter) |
| 2066 | & I915_RESET_IN_PROGRESS_FLAG); | 2098 | & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED)); |
| 2067 | } | 2099 | } |
| 2068 | 2100 | ||
| 2069 | static inline bool i915_terminally_wedged(struct i915_gpu_error *error) | 2101 | static inline bool i915_terminally_wedged(struct i915_gpu_error *error) |
| 2070 | { | 2102 | { |
| 2071 | return atomic_read(&error->reset_counter) == I915_WEDGED; | 2103 | return atomic_read(&error->reset_counter) & I915_WEDGED; |
| 2104 | } | ||
| 2105 | |||
| 2106 | static inline u32 i915_reset_count(struct i915_gpu_error *error) | ||
| 2107 | { | ||
| 2108 | return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2; | ||
| 2072 | } | 2109 | } |
| 2073 | 2110 | ||
| 2074 | void i915_gem_reset(struct drm_device *dev); | 2111 | void i915_gem_reset(struct drm_device *dev); |
| @@ -2180,7 +2217,7 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj, | |||
| 2180 | } | 2217 | } |
| 2181 | 2218 | ||
| 2182 | /* i915_gem_context.c */ | 2219 | /* i915_gem_context.c */ |
| 2183 | void i915_gem_context_init(struct drm_device *dev); | 2220 | int __must_check i915_gem_context_init(struct drm_device *dev); |
| 2184 | void i915_gem_context_fini(struct drm_device *dev); | 2221 | void i915_gem_context_fini(struct drm_device *dev); |
| 2185 | void i915_gem_context_close(struct drm_device *dev, struct drm_file *file); | 2222 | void i915_gem_context_close(struct drm_device *dev, struct drm_file *file); |
| 2186 | int i915_switch_context(struct intel_ring_buffer *ring, | 2223 | int i915_switch_context(struct intel_ring_buffer *ring, |
| @@ -2399,6 +2436,8 @@ extern int intel_enable_rc6(const struct drm_device *dev); | |||
| 2399 | extern bool i915_semaphore_is_enabled(struct drm_device *dev); | 2436 | extern bool i915_semaphore_is_enabled(struct drm_device *dev); |
| 2400 | int i915_reg_read_ioctl(struct drm_device *dev, void *data, | 2437 | int i915_reg_read_ioctl(struct drm_device *dev, void *data, |
| 2401 | struct drm_file *file); | 2438 | struct drm_file *file); |
| 2439 | int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data, | ||
| 2440 | struct drm_file *file); | ||
| 2402 | 2441 | ||
| 2403 | /* overlay */ | 2442 | /* overlay */ |
| 2404 | extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); | 2443 | extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); |
| @@ -2414,8 +2453,8 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e, | |||
| 2414 | * must be set to prevent GT core from power down and stale values being | 2453 | * must be set to prevent GT core from power down and stale values being |
| 2415 | * returned. | 2454 | * returned. |
| 2416 | */ | 2455 | */ |
| 2417 | void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv); | 2456 | void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine); |
| 2418 | void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); | 2457 | void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine); |
| 2419 | 2458 | ||
| 2420 | int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val); | 2459 | int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val); |
| 2421 | int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val); | 2460 | int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val); |
| @@ -2430,6 +2469,8 @@ u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg); | |||
| 2430 | void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); | 2469 | void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); |
| 2431 | u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg); | 2470 | u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg); |
| 2432 | void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); | 2471 | void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); |
| 2472 | u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg); | ||
| 2473 | void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); | ||
| 2433 | u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg); | 2474 | u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg); |
| 2434 | void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); | 2475 | void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); |
| 2435 | u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg); | 2476 | u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg); |
| @@ -2438,9 +2479,30 @@ u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg, | |||
| 2438 | enum intel_sbi_destination destination); | 2479 | enum intel_sbi_destination destination); |
| 2439 | void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value, | 2480 | void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value, |
| 2440 | enum intel_sbi_destination destination); | 2481 | enum intel_sbi_destination destination); |
| 2482 | u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg); | ||
| 2483 | void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); | ||
| 2484 | |||
| 2485 | int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val); | ||
| 2486 | int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val); | ||
| 2487 | |||
| 2488 | void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine); | ||
| 2489 | void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine); | ||
| 2490 | |||
| 2491 | #define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \ | ||
| 2492 | (((reg) >= 0x2000 && (reg) < 0x4000) ||\ | ||
| 2493 | ((reg) >= 0x5000 && (reg) < 0x8000) ||\ | ||
| 2494 | ((reg) >= 0xB000 && (reg) < 0x12000) ||\ | ||
| 2495 | ((reg) >= 0x2E000 && (reg) < 0x30000)) | ||
| 2496 | |||
| 2497 | #define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)\ | ||
| 2498 | (((reg) >= 0x12000 && (reg) < 0x14000) ||\ | ||
| 2499 | ((reg) >= 0x22000 && (reg) < 0x24000) ||\ | ||
| 2500 | ((reg) >= 0x30000 && (reg) < 0x40000)) | ||
| 2501 | |||
| 2502 | #define FORCEWAKE_RENDER (1 << 0) | ||
| 2503 | #define FORCEWAKE_MEDIA (1 << 1) | ||
| 2504 | #define FORCEWAKE_ALL (FORCEWAKE_RENDER | FORCEWAKE_MEDIA) | ||
| 2441 | 2505 | ||
| 2442 | int vlv_gpu_freq(int ddr_freq, int val); | ||
| 2443 | int vlv_freq_opcode(int ddr_freq, int val); | ||
| 2444 | 2506 | ||
| 2445 | #define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true) | 2507 | #define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true) |
| 2446 | #define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true) | 2508 | #define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true) |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 76d3d1ab73c6..00c836154725 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
| @@ -1015,9 +1015,11 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, | |||
| 1015 | struct drm_i915_file_private *file_priv) | 1015 | struct drm_i915_file_private *file_priv) |
| 1016 | { | 1016 | { |
| 1017 | drm_i915_private_t *dev_priv = ring->dev->dev_private; | 1017 | drm_i915_private_t *dev_priv = ring->dev->dev_private; |
| 1018 | const bool irq_test_in_progress = | ||
| 1019 | ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring); | ||
| 1018 | struct timespec before, now; | 1020 | struct timespec before, now; |
| 1019 | DEFINE_WAIT(wait); | 1021 | DEFINE_WAIT(wait); |
| 1020 | long timeout_jiffies; | 1022 | unsigned long timeout_expire; |
| 1021 | int ret; | 1023 | int ret; |
| 1022 | 1024 | ||
| 1023 | WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n"); | 1025 | WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n"); |
| @@ -1025,7 +1027,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, | |||
| 1025 | if (i915_seqno_passed(ring->get_seqno(ring, true), seqno)) | 1027 | if (i915_seqno_passed(ring->get_seqno(ring, true), seqno)) |
| 1026 | return 0; | 1028 | return 0; |
| 1027 | 1029 | ||
| 1028 | timeout_jiffies = timeout ? timespec_to_jiffies_timeout(timeout) : 1; | 1030 | timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0; |
| 1029 | 1031 | ||
| 1030 | if (dev_priv->info->gen >= 6 && can_wait_boost(file_priv)) { | 1032 | if (dev_priv->info->gen >= 6 && can_wait_boost(file_priv)) { |
| 1031 | gen6_rps_boost(dev_priv); | 1033 | gen6_rps_boost(dev_priv); |
| @@ -1035,8 +1037,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, | |||
| 1035 | msecs_to_jiffies(100)); | 1037 | msecs_to_jiffies(100)); |
| 1036 | } | 1038 | } |
| 1037 | 1039 | ||
| 1038 | if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)) && | 1040 | if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring))) |
| 1039 | WARN_ON(!ring->irq_get(ring))) | ||
| 1040 | return -ENODEV; | 1041 | return -ENODEV; |
| 1041 | 1042 | ||
| 1042 | /* Record current time in case interrupted by signal, or wedged */ | 1043 | /* Record current time in case interrupted by signal, or wedged */ |
| @@ -1044,7 +1045,6 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, | |||
| 1044 | getrawmonotonic(&before); | 1045 | getrawmonotonic(&before); |
| 1045 | for (;;) { | 1046 | for (;;) { |
| 1046 | struct timer_list timer; | 1047 | struct timer_list timer; |
| 1047 | unsigned long expire; | ||
| 1048 | 1048 | ||
| 1049 | prepare_to_wait(&ring->irq_queue, &wait, | 1049 | prepare_to_wait(&ring->irq_queue, &wait, |
| 1050 | interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); | 1050 | interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); |
| @@ -1070,23 +1070,22 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, | |||
| 1070 | break; | 1070 | break; |
| 1071 | } | 1071 | } |
| 1072 | 1072 | ||
| 1073 | if (timeout_jiffies <= 0) { | 1073 | if (timeout && time_after_eq(jiffies, timeout_expire)) { |
| 1074 | ret = -ETIME; | 1074 | ret = -ETIME; |
| 1075 | break; | 1075 | break; |
| 1076 | } | 1076 | } |
| 1077 | 1077 | ||
| 1078 | timer.function = NULL; | 1078 | timer.function = NULL; |
| 1079 | if (timeout || missed_irq(dev_priv, ring)) { | 1079 | if (timeout || missed_irq(dev_priv, ring)) { |
| 1080 | unsigned long expire; | ||
| 1081 | |||
| 1080 | setup_timer_on_stack(&timer, fake_irq, (unsigned long)current); | 1082 | setup_timer_on_stack(&timer, fake_irq, (unsigned long)current); |
| 1081 | expire = jiffies + (missed_irq(dev_priv, ring) ? 1: timeout_jiffies); | 1083 | expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire; |
| 1082 | mod_timer(&timer, expire); | 1084 | mod_timer(&timer, expire); |
| 1083 | } | 1085 | } |
| 1084 | 1086 | ||
| 1085 | io_schedule(); | 1087 | io_schedule(); |
| 1086 | 1088 | ||
| 1087 | if (timeout) | ||
| 1088 | timeout_jiffies = expire - jiffies; | ||
| 1089 | |||
| 1090 | if (timer.function) { | 1089 | if (timer.function) { |
| 1091 | del_singleshot_timer_sync(&timer); | 1090 | del_singleshot_timer_sync(&timer); |
| 1092 | destroy_timer_on_stack(&timer); | 1091 | destroy_timer_on_stack(&timer); |
| @@ -1095,7 +1094,8 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, | |||
| 1095 | getrawmonotonic(&now); | 1094 | getrawmonotonic(&now); |
| 1096 | trace_i915_gem_request_wait_end(ring, seqno); | 1095 | trace_i915_gem_request_wait_end(ring, seqno); |
| 1097 | 1096 | ||
| 1098 | ring->irq_put(ring); | 1097 | if (!irq_test_in_progress) |
| 1098 | ring->irq_put(ring); | ||
| 1099 | 1099 | ||
| 1100 | finish_wait(&ring->irq_queue, &wait); | 1100 | finish_wait(&ring->irq_queue, &wait); |
| 1101 | 1101 | ||
| @@ -1380,6 +1380,8 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
| 1380 | int ret = 0; | 1380 | int ret = 0; |
| 1381 | bool write = !!(vmf->flags & FAULT_FLAG_WRITE); | 1381 | bool write = !!(vmf->flags & FAULT_FLAG_WRITE); |
| 1382 | 1382 | ||
| 1383 | intel_runtime_pm_get(dev_priv); | ||
| 1384 | |||
| 1383 | /* We don't use vmf->pgoff since that has the fake offset */ | 1385 | /* We don't use vmf->pgoff since that has the fake offset */ |
| 1384 | page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> | 1386 | page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> |
| 1385 | PAGE_SHIFT; | 1387 | PAGE_SHIFT; |
| @@ -1427,8 +1429,10 @@ out: | |||
| 1427 | /* If this -EIO is due to a gpu hang, give the reset code a | 1429 | /* If this -EIO is due to a gpu hang, give the reset code a |
| 1428 | * chance to clean up the mess. Otherwise return the proper | 1430 | * chance to clean up the mess. Otherwise return the proper |
| 1429 | * SIGBUS. */ | 1431 | * SIGBUS. */ |
| 1430 | if (i915_terminally_wedged(&dev_priv->gpu_error)) | 1432 | if (i915_terminally_wedged(&dev_priv->gpu_error)) { |
| 1431 | return VM_FAULT_SIGBUS; | 1433 | ret = VM_FAULT_SIGBUS; |
| 1434 | break; | ||
| 1435 | } | ||
| 1432 | case -EAGAIN: | 1436 | case -EAGAIN: |
| 1433 | /* | 1437 | /* |
| 1434 | * EAGAIN means the gpu is hung and we'll wait for the error | 1438 | * EAGAIN means the gpu is hung and we'll wait for the error |
| @@ -1443,15 +1447,38 @@ out: | |||
| 1443 | * EBUSY is ok: this just means that another thread | 1447 | * EBUSY is ok: this just means that another thread |
| 1444 | * already did the job. | 1448 | * already did the job. |
| 1445 | */ | 1449 | */ |
| 1446 | return VM_FAULT_NOPAGE; | 1450 | ret = VM_FAULT_NOPAGE; |
| 1451 | break; | ||
| 1447 | case -ENOMEM: | 1452 | case -ENOMEM: |
| 1448 | return VM_FAULT_OOM; | 1453 | ret = VM_FAULT_OOM; |
| 1454 | break; | ||
| 1449 | case -ENOSPC: | 1455 | case -ENOSPC: |
| 1450 | return VM_FAULT_SIGBUS; | 1456 | ret = VM_FAULT_SIGBUS; |
| 1457 | break; | ||
| 1451 | default: | 1458 | default: |
| 1452 | WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret); | 1459 | WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret); |
| 1453 | return VM_FAULT_SIGBUS; | 1460 | ret = VM_FAULT_SIGBUS; |
| 1461 | break; | ||
| 1454 | } | 1462 | } |
| 1463 | |||
| 1464 | intel_runtime_pm_put(dev_priv); | ||
| 1465 | return ret; | ||
| 1466 | } | ||
| 1467 | |||
| 1468 | void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv) | ||
| 1469 | { | ||
| 1470 | struct i915_vma *vma; | ||
| 1471 | |||
| 1472 | /* | ||
| 1473 | * Only the global gtt is relevant for gtt memory mappings, so restrict | ||
| 1474 | * list traversal to objects bound into the global address space. Note | ||
| 1475 | * that the active list should be empty, but better safe than sorry. | ||
| 1476 | */ | ||
| 1477 | WARN_ON(!list_empty(&dev_priv->gtt.base.active_list)); | ||
| 1478 | list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list) | ||
| 1479 | i915_gem_release_mmap(vma->obj); | ||
| 1480 | list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list) | ||
| 1481 | i915_gem_release_mmap(vma->obj); | ||
| 1455 | } | 1482 | } |
| 1456 | 1483 | ||
| 1457 | /** | 1484 | /** |
| @@ -2303,7 +2330,7 @@ static void i915_set_reset_status(struct intel_ring_buffer *ring, | |||
| 2303 | 2330 | ||
| 2304 | if (ring->hangcheck.action != HANGCHECK_WAIT && | 2331 | if (ring->hangcheck.action != HANGCHECK_WAIT && |
| 2305 | i915_request_guilty(request, acthd, &inside)) { | 2332 | i915_request_guilty(request, acthd, &inside)) { |
| 2306 | DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n", | 2333 | DRM_DEBUG("%s hung %s bo (0x%lx ctx %d) at 0x%x\n", |
| 2307 | ring->name, | 2334 | ring->name, |
| 2308 | inside ? "inside" : "flushing", | 2335 | inside ? "inside" : "flushing", |
| 2309 | offset, | 2336 | offset, |
| @@ -2361,16 +2388,6 @@ static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv, | |||
| 2361 | static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv, | 2388 | static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv, |
| 2362 | struct intel_ring_buffer *ring) | 2389 | struct intel_ring_buffer *ring) |
| 2363 | { | 2390 | { |
| 2364 | while (!list_empty(&ring->request_list)) { | ||
| 2365 | struct drm_i915_gem_request *request; | ||
| 2366 | |||
| 2367 | request = list_first_entry(&ring->request_list, | ||
| 2368 | struct drm_i915_gem_request, | ||
| 2369 | list); | ||
| 2370 | |||
| 2371 | i915_gem_free_request(request); | ||
| 2372 | } | ||
| 2373 | |||
| 2374 | while (!list_empty(&ring->active_list)) { | 2391 | while (!list_empty(&ring->active_list)) { |
| 2375 | struct drm_i915_gem_object *obj; | 2392 | struct drm_i915_gem_object *obj; |
| 2376 | 2393 | ||
| @@ -2380,6 +2397,23 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv, | |||
| 2380 | 2397 | ||
| 2381 | i915_gem_object_move_to_inactive(obj); | 2398 | i915_gem_object_move_to_inactive(obj); |
| 2382 | } | 2399 | } |
| 2400 | |||
| 2401 | /* | ||
| 2402 | * We must free the requests after all the corresponding objects have | ||
| 2403 | * been moved off active lists. Which is the same order as the normal | ||
| 2404 | * retire_requests function does. This is important if object hold | ||
| 2405 | * implicit references on things like e.g. ppgtt address spaces through | ||
| 2406 | * the request. | ||
| 2407 | */ | ||
| 2408 | while (!list_empty(&ring->request_list)) { | ||
| 2409 | struct drm_i915_gem_request *request; | ||
| 2410 | |||
| 2411 | request = list_first_entry(&ring->request_list, | ||
| 2412 | struct drm_i915_gem_request, | ||
| 2413 | list); | ||
| 2414 | |||
| 2415 | i915_gem_free_request(request); | ||
| 2416 | } | ||
| 2383 | } | 2417 | } |
| 2384 | 2418 | ||
| 2385 | void i915_gem_restore_fences(struct drm_device *dev) | 2419 | void i915_gem_restore_fences(struct drm_device *dev) |
| @@ -2760,7 +2794,6 @@ int i915_vma_unbind(struct i915_vma *vma) | |||
| 2760 | obj->has_aliasing_ppgtt_mapping = 0; | 2794 | obj->has_aliasing_ppgtt_mapping = 0; |
| 2761 | } | 2795 | } |
| 2762 | i915_gem_gtt_finish_object(obj); | 2796 | i915_gem_gtt_finish_object(obj); |
| 2763 | i915_gem_object_unpin_pages(obj); | ||
| 2764 | 2797 | ||
| 2765 | list_del(&vma->mm_list); | 2798 | list_del(&vma->mm_list); |
| 2766 | /* Avoid an unnecessary call to unbind on rebind. */ | 2799 | /* Avoid an unnecessary call to unbind on rebind. */ |
| @@ -2768,7 +2801,6 @@ int i915_vma_unbind(struct i915_vma *vma) | |||
| 2768 | obj->map_and_fenceable = true; | 2801 | obj->map_and_fenceable = true; |
| 2769 | 2802 | ||
| 2770 | drm_mm_remove_node(&vma->node); | 2803 | drm_mm_remove_node(&vma->node); |
| 2771 | |||
| 2772 | i915_gem_vma_destroy(vma); | 2804 | i915_gem_vma_destroy(vma); |
| 2773 | 2805 | ||
| 2774 | /* Since the unbound list is global, only move to that list if | 2806 | /* Since the unbound list is global, only move to that list if |
| @@ -2776,6 +2808,12 @@ int i915_vma_unbind(struct i915_vma *vma) | |||
| 2776 | if (list_empty(&obj->vma_list)) | 2808 | if (list_empty(&obj->vma_list)) |
| 2777 | list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list); | 2809 | list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list); |
| 2778 | 2810 | ||
| 2811 | /* And finally now the object is completely decoupled from this vma, | ||
| 2812 | * we can drop its hold on the backing storage and allow it to be | ||
| 2813 | * reaped by the shrinker. | ||
| 2814 | */ | ||
| 2815 | i915_gem_object_unpin_pages(obj); | ||
| 2816 | |||
| 2779 | return 0; | 2817 | return 0; |
| 2780 | } | 2818 | } |
| 2781 | 2819 | ||
| @@ -3068,7 +3106,7 @@ i915_find_fence_reg(struct drm_device *dev) | |||
| 3068 | } | 3106 | } |
| 3069 | 3107 | ||
| 3070 | if (avail == NULL) | 3108 | if (avail == NULL) |
| 3071 | return NULL; | 3109 | goto deadlock; |
| 3072 | 3110 | ||
| 3073 | /* None available, try to steal one or wait for a user to finish */ | 3111 | /* None available, try to steal one or wait for a user to finish */ |
| 3074 | list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) { | 3112 | list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) { |
| @@ -3078,7 +3116,12 @@ i915_find_fence_reg(struct drm_device *dev) | |||
| 3078 | return reg; | 3116 | return reg; |
| 3079 | } | 3117 | } |
| 3080 | 3118 | ||
| 3081 | return NULL; | 3119 | deadlock: |
| 3120 | /* Wait for completion of pending flips which consume fences */ | ||
| 3121 | if (intel_has_pending_fb_unpin(dev)) | ||
| 3122 | return ERR_PTR(-EAGAIN); | ||
| 3123 | |||
| 3124 | return ERR_PTR(-EDEADLK); | ||
| 3082 | } | 3125 | } |
| 3083 | 3126 | ||
| 3084 | /** | 3127 | /** |
| @@ -3123,8 +3166,8 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj) | |||
| 3123 | } | 3166 | } |
| 3124 | } else if (enable) { | 3167 | } else if (enable) { |
| 3125 | reg = i915_find_fence_reg(dev); | 3168 | reg = i915_find_fence_reg(dev); |
| 3126 | if (reg == NULL) | 3169 | if (IS_ERR(reg)) |
| 3127 | return -EDEADLK; | 3170 | return PTR_ERR(reg); |
| 3128 | 3171 | ||
| 3129 | if (reg->obj) { | 3172 | if (reg->obj) { |
| 3130 | struct drm_i915_gem_object *old = reg->obj; | 3173 | struct drm_i915_gem_object *old = reg->obj; |
| @@ -4179,6 +4222,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) | |||
| 4179 | drm_i915_private_t *dev_priv = dev->dev_private; | 4222 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 4180 | struct i915_vma *vma, *next; | 4223 | struct i915_vma *vma, *next; |
| 4181 | 4224 | ||
| 4225 | intel_runtime_pm_get(dev_priv); | ||
| 4226 | |||
| 4182 | trace_i915_gem_object_destroy(obj); | 4227 | trace_i915_gem_object_destroy(obj); |
| 4183 | 4228 | ||
| 4184 | if (obj->phys_obj) | 4229 | if (obj->phys_obj) |
| @@ -4223,6 +4268,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) | |||
| 4223 | 4268 | ||
| 4224 | kfree(obj->bit_17); | 4269 | kfree(obj->bit_17); |
| 4225 | i915_gem_object_free(obj); | 4270 | i915_gem_object_free(obj); |
| 4271 | |||
| 4272 | intel_runtime_pm_put(dev_priv); | ||
| 4226 | } | 4273 | } |
| 4227 | 4274 | ||
| 4228 | struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, | 4275 | struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, |
| @@ -4479,7 +4526,13 @@ i915_gem_init_hw(struct drm_device *dev) | |||
| 4479 | * XXX: There was some w/a described somewhere suggesting loading | 4526 | * XXX: There was some w/a described somewhere suggesting loading |
| 4480 | * contexts before PPGTT. | 4527 | * contexts before PPGTT. |
| 4481 | */ | 4528 | */ |
| 4482 | i915_gem_context_init(dev); | 4529 | ret = i915_gem_context_init(dev); |
| 4530 | if (ret) { | ||
| 4531 | i915_gem_cleanup_ringbuffer(dev); | ||
| 4532 | DRM_ERROR("Context initialization failed %d\n", ret); | ||
| 4533 | return ret; | ||
| 4534 | } | ||
| 4535 | |||
| 4483 | if (dev_priv->mm.aliasing_ppgtt) { | 4536 | if (dev_priv->mm.aliasing_ppgtt) { |
| 4484 | ret = dev_priv->mm.aliasing_ppgtt->enable(dev); | 4537 | ret = dev_priv->mm.aliasing_ppgtt->enable(dev); |
| 4485 | if (ret) { | 4538 | if (ret) { |
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index b0f42b9ca037..e08acaba5402 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c | |||
| @@ -247,36 +247,34 @@ err_destroy: | |||
| 247 | return ret; | 247 | return ret; |
| 248 | } | 248 | } |
| 249 | 249 | ||
| 250 | void i915_gem_context_init(struct drm_device *dev) | 250 | int i915_gem_context_init(struct drm_device *dev) |
| 251 | { | 251 | { |
| 252 | struct drm_i915_private *dev_priv = dev->dev_private; | 252 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 253 | int ret; | ||
| 253 | 254 | ||
| 254 | if (!HAS_HW_CONTEXTS(dev)) { | 255 | if (!HAS_HW_CONTEXTS(dev)) |
| 255 | dev_priv->hw_contexts_disabled = true; | 256 | return 0; |
| 256 | DRM_DEBUG_DRIVER("Disabling HW Contexts; old hardware\n"); | ||
| 257 | return; | ||
| 258 | } | ||
| 259 | 257 | ||
| 260 | /* If called from reset, or thaw... we've been here already */ | 258 | /* If called from reset, or thaw... we've been here already */ |
| 261 | if (dev_priv->hw_contexts_disabled || | 259 | if (dev_priv->ring[RCS].default_context) |
| 262 | dev_priv->ring[RCS].default_context) | 260 | return 0; |
| 263 | return; | ||
| 264 | 261 | ||
| 265 | dev_priv->hw_context_size = round_up(get_context_size(dev), 4096); | 262 | dev_priv->hw_context_size = round_up(get_context_size(dev), 4096); |
| 266 | 263 | ||
| 267 | if (dev_priv->hw_context_size > (1<<20)) { | 264 | if (dev_priv->hw_context_size > (1<<20)) { |
| 268 | dev_priv->hw_contexts_disabled = true; | ||
| 269 | DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size\n"); | 265 | DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size\n"); |
| 270 | return; | 266 | return -E2BIG; |
| 271 | } | 267 | } |
| 272 | 268 | ||
| 273 | if (create_default_context(dev_priv)) { | 269 | ret = create_default_context(dev_priv); |
| 274 | dev_priv->hw_contexts_disabled = true; | 270 | if (ret) { |
| 275 | DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed\n"); | 271 | DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed %d\n", |
| 276 | return; | 272 | ret); |
| 273 | return ret; | ||
| 277 | } | 274 | } |
| 278 | 275 | ||
| 279 | DRM_DEBUG_DRIVER("HW context support initialized\n"); | 276 | DRM_DEBUG_DRIVER("HW context support initialized\n"); |
| 277 | return 0; | ||
| 280 | } | 278 | } |
| 281 | 279 | ||
| 282 | void i915_gem_context_fini(struct drm_device *dev) | 280 | void i915_gem_context_fini(struct drm_device *dev) |
| @@ -284,7 +282,7 @@ void i915_gem_context_fini(struct drm_device *dev) | |||
| 284 | struct drm_i915_private *dev_priv = dev->dev_private; | 282 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 285 | struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context; | 283 | struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context; |
| 286 | 284 | ||
| 287 | if (dev_priv->hw_contexts_disabled) | 285 | if (!HAS_HW_CONTEXTS(dev)) |
| 288 | return; | 286 | return; |
| 289 | 287 | ||
| 290 | /* The only known way to stop the gpu from accessing the hw context is | 288 | /* The only known way to stop the gpu from accessing the hw context is |
| @@ -327,16 +325,16 @@ i915_gem_context_get_hang_stats(struct drm_device *dev, | |||
| 327 | struct drm_file *file, | 325 | struct drm_file *file, |
| 328 | u32 id) | 326 | u32 id) |
| 329 | { | 327 | { |
| 330 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 331 | struct drm_i915_file_private *file_priv = file->driver_priv; | 328 | struct drm_i915_file_private *file_priv = file->driver_priv; |
| 332 | struct i915_hw_context *ctx; | 329 | struct i915_hw_context *ctx; |
| 333 | 330 | ||
| 334 | if (id == DEFAULT_CONTEXT_ID) | 331 | if (id == DEFAULT_CONTEXT_ID) |
| 335 | return &file_priv->hang_stats; | 332 | return &file_priv->hang_stats; |
| 336 | 333 | ||
| 337 | ctx = NULL; | 334 | if (!HAS_HW_CONTEXTS(dev)) |
| 338 | if (!dev_priv->hw_contexts_disabled) | 335 | return ERR_PTR(-ENOENT); |
| 339 | ctx = i915_gem_context_get(file->driver_priv, id); | 336 | |
| 337 | ctx = i915_gem_context_get(file->driver_priv, id); | ||
| 340 | if (ctx == NULL) | 338 | if (ctx == NULL) |
| 341 | return ERR_PTR(-ENOENT); | 339 | return ERR_PTR(-ENOENT); |
| 342 | 340 | ||
| @@ -502,8 +500,6 @@ static int do_switch(struct i915_hw_context *to) | |||
| 502 | * @ring: ring for which we'll execute the context switch | 500 | * @ring: ring for which we'll execute the context switch |
| 503 | * @file_priv: file_priv associated with the context, may be NULL | 501 | * @file_priv: file_priv associated with the context, may be NULL |
| 504 | * @id: context id number | 502 | * @id: context id number |
| 505 | * @seqno: sequence number by which the new context will be switched to | ||
| 506 | * @flags: | ||
| 507 | * | 503 | * |
| 508 | * The context life cycle is simple. The context refcount is incremented and | 504 | * The context life cycle is simple. The context refcount is incremented and |
| 509 | * decremented by 1 and create and destroy. If the context is in use by the GPU, | 505 | * decremented by 1 and create and destroy. If the context is in use by the GPU, |
| @@ -517,7 +513,7 @@ int i915_switch_context(struct intel_ring_buffer *ring, | |||
| 517 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | 513 | struct drm_i915_private *dev_priv = ring->dev->dev_private; |
| 518 | struct i915_hw_context *to; | 514 | struct i915_hw_context *to; |
| 519 | 515 | ||
| 520 | if (dev_priv->hw_contexts_disabled) | 516 | if (!HAS_HW_CONTEXTS(ring->dev)) |
| 521 | return 0; | 517 | return 0; |
| 522 | 518 | ||
| 523 | WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); | 519 | WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); |
| @@ -542,7 +538,6 @@ int i915_switch_context(struct intel_ring_buffer *ring, | |||
| 542 | int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, | 538 | int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, |
| 543 | struct drm_file *file) | 539 | struct drm_file *file) |
| 544 | { | 540 | { |
| 545 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 546 | struct drm_i915_gem_context_create *args = data; | 541 | struct drm_i915_gem_context_create *args = data; |
| 547 | struct drm_i915_file_private *file_priv = file->driver_priv; | 542 | struct drm_i915_file_private *file_priv = file->driver_priv; |
| 548 | struct i915_hw_context *ctx; | 543 | struct i915_hw_context *ctx; |
| @@ -551,7 +546,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, | |||
| 551 | if (!(dev->driver->driver_features & DRIVER_GEM)) | 546 | if (!(dev->driver->driver_features & DRIVER_GEM)) |
| 552 | return -ENODEV; | 547 | return -ENODEV; |
| 553 | 548 | ||
| 554 | if (dev_priv->hw_contexts_disabled) | 549 | if (!HAS_HW_CONTEXTS(dev)) |
| 555 | return -ENODEV; | 550 | return -ENODEV; |
| 556 | 551 | ||
| 557 | ret = i915_mutex_lock_interruptible(dev); | 552 | ret = i915_mutex_lock_interruptible(dev); |
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 8f3adc7d0dc8..2ca280f9ee53 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c | |||
| @@ -27,8 +27,10 @@ | |||
| 27 | */ | 27 | */ |
| 28 | 28 | ||
| 29 | #include <drm/drmP.h> | 29 | #include <drm/drmP.h> |
| 30 | #include "i915_drv.h" | ||
| 31 | #include <drm/i915_drm.h> | 30 | #include <drm/i915_drm.h> |
| 31 | |||
| 32 | #include "i915_drv.h" | ||
| 33 | #include "intel_drv.h" | ||
| 32 | #include "i915_trace.h" | 34 | #include "i915_trace.h" |
| 33 | 35 | ||
| 34 | static bool | 36 | static bool |
| @@ -53,6 +55,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm, | |||
| 53 | struct list_head eviction_list, unwind_list; | 55 | struct list_head eviction_list, unwind_list; |
| 54 | struct i915_vma *vma; | 56 | struct i915_vma *vma; |
| 55 | int ret = 0; | 57 | int ret = 0; |
| 58 | int pass = 0; | ||
| 56 | 59 | ||
| 57 | trace_i915_gem_evict(dev, min_size, alignment, mappable); | 60 | trace_i915_gem_evict(dev, min_size, alignment, mappable); |
| 58 | 61 | ||
| @@ -119,14 +122,24 @@ none: | |||
| 119 | /* Can we unpin some objects such as idle hw contents, | 122 | /* Can we unpin some objects such as idle hw contents, |
| 120 | * or pending flips? | 123 | * or pending flips? |
| 121 | */ | 124 | */ |
| 122 | ret = nonblocking ? -ENOSPC : i915_gpu_idle(dev); | 125 | if (nonblocking) |
| 123 | if (ret) | 126 | return -ENOSPC; |
| 124 | return ret; | ||
| 125 | 127 | ||
| 126 | /* Only idle the GPU and repeat the search once */ | 128 | /* Only idle the GPU and repeat the search once */ |
| 127 | i915_gem_retire_requests(dev); | 129 | if (pass++ == 0) { |
| 128 | nonblocking = true; | 130 | ret = i915_gpu_idle(dev); |
| 129 | goto search_again; | 131 | if (ret) |
| 132 | return ret; | ||
| 133 | |||
| 134 | i915_gem_retire_requests(dev); | ||
| 135 | goto search_again; | ||
| 136 | } | ||
| 137 | |||
| 138 | /* If we still have pending pageflip completions, drop | ||
| 139 | * back to userspace to give our workqueues time to | ||
| 140 | * acquire our locks and unpin the old scanouts. | ||
| 141 | */ | ||
| 142 | return intel_has_pending_fb_unpin(dev) ? -EAGAIN : -ENOSPC; | ||
| 130 | 143 | ||
| 131 | found: | 144 | found: |
| 132 | /* drm_mm doesn't allow any other other operations while | 145 | /* drm_mm doesn't allow any other other operations while |
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index a3ba9a8cd687..d269ecf46e26 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
| @@ -46,7 +46,7 @@ struct eb_vmas { | |||
| 46 | }; | 46 | }; |
| 47 | 47 | ||
| 48 | static struct eb_vmas * | 48 | static struct eb_vmas * |
| 49 | eb_create(struct drm_i915_gem_execbuffer2 *args, struct i915_address_space *vm) | 49 | eb_create(struct drm_i915_gem_execbuffer2 *args) |
| 50 | { | 50 | { |
| 51 | struct eb_vmas *eb = NULL; | 51 | struct eb_vmas *eb = NULL; |
| 52 | 52 | ||
| @@ -252,7 +252,7 @@ relocate_entry_cpu(struct drm_i915_gem_object *obj, | |||
| 252 | struct drm_device *dev = obj->base.dev; | 252 | struct drm_device *dev = obj->base.dev; |
| 253 | uint32_t page_offset = offset_in_page(reloc->offset); | 253 | uint32_t page_offset = offset_in_page(reloc->offset); |
| 254 | char *vaddr; | 254 | char *vaddr; |
| 255 | int ret = -EINVAL; | 255 | int ret; |
| 256 | 256 | ||
| 257 | ret = i915_gem_object_set_to_cpu_domain(obj, true); | 257 | ret = i915_gem_object_set_to_cpu_domain(obj, true); |
| 258 | if (ret) | 258 | if (ret) |
| @@ -287,7 +287,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj, | |||
| 287 | struct drm_i915_private *dev_priv = dev->dev_private; | 287 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 288 | uint32_t __iomem *reloc_entry; | 288 | uint32_t __iomem *reloc_entry; |
| 289 | void __iomem *reloc_page; | 289 | void __iomem *reloc_page; |
| 290 | int ret = -EINVAL; | 290 | int ret; |
| 291 | 291 | ||
| 292 | ret = i915_gem_object_set_to_gtt_domain(obj, true); | 292 | ret = i915_gem_object_set_to_gtt_domain(obj, true); |
| 293 | if (ret) | 293 | if (ret) |
| @@ -335,7 +335,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, | |||
| 335 | struct drm_i915_gem_object *target_i915_obj; | 335 | struct drm_i915_gem_object *target_i915_obj; |
| 336 | struct i915_vma *target_vma; | 336 | struct i915_vma *target_vma; |
| 337 | uint32_t target_offset; | 337 | uint32_t target_offset; |
| 338 | int ret = -EINVAL; | 338 | int ret; |
| 339 | 339 | ||
| 340 | /* we've already hold a reference to all valid objects */ | 340 | /* we've already hold a reference to all valid objects */ |
| 341 | target_vma = eb_get_vma(eb, reloc->target_handle); | 341 | target_vma = eb_get_vma(eb, reloc->target_handle); |
| @@ -344,7 +344,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, | |||
| 344 | target_i915_obj = target_vma->obj; | 344 | target_i915_obj = target_vma->obj; |
| 345 | target_obj = &target_vma->obj->base; | 345 | target_obj = &target_vma->obj->base; |
| 346 | 346 | ||
| 347 | target_offset = i915_gem_obj_ggtt_offset(target_i915_obj); | 347 | target_offset = target_vma->node.start; |
| 348 | 348 | ||
| 349 | /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and | 349 | /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and |
| 350 | * pipe_control writes because the gpu doesn't properly redirect them | 350 | * pipe_control writes because the gpu doesn't properly redirect them |
| @@ -365,7 +365,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, | |||
| 365 | (int) reloc->offset, | 365 | (int) reloc->offset, |
| 366 | reloc->read_domains, | 366 | reloc->read_domains, |
| 367 | reloc->write_domain); | 367 | reloc->write_domain); |
| 368 | return ret; | 368 | return -EINVAL; |
| 369 | } | 369 | } |
| 370 | if (unlikely((reloc->write_domain | reloc->read_domains) | 370 | if (unlikely((reloc->write_domain | reloc->read_domains) |
| 371 | & ~I915_GEM_GPU_DOMAINS)) { | 371 | & ~I915_GEM_GPU_DOMAINS)) { |
| @@ -376,7 +376,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, | |||
| 376 | (int) reloc->offset, | 376 | (int) reloc->offset, |
| 377 | reloc->read_domains, | 377 | reloc->read_domains, |
| 378 | reloc->write_domain); | 378 | reloc->write_domain); |
| 379 | return ret; | 379 | return -EINVAL; |
| 380 | } | 380 | } |
| 381 | 381 | ||
| 382 | target_obj->pending_read_domains |= reloc->read_domains; | 382 | target_obj->pending_read_domains |= reloc->read_domains; |
| @@ -396,14 +396,14 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, | |||
| 396 | obj, reloc->target_handle, | 396 | obj, reloc->target_handle, |
| 397 | (int) reloc->offset, | 397 | (int) reloc->offset, |
| 398 | (int) obj->base.size); | 398 | (int) obj->base.size); |
| 399 | return ret; | 399 | return -EINVAL; |
| 400 | } | 400 | } |
| 401 | if (unlikely(reloc->offset & 3)) { | 401 | if (unlikely(reloc->offset & 3)) { |
| 402 | DRM_DEBUG("Relocation not 4-byte aligned: " | 402 | DRM_DEBUG("Relocation not 4-byte aligned: " |
| 403 | "obj %p target %d offset %d.\n", | 403 | "obj %p target %d offset %d.\n", |
| 404 | obj, reloc->target_handle, | 404 | obj, reloc->target_handle, |
| 405 | (int) reloc->offset); | 405 | (int) reloc->offset); |
| 406 | return ret; | 406 | return -EINVAL; |
| 407 | } | 407 | } |
| 408 | 408 | ||
| 409 | /* We can't wait for rendering with pagefaults disabled */ | 409 | /* We can't wait for rendering with pagefaults disabled */ |
| @@ -491,8 +491,7 @@ i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma, | |||
| 491 | } | 491 | } |
| 492 | 492 | ||
| 493 | static int | 493 | static int |
| 494 | i915_gem_execbuffer_relocate(struct eb_vmas *eb, | 494 | i915_gem_execbuffer_relocate(struct eb_vmas *eb) |
| 495 | struct i915_address_space *vm) | ||
| 496 | { | 495 | { |
| 497 | struct i915_vma *vma; | 496 | struct i915_vma *vma; |
| 498 | int ret = 0; | 497 | int ret = 0; |
| @@ -901,6 +900,24 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec, | |||
| 901 | return 0; | 900 | return 0; |
| 902 | } | 901 | } |
| 903 | 902 | ||
| 903 | static int | ||
| 904 | i915_gem_validate_context(struct drm_device *dev, struct drm_file *file, | ||
| 905 | const u32 ctx_id) | ||
| 906 | { | ||
| 907 | struct i915_ctx_hang_stats *hs; | ||
| 908 | |||
| 909 | hs = i915_gem_context_get_hang_stats(dev, file, ctx_id); | ||
| 910 | if (IS_ERR(hs)) | ||
| 911 | return PTR_ERR(hs); | ||
| 912 | |||
| 913 | if (hs->banned) { | ||
| 914 | DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id); | ||
| 915 | return -EIO; | ||
| 916 | } | ||
| 917 | |||
| 918 | return 0; | ||
| 919 | } | ||
| 920 | |||
| 904 | static void | 921 | static void |
| 905 | i915_gem_execbuffer_move_to_active(struct list_head *vmas, | 922 | i915_gem_execbuffer_move_to_active(struct list_head *vmas, |
| 906 | struct intel_ring_buffer *ring) | 923 | struct intel_ring_buffer *ring) |
| @@ -980,8 +997,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
| 980 | struct drm_i915_gem_object *batch_obj; | 997 | struct drm_i915_gem_object *batch_obj; |
| 981 | struct drm_clip_rect *cliprects = NULL; | 998 | struct drm_clip_rect *cliprects = NULL; |
| 982 | struct intel_ring_buffer *ring; | 999 | struct intel_ring_buffer *ring; |
| 983 | struct i915_ctx_hang_stats *hs; | 1000 | const u32 ctx_id = i915_execbuffer2_get_context_id(*args); |
| 984 | u32 ctx_id = i915_execbuffer2_get_context_id(*args); | ||
| 985 | u32 exec_start, exec_len; | 1001 | u32 exec_start, exec_len; |
| 986 | u32 mask, flags; | 1002 | u32 mask, flags; |
| 987 | int ret, mode, i; | 1003 | int ret, mode, i; |
| @@ -1108,6 +1124,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
| 1108 | } | 1124 | } |
| 1109 | } | 1125 | } |
| 1110 | 1126 | ||
| 1127 | intel_runtime_pm_get(dev_priv); | ||
| 1128 | |||
| 1111 | ret = i915_mutex_lock_interruptible(dev); | 1129 | ret = i915_mutex_lock_interruptible(dev); |
| 1112 | if (ret) | 1130 | if (ret) |
| 1113 | goto pre_mutex_err; | 1131 | goto pre_mutex_err; |
| @@ -1118,7 +1136,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
| 1118 | goto pre_mutex_err; | 1136 | goto pre_mutex_err; |
| 1119 | } | 1137 | } |
| 1120 | 1138 | ||
| 1121 | eb = eb_create(args, vm); | 1139 | ret = i915_gem_validate_context(dev, file, ctx_id); |
| 1140 | if (ret) { | ||
| 1141 | mutex_unlock(&dev->struct_mutex); | ||
| 1142 | goto pre_mutex_err; | ||
| 1143 | } | ||
| 1144 | |||
| 1145 | eb = eb_create(args); | ||
| 1122 | if (eb == NULL) { | 1146 | if (eb == NULL) { |
| 1123 | mutex_unlock(&dev->struct_mutex); | 1147 | mutex_unlock(&dev->struct_mutex); |
| 1124 | ret = -ENOMEM; | 1148 | ret = -ENOMEM; |
| @@ -1141,7 +1165,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
| 1141 | 1165 | ||
| 1142 | /* The objects are in their final locations, apply the relocations. */ | 1166 | /* The objects are in their final locations, apply the relocations. */ |
| 1143 | if (need_relocs) | 1167 | if (need_relocs) |
| 1144 | ret = i915_gem_execbuffer_relocate(eb, vm); | 1168 | ret = i915_gem_execbuffer_relocate(eb); |
| 1145 | if (ret) { | 1169 | if (ret) { |
| 1146 | if (ret == -EFAULT) { | 1170 | if (ret == -EFAULT) { |
| 1147 | ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring, | 1171 | ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring, |
| @@ -1170,17 +1194,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
| 1170 | if (ret) | 1194 | if (ret) |
| 1171 | goto err; | 1195 | goto err; |
| 1172 | 1196 | ||
| 1173 | hs = i915_gem_context_get_hang_stats(dev, file, ctx_id); | ||
| 1174 | if (IS_ERR(hs)) { | ||
| 1175 | ret = PTR_ERR(hs); | ||
| 1176 | goto err; | ||
| 1177 | } | ||
| 1178 | |||
| 1179 | if (hs->banned) { | ||
| 1180 | ret = -EIO; | ||
| 1181 | goto err; | ||
| 1182 | } | ||
| 1183 | |||
| 1184 | ret = i915_switch_context(ring, file, ctx_id); | 1197 | ret = i915_switch_context(ring, file, ctx_id); |
| 1185 | if (ret) | 1198 | if (ret) |
| 1186 | goto err; | 1199 | goto err; |
| @@ -1242,6 +1255,10 @@ err: | |||
| 1242 | 1255 | ||
| 1243 | pre_mutex_err: | 1256 | pre_mutex_err: |
| 1244 | kfree(cliprects); | 1257 | kfree(cliprects); |
| 1258 | |||
| 1259 | /* intel_gpu_busy should also get a ref, so it will free when the device | ||
| 1260 | * is really idle. */ | ||
| 1261 | intel_runtime_pm_put(dev_priv); | ||
| 1245 | return ret; | 1262 | return ret; |
| 1246 | } | 1263 | } |
| 1247 | 1264 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 3540569948db..40a2b36b276b 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
| @@ -240,10 +240,16 @@ static int gen8_ppgtt_enable(struct drm_device *dev) | |||
| 240 | for_each_ring(ring, dev_priv, j) { | 240 | for_each_ring(ring, dev_priv, j) { |
| 241 | ret = gen8_write_pdp(ring, i, addr); | 241 | ret = gen8_write_pdp(ring, i, addr); |
| 242 | if (ret) | 242 | if (ret) |
| 243 | return ret; | 243 | goto err_out; |
| 244 | } | 244 | } |
| 245 | } | 245 | } |
| 246 | return 0; | 246 | return 0; |
| 247 | |||
| 248 | err_out: | ||
| 249 | for_each_ring(ring, dev_priv, j) | ||
| 250 | I915_WRITE(RING_MODE_GEN7(ring), | ||
| 251 | _MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE)); | ||
| 252 | return ret; | ||
| 247 | } | 253 | } |
| 248 | 254 | ||
| 249 | static void gen8_ppgtt_clear_range(struct i915_address_space *vm, | 255 | static void gen8_ppgtt_clear_range(struct i915_address_space *vm, |
| @@ -293,23 +299,23 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm, | |||
| 293 | unsigned act_pte = first_entry % GEN8_PTES_PER_PAGE; | 299 | unsigned act_pte = first_entry % GEN8_PTES_PER_PAGE; |
| 294 | struct sg_page_iter sg_iter; | 300 | struct sg_page_iter sg_iter; |
| 295 | 301 | ||
| 296 | pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]); | 302 | pt_vaddr = NULL; |
| 297 | for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) { | 303 | for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) { |
| 298 | dma_addr_t page_addr; | 304 | if (pt_vaddr == NULL) |
| 305 | pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]); | ||
| 299 | 306 | ||
| 300 | page_addr = sg_dma_address(sg_iter.sg) + | 307 | pt_vaddr[act_pte] = |
| 301 | (sg_iter.sg_pgoffset << PAGE_SHIFT); | 308 | gen8_pte_encode(sg_page_iter_dma_address(&sg_iter), |
| 302 | pt_vaddr[act_pte] = gen8_pte_encode(page_addr, cache_level, | 309 | cache_level, true); |
| 303 | true); | ||
| 304 | if (++act_pte == GEN8_PTES_PER_PAGE) { | 310 | if (++act_pte == GEN8_PTES_PER_PAGE) { |
| 305 | kunmap_atomic(pt_vaddr); | 311 | kunmap_atomic(pt_vaddr); |
| 312 | pt_vaddr = NULL; | ||
| 306 | act_pt++; | 313 | act_pt++; |
| 307 | pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]); | ||
| 308 | act_pte = 0; | 314 | act_pte = 0; |
| 309 | |||
| 310 | } | 315 | } |
| 311 | } | 316 | } |
| 312 | kunmap_atomic(pt_vaddr); | 317 | if (pt_vaddr) |
| 318 | kunmap_atomic(pt_vaddr); | ||
| 313 | } | 319 | } |
| 314 | 320 | ||
| 315 | static void gen8_ppgtt_cleanup(struct i915_address_space *vm) | 321 | static void gen8_ppgtt_cleanup(struct i915_address_space *vm) |
| @@ -318,6 +324,8 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm) | |||
| 318 | container_of(vm, struct i915_hw_ppgtt, base); | 324 | container_of(vm, struct i915_hw_ppgtt, base); |
| 319 | int i, j; | 325 | int i, j; |
| 320 | 326 | ||
| 327 | drm_mm_takedown(&vm->mm); | ||
| 328 | |||
| 321 | for (i = 0; i < ppgtt->num_pd_pages ; i++) { | 329 | for (i = 0; i < ppgtt->num_pd_pages ; i++) { |
| 322 | if (ppgtt->pd_dma_addr[i]) { | 330 | if (ppgtt->pd_dma_addr[i]) { |
| 323 | pci_unmap_page(ppgtt->base.dev->pdev, | 331 | pci_unmap_page(ppgtt->base.dev->pdev, |
| @@ -381,6 +389,8 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size) | |||
| 381 | ppgtt->base.clear_range = gen8_ppgtt_clear_range; | 389 | ppgtt->base.clear_range = gen8_ppgtt_clear_range; |
| 382 | ppgtt->base.insert_entries = gen8_ppgtt_insert_entries; | 390 | ppgtt->base.insert_entries = gen8_ppgtt_insert_entries; |
| 383 | ppgtt->base.cleanup = gen8_ppgtt_cleanup; | 391 | ppgtt->base.cleanup = gen8_ppgtt_cleanup; |
| 392 | ppgtt->base.start = 0; | ||
| 393 | ppgtt->base.total = ppgtt->num_pt_pages * GEN8_PTES_PER_PAGE * PAGE_SIZE; | ||
| 384 | 394 | ||
| 385 | BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS); | 395 | BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS); |
| 386 | 396 | ||
| @@ -573,21 +583,23 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, | |||
| 573 | unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES; | 583 | unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES; |
| 574 | struct sg_page_iter sg_iter; | 584 | struct sg_page_iter sg_iter; |
| 575 | 585 | ||
| 576 | pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]); | 586 | pt_vaddr = NULL; |
| 577 | for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) { | 587 | for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) { |
| 578 | dma_addr_t page_addr; | 588 | if (pt_vaddr == NULL) |
| 589 | pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]); | ||
| 579 | 590 | ||
| 580 | page_addr = sg_page_iter_dma_address(&sg_iter); | 591 | pt_vaddr[act_pte] = |
| 581 | pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level, true); | 592 | vm->pte_encode(sg_page_iter_dma_address(&sg_iter), |
| 593 | cache_level, true); | ||
| 582 | if (++act_pte == I915_PPGTT_PT_ENTRIES) { | 594 | if (++act_pte == I915_PPGTT_PT_ENTRIES) { |
| 583 | kunmap_atomic(pt_vaddr); | 595 | kunmap_atomic(pt_vaddr); |
| 596 | pt_vaddr = NULL; | ||
| 584 | act_pt++; | 597 | act_pt++; |
| 585 | pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]); | ||
| 586 | act_pte = 0; | 598 | act_pte = 0; |
| 587 | |||
| 588 | } | 599 | } |
| 589 | } | 600 | } |
| 590 | kunmap_atomic(pt_vaddr); | 601 | if (pt_vaddr) |
| 602 | kunmap_atomic(pt_vaddr); | ||
| 591 | } | 603 | } |
| 592 | 604 | ||
| 593 | static void gen6_ppgtt_cleanup(struct i915_address_space *vm) | 605 | static void gen6_ppgtt_cleanup(struct i915_address_space *vm) |
| @@ -632,6 +644,8 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) | |||
| 632 | ppgtt->base.insert_entries = gen6_ppgtt_insert_entries; | 644 | ppgtt->base.insert_entries = gen6_ppgtt_insert_entries; |
| 633 | ppgtt->base.cleanup = gen6_ppgtt_cleanup; | 645 | ppgtt->base.cleanup = gen6_ppgtt_cleanup; |
| 634 | ppgtt->base.scratch = dev_priv->gtt.base.scratch; | 646 | ppgtt->base.scratch = dev_priv->gtt.base.scratch; |
| 647 | ppgtt->base.start = 0; | ||
| 648 | ppgtt->base.total = GEN6_PPGTT_PD_ENTRIES * I915_PPGTT_PT_ENTRIES * PAGE_SIZE; | ||
| 635 | ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *), | 649 | ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *), |
| 636 | GFP_KERNEL); | 650 | GFP_KERNEL); |
| 637 | if (!ppgtt->pt_pages) | 651 | if (!ppgtt->pt_pages) |
| @@ -1124,7 +1138,6 @@ void i915_gem_setup_global_gtt(struct drm_device *dev, | |||
| 1124 | if (ret) | 1138 | if (ret) |
| 1125 | DRM_DEBUG_KMS("Reservation failed\n"); | 1139 | DRM_DEBUG_KMS("Reservation failed\n"); |
| 1126 | obj->has_global_gtt_mapping = 1; | 1140 | obj->has_global_gtt_mapping = 1; |
| 1127 | list_add(&vma->vma_link, &obj->vma_list); | ||
| 1128 | } | 1141 | } |
| 1129 | 1142 | ||
| 1130 | dev_priv->gtt.base.start = start; | 1143 | dev_priv->gtt.base.start = start; |
| @@ -1400,6 +1413,8 @@ static void gen6_gmch_remove(struct i915_address_space *vm) | |||
| 1400 | { | 1413 | { |
| 1401 | 1414 | ||
| 1402 | struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base); | 1415 | struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base); |
| 1416 | |||
| 1417 | drm_mm_takedown(&vm->mm); | ||
| 1403 | iounmap(gtt->gsm); | 1418 | iounmap(gtt->gsm); |
| 1404 | teardown_scratch_page(vm->dev); | 1419 | teardown_scratch_page(vm->dev); |
| 1405 | } | 1420 | } |
| @@ -1425,6 +1440,9 @@ static int i915_gmch_probe(struct drm_device *dev, | |||
| 1425 | dev_priv->gtt.base.clear_range = i915_ggtt_clear_range; | 1440 | dev_priv->gtt.base.clear_range = i915_ggtt_clear_range; |
| 1426 | dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries; | 1441 | dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries; |
| 1427 | 1442 | ||
| 1443 | if (unlikely(dev_priv->gtt.do_idle_maps)) | ||
| 1444 | DRM_INFO("applying Ironlake quirks for intel_iommu\n"); | ||
| 1445 | |||
| 1428 | return 0; | 1446 | return 0; |
| 1429 | } | 1447 | } |
| 1430 | 1448 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index d284d892ed94..1a24e84f2315 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c | |||
| @@ -250,7 +250,7 @@ i915_pages_create_for_stolen(struct drm_device *dev, | |||
| 250 | } | 250 | } |
| 251 | 251 | ||
| 252 | sg = st->sgl; | 252 | sg = st->sgl; |
| 253 | sg->offset = offset; | 253 | sg->offset = 0; |
| 254 | sg->length = size; | 254 | sg->length = size; |
| 255 | 255 | ||
| 256 | sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset; | 256 | sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset; |
| @@ -420,6 +420,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, | |||
| 420 | 420 | ||
| 421 | list_add_tail(&obj->global_list, &dev_priv->mm.bound_list); | 421 | list_add_tail(&obj->global_list, &dev_priv->mm.bound_list); |
| 422 | list_add_tail(&vma->mm_list, &ggtt->inactive_list); | 422 | list_add_tail(&vma->mm_list, &ggtt->inactive_list); |
| 423 | i915_gem_object_pin_pages(obj); | ||
| 423 | 424 | ||
| 424 | return obj; | 425 | return obj; |
| 425 | 426 | ||
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 79dcb8f896c6..d7fd2fd2f0a5 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c | |||
| @@ -239,6 +239,9 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m, | |||
| 239 | unsigned ring) | 239 | unsigned ring) |
| 240 | { | 240 | { |
| 241 | BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */ | 241 | BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */ |
| 242 | if (!error->ring[ring].valid) | ||
| 243 | return; | ||
| 244 | |||
| 242 | err_printf(m, "%s command stream:\n", ring_str(ring)); | 245 | err_printf(m, "%s command stream:\n", ring_str(ring)); |
| 243 | err_printf(m, " HEAD: 0x%08x\n", error->head[ring]); | 246 | err_printf(m, " HEAD: 0x%08x\n", error->head[ring]); |
| 244 | err_printf(m, " TAIL: 0x%08x\n", error->tail[ring]); | 247 | err_printf(m, " TAIL: 0x%08x\n", error->tail[ring]); |
| @@ -247,12 +250,11 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m, | |||
| 247 | err_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]); | 250 | err_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]); |
| 248 | err_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]); | 251 | err_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]); |
| 249 | err_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]); | 252 | err_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]); |
| 250 | if (ring == RCS && INTEL_INFO(dev)->gen >= 4) | 253 | if (INTEL_INFO(dev)->gen >= 4) { |
| 251 | err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr); | 254 | err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr[ring]); |
| 252 | if (INTEL_INFO(dev)->gen >= 4) | ||
| 253 | err_printf(m, " BB_STATE: 0x%08x\n", error->bbstate[ring]); | 255 | err_printf(m, " BB_STATE: 0x%08x\n", error->bbstate[ring]); |
| 254 | if (INTEL_INFO(dev)->gen >= 4) | ||
| 255 | err_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]); | 256 | err_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]); |
| 257 | } | ||
| 256 | err_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]); | 258 | err_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]); |
| 257 | err_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]); | 259 | err_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]); |
| 258 | if (INTEL_INFO(dev)->gen >= 6) { | 260 | if (INTEL_INFO(dev)->gen >= 6) { |
| @@ -294,7 +296,6 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, | |||
| 294 | struct drm_device *dev = error_priv->dev; | 296 | struct drm_device *dev = error_priv->dev; |
| 295 | drm_i915_private_t *dev_priv = dev->dev_private; | 297 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 296 | struct drm_i915_error_state *error = error_priv->error; | 298 | struct drm_i915_error_state *error = error_priv->error; |
| 297 | struct intel_ring_buffer *ring; | ||
| 298 | int i, j, page, offset, elt; | 299 | int i, j, page, offset, elt; |
| 299 | 300 | ||
| 300 | if (!error) { | 301 | if (!error) { |
| @@ -329,7 +330,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, | |||
| 329 | if (INTEL_INFO(dev)->gen == 7) | 330 | if (INTEL_INFO(dev)->gen == 7) |
| 330 | err_printf(m, "ERR_INT: 0x%08x\n", error->err_int); | 331 | err_printf(m, "ERR_INT: 0x%08x\n", error->err_int); |
| 331 | 332 | ||
| 332 | for_each_ring(ring, dev_priv, i) | 333 | for (i = 0; i < ARRAY_SIZE(error->ring); i++) |
| 333 | i915_ring_error_state(m, dev, error, i); | 334 | i915_ring_error_state(m, dev, error, i); |
| 334 | 335 | ||
| 335 | if (error->active_bo) | 336 | if (error->active_bo) |
| @@ -386,8 +387,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, | |||
| 386 | } | 387 | } |
| 387 | } | 388 | } |
| 388 | 389 | ||
| 389 | obj = error->ring[i].ctx; | 390 | if ((obj = error->ring[i].ctx)) { |
| 390 | if (obj) { | ||
| 391 | err_printf(m, "%s --- HW Context = 0x%08x\n", | 391 | err_printf(m, "%s --- HW Context = 0x%08x\n", |
| 392 | dev_priv->ring[i].name, | 392 | dev_priv->ring[i].name, |
| 393 | obj->gtt_offset); | 393 | obj->gtt_offset); |
| @@ -668,7 +668,8 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, | |||
| 668 | return NULL; | 668 | return NULL; |
| 669 | 669 | ||
| 670 | obj = ring->scratch.obj; | 670 | obj = ring->scratch.obj; |
| 671 | if (acthd >= i915_gem_obj_ggtt_offset(obj) && | 671 | if (obj != NULL && |
| 672 | acthd >= i915_gem_obj_ggtt_offset(obj) && | ||
| 672 | acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size) | 673 | acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size) |
| 673 | return i915_error_object_create(dev_priv, obj); | 674 | return i915_error_object_create(dev_priv, obj); |
| 674 | } | 675 | } |
| @@ -725,8 +726,9 @@ static void i915_record_ring_state(struct drm_device *dev, | |||
| 725 | error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base)); | 726 | error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base)); |
| 726 | error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base)); | 727 | error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base)); |
| 727 | error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base)); | 728 | error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base)); |
| 728 | if (ring->id == RCS) | 729 | error->bbaddr[ring->id] = I915_READ(RING_BBADDR(ring->mmio_base)); |
| 729 | error->bbaddr = I915_READ64(BB_ADDR); | 730 | if (INTEL_INFO(dev)->gen >= 8) |
| 731 | error->bbaddr[ring->id] |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32; | ||
| 730 | error->bbstate[ring->id] = I915_READ(RING_BBSTATE(ring->mmio_base)); | 732 | error->bbstate[ring->id] = I915_READ(RING_BBSTATE(ring->mmio_base)); |
| 731 | } else { | 733 | } else { |
| 732 | error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX); | 734 | error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX); |
| @@ -775,11 +777,17 @@ static void i915_gem_record_rings(struct drm_device *dev, | |||
| 775 | struct drm_i915_error_state *error) | 777 | struct drm_i915_error_state *error) |
| 776 | { | 778 | { |
| 777 | struct drm_i915_private *dev_priv = dev->dev_private; | 779 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 778 | struct intel_ring_buffer *ring; | ||
| 779 | struct drm_i915_gem_request *request; | 780 | struct drm_i915_gem_request *request; |
| 780 | int i, count; | 781 | int i, count; |
| 781 | 782 | ||
| 782 | for_each_ring(ring, dev_priv, i) { | 783 | for (i = 0; i < I915_NUM_RINGS; i++) { |
| 784 | struct intel_ring_buffer *ring = &dev_priv->ring[i]; | ||
| 785 | |||
| 786 | if (ring->dev == NULL) | ||
| 787 | continue; | ||
| 788 | |||
| 789 | error->ring[i].valid = true; | ||
| 790 | |||
| 783 | i915_record_ring_state(dev, error, ring); | 791 | i915_record_ring_state(dev, error, ring); |
| 784 | 792 | ||
| 785 | error->ring[i].batchbuffer = | 793 | error->ring[i].batchbuffer = |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index f13d5edc39d5..17d8fcb1b6f7 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
| @@ -62,7 +62,7 @@ static const u32 hpd_mask_i915[] = { | |||
| 62 | [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN | 62 | [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN |
| 63 | }; | 63 | }; |
| 64 | 64 | ||
| 65 | static const u32 hpd_status_gen4[] = { | 65 | static const u32 hpd_status_g4x[] = { |
| 66 | [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, | 66 | [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, |
| 67 | [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X, | 67 | [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X, |
| 68 | [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X, | 68 | [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X, |
| @@ -600,7 +600,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) | |||
| 600 | * Cook up a vblank counter by also checking the pixel | 600 | * Cook up a vblank counter by also checking the pixel |
| 601 | * counter against vblank start. | 601 | * counter against vblank start. |
| 602 | */ | 602 | */ |
| 603 | return ((high1 << 8) | low) + (pixel >= vbl_start); | 603 | return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff; |
| 604 | } | 604 | } |
| 605 | 605 | ||
| 606 | static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) | 606 | static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) |
| @@ -621,36 +621,15 @@ static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) | |||
| 621 | #define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__)) | 621 | #define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__)) |
| 622 | #define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__)) | 622 | #define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__)) |
| 623 | 623 | ||
| 624 | static bool intel_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe) | 624 | static bool ilk_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe) |
| 625 | { | 625 | { |
| 626 | struct drm_i915_private *dev_priv = dev->dev_private; | 626 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 627 | uint32_t status; | 627 | uint32_t status; |
| 628 | int reg; | ||
| 629 | 628 | ||
| 630 | if (IS_VALLEYVIEW(dev)) { | 629 | if (INTEL_INFO(dev)->gen < 7) { |
| 631 | status = pipe == PIPE_A ? | ||
| 632 | I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT : | ||
| 633 | I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; | ||
| 634 | |||
| 635 | reg = VLV_ISR; | ||
| 636 | } else if (IS_GEN2(dev)) { | ||
| 637 | status = pipe == PIPE_A ? | ||
| 638 | I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT : | ||
| 639 | I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; | ||
| 640 | |||
| 641 | reg = ISR; | ||
| 642 | } else if (INTEL_INFO(dev)->gen < 5) { | ||
| 643 | status = pipe == PIPE_A ? | ||
| 644 | I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT : | ||
| 645 | I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; | ||
| 646 | |||
| 647 | reg = ISR; | ||
| 648 | } else if (INTEL_INFO(dev)->gen < 7) { | ||
| 649 | status = pipe == PIPE_A ? | 630 | status = pipe == PIPE_A ? |
| 650 | DE_PIPEA_VBLANK : | 631 | DE_PIPEA_VBLANK : |
| 651 | DE_PIPEB_VBLANK; | 632 | DE_PIPEB_VBLANK; |
| 652 | |||
| 653 | reg = DEISR; | ||
| 654 | } else { | 633 | } else { |
| 655 | switch (pipe) { | 634 | switch (pipe) { |
| 656 | default: | 635 | default: |
| @@ -664,18 +643,14 @@ static bool intel_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe) | |||
| 664 | status = DE_PIPEC_VBLANK_IVB; | 643 | status = DE_PIPEC_VBLANK_IVB; |
| 665 | break; | 644 | break; |
| 666 | } | 645 | } |
| 667 | |||
| 668 | reg = DEISR; | ||
| 669 | } | 646 | } |
| 670 | 647 | ||
| 671 | if (IS_GEN2(dev)) | 648 | return __raw_i915_read32(dev_priv, DEISR) & status; |
| 672 | return __raw_i915_read16(dev_priv, reg) & status; | ||
| 673 | else | ||
| 674 | return __raw_i915_read32(dev_priv, reg) & status; | ||
| 675 | } | 649 | } |
| 676 | 650 | ||
| 677 | static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, | 651 | static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, |
| 678 | int *vpos, int *hpos, ktime_t *stime, ktime_t *etime) | 652 | unsigned int flags, int *vpos, int *hpos, |
| 653 | ktime_t *stime, ktime_t *etime) | ||
| 679 | { | 654 | { |
| 680 | struct drm_i915_private *dev_priv = dev->dev_private; | 655 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 681 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; | 656 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; |
| @@ -698,6 +673,12 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, | |||
| 698 | vbl_start = mode->crtc_vblank_start; | 673 | vbl_start = mode->crtc_vblank_start; |
| 699 | vbl_end = mode->crtc_vblank_end; | 674 | vbl_end = mode->crtc_vblank_end; |
| 700 | 675 | ||
| 676 | if (mode->flags & DRM_MODE_FLAG_INTERLACE) { | ||
| 677 | vbl_start = DIV_ROUND_UP(vbl_start, 2); | ||
| 678 | vbl_end /= 2; | ||
| 679 | vtotal /= 2; | ||
| 680 | } | ||
| 681 | |||
| 701 | ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE; | 682 | ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE; |
| 702 | 683 | ||
| 703 | /* | 684 | /* |
| @@ -722,17 +703,42 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, | |||
| 722 | else | 703 | else |
| 723 | position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; | 704 | position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; |
| 724 | 705 | ||
| 725 | /* | 706 | if (HAS_PCH_SPLIT(dev)) { |
| 726 | * The scanline counter increments at the leading edge | 707 | /* |
| 727 | * of hsync, ie. it completely misses the active portion | 708 | * The scanline counter increments at the leading edge |
| 728 | * of the line. Fix up the counter at both edges of vblank | 709 | * of hsync, ie. it completely misses the active portion |
| 729 | * to get a more accurate picture whether we're in vblank | 710 | * of the line. Fix up the counter at both edges of vblank |
| 730 | * or not. | 711 | * to get a more accurate picture whether we're in vblank |
| 731 | */ | 712 | * or not. |
| 732 | in_vbl = intel_pipe_in_vblank_locked(dev, pipe); | 713 | */ |
| 733 | if ((in_vbl && position == vbl_start - 1) || | 714 | in_vbl = ilk_pipe_in_vblank_locked(dev, pipe); |
| 734 | (!in_vbl && position == vbl_end - 1)) | 715 | if ((in_vbl && position == vbl_start - 1) || |
| 735 | position = (position + 1) % vtotal; | 716 | (!in_vbl && position == vbl_end - 1)) |
| 717 | position = (position + 1) % vtotal; | ||
| 718 | } else { | ||
| 719 | /* | ||
| 720 | * ISR vblank status bits don't work the way we'd want | ||
| 721 | * them to work on non-PCH platforms (for | ||
| 722 | * ilk_pipe_in_vblank_locked()), and there doesn't | ||
| 723 | * appear any other way to determine if we're currently | ||
| 724 | * in vblank. | ||
| 725 | * | ||
| 726 | * Instead let's assume that we're already in vblank if | ||
| 727 | * we got called from the vblank interrupt and the | ||
| 728 | * scanline counter value indicates that we're on the | ||
| 729 | * line just prior to vblank start. This should result | ||
| 730 | * in the correct answer, unless the vblank interrupt | ||
| 731 | * delivery really got delayed for almost exactly one | ||
| 732 | * full frame/field. | ||
| 733 | */ | ||
| 734 | if (flags & DRM_CALLED_FROM_VBLIRQ && | ||
| 735 | position == vbl_start - 1) { | ||
| 736 | position = (position + 1) % vtotal; | ||
| 737 | |||
| 738 | /* Signal this correction as "applied". */ | ||
| 739 | ret |= 0x8; | ||
| 740 | } | ||
| 741 | } | ||
| 736 | } else { | 742 | } else { |
| 737 | /* Have access to pixelcount since start of frame. | 743 | /* Have access to pixelcount since start of frame. |
| 738 | * We can split this into vertical and horizontal | 744 | * We can split this into vertical and horizontal |
| @@ -809,7 +815,8 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe, | |||
| 809 | /* Helper routine in DRM core does all the work: */ | 815 | /* Helper routine in DRM core does all the work: */ |
| 810 | return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, | 816 | return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, |
| 811 | vblank_time, flags, | 817 | vblank_time, flags, |
| 812 | crtc); | 818 | crtc, |
| 819 | &to_intel_crtc(crtc)->config.adjusted_mode); | ||
| 813 | } | 820 | } |
| 814 | 821 | ||
| 815 | static bool intel_hpd_irq_event(struct drm_device *dev, | 822 | static bool intel_hpd_irq_event(struct drm_device *dev, |
| @@ -1015,10 +1022,8 @@ static void gen6_pm_rps_work(struct work_struct *work) | |||
| 1015 | /* sysfs frequency interfaces may have snuck in while servicing the | 1022 | /* sysfs frequency interfaces may have snuck in while servicing the |
| 1016 | * interrupt | 1023 | * interrupt |
| 1017 | */ | 1024 | */ |
| 1018 | if (new_delay < (int)dev_priv->rps.min_delay) | 1025 | new_delay = clamp_t(int, new_delay, |
| 1019 | new_delay = dev_priv->rps.min_delay; | 1026 | dev_priv->rps.min_delay, dev_priv->rps.max_delay); |
| 1020 | if (new_delay > (int)dev_priv->rps.max_delay) | ||
| 1021 | new_delay = dev_priv->rps.max_delay; | ||
| 1022 | dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay; | 1027 | dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay; |
| 1023 | 1028 | ||
| 1024 | if (IS_VALLEYVIEW(dev_priv->dev)) | 1029 | if (IS_VALLEYVIEW(dev_priv->dev)) |
| @@ -1235,9 +1240,10 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev, | |||
| 1235 | spin_lock(&dev_priv->irq_lock); | 1240 | spin_lock(&dev_priv->irq_lock); |
| 1236 | for (i = 1; i < HPD_NUM_PINS; i++) { | 1241 | for (i = 1; i < HPD_NUM_PINS; i++) { |
| 1237 | 1242 | ||
| 1238 | WARN(((hpd[i] & hotplug_trigger) && | 1243 | WARN_ONCE(hpd[i] & hotplug_trigger && |
| 1239 | dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED), | 1244 | dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED, |
| 1240 | "Received HPD interrupt although disabled\n"); | 1245 | "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n", |
| 1246 | hotplug_trigger, i, hpd[i]); | ||
| 1241 | 1247 | ||
| 1242 | if (!(hpd[i] & hotplug_trigger) || | 1248 | if (!(hpd[i] & hotplug_trigger) || |
| 1243 | dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED) | 1249 | dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED) |
| @@ -1474,6 +1480,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) | |||
| 1474 | 1480 | ||
| 1475 | intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); | 1481 | intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); |
| 1476 | 1482 | ||
| 1483 | if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) | ||
| 1484 | dp_aux_irq_handler(dev); | ||
| 1485 | |||
| 1477 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); | 1486 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); |
| 1478 | I915_READ(PORT_HOTPLUG_STAT); | 1487 | I915_READ(PORT_HOTPLUG_STAT); |
| 1479 | } | 1488 | } |
| @@ -1993,7 +2002,7 @@ static void i915_error_work_func(struct work_struct *work) | |||
| 1993 | kobject_uevent_env(&dev->primary->kdev->kobj, | 2002 | kobject_uevent_env(&dev->primary->kdev->kobj, |
| 1994 | KOBJ_CHANGE, reset_done_event); | 2003 | KOBJ_CHANGE, reset_done_event); |
| 1995 | } else { | 2004 | } else { |
| 1996 | atomic_set(&error->reset_counter, I915_WEDGED); | 2005 | atomic_set_mask(I915_WEDGED, &error->reset_counter); |
| 1997 | } | 2006 | } |
| 1998 | 2007 | ||
| 1999 | /* | 2008 | /* |
| @@ -3140,10 +3149,10 @@ static int i8xx_irq_postinstall(struct drm_device *dev) | |||
| 3140 | * Returns true when a page flip has completed. | 3149 | * Returns true when a page flip has completed. |
| 3141 | */ | 3150 | */ |
| 3142 | static bool i8xx_handle_vblank(struct drm_device *dev, | 3151 | static bool i8xx_handle_vblank(struct drm_device *dev, |
| 3143 | int pipe, u16 iir) | 3152 | int plane, int pipe, u32 iir) |
| 3144 | { | 3153 | { |
| 3145 | drm_i915_private_t *dev_priv = dev->dev_private; | 3154 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 3146 | u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe); | 3155 | u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); |
| 3147 | 3156 | ||
| 3148 | if (!drm_handle_vblank(dev, pipe)) | 3157 | if (!drm_handle_vblank(dev, pipe)) |
| 3149 | return false; | 3158 | return false; |
| @@ -3151,7 +3160,7 @@ static bool i8xx_handle_vblank(struct drm_device *dev, | |||
| 3151 | if ((iir & flip_pending) == 0) | 3160 | if ((iir & flip_pending) == 0) |
| 3152 | return false; | 3161 | return false; |
| 3153 | 3162 | ||
| 3154 | intel_prepare_page_flip(dev, pipe); | 3163 | intel_prepare_page_flip(dev, plane); |
| 3155 | 3164 | ||
| 3156 | /* We detect FlipDone by looking for the change in PendingFlip from '1' | 3165 | /* We detect FlipDone by looking for the change in PendingFlip from '1' |
| 3157 | * to '0' on the following vblank, i.e. IIR has the Pendingflip | 3166 | * to '0' on the following vblank, i.e. IIR has the Pendingflip |
| @@ -3220,9 +3229,13 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) | |||
| 3220 | notify_ring(dev, &dev_priv->ring[RCS]); | 3229 | notify_ring(dev, &dev_priv->ring[RCS]); |
| 3221 | 3230 | ||
| 3222 | for_each_pipe(pipe) { | 3231 | for_each_pipe(pipe) { |
| 3232 | int plane = pipe; | ||
| 3233 | if (HAS_FBC(dev)) | ||
| 3234 | plane = !plane; | ||
| 3235 | |||
| 3223 | if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && | 3236 | if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && |
| 3224 | i8xx_handle_vblank(dev, pipe, iir)) | 3237 | i8xx_handle_vblank(dev, plane, pipe, iir)) |
| 3225 | flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); | 3238 | flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); |
| 3226 | 3239 | ||
| 3227 | if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) | 3240 | if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) |
| 3228 | i9xx_pipe_crc_irq_handler(dev, pipe); | 3241 | i9xx_pipe_crc_irq_handler(dev, pipe); |
| @@ -3418,7 +3431,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) | |||
| 3418 | 3431 | ||
| 3419 | for_each_pipe(pipe) { | 3432 | for_each_pipe(pipe) { |
| 3420 | int plane = pipe; | 3433 | int plane = pipe; |
| 3421 | if (IS_MOBILE(dev)) | 3434 | if (HAS_FBC(dev)) |
| 3422 | plane = !plane; | 3435 | plane = !plane; |
| 3423 | 3436 | ||
| 3424 | if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && | 3437 | if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && |
| @@ -3655,7 +3668,11 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) | |||
| 3655 | hotplug_status); | 3668 | hotplug_status); |
| 3656 | 3669 | ||
| 3657 | intel_hpd_irq_handler(dev, hotplug_trigger, | 3670 | intel_hpd_irq_handler(dev, hotplug_trigger, |
| 3658 | IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915); | 3671 | IS_G4X(dev) ? hpd_status_g4x : hpd_status_i915); |
| 3672 | |||
| 3673 | if (IS_G4X(dev) && | ||
| 3674 | (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)) | ||
| 3675 | dp_aux_irq_handler(dev); | ||
| 3659 | 3676 | ||
| 3660 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); | 3677 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); |
| 3661 | I915_READ(PORT_HOTPLUG_STAT); | 3678 | I915_READ(PORT_HOTPLUG_STAT); |
| @@ -3893,8 +3910,8 @@ void hsw_pc8_disable_interrupts(struct drm_device *dev) | |||
| 3893 | dev_priv->pc8.regsave.gtier = I915_READ(GTIER); | 3910 | dev_priv->pc8.regsave.gtier = I915_READ(GTIER); |
| 3894 | dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR); | 3911 | dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR); |
| 3895 | 3912 | ||
| 3896 | ironlake_disable_display_irq(dev_priv, ~DE_PCH_EVENT_IVB); | 3913 | ironlake_disable_display_irq(dev_priv, 0xffffffff); |
| 3897 | ibx_disable_display_interrupt(dev_priv, ~SDE_HOTPLUG_MASK_CPT); | 3914 | ibx_disable_display_interrupt(dev_priv, 0xffffffff); |
| 3898 | ilk_disable_gt_irq(dev_priv, 0xffffffff); | 3915 | ilk_disable_gt_irq(dev_priv, 0xffffffff); |
| 3899 | snb_disable_pm_irq(dev_priv, 0xffffffff); | 3916 | snb_disable_pm_irq(dev_priv, 0xffffffff); |
| 3900 | 3917 | ||
| @@ -3908,34 +3925,26 @@ void hsw_pc8_restore_interrupts(struct drm_device *dev) | |||
| 3908 | { | 3925 | { |
| 3909 | struct drm_i915_private *dev_priv = dev->dev_private; | 3926 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 3910 | unsigned long irqflags; | 3927 | unsigned long irqflags; |
| 3911 | uint32_t val, expected; | 3928 | uint32_t val; |
| 3912 | 3929 | ||
| 3913 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | 3930 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
| 3914 | 3931 | ||
| 3915 | val = I915_READ(DEIMR); | 3932 | val = I915_READ(DEIMR); |
| 3916 | expected = ~DE_PCH_EVENT_IVB; | 3933 | WARN(val != 0xffffffff, "DEIMR is 0x%08x\n", val); |
| 3917 | WARN(val != expected, "DEIMR is 0x%08x, not 0x%08x\n", val, expected); | ||
| 3918 | 3934 | ||
| 3919 | val = I915_READ(SDEIMR) & ~SDE_HOTPLUG_MASK_CPT; | 3935 | val = I915_READ(SDEIMR); |
| 3920 | expected = ~SDE_HOTPLUG_MASK_CPT; | 3936 | WARN(val != 0xffffffff, "SDEIMR is 0x%08x\n", val); |
| 3921 | WARN(val != expected, "SDEIMR non-HPD bits are 0x%08x, not 0x%08x\n", | ||
| 3922 | val, expected); | ||
| 3923 | 3937 | ||
| 3924 | val = I915_READ(GTIMR); | 3938 | val = I915_READ(GTIMR); |
| 3925 | expected = 0xffffffff; | 3939 | WARN(val != 0xffffffff, "GTIMR is 0x%08x\n", val); |
| 3926 | WARN(val != expected, "GTIMR is 0x%08x, not 0x%08x\n", val, expected); | ||
| 3927 | 3940 | ||
| 3928 | val = I915_READ(GEN6_PMIMR); | 3941 | val = I915_READ(GEN6_PMIMR); |
| 3929 | expected = 0xffffffff; | 3942 | WARN(val != 0xffffffff, "GEN6_PMIMR is 0x%08x\n", val); |
| 3930 | WARN(val != expected, "GEN6_PMIMR is 0x%08x, not 0x%08x\n", val, | ||
| 3931 | expected); | ||
| 3932 | 3943 | ||
| 3933 | dev_priv->pc8.irqs_disabled = false; | 3944 | dev_priv->pc8.irqs_disabled = false; |
| 3934 | 3945 | ||
| 3935 | ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr); | 3946 | ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr); |
| 3936 | ibx_enable_display_interrupt(dev_priv, | 3947 | ibx_enable_display_interrupt(dev_priv, ~dev_priv->pc8.regsave.sdeimr); |
| 3937 | ~dev_priv->pc8.regsave.sdeimr & | ||
| 3938 | ~SDE_HOTPLUG_MASK_CPT); | ||
| 3939 | ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr); | 3948 | ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr); |
| 3940 | snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr); | 3949 | snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr); |
| 3941 | I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier); | 3950 | I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier); |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index ee2742122a02..a48b7cad6f11 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
| @@ -193,10 +193,13 @@ | |||
| 193 | #define MI_SCENE_COUNT (1 << 3) /* just increment scene count */ | 193 | #define MI_SCENE_COUNT (1 << 3) /* just increment scene count */ |
| 194 | #define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */ | 194 | #define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */ |
| 195 | #define MI_INVALIDATE_ISP (1 << 5) /* invalidate indirect state pointers */ | 195 | #define MI_INVALIDATE_ISP (1 << 5) /* invalidate indirect state pointers */ |
| 196 | #define MI_REPORT_HEAD MI_INSTR(0x07, 0) | ||
| 197 | #define MI_ARB_ON_OFF MI_INSTR(0x08, 0) | ||
| 198 | #define MI_ARB_ENABLE (1<<0) | ||
| 199 | #define MI_ARB_DISABLE (0<<0) | ||
| 196 | #define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0) | 200 | #define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0) |
| 197 | #define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0) | 201 | #define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0) |
| 198 | #define MI_SUSPEND_FLUSH_EN (1<<0) | 202 | #define MI_SUSPEND_FLUSH_EN (1<<0) |
| 199 | #define MI_REPORT_HEAD MI_INSTR(0x07, 0) | ||
| 200 | #define MI_OVERLAY_FLIP MI_INSTR(0x11, 0) | 203 | #define MI_OVERLAY_FLIP MI_INSTR(0x11, 0) |
| 201 | #define MI_OVERLAY_CONTINUE (0x0<<21) | 204 | #define MI_OVERLAY_CONTINUE (0x0<<21) |
| 202 | #define MI_OVERLAY_ON (0x1<<21) | 205 | #define MI_OVERLAY_ON (0x1<<21) |
| @@ -212,10 +215,24 @@ | |||
| 212 | #define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19) | 215 | #define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19) |
| 213 | #define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19) | 216 | #define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19) |
| 214 | #define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19) | 217 | #define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19) |
| 215 | #define MI_ARB_ON_OFF MI_INSTR(0x08, 0) | 218 | #define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */ |
| 216 | #define MI_ARB_ENABLE (1<<0) | 219 | #define MI_SEMAPHORE_GLOBAL_GTT (1<<22) |
| 217 | #define MI_ARB_DISABLE (0<<0) | 220 | #define MI_SEMAPHORE_UPDATE (1<<21) |
| 218 | 221 | #define MI_SEMAPHORE_COMPARE (1<<20) | |
| 222 | #define MI_SEMAPHORE_REGISTER (1<<18) | ||
| 223 | #define MI_SEMAPHORE_SYNC_VR (0<<16) /* RCS wait for VCS (RVSYNC) */ | ||
| 224 | #define MI_SEMAPHORE_SYNC_VER (1<<16) /* RCS wait for VECS (RVESYNC) */ | ||
| 225 | #define MI_SEMAPHORE_SYNC_BR (2<<16) /* RCS wait for BCS (RBSYNC) */ | ||
| 226 | #define MI_SEMAPHORE_SYNC_BV (0<<16) /* VCS wait for BCS (VBSYNC) */ | ||
| 227 | #define MI_SEMAPHORE_SYNC_VEV (1<<16) /* VCS wait for VECS (VVESYNC) */ | ||
| 228 | #define MI_SEMAPHORE_SYNC_RV (2<<16) /* VCS wait for RCS (VRSYNC) */ | ||
| 229 | #define MI_SEMAPHORE_SYNC_RB (0<<16) /* BCS wait for RCS (BRSYNC) */ | ||
| 230 | #define MI_SEMAPHORE_SYNC_VEB (1<<16) /* BCS wait for VECS (BVESYNC) */ | ||
| 231 | #define MI_SEMAPHORE_SYNC_VB (2<<16) /* BCS wait for VCS (BVSYNC) */ | ||
| 232 | #define MI_SEMAPHORE_SYNC_BVE (0<<16) /* VECS wait for BCS (VEBSYNC) */ | ||
| 233 | #define MI_SEMAPHORE_SYNC_VVE (1<<16) /* VECS wait for VCS (VEVSYNC) */ | ||
| 234 | #define MI_SEMAPHORE_SYNC_RVE (2<<16) /* VECS wait for RCS (VERSYNC) */ | ||
| 235 | #define MI_SEMAPHORE_SYNC_INVALID (3<<16) | ||
| 219 | #define MI_SET_CONTEXT MI_INSTR(0x18, 0) | 236 | #define MI_SET_CONTEXT MI_INSTR(0x18, 0) |
| 220 | #define MI_MM_SPACE_GTT (1<<8) | 237 | #define MI_MM_SPACE_GTT (1<<8) |
| 221 | #define MI_MM_SPACE_PHYSICAL (0<<8) | 238 | #define MI_MM_SPACE_PHYSICAL (0<<8) |
| @@ -235,7 +252,7 @@ | |||
| 235 | */ | 252 | */ |
| 236 | #define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1) | 253 | #define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1) |
| 237 | #define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*x-1) | 254 | #define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*x-1) |
| 238 | #define MI_SRM_LRM_GLOBAL_GTT (1<<22) | 255 | #define MI_SRM_LRM_GLOBAL_GTT (1<<22) |
| 239 | #define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */ | 256 | #define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */ |
| 240 | #define MI_FLUSH_DW_STORE_INDEX (1<<21) | 257 | #define MI_FLUSH_DW_STORE_INDEX (1<<21) |
| 241 | #define MI_INVALIDATE_TLB (1<<18) | 258 | #define MI_INVALIDATE_TLB (1<<18) |
| @@ -246,30 +263,13 @@ | |||
| 246 | #define MI_BATCH_BUFFER MI_INSTR(0x30, 1) | 263 | #define MI_BATCH_BUFFER MI_INSTR(0x30, 1) |
| 247 | #define MI_BATCH_NON_SECURE (1) | 264 | #define MI_BATCH_NON_SECURE (1) |
| 248 | /* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */ | 265 | /* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */ |
| 249 | #define MI_BATCH_NON_SECURE_I965 (1<<8) | 266 | #define MI_BATCH_NON_SECURE_I965 (1<<8) |
| 250 | #define MI_BATCH_PPGTT_HSW (1<<8) | 267 | #define MI_BATCH_PPGTT_HSW (1<<8) |
| 251 | #define MI_BATCH_NON_SECURE_HSW (1<<13) | 268 | #define MI_BATCH_NON_SECURE_HSW (1<<13) |
| 252 | #define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0) | 269 | #define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0) |
| 253 | #define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */ | 270 | #define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */ |
| 254 | #define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1) | 271 | #define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1) |
| 255 | #define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */ | 272 | |
| 256 | #define MI_SEMAPHORE_GLOBAL_GTT (1<<22) | ||
| 257 | #define MI_SEMAPHORE_UPDATE (1<<21) | ||
| 258 | #define MI_SEMAPHORE_COMPARE (1<<20) | ||
| 259 | #define MI_SEMAPHORE_REGISTER (1<<18) | ||
| 260 | #define MI_SEMAPHORE_SYNC_VR (0<<16) /* RCS wait for VCS (RVSYNC) */ | ||
| 261 | #define MI_SEMAPHORE_SYNC_VER (1<<16) /* RCS wait for VECS (RVESYNC) */ | ||
| 262 | #define MI_SEMAPHORE_SYNC_BR (2<<16) /* RCS wait for BCS (RBSYNC) */ | ||
| 263 | #define MI_SEMAPHORE_SYNC_BV (0<<16) /* VCS wait for BCS (VBSYNC) */ | ||
| 264 | #define MI_SEMAPHORE_SYNC_VEV (1<<16) /* VCS wait for VECS (VVESYNC) */ | ||
| 265 | #define MI_SEMAPHORE_SYNC_RV (2<<16) /* VCS wait for RCS (VRSYNC) */ | ||
| 266 | #define MI_SEMAPHORE_SYNC_RB (0<<16) /* BCS wait for RCS (BRSYNC) */ | ||
| 267 | #define MI_SEMAPHORE_SYNC_VEB (1<<16) /* BCS wait for VECS (BVESYNC) */ | ||
| 268 | #define MI_SEMAPHORE_SYNC_VB (2<<16) /* BCS wait for VCS (BVSYNC) */ | ||
| 269 | #define MI_SEMAPHORE_SYNC_BVE (0<<16) /* VECS wait for BCS (VEBSYNC) */ | ||
| 270 | #define MI_SEMAPHORE_SYNC_VVE (1<<16) /* VECS wait for VCS (VEVSYNC) */ | ||
| 271 | #define MI_SEMAPHORE_SYNC_RVE (2<<16) /* VECS wait for RCS (VERSYNC) */ | ||
| 272 | #define MI_SEMAPHORE_SYNC_INVALID (3<<16) | ||
| 273 | 273 | ||
| 274 | #define MI_PREDICATE_RESULT_2 (0x2214) | 274 | #define MI_PREDICATE_RESULT_2 (0x2214) |
| 275 | #define LOWER_SLICE_ENABLED (1<<0) | 275 | #define LOWER_SLICE_ENABLED (1<<0) |
| @@ -354,6 +354,7 @@ | |||
| 354 | #define IOSF_BYTE_ENABLES_SHIFT 4 | 354 | #define IOSF_BYTE_ENABLES_SHIFT 4 |
| 355 | #define IOSF_BAR_SHIFT 1 | 355 | #define IOSF_BAR_SHIFT 1 |
| 356 | #define IOSF_SB_BUSY (1<<0) | 356 | #define IOSF_SB_BUSY (1<<0) |
| 357 | #define IOSF_PORT_BUNIT 0x3 | ||
| 357 | #define IOSF_PORT_PUNIT 0x4 | 358 | #define IOSF_PORT_PUNIT 0x4 |
| 358 | #define IOSF_PORT_NC 0x11 | 359 | #define IOSF_PORT_NC 0x11 |
| 359 | #define IOSF_PORT_DPIO 0x12 | 360 | #define IOSF_PORT_DPIO 0x12 |
| @@ -361,12 +362,21 @@ | |||
| 361 | #define IOSF_PORT_CCK 0x14 | 362 | #define IOSF_PORT_CCK 0x14 |
| 362 | #define IOSF_PORT_CCU 0xA9 | 363 | #define IOSF_PORT_CCU 0xA9 |
| 363 | #define IOSF_PORT_GPS_CORE 0x48 | 364 | #define IOSF_PORT_GPS_CORE 0x48 |
| 365 | #define IOSF_PORT_FLISDSI 0x1B | ||
| 364 | #define VLV_IOSF_DATA (VLV_DISPLAY_BASE + 0x2104) | 366 | #define VLV_IOSF_DATA (VLV_DISPLAY_BASE + 0x2104) |
| 365 | #define VLV_IOSF_ADDR (VLV_DISPLAY_BASE + 0x2108) | 367 | #define VLV_IOSF_ADDR (VLV_DISPLAY_BASE + 0x2108) |
| 366 | 368 | ||
| 369 | /* See configdb bunit SB addr map */ | ||
| 370 | #define BUNIT_REG_BISOC 0x11 | ||
| 371 | |||
| 367 | #define PUNIT_OPCODE_REG_READ 6 | 372 | #define PUNIT_OPCODE_REG_READ 6 |
| 368 | #define PUNIT_OPCODE_REG_WRITE 7 | 373 | #define PUNIT_OPCODE_REG_WRITE 7 |
| 369 | 374 | ||
| 375 | #define PUNIT_REG_DSPFREQ 0x36 | ||
| 376 | #define DSPFREQSTAT_SHIFT 30 | ||
| 377 | #define DSPFREQSTAT_MASK (0x3 << DSPFREQSTAT_SHIFT) | ||
| 378 | #define DSPFREQGUAR_SHIFT 14 | ||
| 379 | #define DSPFREQGUAR_MASK (0x3 << DSPFREQGUAR_SHIFT) | ||
| 370 | #define PUNIT_REG_PWRGT_CTRL 0x60 | 380 | #define PUNIT_REG_PWRGT_CTRL 0x60 |
| 371 | #define PUNIT_REG_PWRGT_STATUS 0x61 | 381 | #define PUNIT_REG_PWRGT_STATUS 0x61 |
| 372 | #define PUNIT_CLK_GATE 1 | 382 | #define PUNIT_CLK_GATE 1 |
| @@ -429,6 +439,7 @@ | |||
| 429 | #define DSI_PLL_N1_DIV_MASK (3 << 16) | 439 | #define DSI_PLL_N1_DIV_MASK (3 << 16) |
| 430 | #define DSI_PLL_M1_DIV_SHIFT 0 | 440 | #define DSI_PLL_M1_DIV_SHIFT 0 |
| 431 | #define DSI_PLL_M1_DIV_MASK (0x1ff << 0) | 441 | #define DSI_PLL_M1_DIV_MASK (0x1ff << 0) |
| 442 | #define CCK_DISPLAY_CLOCK_CONTROL 0x6b | ||
| 432 | 443 | ||
| 433 | /* | 444 | /* |
| 434 | * DPIO - a special bus for various display related registers to hide behind | 445 | * DPIO - a special bus for various display related registers to hide behind |
| @@ -447,15 +458,13 @@ | |||
| 447 | #define DPIO_SFR_BYPASS (1<<1) | 458 | #define DPIO_SFR_BYPASS (1<<1) |
| 448 | #define DPIO_CMNRST (1<<0) | 459 | #define DPIO_CMNRST (1<<0) |
| 449 | 460 | ||
| 450 | #define _DPIO_TX3_SWING_CTL4_A 0x690 | 461 | #define DPIO_PHY(pipe) ((pipe) >> 1) |
| 451 | #define _DPIO_TX3_SWING_CTL4_B 0x2a90 | 462 | #define DPIO_PHY_IOSF_PORT(phy) (dev_priv->dpio_phy_iosf_port[phy]) |
| 452 | #define DPIO_TX3_SWING_CTL4(pipe) _PIPE(pipe, _DPIO_TX3_SWING_CTL4_A, \ | ||
| 453 | _DPIO_TX3_SWING_CTL4_B) | ||
| 454 | 463 | ||
| 455 | /* | 464 | /* |
| 456 | * Per pipe/PLL DPIO regs | 465 | * Per pipe/PLL DPIO regs |
| 457 | */ | 466 | */ |
| 458 | #define _DPIO_DIV_A 0x800c | 467 | #define _VLV_PLL_DW3_CH0 0x800c |
| 459 | #define DPIO_POST_DIV_SHIFT (28) /* 3 bits */ | 468 | #define DPIO_POST_DIV_SHIFT (28) /* 3 bits */ |
| 460 | #define DPIO_POST_DIV_DAC 0 | 469 | #define DPIO_POST_DIV_DAC 0 |
| 461 | #define DPIO_POST_DIV_HDMIDP 1 /* DAC 225-400M rate */ | 470 | #define DPIO_POST_DIV_HDMIDP 1 /* DAC 225-400M rate */ |
| @@ -468,10 +477,10 @@ | |||
| 468 | #define DPIO_ENABLE_CALIBRATION (1<<11) | 477 | #define DPIO_ENABLE_CALIBRATION (1<<11) |
| 469 | #define DPIO_M1DIV_SHIFT (8) /* 3 bits */ | 478 | #define DPIO_M1DIV_SHIFT (8) /* 3 bits */ |
| 470 | #define DPIO_M2DIV_MASK 0xff | 479 | #define DPIO_M2DIV_MASK 0xff |
| 471 | #define _DPIO_DIV_B 0x802c | 480 | #define _VLV_PLL_DW3_CH1 0x802c |
| 472 | #define DPIO_DIV(pipe) _PIPE(pipe, _DPIO_DIV_A, _DPIO_DIV_B) | 481 | #define VLV_PLL_DW3(ch) _PIPE(ch, _VLV_PLL_DW3_CH0, _VLV_PLL_DW3_CH1) |
| 473 | 482 | ||
| 474 | #define _DPIO_REFSFR_A 0x8014 | 483 | #define _VLV_PLL_DW5_CH0 0x8014 |
| 475 | #define DPIO_REFSEL_OVERRIDE 27 | 484 | #define DPIO_REFSEL_OVERRIDE 27 |
| 476 | #define DPIO_PLL_MODESEL_SHIFT 24 /* 3 bits */ | 485 | #define DPIO_PLL_MODESEL_SHIFT 24 /* 3 bits */ |
| 477 | #define DPIO_BIAS_CURRENT_CTL_SHIFT 21 /* 3 bits, always 0x7 */ | 486 | #define DPIO_BIAS_CURRENT_CTL_SHIFT 21 /* 3 bits, always 0x7 */ |
| @@ -479,118 +488,112 @@ | |||
| 479 | #define DPIO_PLL_REFCLK_SEL_MASK 3 | 488 | #define DPIO_PLL_REFCLK_SEL_MASK 3 |
| 480 | #define DPIO_DRIVER_CTL_SHIFT 12 /* always set to 0x8 */ | 489 | #define DPIO_DRIVER_CTL_SHIFT 12 /* always set to 0x8 */ |
| 481 | #define DPIO_CLK_BIAS_CTL_SHIFT 8 /* always set to 0x5 */ | 490 | #define DPIO_CLK_BIAS_CTL_SHIFT 8 /* always set to 0x5 */ |
| 482 | #define _DPIO_REFSFR_B 0x8034 | 491 | #define _VLV_PLL_DW5_CH1 0x8034 |
| 483 | #define DPIO_REFSFR(pipe) _PIPE(pipe, _DPIO_REFSFR_A, _DPIO_REFSFR_B) | 492 | #define VLV_PLL_DW5(ch) _PIPE(ch, _VLV_PLL_DW5_CH0, _VLV_PLL_DW5_CH1) |
| 484 | 493 | ||
| 485 | #define _DPIO_CORE_CLK_A 0x801c | 494 | #define _VLV_PLL_DW7_CH0 0x801c |
| 486 | #define _DPIO_CORE_CLK_B 0x803c | 495 | #define _VLV_PLL_DW7_CH1 0x803c |
| 487 | #define DPIO_CORE_CLK(pipe) _PIPE(pipe, _DPIO_CORE_CLK_A, _DPIO_CORE_CLK_B) | 496 | #define VLV_PLL_DW7(ch) _PIPE(ch, _VLV_PLL_DW7_CH0, _VLV_PLL_DW7_CH1) |
| 488 | 497 | ||
| 489 | #define _DPIO_IREF_CTL_A 0x8040 | 498 | #define _VLV_PLL_DW8_CH0 0x8040 |
| 490 | #define _DPIO_IREF_CTL_B 0x8060 | 499 | #define _VLV_PLL_DW8_CH1 0x8060 |
| 491 | #define DPIO_IREF_CTL(pipe) _PIPE(pipe, _DPIO_IREF_CTL_A, _DPIO_IREF_CTL_B) | 500 | #define VLV_PLL_DW8(ch) _PIPE(ch, _VLV_PLL_DW8_CH0, _VLV_PLL_DW8_CH1) |
| 492 | 501 | ||
| 493 | #define DPIO_IREF_BCAST 0xc044 | 502 | #define VLV_PLL_DW9_BCAST 0xc044 |
| 494 | #define _DPIO_IREF_A 0x8044 | 503 | #define _VLV_PLL_DW9_CH0 0x8044 |
| 495 | #define _DPIO_IREF_B 0x8064 | 504 | #define _VLV_PLL_DW9_CH1 0x8064 |
| 496 | #define DPIO_IREF(pipe) _PIPE(pipe, _DPIO_IREF_A, _DPIO_IREF_B) | 505 | #define VLV_PLL_DW9(ch) _PIPE(ch, _VLV_PLL_DW9_CH0, _VLV_PLL_DW9_CH1) |
| 497 | 506 | ||
| 498 | #define _DPIO_PLL_CML_A 0x804c | 507 | #define _VLV_PLL_DW10_CH0 0x8048 |
| 499 | #define _DPIO_PLL_CML_B 0x806c | 508 | #define _VLV_PLL_DW10_CH1 0x8068 |
| 500 | #define DPIO_PLL_CML(pipe) _PIPE(pipe, _DPIO_PLL_CML_A, _DPIO_PLL_CML_B) | 509 | #define VLV_PLL_DW10(ch) _PIPE(ch, _VLV_PLL_DW10_CH0, _VLV_PLL_DW10_CH1) |
| 501 | 510 | ||
| 502 | #define _DPIO_LPF_COEFF_A 0x8048 | 511 | #define _VLV_PLL_DW11_CH0 0x804c |
| 503 | #define _DPIO_LPF_COEFF_B 0x8068 | 512 | #define _VLV_PLL_DW11_CH1 0x806c |
| 504 | #define DPIO_LPF_COEFF(pipe) _PIPE(pipe, _DPIO_LPF_COEFF_A, _DPIO_LPF_COEFF_B) | 513 | #define VLV_PLL_DW11(ch) _PIPE(ch, _VLV_PLL_DW11_CH0, _VLV_PLL_DW11_CH1) |
| 505 | 514 | ||
| 506 | #define DPIO_CALIBRATION 0x80ac | 515 | /* Spec for ref block start counts at DW10 */ |
| 516 | #define VLV_REF_DW13 0x80ac | ||
| 507 | 517 | ||
| 508 | #define DPIO_FASTCLK_DISABLE 0x8100 | 518 | #define VLV_CMN_DW0 0x8100 |
| 509 | 519 | ||
| 510 | /* | 520 | /* |
| 511 | * Per DDI channel DPIO regs | 521 | * Per DDI channel DPIO regs |
| 512 | */ | 522 | */ |
| 513 | 523 | ||
| 514 | #define _DPIO_PCS_TX_0 0x8200 | 524 | #define _VLV_PCS_DW0_CH0 0x8200 |
| 515 | #define _DPIO_PCS_TX_1 0x8400 | 525 | #define _VLV_PCS_DW0_CH1 0x8400 |
| 516 | #define DPIO_PCS_TX_LANE2_RESET (1<<16) | 526 | #define DPIO_PCS_TX_LANE2_RESET (1<<16) |
| 517 | #define DPIO_PCS_TX_LANE1_RESET (1<<7) | 527 | #define DPIO_PCS_TX_LANE1_RESET (1<<7) |
| 518 | #define DPIO_PCS_TX(port) _PORT(port, _DPIO_PCS_TX_0, _DPIO_PCS_TX_1) | 528 | #define VLV_PCS_DW0(ch) _PORT(ch, _VLV_PCS_DW0_CH0, _VLV_PCS_DW0_CH1) |
| 519 | 529 | ||
| 520 | #define _DPIO_PCS_CLK_0 0x8204 | 530 | #define _VLV_PCS_DW1_CH0 0x8204 |
| 521 | #define _DPIO_PCS_CLK_1 0x8404 | 531 | #define _VLV_PCS_DW1_CH1 0x8404 |
| 522 | #define DPIO_PCS_CLK_CRI_RXEB_EIOS_EN (1<<22) | 532 | #define DPIO_PCS_CLK_CRI_RXEB_EIOS_EN (1<<22) |
| 523 | #define DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN (1<<21) | 533 | #define DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN (1<<21) |
| 524 | #define DPIO_PCS_CLK_DATAWIDTH_SHIFT (6) | 534 | #define DPIO_PCS_CLK_DATAWIDTH_SHIFT (6) |
| 525 | #define DPIO_PCS_CLK_SOFT_RESET (1<<5) | 535 | #define DPIO_PCS_CLK_SOFT_RESET (1<<5) |
| 526 | #define DPIO_PCS_CLK(port) _PORT(port, _DPIO_PCS_CLK_0, _DPIO_PCS_CLK_1) | 536 | #define VLV_PCS_DW1(ch) _PORT(ch, _VLV_PCS_DW1_CH0, _VLV_PCS_DW1_CH1) |
| 527 | 537 | ||
| 528 | #define _DPIO_PCS_CTL_OVR1_A 0x8224 | 538 | #define _VLV_PCS_DW8_CH0 0x8220 |
| 529 | #define _DPIO_PCS_CTL_OVR1_B 0x8424 | 539 | #define _VLV_PCS_DW8_CH1 0x8420 |
| 530 | #define DPIO_PCS_CTL_OVER1(port) _PORT(port, _DPIO_PCS_CTL_OVR1_A, \ | 540 | #define VLV_PCS_DW8(ch) _PORT(ch, _VLV_PCS_DW8_CH0, _VLV_PCS_DW8_CH1) |
| 531 | _DPIO_PCS_CTL_OVR1_B) | 541 | |
| 532 | 542 | #define _VLV_PCS01_DW8_CH0 0x0220 | |
| 533 | #define _DPIO_PCS_STAGGER0_A 0x822c | 543 | #define _VLV_PCS23_DW8_CH0 0x0420 |
| 534 | #define _DPIO_PCS_STAGGER0_B 0x842c | 544 | #define _VLV_PCS01_DW8_CH1 0x2620 |
| 535 | #define DPIO_PCS_STAGGER0(port) _PORT(port, _DPIO_PCS_STAGGER0_A, \ | 545 | #define _VLV_PCS23_DW8_CH1 0x2820 |
| 536 | _DPIO_PCS_STAGGER0_B) | 546 | #define VLV_PCS01_DW8(port) _PORT(port, _VLV_PCS01_DW8_CH0, _VLV_PCS01_DW8_CH1) |
| 537 | 547 | #define VLV_PCS23_DW8(port) _PORT(port, _VLV_PCS23_DW8_CH0, _VLV_PCS23_DW8_CH1) | |
| 538 | #define _DPIO_PCS_STAGGER1_A 0x8230 | 548 | |
| 539 | #define _DPIO_PCS_STAGGER1_B 0x8430 | 549 | #define _VLV_PCS_DW9_CH0 0x8224 |
| 540 | #define DPIO_PCS_STAGGER1(port) _PORT(port, _DPIO_PCS_STAGGER1_A, \ | 550 | #define _VLV_PCS_DW9_CH1 0x8424 |
| 541 | _DPIO_PCS_STAGGER1_B) | 551 | #define VLV_PCS_DW9(ch) _PORT(ch, _VLV_PCS_DW9_CH0, _VLV_PCS_DW9_CH1) |
| 542 | 552 | ||
| 543 | #define _DPIO_PCS_CLOCKBUF0_A 0x8238 | 553 | #define _VLV_PCS_DW11_CH0 0x822c |
| 544 | #define _DPIO_PCS_CLOCKBUF0_B 0x8438 | 554 | #define _VLV_PCS_DW11_CH1 0x842c |
| 545 | #define DPIO_PCS_CLOCKBUF0(port) _PORT(port, _DPIO_PCS_CLOCKBUF0_A, \ | 555 | #define VLV_PCS_DW11(ch) _PORT(ch, _VLV_PCS_DW11_CH0, _VLV_PCS_DW11_CH1) |
| 546 | _DPIO_PCS_CLOCKBUF0_B) | 556 | |
| 547 | 557 | #define _VLV_PCS_DW12_CH0 0x8230 | |
| 548 | #define _DPIO_PCS_CLOCKBUF8_A 0x825c | 558 | #define _VLV_PCS_DW12_CH1 0x8430 |
| 549 | #define _DPIO_PCS_CLOCKBUF8_B 0x845c | 559 | #define VLV_PCS_DW12(ch) _PORT(ch, _VLV_PCS_DW12_CH0, _VLV_PCS_DW12_CH1) |
| 550 | #define DPIO_PCS_CLOCKBUF8(port) _PORT(port, _DPIO_PCS_CLOCKBUF8_A, \ | 560 | |
| 551 | _DPIO_PCS_CLOCKBUF8_B) | 561 | #define _VLV_PCS_DW14_CH0 0x8238 |
| 552 | 562 | #define _VLV_PCS_DW14_CH1 0x8438 | |
| 553 | #define _DPIO_TX_SWING_CTL2_A 0x8288 | 563 | #define VLV_PCS_DW14(ch) _PORT(ch, _VLV_PCS_DW14_CH0, _VLV_PCS_DW14_CH1) |
| 554 | #define _DPIO_TX_SWING_CTL2_B 0x8488 | 564 | |
| 555 | #define DPIO_TX_SWING_CTL2(port) _PORT(port, _DPIO_TX_SWING_CTL2_A, \ | 565 | #define _VLV_PCS_DW23_CH0 0x825c |
| 556 | _DPIO_TX_SWING_CTL2_B) | 566 | #define _VLV_PCS_DW23_CH1 0x845c |
| 557 | 567 | #define VLV_PCS_DW23(ch) _PORT(ch, _VLV_PCS_DW23_CH0, _VLV_PCS_DW23_CH1) | |
| 558 | #define _DPIO_TX_SWING_CTL3_A 0x828c | 568 | |
| 559 | #define _DPIO_TX_SWING_CTL3_B 0x848c | 569 | #define _VLV_TX_DW2_CH0 0x8288 |
| 560 | #define DPIO_TX_SWING_CTL3(port) _PORT(port, _DPIO_TX_SWING_CTL3_A, \ | 570 | #define _VLV_TX_DW2_CH1 0x8488 |
| 561 | _DPIO_TX_SWING_CTL3_B) | 571 | #define VLV_TX_DW2(ch) _PORT(ch, _VLV_TX_DW2_CH0, _VLV_TX_DW2_CH1) |
| 562 | 572 | ||
| 563 | #define _DPIO_TX_SWING_CTL4_A 0x8290 | 573 | #define _VLV_TX_DW3_CH0 0x828c |
| 564 | #define _DPIO_TX_SWING_CTL4_B 0x8490 | 574 | #define _VLV_TX_DW3_CH1 0x848c |
| 565 | #define DPIO_TX_SWING_CTL4(port) _PORT(port, _DPIO_TX_SWING_CTL4_A, \ | 575 | #define VLV_TX_DW3(ch) _PORT(ch, _VLV_TX_DW3_CH0, _VLV_TX_DW3_CH1) |
| 566 | _DPIO_TX_SWING_CTL4_B) | 576 | |
| 567 | 577 | #define _VLV_TX_DW4_CH0 0x8290 | |
| 568 | #define _DPIO_TX_OCALINIT_0 0x8294 | 578 | #define _VLV_TX_DW4_CH1 0x8490 |
| 569 | #define _DPIO_TX_OCALINIT_1 0x8494 | 579 | #define VLV_TX_DW4(ch) _PORT(ch, _VLV_TX_DW4_CH0, _VLV_TX_DW4_CH1) |
| 580 | |||
| 581 | #define _VLV_TX3_DW4_CH0 0x690 | ||
| 582 | #define _VLV_TX3_DW4_CH1 0x2a90 | ||
| 583 | #define VLV_TX3_DW4(ch) _PORT(ch, _VLV_TX3_DW4_CH0, _VLV_TX3_DW4_CH1) | ||
| 584 | |||
| 585 | #define _VLV_TX_DW5_CH0 0x8294 | ||
| 586 | #define _VLV_TX_DW5_CH1 0x8494 | ||
| 570 | #define DPIO_TX_OCALINIT_EN (1<<31) | 587 | #define DPIO_TX_OCALINIT_EN (1<<31) |
| 571 | #define DPIO_TX_OCALINIT(port) _PORT(port, _DPIO_TX_OCALINIT_0, \ | 588 | #define VLV_TX_DW5(ch) _PORT(ch, _VLV_TX_DW5_CH0, _VLV_TX_DW5_CH1) |
| 572 | _DPIO_TX_OCALINIT_1) | 589 | |
| 573 | 590 | #define _VLV_TX_DW11_CH0 0x82ac | |
| 574 | #define _DPIO_TX_CTL_0 0x82ac | 591 | #define _VLV_TX_DW11_CH1 0x84ac |
| 575 | #define _DPIO_TX_CTL_1 0x84ac | 592 | #define VLV_TX_DW11(ch) _PORT(ch, _VLV_TX_DW11_CH0, _VLV_TX_DW11_CH1) |
| 576 | #define DPIO_TX_CTL(port) _PORT(port, _DPIO_TX_CTL_0, _DPIO_TX_CTL_1) | 593 | |
| 577 | 594 | #define _VLV_TX_DW14_CH0 0x82b8 | |
| 578 | #define _DPIO_TX_LANE_0 0x82b8 | 595 | #define _VLV_TX_DW14_CH1 0x84b8 |
| 579 | #define _DPIO_TX_LANE_1 0x84b8 | 596 | #define VLV_TX_DW14(ch) _PORT(ch, _VLV_TX_DW14_CH0, _VLV_TX_DW14_CH1) |
| 580 | #define DPIO_TX_LANE(port) _PORT(port, _DPIO_TX_LANE_0, _DPIO_TX_LANE_1) | ||
| 581 | |||
| 582 | #define _DPIO_DATA_CHANNEL1 0x8220 | ||
| 583 | #define _DPIO_DATA_CHANNEL2 0x8420 | ||
| 584 | #define DPIO_DATA_CHANNEL(port) _PORT(port, _DPIO_DATA_CHANNEL1, _DPIO_DATA_CHANNEL2) | ||
| 585 | |||
| 586 | #define _DPIO_PORT0_PCS0 0x0220 | ||
| 587 | #define _DPIO_PORT0_PCS1 0x0420 | ||
| 588 | #define _DPIO_PORT1_PCS2 0x2620 | ||
| 589 | #define _DPIO_PORT1_PCS3 0x2820 | ||
| 590 | #define DPIO_DATA_LANE_A(port) _PORT(port, _DPIO_PORT0_PCS0, _DPIO_PORT1_PCS2) | ||
| 591 | #define DPIO_DATA_LANE_B(port) _PORT(port, _DPIO_PORT0_PCS1, _DPIO_PORT1_PCS3) | ||
| 592 | #define DPIO_DATA_CHANNEL1 0x8220 | ||
| 593 | #define DPIO_DATA_CHANNEL2 0x8420 | ||
| 594 | 597 | ||
| 595 | /* | 598 | /* |
| 596 | * Fence registers | 599 | * Fence registers |
| @@ -732,6 +735,8 @@ | |||
| 732 | #define HWSTAM 0x02098 | 735 | #define HWSTAM 0x02098 |
| 733 | #define DMA_FADD_I8XX 0x020d0 | 736 | #define DMA_FADD_I8XX 0x020d0 |
| 734 | #define RING_BBSTATE(base) ((base)+0x110) | 737 | #define RING_BBSTATE(base) ((base)+0x110) |
| 738 | #define RING_BBADDR(base) ((base)+0x140) | ||
| 739 | #define RING_BBADDR_UDW(base) ((base)+0x168) /* gen8+ */ | ||
| 735 | 740 | ||
| 736 | #define ERROR_GEN6 0x040a0 | 741 | #define ERROR_GEN6 0x040a0 |
| 737 | #define GEN7_ERR_INT 0x44040 | 742 | #define GEN7_ERR_INT 0x44040 |
| @@ -922,7 +927,6 @@ | |||
| 922 | #define CM0_COLOR_EVICT_DISABLE (1<<3) | 927 | #define CM0_COLOR_EVICT_DISABLE (1<<3) |
| 923 | #define CM0_DEPTH_WRITE_DISABLE (1<<1) | 928 | #define CM0_DEPTH_WRITE_DISABLE (1<<1) |
| 924 | #define CM0_RC_OP_FLUSH_DISABLE (1<<0) | 929 | #define CM0_RC_OP_FLUSH_DISABLE (1<<0) |
| 925 | #define BB_ADDR 0x02140 /* 8 bytes */ | ||
| 926 | #define GFX_FLSH_CNTL 0x02170 /* 915+ only */ | 930 | #define GFX_FLSH_CNTL 0x02170 /* 915+ only */ |
| 927 | #define GFX_FLSH_CNTL_GEN6 0x101008 | 931 | #define GFX_FLSH_CNTL_GEN6 0x101008 |
| 928 | #define GFX_FLSH_CNTL_EN (1<<0) | 932 | #define GFX_FLSH_CNTL_EN (1<<0) |
| @@ -999,6 +1003,7 @@ | |||
| 999 | 1003 | ||
| 1000 | #define GEN7_FF_THREAD_MODE 0x20a0 | 1004 | #define GEN7_FF_THREAD_MODE 0x20a0 |
| 1001 | #define GEN7_FF_SCHED_MASK 0x0077070 | 1005 | #define GEN7_FF_SCHED_MASK 0x0077070 |
| 1006 | #define GEN8_FF_DS_REF_CNT_FFME (1 << 19) | ||
| 1002 | #define GEN7_FF_TS_SCHED_HS1 (0x5<<16) | 1007 | #define GEN7_FF_TS_SCHED_HS1 (0x5<<16) |
| 1003 | #define GEN7_FF_TS_SCHED_HS0 (0x3<<16) | 1008 | #define GEN7_FF_TS_SCHED_HS0 (0x3<<16) |
| 1004 | #define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1<<16) | 1009 | #define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1<<16) |
| @@ -1026,14 +1031,14 @@ | |||
| 1026 | #define FBC_CTL_UNCOMPRESSIBLE (1<<14) | 1031 | #define FBC_CTL_UNCOMPRESSIBLE (1<<14) |
| 1027 | #define FBC_CTL_C3_IDLE (1<<13) | 1032 | #define FBC_CTL_C3_IDLE (1<<13) |
| 1028 | #define FBC_CTL_STRIDE_SHIFT (5) | 1033 | #define FBC_CTL_STRIDE_SHIFT (5) |
| 1029 | #define FBC_CTL_FENCENO (1<<0) | 1034 | #define FBC_CTL_FENCENO_SHIFT (0) |
| 1030 | #define FBC_COMMAND 0x0320c | 1035 | #define FBC_COMMAND 0x0320c |
| 1031 | #define FBC_CMD_COMPRESS (1<<0) | 1036 | #define FBC_CMD_COMPRESS (1<<0) |
| 1032 | #define FBC_STATUS 0x03210 | 1037 | #define FBC_STATUS 0x03210 |
| 1033 | #define FBC_STAT_COMPRESSING (1<<31) | 1038 | #define FBC_STAT_COMPRESSING (1<<31) |
| 1034 | #define FBC_STAT_COMPRESSED (1<<30) | 1039 | #define FBC_STAT_COMPRESSED (1<<30) |
| 1035 | #define FBC_STAT_MODIFIED (1<<29) | 1040 | #define FBC_STAT_MODIFIED (1<<29) |
| 1036 | #define FBC_STAT_CURRENT_LINE (1<<0) | 1041 | #define FBC_STAT_CURRENT_LINE_SHIFT (0) |
| 1037 | #define FBC_CONTROL2 0x03214 | 1042 | #define FBC_CONTROL2 0x03214 |
| 1038 | #define FBC_CTL_FENCE_DBL (0<<4) | 1043 | #define FBC_CTL_FENCE_DBL (0<<4) |
| 1039 | #define FBC_CTL_IDLE_IMM (0<<2) | 1044 | #define FBC_CTL_IDLE_IMM (0<<2) |
| @@ -2117,9 +2122,13 @@ | |||
| 2117 | * Please check the detailed lore in the commit message for for experimental | 2122 | * Please check the detailed lore in the commit message for for experimental |
| 2118 | * evidence. | 2123 | * evidence. |
| 2119 | */ | 2124 | */ |
| 2120 | #define PORTD_HOTPLUG_LIVE_STATUS (1 << 29) | 2125 | #define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 29) |
| 2121 | #define PORTC_HOTPLUG_LIVE_STATUS (1 << 28) | 2126 | #define PORTC_HOTPLUG_LIVE_STATUS_G4X (1 << 28) |
| 2122 | #define PORTB_HOTPLUG_LIVE_STATUS (1 << 27) | 2127 | #define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 27) |
| 2128 | /* VLV DP/HDMI bits again match Bspec */ | ||
| 2129 | #define PORTD_HOTPLUG_LIVE_STATUS_VLV (1 << 27) | ||
| 2130 | #define PORTC_HOTPLUG_LIVE_STATUS_VLV (1 << 28) | ||
| 2131 | #define PORTB_HOTPLUG_LIVE_STATUS_VLV (1 << 29) | ||
| 2123 | #define PORTD_HOTPLUG_INT_STATUS (3 << 21) | 2132 | #define PORTD_HOTPLUG_INT_STATUS (3 << 21) |
| 2124 | #define PORTC_HOTPLUG_INT_STATUS (3 << 19) | 2133 | #define PORTC_HOTPLUG_INT_STATUS (3 << 19) |
| 2125 | #define PORTB_HOTPLUG_INT_STATUS (3 << 17) | 2134 | #define PORTB_HOTPLUG_INT_STATUS (3 << 17) |
| @@ -2130,6 +2139,11 @@ | |||
| 2130 | #define CRT_HOTPLUG_MONITOR_COLOR (3 << 8) | 2139 | #define CRT_HOTPLUG_MONITOR_COLOR (3 << 8) |
| 2131 | #define CRT_HOTPLUG_MONITOR_MONO (2 << 8) | 2140 | #define CRT_HOTPLUG_MONITOR_MONO (2 << 8) |
| 2132 | #define CRT_HOTPLUG_MONITOR_NONE (0 << 8) | 2141 | #define CRT_HOTPLUG_MONITOR_NONE (0 << 8) |
| 2142 | #define DP_AUX_CHANNEL_D_INT_STATUS_G4X (1 << 6) | ||
| 2143 | #define DP_AUX_CHANNEL_C_INT_STATUS_G4X (1 << 5) | ||
| 2144 | #define DP_AUX_CHANNEL_B_INT_STATUS_G4X (1 << 4) | ||
| 2145 | #define DP_AUX_CHANNEL_MASK_INT_STATUS_G4X (7 << 4) | ||
| 2146 | |||
| 2133 | /* SDVO is different across gen3/4 */ | 2147 | /* SDVO is different across gen3/4 */ |
| 2134 | #define SDVOC_HOTPLUG_INT_STATUS_G4X (1 << 3) | 2148 | #define SDVOC_HOTPLUG_INT_STATUS_G4X (1 << 3) |
| 2135 | #define SDVOB_HOTPLUG_INT_STATUS_G4X (1 << 2) | 2149 | #define SDVOB_HOTPLUG_INT_STATUS_G4X (1 << 2) |
| @@ -3421,42 +3435,6 @@ | |||
| 3421 | /* the unit of memory self-refresh latency time is 0.5us */ | 3435 | /* the unit of memory self-refresh latency time is 0.5us */ |
| 3422 | #define ILK_SRLT_MASK 0x3f | 3436 | #define ILK_SRLT_MASK 0x3f |
| 3423 | 3437 | ||
| 3424 | /* define the fifo size on Ironlake */ | ||
| 3425 | #define ILK_DISPLAY_FIFO 128 | ||
| 3426 | #define ILK_DISPLAY_MAXWM 64 | ||
| 3427 | #define ILK_DISPLAY_DFTWM 8 | ||
| 3428 | #define ILK_CURSOR_FIFO 32 | ||
| 3429 | #define ILK_CURSOR_MAXWM 16 | ||
| 3430 | #define ILK_CURSOR_DFTWM 8 | ||
| 3431 | |||
| 3432 | #define ILK_DISPLAY_SR_FIFO 512 | ||
| 3433 | #define ILK_DISPLAY_MAX_SRWM 0x1ff | ||
| 3434 | #define ILK_DISPLAY_DFT_SRWM 0x3f | ||
| 3435 | #define ILK_CURSOR_SR_FIFO 64 | ||
| 3436 | #define ILK_CURSOR_MAX_SRWM 0x3f | ||
| 3437 | #define ILK_CURSOR_DFT_SRWM 8 | ||
| 3438 | |||
| 3439 | #define ILK_FIFO_LINE_SIZE 64 | ||
| 3440 | |||
| 3441 | /* define the WM info on Sandybridge */ | ||
| 3442 | #define SNB_DISPLAY_FIFO 128 | ||
| 3443 | #define SNB_DISPLAY_MAXWM 0x7f /* bit 16:22 */ | ||
| 3444 | #define SNB_DISPLAY_DFTWM 8 | ||
| 3445 | #define SNB_CURSOR_FIFO 32 | ||
| 3446 | #define SNB_CURSOR_MAXWM 0x1f /* bit 4:0 */ | ||
| 3447 | #define SNB_CURSOR_DFTWM 8 | ||
| 3448 | |||
| 3449 | #define SNB_DISPLAY_SR_FIFO 512 | ||
| 3450 | #define SNB_DISPLAY_MAX_SRWM 0x1ff /* bit 16:8 */ | ||
| 3451 | #define SNB_DISPLAY_DFT_SRWM 0x3f | ||
| 3452 | #define SNB_CURSOR_SR_FIFO 64 | ||
| 3453 | #define SNB_CURSOR_MAX_SRWM 0x3f /* bit 5:0 */ | ||
| 3454 | #define SNB_CURSOR_DFT_SRWM 8 | ||
| 3455 | |||
| 3456 | #define SNB_FBC_MAX_SRWM 0xf /* bit 23:20 */ | ||
| 3457 | |||
| 3458 | #define SNB_FIFO_LINE_SIZE 64 | ||
| 3459 | |||
| 3460 | 3438 | ||
| 3461 | /* the address where we get all kinds of latency value */ | 3439 | /* the address where we get all kinds of latency value */ |
| 3462 | #define SSKPD 0x5d10 | 3440 | #define SSKPD 0x5d10 |
| @@ -3600,8 +3578,6 @@ | |||
| 3600 | #define DISP_BASEADDR_MASK (0xfffff000) | 3578 | #define DISP_BASEADDR_MASK (0xfffff000) |
| 3601 | #define I915_LO_DISPBASE(val) (val & ~DISP_BASEADDR_MASK) | 3579 | #define I915_LO_DISPBASE(val) (val & ~DISP_BASEADDR_MASK) |
| 3602 | #define I915_HI_DISPBASE(val) (val & DISP_BASEADDR_MASK) | 3580 | #define I915_HI_DISPBASE(val) (val & DISP_BASEADDR_MASK) |
| 3603 | #define I915_MODIFY_DISPBASE(reg, gfx_addr) \ | ||
| 3604 | (I915_WRITE((reg), (gfx_addr) | I915_LO_DISPBASE(I915_READ(reg)))) | ||
| 3605 | 3581 | ||
| 3606 | /* VBIOS flags */ | 3582 | /* VBIOS flags */ |
| 3607 | #define SWF00 (dev_priv->info->display_mmio_offset + 0x71410) | 3583 | #define SWF00 (dev_priv->info->display_mmio_offset + 0x71410) |
| @@ -3787,7 +3763,7 @@ | |||
| 3787 | 3763 | ||
| 3788 | #define _SPACNTR (VLV_DISPLAY_BASE + 0x72180) | 3764 | #define _SPACNTR (VLV_DISPLAY_BASE + 0x72180) |
| 3789 | #define SP_ENABLE (1<<31) | 3765 | #define SP_ENABLE (1<<31) |
| 3790 | #define SP_GEAMMA_ENABLE (1<<30) | 3766 | #define SP_GAMMA_ENABLE (1<<30) |
| 3791 | #define SP_PIXFORMAT_MASK (0xf<<26) | 3767 | #define SP_PIXFORMAT_MASK (0xf<<26) |
| 3792 | #define SP_FORMAT_YUV422 (0<<26) | 3768 | #define SP_FORMAT_YUV422 (0<<26) |
| 3793 | #define SP_FORMAT_BGR565 (5<<26) | 3769 | #define SP_FORMAT_BGR565 (5<<26) |
| @@ -4139,6 +4115,8 @@ | |||
| 4139 | #define DISP_ARB_CTL 0x45000 | 4115 | #define DISP_ARB_CTL 0x45000 |
| 4140 | #define DISP_TILE_SURFACE_SWIZZLING (1<<13) | 4116 | #define DISP_TILE_SURFACE_SWIZZLING (1<<13) |
| 4141 | #define DISP_FBC_WM_DIS (1<<15) | 4117 | #define DISP_FBC_WM_DIS (1<<15) |
| 4118 | #define DISP_ARB_CTL2 0x45004 | ||
| 4119 | #define DISP_DATA_PARTITION_5_6 (1<<6) | ||
| 4142 | #define GEN7_MSG_CTL 0x45010 | 4120 | #define GEN7_MSG_CTL 0x45010 |
| 4143 | #define WAIT_FOR_PCH_RESET_ACK (1<<1) | 4121 | #define WAIT_FOR_PCH_RESET_ACK (1<<1) |
| 4144 | #define WAIT_FOR_PCH_FLR_ACK (1<<0) | 4122 | #define WAIT_FOR_PCH_FLR_ACK (1<<0) |
| @@ -4159,6 +4137,10 @@ | |||
| 4159 | #define GEN7_L3SQCREG4 0xb034 | 4137 | #define GEN7_L3SQCREG4 0xb034 |
| 4160 | #define L3SQ_URB_READ_CAM_MATCH_DISABLE (1<<27) | 4138 | #define L3SQ_URB_READ_CAM_MATCH_DISABLE (1<<27) |
| 4161 | 4139 | ||
| 4140 | /* GEN8 chicken */ | ||
| 4141 | #define HDC_CHICKEN0 0x7300 | ||
| 4142 | #define HDC_FORCE_NON_COHERENT (1<<4) | ||
| 4143 | |||
| 4162 | /* WaCatErrorRejectionIssue */ | 4144 | /* WaCatErrorRejectionIssue */ |
| 4163 | #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030 | 4145 | #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030 |
| 4164 | #define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11) | 4146 | #define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11) |
| @@ -4843,6 +4825,8 @@ | |||
| 4843 | #define FORCEWAKE_ACK 0x130090 | 4825 | #define FORCEWAKE_ACK 0x130090 |
| 4844 | #define VLV_GTLC_WAKE_CTRL 0x130090 | 4826 | #define VLV_GTLC_WAKE_CTRL 0x130090 |
| 4845 | #define VLV_GTLC_PW_STATUS 0x130094 | 4827 | #define VLV_GTLC_PW_STATUS 0x130094 |
| 4828 | #define VLV_GTLC_PW_RENDER_STATUS_MASK 0x80 | ||
| 4829 | #define VLV_GTLC_PW_MEDIA_STATUS_MASK 0x20 | ||
| 4846 | #define FORCEWAKE_MT 0xa188 /* multi-threaded */ | 4830 | #define FORCEWAKE_MT 0xa188 /* multi-threaded */ |
| 4847 | #define FORCEWAKE_KERNEL 0x1 | 4831 | #define FORCEWAKE_KERNEL 0x1 |
| 4848 | #define FORCEWAKE_USER 0x2 | 4832 | #define FORCEWAKE_USER 0x2 |
| @@ -4851,12 +4835,16 @@ | |||
| 4851 | #define FORCEWAKE_MT_ENABLE (1<<5) | 4835 | #define FORCEWAKE_MT_ENABLE (1<<5) |
| 4852 | 4836 | ||
| 4853 | #define GTFIFODBG 0x120000 | 4837 | #define GTFIFODBG 0x120000 |
| 4854 | #define GT_FIFO_CPU_ERROR_MASK 7 | 4838 | #define GT_FIFO_SBDROPERR (1<<6) |
| 4839 | #define GT_FIFO_BLOBDROPERR (1<<5) | ||
| 4840 | #define GT_FIFO_SB_READ_ABORTERR (1<<4) | ||
| 4841 | #define GT_FIFO_DROPERR (1<<3) | ||
| 4855 | #define GT_FIFO_OVFERR (1<<2) | 4842 | #define GT_FIFO_OVFERR (1<<2) |
| 4856 | #define GT_FIFO_IAWRERR (1<<1) | 4843 | #define GT_FIFO_IAWRERR (1<<1) |
| 4857 | #define GT_FIFO_IARDERR (1<<0) | 4844 | #define GT_FIFO_IARDERR (1<<0) |
| 4858 | 4845 | ||
| 4859 | #define GT_FIFO_FREE_ENTRIES 0x120008 | 4846 | #define GTFIFOCTL 0x120008 |
| 4847 | #define GT_FIFO_FREE_ENTRIES_MASK 0x7f | ||
| 4860 | #define GT_FIFO_NUM_RESERVED_ENTRIES 20 | 4848 | #define GT_FIFO_NUM_RESERVED_ENTRIES 20 |
| 4861 | 4849 | ||
| 4862 | #define HSW_IDICR 0x9008 | 4850 | #define HSW_IDICR 0x9008 |
| @@ -4890,6 +4878,7 @@ | |||
| 4890 | #define GEN6_RC_CTL_RC6_ENABLE (1<<18) | 4878 | #define GEN6_RC_CTL_RC6_ENABLE (1<<18) |
| 4891 | #define GEN6_RC_CTL_RC1e_ENABLE (1<<20) | 4879 | #define GEN6_RC_CTL_RC1e_ENABLE (1<<20) |
| 4892 | #define GEN6_RC_CTL_RC7_ENABLE (1<<22) | 4880 | #define GEN6_RC_CTL_RC7_ENABLE (1<<22) |
| 4881 | #define VLV_RC_CTL_CTX_RST_PARALLEL (1<<24) | ||
| 4893 | #define GEN7_RC_CTL_TO_MODE (1<<28) | 4882 | #define GEN7_RC_CTL_TO_MODE (1<<28) |
| 4894 | #define GEN6_RC_CTL_EI_MODE(x) ((x)<<27) | 4883 | #define GEN6_RC_CTL_EI_MODE(x) ((x)<<27) |
| 4895 | #define GEN6_RC_CTL_HW_ENABLE (1<<31) | 4884 | #define GEN6_RC_CTL_HW_ENABLE (1<<31) |
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 98790c7cccb1..8150fdc08d49 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c | |||
| @@ -192,7 +192,6 @@ static void i915_restore_vga(struct drm_device *dev) | |||
| 192 | static void i915_save_display(struct drm_device *dev) | 192 | static void i915_save_display(struct drm_device *dev) |
| 193 | { | 193 | { |
| 194 | struct drm_i915_private *dev_priv = dev->dev_private; | 194 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 195 | unsigned long flags; | ||
| 196 | 195 | ||
| 197 | /* Display arbitration control */ | 196 | /* Display arbitration control */ |
| 198 | if (INTEL_INFO(dev)->gen <= 4) | 197 | if (INTEL_INFO(dev)->gen <= 4) |
| @@ -203,46 +202,27 @@ static void i915_save_display(struct drm_device *dev) | |||
| 203 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | 202 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
| 204 | i915_save_display_reg(dev); | 203 | i915_save_display_reg(dev); |
| 205 | 204 | ||
| 206 | spin_lock_irqsave(&dev_priv->backlight.lock, flags); | ||
| 207 | |||
| 208 | /* LVDS state */ | 205 | /* LVDS state */ |
| 209 | if (HAS_PCH_SPLIT(dev)) { | 206 | if (HAS_PCH_SPLIT(dev)) { |
| 210 | dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL); | 207 | dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL); |
| 211 | dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1); | ||
| 212 | dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2); | ||
| 213 | dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL); | ||
| 214 | dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2); | ||
| 215 | if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) | 208 | if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) |
| 216 | dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS); | 209 | dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS); |
| 217 | } else if (IS_VALLEYVIEW(dev)) { | 210 | } else if (IS_VALLEYVIEW(dev)) { |
| 218 | dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL); | 211 | dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL); |
| 219 | dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS); | 212 | dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS); |
| 220 | 213 | ||
| 221 | dev_priv->regfile.saveBLC_PWM_CTL = | ||
| 222 | I915_READ(VLV_BLC_PWM_CTL(PIPE_A)); | ||
| 223 | dev_priv->regfile.saveBLC_HIST_CTL = | 214 | dev_priv->regfile.saveBLC_HIST_CTL = |
| 224 | I915_READ(VLV_BLC_HIST_CTL(PIPE_A)); | 215 | I915_READ(VLV_BLC_HIST_CTL(PIPE_A)); |
| 225 | dev_priv->regfile.saveBLC_PWM_CTL2 = | ||
| 226 | I915_READ(VLV_BLC_PWM_CTL2(PIPE_A)); | ||
| 227 | dev_priv->regfile.saveBLC_PWM_CTL_B = | ||
| 228 | I915_READ(VLV_BLC_PWM_CTL(PIPE_B)); | ||
| 229 | dev_priv->regfile.saveBLC_HIST_CTL_B = | 216 | dev_priv->regfile.saveBLC_HIST_CTL_B = |
| 230 | I915_READ(VLV_BLC_HIST_CTL(PIPE_B)); | 217 | I915_READ(VLV_BLC_HIST_CTL(PIPE_B)); |
| 231 | dev_priv->regfile.saveBLC_PWM_CTL2_B = | ||
| 232 | I915_READ(VLV_BLC_PWM_CTL2(PIPE_B)); | ||
| 233 | } else { | 218 | } else { |
| 234 | dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL); | 219 | dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL); |
| 235 | dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS); | 220 | dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS); |
| 236 | dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); | ||
| 237 | dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL); | 221 | dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL); |
| 238 | if (INTEL_INFO(dev)->gen >= 4) | ||
| 239 | dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); | ||
| 240 | if (IS_MOBILE(dev) && !IS_I830(dev)) | 222 | if (IS_MOBILE(dev) && !IS_I830(dev)) |
| 241 | dev_priv->regfile.saveLVDS = I915_READ(LVDS); | 223 | dev_priv->regfile.saveLVDS = I915_READ(LVDS); |
| 242 | } | 224 | } |
| 243 | 225 | ||
| 244 | spin_unlock_irqrestore(&dev_priv->backlight.lock, flags); | ||
| 245 | |||
| 246 | if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) | 226 | if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) |
| 247 | dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL); | 227 | dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL); |
| 248 | 228 | ||
| @@ -257,7 +237,7 @@ static void i915_save_display(struct drm_device *dev) | |||
| 257 | } | 237 | } |
| 258 | 238 | ||
| 259 | /* Only regfile.save FBC state on the platform that supports FBC */ | 239 | /* Only regfile.save FBC state on the platform that supports FBC */ |
| 260 | if (I915_HAS_FBC(dev)) { | 240 | if (HAS_FBC(dev)) { |
| 261 | if (HAS_PCH_SPLIT(dev)) { | 241 | if (HAS_PCH_SPLIT(dev)) { |
| 262 | dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE); | 242 | dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE); |
| 263 | } else if (IS_GM45(dev)) { | 243 | } else if (IS_GM45(dev)) { |
| @@ -278,7 +258,6 @@ static void i915_restore_display(struct drm_device *dev) | |||
| 278 | { | 258 | { |
| 279 | struct drm_i915_private *dev_priv = dev->dev_private; | 259 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 280 | u32 mask = 0xffffffff; | 260 | u32 mask = 0xffffffff; |
| 281 | unsigned long flags; | ||
| 282 | 261 | ||
| 283 | /* Display arbitration */ | 262 | /* Display arbitration */ |
| 284 | if (INTEL_INFO(dev)->gen <= 4) | 263 | if (INTEL_INFO(dev)->gen <= 4) |
| @@ -287,12 +266,6 @@ static void i915_restore_display(struct drm_device *dev) | |||
| 287 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | 266 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
| 288 | i915_restore_display_reg(dev); | 267 | i915_restore_display_reg(dev); |
| 289 | 268 | ||
| 290 | spin_lock_irqsave(&dev_priv->backlight.lock, flags); | ||
| 291 | |||
| 292 | /* LVDS state */ | ||
| 293 | if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) | ||
| 294 | I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2); | ||
| 295 | |||
| 296 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 269 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
| 297 | mask = ~LVDS_PORT_EN; | 270 | mask = ~LVDS_PORT_EN; |
| 298 | 271 | ||
| @@ -305,13 +278,6 @@ static void i915_restore_display(struct drm_device *dev) | |||
| 305 | I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL); | 278 | I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL); |
| 306 | 279 | ||
| 307 | if (HAS_PCH_SPLIT(dev)) { | 280 | if (HAS_PCH_SPLIT(dev)) { |
| 308 | I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL); | ||
| 309 | I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2); | ||
| 310 | /* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2; | ||
| 311 | * otherwise we get blank eDP screen after S3 on some machines | ||
| 312 | */ | ||
| 313 | I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->regfile.saveBLC_CPU_PWM_CTL2); | ||
| 314 | I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->regfile.saveBLC_CPU_PWM_CTL); | ||
| 315 | I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS); | 281 | I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS); |
| 316 | I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS); | 282 | I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS); |
| 317 | I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR); | 283 | I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR); |
| @@ -319,21 +285,12 @@ static void i915_restore_display(struct drm_device *dev) | |||
| 319 | I915_WRITE(RSTDBYCTL, | 285 | I915_WRITE(RSTDBYCTL, |
| 320 | dev_priv->regfile.saveMCHBAR_RENDER_STANDBY); | 286 | dev_priv->regfile.saveMCHBAR_RENDER_STANDBY); |
| 321 | } else if (IS_VALLEYVIEW(dev)) { | 287 | } else if (IS_VALLEYVIEW(dev)) { |
| 322 | I915_WRITE(VLV_BLC_PWM_CTL(PIPE_A), | ||
| 323 | dev_priv->regfile.saveBLC_PWM_CTL); | ||
| 324 | I915_WRITE(VLV_BLC_HIST_CTL(PIPE_A), | 288 | I915_WRITE(VLV_BLC_HIST_CTL(PIPE_A), |
| 325 | dev_priv->regfile.saveBLC_HIST_CTL); | 289 | dev_priv->regfile.saveBLC_HIST_CTL); |
| 326 | I915_WRITE(VLV_BLC_PWM_CTL2(PIPE_A), | ||
| 327 | dev_priv->regfile.saveBLC_PWM_CTL2); | ||
| 328 | I915_WRITE(VLV_BLC_PWM_CTL(PIPE_B), | ||
| 329 | dev_priv->regfile.saveBLC_PWM_CTL); | ||
| 330 | I915_WRITE(VLV_BLC_HIST_CTL(PIPE_B), | 290 | I915_WRITE(VLV_BLC_HIST_CTL(PIPE_B), |
| 331 | dev_priv->regfile.saveBLC_HIST_CTL); | 291 | dev_priv->regfile.saveBLC_HIST_CTL); |
| 332 | I915_WRITE(VLV_BLC_PWM_CTL2(PIPE_B), | ||
| 333 | dev_priv->regfile.saveBLC_PWM_CTL2); | ||
| 334 | } else { | 292 | } else { |
| 335 | I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS); | 293 | I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS); |
| 336 | I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL); | ||
| 337 | I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL); | 294 | I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL); |
| 338 | I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS); | 295 | I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS); |
| 339 | I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS); | 296 | I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS); |
| @@ -341,11 +298,9 @@ static void i915_restore_display(struct drm_device *dev) | |||
| 341 | I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL); | 298 | I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL); |
| 342 | } | 299 | } |
| 343 | 300 | ||
| 344 | spin_unlock_irqrestore(&dev_priv->backlight.lock, flags); | ||
| 345 | |||
| 346 | /* only restore FBC info on the platform that supports FBC*/ | 301 | /* only restore FBC info on the platform that supports FBC*/ |
| 347 | intel_disable_fbc(dev); | 302 | intel_disable_fbc(dev); |
| 348 | if (I915_HAS_FBC(dev)) { | 303 | if (HAS_FBC(dev)) { |
| 349 | if (HAS_PCH_SPLIT(dev)) { | 304 | if (HAS_PCH_SPLIT(dev)) { |
| 350 | I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE); | 305 | I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE); |
| 351 | } else if (IS_GM45(dev)) { | 306 | } else if (IS_GM45(dev)) { |
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index cef38fd320a7..33bcae314bf8 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c | |||
| @@ -40,10 +40,13 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg) | |||
| 40 | struct drm_i915_private *dev_priv = dev->dev_private; | 40 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 41 | u64 raw_time; /* 32b value may overflow during fixed point math */ | 41 | u64 raw_time; /* 32b value may overflow during fixed point math */ |
| 42 | u64 units = 128ULL, div = 100000ULL, bias = 100ULL; | 42 | u64 units = 128ULL, div = 100000ULL, bias = 100ULL; |
| 43 | u32 ret; | ||
| 43 | 44 | ||
| 44 | if (!intel_enable_rc6(dev)) | 45 | if (!intel_enable_rc6(dev)) |
| 45 | return 0; | 46 | return 0; |
| 46 | 47 | ||
| 48 | intel_runtime_pm_get(dev_priv); | ||
| 49 | |||
| 47 | /* On VLV, residency time is in CZ units rather than 1.28us */ | 50 | /* On VLV, residency time is in CZ units rather than 1.28us */ |
| 48 | if (IS_VALLEYVIEW(dev)) { | 51 | if (IS_VALLEYVIEW(dev)) { |
| 49 | u32 clkctl2; | 52 | u32 clkctl2; |
| @@ -52,7 +55,8 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg) | |||
| 52 | CLK_CTL2_CZCOUNT_30NS_SHIFT; | 55 | CLK_CTL2_CZCOUNT_30NS_SHIFT; |
| 53 | if (!clkctl2) { | 56 | if (!clkctl2) { |
| 54 | WARN(!clkctl2, "bogus CZ count value"); | 57 | WARN(!clkctl2, "bogus CZ count value"); |
| 55 | return 0; | 58 | ret = 0; |
| 59 | goto out; | ||
| 56 | } | 60 | } |
| 57 | units = DIV_ROUND_UP_ULL(30ULL * bias, (u64)clkctl2); | 61 | units = DIV_ROUND_UP_ULL(30ULL * bias, (u64)clkctl2); |
| 58 | if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH) | 62 | if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH) |
| @@ -62,7 +66,11 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg) | |||
| 62 | } | 66 | } |
| 63 | 67 | ||
| 64 | raw_time = I915_READ(reg) * units; | 68 | raw_time = I915_READ(reg) * units; |
| 65 | return DIV_ROUND_UP_ULL(raw_time, div); | 69 | ret = DIV_ROUND_UP_ULL(raw_time, div); |
| 70 | |||
| 71 | out: | ||
| 72 | intel_runtime_pm_put(dev_priv); | ||
| 73 | return ret; | ||
| 66 | } | 74 | } |
| 67 | 75 | ||
| 68 | static ssize_t | 76 | static ssize_t |
| @@ -183,13 +191,13 @@ i915_l3_write(struct file *filp, struct kobject *kobj, | |||
| 183 | int slice = (int)(uintptr_t)attr->private; | 191 | int slice = (int)(uintptr_t)attr->private; |
| 184 | int ret; | 192 | int ret; |
| 185 | 193 | ||
| 194 | if (!HAS_HW_CONTEXTS(drm_dev)) | ||
| 195 | return -ENXIO; | ||
| 196 | |||
| 186 | ret = l3_access_valid(drm_dev, offset); | 197 | ret = l3_access_valid(drm_dev, offset); |
| 187 | if (ret) | 198 | if (ret) |
| 188 | return ret; | 199 | return ret; |
| 189 | 200 | ||
| 190 | if (dev_priv->hw_contexts_disabled) | ||
| 191 | return -ENXIO; | ||
| 192 | |||
| 193 | ret = i915_mutex_lock_interruptible(drm_dev); | 201 | ret = i915_mutex_lock_interruptible(drm_dev); |
| 194 | if (ret) | 202 | if (ret) |
| 195 | return ret; | 203 | return ret; |
| @@ -259,7 +267,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev, | |||
| 259 | if (IS_VALLEYVIEW(dev_priv->dev)) { | 267 | if (IS_VALLEYVIEW(dev_priv->dev)) { |
| 260 | u32 freq; | 268 | u32 freq; |
| 261 | freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); | 269 | freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); |
| 262 | ret = vlv_gpu_freq(dev_priv->mem_freq, (freq >> 8) & 0xff); | 270 | ret = vlv_gpu_freq(dev_priv, (freq >> 8) & 0xff); |
| 263 | } else { | 271 | } else { |
| 264 | ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER; | 272 | ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER; |
| 265 | } | 273 | } |
| @@ -276,8 +284,7 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev, | |||
| 276 | struct drm_i915_private *dev_priv = dev->dev_private; | 284 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 277 | 285 | ||
| 278 | return snprintf(buf, PAGE_SIZE, "%d\n", | 286 | return snprintf(buf, PAGE_SIZE, "%d\n", |
| 279 | vlv_gpu_freq(dev_priv->mem_freq, | 287 | vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay)); |
| 280 | dev_priv->rps.rpe_delay)); | ||
| 281 | } | 288 | } |
| 282 | 289 | ||
| 283 | static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) | 290 | static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) |
| @@ -291,7 +298,7 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute | |||
| 291 | 298 | ||
| 292 | mutex_lock(&dev_priv->rps.hw_lock); | 299 | mutex_lock(&dev_priv->rps.hw_lock); |
| 293 | if (IS_VALLEYVIEW(dev_priv->dev)) | 300 | if (IS_VALLEYVIEW(dev_priv->dev)) |
| 294 | ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.max_delay); | 301 | ret = vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay); |
| 295 | else | 302 | else |
| 296 | ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER; | 303 | ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER; |
| 297 | mutex_unlock(&dev_priv->rps.hw_lock); | 304 | mutex_unlock(&dev_priv->rps.hw_lock); |
| @@ -318,7 +325,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, | |||
| 318 | mutex_lock(&dev_priv->rps.hw_lock); | 325 | mutex_lock(&dev_priv->rps.hw_lock); |
| 319 | 326 | ||
| 320 | if (IS_VALLEYVIEW(dev_priv->dev)) { | 327 | if (IS_VALLEYVIEW(dev_priv->dev)) { |
| 321 | val = vlv_freq_opcode(dev_priv->mem_freq, val); | 328 | val = vlv_freq_opcode(dev_priv, val); |
| 322 | 329 | ||
| 323 | hw_max = valleyview_rps_max_freq(dev_priv); | 330 | hw_max = valleyview_rps_max_freq(dev_priv); |
| 324 | hw_min = valleyview_rps_min_freq(dev_priv); | 331 | hw_min = valleyview_rps_min_freq(dev_priv); |
| @@ -342,15 +349,15 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, | |||
| 342 | DRM_DEBUG("User requested overclocking to %d\n", | 349 | DRM_DEBUG("User requested overclocking to %d\n", |
| 343 | val * GT_FREQUENCY_MULTIPLIER); | 350 | val * GT_FREQUENCY_MULTIPLIER); |
| 344 | 351 | ||
| 352 | dev_priv->rps.max_delay = val; | ||
| 353 | |||
| 345 | if (dev_priv->rps.cur_delay > val) { | 354 | if (dev_priv->rps.cur_delay > val) { |
| 346 | if (IS_VALLEYVIEW(dev_priv->dev)) | 355 | if (IS_VALLEYVIEW(dev)) |
| 347 | valleyview_set_rps(dev_priv->dev, val); | 356 | valleyview_set_rps(dev, val); |
| 348 | else | 357 | else |
| 349 | gen6_set_rps(dev_priv->dev, val); | 358 | gen6_set_rps(dev, val); |
| 350 | } | 359 | } |
| 351 | 360 | ||
| 352 | dev_priv->rps.max_delay = val; | ||
| 353 | |||
| 354 | mutex_unlock(&dev_priv->rps.hw_lock); | 361 | mutex_unlock(&dev_priv->rps.hw_lock); |
| 355 | 362 | ||
| 356 | return count; | 363 | return count; |
| @@ -367,7 +374,7 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute | |||
| 367 | 374 | ||
| 368 | mutex_lock(&dev_priv->rps.hw_lock); | 375 | mutex_lock(&dev_priv->rps.hw_lock); |
| 369 | if (IS_VALLEYVIEW(dev_priv->dev)) | 376 | if (IS_VALLEYVIEW(dev_priv->dev)) |
| 370 | ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.min_delay); | 377 | ret = vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay); |
| 371 | else | 378 | else |
| 372 | ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER; | 379 | ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER; |
| 373 | mutex_unlock(&dev_priv->rps.hw_lock); | 380 | mutex_unlock(&dev_priv->rps.hw_lock); |
| @@ -394,7 +401,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, | |||
| 394 | mutex_lock(&dev_priv->rps.hw_lock); | 401 | mutex_lock(&dev_priv->rps.hw_lock); |
| 395 | 402 | ||
| 396 | if (IS_VALLEYVIEW(dev)) { | 403 | if (IS_VALLEYVIEW(dev)) { |
| 397 | val = vlv_freq_opcode(dev_priv->mem_freq, val); | 404 | val = vlv_freq_opcode(dev_priv, val); |
| 398 | 405 | ||
| 399 | hw_max = valleyview_rps_max_freq(dev_priv); | 406 | hw_max = valleyview_rps_max_freq(dev_priv); |
| 400 | hw_min = valleyview_rps_min_freq(dev_priv); | 407 | hw_min = valleyview_rps_min_freq(dev_priv); |
| @@ -411,15 +418,15 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, | |||
| 411 | return -EINVAL; | 418 | return -EINVAL; |
| 412 | } | 419 | } |
| 413 | 420 | ||
| 421 | dev_priv->rps.min_delay = val; | ||
| 422 | |||
| 414 | if (dev_priv->rps.cur_delay < val) { | 423 | if (dev_priv->rps.cur_delay < val) { |
| 415 | if (IS_VALLEYVIEW(dev)) | 424 | if (IS_VALLEYVIEW(dev)) |
| 416 | valleyview_set_rps(dev, val); | 425 | valleyview_set_rps(dev, val); |
| 417 | else | 426 | else |
| 418 | gen6_set_rps(dev_priv->dev, val); | 427 | gen6_set_rps(dev, val); |
| 419 | } | 428 | } |
| 420 | 429 | ||
| 421 | dev_priv->rps.min_delay = val; | ||
| 422 | |||
| 423 | mutex_unlock(&dev_priv->rps.hw_lock); | 430 | mutex_unlock(&dev_priv->rps.hw_lock); |
| 424 | 431 | ||
| 425 | return count; | 432 | return count; |
| @@ -449,7 +456,9 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr | |||
| 449 | ret = mutex_lock_interruptible(&dev->struct_mutex); | 456 | ret = mutex_lock_interruptible(&dev->struct_mutex); |
| 450 | if (ret) | 457 | if (ret) |
| 451 | return ret; | 458 | return ret; |
| 459 | intel_runtime_pm_get(dev_priv); | ||
| 452 | rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); | 460 | rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); |
| 461 | intel_runtime_pm_put(dev_priv); | ||
| 453 | mutex_unlock(&dev->struct_mutex); | 462 | mutex_unlock(&dev->struct_mutex); |
| 454 | 463 | ||
| 455 | if (attr == &dev_attr_gt_RP0_freq_mhz) { | 464 | if (attr == &dev_attr_gt_RP0_freq_mhz) { |
diff --git a/drivers/gpu/drm/i915/i915_ums.c b/drivers/gpu/drm/i915/i915_ums.c index 967da4772c44..caa18e855815 100644 --- a/drivers/gpu/drm/i915/i915_ums.c +++ b/drivers/gpu/drm/i915/i915_ums.c | |||
| @@ -270,6 +270,18 @@ void i915_save_display_reg(struct drm_device *dev) | |||
| 270 | } | 270 | } |
| 271 | /* FIXME: regfile.save TV & SDVO state */ | 271 | /* FIXME: regfile.save TV & SDVO state */ |
| 272 | 272 | ||
| 273 | /* Backlight */ | ||
| 274 | if (HAS_PCH_SPLIT(dev)) { | ||
| 275 | dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1); | ||
| 276 | dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2); | ||
| 277 | dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL); | ||
| 278 | dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2); | ||
| 279 | } else { | ||
| 280 | dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); | ||
| 281 | if (INTEL_INFO(dev)->gen >= 4) | ||
| 282 | dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); | ||
| 283 | } | ||
| 284 | |||
| 273 | return; | 285 | return; |
| 274 | } | 286 | } |
| 275 | 287 | ||
| @@ -280,6 +292,21 @@ void i915_restore_display_reg(struct drm_device *dev) | |||
| 280 | int dpll_b_reg, fpb0_reg, fpb1_reg; | 292 | int dpll_b_reg, fpb0_reg, fpb1_reg; |
| 281 | int i; | 293 | int i; |
| 282 | 294 | ||
| 295 | /* Backlight */ | ||
| 296 | if (HAS_PCH_SPLIT(dev)) { | ||
| 297 | I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL); | ||
| 298 | I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2); | ||
| 299 | /* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2; | ||
| 300 | * otherwise we get blank eDP screen after S3 on some machines | ||
| 301 | */ | ||
| 302 | I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->regfile.saveBLC_CPU_PWM_CTL2); | ||
| 303 | I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->regfile.saveBLC_CPU_PWM_CTL); | ||
| 304 | } else { | ||
| 305 | if (INTEL_INFO(dev)->gen >= 4) | ||
| 306 | I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2); | ||
| 307 | I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL); | ||
| 308 | } | ||
| 309 | |||
| 283 | /* Display port ratios (must be done before clock is set) */ | 310 | /* Display port ratios (must be done before clock is set) */ |
| 284 | if (SUPPORTS_INTEGRATED_DP(dev)) { | 311 | if (SUPPORTS_INTEGRATED_DP(dev)) { |
| 285 | I915_WRITE(_PIPEA_DATA_M_G4X, dev_priv->regfile.savePIPEA_GMCH_DATA_M); | 312 | I915_WRITE(_PIPEA_DATA_M_G4X, dev_priv->regfile.savePIPEA_GMCH_DATA_M); |
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index e4fba39631a5..f22041973f3a 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
| @@ -281,6 +281,34 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, | |||
| 281 | } | 281 | } |
| 282 | } | 282 | } |
| 283 | 283 | ||
| 284 | static void | ||
| 285 | parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb) | ||
| 286 | { | ||
| 287 | const struct bdb_lfp_backlight_data *backlight_data; | ||
| 288 | const struct bdb_lfp_backlight_data_entry *entry; | ||
| 289 | |||
| 290 | backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT); | ||
| 291 | if (!backlight_data) | ||
| 292 | return; | ||
| 293 | |||
| 294 | if (backlight_data->entry_size != sizeof(backlight_data->data[0])) { | ||
| 295 | DRM_DEBUG_KMS("Unsupported backlight data entry size %u\n", | ||
| 296 | backlight_data->entry_size); | ||
| 297 | return; | ||
| 298 | } | ||
| 299 | |||
| 300 | entry = &backlight_data->data[panel_type]; | ||
| 301 | |||
| 302 | dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz; | ||
| 303 | dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm; | ||
| 304 | DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, " | ||
| 305 | "active %s, min brightness %u, level %u\n", | ||
| 306 | dev_priv->vbt.backlight.pwm_freq_hz, | ||
| 307 | dev_priv->vbt.backlight.active_low_pwm ? "low" : "high", | ||
| 308 | entry->min_brightness, | ||
| 309 | backlight_data->level[panel_type]); | ||
| 310 | } | ||
| 311 | |||
| 284 | /* Try to find sdvo panel data */ | 312 | /* Try to find sdvo panel data */ |
| 285 | static void | 313 | static void |
| 286 | parse_sdvo_panel_data(struct drm_i915_private *dev_priv, | 314 | parse_sdvo_panel_data(struct drm_i915_private *dev_priv, |
| @@ -327,12 +355,12 @@ static int intel_bios_ssc_frequency(struct drm_device *dev, | |||
| 327 | { | 355 | { |
| 328 | switch (INTEL_INFO(dev)->gen) { | 356 | switch (INTEL_INFO(dev)->gen) { |
| 329 | case 2: | 357 | case 2: |
| 330 | return alternate ? 66 : 48; | 358 | return alternate ? 66667 : 48000; |
| 331 | case 3: | 359 | case 3: |
| 332 | case 4: | 360 | case 4: |
| 333 | return alternate ? 100 : 96; | 361 | return alternate ? 100000 : 96000; |
| 334 | default: | 362 | default: |
| 335 | return alternate ? 100 : 120; | 363 | return alternate ? 100000 : 120000; |
| 336 | } | 364 | } |
| 337 | } | 365 | } |
| 338 | 366 | ||
| @@ -796,7 +824,7 @@ init_vbt_defaults(struct drm_i915_private *dev_priv) | |||
| 796 | */ | 824 | */ |
| 797 | dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev, | 825 | dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev, |
| 798 | !HAS_PCH_SPLIT(dev)); | 826 | !HAS_PCH_SPLIT(dev)); |
| 799 | DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->vbt.lvds_ssc_freq); | 827 | DRM_DEBUG_KMS("Set default to SSC at %d kHz\n", dev_priv->vbt.lvds_ssc_freq); |
| 800 | 828 | ||
| 801 | for (port = PORT_A; port < I915_MAX_PORTS; port++) { | 829 | for (port = PORT_A; port < I915_MAX_PORTS; port++) { |
| 802 | struct ddi_vbt_port_info *info = | 830 | struct ddi_vbt_port_info *info = |
| @@ -894,6 +922,7 @@ intel_parse_bios(struct drm_device *dev) | |||
| 894 | parse_general_features(dev_priv, bdb); | 922 | parse_general_features(dev_priv, bdb); |
| 895 | parse_general_definitions(dev_priv, bdb); | 923 | parse_general_definitions(dev_priv, bdb); |
| 896 | parse_lfp_panel_data(dev_priv, bdb); | 924 | parse_lfp_panel_data(dev_priv, bdb); |
| 925 | parse_lfp_backlight(dev_priv, bdb); | ||
| 897 | parse_sdvo_panel_data(dev_priv, bdb); | 926 | parse_sdvo_panel_data(dev_priv, bdb); |
| 898 | parse_sdvo_device_mapping(dev_priv, bdb); | 927 | parse_sdvo_device_mapping(dev_priv, bdb); |
| 899 | parse_device_mapping(dev_priv, bdb); | 928 | parse_device_mapping(dev_priv, bdb); |
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h index f580a2b0ddd3..282de5e9f39d 100644 --- a/drivers/gpu/drm/i915/intel_bios.h +++ b/drivers/gpu/drm/i915/intel_bios.h | |||
| @@ -39,7 +39,7 @@ struct vbt_header { | |||
| 39 | u8 reserved0; | 39 | u8 reserved0; |
| 40 | u32 bdb_offset; /**< from beginning of VBT */ | 40 | u32 bdb_offset; /**< from beginning of VBT */ |
| 41 | u32 aim_offset[4]; /**< from beginning of VBT */ | 41 | u32 aim_offset[4]; /**< from beginning of VBT */ |
| 42 | } __attribute__((packed)); | 42 | } __packed; |
| 43 | 43 | ||
| 44 | struct bdb_header { | 44 | struct bdb_header { |
| 45 | u8 signature[16]; /**< Always 'BIOS_DATA_BLOCK' */ | 45 | u8 signature[16]; /**< Always 'BIOS_DATA_BLOCK' */ |
| @@ -65,7 +65,7 @@ struct vbios_data { | |||
| 65 | u8 rsvd4; /* popup memory size */ | 65 | u8 rsvd4; /* popup memory size */ |
| 66 | u8 resize_pci_bios; | 66 | u8 resize_pci_bios; |
| 67 | u8 rsvd5; /* is crt already on ddc2 */ | 67 | u8 rsvd5; /* is crt already on ddc2 */ |
| 68 | } __attribute__((packed)); | 68 | } __packed; |
| 69 | 69 | ||
| 70 | /* | 70 | /* |
| 71 | * There are several types of BIOS data blocks (BDBs), each block has | 71 | * There are several types of BIOS data blocks (BDBs), each block has |
| @@ -142,7 +142,7 @@ struct bdb_general_features { | |||
| 142 | u8 dp_ssc_enb:1; /* PCH attached eDP supports SSC */ | 142 | u8 dp_ssc_enb:1; /* PCH attached eDP supports SSC */ |
| 143 | u8 dp_ssc_freq:1; /* SSC freq for PCH attached eDP */ | 143 | u8 dp_ssc_freq:1; /* SSC freq for PCH attached eDP */ |
| 144 | u8 rsvd11:3; /* finish byte */ | 144 | u8 rsvd11:3; /* finish byte */ |
| 145 | } __attribute__((packed)); | 145 | } __packed; |
| 146 | 146 | ||
| 147 | /* pre-915 */ | 147 | /* pre-915 */ |
| 148 | #define GPIO_PIN_DVI_LVDS 0x03 /* "DVI/LVDS DDC GPIO pins" */ | 148 | #define GPIO_PIN_DVI_LVDS 0x03 /* "DVI/LVDS DDC GPIO pins" */ |
| @@ -225,7 +225,7 @@ struct old_child_dev_config { | |||
| 225 | u8 dvo2_wiring; | 225 | u8 dvo2_wiring; |
| 226 | u16 extended_type; | 226 | u16 extended_type; |
| 227 | u8 dvo_function; | 227 | u8 dvo_function; |
| 228 | } __attribute__((packed)); | 228 | } __packed; |
| 229 | 229 | ||
| 230 | /* This one contains field offsets that are known to be common for all BDB | 230 | /* This one contains field offsets that are known to be common for all BDB |
| 231 | * versions. Notice that the meaning of the contents contents may still change, | 231 | * versions. Notice that the meaning of the contents contents may still change, |
| @@ -238,7 +238,7 @@ struct common_child_dev_config { | |||
| 238 | u8 not_common2[2]; | 238 | u8 not_common2[2]; |
| 239 | u8 ddc_pin; | 239 | u8 ddc_pin; |
| 240 | u16 edid_ptr; | 240 | u16 edid_ptr; |
| 241 | } __attribute__((packed)); | 241 | } __packed; |
| 242 | 242 | ||
| 243 | /* This field changes depending on the BDB version, so the most reliable way to | 243 | /* This field changes depending on the BDB version, so the most reliable way to |
| 244 | * read it is by checking the BDB version and reading the raw pointer. */ | 244 | * read it is by checking the BDB version and reading the raw pointer. */ |
| @@ -279,7 +279,7 @@ struct bdb_general_definitions { | |||
| 279 | * sizeof(child_device_config); | 279 | * sizeof(child_device_config); |
| 280 | */ | 280 | */ |
| 281 | union child_device_config devices[0]; | 281 | union child_device_config devices[0]; |
| 282 | } __attribute__((packed)); | 282 | } __packed; |
| 283 | 283 | ||
| 284 | struct bdb_lvds_options { | 284 | struct bdb_lvds_options { |
| 285 | u8 panel_type; | 285 | u8 panel_type; |
| @@ -293,7 +293,7 @@ struct bdb_lvds_options { | |||
| 293 | u8 lvds_edid:1; | 293 | u8 lvds_edid:1; |
| 294 | u8 rsvd2:1; | 294 | u8 rsvd2:1; |
| 295 | u8 rsvd4; | 295 | u8 rsvd4; |
| 296 | } __attribute__((packed)); | 296 | } __packed; |
| 297 | 297 | ||
| 298 | /* LFP pointer table contains entries to the struct below */ | 298 | /* LFP pointer table contains entries to the struct below */ |
| 299 | struct bdb_lvds_lfp_data_ptr { | 299 | struct bdb_lvds_lfp_data_ptr { |
| @@ -303,12 +303,12 @@ struct bdb_lvds_lfp_data_ptr { | |||
| 303 | u8 dvo_table_size; | 303 | u8 dvo_table_size; |
| 304 | u16 panel_pnp_id_offset; | 304 | u16 panel_pnp_id_offset; |
| 305 | u8 pnp_table_size; | 305 | u8 pnp_table_size; |
| 306 | } __attribute__((packed)); | 306 | } __packed; |
| 307 | 307 | ||
| 308 | struct bdb_lvds_lfp_data_ptrs { | 308 | struct bdb_lvds_lfp_data_ptrs { |
| 309 | u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */ | 309 | u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */ |
| 310 | struct bdb_lvds_lfp_data_ptr ptr[16]; | 310 | struct bdb_lvds_lfp_data_ptr ptr[16]; |
| 311 | } __attribute__((packed)); | 311 | } __packed; |
| 312 | 312 | ||
| 313 | /* LFP data has 3 blocks per entry */ | 313 | /* LFP data has 3 blocks per entry */ |
| 314 | struct lvds_fp_timing { | 314 | struct lvds_fp_timing { |
| @@ -325,7 +325,7 @@ struct lvds_fp_timing { | |||
| 325 | u32 pfit_reg; | 325 | u32 pfit_reg; |
| 326 | u32 pfit_reg_val; | 326 | u32 pfit_reg_val; |
| 327 | u16 terminator; | 327 | u16 terminator; |
| 328 | } __attribute__((packed)); | 328 | } __packed; |
| 329 | 329 | ||
| 330 | struct lvds_dvo_timing { | 330 | struct lvds_dvo_timing { |
| 331 | u16 clock; /**< In 10khz */ | 331 | u16 clock; /**< In 10khz */ |
| @@ -353,7 +353,7 @@ struct lvds_dvo_timing { | |||
| 353 | u8 vsync_positive:1; | 353 | u8 vsync_positive:1; |
| 354 | u8 hsync_positive:1; | 354 | u8 hsync_positive:1; |
| 355 | u8 rsvd2:1; | 355 | u8 rsvd2:1; |
| 356 | } __attribute__((packed)); | 356 | } __packed; |
| 357 | 357 | ||
| 358 | struct lvds_pnp_id { | 358 | struct lvds_pnp_id { |
| 359 | u16 mfg_name; | 359 | u16 mfg_name; |
| @@ -361,17 +361,33 @@ struct lvds_pnp_id { | |||
| 361 | u32 serial; | 361 | u32 serial; |
| 362 | u8 mfg_week; | 362 | u8 mfg_week; |
| 363 | u8 mfg_year; | 363 | u8 mfg_year; |
| 364 | } __attribute__((packed)); | 364 | } __packed; |
| 365 | 365 | ||
| 366 | struct bdb_lvds_lfp_data_entry { | 366 | struct bdb_lvds_lfp_data_entry { |
| 367 | struct lvds_fp_timing fp_timing; | 367 | struct lvds_fp_timing fp_timing; |
| 368 | struct lvds_dvo_timing dvo_timing; | 368 | struct lvds_dvo_timing dvo_timing; |
| 369 | struct lvds_pnp_id pnp_id; | 369 | struct lvds_pnp_id pnp_id; |
| 370 | } __attribute__((packed)); | 370 | } __packed; |
| 371 | 371 | ||
| 372 | struct bdb_lvds_lfp_data { | 372 | struct bdb_lvds_lfp_data { |
| 373 | struct bdb_lvds_lfp_data_entry data[16]; | 373 | struct bdb_lvds_lfp_data_entry data[16]; |
| 374 | } __attribute__((packed)); | 374 | } __packed; |
| 375 | |||
| 376 | struct bdb_lfp_backlight_data_entry { | ||
| 377 | u8 type:2; | ||
| 378 | u8 active_low_pwm:1; | ||
| 379 | u8 obsolete1:5; | ||
| 380 | u16 pwm_freq_hz; | ||
| 381 | u8 min_brightness; | ||
| 382 | u8 obsolete2; | ||
| 383 | u8 obsolete3; | ||
| 384 | } __packed; | ||
| 385 | |||
| 386 | struct bdb_lfp_backlight_data { | ||
| 387 | u8 entry_size; | ||
| 388 | struct bdb_lfp_backlight_data_entry data[16]; | ||
| 389 | u8 level[16]; | ||
| 390 | } __packed; | ||
| 375 | 391 | ||
| 376 | struct aimdb_header { | 392 | struct aimdb_header { |
| 377 | char signature[16]; | 393 | char signature[16]; |
| @@ -379,12 +395,12 @@ struct aimdb_header { | |||
| 379 | u16 aimdb_version; | 395 | u16 aimdb_version; |
| 380 | u16 aimdb_header_size; | 396 | u16 aimdb_header_size; |
| 381 | u16 aimdb_size; | 397 | u16 aimdb_size; |
| 382 | } __attribute__((packed)); | 398 | } __packed; |
| 383 | 399 | ||
| 384 | struct aimdb_block { | 400 | struct aimdb_block { |
| 385 | u8 aimdb_id; | 401 | u8 aimdb_id; |
| 386 | u16 aimdb_size; | 402 | u16 aimdb_size; |
| 387 | } __attribute__((packed)); | 403 | } __packed; |
| 388 | 404 | ||
| 389 | struct vch_panel_data { | 405 | struct vch_panel_data { |
| 390 | u16 fp_timing_offset; | 406 | u16 fp_timing_offset; |
| @@ -395,12 +411,12 @@ struct vch_panel_data { | |||
| 395 | u8 text_fitting_size; | 411 | u8 text_fitting_size; |
| 396 | u16 graphics_fitting_offset; | 412 | u16 graphics_fitting_offset; |
| 397 | u8 graphics_fitting_size; | 413 | u8 graphics_fitting_size; |
| 398 | } __attribute__((packed)); | 414 | } __packed; |
| 399 | 415 | ||
| 400 | struct vch_bdb_22 { | 416 | struct vch_bdb_22 { |
| 401 | struct aimdb_block aimdb_block; | 417 | struct aimdb_block aimdb_block; |
| 402 | struct vch_panel_data panels[16]; | 418 | struct vch_panel_data panels[16]; |
| 403 | } __attribute__((packed)); | 419 | } __packed; |
| 404 | 420 | ||
| 405 | struct bdb_sdvo_lvds_options { | 421 | struct bdb_sdvo_lvds_options { |
| 406 | u8 panel_backlight; | 422 | u8 panel_backlight; |
| @@ -416,7 +432,7 @@ struct bdb_sdvo_lvds_options { | |||
| 416 | u8 panel_misc_bits_2; | 432 | u8 panel_misc_bits_2; |
| 417 | u8 panel_misc_bits_3; | 433 | u8 panel_misc_bits_3; |
| 418 | u8 panel_misc_bits_4; | 434 | u8 panel_misc_bits_4; |
| 419 | } __attribute__((packed)); | 435 | } __packed; |
| 420 | 436 | ||
| 421 | 437 | ||
| 422 | #define BDB_DRIVER_FEATURE_NO_LVDS 0 | 438 | #define BDB_DRIVER_FEATURE_NO_LVDS 0 |
| @@ -462,7 +478,7 @@ struct bdb_driver_features { | |||
| 462 | 478 | ||
| 463 | u8 hdmi_termination; | 479 | u8 hdmi_termination; |
| 464 | u8 custom_vbt_version; | 480 | u8 custom_vbt_version; |
| 465 | } __attribute__((packed)); | 481 | } __packed; |
| 466 | 482 | ||
| 467 | #define EDP_18BPP 0 | 483 | #define EDP_18BPP 0 |
| 468 | #define EDP_24BPP 1 | 484 | #define EDP_24BPP 1 |
| @@ -487,14 +503,14 @@ struct edp_power_seq { | |||
| 487 | u16 t9; | 503 | u16 t9; |
| 488 | u16 t10; | 504 | u16 t10; |
| 489 | u16 t11_t12; | 505 | u16 t11_t12; |
| 490 | } __attribute__ ((packed)); | 506 | } __packed; |
| 491 | 507 | ||
| 492 | struct edp_link_params { | 508 | struct edp_link_params { |
| 493 | u8 rate:4; | 509 | u8 rate:4; |
| 494 | u8 lanes:4; | 510 | u8 lanes:4; |
| 495 | u8 preemphasis:4; | 511 | u8 preemphasis:4; |
| 496 | u8 vswing:4; | 512 | u8 vswing:4; |
| 497 | } __attribute__ ((packed)); | 513 | } __packed; |
| 498 | 514 | ||
| 499 | struct bdb_edp { | 515 | struct bdb_edp { |
| 500 | struct edp_power_seq power_seqs[16]; | 516 | struct edp_power_seq power_seqs[16]; |
| @@ -505,7 +521,7 @@ struct bdb_edp { | |||
| 505 | /* ith bit indicates enabled/disabled for (i+1)th panel */ | 521 | /* ith bit indicates enabled/disabled for (i+1)th panel */ |
| 506 | u16 edp_s3d_feature; | 522 | u16 edp_s3d_feature; |
| 507 | u16 edp_t3_optimization; | 523 | u16 edp_t3_optimization; |
| 508 | } __attribute__ ((packed)); | 524 | } __packed; |
| 509 | 525 | ||
| 510 | void intel_setup_bios(struct drm_device *dev); | 526 | void intel_setup_bios(struct drm_device *dev); |
| 511 | int intel_parse_bios(struct drm_device *dev); | 527 | int intel_parse_bios(struct drm_device *dev); |
| @@ -733,6 +749,6 @@ struct bdb_mipi { | |||
| 733 | u32 hl_switch_cnt; | 749 | u32 hl_switch_cnt; |
| 734 | u32 lp_byte_clk; | 750 | u32 lp_byte_clk; |
| 735 | u32 clk_lane_switch_cnt; | 751 | u32 clk_lane_switch_cnt; |
| 736 | } __attribute__((packed)); | 752 | } __packed; |
| 737 | 753 | ||
| 738 | #endif /* _I830_BIOS_H_ */ | 754 | #endif /* _I830_BIOS_H_ */ |
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index b5b1b9b23adf..e2e39e65f109 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
| @@ -222,8 +222,9 @@ static void intel_crt_dpms(struct drm_connector *connector, int mode) | |||
| 222 | intel_modeset_check_state(connector->dev); | 222 | intel_modeset_check_state(connector->dev); |
| 223 | } | 223 | } |
| 224 | 224 | ||
| 225 | static int intel_crt_mode_valid(struct drm_connector *connector, | 225 | static enum drm_mode_status |
| 226 | struct drm_display_mode *mode) | 226 | intel_crt_mode_valid(struct drm_connector *connector, |
| 227 | struct drm_display_mode *mode) | ||
| 227 | { | 228 | { |
| 228 | struct drm_device *dev = connector->dev; | 229 | struct drm_device *dev = connector->dev; |
| 229 | 230 | ||
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index b69dc3e66c16..e06b9e017d6b 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
| @@ -73,7 +73,7 @@ static const u32 hsw_ddi_translations_hdmi[] = { | |||
| 73 | }; | 73 | }; |
| 74 | 74 | ||
| 75 | static const u32 bdw_ddi_translations_edp[] = { | 75 | static const u32 bdw_ddi_translations_edp[] = { |
| 76 | 0x00FFFFFF, 0x00000012, /* DP parameters */ | 76 | 0x00FFFFFF, 0x00000012, /* eDP parameters */ |
| 77 | 0x00EBAFFF, 0x00020011, | 77 | 0x00EBAFFF, 0x00020011, |
| 78 | 0x00C71FFF, 0x0006000F, | 78 | 0x00C71FFF, 0x0006000F, |
| 79 | 0x00FFFFFF, 0x00020011, | 79 | 0x00FFFFFF, 0x00020011, |
| @@ -696,25 +696,25 @@ intel_ddi_calculate_wrpll(int clock /* in Hz */, | |||
| 696 | *n2_out = best.n2; | 696 | *n2_out = best.n2; |
| 697 | *p_out = best.p; | 697 | *p_out = best.p; |
| 698 | *r2_out = best.r2; | 698 | *r2_out = best.r2; |
| 699 | |||
| 700 | DRM_DEBUG_KMS("WRPLL: %dHz refresh rate with p=%d, n2=%d r2=%d\n", | ||
| 701 | clock, *p_out, *n2_out, *r2_out); | ||
| 702 | } | 699 | } |
| 703 | 700 | ||
| 704 | bool intel_ddi_pll_mode_set(struct drm_crtc *crtc) | 701 | /* |
| 702 | * Tries to find a PLL for the CRTC. If it finds, it increases the refcount and | ||
| 703 | * stores it in intel_crtc->ddi_pll_sel, so other mode sets won't be able to | ||
| 704 | * steal the selected PLL. You need to call intel_ddi_pll_enable to actually | ||
| 705 | * enable the PLL. | ||
| 706 | */ | ||
| 707 | bool intel_ddi_pll_select(struct intel_crtc *intel_crtc) | ||
| 705 | { | 708 | { |
| 706 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 709 | struct drm_crtc *crtc = &intel_crtc->base; |
| 707 | struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); | 710 | struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); |
| 708 | struct drm_encoder *encoder = &intel_encoder->base; | 711 | struct drm_encoder *encoder = &intel_encoder->base; |
| 709 | struct drm_i915_private *dev_priv = crtc->dev->dev_private; | 712 | struct drm_i915_private *dev_priv = crtc->dev->dev_private; |
| 710 | struct intel_ddi_plls *plls = &dev_priv->ddi_plls; | 713 | struct intel_ddi_plls *plls = &dev_priv->ddi_plls; |
| 711 | int type = intel_encoder->type; | 714 | int type = intel_encoder->type; |
| 712 | enum pipe pipe = intel_crtc->pipe; | 715 | enum pipe pipe = intel_crtc->pipe; |
| 713 | uint32_t reg, val; | ||
| 714 | int clock = intel_crtc->config.port_clock; | 716 | int clock = intel_crtc->config.port_clock; |
| 715 | 717 | ||
| 716 | /* TODO: reuse PLLs when possible (compare values) */ | ||
| 717 | |||
| 718 | intel_ddi_put_crtc_pll(crtc); | 718 | intel_ddi_put_crtc_pll(crtc); |
| 719 | 719 | ||
| 720 | if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { | 720 | if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { |
| @@ -736,66 +736,145 @@ bool intel_ddi_pll_mode_set(struct drm_crtc *crtc) | |||
| 736 | return false; | 736 | return false; |
| 737 | } | 737 | } |
| 738 | 738 | ||
| 739 | /* We don't need to turn any PLL on because we'll use LCPLL. */ | ||
| 740 | return true; | ||
| 741 | |||
| 742 | } else if (type == INTEL_OUTPUT_HDMI) { | 739 | } else if (type == INTEL_OUTPUT_HDMI) { |
| 740 | uint32_t reg, val; | ||
| 743 | unsigned p, n2, r2; | 741 | unsigned p, n2, r2; |
| 744 | 742 | ||
| 745 | if (plls->wrpll1_refcount == 0) { | 743 | intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p); |
| 744 | |||
| 745 | val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 | | ||
| 746 | WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) | | ||
| 747 | WRPLL_DIVIDER_POST(p); | ||
| 748 | |||
| 749 | if (val == I915_READ(WRPLL_CTL1)) { | ||
| 750 | DRM_DEBUG_KMS("Reusing WRPLL 1 on pipe %c\n", | ||
| 751 | pipe_name(pipe)); | ||
| 752 | reg = WRPLL_CTL1; | ||
| 753 | } else if (val == I915_READ(WRPLL_CTL2)) { | ||
| 754 | DRM_DEBUG_KMS("Reusing WRPLL 2 on pipe %c\n", | ||
| 755 | pipe_name(pipe)); | ||
| 756 | reg = WRPLL_CTL2; | ||
| 757 | } else if (plls->wrpll1_refcount == 0) { | ||
| 746 | DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n", | 758 | DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n", |
| 747 | pipe_name(pipe)); | 759 | pipe_name(pipe)); |
| 748 | plls->wrpll1_refcount++; | ||
| 749 | reg = WRPLL_CTL1; | 760 | reg = WRPLL_CTL1; |
| 750 | intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1; | ||
| 751 | } else if (plls->wrpll2_refcount == 0) { | 761 | } else if (plls->wrpll2_refcount == 0) { |
| 752 | DRM_DEBUG_KMS("Using WRPLL 2 on pipe %c\n", | 762 | DRM_DEBUG_KMS("Using WRPLL 2 on pipe %c\n", |
| 753 | pipe_name(pipe)); | 763 | pipe_name(pipe)); |
| 754 | plls->wrpll2_refcount++; | ||
| 755 | reg = WRPLL_CTL2; | 764 | reg = WRPLL_CTL2; |
| 756 | intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2; | ||
| 757 | } else { | 765 | } else { |
| 758 | DRM_ERROR("No WRPLLs available!\n"); | 766 | DRM_ERROR("No WRPLLs available!\n"); |
| 759 | return false; | 767 | return false; |
| 760 | } | 768 | } |
| 761 | 769 | ||
| 762 | WARN(I915_READ(reg) & WRPLL_PLL_ENABLE, | 770 | DRM_DEBUG_KMS("WRPLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n", |
| 763 | "WRPLL already enabled\n"); | 771 | clock, p, n2, r2); |
| 764 | 772 | ||
| 765 | intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p); | 773 | if (reg == WRPLL_CTL1) { |
| 766 | 774 | plls->wrpll1_refcount++; | |
| 767 | val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 | | 775 | intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1; |
| 768 | WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) | | 776 | } else { |
| 769 | WRPLL_DIVIDER_POST(p); | 777 | plls->wrpll2_refcount++; |
| 778 | intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2; | ||
| 779 | } | ||
| 770 | 780 | ||
| 771 | } else if (type == INTEL_OUTPUT_ANALOG) { | 781 | } else if (type == INTEL_OUTPUT_ANALOG) { |
| 772 | if (plls->spll_refcount == 0) { | 782 | if (plls->spll_refcount == 0) { |
| 773 | DRM_DEBUG_KMS("Using SPLL on pipe %c\n", | 783 | DRM_DEBUG_KMS("Using SPLL on pipe %c\n", |
| 774 | pipe_name(pipe)); | 784 | pipe_name(pipe)); |
| 775 | plls->spll_refcount++; | 785 | plls->spll_refcount++; |
| 776 | reg = SPLL_CTL; | ||
| 777 | intel_crtc->ddi_pll_sel = PORT_CLK_SEL_SPLL; | 786 | intel_crtc->ddi_pll_sel = PORT_CLK_SEL_SPLL; |
| 778 | } else { | 787 | } else { |
| 779 | DRM_ERROR("SPLL already in use\n"); | 788 | DRM_ERROR("SPLL already in use\n"); |
| 780 | return false; | 789 | return false; |
| 781 | } | 790 | } |
| 782 | 791 | ||
| 783 | WARN(I915_READ(reg) & SPLL_PLL_ENABLE, | ||
| 784 | "SPLL already enabled\n"); | ||
| 785 | |||
| 786 | val = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC; | ||
| 787 | |||
| 788 | } else { | 792 | } else { |
| 789 | WARN(1, "Invalid DDI encoder type %d\n", type); | 793 | WARN(1, "Invalid DDI encoder type %d\n", type); |
| 790 | return false; | 794 | return false; |
| 791 | } | 795 | } |
| 792 | 796 | ||
| 793 | I915_WRITE(reg, val); | ||
| 794 | udelay(20); | ||
| 795 | |||
| 796 | return true; | 797 | return true; |
| 797 | } | 798 | } |
| 798 | 799 | ||
| 800 | /* | ||
| 801 | * To be called after intel_ddi_pll_select(). That one selects the PLL to be | ||
| 802 | * used, this one actually enables the PLL. | ||
| 803 | */ | ||
| 804 | void intel_ddi_pll_enable(struct intel_crtc *crtc) | ||
| 805 | { | ||
| 806 | struct drm_device *dev = crtc->base.dev; | ||
| 807 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 808 | struct intel_ddi_plls *plls = &dev_priv->ddi_plls; | ||
| 809 | int clock = crtc->config.port_clock; | ||
| 810 | uint32_t reg, cur_val, new_val; | ||
| 811 | int refcount; | ||
| 812 | const char *pll_name; | ||
| 813 | uint32_t enable_bit = (1 << 31); | ||
| 814 | unsigned int p, n2, r2; | ||
| 815 | |||
| 816 | BUILD_BUG_ON(enable_bit != SPLL_PLL_ENABLE); | ||
| 817 | BUILD_BUG_ON(enable_bit != WRPLL_PLL_ENABLE); | ||
| 818 | |||
| 819 | switch (crtc->ddi_pll_sel) { | ||
| 820 | case PORT_CLK_SEL_LCPLL_2700: | ||
| 821 | case PORT_CLK_SEL_LCPLL_1350: | ||
| 822 | case PORT_CLK_SEL_LCPLL_810: | ||
| 823 | /* | ||
| 824 | * LCPLL should always be enabled at this point of the mode set | ||
| 825 | * sequence, so nothing to do. | ||
| 826 | */ | ||
| 827 | return; | ||
| 828 | |||
| 829 | case PORT_CLK_SEL_SPLL: | ||
| 830 | pll_name = "SPLL"; | ||
| 831 | reg = SPLL_CTL; | ||
| 832 | refcount = plls->spll_refcount; | ||
| 833 | new_val = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | | ||
| 834 | SPLL_PLL_SSC; | ||
| 835 | break; | ||
| 836 | |||
| 837 | case PORT_CLK_SEL_WRPLL1: | ||
| 838 | case PORT_CLK_SEL_WRPLL2: | ||
| 839 | if (crtc->ddi_pll_sel == PORT_CLK_SEL_WRPLL1) { | ||
| 840 | pll_name = "WRPLL1"; | ||
| 841 | reg = WRPLL_CTL1; | ||
| 842 | refcount = plls->wrpll1_refcount; | ||
| 843 | } else { | ||
| 844 | pll_name = "WRPLL2"; | ||
| 845 | reg = WRPLL_CTL2; | ||
| 846 | refcount = plls->wrpll2_refcount; | ||
| 847 | } | ||
| 848 | |||
| 849 | intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p); | ||
| 850 | |||
| 851 | new_val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 | | ||
| 852 | WRPLL_DIVIDER_REFERENCE(r2) | | ||
| 853 | WRPLL_DIVIDER_FEEDBACK(n2) | WRPLL_DIVIDER_POST(p); | ||
| 854 | |||
| 855 | break; | ||
| 856 | |||
| 857 | case PORT_CLK_SEL_NONE: | ||
| 858 | WARN(1, "Bad selected pll: PORT_CLK_SEL_NONE\n"); | ||
| 859 | return; | ||
| 860 | default: | ||
| 861 | WARN(1, "Bad selected pll: 0x%08x\n", crtc->ddi_pll_sel); | ||
| 862 | return; | ||
| 863 | } | ||
| 864 | |||
| 865 | cur_val = I915_READ(reg); | ||
| 866 | |||
| 867 | WARN(refcount < 1, "Bad %s refcount: %d\n", pll_name, refcount); | ||
| 868 | if (refcount == 1) { | ||
| 869 | WARN(cur_val & enable_bit, "%s already enabled\n", pll_name); | ||
| 870 | I915_WRITE(reg, new_val); | ||
| 871 | POSTING_READ(reg); | ||
| 872 | udelay(20); | ||
| 873 | } else { | ||
| 874 | WARN((cur_val & enable_bit) == 0, "%s disabled\n", pll_name); | ||
| 875 | } | ||
| 876 | } | ||
| 877 | |||
| 799 | void intel_ddi_set_pipe_settings(struct drm_crtc *crtc) | 878 | void intel_ddi_set_pipe_settings(struct drm_crtc *crtc) |
| 800 | { | 879 | { |
| 801 | struct drm_i915_private *dev_priv = crtc->dev->dev_private; | 880 | struct drm_i915_private *dev_priv = crtc->dev->dev_private; |
| @@ -1121,9 +1200,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder) | |||
| 1121 | 1200 | ||
| 1122 | if (type == INTEL_OUTPUT_EDP) { | 1201 | if (type == INTEL_OUTPUT_EDP) { |
| 1123 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | 1202 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
| 1124 | ironlake_edp_panel_vdd_on(intel_dp); | ||
| 1125 | ironlake_edp_panel_on(intel_dp); | 1203 | ironlake_edp_panel_on(intel_dp); |
| 1126 | ironlake_edp_panel_vdd_off(intel_dp, true); | ||
| 1127 | } | 1204 | } |
| 1128 | 1205 | ||
| 1129 | WARN_ON(intel_crtc->ddi_pll_sel == PORT_CLK_SEL_NONE); | 1206 | WARN_ON(intel_crtc->ddi_pll_sel == PORT_CLK_SEL_NONE); |
| @@ -1166,7 +1243,6 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder) | |||
| 1166 | 1243 | ||
| 1167 | if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { | 1244 | if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { |
| 1168 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | 1245 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
| 1169 | ironlake_edp_panel_vdd_on(intel_dp); | ||
| 1170 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); | 1246 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); |
| 1171 | ironlake_edp_panel_off(intel_dp); | 1247 | ironlake_edp_panel_off(intel_dp); |
| 1172 | } | 1248 | } |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 2bde35d34eb9..9fa24347963a 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -90,8 +90,8 @@ intel_fdi_link_freq(struct drm_device *dev) | |||
| 90 | 90 | ||
| 91 | static const intel_limit_t intel_limits_i8xx_dac = { | 91 | static const intel_limit_t intel_limits_i8xx_dac = { |
| 92 | .dot = { .min = 25000, .max = 350000 }, | 92 | .dot = { .min = 25000, .max = 350000 }, |
| 93 | .vco = { .min = 930000, .max = 1400000 }, | 93 | .vco = { .min = 908000, .max = 1512000 }, |
| 94 | .n = { .min = 3, .max = 16 }, | 94 | .n = { .min = 2, .max = 16 }, |
| 95 | .m = { .min = 96, .max = 140 }, | 95 | .m = { .min = 96, .max = 140 }, |
| 96 | .m1 = { .min = 18, .max = 26 }, | 96 | .m1 = { .min = 18, .max = 26 }, |
| 97 | .m2 = { .min = 6, .max = 16 }, | 97 | .m2 = { .min = 6, .max = 16 }, |
| @@ -103,8 +103,8 @@ static const intel_limit_t intel_limits_i8xx_dac = { | |||
| 103 | 103 | ||
| 104 | static const intel_limit_t intel_limits_i8xx_dvo = { | 104 | static const intel_limit_t intel_limits_i8xx_dvo = { |
| 105 | .dot = { .min = 25000, .max = 350000 }, | 105 | .dot = { .min = 25000, .max = 350000 }, |
| 106 | .vco = { .min = 930000, .max = 1400000 }, | 106 | .vco = { .min = 908000, .max = 1512000 }, |
| 107 | .n = { .min = 3, .max = 16 }, | 107 | .n = { .min = 2, .max = 16 }, |
| 108 | .m = { .min = 96, .max = 140 }, | 108 | .m = { .min = 96, .max = 140 }, |
| 109 | .m1 = { .min = 18, .max = 26 }, | 109 | .m1 = { .min = 18, .max = 26 }, |
| 110 | .m2 = { .min = 6, .max = 16 }, | 110 | .m2 = { .min = 6, .max = 16 }, |
| @@ -116,8 +116,8 @@ static const intel_limit_t intel_limits_i8xx_dvo = { | |||
| 116 | 116 | ||
| 117 | static const intel_limit_t intel_limits_i8xx_lvds = { | 117 | static const intel_limit_t intel_limits_i8xx_lvds = { |
| 118 | .dot = { .min = 25000, .max = 350000 }, | 118 | .dot = { .min = 25000, .max = 350000 }, |
| 119 | .vco = { .min = 930000, .max = 1400000 }, | 119 | .vco = { .min = 908000, .max = 1512000 }, |
| 120 | .n = { .min = 3, .max = 16 }, | 120 | .n = { .min = 2, .max = 16 }, |
| 121 | .m = { .min = 96, .max = 140 }, | 121 | .m = { .min = 96, .max = 140 }, |
| 122 | .m1 = { .min = 18, .max = 26 }, | 122 | .m1 = { .min = 18, .max = 26 }, |
| 123 | .m2 = { .min = 6, .max = 16 }, | 123 | .m2 = { .min = 6, .max = 16 }, |
| @@ -329,6 +329,8 @@ static void vlv_clock(int refclk, intel_clock_t *clock) | |||
| 329 | { | 329 | { |
| 330 | clock->m = clock->m1 * clock->m2; | 330 | clock->m = clock->m1 * clock->m2; |
| 331 | clock->p = clock->p1 * clock->p2; | 331 | clock->p = clock->p1 * clock->p2; |
| 332 | if (WARN_ON(clock->n == 0 || clock->p == 0)) | ||
| 333 | return; | ||
| 332 | clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n); | 334 | clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n); |
| 333 | clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); | 335 | clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); |
| 334 | } | 336 | } |
| @@ -430,6 +432,8 @@ static void pineview_clock(int refclk, intel_clock_t *clock) | |||
| 430 | { | 432 | { |
| 431 | clock->m = clock->m2 + 2; | 433 | clock->m = clock->m2 + 2; |
| 432 | clock->p = clock->p1 * clock->p2; | 434 | clock->p = clock->p1 * clock->p2; |
| 435 | if (WARN_ON(clock->n == 0 || clock->p == 0)) | ||
| 436 | return; | ||
| 433 | clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n); | 437 | clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n); |
| 434 | clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); | 438 | clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); |
| 435 | } | 439 | } |
| @@ -443,6 +447,8 @@ static void i9xx_clock(int refclk, intel_clock_t *clock) | |||
| 443 | { | 447 | { |
| 444 | clock->m = i9xx_dpll_compute_m(clock); | 448 | clock->m = i9xx_dpll_compute_m(clock); |
| 445 | clock->p = clock->p1 * clock->p2; | 449 | clock->p = clock->p1 * clock->p2; |
| 450 | if (WARN_ON(clock->n + 2 == 0 || clock->p == 0)) | ||
| 451 | return; | ||
| 446 | clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2); | 452 | clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2); |
| 447 | clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); | 453 | clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); |
| 448 | } | 454 | } |
| @@ -748,10 +754,10 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, | |||
| 748 | return intel_crtc->config.cpu_transcoder; | 754 | return intel_crtc->config.cpu_transcoder; |
| 749 | } | 755 | } |
| 750 | 756 | ||
| 751 | static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe) | 757 | static void g4x_wait_for_vblank(struct drm_device *dev, int pipe) |
| 752 | { | 758 | { |
| 753 | struct drm_i915_private *dev_priv = dev->dev_private; | 759 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 754 | u32 frame, frame_reg = PIPEFRAME(pipe); | 760 | u32 frame, frame_reg = PIPE_FRMCOUNT_GM45(pipe); |
| 755 | 761 | ||
| 756 | frame = I915_READ(frame_reg); | 762 | frame = I915_READ(frame_reg); |
| 757 | 763 | ||
| @@ -772,8 +778,8 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe) | |||
| 772 | struct drm_i915_private *dev_priv = dev->dev_private; | 778 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 773 | int pipestat_reg = PIPESTAT(pipe); | 779 | int pipestat_reg = PIPESTAT(pipe); |
| 774 | 780 | ||
| 775 | if (INTEL_INFO(dev)->gen >= 5) { | 781 | if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { |
| 776 | ironlake_wait_for_vblank(dev, pipe); | 782 | g4x_wait_for_vblank(dev, pipe); |
| 777 | return; | 783 | return; |
| 778 | } | 784 | } |
| 779 | 785 | ||
| @@ -1205,15 +1211,12 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv, | |||
| 1205 | } | 1211 | } |
| 1206 | } | 1212 | } |
| 1207 | 1213 | ||
| 1208 | static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv) | 1214 | static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv) |
| 1209 | { | 1215 | { |
| 1210 | u32 val; | 1216 | u32 val; |
| 1211 | bool enabled; | 1217 | bool enabled; |
| 1212 | 1218 | ||
| 1213 | if (HAS_PCH_LPT(dev_priv->dev)) { | 1219 | WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev))); |
| 1214 | DRM_DEBUG_DRIVER("LPT does not has PCH refclk, skipping check\n"); | ||
| 1215 | return; | ||
| 1216 | } | ||
| 1217 | 1220 | ||
| 1218 | val = I915_READ(PCH_DREF_CONTROL); | 1221 | val = I915_READ(PCH_DREF_CONTROL); |
| 1219 | enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK | | 1222 | enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK | |
| @@ -1361,6 +1364,24 @@ static void intel_init_dpio(struct drm_device *dev) | |||
| 1361 | if (!IS_VALLEYVIEW(dev)) | 1364 | if (!IS_VALLEYVIEW(dev)) |
| 1362 | return; | 1365 | return; |
| 1363 | 1366 | ||
| 1367 | DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO; | ||
| 1368 | } | ||
| 1369 | |||
| 1370 | static void intel_reset_dpio(struct drm_device *dev) | ||
| 1371 | { | ||
| 1372 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 1373 | |||
| 1374 | if (!IS_VALLEYVIEW(dev)) | ||
| 1375 | return; | ||
| 1376 | |||
| 1377 | /* | ||
| 1378 | * Enable the CRI clock source so we can get at the display and the | ||
| 1379 | * reference clock for VGA hotplug / manual detection. | ||
| 1380 | */ | ||
| 1381 | I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | | ||
| 1382 | DPLL_REFA_CLK_ENABLE_VLV | | ||
| 1383 | DPLL_INTEGRATED_CRI_CLK_VLV); | ||
| 1384 | |||
| 1364 | /* | 1385 | /* |
| 1365 | * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx - | 1386 | * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx - |
| 1366 | * 6. De-assert cmn_reset/side_reset. Same as VLV X0. | 1387 | * 6. De-assert cmn_reset/side_reset. Same as VLV X0. |
| @@ -1487,25 +1508,35 @@ static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) | |||
| 1487 | /* Make sure the pipe isn't still relying on us */ | 1508 | /* Make sure the pipe isn't still relying on us */ |
| 1488 | assert_pipe_disabled(dev_priv, pipe); | 1509 | assert_pipe_disabled(dev_priv, pipe); |
| 1489 | 1510 | ||
| 1490 | /* Leave integrated clock source enabled */ | 1511 | /* |
| 1512 | * Leave integrated clock source and reference clock enabled for pipe B. | ||
| 1513 | * The latter is needed for VGA hotplug / manual detection. | ||
| 1514 | */ | ||
| 1491 | if (pipe == PIPE_B) | 1515 | if (pipe == PIPE_B) |
| 1492 | val = DPLL_INTEGRATED_CRI_CLK_VLV; | 1516 | val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV; |
| 1493 | I915_WRITE(DPLL(pipe), val); | 1517 | I915_WRITE(DPLL(pipe), val); |
| 1494 | POSTING_READ(DPLL(pipe)); | 1518 | POSTING_READ(DPLL(pipe)); |
| 1495 | } | 1519 | } |
| 1496 | 1520 | ||
| 1497 | void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port) | 1521 | void vlv_wait_port_ready(struct drm_i915_private *dev_priv, |
| 1522 | struct intel_digital_port *dport) | ||
| 1498 | { | 1523 | { |
| 1499 | u32 port_mask; | 1524 | u32 port_mask; |
| 1500 | 1525 | ||
| 1501 | if (!port) | 1526 | switch (dport->port) { |
| 1527 | case PORT_B: | ||
| 1502 | port_mask = DPLL_PORTB_READY_MASK; | 1528 | port_mask = DPLL_PORTB_READY_MASK; |
| 1503 | else | 1529 | break; |
| 1530 | case PORT_C: | ||
| 1504 | port_mask = DPLL_PORTC_READY_MASK; | 1531 | port_mask = DPLL_PORTC_READY_MASK; |
| 1532 | break; | ||
| 1533 | default: | ||
| 1534 | BUG(); | ||
| 1535 | } | ||
| 1505 | 1536 | ||
| 1506 | if (wait_for((I915_READ(DPLL(0)) & port_mask) == 0, 1000)) | 1537 | if (wait_for((I915_READ(DPLL(0)) & port_mask) == 0, 1000)) |
| 1507 | WARN(1, "timed out waiting for port %c ready: 0x%08x\n", | 1538 | WARN(1, "timed out waiting for port %c ready: 0x%08x\n", |
| 1508 | 'B' + port, I915_READ(DPLL(0))); | 1539 | port_name(dport->port), I915_READ(DPLL(0))); |
| 1509 | } | 1540 | } |
| 1510 | 1541 | ||
| 1511 | /** | 1542 | /** |
| @@ -2083,8 +2114,8 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
| 2083 | fb->pitches[0]); | 2114 | fb->pitches[0]); |
| 2084 | I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); | 2115 | I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); |
| 2085 | if (INTEL_INFO(dev)->gen >= 4) { | 2116 | if (INTEL_INFO(dev)->gen >= 4) { |
| 2086 | I915_MODIFY_DISPBASE(DSPSURF(plane), | 2117 | I915_WRITE(DSPSURF(plane), |
| 2087 | i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset); | 2118 | i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset); |
| 2088 | I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); | 2119 | I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); |
| 2089 | I915_WRITE(DSPLINOFF(plane), linear_offset); | 2120 | I915_WRITE(DSPLINOFF(plane), linear_offset); |
| 2090 | } else | 2121 | } else |
| @@ -2174,8 +2205,8 @@ static int ironlake_update_plane(struct drm_crtc *crtc, | |||
| 2174 | i915_gem_obj_ggtt_offset(obj), linear_offset, x, y, | 2205 | i915_gem_obj_ggtt_offset(obj), linear_offset, x, y, |
| 2175 | fb->pitches[0]); | 2206 | fb->pitches[0]); |
| 2176 | I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); | 2207 | I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); |
| 2177 | I915_MODIFY_DISPBASE(DSPSURF(plane), | 2208 | I915_WRITE(DSPSURF(plane), |
| 2178 | i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset); | 2209 | i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset); |
| 2179 | if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { | 2210 | if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { |
| 2180 | I915_WRITE(DSPOFFSET(plane), (y << 16) | x); | 2211 | I915_WRITE(DSPOFFSET(plane), (y << 16) | x); |
| 2181 | } else { | 2212 | } else { |
| @@ -2233,7 +2264,12 @@ void intel_display_handle_reset(struct drm_device *dev) | |||
| 2233 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 2264 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 2234 | 2265 | ||
| 2235 | mutex_lock(&crtc->mutex); | 2266 | mutex_lock(&crtc->mutex); |
| 2236 | if (intel_crtc->active) | 2267 | /* |
| 2268 | * FIXME: Once we have proper support for primary planes (and | ||
| 2269 | * disabling them without disabling the entire crtc) allow again | ||
| 2270 | * a NULL crtc->fb. | ||
| 2271 | */ | ||
| 2272 | if (intel_crtc->active && crtc->fb) | ||
| 2237 | dev_priv->display.update_plane(crtc, crtc->fb, | 2273 | dev_priv->display.update_plane(crtc, crtc->fb, |
| 2238 | crtc->x, crtc->y); | 2274 | crtc->x, crtc->y); |
| 2239 | mutex_unlock(&crtc->mutex); | 2275 | mutex_unlock(&crtc->mutex); |
| @@ -2350,6 +2386,8 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
| 2350 | I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0); | 2386 | I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0); |
| 2351 | I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0); | 2387 | I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0); |
| 2352 | } | 2388 | } |
| 2389 | intel_crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay; | ||
| 2390 | intel_crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay; | ||
| 2353 | } | 2391 | } |
| 2354 | 2392 | ||
| 2355 | ret = dev_priv->display.update_plane(crtc, fb, x, y); | 2393 | ret = dev_priv->display.update_plane(crtc, fb, x, y); |
| @@ -2944,6 +2982,30 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) | |||
| 2944 | return pending; | 2982 | return pending; |
| 2945 | } | 2983 | } |
| 2946 | 2984 | ||
| 2985 | bool intel_has_pending_fb_unpin(struct drm_device *dev) | ||
| 2986 | { | ||
| 2987 | struct intel_crtc *crtc; | ||
| 2988 | |||
| 2989 | /* Note that we don't need to be called with mode_config.lock here | ||
| 2990 | * as our list of CRTC objects is static for the lifetime of the | ||
| 2991 | * device and so cannot disappear as we iterate. Similarly, we can | ||
| 2992 | * happily treat the predicates as racy, atomic checks as userspace | ||
| 2993 | * cannot claim and pin a new fb without at least acquring the | ||
| 2994 | * struct_mutex and so serialising with us. | ||
| 2995 | */ | ||
| 2996 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { | ||
| 2997 | if (atomic_read(&crtc->unpin_work_count) == 0) | ||
| 2998 | continue; | ||
| 2999 | |||
| 3000 | if (crtc->unpin_work) | ||
| 3001 | intel_wait_for_vblank(dev, crtc->pipe); | ||
| 3002 | |||
| 3003 | return true; | ||
| 3004 | } | ||
| 3005 | |||
| 3006 | return false; | ||
| 3007 | } | ||
| 3008 | |||
| 2947 | static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) | 3009 | static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) |
| 2948 | { | 3010 | { |
| 2949 | struct drm_device *dev = crtc->dev; | 3011 | struct drm_device *dev = crtc->dev; |
| @@ -3399,9 +3461,8 @@ void hsw_enable_ips(struct intel_crtc *crtc) | |||
| 3399 | mutex_unlock(&dev_priv->rps.hw_lock); | 3461 | mutex_unlock(&dev_priv->rps.hw_lock); |
| 3400 | /* Quoting Art Runyan: "its not safe to expect any particular | 3462 | /* Quoting Art Runyan: "its not safe to expect any particular |
| 3401 | * value in IPS_CTL bit 31 after enabling IPS through the | 3463 | * value in IPS_CTL bit 31 after enabling IPS through the |
| 3402 | * mailbox." Therefore we need to defer waiting on the state | 3464 | * mailbox." Moreover, the mailbox may return a bogus state, |
| 3403 | * change. | 3465 | * so we need to just enable it and continue on. |
| 3404 | * TODO: need to fix this for state checker | ||
| 3405 | */ | 3466 | */ |
| 3406 | } else { | 3467 | } else { |
| 3407 | I915_WRITE(IPS_CTL, IPS_ENABLE); | 3468 | I915_WRITE(IPS_CTL, IPS_ENABLE); |
| @@ -3428,9 +3489,10 @@ void hsw_disable_ips(struct intel_crtc *crtc) | |||
| 3428 | mutex_lock(&dev_priv->rps.hw_lock); | 3489 | mutex_lock(&dev_priv->rps.hw_lock); |
| 3429 | WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0)); | 3490 | WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0)); |
| 3430 | mutex_unlock(&dev_priv->rps.hw_lock); | 3491 | mutex_unlock(&dev_priv->rps.hw_lock); |
| 3431 | } else | 3492 | } else { |
| 3432 | I915_WRITE(IPS_CTL, 0); | 3493 | I915_WRITE(IPS_CTL, 0); |
| 3433 | POSTING_READ(IPS_CTL); | 3494 | POSTING_READ(IPS_CTL); |
| 3495 | } | ||
| 3434 | 3496 | ||
| 3435 | /* We need to wait for a vblank before we can disable the plane. */ | 3497 | /* We need to wait for a vblank before we can disable the plane. */ |
| 3436 | intel_wait_for_vblank(dev, crtc->pipe); | 3498 | intel_wait_for_vblank(dev, crtc->pipe); |
| @@ -3465,7 +3527,7 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc) | |||
| 3465 | /* Workaround : Do not read or write the pipe palette/gamma data while | 3527 | /* Workaround : Do not read or write the pipe palette/gamma data while |
| 3466 | * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled. | 3528 | * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled. |
| 3467 | */ | 3529 | */ |
| 3468 | if (intel_crtc->config.ips_enabled && | 3530 | if (IS_HASWELL(dev) && intel_crtc->config.ips_enabled && |
| 3469 | ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) == | 3531 | ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) == |
| 3470 | GAMMA_MODE_MODE_SPLIT)) { | 3532 | GAMMA_MODE_MODE_SPLIT)) { |
| 3471 | hsw_disable_ips(intel_crtc); | 3533 | hsw_disable_ips(intel_crtc); |
| @@ -3910,6 +3972,174 @@ static void i9xx_pfit_enable(struct intel_crtc *crtc) | |||
| 3910 | I915_WRITE(BCLRPAT(crtc->pipe), 0); | 3972 | I915_WRITE(BCLRPAT(crtc->pipe), 0); |
| 3911 | } | 3973 | } |
| 3912 | 3974 | ||
| 3975 | int valleyview_get_vco(struct drm_i915_private *dev_priv) | ||
| 3976 | { | ||
| 3977 | int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 }; | ||
| 3978 | |||
| 3979 | /* Obtain SKU information */ | ||
| 3980 | mutex_lock(&dev_priv->dpio_lock); | ||
| 3981 | hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) & | ||
| 3982 | CCK_FUSE_HPLL_FREQ_MASK; | ||
| 3983 | mutex_unlock(&dev_priv->dpio_lock); | ||
| 3984 | |||
| 3985 | return vco_freq[hpll_freq]; | ||
| 3986 | } | ||
| 3987 | |||
| 3988 | /* Adjust CDclk dividers to allow high res or save power if possible */ | ||
| 3989 | static void valleyview_set_cdclk(struct drm_device *dev, int cdclk) | ||
| 3990 | { | ||
| 3991 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 3992 | u32 val, cmd; | ||
| 3993 | |||
| 3994 | if (cdclk >= 320) /* jump to highest voltage for 400MHz too */ | ||
| 3995 | cmd = 2; | ||
| 3996 | else if (cdclk == 266) | ||
| 3997 | cmd = 1; | ||
| 3998 | else | ||
| 3999 | cmd = 0; | ||
| 4000 | |||
| 4001 | mutex_lock(&dev_priv->rps.hw_lock); | ||
| 4002 | val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); | ||
| 4003 | val &= ~DSPFREQGUAR_MASK; | ||
| 4004 | val |= (cmd << DSPFREQGUAR_SHIFT); | ||
| 4005 | vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val); | ||
| 4006 | if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & | ||
| 4007 | DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT), | ||
| 4008 | 50)) { | ||
| 4009 | DRM_ERROR("timed out waiting for CDclk change\n"); | ||
| 4010 | } | ||
| 4011 | mutex_unlock(&dev_priv->rps.hw_lock); | ||
| 4012 | |||
| 4013 | if (cdclk == 400) { | ||
| 4014 | u32 divider, vco; | ||
| 4015 | |||
| 4016 | vco = valleyview_get_vco(dev_priv); | ||
| 4017 | divider = ((vco << 1) / cdclk) - 1; | ||
| 4018 | |||
| 4019 | mutex_lock(&dev_priv->dpio_lock); | ||
| 4020 | /* adjust cdclk divider */ | ||
| 4021 | val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL); | ||
| 4022 | val &= ~0xf; | ||
| 4023 | val |= divider; | ||
| 4024 | vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val); | ||
| 4025 | mutex_unlock(&dev_priv->dpio_lock); | ||
| 4026 | } | ||
| 4027 | |||
| 4028 | mutex_lock(&dev_priv->dpio_lock); | ||
| 4029 | /* adjust self-refresh exit latency value */ | ||
| 4030 | val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC); | ||
| 4031 | val &= ~0x7f; | ||
| 4032 | |||
| 4033 | /* | ||
| 4034 | * For high bandwidth configs, we set a higher latency in the bunit | ||
| 4035 | * so that the core display fetch happens in time to avoid underruns. | ||
| 4036 | */ | ||
| 4037 | if (cdclk == 400) | ||
| 4038 | val |= 4500 / 250; /* 4.5 usec */ | ||
| 4039 | else | ||
| 4040 | val |= 3000 / 250; /* 3.0 usec */ | ||
| 4041 | vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val); | ||
| 4042 | mutex_unlock(&dev_priv->dpio_lock); | ||
| 4043 | |||
| 4044 | /* Since we changed the CDclk, we need to update the GMBUSFREQ too */ | ||
| 4045 | intel_i2c_reset(dev); | ||
| 4046 | } | ||
| 4047 | |||
| 4048 | static int valleyview_cur_cdclk(struct drm_i915_private *dev_priv) | ||
| 4049 | { | ||
| 4050 | int cur_cdclk, vco; | ||
| 4051 | int divider; | ||
| 4052 | |||
| 4053 | vco = valleyview_get_vco(dev_priv); | ||
| 4054 | |||
| 4055 | mutex_lock(&dev_priv->dpio_lock); | ||
| 4056 | divider = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL); | ||
| 4057 | mutex_unlock(&dev_priv->dpio_lock); | ||
| 4058 | |||
| 4059 | divider &= 0xf; | ||
| 4060 | |||
| 4061 | cur_cdclk = (vco << 1) / (divider + 1); | ||
| 4062 | |||
| 4063 | return cur_cdclk; | ||
| 4064 | } | ||
| 4065 | |||
| 4066 | static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv, | ||
| 4067 | int max_pixclk) | ||
| 4068 | { | ||
| 4069 | int cur_cdclk; | ||
| 4070 | |||
| 4071 | cur_cdclk = valleyview_cur_cdclk(dev_priv); | ||
| 4072 | |||
| 4073 | /* | ||
| 4074 | * Really only a few cases to deal with, as only 4 CDclks are supported: | ||
| 4075 | * 200MHz | ||
| 4076 | * 267MHz | ||
| 4077 | * 320MHz | ||
| 4078 | * 400MHz | ||
| 4079 | * So we check to see whether we're above 90% of the lower bin and | ||
| 4080 | * adjust if needed. | ||
| 4081 | */ | ||
| 4082 | if (max_pixclk > 288000) { | ||
| 4083 | return 400; | ||
| 4084 | } else if (max_pixclk > 240000) { | ||
| 4085 | return 320; | ||
| 4086 | } else | ||
| 4087 | return 266; | ||
| 4088 | /* Looks like the 200MHz CDclk freq doesn't work on some configs */ | ||
| 4089 | } | ||
| 4090 | |||
| 4091 | static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv, | ||
| 4092 | unsigned modeset_pipes, | ||
| 4093 | struct intel_crtc_config *pipe_config) | ||
| 4094 | { | ||
| 4095 | struct drm_device *dev = dev_priv->dev; | ||
| 4096 | struct intel_crtc *intel_crtc; | ||
| 4097 | int max_pixclk = 0; | ||
| 4098 | |||
| 4099 | list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, | ||
| 4100 | base.head) { | ||
| 4101 | if (modeset_pipes & (1 << intel_crtc->pipe)) | ||
| 4102 | max_pixclk = max(max_pixclk, | ||
| 4103 | pipe_config->adjusted_mode.crtc_clock); | ||
| 4104 | else if (intel_crtc->base.enabled) | ||
| 4105 | max_pixclk = max(max_pixclk, | ||
| 4106 | intel_crtc->config.adjusted_mode.crtc_clock); | ||
| 4107 | } | ||
| 4108 | |||
| 4109 | return max_pixclk; | ||
| 4110 | } | ||
| 4111 | |||
| 4112 | static void valleyview_modeset_global_pipes(struct drm_device *dev, | ||
| 4113 | unsigned *prepare_pipes, | ||
| 4114 | unsigned modeset_pipes, | ||
| 4115 | struct intel_crtc_config *pipe_config) | ||
| 4116 | { | ||
| 4117 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 4118 | struct intel_crtc *intel_crtc; | ||
| 4119 | int max_pixclk = intel_mode_max_pixclk(dev_priv, modeset_pipes, | ||
| 4120 | pipe_config); | ||
| 4121 | int cur_cdclk = valleyview_cur_cdclk(dev_priv); | ||
| 4122 | |||
| 4123 | if (valleyview_calc_cdclk(dev_priv, max_pixclk) == cur_cdclk) | ||
| 4124 | return; | ||
| 4125 | |||
| 4126 | list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, | ||
| 4127 | base.head) | ||
| 4128 | if (intel_crtc->base.enabled) | ||
| 4129 | *prepare_pipes |= (1 << intel_crtc->pipe); | ||
| 4130 | } | ||
| 4131 | |||
| 4132 | static void valleyview_modeset_global_resources(struct drm_device *dev) | ||
| 4133 | { | ||
| 4134 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 4135 | int max_pixclk = intel_mode_max_pixclk(dev_priv, 0, NULL); | ||
| 4136 | int cur_cdclk = valleyview_cur_cdclk(dev_priv); | ||
| 4137 | int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk); | ||
| 4138 | |||
| 4139 | if (req_cdclk != cur_cdclk) | ||
| 4140 | valleyview_set_cdclk(dev, req_cdclk); | ||
| 4141 | } | ||
| 4142 | |||
| 3913 | static void valleyview_crtc_enable(struct drm_crtc *crtc) | 4143 | static void valleyview_crtc_enable(struct drm_crtc *crtc) |
| 3914 | { | 4144 | { |
| 3915 | struct drm_device *dev = crtc->dev; | 4145 | struct drm_device *dev = crtc->dev; |
| @@ -4570,9 +4800,8 @@ static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors) | |||
| 4570 | refclk = 100000; | 4800 | refclk = 100000; |
| 4571 | } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && | 4801 | } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && |
| 4572 | intel_panel_use_ssc(dev_priv) && num_connectors < 2) { | 4802 | intel_panel_use_ssc(dev_priv) && num_connectors < 2) { |
| 4573 | refclk = dev_priv->vbt.lvds_ssc_freq * 1000; | 4803 | refclk = dev_priv->vbt.lvds_ssc_freq; |
| 4574 | DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", | 4804 | DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); |
| 4575 | refclk / 1000); | ||
| 4576 | } else if (!IS_GEN2(dev)) { | 4805 | } else if (!IS_GEN2(dev)) { |
| 4577 | refclk = 96000; | 4806 | refclk = 96000; |
| 4578 | } else { | 4807 | } else { |
| @@ -4634,24 +4863,24 @@ static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe | |||
| 4634 | * PLLB opamp always calibrates to max value of 0x3f, force enable it | 4863 | * PLLB opamp always calibrates to max value of 0x3f, force enable it |
| 4635 | * and set it to a reasonable value instead. | 4864 | * and set it to a reasonable value instead. |
| 4636 | */ | 4865 | */ |
| 4637 | reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF(1)); | 4866 | reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1)); |
| 4638 | reg_val &= 0xffffff00; | 4867 | reg_val &= 0xffffff00; |
| 4639 | reg_val |= 0x00000030; | 4868 | reg_val |= 0x00000030; |
| 4640 | vlv_dpio_write(dev_priv, pipe, DPIO_IREF(1), reg_val); | 4869 | vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val); |
| 4641 | 4870 | ||
| 4642 | reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_CALIBRATION); | 4871 | reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13); |
| 4643 | reg_val &= 0x8cffffff; | 4872 | reg_val &= 0x8cffffff; |
| 4644 | reg_val = 0x8c000000; | 4873 | reg_val = 0x8c000000; |
| 4645 | vlv_dpio_write(dev_priv, pipe, DPIO_CALIBRATION, reg_val); | 4874 | vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); |
| 4646 | 4875 | ||
| 4647 | reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF(1)); | 4876 | reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1)); |
| 4648 | reg_val &= 0xffffff00; | 4877 | reg_val &= 0xffffff00; |
| 4649 | vlv_dpio_write(dev_priv, pipe, DPIO_IREF(1), reg_val); | 4878 | vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val); |
| 4650 | 4879 | ||
| 4651 | reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_CALIBRATION); | 4880 | reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13); |
| 4652 | reg_val &= 0x00ffffff; | 4881 | reg_val &= 0x00ffffff; |
| 4653 | reg_val |= 0xb0000000; | 4882 | reg_val |= 0xb0000000; |
| 4654 | vlv_dpio_write(dev_priv, pipe, DPIO_CALIBRATION, reg_val); | 4883 | vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); |
| 4655 | } | 4884 | } |
| 4656 | 4885 | ||
| 4657 | static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc, | 4886 | static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc, |
| @@ -4720,15 +4949,15 @@ static void vlv_update_pll(struct intel_crtc *crtc) | |||
| 4720 | vlv_pllb_recal_opamp(dev_priv, pipe); | 4949 | vlv_pllb_recal_opamp(dev_priv, pipe); |
| 4721 | 4950 | ||
| 4722 | /* Set up Tx target for periodic Rcomp update */ | 4951 | /* Set up Tx target for periodic Rcomp update */ |
| 4723 | vlv_dpio_write(dev_priv, pipe, DPIO_IREF_BCAST, 0x0100000f); | 4952 | vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f); |
| 4724 | 4953 | ||
| 4725 | /* Disable target IRef on PLL */ | 4954 | /* Disable target IRef on PLL */ |
| 4726 | reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF_CTL(pipe)); | 4955 | reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe)); |
| 4727 | reg_val &= 0x00ffffff; | 4956 | reg_val &= 0x00ffffff; |
| 4728 | vlv_dpio_write(dev_priv, pipe, DPIO_IREF_CTL(pipe), reg_val); | 4957 | vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val); |
| 4729 | 4958 | ||
| 4730 | /* Disable fast lock */ | 4959 | /* Disable fast lock */ |
| 4731 | vlv_dpio_write(dev_priv, pipe, DPIO_FASTCLK_DISABLE, 0x610); | 4960 | vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610); |
| 4732 | 4961 | ||
| 4733 | /* Set idtafcrecal before PLL is enabled */ | 4962 | /* Set idtafcrecal before PLL is enabled */ |
| 4734 | mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK)); | 4963 | mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK)); |
| @@ -4742,50 +4971,54 @@ static void vlv_update_pll(struct intel_crtc *crtc) | |||
| 4742 | * Note: don't use the DAC post divider as it seems unstable. | 4971 | * Note: don't use the DAC post divider as it seems unstable. |
| 4743 | */ | 4972 | */ |
| 4744 | mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT); | 4973 | mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT); |
| 4745 | vlv_dpio_write(dev_priv, pipe, DPIO_DIV(pipe), mdiv); | 4974 | vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); |
| 4746 | 4975 | ||
| 4747 | mdiv |= DPIO_ENABLE_CALIBRATION; | 4976 | mdiv |= DPIO_ENABLE_CALIBRATION; |
| 4748 | vlv_dpio_write(dev_priv, pipe, DPIO_DIV(pipe), mdiv); | 4977 | vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); |
| 4749 | 4978 | ||
| 4750 | /* Set HBR and RBR LPF coefficients */ | 4979 | /* Set HBR and RBR LPF coefficients */ |
| 4751 | if (crtc->config.port_clock == 162000 || | 4980 | if (crtc->config.port_clock == 162000 || |
| 4752 | intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) || | 4981 | intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) || |
| 4753 | intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) | 4982 | intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) |
| 4754 | vlv_dpio_write(dev_priv, pipe, DPIO_LPF_COEFF(pipe), | 4983 | vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), |
| 4755 | 0x009f0003); | 4984 | 0x009f0003); |
| 4756 | else | 4985 | else |
| 4757 | vlv_dpio_write(dev_priv, pipe, DPIO_LPF_COEFF(pipe), | 4986 | vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), |
| 4758 | 0x00d0000f); | 4987 | 0x00d0000f); |
| 4759 | 4988 | ||
| 4760 | if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) || | 4989 | if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) || |
| 4761 | intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) { | 4990 | intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) { |
| 4762 | /* Use SSC source */ | 4991 | /* Use SSC source */ |
| 4763 | if (!pipe) | 4992 | if (!pipe) |
| 4764 | vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe), | 4993 | vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), |
| 4765 | 0x0df40000); | 4994 | 0x0df40000); |
| 4766 | else | 4995 | else |
| 4767 | vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe), | 4996 | vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), |
| 4768 | 0x0df70000); | 4997 | 0x0df70000); |
| 4769 | } else { /* HDMI or VGA */ | 4998 | } else { /* HDMI or VGA */ |
| 4770 | /* Use bend source */ | 4999 | /* Use bend source */ |
| 4771 | if (!pipe) | 5000 | if (!pipe) |
| 4772 | vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe), | 5001 | vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), |
| 4773 | 0x0df70000); | 5002 | 0x0df70000); |
| 4774 | else | 5003 | else |
| 4775 | vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe), | 5004 | vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), |
| 4776 | 0x0df40000); | 5005 | 0x0df40000); |
| 4777 | } | 5006 | } |
| 4778 | 5007 | ||
| 4779 | coreclk = vlv_dpio_read(dev_priv, pipe, DPIO_CORE_CLK(pipe)); | 5008 | coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe)); |
| 4780 | coreclk = (coreclk & 0x0000ff00) | 0x01c00000; | 5009 | coreclk = (coreclk & 0x0000ff00) | 0x01c00000; |
| 4781 | if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) || | 5010 | if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) || |
| 4782 | intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP)) | 5011 | intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP)) |
| 4783 | coreclk |= 0x01000000; | 5012 | coreclk |= 0x01000000; |
| 4784 | vlv_dpio_write(dev_priv, pipe, DPIO_CORE_CLK(pipe), coreclk); | 5013 | vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk); |
| 4785 | 5014 | ||
| 4786 | vlv_dpio_write(dev_priv, pipe, DPIO_PLL_CML(pipe), 0x87871000); | 5015 | vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000); |
| 4787 | 5016 | ||
| 4788 | /* Enable DPIO clock input */ | 5017 | /* |
| 5018 | * Enable DPIO clock input. We should never disable the reference | ||
| 5019 | * clock for pipe B, since VGA hotplug / manual detection depends | ||
| 5020 | * on it. | ||
| 5021 | */ | ||
| 4789 | dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV | | 5022 | dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV | |
| 4790 | DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV; | 5023 | DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV; |
| 4791 | /* We should never disable this, set it here for state tracking */ | 5024 | /* We should never disable this, set it here for state tracking */ |
| @@ -5230,6 +5463,9 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc, | |||
| 5230 | struct drm_i915_private *dev_priv = dev->dev_private; | 5463 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 5231 | uint32_t tmp; | 5464 | uint32_t tmp; |
| 5232 | 5465 | ||
| 5466 | if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev))) | ||
| 5467 | return; | ||
| 5468 | |||
| 5233 | tmp = I915_READ(PFIT_CONTROL); | 5469 | tmp = I915_READ(PFIT_CONTROL); |
| 5234 | if (!(tmp & PFIT_ENABLE)) | 5470 | if (!(tmp & PFIT_ENABLE)) |
| 5235 | return; | 5471 | return; |
| @@ -5261,7 +5497,7 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc, | |||
| 5261 | int refclk = 100000; | 5497 | int refclk = 100000; |
| 5262 | 5498 | ||
| 5263 | mutex_lock(&dev_priv->dpio_lock); | 5499 | mutex_lock(&dev_priv->dpio_lock); |
| 5264 | mdiv = vlv_dpio_read(dev_priv, pipe, DPIO_DIV(pipe)); | 5500 | mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe)); |
| 5265 | mutex_unlock(&dev_priv->dpio_lock); | 5501 | mutex_unlock(&dev_priv->dpio_lock); |
| 5266 | 5502 | ||
| 5267 | clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7; | 5503 | clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7; |
| @@ -5718,9 +5954,9 @@ static int ironlake_get_refclk(struct drm_crtc *crtc) | |||
| 5718 | } | 5954 | } |
| 5719 | 5955 | ||
| 5720 | if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) { | 5956 | if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) { |
| 5721 | DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", | 5957 | DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", |
| 5722 | dev_priv->vbt.lvds_ssc_freq); | 5958 | dev_priv->vbt.lvds_ssc_freq); |
| 5723 | return dev_priv->vbt.lvds_ssc_freq * 1000; | 5959 | return dev_priv->vbt.lvds_ssc_freq; |
| 5724 | } | 5960 | } |
| 5725 | 5961 | ||
| 5726 | return 120000; | 5962 | return 120000; |
| @@ -5982,7 +6218,7 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc, | |||
| 5982 | factor = 21; | 6218 | factor = 21; |
| 5983 | if (is_lvds) { | 6219 | if (is_lvds) { |
| 5984 | if ((intel_panel_use_ssc(dev_priv) && | 6220 | if ((intel_panel_use_ssc(dev_priv) && |
| 5985 | dev_priv->vbt.lvds_ssc_freq == 100) || | 6221 | dev_priv->vbt.lvds_ssc_freq == 100000) || |
| 5986 | (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev))) | 6222 | (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev))) |
| 5987 | factor = 25; | 6223 | factor = 25; |
| 5988 | } else if (intel_crtc->config.sdvo_tv_clock) | 6224 | } else if (intel_crtc->config.sdvo_tv_clock) |
| @@ -6323,7 +6559,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) | |||
| 6323 | 6559 | ||
| 6324 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | 6560 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
| 6325 | val = I915_READ(DEIMR); | 6561 | val = I915_READ(DEIMR); |
| 6326 | WARN((val & ~DE_PCH_EVENT_IVB) != val, | 6562 | WARN((val | DE_PCH_EVENT_IVB) != 0xffffffff, |
| 6327 | "Unexpected DEIMR bits enabled: 0x%x\n", val); | 6563 | "Unexpected DEIMR bits enabled: 0x%x\n", val); |
| 6328 | val = I915_READ(SDEIMR); | 6564 | val = I915_READ(SDEIMR); |
| 6329 | WARN((val | SDE_HOTPLUG_MASK_CPT) != 0xffffffff, | 6565 | WARN((val | SDE_HOTPLUG_MASK_CPT) != 0xffffffff, |
| @@ -6402,7 +6638,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) | |||
| 6402 | 6638 | ||
| 6403 | /* Make sure we're not on PC8 state before disabling PC8, otherwise | 6639 | /* Make sure we're not on PC8 state before disabling PC8, otherwise |
| 6404 | * we'll hang the machine! */ | 6640 | * we'll hang the machine! */ |
| 6405 | gen6_gt_force_wake_get(dev_priv); | 6641 | gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); |
| 6406 | 6642 | ||
| 6407 | if (val & LCPLL_POWER_DOWN_ALLOW) { | 6643 | if (val & LCPLL_POWER_DOWN_ALLOW) { |
| 6408 | val &= ~LCPLL_POWER_DOWN_ALLOW; | 6644 | val &= ~LCPLL_POWER_DOWN_ALLOW; |
| @@ -6436,7 +6672,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) | |||
| 6436 | DRM_ERROR("Switching back to LCPLL failed\n"); | 6672 | DRM_ERROR("Switching back to LCPLL failed\n"); |
| 6437 | } | 6673 | } |
| 6438 | 6674 | ||
| 6439 | gen6_gt_force_wake_put(dev_priv); | 6675 | gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); |
| 6440 | } | 6676 | } |
| 6441 | 6677 | ||
| 6442 | void hsw_enable_pc8_work(struct work_struct *__work) | 6678 | void hsw_enable_pc8_work(struct work_struct *__work) |
| @@ -6447,6 +6683,8 @@ void hsw_enable_pc8_work(struct work_struct *__work) | |||
| 6447 | struct drm_device *dev = dev_priv->dev; | 6683 | struct drm_device *dev = dev_priv->dev; |
| 6448 | uint32_t val; | 6684 | uint32_t val; |
| 6449 | 6685 | ||
| 6686 | WARN_ON(!HAS_PC8(dev)); | ||
| 6687 | |||
| 6450 | if (dev_priv->pc8.enabled) | 6688 | if (dev_priv->pc8.enabled) |
| 6451 | return; | 6689 | return; |
| 6452 | 6690 | ||
| @@ -6463,6 +6701,8 @@ void hsw_enable_pc8_work(struct work_struct *__work) | |||
| 6463 | lpt_disable_clkout_dp(dev); | 6701 | lpt_disable_clkout_dp(dev); |
| 6464 | hsw_pc8_disable_interrupts(dev); | 6702 | hsw_pc8_disable_interrupts(dev); |
| 6465 | hsw_disable_lcpll(dev_priv, true, true); | 6703 | hsw_disable_lcpll(dev_priv, true, true); |
| 6704 | |||
| 6705 | intel_runtime_pm_put(dev_priv); | ||
| 6466 | } | 6706 | } |
| 6467 | 6707 | ||
| 6468 | static void __hsw_enable_package_c8(struct drm_i915_private *dev_priv) | 6708 | static void __hsw_enable_package_c8(struct drm_i915_private *dev_priv) |
| @@ -6492,12 +6732,16 @@ static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv) | |||
| 6492 | if (dev_priv->pc8.disable_count != 1) | 6732 | if (dev_priv->pc8.disable_count != 1) |
| 6493 | return; | 6733 | return; |
| 6494 | 6734 | ||
| 6735 | WARN_ON(!HAS_PC8(dev)); | ||
| 6736 | |||
| 6495 | cancel_delayed_work_sync(&dev_priv->pc8.enable_work); | 6737 | cancel_delayed_work_sync(&dev_priv->pc8.enable_work); |
| 6496 | if (!dev_priv->pc8.enabled) | 6738 | if (!dev_priv->pc8.enabled) |
| 6497 | return; | 6739 | return; |
| 6498 | 6740 | ||
| 6499 | DRM_DEBUG_KMS("Disabling package C8+\n"); | 6741 | DRM_DEBUG_KMS("Disabling package C8+\n"); |
| 6500 | 6742 | ||
| 6743 | intel_runtime_pm_get(dev_priv); | ||
| 6744 | |||
| 6501 | hsw_restore_lcpll(dev_priv); | 6745 | hsw_restore_lcpll(dev_priv); |
| 6502 | hsw_pc8_restore_interrupts(dev); | 6746 | hsw_pc8_restore_interrupts(dev); |
| 6503 | lpt_init_pch_refclk(dev); | 6747 | lpt_init_pch_refclk(dev); |
| @@ -6704,8 +6948,9 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc, | |||
| 6704 | int plane = intel_crtc->plane; | 6948 | int plane = intel_crtc->plane; |
| 6705 | int ret; | 6949 | int ret; |
| 6706 | 6950 | ||
| 6707 | if (!intel_ddi_pll_mode_set(crtc)) | 6951 | if (!intel_ddi_pll_select(intel_crtc)) |
| 6708 | return -EINVAL; | 6952 | return -EINVAL; |
| 6953 | intel_ddi_pll_enable(intel_crtc); | ||
| 6709 | 6954 | ||
| 6710 | if (intel_crtc->config.has_dp_encoder) | 6955 | if (intel_crtc->config.has_dp_encoder) |
| 6711 | intel_dp_set_m_n(intel_crtc); | 6956 | intel_dp_set_m_n(intel_crtc); |
| @@ -6796,8 +7041,9 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, | |||
| 6796 | if (intel_display_power_enabled(dev, pfit_domain)) | 7041 | if (intel_display_power_enabled(dev, pfit_domain)) |
| 6797 | ironlake_get_pfit_config(crtc, pipe_config); | 7042 | ironlake_get_pfit_config(crtc, pipe_config); |
| 6798 | 7043 | ||
| 6799 | pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) && | 7044 | if (IS_HASWELL(dev)) |
| 6800 | (I915_READ(IPS_CTL) & IPS_ENABLE); | 7045 | pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) && |
| 7046 | (I915_READ(IPS_CTL) & IPS_ENABLE); | ||
| 6801 | 7047 | ||
| 6802 | pipe_config->pixel_multiplier = 1; | 7048 | pipe_config->pixel_multiplier = 1; |
| 6803 | 7049 | ||
| @@ -7689,7 +7935,7 @@ static int i9xx_pll_refclk(struct drm_device *dev, | |||
| 7689 | u32 dpll = pipe_config->dpll_hw_state.dpll; | 7935 | u32 dpll = pipe_config->dpll_hw_state.dpll; |
| 7690 | 7936 | ||
| 7691 | if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN) | 7937 | if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN) |
| 7692 | return dev_priv->vbt.lvds_ssc_freq * 1000; | 7938 | return dev_priv->vbt.lvds_ssc_freq; |
| 7693 | else if (HAS_PCH_SPLIT(dev)) | 7939 | else if (HAS_PCH_SPLIT(dev)) |
| 7694 | return 120000; | 7940 | return 120000; |
| 7695 | else if (!IS_GEN2(dev)) | 7941 | else if (!IS_GEN2(dev)) |
| @@ -7752,12 +7998,17 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc, | |||
| 7752 | else | 7998 | else |
| 7753 | i9xx_clock(refclk, &clock); | 7999 | i9xx_clock(refclk, &clock); |
| 7754 | } else { | 8000 | } else { |
| 7755 | bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN); | 8001 | u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS); |
| 8002 | bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN); | ||
| 7756 | 8003 | ||
| 7757 | if (is_lvds) { | 8004 | if (is_lvds) { |
| 7758 | clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >> | 8005 | clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >> |
| 7759 | DPLL_FPA01_P1_POST_DIV_SHIFT); | 8006 | DPLL_FPA01_P1_POST_DIV_SHIFT); |
| 7760 | clock.p2 = 14; | 8007 | |
| 8008 | if (lvds & LVDS_CLKB_POWER_UP) | ||
| 8009 | clock.p2 = 7; | ||
| 8010 | else | ||
| 8011 | clock.p2 = 14; | ||
| 7761 | } else { | 8012 | } else { |
| 7762 | if (dpll & PLL_P1_DIVIDE_BY_TWO) | 8013 | if (dpll & PLL_P1_DIVIDE_BY_TWO) |
| 7763 | clock.p1 = 2; | 8014 | clock.p1 = 2; |
| @@ -8493,28 +8744,6 @@ static struct drm_crtc_helper_funcs intel_helper_funcs = { | |||
| 8493 | .load_lut = intel_crtc_load_lut, | 8744 | .load_lut = intel_crtc_load_lut, |
| 8494 | }; | 8745 | }; |
| 8495 | 8746 | ||
| 8496 | static bool intel_encoder_crtc_ok(struct drm_encoder *encoder, | ||
| 8497 | struct drm_crtc *crtc) | ||
| 8498 | { | ||
| 8499 | struct drm_device *dev; | ||
| 8500 | struct drm_crtc *tmp; | ||
| 8501 | int crtc_mask = 1; | ||
| 8502 | |||
| 8503 | WARN(!crtc, "checking null crtc?\n"); | ||
| 8504 | |||
| 8505 | dev = crtc->dev; | ||
| 8506 | |||
| 8507 | list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) { | ||
| 8508 | if (tmp == crtc) | ||
| 8509 | break; | ||
| 8510 | crtc_mask <<= 1; | ||
| 8511 | } | ||
| 8512 | |||
| 8513 | if (encoder->possible_crtcs & crtc_mask) | ||
| 8514 | return true; | ||
| 8515 | return false; | ||
| 8516 | } | ||
| 8517 | |||
| 8518 | /** | 8747 | /** |
| 8519 | * intel_modeset_update_staged_output_state | 8748 | * intel_modeset_update_staged_output_state |
| 8520 | * | 8749 | * |
| @@ -9122,7 +9351,9 @@ intel_pipe_config_compare(struct drm_device *dev, | |||
| 9122 | PIPE_CONF_CHECK_I(pch_pfit.size); | 9351 | PIPE_CONF_CHECK_I(pch_pfit.size); |
| 9123 | } | 9352 | } |
| 9124 | 9353 | ||
| 9125 | PIPE_CONF_CHECK_I(ips_enabled); | 9354 | /* BDW+ don't expose a synchronous way to read the state */ |
| 9355 | if (IS_HASWELL(dev)) | ||
| 9356 | PIPE_CONF_CHECK_I(ips_enabled); | ||
| 9126 | 9357 | ||
| 9127 | PIPE_CONF_CHECK_I(double_wide); | 9358 | PIPE_CONF_CHECK_I(double_wide); |
| 9128 | 9359 | ||
| @@ -9368,21 +9599,19 @@ static int __intel_set_mode(struct drm_crtc *crtc, | |||
| 9368 | { | 9599 | { |
| 9369 | struct drm_device *dev = crtc->dev; | 9600 | struct drm_device *dev = crtc->dev; |
| 9370 | drm_i915_private_t *dev_priv = dev->dev_private; | 9601 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 9371 | struct drm_display_mode *saved_mode, *saved_hwmode; | 9602 | struct drm_display_mode *saved_mode; |
| 9372 | struct intel_crtc_config *pipe_config = NULL; | 9603 | struct intel_crtc_config *pipe_config = NULL; |
| 9373 | struct intel_crtc *intel_crtc; | 9604 | struct intel_crtc *intel_crtc; |
| 9374 | unsigned disable_pipes, prepare_pipes, modeset_pipes; | 9605 | unsigned disable_pipes, prepare_pipes, modeset_pipes; |
| 9375 | int ret = 0; | 9606 | int ret = 0; |
| 9376 | 9607 | ||
| 9377 | saved_mode = kcalloc(2, sizeof(*saved_mode), GFP_KERNEL); | 9608 | saved_mode = kmalloc(sizeof(*saved_mode), GFP_KERNEL); |
| 9378 | if (!saved_mode) | 9609 | if (!saved_mode) |
| 9379 | return -ENOMEM; | 9610 | return -ENOMEM; |
| 9380 | saved_hwmode = saved_mode + 1; | ||
| 9381 | 9611 | ||
| 9382 | intel_modeset_affected_pipes(crtc, &modeset_pipes, | 9612 | intel_modeset_affected_pipes(crtc, &modeset_pipes, |
| 9383 | &prepare_pipes, &disable_pipes); | 9613 | &prepare_pipes, &disable_pipes); |
| 9384 | 9614 | ||
| 9385 | *saved_hwmode = crtc->hwmode; | ||
| 9386 | *saved_mode = crtc->mode; | 9615 | *saved_mode = crtc->mode; |
| 9387 | 9616 | ||
| 9388 | /* Hack: Because we don't (yet) support global modeset on multiple | 9617 | /* Hack: Because we don't (yet) support global modeset on multiple |
| @@ -9402,6 +9631,21 @@ static int __intel_set_mode(struct drm_crtc *crtc, | |||
| 9402 | "[modeset]"); | 9631 | "[modeset]"); |
| 9403 | } | 9632 | } |
| 9404 | 9633 | ||
| 9634 | /* | ||
| 9635 | * See if the config requires any additional preparation, e.g. | ||
| 9636 | * to adjust global state with pipes off. We need to do this | ||
| 9637 | * here so we can get the modeset_pipe updated config for the new | ||
| 9638 | * mode set on this crtc. For other crtcs we need to use the | ||
| 9639 | * adjusted_mode bits in the crtc directly. | ||
| 9640 | */ | ||
| 9641 | if (IS_VALLEYVIEW(dev)) { | ||
| 9642 | valleyview_modeset_global_pipes(dev, &prepare_pipes, | ||
| 9643 | modeset_pipes, pipe_config); | ||
| 9644 | |||
| 9645 | /* may have added more to prepare_pipes than we should */ | ||
| 9646 | prepare_pipes &= ~disable_pipes; | ||
| 9647 | } | ||
| 9648 | |||
| 9405 | for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc) | 9649 | for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc) |
| 9406 | intel_crtc_disable(&intel_crtc->base); | 9650 | intel_crtc_disable(&intel_crtc->base); |
| 9407 | 9651 | ||
| @@ -9418,6 +9662,14 @@ static int __intel_set_mode(struct drm_crtc *crtc, | |||
| 9418 | /* mode_set/enable/disable functions rely on a correct pipe | 9662 | /* mode_set/enable/disable functions rely on a correct pipe |
| 9419 | * config. */ | 9663 | * config. */ |
| 9420 | to_intel_crtc(crtc)->config = *pipe_config; | 9664 | to_intel_crtc(crtc)->config = *pipe_config; |
| 9665 | |||
| 9666 | /* | ||
| 9667 | * Calculate and store various constants which | ||
| 9668 | * are later needed by vblank and swap-completion | ||
| 9669 | * timestamping. They are derived from true hwmode. | ||
| 9670 | */ | ||
| 9671 | drm_calc_timestamping_constants(crtc, | ||
| 9672 | &pipe_config->adjusted_mode); | ||
| 9421 | } | 9673 | } |
| 9422 | 9674 | ||
| 9423 | /* Only after disabling all output pipelines that will be changed can we | 9675 | /* Only after disabling all output pipelines that will be changed can we |
| @@ -9441,23 +9693,10 @@ static int __intel_set_mode(struct drm_crtc *crtc, | |||
| 9441 | for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) | 9693 | for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) |
| 9442 | dev_priv->display.crtc_enable(&intel_crtc->base); | 9694 | dev_priv->display.crtc_enable(&intel_crtc->base); |
| 9443 | 9695 | ||
| 9444 | if (modeset_pipes) { | ||
| 9445 | /* Store real post-adjustment hardware mode. */ | ||
| 9446 | crtc->hwmode = pipe_config->adjusted_mode; | ||
| 9447 | |||
| 9448 | /* Calculate and store various constants which | ||
| 9449 | * are later needed by vblank and swap-completion | ||
| 9450 | * timestamping. They are derived from true hwmode. | ||
| 9451 | */ | ||
| 9452 | drm_calc_timestamping_constants(crtc); | ||
| 9453 | } | ||
| 9454 | |||
| 9455 | /* FIXME: add subpixel order */ | 9696 | /* FIXME: add subpixel order */ |
| 9456 | done: | 9697 | done: |
| 9457 | if (ret && crtc->enabled) { | 9698 | if (ret && crtc->enabled) |
| 9458 | crtc->hwmode = *saved_hwmode; | ||
| 9459 | crtc->mode = *saved_mode; | 9699 | crtc->mode = *saved_mode; |
| 9460 | } | ||
| 9461 | 9700 | ||
| 9462 | out: | 9701 | out: |
| 9463 | kfree(pipe_config); | 9702 | kfree(pipe_config); |
| @@ -9679,8 +9918,8 @@ intel_modeset_stage_output_state(struct drm_device *dev, | |||
| 9679 | } | 9918 | } |
| 9680 | 9919 | ||
| 9681 | /* Make sure the new CRTC will work with the encoder */ | 9920 | /* Make sure the new CRTC will work with the encoder */ |
| 9682 | if (!intel_encoder_crtc_ok(&connector->new_encoder->base, | 9921 | if (!drm_encoder_crtc_ok(&connector->new_encoder->base, |
| 9683 | new_crtc)) { | 9922 | new_crtc)) { |
| 9684 | return -EINVAL; | 9923 | return -EINVAL; |
| 9685 | } | 9924 | } |
| 9686 | connector->encoder->new_crtc = to_intel_crtc(new_crtc); | 9925 | connector->encoder->new_crtc = to_intel_crtc(new_crtc); |
| @@ -9694,17 +9933,21 @@ intel_modeset_stage_output_state(struct drm_device *dev, | |||
| 9694 | /* Check for any encoders that needs to be disabled. */ | 9933 | /* Check for any encoders that needs to be disabled. */ |
| 9695 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, | 9934 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, |
| 9696 | base.head) { | 9935 | base.head) { |
| 9936 | int num_connectors = 0; | ||
| 9697 | list_for_each_entry(connector, | 9937 | list_for_each_entry(connector, |
| 9698 | &dev->mode_config.connector_list, | 9938 | &dev->mode_config.connector_list, |
| 9699 | base.head) { | 9939 | base.head) { |
| 9700 | if (connector->new_encoder == encoder) { | 9940 | if (connector->new_encoder == encoder) { |
| 9701 | WARN_ON(!connector->new_encoder->new_crtc); | 9941 | WARN_ON(!connector->new_encoder->new_crtc); |
| 9702 | 9942 | num_connectors++; | |
| 9703 | goto next_encoder; | ||
| 9704 | } | 9943 | } |
| 9705 | } | 9944 | } |
| 9706 | encoder->new_crtc = NULL; | 9945 | |
| 9707 | next_encoder: | 9946 | if (num_connectors == 0) |
| 9947 | encoder->new_crtc = NULL; | ||
| 9948 | else if (num_connectors > 1) | ||
| 9949 | return -EINVAL; | ||
| 9950 | |||
| 9708 | /* Only now check for crtc changes so we don't miss encoders | 9951 | /* Only now check for crtc changes so we don't miss encoders |
| 9709 | * that will be disabled. */ | 9952 | * that will be disabled. */ |
| 9710 | if (&encoder->new_crtc->base != encoder->base.crtc) { | 9953 | if (&encoder->new_crtc->base != encoder->base.crtc) { |
| @@ -9775,6 +10018,16 @@ static int intel_crtc_set_config(struct drm_mode_set *set) | |||
| 9775 | 10018 | ||
| 9776 | ret = intel_pipe_set_base(set->crtc, | 10019 | ret = intel_pipe_set_base(set->crtc, |
| 9777 | set->x, set->y, set->fb); | 10020 | set->x, set->y, set->fb); |
| 10021 | /* | ||
| 10022 | * In the fastboot case this may be our only check of the | ||
| 10023 | * state after boot. It would be better to only do it on | ||
| 10024 | * the first update, but we don't have a nice way of doing that | ||
| 10025 | * (and really, set_config isn't used much for high freq page | ||
| 10026 | * flipping, so increasing its cost here shouldn't be a big | ||
| 10027 | * deal). | ||
| 10028 | */ | ||
| 10029 | if (i915_fastboot && ret == 0) | ||
| 10030 | intel_modeset_check_state(set->crtc->dev); | ||
| 9778 | } | 10031 | } |
| 9779 | 10032 | ||
| 9780 | if (ret) { | 10033 | if (ret) { |
| @@ -9835,7 +10088,7 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv, | |||
| 9835 | struct intel_shared_dpll *pll) | 10088 | struct intel_shared_dpll *pll) |
| 9836 | { | 10089 | { |
| 9837 | /* PCH refclock must be enabled first */ | 10090 | /* PCH refclock must be enabled first */ |
| 9838 | assert_pch_refclk_enabled(dev_priv); | 10091 | ibx_assert_pch_refclk_enabled(dev_priv); |
| 9839 | 10092 | ||
| 9840 | I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll); | 10093 | I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll); |
| 9841 | 10094 | ||
| @@ -9903,8 +10156,6 @@ static void intel_shared_dpll_init(struct drm_device *dev) | |||
| 9903 | dev_priv->num_shared_dpll = 0; | 10156 | dev_priv->num_shared_dpll = 0; |
| 9904 | 10157 | ||
| 9905 | BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS); | 10158 | BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS); |
| 9906 | DRM_DEBUG_KMS("%i shared PLLs initialized\n", | ||
| 9907 | dev_priv->num_shared_dpll); | ||
| 9908 | } | 10159 | } |
| 9909 | 10160 | ||
| 9910 | static void intel_crtc_init(struct drm_device *dev, int pipe) | 10161 | static void intel_crtc_init(struct drm_device *dev, int pipe) |
| @@ -9926,10 +10177,13 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) | |||
| 9926 | intel_crtc->lut_b[i] = i; | 10177 | intel_crtc->lut_b[i] = i; |
| 9927 | } | 10178 | } |
| 9928 | 10179 | ||
| 9929 | /* Swap pipes & planes for FBC on pre-965 */ | 10180 | /* |
| 10181 | * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port | ||
| 10182 | * is hooked to plane B. Hence we want plane A feeding pipe B. | ||
| 10183 | */ | ||
| 9930 | intel_crtc->pipe = pipe; | 10184 | intel_crtc->pipe = pipe; |
| 9931 | intel_crtc->plane = pipe; | 10185 | intel_crtc->plane = pipe; |
| 9932 | if (IS_MOBILE(dev) && IS_GEN3(dev)) { | 10186 | if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) { |
| 9933 | DRM_DEBUG_KMS("swapping pipes & planes for FBC\n"); | 10187 | DRM_DEBUG_KMS("swapping pipes & planes for FBC\n"); |
| 9934 | intel_crtc->plane = !pipe; | 10188 | intel_crtc->plane = !pipe; |
| 9935 | } | 10189 | } |
| @@ -10018,6 +10272,28 @@ static bool has_edp_a(struct drm_device *dev) | |||
| 10018 | return true; | 10272 | return true; |
| 10019 | } | 10273 | } |
| 10020 | 10274 | ||
| 10275 | const char *intel_output_name(int output) | ||
| 10276 | { | ||
| 10277 | static const char *names[] = { | ||
| 10278 | [INTEL_OUTPUT_UNUSED] = "Unused", | ||
| 10279 | [INTEL_OUTPUT_ANALOG] = "Analog", | ||
| 10280 | [INTEL_OUTPUT_DVO] = "DVO", | ||
| 10281 | [INTEL_OUTPUT_SDVO] = "SDVO", | ||
| 10282 | [INTEL_OUTPUT_LVDS] = "LVDS", | ||
| 10283 | [INTEL_OUTPUT_TVOUT] = "TV", | ||
| 10284 | [INTEL_OUTPUT_HDMI] = "HDMI", | ||
| 10285 | [INTEL_OUTPUT_DISPLAYPORT] = "DisplayPort", | ||
| 10286 | [INTEL_OUTPUT_EDP] = "eDP", | ||
| 10287 | [INTEL_OUTPUT_DSI] = "DSI", | ||
| 10288 | [INTEL_OUTPUT_UNKNOWN] = "Unknown", | ||
| 10289 | }; | ||
| 10290 | |||
| 10291 | if (output < 0 || output >= ARRAY_SIZE(names) || !names[output]) | ||
| 10292 | return "Invalid"; | ||
| 10293 | |||
| 10294 | return names[output]; | ||
| 10295 | } | ||
| 10296 | |||
| 10021 | static void intel_setup_outputs(struct drm_device *dev) | 10297 | static void intel_setup_outputs(struct drm_device *dev) |
| 10022 | { | 10298 | { |
| 10023 | struct drm_i915_private *dev_priv = dev->dev_private; | 10299 | struct drm_i915_private *dev_priv = dev->dev_private; |
| @@ -10412,8 +10688,11 @@ static void intel_init_display(struct drm_device *dev) | |||
| 10412 | } | 10688 | } |
| 10413 | } else if (IS_G4X(dev)) { | 10689 | } else if (IS_G4X(dev)) { |
| 10414 | dev_priv->display.write_eld = g4x_write_eld; | 10690 | dev_priv->display.write_eld = g4x_write_eld; |
| 10415 | } else if (IS_VALLEYVIEW(dev)) | 10691 | } else if (IS_VALLEYVIEW(dev)) { |
| 10692 | dev_priv->display.modeset_global_resources = | ||
| 10693 | valleyview_modeset_global_resources; | ||
| 10416 | dev_priv->display.write_eld = ironlake_write_eld; | 10694 | dev_priv->display.write_eld = ironlake_write_eld; |
| 10695 | } | ||
| 10417 | 10696 | ||
| 10418 | /* Default just returns -ENODEV to indicate unsupported */ | 10697 | /* Default just returns -ENODEV to indicate unsupported */ |
| 10419 | dev_priv->display.queue_flip = intel_default_queue_flip; | 10698 | dev_priv->display.queue_flip = intel_default_queue_flip; |
| @@ -10440,6 +10719,8 @@ static void intel_init_display(struct drm_device *dev) | |||
| 10440 | dev_priv->display.queue_flip = intel_gen7_queue_flip; | 10719 | dev_priv->display.queue_flip = intel_gen7_queue_flip; |
| 10441 | break; | 10720 | break; |
| 10442 | } | 10721 | } |
| 10722 | |||
| 10723 | intel_panel_init_backlight_funcs(dev); | ||
| 10443 | } | 10724 | } |
| 10444 | 10725 | ||
| 10445 | /* | 10726 | /* |
| @@ -10476,17 +10757,6 @@ static void quirk_invert_brightness(struct drm_device *dev) | |||
| 10476 | DRM_INFO("applying inverted panel brightness quirk\n"); | 10757 | DRM_INFO("applying inverted panel brightness quirk\n"); |
| 10477 | } | 10758 | } |
| 10478 | 10759 | ||
| 10479 | /* | ||
| 10480 | * Some machines (Dell XPS13) suffer broken backlight controls if | ||
| 10481 | * BLM_PCH_PWM_ENABLE is set. | ||
| 10482 | */ | ||
| 10483 | static void quirk_no_pcm_pwm_enable(struct drm_device *dev) | ||
| 10484 | { | ||
| 10485 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 10486 | dev_priv->quirks |= QUIRK_NO_PCH_PWM_ENABLE; | ||
| 10487 | DRM_INFO("applying no-PCH_PWM_ENABLE quirk\n"); | ||
| 10488 | } | ||
| 10489 | |||
| 10490 | struct intel_quirk { | 10760 | struct intel_quirk { |
| 10491 | int device; | 10761 | int device; |
| 10492 | int subsystem_vendor; | 10762 | int subsystem_vendor; |
| @@ -10555,11 +10825,6 @@ static struct intel_quirk intel_quirks[] = { | |||
| 10555 | 10825 | ||
| 10556 | /* Acer Aspire 4736Z */ | 10826 | /* Acer Aspire 4736Z */ |
| 10557 | { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness }, | 10827 | { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness }, |
| 10558 | |||
| 10559 | /* Dell XPS13 HD Sandy Bridge */ | ||
| 10560 | { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable }, | ||
| 10561 | /* Dell XPS13 HD and XPS13 FHD Ivy Bridge */ | ||
| 10562 | { 0x0166, 0x1028, 0x058b, quirk_no_pcm_pwm_enable }, | ||
| 10563 | }; | 10828 | }; |
| 10564 | 10829 | ||
| 10565 | static void intel_init_quirks(struct drm_device *dev) | 10830 | static void intel_init_quirks(struct drm_device *dev) |
| @@ -10603,18 +10868,11 @@ static void i915_disable_vga(struct drm_device *dev) | |||
| 10603 | 10868 | ||
| 10604 | void intel_modeset_init_hw(struct drm_device *dev) | 10869 | void intel_modeset_init_hw(struct drm_device *dev) |
| 10605 | { | 10870 | { |
| 10606 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 10607 | |||
| 10608 | intel_prepare_ddi(dev); | 10871 | intel_prepare_ddi(dev); |
| 10609 | 10872 | ||
| 10610 | intel_init_clock_gating(dev); | 10873 | intel_init_clock_gating(dev); |
| 10611 | 10874 | ||
| 10612 | /* Enable the CRI clock source so we can get at the display */ | 10875 | intel_reset_dpio(dev); |
| 10613 | if (IS_VALLEYVIEW(dev)) | ||
| 10614 | I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | | ||
| 10615 | DPLL_INTEGRATED_CRI_CLK_VLV); | ||
| 10616 | |||
| 10617 | intel_init_dpio(dev); | ||
| 10618 | 10876 | ||
| 10619 | mutex_lock(&dev->struct_mutex); | 10877 | mutex_lock(&dev->struct_mutex); |
| 10620 | intel_enable_gt_powersave(dev); | 10878 | intel_enable_gt_powersave(dev); |
| @@ -10676,6 +10934,9 @@ void intel_modeset_init(struct drm_device *dev) | |||
| 10676 | } | 10934 | } |
| 10677 | } | 10935 | } |
| 10678 | 10936 | ||
| 10937 | intel_init_dpio(dev); | ||
| 10938 | intel_reset_dpio(dev); | ||
| 10939 | |||
| 10679 | intel_cpu_pll_init(dev); | 10940 | intel_cpu_pll_init(dev); |
| 10680 | intel_shared_dpll_init(dev); | 10941 | intel_shared_dpll_init(dev); |
| 10681 | 10942 | ||
| @@ -10879,7 +11140,7 @@ void i915_redisable_vga(struct drm_device *dev) | |||
| 10879 | * level, just check if the power well is enabled instead of trying to | 11140 | * level, just check if the power well is enabled instead of trying to |
| 10880 | * follow the "don't touch the power well if we don't need it" policy | 11141 | * follow the "don't touch the power well if we don't need it" policy |
| 10881 | * the rest of the driver uses. */ | 11142 | * the rest of the driver uses. */ |
| 10882 | if (HAS_POWER_WELL(dev) && | 11143 | if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && |
| 10883 | (I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0) | 11144 | (I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0) |
| 10884 | return; | 11145 | return; |
| 10885 | 11146 | ||
| @@ -11023,7 +11284,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev, | |||
| 11023 | pll->on = false; | 11284 | pll->on = false; |
| 11024 | } | 11285 | } |
| 11025 | 11286 | ||
| 11026 | if (IS_HASWELL(dev)) | 11287 | if (HAS_PCH_SPLIT(dev)) |
| 11027 | ilk_wm_get_hw_state(dev); | 11288 | ilk_wm_get_hw_state(dev); |
| 11028 | 11289 | ||
| 11029 | if (force_restore) { | 11290 | if (force_restore) { |
| @@ -11101,12 +11362,11 @@ void intel_modeset_cleanup(struct drm_device *dev) | |||
| 11101 | /* flush any delayed tasks or pending work */ | 11362 | /* flush any delayed tasks or pending work */ |
| 11102 | flush_scheduled_work(); | 11363 | flush_scheduled_work(); |
| 11103 | 11364 | ||
| 11104 | /* destroy backlight, if any, before the connectors */ | 11365 | /* destroy the backlight and sysfs files before encoders/connectors */ |
| 11105 | intel_panel_destroy_backlight(dev); | 11366 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
| 11106 | 11367 | intel_panel_destroy_backlight(connector); | |
| 11107 | /* destroy the sysfs files before encoders/connectors */ | ||
| 11108 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) | ||
| 11109 | drm_sysfs_connector_remove(connector); | 11368 | drm_sysfs_connector_remove(connector); |
| 11369 | } | ||
| 11110 | 11370 | ||
| 11111 | drm_mode_config_cleanup(dev); | 11371 | drm_mode_config_cleanup(dev); |
| 11112 | 11372 | ||
| @@ -11161,6 +11421,7 @@ struct intel_display_error_state { | |||
| 11161 | } cursor[I915_MAX_PIPES]; | 11421 | } cursor[I915_MAX_PIPES]; |
| 11162 | 11422 | ||
| 11163 | struct intel_pipe_error_state { | 11423 | struct intel_pipe_error_state { |
| 11424 | bool power_domain_on; | ||
| 11164 | u32 source; | 11425 | u32 source; |
| 11165 | } pipe[I915_MAX_PIPES]; | 11426 | } pipe[I915_MAX_PIPES]; |
| 11166 | 11427 | ||
| @@ -11175,6 +11436,7 @@ struct intel_display_error_state { | |||
| 11175 | } plane[I915_MAX_PIPES]; | 11436 | } plane[I915_MAX_PIPES]; |
| 11176 | 11437 | ||
| 11177 | struct intel_transcoder_error_state { | 11438 | struct intel_transcoder_error_state { |
| 11439 | bool power_domain_on; | ||
| 11178 | enum transcoder cpu_transcoder; | 11440 | enum transcoder cpu_transcoder; |
| 11179 | 11441 | ||
| 11180 | u32 conf; | 11442 | u32 conf; |
| @@ -11208,11 +11470,13 @@ intel_display_capture_error_state(struct drm_device *dev) | |||
| 11208 | if (error == NULL) | 11470 | if (error == NULL) |
| 11209 | return NULL; | 11471 | return NULL; |
| 11210 | 11472 | ||
| 11211 | if (HAS_POWER_WELL(dev)) | 11473 | if (IS_HASWELL(dev) || IS_BROADWELL(dev)) |
| 11212 | error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER); | 11474 | error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER); |
| 11213 | 11475 | ||
| 11214 | for_each_pipe(i) { | 11476 | for_each_pipe(i) { |
| 11215 | if (!intel_display_power_enabled(dev, POWER_DOMAIN_PIPE(i))) | 11477 | error->pipe[i].power_domain_on = |
| 11478 | intel_display_power_enabled_sw(dev, POWER_DOMAIN_PIPE(i)); | ||
| 11479 | if (!error->pipe[i].power_domain_on) | ||
| 11216 | continue; | 11480 | continue; |
| 11217 | 11481 | ||
| 11218 | if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) { | 11482 | if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) { |
| @@ -11248,8 +11512,10 @@ intel_display_capture_error_state(struct drm_device *dev) | |||
| 11248 | for (i = 0; i < error->num_transcoders; i++) { | 11512 | for (i = 0; i < error->num_transcoders; i++) { |
| 11249 | enum transcoder cpu_transcoder = transcoders[i]; | 11513 | enum transcoder cpu_transcoder = transcoders[i]; |
| 11250 | 11514 | ||
| 11251 | if (!intel_display_power_enabled(dev, | 11515 | error->transcoder[i].power_domain_on = |
| 11252 | POWER_DOMAIN_TRANSCODER(cpu_transcoder))) | 11516 | intel_display_power_enabled_sw(dev, |
| 11517 | POWER_DOMAIN_TRANSCODER(cpu_transcoder)); | ||
| 11518 | if (!error->transcoder[i].power_domain_on) | ||
| 11253 | continue; | 11519 | continue; |
| 11254 | 11520 | ||
| 11255 | error->transcoder[i].cpu_transcoder = cpu_transcoder; | 11521 | error->transcoder[i].cpu_transcoder = cpu_transcoder; |
| @@ -11279,11 +11545,13 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m, | |||
| 11279 | return; | 11545 | return; |
| 11280 | 11546 | ||
| 11281 | err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes); | 11547 | err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes); |
| 11282 | if (HAS_POWER_WELL(dev)) | 11548 | if (IS_HASWELL(dev) || IS_BROADWELL(dev)) |
| 11283 | err_printf(m, "PWR_WELL_CTL2: %08x\n", | 11549 | err_printf(m, "PWR_WELL_CTL2: %08x\n", |
| 11284 | error->power_well_driver); | 11550 | error->power_well_driver); |
| 11285 | for_each_pipe(i) { | 11551 | for_each_pipe(i) { |
| 11286 | err_printf(m, "Pipe [%d]:\n", i); | 11552 | err_printf(m, "Pipe [%d]:\n", i); |
| 11553 | err_printf(m, " Power: %s\n", | ||
| 11554 | error->pipe[i].power_domain_on ? "on" : "off"); | ||
| 11287 | err_printf(m, " SRC: %08x\n", error->pipe[i].source); | 11555 | err_printf(m, " SRC: %08x\n", error->pipe[i].source); |
| 11288 | 11556 | ||
| 11289 | err_printf(m, "Plane [%d]:\n", i); | 11557 | err_printf(m, "Plane [%d]:\n", i); |
| @@ -11309,6 +11577,8 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m, | |||
| 11309 | for (i = 0; i < error->num_transcoders; i++) { | 11577 | for (i = 0; i < error->num_transcoders; i++) { |
| 11310 | err_printf(m, "CPU transcoder: %c\n", | 11578 | err_printf(m, "CPU transcoder: %c\n", |
| 11311 | transcoder_name(error->transcoder[i].cpu_transcoder)); | 11579 | transcoder_name(error->transcoder[i].cpu_transcoder)); |
| 11580 | err_printf(m, " Power: %s\n", | ||
| 11581 | error->transcoder[i].power_domain_on ? "on" : "off"); | ||
| 11312 | err_printf(m, " CONF: %08x\n", error->transcoder[i].conf); | 11582 | err_printf(m, " CONF: %08x\n", error->transcoder[i].conf); |
| 11313 | err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal); | 11583 | err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal); |
| 11314 | err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank); | 11584 | err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank); |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 30c627c7b7ba..5ede4e8e290d 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
| @@ -142,7 +142,7 @@ intel_dp_max_data_rate(int max_link_clock, int max_lanes) | |||
| 142 | return (max_link_clock * max_lanes * 8) / 10; | 142 | return (max_link_clock * max_lanes * 8) / 10; |
| 143 | } | 143 | } |
| 144 | 144 | ||
| 145 | static int | 145 | static enum drm_mode_status |
| 146 | intel_dp_mode_valid(struct drm_connector *connector, | 146 | intel_dp_mode_valid(struct drm_connector *connector, |
| 147 | struct drm_display_mode *mode) | 147 | struct drm_display_mode *mode) |
| 148 | { | 148 | { |
| @@ -404,7 +404,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, | |||
| 404 | int i, ret, recv_bytes; | 404 | int i, ret, recv_bytes; |
| 405 | uint32_t status; | 405 | uint32_t status; |
| 406 | int try, precharge, clock = 0; | 406 | int try, precharge, clock = 0; |
| 407 | bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev); | 407 | bool has_aux_irq = true; |
| 408 | uint32_t timeout; | 408 | uint32_t timeout; |
| 409 | 409 | ||
| 410 | /* dp aux is extremely sensitive to irq latency, hence request the | 410 | /* dp aux is extremely sensitive to irq latency, hence request the |
| @@ -542,7 +542,7 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp, | |||
| 542 | return -E2BIG; | 542 | return -E2BIG; |
| 543 | 543 | ||
| 544 | intel_dp_check_edp(intel_dp); | 544 | intel_dp_check_edp(intel_dp); |
| 545 | msg[0] = AUX_NATIVE_WRITE << 4; | 545 | msg[0] = DP_AUX_NATIVE_WRITE << 4; |
| 546 | msg[1] = address >> 8; | 546 | msg[1] = address >> 8; |
| 547 | msg[2] = address & 0xff; | 547 | msg[2] = address & 0xff; |
| 548 | msg[3] = send_bytes - 1; | 548 | msg[3] = send_bytes - 1; |
| @@ -552,9 +552,10 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp, | |||
| 552 | ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1); | 552 | ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1); |
| 553 | if (ret < 0) | 553 | if (ret < 0) |
| 554 | return ret; | 554 | return ret; |
| 555 | if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) | 555 | ack >>= 4; |
| 556 | if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK) | ||
| 556 | break; | 557 | break; |
| 557 | else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) | 558 | else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER) |
| 558 | udelay(100); | 559 | udelay(100); |
| 559 | else | 560 | else |
| 560 | return -EIO; | 561 | return -EIO; |
| @@ -586,7 +587,7 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp, | |||
| 586 | return -E2BIG; | 587 | return -E2BIG; |
| 587 | 588 | ||
| 588 | intel_dp_check_edp(intel_dp); | 589 | intel_dp_check_edp(intel_dp); |
| 589 | msg[0] = AUX_NATIVE_READ << 4; | 590 | msg[0] = DP_AUX_NATIVE_READ << 4; |
| 590 | msg[1] = address >> 8; | 591 | msg[1] = address >> 8; |
| 591 | msg[2] = address & 0xff; | 592 | msg[2] = address & 0xff; |
| 592 | msg[3] = recv_bytes - 1; | 593 | msg[3] = recv_bytes - 1; |
| @@ -601,12 +602,12 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp, | |||
| 601 | return -EPROTO; | 602 | return -EPROTO; |
| 602 | if (ret < 0) | 603 | if (ret < 0) |
| 603 | return ret; | 604 | return ret; |
| 604 | ack = reply[0]; | 605 | ack = reply[0] >> 4; |
| 605 | if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) { | 606 | if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK) { |
| 606 | memcpy(recv, reply + 1, ret - 1); | 607 | memcpy(recv, reply + 1, ret - 1); |
| 607 | return ret - 1; | 608 | return ret - 1; |
| 608 | } | 609 | } |
| 609 | else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) | 610 | else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER) |
| 610 | udelay(100); | 611 | udelay(100); |
| 611 | else | 612 | else |
| 612 | return -EIO; | 613 | return -EIO; |
| @@ -633,12 +634,12 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | |||
| 633 | intel_dp_check_edp(intel_dp); | 634 | intel_dp_check_edp(intel_dp); |
| 634 | /* Set up the command byte */ | 635 | /* Set up the command byte */ |
| 635 | if (mode & MODE_I2C_READ) | 636 | if (mode & MODE_I2C_READ) |
| 636 | msg[0] = AUX_I2C_READ << 4; | 637 | msg[0] = DP_AUX_I2C_READ << 4; |
| 637 | else | 638 | else |
| 638 | msg[0] = AUX_I2C_WRITE << 4; | 639 | msg[0] = DP_AUX_I2C_WRITE << 4; |
| 639 | 640 | ||
| 640 | if (!(mode & MODE_I2C_STOP)) | 641 | if (!(mode & MODE_I2C_STOP)) |
| 641 | msg[0] |= AUX_I2C_MOT << 4; | 642 | msg[0] |= DP_AUX_I2C_MOT << 4; |
| 642 | 643 | ||
| 643 | msg[1] = address >> 8; | 644 | msg[1] = address >> 8; |
| 644 | msg[2] = address; | 645 | msg[2] = address; |
| @@ -675,17 +676,17 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | |||
| 675 | goto out; | 676 | goto out; |
| 676 | } | 677 | } |
| 677 | 678 | ||
| 678 | switch (reply[0] & AUX_NATIVE_REPLY_MASK) { | 679 | switch ((reply[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK) { |
| 679 | case AUX_NATIVE_REPLY_ACK: | 680 | case DP_AUX_NATIVE_REPLY_ACK: |
| 680 | /* I2C-over-AUX Reply field is only valid | 681 | /* I2C-over-AUX Reply field is only valid |
| 681 | * when paired with AUX ACK. | 682 | * when paired with AUX ACK. |
| 682 | */ | 683 | */ |
| 683 | break; | 684 | break; |
| 684 | case AUX_NATIVE_REPLY_NACK: | 685 | case DP_AUX_NATIVE_REPLY_NACK: |
| 685 | DRM_DEBUG_KMS("aux_ch native nack\n"); | 686 | DRM_DEBUG_KMS("aux_ch native nack\n"); |
| 686 | ret = -EREMOTEIO; | 687 | ret = -EREMOTEIO; |
| 687 | goto out; | 688 | goto out; |
| 688 | case AUX_NATIVE_REPLY_DEFER: | 689 | case DP_AUX_NATIVE_REPLY_DEFER: |
| 689 | /* | 690 | /* |
| 690 | * For now, just give more slack to branch devices. We | 691 | * For now, just give more slack to branch devices. We |
| 691 | * could check the DPCD for I2C bit rate capabilities, | 692 | * could check the DPCD for I2C bit rate capabilities, |
| @@ -706,18 +707,18 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | |||
| 706 | goto out; | 707 | goto out; |
| 707 | } | 708 | } |
| 708 | 709 | ||
| 709 | switch (reply[0] & AUX_I2C_REPLY_MASK) { | 710 | switch ((reply[0] >> 4) & DP_AUX_I2C_REPLY_MASK) { |
| 710 | case AUX_I2C_REPLY_ACK: | 711 | case DP_AUX_I2C_REPLY_ACK: |
| 711 | if (mode == MODE_I2C_READ) { | 712 | if (mode == MODE_I2C_READ) { |
| 712 | *read_byte = reply[1]; | 713 | *read_byte = reply[1]; |
| 713 | } | 714 | } |
| 714 | ret = reply_bytes - 1; | 715 | ret = reply_bytes - 1; |
| 715 | goto out; | 716 | goto out; |
| 716 | case AUX_I2C_REPLY_NACK: | 717 | case DP_AUX_I2C_REPLY_NACK: |
| 717 | DRM_DEBUG_KMS("aux_i2c nack\n"); | 718 | DRM_DEBUG_KMS("aux_i2c nack\n"); |
| 718 | ret = -EREMOTEIO; | 719 | ret = -EREMOTEIO; |
| 719 | goto out; | 720 | goto out; |
| 720 | case AUX_I2C_REPLY_DEFER: | 721 | case DP_AUX_I2C_REPLY_DEFER: |
| 721 | DRM_DEBUG_KMS("aux_i2c defer\n"); | 722 | DRM_DEBUG_KMS("aux_i2c defer\n"); |
| 722 | udelay(100); | 723 | udelay(100); |
| 723 | break; | 724 | break; |
| @@ -1037,6 +1038,8 @@ static void ironlake_wait_panel_status(struct intel_dp *intel_dp, | |||
| 1037 | I915_READ(pp_stat_reg), | 1038 | I915_READ(pp_stat_reg), |
| 1038 | I915_READ(pp_ctrl_reg)); | 1039 | I915_READ(pp_ctrl_reg)); |
| 1039 | } | 1040 | } |
| 1041 | |||
| 1042 | DRM_DEBUG_KMS("Wait complete\n"); | ||
| 1040 | } | 1043 | } |
| 1041 | 1044 | ||
| 1042 | static void ironlake_wait_panel_on(struct intel_dp *intel_dp) | 1045 | static void ironlake_wait_panel_on(struct intel_dp *intel_dp) |
| @@ -1092,6 +1095,8 @@ void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) | |||
| 1092 | if (ironlake_edp_have_panel_vdd(intel_dp)) | 1095 | if (ironlake_edp_have_panel_vdd(intel_dp)) |
| 1093 | return; | 1096 | return; |
| 1094 | 1097 | ||
| 1098 | intel_runtime_pm_get(dev_priv); | ||
| 1099 | |||
| 1095 | DRM_DEBUG_KMS("Turning eDP VDD on\n"); | 1100 | DRM_DEBUG_KMS("Turning eDP VDD on\n"); |
| 1096 | 1101 | ||
| 1097 | if (!ironlake_edp_have_panel_power(intel_dp)) | 1102 | if (!ironlake_edp_have_panel_power(intel_dp)) |
| @@ -1140,7 +1145,11 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp) | |||
| 1140 | /* Make sure sequencer is idle before allowing subsequent activity */ | 1145 | /* Make sure sequencer is idle before allowing subsequent activity */ |
| 1141 | DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", | 1146 | DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", |
| 1142 | I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg)); | 1147 | I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg)); |
| 1143 | msleep(intel_dp->panel_power_down_delay); | 1148 | |
| 1149 | if ((pp & POWER_TARGET_ON) == 0) | ||
| 1150 | msleep(intel_dp->panel_power_cycle_delay); | ||
| 1151 | |||
| 1152 | intel_runtime_pm_put(dev_priv); | ||
| 1144 | } | 1153 | } |
| 1145 | } | 1154 | } |
| 1146 | 1155 | ||
| @@ -1233,20 +1242,16 @@ void ironlake_edp_panel_off(struct intel_dp *intel_dp) | |||
| 1233 | 1242 | ||
| 1234 | DRM_DEBUG_KMS("Turn eDP power off\n"); | 1243 | DRM_DEBUG_KMS("Turn eDP power off\n"); |
| 1235 | 1244 | ||
| 1236 | WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n"); | ||
| 1237 | |||
| 1238 | pp = ironlake_get_pp_control(intel_dp); | 1245 | pp = ironlake_get_pp_control(intel_dp); |
| 1239 | /* We need to switch off panel power _and_ force vdd, for otherwise some | 1246 | /* We need to switch off panel power _and_ force vdd, for otherwise some |
| 1240 | * panels get very unhappy and cease to work. */ | 1247 | * panels get very unhappy and cease to work. */ |
| 1241 | pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE); | 1248 | pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE); |
| 1242 | 1249 | ||
| 1243 | pp_ctrl_reg = _pp_ctrl_reg(intel_dp); | 1250 | pp_ctrl_reg = _pp_ctrl_reg(intel_dp); |
| 1244 | 1251 | ||
| 1245 | I915_WRITE(pp_ctrl_reg, pp); | 1252 | I915_WRITE(pp_ctrl_reg, pp); |
| 1246 | POSTING_READ(pp_ctrl_reg); | 1253 | POSTING_READ(pp_ctrl_reg); |
| 1247 | 1254 | ||
| 1248 | intel_dp->want_panel_vdd = false; | ||
| 1249 | |||
| 1250 | ironlake_wait_panel_off(intel_dp); | 1255 | ironlake_wait_panel_off(intel_dp); |
| 1251 | } | 1256 | } |
| 1252 | 1257 | ||
| @@ -1772,7 +1777,6 @@ static void intel_disable_dp(struct intel_encoder *encoder) | |||
| 1772 | 1777 | ||
| 1773 | /* Make sure the panel is off before trying to change the mode. But also | 1778 | /* Make sure the panel is off before trying to change the mode. But also |
| 1774 | * ensure that we have vdd while we switch off the panel. */ | 1779 | * ensure that we have vdd while we switch off the panel. */ |
| 1775 | ironlake_edp_panel_vdd_on(intel_dp); | ||
| 1776 | ironlake_edp_backlight_off(intel_dp); | 1780 | ironlake_edp_backlight_off(intel_dp); |
| 1777 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); | 1781 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); |
| 1778 | ironlake_edp_panel_off(intel_dp); | 1782 | ironlake_edp_panel_off(intel_dp); |
| @@ -1845,23 +1849,23 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder) | |||
| 1845 | struct drm_device *dev = encoder->base.dev; | 1849 | struct drm_device *dev = encoder->base.dev; |
| 1846 | struct drm_i915_private *dev_priv = dev->dev_private; | 1850 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1847 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); | 1851 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); |
| 1848 | int port = vlv_dport_to_channel(dport); | 1852 | enum dpio_channel port = vlv_dport_to_channel(dport); |
| 1849 | int pipe = intel_crtc->pipe; | 1853 | int pipe = intel_crtc->pipe; |
| 1850 | struct edp_power_seq power_seq; | 1854 | struct edp_power_seq power_seq; |
| 1851 | u32 val; | 1855 | u32 val; |
| 1852 | 1856 | ||
| 1853 | mutex_lock(&dev_priv->dpio_lock); | 1857 | mutex_lock(&dev_priv->dpio_lock); |
| 1854 | 1858 | ||
| 1855 | val = vlv_dpio_read(dev_priv, pipe, DPIO_DATA_LANE_A(port)); | 1859 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port)); |
| 1856 | val = 0; | 1860 | val = 0; |
| 1857 | if (pipe) | 1861 | if (pipe) |
| 1858 | val |= (1<<21); | 1862 | val |= (1<<21); |
| 1859 | else | 1863 | else |
| 1860 | val &= ~(1<<21); | 1864 | val &= ~(1<<21); |
| 1861 | val |= 0x001000c4; | 1865 | val |= 0x001000c4; |
| 1862 | vlv_dpio_write(dev_priv, pipe, DPIO_DATA_CHANNEL(port), val); | 1866 | vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val); |
| 1863 | vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF0(port), 0x00760018); | 1867 | vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018); |
| 1864 | vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF8(port), 0x00400888); | 1868 | vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888); |
| 1865 | 1869 | ||
| 1866 | mutex_unlock(&dev_priv->dpio_lock); | 1870 | mutex_unlock(&dev_priv->dpio_lock); |
| 1867 | 1871 | ||
| @@ -1872,7 +1876,7 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder) | |||
| 1872 | 1876 | ||
| 1873 | intel_enable_dp(encoder); | 1877 | intel_enable_dp(encoder); |
| 1874 | 1878 | ||
| 1875 | vlv_wait_port_ready(dev_priv, port); | 1879 | vlv_wait_port_ready(dev_priv, dport); |
| 1876 | } | 1880 | } |
| 1877 | 1881 | ||
| 1878 | static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder) | 1882 | static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder) |
| @@ -1882,24 +1886,24 @@ static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder) | |||
| 1882 | struct drm_i915_private *dev_priv = dev->dev_private; | 1886 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1883 | struct intel_crtc *intel_crtc = | 1887 | struct intel_crtc *intel_crtc = |
| 1884 | to_intel_crtc(encoder->base.crtc); | 1888 | to_intel_crtc(encoder->base.crtc); |
| 1885 | int port = vlv_dport_to_channel(dport); | 1889 | enum dpio_channel port = vlv_dport_to_channel(dport); |
| 1886 | int pipe = intel_crtc->pipe; | 1890 | int pipe = intel_crtc->pipe; |
| 1887 | 1891 | ||
| 1888 | /* Program Tx lane resets to default */ | 1892 | /* Program Tx lane resets to default */ |
| 1889 | mutex_lock(&dev_priv->dpio_lock); | 1893 | mutex_lock(&dev_priv->dpio_lock); |
| 1890 | vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port), | 1894 | vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), |
| 1891 | DPIO_PCS_TX_LANE2_RESET | | 1895 | DPIO_PCS_TX_LANE2_RESET | |
| 1892 | DPIO_PCS_TX_LANE1_RESET); | 1896 | DPIO_PCS_TX_LANE1_RESET); |
| 1893 | vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port), | 1897 | vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), |
| 1894 | DPIO_PCS_CLK_CRI_RXEB_EIOS_EN | | 1898 | DPIO_PCS_CLK_CRI_RXEB_EIOS_EN | |
| 1895 | DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN | | 1899 | DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN | |
| 1896 | (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) | | 1900 | (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) | |
| 1897 | DPIO_PCS_CLK_SOFT_RESET); | 1901 | DPIO_PCS_CLK_SOFT_RESET); |
| 1898 | 1902 | ||
| 1899 | /* Fix up inter-pair skew failure */ | 1903 | /* Fix up inter-pair skew failure */ |
| 1900 | vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER1(port), 0x00750f00); | 1904 | vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00); |
| 1901 | vlv_dpio_write(dev_priv, pipe, DPIO_TX_CTL(port), 0x00001500); | 1905 | vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500); |
| 1902 | vlv_dpio_write(dev_priv, pipe, DPIO_TX_LANE(port), 0x40400000); | 1906 | vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000); |
| 1903 | mutex_unlock(&dev_priv->dpio_lock); | 1907 | mutex_unlock(&dev_priv->dpio_lock); |
| 1904 | } | 1908 | } |
| 1905 | 1909 | ||
| @@ -1941,18 +1945,6 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_ | |||
| 1941 | DP_LINK_STATUS_SIZE); | 1945 | DP_LINK_STATUS_SIZE); |
| 1942 | } | 1946 | } |
| 1943 | 1947 | ||
| 1944 | #if 0 | ||
| 1945 | static char *voltage_names[] = { | ||
| 1946 | "0.4V", "0.6V", "0.8V", "1.2V" | ||
| 1947 | }; | ||
| 1948 | static char *pre_emph_names[] = { | ||
| 1949 | "0dB", "3.5dB", "6dB", "9.5dB" | ||
| 1950 | }; | ||
| 1951 | static char *link_train_names[] = { | ||
| 1952 | "pattern 1", "pattern 2", "idle", "off" | ||
| 1953 | }; | ||
| 1954 | #endif | ||
| 1955 | |||
| 1956 | /* | 1948 | /* |
| 1957 | * These are source-specific values; current Intel hardware supports | 1949 | * These are source-specific values; current Intel hardware supports |
| 1958 | * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB | 1950 | * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB |
| @@ -2050,7 +2042,7 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp) | |||
| 2050 | unsigned long demph_reg_value, preemph_reg_value, | 2042 | unsigned long demph_reg_value, preemph_reg_value, |
| 2051 | uniqtranscale_reg_value; | 2043 | uniqtranscale_reg_value; |
| 2052 | uint8_t train_set = intel_dp->train_set[0]; | 2044 | uint8_t train_set = intel_dp->train_set[0]; |
| 2053 | int port = vlv_dport_to_channel(dport); | 2045 | enum dpio_channel port = vlv_dport_to_channel(dport); |
| 2054 | int pipe = intel_crtc->pipe; | 2046 | int pipe = intel_crtc->pipe; |
| 2055 | 2047 | ||
| 2056 | switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { | 2048 | switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { |
| @@ -2127,14 +2119,14 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp) | |||
| 2127 | } | 2119 | } |
| 2128 | 2120 | ||
| 2129 | mutex_lock(&dev_priv->dpio_lock); | 2121 | mutex_lock(&dev_priv->dpio_lock); |
| 2130 | vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0x00000000); | 2122 | vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000); |
| 2131 | vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL4(port), demph_reg_value); | 2123 | vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value); |
| 2132 | vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL2(port), | 2124 | vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port), |
| 2133 | uniqtranscale_reg_value); | 2125 | uniqtranscale_reg_value); |
| 2134 | vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL3(port), 0x0C782040); | 2126 | vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040); |
| 2135 | vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER0(port), 0x00030000); | 2127 | vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000); |
| 2136 | vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port), preemph_reg_value); | 2128 | vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value); |
| 2137 | vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0x80000000); | 2129 | vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000); |
| 2138 | mutex_unlock(&dev_priv->dpio_lock); | 2130 | mutex_unlock(&dev_priv->dpio_lock); |
| 2139 | 2131 | ||
| 2140 | return 0; | 2132 | return 0; |
| @@ -2646,7 +2638,6 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) | |||
| 2646 | 2638 | ||
| 2647 | if (cr_tries > 5) { | 2639 | if (cr_tries > 5) { |
| 2648 | DRM_ERROR("failed to train DP, aborting\n"); | 2640 | DRM_ERROR("failed to train DP, aborting\n"); |
| 2649 | intel_dp_link_down(intel_dp); | ||
| 2650 | break; | 2641 | break; |
| 2651 | } | 2642 | } |
| 2652 | 2643 | ||
| @@ -2899,13 +2890,11 @@ intel_dp_check_link_status(struct intel_dp *intel_dp) | |||
| 2899 | 2890 | ||
| 2900 | /* Try to read receiver status if the link appears to be up */ | 2891 | /* Try to read receiver status if the link appears to be up */ |
| 2901 | if (!intel_dp_get_link_status(intel_dp, link_status)) { | 2892 | if (!intel_dp_get_link_status(intel_dp, link_status)) { |
| 2902 | intel_dp_link_down(intel_dp); | ||
| 2903 | return; | 2893 | return; |
| 2904 | } | 2894 | } |
| 2905 | 2895 | ||
| 2906 | /* Now read the DPCD to see if it's actually running */ | 2896 | /* Now read the DPCD to see if it's actually running */ |
| 2907 | if (!intel_dp_get_dpcd(intel_dp)) { | 2897 | if (!intel_dp_get_dpcd(intel_dp)) { |
| 2908 | intel_dp_link_down(intel_dp); | ||
| 2909 | return; | 2898 | return; |
| 2910 | } | 2899 | } |
| 2911 | 2900 | ||
| @@ -3020,18 +3009,34 @@ g4x_dp_detect(struct intel_dp *intel_dp) | |||
| 3020 | return status; | 3009 | return status; |
| 3021 | } | 3010 | } |
| 3022 | 3011 | ||
| 3023 | switch (intel_dig_port->port) { | 3012 | if (IS_VALLEYVIEW(dev)) { |
| 3024 | case PORT_B: | 3013 | switch (intel_dig_port->port) { |
| 3025 | bit = PORTB_HOTPLUG_LIVE_STATUS; | 3014 | case PORT_B: |
| 3026 | break; | 3015 | bit = PORTB_HOTPLUG_LIVE_STATUS_VLV; |
| 3027 | case PORT_C: | 3016 | break; |
| 3028 | bit = PORTC_HOTPLUG_LIVE_STATUS; | 3017 | case PORT_C: |
| 3029 | break; | 3018 | bit = PORTC_HOTPLUG_LIVE_STATUS_VLV; |
| 3030 | case PORT_D: | 3019 | break; |
| 3031 | bit = PORTD_HOTPLUG_LIVE_STATUS; | 3020 | case PORT_D: |
| 3032 | break; | 3021 | bit = PORTD_HOTPLUG_LIVE_STATUS_VLV; |
| 3033 | default: | 3022 | break; |
| 3034 | return connector_status_unknown; | 3023 | default: |
| 3024 | return connector_status_unknown; | ||
| 3025 | } | ||
| 3026 | } else { | ||
| 3027 | switch (intel_dig_port->port) { | ||
| 3028 | case PORT_B: | ||
| 3029 | bit = PORTB_HOTPLUG_LIVE_STATUS_G4X; | ||
| 3030 | break; | ||
| 3031 | case PORT_C: | ||
| 3032 | bit = PORTC_HOTPLUG_LIVE_STATUS_G4X; | ||
| 3033 | break; | ||
| 3034 | case PORT_D: | ||
| 3035 | bit = PORTD_HOTPLUG_LIVE_STATUS_G4X; | ||
| 3036 | break; | ||
| 3037 | default: | ||
| 3038 | return connector_status_unknown; | ||
| 3039 | } | ||
| 3035 | } | 3040 | } |
| 3036 | 3041 | ||
| 3037 | if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0) | 3042 | if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0) |
| @@ -3082,9 +3087,12 @@ intel_dp_detect(struct drm_connector *connector, bool force) | |||
| 3082 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); | 3087 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
| 3083 | struct intel_encoder *intel_encoder = &intel_dig_port->base; | 3088 | struct intel_encoder *intel_encoder = &intel_dig_port->base; |
| 3084 | struct drm_device *dev = connector->dev; | 3089 | struct drm_device *dev = connector->dev; |
| 3090 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 3085 | enum drm_connector_status status; | 3091 | enum drm_connector_status status; |
| 3086 | struct edid *edid = NULL; | 3092 | struct edid *edid = NULL; |
| 3087 | 3093 | ||
| 3094 | intel_runtime_pm_get(dev_priv); | ||
| 3095 | |||
| 3088 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", | 3096 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", |
| 3089 | connector->base.id, drm_get_connector_name(connector)); | 3097 | connector->base.id, drm_get_connector_name(connector)); |
| 3090 | 3098 | ||
| @@ -3096,7 +3104,7 @@ intel_dp_detect(struct drm_connector *connector, bool force) | |||
| 3096 | status = g4x_dp_detect(intel_dp); | 3104 | status = g4x_dp_detect(intel_dp); |
| 3097 | 3105 | ||
| 3098 | if (status != connector_status_connected) | 3106 | if (status != connector_status_connected) |
| 3099 | return status; | 3107 | goto out; |
| 3100 | 3108 | ||
| 3101 | intel_dp_probe_oui(intel_dp); | 3109 | intel_dp_probe_oui(intel_dp); |
| 3102 | 3110 | ||
| @@ -3112,7 +3120,11 @@ intel_dp_detect(struct drm_connector *connector, bool force) | |||
| 3112 | 3120 | ||
| 3113 | if (intel_encoder->type != INTEL_OUTPUT_EDP) | 3121 | if (intel_encoder->type != INTEL_OUTPUT_EDP) |
| 3114 | intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; | 3122 | intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; |
| 3115 | return connector_status_connected; | 3123 | status = connector_status_connected; |
| 3124 | |||
| 3125 | out: | ||
| 3126 | intel_runtime_pm_put(dev_priv); | ||
| 3127 | return status; | ||
| 3116 | } | 3128 | } |
| 3117 | 3129 | ||
| 3118 | static int intel_dp_get_modes(struct drm_connector *connector) | 3130 | static int intel_dp_get_modes(struct drm_connector *connector) |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 79f91f26e288..fbfaaba5cc3b 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
| @@ -65,8 +65,8 @@ | |||
| 65 | #define wait_for_atomic_us(COND, US) _wait_for((COND), \ | 65 | #define wait_for_atomic_us(COND, US) _wait_for((COND), \ |
| 66 | DIV_ROUND_UP((US), 1000), 0) | 66 | DIV_ROUND_UP((US), 1000), 0) |
| 67 | 67 | ||
| 68 | #define KHz(x) (1000*x) | 68 | #define KHz(x) (1000 * (x)) |
| 69 | #define MHz(x) KHz(1000*x) | 69 | #define MHz(x) KHz(1000 * (x)) |
| 70 | 70 | ||
| 71 | /* | 71 | /* |
| 72 | * Display related stuff | 72 | * Display related stuff |
| @@ -155,7 +155,19 @@ struct intel_encoder { | |||
| 155 | 155 | ||
| 156 | struct intel_panel { | 156 | struct intel_panel { |
| 157 | struct drm_display_mode *fixed_mode; | 157 | struct drm_display_mode *fixed_mode; |
| 158 | struct drm_display_mode *downclock_mode; | ||
| 158 | int fitting_mode; | 159 | int fitting_mode; |
| 160 | |||
| 161 | /* backlight */ | ||
| 162 | struct { | ||
| 163 | bool present; | ||
| 164 | u32 level; | ||
| 165 | u32 max; | ||
| 166 | bool enabled; | ||
| 167 | bool combination_mode; /* gen 2/4 only */ | ||
| 168 | bool active_low_pwm; | ||
| 169 | struct backlight_device *device; | ||
| 170 | } backlight; | ||
| 159 | }; | 171 | }; |
| 160 | 172 | ||
| 161 | struct intel_connector { | 173 | struct intel_connector { |
| @@ -443,7 +455,7 @@ struct intel_hdmi { | |||
| 443 | bool rgb_quant_range_selectable; | 455 | bool rgb_quant_range_selectable; |
| 444 | void (*write_infoframe)(struct drm_encoder *encoder, | 456 | void (*write_infoframe)(struct drm_encoder *encoder, |
| 445 | enum hdmi_infoframe_type type, | 457 | enum hdmi_infoframe_type type, |
| 446 | const uint8_t *frame, ssize_t len); | 458 | const void *frame, ssize_t len); |
| 447 | void (*set_infoframes)(struct drm_encoder *encoder, | 459 | void (*set_infoframes)(struct drm_encoder *encoder, |
| 448 | struct drm_display_mode *adjusted_mode); | 460 | struct drm_display_mode *adjusted_mode); |
| 449 | }; | 461 | }; |
| @@ -490,9 +502,9 @@ vlv_dport_to_channel(struct intel_digital_port *dport) | |||
| 490 | { | 502 | { |
| 491 | switch (dport->port) { | 503 | switch (dport->port) { |
| 492 | case PORT_B: | 504 | case PORT_B: |
| 493 | return 0; | 505 | return DPIO_CH0; |
| 494 | case PORT_C: | 506 | case PORT_C: |
| 495 | return 1; | 507 | return DPIO_CH1; |
| 496 | default: | 508 | default: |
| 497 | BUG(); | 509 | BUG(); |
| 498 | } | 510 | } |
| @@ -601,7 +613,8 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv, | |||
| 601 | void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc); | 613 | void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc); |
| 602 | void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc); | 614 | void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc); |
| 603 | void intel_ddi_setup_hw_pll_state(struct drm_device *dev); | 615 | void intel_ddi_setup_hw_pll_state(struct drm_device *dev); |
| 604 | bool intel_ddi_pll_mode_set(struct drm_crtc *crtc); | 616 | bool intel_ddi_pll_select(struct intel_crtc *crtc); |
| 617 | void intel_ddi_pll_enable(struct intel_crtc *crtc); | ||
| 605 | void intel_ddi_put_crtc_pll(struct drm_crtc *crtc); | 618 | void intel_ddi_put_crtc_pll(struct drm_crtc *crtc); |
| 606 | void intel_ddi_set_pipe_settings(struct drm_crtc *crtc); | 619 | void intel_ddi_set_pipe_settings(struct drm_crtc *crtc); |
| 607 | void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder); | 620 | void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder); |
| @@ -612,6 +625,8 @@ void intel_ddi_get_config(struct intel_encoder *encoder, | |||
| 612 | 625 | ||
| 613 | 626 | ||
| 614 | /* intel_display.c */ | 627 | /* intel_display.c */ |
| 628 | const char *intel_output_name(int output); | ||
| 629 | bool intel_has_pending_fb_unpin(struct drm_device *dev); | ||
| 615 | int intel_pch_rawclk(struct drm_device *dev); | 630 | int intel_pch_rawclk(struct drm_device *dev); |
| 616 | void intel_mark_busy(struct drm_device *dev); | 631 | void intel_mark_busy(struct drm_device *dev); |
| 617 | void intel_mark_fb_busy(struct drm_i915_gem_object *obj, | 632 | void intel_mark_fb_busy(struct drm_i915_gem_object *obj, |
| @@ -638,7 +653,8 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, | |||
| 638 | void intel_wait_for_vblank(struct drm_device *dev, int pipe); | 653 | void intel_wait_for_vblank(struct drm_device *dev, int pipe); |
| 639 | void intel_wait_for_pipe_off(struct drm_device *dev, int pipe); | 654 | void intel_wait_for_pipe_off(struct drm_device *dev, int pipe); |
| 640 | int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp); | 655 | int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp); |
| 641 | void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port); | 656 | void vlv_wait_port_ready(struct drm_i915_private *dev_priv, |
| 657 | struct intel_digital_port *dport); | ||
| 642 | bool intel_get_load_detect_pipe(struct drm_connector *connector, | 658 | bool intel_get_load_detect_pipe(struct drm_connector *connector, |
| 643 | struct drm_display_mode *mode, | 659 | struct drm_display_mode *mode, |
| 644 | struct intel_load_detect_pipe *old); | 660 | struct intel_load_detect_pipe *old); |
| @@ -690,11 +706,10 @@ void | |||
| 690 | ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config, | 706 | ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config, |
| 691 | int dotclock); | 707 | int dotclock); |
| 692 | bool intel_crtc_active(struct drm_crtc *crtc); | 708 | bool intel_crtc_active(struct drm_crtc *crtc); |
| 693 | void i915_disable_vga_mem(struct drm_device *dev); | ||
| 694 | void hsw_enable_ips(struct intel_crtc *crtc); | 709 | void hsw_enable_ips(struct intel_crtc *crtc); |
| 695 | void hsw_disable_ips(struct intel_crtc *crtc); | 710 | void hsw_disable_ips(struct intel_crtc *crtc); |
| 696 | void intel_display_set_init_power(struct drm_device *dev, bool enable); | 711 | void intel_display_set_init_power(struct drm_device *dev, bool enable); |
| 697 | 712 | int valleyview_get_vco(struct drm_i915_private *dev_priv); | |
| 698 | 713 | ||
| 699 | /* intel_dp.c */ | 714 | /* intel_dp.c */ |
| 700 | void intel_dp_init(struct drm_device *dev, int output_reg, enum port port); | 715 | void intel_dp_init(struct drm_device *dev, int output_reg, enum port port); |
| @@ -808,9 +823,13 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level, | |||
| 808 | int intel_panel_setup_backlight(struct drm_connector *connector); | 823 | int intel_panel_setup_backlight(struct drm_connector *connector); |
| 809 | void intel_panel_enable_backlight(struct intel_connector *connector); | 824 | void intel_panel_enable_backlight(struct intel_connector *connector); |
| 810 | void intel_panel_disable_backlight(struct intel_connector *connector); | 825 | void intel_panel_disable_backlight(struct intel_connector *connector); |
| 811 | void intel_panel_destroy_backlight(struct drm_device *dev); | 826 | void intel_panel_destroy_backlight(struct drm_connector *connector); |
| 827 | void intel_panel_init_backlight_funcs(struct drm_device *dev); | ||
| 812 | enum drm_connector_status intel_panel_detect(struct drm_device *dev); | 828 | enum drm_connector_status intel_panel_detect(struct drm_device *dev); |
| 813 | 829 | extern struct drm_display_mode *intel_find_panel_downclock( | |
| 830 | struct drm_device *dev, | ||
| 831 | struct drm_display_mode *fixed_mode, | ||
| 832 | struct drm_connector *connector); | ||
| 814 | 833 | ||
| 815 | /* intel_pm.c */ | 834 | /* intel_pm.c */ |
| 816 | void intel_init_clock_gating(struct drm_device *dev); | 835 | void intel_init_clock_gating(struct drm_device *dev); |
| @@ -830,6 +849,8 @@ int intel_power_domains_init(struct drm_device *dev); | |||
| 830 | void intel_power_domains_remove(struct drm_device *dev); | 849 | void intel_power_domains_remove(struct drm_device *dev); |
| 831 | bool intel_display_power_enabled(struct drm_device *dev, | 850 | bool intel_display_power_enabled(struct drm_device *dev, |
| 832 | enum intel_display_power_domain domain); | 851 | enum intel_display_power_domain domain); |
| 852 | bool intel_display_power_enabled_sw(struct drm_device *dev, | ||
| 853 | enum intel_display_power_domain domain); | ||
| 833 | void intel_display_power_get(struct drm_device *dev, | 854 | void intel_display_power_get(struct drm_device *dev, |
| 834 | enum intel_display_power_domain domain); | 855 | enum intel_display_power_domain domain); |
| 835 | void intel_display_power_put(struct drm_device *dev, | 856 | void intel_display_power_put(struct drm_device *dev, |
| @@ -844,6 +865,10 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv); | |||
| 844 | void gen6_rps_boost(struct drm_i915_private *dev_priv); | 865 | void gen6_rps_boost(struct drm_i915_private *dev_priv); |
| 845 | void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv); | 866 | void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv); |
| 846 | void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv); | 867 | void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv); |
| 868 | void intel_runtime_pm_get(struct drm_i915_private *dev_priv); | ||
| 869 | void intel_runtime_pm_put(struct drm_i915_private *dev_priv); | ||
| 870 | void intel_init_runtime_pm(struct drm_i915_private *dev_priv); | ||
| 871 | void intel_fini_runtime_pm(struct drm_i915_private *dev_priv); | ||
| 847 | void ilk_wm_get_hw_state(struct drm_device *dev); | 872 | void ilk_wm_get_hw_state(struct drm_device *dev); |
| 848 | 873 | ||
| 849 | 874 | ||
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c index d257b093ca68..fabbf0d895cf 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c | |||
| @@ -37,49 +37,18 @@ | |||
| 37 | static const struct intel_dsi_device intel_dsi_devices[] = { | 37 | static const struct intel_dsi_device intel_dsi_devices[] = { |
| 38 | }; | 38 | }; |
| 39 | 39 | ||
| 40 | 40 | static void band_gap_reset(struct drm_i915_private *dev_priv) | |
| 41 | static void vlv_cck_modify(struct drm_i915_private *dev_priv, u32 reg, u32 val, | ||
| 42 | u32 mask) | ||
| 43 | { | ||
| 44 | u32 tmp = vlv_cck_read(dev_priv, reg); | ||
| 45 | tmp &= ~mask; | ||
| 46 | tmp |= val; | ||
| 47 | vlv_cck_write(dev_priv, reg, tmp); | ||
| 48 | } | ||
| 49 | |||
| 50 | static void band_gap_wa(struct drm_i915_private *dev_priv) | ||
| 51 | { | 41 | { |
| 52 | mutex_lock(&dev_priv->dpio_lock); | 42 | mutex_lock(&dev_priv->dpio_lock); |
| 53 | 43 | ||
| 54 | /* Enable bandgap fix in GOP driver */ | 44 | vlv_flisdsi_write(dev_priv, 0x08, 0x0001); |
| 55 | vlv_cck_modify(dev_priv, 0x6D, 0x00010000, 0x00030000); | 45 | vlv_flisdsi_write(dev_priv, 0x0F, 0x0005); |
| 56 | msleep(20); | 46 | vlv_flisdsi_write(dev_priv, 0x0F, 0x0025); |
| 57 | vlv_cck_modify(dev_priv, 0x6E, 0x00010000, 0x00030000); | 47 | udelay(150); |
| 58 | msleep(20); | 48 | vlv_flisdsi_write(dev_priv, 0x0F, 0x0000); |
| 59 | vlv_cck_modify(dev_priv, 0x6F, 0x00010000, 0x00030000); | 49 | vlv_flisdsi_write(dev_priv, 0x08, 0x0000); |
| 60 | msleep(20); | ||
| 61 | vlv_cck_modify(dev_priv, 0x00, 0x00008000, 0x00008000); | ||
| 62 | msleep(20); | ||
| 63 | vlv_cck_modify(dev_priv, 0x00, 0x00000000, 0x00008000); | ||
| 64 | msleep(20); | ||
| 65 | |||
| 66 | /* Turn Display Trunk on */ | ||
| 67 | vlv_cck_modify(dev_priv, 0x6B, 0x00020000, 0x00030000); | ||
| 68 | msleep(20); | ||
| 69 | |||
| 70 | vlv_cck_modify(dev_priv, 0x6C, 0x00020000, 0x00030000); | ||
| 71 | msleep(20); | ||
| 72 | |||
| 73 | vlv_cck_modify(dev_priv, 0x6D, 0x00020000, 0x00030000); | ||
| 74 | msleep(20); | ||
| 75 | vlv_cck_modify(dev_priv, 0x6E, 0x00020000, 0x00030000); | ||
| 76 | msleep(20); | ||
| 77 | vlv_cck_modify(dev_priv, 0x6F, 0x00020000, 0x00030000); | ||
| 78 | 50 | ||
| 79 | mutex_unlock(&dev_priv->dpio_lock); | 51 | mutex_unlock(&dev_priv->dpio_lock); |
| 80 | |||
| 81 | /* Need huge delay, otherwise clock is not stable */ | ||
| 82 | msleep(100); | ||
| 83 | } | 52 | } |
| 84 | 53 | ||
| 85 | static struct intel_dsi *intel_attached_dsi(struct drm_connector *connector) | 54 | static struct intel_dsi *intel_attached_dsi(struct drm_connector *connector) |
| @@ -132,14 +101,47 @@ static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder) | |||
| 132 | vlv_enable_dsi_pll(encoder); | 101 | vlv_enable_dsi_pll(encoder); |
| 133 | } | 102 | } |
| 134 | 103 | ||
| 104 | static void intel_dsi_device_ready(struct intel_encoder *encoder) | ||
| 105 | { | ||
| 106 | struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; | ||
| 107 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); | ||
| 108 | int pipe = intel_crtc->pipe; | ||
| 109 | u32 val; | ||
| 110 | |||
| 111 | DRM_DEBUG_KMS("\n"); | ||
| 112 | |||
| 113 | val = I915_READ(MIPI_PORT_CTRL(pipe)); | ||
| 114 | I915_WRITE(MIPI_PORT_CTRL(pipe), val | LP_OUTPUT_HOLD); | ||
| 115 | usleep_range(1000, 1500); | ||
| 116 | I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_EXIT); | ||
| 117 | usleep_range(2000, 2500); | ||
| 118 | I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY); | ||
| 119 | usleep_range(2000, 2500); | ||
| 120 | I915_WRITE(MIPI_DEVICE_READY(pipe), 0x00); | ||
| 121 | usleep_range(2000, 2500); | ||
| 122 | I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY); | ||
| 123 | usleep_range(2000, 2500); | ||
| 124 | } | ||
| 135 | static void intel_dsi_pre_enable(struct intel_encoder *encoder) | 125 | static void intel_dsi_pre_enable(struct intel_encoder *encoder) |
| 136 | { | 126 | { |
| 127 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | ||
| 128 | |||
| 137 | DRM_DEBUG_KMS("\n"); | 129 | DRM_DEBUG_KMS("\n"); |
| 130 | |||
| 131 | if (intel_dsi->dev.dev_ops->panel_reset) | ||
| 132 | intel_dsi->dev.dev_ops->panel_reset(&intel_dsi->dev); | ||
| 133 | |||
| 134 | /* put device in ready state */ | ||
| 135 | intel_dsi_device_ready(encoder); | ||
| 136 | |||
| 137 | if (intel_dsi->dev.dev_ops->send_otp_cmds) | ||
| 138 | intel_dsi->dev.dev_ops->send_otp_cmds(&intel_dsi->dev); | ||
| 138 | } | 139 | } |
| 139 | 140 | ||
| 140 | static void intel_dsi_enable(struct intel_encoder *encoder) | 141 | static void intel_dsi_enable(struct intel_encoder *encoder) |
| 141 | { | 142 | { |
| 142 | struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; | 143 | struct drm_device *dev = encoder->base.dev; |
| 144 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 143 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); | 145 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); |
| 144 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | 146 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); |
| 145 | int pipe = intel_crtc->pipe; | 147 | int pipe = intel_crtc->pipe; |
| @@ -147,41 +149,28 @@ static void intel_dsi_enable(struct intel_encoder *encoder) | |||
| 147 | 149 | ||
| 148 | DRM_DEBUG_KMS("\n"); | 150 | DRM_DEBUG_KMS("\n"); |
| 149 | 151 | ||
| 150 | temp = I915_READ(MIPI_DEVICE_READY(pipe)); | ||
| 151 | if ((temp & DEVICE_READY) == 0) { | ||
| 152 | temp &= ~ULPS_STATE_MASK; | ||
| 153 | I915_WRITE(MIPI_DEVICE_READY(pipe), temp | DEVICE_READY); | ||
| 154 | } else if (temp & ULPS_STATE_MASK) { | ||
| 155 | temp &= ~ULPS_STATE_MASK; | ||
| 156 | I915_WRITE(MIPI_DEVICE_READY(pipe), temp | ULPS_STATE_EXIT); | ||
| 157 | /* | ||
| 158 | * We need to ensure that there is a minimum of 1 ms time | ||
| 159 | * available before clearing the UPLS exit state. | ||
| 160 | */ | ||
| 161 | msleep(2); | ||
| 162 | I915_WRITE(MIPI_DEVICE_READY(pipe), temp); | ||
| 163 | } | ||
| 164 | |||
| 165 | if (is_cmd_mode(intel_dsi)) | 152 | if (is_cmd_mode(intel_dsi)) |
| 166 | I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(pipe), 8 * 4); | 153 | I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(pipe), 8 * 4); |
| 167 | 154 | else { | |
| 168 | if (is_vid_mode(intel_dsi)) { | ||
| 169 | msleep(20); /* XXX */ | 155 | msleep(20); /* XXX */ |
| 170 | dpi_send_cmd(intel_dsi, TURN_ON); | 156 | dpi_send_cmd(intel_dsi, TURN_ON); |
| 171 | msleep(100); | 157 | msleep(100); |
| 172 | 158 | ||
| 173 | /* assert ip_tg_enable signal */ | 159 | /* assert ip_tg_enable signal */ |
| 174 | temp = I915_READ(MIPI_PORT_CTRL(pipe)); | 160 | temp = I915_READ(MIPI_PORT_CTRL(pipe)) & ~LANE_CONFIGURATION_MASK; |
| 161 | temp = temp | intel_dsi->port_bits; | ||
| 175 | I915_WRITE(MIPI_PORT_CTRL(pipe), temp | DPI_ENABLE); | 162 | I915_WRITE(MIPI_PORT_CTRL(pipe), temp | DPI_ENABLE); |
| 176 | POSTING_READ(MIPI_PORT_CTRL(pipe)); | 163 | POSTING_READ(MIPI_PORT_CTRL(pipe)); |
| 177 | } | 164 | } |
| 178 | 165 | ||
| 179 | intel_dsi->dev.dev_ops->enable(&intel_dsi->dev); | 166 | if (intel_dsi->dev.dev_ops->enable) |
| 167 | intel_dsi->dev.dev_ops->enable(&intel_dsi->dev); | ||
| 180 | } | 168 | } |
| 181 | 169 | ||
| 182 | static void intel_dsi_disable(struct intel_encoder *encoder) | 170 | static void intel_dsi_disable(struct intel_encoder *encoder) |
| 183 | { | 171 | { |
| 184 | struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; | 172 | struct drm_device *dev = encoder->base.dev; |
| 173 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 185 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); | 174 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); |
| 186 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | 175 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); |
| 187 | int pipe = intel_crtc->pipe; | 176 | int pipe = intel_crtc->pipe; |
| @@ -189,8 +178,6 @@ static void intel_dsi_disable(struct intel_encoder *encoder) | |||
| 189 | 178 | ||
| 190 | DRM_DEBUG_KMS("\n"); | 179 | DRM_DEBUG_KMS("\n"); |
| 191 | 180 | ||
| 192 | intel_dsi->dev.dev_ops->disable(&intel_dsi->dev); | ||
| 193 | |||
| 194 | if (is_vid_mode(intel_dsi)) { | 181 | if (is_vid_mode(intel_dsi)) { |
| 195 | dpi_send_cmd(intel_dsi, SHUTDOWN); | 182 | dpi_send_cmd(intel_dsi, SHUTDOWN); |
| 196 | msleep(10); | 183 | msleep(10); |
| @@ -203,20 +190,54 @@ static void intel_dsi_disable(struct intel_encoder *encoder) | |||
| 203 | msleep(2); | 190 | msleep(2); |
| 204 | } | 191 | } |
| 205 | 192 | ||
| 206 | temp = I915_READ(MIPI_DEVICE_READY(pipe)); | 193 | /* if disable packets are sent before sending shutdown packet then in |
| 207 | if (temp & DEVICE_READY) { | 194 | * some next enable sequence send turn on packet error is observed */ |
| 208 | temp &= ~DEVICE_READY; | 195 | if (intel_dsi->dev.dev_ops->disable) |
| 209 | temp &= ~ULPS_STATE_MASK; | 196 | intel_dsi->dev.dev_ops->disable(&intel_dsi->dev); |
| 210 | I915_WRITE(MIPI_DEVICE_READY(pipe), temp); | ||
| 211 | } | ||
| 212 | } | 197 | } |
| 213 | 198 | ||
| 214 | static void intel_dsi_post_disable(struct intel_encoder *encoder) | 199 | static void intel_dsi_clear_device_ready(struct intel_encoder *encoder) |
| 215 | { | 200 | { |
| 201 | struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; | ||
| 202 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); | ||
| 203 | int pipe = intel_crtc->pipe; | ||
| 204 | u32 val; | ||
| 205 | |||
| 216 | DRM_DEBUG_KMS("\n"); | 206 | DRM_DEBUG_KMS("\n"); |
| 217 | 207 | ||
| 208 | I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER); | ||
| 209 | usleep_range(2000, 2500); | ||
| 210 | |||
| 211 | I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_EXIT); | ||
| 212 | usleep_range(2000, 2500); | ||
| 213 | |||
| 214 | I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER); | ||
| 215 | usleep_range(2000, 2500); | ||
| 216 | |||
| 217 | val = I915_READ(MIPI_PORT_CTRL(pipe)); | ||
| 218 | I915_WRITE(MIPI_PORT_CTRL(pipe), val & ~LP_OUTPUT_HOLD); | ||
| 219 | usleep_range(1000, 1500); | ||
| 220 | |||
| 221 | if (wait_for(((I915_READ(MIPI_PORT_CTRL(pipe)) & AFE_LATCHOUT) | ||
| 222 | == 0x00000), 30)) | ||
| 223 | DRM_ERROR("DSI LP not going Low\n"); | ||
| 224 | |||
| 225 | I915_WRITE(MIPI_DEVICE_READY(pipe), 0x00); | ||
| 226 | usleep_range(2000, 2500); | ||
| 227 | |||
| 218 | vlv_disable_dsi_pll(encoder); | 228 | vlv_disable_dsi_pll(encoder); |
| 219 | } | 229 | } |
| 230 | static void intel_dsi_post_disable(struct intel_encoder *encoder) | ||
| 231 | { | ||
| 232 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | ||
| 233 | |||
| 234 | DRM_DEBUG_KMS("\n"); | ||
| 235 | |||
| 236 | intel_dsi_clear_device_ready(encoder); | ||
| 237 | |||
| 238 | if (intel_dsi->dev.dev_ops->disable_panel_power) | ||
| 239 | intel_dsi->dev.dev_ops->disable_panel_power(&intel_dsi->dev); | ||
| 240 | } | ||
| 220 | 241 | ||
| 221 | static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, | 242 | static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, |
| 222 | enum pipe *pipe) | 243 | enum pipe *pipe) |
| @@ -251,8 +272,9 @@ static void intel_dsi_get_config(struct intel_encoder *encoder, | |||
| 251 | /* XXX: read flags, set to adjusted_mode */ | 272 | /* XXX: read flags, set to adjusted_mode */ |
| 252 | } | 273 | } |
| 253 | 274 | ||
| 254 | static int intel_dsi_mode_valid(struct drm_connector *connector, | 275 | static enum drm_mode_status |
| 255 | struct drm_display_mode *mode) | 276 | intel_dsi_mode_valid(struct drm_connector *connector, |
| 277 | struct drm_display_mode *mode) | ||
| 256 | { | 278 | { |
| 257 | struct intel_connector *intel_connector = to_intel_connector(connector); | 279 | struct intel_connector *intel_connector = to_intel_connector(connector); |
| 258 | struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; | 280 | struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; |
| @@ -352,11 +374,8 @@ static void intel_dsi_mode_set(struct intel_encoder *intel_encoder) | |||
| 352 | 374 | ||
| 353 | DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe)); | 375 | DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe)); |
| 354 | 376 | ||
| 355 | /* Update the DSI PLL */ | ||
| 356 | vlv_enable_dsi_pll(intel_encoder); | ||
| 357 | |||
| 358 | /* XXX: Location of the call */ | 377 | /* XXX: Location of the call */ |
| 359 | band_gap_wa(dev_priv); | 378 | band_gap_reset(dev_priv); |
| 360 | 379 | ||
| 361 | /* escape clock divider, 20MHz, shared for A and C. device ready must be | 380 | /* escape clock divider, 20MHz, shared for A and C. device ready must be |
| 362 | * off when doing this! txclkesc? */ | 381 | * off when doing this! txclkesc? */ |
| @@ -373,11 +392,7 @@ static void intel_dsi_mode_set(struct intel_encoder *intel_encoder) | |||
| 373 | I915_WRITE(MIPI_INTR_STAT(pipe), 0xffffffff); | 392 | I915_WRITE(MIPI_INTR_STAT(pipe), 0xffffffff); |
| 374 | I915_WRITE(MIPI_INTR_EN(pipe), 0xffffffff); | 393 | I915_WRITE(MIPI_INTR_EN(pipe), 0xffffffff); |
| 375 | 394 | ||
| 376 | I915_WRITE(MIPI_DPHY_PARAM(pipe), | 395 | I915_WRITE(MIPI_DPHY_PARAM(pipe), intel_dsi->dphy_reg); |
| 377 | 0x3c << EXIT_ZERO_COUNT_SHIFT | | ||
| 378 | 0x1f << TRAIL_COUNT_SHIFT | | ||
| 379 | 0xc5 << CLK_ZERO_COUNT_SHIFT | | ||
| 380 | 0x1f << PREPARE_COUNT_SHIFT); | ||
| 381 | 396 | ||
| 382 | I915_WRITE(MIPI_DPI_RESOLUTION(pipe), | 397 | I915_WRITE(MIPI_DPI_RESOLUTION(pipe), |
| 383 | adjusted_mode->vdisplay << VERTICAL_ADDRESS_SHIFT | | 398 | adjusted_mode->vdisplay << VERTICAL_ADDRESS_SHIFT | |
| @@ -425,9 +440,9 @@ static void intel_dsi_mode_set(struct intel_encoder *intel_encoder) | |||
| 425 | adjusted_mode->htotal, | 440 | adjusted_mode->htotal, |
| 426 | bpp, intel_dsi->lane_count) + 1); | 441 | bpp, intel_dsi->lane_count) + 1); |
| 427 | } | 442 | } |
| 428 | I915_WRITE(MIPI_LP_RX_TIMEOUT(pipe), 8309); /* max */ | 443 | I915_WRITE(MIPI_LP_RX_TIMEOUT(pipe), intel_dsi->lp_rx_timeout); |
| 429 | I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(pipe), 0x14); /* max */ | 444 | I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(pipe), intel_dsi->turn_arnd_val); |
| 430 | I915_WRITE(MIPI_DEVICE_RESET_TIMER(pipe), 0xffff); /* max */ | 445 | I915_WRITE(MIPI_DEVICE_RESET_TIMER(pipe), intel_dsi->rst_timer_val); |
| 431 | 446 | ||
| 432 | /* dphy stuff */ | 447 | /* dphy stuff */ |
| 433 | 448 | ||
| @@ -442,29 +457,31 @@ static void intel_dsi_mode_set(struct intel_encoder *intel_encoder) | |||
| 442 | * | 457 | * |
| 443 | * XXX: write MIPI_STOP_STATE_STALL? | 458 | * XXX: write MIPI_STOP_STATE_STALL? |
| 444 | */ | 459 | */ |
| 445 | I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(pipe), 0x46); | 460 | I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(pipe), |
| 461 | intel_dsi->hs_to_lp_count); | ||
| 446 | 462 | ||
| 447 | /* XXX: low power clock equivalence in terms of byte clock. the number | 463 | /* XXX: low power clock equivalence in terms of byte clock. the number |
| 448 | * of byte clocks occupied in one low power clock. based on txbyteclkhs | 464 | * of byte clocks occupied in one low power clock. based on txbyteclkhs |
| 449 | * and txclkesc. txclkesc time / txbyteclk time * (105 + | 465 | * and txclkesc. txclkesc time / txbyteclk time * (105 + |
| 450 | * MIPI_STOP_STATE_STALL) / 105.??? | 466 | * MIPI_STOP_STATE_STALL) / 105.??? |
| 451 | */ | 467 | */ |
| 452 | I915_WRITE(MIPI_LP_BYTECLK(pipe), 4); | 468 | I915_WRITE(MIPI_LP_BYTECLK(pipe), intel_dsi->lp_byte_clk); |
| 453 | 469 | ||
| 454 | /* the bw essential for transmitting 16 long packets containing 252 | 470 | /* the bw essential for transmitting 16 long packets containing 252 |
| 455 | * bytes meant for dcs write memory command is programmed in this | 471 | * bytes meant for dcs write memory command is programmed in this |
| 456 | * register in terms of byte clocks. based on dsi transfer rate and the | 472 | * register in terms of byte clocks. based on dsi transfer rate and the |
| 457 | * number of lanes configured the time taken to transmit 16 long packets | 473 | * number of lanes configured the time taken to transmit 16 long packets |
| 458 | * in a dsi stream varies. */ | 474 | * in a dsi stream varies. */ |
| 459 | I915_WRITE(MIPI_DBI_BW_CTRL(pipe), 0x820); | 475 | I915_WRITE(MIPI_DBI_BW_CTRL(pipe), intel_dsi->bw_timer); |
| 460 | 476 | ||
| 461 | I915_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe), | 477 | I915_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe), |
| 462 | 0xa << LP_HS_SSW_CNT_SHIFT | | 478 | intel_dsi->clk_lp_to_hs_count << LP_HS_SSW_CNT_SHIFT | |
| 463 | 0x14 << HS_LP_PWR_SW_CNT_SHIFT); | 479 | intel_dsi->clk_hs_to_lp_count << HS_LP_PWR_SW_CNT_SHIFT); |
| 464 | 480 | ||
| 465 | if (is_vid_mode(intel_dsi)) | 481 | if (is_vid_mode(intel_dsi)) |
| 466 | I915_WRITE(MIPI_VIDEO_MODE_FORMAT(pipe), | 482 | I915_WRITE(MIPI_VIDEO_MODE_FORMAT(pipe), |
| 467 | intel_dsi->video_mode_format); | 483 | intel_dsi->video_frmt_cfg_bits | |
| 484 | intel_dsi->video_mode_format); | ||
| 468 | } | 485 | } |
| 469 | 486 | ||
| 470 | static enum drm_connector_status | 487 | static enum drm_connector_status |
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h index c7765f33d524..b4a27cec882f 100644 --- a/drivers/gpu/drm/i915/intel_dsi.h +++ b/drivers/gpu/drm/i915/intel_dsi.h | |||
| @@ -39,6 +39,13 @@ struct intel_dsi_device { | |||
| 39 | struct intel_dsi_dev_ops { | 39 | struct intel_dsi_dev_ops { |
| 40 | bool (*init)(struct intel_dsi_device *dsi); | 40 | bool (*init)(struct intel_dsi_device *dsi); |
| 41 | 41 | ||
| 42 | void (*panel_reset)(struct intel_dsi_device *dsi); | ||
| 43 | |||
| 44 | void (*disable_panel_power)(struct intel_dsi_device *dsi); | ||
| 45 | |||
| 46 | /* one time programmable commands if needed */ | ||
| 47 | void (*send_otp_cmds)(struct intel_dsi_device *dsi); | ||
| 48 | |||
| 42 | /* This callback must be able to assume DSI commands can be sent */ | 49 | /* This callback must be able to assume DSI commands can be sent */ |
| 43 | void (*enable)(struct intel_dsi_device *dsi); | 50 | void (*enable)(struct intel_dsi_device *dsi); |
| 44 | 51 | ||
| @@ -89,6 +96,20 @@ struct intel_dsi { | |||
| 89 | 96 | ||
| 90 | /* eot for MIPI_EOT_DISABLE register */ | 97 | /* eot for MIPI_EOT_DISABLE register */ |
| 91 | u32 eot_disable; | 98 | u32 eot_disable; |
| 99 | |||
| 100 | u32 port_bits; | ||
| 101 | u32 bw_timer; | ||
| 102 | u32 dphy_reg; | ||
| 103 | u32 video_frmt_cfg_bits; | ||
| 104 | u16 lp_byte_clk; | ||
| 105 | |||
| 106 | /* timeouts in byte clocks */ | ||
| 107 | u16 lp_rx_timeout; | ||
| 108 | u16 turn_arnd_val; | ||
| 109 | u16 rst_timer_val; | ||
| 110 | u16 hs_to_lp_count; | ||
| 111 | u16 clk_lp_to_hs_count; | ||
| 112 | u16 clk_hs_to_lp_count; | ||
| 92 | }; | 113 | }; |
| 93 | 114 | ||
| 94 | static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder) | 115 | static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder) |
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c index 44279b2ade88..ba79ec19da3b 100644 --- a/drivers/gpu/drm/i915/intel_dsi_pll.c +++ b/drivers/gpu/drm/i915/intel_dsi_pll.c | |||
| @@ -50,6 +50,8 @@ static const u32 lfsr_converts[] = { | |||
| 50 | 71, 35 /* 91 - 92 */ | 50 | 71, 35 /* 91 - 92 */ |
| 51 | }; | 51 | }; |
| 52 | 52 | ||
| 53 | #ifdef DSI_CLK_FROM_RR | ||
| 54 | |||
| 53 | static u32 dsi_rr_formula(const struct drm_display_mode *mode, | 55 | static u32 dsi_rr_formula(const struct drm_display_mode *mode, |
| 54 | int pixel_format, int video_mode_format, | 56 | int pixel_format, int video_mode_format, |
| 55 | int lane_count, bool eotp) | 57 | int lane_count, bool eotp) |
| @@ -121,7 +123,7 @@ static u32 dsi_rr_formula(const struct drm_display_mode *mode, | |||
| 121 | 123 | ||
| 122 | /* the dsi clock is divided by 2 in the hardware to get dsi ddr clock */ | 124 | /* the dsi clock is divided by 2 in the hardware to get dsi ddr clock */ |
| 123 | dsi_bit_clock_hz = bytes_per_x_frames_x_lanes * 8; | 125 | dsi_bit_clock_hz = bytes_per_x_frames_x_lanes * 8; |
| 124 | dsi_clk = dsi_bit_clock_hz / (1000 * 1000); | 126 | dsi_clk = dsi_bit_clock_hz / 1000; |
| 125 | 127 | ||
| 126 | if (eotp && video_mode_format == VIDEO_MODE_BURST) | 128 | if (eotp && video_mode_format == VIDEO_MODE_BURST) |
| 127 | dsi_clk *= 2; | 129 | dsi_clk *= 2; |
| @@ -129,64 +131,37 @@ static u32 dsi_rr_formula(const struct drm_display_mode *mode, | |||
| 129 | return dsi_clk; | 131 | return dsi_clk; |
| 130 | } | 132 | } |
| 131 | 133 | ||
| 132 | #ifdef MNP_FROM_TABLE | 134 | #else |
| 133 | |||
| 134 | struct dsi_clock_table { | ||
| 135 | u32 freq; | ||
| 136 | u8 m; | ||
| 137 | u8 p; | ||
| 138 | }; | ||
| 139 | |||
| 140 | static const struct dsi_clock_table dsi_clk_tbl[] = { | ||
| 141 | {300, 72, 6}, {313, 75, 6}, {323, 78, 6}, {333, 80, 6}, | ||
| 142 | {343, 82, 6}, {353, 85, 6}, {363, 87, 6}, {373, 90, 6}, | ||
| 143 | {383, 92, 6}, {390, 78, 5}, {393, 79, 5}, {400, 80, 5}, | ||
| 144 | {401, 80, 5}, {402, 80, 5}, {403, 81, 5}, {404, 81, 5}, | ||
| 145 | {405, 81, 5}, {406, 81, 5}, {407, 81, 5}, {408, 82, 5}, | ||
| 146 | {409, 82, 5}, {410, 82, 5}, {411, 82, 5}, {412, 82, 5}, | ||
| 147 | {413, 83, 5}, {414, 83, 5}, {415, 83, 5}, {416, 83, 5}, | ||
| 148 | {417, 83, 5}, {418, 84, 5}, {419, 84, 5}, {420, 84, 5}, | ||
| 149 | {430, 86, 5}, {440, 88, 5}, {450, 90, 5}, {460, 92, 5}, | ||
| 150 | {470, 75, 4}, {480, 77, 4}, {490, 78, 4}, {500, 80, 4}, | ||
| 151 | {510, 82, 4}, {520, 83, 4}, {530, 85, 4}, {540, 86, 4}, | ||
| 152 | {550, 88, 4}, {560, 90, 4}, {570, 91, 4}, {580, 70, 3}, | ||
| 153 | {590, 71, 3}, {600, 72, 3}, {610, 73, 3}, {620, 74, 3}, | ||
| 154 | {630, 76, 3}, {640, 77, 3}, {650, 78, 3}, {660, 79, 3}, | ||
| 155 | {670, 80, 3}, {680, 82, 3}, {690, 83, 3}, {700, 84, 3}, | ||
| 156 | {710, 85, 3}, {720, 86, 3}, {730, 88, 3}, {740, 89, 3}, | ||
| 157 | {750, 90, 3}, {760, 91, 3}, {770, 92, 3}, {780, 62, 2}, | ||
| 158 | {790, 63, 2}, {800, 64, 2}, {880, 70, 2}, {900, 72, 2}, | ||
| 159 | {1000, 80, 2}, /* dsi clock frequency in Mhz*/ | ||
| 160 | }; | ||
| 161 | 135 | ||
| 162 | static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp) | 136 | /* Get DSI clock from pixel clock */ |
| 137 | static u32 dsi_clk_from_pclk(const struct drm_display_mode *mode, | ||
| 138 | int pixel_format, int lane_count) | ||
| 163 | { | 139 | { |
| 164 | unsigned int i; | 140 | u32 dsi_clk_khz; |
| 165 | u8 m; | 141 | u32 bpp; |
| 166 | u8 n; | ||
| 167 | u8 p; | ||
| 168 | u32 m_seed; | ||
| 169 | |||
| 170 | if (dsi_clk < 300 || dsi_clk > 1000) | ||
| 171 | return -ECHRNG; | ||
| 172 | 142 | ||
| 173 | for (i = 0; i <= ARRAY_SIZE(dsi_clk_tbl); i++) { | 143 | switch (pixel_format) { |
| 174 | if (dsi_clk_tbl[i].freq > dsi_clk) | 144 | default: |
| 175 | break; | 145 | case VID_MODE_FORMAT_RGB888: |
| 146 | case VID_MODE_FORMAT_RGB666_LOOSE: | ||
| 147 | bpp = 24; | ||
| 148 | break; | ||
| 149 | case VID_MODE_FORMAT_RGB666: | ||
| 150 | bpp = 18; | ||
| 151 | break; | ||
| 152 | case VID_MODE_FORMAT_RGB565: | ||
| 153 | bpp = 16; | ||
| 154 | break; | ||
| 176 | } | 155 | } |
| 177 | 156 | ||
| 178 | m = dsi_clk_tbl[i].m; | 157 | /* DSI data rate = pixel clock * bits per pixel / lane count |
| 179 | p = dsi_clk_tbl[i].p; | 158 | pixel clock is converted from KHz to Hz */ |
| 180 | m_seed = lfsr_converts[m - 62]; | 159 | dsi_clk_khz = DIV_ROUND_CLOSEST(mode->clock * bpp, lane_count); |
| 181 | n = 1; | ||
| 182 | dsi_mnp->dsi_pll_ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + p - 2); | ||
| 183 | dsi_mnp->dsi_pll_div = (n - 1) << DSI_PLL_N1_DIV_SHIFT | | ||
| 184 | m_seed << DSI_PLL_M1_DIV_SHIFT; | ||
| 185 | 160 | ||
| 186 | return 0; | 161 | return dsi_clk_khz; |
| 187 | } | 162 | } |
| 188 | 163 | ||
| 189 | #else | 164 | #endif |
| 190 | 165 | ||
| 191 | static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp) | 166 | static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp) |
| 192 | { | 167 | { |
| @@ -194,36 +169,47 @@ static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp) | |||
| 194 | u32 ref_clk; | 169 | u32 ref_clk; |
| 195 | u32 error; | 170 | u32 error; |
| 196 | u32 tmp_error; | 171 | u32 tmp_error; |
| 197 | u32 target_dsi_clk; | 172 | int target_dsi_clk; |
| 198 | u32 calc_dsi_clk; | 173 | int calc_dsi_clk; |
| 199 | u32 calc_m; | 174 | u32 calc_m; |
| 200 | u32 calc_p; | 175 | u32 calc_p; |
| 201 | u32 m_seed; | 176 | u32 m_seed; |
| 202 | 177 | ||
| 203 | if (dsi_clk < 300 || dsi_clk > 1150) { | 178 | /* dsi_clk is expected in KHZ */ |
| 179 | if (dsi_clk < 300000 || dsi_clk > 1150000) { | ||
| 204 | DRM_ERROR("DSI CLK Out of Range\n"); | 180 | DRM_ERROR("DSI CLK Out of Range\n"); |
| 205 | return -ECHRNG; | 181 | return -ECHRNG; |
| 206 | } | 182 | } |
| 207 | 183 | ||
| 208 | ref_clk = 25000; | 184 | ref_clk = 25000; |
| 209 | target_dsi_clk = dsi_clk * 1000; | 185 | target_dsi_clk = dsi_clk; |
| 210 | error = 0xFFFFFFFF; | 186 | error = 0xFFFFFFFF; |
| 187 | tmp_error = 0xFFFFFFFF; | ||
| 211 | calc_m = 0; | 188 | calc_m = 0; |
| 212 | calc_p = 0; | 189 | calc_p = 0; |
| 213 | 190 | ||
| 214 | for (m = 62; m <= 92; m++) { | 191 | for (m = 62; m <= 92; m++) { |
| 215 | for (p = 2; p <= 6; p++) { | 192 | for (p = 2; p <= 6; p++) { |
| 216 | 193 | /* Find the optimal m and p divisors | |
| 194 | with minimal error +/- the required clock */ | ||
| 217 | calc_dsi_clk = (m * ref_clk) / p; | 195 | calc_dsi_clk = (m * ref_clk) / p; |
| 218 | if (calc_dsi_clk >= target_dsi_clk) { | 196 | if (calc_dsi_clk == target_dsi_clk) { |
| 219 | tmp_error = calc_dsi_clk - target_dsi_clk; | 197 | calc_m = m; |
| 220 | if (tmp_error < error) { | 198 | calc_p = p; |
| 221 | error = tmp_error; | 199 | error = 0; |
| 222 | calc_m = m; | 200 | break; |
| 223 | calc_p = p; | 201 | } else |
| 224 | } | 202 | tmp_error = abs(target_dsi_clk - calc_dsi_clk); |
| 203 | |||
| 204 | if (tmp_error < error) { | ||
| 205 | error = tmp_error; | ||
| 206 | calc_m = m; | ||
| 207 | calc_p = p; | ||
| 225 | } | 208 | } |
| 226 | } | 209 | } |
| 210 | |||
| 211 | if (error == 0) | ||
| 212 | break; | ||
| 227 | } | 213 | } |
| 228 | 214 | ||
| 229 | m_seed = lfsr_converts[calc_m - 62]; | 215 | m_seed = lfsr_converts[calc_m - 62]; |
| @@ -235,8 +221,6 @@ static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp) | |||
| 235 | return 0; | 221 | return 0; |
| 236 | } | 222 | } |
| 237 | 223 | ||
| 238 | #endif | ||
| 239 | |||
| 240 | /* | 224 | /* |
| 241 | * XXX: The muxing and gating is hard coded for now. Need to add support for | 225 | * XXX: The muxing and gating is hard coded for now. Need to add support for |
| 242 | * sharing PLLs with two DSI outputs. | 226 | * sharing PLLs with two DSI outputs. |
| @@ -251,9 +235,8 @@ static void vlv_configure_dsi_pll(struct intel_encoder *encoder) | |||
| 251 | struct dsi_mnp dsi_mnp; | 235 | struct dsi_mnp dsi_mnp; |
| 252 | u32 dsi_clk; | 236 | u32 dsi_clk; |
| 253 | 237 | ||
| 254 | dsi_clk = dsi_rr_formula(mode, intel_dsi->pixel_format, | 238 | dsi_clk = dsi_clk_from_pclk(mode, intel_dsi->pixel_format, |
| 255 | intel_dsi->video_mode_format, | 239 | intel_dsi->lane_count); |
| 256 | intel_dsi->lane_count, !intel_dsi->eot_disable); | ||
| 257 | 240 | ||
| 258 | ret = dsi_calc_mnp(dsi_clk, &dsi_mnp); | 241 | ret = dsi_calc_mnp(dsi_clk, &dsi_mnp); |
| 259 | if (ret) { | 242 | if (ret) { |
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index 3c7736546856..eeff998e52ef 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c | |||
| @@ -234,8 +234,9 @@ static void intel_dvo_dpms(struct drm_connector *connector, int mode) | |||
| 234 | intel_modeset_check_state(connector->dev); | 234 | intel_modeset_check_state(connector->dev); |
| 235 | } | 235 | } |
| 236 | 236 | ||
| 237 | static int intel_dvo_mode_valid(struct drm_connector *connector, | 237 | static enum drm_mode_status |
| 238 | struct drm_display_mode *mode) | 238 | intel_dvo_mode_valid(struct drm_connector *connector, |
| 239 | struct drm_display_mode *mode) | ||
| 239 | { | 240 | { |
| 240 | struct intel_dvo *intel_dvo = intel_attached_dvo(connector); | 241 | struct intel_dvo *intel_dvo = intel_attached_dvo(connector); |
| 241 | 242 | ||
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index 895fcb4fbd94..39eac9937a4a 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c | |||
| @@ -57,18 +57,14 @@ static struct fb_ops intelfb_ops = { | |||
| 57 | .fb_debug_leave = drm_fb_helper_debug_leave, | 57 | .fb_debug_leave = drm_fb_helper_debug_leave, |
| 58 | }; | 58 | }; |
| 59 | 59 | ||
| 60 | static int intelfb_create(struct drm_fb_helper *helper, | 60 | static int intelfb_alloc(struct drm_fb_helper *helper, |
| 61 | struct drm_fb_helper_surface_size *sizes) | 61 | struct drm_fb_helper_surface_size *sizes) |
| 62 | { | 62 | { |
| 63 | struct intel_fbdev *ifbdev = | 63 | struct intel_fbdev *ifbdev = |
| 64 | container_of(helper, struct intel_fbdev, helper); | 64 | container_of(helper, struct intel_fbdev, helper); |
| 65 | struct drm_device *dev = helper->dev; | 65 | struct drm_device *dev = helper->dev; |
| 66 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 67 | struct fb_info *info; | ||
| 68 | struct drm_framebuffer *fb; | ||
| 69 | struct drm_mode_fb_cmd2 mode_cmd = {}; | 66 | struct drm_mode_fb_cmd2 mode_cmd = {}; |
| 70 | struct drm_i915_gem_object *obj; | 67 | struct drm_i915_gem_object *obj; |
| 71 | struct device *device = &dev->pdev->dev; | ||
| 72 | int size, ret; | 68 | int size, ret; |
| 73 | 69 | ||
| 74 | /* we don't do packed 24bpp */ | 70 | /* we don't do packed 24bpp */ |
| @@ -94,8 +90,6 @@ static int intelfb_create(struct drm_fb_helper *helper, | |||
| 94 | goto out; | 90 | goto out; |
| 95 | } | 91 | } |
| 96 | 92 | ||
| 97 | mutex_lock(&dev->struct_mutex); | ||
| 98 | |||
| 99 | /* Flush everything out, we'll be doing GTT only from now on */ | 93 | /* Flush everything out, we'll be doing GTT only from now on */ |
| 100 | ret = intel_pin_and_fence_fb_obj(dev, obj, NULL); | 94 | ret = intel_pin_and_fence_fb_obj(dev, obj, NULL); |
| 101 | if (ret) { | 95 | if (ret) { |
| @@ -103,7 +97,50 @@ static int intelfb_create(struct drm_fb_helper *helper, | |||
| 103 | goto out_unref; | 97 | goto out_unref; |
| 104 | } | 98 | } |
| 105 | 99 | ||
| 106 | info = framebuffer_alloc(0, device); | 100 | ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj); |
| 101 | if (ret) | ||
| 102 | goto out_unpin; | ||
| 103 | |||
| 104 | return 0; | ||
| 105 | |||
| 106 | out_unpin: | ||
| 107 | i915_gem_object_unpin(obj); | ||
| 108 | out_unref: | ||
| 109 | drm_gem_object_unreference(&obj->base); | ||
| 110 | out: | ||
| 111 | return ret; | ||
| 112 | } | ||
| 113 | |||
| 114 | static int intelfb_create(struct drm_fb_helper *helper, | ||
| 115 | struct drm_fb_helper_surface_size *sizes) | ||
| 116 | { | ||
| 117 | struct intel_fbdev *ifbdev = | ||
| 118 | container_of(helper, struct intel_fbdev, helper); | ||
| 119 | struct intel_framebuffer *intel_fb = &ifbdev->ifb; | ||
| 120 | struct drm_device *dev = helper->dev; | ||
| 121 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 122 | struct fb_info *info; | ||
| 123 | struct drm_framebuffer *fb; | ||
| 124 | struct drm_i915_gem_object *obj; | ||
| 125 | int size, ret; | ||
| 126 | |||
| 127 | mutex_lock(&dev->struct_mutex); | ||
| 128 | |||
| 129 | if (!intel_fb->obj) { | ||
| 130 | DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n"); | ||
| 131 | ret = intelfb_alloc(helper, sizes); | ||
| 132 | if (ret) | ||
| 133 | goto out_unlock; | ||
| 134 | } else { | ||
| 135 | DRM_DEBUG_KMS("re-using BIOS fb\n"); | ||
| 136 | sizes->fb_width = intel_fb->base.width; | ||
| 137 | sizes->fb_height = intel_fb->base.height; | ||
| 138 | } | ||
| 139 | |||
| 140 | obj = intel_fb->obj; | ||
| 141 | size = obj->base.size; | ||
| 142 | |||
| 143 | info = framebuffer_alloc(0, &dev->pdev->dev); | ||
| 107 | if (!info) { | 144 | if (!info) { |
| 108 | ret = -ENOMEM; | 145 | ret = -ENOMEM; |
| 109 | goto out_unpin; | 146 | goto out_unpin; |
| @@ -111,10 +148,6 @@ static int intelfb_create(struct drm_fb_helper *helper, | |||
| 111 | 148 | ||
| 112 | info->par = helper; | 149 | info->par = helper; |
| 113 | 150 | ||
| 114 | ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj); | ||
| 115 | if (ret) | ||
| 116 | goto out_unpin; | ||
| 117 | |||
| 118 | fb = &ifbdev->ifb.base; | 151 | fb = &ifbdev->ifb.base; |
| 119 | 152 | ||
| 120 | ifbdev->helper.fb = fb; | 153 | ifbdev->helper.fb = fb; |
| @@ -170,17 +203,15 @@ static int intelfb_create(struct drm_fb_helper *helper, | |||
| 170 | fb->width, fb->height, | 203 | fb->width, fb->height, |
| 171 | i915_gem_obj_ggtt_offset(obj), obj); | 204 | i915_gem_obj_ggtt_offset(obj), obj); |
| 172 | 205 | ||
| 173 | |||
| 174 | mutex_unlock(&dev->struct_mutex); | 206 | mutex_unlock(&dev->struct_mutex); |
| 175 | vga_switcheroo_client_fb_set(dev->pdev, info); | 207 | vga_switcheroo_client_fb_set(dev->pdev, info); |
| 176 | return 0; | 208 | return 0; |
| 177 | 209 | ||
| 178 | out_unpin: | 210 | out_unpin: |
| 179 | i915_gem_object_unpin(obj); | 211 | i915_gem_object_unpin(obj); |
| 180 | out_unref: | ||
| 181 | drm_gem_object_unreference(&obj->base); | 212 | drm_gem_object_unreference(&obj->base); |
| 213 | out_unlock: | ||
| 182 | mutex_unlock(&dev->struct_mutex); | 214 | mutex_unlock(&dev->struct_mutex); |
| 183 | out: | ||
| 184 | return ret; | 215 | return ret; |
| 185 | } | 216 | } |
| 186 | 217 | ||
| @@ -297,8 +328,6 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state) | |||
| 297 | fb_set_suspend(info, state); | 328 | fb_set_suspend(info, state); |
| 298 | } | 329 | } |
| 299 | 330 | ||
| 300 | MODULE_LICENSE("GPL and additional rights"); | ||
| 301 | |||
| 302 | void intel_fbdev_output_poll_changed(struct drm_device *dev) | 331 | void intel_fbdev_output_poll_changed(struct drm_device *dev) |
| 303 | { | 332 | { |
| 304 | struct drm_i915_private *dev_priv = dev->dev_private; | 333 | struct drm_i915_private *dev_priv = dev->dev_private; |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 03f9ca70530c..6db0d9d17f47 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
| @@ -130,9 +130,9 @@ static u32 hsw_infoframe_data_reg(enum hdmi_infoframe_type type, | |||
| 130 | 130 | ||
| 131 | static void g4x_write_infoframe(struct drm_encoder *encoder, | 131 | static void g4x_write_infoframe(struct drm_encoder *encoder, |
| 132 | enum hdmi_infoframe_type type, | 132 | enum hdmi_infoframe_type type, |
| 133 | const uint8_t *frame, ssize_t len) | 133 | const void *frame, ssize_t len) |
| 134 | { | 134 | { |
| 135 | uint32_t *data = (uint32_t *)frame; | 135 | const uint32_t *data = frame; |
| 136 | struct drm_device *dev = encoder->dev; | 136 | struct drm_device *dev = encoder->dev; |
| 137 | struct drm_i915_private *dev_priv = dev->dev_private; | 137 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 138 | u32 val = I915_READ(VIDEO_DIP_CTL); | 138 | u32 val = I915_READ(VIDEO_DIP_CTL); |
| @@ -167,9 +167,9 @@ static void g4x_write_infoframe(struct drm_encoder *encoder, | |||
| 167 | 167 | ||
| 168 | static void ibx_write_infoframe(struct drm_encoder *encoder, | 168 | static void ibx_write_infoframe(struct drm_encoder *encoder, |
| 169 | enum hdmi_infoframe_type type, | 169 | enum hdmi_infoframe_type type, |
| 170 | const uint8_t *frame, ssize_t len) | 170 | const void *frame, ssize_t len) |
| 171 | { | 171 | { |
| 172 | uint32_t *data = (uint32_t *)frame; | 172 | const uint32_t *data = frame; |
| 173 | struct drm_device *dev = encoder->dev; | 173 | struct drm_device *dev = encoder->dev; |
| 174 | struct drm_i915_private *dev_priv = dev->dev_private; | 174 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 175 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | 175 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); |
| @@ -205,9 +205,9 @@ static void ibx_write_infoframe(struct drm_encoder *encoder, | |||
| 205 | 205 | ||
| 206 | static void cpt_write_infoframe(struct drm_encoder *encoder, | 206 | static void cpt_write_infoframe(struct drm_encoder *encoder, |
| 207 | enum hdmi_infoframe_type type, | 207 | enum hdmi_infoframe_type type, |
| 208 | const uint8_t *frame, ssize_t len) | 208 | const void *frame, ssize_t len) |
| 209 | { | 209 | { |
| 210 | uint32_t *data = (uint32_t *)frame; | 210 | const uint32_t *data = frame; |
| 211 | struct drm_device *dev = encoder->dev; | 211 | struct drm_device *dev = encoder->dev; |
| 212 | struct drm_i915_private *dev_priv = dev->dev_private; | 212 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 213 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | 213 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); |
| @@ -246,9 +246,9 @@ static void cpt_write_infoframe(struct drm_encoder *encoder, | |||
| 246 | 246 | ||
| 247 | static void vlv_write_infoframe(struct drm_encoder *encoder, | 247 | static void vlv_write_infoframe(struct drm_encoder *encoder, |
| 248 | enum hdmi_infoframe_type type, | 248 | enum hdmi_infoframe_type type, |
| 249 | const uint8_t *frame, ssize_t len) | 249 | const void *frame, ssize_t len) |
| 250 | { | 250 | { |
| 251 | uint32_t *data = (uint32_t *)frame; | 251 | const uint32_t *data = frame; |
| 252 | struct drm_device *dev = encoder->dev; | 252 | struct drm_device *dev = encoder->dev; |
| 253 | struct drm_i915_private *dev_priv = dev->dev_private; | 253 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 254 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | 254 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); |
| @@ -284,9 +284,9 @@ static void vlv_write_infoframe(struct drm_encoder *encoder, | |||
| 284 | 284 | ||
| 285 | static void hsw_write_infoframe(struct drm_encoder *encoder, | 285 | static void hsw_write_infoframe(struct drm_encoder *encoder, |
| 286 | enum hdmi_infoframe_type type, | 286 | enum hdmi_infoframe_type type, |
| 287 | const uint8_t *frame, ssize_t len) | 287 | const void *frame, ssize_t len) |
| 288 | { | 288 | { |
| 289 | uint32_t *data = (uint32_t *)frame; | 289 | const uint32_t *data = frame; |
| 290 | struct drm_device *dev = encoder->dev; | 290 | struct drm_device *dev = encoder->dev; |
| 291 | struct drm_i915_private *dev_priv = dev->dev_private; | 291 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 292 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | 292 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); |
| @@ -853,8 +853,9 @@ static int hdmi_portclock_limit(struct intel_hdmi *hdmi) | |||
| 853 | return 225000; | 853 | return 225000; |
| 854 | } | 854 | } |
| 855 | 855 | ||
| 856 | static int intel_hdmi_mode_valid(struct drm_connector *connector, | 856 | static enum drm_mode_status |
| 857 | struct drm_display_mode *mode) | 857 | intel_hdmi_mode_valid(struct drm_connector *connector, |
| 858 | struct drm_display_mode *mode) | ||
| 858 | { | 859 | { |
| 859 | if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector))) | 860 | if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector))) |
| 860 | return MODE_CLOCK_HIGH; | 861 | return MODE_CLOCK_HIGH; |
| @@ -1081,7 +1082,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder) | |||
| 1081 | struct drm_i915_private *dev_priv = dev->dev_private; | 1082 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1082 | struct intel_crtc *intel_crtc = | 1083 | struct intel_crtc *intel_crtc = |
| 1083 | to_intel_crtc(encoder->base.crtc); | 1084 | to_intel_crtc(encoder->base.crtc); |
| 1084 | int port = vlv_dport_to_channel(dport); | 1085 | enum dpio_channel port = vlv_dport_to_channel(dport); |
| 1085 | int pipe = intel_crtc->pipe; | 1086 | int pipe = intel_crtc->pipe; |
| 1086 | u32 val; | 1087 | u32 val; |
| 1087 | 1088 | ||
| @@ -1090,41 +1091,33 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder) | |||
| 1090 | 1091 | ||
| 1091 | /* Enable clock channels for this port */ | 1092 | /* Enable clock channels for this port */ |
| 1092 | mutex_lock(&dev_priv->dpio_lock); | 1093 | mutex_lock(&dev_priv->dpio_lock); |
| 1093 | val = vlv_dpio_read(dev_priv, pipe, DPIO_DATA_LANE_A(port)); | 1094 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port)); |
| 1094 | val = 0; | 1095 | val = 0; |
| 1095 | if (pipe) | 1096 | if (pipe) |
| 1096 | val |= (1<<21); | 1097 | val |= (1<<21); |
| 1097 | else | 1098 | else |
| 1098 | val &= ~(1<<21); | 1099 | val &= ~(1<<21); |
| 1099 | val |= 0x001000c4; | 1100 | val |= 0x001000c4; |
| 1100 | vlv_dpio_write(dev_priv, pipe, DPIO_DATA_CHANNEL(port), val); | 1101 | vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val); |
| 1101 | 1102 | ||
| 1102 | /* HDMI 1.0V-2dB */ | 1103 | /* HDMI 1.0V-2dB */ |
| 1103 | vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0); | 1104 | vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0); |
| 1104 | vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL4(port), | 1105 | vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), 0x2b245f5f); |
| 1105 | 0x2b245f5f); | 1106 | vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port), 0x5578b83a); |
| 1106 | vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL2(port), | 1107 | vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0c782040); |
| 1107 | 0x5578b83a); | 1108 | vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), 0x2b247878); |
| 1108 | vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL3(port), | 1109 | vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000); |
| 1109 | 0x0c782040); | 1110 | vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000); |
| 1110 | vlv_dpio_write(dev_priv, pipe, DPIO_TX3_SWING_CTL4(port), | 1111 | vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN); |
| 1111 | 0x2b247878); | ||
| 1112 | vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER0(port), 0x00030000); | ||
| 1113 | vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port), | ||
| 1114 | 0x00002000); | ||
| 1115 | vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), | ||
| 1116 | DPIO_TX_OCALINIT_EN); | ||
| 1117 | 1112 | ||
| 1118 | /* Program lane clock */ | 1113 | /* Program lane clock */ |
| 1119 | vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF0(port), | 1114 | vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018); |
| 1120 | 0x00760018); | 1115 | vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888); |
| 1121 | vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF8(port), | ||
| 1122 | 0x00400888); | ||
| 1123 | mutex_unlock(&dev_priv->dpio_lock); | 1116 | mutex_unlock(&dev_priv->dpio_lock); |
| 1124 | 1117 | ||
| 1125 | intel_enable_hdmi(encoder); | 1118 | intel_enable_hdmi(encoder); |
| 1126 | 1119 | ||
| 1127 | vlv_wait_port_ready(dev_priv, port); | 1120 | vlv_wait_port_ready(dev_priv, dport); |
| 1128 | } | 1121 | } |
| 1129 | 1122 | ||
| 1130 | static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder) | 1123 | static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder) |
| @@ -1134,7 +1127,7 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder) | |||
| 1134 | struct drm_i915_private *dev_priv = dev->dev_private; | 1127 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1135 | struct intel_crtc *intel_crtc = | 1128 | struct intel_crtc *intel_crtc = |
| 1136 | to_intel_crtc(encoder->base.crtc); | 1129 | to_intel_crtc(encoder->base.crtc); |
| 1137 | int port = vlv_dport_to_channel(dport); | 1130 | enum dpio_channel port = vlv_dport_to_channel(dport); |
| 1138 | int pipe = intel_crtc->pipe; | 1131 | int pipe = intel_crtc->pipe; |
| 1139 | 1132 | ||
| 1140 | if (!IS_VALLEYVIEW(dev)) | 1133 | if (!IS_VALLEYVIEW(dev)) |
| @@ -1142,24 +1135,22 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder) | |||
| 1142 | 1135 | ||
| 1143 | /* Program Tx lane resets to default */ | 1136 | /* Program Tx lane resets to default */ |
| 1144 | mutex_lock(&dev_priv->dpio_lock); | 1137 | mutex_lock(&dev_priv->dpio_lock); |
| 1145 | vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port), | 1138 | vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), |
| 1146 | DPIO_PCS_TX_LANE2_RESET | | 1139 | DPIO_PCS_TX_LANE2_RESET | |
| 1147 | DPIO_PCS_TX_LANE1_RESET); | 1140 | DPIO_PCS_TX_LANE1_RESET); |
| 1148 | vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port), | 1141 | vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), |
| 1149 | DPIO_PCS_CLK_CRI_RXEB_EIOS_EN | | 1142 | DPIO_PCS_CLK_CRI_RXEB_EIOS_EN | |
| 1150 | DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN | | 1143 | DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN | |
| 1151 | (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) | | 1144 | (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) | |
| 1152 | DPIO_PCS_CLK_SOFT_RESET); | 1145 | DPIO_PCS_CLK_SOFT_RESET); |
| 1153 | 1146 | ||
| 1154 | /* Fix up inter-pair skew failure */ | 1147 | /* Fix up inter-pair skew failure */ |
| 1155 | vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER1(port), 0x00750f00); | 1148 | vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00); |
| 1156 | vlv_dpio_write(dev_priv, pipe, DPIO_TX_CTL(port), 0x00001500); | 1149 | vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500); |
| 1157 | vlv_dpio_write(dev_priv, pipe, DPIO_TX_LANE(port), 0x40400000); | 1150 | vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000); |
| 1158 | 1151 | ||
| 1159 | vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port), | 1152 | vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000); |
| 1160 | 0x00002000); | 1153 | vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN); |
| 1161 | vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), | ||
| 1162 | DPIO_TX_OCALINIT_EN); | ||
| 1163 | mutex_unlock(&dev_priv->dpio_lock); | 1154 | mutex_unlock(&dev_priv->dpio_lock); |
| 1164 | } | 1155 | } |
| 1165 | 1156 | ||
| @@ -1169,13 +1160,13 @@ static void vlv_hdmi_post_disable(struct intel_encoder *encoder) | |||
| 1169 | struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; | 1160 | struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; |
| 1170 | struct intel_crtc *intel_crtc = | 1161 | struct intel_crtc *intel_crtc = |
| 1171 | to_intel_crtc(encoder->base.crtc); | 1162 | to_intel_crtc(encoder->base.crtc); |
| 1172 | int port = vlv_dport_to_channel(dport); | 1163 | enum dpio_channel port = vlv_dport_to_channel(dport); |
| 1173 | int pipe = intel_crtc->pipe; | 1164 | int pipe = intel_crtc->pipe; |
| 1174 | 1165 | ||
| 1175 | /* Reset lanes to avoid HDMI flicker (VLV w/a) */ | 1166 | /* Reset lanes to avoid HDMI flicker (VLV w/a) */ |
| 1176 | mutex_lock(&dev_priv->dpio_lock); | 1167 | mutex_lock(&dev_priv->dpio_lock); |
| 1177 | vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port), 0x00000000); | 1168 | vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000); |
| 1178 | vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port), 0x00e00060); | 1169 | vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060); |
| 1179 | mutex_unlock(&dev_priv->dpio_lock); | 1170 | mutex_unlock(&dev_priv->dpio_lock); |
| 1180 | } | 1171 | } |
| 1181 | 1172 | ||
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 2ca17b14b6c1..b1dc33f47899 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c | |||
| @@ -82,20 +82,11 @@ static int get_disp_clk_div(struct drm_i915_private *dev_priv, | |||
| 82 | 82 | ||
| 83 | static void gmbus_set_freq(struct drm_i915_private *dev_priv) | 83 | static void gmbus_set_freq(struct drm_i915_private *dev_priv) |
| 84 | { | 84 | { |
| 85 | int vco_freq[] = { 800, 1600, 2000, 2400 }; | 85 | int vco, gmbus_freq = 0, cdclk_div; |
| 86 | int gmbus_freq = 0, cdclk_div, hpll_freq; | ||
| 87 | 86 | ||
| 88 | BUG_ON(!IS_VALLEYVIEW(dev_priv->dev)); | 87 | BUG_ON(!IS_VALLEYVIEW(dev_priv->dev)); |
| 89 | 88 | ||
| 90 | /* Skip setting the gmbus freq if BIOS has already programmed it */ | 89 | vco = valleyview_get_vco(dev_priv); |
| 91 | if (I915_READ(GMBUSFREQ_VLV) != 0xA0) | ||
| 92 | return; | ||
| 93 | |||
| 94 | /* Obtain SKU information */ | ||
| 95 | mutex_lock(&dev_priv->dpio_lock); | ||
| 96 | hpll_freq = | ||
| 97 | vlv_cck_read(dev_priv, CCK_FUSE_REG) & CCK_FUSE_HPLL_FREQ_MASK; | ||
| 98 | mutex_unlock(&dev_priv->dpio_lock); | ||
| 99 | 90 | ||
| 100 | /* Get the CDCLK divide ratio */ | 91 | /* Get the CDCLK divide ratio */ |
| 101 | cdclk_div = get_disp_clk_div(dev_priv, CDCLK); | 92 | cdclk_div = get_disp_clk_div(dev_priv, CDCLK); |
| @@ -106,7 +97,7 @@ static void gmbus_set_freq(struct drm_i915_private *dev_priv) | |||
| 106 | * in fact 1MHz is the correct frequency. | 97 | * in fact 1MHz is the correct frequency. |
| 107 | */ | 98 | */ |
| 108 | if (cdclk_div) | 99 | if (cdclk_div) |
| 109 | gmbus_freq = (vco_freq[hpll_freq] << 1) / cdclk_div; | 100 | gmbus_freq = (vco << 1) / cdclk_div; |
| 110 | 101 | ||
| 111 | if (WARN_ON(gmbus_freq == 0)) | 102 | if (WARN_ON(gmbus_freq == 0)) |
| 112 | return; | 103 | return; |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index c3b4da7895ed..8bcb93a2a9f6 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
| @@ -256,8 +256,9 @@ static void intel_disable_lvds(struct intel_encoder *encoder) | |||
| 256 | POSTING_READ(lvds_encoder->reg); | 256 | POSTING_READ(lvds_encoder->reg); |
| 257 | } | 257 | } |
| 258 | 258 | ||
| 259 | static int intel_lvds_mode_valid(struct drm_connector *connector, | 259 | static enum drm_mode_status |
| 260 | struct drm_display_mode *mode) | 260 | intel_lvds_mode_valid(struct drm_connector *connector, |
| 261 | struct drm_display_mode *mode) | ||
| 261 | { | 262 | { |
| 262 | struct intel_connector *intel_connector = to_intel_connector(connector); | 263 | struct intel_connector *intel_connector = to_intel_connector(connector); |
| 263 | struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; | 264 | struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; |
| @@ -446,9 +447,19 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, | |||
| 446 | if (dev_priv->modeset_restore == MODESET_DONE) | 447 | if (dev_priv->modeset_restore == MODESET_DONE) |
| 447 | goto exit; | 448 | goto exit; |
| 448 | 449 | ||
| 449 | drm_modeset_lock_all(dev); | 450 | /* |
| 450 | intel_modeset_setup_hw_state(dev, true); | 451 | * Some old platform's BIOS love to wreak havoc while the lid is closed. |
| 451 | drm_modeset_unlock_all(dev); | 452 | * We try to detect this here and undo any damage. The split for PCH |
| 453 | * platforms is rather conservative and a bit arbitrary expect that on | ||
| 454 | * those platforms VGA disabling requires actual legacy VGA I/O access, | ||
| 455 | * and as part of the cleanup in the hw state restore we also redisable | ||
| 456 | * the vga plane. | ||
| 457 | */ | ||
| 458 | if (!HAS_PCH_SPLIT(dev)) { | ||
| 459 | drm_modeset_lock_all(dev); | ||
| 460 | intel_modeset_setup_hw_state(dev, true); | ||
| 461 | drm_modeset_unlock_all(dev); | ||
| 462 | } | ||
| 452 | 463 | ||
| 453 | dev_priv->modeset_restore = MODESET_DONE; | 464 | dev_priv->modeset_restore = MODESET_DONE; |
| 454 | 465 | ||
| @@ -744,57 +755,6 @@ static const struct dmi_system_id intel_no_lvds[] = { | |||
| 744 | { } /* terminating entry */ | 755 | { } /* terminating entry */ |
| 745 | }; | 756 | }; |
| 746 | 757 | ||
| 747 | /** | ||
| 748 | * intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID | ||
| 749 | * @dev: drm device | ||
| 750 | * @connector: LVDS connector | ||
| 751 | * | ||
| 752 | * Find the reduced downclock for LVDS in EDID. | ||
| 753 | */ | ||
| 754 | static void intel_find_lvds_downclock(struct drm_device *dev, | ||
| 755 | struct drm_display_mode *fixed_mode, | ||
| 756 | struct drm_connector *connector) | ||
| 757 | { | ||
| 758 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 759 | struct drm_display_mode *scan; | ||
| 760 | int temp_downclock; | ||
| 761 | |||
| 762 | temp_downclock = fixed_mode->clock; | ||
| 763 | list_for_each_entry(scan, &connector->probed_modes, head) { | ||
| 764 | /* | ||
| 765 | * If one mode has the same resolution with the fixed_panel | ||
| 766 | * mode while they have the different refresh rate, it means | ||
| 767 | * that the reduced downclock is found for the LVDS. In such | ||
| 768 | * case we can set the different FPx0/1 to dynamically select | ||
| 769 | * between low and high frequency. | ||
| 770 | */ | ||
| 771 | if (scan->hdisplay == fixed_mode->hdisplay && | ||
| 772 | scan->hsync_start == fixed_mode->hsync_start && | ||
| 773 | scan->hsync_end == fixed_mode->hsync_end && | ||
| 774 | scan->htotal == fixed_mode->htotal && | ||
| 775 | scan->vdisplay == fixed_mode->vdisplay && | ||
| 776 | scan->vsync_start == fixed_mode->vsync_start && | ||
| 777 | scan->vsync_end == fixed_mode->vsync_end && | ||
| 778 | scan->vtotal == fixed_mode->vtotal) { | ||
| 779 | if (scan->clock < temp_downclock) { | ||
| 780 | /* | ||
| 781 | * The downclock is already found. But we | ||
| 782 | * expect to find the lower downclock. | ||
| 783 | */ | ||
| 784 | temp_downclock = scan->clock; | ||
| 785 | } | ||
| 786 | } | ||
| 787 | } | ||
| 788 | if (temp_downclock < fixed_mode->clock && i915_lvds_downclock) { | ||
| 789 | /* We found the downclock for LVDS. */ | ||
| 790 | dev_priv->lvds_downclock_avail = 1; | ||
| 791 | dev_priv->lvds_downclock = temp_downclock; | ||
| 792 | DRM_DEBUG_KMS("LVDS downclock is found in EDID. " | ||
| 793 | "Normal clock %dKhz, downclock %dKhz\n", | ||
| 794 | fixed_mode->clock, temp_downclock); | ||
| 795 | } | ||
| 796 | } | ||
| 797 | |||
| 798 | /* | 758 | /* |
| 799 | * Enumerate the child dev array parsed from VBT to check whether | 759 | * Enumerate the child dev array parsed from VBT to check whether |
| 800 | * the LVDS is present. | 760 | * the LVDS is present. |
| @@ -1072,8 +1032,22 @@ void intel_lvds_init(struct drm_device *dev) | |||
| 1072 | 1032 | ||
| 1073 | fixed_mode = drm_mode_duplicate(dev, scan); | 1033 | fixed_mode = drm_mode_duplicate(dev, scan); |
| 1074 | if (fixed_mode) { | 1034 | if (fixed_mode) { |
| 1075 | intel_find_lvds_downclock(dev, fixed_mode, | 1035 | intel_connector->panel.downclock_mode = |
| 1076 | connector); | 1036 | intel_find_panel_downclock(dev, |
| 1037 | fixed_mode, connector); | ||
| 1038 | if (intel_connector->panel.downclock_mode != | ||
| 1039 | NULL && i915_lvds_downclock) { | ||
| 1040 | /* We found the downclock for LVDS. */ | ||
| 1041 | dev_priv->lvds_downclock_avail = true; | ||
| 1042 | dev_priv->lvds_downclock = | ||
| 1043 | intel_connector->panel. | ||
| 1044 | downclock_mode->clock; | ||
| 1045 | DRM_DEBUG_KMS("LVDS downclock is found" | ||
| 1046 | " in EDID. Normal clock %dKhz, " | ||
| 1047 | "downclock %dKhz\n", | ||
| 1048 | fixed_mode->clock, | ||
| 1049 | dev_priv->lvds_downclock); | ||
| 1050 | } | ||
| 1077 | goto out; | 1051 | goto out; |
| 1078 | } | 1052 | } |
| 1079 | } | 1053 | } |
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index 9a8804bee5cd..4e960ec7419f 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c | |||
| @@ -63,7 +63,7 @@ struct opregion_header { | |||
| 63 | u8 driver_ver[16]; | 63 | u8 driver_ver[16]; |
| 64 | u32 mboxes; | 64 | u32 mboxes; |
| 65 | u8 reserved[164]; | 65 | u8 reserved[164]; |
| 66 | } __attribute__((packed)); | 66 | } __packed; |
| 67 | 67 | ||
| 68 | /* OpRegion mailbox #1: public ACPI methods */ | 68 | /* OpRegion mailbox #1: public ACPI methods */ |
| 69 | struct opregion_acpi { | 69 | struct opregion_acpi { |
| @@ -85,7 +85,7 @@ struct opregion_acpi { | |||
| 85 | u32 cnot; /* current OS notification */ | 85 | u32 cnot; /* current OS notification */ |
| 86 | u32 nrdy; /* driver status */ | 86 | u32 nrdy; /* driver status */ |
| 87 | u8 rsvd2[60]; | 87 | u8 rsvd2[60]; |
| 88 | } __attribute__((packed)); | 88 | } __packed; |
| 89 | 89 | ||
| 90 | /* OpRegion mailbox #2: SWSCI */ | 90 | /* OpRegion mailbox #2: SWSCI */ |
| 91 | struct opregion_swsci { | 91 | struct opregion_swsci { |
| @@ -93,7 +93,7 @@ struct opregion_swsci { | |||
| 93 | u32 parm; /* command parameters */ | 93 | u32 parm; /* command parameters */ |
| 94 | u32 dslp; /* driver sleep time-out */ | 94 | u32 dslp; /* driver sleep time-out */ |
| 95 | u8 rsvd[244]; | 95 | u8 rsvd[244]; |
| 96 | } __attribute__((packed)); | 96 | } __packed; |
| 97 | 97 | ||
| 98 | /* OpRegion mailbox #3: ASLE */ | 98 | /* OpRegion mailbox #3: ASLE */ |
| 99 | struct opregion_asle { | 99 | struct opregion_asle { |
| @@ -114,7 +114,7 @@ struct opregion_asle { | |||
| 114 | u32 srot; /* supported rotation angles */ | 114 | u32 srot; /* supported rotation angles */ |
| 115 | u32 iuer; /* IUER events */ | 115 | u32 iuer; /* IUER events */ |
| 116 | u8 rsvd[86]; | 116 | u8 rsvd[86]; |
| 117 | } __attribute__((packed)); | 117 | } __packed; |
| 118 | 118 | ||
| 119 | /* Driver readiness indicator */ | 119 | /* Driver readiness indicator */ |
| 120 | #define ASLE_ARDY_READY (1 << 0) | 120 | #define ASLE_ARDY_READY (1 << 0) |
| @@ -395,13 +395,8 @@ int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state) | |||
| 395 | static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) | 395 | static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) |
| 396 | { | 396 | { |
| 397 | struct drm_i915_private *dev_priv = dev->dev_private; | 397 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 398 | struct drm_encoder *encoder; | 398 | struct intel_connector *intel_connector; |
| 399 | struct drm_connector *connector; | ||
| 400 | struct intel_connector *intel_connector = NULL; | ||
| 401 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[0]; | ||
| 402 | struct opregion_asle __iomem *asle = dev_priv->opregion.asle; | 399 | struct opregion_asle __iomem *asle = dev_priv->opregion.asle; |
| 403 | u32 ret = 0; | ||
| 404 | bool found = false; | ||
| 405 | 400 | ||
| 406 | DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp); | 401 | DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp); |
| 407 | 402 | ||
| @@ -413,38 +408,20 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) | |||
| 413 | return ASLC_BACKLIGHT_FAILED; | 408 | return ASLC_BACKLIGHT_FAILED; |
| 414 | 409 | ||
| 415 | mutex_lock(&dev->mode_config.mutex); | 410 | mutex_lock(&dev->mode_config.mutex); |
| 411 | |||
| 416 | /* | 412 | /* |
| 417 | * Could match the OpRegion connector here instead, but we'd also need | 413 | * Update backlight on all connectors that support backlight (usually |
| 418 | * to verify the connector could handle a backlight call. | 414 | * only one). |
| 419 | */ | 415 | */ |
| 420 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) | ||
| 421 | if (encoder->crtc == crtc) { | ||
| 422 | found = true; | ||
| 423 | break; | ||
| 424 | } | ||
| 425 | |||
| 426 | if (!found) { | ||
| 427 | ret = ASLC_BACKLIGHT_FAILED; | ||
| 428 | goto out; | ||
| 429 | } | ||
| 430 | |||
| 431 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) | ||
| 432 | if (connector->encoder == encoder) | ||
| 433 | intel_connector = to_intel_connector(connector); | ||
| 434 | |||
| 435 | if (!intel_connector) { | ||
| 436 | ret = ASLC_BACKLIGHT_FAILED; | ||
| 437 | goto out; | ||
| 438 | } | ||
| 439 | |||
| 440 | DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp); | 416 | DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp); |
| 441 | intel_panel_set_backlight(intel_connector, bclp, 255); | 417 | list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) |
| 418 | intel_panel_set_backlight(intel_connector, bclp, 255); | ||
| 442 | iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv); | 419 | iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv); |
| 443 | 420 | ||
| 444 | out: | ||
| 445 | mutex_unlock(&dev->mode_config.mutex); | 421 | mutex_unlock(&dev->mode_config.mutex); |
| 446 | 422 | ||
| 447 | return ret; | 423 | |
| 424 | return 0; | ||
| 448 | } | 425 | } |
| 449 | 426 | ||
| 450 | static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi) | 427 | static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi) |
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index a98a990fbab3..a759ecdb7a6e 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c | |||
| @@ -1005,7 +1005,7 @@ static int intel_panel_fitter_pipe(struct drm_device *dev) | |||
| 1005 | u32 pfit_control; | 1005 | u32 pfit_control; |
| 1006 | 1006 | ||
| 1007 | /* i830 doesn't have a panel fitter */ | 1007 | /* i830 doesn't have a panel fitter */ |
| 1008 | if (IS_I830(dev)) | 1008 | if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev))) |
| 1009 | return -1; | 1009 | return -1; |
| 1010 | 1010 | ||
| 1011 | pfit_control = I915_READ(PFIT_CONTROL); | 1011 | pfit_control = I915_READ(PFIT_CONTROL); |
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index e6f782d1c669..350de359123a 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c | |||
| @@ -325,214 +325,170 @@ out: | |||
| 325 | pipe_config->gmch_pfit.lvds_border_bits = border; | 325 | pipe_config->gmch_pfit.lvds_border_bits = border; |
| 326 | } | 326 | } |
| 327 | 327 | ||
| 328 | static int is_backlight_combination_mode(struct drm_device *dev) | 328 | static int i915_panel_invert_brightness; |
| 329 | MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness " | ||
| 330 | "(-1 force normal, 0 machine defaults, 1 force inversion), please " | ||
| 331 | "report PCI device ID, subsystem vendor and subsystem device ID " | ||
| 332 | "to dri-devel@lists.freedesktop.org, if your machine needs it. " | ||
| 333 | "It will then be included in an upcoming module version."); | ||
| 334 | module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600); | ||
| 335 | static u32 intel_panel_compute_brightness(struct intel_connector *connector, | ||
| 336 | u32 val) | ||
| 329 | { | 337 | { |
| 338 | struct drm_device *dev = connector->base.dev; | ||
| 330 | struct drm_i915_private *dev_priv = dev->dev_private; | 339 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 340 | struct intel_panel *panel = &connector->panel; | ||
| 331 | 341 | ||
| 332 | if (IS_GEN4(dev)) | 342 | WARN_ON(panel->backlight.max == 0); |
| 333 | return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE; | ||
| 334 | 343 | ||
| 335 | if (IS_GEN2(dev)) | 344 | if (i915_panel_invert_brightness < 0) |
| 336 | return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE; | 345 | return val; |
| 337 | 346 | ||
| 338 | return 0; | 347 | if (i915_panel_invert_brightness > 0 || |
| 348 | dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) { | ||
| 349 | return panel->backlight.max - val; | ||
| 350 | } | ||
| 351 | |||
| 352 | return val; | ||
| 339 | } | 353 | } |
| 340 | 354 | ||
| 341 | /* XXX: query mode clock or hardware clock and program max PWM appropriately | 355 | static u32 bdw_get_backlight(struct intel_connector *connector) |
| 342 | * when it's 0. | ||
| 343 | */ | ||
| 344 | static u32 i915_read_blc_pwm_ctl(struct drm_device *dev, enum pipe pipe) | ||
| 345 | { | 356 | { |
| 357 | struct drm_device *dev = connector->base.dev; | ||
| 346 | struct drm_i915_private *dev_priv = dev->dev_private; | 358 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 347 | u32 val; | ||
| 348 | 359 | ||
| 349 | WARN_ON_SMP(!spin_is_locked(&dev_priv->backlight.lock)); | 360 | return I915_READ(BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK; |
| 350 | 361 | } | |
| 351 | /* Restore the CTL value if it lost, e.g. GPU reset */ | ||
| 352 | |||
| 353 | if (HAS_PCH_SPLIT(dev_priv->dev)) { | ||
| 354 | val = I915_READ(BLC_PWM_PCH_CTL2); | ||
| 355 | if (dev_priv->regfile.saveBLC_PWM_CTL2 == 0) { | ||
| 356 | dev_priv->regfile.saveBLC_PWM_CTL2 = val; | ||
| 357 | } else if (val == 0) { | ||
| 358 | val = dev_priv->regfile.saveBLC_PWM_CTL2; | ||
| 359 | I915_WRITE(BLC_PWM_PCH_CTL2, val); | ||
| 360 | } | ||
| 361 | } else if (IS_VALLEYVIEW(dev)) { | ||
| 362 | val = I915_READ(VLV_BLC_PWM_CTL(pipe)); | ||
| 363 | if (dev_priv->regfile.saveBLC_PWM_CTL == 0) { | ||
| 364 | dev_priv->regfile.saveBLC_PWM_CTL = val; | ||
| 365 | dev_priv->regfile.saveBLC_PWM_CTL2 = | ||
| 366 | I915_READ(VLV_BLC_PWM_CTL2(pipe)); | ||
| 367 | } else if (val == 0) { | ||
| 368 | val = dev_priv->regfile.saveBLC_PWM_CTL; | ||
| 369 | I915_WRITE(VLV_BLC_PWM_CTL(pipe), val); | ||
| 370 | I915_WRITE(VLV_BLC_PWM_CTL2(pipe), | ||
| 371 | dev_priv->regfile.saveBLC_PWM_CTL2); | ||
| 372 | } | ||
| 373 | 362 | ||
| 374 | if (!val) | 363 | static u32 pch_get_backlight(struct intel_connector *connector) |
| 375 | val = 0x0f42ffff; | 364 | { |
| 376 | } else { | 365 | struct drm_device *dev = connector->base.dev; |
| 377 | val = I915_READ(BLC_PWM_CTL); | 366 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 378 | if (dev_priv->regfile.saveBLC_PWM_CTL == 0) { | ||
| 379 | dev_priv->regfile.saveBLC_PWM_CTL = val; | ||
| 380 | if (INTEL_INFO(dev)->gen >= 4) | ||
| 381 | dev_priv->regfile.saveBLC_PWM_CTL2 = | ||
| 382 | I915_READ(BLC_PWM_CTL2); | ||
| 383 | } else if (val == 0) { | ||
| 384 | val = dev_priv->regfile.saveBLC_PWM_CTL; | ||
| 385 | I915_WRITE(BLC_PWM_CTL, val); | ||
| 386 | if (INTEL_INFO(dev)->gen >= 4) | ||
| 387 | I915_WRITE(BLC_PWM_CTL2, | ||
| 388 | dev_priv->regfile.saveBLC_PWM_CTL2); | ||
| 389 | } | ||
| 390 | } | ||
| 391 | 367 | ||
| 392 | return val; | 368 | return I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; |
| 393 | } | 369 | } |
| 394 | 370 | ||
| 395 | static u32 intel_panel_get_max_backlight(struct drm_device *dev, | 371 | static u32 i9xx_get_backlight(struct intel_connector *connector) |
| 396 | enum pipe pipe) | ||
| 397 | { | 372 | { |
| 398 | u32 max; | 373 | struct drm_device *dev = connector->base.dev; |
| 374 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 375 | struct intel_panel *panel = &connector->panel; | ||
| 376 | u32 val; | ||
| 399 | 377 | ||
| 400 | max = i915_read_blc_pwm_ctl(dev, pipe); | 378 | val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; |
| 379 | if (INTEL_INFO(dev)->gen < 4) | ||
| 380 | val >>= 1; | ||
| 401 | 381 | ||
| 402 | if (HAS_PCH_SPLIT(dev)) { | 382 | if (panel->backlight.combination_mode) { |
| 403 | max >>= 16; | 383 | u8 lbpc; |
| 404 | } else { | ||
| 405 | if (INTEL_INFO(dev)->gen < 4) | ||
| 406 | max >>= 17; | ||
| 407 | else | ||
| 408 | max >>= 16; | ||
| 409 | 384 | ||
| 410 | if (is_backlight_combination_mode(dev)) | 385 | pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc); |
| 411 | max *= 0xff; | 386 | val *= lbpc; |
| 412 | } | 387 | } |
| 413 | 388 | ||
| 414 | DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max); | 389 | return val; |
| 415 | |||
| 416 | return max; | ||
| 417 | } | 390 | } |
| 418 | 391 | ||
| 419 | static int i915_panel_invert_brightness; | 392 | static u32 _vlv_get_backlight(struct drm_device *dev, enum pipe pipe) |
| 420 | MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness " | ||
| 421 | "(-1 force normal, 0 machine defaults, 1 force inversion), please " | ||
| 422 | "report PCI device ID, subsystem vendor and subsystem device ID " | ||
| 423 | "to dri-devel@lists.freedesktop.org, if your machine needs it. " | ||
| 424 | "It will then be included in an upcoming module version."); | ||
| 425 | module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600); | ||
| 426 | static u32 intel_panel_compute_brightness(struct drm_device *dev, | ||
| 427 | enum pipe pipe, u32 val) | ||
| 428 | { | 393 | { |
| 429 | struct drm_i915_private *dev_priv = dev->dev_private; | 394 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 430 | 395 | ||
| 431 | if (i915_panel_invert_brightness < 0) | 396 | return I915_READ(VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK; |
| 432 | return val; | 397 | } |
| 433 | 398 | ||
| 434 | if (i915_panel_invert_brightness > 0 || | 399 | static u32 vlv_get_backlight(struct intel_connector *connector) |
| 435 | dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) { | 400 | { |
| 436 | u32 max = intel_panel_get_max_backlight(dev, pipe); | 401 | struct drm_device *dev = connector->base.dev; |
| 437 | if (max) | 402 | enum pipe pipe = intel_get_pipe_from_connector(connector); |
| 438 | return max - val; | ||
| 439 | } | ||
| 440 | 403 | ||
| 441 | return val; | 404 | return _vlv_get_backlight(dev, pipe); |
| 442 | } | 405 | } |
| 443 | 406 | ||
| 444 | static u32 intel_panel_get_backlight(struct drm_device *dev, | 407 | static u32 intel_panel_get_backlight(struct intel_connector *connector) |
| 445 | enum pipe pipe) | ||
| 446 | { | 408 | { |
| 409 | struct drm_device *dev = connector->base.dev; | ||
| 447 | struct drm_i915_private *dev_priv = dev->dev_private; | 410 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 448 | u32 val; | 411 | u32 val; |
| 449 | unsigned long flags; | 412 | unsigned long flags; |
| 450 | int reg; | ||
| 451 | |||
| 452 | spin_lock_irqsave(&dev_priv->backlight.lock, flags); | ||
| 453 | |||
| 454 | if (IS_BROADWELL(dev)) { | ||
| 455 | val = I915_READ(BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK; | ||
| 456 | } else if (HAS_PCH_SPLIT(dev)) { | ||
| 457 | val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; | ||
| 458 | } else { | ||
| 459 | if (IS_VALLEYVIEW(dev)) | ||
| 460 | reg = VLV_BLC_PWM_CTL(pipe); | ||
| 461 | else | ||
| 462 | reg = BLC_PWM_CTL; | ||
| 463 | |||
| 464 | val = I915_READ(reg) & BACKLIGHT_DUTY_CYCLE_MASK; | ||
| 465 | if (INTEL_INFO(dev)->gen < 4) | ||
| 466 | val >>= 1; | ||
| 467 | |||
| 468 | if (is_backlight_combination_mode(dev)) { | ||
| 469 | u8 lbpc; | ||
| 470 | 413 | ||
| 471 | pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc); | 414 | spin_lock_irqsave(&dev_priv->backlight_lock, flags); |
| 472 | val *= lbpc; | ||
| 473 | } | ||
| 474 | } | ||
| 475 | 415 | ||
| 476 | val = intel_panel_compute_brightness(dev, pipe, val); | 416 | val = dev_priv->display.get_backlight(connector); |
| 417 | val = intel_panel_compute_brightness(connector, val); | ||
| 477 | 418 | ||
| 478 | spin_unlock_irqrestore(&dev_priv->backlight.lock, flags); | 419 | spin_unlock_irqrestore(&dev_priv->backlight_lock, flags); |
| 479 | 420 | ||
| 480 | DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val); | 421 | DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val); |
| 481 | return val; | 422 | return val; |
| 482 | } | 423 | } |
| 483 | 424 | ||
| 484 | static void intel_bdw_panel_set_backlight(struct drm_device *dev, u32 level) | 425 | static void bdw_set_backlight(struct intel_connector *connector, u32 level) |
| 485 | { | 426 | { |
| 427 | struct drm_device *dev = connector->base.dev; | ||
| 486 | struct drm_i915_private *dev_priv = dev->dev_private; | 428 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 487 | u32 val = I915_READ(BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK; | 429 | u32 val = I915_READ(BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK; |
| 488 | I915_WRITE(BLC_PWM_PCH_CTL2, val | level); | 430 | I915_WRITE(BLC_PWM_PCH_CTL2, val | level); |
| 489 | } | 431 | } |
| 490 | 432 | ||
| 491 | static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level) | 433 | static void pch_set_backlight(struct intel_connector *connector, u32 level) |
| 492 | { | 434 | { |
| 435 | struct drm_device *dev = connector->base.dev; | ||
| 493 | struct drm_i915_private *dev_priv = dev->dev_private; | 436 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 494 | u32 val = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK; | 437 | u32 tmp; |
| 495 | I915_WRITE(BLC_PWM_CPU_CTL, val | level); | 438 | |
| 439 | tmp = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK; | ||
| 440 | I915_WRITE(BLC_PWM_CPU_CTL, tmp | level); | ||
| 496 | } | 441 | } |
| 497 | 442 | ||
| 498 | static void intel_panel_actually_set_backlight(struct drm_device *dev, | 443 | static void i9xx_set_backlight(struct intel_connector *connector, u32 level) |
| 499 | enum pipe pipe, u32 level) | ||
| 500 | { | 444 | { |
| 445 | struct drm_device *dev = connector->base.dev; | ||
| 501 | struct drm_i915_private *dev_priv = dev->dev_private; | 446 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 502 | u32 tmp; | 447 | struct intel_panel *panel = &connector->panel; |
| 503 | int reg; | 448 | u32 tmp, mask; |
| 504 | |||
| 505 | DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level); | ||
| 506 | level = intel_panel_compute_brightness(dev, pipe, level); | ||
| 507 | 449 | ||
| 508 | if (IS_BROADWELL(dev)) | 450 | WARN_ON(panel->backlight.max == 0); |
| 509 | return intel_bdw_panel_set_backlight(dev, level); | ||
| 510 | else if (HAS_PCH_SPLIT(dev)) | ||
| 511 | return intel_pch_panel_set_backlight(dev, level); | ||
| 512 | 451 | ||
| 513 | if (is_backlight_combination_mode(dev)) { | 452 | if (panel->backlight.combination_mode) { |
| 514 | u32 max = intel_panel_get_max_backlight(dev, pipe); | ||
| 515 | u8 lbpc; | 453 | u8 lbpc; |
| 516 | 454 | ||
| 517 | /* we're screwed, but keep behaviour backwards compatible */ | 455 | lbpc = level * 0xfe / panel->backlight.max + 1; |
| 518 | if (!max) | ||
| 519 | max = 1; | ||
| 520 | |||
| 521 | lbpc = level * 0xfe / max + 1; | ||
| 522 | level /= lbpc; | 456 | level /= lbpc; |
| 523 | pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc); | 457 | pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc); |
| 524 | } | 458 | } |
| 525 | 459 | ||
| 526 | if (IS_VALLEYVIEW(dev)) | 460 | if (IS_GEN4(dev)) { |
| 527 | reg = VLV_BLC_PWM_CTL(pipe); | 461 | mask = BACKLIGHT_DUTY_CYCLE_MASK; |
| 528 | else | 462 | } else { |
| 529 | reg = BLC_PWM_CTL; | ||
| 530 | |||
| 531 | tmp = I915_READ(reg); | ||
| 532 | if (INTEL_INFO(dev)->gen < 4) | ||
| 533 | level <<= 1; | 463 | level <<= 1; |
| 534 | tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK; | 464 | mask = BACKLIGHT_DUTY_CYCLE_MASK_PNV; |
| 535 | I915_WRITE(reg, tmp | level); | 465 | } |
| 466 | |||
| 467 | tmp = I915_READ(BLC_PWM_CTL) & ~mask; | ||
| 468 | I915_WRITE(BLC_PWM_CTL, tmp | level); | ||
| 469 | } | ||
| 470 | |||
| 471 | static void vlv_set_backlight(struct intel_connector *connector, u32 level) | ||
| 472 | { | ||
| 473 | struct drm_device *dev = connector->base.dev; | ||
| 474 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 475 | enum pipe pipe = intel_get_pipe_from_connector(connector); | ||
| 476 | u32 tmp; | ||
| 477 | |||
| 478 | tmp = I915_READ(VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK; | ||
| 479 | I915_WRITE(VLV_BLC_PWM_CTL(pipe), tmp | level); | ||
| 480 | } | ||
| 481 | |||
| 482 | static void | ||
| 483 | intel_panel_actually_set_backlight(struct intel_connector *connector, u32 level) | ||
| 484 | { | ||
| 485 | struct drm_device *dev = connector->base.dev; | ||
| 486 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 487 | |||
| 488 | DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level); | ||
| 489 | |||
| 490 | level = intel_panel_compute_brightness(connector, level); | ||
| 491 | dev_priv->display.set_backlight(connector, level); | ||
| 536 | } | 492 | } |
| 537 | 493 | ||
| 538 | /* set backlight brightness to level in range [0..max] */ | 494 | /* set backlight brightness to level in range [0..max] */ |
| @@ -541,45 +497,89 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level, | |||
| 541 | { | 497 | { |
| 542 | struct drm_device *dev = connector->base.dev; | 498 | struct drm_device *dev = connector->base.dev; |
| 543 | struct drm_i915_private *dev_priv = dev->dev_private; | 499 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 500 | struct intel_panel *panel = &connector->panel; | ||
| 544 | enum pipe pipe = intel_get_pipe_from_connector(connector); | 501 | enum pipe pipe = intel_get_pipe_from_connector(connector); |
| 545 | u32 freq; | 502 | u32 freq; |
| 546 | unsigned long flags; | 503 | unsigned long flags; |
| 547 | 504 | ||
| 548 | if (pipe == INVALID_PIPE) | 505 | if (!panel->backlight.present || pipe == INVALID_PIPE) |
| 549 | return; | 506 | return; |
| 550 | 507 | ||
| 551 | spin_lock_irqsave(&dev_priv->backlight.lock, flags); | 508 | spin_lock_irqsave(&dev_priv->backlight_lock, flags); |
| 552 | 509 | ||
| 553 | freq = intel_panel_get_max_backlight(dev, pipe); | 510 | WARN_ON(panel->backlight.max == 0); |
| 554 | if (!freq) { | ||
| 555 | /* we are screwed, bail out */ | ||
| 556 | goto out; | ||
| 557 | } | ||
| 558 | 511 | ||
| 559 | /* scale to hardware, but be careful to not overflow */ | 512 | /* scale to hardware max, but be careful to not overflow */ |
| 513 | freq = panel->backlight.max; | ||
| 560 | if (freq < max) | 514 | if (freq < max) |
| 561 | level = level * freq / max; | 515 | level = level * freq / max; |
| 562 | else | 516 | else |
| 563 | level = freq / max * level; | 517 | level = freq / max * level; |
| 564 | 518 | ||
| 565 | dev_priv->backlight.level = level; | 519 | panel->backlight.level = level; |
| 566 | if (dev_priv->backlight.device) | 520 | if (panel->backlight.device) |
| 567 | dev_priv->backlight.device->props.brightness = level; | 521 | panel->backlight.device->props.brightness = level; |
| 568 | 522 | ||
| 569 | if (dev_priv->backlight.enabled) | 523 | if (panel->backlight.enabled) |
| 570 | intel_panel_actually_set_backlight(dev, pipe, level); | 524 | intel_panel_actually_set_backlight(connector, level); |
| 571 | out: | 525 | |
| 572 | spin_unlock_irqrestore(&dev_priv->backlight.lock, flags); | 526 | spin_unlock_irqrestore(&dev_priv->backlight_lock, flags); |
| 527 | } | ||
| 528 | |||
| 529 | static void pch_disable_backlight(struct intel_connector *connector) | ||
| 530 | { | ||
| 531 | struct drm_device *dev = connector->base.dev; | ||
| 532 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 533 | u32 tmp; | ||
| 534 | |||
| 535 | intel_panel_actually_set_backlight(connector, 0); | ||
| 536 | |||
| 537 | tmp = I915_READ(BLC_PWM_CPU_CTL2); | ||
| 538 | I915_WRITE(BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE); | ||
| 539 | |||
| 540 | tmp = I915_READ(BLC_PWM_PCH_CTL1); | ||
| 541 | I915_WRITE(BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE); | ||
| 542 | } | ||
| 543 | |||
| 544 | static void i9xx_disable_backlight(struct intel_connector *connector) | ||
| 545 | { | ||
| 546 | intel_panel_actually_set_backlight(connector, 0); | ||
| 547 | } | ||
| 548 | |||
| 549 | static void i965_disable_backlight(struct intel_connector *connector) | ||
| 550 | { | ||
| 551 | struct drm_device *dev = connector->base.dev; | ||
| 552 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 553 | u32 tmp; | ||
| 554 | |||
| 555 | intel_panel_actually_set_backlight(connector, 0); | ||
| 556 | |||
| 557 | tmp = I915_READ(BLC_PWM_CTL2); | ||
| 558 | I915_WRITE(BLC_PWM_CTL2, tmp & ~BLM_PWM_ENABLE); | ||
| 559 | } | ||
| 560 | |||
| 561 | static void vlv_disable_backlight(struct intel_connector *connector) | ||
| 562 | { | ||
| 563 | struct drm_device *dev = connector->base.dev; | ||
| 564 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 565 | enum pipe pipe = intel_get_pipe_from_connector(connector); | ||
| 566 | u32 tmp; | ||
| 567 | |||
| 568 | intel_panel_actually_set_backlight(connector, 0); | ||
| 569 | |||
| 570 | tmp = I915_READ(VLV_BLC_PWM_CTL2(pipe)); | ||
| 571 | I915_WRITE(VLV_BLC_PWM_CTL2(pipe), tmp & ~BLM_PWM_ENABLE); | ||
| 573 | } | 572 | } |
| 574 | 573 | ||
| 575 | void intel_panel_disable_backlight(struct intel_connector *connector) | 574 | void intel_panel_disable_backlight(struct intel_connector *connector) |
| 576 | { | 575 | { |
| 577 | struct drm_device *dev = connector->base.dev; | 576 | struct drm_device *dev = connector->base.dev; |
| 578 | struct drm_i915_private *dev_priv = dev->dev_private; | 577 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 578 | struct intel_panel *panel = &connector->panel; | ||
| 579 | enum pipe pipe = intel_get_pipe_from_connector(connector); | 579 | enum pipe pipe = intel_get_pipe_from_connector(connector); |
| 580 | unsigned long flags; | 580 | unsigned long flags; |
| 581 | 581 | ||
| 582 | if (pipe == INVALID_PIPE) | 582 | if (!panel->backlight.present || pipe == INVALID_PIPE) |
| 583 | return; | 583 | return; |
| 584 | 584 | ||
| 585 | /* | 585 | /* |
| @@ -593,150 +593,215 @@ void intel_panel_disable_backlight(struct intel_connector *connector) | |||
| 593 | return; | 593 | return; |
| 594 | } | 594 | } |
| 595 | 595 | ||
| 596 | spin_lock_irqsave(&dev_priv->backlight.lock, flags); | 596 | spin_lock_irqsave(&dev_priv->backlight_lock, flags); |
| 597 | 597 | ||
| 598 | dev_priv->backlight.enabled = false; | 598 | panel->backlight.enabled = false; |
| 599 | intel_panel_actually_set_backlight(dev, pipe, 0); | 599 | dev_priv->display.disable_backlight(connector); |
| 600 | 600 | ||
| 601 | if (INTEL_INFO(dev)->gen >= 4) { | 601 | spin_unlock_irqrestore(&dev_priv->backlight_lock, flags); |
| 602 | uint32_t reg, tmp; | 602 | } |
| 603 | 603 | ||
| 604 | if (HAS_PCH_SPLIT(dev)) | 604 | static void bdw_enable_backlight(struct intel_connector *connector) |
| 605 | reg = BLC_PWM_CPU_CTL2; | 605 | { |
| 606 | else if (IS_VALLEYVIEW(dev)) | 606 | struct drm_device *dev = connector->base.dev; |
| 607 | reg = VLV_BLC_PWM_CTL2(pipe); | 607 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 608 | else | 608 | struct intel_panel *panel = &connector->panel; |
| 609 | reg = BLC_PWM_CTL2; | 609 | u32 pch_ctl1, pch_ctl2; |
| 610 | |||
| 611 | pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1); | ||
| 612 | if (pch_ctl1 & BLM_PCH_PWM_ENABLE) { | ||
| 613 | DRM_DEBUG_KMS("pch backlight already enabled\n"); | ||
| 614 | pch_ctl1 &= ~BLM_PCH_PWM_ENABLE; | ||
| 615 | I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1); | ||
| 616 | } | ||
| 610 | 617 | ||
| 611 | I915_WRITE(reg, I915_READ(reg) & ~BLM_PWM_ENABLE); | 618 | pch_ctl2 = panel->backlight.max << 16; |
| 619 | I915_WRITE(BLC_PWM_PCH_CTL2, pch_ctl2); | ||
| 612 | 620 | ||
| 613 | if (HAS_PCH_SPLIT(dev)) { | 621 | pch_ctl1 = 0; |
| 614 | tmp = I915_READ(BLC_PWM_PCH_CTL1); | 622 | if (panel->backlight.active_low_pwm) |
| 615 | tmp &= ~BLM_PCH_PWM_ENABLE; | 623 | pch_ctl1 |= BLM_PCH_POLARITY; |
| 616 | I915_WRITE(BLC_PWM_PCH_CTL1, tmp); | ||
| 617 | } | ||
| 618 | } | ||
| 619 | 624 | ||
| 620 | spin_unlock_irqrestore(&dev_priv->backlight.lock, flags); | 625 | /* BDW always uses the pch pwm controls. */ |
| 626 | pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE; | ||
| 627 | |||
| 628 | I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1); | ||
| 629 | POSTING_READ(BLC_PWM_PCH_CTL1); | ||
| 630 | I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE); | ||
| 631 | |||
| 632 | /* This won't stick until the above enable. */ | ||
| 633 | intel_panel_actually_set_backlight(connector, panel->backlight.level); | ||
| 621 | } | 634 | } |
| 622 | 635 | ||
| 623 | void intel_panel_enable_backlight(struct intel_connector *connector) | 636 | static void pch_enable_backlight(struct intel_connector *connector) |
| 624 | { | 637 | { |
| 625 | struct drm_device *dev = connector->base.dev; | 638 | struct drm_device *dev = connector->base.dev; |
| 626 | struct drm_i915_private *dev_priv = dev->dev_private; | 639 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 640 | struct intel_panel *panel = &connector->panel; | ||
| 627 | enum pipe pipe = intel_get_pipe_from_connector(connector); | 641 | enum pipe pipe = intel_get_pipe_from_connector(connector); |
| 628 | enum transcoder cpu_transcoder = | 642 | enum transcoder cpu_transcoder = |
| 629 | intel_pipe_to_cpu_transcoder(dev_priv, pipe); | 643 | intel_pipe_to_cpu_transcoder(dev_priv, pipe); |
| 630 | unsigned long flags; | 644 | u32 cpu_ctl2, pch_ctl1, pch_ctl2; |
| 631 | 645 | ||
| 632 | if (pipe == INVALID_PIPE) | 646 | cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2); |
| 633 | return; | 647 | if (cpu_ctl2 & BLM_PWM_ENABLE) { |
| 648 | WARN(1, "cpu backlight already enabled\n"); | ||
| 649 | cpu_ctl2 &= ~BLM_PWM_ENABLE; | ||
| 650 | I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2); | ||
| 651 | } | ||
| 634 | 652 | ||
| 635 | DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe)); | 653 | pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1); |
| 654 | if (pch_ctl1 & BLM_PCH_PWM_ENABLE) { | ||
| 655 | DRM_DEBUG_KMS("pch backlight already enabled\n"); | ||
| 656 | pch_ctl1 &= ~BLM_PCH_PWM_ENABLE; | ||
| 657 | I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1); | ||
| 658 | } | ||
| 659 | |||
| 660 | if (cpu_transcoder == TRANSCODER_EDP) | ||
| 661 | cpu_ctl2 = BLM_TRANSCODER_EDP; | ||
| 662 | else | ||
| 663 | cpu_ctl2 = BLM_PIPE(cpu_transcoder); | ||
| 664 | I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2); | ||
| 665 | POSTING_READ(BLC_PWM_CPU_CTL2); | ||
| 666 | I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2 | BLM_PWM_ENABLE); | ||
| 667 | |||
| 668 | /* This won't stick until the above enable. */ | ||
| 669 | intel_panel_actually_set_backlight(connector, panel->backlight.level); | ||
| 670 | |||
| 671 | pch_ctl2 = panel->backlight.max << 16; | ||
| 672 | I915_WRITE(BLC_PWM_PCH_CTL2, pch_ctl2); | ||
| 673 | |||
| 674 | pch_ctl1 = 0; | ||
| 675 | if (panel->backlight.active_low_pwm) | ||
| 676 | pch_ctl1 |= BLM_PCH_POLARITY; | ||
| 677 | |||
| 678 | I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1); | ||
| 679 | POSTING_READ(BLC_PWM_PCH_CTL1); | ||
| 680 | I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE); | ||
| 681 | } | ||
| 636 | 682 | ||
| 637 | spin_lock_irqsave(&dev_priv->backlight.lock, flags); | 683 | static void i9xx_enable_backlight(struct intel_connector *connector) |
| 684 | { | ||
| 685 | struct drm_device *dev = connector->base.dev; | ||
| 686 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 687 | struct intel_panel *panel = &connector->panel; | ||
| 688 | u32 ctl, freq; | ||
| 638 | 689 | ||
| 639 | if (dev_priv->backlight.level == 0) { | 690 | ctl = I915_READ(BLC_PWM_CTL); |
| 640 | dev_priv->backlight.level = intel_panel_get_max_backlight(dev, | 691 | if (ctl & BACKLIGHT_DUTY_CYCLE_MASK_PNV) { |
| 641 | pipe); | 692 | WARN(1, "backlight already enabled\n"); |
| 642 | if (dev_priv->backlight.device) | 693 | I915_WRITE(BLC_PWM_CTL, 0); |
| 643 | dev_priv->backlight.device->props.brightness = | ||
| 644 | dev_priv->backlight.level; | ||
| 645 | } | 694 | } |
| 646 | 695 | ||
| 647 | if (INTEL_INFO(dev)->gen >= 4) { | 696 | freq = panel->backlight.max; |
| 648 | uint32_t reg, tmp; | 697 | if (panel->backlight.combination_mode) |
| 698 | freq /= 0xff; | ||
| 649 | 699 | ||
| 650 | if (HAS_PCH_SPLIT(dev)) | 700 | ctl = freq << 17; |
| 651 | reg = BLC_PWM_CPU_CTL2; | 701 | if (IS_GEN2(dev) && panel->backlight.combination_mode) |
| 652 | else if (IS_VALLEYVIEW(dev)) | 702 | ctl |= BLM_LEGACY_MODE; |
| 653 | reg = VLV_BLC_PWM_CTL2(pipe); | 703 | if (IS_PINEVIEW(dev) && panel->backlight.active_low_pwm) |
| 654 | else | 704 | ctl |= BLM_POLARITY_PNV; |
| 655 | reg = BLC_PWM_CTL2; | ||
| 656 | 705 | ||
| 657 | tmp = I915_READ(reg); | 706 | I915_WRITE(BLC_PWM_CTL, ctl); |
| 707 | POSTING_READ(BLC_PWM_CTL); | ||
| 658 | 708 | ||
| 659 | /* Note that this can also get called through dpms changes. And | 709 | /* XXX: combine this into above write? */ |
| 660 | * we don't track the backlight dpms state, hence check whether | 710 | intel_panel_actually_set_backlight(connector, panel->backlight.level); |
| 661 | * we have to do anything first. */ | 711 | } |
| 662 | if (tmp & BLM_PWM_ENABLE) | ||
| 663 | goto set_level; | ||
| 664 | 712 | ||
| 665 | if (INTEL_INFO(dev)->num_pipes == 3) | 713 | static void i965_enable_backlight(struct intel_connector *connector) |
| 666 | tmp &= ~BLM_PIPE_SELECT_IVB; | 714 | { |
| 667 | else | 715 | struct drm_device *dev = connector->base.dev; |
| 668 | tmp &= ~BLM_PIPE_SELECT; | 716 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 717 | struct intel_panel *panel = &connector->panel; | ||
| 718 | enum pipe pipe = intel_get_pipe_from_connector(connector); | ||
| 719 | u32 ctl, ctl2, freq; | ||
| 669 | 720 | ||
| 670 | if (cpu_transcoder == TRANSCODER_EDP) | 721 | ctl2 = I915_READ(BLC_PWM_CTL2); |
| 671 | tmp |= BLM_TRANSCODER_EDP; | 722 | if (ctl2 & BLM_PWM_ENABLE) { |
| 672 | else | 723 | WARN(1, "backlight already enabled\n"); |
| 673 | tmp |= BLM_PIPE(cpu_transcoder); | 724 | ctl2 &= ~BLM_PWM_ENABLE; |
| 674 | tmp &= ~BLM_PWM_ENABLE; | 725 | I915_WRITE(BLC_PWM_CTL2, ctl2); |
| 675 | |||
| 676 | I915_WRITE(reg, tmp); | ||
| 677 | POSTING_READ(reg); | ||
| 678 | I915_WRITE(reg, tmp | BLM_PWM_ENABLE); | ||
| 679 | |||
| 680 | if (IS_BROADWELL(dev)) { | ||
| 681 | /* | ||
| 682 | * Broadwell requires PCH override to drive the PCH | ||
| 683 | * backlight pin. The above will configure the CPU | ||
| 684 | * backlight pin, which we don't plan to use. | ||
| 685 | */ | ||
| 686 | tmp = I915_READ(BLC_PWM_PCH_CTL1); | ||
| 687 | tmp |= BLM_PCH_OVERRIDE_ENABLE | BLM_PCH_PWM_ENABLE; | ||
| 688 | I915_WRITE(BLC_PWM_PCH_CTL1, tmp); | ||
| 689 | } else if (HAS_PCH_SPLIT(dev) && | ||
| 690 | !(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) { | ||
| 691 | tmp = I915_READ(BLC_PWM_PCH_CTL1); | ||
| 692 | tmp |= BLM_PCH_PWM_ENABLE; | ||
| 693 | tmp &= ~BLM_PCH_OVERRIDE_ENABLE; | ||
| 694 | I915_WRITE(BLC_PWM_PCH_CTL1, tmp); | ||
| 695 | } | ||
| 696 | } | 726 | } |
| 697 | 727 | ||
| 698 | set_level: | 728 | freq = panel->backlight.max; |
| 699 | /* Call below after setting BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1. | 729 | if (panel->backlight.combination_mode) |
| 700 | * BLC_PWM_CPU_CTL may be cleared to zero automatically when these | 730 | freq /= 0xff; |
| 701 | * registers are set. | 731 | |
| 702 | */ | 732 | ctl = freq << 16; |
| 703 | dev_priv->backlight.enabled = true; | 733 | I915_WRITE(BLC_PWM_CTL, ctl); |
| 704 | intel_panel_actually_set_backlight(dev, pipe, | 734 | |
| 705 | dev_priv->backlight.level); | 735 | /* XXX: combine this into above write? */ |
| 736 | intel_panel_actually_set_backlight(connector, panel->backlight.level); | ||
| 706 | 737 | ||
| 707 | spin_unlock_irqrestore(&dev_priv->backlight.lock, flags); | 738 | ctl2 = BLM_PIPE(pipe); |
| 739 | if (panel->backlight.combination_mode) | ||
| 740 | ctl2 |= BLM_COMBINATION_MODE; | ||
| 741 | if (panel->backlight.active_low_pwm) | ||
| 742 | ctl2 |= BLM_POLARITY_I965; | ||
| 743 | I915_WRITE(BLC_PWM_CTL2, ctl2); | ||
| 744 | POSTING_READ(BLC_PWM_CTL2); | ||
| 745 | I915_WRITE(BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE); | ||
| 708 | } | 746 | } |
| 709 | 747 | ||
| 710 | /* FIXME: use VBT vals to init PWM_CTL and PWM_CTL2 correctly */ | 748 | static void vlv_enable_backlight(struct intel_connector *connector) |
| 711 | static void intel_panel_init_backlight_regs(struct drm_device *dev) | ||
| 712 | { | 749 | { |
| 750 | struct drm_device *dev = connector->base.dev; | ||
| 713 | struct drm_i915_private *dev_priv = dev->dev_private; | 751 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 752 | struct intel_panel *panel = &connector->panel; | ||
| 753 | enum pipe pipe = intel_get_pipe_from_connector(connector); | ||
| 754 | u32 ctl, ctl2; | ||
| 714 | 755 | ||
| 715 | if (IS_VALLEYVIEW(dev)) { | 756 | ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe)); |
| 716 | enum pipe pipe; | 757 | if (ctl2 & BLM_PWM_ENABLE) { |
| 758 | WARN(1, "backlight already enabled\n"); | ||
| 759 | ctl2 &= ~BLM_PWM_ENABLE; | ||
| 760 | I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2); | ||
| 761 | } | ||
| 717 | 762 | ||
| 718 | for_each_pipe(pipe) { | 763 | ctl = panel->backlight.max << 16; |
| 719 | u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(pipe)); | 764 | I915_WRITE(VLV_BLC_PWM_CTL(pipe), ctl); |
| 720 | 765 | ||
| 721 | /* Skip if the modulation freq is already set */ | 766 | /* XXX: combine this into above write? */ |
| 722 | if (cur_val & ~BACKLIGHT_DUTY_CYCLE_MASK) | 767 | intel_panel_actually_set_backlight(connector, panel->backlight.level); |
| 723 | continue; | ||
| 724 | 768 | ||
| 725 | cur_val &= BACKLIGHT_DUTY_CYCLE_MASK; | 769 | ctl2 = 0; |
| 726 | I915_WRITE(VLV_BLC_PWM_CTL(pipe), (0xf42 << 16) | | 770 | if (panel->backlight.active_low_pwm) |
| 727 | cur_val); | 771 | ctl2 |= BLM_POLARITY_I965; |
| 728 | } | 772 | I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2); |
| 729 | } | 773 | POSTING_READ(VLV_BLC_PWM_CTL2(pipe)); |
| 774 | I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2 | BLM_PWM_ENABLE); | ||
| 730 | } | 775 | } |
| 731 | 776 | ||
| 732 | static void intel_panel_init_backlight(struct drm_device *dev) | 777 | void intel_panel_enable_backlight(struct intel_connector *connector) |
| 733 | { | 778 | { |
| 779 | struct drm_device *dev = connector->base.dev; | ||
| 734 | struct drm_i915_private *dev_priv = dev->dev_private; | 780 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 781 | struct intel_panel *panel = &connector->panel; | ||
| 782 | enum pipe pipe = intel_get_pipe_from_connector(connector); | ||
| 783 | unsigned long flags; | ||
| 784 | |||
| 785 | if (!panel->backlight.present || pipe == INVALID_PIPE) | ||
| 786 | return; | ||
| 787 | |||
| 788 | DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe)); | ||
| 789 | |||
| 790 | spin_lock_irqsave(&dev_priv->backlight_lock, flags); | ||
| 791 | |||
| 792 | WARN_ON(panel->backlight.max == 0); | ||
| 735 | 793 | ||
| 736 | intel_panel_init_backlight_regs(dev); | 794 | if (panel->backlight.level == 0) { |
| 795 | panel->backlight.level = panel->backlight.max; | ||
| 796 | if (panel->backlight.device) | ||
| 797 | panel->backlight.device->props.brightness = | ||
| 798 | panel->backlight.level; | ||
| 799 | } | ||
| 800 | |||
| 801 | dev_priv->display.enable_backlight(connector); | ||
| 802 | panel->backlight.enabled = true; | ||
| 737 | 803 | ||
| 738 | dev_priv->backlight.level = intel_panel_get_backlight(dev, 0); | 804 | spin_unlock_irqrestore(&dev_priv->backlight_lock, flags); |
| 739 | dev_priv->backlight.enabled = dev_priv->backlight.level != 0; | ||
| 740 | } | 805 | } |
| 741 | 806 | ||
| 742 | enum drm_connector_status | 807 | enum drm_connector_status |
| @@ -762,7 +827,7 @@ intel_panel_detect(struct drm_device *dev) | |||
| 762 | } | 827 | } |
| 763 | 828 | ||
| 764 | #if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE) | 829 | #if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE) |
| 765 | static int intel_panel_update_status(struct backlight_device *bd) | 830 | static int intel_backlight_device_update_status(struct backlight_device *bd) |
| 766 | { | 831 | { |
| 767 | struct intel_connector *connector = bl_get_data(bd); | 832 | struct intel_connector *connector = bl_get_data(bd); |
| 768 | struct drm_device *dev = connector->base.dev; | 833 | struct drm_device *dev = connector->base.dev; |
| @@ -776,85 +841,362 @@ static int intel_panel_update_status(struct backlight_device *bd) | |||
| 776 | return 0; | 841 | return 0; |
| 777 | } | 842 | } |
| 778 | 843 | ||
| 779 | static int intel_panel_get_brightness(struct backlight_device *bd) | 844 | static int intel_backlight_device_get_brightness(struct backlight_device *bd) |
| 780 | { | 845 | { |
| 781 | struct intel_connector *connector = bl_get_data(bd); | 846 | struct intel_connector *connector = bl_get_data(bd); |
| 782 | struct drm_device *dev = connector->base.dev; | 847 | struct drm_device *dev = connector->base.dev; |
| 783 | enum pipe pipe; | 848 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 849 | int ret; | ||
| 784 | 850 | ||
| 851 | intel_runtime_pm_get(dev_priv); | ||
| 785 | mutex_lock(&dev->mode_config.mutex); | 852 | mutex_lock(&dev->mode_config.mutex); |
| 786 | pipe = intel_get_pipe_from_connector(connector); | 853 | ret = intel_panel_get_backlight(connector); |
| 787 | mutex_unlock(&dev->mode_config.mutex); | 854 | mutex_unlock(&dev->mode_config.mutex); |
| 788 | if (pipe == INVALID_PIPE) | 855 | intel_runtime_pm_put(dev_priv); |
| 789 | return 0; | ||
| 790 | 856 | ||
| 791 | return intel_panel_get_backlight(connector->base.dev, pipe); | 857 | return ret; |
| 792 | } | 858 | } |
| 793 | 859 | ||
| 794 | static const struct backlight_ops intel_panel_bl_ops = { | 860 | static const struct backlight_ops intel_backlight_device_ops = { |
| 795 | .update_status = intel_panel_update_status, | 861 | .update_status = intel_backlight_device_update_status, |
| 796 | .get_brightness = intel_panel_get_brightness, | 862 | .get_brightness = intel_backlight_device_get_brightness, |
| 797 | }; | 863 | }; |
| 798 | 864 | ||
| 799 | int intel_panel_setup_backlight(struct drm_connector *connector) | 865 | static int intel_backlight_device_register(struct intel_connector *connector) |
| 800 | { | 866 | { |
| 801 | struct drm_device *dev = connector->dev; | 867 | struct intel_panel *panel = &connector->panel; |
| 802 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 803 | struct backlight_properties props; | 868 | struct backlight_properties props; |
| 804 | unsigned long flags; | ||
| 805 | 869 | ||
| 806 | intel_panel_init_backlight(dev); | 870 | if (WARN_ON(panel->backlight.device)) |
| 807 | |||
| 808 | if (WARN_ON(dev_priv->backlight.device)) | ||
| 809 | return -ENODEV; | 871 | return -ENODEV; |
| 810 | 872 | ||
| 873 | BUG_ON(panel->backlight.max == 0); | ||
| 874 | |||
| 811 | memset(&props, 0, sizeof(props)); | 875 | memset(&props, 0, sizeof(props)); |
| 812 | props.type = BACKLIGHT_RAW; | 876 | props.type = BACKLIGHT_RAW; |
| 813 | props.brightness = dev_priv->backlight.level; | 877 | props.brightness = panel->backlight.level; |
| 878 | props.max_brightness = panel->backlight.max; | ||
| 814 | 879 | ||
| 815 | spin_lock_irqsave(&dev_priv->backlight.lock, flags); | 880 | /* |
| 816 | props.max_brightness = intel_panel_get_max_backlight(dev, 0); | 881 | * Note: using the same name independent of the connector prevents |
| 817 | spin_unlock_irqrestore(&dev_priv->backlight.lock, flags); | 882 | * registration of multiple backlight devices in the driver. |
| 818 | 883 | */ | |
| 819 | if (props.max_brightness == 0) { | 884 | panel->backlight.device = |
| 820 | DRM_DEBUG_DRIVER("Failed to get maximum backlight value\n"); | ||
| 821 | return -ENODEV; | ||
| 822 | } | ||
| 823 | dev_priv->backlight.device = | ||
| 824 | backlight_device_register("intel_backlight", | 885 | backlight_device_register("intel_backlight", |
| 825 | connector->kdev, | 886 | connector->base.kdev, |
| 826 | to_intel_connector(connector), | 887 | connector, |
| 827 | &intel_panel_bl_ops, &props); | 888 | &intel_backlight_device_ops, &props); |
| 828 | 889 | ||
| 829 | if (IS_ERR(dev_priv->backlight.device)) { | 890 | if (IS_ERR(panel->backlight.device)) { |
| 830 | DRM_ERROR("Failed to register backlight: %ld\n", | 891 | DRM_ERROR("Failed to register backlight: %ld\n", |
| 831 | PTR_ERR(dev_priv->backlight.device)); | 892 | PTR_ERR(panel->backlight.device)); |
| 832 | dev_priv->backlight.device = NULL; | 893 | panel->backlight.device = NULL; |
| 833 | return -ENODEV; | 894 | return -ENODEV; |
| 834 | } | 895 | } |
| 835 | return 0; | 896 | return 0; |
| 836 | } | 897 | } |
| 837 | 898 | ||
| 838 | void intel_panel_destroy_backlight(struct drm_device *dev) | 899 | static void intel_backlight_device_unregister(struct intel_connector *connector) |
| 900 | { | ||
| 901 | struct intel_panel *panel = &connector->panel; | ||
| 902 | |||
| 903 | if (panel->backlight.device) { | ||
| 904 | backlight_device_unregister(panel->backlight.device); | ||
| 905 | panel->backlight.device = NULL; | ||
| 906 | } | ||
| 907 | } | ||
| 908 | #else /* CONFIG_BACKLIGHT_CLASS_DEVICE */ | ||
| 909 | static int intel_backlight_device_register(struct intel_connector *connector) | ||
| 910 | { | ||
| 911 | return 0; | ||
| 912 | } | ||
| 913 | static void intel_backlight_device_unregister(struct intel_connector *connector) | ||
| 914 | { | ||
| 915 | } | ||
| 916 | #endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */ | ||
| 917 | |||
| 918 | /* | ||
| 919 | * Note: The setup hooks can't assume pipe is set! | ||
| 920 | * | ||
| 921 | * XXX: Query mode clock or hardware clock and program PWM modulation frequency | ||
| 922 | * appropriately when it's 0. Use VBT and/or sane defaults. | ||
| 923 | */ | ||
| 924 | static int bdw_setup_backlight(struct intel_connector *connector) | ||
| 925 | { | ||
| 926 | struct drm_device *dev = connector->base.dev; | ||
| 927 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 928 | struct intel_panel *panel = &connector->panel; | ||
| 929 | u32 pch_ctl1, pch_ctl2, val; | ||
| 930 | |||
| 931 | pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1); | ||
| 932 | panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY; | ||
| 933 | |||
| 934 | pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2); | ||
| 935 | panel->backlight.max = pch_ctl2 >> 16; | ||
| 936 | if (!panel->backlight.max) | ||
| 937 | return -ENODEV; | ||
| 938 | |||
| 939 | val = bdw_get_backlight(connector); | ||
| 940 | panel->backlight.level = intel_panel_compute_brightness(connector, val); | ||
| 941 | |||
| 942 | panel->backlight.enabled = (pch_ctl1 & BLM_PCH_PWM_ENABLE) && | ||
| 943 | panel->backlight.level != 0; | ||
| 944 | |||
| 945 | return 0; | ||
| 946 | } | ||
| 947 | |||
| 948 | static int pch_setup_backlight(struct intel_connector *connector) | ||
| 949 | { | ||
| 950 | struct drm_device *dev = connector->base.dev; | ||
| 951 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 952 | struct intel_panel *panel = &connector->panel; | ||
| 953 | u32 cpu_ctl2, pch_ctl1, pch_ctl2, val; | ||
| 954 | |||
| 955 | pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1); | ||
| 956 | panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY; | ||
| 957 | |||
| 958 | pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2); | ||
| 959 | panel->backlight.max = pch_ctl2 >> 16; | ||
| 960 | if (!panel->backlight.max) | ||
| 961 | return -ENODEV; | ||
| 962 | |||
| 963 | val = pch_get_backlight(connector); | ||
| 964 | panel->backlight.level = intel_panel_compute_brightness(connector, val); | ||
| 965 | |||
| 966 | cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2); | ||
| 967 | panel->backlight.enabled = (cpu_ctl2 & BLM_PWM_ENABLE) && | ||
| 968 | (pch_ctl1 & BLM_PCH_PWM_ENABLE) && panel->backlight.level != 0; | ||
| 969 | |||
| 970 | return 0; | ||
| 971 | } | ||
| 972 | |||
| 973 | static int i9xx_setup_backlight(struct intel_connector *connector) | ||
| 974 | { | ||
| 975 | struct drm_device *dev = connector->base.dev; | ||
| 976 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 977 | struct intel_panel *panel = &connector->panel; | ||
| 978 | u32 ctl, val; | ||
| 979 | |||
| 980 | ctl = I915_READ(BLC_PWM_CTL); | ||
| 981 | |||
| 982 | if (IS_GEN2(dev)) | ||
| 983 | panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE; | ||
| 984 | |||
| 985 | if (IS_PINEVIEW(dev)) | ||
| 986 | panel->backlight.active_low_pwm = ctl & BLM_POLARITY_PNV; | ||
| 987 | |||
| 988 | panel->backlight.max = ctl >> 17; | ||
| 989 | if (panel->backlight.combination_mode) | ||
| 990 | panel->backlight.max *= 0xff; | ||
| 991 | |||
| 992 | if (!panel->backlight.max) | ||
| 993 | return -ENODEV; | ||
| 994 | |||
| 995 | val = i9xx_get_backlight(connector); | ||
| 996 | panel->backlight.level = intel_panel_compute_brightness(connector, val); | ||
| 997 | |||
| 998 | panel->backlight.enabled = panel->backlight.level != 0; | ||
| 999 | |||
| 1000 | return 0; | ||
| 1001 | } | ||
| 1002 | |||
| 1003 | static int i965_setup_backlight(struct intel_connector *connector) | ||
| 1004 | { | ||
| 1005 | struct drm_device *dev = connector->base.dev; | ||
| 1006 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 1007 | struct intel_panel *panel = &connector->panel; | ||
| 1008 | u32 ctl, ctl2, val; | ||
| 1009 | |||
| 1010 | ctl2 = I915_READ(BLC_PWM_CTL2); | ||
| 1011 | panel->backlight.combination_mode = ctl2 & BLM_COMBINATION_MODE; | ||
| 1012 | panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965; | ||
| 1013 | |||
| 1014 | ctl = I915_READ(BLC_PWM_CTL); | ||
| 1015 | panel->backlight.max = ctl >> 16; | ||
| 1016 | if (panel->backlight.combination_mode) | ||
| 1017 | panel->backlight.max *= 0xff; | ||
| 1018 | |||
| 1019 | if (!panel->backlight.max) | ||
| 1020 | return -ENODEV; | ||
| 1021 | |||
| 1022 | val = i9xx_get_backlight(connector); | ||
| 1023 | panel->backlight.level = intel_panel_compute_brightness(connector, val); | ||
| 1024 | |||
| 1025 | panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) && | ||
| 1026 | panel->backlight.level != 0; | ||
| 1027 | |||
| 1028 | return 0; | ||
| 1029 | } | ||
| 1030 | |||
| 1031 | static int vlv_setup_backlight(struct intel_connector *connector) | ||
| 839 | { | 1032 | { |
| 1033 | struct drm_device *dev = connector->base.dev; | ||
| 840 | struct drm_i915_private *dev_priv = dev->dev_private; | 1034 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 841 | if (dev_priv->backlight.device) { | 1035 | struct intel_panel *panel = &connector->panel; |
| 842 | backlight_device_unregister(dev_priv->backlight.device); | 1036 | enum pipe pipe; |
| 843 | dev_priv->backlight.device = NULL; | 1037 | u32 ctl, ctl2, val; |
| 1038 | |||
| 1039 | for_each_pipe(pipe) { | ||
| 1040 | u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(pipe)); | ||
| 1041 | |||
| 1042 | /* Skip if the modulation freq is already set */ | ||
| 1043 | if (cur_val & ~BACKLIGHT_DUTY_CYCLE_MASK) | ||
| 1044 | continue; | ||
| 1045 | |||
| 1046 | cur_val &= BACKLIGHT_DUTY_CYCLE_MASK; | ||
| 1047 | I915_WRITE(VLV_BLC_PWM_CTL(pipe), (0xf42 << 16) | | ||
| 1048 | cur_val); | ||
| 844 | } | 1049 | } |
| 1050 | |||
| 1051 | ctl2 = I915_READ(VLV_BLC_PWM_CTL2(PIPE_A)); | ||
| 1052 | panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965; | ||
| 1053 | |||
| 1054 | ctl = I915_READ(VLV_BLC_PWM_CTL(PIPE_A)); | ||
| 1055 | panel->backlight.max = ctl >> 16; | ||
| 1056 | if (!panel->backlight.max) | ||
| 1057 | return -ENODEV; | ||
| 1058 | |||
| 1059 | val = _vlv_get_backlight(dev, PIPE_A); | ||
| 1060 | panel->backlight.level = intel_panel_compute_brightness(connector, val); | ||
| 1061 | |||
| 1062 | panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) && | ||
| 1063 | panel->backlight.level != 0; | ||
| 1064 | |||
| 1065 | return 0; | ||
| 845 | } | 1066 | } |
| 846 | #else | 1067 | |
| 847 | int intel_panel_setup_backlight(struct drm_connector *connector) | 1068 | int intel_panel_setup_backlight(struct drm_connector *connector) |
| 848 | { | 1069 | { |
| 849 | intel_panel_init_backlight(connector->dev); | 1070 | struct drm_device *dev = connector->dev; |
| 1071 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 1072 | struct intel_connector *intel_connector = to_intel_connector(connector); | ||
| 1073 | struct intel_panel *panel = &intel_connector->panel; | ||
| 1074 | unsigned long flags; | ||
| 1075 | int ret; | ||
| 1076 | |||
| 1077 | /* set level and max in panel struct */ | ||
| 1078 | spin_lock_irqsave(&dev_priv->backlight_lock, flags); | ||
| 1079 | ret = dev_priv->display.setup_backlight(intel_connector); | ||
| 1080 | spin_unlock_irqrestore(&dev_priv->backlight_lock, flags); | ||
| 1081 | |||
| 1082 | if (ret) { | ||
| 1083 | DRM_DEBUG_KMS("failed to setup backlight for connector %s\n", | ||
| 1084 | drm_get_connector_name(connector)); | ||
| 1085 | return ret; | ||
| 1086 | } | ||
| 1087 | |||
| 1088 | intel_backlight_device_register(intel_connector); | ||
| 1089 | |||
| 1090 | panel->backlight.present = true; | ||
| 1091 | |||
| 1092 | DRM_DEBUG_KMS("backlight initialized, %s, brightness %u/%u, " | ||
| 1093 | "sysfs interface %sregistered\n", | ||
| 1094 | panel->backlight.enabled ? "enabled" : "disabled", | ||
| 1095 | panel->backlight.level, panel->backlight.max, | ||
| 1096 | panel->backlight.device ? "" : "not "); | ||
| 1097 | |||
| 850 | return 0; | 1098 | return 0; |
| 851 | } | 1099 | } |
| 852 | 1100 | ||
| 853 | void intel_panel_destroy_backlight(struct drm_device *dev) | 1101 | void intel_panel_destroy_backlight(struct drm_connector *connector) |
| 1102 | { | ||
| 1103 | struct intel_connector *intel_connector = to_intel_connector(connector); | ||
| 1104 | struct intel_panel *panel = &intel_connector->panel; | ||
| 1105 | |||
| 1106 | panel->backlight.present = false; | ||
| 1107 | intel_backlight_device_unregister(intel_connector); | ||
| 1108 | } | ||
| 1109 | |||
| 1110 | /** | ||
| 1111 | * intel_find_panel_downclock - find the reduced downclock for LVDS in EDID | ||
| 1112 | * @dev: drm device | ||
| 1113 | * @fixed_mode : panel native mode | ||
| 1114 | * @connector: LVDS/eDP connector | ||
| 1115 | * | ||
| 1116 | * Return downclock_avail | ||
| 1117 | * Find the reduced downclock for LVDS/eDP in EDID. | ||
| 1118 | */ | ||
| 1119 | struct drm_display_mode * | ||
| 1120 | intel_find_panel_downclock(struct drm_device *dev, | ||
| 1121 | struct drm_display_mode *fixed_mode, | ||
| 1122 | struct drm_connector *connector) | ||
| 1123 | { | ||
| 1124 | struct drm_display_mode *scan, *tmp_mode; | ||
| 1125 | int temp_downclock; | ||
| 1126 | |||
| 1127 | temp_downclock = fixed_mode->clock; | ||
| 1128 | tmp_mode = NULL; | ||
| 1129 | |||
| 1130 | list_for_each_entry(scan, &connector->probed_modes, head) { | ||
| 1131 | /* | ||
| 1132 | * If one mode has the same resolution with the fixed_panel | ||
| 1133 | * mode while they have the different refresh rate, it means | ||
| 1134 | * that the reduced downclock is found. In such | ||
| 1135 | * case we can set the different FPx0/1 to dynamically select | ||
| 1136 | * between low and high frequency. | ||
| 1137 | */ | ||
| 1138 | if (scan->hdisplay == fixed_mode->hdisplay && | ||
| 1139 | scan->hsync_start == fixed_mode->hsync_start && | ||
| 1140 | scan->hsync_end == fixed_mode->hsync_end && | ||
| 1141 | scan->htotal == fixed_mode->htotal && | ||
| 1142 | scan->vdisplay == fixed_mode->vdisplay && | ||
| 1143 | scan->vsync_start == fixed_mode->vsync_start && | ||
| 1144 | scan->vsync_end == fixed_mode->vsync_end && | ||
| 1145 | scan->vtotal == fixed_mode->vtotal) { | ||
| 1146 | if (scan->clock < temp_downclock) { | ||
| 1147 | /* | ||
| 1148 | * The downclock is already found. But we | ||
| 1149 | * expect to find the lower downclock. | ||
| 1150 | */ | ||
| 1151 | temp_downclock = scan->clock; | ||
| 1152 | tmp_mode = scan; | ||
| 1153 | } | ||
| 1154 | } | ||
| 1155 | } | ||
| 1156 | |||
| 1157 | if (temp_downclock < fixed_mode->clock) | ||
| 1158 | return drm_mode_duplicate(dev, tmp_mode); | ||
| 1159 | else | ||
| 1160 | return NULL; | ||
| 1161 | } | ||
| 1162 | |||
| 1163 | /* Set up chip specific backlight functions */ | ||
| 1164 | void intel_panel_init_backlight_funcs(struct drm_device *dev) | ||
| 854 | { | 1165 | { |
| 855 | return; | 1166 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1167 | |||
| 1168 | if (IS_BROADWELL(dev)) { | ||
| 1169 | dev_priv->display.setup_backlight = bdw_setup_backlight; | ||
| 1170 | dev_priv->display.enable_backlight = bdw_enable_backlight; | ||
| 1171 | dev_priv->display.disable_backlight = pch_disable_backlight; | ||
| 1172 | dev_priv->display.set_backlight = bdw_set_backlight; | ||
| 1173 | dev_priv->display.get_backlight = bdw_get_backlight; | ||
| 1174 | } else if (HAS_PCH_SPLIT(dev)) { | ||
| 1175 | dev_priv->display.setup_backlight = pch_setup_backlight; | ||
| 1176 | dev_priv->display.enable_backlight = pch_enable_backlight; | ||
| 1177 | dev_priv->display.disable_backlight = pch_disable_backlight; | ||
| 1178 | dev_priv->display.set_backlight = pch_set_backlight; | ||
| 1179 | dev_priv->display.get_backlight = pch_get_backlight; | ||
| 1180 | } else if (IS_VALLEYVIEW(dev)) { | ||
| 1181 | dev_priv->display.setup_backlight = vlv_setup_backlight; | ||
| 1182 | dev_priv->display.enable_backlight = vlv_enable_backlight; | ||
| 1183 | dev_priv->display.disable_backlight = vlv_disable_backlight; | ||
| 1184 | dev_priv->display.set_backlight = vlv_set_backlight; | ||
| 1185 | dev_priv->display.get_backlight = vlv_get_backlight; | ||
| 1186 | } else if (IS_GEN4(dev)) { | ||
| 1187 | dev_priv->display.setup_backlight = i965_setup_backlight; | ||
| 1188 | dev_priv->display.enable_backlight = i965_enable_backlight; | ||
| 1189 | dev_priv->display.disable_backlight = i965_disable_backlight; | ||
| 1190 | dev_priv->display.set_backlight = i9xx_set_backlight; | ||
| 1191 | dev_priv->display.get_backlight = i9xx_get_backlight; | ||
| 1192 | } else { | ||
| 1193 | dev_priv->display.setup_backlight = i9xx_setup_backlight; | ||
| 1194 | dev_priv->display.enable_backlight = i9xx_enable_backlight; | ||
| 1195 | dev_priv->display.disable_backlight = i9xx_disable_backlight; | ||
| 1196 | dev_priv->display.set_backlight = i9xx_set_backlight; | ||
| 1197 | dev_priv->display.get_backlight = i9xx_get_backlight; | ||
| 1198 | } | ||
| 856 | } | 1199 | } |
| 857 | #endif | ||
| 858 | 1200 | ||
| 859 | int intel_panel_init(struct intel_panel *panel, | 1201 | int intel_panel_init(struct intel_panel *panel, |
| 860 | struct drm_display_mode *fixed_mode) | 1202 | struct drm_display_mode *fixed_mode) |
| @@ -871,4 +1213,8 @@ void intel_panel_fini(struct intel_panel *panel) | |||
| 871 | 1213 | ||
| 872 | if (panel->fixed_mode) | 1214 | if (panel->fixed_mode) |
| 873 | drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode); | 1215 | drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode); |
| 1216 | |||
| 1217 | if (panel->downclock_mode) | ||
| 1218 | drm_mode_destroy(intel_connector->base.dev, | ||
| 1219 | panel->downclock_mode); | ||
| 874 | } | 1220 | } |
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 26c29c173221..d77cc81900f9 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
| @@ -30,7 +30,9 @@ | |||
| 30 | #include "intel_drv.h" | 30 | #include "intel_drv.h" |
| 31 | #include "../../../platform/x86/intel_ips.h" | 31 | #include "../../../platform/x86/intel_ips.h" |
| 32 | #include <linux/module.h> | 32 | #include <linux/module.h> |
| 33 | #include <linux/vgaarb.h> | ||
| 33 | #include <drm/i915_powerwell.h> | 34 | #include <drm/i915_powerwell.h> |
| 35 | #include <linux/pm_runtime.h> | ||
| 34 | 36 | ||
| 35 | /** | 37 | /** |
| 36 | * RC6 is a special power stage which allows the GPU to enter an very | 38 | * RC6 is a special power stage which allows the GPU to enter an very |
| @@ -86,7 +88,7 @@ static void i8xx_disable_fbc(struct drm_device *dev) | |||
| 86 | DRM_DEBUG_KMS("disabled FBC\n"); | 88 | DRM_DEBUG_KMS("disabled FBC\n"); |
| 87 | } | 89 | } |
| 88 | 90 | ||
| 89 | static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | 91 | static void i8xx_enable_fbc(struct drm_crtc *crtc) |
| 90 | { | 92 | { |
| 91 | struct drm_device *dev = crtc->dev; | 93 | struct drm_device *dev = crtc->dev; |
| 92 | struct drm_i915_private *dev_priv = dev->dev_private; | 94 | struct drm_i915_private *dev_priv = dev->dev_private; |
| @@ -96,32 +98,40 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
| 96 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 98 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 97 | int cfb_pitch; | 99 | int cfb_pitch; |
| 98 | int plane, i; | 100 | int plane, i; |
| 99 | u32 fbc_ctl, fbc_ctl2; | 101 | u32 fbc_ctl; |
| 100 | 102 | ||
| 101 | cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE; | 103 | cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE; |
| 102 | if (fb->pitches[0] < cfb_pitch) | 104 | if (fb->pitches[0] < cfb_pitch) |
| 103 | cfb_pitch = fb->pitches[0]; | 105 | cfb_pitch = fb->pitches[0]; |
| 104 | 106 | ||
| 105 | /* FBC_CTL wants 64B units */ | 107 | /* FBC_CTL wants 32B or 64B units */ |
| 106 | cfb_pitch = (cfb_pitch / 64) - 1; | 108 | if (IS_GEN2(dev)) |
| 109 | cfb_pitch = (cfb_pitch / 32) - 1; | ||
| 110 | else | ||
| 111 | cfb_pitch = (cfb_pitch / 64) - 1; | ||
| 107 | plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB; | 112 | plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB; |
| 108 | 113 | ||
| 109 | /* Clear old tags */ | 114 | /* Clear old tags */ |
| 110 | for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++) | 115 | for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++) |
| 111 | I915_WRITE(FBC_TAG + (i * 4), 0); | 116 | I915_WRITE(FBC_TAG + (i * 4), 0); |
| 112 | 117 | ||
| 113 | /* Set it up... */ | 118 | if (IS_GEN4(dev)) { |
| 114 | fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE; | 119 | u32 fbc_ctl2; |
| 115 | fbc_ctl2 |= plane; | 120 | |
| 116 | I915_WRITE(FBC_CONTROL2, fbc_ctl2); | 121 | /* Set it up... */ |
| 117 | I915_WRITE(FBC_FENCE_OFF, crtc->y); | 122 | fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE; |
| 123 | fbc_ctl2 |= plane; | ||
| 124 | I915_WRITE(FBC_CONTROL2, fbc_ctl2); | ||
| 125 | I915_WRITE(FBC_FENCE_OFF, crtc->y); | ||
| 126 | } | ||
| 118 | 127 | ||
| 119 | /* enable it... */ | 128 | /* enable it... */ |
| 120 | fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC; | 129 | fbc_ctl = I915_READ(FBC_CONTROL); |
| 130 | fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT; | ||
| 131 | fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC; | ||
| 121 | if (IS_I945GM(dev)) | 132 | if (IS_I945GM(dev)) |
| 122 | fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ | 133 | fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ |
| 123 | fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; | 134 | fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; |
| 124 | fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; | ||
| 125 | fbc_ctl |= obj->fence_reg; | 135 | fbc_ctl |= obj->fence_reg; |
| 126 | I915_WRITE(FBC_CONTROL, fbc_ctl); | 136 | I915_WRITE(FBC_CONTROL, fbc_ctl); |
| 127 | 137 | ||
| @@ -136,7 +146,7 @@ static bool i8xx_fbc_enabled(struct drm_device *dev) | |||
| 136 | return I915_READ(FBC_CONTROL) & FBC_CTL_EN; | 146 | return I915_READ(FBC_CONTROL) & FBC_CTL_EN; |
| 137 | } | 147 | } |
| 138 | 148 | ||
| 139 | static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | 149 | static void g4x_enable_fbc(struct drm_crtc *crtc) |
| 140 | { | 150 | { |
| 141 | struct drm_device *dev = crtc->dev; | 151 | struct drm_device *dev = crtc->dev; |
| 142 | struct drm_i915_private *dev_priv = dev->dev_private; | 152 | struct drm_i915_private *dev_priv = dev->dev_private; |
| @@ -145,16 +155,12 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
| 145 | struct drm_i915_gem_object *obj = intel_fb->obj; | 155 | struct drm_i915_gem_object *obj = intel_fb->obj; |
| 146 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 156 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 147 | int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; | 157 | int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; |
| 148 | unsigned long stall_watermark = 200; | ||
| 149 | u32 dpfc_ctl; | 158 | u32 dpfc_ctl; |
| 150 | 159 | ||
| 151 | dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X; | 160 | dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X; |
| 152 | dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg; | 161 | dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg; |
| 153 | I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY); | 162 | I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY); |
| 154 | 163 | ||
| 155 | I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | | ||
| 156 | (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | | ||
| 157 | (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); | ||
| 158 | I915_WRITE(DPFC_FENCE_YOFF, crtc->y); | 164 | I915_WRITE(DPFC_FENCE_YOFF, crtc->y); |
| 159 | 165 | ||
| 160 | /* enable it... */ | 166 | /* enable it... */ |
| @@ -191,7 +197,11 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev) | |||
| 191 | u32 blt_ecoskpd; | 197 | u32 blt_ecoskpd; |
| 192 | 198 | ||
| 193 | /* Make sure blitter notifies FBC of writes */ | 199 | /* Make sure blitter notifies FBC of writes */ |
| 194 | gen6_gt_force_wake_get(dev_priv); | 200 | |
| 201 | /* Blitter is part of Media powerwell on VLV. No impact of | ||
| 202 | * his param in other platforms for now */ | ||
| 203 | gen6_gt_force_wake_get(dev_priv, FORCEWAKE_MEDIA); | ||
| 204 | |||
| 195 | blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD); | 205 | blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD); |
| 196 | blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY << | 206 | blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY << |
| 197 | GEN6_BLITTER_LOCK_SHIFT; | 207 | GEN6_BLITTER_LOCK_SHIFT; |
| @@ -202,10 +212,11 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev) | |||
| 202 | GEN6_BLITTER_LOCK_SHIFT); | 212 | GEN6_BLITTER_LOCK_SHIFT); |
| 203 | I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); | 213 | I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); |
| 204 | POSTING_READ(GEN6_BLITTER_ECOSKPD); | 214 | POSTING_READ(GEN6_BLITTER_ECOSKPD); |
| 205 | gen6_gt_force_wake_put(dev_priv); | 215 | |
| 216 | gen6_gt_force_wake_put(dev_priv, FORCEWAKE_MEDIA); | ||
| 206 | } | 217 | } |
| 207 | 218 | ||
| 208 | static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | 219 | static void ironlake_enable_fbc(struct drm_crtc *crtc) |
| 209 | { | 220 | { |
| 210 | struct drm_device *dev = crtc->dev; | 221 | struct drm_device *dev = crtc->dev; |
| 211 | struct drm_i915_private *dev_priv = dev->dev_private; | 222 | struct drm_i915_private *dev_priv = dev->dev_private; |
| @@ -214,7 +225,6 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
| 214 | struct drm_i915_gem_object *obj = intel_fb->obj; | 225 | struct drm_i915_gem_object *obj = intel_fb->obj; |
| 215 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 226 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 216 | int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; | 227 | int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; |
| 217 | unsigned long stall_watermark = 200; | ||
| 218 | u32 dpfc_ctl; | 228 | u32 dpfc_ctl; |
| 219 | 229 | ||
| 220 | dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); | 230 | dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); |
| @@ -222,12 +232,11 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
| 222 | dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X); | 232 | dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X); |
| 223 | /* Set persistent mode for front-buffer rendering, ala X. */ | 233 | /* Set persistent mode for front-buffer rendering, ala X. */ |
| 224 | dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE; | 234 | dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE; |
| 225 | dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg); | 235 | dpfc_ctl |= DPFC_CTL_FENCE_EN; |
| 236 | if (IS_GEN5(dev)) | ||
| 237 | dpfc_ctl |= obj->fence_reg; | ||
| 226 | I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY); | 238 | I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY); |
| 227 | 239 | ||
| 228 | I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | | ||
| 229 | (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | | ||
| 230 | (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); | ||
| 231 | I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); | 240 | I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); |
| 232 | I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID); | 241 | I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID); |
| 233 | /* enable it... */ | 242 | /* enable it... */ |
| @@ -265,7 +274,7 @@ static bool ironlake_fbc_enabled(struct drm_device *dev) | |||
| 265 | return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN; | 274 | return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN; |
| 266 | } | 275 | } |
| 267 | 276 | ||
| 268 | static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | 277 | static void gen7_enable_fbc(struct drm_crtc *crtc) |
| 269 | { | 278 | { |
| 270 | struct drm_device *dev = crtc->dev; | 279 | struct drm_device *dev = crtc->dev; |
| 271 | struct drm_i915_private *dev_priv = dev->dev_private; | 280 | struct drm_i915_private *dev_priv = dev->dev_private; |
| @@ -295,7 +304,7 @@ static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
| 295 | 304 | ||
| 296 | sandybridge_blit_fbc_update(dev); | 305 | sandybridge_blit_fbc_update(dev); |
| 297 | 306 | ||
| 298 | DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); | 307 | DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane)); |
| 299 | } | 308 | } |
| 300 | 309 | ||
| 301 | bool intel_fbc_enabled(struct drm_device *dev) | 310 | bool intel_fbc_enabled(struct drm_device *dev) |
| @@ -322,8 +331,7 @@ static void intel_fbc_work_fn(struct work_struct *__work) | |||
| 322 | * the prior work. | 331 | * the prior work. |
| 323 | */ | 332 | */ |
| 324 | if (work->crtc->fb == work->fb) { | 333 | if (work->crtc->fb == work->fb) { |
| 325 | dev_priv->display.enable_fbc(work->crtc, | 334 | dev_priv->display.enable_fbc(work->crtc); |
| 326 | work->interval); | ||
| 327 | 335 | ||
| 328 | dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane; | 336 | dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane; |
| 329 | dev_priv->fbc.fb_id = work->crtc->fb->base.id; | 337 | dev_priv->fbc.fb_id = work->crtc->fb->base.id; |
| @@ -360,7 +368,7 @@ static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv) | |||
| 360 | dev_priv->fbc.fbc_work = NULL; | 368 | dev_priv->fbc.fbc_work = NULL; |
| 361 | } | 369 | } |
| 362 | 370 | ||
| 363 | static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | 371 | static void intel_enable_fbc(struct drm_crtc *crtc) |
| 364 | { | 372 | { |
| 365 | struct intel_fbc_work *work; | 373 | struct intel_fbc_work *work; |
| 366 | struct drm_device *dev = crtc->dev; | 374 | struct drm_device *dev = crtc->dev; |
| @@ -374,13 +382,12 @@ static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
| 374 | work = kzalloc(sizeof(*work), GFP_KERNEL); | 382 | work = kzalloc(sizeof(*work), GFP_KERNEL); |
| 375 | if (work == NULL) { | 383 | if (work == NULL) { |
| 376 | DRM_ERROR("Failed to allocate FBC work structure\n"); | 384 | DRM_ERROR("Failed to allocate FBC work structure\n"); |
| 377 | dev_priv->display.enable_fbc(crtc, interval); | 385 | dev_priv->display.enable_fbc(crtc); |
| 378 | return; | 386 | return; |
| 379 | } | 387 | } |
| 380 | 388 | ||
| 381 | work->crtc = crtc; | 389 | work->crtc = crtc; |
| 382 | work->fb = crtc->fb; | 390 | work->fb = crtc->fb; |
| 383 | work->interval = interval; | ||
| 384 | INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn); | 391 | INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn); |
| 385 | 392 | ||
| 386 | dev_priv->fbc.fbc_work = work; | 393 | dev_priv->fbc.fbc_work = work; |
| @@ -454,7 +461,7 @@ void intel_update_fbc(struct drm_device *dev) | |||
| 454 | const struct drm_display_mode *adjusted_mode; | 461 | const struct drm_display_mode *adjusted_mode; |
| 455 | unsigned int max_width, max_height; | 462 | unsigned int max_width, max_height; |
| 456 | 463 | ||
| 457 | if (!I915_HAS_FBC(dev)) { | 464 | if (!HAS_FBC(dev)) { |
| 458 | set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED); | 465 | set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED); |
| 459 | return; | 466 | return; |
| 460 | } | 467 | } |
| @@ -530,10 +537,10 @@ void intel_update_fbc(struct drm_device *dev) | |||
| 530 | DRM_DEBUG_KMS("mode too large for compression, disabling\n"); | 537 | DRM_DEBUG_KMS("mode too large for compression, disabling\n"); |
| 531 | goto out_disable; | 538 | goto out_disable; |
| 532 | } | 539 | } |
| 533 | if ((IS_I915GM(dev) || IS_I945GM(dev) || IS_HASWELL(dev)) && | 540 | if ((INTEL_INFO(dev)->gen < 4 || IS_HASWELL(dev)) && |
| 534 | intel_crtc->plane != 0) { | 541 | intel_crtc->plane != PLANE_A) { |
| 535 | if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE)) | 542 | if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE)) |
| 536 | DRM_DEBUG_KMS("plane not 0, disabling compression\n"); | 543 | DRM_DEBUG_KMS("plane not A, disabling compression\n"); |
| 537 | goto out_disable; | 544 | goto out_disable; |
| 538 | } | 545 | } |
| 539 | 546 | ||
| @@ -595,7 +602,7 @@ void intel_update_fbc(struct drm_device *dev) | |||
| 595 | intel_disable_fbc(dev); | 602 | intel_disable_fbc(dev); |
| 596 | } | 603 | } |
| 597 | 604 | ||
| 598 | intel_enable_fbc(crtc, 500); | 605 | intel_enable_fbc(crtc); |
| 599 | dev_priv->fbc.no_fbc_reason = FBC_OK; | 606 | dev_priv->fbc.no_fbc_reason = FBC_OK; |
| 600 | return; | 607 | return; |
| 601 | 608 | ||
| @@ -817,7 +824,7 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane) | |||
| 817 | return size; | 824 | return size; |
| 818 | } | 825 | } |
| 819 | 826 | ||
| 820 | static int i85x_get_fifo_size(struct drm_device *dev, int plane) | 827 | static int i830_get_fifo_size(struct drm_device *dev, int plane) |
| 821 | { | 828 | { |
| 822 | struct drm_i915_private *dev_priv = dev->dev_private; | 829 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 823 | uint32_t dsparb = I915_READ(DSPARB); | 830 | uint32_t dsparb = I915_READ(DSPARB); |
| @@ -850,21 +857,6 @@ static int i845_get_fifo_size(struct drm_device *dev, int plane) | |||
| 850 | return size; | 857 | return size; |
| 851 | } | 858 | } |
| 852 | 859 | ||
| 853 | static int i830_get_fifo_size(struct drm_device *dev, int plane) | ||
| 854 | { | ||
| 855 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 856 | uint32_t dsparb = I915_READ(DSPARB); | ||
| 857 | int size; | ||
| 858 | |||
| 859 | size = dsparb & 0x7f; | ||
| 860 | size >>= 1; /* Convert to cachelines */ | ||
| 861 | |||
| 862 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, | ||
| 863 | plane ? "B" : "A", size); | ||
| 864 | |||
| 865 | return size; | ||
| 866 | } | ||
| 867 | |||
| 868 | /* Pineview has different values for various configs */ | 860 | /* Pineview has different values for various configs */ |
| 869 | static const struct intel_watermark_params pineview_display_wm = { | 861 | static const struct intel_watermark_params pineview_display_wm = { |
| 870 | PINEVIEW_DISPLAY_FIFO, | 862 | PINEVIEW_DISPLAY_FIFO, |
| @@ -943,14 +935,14 @@ static const struct intel_watermark_params i915_wm_info = { | |||
| 943 | 2, | 935 | 2, |
| 944 | I915_FIFO_LINE_SIZE | 936 | I915_FIFO_LINE_SIZE |
| 945 | }; | 937 | }; |
| 946 | static const struct intel_watermark_params i855_wm_info = { | 938 | static const struct intel_watermark_params i830_wm_info = { |
| 947 | I855GM_FIFO_SIZE, | 939 | I855GM_FIFO_SIZE, |
| 948 | I915_MAX_WM, | 940 | I915_MAX_WM, |
| 949 | 1, | 941 | 1, |
| 950 | 2, | 942 | 2, |
| 951 | I830_FIFO_LINE_SIZE | 943 | I830_FIFO_LINE_SIZE |
| 952 | }; | 944 | }; |
| 953 | static const struct intel_watermark_params i830_wm_info = { | 945 | static const struct intel_watermark_params i845_wm_info = { |
| 954 | I830_FIFO_SIZE, | 946 | I830_FIFO_SIZE, |
| 955 | I915_MAX_WM, | 947 | I915_MAX_WM, |
| 956 | 1, | 948 | 1, |
| @@ -958,65 +950,6 @@ static const struct intel_watermark_params i830_wm_info = { | |||
| 958 | I830_FIFO_LINE_SIZE | 950 | I830_FIFO_LINE_SIZE |
| 959 | }; | 951 | }; |
| 960 | 952 | ||
| 961 | static const struct intel_watermark_params ironlake_display_wm_info = { | ||
| 962 | ILK_DISPLAY_FIFO, | ||
| 963 | ILK_DISPLAY_MAXWM, | ||
| 964 | ILK_DISPLAY_DFTWM, | ||
| 965 | 2, | ||
| 966 | ILK_FIFO_LINE_SIZE | ||
| 967 | }; | ||
| 968 | static const struct intel_watermark_params ironlake_cursor_wm_info = { | ||
| 969 | ILK_CURSOR_FIFO, | ||
| 970 | ILK_CURSOR_MAXWM, | ||
| 971 | ILK_CURSOR_DFTWM, | ||
| 972 | 2, | ||
| 973 | ILK_FIFO_LINE_SIZE | ||
| 974 | }; | ||
| 975 | static const struct intel_watermark_params ironlake_display_srwm_info = { | ||
| 976 | ILK_DISPLAY_SR_FIFO, | ||
| 977 | ILK_DISPLAY_MAX_SRWM, | ||
| 978 | ILK_DISPLAY_DFT_SRWM, | ||
| 979 | 2, | ||
| 980 | ILK_FIFO_LINE_SIZE | ||
| 981 | }; | ||
| 982 | static const struct intel_watermark_params ironlake_cursor_srwm_info = { | ||
| 983 | ILK_CURSOR_SR_FIFO, | ||
| 984 | ILK_CURSOR_MAX_SRWM, | ||
| 985 | ILK_CURSOR_DFT_SRWM, | ||
| 986 | 2, | ||
| 987 | ILK_FIFO_LINE_SIZE | ||
| 988 | }; | ||
| 989 | |||
| 990 | static const struct intel_watermark_params sandybridge_display_wm_info = { | ||
| 991 | SNB_DISPLAY_FIFO, | ||
| 992 | SNB_DISPLAY_MAXWM, | ||
| 993 | SNB_DISPLAY_DFTWM, | ||
| 994 | 2, | ||
| 995 | SNB_FIFO_LINE_SIZE | ||
| 996 | }; | ||
| 997 | static const struct intel_watermark_params sandybridge_cursor_wm_info = { | ||
| 998 | SNB_CURSOR_FIFO, | ||
| 999 | SNB_CURSOR_MAXWM, | ||
| 1000 | SNB_CURSOR_DFTWM, | ||
| 1001 | 2, | ||
| 1002 | SNB_FIFO_LINE_SIZE | ||
| 1003 | }; | ||
| 1004 | static const struct intel_watermark_params sandybridge_display_srwm_info = { | ||
| 1005 | SNB_DISPLAY_SR_FIFO, | ||
| 1006 | SNB_DISPLAY_MAX_SRWM, | ||
| 1007 | SNB_DISPLAY_DFT_SRWM, | ||
| 1008 | 2, | ||
| 1009 | SNB_FIFO_LINE_SIZE | ||
| 1010 | }; | ||
| 1011 | static const struct intel_watermark_params sandybridge_cursor_srwm_info = { | ||
| 1012 | SNB_CURSOR_SR_FIFO, | ||
| 1013 | SNB_CURSOR_MAX_SRWM, | ||
| 1014 | SNB_CURSOR_DFT_SRWM, | ||
| 1015 | 2, | ||
| 1016 | SNB_FIFO_LINE_SIZE | ||
| 1017 | }; | ||
| 1018 | |||
| 1019 | |||
| 1020 | /** | 953 | /** |
| 1021 | * intel_calculate_wm - calculate watermark level | 954 | * intel_calculate_wm - calculate watermark level |
| 1022 | * @clock_in_khz: pixel clock | 955 | * @clock_in_khz: pixel clock |
| @@ -1567,7 +1500,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc) | |||
| 1567 | else if (!IS_GEN2(dev)) | 1500 | else if (!IS_GEN2(dev)) |
| 1568 | wm_info = &i915_wm_info; | 1501 | wm_info = &i915_wm_info; |
| 1569 | else | 1502 | else |
| 1570 | wm_info = &i855_wm_info; | 1503 | wm_info = &i830_wm_info; |
| 1571 | 1504 | ||
| 1572 | fifo_size = dev_priv->display.get_fifo_size(dev, 0); | 1505 | fifo_size = dev_priv->display.get_fifo_size(dev, 0); |
| 1573 | crtc = intel_get_crtc_for_plane(dev, 0); | 1506 | crtc = intel_get_crtc_for_plane(dev, 0); |
| @@ -1615,7 +1548,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc) | |||
| 1615 | if (IS_I945G(dev) || IS_I945GM(dev)) | 1548 | if (IS_I945G(dev) || IS_I945GM(dev)) |
| 1616 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0); | 1549 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0); |
| 1617 | else if (IS_I915GM(dev)) | 1550 | else if (IS_I915GM(dev)) |
| 1618 | I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN); | 1551 | I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_SELF_EN)); |
| 1619 | 1552 | ||
| 1620 | /* Calc sr entries for one plane configs */ | 1553 | /* Calc sr entries for one plane configs */ |
| 1621 | if (HAS_FW_BLC(dev) && enabled) { | 1554 | if (HAS_FW_BLC(dev) && enabled) { |
| @@ -1667,14 +1600,14 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc) | |||
| 1667 | I915_WRITE(FW_BLC_SELF, | 1600 | I915_WRITE(FW_BLC_SELF, |
| 1668 | FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN); | 1601 | FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN); |
| 1669 | else if (IS_I915GM(dev)) | 1602 | else if (IS_I915GM(dev)) |
| 1670 | I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN); | 1603 | I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_SELF_EN)); |
| 1671 | DRM_DEBUG_KMS("memory self refresh enabled\n"); | 1604 | DRM_DEBUG_KMS("memory self refresh enabled\n"); |
| 1672 | } else | 1605 | } else |
| 1673 | DRM_DEBUG_KMS("memory self refresh disabled\n"); | 1606 | DRM_DEBUG_KMS("memory self refresh disabled\n"); |
| 1674 | } | 1607 | } |
| 1675 | } | 1608 | } |
| 1676 | 1609 | ||
| 1677 | static void i830_update_wm(struct drm_crtc *unused_crtc) | 1610 | static void i845_update_wm(struct drm_crtc *unused_crtc) |
| 1678 | { | 1611 | { |
| 1679 | struct drm_device *dev = unused_crtc->dev; | 1612 | struct drm_device *dev = unused_crtc->dev; |
| 1680 | struct drm_i915_private *dev_priv = dev->dev_private; | 1613 | struct drm_i915_private *dev_priv = dev->dev_private; |
| @@ -1689,7 +1622,7 @@ static void i830_update_wm(struct drm_crtc *unused_crtc) | |||
| 1689 | 1622 | ||
| 1690 | adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode; | 1623 | adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode; |
| 1691 | planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock, | 1624 | planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock, |
| 1692 | &i830_wm_info, | 1625 | &i845_wm_info, |
| 1693 | dev_priv->display.get_fifo_size(dev, 0), | 1626 | dev_priv->display.get_fifo_size(dev, 0), |
| 1694 | 4, latency_ns); | 1627 | 4, latency_ns); |
| 1695 | fwater_lo = I915_READ(FW_BLC) & ~0xfff; | 1628 | fwater_lo = I915_READ(FW_BLC) & ~0xfff; |
| @@ -1700,423 +1633,6 @@ static void i830_update_wm(struct drm_crtc *unused_crtc) | |||
| 1700 | I915_WRITE(FW_BLC, fwater_lo); | 1633 | I915_WRITE(FW_BLC, fwater_lo); |
| 1701 | } | 1634 | } |
| 1702 | 1635 | ||
| 1703 | /* | ||
| 1704 | * Check the wm result. | ||
| 1705 | * | ||
| 1706 | * If any calculated watermark values is larger than the maximum value that | ||
| 1707 | * can be programmed into the associated watermark register, that watermark | ||
| 1708 | * must be disabled. | ||
| 1709 | */ | ||
| 1710 | static bool ironlake_check_srwm(struct drm_device *dev, int level, | ||
| 1711 | int fbc_wm, int display_wm, int cursor_wm, | ||
| 1712 | const struct intel_watermark_params *display, | ||
| 1713 | const struct intel_watermark_params *cursor) | ||
| 1714 | { | ||
| 1715 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 1716 | |||
| 1717 | DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d," | ||
| 1718 | " cursor %d\n", level, display_wm, fbc_wm, cursor_wm); | ||
| 1719 | |||
| 1720 | if (fbc_wm > SNB_FBC_MAX_SRWM) { | ||
| 1721 | DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n", | ||
| 1722 | fbc_wm, SNB_FBC_MAX_SRWM, level); | ||
| 1723 | |||
| 1724 | /* fbc has it's own way to disable FBC WM */ | ||
| 1725 | I915_WRITE(DISP_ARB_CTL, | ||
| 1726 | I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS); | ||
| 1727 | return false; | ||
| 1728 | } else if (INTEL_INFO(dev)->gen >= 6) { | ||
| 1729 | /* enable FBC WM (except on ILK, where it must remain off) */ | ||
| 1730 | I915_WRITE(DISP_ARB_CTL, | ||
| 1731 | I915_READ(DISP_ARB_CTL) & ~DISP_FBC_WM_DIS); | ||
| 1732 | } | ||
| 1733 | |||
| 1734 | if (display_wm > display->max_wm) { | ||
| 1735 | DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n", | ||
| 1736 | display_wm, SNB_DISPLAY_MAX_SRWM, level); | ||
| 1737 | return false; | ||
| 1738 | } | ||
| 1739 | |||
| 1740 | if (cursor_wm > cursor->max_wm) { | ||
| 1741 | DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n", | ||
| 1742 | cursor_wm, SNB_CURSOR_MAX_SRWM, level); | ||
| 1743 | return false; | ||
| 1744 | } | ||
| 1745 | |||
| 1746 | if (!(fbc_wm || display_wm || cursor_wm)) { | ||
| 1747 | DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level); | ||
| 1748 | return false; | ||
| 1749 | } | ||
| 1750 | |||
| 1751 | return true; | ||
| 1752 | } | ||
| 1753 | |||
| 1754 | /* | ||
| 1755 | * Compute watermark values of WM[1-3], | ||
| 1756 | */ | ||
| 1757 | static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane, | ||
| 1758 | int latency_ns, | ||
| 1759 | const struct intel_watermark_params *display, | ||
| 1760 | const struct intel_watermark_params *cursor, | ||
| 1761 | int *fbc_wm, int *display_wm, int *cursor_wm) | ||
| 1762 | { | ||
| 1763 | struct drm_crtc *crtc; | ||
| 1764 | const struct drm_display_mode *adjusted_mode; | ||
| 1765 | unsigned long line_time_us; | ||
| 1766 | int hdisplay, htotal, pixel_size, clock; | ||
| 1767 | int line_count, line_size; | ||
| 1768 | int small, large; | ||
| 1769 | int entries; | ||
| 1770 | |||
| 1771 | if (!latency_ns) { | ||
| 1772 | *fbc_wm = *display_wm = *cursor_wm = 0; | ||
| 1773 | return false; | ||
| 1774 | } | ||
| 1775 | |||
| 1776 | crtc = intel_get_crtc_for_plane(dev, plane); | ||
| 1777 | adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode; | ||
| 1778 | clock = adjusted_mode->crtc_clock; | ||
| 1779 | htotal = adjusted_mode->crtc_htotal; | ||
| 1780 | hdisplay = to_intel_crtc(crtc)->config.pipe_src_w; | ||
| 1781 | pixel_size = crtc->fb->bits_per_pixel / 8; | ||
| 1782 | |||
| 1783 | line_time_us = (htotal * 1000) / clock; | ||
| 1784 | line_count = (latency_ns / line_time_us + 1000) / 1000; | ||
| 1785 | line_size = hdisplay * pixel_size; | ||
| 1786 | |||
| 1787 | /* Use the minimum of the small and large buffer method for primary */ | ||
| 1788 | small = ((clock * pixel_size / 1000) * latency_ns) / 1000; | ||
| 1789 | large = line_count * line_size; | ||
| 1790 | |||
| 1791 | entries = DIV_ROUND_UP(min(small, large), display->cacheline_size); | ||
| 1792 | *display_wm = entries + display->guard_size; | ||
| 1793 | |||
| 1794 | /* | ||
| 1795 | * Spec says: | ||
| 1796 | * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2 | ||
| 1797 | */ | ||
| 1798 | *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2; | ||
| 1799 | |||
| 1800 | /* calculate the self-refresh watermark for display cursor */ | ||
| 1801 | entries = line_count * pixel_size * 64; | ||
| 1802 | entries = DIV_ROUND_UP(entries, cursor->cacheline_size); | ||
| 1803 | *cursor_wm = entries + cursor->guard_size; | ||
| 1804 | |||
| 1805 | return ironlake_check_srwm(dev, level, | ||
| 1806 | *fbc_wm, *display_wm, *cursor_wm, | ||
| 1807 | display, cursor); | ||
| 1808 | } | ||
| 1809 | |||
| 1810 | static void ironlake_update_wm(struct drm_crtc *crtc) | ||
| 1811 | { | ||
| 1812 | struct drm_device *dev = crtc->dev; | ||
| 1813 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 1814 | int fbc_wm, plane_wm, cursor_wm; | ||
| 1815 | unsigned int enabled; | ||
| 1816 | |||
| 1817 | enabled = 0; | ||
| 1818 | if (g4x_compute_wm0(dev, PIPE_A, | ||
| 1819 | &ironlake_display_wm_info, | ||
| 1820 | dev_priv->wm.pri_latency[0] * 100, | ||
| 1821 | &ironlake_cursor_wm_info, | ||
| 1822 | dev_priv->wm.cur_latency[0] * 100, | ||
| 1823 | &plane_wm, &cursor_wm)) { | ||
| 1824 | I915_WRITE(WM0_PIPEA_ILK, | ||
| 1825 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); | ||
| 1826 | DRM_DEBUG_KMS("FIFO watermarks For pipe A -" | ||
| 1827 | " plane %d, " "cursor: %d\n", | ||
| 1828 | plane_wm, cursor_wm); | ||
| 1829 | enabled |= 1 << PIPE_A; | ||
| 1830 | } | ||
| 1831 | |||
| 1832 | if (g4x_compute_wm0(dev, PIPE_B, | ||
| 1833 | &ironlake_display_wm_info, | ||
| 1834 | dev_priv->wm.pri_latency[0] * 100, | ||
| 1835 | &ironlake_cursor_wm_info, | ||
| 1836 | dev_priv->wm.cur_latency[0] * 100, | ||
| 1837 | &plane_wm, &cursor_wm)) { | ||
| 1838 | I915_WRITE(WM0_PIPEB_ILK, | ||
| 1839 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); | ||
| 1840 | DRM_DEBUG_KMS("FIFO watermarks For pipe B -" | ||
| 1841 | " plane %d, cursor: %d\n", | ||
| 1842 | plane_wm, cursor_wm); | ||
| 1843 | enabled |= 1 << PIPE_B; | ||
| 1844 | } | ||
| 1845 | |||
| 1846 | /* | ||
| 1847 | * Calculate and update the self-refresh watermark only when one | ||
| 1848 | * display plane is used. | ||
| 1849 | */ | ||
| 1850 | I915_WRITE(WM3_LP_ILK, 0); | ||
| 1851 | I915_WRITE(WM2_LP_ILK, 0); | ||
| 1852 | I915_WRITE(WM1_LP_ILK, 0); | ||
| 1853 | |||
| 1854 | if (!single_plane_enabled(enabled)) | ||
| 1855 | return; | ||
| 1856 | enabled = ffs(enabled) - 1; | ||
| 1857 | |||
| 1858 | /* WM1 */ | ||
| 1859 | if (!ironlake_compute_srwm(dev, 1, enabled, | ||
| 1860 | dev_priv->wm.pri_latency[1] * 500, | ||
| 1861 | &ironlake_display_srwm_info, | ||
| 1862 | &ironlake_cursor_srwm_info, | ||
| 1863 | &fbc_wm, &plane_wm, &cursor_wm)) | ||
| 1864 | return; | ||
| 1865 | |||
| 1866 | I915_WRITE(WM1_LP_ILK, | ||
| 1867 | WM1_LP_SR_EN | | ||
| 1868 | (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) | | ||
| 1869 | (fbc_wm << WM1_LP_FBC_SHIFT) | | ||
| 1870 | (plane_wm << WM1_LP_SR_SHIFT) | | ||
| 1871 | cursor_wm); | ||
| 1872 | |||
| 1873 | /* WM2 */ | ||
| 1874 | if (!ironlake_compute_srwm(dev, 2, enabled, | ||
| 1875 | dev_priv->wm.pri_latency[2] * 500, | ||
| 1876 | &ironlake_display_srwm_info, | ||
| 1877 | &ironlake_cursor_srwm_info, | ||
| 1878 | &fbc_wm, &plane_wm, &cursor_wm)) | ||
| 1879 | return; | ||
| 1880 | |||
| 1881 | I915_WRITE(WM2_LP_ILK, | ||
| 1882 | WM2_LP_EN | | ||
| 1883 | (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) | | ||
| 1884 | (fbc_wm << WM1_LP_FBC_SHIFT) | | ||
| 1885 | (plane_wm << WM1_LP_SR_SHIFT) | | ||
| 1886 | cursor_wm); | ||
| 1887 | |||
| 1888 | /* | ||
| 1889 | * WM3 is unsupported on ILK, probably because we don't have latency | ||
| 1890 | * data for that power state | ||
| 1891 | */ | ||
| 1892 | } | ||
| 1893 | |||
| 1894 | static void sandybridge_update_wm(struct drm_crtc *crtc) | ||
| 1895 | { | ||
| 1896 | struct drm_device *dev = crtc->dev; | ||
| 1897 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 1898 | int latency = dev_priv->wm.pri_latency[0] * 100; /* In unit 0.1us */ | ||
| 1899 | u32 val; | ||
| 1900 | int fbc_wm, plane_wm, cursor_wm; | ||
| 1901 | unsigned int enabled; | ||
| 1902 | |||
| 1903 | enabled = 0; | ||
| 1904 | if (g4x_compute_wm0(dev, PIPE_A, | ||
| 1905 | &sandybridge_display_wm_info, latency, | ||
| 1906 | &sandybridge_cursor_wm_info, latency, | ||
| 1907 | &plane_wm, &cursor_wm)) { | ||
| 1908 | val = I915_READ(WM0_PIPEA_ILK); | ||
| 1909 | val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); | ||
| 1910 | I915_WRITE(WM0_PIPEA_ILK, val | | ||
| 1911 | ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm)); | ||
| 1912 | DRM_DEBUG_KMS("FIFO watermarks For pipe A -" | ||
| 1913 | " plane %d, " "cursor: %d\n", | ||
| 1914 | plane_wm, cursor_wm); | ||
| 1915 | enabled |= 1 << PIPE_A; | ||
| 1916 | } | ||
| 1917 | |||
| 1918 | if (g4x_compute_wm0(dev, PIPE_B, | ||
| 1919 | &sandybridge_display_wm_info, latency, | ||
| 1920 | &sandybridge_cursor_wm_info, latency, | ||
| 1921 | &plane_wm, &cursor_wm)) { | ||
| 1922 | val = I915_READ(WM0_PIPEB_ILK); | ||
| 1923 | val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); | ||
| 1924 | I915_WRITE(WM0_PIPEB_ILK, val | | ||
| 1925 | ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm)); | ||
| 1926 | DRM_DEBUG_KMS("FIFO watermarks For pipe B -" | ||
| 1927 | " plane %d, cursor: %d\n", | ||
| 1928 | plane_wm, cursor_wm); | ||
| 1929 | enabled |= 1 << PIPE_B; | ||
| 1930 | } | ||
| 1931 | |||
| 1932 | /* | ||
| 1933 | * Calculate and update the self-refresh watermark only when one | ||
| 1934 | * display plane is used. | ||
| 1935 | * | ||
| 1936 | * SNB support 3 levels of watermark. | ||
| 1937 | * | ||
| 1938 | * WM1/WM2/WM2 watermarks have to be enabled in the ascending order, | ||
| 1939 | * and disabled in the descending order | ||
| 1940 | * | ||
| 1941 | */ | ||
| 1942 | I915_WRITE(WM3_LP_ILK, 0); | ||
| 1943 | I915_WRITE(WM2_LP_ILK, 0); | ||
| 1944 | I915_WRITE(WM1_LP_ILK, 0); | ||
| 1945 | |||
| 1946 | if (!single_plane_enabled(enabled) || | ||
| 1947 | dev_priv->sprite_scaling_enabled) | ||
| 1948 | return; | ||
| 1949 | enabled = ffs(enabled) - 1; | ||
| 1950 | |||
| 1951 | /* WM1 */ | ||
| 1952 | if (!ironlake_compute_srwm(dev, 1, enabled, | ||
| 1953 | dev_priv->wm.pri_latency[1] * 500, | ||
| 1954 | &sandybridge_display_srwm_info, | ||
| 1955 | &sandybridge_cursor_srwm_info, | ||
| 1956 | &fbc_wm, &plane_wm, &cursor_wm)) | ||
| 1957 | return; | ||
| 1958 | |||
| 1959 | I915_WRITE(WM1_LP_ILK, | ||
| 1960 | WM1_LP_SR_EN | | ||
| 1961 | (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) | | ||
| 1962 | (fbc_wm << WM1_LP_FBC_SHIFT) | | ||
| 1963 | (plane_wm << WM1_LP_SR_SHIFT) | | ||
| 1964 | cursor_wm); | ||
| 1965 | |||
| 1966 | /* WM2 */ | ||
| 1967 | if (!ironlake_compute_srwm(dev, 2, enabled, | ||
| 1968 | dev_priv->wm.pri_latency[2] * 500, | ||
| 1969 | &sandybridge_display_srwm_info, | ||
| 1970 | &sandybridge_cursor_srwm_info, | ||
| 1971 | &fbc_wm, &plane_wm, &cursor_wm)) | ||
| 1972 | return; | ||
| 1973 | |||
| 1974 | I915_WRITE(WM2_LP_ILK, | ||
| 1975 | WM2_LP_EN | | ||
| 1976 | (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) | | ||
| 1977 | (fbc_wm << WM1_LP_FBC_SHIFT) | | ||
| 1978 | (plane_wm << WM1_LP_SR_SHIFT) | | ||
| 1979 | cursor_wm); | ||
| 1980 | |||
| 1981 | /* WM3 */ | ||
| 1982 | if (!ironlake_compute_srwm(dev, 3, enabled, | ||
| 1983 | dev_priv->wm.pri_latency[3] * 500, | ||
| 1984 | &sandybridge_display_srwm_info, | ||
| 1985 | &sandybridge_cursor_srwm_info, | ||
| 1986 | &fbc_wm, &plane_wm, &cursor_wm)) | ||
| 1987 | return; | ||
| 1988 | |||
| 1989 | I915_WRITE(WM3_LP_ILK, | ||
| 1990 | WM3_LP_EN | | ||
| 1991 | (dev_priv->wm.pri_latency[3] << WM1_LP_LATENCY_SHIFT) | | ||
| 1992 | (fbc_wm << WM1_LP_FBC_SHIFT) | | ||
| 1993 | (plane_wm << WM1_LP_SR_SHIFT) | | ||
| 1994 | cursor_wm); | ||
| 1995 | } | ||
| 1996 | |||
| 1997 | static void ivybridge_update_wm(struct drm_crtc *crtc) | ||
| 1998 | { | ||
| 1999 | struct drm_device *dev = crtc->dev; | ||
| 2000 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 2001 | int latency = dev_priv->wm.pri_latency[0] * 100; /* In unit 0.1us */ | ||
| 2002 | u32 val; | ||
| 2003 | int fbc_wm, plane_wm, cursor_wm; | ||
| 2004 | int ignore_fbc_wm, ignore_plane_wm, ignore_cursor_wm; | ||
| 2005 | unsigned int enabled; | ||
| 2006 | |||
| 2007 | enabled = 0; | ||
| 2008 | if (g4x_compute_wm0(dev, PIPE_A, | ||
| 2009 | &sandybridge_display_wm_info, latency, | ||
| 2010 | &sandybridge_cursor_wm_info, latency, | ||
| 2011 | &plane_wm, &cursor_wm)) { | ||
| 2012 | val = I915_READ(WM0_PIPEA_ILK); | ||
| 2013 | val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); | ||
| 2014 | I915_WRITE(WM0_PIPEA_ILK, val | | ||
| 2015 | ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm)); | ||
| 2016 | DRM_DEBUG_KMS("FIFO watermarks For pipe A -" | ||
| 2017 | " plane %d, " "cursor: %d\n", | ||
| 2018 | plane_wm, cursor_wm); | ||
| 2019 | enabled |= 1 << PIPE_A; | ||
| 2020 | } | ||
| 2021 | |||
| 2022 | if (g4x_compute_wm0(dev, PIPE_B, | ||
| 2023 | &sandybridge_display_wm_info, latency, | ||
| 2024 | &sandybridge_cursor_wm_info, latency, | ||
| 2025 | &plane_wm, &cursor_wm)) { | ||
| 2026 | val = I915_READ(WM0_PIPEB_ILK); | ||
| 2027 | val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); | ||
| 2028 | I915_WRITE(WM0_PIPEB_ILK, val | | ||
| 2029 | ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm)); | ||
| 2030 | DRM_DEBUG_KMS("FIFO watermarks For pipe B -" | ||
| 2031 | " plane %d, cursor: %d\n", | ||
| 2032 | plane_wm, cursor_wm); | ||
| 2033 | enabled |= 1 << PIPE_B; | ||
| 2034 | } | ||
| 2035 | |||
| 2036 | if (g4x_compute_wm0(dev, PIPE_C, | ||
| 2037 | &sandybridge_display_wm_info, latency, | ||
| 2038 | &sandybridge_cursor_wm_info, latency, | ||
| 2039 | &plane_wm, &cursor_wm)) { | ||
| 2040 | val = I915_READ(WM0_PIPEC_IVB); | ||
| 2041 | val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); | ||
| 2042 | I915_WRITE(WM0_PIPEC_IVB, val | | ||
| 2043 | ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm)); | ||
| 2044 | DRM_DEBUG_KMS("FIFO watermarks For pipe C -" | ||
| 2045 | " plane %d, cursor: %d\n", | ||
| 2046 | plane_wm, cursor_wm); | ||
| 2047 | enabled |= 1 << PIPE_C; | ||
| 2048 | } | ||
| 2049 | |||
| 2050 | /* | ||
| 2051 | * Calculate and update the self-refresh watermark only when one | ||
| 2052 | * display plane is used. | ||
| 2053 | * | ||
| 2054 | * SNB support 3 levels of watermark. | ||
| 2055 | * | ||
| 2056 | * WM1/WM2/WM2 watermarks have to be enabled in the ascending order, | ||
| 2057 | * and disabled in the descending order | ||
| 2058 | * | ||
| 2059 | */ | ||
| 2060 | I915_WRITE(WM3_LP_ILK, 0); | ||
| 2061 | I915_WRITE(WM2_LP_ILK, 0); | ||
| 2062 | I915_WRITE(WM1_LP_ILK, 0); | ||
| 2063 | |||
| 2064 | if (!single_plane_enabled(enabled) || | ||
| 2065 | dev_priv->sprite_scaling_enabled) | ||
| 2066 | return; | ||
| 2067 | enabled = ffs(enabled) - 1; | ||
| 2068 | |||
| 2069 | /* WM1 */ | ||
| 2070 | if (!ironlake_compute_srwm(dev, 1, enabled, | ||
| 2071 | dev_priv->wm.pri_latency[1] * 500, | ||
| 2072 | &sandybridge_display_srwm_info, | ||
| 2073 | &sandybridge_cursor_srwm_info, | ||
| 2074 | &fbc_wm, &plane_wm, &cursor_wm)) | ||
| 2075 | return; | ||
| 2076 | |||
| 2077 | I915_WRITE(WM1_LP_ILK, | ||
| 2078 | WM1_LP_SR_EN | | ||
| 2079 | (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) | | ||
| 2080 | (fbc_wm << WM1_LP_FBC_SHIFT) | | ||
| 2081 | (plane_wm << WM1_LP_SR_SHIFT) | | ||
| 2082 | cursor_wm); | ||
| 2083 | |||
| 2084 | /* WM2 */ | ||
| 2085 | if (!ironlake_compute_srwm(dev, 2, enabled, | ||
| 2086 | dev_priv->wm.pri_latency[2] * 500, | ||
| 2087 | &sandybridge_display_srwm_info, | ||
| 2088 | &sandybridge_cursor_srwm_info, | ||
| 2089 | &fbc_wm, &plane_wm, &cursor_wm)) | ||
| 2090 | return; | ||
| 2091 | |||
| 2092 | I915_WRITE(WM2_LP_ILK, | ||
| 2093 | WM2_LP_EN | | ||
| 2094 | (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) | | ||
| 2095 | (fbc_wm << WM1_LP_FBC_SHIFT) | | ||
| 2096 | (plane_wm << WM1_LP_SR_SHIFT) | | ||
| 2097 | cursor_wm); | ||
| 2098 | |||
| 2099 | /* WM3, note we have to correct the cursor latency */ | ||
| 2100 | if (!ironlake_compute_srwm(dev, 3, enabled, | ||
| 2101 | dev_priv->wm.pri_latency[3] * 500, | ||
| 2102 | &sandybridge_display_srwm_info, | ||
| 2103 | &sandybridge_cursor_srwm_info, | ||
| 2104 | &fbc_wm, &plane_wm, &ignore_cursor_wm) || | ||
| 2105 | !ironlake_compute_srwm(dev, 3, enabled, | ||
| 2106 | dev_priv->wm.cur_latency[3] * 500, | ||
| 2107 | &sandybridge_display_srwm_info, | ||
| 2108 | &sandybridge_cursor_srwm_info, | ||
| 2109 | &ignore_fbc_wm, &ignore_plane_wm, &cursor_wm)) | ||
| 2110 | return; | ||
| 2111 | |||
| 2112 | I915_WRITE(WM3_LP_ILK, | ||
| 2113 | WM3_LP_EN | | ||
| 2114 | (dev_priv->wm.pri_latency[3] << WM1_LP_LATENCY_SHIFT) | | ||
| 2115 | (fbc_wm << WM1_LP_FBC_SHIFT) | | ||
| 2116 | (plane_wm << WM1_LP_SR_SHIFT) | | ||
| 2117 | cursor_wm); | ||
| 2118 | } | ||
| 2119 | |||
| 2120 | static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev, | 1636 | static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev, |
| 2121 | struct drm_crtc *crtc) | 1637 | struct drm_crtc *crtc) |
| 2122 | { | 1638 | { |
| @@ -2185,7 +1701,7 @@ static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels, | |||
| 2185 | return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2; | 1701 | return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2; |
| 2186 | } | 1702 | } |
| 2187 | 1703 | ||
| 2188 | struct hsw_pipe_wm_parameters { | 1704 | struct ilk_pipe_wm_parameters { |
| 2189 | bool active; | 1705 | bool active; |
| 2190 | uint32_t pipe_htotal; | 1706 | uint32_t pipe_htotal; |
| 2191 | uint32_t pixel_rate; | 1707 | uint32_t pixel_rate; |
| @@ -2194,7 +1710,7 @@ struct hsw_pipe_wm_parameters { | |||
| 2194 | struct intel_plane_wm_parameters cur; | 1710 | struct intel_plane_wm_parameters cur; |
| 2195 | }; | 1711 | }; |
| 2196 | 1712 | ||
| 2197 | struct hsw_wm_maximums { | 1713 | struct ilk_wm_maximums { |
| 2198 | uint16_t pri; | 1714 | uint16_t pri; |
| 2199 | uint16_t spr; | 1715 | uint16_t spr; |
| 2200 | uint16_t cur; | 1716 | uint16_t cur; |
| @@ -2212,7 +1728,7 @@ struct intel_wm_config { | |||
| 2212 | * For both WM_PIPE and WM_LP. | 1728 | * For both WM_PIPE and WM_LP. |
| 2213 | * mem_value must be in 0.1us units. | 1729 | * mem_value must be in 0.1us units. |
| 2214 | */ | 1730 | */ |
| 2215 | static uint32_t ilk_compute_pri_wm(const struct hsw_pipe_wm_parameters *params, | 1731 | static uint32_t ilk_compute_pri_wm(const struct ilk_pipe_wm_parameters *params, |
| 2216 | uint32_t mem_value, | 1732 | uint32_t mem_value, |
| 2217 | bool is_lp) | 1733 | bool is_lp) |
| 2218 | { | 1734 | { |
| @@ -2241,7 +1757,7 @@ static uint32_t ilk_compute_pri_wm(const struct hsw_pipe_wm_parameters *params, | |||
| 2241 | * For both WM_PIPE and WM_LP. | 1757 | * For both WM_PIPE and WM_LP. |
| 2242 | * mem_value must be in 0.1us units. | 1758 | * mem_value must be in 0.1us units. |
| 2243 | */ | 1759 | */ |
| 2244 | static uint32_t ilk_compute_spr_wm(const struct hsw_pipe_wm_parameters *params, | 1760 | static uint32_t ilk_compute_spr_wm(const struct ilk_pipe_wm_parameters *params, |
| 2245 | uint32_t mem_value) | 1761 | uint32_t mem_value) |
| 2246 | { | 1762 | { |
| 2247 | uint32_t method1, method2; | 1763 | uint32_t method1, method2; |
| @@ -2264,7 +1780,7 @@ static uint32_t ilk_compute_spr_wm(const struct hsw_pipe_wm_parameters *params, | |||
| 2264 | * For both WM_PIPE and WM_LP. | 1780 | * For both WM_PIPE and WM_LP. |
| 2265 | * mem_value must be in 0.1us units. | 1781 | * mem_value must be in 0.1us units. |
| 2266 | */ | 1782 | */ |
| 2267 | static uint32_t ilk_compute_cur_wm(const struct hsw_pipe_wm_parameters *params, | 1783 | static uint32_t ilk_compute_cur_wm(const struct ilk_pipe_wm_parameters *params, |
| 2268 | uint32_t mem_value) | 1784 | uint32_t mem_value) |
| 2269 | { | 1785 | { |
| 2270 | if (!params->active || !params->cur.enabled) | 1786 | if (!params->active || !params->cur.enabled) |
| @@ -2278,7 +1794,7 @@ static uint32_t ilk_compute_cur_wm(const struct hsw_pipe_wm_parameters *params, | |||
| 2278 | } | 1794 | } |
| 2279 | 1795 | ||
| 2280 | /* Only for WM_LP. */ | 1796 | /* Only for WM_LP. */ |
| 2281 | static uint32_t ilk_compute_fbc_wm(const struct hsw_pipe_wm_parameters *params, | 1797 | static uint32_t ilk_compute_fbc_wm(const struct ilk_pipe_wm_parameters *params, |
| 2282 | uint32_t pri_val) | 1798 | uint32_t pri_val) |
| 2283 | { | 1799 | { |
| 2284 | if (!params->active || !params->pri.enabled) | 1800 | if (!params->active || !params->pri.enabled) |
| @@ -2383,7 +1899,7 @@ static void ilk_compute_wm_maximums(struct drm_device *dev, | |||
| 2383 | int level, | 1899 | int level, |
| 2384 | const struct intel_wm_config *config, | 1900 | const struct intel_wm_config *config, |
| 2385 | enum intel_ddb_partitioning ddb_partitioning, | 1901 | enum intel_ddb_partitioning ddb_partitioning, |
| 2386 | struct hsw_wm_maximums *max) | 1902 | struct ilk_wm_maximums *max) |
| 2387 | { | 1903 | { |
| 2388 | max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false); | 1904 | max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false); |
| 2389 | max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true); | 1905 | max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true); |
| @@ -2392,7 +1908,7 @@ static void ilk_compute_wm_maximums(struct drm_device *dev, | |||
| 2392 | } | 1908 | } |
| 2393 | 1909 | ||
| 2394 | static bool ilk_validate_wm_level(int level, | 1910 | static bool ilk_validate_wm_level(int level, |
| 2395 | const struct hsw_wm_maximums *max, | 1911 | const struct ilk_wm_maximums *max, |
| 2396 | struct intel_wm_level *result) | 1912 | struct intel_wm_level *result) |
| 2397 | { | 1913 | { |
| 2398 | bool ret; | 1914 | bool ret; |
| @@ -2434,7 +1950,7 @@ static bool ilk_validate_wm_level(int level, | |||
| 2434 | 1950 | ||
| 2435 | static void ilk_compute_wm_level(struct drm_i915_private *dev_priv, | 1951 | static void ilk_compute_wm_level(struct drm_i915_private *dev_priv, |
| 2436 | int level, | 1952 | int level, |
| 2437 | const struct hsw_pipe_wm_parameters *p, | 1953 | const struct ilk_pipe_wm_parameters *p, |
| 2438 | struct intel_wm_level *result) | 1954 | struct intel_wm_level *result) |
| 2439 | { | 1955 | { |
| 2440 | uint16_t pri_latency = dev_priv->wm.pri_latency[level]; | 1956 | uint16_t pri_latency = dev_priv->wm.pri_latency[level]; |
| @@ -2482,7 +1998,7 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[5]) | |||
| 2482 | { | 1998 | { |
| 2483 | struct drm_i915_private *dev_priv = dev->dev_private; | 1999 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 2484 | 2000 | ||
| 2485 | if (IS_HASWELL(dev)) { | 2001 | if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { |
| 2486 | uint64_t sskpd = I915_READ64(MCH_SSKPD); | 2002 | uint64_t sskpd = I915_READ64(MCH_SSKPD); |
| 2487 | 2003 | ||
| 2488 | wm[0] = (sskpd >> 56) & 0xFF; | 2004 | wm[0] = (sskpd >> 56) & 0xFF; |
| @@ -2530,7 +2046,7 @@ static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5]) | |||
| 2530 | static int ilk_wm_max_level(const struct drm_device *dev) | 2046 | static int ilk_wm_max_level(const struct drm_device *dev) |
| 2531 | { | 2047 | { |
| 2532 | /* how many WM levels are we expecting */ | 2048 | /* how many WM levels are we expecting */ |
| 2533 | if (IS_HASWELL(dev)) | 2049 | if (IS_HASWELL(dev) || IS_BROADWELL(dev)) |
| 2534 | return 4; | 2050 | return 4; |
| 2535 | else if (INTEL_INFO(dev)->gen >= 6) | 2051 | else if (INTEL_INFO(dev)->gen >= 6) |
| 2536 | return 3; | 2052 | return 3; |
| @@ -2582,8 +2098,8 @@ static void intel_setup_wm_latency(struct drm_device *dev) | |||
| 2582 | intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency); | 2098 | intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency); |
| 2583 | } | 2099 | } |
| 2584 | 2100 | ||
| 2585 | static void hsw_compute_wm_parameters(struct drm_crtc *crtc, | 2101 | static void ilk_compute_wm_parameters(struct drm_crtc *crtc, |
| 2586 | struct hsw_pipe_wm_parameters *p, | 2102 | struct ilk_pipe_wm_parameters *p, |
| 2587 | struct intel_wm_config *config) | 2103 | struct intel_wm_config *config) |
| 2588 | { | 2104 | { |
| 2589 | struct drm_device *dev = crtc->dev; | 2105 | struct drm_device *dev = crtc->dev; |
| @@ -2593,7 +2109,7 @@ static void hsw_compute_wm_parameters(struct drm_crtc *crtc, | |||
| 2593 | 2109 | ||
| 2594 | p->active = intel_crtc_active(crtc); | 2110 | p->active = intel_crtc_active(crtc); |
| 2595 | if (p->active) { | 2111 | if (p->active) { |
| 2596 | p->pipe_htotal = intel_crtc->config.adjusted_mode.htotal; | 2112 | p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal; |
| 2597 | p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc); | 2113 | p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc); |
| 2598 | p->pri.bytes_per_pixel = crtc->fb->bits_per_pixel / 8; | 2114 | p->pri.bytes_per_pixel = crtc->fb->bits_per_pixel / 8; |
| 2599 | p->cur.bytes_per_pixel = 4; | 2115 | p->cur.bytes_per_pixel = 4; |
| @@ -2620,7 +2136,7 @@ static void hsw_compute_wm_parameters(struct drm_crtc *crtc, | |||
| 2620 | 2136 | ||
| 2621 | /* Compute new watermarks for the pipe */ | 2137 | /* Compute new watermarks for the pipe */ |
| 2622 | static bool intel_compute_pipe_wm(struct drm_crtc *crtc, | 2138 | static bool intel_compute_pipe_wm(struct drm_crtc *crtc, |
| 2623 | const struct hsw_pipe_wm_parameters *params, | 2139 | const struct ilk_pipe_wm_parameters *params, |
| 2624 | struct intel_pipe_wm *pipe_wm) | 2140 | struct intel_pipe_wm *pipe_wm) |
| 2625 | { | 2141 | { |
| 2626 | struct drm_device *dev = crtc->dev; | 2142 | struct drm_device *dev = crtc->dev; |
| @@ -2632,16 +2148,25 @@ static bool intel_compute_pipe_wm(struct drm_crtc *crtc, | |||
| 2632 | .sprites_enabled = params->spr.enabled, | 2148 | .sprites_enabled = params->spr.enabled, |
| 2633 | .sprites_scaled = params->spr.scaled, | 2149 | .sprites_scaled = params->spr.scaled, |
| 2634 | }; | 2150 | }; |
| 2635 | struct hsw_wm_maximums max; | 2151 | struct ilk_wm_maximums max; |
| 2636 | 2152 | ||
| 2637 | /* LP0 watermarks always use 1/2 DDB partitioning */ | 2153 | /* LP0 watermarks always use 1/2 DDB partitioning */ |
| 2638 | ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max); | 2154 | ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max); |
| 2639 | 2155 | ||
| 2156 | /* ILK/SNB: LP2+ watermarks only w/o sprites */ | ||
| 2157 | if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled) | ||
| 2158 | max_level = 1; | ||
| 2159 | |||
| 2160 | /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */ | ||
| 2161 | if (params->spr.scaled) | ||
| 2162 | max_level = 0; | ||
| 2163 | |||
| 2640 | for (level = 0; level <= max_level; level++) | 2164 | for (level = 0; level <= max_level; level++) |
| 2641 | ilk_compute_wm_level(dev_priv, level, params, | 2165 | ilk_compute_wm_level(dev_priv, level, params, |
| 2642 | &pipe_wm->wm[level]); | 2166 | &pipe_wm->wm[level]); |
| 2643 | 2167 | ||
| 2644 | pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc); | 2168 | if (IS_HASWELL(dev) || IS_BROADWELL(dev)) |
| 2169 | pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc); | ||
| 2645 | 2170 | ||
| 2646 | /* At least LP0 must be valid */ | 2171 | /* At least LP0 must be valid */ |
| 2647 | return ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]); | 2172 | return ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]); |
| @@ -2676,12 +2201,19 @@ static void ilk_merge_wm_level(struct drm_device *dev, | |||
| 2676 | * Merge all low power watermarks for all active pipes. | 2201 | * Merge all low power watermarks for all active pipes. |
| 2677 | */ | 2202 | */ |
| 2678 | static void ilk_wm_merge(struct drm_device *dev, | 2203 | static void ilk_wm_merge(struct drm_device *dev, |
| 2679 | const struct hsw_wm_maximums *max, | 2204 | const struct intel_wm_config *config, |
| 2205 | const struct ilk_wm_maximums *max, | ||
| 2680 | struct intel_pipe_wm *merged) | 2206 | struct intel_pipe_wm *merged) |
| 2681 | { | 2207 | { |
| 2682 | int level, max_level = ilk_wm_max_level(dev); | 2208 | int level, max_level = ilk_wm_max_level(dev); |
| 2683 | 2209 | ||
| 2684 | merged->fbc_wm_enabled = true; | 2210 | /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */ |
| 2211 | if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) && | ||
| 2212 | config->num_pipes_active > 1) | ||
| 2213 | return; | ||
| 2214 | |||
| 2215 | /* ILK: FBC WM must be disabled always */ | ||
| 2216 | merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6; | ||
| 2685 | 2217 | ||
| 2686 | /* merge each WM1+ level */ | 2218 | /* merge each WM1+ level */ |
| 2687 | for (level = 1; level <= max_level; level++) { | 2219 | for (level = 1; level <= max_level; level++) { |
| @@ -2701,6 +2233,20 @@ static void ilk_wm_merge(struct drm_device *dev, | |||
| 2701 | wm->fbc_val = 0; | 2233 | wm->fbc_val = 0; |
| 2702 | } | 2234 | } |
| 2703 | } | 2235 | } |
| 2236 | |||
| 2237 | /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */ | ||
| 2238 | /* | ||
| 2239 | * FIXME this is racy. FBC might get enabled later. | ||
| 2240 | * What we should check here is whether FBC can be | ||
| 2241 | * enabled sometime later. | ||
| 2242 | */ | ||
| 2243 | if (IS_GEN5(dev) && !merged->fbc_wm_enabled && intel_fbc_enabled(dev)) { | ||
| 2244 | for (level = 2; level <= max_level; level++) { | ||
| 2245 | struct intel_wm_level *wm = &merged->wm[level]; | ||
| 2246 | |||
| 2247 | wm->enable = false; | ||
| 2248 | } | ||
| 2249 | } | ||
| 2704 | } | 2250 | } |
| 2705 | 2251 | ||
| 2706 | static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm) | 2252 | static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm) |
| @@ -2709,10 +2255,21 @@ static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm) | |||
| 2709 | return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable); | 2255 | return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable); |
| 2710 | } | 2256 | } |
| 2711 | 2257 | ||
| 2712 | static void hsw_compute_wm_results(struct drm_device *dev, | 2258 | /* The value we need to program into the WM_LPx latency field */ |
| 2259 | static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level) | ||
| 2260 | { | ||
| 2261 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 2262 | |||
| 2263 | if (IS_HASWELL(dev) || IS_BROADWELL(dev)) | ||
| 2264 | return 2 * level; | ||
| 2265 | else | ||
| 2266 | return dev_priv->wm.pri_latency[level]; | ||
| 2267 | } | ||
| 2268 | |||
| 2269 | static void ilk_compute_wm_results(struct drm_device *dev, | ||
| 2713 | const struct intel_pipe_wm *merged, | 2270 | const struct intel_pipe_wm *merged, |
| 2714 | enum intel_ddb_partitioning partitioning, | 2271 | enum intel_ddb_partitioning partitioning, |
| 2715 | struct hsw_wm_values *results) | 2272 | struct ilk_wm_values *results) |
| 2716 | { | 2273 | { |
| 2717 | struct intel_crtc *intel_crtc; | 2274 | struct intel_crtc *intel_crtc; |
| 2718 | int level, wm_lp; | 2275 | int level, wm_lp; |
| @@ -2731,7 +2288,7 @@ static void hsw_compute_wm_results(struct drm_device *dev, | |||
| 2731 | break; | 2288 | break; |
| 2732 | 2289 | ||
| 2733 | results->wm_lp[wm_lp - 1] = WM3_LP_EN | | 2290 | results->wm_lp[wm_lp - 1] = WM3_LP_EN | |
| 2734 | ((level * 2) << WM1_LP_LATENCY_SHIFT) | | 2291 | (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) | |
| 2735 | (r->pri_val << WM1_LP_SR_SHIFT) | | 2292 | (r->pri_val << WM1_LP_SR_SHIFT) | |
| 2736 | r->cur_val; | 2293 | r->cur_val; |
| 2737 | 2294 | ||
| @@ -2742,7 +2299,11 @@ static void hsw_compute_wm_results(struct drm_device *dev, | |||
| 2742 | results->wm_lp[wm_lp - 1] |= | 2299 | results->wm_lp[wm_lp - 1] |= |
| 2743 | r->fbc_val << WM1_LP_FBC_SHIFT; | 2300 | r->fbc_val << WM1_LP_FBC_SHIFT; |
| 2744 | 2301 | ||
| 2745 | results->wm_lp_spr[wm_lp - 1] = r->spr_val; | 2302 | if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) { |
| 2303 | WARN_ON(wm_lp != 1); | ||
| 2304 | results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val; | ||
| 2305 | } else | ||
| 2306 | results->wm_lp_spr[wm_lp - 1] = r->spr_val; | ||
| 2746 | } | 2307 | } |
| 2747 | 2308 | ||
| 2748 | /* LP0 register values */ | 2309 | /* LP0 register values */ |
| @@ -2765,7 +2326,7 @@ static void hsw_compute_wm_results(struct drm_device *dev, | |||
| 2765 | 2326 | ||
| 2766 | /* Find the result with the highest level enabled. Check for enable_fbc_wm in | 2327 | /* Find the result with the highest level enabled. Check for enable_fbc_wm in |
| 2767 | * case both are at the same level. Prefer r1 in case they're the same. */ | 2328 | * case both are at the same level. Prefer r1 in case they're the same. */ |
| 2768 | static struct intel_pipe_wm *hsw_find_best_result(struct drm_device *dev, | 2329 | static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev, |
| 2769 | struct intel_pipe_wm *r1, | 2330 | struct intel_pipe_wm *r1, |
| 2770 | struct intel_pipe_wm *r2) | 2331 | struct intel_pipe_wm *r2) |
| 2771 | { | 2332 | { |
| @@ -2800,8 +2361,8 @@ static struct intel_pipe_wm *hsw_find_best_result(struct drm_device *dev, | |||
| 2800 | #define WM_DIRTY_DDB (1 << 25) | 2361 | #define WM_DIRTY_DDB (1 << 25) |
| 2801 | 2362 | ||
| 2802 | static unsigned int ilk_compute_wm_dirty(struct drm_device *dev, | 2363 | static unsigned int ilk_compute_wm_dirty(struct drm_device *dev, |
| 2803 | const struct hsw_wm_values *old, | 2364 | const struct ilk_wm_values *old, |
| 2804 | const struct hsw_wm_values *new) | 2365 | const struct ilk_wm_values *new) |
| 2805 | { | 2366 | { |
| 2806 | unsigned int dirty = 0; | 2367 | unsigned int dirty = 0; |
| 2807 | enum pipe pipe; | 2368 | enum pipe pipe; |
| @@ -2851,27 +2412,53 @@ static unsigned int ilk_compute_wm_dirty(struct drm_device *dev, | |||
| 2851 | return dirty; | 2412 | return dirty; |
| 2852 | } | 2413 | } |
| 2853 | 2414 | ||
| 2415 | static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv, | ||
| 2416 | unsigned int dirty) | ||
| 2417 | { | ||
| 2418 | struct ilk_wm_values *previous = &dev_priv->wm.hw; | ||
| 2419 | bool changed = false; | ||
| 2420 | |||
| 2421 | if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) { | ||
| 2422 | previous->wm_lp[2] &= ~WM1_LP_SR_EN; | ||
| 2423 | I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]); | ||
| 2424 | changed = true; | ||
| 2425 | } | ||
| 2426 | if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) { | ||
| 2427 | previous->wm_lp[1] &= ~WM1_LP_SR_EN; | ||
| 2428 | I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]); | ||
| 2429 | changed = true; | ||
| 2430 | } | ||
| 2431 | if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) { | ||
| 2432 | previous->wm_lp[0] &= ~WM1_LP_SR_EN; | ||
| 2433 | I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]); | ||
| 2434 | changed = true; | ||
| 2435 | } | ||
| 2436 | |||
| 2437 | /* | ||
| 2438 | * Don't touch WM1S_LP_EN here. | ||
| 2439 | * Doing so could cause underruns. | ||
| 2440 | */ | ||
| 2441 | |||
| 2442 | return changed; | ||
| 2443 | } | ||
| 2444 | |||
| 2854 | /* | 2445 | /* |
| 2855 | * The spec says we shouldn't write when we don't need, because every write | 2446 | * The spec says we shouldn't write when we don't need, because every write |
| 2856 | * causes WMs to be re-evaluated, expending some power. | 2447 | * causes WMs to be re-evaluated, expending some power. |
| 2857 | */ | 2448 | */ |
| 2858 | static void hsw_write_wm_values(struct drm_i915_private *dev_priv, | 2449 | static void ilk_write_wm_values(struct drm_i915_private *dev_priv, |
| 2859 | struct hsw_wm_values *results) | 2450 | struct ilk_wm_values *results) |
| 2860 | { | 2451 | { |
| 2861 | struct hsw_wm_values *previous = &dev_priv->wm.hw; | 2452 | struct drm_device *dev = dev_priv->dev; |
| 2453 | struct ilk_wm_values *previous = &dev_priv->wm.hw; | ||
| 2862 | unsigned int dirty; | 2454 | unsigned int dirty; |
| 2863 | uint32_t val; | 2455 | uint32_t val; |
| 2864 | 2456 | ||
| 2865 | dirty = ilk_compute_wm_dirty(dev_priv->dev, previous, results); | 2457 | dirty = ilk_compute_wm_dirty(dev, previous, results); |
| 2866 | if (!dirty) | 2458 | if (!dirty) |
| 2867 | return; | 2459 | return; |
| 2868 | 2460 | ||
| 2869 | if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != 0) | 2461 | _ilk_disable_lp_wm(dev_priv, dirty); |
| 2870 | I915_WRITE(WM3_LP_ILK, 0); | ||
| 2871 | if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != 0) | ||
| 2872 | I915_WRITE(WM2_LP_ILK, 0); | ||
| 2873 | if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != 0) | ||
| 2874 | I915_WRITE(WM1_LP_ILK, 0); | ||
| 2875 | 2462 | ||
| 2876 | if (dirty & WM_DIRTY_PIPE(PIPE_A)) | 2463 | if (dirty & WM_DIRTY_PIPE(PIPE_A)) |
| 2877 | I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]); | 2464 | I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]); |
| @@ -2888,12 +2475,21 @@ static void hsw_write_wm_values(struct drm_i915_private *dev_priv, | |||
| 2888 | I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]); | 2475 | I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]); |
| 2889 | 2476 | ||
| 2890 | if (dirty & WM_DIRTY_DDB) { | 2477 | if (dirty & WM_DIRTY_DDB) { |
| 2891 | val = I915_READ(WM_MISC); | 2478 | if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { |
| 2892 | if (results->partitioning == INTEL_DDB_PART_1_2) | 2479 | val = I915_READ(WM_MISC); |
| 2893 | val &= ~WM_MISC_DATA_PARTITION_5_6; | 2480 | if (results->partitioning == INTEL_DDB_PART_1_2) |
| 2894 | else | 2481 | val &= ~WM_MISC_DATA_PARTITION_5_6; |
| 2895 | val |= WM_MISC_DATA_PARTITION_5_6; | 2482 | else |
| 2896 | I915_WRITE(WM_MISC, val); | 2483 | val |= WM_MISC_DATA_PARTITION_5_6; |
| 2484 | I915_WRITE(WM_MISC, val); | ||
| 2485 | } else { | ||
| 2486 | val = I915_READ(DISP_ARB_CTL2); | ||
| 2487 | if (results->partitioning == INTEL_DDB_PART_1_2) | ||
| 2488 | val &= ~DISP_DATA_PARTITION_5_6; | ||
| 2489 | else | ||
| 2490 | val |= DISP_DATA_PARTITION_5_6; | ||
| 2491 | I915_WRITE(DISP_ARB_CTL2, val); | ||
| 2492 | } | ||
| 2897 | } | 2493 | } |
| 2898 | 2494 | ||
| 2899 | if (dirty & WM_DIRTY_FBC) { | 2495 | if (dirty & WM_DIRTY_FBC) { |
| @@ -2905,37 +2501,48 @@ static void hsw_write_wm_values(struct drm_i915_private *dev_priv, | |||
| 2905 | I915_WRITE(DISP_ARB_CTL, val); | 2501 | I915_WRITE(DISP_ARB_CTL, val); |
| 2906 | } | 2502 | } |
| 2907 | 2503 | ||
| 2908 | if (dirty & WM_DIRTY_LP(1) && previous->wm_lp_spr[0] != results->wm_lp_spr[0]) | 2504 | if (dirty & WM_DIRTY_LP(1) && |
| 2505 | previous->wm_lp_spr[0] != results->wm_lp_spr[0]) | ||
| 2909 | I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]); | 2506 | I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]); |
| 2910 | if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1]) | ||
| 2911 | I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]); | ||
| 2912 | if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2]) | ||
| 2913 | I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]); | ||
| 2914 | 2507 | ||
| 2915 | if (dirty & WM_DIRTY_LP(1) && results->wm_lp[0] != 0) | 2508 | if (INTEL_INFO(dev)->gen >= 7) { |
| 2509 | if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1]) | ||
| 2510 | I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]); | ||
| 2511 | if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2]) | ||
| 2512 | I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]); | ||
| 2513 | } | ||
| 2514 | |||
| 2515 | if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0]) | ||
| 2916 | I915_WRITE(WM1_LP_ILK, results->wm_lp[0]); | 2516 | I915_WRITE(WM1_LP_ILK, results->wm_lp[0]); |
| 2917 | if (dirty & WM_DIRTY_LP(2) && results->wm_lp[1] != 0) | 2517 | if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1]) |
| 2918 | I915_WRITE(WM2_LP_ILK, results->wm_lp[1]); | 2518 | I915_WRITE(WM2_LP_ILK, results->wm_lp[1]); |
| 2919 | if (dirty & WM_DIRTY_LP(3) && results->wm_lp[2] != 0) | 2519 | if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2]) |
| 2920 | I915_WRITE(WM3_LP_ILK, results->wm_lp[2]); | 2520 | I915_WRITE(WM3_LP_ILK, results->wm_lp[2]); |
| 2921 | 2521 | ||
| 2922 | dev_priv->wm.hw = *results; | 2522 | dev_priv->wm.hw = *results; |
| 2923 | } | 2523 | } |
| 2924 | 2524 | ||
| 2925 | static void haswell_update_wm(struct drm_crtc *crtc) | 2525 | static bool ilk_disable_lp_wm(struct drm_device *dev) |
| 2526 | { | ||
| 2527 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 2528 | |||
| 2529 | return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL); | ||
| 2530 | } | ||
| 2531 | |||
| 2532 | static void ilk_update_wm(struct drm_crtc *crtc) | ||
| 2926 | { | 2533 | { |
| 2927 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 2534 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 2928 | struct drm_device *dev = crtc->dev; | 2535 | struct drm_device *dev = crtc->dev; |
| 2929 | struct drm_i915_private *dev_priv = dev->dev_private; | 2536 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 2930 | struct hsw_wm_maximums max; | 2537 | struct ilk_wm_maximums max; |
| 2931 | struct hsw_pipe_wm_parameters params = {}; | 2538 | struct ilk_pipe_wm_parameters params = {}; |
| 2932 | struct hsw_wm_values results = {}; | 2539 | struct ilk_wm_values results = {}; |
| 2933 | enum intel_ddb_partitioning partitioning; | 2540 | enum intel_ddb_partitioning partitioning; |
| 2934 | struct intel_pipe_wm pipe_wm = {}; | 2541 | struct intel_pipe_wm pipe_wm = {}; |
| 2935 | struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm; | 2542 | struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm; |
| 2936 | struct intel_wm_config config = {}; | 2543 | struct intel_wm_config config = {}; |
| 2937 | 2544 | ||
| 2938 | hsw_compute_wm_parameters(crtc, ¶ms, &config); | 2545 | ilk_compute_wm_parameters(crtc, ¶ms, &config); |
| 2939 | 2546 | ||
| 2940 | intel_compute_pipe_wm(crtc, ¶ms, &pipe_wm); | 2547 | intel_compute_pipe_wm(crtc, ¶ms, &pipe_wm); |
| 2941 | 2548 | ||
| @@ -2945,15 +2552,15 @@ static void haswell_update_wm(struct drm_crtc *crtc) | |||
| 2945 | intel_crtc->wm.active = pipe_wm; | 2552 | intel_crtc->wm.active = pipe_wm; |
| 2946 | 2553 | ||
| 2947 | ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max); | 2554 | ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max); |
| 2948 | ilk_wm_merge(dev, &max, &lp_wm_1_2); | 2555 | ilk_wm_merge(dev, &config, &max, &lp_wm_1_2); |
| 2949 | 2556 | ||
| 2950 | /* 5/6 split only in single pipe config on IVB+ */ | 2557 | /* 5/6 split only in single pipe config on IVB+ */ |
| 2951 | if (INTEL_INFO(dev)->gen >= 7 && | 2558 | if (INTEL_INFO(dev)->gen >= 7 && |
| 2952 | config.num_pipes_active == 1 && config.sprites_enabled) { | 2559 | config.num_pipes_active == 1 && config.sprites_enabled) { |
| 2953 | ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max); | 2560 | ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max); |
| 2954 | ilk_wm_merge(dev, &max, &lp_wm_5_6); | 2561 | ilk_wm_merge(dev, &config, &max, &lp_wm_5_6); |
| 2955 | 2562 | ||
| 2956 | best_lp_wm = hsw_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6); | 2563 | best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6); |
| 2957 | } else { | 2564 | } else { |
| 2958 | best_lp_wm = &lp_wm_1_2; | 2565 | best_lp_wm = &lp_wm_1_2; |
| 2959 | } | 2566 | } |
| @@ -2961,16 +2568,17 @@ static void haswell_update_wm(struct drm_crtc *crtc) | |||
| 2961 | partitioning = (best_lp_wm == &lp_wm_1_2) ? | 2568 | partitioning = (best_lp_wm == &lp_wm_1_2) ? |
| 2962 | INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6; | 2569 | INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6; |
| 2963 | 2570 | ||
| 2964 | hsw_compute_wm_results(dev, best_lp_wm, partitioning, &results); | 2571 | ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results); |
| 2965 | 2572 | ||
| 2966 | hsw_write_wm_values(dev_priv, &results); | 2573 | ilk_write_wm_values(dev_priv, &results); |
| 2967 | } | 2574 | } |
| 2968 | 2575 | ||
| 2969 | static void haswell_update_sprite_wm(struct drm_plane *plane, | 2576 | static void ilk_update_sprite_wm(struct drm_plane *plane, |
| 2970 | struct drm_crtc *crtc, | 2577 | struct drm_crtc *crtc, |
| 2971 | uint32_t sprite_width, int pixel_size, | 2578 | uint32_t sprite_width, int pixel_size, |
| 2972 | bool enabled, bool scaled) | 2579 | bool enabled, bool scaled) |
| 2973 | { | 2580 | { |
| 2581 | struct drm_device *dev = plane->dev; | ||
| 2974 | struct intel_plane *intel_plane = to_intel_plane(plane); | 2582 | struct intel_plane *intel_plane = to_intel_plane(plane); |
| 2975 | 2583 | ||
| 2976 | intel_plane->wm.enabled = enabled; | 2584 | intel_plane->wm.enabled = enabled; |
| @@ -2978,176 +2586,24 @@ static void haswell_update_sprite_wm(struct drm_plane *plane, | |||
| 2978 | intel_plane->wm.horiz_pixels = sprite_width; | 2586 | intel_plane->wm.horiz_pixels = sprite_width; |
| 2979 | intel_plane->wm.bytes_per_pixel = pixel_size; | 2587 | intel_plane->wm.bytes_per_pixel = pixel_size; |
| 2980 | 2588 | ||
| 2981 | haswell_update_wm(crtc); | 2589 | /* |
| 2982 | } | 2590 | * IVB workaround: must disable low power watermarks for at least |
| 2983 | 2591 | * one frame before enabling scaling. LP watermarks can be re-enabled | |
| 2984 | static bool | 2592 | * when scaling is disabled. |
| 2985 | sandybridge_compute_sprite_wm(struct drm_device *dev, int plane, | 2593 | * |
| 2986 | uint32_t sprite_width, int pixel_size, | 2594 | * WaCxSRDisabledForSpriteScaling:ivb |
| 2987 | const struct intel_watermark_params *display, | 2595 | */ |
| 2988 | int display_latency_ns, int *sprite_wm) | 2596 | if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev)) |
| 2989 | { | 2597 | intel_wait_for_vblank(dev, intel_plane->pipe); |
| 2990 | struct drm_crtc *crtc; | ||
| 2991 | int clock; | ||
| 2992 | int entries, tlb_miss; | ||
| 2993 | |||
| 2994 | crtc = intel_get_crtc_for_plane(dev, plane); | ||
| 2995 | if (!intel_crtc_active(crtc)) { | ||
| 2996 | *sprite_wm = display->guard_size; | ||
| 2997 | return false; | ||
| 2998 | } | ||
| 2999 | |||
| 3000 | clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock; | ||
| 3001 | |||
| 3002 | /* Use the small buffer method to calculate the sprite watermark */ | ||
| 3003 | entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000; | ||
| 3004 | tlb_miss = display->fifo_size*display->cacheline_size - | ||
| 3005 | sprite_width * 8; | ||
| 3006 | if (tlb_miss > 0) | ||
| 3007 | entries += tlb_miss; | ||
| 3008 | entries = DIV_ROUND_UP(entries, display->cacheline_size); | ||
| 3009 | *sprite_wm = entries + display->guard_size; | ||
| 3010 | if (*sprite_wm > (int)display->max_wm) | ||
| 3011 | *sprite_wm = display->max_wm; | ||
| 3012 | |||
| 3013 | return true; | ||
| 3014 | } | ||
| 3015 | |||
| 3016 | static bool | ||
| 3017 | sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane, | ||
| 3018 | uint32_t sprite_width, int pixel_size, | ||
| 3019 | const struct intel_watermark_params *display, | ||
| 3020 | int latency_ns, int *sprite_wm) | ||
| 3021 | { | ||
| 3022 | struct drm_crtc *crtc; | ||
| 3023 | unsigned long line_time_us; | ||
| 3024 | int clock; | ||
| 3025 | int line_count, line_size; | ||
| 3026 | int small, large; | ||
| 3027 | int entries; | ||
| 3028 | |||
| 3029 | if (!latency_ns) { | ||
| 3030 | *sprite_wm = 0; | ||
| 3031 | return false; | ||
| 3032 | } | ||
| 3033 | |||
| 3034 | crtc = intel_get_crtc_for_plane(dev, plane); | ||
| 3035 | clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock; | ||
| 3036 | if (!clock) { | ||
| 3037 | *sprite_wm = 0; | ||
| 3038 | return false; | ||
| 3039 | } | ||
| 3040 | |||
| 3041 | line_time_us = (sprite_width * 1000) / clock; | ||
| 3042 | if (!line_time_us) { | ||
| 3043 | *sprite_wm = 0; | ||
| 3044 | return false; | ||
| 3045 | } | ||
| 3046 | |||
| 3047 | line_count = (latency_ns / line_time_us + 1000) / 1000; | ||
| 3048 | line_size = sprite_width * pixel_size; | ||
| 3049 | |||
| 3050 | /* Use the minimum of the small and large buffer method for primary */ | ||
| 3051 | small = ((clock * pixel_size / 1000) * latency_ns) / 1000; | ||
| 3052 | large = line_count * line_size; | ||
| 3053 | |||
| 3054 | entries = DIV_ROUND_UP(min(small, large), display->cacheline_size); | ||
| 3055 | *sprite_wm = entries + display->guard_size; | ||
| 3056 | |||
| 3057 | return *sprite_wm > 0x3ff ? false : true; | ||
| 3058 | } | ||
| 3059 | |||
| 3060 | static void sandybridge_update_sprite_wm(struct drm_plane *plane, | ||
| 3061 | struct drm_crtc *crtc, | ||
| 3062 | uint32_t sprite_width, int pixel_size, | ||
| 3063 | bool enabled, bool scaled) | ||
| 3064 | { | ||
| 3065 | struct drm_device *dev = plane->dev; | ||
| 3066 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 3067 | int pipe = to_intel_plane(plane)->pipe; | ||
| 3068 | int latency = dev_priv->wm.spr_latency[0] * 100; /* In unit 0.1us */ | ||
| 3069 | u32 val; | ||
| 3070 | int sprite_wm, reg; | ||
| 3071 | int ret; | ||
| 3072 | |||
| 3073 | if (!enabled) | ||
| 3074 | return; | ||
| 3075 | |||
| 3076 | switch (pipe) { | ||
| 3077 | case 0: | ||
| 3078 | reg = WM0_PIPEA_ILK; | ||
| 3079 | break; | ||
| 3080 | case 1: | ||
| 3081 | reg = WM0_PIPEB_ILK; | ||
| 3082 | break; | ||
| 3083 | case 2: | ||
| 3084 | reg = WM0_PIPEC_IVB; | ||
| 3085 | break; | ||
| 3086 | default: | ||
| 3087 | return; /* bad pipe */ | ||
| 3088 | } | ||
| 3089 | |||
| 3090 | ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size, | ||
| 3091 | &sandybridge_display_wm_info, | ||
| 3092 | latency, &sprite_wm); | ||
| 3093 | if (!ret) { | ||
| 3094 | DRM_DEBUG_KMS("failed to compute sprite wm for pipe %c\n", | ||
| 3095 | pipe_name(pipe)); | ||
| 3096 | return; | ||
| 3097 | } | ||
| 3098 | |||
| 3099 | val = I915_READ(reg); | ||
| 3100 | val &= ~WM0_PIPE_SPRITE_MASK; | ||
| 3101 | I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT)); | ||
| 3102 | DRM_DEBUG_KMS("sprite watermarks For pipe %c - %d\n", pipe_name(pipe), sprite_wm); | ||
| 3103 | |||
| 3104 | |||
| 3105 | ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width, | ||
| 3106 | pixel_size, | ||
| 3107 | &sandybridge_display_srwm_info, | ||
| 3108 | dev_priv->wm.spr_latency[1] * 500, | ||
| 3109 | &sprite_wm); | ||
| 3110 | if (!ret) { | ||
| 3111 | DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %c\n", | ||
| 3112 | pipe_name(pipe)); | ||
| 3113 | return; | ||
| 3114 | } | ||
| 3115 | I915_WRITE(WM1S_LP_ILK, sprite_wm); | ||
| 3116 | |||
| 3117 | /* Only IVB has two more LP watermarks for sprite */ | ||
| 3118 | if (!IS_IVYBRIDGE(dev)) | ||
| 3119 | return; | ||
| 3120 | |||
| 3121 | ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width, | ||
| 3122 | pixel_size, | ||
| 3123 | &sandybridge_display_srwm_info, | ||
| 3124 | dev_priv->wm.spr_latency[2] * 500, | ||
| 3125 | &sprite_wm); | ||
| 3126 | if (!ret) { | ||
| 3127 | DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %c\n", | ||
| 3128 | pipe_name(pipe)); | ||
| 3129 | return; | ||
| 3130 | } | ||
| 3131 | I915_WRITE(WM2S_LP_IVB, sprite_wm); | ||
| 3132 | 2598 | ||
| 3133 | ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width, | 2599 | ilk_update_wm(crtc); |
| 3134 | pixel_size, | ||
| 3135 | &sandybridge_display_srwm_info, | ||
| 3136 | dev_priv->wm.spr_latency[3] * 500, | ||
| 3137 | &sprite_wm); | ||
| 3138 | if (!ret) { | ||
| 3139 | DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %c\n", | ||
| 3140 | pipe_name(pipe)); | ||
| 3141 | return; | ||
| 3142 | } | ||
| 3143 | I915_WRITE(WM3S_LP_IVB, sprite_wm); | ||
| 3144 | } | 2600 | } |
| 3145 | 2601 | ||
| 3146 | static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc) | 2602 | static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc) |
| 3147 | { | 2603 | { |
| 3148 | struct drm_device *dev = crtc->dev; | 2604 | struct drm_device *dev = crtc->dev; |
| 3149 | struct drm_i915_private *dev_priv = dev->dev_private; | 2605 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 3150 | struct hsw_wm_values *hw = &dev_priv->wm.hw; | 2606 | struct ilk_wm_values *hw = &dev_priv->wm.hw; |
| 3151 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 2607 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 3152 | struct intel_pipe_wm *active = &intel_crtc->wm.active; | 2608 | struct intel_pipe_wm *active = &intel_crtc->wm.active; |
| 3153 | enum pipe pipe = intel_crtc->pipe; | 2609 | enum pipe pipe = intel_crtc->pipe; |
| @@ -3158,7 +2614,8 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc) | |||
| 3158 | }; | 2614 | }; |
| 3159 | 2615 | ||
| 3160 | hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]); | 2616 | hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]); |
| 3161 | hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe)); | 2617 | if (IS_HASWELL(dev) || IS_BROADWELL(dev)) |
| 2618 | hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe)); | ||
| 3162 | 2619 | ||
| 3163 | if (intel_crtc_active(crtc)) { | 2620 | if (intel_crtc_active(crtc)) { |
| 3164 | u32 tmp = hw->wm_pipe[pipe]; | 2621 | u32 tmp = hw->wm_pipe[pipe]; |
| @@ -3190,7 +2647,7 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc) | |||
| 3190 | void ilk_wm_get_hw_state(struct drm_device *dev) | 2647 | void ilk_wm_get_hw_state(struct drm_device *dev) |
| 3191 | { | 2648 | { |
| 3192 | struct drm_i915_private *dev_priv = dev->dev_private; | 2649 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 3193 | struct hsw_wm_values *hw = &dev_priv->wm.hw; | 2650 | struct ilk_wm_values *hw = &dev_priv->wm.hw; |
| 3194 | struct drm_crtc *crtc; | 2651 | struct drm_crtc *crtc; |
| 3195 | 2652 | ||
| 3196 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) | 2653 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) |
| @@ -3204,8 +2661,12 @@ void ilk_wm_get_hw_state(struct drm_device *dev) | |||
| 3204 | hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB); | 2661 | hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB); |
| 3205 | hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB); | 2662 | hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB); |
| 3206 | 2663 | ||
| 3207 | hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ? | 2664 | if (IS_HASWELL(dev) || IS_BROADWELL(dev)) |
| 3208 | INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; | 2665 | hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ? |
| 2666 | INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; | ||
| 2667 | else if (IS_IVYBRIDGE(dev)) | ||
| 2668 | hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ? | ||
| 2669 | INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; | ||
| 3209 | 2670 | ||
| 3210 | hw->enable_fbc_wm = | 2671 | hw->enable_fbc_wm = |
| 3211 | !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS); | 2672 | !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS); |
| @@ -3430,26 +2891,19 @@ static void ironlake_disable_drps(struct drm_device *dev) | |||
| 3430 | * ourselves, instead of doing a rmw cycle (which might result in us clearing | 2891 | * ourselves, instead of doing a rmw cycle (which might result in us clearing |
| 3431 | * all limits and the gpu stuck at whatever frequency it is at atm). | 2892 | * all limits and the gpu stuck at whatever frequency it is at atm). |
| 3432 | */ | 2893 | */ |
| 3433 | static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val) | 2894 | static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val) |
| 3434 | { | 2895 | { |
| 3435 | u32 limits; | 2896 | u32 limits; |
| 3436 | 2897 | ||
| 3437 | limits = 0; | ||
| 3438 | |||
| 3439 | if (*val >= dev_priv->rps.max_delay) | ||
| 3440 | *val = dev_priv->rps.max_delay; | ||
| 3441 | limits |= dev_priv->rps.max_delay << 24; | ||
| 3442 | |||
| 3443 | /* Only set the down limit when we've reached the lowest level to avoid | 2898 | /* Only set the down limit when we've reached the lowest level to avoid |
| 3444 | * getting more interrupts, otherwise leave this clear. This prevents a | 2899 | * getting more interrupts, otherwise leave this clear. This prevents a |
| 3445 | * race in the hw when coming out of rc6: There's a tiny window where | 2900 | * race in the hw when coming out of rc6: There's a tiny window where |
| 3446 | * the hw runs at the minimal clock before selecting the desired | 2901 | * the hw runs at the minimal clock before selecting the desired |
| 3447 | * frequency, if the down threshold expires in that window we will not | 2902 | * frequency, if the down threshold expires in that window we will not |
| 3448 | * receive a down interrupt. */ | 2903 | * receive a down interrupt. */ |
| 3449 | if (*val <= dev_priv->rps.min_delay) { | 2904 | limits = dev_priv->rps.max_delay << 24; |
| 3450 | *val = dev_priv->rps.min_delay; | 2905 | if (val <= dev_priv->rps.min_delay) |
| 3451 | limits |= dev_priv->rps.min_delay << 16; | 2906 | limits |= dev_priv->rps.min_delay << 16; |
| 3452 | } | ||
| 3453 | 2907 | ||
| 3454 | return limits; | 2908 | return limits; |
| 3455 | } | 2909 | } |
| @@ -3549,7 +3003,6 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val) | |||
| 3549 | void gen6_set_rps(struct drm_device *dev, u8 val) | 3003 | void gen6_set_rps(struct drm_device *dev, u8 val) |
| 3550 | { | 3004 | { |
| 3551 | struct drm_i915_private *dev_priv = dev->dev_private; | 3005 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 3552 | u32 limits = gen6_rps_limits(dev_priv, &val); | ||
| 3553 | 3006 | ||
| 3554 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); | 3007 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); |
| 3555 | WARN_ON(val > dev_priv->rps.max_delay); | 3008 | WARN_ON(val > dev_priv->rps.max_delay); |
| @@ -3572,7 +3025,8 @@ void gen6_set_rps(struct drm_device *dev, u8 val) | |||
| 3572 | /* Make sure we continue to get interrupts | 3025 | /* Make sure we continue to get interrupts |
| 3573 | * until we hit the minimum or maximum frequencies. | 3026 | * until we hit the minimum or maximum frequencies. |
| 3574 | */ | 3027 | */ |
| 3575 | I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits); | 3028 | I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, |
| 3029 | gen6_rps_limits(dev_priv, val)); | ||
| 3576 | 3030 | ||
| 3577 | POSTING_READ(GEN6_RPNSWREQ); | 3031 | POSTING_READ(GEN6_RPNSWREQ); |
| 3578 | 3032 | ||
| @@ -3583,9 +3037,11 @@ void gen6_set_rps(struct drm_device *dev, u8 val) | |||
| 3583 | 3037 | ||
| 3584 | void gen6_rps_idle(struct drm_i915_private *dev_priv) | 3038 | void gen6_rps_idle(struct drm_i915_private *dev_priv) |
| 3585 | { | 3039 | { |
| 3040 | struct drm_device *dev = dev_priv->dev; | ||
| 3041 | |||
| 3586 | mutex_lock(&dev_priv->rps.hw_lock); | 3042 | mutex_lock(&dev_priv->rps.hw_lock); |
| 3587 | if (dev_priv->rps.enabled) { | 3043 | if (dev_priv->rps.enabled) { |
| 3588 | if (dev_priv->info->is_valleyview) | 3044 | if (IS_VALLEYVIEW(dev)) |
| 3589 | valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_delay); | 3045 | valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_delay); |
| 3590 | else | 3046 | else |
| 3591 | gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay); | 3047 | gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay); |
| @@ -3596,9 +3052,11 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv) | |||
| 3596 | 3052 | ||
| 3597 | void gen6_rps_boost(struct drm_i915_private *dev_priv) | 3053 | void gen6_rps_boost(struct drm_i915_private *dev_priv) |
| 3598 | { | 3054 | { |
| 3055 | struct drm_device *dev = dev_priv->dev; | ||
| 3056 | |||
| 3599 | mutex_lock(&dev_priv->rps.hw_lock); | 3057 | mutex_lock(&dev_priv->rps.hw_lock); |
| 3600 | if (dev_priv->rps.enabled) { | 3058 | if (dev_priv->rps.enabled) { |
| 3601 | if (dev_priv->info->is_valleyview) | 3059 | if (IS_VALLEYVIEW(dev)) |
| 3602 | valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_delay); | 3060 | valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_delay); |
| 3603 | else | 3061 | else |
| 3604 | gen6_set_rps(dev_priv->dev, dev_priv->rps.max_delay); | 3062 | gen6_set_rps(dev_priv->dev, dev_priv->rps.max_delay); |
| @@ -3607,48 +3065,18 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv) | |||
| 3607 | mutex_unlock(&dev_priv->rps.hw_lock); | 3065 | mutex_unlock(&dev_priv->rps.hw_lock); |
| 3608 | } | 3066 | } |
| 3609 | 3067 | ||
| 3610 | /* | ||
| 3611 | * Wait until the previous freq change has completed, | ||
| 3612 | * or the timeout elapsed, and then update our notion | ||
| 3613 | * of the current GPU frequency. | ||
| 3614 | */ | ||
| 3615 | static void vlv_update_rps_cur_delay(struct drm_i915_private *dev_priv) | ||
| 3616 | { | ||
| 3617 | u32 pval; | ||
| 3618 | |||
| 3619 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); | ||
| 3620 | |||
| 3621 | if (wait_for(((pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS)) & GENFREQSTATUS) == 0, 10)) | ||
| 3622 | DRM_DEBUG_DRIVER("timed out waiting for Punit\n"); | ||
| 3623 | |||
| 3624 | pval >>= 8; | ||
| 3625 | |||
| 3626 | if (pval != dev_priv->rps.cur_delay) | ||
| 3627 | DRM_DEBUG_DRIVER("Punit overrode GPU freq: %d MHz (%u) requested, but got %d Mhz (%u)\n", | ||
| 3628 | vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.cur_delay), | ||
| 3629 | dev_priv->rps.cur_delay, | ||
| 3630 | vlv_gpu_freq(dev_priv->mem_freq, pval), pval); | ||
| 3631 | |||
| 3632 | dev_priv->rps.cur_delay = pval; | ||
| 3633 | } | ||
| 3634 | |||
| 3635 | void valleyview_set_rps(struct drm_device *dev, u8 val) | 3068 | void valleyview_set_rps(struct drm_device *dev, u8 val) |
| 3636 | { | 3069 | { |
| 3637 | struct drm_i915_private *dev_priv = dev->dev_private; | 3070 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 3638 | 3071 | ||
| 3639 | gen6_rps_limits(dev_priv, &val); | ||
| 3640 | |||
| 3641 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); | 3072 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); |
| 3642 | WARN_ON(val > dev_priv->rps.max_delay); | 3073 | WARN_ON(val > dev_priv->rps.max_delay); |
| 3643 | WARN_ON(val < dev_priv->rps.min_delay); | 3074 | WARN_ON(val < dev_priv->rps.min_delay); |
| 3644 | 3075 | ||
| 3645 | vlv_update_rps_cur_delay(dev_priv); | ||
| 3646 | |||
| 3647 | DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n", | 3076 | DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n", |
| 3648 | vlv_gpu_freq(dev_priv->mem_freq, | 3077 | vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay), |
| 3649 | dev_priv->rps.cur_delay), | ||
| 3650 | dev_priv->rps.cur_delay, | 3078 | dev_priv->rps.cur_delay, |
| 3651 | vlv_gpu_freq(dev_priv->mem_freq, val), val); | 3079 | vlv_gpu_freq(dev_priv, val), val); |
| 3652 | 3080 | ||
| 3653 | if (val == dev_priv->rps.cur_delay) | 3081 | if (val == dev_priv->rps.cur_delay) |
| 3654 | return; | 3082 | return; |
| @@ -3657,7 +3085,7 @@ void valleyview_set_rps(struct drm_device *dev, u8 val) | |||
| 3657 | 3085 | ||
| 3658 | dev_priv->rps.cur_delay = val; | 3086 | dev_priv->rps.cur_delay = val; |
| 3659 | 3087 | ||
| 3660 | trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv->mem_freq, val)); | 3088 | trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val)); |
| 3661 | } | 3089 | } |
| 3662 | 3090 | ||
| 3663 | static void gen6_disable_rps_interrupts(struct drm_device *dev) | 3091 | static void gen6_disable_rps_interrupts(struct drm_device *dev) |
| @@ -3775,7 +3203,7 @@ static void gen8_enable_rps(struct drm_device *dev) | |||
| 3775 | 3203 | ||
| 3776 | /* 1c & 1d: Get forcewake during program sequence. Although the driver | 3204 | /* 1c & 1d: Get forcewake during program sequence. Although the driver |
| 3777 | * hasn't enabled a state yet where we need forcewake, BIOS may have.*/ | 3205 | * hasn't enabled a state yet where we need forcewake, BIOS may have.*/ |
| 3778 | gen6_gt_force_wake_get(dev_priv); | 3206 | gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); |
| 3779 | 3207 | ||
| 3780 | /* 2a: Disable RC states. */ | 3208 | /* 2a: Disable RC states. */ |
| 3781 | I915_WRITE(GEN6_RC_CONTROL, 0); | 3209 | I915_WRITE(GEN6_RC_CONTROL, 0); |
| @@ -3832,7 +3260,7 @@ static void gen8_enable_rps(struct drm_device *dev) | |||
| 3832 | 3260 | ||
| 3833 | gen6_enable_rps_interrupts(dev); | 3261 | gen6_enable_rps_interrupts(dev); |
| 3834 | 3262 | ||
| 3835 | gen6_gt_force_wake_put(dev_priv); | 3263 | gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); |
| 3836 | } | 3264 | } |
| 3837 | 3265 | ||
| 3838 | static void gen6_enable_rps(struct drm_device *dev) | 3266 | static void gen6_enable_rps(struct drm_device *dev) |
| @@ -3862,7 +3290,7 @@ static void gen6_enable_rps(struct drm_device *dev) | |||
| 3862 | I915_WRITE(GTFIFODBG, gtfifodbg); | 3290 | I915_WRITE(GTFIFODBG, gtfifodbg); |
| 3863 | } | 3291 | } |
| 3864 | 3292 | ||
| 3865 | gen6_gt_force_wake_get(dev_priv); | 3293 | gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); |
| 3866 | 3294 | ||
| 3867 | rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); | 3295 | rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); |
| 3868 | gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); | 3296 | gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); |
| @@ -3954,7 +3382,7 @@ static void gen6_enable_rps(struct drm_device *dev) | |||
| 3954 | DRM_ERROR("Couldn't fix incorrect rc6 voltage\n"); | 3382 | DRM_ERROR("Couldn't fix incorrect rc6 voltage\n"); |
| 3955 | } | 3383 | } |
| 3956 | 3384 | ||
| 3957 | gen6_gt_force_wake_put(dev_priv); | 3385 | gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); |
| 3958 | } | 3386 | } |
| 3959 | 3387 | ||
| 3960 | void gen6_update_ring_freq(struct drm_device *dev) | 3388 | void gen6_update_ring_freq(struct drm_device *dev) |
| @@ -4116,7 +3544,8 @@ static void valleyview_enable_rps(struct drm_device *dev) | |||
| 4116 | 3544 | ||
| 4117 | valleyview_setup_pctx(dev); | 3545 | valleyview_setup_pctx(dev); |
| 4118 | 3546 | ||
| 4119 | gen6_gt_force_wake_get(dev_priv); | 3547 | /* If VLV, Forcewake all wells, else re-direct to regular path */ |
| 3548 | gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); | ||
| 4120 | 3549 | ||
| 4121 | I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400); | 3550 | I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400); |
| 4122 | I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000); | 3551 | I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000); |
| @@ -4140,7 +3569,7 @@ static void valleyview_enable_rps(struct drm_device *dev) | |||
| 4140 | for_each_ring(ring, dev_priv, i) | 3569 | for_each_ring(ring, dev_priv, i) |
| 4141 | I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10); | 3570 | I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10); |
| 4142 | 3571 | ||
| 4143 | I915_WRITE(GEN6_RC6_THRESHOLD, 0xc350); | 3572 | I915_WRITE(GEN6_RC6_THRESHOLD, 0x557); |
| 4144 | 3573 | ||
| 4145 | /* allows RC6 residency counter to work */ | 3574 | /* allows RC6 residency counter to work */ |
| 4146 | I915_WRITE(VLV_COUNTER_CONTROL, | 3575 | I915_WRITE(VLV_COUNTER_CONTROL, |
| @@ -4148,65 +3577,47 @@ static void valleyview_enable_rps(struct drm_device *dev) | |||
| 4148 | VLV_MEDIA_RC6_COUNT_EN | | 3577 | VLV_MEDIA_RC6_COUNT_EN | |
| 4149 | VLV_RENDER_RC6_COUNT_EN)); | 3578 | VLV_RENDER_RC6_COUNT_EN)); |
| 4150 | if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) | 3579 | if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) |
| 4151 | rc6_mode = GEN7_RC_CTL_TO_MODE; | 3580 | rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL; |
| 4152 | 3581 | ||
| 4153 | intel_print_rc6_info(dev, rc6_mode); | 3582 | intel_print_rc6_info(dev, rc6_mode); |
| 4154 | 3583 | ||
| 4155 | I915_WRITE(GEN6_RC_CONTROL, rc6_mode); | 3584 | I915_WRITE(GEN6_RC_CONTROL, rc6_mode); |
| 4156 | 3585 | ||
| 4157 | val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); | 3586 | val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); |
| 4158 | switch ((val >> 6) & 3) { | ||
| 4159 | case 0: | ||
| 4160 | case 1: | ||
| 4161 | dev_priv->mem_freq = 800; | ||
| 4162 | break; | ||
| 4163 | case 2: | ||
| 4164 | dev_priv->mem_freq = 1066; | ||
| 4165 | break; | ||
| 4166 | case 3: | ||
| 4167 | dev_priv->mem_freq = 1333; | ||
| 4168 | break; | ||
| 4169 | } | ||
| 4170 | DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq); | ||
| 4171 | 3587 | ||
| 4172 | DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no"); | 3588 | DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no"); |
| 4173 | DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); | 3589 | DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); |
| 4174 | 3590 | ||
| 4175 | dev_priv->rps.cur_delay = (val >> 8) & 0xff; | 3591 | dev_priv->rps.cur_delay = (val >> 8) & 0xff; |
| 4176 | DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n", | 3592 | DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n", |
| 4177 | vlv_gpu_freq(dev_priv->mem_freq, | 3593 | vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay), |
| 4178 | dev_priv->rps.cur_delay), | ||
| 4179 | dev_priv->rps.cur_delay); | 3594 | dev_priv->rps.cur_delay); |
| 4180 | 3595 | ||
| 4181 | dev_priv->rps.max_delay = valleyview_rps_max_freq(dev_priv); | 3596 | dev_priv->rps.max_delay = valleyview_rps_max_freq(dev_priv); |
| 4182 | dev_priv->rps.hw_max = dev_priv->rps.max_delay; | 3597 | dev_priv->rps.hw_max = dev_priv->rps.max_delay; |
| 4183 | DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", | 3598 | DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", |
| 4184 | vlv_gpu_freq(dev_priv->mem_freq, | 3599 | vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay), |
| 4185 | dev_priv->rps.max_delay), | ||
| 4186 | dev_priv->rps.max_delay); | 3600 | dev_priv->rps.max_delay); |
| 4187 | 3601 | ||
| 4188 | dev_priv->rps.rpe_delay = valleyview_rps_rpe_freq(dev_priv); | 3602 | dev_priv->rps.rpe_delay = valleyview_rps_rpe_freq(dev_priv); |
| 4189 | DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n", | 3603 | DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n", |
| 4190 | vlv_gpu_freq(dev_priv->mem_freq, | 3604 | vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay), |
| 4191 | dev_priv->rps.rpe_delay), | ||
| 4192 | dev_priv->rps.rpe_delay); | 3605 | dev_priv->rps.rpe_delay); |
| 4193 | 3606 | ||
| 4194 | dev_priv->rps.min_delay = valleyview_rps_min_freq(dev_priv); | 3607 | dev_priv->rps.min_delay = valleyview_rps_min_freq(dev_priv); |
| 4195 | DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", | 3608 | DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", |
| 4196 | vlv_gpu_freq(dev_priv->mem_freq, | 3609 | vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay), |
| 4197 | dev_priv->rps.min_delay), | ||
| 4198 | dev_priv->rps.min_delay); | 3610 | dev_priv->rps.min_delay); |
| 4199 | 3611 | ||
| 4200 | DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n", | 3612 | DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n", |
| 4201 | vlv_gpu_freq(dev_priv->mem_freq, | 3613 | vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay), |
| 4202 | dev_priv->rps.rpe_delay), | ||
| 4203 | dev_priv->rps.rpe_delay); | 3614 | dev_priv->rps.rpe_delay); |
| 4204 | 3615 | ||
| 4205 | valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay); | 3616 | valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay); |
| 4206 | 3617 | ||
| 4207 | gen6_enable_rps_interrupts(dev); | 3618 | gen6_enable_rps_interrupts(dev); |
| 4208 | 3619 | ||
| 4209 | gen6_gt_force_wake_put(dev_priv); | 3620 | gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); |
| 4210 | } | 3621 | } |
| 4211 | 3622 | ||
| 4212 | void ironlake_teardown_rc6(struct drm_device *dev) | 3623 | void ironlake_teardown_rc6(struct drm_device *dev) |
| @@ -5019,6 +4430,20 @@ static void g4x_disable_trickle_feed(struct drm_device *dev) | |||
| 5019 | } | 4430 | } |
| 5020 | } | 4431 | } |
| 5021 | 4432 | ||
| 4433 | static void ilk_init_lp_watermarks(struct drm_device *dev) | ||
| 4434 | { | ||
| 4435 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 4436 | |||
| 4437 | I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN); | ||
| 4438 | I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN); | ||
| 4439 | I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN); | ||
| 4440 | |||
| 4441 | /* | ||
| 4442 | * Don't touch WM1S_LP_EN here. | ||
| 4443 | * Doing so could cause underruns. | ||
| 4444 | */ | ||
| 4445 | } | ||
| 4446 | |||
| 5022 | static void ironlake_init_clock_gating(struct drm_device *dev) | 4447 | static void ironlake_init_clock_gating(struct drm_device *dev) |
| 5023 | { | 4448 | { |
| 5024 | struct drm_i915_private *dev_priv = dev->dev_private; | 4449 | struct drm_i915_private *dev_priv = dev->dev_private; |
| @@ -5052,9 +4477,8 @@ static void ironlake_init_clock_gating(struct drm_device *dev) | |||
| 5052 | I915_WRITE(DISP_ARB_CTL, | 4477 | I915_WRITE(DISP_ARB_CTL, |
| 5053 | (I915_READ(DISP_ARB_CTL) | | 4478 | (I915_READ(DISP_ARB_CTL) | |
| 5054 | DISP_FBC_WM_DIS)); | 4479 | DISP_FBC_WM_DIS)); |
| 5055 | I915_WRITE(WM3_LP_ILK, 0); | 4480 | |
| 5056 | I915_WRITE(WM2_LP_ILK, 0); | 4481 | ilk_init_lp_watermarks(dev); |
| 5057 | I915_WRITE(WM1_LP_ILK, 0); | ||
| 5058 | 4482 | ||
| 5059 | /* | 4483 | /* |
| 5060 | * Based on the document from hardware guys the following bits | 4484 | * Based on the document from hardware guys the following bits |
| @@ -5161,9 +4585,7 @@ static void gen6_init_clock_gating(struct drm_device *dev) | |||
| 5161 | I915_WRITE(GEN6_GT_MODE, | 4585 | I915_WRITE(GEN6_GT_MODE, |
| 5162 | _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE)); | 4586 | _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE)); |
| 5163 | 4587 | ||
| 5164 | I915_WRITE(WM3_LP_ILK, 0); | 4588 | ilk_init_lp_watermarks(dev); |
| 5165 | I915_WRITE(WM2_LP_ILK, 0); | ||
| 5166 | I915_WRITE(WM1_LP_ILK, 0); | ||
| 5167 | 4589 | ||
| 5168 | I915_WRITE(CACHE_MODE_0, | 4590 | I915_WRITE(CACHE_MODE_0, |
| 5169 | _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); | 4591 | _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); |
| @@ -5304,28 +4726,40 @@ static void gen8_init_clock_gating(struct drm_device *dev) | |||
| 5304 | I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, | 4726 | I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, |
| 5305 | _MASKED_BIT_ENABLE(GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE)); | 4727 | _MASKED_BIT_ENABLE(GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE)); |
| 5306 | 4728 | ||
| 5307 | /* WaSwitchSolVfFArbitrationPriority */ | 4729 | /* WaSwitchSolVfFArbitrationPriority:bdw */ |
| 5308 | I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); | 4730 | I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); |
| 5309 | 4731 | ||
| 5310 | /* WaPsrDPAMaskVBlankInSRD */ | 4732 | /* WaPsrDPAMaskVBlankInSRD:bdw */ |
| 5311 | I915_WRITE(CHICKEN_PAR1_1, | 4733 | I915_WRITE(CHICKEN_PAR1_1, |
| 5312 | I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD); | 4734 | I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD); |
| 5313 | 4735 | ||
| 5314 | /* WaPsrDPRSUnmaskVBlankInSRD */ | 4736 | /* WaPsrDPRSUnmaskVBlankInSRD:bdw */ |
| 5315 | for_each_pipe(i) { | 4737 | for_each_pipe(i) { |
| 5316 | I915_WRITE(CHICKEN_PIPESL_1(i), | 4738 | I915_WRITE(CHICKEN_PIPESL_1(i), |
| 5317 | I915_READ(CHICKEN_PIPESL_1(i) | | 4739 | I915_READ(CHICKEN_PIPESL_1(i) | |
| 5318 | DPRS_MASK_VBLANK_SRD)); | 4740 | DPRS_MASK_VBLANK_SRD)); |
| 5319 | } | 4741 | } |
| 4742 | |||
| 4743 | /* Use Force Non-Coherent whenever executing a 3D context. This is a | ||
| 4744 | * workaround for for a possible hang in the unlikely event a TLB | ||
| 4745 | * invalidation occurs during a PSD flush. | ||
| 4746 | */ | ||
| 4747 | I915_WRITE(HDC_CHICKEN0, | ||
| 4748 | I915_READ(HDC_CHICKEN0) | | ||
| 4749 | _MASKED_BIT_ENABLE(HDC_FORCE_NON_COHERENT)); | ||
| 4750 | |||
| 4751 | /* WaVSRefCountFullforceMissDisable:bdw */ | ||
| 4752 | /* WaDSRefCountFullforceMissDisable:bdw */ | ||
| 4753 | I915_WRITE(GEN7_FF_THREAD_MODE, | ||
| 4754 | I915_READ(GEN7_FF_THREAD_MODE) & | ||
| 4755 | ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME)); | ||
| 5320 | } | 4756 | } |
| 5321 | 4757 | ||
| 5322 | static void haswell_init_clock_gating(struct drm_device *dev) | 4758 | static void haswell_init_clock_gating(struct drm_device *dev) |
| 5323 | { | 4759 | { |
| 5324 | struct drm_i915_private *dev_priv = dev->dev_private; | 4760 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 5325 | 4761 | ||
| 5326 | I915_WRITE(WM3_LP_ILK, 0); | 4762 | ilk_init_lp_watermarks(dev); |
| 5327 | I915_WRITE(WM2_LP_ILK, 0); | ||
| 5328 | I915_WRITE(WM1_LP_ILK, 0); | ||
| 5329 | 4763 | ||
| 5330 | /* According to the spec, bit 13 (RCZUNIT) must be set on IVB. | 4764 | /* According to the spec, bit 13 (RCZUNIT) must be set on IVB. |
| 5331 | * This implements the WaDisableRCZUnitClockGating:hsw workaround. | 4765 | * This implements the WaDisableRCZUnitClockGating:hsw workaround. |
| @@ -5374,9 +4808,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev) | |||
| 5374 | struct drm_i915_private *dev_priv = dev->dev_private; | 4808 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 5375 | uint32_t snpcr; | 4809 | uint32_t snpcr; |
| 5376 | 4810 | ||
| 5377 | I915_WRITE(WM3_LP_ILK, 0); | 4811 | ilk_init_lp_watermarks(dev); |
| 5378 | I915_WRITE(WM2_LP_ILK, 0); | ||
| 5379 | I915_WRITE(WM1_LP_ILK, 0); | ||
| 5380 | 4812 | ||
| 5381 | I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE); | 4813 | I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE); |
| 5382 | 4814 | ||
| @@ -5463,6 +4895,26 @@ static void ivybridge_init_clock_gating(struct drm_device *dev) | |||
| 5463 | static void valleyview_init_clock_gating(struct drm_device *dev) | 4895 | static void valleyview_init_clock_gating(struct drm_device *dev) |
| 5464 | { | 4896 | { |
| 5465 | struct drm_i915_private *dev_priv = dev->dev_private; | 4897 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 4898 | u32 val; | ||
| 4899 | |||
| 4900 | mutex_lock(&dev_priv->rps.hw_lock); | ||
| 4901 | val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); | ||
| 4902 | mutex_unlock(&dev_priv->rps.hw_lock); | ||
| 4903 | switch ((val >> 6) & 3) { | ||
| 4904 | case 0: | ||
| 4905 | dev_priv->mem_freq = 800; | ||
| 4906 | break; | ||
| 4907 | case 1: | ||
| 4908 | dev_priv->mem_freq = 1066; | ||
| 4909 | break; | ||
| 4910 | case 2: | ||
| 4911 | dev_priv->mem_freq = 1333; | ||
| 4912 | break; | ||
| 4913 | case 3: | ||
| 4914 | dev_priv->mem_freq = 1333; | ||
| 4915 | break; | ||
| 4916 | } | ||
| 4917 | DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq); | ||
| 5466 | 4918 | ||
| 5467 | I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE); | 4919 | I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE); |
| 5468 | 4920 | ||
| @@ -5642,50 +5094,133 @@ void intel_suspend_hw(struct drm_device *dev) | |||
| 5642 | lpt_suspend_hw(dev); | 5094 | lpt_suspend_hw(dev); |
| 5643 | } | 5095 | } |
| 5644 | 5096 | ||
| 5645 | static bool is_always_on_power_domain(struct drm_device *dev, | 5097 | #define for_each_power_well(i, power_well, domain_mask, power_domains) \ |
| 5646 | enum intel_display_power_domain domain) | 5098 | for (i = 0; \ |
| 5647 | { | 5099 | i < (power_domains)->power_well_count && \ |
| 5648 | unsigned long always_on_domains; | 5100 | ((power_well) = &(power_domains)->power_wells[i]); \ |
| 5101 | i++) \ | ||
| 5102 | if ((power_well)->domains & (domain_mask)) | ||
| 5649 | 5103 | ||
| 5650 | BUG_ON(BIT(domain) & ~POWER_DOMAIN_MASK); | 5104 | #define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \ |
| 5651 | 5105 | for (i = (power_domains)->power_well_count - 1; \ | |
| 5652 | if (IS_BROADWELL(dev)) { | 5106 | i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\ |
| 5653 | always_on_domains = BDW_ALWAYS_ON_POWER_DOMAINS; | 5107 | i--) \ |
| 5654 | } else if (IS_HASWELL(dev)) { | 5108 | if ((power_well)->domains & (domain_mask)) |
| 5655 | always_on_domains = HSW_ALWAYS_ON_POWER_DOMAINS; | ||
| 5656 | } else { | ||
| 5657 | WARN_ON(1); | ||
| 5658 | return true; | ||
| 5659 | } | ||
| 5660 | |||
| 5661 | return BIT(domain) & always_on_domains; | ||
| 5662 | } | ||
| 5663 | 5109 | ||
| 5664 | /** | 5110 | /** |
| 5665 | * We should only use the power well if we explicitly asked the hardware to | 5111 | * We should only use the power well if we explicitly asked the hardware to |
| 5666 | * enable it, so check if it's enabled and also check if we've requested it to | 5112 | * enable it, so check if it's enabled and also check if we've requested it to |
| 5667 | * be enabled. | 5113 | * be enabled. |
| 5668 | */ | 5114 | */ |
| 5115 | static bool hsw_power_well_enabled(struct drm_device *dev, | ||
| 5116 | struct i915_power_well *power_well) | ||
| 5117 | { | ||
| 5118 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 5119 | |||
| 5120 | return I915_READ(HSW_PWR_WELL_DRIVER) == | ||
| 5121 | (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED); | ||
| 5122 | } | ||
| 5123 | |||
| 5124 | bool intel_display_power_enabled_sw(struct drm_device *dev, | ||
| 5125 | enum intel_display_power_domain domain) | ||
| 5126 | { | ||
| 5127 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 5128 | struct i915_power_domains *power_domains; | ||
| 5129 | |||
| 5130 | power_domains = &dev_priv->power_domains; | ||
| 5131 | |||
| 5132 | return power_domains->domain_use_count[domain]; | ||
| 5133 | } | ||
| 5134 | |||
| 5669 | bool intel_display_power_enabled(struct drm_device *dev, | 5135 | bool intel_display_power_enabled(struct drm_device *dev, |
| 5670 | enum intel_display_power_domain domain) | 5136 | enum intel_display_power_domain domain) |
| 5671 | { | 5137 | { |
| 5672 | struct drm_i915_private *dev_priv = dev->dev_private; | 5138 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 5139 | struct i915_power_domains *power_domains; | ||
| 5140 | struct i915_power_well *power_well; | ||
| 5141 | bool is_enabled; | ||
| 5142 | int i; | ||
| 5673 | 5143 | ||
| 5674 | if (!HAS_POWER_WELL(dev)) | 5144 | power_domains = &dev_priv->power_domains; |
| 5675 | return true; | ||
| 5676 | 5145 | ||
| 5677 | if (is_always_on_power_domain(dev, domain)) | 5146 | is_enabled = true; |
| 5678 | return true; | ||
| 5679 | 5147 | ||
| 5680 | return I915_READ(HSW_PWR_WELL_DRIVER) == | 5148 | mutex_lock(&power_domains->lock); |
| 5681 | (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED); | 5149 | for_each_power_well_rev(i, power_well, BIT(domain), power_domains) { |
| 5150 | if (power_well->always_on) | ||
| 5151 | continue; | ||
| 5152 | |||
| 5153 | if (!power_well->is_enabled(dev, power_well)) { | ||
| 5154 | is_enabled = false; | ||
| 5155 | break; | ||
| 5156 | } | ||
| 5157 | } | ||
| 5158 | mutex_unlock(&power_domains->lock); | ||
| 5159 | |||
| 5160 | return is_enabled; | ||
| 5161 | } | ||
| 5162 | |||
| 5163 | static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv) | ||
| 5164 | { | ||
| 5165 | struct drm_device *dev = dev_priv->dev; | ||
| 5166 | unsigned long irqflags; | ||
| 5167 | |||
| 5168 | /* | ||
| 5169 | * After we re-enable the power well, if we touch VGA register 0x3d5 | ||
| 5170 | * we'll get unclaimed register interrupts. This stops after we write | ||
| 5171 | * anything to the VGA MSR register. The vgacon module uses this | ||
| 5172 | * register all the time, so if we unbind our driver and, as a | ||
| 5173 | * consequence, bind vgacon, we'll get stuck in an infinite loop at | ||
| 5174 | * console_unlock(). So make here we touch the VGA MSR register, making | ||
| 5175 | * sure vgacon can keep working normally without triggering interrupts | ||
| 5176 | * and error messages. | ||
| 5177 | */ | ||
| 5178 | vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); | ||
| 5179 | outb(inb(VGA_MSR_READ), VGA_MSR_WRITE); | ||
| 5180 | vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); | ||
| 5181 | |||
| 5182 | if (IS_BROADWELL(dev)) { | ||
| 5183 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | ||
| 5184 | I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B), | ||
| 5185 | dev_priv->de_irq_mask[PIPE_B]); | ||
| 5186 | I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B), | ||
| 5187 | ~dev_priv->de_irq_mask[PIPE_B] | | ||
| 5188 | GEN8_PIPE_VBLANK); | ||
| 5189 | I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C), | ||
| 5190 | dev_priv->de_irq_mask[PIPE_C]); | ||
| 5191 | I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C), | ||
| 5192 | ~dev_priv->de_irq_mask[PIPE_C] | | ||
| 5193 | GEN8_PIPE_VBLANK); | ||
| 5194 | POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C)); | ||
| 5195 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | ||
| 5196 | } | ||
| 5682 | } | 5197 | } |
| 5683 | 5198 | ||
| 5684 | static void __intel_set_power_well(struct drm_device *dev, bool enable) | 5199 | static void hsw_power_well_post_disable(struct drm_i915_private *dev_priv) |
| 5200 | { | ||
| 5201 | struct drm_device *dev = dev_priv->dev; | ||
| 5202 | enum pipe p; | ||
| 5203 | unsigned long irqflags; | ||
| 5204 | |||
| 5205 | /* | ||
| 5206 | * After this, the registers on the pipes that are part of the power | ||
| 5207 | * well will become zero, so we have to adjust our counters according to | ||
| 5208 | * that. | ||
| 5209 | * | ||
| 5210 | * FIXME: Should we do this in general in drm_vblank_post_modeset? | ||
| 5211 | */ | ||
| 5212 | spin_lock_irqsave(&dev->vbl_lock, irqflags); | ||
| 5213 | for_each_pipe(p) | ||
| 5214 | if (p != PIPE_A) | ||
| 5215 | dev->vblank[p].last = 0; | ||
| 5216 | spin_unlock_irqrestore(&dev->vbl_lock, irqflags); | ||
| 5217 | } | ||
| 5218 | |||
| 5219 | static void hsw_set_power_well(struct drm_device *dev, | ||
| 5220 | struct i915_power_well *power_well, bool enable) | ||
| 5685 | { | 5221 | { |
| 5686 | struct drm_i915_private *dev_priv = dev->dev_private; | 5222 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 5687 | bool is_enabled, enable_requested; | 5223 | bool is_enabled, enable_requested; |
| 5688 | unsigned long irqflags; | ||
| 5689 | uint32_t tmp; | 5224 | uint32_t tmp; |
| 5690 | 5225 | ||
| 5691 | WARN_ON(dev_priv->pc8.enabled); | 5226 | WARN_ON(dev_priv->pc8.enabled); |
| @@ -5706,42 +5241,14 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable) | |||
| 5706 | DRM_ERROR("Timeout enabling power well\n"); | 5241 | DRM_ERROR("Timeout enabling power well\n"); |
| 5707 | } | 5242 | } |
| 5708 | 5243 | ||
| 5709 | if (IS_BROADWELL(dev)) { | 5244 | hsw_power_well_post_enable(dev_priv); |
| 5710 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | ||
| 5711 | I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B), | ||
| 5712 | dev_priv->de_irq_mask[PIPE_B]); | ||
| 5713 | I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B), | ||
| 5714 | ~dev_priv->de_irq_mask[PIPE_B] | | ||
| 5715 | GEN8_PIPE_VBLANK); | ||
| 5716 | I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C), | ||
| 5717 | dev_priv->de_irq_mask[PIPE_C]); | ||
| 5718 | I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C), | ||
| 5719 | ~dev_priv->de_irq_mask[PIPE_C] | | ||
| 5720 | GEN8_PIPE_VBLANK); | ||
| 5721 | POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C)); | ||
| 5722 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | ||
| 5723 | } | ||
| 5724 | } else { | 5245 | } else { |
| 5725 | if (enable_requested) { | 5246 | if (enable_requested) { |
| 5726 | enum pipe p; | ||
| 5727 | |||
| 5728 | I915_WRITE(HSW_PWR_WELL_DRIVER, 0); | 5247 | I915_WRITE(HSW_PWR_WELL_DRIVER, 0); |
| 5729 | POSTING_READ(HSW_PWR_WELL_DRIVER); | 5248 | POSTING_READ(HSW_PWR_WELL_DRIVER); |
| 5730 | DRM_DEBUG_KMS("Requesting to disable the power well\n"); | 5249 | DRM_DEBUG_KMS("Requesting to disable the power well\n"); |
| 5731 | 5250 | ||
| 5732 | /* | 5251 | hsw_power_well_post_disable(dev_priv); |
| 5733 | * After this, the registers on the pipes that are part | ||
| 5734 | * of the power well will become zero, so we have to | ||
| 5735 | * adjust our counters according to that. | ||
| 5736 | * | ||
| 5737 | * FIXME: Should we do this in general in | ||
| 5738 | * drm_vblank_post_modeset? | ||
| 5739 | */ | ||
| 5740 | spin_lock_irqsave(&dev->vbl_lock, irqflags); | ||
| 5741 | for_each_pipe(p) | ||
| 5742 | if (p != PIPE_A) | ||
| 5743 | dev->vblank[p].last = 0; | ||
| 5744 | spin_unlock_irqrestore(&dev->vbl_lock, irqflags); | ||
| 5745 | } | 5252 | } |
| 5746 | } | 5253 | } |
| 5747 | } | 5254 | } |
| @@ -5751,9 +5258,9 @@ static void __intel_power_well_get(struct drm_device *dev, | |||
| 5751 | { | 5258 | { |
| 5752 | struct drm_i915_private *dev_priv = dev->dev_private; | 5259 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 5753 | 5260 | ||
| 5754 | if (!power_well->count++) { | 5261 | if (!power_well->count++ && power_well->set) { |
| 5755 | hsw_disable_package_c8(dev_priv); | 5262 | hsw_disable_package_c8(dev_priv); |
| 5756 | __intel_set_power_well(dev, true); | 5263 | power_well->set(dev, power_well, true); |
| 5757 | } | 5264 | } |
| 5758 | } | 5265 | } |
| 5759 | 5266 | ||
| @@ -5763,8 +5270,10 @@ static void __intel_power_well_put(struct drm_device *dev, | |||
| 5763 | struct drm_i915_private *dev_priv = dev->dev_private; | 5270 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 5764 | 5271 | ||
| 5765 | WARN_ON(!power_well->count); | 5272 | WARN_ON(!power_well->count); |
| 5766 | if (!--power_well->count && i915_disable_power_well) { | 5273 | |
| 5767 | __intel_set_power_well(dev, false); | 5274 | if (!--power_well->count && power_well->set && |
| 5275 | i915_disable_power_well) { | ||
| 5276 | power_well->set(dev, power_well, false); | ||
| 5768 | hsw_enable_package_c8(dev_priv); | 5277 | hsw_enable_package_c8(dev_priv); |
| 5769 | } | 5278 | } |
| 5770 | } | 5279 | } |
| @@ -5774,17 +5283,18 @@ void intel_display_power_get(struct drm_device *dev, | |||
| 5774 | { | 5283 | { |
| 5775 | struct drm_i915_private *dev_priv = dev->dev_private; | 5284 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 5776 | struct i915_power_domains *power_domains; | 5285 | struct i915_power_domains *power_domains; |
| 5777 | 5286 | struct i915_power_well *power_well; | |
| 5778 | if (!HAS_POWER_WELL(dev)) | 5287 | int i; |
| 5779 | return; | ||
| 5780 | |||
| 5781 | if (is_always_on_power_domain(dev, domain)) | ||
| 5782 | return; | ||
| 5783 | 5288 | ||
| 5784 | power_domains = &dev_priv->power_domains; | 5289 | power_domains = &dev_priv->power_domains; |
| 5785 | 5290 | ||
| 5786 | mutex_lock(&power_domains->lock); | 5291 | mutex_lock(&power_domains->lock); |
| 5787 | __intel_power_well_get(dev, &power_domains->power_wells[0]); | 5292 | |
| 5293 | for_each_power_well(i, power_well, BIT(domain), power_domains) | ||
| 5294 | __intel_power_well_get(dev, power_well); | ||
| 5295 | |||
| 5296 | power_domains->domain_use_count[domain]++; | ||
| 5297 | |||
| 5788 | mutex_unlock(&power_domains->lock); | 5298 | mutex_unlock(&power_domains->lock); |
| 5789 | } | 5299 | } |
| 5790 | 5300 | ||
| @@ -5793,17 +5303,19 @@ void intel_display_power_put(struct drm_device *dev, | |||
| 5793 | { | 5303 | { |
| 5794 | struct drm_i915_private *dev_priv = dev->dev_private; | 5304 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 5795 | struct i915_power_domains *power_domains; | 5305 | struct i915_power_domains *power_domains; |
| 5796 | 5306 | struct i915_power_well *power_well; | |
| 5797 | if (!HAS_POWER_WELL(dev)) | 5307 | int i; |
| 5798 | return; | ||
| 5799 | |||
| 5800 | if (is_always_on_power_domain(dev, domain)) | ||
| 5801 | return; | ||
| 5802 | 5308 | ||
| 5803 | power_domains = &dev_priv->power_domains; | 5309 | power_domains = &dev_priv->power_domains; |
| 5804 | 5310 | ||
| 5805 | mutex_lock(&power_domains->lock); | 5311 | mutex_lock(&power_domains->lock); |
| 5806 | __intel_power_well_put(dev, &power_domains->power_wells[0]); | 5312 | |
| 5313 | WARN_ON(!power_domains->domain_use_count[domain]); | ||
| 5314 | power_domains->domain_use_count[domain]--; | ||
| 5315 | |||
| 5316 | for_each_power_well_rev(i, power_well, BIT(domain), power_domains) | ||
| 5317 | __intel_power_well_put(dev, power_well); | ||
| 5318 | |||
| 5807 | mutex_unlock(&power_domains->lock); | 5319 | mutex_unlock(&power_domains->lock); |
| 5808 | } | 5320 | } |
| 5809 | 5321 | ||
| @@ -5819,10 +5331,7 @@ void i915_request_power_well(void) | |||
| 5819 | 5331 | ||
| 5820 | dev_priv = container_of(hsw_pwr, struct drm_i915_private, | 5332 | dev_priv = container_of(hsw_pwr, struct drm_i915_private, |
| 5821 | power_domains); | 5333 | power_domains); |
| 5822 | 5334 | intel_display_power_get(dev_priv->dev, POWER_DOMAIN_AUDIO); | |
| 5823 | mutex_lock(&hsw_pwr->lock); | ||
| 5824 | __intel_power_well_get(dev_priv->dev, &hsw_pwr->power_wells[0]); | ||
| 5825 | mutex_unlock(&hsw_pwr->lock); | ||
| 5826 | } | 5335 | } |
| 5827 | EXPORT_SYMBOL_GPL(i915_request_power_well); | 5336 | EXPORT_SYMBOL_GPL(i915_request_power_well); |
| 5828 | 5337 | ||
| @@ -5836,24 +5345,71 @@ void i915_release_power_well(void) | |||
| 5836 | 5345 | ||
| 5837 | dev_priv = container_of(hsw_pwr, struct drm_i915_private, | 5346 | dev_priv = container_of(hsw_pwr, struct drm_i915_private, |
| 5838 | power_domains); | 5347 | power_domains); |
| 5839 | 5348 | intel_display_power_put(dev_priv->dev, POWER_DOMAIN_AUDIO); | |
| 5840 | mutex_lock(&hsw_pwr->lock); | ||
| 5841 | __intel_power_well_put(dev_priv->dev, &hsw_pwr->power_wells[0]); | ||
| 5842 | mutex_unlock(&hsw_pwr->lock); | ||
| 5843 | } | 5349 | } |
| 5844 | EXPORT_SYMBOL_GPL(i915_release_power_well); | 5350 | EXPORT_SYMBOL_GPL(i915_release_power_well); |
| 5845 | 5351 | ||
| 5352 | static struct i915_power_well i9xx_always_on_power_well[] = { | ||
| 5353 | { | ||
| 5354 | .name = "always-on", | ||
| 5355 | .always_on = 1, | ||
| 5356 | .domains = POWER_DOMAIN_MASK, | ||
| 5357 | }, | ||
| 5358 | }; | ||
| 5359 | |||
| 5360 | static struct i915_power_well hsw_power_wells[] = { | ||
| 5361 | { | ||
| 5362 | .name = "always-on", | ||
| 5363 | .always_on = 1, | ||
| 5364 | .domains = HSW_ALWAYS_ON_POWER_DOMAINS, | ||
| 5365 | }, | ||
| 5366 | { | ||
| 5367 | .name = "display", | ||
| 5368 | .domains = POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS, | ||
| 5369 | .is_enabled = hsw_power_well_enabled, | ||
| 5370 | .set = hsw_set_power_well, | ||
| 5371 | }, | ||
| 5372 | }; | ||
| 5373 | |||
| 5374 | static struct i915_power_well bdw_power_wells[] = { | ||
| 5375 | { | ||
| 5376 | .name = "always-on", | ||
| 5377 | .always_on = 1, | ||
| 5378 | .domains = BDW_ALWAYS_ON_POWER_DOMAINS, | ||
| 5379 | }, | ||
| 5380 | { | ||
| 5381 | .name = "display", | ||
| 5382 | .domains = POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS, | ||
| 5383 | .is_enabled = hsw_power_well_enabled, | ||
| 5384 | .set = hsw_set_power_well, | ||
| 5385 | }, | ||
| 5386 | }; | ||
| 5387 | |||
| 5388 | #define set_power_wells(power_domains, __power_wells) ({ \ | ||
| 5389 | (power_domains)->power_wells = (__power_wells); \ | ||
| 5390 | (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \ | ||
| 5391 | }) | ||
| 5392 | |||
| 5846 | int intel_power_domains_init(struct drm_device *dev) | 5393 | int intel_power_domains_init(struct drm_device *dev) |
| 5847 | { | 5394 | { |
| 5848 | struct drm_i915_private *dev_priv = dev->dev_private; | 5395 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 5849 | struct i915_power_domains *power_domains = &dev_priv->power_domains; | 5396 | struct i915_power_domains *power_domains = &dev_priv->power_domains; |
| 5850 | struct i915_power_well *power_well; | ||
| 5851 | 5397 | ||
| 5852 | mutex_init(&power_domains->lock); | 5398 | mutex_init(&power_domains->lock); |
| 5853 | hsw_pwr = power_domains; | ||
| 5854 | 5399 | ||
| 5855 | power_well = &power_domains->power_wells[0]; | 5400 | /* |
| 5856 | power_well->count = 0; | 5401 | * The enabling order will be from lower to higher indexed wells, |
| 5402 | * the disabling order is reversed. | ||
| 5403 | */ | ||
| 5404 | if (IS_HASWELL(dev)) { | ||
| 5405 | set_power_wells(power_domains, hsw_power_wells); | ||
| 5406 | hsw_pwr = power_domains; | ||
| 5407 | } else if (IS_BROADWELL(dev)) { | ||
| 5408 | set_power_wells(power_domains, bdw_power_wells); | ||
| 5409 | hsw_pwr = power_domains; | ||
| 5410 | } else { | ||
| 5411 | set_power_wells(power_domains, i9xx_always_on_power_well); | ||
| 5412 | } | ||
| 5857 | 5413 | ||
| 5858 | return 0; | 5414 | return 0; |
| 5859 | } | 5415 | } |
| @@ -5868,15 +5424,13 @@ static void intel_power_domains_resume(struct drm_device *dev) | |||
| 5868 | struct drm_i915_private *dev_priv = dev->dev_private; | 5424 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 5869 | struct i915_power_domains *power_domains = &dev_priv->power_domains; | 5425 | struct i915_power_domains *power_domains = &dev_priv->power_domains; |
| 5870 | struct i915_power_well *power_well; | 5426 | struct i915_power_well *power_well; |
| 5871 | 5427 | int i; | |
| 5872 | if (!HAS_POWER_WELL(dev)) | ||
| 5873 | return; | ||
| 5874 | 5428 | ||
| 5875 | mutex_lock(&power_domains->lock); | 5429 | mutex_lock(&power_domains->lock); |
| 5876 | 5430 | for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) { | |
| 5877 | power_well = &power_domains->power_wells[0]; | 5431 | if (power_well->set) |
| 5878 | __intel_set_power_well(dev, power_well->count > 0); | 5432 | power_well->set(dev, power_well, power_well->count > 0); |
| 5879 | 5433 | } | |
| 5880 | mutex_unlock(&power_domains->lock); | 5434 | mutex_unlock(&power_domains->lock); |
| 5881 | } | 5435 | } |
| 5882 | 5436 | ||
| @@ -5890,13 +5444,13 @@ void intel_power_domains_init_hw(struct drm_device *dev) | |||
| 5890 | { | 5444 | { |
| 5891 | struct drm_i915_private *dev_priv = dev->dev_private; | 5445 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 5892 | 5446 | ||
| 5893 | if (!HAS_POWER_WELL(dev)) | ||
| 5894 | return; | ||
| 5895 | |||
| 5896 | /* For now, we need the power well to be always enabled. */ | 5447 | /* For now, we need the power well to be always enabled. */ |
| 5897 | intel_display_set_init_power(dev, true); | 5448 | intel_display_set_init_power(dev, true); |
| 5898 | intel_power_domains_resume(dev); | 5449 | intel_power_domains_resume(dev); |
| 5899 | 5450 | ||
| 5451 | if (!(IS_HASWELL(dev) || IS_BROADWELL(dev))) | ||
| 5452 | return; | ||
| 5453 | |||
| 5900 | /* We're taking over the BIOS, so clear any requests made by it since | 5454 | /* We're taking over the BIOS, so clear any requests made by it since |
| 5901 | * the driver is in charge now. */ | 5455 | * the driver is in charge now. */ |
| 5902 | if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST) | 5456 | if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST) |
| @@ -5914,31 +5468,86 @@ void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv) | |||
| 5914 | hsw_enable_package_c8(dev_priv); | 5468 | hsw_enable_package_c8(dev_priv); |
| 5915 | } | 5469 | } |
| 5916 | 5470 | ||
| 5471 | void intel_runtime_pm_get(struct drm_i915_private *dev_priv) | ||
| 5472 | { | ||
| 5473 | struct drm_device *dev = dev_priv->dev; | ||
| 5474 | struct device *device = &dev->pdev->dev; | ||
| 5475 | |||
| 5476 | if (!HAS_RUNTIME_PM(dev)) | ||
| 5477 | return; | ||
| 5478 | |||
| 5479 | pm_runtime_get_sync(device); | ||
| 5480 | WARN(dev_priv->pm.suspended, "Device still suspended.\n"); | ||
| 5481 | } | ||
| 5482 | |||
| 5483 | void intel_runtime_pm_put(struct drm_i915_private *dev_priv) | ||
| 5484 | { | ||
| 5485 | struct drm_device *dev = dev_priv->dev; | ||
| 5486 | struct device *device = &dev->pdev->dev; | ||
| 5487 | |||
| 5488 | if (!HAS_RUNTIME_PM(dev)) | ||
| 5489 | return; | ||
| 5490 | |||
| 5491 | pm_runtime_mark_last_busy(device); | ||
| 5492 | pm_runtime_put_autosuspend(device); | ||
| 5493 | } | ||
| 5494 | |||
| 5495 | void intel_init_runtime_pm(struct drm_i915_private *dev_priv) | ||
| 5496 | { | ||
| 5497 | struct drm_device *dev = dev_priv->dev; | ||
| 5498 | struct device *device = &dev->pdev->dev; | ||
| 5499 | |||
| 5500 | dev_priv->pm.suspended = false; | ||
| 5501 | |||
| 5502 | if (!HAS_RUNTIME_PM(dev)) | ||
| 5503 | return; | ||
| 5504 | |||
| 5505 | pm_runtime_set_active(device); | ||
| 5506 | |||
| 5507 | pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */ | ||
| 5508 | pm_runtime_mark_last_busy(device); | ||
| 5509 | pm_runtime_use_autosuspend(device); | ||
| 5510 | } | ||
| 5511 | |||
| 5512 | void intel_fini_runtime_pm(struct drm_i915_private *dev_priv) | ||
| 5513 | { | ||
| 5514 | struct drm_device *dev = dev_priv->dev; | ||
| 5515 | struct device *device = &dev->pdev->dev; | ||
| 5516 | |||
| 5517 | if (!HAS_RUNTIME_PM(dev)) | ||
| 5518 | return; | ||
| 5519 | |||
| 5520 | /* Make sure we're not suspended first. */ | ||
| 5521 | pm_runtime_get_sync(device); | ||
| 5522 | pm_runtime_disable(device); | ||
| 5523 | } | ||
| 5524 | |||
| 5917 | /* Set up chip specific power management-related functions */ | 5525 | /* Set up chip specific power management-related functions */ |
| 5918 | void intel_init_pm(struct drm_device *dev) | 5526 | void intel_init_pm(struct drm_device *dev) |
| 5919 | { | 5527 | { |
| 5920 | struct drm_i915_private *dev_priv = dev->dev_private; | 5528 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 5921 | 5529 | ||
| 5922 | if (I915_HAS_FBC(dev)) { | 5530 | if (HAS_FBC(dev)) { |
| 5923 | if (HAS_PCH_SPLIT(dev)) { | 5531 | if (INTEL_INFO(dev)->gen >= 7) { |
| 5924 | dev_priv->display.fbc_enabled = ironlake_fbc_enabled; | 5532 | dev_priv->display.fbc_enabled = ironlake_fbc_enabled; |
| 5925 | if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) | 5533 | dev_priv->display.enable_fbc = gen7_enable_fbc; |
| 5926 | dev_priv->display.enable_fbc = | 5534 | dev_priv->display.disable_fbc = ironlake_disable_fbc; |
| 5927 | gen7_enable_fbc; | 5535 | } else if (INTEL_INFO(dev)->gen >= 5) { |
| 5928 | else | 5536 | dev_priv->display.fbc_enabled = ironlake_fbc_enabled; |
| 5929 | dev_priv->display.enable_fbc = | 5537 | dev_priv->display.enable_fbc = ironlake_enable_fbc; |
| 5930 | ironlake_enable_fbc; | ||
| 5931 | dev_priv->display.disable_fbc = ironlake_disable_fbc; | 5538 | dev_priv->display.disable_fbc = ironlake_disable_fbc; |
| 5932 | } else if (IS_GM45(dev)) { | 5539 | } else if (IS_GM45(dev)) { |
| 5933 | dev_priv->display.fbc_enabled = g4x_fbc_enabled; | 5540 | dev_priv->display.fbc_enabled = g4x_fbc_enabled; |
| 5934 | dev_priv->display.enable_fbc = g4x_enable_fbc; | 5541 | dev_priv->display.enable_fbc = g4x_enable_fbc; |
| 5935 | dev_priv->display.disable_fbc = g4x_disable_fbc; | 5542 | dev_priv->display.disable_fbc = g4x_disable_fbc; |
| 5936 | } else if (IS_CRESTLINE(dev)) { | 5543 | } else { |
| 5937 | dev_priv->display.fbc_enabled = i8xx_fbc_enabled; | 5544 | dev_priv->display.fbc_enabled = i8xx_fbc_enabled; |
| 5938 | dev_priv->display.enable_fbc = i8xx_enable_fbc; | 5545 | dev_priv->display.enable_fbc = i8xx_enable_fbc; |
| 5939 | dev_priv->display.disable_fbc = i8xx_disable_fbc; | 5546 | dev_priv->display.disable_fbc = i8xx_disable_fbc; |
| 5547 | |||
| 5548 | /* This value was pulled out of someone's hat */ | ||
| 5549 | I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT); | ||
| 5940 | } | 5550 | } |
| 5941 | /* 855GM needs testing */ | ||
| 5942 | } | 5551 | } |
| 5943 | 5552 | ||
| 5944 | /* For cxsr */ | 5553 | /* For cxsr */ |
| @@ -5951,58 +5560,27 @@ void intel_init_pm(struct drm_device *dev) | |||
| 5951 | if (HAS_PCH_SPLIT(dev)) { | 5560 | if (HAS_PCH_SPLIT(dev)) { |
| 5952 | intel_setup_wm_latency(dev); | 5561 | intel_setup_wm_latency(dev); |
| 5953 | 5562 | ||
| 5954 | if (IS_GEN5(dev)) { | 5563 | if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] && |
| 5955 | if (dev_priv->wm.pri_latency[1] && | 5564 | dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) || |
| 5956 | dev_priv->wm.spr_latency[1] && | 5565 | (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] && |
| 5957 | dev_priv->wm.cur_latency[1]) | 5566 | dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) { |
| 5958 | dev_priv->display.update_wm = ironlake_update_wm; | 5567 | dev_priv->display.update_wm = ilk_update_wm; |
| 5959 | else { | 5568 | dev_priv->display.update_sprite_wm = ilk_update_sprite_wm; |
| 5960 | DRM_DEBUG_KMS("Failed to get proper latency. " | 5569 | } else { |
| 5961 | "Disable CxSR\n"); | 5570 | DRM_DEBUG_KMS("Failed to read display plane latency. " |
| 5962 | dev_priv->display.update_wm = NULL; | 5571 | "Disable CxSR\n"); |
| 5963 | } | 5572 | } |
| 5573 | |||
| 5574 | if (IS_GEN5(dev)) | ||
| 5964 | dev_priv->display.init_clock_gating = ironlake_init_clock_gating; | 5575 | dev_priv->display.init_clock_gating = ironlake_init_clock_gating; |
| 5965 | } else if (IS_GEN6(dev)) { | 5576 | else if (IS_GEN6(dev)) |
| 5966 | if (dev_priv->wm.pri_latency[0] && | ||
| 5967 | dev_priv->wm.spr_latency[0] && | ||
| 5968 | dev_priv->wm.cur_latency[0]) { | ||
| 5969 | dev_priv->display.update_wm = sandybridge_update_wm; | ||
| 5970 | dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm; | ||
| 5971 | } else { | ||
| 5972 | DRM_DEBUG_KMS("Failed to read display plane latency. " | ||
| 5973 | "Disable CxSR\n"); | ||
| 5974 | dev_priv->display.update_wm = NULL; | ||
| 5975 | } | ||
| 5976 | dev_priv->display.init_clock_gating = gen6_init_clock_gating; | 5577 | dev_priv->display.init_clock_gating = gen6_init_clock_gating; |
| 5977 | } else if (IS_IVYBRIDGE(dev)) { | 5578 | else if (IS_IVYBRIDGE(dev)) |
| 5978 | if (dev_priv->wm.pri_latency[0] && | ||
| 5979 | dev_priv->wm.spr_latency[0] && | ||
| 5980 | dev_priv->wm.cur_latency[0]) { | ||
| 5981 | dev_priv->display.update_wm = ivybridge_update_wm; | ||
| 5982 | dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm; | ||
| 5983 | } else { | ||
| 5984 | DRM_DEBUG_KMS("Failed to read display plane latency. " | ||
| 5985 | "Disable CxSR\n"); | ||
| 5986 | dev_priv->display.update_wm = NULL; | ||
| 5987 | } | ||
| 5988 | dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; | 5579 | dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; |
| 5989 | } else if (IS_HASWELL(dev)) { | 5580 | else if (IS_HASWELL(dev)) |
| 5990 | if (dev_priv->wm.pri_latency[0] && | ||
| 5991 | dev_priv->wm.spr_latency[0] && | ||
| 5992 | dev_priv->wm.cur_latency[0]) { | ||
| 5993 | dev_priv->display.update_wm = haswell_update_wm; | ||
| 5994 | dev_priv->display.update_sprite_wm = | ||
| 5995 | haswell_update_sprite_wm; | ||
| 5996 | } else { | ||
| 5997 | DRM_DEBUG_KMS("Failed to read display plane latency. " | ||
| 5998 | "Disable CxSR\n"); | ||
| 5999 | dev_priv->display.update_wm = NULL; | ||
| 6000 | } | ||
| 6001 | dev_priv->display.init_clock_gating = haswell_init_clock_gating; | 5581 | dev_priv->display.init_clock_gating = haswell_init_clock_gating; |
| 6002 | } else if (INTEL_INFO(dev)->gen == 8) { | 5582 | else if (INTEL_INFO(dev)->gen == 8) |
| 6003 | dev_priv->display.init_clock_gating = gen8_init_clock_gating; | 5583 | dev_priv->display.init_clock_gating = gen8_init_clock_gating; |
| 6004 | } else | ||
| 6005 | dev_priv->display.update_wm = NULL; | ||
| 6006 | } else if (IS_VALLEYVIEW(dev)) { | 5584 | } else if (IS_VALLEYVIEW(dev)) { |
| 6007 | dev_priv->display.update_wm = valleyview_update_wm; | 5585 | dev_priv->display.update_wm = valleyview_update_wm; |
| 6008 | dev_priv->display.init_clock_gating = | 5586 | dev_priv->display.init_clock_gating = |
| @@ -6036,21 +5614,21 @@ void intel_init_pm(struct drm_device *dev) | |||
| 6036 | dev_priv->display.update_wm = i9xx_update_wm; | 5614 | dev_priv->display.update_wm = i9xx_update_wm; |
| 6037 | dev_priv->display.get_fifo_size = i9xx_get_fifo_size; | 5615 | dev_priv->display.get_fifo_size = i9xx_get_fifo_size; |
| 6038 | dev_priv->display.init_clock_gating = gen3_init_clock_gating; | 5616 | dev_priv->display.init_clock_gating = gen3_init_clock_gating; |
| 6039 | } else if (IS_I865G(dev)) { | 5617 | } else if (IS_GEN2(dev)) { |
| 6040 | dev_priv->display.update_wm = i830_update_wm; | 5618 | if (INTEL_INFO(dev)->num_pipes == 1) { |
| 6041 | dev_priv->display.init_clock_gating = i85x_init_clock_gating; | 5619 | dev_priv->display.update_wm = i845_update_wm; |
| 6042 | dev_priv->display.get_fifo_size = i830_get_fifo_size; | ||
| 6043 | } else if (IS_I85X(dev)) { | ||
| 6044 | dev_priv->display.update_wm = i9xx_update_wm; | ||
| 6045 | dev_priv->display.get_fifo_size = i85x_get_fifo_size; | ||
| 6046 | dev_priv->display.init_clock_gating = i85x_init_clock_gating; | ||
| 6047 | } else { | ||
| 6048 | dev_priv->display.update_wm = i830_update_wm; | ||
| 6049 | dev_priv->display.init_clock_gating = i830_init_clock_gating; | ||
| 6050 | if (IS_845G(dev)) | ||
| 6051 | dev_priv->display.get_fifo_size = i845_get_fifo_size; | 5620 | dev_priv->display.get_fifo_size = i845_get_fifo_size; |
| 6052 | else | 5621 | } else { |
| 5622 | dev_priv->display.update_wm = i9xx_update_wm; | ||
| 6053 | dev_priv->display.get_fifo_size = i830_get_fifo_size; | 5623 | dev_priv->display.get_fifo_size = i830_get_fifo_size; |
| 5624 | } | ||
| 5625 | |||
| 5626 | if (IS_I85X(dev) || IS_I865G(dev)) | ||
| 5627 | dev_priv->display.init_clock_gating = i85x_init_clock_gating; | ||
| 5628 | else | ||
| 5629 | dev_priv->display.init_clock_gating = i830_init_clock_gating; | ||
| 5630 | } else { | ||
| 5631 | DRM_ERROR("unexpected fall-through in intel_init_pm\n"); | ||
| 6054 | } | 5632 | } |
| 6055 | } | 5633 | } |
| 6056 | 5634 | ||
| @@ -6101,59 +5679,48 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val) | |||
| 6101 | return 0; | 5679 | return 0; |
| 6102 | } | 5680 | } |
| 6103 | 5681 | ||
| 6104 | int vlv_gpu_freq(int ddr_freq, int val) | 5682 | int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val) |
| 6105 | { | 5683 | { |
| 6106 | int mult, base; | 5684 | int div; |
| 6107 | 5685 | ||
| 6108 | switch (ddr_freq) { | 5686 | /* 4 x czclk */ |
| 5687 | switch (dev_priv->mem_freq) { | ||
| 6109 | case 800: | 5688 | case 800: |
| 6110 | mult = 20; | 5689 | div = 10; |
| 6111 | base = 120; | ||
| 6112 | break; | 5690 | break; |
| 6113 | case 1066: | 5691 | case 1066: |
| 6114 | mult = 22; | 5692 | div = 12; |
| 6115 | base = 133; | ||
| 6116 | break; | 5693 | break; |
| 6117 | case 1333: | 5694 | case 1333: |
| 6118 | mult = 21; | 5695 | div = 16; |
| 6119 | base = 125; | ||
| 6120 | break; | 5696 | break; |
| 6121 | default: | 5697 | default: |
| 6122 | return -1; | 5698 | return -1; |
| 6123 | } | 5699 | } |
| 6124 | 5700 | ||
| 6125 | return ((val - 0xbd) * mult) + base; | 5701 | return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div); |
| 6126 | } | 5702 | } |
| 6127 | 5703 | ||
| 6128 | int vlv_freq_opcode(int ddr_freq, int val) | 5704 | int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val) |
| 6129 | { | 5705 | { |
| 6130 | int mult, base; | 5706 | int mul; |
| 6131 | 5707 | ||
| 6132 | switch (ddr_freq) { | 5708 | /* 4 x czclk */ |
| 5709 | switch (dev_priv->mem_freq) { | ||
| 6133 | case 800: | 5710 | case 800: |
| 6134 | mult = 20; | 5711 | mul = 10; |
| 6135 | base = 120; | ||
| 6136 | break; | 5712 | break; |
| 6137 | case 1066: | 5713 | case 1066: |
| 6138 | mult = 22; | 5714 | mul = 12; |
| 6139 | base = 133; | ||
| 6140 | break; | 5715 | break; |
| 6141 | case 1333: | 5716 | case 1333: |
| 6142 | mult = 21; | 5717 | mul = 16; |
| 6143 | base = 125; | ||
| 6144 | break; | 5718 | break; |
| 6145 | default: | 5719 | default: |
| 6146 | return -1; | 5720 | return -1; |
| 6147 | } | 5721 | } |
| 6148 | 5722 | ||
| 6149 | val /= mult; | 5723 | return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6; |
| 6150 | val -= base / mult; | ||
| 6151 | val += 0xbd; | ||
| 6152 | |||
| 6153 | if (val > 0xea) | ||
| 6154 | val = 0xea; | ||
| 6155 | |||
| 6156 | return val; | ||
| 6157 | } | 5724 | } |
| 6158 | 5725 | ||
| 6159 | void intel_pm_setup(struct drm_device *dev) | 5726 | void intel_pm_setup(struct drm_device *dev) |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index c2f09d456300..b7f1742caf87 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
| @@ -285,14 +285,16 @@ static int gen7_ring_fbc_flush(struct intel_ring_buffer *ring, u32 value) | |||
| 285 | if (!ring->fbc_dirty) | 285 | if (!ring->fbc_dirty) |
| 286 | return 0; | 286 | return 0; |
| 287 | 287 | ||
| 288 | ret = intel_ring_begin(ring, 4); | 288 | ret = intel_ring_begin(ring, 6); |
| 289 | if (ret) | 289 | if (ret) |
| 290 | return ret; | 290 | return ret; |
| 291 | intel_ring_emit(ring, MI_NOOP); | ||
| 292 | /* WaFbcNukeOn3DBlt:ivb/hsw */ | 291 | /* WaFbcNukeOn3DBlt:ivb/hsw */ |
| 293 | intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); | 292 | intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); |
| 294 | intel_ring_emit(ring, MSG_FBC_REND_STATE); | 293 | intel_ring_emit(ring, MSG_FBC_REND_STATE); |
| 295 | intel_ring_emit(ring, value); | 294 | intel_ring_emit(ring, value); |
| 295 | intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) | MI_SRM_LRM_GLOBAL_GTT); | ||
| 296 | intel_ring_emit(ring, MSG_FBC_REND_STATE); | ||
| 297 | intel_ring_emit(ring, ring->scratch.gtt_offset + 256); | ||
| 296 | intel_ring_advance(ring); | 298 | intel_ring_advance(ring); |
| 297 | 299 | ||
| 298 | ring->fbc_dirty = false; | 300 | ring->fbc_dirty = false; |
| @@ -354,7 +356,7 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring, | |||
| 354 | intel_ring_emit(ring, 0); | 356 | intel_ring_emit(ring, 0); |
| 355 | intel_ring_advance(ring); | 357 | intel_ring_advance(ring); |
| 356 | 358 | ||
| 357 | if (flush_domains) | 359 | if (!invalidate_domains && flush_domains) |
| 358 | return gen7_ring_fbc_flush(ring, FBC_REND_NUKE); | 360 | return gen7_ring_fbc_flush(ring, FBC_REND_NUKE); |
| 359 | 361 | ||
| 360 | return 0; | 362 | return 0; |
| @@ -436,7 +438,7 @@ static int init_ring_common(struct intel_ring_buffer *ring) | |||
| 436 | int ret = 0; | 438 | int ret = 0; |
| 437 | u32 head; | 439 | u32 head; |
| 438 | 440 | ||
| 439 | gen6_gt_force_wake_get(dev_priv); | 441 | gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); |
| 440 | 442 | ||
| 441 | if (I915_NEED_GFX_HWS(dev)) | 443 | if (I915_NEED_GFX_HWS(dev)) |
| 442 | intel_ring_setup_status_page(ring); | 444 | intel_ring_setup_status_page(ring); |
| @@ -509,7 +511,7 @@ static int init_ring_common(struct intel_ring_buffer *ring) | |||
| 509 | memset(&ring->hangcheck, 0, sizeof(ring->hangcheck)); | 511 | memset(&ring->hangcheck, 0, sizeof(ring->hangcheck)); |
| 510 | 512 | ||
| 511 | out: | 513 | out: |
| 512 | gen6_gt_force_wake_put(dev_priv); | 514 | gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); |
| 513 | 515 | ||
| 514 | return ret; | 516 | return ret; |
| 515 | } | 517 | } |
| @@ -661,19 +663,22 @@ gen6_add_request(struct intel_ring_buffer *ring) | |||
| 661 | struct drm_device *dev = ring->dev; | 663 | struct drm_device *dev = ring->dev; |
| 662 | struct drm_i915_private *dev_priv = dev->dev_private; | 664 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 663 | struct intel_ring_buffer *useless; | 665 | struct intel_ring_buffer *useless; |
| 664 | int i, ret; | 666 | int i, ret, num_dwords = 4; |
| 665 | 667 | ||
| 666 | ret = intel_ring_begin(ring, ((I915_NUM_RINGS-1) * | 668 | if (i915_semaphore_is_enabled(dev)) |
| 667 | MBOX_UPDATE_DWORDS) + | 669 | num_dwords += ((I915_NUM_RINGS-1) * MBOX_UPDATE_DWORDS); |
| 668 | 4); | 670 | #undef MBOX_UPDATE_DWORDS |
| 671 | |||
| 672 | ret = intel_ring_begin(ring, num_dwords); | ||
| 669 | if (ret) | 673 | if (ret) |
| 670 | return ret; | 674 | return ret; |
| 671 | #undef MBOX_UPDATE_DWORDS | ||
| 672 | 675 | ||
| 673 | for_each_ring(useless, dev_priv, i) { | 676 | if (i915_semaphore_is_enabled(dev)) { |
| 674 | u32 mbox_reg = ring->signal_mbox[i]; | 677 | for_each_ring(useless, dev_priv, i) { |
| 675 | if (mbox_reg != GEN6_NOSYNC) | 678 | u32 mbox_reg = ring->signal_mbox[i]; |
| 676 | update_mboxes(ring, mbox_reg); | 679 | if (mbox_reg != GEN6_NOSYNC) |
| 680 | update_mboxes(ring, mbox_reg); | ||
| 681 | } | ||
| 677 | } | 682 | } |
| 678 | 683 | ||
| 679 | intel_ring_emit(ring, MI_STORE_DWORD_INDEX); | 684 | intel_ring_emit(ring, MI_STORE_DWORD_INDEX); |
| @@ -1030,11 +1035,6 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring) | |||
| 1030 | if (!dev->irq_enabled) | 1035 | if (!dev->irq_enabled) |
| 1031 | return false; | 1036 | return false; |
| 1032 | 1037 | ||
| 1033 | /* It looks like we need to prevent the gt from suspending while waiting | ||
| 1034 | * for an notifiy irq, otherwise irqs seem to get lost on at least the | ||
| 1035 | * blt/bsd rings on ivb. */ | ||
| 1036 | gen6_gt_force_wake_get(dev_priv); | ||
| 1037 | |||
| 1038 | spin_lock_irqsave(&dev_priv->irq_lock, flags); | 1038 | spin_lock_irqsave(&dev_priv->irq_lock, flags); |
| 1039 | if (ring->irq_refcount++ == 0) { | 1039 | if (ring->irq_refcount++ == 0) { |
| 1040 | if (HAS_L3_DPF(dev) && ring->id == RCS) | 1040 | if (HAS_L3_DPF(dev) && ring->id == RCS) |
| @@ -1066,8 +1066,6 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring) | |||
| 1066 | ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask); | 1066 | ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask); |
| 1067 | } | 1067 | } |
| 1068 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); | 1068 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); |
| 1069 | |||
| 1070 | gen6_gt_force_wake_put(dev_priv); | ||
| 1071 | } | 1069 | } |
| 1072 | 1070 | ||
| 1073 | static bool | 1071 | static bool |
| @@ -1611,8 +1609,8 @@ intel_ring_alloc_seqno(struct intel_ring_buffer *ring) | |||
| 1611 | return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno); | 1609 | return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno); |
| 1612 | } | 1610 | } |
| 1613 | 1611 | ||
| 1614 | static int __intel_ring_begin(struct intel_ring_buffer *ring, | 1612 | static int __intel_ring_prepare(struct intel_ring_buffer *ring, |
| 1615 | int bytes) | 1613 | int bytes) |
| 1616 | { | 1614 | { |
| 1617 | int ret; | 1615 | int ret; |
| 1618 | 1616 | ||
| @@ -1628,7 +1626,6 @@ static int __intel_ring_begin(struct intel_ring_buffer *ring, | |||
| 1628 | return ret; | 1626 | return ret; |
| 1629 | } | 1627 | } |
| 1630 | 1628 | ||
| 1631 | ring->space -= bytes; | ||
| 1632 | return 0; | 1629 | return 0; |
| 1633 | } | 1630 | } |
| 1634 | 1631 | ||
| @@ -1643,12 +1640,17 @@ int intel_ring_begin(struct intel_ring_buffer *ring, | |||
| 1643 | if (ret) | 1640 | if (ret) |
| 1644 | return ret; | 1641 | return ret; |
| 1645 | 1642 | ||
| 1643 | ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t)); | ||
| 1644 | if (ret) | ||
| 1645 | return ret; | ||
| 1646 | |||
| 1646 | /* Preallocate the olr before touching the ring */ | 1647 | /* Preallocate the olr before touching the ring */ |
| 1647 | ret = intel_ring_alloc_seqno(ring); | 1648 | ret = intel_ring_alloc_seqno(ring); |
| 1648 | if (ret) | 1649 | if (ret) |
| 1649 | return ret; | 1650 | return ret; |
| 1650 | 1651 | ||
| 1651 | return __intel_ring_begin(ring, num_dwords * sizeof(uint32_t)); | 1652 | ring->space -= num_dwords * sizeof(uint32_t); |
| 1653 | return 0; | ||
| 1652 | } | 1654 | } |
| 1653 | 1655 | ||
| 1654 | void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno) | 1656 | void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno) |
| @@ -1838,7 +1840,7 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring, | |||
| 1838 | } | 1840 | } |
| 1839 | intel_ring_advance(ring); | 1841 | intel_ring_advance(ring); |
| 1840 | 1842 | ||
| 1841 | if (IS_GEN7(dev) && flush) | 1843 | if (IS_GEN7(dev) && !invalidate && flush) |
| 1842 | return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN); | 1844 | return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN); |
| 1843 | 1845 | ||
| 1844 | return 0; | 1846 | return 0; |
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index a583e8f718a7..95bdfb3c431c 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
| @@ -413,23 +413,34 @@ static const struct _sdvo_cmd_name { | |||
| 413 | static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd, | 413 | static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd, |
| 414 | const void *args, int args_len) | 414 | const void *args, int args_len) |
| 415 | { | 415 | { |
| 416 | int i; | 416 | int i, pos = 0; |
| 417 | #define BUF_LEN 256 | ||
| 418 | char buffer[BUF_LEN]; | ||
| 419 | |||
| 420 | #define BUF_PRINT(args...) \ | ||
| 421 | pos += snprintf(buffer + pos, max_t(int, BUF_LEN - pos, 0), args) | ||
| 422 | |||
| 417 | 423 | ||
| 418 | DRM_DEBUG_KMS("%s: W: %02X ", | 424 | for (i = 0; i < args_len; i++) { |
| 419 | SDVO_NAME(intel_sdvo), cmd); | 425 | BUF_PRINT("%02X ", ((u8 *)args)[i]); |
| 420 | for (i = 0; i < args_len; i++) | 426 | } |
| 421 | DRM_LOG_KMS("%02X ", ((u8 *)args)[i]); | 427 | for (; i < 8; i++) { |
| 422 | for (; i < 8; i++) | 428 | BUF_PRINT(" "); |
| 423 | DRM_LOG_KMS(" "); | 429 | } |
| 424 | for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) { | 430 | for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) { |
| 425 | if (cmd == sdvo_cmd_names[i].cmd) { | 431 | if (cmd == sdvo_cmd_names[i].cmd) { |
| 426 | DRM_LOG_KMS("(%s)", sdvo_cmd_names[i].name); | 432 | BUF_PRINT("(%s)", sdvo_cmd_names[i].name); |
| 427 | break; | 433 | break; |
| 428 | } | 434 | } |
| 429 | } | 435 | } |
| 430 | if (i == ARRAY_SIZE(sdvo_cmd_names)) | 436 | if (i == ARRAY_SIZE(sdvo_cmd_names)) { |
| 431 | DRM_LOG_KMS("(%02X)", cmd); | 437 | BUF_PRINT("(%02X)", cmd); |
| 432 | DRM_LOG_KMS("\n"); | 438 | } |
| 439 | BUG_ON(pos >= BUF_LEN - 1); | ||
| 440 | #undef BUF_PRINT | ||
| 441 | #undef BUF_LEN | ||
| 442 | |||
| 443 | DRM_DEBUG_KMS("%s: W: %02X %s\n", SDVO_NAME(intel_sdvo), cmd, buffer); | ||
| 433 | } | 444 | } |
| 434 | 445 | ||
| 435 | static const char *cmd_status_names[] = { | 446 | static const char *cmd_status_names[] = { |
| @@ -512,9 +523,10 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, | |||
| 512 | { | 523 | { |
| 513 | u8 retry = 15; /* 5 quick checks, followed by 10 long checks */ | 524 | u8 retry = 15; /* 5 quick checks, followed by 10 long checks */ |
| 514 | u8 status; | 525 | u8 status; |
| 515 | int i; | 526 | int i, pos = 0; |
| 527 | #define BUF_LEN 256 | ||
| 528 | char buffer[BUF_LEN]; | ||
| 516 | 529 | ||
| 517 | DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo)); | ||
| 518 | 530 | ||
| 519 | /* | 531 | /* |
| 520 | * The documentation states that all commands will be | 532 | * The documentation states that all commands will be |
| @@ -551,10 +563,13 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, | |||
| 551 | goto log_fail; | 563 | goto log_fail; |
| 552 | } | 564 | } |
| 553 | 565 | ||
| 566 | #define BUF_PRINT(args...) \ | ||
| 567 | pos += snprintf(buffer + pos, max_t(int, BUF_LEN - pos, 0), args) | ||
| 568 | |||
| 554 | if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP) | 569 | if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP) |
| 555 | DRM_LOG_KMS("(%s)", cmd_status_names[status]); | 570 | BUF_PRINT("(%s)", cmd_status_names[status]); |
| 556 | else | 571 | else |
| 557 | DRM_LOG_KMS("(??? %d)", status); | 572 | BUF_PRINT("(??? %d)", status); |
| 558 | 573 | ||
| 559 | if (status != SDVO_CMD_STATUS_SUCCESS) | 574 | if (status != SDVO_CMD_STATUS_SUCCESS) |
| 560 | goto log_fail; | 575 | goto log_fail; |
| @@ -565,13 +580,17 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, | |||
| 565 | SDVO_I2C_RETURN_0 + i, | 580 | SDVO_I2C_RETURN_0 + i, |
| 566 | &((u8 *)response)[i])) | 581 | &((u8 *)response)[i])) |
| 567 | goto log_fail; | 582 | goto log_fail; |
| 568 | DRM_LOG_KMS(" %02X", ((u8 *)response)[i]); | 583 | BUF_PRINT(" %02X", ((u8 *)response)[i]); |
| 569 | } | 584 | } |
| 570 | DRM_LOG_KMS("\n"); | 585 | BUG_ON(pos >= BUF_LEN - 1); |
| 586 | #undef BUF_PRINT | ||
| 587 | #undef BUF_LEN | ||
| 588 | |||
| 589 | DRM_DEBUG_KMS("%s: R: %s\n", SDVO_NAME(intel_sdvo), buffer); | ||
| 571 | return true; | 590 | return true; |
| 572 | 591 | ||
| 573 | log_fail: | 592 | log_fail: |
| 574 | DRM_LOG_KMS("... failed\n"); | 593 | DRM_DEBUG_KMS("%s: R: ... failed\n", SDVO_NAME(intel_sdvo)); |
| 575 | return false; | 594 | return false; |
| 576 | } | 595 | } |
| 577 | 596 | ||
| @@ -933,7 +952,7 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo) | |||
| 933 | 952 | ||
| 934 | static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo, | 953 | static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo, |
| 935 | unsigned if_index, uint8_t tx_rate, | 954 | unsigned if_index, uint8_t tx_rate, |
| 936 | uint8_t *data, unsigned length) | 955 | const uint8_t *data, unsigned length) |
| 937 | { | 956 | { |
| 938 | uint8_t set_buf_index[2] = { if_index, 0 }; | 957 | uint8_t set_buf_index[2] = { if_index, 0 }; |
| 939 | uint8_t hbuf_size, tmp[8]; | 958 | uint8_t hbuf_size, tmp[8]; |
| @@ -1517,8 +1536,9 @@ static void intel_sdvo_dpms(struct drm_connector *connector, int mode) | |||
| 1517 | intel_modeset_check_state(connector->dev); | 1536 | intel_modeset_check_state(connector->dev); |
| 1518 | } | 1537 | } |
| 1519 | 1538 | ||
| 1520 | static int intel_sdvo_mode_valid(struct drm_connector *connector, | 1539 | static enum drm_mode_status |
| 1521 | struct drm_display_mode *mode) | 1540 | intel_sdvo_mode_valid(struct drm_connector *connector, |
| 1541 | struct drm_display_mode *mode) | ||
| 1522 | { | 1542 | { |
| 1523 | struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); | 1543 | struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); |
| 1524 | 1544 | ||
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h index 770bdd6ecd9f..2e2d4eb4a00d 100644 --- a/drivers/gpu/drm/i915/intel_sdvo_regs.h +++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h | |||
| @@ -59,7 +59,7 @@ struct intel_sdvo_caps { | |||
| 59 | unsigned int stall_support:1; | 59 | unsigned int stall_support:1; |
| 60 | unsigned int pad:1; | 60 | unsigned int pad:1; |
| 61 | u16 output_flags; | 61 | u16 output_flags; |
| 62 | } __attribute__((packed)); | 62 | } __packed; |
| 63 | 63 | ||
| 64 | /* Note: SDVO detailed timing flags match EDID misc flags. */ | 64 | /* Note: SDVO detailed timing flags match EDID misc flags. */ |
| 65 | #define DTD_FLAG_HSYNC_POSITIVE (1 << 1) | 65 | #define DTD_FLAG_HSYNC_POSITIVE (1 << 1) |
| @@ -94,12 +94,12 @@ struct intel_sdvo_dtd { | |||
| 94 | u8 v_sync_off_high; | 94 | u8 v_sync_off_high; |
| 95 | u8 reserved; | 95 | u8 reserved; |
| 96 | } part2; | 96 | } part2; |
| 97 | } __attribute__((packed)); | 97 | } __packed; |
| 98 | 98 | ||
| 99 | struct intel_sdvo_pixel_clock_range { | 99 | struct intel_sdvo_pixel_clock_range { |
| 100 | u16 min; /**< pixel clock, in 10kHz units */ | 100 | u16 min; /**< pixel clock, in 10kHz units */ |
| 101 | u16 max; /**< pixel clock, in 10kHz units */ | 101 | u16 max; /**< pixel clock, in 10kHz units */ |
| 102 | } __attribute__((packed)); | 102 | } __packed; |
| 103 | 103 | ||
| 104 | struct intel_sdvo_preferred_input_timing_args { | 104 | struct intel_sdvo_preferred_input_timing_args { |
| 105 | u16 clock; | 105 | u16 clock; |
| @@ -108,7 +108,7 @@ struct intel_sdvo_preferred_input_timing_args { | |||
| 108 | u8 interlace:1; | 108 | u8 interlace:1; |
| 109 | u8 scaled:1; | 109 | u8 scaled:1; |
| 110 | u8 pad:6; | 110 | u8 pad:6; |
| 111 | } __attribute__((packed)); | 111 | } __packed; |
| 112 | 112 | ||
| 113 | /* I2C registers for SDVO */ | 113 | /* I2C registers for SDVO */ |
| 114 | #define SDVO_I2C_ARG_0 0x07 | 114 | #define SDVO_I2C_ARG_0 0x07 |
| @@ -162,7 +162,7 @@ struct intel_sdvo_get_trained_inputs_response { | |||
| 162 | unsigned int input0_trained:1; | 162 | unsigned int input0_trained:1; |
| 163 | unsigned int input1_trained:1; | 163 | unsigned int input1_trained:1; |
| 164 | unsigned int pad:6; | 164 | unsigned int pad:6; |
| 165 | } __attribute__((packed)); | 165 | } __packed; |
| 166 | 166 | ||
| 167 | /** Returns a struct intel_sdvo_output_flags of active outputs. */ | 167 | /** Returns a struct intel_sdvo_output_flags of active outputs. */ |
| 168 | #define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04 | 168 | #define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04 |
| @@ -219,7 +219,7 @@ struct intel_sdvo_get_interrupt_event_source_response { | |||
| 219 | unsigned int ambient_light_interrupt:1; | 219 | unsigned int ambient_light_interrupt:1; |
| 220 | unsigned int hdmi_audio_encrypt_change:1; | 220 | unsigned int hdmi_audio_encrypt_change:1; |
| 221 | unsigned int pad:6; | 221 | unsigned int pad:6; |
| 222 | } __attribute__((packed)); | 222 | } __packed; |
| 223 | 223 | ||
| 224 | /** | 224 | /** |
| 225 | * Selects which input is affected by future input commands. | 225 | * Selects which input is affected by future input commands. |
| @@ -232,7 +232,7 @@ struct intel_sdvo_get_interrupt_event_source_response { | |||
| 232 | struct intel_sdvo_set_target_input_args { | 232 | struct intel_sdvo_set_target_input_args { |
| 233 | unsigned int target_1:1; | 233 | unsigned int target_1:1; |
| 234 | unsigned int pad:7; | 234 | unsigned int pad:7; |
| 235 | } __attribute__((packed)); | 235 | } __packed; |
| 236 | 236 | ||
| 237 | /** | 237 | /** |
| 238 | * Takes a struct intel_sdvo_output_flags of which outputs are targeted by | 238 | * Takes a struct intel_sdvo_output_flags of which outputs are targeted by |
| @@ -370,7 +370,7 @@ struct intel_sdvo_tv_format { | |||
| 370 | unsigned int hdtv_std_eia_7702a_480i_60:1; | 370 | unsigned int hdtv_std_eia_7702a_480i_60:1; |
| 371 | unsigned int hdtv_std_eia_7702a_480p_60:1; | 371 | unsigned int hdtv_std_eia_7702a_480p_60:1; |
| 372 | unsigned int pad:3; | 372 | unsigned int pad:3; |
| 373 | } __attribute__((packed)); | 373 | } __packed; |
| 374 | 374 | ||
| 375 | #define SDVO_CMD_GET_TV_FORMAT 0x28 | 375 | #define SDVO_CMD_GET_TV_FORMAT 0x28 |
| 376 | 376 | ||
| @@ -401,7 +401,7 @@ struct intel_sdvo_sdtv_resolution_request { | |||
| 401 | unsigned int secam_l:1; | 401 | unsigned int secam_l:1; |
| 402 | unsigned int secam_60:1; | 402 | unsigned int secam_60:1; |
| 403 | unsigned int pad:5; | 403 | unsigned int pad:5; |
| 404 | } __attribute__((packed)); | 404 | } __packed; |
| 405 | 405 | ||
| 406 | struct intel_sdvo_sdtv_resolution_reply { | 406 | struct intel_sdvo_sdtv_resolution_reply { |
| 407 | unsigned int res_320x200:1; | 407 | unsigned int res_320x200:1; |
| @@ -426,7 +426,7 @@ struct intel_sdvo_sdtv_resolution_reply { | |||
| 426 | unsigned int res_1024x768:1; | 426 | unsigned int res_1024x768:1; |
| 427 | unsigned int res_1280x1024:1; | 427 | unsigned int res_1280x1024:1; |
| 428 | unsigned int pad:5; | 428 | unsigned int pad:5; |
| 429 | } __attribute__((packed)); | 429 | } __packed; |
| 430 | 430 | ||
| 431 | /* Get supported resolution with squire pixel aspect ratio that can be | 431 | /* Get supported resolution with squire pixel aspect ratio that can be |
| 432 | scaled for the requested HDTV format */ | 432 | scaled for the requested HDTV format */ |
| @@ -463,7 +463,7 @@ struct intel_sdvo_hdtv_resolution_request { | |||
| 463 | unsigned int hdtv_std_eia_7702a_480i_60:1; | 463 | unsigned int hdtv_std_eia_7702a_480i_60:1; |
| 464 | unsigned int hdtv_std_eia_7702a_480p_60:1; | 464 | unsigned int hdtv_std_eia_7702a_480p_60:1; |
| 465 | unsigned int pad:6; | 465 | unsigned int pad:6; |
| 466 | } __attribute__((packed)); | 466 | } __packed; |
| 467 | 467 | ||
| 468 | struct intel_sdvo_hdtv_resolution_reply { | 468 | struct intel_sdvo_hdtv_resolution_reply { |
| 469 | unsigned int res_640x480:1; | 469 | unsigned int res_640x480:1; |
| @@ -517,7 +517,7 @@ struct intel_sdvo_hdtv_resolution_reply { | |||
| 517 | 517 | ||
| 518 | unsigned int res_1280x768:1; | 518 | unsigned int res_1280x768:1; |
| 519 | unsigned int pad5:7; | 519 | unsigned int pad5:7; |
| 520 | } __attribute__((packed)); | 520 | } __packed; |
| 521 | 521 | ||
| 522 | /* Get supported power state returns info for encoder and monitor, rely on | 522 | /* Get supported power state returns info for encoder and monitor, rely on |
| 523 | last SetTargetInput and SetTargetOutput calls */ | 523 | last SetTargetInput and SetTargetOutput calls */ |
| @@ -557,13 +557,13 @@ struct sdvo_panel_power_sequencing { | |||
| 557 | 557 | ||
| 558 | unsigned int t4_high:2; | 558 | unsigned int t4_high:2; |
| 559 | unsigned int pad:6; | 559 | unsigned int pad:6; |
| 560 | } __attribute__((packed)); | 560 | } __packed; |
| 561 | 561 | ||
| 562 | #define SDVO_CMD_GET_MAX_BACKLIGHT_LEVEL 0x30 | 562 | #define SDVO_CMD_GET_MAX_BACKLIGHT_LEVEL 0x30 |
| 563 | struct sdvo_max_backlight_reply { | 563 | struct sdvo_max_backlight_reply { |
| 564 | u8 max_value; | 564 | u8 max_value; |
| 565 | u8 default_value; | 565 | u8 default_value; |
| 566 | } __attribute__((packed)); | 566 | } __packed; |
| 567 | 567 | ||
| 568 | #define SDVO_CMD_GET_BACKLIGHT_LEVEL 0x31 | 568 | #define SDVO_CMD_GET_BACKLIGHT_LEVEL 0x31 |
| 569 | #define SDVO_CMD_SET_BACKLIGHT_LEVEL 0x32 | 569 | #define SDVO_CMD_SET_BACKLIGHT_LEVEL 0x32 |
| @@ -573,14 +573,14 @@ struct sdvo_get_ambient_light_reply { | |||
| 573 | u16 trip_low; | 573 | u16 trip_low; |
| 574 | u16 trip_high; | 574 | u16 trip_high; |
| 575 | u16 value; | 575 | u16 value; |
| 576 | } __attribute__((packed)); | 576 | } __packed; |
| 577 | #define SDVO_CMD_SET_AMBIENT_LIGHT 0x34 | 577 | #define SDVO_CMD_SET_AMBIENT_LIGHT 0x34 |
| 578 | struct sdvo_set_ambient_light_reply { | 578 | struct sdvo_set_ambient_light_reply { |
| 579 | u16 trip_low; | 579 | u16 trip_low; |
| 580 | u16 trip_high; | 580 | u16 trip_high; |
| 581 | unsigned int enable:1; | 581 | unsigned int enable:1; |
| 582 | unsigned int pad:7; | 582 | unsigned int pad:7; |
| 583 | } __attribute__((packed)); | 583 | } __packed; |
| 584 | 584 | ||
| 585 | /* Set display power state */ | 585 | /* Set display power state */ |
| 586 | #define SDVO_CMD_SET_DISPLAY_POWER_STATE 0x7d | 586 | #define SDVO_CMD_SET_DISPLAY_POWER_STATE 0x7d |
| @@ -608,7 +608,7 @@ struct intel_sdvo_enhancements_reply { | |||
| 608 | unsigned int dither:1; | 608 | unsigned int dither:1; |
| 609 | unsigned int tv_chroma_filter:1; | 609 | unsigned int tv_chroma_filter:1; |
| 610 | unsigned int tv_luma_filter:1; | 610 | unsigned int tv_luma_filter:1; |
| 611 | } __attribute__((packed)); | 611 | } __packed; |
| 612 | 612 | ||
| 613 | /* Picture enhancement limits below are dependent on the current TV format, | 613 | /* Picture enhancement limits below are dependent on the current TV format, |
| 614 | * and thus need to be queried and set after it. | 614 | * and thus need to be queried and set after it. |
| @@ -630,7 +630,7 @@ struct intel_sdvo_enhancements_reply { | |||
| 630 | struct intel_sdvo_enhancement_limits_reply { | 630 | struct intel_sdvo_enhancement_limits_reply { |
| 631 | u16 max_value; | 631 | u16 max_value; |
| 632 | u16 default_value; | 632 | u16 default_value; |
| 633 | } __attribute__((packed)); | 633 | } __packed; |
| 634 | 634 | ||
| 635 | #define SDVO_CMD_GET_LVDS_PANEL_INFORMATION 0x7f | 635 | #define SDVO_CMD_GET_LVDS_PANEL_INFORMATION 0x7f |
| 636 | #define SDVO_CMD_SET_LVDS_PANEL_INFORMATION 0x80 | 636 | #define SDVO_CMD_SET_LVDS_PANEL_INFORMATION 0x80 |
| @@ -671,7 +671,7 @@ struct intel_sdvo_enhancement_limits_reply { | |||
| 671 | #define SDVO_CMD_SET_TV_LUMA_FILTER 0x79 | 671 | #define SDVO_CMD_SET_TV_LUMA_FILTER 0x79 |
| 672 | struct intel_sdvo_enhancements_arg { | 672 | struct intel_sdvo_enhancements_arg { |
| 673 | u16 value; | 673 | u16 value; |
| 674 | } __attribute__((packed)); | 674 | } __packed; |
| 675 | 675 | ||
| 676 | #define SDVO_CMD_GET_DOT_CRAWL 0x70 | 676 | #define SDVO_CMD_GET_DOT_CRAWL 0x70 |
| 677 | #define SDVO_CMD_SET_DOT_CRAWL 0x71 | 677 | #define SDVO_CMD_SET_DOT_CRAWL 0x71 |
| @@ -727,4 +727,4 @@ struct intel_sdvo_enhancements_arg { | |||
| 727 | struct intel_sdvo_encode { | 727 | struct intel_sdvo_encode { |
| 728 | u8 dvi_rev; | 728 | u8 dvi_rev; |
| 729 | u8 hdmi_rev; | 729 | u8 hdmi_rev; |
| 730 | } __attribute__ ((packed)); | 730 | } __packed; |
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c index 9944d8135e87..0954f132726e 100644 --- a/drivers/gpu/drm/i915/intel_sideband.c +++ b/drivers/gpu/drm/i915/intel_sideband.c | |||
| @@ -90,6 +90,22 @@ void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val) | |||
| 90 | mutex_unlock(&dev_priv->dpio_lock); | 90 | mutex_unlock(&dev_priv->dpio_lock); |
| 91 | } | 91 | } |
| 92 | 92 | ||
| 93 | u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg) | ||
| 94 | { | ||
| 95 | u32 val = 0; | ||
| 96 | |||
| 97 | vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT, | ||
| 98 | PUNIT_OPCODE_REG_READ, reg, &val); | ||
| 99 | |||
| 100 | return val; | ||
| 101 | } | ||
| 102 | |||
| 103 | void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val) | ||
| 104 | { | ||
| 105 | vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT, | ||
| 106 | PUNIT_OPCODE_REG_WRITE, reg, &val); | ||
| 107 | } | ||
| 108 | |||
| 93 | u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr) | 109 | u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr) |
| 94 | { | 110 | { |
| 95 | u32 val = 0; | 111 | u32 val = 0; |
| @@ -160,27 +176,18 @@ void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val) | |||
| 160 | PUNIT_OPCODE_REG_WRITE, reg, &val); | 176 | PUNIT_OPCODE_REG_WRITE, reg, &val); |
| 161 | } | 177 | } |
| 162 | 178 | ||
| 163 | static u32 vlv_get_phy_port(enum pipe pipe) | ||
| 164 | { | ||
| 165 | u32 port = IOSF_PORT_DPIO; | ||
| 166 | |||
| 167 | WARN_ON ((pipe != PIPE_A) && (pipe != PIPE_B)); | ||
| 168 | |||
| 169 | return port; | ||
| 170 | } | ||
| 171 | |||
| 172 | u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg) | 179 | u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg) |
| 173 | { | 180 | { |
| 174 | u32 val = 0; | 181 | u32 val = 0; |
| 175 | 182 | ||
| 176 | vlv_sideband_rw(dev_priv, DPIO_DEVFN, vlv_get_phy_port(pipe), | 183 | vlv_sideband_rw(dev_priv, DPIO_DEVFN, DPIO_PHY_IOSF_PORT(DPIO_PHY(pipe)), |
| 177 | DPIO_OPCODE_REG_READ, reg, &val); | 184 | DPIO_OPCODE_REG_READ, reg, &val); |
| 178 | return val; | 185 | return val; |
| 179 | } | 186 | } |
| 180 | 187 | ||
| 181 | void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val) | 188 | void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val) |
| 182 | { | 189 | { |
| 183 | vlv_sideband_rw(dev_priv, DPIO_DEVFN, vlv_get_phy_port(pipe), | 190 | vlv_sideband_rw(dev_priv, DPIO_DEVFN, DPIO_PHY_IOSF_PORT(DPIO_PHY(pipe)), |
| 184 | DPIO_OPCODE_REG_WRITE, reg, &val); | 191 | DPIO_OPCODE_REG_WRITE, reg, &val); |
| 185 | } | 192 | } |
| 186 | 193 | ||
| @@ -242,3 +249,17 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value, | |||
| 242 | return; | 249 | return; |
| 243 | } | 250 | } |
| 244 | } | 251 | } |
| 252 | |||
| 253 | u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg) | ||
| 254 | { | ||
| 255 | u32 val = 0; | ||
| 256 | vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI, | ||
| 257 | DPIO_OPCODE_REG_READ, reg, &val); | ||
| 258 | return val; | ||
| 259 | } | ||
| 260 | |||
| 261 | void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val) | ||
| 262 | { | ||
| 263 | vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI, | ||
| 264 | DPIO_OPCODE_REG_WRITE, reg, &val); | ||
| 265 | } | ||
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index b9fabf826f7d..716a3c9c0751 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c | |||
| @@ -104,6 +104,12 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc, | |||
| 104 | break; | 104 | break; |
| 105 | } | 105 | } |
| 106 | 106 | ||
| 107 | /* | ||
| 108 | * Enable gamma to match primary/cursor plane behaviour. | ||
| 109 | * FIXME should be user controllable via propertiesa. | ||
| 110 | */ | ||
| 111 | sprctl |= SP_GAMMA_ENABLE; | ||
| 112 | |||
| 107 | if (obj->tiling_mode != I915_TILING_NONE) | 113 | if (obj->tiling_mode != I915_TILING_NONE) |
| 108 | sprctl |= SP_TILED; | 114 | sprctl |= SP_TILED; |
| 109 | 115 | ||
| @@ -135,8 +141,8 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc, | |||
| 135 | 141 | ||
| 136 | I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w); | 142 | I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w); |
| 137 | I915_WRITE(SPCNTR(pipe, plane), sprctl); | 143 | I915_WRITE(SPCNTR(pipe, plane), sprctl); |
| 138 | I915_MODIFY_DISPBASE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) + | 144 | I915_WRITE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) + |
| 139 | sprsurf_offset); | 145 | sprsurf_offset); |
| 140 | POSTING_READ(SPSURF(pipe, plane)); | 146 | POSTING_READ(SPSURF(pipe, plane)); |
| 141 | } | 147 | } |
| 142 | 148 | ||
| @@ -152,7 +158,7 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc) | |||
| 152 | I915_WRITE(SPCNTR(pipe, plane), I915_READ(SPCNTR(pipe, plane)) & | 158 | I915_WRITE(SPCNTR(pipe, plane), I915_READ(SPCNTR(pipe, plane)) & |
| 153 | ~SP_ENABLE); | 159 | ~SP_ENABLE); |
| 154 | /* Activate double buffered register update */ | 160 | /* Activate double buffered register update */ |
| 155 | I915_MODIFY_DISPBASE(SPSURF(pipe, plane), 0); | 161 | I915_WRITE(SPSURF(pipe, plane), 0); |
| 156 | POSTING_READ(SPSURF(pipe, plane)); | 162 | POSTING_READ(SPSURF(pipe, plane)); |
| 157 | 163 | ||
| 158 | intel_update_sprite_watermarks(dplane, crtc, 0, 0, false, false); | 164 | intel_update_sprite_watermarks(dplane, crtc, 0, 0, false, false); |
| @@ -224,7 +230,6 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
| 224 | u32 sprctl, sprscale = 0; | 230 | u32 sprctl, sprscale = 0; |
| 225 | unsigned long sprsurf_offset, linear_offset; | 231 | unsigned long sprsurf_offset, linear_offset; |
| 226 | int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); | 232 | int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); |
| 227 | bool scaling_was_enabled = dev_priv->sprite_scaling_enabled; | ||
| 228 | 233 | ||
| 229 | sprctl = I915_READ(SPRCTL(pipe)); | 234 | sprctl = I915_READ(SPRCTL(pipe)); |
| 230 | 235 | ||
| @@ -257,6 +262,12 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
| 257 | BUG(); | 262 | BUG(); |
| 258 | } | 263 | } |
| 259 | 264 | ||
| 265 | /* | ||
| 266 | * Enable gamma to match primary/cursor plane behaviour. | ||
| 267 | * FIXME should be user controllable via propertiesa. | ||
| 268 | */ | ||
| 269 | sprctl |= SPRITE_GAMMA_ENABLE; | ||
| 270 | |||
| 260 | if (obj->tiling_mode != I915_TILING_NONE) | 271 | if (obj->tiling_mode != I915_TILING_NONE) |
| 261 | sprctl |= SPRITE_TILED; | 272 | sprctl |= SPRITE_TILED; |
| 262 | 273 | ||
| @@ -279,21 +290,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
| 279 | crtc_w--; | 290 | crtc_w--; |
| 280 | crtc_h--; | 291 | crtc_h--; |
| 281 | 292 | ||
| 282 | /* | 293 | if (crtc_w != src_w || crtc_h != src_h) |
| 283 | * IVB workaround: must disable low power watermarks for at least | ||
| 284 | * one frame before enabling scaling. LP watermarks can be re-enabled | ||
| 285 | * when scaling is disabled. | ||
| 286 | */ | ||
| 287 | if (crtc_w != src_w || crtc_h != src_h) { | ||
| 288 | dev_priv->sprite_scaling_enabled |= 1 << pipe; | ||
| 289 | |||
| 290 | if (!scaling_was_enabled) { | ||
| 291 | intel_update_watermarks(crtc); | ||
| 292 | intel_wait_for_vblank(dev, pipe); | ||
| 293 | } | ||
| 294 | sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h; | 294 | sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h; |
| 295 | } else | ||
| 296 | dev_priv->sprite_scaling_enabled &= ~(1 << pipe); | ||
| 297 | 295 | ||
| 298 | I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]); | 296 | I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]); |
| 299 | I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x); | 297 | I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x); |
| @@ -317,13 +315,9 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
| 317 | if (intel_plane->can_scale) | 315 | if (intel_plane->can_scale) |
| 318 | I915_WRITE(SPRSCALE(pipe), sprscale); | 316 | I915_WRITE(SPRSCALE(pipe), sprscale); |
| 319 | I915_WRITE(SPRCTL(pipe), sprctl); | 317 | I915_WRITE(SPRCTL(pipe), sprctl); |
| 320 | I915_MODIFY_DISPBASE(SPRSURF(pipe), | 318 | I915_WRITE(SPRSURF(pipe), |
| 321 | i915_gem_obj_ggtt_offset(obj) + sprsurf_offset); | 319 | i915_gem_obj_ggtt_offset(obj) + sprsurf_offset); |
| 322 | POSTING_READ(SPRSURF(pipe)); | 320 | POSTING_READ(SPRSURF(pipe)); |
| 323 | |||
| 324 | /* potentially re-enable LP watermarks */ | ||
| 325 | if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled) | ||
| 326 | intel_update_watermarks(crtc); | ||
| 327 | } | 321 | } |
| 328 | 322 | ||
| 329 | static void | 323 | static void |
| @@ -333,23 +327,22 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc) | |||
| 333 | struct drm_i915_private *dev_priv = dev->dev_private; | 327 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 334 | struct intel_plane *intel_plane = to_intel_plane(plane); | 328 | struct intel_plane *intel_plane = to_intel_plane(plane); |
| 335 | int pipe = intel_plane->pipe; | 329 | int pipe = intel_plane->pipe; |
| 336 | bool scaling_was_enabled = dev_priv->sprite_scaling_enabled; | ||
| 337 | 330 | ||
| 338 | I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE); | 331 | I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE); |
| 339 | /* Can't leave the scaler enabled... */ | 332 | /* Can't leave the scaler enabled... */ |
| 340 | if (intel_plane->can_scale) | 333 | if (intel_plane->can_scale) |
| 341 | I915_WRITE(SPRSCALE(pipe), 0); | 334 | I915_WRITE(SPRSCALE(pipe), 0); |
| 342 | /* Activate double buffered register update */ | 335 | /* Activate double buffered register update */ |
| 343 | I915_MODIFY_DISPBASE(SPRSURF(pipe), 0); | 336 | I915_WRITE(SPRSURF(pipe), 0); |
| 344 | POSTING_READ(SPRSURF(pipe)); | 337 | POSTING_READ(SPRSURF(pipe)); |
| 345 | 338 | ||
| 346 | dev_priv->sprite_scaling_enabled &= ~(1 << pipe); | 339 | /* |
| 340 | * Avoid underruns when disabling the sprite. | ||
| 341 | * FIXME remove once watermark updates are done properly. | ||
| 342 | */ | ||
| 343 | intel_wait_for_vblank(dev, pipe); | ||
| 347 | 344 | ||
| 348 | intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false); | 345 | intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false); |
| 349 | |||
| 350 | /* potentially re-enable LP watermarks */ | ||
| 351 | if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled) | ||
| 352 | intel_update_watermarks(crtc); | ||
| 353 | } | 346 | } |
| 354 | 347 | ||
| 355 | static int | 348 | static int |
| @@ -453,6 +446,12 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
| 453 | BUG(); | 446 | BUG(); |
| 454 | } | 447 | } |
| 455 | 448 | ||
| 449 | /* | ||
| 450 | * Enable gamma to match primary/cursor plane behaviour. | ||
| 451 | * FIXME should be user controllable via propertiesa. | ||
| 452 | */ | ||
| 453 | dvscntr |= DVS_GAMMA_ENABLE; | ||
| 454 | |||
| 456 | if (obj->tiling_mode != I915_TILING_NONE) | 455 | if (obj->tiling_mode != I915_TILING_NONE) |
| 457 | dvscntr |= DVS_TILED; | 456 | dvscntr |= DVS_TILED; |
| 458 | 457 | ||
| @@ -470,7 +469,7 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
| 470 | crtc_h--; | 469 | crtc_h--; |
| 471 | 470 | ||
| 472 | dvsscale = 0; | 471 | dvsscale = 0; |
| 473 | if (IS_GEN5(dev) || crtc_w != src_w || crtc_h != src_h) | 472 | if (crtc_w != src_w || crtc_h != src_h) |
| 474 | dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h; | 473 | dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h; |
| 475 | 474 | ||
| 476 | I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]); | 475 | I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]); |
| @@ -490,8 +489,8 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
| 490 | I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w); | 489 | I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w); |
| 491 | I915_WRITE(DVSSCALE(pipe), dvsscale); | 490 | I915_WRITE(DVSSCALE(pipe), dvsscale); |
| 492 | I915_WRITE(DVSCNTR(pipe), dvscntr); | 491 | I915_WRITE(DVSCNTR(pipe), dvscntr); |
| 493 | I915_MODIFY_DISPBASE(DVSSURF(pipe), | 492 | I915_WRITE(DVSSURF(pipe), |
| 494 | i915_gem_obj_ggtt_offset(obj) + dvssurf_offset); | 493 | i915_gem_obj_ggtt_offset(obj) + dvssurf_offset); |
| 495 | POSTING_READ(DVSSURF(pipe)); | 494 | POSTING_READ(DVSSURF(pipe)); |
| 496 | } | 495 | } |
| 497 | 496 | ||
| @@ -507,9 +506,15 @@ ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc) | |||
| 507 | /* Disable the scaler */ | 506 | /* Disable the scaler */ |
| 508 | I915_WRITE(DVSSCALE(pipe), 0); | 507 | I915_WRITE(DVSSCALE(pipe), 0); |
| 509 | /* Flush double buffered register updates */ | 508 | /* Flush double buffered register updates */ |
| 510 | I915_MODIFY_DISPBASE(DVSSURF(pipe), 0); | 509 | I915_WRITE(DVSSURF(pipe), 0); |
| 511 | POSTING_READ(DVSSURF(pipe)); | 510 | POSTING_READ(DVSSURF(pipe)); |
| 512 | 511 | ||
| 512 | /* | ||
| 513 | * Avoid underruns when disabling the sprite. | ||
| 514 | * FIXME remove once watermark updates are done properly. | ||
| 515 | */ | ||
| 516 | intel_wait_for_vblank(dev, pipe); | ||
| 517 | |||
| 513 | intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false); | 518 | intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false); |
| 514 | } | 519 | } |
| 515 | 520 | ||
| @@ -643,6 +648,15 @@ format_is_yuv(uint32_t format) | |||
| 643 | } | 648 | } |
| 644 | } | 649 | } |
| 645 | 650 | ||
| 651 | static bool colorkey_enabled(struct intel_plane *intel_plane) | ||
| 652 | { | ||
| 653 | struct drm_intel_sprite_colorkey key; | ||
| 654 | |||
| 655 | intel_plane->get_colorkey(&intel_plane->base, &key); | ||
| 656 | |||
| 657 | return key.flags != I915_SET_COLORKEY_NONE; | ||
| 658 | } | ||
| 659 | |||
| 646 | static int | 660 | static int |
| 647 | intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | 661 | intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, |
| 648 | struct drm_framebuffer *fb, int crtc_x, int crtc_y, | 662 | struct drm_framebuffer *fb, int crtc_x, int crtc_y, |
| @@ -828,7 +842,7 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
| 828 | * If the sprite is completely covering the primary plane, | 842 | * If the sprite is completely covering the primary plane, |
| 829 | * we can disable the primary and save power. | 843 | * we can disable the primary and save power. |
| 830 | */ | 844 | */ |
| 831 | disable_primary = drm_rect_equals(&dst, &clip); | 845 | disable_primary = drm_rect_equals(&dst, &clip) && !colorkey_enabled(intel_plane); |
| 832 | WARN_ON(disable_primary && !visible && intel_crtc->active); | 846 | WARN_ON(disable_primary && !visible && intel_crtc->active); |
| 833 | 847 | ||
| 834 | mutex_lock(&dev->struct_mutex); | 848 | mutex_lock(&dev->struct_mutex); |
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 25cbe073c388..87df68f5f504 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c | |||
| @@ -64,7 +64,8 @@ static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv) | |||
| 64 | __raw_posting_read(dev_priv, ECOBUS); | 64 | __raw_posting_read(dev_priv, ECOBUS); |
| 65 | } | 65 | } |
| 66 | 66 | ||
| 67 | static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) | 67 | static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, |
| 68 | int fw_engine) | ||
| 68 | { | 69 | { |
| 69 | if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0, | 70 | if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0, |
| 70 | FORCEWAKE_ACK_TIMEOUT_MS)) | 71 | FORCEWAKE_ACK_TIMEOUT_MS)) |
| @@ -89,7 +90,8 @@ static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv) | |||
| 89 | __raw_posting_read(dev_priv, ECOBUS); | 90 | __raw_posting_read(dev_priv, ECOBUS); |
| 90 | } | 91 | } |
| 91 | 92 | ||
| 92 | static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) | 93 | static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv, |
| 94 | int fw_engine) | ||
| 93 | { | 95 | { |
| 94 | u32 forcewake_ack; | 96 | u32 forcewake_ack; |
| 95 | 97 | ||
| @@ -121,12 +123,12 @@ static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv) | |||
| 121 | u32 gtfifodbg; | 123 | u32 gtfifodbg; |
| 122 | 124 | ||
| 123 | gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG); | 125 | gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG); |
| 124 | if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK, | 126 | if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg)) |
| 125 | "MMIO read or write has been dropped %x\n", gtfifodbg)) | 127 | __raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg); |
| 126 | __raw_i915_write32(dev_priv, GTFIFODBG, GT_FIFO_CPU_ERROR_MASK); | ||
| 127 | } | 128 | } |
| 128 | 129 | ||
| 129 | static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) | 130 | static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, |
| 131 | int fw_engine) | ||
| 130 | { | 132 | { |
| 131 | __raw_i915_write32(dev_priv, FORCEWAKE, 0); | 133 | __raw_i915_write32(dev_priv, FORCEWAKE, 0); |
| 132 | /* something from same cacheline, but !FORCEWAKE */ | 134 | /* something from same cacheline, but !FORCEWAKE */ |
| @@ -134,7 +136,8 @@ static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) | |||
| 134 | gen6_gt_check_fifodbg(dev_priv); | 136 | gen6_gt_check_fifodbg(dev_priv); |
| 135 | } | 137 | } |
| 136 | 138 | ||
| 137 | static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) | 139 | static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv, |
| 140 | int fw_engine) | ||
| 138 | { | 141 | { |
| 139 | __raw_i915_write32(dev_priv, FORCEWAKE_MT, | 142 | __raw_i915_write32(dev_priv, FORCEWAKE_MT, |
| 140 | _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); | 143 | _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); |
| @@ -147,12 +150,19 @@ static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) | |||
| 147 | { | 150 | { |
| 148 | int ret = 0; | 151 | int ret = 0; |
| 149 | 152 | ||
| 153 | /* On VLV, FIFO will be shared by both SW and HW. | ||
| 154 | * So, we need to read the FREE_ENTRIES everytime */ | ||
| 155 | if (IS_VALLEYVIEW(dev_priv->dev)) | ||
| 156 | dev_priv->uncore.fifo_count = | ||
| 157 | __raw_i915_read32(dev_priv, GTFIFOCTL) & | ||
| 158 | GT_FIFO_FREE_ENTRIES_MASK; | ||
| 159 | |||
| 150 | if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) { | 160 | if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) { |
| 151 | int loop = 500; | 161 | int loop = 500; |
| 152 | u32 fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES); | 162 | u32 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK; |
| 153 | while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) { | 163 | while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) { |
| 154 | udelay(10); | 164 | udelay(10); |
| 155 | fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES); | 165 | fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK; |
| 156 | } | 166 | } |
| 157 | if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES)) | 167 | if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES)) |
| 158 | ++ret; | 168 | ++ret; |
| @@ -171,38 +181,112 @@ static void vlv_force_wake_reset(struct drm_i915_private *dev_priv) | |||
| 171 | __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV); | 181 | __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV); |
| 172 | } | 182 | } |
| 173 | 183 | ||
| 174 | static void vlv_force_wake_get(struct drm_i915_private *dev_priv) | 184 | static void __vlv_force_wake_get(struct drm_i915_private *dev_priv, |
| 185 | int fw_engine) | ||
| 175 | { | 186 | { |
| 176 | if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0, | 187 | /* Check for Render Engine */ |
| 177 | FORCEWAKE_ACK_TIMEOUT_MS)) | 188 | if (FORCEWAKE_RENDER & fw_engine) { |
| 178 | DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); | 189 | if (wait_for_atomic((__raw_i915_read32(dev_priv, |
| 190 | FORCEWAKE_ACK_VLV) & | ||
| 191 | FORCEWAKE_KERNEL) == 0, | ||
| 192 | FORCEWAKE_ACK_TIMEOUT_MS)) | ||
| 193 | DRM_ERROR("Timed out: Render forcewake old ack to clear.\n"); | ||
| 179 | 194 | ||
| 180 | __raw_i915_write32(dev_priv, FORCEWAKE_VLV, | 195 | __raw_i915_write32(dev_priv, FORCEWAKE_VLV, |
| 181 | _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); | 196 | _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); |
| 182 | __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV, | ||
| 183 | _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); | ||
| 184 | 197 | ||
| 185 | if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL), | 198 | if (wait_for_atomic((__raw_i915_read32(dev_priv, |
| 186 | FORCEWAKE_ACK_TIMEOUT_MS)) | 199 | FORCEWAKE_ACK_VLV) & |
| 187 | DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n"); | 200 | FORCEWAKE_KERNEL), |
| 201 | FORCEWAKE_ACK_TIMEOUT_MS)) | ||
| 202 | DRM_ERROR("Timed out: waiting for Render to ack.\n"); | ||
| 203 | } | ||
| 188 | 204 | ||
| 189 | if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_MEDIA_VLV) & | 205 | /* Check for Media Engine */ |
| 190 | FORCEWAKE_KERNEL), | 206 | if (FORCEWAKE_MEDIA & fw_engine) { |
| 191 | FORCEWAKE_ACK_TIMEOUT_MS)) | 207 | if (wait_for_atomic((__raw_i915_read32(dev_priv, |
| 192 | DRM_ERROR("Timed out waiting for media to ack forcewake request.\n"); | 208 | FORCEWAKE_ACK_MEDIA_VLV) & |
| 209 | FORCEWAKE_KERNEL) == 0, | ||
| 210 | FORCEWAKE_ACK_TIMEOUT_MS)) | ||
| 211 | DRM_ERROR("Timed out: Media forcewake old ack to clear.\n"); | ||
| 212 | |||
| 213 | __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV, | ||
| 214 | _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); | ||
| 215 | |||
| 216 | if (wait_for_atomic((__raw_i915_read32(dev_priv, | ||
| 217 | FORCEWAKE_ACK_MEDIA_VLV) & | ||
| 218 | FORCEWAKE_KERNEL), | ||
| 219 | FORCEWAKE_ACK_TIMEOUT_MS)) | ||
| 220 | DRM_ERROR("Timed out: waiting for media to ack.\n"); | ||
| 221 | } | ||
| 193 | 222 | ||
| 194 | /* WaRsForcewakeWaitTC0:vlv */ | 223 | /* WaRsForcewakeWaitTC0:vlv */ |
| 195 | __gen6_gt_wait_for_thread_c0(dev_priv); | 224 | __gen6_gt_wait_for_thread_c0(dev_priv); |
| 225 | |||
| 196 | } | 226 | } |
| 197 | 227 | ||
| 198 | static void vlv_force_wake_put(struct drm_i915_private *dev_priv) | 228 | static void __vlv_force_wake_put(struct drm_i915_private *dev_priv, |
| 229 | int fw_engine) | ||
| 199 | { | 230 | { |
| 200 | __raw_i915_write32(dev_priv, FORCEWAKE_VLV, | 231 | |
| 201 | _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); | 232 | /* Check for Render Engine */ |
| 202 | __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV, | 233 | if (FORCEWAKE_RENDER & fw_engine) |
| 203 | _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); | 234 | __raw_i915_write32(dev_priv, FORCEWAKE_VLV, |
| 235 | _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); | ||
| 236 | |||
| 237 | |||
| 238 | /* Check for Media Engine */ | ||
| 239 | if (FORCEWAKE_MEDIA & fw_engine) | ||
| 240 | __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV, | ||
| 241 | _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); | ||
| 242 | |||
| 204 | /* The below doubles as a POSTING_READ */ | 243 | /* The below doubles as a POSTING_READ */ |
| 205 | gen6_gt_check_fifodbg(dev_priv); | 244 | gen6_gt_check_fifodbg(dev_priv); |
| 245 | |||
| 246 | } | ||
| 247 | |||
| 248 | void vlv_force_wake_get(struct drm_i915_private *dev_priv, | ||
| 249 | int fw_engine) | ||
| 250 | { | ||
| 251 | unsigned long irqflags; | ||
| 252 | |||
| 253 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); | ||
| 254 | if (FORCEWAKE_RENDER & fw_engine) { | ||
| 255 | if (dev_priv->uncore.fw_rendercount++ == 0) | ||
| 256 | dev_priv->uncore.funcs.force_wake_get(dev_priv, | ||
| 257 | FORCEWAKE_RENDER); | ||
| 258 | } | ||
| 259 | if (FORCEWAKE_MEDIA & fw_engine) { | ||
| 260 | if (dev_priv->uncore.fw_mediacount++ == 0) | ||
| 261 | dev_priv->uncore.funcs.force_wake_get(dev_priv, | ||
| 262 | FORCEWAKE_MEDIA); | ||
| 263 | } | ||
| 264 | |||
| 265 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); | ||
| 266 | } | ||
| 267 | |||
| 268 | void vlv_force_wake_put(struct drm_i915_private *dev_priv, | ||
| 269 | int fw_engine) | ||
| 270 | { | ||
| 271 | unsigned long irqflags; | ||
| 272 | |||
| 273 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); | ||
| 274 | |||
| 275 | if (FORCEWAKE_RENDER & fw_engine) { | ||
| 276 | WARN_ON(dev_priv->uncore.fw_rendercount == 0); | ||
| 277 | if (--dev_priv->uncore.fw_rendercount == 0) | ||
| 278 | dev_priv->uncore.funcs.force_wake_put(dev_priv, | ||
| 279 | FORCEWAKE_RENDER); | ||
| 280 | } | ||
| 281 | |||
| 282 | if (FORCEWAKE_MEDIA & fw_engine) { | ||
| 283 | WARN_ON(dev_priv->uncore.fw_mediacount == 0); | ||
| 284 | if (--dev_priv->uncore.fw_mediacount == 0) | ||
| 285 | dev_priv->uncore.funcs.force_wake_put(dev_priv, | ||
| 286 | FORCEWAKE_MEDIA); | ||
| 287 | } | ||
| 288 | |||
| 289 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); | ||
| 206 | } | 290 | } |
| 207 | 291 | ||
| 208 | static void gen6_force_wake_work(struct work_struct *work) | 292 | static void gen6_force_wake_work(struct work_struct *work) |
| @@ -213,7 +297,7 @@ static void gen6_force_wake_work(struct work_struct *work) | |||
| 213 | 297 | ||
| 214 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); | 298 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); |
| 215 | if (--dev_priv->uncore.forcewake_count == 0) | 299 | if (--dev_priv->uncore.forcewake_count == 0) |
| 216 | dev_priv->uncore.funcs.force_wake_put(dev_priv); | 300 | dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL); |
| 217 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); | 301 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); |
| 218 | } | 302 | } |
| 219 | 303 | ||
| @@ -248,6 +332,11 @@ void intel_uncore_early_sanitize(struct drm_device *dev) | |||
| 248 | DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size); | 332 | DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size); |
| 249 | } | 333 | } |
| 250 | 334 | ||
| 335 | /* clear out old GT FIFO errors */ | ||
| 336 | if (IS_GEN6(dev) || IS_GEN7(dev)) | ||
| 337 | __raw_i915_write32(dev_priv, GTFIFODBG, | ||
| 338 | __raw_i915_read32(dev_priv, GTFIFODBG)); | ||
| 339 | |||
| 251 | intel_uncore_forcewake_reset(dev); | 340 | intel_uncore_forcewake_reset(dev); |
| 252 | } | 341 | } |
| 253 | 342 | ||
| @@ -256,8 +345,6 @@ void intel_uncore_sanitize(struct drm_device *dev) | |||
| 256 | struct drm_i915_private *dev_priv = dev->dev_private; | 345 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 257 | u32 reg_val; | 346 | u32 reg_val; |
| 258 | 347 | ||
| 259 | intel_uncore_forcewake_reset(dev); | ||
| 260 | |||
| 261 | /* BIOS often leaves RC6 enabled, but disable it for hw init */ | 348 | /* BIOS often leaves RC6 enabled, but disable it for hw init */ |
| 262 | intel_disable_gt_powersave(dev); | 349 | intel_disable_gt_powersave(dev); |
| 263 | 350 | ||
| @@ -281,29 +368,40 @@ void intel_uncore_sanitize(struct drm_device *dev) | |||
| 281 | * be called at the beginning of the sequence followed by a call to | 368 | * be called at the beginning of the sequence followed by a call to |
| 282 | * gen6_gt_force_wake_put() at the end of the sequence. | 369 | * gen6_gt_force_wake_put() at the end of the sequence. |
| 283 | */ | 370 | */ |
| 284 | void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) | 371 | void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine) |
| 285 | { | 372 | { |
| 286 | unsigned long irqflags; | 373 | unsigned long irqflags; |
| 287 | 374 | ||
| 288 | if (!dev_priv->uncore.funcs.force_wake_get) | 375 | if (!dev_priv->uncore.funcs.force_wake_get) |
| 289 | return; | 376 | return; |
| 290 | 377 | ||
| 378 | intel_runtime_pm_get(dev_priv); | ||
| 379 | |||
| 380 | /* Redirect to VLV specific routine */ | ||
| 381 | if (IS_VALLEYVIEW(dev_priv->dev)) | ||
| 382 | return vlv_force_wake_get(dev_priv, fw_engine); | ||
| 383 | |||
| 291 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); | 384 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); |
| 292 | if (dev_priv->uncore.forcewake_count++ == 0) | 385 | if (dev_priv->uncore.forcewake_count++ == 0) |
| 293 | dev_priv->uncore.funcs.force_wake_get(dev_priv); | 386 | dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL); |
| 294 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); | 387 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); |
| 295 | } | 388 | } |
| 296 | 389 | ||
| 297 | /* | 390 | /* |
| 298 | * see gen6_gt_force_wake_get() | 391 | * see gen6_gt_force_wake_get() |
| 299 | */ | 392 | */ |
| 300 | void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) | 393 | void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine) |
| 301 | { | 394 | { |
| 302 | unsigned long irqflags; | 395 | unsigned long irqflags; |
| 303 | 396 | ||
| 304 | if (!dev_priv->uncore.funcs.force_wake_put) | 397 | if (!dev_priv->uncore.funcs.force_wake_put) |
| 305 | return; | 398 | return; |
| 306 | 399 | ||
| 400 | /* Redirect to VLV specific routine */ | ||
| 401 | if (IS_VALLEYVIEW(dev_priv->dev)) | ||
| 402 | return vlv_force_wake_put(dev_priv, fw_engine); | ||
| 403 | |||
| 404 | |||
| 307 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); | 405 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); |
| 308 | if (--dev_priv->uncore.forcewake_count == 0) { | 406 | if (--dev_priv->uncore.forcewake_count == 0) { |
| 309 | dev_priv->uncore.forcewake_count++; | 407 | dev_priv->uncore.forcewake_count++; |
| @@ -312,6 +410,8 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) | |||
| 312 | 1); | 410 | 1); |
| 313 | } | 411 | } |
| 314 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); | 412 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); |
| 413 | |||
| 414 | intel_runtime_pm_put(dev_priv); | ||
| 315 | } | 415 | } |
| 316 | 416 | ||
| 317 | /* We give fast paths for the really cool registers */ | 417 | /* We give fast paths for the really cool registers */ |
| @@ -346,6 +446,13 @@ hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg) | |||
| 346 | } | 446 | } |
| 347 | } | 447 | } |
| 348 | 448 | ||
| 449 | static void | ||
| 450 | assert_device_not_suspended(struct drm_i915_private *dev_priv) | ||
| 451 | { | ||
| 452 | WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended, | ||
| 453 | "Device suspended\n"); | ||
| 454 | } | ||
| 455 | |||
| 349 | #define REG_READ_HEADER(x) \ | 456 | #define REG_READ_HEADER(x) \ |
| 350 | unsigned long irqflags; \ | 457 | unsigned long irqflags; \ |
| 351 | u##x val = 0; \ | 458 | u##x val = 0; \ |
| @@ -379,16 +486,51 @@ gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ | |||
| 379 | REG_READ_HEADER(x); \ | 486 | REG_READ_HEADER(x); \ |
| 380 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ | 487 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ |
| 381 | if (dev_priv->uncore.forcewake_count == 0) \ | 488 | if (dev_priv->uncore.forcewake_count == 0) \ |
| 382 | dev_priv->uncore.funcs.force_wake_get(dev_priv); \ | 489 | dev_priv->uncore.funcs.force_wake_get(dev_priv, \ |
| 490 | FORCEWAKE_ALL); \ | ||
| 383 | val = __raw_i915_read##x(dev_priv, reg); \ | 491 | val = __raw_i915_read##x(dev_priv, reg); \ |
| 384 | if (dev_priv->uncore.forcewake_count == 0) \ | 492 | if (dev_priv->uncore.forcewake_count == 0) \ |
| 385 | dev_priv->uncore.funcs.force_wake_put(dev_priv); \ | 493 | dev_priv->uncore.funcs.force_wake_put(dev_priv, \ |
| 494 | FORCEWAKE_ALL); \ | ||
| 386 | } else { \ | 495 | } else { \ |
| 387 | val = __raw_i915_read##x(dev_priv, reg); \ | 496 | val = __raw_i915_read##x(dev_priv, reg); \ |
| 388 | } \ | 497 | } \ |
| 389 | REG_READ_FOOTER; \ | 498 | REG_READ_FOOTER; \ |
| 390 | } | 499 | } |
| 391 | 500 | ||
| 501 | #define __vlv_read(x) \ | ||
| 502 | static u##x \ | ||
| 503 | vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ | ||
| 504 | unsigned fwengine = 0; \ | ||
| 505 | unsigned *fwcount; \ | ||
| 506 | REG_READ_HEADER(x); \ | ||
| 507 | if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) { \ | ||
| 508 | fwengine = FORCEWAKE_RENDER; \ | ||
| 509 | fwcount = &dev_priv->uncore.fw_rendercount; \ | ||
| 510 | } \ | ||
| 511 | else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) { \ | ||
| 512 | fwengine = FORCEWAKE_MEDIA; \ | ||
| 513 | fwcount = &dev_priv->uncore.fw_mediacount; \ | ||
| 514 | } \ | ||
| 515 | if (fwengine != 0) { \ | ||
| 516 | if ((*fwcount)++ == 0) \ | ||
| 517 | (dev_priv)->uncore.funcs.force_wake_get(dev_priv, \ | ||
| 518 | fwengine); \ | ||
| 519 | val = __raw_i915_read##x(dev_priv, reg); \ | ||
| 520 | if (--(*fwcount) == 0) \ | ||
| 521 | (dev_priv)->uncore.funcs.force_wake_put(dev_priv, \ | ||
| 522 | fwengine); \ | ||
| 523 | } else { \ | ||
| 524 | val = __raw_i915_read##x(dev_priv, reg); \ | ||
| 525 | } \ | ||
| 526 | REG_READ_FOOTER; \ | ||
| 527 | } | ||
| 528 | |||
| 529 | |||
| 530 | __vlv_read(8) | ||
| 531 | __vlv_read(16) | ||
| 532 | __vlv_read(32) | ||
| 533 | __vlv_read(64) | ||
| 392 | __gen6_read(8) | 534 | __gen6_read(8) |
| 393 | __gen6_read(16) | 535 | __gen6_read(16) |
| 394 | __gen6_read(32) | 536 | __gen6_read(32) |
| @@ -402,6 +544,7 @@ __gen4_read(16) | |||
| 402 | __gen4_read(32) | 544 | __gen4_read(32) |
| 403 | __gen4_read(64) | 545 | __gen4_read(64) |
| 404 | 546 | ||
| 547 | #undef __vlv_read | ||
| 405 | #undef __gen6_read | 548 | #undef __gen6_read |
| 406 | #undef __gen5_read | 549 | #undef __gen5_read |
| 407 | #undef __gen4_read | 550 | #undef __gen4_read |
| @@ -413,12 +556,15 @@ __gen4_read(64) | |||
| 413 | trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ | 556 | trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ |
| 414 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags) | 557 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags) |
| 415 | 558 | ||
| 559 | #define REG_WRITE_FOOTER \ | ||
| 560 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags) | ||
| 561 | |||
| 416 | #define __gen4_write(x) \ | 562 | #define __gen4_write(x) \ |
| 417 | static void \ | 563 | static void \ |
| 418 | gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ | 564 | gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ |
| 419 | REG_WRITE_HEADER; \ | 565 | REG_WRITE_HEADER; \ |
| 420 | __raw_i915_write##x(dev_priv, reg, val); \ | 566 | __raw_i915_write##x(dev_priv, reg, val); \ |
| 421 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \ | 567 | REG_WRITE_FOOTER; \ |
| 422 | } | 568 | } |
| 423 | 569 | ||
| 424 | #define __gen5_write(x) \ | 570 | #define __gen5_write(x) \ |
| @@ -427,7 +573,7 @@ gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace | |||
| 427 | REG_WRITE_HEADER; \ | 573 | REG_WRITE_HEADER; \ |
| 428 | ilk_dummy_write(dev_priv); \ | 574 | ilk_dummy_write(dev_priv); \ |
| 429 | __raw_i915_write##x(dev_priv, reg, val); \ | 575 | __raw_i915_write##x(dev_priv, reg, val); \ |
| 430 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \ | 576 | REG_WRITE_FOOTER; \ |
| 431 | } | 577 | } |
| 432 | 578 | ||
| 433 | #define __gen6_write(x) \ | 579 | #define __gen6_write(x) \ |
| @@ -438,11 +584,12 @@ gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace | |||
| 438 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ | 584 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ |
| 439 | __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ | 585 | __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ |
| 440 | } \ | 586 | } \ |
| 587 | assert_device_not_suspended(dev_priv); \ | ||
| 441 | __raw_i915_write##x(dev_priv, reg, val); \ | 588 | __raw_i915_write##x(dev_priv, reg, val); \ |
| 442 | if (unlikely(__fifo_ret)) { \ | 589 | if (unlikely(__fifo_ret)) { \ |
| 443 | gen6_gt_check_fifodbg(dev_priv); \ | 590 | gen6_gt_check_fifodbg(dev_priv); \ |
| 444 | } \ | 591 | } \ |
| 445 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \ | 592 | REG_WRITE_FOOTER; \ |
| 446 | } | 593 | } |
| 447 | 594 | ||
| 448 | #define __hsw_write(x) \ | 595 | #define __hsw_write(x) \ |
| @@ -453,13 +600,14 @@ hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) | |||
| 453 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ | 600 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ |
| 454 | __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ | 601 | __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ |
| 455 | } \ | 602 | } \ |
| 603 | assert_device_not_suspended(dev_priv); \ | ||
| 456 | hsw_unclaimed_reg_clear(dev_priv, reg); \ | 604 | hsw_unclaimed_reg_clear(dev_priv, reg); \ |
| 457 | __raw_i915_write##x(dev_priv, reg, val); \ | 605 | __raw_i915_write##x(dev_priv, reg, val); \ |
| 458 | if (unlikely(__fifo_ret)) { \ | 606 | if (unlikely(__fifo_ret)) { \ |
| 459 | gen6_gt_check_fifodbg(dev_priv); \ | 607 | gen6_gt_check_fifodbg(dev_priv); \ |
| 460 | } \ | 608 | } \ |
| 461 | hsw_unclaimed_reg_check(dev_priv, reg); \ | 609 | hsw_unclaimed_reg_check(dev_priv, reg); \ |
| 462 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \ | 610 | REG_WRITE_FOOTER; \ |
| 463 | } | 611 | } |
| 464 | 612 | ||
| 465 | static const u32 gen8_shadowed_regs[] = { | 613 | static const u32 gen8_shadowed_regs[] = { |
| @@ -486,16 +634,18 @@ static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg) | |||
| 486 | #define __gen8_write(x) \ | 634 | #define __gen8_write(x) \ |
| 487 | static void \ | 635 | static void \ |
| 488 | gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ | 636 | gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ |
| 489 | bool __needs_put = !is_gen8_shadowed(dev_priv, reg); \ | 637 | bool __needs_put = reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg); \ |
| 490 | REG_WRITE_HEADER; \ | 638 | REG_WRITE_HEADER; \ |
| 491 | if (__needs_put) { \ | 639 | if (__needs_put) { \ |
| 492 | dev_priv->uncore.funcs.force_wake_get(dev_priv); \ | 640 | dev_priv->uncore.funcs.force_wake_get(dev_priv, \ |
| 641 | FORCEWAKE_ALL); \ | ||
| 493 | } \ | 642 | } \ |
| 494 | __raw_i915_write##x(dev_priv, reg, val); \ | 643 | __raw_i915_write##x(dev_priv, reg, val); \ |
| 495 | if (__needs_put) { \ | 644 | if (__needs_put) { \ |
| 496 | dev_priv->uncore.funcs.force_wake_put(dev_priv); \ | 645 | dev_priv->uncore.funcs.force_wake_put(dev_priv, \ |
| 646 | FORCEWAKE_ALL); \ | ||
| 497 | } \ | 647 | } \ |
| 498 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \ | 648 | REG_WRITE_FOOTER; \ |
| 499 | } | 649 | } |
| 500 | 650 | ||
| 501 | __gen8_write(8) | 651 | __gen8_write(8) |
| @@ -524,6 +674,7 @@ __gen4_write(64) | |||
| 524 | #undef __gen6_write | 674 | #undef __gen6_write |
| 525 | #undef __gen5_write | 675 | #undef __gen5_write |
| 526 | #undef __gen4_write | 676 | #undef __gen4_write |
| 677 | #undef REG_WRITE_FOOTER | ||
| 527 | #undef REG_WRITE_HEADER | 678 | #undef REG_WRITE_HEADER |
| 528 | 679 | ||
| 529 | void intel_uncore_init(struct drm_device *dev) | 680 | void intel_uncore_init(struct drm_device *dev) |
| @@ -534,8 +685,8 @@ void intel_uncore_init(struct drm_device *dev) | |||
| 534 | gen6_force_wake_work); | 685 | gen6_force_wake_work); |
| 535 | 686 | ||
| 536 | if (IS_VALLEYVIEW(dev)) { | 687 | if (IS_VALLEYVIEW(dev)) { |
| 537 | dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get; | 688 | dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get; |
| 538 | dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put; | 689 | dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put; |
| 539 | } else if (IS_HASWELL(dev) || IS_GEN8(dev)) { | 690 | } else if (IS_HASWELL(dev) || IS_GEN8(dev)) { |
| 540 | dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get; | 691 | dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get; |
| 541 | dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put; | 692 | dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put; |
| @@ -552,9 +703,9 @@ void intel_uncore_init(struct drm_device *dev) | |||
| 552 | * forcewake being disabled. | 703 | * forcewake being disabled. |
| 553 | */ | 704 | */ |
| 554 | mutex_lock(&dev->struct_mutex); | 705 | mutex_lock(&dev->struct_mutex); |
| 555 | __gen6_gt_force_wake_mt_get(dev_priv); | 706 | __gen6_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL); |
| 556 | ecobus = __raw_i915_read32(dev_priv, ECOBUS); | 707 | ecobus = __raw_i915_read32(dev_priv, ECOBUS); |
| 557 | __gen6_gt_force_wake_mt_put(dev_priv); | 708 | __gen6_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL); |
| 558 | mutex_unlock(&dev->struct_mutex); | 709 | mutex_unlock(&dev->struct_mutex); |
| 559 | 710 | ||
| 560 | if (ecobus & FORCEWAKE_MT_ENABLE) { | 711 | if (ecobus & FORCEWAKE_MT_ENABLE) { |
| @@ -601,10 +752,18 @@ void intel_uncore_init(struct drm_device *dev) | |||
| 601 | dev_priv->uncore.funcs.mmio_writel = gen6_write32; | 752 | dev_priv->uncore.funcs.mmio_writel = gen6_write32; |
| 602 | dev_priv->uncore.funcs.mmio_writeq = gen6_write64; | 753 | dev_priv->uncore.funcs.mmio_writeq = gen6_write64; |
| 603 | } | 754 | } |
| 604 | dev_priv->uncore.funcs.mmio_readb = gen6_read8; | 755 | |
| 605 | dev_priv->uncore.funcs.mmio_readw = gen6_read16; | 756 | if (IS_VALLEYVIEW(dev)) { |
| 606 | dev_priv->uncore.funcs.mmio_readl = gen6_read32; | 757 | dev_priv->uncore.funcs.mmio_readb = vlv_read8; |
| 607 | dev_priv->uncore.funcs.mmio_readq = gen6_read64; | 758 | dev_priv->uncore.funcs.mmio_readw = vlv_read16; |
| 759 | dev_priv->uncore.funcs.mmio_readl = vlv_read32; | ||
| 760 | dev_priv->uncore.funcs.mmio_readq = vlv_read64; | ||
| 761 | } else { | ||
| 762 | dev_priv->uncore.funcs.mmio_readb = gen6_read8; | ||
| 763 | dev_priv->uncore.funcs.mmio_readw = gen6_read16; | ||
| 764 | dev_priv->uncore.funcs.mmio_readl = gen6_read32; | ||
| 765 | dev_priv->uncore.funcs.mmio_readq = gen6_read64; | ||
| 766 | } | ||
| 608 | break; | 767 | break; |
| 609 | case 5: | 768 | case 5: |
| 610 | dev_priv->uncore.funcs.mmio_writeb = gen5_write8; | 769 | dev_priv->uncore.funcs.mmio_writeb = gen5_write8; |
| @@ -646,7 +805,7 @@ static const struct register_whitelist { | |||
| 646 | uint32_t size; | 805 | uint32_t size; |
| 647 | uint32_t gen_bitmask; /* support gens, 0x10 for 4, 0x30 for 4 and 5, etc. */ | 806 | uint32_t gen_bitmask; /* support gens, 0x10 for 4, 0x30 for 4 and 5, etc. */ |
| 648 | } whitelist[] = { | 807 | } whitelist[] = { |
| 649 | { RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 }, | 808 | { RING_TIMESTAMP(RENDER_RING_BASE), 8, 0x1F0 }, |
| 650 | }; | 809 | }; |
| 651 | 810 | ||
| 652 | int i915_reg_read_ioctl(struct drm_device *dev, | 811 | int i915_reg_read_ioctl(struct drm_device *dev, |
| @@ -687,6 +846,43 @@ int i915_reg_read_ioctl(struct drm_device *dev, | |||
| 687 | return 0; | 846 | return 0; |
| 688 | } | 847 | } |
| 689 | 848 | ||
| 849 | int i915_get_reset_stats_ioctl(struct drm_device *dev, | ||
| 850 | void *data, struct drm_file *file) | ||
| 851 | { | ||
| 852 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 853 | struct drm_i915_reset_stats *args = data; | ||
| 854 | struct i915_ctx_hang_stats *hs; | ||
| 855 | int ret; | ||
| 856 | |||
| 857 | if (args->flags || args->pad) | ||
| 858 | return -EINVAL; | ||
| 859 | |||
| 860 | if (args->ctx_id == DEFAULT_CONTEXT_ID && !capable(CAP_SYS_ADMIN)) | ||
| 861 | return -EPERM; | ||
| 862 | |||
| 863 | ret = mutex_lock_interruptible(&dev->struct_mutex); | ||
| 864 | if (ret) | ||
| 865 | return ret; | ||
| 866 | |||
| 867 | hs = i915_gem_context_get_hang_stats(dev, file, args->ctx_id); | ||
| 868 | if (IS_ERR(hs)) { | ||
| 869 | mutex_unlock(&dev->struct_mutex); | ||
| 870 | return PTR_ERR(hs); | ||
| 871 | } | ||
| 872 | |||
| 873 | if (capable(CAP_SYS_ADMIN)) | ||
| 874 | args->reset_count = i915_reset_count(&dev_priv->gpu_error); | ||
| 875 | else | ||
| 876 | args->reset_count = 0; | ||
| 877 | |||
| 878 | args->batch_active = hs->batch_active; | ||
| 879 | args->batch_pending = hs->batch_pending; | ||
| 880 | |||
| 881 | mutex_unlock(&dev->struct_mutex); | ||
| 882 | |||
| 883 | return 0; | ||
| 884 | } | ||
| 885 | |||
| 690 | static int i965_reset_complete(struct drm_device *dev) | 886 | static int i965_reset_complete(struct drm_device *dev) |
| 691 | { | 887 | { |
| 692 | u8 gdrst; | 888 | u8 gdrst; |
| @@ -770,12 +966,12 @@ static int gen6_do_reset(struct drm_device *dev) | |||
| 770 | 966 | ||
| 771 | /* If reset with a user forcewake, try to restore, otherwise turn it off */ | 967 | /* If reset with a user forcewake, try to restore, otherwise turn it off */ |
| 772 | if (dev_priv->uncore.forcewake_count) | 968 | if (dev_priv->uncore.forcewake_count) |
| 773 | dev_priv->uncore.funcs.force_wake_get(dev_priv); | 969 | dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL); |
| 774 | else | 970 | else |
| 775 | dev_priv->uncore.funcs.force_wake_put(dev_priv); | 971 | dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL); |
| 776 | 972 | ||
| 777 | /* Restore fifo count */ | 973 | /* Restore fifo count */ |
| 778 | dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES); | 974 | dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK; |
| 779 | 975 | ||
| 780 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); | 976 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); |
| 781 | return ret; | 977 | return ret; |
| @@ -793,15 +989,6 @@ int intel_gpu_reset(struct drm_device *dev) | |||
| 793 | } | 989 | } |
| 794 | } | 990 | } |
| 795 | 991 | ||
| 796 | void intel_uncore_clear_errors(struct drm_device *dev) | ||
| 797 | { | ||
| 798 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 799 | |||
| 800 | /* XXX needs spinlock around caller's grouping */ | ||
| 801 | if (HAS_FPGA_DBG_UNCLAIMED(dev)) | ||
| 802 | __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); | ||
| 803 | } | ||
| 804 | |||
| 805 | void intel_uncore_check_errors(struct drm_device *dev) | 992 | void intel_uncore_check_errors(struct drm_device *dev) |
| 806 | { | 993 | { |
| 807 | struct drm_i915_private *dev_priv = dev->dev_private; | 994 | struct drm_i915_private *dev_priv = dev->dev_private; |
diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c index 087db33f6cff..c3bf059ba720 100644 --- a/drivers/gpu/drm/mga/mga_dma.c +++ b/drivers/gpu/drm/mga/mga_dma.c | |||
| @@ -1075,10 +1075,10 @@ static int mga_dma_get_buffers(struct drm_device *dev, | |||
| 1075 | 1075 | ||
| 1076 | buf->file_priv = file_priv; | 1076 | buf->file_priv = file_priv; |
| 1077 | 1077 | ||
| 1078 | if (DRM_COPY_TO_USER(&d->request_indices[i], | 1078 | if (copy_to_user(&d->request_indices[i], |
| 1079 | &buf->idx, sizeof(buf->idx))) | 1079 | &buf->idx, sizeof(buf->idx))) |
| 1080 | return -EFAULT; | 1080 | return -EFAULT; |
| 1081 | if (DRM_COPY_TO_USER(&d->request_sizes[i], | 1081 | if (copy_to_user(&d->request_sizes[i], |
| 1082 | &buf->total, sizeof(buf->total))) | 1082 | &buf->total, sizeof(buf->total))) |
| 1083 | return -EFAULT; | 1083 | return -EFAULT; |
| 1084 | 1084 | ||
diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h index ca4bc54ea214..fe453213600a 100644 --- a/drivers/gpu/drm/mga/mga_drv.h +++ b/drivers/gpu/drm/mga/mga_drv.h | |||
| @@ -186,14 +186,14 @@ extern void mga_disable_vblank(struct drm_device *dev, int crtc); | |||
| 186 | extern u32 mga_get_vblank_counter(struct drm_device *dev, int crtc); | 186 | extern u32 mga_get_vblank_counter(struct drm_device *dev, int crtc); |
| 187 | extern int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence); | 187 | extern int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence); |
| 188 | extern int mga_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence); | 188 | extern int mga_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence); |
| 189 | extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS); | 189 | extern irqreturn_t mga_driver_irq_handler(int irq, void *arg); |
| 190 | extern void mga_driver_irq_preinstall(struct drm_device *dev); | 190 | extern void mga_driver_irq_preinstall(struct drm_device *dev); |
| 191 | extern int mga_driver_irq_postinstall(struct drm_device *dev); | 191 | extern int mga_driver_irq_postinstall(struct drm_device *dev); |
| 192 | extern void mga_driver_irq_uninstall(struct drm_device *dev); | 192 | extern void mga_driver_irq_uninstall(struct drm_device *dev); |
| 193 | extern long mga_compat_ioctl(struct file *filp, unsigned int cmd, | 193 | extern long mga_compat_ioctl(struct file *filp, unsigned int cmd, |
| 194 | unsigned long arg); | 194 | unsigned long arg); |
| 195 | 195 | ||
| 196 | #define mga_flush_write_combine() DRM_WRITEMEMORYBARRIER() | 196 | #define mga_flush_write_combine() wmb() |
| 197 | 197 | ||
| 198 | #define MGA_READ8(reg) DRM_READ8(dev_priv->mmio, (reg)) | 198 | #define MGA_READ8(reg) DRM_READ8(dev_priv->mmio, (reg)) |
| 199 | #define MGA_READ(reg) DRM_READ32(dev_priv->mmio, (reg)) | 199 | #define MGA_READ(reg) DRM_READ32(dev_priv->mmio, (reg)) |
diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c index 709e90db8c40..86b4bb804852 100644 --- a/drivers/gpu/drm/mga/mga_ioc32.c +++ b/drivers/gpu/drm/mga/mga_ioc32.c | |||
| @@ -34,6 +34,7 @@ | |||
| 34 | 34 | ||
| 35 | #include <drm/drmP.h> | 35 | #include <drm/drmP.h> |
| 36 | #include <drm/mga_drm.h> | 36 | #include <drm/mga_drm.h> |
| 37 | #include "mga_drv.h" | ||
| 37 | 38 | ||
| 38 | typedef struct drm32_mga_init { | 39 | typedef struct drm32_mga_init { |
| 39 | int func; | 40 | int func; |
diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c index 2b0ceb8dc11b..1b071b8ff9dc 100644 --- a/drivers/gpu/drm/mga/mga_irq.c +++ b/drivers/gpu/drm/mga/mga_irq.c | |||
| @@ -47,7 +47,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc) | |||
| 47 | } | 47 | } |
| 48 | 48 | ||
| 49 | 49 | ||
| 50 | irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS) | 50 | irqreturn_t mga_driver_irq_handler(int irq, void *arg) |
| 51 | { | 51 | { |
| 52 | struct drm_device *dev = (struct drm_device *) arg; | 52 | struct drm_device *dev = (struct drm_device *) arg; |
| 53 | drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; | 53 | drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; |
| @@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS) | |||
| 79 | MGA_WRITE(MGA_PRIMEND, prim_end); | 79 | MGA_WRITE(MGA_PRIMEND, prim_end); |
| 80 | 80 | ||
| 81 | atomic_inc(&dev_priv->last_fence_retired); | 81 | atomic_inc(&dev_priv->last_fence_retired); |
| 82 | DRM_WAKEUP(&dev_priv->fence_queue); | 82 | wake_up(&dev_priv->fence_queue); |
| 83 | handled = 1; | 83 | handled = 1; |
| 84 | } | 84 | } |
| 85 | 85 | ||
| @@ -128,7 +128,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence) | |||
| 128 | * by about a day rather than she wants to wait for years | 128 | * by about a day rather than she wants to wait for years |
| 129 | * using fences. | 129 | * using fences. |
| 130 | */ | 130 | */ |
| 131 | DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ, | 131 | DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * HZ, |
| 132 | (((cur_fence = atomic_read(&dev_priv->last_fence_retired)) | 132 | (((cur_fence = atomic_read(&dev_priv->last_fence_retired)) |
| 133 | - *sequence) <= (1 << 23))); | 133 | - *sequence) <= (1 << 23))); |
| 134 | 134 | ||
| @@ -151,7 +151,7 @@ int mga_driver_irq_postinstall(struct drm_device *dev) | |||
| 151 | { | 151 | { |
| 152 | drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; | 152 | drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; |
| 153 | 153 | ||
| 154 | DRM_INIT_WAITQUEUE(&dev_priv->fence_queue); | 154 | init_waitqueue_head(&dev_priv->fence_queue); |
| 155 | 155 | ||
| 156 | /* Turn on soft trap interrupt. Vertical blank interrupts are enabled | 156 | /* Turn on soft trap interrupt. Vertical blank interrupts are enabled |
| 157 | * in mga_enable_vblank. | 157 | * in mga_enable_vblank. |
diff --git a/drivers/gpu/drm/mga/mga_state.c b/drivers/gpu/drm/mga/mga_state.c index 37cc2fb4eadd..314685b7f41f 100644 --- a/drivers/gpu/drm/mga/mga_state.c +++ b/drivers/gpu/drm/mga/mga_state.c | |||
| @@ -1029,7 +1029,7 @@ static int mga_getparam(struct drm_device *dev, void *data, struct drm_file *fil | |||
| 1029 | return -EINVAL; | 1029 | return -EINVAL; |
| 1030 | } | 1030 | } |
| 1031 | 1031 | ||
| 1032 | if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) { | 1032 | if (copy_to_user(param->value, &value, sizeof(int))) { |
| 1033 | DRM_ERROR("copy_to_user\n"); | 1033 | DRM_ERROR("copy_to_user\n"); |
| 1034 | return -EFAULT; | 1034 | return -EFAULT; |
| 1035 | } | 1035 | } |
diff --git a/drivers/gpu/drm/mgag200/mgag200_cursor.c b/drivers/gpu/drm/mgag200/mgag200_cursor.c index 801731aeab61..9f9780b7ddf0 100644 --- a/drivers/gpu/drm/mgag200/mgag200_cursor.c +++ b/drivers/gpu/drm/mgag200/mgag200_cursor.c | |||
| @@ -22,8 +22,10 @@ static void mga_hide_cursor(struct mga_device *mdev) | |||
| 22 | { | 22 | { |
| 23 | WREG8(MGA_CURPOSXL, 0); | 23 | WREG8(MGA_CURPOSXL, 0); |
| 24 | WREG8(MGA_CURPOSXH, 0); | 24 | WREG8(MGA_CURPOSXH, 0); |
| 25 | mgag200_bo_unpin(mdev->cursor.pixels_1); | 25 | if (mdev->cursor.pixels_1->pin_count) |
| 26 | mgag200_bo_unpin(mdev->cursor.pixels_2); | 26 | mgag200_bo_unpin(mdev->cursor.pixels_1); |
| 27 | if (mdev->cursor.pixels_2->pin_count) | ||
| 28 | mgag200_bo_unpin(mdev->cursor.pixels_2); | ||
| 27 | } | 29 | } |
| 28 | 30 | ||
| 29 | int mga_crtc_cursor_set(struct drm_crtc *crtc, | 31 | int mga_crtc_cursor_set(struct drm_crtc *crtc, |
| @@ -32,7 +34,7 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc, | |||
| 32 | uint32_t width, | 34 | uint32_t width, |
| 33 | uint32_t height) | 35 | uint32_t height) |
| 34 | { | 36 | { |
| 35 | struct drm_device *dev = (struct drm_device *)file_priv->minor->dev; | 37 | struct drm_device *dev = crtc->dev; |
| 36 | struct mga_device *mdev = (struct mga_device *)dev->dev_private; | 38 | struct mga_device *mdev = (struct mga_device *)dev->dev_private; |
| 37 | struct mgag200_bo *pixels_1 = mdev->cursor.pixels_1; | 39 | struct mgag200_bo *pixels_1 = mdev->cursor.pixels_1; |
| 38 | struct mgag200_bo *pixels_2 = mdev->cursor.pixels_2; | 40 | struct mgag200_bo *pixels_2 = mdev->cursor.pixels_2; |
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c index 964f58cee5ea..f9adc27ef32a 100644 --- a/drivers/gpu/drm/mgag200/mgag200_fb.c +++ b/drivers/gpu/drm/mgag200/mgag200_fb.c | |||
| @@ -41,7 +41,7 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev, | |||
| 41 | * then the BO is being moved and we should | 41 | * then the BO is being moved and we should |
| 42 | * store up the damage until later. | 42 | * store up the damage until later. |
| 43 | */ | 43 | */ |
| 44 | if (!in_interrupt()) | 44 | if (!drm_can_sleep()) |
| 45 | ret = mgag200_bo_reserve(bo, true); | 45 | ret = mgag200_bo_reserve(bo, true); |
| 46 | if (ret) { | 46 | if (ret) { |
| 47 | if (ret != -EBUSY) | 47 | if (ret != -EBUSY) |
| @@ -282,6 +282,11 @@ int mgag200_fbdev_init(struct mga_device *mdev) | |||
| 282 | { | 282 | { |
| 283 | struct mga_fbdev *mfbdev; | 283 | struct mga_fbdev *mfbdev; |
| 284 | int ret; | 284 | int ret; |
| 285 | int bpp_sel = 32; | ||
| 286 | |||
| 287 | /* prefer 16bpp on low end gpus with limited VRAM */ | ||
| 288 | if (IS_G200_SE(mdev) && mdev->mc.vram_size < (2048*1024)) | ||
| 289 | bpp_sel = 16; | ||
| 285 | 290 | ||
| 286 | mfbdev = devm_kzalloc(mdev->dev->dev, sizeof(struct mga_fbdev), GFP_KERNEL); | 291 | mfbdev = devm_kzalloc(mdev->dev->dev, sizeof(struct mga_fbdev), GFP_KERNEL); |
| 287 | if (!mfbdev) | 292 | if (!mfbdev) |
| @@ -301,7 +306,7 @@ int mgag200_fbdev_init(struct mga_device *mdev) | |||
| 301 | /* disable all the possible outputs/crtcs before entering KMS mode */ | 306 | /* disable all the possible outputs/crtcs before entering KMS mode */ |
| 302 | drm_helper_disable_unused_functions(mdev->dev); | 307 | drm_helper_disable_unused_functions(mdev->dev); |
| 303 | 308 | ||
| 304 | drm_fb_helper_initial_config(&mfbdev->helper, 32); | 309 | drm_fb_helper_initial_config(&mfbdev->helper, bpp_sel); |
| 305 | 310 | ||
| 306 | return 0; | 311 | return 0; |
| 307 | } | 312 | } |
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c index b1120cb1db6d..26868e5c55b0 100644 --- a/drivers/gpu/drm/mgag200/mgag200_main.c +++ b/drivers/gpu/drm/mgag200/mgag200_main.c | |||
| @@ -217,7 +217,10 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags) | |||
| 217 | 217 | ||
| 218 | drm_mode_config_init(dev); | 218 | drm_mode_config_init(dev); |
| 219 | dev->mode_config.funcs = (void *)&mga_mode_funcs; | 219 | dev->mode_config.funcs = (void *)&mga_mode_funcs; |
| 220 | dev->mode_config.preferred_depth = 24; | 220 | if (IS_G200_SE(mdev) && mdev->mc.vram_size < (2048*1024)) |
| 221 | dev->mode_config.preferred_depth = 16; | ||
| 222 | else | ||
| 223 | dev->mode_config.preferred_depth = 24; | ||
| 221 | dev->mode_config.prefer_shadow = 1; | 224 | dev->mode_config.prefer_shadow = 1; |
| 222 | 225 | ||
| 223 | r = mgag200_modeset_init(mdev); | 226 | r = mgag200_modeset_init(mdev); |
| @@ -310,7 +313,7 @@ int mgag200_dumb_create(struct drm_file *file, | |||
| 310 | return 0; | 313 | return 0; |
| 311 | } | 314 | } |
| 312 | 315 | ||
| 313 | void mgag200_bo_unref(struct mgag200_bo **bo) | 316 | static void mgag200_bo_unref(struct mgag200_bo **bo) |
| 314 | { | 317 | { |
| 315 | struct ttm_buffer_object *tbo; | 318 | struct ttm_buffer_object *tbo; |
| 316 | 319 | ||
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index ee6ed633b7b1..b8583f275e80 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c | |||
| @@ -691,7 +691,7 @@ static void mga_g200wb_commit(struct drm_crtc *crtc) | |||
| 691 | CRTCEXT0 has to be programmed last to trigger an update and make the | 691 | CRTCEXT0 has to be programmed last to trigger an update and make the |
| 692 | new addr variable take effect. | 692 | new addr variable take effect. |
| 693 | */ | 693 | */ |
| 694 | void mga_set_start_address(struct drm_crtc *crtc, unsigned offset) | 694 | static void mga_set_start_address(struct drm_crtc *crtc, unsigned offset) |
| 695 | { | 695 | { |
| 696 | struct mga_device *mdev = crtc->dev->dev_private; | 696 | struct mga_device *mdev = crtc->dev->dev_private; |
| 697 | u32 addr; | 697 | u32 addr; |
| @@ -1398,7 +1398,7 @@ static void mga_encoder_commit(struct drm_encoder *encoder) | |||
| 1398 | { | 1398 | { |
| 1399 | } | 1399 | } |
| 1400 | 1400 | ||
| 1401 | void mga_encoder_destroy(struct drm_encoder *encoder) | 1401 | static void mga_encoder_destroy(struct drm_encoder *encoder) |
| 1402 | { | 1402 | { |
| 1403 | struct mga_encoder *mga_encoder = to_mga_encoder(encoder); | 1403 | struct mga_encoder *mga_encoder = to_mga_encoder(encoder); |
| 1404 | drm_encoder_cleanup(encoder); | 1404 | drm_encoder_cleanup(encoder); |
| @@ -1558,7 +1558,7 @@ static int mga_vga_mode_valid(struct drm_connector *connector, | |||
| 1558 | return MODE_OK; | 1558 | return MODE_OK; |
| 1559 | } | 1559 | } |
| 1560 | 1560 | ||
| 1561 | struct drm_encoder *mga_connector_best_encoder(struct drm_connector | 1561 | static struct drm_encoder *mga_connector_best_encoder(struct drm_connector |
| 1562 | *connector) | 1562 | *connector) |
| 1563 | { | 1563 | { |
| 1564 | int enc_id = connector->encoder_ids[0]; | 1564 | int enc_id = connector->encoder_ids[0]; |
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c index 07b192fe15c6..adb5166a5dfd 100644 --- a/drivers/gpu/drm/mgag200/mgag200_ttm.c +++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c | |||
| @@ -80,7 +80,7 @@ static int mgag200_ttm_global_init(struct mga_device *ast) | |||
| 80 | return 0; | 80 | return 0; |
| 81 | } | 81 | } |
| 82 | 82 | ||
| 83 | void | 83 | static void |
| 84 | mgag200_ttm_global_release(struct mga_device *ast) | 84 | mgag200_ttm_global_release(struct mga_device *ast) |
| 85 | { | 85 | { |
| 86 | if (ast->ttm.mem_global_ref.release == NULL) | 86 | if (ast->ttm.mem_global_ref.release == NULL) |
| @@ -102,7 +102,7 @@ static void mgag200_bo_ttm_destroy(struct ttm_buffer_object *tbo) | |||
| 102 | kfree(bo); | 102 | kfree(bo); |
| 103 | } | 103 | } |
| 104 | 104 | ||
| 105 | bool mgag200_ttm_bo_is_mgag200_bo(struct ttm_buffer_object *bo) | 105 | static bool mgag200_ttm_bo_is_mgag200_bo(struct ttm_buffer_object *bo) |
| 106 | { | 106 | { |
| 107 | if (bo->destroy == &mgag200_bo_ttm_destroy) | 107 | if (bo->destroy == &mgag200_bo_ttm_destroy) |
| 108 | return true; | 108 | return true; |
| @@ -208,7 +208,7 @@ static struct ttm_backend_func mgag200_tt_backend_func = { | |||
| 208 | }; | 208 | }; |
| 209 | 209 | ||
| 210 | 210 | ||
| 211 | struct ttm_tt *mgag200_ttm_tt_create(struct ttm_bo_device *bdev, | 211 | static struct ttm_tt *mgag200_ttm_tt_create(struct ttm_bo_device *bdev, |
| 212 | unsigned long size, uint32_t page_flags, | 212 | unsigned long size, uint32_t page_flags, |
| 213 | struct page *dummy_read_page) | 213 | struct page *dummy_read_page) |
| 214 | { | 214 | { |
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index d3de8e1ae915..c69d1e07a3a6 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig | |||
| @@ -2,9 +2,8 @@ | |||
| 2 | config DRM_MSM | 2 | config DRM_MSM |
| 3 | tristate "MSM DRM" | 3 | tristate "MSM DRM" |
| 4 | depends on DRM | 4 | depends on DRM |
| 5 | depends on ARCH_MSM | ||
| 6 | depends on ARCH_MSM8960 | ||
| 7 | depends on MSM_IOMMU | 5 | depends on MSM_IOMMU |
| 6 | depends on (ARCH_MSM && ARCH_MSM8960) || (ARM && COMPILE_TEST) | ||
| 8 | select DRM_KMS_HELPER | 7 | select DRM_KMS_HELPER |
| 9 | select SHMEM | 8 | select SHMEM |
| 10 | select TMPFS | 9 | select TMPFS |
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index e5fa12b0d21e..4f977a593bea 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile | |||
| @@ -12,18 +12,27 @@ msm-y := \ | |||
| 12 | hdmi/hdmi_i2c.o \ | 12 | hdmi/hdmi_i2c.o \ |
| 13 | hdmi/hdmi_phy_8960.o \ | 13 | hdmi/hdmi_phy_8960.o \ |
| 14 | hdmi/hdmi_phy_8x60.o \ | 14 | hdmi/hdmi_phy_8x60.o \ |
| 15 | mdp4/mdp4_crtc.o \ | 15 | hdmi/hdmi_phy_8x74.o \ |
| 16 | mdp4/mdp4_dtv_encoder.o \ | 16 | mdp/mdp_format.o \ |
| 17 | mdp4/mdp4_format.o \ | 17 | mdp/mdp_kms.o \ |
| 18 | mdp4/mdp4_irq.o \ | 18 | mdp/mdp4/mdp4_crtc.o \ |
| 19 | mdp4/mdp4_kms.o \ | 19 | mdp/mdp4/mdp4_dtv_encoder.o \ |
| 20 | mdp4/mdp4_plane.o \ | 20 | mdp/mdp4/mdp4_irq.o \ |
| 21 | mdp/mdp4/mdp4_kms.o \ | ||
| 22 | mdp/mdp4/mdp4_plane.o \ | ||
| 23 | mdp/mdp5/mdp5_crtc.o \ | ||
| 24 | mdp/mdp5/mdp5_encoder.o \ | ||
| 25 | mdp/mdp5/mdp5_irq.o \ | ||
| 26 | mdp/mdp5/mdp5_kms.o \ | ||
| 27 | mdp/mdp5/mdp5_plane.o \ | ||
| 28 | mdp/mdp5/mdp5_smp.o \ | ||
| 21 | msm_drv.o \ | 29 | msm_drv.o \ |
| 22 | msm_fb.o \ | 30 | msm_fb.o \ |
| 23 | msm_gem.o \ | 31 | msm_gem.o \ |
| 24 | msm_gem_prime.o \ | 32 | msm_gem_prime.o \ |
| 25 | msm_gem_submit.o \ | 33 | msm_gem_submit.o \ |
| 26 | msm_gpu.o \ | 34 | msm_gpu.o \ |
| 35 | msm_iommu.o \ | ||
| 27 | msm_ringbuffer.o | 36 | msm_ringbuffer.o |
| 28 | 37 | ||
| 29 | msm-$(CONFIG_DRM_MSM_FBDEV) += msm_fbdev.o | 38 | msm-$(CONFIG_DRM_MSM_FBDEV) += msm_fbdev.o |
diff --git a/drivers/gpu/drm/msm/NOTES b/drivers/gpu/drm/msm/NOTES index e036f6c1db94..9c4255b98021 100644 --- a/drivers/gpu/drm/msm/NOTES +++ b/drivers/gpu/drm/msm/NOTES | |||
| @@ -4,7 +4,7 @@ In the current snapdragon SoC's, we have (at least) 3 different | |||
| 4 | display controller blocks at play: | 4 | display controller blocks at play: |
| 5 | + MDP3 - ?? seems to be what is on geeksphone peak device | 5 | + MDP3 - ?? seems to be what is on geeksphone peak device |
| 6 | + MDP4 - S3 (APQ8060, touchpad), S4-pro (APQ8064, nexus4 & ifc6410) | 6 | + MDP4 - S3 (APQ8060, touchpad), S4-pro (APQ8064, nexus4 & ifc6410) |
| 7 | + MDSS - snapdragon 800 | 7 | + MDP5 - snapdragon 800 |
| 8 | 8 | ||
| 9 | (I don't have a completely clear picture on which display controller | 9 | (I don't have a completely clear picture on which display controller |
| 10 | maps to which part #) | 10 | maps to which part #) |
| @@ -46,6 +46,24 @@ and treat the MDP4 block's irq as "the" irq. Even though the connectors | |||
| 46 | may have their own irqs which they install themselves. For this reason | 46 | may have their own irqs which they install themselves. For this reason |
| 47 | the display controller is the "master" device. | 47 | the display controller is the "master" device. |
| 48 | 48 | ||
| 49 | For MDP5, the mapping is: | ||
| 50 | |||
| 51 | plane -> PIPE{RGBn,VIGn} \ | ||
| 52 | crtc -> LM (layer mixer) |-> MDP "device" | ||
| 53 | encoder -> INTF / | ||
| 54 | connector -> HDMI/DSI/eDP/etc --> other device(s) | ||
| 55 | |||
| 56 | Unlike MDP4, it appears we can get by with a single encoder, rather | ||
| 57 | than needing a different implementation for DTV, DSI, etc. (Ie. the | ||
| 58 | register interface is same, just different bases.) | ||
| 59 | |||
| 60 | Also unlike MDP4, with MDP5 all the IRQs for other blocks (HDMI, DSI, | ||
| 61 | etc) are routed through MDP. | ||
| 62 | |||
| 63 | And finally, MDP5 has this "Shared Memory Pool" (called "SMP"), from | ||
| 64 | which blocks need to be allocated to the active pipes based on fetch | ||
| 65 | stride. | ||
| 66 | |||
| 49 | Each connector probably ends up being a separate device, just for the | 67 | Each connector probably ends up being a separate device, just for the |
| 50 | logistics of finding/mapping io region, irq, etc. Idealy we would | 68 | logistics of finding/mapping io region, irq, etc. Idealy we would |
| 51 | have a better way than just stashing the platform device in a global | 69 | have a better way than just stashing the platform device in a global |
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h index 9588098741b5..85d615e7d62f 100644 --- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h +++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h | |||
| @@ -8,12 +8,13 @@ http://github.com/freedreno/envytools/ | |||
| 8 | git clone https://github.com/freedreno/envytools.git | 8 | git clone https://github.com/freedreno/envytools.git |
| 9 | 9 | ||
| 10 | The rules-ng-ng source files this header was generated from are: | 10 | The rules-ng-ng source files this header was generated from are: |
| 11 | - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12) | 11 | - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15) |
| 12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) | 12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) |
| 13 | - /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 31003 bytes, from 2013-09-19 18:50:16) | 13 | - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32814 bytes, from 2013-11-30 15:07:33) |
| 14 | - /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36) | 14 | - /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 8900 bytes, from 2013-10-22 23:57:49) |
| 15 | - /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9759 bytes, from 2013-09-10 00:52:33) | 15 | - /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 10574 bytes, from 2013-11-13 05:44:45) |
| 16 | - /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51983 bytes, from 2013-09-10 00:52:32) | 16 | - /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 53644 bytes, from 2013-11-30 15:07:33) |
| 17 | - /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 8344 bytes, from 2013-11-30 14:49:47) | ||
| 17 | 18 | ||
| 18 | Copyright (C) 2013 by the following authors: | 19 | Copyright (C) 2013 by the following authors: |
| 19 | - Rob Clark <robdclark@gmail.com> (robclark) | 20 | - Rob Clark <robdclark@gmail.com> (robclark) |
| @@ -202,6 +203,12 @@ enum a2xx_rb_copy_sample_select { | |||
| 202 | SAMPLE_0123 = 6, | 203 | SAMPLE_0123 = 6, |
| 203 | }; | 204 | }; |
| 204 | 205 | ||
| 206 | enum adreno_mmu_clnt_beh { | ||
| 207 | BEH_NEVR = 0, | ||
| 208 | BEH_TRAN_RNG = 1, | ||
| 209 | BEH_TRAN_FLT = 2, | ||
| 210 | }; | ||
| 211 | |||
| 205 | enum sq_tex_clamp { | 212 | enum sq_tex_clamp { |
| 206 | SQ_TEX_WRAP = 0, | 213 | SQ_TEX_WRAP = 0, |
| 207 | SQ_TEX_MIRROR = 1, | 214 | SQ_TEX_MIRROR = 1, |
| @@ -238,6 +245,92 @@ enum sq_tex_filter { | |||
| 238 | 245 | ||
| 239 | #define REG_A2XX_CP_PFP_UCODE_DATA 0x000000c1 | 246 | #define REG_A2XX_CP_PFP_UCODE_DATA 0x000000c1 |
| 240 | 247 | ||
| 248 | #define REG_A2XX_MH_MMU_CONFIG 0x00000040 | ||
| 249 | #define A2XX_MH_MMU_CONFIG_MMU_ENABLE 0x00000001 | ||
| 250 | #define A2XX_MH_MMU_CONFIG_SPLIT_MODE_ENABLE 0x00000002 | ||
| 251 | #define A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK 0x00000030 | ||
| 252 | #define A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT 4 | ||
| 253 | static inline uint32_t A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) | ||
| 254 | { | ||
| 255 | return ((val) << A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK; | ||
| 256 | } | ||
| 257 | #define A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK 0x000000c0 | ||
| 258 | #define A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT 6 | ||
| 259 | static inline uint32_t A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) | ||
| 260 | { | ||
| 261 | return ((val) << A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK; | ||
| 262 | } | ||
| 263 | #define A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK 0x00000300 | ||
| 264 | #define A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT 8 | ||
| 265 | static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) | ||
| 266 | { | ||
| 267 | return ((val) << A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK; | ||
| 268 | } | ||
| 269 | #define A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK 0x00000c00 | ||
| 270 | #define A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT 10 | ||
| 271 | static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) | ||
| 272 | { | ||
| 273 | return ((val) << A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK; | ||
| 274 | } | ||
| 275 | #define A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK 0x00003000 | ||
| 276 | #define A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT 12 | ||
| 277 | static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) | ||
| 278 | { | ||
| 279 | return ((val) << A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK; | ||
| 280 | } | ||
| 281 | #define A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK 0x0000c000 | ||
| 282 | #define A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT 14 | ||
| 283 | static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) | ||
| 284 | { | ||
| 285 | return ((val) << A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK; | ||
| 286 | } | ||
| 287 | #define A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK 0x00030000 | ||
| 288 | #define A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT 16 | ||
| 289 | static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) | ||
| 290 | { | ||
| 291 | return ((val) << A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK; | ||
| 292 | } | ||
| 293 | #define A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK 0x000c0000 | ||
| 294 | #define A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT 18 | ||
| 295 | static inline uint32_t A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) | ||
| 296 | { | ||
| 297 | return ((val) << A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK; | ||
| 298 | } | ||
| 299 | #define A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK 0x00300000 | ||
| 300 | #define A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT 20 | ||
| 301 | static inline uint32_t A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) | ||
| 302 | { | ||
| 303 | return ((val) << A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK; | ||
| 304 | } | ||
| 305 | #define A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK 0x00c00000 | ||
| 306 | #define A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT 22 | ||
| 307 | static inline uint32_t A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) | ||
| 308 | { | ||
| 309 | return ((val) << A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK; | ||
| 310 | } | ||
| 311 | #define A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK 0x03000000 | ||
| 312 | #define A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT 24 | ||
| 313 | static inline uint32_t A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) | ||
| 314 | { | ||
| 315 | return ((val) << A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK; | ||
| 316 | } | ||
| 317 | |||
| 318 | #define REG_A2XX_MH_MMU_VA_RANGE 0x00000041 | ||
| 319 | |||
| 320 | #define REG_A2XX_MH_MMU_PT_BASE 0x00000042 | ||
| 321 | |||
| 322 | #define REG_A2XX_MH_MMU_PAGE_FAULT 0x00000043 | ||
| 323 | |||
| 324 | #define REG_A2XX_MH_MMU_TRAN_ERROR 0x00000044 | ||
| 325 | |||
| 326 | #define REG_A2XX_MH_MMU_INVALIDATE 0x00000045 | ||
| 327 | |||
| 328 | #define REG_A2XX_MH_MMU_MPU_BASE 0x00000046 | ||
| 329 | |||
| 330 | #define REG_A2XX_MH_MMU_MPU_END 0x00000047 | ||
| 331 | |||
| 332 | #define REG_A2XX_NQWAIT_UNTIL 0x00000394 | ||
| 333 | |||
| 241 | #define REG_A2XX_RBBM_PERFCOUNTER1_SELECT 0x00000395 | 334 | #define REG_A2XX_RBBM_PERFCOUNTER1_SELECT 0x00000395 |
| 242 | 335 | ||
| 243 | #define REG_A2XX_RBBM_PERFCOUNTER1_LO 0x00000397 | 336 | #define REG_A2XX_RBBM_PERFCOUNTER1_LO 0x00000397 |
| @@ -276,20 +369,6 @@ enum sq_tex_filter { | |||
| 276 | 369 | ||
| 277 | #define REG_A2XX_CP_PERFCOUNTER_HI 0x00000447 | 370 | #define REG_A2XX_CP_PERFCOUNTER_HI 0x00000447 |
| 278 | 371 | ||
| 279 | #define REG_A2XX_CP_ST_BASE 0x0000044d | ||
| 280 | |||
| 281 | #define REG_A2XX_CP_ST_BUFSZ 0x0000044e | ||
| 282 | |||
| 283 | #define REG_A2XX_CP_IB1_BASE 0x00000458 | ||
| 284 | |||
| 285 | #define REG_A2XX_CP_IB1_BUFSZ 0x00000459 | ||
| 286 | |||
| 287 | #define REG_A2XX_CP_IB2_BASE 0x0000045a | ||
| 288 | |||
| 289 | #define REG_A2XX_CP_IB2_BUFSZ 0x0000045b | ||
| 290 | |||
| 291 | #define REG_A2XX_CP_STAT 0x0000047f | ||
| 292 | |||
| 293 | #define REG_A2XX_RBBM_STATUS 0x000005d0 | 372 | #define REG_A2XX_RBBM_STATUS 0x000005d0 |
| 294 | #define A2XX_RBBM_STATUS_CMDFIFO_AVAIL__MASK 0x0000001f | 373 | #define A2XX_RBBM_STATUS_CMDFIFO_AVAIL__MASK 0x0000001f |
| 295 | #define A2XX_RBBM_STATUS_CMDFIFO_AVAIL__SHIFT 0 | 374 | #define A2XX_RBBM_STATUS_CMDFIFO_AVAIL__SHIFT 0 |
| @@ -808,6 +887,12 @@ static inline uint32_t A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS(uint32_t val) | |||
| 808 | 887 | ||
| 809 | #define REG_A2XX_SQ_VS_PROGRAM 0x000021f7 | 888 | #define REG_A2XX_SQ_VS_PROGRAM 0x000021f7 |
| 810 | 889 | ||
| 890 | #define REG_A2XX_VGT_EVENT_INITIATOR 0x000021f9 | ||
| 891 | |||
| 892 | #define REG_A2XX_VGT_DRAW_INITIATOR 0x000021fc | ||
| 893 | |||
| 894 | #define REG_A2XX_VGT_IMMED_DATA 0x000021fd | ||
| 895 | |||
| 811 | #define REG_A2XX_RB_DEPTHCONTROL 0x00002200 | 896 | #define REG_A2XX_RB_DEPTHCONTROL 0x00002200 |
| 812 | #define A2XX_RB_DEPTHCONTROL_STENCIL_ENABLE 0x00000001 | 897 | #define A2XX_RB_DEPTHCONTROL_STENCIL_ENABLE 0x00000001 |
| 813 | #define A2XX_RB_DEPTHCONTROL_Z_ENABLE 0x00000002 | 898 | #define A2XX_RB_DEPTHCONTROL_Z_ENABLE 0x00000002 |
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h index d4afdf657559..a7be56163d23 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h +++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h | |||
| @@ -8,12 +8,13 @@ http://github.com/freedreno/envytools/ | |||
| 8 | git clone https://github.com/freedreno/envytools.git | 8 | git clone https://github.com/freedreno/envytools.git |
| 9 | 9 | ||
| 10 | The rules-ng-ng source files this header was generated from are: | 10 | The rules-ng-ng source files this header was generated from are: |
| 11 | - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12) | 11 | - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15) |
| 12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) | 12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) |
| 13 | - /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 31003 bytes, from 2013-09-19 18:50:16) | 13 | - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32814 bytes, from 2013-11-30 15:07:33) |
| 14 | - /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36) | 14 | - /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 8900 bytes, from 2013-10-22 23:57:49) |
| 15 | - /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9759 bytes, from 2013-09-10 00:52:33) | 15 | - /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 10574 bytes, from 2013-11-13 05:44:45) |
| 16 | - /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51983 bytes, from 2013-09-10 00:52:32) | 16 | - /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 53644 bytes, from 2013-11-30 15:07:33) |
| 17 | - /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 8344 bytes, from 2013-11-30 14:49:47) | ||
| 17 | 18 | ||
| 18 | Copyright (C) 2013 by the following authors: | 19 | Copyright (C) 2013 by the following authors: |
| 19 | - Rob Clark <robdclark@gmail.com> (robclark) | 20 | - Rob Clark <robdclark@gmail.com> (robclark) |
| @@ -292,6 +293,8 @@ enum a3xx_tex_type { | |||
| 292 | #define A3XX_RBBM_STATUS_GPU_BUSY_NOHC 0x40000000 | 293 | #define A3XX_RBBM_STATUS_GPU_BUSY_NOHC 0x40000000 |
| 293 | #define A3XX_RBBM_STATUS_GPU_BUSY 0x80000000 | 294 | #define A3XX_RBBM_STATUS_GPU_BUSY 0x80000000 |
| 294 | 295 | ||
| 296 | #define REG_A3XX_RBBM_NQWAIT_UNTIL 0x00000040 | ||
| 297 | |||
| 295 | #define REG_A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL 0x00000033 | 298 | #define REG_A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL 0x00000033 |
| 296 | 299 | ||
| 297 | #define REG_A3XX_RBBM_INTERFACE_HANG_INT_CTL 0x00000050 | 300 | #define REG_A3XX_RBBM_INTERFACE_HANG_INT_CTL 0x00000050 |
| @@ -304,6 +307,8 @@ enum a3xx_tex_type { | |||
| 304 | 307 | ||
| 305 | #define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL3 0x0000005a | 308 | #define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL3 0x0000005a |
| 306 | 309 | ||
| 310 | #define REG_A3XX_RBBM_INT_SET_CMD 0x00000060 | ||
| 311 | |||
| 307 | #define REG_A3XX_RBBM_INT_CLEAR_CMD 0x00000061 | 312 | #define REG_A3XX_RBBM_INT_CLEAR_CMD 0x00000061 |
| 308 | 313 | ||
| 309 | #define REG_A3XX_RBBM_INT_0_MASK 0x00000063 | 314 | #define REG_A3XX_RBBM_INT_0_MASK 0x00000063 |
| @@ -937,13 +942,13 @@ static inline uint32_t A3XX_RB_BLEND_ALPHA_FLOAT(float val) | |||
| 937 | return ((util_float_to_half(val)) << A3XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A3XX_RB_BLEND_ALPHA_FLOAT__MASK; | 942 | return ((util_float_to_half(val)) << A3XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A3XX_RB_BLEND_ALPHA_FLOAT__MASK; |
| 938 | } | 943 | } |
| 939 | 944 | ||
| 940 | #define REG_A3XX_UNKNOWN_20E8 0x000020e8 | 945 | #define REG_A3XX_RB_CLEAR_COLOR_DW0 0x000020e8 |
| 941 | 946 | ||
| 942 | #define REG_A3XX_UNKNOWN_20E9 0x000020e9 | 947 | #define REG_A3XX_RB_CLEAR_COLOR_DW1 0x000020e9 |
| 943 | 948 | ||
| 944 | #define REG_A3XX_UNKNOWN_20EA 0x000020ea | 949 | #define REG_A3XX_RB_CLEAR_COLOR_DW2 0x000020ea |
| 945 | 950 | ||
| 946 | #define REG_A3XX_UNKNOWN_20EB 0x000020eb | 951 | #define REG_A3XX_RB_CLEAR_COLOR_DW3 0x000020eb |
| 947 | 952 | ||
| 948 | #define REG_A3XX_RB_COPY_CONTROL 0x000020ec | 953 | #define REG_A3XX_RB_COPY_CONTROL 0x000020ec |
| 949 | #define A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK 0x00000003 | 954 | #define A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK 0x00000003 |
| @@ -1026,7 +1031,7 @@ static inline uint32_t A3XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val) | |||
| 1026 | #define A3XX_RB_DEPTH_CONTROL_BF_ENABLE 0x00000080 | 1031 | #define A3XX_RB_DEPTH_CONTROL_BF_ENABLE 0x00000080 |
| 1027 | #define A3XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x80000000 | 1032 | #define A3XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x80000000 |
| 1028 | 1033 | ||
| 1029 | #define REG_A3XX_UNKNOWN_2101 0x00002101 | 1034 | #define REG_A3XX_RB_DEPTH_CLEAR 0x00002101 |
| 1030 | 1035 | ||
| 1031 | #define REG_A3XX_RB_DEPTH_INFO 0x00002102 | 1036 | #define REG_A3XX_RB_DEPTH_INFO 0x00002102 |
| 1032 | #define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000001 | 1037 | #define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000001 |
| @@ -1103,11 +1108,11 @@ static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op v | |||
| 1103 | return ((val) << A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK; | 1108 | return ((val) << A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK; |
| 1104 | } | 1109 | } |
| 1105 | 1110 | ||
| 1106 | #define REG_A3XX_UNKNOWN_2105 0x00002105 | 1111 | #define REG_A3XX_RB_STENCIL_CLEAR 0x00002105 |
| 1107 | 1112 | ||
| 1108 | #define REG_A3XX_UNKNOWN_2106 0x00002106 | 1113 | #define REG_A3XX_RB_STENCIL_BUF_INFO 0x00002106 |
| 1109 | 1114 | ||
| 1110 | #define REG_A3XX_UNKNOWN_2107 0x00002107 | 1115 | #define REG_A3XX_RB_STENCIL_BUF_PITCH 0x00002107 |
| 1111 | 1116 | ||
| 1112 | #define REG_A3XX_RB_STENCILREFMASK 0x00002108 | 1117 | #define REG_A3XX_RB_STENCILREFMASK 0x00002108 |
| 1113 | #define A3XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff | 1118 | #define A3XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff |
| @@ -1149,20 +1154,31 @@ static inline uint32_t A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val) | |||
| 1149 | return ((val) << A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK; | 1154 | return ((val) << A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK; |
| 1150 | } | 1155 | } |
| 1151 | 1156 | ||
| 1152 | #define REG_A3XX_PA_SC_WINDOW_OFFSET 0x0000210e | 1157 | #define REG_A3XX_RB_LRZ_VSC_CONTROL 0x0000210c |
| 1153 | #define A3XX_PA_SC_WINDOW_OFFSET_X__MASK 0x0000ffff | 1158 | #define A3XX_RB_LRZ_VSC_CONTROL_BINNING_ENABLE 0x00000002 |
| 1154 | #define A3XX_PA_SC_WINDOW_OFFSET_X__SHIFT 0 | 1159 | |
| 1155 | static inline uint32_t A3XX_PA_SC_WINDOW_OFFSET_X(uint32_t val) | 1160 | #define REG_A3XX_RB_WINDOW_OFFSET 0x0000210e |
| 1161 | #define A3XX_RB_WINDOW_OFFSET_X__MASK 0x0000ffff | ||
| 1162 | #define A3XX_RB_WINDOW_OFFSET_X__SHIFT 0 | ||
| 1163 | static inline uint32_t A3XX_RB_WINDOW_OFFSET_X(uint32_t val) | ||
| 1156 | { | 1164 | { |
| 1157 | return ((val) << A3XX_PA_SC_WINDOW_OFFSET_X__SHIFT) & A3XX_PA_SC_WINDOW_OFFSET_X__MASK; | 1165 | return ((val) << A3XX_RB_WINDOW_OFFSET_X__SHIFT) & A3XX_RB_WINDOW_OFFSET_X__MASK; |
| 1158 | } | 1166 | } |
| 1159 | #define A3XX_PA_SC_WINDOW_OFFSET_Y__MASK 0xffff0000 | 1167 | #define A3XX_RB_WINDOW_OFFSET_Y__MASK 0xffff0000 |
| 1160 | #define A3XX_PA_SC_WINDOW_OFFSET_Y__SHIFT 16 | 1168 | #define A3XX_RB_WINDOW_OFFSET_Y__SHIFT 16 |
| 1161 | static inline uint32_t A3XX_PA_SC_WINDOW_OFFSET_Y(uint32_t val) | 1169 | static inline uint32_t A3XX_RB_WINDOW_OFFSET_Y(uint32_t val) |
| 1162 | { | 1170 | { |
| 1163 | return ((val) << A3XX_PA_SC_WINDOW_OFFSET_Y__SHIFT) & A3XX_PA_SC_WINDOW_OFFSET_Y__MASK; | 1171 | return ((val) << A3XX_RB_WINDOW_OFFSET_Y__SHIFT) & A3XX_RB_WINDOW_OFFSET_Y__MASK; |
| 1164 | } | 1172 | } |
| 1165 | 1173 | ||
| 1174 | #define REG_A3XX_RB_SAMPLE_COUNT_CONTROL 0x00002110 | ||
| 1175 | |||
| 1176 | #define REG_A3XX_RB_SAMPLE_COUNT_ADDR 0x00002111 | ||
| 1177 | |||
| 1178 | #define REG_A3XX_RB_Z_CLAMP_MIN 0x00002114 | ||
| 1179 | |||
| 1180 | #define REG_A3XX_RB_Z_CLAMP_MAX 0x00002115 | ||
| 1181 | |||
| 1166 | #define REG_A3XX_PC_VSTREAM_CONTROL 0x000021e4 | 1182 | #define REG_A3XX_PC_VSTREAM_CONTROL 0x000021e4 |
| 1167 | 1183 | ||
| 1168 | #define REG_A3XX_PC_VERTEX_REUSE_BLOCK_CNTL 0x000021ea | 1184 | #define REG_A3XX_PC_VERTEX_REUSE_BLOCK_CNTL 0x000021ea |
| @@ -1309,6 +1325,8 @@ static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY(uint32_t val) | |||
| 1309 | 1325 | ||
| 1310 | #define REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG 0x00002215 | 1326 | #define REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG 0x00002215 |
| 1311 | 1327 | ||
| 1328 | #define REG_A3XX_HLSQ_CL_KERNEL_GROUP_Y_REG 0x00002216 | ||
| 1329 | |||
| 1312 | #define REG_A3XX_HLSQ_CL_KERNEL_GROUP_Z_REG 0x00002217 | 1330 | #define REG_A3XX_HLSQ_CL_KERNEL_GROUP_Z_REG 0x00002217 |
| 1313 | 1331 | ||
| 1314 | #define REG_A3XX_HLSQ_CL_WG_OFFSET_REG 0x0000221a | 1332 | #define REG_A3XX_HLSQ_CL_WG_OFFSET_REG 0x0000221a |
| @@ -1491,12 +1509,13 @@ static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0 | |||
| 1491 | 1509 | ||
| 1492 | #define REG_A3XX_SP_SP_CTRL_REG 0x000022c0 | 1510 | #define REG_A3XX_SP_SP_CTRL_REG 0x000022c0 |
| 1493 | #define A3XX_SP_SP_CTRL_REG_RESOLVE 0x00010000 | 1511 | #define A3XX_SP_SP_CTRL_REG_RESOLVE 0x00010000 |
| 1494 | #define A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK 0x000c0000 | 1512 | #define A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK 0x00040000 |
| 1495 | #define A3XX_SP_SP_CTRL_REG_CONSTMODE__SHIFT 18 | 1513 | #define A3XX_SP_SP_CTRL_REG_CONSTMODE__SHIFT 18 |
| 1496 | static inline uint32_t A3XX_SP_SP_CTRL_REG_CONSTMODE(uint32_t val) | 1514 | static inline uint32_t A3XX_SP_SP_CTRL_REG_CONSTMODE(uint32_t val) |
| 1497 | { | 1515 | { |
| 1498 | return ((val) << A3XX_SP_SP_CTRL_REG_CONSTMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK; | 1516 | return ((val) << A3XX_SP_SP_CTRL_REG_CONSTMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK; |
| 1499 | } | 1517 | } |
| 1518 | #define A3XX_SP_SP_CTRL_REG_BINNING 0x00080000 | ||
| 1500 | #define A3XX_SP_SP_CTRL_REG_SLEEPMODE__MASK 0x00300000 | 1519 | #define A3XX_SP_SP_CTRL_REG_SLEEPMODE__MASK 0x00300000 |
| 1501 | #define A3XX_SP_SP_CTRL_REG_SLEEPMODE__SHIFT 20 | 1520 | #define A3XX_SP_SP_CTRL_REG_SLEEPMODE__SHIFT 20 |
| 1502 | static inline uint32_t A3XX_SP_SP_CTRL_REG_SLEEPMODE(uint32_t val) | 1521 | static inline uint32_t A3XX_SP_SP_CTRL_REG_SLEEPMODE(uint32_t val) |
| @@ -1669,7 +1688,7 @@ static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val) | |||
| 1669 | 1688 | ||
| 1670 | #define REG_A3XX_SP_VS_OBJ_START_REG 0x000022d5 | 1689 | #define REG_A3XX_SP_VS_OBJ_START_REG 0x000022d5 |
| 1671 | 1690 | ||
| 1672 | #define REG_A3XX_SP_VS_PVT_MEM_CTRL_REG 0x000022d6 | 1691 | #define REG_A3XX_SP_VS_PVT_MEM_PARAM_REG 0x000022d6 |
| 1673 | 1692 | ||
| 1674 | #define REG_A3XX_SP_VS_PVT_MEM_ADDR_REG 0x000022d7 | 1693 | #define REG_A3XX_SP_VS_PVT_MEM_ADDR_REG 0x000022d7 |
| 1675 | 1694 | ||
| @@ -1772,7 +1791,7 @@ static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val) | |||
| 1772 | 1791 | ||
| 1773 | #define REG_A3XX_SP_FS_OBJ_START_REG 0x000022e3 | 1792 | #define REG_A3XX_SP_FS_OBJ_START_REG 0x000022e3 |
| 1774 | 1793 | ||
| 1775 | #define REG_A3XX_SP_FS_PVT_MEM_CTRL_REG 0x000022e4 | 1794 | #define REG_A3XX_SP_FS_PVT_MEM_PARAM_REG 0x000022e4 |
| 1776 | 1795 | ||
| 1777 | #define REG_A3XX_SP_FS_PVT_MEM_ADDR_REG 0x000022e5 | 1796 | #define REG_A3XX_SP_FS_PVT_MEM_ADDR_REG 0x000022e5 |
| 1778 | 1797 | ||
| @@ -1943,6 +1962,9 @@ static inline uint32_t REG_A3XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00 | |||
| 1943 | 1962 | ||
| 1944 | static inline uint32_t REG_A3XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c08 + 0x3*i0; } | 1963 | static inline uint32_t REG_A3XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c08 + 0x3*i0; } |
| 1945 | 1964 | ||
| 1965 | #define REG_A3XX_VSC_BIN_CONTROL 0x00000c3c | ||
| 1966 | #define A3XX_VSC_BIN_CONTROL_BINNING_ENABLE 0x00000001 | ||
| 1967 | |||
| 1946 | #define REG_A3XX_UNKNOWN_0C3D 0x00000c3d | 1968 | #define REG_A3XX_UNKNOWN_0C3D 0x00000c3d |
| 1947 | 1969 | ||
| 1948 | #define REG_A3XX_PC_PERFCOUNTER0_SELECT 0x00000c48 | 1970 | #define REG_A3XX_PC_PERFCOUNTER0_SELECT 0x00000c48 |
| @@ -1953,7 +1975,7 @@ static inline uint32_t REG_A3XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x000 | |||
| 1953 | 1975 | ||
| 1954 | #define REG_A3XX_PC_PERFCOUNTER3_SELECT 0x00000c4b | 1976 | #define REG_A3XX_PC_PERFCOUNTER3_SELECT 0x00000c4b |
| 1955 | 1977 | ||
| 1956 | #define REG_A3XX_UNKNOWN_0C81 0x00000c81 | 1978 | #define REG_A3XX_GRAS_TSE_DEBUG_ECO 0x00000c81 |
| 1957 | 1979 | ||
| 1958 | #define REG_A3XX_GRAS_PERFCOUNTER0_SELECT 0x00000c88 | 1980 | #define REG_A3XX_GRAS_PERFCOUNTER0_SELECT 0x00000c88 |
| 1959 | 1981 | ||
| @@ -1975,22 +1997,24 @@ static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_W(uint32_t i0) { return 0x000 | |||
| 1975 | 1997 | ||
| 1976 | #define REG_A3XX_RB_GMEM_BASE_ADDR 0x00000cc0 | 1998 | #define REG_A3XX_RB_GMEM_BASE_ADDR 0x00000cc0 |
| 1977 | 1999 | ||
| 2000 | #define REG_A3XX_RB_DEBUG_ECO_CONTROLS_ADDR 0x00000cc1 | ||
| 2001 | |||
| 1978 | #define REG_A3XX_RB_PERFCOUNTER0_SELECT 0x00000cc6 | 2002 | #define REG_A3XX_RB_PERFCOUNTER0_SELECT 0x00000cc6 |
| 1979 | 2003 | ||
| 1980 | #define REG_A3XX_RB_PERFCOUNTER1_SELECT 0x00000cc7 | 2004 | #define REG_A3XX_RB_PERFCOUNTER1_SELECT 0x00000cc7 |
| 1981 | 2005 | ||
| 1982 | #define REG_A3XX_RB_WINDOW_SIZE 0x00000ce0 | 2006 | #define REG_A3XX_RB_FRAME_BUFFER_DIMENSION 0x00000ce0 |
| 1983 | #define A3XX_RB_WINDOW_SIZE_WIDTH__MASK 0x00003fff | 2007 | #define A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__MASK 0x00003fff |
| 1984 | #define A3XX_RB_WINDOW_SIZE_WIDTH__SHIFT 0 | 2008 | #define A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__SHIFT 0 |
| 1985 | static inline uint32_t A3XX_RB_WINDOW_SIZE_WIDTH(uint32_t val) | 2009 | static inline uint32_t A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH(uint32_t val) |
| 1986 | { | 2010 | { |
| 1987 | return ((val) << A3XX_RB_WINDOW_SIZE_WIDTH__SHIFT) & A3XX_RB_WINDOW_SIZE_WIDTH__MASK; | 2011 | return ((val) << A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__SHIFT) & A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__MASK; |
| 1988 | } | 2012 | } |
| 1989 | #define A3XX_RB_WINDOW_SIZE_HEIGHT__MASK 0x0fffc000 | 2013 | #define A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__MASK 0x0fffc000 |
| 1990 | #define A3XX_RB_WINDOW_SIZE_HEIGHT__SHIFT 14 | 2014 | #define A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__SHIFT 14 |
| 1991 | static inline uint32_t A3XX_RB_WINDOW_SIZE_HEIGHT(uint32_t val) | 2015 | static inline uint32_t A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT(uint32_t val) |
| 1992 | { | 2016 | { |
| 1993 | return ((val) << A3XX_RB_WINDOW_SIZE_HEIGHT__SHIFT) & A3XX_RB_WINDOW_SIZE_HEIGHT__MASK; | 2017 | return ((val) << A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__SHIFT) & A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__MASK; |
| 1994 | } | 2018 | } |
| 1995 | 2019 | ||
| 1996 | #define REG_A3XX_HLSQ_PERFCOUNTER0_SELECT 0x00000e00 | 2020 | #define REG_A3XX_HLSQ_PERFCOUNTER0_SELECT 0x00000e00 |
| @@ -2088,6 +2112,14 @@ static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE(enum a3xx_cache_op | |||
| 2088 | 2112 | ||
| 2089 | #define REG_A3XX_TP_PERFCOUNTER5_SELECT 0x00000f09 | 2113 | #define REG_A3XX_TP_PERFCOUNTER5_SELECT 0x00000f09 |
| 2090 | 2114 | ||
| 2115 | #define REG_A3XX_VGT_CL_INITIATOR 0x000021f0 | ||
| 2116 | |||
| 2117 | #define REG_A3XX_VGT_EVENT_INITIATOR 0x000021f9 | ||
| 2118 | |||
| 2119 | #define REG_A3XX_VGT_DRAW_INITIATOR 0x000021fc | ||
| 2120 | |||
| 2121 | #define REG_A3XX_VGT_IMMED_DATA 0x000021fd | ||
| 2122 | |||
| 2091 | #define REG_A3XX_TEX_SAMP_0 0x00000000 | 2123 | #define REG_A3XX_TEX_SAMP_0 0x00000000 |
| 2092 | #define A3XX_TEX_SAMP_0_MIPFILTER_LINEAR 0x00000002 | 2124 | #define A3XX_TEX_SAMP_0_MIPFILTER_LINEAR 0x00000002 |
| 2093 | #define A3XX_TEX_SAMP_0_XY_MAG__MASK 0x0000000c | 2125 | #define A3XX_TEX_SAMP_0_XY_MAG__MASK 0x0000000c |
| @@ -2123,6 +2155,18 @@ static inline uint32_t A3XX_TEX_SAMP_0_WRAP_R(enum a3xx_tex_clamp val) | |||
| 2123 | #define A3XX_TEX_SAMP_0_UNNORM_COORDS 0x80000000 | 2155 | #define A3XX_TEX_SAMP_0_UNNORM_COORDS 0x80000000 |
| 2124 | 2156 | ||
| 2125 | #define REG_A3XX_TEX_SAMP_1 0x00000001 | 2157 | #define REG_A3XX_TEX_SAMP_1 0x00000001 |
| 2158 | #define A3XX_TEX_SAMP_1_MAX_LOD__MASK 0x003ff000 | ||
| 2159 | #define A3XX_TEX_SAMP_1_MAX_LOD__SHIFT 12 | ||
| 2160 | static inline uint32_t A3XX_TEX_SAMP_1_MAX_LOD(float val) | ||
| 2161 | { | ||
| 2162 | return ((((uint32_t)(val * 12.0))) << A3XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A3XX_TEX_SAMP_1_MAX_LOD__MASK; | ||
| 2163 | } | ||
| 2164 | #define A3XX_TEX_SAMP_1_MIN_LOD__MASK 0xffc00000 | ||
| 2165 | #define A3XX_TEX_SAMP_1_MIN_LOD__SHIFT 22 | ||
| 2166 | static inline uint32_t A3XX_TEX_SAMP_1_MIN_LOD(float val) | ||
| 2167 | { | ||
| 2168 | return ((((uint32_t)(val * 12.0))) << A3XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A3XX_TEX_SAMP_1_MIN_LOD__MASK; | ||
| 2169 | } | ||
| 2126 | 2170 | ||
| 2127 | #define REG_A3XX_TEX_CONST_0 0x00000000 | 2171 | #define REG_A3XX_TEX_CONST_0 0x00000000 |
| 2128 | #define A3XX_TEX_CONST_0_TILED 0x00000001 | 2172 | #define A3XX_TEX_CONST_0_TILED 0x00000001 |
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c index 035bd13dc8bd..461df93e825e 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c | |||
| @@ -15,6 +15,10 @@ | |||
| 15 | * this program. If not, see <http://www.gnu.org/licenses/>. | 15 | * this program. If not, see <http://www.gnu.org/licenses/>. |
| 16 | */ | 16 | */ |
| 17 | 17 | ||
| 18 | #ifdef CONFIG_MSM_OCMEM | ||
| 19 | # include <mach/ocmem.h> | ||
| 20 | #endif | ||
| 21 | |||
| 18 | #include "a3xx_gpu.h" | 22 | #include "a3xx_gpu.h" |
| 19 | 23 | ||
| 20 | #define A3XX_INT0_MASK \ | 24 | #define A3XX_INT0_MASK \ |
| @@ -63,6 +67,7 @@ static void a3xx_me_init(struct msm_gpu *gpu) | |||
| 63 | static int a3xx_hw_init(struct msm_gpu *gpu) | 67 | static int a3xx_hw_init(struct msm_gpu *gpu) |
| 64 | { | 68 | { |
| 65 | struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); | 69 | struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); |
| 70 | struct a3xx_gpu *a3xx_gpu = to_a3xx_gpu(adreno_gpu); | ||
| 66 | uint32_t *ptr, len; | 71 | uint32_t *ptr, len; |
| 67 | int i, ret; | 72 | int i, ret; |
| 68 | 73 | ||
| @@ -105,6 +110,21 @@ static int a3xx_hw_init(struct msm_gpu *gpu) | |||
| 105 | gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x000000ff); | 110 | gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x000000ff); |
| 106 | gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4); | 111 | gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4); |
| 107 | 112 | ||
| 113 | } else if (adreno_is_a330v2(adreno_gpu)) { | ||
| 114 | /* | ||
| 115 | * Most of the VBIF registers on 8974v2 have the correct | ||
| 116 | * values at power on, so we won't modify those if we don't | ||
| 117 | * need to | ||
| 118 | */ | ||
| 119 | /* Enable 1k sort: */ | ||
| 120 | gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001003f); | ||
| 121 | gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4); | ||
| 122 | /* Enable WR-REQ: */ | ||
| 123 | gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x00003f); | ||
| 124 | gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303); | ||
| 125 | /* Set up VBIF_ROUND_ROBIN_QOS_ARB: */ | ||
| 126 | gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003); | ||
| 127 | |||
| 108 | } else if (adreno_is_a330(adreno_gpu)) { | 128 | } else if (adreno_is_a330(adreno_gpu)) { |
| 109 | /* Set up 16 deep read/write request queues: */ | 129 | /* Set up 16 deep read/write request queues: */ |
| 110 | gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x18181818); | 130 | gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x18181818); |
| @@ -121,10 +141,10 @@ static int a3xx_hw_init(struct msm_gpu *gpu) | |||
| 121 | /* Set up VBIF_ROUND_ROBIN_QOS_ARB: */ | 141 | /* Set up VBIF_ROUND_ROBIN_QOS_ARB: */ |
| 122 | gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0001); | 142 | gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0001); |
| 123 | /* Set up AOOO: */ | 143 | /* Set up AOOO: */ |
| 124 | gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000ffff); | 144 | gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003f); |
| 125 | gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0xffffffff); | 145 | gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003f003f); |
| 126 | /* Enable 1K sort: */ | 146 | /* Enable 1K sort: */ |
| 127 | gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001ffff); | 147 | gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001003f); |
| 128 | gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4); | 148 | gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4); |
| 129 | /* Disable VBIF clock gating. This is to enable AXI running | 149 | /* Disable VBIF clock gating. This is to enable AXI running |
| 130 | * higher frequency than GPU: | 150 | * higher frequency than GPU: |
| @@ -162,14 +182,23 @@ static int a3xx_hw_init(struct msm_gpu *gpu) | |||
| 162 | gpu_write(gpu, REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG, 0x00000001); | 182 | gpu_write(gpu, REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG, 0x00000001); |
| 163 | 183 | ||
| 164 | /* Enable Clock gating: */ | 184 | /* Enable Clock gating: */ |
| 165 | gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbfffffff); | 185 | if (adreno_is_a320(adreno_gpu)) |
| 166 | 186 | gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbfffffff); | |
| 167 | /* Set the OCMEM base address for A330 */ | 187 | else if (adreno_is_a330v2(adreno_gpu)) |
| 168 | //TODO: | 188 | gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xaaaaaaaa); |
| 169 | // if (adreno_is_a330(adreno_gpu)) { | 189 | else if (adreno_is_a330(adreno_gpu)) |
| 170 | // gpu_write(gpu, REG_A3XX_RB_GMEM_BASE_ADDR, | 190 | gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbffcffff); |
| 171 | // (unsigned int)(a3xx_gpu->ocmem_base >> 14)); | 191 | |
| 172 | // } | 192 | if (adreno_is_a330v2(adreno_gpu)) |
| 193 | gpu_write(gpu, REG_A3XX_RBBM_GPR0_CTL, 0x05515455); | ||
| 194 | else if (adreno_is_a330(adreno_gpu)) | ||
| 195 | gpu_write(gpu, REG_A3XX_RBBM_GPR0_CTL, 0x00000000); | ||
| 196 | |||
| 197 | /* Set the OCMEM base address for A330, etc */ | ||
| 198 | if (a3xx_gpu->ocmem_hdl) { | ||
| 199 | gpu_write(gpu, REG_A3XX_RB_GMEM_BASE_ADDR, | ||
| 200 | (unsigned int)(a3xx_gpu->ocmem_base >> 14)); | ||
| 201 | } | ||
| 173 | 202 | ||
| 174 | /* Turn on performance counters: */ | 203 | /* Turn on performance counters: */ |
| 175 | gpu_write(gpu, REG_A3XX_RBBM_PERFCTR_CTL, 0x01); | 204 | gpu_write(gpu, REG_A3XX_RBBM_PERFCTR_CTL, 0x01); |
| @@ -219,7 +248,7 @@ static int a3xx_hw_init(struct msm_gpu *gpu) | |||
| 219 | /* Load PM4: */ | 248 | /* Load PM4: */ |
| 220 | ptr = (uint32_t *)(adreno_gpu->pm4->data); | 249 | ptr = (uint32_t *)(adreno_gpu->pm4->data); |
| 221 | len = adreno_gpu->pm4->size / 4; | 250 | len = adreno_gpu->pm4->size / 4; |
| 222 | DBG("loading PM4 ucode version: %u", ptr[0]); | 251 | DBG("loading PM4 ucode version: %x", ptr[1]); |
| 223 | 252 | ||
| 224 | gpu_write(gpu, REG_AXXX_CP_DEBUG, | 253 | gpu_write(gpu, REG_AXXX_CP_DEBUG, |
| 225 | AXXX_CP_DEBUG_DYNAMIC_CLK_DISABLE | | 254 | AXXX_CP_DEBUG_DYNAMIC_CLK_DISABLE | |
| @@ -231,19 +260,26 @@ static int a3xx_hw_init(struct msm_gpu *gpu) | |||
| 231 | /* Load PFP: */ | 260 | /* Load PFP: */ |
| 232 | ptr = (uint32_t *)(adreno_gpu->pfp->data); | 261 | ptr = (uint32_t *)(adreno_gpu->pfp->data); |
| 233 | len = adreno_gpu->pfp->size / 4; | 262 | len = adreno_gpu->pfp->size / 4; |
| 234 | DBG("loading PFP ucode version: %u", ptr[0]); | 263 | DBG("loading PFP ucode version: %x", ptr[5]); |
| 235 | 264 | ||
| 236 | gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_ADDR, 0); | 265 | gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_ADDR, 0); |
| 237 | for (i = 1; i < len; i++) | 266 | for (i = 1; i < len; i++) |
| 238 | gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_DATA, ptr[i]); | 267 | gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_DATA, ptr[i]); |
| 239 | 268 | ||
| 240 | /* CP ROQ queue sizes (bytes) - RB:16, ST:16, IB1:32, IB2:64 */ | 269 | /* CP ROQ queue sizes (bytes) - RB:16, ST:16, IB1:32, IB2:64 */ |
| 241 | if (adreno_is_a305(adreno_gpu) || adreno_is_a320(adreno_gpu)) | 270 | if (adreno_is_a305(adreno_gpu) || adreno_is_a320(adreno_gpu)) { |
| 242 | gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS, | 271 | gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS, |
| 243 | AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(2) | | 272 | AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(2) | |
| 244 | AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(6) | | 273 | AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(6) | |
| 245 | AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(14)); | 274 | AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(14)); |
| 246 | 275 | } else if (adreno_is_a330(adreno_gpu)) { | |
| 276 | /* NOTE: this (value take from downstream android driver) | ||
| 277 | * includes some bits outside of the known bitfields. But | ||
| 278 | * A330 has this "MERCIU queue" thing too, which might | ||
| 279 | * explain a new bitfield or reshuffling: | ||
| 280 | */ | ||
| 281 | gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS, 0x003e2008); | ||
| 282 | } | ||
| 247 | 283 | ||
| 248 | /* clear ME_HALT to start micro engine */ | 284 | /* clear ME_HALT to start micro engine */ |
| 249 | gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0); | 285 | gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0); |
| @@ -253,6 +289,14 @@ static int a3xx_hw_init(struct msm_gpu *gpu) | |||
| 253 | return 0; | 289 | return 0; |
| 254 | } | 290 | } |
| 255 | 291 | ||
| 292 | static void a3xx_recover(struct msm_gpu *gpu) | ||
| 293 | { | ||
| 294 | gpu_write(gpu, REG_A3XX_RBBM_SW_RESET_CMD, 1); | ||
| 295 | gpu_read(gpu, REG_A3XX_RBBM_SW_RESET_CMD); | ||
| 296 | gpu_write(gpu, REG_A3XX_RBBM_SW_RESET_CMD, 0); | ||
| 297 | adreno_recover(gpu); | ||
| 298 | } | ||
| 299 | |||
| 256 | static void a3xx_destroy(struct msm_gpu *gpu) | 300 | static void a3xx_destroy(struct msm_gpu *gpu) |
| 257 | { | 301 | { |
| 258 | struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); | 302 | struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); |
| @@ -261,6 +305,12 @@ static void a3xx_destroy(struct msm_gpu *gpu) | |||
| 261 | DBG("%s", gpu->name); | 305 | DBG("%s", gpu->name); |
| 262 | 306 | ||
| 263 | adreno_gpu_cleanup(adreno_gpu); | 307 | adreno_gpu_cleanup(adreno_gpu); |
| 308 | |||
| 309 | #ifdef CONFIG_MSM_OCMEM | ||
| 310 | if (a3xx_gpu->ocmem_base) | ||
| 311 | ocmem_free(OCMEM_GRAPHICS, a3xx_gpu->ocmem_hdl); | ||
| 312 | #endif | ||
| 313 | |||
| 264 | put_device(&a3xx_gpu->pdev->dev); | 314 | put_device(&a3xx_gpu->pdev->dev); |
| 265 | kfree(a3xx_gpu); | 315 | kfree(a3xx_gpu); |
| 266 | } | 316 | } |
| @@ -371,7 +421,7 @@ static const struct adreno_gpu_funcs funcs = { | |||
| 371 | .hw_init = a3xx_hw_init, | 421 | .hw_init = a3xx_hw_init, |
| 372 | .pm_suspend = msm_gpu_pm_suspend, | 422 | .pm_suspend = msm_gpu_pm_suspend, |
| 373 | .pm_resume = msm_gpu_pm_resume, | 423 | .pm_resume = msm_gpu_pm_resume, |
| 374 | .recover = adreno_recover, | 424 | .recover = a3xx_recover, |
| 375 | .last_fence = adreno_last_fence, | 425 | .last_fence = adreno_last_fence, |
| 376 | .submit = adreno_submit, | 426 | .submit = adreno_submit, |
| 377 | .flush = adreno_flush, | 427 | .flush = adreno_flush, |
| @@ -387,6 +437,7 @@ static const struct adreno_gpu_funcs funcs = { | |||
| 387 | struct msm_gpu *a3xx_gpu_init(struct drm_device *dev) | 437 | struct msm_gpu *a3xx_gpu_init(struct drm_device *dev) |
| 388 | { | 438 | { |
| 389 | struct a3xx_gpu *a3xx_gpu = NULL; | 439 | struct a3xx_gpu *a3xx_gpu = NULL; |
| 440 | struct adreno_gpu *adreno_gpu; | ||
| 390 | struct msm_gpu *gpu; | 441 | struct msm_gpu *gpu; |
| 391 | struct platform_device *pdev = a3xx_pdev; | 442 | struct platform_device *pdev = a3xx_pdev; |
| 392 | struct adreno_platform_config *config; | 443 | struct adreno_platform_config *config; |
| @@ -406,7 +457,8 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev) | |||
| 406 | goto fail; | 457 | goto fail; |
| 407 | } | 458 | } |
| 408 | 459 | ||
| 409 | gpu = &a3xx_gpu->base.base; | 460 | adreno_gpu = &a3xx_gpu->base; |
| 461 | gpu = &adreno_gpu->base; | ||
| 410 | 462 | ||
| 411 | get_device(&pdev->dev); | 463 | get_device(&pdev->dev); |
| 412 | a3xx_gpu->pdev = pdev; | 464 | a3xx_gpu->pdev = pdev; |
| @@ -414,16 +466,46 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev) | |||
| 414 | gpu->fast_rate = config->fast_rate; | 466 | gpu->fast_rate = config->fast_rate; |
| 415 | gpu->slow_rate = config->slow_rate; | 467 | gpu->slow_rate = config->slow_rate; |
| 416 | gpu->bus_freq = config->bus_freq; | 468 | gpu->bus_freq = config->bus_freq; |
| 469 | #ifdef CONFIG_MSM_BUS_SCALING | ||
| 470 | gpu->bus_scale_table = config->bus_scale_table; | ||
| 471 | #endif | ||
| 417 | 472 | ||
| 418 | DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u", | 473 | DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u", |
| 419 | gpu->fast_rate, gpu->slow_rate, gpu->bus_freq); | 474 | gpu->fast_rate, gpu->slow_rate, gpu->bus_freq); |
| 420 | 475 | ||
| 421 | ret = adreno_gpu_init(dev, pdev, &a3xx_gpu->base, | 476 | ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, config->rev); |
| 422 | &funcs, config->rev); | ||
| 423 | if (ret) | 477 | if (ret) |
| 424 | goto fail; | 478 | goto fail; |
| 425 | 479 | ||
| 426 | return &a3xx_gpu->base.base; | 480 | /* if needed, allocate gmem: */ |
| 481 | if (adreno_is_a330(adreno_gpu)) { | ||
| 482 | #ifdef CONFIG_MSM_OCMEM | ||
| 483 | /* TODO this is different/missing upstream: */ | ||
| 484 | struct ocmem_buf *ocmem_hdl = | ||
| 485 | ocmem_allocate(OCMEM_GRAPHICS, adreno_gpu->gmem); | ||
| 486 | |||
| 487 | a3xx_gpu->ocmem_hdl = ocmem_hdl; | ||
| 488 | a3xx_gpu->ocmem_base = ocmem_hdl->addr; | ||
| 489 | adreno_gpu->gmem = ocmem_hdl->len; | ||
| 490 | DBG("using %dK of OCMEM at 0x%08x", adreno_gpu->gmem / 1024, | ||
| 491 | a3xx_gpu->ocmem_base); | ||
| 492 | #endif | ||
| 493 | } | ||
| 494 | |||
| 495 | if (!gpu->mmu) { | ||
| 496 | /* TODO we think it is possible to configure the GPU to | ||
| 497 | * restrict access to VRAM carveout. But the required | ||
| 498 | * registers are unknown. For now just bail out and | ||
| 499 | * limp along with just modesetting. If it turns out | ||
| 500 | * to not be possible to restrict access, then we must | ||
| 501 | * implement a cmdstream validator. | ||
| 502 | */ | ||
| 503 | dev_err(dev->dev, "No memory protection without IOMMU\n"); | ||
| 504 | ret = -ENXIO; | ||
| 505 | goto fail; | ||
| 506 | } | ||
| 507 | |||
| 508 | return gpu; | ||
| 427 | 509 | ||
| 428 | fail: | 510 | fail: |
| 429 | if (a3xx_gpu) | 511 | if (a3xx_gpu) |
| @@ -436,19 +518,59 @@ fail: | |||
| 436 | * The a3xx device: | 518 | * The a3xx device: |
| 437 | */ | 519 | */ |
| 438 | 520 | ||
| 521 | #if defined(CONFIG_MSM_BUS_SCALING) && !defined(CONFIG_OF) | ||
| 522 | # include <mach/kgsl.h> | ||
| 523 | #endif | ||
| 524 | |||
| 439 | static int a3xx_probe(struct platform_device *pdev) | 525 | static int a3xx_probe(struct platform_device *pdev) |
| 440 | { | 526 | { |
| 441 | static struct adreno_platform_config config = {}; | 527 | static struct adreno_platform_config config = {}; |
| 442 | #ifdef CONFIG_OF | 528 | #ifdef CONFIG_OF |
| 443 | /* TODO */ | 529 | struct device_node *child, *node = pdev->dev.of_node; |
| 530 | u32 val; | ||
| 531 | int ret; | ||
| 532 | |||
| 533 | ret = of_property_read_u32(node, "qcom,chipid", &val); | ||
| 534 | if (ret) { | ||
| 535 | dev_err(&pdev->dev, "could not find chipid: %d\n", ret); | ||
| 536 | return ret; | ||
| 537 | } | ||
| 538 | |||
| 539 | config.rev = ADRENO_REV((val >> 24) & 0xff, | ||
| 540 | (val >> 16) & 0xff, (val >> 8) & 0xff, val & 0xff); | ||
| 541 | |||
| 542 | /* find clock rates: */ | ||
| 543 | config.fast_rate = 0; | ||
| 544 | config.slow_rate = ~0; | ||
| 545 | for_each_child_of_node(node, child) { | ||
| 546 | if (of_device_is_compatible(child, "qcom,gpu-pwrlevels")) { | ||
| 547 | struct device_node *pwrlvl; | ||
| 548 | for_each_child_of_node(child, pwrlvl) { | ||
| 549 | ret = of_property_read_u32(pwrlvl, "qcom,gpu-freq", &val); | ||
| 550 | if (ret) { | ||
| 551 | dev_err(&pdev->dev, "could not find gpu-freq: %d\n", ret); | ||
| 552 | return ret; | ||
| 553 | } | ||
| 554 | config.fast_rate = max(config.fast_rate, val); | ||
| 555 | config.slow_rate = min(config.slow_rate, val); | ||
| 556 | } | ||
| 557 | } | ||
| 558 | } | ||
| 559 | |||
| 560 | if (!config.fast_rate) { | ||
| 561 | dev_err(&pdev->dev, "could not find clk rates\n"); | ||
| 562 | return -ENXIO; | ||
| 563 | } | ||
| 564 | |||
| 444 | #else | 565 | #else |
| 566 | struct kgsl_device_platform_data *pdata = pdev->dev.platform_data; | ||
| 445 | uint32_t version = socinfo_get_version(); | 567 | uint32_t version = socinfo_get_version(); |
| 446 | if (cpu_is_apq8064ab()) { | 568 | if (cpu_is_apq8064ab()) { |
| 447 | config.fast_rate = 450000000; | 569 | config.fast_rate = 450000000; |
| 448 | config.slow_rate = 27000000; | 570 | config.slow_rate = 27000000; |
| 449 | config.bus_freq = 4; | 571 | config.bus_freq = 4; |
| 450 | config.rev = ADRENO_REV(3, 2, 1, 0); | 572 | config.rev = ADRENO_REV(3, 2, 1, 0); |
| 451 | } else if (cpu_is_apq8064() || cpu_is_msm8960ab()) { | 573 | } else if (cpu_is_apq8064()) { |
| 452 | config.fast_rate = 400000000; | 574 | config.fast_rate = 400000000; |
| 453 | config.slow_rate = 27000000; | 575 | config.slow_rate = 27000000; |
| 454 | config.bus_freq = 4; | 576 | config.bus_freq = 4; |
| @@ -461,6 +583,16 @@ static int a3xx_probe(struct platform_device *pdev) | |||
| 461 | else | 583 | else |
| 462 | config.rev = ADRENO_REV(3, 2, 0, 0); | 584 | config.rev = ADRENO_REV(3, 2, 0, 0); |
| 463 | 585 | ||
| 586 | } else if (cpu_is_msm8960ab()) { | ||
| 587 | config.fast_rate = 400000000; | ||
| 588 | config.slow_rate = 320000000; | ||
| 589 | config.bus_freq = 4; | ||
| 590 | |||
| 591 | if (SOCINFO_VERSION_MINOR(version) == 0) | ||
| 592 | config.rev = ADRENO_REV(3, 2, 1, 0); | ||
| 593 | else | ||
| 594 | config.rev = ADRENO_REV(3, 2, 1, 1); | ||
| 595 | |||
| 464 | } else if (cpu_is_msm8930()) { | 596 | } else if (cpu_is_msm8930()) { |
| 465 | config.fast_rate = 400000000; | 597 | config.fast_rate = 400000000; |
| 466 | config.slow_rate = 27000000; | 598 | config.slow_rate = 27000000; |
| @@ -473,6 +605,9 @@ static int a3xx_probe(struct platform_device *pdev) | |||
| 473 | config.rev = ADRENO_REV(3, 0, 5, 0); | 605 | config.rev = ADRENO_REV(3, 0, 5, 0); |
| 474 | 606 | ||
| 475 | } | 607 | } |
| 608 | # ifdef CONFIG_MSM_BUS_SCALING | ||
| 609 | config.bus_scale_table = pdata->bus_scale_table; | ||
| 610 | # endif | ||
| 476 | #endif | 611 | #endif |
| 477 | pdev->dev.platform_data = &config; | 612 | pdev->dev.platform_data = &config; |
| 478 | a3xx_pdev = pdev; | 613 | a3xx_pdev = pdev; |
| @@ -485,10 +620,19 @@ static int a3xx_remove(struct platform_device *pdev) | |||
| 485 | return 0; | 620 | return 0; |
| 486 | } | 621 | } |
| 487 | 622 | ||
| 623 | static const struct of_device_id dt_match[] = { | ||
| 624 | { .compatible = "qcom,kgsl-3d0" }, | ||
| 625 | {} | ||
| 626 | }; | ||
| 627 | MODULE_DEVICE_TABLE(of, dt_match); | ||
| 628 | |||
| 488 | static struct platform_driver a3xx_driver = { | 629 | static struct platform_driver a3xx_driver = { |
| 489 | .probe = a3xx_probe, | 630 | .probe = a3xx_probe, |
| 490 | .remove = a3xx_remove, | 631 | .remove = a3xx_remove, |
| 491 | .driver.name = "kgsl-3d0", | 632 | .driver = { |
| 633 | .name = "kgsl-3d0", | ||
| 634 | .of_match_table = dt_match, | ||
| 635 | }, | ||
| 492 | }; | 636 | }; |
| 493 | 637 | ||
| 494 | void __init a3xx_register(void) | 638 | void __init a3xx_register(void) |
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h index 32c398c2d00a..bb9a8ca0507b 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h | |||
| @@ -24,6 +24,10 @@ | |||
| 24 | struct a3xx_gpu { | 24 | struct a3xx_gpu { |
| 25 | struct adreno_gpu base; | 25 | struct adreno_gpu base; |
| 26 | struct platform_device *pdev; | 26 | struct platform_device *pdev; |
| 27 | |||
| 28 | /* if OCMEM is used for GMEM: */ | ||
| 29 | uint32_t ocmem_base; | ||
| 30 | void *ocmem_hdl; | ||
| 27 | }; | 31 | }; |
| 28 | #define to_a3xx_gpu(x) container_of(x, struct a3xx_gpu, base) | 32 | #define to_a3xx_gpu(x) container_of(x, struct a3xx_gpu, base) |
| 29 | 33 | ||
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h index 33dcc606c7c5..d6e6ce2d1abd 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h +++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h | |||
| @@ -8,12 +8,13 @@ http://github.com/freedreno/envytools/ | |||
| 8 | git clone https://github.com/freedreno/envytools.git | 8 | git clone https://github.com/freedreno/envytools.git |
| 9 | 9 | ||
| 10 | The rules-ng-ng source files this header was generated from are: | 10 | The rules-ng-ng source files this header was generated from are: |
| 11 | - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12) | 11 | - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15) |
| 12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) | 12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) |
| 13 | - /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 31003 bytes, from 2013-09-19 18:50:16) | 13 | - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32814 bytes, from 2013-11-30 15:07:33) |
| 14 | - /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36) | 14 | - /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 8900 bytes, from 2013-10-22 23:57:49) |
| 15 | - /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9759 bytes, from 2013-09-10 00:52:33) | 15 | - /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 10574 bytes, from 2013-11-13 05:44:45) |
| 16 | - /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51983 bytes, from 2013-09-10 00:52:32) | 16 | - /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 53644 bytes, from 2013-11-30 15:07:33) |
| 17 | - /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 8344 bytes, from 2013-11-30 14:49:47) | ||
| 17 | 18 | ||
| 18 | Copyright (C) 2013 by the following authors: | 19 | Copyright (C) 2013 by the following authors: |
| 19 | - Rob Clark <robdclark@gmail.com> (robclark) | 20 | - Rob Clark <robdclark@gmail.com> (robclark) |
| @@ -115,96 +116,6 @@ enum adreno_rb_depth_format { | |||
| 115 | DEPTHX_24_8 = 1, | 116 | DEPTHX_24_8 = 1, |
| 116 | }; | 117 | }; |
| 117 | 118 | ||
| 118 | enum adreno_mmu_clnt_beh { | ||
| 119 | BEH_NEVR = 0, | ||
| 120 | BEH_TRAN_RNG = 1, | ||
| 121 | BEH_TRAN_FLT = 2, | ||
| 122 | }; | ||
| 123 | |||
| 124 | #define REG_AXXX_MH_MMU_CONFIG 0x00000040 | ||
| 125 | #define AXXX_MH_MMU_CONFIG_MMU_ENABLE 0x00000001 | ||
| 126 | #define AXXX_MH_MMU_CONFIG_SPLIT_MODE_ENABLE 0x00000002 | ||
| 127 | #define AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK 0x00000030 | ||
| 128 | #define AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT 4 | ||
| 129 | static inline uint32_t AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) | ||
| 130 | { | ||
| 131 | return ((val) << AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK; | ||
| 132 | } | ||
| 133 | #define AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK 0x000000c0 | ||
| 134 | #define AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT 6 | ||
| 135 | static inline uint32_t AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) | ||
| 136 | { | ||
| 137 | return ((val) << AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK; | ||
| 138 | } | ||
| 139 | #define AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK 0x00000300 | ||
| 140 | #define AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT 8 | ||
| 141 | static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) | ||
| 142 | { | ||
| 143 | return ((val) << AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK; | ||
| 144 | } | ||
| 145 | #define AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK 0x00000c00 | ||
| 146 | #define AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT 10 | ||
| 147 | static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) | ||
| 148 | { | ||
| 149 | return ((val) << AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK; | ||
| 150 | } | ||
| 151 | #define AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK 0x00003000 | ||
| 152 | #define AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT 12 | ||
| 153 | static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) | ||
| 154 | { | ||
| 155 | return ((val) << AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK; | ||
| 156 | } | ||
| 157 | #define AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK 0x0000c000 | ||
| 158 | #define AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT 14 | ||
| 159 | static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) | ||
| 160 | { | ||
| 161 | return ((val) << AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK; | ||
| 162 | } | ||
| 163 | #define AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK 0x00030000 | ||
| 164 | #define AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT 16 | ||
| 165 | static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) | ||
| 166 | { | ||
| 167 | return ((val) << AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK; | ||
| 168 | } | ||
| 169 | #define AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK 0x000c0000 | ||
| 170 | #define AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT 18 | ||
| 171 | static inline uint32_t AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) | ||
| 172 | { | ||
| 173 | return ((val) << AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK; | ||
| 174 | } | ||
| 175 | #define AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK 0x00300000 | ||
| 176 | #define AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT 20 | ||
| 177 | static inline uint32_t AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) | ||
| 178 | { | ||
| 179 | return ((val) << AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK; | ||
| 180 | } | ||
| 181 | #define AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK 0x00c00000 | ||
| 182 | #define AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT 22 | ||
| 183 | static inline uint32_t AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) | ||
| 184 | { | ||
| 185 | return ((val) << AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK; | ||
| 186 | } | ||
| 187 | #define AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK 0x03000000 | ||
| 188 | #define AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT 24 | ||
| 189 | static inline uint32_t AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) | ||
| 190 | { | ||
| 191 | return ((val) << AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK; | ||
| 192 | } | ||
| 193 | |||
| 194 | #define REG_AXXX_MH_MMU_VA_RANGE 0x00000041 | ||
| 195 | |||
| 196 | #define REG_AXXX_MH_MMU_PT_BASE 0x00000042 | ||
| 197 | |||
| 198 | #define REG_AXXX_MH_MMU_PAGE_FAULT 0x00000043 | ||
| 199 | |||
| 200 | #define REG_AXXX_MH_MMU_TRAN_ERROR 0x00000044 | ||
| 201 | |||
| 202 | #define REG_AXXX_MH_MMU_INVALIDATE 0x00000045 | ||
| 203 | |||
| 204 | #define REG_AXXX_MH_MMU_MPU_BASE 0x00000046 | ||
| 205 | |||
| 206 | #define REG_AXXX_MH_MMU_MPU_END 0x00000047 | ||
| 207 | |||
| 208 | #define REG_AXXX_CP_RB_BASE 0x000001c0 | 119 | #define REG_AXXX_CP_RB_BASE 0x000001c0 |
| 209 | 120 | ||
| 210 | #define REG_AXXX_CP_RB_CNTL 0x000001c1 | 121 | #define REG_AXXX_CP_RB_CNTL 0x000001c1 |
| @@ -275,6 +186,18 @@ static inline uint32_t AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(uint32_t val) | |||
| 275 | } | 186 | } |
| 276 | 187 | ||
| 277 | #define REG_AXXX_CP_MEQ_THRESHOLDS 0x000001d6 | 188 | #define REG_AXXX_CP_MEQ_THRESHOLDS 0x000001d6 |
| 189 | #define AXXX_CP_MEQ_THRESHOLDS_MEQ_END__MASK 0x001f0000 | ||
| 190 | #define AXXX_CP_MEQ_THRESHOLDS_MEQ_END__SHIFT 16 | ||
| 191 | static inline uint32_t AXXX_CP_MEQ_THRESHOLDS_MEQ_END(uint32_t val) | ||
| 192 | { | ||
| 193 | return ((val) << AXXX_CP_MEQ_THRESHOLDS_MEQ_END__SHIFT) & AXXX_CP_MEQ_THRESHOLDS_MEQ_END__MASK; | ||
| 194 | } | ||
| 195 | #define AXXX_CP_MEQ_THRESHOLDS_ROQ_END__MASK 0x1f000000 | ||
| 196 | #define AXXX_CP_MEQ_THRESHOLDS_ROQ_END__SHIFT 24 | ||
| 197 | static inline uint32_t AXXX_CP_MEQ_THRESHOLDS_ROQ_END(uint32_t val) | ||
| 198 | { | ||
| 199 | return ((val) << AXXX_CP_MEQ_THRESHOLDS_ROQ_END__SHIFT) & AXXX_CP_MEQ_THRESHOLDS_ROQ_END__MASK; | ||
| 200 | } | ||
| 278 | 201 | ||
| 279 | #define REG_AXXX_CP_CSQ_AVAIL 0x000001d7 | 202 | #define REG_AXXX_CP_CSQ_AVAIL 0x000001d7 |
| 280 | #define AXXX_CP_CSQ_AVAIL_RING__MASK 0x0000007f | 203 | #define AXXX_CP_CSQ_AVAIL_RING__MASK 0x0000007f |
| @@ -402,6 +325,36 @@ static inline uint32_t AXXX_CP_CSQ_IB2_STAT_WPTR(uint32_t val) | |||
| 402 | return ((val) << AXXX_CP_CSQ_IB2_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_IB2_STAT_WPTR__MASK; | 325 | return ((val) << AXXX_CP_CSQ_IB2_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_IB2_STAT_WPTR__MASK; |
| 403 | } | 326 | } |
| 404 | 327 | ||
| 328 | #define REG_AXXX_CP_NON_PREFETCH_CNTRS 0x00000440 | ||
| 329 | |||
| 330 | #define REG_AXXX_CP_STQ_ST_STAT 0x00000443 | ||
| 331 | |||
| 332 | #define REG_AXXX_CP_ST_BASE 0x0000044d | ||
| 333 | |||
| 334 | #define REG_AXXX_CP_ST_BUFSZ 0x0000044e | ||
| 335 | |||
| 336 | #define REG_AXXX_CP_MEQ_STAT 0x0000044f | ||
| 337 | |||
| 338 | #define REG_AXXX_CP_MIU_TAG_STAT 0x00000452 | ||
| 339 | |||
| 340 | #define REG_AXXX_CP_BIN_MASK_LO 0x00000454 | ||
| 341 | |||
| 342 | #define REG_AXXX_CP_BIN_MASK_HI 0x00000455 | ||
| 343 | |||
| 344 | #define REG_AXXX_CP_BIN_SELECT_LO 0x00000456 | ||
| 345 | |||
| 346 | #define REG_AXXX_CP_BIN_SELECT_HI 0x00000457 | ||
| 347 | |||
| 348 | #define REG_AXXX_CP_IB1_BASE 0x00000458 | ||
| 349 | |||
| 350 | #define REG_AXXX_CP_IB1_BUFSZ 0x00000459 | ||
| 351 | |||
| 352 | #define REG_AXXX_CP_IB2_BASE 0x0000045a | ||
| 353 | |||
| 354 | #define REG_AXXX_CP_IB2_BUFSZ 0x0000045b | ||
| 355 | |||
| 356 | #define REG_AXXX_CP_STAT 0x0000047f | ||
| 357 | |||
| 405 | #define REG_AXXX_CP_SCRATCH_REG0 0x00000578 | 358 | #define REG_AXXX_CP_SCRATCH_REG0 0x00000578 |
| 406 | 359 | ||
| 407 | #define REG_AXXX_CP_SCRATCH_REG1 0x00000579 | 360 | #define REG_AXXX_CP_SCRATCH_REG1 0x00000579 |
| @@ -418,6 +371,26 @@ static inline uint32_t AXXX_CP_CSQ_IB2_STAT_WPTR(uint32_t val) | |||
| 418 | 371 | ||
| 419 | #define REG_AXXX_CP_SCRATCH_REG7 0x0000057f | 372 | #define REG_AXXX_CP_SCRATCH_REG7 0x0000057f |
| 420 | 373 | ||
| 374 | #define REG_AXXX_CP_ME_VS_EVENT_SRC 0x00000600 | ||
| 375 | |||
| 376 | #define REG_AXXX_CP_ME_VS_EVENT_ADDR 0x00000601 | ||
| 377 | |||
| 378 | #define REG_AXXX_CP_ME_VS_EVENT_DATA 0x00000602 | ||
| 379 | |||
| 380 | #define REG_AXXX_CP_ME_VS_EVENT_ADDR_SWM 0x00000603 | ||
| 381 | |||
| 382 | #define REG_AXXX_CP_ME_VS_EVENT_DATA_SWM 0x00000604 | ||
| 383 | |||
| 384 | #define REG_AXXX_CP_ME_PS_EVENT_SRC 0x00000605 | ||
| 385 | |||
| 386 | #define REG_AXXX_CP_ME_PS_EVENT_ADDR 0x00000606 | ||
| 387 | |||
| 388 | #define REG_AXXX_CP_ME_PS_EVENT_DATA 0x00000607 | ||
| 389 | |||
| 390 | #define REG_AXXX_CP_ME_PS_EVENT_ADDR_SWM 0x00000608 | ||
| 391 | |||
| 392 | #define REG_AXXX_CP_ME_PS_EVENT_DATA_SWM 0x00000609 | ||
| 393 | |||
| 421 | #define REG_AXXX_CP_ME_CF_EVENT_SRC 0x0000060a | 394 | #define REG_AXXX_CP_ME_CF_EVENT_SRC 0x0000060a |
| 422 | 395 | ||
| 423 | #define REG_AXXX_CP_ME_CF_EVENT_ADDR 0x0000060b | 396 | #define REG_AXXX_CP_ME_CF_EVENT_ADDR 0x0000060b |
| @@ -428,5 +401,11 @@ static inline uint32_t AXXX_CP_CSQ_IB2_STAT_WPTR(uint32_t val) | |||
| 428 | 401 | ||
| 429 | #define REG_AXXX_CP_ME_NRT_DATA 0x0000060e | 402 | #define REG_AXXX_CP_ME_NRT_DATA 0x0000060e |
| 430 | 403 | ||
| 404 | #define REG_AXXX_CP_ME_VS_FETCH_DONE_SRC 0x00000612 | ||
| 405 | |||
| 406 | #define REG_AXXX_CP_ME_VS_FETCH_DONE_ADDR 0x00000613 | ||
| 407 | |||
| 408 | #define REG_AXXX_CP_ME_VS_FETCH_DONE_DATA 0x00000614 | ||
| 409 | |||
| 431 | 410 | ||
| 432 | #endif /* ADRENO_COMMON_XML */ | 411 | #endif /* ADRENO_COMMON_XML */ |
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index a0b9d8a95b16..d321099abdd4 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c | |||
| @@ -17,6 +17,7 @@ | |||
| 17 | 17 | ||
| 18 | #include "adreno_gpu.h" | 18 | #include "adreno_gpu.h" |
| 19 | #include "msm_gem.h" | 19 | #include "msm_gem.h" |
| 20 | #include "msm_mmu.h" | ||
| 20 | 21 | ||
| 21 | struct adreno_info { | 22 | struct adreno_info { |
| 22 | struct adreno_rev rev; | 23 | struct adreno_rev rev; |
| @@ -44,7 +45,7 @@ static const struct adreno_info gpulist[] = { | |||
| 44 | .pfpfw = "a300_pfp.fw", | 45 | .pfpfw = "a300_pfp.fw", |
| 45 | .gmem = SZ_512K, | 46 | .gmem = SZ_512K, |
| 46 | }, { | 47 | }, { |
| 47 | .rev = ADRENO_REV(3, 3, 0, 0), | 48 | .rev = ADRENO_REV(3, 3, 0, ANY_ID), |
| 48 | .revn = 330, | 49 | .revn = 330, |
| 49 | .name = "A330", | 50 | .name = "A330", |
| 50 | .pm4fw = "a330_pm4.fw", | 51 | .pm4fw = "a330_pm4.fw", |
| @@ -53,6 +54,11 @@ static const struct adreno_info gpulist[] = { | |||
| 53 | }, | 54 | }, |
| 54 | }; | 55 | }; |
| 55 | 56 | ||
| 57 | MODULE_FIRMWARE("a300_pm4.fw"); | ||
| 58 | MODULE_FIRMWARE("a300_pfp.fw"); | ||
| 59 | MODULE_FIRMWARE("a330_pm4.fw"); | ||
| 60 | MODULE_FIRMWARE("a330_pfp.fw"); | ||
| 61 | |||
| 56 | #define RB_SIZE SZ_32K | 62 | #define RB_SIZE SZ_32K |
| 57 | #define RB_BLKSIZE 16 | 63 | #define RB_BLKSIZE 16 |
| 58 | 64 | ||
| @@ -65,7 +71,7 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value) | |||
| 65 | *value = adreno_gpu->info->revn; | 71 | *value = adreno_gpu->info->revn; |
| 66 | return 0; | 72 | return 0; |
| 67 | case MSM_PARAM_GMEM_SIZE: | 73 | case MSM_PARAM_GMEM_SIZE: |
| 68 | *value = adreno_gpu->info->gmem; | 74 | *value = adreno_gpu->gmem; |
| 69 | return 0; | 75 | return 0; |
| 70 | default: | 76 | default: |
| 71 | DBG("%s: invalid param: %u", gpu->name, param); | 77 | DBG("%s: invalid param: %u", gpu->name, param); |
| @@ -86,7 +92,7 @@ int adreno_hw_init(struct msm_gpu *gpu) | |||
| 86 | gpu_write(gpu, REG_AXXX_CP_RB_CNTL, | 92 | gpu_write(gpu, REG_AXXX_CP_RB_CNTL, |
| 87 | /* size is log2(quad-words): */ | 93 | /* size is log2(quad-words): */ |
| 88 | AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) | | 94 | AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) | |
| 89 | AXXX_CP_RB_CNTL_BLKSZ(RB_BLKSIZE)); | 95 | AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8))); |
| 90 | 96 | ||
| 91 | /* Setup ringbuffer address: */ | 97 | /* Setup ringbuffer address: */ |
| 92 | gpu_write(gpu, REG_AXXX_CP_RB_BASE, gpu->rb_iova); | 98 | gpu_write(gpu, REG_AXXX_CP_RB_BASE, gpu->rb_iova); |
| @@ -286,6 +292,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, | |||
| 286 | struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs, | 292 | struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs, |
| 287 | struct adreno_rev rev) | 293 | struct adreno_rev rev) |
| 288 | { | 294 | { |
| 295 | struct msm_mmu *mmu; | ||
| 289 | int i, ret; | 296 | int i, ret; |
| 290 | 297 | ||
| 291 | /* identify gpu: */ | 298 | /* identify gpu: */ |
| @@ -311,6 +318,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, | |||
| 311 | rev.core, rev.major, rev.minor, rev.patchid); | 318 | rev.core, rev.major, rev.minor, rev.patchid); |
| 312 | 319 | ||
| 313 | gpu->funcs = funcs; | 320 | gpu->funcs = funcs; |
| 321 | gpu->gmem = gpu->info->gmem; | ||
| 314 | gpu->rev = rev; | 322 | gpu->rev = rev; |
| 315 | 323 | ||
| 316 | ret = request_firmware(&gpu->pm4, gpu->info->pm4fw, drm->dev); | 324 | ret = request_firmware(&gpu->pm4, gpu->info->pm4fw, drm->dev); |
| @@ -333,10 +341,13 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, | |||
| 333 | if (ret) | 341 | if (ret) |
| 334 | return ret; | 342 | return ret; |
| 335 | 343 | ||
| 336 | ret = msm_iommu_attach(drm, gpu->base.iommu, | 344 | mmu = gpu->base.mmu; |
| 337 | iommu_ports, ARRAY_SIZE(iommu_ports)); | 345 | if (mmu) { |
| 338 | if (ret) | 346 | ret = mmu->funcs->attach(mmu, iommu_ports, |
| 339 | return ret; | 347 | ARRAY_SIZE(iommu_ports)); |
| 348 | if (ret) | ||
| 349 | return ret; | ||
| 350 | } | ||
| 340 | 351 | ||
| 341 | gpu->memptrs_bo = msm_gem_new(drm, sizeof(*gpu->memptrs), | 352 | gpu->memptrs_bo = msm_gem_new(drm, sizeof(*gpu->memptrs), |
| 342 | MSM_BO_UNCACHED); | 353 | MSM_BO_UNCACHED); |
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index f73abfba7c22..ca11ea4da165 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h | |||
| @@ -51,6 +51,7 @@ struct adreno_gpu { | |||
| 51 | struct msm_gpu base; | 51 | struct msm_gpu base; |
| 52 | struct adreno_rev rev; | 52 | struct adreno_rev rev; |
| 53 | const struct adreno_info *info; | 53 | const struct adreno_info *info; |
| 54 | uint32_t gmem; /* actual gmem size */ | ||
| 54 | uint32_t revn; /* numeric revision name */ | 55 | uint32_t revn; /* numeric revision name */ |
| 55 | const struct adreno_gpu_funcs *funcs; | 56 | const struct adreno_gpu_funcs *funcs; |
| 56 | 57 | ||
| @@ -70,6 +71,9 @@ struct adreno_gpu { | |||
| 70 | struct adreno_platform_config { | 71 | struct adreno_platform_config { |
| 71 | struct adreno_rev rev; | 72 | struct adreno_rev rev; |
| 72 | uint32_t fast_rate, slow_rate, bus_freq; | 73 | uint32_t fast_rate, slow_rate, bus_freq; |
| 74 | #ifdef CONFIG_MSM_BUS_SCALING | ||
| 75 | struct msm_bus_scale_pdata *bus_scale_table; | ||
| 76 | #endif | ||
| 73 | }; | 77 | }; |
| 74 | 78 | ||
| 75 | #define ADRENO_IDLE_TIMEOUT (20 * 1000) | 79 | #define ADRENO_IDLE_TIMEOUT (20 * 1000) |
| @@ -94,6 +98,11 @@ static inline bool adreno_is_a330(struct adreno_gpu *gpu) | |||
| 94 | return gpu->revn == 330; | 98 | return gpu->revn == 330; |
| 95 | } | 99 | } |
| 96 | 100 | ||
| 101 | static inline bool adreno_is_a330v2(struct adreno_gpu *gpu) | ||
| 102 | { | ||
| 103 | return adreno_is_a330(gpu) && (gpu->rev.patchid > 0); | ||
| 104 | } | ||
| 105 | |||
| 97 | int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value); | 106 | int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value); |
| 98 | int adreno_hw_init(struct msm_gpu *gpu); | 107 | int adreno_hw_init(struct msm_gpu *gpu); |
| 99 | uint32_t adreno_last_fence(struct msm_gpu *gpu); | 108 | uint32_t adreno_last_fence(struct msm_gpu *gpu); |
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h index 259ad709b0cc..ae992c71703f 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h +++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h | |||
| @@ -8,12 +8,13 @@ http://github.com/freedreno/envytools/ | |||
| 8 | git clone https://github.com/freedreno/envytools.git | 8 | git clone https://github.com/freedreno/envytools.git |
| 9 | 9 | ||
| 10 | The rules-ng-ng source files this header was generated from are: | 10 | The rules-ng-ng source files this header was generated from are: |
| 11 | - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12) | 11 | - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15) |
| 12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) | 12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) |
| 13 | - /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 31003 bytes, from 2013-09-19 18:50:16) | 13 | - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32814 bytes, from 2013-11-30 15:07:33) |
| 14 | - /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36) | 14 | - /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 8900 bytes, from 2013-10-22 23:57:49) |
| 15 | - /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9759 bytes, from 2013-09-10 00:52:33) | 15 | - /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 10574 bytes, from 2013-11-13 05:44:45) |
| 16 | - /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51983 bytes, from 2013-09-10 00:52:32) | 16 | - /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 53644 bytes, from 2013-11-30 15:07:33) |
| 17 | - /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 8344 bytes, from 2013-11-30 14:49:47) | ||
| 17 | 18 | ||
| 18 | Copyright (C) 2013 by the following authors: | 19 | Copyright (C) 2013 by the following authors: |
| 19 | - Rob Clark <robdclark@gmail.com> (robclark) | 20 | - Rob Clark <robdclark@gmail.com> (robclark) |
| @@ -66,13 +67,15 @@ enum vgt_event_type { | |||
| 66 | 67 | ||
| 67 | enum pc_di_primtype { | 68 | enum pc_di_primtype { |
| 68 | DI_PT_NONE = 0, | 69 | DI_PT_NONE = 0, |
| 69 | DI_PT_POINTLIST = 1, | 70 | DI_PT_POINTLIST_A2XX = 1, |
| 70 | DI_PT_LINELIST = 2, | 71 | DI_PT_LINELIST = 2, |
| 71 | DI_PT_LINESTRIP = 3, | 72 | DI_PT_LINESTRIP = 3, |
| 72 | DI_PT_TRILIST = 4, | 73 | DI_PT_TRILIST = 4, |
| 73 | DI_PT_TRIFAN = 5, | 74 | DI_PT_TRIFAN = 5, |
| 74 | DI_PT_TRISTRIP = 6, | 75 | DI_PT_TRISTRIP = 6, |
| 76 | DI_PT_LINELOOP = 7, | ||
| 75 | DI_PT_RECTLIST = 8, | 77 | DI_PT_RECTLIST = 8, |
| 78 | DI_PT_POINTLIST_A3XX = 9, | ||
| 76 | DI_PT_QUADLIST = 13, | 79 | DI_PT_QUADLIST = 13, |
| 77 | DI_PT_QUADSTRIP = 14, | 80 | DI_PT_QUADSTRIP = 14, |
| 78 | DI_PT_POLYGON = 15, | 81 | DI_PT_POLYGON = 15, |
| @@ -119,7 +122,7 @@ enum adreno_pm4_type3_packets { | |||
| 119 | CP_WAIT_FOR_IDLE = 38, | 122 | CP_WAIT_FOR_IDLE = 38, |
| 120 | CP_WAIT_REG_MEM = 60, | 123 | CP_WAIT_REG_MEM = 60, |
| 121 | CP_WAIT_REG_EQ = 82, | 124 | CP_WAIT_REG_EQ = 82, |
| 122 | CP_WAT_REG_GTE = 83, | 125 | CP_WAIT_REG_GTE = 83, |
| 123 | CP_WAIT_UNTIL_READ = 92, | 126 | CP_WAIT_UNTIL_READ = 92, |
| 124 | CP_WAIT_IB_PFD_COMPLETE = 93, | 127 | CP_WAIT_IB_PFD_COMPLETE = 93, |
| 125 | CP_REG_RMW = 33, | 128 | CP_REG_RMW = 33, |
| @@ -151,7 +154,6 @@ enum adreno_pm4_type3_packets { | |||
| 151 | CP_CONTEXT_UPDATE = 94, | 154 | CP_CONTEXT_UPDATE = 94, |
| 152 | CP_INTERRUPT = 64, | 155 | CP_INTERRUPT = 64, |
| 153 | CP_IM_STORE = 44, | 156 | CP_IM_STORE = 44, |
| 154 | CP_SET_BIN_BASE_OFFSET = 75, | ||
| 155 | CP_SET_DRAW_INIT_FLAGS = 75, | 157 | CP_SET_DRAW_INIT_FLAGS = 75, |
| 156 | CP_SET_PROTECTED_MODE = 95, | 158 | CP_SET_PROTECTED_MODE = 95, |
| 157 | CP_LOAD_STATE = 48, | 159 | CP_LOAD_STATE = 48, |
| @@ -159,6 +161,16 @@ enum adreno_pm4_type3_packets { | |||
| 159 | CP_COND_INDIRECT_BUFFER_PFD = 50, | 161 | CP_COND_INDIRECT_BUFFER_PFD = 50, |
| 160 | CP_INDIRECT_BUFFER_PFE = 63, | 162 | CP_INDIRECT_BUFFER_PFE = 63, |
| 161 | CP_SET_BIN = 76, | 163 | CP_SET_BIN = 76, |
| 164 | CP_TEST_TWO_MEMS = 113, | ||
| 165 | CP_WAIT_FOR_ME = 19, | ||
| 166 | IN_IB_PREFETCH_END = 23, | ||
| 167 | IN_SUBBLK_PREFETCH = 31, | ||
| 168 | IN_INSTR_PREFETCH = 32, | ||
| 169 | IN_INSTR_MATCH = 71, | ||
| 170 | IN_CONST_PREFETCH = 73, | ||
| 171 | IN_INCR_UPDT_STATE = 85, | ||
| 172 | IN_INCR_UPDT_CONST = 86, | ||
| 173 | IN_INCR_UPDT_INSTR = 87, | ||
| 162 | }; | 174 | }; |
| 163 | 175 | ||
| 164 | enum adreno_state_block { | 176 | enum adreno_state_block { |
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h index 6d4c62bf70dc..87be647e3825 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.xml.h +++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h | |||
| @@ -8,14 +8,16 @@ http://github.com/freedreno/envytools/ | |||
| 8 | git clone https://github.com/freedreno/envytools.git | 8 | git clone https://github.com/freedreno/envytools.git |
| 9 | 9 | ||
| 10 | The rules-ng-ng source files this header was generated from are: | 10 | The rules-ng-ng source files this header was generated from are: |
| 11 | - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12) | 11 | - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) |
| 12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) | 12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) |
| 13 | - /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48) | 13 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31) |
| 14 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52) | ||
| 15 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13) | ||
| 14 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) | 16 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) |
| 15 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) | 17 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) |
| 16 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) | 18 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) |
| 17 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) | 19 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) |
| 18 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15) | 20 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04) |
| 19 | 21 | ||
| 20 | Copyright (C) 2013 by the following authors: | 22 | Copyright (C) 2013 by the following authors: |
| 21 | - Rob Clark <robdclark@gmail.com> (robclark) | 23 | - Rob Clark <robdclark@gmail.com> (robclark) |
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h index d1df38bf5747..747a6ef4211f 100644 --- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h +++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h | |||
| @@ -8,14 +8,16 @@ http://github.com/freedreno/envytools/ | |||
| 8 | git clone https://github.com/freedreno/envytools.git | 8 | git clone https://github.com/freedreno/envytools.git |
| 9 | 9 | ||
| 10 | The rules-ng-ng source files this header was generated from are: | 10 | The rules-ng-ng source files this header was generated from are: |
| 11 | - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12) | 11 | - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) |
| 12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) | 12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) |
| 13 | - /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48) | 13 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31) |
| 14 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52) | ||
| 15 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13) | ||
| 14 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) | 16 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) |
| 15 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) | 17 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) |
| 16 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) | 18 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) |
| 17 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) | 19 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) |
| 18 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15) | 20 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04) |
| 19 | 21 | ||
| 20 | Copyright (C) 2013 by the following authors: | 22 | Copyright (C) 2013 by the following authors: |
| 21 | - Rob Clark <robdclark@gmail.com> (robclark) | 23 | - Rob Clark <robdclark@gmail.com> (robclark) |
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h index 0030a111302d..48e03acf19bf 100644 --- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h +++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h | |||
| @@ -8,14 +8,16 @@ http://github.com/freedreno/envytools/ | |||
| 8 | git clone https://github.com/freedreno/envytools.git | 8 | git clone https://github.com/freedreno/envytools.git |
| 9 | 9 | ||
| 10 | The rules-ng-ng source files this header was generated from are: | 10 | The rules-ng-ng source files this header was generated from are: |
| 11 | - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12) | 11 | - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) |
| 12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) | 12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) |
| 13 | - /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48) | 13 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31) |
| 14 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52) | ||
| 15 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13) | ||
| 14 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) | 16 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) |
| 15 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) | 17 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) |
| 16 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) | 18 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) |
| 17 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) | 19 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) |
| 18 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15) | 20 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04) |
| 19 | 21 | ||
| 20 | Copyright (C) 2013 by the following authors: | 22 | Copyright (C) 2013 by the following authors: |
| 21 | - Rob Clark <robdclark@gmail.com> (robclark) | 23 | - Rob Clark <robdclark@gmail.com> (robclark) |
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c index 50d11df35b21..6f1588aa9071 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi.c | |||
| @@ -41,7 +41,7 @@ void hdmi_set_mode(struct hdmi *hdmi, bool power_on) | |||
| 41 | power_on ? "Enable" : "Disable", ctrl); | 41 | power_on ? "Enable" : "Disable", ctrl); |
| 42 | } | 42 | } |
| 43 | 43 | ||
| 44 | static irqreturn_t hdmi_irq(int irq, void *dev_id) | 44 | irqreturn_t hdmi_irq(int irq, void *dev_id) |
| 45 | { | 45 | { |
| 46 | struct hdmi *hdmi = dev_id; | 46 | struct hdmi *hdmi = dev_id; |
| 47 | 47 | ||
| @@ -71,13 +71,13 @@ void hdmi_destroy(struct kref *kref) | |||
| 71 | } | 71 | } |
| 72 | 72 | ||
| 73 | /* initialize connector */ | 73 | /* initialize connector */ |
| 74 | int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder) | 74 | struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder) |
| 75 | { | 75 | { |
| 76 | struct hdmi *hdmi = NULL; | 76 | struct hdmi *hdmi = NULL; |
| 77 | struct msm_drm_private *priv = dev->dev_private; | 77 | struct msm_drm_private *priv = dev->dev_private; |
| 78 | struct platform_device *pdev = hdmi_pdev; | 78 | struct platform_device *pdev = hdmi_pdev; |
| 79 | struct hdmi_platform_config *config; | 79 | struct hdmi_platform_config *config; |
| 80 | int ret; | 80 | int i, ret; |
| 81 | 81 | ||
| 82 | if (!pdev) { | 82 | if (!pdev) { |
| 83 | dev_err(dev->dev, "no hdmi device\n"); | 83 | dev_err(dev->dev, "no hdmi device\n"); |
| @@ -99,6 +99,7 @@ int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder) | |||
| 99 | 99 | ||
| 100 | hdmi->dev = dev; | 100 | hdmi->dev = dev; |
| 101 | hdmi->pdev = pdev; | 101 | hdmi->pdev = pdev; |
| 102 | hdmi->config = config; | ||
| 102 | hdmi->encoder = encoder; | 103 | hdmi->encoder = encoder; |
| 103 | 104 | ||
| 104 | /* not sure about which phy maps to which msm.. probably I miss some */ | 105 | /* not sure about which phy maps to which msm.. probably I miss some */ |
| @@ -114,44 +115,70 @@ int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder) | |||
| 114 | goto fail; | 115 | goto fail; |
| 115 | } | 116 | } |
| 116 | 117 | ||
| 117 | hdmi->mmio = msm_ioremap(pdev, "hdmi_msm_hdmi_addr", "HDMI"); | 118 | hdmi->mmio = msm_ioremap(pdev, config->mmio_name, "HDMI"); |
| 118 | if (IS_ERR(hdmi->mmio)) { | 119 | if (IS_ERR(hdmi->mmio)) { |
| 119 | ret = PTR_ERR(hdmi->mmio); | 120 | ret = PTR_ERR(hdmi->mmio); |
| 120 | goto fail; | 121 | goto fail; |
| 121 | } | 122 | } |
| 122 | 123 | ||
| 123 | hdmi->mvs = devm_regulator_get(&pdev->dev, "8901_hdmi_mvs"); | 124 | BUG_ON(config->hpd_reg_cnt > ARRAY_SIZE(hdmi->hpd_regs)); |
| 124 | if (IS_ERR(hdmi->mvs)) | 125 | for (i = 0; i < config->hpd_reg_cnt; i++) { |
| 125 | hdmi->mvs = devm_regulator_get(&pdev->dev, "hdmi_mvs"); | 126 | struct regulator *reg; |
| 126 | if (IS_ERR(hdmi->mvs)) { | 127 | |
| 127 | ret = PTR_ERR(hdmi->mvs); | 128 | reg = devm_regulator_get(&pdev->dev, config->hpd_reg_names[i]); |
| 128 | dev_err(dev->dev, "failed to get mvs regulator: %d\n", ret); | 129 | if (IS_ERR(reg)) { |
| 129 | goto fail; | 130 | ret = PTR_ERR(reg); |
| 131 | dev_err(dev->dev, "failed to get hpd regulator: %s (%d)\n", | ||
| 132 | config->hpd_reg_names[i], ret); | ||
| 133 | goto fail; | ||
| 134 | } | ||
| 135 | |||
| 136 | hdmi->hpd_regs[i] = reg; | ||
| 130 | } | 137 | } |
| 131 | 138 | ||
| 132 | hdmi->mpp0 = devm_regulator_get(&pdev->dev, "8901_mpp0"); | 139 | BUG_ON(config->pwr_reg_cnt > ARRAY_SIZE(hdmi->pwr_regs)); |
| 133 | if (IS_ERR(hdmi->mpp0)) | 140 | for (i = 0; i < config->pwr_reg_cnt; i++) { |
| 134 | hdmi->mpp0 = NULL; | 141 | struct regulator *reg; |
| 135 | 142 | ||
| 136 | hdmi->clk = devm_clk_get(&pdev->dev, "core_clk"); | 143 | reg = devm_regulator_get(&pdev->dev, config->pwr_reg_names[i]); |
| 137 | if (IS_ERR(hdmi->clk)) { | 144 | if (IS_ERR(reg)) { |
| 138 | ret = PTR_ERR(hdmi->clk); | 145 | ret = PTR_ERR(reg); |
| 139 | dev_err(dev->dev, "failed to get 'clk': %d\n", ret); | 146 | dev_err(dev->dev, "failed to get pwr regulator: %s (%d)\n", |
| 140 | goto fail; | 147 | config->pwr_reg_names[i], ret); |
| 148 | goto fail; | ||
| 149 | } | ||
| 150 | |||
| 151 | hdmi->pwr_regs[i] = reg; | ||
| 141 | } | 152 | } |
| 142 | 153 | ||
| 143 | hdmi->m_pclk = devm_clk_get(&pdev->dev, "master_iface_clk"); | 154 | BUG_ON(config->hpd_clk_cnt > ARRAY_SIZE(hdmi->hpd_clks)); |
| 144 | if (IS_ERR(hdmi->m_pclk)) { | 155 | for (i = 0; i < config->hpd_clk_cnt; i++) { |
| 145 | ret = PTR_ERR(hdmi->m_pclk); | 156 | struct clk *clk; |
| 146 | dev_err(dev->dev, "failed to get 'm_pclk': %d\n", ret); | 157 | |
| 147 | goto fail; | 158 | clk = devm_clk_get(&pdev->dev, config->hpd_clk_names[i]); |
| 159 | if (IS_ERR(clk)) { | ||
| 160 | ret = PTR_ERR(clk); | ||
| 161 | dev_err(dev->dev, "failed to get hpd clk: %s (%d)\n", | ||
| 162 | config->hpd_clk_names[i], ret); | ||
| 163 | goto fail; | ||
| 164 | } | ||
| 165 | |||
| 166 | hdmi->hpd_clks[i] = clk; | ||
| 148 | } | 167 | } |
| 149 | 168 | ||
| 150 | hdmi->s_pclk = devm_clk_get(&pdev->dev, "slave_iface_clk"); | 169 | BUG_ON(config->pwr_clk_cnt > ARRAY_SIZE(hdmi->pwr_clks)); |
| 151 | if (IS_ERR(hdmi->s_pclk)) { | 170 | for (i = 0; i < config->pwr_clk_cnt; i++) { |
| 152 | ret = PTR_ERR(hdmi->s_pclk); | 171 | struct clk *clk; |
| 153 | dev_err(dev->dev, "failed to get 's_pclk': %d\n", ret); | 172 | |
| 154 | goto fail; | 173 | clk = devm_clk_get(&pdev->dev, config->pwr_clk_names[i]); |
| 174 | if (IS_ERR(clk)) { | ||
| 175 | ret = PTR_ERR(clk); | ||
| 176 | dev_err(dev->dev, "failed to get pwr clk: %s (%d)\n", | ||
| 177 | config->pwr_clk_names[i], ret); | ||
| 178 | goto fail; | ||
| 179 | } | ||
| 180 | |||
| 181 | hdmi->pwr_clks[i] = clk; | ||
| 155 | } | 182 | } |
| 156 | 183 | ||
| 157 | hdmi->i2c = hdmi_i2c_init(hdmi); | 184 | hdmi->i2c = hdmi_i2c_init(hdmi); |
| @@ -178,20 +205,22 @@ int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder) | |||
| 178 | goto fail; | 205 | goto fail; |
| 179 | } | 206 | } |
| 180 | 207 | ||
| 181 | hdmi->irq = platform_get_irq(pdev, 0); | 208 | if (!config->shared_irq) { |
| 182 | if (hdmi->irq < 0) { | 209 | hdmi->irq = platform_get_irq(pdev, 0); |
| 183 | ret = hdmi->irq; | 210 | if (hdmi->irq < 0) { |
| 184 | dev_err(dev->dev, "failed to get irq: %d\n", ret); | 211 | ret = hdmi->irq; |
| 185 | goto fail; | 212 | dev_err(dev->dev, "failed to get irq: %d\n", ret); |
| 186 | } | 213 | goto fail; |
| 214 | } | ||
| 187 | 215 | ||
| 188 | ret = devm_request_threaded_irq(&pdev->dev, hdmi->irq, | 216 | ret = devm_request_threaded_irq(&pdev->dev, hdmi->irq, |
| 189 | NULL, hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, | 217 | NULL, hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, |
| 190 | "hdmi_isr", hdmi); | 218 | "hdmi_isr", hdmi); |
| 191 | if (ret < 0) { | 219 | if (ret < 0) { |
| 192 | dev_err(dev->dev, "failed to request IRQ%u: %d\n", | 220 | dev_err(dev->dev, "failed to request IRQ%u: %d\n", |
| 193 | hdmi->irq, ret); | 221 | hdmi->irq, ret); |
| 194 | goto fail; | 222 | goto fail; |
| 223 | } | ||
| 195 | } | 224 | } |
| 196 | 225 | ||
| 197 | encoder->bridge = hdmi->bridge; | 226 | encoder->bridge = hdmi->bridge; |
| @@ -199,7 +228,7 @@ int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder) | |||
| 199 | priv->bridges[priv->num_bridges++] = hdmi->bridge; | 228 | priv->bridges[priv->num_bridges++] = hdmi->bridge; |
| 200 | priv->connectors[priv->num_connectors++] = hdmi->connector; | 229 | priv->connectors[priv->num_connectors++] = hdmi->connector; |
| 201 | 230 | ||
| 202 | return 0; | 231 | return hdmi; |
| 203 | 232 | ||
| 204 | fail: | 233 | fail: |
| 205 | if (hdmi) { | 234 | if (hdmi) { |
| @@ -211,37 +240,100 @@ fail: | |||
| 211 | hdmi_destroy(&hdmi->refcount); | 240 | hdmi_destroy(&hdmi->refcount); |
| 212 | } | 241 | } |
| 213 | 242 | ||
| 214 | return ret; | 243 | return ERR_PTR(ret); |
| 215 | } | 244 | } |
| 216 | 245 | ||
| 217 | /* | 246 | /* |
| 218 | * The hdmi device: | 247 | * The hdmi device: |
| 219 | */ | 248 | */ |
| 220 | 249 | ||
| 250 | #include <linux/of_gpio.h> | ||
| 251 | |||
| 221 | static int hdmi_dev_probe(struct platform_device *pdev) | 252 | static int hdmi_dev_probe(struct platform_device *pdev) |
| 222 | { | 253 | { |
| 223 | static struct hdmi_platform_config config = {}; | 254 | static struct hdmi_platform_config config = {}; |
| 224 | #ifdef CONFIG_OF | 255 | #ifdef CONFIG_OF |
| 225 | /* TODO */ | 256 | struct device_node *of_node = pdev->dev.of_node; |
| 257 | |||
| 258 | int get_gpio(const char *name) | ||
| 259 | { | ||
| 260 | int gpio = of_get_named_gpio(of_node, name, 0); | ||
| 261 | if (gpio < 0) { | ||
| 262 | dev_err(&pdev->dev, "failed to get gpio: %s (%d)\n", | ||
| 263 | name, gpio); | ||
| 264 | gpio = -1; | ||
| 265 | } | ||
| 266 | return gpio; | ||
| 267 | } | ||
| 268 | |||
| 269 | /* TODO actually use DT.. */ | ||
| 270 | static const char *hpd_reg_names[] = {"hpd-gdsc", "hpd-5v"}; | ||
| 271 | static const char *pwr_reg_names[] = {"core-vdda", "core-vcc"}; | ||
| 272 | static const char *hpd_clk_names[] = {"iface_clk", "core_clk", "mdp_core_clk"}; | ||
| 273 | static const char *pwr_clk_names[] = {"extp_clk", "alt_iface_clk"}; | ||
| 274 | |||
| 275 | config.phy_init = hdmi_phy_8x74_init; | ||
| 276 | config.mmio_name = "core_physical"; | ||
| 277 | config.hpd_reg_names = hpd_reg_names; | ||
| 278 | config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names); | ||
| 279 | config.pwr_reg_names = pwr_reg_names; | ||
| 280 | config.pwr_reg_cnt = ARRAY_SIZE(pwr_reg_names); | ||
| 281 | config.hpd_clk_names = hpd_clk_names; | ||
| 282 | config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names); | ||
| 283 | config.pwr_clk_names = pwr_clk_names; | ||
| 284 | config.pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names); | ||
| 285 | config.ddc_clk_gpio = get_gpio("qcom,hdmi-tx-ddc-clk"); | ||
| 286 | config.ddc_data_gpio = get_gpio("qcom,hdmi-tx-ddc-data"); | ||
| 287 | config.hpd_gpio = get_gpio("qcom,hdmi-tx-hpd"); | ||
| 288 | config.mux_en_gpio = get_gpio("qcom,hdmi-tx-mux-en"); | ||
| 289 | config.mux_sel_gpio = get_gpio("qcom,hdmi-tx-mux-sel"); | ||
| 290 | config.shared_irq = true; | ||
| 291 | |||
| 226 | #else | 292 | #else |
| 293 | static const char *hpd_clk_names[] = { | ||
| 294 | "core_clk", "master_iface_clk", "slave_iface_clk", | ||
| 295 | }; | ||
| 227 | if (cpu_is_apq8064()) { | 296 | if (cpu_is_apq8064()) { |
| 297 | static const char *hpd_reg_names[] = {"8921_hdmi_mvs"}; | ||
| 228 | config.phy_init = hdmi_phy_8960_init; | 298 | config.phy_init = hdmi_phy_8960_init; |
| 299 | config.mmio_name = "hdmi_msm_hdmi_addr"; | ||
| 300 | config.hpd_reg_names = hpd_reg_names; | ||
| 301 | config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names); | ||
| 302 | config.hpd_clk_names = hpd_clk_names; | ||
| 303 | config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names); | ||
| 229 | config.ddc_clk_gpio = 70; | 304 | config.ddc_clk_gpio = 70; |
| 230 | config.ddc_data_gpio = 71; | 305 | config.ddc_data_gpio = 71; |
| 231 | config.hpd_gpio = 72; | 306 | config.hpd_gpio = 72; |
| 232 | config.pmic_gpio = 13 + NR_GPIO_IRQS; | 307 | config.mux_en_gpio = -1; |
| 233 | } else if (cpu_is_msm8960()) { | 308 | config.mux_sel_gpio = 13 + NR_GPIO_IRQS; |
| 309 | } else if (cpu_is_msm8960() || cpu_is_msm8960ab()) { | ||
| 310 | static const char *hpd_reg_names[] = {"8921_hdmi_mvs"}; | ||
| 234 | config.phy_init = hdmi_phy_8960_init; | 311 | config.phy_init = hdmi_phy_8960_init; |
| 312 | config.mmio_name = "hdmi_msm_hdmi_addr"; | ||
| 313 | config.hpd_reg_names = hpd_reg_names; | ||
| 314 | config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names); | ||
| 315 | config.hpd_clk_names = hpd_clk_names; | ||
| 316 | config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names); | ||
| 235 | config.ddc_clk_gpio = 100; | 317 | config.ddc_clk_gpio = 100; |
| 236 | config.ddc_data_gpio = 101; | 318 | config.ddc_data_gpio = 101; |
| 237 | config.hpd_gpio = 102; | 319 | config.hpd_gpio = 102; |
| 238 | config.pmic_gpio = -1; | 320 | config.mux_en_gpio = -1; |
| 321 | config.mux_sel_gpio = -1; | ||
| 239 | } else if (cpu_is_msm8x60()) { | 322 | } else if (cpu_is_msm8x60()) { |
| 323 | static const char *hpd_reg_names[] = { | ||
| 324 | "8901_hdmi_mvs", "8901_mpp0" | ||
| 325 | }; | ||
| 240 | config.phy_init = hdmi_phy_8x60_init; | 326 | config.phy_init = hdmi_phy_8x60_init; |
| 327 | config.mmio_name = "hdmi_msm_hdmi_addr"; | ||
| 328 | config.hpd_reg_names = hpd_reg_names; | ||
| 329 | config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names); | ||
| 330 | config.hpd_clk_names = hpd_clk_names; | ||
| 331 | config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names); | ||
| 241 | config.ddc_clk_gpio = 170; | 332 | config.ddc_clk_gpio = 170; |
| 242 | config.ddc_data_gpio = 171; | 333 | config.ddc_data_gpio = 171; |
| 243 | config.hpd_gpio = 172; | 334 | config.hpd_gpio = 172; |
| 244 | config.pmic_gpio = -1; | 335 | config.mux_en_gpio = -1; |
| 336 | config.mux_sel_gpio = -1; | ||
| 245 | } | 337 | } |
| 246 | #endif | 338 | #endif |
| 247 | pdev->dev.platform_data = &config; | 339 | pdev->dev.platform_data = &config; |
| @@ -255,10 +347,19 @@ static int hdmi_dev_remove(struct platform_device *pdev) | |||
| 255 | return 0; | 347 | return 0; |
| 256 | } | 348 | } |
| 257 | 349 | ||
| 350 | static const struct of_device_id dt_match[] = { | ||
| 351 | { .compatible = "qcom,hdmi-tx" }, | ||
| 352 | {} | ||
| 353 | }; | ||
| 354 | MODULE_DEVICE_TABLE(of, dt_match); | ||
| 355 | |||
| 258 | static struct platform_driver hdmi_driver = { | 356 | static struct platform_driver hdmi_driver = { |
| 259 | .probe = hdmi_dev_probe, | 357 | .probe = hdmi_dev_probe, |
| 260 | .remove = hdmi_dev_remove, | 358 | .remove = hdmi_dev_remove, |
| 261 | .driver.name = "hdmi_msm", | 359 | .driver = { |
| 360 | .name = "hdmi_msm", | ||
| 361 | .of_match_table = dt_match, | ||
| 362 | }, | ||
| 262 | }; | 363 | }; |
| 263 | 364 | ||
| 264 | void __init hdmi_register(void) | 365 | void __init hdmi_register(void) |
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h index 2c2ec566394c..41b29add70b1 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.h +++ b/drivers/gpu/drm/msm/hdmi/hdmi.h | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | 28 | ||
| 29 | 29 | ||
| 30 | struct hdmi_phy; | 30 | struct hdmi_phy; |
| 31 | struct hdmi_platform_config; | ||
| 31 | 32 | ||
| 32 | struct hdmi { | 33 | struct hdmi { |
| 33 | struct kref refcount; | 34 | struct kref refcount; |
| @@ -35,14 +36,14 @@ struct hdmi { | |||
| 35 | struct drm_device *dev; | 36 | struct drm_device *dev; |
| 36 | struct platform_device *pdev; | 37 | struct platform_device *pdev; |
| 37 | 38 | ||
| 38 | void __iomem *mmio; | 39 | const struct hdmi_platform_config *config; |
| 39 | 40 | ||
| 40 | struct regulator *mvs; /* HDMI_5V */ | 41 | void __iomem *mmio; |
| 41 | struct regulator *mpp0; /* External 5V */ | ||
| 42 | 42 | ||
| 43 | struct clk *clk; | 43 | struct regulator *hpd_regs[2]; |
| 44 | struct clk *m_pclk; | 44 | struct regulator *pwr_regs[2]; |
| 45 | struct clk *s_pclk; | 45 | struct clk *hpd_clks[3]; |
| 46 | struct clk *pwr_clks[2]; | ||
| 46 | 47 | ||
| 47 | struct hdmi_phy *phy; | 48 | struct hdmi_phy *phy; |
| 48 | struct i2c_adapter *i2c; | 49 | struct i2c_adapter *i2c; |
| @@ -60,7 +61,29 @@ struct hdmi { | |||
| 60 | /* platform config data (ie. from DT, or pdata) */ | 61 | /* platform config data (ie. from DT, or pdata) */ |
| 61 | struct hdmi_platform_config { | 62 | struct hdmi_platform_config { |
| 62 | struct hdmi_phy *(*phy_init)(struct hdmi *hdmi); | 63 | struct hdmi_phy *(*phy_init)(struct hdmi *hdmi); |
| 63 | int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, pmic_gpio; | 64 | const char *mmio_name; |
| 65 | |||
| 66 | /* regulators that need to be on for hpd: */ | ||
| 67 | const char **hpd_reg_names; | ||
| 68 | int hpd_reg_cnt; | ||
| 69 | |||
| 70 | /* regulators that need to be on for screen pwr: */ | ||
| 71 | const char **pwr_reg_names; | ||
| 72 | int pwr_reg_cnt; | ||
| 73 | |||
| 74 | /* clks that need to be on for hpd: */ | ||
| 75 | const char **hpd_clk_names; | ||
| 76 | int hpd_clk_cnt; | ||
| 77 | |||
| 78 | /* clks that need to be on for screen pwr (ie pixel clk): */ | ||
| 79 | const char **pwr_clk_names; | ||
| 80 | int pwr_clk_cnt; | ||
| 81 | |||
| 82 | /* gpio's: */ | ||
| 83 | int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, mux_en_gpio, mux_sel_gpio; | ||
| 84 | |||
| 85 | /* older devices had their own irq, mdp5+ it is shared w/ mdp: */ | ||
| 86 | bool shared_irq; | ||
| 64 | }; | 87 | }; |
| 65 | 88 | ||
| 66 | void hdmi_set_mode(struct hdmi *hdmi, bool power_on); | 89 | void hdmi_set_mode(struct hdmi *hdmi, bool power_on); |
| @@ -106,6 +129,7 @@ struct hdmi_phy { | |||
| 106 | 129 | ||
| 107 | struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi); | 130 | struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi); |
| 108 | struct hdmi_phy *hdmi_phy_8x60_init(struct hdmi *hdmi); | 131 | struct hdmi_phy *hdmi_phy_8x60_init(struct hdmi *hdmi); |
| 132 | struct hdmi_phy *hdmi_phy_8x74_init(struct hdmi *hdmi); | ||
| 109 | 133 | ||
| 110 | /* | 134 | /* |
| 111 | * hdmi bridge: | 135 | * hdmi bridge: |
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h index 4e939f82918c..e2636582cfd7 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h +++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h | |||
| @@ -8,14 +8,16 @@ http://github.com/freedreno/envytools/ | |||
| 8 | git clone https://github.com/freedreno/envytools.git | 8 | git clone https://github.com/freedreno/envytools.git |
| 9 | 9 | ||
| 10 | The rules-ng-ng source files this header was generated from are: | 10 | The rules-ng-ng source files this header was generated from are: |
| 11 | - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12) | 11 | - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) |
| 12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) | 12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) |
| 13 | - /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48) | 13 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31) |
| 14 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52) | ||
| 15 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13) | ||
| 14 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) | 16 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) |
| 15 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) | 17 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) |
| 16 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) | 18 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) |
| 17 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) | 19 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) |
| 18 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15) | 20 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04) |
| 19 | 21 | ||
| 20 | Copyright (C) 2013 by the following authors: | 22 | Copyright (C) 2013 by the following authors: |
| 21 | - Rob Clark <robdclark@gmail.com> (robclark) | 23 | - Rob Clark <robdclark@gmail.com> (robclark) |
| @@ -212,6 +214,20 @@ static inline uint32_t HDMI_HDCP_LINK0_STATUS_KEY_STATE(enum hdmi_hdcp_key_state | |||
| 212 | #define REG_HDMI_HDCP_RESET 0x00000130 | 214 | #define REG_HDMI_HDCP_RESET 0x00000130 |
| 213 | #define HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE 0x00000001 | 215 | #define HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE 0x00000001 |
| 214 | 216 | ||
| 217 | #define REG_HDMI_VENSPEC_INFO0 0x0000016c | ||
| 218 | |||
| 219 | #define REG_HDMI_VENSPEC_INFO1 0x00000170 | ||
| 220 | |||
| 221 | #define REG_HDMI_VENSPEC_INFO2 0x00000174 | ||
| 222 | |||
| 223 | #define REG_HDMI_VENSPEC_INFO3 0x00000178 | ||
| 224 | |||
| 225 | #define REG_HDMI_VENSPEC_INFO4 0x0000017c | ||
| 226 | |||
| 227 | #define REG_HDMI_VENSPEC_INFO5 0x00000180 | ||
| 228 | |||
| 229 | #define REG_HDMI_VENSPEC_INFO6 0x00000184 | ||
| 230 | |||
| 215 | #define REG_HDMI_AUDIO_CFG 0x000001d0 | 231 | #define REG_HDMI_AUDIO_CFG 0x000001d0 |
| 216 | #define HDMI_AUDIO_CFG_ENGINE_ENABLE 0x00000001 | 232 | #define HDMI_AUDIO_CFG_ENGINE_ENABLE 0x00000001 |
| 217 | #define HDMI_AUDIO_CFG_FIFO_WATERMARK__MASK 0x000000f0 | 233 | #define HDMI_AUDIO_CFG_FIFO_WATERMARK__MASK 0x000000f0 |
| @@ -235,6 +251,9 @@ static inline uint32_t HDMI_DDC_CTRL_TRANSACTION_CNT(uint32_t val) | |||
| 235 | return ((val) << HDMI_DDC_CTRL_TRANSACTION_CNT__SHIFT) & HDMI_DDC_CTRL_TRANSACTION_CNT__MASK; | 251 | return ((val) << HDMI_DDC_CTRL_TRANSACTION_CNT__SHIFT) & HDMI_DDC_CTRL_TRANSACTION_CNT__MASK; |
| 236 | } | 252 | } |
| 237 | 253 | ||
| 254 | #define REG_HDMI_DDC_ARBITRATION 0x00000210 | ||
| 255 | #define HDMI_DDC_ARBITRATION_HW_ARBITRATION 0x00000010 | ||
| 256 | |||
| 238 | #define REG_HDMI_DDC_INT_CTRL 0x00000214 | 257 | #define REG_HDMI_DDC_INT_CTRL 0x00000214 |
| 239 | #define HDMI_DDC_INT_CTRL_SW_DONE_INT 0x00000001 | 258 | #define HDMI_DDC_INT_CTRL_SW_DONE_INT 0x00000001 |
| 240 | #define HDMI_DDC_INT_CTRL_SW_DONE_ACK 0x00000002 | 259 | #define HDMI_DDC_INT_CTRL_SW_DONE_ACK 0x00000002 |
| @@ -340,6 +359,20 @@ static inline uint32_t HDMI_DDC_REF_REFTIMER(uint32_t val) | |||
| 340 | return ((val) << HDMI_DDC_REF_REFTIMER__SHIFT) & HDMI_DDC_REF_REFTIMER__MASK; | 359 | return ((val) << HDMI_DDC_REF_REFTIMER__SHIFT) & HDMI_DDC_REF_REFTIMER__MASK; |
| 341 | } | 360 | } |
| 342 | 361 | ||
| 362 | #define REG_HDMI_CEC_STATUS 0x00000298 | ||
| 363 | |||
| 364 | #define REG_HDMI_CEC_INT 0x0000029c | ||
| 365 | |||
| 366 | #define REG_HDMI_CEC_ADDR 0x000002a0 | ||
| 367 | |||
| 368 | #define REG_HDMI_CEC_TIME 0x000002a4 | ||
| 369 | |||
| 370 | #define REG_HDMI_CEC_REFTIMER 0x000002a8 | ||
| 371 | |||
| 372 | #define REG_HDMI_CEC_RD_DATA 0x000002ac | ||
| 373 | |||
| 374 | #define REG_HDMI_CEC_RD_FILTER 0x000002b0 | ||
| 375 | |||
| 343 | #define REG_HDMI_ACTIVE_HSYNC 0x000002b4 | 376 | #define REG_HDMI_ACTIVE_HSYNC 0x000002b4 |
| 344 | #define HDMI_ACTIVE_HSYNC_START__MASK 0x00000fff | 377 | #define HDMI_ACTIVE_HSYNC_START__MASK 0x00000fff |
| 345 | #define HDMI_ACTIVE_HSYNC_START__SHIFT 0 | 378 | #define HDMI_ACTIVE_HSYNC_START__SHIFT 0 |
| @@ -410,17 +443,33 @@ static inline uint32_t HDMI_VSYNC_TOTAL_F2_V_TOTAL(uint32_t val) | |||
| 410 | #define HDMI_FRAME_CTRL_HSYNC_LOW 0x20000000 | 443 | #define HDMI_FRAME_CTRL_HSYNC_LOW 0x20000000 |
| 411 | #define HDMI_FRAME_CTRL_INTERLACED_EN 0x80000000 | 444 | #define HDMI_FRAME_CTRL_INTERLACED_EN 0x80000000 |
| 412 | 445 | ||
| 446 | #define REG_HDMI_AUD_INT 0x000002cc | ||
| 447 | #define HDMI_AUD_INT_AUD_FIFO_URUN_INT 0x00000001 | ||
| 448 | #define HDMI_AUD_INT_AUD_FIFO_URAN_MASK 0x00000002 | ||
| 449 | #define HDMI_AUD_INT_AUD_SAM_DROP_INT 0x00000004 | ||
| 450 | #define HDMI_AUD_INT_AUD_SAM_DROP_MASK 0x00000008 | ||
| 451 | |||
| 413 | #define REG_HDMI_PHY_CTRL 0x000002d4 | 452 | #define REG_HDMI_PHY_CTRL 0x000002d4 |
| 414 | #define HDMI_PHY_CTRL_SW_RESET_PLL 0x00000001 | 453 | #define HDMI_PHY_CTRL_SW_RESET_PLL 0x00000001 |
| 415 | #define HDMI_PHY_CTRL_SW_RESET_PLL_LOW 0x00000002 | 454 | #define HDMI_PHY_CTRL_SW_RESET_PLL_LOW 0x00000002 |
| 416 | #define HDMI_PHY_CTRL_SW_RESET 0x00000004 | 455 | #define HDMI_PHY_CTRL_SW_RESET 0x00000004 |
| 417 | #define HDMI_PHY_CTRL_SW_RESET_LOW 0x00000008 | 456 | #define HDMI_PHY_CTRL_SW_RESET_LOW 0x00000008 |
| 418 | 457 | ||
| 419 | #define REG_HDMI_AUD_INT 0x000002cc | 458 | #define REG_HDMI_CEC_WR_RANGE 0x000002dc |
| 420 | #define HDMI_AUD_INT_AUD_FIFO_URUN_INT 0x00000001 | 459 | |
| 421 | #define HDMI_AUD_INT_AUD_FIFO_URAN_MASK 0x00000002 | 460 | #define REG_HDMI_CEC_RD_RANGE 0x000002e0 |
| 422 | #define HDMI_AUD_INT_AUD_SAM_DROP_INT 0x00000004 | 461 | |
| 423 | #define HDMI_AUD_INT_AUD_SAM_DROP_MASK 0x00000008 | 462 | #define REG_HDMI_VERSION 0x000002e4 |
| 463 | |||
| 464 | #define REG_HDMI_CEC_COMPL_CTL 0x00000360 | ||
| 465 | |||
| 466 | #define REG_HDMI_CEC_RD_START_RANGE 0x00000364 | ||
| 467 | |||
| 468 | #define REG_HDMI_CEC_RD_TOTAL_RANGE 0x00000368 | ||
| 469 | |||
| 470 | #define REG_HDMI_CEC_RD_ERR_RESP_LO 0x0000036c | ||
| 471 | |||
| 472 | #define REG_HDMI_CEC_WR_CHECK_CONFIG 0x00000370 | ||
| 424 | 473 | ||
| 425 | #define REG_HDMI_8x60_PHY_REG0 0x00000300 | 474 | #define REG_HDMI_8x60_PHY_REG0 0x00000300 |
| 426 | #define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__MASK 0x0000001c | 475 | #define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__MASK 0x0000001c |
| @@ -504,5 +553,23 @@ static inline uint32_t HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(uint32_t val) | |||
| 504 | 553 | ||
| 505 | #define REG_HDMI_8960_PHY_REG12 0x00000430 | 554 | #define REG_HDMI_8960_PHY_REG12 0x00000430 |
| 506 | 555 | ||
| 556 | #define REG_HDMI_8x74_ANA_CFG0 0x00000000 | ||
| 557 | |||
| 558 | #define REG_HDMI_8x74_ANA_CFG1 0x00000004 | ||
| 559 | |||
| 560 | #define REG_HDMI_8x74_PD_CTRL0 0x00000010 | ||
| 561 | |||
| 562 | #define REG_HDMI_8x74_PD_CTRL1 0x00000014 | ||
| 563 | |||
| 564 | #define REG_HDMI_8x74_BIST_CFG0 0x00000034 | ||
| 565 | |||
| 566 | #define REG_HDMI_8x74_BIST_PATN0 0x0000003c | ||
| 567 | |||
| 568 | #define REG_HDMI_8x74_BIST_PATN1 0x00000040 | ||
| 569 | |||
| 570 | #define REG_HDMI_8x74_BIST_PATN2 0x00000044 | ||
| 571 | |||
| 572 | #define REG_HDMI_8x74_BIST_PATN3 0x00000048 | ||
| 573 | |||
| 507 | 574 | ||
| 508 | #endif /* HDMI_XML */ | 575 | #endif /* HDMI_XML */ |
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c index 5a8ee3473cf5..7d10e55403c6 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c | |||
| @@ -21,6 +21,7 @@ struct hdmi_bridge { | |||
| 21 | struct drm_bridge base; | 21 | struct drm_bridge base; |
| 22 | 22 | ||
| 23 | struct hdmi *hdmi; | 23 | struct hdmi *hdmi; |
| 24 | bool power_on; | ||
| 24 | 25 | ||
| 25 | unsigned long int pixclock; | 26 | unsigned long int pixclock; |
| 26 | }; | 27 | }; |
| @@ -34,6 +35,65 @@ static void hdmi_bridge_destroy(struct drm_bridge *bridge) | |||
| 34 | kfree(hdmi_bridge); | 35 | kfree(hdmi_bridge); |
| 35 | } | 36 | } |
| 36 | 37 | ||
| 38 | static void power_on(struct drm_bridge *bridge) | ||
| 39 | { | ||
| 40 | struct drm_device *dev = bridge->dev; | ||
| 41 | struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); | ||
| 42 | struct hdmi *hdmi = hdmi_bridge->hdmi; | ||
| 43 | const struct hdmi_platform_config *config = hdmi->config; | ||
| 44 | int i, ret; | ||
| 45 | |||
| 46 | for (i = 0; i < config->pwr_reg_cnt; i++) { | ||
| 47 | ret = regulator_enable(hdmi->pwr_regs[i]); | ||
| 48 | if (ret) { | ||
| 49 | dev_err(dev->dev, "failed to enable pwr regulator: %s (%d)\n", | ||
| 50 | config->pwr_reg_names[i], ret); | ||
| 51 | } | ||
| 52 | } | ||
| 53 | |||
| 54 | if (config->pwr_clk_cnt > 0) { | ||
| 55 | DBG("pixclock: %lu", hdmi_bridge->pixclock); | ||
| 56 | ret = clk_set_rate(hdmi->pwr_clks[0], hdmi_bridge->pixclock); | ||
| 57 | if (ret) { | ||
| 58 | dev_err(dev->dev, "failed to set pixel clk: %s (%d)\n", | ||
| 59 | config->pwr_clk_names[0], ret); | ||
| 60 | } | ||
| 61 | } | ||
| 62 | |||
| 63 | for (i = 0; i < config->pwr_clk_cnt; i++) { | ||
| 64 | ret = clk_prepare_enable(hdmi->pwr_clks[i]); | ||
| 65 | if (ret) { | ||
| 66 | dev_err(dev->dev, "failed to enable pwr clk: %s (%d)\n", | ||
| 67 | config->pwr_clk_names[i], ret); | ||
| 68 | } | ||
| 69 | } | ||
| 70 | } | ||
| 71 | |||
| 72 | static void power_off(struct drm_bridge *bridge) | ||
| 73 | { | ||
| 74 | struct drm_device *dev = bridge->dev; | ||
| 75 | struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); | ||
| 76 | struct hdmi *hdmi = hdmi_bridge->hdmi; | ||
| 77 | const struct hdmi_platform_config *config = hdmi->config; | ||
| 78 | int i, ret; | ||
| 79 | |||
| 80 | /* TODO do we need to wait for final vblank somewhere before | ||
| 81 | * cutting the clocks? | ||
| 82 | */ | ||
| 83 | mdelay(16 + 4); | ||
| 84 | |||
| 85 | for (i = 0; i < config->pwr_clk_cnt; i++) | ||
| 86 | clk_disable_unprepare(hdmi->pwr_clks[i]); | ||
| 87 | |||
| 88 | for (i = 0; i < config->pwr_reg_cnt; i++) { | ||
| 89 | ret = regulator_disable(hdmi->pwr_regs[i]); | ||
| 90 | if (ret) { | ||
| 91 | dev_err(dev->dev, "failed to disable pwr regulator: %s (%d)\n", | ||
| 92 | config->pwr_reg_names[i], ret); | ||
| 93 | } | ||
| 94 | } | ||
| 95 | } | ||
| 96 | |||
| 37 | static void hdmi_bridge_pre_enable(struct drm_bridge *bridge) | 97 | static void hdmi_bridge_pre_enable(struct drm_bridge *bridge) |
| 38 | { | 98 | { |
| 39 | struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); | 99 | struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); |
| @@ -41,6 +101,12 @@ static void hdmi_bridge_pre_enable(struct drm_bridge *bridge) | |||
| 41 | struct hdmi_phy *phy = hdmi->phy; | 101 | struct hdmi_phy *phy = hdmi->phy; |
| 42 | 102 | ||
| 43 | DBG("power up"); | 103 | DBG("power up"); |
| 104 | |||
| 105 | if (!hdmi_bridge->power_on) { | ||
| 106 | power_on(bridge); | ||
| 107 | hdmi_bridge->power_on = true; | ||
| 108 | } | ||
| 109 | |||
| 44 | phy->funcs->powerup(phy, hdmi_bridge->pixclock); | 110 | phy->funcs->powerup(phy, hdmi_bridge->pixclock); |
| 45 | hdmi_set_mode(hdmi, true); | 111 | hdmi_set_mode(hdmi, true); |
| 46 | } | 112 | } |
| @@ -62,6 +128,11 @@ static void hdmi_bridge_post_disable(struct drm_bridge *bridge) | |||
| 62 | DBG("power down"); | 128 | DBG("power down"); |
| 63 | hdmi_set_mode(hdmi, false); | 129 | hdmi_set_mode(hdmi, false); |
| 64 | phy->funcs->powerdown(phy); | 130 | phy->funcs->powerdown(phy); |
| 131 | |||
| 132 | if (hdmi_bridge->power_on) { | ||
| 133 | power_off(bridge); | ||
| 134 | hdmi_bridge->power_on = false; | ||
| 135 | } | ||
| 65 | } | 136 | } |
| 66 | 137 | ||
| 67 | static void hdmi_bridge_mode_set(struct drm_bridge *bridge, | 138 | static void hdmi_bridge_mode_set(struct drm_bridge *bridge, |
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c index 823eee521a31..7dedfdd12075 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c | |||
| @@ -17,19 +17,20 @@ | |||
| 17 | 17 | ||
| 18 | #include <linux/gpio.h> | 18 | #include <linux/gpio.h> |
| 19 | 19 | ||
| 20 | #include "msm_kms.h" | ||
| 20 | #include "hdmi.h" | 21 | #include "hdmi.h" |
| 21 | 22 | ||
| 22 | struct hdmi_connector { | 23 | struct hdmi_connector { |
| 23 | struct drm_connector base; | 24 | struct drm_connector base; |
| 24 | struct hdmi *hdmi; | 25 | struct hdmi *hdmi; |
| 26 | struct work_struct hpd_work; | ||
| 25 | }; | 27 | }; |
| 26 | #define to_hdmi_connector(x) container_of(x, struct hdmi_connector, base) | 28 | #define to_hdmi_connector(x) container_of(x, struct hdmi_connector, base) |
| 27 | 29 | ||
| 28 | static int gpio_config(struct hdmi *hdmi, bool on) | 30 | static int gpio_config(struct hdmi *hdmi, bool on) |
| 29 | { | 31 | { |
| 30 | struct drm_device *dev = hdmi->dev; | 32 | struct drm_device *dev = hdmi->dev; |
| 31 | struct hdmi_platform_config *config = | 33 | const struct hdmi_platform_config *config = hdmi->config; |
| 32 | hdmi->pdev->dev.platform_data; | ||
| 33 | int ret; | 34 | int ret; |
| 34 | 35 | ||
| 35 | if (on) { | 36 | if (on) { |
| @@ -39,26 +40,43 @@ static int gpio_config(struct hdmi *hdmi, bool on) | |||
| 39 | "HDMI_DDC_CLK", config->ddc_clk_gpio, ret); | 40 | "HDMI_DDC_CLK", config->ddc_clk_gpio, ret); |
| 40 | goto error1; | 41 | goto error1; |
| 41 | } | 42 | } |
| 43 | gpio_set_value_cansleep(config->ddc_clk_gpio, 1); | ||
| 44 | |||
| 42 | ret = gpio_request(config->ddc_data_gpio, "HDMI_DDC_DATA"); | 45 | ret = gpio_request(config->ddc_data_gpio, "HDMI_DDC_DATA"); |
| 43 | if (ret) { | 46 | if (ret) { |
| 44 | dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n", | 47 | dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n", |
| 45 | "HDMI_DDC_DATA", config->ddc_data_gpio, ret); | 48 | "HDMI_DDC_DATA", config->ddc_data_gpio, ret); |
| 46 | goto error2; | 49 | goto error2; |
| 47 | } | 50 | } |
| 51 | gpio_set_value_cansleep(config->ddc_data_gpio, 1); | ||
| 52 | |||
| 48 | ret = gpio_request(config->hpd_gpio, "HDMI_HPD"); | 53 | ret = gpio_request(config->hpd_gpio, "HDMI_HPD"); |
| 49 | if (ret) { | 54 | if (ret) { |
| 50 | dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n", | 55 | dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n", |
| 51 | "HDMI_HPD", config->hpd_gpio, ret); | 56 | "HDMI_HPD", config->hpd_gpio, ret); |
| 52 | goto error3; | 57 | goto error3; |
| 53 | } | 58 | } |
| 54 | if (config->pmic_gpio != -1) { | 59 | gpio_direction_input(config->hpd_gpio); |
| 55 | ret = gpio_request(config->pmic_gpio, "PMIC_HDMI_MUX_SEL"); | 60 | gpio_set_value_cansleep(config->hpd_gpio, 1); |
| 61 | |||
| 62 | if (config->mux_en_gpio != -1) { | ||
| 63 | ret = gpio_request(config->mux_en_gpio, "HDMI_MUX_EN"); | ||
| 56 | if (ret) { | 64 | if (ret) { |
| 57 | dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n", | 65 | dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n", |
| 58 | "PMIC_HDMI_MUX_SEL", config->pmic_gpio, ret); | 66 | "HDMI_MUX_SEL", config->mux_en_gpio, ret); |
| 59 | goto error4; | 67 | goto error4; |
| 60 | } | 68 | } |
| 61 | gpio_set_value_cansleep(config->pmic_gpio, 0); | 69 | gpio_set_value_cansleep(config->mux_en_gpio, 1); |
| 70 | } | ||
| 71 | |||
| 72 | if (config->mux_sel_gpio != -1) { | ||
| 73 | ret = gpio_request(config->mux_sel_gpio, "HDMI_MUX_SEL"); | ||
| 74 | if (ret) { | ||
| 75 | dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n", | ||
| 76 | "HDMI_MUX_SEL", config->mux_sel_gpio, ret); | ||
| 77 | goto error5; | ||
| 78 | } | ||
| 79 | gpio_set_value_cansleep(config->mux_sel_gpio, 0); | ||
| 62 | } | 80 | } |
| 63 | DBG("gpio on"); | 81 | DBG("gpio on"); |
| 64 | } else { | 82 | } else { |
| @@ -66,15 +84,23 @@ static int gpio_config(struct hdmi *hdmi, bool on) | |||
| 66 | gpio_free(config->ddc_data_gpio); | 84 | gpio_free(config->ddc_data_gpio); |
| 67 | gpio_free(config->hpd_gpio); | 85 | gpio_free(config->hpd_gpio); |
| 68 | 86 | ||
| 69 | if (config->pmic_gpio != -1) { | 87 | if (config->mux_en_gpio != -1) { |
| 70 | gpio_set_value_cansleep(config->pmic_gpio, 1); | 88 | gpio_set_value_cansleep(config->mux_en_gpio, 0); |
| 71 | gpio_free(config->pmic_gpio); | 89 | gpio_free(config->mux_en_gpio); |
| 90 | } | ||
| 91 | |||
| 92 | if (config->mux_sel_gpio != -1) { | ||
| 93 | gpio_set_value_cansleep(config->mux_sel_gpio, 1); | ||
| 94 | gpio_free(config->mux_sel_gpio); | ||
| 72 | } | 95 | } |
| 73 | DBG("gpio off"); | 96 | DBG("gpio off"); |
| 74 | } | 97 | } |
| 75 | 98 | ||
| 76 | return 0; | 99 | return 0; |
| 77 | 100 | ||
| 101 | error5: | ||
| 102 | if (config->mux_en_gpio != -1) | ||
| 103 | gpio_free(config->mux_en_gpio); | ||
| 78 | error4: | 104 | error4: |
| 79 | gpio_free(config->hpd_gpio); | 105 | gpio_free(config->hpd_gpio); |
| 80 | error3: | 106 | error3: |
| @@ -88,10 +114,11 @@ error1: | |||
| 88 | static int hpd_enable(struct hdmi_connector *hdmi_connector) | 114 | static int hpd_enable(struct hdmi_connector *hdmi_connector) |
| 89 | { | 115 | { |
| 90 | struct hdmi *hdmi = hdmi_connector->hdmi; | 116 | struct hdmi *hdmi = hdmi_connector->hdmi; |
| 117 | const struct hdmi_platform_config *config = hdmi->config; | ||
| 91 | struct drm_device *dev = hdmi_connector->base.dev; | 118 | struct drm_device *dev = hdmi_connector->base.dev; |
| 92 | struct hdmi_phy *phy = hdmi->phy; | 119 | struct hdmi_phy *phy = hdmi->phy; |
| 93 | uint32_t hpd_ctrl; | 120 | uint32_t hpd_ctrl; |
| 94 | int ret; | 121 | int i, ret; |
| 95 | 122 | ||
| 96 | ret = gpio_config(hdmi, true); | 123 | ret = gpio_config(hdmi, true); |
| 97 | if (ret) { | 124 | if (ret) { |
| @@ -99,31 +126,22 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector) | |||
| 99 | goto fail; | 126 | goto fail; |
| 100 | } | 127 | } |
| 101 | 128 | ||
| 102 | ret = clk_prepare_enable(hdmi->clk); | 129 | for (i = 0; i < config->hpd_clk_cnt; i++) { |
| 103 | if (ret) { | 130 | ret = clk_prepare_enable(hdmi->hpd_clks[i]); |
| 104 | dev_err(dev->dev, "failed to enable 'clk': %d\n", ret); | 131 | if (ret) { |
| 105 | goto fail; | 132 | dev_err(dev->dev, "failed to enable hpd clk: %s (%d)\n", |
| 106 | } | 133 | config->hpd_clk_names[i], ret); |
| 107 | 134 | goto fail; | |
| 108 | ret = clk_prepare_enable(hdmi->m_pclk); | 135 | } |
| 109 | if (ret) { | ||
| 110 | dev_err(dev->dev, "failed to enable 'm_pclk': %d\n", ret); | ||
| 111 | goto fail; | ||
| 112 | } | ||
| 113 | |||
| 114 | ret = clk_prepare_enable(hdmi->s_pclk); | ||
| 115 | if (ret) { | ||
| 116 | dev_err(dev->dev, "failed to enable 's_pclk': %d\n", ret); | ||
| 117 | goto fail; | ||
| 118 | } | 136 | } |
| 119 | 137 | ||
| 120 | if (hdmi->mpp0) | 138 | for (i = 0; i < config->hpd_reg_cnt; i++) { |
| 121 | ret = regulator_enable(hdmi->mpp0); | 139 | ret = regulator_enable(hdmi->hpd_regs[i]); |
| 122 | if (!ret) | 140 | if (ret) { |
| 123 | ret = regulator_enable(hdmi->mvs); | 141 | dev_err(dev->dev, "failed to enable hpd regulator: %s (%d)\n", |
| 124 | if (ret) { | 142 | config->hpd_reg_names[i], ret); |
| 125 | dev_err(dev->dev, "failed to enable regulators: %d\n", ret); | 143 | goto fail; |
| 126 | goto fail; | 144 | } |
| 127 | } | 145 | } |
| 128 | 146 | ||
| 129 | hdmi_set_mode(hdmi, false); | 147 | hdmi_set_mode(hdmi, false); |
| @@ -156,26 +174,26 @@ fail: | |||
| 156 | static int hdp_disable(struct hdmi_connector *hdmi_connector) | 174 | static int hdp_disable(struct hdmi_connector *hdmi_connector) |
| 157 | { | 175 | { |
| 158 | struct hdmi *hdmi = hdmi_connector->hdmi; | 176 | struct hdmi *hdmi = hdmi_connector->hdmi; |
| 177 | const struct hdmi_platform_config *config = hdmi->config; | ||
| 159 | struct drm_device *dev = hdmi_connector->base.dev; | 178 | struct drm_device *dev = hdmi_connector->base.dev; |
| 160 | int ret = 0; | 179 | int i, ret = 0; |
| 161 | 180 | ||
| 162 | /* Disable HPD interrupt */ | 181 | /* Disable HPD interrupt */ |
| 163 | hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, 0); | 182 | hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, 0); |
| 164 | 183 | ||
| 165 | hdmi_set_mode(hdmi, false); | 184 | hdmi_set_mode(hdmi, false); |
| 166 | 185 | ||
| 167 | if (hdmi->mpp0) | 186 | for (i = 0; i < config->hpd_reg_cnt; i++) { |
| 168 | ret = regulator_disable(hdmi->mpp0); | 187 | ret = regulator_disable(hdmi->hpd_regs[i]); |
| 169 | if (!ret) | 188 | if (ret) { |
| 170 | ret = regulator_disable(hdmi->mvs); | 189 | dev_err(dev->dev, "failed to disable hpd regulator: %s (%d)\n", |
| 171 | if (ret) { | 190 | config->hpd_reg_names[i], ret); |
| 172 | dev_err(dev->dev, "failed to enable regulators: %d\n", ret); | 191 | goto fail; |
| 173 | goto fail; | 192 | } |
| 174 | } | 193 | } |
| 175 | 194 | ||
| 176 | clk_disable_unprepare(hdmi->clk); | 195 | for (i = 0; i < config->hpd_clk_cnt; i++) |
| 177 | clk_disable_unprepare(hdmi->m_pclk); | 196 | clk_disable_unprepare(hdmi->hpd_clks[i]); |
| 178 | clk_disable_unprepare(hdmi->s_pclk); | ||
| 179 | 197 | ||
| 180 | ret = gpio_config(hdmi, false); | 198 | ret = gpio_config(hdmi, false); |
| 181 | if (ret) { | 199 | if (ret) { |
| @@ -189,9 +207,19 @@ fail: | |||
| 189 | return ret; | 207 | return ret; |
| 190 | } | 208 | } |
| 191 | 209 | ||
| 210 | static void | ||
| 211 | hotplug_work(struct work_struct *work) | ||
| 212 | { | ||
| 213 | struct hdmi_connector *hdmi_connector = | ||
| 214 | container_of(work, struct hdmi_connector, hpd_work); | ||
| 215 | struct drm_connector *connector = &hdmi_connector->base; | ||
| 216 | drm_helper_hpd_irq_event(connector->dev); | ||
| 217 | } | ||
| 218 | |||
| 192 | void hdmi_connector_irq(struct drm_connector *connector) | 219 | void hdmi_connector_irq(struct drm_connector *connector) |
| 193 | { | 220 | { |
| 194 | struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); | 221 | struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); |
| 222 | struct msm_drm_private *priv = connector->dev->dev_private; | ||
| 195 | struct hdmi *hdmi = hdmi_connector->hdmi; | 223 | struct hdmi *hdmi = hdmi_connector->hdmi; |
| 196 | uint32_t hpd_int_status, hpd_int_ctrl; | 224 | uint32_t hpd_int_status, hpd_int_ctrl; |
| 197 | 225 | ||
| @@ -209,13 +237,13 @@ void hdmi_connector_irq(struct drm_connector *connector) | |||
| 209 | hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, | 237 | hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, |
| 210 | hpd_int_ctrl | HDMI_HPD_INT_CTRL_INT_ACK); | 238 | hpd_int_ctrl | HDMI_HPD_INT_CTRL_INT_ACK); |
| 211 | 239 | ||
| 212 | drm_helper_hpd_irq_event(connector->dev); | ||
| 213 | |||
| 214 | /* detect disconnect if we are connected or visa versa: */ | 240 | /* detect disconnect if we are connected or visa versa: */ |
| 215 | hpd_int_ctrl = HDMI_HPD_INT_CTRL_INT_EN; | 241 | hpd_int_ctrl = HDMI_HPD_INT_CTRL_INT_EN; |
| 216 | if (!detected) | 242 | if (!detected) |
| 217 | hpd_int_ctrl |= HDMI_HPD_INT_CTRL_INT_CONNECT; | 243 | hpd_int_ctrl |= HDMI_HPD_INT_CTRL_INT_CONNECT; |
| 218 | hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, hpd_int_ctrl); | 244 | hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, hpd_int_ctrl); |
| 245 | |||
| 246 | queue_work(priv->wq, &hdmi_connector->hpd_work); | ||
| 219 | } | 247 | } |
| 220 | } | 248 | } |
| 221 | 249 | ||
| @@ -224,6 +252,7 @@ static enum drm_connector_status hdmi_connector_detect( | |||
| 224 | { | 252 | { |
| 225 | struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); | 253 | struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); |
| 226 | struct hdmi *hdmi = hdmi_connector->hdmi; | 254 | struct hdmi *hdmi = hdmi_connector->hdmi; |
| 255 | const struct hdmi_platform_config *config = hdmi->config; | ||
| 227 | uint32_t hpd_int_status; | 256 | uint32_t hpd_int_status; |
| 228 | int retry = 20; | 257 | int retry = 20; |
| 229 | 258 | ||
| @@ -233,6 +262,14 @@ static enum drm_connector_status hdmi_connector_detect( | |||
| 233 | * let that trick us into thinking the monitor is gone: | 262 | * let that trick us into thinking the monitor is gone: |
| 234 | */ | 263 | */ |
| 235 | while (retry-- && !(hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED)) { | 264 | while (retry-- && !(hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED)) { |
| 265 | /* hdmi debounce logic seems to get stuck sometimes, | ||
| 266 | * read directly the gpio to get a second opinion: | ||
| 267 | */ | ||
| 268 | if (gpio_get_value(config->hpd_gpio)) { | ||
| 269 | DBG("gpio tells us we are connected!"); | ||
| 270 | hpd_int_status |= HDMI_HPD_INT_STATUS_CABLE_DETECTED; | ||
| 271 | break; | ||
| 272 | } | ||
| 236 | mdelay(10); | 273 | mdelay(10); |
| 237 | hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS); | 274 | hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS); |
| 238 | DBG("status=%08x", hpd_int_status); | 275 | DBG("status=%08x", hpd_int_status); |
| @@ -285,6 +322,8 @@ static int hdmi_connector_mode_valid(struct drm_connector *connector, | |||
| 285 | struct drm_display_mode *mode) | 322 | struct drm_display_mode *mode) |
| 286 | { | 323 | { |
| 287 | struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); | 324 | struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); |
| 325 | struct hdmi *hdmi = hdmi_connector->hdmi; | ||
| 326 | const struct hdmi_platform_config *config = hdmi->config; | ||
| 288 | struct msm_drm_private *priv = connector->dev->dev_private; | 327 | struct msm_drm_private *priv = connector->dev->dev_private; |
| 289 | struct msm_kms *kms = priv->kms; | 328 | struct msm_kms *kms = priv->kms; |
| 290 | long actual, requested; | 329 | long actual, requested; |
| @@ -293,6 +332,13 @@ static int hdmi_connector_mode_valid(struct drm_connector *connector, | |||
| 293 | actual = kms->funcs->round_pixclk(kms, | 332 | actual = kms->funcs->round_pixclk(kms, |
| 294 | requested, hdmi_connector->hdmi->encoder); | 333 | requested, hdmi_connector->hdmi->encoder); |
| 295 | 334 | ||
| 335 | /* for mdp5/apq8074, we manage our own pixel clk (as opposed to | ||
| 336 | * mdp4/dtv stuff where pixel clk is assigned to mdp/encoder | ||
| 337 | * instead): | ||
| 338 | */ | ||
| 339 | if (config->pwr_clk_cnt > 0) | ||
| 340 | actual = clk_round_rate(hdmi->pwr_clks[0], actual); | ||
| 341 | |||
| 296 | DBG("requested=%ld, actual=%ld", requested, actual); | 342 | DBG("requested=%ld, actual=%ld", requested, actual); |
| 297 | 343 | ||
| 298 | if (actual != requested) | 344 | if (actual != requested) |
| @@ -335,6 +381,7 @@ struct drm_connector *hdmi_connector_init(struct hdmi *hdmi) | |||
| 335 | } | 381 | } |
| 336 | 382 | ||
| 337 | hdmi_connector->hdmi = hdmi_reference(hdmi); | 383 | hdmi_connector->hdmi = hdmi_reference(hdmi); |
| 384 | INIT_WORK(&hdmi_connector->hpd_work, hotplug_work); | ||
| 338 | 385 | ||
| 339 | connector = &hdmi_connector->base; | 386 | connector = &hdmi_connector->base; |
| 340 | 387 | ||
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c new file mode 100644 index 000000000000..59fa6cdacb2a --- /dev/null +++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c | |||
| @@ -0,0 +1,157 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2013 Red Hat | ||
| 3 | * Author: Rob Clark <robdclark@gmail.com> | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify it | ||
| 6 | * under the terms of the GNU General Public License version 2 as published by | ||
| 7 | * the Free Software Foundation. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 12 | * more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License along with | ||
| 15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 16 | */ | ||
| 17 | |||
| 18 | #include "hdmi.h" | ||
| 19 | |||
| 20 | struct hdmi_phy_8x74 { | ||
| 21 | struct hdmi_phy base; | ||
| 22 | struct hdmi *hdmi; | ||
| 23 | void __iomem *mmio; | ||
| 24 | }; | ||
| 25 | #define to_hdmi_phy_8x74(x) container_of(x, struct hdmi_phy_8x74, base) | ||
| 26 | |||
| 27 | |||
| 28 | static void phy_write(struct hdmi_phy_8x74 *phy, u32 reg, u32 data) | ||
| 29 | { | ||
| 30 | msm_writel(data, phy->mmio + reg); | ||
| 31 | } | ||
| 32 | |||
| 33 | //static u32 phy_read(struct hdmi_phy_8x74 *phy, u32 reg) | ||
| 34 | //{ | ||
| 35 | // return msm_readl(phy->mmio + reg); | ||
| 36 | //} | ||
| 37 | |||
| 38 | static void hdmi_phy_8x74_destroy(struct hdmi_phy *phy) | ||
| 39 | { | ||
| 40 | struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy); | ||
| 41 | kfree(phy_8x74); | ||
| 42 | } | ||
| 43 | |||
| 44 | static void hdmi_phy_8x74_reset(struct hdmi_phy *phy) | ||
| 45 | { | ||
| 46 | struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy); | ||
| 47 | struct hdmi *hdmi = phy_8x74->hdmi; | ||
| 48 | unsigned int val; | ||
| 49 | |||
| 50 | /* NOTE that HDMI_PHY_CTL is in core mmio, not phy mmio: */ | ||
| 51 | |||
| 52 | val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL); | ||
| 53 | |||
| 54 | if (val & HDMI_PHY_CTRL_SW_RESET_LOW) { | ||
| 55 | /* pull low */ | ||
| 56 | hdmi_write(hdmi, REG_HDMI_PHY_CTRL, | ||
| 57 | val & ~HDMI_PHY_CTRL_SW_RESET); | ||
| 58 | } else { | ||
| 59 | /* pull high */ | ||
| 60 | hdmi_write(hdmi, REG_HDMI_PHY_CTRL, | ||
| 61 | val | HDMI_PHY_CTRL_SW_RESET); | ||
| 62 | } | ||
| 63 | |||
| 64 | if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) { | ||
| 65 | /* pull low */ | ||
| 66 | hdmi_write(hdmi, REG_HDMI_PHY_CTRL, | ||
| 67 | val & ~HDMI_PHY_CTRL_SW_RESET_PLL); | ||
| 68 | } else { | ||
| 69 | /* pull high */ | ||
| 70 | hdmi_write(hdmi, REG_HDMI_PHY_CTRL, | ||
| 71 | val | HDMI_PHY_CTRL_SW_RESET_PLL); | ||
| 72 | } | ||
| 73 | |||
| 74 | msleep(100); | ||
| 75 | |||
| 76 | if (val & HDMI_PHY_CTRL_SW_RESET_LOW) { | ||
| 77 | /* pull high */ | ||
| 78 | hdmi_write(hdmi, REG_HDMI_PHY_CTRL, | ||
| 79 | val | HDMI_PHY_CTRL_SW_RESET); | ||
| 80 | } else { | ||
| 81 | /* pull low */ | ||
| 82 | hdmi_write(hdmi, REG_HDMI_PHY_CTRL, | ||
| 83 | val & ~HDMI_PHY_CTRL_SW_RESET); | ||
| 84 | } | ||
| 85 | |||
| 86 | if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) { | ||
| 87 | /* pull high */ | ||
| 88 | hdmi_write(hdmi, REG_HDMI_PHY_CTRL, | ||
| 89 | val | HDMI_PHY_CTRL_SW_RESET_PLL); | ||
| 90 | } else { | ||
| 91 | /* pull low */ | ||
| 92 | hdmi_write(hdmi, REG_HDMI_PHY_CTRL, | ||
| 93 | val & ~HDMI_PHY_CTRL_SW_RESET_PLL); | ||
| 94 | } | ||
| 95 | } | ||
| 96 | |||
| 97 | static void hdmi_phy_8x74_powerup(struct hdmi_phy *phy, | ||
| 98 | unsigned long int pixclock) | ||
| 99 | { | ||
| 100 | struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy); | ||
| 101 | |||
| 102 | phy_write(phy_8x74, REG_HDMI_8x74_ANA_CFG0, 0x1b); | ||
| 103 | phy_write(phy_8x74, REG_HDMI_8x74_ANA_CFG1, 0xf2); | ||
| 104 | phy_write(phy_8x74, REG_HDMI_8x74_BIST_CFG0, 0x0); | ||
| 105 | phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN0, 0x0); | ||
| 106 | phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN1, 0x0); | ||
| 107 | phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN2, 0x0); | ||
| 108 | phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN3, 0x0); | ||
| 109 | phy_write(phy_8x74, REG_HDMI_8x74_PD_CTRL1, 0x20); | ||
| 110 | } | ||
| 111 | |||
| 112 | static void hdmi_phy_8x74_powerdown(struct hdmi_phy *phy) | ||
| 113 | { | ||
| 114 | struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy); | ||
| 115 | phy_write(phy_8x74, REG_HDMI_8x74_PD_CTRL0, 0x7f); | ||
| 116 | } | ||
| 117 | |||
| 118 | static const struct hdmi_phy_funcs hdmi_phy_8x74_funcs = { | ||
| 119 | .destroy = hdmi_phy_8x74_destroy, | ||
| 120 | .reset = hdmi_phy_8x74_reset, | ||
| 121 | .powerup = hdmi_phy_8x74_powerup, | ||
| 122 | .powerdown = hdmi_phy_8x74_powerdown, | ||
| 123 | }; | ||
| 124 | |||
| 125 | struct hdmi_phy *hdmi_phy_8x74_init(struct hdmi *hdmi) | ||
| 126 | { | ||
| 127 | struct hdmi_phy_8x74 *phy_8x74; | ||
| 128 | struct hdmi_phy *phy = NULL; | ||
| 129 | int ret; | ||
| 130 | |||
| 131 | phy_8x74 = kzalloc(sizeof(*phy_8x74), GFP_KERNEL); | ||
| 132 | if (!phy_8x74) { | ||
| 133 | ret = -ENOMEM; | ||
| 134 | goto fail; | ||
| 135 | } | ||
| 136 | |||
| 137 | phy = &phy_8x74->base; | ||
| 138 | |||
| 139 | phy->funcs = &hdmi_phy_8x74_funcs; | ||
| 140 | |||
| 141 | phy_8x74->hdmi = hdmi; | ||
| 142 | |||
| 143 | /* for 8x74, the phy mmio is mapped separately: */ | ||
| 144 | phy_8x74->mmio = msm_ioremap(hdmi->pdev, | ||
| 145 | "phy_physical", "HDMI_8x74"); | ||
| 146 | if (IS_ERR(phy_8x74->mmio)) { | ||
| 147 | ret = PTR_ERR(phy_8x74->mmio); | ||
| 148 | goto fail; | ||
| 149 | } | ||
| 150 | |||
| 151 | return phy; | ||
| 152 | |||
| 153 | fail: | ||
| 154 | if (phy) | ||
| 155 | hdmi_phy_8x74_destroy(phy); | ||
| 156 | return ERR_PTR(ret); | ||
| 157 | } | ||
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h index dbde4f6339b9..d591567173c4 100644 --- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h +++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h | |||
| @@ -8,14 +8,16 @@ http://github.com/freedreno/envytools/ | |||
| 8 | git clone https://github.com/freedreno/envytools.git | 8 | git clone https://github.com/freedreno/envytools.git |
| 9 | 9 | ||
| 10 | The rules-ng-ng source files this header was generated from are: | 10 | The rules-ng-ng source files this header was generated from are: |
| 11 | - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12) | 11 | - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) |
| 12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) | 12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) |
| 13 | - /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48) | 13 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31) |
| 14 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52) | ||
| 15 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13) | ||
| 14 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) | 16 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) |
| 15 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) | 17 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) |
| 16 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) | 18 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) |
| 17 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) | 19 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) |
| 18 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15) | 20 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04) |
| 19 | 21 | ||
| 20 | Copyright (C) 2013 by the following authors: | 22 | Copyright (C) 2013 by the following authors: |
| 21 | - Rob Clark <robdclark@gmail.com> (robclark) | 23 | - Rob Clark <robdclark@gmail.com> (robclark) |
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h index 9908ffe1c3ad..416a26e1e58d 100644 --- a/drivers/gpu/drm/msm/mdp4/mdp4.xml.h +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h | |||
| @@ -8,14 +8,16 @@ http://github.com/freedreno/envytools/ | |||
| 8 | git clone https://github.com/freedreno/envytools.git | 8 | git clone https://github.com/freedreno/envytools.git |
| 9 | 9 | ||
| 10 | The rules-ng-ng source files this header was generated from are: | 10 | The rules-ng-ng source files this header was generated from are: |
| 11 | - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12) | 11 | - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) |
| 12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) | 12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) |
| 13 | - /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48) | 13 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31) |
| 14 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52) | ||
| 15 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13) | ||
| 14 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) | 16 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) |
| 15 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) | 17 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) |
| 16 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) | 18 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) |
| 17 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) | 19 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) |
| 18 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15) | 20 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04) |
| 19 | 21 | ||
| 20 | Copyright (C) 2013 by the following authors: | 22 | Copyright (C) 2013 by the following authors: |
| 21 | - Rob Clark <robdclark@gmail.com> (robclark) | 23 | - Rob Clark <robdclark@gmail.com> (robclark) |
| @@ -42,27 +44,6 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | |||
| 42 | */ | 44 | */ |
| 43 | 45 | ||
| 44 | 46 | ||
| 45 | enum mdp4_bpc { | ||
| 46 | BPC1 = 0, | ||
| 47 | BPC5 = 1, | ||
| 48 | BPC6 = 2, | ||
| 49 | BPC8 = 3, | ||
| 50 | }; | ||
| 51 | |||
| 52 | enum mdp4_bpc_alpha { | ||
| 53 | BPC1A = 0, | ||
| 54 | BPC4A = 1, | ||
| 55 | BPC6A = 2, | ||
| 56 | BPC8A = 3, | ||
| 57 | }; | ||
| 58 | |||
| 59 | enum mdp4_alpha_type { | ||
| 60 | FG_CONST = 0, | ||
| 61 | BG_CONST = 1, | ||
| 62 | FG_PIXEL = 2, | ||
| 63 | BG_PIXEL = 3, | ||
| 64 | }; | ||
| 65 | |||
| 66 | enum mdp4_pipe { | 47 | enum mdp4_pipe { |
| 67 | VG1 = 0, | 48 | VG1 = 0, |
| 68 | VG2 = 1, | 49 | VG2 = 1, |
| @@ -79,15 +60,6 @@ enum mdp4_mixer { | |||
| 79 | MIXER2 = 2, | 60 | MIXER2 = 2, |
| 80 | }; | 61 | }; |
| 81 | 62 | ||
| 82 | enum mdp4_mixer_stage_id { | ||
| 83 | STAGE_UNUSED = 0, | ||
| 84 | STAGE_BASE = 1, | ||
| 85 | STAGE0 = 2, | ||
| 86 | STAGE1 = 3, | ||
| 87 | STAGE2 = 4, | ||
| 88 | STAGE3 = 5, | ||
| 89 | }; | ||
| 90 | |||
| 91 | enum mdp4_intf { | 63 | enum mdp4_intf { |
| 92 | INTF_LCDC_DTV = 0, | 64 | INTF_LCDC_DTV = 0, |
| 93 | INTF_DSI_VIDEO = 1, | 65 | INTF_DSI_VIDEO = 1, |
| @@ -194,56 +166,56 @@ static inline uint32_t MDP4_DISP_INTF_SEL_EXT(enum mdp4_intf val) | |||
| 194 | #define REG_MDP4_LAYERMIXER2_IN_CFG 0x000100f0 | 166 | #define REG_MDP4_LAYERMIXER2_IN_CFG 0x000100f0 |
| 195 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK 0x00000007 | 167 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK 0x00000007 |
| 196 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT 0 | 168 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT 0 |
| 197 | static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE0(enum mdp4_mixer_stage_id val) | 169 | static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE0(enum mdp_mixer_stage_id val) |
| 198 | { | 170 | { |
| 199 | return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK; | 171 | return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK; |
| 200 | } | 172 | } |
| 201 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE0_MIXER1 0x00000008 | 173 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE0_MIXER1 0x00000008 |
| 202 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK 0x00000070 | 174 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK 0x00000070 |
| 203 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT 4 | 175 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT 4 |
| 204 | static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE1(enum mdp4_mixer_stage_id val) | 176 | static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE1(enum mdp_mixer_stage_id val) |
| 205 | { | 177 | { |
| 206 | return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK; | 178 | return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK; |
| 207 | } | 179 | } |
| 208 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE1_MIXER1 0x00000080 | 180 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE1_MIXER1 0x00000080 |
| 209 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK 0x00000700 | 181 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK 0x00000700 |
| 210 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT 8 | 182 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT 8 |
| 211 | static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE2(enum mdp4_mixer_stage_id val) | 183 | static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE2(enum mdp_mixer_stage_id val) |
| 212 | { | 184 | { |
| 213 | return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK; | 185 | return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK; |
| 214 | } | 186 | } |
| 215 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE2_MIXER1 0x00000800 | 187 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE2_MIXER1 0x00000800 |
| 216 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK 0x00007000 | 188 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK 0x00007000 |
| 217 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT 12 | 189 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT 12 |
| 218 | static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE3(enum mdp4_mixer_stage_id val) | 190 | static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE3(enum mdp_mixer_stage_id val) |
| 219 | { | 191 | { |
| 220 | return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK; | 192 | return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK; |
| 221 | } | 193 | } |
| 222 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE3_MIXER1 0x00008000 | 194 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE3_MIXER1 0x00008000 |
| 223 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK 0x00070000 | 195 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK 0x00070000 |
| 224 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT 16 | 196 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT 16 |
| 225 | static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE4(enum mdp4_mixer_stage_id val) | 197 | static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE4(enum mdp_mixer_stage_id val) |
| 226 | { | 198 | { |
| 227 | return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK; | 199 | return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK; |
| 228 | } | 200 | } |
| 229 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE4_MIXER1 0x00080000 | 201 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE4_MIXER1 0x00080000 |
| 230 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK 0x00700000 | 202 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK 0x00700000 |
| 231 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT 20 | 203 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT 20 |
| 232 | static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE5(enum mdp4_mixer_stage_id val) | 204 | static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE5(enum mdp_mixer_stage_id val) |
| 233 | { | 205 | { |
| 234 | return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK; | 206 | return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK; |
| 235 | } | 207 | } |
| 236 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE5_MIXER1 0x00800000 | 208 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE5_MIXER1 0x00800000 |
| 237 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK 0x07000000 | 209 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK 0x07000000 |
| 238 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT 24 | 210 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT 24 |
| 239 | static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE6(enum mdp4_mixer_stage_id val) | 211 | static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE6(enum mdp_mixer_stage_id val) |
| 240 | { | 212 | { |
| 241 | return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK; | 213 | return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK; |
| 242 | } | 214 | } |
| 243 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE6_MIXER1 0x08000000 | 215 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE6_MIXER1 0x08000000 |
| 244 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK 0x70000000 | 216 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK 0x70000000 |
| 245 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT 28 | 217 | #define MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT 28 |
| 246 | static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mdp4_mixer_stage_id val) | 218 | static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mdp_mixer_stage_id val) |
| 247 | { | 219 | { |
| 248 | return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK; | 220 | return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK; |
| 249 | } | 221 | } |
| @@ -254,56 +226,56 @@ static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mdp4_mixer_stage_id va | |||
| 254 | #define REG_MDP4_LAYERMIXER_IN_CFG 0x00010100 | 226 | #define REG_MDP4_LAYERMIXER_IN_CFG 0x00010100 |
| 255 | #define MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK 0x00000007 | 227 | #define MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK 0x00000007 |
| 256 | #define MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT 0 | 228 | #define MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT 0 |
| 257 | static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE0(enum mdp4_mixer_stage_id val) | 229 | static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE0(enum mdp_mixer_stage_id val) |
| 258 | { | 230 | { |
| 259 | return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK; | 231 | return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK; |
| 260 | } | 232 | } |
| 261 | #define MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1 0x00000008 | 233 | #define MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1 0x00000008 |
| 262 | #define MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK 0x00000070 | 234 | #define MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK 0x00000070 |
| 263 | #define MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT 4 | 235 | #define MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT 4 |
| 264 | static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE1(enum mdp4_mixer_stage_id val) | 236 | static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE1(enum mdp_mixer_stage_id val) |
| 265 | { | 237 | { |
| 266 | return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK; | 238 | return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK; |
| 267 | } | 239 | } |
| 268 | #define MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1 0x00000080 | 240 | #define MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1 0x00000080 |
| 269 | #define MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK 0x00000700 | 241 | #define MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK 0x00000700 |
| 270 | #define MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT 8 | 242 | #define MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT 8 |
| 271 | static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE2(enum mdp4_mixer_stage_id val) | 243 | static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE2(enum mdp_mixer_stage_id val) |
| 272 | { | 244 | { |
| 273 | return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK; | 245 | return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK; |
| 274 | } | 246 | } |
| 275 | #define MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1 0x00000800 | 247 | #define MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1 0x00000800 |
| 276 | #define MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK 0x00007000 | 248 | #define MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK 0x00007000 |
| 277 | #define MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT 12 | 249 | #define MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT 12 |
| 278 | static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE3(enum mdp4_mixer_stage_id val) | 250 | static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE3(enum mdp_mixer_stage_id val) |
| 279 | { | 251 | { |
| 280 | return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK; | 252 | return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK; |
| 281 | } | 253 | } |
| 282 | #define MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1 0x00008000 | 254 | #define MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1 0x00008000 |
| 283 | #define MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK 0x00070000 | 255 | #define MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK 0x00070000 |
| 284 | #define MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT 16 | 256 | #define MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT 16 |
| 285 | static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE4(enum mdp4_mixer_stage_id val) | 257 | static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE4(enum mdp_mixer_stage_id val) |
| 286 | { | 258 | { |
| 287 | return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK; | 259 | return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK; |
| 288 | } | 260 | } |
| 289 | #define MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1 0x00080000 | 261 | #define MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1 0x00080000 |
| 290 | #define MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK 0x00700000 | 262 | #define MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK 0x00700000 |
| 291 | #define MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT 20 | 263 | #define MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT 20 |
| 292 | static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE5(enum mdp4_mixer_stage_id val) | 264 | static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE5(enum mdp_mixer_stage_id val) |
| 293 | { | 265 | { |
| 294 | return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK; | 266 | return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK; |
| 295 | } | 267 | } |
| 296 | #define MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1 0x00800000 | 268 | #define MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1 0x00800000 |
| 297 | #define MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK 0x07000000 | 269 | #define MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK 0x07000000 |
| 298 | #define MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT 24 | 270 | #define MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT 24 |
| 299 | static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE6(enum mdp4_mixer_stage_id val) | 271 | static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE6(enum mdp_mixer_stage_id val) |
| 300 | { | 272 | { |
| 301 | return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK; | 273 | return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK; |
| 302 | } | 274 | } |
| 303 | #define MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1 0x08000000 | 275 | #define MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1 0x08000000 |
| 304 | #define MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK 0x70000000 | 276 | #define MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK 0x70000000 |
| 305 | #define MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT 28 | 277 | #define MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT 28 |
| 306 | static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE7(enum mdp4_mixer_stage_id val) | 278 | static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE7(enum mdp_mixer_stage_id val) |
| 307 | { | 279 | { |
| 308 | return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK; | 280 | return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK; |
| 309 | } | 281 | } |
| @@ -369,7 +341,7 @@ static inline uint32_t REG_MDP4_OVLP_STAGE(uint32_t i0, uint32_t i1) { return 0x | |||
| 369 | static inline uint32_t REG_MDP4_OVLP_STAGE_OP(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); } | 341 | static inline uint32_t REG_MDP4_OVLP_STAGE_OP(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); } |
| 370 | #define MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK 0x00000003 | 342 | #define MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK 0x00000003 |
| 371 | #define MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT 0 | 343 | #define MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT 0 |
| 372 | static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mdp4_alpha_type val) | 344 | static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mdp_alpha_type val) |
| 373 | { | 345 | { |
| 374 | return ((val) << MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK; | 346 | return ((val) << MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK; |
| 375 | } | 347 | } |
| @@ -377,7 +349,7 @@ static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mdp4_alpha_type val) | |||
| 377 | #define MDP4_OVLP_STAGE_OP_FG_MOD_ALPHA 0x00000008 | 349 | #define MDP4_OVLP_STAGE_OP_FG_MOD_ALPHA 0x00000008 |
| 378 | #define MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK 0x00000030 | 350 | #define MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK 0x00000030 |
| 379 | #define MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT 4 | 351 | #define MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT 4 |
| 380 | static inline uint32_t MDP4_OVLP_STAGE_OP_BG_ALPHA(enum mdp4_alpha_type val) | 352 | static inline uint32_t MDP4_OVLP_STAGE_OP_BG_ALPHA(enum mdp_alpha_type val) |
| 381 | { | 353 | { |
| 382 | return ((val) << MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK; | 354 | return ((val) << MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK; |
| 383 | } | 355 | } |
| @@ -472,19 +444,19 @@ static inline uint32_t REG_MDP4_DMA(enum mdp4_dma i0) { return 0x00000000 + __of | |||
| 472 | static inline uint32_t REG_MDP4_DMA_CONFIG(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); } | 444 | static inline uint32_t REG_MDP4_DMA_CONFIG(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); } |
| 473 | #define MDP4_DMA_CONFIG_G_BPC__MASK 0x00000003 | 445 | #define MDP4_DMA_CONFIG_G_BPC__MASK 0x00000003 |
| 474 | #define MDP4_DMA_CONFIG_G_BPC__SHIFT 0 | 446 | #define MDP4_DMA_CONFIG_G_BPC__SHIFT 0 |
| 475 | static inline uint32_t MDP4_DMA_CONFIG_G_BPC(enum mdp4_bpc val) | 447 | static inline uint32_t MDP4_DMA_CONFIG_G_BPC(enum mdp_bpc val) |
| 476 | { | 448 | { |
| 477 | return ((val) << MDP4_DMA_CONFIG_G_BPC__SHIFT) & MDP4_DMA_CONFIG_G_BPC__MASK; | 449 | return ((val) << MDP4_DMA_CONFIG_G_BPC__SHIFT) & MDP4_DMA_CONFIG_G_BPC__MASK; |
| 478 | } | 450 | } |
| 479 | #define MDP4_DMA_CONFIG_B_BPC__MASK 0x0000000c | 451 | #define MDP4_DMA_CONFIG_B_BPC__MASK 0x0000000c |
| 480 | #define MDP4_DMA_CONFIG_B_BPC__SHIFT 2 | 452 | #define MDP4_DMA_CONFIG_B_BPC__SHIFT 2 |
| 481 | static inline uint32_t MDP4_DMA_CONFIG_B_BPC(enum mdp4_bpc val) | 453 | static inline uint32_t MDP4_DMA_CONFIG_B_BPC(enum mdp_bpc val) |
| 482 | { | 454 | { |
| 483 | return ((val) << MDP4_DMA_CONFIG_B_BPC__SHIFT) & MDP4_DMA_CONFIG_B_BPC__MASK; | 455 | return ((val) << MDP4_DMA_CONFIG_B_BPC__SHIFT) & MDP4_DMA_CONFIG_B_BPC__MASK; |
| 484 | } | 456 | } |
| 485 | #define MDP4_DMA_CONFIG_R_BPC__MASK 0x00000030 | 457 | #define MDP4_DMA_CONFIG_R_BPC__MASK 0x00000030 |
| 486 | #define MDP4_DMA_CONFIG_R_BPC__SHIFT 4 | 458 | #define MDP4_DMA_CONFIG_R_BPC__SHIFT 4 |
| 487 | static inline uint32_t MDP4_DMA_CONFIG_R_BPC(enum mdp4_bpc val) | 459 | static inline uint32_t MDP4_DMA_CONFIG_R_BPC(enum mdp_bpc val) |
| 488 | { | 460 | { |
| 489 | return ((val) << MDP4_DMA_CONFIG_R_BPC__SHIFT) & MDP4_DMA_CONFIG_R_BPC__MASK; | 461 | return ((val) << MDP4_DMA_CONFIG_R_BPC__SHIFT) & MDP4_DMA_CONFIG_R_BPC__MASK; |
| 490 | } | 462 | } |
| @@ -710,25 +682,25 @@ static inline uint32_t MDP4_PIPE_FRAME_SIZE_WIDTH(uint32_t val) | |||
| 710 | static inline uint32_t REG_MDP4_PIPE_SRC_FORMAT(enum mdp4_pipe i0) { return 0x00020050 + 0x10000*i0; } | 682 | static inline uint32_t REG_MDP4_PIPE_SRC_FORMAT(enum mdp4_pipe i0) { return 0x00020050 + 0x10000*i0; } |
| 711 | #define MDP4_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003 | 683 | #define MDP4_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003 |
| 712 | #define MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT 0 | 684 | #define MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT 0 |
| 713 | static inline uint32_t MDP4_PIPE_SRC_FORMAT_G_BPC(enum mdp4_bpc val) | 685 | static inline uint32_t MDP4_PIPE_SRC_FORMAT_G_BPC(enum mdp_bpc val) |
| 714 | { | 686 | { |
| 715 | return ((val) << MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_G_BPC__MASK; | 687 | return ((val) << MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_G_BPC__MASK; |
| 716 | } | 688 | } |
| 717 | #define MDP4_PIPE_SRC_FORMAT_B_BPC__MASK 0x0000000c | 689 | #define MDP4_PIPE_SRC_FORMAT_B_BPC__MASK 0x0000000c |
| 718 | #define MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT 2 | 690 | #define MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT 2 |
| 719 | static inline uint32_t MDP4_PIPE_SRC_FORMAT_B_BPC(enum mdp4_bpc val) | 691 | static inline uint32_t MDP4_PIPE_SRC_FORMAT_B_BPC(enum mdp_bpc val) |
| 720 | { | 692 | { |
| 721 | return ((val) << MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_B_BPC__MASK; | 693 | return ((val) << MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_B_BPC__MASK; |
| 722 | } | 694 | } |
| 723 | #define MDP4_PIPE_SRC_FORMAT_R_BPC__MASK 0x00000030 | 695 | #define MDP4_PIPE_SRC_FORMAT_R_BPC__MASK 0x00000030 |
| 724 | #define MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT 4 | 696 | #define MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT 4 |
| 725 | static inline uint32_t MDP4_PIPE_SRC_FORMAT_R_BPC(enum mdp4_bpc val) | 697 | static inline uint32_t MDP4_PIPE_SRC_FORMAT_R_BPC(enum mdp_bpc val) |
| 726 | { | 698 | { |
| 727 | return ((val) << MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_R_BPC__MASK; | 699 | return ((val) << MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_R_BPC__MASK; |
| 728 | } | 700 | } |
| 729 | #define MDP4_PIPE_SRC_FORMAT_A_BPC__MASK 0x000000c0 | 701 | #define MDP4_PIPE_SRC_FORMAT_A_BPC__MASK 0x000000c0 |
| 730 | #define MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT 6 | 702 | #define MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT 6 |
| 731 | static inline uint32_t MDP4_PIPE_SRC_FORMAT_A_BPC(enum mdp4_bpc_alpha val) | 703 | static inline uint32_t MDP4_PIPE_SRC_FORMAT_A_BPC(enum mdp_bpc_alpha val) |
| 732 | { | 704 | { |
| 733 | return ((val) << MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_A_BPC__MASK; | 705 | return ((val) << MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_A_BPC__MASK; |
| 734 | } | 706 | } |
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c index 019d530187ff..1964f4f0d452 100644 --- a/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c | |||
| @@ -66,15 +66,15 @@ struct mdp4_crtc { | |||
| 66 | /* for unref'ing cursor bo's after scanout completes: */ | 66 | /* for unref'ing cursor bo's after scanout completes: */ |
| 67 | struct drm_flip_work unref_cursor_work; | 67 | struct drm_flip_work unref_cursor_work; |
| 68 | 68 | ||
| 69 | struct mdp4_irq vblank; | 69 | struct mdp_irq vblank; |
| 70 | struct mdp4_irq err; | 70 | struct mdp_irq err; |
| 71 | }; | 71 | }; |
| 72 | #define to_mdp4_crtc(x) container_of(x, struct mdp4_crtc, base) | 72 | #define to_mdp4_crtc(x) container_of(x, struct mdp4_crtc, base) |
| 73 | 73 | ||
| 74 | static struct mdp4_kms *get_kms(struct drm_crtc *crtc) | 74 | static struct mdp4_kms *get_kms(struct drm_crtc *crtc) |
| 75 | { | 75 | { |
| 76 | struct msm_drm_private *priv = crtc->dev->dev_private; | 76 | struct msm_drm_private *priv = crtc->dev->dev_private; |
| 77 | return to_mdp4_kms(priv->kms); | 77 | return to_mdp4_kms(to_mdp_kms(priv->kms)); |
| 78 | } | 78 | } |
| 79 | 79 | ||
| 80 | static void update_fb(struct drm_crtc *crtc, bool async, | 80 | static void update_fb(struct drm_crtc *crtc, bool async, |
| @@ -93,7 +93,7 @@ static void update_fb(struct drm_crtc *crtc, bool async, | |||
| 93 | 93 | ||
| 94 | if (!async) { | 94 | if (!async) { |
| 95 | /* enable vblank to pick up the old_fb */ | 95 | /* enable vblank to pick up the old_fb */ |
| 96 | mdp4_irq_register(get_kms(crtc), &mdp4_crtc->vblank); | 96 | mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank); |
| 97 | } | 97 | } |
| 98 | } | 98 | } |
| 99 | 99 | ||
| @@ -145,7 +145,7 @@ static void request_pending(struct drm_crtc *crtc, uint32_t pending) | |||
| 145 | struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); | 145 | struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); |
| 146 | 146 | ||
| 147 | atomic_or(pending, &mdp4_crtc->pending); | 147 | atomic_or(pending, &mdp4_crtc->pending); |
| 148 | mdp4_irq_register(get_kms(crtc), &mdp4_crtc->vblank); | 148 | mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank); |
| 149 | } | 149 | } |
| 150 | 150 | ||
| 151 | static void pageflip_cb(struct msm_fence_cb *cb) | 151 | static void pageflip_cb(struct msm_fence_cb *cb) |
| @@ -210,9 +210,9 @@ static void mdp4_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
| 210 | if (enabled != mdp4_crtc->enabled) { | 210 | if (enabled != mdp4_crtc->enabled) { |
| 211 | if (enabled) { | 211 | if (enabled) { |
| 212 | mdp4_enable(mdp4_kms); | 212 | mdp4_enable(mdp4_kms); |
| 213 | mdp4_irq_register(mdp4_kms, &mdp4_crtc->err); | 213 | mdp_irq_register(&mdp4_kms->base, &mdp4_crtc->err); |
| 214 | } else { | 214 | } else { |
| 215 | mdp4_irq_unregister(mdp4_kms, &mdp4_crtc->err); | 215 | mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err); |
| 216 | mdp4_disable(mdp4_kms); | 216 | mdp4_disable(mdp4_kms); |
| 217 | } | 217 | } |
| 218 | mdp4_crtc->enabled = enabled; | 218 | mdp4_crtc->enabled = enabled; |
| @@ -232,7 +232,7 @@ static void blend_setup(struct drm_crtc *crtc) | |||
| 232 | struct mdp4_kms *mdp4_kms = get_kms(crtc); | 232 | struct mdp4_kms *mdp4_kms = get_kms(crtc); |
| 233 | int i, ovlp = mdp4_crtc->ovlp; | 233 | int i, ovlp = mdp4_crtc->ovlp; |
| 234 | uint32_t mixer_cfg = 0; | 234 | uint32_t mixer_cfg = 0; |
| 235 | static const enum mdp4_mixer_stage_id stages[] = { | 235 | static const enum mdp_mixer_stage_id stages[] = { |
| 236 | STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3, | 236 | STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3, |
| 237 | }; | 237 | }; |
| 238 | /* statically (for now) map planes to mixer stage (z-order): */ | 238 | /* statically (for now) map planes to mixer stage (z-order): */ |
| @@ -262,8 +262,8 @@ static void blend_setup(struct drm_crtc *crtc) | |||
| 262 | enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane); | 262 | enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane); |
| 263 | int idx = idxs[pipe_id]; | 263 | int idx = idxs[pipe_id]; |
| 264 | if (idx > 0) { | 264 | if (idx > 0) { |
| 265 | const struct mdp4_format *format = | 265 | const struct mdp_format *format = |
| 266 | to_mdp4_format(msm_framebuffer_format(plane->fb)); | 266 | to_mdp_format(msm_framebuffer_format(plane->fb)); |
| 267 | alpha[idx-1] = format->alpha_enable; | 267 | alpha[idx-1] = format->alpha_enable; |
| 268 | } | 268 | } |
| 269 | mixer_cfg |= mixercfg(mdp4_crtc->mixer, pipe_id, stages[idx]); | 269 | mixer_cfg |= mixercfg(mdp4_crtc->mixer, pipe_id, stages[idx]); |
| @@ -571,14 +571,14 @@ static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = { | |||
| 571 | .load_lut = mdp4_crtc_load_lut, | 571 | .load_lut = mdp4_crtc_load_lut, |
| 572 | }; | 572 | }; |
| 573 | 573 | ||
| 574 | static void mdp4_crtc_vblank_irq(struct mdp4_irq *irq, uint32_t irqstatus) | 574 | static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus) |
| 575 | { | 575 | { |
| 576 | struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank); | 576 | struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank); |
| 577 | struct drm_crtc *crtc = &mdp4_crtc->base; | 577 | struct drm_crtc *crtc = &mdp4_crtc->base; |
| 578 | struct msm_drm_private *priv = crtc->dev->dev_private; | 578 | struct msm_drm_private *priv = crtc->dev->dev_private; |
| 579 | unsigned pending; | 579 | unsigned pending; |
| 580 | 580 | ||
| 581 | mdp4_irq_unregister(get_kms(crtc), &mdp4_crtc->vblank); | 581 | mdp_irq_unregister(&get_kms(crtc)->base, &mdp4_crtc->vblank); |
| 582 | 582 | ||
| 583 | pending = atomic_xchg(&mdp4_crtc->pending, 0); | 583 | pending = atomic_xchg(&mdp4_crtc->pending, 0); |
| 584 | 584 | ||
| @@ -593,7 +593,7 @@ static void mdp4_crtc_vblank_irq(struct mdp4_irq *irq, uint32_t irqstatus) | |||
| 593 | } | 593 | } |
| 594 | } | 594 | } |
| 595 | 595 | ||
| 596 | static void mdp4_crtc_err_irq(struct mdp4_irq *irq, uint32_t irqstatus) | 596 | static void mdp4_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus) |
| 597 | { | 597 | { |
| 598 | struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, err); | 598 | struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, err); |
| 599 | struct drm_crtc *crtc = &mdp4_crtc->base; | 599 | struct drm_crtc *crtc = &mdp4_crtc->base; |
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c index 5e0dcae70ab5..067ed03b35fe 100644 --- a/drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c | |||
| @@ -15,8 +15,6 @@ | |||
| 15 | * this program. If not, see <http://www.gnu.org/licenses/>. | 15 | * this program. If not, see <http://www.gnu.org/licenses/>. |
| 16 | */ | 16 | */ |
| 17 | 17 | ||
| 18 | #include <mach/clk.h> | ||
| 19 | |||
| 20 | #include "mdp4_kms.h" | 18 | #include "mdp4_kms.h" |
| 21 | 19 | ||
| 22 | #include "drm_crtc.h" | 20 | #include "drm_crtc.h" |
| @@ -37,7 +35,7 @@ struct mdp4_dtv_encoder { | |||
| 37 | static struct mdp4_kms *get_kms(struct drm_encoder *encoder) | 35 | static struct mdp4_kms *get_kms(struct drm_encoder *encoder) |
| 38 | { | 36 | { |
| 39 | struct msm_drm_private *priv = encoder->dev->dev_private; | 37 | struct msm_drm_private *priv = encoder->dev->dev_private; |
| 40 | return to_mdp4_kms(priv->kms); | 38 | return to_mdp4_kms(to_mdp_kms(priv->kms)); |
| 41 | } | 39 | } |
| 42 | 40 | ||
| 43 | #ifdef CONFIG_MSM_BUS_SCALING | 41 | #ifdef CONFIG_MSM_BUS_SCALING |
| @@ -139,7 +137,7 @@ static void mdp4_dtv_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
| 139 | * the settings changes for the new modeset (like new | 137 | * the settings changes for the new modeset (like new |
| 140 | * scanout buffer) don't latch properly.. | 138 | * scanout buffer) don't latch properly.. |
| 141 | */ | 139 | */ |
| 142 | mdp4_irq_wait(mdp4_kms, MDP4_IRQ_EXTERNAL_VSYNC); | 140 | mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_EXTERNAL_VSYNC); |
| 143 | 141 | ||
| 144 | clk_disable_unprepare(mdp4_dtv_encoder->src_clk); | 142 | clk_disable_unprepare(mdp4_dtv_encoder->src_clk); |
| 145 | clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk); | 143 | clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk); |
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c new file mode 100644 index 000000000000..c740ccd1cc67 --- /dev/null +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c | |||
| @@ -0,0 +1,93 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2013 Red Hat | ||
| 3 | * Author: Rob Clark <robdclark@gmail.com> | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify it | ||
| 6 | * under the terms of the GNU General Public License version 2 as published by | ||
| 7 | * the Free Software Foundation. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 12 | * more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License along with | ||
| 15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 16 | */ | ||
| 17 | |||
| 18 | |||
| 19 | #include "msm_drv.h" | ||
| 20 | #include "mdp4_kms.h" | ||
| 21 | |||
| 22 | void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask) | ||
| 23 | { | ||
| 24 | mdp4_write(to_mdp4_kms(mdp_kms), REG_MDP4_INTR_ENABLE, irqmask); | ||
| 25 | } | ||
| 26 | |||
| 27 | static void mdp4_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus) | ||
| 28 | { | ||
| 29 | DRM_ERROR("errors: %08x\n", irqstatus); | ||
| 30 | } | ||
| 31 | |||
| 32 | void mdp4_irq_preinstall(struct msm_kms *kms) | ||
| 33 | { | ||
| 34 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); | ||
| 35 | mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff); | ||
| 36 | } | ||
| 37 | |||
| 38 | int mdp4_irq_postinstall(struct msm_kms *kms) | ||
| 39 | { | ||
| 40 | struct mdp_kms *mdp_kms = to_mdp_kms(kms); | ||
| 41 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(mdp_kms); | ||
| 42 | struct mdp_irq *error_handler = &mdp4_kms->error_handler; | ||
| 43 | |||
| 44 | error_handler->irq = mdp4_irq_error_handler; | ||
| 45 | error_handler->irqmask = MDP4_IRQ_PRIMARY_INTF_UDERRUN | | ||
| 46 | MDP4_IRQ_EXTERNAL_INTF_UDERRUN; | ||
| 47 | |||
| 48 | mdp_irq_register(mdp_kms, error_handler); | ||
| 49 | |||
| 50 | return 0; | ||
| 51 | } | ||
| 52 | |||
| 53 | void mdp4_irq_uninstall(struct msm_kms *kms) | ||
| 54 | { | ||
| 55 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); | ||
| 56 | mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000); | ||
| 57 | } | ||
| 58 | |||
| 59 | irqreturn_t mdp4_irq(struct msm_kms *kms) | ||
| 60 | { | ||
| 61 | struct mdp_kms *mdp_kms = to_mdp_kms(kms); | ||
| 62 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(mdp_kms); | ||
| 63 | struct drm_device *dev = mdp4_kms->dev; | ||
| 64 | struct msm_drm_private *priv = dev->dev_private; | ||
| 65 | unsigned int id; | ||
| 66 | uint32_t status; | ||
| 67 | |||
| 68 | status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS); | ||
| 69 | mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, status); | ||
| 70 | |||
| 71 | VERB("status=%08x", status); | ||
| 72 | |||
| 73 | for (id = 0; id < priv->num_crtcs; id++) | ||
| 74 | if (status & mdp4_crtc_vblank(priv->crtcs[id])) | ||
| 75 | drm_handle_vblank(dev, id); | ||
| 76 | |||
| 77 | mdp_dispatch_irqs(mdp_kms, status); | ||
| 78 | |||
| 79 | return IRQ_HANDLED; | ||
| 80 | } | ||
| 81 | |||
| 82 | int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) | ||
| 83 | { | ||
| 84 | mdp_update_vblank_mask(to_mdp_kms(kms), | ||
| 85 | mdp4_crtc_vblank(crtc), true); | ||
| 86 | return 0; | ||
| 87 | } | ||
| 88 | |||
| 89 | void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) | ||
| 90 | { | ||
| 91 | mdp_update_vblank_mask(to_mdp_kms(kms), | ||
| 92 | mdp4_crtc_vblank(crtc), false); | ||
| 93 | } | ||
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c index 8972ac35a43d..272e707c9487 100644 --- a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c | |||
| @@ -17,13 +17,14 @@ | |||
| 17 | 17 | ||
| 18 | 18 | ||
| 19 | #include "msm_drv.h" | 19 | #include "msm_drv.h" |
| 20 | #include "msm_mmu.h" | ||
| 20 | #include "mdp4_kms.h" | 21 | #include "mdp4_kms.h" |
| 21 | 22 | ||
| 22 | static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev); | 23 | static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev); |
| 23 | 24 | ||
| 24 | static int mdp4_hw_init(struct msm_kms *kms) | 25 | static int mdp4_hw_init(struct msm_kms *kms) |
| 25 | { | 26 | { |
| 26 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms); | 27 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); |
| 27 | struct drm_device *dev = mdp4_kms->dev; | 28 | struct drm_device *dev = mdp4_kms->dev; |
| 28 | uint32_t version, major, minor, dmap_cfg, vg_cfg; | 29 | uint32_t version, major, minor, dmap_cfg, vg_cfg; |
| 29 | unsigned long clk; | 30 | unsigned long clk; |
| @@ -31,12 +32,14 @@ static int mdp4_hw_init(struct msm_kms *kms) | |||
| 31 | 32 | ||
| 32 | pm_runtime_get_sync(dev->dev); | 33 | pm_runtime_get_sync(dev->dev); |
| 33 | 34 | ||
| 35 | mdp4_enable(mdp4_kms); | ||
| 34 | version = mdp4_read(mdp4_kms, REG_MDP4_VERSION); | 36 | version = mdp4_read(mdp4_kms, REG_MDP4_VERSION); |
| 37 | mdp4_disable(mdp4_kms); | ||
| 35 | 38 | ||
| 36 | major = FIELD(version, MDP4_VERSION_MAJOR); | 39 | major = FIELD(version, MDP4_VERSION_MAJOR); |
| 37 | minor = FIELD(version, MDP4_VERSION_MINOR); | 40 | minor = FIELD(version, MDP4_VERSION_MINOR); |
| 38 | 41 | ||
| 39 | DBG("found MDP version v%d.%d", major, minor); | 42 | DBG("found MDP4 version v%d.%d", major, minor); |
| 40 | 43 | ||
| 41 | if (major != 4) { | 44 | if (major != 4) { |
| 42 | dev_err(dev->dev, "unexpected MDP version: v%d.%d\n", | 45 | dev_err(dev->dev, "unexpected MDP version: v%d.%d\n", |
| @@ -130,7 +133,7 @@ static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate, | |||
| 130 | 133 | ||
| 131 | static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file) | 134 | static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file) |
| 132 | { | 135 | { |
| 133 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms); | 136 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); |
| 134 | struct msm_drm_private *priv = mdp4_kms->dev->dev_private; | 137 | struct msm_drm_private *priv = mdp4_kms->dev->dev_private; |
| 135 | unsigned i; | 138 | unsigned i; |
| 136 | 139 | ||
| @@ -140,11 +143,12 @@ static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file) | |||
| 140 | 143 | ||
| 141 | static void mdp4_destroy(struct msm_kms *kms) | 144 | static void mdp4_destroy(struct msm_kms *kms) |
| 142 | { | 145 | { |
| 143 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms); | 146 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); |
| 144 | kfree(mdp4_kms); | 147 | kfree(mdp4_kms); |
| 145 | } | 148 | } |
| 146 | 149 | ||
| 147 | static const struct msm_kms_funcs kms_funcs = { | 150 | static const struct mdp_kms_funcs kms_funcs = { |
| 151 | .base = { | ||
| 148 | .hw_init = mdp4_hw_init, | 152 | .hw_init = mdp4_hw_init, |
| 149 | .irq_preinstall = mdp4_irq_preinstall, | 153 | .irq_preinstall = mdp4_irq_preinstall, |
| 150 | .irq_postinstall = mdp4_irq_postinstall, | 154 | .irq_postinstall = mdp4_irq_postinstall, |
| @@ -152,10 +156,12 @@ static const struct msm_kms_funcs kms_funcs = { | |||
| 152 | .irq = mdp4_irq, | 156 | .irq = mdp4_irq, |
| 153 | .enable_vblank = mdp4_enable_vblank, | 157 | .enable_vblank = mdp4_enable_vblank, |
| 154 | .disable_vblank = mdp4_disable_vblank, | 158 | .disable_vblank = mdp4_disable_vblank, |
| 155 | .get_format = mdp4_get_format, | 159 | .get_format = mdp_get_format, |
| 156 | .round_pixclk = mdp4_round_pixclk, | 160 | .round_pixclk = mdp4_round_pixclk, |
| 157 | .preclose = mdp4_preclose, | 161 | .preclose = mdp4_preclose, |
| 158 | .destroy = mdp4_destroy, | 162 | .destroy = mdp4_destroy, |
| 163 | }, | ||
| 164 | .set_irqmask = mdp4_set_irqmask, | ||
| 159 | }; | 165 | }; |
| 160 | 166 | ||
| 161 | int mdp4_disable(struct mdp4_kms *mdp4_kms) | 167 | int mdp4_disable(struct mdp4_kms *mdp4_kms) |
| @@ -189,6 +195,7 @@ static int modeset_init(struct mdp4_kms *mdp4_kms) | |||
| 189 | struct drm_plane *plane; | 195 | struct drm_plane *plane; |
| 190 | struct drm_crtc *crtc; | 196 | struct drm_crtc *crtc; |
| 191 | struct drm_encoder *encoder; | 197 | struct drm_encoder *encoder; |
| 198 | struct hdmi *hdmi; | ||
| 192 | int ret; | 199 | int ret; |
| 193 | 200 | ||
| 194 | /* | 201 | /* |
| @@ -238,9 +245,10 @@ static int modeset_init(struct mdp4_kms *mdp4_kms) | |||
| 238 | encoder->possible_crtcs = 0x1; /* DTV can be hooked to DMA_E */ | 245 | encoder->possible_crtcs = 0x1; /* DTV can be hooked to DMA_E */ |
| 239 | priv->encoders[priv->num_encoders++] = encoder; | 246 | priv->encoders[priv->num_encoders++] = encoder; |
| 240 | 247 | ||
| 241 | ret = hdmi_init(dev, encoder); | 248 | hdmi = hdmi_init(dev, encoder); |
| 242 | if (ret) { | 249 | if (IS_ERR(hdmi)) { |
| 243 | dev_err(dev->dev, "failed to initialize HDMI\n"); | 250 | ret = PTR_ERR(hdmi); |
| 251 | dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret); | ||
| 244 | goto fail; | 252 | goto fail; |
| 245 | } | 253 | } |
| 246 | 254 | ||
| @@ -260,6 +268,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) | |||
| 260 | struct mdp4_platform_config *config = mdp4_get_config(pdev); | 268 | struct mdp4_platform_config *config = mdp4_get_config(pdev); |
| 261 | struct mdp4_kms *mdp4_kms; | 269 | struct mdp4_kms *mdp4_kms; |
| 262 | struct msm_kms *kms = NULL; | 270 | struct msm_kms *kms = NULL; |
| 271 | struct msm_mmu *mmu; | ||
| 263 | int ret; | 272 | int ret; |
| 264 | 273 | ||
| 265 | mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL); | 274 | mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL); |
| @@ -269,8 +278,9 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) | |||
| 269 | goto fail; | 278 | goto fail; |
| 270 | } | 279 | } |
| 271 | 280 | ||
| 272 | kms = &mdp4_kms->base; | 281 | mdp_kms_init(&mdp4_kms->base, &kms_funcs); |
| 273 | kms->funcs = &kms_funcs; | 282 | |
| 283 | kms = &mdp4_kms->base.base; | ||
| 274 | 284 | ||
| 275 | mdp4_kms->dev = dev; | 285 | mdp4_kms->dev = dev; |
| 276 | 286 | ||
| @@ -322,27 +332,34 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) | |||
| 322 | clk_set_rate(mdp4_kms->clk, config->max_clk); | 332 | clk_set_rate(mdp4_kms->clk, config->max_clk); |
| 323 | clk_set_rate(mdp4_kms->lut_clk, config->max_clk); | 333 | clk_set_rate(mdp4_kms->lut_clk, config->max_clk); |
| 324 | 334 | ||
| 325 | if (!config->iommu) { | ||
| 326 | dev_err(dev->dev, "no iommu\n"); | ||
| 327 | ret = -ENXIO; | ||
| 328 | goto fail; | ||
| 329 | } | ||
| 330 | |||
| 331 | /* make sure things are off before attaching iommu (bootloader could | 335 | /* make sure things are off before attaching iommu (bootloader could |
| 332 | * have left things on, in which case we'll start getting faults if | 336 | * have left things on, in which case we'll start getting faults if |
| 333 | * we don't disable): | 337 | * we don't disable): |
| 334 | */ | 338 | */ |
| 339 | mdp4_enable(mdp4_kms); | ||
| 335 | mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0); | 340 | mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0); |
| 336 | mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0); | 341 | mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0); |
| 337 | mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0); | 342 | mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0); |
| 343 | mdp4_disable(mdp4_kms); | ||
| 338 | mdelay(16); | 344 | mdelay(16); |
| 339 | 345 | ||
| 340 | ret = msm_iommu_attach(dev, config->iommu, | 346 | if (config->iommu) { |
| 341 | iommu_ports, ARRAY_SIZE(iommu_ports)); | 347 | mmu = msm_iommu_new(dev, config->iommu); |
| 342 | if (ret) | 348 | if (IS_ERR(mmu)) { |
| 343 | goto fail; | 349 | ret = PTR_ERR(mmu); |
| 350 | goto fail; | ||
| 351 | } | ||
| 352 | ret = mmu->funcs->attach(mmu, iommu_ports, | ||
| 353 | ARRAY_SIZE(iommu_ports)); | ||
| 354 | if (ret) | ||
| 355 | goto fail; | ||
| 356 | } else { | ||
| 357 | dev_info(dev->dev, "no iommu, fallback to phys " | ||
| 358 | "contig buffers for scanout\n"); | ||
| 359 | mmu = NULL; | ||
| 360 | } | ||
| 344 | 361 | ||
| 345 | mdp4_kms->id = msm_register_iommu(dev, config->iommu); | 362 | mdp4_kms->id = msm_register_mmu(dev, mmu); |
| 346 | if (mdp4_kms->id < 0) { | 363 | if (mdp4_kms->id < 0) { |
| 347 | ret = mdp4_kms->id; | 364 | ret = mdp4_kms->id; |
| 348 | dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret); | 365 | dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret); |
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h index eb015c834087..66a4d31aec80 100644 --- a/drivers/gpu/drm/msm/mdp4/mdp4_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h | |||
| @@ -18,29 +18,13 @@ | |||
| 18 | #ifndef __MDP4_KMS_H__ | 18 | #ifndef __MDP4_KMS_H__ |
| 19 | #define __MDP4_KMS_H__ | 19 | #define __MDP4_KMS_H__ |
| 20 | 20 | ||
| 21 | #include <linux/clk.h> | ||
| 22 | #include <linux/platform_device.h> | ||
| 23 | #include <linux/regulator/consumer.h> | ||
| 24 | |||
| 25 | #include "msm_drv.h" | 21 | #include "msm_drv.h" |
| 22 | #include "msm_kms.h" | ||
| 23 | #include "mdp/mdp_kms.h" | ||
| 26 | #include "mdp4.xml.h" | 24 | #include "mdp4.xml.h" |
| 27 | 25 | ||
| 28 | |||
| 29 | /* For transiently registering for different MDP4 irqs that various parts | ||
| 30 | * of the KMS code need during setup/configuration. We these are not | ||
| 31 | * necessarily the same as what drm_vblank_get/put() are requesting, and | ||
| 32 | * the hysteresis in drm_vblank_put() is not necessarily desirable for | ||
| 33 | * internal housekeeping related irq usage. | ||
| 34 | */ | ||
| 35 | struct mdp4_irq { | ||
| 36 | struct list_head node; | ||
| 37 | uint32_t irqmask; | ||
| 38 | bool registered; | ||
| 39 | void (*irq)(struct mdp4_irq *irq, uint32_t irqstatus); | ||
| 40 | }; | ||
| 41 | |||
| 42 | struct mdp4_kms { | 26 | struct mdp4_kms { |
| 43 | struct msm_kms base; | 27 | struct mdp_kms base; |
| 44 | 28 | ||
| 45 | struct drm_device *dev; | 29 | struct drm_device *dev; |
| 46 | 30 | ||
| @@ -59,11 +43,7 @@ struct mdp4_kms { | |||
| 59 | struct clk *pclk; | 43 | struct clk *pclk; |
| 60 | struct clk *lut_clk; | 44 | struct clk *lut_clk; |
| 61 | 45 | ||
| 62 | /* irq handling: */ | 46 | struct mdp_irq error_handler; |
| 63 | bool in_irq; | ||
| 64 | struct list_head irq_list; /* list of mdp4_irq */ | ||
| 65 | uint32_t vblank_mask; /* irq bits set for userspace vblank */ | ||
| 66 | struct mdp4_irq error_handler; | ||
| 67 | }; | 47 | }; |
| 68 | #define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base) | 48 | #define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base) |
| 69 | 49 | ||
| @@ -73,16 +53,6 @@ struct mdp4_platform_config { | |||
| 73 | uint32_t max_clk; | 53 | uint32_t max_clk; |
| 74 | }; | 54 | }; |
| 75 | 55 | ||
| 76 | struct mdp4_format { | ||
| 77 | struct msm_format base; | ||
| 78 | enum mdp4_bpc bpc_r, bpc_g, bpc_b; | ||
| 79 | enum mdp4_bpc_alpha bpc_a; | ||
| 80 | uint8_t unpack[4]; | ||
| 81 | bool alpha_enable, unpack_tight; | ||
| 82 | uint8_t cpp, unpack_count; | ||
| 83 | }; | ||
| 84 | #define to_mdp4_format(x) container_of(x, struct mdp4_format, base) | ||
| 85 | |||
| 86 | static inline void mdp4_write(struct mdp4_kms *mdp4_kms, u32 reg, u32 data) | 56 | static inline void mdp4_write(struct mdp4_kms *mdp4_kms, u32 reg, u32 data) |
| 87 | { | 57 | { |
| 88 | msm_writel(data, mdp4_kms->mmio + reg); | 58 | msm_writel(data, mdp4_kms->mmio + reg); |
| @@ -134,7 +104,7 @@ static inline uint32_t dma2err(enum mdp4_dma dma) | |||
| 134 | } | 104 | } |
| 135 | 105 | ||
| 136 | static inline uint32_t mixercfg(int mixer, enum mdp4_pipe pipe, | 106 | static inline uint32_t mixercfg(int mixer, enum mdp4_pipe pipe, |
| 137 | enum mdp4_mixer_stage_id stage) | 107 | enum mdp_mixer_stage_id stage) |
| 138 | { | 108 | { |
| 139 | uint32_t mixer_cfg = 0; | 109 | uint32_t mixer_cfg = 0; |
| 140 | 110 | ||
| @@ -178,19 +148,23 @@ static inline uint32_t mixercfg(int mixer, enum mdp4_pipe pipe, | |||
| 178 | int mdp4_disable(struct mdp4_kms *mdp4_kms); | 148 | int mdp4_disable(struct mdp4_kms *mdp4_kms); |
| 179 | int mdp4_enable(struct mdp4_kms *mdp4_kms); | 149 | int mdp4_enable(struct mdp4_kms *mdp4_kms); |
| 180 | 150 | ||
| 151 | void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask); | ||
| 181 | void mdp4_irq_preinstall(struct msm_kms *kms); | 152 | void mdp4_irq_preinstall(struct msm_kms *kms); |
| 182 | int mdp4_irq_postinstall(struct msm_kms *kms); | 153 | int mdp4_irq_postinstall(struct msm_kms *kms); |
| 183 | void mdp4_irq_uninstall(struct msm_kms *kms); | 154 | void mdp4_irq_uninstall(struct msm_kms *kms); |
| 184 | irqreturn_t mdp4_irq(struct msm_kms *kms); | 155 | irqreturn_t mdp4_irq(struct msm_kms *kms); |
| 185 | void mdp4_irq_wait(struct mdp4_kms *mdp4_kms, uint32_t irqmask); | ||
| 186 | void mdp4_irq_register(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq); | ||
| 187 | void mdp4_irq_unregister(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq); | ||
| 188 | int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); | 156 | int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); |
| 189 | void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); | 157 | void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); |
| 190 | 158 | ||
| 191 | uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *formats, | 159 | static inline |
| 192 | uint32_t max_formats); | 160 | uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *pixel_formats, |
| 193 | const struct msm_format *mdp4_get_format(struct msm_kms *kms, uint32_t format); | 161 | uint32_t max_formats) |
| 162 | { | ||
| 163 | /* TODO when we have YUV, we need to filter supported formats | ||
| 164 | * based on pipe_id.. | ||
| 165 | */ | ||
| 166 | return mdp_get_formats(pixel_formats, max_formats); | ||
| 167 | } | ||
| 194 | 168 | ||
| 195 | void mdp4_plane_install_properties(struct drm_plane *plane, | 169 | void mdp4_plane_install_properties(struct drm_plane *plane, |
| 196 | struct drm_mode_object *obj); | 170 | struct drm_mode_object *obj); |
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c index 0f0af243f6fc..2406027200ec 100644 --- a/drivers/gpu/drm/msm/mdp4/mdp4_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c | |||
| @@ -34,7 +34,7 @@ struct mdp4_plane { | |||
| 34 | static struct mdp4_kms *get_kms(struct drm_plane *plane) | 34 | static struct mdp4_kms *get_kms(struct drm_plane *plane) |
| 35 | { | 35 | { |
| 36 | struct msm_drm_private *priv = plane->dev->dev_private; | 36 | struct msm_drm_private *priv = plane->dev->dev_private; |
| 37 | return to_mdp4_kms(priv->kms); | 37 | return to_mdp4_kms(to_mdp_kms(priv->kms)); |
| 38 | } | 38 | } |
| 39 | 39 | ||
| 40 | static int mdp4_plane_update(struct drm_plane *plane, | 40 | static int mdp4_plane_update(struct drm_plane *plane, |
| @@ -132,7 +132,7 @@ int mdp4_plane_mode_set(struct drm_plane *plane, | |||
| 132 | struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); | 132 | struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); |
| 133 | struct mdp4_kms *mdp4_kms = get_kms(plane); | 133 | struct mdp4_kms *mdp4_kms = get_kms(plane); |
| 134 | enum mdp4_pipe pipe = mdp4_plane->pipe; | 134 | enum mdp4_pipe pipe = mdp4_plane->pipe; |
| 135 | const struct mdp4_format *format; | 135 | const struct mdp_format *format; |
| 136 | uint32_t op_mode = 0; | 136 | uint32_t op_mode = 0; |
| 137 | uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT; | 137 | uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT; |
| 138 | uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT; | 138 | uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT; |
| @@ -175,7 +175,7 @@ int mdp4_plane_mode_set(struct drm_plane *plane, | |||
| 175 | 175 | ||
| 176 | mdp4_plane_set_scanout(plane, fb); | 176 | mdp4_plane_set_scanout(plane, fb); |
| 177 | 177 | ||
| 178 | format = to_mdp4_format(msm_framebuffer_format(fb)); | 178 | format = to_mdp_format(msm_framebuffer_format(fb)); |
| 179 | 179 | ||
| 180 | mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_FORMAT(pipe), | 180 | mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_FORMAT(pipe), |
| 181 | MDP4_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) | | 181 | MDP4_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) | |
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h new file mode 100644 index 000000000000..0aa51517f826 --- /dev/null +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h | |||
| @@ -0,0 +1,1036 @@ | |||
| 1 | #ifndef MDP5_XML | ||
| 2 | #define MDP5_XML | ||
| 3 | |||
| 4 | /* Autogenerated file, DO NOT EDIT manually! | ||
| 5 | |||
| 6 | This file was generated by the rules-ng-ng headergen tool in this git repository: | ||
| 7 | http://github.com/freedreno/envytools/ | ||
| 8 | git clone https://github.com/freedreno/envytools.git | ||
| 9 | |||
| 10 | The rules-ng-ng source files this header was generated from are: | ||
| 11 | - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) | ||
| 12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) | ||
| 13 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31) | ||
| 14 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52) | ||
| 15 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13) | ||
| 16 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) | ||
| 17 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) | ||
| 18 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) | ||
| 19 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) | ||
| 20 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04) | ||
| 21 | |||
| 22 | Copyright (C) 2013 by the following authors: | ||
| 23 | - Rob Clark <robdclark@gmail.com> (robclark) | ||
| 24 | |||
| 25 | Permission is hereby granted, free of charge, to any person obtaining | ||
| 26 | a copy of this software and associated documentation files (the | ||
| 27 | "Software"), to deal in the Software without restriction, including | ||
| 28 | without limitation the rights to use, copy, modify, merge, publish, | ||
| 29 | distribute, sublicense, and/or sell copies of the Software, and to | ||
| 30 | permit persons to whom the Software is furnished to do so, subject to | ||
| 31 | the following conditions: | ||
| 32 | |||
| 33 | The above copyright notice and this permission notice (including the | ||
| 34 | next paragraph) shall be included in all copies or substantial | ||
| 35 | portions of the Software. | ||
| 36 | |||
| 37 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 38 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 39 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. | ||
| 40 | IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE | ||
| 41 | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION | ||
| 42 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||
| 43 | WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
| 44 | */ | ||
| 45 | |||
| 46 | |||
| 47 | enum mdp5_intf { | ||
| 48 | INTF_DSI = 1, | ||
| 49 | INTF_HDMI = 3, | ||
| 50 | INTF_LCDC = 5, | ||
| 51 | INTF_eDP = 9, | ||
| 52 | }; | ||
| 53 | |||
| 54 | enum mdp5_intfnum { | ||
| 55 | NO_INTF = 0, | ||
| 56 | INTF0 = 1, | ||
| 57 | INTF1 = 2, | ||
| 58 | INTF2 = 3, | ||
| 59 | INTF3 = 4, | ||
| 60 | }; | ||
| 61 | |||
| 62 | enum mdp5_pipe { | ||
| 63 | SSPP_VIG0 = 0, | ||
| 64 | SSPP_VIG1 = 1, | ||
| 65 | SSPP_VIG2 = 2, | ||
| 66 | SSPP_RGB0 = 3, | ||
| 67 | SSPP_RGB1 = 4, | ||
| 68 | SSPP_RGB2 = 5, | ||
| 69 | SSPP_DMA0 = 6, | ||
| 70 | SSPP_DMA1 = 7, | ||
| 71 | }; | ||
| 72 | |||
| 73 | enum mdp5_ctl_mode { | ||
| 74 | MODE_NONE = 0, | ||
| 75 | MODE_ROT0 = 1, | ||
| 76 | MODE_ROT1 = 2, | ||
| 77 | MODE_WB0 = 3, | ||
| 78 | MODE_WB1 = 4, | ||
| 79 | MODE_WFD = 5, | ||
| 80 | }; | ||
| 81 | |||
| 82 | enum mdp5_pack_3d { | ||
| 83 | PACK_3D_FRAME_INT = 0, | ||
| 84 | PACK_3D_H_ROW_INT = 1, | ||
| 85 | PACK_3D_V_ROW_INT = 2, | ||
| 86 | PACK_3D_COL_INT = 3, | ||
| 87 | }; | ||
| 88 | |||
| 89 | enum mdp5_chroma_samp_type { | ||
| 90 | CHROMA_RGB = 0, | ||
| 91 | CHROMA_H2V1 = 1, | ||
| 92 | CHROMA_H1V2 = 2, | ||
| 93 | CHROMA_420 = 3, | ||
| 94 | }; | ||
| 95 | |||
| 96 | enum mdp5_scale_filter { | ||
| 97 | SCALE_FILTER_NEAREST = 0, | ||
| 98 | SCALE_FILTER_BIL = 1, | ||
| 99 | SCALE_FILTER_PCMN = 2, | ||
| 100 | SCALE_FILTER_CA = 3, | ||
| 101 | }; | ||
| 102 | |||
| 103 | enum mdp5_pipe_bwc { | ||
| 104 | BWC_LOSSLESS = 0, | ||
| 105 | BWC_Q_HIGH = 1, | ||
| 106 | BWC_Q_MED = 2, | ||
| 107 | }; | ||
| 108 | |||
| 109 | enum mdp5_client_id { | ||
| 110 | CID_UNUSED = 0, | ||
| 111 | CID_VIG0_Y = 1, | ||
| 112 | CID_VIG0_CR = 2, | ||
| 113 | CID_VIG0_CB = 3, | ||
| 114 | CID_VIG1_Y = 4, | ||
| 115 | CID_VIG1_CR = 5, | ||
| 116 | CID_VIG1_CB = 6, | ||
| 117 | CID_VIG2_Y = 7, | ||
| 118 | CID_VIG2_CR = 8, | ||
| 119 | CID_VIG2_CB = 9, | ||
| 120 | CID_DMA0_Y = 10, | ||
| 121 | CID_DMA0_CR = 11, | ||
| 122 | CID_DMA0_CB = 12, | ||
| 123 | CID_DMA1_Y = 13, | ||
| 124 | CID_DMA1_CR = 14, | ||
| 125 | CID_DMA1_CB = 15, | ||
| 126 | CID_RGB0 = 16, | ||
| 127 | CID_RGB1 = 17, | ||
| 128 | CID_RGB2 = 18, | ||
| 129 | CID_MAX = 19, | ||
| 130 | }; | ||
| 131 | |||
| 132 | enum mdp5_igc_type { | ||
| 133 | IGC_VIG = 0, | ||
| 134 | IGC_RGB = 1, | ||
| 135 | IGC_DMA = 2, | ||
| 136 | IGC_DSPP = 3, | ||
| 137 | }; | ||
| 138 | |||
| 139 | #define MDP5_IRQ_INTF0_WB_ROT_COMP 0x00000001 | ||
| 140 | #define MDP5_IRQ_INTF1_WB_ROT_COMP 0x00000002 | ||
| 141 | #define MDP5_IRQ_INTF2_WB_ROT_COMP 0x00000004 | ||
| 142 | #define MDP5_IRQ_INTF3_WB_ROT_COMP 0x00000008 | ||
| 143 | #define MDP5_IRQ_INTF0_WB_WFD 0x00000010 | ||
| 144 | #define MDP5_IRQ_INTF1_WB_WFD 0x00000020 | ||
| 145 | #define MDP5_IRQ_INTF2_WB_WFD 0x00000040 | ||
| 146 | #define MDP5_IRQ_INTF3_WB_WFD 0x00000080 | ||
| 147 | #define MDP5_IRQ_INTF0_PING_PONG_COMP 0x00000100 | ||
| 148 | #define MDP5_IRQ_INTF1_PING_PONG_COMP 0x00000200 | ||
| 149 | #define MDP5_IRQ_INTF2_PING_PONG_COMP 0x00000400 | ||
| 150 | #define MDP5_IRQ_INTF3_PING_PONG_COMP 0x00000800 | ||
| 151 | #define MDP5_IRQ_INTF0_PING_PONG_RD_PTR 0x00001000 | ||
| 152 | #define MDP5_IRQ_INTF1_PING_PONG_RD_PTR 0x00002000 | ||
| 153 | #define MDP5_IRQ_INTF2_PING_PONG_RD_PTR 0x00004000 | ||
| 154 | #define MDP5_IRQ_INTF3_PING_PONG_RD_PTR 0x00008000 | ||
| 155 | #define MDP5_IRQ_INTF0_PING_PONG_WR_PTR 0x00010000 | ||
| 156 | #define MDP5_IRQ_INTF1_PING_PONG_WR_PTR 0x00020000 | ||
| 157 | #define MDP5_IRQ_INTF2_PING_PONG_WR_PTR 0x00040000 | ||
| 158 | #define MDP5_IRQ_INTF3_PING_PONG_WR_PTR 0x00080000 | ||
| 159 | #define MDP5_IRQ_INTF0_PING_PONG_AUTO_REF 0x00100000 | ||
| 160 | #define MDP5_IRQ_INTF1_PING_PONG_AUTO_REF 0x00200000 | ||
| 161 | #define MDP5_IRQ_INTF2_PING_PONG_AUTO_REF 0x00400000 | ||
| 162 | #define MDP5_IRQ_INTF3_PING_PONG_AUTO_REF 0x00800000 | ||
| 163 | #define MDP5_IRQ_INTF0_UNDER_RUN 0x01000000 | ||
| 164 | #define MDP5_IRQ_INTF0_VSYNC 0x02000000 | ||
| 165 | #define MDP5_IRQ_INTF1_UNDER_RUN 0x04000000 | ||
| 166 | #define MDP5_IRQ_INTF1_VSYNC 0x08000000 | ||
| 167 | #define MDP5_IRQ_INTF2_UNDER_RUN 0x10000000 | ||
| 168 | #define MDP5_IRQ_INTF2_VSYNC 0x20000000 | ||
| 169 | #define MDP5_IRQ_INTF3_UNDER_RUN 0x40000000 | ||
| 170 | #define MDP5_IRQ_INTF3_VSYNC 0x80000000 | ||
| 171 | #define REG_MDP5_HW_VERSION 0x00000000 | ||
| 172 | |||
| 173 | #define REG_MDP5_HW_INTR_STATUS 0x00000010 | ||
| 174 | #define MDP5_HW_INTR_STATUS_INTR_MDP 0x00000001 | ||
| 175 | #define MDP5_HW_INTR_STATUS_INTR_DSI0 0x00000010 | ||
| 176 | #define MDP5_HW_INTR_STATUS_INTR_DSI1 0x00000020 | ||
| 177 | #define MDP5_HW_INTR_STATUS_INTR_HDMI 0x00000100 | ||
| 178 | #define MDP5_HW_INTR_STATUS_INTR_EDP 0x00001000 | ||
| 179 | |||
| 180 | #define REG_MDP5_MDP_VERSION 0x00000100 | ||
| 181 | #define MDP5_MDP_VERSION_MINOR__MASK 0x00ff0000 | ||
| 182 | #define MDP5_MDP_VERSION_MINOR__SHIFT 16 | ||
| 183 | static inline uint32_t MDP5_MDP_VERSION_MINOR(uint32_t val) | ||
| 184 | { | ||
| 185 | return ((val) << MDP5_MDP_VERSION_MINOR__SHIFT) & MDP5_MDP_VERSION_MINOR__MASK; | ||
| 186 | } | ||
| 187 | #define MDP5_MDP_VERSION_MAJOR__MASK 0xf0000000 | ||
| 188 | #define MDP5_MDP_VERSION_MAJOR__SHIFT 28 | ||
| 189 | static inline uint32_t MDP5_MDP_VERSION_MAJOR(uint32_t val) | ||
| 190 | { | ||
| 191 | return ((val) << MDP5_MDP_VERSION_MAJOR__SHIFT) & MDP5_MDP_VERSION_MAJOR__MASK; | ||
| 192 | } | ||
| 193 | |||
| 194 | #define REG_MDP5_DISP_INTF_SEL 0x00000104 | ||
| 195 | #define MDP5_DISP_INTF_SEL_INTF0__MASK 0x000000ff | ||
| 196 | #define MDP5_DISP_INTF_SEL_INTF0__SHIFT 0 | ||
| 197 | static inline uint32_t MDP5_DISP_INTF_SEL_INTF0(enum mdp5_intf val) | ||
| 198 | { | ||
| 199 | return ((val) << MDP5_DISP_INTF_SEL_INTF0__SHIFT) & MDP5_DISP_INTF_SEL_INTF0__MASK; | ||
| 200 | } | ||
| 201 | #define MDP5_DISP_INTF_SEL_INTF1__MASK 0x0000ff00 | ||
| 202 | #define MDP5_DISP_INTF_SEL_INTF1__SHIFT 8 | ||
| 203 | static inline uint32_t MDP5_DISP_INTF_SEL_INTF1(enum mdp5_intf val) | ||
| 204 | { | ||
| 205 | return ((val) << MDP5_DISP_INTF_SEL_INTF1__SHIFT) & MDP5_DISP_INTF_SEL_INTF1__MASK; | ||
| 206 | } | ||
| 207 | #define MDP5_DISP_INTF_SEL_INTF2__MASK 0x00ff0000 | ||
| 208 | #define MDP5_DISP_INTF_SEL_INTF2__SHIFT 16 | ||
| 209 | static inline uint32_t MDP5_DISP_INTF_SEL_INTF2(enum mdp5_intf val) | ||
| 210 | { | ||
| 211 | return ((val) << MDP5_DISP_INTF_SEL_INTF2__SHIFT) & MDP5_DISP_INTF_SEL_INTF2__MASK; | ||
| 212 | } | ||
| 213 | #define MDP5_DISP_INTF_SEL_INTF3__MASK 0xff000000 | ||
| 214 | #define MDP5_DISP_INTF_SEL_INTF3__SHIFT 24 | ||
| 215 | static inline uint32_t MDP5_DISP_INTF_SEL_INTF3(enum mdp5_intf val) | ||
| 216 | { | ||
| 217 | return ((val) << MDP5_DISP_INTF_SEL_INTF3__SHIFT) & MDP5_DISP_INTF_SEL_INTF3__MASK; | ||
| 218 | } | ||
| 219 | |||
| 220 | #define REG_MDP5_INTR_EN 0x00000110 | ||
| 221 | |||
| 222 | #define REG_MDP5_INTR_STATUS 0x00000114 | ||
| 223 | |||
| 224 | #define REG_MDP5_INTR_CLEAR 0x00000118 | ||
| 225 | |||
| 226 | #define REG_MDP5_HIST_INTR_EN 0x0000011c | ||
| 227 | |||
| 228 | #define REG_MDP5_HIST_INTR_STATUS 0x00000120 | ||
| 229 | |||
| 230 | #define REG_MDP5_HIST_INTR_CLEAR 0x00000124 | ||
| 231 | |||
| 232 | static inline uint32_t REG_MDP5_SMP_ALLOC_W(uint32_t i0) { return 0x00000180 + 0x4*i0; } | ||
| 233 | |||
| 234 | static inline uint32_t REG_MDP5_SMP_ALLOC_W_REG(uint32_t i0) { return 0x00000180 + 0x4*i0; } | ||
| 235 | #define MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK 0x000000ff | ||
| 236 | #define MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT 0 | ||
| 237 | static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT0(enum mdp5_client_id val) | ||
| 238 | { | ||
| 239 | return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK; | ||
| 240 | } | ||
| 241 | #define MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK 0x0000ff00 | ||
| 242 | #define MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT 8 | ||
| 243 | static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT1(enum mdp5_client_id val) | ||
| 244 | { | ||
| 245 | return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK; | ||
| 246 | } | ||
| 247 | #define MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK 0x00ff0000 | ||
| 248 | #define MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT 16 | ||
| 249 | static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT2(enum mdp5_client_id val) | ||
| 250 | { | ||
| 251 | return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK; | ||
| 252 | } | ||
| 253 | |||
| 254 | static inline uint32_t REG_MDP5_SMP_ALLOC_R(uint32_t i0) { return 0x00000230 + 0x4*i0; } | ||
| 255 | |||
| 256 | static inline uint32_t REG_MDP5_SMP_ALLOC_R_REG(uint32_t i0) { return 0x00000230 + 0x4*i0; } | ||
| 257 | #define MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK 0x000000ff | ||
| 258 | #define MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT 0 | ||
| 259 | static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT0(enum mdp5_client_id val) | ||
| 260 | { | ||
| 261 | return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK; | ||
| 262 | } | ||
| 263 | #define MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK 0x0000ff00 | ||
| 264 | #define MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT 8 | ||
| 265 | static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT1(enum mdp5_client_id val) | ||
| 266 | { | ||
| 267 | return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK; | ||
| 268 | } | ||
| 269 | #define MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK 0x00ff0000 | ||
| 270 | #define MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT 16 | ||
| 271 | static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT2(enum mdp5_client_id val) | ||
| 272 | { | ||
| 273 | return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK; | ||
| 274 | } | ||
| 275 | |||
| 276 | static inline uint32_t __offset_IGC(enum mdp5_igc_type idx) | ||
| 277 | { | ||
| 278 | switch (idx) { | ||
| 279 | case IGC_VIG: return 0x00000300; | ||
| 280 | case IGC_RGB: return 0x00000310; | ||
| 281 | case IGC_DMA: return 0x00000320; | ||
| 282 | case IGC_DSPP: return 0x00000400; | ||
| 283 | default: return INVALID_IDX(idx); | ||
| 284 | } | ||
| 285 | } | ||
| 286 | static inline uint32_t REG_MDP5_IGC(enum mdp5_igc_type i0) { return 0x00000000 + __offset_IGC(i0); } | ||
| 287 | |||
| 288 | static inline uint32_t REG_MDP5_IGC_LUT(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; } | ||
| 289 | |||
| 290 | static inline uint32_t REG_MDP5_IGC_LUT_REG(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; } | ||
| 291 | #define MDP5_IGC_LUT_REG_VAL__MASK 0x00000fff | ||
| 292 | #define MDP5_IGC_LUT_REG_VAL__SHIFT 0 | ||
| 293 | static inline uint32_t MDP5_IGC_LUT_REG_VAL(uint32_t val) | ||
| 294 | { | ||
| 295 | return ((val) << MDP5_IGC_LUT_REG_VAL__SHIFT) & MDP5_IGC_LUT_REG_VAL__MASK; | ||
| 296 | } | ||
| 297 | #define MDP5_IGC_LUT_REG_INDEX_UPDATE 0x02000000 | ||
| 298 | #define MDP5_IGC_LUT_REG_DISABLE_PIPE_0 0x10000000 | ||
| 299 | #define MDP5_IGC_LUT_REG_DISABLE_PIPE_1 0x20000000 | ||
| 300 | #define MDP5_IGC_LUT_REG_DISABLE_PIPE_2 0x40000000 | ||
| 301 | |||
| 302 | static inline uint32_t REG_MDP5_CTL(uint32_t i0) { return 0x00000600 + 0x100*i0; } | ||
| 303 | |||
| 304 | static inline uint32_t REG_MDP5_CTL_LAYER(uint32_t i0, uint32_t i1) { return 0x00000600 + 0x100*i0 + 0x4*i1; } | ||
| 305 | |||
| 306 | static inline uint32_t REG_MDP5_CTL_LAYER_REG(uint32_t i0, uint32_t i1) { return 0x00000600 + 0x100*i0 + 0x4*i1; } | ||
| 307 | #define MDP5_CTL_LAYER_REG_VIG0__MASK 0x00000007 | ||
| 308 | #define MDP5_CTL_LAYER_REG_VIG0__SHIFT 0 | ||
| 309 | static inline uint32_t MDP5_CTL_LAYER_REG_VIG0(enum mdp_mixer_stage_id val) | ||
| 310 | { | ||
| 311 | return ((val) << MDP5_CTL_LAYER_REG_VIG0__SHIFT) & MDP5_CTL_LAYER_REG_VIG0__MASK; | ||
| 312 | } | ||
| 313 | #define MDP5_CTL_LAYER_REG_VIG1__MASK 0x00000038 | ||
| 314 | #define MDP5_CTL_LAYER_REG_VIG1__SHIFT 3 | ||
| 315 | static inline uint32_t MDP5_CTL_LAYER_REG_VIG1(enum mdp_mixer_stage_id val) | ||
| 316 | { | ||
| 317 | return ((val) << MDP5_CTL_LAYER_REG_VIG1__SHIFT) & MDP5_CTL_LAYER_REG_VIG1__MASK; | ||
| 318 | } | ||
| 319 | #define MDP5_CTL_LAYER_REG_VIG2__MASK 0x000001c0 | ||
| 320 | #define MDP5_CTL_LAYER_REG_VIG2__SHIFT 6 | ||
| 321 | static inline uint32_t MDP5_CTL_LAYER_REG_VIG2(enum mdp_mixer_stage_id val) | ||
| 322 | { | ||
| 323 | return ((val) << MDP5_CTL_LAYER_REG_VIG2__SHIFT) & MDP5_CTL_LAYER_REG_VIG2__MASK; | ||
| 324 | } | ||
| 325 | #define MDP5_CTL_LAYER_REG_RGB0__MASK 0x00000e00 | ||
| 326 | #define MDP5_CTL_LAYER_REG_RGB0__SHIFT 9 | ||
| 327 | static inline uint32_t MDP5_CTL_LAYER_REG_RGB0(enum mdp_mixer_stage_id val) | ||
| 328 | { | ||
| 329 | return ((val) << MDP5_CTL_LAYER_REG_RGB0__SHIFT) & MDP5_CTL_LAYER_REG_RGB0__MASK; | ||
| 330 | } | ||
| 331 | #define MDP5_CTL_LAYER_REG_RGB1__MASK 0x00007000 | ||
| 332 | #define MDP5_CTL_LAYER_REG_RGB1__SHIFT 12 | ||
| 333 | static inline uint32_t MDP5_CTL_LAYER_REG_RGB1(enum mdp_mixer_stage_id val) | ||
| 334 | { | ||
| 335 | return ((val) << MDP5_CTL_LAYER_REG_RGB1__SHIFT) & MDP5_CTL_LAYER_REG_RGB1__MASK; | ||
| 336 | } | ||
| 337 | #define MDP5_CTL_LAYER_REG_RGB2__MASK 0x00038000 | ||
| 338 | #define MDP5_CTL_LAYER_REG_RGB2__SHIFT 15 | ||
| 339 | static inline uint32_t MDP5_CTL_LAYER_REG_RGB2(enum mdp_mixer_stage_id val) | ||
| 340 | { | ||
| 341 | return ((val) << MDP5_CTL_LAYER_REG_RGB2__SHIFT) & MDP5_CTL_LAYER_REG_RGB2__MASK; | ||
| 342 | } | ||
| 343 | #define MDP5_CTL_LAYER_REG_DMA0__MASK 0x001c0000 | ||
| 344 | #define MDP5_CTL_LAYER_REG_DMA0__SHIFT 18 | ||
| 345 | static inline uint32_t MDP5_CTL_LAYER_REG_DMA0(enum mdp_mixer_stage_id val) | ||
| 346 | { | ||
| 347 | return ((val) << MDP5_CTL_LAYER_REG_DMA0__SHIFT) & MDP5_CTL_LAYER_REG_DMA0__MASK; | ||
| 348 | } | ||
| 349 | #define MDP5_CTL_LAYER_REG_DMA1__MASK 0x00e00000 | ||
| 350 | #define MDP5_CTL_LAYER_REG_DMA1__SHIFT 21 | ||
| 351 | static inline uint32_t MDP5_CTL_LAYER_REG_DMA1(enum mdp_mixer_stage_id val) | ||
| 352 | { | ||
| 353 | return ((val) << MDP5_CTL_LAYER_REG_DMA1__SHIFT) & MDP5_CTL_LAYER_REG_DMA1__MASK; | ||
| 354 | } | ||
| 355 | #define MDP5_CTL_LAYER_REG_BORDER_COLOR 0x01000000 | ||
| 356 | #define MDP5_CTL_LAYER_REG_CURSOR_OUT 0x02000000 | ||
| 357 | |||
| 358 | static inline uint32_t REG_MDP5_CTL_OP(uint32_t i0) { return 0x00000614 + 0x100*i0; } | ||
| 359 | #define MDP5_CTL_OP_MODE__MASK 0x0000000f | ||
| 360 | #define MDP5_CTL_OP_MODE__SHIFT 0 | ||
| 361 | static inline uint32_t MDP5_CTL_OP_MODE(enum mdp5_ctl_mode val) | ||
| 362 | { | ||
| 363 | return ((val) << MDP5_CTL_OP_MODE__SHIFT) & MDP5_CTL_OP_MODE__MASK; | ||
| 364 | } | ||
| 365 | #define MDP5_CTL_OP_INTF_NUM__MASK 0x00000070 | ||
| 366 | #define MDP5_CTL_OP_INTF_NUM__SHIFT 4 | ||
| 367 | static inline uint32_t MDP5_CTL_OP_INTF_NUM(enum mdp5_intfnum val) | ||
| 368 | { | ||
| 369 | return ((val) << MDP5_CTL_OP_INTF_NUM__SHIFT) & MDP5_CTL_OP_INTF_NUM__MASK; | ||
| 370 | } | ||
| 371 | #define MDP5_CTL_OP_CMD_MODE 0x00020000 | ||
| 372 | #define MDP5_CTL_OP_PACK_3D_ENABLE 0x00080000 | ||
| 373 | #define MDP5_CTL_OP_PACK_3D__MASK 0x00300000 | ||
| 374 | #define MDP5_CTL_OP_PACK_3D__SHIFT 20 | ||
| 375 | static inline uint32_t MDP5_CTL_OP_PACK_3D(enum mdp5_pack_3d val) | ||
| 376 | { | ||
| 377 | return ((val) << MDP5_CTL_OP_PACK_3D__SHIFT) & MDP5_CTL_OP_PACK_3D__MASK; | ||
| 378 | } | ||
| 379 | |||
| 380 | static inline uint32_t REG_MDP5_CTL_FLUSH(uint32_t i0) { return 0x00000618 + 0x100*i0; } | ||
| 381 | #define MDP5_CTL_FLUSH_VIG0 0x00000001 | ||
| 382 | #define MDP5_CTL_FLUSH_VIG1 0x00000002 | ||
| 383 | #define MDP5_CTL_FLUSH_VIG2 0x00000004 | ||
| 384 | #define MDP5_CTL_FLUSH_RGB0 0x00000008 | ||
| 385 | #define MDP5_CTL_FLUSH_RGB1 0x00000010 | ||
| 386 | #define MDP5_CTL_FLUSH_RGB2 0x00000020 | ||
| 387 | #define MDP5_CTL_FLUSH_LM0 0x00000040 | ||
| 388 | #define MDP5_CTL_FLUSH_LM1 0x00000080 | ||
| 389 | #define MDP5_CTL_FLUSH_LM2 0x00000100 | ||
| 390 | #define MDP5_CTL_FLUSH_DMA0 0x00000800 | ||
| 391 | #define MDP5_CTL_FLUSH_DMA1 0x00001000 | ||
| 392 | #define MDP5_CTL_FLUSH_DSPP0 0x00002000 | ||
| 393 | #define MDP5_CTL_FLUSH_DSPP1 0x00004000 | ||
| 394 | #define MDP5_CTL_FLUSH_DSPP2 0x00008000 | ||
| 395 | #define MDP5_CTL_FLUSH_CTL 0x00020000 | ||
| 396 | |||
| 397 | static inline uint32_t REG_MDP5_CTL_START(uint32_t i0) { return 0x0000061c + 0x100*i0; } | ||
| 398 | |||
| 399 | static inline uint32_t REG_MDP5_CTL_PACK_3D(uint32_t i0) { return 0x00000620 + 0x100*i0; } | ||
| 400 | |||
| 401 | static inline uint32_t REG_MDP5_PIPE(enum mdp5_pipe i0) { return 0x00001200 + 0x400*i0; } | ||
| 402 | |||
| 403 | static inline uint32_t REG_MDP5_PIPE_HIST_CTL_BASE(enum mdp5_pipe i0) { return 0x000014c4 + 0x400*i0; } | ||
| 404 | |||
| 405 | static inline uint32_t REG_MDP5_PIPE_HIST_LUT_BASE(enum mdp5_pipe i0) { return 0x000014f0 + 0x400*i0; } | ||
| 406 | |||
| 407 | static inline uint32_t REG_MDP5_PIPE_HIST_LUT_SWAP(enum mdp5_pipe i0) { return 0x00001500 + 0x400*i0; } | ||
| 408 | |||
| 409 | static inline uint32_t REG_MDP5_PIPE_SRC_SIZE(enum mdp5_pipe i0) { return 0x00001200 + 0x400*i0; } | ||
| 410 | #define MDP5_PIPE_SRC_SIZE_HEIGHT__MASK 0xffff0000 | ||
| 411 | #define MDP5_PIPE_SRC_SIZE_HEIGHT__SHIFT 16 | ||
| 412 | static inline uint32_t MDP5_PIPE_SRC_SIZE_HEIGHT(uint32_t val) | ||
| 413 | { | ||
| 414 | return ((val) << MDP5_PIPE_SRC_SIZE_HEIGHT__SHIFT) & MDP5_PIPE_SRC_SIZE_HEIGHT__MASK; | ||
| 415 | } | ||
| 416 | #define MDP5_PIPE_SRC_SIZE_WIDTH__MASK 0x0000ffff | ||
| 417 | #define MDP5_PIPE_SRC_SIZE_WIDTH__SHIFT 0 | ||
| 418 | static inline uint32_t MDP5_PIPE_SRC_SIZE_WIDTH(uint32_t val) | ||
| 419 | { | ||
| 420 | return ((val) << MDP5_PIPE_SRC_SIZE_WIDTH__SHIFT) & MDP5_PIPE_SRC_SIZE_WIDTH__MASK; | ||
| 421 | } | ||
| 422 | |||
| 423 | static inline uint32_t REG_MDP5_PIPE_SRC_IMG_SIZE(enum mdp5_pipe i0) { return 0x00001204 + 0x400*i0; } | ||
| 424 | #define MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__MASK 0xffff0000 | ||
| 425 | #define MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__SHIFT 16 | ||
| 426 | static inline uint32_t MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(uint32_t val) | ||
| 427 | { | ||
| 428 | return ((val) << MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__SHIFT) & MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__MASK; | ||
| 429 | } | ||
| 430 | #define MDP5_PIPE_SRC_IMG_SIZE_WIDTH__MASK 0x0000ffff | ||
| 431 | #define MDP5_PIPE_SRC_IMG_SIZE_WIDTH__SHIFT 0 | ||
| 432 | static inline uint32_t MDP5_PIPE_SRC_IMG_SIZE_WIDTH(uint32_t val) | ||
| 433 | { | ||
| 434 | return ((val) << MDP5_PIPE_SRC_IMG_SIZE_WIDTH__SHIFT) & MDP5_PIPE_SRC_IMG_SIZE_WIDTH__MASK; | ||
| 435 | } | ||
| 436 | |||
| 437 | static inline uint32_t REG_MDP5_PIPE_SRC_XY(enum mdp5_pipe i0) { return 0x00001208 + 0x400*i0; } | ||
| 438 | #define MDP5_PIPE_SRC_XY_Y__MASK 0xffff0000 | ||
| 439 | #define MDP5_PIPE_SRC_XY_Y__SHIFT 16 | ||
| 440 | static inline uint32_t MDP5_PIPE_SRC_XY_Y(uint32_t val) | ||
| 441 | { | ||
| 442 | return ((val) << MDP5_PIPE_SRC_XY_Y__SHIFT) & MDP5_PIPE_SRC_XY_Y__MASK; | ||
| 443 | } | ||
| 444 | #define MDP5_PIPE_SRC_XY_X__MASK 0x0000ffff | ||
| 445 | #define MDP5_PIPE_SRC_XY_X__SHIFT 0 | ||
| 446 | static inline uint32_t MDP5_PIPE_SRC_XY_X(uint32_t val) | ||
| 447 | { | ||
| 448 | return ((val) << MDP5_PIPE_SRC_XY_X__SHIFT) & MDP5_PIPE_SRC_XY_X__MASK; | ||
| 449 | } | ||
| 450 | |||
| 451 | static inline uint32_t REG_MDP5_PIPE_OUT_SIZE(enum mdp5_pipe i0) { return 0x0000120c + 0x400*i0; } | ||
| 452 | #define MDP5_PIPE_OUT_SIZE_HEIGHT__MASK 0xffff0000 | ||
| 453 | #define MDP5_PIPE_OUT_SIZE_HEIGHT__SHIFT 16 | ||
| 454 | static inline uint32_t MDP5_PIPE_OUT_SIZE_HEIGHT(uint32_t val) | ||
| 455 | { | ||
| 456 | return ((val) << MDP5_PIPE_OUT_SIZE_HEIGHT__SHIFT) & MDP5_PIPE_OUT_SIZE_HEIGHT__MASK; | ||
| 457 | } | ||
| 458 | #define MDP5_PIPE_OUT_SIZE_WIDTH__MASK 0x0000ffff | ||
| 459 | #define MDP5_PIPE_OUT_SIZE_WIDTH__SHIFT 0 | ||
| 460 | static inline uint32_t MDP5_PIPE_OUT_SIZE_WIDTH(uint32_t val) | ||
| 461 | { | ||
| 462 | return ((val) << MDP5_PIPE_OUT_SIZE_WIDTH__SHIFT) & MDP5_PIPE_OUT_SIZE_WIDTH__MASK; | ||
| 463 | } | ||
| 464 | |||
| 465 | static inline uint32_t REG_MDP5_PIPE_OUT_XY(enum mdp5_pipe i0) { return 0x00001210 + 0x400*i0; } | ||
| 466 | #define MDP5_PIPE_OUT_XY_Y__MASK 0xffff0000 | ||
| 467 | #define MDP5_PIPE_OUT_XY_Y__SHIFT 16 | ||
| 468 | static inline uint32_t MDP5_PIPE_OUT_XY_Y(uint32_t val) | ||
| 469 | { | ||
| 470 | return ((val) << MDP5_PIPE_OUT_XY_Y__SHIFT) & MDP5_PIPE_OUT_XY_Y__MASK; | ||
| 471 | } | ||
| 472 | #define MDP5_PIPE_OUT_XY_X__MASK 0x0000ffff | ||
| 473 | #define MDP5_PIPE_OUT_XY_X__SHIFT 0 | ||
| 474 | static inline uint32_t MDP5_PIPE_OUT_XY_X(uint32_t val) | ||
| 475 | { | ||
| 476 | return ((val) << MDP5_PIPE_OUT_XY_X__SHIFT) & MDP5_PIPE_OUT_XY_X__MASK; | ||
| 477 | } | ||
| 478 | |||
| 479 | static inline uint32_t REG_MDP5_PIPE_SRC0_ADDR(enum mdp5_pipe i0) { return 0x00001214 + 0x400*i0; } | ||
| 480 | |||
| 481 | static inline uint32_t REG_MDP5_PIPE_SRC1_ADDR(enum mdp5_pipe i0) { return 0x00001218 + 0x400*i0; } | ||
| 482 | |||
| 483 | static inline uint32_t REG_MDP5_PIPE_SRC2_ADDR(enum mdp5_pipe i0) { return 0x0000121c + 0x400*i0; } | ||
| 484 | |||
| 485 | static inline uint32_t REG_MDP5_PIPE_SRC3_ADDR(enum mdp5_pipe i0) { return 0x00001220 + 0x400*i0; } | ||
| 486 | |||
| 487 | static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_A(enum mdp5_pipe i0) { return 0x00001224 + 0x400*i0; } | ||
| 488 | #define MDP5_PIPE_SRC_STRIDE_A_P0__MASK 0x0000ffff | ||
| 489 | #define MDP5_PIPE_SRC_STRIDE_A_P0__SHIFT 0 | ||
| 490 | static inline uint32_t MDP5_PIPE_SRC_STRIDE_A_P0(uint32_t val) | ||
| 491 | { | ||
| 492 | return ((val) << MDP5_PIPE_SRC_STRIDE_A_P0__SHIFT) & MDP5_PIPE_SRC_STRIDE_A_P0__MASK; | ||
| 493 | } | ||
| 494 | #define MDP5_PIPE_SRC_STRIDE_A_P1__MASK 0xffff0000 | ||
| 495 | #define MDP5_PIPE_SRC_STRIDE_A_P1__SHIFT 16 | ||
| 496 | static inline uint32_t MDP5_PIPE_SRC_STRIDE_A_P1(uint32_t val) | ||
| 497 | { | ||
| 498 | return ((val) << MDP5_PIPE_SRC_STRIDE_A_P1__SHIFT) & MDP5_PIPE_SRC_STRIDE_A_P1__MASK; | ||
| 499 | } | ||
| 500 | |||
| 501 | static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_B(enum mdp5_pipe i0) { return 0x00001228 + 0x400*i0; } | ||
| 502 | #define MDP5_PIPE_SRC_STRIDE_B_P2__MASK 0x0000ffff | ||
| 503 | #define MDP5_PIPE_SRC_STRIDE_B_P2__SHIFT 0 | ||
| 504 | static inline uint32_t MDP5_PIPE_SRC_STRIDE_B_P2(uint32_t val) | ||
| 505 | { | ||
| 506 | return ((val) << MDP5_PIPE_SRC_STRIDE_B_P2__SHIFT) & MDP5_PIPE_SRC_STRIDE_B_P2__MASK; | ||
| 507 | } | ||
| 508 | #define MDP5_PIPE_SRC_STRIDE_B_P3__MASK 0xffff0000 | ||
| 509 | #define MDP5_PIPE_SRC_STRIDE_B_P3__SHIFT 16 | ||
| 510 | static inline uint32_t MDP5_PIPE_SRC_STRIDE_B_P3(uint32_t val) | ||
| 511 | { | ||
| 512 | return ((val) << MDP5_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP5_PIPE_SRC_STRIDE_B_P3__MASK; | ||
| 513 | } | ||
| 514 | |||
| 515 | static inline uint32_t REG_MDP5_PIPE_STILE_FRAME_SIZE(enum mdp5_pipe i0) { return 0x0000122c + 0x400*i0; } | ||
| 516 | |||
| 517 | static inline uint32_t REG_MDP5_PIPE_SRC_FORMAT(enum mdp5_pipe i0) { return 0x00001230 + 0x400*i0; } | ||
| 518 | #define MDP5_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003 | ||
| 519 | #define MDP5_PIPE_SRC_FORMAT_G_BPC__SHIFT 0 | ||
| 520 | static inline uint32_t MDP5_PIPE_SRC_FORMAT_G_BPC(enum mdp_bpc val) | ||
| 521 | { | ||
| 522 | return ((val) << MDP5_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_G_BPC__MASK; | ||
| 523 | } | ||
| 524 | #define MDP5_PIPE_SRC_FORMAT_B_BPC__MASK 0x0000000c | ||
| 525 | #define MDP5_PIPE_SRC_FORMAT_B_BPC__SHIFT 2 | ||
| 526 | static inline uint32_t MDP5_PIPE_SRC_FORMAT_B_BPC(enum mdp_bpc val) | ||
| 527 | { | ||
| 528 | return ((val) << MDP5_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_B_BPC__MASK; | ||
| 529 | } | ||
| 530 | #define MDP5_PIPE_SRC_FORMAT_R_BPC__MASK 0x00000030 | ||
| 531 | #define MDP5_PIPE_SRC_FORMAT_R_BPC__SHIFT 4 | ||
| 532 | static inline uint32_t MDP5_PIPE_SRC_FORMAT_R_BPC(enum mdp_bpc val) | ||
| 533 | { | ||
| 534 | return ((val) << MDP5_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_R_BPC__MASK; | ||
| 535 | } | ||
| 536 | #define MDP5_PIPE_SRC_FORMAT_A_BPC__MASK 0x000000c0 | ||
| 537 | #define MDP5_PIPE_SRC_FORMAT_A_BPC__SHIFT 6 | ||
| 538 | static inline uint32_t MDP5_PIPE_SRC_FORMAT_A_BPC(enum mdp_bpc_alpha val) | ||
| 539 | { | ||
| 540 | return ((val) << MDP5_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_A_BPC__MASK; | ||
| 541 | } | ||
| 542 | #define MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE 0x00000100 | ||
| 543 | #define MDP5_PIPE_SRC_FORMAT_CPP__MASK 0x00000600 | ||
| 544 | #define MDP5_PIPE_SRC_FORMAT_CPP__SHIFT 9 | ||
| 545 | static inline uint32_t MDP5_PIPE_SRC_FORMAT_CPP(uint32_t val) | ||
| 546 | { | ||
| 547 | return ((val) << MDP5_PIPE_SRC_FORMAT_CPP__SHIFT) & MDP5_PIPE_SRC_FORMAT_CPP__MASK; | ||
| 548 | } | ||
| 549 | #define MDP5_PIPE_SRC_FORMAT_ROT90 0x00000800 | ||
| 550 | #define MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK 0x00003000 | ||
| 551 | #define MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT 12 | ||
| 552 | static inline uint32_t MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val) | ||
| 553 | { | ||
| 554 | return ((val) << MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT) & MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK; | ||
| 555 | } | ||
| 556 | #define MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT 0x00020000 | ||
| 557 | #define MDP5_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB 0x00040000 | ||
| 558 | #define MDP5_PIPE_SRC_FORMAT_NUM_PLANES__MASK 0x00780000 | ||
| 559 | #define MDP5_PIPE_SRC_FORMAT_NUM_PLANES__SHIFT 19 | ||
| 560 | static inline uint32_t MDP5_PIPE_SRC_FORMAT_NUM_PLANES(uint32_t val) | ||
| 561 | { | ||
| 562 | return ((val) << MDP5_PIPE_SRC_FORMAT_NUM_PLANES__SHIFT) & MDP5_PIPE_SRC_FORMAT_NUM_PLANES__MASK; | ||
| 563 | } | ||
| 564 | #define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK 0x01800000 | ||
| 565 | #define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT 23 | ||
| 566 | static inline uint32_t MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(enum mdp5_chroma_samp_type val) | ||
| 567 | { | ||
| 568 | return ((val) << MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT) & MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK; | ||
| 569 | } | ||
| 570 | |||
| 571 | static inline uint32_t REG_MDP5_PIPE_SRC_UNPACK(enum mdp5_pipe i0) { return 0x00001234 + 0x400*i0; } | ||
| 572 | #define MDP5_PIPE_SRC_UNPACK_ELEM0__MASK 0x000000ff | ||
| 573 | #define MDP5_PIPE_SRC_UNPACK_ELEM0__SHIFT 0 | ||
| 574 | static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM0(uint32_t val) | ||
| 575 | { | ||
| 576 | return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM0__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM0__MASK; | ||
| 577 | } | ||
| 578 | #define MDP5_PIPE_SRC_UNPACK_ELEM1__MASK 0x0000ff00 | ||
| 579 | #define MDP5_PIPE_SRC_UNPACK_ELEM1__SHIFT 8 | ||
| 580 | static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM1(uint32_t val) | ||
| 581 | { | ||
| 582 | return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM1__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM1__MASK; | ||
| 583 | } | ||
| 584 | #define MDP5_PIPE_SRC_UNPACK_ELEM2__MASK 0x00ff0000 | ||
| 585 | #define MDP5_PIPE_SRC_UNPACK_ELEM2__SHIFT 16 | ||
| 586 | static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM2(uint32_t val) | ||
| 587 | { | ||
| 588 | return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM2__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM2__MASK; | ||
| 589 | } | ||
| 590 | #define MDP5_PIPE_SRC_UNPACK_ELEM3__MASK 0xff000000 | ||
| 591 | #define MDP5_PIPE_SRC_UNPACK_ELEM3__SHIFT 24 | ||
| 592 | static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM3(uint32_t val) | ||
| 593 | { | ||
| 594 | return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM3__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM3__MASK; | ||
| 595 | } | ||
| 596 | |||
| 597 | static inline uint32_t REG_MDP5_PIPE_SRC_OP_MODE(enum mdp5_pipe i0) { return 0x00001238 + 0x400*i0; } | ||
| 598 | #define MDP5_PIPE_SRC_OP_MODE_BWC_EN 0x00000001 | ||
| 599 | #define MDP5_PIPE_SRC_OP_MODE_BWC__MASK 0x00000006 | ||
| 600 | #define MDP5_PIPE_SRC_OP_MODE_BWC__SHIFT 1 | ||
| 601 | static inline uint32_t MDP5_PIPE_SRC_OP_MODE_BWC(enum mdp5_pipe_bwc val) | ||
| 602 | { | ||
| 603 | return ((val) << MDP5_PIPE_SRC_OP_MODE_BWC__SHIFT) & MDP5_PIPE_SRC_OP_MODE_BWC__MASK; | ||
| 604 | } | ||
| 605 | #define MDP5_PIPE_SRC_OP_MODE_FLIP_LR 0x00002000 | ||
| 606 | #define MDP5_PIPE_SRC_OP_MODE_FLIP_UD 0x00004000 | ||
| 607 | #define MDP5_PIPE_SRC_OP_MODE_IGC_EN 0x00010000 | ||
| 608 | #define MDP5_PIPE_SRC_OP_MODE_IGC_ROM_0 0x00020000 | ||
| 609 | #define MDP5_PIPE_SRC_OP_MODE_IGC_ROM_1 0x00040000 | ||
| 610 | #define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE 0x00400000 | ||
| 611 | #define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE_ODD 0x00800000 | ||
| 612 | |||
| 613 | static inline uint32_t REG_MDP5_PIPE_SRC_CONSTANT_COLOR(enum mdp5_pipe i0) { return 0x0000123c + 0x400*i0; } | ||
| 614 | |||
/* Per-pipe (SSPP) register accessors: each returns the MMIO offset of the
 * named register for pipe i0 (0x400 byte stride between pipe instances).
 * NOTE(review): these look machine-generated from a register XML database
 * (names/shape match the MSM headergen output) -- do not hand-edit offsets;
 * regenerate instead.
 */
static inline uint32_t REG_MDP5_PIPE_FETCH_CONFIG(enum mdp5_pipe i0) { return 0x00001248 + 0x400*i0; }

static inline uint32_t REG_MDP5_PIPE_VC1_RANGE(enum mdp5_pipe i0) { return 0x0000124c + 0x400*i0; }

static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(enum mdp5_pipe i0) { return 0x00001250 + 0x400*i0; }

static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(enum mdp5_pipe i0) { return 0x00001254 + 0x400*i0; }

static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(enum mdp5_pipe i0) { return 0x00001258 + 0x400*i0; }

static inline uint32_t REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(enum mdp5_pipe i0) { return 0x00001270 + 0x400*i0; }

static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC0_ADDR(enum mdp5_pipe i0) { return 0x000012a4 + 0x400*i0; }

static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC1_ADDR(enum mdp5_pipe i0) { return 0x000012a8 + 0x400*i0; }

static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC2_ADDR(enum mdp5_pipe i0) { return 0x000012ac + 0x400*i0; }

static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC3_ADDR(enum mdp5_pipe i0) { return 0x000012b0 + 0x400*i0; }

/* DECIMATION: packed vertical (bits 7:0) / horizontal (bits 15:8) factors.
 * The MDP5_PIPE_DECIMATION_* helpers shift-and-mask a value into field
 * position for composing the register word.
 */
static inline uint32_t REG_MDP5_PIPE_DECIMATION(enum mdp5_pipe i0) { return 0x000012b4 + 0x400*i0; }
#define MDP5_PIPE_DECIMATION_VERT__MASK 0x000000ff
#define MDP5_PIPE_DECIMATION_VERT__SHIFT 0
static inline uint32_t MDP5_PIPE_DECIMATION_VERT(uint32_t val)
{
	return ((val) << MDP5_PIPE_DECIMATION_VERT__SHIFT) & MDP5_PIPE_DECIMATION_VERT__MASK;
}
#define MDP5_PIPE_DECIMATION_HORZ__MASK 0x0000ff00
#define MDP5_PIPE_DECIMATION_HORZ__SHIFT 8
static inline uint32_t MDP5_PIPE_DECIMATION_HORZ(uint32_t val)
{
	return ((val) << MDP5_PIPE_DECIMATION_HORZ__SHIFT) & MDP5_PIPE_DECIMATION_HORZ__MASK;
}

/* SCALE_CONFIG: enable bits plus 2-bit filter selectors (enum
 * mdp5_scale_filter) for min/CR/max scaling in X and Y.
 */
static inline uint32_t REG_MDP5_PIPE_SCALE_CONFIG(enum mdp5_pipe i0) { return 0x00001404 + 0x400*i0; }
#define MDP5_PIPE_SCALE_CONFIG_SCALEX_EN 0x00000001
#define MDP5_PIPE_SCALE_CONFIG_SCALEY_EN 0x00000002
#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__MASK 0x00000300
#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__SHIFT 8
static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER(enum mdp5_scale_filter val)
{
	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__MASK;
}
#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__MASK 0x00000c00
#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__SHIFT 10
static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER(enum mdp5_scale_filter val)
{
	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__MASK;
}
#define MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__MASK 0x00003000
#define MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__SHIFT 12
static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER(enum mdp5_scale_filter val)
{
	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__MASK;
}
#define MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__MASK 0x0000c000
#define MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__SHIFT 14
static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER(enum mdp5_scale_filter val)
{
	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__MASK;
}
#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__MASK 0x00030000
#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__SHIFT 16
static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(enum mdp5_scale_filter val)
{
	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__MASK;
}
#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__MASK 0x000c0000
#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__SHIFT 18
static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(enum mdp5_scale_filter val)
{
	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__MASK;
}

static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_X(enum mdp5_pipe i0) { return 0x00001410 + 0x400*i0; }

static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(enum mdp5_pipe i0) { return 0x00001414 + 0x400*i0; }

static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_X(enum mdp5_pipe i0) { return 0x00001420 + 0x400*i0; }

static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_Y(enum mdp5_pipe i0) { return 0x00001424 + 0x400*i0; }
| 696 | |||
/* Layer mixer (LM) registers: blend stages, output size, cursor, and GC LUT.
 * i0 selects the mixer instance (0x400 stride); i1, where present, selects
 * the blend stage (0x30 stride).  Generated accessors -- regenerate rather
 * than hand-edit.
 */
static inline uint32_t REG_MDP5_LM(uint32_t i0) { return 0x00003200 + 0x400*i0; }

static inline uint32_t REG_MDP5_LM_BLEND_COLOR_OUT(uint32_t i0) { return 0x00003200 + 0x400*i0; }
#define MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA 0x00000002
#define MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA 0x00000004
#define MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA 0x00000008
#define MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA 0x00000010

static inline uint32_t REG_MDP5_LM_OUT_SIZE(uint32_t i0) { return 0x00003204 + 0x400*i0; }
#define MDP5_LM_OUT_SIZE_HEIGHT__MASK 0xffff0000
#define MDP5_LM_OUT_SIZE_HEIGHT__SHIFT 16
static inline uint32_t MDP5_LM_OUT_SIZE_HEIGHT(uint32_t val)
{
	return ((val) << MDP5_LM_OUT_SIZE_HEIGHT__SHIFT) & MDP5_LM_OUT_SIZE_HEIGHT__MASK;
}
#define MDP5_LM_OUT_SIZE_WIDTH__MASK 0x0000ffff
#define MDP5_LM_OUT_SIZE_WIDTH__SHIFT 0
static inline uint32_t MDP5_LM_OUT_SIZE_WIDTH(uint32_t val)
{
	return ((val) << MDP5_LM_OUT_SIZE_WIDTH__SHIFT) & MDP5_LM_OUT_SIZE_WIDTH__MASK;
}

static inline uint32_t REG_MDP5_LM_BORDER_COLOR_0(uint32_t i0) { return 0x00003208 + 0x400*i0; }

static inline uint32_t REG_MDP5_LM_BORDER_COLOR_1(uint32_t i0) { return 0x00003210 + 0x400*i0; }

/* Per-blend-stage registers (i1 = stage): */
static inline uint32_t REG_MDP5_LM_BLEND(uint32_t i0, uint32_t i1) { return 0x00003220 + 0x400*i0 + 0x30*i1; }

static inline uint32_t REG_MDP5_LM_BLEND_OP_MODE(uint32_t i0, uint32_t i1) { return 0x00003220 + 0x400*i0 + 0x30*i1; }
#define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__MASK 0x00000003
#define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__SHIFT 0
static inline uint32_t MDP5_LM_BLEND_OP_MODE_FG_ALPHA(enum mdp_alpha_type val)
{
	return ((val) << MDP5_LM_BLEND_OP_MODE_FG_ALPHA__SHIFT) & MDP5_LM_BLEND_OP_MODE_FG_ALPHA__MASK;
}
#define MDP5_LM_BLEND_OP_MODE_FG_INV_ALPHA 0x00000004
#define MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA 0x00000008
#define MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA 0x00000010
#define MDP5_LM_BLEND_OP_MODE_FG_TRANSP_EN 0x00000020
#define MDP5_LM_BLEND_OP_MODE_BG_ALPHA__MASK 0x00000300
#define MDP5_LM_BLEND_OP_MODE_BG_ALPHA__SHIFT 8
static inline uint32_t MDP5_LM_BLEND_OP_MODE_BG_ALPHA(enum mdp_alpha_type val)
{
	return ((val) << MDP5_LM_BLEND_OP_MODE_BG_ALPHA__SHIFT) & MDP5_LM_BLEND_OP_MODE_BG_ALPHA__MASK;
}
#define MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA 0x00000400
#define MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA 0x00000800
#define MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA 0x00001000
#define MDP5_LM_BLEND_OP_MODE_BG_TRANSP_EN 0x00002000

static inline uint32_t REG_MDP5_LM_BLEND_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00003224 + 0x400*i0 + 0x30*i1; }

static inline uint32_t REG_MDP5_LM_BLEND_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00003228 + 0x400*i0 + 0x30*i1; }

static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000322c + 0x400*i0 + 0x30*i1; }

static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00003230 + 0x400*i0 + 0x30*i1; }

static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00003234 + 0x400*i0 + 0x30*i1; }

static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00003238 + 0x400*i0 + 0x30*i1; }

static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000323c + 0x400*i0 + 0x30*i1; }

static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00003240 + 0x400*i0 + 0x30*i1; }

static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00003244 + 0x400*i0 + 0x30*i1; }

static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00003248 + 0x400*i0 + 0x30*i1; }

/* Hardware cursor registers (note: STRIDE at 0x32dc sits below IMG_SIZE in
 * the address map even though it is listed after XY here):
 */
static inline uint32_t REG_MDP5_LM_CURSOR_IMG_SIZE(uint32_t i0) { return 0x000032e0 + 0x400*i0; }

static inline uint32_t REG_MDP5_LM_CURSOR_SIZE(uint32_t i0) { return 0x000032e4 + 0x400*i0; }

static inline uint32_t REG_MDP5_LM_CURSOR_XY(uint32_t i0) { return 0x000032e8 + 0x400*i0; }

static inline uint32_t REG_MDP5_LM_CURSOR_STRIDE(uint32_t i0) { return 0x000032dc + 0x400*i0; }

static inline uint32_t REG_MDP5_LM_CURSOR_FORMAT(uint32_t i0) { return 0x000032ec + 0x400*i0; }

static inline uint32_t REG_MDP5_LM_CURSOR_BASE_ADDR(uint32_t i0) { return 0x000032f0 + 0x400*i0; }

static inline uint32_t REG_MDP5_LM_CURSOR_START_XY(uint32_t i0) { return 0x000032f4 + 0x400*i0; }

static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_CONFIG(uint32_t i0) { return 0x000032f8 + 0x400*i0; }

static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_PARAM(uint32_t i0) { return 0x000032fc + 0x400*i0; }

static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW0(uint32_t i0) { return 0x00003300 + 0x400*i0; }

static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW1(uint32_t i0) { return 0x00003304 + 0x400*i0; }

static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH0(uint32_t i0) { return 0x00003308 + 0x400*i0; }

static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH1(uint32_t i0) { return 0x0000330c + 0x400*i0; }

static inline uint32_t REG_MDP5_LM_GC_LUT_BASE(uint32_t i0) { return 0x00003310 + 0x400*i0; }
| 794 | |||
/* Destination surface post-processor (DSPP) registers: IGC/PCC/dither/
 * histogram/PA/gamut blocks, 0x400 stride per DSPP instance.  Generated
 * accessors -- regenerate rather than hand-edit.
 */
static inline uint32_t REG_MDP5_DSPP(uint32_t i0) { return 0x00004600 + 0x400*i0; }

static inline uint32_t REG_MDP5_DSPP_OP_MODE(uint32_t i0) { return 0x00004600 + 0x400*i0; }
#define MDP5_DSPP_OP_MODE_IGC_LUT_EN 0x00000001
#define MDP5_DSPP_OP_MODE_IGC_TBL_IDX__MASK 0x0000000e
#define MDP5_DSPP_OP_MODE_IGC_TBL_IDX__SHIFT 1
static inline uint32_t MDP5_DSPP_OP_MODE_IGC_TBL_IDX(uint32_t val)
{
	return ((val) << MDP5_DSPP_OP_MODE_IGC_TBL_IDX__SHIFT) & MDP5_DSPP_OP_MODE_IGC_TBL_IDX__MASK;
}
#define MDP5_DSPP_OP_MODE_PCC_EN 0x00000010
#define MDP5_DSPP_OP_MODE_DITHER_EN 0x00000100
#define MDP5_DSPP_OP_MODE_HIST_EN 0x00010000
#define MDP5_DSPP_OP_MODE_AUTO_CLEAR 0x00020000
#define MDP5_DSPP_OP_MODE_HIST_LUT_EN 0x00080000
#define MDP5_DSPP_OP_MODE_PA_EN 0x00100000
#define MDP5_DSPP_OP_MODE_GAMUT_EN 0x00800000
#define MDP5_DSPP_OP_MODE_GAMUT_ORDER 0x01000000

static inline uint32_t REG_MDP5_DSPP_PCC_BASE(uint32_t i0) { return 0x00004630 + 0x400*i0; }

static inline uint32_t REG_MDP5_DSPP_DITHER_DEPTH(uint32_t i0) { return 0x00004750 + 0x400*i0; }

static inline uint32_t REG_MDP5_DSPP_HIST_CTL_BASE(uint32_t i0) { return 0x00004810 + 0x400*i0; }

static inline uint32_t REG_MDP5_DSPP_HIST_LUT_BASE(uint32_t i0) { return 0x00004830 + 0x400*i0; }

static inline uint32_t REG_MDP5_DSPP_HIST_LUT_SWAP(uint32_t i0) { return 0x00004834 + 0x400*i0; }

static inline uint32_t REG_MDP5_DSPP_PA_BASE(uint32_t i0) { return 0x00004838 + 0x400*i0; }

/* NOTE(review): GAMUT_BASE (0x48dc) is listed before GC_BASE (0x48b0);
 * declaration order does not match address order, which is expected for
 * generated headers -- offsets are authoritative.
 */
static inline uint32_t REG_MDP5_DSPP_GAMUT_BASE(uint32_t i0) { return 0x000048dc + 0x400*i0; }

static inline uint32_t REG_MDP5_DSPP_GC_BASE(uint32_t i0) { return 0x000048b0 + 0x400*i0; }
| 829 | |||
/* Display interface (INTF) timing registers, 0x200 stride per interface:
 * sync pulse geometry, active/display windows, polarity, and the test
 * pattern generator (TPG).  Generated accessors -- regenerate rather than
 * hand-edit.
 */
static inline uint32_t REG_MDP5_INTF(uint32_t i0) { return 0x00012500 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_TIMING_ENGINE_EN(uint32_t i0) { return 0x00012500 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_CONFIG(uint32_t i0) { return 0x00012504 + 0x200*i0; }

/* HSYNC_CTL: pulse width in bits 15:0, total line period in bits 31:16. */
static inline uint32_t REG_MDP5_INTF_HSYNC_CTL(uint32_t i0) { return 0x00012508 + 0x200*i0; }
#define MDP5_INTF_HSYNC_CTL_PULSEW__MASK 0x0000ffff
#define MDP5_INTF_HSYNC_CTL_PULSEW__SHIFT 0
static inline uint32_t MDP5_INTF_HSYNC_CTL_PULSEW(uint32_t val)
{
	return ((val) << MDP5_INTF_HSYNC_CTL_PULSEW__SHIFT) & MDP5_INTF_HSYNC_CTL_PULSEW__MASK;
}
#define MDP5_INTF_HSYNC_CTL_PERIOD__MASK 0xffff0000
#define MDP5_INTF_HSYNC_CTL_PERIOD__SHIFT 16
static inline uint32_t MDP5_INTF_HSYNC_CTL_PERIOD(uint32_t val)
{
	return ((val) << MDP5_INTF_HSYNC_CTL_PERIOD__SHIFT) & MDP5_INTF_HSYNC_CTL_PERIOD__MASK;
}

static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F0(uint32_t i0) { return 0x0001250c + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F1(uint32_t i0) { return 0x00012510 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F0(uint32_t i0) { return 0x00012514 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F1(uint32_t i0) { return 0x00012518 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F0(uint32_t i0) { return 0x0001251c + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F1(uint32_t i0) { return 0x00012520 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F0(uint32_t i0) { return 0x00012524 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F1(uint32_t i0) { return 0x00012528 + 0x200*i0; }

/* ACTIVE_VSTART_F0: value in bits 30:0, enable in bit 31. */
static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F0(uint32_t i0) { return 0x0001252c + 0x200*i0; }
#define MDP5_INTF_ACTIVE_VSTART_F0_VAL__MASK 0x7fffffff
#define MDP5_INTF_ACTIVE_VSTART_F0_VAL__SHIFT 0
static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F0_VAL(uint32_t val)
{
	return ((val) << MDP5_INTF_ACTIVE_VSTART_F0_VAL__SHIFT) & MDP5_INTF_ACTIVE_VSTART_F0_VAL__MASK;
}
#define MDP5_INTF_ACTIVE_VSTART_F0_ACTIVE_V_ENABLE 0x80000000

static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F1(uint32_t i0) { return 0x00012530 + 0x200*i0; }
#define MDP5_INTF_ACTIVE_VSTART_F1_VAL__MASK 0x7fffffff
#define MDP5_INTF_ACTIVE_VSTART_F1_VAL__SHIFT 0
static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F1_VAL(uint32_t val)
{
	return ((val) << MDP5_INTF_ACTIVE_VSTART_F1_VAL__SHIFT) & MDP5_INTF_ACTIVE_VSTART_F1_VAL__MASK;
}

static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F0(uint32_t i0) { return 0x00012534 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F1(uint32_t i0) { return 0x00012538 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_DISPLAY_HCTL(uint32_t i0) { return 0x0001253c + 0x200*i0; }
#define MDP5_INTF_DISPLAY_HCTL_START__MASK 0x0000ffff
#define MDP5_INTF_DISPLAY_HCTL_START__SHIFT 0
static inline uint32_t MDP5_INTF_DISPLAY_HCTL_START(uint32_t val)
{
	return ((val) << MDP5_INTF_DISPLAY_HCTL_START__SHIFT) & MDP5_INTF_DISPLAY_HCTL_START__MASK;
}
#define MDP5_INTF_DISPLAY_HCTL_END__MASK 0xffff0000
#define MDP5_INTF_DISPLAY_HCTL_END__SHIFT 16
static inline uint32_t MDP5_INTF_DISPLAY_HCTL_END(uint32_t val)
{
	return ((val) << MDP5_INTF_DISPLAY_HCTL_END__SHIFT) & MDP5_INTF_DISPLAY_HCTL_END__MASK;
}

/* ACTIVE_HCTL: 15-bit start/end fields (vs 16-bit in DISPLAY_HCTL) plus
 * an enable bit in bit 31.
 */
static inline uint32_t REG_MDP5_INTF_ACTIVE_HCTL(uint32_t i0) { return 0x00012540 + 0x200*i0; }
#define MDP5_INTF_ACTIVE_HCTL_START__MASK 0x00007fff
#define MDP5_INTF_ACTIVE_HCTL_START__SHIFT 0
static inline uint32_t MDP5_INTF_ACTIVE_HCTL_START(uint32_t val)
{
	return ((val) << MDP5_INTF_ACTIVE_HCTL_START__SHIFT) & MDP5_INTF_ACTIVE_HCTL_START__MASK;
}
#define MDP5_INTF_ACTIVE_HCTL_END__MASK 0x7fff0000
#define MDP5_INTF_ACTIVE_HCTL_END__SHIFT 16
static inline uint32_t MDP5_INTF_ACTIVE_HCTL_END(uint32_t val)
{
	return ((val) << MDP5_INTF_ACTIVE_HCTL_END__SHIFT) & MDP5_INTF_ACTIVE_HCTL_END__MASK;
}
#define MDP5_INTF_ACTIVE_HCTL_ACTIVE_H_ENABLE 0x80000000

static inline uint32_t REG_MDP5_INTF_BORDER_COLOR(uint32_t i0) { return 0x00012544 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_UNDERFLOW_COLOR(uint32_t i0) { return 0x00012548 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_HSYNC_SKEW(uint32_t i0) { return 0x0001254c + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_POLARITY_CTL(uint32_t i0) { return 0x00012550 + 0x200*i0; }
#define MDP5_INTF_POLARITY_CTL_HSYNC_LOW 0x00000001
#define MDP5_INTF_POLARITY_CTL_VSYNC_LOW 0x00000002
#define MDP5_INTF_POLARITY_CTL_DATA_EN_LOW 0x00000004

static inline uint32_t REG_MDP5_INTF_TEST_CTL(uint32_t i0) { return 0x00012554 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_TP_COLOR0(uint32_t i0) { return 0x00012558 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_TP_COLOR1(uint32_t i0) { return 0x0001255c + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_DSI_CMD_MODE_TRIGGER_EN(uint32_t i0) { return 0x00012584 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_PANEL_FORMAT(uint32_t i0) { return 0x00012590 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_FRAME_LINE_COUNT_EN(uint32_t i0) { return 0x000125a8 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_FRAME_COUNT(uint32_t i0) { return 0x000125ac + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_LINE_COUNT(uint32_t i0) { return 0x000125b0 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_DEFLICKER_CONFIG(uint32_t i0) { return 0x000125f0 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_DEFLICKER_STRNG_COEFF(uint32_t i0) { return 0x000125f4 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_DEFLICKER_WEAK_COEFF(uint32_t i0) { return 0x000125f8 + 0x200*i0; }

/* Test pattern generator (TPG) block: */
static inline uint32_t REG_MDP5_INTF_TPG_ENABLE(uint32_t i0) { return 0x00012600 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_TPG_MAIN_CONTROL(uint32_t i0) { return 0x00012604 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_TPG_VIDEO_CONFIG(uint32_t i0) { return 0x00012608 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_TPG_COMPONENT_LIMITS(uint32_t i0) { return 0x0001260c + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_TPG_RECTANGLE(uint32_t i0) { return 0x00012610 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_TPG_INITIAL_VALUE(uint32_t i0) { return 0x00012614 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_TPG_BLK_WHITE_PATTERN_FRAME(uint32_t i0) { return 0x00012618 + 0x200*i0; }

static inline uint32_t REG_MDP5_INTF_TPG_RGB_MAPPING(uint32_t i0) { return 0x0001261c + 0x200*i0; }
| 964 | |||
/* Assertive display (AD) block registers, 0x200 stride per instance.
 * Generated accessors -- regenerate rather than hand-edit.
 */
static inline uint32_t REG_MDP5_AD(uint32_t i0) { return 0x00013100 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_BYPASS(uint32_t i0) { return 0x00013100 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_CTRL_0(uint32_t i0) { return 0x00013104 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_CTRL_1(uint32_t i0) { return 0x00013108 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_FRAME_SIZE(uint32_t i0) { return 0x0001310c + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_CON_CTRL_0(uint32_t i0) { return 0x00013110 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_CON_CTRL_1(uint32_t i0) { return 0x00013114 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_STR_MAN(uint32_t i0) { return 0x00013118 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_VAR(uint32_t i0) { return 0x0001311c + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_DITH(uint32_t i0) { return 0x00013120 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_DITH_CTRL(uint32_t i0) { return 0x00013124 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_AMP_LIM(uint32_t i0) { return 0x00013128 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_SLOPE(uint32_t i0) { return 0x0001312c + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_BW_LVL(uint32_t i0) { return 0x00013130 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_LOGO_POS(uint32_t i0) { return 0x00013134 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_LUT_FI(uint32_t i0) { return 0x00013138 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_LUT_CC(uint32_t i0) { return 0x0001317c + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_STR_LIM(uint32_t i0) { return 0x000131c8 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_CALIB_AB(uint32_t i0) { return 0x000131cc + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_CALIB_CD(uint32_t i0) { return 0x000131d0 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_MODE_SEL(uint32_t i0) { return 0x000131d4 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_TFILT_CTRL(uint32_t i0) { return 0x000131d8 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_BL_MINMAX(uint32_t i0) { return 0x000131dc + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_BL(uint32_t i0) { return 0x000131e0 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_BL_MAX(uint32_t i0) { return 0x000131e8 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_AL(uint32_t i0) { return 0x000131ec + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_AL_MIN(uint32_t i0) { return 0x000131f0 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_AL_FILT(uint32_t i0) { return 0x000131f4 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_CFG_BUF(uint32_t i0) { return 0x000131f8 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_LUT_AL(uint32_t i0) { return 0x00013200 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_TARG_STR(uint32_t i0) { return 0x00013244 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_START_CALC(uint32_t i0) { return 0x00013248 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_STR_OUT(uint32_t i0) { return 0x0001324c + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_BL_OUT(uint32_t i0) { return 0x00013254 + 0x200*i0; }

static inline uint32_t REG_MDP5_AD_CALC_DONE(uint32_t i0) { return 0x00013258 + 0x200*i0; }
| 1034 | |||
| 1035 | |||
| 1036 | #endif /* MDP5_XML */ | ||
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c new file mode 100644 index 000000000000..71a3b2345eb3 --- /dev/null +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | |||
| @@ -0,0 +1,569 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2013 Red Hat | ||
| 3 | * Author: Rob Clark <robdclark@gmail.com> | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify it | ||
| 6 | * under the terms of the GNU General Public License version 2 as published by | ||
| 7 | * the Free Software Foundation. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 12 | * more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License along with | ||
| 15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 16 | */ | ||
| 17 | |||
| 18 | #include "mdp5_kms.h" | ||
| 19 | |||
| 20 | #include <drm/drm_mode.h> | ||
| 21 | #include "drm_crtc.h" | ||
| 22 | #include "drm_crtc_helper.h" | ||
| 23 | #include "drm_flip_work.h" | ||
| 24 | |||
/* Per-CRTC driver state for the MDP5 display controller.  Embeds the
 * drm_crtc and tracks page-flip and framebuffer-lifetime bookkeeping.
 */
struct mdp5_crtc {
	struct drm_crtc base;
	char name[8];			/* human-readable name for debug output */
	struct drm_plane *plane;	/* primary plane */
	/* attached planes; NOTE(review): indexing scheme not visible here --
	 * crtc_flush()/complete_flip() just walk all non-NULL slots
	 */
	struct drm_plane *planes[8];
	int id;				/* crtc index; also used to index CTL flush reg */
	bool enabled;

	/* which mixer/encoder we route output to: */
	int mixer;

	/* if there is a pending flip, these will be non-null: */
	struct drm_pending_vblank_event *event;
	struct msm_fence_cb pageflip_cb;

#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2
	/* bitmask of PENDING_* work to be completed at next vblank */
	atomic_t pending;

	/* the fb that we logically (from PoV of KMS API) hold a ref
	 * to. Which we may not yet be scanning out (we may still
	 * be scanning out previous in case of page_flip while waiting
	 * for gpu rendering to complete:
	 */
	struct drm_framebuffer *fb;

	/* the fb that we currently hold a scanout ref to: */
	struct drm_framebuffer *scanout_fb;

	/* for unref'ing framebuffers after scanout completes: */
	struct drm_flip_work unref_fb_work;

	struct mdp_irq vblank;	/* registered to fire at vblank (see request_pending()) */
	struct mdp_irq err;	/* registered while crtc enabled (see mdp5_crtc_dpms()) */
};
#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)
| 61 | |||
| 62 | static struct mdp5_kms *get_kms(struct drm_crtc *crtc) | ||
| 63 | { | ||
| 64 | struct msm_drm_private *priv = crtc->dev->dev_private; | ||
| 65 | return to_mdp5_kms(to_mdp_kms(priv->kms)); | ||
| 66 | } | ||
| 67 | |||
| 68 | static void request_pending(struct drm_crtc *crtc, uint32_t pending) | ||
| 69 | { | ||
| 70 | struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); | ||
| 71 | |||
| 72 | atomic_or(pending, &mdp5_crtc->pending); | ||
| 73 | mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank); | ||
| 74 | } | ||
| 75 | |||
| 76 | static void crtc_flush(struct drm_crtc *crtc) | ||
| 77 | { | ||
| 78 | struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); | ||
| 79 | struct mdp5_kms *mdp5_kms = get_kms(crtc); | ||
| 80 | int id = mdp5_crtc->id; | ||
| 81 | uint32_t i, flush = 0; | ||
| 82 | |||
| 83 | for (i = 0; i < ARRAY_SIZE(mdp5_crtc->planes); i++) { | ||
| 84 | struct drm_plane *plane = mdp5_crtc->planes[i]; | ||
| 85 | if (plane) { | ||
| 86 | enum mdp5_pipe pipe = mdp5_plane_pipe(plane); | ||
| 87 | flush |= pipe2flush(pipe); | ||
| 88 | } | ||
| 89 | } | ||
| 90 | flush |= mixer2flush(mdp5_crtc->id); | ||
| 91 | flush |= MDP5_CTL_FLUSH_CTL; | ||
| 92 | |||
| 93 | DBG("%s: flush=%08x", mdp5_crtc->name, flush); | ||
| 94 | |||
| 95 | mdp5_write(mdp5_kms, REG_MDP5_CTL_FLUSH(id), flush); | ||
| 96 | } | ||
| 97 | |||
| 98 | static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb) | ||
| 99 | { | ||
| 100 | struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); | ||
| 101 | struct drm_framebuffer *old_fb = mdp5_crtc->fb; | ||
| 102 | |||
| 103 | /* grab reference to incoming scanout fb: */ | ||
| 104 | drm_framebuffer_reference(new_fb); | ||
| 105 | mdp5_crtc->base.fb = new_fb; | ||
| 106 | mdp5_crtc->fb = new_fb; | ||
| 107 | |||
| 108 | if (old_fb) | ||
| 109 | drm_flip_work_queue(&mdp5_crtc->unref_fb_work, old_fb); | ||
| 110 | } | ||
| 111 | |||
| 112 | /* unlike update_fb(), take a ref to the new scanout fb *before* updating | ||
| 113 | * plane, then call this. Needed to ensure we don't unref the buffer that | ||
| 114 | * is actually still being scanned out. | ||
| 115 | * | ||
| 116 | * Note that this whole thing goes away with atomic.. since we can defer | ||
| 117 | * calling into driver until rendering is done. | ||
| 118 | */ | ||
| 119 | static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb) | ||
| 120 | { | ||
| 121 | struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); | ||
| 122 | |||
| 123 | /* flush updates, to make sure hw is updated to new scanout fb, | ||
| 124 | * so that we can safely queue unref to current fb (ie. next | ||
| 125 | * vblank we know hw is done w/ previous scanout_fb). | ||
| 126 | */ | ||
| 127 | crtc_flush(crtc); | ||
| 128 | |||
| 129 | if (mdp5_crtc->scanout_fb) | ||
| 130 | drm_flip_work_queue(&mdp5_crtc->unref_fb_work, | ||
| 131 | mdp5_crtc->scanout_fb); | ||
| 132 | |||
| 133 | mdp5_crtc->scanout_fb = fb; | ||
| 134 | |||
| 135 | /* enable vblank to complete flip: */ | ||
| 136 | request_pending(crtc, PENDING_FLIP); | ||
| 137 | } | ||
| 138 | |||
| 139 | /* if file!=NULL, this is preclose potential cancel-flip path */ | ||
| 140 | static void complete_flip(struct drm_crtc *crtc, struct drm_file *file) | ||
| 141 | { | ||
| 142 | struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); | ||
| 143 | struct drm_device *dev = crtc->dev; | ||
| 144 | struct drm_pending_vblank_event *event; | ||
| 145 | unsigned long flags, i; | ||
| 146 | |||
| 147 | spin_lock_irqsave(&dev->event_lock, flags); | ||
| 148 | event = mdp5_crtc->event; | ||
| 149 | if (event) { | ||
| 150 | /* if regular vblank case (!file) or if cancel-flip from | ||
| 151 | * preclose on file that requested flip, then send the | ||
| 152 | * event: | ||
| 153 | */ | ||
| 154 | if (!file || (event->base.file_priv == file)) { | ||
| 155 | mdp5_crtc->event = NULL; | ||
| 156 | drm_send_vblank_event(dev, mdp5_crtc->id, event); | ||
| 157 | } | ||
| 158 | } | ||
| 159 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
| 160 | |||
| 161 | for (i = 0; i < ARRAY_SIZE(mdp5_crtc->planes); i++) { | ||
| 162 | struct drm_plane *plane = mdp5_crtc->planes[i]; | ||
| 163 | if (plane) | ||
| 164 | mdp5_plane_complete_flip(plane); | ||
| 165 | } | ||
| 166 | } | ||
| 167 | |||
| 168 | static void pageflip_cb(struct msm_fence_cb *cb) | ||
| 169 | { | ||
| 170 | struct mdp5_crtc *mdp5_crtc = | ||
| 171 | container_of(cb, struct mdp5_crtc, pageflip_cb); | ||
| 172 | struct drm_crtc *crtc = &mdp5_crtc->base; | ||
| 173 | struct drm_framebuffer *fb = mdp5_crtc->fb; | ||
| 174 | |||
| 175 | if (!fb) | ||
| 176 | return; | ||
| 177 | |||
| 178 | drm_framebuffer_reference(fb); | ||
| 179 | mdp5_plane_set_scanout(mdp5_crtc->plane, fb); | ||
| 180 | update_scanout(crtc, fb); | ||
| 181 | } | ||
| 182 | |||
| 183 | static void unref_fb_worker(struct drm_flip_work *work, void *val) | ||
| 184 | { | ||
| 185 | struct mdp5_crtc *mdp5_crtc = | ||
| 186 | container_of(work, struct mdp5_crtc, unref_fb_work); | ||
| 187 | struct drm_device *dev = mdp5_crtc->base.dev; | ||
| 188 | |||
| 189 | mutex_lock(&dev->mode_config.mutex); | ||
| 190 | drm_framebuffer_unreference(val); | ||
| 191 | mutex_unlock(&dev->mode_config.mutex); | ||
| 192 | } | ||
| 193 | |||
/* Tear down the crtc: also destroys the private plane that was
 * attached at init time, then the drm core state, the flip-work
 * queue, and finally the mdp5_crtc itself.
 */
static void mdp5_crtc_destroy(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	mdp5_crtc->plane->funcs->destroy(mdp5_crtc->plane);

	drm_crtc_cleanup(crtc);
	drm_flip_work_cleanup(&mdp5_crtc->unref_fb_work);

	kfree(mdp5_crtc);
}
| 205 | |||
| 206 | static void mdp5_crtc_dpms(struct drm_crtc *crtc, int mode) | ||
| 207 | { | ||
| 208 | struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); | ||
| 209 | struct mdp5_kms *mdp5_kms = get_kms(crtc); | ||
| 210 | bool enabled = (mode == DRM_MODE_DPMS_ON); | ||
| 211 | |||
| 212 | DBG("%s: mode=%d", mdp5_crtc->name, mode); | ||
| 213 | |||
| 214 | if (enabled != mdp5_crtc->enabled) { | ||
| 215 | if (enabled) { | ||
| 216 | mdp5_enable(mdp5_kms); | ||
| 217 | mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err); | ||
| 218 | } else { | ||
| 219 | mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err); | ||
| 220 | mdp5_disable(mdp5_kms); | ||
| 221 | } | ||
| 222 | mdp5_crtc->enabled = enabled; | ||
| 223 | } | ||
| 224 | } | ||
| 225 | |||
/* No mode adjustment needed; accept any mode the encoder agreed to. */
static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc,
		const struct drm_display_mode *mode,
		struct drm_display_mode *adjusted_mode)
{
	return true;
}
| 232 | |||
| 233 | static void blend_setup(struct drm_crtc *crtc) | ||
| 234 | { | ||
| 235 | struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); | ||
| 236 | struct mdp5_kms *mdp5_kms = get_kms(crtc); | ||
| 237 | int id = mdp5_crtc->id; | ||
| 238 | |||
| 239 | /* | ||
| 240 | * Hard-coded setup for now until I figure out how the | ||
| 241 | * layer-mixer works | ||
| 242 | */ | ||
| 243 | |||
| 244 | /* LM[id]: */ | ||
| 245 | mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(id), | ||
| 246 | MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA); | ||
| 247 | mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(id, 0), | ||
| 248 | MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) | | ||
| 249 | MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL) | | ||
| 250 | MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA); | ||
| 251 | mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(id, 0), 0xff); | ||
| 252 | mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(id, 0), 0x00); | ||
| 253 | |||
| 254 | /* NOTE: seems that LM[n] and CTL[m], we do not need n==m.. but | ||
| 255 | * we want to be setting CTL[m].LAYER[n]. Not sure what the | ||
| 256 | * point of having CTL[m].LAYER[o] (for o!=n).. maybe that is | ||
| 257 | * used when chaining up mixers for high resolution displays? | ||
| 258 | */ | ||
| 259 | |||
| 260 | /* CTL[id]: */ | ||
| 261 | mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 0), | ||
| 262 | MDP5_CTL_LAYER_REG_RGB0(STAGE0) | | ||
| 263 | MDP5_CTL_LAYER_REG_BORDER_COLOR); | ||
| 264 | mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 1), 0); | ||
| 265 | mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 2), 0); | ||
| 266 | mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 3), 0); | ||
| 267 | mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 4), 0); | ||
| 268 | } | ||
| 269 | |||
| 270 | static int mdp5_crtc_mode_set(struct drm_crtc *crtc, | ||
| 271 | struct drm_display_mode *mode, | ||
| 272 | struct drm_display_mode *adjusted_mode, | ||
| 273 | int x, int y, | ||
| 274 | struct drm_framebuffer *old_fb) | ||
| 275 | { | ||
| 276 | struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); | ||
| 277 | struct mdp5_kms *mdp5_kms = get_kms(crtc); | ||
| 278 | int ret; | ||
| 279 | |||
| 280 | mode = adjusted_mode; | ||
| 281 | |||
| 282 | DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", | ||
| 283 | mdp5_crtc->name, mode->base.id, mode->name, | ||
| 284 | mode->vrefresh, mode->clock, | ||
| 285 | mode->hdisplay, mode->hsync_start, | ||
| 286 | mode->hsync_end, mode->htotal, | ||
| 287 | mode->vdisplay, mode->vsync_start, | ||
| 288 | mode->vsync_end, mode->vtotal, | ||
| 289 | mode->type, mode->flags); | ||
| 290 | |||
| 291 | /* grab extra ref for update_scanout() */ | ||
| 292 | drm_framebuffer_reference(crtc->fb); | ||
| 293 | |||
| 294 | ret = mdp5_plane_mode_set(mdp5_crtc->plane, crtc, crtc->fb, | ||
| 295 | 0, 0, mode->hdisplay, mode->vdisplay, | ||
| 296 | x << 16, y << 16, | ||
| 297 | mode->hdisplay << 16, mode->vdisplay << 16); | ||
| 298 | if (ret) { | ||
| 299 | dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n", | ||
| 300 | mdp5_crtc->name, ret); | ||
| 301 | return ret; | ||
| 302 | } | ||
| 303 | |||
| 304 | mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->id), | ||
| 305 | MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) | | ||
| 306 | MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay)); | ||
| 307 | |||
| 308 | update_fb(crtc, crtc->fb); | ||
| 309 | update_scanout(crtc, crtc->fb); | ||
| 310 | |||
| 311 | return 0; | ||
| 312 | } | ||
| 313 | |||
| 314 | static void mdp5_crtc_prepare(struct drm_crtc *crtc) | ||
| 315 | { | ||
| 316 | struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); | ||
| 317 | DBG("%s", mdp5_crtc->name); | ||
| 318 | /* make sure we hold a ref to mdp clks while setting up mode: */ | ||
| 319 | mdp5_enable(get_kms(crtc)); | ||
| 320 | mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); | ||
| 321 | } | ||
| 322 | |||
/* Helper .commit: unblank after modeset and flush the new state.
 * Order matters: dpms-on first, then flush, then release the extra
 * clk reference taken in mdp5_crtc_prepare().
 */
static void mdp5_crtc_commit(struct drm_crtc *crtc)
{
	mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
	crtc_flush(crtc);
	/* drop the ref to mdp clk's that we got in prepare: */
	mdp5_disable(get_kms(crtc));
}
| 330 | |||
| 331 | static int mdp5_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, | ||
| 332 | struct drm_framebuffer *old_fb) | ||
| 333 | { | ||
| 334 | struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); | ||
| 335 | struct drm_plane *plane = mdp5_crtc->plane; | ||
| 336 | struct drm_display_mode *mode = &crtc->mode; | ||
| 337 | int ret; | ||
| 338 | |||
| 339 | /* grab extra ref for update_scanout() */ | ||
| 340 | drm_framebuffer_reference(crtc->fb); | ||
| 341 | |||
| 342 | ret = mdp5_plane_mode_set(plane, crtc, crtc->fb, | ||
| 343 | 0, 0, mode->hdisplay, mode->vdisplay, | ||
| 344 | x << 16, y << 16, | ||
| 345 | mode->hdisplay << 16, mode->vdisplay << 16); | ||
| 346 | |||
| 347 | update_fb(crtc, crtc->fb); | ||
| 348 | update_scanout(crtc, crtc->fb); | ||
| 349 | |||
| 350 | return ret; | ||
| 351 | } | ||
| 352 | |||
/* No gamma/LUT support; helper requires the hook so it is a no-op. */
static void mdp5_crtc_load_lut(struct drm_crtc *crtc)
{
}
| 356 | |||
| 357 | static int mdp5_crtc_page_flip(struct drm_crtc *crtc, | ||
| 358 | struct drm_framebuffer *new_fb, | ||
| 359 | struct drm_pending_vblank_event *event, | ||
| 360 | uint32_t page_flip_flags) | ||
| 361 | { | ||
| 362 | struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); | ||
| 363 | struct drm_device *dev = crtc->dev; | ||
| 364 | struct drm_gem_object *obj; | ||
| 365 | unsigned long flags; | ||
| 366 | |||
| 367 | if (mdp5_crtc->event) { | ||
| 368 | dev_err(dev->dev, "already pending flip!\n"); | ||
| 369 | return -EBUSY; | ||
| 370 | } | ||
| 371 | |||
| 372 | obj = msm_framebuffer_bo(new_fb, 0); | ||
| 373 | |||
| 374 | spin_lock_irqsave(&dev->event_lock, flags); | ||
| 375 | mdp5_crtc->event = event; | ||
| 376 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
| 377 | |||
| 378 | update_fb(crtc, new_fb); | ||
| 379 | |||
| 380 | return msm_gem_queue_inactive_cb(obj, &mdp5_crtc->pageflip_cb); | ||
| 381 | } | ||
| 382 | |||
/* No crtc properties are supported yet. */
static int mdp5_crtc_set_property(struct drm_crtc *crtc,
		struct drm_property *property, uint64_t val)
{
	// XXX
	return -EINVAL;
}
| 389 | |||
/* crtc core vfuncs; set_config comes from the legacy crtc helpers */
static const struct drm_crtc_funcs mdp5_crtc_funcs = {
	.set_config = drm_crtc_helper_set_config,
	.destroy = mdp5_crtc_destroy,
	.page_flip = mdp5_crtc_page_flip,
	.set_property = mdp5_crtc_set_property,
};
| 396 | |||
/* crtc helper vfuncs used by drm_crtc_helper_set_config() */
static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
	.dpms = mdp5_crtc_dpms,
	.mode_fixup = mdp5_crtc_mode_fixup,
	.mode_set = mdp5_crtc_mode_set,
	.prepare = mdp5_crtc_prepare,
	.commit = mdp5_crtc_commit,
	.mode_set_base = mdp5_crtc_mode_set_base,
	.load_lut = mdp5_crtc_load_lut,
};
| 406 | |||
/* vblank irq handler for this crtc: completes any pending flip and
 * kicks the deferred fb-unref work.  The irq registration is one-shot:
 * it unregisters itself before dispatching.
 */
static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
	struct drm_crtc *crtc = &mdp5_crtc->base;
	struct msm_drm_private *priv = crtc->dev->dev_private;
	unsigned pending;

	mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);

	/* atomically take (and clear) the pending bits */
	pending = atomic_xchg(&mdp5_crtc->pending, 0);

	if (pending & PENDING_FLIP) {
		complete_flip(crtc, NULL);
		/* old scanout fb is unref'd on the workqueue */
		drm_flip_work_commit(&mdp5_crtc->unref_fb_work, priv->wq);
	}
}
| 423 | |||
| 424 | static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus) | ||
| 425 | { | ||
| 426 | struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err); | ||
| 427 | struct drm_crtc *crtc = &mdp5_crtc->base; | ||
| 428 | DBG("%s: error: %08x", mdp5_crtc->name, irqstatus); | ||
| 429 | crtc_flush(crtc); | ||
| 430 | } | ||
| 431 | |||
| 432 | uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc) | ||
| 433 | { | ||
| 434 | struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); | ||
| 435 | return mdp5_crtc->vblank.irqmask; | ||
| 436 | } | ||
| 437 | |||
/* Called on file preclose: complete (i.e. cancel) a pending flip whose
 * event was requested by @file, so the event is not delivered to a
 * closed file.
 */
void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	DBG("cancel: %p", file);
	complete_flip(crtc, file);
}
| 443 | |||
/* set interface for routing crtc->encoder: */
void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
		enum mdp5_intf intf_id)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	static const enum mdp5_intfnum intfnum[] = {
			INTF0, INTF1, INTF2, INTF3,
	};
	uint32_t intf_sel;

	/* now that we know what irq's we want: */
	mdp5_crtc->err.irqmask = intf2err(intf);
	mdp5_crtc->vblank.irqmask = intf2vblank(intf);

	/* when called from modeset_init(), skip the rest until later: */
	if (!mdp5_kms)
		return;

	/* read-modify-write: only replace this interface's field */
	intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);

	switch (intf) {
	case 0:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf_id);
		break;
	case 1:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf_id);
		break;
	case 2:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf_id);
		break;
	case 3:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf_id);
		break;
	default:
		BUG();
		break;
	}

	blend_setup(crtc);

	DBG("%s: intf_sel=%08x", mdp5_crtc->name, intf_sel);

	mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
	mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(mdp5_crtc->id),
			MDP5_CTL_OP_MODE(MODE_NONE) |
			MDP5_CTL_OP_INTF_NUM(intfnum[intf]));

	crtc_flush(crtc);
}
| 498 | |||
| 499 | static void set_attach(struct drm_crtc *crtc, enum mdp5_pipe pipe_id, | ||
| 500 | struct drm_plane *plane) | ||
| 501 | { | ||
| 502 | struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); | ||
| 503 | |||
| 504 | BUG_ON(pipe_id >= ARRAY_SIZE(mdp5_crtc->planes)); | ||
| 505 | |||
| 506 | if (mdp5_crtc->planes[pipe_id] == plane) | ||
| 507 | return; | ||
| 508 | |||
| 509 | mdp5_crtc->planes[pipe_id] = plane; | ||
| 510 | blend_setup(crtc); | ||
| 511 | if (mdp5_crtc->enabled && (plane != mdp5_crtc->plane)) | ||
| 512 | crtc_flush(crtc); | ||
| 513 | } | ||
| 514 | |||
/* Attach @plane (by its hw pipe) to this crtc. */
void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
{
	set_attach(crtc, mdp5_plane_pipe(plane), plane);
}
| 519 | |||
/* Detach @plane's hw pipe from this crtc. */
void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane)
{
	set_attach(crtc, mdp5_plane_pipe(plane), NULL);
}
| 524 | |||
| 525 | /* initialize crtc */ | ||
| 526 | struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, | ||
| 527 | struct drm_plane *plane, int id) | ||
| 528 | { | ||
| 529 | struct drm_crtc *crtc = NULL; | ||
| 530 | struct mdp5_crtc *mdp5_crtc; | ||
| 531 | int ret; | ||
| 532 | |||
| 533 | mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL); | ||
| 534 | if (!mdp5_crtc) { | ||
| 535 | ret = -ENOMEM; | ||
| 536 | goto fail; | ||
| 537 | } | ||
| 538 | |||
| 539 | crtc = &mdp5_crtc->base; | ||
| 540 | |||
| 541 | mdp5_crtc->plane = plane; | ||
| 542 | mdp5_crtc->id = id; | ||
| 543 | |||
| 544 | mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq; | ||
| 545 | mdp5_crtc->err.irq = mdp5_crtc_err_irq; | ||
| 546 | |||
| 547 | snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d", | ||
| 548 | pipe2name(mdp5_plane_pipe(plane)), id); | ||
| 549 | |||
| 550 | ret = drm_flip_work_init(&mdp5_crtc->unref_fb_work, 16, | ||
| 551 | "unref fb", unref_fb_worker); | ||
| 552 | if (ret) | ||
| 553 | goto fail; | ||
| 554 | |||
| 555 | INIT_FENCE_CB(&mdp5_crtc->pageflip_cb, pageflip_cb); | ||
| 556 | |||
| 557 | drm_crtc_init(dev, crtc, &mdp5_crtc_funcs); | ||
| 558 | drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs); | ||
| 559 | |||
| 560 | mdp5_plane_install_properties(mdp5_crtc->plane, &crtc->base); | ||
| 561 | |||
| 562 | return crtc; | ||
| 563 | |||
| 564 | fail: | ||
| 565 | if (crtc) | ||
| 566 | mdp5_crtc_destroy(crtc); | ||
| 567 | |||
| 568 | return ERR_PTR(ret); | ||
| 569 | } | ||
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c new file mode 100644 index 000000000000..edec7bfaa952 --- /dev/null +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c | |||
| @@ -0,0 +1,258 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2013 Red Hat | ||
| 3 | * Author: Rob Clark <robdclark@gmail.com> | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify it | ||
| 6 | * under the terms of the GNU General Public License version 2 as published by | ||
| 7 | * the Free Software Foundation. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 12 | * more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License along with | ||
| 15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 16 | */ | ||
| 17 | |||
| 18 | #include "mdp5_kms.h" | ||
| 19 | |||
| 20 | #include "drm_crtc.h" | ||
| 21 | #include "drm_crtc_helper.h" | ||
| 22 | |||
/* Per-encoder state: which hw interface it drives, plus the bus-scale
 * client handle (bsc) used when CONFIG_MSM_BUS_SCALING is enabled.
 */
struct mdp5_encoder {
	struct drm_encoder base;
	int intf;                 /* interface index (0..3) */
	enum mdp5_intf intf_id;   /* interface type */
	bool enabled;             /* cached dpms state */
	uint32_t bsc;             /* bus scaling client handle (0 = none) */
};
#define to_mdp5_encoder(x) container_of(x, struct mdp5_encoder, base)
| 31 | |||
| 32 | static struct mdp5_kms *get_kms(struct drm_encoder *encoder) | ||
| 33 | { | ||
| 34 | struct msm_drm_private *priv = encoder->dev->dev_private; | ||
| 35 | return to_mdp5_kms(to_mdp_kms(priv->kms)); | ||
| 36 | } | ||
| 37 | |||
/* Bus-scaling support (downstream MSM-specific): request memory bus
 * bandwidth while the display is active.  Compiled out (no-op stubs)
 * when CONFIG_MSM_BUS_SCALING is not set.
 */
#ifdef CONFIG_MSM_BUS_SCALING
#include <mach/board.h>
#include <mach/msm_bus.h>
#include <mach/msm_bus_board.h>
#define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val)		\
	{						\
		.src = MSM_BUS_MASTER_MDP_PORT0,	\
		.dst = MSM_BUS_SLAVE_EBI_CH0,		\
		.ab = (ab_val),				\
		.ib = (ib_val),				\
	}

/* usecase 0 = idle (no bandwidth), usecase 1 = active */
static struct msm_bus_vectors mdp_bus_vectors[] = {
	MDP_BUS_VECTOR_ENTRY(0, 0),
	MDP_BUS_VECTOR_ENTRY(2000000000, 2000000000),
};
static struct msm_bus_paths mdp_bus_usecases[] = { {
		.num_paths = 1,
		.vectors = &mdp_bus_vectors[0],
}, {
		.num_paths = 1,
		.vectors = &mdp_bus_vectors[1],
} };
static struct msm_bus_scale_pdata mdp_bus_scale_table = {
	.usecase = mdp_bus_usecases,
	.num_usecases = ARRAY_SIZE(mdp_bus_usecases),
	.name = "mdss_mdp",
};

/* register the bus-scale client; bsc stays 0 on failure */
static void bs_init(struct mdp5_encoder *mdp5_encoder)
{
	mdp5_encoder->bsc = msm_bus_scale_register_client(
			&mdp_bus_scale_table);
	DBG("bus scale client: %08x", mdp5_encoder->bsc);
}

static void bs_fini(struct mdp5_encoder *mdp5_encoder)
{
	if (mdp5_encoder->bsc) {
		msm_bus_scale_unregister_client(mdp5_encoder->bsc);
		mdp5_encoder->bsc = 0;
	}
}

static void bs_set(struct mdp5_encoder *mdp5_encoder, int idx)
{
	if (mdp5_encoder->bsc) {
		DBG("set bus scaling: %d", idx);
		/* HACK: scaling down, and then immediately back up
		 * seems to leave things broken (underflow).. so
		 * never disable:
		 */
		idx = 1;
		msm_bus_scale_client_update_request(mdp5_encoder->bsc, idx);
	}
}
#else
static void bs_init(struct mdp5_encoder *mdp5_encoder) {}
static void bs_fini(struct mdp5_encoder *mdp5_encoder) {}
static void bs_set(struct mdp5_encoder *mdp5_encoder, int idx) {}
#endif
| 99 | |||
/* Tear down the encoder: release the bus-scale client, drm core
 * state, then the encoder itself.
 */
static void mdp5_encoder_destroy(struct drm_encoder *encoder)
{
	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
	bs_fini(mdp5_encoder);
	drm_encoder_cleanup(encoder);
	kfree(mdp5_encoder);
}
| 107 | |||
/* encoder core vfuncs */
static const struct drm_encoder_funcs mdp5_encoder_funcs = {
	.destroy = mdp5_encoder_destroy,
};
| 111 | |||
| 112 | static void mdp5_encoder_dpms(struct drm_encoder *encoder, int mode) | ||
| 113 | { | ||
| 114 | struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); | ||
| 115 | struct mdp5_kms *mdp5_kms = get_kms(encoder); | ||
| 116 | int intf = mdp5_encoder->intf; | ||
| 117 | bool enabled = (mode == DRM_MODE_DPMS_ON); | ||
| 118 | |||
| 119 | DBG("mode=%d", mode); | ||
| 120 | |||
| 121 | if (enabled == mdp5_encoder->enabled) | ||
| 122 | return; | ||
| 123 | |||
| 124 | if (enabled) { | ||
| 125 | bs_set(mdp5_encoder, 1); | ||
| 126 | mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1); | ||
| 127 | } else { | ||
| 128 | mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 0); | ||
| 129 | bs_set(mdp5_encoder, 0); | ||
| 130 | } | ||
| 131 | |||
| 132 | mdp5_encoder->enabled = enabled; | ||
| 133 | } | ||
| 134 | |||
/* No mode adjustment needed; accept the proposed mode as-is. */
static bool mdp5_encoder_mode_fixup(struct drm_encoder *encoder,
		const struct drm_display_mode *mode,
		struct drm_display_mode *adjusted_mode)
{
	return true;
}
| 141 | |||
/* Program the interface timing registers from the adjusted mode.
 * Horizontal values are in pixels, vertical values in lines scaled by
 * htotal (the hw counts vertical positions in pixel-clock ticks).
 */
static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
		struct drm_display_mode *mode,
		struct drm_display_mode *adjusted_mode)
{
	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
	struct mdp5_kms *mdp5_kms = get_kms(encoder);
	int intf = mdp5_encoder->intf;
	uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol;
	uint32_t display_v_start, display_v_end;
	uint32_t hsync_start_x, hsync_end_x;
	uint32_t format;

	mode = adjusted_mode;

	DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
			mode->base.id, mode->name,
			mode->vrefresh, mode->clock,
			mode->hdisplay, mode->hsync_start,
			mode->hsync_end, mode->htotal,
			mode->vdisplay, mode->vsync_start,
			mode->vsync_end, mode->vtotal,
			mode->type, mode->flags);

	/* sync polarities from the mode flags: */
	ctrl_pol = 0;
	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
		ctrl_pol |= MDP5_INTF_POLARITY_CTL_HSYNC_LOW;
	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
		ctrl_pol |= MDP5_INTF_POLARITY_CTL_VSYNC_LOW;
	/* probably need to get DATA_EN polarity from panel.. */

	dtv_hsync_skew = 0;  /* get this from panel? */
	format = 0x213f;     /* get this from panel? */

	/* active region boundaries within the line (pixels): */
	hsync_start_x = (mode->htotal - mode->hsync_start);
	hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1;

	/* vertical quantities in pixel-clock ticks (lines * htotal): */
	vsync_period = mode->vtotal * mode->htotal;
	vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal;
	display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew;
	display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1;

	mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_CTL(intf),
			MDP5_INTF_HSYNC_CTL_PULSEW(mode->hsync_end - mode->hsync_start) |
			MDP5_INTF_HSYNC_CTL_PERIOD(mode->htotal));
	mdp5_write(mdp5_kms, REG_MDP5_INTF_VSYNC_PERIOD_F0(intf), vsync_period);
	mdp5_write(mdp5_kms, REG_MDP5_INTF_VSYNC_LEN_F0(intf), vsync_len);
	mdp5_write(mdp5_kms, REG_MDP5_INTF_DISPLAY_HCTL(intf),
			MDP5_INTF_DISPLAY_HCTL_START(hsync_start_x) |
			MDP5_INTF_DISPLAY_HCTL_END(hsync_end_x));
	mdp5_write(mdp5_kms, REG_MDP5_INTF_DISPLAY_VSTART_F0(intf), display_v_start);
	mdp5_write(mdp5_kms, REG_MDP5_INTF_DISPLAY_VEND_F0(intf), display_v_end);
	mdp5_write(mdp5_kms, REG_MDP5_INTF_BORDER_COLOR(intf), 0);
	mdp5_write(mdp5_kms, REG_MDP5_INTF_UNDERFLOW_COLOR(intf), 0xff);
	mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_SKEW(intf), dtv_hsync_skew);
	mdp5_write(mdp5_kms, REG_MDP5_INTF_POLARITY_CTL(intf), ctrl_pol);
	/* active h/v windows disabled (zeroed): */
	mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_HCTL(intf),
			MDP5_INTF_ACTIVE_HCTL_START(0) |
			MDP5_INTF_ACTIVE_HCTL_END(0));
	mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_VSTART_F0(intf), 0);
	mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_VEND_F0(intf), 0);
	mdp5_write(mdp5_kms, REG_MDP5_INTF_PANEL_FORMAT(intf), format);
	mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(intf), 0x3);  /* frame+line? */
}
| 205 | |||
/* Helper .prepare: blank the encoder before modeset. */
static void mdp5_encoder_prepare(struct drm_encoder *encoder)
{
	mdp5_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
}
| 210 | |||
/* Helper .commit: route the crtc to this encoder's interface first,
 * then unblank.  Ordering matters: the intf must be selected before
 * the timing engine is enabled by dpms-on.
 */
static void mdp5_encoder_commit(struct drm_encoder *encoder)
{
	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
	mdp5_crtc_set_intf(encoder->crtc, mdp5_encoder->intf,
			mdp5_encoder->intf_id);
	mdp5_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
}
| 218 | |||
/* encoder helper vfuncs used by the legacy modeset helpers */
static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = {
	.dpms = mdp5_encoder_dpms,
	.mode_fixup = mdp5_encoder_mode_fixup,
	.mode_set = mdp5_encoder_mode_set,
	.prepare = mdp5_encoder_prepare,
	.commit = mdp5_encoder_commit,
};
| 226 | |||
| 227 | /* initialize encoder */ | ||
| 228 | struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, int intf, | ||
| 229 | enum mdp5_intf intf_id) | ||
| 230 | { | ||
| 231 | struct drm_encoder *encoder = NULL; | ||
| 232 | struct mdp5_encoder *mdp5_encoder; | ||
| 233 | int ret; | ||
| 234 | |||
| 235 | mdp5_encoder = kzalloc(sizeof(*mdp5_encoder), GFP_KERNEL); | ||
| 236 | if (!mdp5_encoder) { | ||
| 237 | ret = -ENOMEM; | ||
| 238 | goto fail; | ||
| 239 | } | ||
| 240 | |||
| 241 | mdp5_encoder->intf = intf; | ||
| 242 | mdp5_encoder->intf_id = intf_id; | ||
| 243 | encoder = &mdp5_encoder->base; | ||
| 244 | |||
| 245 | drm_encoder_init(dev, encoder, &mdp5_encoder_funcs, | ||
| 246 | DRM_MODE_ENCODER_TMDS); | ||
| 247 | drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs); | ||
| 248 | |||
| 249 | bs_init(mdp5_encoder); | ||
| 250 | |||
| 251 | return encoder; | ||
| 252 | |||
| 253 | fail: | ||
| 254 | if (encoder) | ||
| 255 | mdp5_encoder_destroy(encoder); | ||
| 256 | |||
| 257 | return ERR_PTR(ret); | ||
| 258 | } | ||
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c new file mode 100644 index 000000000000..353d494a497f --- /dev/null +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c | |||
| @@ -0,0 +1,111 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2013 Red Hat | ||
| 3 | * Author: Rob Clark <robdclark@gmail.com> | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify it | ||
| 6 | * under the terms of the GNU General Public License version 2 as published by | ||
| 7 | * the Free Software Foundation. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 12 | * more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License along with | ||
| 15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 16 | */ | ||
| 17 | |||
| 18 | |||
| 19 | #include "msm_drv.h" | ||
| 20 | #include "mdp5_kms.h" | ||
| 21 | |||
| 22 | void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask) | ||
| 23 | { | ||
| 24 | mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_INTR_EN, irqmask); | ||
| 25 | } | ||
| 26 | |||
/* Catch-all handler for the underrun error irqs registered below. */
static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
{
	DRM_ERROR("errors: %08x\n", irqstatus);
}
| 31 | |||
/* Ack any stale interrupt status before the irq handler is installed. */
void mdp5_irq_preinstall(struct msm_kms *kms)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff);
}
| 37 | |||
| 38 | int mdp5_irq_postinstall(struct msm_kms *kms) | ||
| 39 | { | ||
| 40 | struct mdp_kms *mdp_kms = to_mdp_kms(kms); | ||
| 41 | struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms); | ||
| 42 | struct mdp_irq *error_handler = &mdp5_kms->error_handler; | ||
| 43 | |||
| 44 | error_handler->irq = mdp5_irq_error_handler; | ||
| 45 | error_handler->irqmask = MDP5_IRQ_INTF0_UNDER_RUN | | ||
| 46 | MDP5_IRQ_INTF1_UNDER_RUN | | ||
| 47 | MDP5_IRQ_INTF2_UNDER_RUN | | ||
| 48 | MDP5_IRQ_INTF3_UNDER_RUN; | ||
| 49 | |||
| 50 | mdp_irq_register(mdp_kms, error_handler); | ||
| 51 | |||
| 52 | return 0; | ||
| 53 | } | ||
| 54 | |||
/* Mask all MDP interrupts on irq uninstall. */
void mdp5_irq_uninstall(struct msm_kms *kms)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
}
| 60 | |||
| 61 | static void mdp5_irq_mdp(struct mdp_kms *mdp_kms) | ||
| 62 | { | ||
| 63 | struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms); | ||
| 64 | struct drm_device *dev = mdp5_kms->dev; | ||
| 65 | struct msm_drm_private *priv = dev->dev_private; | ||
| 66 | unsigned int id; | ||
| 67 | uint32_t status; | ||
| 68 | |||
| 69 | status = mdp5_read(mdp5_kms, REG_MDP5_INTR_STATUS); | ||
| 70 | mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, status); | ||
| 71 | |||
| 72 | VERB("status=%08x", status); | ||
| 73 | |||
| 74 | for (id = 0; id < priv->num_crtcs; id++) | ||
| 75 | if (status & mdp5_crtc_vblank(priv->crtcs[id])) | ||
| 76 | drm_handle_vblank(dev, id); | ||
| 77 | |||
| 78 | mdp_dispatch_irqs(mdp_kms, status); | ||
| 79 | } | ||
| 80 | |||
| 81 | irqreturn_t mdp5_irq(struct msm_kms *kms) | ||
| 82 | { | ||
| 83 | struct mdp_kms *mdp_kms = to_mdp_kms(kms); | ||
| 84 | struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms); | ||
| 85 | uint32_t intr; | ||
| 86 | |||
| 87 | intr = mdp5_read(mdp5_kms, REG_MDP5_HW_INTR_STATUS); | ||
| 88 | |||
| 89 | VERB("intr=%08x", intr); | ||
| 90 | |||
| 91 | if (intr & MDP5_HW_INTR_STATUS_INTR_MDP) | ||
| 92 | mdp5_irq_mdp(mdp_kms); | ||
| 93 | |||
| 94 | if (intr & MDP5_HW_INTR_STATUS_INTR_HDMI) | ||
| 95 | hdmi_irq(0, mdp5_kms->hdmi); | ||
| 96 | |||
| 97 | return IRQ_HANDLED; | ||
| 98 | } | ||
| 99 | |||
/* drm vblank-on hook: add this crtc's bit to the vblank irq mask. */
int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
	mdp_update_vblank_mask(to_mdp_kms(kms),
			mdp5_crtc_vblank(crtc), true);
	return 0;
}
| 106 | |||
/* drm vblank-off hook: remove this crtc's bit from the vblank mask. */
void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
	mdp_update_vblank_mask(to_mdp_kms(kms),
			mdp5_crtc_vblank(crtc), false);
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c new file mode 100644 index 000000000000..ee8446c1b5f6 --- /dev/null +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | |||
| @@ -0,0 +1,350 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2013 Red Hat | ||
| 3 | * Author: Rob Clark <robdclark@gmail.com> | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify it | ||
| 6 | * under the terms of the GNU General Public License version 2 as published by | ||
| 7 | * the Free Software Foundation. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 12 | * more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License along with | ||
| 15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 16 | */ | ||
| 17 | |||
| 18 | |||
| 19 | #include "msm_drv.h" | ||
| 20 | #include "msm_mmu.h" | ||
| 21 | #include "mdp5_kms.h" | ||
| 22 | |||
| 23 | static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev); | ||
| 24 | |||
/* One-time hw init: probe the MDP version and reset interface/CTL
 * routing.  Runs with the device runtime-resumed; clocks are enabled
 * only around the version read.
 *
 * Returns 0, or -ENXIO for an unsupported MDP version.
 */
static int mdp5_hw_init(struct msm_kms *kms)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	struct drm_device *dev = mdp5_kms->dev;
	uint32_t version, major, minor;
	int ret = 0;

	pm_runtime_get_sync(dev->dev);

	mdp5_enable(mdp5_kms);
	version = mdp5_read(mdp5_kms, REG_MDP5_MDP_VERSION);
	mdp5_disable(mdp5_kms);

	major = FIELD(version, MDP5_MDP_VERSION_MAJOR);
	minor = FIELD(version, MDP5_MDP_VERSION_MINOR);

	DBG("found MDP5 version v%d.%d", major, minor);

	/* only v1.0 and v1.2 are supported: */
	if ((major != 1) || ((minor != 0) && (minor != 2))) {
		dev_err(dev->dev, "unexpected MDP version: v%d.%d\n",
				major, minor);
		ret = -ENXIO;
		goto out;
	}

	mdp5_kms->rev = minor;

	/* Magic unknown register writes:
	 *
	 *    W VBIF:0x004 00000001      (mdss_mdp.c:839)
	 *    W MDP5:0x2e0 0xe9          (mdss_mdp.c:839)
	 *    W MDP5:0x2e4 0x55          (mdss_mdp.c:839)
	 *    W MDP5:0x3ac 0xc0000ccc    (mdss_mdp.c:839)
	 *    W MDP5:0x3b4 0xc0000ccc    (mdss_mdp.c:839)
	 *    W MDP5:0x3bc 0xcccccc      (mdss_mdp.c:839)
	 *    W MDP5:0x4a8 0xcccc0c0     (mdss_mdp.c:839)
	 *    W MDP5:0x4b0 0xccccc0c0    (mdss_mdp.c:839)
	 *    W MDP5:0x4b8 0xccccc000    (mdss_mdp.c:839)
	 *
	 * Downstream fbdev driver gets these register offsets/values
	 * from DT.. not really sure what these registers are or if
	 * different values for different boards/SoC's, etc.  I guess
	 * they are the golden registers.
	 *
	 * Not setting these does not seem to cause any problem.  But
	 * we may be getting lucky with the bootloader initializing
	 * them for us.  OTOH, if we can always count on the bootloader
	 * setting the golden registers, then perhaps we don't need to
	 * care.
	 */

	/* clear any bootloader-left interface routing and CTL state: */
	mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
	mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(0), 0);
	mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(1), 0);
	mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(2), 0);
	mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(3), 0);

out:
	pm_runtime_put_sync(dev->dev);

	return ret;
}
| 87 | |||
/* kms hook: MDP5 itself imposes no pixel clock constraint, so the
 * requested rate is passed through unmodified.
 */
static long mdp5_round_pixclk(struct msm_kms *kms, unsigned long rate,
		struct drm_encoder *encoder)
{
	return rate;
}
| 93 | |||
| 94 | static void mdp5_preclose(struct msm_kms *kms, struct drm_file *file) | ||
| 95 | { | ||
| 96 | struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); | ||
| 97 | struct msm_drm_private *priv = mdp5_kms->dev->dev_private; | ||
| 98 | unsigned i; | ||
| 99 | |||
| 100 | for (i = 0; i < priv->num_crtcs; i++) | ||
| 101 | mdp5_crtc_cancel_pending_flip(priv->crtcs[i], file); | ||
| 102 | } | ||
| 103 | |||
/* kms hook: release the mdp5_kms allocation. */
static void mdp5_destroy(struct msm_kms *kms)
{
	kfree(to_mdp5_kms(to_mdp_kms(kms)));
}
| 109 | |||
/* vfunc table hooking the mdp5 implementation into the shared
 * mdp layer (set_irqmask) and the generic msm kms layer (.base).
 */
static const struct mdp_kms_funcs kms_funcs = {
	.base = {
		.hw_init         = mdp5_hw_init,
		.irq_preinstall  = mdp5_irq_preinstall,
		.irq_postinstall = mdp5_irq_postinstall,
		.irq_uninstall   = mdp5_irq_uninstall,
		.irq             = mdp5_irq,
		.enable_vblank   = mdp5_enable_vblank,
		.disable_vblank  = mdp5_disable_vblank,
		.get_format      = mdp_get_format,
		.round_pixclk    = mdp5_round_pixclk,
		.preclose        = mdp5_preclose,
		.destroy         = mdp5_destroy,
	},
	.set_irqmask = mdp5_set_irqmask,
};
| 126 | |||
/* Gate the MDP5 interface/bus clocks; counterpart to mdp5_enable().
 * Always returns 0 (callers currently ignore the result).
 */
int mdp5_disable(struct mdp5_kms *mdp5_kms)
{
	DBG("");

	clk_disable_unprepare(mdp5_kms->ahb_clk);
	clk_disable_unprepare(mdp5_kms->axi_clk);
	clk_disable_unprepare(mdp5_kms->core_clk);
	/* src_clk and vsync_clk are not gated here — only the four
	 * clocks managed by mdp5_enable() are */
	clk_disable_unprepare(mdp5_kms->lut_clk);

	return 0;
}
| 138 | |||
| 139 | int mdp5_enable(struct mdp5_kms *mdp5_kms) | ||
| 140 | { | ||
| 141 | DBG(""); | ||
| 142 | |||
| 143 | clk_prepare_enable(mdp5_kms->ahb_clk); | ||
| 144 | clk_prepare_enable(mdp5_kms->axi_clk); | ||
| 145 | clk_prepare_enable(mdp5_kms->core_clk); | ||
| 146 | clk_prepare_enable(mdp5_kms->lut_clk); | ||
| 147 | |||
| 148 | return 0; | ||
| 149 | } | ||
| 150 | |||
/* Construct the drm modeset objects: one private plane + crtc per RGB
 * pipe, plus a single encoder (INTF 3 / HDMI) and its bridge/connector.
 * Objects constructed before a failure are left for the drm core /
 * caller teardown path to clean up.
 */
static int modeset_init(struct mdp5_kms *mdp5_kms)
{
	static const enum mdp5_pipe crtcs[] = {
			SSPP_RGB0, SSPP_RGB1, SSPP_RGB2,
	};
	struct drm_device *dev = mdp5_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_encoder *encoder;
	int i, ret;

	/* construct CRTCs: */
	for (i = 0; i < ARRAY_SIZE(crtcs); i++) {
		struct drm_plane *plane;
		struct drm_crtc *crtc;

		/* each crtc gets a private (primary) plane: */
		plane = mdp5_plane_init(dev, crtcs[i], true);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			dev_err(dev->dev, "failed to construct plane for %s (%d)\n",
					pipe2name(crtcs[i]), ret);
			goto fail;
		}

		crtc  = mdp5_crtc_init(dev, plane, i);
		if (IS_ERR(crtc)) {
			ret = PTR_ERR(crtc);
			dev_err(dev->dev, "failed to construct crtc for %s (%d)\n",
					pipe2name(crtcs[i]), ret);
			goto fail;
		}
		priv->crtcs[priv->num_crtcs++] = crtc;
	}

	/* Construct encoder for HDMI: */
	encoder = mdp5_encoder_init(dev, 3, INTF_HDMI);
	if (IS_ERR(encoder)) {
		dev_err(dev->dev, "failed to construct encoder\n");
		ret = PTR_ERR(encoder);
		goto fail;
	}

	/* NOTE: the vsync and error irq's are actually associated with
	 * the INTF/encoder.. the easiest way to deal with this (ie. what
	 * we do now) is assume a fixed relationship between crtc's and
	 * encoders.  I'm not sure if there is ever a need to more freely
	 * assign crtcs to encoders, but if there is then we need to take
	 * care of error and vblank irq's that the crtc has registered,
	 * and also update user-requested vblank_mask.
	 */
	encoder->possible_crtcs = BIT(0);
	mdp5_crtc_set_intf(priv->crtcs[0], 3, INTF_HDMI);

	priv->encoders[priv->num_encoders++] = encoder;

	/* Construct bridge/connector for HDMI: */
	mdp5_kms->hdmi = hdmi_init(dev, encoder);
	if (IS_ERR(mdp5_kms->hdmi)) {
		ret = PTR_ERR(mdp5_kms->hdmi);
		dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
		goto fail;
	}

	return 0;

fail:
	return ret;
}
| 218 | |||
/* port name(s) handed to the mmu attach hook for scanout buffers: */
static const char *iommu_ports[] = {
		"mdp_0",
};
| 222 | |||
| 223 | static int get_clk(struct platform_device *pdev, struct clk **clkp, | ||
| 224 | const char *name) | ||
| 225 | { | ||
| 226 | struct device *dev = &pdev->dev; | ||
| 227 | struct clk *clk = devm_clk_get(dev, name); | ||
| 228 | if (IS_ERR(clk)) { | ||
| 229 | dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk)); | ||
| 230 | return PTR_ERR(clk); | ||
| 231 | } | ||
| 232 | *clkp = clk; | ||
| 233 | return 0; | ||
| 234 | } | ||
| 235 | |||
| 236 | struct msm_kms *mdp5_kms_init(struct drm_device *dev) | ||
| 237 | { | ||
| 238 | struct platform_device *pdev = dev->platformdev; | ||
| 239 | struct mdp5_platform_config *config = mdp5_get_config(pdev); | ||
| 240 | struct mdp5_kms *mdp5_kms; | ||
| 241 | struct msm_kms *kms = NULL; | ||
| 242 | struct msm_mmu *mmu; | ||
| 243 | int ret; | ||
| 244 | |||
| 245 | mdp5_kms = kzalloc(sizeof(*mdp5_kms), GFP_KERNEL); | ||
| 246 | if (!mdp5_kms) { | ||
| 247 | dev_err(dev->dev, "failed to allocate kms\n"); | ||
| 248 | ret = -ENOMEM; | ||
| 249 | goto fail; | ||
| 250 | } | ||
| 251 | |||
| 252 | mdp_kms_init(&mdp5_kms->base, &kms_funcs); | ||
| 253 | |||
| 254 | kms = &mdp5_kms->base.base; | ||
| 255 | |||
| 256 | mdp5_kms->dev = dev; | ||
| 257 | mdp5_kms->smp_blk_cnt = config->smp_blk_cnt; | ||
| 258 | |||
| 259 | mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5"); | ||
| 260 | if (IS_ERR(mdp5_kms->mmio)) { | ||
| 261 | ret = PTR_ERR(mdp5_kms->mmio); | ||
| 262 | goto fail; | ||
| 263 | } | ||
| 264 | |||
| 265 | mdp5_kms->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF"); | ||
| 266 | if (IS_ERR(mdp5_kms->vbif)) { | ||
| 267 | ret = PTR_ERR(mdp5_kms->vbif); | ||
| 268 | goto fail; | ||
| 269 | } | ||
| 270 | |||
| 271 | mdp5_kms->vdd = devm_regulator_get(&pdev->dev, "vdd"); | ||
| 272 | if (IS_ERR(mdp5_kms->vdd)) { | ||
| 273 | ret = PTR_ERR(mdp5_kms->vdd); | ||
| 274 | goto fail; | ||
| 275 | } | ||
| 276 | |||
| 277 | ret = regulator_enable(mdp5_kms->vdd); | ||
| 278 | if (ret) { | ||
| 279 | dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret); | ||
| 280 | goto fail; | ||
| 281 | } | ||
| 282 | |||
| 283 | ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus_clk") || | ||
| 284 | get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk") || | ||
| 285 | get_clk(pdev, &mdp5_kms->src_clk, "core_clk_src") || | ||
| 286 | get_clk(pdev, &mdp5_kms->core_clk, "core_clk") || | ||
| 287 | get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk") || | ||
| 288 | get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk"); | ||
| 289 | if (ret) | ||
| 290 | goto fail; | ||
| 291 | |||
| 292 | ret = clk_set_rate(mdp5_kms->src_clk, config->max_clk); | ||
| 293 | |||
| 294 | /* make sure things are off before attaching iommu (bootloader could | ||
| 295 | * have left things on, in which case we'll start getting faults if | ||
| 296 | * we don't disable): | ||
| 297 | */ | ||
| 298 | mdp5_enable(mdp5_kms); | ||
| 299 | mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(0), 0); | ||
| 300 | mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(1), 0); | ||
| 301 | mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(2), 0); | ||
| 302 | mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(3), 0); | ||
| 303 | mdp5_disable(mdp5_kms); | ||
| 304 | mdelay(16); | ||
| 305 | |||
| 306 | if (config->iommu) { | ||
| 307 | mmu = msm_iommu_new(dev, config->iommu); | ||
| 308 | if (IS_ERR(mmu)) { | ||
| 309 | ret = PTR_ERR(mmu); | ||
| 310 | goto fail; | ||
| 311 | } | ||
| 312 | ret = mmu->funcs->attach(mmu, iommu_ports, | ||
| 313 | ARRAY_SIZE(iommu_ports)); | ||
| 314 | if (ret) | ||
| 315 | goto fail; | ||
| 316 | } else { | ||
| 317 | dev_info(dev->dev, "no iommu, fallback to phys " | ||
| 318 | "contig buffers for scanout\n"); | ||
| 319 | mmu = NULL; | ||
| 320 | } | ||
| 321 | |||
| 322 | mdp5_kms->id = msm_register_mmu(dev, mmu); | ||
| 323 | if (mdp5_kms->id < 0) { | ||
| 324 | ret = mdp5_kms->id; | ||
| 325 | dev_err(dev->dev, "failed to register mdp5 iommu: %d\n", ret); | ||
| 326 | goto fail; | ||
| 327 | } | ||
| 328 | |||
| 329 | ret = modeset_init(mdp5_kms); | ||
| 330 | if (ret) { | ||
| 331 | dev_err(dev->dev, "modeset_init failed: %d\n", ret); | ||
| 332 | goto fail; | ||
| 333 | } | ||
| 334 | |||
| 335 | return kms; | ||
| 336 | |||
| 337 | fail: | ||
| 338 | if (kms) | ||
| 339 | mdp5_destroy(kms); | ||
| 340 | return ERR_PTR(ret); | ||
| 341 | } | ||
| 342 | |||
/* Fetch the platform config.  For now this is a zero-initialized
 * static stub; eventually values should come from DT (see the
 * CONFIG_OF TODO) or board pdata.
 */
static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev)
{
	static struct mdp5_platform_config config = {};
#ifdef CONFIG_OF
	/* TODO */
#endif
	return &config;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h new file mode 100644 index 000000000000..c8b1a2522c25 --- /dev/null +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h | |||
| @@ -0,0 +1,213 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2013 Red Hat | ||
| 3 | * Author: Rob Clark <robdclark@gmail.com> | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify it | ||
| 6 | * under the terms of the GNU General Public License version 2 as published by | ||
| 7 | * the Free Software Foundation. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 12 | * more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License along with | ||
| 15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 16 | */ | ||
| 17 | |||
| 18 | #ifndef __MDP5_KMS_H__ | ||
| 19 | #define __MDP5_KMS_H__ | ||
| 20 | |||
| 21 | #include "msm_drv.h" | ||
| 22 | #include "msm_kms.h" | ||
| 23 | #include "mdp/mdp_kms.h" | ||
| 24 | #include "mdp5.xml.h" | ||
| 25 | #include "mdp5_smp.h" | ||
| 26 | |||
/* per-device state for the mdp5 display controller */
struct mdp5_kms {
	struct mdp_kms base;

	struct drm_device *dev;

	/* hw revision minor (0 or 2), as read from REG_MDP5_MDP_VERSION */
	int rev;

	/* mapper-id used to request GEM buffer mapped for scanout: */
	int id;

	/* for tracking smp allocation amongst pipes: */
	mdp5_smp_state_t smp_state;
	struct mdp5_client_smp_state smp_client_state[CID_MAX];
	int smp_blk_cnt;

	/* io/register spaces: */
	void __iomem *mmio, *vbif;

	struct regulator *vdd;

	/* interface/bus clocks; ahb/axi/core/lut are gated together by
	 * mdp5_enable()/mdp5_disable() */
	struct clk *axi_clk;
	struct clk *ahb_clk;
	struct clk *src_clk;
	struct clk *core_clk;
	struct clk *lut_clk;
	struct clk *vsync_clk;

	struct hdmi *hdmi;

	struct mdp_irq error_handler;
};
#define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base)
| 59 | |||
/* platform config data (ie. from DT, or pdata) */
struct mdp5_platform_config {
	struct iommu_domain *iommu;	/* NULL -> phys contig scanout buffers */
	uint32_t max_clk;		/* rate set on core_clk_src */
	int smp_blk_cnt;		/* total SMP blocks available */
};
| 66 | |||
/* write a 32-bit value to an MDP5 register (byte offset into mmio) */
static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data)
{
	msm_writel(data, mdp5_kms->mmio + reg);
}
| 71 | |||
/* read a 32-bit MDP5 register (byte offset into mmio) */
static inline u32 mdp5_read(struct mdp5_kms *mdp5_kms, u32 reg)
{
	return msm_readl(mdp5_kms->mmio + reg);
}
| 76 | |||
/* human-readable pipe name for debug/error messages; the table is
 * indexed directly by the SSPP_* enum values via designated
 * initializers.  NOTE(review): an out-of-range pipe indexes past the
 * table — callers are trusted to pass valid pipes.
 */
static inline const char *pipe2name(enum mdp5_pipe pipe)
{
	static const char *names[] = {
#define NAME(n) [SSPP_ ## n] = #n
		NAME(VIG0), NAME(VIG1), NAME(VIG2),
		NAME(RGB0), NAME(RGB1), NAME(RGB2),
		NAME(DMA0), NAME(DMA1),
#undef NAME
	};
	return names[pipe];
}
| 88 | |||
/* map a pipe to its bit in the CTL FLUSH register (0 if unknown) */
static inline uint32_t pipe2flush(enum mdp5_pipe pipe)
{
	switch (pipe) {
	case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
	case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
	case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
	case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
	case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
	case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
	case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
	case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
	default:        return 0;
	}
}
| 103 | |||
| 104 | static inline int pipe2nclients(enum mdp5_pipe pipe) | ||
| 105 | { | ||
| 106 | switch (pipe) { | ||
| 107 | case SSPP_RGB0: | ||
| 108 | case SSPP_RGB1: | ||
| 109 | case SSPP_RGB2: | ||
| 110 | return 1; | ||
| 111 | default: | ||
| 112 | return 3; | ||
| 113 | } | ||
| 114 | } | ||
| 115 | |||
/* map (pipe, plane-index) to its SMP client id.  VIG/DMA client ids
 * are consecutive starting at the pipe's *_Y base, so the plane index
 * is simply added; RGB pipes have a single client regardless of plane.
 */
static inline enum mdp5_client_id pipe2client(enum mdp5_pipe pipe, int plane)
{
	WARN_ON(plane >= pipe2nclients(pipe));
	switch (pipe) {
	case SSPP_VIG0: return CID_VIG0_Y + plane;
	case SSPP_VIG1: return CID_VIG1_Y + plane;
	case SSPP_VIG2: return CID_VIG2_Y + plane;
	case SSPP_RGB0: return CID_RGB0;
	case SSPP_RGB1: return CID_RGB1;
	case SSPP_RGB2: return CID_RGB2;
	case SSPP_DMA0: return CID_DMA0_Y + plane;
	case SSPP_DMA1: return CID_DMA1_Y + plane;
	default:        return CID_UNUSED;
	}
}
| 131 | |||
/* map a layer-mixer index to its bit in the CTL FLUSH register
 * (0 if out of range) */
static inline uint32_t mixer2flush(int lm)
{
	switch (lm) {
	case 0:  return MDP5_CTL_FLUSH_LM0;
	case 1:  return MDP5_CTL_FLUSH_LM1;
	case 2:  return MDP5_CTL_FLUSH_LM2;
	default: return 0;
	}
}
| 141 | |||
/* map an INTF index to its underrun-error irq bit (0 if out of range) */
static inline uint32_t intf2err(int intf)
{
	switch (intf) {
	case 0:  return MDP5_IRQ_INTF0_UNDER_RUN;
	case 1:  return MDP5_IRQ_INTF1_UNDER_RUN;
	case 2:  return MDP5_IRQ_INTF2_UNDER_RUN;
	case 3:  return MDP5_IRQ_INTF3_UNDER_RUN;
	default: return 0;
	}
}
| 152 | |||
/* map an INTF index to its vsync irq bit (0 if out of range) */
static inline uint32_t intf2vblank(int intf)
{
	switch (intf) {
	case 0:  return MDP5_IRQ_INTF0_VSYNC;
	case 1:  return MDP5_IRQ_INTF1_VSYNC;
	case 2:  return MDP5_IRQ_INTF2_VSYNC;
	case 3:  return MDP5_IRQ_INTF3_VSYNC;
	default: return 0;
	}
}
| 163 | |||
| 164 | int mdp5_disable(struct mdp5_kms *mdp5_kms); | ||
| 165 | int mdp5_enable(struct mdp5_kms *mdp5_kms); | ||
| 166 | |||
| 167 | void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask); | ||
| 168 | void mdp5_irq_preinstall(struct msm_kms *kms); | ||
| 169 | int mdp5_irq_postinstall(struct msm_kms *kms); | ||
| 170 | void mdp5_irq_uninstall(struct msm_kms *kms); | ||
| 171 | irqreturn_t mdp5_irq(struct msm_kms *kms); | ||
| 172 | int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); | ||
| 173 | void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); | ||
| 174 | |||
/* fill pixel_formats (up to max_formats entries) with the formats the
 * given pipe supports; returns the number of formats written.
 */
static inline
uint32_t mdp5_get_formats(enum mdp5_pipe pipe, uint32_t *pixel_formats,
		uint32_t max_formats)
{
	/* TODO when we have YUV, we need to filter supported formats
	 * based on pipe id.. for now all pipes share the common list.
	 */
	return mdp_get_formats(pixel_formats, max_formats);
}
| 184 | |||
| 185 | void mdp5_plane_install_properties(struct drm_plane *plane, | ||
| 186 | struct drm_mode_object *obj); | ||
| 187 | void mdp5_plane_set_scanout(struct drm_plane *plane, | ||
| 188 | struct drm_framebuffer *fb); | ||
| 189 | int mdp5_plane_mode_set(struct drm_plane *plane, | ||
| 190 | struct drm_crtc *crtc, struct drm_framebuffer *fb, | ||
| 191 | int crtc_x, int crtc_y, | ||
| 192 | unsigned int crtc_w, unsigned int crtc_h, | ||
| 193 | uint32_t src_x, uint32_t src_y, | ||
| 194 | uint32_t src_w, uint32_t src_h); | ||
| 195 | void mdp5_plane_complete_flip(struct drm_plane *plane); | ||
| 196 | enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane); | ||
| 197 | struct drm_plane *mdp5_plane_init(struct drm_device *dev, | ||
| 198 | enum mdp5_pipe pipe, bool private_plane); | ||
| 199 | |||
| 200 | uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc); | ||
| 201 | |||
| 202 | void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file); | ||
| 203 | void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf, | ||
| 204 | enum mdp5_intf intf_id); | ||
| 205 | void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane); | ||
| 206 | void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane); | ||
| 207 | struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, | ||
| 208 | struct drm_plane *plane, int id); | ||
| 209 | |||
| 210 | struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, int intf, | ||
| 211 | enum mdp5_intf intf_id); | ||
| 212 | |||
| 213 | #endif /* __MDP5_KMS_H__ */ | ||
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c new file mode 100644 index 000000000000..0ac8bb5e7e85 --- /dev/null +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | |||
| @@ -0,0 +1,389 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2013 Red Hat | ||
| 3 | * Author: Rob Clark <robdclark@gmail.com> | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify it | ||
| 6 | * under the terms of the GNU General Public License version 2 as published by | ||
| 7 | * the Free Software Foundation. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 12 | * more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License along with | ||
| 15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 16 | */ | ||
| 17 | |||
| 18 | #include "mdp5_kms.h" | ||
| 19 | |||
| 20 | |||
/* per-plane state; wraps the drm plane around one hw pipe (SSPP) */
struct mdp5_plane {
	struct drm_plane base;
	const char *name;	/* pipe name, used in debug/error messages */

	enum mdp5_pipe pipe;	/* the hw pipe backing this plane */

	/* supported formats (first nformats entries valid) —
	 * presumably populated by mdp5_plane_init(); not visible here */
	uint32_t nformats;
	uint32_t formats[32];

	bool enabled;
};
#define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base)
| 33 | |||
| 34 | static struct mdp5_kms *get_kms(struct drm_plane *plane) | ||
| 35 | { | ||
| 36 | struct msm_drm_private *priv = plane->dev->dev_private; | ||
| 37 | return to_mdp5_kms(to_mdp_kms(priv->kms)); | ||
| 38 | } | ||
| 39 | |||
| 40 | static int mdp5_plane_update(struct drm_plane *plane, | ||
| 41 | struct drm_crtc *crtc, struct drm_framebuffer *fb, | ||
| 42 | int crtc_x, int crtc_y, | ||
| 43 | unsigned int crtc_w, unsigned int crtc_h, | ||
| 44 | uint32_t src_x, uint32_t src_y, | ||
| 45 | uint32_t src_w, uint32_t src_h) | ||
| 46 | { | ||
| 47 | struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); | ||
| 48 | |||
| 49 | mdp5_plane->enabled = true; | ||
| 50 | |||
| 51 | if (plane->fb) | ||
| 52 | drm_framebuffer_unreference(plane->fb); | ||
| 53 | |||
| 54 | drm_framebuffer_reference(fb); | ||
| 55 | |||
| 56 | return mdp5_plane_mode_set(plane, crtc, fb, | ||
| 57 | crtc_x, crtc_y, crtc_w, crtc_h, | ||
| 58 | src_x, src_y, src_w, src_h); | ||
| 59 | } | ||
| 60 | |||
/* drm_plane_funcs.disable_plane: release this pipe's SMP allocations
 * and detach from its crtc.  Always returns 0.
 * NOTE(review): mdp5_plane->enabled is not cleared here — confirm
 * whether that is intentional.
 */
static int mdp5_plane_disable(struct drm_plane *plane)
{
	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
	struct mdp5_kms *mdp5_kms = get_kms(plane);
	enum mdp5_pipe pipe = mdp5_plane->pipe;
	int i;

	DBG("%s: disable", mdp5_plane->name);

	/* update our SMP request to zero (release all our blks): */
	for (i = 0; i < pipe2nclients(pipe); i++)
		mdp5_smp_request(mdp5_kms, pipe2client(pipe, i), 0);

	/* TODO detaching now will cause us not to get the last
	 * vblank and mdp5_smp_commit().. so other planes will
	 * still see smp blocks previously allocated to us as
	 * in-use..
	 */
	if (plane->crtc)
		mdp5_crtc_detach(plane->crtc, plane);

	return 0;
}
| 84 | |||
/* drm_plane_funcs.destroy: quiesce the pipe, tear down the drm plane,
 * and free our state.
 */
static void mdp5_plane_destroy(struct drm_plane *plane)
{
	mdp5_plane_disable(plane);
	drm_plane_cleanup(plane);

	kfree(to_mdp5_plane(plane));
}
| 94 | |||
/* helper to install properties which are common to planes and crtcs;
 * not implemented yet */
void mdp5_plane_install_properties(struct drm_plane *plane,
		struct drm_mode_object *obj)
{
	// XXX
}
| 101 | |||
/* drm_plane_funcs.set_property: no driver-specific plane properties
 * yet, so every property is rejected */
int mdp5_plane_set_property(struct drm_plane *plane,
		struct drm_property *property, uint64_t val)
{
	// XXX
	return -EINVAL;
}
| 108 | |||
/* legacy (pre-atomic) drm plane vfuncs */
static const struct drm_plane_funcs mdp5_plane_funcs = {
		.update_plane = mdp5_plane_update,
		.disable_plane = mdp5_plane_disable,
		.destroy = mdp5_plane_destroy,
		.set_property = mdp5_plane_set_property,
};
| 115 | |||
/* Program the pipe's scanout source from the given framebuffer:
 * per-plane strides and iova's (unused of the 4 hw slots are zeroed).
 * Also records fb as the plane's current framebuffer.
 * NOTE(review): the msm_gem_get_iova() return value is ignored —
 * confirm the buffers are guaranteed to be pinned/mapped by this point.
 */
void mdp5_plane_set_scanout(struct drm_plane *plane,
		struct drm_framebuffer *fb)
{
	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
	struct mdp5_kms *mdp5_kms = get_kms(plane);
	enum mdp5_pipe pipe = mdp5_plane->pipe;
	uint32_t nplanes = drm_format_num_planes(fb->pixel_format);
	uint32_t iova[4];
	int i;

	for (i = 0; i < nplanes; i++) {
		struct drm_gem_object *bo = msm_framebuffer_bo(fb, i);
		msm_gem_get_iova(bo, mdp5_kms->id, &iova[i]);
	}
	/* zero the hw slots beyond the fb's plane count: */
	for (; i < 4; i++)
		iova[i] = 0;

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
			MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
			MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_B(pipe),
			MDP5_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
			MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe), iova[0]);
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe), iova[1]);
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe), iova[2]);
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe), iova[3]);

	plane->fb = fb;
}
| 148 | |||
/* NOTE: looks like if horizontal decimation is used (if we supported that)
 * then the width used to calculate SMP block requirements is the post-
 * decimated width.  Ie. SMP buffering sits downstream of decimation (which
 * presumably happens during the dma from scanout buffer).
 */
/* Request the SMP blocks needed to fetch `nplanes` planes of the given
 * format at `width` pixels.  Returns the total number of blocks
 * allocated, or a negative error code if any per-plane request fails.
 */
static int request_smp_blocks(struct drm_plane *plane, uint32_t format,
		uint32_t nplanes, uint32_t width)
{
	struct drm_device *dev = plane->dev;
	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
	struct mdp5_kms *mdp5_kms = get_kms(plane);
	enum mdp5_pipe pipe = mdp5_plane->pipe;
	int i, hsub, nlines, nblks, ret;

	hsub = drm_format_horz_chroma_subsampling(format);

	/* different if BWC (compressed framebuffer?) enabled: */
	nlines = 2;

	for (i = 0, nblks = 0; i < nplanes; i++) {
		int n, fetch_stride, cpp;

		cpp = drm_format_plane_cpp(format, i);
		/* chroma planes (i > 0) are horizontally subsampled: */
		fetch_stride = width * cpp / (i ? hsub : 1);

		n = DIV_ROUND_UP(fetch_stride * nlines, SMP_BLK_SIZE);

		/* for hw rev v1.00 */
		if (mdp5_kms->rev == 0)
			n = roundup_pow_of_two(n);

		DBG("%s[%d]: request %d SMP blocks", mdp5_plane->name, i, n);
		ret = mdp5_smp_request(mdp5_kms, pipe2client(pipe, i), n);
		if (ret) {
			dev_err(dev->dev, "Could not allocate %d SMP blocks: %d\n",
					n, ret);
			return ret;
		}

		nblks += n;
	}

	/* in success case, return total # of blocks allocated: */
	return nblks;
}
| 194 | |||
/* Program the pipe's three requestor-priority FIFO watermarks, spaced
 * at 1x/2x/3x of a quarter of the SMP entries allocated to this pipe.
 */
static void set_fifo_thresholds(struct drm_plane *plane, int nblks)
{
	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
	struct mdp5_kms *mdp5_kms = get_kms(plane);
	enum mdp5_pipe pipe = mdp5_plane->pipe;
	uint32_t val;

	/* 1/4 of SMP pool that is being fetched */
	val = (nblks * SMP_ENTRIES_PER_BLK) / 4;

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), val * 1);
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), val * 2);
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), val * 3);

}
| 210 | |||
| 211 | int mdp5_plane_mode_set(struct drm_plane *plane, | ||
| 212 | struct drm_crtc *crtc, struct drm_framebuffer *fb, | ||
| 213 | int crtc_x, int crtc_y, | ||
| 214 | unsigned int crtc_w, unsigned int crtc_h, | ||
| 215 | uint32_t src_x, uint32_t src_y, | ||
| 216 | uint32_t src_w, uint32_t src_h) | ||
| 217 | { | ||
| 218 | struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); | ||
| 219 | struct mdp5_kms *mdp5_kms = get_kms(plane); | ||
| 220 | enum mdp5_pipe pipe = mdp5_plane->pipe; | ||
| 221 | const struct mdp_format *format; | ||
| 222 | uint32_t nplanes, config = 0; | ||
| 223 | uint32_t phasex_step = 0, phasey_step = 0; | ||
| 224 | uint32_t hdecm = 0, vdecm = 0; | ||
| 225 | int i, nblks; | ||
| 226 | |||
| 227 | nplanes = drm_format_num_planes(fb->pixel_format); | ||
| 228 | |||
| 229 | /* bad formats should already be rejected: */ | ||
| 230 | if (WARN_ON(nplanes > pipe2nclients(pipe))) | ||
| 231 | return -EINVAL; | ||
| 232 | |||
| 233 | /* src values are in Q16 fixed point, convert to integer: */ | ||
| 234 | src_x = src_x >> 16; | ||
| 235 | src_y = src_y >> 16; | ||
| 236 | src_w = src_w >> 16; | ||
| 237 | src_h = src_h >> 16; | ||
| 238 | |||
| 239 | DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", mdp5_plane->name, | ||
| 240 | fb->base.id, src_x, src_y, src_w, src_h, | ||
| 241 | crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h); | ||
| 242 | |||
| 243 | /* | ||
| 244 | * Calculate and request required # of smp blocks: | ||
| 245 | */ | ||
| 246 | nblks = request_smp_blocks(plane, fb->pixel_format, nplanes, src_w); | ||
| 247 | if (nblks < 0) | ||
| 248 | return nblks; | ||
| 249 | |||
| 250 | /* | ||
| 251 | * Currently we update the hw for allocations/requests immediately, | ||
| 252 | * but once atomic modeset/pageflip is in place, the allocation | ||
| 253 | * would move into atomic->check_plane_state(), while updating the | ||
| 254 | * hw would remain here: | ||
| 255 | */ | ||
| 256 | for (i = 0; i < pipe2nclients(pipe); i++) | ||
| 257 | mdp5_smp_configure(mdp5_kms, pipe2client(pipe, i)); | ||
| 258 | |||
| 259 | if (src_w != crtc_w) { | ||
| 260 | config |= MDP5_PIPE_SCALE_CONFIG_SCALEX_EN; | ||
| 261 | /* TODO calc phasex_step, hdecm */ | ||
| 262 | } | ||
| 263 | |||
| 264 | if (src_h != crtc_h) { | ||
| 265 | config |= MDP5_PIPE_SCALE_CONFIG_SCALEY_EN; | ||
| 266 | /* TODO calc phasey_step, vdecm */ | ||
| 267 | } | ||
| 268 | |||
| 269 | mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe), | ||
| 270 | MDP5_PIPE_SRC_IMG_SIZE_WIDTH(src_w) | | ||
| 271 | MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(src_h)); | ||
| 272 | |||
| 273 | mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_SIZE(pipe), | ||
| 274 | MDP5_PIPE_SRC_SIZE_WIDTH(src_w) | | ||
| 275 | MDP5_PIPE_SRC_SIZE_HEIGHT(src_h)); | ||
| 276 | |||
| 277 | mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_XY(pipe), | ||
| 278 | MDP5_PIPE_SRC_XY_X(src_x) | | ||
| 279 | MDP5_PIPE_SRC_XY_Y(src_y)); | ||
| 280 | |||
| 281 | mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_SIZE(pipe), | ||
| 282 | MDP5_PIPE_OUT_SIZE_WIDTH(crtc_w) | | ||
| 283 | MDP5_PIPE_OUT_SIZE_HEIGHT(crtc_h)); | ||
| 284 | |||
| 285 | mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_XY(pipe), | ||
| 286 | MDP5_PIPE_OUT_XY_X(crtc_x) | | ||
| 287 | MDP5_PIPE_OUT_XY_Y(crtc_y)); | ||
| 288 | |||
| 289 | mdp5_plane_set_scanout(plane, fb); | ||
| 290 | |||
| 291 | format = to_mdp_format(msm_framebuffer_format(fb)); | ||
| 292 | |||
| 293 | mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe), | ||
| 294 | MDP5_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) | | ||
| 295 | MDP5_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) | | ||
| 296 | MDP5_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) | | ||
| 297 | MDP5_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) | | ||
| 298 | COND(format->alpha_enable, MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE) | | ||
| 299 | MDP5_PIPE_SRC_FORMAT_CPP(format->cpp - 1) | | ||
| 300 | MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) | | ||
| 301 | COND(format->unpack_tight, MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT) | | ||
| 302 | MDP5_PIPE_SRC_FORMAT_NUM_PLANES(nplanes - 1) | | ||
| 303 | MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(CHROMA_RGB)); | ||
| 304 | |||
| 305 | mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_UNPACK(pipe), | ||
| 306 | MDP5_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) | | ||
| 307 | MDP5_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) | | ||
| 308 | MDP5_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) | | ||
| 309 | MDP5_PIPE_SRC_UNPACK_ELEM3(format->unpack[3])); | ||
| 310 | |||
| 311 | mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_OP_MODE(pipe), | ||
| 312 | MDP5_PIPE_SRC_OP_MODE_BWC(BWC_LOSSLESS)); | ||
| 313 | |||
| 314 | /* not using secure mode: */ | ||
| 315 | mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0); | ||
| 316 | |||
| 317 | mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe), phasex_step); | ||
| 318 | mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe), phasey_step); | ||
| 319 | mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe), | ||
| 320 | MDP5_PIPE_DECIMATION_VERT(vdecm) | | ||
| 321 | MDP5_PIPE_DECIMATION_HORZ(hdecm)); | ||
| 322 | mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe), | ||
| 323 | MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER(SCALE_FILTER_NEAREST) | | ||
| 324 | MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER(SCALE_FILTER_NEAREST) | | ||
| 325 | MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER(SCALE_FILTER_NEAREST) | | ||
| 326 | MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER(SCALE_FILTER_NEAREST) | | ||
| 327 | MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(SCALE_FILTER_NEAREST) | | ||
| 328 | MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(SCALE_FILTER_NEAREST)); | ||
| 329 | |||
| 330 | set_fifo_thresholds(plane, nblks); | ||
| 331 | |||
| 332 | /* TODO detach from old crtc (if we had more than one) */ | ||
| 333 | mdp5_crtc_attach(crtc, plane); | ||
| 334 | |||
| 335 | return 0; | ||
| 336 | } | ||
| 337 | |||
| 338 | void mdp5_plane_complete_flip(struct drm_plane *plane) | ||
| 339 | { | ||
| 340 | struct mdp5_kms *mdp5_kms = get_kms(plane); | ||
| 341 | enum mdp5_pipe pipe = to_mdp5_plane(plane)->pipe; | ||
| 342 | int i; | ||
| 343 | |||
| 344 | for (i = 0; i < pipe2nclients(pipe); i++) | ||
| 345 | mdp5_smp_commit(mdp5_kms, pipe2client(pipe, i)); | ||
| 346 | } | ||
| 347 | |||
| 348 | enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane) | ||
| 349 | { | ||
| 350 | struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); | ||
| 351 | return mdp5_plane->pipe; | ||
| 352 | } | ||
| 353 | |||
| 354 | /* initialize plane */ | ||
| 355 | struct drm_plane *mdp5_plane_init(struct drm_device *dev, | ||
| 356 | enum mdp5_pipe pipe, bool private_plane) | ||
| 357 | { | ||
| 358 | struct drm_plane *plane = NULL; | ||
| 359 | struct mdp5_plane *mdp5_plane; | ||
| 360 | int ret; | ||
| 361 | |||
| 362 | mdp5_plane = kzalloc(sizeof(*mdp5_plane), GFP_KERNEL); | ||
| 363 | if (!mdp5_plane) { | ||
| 364 | ret = -ENOMEM; | ||
| 365 | goto fail; | ||
| 366 | } | ||
| 367 | |||
| 368 | plane = &mdp5_plane->base; | ||
| 369 | |||
| 370 | mdp5_plane->pipe = pipe; | ||
| 371 | mdp5_plane->name = pipe2name(pipe); | ||
| 372 | |||
| 373 | mdp5_plane->nformats = mdp5_get_formats(pipe, mdp5_plane->formats, | ||
| 374 | ARRAY_SIZE(mdp5_plane->formats)); | ||
| 375 | |||
| 376 | drm_plane_init(dev, plane, 0xff, &mdp5_plane_funcs, | ||
| 377 | mdp5_plane->formats, mdp5_plane->nformats, | ||
| 378 | private_plane); | ||
| 379 | |||
| 380 | mdp5_plane_install_properties(plane, &plane->base); | ||
| 381 | |||
| 382 | return plane; | ||
| 383 | |||
| 384 | fail: | ||
| 385 | if (plane) | ||
| 386 | mdp5_plane_destroy(plane); | ||
| 387 | |||
| 388 | return ERR_PTR(ret); | ||
| 389 | } | ||
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c new file mode 100644 index 000000000000..2d0236b963a6 --- /dev/null +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c | |||
| @@ -0,0 +1,173 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2013 Red Hat | ||
| 3 | * Author: Rob Clark <robdclark@gmail.com> | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify it | ||
| 6 | * under the terms of the GNU General Public License version 2 as published by | ||
| 7 | * the Free Software Foundation. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 12 | * more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License along with | ||
| 15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 16 | */ | ||
| 17 | |||
| 18 | |||
| 19 | #include "mdp5_kms.h" | ||
| 20 | #include "mdp5_smp.h" | ||
| 21 | |||
| 22 | |||
| 23 | /* SMP - Shared Memory Pool | ||
| 24 | * | ||
| 25 | * These are shared between all the clients, where each plane in a | ||
| 26 | * scanout buffer is a SMP client. Ie. scanout of 3 plane I420 on | ||
| 27 | * pipe VIG0 => 3 clients: VIG0_Y, VIG0_CB, VIG0_CR. | ||
| 28 | * | ||
| 29 | * Based on the size of the attached scanout buffer, a certain # of | ||
| 30 | * blocks must be allocated to that client out of the shared pool. | ||
| 31 | * | ||
| 32 | * For each block, it can be either free, or pending/in-use by a | ||
| 33 | * client. The updates happen in three steps: | ||
| 34 | * | ||
| 35 | * 1) mdp5_smp_request(): | ||
| 36 | * When plane scanout is setup, calculate required number of | ||
| 37 | * blocks needed per client, and request. Blocks not inuse or | ||
| 38 | * pending by any other client are added to client's pending | ||
| 39 | * set. | ||
| 40 | * | ||
| 41 | * 2) mdp5_smp_configure(): | ||
| 42 | * As hw is programmed, before FLUSH, MDP5_SMP_ALLOC registers | ||
| 43 | * are configured for the union(pending, inuse) | ||
| 44 | * | ||
| 45 | * 3) mdp5_smp_commit(): | ||
| 46 | * After next vblank, copy pending -> inuse. Optionally update | ||
| 47 | * MDP5_SMP_ALLOC registers if there are newly unused blocks | ||
| 48 | * | ||
| 49 | * On the next vblank after changes have been committed to hw, the | ||
| 50 | * client's pending blocks become it's in-use blocks (and no-longer | ||
| 51 | * in-use blocks become available to other clients). | ||
| 52 | * | ||
| 53 | * btw, hurray for confusing overloaded acronyms! :-/ | ||
| 54 | * | ||
| 55 | * NOTE: for atomic modeset/pageflip NONBLOCK operations, step #1 | ||
| 56 | * should happen at (or before)? atomic->check(). And we'd need | ||
| 57 | * an API to discard previous requests if update is aborted or | ||
| 58 | * (test-only). | ||
| 59 | * | ||
| 60 | * TODO would perhaps be nice to have debugfs to dump out kernel | ||
| 61 | * inuse and pending state of all clients.. | ||
| 62 | */ | ||
| 63 | |||
| 64 | static DEFINE_SPINLOCK(smp_lock); | ||
| 65 | |||
| 66 | |||
| 67 | /* step #1: update # of blocks pending for the client: */ | ||
| 68 | int mdp5_smp_request(struct mdp5_kms *mdp5_kms, | ||
| 69 | enum mdp5_client_id cid, int nblks) | ||
| 70 | { | ||
| 71 | struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid]; | ||
| 72 | int i, ret, avail, cur_nblks, cnt = mdp5_kms->smp_blk_cnt; | ||
| 73 | unsigned long flags; | ||
| 74 | |||
| 75 | spin_lock_irqsave(&smp_lock, flags); | ||
| 76 | |||
| 77 | avail = cnt - bitmap_weight(mdp5_kms->smp_state, cnt); | ||
| 78 | if (nblks > avail) { | ||
| 79 | ret = -ENOSPC; | ||
| 80 | goto fail; | ||
| 81 | } | ||
| 82 | |||
| 83 | cur_nblks = bitmap_weight(ps->pending, cnt); | ||
| 84 | if (nblks > cur_nblks) { | ||
| 85 | /* grow the existing pending reservation: */ | ||
| 86 | for (i = cur_nblks; i < nblks; i++) { | ||
| 87 | int blk = find_first_zero_bit(mdp5_kms->smp_state, cnt); | ||
| 88 | set_bit(blk, ps->pending); | ||
| 89 | set_bit(blk, mdp5_kms->smp_state); | ||
| 90 | } | ||
| 91 | } else { | ||
| 92 | /* shrink the existing pending reservation: */ | ||
| 93 | for (i = cur_nblks; i > nblks; i--) { | ||
| 94 | int blk = find_first_bit(ps->pending, cnt); | ||
| 95 | clear_bit(blk, ps->pending); | ||
| 96 | /* don't clear in global smp_state until _commit() */ | ||
| 97 | } | ||
| 98 | } | ||
| 99 | |||
| 100 | fail: | ||
| 101 | spin_unlock_irqrestore(&smp_lock, flags); | ||
| 102 | return 0; | ||
| 103 | } | ||
| 104 | |||
| 105 | static void update_smp_state(struct mdp5_kms *mdp5_kms, | ||
| 106 | enum mdp5_client_id cid, mdp5_smp_state_t *assigned) | ||
| 107 | { | ||
| 108 | int cnt = mdp5_kms->smp_blk_cnt; | ||
| 109 | uint32_t blk, val; | ||
| 110 | |||
| 111 | for_each_set_bit(blk, *assigned, cnt) { | ||
| 112 | int idx = blk / 3; | ||
| 113 | int fld = blk % 3; | ||
| 114 | |||
| 115 | val = mdp5_read(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx)); | ||
| 116 | |||
| 117 | switch (fld) { | ||
| 118 | case 0: | ||
| 119 | val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK; | ||
| 120 | val |= MDP5_SMP_ALLOC_W_REG_CLIENT0(cid); | ||
| 121 | break; | ||
| 122 | case 1: | ||
| 123 | val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK; | ||
| 124 | val |= MDP5_SMP_ALLOC_W_REG_CLIENT1(cid); | ||
| 125 | break; | ||
| 126 | case 2: | ||
| 127 | val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK; | ||
| 128 | val |= MDP5_SMP_ALLOC_W_REG_CLIENT2(cid); | ||
| 129 | break; | ||
| 130 | } | ||
| 131 | |||
| 132 | mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx), val); | ||
| 133 | mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(idx), val); | ||
| 134 | } | ||
| 135 | } | ||
| 136 | |||
| 137 | /* step #2: configure hw for union(pending, inuse): */ | ||
| 138 | void mdp5_smp_configure(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid) | ||
| 139 | { | ||
| 140 | struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid]; | ||
| 141 | int cnt = mdp5_kms->smp_blk_cnt; | ||
| 142 | mdp5_smp_state_t assigned; | ||
| 143 | |||
| 144 | bitmap_or(assigned, ps->inuse, ps->pending, cnt); | ||
| 145 | update_smp_state(mdp5_kms, cid, &assigned); | ||
| 146 | } | ||
| 147 | |||
| 148 | /* step #3: after vblank, copy pending -> inuse: */ | ||
| 149 | void mdp5_smp_commit(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid) | ||
| 150 | { | ||
| 151 | struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid]; | ||
| 152 | int cnt = mdp5_kms->smp_blk_cnt; | ||
| 153 | mdp5_smp_state_t released; | ||
| 154 | |||
| 155 | /* | ||
| 156 | * Figure out if there are any blocks we where previously | ||
| 157 | * using, which can be released and made available to other | ||
| 158 | * clients: | ||
| 159 | */ | ||
| 160 | if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) { | ||
| 161 | unsigned long flags; | ||
| 162 | |||
| 163 | spin_lock_irqsave(&smp_lock, flags); | ||
| 164 | /* clear released blocks: */ | ||
| 165 | bitmap_andnot(mdp5_kms->smp_state, mdp5_kms->smp_state, | ||
| 166 | released, cnt); | ||
| 167 | spin_unlock_irqrestore(&smp_lock, flags); | ||
| 168 | |||
| 169 | update_smp_state(mdp5_kms, CID_UNUSED, &released); | ||
| 170 | } | ||
| 171 | |||
| 172 | bitmap_copy(ps->inuse, ps->pending, cnt); | ||
| 173 | } | ||
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h new file mode 100644 index 000000000000..0ab739e1a1dd --- /dev/null +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h | |||
| @@ -0,0 +1,41 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2013 Red Hat | ||
| 3 | * Author: Rob Clark <robdclark@gmail.com> | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify it | ||
| 6 | * under the terms of the GNU General Public License version 2 as published by | ||
| 7 | * the Free Software Foundation. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 12 | * more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License along with | ||
| 15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 16 | */ | ||
| 17 | |||
| 18 | #ifndef __MDP5_SMP_H__ | ||
| 19 | #define __MDP5_SMP_H__ | ||
| 20 | |||
| 21 | #include "msm_drv.h" | ||
| 22 | |||
| 23 | #define MAX_SMP_BLOCKS 22 | ||
| 24 | #define SMP_BLK_SIZE 4096 | ||
| 25 | #define SMP_ENTRIES_PER_BLK (SMP_BLK_SIZE / 16) | ||
| 26 | |||
| 27 | typedef DECLARE_BITMAP(mdp5_smp_state_t, MAX_SMP_BLOCKS); | ||
| 28 | |||
| 29 | struct mdp5_client_smp_state { | ||
| 30 | mdp5_smp_state_t inuse; | ||
| 31 | mdp5_smp_state_t pending; | ||
| 32 | }; | ||
| 33 | |||
| 34 | struct mdp5_kms; | ||
| 35 | |||
| 36 | int mdp5_smp_request(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid, int nblks); | ||
| 37 | void mdp5_smp_configure(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid); | ||
| 38 | void mdp5_smp_commit(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid); | ||
| 39 | |||
| 40 | |||
| 41 | #endif /* __MDP5_SMP_H__ */ | ||
diff --git a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h new file mode 100644 index 000000000000..a9629b85b983 --- /dev/null +++ b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h | |||
| @@ -0,0 +1,78 @@ | |||
| 1 | #ifndef MDP_COMMON_XML | ||
| 2 | #define MDP_COMMON_XML | ||
| 3 | |||
| 4 | /* Autogenerated file, DO NOT EDIT manually! | ||
| 5 | |||
| 6 | This file was generated by the rules-ng-ng headergen tool in this git repository: | ||
| 7 | http://github.com/freedreno/envytools/ | ||
| 8 | git clone https://github.com/freedreno/envytools.git | ||
| 9 | |||
| 10 | The rules-ng-ng source files this header was generated from are: | ||
| 11 | - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) | ||
| 12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) | ||
| 13 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31) | ||
| 14 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52) | ||
| 15 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13) | ||
| 16 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) | ||
| 17 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) | ||
| 18 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) | ||
| 19 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) | ||
| 20 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04) | ||
| 21 | |||
| 22 | Copyright (C) 2013 by the following authors: | ||
| 23 | - Rob Clark <robdclark@gmail.com> (robclark) | ||
| 24 | |||
| 25 | Permission is hereby granted, free of charge, to any person obtaining | ||
| 26 | a copy of this software and associated documentation files (the | ||
| 27 | "Software"), to deal in the Software without restriction, including | ||
| 28 | without limitation the rights to use, copy, modify, merge, publish, | ||
| 29 | distribute, sublicense, and/or sell copies of the Software, and to | ||
| 30 | permit persons to whom the Software is furnished to do so, subject to | ||
| 31 | the following conditions: | ||
| 32 | |||
| 33 | The above copyright notice and this permission notice (including the | ||
| 34 | next paragraph) shall be included in all copies or substantial | ||
| 35 | portions of the Software. | ||
| 36 | |||
| 37 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 38 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 39 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. | ||
| 40 | IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE | ||
| 41 | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION | ||
| 42 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||
| 43 | WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
| 44 | */ | ||
| 45 | |||
| 46 | |||
| 47 | enum mdp_mixer_stage_id { | ||
| 48 | STAGE_UNUSED = 0, | ||
| 49 | STAGE_BASE = 1, | ||
| 50 | STAGE0 = 2, | ||
| 51 | STAGE1 = 3, | ||
| 52 | STAGE2 = 4, | ||
| 53 | STAGE3 = 5, | ||
| 54 | }; | ||
| 55 | |||
| 56 | enum mdp_alpha_type { | ||
| 57 | FG_CONST = 0, | ||
| 58 | BG_CONST = 1, | ||
| 59 | FG_PIXEL = 2, | ||
| 60 | BG_PIXEL = 3, | ||
| 61 | }; | ||
| 62 | |||
| 63 | enum mdp_bpc { | ||
| 64 | BPC1 = 0, | ||
| 65 | BPC5 = 1, | ||
| 66 | BPC6 = 2, | ||
| 67 | BPC8 = 3, | ||
| 68 | }; | ||
| 69 | |||
| 70 | enum mdp_bpc_alpha { | ||
| 71 | BPC1A = 0, | ||
| 72 | BPC4A = 1, | ||
| 73 | BPC6A = 2, | ||
| 74 | BPC8A = 3, | ||
| 75 | }; | ||
| 76 | |||
| 77 | |||
| 78 | #endif /* MDP_COMMON_XML */ | ||
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_format.c b/drivers/gpu/drm/msm/mdp/mdp_format.c index 17330b0927b2..e0a6ffbe6ab4 100644 --- a/drivers/gpu/drm/msm/mdp4/mdp4_format.c +++ b/drivers/gpu/drm/msm/mdp/mdp_format.c | |||
| @@ -17,7 +17,7 @@ | |||
| 17 | 17 | ||
| 18 | 18 | ||
| 19 | #include "msm_drv.h" | 19 | #include "msm_drv.h" |
| 20 | #include "mdp4_kms.h" | 20 | #include "mdp_kms.h" |
| 21 | 21 | ||
| 22 | #define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt) { \ | 22 | #define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt) { \ |
| 23 | .base = { .pixel_format = DRM_FORMAT_ ## name }, \ | 23 | .base = { .pixel_format = DRM_FORMAT_ ## name }, \ |
| @@ -34,7 +34,7 @@ | |||
| 34 | 34 | ||
| 35 | #define BPC0A 0 | 35 | #define BPC0A 0 |
| 36 | 36 | ||
| 37 | static const struct mdp4_format formats[] = { | 37 | static const struct mdp_format formats[] = { |
| 38 | /* name a r g b e0 e1 e2 e3 alpha tight cpp cnt */ | 38 | /* name a r g b e0 e1 e2 e3 alpha tight cpp cnt */ |
| 39 | FMT(ARGB8888, 8, 8, 8, 8, 1, 0, 2, 3, true, true, 4, 4), | 39 | FMT(ARGB8888, 8, 8, 8, 8, 1, 0, 2, 3, true, true, 4, 4), |
| 40 | FMT(XRGB8888, 8, 8, 8, 8, 1, 0, 2, 3, false, true, 4, 4), | 40 | FMT(XRGB8888, 8, 8, 8, 8, 1, 0, 2, 3, false, true, 4, 4), |
| @@ -44,12 +44,11 @@ static const struct mdp4_format formats[] = { | |||
| 44 | FMT(BGR565, 0, 5, 6, 5, 2, 0, 1, 0, false, true, 2, 3), | 44 | FMT(BGR565, 0, 5, 6, 5, 2, 0, 1, 0, false, true, 2, 3), |
| 45 | }; | 45 | }; |
| 46 | 46 | ||
| 47 | uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *pixel_formats, | 47 | uint32_t mdp_get_formats(uint32_t *pixel_formats, uint32_t max_formats) |
| 48 | uint32_t max_formats) | ||
| 49 | { | 48 | { |
| 50 | uint32_t i; | 49 | uint32_t i; |
| 51 | for (i = 0; i < ARRAY_SIZE(formats); i++) { | 50 | for (i = 0; i < ARRAY_SIZE(formats); i++) { |
| 52 | const struct mdp4_format *f = &formats[i]; | 51 | const struct mdp_format *f = &formats[i]; |
| 53 | 52 | ||
| 54 | if (i == max_formats) | 53 | if (i == max_formats) |
| 55 | break; | 54 | break; |
| @@ -60,11 +59,11 @@ uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *pixel_formats, | |||
| 60 | return i; | 59 | return i; |
| 61 | } | 60 | } |
| 62 | 61 | ||
| 63 | const struct msm_format *mdp4_get_format(struct msm_kms *kms, uint32_t format) | 62 | const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format) |
| 64 | { | 63 | { |
| 65 | int i; | 64 | int i; |
| 66 | for (i = 0; i < ARRAY_SIZE(formats); i++) { | 65 | for (i = 0; i < ARRAY_SIZE(formats); i++) { |
| 67 | const struct mdp4_format *f = &formats[i]; | 66 | const struct mdp_format *f = &formats[i]; |
| 68 | if (f->base.pixel_format == format) | 67 | if (f->base.pixel_format == format) |
| 69 | return &f->base; | 68 | return &f->base; |
| 70 | } | 69 | } |
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.c b/drivers/gpu/drm/msm/mdp/mdp_kms.c new file mode 100644 index 000000000000..3be48f7c36be --- /dev/null +++ b/drivers/gpu/drm/msm/mdp/mdp_kms.c | |||
| @@ -0,0 +1,144 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2013 Red Hat | ||
| 3 | * Author: Rob Clark <robdclark@gmail.com> | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify it | ||
| 6 | * under the terms of the GNU General Public License version 2 as published by | ||
| 7 | * the Free Software Foundation. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 12 | * more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License along with | ||
| 15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 16 | */ | ||
| 17 | |||
| 18 | |||
| 19 | #include "msm_drv.h" | ||
| 20 | #include "mdp_kms.h" | ||
| 21 | |||
| 22 | |||
| 23 | struct mdp_irq_wait { | ||
| 24 | struct mdp_irq irq; | ||
| 25 | int count; | ||
| 26 | }; | ||
| 27 | |||
| 28 | static DECLARE_WAIT_QUEUE_HEAD(wait_event); | ||
| 29 | |||
| 30 | static DEFINE_SPINLOCK(list_lock); | ||
| 31 | |||
| 32 | static void update_irq(struct mdp_kms *mdp_kms) | ||
| 33 | { | ||
| 34 | struct mdp_irq *irq; | ||
| 35 | uint32_t irqmask = mdp_kms->vblank_mask; | ||
| 36 | |||
| 37 | BUG_ON(!spin_is_locked(&list_lock)); | ||
| 38 | |||
| 39 | list_for_each_entry(irq, &mdp_kms->irq_list, node) | ||
| 40 | irqmask |= irq->irqmask; | ||
| 41 | |||
| 42 | mdp_kms->funcs->set_irqmask(mdp_kms, irqmask); | ||
| 43 | } | ||
| 44 | |||
| 45 | static void update_irq_unlocked(struct mdp_kms *mdp_kms) | ||
| 46 | { | ||
| 47 | unsigned long flags; | ||
| 48 | spin_lock_irqsave(&list_lock, flags); | ||
| 49 | update_irq(mdp_kms); | ||
| 50 | spin_unlock_irqrestore(&list_lock, flags); | ||
| 51 | } | ||
| 52 | |||
| 53 | void mdp_dispatch_irqs(struct mdp_kms *mdp_kms, uint32_t status) | ||
| 54 | { | ||
| 55 | struct mdp_irq *handler, *n; | ||
| 56 | unsigned long flags; | ||
| 57 | |||
| 58 | spin_lock_irqsave(&list_lock, flags); | ||
| 59 | mdp_kms->in_irq = true; | ||
| 60 | list_for_each_entry_safe(handler, n, &mdp_kms->irq_list, node) { | ||
| 61 | if (handler->irqmask & status) { | ||
| 62 | spin_unlock_irqrestore(&list_lock, flags); | ||
| 63 | handler->irq(handler, handler->irqmask & status); | ||
| 64 | spin_lock_irqsave(&list_lock, flags); | ||
| 65 | } | ||
| 66 | } | ||
| 67 | mdp_kms->in_irq = false; | ||
| 68 | update_irq(mdp_kms); | ||
| 69 | spin_unlock_irqrestore(&list_lock, flags); | ||
| 70 | |||
| 71 | } | ||
| 72 | |||
| 73 | void mdp_update_vblank_mask(struct mdp_kms *mdp_kms, uint32_t mask, bool enable) | ||
| 74 | { | ||
| 75 | unsigned long flags; | ||
| 76 | |||
| 77 | spin_lock_irqsave(&list_lock, flags); | ||
| 78 | if (enable) | ||
| 79 | mdp_kms->vblank_mask |= mask; | ||
| 80 | else | ||
| 81 | mdp_kms->vblank_mask &= ~mask; | ||
| 82 | update_irq(mdp_kms); | ||
| 83 | spin_unlock_irqrestore(&list_lock, flags); | ||
| 84 | } | ||
| 85 | |||
| 86 | static void wait_irq(struct mdp_irq *irq, uint32_t irqstatus) | ||
| 87 | { | ||
| 88 | struct mdp_irq_wait *wait = | ||
| 89 | container_of(irq, struct mdp_irq_wait, irq); | ||
| 90 | wait->count--; | ||
| 91 | wake_up_all(&wait_event); | ||
| 92 | } | ||
| 93 | |||
| 94 | void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask) | ||
| 95 | { | ||
| 96 | struct mdp_irq_wait wait = { | ||
| 97 | .irq = { | ||
| 98 | .irq = wait_irq, | ||
| 99 | .irqmask = irqmask, | ||
| 100 | }, | ||
| 101 | .count = 1, | ||
| 102 | }; | ||
| 103 | mdp_irq_register(mdp_kms, &wait.irq); | ||
| 104 | wait_event(wait_event, (wait.count <= 0)); | ||
| 105 | mdp_irq_unregister(mdp_kms, &wait.irq); | ||
| 106 | } | ||
| 107 | |||
| 108 | void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq) | ||
| 109 | { | ||
| 110 | unsigned long flags; | ||
| 111 | bool needs_update = false; | ||
| 112 | |||
| 113 | spin_lock_irqsave(&list_lock, flags); | ||
| 114 | |||
| 115 | if (!irq->registered) { | ||
| 116 | irq->registered = true; | ||
| 117 | list_add(&irq->node, &mdp_kms->irq_list); | ||
| 118 | needs_update = !mdp_kms->in_irq; | ||
| 119 | } | ||
| 120 | |||
| 121 | spin_unlock_irqrestore(&list_lock, flags); | ||
| 122 | |||
| 123 | if (needs_update) | ||
| 124 | update_irq_unlocked(mdp_kms); | ||
| 125 | } | ||
| 126 | |||
| 127 | void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq) | ||
| 128 | { | ||
| 129 | unsigned long flags; | ||
| 130 | bool needs_update = false; | ||
| 131 | |||
| 132 | spin_lock_irqsave(&list_lock, flags); | ||
| 133 | |||
| 134 | if (irq->registered) { | ||
| 135 | irq->registered = false; | ||
| 136 | list_del(&irq->node); | ||
| 137 | needs_update = !mdp_kms->in_irq; | ||
| 138 | } | ||
| 139 | |||
| 140 | spin_unlock_irqrestore(&list_lock, flags); | ||
| 141 | |||
| 142 | if (needs_update) | ||
| 143 | update_irq_unlocked(mdp_kms); | ||
| 144 | } | ||
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.h b/drivers/gpu/drm/msm/mdp/mdp_kms.h new file mode 100644 index 000000000000..99557b5ad4fd --- /dev/null +++ b/drivers/gpu/drm/msm/mdp/mdp_kms.h | |||
| @@ -0,0 +1,97 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2013 Red Hat | ||
| 3 | * Author: Rob Clark <robdclark@gmail.com> | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify it | ||
| 6 | * under the terms of the GNU General Public License version 2 as published by | ||
| 7 | * the Free Software Foundation. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 12 | * more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License along with | ||
| 15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 16 | */ | ||
| 17 | |||
| 18 | #ifndef __MDP_KMS_H__ | ||
| 19 | #define __MDP_KMS_H__ | ||
| 20 | |||
| 21 | #include <linux/clk.h> | ||
| 22 | #include <linux/platform_device.h> | ||
| 23 | #include <linux/regulator/consumer.h> | ||
| 24 | |||
| 25 | #include "msm_drv.h" | ||
| 26 | #include "msm_kms.h" | ||
| 27 | #include "mdp_common.xml.h" | ||
| 28 | |||
| 29 | struct mdp_kms; | ||
| 30 | |||
| 31 | struct mdp_kms_funcs { | ||
| 32 | struct msm_kms_funcs base; | ||
| 33 | void (*set_irqmask)(struct mdp_kms *mdp_kms, uint32_t irqmask); | ||
| 34 | }; | ||
| 35 | |||
| 36 | struct mdp_kms { | ||
| 37 | struct msm_kms base; | ||
| 38 | |||
| 39 | const struct mdp_kms_funcs *funcs; | ||
| 40 | |||
| 41 | /* irq handling: */ | ||
| 42 | bool in_irq; | ||
| 43 | struct list_head irq_list; /* list of mdp4_irq */ | ||
| 44 | uint32_t vblank_mask; /* irq bits set for userspace vblank */ | ||
| 45 | }; | ||
| 46 | #define to_mdp_kms(x) container_of(x, struct mdp_kms, base) | ||
| 47 | |||
| 48 | static inline void mdp_kms_init(struct mdp_kms *mdp_kms, | ||
| 49 | const struct mdp_kms_funcs *funcs) | ||
| 50 | { | ||
| 51 | mdp_kms->funcs = funcs; | ||
| 52 | INIT_LIST_HEAD(&mdp_kms->irq_list); | ||
| 53 | msm_kms_init(&mdp_kms->base, &funcs->base); | ||
| 54 | } | ||
| 55 | |||
| 56 | /* | ||
| 57 | * irq helpers: | ||
| 58 | */ | ||
| 59 | |||
| 60 | /* For transiently registering for different MDP irqs that various parts | ||
| 61 | * of the KMS code need during setup/configuration. These are not | ||
| 62 | * necessarily the same as what drm_vblank_get/put() are requesting, and | ||
| 63 | * the hysteresis in drm_vblank_put() is not necessarily desirable for | ||
| 64 | * internal housekeeping related irq usage. | ||
| 65 | */ | ||
| 66 | struct mdp_irq { | ||
| 67 | struct list_head node; | ||
| 68 | uint32_t irqmask; | ||
| 69 | bool registered; | ||
| 70 | void (*irq)(struct mdp_irq *irq, uint32_t irqstatus); | ||
| 71 | }; | ||
| 72 | |||
| 73 | void mdp_dispatch_irqs(struct mdp_kms *mdp_kms, uint32_t status); | ||
| 74 | void mdp_update_vblank_mask(struct mdp_kms *mdp_kms, uint32_t mask, bool enable); | ||
| 75 | void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask); | ||
| 76 | void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq); | ||
| 77 | void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq); | ||
| 78 | |||
| 79 | |||
| 80 | /* | ||
| 81 | * pixel format helpers: | ||
| 82 | */ | ||
| 83 | |||
| 84 | struct mdp_format { | ||
| 85 | struct msm_format base; | ||
| 86 | enum mdp_bpc bpc_r, bpc_g, bpc_b; | ||
| 87 | enum mdp_bpc_alpha bpc_a; | ||
| 88 | uint8_t unpack[4]; | ||
| 89 | bool alpha_enable, unpack_tight; | ||
| 90 | uint8_t cpp, unpack_count; | ||
| 91 | }; | ||
| 92 | #define to_mdp_format(x) container_of(x, struct mdp_format, base) | ||
| 93 | |||
| 94 | uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats); | ||
| 95 | const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format); | ||
| 96 | |||
| 97 | #endif /* __MDP_KMS_H__ */ | ||
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp4/mdp4_irq.c deleted file mode 100644 index 5c6b7fca4edd..000000000000 --- a/drivers/gpu/drm/msm/mdp4/mdp4_irq.c +++ /dev/null | |||
| @@ -1,203 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2013 Red Hat | ||
| 3 | * Author: Rob Clark <robdclark@gmail.com> | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify it | ||
| 6 | * under the terms of the GNU General Public License version 2 as published by | ||
| 7 | * the Free Software Foundation. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 12 | * more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License along with | ||
| 15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 16 | */ | ||
| 17 | |||
| 18 | |||
| 19 | #include "msm_drv.h" | ||
| 20 | #include "mdp4_kms.h" | ||
| 21 | |||
| 22 | |||
| 23 | struct mdp4_irq_wait { | ||
| 24 | struct mdp4_irq irq; | ||
| 25 | int count; | ||
| 26 | }; | ||
| 27 | |||
| 28 | static DECLARE_WAIT_QUEUE_HEAD(wait_event); | ||
| 29 | |||
| 30 | static DEFINE_SPINLOCK(list_lock); | ||
| 31 | |||
| 32 | static void update_irq(struct mdp4_kms *mdp4_kms) | ||
| 33 | { | ||
| 34 | struct mdp4_irq *irq; | ||
| 35 | uint32_t irqmask = mdp4_kms->vblank_mask; | ||
| 36 | |||
| 37 | BUG_ON(!spin_is_locked(&list_lock)); | ||
| 38 | |||
| 39 | list_for_each_entry(irq, &mdp4_kms->irq_list, node) | ||
| 40 | irqmask |= irq->irqmask; | ||
| 41 | |||
| 42 | mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, irqmask); | ||
| 43 | } | ||
| 44 | |||
| 45 | static void update_irq_unlocked(struct mdp4_kms *mdp4_kms) | ||
| 46 | { | ||
| 47 | unsigned long flags; | ||
| 48 | spin_lock_irqsave(&list_lock, flags); | ||
| 49 | update_irq(mdp4_kms); | ||
| 50 | spin_unlock_irqrestore(&list_lock, flags); | ||
| 51 | } | ||
| 52 | |||
| 53 | static void mdp4_irq_error_handler(struct mdp4_irq *irq, uint32_t irqstatus) | ||
| 54 | { | ||
| 55 | DRM_ERROR("errors: %08x\n", irqstatus); | ||
| 56 | } | ||
| 57 | |||
| 58 | void mdp4_irq_preinstall(struct msm_kms *kms) | ||
| 59 | { | ||
| 60 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms); | ||
| 61 | mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff); | ||
| 62 | } | ||
| 63 | |||
| 64 | int mdp4_irq_postinstall(struct msm_kms *kms) | ||
| 65 | { | ||
| 66 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms); | ||
| 67 | struct mdp4_irq *error_handler = &mdp4_kms->error_handler; | ||
| 68 | |||
| 69 | INIT_LIST_HEAD(&mdp4_kms->irq_list); | ||
| 70 | |||
| 71 | error_handler->irq = mdp4_irq_error_handler; | ||
| 72 | error_handler->irqmask = MDP4_IRQ_PRIMARY_INTF_UDERRUN | | ||
| 73 | MDP4_IRQ_EXTERNAL_INTF_UDERRUN; | ||
| 74 | |||
| 75 | mdp4_irq_register(mdp4_kms, error_handler); | ||
| 76 | |||
| 77 | return 0; | ||
| 78 | } | ||
| 79 | |||
| 80 | void mdp4_irq_uninstall(struct msm_kms *kms) | ||
| 81 | { | ||
| 82 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms); | ||
| 83 | mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000); | ||
| 84 | } | ||
| 85 | |||
/* Top-level MDP4 interrupt handler: acks the latched status, forwards
 * vblank events to the DRM core, then dispatches to registered handlers
 * whose mask overlaps the status.
 */
irqreturn_t mdp4_irq(struct msm_kms *kms)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
	struct drm_device *dev = mdp4_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct mdp4_irq *handler, *n;
	unsigned long flags;
	unsigned int id;
	uint32_t status;

	/* read-then-clear: only the bits latched up to this point are handled */
	status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS);
	mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, status);

	VERB("status=%08x", status);

	/* deliver a vblank event for each crtc whose vblank bit fired */
	for (id = 0; id < priv->num_crtcs; id++)
		if (status & mdp4_crtc_vblank(priv->crtcs[id]))
			drm_handle_vblank(dev, id);

	spin_lock_irqsave(&list_lock, flags);
	/* while in_irq is set, register/unregister skip the hw mask write;
	 * the update_irq() below applies their changes in one shot.
	 */
	mdp4_kms->in_irq = true;
	list_for_each_entry_safe(handler, n, &mdp4_kms->irq_list, node) {
		if (handler->irqmask & status) {
			/* drop the lock around the callback so it may
			 * register/unregister handlers (e.g. wait_irq).
			 * NOTE(review): with the lock dropped, the
			 * pre-fetched next entry 'n' could itself be
			 * unregistered by the callback — assumes callbacks
			 * only ever remove their own entry; confirm.
			 */
			spin_unlock_irqrestore(&list_lock, flags);
			handler->irq(handler, handler->irqmask & status);
			spin_lock_irqsave(&list_lock, flags);
		}
	}
	mdp4_kms->in_irq = false;
	/* flush any mask changes deferred while in_irq was set */
	update_irq(mdp4_kms);
	spin_unlock_irqrestore(&list_lock, flags);

	return IRQ_HANDLED;
}
| 120 | |||
| 121 | int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) | ||
| 122 | { | ||
| 123 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms); | ||
| 124 | unsigned long flags; | ||
| 125 | |||
| 126 | spin_lock_irqsave(&list_lock, flags); | ||
| 127 | mdp4_kms->vblank_mask |= mdp4_crtc_vblank(crtc); | ||
| 128 | update_irq(mdp4_kms); | ||
| 129 | spin_unlock_irqrestore(&list_lock, flags); | ||
| 130 | |||
| 131 | return 0; | ||
| 132 | } | ||
| 133 | |||
| 134 | void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) | ||
| 135 | { | ||
| 136 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms); | ||
| 137 | unsigned long flags; | ||
| 138 | |||
| 139 | spin_lock_irqsave(&list_lock, flags); | ||
| 140 | mdp4_kms->vblank_mask &= ~mdp4_crtc_vblank(crtc); | ||
| 141 | update_irq(mdp4_kms); | ||
| 142 | spin_unlock_irqrestore(&list_lock, flags); | ||
| 143 | } | ||
| 144 | |||
| 145 | static void wait_irq(struct mdp4_irq *irq, uint32_t irqstatus) | ||
| 146 | { | ||
| 147 | struct mdp4_irq_wait *wait = | ||
| 148 | container_of(irq, struct mdp4_irq_wait, irq); | ||
| 149 | wait->count--; | ||
| 150 | wake_up_all(&wait_event); | ||
| 151 | } | ||
| 152 | |||
/* Sleep until an interrupt matching @irqmask fires.
 *
 * Registers a temporary on-stack handler whose callback (wait_irq)
 * decrements wait.count and wakes the shared queue; sleeps until the
 * count reaches zero, then unregisters.  Must be called from a context
 * that may sleep.
 */
void mdp4_irq_wait(struct mdp4_kms *mdp4_kms, uint32_t irqmask)
{
	struct mdp4_irq_wait wait = {
		.irq = {
			.irq = wait_irq,
			.irqmask = irqmask,
		},
		.count = 1,
	};
	mdp4_irq_register(mdp4_kms, &wait.irq);
	/* 'wait_event' is both the kernel macro and this file's queue name */
	wait_event(wait_event, (wait.count <= 0));
	mdp4_irq_unregister(mdp4_kms, &wait.irq);
}
| 166 | |||
| 167 | void mdp4_irq_register(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq) | ||
| 168 | { | ||
| 169 | unsigned long flags; | ||
| 170 | bool needs_update = false; | ||
| 171 | |||
| 172 | spin_lock_irqsave(&list_lock, flags); | ||
| 173 | |||
| 174 | if (!irq->registered) { | ||
| 175 | irq->registered = true; | ||
| 176 | list_add(&irq->node, &mdp4_kms->irq_list); | ||
| 177 | needs_update = !mdp4_kms->in_irq; | ||
| 178 | } | ||
| 179 | |||
| 180 | spin_unlock_irqrestore(&list_lock, flags); | ||
| 181 | |||
| 182 | if (needs_update) | ||
| 183 | update_irq_unlocked(mdp4_kms); | ||
| 184 | } | ||
| 185 | |||
| 186 | void mdp4_irq_unregister(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq) | ||
| 187 | { | ||
| 188 | unsigned long flags; | ||
| 189 | bool needs_update = false; | ||
| 190 | |||
| 191 | spin_lock_irqsave(&list_lock, flags); | ||
| 192 | |||
| 193 | if (irq->registered) { | ||
| 194 | irq->registered = false; | ||
| 195 | list_del(&irq->node); | ||
| 196 | needs_update = !mdp4_kms->in_irq; | ||
| 197 | } | ||
| 198 | |||
| 199 | spin_unlock_irqrestore(&list_lock, flags); | ||
| 200 | |||
| 201 | if (needs_update) | ||
| 202 | update_irq_unlocked(mdp4_kms); | ||
| 203 | } | ||
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 86537692e45c..e6adafc7eff3 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c | |||
| @@ -17,6 +17,7 @@ | |||
| 17 | 17 | ||
| 18 | #include "msm_drv.h" | 18 | #include "msm_drv.h" |
| 19 | #include "msm_gpu.h" | 19 | #include "msm_gpu.h" |
| 20 | #include "msm_kms.h" | ||
| 20 | 21 | ||
| 21 | static void msm_fb_output_poll_changed(struct drm_device *dev) | 22 | static void msm_fb_output_poll_changed(struct drm_device *dev) |
| 22 | { | 23 | { |
| @@ -30,50 +31,19 @@ static const struct drm_mode_config_funcs mode_config_funcs = { | |||
| 30 | .output_poll_changed = msm_fb_output_poll_changed, | 31 | .output_poll_changed = msm_fb_output_poll_changed, |
| 31 | }; | 32 | }; |
| 32 | 33 | ||
| 33 | static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev, | 34 | int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu) |
| 34 | unsigned long iova, int flags, void *arg) | ||
| 35 | { | ||
| 36 | DBG("*** fault: iova=%08lx, flags=%d", iova, flags); | ||
| 37 | return 0; | ||
| 38 | } | ||
| 39 | |||
| 40 | int msm_register_iommu(struct drm_device *dev, struct iommu_domain *iommu) | ||
| 41 | { | 35 | { |
| 42 | struct msm_drm_private *priv = dev->dev_private; | 36 | struct msm_drm_private *priv = dev->dev_private; |
| 43 | int idx = priv->num_iommus++; | 37 | int idx = priv->num_mmus++; |
| 44 | 38 | ||
| 45 | if (WARN_ON(idx >= ARRAY_SIZE(priv->iommus))) | 39 | if (WARN_ON(idx >= ARRAY_SIZE(priv->mmus))) |
| 46 | return -EINVAL; | 40 | return -EINVAL; |
| 47 | 41 | ||
| 48 | priv->iommus[idx] = iommu; | 42 | priv->mmus[idx] = mmu; |
| 49 | |||
| 50 | iommu_set_fault_handler(iommu, msm_fault_handler, dev); | ||
| 51 | |||
| 52 | /* need to iommu_attach_device() somewhere?? on resume?? */ | ||
| 53 | 43 | ||
| 54 | return idx; | 44 | return idx; |
| 55 | } | 45 | } |
| 56 | 46 | ||
| 57 | int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu, | ||
| 58 | const char **names, int cnt) | ||
| 59 | { | ||
| 60 | int i, ret; | ||
| 61 | |||
| 62 | for (i = 0; i < cnt; i++) { | ||
| 63 | /* TODO maybe some day msm iommu won't require this hack: */ | ||
| 64 | struct device *msm_iommu_get_ctx(const char *ctx_name); | ||
| 65 | struct device *ctx = msm_iommu_get_ctx(names[i]); | ||
| 66 | if (!ctx) | ||
| 67 | continue; | ||
| 68 | ret = iommu_attach_device(iommu, ctx); | ||
| 69 | if (ret) { | ||
| 70 | dev_warn(dev->dev, "could not attach iommu to %s", names[i]); | ||
| 71 | return ret; | ||
| 72 | } | ||
| 73 | } | ||
| 74 | return 0; | ||
| 75 | } | ||
| 76 | |||
| 77 | #ifdef CONFIG_DRM_MSM_REGISTER_LOGGING | 47 | #ifdef CONFIG_DRM_MSM_REGISTER_LOGGING |
| 78 | static bool reglog = false; | 48 | static bool reglog = false; |
| 79 | MODULE_PARM_DESC(reglog, "Enable register read/write logging"); | 49 | MODULE_PARM_DESC(reglog, "Enable register read/write logging"); |
| @@ -82,6 +52,10 @@ module_param(reglog, bool, 0600); | |||
| 82 | #define reglog 0 | 52 | #define reglog 0 |
| 83 | #endif | 53 | #endif |
| 84 | 54 | ||
| 55 | static char *vram; | ||
| 56 | MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU"); | ||
| 57 | module_param(vram, charp, 0); | ||
| 58 | |||
| 85 | void __iomem *msm_ioremap(struct platform_device *pdev, const char *name, | 59 | void __iomem *msm_ioremap(struct platform_device *pdev, const char *name, |
| 86 | const char *dbgname) | 60 | const char *dbgname) |
| 87 | { | 61 | { |
| @@ -161,6 +135,14 @@ static int msm_unload(struct drm_device *dev) | |||
| 161 | mutex_unlock(&dev->struct_mutex); | 135 | mutex_unlock(&dev->struct_mutex); |
| 162 | } | 136 | } |
| 163 | 137 | ||
| 138 | if (priv->vram.paddr) { | ||
| 139 | DEFINE_DMA_ATTRS(attrs); | ||
| 140 | dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs); | ||
| 141 | drm_mm_takedown(&priv->vram.mm); | ||
| 142 | dma_free_attrs(dev->dev, priv->vram.size, NULL, | ||
| 143 | priv->vram.paddr, &attrs); | ||
| 144 | } | ||
| 145 | |||
| 164 | dev->dev_private = NULL; | 146 | dev->dev_private = NULL; |
| 165 | 147 | ||
| 166 | kfree(priv); | 148 | kfree(priv); |
| @@ -168,6 +150,24 @@ static int msm_unload(struct drm_device *dev) | |||
| 168 | return 0; | 150 | return 0; |
| 169 | } | 151 | } |
| 170 | 152 | ||
| 153 | static int get_mdp_ver(struct platform_device *pdev) | ||
| 154 | { | ||
| 155 | #ifdef CONFIG_OF | ||
| 156 | const static struct of_device_id match_types[] = { { | ||
| 157 | .compatible = "qcom,mdss_mdp", | ||
| 158 | .data = (void *)5, | ||
| 159 | }, { | ||
| 160 | /* end node */ | ||
| 161 | } }; | ||
| 162 | struct device *dev = &pdev->dev; | ||
| 163 | const struct of_device_id *match; | ||
| 164 | match = of_match_node(match_types, dev->of_node); | ||
| 165 | if (match) | ||
| 166 | return (int)match->data; | ||
| 167 | #endif | ||
| 168 | return 4; | ||
| 169 | } | ||
| 170 | |||
| 171 | static int msm_load(struct drm_device *dev, unsigned long flags) | 171 | static int msm_load(struct drm_device *dev, unsigned long flags) |
| 172 | { | 172 | { |
| 173 | struct platform_device *pdev = dev->platformdev; | 173 | struct platform_device *pdev = dev->platformdev; |
| @@ -191,7 +191,53 @@ static int msm_load(struct drm_device *dev, unsigned long flags) | |||
| 191 | 191 | ||
| 192 | drm_mode_config_init(dev); | 192 | drm_mode_config_init(dev); |
| 193 | 193 | ||
| 194 | kms = mdp4_kms_init(dev); | 194 | /* if we have no IOMMU, then we need to use carveout allocator. |
| 195 | * Grab the entire CMA chunk carved out in early startup in | ||
| 196 | * mach-msm: | ||
| 197 | */ | ||
| 198 | if (!iommu_present(&platform_bus_type)) { | ||
| 199 | DEFINE_DMA_ATTRS(attrs); | ||
| 200 | unsigned long size; | ||
| 201 | void *p; | ||
| 202 | |||
| 203 | DBG("using %s VRAM carveout", vram); | ||
| 204 | size = memparse(vram, NULL); | ||
| 205 | priv->vram.size = size; | ||
| 206 | |||
| 207 | drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1); | ||
| 208 | |||
| 209 | dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs); | ||
| 210 | dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); | ||
| 211 | |||
| 212 | /* note that for no-kernel-mapping, the vaddr returned | ||
| 213 | * is bogus, but non-null if allocation succeeded: | ||
| 214 | */ | ||
| 215 | p = dma_alloc_attrs(dev->dev, size, | ||
| 216 | &priv->vram.paddr, 0, &attrs); | ||
| 217 | if (!p) { | ||
| 218 | dev_err(dev->dev, "failed to allocate VRAM\n"); | ||
| 219 | priv->vram.paddr = 0; | ||
| 220 | ret = -ENOMEM; | ||
| 221 | goto fail; | ||
| 222 | } | ||
| 223 | |||
| 224 | dev_info(dev->dev, "VRAM: %08x->%08x\n", | ||
| 225 | (uint32_t)priv->vram.paddr, | ||
| 226 | (uint32_t)(priv->vram.paddr + size)); | ||
| 227 | } | ||
| 228 | |||
| 229 | switch (get_mdp_ver(pdev)) { | ||
| 230 | case 4: | ||
| 231 | kms = mdp4_kms_init(dev); | ||
| 232 | break; | ||
| 233 | case 5: | ||
| 234 | kms = mdp5_kms_init(dev); | ||
| 235 | break; | ||
| 236 | default: | ||
| 237 | kms = ERR_PTR(-ENODEV); | ||
| 238 | break; | ||
| 239 | } | ||
| 240 | |||
| 195 | if (IS_ERR(kms)) { | 241 | if (IS_ERR(kms)) { |
| 196 | /* | 242 | /* |
| 197 | * NOTE: once we have GPU support, having no kms should not | 243 | * NOTE: once we have GPU support, having no kms should not |
| @@ -326,7 +372,7 @@ static void msm_lastclose(struct drm_device *dev) | |||
| 326 | } | 372 | } |
| 327 | } | 373 | } |
| 328 | 374 | ||
| 329 | static irqreturn_t msm_irq(DRM_IRQ_ARGS) | 375 | static irqreturn_t msm_irq(int irq, void *arg) |
| 330 | { | 376 | { |
| 331 | struct drm_device *dev = arg; | 377 | struct drm_device *dev = arg; |
| 332 | struct msm_drm_private *priv = dev->dev_private; | 378 | struct msm_drm_private *priv = dev->dev_private; |
| @@ -415,7 +461,7 @@ static int msm_gem_show(struct drm_device *dev, struct seq_file *m) | |||
| 415 | 461 | ||
| 416 | static int msm_mm_show(struct drm_device *dev, struct seq_file *m) | 462 | static int msm_mm_show(struct drm_device *dev, struct seq_file *m) |
| 417 | { | 463 | { |
| 418 | return drm_mm_dump_table(m, dev->mm_private); | 464 | return drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm); |
| 419 | } | 465 | } |
| 420 | 466 | ||
| 421 | static int msm_fb_show(struct drm_device *dev, struct seq_file *m) | 467 | static int msm_fb_show(struct drm_device *dev, struct seq_file *m) |
| @@ -778,12 +824,13 @@ static const struct dev_pm_ops msm_pm_ops = { | |||
| 778 | 824 | ||
| 779 | static int msm_pdev_probe(struct platform_device *pdev) | 825 | static int msm_pdev_probe(struct platform_device *pdev) |
| 780 | { | 826 | { |
| 827 | pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); | ||
| 781 | return drm_platform_init(&msm_driver, pdev); | 828 | return drm_platform_init(&msm_driver, pdev); |
| 782 | } | 829 | } |
| 783 | 830 | ||
| 784 | static int msm_pdev_remove(struct platform_device *pdev) | 831 | static int msm_pdev_remove(struct platform_device *pdev) |
| 785 | { | 832 | { |
| 786 | drm_platform_exit(&msm_driver, pdev); | 833 | drm_put_dev(platform_get_drvdata(pdev)); |
| 787 | 834 | ||
| 788 | return 0; | 835 | return 0; |
| 789 | } | 836 | } |
| @@ -793,12 +840,19 @@ static const struct platform_device_id msm_id[] = { | |||
| 793 | { } | 840 | { } |
| 794 | }; | 841 | }; |
| 795 | 842 | ||
| 843 | static const struct of_device_id dt_match[] = { | ||
| 844 | { .compatible = "qcom,mdss_mdp" }, | ||
| 845 | {} | ||
| 846 | }; | ||
| 847 | MODULE_DEVICE_TABLE(of, dt_match); | ||
| 848 | |||
| 796 | static struct platform_driver msm_platform_driver = { | 849 | static struct platform_driver msm_platform_driver = { |
| 797 | .probe = msm_pdev_probe, | 850 | .probe = msm_pdev_probe, |
| 798 | .remove = msm_pdev_remove, | 851 | .remove = msm_pdev_remove, |
| 799 | .driver = { | 852 | .driver = { |
| 800 | .owner = THIS_MODULE, | 853 | .owner = THIS_MODULE, |
| 801 | .name = "msm", | 854 | .name = "msm", |
| 855 | .of_match_table = dt_match, | ||
| 802 | .pm = &msm_pm_ops, | 856 | .pm = &msm_pm_ops, |
| 803 | }, | 857 | }, |
| 804 | .id_table = msm_id, | 858 | .id_table = msm_id, |
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index d39f0862b19e..3d63269c5b29 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h | |||
| @@ -31,6 +31,15 @@ | |||
| 31 | #include <linux/types.h> | 31 | #include <linux/types.h> |
| 32 | #include <asm/sizes.h> | 32 | #include <asm/sizes.h> |
| 33 | 33 | ||
| 34 | |||
| 35 | #if defined(CONFIG_COMPILE_TEST) && !defined(CONFIG_ARCH_MSM) | ||
| 36 | /* stubs we need for compile-test: */ | ||
| 37 | static inline struct device *msm_iommu_get_ctx(const char *ctx_name) | ||
| 38 | { | ||
| 39 | return NULL; | ||
| 40 | } | ||
| 41 | #endif | ||
| 42 | |||
| 34 | #ifndef CONFIG_OF | 43 | #ifndef CONFIG_OF |
| 35 | #include <mach/board.h> | 44 | #include <mach/board.h> |
| 36 | #include <mach/socinfo.h> | 45 | #include <mach/socinfo.h> |
| @@ -44,6 +53,7 @@ | |||
| 44 | 53 | ||
| 45 | struct msm_kms; | 54 | struct msm_kms; |
| 46 | struct msm_gpu; | 55 | struct msm_gpu; |
| 56 | struct msm_mmu; | ||
| 47 | 57 | ||
| 48 | #define NUM_DOMAINS 2 /* one for KMS, then one per gpu core (?) */ | 58 | #define NUM_DOMAINS 2 /* one for KMS, then one per gpu core (?) */ |
| 49 | 59 | ||
| @@ -76,9 +86,9 @@ struct msm_drm_private { | |||
| 76 | /* callbacks deferred until bo is inactive: */ | 86 | /* callbacks deferred until bo is inactive: */ |
| 77 | struct list_head fence_cbs; | 87 | struct list_head fence_cbs; |
| 78 | 88 | ||
| 79 | /* registered IOMMU domains: */ | 89 | /* registered MMUs: */ |
| 80 | unsigned int num_iommus; | 90 | unsigned int num_mmus; |
| 81 | struct iommu_domain *iommus[NUM_DOMAINS]; | 91 | struct msm_mmu *mmus[NUM_DOMAINS]; |
| 82 | 92 | ||
| 83 | unsigned int num_planes; | 93 | unsigned int num_planes; |
| 84 | struct drm_plane *planes[8]; | 94 | struct drm_plane *planes[8]; |
| @@ -94,6 +104,16 @@ struct msm_drm_private { | |||
| 94 | 104 | ||
| 95 | unsigned int num_connectors; | 105 | unsigned int num_connectors; |
| 96 | struct drm_connector *connectors[8]; | 106 | struct drm_connector *connectors[8]; |
| 107 | |||
| 108 | /* VRAM carveout, used when no IOMMU: */ | ||
| 109 | struct { | ||
| 110 | unsigned long size; | ||
| 111 | dma_addr_t paddr; | ||
| 112 | /* NOTE: mm managed at the page level, size is in # of pages | ||
| 113 | * and position mm_node->start is in # of pages: | ||
| 114 | */ | ||
| 115 | struct drm_mm mm; | ||
| 116 | } vram; | ||
| 97 | }; | 117 | }; |
| 98 | 118 | ||
| 99 | struct msm_format { | 119 | struct msm_format { |
| @@ -114,39 +134,7 @@ void __msm_fence_worker(struct work_struct *work); | |||
| 114 | (_cb)->func = _func; \ | 134 | (_cb)->func = _func; \ |
| 115 | } while (0) | 135 | } while (0) |
| 116 | 136 | ||
| 117 | /* As there are different display controller blocks depending on the | 137 | int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu); |
| 118 | * snapdragon version, the kms support is split out and the appropriate | ||
| 119 | * implementation is loaded at runtime. The kms module is responsible | ||
| 120 | * for constructing the appropriate planes/crtcs/encoders/connectors. | ||
| 121 | */ | ||
| 122 | struct msm_kms_funcs { | ||
| 123 | /* hw initialization: */ | ||
| 124 | int (*hw_init)(struct msm_kms *kms); | ||
| 125 | /* irq handling: */ | ||
| 126 | void (*irq_preinstall)(struct msm_kms *kms); | ||
| 127 | int (*irq_postinstall)(struct msm_kms *kms); | ||
| 128 | void (*irq_uninstall)(struct msm_kms *kms); | ||
| 129 | irqreturn_t (*irq)(struct msm_kms *kms); | ||
| 130 | int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc); | ||
| 131 | void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc); | ||
| 132 | /* misc: */ | ||
| 133 | const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format); | ||
| 134 | long (*round_pixclk)(struct msm_kms *kms, unsigned long rate, | ||
| 135 | struct drm_encoder *encoder); | ||
| 136 | /* cleanup: */ | ||
| 137 | void (*preclose)(struct msm_kms *kms, struct drm_file *file); | ||
| 138 | void (*destroy)(struct msm_kms *kms); | ||
| 139 | }; | ||
| 140 | |||
| 141 | struct msm_kms { | ||
| 142 | const struct msm_kms_funcs *funcs; | ||
| 143 | }; | ||
| 144 | |||
| 145 | struct msm_kms *mdp4_kms_init(struct drm_device *dev); | ||
| 146 | |||
| 147 | int msm_register_iommu(struct drm_device *dev, struct iommu_domain *iommu); | ||
| 148 | int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu, | ||
| 149 | const char **names, int cnt); | ||
| 150 | 138 | ||
| 151 | int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence, | 139 | int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence, |
| 152 | struct timespec *timeout); | 140 | struct timespec *timeout); |
| @@ -202,7 +190,9 @@ struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev, | |||
| 202 | 190 | ||
| 203 | struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev); | 191 | struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev); |
| 204 | 192 | ||
| 205 | int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder); | 193 | struct hdmi; |
| 194 | struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder); | ||
| 195 | irqreturn_t hdmi_irq(int irq, void *dev_id); | ||
| 206 | void __init hdmi_register(void); | 196 | void __init hdmi_register(void); |
| 207 | void __exit hdmi_unregister(void); | 197 | void __exit hdmi_unregister(void); |
| 208 | 198 | ||
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c index 0286c0eeb10c..81bafdf19ab3 100644 --- a/drivers/gpu/drm/msm/msm_fb.c +++ b/drivers/gpu/drm/msm/msm_fb.c | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | */ | 16 | */ |
| 17 | 17 | ||
| 18 | #include "msm_drv.h" | 18 | #include "msm_drv.h" |
| 19 | #include "msm_kms.h" | ||
| 19 | 20 | ||
| 20 | #include "drm_crtc.h" | 21 | #include "drm_crtc.h" |
| 21 | #include "drm_crtc_helper.h" | 22 | #include "drm_crtc_helper.h" |
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index e587d251c590..d8d60c969ac7 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c | |||
| @@ -22,7 +22,45 @@ | |||
| 22 | #include "msm_drv.h" | 22 | #include "msm_drv.h" |
| 23 | #include "msm_gem.h" | 23 | #include "msm_gem.h" |
| 24 | #include "msm_gpu.h" | 24 | #include "msm_gpu.h" |
| 25 | #include "msm_mmu.h" | ||
| 25 | 26 | ||
| 27 | static dma_addr_t physaddr(struct drm_gem_object *obj) | ||
| 28 | { | ||
| 29 | struct msm_gem_object *msm_obj = to_msm_bo(obj); | ||
| 30 | struct msm_drm_private *priv = obj->dev->dev_private; | ||
| 31 | return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) + | ||
| 32 | priv->vram.paddr; | ||
| 33 | } | ||
| 34 | |||
| 35 | /* allocate pages from VRAM carveout, used when no IOMMU: */ | ||
| 36 | static struct page **get_pages_vram(struct drm_gem_object *obj, | ||
| 37 | int npages) | ||
| 38 | { | ||
| 39 | struct msm_gem_object *msm_obj = to_msm_bo(obj); | ||
| 40 | struct msm_drm_private *priv = obj->dev->dev_private; | ||
| 41 | dma_addr_t paddr; | ||
| 42 | struct page **p; | ||
| 43 | int ret, i; | ||
| 44 | |||
| 45 | p = drm_malloc_ab(npages, sizeof(struct page *)); | ||
| 46 | if (!p) | ||
| 47 | return ERR_PTR(-ENOMEM); | ||
| 48 | |||
| 49 | ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, | ||
| 50 | npages, 0, DRM_MM_SEARCH_DEFAULT); | ||
| 51 | if (ret) { | ||
| 52 | drm_free_large(p); | ||
| 53 | return ERR_PTR(ret); | ||
| 54 | } | ||
| 55 | |||
| 56 | paddr = physaddr(obj); | ||
| 57 | for (i = 0; i < npages; i++) { | ||
| 58 | p[i] = phys_to_page(paddr); | ||
| 59 | paddr += PAGE_SIZE; | ||
| 60 | } | ||
| 61 | |||
| 62 | return p; | ||
| 63 | } | ||
| 26 | 64 | ||
| 27 | /* called with dev->struct_mutex held */ | 65 | /* called with dev->struct_mutex held */ |
| 28 | static struct page **get_pages(struct drm_gem_object *obj) | 66 | static struct page **get_pages(struct drm_gem_object *obj) |
| @@ -31,9 +69,14 @@ static struct page **get_pages(struct drm_gem_object *obj) | |||
| 31 | 69 | ||
| 32 | if (!msm_obj->pages) { | 70 | if (!msm_obj->pages) { |
| 33 | struct drm_device *dev = obj->dev; | 71 | struct drm_device *dev = obj->dev; |
| 34 | struct page **p = drm_gem_get_pages(obj, 0); | 72 | struct page **p; |
| 35 | int npages = obj->size >> PAGE_SHIFT; | 73 | int npages = obj->size >> PAGE_SHIFT; |
| 36 | 74 | ||
| 75 | if (iommu_present(&platform_bus_type)) | ||
| 76 | p = drm_gem_get_pages(obj, 0); | ||
| 77 | else | ||
| 78 | p = get_pages_vram(obj, npages); | ||
| 79 | |||
| 37 | if (IS_ERR(p)) { | 80 | if (IS_ERR(p)) { |
| 38 | dev_err(dev->dev, "could not get pages: %ld\n", | 81 | dev_err(dev->dev, "could not get pages: %ld\n", |
| 39 | PTR_ERR(p)); | 82 | PTR_ERR(p)); |
| @@ -73,7 +116,11 @@ static void put_pages(struct drm_gem_object *obj) | |||
| 73 | sg_free_table(msm_obj->sgt); | 116 | sg_free_table(msm_obj->sgt); |
| 74 | kfree(msm_obj->sgt); | 117 | kfree(msm_obj->sgt); |
| 75 | 118 | ||
| 76 | drm_gem_put_pages(obj, msm_obj->pages, true, false); | 119 | if (iommu_present(&platform_bus_type)) |
| 120 | drm_gem_put_pages(obj, msm_obj->pages, true, false); | ||
| 121 | else | ||
| 122 | drm_mm_remove_node(msm_obj->vram_node); | ||
| 123 | |||
| 77 | msm_obj->pages = NULL; | 124 | msm_obj->pages = NULL; |
| 78 | } | 125 | } |
| 79 | } | 126 | } |
| @@ -138,7 +185,6 @@ int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma) | |||
| 138 | int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 185 | int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) |
| 139 | { | 186 | { |
| 140 | struct drm_gem_object *obj = vma->vm_private_data; | 187 | struct drm_gem_object *obj = vma->vm_private_data; |
| 141 | struct msm_gem_object *msm_obj = to_msm_bo(obj); | ||
| 142 | struct drm_device *dev = obj->dev; | 188 | struct drm_device *dev = obj->dev; |
| 143 | struct page **pages; | 189 | struct page **pages; |
| 144 | unsigned long pfn; | 190 | unsigned long pfn; |
| @@ -163,7 +209,7 @@ int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
| 163 | pgoff = ((unsigned long)vmf->virtual_address - | 209 | pgoff = ((unsigned long)vmf->virtual_address - |
| 164 | vma->vm_start) >> PAGE_SHIFT; | 210 | vma->vm_start) >> PAGE_SHIFT; |
| 165 | 211 | ||
| 166 | pfn = page_to_pfn(msm_obj->pages[pgoff]); | 212 | pfn = page_to_pfn(pages[pgoff]); |
| 167 | 213 | ||
| 168 | VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address, | 214 | VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address, |
| 169 | pfn, pfn << PAGE_SHIFT); | 215 | pfn, pfn << PAGE_SHIFT); |
| @@ -219,67 +265,6 @@ uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj) | |||
| 219 | return offset; | 265 | return offset; |
| 220 | } | 266 | } |
| 221 | 267 | ||
| 222 | /* helpers for dealing w/ iommu: */ | ||
| 223 | static int map_range(struct iommu_domain *domain, unsigned int iova, | ||
| 224 | struct sg_table *sgt, unsigned int len, int prot) | ||
| 225 | { | ||
| 226 | struct scatterlist *sg; | ||
| 227 | unsigned int da = iova; | ||
| 228 | unsigned int i, j; | ||
| 229 | int ret; | ||
| 230 | |||
| 231 | if (!domain || !sgt) | ||
| 232 | return -EINVAL; | ||
| 233 | |||
| 234 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { | ||
| 235 | u32 pa = sg_phys(sg) - sg->offset; | ||
| 236 | size_t bytes = sg->length + sg->offset; | ||
| 237 | |||
| 238 | VERB("map[%d]: %08x %08x(%x)", i, iova, pa, bytes); | ||
| 239 | |||
| 240 | ret = iommu_map(domain, da, pa, bytes, prot); | ||
| 241 | if (ret) | ||
| 242 | goto fail; | ||
| 243 | |||
| 244 | da += bytes; | ||
| 245 | } | ||
| 246 | |||
| 247 | return 0; | ||
| 248 | |||
| 249 | fail: | ||
| 250 | da = iova; | ||
| 251 | |||
| 252 | for_each_sg(sgt->sgl, sg, i, j) { | ||
| 253 | size_t bytes = sg->length + sg->offset; | ||
| 254 | iommu_unmap(domain, da, bytes); | ||
| 255 | da += bytes; | ||
| 256 | } | ||
| 257 | return ret; | ||
| 258 | } | ||
| 259 | |||
| 260 | static void unmap_range(struct iommu_domain *domain, unsigned int iova, | ||
| 261 | struct sg_table *sgt, unsigned int len) | ||
| 262 | { | ||
| 263 | struct scatterlist *sg; | ||
| 264 | unsigned int da = iova; | ||
| 265 | int i; | ||
| 266 | |||
| 267 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { | ||
| 268 | size_t bytes = sg->length + sg->offset; | ||
| 269 | size_t unmapped; | ||
| 270 | |||
| 271 | unmapped = iommu_unmap(domain, da, bytes); | ||
| 272 | if (unmapped < bytes) | ||
| 273 | break; | ||
| 274 | |||
| 275 | VERB("unmap[%d]: %08x(%x)", i, iova, bytes); | ||
| 276 | |||
| 277 | BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE)); | ||
| 278 | |||
| 279 | da += bytes; | ||
| 280 | } | ||
| 281 | } | ||
| 282 | |||
| 283 | /* should be called under struct_mutex.. although it can be called | 268 | /* should be called under struct_mutex.. although it can be called |
| 284 | * from atomic context without struct_mutex to acquire an extra | 269 | * from atomic context without struct_mutex to acquire an extra |
| 285 | * iova ref if you know one is already held. | 270 | * iova ref if you know one is already held. |
| @@ -295,15 +280,20 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id, | |||
| 295 | 280 | ||
| 296 | if (!msm_obj->domain[id].iova) { | 281 | if (!msm_obj->domain[id].iova) { |
| 297 | struct msm_drm_private *priv = obj->dev->dev_private; | 282 | struct msm_drm_private *priv = obj->dev->dev_private; |
| 298 | uint32_t offset = (uint32_t)mmap_offset(obj); | 283 | struct msm_mmu *mmu = priv->mmus[id]; |
| 299 | struct page **pages; | 284 | struct page **pages = get_pages(obj); |
| 300 | pages = get_pages(obj); | 285 | |
| 301 | if (IS_ERR(pages)) | 286 | if (IS_ERR(pages)) |
| 302 | return PTR_ERR(pages); | 287 | return PTR_ERR(pages); |
| 303 | // XXX ideally we would not map buffers writable when not needed... | 288 | |
| 304 | ret = map_range(priv->iommus[id], offset, msm_obj->sgt, | 289 | if (iommu_present(&platform_bus_type)) { |
| 305 | obj->size, IOMMU_READ | IOMMU_WRITE); | 290 | uint32_t offset = (uint32_t)mmap_offset(obj); |
| 306 | msm_obj->domain[id].iova = offset; | 291 | ret = mmu->funcs->map(mmu, offset, msm_obj->sgt, |
| 292 | obj->size, IOMMU_READ | IOMMU_WRITE); | ||
| 293 | msm_obj->domain[id].iova = offset; | ||
| 294 | } else { | ||
| 295 | msm_obj->domain[id].iova = physaddr(obj); | ||
| 296 | } | ||
| 307 | } | 297 | } |
| 308 | 298 | ||
| 309 | if (!ret) | 299 | if (!ret) |
| @@ -514,6 +504,7 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m) | |||
| 514 | void msm_gem_free_object(struct drm_gem_object *obj) | 504 | void msm_gem_free_object(struct drm_gem_object *obj) |
| 515 | { | 505 | { |
| 516 | struct drm_device *dev = obj->dev; | 506 | struct drm_device *dev = obj->dev; |
| 507 | struct msm_drm_private *priv = obj->dev->dev_private; | ||
| 517 | struct msm_gem_object *msm_obj = to_msm_bo(obj); | 508 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
| 518 | int id; | 509 | int id; |
| 519 | 510 | ||
| @@ -525,11 +516,10 @@ void msm_gem_free_object(struct drm_gem_object *obj) | |||
| 525 | list_del(&msm_obj->mm_list); | 516 | list_del(&msm_obj->mm_list); |
| 526 | 517 | ||
| 527 | for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) { | 518 | for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) { |
| 528 | if (msm_obj->domain[id].iova) { | 519 | struct msm_mmu *mmu = priv->mmus[id]; |
| 529 | struct msm_drm_private *priv = obj->dev->dev_private; | 520 | if (mmu && msm_obj->domain[id].iova) { |
| 530 | uint32_t offset = (uint32_t)mmap_offset(obj); | 521 | uint32_t offset = (uint32_t)mmap_offset(obj); |
| 531 | unmap_range(priv->iommus[id], offset, | 522 | mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size); |
| 532 | msm_obj->sgt, obj->size); | ||
| 533 | } | 523 | } |
| 534 | } | 524 | } |
| 535 | 525 | ||
| @@ -591,6 +581,7 @@ static int msm_gem_new_impl(struct drm_device *dev, | |||
| 591 | { | 581 | { |
| 592 | struct msm_drm_private *priv = dev->dev_private; | 582 | struct msm_drm_private *priv = dev->dev_private; |
| 593 | struct msm_gem_object *msm_obj; | 583 | struct msm_gem_object *msm_obj; |
| 584 | unsigned sz; | ||
| 594 | 585 | ||
| 595 | switch (flags & MSM_BO_CACHE_MASK) { | 586 | switch (flags & MSM_BO_CACHE_MASK) { |
| 596 | case MSM_BO_UNCACHED: | 587 | case MSM_BO_UNCACHED: |
| @@ -603,10 +594,17 @@ static int msm_gem_new_impl(struct drm_device *dev, | |||
| 603 | return -EINVAL; | 594 | return -EINVAL; |
| 604 | } | 595 | } |
| 605 | 596 | ||
| 606 | msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL); | 597 | sz = sizeof(*msm_obj); |
| 598 | if (!iommu_present(&platform_bus_type)) | ||
| 599 | sz += sizeof(struct drm_mm_node); | ||
| 600 | |||
| 601 | msm_obj = kzalloc(sz, GFP_KERNEL); | ||
| 607 | if (!msm_obj) | 602 | if (!msm_obj) |
| 608 | return -ENOMEM; | 603 | return -ENOMEM; |
| 609 | 604 | ||
| 605 | if (!iommu_present(&platform_bus_type)) | ||
| 606 | msm_obj->vram_node = (void *)&msm_obj[1]; | ||
| 607 | |||
| 610 | msm_obj->flags = flags; | 608 | msm_obj->flags = flags; |
| 611 | 609 | ||
| 612 | msm_obj->resv = &msm_obj->_resv; | 610 | msm_obj->resv = &msm_obj->_resv; |
| @@ -623,7 +621,7 @@ static int msm_gem_new_impl(struct drm_device *dev, | |||
| 623 | struct drm_gem_object *msm_gem_new(struct drm_device *dev, | 621 | struct drm_gem_object *msm_gem_new(struct drm_device *dev, |
| 624 | uint32_t size, uint32_t flags) | 622 | uint32_t size, uint32_t flags) |
| 625 | { | 623 | { |
| 626 | struct drm_gem_object *obj; | 624 | struct drm_gem_object *obj = NULL; |
| 627 | int ret; | 625 | int ret; |
| 628 | 626 | ||
| 629 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | 627 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); |
| @@ -634,9 +632,13 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, | |||
| 634 | if (ret) | 632 | if (ret) |
| 635 | goto fail; | 633 | goto fail; |
| 636 | 634 | ||
| 637 | ret = drm_gem_object_init(dev, obj, size); | 635 | if (iommu_present(&platform_bus_type)) { |
| 638 | if (ret) | 636 | ret = drm_gem_object_init(dev, obj, size); |
| 639 | goto fail; | 637 | if (ret) |
| 638 | goto fail; | ||
| 639 | } else { | ||
| 640 | drm_gem_private_object_init(dev, obj, size); | ||
| 641 | } | ||
| 640 | 642 | ||
| 641 | return obj; | 643 | return obj; |
| 642 | 644 | ||
| @@ -654,6 +656,12 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev, | |||
| 654 | struct drm_gem_object *obj; | 656 | struct drm_gem_object *obj; |
| 655 | int ret, npages; | 657 | int ret, npages; |
| 656 | 658 | ||
| 659 | /* if we don't have IOMMU, don't bother pretending we can import: */ | ||
| 660 | if (!iommu_present(&platform_bus_type)) { | ||
| 661 | dev_err(dev->dev, "cannot import without IOMMU\n"); | ||
| 662 | return ERR_PTR(-EINVAL); | ||
| 663 | } | ||
| 664 | |||
| 657 | size = PAGE_ALIGN(size); | 665 | size = PAGE_ALIGN(size); |
| 658 | 666 | ||
| 659 | ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj); | 667 | ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj); |
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index f4f23a578d9d..3246bb46c4f2 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h | |||
| @@ -57,6 +57,11 @@ struct msm_gem_object { | |||
| 57 | /* normally (resv == &_resv) except for imported bo's */ | 57 | /* normally (resv == &_resv) except for imported bo's */ |
| 58 | struct reservation_object *resv; | 58 | struct reservation_object *resv; |
| 59 | struct reservation_object _resv; | 59 | struct reservation_object _resv; |
| 60 | |||
| 61 | /* For physically contiguous buffers. Used when we don't have | ||
| 62 | * an IOMMU. | ||
| 63 | */ | ||
| 64 | struct drm_mm_node *vram_node; | ||
| 60 | }; | 65 | }; |
| 61 | #define to_msm_bo(x) container_of(x, struct msm_gem_object, base) | 66 | #define to_msm_bo(x) container_of(x, struct msm_gem_object, base) |
| 62 | 67 | ||
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 4583d61556f5..4ebce8be489d 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c | |||
| @@ -17,6 +17,7 @@ | |||
| 17 | 17 | ||
| 18 | #include "msm_gpu.h" | 18 | #include "msm_gpu.h" |
| 19 | #include "msm_gem.h" | 19 | #include "msm_gem.h" |
| 20 | #include "msm_mmu.h" | ||
| 20 | 21 | ||
| 21 | 22 | ||
| 22 | /* | 23 | /* |
| @@ -25,20 +26,10 @@ | |||
| 25 | 26 | ||
| 26 | #ifdef CONFIG_MSM_BUS_SCALING | 27 | #ifdef CONFIG_MSM_BUS_SCALING |
| 27 | #include <mach/board.h> | 28 | #include <mach/board.h> |
| 28 | #include <mach/kgsl.h> | 29 | static void bs_init(struct msm_gpu *gpu) |
| 29 | static void bs_init(struct msm_gpu *gpu, struct platform_device *pdev) | ||
| 30 | { | 30 | { |
| 31 | struct drm_device *dev = gpu->dev; | 31 | if (gpu->bus_scale_table) { |
| 32 | struct kgsl_device_platform_data *pdata; | 32 | gpu->bsc = msm_bus_scale_register_client(gpu->bus_scale_table); |
| 33 | |||
| 34 | if (!pdev) { | ||
| 35 | dev_err(dev->dev, "could not find dtv pdata\n"); | ||
| 36 | return; | ||
| 37 | } | ||
| 38 | |||
| 39 | pdata = pdev->dev.platform_data; | ||
| 40 | if (pdata->bus_scale_table) { | ||
| 41 | gpu->bsc = msm_bus_scale_register_client(pdata->bus_scale_table); | ||
| 42 | DBG("bus scale client: %08x", gpu->bsc); | 33 | DBG("bus scale client: %08x", gpu->bsc); |
| 43 | } | 34 | } |
| 44 | } | 35 | } |
| @@ -59,7 +50,7 @@ static void bs_set(struct msm_gpu *gpu, int idx) | |||
| 59 | } | 50 | } |
| 60 | } | 51 | } |
| 61 | #else | 52 | #else |
| 62 | static void bs_init(struct msm_gpu *gpu, struct platform_device *pdev) {} | 53 | static void bs_init(struct msm_gpu *gpu) {} |
| 63 | static void bs_fini(struct msm_gpu *gpu) {} | 54 | static void bs_fini(struct msm_gpu *gpu) {} |
| 64 | static void bs_set(struct msm_gpu *gpu, int idx) {} | 55 | static void bs_set(struct msm_gpu *gpu, int idx) {} |
| 65 | #endif | 56 | #endif |
| @@ -363,6 +354,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, | |||
| 363 | struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs, | 354 | struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs, |
| 364 | const char *name, const char *ioname, const char *irqname, int ringsz) | 355 | const char *name, const char *ioname, const char *irqname, int ringsz) |
| 365 | { | 356 | { |
| 357 | struct iommu_domain *iommu; | ||
| 366 | int i, ret; | 358 | int i, ret; |
| 367 | 359 | ||
| 368 | gpu->dev = drm; | 360 | gpu->dev = drm; |
| @@ -428,13 +420,14 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, | |||
| 428 | * and have separate page tables per context. For now, to keep things | 420 | * and have separate page tables per context. For now, to keep things |
| 429 | * simple and to get something working, just use a single address space: | 421 | * simple and to get something working, just use a single address space: |
| 430 | */ | 422 | */ |
| 431 | gpu->iommu = iommu_domain_alloc(&platform_bus_type); | 423 | iommu = iommu_domain_alloc(&platform_bus_type); |
| 432 | if (!gpu->iommu) { | 424 | if (iommu) { |
| 433 | dev_err(drm->dev, "failed to allocate IOMMU\n"); | 425 | dev_info(drm->dev, "%s: using IOMMU\n", name); |
| 434 | ret = -ENOMEM; | 426 | gpu->mmu = msm_iommu_new(drm, iommu); |
| 435 | goto fail; | 427 | } else { |
| 428 | dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name); | ||
| 436 | } | 429 | } |
| 437 | gpu->id = msm_register_iommu(drm, gpu->iommu); | 430 | gpu->id = msm_register_mmu(drm, gpu->mmu); |
| 438 | 431 | ||
| 439 | /* Create ringbuffer: */ | 432 | /* Create ringbuffer: */ |
| 440 | gpu->rb = msm_ringbuffer_new(gpu, ringsz); | 433 | gpu->rb = msm_ringbuffer_new(gpu, ringsz); |
| @@ -452,7 +445,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, | |||
| 452 | goto fail; | 445 | goto fail; |
| 453 | } | 446 | } |
| 454 | 447 | ||
| 455 | bs_init(gpu, pdev); | 448 | bs_init(gpu); |
| 456 | 449 | ||
| 457 | return 0; | 450 | return 0; |
| 458 | 451 | ||
| @@ -474,6 +467,6 @@ void msm_gpu_cleanup(struct msm_gpu *gpu) | |||
| 474 | msm_ringbuffer_destroy(gpu->rb); | 467 | msm_ringbuffer_destroy(gpu->rb); |
| 475 | } | 468 | } |
| 476 | 469 | ||
| 477 | if (gpu->iommu) | 470 | if (gpu->mmu) |
| 478 | iommu_domain_free(gpu->iommu); | 471 | gpu->mmu->funcs->destroy(gpu->mmu); |
| 479 | } | 472 | } |
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index 8cd829e520bb..458db8c64c28 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h | |||
| @@ -78,14 +78,18 @@ struct msm_gpu { | |||
| 78 | void __iomem *mmio; | 78 | void __iomem *mmio; |
| 79 | int irq; | 79 | int irq; |
| 80 | 80 | ||
| 81 | struct iommu_domain *iommu; | 81 | struct msm_mmu *mmu; |
| 82 | int id; | 82 | int id; |
| 83 | 83 | ||
| 84 | /* Power Control: */ | 84 | /* Power Control: */ |
| 85 | struct regulator *gpu_reg, *gpu_cx; | 85 | struct regulator *gpu_reg, *gpu_cx; |
| 86 | struct clk *ebi1_clk, *grp_clks[5]; | 86 | struct clk *ebi1_clk, *grp_clks[5]; |
| 87 | uint32_t fast_rate, slow_rate, bus_freq; | 87 | uint32_t fast_rate, slow_rate, bus_freq; |
| 88 | |||
| 89 | #ifdef CONFIG_MSM_BUS_SCALING | ||
| 90 | struct msm_bus_scale_pdata *bus_scale_table; | ||
| 88 | uint32_t bsc; | 91 | uint32_t bsc; |
| 92 | #endif | ||
| 89 | 93 | ||
| 90 | /* Hang Detction: */ | 94 | /* Hang Detction: */ |
| 91 | #define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */ | 95 | #define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */ |
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c new file mode 100644 index 000000000000..92b745986231 --- /dev/null +++ b/drivers/gpu/drm/msm/msm_iommu.c | |||
| @@ -0,0 +1,148 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2013 Red Hat | ||
| 3 | * Author: Rob Clark <robdclark@gmail.com> | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify it | ||
| 6 | * under the terms of the GNU General Public License version 2 as published by | ||
| 7 | * the Free Software Foundation. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 12 | * more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License along with | ||
| 15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 16 | */ | ||
| 17 | |||
| 18 | #include "msm_drv.h" | ||
| 19 | #include "msm_mmu.h" | ||
| 20 | |||
| 21 | struct msm_iommu { | ||
| 22 | struct msm_mmu base; | ||
| 23 | struct iommu_domain *domain; | ||
| 24 | }; | ||
| 25 | #define to_msm_iommu(x) container_of(x, struct msm_iommu, base) | ||
| 26 | |||
| 27 | static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev, | ||
| 28 | unsigned long iova, int flags, void *arg) | ||
| 29 | { | ||
| 30 | DBG("*** fault: iova=%08lx, flags=%d", iova, flags); | ||
| 31 | return 0; | ||
| 32 | } | ||
| 33 | |||
| 34 | static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt) | ||
| 35 | { | ||
| 36 | struct drm_device *dev = mmu->dev; | ||
| 37 | struct msm_iommu *iommu = to_msm_iommu(mmu); | ||
| 38 | int i, ret; | ||
| 39 | |||
| 40 | for (i = 0; i < cnt; i++) { | ||
| 41 | struct device *msm_iommu_get_ctx(const char *ctx_name); | ||
| 42 | struct device *ctx = msm_iommu_get_ctx(names[i]); | ||
| 43 | if (IS_ERR_OR_NULL(ctx)) | ||
| 44 | continue; | ||
| 45 | ret = iommu_attach_device(iommu->domain, ctx); | ||
| 46 | if (ret) { | ||
| 47 | dev_warn(dev->dev, "could not attach iommu to %s", names[i]); | ||
| 48 | return ret; | ||
| 49 | } | ||
| 50 | } | ||
| 51 | |||
| 52 | return 0; | ||
| 53 | } | ||
| 54 | |||
| 55 | static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova, | ||
| 56 | struct sg_table *sgt, unsigned len, int prot) | ||
| 57 | { | ||
| 58 | struct msm_iommu *iommu = to_msm_iommu(mmu); | ||
| 59 | struct iommu_domain *domain = iommu->domain; | ||
| 60 | struct scatterlist *sg; | ||
| 61 | unsigned int da = iova; | ||
| 62 | unsigned int i, j; | ||
| 63 | int ret; | ||
| 64 | |||
| 65 | if (!domain || !sgt) | ||
| 66 | return -EINVAL; | ||
| 67 | |||
| 68 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { | ||
| 69 | u32 pa = sg_phys(sg) - sg->offset; | ||
| 70 | size_t bytes = sg->length + sg->offset; | ||
| 71 | |||
| 72 | VERB("map[%d]: %08x %08x(%x)", i, iova, pa, bytes); | ||
| 73 | |||
| 74 | ret = iommu_map(domain, da, pa, bytes, prot); | ||
| 75 | if (ret) | ||
| 76 | goto fail; | ||
| 77 | |||
| 78 | da += bytes; | ||
| 79 | } | ||
| 80 | |||
| 81 | return 0; | ||
| 82 | |||
| 83 | fail: | ||
| 84 | da = iova; | ||
| 85 | |||
| 86 | for_each_sg(sgt->sgl, sg, i, j) { | ||
| 87 | size_t bytes = sg->length + sg->offset; | ||
| 88 | iommu_unmap(domain, da, bytes); | ||
| 89 | da += bytes; | ||
| 90 | } | ||
| 91 | return ret; | ||
| 92 | } | ||
| 93 | |||
| 94 | static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova, | ||
| 95 | struct sg_table *sgt, unsigned len) | ||
| 96 | { | ||
| 97 | struct msm_iommu *iommu = to_msm_iommu(mmu); | ||
| 98 | struct iommu_domain *domain = iommu->domain; | ||
| 99 | struct scatterlist *sg; | ||
| 100 | unsigned int da = iova; | ||
| 101 | int i; | ||
| 102 | |||
| 103 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { | ||
| 104 | size_t bytes = sg->length + sg->offset; | ||
| 105 | size_t unmapped; | ||
| 106 | |||
| 107 | unmapped = iommu_unmap(domain, da, bytes); | ||
| 108 | if (unmapped < bytes) | ||
| 109 | return unmapped; | ||
| 110 | |||
| 111 | VERB("unmap[%d]: %08x(%x)", i, iova, bytes); | ||
| 112 | |||
| 113 | BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE)); | ||
| 114 | |||
| 115 | da += bytes; | ||
| 116 | } | ||
| 117 | |||
| 118 | return 0; | ||
| 119 | } | ||
| 120 | |||
| 121 | static void msm_iommu_destroy(struct msm_mmu *mmu) | ||
| 122 | { | ||
| 123 | struct msm_iommu *iommu = to_msm_iommu(mmu); | ||
| 124 | iommu_domain_free(iommu->domain); | ||
| 125 | kfree(iommu); | ||
| 126 | } | ||
| 127 | |||
| 128 | static const struct msm_mmu_funcs funcs = { | ||
| 129 | .attach = msm_iommu_attach, | ||
| 130 | .map = msm_iommu_map, | ||
| 131 | .unmap = msm_iommu_unmap, | ||
| 132 | .destroy = msm_iommu_destroy, | ||
| 133 | }; | ||
| 134 | |||
| 135 | struct msm_mmu *msm_iommu_new(struct drm_device *dev, struct iommu_domain *domain) | ||
| 136 | { | ||
| 137 | struct msm_iommu *iommu; | ||
| 138 | |||
| 139 | iommu = kzalloc(sizeof(*iommu), GFP_KERNEL); | ||
| 140 | if (!iommu) | ||
| 141 | return ERR_PTR(-ENOMEM); | ||
| 142 | |||
| 143 | iommu->domain = domain; | ||
| 144 | msm_mmu_init(&iommu->base, dev, &funcs); | ||
| 145 | iommu_set_fault_handler(domain, msm_fault_handler, dev); | ||
| 146 | |||
| 147 | return &iommu->base; | ||
| 148 | } | ||
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h new file mode 100644 index 000000000000..06437745bc2c --- /dev/null +++ b/drivers/gpu/drm/msm/msm_kms.h | |||
| @@ -0,0 +1,68 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2013 Red Hat | ||
| 3 | * Author: Rob Clark <robdclark@gmail.com> | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify it | ||
| 6 | * under the terms of the GNU General Public License version 2 as published by | ||
| 7 | * the Free Software Foundation. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 12 | * more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License along with | ||
| 15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 16 | */ | ||
| 17 | |||
| 18 | #ifndef __MSM_KMS_H__ | ||
| 19 | #define __MSM_KMS_H__ | ||
| 20 | |||
| 21 | #include <linux/clk.h> | ||
| 22 | #include <linux/regulator/consumer.h> | ||
| 23 | |||
| 24 | #include "msm_drv.h" | ||
| 25 | |||
| 26 | /* As there are different display controller blocks depending on the | ||
| 27 | * snapdragon version, the kms support is split out and the appropriate | ||
| 28 | * implementation is loaded at runtime. The kms module is responsible | ||
| 29 | * for constructing the appropriate planes/crtcs/encoders/connectors. | ||
| 30 | */ | ||
| 31 | struct msm_kms_funcs { | ||
| 32 | /* hw initialization: */ | ||
| 33 | int (*hw_init)(struct msm_kms *kms); | ||
| 34 | /* irq handling: */ | ||
| 35 | void (*irq_preinstall)(struct msm_kms *kms); | ||
| 36 | int (*irq_postinstall)(struct msm_kms *kms); | ||
| 37 | void (*irq_uninstall)(struct msm_kms *kms); | ||
| 38 | irqreturn_t (*irq)(struct msm_kms *kms); | ||
| 39 | int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc); | ||
| 40 | void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc); | ||
| 41 | /* misc: */ | ||
| 42 | const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format); | ||
| 43 | long (*round_pixclk)(struct msm_kms *kms, unsigned long rate, | ||
| 44 | struct drm_encoder *encoder); | ||
| 45 | /* cleanup: */ | ||
| 46 | void (*preclose)(struct msm_kms *kms, struct drm_file *file); | ||
| 47 | void (*destroy)(struct msm_kms *kms); | ||
| 48 | }; | ||
| 49 | |||
| 50 | struct msm_kms { | ||
| 51 | const struct msm_kms_funcs *funcs; | ||
| 52 | |||
| 53 | /* irq handling: */ | ||
| 54 | bool in_irq; | ||
| 55 | struct list_head irq_list; /* list of mdp4_irq */ | ||
| 56 | uint32_t vblank_mask; /* irq bits set for userspace vblank */ | ||
| 57 | }; | ||
| 58 | |||
| 59 | static inline void msm_kms_init(struct msm_kms *kms, | ||
| 60 | const struct msm_kms_funcs *funcs) | ||
| 61 | { | ||
| 62 | kms->funcs = funcs; | ||
| 63 | } | ||
| 64 | |||
| 65 | struct msm_kms *mdp4_kms_init(struct drm_device *dev); | ||
| 66 | struct msm_kms *mdp5_kms_init(struct drm_device *dev); | ||
| 67 | |||
| 68 | #endif /* __MSM_KMS_H__ */ | ||
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h new file mode 100644 index 000000000000..030324482b4a --- /dev/null +++ b/drivers/gpu/drm/msm/msm_mmu.h | |||
| @@ -0,0 +1,47 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2013 Red Hat | ||
| 3 | * Author: Rob Clark <robdclark@gmail.com> | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify it | ||
| 6 | * under the terms of the GNU General Public License version 2 as published by | ||
| 7 | * the Free Software Foundation. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 12 | * more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License along with | ||
| 15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 16 | */ | ||
| 17 | |||
| 18 | #ifndef __MSM_MMU_H__ | ||
| 19 | #define __MSM_MMU_H__ | ||
| 20 | |||
| 21 | #include <linux/iommu.h> | ||
| 22 | |||
| 23 | struct msm_mmu_funcs { | ||
| 24 | int (*attach)(struct msm_mmu *mmu, const char **names, int cnt); | ||
| 25 | int (*map)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt, | ||
| 26 | unsigned len, int prot); | ||
| 27 | int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt, | ||
| 28 | unsigned len); | ||
| 29 | void (*destroy)(struct msm_mmu *mmu); | ||
| 30 | }; | ||
| 31 | |||
| 32 | struct msm_mmu { | ||
| 33 | const struct msm_mmu_funcs *funcs; | ||
| 34 | struct drm_device *dev; | ||
| 35 | }; | ||
| 36 | |||
| 37 | static inline void msm_mmu_init(struct msm_mmu *mmu, struct drm_device *dev, | ||
| 38 | const struct msm_mmu_funcs *funcs) | ||
| 39 | { | ||
| 40 | mmu->dev = dev; | ||
| 41 | mmu->funcs = funcs; | ||
| 42 | } | ||
| 43 | |||
| 44 | struct msm_mmu *msm_iommu_new(struct drm_device *dev, struct iommu_domain *domain); | ||
| 45 | struct msm_mmu *msm_gpummu_new(struct drm_device *dev, struct msm_gpu *gpu); | ||
| 46 | |||
| 47 | #endif /* __MSM_MMU_H__ */ | ||
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile index b3fa1ba191b7..e88145ba1bf5 100644 --- a/drivers/gpu/drm/nouveau/Makefile +++ b/drivers/gpu/drm/nouveau/Makefile | |||
| @@ -41,6 +41,7 @@ nouveau-y += core/subdev/bios/init.o | |||
| 41 | nouveau-y += core/subdev/bios/mxm.o | 41 | nouveau-y += core/subdev/bios/mxm.o |
| 42 | nouveau-y += core/subdev/bios/perf.o | 42 | nouveau-y += core/subdev/bios/perf.o |
| 43 | nouveau-y += core/subdev/bios/pll.o | 43 | nouveau-y += core/subdev/bios/pll.o |
| 44 | nouveau-y += core/subdev/bios/ramcfg.o | ||
| 44 | nouveau-y += core/subdev/bios/rammap.o | 45 | nouveau-y += core/subdev/bios/rammap.o |
| 45 | nouveau-y += core/subdev/bios/timing.o | 46 | nouveau-y += core/subdev/bios/timing.o |
| 46 | nouveau-y += core/subdev/bios/therm.o | 47 | nouveau-y += core/subdev/bios/therm.o |
| @@ -71,7 +72,10 @@ nouveau-y += core/subdev/devinit/nv10.o | |||
| 71 | nouveau-y += core/subdev/devinit/nv1a.o | 72 | nouveau-y += core/subdev/devinit/nv1a.o |
| 72 | nouveau-y += core/subdev/devinit/nv20.o | 73 | nouveau-y += core/subdev/devinit/nv20.o |
| 73 | nouveau-y += core/subdev/devinit/nv50.o | 74 | nouveau-y += core/subdev/devinit/nv50.o |
| 75 | nouveau-y += core/subdev/devinit/nv84.o | ||
| 76 | nouveau-y += core/subdev/devinit/nv98.o | ||
| 74 | nouveau-y += core/subdev/devinit/nva3.o | 77 | nouveau-y += core/subdev/devinit/nva3.o |
| 78 | nouveau-y += core/subdev/devinit/nvaf.o | ||
| 75 | nouveau-y += core/subdev/devinit/nvc0.o | 79 | nouveau-y += core/subdev/devinit/nvc0.o |
| 76 | nouveau-y += core/subdev/fb/base.o | 80 | nouveau-y += core/subdev/fb/base.o |
| 77 | nouveau-y += core/subdev/fb/nv04.o | 81 | nouveau-y += core/subdev/fb/nv04.o |
| @@ -232,6 +236,7 @@ nouveau-y += core/engine/fifo/nv50.o | |||
| 232 | nouveau-y += core/engine/fifo/nv84.o | 236 | nouveau-y += core/engine/fifo/nv84.o |
| 233 | nouveau-y += core/engine/fifo/nvc0.o | 237 | nouveau-y += core/engine/fifo/nvc0.o |
| 234 | nouveau-y += core/engine/fifo/nve0.o | 238 | nouveau-y += core/engine/fifo/nve0.o |
| 239 | nouveau-y += core/engine/fifo/nv108.o | ||
| 235 | nouveau-y += core/engine/graph/ctxnv40.o | 240 | nouveau-y += core/engine/graph/ctxnv40.o |
| 236 | nouveau-y += core/engine/graph/ctxnv50.o | 241 | nouveau-y += core/engine/graph/ctxnv50.o |
| 237 | nouveau-y += core/engine/graph/ctxnvc0.o | 242 | nouveau-y += core/engine/graph/ctxnvc0.o |
| @@ -242,6 +247,7 @@ nouveau-y += core/engine/graph/ctxnvd7.o | |||
| 242 | nouveau-y += core/engine/graph/ctxnvd9.o | 247 | nouveau-y += core/engine/graph/ctxnvd9.o |
| 243 | nouveau-y += core/engine/graph/ctxnve4.o | 248 | nouveau-y += core/engine/graph/ctxnve4.o |
| 244 | nouveau-y += core/engine/graph/ctxnvf0.o | 249 | nouveau-y += core/engine/graph/ctxnvf0.o |
| 250 | nouveau-y += core/engine/graph/ctxnv108.o | ||
| 245 | nouveau-y += core/engine/graph/nv04.o | 251 | nouveau-y += core/engine/graph/nv04.o |
| 246 | nouveau-y += core/engine/graph/nv10.o | 252 | nouveau-y += core/engine/graph/nv10.o |
| 247 | nouveau-y += core/engine/graph/nv20.o | 253 | nouveau-y += core/engine/graph/nv20.o |
| @@ -260,6 +266,7 @@ nouveau-y += core/engine/graph/nvd7.o | |||
| 260 | nouveau-y += core/engine/graph/nvd9.o | 266 | nouveau-y += core/engine/graph/nvd9.o |
| 261 | nouveau-y += core/engine/graph/nve4.o | 267 | nouveau-y += core/engine/graph/nve4.o |
| 262 | nouveau-y += core/engine/graph/nvf0.o | 268 | nouveau-y += core/engine/graph/nvf0.o |
| 269 | nouveau-y += core/engine/graph/nv108.o | ||
| 263 | nouveau-y += core/engine/mpeg/nv31.o | 270 | nouveau-y += core/engine/mpeg/nv31.o |
| 264 | nouveau-y += core/engine/mpeg/nv40.o | 271 | nouveau-y += core/engine/mpeg/nv40.o |
| 265 | nouveau-y += core/engine/mpeg/nv44.o | 272 | nouveau-y += core/engine/mpeg/nv44.o |
diff --git a/drivers/gpu/drm/nouveau/core/core/engine.c b/drivers/gpu/drm/nouveau/core/core/engine.c index c8bed4a26833..1f6954ae9dd3 100644 --- a/drivers/gpu/drm/nouveau/core/core/engine.c +++ b/drivers/gpu/drm/nouveau/core/core/engine.c | |||
| @@ -42,11 +42,24 @@ nouveau_engine_create_(struct nouveau_object *parent, | |||
| 42 | if (ret) | 42 | if (ret) |
| 43 | return ret; | 43 | return ret; |
| 44 | 44 | ||
| 45 | if ( parent && | 45 | if (parent) { |
| 46 | !nouveau_boolopt(nv_device(parent)->cfgopt, iname, enable)) { | 46 | struct nouveau_device *device = nv_device(parent); |
| 47 | if (!enable) | 47 | int engidx = nv_engidx(nv_object(engine)); |
| 48 | nv_warn(engine, "disabled, %s=1 to enable\n", iname); | 48 | |
| 49 | return -ENODEV; | 49 | if (device->disable_mask & (1ULL << engidx)) { |
| 50 | if (!nouveau_boolopt(device->cfgopt, iname, false)) { | ||
| 51 | nv_debug(engine, "engine disabled by hw/fw\n"); | ||
| 52 | return -ENODEV; | ||
| 53 | } | ||
| 54 | |||
| 55 | nv_warn(engine, "ignoring hw/fw engine disable\n"); | ||
| 56 | } | ||
| 57 | |||
| 58 | if (!nouveau_boolopt(device->cfgopt, iname, enable)) { | ||
| 59 | if (!enable) | ||
| 60 | nv_warn(engine, "disabled, %s=1 to enable\n", iname); | ||
| 61 | return -ENODEV; | ||
| 62 | } | ||
| 50 | } | 63 | } |
| 51 | 64 | ||
| 52 | INIT_LIST_HEAD(&engine->contexts); | 65 | INIT_LIST_HEAD(&engine->contexts); |
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c index 993df09ad643..ac3291f781f6 100644 --- a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c | |||
| @@ -105,9 +105,6 @@ nvc0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 105 | struct nvc0_copy_priv *priv; | 105 | struct nvc0_copy_priv *priv; |
| 106 | int ret; | 106 | int ret; |
| 107 | 107 | ||
| 108 | if (nv_rd32(parent, 0x022500) & 0x00000100) | ||
| 109 | return -ENODEV; | ||
| 110 | |||
| 111 | ret = nouveau_falcon_create(parent, engine, oclass, 0x104000, true, | 108 | ret = nouveau_falcon_create(parent, engine, oclass, 0x104000, true, |
| 112 | "PCE0", "copy0", &priv); | 109 | "PCE0", "copy0", &priv); |
| 113 | *pobject = nv_object(priv); | 110 | *pobject = nv_object(priv); |
| @@ -133,9 +130,6 @@ nvc0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 133 | struct nvc0_copy_priv *priv; | 130 | struct nvc0_copy_priv *priv; |
| 134 | int ret; | 131 | int ret; |
| 135 | 132 | ||
| 136 | if (nv_rd32(parent, 0x022500) & 0x00000200) | ||
| 137 | return -ENODEV; | ||
| 138 | |||
| 139 | ret = nouveau_falcon_create(parent, engine, oclass, 0x105000, true, | 133 | ret = nouveau_falcon_create(parent, engine, oclass, 0x105000, true, |
| 140 | "PCE1", "copy1", &priv); | 134 | "PCE1", "copy1", &priv); |
| 141 | *pobject = nv_object(priv); | 135 | *pobject = nv_object(priv); |
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c index 30f1ef1edcc5..748a61eb3c6f 100644 --- a/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c +++ b/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c | |||
| @@ -88,9 +88,6 @@ nve0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 88 | struct nve0_copy_priv *priv; | 88 | struct nve0_copy_priv *priv; |
| 89 | int ret; | 89 | int ret; |
| 90 | 90 | ||
| 91 | if (nv_rd32(parent, 0x022500) & 0x00000100) | ||
| 92 | return -ENODEV; | ||
| 93 | |||
| 94 | ret = nouveau_engine_create(parent, engine, oclass, true, | 91 | ret = nouveau_engine_create(parent, engine, oclass, true, |
| 95 | "PCE0", "copy0", &priv); | 92 | "PCE0", "copy0", &priv); |
| 96 | *pobject = nv_object(priv); | 93 | *pobject = nv_object(priv); |
| @@ -112,9 +109,6 @@ nve0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 112 | struct nve0_copy_priv *priv; | 109 | struct nve0_copy_priv *priv; |
| 113 | int ret; | 110 | int ret; |
| 114 | 111 | ||
| 115 | if (nv_rd32(parent, 0x022500) & 0x00000200) | ||
| 116 | return -ENODEV; | ||
| 117 | |||
| 118 | ret = nouveau_engine_create(parent, engine, oclass, true, | 112 | ret = nouveau_engine_create(parent, engine, oclass, true, |
| 119 | "PCE1", "copy1", &priv); | 113 | "PCE1", "copy1", &priv); |
| 120 | *pobject = nv_object(priv); | 114 | *pobject = nv_object(priv); |
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv04.c b/drivers/gpu/drm/nouveau/core/engine/device/nv04.c index dbd2dde7b7e7..32113b08c4d5 100644 --- a/drivers/gpu/drm/nouveau/core/engine/device/nv04.c +++ b/drivers/gpu/drm/nouveau/core/engine/device/nv04.c | |||
| @@ -49,12 +49,12 @@ nv04_identify(struct nouveau_device *device) | |||
| 49 | device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; | 49 | device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; |
| 50 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; | 50 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; |
| 51 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; | 51 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; |
| 52 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv04_devinit_oclass; | 52 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv04_devinit_oclass; |
| 53 | device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; | 53 | device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; |
| 54 | device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; | 54 | device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; |
| 55 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 55 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 56 | device->oclass[NVDEV_SUBDEV_FB ] = nv04_fb_oclass; | 56 | device->oclass[NVDEV_SUBDEV_FB ] = nv04_fb_oclass; |
| 57 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; | 57 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; |
| 58 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; | 58 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; |
| 59 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 59 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; |
| 60 | device->oclass[NVDEV_ENGINE_FIFO ] = nv04_fifo_oclass; | 60 | device->oclass[NVDEV_ENGINE_FIFO ] = nv04_fifo_oclass; |
| @@ -67,12 +67,12 @@ nv04_identify(struct nouveau_device *device) | |||
| 67 | device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; | 67 | device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; |
| 68 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; | 68 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; |
| 69 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; | 69 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; |
| 70 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv05_devinit_oclass; | 70 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv05_devinit_oclass; |
| 71 | device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; | 71 | device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; |
| 72 | device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; | 72 | device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; |
| 73 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 73 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 74 | device->oclass[NVDEV_SUBDEV_FB ] = nv04_fb_oclass; | 74 | device->oclass[NVDEV_SUBDEV_FB ] = nv04_fb_oclass; |
| 75 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; | 75 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; |
| 76 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; | 76 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; |
| 77 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 77 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; |
| 78 | device->oclass[NVDEV_ENGINE_FIFO ] = nv04_fifo_oclass; | 78 | device->oclass[NVDEV_ENGINE_FIFO ] = nv04_fifo_oclass; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv10.c b/drivers/gpu/drm/nouveau/core/engine/device/nv10.c index 6e03dd6abeea..744f15d7e131 100644 --- a/drivers/gpu/drm/nouveau/core/engine/device/nv10.c +++ b/drivers/gpu/drm/nouveau/core/engine/device/nv10.c | |||
| @@ -51,12 +51,12 @@ nv10_identify(struct nouveau_device *device) | |||
| 51 | device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; | 51 | device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; |
| 52 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; | 52 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; |
| 53 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; | 53 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; |
| 54 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass; | 54 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass; |
| 55 | device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; | 55 | device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; |
| 56 | device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; | 56 | device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; |
| 57 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 57 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 58 | device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass; | 58 | device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass; |
| 59 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; | 59 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; |
| 60 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; | 60 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; |
| 61 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 61 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; |
| 62 | device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; | 62 | device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; |
| @@ -68,12 +68,12 @@ nv10_identify(struct nouveau_device *device) | |||
| 68 | device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; | 68 | device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; |
| 69 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; | 69 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; |
| 70 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; | 70 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; |
| 71 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass; | 71 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass; |
| 72 | device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; | 72 | device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; |
| 73 | device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; | 73 | device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; |
| 74 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 74 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 75 | device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass; | 75 | device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass; |
| 76 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; | 76 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; |
| 77 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; | 77 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; |
| 78 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 78 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; |
| 79 | device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass; | 79 | device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass; |
| @@ -87,12 +87,12 @@ nv10_identify(struct nouveau_device *device) | |||
| 87 | device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; | 87 | device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; |
| 88 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; | 88 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; |
| 89 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; | 89 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; |
| 90 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass; | 90 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass; |
| 91 | device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; | 91 | device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; |
| 92 | device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; | 92 | device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; |
| 93 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 93 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 94 | device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass; | 94 | device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass; |
| 95 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; | 95 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; |
| 96 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; | 96 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; |
| 97 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 97 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; |
| 98 | device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass; | 98 | device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass; |
| @@ -106,12 +106,12 @@ nv10_identify(struct nouveau_device *device) | |||
| 106 | device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; | 106 | device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; |
| 107 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; | 107 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; |
| 108 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; | 108 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; |
| 109 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; | 109 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; |
| 110 | device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; | 110 | device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; |
| 111 | device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; | 111 | device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; |
| 112 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 112 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 113 | device->oclass[NVDEV_SUBDEV_FB ] = nv1a_fb_oclass; | 113 | device->oclass[NVDEV_SUBDEV_FB ] = nv1a_fb_oclass; |
| 114 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; | 114 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; |
| 115 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; | 115 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; |
| 116 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 116 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; |
| 117 | device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass; | 117 | device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass; |
| @@ -125,12 +125,12 @@ nv10_identify(struct nouveau_device *device) | |||
| 125 | device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; | 125 | device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; |
| 126 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; | 126 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; |
| 127 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; | 127 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; |
| 128 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass; | 128 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass; |
| 129 | device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; | 129 | device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; |
| 130 | device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; | 130 | device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; |
| 131 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 131 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 132 | device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass; | 132 | device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass; |
| 133 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; | 133 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; |
| 134 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; | 134 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; |
| 135 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 135 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; |
| 136 | device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass; | 136 | device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass; |
| @@ -144,12 +144,12 @@ nv10_identify(struct nouveau_device *device) | |||
| 144 | device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; | 144 | device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; |
| 145 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; | 145 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; |
| 146 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; | 146 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; |
| 147 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass; | 147 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass; |
| 148 | device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; | 148 | device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; |
| 149 | device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; | 149 | device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; |
| 150 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 150 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 151 | device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass; | 151 | device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass; |
| 152 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; | 152 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; |
| 153 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; | 153 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; |
| 154 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 154 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; |
| 155 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; | 155 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; |
| @@ -163,12 +163,12 @@ nv10_identify(struct nouveau_device *device) | |||
| 163 | device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; | 163 | device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; |
| 164 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; | 164 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; |
| 165 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; | 165 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; |
| 166 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; | 166 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; |
| 167 | device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; | 167 | device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; |
| 168 | device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; | 168 | device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; |
| 169 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 169 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 170 | device->oclass[NVDEV_SUBDEV_FB ] = nv1a_fb_oclass; | 170 | device->oclass[NVDEV_SUBDEV_FB ] = nv1a_fb_oclass; |
| 171 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; | 171 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; |
| 172 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; | 172 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; |
| 173 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 173 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; |
| 174 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; | 174 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; |
| @@ -182,12 +182,12 @@ nv10_identify(struct nouveau_device *device) | |||
| 182 | device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; | 182 | device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; |
| 183 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; | 183 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; |
| 184 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; | 184 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; |
| 185 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass; | 185 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass; |
| 186 | device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; | 186 | device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; |
| 187 | device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; | 187 | device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; |
| 188 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 188 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 189 | device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass; | 189 | device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass; |
| 190 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; | 190 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; |
| 191 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; | 191 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; |
| 192 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 192 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; |
| 193 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; | 193 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv20.c b/drivers/gpu/drm/nouveau/core/engine/device/nv20.c index dcde53b9f07f..27ba61fb2710 100644 --- a/drivers/gpu/drm/nouveau/core/engine/device/nv20.c +++ b/drivers/gpu/drm/nouveau/core/engine/device/nv20.c | |||
| @@ -52,12 +52,12 @@ nv20_identify(struct nouveau_device *device) | |||
| 52 | device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; | 52 | device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; |
| 53 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; | 53 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; |
| 54 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; | 54 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; |
| 55 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; | 55 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass; |
| 56 | device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; | 56 | device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; |
| 57 | device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; | 57 | device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; |
| 58 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 58 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 59 | device->oclass[NVDEV_SUBDEV_FB ] = nv20_fb_oclass; | 59 | device->oclass[NVDEV_SUBDEV_FB ] = nv20_fb_oclass; |
| 60 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; | 60 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; |
| 61 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; | 61 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; |
| 62 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 62 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; |
| 63 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; | 63 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; |
| @@ -71,12 +71,12 @@ nv20_identify(struct nouveau_device *device) | |||
| 71 | device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; | 71 | device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; |
| 72 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; | 72 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; |
| 73 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; | 73 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; |
| 74 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; | 74 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass; |
| 75 | device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; | 75 | device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; |
| 76 | device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; | 76 | device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; |
| 77 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 77 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 78 | device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass; | 78 | device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass; |
| 79 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; | 79 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; |
| 80 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; | 80 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; |
| 81 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 81 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; |
| 82 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; | 82 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; |
| @@ -90,12 +90,12 @@ nv20_identify(struct nouveau_device *device) | |||
| 90 | device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; | 90 | device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; |
| 91 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; | 91 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; |
| 92 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; | 92 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; |
| 93 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; | 93 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass; |
| 94 | device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; | 94 | device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; |
| 95 | device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; | 95 | device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; |
| 96 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 96 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 97 | device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass; | 97 | device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass; |
| 98 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; | 98 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; |
| 99 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; | 99 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; |
| 100 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 100 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; |
| 101 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; | 101 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; |
| @@ -109,12 +109,12 @@ nv20_identify(struct nouveau_device *device) | |||
| 109 | device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; | 109 | device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; |
| 110 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; | 110 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; |
| 111 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; | 111 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; |
| 112 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; | 112 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass; |
| 113 | device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; | 113 | device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; |
| 114 | device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; | 114 | device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; |
| 115 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 115 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 116 | device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass; | 116 | device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass; |
| 117 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; | 117 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; |
| 118 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; | 118 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; |
| 119 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 119 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; |
| 120 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; | 120 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv30.c b/drivers/gpu/drm/nouveau/core/engine/device/nv30.c index 7b8662ef4f59..fd47ace67543 100644 --- a/drivers/gpu/drm/nouveau/core/engine/device/nv30.c +++ b/drivers/gpu/drm/nouveau/core/engine/device/nv30.c | |||
| @@ -52,12 +52,12 @@ nv30_identify(struct nouveau_device *device) | |||
| 52 | device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; | 52 | device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; |
| 53 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; | 53 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; |
| 54 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; | 54 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; |
| 55 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; | 55 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass; |
| 56 | device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; | 56 | device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; |
| 57 | device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; | 57 | device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; |
| 58 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 58 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 59 | device->oclass[NVDEV_SUBDEV_FB ] = nv30_fb_oclass; | 59 | device->oclass[NVDEV_SUBDEV_FB ] = nv30_fb_oclass; |
| 60 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; | 60 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; |
| 61 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; | 61 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; |
| 62 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 62 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; |
| 63 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; | 63 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; |
| @@ -71,12 +71,12 @@ nv30_identify(struct nouveau_device *device) | |||
| 71 | device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; | 71 | device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; |
| 72 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; | 72 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; |
| 73 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; | 73 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; |
| 74 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; | 74 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass; |
| 75 | device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; | 75 | device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; |
| 76 | device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; | 76 | device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; |
| 77 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 77 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 78 | device->oclass[NVDEV_SUBDEV_FB ] = nv35_fb_oclass; | 78 | device->oclass[NVDEV_SUBDEV_FB ] = nv35_fb_oclass; |
| 79 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; | 79 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; |
| 80 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; | 80 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; |
| 81 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 81 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; |
| 82 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; | 82 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; |
| @@ -90,12 +90,12 @@ nv30_identify(struct nouveau_device *device) | |||
| 90 | device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; | 90 | device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; |
| 91 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; | 91 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; |
| 92 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; | 92 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; |
| 93 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; | 93 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass; |
| 94 | device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; | 94 | device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; |
| 95 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; | 95 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; |
| 96 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 96 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 97 | device->oclass[NVDEV_SUBDEV_FB ] = nv30_fb_oclass; | 97 | device->oclass[NVDEV_SUBDEV_FB ] = nv30_fb_oclass; |
| 98 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; | 98 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; |
| 99 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; | 99 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; |
| 100 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 100 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; |
| 101 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; | 101 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; |
| @@ -110,12 +110,12 @@ nv30_identify(struct nouveau_device *device) | |||
| 110 | device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; | 110 | device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; |
| 111 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; | 111 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; |
| 112 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; | 112 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; |
| 113 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; | 113 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass; |
| 114 | device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; | 114 | device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; |
| 115 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; | 115 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; |
| 116 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 116 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 117 | device->oclass[NVDEV_SUBDEV_FB ] = nv36_fb_oclass; | 117 | device->oclass[NVDEV_SUBDEV_FB ] = nv36_fb_oclass; |
| 118 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; | 118 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; |
| 119 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; | 119 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; |
| 120 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 120 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; |
| 121 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; | 121 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; |
| @@ -130,12 +130,12 @@ nv30_identify(struct nouveau_device *device) | |||
| 130 | device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; | 130 | device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; |
| 131 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; | 131 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; |
| 132 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; | 132 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; |
| 133 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass; | 133 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass; |
| 134 | device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; | 134 | device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; |
| 135 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; | 135 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; |
| 136 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 136 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 137 | device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass; | 137 | device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass; |
| 138 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; | 138 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; |
| 139 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; | 139 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; |
| 140 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 140 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; |
| 141 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; | 141 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c index c8c41e93695e..1b653dd74a70 100644 --- a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c +++ b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c | |||
| @@ -57,12 +57,12 @@ nv40_identify(struct nouveau_device *device) | |||
| 57 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; | 57 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; |
| 58 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; | 58 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; |
| 59 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; | 59 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; |
| 60 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; | 60 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; |
| 61 | device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass; | 61 | device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass; |
| 62 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; | 62 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; |
| 63 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 63 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 64 | device->oclass[NVDEV_SUBDEV_FB ] = nv40_fb_oclass; | 64 | device->oclass[NVDEV_SUBDEV_FB ] = nv40_fb_oclass; |
| 65 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; | 65 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; |
| 66 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; | 66 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; |
| 67 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 67 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
| 68 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 68 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; |
| @@ -80,12 +80,12 @@ nv40_identify(struct nouveau_device *device) | |||
| 80 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; | 80 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; |
| 81 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; | 81 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; |
| 82 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; | 82 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; |
| 83 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; | 83 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; |
| 84 | device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass; | 84 | device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass; |
| 85 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; | 85 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; |
| 86 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 86 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 87 | device->oclass[NVDEV_SUBDEV_FB ] = nv41_fb_oclass; | 87 | device->oclass[NVDEV_SUBDEV_FB ] = nv41_fb_oclass; |
| 88 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; | 88 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; |
| 89 | device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; | 89 | device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; |
| 90 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 90 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
| 91 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 91 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; |
| @@ -103,12 +103,12 @@ nv40_identify(struct nouveau_device *device) | |||
| 103 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; | 103 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; |
| 104 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; | 104 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; |
| 105 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; | 105 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; |
| 106 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; | 106 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; |
| 107 | device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass; | 107 | device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass; |
| 108 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; | 108 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; |
| 109 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 109 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 110 | device->oclass[NVDEV_SUBDEV_FB ] = nv41_fb_oclass; | 110 | device->oclass[NVDEV_SUBDEV_FB ] = nv41_fb_oclass; |
| 111 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; | 111 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; |
| 112 | device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; | 112 | device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; |
| 113 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 113 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
| 114 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 114 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; |
| @@ -126,12 +126,12 @@ nv40_identify(struct nouveau_device *device) | |||
| 126 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; | 126 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; |
| 127 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; | 127 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; |
| 128 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; | 128 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; |
| 129 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; | 129 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; |
| 130 | device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass; | 130 | device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass; |
| 131 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; | 131 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; |
| 132 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 132 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 133 | device->oclass[NVDEV_SUBDEV_FB ] = nv41_fb_oclass; | 133 | device->oclass[NVDEV_SUBDEV_FB ] = nv41_fb_oclass; |
| 134 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; | 134 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; |
| 135 | device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; | 135 | device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; |
| 136 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 136 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
| 137 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 137 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; |
| @@ -149,12 +149,12 @@ nv40_identify(struct nouveau_device *device) | |||
| 149 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; | 149 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; |
| 150 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; | 150 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; |
| 151 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; | 151 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; |
| 152 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; | 152 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; |
| 153 | device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass; | 153 | device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass; |
| 154 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; | 154 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; |
| 155 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 155 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 156 | device->oclass[NVDEV_SUBDEV_FB ] = nv40_fb_oclass; | 156 | device->oclass[NVDEV_SUBDEV_FB ] = nv40_fb_oclass; |
| 157 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; | 157 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; |
| 158 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; | 158 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; |
| 159 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 159 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
| 160 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 160 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; |
| @@ -172,12 +172,12 @@ nv40_identify(struct nouveau_device *device) | |||
| 172 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; | 172 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; |
| 173 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; | 173 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; |
| 174 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; | 174 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; |
| 175 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; | 175 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; |
| 176 | device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass; | 176 | device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass; |
| 177 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; | 177 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; |
| 178 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 178 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 179 | device->oclass[NVDEV_SUBDEV_FB ] = nv47_fb_oclass; | 179 | device->oclass[NVDEV_SUBDEV_FB ] = nv47_fb_oclass; |
| 180 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; | 180 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; |
| 181 | device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; | 181 | device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; |
| 182 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 182 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
| 183 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 183 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; |
| @@ -195,12 +195,12 @@ nv40_identify(struct nouveau_device *device) | |||
| 195 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; | 195 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; |
| 196 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; | 196 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; |
| 197 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; | 197 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; |
| 198 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; | 198 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; |
| 199 | device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass; | 199 | device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass; |
| 200 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; | 200 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; |
| 201 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 201 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 202 | device->oclass[NVDEV_SUBDEV_FB ] = nv49_fb_oclass; | 202 | device->oclass[NVDEV_SUBDEV_FB ] = nv49_fb_oclass; |
| 203 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; | 203 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; |
| 204 | device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; | 204 | device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; |
| 205 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 205 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
| 206 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 206 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; |
| @@ -218,12 +218,12 @@ nv40_identify(struct nouveau_device *device) | |||
| 218 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; | 218 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; |
| 219 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; | 219 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; |
| 220 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; | 220 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; |
| 221 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; | 221 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; |
| 222 | device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass; | 222 | device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass; |
| 223 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; | 223 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; |
| 224 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 224 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 225 | device->oclass[NVDEV_SUBDEV_FB ] = nv49_fb_oclass; | 225 | device->oclass[NVDEV_SUBDEV_FB ] = nv49_fb_oclass; |
| 226 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; | 226 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; |
| 227 | device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; | 227 | device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; |
| 228 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 228 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
| 229 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 229 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; |
| @@ -241,12 +241,12 @@ nv40_identify(struct nouveau_device *device) | |||
| 241 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; | 241 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; |
| 242 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; | 242 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; |
| 243 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; | 243 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; |
| 244 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; | 244 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; |
| 245 | device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; | 245 | device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; |
| 246 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; | 246 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; |
| 247 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 247 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 248 | device->oclass[NVDEV_SUBDEV_FB ] = nv44_fb_oclass; | 248 | device->oclass[NVDEV_SUBDEV_FB ] = nv44_fb_oclass; |
| 249 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; | 249 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; |
| 250 | device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; | 250 | device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; |
| 251 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 251 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
| 252 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 252 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; |
| @@ -264,12 +264,12 @@ nv40_identify(struct nouveau_device *device) | |||
| 264 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; | 264 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; |
| 265 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; | 265 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; |
| 266 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; | 266 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; |
| 267 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; | 267 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; |
| 268 | device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; | 268 | device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; |
| 269 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; | 269 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; |
| 270 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 270 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 271 | device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass; | 271 | device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass; |
| 272 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; | 272 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; |
| 273 | device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; | 273 | device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; |
| 274 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 274 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
| 275 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 275 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; |
| @@ -287,12 +287,12 @@ nv40_identify(struct nouveau_device *device) | |||
| 287 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; | 287 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; |
| 288 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; | 288 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; |
| 289 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; | 289 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; |
| 290 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; | 290 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; |
| 291 | device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; | 291 | device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; |
| 292 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; | 292 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; |
| 293 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 293 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 294 | device->oclass[NVDEV_SUBDEV_FB ] = nv44_fb_oclass; | 294 | device->oclass[NVDEV_SUBDEV_FB ] = nv44_fb_oclass; |
| 295 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; | 295 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; |
| 296 | device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; | 296 | device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; |
| 297 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 297 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
| 298 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 298 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; |
| @@ -310,12 +310,12 @@ nv40_identify(struct nouveau_device *device) | |||
| 310 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; | 310 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; |
| 311 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; | 311 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; |
| 312 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; | 312 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; |
| 313 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; | 313 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; |
| 314 | device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; | 314 | device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; |
| 315 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; | 315 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; |
| 316 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 316 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 317 | device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass; | 317 | device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass; |
| 318 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; | 318 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; |
| 319 | device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; | 319 | device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; |
| 320 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 320 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
| 321 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 321 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; |
| @@ -333,12 +333,12 @@ nv40_identify(struct nouveau_device *device) | |||
| 333 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv4e_i2c_oclass; | 333 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv4e_i2c_oclass; |
| 334 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; | 334 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; |
| 335 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; | 335 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; |
| 336 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; | 336 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; |
| 337 | device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; | 337 | device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; |
| 338 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; | 338 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; |
| 339 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 339 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 340 | device->oclass[NVDEV_SUBDEV_FB ] = nv4e_fb_oclass; | 340 | device->oclass[NVDEV_SUBDEV_FB ] = nv4e_fb_oclass; |
| 341 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; | 341 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; |
| 342 | device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; | 342 | device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; |
| 343 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 343 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
| 344 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 344 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; |
| @@ -356,12 +356,12 @@ nv40_identify(struct nouveau_device *device) | |||
| 356 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; | 356 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; |
| 357 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; | 357 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; |
| 358 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; | 358 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; |
| 359 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; | 359 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; |
| 360 | device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; | 360 | device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; |
| 361 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; | 361 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; |
| 362 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 362 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 363 | device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass; | 363 | device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass; |
| 364 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; | 364 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; |
| 365 | device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; | 365 | device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; |
| 366 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 366 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
| 367 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 367 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; |
| @@ -379,12 +379,12 @@ nv40_identify(struct nouveau_device *device) | |||
| 379 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; | 379 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; |
| 380 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; | 380 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; |
| 381 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; | 381 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; |
| 382 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; | 382 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; |
| 383 | device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; | 383 | device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; |
| 384 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; | 384 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; |
| 385 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 385 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 386 | device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass; | 386 | device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass; |
| 387 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; | 387 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; |
| 388 | device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; | 388 | device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; |
| 389 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 389 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
| 390 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 390 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; |
| @@ -402,12 +402,12 @@ nv40_identify(struct nouveau_device *device) | |||
| 402 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; | 402 | device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; |
| 403 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; | 403 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; |
| 404 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; | 404 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; |
| 405 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; | 405 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; |
| 406 | device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; | 406 | device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; |
| 407 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; | 407 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; |
| 408 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 408 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 409 | device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass; | 409 | device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass; |
| 410 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; | 410 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; |
| 411 | device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; | 411 | device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; |
| 412 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 412 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
| 413 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 413 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c index db3fc7be856a..81d5c26643d5 100644 --- a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c | |||
| @@ -65,12 +65,12 @@ nv50_identify(struct nouveau_device *device) | |||
| 65 | device->oclass[NVDEV_SUBDEV_CLOCK ] = nv50_clock_oclass; | 65 | device->oclass[NVDEV_SUBDEV_CLOCK ] = nv50_clock_oclass; |
| 66 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass; | 66 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass; |
| 67 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; | 67 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; |
| 68 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; | 68 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv50_devinit_oclass; |
| 69 | device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass; | 69 | device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass; |
| 70 | device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass; | 70 | device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass; |
| 71 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 71 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 72 | device->oclass[NVDEV_SUBDEV_FB ] = nv50_fb_oclass; | 72 | device->oclass[NVDEV_SUBDEV_FB ] = nv50_fb_oclass; |
| 73 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; | 73 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
| 74 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; | 74 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; |
| 75 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; | 75 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; |
| 76 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 76 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
| @@ -90,12 +90,12 @@ nv50_identify(struct nouveau_device *device) | |||
| 90 | device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass; | 90 | device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass; |
| 91 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; | 91 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; |
| 92 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; | 92 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; |
| 93 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; | 93 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv84_devinit_oclass; |
| 94 | device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass; | 94 | device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass; |
| 95 | device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass; | 95 | device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass; |
| 96 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 96 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 97 | device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass; | 97 | device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass; |
| 98 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; | 98 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
| 99 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; | 99 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; |
| 100 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; | 100 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; |
| 101 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 101 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
| @@ -118,12 +118,12 @@ nv50_identify(struct nouveau_device *device) | |||
| 118 | device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass; | 118 | device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass; |
| 119 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; | 119 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; |
| 120 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; | 120 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; |
| 121 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; | 121 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv84_devinit_oclass; |
| 122 | device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass; | 122 | device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass; |
| 123 | device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass; | 123 | device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass; |
| 124 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 124 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 125 | device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass; | 125 | device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass; |
| 126 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; | 126 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
| 127 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; | 127 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; |
| 128 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; | 128 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; |
| 129 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 129 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
| @@ -146,12 +146,12 @@ nv50_identify(struct nouveau_device *device) | |||
| 146 | device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass; | 146 | device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass; |
| 147 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; | 147 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; |
| 148 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; | 148 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; |
| 149 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; | 149 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv84_devinit_oclass; |
| 150 | device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass; | 150 | device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass; |
| 151 | device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass; | 151 | device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass; |
| 152 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 152 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 153 | device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass; | 153 | device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass; |
| 154 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; | 154 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
| 155 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; | 155 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; |
| 156 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; | 156 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; |
| 157 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 157 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
| @@ -174,12 +174,12 @@ nv50_identify(struct nouveau_device *device) | |||
| 174 | device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass; | 174 | device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass; |
| 175 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; | 175 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; |
| 176 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; | 176 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; |
| 177 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; | 177 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv84_devinit_oclass; |
| 178 | device->oclass[NVDEV_SUBDEV_MC ] = nv94_mc_oclass; | 178 | device->oclass[NVDEV_SUBDEV_MC ] = nv94_mc_oclass; |
| 179 | device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass; | 179 | device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass; |
| 180 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 180 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 181 | device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass; | 181 | device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass; |
| 182 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; | 182 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
| 183 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; | 183 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; |
| 184 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; | 184 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; |
| 185 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 185 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
| @@ -202,12 +202,12 @@ nv50_identify(struct nouveau_device *device) | |||
| 202 | device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass; | 202 | device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass; |
| 203 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; | 203 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; |
| 204 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; | 204 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; |
| 205 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; | 205 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv84_devinit_oclass; |
| 206 | device->oclass[NVDEV_SUBDEV_MC ] = nv94_mc_oclass; | 206 | device->oclass[NVDEV_SUBDEV_MC ] = nv94_mc_oclass; |
| 207 | device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass; | 207 | device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass; |
| 208 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 208 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 209 | device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass; | 209 | device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass; |
| 210 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; | 210 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
| 211 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; | 211 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; |
| 212 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; | 212 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; |
| 213 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 213 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
| @@ -230,12 +230,12 @@ nv50_identify(struct nouveau_device *device) | |||
| 230 | device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass; | 230 | device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass; |
| 231 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; | 231 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; |
| 232 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; | 232 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; |
| 233 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; | 233 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv98_devinit_oclass; |
| 234 | device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass; | 234 | device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass; |
| 235 | device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass; | 235 | device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass; |
| 236 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 236 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 237 | device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass; | 237 | device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass; |
| 238 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; | 238 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
| 239 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; | 239 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; |
| 240 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; | 240 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; |
| 241 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 241 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
| @@ -258,12 +258,12 @@ nv50_identify(struct nouveau_device *device) | |||
| 258 | device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass; | 258 | device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass; |
| 259 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; | 259 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; |
| 260 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; | 260 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; |
| 261 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; | 261 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv84_devinit_oclass; |
| 262 | device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass; | 262 | device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass; |
| 263 | device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass; | 263 | device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass; |
| 264 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 264 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 265 | device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass; | 265 | device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass; |
| 266 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; | 266 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
| 267 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; | 267 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; |
| 268 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; | 268 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; |
| 269 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 269 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
| @@ -286,12 +286,12 @@ nv50_identify(struct nouveau_device *device) | |||
| 286 | device->oclass[NVDEV_SUBDEV_CLOCK ] = nvaa_clock_oclass; | 286 | device->oclass[NVDEV_SUBDEV_CLOCK ] = nvaa_clock_oclass; |
| 287 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; | 287 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; |
| 288 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; | 288 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; |
| 289 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; | 289 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv98_devinit_oclass; |
| 290 | device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass; | 290 | device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass; |
| 291 | device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass; | 291 | device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass; |
| 292 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 292 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 293 | device->oclass[NVDEV_SUBDEV_FB ] = nvaa_fb_oclass; | 293 | device->oclass[NVDEV_SUBDEV_FB ] = nvaa_fb_oclass; |
| 294 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; | 294 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
| 295 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; | 295 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; |
| 296 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; | 296 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; |
| 297 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 297 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
| @@ -314,12 +314,12 @@ nv50_identify(struct nouveau_device *device) | |||
| 314 | device->oclass[NVDEV_SUBDEV_CLOCK ] = nvaa_clock_oclass; | 314 | device->oclass[NVDEV_SUBDEV_CLOCK ] = nvaa_clock_oclass; |
| 315 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; | 315 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; |
| 316 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; | 316 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; |
| 317 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; | 317 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv98_devinit_oclass; |
| 318 | device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass; | 318 | device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass; |
| 319 | device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass; | 319 | device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass; |
| 320 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 320 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 321 | device->oclass[NVDEV_SUBDEV_FB ] = nvaa_fb_oclass; | 321 | device->oclass[NVDEV_SUBDEV_FB ] = nvaa_fb_oclass; |
| 322 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; | 322 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
| 323 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; | 323 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; |
| 324 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; | 324 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; |
| 325 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 325 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
| @@ -342,12 +342,12 @@ nv50_identify(struct nouveau_device *device) | |||
| 342 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass; | 342 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass; |
| 343 | device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; | 343 | device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; |
| 344 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; | 344 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; |
| 345 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass; | 345 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nva3_devinit_oclass; |
| 346 | device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass; | 346 | device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass; |
| 347 | device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass; | 347 | device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass; |
| 348 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 348 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 349 | device->oclass[NVDEV_SUBDEV_FB ] = nva3_fb_oclass; | 349 | device->oclass[NVDEV_SUBDEV_FB ] = nva3_fb_oclass; |
| 350 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; | 350 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
| 351 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; | 351 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; |
| 352 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; | 352 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; |
| 353 | device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass; | 353 | device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass; |
| @@ -372,12 +372,12 @@ nv50_identify(struct nouveau_device *device) | |||
| 372 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass; | 372 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass; |
| 373 | device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; | 373 | device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; |
| 374 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; | 374 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; |
| 375 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass; | 375 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nva3_devinit_oclass; |
| 376 | device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass; | 376 | device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass; |
| 377 | device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass; | 377 | device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass; |
| 378 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 378 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 379 | device->oclass[NVDEV_SUBDEV_FB ] = nva3_fb_oclass; | 379 | device->oclass[NVDEV_SUBDEV_FB ] = nva3_fb_oclass; |
| 380 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; | 380 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
| 381 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; | 381 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; |
| 382 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; | 382 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; |
| 383 | device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass; | 383 | device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass; |
| @@ -401,12 +401,12 @@ nv50_identify(struct nouveau_device *device) | |||
| 401 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass; | 401 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass; |
| 402 | device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; | 402 | device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; |
| 403 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; | 403 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; |
| 404 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass; | 404 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nva3_devinit_oclass; |
| 405 | device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass; | 405 | device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass; |
| 406 | device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass; | 406 | device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass; |
| 407 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 407 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 408 | device->oclass[NVDEV_SUBDEV_FB ] = nva3_fb_oclass; | 408 | device->oclass[NVDEV_SUBDEV_FB ] = nva3_fb_oclass; |
| 409 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; | 409 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
| 410 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; | 410 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; |
| 411 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; | 411 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; |
| 412 | device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass; | 412 | device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass; |
| @@ -430,12 +430,12 @@ nv50_identify(struct nouveau_device *device) | |||
| 430 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass; | 430 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass; |
| 431 | device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; | 431 | device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; |
| 432 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; | 432 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; |
| 433 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass; | 433 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nvaf_devinit_oclass; |
| 434 | device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass; | 434 | device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass; |
| 435 | device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass; | 435 | device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass; |
| 436 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 436 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 437 | device->oclass[NVDEV_SUBDEV_FB ] = nvaf_fb_oclass; | 437 | device->oclass[NVDEV_SUBDEV_FB ] = nvaf_fb_oclass; |
| 438 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; | 438 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
| 439 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; | 439 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; |
| 440 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; | 440 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; |
| 441 | device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass; | 441 | device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c index dbc5e33de94f..b7d66b59f43d 100644 --- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c | |||
| @@ -65,14 +65,14 @@ nvc0_identify(struct nouveau_device *device) | |||
| 65 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; | 65 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; |
| 66 | device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; | 66 | device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; |
| 67 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; | 67 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; |
| 68 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; | 68 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass; |
| 69 | device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass; | 69 | device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass; |
| 70 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; | 70 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; |
| 71 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 71 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 72 | device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; | 72 | device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; |
| 73 | device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; | 73 | device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; |
| 74 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; | 74 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; |
| 75 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; | 75 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
| 76 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; | 76 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; |
| 77 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; | 77 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; |
| 78 | device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass; | 78 | device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass; |
| @@ -97,14 +97,14 @@ nvc0_identify(struct nouveau_device *device) | |||
| 97 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; | 97 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; |
| 98 | device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; | 98 | device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; |
| 99 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; | 99 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; |
| 100 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; | 100 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass; |
| 101 | device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass; | 101 | device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass; |
| 102 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; | 102 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; |
| 103 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 103 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 104 | device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; | 104 | device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; |
| 105 | device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; | 105 | device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; |
| 106 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; | 106 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; |
| 107 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; | 107 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
| 108 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; | 108 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; |
| 109 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; | 109 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; |
| 110 | device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass; | 110 | device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass; |
| @@ -129,14 +129,14 @@ nvc0_identify(struct nouveau_device *device) | |||
| 129 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; | 129 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; |
| 130 | device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; | 130 | device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; |
| 131 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; | 131 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; |
| 132 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; | 132 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass; |
| 133 | device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass; | 133 | device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass; |
| 134 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; | 134 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; |
| 135 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 135 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 136 | device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; | 136 | device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; |
| 137 | device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; | 137 | device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; |
| 138 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; | 138 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; |
| 139 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; | 139 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
| 140 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; | 140 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; |
| 141 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; | 141 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; |
| 142 | device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass; | 142 | device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass; |
| @@ -160,14 +160,14 @@ nvc0_identify(struct nouveau_device *device) | |||
| 160 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; | 160 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; |
| 161 | device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; | 161 | device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; |
| 162 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; | 162 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; |
| 163 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; | 163 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass; |
| 164 | device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass; | 164 | device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass; |
| 165 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; | 165 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; |
| 166 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 166 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 167 | device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; | 167 | device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; |
| 168 | device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; | 168 | device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; |
| 169 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; | 169 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; |
| 170 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; | 170 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
| 171 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; | 171 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; |
| 172 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; | 172 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; |
| 173 | device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass; | 173 | device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass; |
| @@ -192,14 +192,14 @@ nvc0_identify(struct nouveau_device *device) | |||
| 192 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; | 192 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; |
| 193 | device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; | 193 | device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; |
| 194 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; | 194 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; |
| 195 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; | 195 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass; |
| 196 | device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass; | 196 | device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass; |
| 197 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; | 197 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; |
| 198 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 198 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 199 | device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; | 199 | device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; |
| 200 | device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; | 200 | device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; |
| 201 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; | 201 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; |
| 202 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; | 202 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
| 203 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; | 203 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; |
| 204 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; | 204 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; |
| 205 | device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass; | 205 | device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass; |
| @@ -224,14 +224,14 @@ nvc0_identify(struct nouveau_device *device) | |||
| 224 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; | 224 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; |
| 225 | device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; | 225 | device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; |
| 226 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; | 226 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; |
| 227 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; | 227 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass; |
| 228 | device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass; | 228 | device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass; |
| 229 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; | 229 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; |
| 230 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 230 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 231 | device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; | 231 | device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; |
| 232 | device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; | 232 | device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; |
| 233 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; | 233 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; |
| 234 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; | 234 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
| 235 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; | 235 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; |
| 236 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; | 236 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; |
| 237 | device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass; | 237 | device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass; |
| @@ -255,14 +255,14 @@ nvc0_identify(struct nouveau_device *device) | |||
| 255 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; | 255 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; |
| 256 | device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; | 256 | device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; |
| 257 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; | 257 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; |
| 258 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; | 258 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass; |
| 259 | device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass; | 259 | device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass; |
| 260 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; | 260 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; |
| 261 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 261 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 262 | device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; | 262 | device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; |
| 263 | device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; | 263 | device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; |
| 264 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; | 264 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; |
| 265 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; | 265 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
| 266 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; | 266 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; |
| 267 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; | 267 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; |
| 268 | device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass; | 268 | device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass; |
| @@ -287,14 +287,14 @@ nvc0_identify(struct nouveau_device *device) | |||
| 287 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; | 287 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; |
| 288 | device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; | 288 | device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; |
| 289 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; | 289 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; |
| 290 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; | 290 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass; |
| 291 | device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass; | 291 | device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass; |
| 292 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; | 292 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; |
| 293 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 293 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 294 | device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; | 294 | device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; |
| 295 | device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; | 295 | device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; |
| 296 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; | 296 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; |
| 297 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; | 297 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
| 298 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; | 298 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; |
| 299 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; | 299 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; |
| 300 | device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass; | 300 | device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass; |
| @@ -318,14 +318,14 @@ nvc0_identify(struct nouveau_device *device) | |||
| 318 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; | 318 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; |
| 319 | device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; | 319 | device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; |
| 320 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; | 320 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; |
| 321 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; | 321 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass; |
| 322 | device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass; | 322 | device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass; |
| 323 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; | 323 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; |
| 324 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 324 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 325 | device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; | 325 | device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; |
| 326 | device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; | 326 | device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; |
| 327 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; | 327 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; |
| 328 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; | 328 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
| 329 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; | 329 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; |
| 330 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; | 330 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; |
| 331 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass; | 331 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c index 3900104976fc..987edbc30a09 100644 --- a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c +++ b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c | |||
| @@ -65,14 +65,14 @@ nve0_identify(struct nouveau_device *device) | |||
| 65 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass; | 65 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass; |
| 66 | device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; | 66 | device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; |
| 67 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; | 67 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; |
| 68 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; | 68 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass; |
| 69 | device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass; | 69 | device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass; |
| 70 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; | 70 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; |
| 71 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 71 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 72 | device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass; | 72 | device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass; |
| 73 | device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; | 73 | device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; |
| 74 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass; | 74 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass; |
| 75 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; | 75 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
| 76 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; | 76 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; |
| 77 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; | 77 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; |
| 78 | device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass; | 78 | device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass; |
| @@ -98,14 +98,14 @@ nve0_identify(struct nouveau_device *device) | |||
| 98 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass; | 98 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass; |
| 99 | device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; | 99 | device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; |
| 100 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; | 100 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; |
| 101 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; | 101 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass; |
| 102 | device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass; | 102 | device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass; |
| 103 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; | 103 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; |
| 104 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 104 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 105 | device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass; | 105 | device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass; |
| 106 | device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; | 106 | device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; |
| 107 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass; | 107 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass; |
| 108 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; | 108 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
| 109 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; | 109 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; |
| 110 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; | 110 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; |
| 111 | device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass; | 111 | device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass; |
| @@ -131,14 +131,14 @@ nve0_identify(struct nouveau_device *device) | |||
| 131 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass; | 131 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass; |
| 132 | device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; | 132 | device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; |
| 133 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; | 133 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; |
| 134 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; | 134 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass; |
| 135 | device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass; | 135 | device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass; |
| 136 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; | 136 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; |
| 137 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 137 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 138 | device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass; | 138 | device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass; |
| 139 | device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; | 139 | device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; |
| 140 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass; | 140 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass; |
| 141 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; | 141 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
| 142 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; | 142 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; |
| 143 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; | 143 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; |
| 144 | device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass; | 144 | device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass; |
| @@ -164,14 +164,14 @@ nve0_identify(struct nouveau_device *device) | |||
| 164 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass; | 164 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass; |
| 165 | device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; | 165 | device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; |
| 166 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; | 166 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; |
| 167 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; | 167 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass; |
| 168 | device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass; | 168 | device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass; |
| 169 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; | 169 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; |
| 170 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 170 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 171 | device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass; | 171 | device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass; |
| 172 | device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; | 172 | device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; |
| 173 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass; | 173 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass; |
| 174 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; | 174 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
| 175 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; | 175 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; |
| 176 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; | 176 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; |
| 177 | device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass; | 177 | device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass; |
| @@ -199,29 +199,27 @@ nve0_identify(struct nouveau_device *device) | |||
| 199 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass; | 199 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass; |
| 200 | device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; | 200 | device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; |
| 201 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; | 201 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; |
| 202 | device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; | 202 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass; |
| 203 | device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass; | 203 | device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass; |
| 204 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; | 204 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; |
| 205 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 205 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 206 | device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass; | 206 | device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass; |
| 207 | device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; | 207 | device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; |
| 208 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass; | 208 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass; |
| 209 | device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; | 209 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
| 210 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; | 210 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; |
| 211 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; | 211 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; |
| 212 | device->oclass[NVDEV_SUBDEV_PWR ] = &nv108_pwr_oclass; | 212 | device->oclass[NVDEV_SUBDEV_PWR ] = &nv108_pwr_oclass; |
| 213 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 213 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
| 214 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass; | 214 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass; |
| 215 | #if 0 | 215 | device->oclass[NVDEV_ENGINE_FIFO ] = nv108_fifo_oclass; |
| 216 | device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass; | ||
| 217 | device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; | 216 | device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; |
| 218 | device->oclass[NVDEV_ENGINE_GR ] = nvf0_graph_oclass; | 217 | device->oclass[NVDEV_ENGINE_GR ] = nv108_graph_oclass; |
| 219 | #endif | ||
| 220 | device->oclass[NVDEV_ENGINE_DISP ] = &nvf0_disp_oclass; | 218 | device->oclass[NVDEV_ENGINE_DISP ] = &nvf0_disp_oclass; |
| 221 | #if 0 | ||
| 222 | device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass; | 219 | device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass; |
| 223 | device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass; | 220 | device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass; |
| 224 | device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass; | 221 | device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass; |
| 222 | #if 0 | ||
| 225 | device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass; | 223 | device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass; |
| 226 | device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass; | 224 | device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass; |
| 227 | device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; | 225 | device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c index a0bc8a89b699..7cf8b1348632 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c | |||
| @@ -31,9 +31,45 @@ struct nv04_disp_priv { | |||
| 31 | struct nouveau_disp base; | 31 | struct nouveau_disp base; |
| 32 | }; | 32 | }; |
| 33 | 33 | ||
| 34 | static int | ||
| 35 | nv04_disp_scanoutpos(struct nouveau_object *object, u32 mthd, | ||
| 36 | void *data, u32 size) | ||
| 37 | { | ||
| 38 | struct nv04_disp_priv *priv = (void *)object->engine; | ||
| 39 | struct nv04_display_scanoutpos *args = data; | ||
| 40 | const int head = (mthd & NV04_DISP_MTHD_HEAD); | ||
| 41 | u32 line; | ||
| 42 | |||
| 43 | if (size < sizeof(*args)) | ||
| 44 | return -EINVAL; | ||
| 45 | |||
| 46 | args->vblanks = nv_rd32(priv, 0x680800 + (head * 0x2000)) & 0xffff; | ||
| 47 | args->vtotal = nv_rd32(priv, 0x680804 + (head * 0x2000)) & 0xffff; | ||
| 48 | args->vblanke = args->vtotal - 1; | ||
| 49 | |||
| 50 | args->hblanks = nv_rd32(priv, 0x680820 + (head * 0x2000)) & 0xffff; | ||
| 51 | args->htotal = nv_rd32(priv, 0x680824 + (head * 0x2000)) & 0xffff; | ||
| 52 | args->hblanke = args->htotal - 1; | ||
| 53 | |||
| 54 | args->time[0] = ktime_to_ns(ktime_get()); | ||
| 55 | line = nv_rd32(priv, 0x600868 + (head * 0x2000)); | ||
| 56 | args->time[1] = ktime_to_ns(ktime_get()); | ||
| 57 | args->hline = (line & 0xffff0000) >> 16; | ||
| 58 | args->vline = (line & 0x0000ffff); | ||
| 59 | return 0; | ||
| 60 | } | ||
| 61 | |||
| 62 | #define HEAD_MTHD(n) (n), (n) + 0x01 | ||
| 63 | |||
| 64 | static struct nouveau_omthds | ||
| 65 | nv04_disp_omthds[] = { | ||
| 66 | { HEAD_MTHD(NV04_DISP_SCANOUTPOS), nv04_disp_scanoutpos }, | ||
| 67 | {} | ||
| 68 | }; | ||
| 69 | |||
| 34 | static struct nouveau_oclass | 70 | static struct nouveau_oclass |
| 35 | nv04_disp_sclass[] = { | 71 | nv04_disp_sclass[] = { |
| 36 | { NV04_DISP_CLASS, &nouveau_object_ofuncs }, | 72 | { NV04_DISP_CLASS, &nouveau_object_ofuncs, nv04_disp_omthds }, |
| 37 | {}, | 73 | {}, |
| 38 | }; | 74 | }; |
| 39 | 75 | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c index c168ae3eaa97..940eaa5d8b9a 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c | |||
| @@ -541,6 +541,35 @@ nv50_disp_curs_ofuncs = { | |||
| 541 | * Base display object | 541 | * Base display object |
| 542 | ******************************************************************************/ | 542 | ******************************************************************************/ |
| 543 | 543 | ||
| 544 | int | ||
| 545 | nv50_disp_base_scanoutpos(struct nouveau_object *object, u32 mthd, | ||
| 546 | void *data, u32 size) | ||
| 547 | { | ||
| 548 | struct nv50_disp_priv *priv = (void *)object->engine; | ||
| 549 | struct nv04_display_scanoutpos *args = data; | ||
| 550 | const int head = (mthd & NV50_DISP_MTHD_HEAD); | ||
| 551 | u32 blanke, blanks, total; | ||
| 552 | |||
| 553 | if (size < sizeof(*args) || head >= priv->head.nr) | ||
| 554 | return -EINVAL; | ||
| 555 | blanke = nv_rd32(priv, 0x610aec + (head * 0x540)); | ||
| 556 | blanks = nv_rd32(priv, 0x610af4 + (head * 0x540)); | ||
| 557 | total = nv_rd32(priv, 0x610afc + (head * 0x540)); | ||
| 558 | |||
| 559 | args->vblanke = (blanke & 0xffff0000) >> 16; | ||
| 560 | args->hblanke = (blanke & 0x0000ffff); | ||
| 561 | args->vblanks = (blanks & 0xffff0000) >> 16; | ||
| 562 | args->hblanks = (blanks & 0x0000ffff); | ||
| 563 | args->vtotal = ( total & 0xffff0000) >> 16; | ||
| 564 | args->htotal = ( total & 0x0000ffff); | ||
| 565 | |||
| 566 | args->time[0] = ktime_to_ns(ktime_get()); | ||
| 567 | args->vline = nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff; | ||
| 568 | args->time[1] = ktime_to_ns(ktime_get()); /* vline read locks hline */ | ||
| 569 | args->hline = nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff; | ||
| 570 | return 0; | ||
| 571 | } | ||
| 572 | |||
| 544 | static void | 573 | static void |
| 545 | nv50_disp_base_vblank_enable(struct nouveau_event *event, int head) | 574 | nv50_disp_base_vblank_enable(struct nouveau_event *event, int head) |
| 546 | { | 575 | { |
| @@ -675,6 +704,7 @@ nv50_disp_base_ofuncs = { | |||
| 675 | 704 | ||
| 676 | static struct nouveau_omthds | 705 | static struct nouveau_omthds |
| 677 | nv50_disp_base_omthds[] = { | 706 | nv50_disp_base_omthds[] = { |
| 707 | { HEAD_MTHD(NV50_DISP_SCANOUTPOS) , nv50_disp_base_scanoutpos }, | ||
| 678 | { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd }, | 708 | { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd }, |
| 679 | { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd }, | 709 | { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd }, |
| 680 | { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd }, | 710 | { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd }, |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h index 1ae6ceb56704..d31d426ea1f6 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h | |||
| @@ -43,6 +43,10 @@ struct nv50_disp_priv { | |||
| 43 | } pior; | 43 | } pior; |
| 44 | }; | 44 | }; |
| 45 | 45 | ||
| 46 | #define HEAD_MTHD(n) (n), (n) + 0x03 | ||
| 47 | |||
| 48 | int nv50_disp_base_scanoutpos(struct nouveau_object *, u32, void *, u32); | ||
| 49 | |||
| 46 | #define DAC_MTHD(n) (n), (n) + 0x03 | 50 | #define DAC_MTHD(n) (n), (n) + 0x03 |
| 47 | 51 | ||
| 48 | int nv50_dac_mthd(struct nouveau_object *, u32, void *, u32); | 52 | int nv50_dac_mthd(struct nouveau_object *, u32, void *, u32); |
| @@ -132,13 +136,12 @@ void nv50_disp_intr(struct nouveau_subdev *); | |||
| 132 | 136 | ||
| 133 | extern struct nouveau_omthds nv84_disp_base_omthds[]; | 137 | extern struct nouveau_omthds nv84_disp_base_omthds[]; |
| 134 | 138 | ||
| 135 | extern struct nouveau_omthds nva3_disp_base_omthds[]; | ||
| 136 | |||
| 137 | extern struct nouveau_ofuncs nvd0_disp_mast_ofuncs; | 139 | extern struct nouveau_ofuncs nvd0_disp_mast_ofuncs; |
| 138 | extern struct nouveau_ofuncs nvd0_disp_sync_ofuncs; | 140 | extern struct nouveau_ofuncs nvd0_disp_sync_ofuncs; |
| 139 | extern struct nouveau_ofuncs nvd0_disp_ovly_ofuncs; | 141 | extern struct nouveau_ofuncs nvd0_disp_ovly_ofuncs; |
| 140 | extern struct nouveau_ofuncs nvd0_disp_oimm_ofuncs; | 142 | extern struct nouveau_ofuncs nvd0_disp_oimm_ofuncs; |
| 141 | extern struct nouveau_ofuncs nvd0_disp_curs_ofuncs; | 143 | extern struct nouveau_ofuncs nvd0_disp_curs_ofuncs; |
| 144 | extern struct nouveau_omthds nvd0_disp_base_omthds[]; | ||
| 142 | extern struct nouveau_ofuncs nvd0_disp_base_ofuncs; | 145 | extern struct nouveau_ofuncs nvd0_disp_base_ofuncs; |
| 143 | extern struct nouveau_oclass nvd0_disp_cclass; | 146 | extern struct nouveau_oclass nvd0_disp_cclass; |
| 144 | void nvd0_disp_intr_supervisor(struct work_struct *); | 147 | void nvd0_disp_intr_supervisor(struct work_struct *); |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c index d8c74c0883a1..ef9ce300a496 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c | |||
| @@ -41,6 +41,7 @@ nv84_disp_sclass[] = { | |||
| 41 | 41 | ||
| 42 | struct nouveau_omthds | 42 | struct nouveau_omthds |
| 43 | nv84_disp_base_omthds[] = { | 43 | nv84_disp_base_omthds[] = { |
| 44 | { HEAD_MTHD(NV50_DISP_SCANOUTPOS) , nv50_disp_base_scanoutpos }, | ||
| 44 | { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd }, | 45 | { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd }, |
| 45 | { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd }, | 46 | { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd }, |
| 46 | { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd }, | 47 | { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd }, |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c index a66f949c1f84..a518543c00ab 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c | |||
| @@ -41,6 +41,7 @@ nv94_disp_sclass[] = { | |||
| 41 | 41 | ||
| 42 | static struct nouveau_omthds | 42 | static struct nouveau_omthds |
| 43 | nv94_disp_base_omthds[] = { | 43 | nv94_disp_base_omthds[] = { |
| 44 | { HEAD_MTHD(NV50_DISP_SCANOUTPOS) , nv50_disp_base_scanoutpos }, | ||
| 44 | { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd }, | 45 | { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd }, |
| 45 | { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd }, | 46 | { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd }, |
| 46 | { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd }, | 47 | { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd }, |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c index b75413169eae..6ad6dcece43b 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c | |||
| @@ -39,8 +39,9 @@ nva3_disp_sclass[] = { | |||
| 39 | {} | 39 | {} |
| 40 | }; | 40 | }; |
| 41 | 41 | ||
| 42 | struct nouveau_omthds | 42 | static struct nouveau_omthds |
| 43 | nva3_disp_base_omthds[] = { | 43 | nva3_disp_base_omthds[] = { |
| 44 | { HEAD_MTHD(NV50_DISP_SCANOUTPOS) , nv50_disp_base_scanoutpos }, | ||
| 44 | { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd }, | 45 | { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd }, |
| 45 | { SOR_MTHD(NVA3_DISP_SOR_HDA_ELD) , nv50_sor_mthd }, | 46 | { SOR_MTHD(NVA3_DISP_SOR_HDA_ELD) , nv50_sor_mthd }, |
| 46 | { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd }, | 47 | { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd }, |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c index 378a015091d2..1c5e4e8b2c82 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c | |||
| @@ -440,6 +440,36 @@ nvd0_disp_curs_ofuncs = { | |||
| 440 | * Base display object | 440 | * Base display object |
| 441 | ******************************************************************************/ | 441 | ******************************************************************************/ |
| 442 | 442 | ||
| 443 | static int | ||
| 444 | nvd0_disp_base_scanoutpos(struct nouveau_object *object, u32 mthd, | ||
| 445 | void *data, u32 size) | ||
| 446 | { | ||
| 447 | struct nv50_disp_priv *priv = (void *)object->engine; | ||
| 448 | struct nv04_display_scanoutpos *args = data; | ||
| 449 | const int head = (mthd & NV50_DISP_MTHD_HEAD); | ||
| 450 | u32 blanke, blanks, total; | ||
| 451 | |||
| 452 | if (size < sizeof(*args) || head >= priv->head.nr) | ||
| 453 | return -EINVAL; | ||
| 454 | |||
| 455 | total = nv_rd32(priv, 0x640414 + (head * 0x300)); | ||
| 456 | blanke = nv_rd32(priv, 0x64041c + (head * 0x300)); | ||
| 457 | blanks = nv_rd32(priv, 0x640420 + (head * 0x300)); | ||
| 458 | |||
| 459 | args->vblanke = (blanke & 0xffff0000) >> 16; | ||
| 460 | args->hblanke = (blanke & 0x0000ffff); | ||
| 461 | args->vblanks = (blanks & 0xffff0000) >> 16; | ||
| 462 | args->hblanks = (blanks & 0x0000ffff); | ||
| 463 | args->vtotal = ( total & 0xffff0000) >> 16; | ||
| 464 | args->htotal = ( total & 0x0000ffff); | ||
| 465 | |||
| 466 | args->time[0] = ktime_to_ns(ktime_get()); | ||
| 467 | args->vline = nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff; | ||
| 468 | args->time[1] = ktime_to_ns(ktime_get()); /* vline read locks hline */ | ||
| 469 | args->hline = nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff; | ||
| 470 | return 0; | ||
| 471 | } | ||
| 472 | |||
| 443 | static void | 473 | static void |
| 444 | nvd0_disp_base_vblank_enable(struct nouveau_event *event, int head) | 474 | nvd0_disp_base_vblank_enable(struct nouveau_event *event, int head) |
| 445 | { | 475 | { |
| @@ -573,9 +603,24 @@ nvd0_disp_base_ofuncs = { | |||
| 573 | .fini = nvd0_disp_base_fini, | 603 | .fini = nvd0_disp_base_fini, |
| 574 | }; | 604 | }; |
| 575 | 605 | ||
| 606 | struct nouveau_omthds | ||
| 607 | nvd0_disp_base_omthds[] = { | ||
| 608 | { HEAD_MTHD(NV50_DISP_SCANOUTPOS) , nvd0_disp_base_scanoutpos }, | ||
| 609 | { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd }, | ||
| 610 | { SOR_MTHD(NVA3_DISP_SOR_HDA_ELD) , nv50_sor_mthd }, | ||
| 611 | { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd }, | ||
| 612 | { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd }, | ||
| 613 | { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd }, | ||
| 614 | { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd }, | ||
| 615 | { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd }, | ||
| 616 | { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd }, | ||
| 617 | { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd }, | ||
| 618 | {}, | ||
| 619 | }; | ||
| 620 | |||
| 576 | static struct nouveau_oclass | 621 | static struct nouveau_oclass |
| 577 | nvd0_disp_base_oclass[] = { | 622 | nvd0_disp_base_oclass[] = { |
| 578 | { NVD0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds }, | 623 | { NVD0_DISP_CLASS, &nvd0_disp_base_ofuncs, nvd0_disp_base_omthds }, |
| 579 | {} | 624 | {} |
| 580 | }; | 625 | }; |
| 581 | 626 | ||
| @@ -967,9 +1012,6 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 967 | int heads = nv_rd32(parent, 0x022448); | 1012 | int heads = nv_rd32(parent, 0x022448); |
| 968 | int ret; | 1013 | int ret; |
| 969 | 1014 | ||
| 970 | if (nv_rd32(parent, 0x022500) & 0x00000001) | ||
| 971 | return -ENODEV; | ||
| 972 | |||
| 973 | ret = nouveau_disp_create(parent, engine, oclass, heads, | 1015 | ret = nouveau_disp_create(parent, engine, oclass, heads, |
| 974 | "PDISP", "display", &priv); | 1016 | "PDISP", "display", &priv); |
| 975 | *pobject = nv_object(priv); | 1017 | *pobject = nv_object(priv); |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c index fb1fe6ae5e74..ab63f32c00b2 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c | |||
| @@ -41,7 +41,7 @@ nve0_disp_sclass[] = { | |||
| 41 | 41 | ||
| 42 | static struct nouveau_oclass | 42 | static struct nouveau_oclass |
| 43 | nve0_disp_base_oclass[] = { | 43 | nve0_disp_base_oclass[] = { |
| 44 | { NVE0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds }, | 44 | { NVE0_DISP_CLASS, &nvd0_disp_base_ofuncs, nvd0_disp_base_omthds }, |
| 45 | {} | 45 | {} |
| 46 | }; | 46 | }; |
| 47 | 47 | ||
| @@ -54,9 +54,6 @@ nve0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 54 | int heads = nv_rd32(parent, 0x022448); | 54 | int heads = nv_rd32(parent, 0x022448); |
| 55 | int ret; | 55 | int ret; |
| 56 | 56 | ||
| 57 | if (nv_rd32(parent, 0x022500) & 0x00000001) | ||
| 58 | return -ENODEV; | ||
| 59 | |||
| 60 | ret = nouveau_disp_create(parent, engine, oclass, heads, | 57 | ret = nouveau_disp_create(parent, engine, oclass, heads, |
| 61 | "PDISP", "display", &priv); | 58 | "PDISP", "display", &priv); |
| 62 | *pobject = nv_object(priv); | 59 | *pobject = nv_object(priv); |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c index 42aa6b97dbea..05fee10e0c97 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c | |||
| @@ -41,7 +41,7 @@ nvf0_disp_sclass[] = { | |||
| 41 | 41 | ||
| 42 | static struct nouveau_oclass | 42 | static struct nouveau_oclass |
| 43 | nvf0_disp_base_oclass[] = { | 43 | nvf0_disp_base_oclass[] = { |
| 44 | { NVF0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds }, | 44 | { NVF0_DISP_CLASS, &nvd0_disp_base_ofuncs, nvd0_disp_base_omthds }, |
| 45 | {} | 45 | {} |
| 46 | }; | 46 | }; |
| 47 | 47 | ||
| @@ -54,9 +54,6 @@ nvf0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 54 | int heads = nv_rd32(parent, 0x022448); | 54 | int heads = nv_rd32(parent, 0x022448); |
| 55 | int ret; | 55 | int ret; |
| 56 | 56 | ||
| 57 | if (nv_rd32(parent, 0x022500) & 0x00000001) | ||
| 58 | return -ENODEV; | ||
| 59 | |||
| 60 | ret = nouveau_disp_create(parent, engine, oclass, heads, | 57 | ret = nouveau_disp_create(parent, engine, oclass, heads, |
| 61 | "PDISP", "display", &priv); | 58 | "PDISP", "display", &priv); |
| 62 | *pobject = nv_object(priv); | 59 | *pobject = nv_object(priv); |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/vga.c b/drivers/gpu/drm/nouveau/core/engine/disp/vga.c index 5a1c68474597..8836c3cb99c3 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/vga.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/vga.c | |||
| @@ -138,10 +138,15 @@ nv_wrvgai(void *obj, int head, u16 port, u8 index, u8 value) | |||
| 138 | bool | 138 | bool |
| 139 | nv_lockvgac(void *obj, bool lock) | 139 | nv_lockvgac(void *obj, bool lock) |
| 140 | { | 140 | { |
| 141 | struct nouveau_device *dev = nv_device(obj); | ||
| 142 | |||
| 141 | bool locked = !nv_rdvgac(obj, 0, 0x1f); | 143 | bool locked = !nv_rdvgac(obj, 0, 0x1f); |
| 142 | u8 data = lock ? 0x99 : 0x57; | 144 | u8 data = lock ? 0x99 : 0x57; |
| 143 | nv_wrvgac(obj, 0, 0x1f, data); | 145 | if (dev->card_type < NV_50) |
| 144 | if (nv_device(obj)->chipset == 0x11) { | 146 | nv_wrvgac(obj, 0, 0x1f, data); |
| 147 | else | ||
| 148 | nv_wrvgac(obj, 0, 0x3f, data); | ||
| 149 | if (dev->chipset == 0x11) { | ||
| 145 | if (!(nv_rd32(obj, 0x001084) & 0x10000000)) | 150 | if (!(nv_rd32(obj, 0x001084) & 0x10000000)) |
| 146 | nv_wrvgac(obj, 1, 0x1f, data); | 151 | nv_wrvgac(obj, 1, 0x1f, data); |
| 147 | } | 152 | } |
diff --git a/drivers/gpu/drm/nouveau/core/engine/falcon.c b/drivers/gpu/drm/nouveau/core/engine/falcon.c index e03fc8e4dc1d..5e077e4ed7f6 100644 --- a/drivers/gpu/drm/nouveau/core/engine/falcon.c +++ b/drivers/gpu/drm/nouveau/core/engine/falcon.c | |||
| @@ -56,6 +56,16 @@ _nouveau_falcon_wr32(struct nouveau_object *object, u64 addr, u32 data) | |||
| 56 | nv_wr32(falcon, falcon->addr + addr, data); | 56 | nv_wr32(falcon, falcon->addr + addr, data); |
| 57 | } | 57 | } |
| 58 | 58 | ||
| 59 | static void * | ||
| 60 | vmemdup(const void *src, size_t len) | ||
| 61 | { | ||
| 62 | void *p = vmalloc(len); | ||
| 63 | |||
| 64 | if (p) | ||
| 65 | memcpy(p, src, len); | ||
| 66 | return p; | ||
| 67 | } | ||
| 68 | |||
| 59 | int | 69 | int |
| 60 | _nouveau_falcon_init(struct nouveau_object *object) | 70 | _nouveau_falcon_init(struct nouveau_object *object) |
| 61 | { | 71 | { |
| @@ -111,7 +121,7 @@ _nouveau_falcon_init(struct nouveau_object *object) | |||
| 111 | 121 | ||
| 112 | ret = request_firmware(&fw, name, &device->pdev->dev); | 122 | ret = request_firmware(&fw, name, &device->pdev->dev); |
| 113 | if (ret == 0) { | 123 | if (ret == 0) { |
| 114 | falcon->code.data = kmemdup(fw->data, fw->size, GFP_KERNEL); | 124 | falcon->code.data = vmemdup(fw->data, fw->size); |
| 115 | falcon->code.size = fw->size; | 125 | falcon->code.size = fw->size; |
| 116 | falcon->data.data = NULL; | 126 | falcon->data.data = NULL; |
| 117 | falcon->data.size = 0; | 127 | falcon->data.size = 0; |
| @@ -134,7 +144,7 @@ _nouveau_falcon_init(struct nouveau_object *object) | |||
| 134 | return ret; | 144 | return ret; |
| 135 | } | 145 | } |
| 136 | 146 | ||
| 137 | falcon->data.data = kmemdup(fw->data, fw->size, GFP_KERNEL); | 147 | falcon->data.data = vmemdup(fw->data, fw->size); |
| 138 | falcon->data.size = fw->size; | 148 | falcon->data.size = fw->size; |
| 139 | release_firmware(fw); | 149 | release_firmware(fw); |
| 140 | if (!falcon->data.data) | 150 | if (!falcon->data.data) |
| @@ -149,7 +159,7 @@ _nouveau_falcon_init(struct nouveau_object *object) | |||
| 149 | return ret; | 159 | return ret; |
| 150 | } | 160 | } |
| 151 | 161 | ||
| 152 | falcon->code.data = kmemdup(fw->data, fw->size, GFP_KERNEL); | 162 | falcon->code.data = vmemdup(fw->data, fw->size); |
| 153 | falcon->code.size = fw->size; | 163 | falcon->code.size = fw->size; |
| 154 | release_firmware(fw); | 164 | release_firmware(fw); |
| 155 | if (!falcon->code.data) | 165 | if (!falcon->code.data) |
| @@ -235,8 +245,8 @@ _nouveau_falcon_fini(struct nouveau_object *object, bool suspend) | |||
| 235 | if (!suspend) { | 245 | if (!suspend) { |
| 236 | nouveau_gpuobj_ref(NULL, &falcon->core); | 246 | nouveau_gpuobj_ref(NULL, &falcon->core); |
| 237 | if (falcon->external) { | 247 | if (falcon->external) { |
| 238 | kfree(falcon->data.data); | 248 | vfree(falcon->data.data); |
| 239 | kfree(falcon->code.data); | 249 | vfree(falcon->code.data); |
| 240 | falcon->code.data = NULL; | 250 | falcon->code.data = NULL; |
| 241 | } | 251 | } |
| 242 | } | 252 | } |
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv108.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv108.c new file mode 100644 index 000000000000..09362a51ba57 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv108.c | |||
| @@ -0,0 +1,37 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2013 Red Hat Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | * Authors: Ben Skeggs | ||
| 23 | */ | ||
| 24 | |||
| 25 | #include "nve0.h" | ||
| 26 | |||
| 27 | struct nouveau_oclass * | ||
| 28 | nv108_fifo_oclass = &(struct nve0_fifo_impl) { | ||
| 29 | .base.handle = NV_ENGINE(FIFO, 0x08), | ||
| 30 | .base.ofuncs = &(struct nouveau_ofuncs) { | ||
| 31 | .ctor = nve0_fifo_ctor, | ||
| 32 | .dtor = nve0_fifo_dtor, | ||
| 33 | .init = nve0_fifo_init, | ||
| 34 | .fini = _nouveau_fifo_fini, | ||
| 35 | }, | ||
| 36 | .channels = 1024, | ||
| 37 | }.base; | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c index 9ac94d4e5646..b22a33f0702d 100644 --- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c | |||
| @@ -33,6 +33,7 @@ | |||
| 33 | 33 | ||
| 34 | #include <subdev/timer.h> | 34 | #include <subdev/timer.h> |
| 35 | #include <subdev/bar.h> | 35 | #include <subdev/bar.h> |
| 36 | #include <subdev/fb.h> | ||
| 36 | #include <subdev/vm.h> | 37 | #include <subdev/vm.h> |
| 37 | 38 | ||
| 38 | #include <engine/dmaobj.h> | 39 | #include <engine/dmaobj.h> |
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c index 04f412922d2d..9a850fe19515 100644 --- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c | |||
| @@ -33,10 +33,12 @@ | |||
| 33 | 33 | ||
| 34 | #include <subdev/timer.h> | 34 | #include <subdev/timer.h> |
| 35 | #include <subdev/bar.h> | 35 | #include <subdev/bar.h> |
| 36 | #include <subdev/fb.h> | ||
| 36 | #include <subdev/vm.h> | 37 | #include <subdev/vm.h> |
| 37 | 38 | ||
| 38 | #include <engine/dmaobj.h> | 39 | #include <engine/dmaobj.h> |
| 39 | #include <engine/fifo.h> | 40 | |
| 41 | #include "nve0.h" | ||
| 40 | 42 | ||
| 41 | #define _(a,b) { (a), ((1ULL << (a)) | (b)) } | 43 | #define _(a,b) { (a), ((1ULL << (a)) | (b)) } |
| 42 | static const struct { | 44 | static const struct { |
| @@ -56,8 +58,8 @@ static const struct { | |||
| 56 | #define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine) | 58 | #define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine) |
| 57 | 59 | ||
| 58 | struct nve0_fifo_engn { | 60 | struct nve0_fifo_engn { |
| 59 | struct nouveau_gpuobj *playlist[2]; | 61 | struct nouveau_gpuobj *runlist[2]; |
| 60 | int cur_playlist; | 62 | int cur_runlist; |
| 61 | }; | 63 | }; |
| 62 | 64 | ||
| 63 | struct nve0_fifo_priv { | 65 | struct nve0_fifo_priv { |
| @@ -86,7 +88,7 @@ struct nve0_fifo_chan { | |||
| 86 | ******************************************************************************/ | 88 | ******************************************************************************/ |
| 87 | 89 | ||
| 88 | static void | 90 | static void |
| 89 | nve0_fifo_playlist_update(struct nve0_fifo_priv *priv, u32 engine) | 91 | nve0_fifo_runlist_update(struct nve0_fifo_priv *priv, u32 engine) |
| 90 | { | 92 | { |
| 91 | struct nouveau_bar *bar = nouveau_bar(priv); | 93 | struct nouveau_bar *bar = nouveau_bar(priv); |
| 92 | struct nve0_fifo_engn *engn = &priv->engine[engine]; | 94 | struct nve0_fifo_engn *engn = &priv->engine[engine]; |
| @@ -95,8 +97,8 @@ nve0_fifo_playlist_update(struct nve0_fifo_priv *priv, u32 engine) | |||
| 95 | int i, p; | 97 | int i, p; |
| 96 | 98 | ||
| 97 | mutex_lock(&nv_subdev(priv)->mutex); | 99 | mutex_lock(&nv_subdev(priv)->mutex); |
| 98 | cur = engn->playlist[engn->cur_playlist]; | 100 | cur = engn->runlist[engn->cur_runlist]; |
| 99 | engn->cur_playlist = !engn->cur_playlist; | 101 | engn->cur_runlist = !engn->cur_runlist; |
| 100 | 102 | ||
| 101 | for (i = 0, p = 0; i < priv->base.max; i++) { | 103 | for (i = 0, p = 0; i < priv->base.max; i++) { |
| 102 | u32 ctrl = nv_rd32(priv, 0x800004 + (i * 8)) & 0x001f0001; | 104 | u32 ctrl = nv_rd32(priv, 0x800004 + (i * 8)) & 0x001f0001; |
| @@ -111,7 +113,7 @@ nve0_fifo_playlist_update(struct nve0_fifo_priv *priv, u32 engine) | |||
| 111 | nv_wr32(priv, 0x002270, cur->addr >> 12); | 113 | nv_wr32(priv, 0x002270, cur->addr >> 12); |
| 112 | nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3)); | 114 | nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3)); |
| 113 | if (!nv_wait(priv, 0x002284 + (engine * 4), 0x00100000, 0x00000000)) | 115 | if (!nv_wait(priv, 0x002284 + (engine * 4), 0x00100000, 0x00000000)) |
| 114 | nv_error(priv, "playlist %d update timeout\n", engine); | 116 | nv_error(priv, "runlist %d update timeout\n", engine); |
| 115 | mutex_unlock(&nv_subdev(priv)->mutex); | 117 | mutex_unlock(&nv_subdev(priv)->mutex); |
| 116 | } | 118 | } |
| 117 | 119 | ||
| @@ -278,7 +280,7 @@ nve0_fifo_chan_init(struct nouveau_object *object) | |||
| 278 | nv_mask(priv, 0x800004 + (chid * 8), 0x000f0000, chan->engine << 16); | 280 | nv_mask(priv, 0x800004 + (chid * 8), 0x000f0000, chan->engine << 16); |
| 279 | nv_wr32(priv, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12); | 281 | nv_wr32(priv, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12); |
| 280 | nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400); | 282 | nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400); |
| 281 | nve0_fifo_playlist_update(priv, chan->engine); | 283 | nve0_fifo_runlist_update(priv, chan->engine); |
| 282 | nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400); | 284 | nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400); |
| 283 | return 0; | 285 | return 0; |
| 284 | } | 286 | } |
| @@ -291,7 +293,7 @@ nve0_fifo_chan_fini(struct nouveau_object *object, bool suspend) | |||
| 291 | u32 chid = chan->base.chid; | 293 | u32 chid = chan->base.chid; |
| 292 | 294 | ||
| 293 | nv_mask(priv, 0x800004 + (chid * 8), 0x00000800, 0x00000800); | 295 | nv_mask(priv, 0x800004 + (chid * 8), 0x00000800, 0x00000800); |
| 294 | nve0_fifo_playlist_update(priv, chan->engine); | 296 | nve0_fifo_runlist_update(priv, chan->engine); |
| 295 | nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000); | 297 | nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000); |
| 296 | 298 | ||
| 297 | return nouveau_fifo_channel_fini(&chan->base, suspend); | 299 | return nouveau_fifo_channel_fini(&chan->base, suspend); |
| @@ -375,54 +377,189 @@ nve0_fifo_cclass = { | |||
| 375 | * PFIFO engine | 377 | * PFIFO engine |
| 376 | ******************************************************************************/ | 378 | ******************************************************************************/ |
| 377 | 379 | ||
| 378 | static const struct nouveau_enum nve0_fifo_fault_unit[] = { | 380 | static const struct nouveau_enum nve0_fifo_sched_reason[] = { |
| 381 | { 0x0a, "CTXSW_TIMEOUT" }, | ||
| 382 | {} | ||
| 383 | }; | ||
| 384 | |||
| 385 | static const struct nouveau_enum nve0_fifo_fault_engine[] = { | ||
| 386 | { 0x00, "GR", NULL, NVDEV_ENGINE_GR }, | ||
| 387 | { 0x03, "IFB" }, | ||
| 388 | { 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR }, | ||
| 389 | { 0x05, "BAR3", NULL, NVDEV_SUBDEV_INSTMEM }, | ||
| 390 | { 0x07, "PBDMA0", NULL, NVDEV_ENGINE_FIFO }, | ||
| 391 | { 0x08, "PBDMA1", NULL, NVDEV_ENGINE_FIFO }, | ||
| 392 | { 0x09, "PBDMA2", NULL, NVDEV_ENGINE_FIFO }, | ||
| 393 | { 0x10, "MSVLD", NULL, NVDEV_ENGINE_BSP }, | ||
| 394 | { 0x11, "MSPPP", NULL, NVDEV_ENGINE_PPP }, | ||
| 395 | { 0x13, "PERF" }, | ||
| 396 | { 0x14, "MSPDEC", NULL, NVDEV_ENGINE_VP }, | ||
| 397 | { 0x15, "CE0", NULL, NVDEV_ENGINE_COPY0 }, | ||
| 398 | { 0x16, "CE1", NULL, NVDEV_ENGINE_COPY1 }, | ||
| 399 | { 0x17, "PMU" }, | ||
| 400 | { 0x19, "MSENC", NULL, NVDEV_ENGINE_VENC }, | ||
| 401 | { 0x1b, "CE2", NULL, NVDEV_ENGINE_COPY2 }, | ||
| 379 | {} | 402 | {} |
| 380 | }; | 403 | }; |
| 381 | 404 | ||
| 382 | static const struct nouveau_enum nve0_fifo_fault_reason[] = { | 405 | static const struct nouveau_enum nve0_fifo_fault_reason[] = { |
| 383 | { 0x00, "PT_NOT_PRESENT" }, | 406 | { 0x00, "PDE" }, |
| 384 | { 0x01, "PT_TOO_SHORT" }, | 407 | { 0x01, "PDE_SIZE" }, |
| 385 | { 0x02, "PAGE_NOT_PRESENT" }, | 408 | { 0x02, "PTE" }, |
| 386 | { 0x03, "VM_LIMIT_EXCEEDED" }, | 409 | { 0x03, "VA_LIMIT_VIOLATION" }, |
| 387 | { 0x04, "NO_CHANNEL" }, | 410 | { 0x04, "UNBOUND_INST_BLOCK" }, |
| 388 | { 0x05, "PAGE_SYSTEM_ONLY" }, | 411 | { 0x05, "PRIV_VIOLATION" }, |
| 389 | { 0x06, "PAGE_READ_ONLY" }, | 412 | { 0x06, "RO_VIOLATION" }, |
| 390 | { 0x0a, "COMPRESSED_SYSRAM" }, | 413 | { 0x07, "WO_VIOLATION" }, |
| 391 | { 0x0c, "INVALID_STORAGE_TYPE" }, | 414 | { 0x08, "PITCH_MASK_VIOLATION" }, |
| 415 | { 0x09, "WORK_CREATION" }, | ||
| 416 | { 0x0a, "UNSUPPORTED_APERTURE" }, | ||
| 417 | { 0x0b, "COMPRESSION_FAILURE" }, | ||
| 418 | { 0x0c, "UNSUPPORTED_KIND" }, | ||
| 419 | { 0x0d, "REGION_VIOLATION" }, | ||
| 420 | { 0x0e, "BOTH_PTES_VALID" }, | ||
| 421 | { 0x0f, "INFO_TYPE_POISONED" }, | ||
| 392 | {} | 422 | {} |
| 393 | }; | 423 | }; |
| 394 | 424 | ||
| 395 | static const struct nouveau_enum nve0_fifo_fault_hubclient[] = { | 425 | static const struct nouveau_enum nve0_fifo_fault_hubclient[] = { |
| 426 | { 0x00, "VIP" }, | ||
| 427 | { 0x01, "CE0" }, | ||
| 428 | { 0x02, "CE1" }, | ||
| 429 | { 0x03, "DNISO" }, | ||
| 430 | { 0x04, "FE" }, | ||
| 431 | { 0x05, "FECS" }, | ||
| 432 | { 0x06, "HOST" }, | ||
| 433 | { 0x07, "HOST_CPU" }, | ||
| 434 | { 0x08, "HOST_CPU_NB" }, | ||
| 435 | { 0x09, "ISO" }, | ||
| 436 | { 0x0a, "MMU" }, | ||
| 437 | { 0x0b, "MSPDEC" }, | ||
| 438 | { 0x0c, "MSPPP" }, | ||
| 439 | { 0x0d, "MSVLD" }, | ||
| 440 | { 0x0e, "NISO" }, | ||
| 441 | { 0x0f, "P2P" }, | ||
| 442 | { 0x10, "PD" }, | ||
| 443 | { 0x11, "PERF" }, | ||
| 444 | { 0x12, "PMU" }, | ||
| 445 | { 0x13, "RASTERTWOD" }, | ||
| 446 | { 0x14, "SCC" }, | ||
| 447 | { 0x15, "SCC_NB" }, | ||
| 448 | { 0x16, "SEC" }, | ||
| 449 | { 0x17, "SSYNC" }, | ||
| 450 | { 0x18, "GR_COPY" }, | ||
| 451 | { 0x19, "CE2" }, | ||
| 452 | { 0x1a, "XV" }, | ||
| 453 | { 0x1b, "MMU_NB" }, | ||
| 454 | { 0x1c, "MSENC" }, | ||
| 455 | { 0x1d, "DFALCON" }, | ||
| 456 | { 0x1e, "SKED" }, | ||
| 457 | { 0x1f, "AFALCON" }, | ||
| 396 | {} | 458 | {} |
| 397 | }; | 459 | }; |
| 398 | 460 | ||
| 399 | static const struct nouveau_enum nve0_fifo_fault_gpcclient[] = { | 461 | static const struct nouveau_enum nve0_fifo_fault_gpcclient[] = { |
| 462 | { 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" }, | ||
| 463 | { 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" }, | ||
| 464 | { 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" }, | ||
| 465 | { 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" }, | ||
| 466 | { 0x0c, "RAST" }, | ||
| 467 | { 0x0d, "GCC" }, | ||
| 468 | { 0x0e, "GPCCS" }, | ||
| 469 | { 0x0f, "PROP_0" }, | ||
| 470 | { 0x10, "PROP_1" }, | ||
| 471 | { 0x11, "PROP_2" }, | ||
| 472 | { 0x12, "PROP_3" }, | ||
| 473 | { 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" }, | ||
| 474 | { 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" }, | ||
| 475 | { 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" }, | ||
| 476 | { 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" }, | ||
| 477 | { 0x1f, "GPM" }, | ||
| 478 | { 0x20, "LTP_UTLB_0" }, | ||
| 479 | { 0x21, "LTP_UTLB_1" }, | ||
| 480 | { 0x22, "LTP_UTLB_2" }, | ||
| 481 | { 0x23, "LTP_UTLB_3" }, | ||
| 482 | { 0x24, "GPC_RGG_UTLB" }, | ||
| 400 | {} | 483 | {} |
| 401 | }; | 484 | }; |
| 402 | 485 | ||
| 403 | static const struct nouveau_bitfield nve0_fifo_subfifo_intr[] = { | 486 | static const struct nouveau_bitfield nve0_fifo_pbdma_intr[] = { |
| 404 | { 0x00200000, "ILLEGAL_MTHD" }, | 487 | { 0x00000001, "MEMREQ" }, |
| 405 | { 0x00800000, "EMPTY_SUBC" }, | 488 | { 0x00000002, "MEMACK_TIMEOUT" }, |
| 489 | { 0x00000004, "MEMACK_EXTRA" }, | ||
| 490 | { 0x00000008, "MEMDAT_TIMEOUT" }, | ||
| 491 | { 0x00000010, "MEMDAT_EXTRA" }, | ||
| 492 | { 0x00000020, "MEMFLUSH" }, | ||
| 493 | { 0x00000040, "MEMOP" }, | ||
| 494 | { 0x00000080, "LBCONNECT" }, | ||
| 495 | { 0x00000100, "LBREQ" }, | ||
| 496 | { 0x00000200, "LBACK_TIMEOUT" }, | ||
| 497 | { 0x00000400, "LBACK_EXTRA" }, | ||
| 498 | { 0x00000800, "LBDAT_TIMEOUT" }, | ||
| 499 | { 0x00001000, "LBDAT_EXTRA" }, | ||
| 500 | { 0x00002000, "GPFIFO" }, | ||
| 501 | { 0x00004000, "GPPTR" }, | ||
| 502 | { 0x00008000, "GPENTRY" }, | ||
| 503 | { 0x00010000, "GPCRC" }, | ||
| 504 | { 0x00020000, "PBPTR" }, | ||
| 505 | { 0x00040000, "PBENTRY" }, | ||
| 506 | { 0x00080000, "PBCRC" }, | ||
| 507 | { 0x00100000, "XBARCONNECT" }, | ||
| 508 | { 0x00200000, "METHOD" }, | ||
| 509 | { 0x00400000, "METHODCRC" }, | ||
| 510 | { 0x00800000, "DEVICE" }, | ||
| 511 | { 0x02000000, "SEMAPHORE" }, | ||
| 512 | { 0x04000000, "ACQUIRE" }, | ||
| 513 | { 0x08000000, "PRI" }, | ||
| 514 | { 0x20000000, "NO_CTXSW_SEG" }, | ||
| 515 | { 0x40000000, "PBSEG" }, | ||
| 516 | { 0x80000000, "SIGNATURE" }, | ||
| 406 | {} | 517 | {} |
| 407 | }; | 518 | }; |
| 408 | 519 | ||
| 409 | static void | 520 | static void |
| 410 | nve0_fifo_isr_vm_fault(struct nve0_fifo_priv *priv, int unit) | 521 | nve0_fifo_intr_sched(struct nve0_fifo_priv *priv) |
| 522 | { | ||
| 523 | u32 intr = nv_rd32(priv, 0x00254c); | ||
| 524 | u32 code = intr & 0x000000ff; | ||
| 525 | nv_error(priv, "SCHED_ERROR ["); | ||
| 526 | nouveau_enum_print(nve0_fifo_sched_reason, code); | ||
| 527 | pr_cont("]\n"); | ||
| 528 | } | ||
| 529 | |||
| 530 | static void | ||
| 531 | nve0_fifo_intr_chsw(struct nve0_fifo_priv *priv) | ||
| 532 | { | ||
| 533 | u32 stat = nv_rd32(priv, 0x00256c); | ||
| 534 | nv_error(priv, "CHSW_ERROR 0x%08x\n", stat); | ||
| 535 | nv_wr32(priv, 0x00256c, stat); | ||
| 536 | } | ||
| 537 | |||
| 538 | static void | ||
| 539 | nve0_fifo_intr_dropped_fault(struct nve0_fifo_priv *priv) | ||
| 540 | { | ||
| 541 | u32 stat = nv_rd32(priv, 0x00259c); | ||
| 542 | nv_error(priv, "DROPPED_MMU_FAULT 0x%08x\n", stat); | ||
| 543 | } | ||
| 544 | |||
| 545 | static void | ||
| 546 | nve0_fifo_intr_fault(struct nve0_fifo_priv *priv, int unit) | ||
| 411 | { | 547 | { |
| 412 | u32 inst = nv_rd32(priv, 0x2800 + (unit * 0x10)); | 548 | u32 inst = nv_rd32(priv, 0x2800 + (unit * 0x10)); |
| 413 | u32 valo = nv_rd32(priv, 0x2804 + (unit * 0x10)); | 549 | u32 valo = nv_rd32(priv, 0x2804 + (unit * 0x10)); |
| 414 | u32 vahi = nv_rd32(priv, 0x2808 + (unit * 0x10)); | 550 | u32 vahi = nv_rd32(priv, 0x2808 + (unit * 0x10)); |
| 415 | u32 stat = nv_rd32(priv, 0x280c + (unit * 0x10)); | 551 | u32 stat = nv_rd32(priv, 0x280c + (unit * 0x10)); |
| 416 | u32 client = (stat & 0x00001f00) >> 8; | 552 | u32 client = (stat & 0x00001f00) >> 8; |
| 417 | const struct nouveau_enum *en; | 553 | struct nouveau_engine *engine = NULL; |
| 418 | struct nouveau_engine *engine; | ||
| 419 | struct nouveau_object *engctx = NULL; | 554 | struct nouveau_object *engctx = NULL; |
| 555 | const struct nouveau_enum *en; | ||
| 556 | const char *name = "unknown"; | ||
| 420 | 557 | ||
| 421 | nv_error(priv, "PFIFO: %s fault at 0x%010llx [", (stat & 0x00000080) ? | 558 | nv_error(priv, "PFIFO: %s fault at 0x%010llx [", (stat & 0x00000080) ? |
| 422 | "write" : "read", (u64)vahi << 32 | valo); | 559 | "write" : "read", (u64)vahi << 32 | valo); |
| 423 | nouveau_enum_print(nve0_fifo_fault_reason, stat & 0x0000000f); | 560 | nouveau_enum_print(nve0_fifo_fault_reason, stat & 0x0000000f); |
| 424 | pr_cont("] from "); | 561 | pr_cont("] from "); |
| 425 | en = nouveau_enum_print(nve0_fifo_fault_unit, unit); | 562 | en = nouveau_enum_print(nve0_fifo_fault_engine, unit); |
| 426 | if (stat & 0x00000040) { | 563 | if (stat & 0x00000040) { |
| 427 | pr_cont("/"); | 564 | pr_cont("/"); |
| 428 | nouveau_enum_print(nve0_fifo_fault_hubclient, client); | 565 | nouveau_enum_print(nve0_fifo_fault_hubclient, client); |
| @@ -432,14 +569,22 @@ nve0_fifo_isr_vm_fault(struct nve0_fifo_priv *priv, int unit) | |||
| 432 | } | 569 | } |
| 433 | 570 | ||
| 434 | if (en && en->data2) { | 571 | if (en && en->data2) { |
| 435 | engine = nouveau_engine(priv, en->data2); | 572 | if (en->data2 == NVDEV_SUBDEV_BAR) { |
| 436 | if (engine) | 573 | nv_mask(priv, 0x001704, 0x00000000, 0x00000000); |
| 437 | engctx = nouveau_engctx_get(engine, inst); | 574 | name = "BAR1"; |
| 438 | 575 | } else | |
| 576 | if (en->data2 == NVDEV_SUBDEV_INSTMEM) { | ||
| 577 | nv_mask(priv, 0x001714, 0x00000000, 0x00000000); | ||
| 578 | name = "BAR3"; | ||
| 579 | } else { | ||
| 580 | engine = nouveau_engine(priv, en->data2); | ||
| 581 | if (engine) { | ||
| 582 | engctx = nouveau_engctx_get(engine, inst); | ||
| 583 | name = nouveau_client_name(engctx); | ||
| 584 | } | ||
| 585 | } | ||
| 439 | } | 586 | } |
| 440 | 587 | pr_cont(" on channel 0x%010llx [%s]\n", (u64)inst << 12, name); | |
| 441 | pr_cont(" on channel 0x%010llx [%s]\n", (u64)inst << 12, | ||
| 442 | nouveau_client_name(engctx)); | ||
| 443 | 588 | ||
| 444 | nouveau_engctx_put(engctx); | 589 | nouveau_engctx_put(engctx); |
| 445 | } | 590 | } |
| @@ -471,7 +616,7 @@ out: | |||
| 471 | } | 616 | } |
| 472 | 617 | ||
| 473 | static void | 618 | static void |
| 474 | nve0_fifo_isr_subfifo_intr(struct nve0_fifo_priv *priv, int unit) | 619 | nve0_fifo_intr_pbdma(struct nve0_fifo_priv *priv, int unit) |
| 475 | { | 620 | { |
| 476 | u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000)); | 621 | u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000)); |
| 477 | u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000)); | 622 | u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000)); |
| @@ -487,11 +632,11 @@ nve0_fifo_isr_subfifo_intr(struct nve0_fifo_priv *priv, int unit) | |||
| 487 | } | 632 | } |
| 488 | 633 | ||
| 489 | if (show) { | 634 | if (show) { |
| 490 | nv_error(priv, "SUBFIFO%d:", unit); | 635 | nv_error(priv, "PBDMA%d:", unit); |
| 491 | nouveau_bitfield_print(nve0_fifo_subfifo_intr, show); | 636 | nouveau_bitfield_print(nve0_fifo_pbdma_intr, show); |
| 492 | pr_cont("\n"); | 637 | pr_cont("\n"); |
| 493 | nv_error(priv, | 638 | nv_error(priv, |
| 494 | "SUBFIFO%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n", | 639 | "PBDMA%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n", |
| 495 | unit, chid, | 640 | unit, chid, |
| 496 | nouveau_client_name_for_fifo_chid(&priv->base, chid), | 641 | nouveau_client_name_for_fifo_chid(&priv->base, chid), |
| 497 | subc, mthd, data); | 642 | subc, mthd, data); |
| @@ -508,19 +653,56 @@ nve0_fifo_intr(struct nouveau_subdev *subdev) | |||
| 508 | u32 mask = nv_rd32(priv, 0x002140); | 653 | u32 mask = nv_rd32(priv, 0x002140); |
| 509 | u32 stat = nv_rd32(priv, 0x002100) & mask; | 654 | u32 stat = nv_rd32(priv, 0x002100) & mask; |
| 510 | 655 | ||
| 656 | if (stat & 0x00000001) { | ||
| 657 | u32 stat = nv_rd32(priv, 0x00252c); | ||
| 658 | nv_error(priv, "BIND_ERROR 0x%08x\n", stat); | ||
| 659 | nv_wr32(priv, 0x002100, 0x00000001); | ||
| 660 | stat &= ~0x00000001; | ||
| 661 | } | ||
| 662 | |||
| 663 | if (stat & 0x00000010) { | ||
| 664 | nv_error(priv, "PIO_ERROR\n"); | ||
| 665 | nv_wr32(priv, 0x002100, 0x00000010); | ||
| 666 | stat &= ~0x00000010; | ||
| 667 | } | ||
| 668 | |||
| 511 | if (stat & 0x00000100) { | 669 | if (stat & 0x00000100) { |
| 512 | nv_warn(priv, "unknown status 0x00000100\n"); | 670 | nve0_fifo_intr_sched(priv); |
| 513 | nv_wr32(priv, 0x002100, 0x00000100); | 671 | nv_wr32(priv, 0x002100, 0x00000100); |
| 514 | stat &= ~0x00000100; | 672 | stat &= ~0x00000100; |
| 515 | } | 673 | } |
| 516 | 674 | ||
| 675 | if (stat & 0x00010000) { | ||
| 676 | nve0_fifo_intr_chsw(priv); | ||
| 677 | nv_wr32(priv, 0x002100, 0x00010000); | ||
| 678 | stat &= ~0x00010000; | ||
| 679 | } | ||
| 680 | |||
| 681 | if (stat & 0x00800000) { | ||
| 682 | nv_error(priv, "FB_FLUSH_TIMEOUT\n"); | ||
| 683 | nv_wr32(priv, 0x002100, 0x00800000); | ||
| 684 | stat &= ~0x00800000; | ||
| 685 | } | ||
| 686 | |||
| 687 | if (stat & 0x01000000) { | ||
| 688 | nv_error(priv, "LB_ERROR\n"); | ||
| 689 | nv_wr32(priv, 0x002100, 0x01000000); | ||
| 690 | stat &= ~0x01000000; | ||
| 691 | } | ||
| 692 | |||
| 693 | if (stat & 0x08000000) { | ||
| 694 | nve0_fifo_intr_dropped_fault(priv); | ||
| 695 | nv_wr32(priv, 0x002100, 0x08000000); | ||
| 696 | stat &= ~0x08000000; | ||
| 697 | } | ||
| 698 | |||
| 517 | if (stat & 0x10000000) { | 699 | if (stat & 0x10000000) { |
| 518 | u32 units = nv_rd32(priv, 0x00259c); | 700 | u32 units = nv_rd32(priv, 0x00259c); |
| 519 | u32 u = units; | 701 | u32 u = units; |
| 520 | 702 | ||
| 521 | while (u) { | 703 | while (u) { |
| 522 | int i = ffs(u) - 1; | 704 | int i = ffs(u) - 1; |
| 523 | nve0_fifo_isr_vm_fault(priv, i); | 705 | nve0_fifo_intr_fault(priv, i); |
| 524 | u &= ~(1 << i); | 706 | u &= ~(1 << i); |
| 525 | } | 707 | } |
| 526 | 708 | ||
| @@ -529,22 +711,28 @@ nve0_fifo_intr(struct nouveau_subdev *subdev) | |||
| 529 | } | 711 | } |
| 530 | 712 | ||
| 531 | if (stat & 0x20000000) { | 713 | if (stat & 0x20000000) { |
| 532 | u32 units = nv_rd32(priv, 0x0025a0); | 714 | u32 mask = nv_rd32(priv, 0x0025a0); |
| 533 | u32 u = units; | 715 | u32 temp = mask; |
| 534 | 716 | ||
| 535 | while (u) { | 717 | while (temp) { |
| 536 | int i = ffs(u) - 1; | 718 | u32 unit = ffs(temp) - 1; |
| 537 | nve0_fifo_isr_subfifo_intr(priv, i); | 719 | nve0_fifo_intr_pbdma(priv, unit); |
| 538 | u &= ~(1 << i); | 720 | temp &= ~(1 << unit); |
| 539 | } | 721 | } |
| 540 | 722 | ||
| 541 | nv_wr32(priv, 0x0025a0, units); | 723 | nv_wr32(priv, 0x0025a0, mask); |
| 542 | stat &= ~0x20000000; | 724 | stat &= ~0x20000000; |
| 543 | } | 725 | } |
| 544 | 726 | ||
| 545 | if (stat & 0x40000000) { | 727 | if (stat & 0x40000000) { |
| 546 | nv_warn(priv, "unknown status 0x40000000\n"); | 728 | u32 mask = nv_mask(priv, 0x002a00, 0x00000000, 0x00000000); |
| 547 | nv_mask(priv, 0x002a00, 0x00000000, 0x00000000); | 729 | |
| 730 | while (mask) { | ||
| 731 | u32 engn = ffs(mask) - 1; | ||
| 732 | /* runlist event, not currently used */ | ||
| 733 | mask &= ~(1 << engn); | ||
| 734 | } | ||
| 735 | |||
| 548 | stat &= ~0x40000000; | 736 | stat &= ~0x40000000; |
| 549 | } | 737 | } |
| 550 | 738 | ||
| @@ -575,53 +763,52 @@ nve0_fifo_uevent_disable(struct nouveau_event *event, int index) | |||
| 575 | nv_mask(priv, 0x002140, 0x80000000, 0x00000000); | 763 | nv_mask(priv, 0x002140, 0x80000000, 0x00000000); |
| 576 | } | 764 | } |
| 577 | 765 | ||
| 578 | static int | 766 | int |
| 579 | nve0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | 767 | nve0_fifo_fini(struct nouveau_object *object, bool suspend) |
| 580 | struct nouveau_oclass *oclass, void *data, u32 size, | ||
| 581 | struct nouveau_object **pobject) | ||
| 582 | { | 768 | { |
| 583 | struct nve0_fifo_priv *priv; | 769 | struct nve0_fifo_priv *priv = (void *)object; |
| 584 | int ret, i; | 770 | int ret; |
| 585 | 771 | ||
| 586 | ret = nouveau_fifo_create(parent, engine, oclass, 0, 4095, &priv); | 772 | ret = nouveau_fifo_fini(&priv->base, suspend); |
| 587 | *pobject = nv_object(priv); | ||
| 588 | if (ret) | 773 | if (ret) |
| 589 | return ret; | 774 | return ret; |
| 590 | 775 | ||
| 591 | for (i = 0; i < FIFO_ENGINE_NR; i++) { | 776 | /* allow mmu fault interrupts, even when we're not using fifo */ |
| 592 | ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000, | 777 | nv_mask(priv, 0x002140, 0x10000000, 0x10000000); |
| 593 | 0, &priv->engine[i].playlist[0]); | 778 | return 0; |
| 594 | if (ret) | 779 | } |
| 595 | return ret; | ||
| 596 | 780 | ||
| 597 | ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000, | 781 | int |
| 598 | 0, &priv->engine[i].playlist[1]); | 782 | nve0_fifo_init(struct nouveau_object *object) |
| 599 | if (ret) | 783 | { |
| 600 | return ret; | 784 | struct nve0_fifo_priv *priv = (void *)object; |
| 601 | } | 785 | int ret, i; |
| 602 | 786 | ||
| 603 | ret = nouveau_gpuobj_new(nv_object(priv), NULL, 4096 * 0x200, 0x1000, | 787 | ret = nouveau_fifo_init(&priv->base); |
| 604 | NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem); | ||
| 605 | if (ret) | 788 | if (ret) |
| 606 | return ret; | 789 | return ret; |
| 607 | 790 | ||
| 608 | ret = nouveau_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW, | 791 | /* enable all available PBDMA units */ |
| 609 | &priv->user.bar); | 792 | nv_wr32(priv, 0x000204, 0xffffffff); |
| 610 | if (ret) | 793 | priv->spoon_nr = hweight32(nv_rd32(priv, 0x000204)); |
| 611 | return ret; | 794 | nv_debug(priv, "%d PBDMA unit(s)\n", priv->spoon_nr); |
| 612 | 795 | ||
| 613 | priv->base.uevent->enable = nve0_fifo_uevent_enable; | 796 | /* PBDMA[n] */ |
| 614 | priv->base.uevent->disable = nve0_fifo_uevent_disable; | 797 | for (i = 0; i < priv->spoon_nr; i++) { |
| 615 | priv->base.uevent->priv = priv; | 798 | nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000); |
| 799 | nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */ | ||
| 800 | nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */ | ||
| 801 | } | ||
| 616 | 802 | ||
| 617 | nv_subdev(priv)->unit = 0x00000100; | 803 | nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12); |
| 618 | nv_subdev(priv)->intr = nve0_fifo_intr; | 804 | |
| 619 | nv_engine(priv)->cclass = &nve0_fifo_cclass; | 805 | nv_wr32(priv, 0x002a00, 0xffffffff); |
| 620 | nv_engine(priv)->sclass = nve0_fifo_sclass; | 806 | nv_wr32(priv, 0x002100, 0xffffffff); |
| 807 | nv_wr32(priv, 0x002140, 0x3fffffff); | ||
| 621 | return 0; | 808 | return 0; |
| 622 | } | 809 | } |
| 623 | 810 | ||
| 624 | static void | 811 | void |
| 625 | nve0_fifo_dtor(struct nouveau_object *object) | 812 | nve0_fifo_dtor(struct nouveau_object *object) |
| 626 | { | 813 | { |
| 627 | struct nve0_fifo_priv *priv = (void *)object; | 814 | struct nve0_fifo_priv *priv = (void *)object; |
| @@ -631,50 +818,69 @@ nve0_fifo_dtor(struct nouveau_object *object) | |||
| 631 | nouveau_gpuobj_ref(NULL, &priv->user.mem); | 818 | nouveau_gpuobj_ref(NULL, &priv->user.mem); |
| 632 | 819 | ||
| 633 | for (i = 0; i < FIFO_ENGINE_NR; i++) { | 820 | for (i = 0; i < FIFO_ENGINE_NR; i++) { |
| 634 | nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[1]); | 821 | nouveau_gpuobj_ref(NULL, &priv->engine[i].runlist[1]); |
| 635 | nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[0]); | 822 | nouveau_gpuobj_ref(NULL, &priv->engine[i].runlist[0]); |
| 636 | } | 823 | } |
| 637 | 824 | ||
| 638 | nouveau_fifo_destroy(&priv->base); | 825 | nouveau_fifo_destroy(&priv->base); |
| 639 | } | 826 | } |
| 640 | 827 | ||
| 641 | static int | 828 | int |
| 642 | nve0_fifo_init(struct nouveau_object *object) | 829 | nve0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine, |
| 830 | struct nouveau_oclass *oclass, void *data, u32 size, | ||
| 831 | struct nouveau_object **pobject) | ||
| 643 | { | 832 | { |
| 644 | struct nve0_fifo_priv *priv = (void *)object; | 833 | struct nve0_fifo_impl *impl = (void *)oclass; |
| 834 | struct nve0_fifo_priv *priv; | ||
| 645 | int ret, i; | 835 | int ret, i; |
| 646 | 836 | ||
| 647 | ret = nouveau_fifo_init(&priv->base); | 837 | ret = nouveau_fifo_create(parent, engine, oclass, 0, |
| 838 | impl->channels - 1, &priv); | ||
| 839 | *pobject = nv_object(priv); | ||
| 648 | if (ret) | 840 | if (ret) |
| 649 | return ret; | 841 | return ret; |
| 650 | 842 | ||
| 651 | /* enable all available PSUBFIFOs */ | 843 | for (i = 0; i < FIFO_ENGINE_NR; i++) { |
| 652 | nv_wr32(priv, 0x000204, 0xffffffff); | 844 | ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000, |
| 653 | priv->spoon_nr = hweight32(nv_rd32(priv, 0x000204)); | 845 | 0, &priv->engine[i].runlist[0]); |
| 654 | nv_debug(priv, "%d subfifo(s)\n", priv->spoon_nr); | 846 | if (ret) |
| 847 | return ret; | ||
| 655 | 848 | ||
| 656 | /* PSUBFIFO[n] */ | 849 | ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000, |
| 657 | for (i = 0; i < priv->spoon_nr; i++) { | 850 | 0, &priv->engine[i].runlist[1]); |
| 658 | nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000); | 851 | if (ret) |
| 659 | nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */ | 852 | return ret; |
| 660 | nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */ | ||
| 661 | } | 853 | } |
| 662 | 854 | ||
| 663 | nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12); | 855 | ret = nouveau_gpuobj_new(nv_object(priv), NULL, 4096 * 0x200, 0x1000, |
| 856 | NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem); | ||
| 857 | if (ret) | ||
| 858 | return ret; | ||
| 664 | 859 | ||
| 665 | nv_wr32(priv, 0x002a00, 0xffffffff); | 860 | ret = nouveau_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW, |
| 666 | nv_wr32(priv, 0x002100, 0xffffffff); | 861 | &priv->user.bar); |
| 667 | nv_wr32(priv, 0x002140, 0x3fffffff); | 862 | if (ret) |
| 863 | return ret; | ||
| 864 | |||
| 865 | priv->base.uevent->enable = nve0_fifo_uevent_enable; | ||
| 866 | priv->base.uevent->disable = nve0_fifo_uevent_disable; | ||
| 867 | priv->base.uevent->priv = priv; | ||
| 868 | |||
| 869 | nv_subdev(priv)->unit = 0x00000100; | ||
| 870 | nv_subdev(priv)->intr = nve0_fifo_intr; | ||
| 871 | nv_engine(priv)->cclass = &nve0_fifo_cclass; | ||
| 872 | nv_engine(priv)->sclass = nve0_fifo_sclass; | ||
| 668 | return 0; | 873 | return 0; |
| 669 | } | 874 | } |
| 670 | 875 | ||
| 671 | struct nouveau_oclass * | 876 | struct nouveau_oclass * |
| 672 | nve0_fifo_oclass = &(struct nouveau_oclass) { | 877 | nve0_fifo_oclass = &(struct nve0_fifo_impl) { |
| 673 | .handle = NV_ENGINE(FIFO, 0xe0), | 878 | .base.handle = NV_ENGINE(FIFO, 0xe0), |
| 674 | .ofuncs = &(struct nouveau_ofuncs) { | 879 | .base.ofuncs = &(struct nouveau_ofuncs) { |
| 675 | .ctor = nve0_fifo_ctor, | 880 | .ctor = nve0_fifo_ctor, |
| 676 | .dtor = nve0_fifo_dtor, | 881 | .dtor = nve0_fifo_dtor, |
| 677 | .init = nve0_fifo_init, | 882 | .init = nve0_fifo_init, |
| 678 | .fini = _nouveau_fifo_fini, | 883 | .fini = nve0_fifo_fini, |
| 679 | }, | 884 | }, |
| 680 | }; | 885 | .channels = 4096, |
| 886 | }.base; | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.h b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.h new file mode 100644 index 000000000000..014344ebee66 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.h | |||
| @@ -0,0 +1,17 @@ | |||
| 1 | #ifndef __NVKM_FIFO_NVE0_H__ | ||
| 2 | #define __NVKM_FIFO_NVE0_H__ | ||
| 3 | |||
| 4 | #include <engine/fifo.h> | ||
| 5 | |||
| 6 | int nve0_fifo_ctor(struct nouveau_object *, struct nouveau_object *, | ||
| 7 | struct nouveau_oclass *, void *, u32, | ||
| 8 | struct nouveau_object **); | ||
| 9 | void nve0_fifo_dtor(struct nouveau_object *); | ||
| 10 | int nve0_fifo_init(struct nouveau_object *); | ||
| 11 | |||
| 12 | struct nve0_fifo_impl { | ||
| 13 | struct nouveau_oclass base; | ||
| 14 | u32 channels; | ||
| 15 | }; | ||
| 16 | |||
| 17 | #endif | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c new file mode 100644 index 000000000000..a86bd3352bf8 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c | |||
| @@ -0,0 +1,1408 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2013 Red Hat Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | * Authors: Ben Skeggs <bskeggs@redhat.com> | ||
| 23 | */ | ||
| 24 | |||
| 25 | #include "nvc0.h" | ||
| 26 | |||
/* NV108 (GK208) PGRAPH context-creation table, replayed through the ICMD
 * interface to build the initial "golden" graphics context image.
 *
 * Each entry writes one value to a run of consecutive methods:
 * { first-method, count, stride, value } — NOTE(review): field meanings
 * inferred from the repeated-count usage here and should be confirmed
 * against struct nvc0_graph_init in nvc0.h.  These values are a raw
 * hardware dump; do not hand-edit individual entries. */
static struct nvc0_graph_init
nv108_grctx_init_icmd[] = {
	{ 0x001000,   1, 0x01, 0x00000004 },
	{ 0x000039,   3, 0x01, 0x00000000 },
	{ 0x0000a9,   1, 0x01, 0x0000ffff },
	{ 0x000038,   1, 0x01, 0x0fac6881 },
	{ 0x00003d,   1, 0x01, 0x00000001 },
	{ 0x0000e8,   8, 0x01, 0x00000400 },
	{ 0x000078,   8, 0x01, 0x00000300 },
	{ 0x000050,   1, 0x01, 0x00000011 },
	{ 0x000058,   8, 0x01, 0x00000008 },
	{ 0x000208,   8, 0x01, 0x00000001 },
	{ 0x000081,   1, 0x01, 0x00000001 },
	{ 0x000085,   1, 0x01, 0x00000004 },
	{ 0x000088,   1, 0x01, 0x00000400 },
	{ 0x000090,   1, 0x01, 0x00000300 },
	{ 0x000098,   1, 0x01, 0x00001001 },
	{ 0x0000e3,   1, 0x01, 0x00000001 },
	{ 0x0000da,   1, 0x01, 0x00000001 },
	{ 0x0000f8,   1, 0x01, 0x00000003 },
	{ 0x0000fa,   1, 0x01, 0x00000001 },
	{ 0x00009f,   4, 0x01, 0x0000ffff },
	{ 0x0000b1,   1, 0x01, 0x00000001 },
	{ 0x0000ad,   1, 0x01, 0x0000013e },
	{ 0x0000e1,   1, 0x01, 0x00000010 },
	{ 0x000290,  16, 0x01, 0x00000000 },
	{ 0x0003b0,  16, 0x01, 0x00000000 },
	{ 0x0002a0,  16, 0x01, 0x00000000 },
	{ 0x000420,  16, 0x01, 0x00000000 },
	{ 0x0002b0,  16, 0x01, 0x00000000 },
	{ 0x000430,  16, 0x01, 0x00000000 },
	{ 0x0002c0,  16, 0x01, 0x00000000 },
	{ 0x0004d0,  16, 0x01, 0x00000000 },
	{ 0x000720,  16, 0x01, 0x00000000 },
	{ 0x0008c0,  16, 0x01, 0x00000000 },
	{ 0x000890,  16, 0x01, 0x00000000 },
	{ 0x0008e0,  16, 0x01, 0x00000000 },
	{ 0x0008a0,  16, 0x01, 0x00000000 },
	{ 0x0008f0,  16, 0x01, 0x00000000 },
	{ 0x00094c,   1, 0x01, 0x000000ff },
	{ 0x00094d,   1, 0x01, 0xffffffff },
	{ 0x00094e,   1, 0x01, 0x00000002 },
	{ 0x0002ec,   1, 0x01, 0x00000001 },
	{ 0x0002f2,   2, 0x01, 0x00000001 },
	{ 0x0002f5,   1, 0x01, 0x00000001 },
	{ 0x0002f7,   1, 0x01, 0x00000001 },
	{ 0x000303,   1, 0x01, 0x00000001 },
	{ 0x0002e6,   1, 0x01, 0x00000001 },
	{ 0x000466,   1, 0x01, 0x00000052 },
	{ 0x000301,   1, 0x01, 0x3f800000 },
	{ 0x000304,   1, 0x01, 0x30201000 },
	{ 0x000305,   1, 0x01, 0x70605040 },
	{ 0x000306,   1, 0x01, 0xb8a89888 },
	{ 0x000307,   1, 0x01, 0xf8e8d8c8 },
	{ 0x00030a,   1, 0x01, 0x00ffff00 },
	{ 0x00030b,   1, 0x01, 0x0000001a },
	{ 0x00030c,   1, 0x01, 0x00000001 },
	{ 0x000318,   1, 0x01, 0x00000001 },
	{ 0x000340,   1, 0x01, 0x00000000 },
	{ 0x000375,   1, 0x01, 0x00000001 },
	{ 0x00037d,   1, 0x01, 0x00000006 },
	{ 0x0003a0,   1, 0x01, 0x00000002 },
	{ 0x0003aa,   1, 0x01, 0x00000001 },
	{ 0x0003a9,   1, 0x01, 0x00000001 },
	{ 0x000380,   1, 0x01, 0x00000001 },
	{ 0x000383,   1, 0x01, 0x00000011 },
	{ 0x000360,   1, 0x01, 0x00000040 },
	{ 0x000366,   2, 0x01, 0x00000000 },
	{ 0x000368,   1, 0x01, 0x00000fff },
	{ 0x000370,   2, 0x01, 0x00000000 },
	{ 0x000372,   1, 0x01, 0x000fffff },
	{ 0x00037a,   1, 0x01, 0x00000012 },
	{ 0x000619,   1, 0x01, 0x00000003 },
	{ 0x000811,   1, 0x01, 0x00000003 },
	{ 0x000812,   1, 0x01, 0x00000004 },
	{ 0x000813,   1, 0x01, 0x00000006 },
	{ 0x000814,   1, 0x01, 0x00000008 },
	{ 0x000815,   1, 0x01, 0x0000000b },
	{ 0x000800,   6, 0x01, 0x00000001 },
	{ 0x000632,   1, 0x01, 0x00000001 },
	{ 0x000633,   1, 0x01, 0x00000002 },
	{ 0x000634,   1, 0x01, 0x00000003 },
	{ 0x000635,   1, 0x01, 0x00000004 },
	{ 0x000654,   1, 0x01, 0x3f800000 },
	{ 0x000657,   1, 0x01, 0x3f800000 },
	{ 0x000655,   2, 0x01, 0x3f800000 },
	{ 0x0006cd,   1, 0x01, 0x3f800000 },
	{ 0x0007f5,   1, 0x01, 0x3f800000 },
	{ 0x0007dc,   1, 0x01, 0x39291909 },
	{ 0x0007dd,   1, 0x01, 0x79695949 },
	{ 0x0007de,   1, 0x01, 0xb9a99989 },
	{ 0x0007df,   1, 0x01, 0xf9e9d9c9 },
	{ 0x0007e8,   1, 0x01, 0x00003210 },
	{ 0x0007e9,   1, 0x01, 0x00007654 },
	{ 0x0007ea,   1, 0x01, 0x00000098 },
	{ 0x0007ec,   1, 0x01, 0x39291909 },
	{ 0x0007ed,   1, 0x01, 0x79695949 },
	{ 0x0007ee,   1, 0x01, 0xb9a99989 },
	{ 0x0007ef,   1, 0x01, 0xf9e9d9c9 },
	{ 0x0007f0,   1, 0x01, 0x00003210 },
	{ 0x0007f1,   1, 0x01, 0x00007654 },
	{ 0x0007f2,   1, 0x01, 0x00000098 },
	{ 0x0005a5,   1, 0x01, 0x00000001 },
	{ 0x000980, 128, 0x01, 0x00000000 },
	{ 0x000468,   1, 0x01, 0x00000004 },
	{ 0x00046c,   1, 0x01, 0x00000001 },
	{ 0x000470,  96, 0x01, 0x00000000 },
	{ 0x000510,  16, 0x01, 0x3f800000 },
	{ 0x000520,   1, 0x01, 0x000002b6 },
	{ 0x000529,   1, 0x01, 0x00000001 },
	{ 0x000530,  16, 0x01, 0xffff0000 },
	{ 0x000585,   1, 0x01, 0x0000003f },
	{ 0x000576,   1, 0x01, 0x00000003 },
	{ 0x00057b,   1, 0x01, 0x00000059 },
	{ 0x000586,   1, 0x01, 0x00000040 },
	{ 0x000582,   2, 0x01, 0x00000080 },
	{ 0x0005c2,   1, 0x01, 0x00000001 },
	{ 0x000638,   2, 0x01, 0x00000001 },
	{ 0x00063a,   1, 0x01, 0x00000002 },
	{ 0x00063b,   2, 0x01, 0x00000001 },
	{ 0x00063d,   1, 0x01, 0x00000002 },
	{ 0x00063e,   1, 0x01, 0x00000001 },
	{ 0x0008b8,   8, 0x01, 0x00000001 },
	{ 0x000900,   8, 0x01, 0x00000001 },
	{ 0x000908,   8, 0x01, 0x00000002 },
	{ 0x000910,  16, 0x01, 0x00000001 },
	{ 0x000920,   8, 0x01, 0x00000002 },
	{ 0x000928,   8, 0x01, 0x00000001 },
	{ 0x000662,   1, 0x01, 0x00000001 },
	{ 0x000648,   9, 0x01, 0x00000001 },
	{ 0x000658,   1, 0x01, 0x0000000f },
	{ 0x0007ff,   1, 0x01, 0x0000000a },
	{ 0x00066a,   1, 0x01, 0x40000000 },
	{ 0x00066b,   1, 0x01, 0x10000000 },
	{ 0x00066c,   2, 0x01, 0xffff0000 },
	{ 0x0007af,   2, 0x01, 0x00000008 },
	{ 0x0007f6,   1, 0x01, 0x00000001 },
	{ 0x00080b,   1, 0x01, 0x00000002 },
	{ 0x0006b2,   1, 0x01, 0x00000055 },
	{ 0x0007ad,   1, 0x01, 0x00000003 },
	{ 0x000937,   1, 0x01, 0x00000001 },
	{ 0x000971,   1, 0x01, 0x00000008 },
	{ 0x000972,   1, 0x01, 0x00000040 },
	{ 0x000973,   1, 0x01, 0x0000012c },
	{ 0x00097c,   1, 0x01, 0x00000040 },
	{ 0x000979,   1, 0x01, 0x00000003 },
	{ 0x000975,   1, 0x01, 0x00000020 },
	{ 0x000976,   1, 0x01, 0x00000001 },
	{ 0x000977,   1, 0x01, 0x00000020 },
	{ 0x000978,   1, 0x01, 0x00000001 },
	{ 0x000957,   1, 0x01, 0x00000003 },
	{ 0x00095e,   1, 0x01, 0x20164010 },
	{ 0x00095f,   1, 0x01, 0x00000020 },
	{ 0x000a0d,   1, 0x01, 0x00000006 },
	{ 0x00097d,   1, 0x01, 0x00000020 },
	{ 0x000683,   1, 0x01, 0x00000006 },
	{ 0x000685,   1, 0x01, 0x003fffff },
	{ 0x000687,   1, 0x01, 0x003fffff },
	{ 0x0006a0,   1, 0x01, 0x00000005 },
	{ 0x000840,   1, 0x01, 0x00400008 },
	{ 0x000841,   1, 0x01, 0x08000080 },
	{ 0x000842,   1, 0x01, 0x00400008 },
	{ 0x000843,   1, 0x01, 0x08000080 },
	{ 0x0006aa,   1, 0x01, 0x00000001 },
	{ 0x0006ab,   1, 0x01, 0x00000002 },
	{ 0x0006ac,   1, 0x01, 0x00000080 },
	{ 0x0006ad,   2, 0x01, 0x00000100 },
	{ 0x0006b1,   1, 0x01, 0x00000011 },
	{ 0x0006bb,   1, 0x01, 0x000000cf },
	{ 0x0006ce,   1, 0x01, 0x2a712488 },
	{ 0x000739,   1, 0x01, 0x4085c000 },
	{ 0x00073a,   1, 0x01, 0x00000080 },
	{ 0x000786,   1, 0x01, 0x80000100 },
	{ 0x00073c,   1, 0x01, 0x00010100 },
	{ 0x00073d,   1, 0x01, 0x02800000 },
	{ 0x000787,   1, 0x01, 0x000000cf },
	{ 0x00078c,   1, 0x01, 0x00000008 },
	{ 0x000792,   1, 0x01, 0x00000001 },
	{ 0x000794,   3, 0x01, 0x00000001 },
	{ 0x000797,   1, 0x01, 0x000000cf },
	{ 0x000836,   1, 0x01, 0x00000001 },
	{ 0x00079a,   1, 0x01, 0x00000002 },
	{ 0x000833,   1, 0x01, 0x04444480 },
	{ 0x0007a1,   1, 0x01, 0x00000001 },
	{ 0x0007a3,   3, 0x01, 0x00000001 },
	{ 0x000831,   1, 0x01, 0x00000004 },
	{ 0x000b07,   1, 0x01, 0x00000002 },
	{ 0x000b08,   2, 0x01, 0x00000100 },
	{ 0x000b0a,   1, 0x01, 0x00000001 },
	{ 0x000a04,   1, 0x01, 0x000000ff },
	{ 0x000a0b,   1, 0x01, 0x00000040 },
	{ 0x00097f,   1, 0x01, 0x00000100 },
	{ 0x000a02,   1, 0x01, 0x00000001 },
	{ 0x000809,   1, 0x01, 0x00000007 },
	{ 0x00c221,   1, 0x01, 0x00000040 },
	{ 0x00c1b0,   8, 0x01, 0x0000000f },
	{ 0x00c1b8,   1, 0x01, 0x0fac6881 },
	{ 0x00c1b9,   1, 0x01, 0x00fac688 },
	{ 0x00c401,   1, 0x01, 0x00000001 },
	{ 0x00c402,   1, 0x01, 0x00010001 },
	{ 0x00c403,   2, 0x01, 0x00000001 },
	{ 0x00c40e,   1, 0x01, 0x00000020 },
	{ 0x00c500,   1, 0x01, 0x00000003 },
	{ 0x01e100,   1, 0x01, 0x00000001 },
	/* NOTE(review): writes to 0x001000 with values 4/2/8/1 appear to
	 * delimit sub-sequences of this dump — confirm semantics against
	 * the equivalent nvc0/nve0 grctx tables before relying on it. */
	{ 0x001000,   1, 0x01, 0x00000002 },
	{ 0x0006aa,   1, 0x01, 0x00000001 },
	{ 0x0006ad,   2, 0x01, 0x00000100 },
	{ 0x0006b1,   1, 0x01, 0x00000011 },
	{ 0x00078c,   1, 0x01, 0x00000008 },
	{ 0x000792,   1, 0x01, 0x00000001 },
	{ 0x000794,   3, 0x01, 0x00000001 },
	{ 0x000797,   1, 0x01, 0x000000cf },
	{ 0x00079a,   1, 0x01, 0x00000002 },
	{ 0x0007a1,   1, 0x01, 0x00000001 },
	{ 0x0007a3,   3, 0x01, 0x00000001 },
	{ 0x000831,   1, 0x01, 0x00000004 },
	{ 0x01e100,   1, 0x01, 0x00000001 },
	{ 0x001000,   1, 0x01, 0x00000008 },
	{ 0x000039,   3, 0x01, 0x00000000 },
	{ 0x000380,   1, 0x01, 0x00000001 },
	{ 0x000366,   2, 0x01, 0x00000000 },
	{ 0x000368,   1, 0x01, 0x00000fff },
	{ 0x000370,   2, 0x01, 0x00000000 },
	{ 0x000372,   1, 0x01, 0x000fffff },
	{ 0x000813,   1, 0x01, 0x00000006 },
	{ 0x000814,   1, 0x01, 0x00000008 },
	{ 0x000957,   1, 0x01, 0x00000003 },
	{ 0x000b07,   1, 0x01, 0x00000002 },
	{ 0x000b08,   2, 0x01, 0x00000100 },
	{ 0x000b0a,   1, 0x01, 0x00000001 },
	{ 0x000a04,   1, 0x01, 0x000000ff },
	{ 0x000a0b,   1, 0x01, 0x00000040 },
	{ 0x00097f,   1, 0x01, 0x00000100 },
	{ 0x000a02,   1, 0x01, 0x00000001 },
	{ 0x000809,   1, 0x01, 0x00000007 },
	{ 0x00c221,   1, 0x01, 0x00000040 },
	{ 0x00c401,   1, 0x01, 0x00000001 },
	{ 0x00c402,   1, 0x01, 0x00010001 },
	{ 0x00c403,   2, 0x01, 0x00000001 },
	{ 0x00c40e,   1, 0x01, 0x00000020 },
	{ 0x00c500,   1, 0x01, 0x00000003 },
	{ 0x01e100,   1, 0x01, 0x00000001 },
	{ 0x001000,   1, 0x01, 0x00000001 },
	{ 0x000b07,   1, 0x01, 0x00000002 },
	{ 0x000b08,   2, 0x01, 0x00000100 },
	{ 0x000b0a,   1, 0x01, 0x00000001 },
	{ 0x01e100,   1, 0x01, 0x00000001 },
	{}	/* sentinel: zero entry terminates the table */
};
| 276 | |||
| 277 | static struct nvc0_graph_init | ||
| 278 | nv108_grctx_init_a197[] = { | ||
| 279 | { 0x000800, 1, 0x04, 0x00000000 }, | ||
| 280 | { 0x000840, 1, 0x04, 0x00000000 }, | ||
| 281 | { 0x000880, 1, 0x04, 0x00000000 }, | ||
| 282 | { 0x0008c0, 1, 0x04, 0x00000000 }, | ||
| 283 | { 0x000900, 1, 0x04, 0x00000000 }, | ||
| 284 | { 0x000940, 1, 0x04, 0x00000000 }, | ||
| 285 | { 0x000980, 1, 0x04, 0x00000000 }, | ||
| 286 | { 0x0009c0, 1, 0x04, 0x00000000 }, | ||
| 287 | { 0x000804, 1, 0x04, 0x00000000 }, | ||
| 288 | { 0x000844, 1, 0x04, 0x00000000 }, | ||
| 289 | { 0x000884, 1, 0x04, 0x00000000 }, | ||
| 290 | { 0x0008c4, 1, 0x04, 0x00000000 }, | ||
| 291 | { 0x000904, 1, 0x04, 0x00000000 }, | ||
| 292 | { 0x000944, 1, 0x04, 0x00000000 }, | ||
| 293 | { 0x000984, 1, 0x04, 0x00000000 }, | ||
| 294 | { 0x0009c4, 1, 0x04, 0x00000000 }, | ||
| 295 | { 0x000808, 1, 0x04, 0x00000400 }, | ||
| 296 | { 0x000848, 1, 0x04, 0x00000400 }, | ||
| 297 | { 0x000888, 1, 0x04, 0x00000400 }, | ||
| 298 | { 0x0008c8, 1, 0x04, 0x00000400 }, | ||
| 299 | { 0x000908, 1, 0x04, 0x00000400 }, | ||
| 300 | { 0x000948, 1, 0x04, 0x00000400 }, | ||
| 301 | { 0x000988, 1, 0x04, 0x00000400 }, | ||
| 302 | { 0x0009c8, 1, 0x04, 0x00000400 }, | ||
| 303 | { 0x00080c, 1, 0x04, 0x00000300 }, | ||
| 304 | { 0x00084c, 1, 0x04, 0x00000300 }, | ||
| 305 | { 0x00088c, 1, 0x04, 0x00000300 }, | ||
| 306 | { 0x0008cc, 1, 0x04, 0x00000300 }, | ||
| 307 | { 0x00090c, 1, 0x04, 0x00000300 }, | ||
| 308 | { 0x00094c, 1, 0x04, 0x00000300 }, | ||
| 309 | { 0x00098c, 1, 0x04, 0x00000300 }, | ||
| 310 | { 0x0009cc, 1, 0x04, 0x00000300 }, | ||
| 311 | { 0x000810, 1, 0x04, 0x000000cf }, | ||
| 312 | { 0x000850, 1, 0x04, 0x00000000 }, | ||
| 313 | { 0x000890, 1, 0x04, 0x00000000 }, | ||
| 314 | { 0x0008d0, 1, 0x04, 0x00000000 }, | ||
| 315 | { 0x000910, 1, 0x04, 0x00000000 }, | ||
| 316 | { 0x000950, 1, 0x04, 0x00000000 }, | ||
| 317 | { 0x000990, 1, 0x04, 0x00000000 }, | ||
| 318 | { 0x0009d0, 1, 0x04, 0x00000000 }, | ||
| 319 | { 0x000814, 1, 0x04, 0x00000040 }, | ||
| 320 | { 0x000854, 1, 0x04, 0x00000040 }, | ||
| 321 | { 0x000894, 1, 0x04, 0x00000040 }, | ||
| 322 | { 0x0008d4, 1, 0x04, 0x00000040 }, | ||
| 323 | { 0x000914, 1, 0x04, 0x00000040 }, | ||
| 324 | { 0x000954, 1, 0x04, 0x00000040 }, | ||
| 325 | { 0x000994, 1, 0x04, 0x00000040 }, | ||
| 326 | { 0x0009d4, 1, 0x04, 0x00000040 }, | ||
| 327 | { 0x000818, 1, 0x04, 0x00000001 }, | ||
| 328 | { 0x000858, 1, 0x04, 0x00000001 }, | ||
| 329 | { 0x000898, 1, 0x04, 0x00000001 }, | ||
| 330 | { 0x0008d8, 1, 0x04, 0x00000001 }, | ||
| 331 | { 0x000918, 1, 0x04, 0x00000001 }, | ||
| 332 | { 0x000958, 1, 0x04, 0x00000001 }, | ||
| 333 | { 0x000998, 1, 0x04, 0x00000001 }, | ||
| 334 | { 0x0009d8, 1, 0x04, 0x00000001 }, | ||
| 335 | { 0x00081c, 1, 0x04, 0x00000000 }, | ||
| 336 | { 0x00085c, 1, 0x04, 0x00000000 }, | ||
| 337 | { 0x00089c, 1, 0x04, 0x00000000 }, | ||
| 338 | { 0x0008dc, 1, 0x04, 0x00000000 }, | ||
| 339 | { 0x00091c, 1, 0x04, 0x00000000 }, | ||
| 340 | { 0x00095c, 1, 0x04, 0x00000000 }, | ||
| 341 | { 0x00099c, 1, 0x04, 0x00000000 }, | ||
| 342 | { 0x0009dc, 1, 0x04, 0x00000000 }, | ||
| 343 | { 0x000820, 1, 0x04, 0x00000000 }, | ||
| 344 | { 0x000860, 1, 0x04, 0x00000000 }, | ||
| 345 | { 0x0008a0, 1, 0x04, 0x00000000 }, | ||
| 346 | { 0x0008e0, 1, 0x04, 0x00000000 }, | ||
| 347 | { 0x000920, 1, 0x04, 0x00000000 }, | ||
| 348 | { 0x000960, 1, 0x04, 0x00000000 }, | ||
| 349 | { 0x0009a0, 1, 0x04, 0x00000000 }, | ||
| 350 | { 0x0009e0, 1, 0x04, 0x00000000 }, | ||
| 351 | { 0x001c00, 1, 0x04, 0x00000000 }, | ||
| 352 | { 0x001c10, 1, 0x04, 0x00000000 }, | ||
| 353 | { 0x001c20, 1, 0x04, 0x00000000 }, | ||
| 354 | { 0x001c30, 1, 0x04, 0x00000000 }, | ||
| 355 | { 0x001c40, 1, 0x04, 0x00000000 }, | ||
| 356 | { 0x001c50, 1, 0x04, 0x00000000 }, | ||
| 357 | { 0x001c60, 1, 0x04, 0x00000000 }, | ||
| 358 | { 0x001c70, 1, 0x04, 0x00000000 }, | ||
| 359 | { 0x001c80, 1, 0x04, 0x00000000 }, | ||
| 360 | { 0x001c90, 1, 0x04, 0x00000000 }, | ||
| 361 | { 0x001ca0, 1, 0x04, 0x00000000 }, | ||
| 362 | { 0x001cb0, 1, 0x04, 0x00000000 }, | ||
| 363 | { 0x001cc0, 1, 0x04, 0x00000000 }, | ||
| 364 | { 0x001cd0, 1, 0x04, 0x00000000 }, | ||
| 365 | { 0x001ce0, 1, 0x04, 0x00000000 }, | ||
| 366 | { 0x001cf0, 1, 0x04, 0x00000000 }, | ||
| 367 | { 0x001c04, 1, 0x04, 0x00000000 }, | ||
| 368 | { 0x001c14, 1, 0x04, 0x00000000 }, | ||
| 369 | { 0x001c24, 1, 0x04, 0x00000000 }, | ||
| 370 | { 0x001c34, 1, 0x04, 0x00000000 }, | ||
| 371 | { 0x001c44, 1, 0x04, 0x00000000 }, | ||
| 372 | { 0x001c54, 1, 0x04, 0x00000000 }, | ||
| 373 | { 0x001c64, 1, 0x04, 0x00000000 }, | ||
| 374 | { 0x001c74, 1, 0x04, 0x00000000 }, | ||
| 375 | { 0x001c84, 1, 0x04, 0x00000000 }, | ||
| 376 | { 0x001c94, 1, 0x04, 0x00000000 }, | ||
| 377 | { 0x001ca4, 1, 0x04, 0x00000000 }, | ||
| 378 | { 0x001cb4, 1, 0x04, 0x00000000 }, | ||
| 379 | { 0x001cc4, 1, 0x04, 0x00000000 }, | ||
| 380 | { 0x001cd4, 1, 0x04, 0x00000000 }, | ||
| 381 | { 0x001ce4, 1, 0x04, 0x00000000 }, | ||
| 382 | { 0x001cf4, 1, 0x04, 0x00000000 }, | ||
| 383 | { 0x001c08, 1, 0x04, 0x00000000 }, | ||
| 384 | { 0x001c18, 1, 0x04, 0x00000000 }, | ||
| 385 | { 0x001c28, 1, 0x04, 0x00000000 }, | ||
| 386 | { 0x001c38, 1, 0x04, 0x00000000 }, | ||
| 387 | { 0x001c48, 1, 0x04, 0x00000000 }, | ||
| 388 | { 0x001c58, 1, 0x04, 0x00000000 }, | ||
| 389 | { 0x001c68, 1, 0x04, 0x00000000 }, | ||
| 390 | { 0x001c78, 1, 0x04, 0x00000000 }, | ||
| 391 | { 0x001c88, 1, 0x04, 0x00000000 }, | ||
| 392 | { 0x001c98, 1, 0x04, 0x00000000 }, | ||
| 393 | { 0x001ca8, 1, 0x04, 0x00000000 }, | ||
| 394 | { 0x001cb8, 1, 0x04, 0x00000000 }, | ||
| 395 | { 0x001cc8, 1, 0x04, 0x00000000 }, | ||
| 396 | { 0x001cd8, 1, 0x04, 0x00000000 }, | ||
| 397 | { 0x001ce8, 1, 0x04, 0x00000000 }, | ||
| 398 | { 0x001cf8, 1, 0x04, 0x00000000 }, | ||
| 399 | { 0x001c0c, 1, 0x04, 0x00000000 }, | ||
| 400 | { 0x001c1c, 1, 0x04, 0x00000000 }, | ||
| 401 | { 0x001c2c, 1, 0x04, 0x00000000 }, | ||
| 402 | { 0x001c3c, 1, 0x04, 0x00000000 }, | ||
| 403 | { 0x001c4c, 1, 0x04, 0x00000000 }, | ||
| 404 | { 0x001c5c, 1, 0x04, 0x00000000 }, | ||
| 405 | { 0x001c6c, 1, 0x04, 0x00000000 }, | ||
| 406 | { 0x001c7c, 1, 0x04, 0x00000000 }, | ||
| 407 | { 0x001c8c, 1, 0x04, 0x00000000 }, | ||
| 408 | { 0x001c9c, 1, 0x04, 0x00000000 }, | ||
| 409 | { 0x001cac, 1, 0x04, 0x00000000 }, | ||
| 410 | { 0x001cbc, 1, 0x04, 0x00000000 }, | ||
| 411 | { 0x001ccc, 1, 0x04, 0x00000000 }, | ||
| 412 | { 0x001cdc, 1, 0x04, 0x00000000 }, | ||
| 413 | { 0x001cec, 1, 0x04, 0x00000000 }, | ||
| 414 | { 0x001cfc, 2, 0x04, 0x00000000 }, | ||
| 415 | { 0x001d10, 1, 0x04, 0x00000000 }, | ||
| 416 | { 0x001d20, 1, 0x04, 0x00000000 }, | ||
| 417 | { 0x001d30, 1, 0x04, 0x00000000 }, | ||
| 418 | { 0x001d40, 1, 0x04, 0x00000000 }, | ||
| 419 | { 0x001d50, 1, 0x04, 0x00000000 }, | ||
| 420 | { 0x001d60, 1, 0x04, 0x00000000 }, | ||
| 421 | { 0x001d70, 1, 0x04, 0x00000000 }, | ||
| 422 | { 0x001d80, 1, 0x04, 0x00000000 }, | ||
| 423 | { 0x001d90, 1, 0x04, 0x00000000 }, | ||
| 424 | { 0x001da0, 1, 0x04, 0x00000000 }, | ||
| 425 | { 0x001db0, 1, 0x04, 0x00000000 }, | ||
| 426 | { 0x001dc0, 1, 0x04, 0x00000000 }, | ||
| 427 | { 0x001dd0, 1, 0x04, 0x00000000 }, | ||
| 428 | { 0x001de0, 1, 0x04, 0x00000000 }, | ||
| 429 | { 0x001df0, 1, 0x04, 0x00000000 }, | ||
| 430 | { 0x001d04, 1, 0x04, 0x00000000 }, | ||
| 431 | { 0x001d14, 1, 0x04, 0x00000000 }, | ||
| 432 | { 0x001d24, 1, 0x04, 0x00000000 }, | ||
| 433 | { 0x001d34, 1, 0x04, 0x00000000 }, | ||
| 434 | { 0x001d44, 1, 0x04, 0x00000000 }, | ||
| 435 | { 0x001d54, 1, 0x04, 0x00000000 }, | ||
| 436 | { 0x001d64, 1, 0x04, 0x00000000 }, | ||
| 437 | { 0x001d74, 1, 0x04, 0x00000000 }, | ||
| 438 | { 0x001d84, 1, 0x04, 0x00000000 }, | ||
| 439 | { 0x001d94, 1, 0x04, 0x00000000 }, | ||
| 440 | { 0x001da4, 1, 0x04, 0x00000000 }, | ||
| 441 | { 0x001db4, 1, 0x04, 0x00000000 }, | ||
| 442 | { 0x001dc4, 1, 0x04, 0x00000000 }, | ||
| 443 | { 0x001dd4, 1, 0x04, 0x00000000 }, | ||
| 444 | { 0x001de4, 1, 0x04, 0x00000000 }, | ||
| 445 | { 0x001df4, 1, 0x04, 0x00000000 }, | ||
| 446 | { 0x001d08, 1, 0x04, 0x00000000 }, | ||
| 447 | { 0x001d18, 1, 0x04, 0x00000000 }, | ||
| 448 | { 0x001d28, 1, 0x04, 0x00000000 }, | ||
| 449 | { 0x001d38, 1, 0x04, 0x00000000 }, | ||
| 450 | { 0x001d48, 1, 0x04, 0x00000000 }, | ||
| 451 | { 0x001d58, 1, 0x04, 0x00000000 }, | ||
| 452 | { 0x001d68, 1, 0x04, 0x00000000 }, | ||
| 453 | { 0x001d78, 1, 0x04, 0x00000000 }, | ||
| 454 | { 0x001d88, 1, 0x04, 0x00000000 }, | ||
| 455 | { 0x001d98, 1, 0x04, 0x00000000 }, | ||
| 456 | { 0x001da8, 1, 0x04, 0x00000000 }, | ||
| 457 | { 0x001db8, 1, 0x04, 0x00000000 }, | ||
| 458 | { 0x001dc8, 1, 0x04, 0x00000000 }, | ||
| 459 | { 0x001dd8, 1, 0x04, 0x00000000 }, | ||
| 460 | { 0x001de8, 1, 0x04, 0x00000000 }, | ||
| 461 | { 0x001df8, 1, 0x04, 0x00000000 }, | ||
| 462 | { 0x001d0c, 1, 0x04, 0x00000000 }, | ||
| 463 | { 0x001d1c, 1, 0x04, 0x00000000 }, | ||
| 464 | { 0x001d2c, 1, 0x04, 0x00000000 }, | ||
| 465 | { 0x001d3c, 1, 0x04, 0x00000000 }, | ||
| 466 | { 0x001d4c, 1, 0x04, 0x00000000 }, | ||
| 467 | { 0x001d5c, 1, 0x04, 0x00000000 }, | ||
| 468 | { 0x001d6c, 1, 0x04, 0x00000000 }, | ||
| 469 | { 0x001d7c, 1, 0x04, 0x00000000 }, | ||
| 470 | { 0x001d8c, 1, 0x04, 0x00000000 }, | ||
| 471 | { 0x001d9c, 1, 0x04, 0x00000000 }, | ||
| 472 | { 0x001dac, 1, 0x04, 0x00000000 }, | ||
| 473 | { 0x001dbc, 1, 0x04, 0x00000000 }, | ||
| 474 | { 0x001dcc, 1, 0x04, 0x00000000 }, | ||
| 475 | { 0x001ddc, 1, 0x04, 0x00000000 }, | ||
| 476 | { 0x001dec, 1, 0x04, 0x00000000 }, | ||
| 477 | { 0x001dfc, 1, 0x04, 0x00000000 }, | ||
| 478 | { 0x001f00, 1, 0x04, 0x00000000 }, | ||
| 479 | { 0x001f08, 1, 0x04, 0x00000000 }, | ||
| 480 | { 0x001f10, 1, 0x04, 0x00000000 }, | ||
| 481 | { 0x001f18, 1, 0x04, 0x00000000 }, | ||
| 482 | { 0x001f20, 1, 0x04, 0x00000000 }, | ||
| 483 | { 0x001f28, 1, 0x04, 0x00000000 }, | ||
| 484 | { 0x001f30, 1, 0x04, 0x00000000 }, | ||
| 485 | { 0x001f38, 1, 0x04, 0x00000000 }, | ||
| 486 | { 0x001f40, 1, 0x04, 0x00000000 }, | ||
| 487 | { 0x001f48, 1, 0x04, 0x00000000 }, | ||
| 488 | { 0x001f50, 1, 0x04, 0x00000000 }, | ||
| 489 | { 0x001f58, 1, 0x04, 0x00000000 }, | ||
| 490 | { 0x001f60, 1, 0x04, 0x00000000 }, | ||
| 491 | { 0x001f68, 1, 0x04, 0x00000000 }, | ||
| 492 | { 0x001f70, 1, 0x04, 0x00000000 }, | ||
| 493 | { 0x001f78, 1, 0x04, 0x00000000 }, | ||
| 494 | { 0x001f04, 1, 0x04, 0x00000000 }, | ||
| 495 | { 0x001f0c, 1, 0x04, 0x00000000 }, | ||
| 496 | { 0x001f14, 1, 0x04, 0x00000000 }, | ||
| 497 | { 0x001f1c, 1, 0x04, 0x00000000 }, | ||
| 498 | { 0x001f24, 1, 0x04, 0x00000000 }, | ||
| 499 | { 0x001f2c, 1, 0x04, 0x00000000 }, | ||
| 500 | { 0x001f34, 1, 0x04, 0x00000000 }, | ||
| 501 | { 0x001f3c, 1, 0x04, 0x00000000 }, | ||
| 502 | { 0x001f44, 1, 0x04, 0x00000000 }, | ||
| 503 | { 0x001f4c, 1, 0x04, 0x00000000 }, | ||
| 504 | { 0x001f54, 1, 0x04, 0x00000000 }, | ||
| 505 | { 0x001f5c, 1, 0x04, 0x00000000 }, | ||
| 506 | { 0x001f64, 1, 0x04, 0x00000000 }, | ||
| 507 | { 0x001f6c, 1, 0x04, 0x00000000 }, | ||
| 508 | { 0x001f74, 1, 0x04, 0x00000000 }, | ||
| 509 | { 0x001f7c, 2, 0x04, 0x00000000 }, | ||
| 510 | { 0x001f88, 1, 0x04, 0x00000000 }, | ||
| 511 | { 0x001f90, 1, 0x04, 0x00000000 }, | ||
| 512 | { 0x001f98, 1, 0x04, 0x00000000 }, | ||
| 513 | { 0x001fa0, 1, 0x04, 0x00000000 }, | ||
| 514 | { 0x001fa8, 1, 0x04, 0x00000000 }, | ||
| 515 | { 0x001fb0, 1, 0x04, 0x00000000 }, | ||
| 516 | { 0x001fb8, 1, 0x04, 0x00000000 }, | ||
| 517 | { 0x001fc0, 1, 0x04, 0x00000000 }, | ||
| 518 | { 0x001fc8, 1, 0x04, 0x00000000 }, | ||
| 519 | { 0x001fd0, 1, 0x04, 0x00000000 }, | ||
| 520 | { 0x001fd8, 1, 0x04, 0x00000000 }, | ||
| 521 | { 0x001fe0, 1, 0x04, 0x00000000 }, | ||
| 522 | { 0x001fe8, 1, 0x04, 0x00000000 }, | ||
| 523 | { 0x001ff0, 1, 0x04, 0x00000000 }, | ||
| 524 | { 0x001ff8, 1, 0x04, 0x00000000 }, | ||
| 525 | { 0x001f84, 1, 0x04, 0x00000000 }, | ||
| 526 | { 0x001f8c, 1, 0x04, 0x00000000 }, | ||
| 527 | { 0x001f94, 1, 0x04, 0x00000000 }, | ||
| 528 | { 0x001f9c, 1, 0x04, 0x00000000 }, | ||
| 529 | { 0x001fa4, 1, 0x04, 0x00000000 }, | ||
| 530 | { 0x001fac, 1, 0x04, 0x00000000 }, | ||
| 531 | { 0x001fb4, 1, 0x04, 0x00000000 }, | ||
| 532 | { 0x001fbc, 1, 0x04, 0x00000000 }, | ||
| 533 | { 0x001fc4, 1, 0x04, 0x00000000 }, | ||
| 534 | { 0x001fcc, 1, 0x04, 0x00000000 }, | ||
| 535 | { 0x001fd4, 1, 0x04, 0x00000000 }, | ||
| 536 | { 0x001fdc, 1, 0x04, 0x00000000 }, | ||
| 537 | { 0x001fe4, 1, 0x04, 0x00000000 }, | ||
| 538 | { 0x001fec, 1, 0x04, 0x00000000 }, | ||
| 539 | { 0x001ff4, 1, 0x04, 0x00000000 }, | ||
| 540 | { 0x001ffc, 2, 0x04, 0x00000000 }, | ||
| 541 | { 0x002040, 1, 0x04, 0x00000011 }, | ||
| 542 | { 0x002080, 1, 0x04, 0x00000020 }, | ||
| 543 | { 0x0020c0, 1, 0x04, 0x00000030 }, | ||
| 544 | { 0x002100, 1, 0x04, 0x00000040 }, | ||
| 545 | { 0x002140, 1, 0x04, 0x00000051 }, | ||
| 546 | { 0x00200c, 1, 0x04, 0x00000001 }, | ||
| 547 | { 0x00204c, 1, 0x04, 0x00000001 }, | ||
| 548 | { 0x00208c, 1, 0x04, 0x00000001 }, | ||
| 549 | { 0x0020cc, 1, 0x04, 0x00000001 }, | ||
| 550 | { 0x00210c, 1, 0x04, 0x00000001 }, | ||
| 551 | { 0x00214c, 1, 0x04, 0x00000001 }, | ||
| 552 | { 0x002010, 1, 0x04, 0x00000000 }, | ||
| 553 | { 0x002050, 1, 0x04, 0x00000000 }, | ||
| 554 | { 0x002090, 1, 0x04, 0x00000001 }, | ||
| 555 | { 0x0020d0, 1, 0x04, 0x00000002 }, | ||
| 556 | { 0x002110, 1, 0x04, 0x00000003 }, | ||
| 557 | { 0x002150, 1, 0x04, 0x00000004 }, | ||
| 558 | { 0x000380, 1, 0x04, 0x00000000 }, | ||
| 559 | { 0x0003a0, 1, 0x04, 0x00000000 }, | ||
| 560 | { 0x0003c0, 1, 0x04, 0x00000000 }, | ||
| 561 | { 0x0003e0, 1, 0x04, 0x00000000 }, | ||
| 562 | { 0x000384, 1, 0x04, 0x00000000 }, | ||
| 563 | { 0x0003a4, 1, 0x04, 0x00000000 }, | ||
| 564 | { 0x0003c4, 1, 0x04, 0x00000000 }, | ||
| 565 | { 0x0003e4, 1, 0x04, 0x00000000 }, | ||
| 566 | { 0x000388, 1, 0x04, 0x00000000 }, | ||
| 567 | { 0x0003a8, 1, 0x04, 0x00000000 }, | ||
| 568 | { 0x0003c8, 1, 0x04, 0x00000000 }, | ||
| 569 | { 0x0003e8, 1, 0x04, 0x00000000 }, | ||
| 570 | { 0x00038c, 1, 0x04, 0x00000000 }, | ||
| 571 | { 0x0003ac, 1, 0x04, 0x00000000 }, | ||
| 572 | { 0x0003cc, 1, 0x04, 0x00000000 }, | ||
| 573 | { 0x0003ec, 1, 0x04, 0x00000000 }, | ||
| 574 | { 0x000700, 1, 0x04, 0x00000000 }, | ||
| 575 | { 0x000710, 1, 0x04, 0x00000000 }, | ||
| 576 | { 0x000720, 1, 0x04, 0x00000000 }, | ||
| 577 | { 0x000730, 1, 0x04, 0x00000000 }, | ||
| 578 | { 0x000704, 1, 0x04, 0x00000000 }, | ||
| 579 | { 0x000714, 1, 0x04, 0x00000000 }, | ||
| 580 | { 0x000724, 1, 0x04, 0x00000000 }, | ||
| 581 | { 0x000734, 1, 0x04, 0x00000000 }, | ||
| 582 | { 0x000708, 1, 0x04, 0x00000000 }, | ||
| 583 | { 0x000718, 1, 0x04, 0x00000000 }, | ||
| 584 | { 0x000728, 1, 0x04, 0x00000000 }, | ||
| 585 | { 0x000738, 1, 0x04, 0x00000000 }, | ||
| 586 | { 0x002800, 128, 0x04, 0x00000000 }, | ||
| 587 | { 0x000a00, 1, 0x04, 0x00000000 }, | ||
| 588 | { 0x000a20, 1, 0x04, 0x00000000 }, | ||
| 589 | { 0x000a40, 1, 0x04, 0x00000000 }, | ||
| 590 | { 0x000a60, 1, 0x04, 0x00000000 }, | ||
| 591 | { 0x000a80, 1, 0x04, 0x00000000 }, | ||
| 592 | { 0x000aa0, 1, 0x04, 0x00000000 }, | ||
| 593 | { 0x000ac0, 1, 0x04, 0x00000000 }, | ||
| 594 | { 0x000ae0, 1, 0x04, 0x00000000 }, | ||
| 595 | { 0x000b00, 1, 0x04, 0x00000000 }, | ||
| 596 | { 0x000b20, 1, 0x04, 0x00000000 }, | ||
| 597 | { 0x000b40, 1, 0x04, 0x00000000 }, | ||
| 598 | { 0x000b60, 1, 0x04, 0x00000000 }, | ||
| 599 | { 0x000b80, 1, 0x04, 0x00000000 }, | ||
| 600 | { 0x000ba0, 1, 0x04, 0x00000000 }, | ||
| 601 | { 0x000bc0, 1, 0x04, 0x00000000 }, | ||
| 602 | { 0x000be0, 1, 0x04, 0x00000000 }, | ||
| 603 | { 0x000a04, 1, 0x04, 0x00000000 }, | ||
| 604 | { 0x000a24, 1, 0x04, 0x00000000 }, | ||
| 605 | { 0x000a44, 1, 0x04, 0x00000000 }, | ||
| 606 | { 0x000a64, 1, 0x04, 0x00000000 }, | ||
| 607 | { 0x000a84, 1, 0x04, 0x00000000 }, | ||
| 608 | { 0x000aa4, 1, 0x04, 0x00000000 }, | ||
| 609 | { 0x000ac4, 1, 0x04, 0x00000000 }, | ||
| 610 | { 0x000ae4, 1, 0x04, 0x00000000 }, | ||
| 611 | { 0x000b04, 1, 0x04, 0x00000000 }, | ||
| 612 | { 0x000b24, 1, 0x04, 0x00000000 }, | ||
| 613 | { 0x000b44, 1, 0x04, 0x00000000 }, | ||
| 614 | { 0x000b64, 1, 0x04, 0x00000000 }, | ||
| 615 | { 0x000b84, 1, 0x04, 0x00000000 }, | ||
| 616 | { 0x000ba4, 1, 0x04, 0x00000000 }, | ||
| 617 | { 0x000bc4, 1, 0x04, 0x00000000 }, | ||
| 618 | { 0x000be4, 1, 0x04, 0x00000000 }, | ||
| 619 | { 0x000a08, 1, 0x04, 0x00000000 }, | ||
| 620 | { 0x000a28, 1, 0x04, 0x00000000 }, | ||
| 621 | { 0x000a48, 1, 0x04, 0x00000000 }, | ||
| 622 | { 0x000a68, 1, 0x04, 0x00000000 }, | ||
| 623 | { 0x000a88, 1, 0x04, 0x00000000 }, | ||
| 624 | { 0x000aa8, 1, 0x04, 0x00000000 }, | ||
| 625 | { 0x000ac8, 1, 0x04, 0x00000000 }, | ||
| 626 | { 0x000ae8, 1, 0x04, 0x00000000 }, | ||
| 627 | { 0x000b08, 1, 0x04, 0x00000000 }, | ||
| 628 | { 0x000b28, 1, 0x04, 0x00000000 }, | ||
| 629 | { 0x000b48, 1, 0x04, 0x00000000 }, | ||
| 630 | { 0x000b68, 1, 0x04, 0x00000000 }, | ||
| 631 | { 0x000b88, 1, 0x04, 0x00000000 }, | ||
| 632 | { 0x000ba8, 1, 0x04, 0x00000000 }, | ||
| 633 | { 0x000bc8, 1, 0x04, 0x00000000 }, | ||
| 634 | { 0x000be8, 1, 0x04, 0x00000000 }, | ||
| 635 | { 0x000a0c, 1, 0x04, 0x00000000 }, | ||
| 636 | { 0x000a2c, 1, 0x04, 0x00000000 }, | ||
| 637 | { 0x000a4c, 1, 0x04, 0x00000000 }, | ||
| 638 | { 0x000a6c, 1, 0x04, 0x00000000 }, | ||
| 639 | { 0x000a8c, 1, 0x04, 0x00000000 }, | ||
| 640 | { 0x000aac, 1, 0x04, 0x00000000 }, | ||
| 641 | { 0x000acc, 1, 0x04, 0x00000000 }, | ||
| 642 | { 0x000aec, 1, 0x04, 0x00000000 }, | ||
| 643 | { 0x000b0c, 1, 0x04, 0x00000000 }, | ||
| 644 | { 0x000b2c, 1, 0x04, 0x00000000 }, | ||
| 645 | { 0x000b4c, 1, 0x04, 0x00000000 }, | ||
| 646 | { 0x000b6c, 1, 0x04, 0x00000000 }, | ||
| 647 | { 0x000b8c, 1, 0x04, 0x00000000 }, | ||
| 648 | { 0x000bac, 1, 0x04, 0x00000000 }, | ||
| 649 | { 0x000bcc, 1, 0x04, 0x00000000 }, | ||
| 650 | { 0x000bec, 1, 0x04, 0x00000000 }, | ||
| 651 | { 0x000a10, 1, 0x04, 0x00000000 }, | ||
| 652 | { 0x000a30, 1, 0x04, 0x00000000 }, | ||
| 653 | { 0x000a50, 1, 0x04, 0x00000000 }, | ||
| 654 | { 0x000a70, 1, 0x04, 0x00000000 }, | ||
| 655 | { 0x000a90, 1, 0x04, 0x00000000 }, | ||
| 656 | { 0x000ab0, 1, 0x04, 0x00000000 }, | ||
| 657 | { 0x000ad0, 1, 0x04, 0x00000000 }, | ||
| 658 | { 0x000af0, 1, 0x04, 0x00000000 }, | ||
| 659 | { 0x000b10, 1, 0x04, 0x00000000 }, | ||
| 660 | { 0x000b30, 1, 0x04, 0x00000000 }, | ||
| 661 | { 0x000b50, 1, 0x04, 0x00000000 }, | ||
| 662 | { 0x000b70, 1, 0x04, 0x00000000 }, | ||
| 663 | { 0x000b90, 1, 0x04, 0x00000000 }, | ||
| 664 | { 0x000bb0, 1, 0x04, 0x00000000 }, | ||
| 665 | { 0x000bd0, 1, 0x04, 0x00000000 }, | ||
| 666 | { 0x000bf0, 1, 0x04, 0x00000000 }, | ||
| 667 | { 0x000a14, 1, 0x04, 0x00000000 }, | ||
| 668 | { 0x000a34, 1, 0x04, 0x00000000 }, | ||
| 669 | { 0x000a54, 1, 0x04, 0x00000000 }, | ||
| 670 | { 0x000a74, 1, 0x04, 0x00000000 }, | ||
| 671 | { 0x000a94, 1, 0x04, 0x00000000 }, | ||
| 672 | { 0x000ab4, 1, 0x04, 0x00000000 }, | ||
| 673 | { 0x000ad4, 1, 0x04, 0x00000000 }, | ||
| 674 | { 0x000af4, 1, 0x04, 0x00000000 }, | ||
| 675 | { 0x000b14, 1, 0x04, 0x00000000 }, | ||
| 676 | { 0x000b34, 1, 0x04, 0x00000000 }, | ||
| 677 | { 0x000b54, 1, 0x04, 0x00000000 }, | ||
| 678 | { 0x000b74, 1, 0x04, 0x00000000 }, | ||
| 679 | { 0x000b94, 1, 0x04, 0x00000000 }, | ||
| 680 | { 0x000bb4, 1, 0x04, 0x00000000 }, | ||
| 681 | { 0x000bd4, 1, 0x04, 0x00000000 }, | ||
| 682 | { 0x000bf4, 1, 0x04, 0x00000000 }, | ||
| 683 | { 0x000c00, 1, 0x04, 0x00000000 }, | ||
| 684 | { 0x000c10, 1, 0x04, 0x00000000 }, | ||
| 685 | { 0x000c20, 1, 0x04, 0x00000000 }, | ||
| 686 | { 0x000c30, 1, 0x04, 0x00000000 }, | ||
| 687 | { 0x000c40, 1, 0x04, 0x00000000 }, | ||
| 688 | { 0x000c50, 1, 0x04, 0x00000000 }, | ||
| 689 | { 0x000c60, 1, 0x04, 0x00000000 }, | ||
| 690 | { 0x000c70, 1, 0x04, 0x00000000 }, | ||
| 691 | { 0x000c80, 1, 0x04, 0x00000000 }, | ||
| 692 | { 0x000c90, 1, 0x04, 0x00000000 }, | ||
| 693 | { 0x000ca0, 1, 0x04, 0x00000000 }, | ||
| 694 | { 0x000cb0, 1, 0x04, 0x00000000 }, | ||
| 695 | { 0x000cc0, 1, 0x04, 0x00000000 }, | ||
| 696 | { 0x000cd0, 1, 0x04, 0x00000000 }, | ||
| 697 | { 0x000ce0, 1, 0x04, 0x00000000 }, | ||
| 698 | { 0x000cf0, 1, 0x04, 0x00000000 }, | ||
| 699 | { 0x000c04, 1, 0x04, 0x00000000 }, | ||
| 700 | { 0x000c14, 1, 0x04, 0x00000000 }, | ||
| 701 | { 0x000c24, 1, 0x04, 0x00000000 }, | ||
| 702 | { 0x000c34, 1, 0x04, 0x00000000 }, | ||
| 703 | { 0x000c44, 1, 0x04, 0x00000000 }, | ||
| 704 | { 0x000c54, 1, 0x04, 0x00000000 }, | ||
| 705 | { 0x000c64, 1, 0x04, 0x00000000 }, | ||
| 706 | { 0x000c74, 1, 0x04, 0x00000000 }, | ||
| 707 | { 0x000c84, 1, 0x04, 0x00000000 }, | ||
| 708 | { 0x000c94, 1, 0x04, 0x00000000 }, | ||
| 709 | { 0x000ca4, 1, 0x04, 0x00000000 }, | ||
| 710 | { 0x000cb4, 1, 0x04, 0x00000000 }, | ||
| 711 | { 0x000cc4, 1, 0x04, 0x00000000 }, | ||
| 712 | { 0x000cd4, 1, 0x04, 0x00000000 }, | ||
| 713 | { 0x000ce4, 1, 0x04, 0x00000000 }, | ||
| 714 | { 0x000cf4, 1, 0x04, 0x00000000 }, | ||
| 715 | { 0x000c08, 1, 0x04, 0x00000000 }, | ||
| 716 | { 0x000c18, 1, 0x04, 0x00000000 }, | ||
| 717 | { 0x000c28, 1, 0x04, 0x00000000 }, | ||
| 718 | { 0x000c38, 1, 0x04, 0x00000000 }, | ||
| 719 | { 0x000c48, 1, 0x04, 0x00000000 }, | ||
| 720 | { 0x000c58, 1, 0x04, 0x00000000 }, | ||
| 721 | { 0x000c68, 1, 0x04, 0x00000000 }, | ||
| 722 | { 0x000c78, 1, 0x04, 0x00000000 }, | ||
| 723 | { 0x000c88, 1, 0x04, 0x00000000 }, | ||
| 724 | { 0x000c98, 1, 0x04, 0x00000000 }, | ||
| 725 | { 0x000ca8, 1, 0x04, 0x00000000 }, | ||
| 726 | { 0x000cb8, 1, 0x04, 0x00000000 }, | ||
| 727 | { 0x000cc8, 1, 0x04, 0x00000000 }, | ||
| 728 | { 0x000cd8, 1, 0x04, 0x00000000 }, | ||
| 729 | { 0x000ce8, 1, 0x04, 0x00000000 }, | ||
| 730 | { 0x000cf8, 1, 0x04, 0x00000000 }, | ||
| 731 | { 0x000c0c, 1, 0x04, 0x3f800000 }, | ||
| 732 | { 0x000c1c, 1, 0x04, 0x3f800000 }, | ||
| 733 | { 0x000c2c, 1, 0x04, 0x3f800000 }, | ||
| 734 | { 0x000c3c, 1, 0x04, 0x3f800000 }, | ||
| 735 | { 0x000c4c, 1, 0x04, 0x3f800000 }, | ||
| 736 | { 0x000c5c, 1, 0x04, 0x3f800000 }, | ||
| 737 | { 0x000c6c, 1, 0x04, 0x3f800000 }, | ||
| 738 | { 0x000c7c, 1, 0x04, 0x3f800000 }, | ||
| 739 | { 0x000c8c, 1, 0x04, 0x3f800000 }, | ||
| 740 | { 0x000c9c, 1, 0x04, 0x3f800000 }, | ||
| 741 | { 0x000cac, 1, 0x04, 0x3f800000 }, | ||
| 742 | { 0x000cbc, 1, 0x04, 0x3f800000 }, | ||
| 743 | { 0x000ccc, 1, 0x04, 0x3f800000 }, | ||
| 744 | { 0x000cdc, 1, 0x04, 0x3f800000 }, | ||
| 745 | { 0x000cec, 1, 0x04, 0x3f800000 }, | ||
| 746 | { 0x000cfc, 1, 0x04, 0x3f800000 }, | ||
| 747 | { 0x000d00, 1, 0x04, 0xffff0000 }, | ||
| 748 | { 0x000d08, 1, 0x04, 0xffff0000 }, | ||
| 749 | { 0x000d10, 1, 0x04, 0xffff0000 }, | ||
| 750 | { 0x000d18, 1, 0x04, 0xffff0000 }, | ||
| 751 | { 0x000d20, 1, 0x04, 0xffff0000 }, | ||
| 752 | { 0x000d28, 1, 0x04, 0xffff0000 }, | ||
| 753 | { 0x000d30, 1, 0x04, 0xffff0000 }, | ||
| 754 | { 0x000d38, 1, 0x04, 0xffff0000 }, | ||
| 755 | { 0x000d04, 1, 0x04, 0xffff0000 }, | ||
| 756 | { 0x000d0c, 1, 0x04, 0xffff0000 }, | ||
| 757 | { 0x000d14, 1, 0x04, 0xffff0000 }, | ||
| 758 | { 0x000d1c, 1, 0x04, 0xffff0000 }, | ||
| 759 | { 0x000d24, 1, 0x04, 0xffff0000 }, | ||
| 760 | { 0x000d2c, 1, 0x04, 0xffff0000 }, | ||
| 761 | { 0x000d34, 1, 0x04, 0xffff0000 }, | ||
| 762 | { 0x000d3c, 1, 0x04, 0xffff0000 }, | ||
| 763 | { 0x000e00, 1, 0x04, 0x00000000 }, | ||
| 764 | { 0x000e10, 1, 0x04, 0x00000000 }, | ||
| 765 | { 0x000e20, 1, 0x04, 0x00000000 }, | ||
| 766 | { 0x000e30, 1, 0x04, 0x00000000 }, | ||
| 767 | { 0x000e40, 1, 0x04, 0x00000000 }, | ||
| 768 | { 0x000e50, 1, 0x04, 0x00000000 }, | ||
| 769 | { 0x000e60, 1, 0x04, 0x00000000 }, | ||
| 770 | { 0x000e70, 1, 0x04, 0x00000000 }, | ||
| 771 | { 0x000e80, 1, 0x04, 0x00000000 }, | ||
| 772 | { 0x000e90, 1, 0x04, 0x00000000 }, | ||
| 773 | { 0x000ea0, 1, 0x04, 0x00000000 }, | ||
| 774 | { 0x000eb0, 1, 0x04, 0x00000000 }, | ||
| 775 | { 0x000ec0, 1, 0x04, 0x00000000 }, | ||
| 776 | { 0x000ed0, 1, 0x04, 0x00000000 }, | ||
| 777 | { 0x000ee0, 1, 0x04, 0x00000000 }, | ||
| 778 | { 0x000ef0, 1, 0x04, 0x00000000 }, | ||
| 779 | { 0x000e04, 1, 0x04, 0xffff0000 }, | ||
| 780 | { 0x000e14, 1, 0x04, 0xffff0000 }, | ||
| 781 | { 0x000e24, 1, 0x04, 0xffff0000 }, | ||
| 782 | { 0x000e34, 1, 0x04, 0xffff0000 }, | ||
| 783 | { 0x000e44, 1, 0x04, 0xffff0000 }, | ||
| 784 | { 0x000e54, 1, 0x04, 0xffff0000 }, | ||
| 785 | { 0x000e64, 1, 0x04, 0xffff0000 }, | ||
| 786 | { 0x000e74, 1, 0x04, 0xffff0000 }, | ||
| 787 | { 0x000e84, 1, 0x04, 0xffff0000 }, | ||
| 788 | { 0x000e94, 1, 0x04, 0xffff0000 }, | ||
| 789 | { 0x000ea4, 1, 0x04, 0xffff0000 }, | ||
| 790 | { 0x000eb4, 1, 0x04, 0xffff0000 }, | ||
| 791 | { 0x000ec4, 1, 0x04, 0xffff0000 }, | ||
| 792 | { 0x000ed4, 1, 0x04, 0xffff0000 }, | ||
| 793 | { 0x000ee4, 1, 0x04, 0xffff0000 }, | ||
| 794 | { 0x000ef4, 1, 0x04, 0xffff0000 }, | ||
| 795 | { 0x000e08, 1, 0x04, 0xffff0000 }, | ||
| 796 | { 0x000e18, 1, 0x04, 0xffff0000 }, | ||
| 797 | { 0x000e28, 1, 0x04, 0xffff0000 }, | ||
| 798 | { 0x000e38, 1, 0x04, 0xffff0000 }, | ||
| 799 | { 0x000e48, 1, 0x04, 0xffff0000 }, | ||
| 800 | { 0x000e58, 1, 0x04, 0xffff0000 }, | ||
| 801 | { 0x000e68, 1, 0x04, 0xffff0000 }, | ||
| 802 | { 0x000e78, 1, 0x04, 0xffff0000 }, | ||
| 803 | { 0x000e88, 1, 0x04, 0xffff0000 }, | ||
| 804 | { 0x000e98, 1, 0x04, 0xffff0000 }, | ||
| 805 | { 0x000ea8, 1, 0x04, 0xffff0000 }, | ||
| 806 | { 0x000eb8, 1, 0x04, 0xffff0000 }, | ||
| 807 | { 0x000ec8, 1, 0x04, 0xffff0000 }, | ||
| 808 | { 0x000ed8, 1, 0x04, 0xffff0000 }, | ||
| 809 | { 0x000ee8, 1, 0x04, 0xffff0000 }, | ||
| 810 | { 0x000ef8, 1, 0x04, 0xffff0000 }, | ||
| 811 | { 0x000d40, 1, 0x04, 0x00000000 }, | ||
| 812 | { 0x000d48, 1, 0x04, 0x00000000 }, | ||
| 813 | { 0x000d50, 1, 0x04, 0x00000000 }, | ||
| 814 | { 0x000d58, 1, 0x04, 0x00000000 }, | ||
| 815 | { 0x000d44, 1, 0x04, 0x00000000 }, | ||
| 816 | { 0x000d4c, 1, 0x04, 0x00000000 }, | ||
| 817 | { 0x000d54, 1, 0x04, 0x00000000 }, | ||
| 818 | { 0x000d5c, 1, 0x04, 0x00000000 }, | ||
| 819 | { 0x001e00, 1, 0x04, 0x00000001 }, | ||
| 820 | { 0x001e20, 1, 0x04, 0x00000001 }, | ||
| 821 | { 0x001e40, 1, 0x04, 0x00000001 }, | ||
| 822 | { 0x001e60, 1, 0x04, 0x00000001 }, | ||
| 823 | { 0x001e80, 1, 0x04, 0x00000001 }, | ||
| 824 | { 0x001ea0, 1, 0x04, 0x00000001 }, | ||
| 825 | { 0x001ec0, 1, 0x04, 0x00000001 }, | ||
| 826 | { 0x001ee0, 1, 0x04, 0x00000001 }, | ||
| 827 | { 0x001e04, 1, 0x04, 0x00000001 }, | ||
| 828 | { 0x001e24, 1, 0x04, 0x00000001 }, | ||
| 829 | { 0x001e44, 1, 0x04, 0x00000001 }, | ||
| 830 | { 0x001e64, 1, 0x04, 0x00000001 }, | ||
| 831 | { 0x001e84, 1, 0x04, 0x00000001 }, | ||
| 832 | { 0x001ea4, 1, 0x04, 0x00000001 }, | ||
| 833 | { 0x001ec4, 1, 0x04, 0x00000001 }, | ||
| 834 | { 0x001ee4, 1, 0x04, 0x00000001 }, | ||
| 835 | { 0x001e08, 1, 0x04, 0x00000002 }, | ||
| 836 | { 0x001e28, 1, 0x04, 0x00000002 }, | ||
| 837 | { 0x001e48, 1, 0x04, 0x00000002 }, | ||
| 838 | { 0x001e68, 1, 0x04, 0x00000002 }, | ||
| 839 | { 0x001e88, 1, 0x04, 0x00000002 }, | ||
| 840 | { 0x001ea8, 1, 0x04, 0x00000002 }, | ||
| 841 | { 0x001ec8, 1, 0x04, 0x00000002 }, | ||
| 842 | { 0x001ee8, 1, 0x04, 0x00000002 }, | ||
| 843 | { 0x001e0c, 1, 0x04, 0x00000001 }, | ||
| 844 | { 0x001e2c, 1, 0x04, 0x00000001 }, | ||
| 845 | { 0x001e4c, 1, 0x04, 0x00000001 }, | ||
| 846 | { 0x001e6c, 1, 0x04, 0x00000001 }, | ||
| 847 | { 0x001e8c, 1, 0x04, 0x00000001 }, | ||
| 848 | { 0x001eac, 1, 0x04, 0x00000001 }, | ||
| 849 | { 0x001ecc, 1, 0x04, 0x00000001 }, | ||
| 850 | { 0x001eec, 1, 0x04, 0x00000001 }, | ||
| 851 | { 0x001e10, 1, 0x04, 0x00000001 }, | ||
| 852 | { 0x001e30, 1, 0x04, 0x00000001 }, | ||
| 853 | { 0x001e50, 1, 0x04, 0x00000001 }, | ||
| 854 | { 0x001e70, 1, 0x04, 0x00000001 }, | ||
| 855 | { 0x001e90, 1, 0x04, 0x00000001 }, | ||
| 856 | { 0x001eb0, 1, 0x04, 0x00000001 }, | ||
| 857 | { 0x001ed0, 1, 0x04, 0x00000001 }, | ||
| 858 | { 0x001ef0, 1, 0x04, 0x00000001 }, | ||
| 859 | { 0x001e14, 1, 0x04, 0x00000002 }, | ||
| 860 | { 0x001e34, 1, 0x04, 0x00000002 }, | ||
| 861 | { 0x001e54, 1, 0x04, 0x00000002 }, | ||
| 862 | { 0x001e74, 1, 0x04, 0x00000002 }, | ||
| 863 | { 0x001e94, 1, 0x04, 0x00000002 }, | ||
| 864 | { 0x001eb4, 1, 0x04, 0x00000002 }, | ||
| 865 | { 0x001ed4, 1, 0x04, 0x00000002 }, | ||
| 866 | { 0x001ef4, 1, 0x04, 0x00000002 }, | ||
| 867 | { 0x001e18, 1, 0x04, 0x00000001 }, | ||
| 868 | { 0x001e38, 1, 0x04, 0x00000001 }, | ||
| 869 | { 0x001e58, 1, 0x04, 0x00000001 }, | ||
| 870 | { 0x001e78, 1, 0x04, 0x00000001 }, | ||
| 871 | { 0x001e98, 1, 0x04, 0x00000001 }, | ||
| 872 | { 0x001eb8, 1, 0x04, 0x00000001 }, | ||
| 873 | { 0x001ed8, 1, 0x04, 0x00000001 }, | ||
| 874 | { 0x001ef8, 1, 0x04, 0x00000001 }, | ||
| 875 | { 0x003400, 128, 0x04, 0x00000000 }, | ||
| 876 | { 0x00030c, 1, 0x04, 0x00000001 }, | ||
| 877 | { 0x001944, 1, 0x04, 0x00000000 }, | ||
| 878 | { 0x001514, 1, 0x04, 0x00000000 }, | ||
| 879 | { 0x000d68, 1, 0x04, 0x0000ffff }, | ||
| 880 | { 0x00121c, 1, 0x04, 0x0fac6881 }, | ||
| 881 | { 0x000fac, 1, 0x04, 0x00000001 }, | ||
| 882 | { 0x001538, 1, 0x04, 0x00000001 }, | ||
| 883 | { 0x000fe0, 2, 0x04, 0x00000000 }, | ||
| 884 | { 0x000fe8, 1, 0x04, 0x00000014 }, | ||
| 885 | { 0x000fec, 1, 0x04, 0x00000040 }, | ||
| 886 | { 0x000ff0, 1, 0x04, 0x00000000 }, | ||
| 887 | { 0x00179c, 1, 0x04, 0x00000000 }, | ||
| 888 | { 0x001228, 1, 0x04, 0x00000400 }, | ||
| 889 | { 0x00122c, 1, 0x04, 0x00000300 }, | ||
| 890 | { 0x001230, 1, 0x04, 0x00010001 }, | ||
| 891 | { 0x0007f8, 1, 0x04, 0x00000000 }, | ||
| 892 | { 0x0015b4, 1, 0x04, 0x00000001 }, | ||
| 893 | { 0x0015cc, 1, 0x04, 0x00000000 }, | ||
| 894 | { 0x001534, 1, 0x04, 0x00000000 }, | ||
| 895 | { 0x000fb0, 1, 0x04, 0x00000000 }, | ||
| 896 | { 0x0015d0, 1, 0x04, 0x00000000 }, | ||
| 897 | { 0x00153c, 1, 0x04, 0x00000000 }, | ||
| 898 | { 0x0016b4, 1, 0x04, 0x00000003 }, | ||
| 899 | { 0x000fbc, 4, 0x04, 0x0000ffff }, | ||
| 900 | { 0x000df8, 2, 0x04, 0x00000000 }, | ||
| 901 | { 0x001948, 1, 0x04, 0x00000000 }, | ||
| 902 | { 0x001970, 1, 0x04, 0x00000001 }, | ||
| 903 | { 0x00161c, 1, 0x04, 0x000009f0 }, | ||
| 904 | { 0x000dcc, 1, 0x04, 0x00000010 }, | ||
| 905 | { 0x00163c, 1, 0x04, 0x00000000 }, | ||
| 906 | { 0x0015e4, 1, 0x04, 0x00000000 }, | ||
| 907 | { 0x001160, 32, 0x04, 0x25e00040 }, | ||
| 908 | { 0x001880, 32, 0x04, 0x00000000 }, | ||
| 909 | { 0x000f84, 2, 0x04, 0x00000000 }, | ||
| 910 | { 0x0017c8, 2, 0x04, 0x00000000 }, | ||
| 911 | { 0x0017d0, 1, 0x04, 0x000000ff }, | ||
| 912 | { 0x0017d4, 1, 0x04, 0xffffffff }, | ||
| 913 | { 0x0017d8, 1, 0x04, 0x00000002 }, | ||
| 914 | { 0x0017dc, 1, 0x04, 0x00000000 }, | ||
| 915 | { 0x0015f4, 2, 0x04, 0x00000000 }, | ||
| 916 | { 0x001434, 2, 0x04, 0x00000000 }, | ||
| 917 | { 0x000d74, 1, 0x04, 0x00000000 }, | ||
| 918 | { 0x000dec, 1, 0x04, 0x00000001 }, | ||
| 919 | { 0x0013a4, 1, 0x04, 0x00000000 }, | ||
| 920 | { 0x001318, 1, 0x04, 0x00000001 }, | ||
| 921 | { 0x001644, 1, 0x04, 0x00000000 }, | ||
| 922 | { 0x000748, 1, 0x04, 0x00000000 }, | ||
| 923 | { 0x000de8, 1, 0x04, 0x00000000 }, | ||
| 924 | { 0x001648, 1, 0x04, 0x00000000 }, | ||
| 925 | { 0x0012a4, 1, 0x04, 0x00000000 }, | ||
| 926 | { 0x001120, 4, 0x04, 0x00000000 }, | ||
| 927 | { 0x001118, 1, 0x04, 0x00000000 }, | ||
| 928 | { 0x00164c, 1, 0x04, 0x00000000 }, | ||
| 929 | { 0x001658, 1, 0x04, 0x00000000 }, | ||
| 930 | { 0x001910, 1, 0x04, 0x00000290 }, | ||
| 931 | { 0x001518, 1, 0x04, 0x00000000 }, | ||
| 932 | { 0x00165c, 1, 0x04, 0x00000001 }, | ||
| 933 | { 0x001520, 1, 0x04, 0x00000000 }, | ||
| 934 | { 0x001604, 1, 0x04, 0x00000000 }, | ||
| 935 | { 0x001570, 1, 0x04, 0x00000000 }, | ||
| 936 | { 0x0013b0, 2, 0x04, 0x3f800000 }, | ||
| 937 | { 0x00020c, 1, 0x04, 0x00000000 }, | ||
| 938 | { 0x001670, 1, 0x04, 0x30201000 }, | ||
| 939 | { 0x001674, 1, 0x04, 0x70605040 }, | ||
| 940 | { 0x001678, 1, 0x04, 0xb8a89888 }, | ||
| 941 | { 0x00167c, 1, 0x04, 0xf8e8d8c8 }, | ||
| 942 | { 0x00166c, 1, 0x04, 0x00000000 }, | ||
| 943 | { 0x001680, 1, 0x04, 0x00ffff00 }, | ||
| 944 | { 0x0012d0, 1, 0x04, 0x00000003 }, | ||
| 945 | { 0x0012d4, 1, 0x04, 0x00000002 }, | ||
| 946 | { 0x001684, 2, 0x04, 0x00000000 }, | ||
| 947 | { 0x000dac, 2, 0x04, 0x00001b02 }, | ||
| 948 | { 0x000db4, 1, 0x04, 0x00000000 }, | ||
| 949 | { 0x00168c, 1, 0x04, 0x00000000 }, | ||
| 950 | { 0x0015bc, 1, 0x04, 0x00000000 }, | ||
| 951 | { 0x00156c, 1, 0x04, 0x00000000 }, | ||
| 952 | { 0x00187c, 1, 0x04, 0x00000000 }, | ||
| 953 | { 0x001110, 1, 0x04, 0x00000001 }, | ||
| 954 | { 0x000dc0, 3, 0x04, 0x00000000 }, | ||
| 955 | { 0x001234, 1, 0x04, 0x00000000 }, | ||
| 956 | { 0x001690, 1, 0x04, 0x00000000 }, | ||
| 957 | { 0x0012ac, 1, 0x04, 0x00000001 }, | ||
| 958 | { 0x0002c4, 1, 0x04, 0x00000000 }, | ||
| 959 | { 0x000790, 5, 0x04, 0x00000000 }, | ||
| 960 | { 0x00077c, 1, 0x04, 0x00000000 }, | ||
| 961 | { 0x001000, 1, 0x04, 0x00000010 }, | ||
| 962 | { 0x0010fc, 1, 0x04, 0x00000000 }, | ||
| 963 | { 0x001290, 1, 0x04, 0x00000000 }, | ||
| 964 | { 0x000218, 1, 0x04, 0x00000010 }, | ||
| 965 | { 0x0012d8, 1, 0x04, 0x00000000 }, | ||
| 966 | { 0x0012dc, 1, 0x04, 0x00000010 }, | ||
| 967 | { 0x000d94, 1, 0x04, 0x00000001 }, | ||
| 968 | { 0x00155c, 2, 0x04, 0x00000000 }, | ||
| 969 | { 0x001564, 1, 0x04, 0x00000fff }, | ||
| 970 | { 0x001574, 2, 0x04, 0x00000000 }, | ||
| 971 | { 0x00157c, 1, 0x04, 0x000fffff }, | ||
| 972 | { 0x001354, 1, 0x04, 0x00000000 }, | ||
| 973 | { 0x001610, 1, 0x04, 0x00000012 }, | ||
| 974 | { 0x001608, 2, 0x04, 0x00000000 }, | ||
| 975 | { 0x00260c, 1, 0x04, 0x00000000 }, | ||
| 976 | { 0x0007ac, 1, 0x04, 0x00000000 }, | ||
| 977 | { 0x00162c, 1, 0x04, 0x00000003 }, | ||
| 978 | { 0x000210, 1, 0x04, 0x00000000 }, | ||
| 979 | { 0x000320, 1, 0x04, 0x00000000 }, | ||
| 980 | { 0x000324, 6, 0x04, 0x3f800000 }, | ||
| 981 | { 0x000750, 1, 0x04, 0x00000000 }, | ||
| 982 | { 0x000760, 1, 0x04, 0x39291909 }, | ||
| 983 | { 0x000764, 1, 0x04, 0x79695949 }, | ||
| 984 | { 0x000768, 1, 0x04, 0xb9a99989 }, | ||
| 985 | { 0x00076c, 1, 0x04, 0xf9e9d9c9 }, | ||
| 986 | { 0x000770, 1, 0x04, 0x30201000 }, | ||
| 987 | { 0x000774, 1, 0x04, 0x70605040 }, | ||
| 988 | { 0x000778, 1, 0x04, 0x00009080 }, | ||
| 989 | { 0x000780, 1, 0x04, 0x39291909 }, | ||
| 990 | { 0x000784, 1, 0x04, 0x79695949 }, | ||
| 991 | { 0x000788, 1, 0x04, 0xb9a99989 }, | ||
| 992 | { 0x00078c, 1, 0x04, 0xf9e9d9c9 }, | ||
| 993 | { 0x0007d0, 1, 0x04, 0x30201000 }, | ||
| 994 | { 0x0007d4, 1, 0x04, 0x70605040 }, | ||
| 995 | { 0x0007d8, 1, 0x04, 0x00009080 }, | ||
| 996 | { 0x00037c, 1, 0x04, 0x00000001 }, | ||
| 997 | { 0x000740, 2, 0x04, 0x00000000 }, | ||
| 998 | { 0x002600, 1, 0x04, 0x00000000 }, | ||
| 999 | { 0x001918, 1, 0x04, 0x00000000 }, | ||
| 1000 | { 0x00191c, 1, 0x04, 0x00000900 }, | ||
| 1001 | { 0x001920, 1, 0x04, 0x00000405 }, | ||
| 1002 | { 0x001308, 1, 0x04, 0x00000001 }, | ||
| 1003 | { 0x001924, 1, 0x04, 0x00000000 }, | ||
| 1004 | { 0x0013ac, 1, 0x04, 0x00000000 }, | ||
| 1005 | { 0x00192c, 1, 0x04, 0x00000001 }, | ||
| 1006 | { 0x00193c, 1, 0x04, 0x00002c1c }, | ||
| 1007 | { 0x000d7c, 1, 0x04, 0x00000000 }, | ||
| 1008 | { 0x000f8c, 1, 0x04, 0x00000000 }, | ||
| 1009 | { 0x0002c0, 1, 0x04, 0x00000001 }, | ||
| 1010 | { 0x001510, 1, 0x04, 0x00000000 }, | ||
| 1011 | { 0x001940, 1, 0x04, 0x00000000 }, | ||
| 1012 | { 0x000ff4, 2, 0x04, 0x00000000 }, | ||
| 1013 | { 0x00194c, 2, 0x04, 0x00000000 }, | ||
| 1014 | { 0x001968, 1, 0x04, 0x00000000 }, | ||
| 1015 | { 0x001590, 1, 0x04, 0x0000003f }, | ||
| 1016 | { 0x0007e8, 4, 0x04, 0x00000000 }, | ||
| 1017 | { 0x00196c, 1, 0x04, 0x00000011 }, | ||
| 1018 | { 0x0002e4, 1, 0x04, 0x0000b001 }, | ||
| 1019 | { 0x00036c, 2, 0x04, 0x00000000 }, | ||
| 1020 | { 0x00197c, 1, 0x04, 0x00000000 }, | ||
| 1021 | { 0x000fcc, 2, 0x04, 0x00000000 }, | ||
| 1022 | { 0x0002d8, 1, 0x04, 0x00000040 }, | ||
| 1023 | { 0x001980, 1, 0x04, 0x00000080 }, | ||
| 1024 | { 0x001504, 1, 0x04, 0x00000080 }, | ||
| 1025 | { 0x001984, 1, 0x04, 0x00000000 }, | ||
| 1026 | { 0x000300, 1, 0x04, 0x00000001 }, | ||
| 1027 | { 0x0013a8, 1, 0x04, 0x00000000 }, | ||
| 1028 | { 0x0012ec, 1, 0x04, 0x00000000 }, | ||
| 1029 | { 0x001310, 1, 0x04, 0x00000000 }, | ||
| 1030 | { 0x001314, 1, 0x04, 0x00000001 }, | ||
| 1031 | { 0x001380, 1, 0x04, 0x00000000 }, | ||
| 1032 | { 0x001384, 4, 0x04, 0x00000001 }, | ||
| 1033 | { 0x001394, 1, 0x04, 0x00000000 }, | ||
| 1034 | { 0x00139c, 1, 0x04, 0x00000000 }, | ||
| 1035 | { 0x001398, 1, 0x04, 0x00000000 }, | ||
| 1036 | { 0x001594, 1, 0x04, 0x00000000 }, | ||
| 1037 | { 0x001598, 4, 0x04, 0x00000001 }, | ||
| 1038 | { 0x000f54, 3, 0x04, 0x00000000 }, | ||
| 1039 | { 0x0019bc, 1, 0x04, 0x00000000 }, | ||
| 1040 | { 0x000f9c, 2, 0x04, 0x00000000 }, | ||
| 1041 | { 0x0012cc, 1, 0x04, 0x00000000 }, | ||
| 1042 | { 0x0012e8, 1, 0x04, 0x00000000 }, | ||
| 1043 | { 0x00130c, 1, 0x04, 0x00000001 }, | ||
| 1044 | { 0x001360, 8, 0x04, 0x00000000 }, | ||
| 1045 | { 0x00133c, 2, 0x04, 0x00000001 }, | ||
| 1046 | { 0x001344, 1, 0x04, 0x00000002 }, | ||
| 1047 | { 0x001348, 2, 0x04, 0x00000001 }, | ||
| 1048 | { 0x001350, 1, 0x04, 0x00000002 }, | ||
| 1049 | { 0x001358, 1, 0x04, 0x00000001 }, | ||
| 1050 | { 0x0012e4, 1, 0x04, 0x00000000 }, | ||
| 1051 | { 0x00131c, 4, 0x04, 0x00000000 }, | ||
| 1052 | { 0x0019c0, 1, 0x04, 0x00000000 }, | ||
| 1053 | { 0x001140, 1, 0x04, 0x00000000 }, | ||
| 1054 | { 0x0019c4, 1, 0x04, 0x00000000 }, | ||
| 1055 | { 0x0019c8, 1, 0x04, 0x00001500 }, | ||
| 1056 | { 0x00135c, 1, 0x04, 0x00000000 }, | ||
| 1057 | { 0x000f90, 1, 0x04, 0x00000000 }, | ||
| 1058 | { 0x0019e0, 8, 0x04, 0x00000001 }, | ||
| 1059 | { 0x0019cc, 1, 0x04, 0x00000001 }, | ||
| 1060 | { 0x0015b8, 1, 0x04, 0x00000000 }, | ||
| 1061 | { 0x001a00, 1, 0x04, 0x00001111 }, | ||
| 1062 | { 0x001a04, 7, 0x04, 0x00000000 }, | ||
| 1063 | { 0x000d6c, 2, 0x04, 0xffff0000 }, | ||
| 1064 | { 0x0010f8, 1, 0x04, 0x00001010 }, | ||
| 1065 | { 0x000d80, 5, 0x04, 0x00000000 }, | ||
| 1066 | { 0x000da0, 1, 0x04, 0x00000000 }, | ||
| 1067 | { 0x0007a4, 2, 0x04, 0x00000000 }, | ||
| 1068 | { 0x001508, 1, 0x04, 0x80000000 }, | ||
| 1069 | { 0x00150c, 1, 0x04, 0x40000000 }, | ||
| 1070 | { 0x001668, 1, 0x04, 0x00000000 }, | ||
| 1071 | { 0x000318, 2, 0x04, 0x00000008 }, | ||
| 1072 | { 0x000d9c, 1, 0x04, 0x00000001 }, | ||
| 1073 | { 0x000ddc, 1, 0x04, 0x00000002 }, | ||
| 1074 | { 0x000374, 1, 0x04, 0x00000000 }, | ||
| 1075 | { 0x000378, 1, 0x04, 0x00000020 }, | ||
| 1076 | { 0x0007dc, 1, 0x04, 0x00000000 }, | ||
| 1077 | { 0x00074c, 1, 0x04, 0x00000055 }, | ||
| 1078 | { 0x001420, 1, 0x04, 0x00000003 }, | ||
| 1079 | { 0x0017bc, 2, 0x04, 0x00000000 }, | ||
| 1080 | { 0x0017c4, 1, 0x04, 0x00000001 }, | ||
| 1081 | { 0x001008, 1, 0x04, 0x00000008 }, | ||
| 1082 | { 0x00100c, 1, 0x04, 0x00000040 }, | ||
| 1083 | { 0x001010, 1, 0x04, 0x0000012c }, | ||
| 1084 | { 0x000d60, 1, 0x04, 0x00000040 }, | ||
| 1085 | { 0x00075c, 1, 0x04, 0x00000003 }, | ||
| 1086 | { 0x001018, 1, 0x04, 0x00000020 }, | ||
| 1087 | { 0x00101c, 1, 0x04, 0x00000001 }, | ||
| 1088 | { 0x001020, 1, 0x04, 0x00000020 }, | ||
| 1089 | { 0x001024, 1, 0x04, 0x00000001 }, | ||
| 1090 | { 0x001444, 3, 0x04, 0x00000000 }, | ||
| 1091 | { 0x000360, 1, 0x04, 0x20164010 }, | ||
| 1092 | { 0x000364, 1, 0x04, 0x00000020 }, | ||
| 1093 | { 0x000368, 1, 0x04, 0x00000000 }, | ||
| 1094 | { 0x000de4, 1, 0x04, 0x00000000 }, | ||
| 1095 | { 0x000204, 1, 0x04, 0x00000006 }, | ||
| 1096 | { 0x000208, 1, 0x04, 0x00000000 }, | ||
| 1097 | { 0x0002cc, 2, 0x04, 0x003fffff }, | ||
| 1098 | { 0x001220, 1, 0x04, 0x00000005 }, | ||
| 1099 | { 0x000fdc, 1, 0x04, 0x00000000 }, | ||
| 1100 | { 0x000f98, 1, 0x04, 0x00400008 }, | ||
| 1101 | { 0x001284, 1, 0x04, 0x08000080 }, | ||
| 1102 | { 0x001450, 1, 0x04, 0x00400008 }, | ||
| 1103 | { 0x001454, 1, 0x04, 0x08000080 }, | ||
| 1104 | { 0x000214, 1, 0x04, 0x00000000 }, | ||
| 1105 | {} | ||
| 1106 | }; | ||
| 1107 | |||
| 1108 | static struct nvc0_graph_init | ||
| 1109 | nv108_grctx_init_unk40xx[] = { | ||
| 1110 | { 0x404004, 8, 0x04, 0x00000000 }, | ||
| 1111 | { 0x404024, 1, 0x04, 0x0000e000 }, | ||
| 1112 | { 0x404028, 8, 0x04, 0x00000000 }, | ||
| 1113 | { 0x4040a8, 8, 0x04, 0x00000000 }, | ||
| 1114 | { 0x4040c8, 1, 0x04, 0xf800008f }, | ||
| 1115 | { 0x4040d0, 6, 0x04, 0x00000000 }, | ||
| 1116 | { 0x4040e8, 1, 0x04, 0x00001000 }, | ||
| 1117 | { 0x4040f8, 1, 0x04, 0x00000000 }, | ||
| 1118 | { 0x404100, 10, 0x04, 0x00000000 }, | ||
| 1119 | { 0x404130, 2, 0x04, 0x00000000 }, | ||
| 1120 | { 0x404138, 1, 0x04, 0x20000040 }, | ||
| 1121 | { 0x404150, 1, 0x04, 0x0000002e }, | ||
| 1122 | { 0x404154, 1, 0x04, 0x00000400 }, | ||
| 1123 | { 0x404158, 1, 0x04, 0x00000200 }, | ||
| 1124 | { 0x404164, 1, 0x04, 0x00000055 }, | ||
| 1125 | { 0x40417c, 2, 0x04, 0x00000000 }, | ||
| 1126 | { 0x404194, 1, 0x04, 0x01000700 }, | ||
| 1127 | { 0x4041a0, 4, 0x04, 0x00000000 }, | ||
| 1128 | { 0x404200, 1, 0x04, 0x0000a197 }, | ||
| 1129 | { 0x404204, 1, 0x04, 0x0000a1c0 }, | ||
| 1130 | { 0x404208, 1, 0x04, 0x0000a140 }, | ||
| 1131 | { 0x40420c, 1, 0x04, 0x0000902d }, | ||
| 1132 | {} | ||
| 1133 | }; | ||
| 1134 | |||
| 1135 | static struct nvc0_graph_init | ||
| 1136 | nv108_grctx_init_unk58xx[] = { | ||
| 1137 | { 0x405800, 1, 0x04, 0x0f8000bf }, | ||
| 1138 | { 0x405830, 1, 0x04, 0x02180648 }, | ||
| 1139 | { 0x405834, 1, 0x04, 0x08000000 }, | ||
| 1140 | { 0x405838, 1, 0x04, 0x00000000 }, | ||
| 1141 | { 0x405854, 1, 0x04, 0x00000000 }, | ||
| 1142 | { 0x405870, 4, 0x04, 0x00000001 }, | ||
| 1143 | { 0x405a00, 2, 0x04, 0x00000000 }, | ||
| 1144 | { 0x405a18, 1, 0x04, 0x00000000 }, | ||
| 1145 | { 0x405a1c, 1, 0x04, 0x000000ff }, | ||
| 1146 | {} | ||
| 1147 | }; | ||
| 1148 | |||
| 1149 | static struct nvc0_graph_init | ||
| 1150 | nv108_grctx_init_unk64xx[] = { | ||
| 1151 | { 0x4064a8, 1, 0x04, 0x00000000 }, | ||
| 1152 | { 0x4064ac, 1, 0x04, 0x00003fff }, | ||
| 1153 | { 0x4064b0, 3, 0x04, 0x00000000 }, | ||
| 1154 | { 0x4064c0, 1, 0x04, 0x802000f0 }, | ||
| 1155 | { 0x4064c4, 1, 0x04, 0x0192ffff }, | ||
| 1156 | { 0x4064c8, 1, 0x04, 0x00c20200 }, | ||
| 1157 | { 0x4064cc, 9, 0x04, 0x00000000 }, | ||
| 1158 | { 0x4064fc, 1, 0x04, 0x0000022a }, | ||
| 1159 | {} | ||
| 1160 | }; | ||
| 1161 | |||
| 1162 | static struct nvc0_graph_init | ||
| 1163 | nv108_grctx_init_unk78xx[] = { | ||
| 1164 | { 0x407804, 1, 0x04, 0x00000063 }, | ||
| 1165 | { 0x40780c, 1, 0x04, 0x0a418820 }, | ||
| 1166 | { 0x407810, 1, 0x04, 0x062080e6 }, | ||
| 1167 | { 0x407814, 1, 0x04, 0x020398a4 }, | ||
| 1168 | { 0x407818, 1, 0x04, 0x0e629062 }, | ||
| 1169 | { 0x40781c, 1, 0x04, 0x0a418820 }, | ||
| 1170 | { 0x407820, 1, 0x04, 0x000000e6 }, | ||
| 1171 | { 0x4078bc, 1, 0x04, 0x00000103 }, | ||
| 1172 | {} | ||
| 1173 | }; | ||
| 1174 | |||
| 1175 | static struct nvc0_graph_init | ||
| 1176 | nv108_grctx_init_unk88xx[] = { | ||
| 1177 | { 0x408800, 1, 0x04, 0x32802a3c }, | ||
| 1178 | { 0x408804, 1, 0x04, 0x00000040 }, | ||
| 1179 | { 0x408808, 1, 0x04, 0x1003e005 }, | ||
| 1180 | { 0x408840, 1, 0x04, 0x0000000b }, | ||
| 1181 | { 0x408900, 1, 0x04, 0xb080b801 }, | ||
| 1182 | { 0x408904, 1, 0x04, 0x62000001 }, | ||
| 1183 | { 0x408908, 1, 0x04, 0x02c8102f }, | ||
| 1184 | { 0x408980, 1, 0x04, 0x0000011d }, | ||
| 1185 | {} | ||
| 1186 | }; | ||
| 1187 | |||
| 1188 | static struct nvc0_graph_init | ||
| 1189 | nv108_grctx_init_gpc_0[] = { | ||
| 1190 | { 0x418380, 1, 0x04, 0x00000016 }, | ||
| 1191 | { 0x418400, 1, 0x04, 0x38005e00 }, | ||
| 1192 | { 0x418404, 1, 0x04, 0x71e0ffff }, | ||
| 1193 | { 0x41840c, 1, 0x04, 0x00001008 }, | ||
| 1194 | { 0x418410, 1, 0x04, 0x0fff0fff }, | ||
| 1195 | { 0x418414, 1, 0x04, 0x02200fff }, | ||
| 1196 | { 0x418450, 6, 0x04, 0x00000000 }, | ||
| 1197 | { 0x418468, 1, 0x04, 0x00000001 }, | ||
| 1198 | { 0x41846c, 2, 0x04, 0x00000000 }, | ||
| 1199 | { 0x418600, 1, 0x04, 0x0000007f }, | ||
| 1200 | { 0x418684, 1, 0x04, 0x0000001f }, | ||
| 1201 | { 0x418700, 1, 0x04, 0x00000002 }, | ||
| 1202 | { 0x418704, 2, 0x04, 0x00000080 }, | ||
| 1203 | { 0x41870c, 2, 0x04, 0x00000000 }, | ||
| 1204 | { 0x418800, 1, 0x04, 0x7006863a }, | ||
| 1205 | { 0x418808, 1, 0x04, 0x00000000 }, | ||
| 1206 | { 0x41880c, 1, 0x04, 0x00000030 }, | ||
| 1207 | { 0x418810, 1, 0x04, 0x00000000 }, | ||
| 1208 | { 0x418828, 1, 0x04, 0x00000044 }, | ||
| 1209 | { 0x418830, 1, 0x04, 0x10000001 }, | ||
| 1210 | { 0x4188d8, 1, 0x04, 0x00000008 }, | ||
| 1211 | { 0x4188e0, 1, 0x04, 0x01000000 }, | ||
| 1212 | { 0x4188e8, 5, 0x04, 0x00000000 }, | ||
| 1213 | { 0x4188fc, 1, 0x04, 0x20100058 }, | ||
| 1214 | { 0x41891c, 1, 0x04, 0x00ff00ff }, | ||
| 1215 | { 0x418924, 1, 0x04, 0x00000000 }, | ||
| 1216 | { 0x418928, 1, 0x04, 0x00ffff00 }, | ||
| 1217 | { 0x41892c, 1, 0x04, 0x0000ff00 }, | ||
| 1218 | { 0x418b00, 1, 0x04, 0x0000001e }, | ||
| 1219 | { 0x418b08, 1, 0x04, 0x0a418820 }, | ||
| 1220 | { 0x418b0c, 1, 0x04, 0x062080e6 }, | ||
| 1221 | { 0x418b10, 1, 0x04, 0x020398a4 }, | ||
| 1222 | { 0x418b14, 1, 0x04, 0x0e629062 }, | ||
| 1223 | { 0x418b18, 1, 0x04, 0x0a418820 }, | ||
| 1224 | { 0x418b1c, 1, 0x04, 0x000000e6 }, | ||
| 1225 | { 0x418bb8, 1, 0x04, 0x00000103 }, | ||
| 1226 | { 0x418c08, 1, 0x04, 0x00000001 }, | ||
| 1227 | { 0x418c10, 8, 0x04, 0x00000000 }, | ||
| 1228 | { 0x418c40, 1, 0x04, 0xffffffff }, | ||
| 1229 | { 0x418c6c, 1, 0x04, 0x00000001 }, | ||
| 1230 | { 0x418c80, 1, 0x04, 0x2020000c }, | ||
| 1231 | { 0x418c8c, 1, 0x04, 0x00000001 }, | ||
| 1232 | { 0x418d24, 1, 0x04, 0x00000000 }, | ||
| 1233 | { 0x419000, 1, 0x04, 0x00000780 }, | ||
| 1234 | { 0x419004, 2, 0x04, 0x00000000 }, | ||
| 1235 | { 0x419014, 1, 0x04, 0x00000004 }, | ||
| 1236 | {} | ||
| 1237 | }; | ||
| 1238 | |||
| 1239 | static struct nvc0_graph_init | ||
| 1240 | nv108_grctx_init_tpc[] = { | ||
| 1241 | { 0x419848, 1, 0x04, 0x00000000 }, | ||
| 1242 | { 0x419864, 1, 0x04, 0x00000129 }, | ||
| 1243 | { 0x419888, 1, 0x04, 0x00000000 }, | ||
| 1244 | { 0x419a00, 1, 0x04, 0x000100f0 }, | ||
| 1245 | { 0x419a04, 1, 0x04, 0x00000001 }, | ||
| 1246 | { 0x419a08, 1, 0x04, 0x00000421 }, | ||
| 1247 | { 0x419a0c, 1, 0x04, 0x00120000 }, | ||
| 1248 | { 0x419a10, 1, 0x04, 0x00000000 }, | ||
| 1249 | { 0x419a14, 1, 0x04, 0x00000200 }, | ||
| 1250 | { 0x419a1c, 1, 0x04, 0x0000c000 }, | ||
| 1251 | { 0x419a20, 1, 0x04, 0x00000800 }, | ||
| 1252 | { 0x419a30, 1, 0x04, 0x00000001 }, | ||
| 1253 | { 0x419ac4, 1, 0x04, 0x0037f440 }, | ||
| 1254 | { 0x419c00, 1, 0x04, 0x0000001a }, | ||
| 1255 | { 0x419c04, 1, 0x04, 0x80000006 }, | ||
| 1256 | { 0x419c08, 1, 0x04, 0x00000002 }, | ||
| 1257 | { 0x419c20, 1, 0x04, 0x00000000 }, | ||
| 1258 | { 0x419c24, 1, 0x04, 0x00084210 }, | ||
| 1259 | { 0x419c28, 1, 0x04, 0x3efbefbe }, | ||
| 1260 | { 0x419ce8, 1, 0x04, 0x00000000 }, | ||
| 1261 | { 0x419cf4, 1, 0x04, 0x00000203 }, | ||
| 1262 | { 0x419e04, 1, 0x04, 0x00000000 }, | ||
| 1263 | { 0x419e08, 1, 0x04, 0x0000001d }, | ||
| 1264 | { 0x419e0c, 1, 0x04, 0x00000000 }, | ||
| 1265 | { 0x419e10, 1, 0x04, 0x00001c02 }, | ||
| 1266 | { 0x419e44, 1, 0x04, 0x0013eff2 }, | ||
| 1267 | { 0x419e48, 1, 0x04, 0x00000000 }, | ||
| 1268 | { 0x419e4c, 1, 0x04, 0x0000007f }, | ||
| 1269 | { 0x419e50, 2, 0x04, 0x00000000 }, | ||
| 1270 | { 0x419e58, 1, 0x04, 0x00000001 }, | ||
| 1271 | { 0x419e5c, 3, 0x04, 0x00000000 }, | ||
| 1272 | { 0x419e68, 1, 0x04, 0x00000002 }, | ||
| 1273 | { 0x419e6c, 12, 0x04, 0x00000000 }, | ||
| 1274 | { 0x419eac, 1, 0x04, 0x00001f8f }, | ||
| 1275 | { 0x419eb0, 1, 0x04, 0x0db00da0 }, | ||
| 1276 | { 0x419eb8, 1, 0x04, 0x00000000 }, | ||
| 1277 | { 0x419ec8, 1, 0x04, 0x0001304f }, | ||
| 1278 | { 0x419f30, 4, 0x04, 0x00000000 }, | ||
| 1279 | { 0x419f40, 1, 0x04, 0x00000018 }, | ||
| 1280 | { 0x419f44, 3, 0x04, 0x00000000 }, | ||
| 1281 | { 0x419f58, 1, 0x04, 0x00000020 }, | ||
| 1282 | { 0x419f70, 1, 0x04, 0x00000000 }, | ||
| 1283 | { 0x419f78, 1, 0x04, 0x000001eb }, | ||
| 1284 | { 0x419f7c, 1, 0x04, 0x00000404 }, | ||
| 1285 | {} | ||
| 1286 | }; | ||
| 1287 | |||
| 1288 | static struct nvc0_graph_init | ||
| 1289 | nv108_grctx_init_unk[] = { | ||
| 1290 | { 0x41be24, 1, 0x04, 0x00000006 }, | ||
| 1291 | { 0x41bec0, 1, 0x04, 0x10000000 }, | ||
| 1292 | { 0x41bec4, 1, 0x04, 0x00037f7f }, | ||
| 1293 | { 0x41bee4, 1, 0x04, 0x00000000 }, | ||
| 1294 | { 0x41bef0, 1, 0x04, 0x000003ff }, | ||
| 1295 | { 0x41bf00, 1, 0x04, 0x0a418820 }, | ||
| 1296 | { 0x41bf04, 1, 0x04, 0x062080e6 }, | ||
| 1297 | { 0x41bf08, 1, 0x04, 0x020398a4 }, | ||
| 1298 | { 0x41bf0c, 1, 0x04, 0x0e629062 }, | ||
| 1299 | { 0x41bf10, 1, 0x04, 0x0a418820 }, | ||
| 1300 | { 0x41bf14, 1, 0x04, 0x000000e6 }, | ||
| 1301 | { 0x41bfd0, 1, 0x04, 0x00900103 }, | ||
| 1302 | { 0x41bfe0, 1, 0x04, 0x00400001 }, | ||
| 1303 | { 0x41bfe4, 1, 0x04, 0x00000000 }, | ||
| 1304 | {} | ||
| 1305 | }; | ||
| 1306 | |||
| 1307 | static void | ||
| 1308 | nv108_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) | ||
| 1309 | { | ||
| 1310 | u32 magic[GPC_MAX][2]; | ||
| 1311 | u32 offset; | ||
| 1312 | int gpc; | ||
| 1313 | |||
| 1314 | mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); | ||
| 1315 | mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); | ||
| 1316 | mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW); | ||
| 1317 | mmio_list(0x40800c, 0x00000000, 8, 1); | ||
| 1318 | mmio_list(0x408010, 0x80000000, 0, 0); | ||
| 1319 | mmio_list(0x419004, 0x00000000, 8, 1); | ||
| 1320 | mmio_list(0x419008, 0x00000000, 0, 0); | ||
| 1321 | mmio_list(0x408004, 0x00000000, 8, 0); | ||
| 1322 | mmio_list(0x408008, 0x80000030, 0, 0); | ||
| 1323 | mmio_list(0x418808, 0x00000000, 8, 0); | ||
| 1324 | mmio_list(0x41880c, 0x80000030, 0, 0); | ||
| 1325 | mmio_list(0x418810, 0x80000000, 12, 2); | ||
| 1326 | mmio_list(0x419848, 0x10000000, 12, 2); | ||
| 1327 | |||
| 1328 | mmio_list(0x405830, 0x02180648, 0, 0); | ||
| 1329 | mmio_list(0x4064c4, 0x0192ffff, 0, 0); | ||
| 1330 | |||
| 1331 | for (gpc = 0, offset = 0; gpc < priv->gpc_nr; gpc++) { | ||
| 1332 | u16 magic0 = 0x0218 * priv->tpc_nr[gpc]; | ||
| 1333 | u16 magic1 = 0x0648 * priv->tpc_nr[gpc]; | ||
| 1334 | magic[gpc][0] = 0x10000000 | (magic0 << 16) | offset; | ||
| 1335 | magic[gpc][1] = 0x00000000 | (magic1 << 16); | ||
| 1336 | offset += 0x0324 * priv->tpc_nr[gpc]; | ||
| 1337 | } | ||
| 1338 | |||
| 1339 | for (gpc = 0; gpc < priv->gpc_nr; gpc++) { | ||
| 1340 | mmio_list(GPC_UNIT(gpc, 0x30c0), magic[gpc][0], 0, 0); | ||
| 1341 | mmio_list(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset, 0, 0); | ||
| 1342 | offset += 0x07ff * priv->tpc_nr[gpc]; | ||
| 1343 | } | ||
| 1344 | |||
| 1345 | mmio_list(0x17e91c, 0x0b040a0b, 0, 0); | ||
| 1346 | mmio_list(0x17e920, 0x00090d08, 0, 0); | ||
| 1347 | } | ||
| 1348 | |||
| 1349 | static struct nvc0_graph_init * | ||
| 1350 | nv108_grctx_init_hub[] = { | ||
| 1351 | nvc0_grctx_init_base, | ||
| 1352 | nv108_grctx_init_unk40xx, | ||
| 1353 | nvf0_grctx_init_unk44xx, | ||
| 1354 | nve4_grctx_init_unk46xx, | ||
| 1355 | nve4_grctx_init_unk47xx, | ||
| 1356 | nv108_grctx_init_unk58xx, | ||
| 1357 | nvf0_grctx_init_unk5bxx, | ||
| 1358 | nvf0_grctx_init_unk60xx, | ||
| 1359 | nv108_grctx_init_unk64xx, | ||
| 1360 | nv108_grctx_init_unk78xx, | ||
| 1361 | nve4_grctx_init_unk80xx, | ||
| 1362 | nv108_grctx_init_unk88xx, | ||
| 1363 | NULL | ||
| 1364 | }; | ||
| 1365 | |||
| 1366 | struct nvc0_graph_init * | ||
| 1367 | nv108_grctx_init_gpc[] = { | ||
| 1368 | nv108_grctx_init_gpc_0, | ||
| 1369 | nvc0_grctx_init_gpc_1, | ||
| 1370 | nv108_grctx_init_tpc, | ||
| 1371 | nv108_grctx_init_unk, | ||
| 1372 | NULL | ||
| 1373 | }; | ||
| 1374 | |||
| 1375 | struct nvc0_graph_init | ||
| 1376 | nv108_grctx_init_mthd_magic[] = { | ||
| 1377 | { 0x3410, 1, 0x04, 0x8e0e2006 }, | ||
| 1378 | { 0x3414, 1, 0x04, 0x00000038 }, | ||
| 1379 | {} | ||
| 1380 | }; | ||
| 1381 | |||
| 1382 | static struct nvc0_graph_mthd | ||
| 1383 | nv108_grctx_init_mthd[] = { | ||
| 1384 | { 0xa197, nv108_grctx_init_a197, }, | ||
| 1385 | { 0x902d, nvc0_grctx_init_902d, }, | ||
| 1386 | { 0x902d, nv108_grctx_init_mthd_magic, }, | ||
| 1387 | {} | ||
| 1388 | }; | ||
| 1389 | |||
| 1390 | struct nouveau_oclass * | ||
| 1391 | nv108_grctx_oclass = &(struct nvc0_grctx_oclass) { | ||
| 1392 | .base.handle = NV_ENGCTX(GR, 0x08), | ||
| 1393 | .base.ofuncs = &(struct nouveau_ofuncs) { | ||
| 1394 | .ctor = nvc0_graph_context_ctor, | ||
| 1395 | .dtor = nvc0_graph_context_dtor, | ||
| 1396 | .init = _nouveau_graph_context_init, | ||
| 1397 | .fini = _nouveau_graph_context_fini, | ||
| 1398 | .rd32 = _nouveau_graph_context_rd32, | ||
| 1399 | .wr32 = _nouveau_graph_context_wr32, | ||
| 1400 | }, | ||
| 1401 | .main = nve4_grctx_generate_main, | ||
| 1402 | .mods = nv108_grctx_generate_mods, | ||
| 1403 | .unkn = nve4_grctx_generate_unkn, | ||
| 1404 | .hub = nv108_grctx_init_hub, | ||
| 1405 | .gpc = nv108_grctx_init_gpc, | ||
| 1406 | .icmd = nv108_grctx_init_icmd, | ||
| 1407 | .mthd = nv108_grctx_init_mthd, | ||
| 1408 | }.base; | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c index dcb2ebb8c29d..44012c3da538 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c | |||
| @@ -50,7 +50,7 @@ nvf0_grctx_init_unk40xx[] = { | |||
| 50 | {} | 50 | {} |
| 51 | }; | 51 | }; |
| 52 | 52 | ||
| 53 | static struct nvc0_graph_init | 53 | struct nvc0_graph_init |
| 54 | nvf0_grctx_init_unk44xx[] = { | 54 | nvf0_grctx_init_unk44xx[] = { |
| 55 | { 0x404404, 12, 0x04, 0x00000000 }, | 55 | { 0x404404, 12, 0x04, 0x00000000 }, |
| 56 | { 0x404438, 1, 0x04, 0x00000000 }, | 56 | { 0x404438, 1, 0x04, 0x00000000 }, |
| @@ -62,7 +62,7 @@ nvf0_grctx_init_unk44xx[] = { | |||
| 62 | {} | 62 | {} |
| 63 | }; | 63 | }; |
| 64 | 64 | ||
| 65 | static struct nvc0_graph_init | 65 | struct nvc0_graph_init |
| 66 | nvf0_grctx_init_unk5bxx[] = { | 66 | nvf0_grctx_init_unk5bxx[] = { |
| 67 | { 0x405b00, 1, 0x04, 0x00000000 }, | 67 | { 0x405b00, 1, 0x04, 0x00000000 }, |
| 68 | { 0x405b10, 1, 0x04, 0x00001000 }, | 68 | { 0x405b10, 1, 0x04, 0x00001000 }, |
| @@ -70,7 +70,7 @@ nvf0_grctx_init_unk5bxx[] = { | |||
| 70 | {} | 70 | {} |
| 71 | }; | 71 | }; |
| 72 | 72 | ||
| 73 | static struct nvc0_graph_init | 73 | struct nvc0_graph_init |
| 74 | nvf0_grctx_init_unk60xx[] = { | 74 | nvf0_grctx_init_unk60xx[] = { |
| 75 | { 0x406020, 1, 0x04, 0x034103c1 }, | 75 | { 0x406020, 1, 0x04, 0x034103c1 }, |
| 76 | { 0x406028, 4, 0x04, 0x00000001 }, | 76 | { 0x406028, 4, 0x04, 0x00000001 }, |
| @@ -286,7 +286,6 @@ nvf0_grctx_init_hub[] = { | |||
| 286 | nvf0_grctx_init_unk64xx, | 286 | nvf0_grctx_init_unk64xx, |
| 287 | nve4_grctx_init_unk80xx, | 287 | nve4_grctx_init_unk80xx, |
| 288 | nvf0_grctx_init_unk88xx, | 288 | nvf0_grctx_init_unk88xx, |
| 289 | nvd9_grctx_init_rop, | ||
| 290 | NULL | 289 | NULL |
| 291 | }; | 290 | }; |
| 292 | 291 | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/com.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/com.fuc index 5d24b6de16cc..e148961b8075 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/com.fuc +++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/com.fuc | |||
| @@ -38,7 +38,7 @@ queue_put: | |||
| 38 | cmpu b32 $r8 $r9 | 38 | cmpu b32 $r8 $r9 |
| 39 | bra ne #queue_put_next | 39 | bra ne #queue_put_next |
| 40 | mov $r15 E_CMD_OVERFLOW | 40 | mov $r15 E_CMD_OVERFLOW |
| 41 | call #error | 41 | call(error) |
| 42 | ret | 42 | ret |
| 43 | 43 | ||
| 44 | // store cmd/data on queue | 44 | // store cmd/data on queue |
| @@ -92,18 +92,16 @@ queue_get_done: | |||
| 92 | // Out: $r15 value | 92 | // Out: $r15 value |
| 93 | // | 93 | // |
| 94 | nv_rd32: | 94 | nv_rd32: |
| 95 | mov $r11 0x728 | ||
| 96 | shl b32 $r11 6 | ||
| 97 | mov b32 $r12 $r14 | 95 | mov b32 $r12 $r14 |
| 98 | bset $r12 31 // MMIO_CTRL_PENDING | 96 | bset $r12 31 // MMIO_CTRL_PENDING |
| 99 | iowr I[$r11 + 0x000] $r12 // MMIO_CTRL | 97 | nv_iowr(NV_PGRAPH_FECS_MMIO_CTRL, 0, $r12) |
| 100 | nv_rd32_wait: | 98 | nv_rd32_wait: |
| 101 | iord $r12 I[$r11 + 0x000] | 99 | nv_iord($r12, NV_PGRAPH_FECS_MMIO_CTRL, 0) |
| 102 | xbit $r12 $r12 31 | 100 | xbit $r12 $r12 31 |
| 103 | bra ne #nv_rd32_wait | 101 | bra ne #nv_rd32_wait |
| 104 | mov $r10 6 // DONE_MMIO_RD | 102 | mov $r10 6 // DONE_MMIO_RD |
| 105 | call #wait_doneo | 103 | call(wait_doneo) |
| 106 | iord $r15 I[$r11 + 0x100] // MMIO_RDVAL | 104 | nv_iord($r15, NV_PGRAPH_FECS_MMIO_RDVAL, 0) |
| 107 | ret | 105 | ret |
| 108 | 106 | ||
| 109 | // nv_wr32 - write 32-bit value to nv register | 107 | // nv_wr32 - write 32-bit value to nv register |
| @@ -112,37 +110,17 @@ nv_rd32: | |||
| 112 | // $r15 value | 110 | // $r15 value |
| 113 | // | 111 | // |
| 114 | nv_wr32: | 112 | nv_wr32: |
| 115 | mov $r11 0x728 | 113 | nv_iowr(NV_PGRAPH_FECS_MMIO_WRVAL, 0, $r15) |
| 116 | shl b32 $r11 6 | ||
| 117 | iowr I[$r11 + 0x200] $r15 // MMIO_WRVAL | ||
| 118 | mov b32 $r12 $r14 | 114 | mov b32 $r12 $r14 |
| 119 | bset $r12 31 // MMIO_CTRL_PENDING | 115 | bset $r12 31 // MMIO_CTRL_PENDING |
| 120 | bset $r12 30 // MMIO_CTRL_WRITE | 116 | bset $r12 30 // MMIO_CTRL_WRITE |
| 121 | iowr I[$r11 + 0x000] $r12 // MMIO_CTRL | 117 | nv_iowr(NV_PGRAPH_FECS_MMIO_CTRL, 0, $r12) |
| 122 | nv_wr32_wait: | 118 | nv_wr32_wait: |
| 123 | iord $r12 I[$r11 + 0x000] | 119 | nv_iord($r12, NV_PGRAPH_FECS_MMIO_CTRL, 0) |
| 124 | xbit $r12 $r12 31 | 120 | xbit $r12 $r12 31 |
| 125 | bra ne #nv_wr32_wait | 121 | bra ne #nv_wr32_wait |
| 126 | ret | 122 | ret |
| 127 | 123 | ||
| 128 | // (re)set watchdog timer | ||
| 129 | // | ||
| 130 | // In : $r15 timeout | ||
| 131 | // | ||
| 132 | watchdog_reset: | ||
| 133 | mov $r8 0x430 | ||
| 134 | shl b32 $r8 6 | ||
| 135 | bset $r15 31 | ||
| 136 | iowr I[$r8 + 0x000] $r15 | ||
| 137 | ret | ||
| 138 | |||
| 139 | // clear watchdog timer | ||
| 140 | watchdog_clear: | ||
| 141 | mov $r8 0x430 | ||
| 142 | shl b32 $r8 6 | ||
| 143 | iowr I[$r8 + 0x000] $r0 | ||
| 144 | ret | ||
| 145 | |||
| 146 | // wait_donez - wait on FUC_DONE bit to become clear | 124 | // wait_donez - wait on FUC_DONE bit to become clear |
| 147 | // | 125 | // |
| 148 | // In : $r10 bit to wait on | 126 | // In : $r10 bit to wait on |
| @@ -163,13 +141,9 @@ wait_donez: | |||
| 163 | // | 141 | // |
| 164 | wait_doneo: | 142 | wait_doneo: |
| 165 | trace_set(T_WAIT); | 143 | trace_set(T_WAIT); |
| 166 | mov $r8 0x818 | 144 | nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(6), 0, $r10) |
| 167 | shl b32 $r8 6 | ||
| 168 | iowr I[$r8 + 0x000] $r10 | ||
| 169 | wait_doneo_e: | 145 | wait_doneo_e: |
| 170 | mov $r8 0x400 | 146 | nv_iord($r8, NV_PGRAPH_FECS_SIGNAL, 0) |
| 171 | shl b32 $r8 6 | ||
| 172 | iord $r8 I[$r8 + 0x000] | ||
| 173 | xbit $r8 $r8 $r10 | 147 | xbit $r8 $r8 $r10 |
| 174 | bra e #wait_doneo_e | 148 | bra e #wait_doneo_e |
| 175 | trace_clr(T_WAIT) | 149 | trace_clr(T_WAIT) |
| @@ -209,21 +183,18 @@ mmctx_size: | |||
| 209 | // | 183 | // |
| 210 | mmctx_xfer: | 184 | mmctx_xfer: |
| 211 | trace_set(T_MMCTX) | 185 | trace_set(T_MMCTX) |
| 212 | mov $r8 0x710 | ||
| 213 | shl b32 $r8 6 | ||
| 214 | clear b32 $r9 | 186 | clear b32 $r9 |
| 215 | or $r11 $r11 | 187 | or $r11 $r11 |
| 216 | bra e #mmctx_base_disabled | 188 | bra e #mmctx_base_disabled |
| 217 | iowr I[$r8 + 0x000] $r11 // MMCTX_BASE | 189 | nv_iowr(NV_PGRAPH_FECS_MMCTX_BASE, 0, $r11) |
| 218 | bset $r9 0 // BASE_EN | 190 | bset $r9 0 // BASE_EN |
| 219 | mmctx_base_disabled: | 191 | mmctx_base_disabled: |
| 220 | or $r14 $r14 | 192 | or $r14 $r14 |
| 221 | bra e #mmctx_multi_disabled | 193 | bra e #mmctx_multi_disabled |
| 222 | iowr I[$r8 + 0x200] $r14 // MMCTX_MULTI_STRIDE | 194 | nv_iowr(NV_PGRAPH_FECS_MMCTX_MULTI_STRIDE, 0, $r14) |
| 223 | iowr I[$r8 + 0x300] $r15 // MMCTX_MULTI_MASK | 195 | nv_iowr(NV_PGRAPH_FECS_MMCTX_MULTI_MASK, 0, $r15) |
| 224 | bset $r9 1 // MULTI_EN | 196 | bset $r9 1 // MULTI_EN |
| 225 | mmctx_multi_disabled: | 197 | mmctx_multi_disabled: |
| 226 | add b32 $r8 0x100 | ||
| 227 | 198 | ||
| 228 | xbit $r11 $r10 0 | 199 | xbit $r11 $r10 0 |
| 229 | shl b32 $r11 16 // DIR | 200 | shl b32 $r11 16 // DIR |
| @@ -231,20 +202,20 @@ mmctx_xfer: | |||
| 231 | xbit $r14 $r10 1 | 202 | xbit $r14 $r10 1 |
| 232 | shl b32 $r14 17 | 203 | shl b32 $r14 17 |
| 233 | or $r11 $r14 // START_TRIGGER | 204 | or $r11 $r14 // START_TRIGGER |
| 234 | iowr I[$r8 + 0x000] $r11 // MMCTX_CTRL | 205 | nv_iowr(NV_PGRAPH_FECS_MMCTX_CTRL, 0, $r11) |
| 235 | 206 | ||
| 236 | // loop over the mmio list, and send requests to the hw | 207 | // loop over the mmio list, and send requests to the hw |
| 237 | mmctx_exec_loop: | 208 | mmctx_exec_loop: |
| 238 | // wait for space in mmctx queue | 209 | // wait for space in mmctx queue |
| 239 | mmctx_wait_free: | 210 | mmctx_wait_free: |
| 240 | iord $r14 I[$r8 + 0x000] // MMCTX_CTRL | 211 | nv_iord($r14, NV_PGRAPH_FECS_MMCTX_CTRL, 0) |
| 241 | and $r14 0x1f | 212 | and $r14 0x1f |
| 242 | bra e #mmctx_wait_free | 213 | bra e #mmctx_wait_free |
| 243 | 214 | ||
| 244 | // queue up an entry | 215 | // queue up an entry |
| 245 | ld b32 $r14 D[$r12] | 216 | ld b32 $r14 D[$r12] |
| 246 | or $r14 $r9 | 217 | or $r14 $r9 |
| 247 | iowr I[$r8 + 0x300] $r14 | 218 | nv_iowr(NV_PGRAPH_FECS_MMCTX_QUEUE, 0, $r14) |
| 248 | add b32 $r12 4 | 219 | add b32 $r12 4 |
| 249 | cmpu b32 $r12 $r13 | 220 | cmpu b32 $r12 $r13 |
| 250 | bra ne #mmctx_exec_loop | 221 | bra ne #mmctx_exec_loop |
| @@ -253,22 +224,22 @@ mmctx_xfer: | |||
| 253 | bra ne #mmctx_stop | 224 | bra ne #mmctx_stop |
| 254 | // wait for queue to empty | 225 | // wait for queue to empty |
| 255 | mmctx_fini_wait: | 226 | mmctx_fini_wait: |
| 256 | iord $r11 I[$r8 + 0x000] // MMCTX_CTRL | 227 | nv_iord($r11, NV_PGRAPH_FECS_MMCTX_CTRL, 0) |
| 257 | and $r11 0x1f | 228 | and $r11 0x1f |
| 258 | cmpu b32 $r11 0x10 | 229 | cmpu b32 $r11 0x10 |
| 259 | bra ne #mmctx_fini_wait | 230 | bra ne #mmctx_fini_wait |
| 260 | mov $r10 2 // DONE_MMCTX | 231 | mov $r10 2 // DONE_MMCTX |
| 261 | call #wait_donez | 232 | call(wait_donez) |
| 262 | bra #mmctx_done | 233 | bra #mmctx_done |
| 263 | mmctx_stop: | 234 | mmctx_stop: |
| 264 | xbit $r11 $r10 0 | 235 | xbit $r11 $r10 0 |
| 265 | shl b32 $r11 16 // DIR | 236 | shl b32 $r11 16 // DIR |
| 266 | bset $r11 12 // QLIMIT = 0x10 | 237 | bset $r11 12 // QLIMIT = 0x10 |
| 267 | bset $r11 18 // STOP_TRIGGER | 238 | bset $r11 18 // STOP_TRIGGER |
| 268 | iowr I[$r8 + 0x000] $r11 // MMCTX_CTRL | 239 | nv_iowr(NV_PGRAPH_FECS_MMCTX_CTRL, 0, $r11) |
| 269 | mmctx_stop_wait: | 240 | mmctx_stop_wait: |
| 270 | // wait for STOP_TRIGGER to clear | 241 | // wait for STOP_TRIGGER to clear |
| 271 | iord $r11 I[$r8 + 0x000] // MMCTX_CTRL | 242 | nv_iord($r11, NV_PGRAPH_FECS_MMCTX_CTRL, 0) |
| 272 | xbit $r11 $r11 18 | 243 | xbit $r11 $r11 18 |
| 273 | bra ne #mmctx_stop_wait | 244 | bra ne #mmctx_stop_wait |
| 274 | mmctx_done: | 245 | mmctx_done: |
| @@ -280,28 +251,24 @@ mmctx_xfer: | |||
| 280 | strand_wait: | 251 | strand_wait: |
| 281 | push $r10 | 252 | push $r10 |
| 282 | mov $r10 2 | 253 | mov $r10 2 |
| 283 | call #wait_donez | 254 | call(wait_donez) |
| 284 | pop $r10 | 255 | pop $r10 |
| 285 | ret | 256 | ret |
| 286 | 257 | ||
| 287 | // unknown - call before issuing strand commands | 258 | // unknown - call before issuing strand commands |
| 288 | // | 259 | // |
| 289 | strand_pre: | 260 | strand_pre: |
| 290 | mov $r8 0x4afc | 261 | mov $r9 NV_PGRAPH_FECS_STRAND_CMD_ENABLE |
| 291 | sethi $r8 0x20000 | 262 | nv_iowr(NV_PGRAPH_FECS_STRAND_CMD, 0x3f, $r9) |
| 292 | mov $r9 0xc | 263 | call(strand_wait) |
| 293 | iowr I[$r8] $r9 | ||
| 294 | call #strand_wait | ||
| 295 | ret | 264 | ret |
| 296 | 265 | ||
| 297 | // unknown - call after issuing strand commands | 266 | // unknown - call after issuing strand commands |
| 298 | // | 267 | // |
| 299 | strand_post: | 268 | strand_post: |
| 300 | mov $r8 0x4afc | 269 | mov $r9 NV_PGRAPH_FECS_STRAND_CMD_DISABLE |
| 301 | sethi $r8 0x20000 | 270 | nv_iowr(NV_PGRAPH_FECS_STRAND_CMD, 0x3f, $r9) |
| 302 | mov $r9 0xd | 271 | call(strand_wait) |
| 303 | iowr I[$r8] $r9 | ||
| 304 | call #strand_wait | ||
| 305 | ret | 272 | ret |
| 306 | 273 | ||
| 307 | // Selects strand set?! | 274 | // Selects strand set?! |
| @@ -309,18 +276,14 @@ strand_post: | |||
| 309 | // In: $r14 id | 276 | // In: $r14 id |
| 310 | // | 277 | // |
| 311 | strand_set: | 278 | strand_set: |
| 312 | mov $r10 0x4ffc | ||
| 313 | sethi $r10 0x20000 | ||
| 314 | sub b32 $r11 $r10 0x500 | ||
| 315 | mov $r12 0xf | 279 | mov $r12 0xf |
| 316 | iowr I[$r10 + 0x000] $r12 // 0x93c = 0xf | 280 | nv_iowr(NV_PGRAPH_FECS_STRAND_FILTER, 0x3f, $r12) |
| 317 | mov $r12 0xb | 281 | mov $r12 NV_PGRAPH_FECS_STRAND_CMD_DEACTIVATE_FILTER |
| 318 | iowr I[$r11 + 0x000] $r12 // 0x928 = 0xb | 282 | nv_iowr(NV_PGRAPH_FECS_STRAND_CMD, 0x3f, $r12) |
| 319 | call #strand_wait | 283 | nv_iowr(NV_PGRAPH_FECS_STRAND_FILTER, 0x3f, $r14) |
| 320 | iowr I[$r10 + 0x000] $r14 // 0x93c = <id> | 284 | mov $r12 NV_PGRAPH_FECS_STRAND_CMD_ACTIVATE_FILTER |
| 321 | mov $r12 0xa | 285 | nv_iowr(NV_PGRAPH_FECS_STRAND_CMD, 0x3f, $r12) |
| 322 | iowr I[$r11 + 0x000] $r12 // 0x928 = 0xa | 286 | call(strand_wait) |
| 323 | call #strand_wait | ||
| 324 | ret | 287 | ret |
| 325 | 288 | ||
| 326 | // Initialise strand context data | 289 | // Initialise strand context data |
| @@ -332,30 +295,27 @@ strand_set: | |||
| 332 | // | 295 | // |
| 333 | strand_ctx_init: | 296 | strand_ctx_init: |
| 334 | trace_set(T_STRINIT) | 297 | trace_set(T_STRINIT) |
| 335 | call #strand_pre | 298 | call(strand_pre) |
| 336 | mov $r14 3 | 299 | mov $r14 3 |
| 337 | call #strand_set | 300 | call(strand_set) |
| 338 | mov $r10 0x46fc | 301 | |
| 339 | sethi $r10 0x20000 | 302 | clear b32 $r12 |
| 340 | add b32 $r11 $r10 0x400 | 303 | nv_iowr(NV_PGRAPH_FECS_STRAND_SELECT, 0x3f, $r12) |
| 341 | iowr I[$r10 + 0x100] $r0 // STRAND_FIRST_GENE = 0 | 304 | mov $r12 NV_PGRAPH_FECS_STRAND_CMD_SEEK |
| 342 | mov $r12 1 | 305 | nv_iowr(NV_PGRAPH_FECS_STRAND_CMD, 0x3f, $r12) |
| 343 | iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_FIRST_GENE | 306 | call(strand_wait) |
| 344 | call #strand_wait | ||
| 345 | sub b32 $r12 $r0 1 | 307 | sub b32 $r12 $r0 1 |
| 346 | iowr I[$r10 + 0x000] $r12 // STRAND_GENE_CNT = 0xffffffff | 308 | nv_iowr(NV_PGRAPH_FECS_STRAND_DATA, 0x3f, $r12) |
| 347 | mov $r12 2 | 309 | mov $r12 NV_PGRAPH_FECS_STRAND_CMD_GET_INFO |
| 348 | iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_GENE_CNT | 310 | nv_iowr(NV_PGRAPH_FECS_STRAND_CMD, 0x3f, $r12) |
| 349 | call #strand_wait | 311 | call(strand_wait) |
| 350 | call #strand_post | 312 | call(strand_post) |
| 351 | 313 | ||
| 352 | // read the size of each strand, poke the context offset of | 314 | // read the size of each strand, poke the context offset of |
| 353 | // each into STRAND_{SAVE,LOAD}_SWBASE now, no need to worry | 315 | // each into STRAND_{SAVE,LOAD}_SWBASE now, no need to worry |
| 354 | // about it later then. | 316 | // about it later then. |
| 355 | mov $r8 0x880 | 317 | nv_mkio($r8, NV_PGRAPH_FECS_STRAND_SAVE_SWBASE, 0x00) |
| 356 | shl b32 $r8 6 | 318 | nv_iord($r9, NV_PGRAPH_FECS_STRANDS_CNT, 0x00) |
| 357 | iord $r9 I[$r8 + 0x000] // STRANDS | ||
| 358 | add b32 $r8 0x2200 | ||
| 359 | shr b32 $r14 $r15 8 | 319 | shr b32 $r14 $r15 8 |
| 360 | ctx_init_strand_loop: | 320 | ctx_init_strand_loop: |
| 361 | iowr I[$r8 + 0x000] $r14 // STRAND_SAVE_SWBASE | 321 | iowr I[$r8 + 0x000] $r14 // STRAND_SAVE_SWBASE |
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc index 5547c1b3f4f2..96cbcea3b2c9 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc +++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc | |||
| @@ -58,12 +58,9 @@ mmio_list_base: | |||
| 58 | // | 58 | // |
| 59 | error: | 59 | error: |
| 60 | push $r14 | 60 | push $r14 |
| 61 | mov $r14 -0x67ec // 0x9814 | 61 | nv_wr32(NV_PGRAPH_FECS_CC_SCRATCH_VAL(5), $r15) |
| 62 | sethi $r14 0x400000 | ||
| 63 | call #nv_wr32 // HUB_CTXCTL_CC_SCRATCH[5] = error code | ||
| 64 | add b32 $r14 0x41c | ||
| 65 | mov $r15 1 | 62 | mov $r15 1 |
| 66 | call #nv_wr32 // HUB_CTXCTL_INTR_UP_SET | 63 | nv_wr32(NV_PGRAPH_FECS_INTR_UP_SET, $r15) |
| 67 | pop $r14 | 64 | pop $r14 |
| 68 | ret | 65 | ret |
| 69 | 66 | ||
| @@ -84,46 +81,40 @@ init: | |||
| 84 | mov $sp $r0 | 81 | mov $sp $r0 |
| 85 | 82 | ||
| 86 | // enable fifo access | 83 | // enable fifo access |
| 87 | mov $r1 0x1200 | 84 | mov $r2 NV_PGRAPH_GPCX_GPCCS_ACCESS_FIFO |
| 88 | mov $r2 2 | 85 | nv_iowr(NV_PGRAPH_GPCX_GPCCS_ACCESS, 0, $r2) |
| 89 | iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE | ||
| 90 | 86 | ||
| 91 | // setup i0 handler, and route all interrupts to it | 87 | // setup i0 handler, and route all interrupts to it |
| 92 | mov $r1 #ih | 88 | mov $r1 #ih |
| 93 | mov $iv0 $r1 | 89 | mov $iv0 $r1 |
| 94 | mov $r1 0x400 | 90 | nv_iowr(NV_PGRAPH_GPCX_GPCCS_INTR_ROUTE, 0, $r0) |
| 95 | iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH | ||
| 96 | 91 | ||
| 97 | // enable fifo interrupt | 92 | // enable fifo interrupt |
| 98 | mov $r2 4 | 93 | mov $r2 NV_PGRAPH_GPCX_GPCCS_INTR_EN_SET_FIFO |
| 99 | iowr I[$r1 + 0x000] $r2 // INTR_EN_SET | 94 | nv_iowr(NV_PGRAPH_GPCX_GPCCS_INTR_EN_SET, 0, $r2) |
| 100 | 95 | ||
| 101 | // enable interrupts | 96 | // enable interrupts |
| 102 | bset $flags ie0 | 97 | bset $flags ie0 |
| 103 | 98 | ||
| 104 | // figure out which GPC we are, and how many TPCs we have | 99 | // figure out which GPC we are, and how many TPCs we have |
| 105 | mov $r1 0x608 | 100 | nv_iord($r2, NV_PGRAPH_GPCX_GPCCS_UNITS, 0) |
| 106 | shl b32 $r1 6 | ||
| 107 | iord $r2 I[$r1 + 0x000] // UNITS | ||
| 108 | mov $r3 1 | 101 | mov $r3 1 |
| 109 | and $r2 0x1f | 102 | and $r2 0x1f |
| 110 | shl b32 $r3 $r2 | 103 | shl b32 $r3 $r2 |
| 111 | sub b32 $r3 1 | 104 | sub b32 $r3 1 |
| 112 | st b32 D[$r0 + #tpc_count] $r2 | 105 | st b32 D[$r0 + #tpc_count] $r2 |
| 113 | st b32 D[$r0 + #tpc_mask] $r3 | 106 | st b32 D[$r0 + #tpc_mask] $r3 |
| 114 | add b32 $r1 0x400 | 107 | nv_iord($r2, NV_PGRAPH_GPCX_GPCCS_MYINDEX, 0) |
| 115 | iord $r2 I[$r1 + 0x000] // MYINDEX | ||
| 116 | st b32 D[$r0 + #gpc_id] $r2 | 108 | st b32 D[$r0 + #gpc_id] $r2 |
| 117 | 109 | ||
| 118 | #if NV_PGRAPH_GPCX_UNK__SIZE > 0 | 110 | #if NV_PGRAPH_GPCX_UNK__SIZE > 0 |
| 119 | // figure out which, and how many, UNKs are actually present | 111 | // figure out which, and how many, UNKs are actually present |
| 120 | mov $r14 0x0c30 | 112 | imm32($r14, 0x500c30) |
| 121 | sethi $r14 0x500000 | ||
| 122 | clear b32 $r2 | 113 | clear b32 $r2 |
| 123 | clear b32 $r3 | 114 | clear b32 $r3 |
| 124 | clear b32 $r4 | 115 | clear b32 $r4 |
| 125 | init_unk_loop: | 116 | init_unk_loop: |
| 126 | call #nv_rd32 | 117 | call(nv_rd32) |
| 127 | cmp b32 $r15 0 | 118 | cmp b32 $r15 0 |
| 128 | bra z #init_unk_next | 119 | bra z #init_unk_next |
| 129 | mov $r15 1 | 120 | mov $r15 1 |
| @@ -146,23 +137,21 @@ init: | |||
| 146 | 137 | ||
| 147 | // set mmctx base addresses now so we don't have to do it later, | 138 | // set mmctx base addresses now so we don't have to do it later, |
| 148 | // they don't currently ever change | 139 | // they don't currently ever change |
| 149 | mov $r4 0x700 | ||
| 150 | shl b32 $r4 6 | ||
| 151 | shr b32 $r5 $r2 8 | 140 | shr b32 $r5 $r2 8 |
| 152 | iowr I[$r4 + 0x000] $r5 // MMCTX_SAVE_SWBASE | 141 | nv_iowr(NV_PGRAPH_GPCX_GPCCS_MMCTX_SAVE_SWBASE, 0, $r5) |
| 153 | iowr I[$r4 + 0x100] $r5 // MMCTX_LOAD_SWBASE | 142 | nv_iowr(NV_PGRAPH_GPCX_GPCCS_MMCTX_LOAD_SWBASE, 0, $r5) |
| 154 | 143 | ||
| 155 | // calculate GPC mmio context size | 144 | // calculate GPC mmio context size |
| 156 | ld b32 $r14 D[$r0 + #gpc_mmio_list_head] | 145 | ld b32 $r14 D[$r0 + #gpc_mmio_list_head] |
| 157 | ld b32 $r15 D[$r0 + #gpc_mmio_list_tail] | 146 | ld b32 $r15 D[$r0 + #gpc_mmio_list_tail] |
| 158 | call #mmctx_size | 147 | call(mmctx_size) |
| 159 | add b32 $r2 $r15 | 148 | add b32 $r2 $r15 |
| 160 | add b32 $r3 $r15 | 149 | add b32 $r3 $r15 |
| 161 | 150 | ||
| 162 | // calculate per-TPC mmio context size | 151 | // calculate per-TPC mmio context size |
| 163 | ld b32 $r14 D[$r0 + #tpc_mmio_list_head] | 152 | ld b32 $r14 D[$r0 + #tpc_mmio_list_head] |
| 164 | ld b32 $r15 D[$r0 + #tpc_mmio_list_tail] | 153 | ld b32 $r15 D[$r0 + #tpc_mmio_list_tail] |
| 165 | call #mmctx_size | 154 | call(mmctx_size) |
| 166 | ld b32 $r14 D[$r0 + #tpc_count] | 155 | ld b32 $r14 D[$r0 + #tpc_count] |
| 167 | mulu $r14 $r15 | 156 | mulu $r14 $r15 |
| 168 | add b32 $r2 $r14 | 157 | add b32 $r2 $r14 |
| @@ -172,7 +161,7 @@ init: | |||
| 172 | // calculate per-UNK mmio context size | 161 | // calculate per-UNK mmio context size |
| 173 | ld b32 $r14 D[$r0 + #unk_mmio_list_head] | 162 | ld b32 $r14 D[$r0 + #unk_mmio_list_head] |
| 174 | ld b32 $r15 D[$r0 + #unk_mmio_list_tail] | 163 | ld b32 $r15 D[$r0 + #unk_mmio_list_tail] |
| 175 | call #mmctx_size | 164 | call(mmctx_size) |
| 176 | ld b32 $r14 D[$r0 + #unk_count] | 165 | ld b32 $r14 D[$r0 + #unk_count] |
| 177 | mulu $r14 $r15 | 166 | mulu $r14 $r15 |
| 178 | add b32 $r2 $r14 | 167 | add b32 $r2 $r14 |
| @@ -180,9 +169,8 @@ init: | |||
| 180 | #endif | 169 | #endif |
| 181 | 170 | ||
| 182 | // round up base/size to 256 byte boundary (for strand SWBASE) | 171 | // round up base/size to 256 byte boundary (for strand SWBASE) |
| 183 | add b32 $r4 0x1300 | ||
| 184 | shr b32 $r3 2 | 172 | shr b32 $r3 2 |
| 185 | iowr I[$r4 + 0x000] $r3 // MMCTX_LOAD_COUNT, wtf for?!? | 173 | nv_iowr(NV_PGRAPH_GPCX_GPCCS_MMCTX_LOAD_COUNT, 0, $r3) // wtf for?! |
| 186 | shr b32 $r2 8 | 174 | shr b32 $r2 8 |
| 187 | shr b32 $r3 6 | 175 | shr b32 $r3 6 |
| 188 | add b32 $r2 1 | 176 | add b32 $r2 1 |
| @@ -192,7 +180,7 @@ init: | |||
| 192 | 180 | ||
| 193 | // calculate size of strand context data | 181 | // calculate size of strand context data |
| 194 | mov b32 $r15 $r2 | 182 | mov b32 $r15 $r2 |
| 195 | call #strand_ctx_init | 183 | call(strand_ctx_init) |
| 196 | add b32 $r3 $r15 | 184 | add b32 $r3 $r15 |
| 197 | 185 | ||
| 198 | // save context size, and tell HUB we're done | 186 | // save context size, and tell HUB we're done |
| @@ -208,7 +196,7 @@ main: | |||
| 208 | bset $flags $p0 | 196 | bset $flags $p0 |
| 209 | sleep $p0 | 197 | sleep $p0 |
| 210 | mov $r13 #cmd_queue | 198 | mov $r13 #cmd_queue |
| 211 | call #queue_get | 199 | call(queue_get) |
| 212 | bra $p1 #main | 200 | bra $p1 #main |
| 213 | 201 | ||
| 214 | // 0x0000-0x0003 are all context transfers | 202 | // 0x0000-0x0003 are all context transfers |
| @@ -224,13 +212,13 @@ main: | |||
| 224 | or $r1 $r14 | 212 | or $r1 $r14 |
| 225 | mov $flags $r1 | 213 | mov $flags $r1 |
| 226 | // transfer context data | 214 | // transfer context data |
| 227 | call #ctx_xfer | 215 | call(ctx_xfer) |
| 228 | bra #main | 216 | bra #main |
| 229 | 217 | ||
| 230 | main_not_ctx_xfer: | 218 | main_not_ctx_xfer: |
| 231 | shl b32 $r15 $r14 16 | 219 | shl b32 $r15 $r14 16 |
| 232 | or $r15 E_BAD_COMMAND | 220 | or $r15 E_BAD_COMMAND |
| 233 | call #error | 221 | call(error) |
| 234 | bra #main | 222 | bra #main |
| 235 | 223 | ||
| 236 | // interrupt handler | 224 | // interrupt handler |
| @@ -247,22 +235,20 @@ ih: | |||
| 247 | clear b32 $r0 | 235 | clear b32 $r0 |
| 248 | 236 | ||
| 249 | // incoming fifo command? | 237 | // incoming fifo command? |
| 250 | iord $r10 I[$r0 + 0x200] // INTR | 238 | nv_iord($r10, NV_PGRAPH_GPCX_GPCCS_INTR, 0) |
| 251 | and $r11 $r10 0x00000004 | 239 | and $r11 $r10 NV_PGRAPH_GPCX_GPCCS_INTR_FIFO |
| 252 | bra e #ih_no_fifo | 240 | bra e #ih_no_fifo |
| 253 | // queue incoming fifo command for later processing | 241 | // queue incoming fifo command for later processing |
| 254 | mov $r11 0x1900 | ||
| 255 | mov $r13 #cmd_queue | 242 | mov $r13 #cmd_queue |
| 256 | iord $r14 I[$r11 + 0x100] // FIFO_CMD | 243 | nv_iord($r14, NV_PGRAPH_GPCX_GPCCS_FIFO_CMD, 0) |
| 257 | iord $r15 I[$r11 + 0x000] // FIFO_DATA | 244 | nv_iord($r15, NV_PGRAPH_GPCX_GPCCS_FIFO_DATA, 0) |
| 258 | call #queue_put | 245 | call(queue_put) |
| 259 | add b32 $r11 0x400 | ||
| 260 | mov $r14 1 | 246 | mov $r14 1 |
| 261 | iowr I[$r11 + 0x000] $r14 // FIFO_ACK | 247 | nv_iowr(NV_PGRAPH_GPCX_GPCCS_FIFO_ACK, 0, $r14) |
| 262 | 248 | ||
| 263 | // ack, and wake up main() | 249 | // ack, and wake up main() |
| 264 | ih_no_fifo: | 250 | ih_no_fifo: |
| 265 | iowr I[$r0 + 0x100] $r10 // INTR_ACK | 251 | nv_iowr(NV_PGRAPH_GPCX_GPCCS_INTR_ACK, 0, $r10) |
| 266 | 252 | ||
| 267 | pop $r15 | 253 | pop $r15 |
| 268 | pop $r14 | 254 | pop $r14 |
| @@ -283,9 +269,7 @@ hub_barrier_done: | |||
| 283 | mov $r15 1 | 269 | mov $r15 1 |
| 284 | ld b32 $r14 D[$r0 + #gpc_id] | 270 | ld b32 $r14 D[$r0 + #gpc_id] |
| 285 | shl b32 $r15 $r14 | 271 | shl b32 $r15 $r14 |
| 286 | mov $r14 -0x6be8 // 0x409418 - HUB_BAR_SET | 272 | nv_wr32(0x409418, $r15) // 0x409418 - HUB_BAR_SET |
| 287 | sethi $r14 0x400000 | ||
| 288 | call #nv_wr32 | ||
| 289 | ret | 273 | ret |
| 290 | 274 | ||
| 291 | // Disables various things, waits a bit, and re-enables them.. | 275 | // Disables various things, waits a bit, and re-enables them.. |
| @@ -295,16 +279,15 @@ hub_barrier_done: | |||
| 295 | // funny things happen. | 279 | // funny things happen. |
| 296 | // | 280 | // |
| 297 | ctx_redswitch: | 281 | ctx_redswitch: |
| 298 | mov $r14 0x614 | 282 | mov $r15 NV_PGRAPH_GPCX_GPCCS_RED_SWITCH_POWER |
| 299 | shl b32 $r14 6 | 283 | nv_iowr(NV_PGRAPH_GPCX_GPCCS_RED_SWITCH, 0, $r15) |
| 300 | mov $r15 0x020 | 284 | mov $r14 8 |
| 301 | iowr I[$r14] $r15 // GPC_RED_SWITCH = POWER | ||
| 302 | mov $r15 8 | ||
| 303 | ctx_redswitch_delay: | 285 | ctx_redswitch_delay: |
| 304 | sub b32 $r15 1 | 286 | sub b32 $r14 1 |
| 305 | bra ne #ctx_redswitch_delay | 287 | bra ne #ctx_redswitch_delay |
| 306 | mov $r15 0xa20 | 288 | or $r15 NV_PGRAPH_GPCX_GPCCS_RED_SWITCH_UNK11 |
| 307 | iowr I[$r14] $r15 // GPC_RED_SWITCH = UNK11, ENABLE, POWER | 289 | or $r15 NV_PGRAPH_GPCX_GPCCS_RED_SWITCH_ENABLE |
| 290 | nv_iowr(NV_PGRAPH_GPCX_GPCCS_RED_SWITCH, 0, $r15) | ||
| 308 | ret | 291 | ret |
| 309 | 292 | ||
| 310 | // Transfer GPC context data between GPU and storage area | 293 | // Transfer GPC context data between GPU and storage area |
| @@ -317,46 +300,37 @@ ctx_redswitch: | |||
| 317 | // | 300 | // |
| 318 | ctx_xfer: | 301 | ctx_xfer: |
| 319 | // set context base address | 302 | // set context base address |
| 320 | mov $r1 0xa04 | 303 | nv_iowr(NV_PGRAPH_GPCX_GPCCS_MEM_BASE, 0, $r15) |
| 321 | shl b32 $r1 6 | ||
| 322 | iowr I[$r1 + 0x000] $r15// MEM_BASE | ||
| 323 | bra not $p1 #ctx_xfer_not_load | 304 | bra not $p1 #ctx_xfer_not_load |
| 324 | call #ctx_redswitch | 305 | call(ctx_redswitch) |
| 325 | ctx_xfer_not_load: | 306 | ctx_xfer_not_load: |
| 326 | 307 | ||
| 327 | // strands | 308 | // strands |
| 328 | mov $r1 0x4afc | 309 | call(strand_pre) |
| 329 | sethi $r1 0x20000 | 310 | clear b32 $r2 |
| 330 | mov $r2 0xc | 311 | nv_iowr(NV_PGRAPH_GPCX_GPCCS_STRAND_SELECT, 0x3f, $r2) |
| 331 | iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c | 312 | xbit $r2 $flags $p1 // SAVE/LOAD |
| 332 | call #strand_wait | 313 | add b32 $r2 NV_PGRAPH_GPCX_GPCCS_STRAND_CMD_SAVE |
| 333 | mov $r2 0x47fc | 314 | nv_iowr(NV_PGRAPH_GPCX_GPCCS_STRAND_CMD, 0x3f, $r2) |
| 334 | sethi $r2 0x20000 | ||
| 335 | iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00 | ||
| 336 | xbit $r2 $flags $p1 | ||
| 337 | add b32 $r2 3 | ||
| 338 | iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD) | ||
| 339 | 315 | ||
| 340 | // mmio context | 316 | // mmio context |
| 341 | xbit $r10 $flags $p1 // direction | 317 | xbit $r10 $flags $p1 // direction |
| 342 | or $r10 2 // first | 318 | or $r10 2 // first |
| 343 | mov $r11 0x0000 | 319 | imm32($r11,0x500000) |
| 344 | sethi $r11 0x500000 | ||
| 345 | ld b32 $r12 D[$r0 + #gpc_id] | 320 | ld b32 $r12 D[$r0 + #gpc_id] |
| 346 | shl b32 $r12 15 | 321 | shl b32 $r12 15 |
| 347 | add b32 $r11 $r12 // base = NV_PGRAPH_GPCn | 322 | add b32 $r11 $r12 // base = NV_PGRAPH_GPCn |
| 348 | ld b32 $r12 D[$r0 + #gpc_mmio_list_head] | 323 | ld b32 $r12 D[$r0 + #gpc_mmio_list_head] |
| 349 | ld b32 $r13 D[$r0 + #gpc_mmio_list_tail] | 324 | ld b32 $r13 D[$r0 + #gpc_mmio_list_tail] |
| 350 | mov $r14 0 // not multi | 325 | mov $r14 0 // not multi |
| 351 | call #mmctx_xfer | 326 | call(mmctx_xfer) |
| 352 | 327 | ||
| 353 | // per-TPC mmio context | 328 | // per-TPC mmio context |
| 354 | xbit $r10 $flags $p1 // direction | 329 | xbit $r10 $flags $p1 // direction |
| 355 | #if !NV_PGRAPH_GPCX_UNK__SIZE | 330 | #if !NV_PGRAPH_GPCX_UNK__SIZE |
| 356 | or $r10 4 // last | 331 | or $r10 4 // last |
| 357 | #endif | 332 | #endif |
| 358 | mov $r11 0x4000 | 333 | imm32($r11, 0x504000) |
| 359 | sethi $r11 0x500000 // base = NV_PGRAPH_GPC0_TPC0 | ||
| 360 | ld b32 $r12 D[$r0 + #gpc_id] | 334 | ld b32 $r12 D[$r0 + #gpc_id] |
| 361 | shl b32 $r12 15 | 335 | shl b32 $r12 15 |
| 362 | add b32 $r11 $r12 // base = NV_PGRAPH_GPCn_TPC0 | 336 | add b32 $r11 $r12 // base = NV_PGRAPH_GPCn_TPC0 |
| @@ -364,14 +338,13 @@ ctx_xfer: | |||
| 364 | ld b32 $r13 D[$r0 + #tpc_mmio_list_tail] | 338 | ld b32 $r13 D[$r0 + #tpc_mmio_list_tail] |
| 365 | ld b32 $r15 D[$r0 + #tpc_mask] | 339 | ld b32 $r15 D[$r0 + #tpc_mask] |
| 366 | mov $r14 0x800 // stride = 0x800 | 340 | mov $r14 0x800 // stride = 0x800 |
| 367 | call #mmctx_xfer | 341 | call(mmctx_xfer) |
| 368 | 342 | ||
| 369 | #if NV_PGRAPH_GPCX_UNK__SIZE > 0 | 343 | #if NV_PGRAPH_GPCX_UNK__SIZE > 0 |
| 370 | // per-UNK mmio context | 344 | // per-UNK mmio context |
| 371 | xbit $r10 $flags $p1 // direction | 345 | xbit $r10 $flags $p1 // direction |
| 372 | or $r10 4 // last | 346 | or $r10 4 // last |
| 373 | mov $r11 0x3000 | 347 | imm32($r11, 0x503000) |
| 374 | sethi $r11 0x500000 // base = NV_PGRAPH_GPC0_UNK0 | ||
| 375 | ld b32 $r12 D[$r0 + #gpc_id] | 348 | ld b32 $r12 D[$r0 + #gpc_id] |
| 376 | shl b32 $r12 15 | 349 | shl b32 $r12 15 |
| 377 | add b32 $r11 $r12 // base = NV_PGRAPH_GPCn_UNK0 | 350 | add b32 $r11 $r12 // base = NV_PGRAPH_GPCn_UNK0 |
| @@ -379,11 +352,11 @@ ctx_xfer: | |||
| 379 | ld b32 $r13 D[$r0 + #unk_mmio_list_tail] | 352 | ld b32 $r13 D[$r0 + #unk_mmio_list_tail] |
| 380 | ld b32 $r15 D[$r0 + #unk_mask] | 353 | ld b32 $r15 D[$r0 + #unk_mask] |
| 381 | mov $r14 0x200 // stride = 0x200 | 354 | mov $r14 0x200 // stride = 0x200 |
| 382 | call #mmctx_xfer | 355 | call(mmctx_xfer) |
| 383 | #endif | 356 | #endif |
| 384 | 357 | ||
| 385 | // wait for strands to finish | 358 | // wait for strands to finish |
| 386 | call #strand_wait | 359 | call(strand_wait) |
| 387 | 360 | ||
| 388 | // if load, or a save without a load following, do some | 361 | // if load, or a save without a load following, do some |
| 389 | // unknown stuff that's done after finishing a block of | 362 | // unknown stuff that's done after finishing a block of |
| @@ -391,14 +364,10 @@ ctx_xfer: | |||
| 391 | bra $p1 #ctx_xfer_post | 364 | bra $p1 #ctx_xfer_post |
| 392 | bra not $p2 #ctx_xfer_done | 365 | bra not $p2 #ctx_xfer_done |
| 393 | ctx_xfer_post: | 366 | ctx_xfer_post: |
| 394 | mov $r1 0x4afc | 367 | call(strand_post) |
| 395 | sethi $r1 0x20000 | ||
| 396 | mov $r2 0xd | ||
| 397 | iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0d | ||
| 398 | call #strand_wait | ||
| 399 | 368 | ||
| 400 | // mark completion in HUB's barrier | 369 | // mark completion in HUB's barrier |
| 401 | ctx_xfer_done: | 370 | ctx_xfer_done: |
| 402 | call #hub_barrier_done | 371 | call(hub_barrier_done) |
| 403 | ret | 372 | ret |
| 404 | #endif | 373 | #endif |
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5 b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5 new file mode 100644 index 000000000000..bd30262d635b --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5 | |||
| @@ -0,0 +1,42 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2013 Red Hat Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | * Authors: Ben Skeggs <bskeggs@redhat.com> | ||
| 23 | */ | ||
| 24 | |||
| 25 | #define NV_PGRAPH_GPCX_UNK__SIZE 0x00000001 | ||
| 26 | |||
| 27 | #define CHIPSET GK208 | ||
| 28 | #include "macros.fuc" | ||
| 29 | |||
| 30 | .section #nv108_grgpc_data | ||
| 31 | #define INCLUDE_DATA | ||
| 32 | #include "com.fuc" | ||
| 33 | #include "gpc.fuc" | ||
| 34 | #undef INCLUDE_DATA | ||
| 35 | |||
| 36 | .section #nv108_grgpc_code | ||
| 37 | #define INCLUDE_CODE | ||
| 38 | bra #init | ||
| 39 | #include "com.fuc" | ||
| 40 | #include "gpc.fuc" | ||
| 41 | .align 256 | ||
| 42 | #undef INCLUDE_CODE | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5.h new file mode 100644 index 000000000000..27dc1280dc10 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5.h | |||
| @@ -0,0 +1,473 @@ | |||
| 1 | uint32_t nv108_grgpc_data[] = { | ||
| 2 | /* 0x0000: gpc_mmio_list_head */ | ||
| 3 | 0x0000006c, | ||
| 4 | /* 0x0004: gpc_mmio_list_tail */ | ||
| 5 | /* 0x0004: tpc_mmio_list_head */ | ||
| 6 | 0x0000006c, | ||
| 7 | /* 0x0008: tpc_mmio_list_tail */ | ||
| 8 | /* 0x0008: unk_mmio_list_head */ | ||
| 9 | 0x0000006c, | ||
| 10 | /* 0x000c: unk_mmio_list_tail */ | ||
| 11 | 0x0000006c, | ||
| 12 | /* 0x0010: gpc_id */ | ||
| 13 | 0x00000000, | ||
| 14 | /* 0x0014: tpc_count */ | ||
| 15 | 0x00000000, | ||
| 16 | /* 0x0018: tpc_mask */ | ||
| 17 | 0x00000000, | ||
| 18 | /* 0x001c: unk_count */ | ||
| 19 | 0x00000000, | ||
| 20 | /* 0x0020: unk_mask */ | ||
| 21 | 0x00000000, | ||
| 22 | /* 0x0024: cmd_queue */ | ||
| 23 | 0x00000000, | ||
| 24 | 0x00000000, | ||
| 25 | 0x00000000, | ||
| 26 | 0x00000000, | ||
| 27 | 0x00000000, | ||
| 28 | 0x00000000, | ||
| 29 | 0x00000000, | ||
| 30 | 0x00000000, | ||
| 31 | 0x00000000, | ||
| 32 | 0x00000000, | ||
| 33 | 0x00000000, | ||
| 34 | 0x00000000, | ||
| 35 | 0x00000000, | ||
| 36 | 0x00000000, | ||
| 37 | 0x00000000, | ||
| 38 | 0x00000000, | ||
| 39 | 0x00000000, | ||
| 40 | 0x00000000, | ||
| 41 | }; | ||
| 42 | |||
| 43 | uint32_t nv108_grgpc_code[] = { | ||
| 44 | 0x03140ef5, | ||
| 45 | /* 0x0004: queue_put */ | ||
| 46 | 0x9800d898, | ||
| 47 | 0x86f001d9, | ||
| 48 | 0xf489a408, | ||
| 49 | 0x020f0b1b, | ||
| 50 | 0x0002f87e, | ||
| 51 | /* 0x001a: queue_put_next */ | ||
| 52 | 0x98c400f8, | ||
| 53 | 0x0384b607, | ||
| 54 | 0xb6008dbb, | ||
| 55 | 0x8eb50880, | ||
| 56 | 0x018fb500, | ||
| 57 | 0xf00190b6, | ||
| 58 | 0xd9b50f94, | ||
| 59 | /* 0x0037: queue_get */ | ||
| 60 | 0xf400f801, | ||
| 61 | 0xd8980131, | ||
| 62 | 0x01d99800, | ||
| 63 | 0x0bf489a4, | ||
| 64 | 0x0789c421, | ||
| 65 | 0xbb0394b6, | ||
| 66 | 0x90b6009d, | ||
| 67 | 0x009e9808, | ||
| 68 | 0xb6019f98, | ||
| 69 | 0x84f00180, | ||
| 70 | 0x00d8b50f, | ||
| 71 | /* 0x0063: queue_get_done */ | ||
| 72 | 0xf80132f4, | ||
| 73 | /* 0x0065: nv_rd32 */ | ||
| 74 | 0xf0ecb200, | ||
| 75 | 0x00801fc9, | ||
| 76 | 0x0cf601ca, | ||
| 77 | /* 0x0073: nv_rd32_wait */ | ||
| 78 | 0x8c04bd00, | ||
| 79 | 0xcf01ca00, | ||
| 80 | 0xccc800cc, | ||
| 81 | 0xf61bf41f, | ||
| 82 | 0xec7e060a, | ||
| 83 | 0x008f0000, | ||
| 84 | 0xffcf01cb, | ||
| 85 | /* 0x008f: nv_wr32 */ | ||
| 86 | 0x8000f800, | ||
| 87 | 0xf601cc00, | ||
| 88 | 0x04bd000f, | ||
| 89 | 0xc9f0ecb2, | ||
| 90 | 0x1ec9f01f, | ||
| 91 | 0x01ca0080, | ||
| 92 | 0xbd000cf6, | ||
| 93 | /* 0x00a9: nv_wr32_wait */ | ||
| 94 | 0xca008c04, | ||
| 95 | 0x00cccf01, | ||
| 96 | 0xf41fccc8, | ||
| 97 | 0x00f8f61b, | ||
| 98 | /* 0x00b8: wait_donez */ | ||
| 99 | 0x99f094bd, | ||
| 100 | 0x37008000, | ||
| 101 | 0x0009f602, | ||
| 102 | 0x008004bd, | ||
| 103 | 0x0af60206, | ||
| 104 | /* 0x00cf: wait_donez_ne */ | ||
| 105 | 0x8804bd00, | ||
| 106 | 0xcf010000, | ||
| 107 | 0x8aff0088, | ||
| 108 | 0xf61bf488, | ||
| 109 | 0x99f094bd, | ||
| 110 | 0x17008000, | ||
| 111 | 0x0009f602, | ||
| 112 | 0x00f804bd, | ||
| 113 | /* 0x00ec: wait_doneo */ | ||
| 114 | 0x99f094bd, | ||
| 115 | 0x37008000, | ||
| 116 | 0x0009f602, | ||
| 117 | 0x008004bd, | ||
| 118 | 0x0af60206, | ||
| 119 | /* 0x0103: wait_doneo_e */ | ||
| 120 | 0x8804bd00, | ||
| 121 | 0xcf010000, | ||
| 122 | 0x8aff0088, | ||
| 123 | 0xf60bf488, | ||
| 124 | 0x99f094bd, | ||
| 125 | 0x17008000, | ||
| 126 | 0x0009f602, | ||
| 127 | 0x00f804bd, | ||
| 128 | /* 0x0120: mmctx_size */ | ||
| 129 | /* 0x0122: nv_mmctx_size_loop */ | ||
| 130 | 0xe89894bd, | ||
| 131 | 0x1a85b600, | ||
| 132 | 0xb60180b6, | ||
| 133 | 0x98bb0284, | ||
| 134 | 0x04e0b600, | ||
| 135 | 0x1bf4efa4, | ||
| 136 | 0xf89fb2ec, | ||
| 137 | /* 0x013d: mmctx_xfer */ | ||
| 138 | 0xf094bd00, | ||
| 139 | 0x00800199, | ||
| 140 | 0x09f60237, | ||
| 141 | 0xbd04bd00, | ||
| 142 | 0x05bbfd94, | ||
| 143 | 0x800f0bf4, | ||
| 144 | 0xf601c400, | ||
| 145 | 0x04bd000b, | ||
| 146 | /* 0x015f: mmctx_base_disabled */ | ||
| 147 | 0xfd0099f0, | ||
| 148 | 0x0bf405ee, | ||
| 149 | 0xc6008018, | ||
| 150 | 0x000ef601, | ||
| 151 | 0x008004bd, | ||
| 152 | 0x0ff601c7, | ||
| 153 | 0xf004bd00, | ||
| 154 | /* 0x017a: mmctx_multi_disabled */ | ||
| 155 | 0xabc80199, | ||
| 156 | 0x10b4b600, | ||
| 157 | 0xc80cb9f0, | ||
| 158 | 0xe4b601ae, | ||
| 159 | 0x05befd11, | ||
| 160 | 0x01c50080, | ||
| 161 | 0xbd000bf6, | ||
| 162 | /* 0x0195: mmctx_exec_loop */ | ||
| 163 | /* 0x0195: mmctx_wait_free */ | ||
| 164 | 0xc5008e04, | ||
| 165 | 0x00eecf01, | ||
| 166 | 0xf41fe4f0, | ||
| 167 | 0xce98f60b, | ||
| 168 | 0x05e9fd00, | ||
| 169 | 0x01c80080, | ||
| 170 | 0xbd000ef6, | ||
| 171 | 0x04c0b604, | ||
| 172 | 0x1bf4cda4, | ||
| 173 | 0x02abc8df, | ||
| 174 | /* 0x01bf: mmctx_fini_wait */ | ||
| 175 | 0x8b1c1bf4, | ||
| 176 | 0xcf01c500, | ||
| 177 | 0xb4f000bb, | ||
| 178 | 0x10b4b01f, | ||
| 179 | 0x0af31bf4, | ||
| 180 | 0x00b87e02, | ||
| 181 | 0x250ef400, | ||
| 182 | /* 0x01d8: mmctx_stop */ | ||
| 183 | 0xb600abc8, | ||
| 184 | 0xb9f010b4, | ||
| 185 | 0x12b9f00c, | ||
| 186 | 0x01c50080, | ||
| 187 | 0xbd000bf6, | ||
| 188 | /* 0x01ed: mmctx_stop_wait */ | ||
| 189 | 0xc5008b04, | ||
| 190 | 0x00bbcf01, | ||
| 191 | 0xf412bbc8, | ||
| 192 | /* 0x01fa: mmctx_done */ | ||
| 193 | 0x94bdf61b, | ||
| 194 | 0x800199f0, | ||
| 195 | 0xf6021700, | ||
| 196 | 0x04bd0009, | ||
| 197 | /* 0x020a: strand_wait */ | ||
| 198 | 0xa0f900f8, | ||
| 199 | 0xb87e020a, | ||
| 200 | 0xa0fc0000, | ||
| 201 | /* 0x0216: strand_pre */ | ||
| 202 | 0x0c0900f8, | ||
| 203 | 0x024afc80, | ||
| 204 | 0xbd0009f6, | ||
| 205 | 0x020a7e04, | ||
| 206 | /* 0x0227: strand_post */ | ||
| 207 | 0x0900f800, | ||
| 208 | 0x4afc800d, | ||
| 209 | 0x0009f602, | ||
| 210 | 0x0a7e04bd, | ||
| 211 | 0x00f80002, | ||
| 212 | /* 0x0238: strand_set */ | ||
| 213 | 0xfc800f0c, | ||
| 214 | 0x0cf6024f, | ||
| 215 | 0x0c04bd00, | ||
| 216 | 0x4afc800b, | ||
| 217 | 0x000cf602, | ||
| 218 | 0xfc8004bd, | ||
| 219 | 0x0ef6024f, | ||
| 220 | 0x0c04bd00, | ||
| 221 | 0x4afc800a, | ||
| 222 | 0x000cf602, | ||
| 223 | 0x0a7e04bd, | ||
| 224 | 0x00f80002, | ||
| 225 | /* 0x0268: strand_ctx_init */ | ||
| 226 | 0x99f094bd, | ||
| 227 | 0x37008003, | ||
| 228 | 0x0009f602, | ||
| 229 | 0x167e04bd, | ||
| 230 | 0x030e0002, | ||
| 231 | 0x0002387e, | ||
| 232 | 0xfc80c4bd, | ||
| 233 | 0x0cf60247, | ||
| 234 | 0x0c04bd00, | ||
| 235 | 0x4afc8001, | ||
| 236 | 0x000cf602, | ||
| 237 | 0x0a7e04bd, | ||
| 238 | 0x0c920002, | ||
| 239 | 0x46fc8001, | ||
| 240 | 0x000cf602, | ||
| 241 | 0x020c04bd, | ||
| 242 | 0x024afc80, | ||
| 243 | 0xbd000cf6, | ||
| 244 | 0x020a7e04, | ||
| 245 | 0x02277e00, | ||
| 246 | 0x42008800, | ||
| 247 | 0x20008902, | ||
| 248 | 0x0099cf02, | ||
| 249 | /* 0x02c7: ctx_init_strand_loop */ | ||
| 250 | 0xf608fe95, | ||
| 251 | 0x8ef6008e, | ||
| 252 | 0x808acf40, | ||
| 253 | 0xb606a5b6, | ||
| 254 | 0xeabb01a0, | ||
| 255 | 0x0480b600, | ||
| 256 | 0xf40192b6, | ||
| 257 | 0xe4b6e81b, | ||
| 258 | 0xf2efbc08, | ||
| 259 | 0x99f094bd, | ||
| 260 | 0x17008003, | ||
| 261 | 0x0009f602, | ||
| 262 | 0x00f804bd, | ||
| 263 | /* 0x02f8: error */ | ||
| 264 | 0xffb2e0f9, | ||
| 265 | 0x4098148e, | ||
| 266 | 0x00008f7e, | ||
| 267 | 0xffb2010f, | ||
| 268 | 0x409c1c8e, | ||
| 269 | 0x00008f7e, | ||
| 270 | 0x00f8e0fc, | ||
| 271 | /* 0x0314: init */ | ||
| 272 | 0x04fe04bd, | ||
| 273 | 0x40020200, | ||
| 274 | 0x02f61200, | ||
| 275 | 0x4104bd00, | ||
| 276 | 0x10fe0465, | ||
| 277 | 0x07004000, | ||
| 278 | 0xbd0000f6, | ||
| 279 | 0x40040204, | ||
| 280 | 0x02f60400, | ||
| 281 | 0xf404bd00, | ||
| 282 | 0x00821031, | ||
| 283 | 0x22cf0182, | ||
| 284 | 0xf0010300, | ||
| 285 | 0x32bb1f24, | ||
| 286 | 0x0132b604, | ||
| 287 | 0xb50502b5, | ||
| 288 | 0x00820603, | ||
| 289 | 0x22cf0186, | ||
| 290 | 0x0402b500, | ||
| 291 | 0x500c308e, | ||
| 292 | 0x34bd24bd, | ||
| 293 | /* 0x036a: init_unk_loop */ | ||
| 294 | 0x657e44bd, | ||
| 295 | 0xf6b00000, | ||
| 296 | 0x0e0bf400, | ||
| 297 | 0xf2bb010f, | ||
| 298 | 0x054ffd04, | ||
| 299 | /* 0x037f: init_unk_next */ | ||
| 300 | 0xb60130b6, | ||
| 301 | 0xe0b60120, | ||
| 302 | 0x0126b004, | ||
| 303 | /* 0x038b: init_unk_done */ | ||
| 304 | 0xb5e21bf4, | ||
| 305 | 0x04b50703, | ||
| 306 | 0x01008208, | ||
| 307 | 0x0022cf02, | ||
| 308 | 0x259534bd, | ||
| 309 | 0xc0008008, | ||
| 310 | 0x0005f601, | ||
| 311 | 0x008004bd, | ||
| 312 | 0x05f601c1, | ||
| 313 | 0x9804bd00, | ||
| 314 | 0x0f98000e, | ||
| 315 | 0x01207e01, | ||
| 316 | 0x002fbb00, | ||
| 317 | 0x98003fbb, | ||
| 318 | 0x0f98010e, | ||
| 319 | 0x01207e02, | ||
| 320 | 0x050e9800, | ||
| 321 | 0xbb00effd, | ||
| 322 | 0x3ebb002e, | ||
| 323 | 0x020e9800, | ||
| 324 | 0x7e030f98, | ||
| 325 | 0x98000120, | ||
| 326 | 0xeffd070e, | ||
| 327 | 0x002ebb00, | ||
| 328 | 0xb6003ebb, | ||
| 329 | 0x00800235, | ||
| 330 | 0x03f601d3, | ||
| 331 | 0xb604bd00, | ||
| 332 | 0x35b60825, | ||
| 333 | 0x0120b606, | ||
| 334 | 0xb60130b6, | ||
| 335 | 0x34b60824, | ||
| 336 | 0x7e2fb208, | ||
| 337 | 0xbb000268, | ||
| 338 | 0x0080003f, | ||
| 339 | 0x03f60201, | ||
| 340 | 0xbd04bd00, | ||
| 341 | 0x1f29f024, | ||
| 342 | 0x02300080, | ||
| 343 | 0xbd0002f6, | ||
| 344 | /* 0x0429: main */ | ||
| 345 | 0x0031f404, | ||
| 346 | 0x0d0028f4, | ||
| 347 | 0x00377e24, | ||
| 348 | 0xf401f400, | ||
| 349 | 0xf404e4b0, | ||
| 350 | 0x81fe1d18, | ||
| 351 | 0xbd060201, | ||
| 352 | 0x0412fd20, | ||
| 353 | 0xfd01e4b6, | ||
| 354 | 0x18fe051e, | ||
| 355 | 0x04fc7e00, | ||
| 356 | 0xd40ef400, | ||
| 357 | /* 0x0458: main_not_ctx_xfer */ | ||
| 358 | 0xf010ef94, | ||
| 359 | 0xf87e01f5, | ||
| 360 | 0x0ef40002, | ||
| 361 | /* 0x0465: ih */ | ||
| 362 | 0xfe80f9c7, | ||
| 363 | 0x80f90188, | ||
| 364 | 0xa0f990f9, | ||
| 365 | 0xd0f9b0f9, | ||
| 366 | 0xf0f9e0f9, | ||
| 367 | 0x004a04bd, | ||
| 368 | 0x00aacf02, | ||
| 369 | 0xf404abc4, | ||
| 370 | 0x240d1f0b, | ||
| 371 | 0xcf1a004e, | ||
| 372 | 0x004f00ee, | ||
| 373 | 0x00ffcf19, | ||
| 374 | 0x0000047e, | ||
| 375 | 0x0040010e, | ||
| 376 | 0x000ef61d, | ||
| 377 | /* 0x04a2: ih_no_fifo */ | ||
| 378 | 0x004004bd, | ||
| 379 | 0x000af601, | ||
| 380 | 0xf0fc04bd, | ||
| 381 | 0xd0fce0fc, | ||
| 382 | 0xa0fcb0fc, | ||
| 383 | 0x80fc90fc, | ||
| 384 | 0xfc0088fe, | ||
| 385 | 0x0032f480, | ||
| 386 | /* 0x04c2: hub_barrier_done */ | ||
| 387 | 0x010f01f8, | ||
| 388 | 0xbb040e98, | ||
| 389 | 0xffb204fe, | ||
| 390 | 0x4094188e, | ||
| 391 | 0x00008f7e, | ||
| 392 | /* 0x04d6: ctx_redswitch */ | ||
| 393 | 0x200f00f8, | ||
| 394 | 0x01850080, | ||
| 395 | 0xbd000ff6, | ||
| 396 | /* 0x04e3: ctx_redswitch_delay */ | ||
| 397 | 0xb6080e04, | ||
| 398 | 0x1bf401e2, | ||
| 399 | 0x00f5f1fd, | ||
| 400 | 0x00f5f108, | ||
| 401 | 0x85008002, | ||
| 402 | 0x000ff601, | ||
| 403 | 0x00f804bd, | ||
| 404 | /* 0x04fc: ctx_xfer */ | ||
| 405 | 0x02810080, | ||
| 406 | 0xbd000ff6, | ||
| 407 | 0x0711f404, | ||
| 408 | 0x0004d67e, | ||
| 409 | /* 0x050c: ctx_xfer_not_load */ | ||
| 410 | 0x0002167e, | ||
| 411 | 0xfc8024bd, | ||
| 412 | 0x02f60247, | ||
| 413 | 0xf004bd00, | ||
| 414 | 0x20b6012c, | ||
| 415 | 0x4afc8003, | ||
| 416 | 0x0002f602, | ||
| 417 | 0xacf004bd, | ||
| 418 | 0x02a5f001, | ||
| 419 | 0x5000008b, | ||
| 420 | 0xb6040c98, | ||
| 421 | 0xbcbb0fc4, | ||
| 422 | 0x000c9800, | ||
| 423 | 0x0e010d98, | ||
| 424 | 0x013d7e00, | ||
| 425 | 0x01acf000, | ||
| 426 | 0x5040008b, | ||
| 427 | 0xb6040c98, | ||
| 428 | 0xbcbb0fc4, | ||
| 429 | 0x010c9800, | ||
| 430 | 0x98020d98, | ||
| 431 | 0x004e060f, | ||
| 432 | 0x013d7e08, | ||
| 433 | 0x01acf000, | ||
| 434 | 0x8b04a5f0, | ||
| 435 | 0x98503000, | ||
| 436 | 0xc4b6040c, | ||
| 437 | 0x00bcbb0f, | ||
| 438 | 0x98020c98, | ||
| 439 | 0x0f98030d, | ||
| 440 | 0x02004e08, | ||
| 441 | 0x00013d7e, | ||
| 442 | 0x00020a7e, | ||
| 443 | 0xf40601f4, | ||
| 444 | /* 0x0596: ctx_xfer_post */ | ||
| 445 | 0x277e0712, | ||
| 446 | /* 0x059a: ctx_xfer_done */ | ||
| 447 | 0xc27e0002, | ||
| 448 | 0x00f80004, | ||
| 449 | 0x00000000, | ||
| 450 | 0x00000000, | ||
| 451 | 0x00000000, | ||
| 452 | 0x00000000, | ||
| 453 | 0x00000000, | ||
| 454 | 0x00000000, | ||
| 455 | 0x00000000, | ||
| 456 | 0x00000000, | ||
| 457 | 0x00000000, | ||
| 458 | 0x00000000, | ||
| 459 | 0x00000000, | ||
| 460 | 0x00000000, | ||
| 461 | 0x00000000, | ||
| 462 | 0x00000000, | ||
| 463 | 0x00000000, | ||
| 464 | 0x00000000, | ||
| 465 | 0x00000000, | ||
| 466 | 0x00000000, | ||
| 467 | 0x00000000, | ||
| 468 | 0x00000000, | ||
| 469 | 0x00000000, | ||
| 470 | 0x00000000, | ||
| 471 | 0x00000000, | ||
| 472 | 0x00000000, | ||
| 473 | }; | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h index f2b0dea80116..0e7b01efae8d 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h +++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h | |||
| @@ -37,14 +37,14 @@ uint32_t nvc0_grgpc_data[] = { | |||
| 37 | }; | 37 | }; |
| 38 | 38 | ||
| 39 | uint32_t nvc0_grgpc_code[] = { | 39 | uint32_t nvc0_grgpc_code[] = { |
| 40 | 0x03180ef5, | 40 | 0x03a10ef5, |
| 41 | /* 0x0004: queue_put */ | 41 | /* 0x0004: queue_put */ |
| 42 | 0x9800d898, | 42 | 0x9800d898, |
| 43 | 0x86f001d9, | 43 | 0x86f001d9, |
| 44 | 0x0489b808, | 44 | 0x0489b808, |
| 45 | 0xf00c1bf4, | 45 | 0xf00c1bf4, |
| 46 | 0x21f502f7, | 46 | 0x21f502f7, |
| 47 | 0x00f802fe, | 47 | 0x00f8037e, |
| 48 | /* 0x001c: queue_put_next */ | 48 | /* 0x001c: queue_put_next */ |
| 49 | 0xb60798c4, | 49 | 0xb60798c4, |
| 50 | 0x8dbb0384, | 50 | 0x8dbb0384, |
| @@ -68,184 +68,214 @@ uint32_t nvc0_grgpc_code[] = { | |||
| 68 | /* 0x0066: queue_get_done */ | 68 | /* 0x0066: queue_get_done */ |
| 69 | 0x00f80132, | 69 | 0x00f80132, |
| 70 | /* 0x0068: nv_rd32 */ | 70 | /* 0x0068: nv_rd32 */ |
| 71 | 0x0728b7f1, | 71 | 0xf002ecb9, |
| 72 | 0xb906b4b6, | 72 | 0x07f11fc9, |
| 73 | 0xc9f002ec, | 73 | 0x03f0ca00, |
| 74 | 0x00bcd01f, | 74 | 0x000cd001, |
| 75 | /* 0x0078: nv_rd32_wait */ | 75 | /* 0x007a: nv_rd32_wait */ |
| 76 | 0xc800bccf, | 76 | 0xc7f104bd, |
| 77 | 0x1bf41fcc, | 77 | 0xc3f0ca00, |
| 78 | 0x06a7f0fa, | 78 | 0x00cccf01, |
| 79 | 0x010921f5, | 79 | 0xf41fccc8, |
| 80 | 0xf840bfcf, | 80 | 0xa7f0f31b, |
| 81 | /* 0x008d: nv_wr32 */ | 81 | 0x1021f506, |
| 82 | 0x28b7f100, | 82 | 0x00f7f101, |
| 83 | 0x06b4b607, | 83 | 0x01f3f0cb, |
| 84 | 0xb980bfd0, | 84 | 0xf800ffcf, |
| 85 | 0xc9f002ec, | 85 | /* 0x009d: nv_wr32 */ |
| 86 | 0x1ec9f01f, | 86 | 0x0007f100, |
| 87 | /* 0x00a3: nv_wr32_wait */ | 87 | 0x0103f0cc, |
| 88 | 0xcf00bcd0, | 88 | 0xbd000fd0, |
| 89 | 0xccc800bc, | 89 | 0x02ecb904, |
| 90 | 0xfa1bf41f, | 90 | 0xf01fc9f0, |
| 91 | /* 0x00ae: watchdog_reset */ | 91 | 0x07f11ec9, |
| 92 | 0x87f100f8, | 92 | 0x03f0ca00, |
| 93 | 0x84b60430, | 93 | 0x000cd001, |
| 94 | 0x1ff9f006, | 94 | /* 0x00be: nv_wr32_wait */ |
| 95 | 0xf8008fd0, | 95 | 0xc7f104bd, |
| 96 | /* 0x00bd: watchdog_clear */ | 96 | 0xc3f0ca00, |
| 97 | 0x3087f100, | 97 | 0x00cccf01, |
| 98 | 0x0684b604, | 98 | 0xf41fccc8, |
| 99 | 0xf80080d0, | 99 | 0x00f8f31b, |
| 100 | /* 0x00c9: wait_donez */ | 100 | /* 0x00d0: wait_donez */ |
| 101 | 0xf094bd00, | 101 | 0x99f094bd, |
| 102 | 0x07f10099, | 102 | 0x0007f100, |
| 103 | 0x03f00f00, | 103 | 0x0203f00f, |
| 104 | 0x0009d002, | 104 | 0xbd0009d0, |
| 105 | 0x07f104bd, | 105 | 0x0007f104, |
| 106 | 0x03f00600, | 106 | 0x0203f006, |
| 107 | 0x000ad002, | 107 | 0xbd000ad0, |
| 108 | /* 0x00e6: wait_donez_ne */ | 108 | /* 0x00ed: wait_donez_ne */ |
| 109 | 0x87f104bd, | 109 | 0x0087f104, |
| 110 | 0x83f00000, | 110 | 0x0183f000, |
| 111 | 0x0088cf01, | 111 | 0xff0088cf, |
| 112 | 0xf4888aff, | 112 | 0x1bf4888a, |
| 113 | 0x94bdf31b, | 113 | 0xf094bdf3, |
| 114 | 0xf10099f0, | ||
| 115 | 0xf0170007, | ||
| 116 | 0x09d00203, | ||
| 117 | 0xf804bd00, | ||
| 118 | /* 0x0109: wait_doneo */ | ||
| 119 | 0xf094bd00, | ||
| 120 | 0x07f10099, | 114 | 0x07f10099, |
| 121 | 0x03f00f00, | 115 | 0x03f01700, |
| 122 | 0x0009d002, | 116 | 0x0009d002, |
| 123 | 0x87f104bd, | 117 | 0x00f804bd, |
| 124 | 0x84b60818, | 118 | /* 0x0110: wait_doneo */ |
| 125 | 0x008ad006, | ||
| 126 | /* 0x0124: wait_doneo_e */ | ||
| 127 | 0x040087f1, | ||
| 128 | 0xcf0684b6, | ||
| 129 | 0x8aff0088, | ||
| 130 | 0xf30bf488, | ||
| 131 | 0x99f094bd, | 119 | 0x99f094bd, |
| 132 | 0x0007f100, | 120 | 0x0007f100, |
| 133 | 0x0203f017, | 121 | 0x0203f00f, |
| 134 | 0xbd0009d0, | 122 | 0xbd0009d0, |
| 135 | /* 0x0147: mmctx_size */ | 123 | 0x0007f104, |
| 136 | 0xbd00f804, | 124 | 0x0203f006, |
| 137 | /* 0x0149: nv_mmctx_size_loop */ | 125 | 0xbd000ad0, |
| 138 | 0x00e89894, | 126 | /* 0x012d: wait_doneo_e */ |
| 139 | 0xb61a85b6, | 127 | 0x0087f104, |
| 140 | 0x84b60180, | 128 | 0x0183f000, |
| 141 | 0x0098bb02, | 129 | 0xff0088cf, |
| 142 | 0xb804e0b6, | 130 | 0x0bf4888a, |
| 143 | 0x1bf404ef, | 131 | 0xf094bdf3, |
| 144 | 0x029fb9eb, | 132 | 0x07f10099, |
| 145 | /* 0x0166: mmctx_xfer */ | 133 | 0x03f01700, |
| 146 | 0x94bd00f8, | 134 | 0x0009d002, |
| 147 | 0xf10199f0, | 135 | 0x00f804bd, |
| 148 | 0xf00f0007, | 136 | /* 0x0150: mmctx_size */ |
| 149 | 0x09d00203, | 137 | /* 0x0152: nv_mmctx_size_loop */ |
| 150 | 0xf104bd00, | 138 | 0xe89894bd, |
| 151 | 0xb6071087, | 139 | 0x1a85b600, |
| 152 | 0x94bd0684, | 140 | 0xb60180b6, |
| 153 | 0xf405bbfd, | 141 | 0x98bb0284, |
| 154 | 0x8bd0090b, | 142 | 0x04e0b600, |
| 155 | 0x0099f000, | 143 | 0xf404efb8, |
| 156 | /* 0x018c: mmctx_base_disabled */ | 144 | 0x9fb9eb1b, |
| 157 | 0xf405eefd, | 145 | /* 0x016f: mmctx_xfer */ |
| 158 | 0x8ed00c0b, | 146 | 0xbd00f802, |
| 159 | 0xc08fd080, | 147 | 0x0199f094, |
| 160 | /* 0x019b: mmctx_multi_disabled */ | 148 | 0x0f0007f1, |
| 161 | 0xb70199f0, | 149 | 0xd00203f0, |
| 162 | 0xc8010080, | 150 | 0x04bd0009, |
| 151 | 0xbbfd94bd, | ||
| 152 | 0x120bf405, | ||
| 153 | 0xc40007f1, | ||
| 154 | 0xd00103f0, | ||
| 155 | 0x04bd000b, | ||
| 156 | /* 0x0197: mmctx_base_disabled */ | ||
| 157 | 0xfd0099f0, | ||
| 158 | 0x0bf405ee, | ||
| 159 | 0x0007f11e, | ||
| 160 | 0x0103f0c6, | ||
| 161 | 0xbd000ed0, | ||
| 162 | 0x0007f104, | ||
| 163 | 0x0103f0c7, | ||
| 164 | 0xbd000fd0, | ||
| 165 | 0x0199f004, | ||
| 166 | /* 0x01b8: mmctx_multi_disabled */ | ||
| 167 | 0xb600abc8, | ||
| 168 | 0xb9f010b4, | ||
| 169 | 0x01aec80c, | ||
| 170 | 0xfd11e4b6, | ||
| 171 | 0x07f105be, | ||
| 172 | 0x03f0c500, | ||
| 173 | 0x000bd001, | ||
| 174 | /* 0x01d6: mmctx_exec_loop */ | ||
| 175 | /* 0x01d6: mmctx_wait_free */ | ||
| 176 | 0xe7f104bd, | ||
| 177 | 0xe3f0c500, | ||
| 178 | 0x00eecf01, | ||
| 179 | 0xf41fe4f0, | ||
| 180 | 0xce98f30b, | ||
| 181 | 0x05e9fd00, | ||
| 182 | 0xc80007f1, | ||
| 183 | 0xd00103f0, | ||
| 184 | 0x04bd000e, | ||
| 185 | 0xb804c0b6, | ||
| 186 | 0x1bf404cd, | ||
| 187 | 0x02abc8d8, | ||
| 188 | /* 0x0207: mmctx_fini_wait */ | ||
| 189 | 0xf11f1bf4, | ||
| 190 | 0xf0c500b7, | ||
| 191 | 0xbbcf01b3, | ||
| 192 | 0x1fb4f000, | ||
| 193 | 0xf410b4b0, | ||
| 194 | 0xa7f0f01b, | ||
| 195 | 0xd021f402, | ||
| 196 | /* 0x0223: mmctx_stop */ | ||
| 197 | 0xc82b0ef4, | ||
| 163 | 0xb4b600ab, | 198 | 0xb4b600ab, |
| 164 | 0x0cb9f010, | 199 | 0x0cb9f010, |
| 165 | 0xb601aec8, | 200 | 0xf112b9f0, |
| 166 | 0xbefd11e4, | 201 | 0xf0c50007, |
| 167 | 0x008bd005, | 202 | 0x0bd00103, |
| 168 | /* 0x01b4: mmctx_exec_loop */ | 203 | /* 0x023b: mmctx_stop_wait */ |
| 169 | /* 0x01b4: mmctx_wait_free */ | 204 | 0xf104bd00, |
| 170 | 0xf0008ecf, | 205 | 0xf0c500b7, |
| 171 | 0x0bf41fe4, | 206 | 0xbbcf01b3, |
| 172 | 0x00ce98fa, | 207 | 0x12bbc800, |
| 173 | 0xd005e9fd, | 208 | /* 0x024b: mmctx_done */ |
| 174 | 0xc0b6c08e, | 209 | 0xbdf31bf4, |
| 175 | 0x04cdb804, | 210 | 0x0199f094, |
| 176 | 0xc8e81bf4, | 211 | 0x170007f1, |
| 177 | 0x1bf402ab, | 212 | 0xd00203f0, |
| 178 | /* 0x01d5: mmctx_fini_wait */ | 213 | 0x04bd0009, |
| 179 | 0x008bcf18, | 214 | /* 0x025e: strand_wait */ |
| 180 | 0xb01fb4f0, | 215 | 0xa0f900f8, |
| 181 | 0x1bf410b4, | 216 | 0xf402a7f0, |
| 182 | 0x02a7f0f7, | 217 | 0xa0fcd021, |
| 183 | 0xf4c921f4, | 218 | /* 0x026a: strand_pre */ |
| 184 | /* 0x01ea: mmctx_stop */ | 219 | 0x97f000f8, |
| 185 | 0xabc81b0e, | 220 | 0xfc07f10c, |
| 186 | 0x10b4b600, | 221 | 0x0203f04a, |
| 187 | 0xf00cb9f0, | 222 | 0xbd0009d0, |
| 188 | 0x8bd012b9, | 223 | 0x5e21f504, |
| 189 | /* 0x01f9: mmctx_stop_wait */ | 224 | /* 0x027f: strand_post */ |
| 190 | 0x008bcf00, | 225 | 0xf000f802, |
| 191 | 0xf412bbc8, | 226 | 0x07f10d97, |
| 192 | /* 0x0202: mmctx_done */ | 227 | 0x03f04afc, |
| 193 | 0x94bdfa1b, | ||
| 194 | 0xf10199f0, | ||
| 195 | 0xf0170007, | ||
| 196 | 0x09d00203, | ||
| 197 | 0xf804bd00, | ||
| 198 | /* 0x0215: strand_wait */ | ||
| 199 | 0xf0a0f900, | ||
| 200 | 0x21f402a7, | ||
| 201 | 0xf8a0fcc9, | ||
| 202 | /* 0x0221: strand_pre */ | ||
| 203 | 0xfc87f100, | ||
| 204 | 0x0283f04a, | ||
| 205 | 0xd00c97f0, | ||
| 206 | 0x21f50089, | ||
| 207 | 0x00f80215, | ||
| 208 | /* 0x0234: strand_post */ | ||
| 209 | 0x4afc87f1, | ||
| 210 | 0xf00283f0, | ||
| 211 | 0x89d00d97, | ||
| 212 | 0x1521f500, | ||
| 213 | /* 0x0247: strand_set */ | ||
| 214 | 0xf100f802, | ||
| 215 | 0xf04ffca7, | ||
| 216 | 0xaba202a3, | ||
| 217 | 0xc7f00500, | ||
| 218 | 0x00acd00f, | ||
| 219 | 0xd00bc7f0, | ||
| 220 | 0x21f500bc, | ||
| 221 | 0xaed00215, | ||
| 222 | 0x0ac7f000, | ||
| 223 | 0xf500bcd0, | ||
| 224 | 0xf8021521, | ||
| 225 | /* 0x0271: strand_ctx_init */ | ||
| 226 | 0xf094bd00, | ||
| 227 | 0x07f10399, | ||
| 228 | 0x03f00f00, | ||
| 229 | 0x0009d002, | 228 | 0x0009d002, |
| 230 | 0x21f504bd, | 229 | 0x21f504bd, |
| 231 | 0xe7f00221, | 230 | 0x00f8025e, |
| 232 | 0x4721f503, | 231 | /* 0x0294: strand_set */ |
| 233 | 0xfca7f102, | 232 | 0xf10fc7f0, |
| 234 | 0x02a3f046, | 233 | 0xf04ffc07, |
| 235 | 0x0400aba0, | 234 | 0x0cd00203, |
| 236 | 0xf040a0d0, | 235 | 0xf004bd00, |
| 237 | 0xbcd001c7, | 236 | 0x07f10bc7, |
| 238 | 0x1521f500, | 237 | 0x03f04afc, |
| 239 | 0x010c9202, | 238 | 0x000cd002, |
| 240 | 0xf000acd0, | 239 | 0x07f104bd, |
| 241 | 0xbcd002c7, | 240 | 0x03f04ffc, |
| 242 | 0x1521f500, | 241 | 0x000ed002, |
| 243 | 0x3421f502, | 242 | 0xc7f004bd, |
| 244 | 0x8087f102, | 243 | 0xfc07f10a, |
| 245 | 0x0684b608, | 244 | 0x0203f04a, |
| 246 | 0xb70089cf, | 245 | 0xbd000cd0, |
| 247 | 0x95220080, | 246 | 0x5e21f504, |
| 248 | /* 0x02ca: ctx_init_strand_loop */ | 247 | /* 0x02d3: strand_ctx_init */ |
| 248 | 0xbd00f802, | ||
| 249 | 0x0399f094, | ||
| 250 | 0x0f0007f1, | ||
| 251 | 0xd00203f0, | ||
| 252 | 0x04bd0009, | ||
| 253 | 0x026a21f5, | ||
| 254 | 0xf503e7f0, | ||
| 255 | 0xbd029421, | ||
| 256 | 0xfc07f1c4, | ||
| 257 | 0x0203f047, | ||
| 258 | 0xbd000cd0, | ||
| 259 | 0x01c7f004, | ||
| 260 | 0x4afc07f1, | ||
| 261 | 0xd00203f0, | ||
| 262 | 0x04bd000c, | ||
| 263 | 0x025e21f5, | ||
| 264 | 0xf1010c92, | ||
| 265 | 0xf046fc07, | ||
| 266 | 0x0cd00203, | ||
| 267 | 0xf004bd00, | ||
| 268 | 0x07f102c7, | ||
| 269 | 0x03f04afc, | ||
| 270 | 0x000cd002, | ||
| 271 | 0x21f504bd, | ||
| 272 | 0x21f5025e, | ||
| 273 | 0x87f1027f, | ||
| 274 | 0x83f04200, | ||
| 275 | 0x0097f102, | ||
| 276 | 0x0293f020, | ||
| 277 | 0x950099cf, | ||
| 278 | /* 0x034a: ctx_init_strand_loop */ | ||
| 249 | 0x8ed008fe, | 279 | 0x8ed008fe, |
| 250 | 0x408ed000, | 280 | 0x408ed000, |
| 251 | 0xb6808acf, | 281 | 0xb6808acf, |
| @@ -259,167 +289,199 @@ uint32_t nvc0_grgpc_code[] = { | |||
| 259 | 0x170007f1, | 289 | 0x170007f1, |
| 260 | 0xd00203f0, | 290 | 0xd00203f0, |
| 261 | 0x04bd0009, | 291 | 0x04bd0009, |
| 262 | /* 0x02fe: error */ | 292 | /* 0x037e: error */ |
| 263 | 0xe0f900f8, | 293 | 0xe0f900f8, |
| 264 | 0x9814e7f1, | 294 | 0xf102ffb9, |
| 265 | 0xf440e3f0, | 295 | 0xf09814e7, |
| 266 | 0xe0b78d21, | 296 | 0x21f440e3, |
| 267 | 0xf7f0041c, | 297 | 0x01f7f09d, |
| 268 | 0x8d21f401, | 298 | 0xf102ffb9, |
| 269 | 0x00f8e0fc, | 299 | 0xf09c1ce7, |
| 270 | /* 0x0318: init */ | 300 | 0x21f440e3, |
| 271 | 0x04fe04bd, | 301 | 0xf8e0fc9d, |
| 272 | 0x0017f100, | 302 | /* 0x03a1: init */ |
| 273 | 0x0227f012, | 303 | 0xfe04bd00, |
| 274 | 0xf10012d0, | 304 | 0x27f00004, |
| 275 | 0xfe042617, | 305 | 0x0007f102, |
| 276 | 0x17f10010, | 306 | 0x0003f012, |
| 277 | 0x10d00400, | 307 | 0xbd0002d0, |
| 278 | 0x0427f0c0, | 308 | 0xd517f104, |
| 279 | 0xf40012d0, | 309 | 0x0010fe04, |
| 280 | 0x17f11031, | 310 | 0x070007f1, |
| 281 | 0x14b60608, | 311 | 0xd00003f0, |
| 282 | 0x0012cf06, | 312 | 0x04bd0000, |
| 313 | 0xf10427f0, | ||
| 314 | 0xf0040007, | ||
| 315 | 0x02d00003, | ||
| 316 | 0xf404bd00, | ||
| 317 | 0x27f11031, | ||
| 318 | 0x23f08200, | ||
| 319 | 0x0022cf01, | ||
| 283 | 0xf00137f0, | 320 | 0xf00137f0, |
| 284 | 0x32bb1f24, | 321 | 0x32bb1f24, |
| 285 | 0x0132b604, | 322 | 0x0132b604, |
| 286 | 0x80050280, | 323 | 0x80050280, |
| 287 | 0x10b70603, | 324 | 0x27f10603, |
| 288 | 0x12cf0400, | 325 | 0x23f08600, |
| 289 | 0x04028000, | 326 | 0x0022cf01, |
| 290 | 0x010027f1, | 327 | 0xf1040280, |
| 291 | 0xcf0223f0, | 328 | 0xf0010027, |
| 292 | 0x34bd0022, | 329 | 0x22cf0223, |
| 293 | 0x070047f1, | 330 | 0x9534bd00, |
| 294 | 0x950644b6, | 331 | 0x07f10825, |
| 295 | 0x45d00825, | 332 | 0x03f0c000, |
| 296 | 0x4045d000, | 333 | 0x0005d001, |
| 297 | 0x98000e98, | 334 | 0x07f104bd, |
| 298 | 0x21f5010f, | 335 | 0x03f0c100, |
| 299 | 0x2fbb0147, | 336 | 0x0005d001, |
| 300 | 0x003fbb00, | 337 | 0x0e9804bd, |
| 301 | 0x98010e98, | 338 | 0x010f9800, |
| 302 | 0x21f5020f, | 339 | 0x015021f5, |
| 303 | 0x0e980147, | 340 | 0xbb002fbb, |
| 304 | 0x00effd05, | 341 | 0x0e98003f, |
| 305 | 0xbb002ebb, | 342 | 0x020f9801, |
| 306 | 0x40b7003e, | 343 | 0x015021f5, |
| 307 | 0x35b61300, | 344 | 0xfd050e98, |
| 308 | 0x0043d002, | 345 | 0x2ebb00ef, |
| 309 | 0xb60825b6, | 346 | 0x003ebb00, |
| 310 | 0x20b60635, | 347 | 0xf10235b6, |
| 311 | 0x0130b601, | 348 | 0xf0d30007, |
| 312 | 0xb60824b6, | 349 | 0x03d00103, |
| 313 | 0x2fb90834, | 350 | 0xb604bd00, |
| 314 | 0x7121f502, | 351 | 0x35b60825, |
| 315 | 0x003fbb02, | 352 | 0x0120b606, |
| 316 | 0x010007f1, | 353 | 0xb60130b6, |
| 354 | 0x34b60824, | ||
| 355 | 0x022fb908, | ||
| 356 | 0x02d321f5, | ||
| 357 | 0xf1003fbb, | ||
| 358 | 0xf0010007, | ||
| 359 | 0x03d00203, | ||
| 360 | 0xbd04bd00, | ||
| 361 | 0x1f29f024, | ||
| 362 | 0x080007f1, | ||
| 317 | 0xd00203f0, | 363 | 0xd00203f0, |
| 318 | 0x04bd0003, | 364 | 0x04bd0002, |
| 319 | 0x29f024bd, | 365 | /* 0x0498: main */ |
| 320 | 0x0007f11f, | 366 | 0xf40031f4, |
| 321 | 0x0203f008, | 367 | 0xd7f00028, |
| 322 | 0xbd0002d0, | 368 | 0x3921f41c, |
| 323 | /* 0x03e9: main */ | 369 | 0xb0f401f4, |
| 324 | 0x0031f404, | 370 | 0x18f404e4, |
| 325 | 0xf00028f4, | 371 | 0x0181fe1e, |
| 326 | 0x21f41cd7, | 372 | 0xbd0627f0, |
| 327 | 0xf401f439, | 373 | 0x0412fd20, |
| 328 | 0xf404e4b0, | 374 | 0xfd01e4b6, |
| 329 | 0x81fe1e18, | 375 | 0x18fe051e, |
| 330 | 0x0627f001, | 376 | 0x8d21f500, |
| 331 | 0x12fd20bd, | 377 | 0xd30ef405, |
| 332 | 0x01e4b604, | 378 | /* 0x04c8: main_not_ctx_xfer */ |
| 333 | 0xfe051efd, | 379 | 0xf010ef94, |
| 334 | 0x21f50018, | 380 | 0x21f501f5, |
| 335 | 0x0ef404ad, | 381 | 0x0ef4037e, |
| 336 | /* 0x0419: main_not_ctx_xfer */ | 382 | /* 0x04d5: ih */ |
| 337 | 0x10ef94d3, | 383 | 0xfe80f9c6, |
| 338 | 0xf501f5f0, | 384 | 0x80f90188, |
| 339 | 0xf402fe21, | 385 | 0xa0f990f9, |
| 340 | /* 0x0426: ih */ | 386 | 0xd0f9b0f9, |
| 341 | 0x80f9c60e, | 387 | 0xf0f9e0f9, |
| 342 | 0xf90188fe, | 388 | 0xa7f104bd, |
| 343 | 0xf990f980, | 389 | 0xa3f00200, |
| 344 | 0xf9b0f9a0, | 390 | 0x00aacf00, |
| 345 | 0xf9e0f9d0, | 391 | 0xf404abc4, |
| 346 | 0xcf04bdf0, | 392 | 0xd7f02c0b, |
| 347 | 0xabc4800a, | 393 | 0x00e7f11c, |
| 348 | 0x1d0bf404, | 394 | 0x00e3f01a, |
| 349 | 0x1900b7f1, | 395 | 0xf100eecf, |
| 350 | 0xcf1cd7f0, | 396 | 0xf01900f7, |
| 351 | 0xbfcf40be, | 397 | 0xffcf00f3, |
| 352 | 0x0421f400, | 398 | 0x0421f400, |
| 353 | 0x0400b0b7, | 399 | 0xf101e7f0, |
| 354 | 0xd001e7f0, | 400 | 0xf01d0007, |
| 355 | /* 0x045e: ih_no_fifo */ | 401 | 0x0ed00003, |
| 356 | 0x0ad000be, | 402 | /* 0x0523: ih_no_fifo */ |
| 357 | 0xfcf0fc40, | 403 | 0xf104bd00, |
| 358 | 0xfcd0fce0, | 404 | 0xf0010007, |
| 359 | 0xfca0fcb0, | 405 | 0x0ad00003, |
| 360 | 0xfe80fc90, | 406 | 0xfc04bd00, |
| 361 | 0x80fc0088, | 407 | 0xfce0fcf0, |
| 362 | 0xf80032f4, | 408 | 0xfcb0fcd0, |
| 363 | /* 0x0479: hub_barrier_done */ | 409 | 0xfc90fca0, |
| 364 | 0x01f7f001, | 410 | 0x0088fe80, |
| 365 | 0xbb040e98, | 411 | 0x32f480fc, |
| 366 | 0xe7f104fe, | 412 | /* 0x0547: hub_barrier_done */ |
| 367 | 0xe3f09418, | 413 | 0xf001f800, |
| 368 | 0x8d21f440, | 414 | 0x0e9801f7, |
| 369 | /* 0x048e: ctx_redswitch */ | 415 | 0x04febb04, |
| 370 | 0xe7f100f8, | 416 | 0xf102ffb9, |
| 371 | 0xe4b60614, | 417 | 0xf09418e7, |
| 372 | 0x20f7f006, | 418 | 0x21f440e3, |
| 373 | 0xf000efd0, | 419 | /* 0x055f: ctx_redswitch */ |
| 374 | /* 0x049e: ctx_redswitch_delay */ | 420 | 0xf000f89d, |
| 375 | 0xf2b608f7, | 421 | 0x07f120f7, |
| 376 | 0xfd1bf401, | 422 | 0x03f08500, |
| 377 | 0x0a20f7f1, | 423 | 0x000fd001, |
| 378 | 0xf800efd0, | 424 | 0xe7f004bd, |
| 379 | /* 0x04ad: ctx_xfer */ | 425 | /* 0x0571: ctx_redswitch_delay */ |
| 380 | 0x0417f100, | 426 | 0x01e2b608, |
| 381 | 0x0614b60a, | 427 | 0xf1fd1bf4, |
| 382 | 0xf4001fd0, | 428 | 0xf10800f5, |
| 383 | 0x21f50711, | 429 | 0xf10200f5, |
| 384 | /* 0x04be: ctx_xfer_not_load */ | 430 | 0xf0850007, |
| 385 | 0x17f1048e, | 431 | 0x0fd00103, |
| 386 | 0x13f04afc, | 432 | 0xf804bd00, |
| 387 | 0x0c27f002, | 433 | /* 0x058d: ctx_xfer */ |
| 388 | 0xf50012d0, | 434 | 0x0007f100, |
| 389 | 0xf1021521, | 435 | 0x0203f081, |
| 390 | 0xf047fc27, | 436 | 0xbd000fd0, |
| 391 | 0x20d00223, | 437 | 0x0711f404, |
| 392 | 0x012cf000, | 438 | 0x055f21f5, |
| 393 | 0xd00320b6, | 439 | /* 0x05a0: ctx_xfer_not_load */ |
| 394 | 0xacf00012, | 440 | 0x026a21f5, |
| 395 | 0x02a5f001, | 441 | 0x07f124bd, |
| 396 | 0xf000b7f0, | 442 | 0x03f047fc, |
| 397 | 0x0c9850b3, | 443 | 0x0002d002, |
| 398 | 0x0fc4b604, | 444 | 0x2cf004bd, |
| 399 | 0x9800bcbb, | 445 | 0x0320b601, |
| 400 | 0x0d98000c, | 446 | 0x4afc07f1, |
| 401 | 0x00e7f001, | 447 | 0xd00203f0, |
| 402 | 0x016621f5, | 448 | 0x04bd0002, |
| 403 | 0xf001acf0, | 449 | 0xf001acf0, |
| 404 | 0xb7f104a5, | 450 | 0xb7f102a5, |
| 405 | 0xb3f04000, | 451 | 0xb3f00000, |
| 406 | 0x040c9850, | 452 | 0x040c9850, |
| 407 | 0xbb0fc4b6, | 453 | 0xbb0fc4b6, |
| 408 | 0x0c9800bc, | 454 | 0x0c9800bc, |
| 409 | 0x020d9801, | 455 | 0x010d9800, |
| 410 | 0xf1060f98, | 456 | 0xf500e7f0, |
| 411 | 0xf50800e7, | 457 | 0xf0016f21, |
| 412 | 0xf5016621, | 458 | 0xa5f001ac, |
| 413 | 0xf4021521, | 459 | 0x00b7f104, |
| 414 | 0x12f40601, | 460 | 0x50b3f040, |
| 415 | /* 0x0535: ctx_xfer_post */ | 461 | 0xb6040c98, |
| 416 | 0xfc17f114, | 462 | 0xbcbb0fc4, |
| 417 | 0x0213f04a, | 463 | 0x010c9800, |
| 418 | 0xd00d27f0, | 464 | 0x98020d98, |
| 419 | 0x21f50012, | 465 | 0xe7f1060f, |
| 420 | /* 0x0546: ctx_xfer_done */ | 466 | 0x21f50800, |
| 421 | 0x21f50215, | 467 | 0x21f5016f, |
| 422 | 0x00f80479, | 468 | 0x01f4025e, |
| 469 | 0x0712f406, | ||
| 470 | /* 0x0618: ctx_xfer_post */ | ||
| 471 | 0x027f21f5, | ||
| 472 | /* 0x061c: ctx_xfer_done */ | ||
| 473 | 0x054721f5, | ||
| 474 | 0x000000f8, | ||
| 475 | 0x00000000, | ||
| 476 | 0x00000000, | ||
| 477 | 0x00000000, | ||
| 478 | 0x00000000, | ||
| 479 | 0x00000000, | ||
| 480 | 0x00000000, | ||
| 481 | 0x00000000, | ||
| 482 | 0x00000000, | ||
| 483 | 0x00000000, | ||
| 484 | 0x00000000, | ||
| 423 | 0x00000000, | 485 | 0x00000000, |
| 424 | 0x00000000, | 486 | 0x00000000, |
| 425 | 0x00000000, | 487 | 0x00000000, |
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvd7.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvd7.fuc.h index dd346c2a1624..84dd32db28a0 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvd7.fuc.h +++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvd7.fuc.h | |||
| @@ -41,14 +41,14 @@ uint32_t nvd7_grgpc_data[] = { | |||
| 41 | }; | 41 | }; |
| 42 | 42 | ||
| 43 | uint32_t nvd7_grgpc_code[] = { | 43 | uint32_t nvd7_grgpc_code[] = { |
| 44 | 0x03180ef5, | 44 | 0x03a10ef5, |
| 45 | /* 0x0004: queue_put */ | 45 | /* 0x0004: queue_put */ |
| 46 | 0x9800d898, | 46 | 0x9800d898, |
| 47 | 0x86f001d9, | 47 | 0x86f001d9, |
| 48 | 0x0489b808, | 48 | 0x0489b808, |
| 49 | 0xf00c1bf4, | 49 | 0xf00c1bf4, |
| 50 | 0x21f502f7, | 50 | 0x21f502f7, |
| 51 | 0x00f802fe, | 51 | 0x00f8037e, |
| 52 | /* 0x001c: queue_put_next */ | 52 | /* 0x001c: queue_put_next */ |
| 53 | 0xb60798c4, | 53 | 0xb60798c4, |
| 54 | 0x8dbb0384, | 54 | 0x8dbb0384, |
| @@ -72,184 +72,214 @@ uint32_t nvd7_grgpc_code[] = { | |||
| 72 | /* 0x0066: queue_get_done */ | 72 | /* 0x0066: queue_get_done */ |
| 73 | 0x00f80132, | 73 | 0x00f80132, |
| 74 | /* 0x0068: nv_rd32 */ | 74 | /* 0x0068: nv_rd32 */ |
| 75 | 0x0728b7f1, | 75 | 0xf002ecb9, |
| 76 | 0xb906b4b6, | 76 | 0x07f11fc9, |
| 77 | 0xc9f002ec, | 77 | 0x03f0ca00, |
| 78 | 0x00bcd01f, | 78 | 0x000cd001, |
| 79 | /* 0x0078: nv_rd32_wait */ | 79 | /* 0x007a: nv_rd32_wait */ |
| 80 | 0xc800bccf, | 80 | 0xc7f104bd, |
| 81 | 0x1bf41fcc, | 81 | 0xc3f0ca00, |
| 82 | 0x06a7f0fa, | 82 | 0x00cccf01, |
| 83 | 0x010921f5, | 83 | 0xf41fccc8, |
| 84 | 0xf840bfcf, | 84 | 0xa7f0f31b, |
| 85 | /* 0x008d: nv_wr32 */ | 85 | 0x1021f506, |
| 86 | 0x28b7f100, | 86 | 0x00f7f101, |
| 87 | 0x06b4b607, | 87 | 0x01f3f0cb, |
| 88 | 0xb980bfd0, | 88 | 0xf800ffcf, |
| 89 | 0xc9f002ec, | 89 | /* 0x009d: nv_wr32 */ |
| 90 | 0x1ec9f01f, | 90 | 0x0007f100, |
| 91 | /* 0x00a3: nv_wr32_wait */ | 91 | 0x0103f0cc, |
| 92 | 0xcf00bcd0, | 92 | 0xbd000fd0, |
| 93 | 0xccc800bc, | 93 | 0x02ecb904, |
| 94 | 0xfa1bf41f, | 94 | 0xf01fc9f0, |
| 95 | /* 0x00ae: watchdog_reset */ | 95 | 0x07f11ec9, |
| 96 | 0x87f100f8, | 96 | 0x03f0ca00, |
| 97 | 0x84b60430, | 97 | 0x000cd001, |
| 98 | 0x1ff9f006, | 98 | /* 0x00be: nv_wr32_wait */ |
| 99 | 0xf8008fd0, | 99 | 0xc7f104bd, |
| 100 | /* 0x00bd: watchdog_clear */ | 100 | 0xc3f0ca00, |
| 101 | 0x3087f100, | 101 | 0x00cccf01, |
| 102 | 0x0684b604, | 102 | 0xf41fccc8, |
| 103 | 0xf80080d0, | 103 | 0x00f8f31b, |
| 104 | /* 0x00c9: wait_donez */ | 104 | /* 0x00d0: wait_donez */ |
| 105 | 0xf094bd00, | 105 | 0x99f094bd, |
| 106 | 0x07f10099, | 106 | 0x0007f100, |
| 107 | 0x03f00f00, | 107 | 0x0203f00f, |
| 108 | 0x0009d002, | 108 | 0xbd0009d0, |
| 109 | 0x07f104bd, | 109 | 0x0007f104, |
| 110 | 0x03f00600, | 110 | 0x0203f006, |
| 111 | 0x000ad002, | 111 | 0xbd000ad0, |
| 112 | /* 0x00e6: wait_donez_ne */ | 112 | /* 0x00ed: wait_donez_ne */ |
| 113 | 0x87f104bd, | 113 | 0x0087f104, |
| 114 | 0x83f00000, | 114 | 0x0183f000, |
| 115 | 0x0088cf01, | 115 | 0xff0088cf, |
| 116 | 0xf4888aff, | 116 | 0x1bf4888a, |
| 117 | 0x94bdf31b, | 117 | 0xf094bdf3, |
| 118 | 0xf10099f0, | ||
| 119 | 0xf0170007, | ||
| 120 | 0x09d00203, | ||
| 121 | 0xf804bd00, | ||
| 122 | /* 0x0109: wait_doneo */ | ||
| 123 | 0xf094bd00, | ||
| 124 | 0x07f10099, | 118 | 0x07f10099, |
| 125 | 0x03f00f00, | 119 | 0x03f01700, |
| 126 | 0x0009d002, | 120 | 0x0009d002, |
| 127 | 0x87f104bd, | 121 | 0x00f804bd, |
| 128 | 0x84b60818, | 122 | /* 0x0110: wait_doneo */ |
| 129 | 0x008ad006, | ||
| 130 | /* 0x0124: wait_doneo_e */ | ||
| 131 | 0x040087f1, | ||
| 132 | 0xcf0684b6, | ||
| 133 | 0x8aff0088, | ||
| 134 | 0xf30bf488, | ||
| 135 | 0x99f094bd, | 123 | 0x99f094bd, |
| 136 | 0x0007f100, | 124 | 0x0007f100, |
| 137 | 0x0203f017, | 125 | 0x0203f00f, |
| 138 | 0xbd0009d0, | 126 | 0xbd0009d0, |
| 139 | /* 0x0147: mmctx_size */ | 127 | 0x0007f104, |
| 140 | 0xbd00f804, | 128 | 0x0203f006, |
| 141 | /* 0x0149: nv_mmctx_size_loop */ | 129 | 0xbd000ad0, |
| 142 | 0x00e89894, | 130 | /* 0x012d: wait_doneo_e */ |
| 143 | 0xb61a85b6, | 131 | 0x0087f104, |
| 144 | 0x84b60180, | 132 | 0x0183f000, |
| 145 | 0x0098bb02, | 133 | 0xff0088cf, |
| 146 | 0xb804e0b6, | 134 | 0x0bf4888a, |
| 147 | 0x1bf404ef, | 135 | 0xf094bdf3, |
| 148 | 0x029fb9eb, | 136 | 0x07f10099, |
| 149 | /* 0x0166: mmctx_xfer */ | 137 | 0x03f01700, |
| 150 | 0x94bd00f8, | 138 | 0x0009d002, |
| 151 | 0xf10199f0, | 139 | 0x00f804bd, |
| 152 | 0xf00f0007, | 140 | /* 0x0150: mmctx_size */ |
| 153 | 0x09d00203, | 141 | /* 0x0152: nv_mmctx_size_loop */ |
| 154 | 0xf104bd00, | 142 | 0xe89894bd, |
| 155 | 0xb6071087, | 143 | 0x1a85b600, |
| 156 | 0x94bd0684, | 144 | 0xb60180b6, |
| 157 | 0xf405bbfd, | 145 | 0x98bb0284, |
| 158 | 0x8bd0090b, | 146 | 0x04e0b600, |
| 159 | 0x0099f000, | 147 | 0xf404efb8, |
| 160 | /* 0x018c: mmctx_base_disabled */ | 148 | 0x9fb9eb1b, |
| 161 | 0xf405eefd, | 149 | /* 0x016f: mmctx_xfer */ |
| 162 | 0x8ed00c0b, | 150 | 0xbd00f802, |
| 163 | 0xc08fd080, | 151 | 0x0199f094, |
| 164 | /* 0x019b: mmctx_multi_disabled */ | 152 | 0x0f0007f1, |
| 165 | 0xb70199f0, | 153 | 0xd00203f0, |
| 166 | 0xc8010080, | 154 | 0x04bd0009, |
| 155 | 0xbbfd94bd, | ||
| 156 | 0x120bf405, | ||
| 157 | 0xc40007f1, | ||
| 158 | 0xd00103f0, | ||
| 159 | 0x04bd000b, | ||
| 160 | /* 0x0197: mmctx_base_disabled */ | ||
| 161 | 0xfd0099f0, | ||
| 162 | 0x0bf405ee, | ||
| 163 | 0x0007f11e, | ||
| 164 | 0x0103f0c6, | ||
| 165 | 0xbd000ed0, | ||
| 166 | 0x0007f104, | ||
| 167 | 0x0103f0c7, | ||
| 168 | 0xbd000fd0, | ||
| 169 | 0x0199f004, | ||
| 170 | /* 0x01b8: mmctx_multi_disabled */ | ||
| 171 | 0xb600abc8, | ||
| 172 | 0xb9f010b4, | ||
| 173 | 0x01aec80c, | ||
| 174 | 0xfd11e4b6, | ||
| 175 | 0x07f105be, | ||
| 176 | 0x03f0c500, | ||
| 177 | 0x000bd001, | ||
| 178 | /* 0x01d6: mmctx_exec_loop */ | ||
| 179 | /* 0x01d6: mmctx_wait_free */ | ||
| 180 | 0xe7f104bd, | ||
| 181 | 0xe3f0c500, | ||
| 182 | 0x00eecf01, | ||
| 183 | 0xf41fe4f0, | ||
| 184 | 0xce98f30b, | ||
| 185 | 0x05e9fd00, | ||
| 186 | 0xc80007f1, | ||
| 187 | 0xd00103f0, | ||
| 188 | 0x04bd000e, | ||
| 189 | 0xb804c0b6, | ||
| 190 | 0x1bf404cd, | ||
| 191 | 0x02abc8d8, | ||
| 192 | /* 0x0207: mmctx_fini_wait */ | ||
| 193 | 0xf11f1bf4, | ||
| 194 | 0xf0c500b7, | ||
| 195 | 0xbbcf01b3, | ||
| 196 | 0x1fb4f000, | ||
| 197 | 0xf410b4b0, | ||
| 198 | 0xa7f0f01b, | ||
| 199 | 0xd021f402, | ||
| 200 | /* 0x0223: mmctx_stop */ | ||
| 201 | 0xc82b0ef4, | ||
| 167 | 0xb4b600ab, | 202 | 0xb4b600ab, |
| 168 | 0x0cb9f010, | 203 | 0x0cb9f010, |
| 169 | 0xb601aec8, | 204 | 0xf112b9f0, |
| 170 | 0xbefd11e4, | 205 | 0xf0c50007, |
| 171 | 0x008bd005, | 206 | 0x0bd00103, |
| 172 | /* 0x01b4: mmctx_exec_loop */ | 207 | /* 0x023b: mmctx_stop_wait */ |
| 173 | /* 0x01b4: mmctx_wait_free */ | 208 | 0xf104bd00, |
| 174 | 0xf0008ecf, | 209 | 0xf0c500b7, |
| 175 | 0x0bf41fe4, | 210 | 0xbbcf01b3, |
| 176 | 0x00ce98fa, | 211 | 0x12bbc800, |
| 177 | 0xd005e9fd, | 212 | /* 0x024b: mmctx_done */ |
| 178 | 0xc0b6c08e, | 213 | 0xbdf31bf4, |
| 179 | 0x04cdb804, | 214 | 0x0199f094, |
| 180 | 0xc8e81bf4, | 215 | 0x170007f1, |
| 181 | 0x1bf402ab, | 216 | 0xd00203f0, |
| 182 | /* 0x01d5: mmctx_fini_wait */ | 217 | 0x04bd0009, |
| 183 | 0x008bcf18, | 218 | /* 0x025e: strand_wait */ |
| 184 | 0xb01fb4f0, | 219 | 0xa0f900f8, |
| 185 | 0x1bf410b4, | 220 | 0xf402a7f0, |
| 186 | 0x02a7f0f7, | 221 | 0xa0fcd021, |
| 187 | 0xf4c921f4, | 222 | /* 0x026a: strand_pre */ |
| 188 | /* 0x01ea: mmctx_stop */ | 223 | 0x97f000f8, |
| 189 | 0xabc81b0e, | 224 | 0xfc07f10c, |
| 190 | 0x10b4b600, | 225 | 0x0203f04a, |
| 191 | 0xf00cb9f0, | 226 | 0xbd0009d0, |
| 192 | 0x8bd012b9, | 227 | 0x5e21f504, |
| 193 | /* 0x01f9: mmctx_stop_wait */ | 228 | /* 0x027f: strand_post */ |
| 194 | 0x008bcf00, | 229 | 0xf000f802, |
| 195 | 0xf412bbc8, | 230 | 0x07f10d97, |
| 196 | /* 0x0202: mmctx_done */ | 231 | 0x03f04afc, |
| 197 | 0x94bdfa1b, | ||
| 198 | 0xf10199f0, | ||
| 199 | 0xf0170007, | ||
| 200 | 0x09d00203, | ||
| 201 | 0xf804bd00, | ||
| 202 | /* 0x0215: strand_wait */ | ||
| 203 | 0xf0a0f900, | ||
| 204 | 0x21f402a7, | ||
| 205 | 0xf8a0fcc9, | ||
| 206 | /* 0x0221: strand_pre */ | ||
| 207 | 0xfc87f100, | ||
| 208 | 0x0283f04a, | ||
| 209 | 0xd00c97f0, | ||
| 210 | 0x21f50089, | ||
| 211 | 0x00f80215, | ||
| 212 | /* 0x0234: strand_post */ | ||
| 213 | 0x4afc87f1, | ||
| 214 | 0xf00283f0, | ||
| 215 | 0x89d00d97, | ||
| 216 | 0x1521f500, | ||
| 217 | /* 0x0247: strand_set */ | ||
| 218 | 0xf100f802, | ||
| 219 | 0xf04ffca7, | ||
| 220 | 0xaba202a3, | ||
| 221 | 0xc7f00500, | ||
| 222 | 0x00acd00f, | ||
| 223 | 0xd00bc7f0, | ||
| 224 | 0x21f500bc, | ||
| 225 | 0xaed00215, | ||
| 226 | 0x0ac7f000, | ||
| 227 | 0xf500bcd0, | ||
| 228 | 0xf8021521, | ||
| 229 | /* 0x0271: strand_ctx_init */ | ||
| 230 | 0xf094bd00, | ||
| 231 | 0x07f10399, | ||
| 232 | 0x03f00f00, | ||
| 233 | 0x0009d002, | 232 | 0x0009d002, |
| 234 | 0x21f504bd, | 233 | 0x21f504bd, |
| 235 | 0xe7f00221, | 234 | 0x00f8025e, |
| 236 | 0x4721f503, | 235 | /* 0x0294: strand_set */ |
| 237 | 0xfca7f102, | 236 | 0xf10fc7f0, |
| 238 | 0x02a3f046, | 237 | 0xf04ffc07, |
| 239 | 0x0400aba0, | 238 | 0x0cd00203, |
| 240 | 0xf040a0d0, | 239 | 0xf004bd00, |
| 241 | 0xbcd001c7, | 240 | 0x07f10bc7, |
| 242 | 0x1521f500, | 241 | 0x03f04afc, |
| 243 | 0x010c9202, | 242 | 0x000cd002, |
| 244 | 0xf000acd0, | 243 | 0x07f104bd, |
| 245 | 0xbcd002c7, | 244 | 0x03f04ffc, |
| 246 | 0x1521f500, | 245 | 0x000ed002, |
| 247 | 0x3421f502, | 246 | 0xc7f004bd, |
| 248 | 0x8087f102, | 247 | 0xfc07f10a, |
| 249 | 0x0684b608, | 248 | 0x0203f04a, |
| 250 | 0xb70089cf, | 249 | 0xbd000cd0, |
| 251 | 0x95220080, | 250 | 0x5e21f504, |
| 252 | /* 0x02ca: ctx_init_strand_loop */ | 251 | /* 0x02d3: strand_ctx_init */ |
| 252 | 0xbd00f802, | ||
| 253 | 0x0399f094, | ||
| 254 | 0x0f0007f1, | ||
| 255 | 0xd00203f0, | ||
| 256 | 0x04bd0009, | ||
| 257 | 0x026a21f5, | ||
| 258 | 0xf503e7f0, | ||
| 259 | 0xbd029421, | ||
| 260 | 0xfc07f1c4, | ||
| 261 | 0x0203f047, | ||
| 262 | 0xbd000cd0, | ||
| 263 | 0x01c7f004, | ||
| 264 | 0x4afc07f1, | ||
| 265 | 0xd00203f0, | ||
| 266 | 0x04bd000c, | ||
| 267 | 0x025e21f5, | ||
| 268 | 0xf1010c92, | ||
| 269 | 0xf046fc07, | ||
| 270 | 0x0cd00203, | ||
| 271 | 0xf004bd00, | ||
| 272 | 0x07f102c7, | ||
| 273 | 0x03f04afc, | ||
| 274 | 0x000cd002, | ||
| 275 | 0x21f504bd, | ||
| 276 | 0x21f5025e, | ||
| 277 | 0x87f1027f, | ||
| 278 | 0x83f04200, | ||
| 279 | 0x0097f102, | ||
| 280 | 0x0293f020, | ||
| 281 | 0x950099cf, | ||
| 282 | /* 0x034a: ctx_init_strand_loop */ | ||
| 253 | 0x8ed008fe, | 283 | 0x8ed008fe, |
| 254 | 0x408ed000, | 284 | 0x408ed000, |
| 255 | 0xb6808acf, | 285 | 0xb6808acf, |
| @@ -263,198 +293,230 @@ uint32_t nvd7_grgpc_code[] = { | |||
| 263 | 0x170007f1, | 293 | 0x170007f1, |
| 264 | 0xd00203f0, | 294 | 0xd00203f0, |
| 265 | 0x04bd0009, | 295 | 0x04bd0009, |
| 266 | /* 0x02fe: error */ | 296 | /* 0x037e: error */ |
| 267 | 0xe0f900f8, | 297 | 0xe0f900f8, |
| 268 | 0x9814e7f1, | 298 | 0xf102ffb9, |
| 269 | 0xf440e3f0, | 299 | 0xf09814e7, |
| 270 | 0xe0b78d21, | 300 | 0x21f440e3, |
| 271 | 0xf7f0041c, | 301 | 0x01f7f09d, |
| 272 | 0x8d21f401, | 302 | 0xf102ffb9, |
| 273 | 0x00f8e0fc, | 303 | 0xf09c1ce7, |
| 274 | /* 0x0318: init */ | 304 | 0x21f440e3, |
| 275 | 0x04fe04bd, | 305 | 0xf8e0fc9d, |
| 276 | 0x0017f100, | 306 | /* 0x03a1: init */ |
| 277 | 0x0227f012, | 307 | 0xfe04bd00, |
| 278 | 0xf10012d0, | 308 | 0x27f00004, |
| 279 | 0xfe047017, | 309 | 0x0007f102, |
| 280 | 0x17f10010, | 310 | 0x0003f012, |
| 281 | 0x10d00400, | 311 | 0xbd0002d0, |
| 282 | 0x0427f0c0, | 312 | 0x1f17f104, |
| 283 | 0xf40012d0, | 313 | 0x0010fe05, |
| 284 | 0x17f11031, | 314 | 0x070007f1, |
| 285 | 0x14b60608, | 315 | 0xd00003f0, |
| 286 | 0x0012cf06, | 316 | 0x04bd0000, |
| 317 | 0xf10427f0, | ||
| 318 | 0xf0040007, | ||
| 319 | 0x02d00003, | ||
| 320 | 0xf404bd00, | ||
| 321 | 0x27f11031, | ||
| 322 | 0x23f08200, | ||
| 323 | 0x0022cf01, | ||
| 287 | 0xf00137f0, | 324 | 0xf00137f0, |
| 288 | 0x32bb1f24, | 325 | 0x32bb1f24, |
| 289 | 0x0132b604, | 326 | 0x0132b604, |
| 290 | 0x80050280, | 327 | 0x80050280, |
| 291 | 0x10b70603, | 328 | 0x27f10603, |
| 292 | 0x12cf0400, | 329 | 0x23f08600, |
| 293 | 0x04028000, | 330 | 0x0022cf01, |
| 294 | 0x0c30e7f1, | 331 | 0xf1040280, |
| 295 | 0xbd50e3f0, | 332 | 0xf00c30e7, |
| 296 | 0xbd34bd24, | 333 | 0x24bd50e3, |
| 297 | /* 0x0371: init_unk_loop */ | 334 | 0x44bd34bd, |
| 298 | 0x6821f444, | 335 | /* 0x0410: init_unk_loop */ |
| 299 | 0xf400f6b0, | 336 | 0xb06821f4, |
| 300 | 0xf7f00f0b, | 337 | 0x0bf400f6, |
| 301 | 0x04f2bb01, | 338 | 0x01f7f00f, |
| 302 | 0xb6054ffd, | 339 | 0xfd04f2bb, |
| 303 | /* 0x0386: init_unk_next */ | 340 | 0x30b6054f, |
| 304 | 0x20b60130, | 341 | /* 0x0425: init_unk_next */ |
| 305 | 0x04e0b601, | 342 | 0x0120b601, |
| 306 | 0xf40126b0, | 343 | 0xb004e0b6, |
| 307 | /* 0x0392: init_unk_done */ | 344 | 0x1bf40126, |
| 308 | 0x0380e21b, | 345 | /* 0x0431: init_unk_done */ |
| 309 | 0x08048007, | 346 | 0x070380e2, |
| 310 | 0x010027f1, | 347 | 0xf1080480, |
| 311 | 0xcf0223f0, | 348 | 0xf0010027, |
| 312 | 0x34bd0022, | 349 | 0x22cf0223, |
| 313 | 0x070047f1, | 350 | 0x9534bd00, |
| 314 | 0x950644b6, | 351 | 0x07f10825, |
| 315 | 0x45d00825, | 352 | 0x03f0c000, |
| 316 | 0x4045d000, | 353 | 0x0005d001, |
| 317 | 0x98000e98, | 354 | 0x07f104bd, |
| 318 | 0x21f5010f, | 355 | 0x03f0c100, |
| 319 | 0x2fbb0147, | 356 | 0x0005d001, |
| 320 | 0x003fbb00, | 357 | 0x0e9804bd, |
| 321 | 0x98010e98, | 358 | 0x010f9800, |
| 322 | 0x21f5020f, | 359 | 0x015021f5, |
| 323 | 0x0e980147, | 360 | 0xbb002fbb, |
| 324 | 0x00effd05, | 361 | 0x0e98003f, |
| 325 | 0xbb002ebb, | 362 | 0x020f9801, |
| 326 | 0x0e98003e, | 363 | 0x015021f5, |
| 327 | 0x030f9802, | 364 | 0xfd050e98, |
| 328 | 0x014721f5, | ||
| 329 | 0xfd070e98, | ||
| 330 | 0x2ebb00ef, | 365 | 0x2ebb00ef, |
| 331 | 0x003ebb00, | 366 | 0x003ebb00, |
| 332 | 0x130040b7, | 367 | 0x98020e98, |
| 333 | 0xd00235b6, | 368 | 0x21f5030f, |
| 334 | 0x25b60043, | 369 | 0x0e980150, |
| 335 | 0x0635b608, | 370 | 0x00effd07, |
| 336 | 0xb60120b6, | 371 | 0xbb002ebb, |
| 337 | 0x24b60130, | 372 | 0x35b6003e, |
| 338 | 0x0834b608, | 373 | 0x0007f102, |
| 339 | 0xf5022fb9, | 374 | 0x0103f0d3, |
| 340 | 0xbb027121, | 375 | 0xbd0003d0, |
| 341 | 0x07f1003f, | 376 | 0x0825b604, |
| 342 | 0x03f00100, | 377 | 0xb60635b6, |
| 343 | 0x0003d002, | 378 | 0x30b60120, |
| 344 | 0x24bd04bd, | 379 | 0x0824b601, |
| 345 | 0xf11f29f0, | 380 | 0xb90834b6, |
| 346 | 0xf0080007, | 381 | 0x21f5022f, |
| 347 | 0x02d00203, | 382 | 0x3fbb02d3, |
| 348 | /* 0x0433: main */ | 383 | 0x0007f100, |
| 384 | 0x0203f001, | ||
| 385 | 0xbd0003d0, | ||
| 386 | 0xf024bd04, | ||
| 387 | 0x07f11f29, | ||
| 388 | 0x03f00800, | ||
| 389 | 0x0002d002, | ||
| 390 | /* 0x04e2: main */ | ||
| 391 | 0x31f404bd, | ||
| 392 | 0x0028f400, | ||
| 393 | 0xf424d7f0, | ||
| 394 | 0x01f43921, | ||
| 395 | 0x04e4b0f4, | ||
| 396 | 0xfe1e18f4, | ||
| 397 | 0x27f00181, | ||
| 398 | 0xfd20bd06, | ||
| 399 | 0xe4b60412, | ||
| 400 | 0x051efd01, | ||
| 401 | 0xf50018fe, | ||
| 402 | 0xf405d721, | ||
| 403 | /* 0x0512: main_not_ctx_xfer */ | ||
| 404 | 0xef94d30e, | ||
| 405 | 0x01f5f010, | ||
| 406 | 0x037e21f5, | ||
| 407 | /* 0x051f: ih */ | ||
| 408 | 0xf9c60ef4, | ||
| 409 | 0x0188fe80, | ||
| 410 | 0x90f980f9, | ||
| 411 | 0xb0f9a0f9, | ||
| 412 | 0xe0f9d0f9, | ||
| 413 | 0x04bdf0f9, | ||
| 414 | 0x0200a7f1, | ||
| 415 | 0xcf00a3f0, | ||
| 416 | 0xabc400aa, | ||
| 417 | 0x2c0bf404, | ||
| 418 | 0xf124d7f0, | ||
| 419 | 0xf01a00e7, | ||
| 420 | 0xeecf00e3, | ||
| 421 | 0x00f7f100, | ||
| 422 | 0x00f3f019, | ||
| 423 | 0xf400ffcf, | ||
| 424 | 0xe7f00421, | ||
| 425 | 0x0007f101, | ||
| 426 | 0x0003f01d, | ||
| 427 | 0xbd000ed0, | ||
| 428 | /* 0x056d: ih_no_fifo */ | ||
| 429 | 0x0007f104, | ||
| 430 | 0x0003f001, | ||
| 431 | 0xbd000ad0, | ||
| 432 | 0xfcf0fc04, | ||
| 433 | 0xfcd0fce0, | ||
| 434 | 0xfca0fcb0, | ||
| 435 | 0xfe80fc90, | ||
| 436 | 0x80fc0088, | ||
| 437 | 0xf80032f4, | ||
| 438 | /* 0x0591: hub_barrier_done */ | ||
| 439 | 0x01f7f001, | ||
| 440 | 0xbb040e98, | ||
| 441 | 0xffb904fe, | ||
| 442 | 0x18e7f102, | ||
| 443 | 0x40e3f094, | ||
| 444 | 0xf89d21f4, | ||
| 445 | /* 0x05a9: ctx_redswitch */ | ||
| 446 | 0x20f7f000, | ||
| 447 | 0x850007f1, | ||
| 448 | 0xd00103f0, | ||
| 449 | 0x04bd000f, | ||
| 450 | /* 0x05bb: ctx_redswitch_delay */ | ||
| 451 | 0xb608e7f0, | ||
| 452 | 0x1bf401e2, | ||
| 453 | 0x00f5f1fd, | ||
| 454 | 0x00f5f108, | ||
| 455 | 0x0007f102, | ||
| 456 | 0x0103f085, | ||
| 457 | 0xbd000fd0, | ||
| 458 | /* 0x05d7: ctx_xfer */ | ||
| 459 | 0xf100f804, | ||
| 460 | 0xf0810007, | ||
| 461 | 0x0fd00203, | ||
| 349 | 0xf404bd00, | 462 | 0xf404bd00, |
| 350 | 0x28f40031, | 463 | 0x21f50711, |
| 351 | 0x24d7f000, | 464 | /* 0x05ea: ctx_xfer_not_load */ |
| 352 | 0xf43921f4, | 465 | 0x21f505a9, |
| 353 | 0xe4b0f401, | 466 | 0x24bd026a, |
| 354 | 0x1e18f404, | 467 | 0x47fc07f1, |
| 355 | 0xf00181fe, | 468 | 0xd00203f0, |
| 356 | 0x20bd0627, | 469 | 0x04bd0002, |
| 357 | 0xb60412fd, | 470 | 0xb6012cf0, |
| 358 | 0x1efd01e4, | 471 | 0x07f10320, |
| 359 | 0x0018fe05, | 472 | 0x03f04afc, |
| 360 | 0x04f721f5, | 473 | 0x0002d002, |
| 361 | /* 0x0463: main_not_ctx_xfer */ | 474 | 0xacf004bd, |
| 362 | 0x94d30ef4, | 475 | 0x02a5f001, |
| 363 | 0xf5f010ef, | 476 | 0x0000b7f1, |
| 364 | 0xfe21f501, | ||
| 365 | 0xc60ef402, | ||
| 366 | /* 0x0470: ih */ | ||
| 367 | 0x88fe80f9, | ||
| 368 | 0xf980f901, | ||
| 369 | 0xf9a0f990, | ||
| 370 | 0xf9d0f9b0, | ||
| 371 | 0xbdf0f9e0, | ||
| 372 | 0x800acf04, | ||
| 373 | 0xf404abc4, | ||
| 374 | 0xb7f11d0b, | ||
| 375 | 0xd7f01900, | ||
| 376 | 0x40becf24, | ||
| 377 | 0xf400bfcf, | ||
| 378 | 0xb0b70421, | ||
| 379 | 0xe7f00400, | ||
| 380 | 0x00bed001, | ||
| 381 | /* 0x04a8: ih_no_fifo */ | ||
| 382 | 0xfc400ad0, | ||
| 383 | 0xfce0fcf0, | ||
| 384 | 0xfcb0fcd0, | ||
| 385 | 0xfc90fca0, | ||
| 386 | 0x0088fe80, | ||
| 387 | 0x32f480fc, | ||
| 388 | /* 0x04c3: hub_barrier_done */ | ||
| 389 | 0xf001f800, | ||
| 390 | 0x0e9801f7, | ||
| 391 | 0x04febb04, | ||
| 392 | 0x9418e7f1, | ||
| 393 | 0xf440e3f0, | ||
| 394 | 0x00f88d21, | ||
| 395 | /* 0x04d8: ctx_redswitch */ | ||
| 396 | 0x0614e7f1, | ||
| 397 | 0xf006e4b6, | ||
| 398 | 0xefd020f7, | ||
| 399 | 0x08f7f000, | ||
| 400 | /* 0x04e8: ctx_redswitch_delay */ | ||
| 401 | 0xf401f2b6, | ||
| 402 | 0xf7f1fd1b, | ||
| 403 | 0xefd00a20, | ||
| 404 | /* 0x04f7: ctx_xfer */ | ||
| 405 | 0xf100f800, | ||
| 406 | 0xb60a0417, | ||
| 407 | 0x1fd00614, | ||
| 408 | 0x0711f400, | ||
| 409 | 0x04d821f5, | ||
| 410 | /* 0x0508: ctx_xfer_not_load */ | ||
| 411 | 0x4afc17f1, | ||
| 412 | 0xf00213f0, | ||
| 413 | 0x12d00c27, | ||
| 414 | 0x1521f500, | ||
| 415 | 0xfc27f102, | ||
| 416 | 0x0223f047, | ||
| 417 | 0xf00020d0, | ||
| 418 | 0x20b6012c, | ||
| 419 | 0x0012d003, | ||
| 420 | 0xf001acf0, | ||
| 421 | 0xb7f002a5, | ||
| 422 | 0x50b3f000, | ||
| 423 | 0xb6040c98, | ||
| 424 | 0xbcbb0fc4, | ||
| 425 | 0x000c9800, | ||
| 426 | 0xf0010d98, | ||
| 427 | 0x21f500e7, | ||
| 428 | 0xacf00166, | ||
| 429 | 0x00b7f101, | ||
| 430 | 0x50b3f040, | ||
| 431 | 0xb6040c98, | ||
| 432 | 0xbcbb0fc4, | ||
| 433 | 0x010c9800, | ||
| 434 | 0x98020d98, | ||
| 435 | 0xe7f1060f, | ||
| 436 | 0x21f50800, | ||
| 437 | 0xacf00166, | ||
| 438 | 0x04a5f001, | ||
| 439 | 0x3000b7f1, | ||
| 440 | 0x9850b3f0, | 477 | 0x9850b3f0, |
| 441 | 0xc4b6040c, | 478 | 0xc4b6040c, |
| 442 | 0x00bcbb0f, | 479 | 0x00bcbb0f, |
| 443 | 0x98020c98, | 480 | 0x98000c98, |
| 444 | 0x0f98030d, | 481 | 0xe7f0010d, |
| 445 | 0x00e7f108, | 482 | 0x6f21f500, |
| 446 | 0x6621f502, | 483 | 0x01acf001, |
| 447 | 0x1521f501, | 484 | 0x4000b7f1, |
| 448 | 0x0601f402, | 485 | 0x9850b3f0, |
| 449 | /* 0x05a3: ctx_xfer_post */ | 486 | 0xc4b6040c, |
| 450 | 0xf11412f4, | 487 | 0x00bcbb0f, |
| 451 | 0xf04afc17, | 488 | 0x98010c98, |
| 452 | 0x27f00213, | 489 | 0x0f98020d, |
| 453 | 0x0012d00d, | 490 | 0x00e7f106, |
| 454 | 0x021521f5, | 491 | 0x6f21f508, |
| 455 | /* 0x05b4: ctx_xfer_done */ | 492 | 0x01acf001, |
| 456 | 0x04c321f5, | 493 | 0xf104a5f0, |
| 457 | 0x000000f8, | 494 | 0xf03000b7, |
| 495 | 0x0c9850b3, | ||
| 496 | 0x0fc4b604, | ||
| 497 | 0x9800bcbb, | ||
| 498 | 0x0d98020c, | ||
| 499 | 0x080f9803, | ||
| 500 | 0x0200e7f1, | ||
| 501 | 0x016f21f5, | ||
| 502 | 0x025e21f5, | ||
| 503 | 0xf40601f4, | ||
| 504 | /* 0x0686: ctx_xfer_post */ | ||
| 505 | 0x21f50712, | ||
| 506 | /* 0x068a: ctx_xfer_done */ | ||
| 507 | 0x21f5027f, | ||
| 508 | 0x00f80591, | ||
| 509 | 0x00000000, | ||
| 510 | 0x00000000, | ||
| 511 | 0x00000000, | ||
| 512 | 0x00000000, | ||
| 513 | 0x00000000, | ||
| 514 | 0x00000000, | ||
| 515 | 0x00000000, | ||
| 516 | 0x00000000, | ||
| 517 | 0x00000000, | ||
| 518 | 0x00000000, | ||
| 519 | 0x00000000, | ||
| 458 | 0x00000000, | 520 | 0x00000000, |
| 459 | 0x00000000, | 521 | 0x00000000, |
| 460 | 0x00000000, | 522 | 0x00000000, |
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h index 7ff5ef6b0804..b6da800ee9c2 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h +++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h | |||
| @@ -41,14 +41,14 @@ uint32_t nve0_grgpc_data[] = { | |||
| 41 | }; | 41 | }; |
| 42 | 42 | ||
| 43 | uint32_t nve0_grgpc_code[] = { | 43 | uint32_t nve0_grgpc_code[] = { |
| 44 | 0x03180ef5, | 44 | 0x03a10ef5, |
| 45 | /* 0x0004: queue_put */ | 45 | /* 0x0004: queue_put */ |
| 46 | 0x9800d898, | 46 | 0x9800d898, |
| 47 | 0x86f001d9, | 47 | 0x86f001d9, |
| 48 | 0x0489b808, | 48 | 0x0489b808, |
| 49 | 0xf00c1bf4, | 49 | 0xf00c1bf4, |
| 50 | 0x21f502f7, | 50 | 0x21f502f7, |
| 51 | 0x00f802fe, | 51 | 0x00f8037e, |
| 52 | /* 0x001c: queue_put_next */ | 52 | /* 0x001c: queue_put_next */ |
| 53 | 0xb60798c4, | 53 | 0xb60798c4, |
| 54 | 0x8dbb0384, | 54 | 0x8dbb0384, |
| @@ -72,184 +72,214 @@ uint32_t nve0_grgpc_code[] = { | |||
| 72 | /* 0x0066: queue_get_done */ | 72 | /* 0x0066: queue_get_done */ |
| 73 | 0x00f80132, | 73 | 0x00f80132, |
| 74 | /* 0x0068: nv_rd32 */ | 74 | /* 0x0068: nv_rd32 */ |
| 75 | 0x0728b7f1, | 75 | 0xf002ecb9, |
| 76 | 0xb906b4b6, | 76 | 0x07f11fc9, |
| 77 | 0xc9f002ec, | 77 | 0x03f0ca00, |
| 78 | 0x00bcd01f, | 78 | 0x000cd001, |
| 79 | /* 0x0078: nv_rd32_wait */ | 79 | /* 0x007a: nv_rd32_wait */ |
| 80 | 0xc800bccf, | 80 | 0xc7f104bd, |
| 81 | 0x1bf41fcc, | 81 | 0xc3f0ca00, |
| 82 | 0x06a7f0fa, | 82 | 0x00cccf01, |
| 83 | 0x010921f5, | 83 | 0xf41fccc8, |
| 84 | 0xf840bfcf, | 84 | 0xa7f0f31b, |
| 85 | /* 0x008d: nv_wr32 */ | 85 | 0x1021f506, |
| 86 | 0x28b7f100, | 86 | 0x00f7f101, |
| 87 | 0x06b4b607, | 87 | 0x01f3f0cb, |
| 88 | 0xb980bfd0, | 88 | 0xf800ffcf, |
| 89 | 0xc9f002ec, | 89 | /* 0x009d: nv_wr32 */ |
| 90 | 0x1ec9f01f, | 90 | 0x0007f100, |
| 91 | /* 0x00a3: nv_wr32_wait */ | 91 | 0x0103f0cc, |
| 92 | 0xcf00bcd0, | 92 | 0xbd000fd0, |
| 93 | 0xccc800bc, | 93 | 0x02ecb904, |
| 94 | 0xfa1bf41f, | 94 | 0xf01fc9f0, |
| 95 | /* 0x00ae: watchdog_reset */ | 95 | 0x07f11ec9, |
| 96 | 0x87f100f8, | 96 | 0x03f0ca00, |
| 97 | 0x84b60430, | 97 | 0x000cd001, |
| 98 | 0x1ff9f006, | 98 | /* 0x00be: nv_wr32_wait */ |
| 99 | 0xf8008fd0, | 99 | 0xc7f104bd, |
| 100 | /* 0x00bd: watchdog_clear */ | 100 | 0xc3f0ca00, |
| 101 | 0x3087f100, | 101 | 0x00cccf01, |
| 102 | 0x0684b604, | 102 | 0xf41fccc8, |
| 103 | 0xf80080d0, | 103 | 0x00f8f31b, |
| 104 | /* 0x00c9: wait_donez */ | 104 | /* 0x00d0: wait_donez */ |
| 105 | 0xf094bd00, | 105 | 0x99f094bd, |
| 106 | 0x07f10099, | 106 | 0x0007f100, |
| 107 | 0x03f00f00, | 107 | 0x0203f00f, |
| 108 | 0x0009d002, | 108 | 0xbd0009d0, |
| 109 | 0x07f104bd, | 109 | 0x0007f104, |
| 110 | 0x03f00600, | 110 | 0x0203f006, |
| 111 | 0x000ad002, | 111 | 0xbd000ad0, |
| 112 | /* 0x00e6: wait_donez_ne */ | 112 | /* 0x00ed: wait_donez_ne */ |
| 113 | 0x87f104bd, | 113 | 0x0087f104, |
| 114 | 0x83f00000, | 114 | 0x0183f000, |
| 115 | 0x0088cf01, | 115 | 0xff0088cf, |
| 116 | 0xf4888aff, | 116 | 0x1bf4888a, |
| 117 | 0x94bdf31b, | 117 | 0xf094bdf3, |
| 118 | 0xf10099f0, | ||
| 119 | 0xf0170007, | ||
| 120 | 0x09d00203, | ||
| 121 | 0xf804bd00, | ||
| 122 | /* 0x0109: wait_doneo */ | ||
| 123 | 0xf094bd00, | ||
| 124 | 0x07f10099, | 118 | 0x07f10099, |
| 125 | 0x03f00f00, | 119 | 0x03f01700, |
| 126 | 0x0009d002, | 120 | 0x0009d002, |
| 127 | 0x87f104bd, | 121 | 0x00f804bd, |
| 128 | 0x84b60818, | 122 | /* 0x0110: wait_doneo */ |
| 129 | 0x008ad006, | ||
| 130 | /* 0x0124: wait_doneo_e */ | ||
| 131 | 0x040087f1, | ||
| 132 | 0xcf0684b6, | ||
| 133 | 0x8aff0088, | ||
| 134 | 0xf30bf488, | ||
| 135 | 0x99f094bd, | 123 | 0x99f094bd, |
| 136 | 0x0007f100, | 124 | 0x0007f100, |
| 137 | 0x0203f017, | 125 | 0x0203f00f, |
| 138 | 0xbd0009d0, | 126 | 0xbd0009d0, |
| 139 | /* 0x0147: mmctx_size */ | 127 | 0x0007f104, |
| 140 | 0xbd00f804, | 128 | 0x0203f006, |
| 141 | /* 0x0149: nv_mmctx_size_loop */ | 129 | 0xbd000ad0, |
| 142 | 0x00e89894, | 130 | /* 0x012d: wait_doneo_e */ |
| 143 | 0xb61a85b6, | 131 | 0x0087f104, |
| 144 | 0x84b60180, | 132 | 0x0183f000, |
| 145 | 0x0098bb02, | 133 | 0xff0088cf, |
| 146 | 0xb804e0b6, | 134 | 0x0bf4888a, |
| 147 | 0x1bf404ef, | 135 | 0xf094bdf3, |
| 148 | 0x029fb9eb, | 136 | 0x07f10099, |
| 149 | /* 0x0166: mmctx_xfer */ | 137 | 0x03f01700, |
| 150 | 0x94bd00f8, | 138 | 0x0009d002, |
| 151 | 0xf10199f0, | 139 | 0x00f804bd, |
| 152 | 0xf00f0007, | 140 | /* 0x0150: mmctx_size */ |
| 153 | 0x09d00203, | 141 | /* 0x0152: nv_mmctx_size_loop */ |
| 154 | 0xf104bd00, | 142 | 0xe89894bd, |
| 155 | 0xb6071087, | 143 | 0x1a85b600, |
| 156 | 0x94bd0684, | 144 | 0xb60180b6, |
| 157 | 0xf405bbfd, | 145 | 0x98bb0284, |
| 158 | 0x8bd0090b, | 146 | 0x04e0b600, |
| 159 | 0x0099f000, | 147 | 0xf404efb8, |
| 160 | /* 0x018c: mmctx_base_disabled */ | 148 | 0x9fb9eb1b, |
| 161 | 0xf405eefd, | 149 | /* 0x016f: mmctx_xfer */ |
| 162 | 0x8ed00c0b, | 150 | 0xbd00f802, |
| 163 | 0xc08fd080, | 151 | 0x0199f094, |
| 164 | /* 0x019b: mmctx_multi_disabled */ | 152 | 0x0f0007f1, |
| 165 | 0xb70199f0, | 153 | 0xd00203f0, |
| 166 | 0xc8010080, | 154 | 0x04bd0009, |
| 155 | 0xbbfd94bd, | ||
| 156 | 0x120bf405, | ||
| 157 | 0xc40007f1, | ||
| 158 | 0xd00103f0, | ||
| 159 | 0x04bd000b, | ||
| 160 | /* 0x0197: mmctx_base_disabled */ | ||
| 161 | 0xfd0099f0, | ||
| 162 | 0x0bf405ee, | ||
| 163 | 0x0007f11e, | ||
| 164 | 0x0103f0c6, | ||
| 165 | 0xbd000ed0, | ||
| 166 | 0x0007f104, | ||
| 167 | 0x0103f0c7, | ||
| 168 | 0xbd000fd0, | ||
| 169 | 0x0199f004, | ||
| 170 | /* 0x01b8: mmctx_multi_disabled */ | ||
| 171 | 0xb600abc8, | ||
| 172 | 0xb9f010b4, | ||
| 173 | 0x01aec80c, | ||
| 174 | 0xfd11e4b6, | ||
| 175 | 0x07f105be, | ||
| 176 | 0x03f0c500, | ||
| 177 | 0x000bd001, | ||
| 178 | /* 0x01d6: mmctx_exec_loop */ | ||
| 179 | /* 0x01d6: mmctx_wait_free */ | ||
| 180 | 0xe7f104bd, | ||
| 181 | 0xe3f0c500, | ||
| 182 | 0x00eecf01, | ||
| 183 | 0xf41fe4f0, | ||
| 184 | 0xce98f30b, | ||
| 185 | 0x05e9fd00, | ||
| 186 | 0xc80007f1, | ||
| 187 | 0xd00103f0, | ||
| 188 | 0x04bd000e, | ||
| 189 | 0xb804c0b6, | ||
| 190 | 0x1bf404cd, | ||
| 191 | 0x02abc8d8, | ||
| 192 | /* 0x0207: mmctx_fini_wait */ | ||
| 193 | 0xf11f1bf4, | ||
| 194 | 0xf0c500b7, | ||
| 195 | 0xbbcf01b3, | ||
| 196 | 0x1fb4f000, | ||
| 197 | 0xf410b4b0, | ||
| 198 | 0xa7f0f01b, | ||
| 199 | 0xd021f402, | ||
| 200 | /* 0x0223: mmctx_stop */ | ||
| 201 | 0xc82b0ef4, | ||
| 167 | 0xb4b600ab, | 202 | 0xb4b600ab, |
| 168 | 0x0cb9f010, | 203 | 0x0cb9f010, |
| 169 | 0xb601aec8, | 204 | 0xf112b9f0, |
| 170 | 0xbefd11e4, | 205 | 0xf0c50007, |
| 171 | 0x008bd005, | 206 | 0x0bd00103, |
| 172 | /* 0x01b4: mmctx_exec_loop */ | 207 | /* 0x023b: mmctx_stop_wait */ |
| 173 | /* 0x01b4: mmctx_wait_free */ | 208 | 0xf104bd00, |
| 174 | 0xf0008ecf, | 209 | 0xf0c500b7, |
| 175 | 0x0bf41fe4, | 210 | 0xbbcf01b3, |
| 176 | 0x00ce98fa, | 211 | 0x12bbc800, |
| 177 | 0xd005e9fd, | 212 | /* 0x024b: mmctx_done */ |
| 178 | 0xc0b6c08e, | 213 | 0xbdf31bf4, |
| 179 | 0x04cdb804, | 214 | 0x0199f094, |
| 180 | 0xc8e81bf4, | 215 | 0x170007f1, |
| 181 | 0x1bf402ab, | 216 | 0xd00203f0, |
| 182 | /* 0x01d5: mmctx_fini_wait */ | 217 | 0x04bd0009, |
| 183 | 0x008bcf18, | 218 | /* 0x025e: strand_wait */ |
| 184 | 0xb01fb4f0, | 219 | 0xa0f900f8, |
| 185 | 0x1bf410b4, | 220 | 0xf402a7f0, |
| 186 | 0x02a7f0f7, | 221 | 0xa0fcd021, |
| 187 | 0xf4c921f4, | 222 | /* 0x026a: strand_pre */ |
| 188 | /* 0x01ea: mmctx_stop */ | 223 | 0x97f000f8, |
| 189 | 0xabc81b0e, | 224 | 0xfc07f10c, |
| 190 | 0x10b4b600, | 225 | 0x0203f04a, |
| 191 | 0xf00cb9f0, | 226 | 0xbd0009d0, |
| 192 | 0x8bd012b9, | 227 | 0x5e21f504, |
| 193 | /* 0x01f9: mmctx_stop_wait */ | 228 | /* 0x027f: strand_post */ |
| 194 | 0x008bcf00, | 229 | 0xf000f802, |
| 195 | 0xf412bbc8, | 230 | 0x07f10d97, |
| 196 | /* 0x0202: mmctx_done */ | 231 | 0x03f04afc, |
| 197 | 0x94bdfa1b, | ||
| 198 | 0xf10199f0, | ||
| 199 | 0xf0170007, | ||
| 200 | 0x09d00203, | ||
| 201 | 0xf804bd00, | ||
| 202 | /* 0x0215: strand_wait */ | ||
| 203 | 0xf0a0f900, | ||
| 204 | 0x21f402a7, | ||
| 205 | 0xf8a0fcc9, | ||
| 206 | /* 0x0221: strand_pre */ | ||
| 207 | 0xfc87f100, | ||
| 208 | 0x0283f04a, | ||
| 209 | 0xd00c97f0, | ||
| 210 | 0x21f50089, | ||
| 211 | 0x00f80215, | ||
| 212 | /* 0x0234: strand_post */ | ||
| 213 | 0x4afc87f1, | ||
| 214 | 0xf00283f0, | ||
| 215 | 0x89d00d97, | ||
| 216 | 0x1521f500, | ||
| 217 | /* 0x0247: strand_set */ | ||
| 218 | 0xf100f802, | ||
| 219 | 0xf04ffca7, | ||
| 220 | 0xaba202a3, | ||
| 221 | 0xc7f00500, | ||
| 222 | 0x00acd00f, | ||
| 223 | 0xd00bc7f0, | ||
| 224 | 0x21f500bc, | ||
| 225 | 0xaed00215, | ||
| 226 | 0x0ac7f000, | ||
| 227 | 0xf500bcd0, | ||
| 228 | 0xf8021521, | ||
| 229 | /* 0x0271: strand_ctx_init */ | ||
| 230 | 0xf094bd00, | ||
| 231 | 0x07f10399, | ||
| 232 | 0x03f00f00, | ||
| 233 | 0x0009d002, | 232 | 0x0009d002, |
| 234 | 0x21f504bd, | 233 | 0x21f504bd, |
| 235 | 0xe7f00221, | 234 | 0x00f8025e, |
| 236 | 0x4721f503, | 235 | /* 0x0294: strand_set */ |
| 237 | 0xfca7f102, | 236 | 0xf10fc7f0, |
| 238 | 0x02a3f046, | 237 | 0xf04ffc07, |
| 239 | 0x0400aba0, | 238 | 0x0cd00203, |
| 240 | 0xf040a0d0, | 239 | 0xf004bd00, |
| 241 | 0xbcd001c7, | 240 | 0x07f10bc7, |
| 242 | 0x1521f500, | 241 | 0x03f04afc, |
| 243 | 0x010c9202, | 242 | 0x000cd002, |
| 244 | 0xf000acd0, | 243 | 0x07f104bd, |
| 245 | 0xbcd002c7, | 244 | 0x03f04ffc, |
| 246 | 0x1521f500, | 245 | 0x000ed002, |
| 247 | 0x3421f502, | 246 | 0xc7f004bd, |
| 248 | 0x8087f102, | 247 | 0xfc07f10a, |
| 249 | 0x0684b608, | 248 | 0x0203f04a, |
| 250 | 0xb70089cf, | 249 | 0xbd000cd0, |
| 251 | 0x95220080, | 250 | 0x5e21f504, |
| 252 | /* 0x02ca: ctx_init_strand_loop */ | 251 | /* 0x02d3: strand_ctx_init */ |
| 252 | 0xbd00f802, | ||
| 253 | 0x0399f094, | ||
| 254 | 0x0f0007f1, | ||
| 255 | 0xd00203f0, | ||
| 256 | 0x04bd0009, | ||
| 257 | 0x026a21f5, | ||
| 258 | 0xf503e7f0, | ||
| 259 | 0xbd029421, | ||
| 260 | 0xfc07f1c4, | ||
| 261 | 0x0203f047, | ||
| 262 | 0xbd000cd0, | ||
| 263 | 0x01c7f004, | ||
| 264 | 0x4afc07f1, | ||
| 265 | 0xd00203f0, | ||
| 266 | 0x04bd000c, | ||
| 267 | 0x025e21f5, | ||
| 268 | 0xf1010c92, | ||
| 269 | 0xf046fc07, | ||
| 270 | 0x0cd00203, | ||
| 271 | 0xf004bd00, | ||
| 272 | 0x07f102c7, | ||
| 273 | 0x03f04afc, | ||
| 274 | 0x000cd002, | ||
| 275 | 0x21f504bd, | ||
| 276 | 0x21f5025e, | ||
| 277 | 0x87f1027f, | ||
| 278 | 0x83f04200, | ||
| 279 | 0x0097f102, | ||
| 280 | 0x0293f020, | ||
| 281 | 0x950099cf, | ||
| 282 | /* 0x034a: ctx_init_strand_loop */ | ||
| 253 | 0x8ed008fe, | 283 | 0x8ed008fe, |
| 254 | 0x408ed000, | 284 | 0x408ed000, |
| 255 | 0xb6808acf, | 285 | 0xb6808acf, |
| @@ -263,198 +293,230 @@ uint32_t nve0_grgpc_code[] = { | |||
| 263 | 0x170007f1, | 293 | 0x170007f1, |
| 264 | 0xd00203f0, | 294 | 0xd00203f0, |
| 265 | 0x04bd0009, | 295 | 0x04bd0009, |
| 266 | /* 0x02fe: error */ | 296 | /* 0x037e: error */ |
| 267 | 0xe0f900f8, | 297 | 0xe0f900f8, |
| 268 | 0x9814e7f1, | 298 | 0xf102ffb9, |
| 269 | 0xf440e3f0, | 299 | 0xf09814e7, |
| 270 | 0xe0b78d21, | 300 | 0x21f440e3, |
| 271 | 0xf7f0041c, | 301 | 0x01f7f09d, |
| 272 | 0x8d21f401, | 302 | 0xf102ffb9, |
| 273 | 0x00f8e0fc, | 303 | 0xf09c1ce7, |
| 274 | /* 0x0318: init */ | 304 | 0x21f440e3, |
| 275 | 0x04fe04bd, | 305 | 0xf8e0fc9d, |
| 276 | 0x0017f100, | 306 | /* 0x03a1: init */ |
| 277 | 0x0227f012, | 307 | 0xfe04bd00, |
| 278 | 0xf10012d0, | 308 | 0x27f00004, |
| 279 | 0xfe047017, | 309 | 0x0007f102, |
| 280 | 0x17f10010, | 310 | 0x0003f012, |
| 281 | 0x10d00400, | 311 | 0xbd0002d0, |
| 282 | 0x0427f0c0, | 312 | 0x1f17f104, |
| 283 | 0xf40012d0, | 313 | 0x0010fe05, |
| 284 | 0x17f11031, | 314 | 0x070007f1, |
| 285 | 0x14b60608, | 315 | 0xd00003f0, |
| 286 | 0x0012cf06, | 316 | 0x04bd0000, |
| 317 | 0xf10427f0, | ||
| 318 | 0xf0040007, | ||
| 319 | 0x02d00003, | ||
| 320 | 0xf404bd00, | ||
| 321 | 0x27f11031, | ||
| 322 | 0x23f08200, | ||
| 323 | 0x0022cf01, | ||
| 287 | 0xf00137f0, | 324 | 0xf00137f0, |
| 288 | 0x32bb1f24, | 325 | 0x32bb1f24, |
| 289 | 0x0132b604, | 326 | 0x0132b604, |
| 290 | 0x80050280, | 327 | 0x80050280, |
| 291 | 0x10b70603, | 328 | 0x27f10603, |
| 292 | 0x12cf0400, | 329 | 0x23f08600, |
| 293 | 0x04028000, | 330 | 0x0022cf01, |
| 294 | 0x0c30e7f1, | 331 | 0xf1040280, |
| 295 | 0xbd50e3f0, | 332 | 0xf00c30e7, |
| 296 | 0xbd34bd24, | 333 | 0x24bd50e3, |
| 297 | /* 0x0371: init_unk_loop */ | 334 | 0x44bd34bd, |
| 298 | 0x6821f444, | 335 | /* 0x0410: init_unk_loop */ |
| 299 | 0xf400f6b0, | 336 | 0xb06821f4, |
| 300 | 0xf7f00f0b, | 337 | 0x0bf400f6, |
| 301 | 0x04f2bb01, | 338 | 0x01f7f00f, |
| 302 | 0xb6054ffd, | 339 | 0xfd04f2bb, |
| 303 | /* 0x0386: init_unk_next */ | 340 | 0x30b6054f, |
| 304 | 0x20b60130, | 341 | /* 0x0425: init_unk_next */ |
| 305 | 0x04e0b601, | 342 | 0x0120b601, |
| 306 | 0xf40126b0, | 343 | 0xb004e0b6, |
| 307 | /* 0x0392: init_unk_done */ | 344 | 0x1bf40126, |
| 308 | 0x0380e21b, | 345 | /* 0x0431: init_unk_done */ |
| 309 | 0x08048007, | 346 | 0x070380e2, |
| 310 | 0x010027f1, | 347 | 0xf1080480, |
| 311 | 0xcf0223f0, | 348 | 0xf0010027, |
| 312 | 0x34bd0022, | 349 | 0x22cf0223, |
| 313 | 0x070047f1, | 350 | 0x9534bd00, |
| 314 | 0x950644b6, | 351 | 0x07f10825, |
| 315 | 0x45d00825, | 352 | 0x03f0c000, |
| 316 | 0x4045d000, | 353 | 0x0005d001, |
| 317 | 0x98000e98, | 354 | 0x07f104bd, |
| 318 | 0x21f5010f, | 355 | 0x03f0c100, |
| 319 | 0x2fbb0147, | 356 | 0x0005d001, |
| 320 | 0x003fbb00, | 357 | 0x0e9804bd, |
| 321 | 0x98010e98, | 358 | 0x010f9800, |
| 322 | 0x21f5020f, | 359 | 0x015021f5, |
| 323 | 0x0e980147, | 360 | 0xbb002fbb, |
| 324 | 0x00effd05, | 361 | 0x0e98003f, |
| 325 | 0xbb002ebb, | 362 | 0x020f9801, |
| 326 | 0x0e98003e, | 363 | 0x015021f5, |
| 327 | 0x030f9802, | 364 | 0xfd050e98, |
| 328 | 0x014721f5, | ||
| 329 | 0xfd070e98, | ||
| 330 | 0x2ebb00ef, | 365 | 0x2ebb00ef, |
| 331 | 0x003ebb00, | 366 | 0x003ebb00, |
| 332 | 0x130040b7, | 367 | 0x98020e98, |
| 333 | 0xd00235b6, | 368 | 0x21f5030f, |
| 334 | 0x25b60043, | 369 | 0x0e980150, |
| 335 | 0x0635b608, | 370 | 0x00effd07, |
| 336 | 0xb60120b6, | 371 | 0xbb002ebb, |
| 337 | 0x24b60130, | 372 | 0x35b6003e, |
| 338 | 0x0834b608, | 373 | 0x0007f102, |
| 339 | 0xf5022fb9, | 374 | 0x0103f0d3, |
| 340 | 0xbb027121, | 375 | 0xbd0003d0, |
| 341 | 0x07f1003f, | 376 | 0x0825b604, |
| 342 | 0x03f00100, | 377 | 0xb60635b6, |
| 343 | 0x0003d002, | 378 | 0x30b60120, |
| 344 | 0x24bd04bd, | 379 | 0x0824b601, |
| 345 | 0xf11f29f0, | 380 | 0xb90834b6, |
| 346 | 0xf0080007, | 381 | 0x21f5022f, |
| 347 | 0x02d00203, | 382 | 0x3fbb02d3, |
| 348 | /* 0x0433: main */ | 383 | 0x0007f100, |
| 384 | 0x0203f001, | ||
| 385 | 0xbd0003d0, | ||
| 386 | 0xf024bd04, | ||
| 387 | 0x07f11f29, | ||
| 388 | 0x03f00800, | ||
| 389 | 0x0002d002, | ||
| 390 | /* 0x04e2: main */ | ||
| 391 | 0x31f404bd, | ||
| 392 | 0x0028f400, | ||
| 393 | 0xf424d7f0, | ||
| 394 | 0x01f43921, | ||
| 395 | 0x04e4b0f4, | ||
| 396 | 0xfe1e18f4, | ||
| 397 | 0x27f00181, | ||
| 398 | 0xfd20bd06, | ||
| 399 | 0xe4b60412, | ||
| 400 | 0x051efd01, | ||
| 401 | 0xf50018fe, | ||
| 402 | 0xf405d721, | ||
| 403 | /* 0x0512: main_not_ctx_xfer */ | ||
| 404 | 0xef94d30e, | ||
| 405 | 0x01f5f010, | ||
| 406 | 0x037e21f5, | ||
| 407 | /* 0x051f: ih */ | ||
| 408 | 0xf9c60ef4, | ||
| 409 | 0x0188fe80, | ||
| 410 | 0x90f980f9, | ||
| 411 | 0xb0f9a0f9, | ||
| 412 | 0xe0f9d0f9, | ||
| 413 | 0x04bdf0f9, | ||
| 414 | 0x0200a7f1, | ||
| 415 | 0xcf00a3f0, | ||
| 416 | 0xabc400aa, | ||
| 417 | 0x2c0bf404, | ||
| 418 | 0xf124d7f0, | ||
| 419 | 0xf01a00e7, | ||
| 420 | 0xeecf00e3, | ||
| 421 | 0x00f7f100, | ||
| 422 | 0x00f3f019, | ||
| 423 | 0xf400ffcf, | ||
| 424 | 0xe7f00421, | ||
| 425 | 0x0007f101, | ||
| 426 | 0x0003f01d, | ||
| 427 | 0xbd000ed0, | ||
| 428 | /* 0x056d: ih_no_fifo */ | ||
| 429 | 0x0007f104, | ||
| 430 | 0x0003f001, | ||
| 431 | 0xbd000ad0, | ||
| 432 | 0xfcf0fc04, | ||
| 433 | 0xfcd0fce0, | ||
| 434 | 0xfca0fcb0, | ||
| 435 | 0xfe80fc90, | ||
| 436 | 0x80fc0088, | ||
| 437 | 0xf80032f4, | ||
| 438 | /* 0x0591: hub_barrier_done */ | ||
| 439 | 0x01f7f001, | ||
| 440 | 0xbb040e98, | ||
| 441 | 0xffb904fe, | ||
| 442 | 0x18e7f102, | ||
| 443 | 0x40e3f094, | ||
| 444 | 0xf89d21f4, | ||
| 445 | /* 0x05a9: ctx_redswitch */ | ||
| 446 | 0x20f7f000, | ||
| 447 | 0x850007f1, | ||
| 448 | 0xd00103f0, | ||
| 449 | 0x04bd000f, | ||
| 450 | /* 0x05bb: ctx_redswitch_delay */ | ||
| 451 | 0xb608e7f0, | ||
| 452 | 0x1bf401e2, | ||
| 453 | 0x00f5f1fd, | ||
| 454 | 0x00f5f108, | ||
| 455 | 0x0007f102, | ||
| 456 | 0x0103f085, | ||
| 457 | 0xbd000fd0, | ||
| 458 | /* 0x05d7: ctx_xfer */ | ||
| 459 | 0xf100f804, | ||
| 460 | 0xf0810007, | ||
| 461 | 0x0fd00203, | ||
| 349 | 0xf404bd00, | 462 | 0xf404bd00, |
| 350 | 0x28f40031, | 463 | 0x21f50711, |
| 351 | 0x24d7f000, | 464 | /* 0x05ea: ctx_xfer_not_load */ |
| 352 | 0xf43921f4, | 465 | 0x21f505a9, |
| 353 | 0xe4b0f401, | 466 | 0x24bd026a, |
| 354 | 0x1e18f404, | 467 | 0x47fc07f1, |
| 355 | 0xf00181fe, | 468 | 0xd00203f0, |
| 356 | 0x20bd0627, | 469 | 0x04bd0002, |
| 357 | 0xb60412fd, | 470 | 0xb6012cf0, |
| 358 | 0x1efd01e4, | 471 | 0x07f10320, |
| 359 | 0x0018fe05, | 472 | 0x03f04afc, |
| 360 | 0x04f721f5, | 473 | 0x0002d002, |
| 361 | /* 0x0463: main_not_ctx_xfer */ | 474 | 0xacf004bd, |
| 362 | 0x94d30ef4, | 475 | 0x02a5f001, |
| 363 | 0xf5f010ef, | 476 | 0x0000b7f1, |
| 364 | 0xfe21f501, | ||
| 365 | 0xc60ef402, | ||
| 366 | /* 0x0470: ih */ | ||
| 367 | 0x88fe80f9, | ||
| 368 | 0xf980f901, | ||
| 369 | 0xf9a0f990, | ||
| 370 | 0xf9d0f9b0, | ||
| 371 | 0xbdf0f9e0, | ||
| 372 | 0x800acf04, | ||
| 373 | 0xf404abc4, | ||
| 374 | 0xb7f11d0b, | ||
| 375 | 0xd7f01900, | ||
| 376 | 0x40becf24, | ||
| 377 | 0xf400bfcf, | ||
| 378 | 0xb0b70421, | ||
| 379 | 0xe7f00400, | ||
| 380 | 0x00bed001, | ||
| 381 | /* 0x04a8: ih_no_fifo */ | ||
| 382 | 0xfc400ad0, | ||
| 383 | 0xfce0fcf0, | ||
| 384 | 0xfcb0fcd0, | ||
| 385 | 0xfc90fca0, | ||
| 386 | 0x0088fe80, | ||
| 387 | 0x32f480fc, | ||
| 388 | /* 0x04c3: hub_barrier_done */ | ||
| 389 | 0xf001f800, | ||
| 390 | 0x0e9801f7, | ||
| 391 | 0x04febb04, | ||
| 392 | 0x9418e7f1, | ||
| 393 | 0xf440e3f0, | ||
| 394 | 0x00f88d21, | ||
| 395 | /* 0x04d8: ctx_redswitch */ | ||
| 396 | 0x0614e7f1, | ||
| 397 | 0xf006e4b6, | ||
| 398 | 0xefd020f7, | ||
| 399 | 0x08f7f000, | ||
| 400 | /* 0x04e8: ctx_redswitch_delay */ | ||
| 401 | 0xf401f2b6, | ||
| 402 | 0xf7f1fd1b, | ||
| 403 | 0xefd00a20, | ||
| 404 | /* 0x04f7: ctx_xfer */ | ||
| 405 | 0xf100f800, | ||
| 406 | 0xb60a0417, | ||
| 407 | 0x1fd00614, | ||
| 408 | 0x0711f400, | ||
| 409 | 0x04d821f5, | ||
| 410 | /* 0x0508: ctx_xfer_not_load */ | ||
| 411 | 0x4afc17f1, | ||
| 412 | 0xf00213f0, | ||
| 413 | 0x12d00c27, | ||
| 414 | 0x1521f500, | ||
| 415 | 0xfc27f102, | ||
| 416 | 0x0223f047, | ||
| 417 | 0xf00020d0, | ||
| 418 | 0x20b6012c, | ||
| 419 | 0x0012d003, | ||
| 420 | 0xf001acf0, | ||
| 421 | 0xb7f002a5, | ||
| 422 | 0x50b3f000, | ||
| 423 | 0xb6040c98, | ||
| 424 | 0xbcbb0fc4, | ||
| 425 | 0x000c9800, | ||
| 426 | 0xf0010d98, | ||
| 427 | 0x21f500e7, | ||
| 428 | 0xacf00166, | ||
| 429 | 0x00b7f101, | ||
| 430 | 0x50b3f040, | ||
| 431 | 0xb6040c98, | ||
| 432 | 0xbcbb0fc4, | ||
| 433 | 0x010c9800, | ||
| 434 | 0x98020d98, | ||
| 435 | 0xe7f1060f, | ||
| 436 | 0x21f50800, | ||
| 437 | 0xacf00166, | ||
| 438 | 0x04a5f001, | ||
| 439 | 0x3000b7f1, | ||
| 440 | 0x9850b3f0, | 477 | 0x9850b3f0, |
| 441 | 0xc4b6040c, | 478 | 0xc4b6040c, |
| 442 | 0x00bcbb0f, | 479 | 0x00bcbb0f, |
| 443 | 0x98020c98, | 480 | 0x98000c98, |
| 444 | 0x0f98030d, | 481 | 0xe7f0010d, |
| 445 | 0x00e7f108, | 482 | 0x6f21f500, |
| 446 | 0x6621f502, | 483 | 0x01acf001, |
| 447 | 0x1521f501, | 484 | 0x4000b7f1, |
| 448 | 0x0601f402, | 485 | 0x9850b3f0, |
| 449 | /* 0x05a3: ctx_xfer_post */ | 486 | 0xc4b6040c, |
| 450 | 0xf11412f4, | 487 | 0x00bcbb0f, |
| 451 | 0xf04afc17, | 488 | 0x98010c98, |
| 452 | 0x27f00213, | 489 | 0x0f98020d, |
| 453 | 0x0012d00d, | 490 | 0x00e7f106, |
| 454 | 0x021521f5, | 491 | 0x6f21f508, |
| 455 | /* 0x05b4: ctx_xfer_done */ | 492 | 0x01acf001, |
| 456 | 0x04c321f5, | 493 | 0xf104a5f0, |
| 457 | 0x000000f8, | 494 | 0xf03000b7, |
| 495 | 0x0c9850b3, | ||
| 496 | 0x0fc4b604, | ||
| 497 | 0x9800bcbb, | ||
| 498 | 0x0d98020c, | ||
| 499 | 0x080f9803, | ||
| 500 | 0x0200e7f1, | ||
| 501 | 0x016f21f5, | ||
| 502 | 0x025e21f5, | ||
| 503 | 0xf40601f4, | ||
| 504 | /* 0x0686: ctx_xfer_post */ | ||
| 505 | 0x21f50712, | ||
| 506 | /* 0x068a: ctx_xfer_done */ | ||
| 507 | 0x21f5027f, | ||
| 508 | 0x00f80591, | ||
| 509 | 0x00000000, | ||
| 510 | 0x00000000, | ||
| 511 | 0x00000000, | ||
| 512 | 0x00000000, | ||
| 513 | 0x00000000, | ||
| 514 | 0x00000000, | ||
| 515 | 0x00000000, | ||
| 516 | 0x00000000, | ||
| 517 | 0x00000000, | ||
| 518 | 0x00000000, | ||
| 519 | 0x00000000, | ||
| 458 | 0x00000000, | 520 | 0x00000000, |
| 459 | 0x00000000, | 521 | 0x00000000, |
| 460 | 0x00000000, | 522 | 0x00000000, |
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvf0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvf0.fuc.h index f870507be880..6316ebaf5d9a 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvf0.fuc.h +++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvf0.fuc.h | |||
| @@ -41,14 +41,14 @@ uint32_t nvf0_grgpc_data[] = { | |||
| 41 | }; | 41 | }; |
| 42 | 42 | ||
| 43 | uint32_t nvf0_grgpc_code[] = { | 43 | uint32_t nvf0_grgpc_code[] = { |
| 44 | 0x03180ef5, | 44 | 0x03a10ef5, |
| 45 | /* 0x0004: queue_put */ | 45 | /* 0x0004: queue_put */ |
| 46 | 0x9800d898, | 46 | 0x9800d898, |
| 47 | 0x86f001d9, | 47 | 0x86f001d9, |
| 48 | 0x0489b808, | 48 | 0x0489b808, |
| 49 | 0xf00c1bf4, | 49 | 0xf00c1bf4, |
| 50 | 0x21f502f7, | 50 | 0x21f502f7, |
| 51 | 0x00f802fe, | 51 | 0x00f8037e, |
| 52 | /* 0x001c: queue_put_next */ | 52 | /* 0x001c: queue_put_next */ |
| 53 | 0xb60798c4, | 53 | 0xb60798c4, |
| 54 | 0x8dbb0384, | 54 | 0x8dbb0384, |
| @@ -72,184 +72,214 @@ uint32_t nvf0_grgpc_code[] = { | |||
| 72 | /* 0x0066: queue_get_done */ | 72 | /* 0x0066: queue_get_done */ |
| 73 | 0x00f80132, | 73 | 0x00f80132, |
| 74 | /* 0x0068: nv_rd32 */ | 74 | /* 0x0068: nv_rd32 */ |
| 75 | 0x0728b7f1, | 75 | 0xf002ecb9, |
| 76 | 0xb906b4b6, | 76 | 0x07f11fc9, |
| 77 | 0xc9f002ec, | 77 | 0x03f0ca00, |
| 78 | 0x00bcd01f, | 78 | 0x000cd001, |
| 79 | /* 0x0078: nv_rd32_wait */ | 79 | /* 0x007a: nv_rd32_wait */ |
| 80 | 0xc800bccf, | 80 | 0xc7f104bd, |
| 81 | 0x1bf41fcc, | 81 | 0xc3f0ca00, |
| 82 | 0x06a7f0fa, | 82 | 0x00cccf01, |
| 83 | 0x010921f5, | 83 | 0xf41fccc8, |
| 84 | 0xf840bfcf, | 84 | 0xa7f0f31b, |
| 85 | /* 0x008d: nv_wr32 */ | 85 | 0x1021f506, |
| 86 | 0x28b7f100, | 86 | 0x00f7f101, |
| 87 | 0x06b4b607, | 87 | 0x01f3f0cb, |
| 88 | 0xb980bfd0, | 88 | 0xf800ffcf, |
| 89 | 0xc9f002ec, | 89 | /* 0x009d: nv_wr32 */ |
| 90 | 0x1ec9f01f, | 90 | 0x0007f100, |
| 91 | /* 0x00a3: nv_wr32_wait */ | 91 | 0x0103f0cc, |
| 92 | 0xcf00bcd0, | 92 | 0xbd000fd0, |
| 93 | 0xccc800bc, | 93 | 0x02ecb904, |
| 94 | 0xfa1bf41f, | 94 | 0xf01fc9f0, |
| 95 | /* 0x00ae: watchdog_reset */ | 95 | 0x07f11ec9, |
| 96 | 0x87f100f8, | 96 | 0x03f0ca00, |
| 97 | 0x84b60430, | 97 | 0x000cd001, |
| 98 | 0x1ff9f006, | 98 | /* 0x00be: nv_wr32_wait */ |
| 99 | 0xf8008fd0, | 99 | 0xc7f104bd, |
| 100 | /* 0x00bd: watchdog_clear */ | 100 | 0xc3f0ca00, |
| 101 | 0x3087f100, | 101 | 0x00cccf01, |
| 102 | 0x0684b604, | 102 | 0xf41fccc8, |
| 103 | 0xf80080d0, | 103 | 0x00f8f31b, |
| 104 | /* 0x00c9: wait_donez */ | 104 | /* 0x00d0: wait_donez */ |
| 105 | 0xf094bd00, | 105 | 0x99f094bd, |
| 106 | 0x07f10099, | 106 | 0x0007f100, |
| 107 | 0x03f03700, | 107 | 0x0203f037, |
| 108 | 0x0009d002, | 108 | 0xbd0009d0, |
| 109 | 0x07f104bd, | 109 | 0x0007f104, |
| 110 | 0x03f00600, | 110 | 0x0203f006, |
| 111 | 0x000ad002, | 111 | 0xbd000ad0, |
| 112 | /* 0x00e6: wait_donez_ne */ | 112 | /* 0x00ed: wait_donez_ne */ |
| 113 | 0x87f104bd, | 113 | 0x0087f104, |
| 114 | 0x83f00000, | 114 | 0x0183f000, |
| 115 | 0x0088cf01, | 115 | 0xff0088cf, |
| 116 | 0xf4888aff, | 116 | 0x1bf4888a, |
| 117 | 0x94bdf31b, | 117 | 0xf094bdf3, |
| 118 | 0xf10099f0, | ||
| 119 | 0xf0170007, | ||
| 120 | 0x09d00203, | ||
| 121 | 0xf804bd00, | ||
| 122 | /* 0x0109: wait_doneo */ | ||
| 123 | 0xf094bd00, | ||
| 124 | 0x07f10099, | 118 | 0x07f10099, |
| 125 | 0x03f03700, | 119 | 0x03f01700, |
| 126 | 0x0009d002, | 120 | 0x0009d002, |
| 127 | 0x87f104bd, | 121 | 0x00f804bd, |
| 128 | 0x84b60818, | 122 | /* 0x0110: wait_doneo */ |
| 129 | 0x008ad006, | ||
| 130 | /* 0x0124: wait_doneo_e */ | ||
| 131 | 0x040087f1, | ||
| 132 | 0xcf0684b6, | ||
| 133 | 0x8aff0088, | ||
| 134 | 0xf30bf488, | ||
| 135 | 0x99f094bd, | 123 | 0x99f094bd, |
| 136 | 0x0007f100, | 124 | 0x0007f100, |
| 137 | 0x0203f017, | 125 | 0x0203f037, |
| 138 | 0xbd0009d0, | 126 | 0xbd0009d0, |
| 139 | /* 0x0147: mmctx_size */ | 127 | 0x0007f104, |
| 140 | 0xbd00f804, | 128 | 0x0203f006, |
| 141 | /* 0x0149: nv_mmctx_size_loop */ | 129 | 0xbd000ad0, |
| 142 | 0x00e89894, | 130 | /* 0x012d: wait_doneo_e */ |
| 143 | 0xb61a85b6, | 131 | 0x0087f104, |
| 144 | 0x84b60180, | 132 | 0x0183f000, |
| 145 | 0x0098bb02, | 133 | 0xff0088cf, |
| 146 | 0xb804e0b6, | 134 | 0x0bf4888a, |
| 147 | 0x1bf404ef, | 135 | 0xf094bdf3, |
| 148 | 0x029fb9eb, | 136 | 0x07f10099, |
| 149 | /* 0x0166: mmctx_xfer */ | 137 | 0x03f01700, |
| 150 | 0x94bd00f8, | 138 | 0x0009d002, |
| 151 | 0xf10199f0, | 139 | 0x00f804bd, |
| 152 | 0xf0370007, | 140 | /* 0x0150: mmctx_size */ |
| 153 | 0x09d00203, | 141 | /* 0x0152: nv_mmctx_size_loop */ |
| 154 | 0xf104bd00, | 142 | 0xe89894bd, |
| 155 | 0xb6071087, | 143 | 0x1a85b600, |
| 156 | 0x94bd0684, | 144 | 0xb60180b6, |
| 157 | 0xf405bbfd, | 145 | 0x98bb0284, |
| 158 | 0x8bd0090b, | 146 | 0x04e0b600, |
| 159 | 0x0099f000, | 147 | 0xf404efb8, |
| 160 | /* 0x018c: mmctx_base_disabled */ | 148 | 0x9fb9eb1b, |
| 161 | 0xf405eefd, | 149 | /* 0x016f: mmctx_xfer */ |
| 162 | 0x8ed00c0b, | 150 | 0xbd00f802, |
| 163 | 0xc08fd080, | 151 | 0x0199f094, |
| 164 | /* 0x019b: mmctx_multi_disabled */ | 152 | 0x370007f1, |
| 165 | 0xb70199f0, | 153 | 0xd00203f0, |
| 166 | 0xc8010080, | 154 | 0x04bd0009, |
| 155 | 0xbbfd94bd, | ||
| 156 | 0x120bf405, | ||
| 157 | 0xc40007f1, | ||
| 158 | 0xd00103f0, | ||
| 159 | 0x04bd000b, | ||
| 160 | /* 0x0197: mmctx_base_disabled */ | ||
| 161 | 0xfd0099f0, | ||
| 162 | 0x0bf405ee, | ||
| 163 | 0x0007f11e, | ||
| 164 | 0x0103f0c6, | ||
| 165 | 0xbd000ed0, | ||
| 166 | 0x0007f104, | ||
| 167 | 0x0103f0c7, | ||
| 168 | 0xbd000fd0, | ||
| 169 | 0x0199f004, | ||
| 170 | /* 0x01b8: mmctx_multi_disabled */ | ||
| 171 | 0xb600abc8, | ||
| 172 | 0xb9f010b4, | ||
| 173 | 0x01aec80c, | ||
| 174 | 0xfd11e4b6, | ||
| 175 | 0x07f105be, | ||
| 176 | 0x03f0c500, | ||
| 177 | 0x000bd001, | ||
| 178 | /* 0x01d6: mmctx_exec_loop */ | ||
| 179 | /* 0x01d6: mmctx_wait_free */ | ||
| 180 | 0xe7f104bd, | ||
| 181 | 0xe3f0c500, | ||
| 182 | 0x00eecf01, | ||
| 183 | 0xf41fe4f0, | ||
| 184 | 0xce98f30b, | ||
| 185 | 0x05e9fd00, | ||
| 186 | 0xc80007f1, | ||
| 187 | 0xd00103f0, | ||
| 188 | 0x04bd000e, | ||
| 189 | 0xb804c0b6, | ||
| 190 | 0x1bf404cd, | ||
| 191 | 0x02abc8d8, | ||
| 192 | /* 0x0207: mmctx_fini_wait */ | ||
| 193 | 0xf11f1bf4, | ||
| 194 | 0xf0c500b7, | ||
| 195 | 0xbbcf01b3, | ||
| 196 | 0x1fb4f000, | ||
| 197 | 0xf410b4b0, | ||
| 198 | 0xa7f0f01b, | ||
| 199 | 0xd021f402, | ||
| 200 | /* 0x0223: mmctx_stop */ | ||
| 201 | 0xc82b0ef4, | ||
| 167 | 0xb4b600ab, | 202 | 0xb4b600ab, |
| 168 | 0x0cb9f010, | 203 | 0x0cb9f010, |
| 169 | 0xb601aec8, | 204 | 0xf112b9f0, |
| 170 | 0xbefd11e4, | 205 | 0xf0c50007, |
| 171 | 0x008bd005, | 206 | 0x0bd00103, |
| 172 | /* 0x01b4: mmctx_exec_loop */ | 207 | /* 0x023b: mmctx_stop_wait */ |
| 173 | /* 0x01b4: mmctx_wait_free */ | 208 | 0xf104bd00, |
| 174 | 0xf0008ecf, | 209 | 0xf0c500b7, |
| 175 | 0x0bf41fe4, | 210 | 0xbbcf01b3, |
| 176 | 0x00ce98fa, | 211 | 0x12bbc800, |
| 177 | 0xd005e9fd, | 212 | /* 0x024b: mmctx_done */ |
| 178 | 0xc0b6c08e, | 213 | 0xbdf31bf4, |
| 179 | 0x04cdb804, | 214 | 0x0199f094, |
| 180 | 0xc8e81bf4, | 215 | 0x170007f1, |
| 181 | 0x1bf402ab, | 216 | 0xd00203f0, |
| 182 | /* 0x01d5: mmctx_fini_wait */ | 217 | 0x04bd0009, |
| 183 | 0x008bcf18, | 218 | /* 0x025e: strand_wait */ |
| 184 | 0xb01fb4f0, | 219 | 0xa0f900f8, |
| 185 | 0x1bf410b4, | 220 | 0xf402a7f0, |
| 186 | 0x02a7f0f7, | 221 | 0xa0fcd021, |
| 187 | 0xf4c921f4, | 222 | /* 0x026a: strand_pre */ |
| 188 | /* 0x01ea: mmctx_stop */ | 223 | 0x97f000f8, |
| 189 | 0xabc81b0e, | 224 | 0xfc07f10c, |
| 190 | 0x10b4b600, | 225 | 0x0203f04a, |
| 191 | 0xf00cb9f0, | 226 | 0xbd0009d0, |
| 192 | 0x8bd012b9, | 227 | 0x5e21f504, |
| 193 | /* 0x01f9: mmctx_stop_wait */ | 228 | /* 0x027f: strand_post */ |
| 194 | 0x008bcf00, | 229 | 0xf000f802, |
| 195 | 0xf412bbc8, | 230 | 0x07f10d97, |
| 196 | /* 0x0202: mmctx_done */ | 231 | 0x03f04afc, |
| 197 | 0x94bdfa1b, | ||
| 198 | 0xf10199f0, | ||
| 199 | 0xf0170007, | ||
| 200 | 0x09d00203, | ||
| 201 | 0xf804bd00, | ||
| 202 | /* 0x0215: strand_wait */ | ||
| 203 | 0xf0a0f900, | ||
| 204 | 0x21f402a7, | ||
| 205 | 0xf8a0fcc9, | ||
| 206 | /* 0x0221: strand_pre */ | ||
| 207 | 0xfc87f100, | ||
| 208 | 0x0283f04a, | ||
| 209 | 0xd00c97f0, | ||
| 210 | 0x21f50089, | ||
| 211 | 0x00f80215, | ||
| 212 | /* 0x0234: strand_post */ | ||
| 213 | 0x4afc87f1, | ||
| 214 | 0xf00283f0, | ||
| 215 | 0x89d00d97, | ||
| 216 | 0x1521f500, | ||
| 217 | /* 0x0247: strand_set */ | ||
| 218 | 0xf100f802, | ||
| 219 | 0xf04ffca7, | ||
| 220 | 0xaba202a3, | ||
| 221 | 0xc7f00500, | ||
| 222 | 0x00acd00f, | ||
| 223 | 0xd00bc7f0, | ||
| 224 | 0x21f500bc, | ||
| 225 | 0xaed00215, | ||
| 226 | 0x0ac7f000, | ||
| 227 | 0xf500bcd0, | ||
| 228 | 0xf8021521, | ||
| 229 | /* 0x0271: strand_ctx_init */ | ||
| 230 | 0xf094bd00, | ||
| 231 | 0x07f10399, | ||
| 232 | 0x03f03700, | ||
| 233 | 0x0009d002, | 232 | 0x0009d002, |
| 234 | 0x21f504bd, | 233 | 0x21f504bd, |
| 235 | 0xe7f00221, | 234 | 0x00f8025e, |
| 236 | 0x4721f503, | 235 | /* 0x0294: strand_set */ |
| 237 | 0xfca7f102, | 236 | 0xf10fc7f0, |
| 238 | 0x02a3f046, | 237 | 0xf04ffc07, |
| 239 | 0x0400aba0, | 238 | 0x0cd00203, |
| 240 | 0xf040a0d0, | 239 | 0xf004bd00, |
| 241 | 0xbcd001c7, | 240 | 0x07f10bc7, |
| 242 | 0x1521f500, | 241 | 0x03f04afc, |
| 243 | 0x010c9202, | 242 | 0x000cd002, |
| 244 | 0xf000acd0, | 243 | 0x07f104bd, |
| 245 | 0xbcd002c7, | 244 | 0x03f04ffc, |
| 246 | 0x1521f500, | 245 | 0x000ed002, |
| 247 | 0x3421f502, | 246 | 0xc7f004bd, |
| 248 | 0x8087f102, | 247 | 0xfc07f10a, |
| 249 | 0x0684b608, | 248 | 0x0203f04a, |
| 250 | 0xb70089cf, | 249 | 0xbd000cd0, |
| 251 | 0x95220080, | 250 | 0x5e21f504, |
| 252 | /* 0x02ca: ctx_init_strand_loop */ | 251 | /* 0x02d3: strand_ctx_init */ |
| 252 | 0xbd00f802, | ||
| 253 | 0x0399f094, | ||
| 254 | 0x370007f1, | ||
| 255 | 0xd00203f0, | ||
| 256 | 0x04bd0009, | ||
| 257 | 0x026a21f5, | ||
| 258 | 0xf503e7f0, | ||
| 259 | 0xbd029421, | ||
| 260 | 0xfc07f1c4, | ||
| 261 | 0x0203f047, | ||
| 262 | 0xbd000cd0, | ||
| 263 | 0x01c7f004, | ||
| 264 | 0x4afc07f1, | ||
| 265 | 0xd00203f0, | ||
| 266 | 0x04bd000c, | ||
| 267 | 0x025e21f5, | ||
| 268 | 0xf1010c92, | ||
| 269 | 0xf046fc07, | ||
| 270 | 0x0cd00203, | ||
| 271 | 0xf004bd00, | ||
| 272 | 0x07f102c7, | ||
| 273 | 0x03f04afc, | ||
| 274 | 0x000cd002, | ||
| 275 | 0x21f504bd, | ||
| 276 | 0x21f5025e, | ||
| 277 | 0x87f1027f, | ||
| 278 | 0x83f04200, | ||
| 279 | 0x0097f102, | ||
| 280 | 0x0293f020, | ||
| 281 | 0x950099cf, | ||
| 282 | /* 0x034a: ctx_init_strand_loop */ | ||
| 253 | 0x8ed008fe, | 283 | 0x8ed008fe, |
| 254 | 0x408ed000, | 284 | 0x408ed000, |
| 255 | 0xb6808acf, | 285 | 0xb6808acf, |
| @@ -263,198 +293,230 @@ uint32_t nvf0_grgpc_code[] = { | |||
| 263 | 0x170007f1, | 293 | 0x170007f1, |
| 264 | 0xd00203f0, | 294 | 0xd00203f0, |
| 265 | 0x04bd0009, | 295 | 0x04bd0009, |
| 266 | /* 0x02fe: error */ | 296 | /* 0x037e: error */ |
| 267 | 0xe0f900f8, | 297 | 0xe0f900f8, |
| 268 | 0x9814e7f1, | 298 | 0xf102ffb9, |
| 269 | 0xf440e3f0, | 299 | 0xf09814e7, |
| 270 | 0xe0b78d21, | 300 | 0x21f440e3, |
| 271 | 0xf7f0041c, | 301 | 0x01f7f09d, |
| 272 | 0x8d21f401, | 302 | 0xf102ffb9, |
| 273 | 0x00f8e0fc, | 303 | 0xf09c1ce7, |
| 274 | /* 0x0318: init */ | 304 | 0x21f440e3, |
| 275 | 0x04fe04bd, | 305 | 0xf8e0fc9d, |
| 276 | 0x0017f100, | 306 | /* 0x03a1: init */ |
| 277 | 0x0227f012, | 307 | 0xfe04bd00, |
| 278 | 0xf10012d0, | 308 | 0x27f00004, |
| 279 | 0xfe047017, | 309 | 0x0007f102, |
| 280 | 0x17f10010, | 310 | 0x0003f012, |
| 281 | 0x10d00400, | 311 | 0xbd0002d0, |
| 282 | 0x0427f0c0, | 312 | 0x1f17f104, |
| 283 | 0xf40012d0, | 313 | 0x0010fe05, |
| 284 | 0x17f11031, | 314 | 0x070007f1, |
| 285 | 0x14b60608, | 315 | 0xd00003f0, |
| 286 | 0x0012cf06, | 316 | 0x04bd0000, |
| 317 | 0xf10427f0, | ||
| 318 | 0xf0040007, | ||
| 319 | 0x02d00003, | ||
| 320 | 0xf404bd00, | ||
| 321 | 0x27f11031, | ||
| 322 | 0x23f08200, | ||
| 323 | 0x0022cf01, | ||
| 287 | 0xf00137f0, | 324 | 0xf00137f0, |
| 288 | 0x32bb1f24, | 325 | 0x32bb1f24, |
| 289 | 0x0132b604, | 326 | 0x0132b604, |
| 290 | 0x80050280, | 327 | 0x80050280, |
| 291 | 0x10b70603, | 328 | 0x27f10603, |
| 292 | 0x12cf0400, | 329 | 0x23f08600, |
| 293 | 0x04028000, | 330 | 0x0022cf01, |
| 294 | 0x0c30e7f1, | 331 | 0xf1040280, |
| 295 | 0xbd50e3f0, | 332 | 0xf00c30e7, |
| 296 | 0xbd34bd24, | 333 | 0x24bd50e3, |
| 297 | /* 0x0371: init_unk_loop */ | 334 | 0x44bd34bd, |
| 298 | 0x6821f444, | 335 | /* 0x0410: init_unk_loop */ |
| 299 | 0xf400f6b0, | 336 | 0xb06821f4, |
| 300 | 0xf7f00f0b, | 337 | 0x0bf400f6, |
| 301 | 0x04f2bb01, | 338 | 0x01f7f00f, |
| 302 | 0xb6054ffd, | 339 | 0xfd04f2bb, |
| 303 | /* 0x0386: init_unk_next */ | 340 | 0x30b6054f, |
| 304 | 0x20b60130, | 341 | /* 0x0425: init_unk_next */ |
| 305 | 0x04e0b601, | 342 | 0x0120b601, |
| 306 | 0xf40226b0, | 343 | 0xb004e0b6, |
| 307 | /* 0x0392: init_unk_done */ | 344 | 0x1bf40226, |
| 308 | 0x0380e21b, | 345 | /* 0x0431: init_unk_done */ |
| 309 | 0x08048007, | 346 | 0x070380e2, |
| 310 | 0x010027f1, | 347 | 0xf1080480, |
| 311 | 0xcf0223f0, | 348 | 0xf0010027, |
| 312 | 0x34bd0022, | 349 | 0x22cf0223, |
| 313 | 0x070047f1, | 350 | 0x9534bd00, |
| 314 | 0x950644b6, | 351 | 0x07f10825, |
| 315 | 0x45d00825, | 352 | 0x03f0c000, |
| 316 | 0x4045d000, | 353 | 0x0005d001, |
| 317 | 0x98000e98, | 354 | 0x07f104bd, |
| 318 | 0x21f5010f, | 355 | 0x03f0c100, |
| 319 | 0x2fbb0147, | 356 | 0x0005d001, |
| 320 | 0x003fbb00, | 357 | 0x0e9804bd, |
| 321 | 0x98010e98, | 358 | 0x010f9800, |
| 322 | 0x21f5020f, | 359 | 0x015021f5, |
| 323 | 0x0e980147, | 360 | 0xbb002fbb, |
| 324 | 0x00effd05, | 361 | 0x0e98003f, |
| 325 | 0xbb002ebb, | 362 | 0x020f9801, |
| 326 | 0x0e98003e, | 363 | 0x015021f5, |
| 327 | 0x030f9802, | 364 | 0xfd050e98, |
| 328 | 0x014721f5, | ||
| 329 | 0xfd070e98, | ||
| 330 | 0x2ebb00ef, | 365 | 0x2ebb00ef, |
| 331 | 0x003ebb00, | 366 | 0x003ebb00, |
| 332 | 0x130040b7, | 367 | 0x98020e98, |
| 333 | 0xd00235b6, | 368 | 0x21f5030f, |
| 334 | 0x25b60043, | 369 | 0x0e980150, |
| 335 | 0x0635b608, | 370 | 0x00effd07, |
| 336 | 0xb60120b6, | 371 | 0xbb002ebb, |
| 337 | 0x24b60130, | 372 | 0x35b6003e, |
| 338 | 0x0834b608, | 373 | 0x0007f102, |
| 339 | 0xf5022fb9, | 374 | 0x0103f0d3, |
| 340 | 0xbb027121, | 375 | 0xbd0003d0, |
| 341 | 0x07f1003f, | 376 | 0x0825b604, |
| 342 | 0x03f00100, | 377 | 0xb60635b6, |
| 343 | 0x0003d002, | 378 | 0x30b60120, |
| 344 | 0x24bd04bd, | 379 | 0x0824b601, |
| 345 | 0xf11f29f0, | 380 | 0xb90834b6, |
| 346 | 0xf0300007, | 381 | 0x21f5022f, |
| 347 | 0x02d00203, | 382 | 0x3fbb02d3, |
| 348 | /* 0x0433: main */ | 383 | 0x0007f100, |
| 384 | 0x0203f001, | ||
| 385 | 0xbd0003d0, | ||
| 386 | 0xf024bd04, | ||
| 387 | 0x07f11f29, | ||
| 388 | 0x03f03000, | ||
| 389 | 0x0002d002, | ||
| 390 | /* 0x04e2: main */ | ||
| 391 | 0x31f404bd, | ||
| 392 | 0x0028f400, | ||
| 393 | 0xf424d7f0, | ||
| 394 | 0x01f43921, | ||
| 395 | 0x04e4b0f4, | ||
| 396 | 0xfe1e18f4, | ||
| 397 | 0x27f00181, | ||
| 398 | 0xfd20bd06, | ||
| 399 | 0xe4b60412, | ||
| 400 | 0x051efd01, | ||
| 401 | 0xf50018fe, | ||
| 402 | 0xf405d721, | ||
| 403 | /* 0x0512: main_not_ctx_xfer */ | ||
| 404 | 0xef94d30e, | ||
| 405 | 0x01f5f010, | ||
| 406 | 0x037e21f5, | ||
| 407 | /* 0x051f: ih */ | ||
| 408 | 0xf9c60ef4, | ||
| 409 | 0x0188fe80, | ||
| 410 | 0x90f980f9, | ||
| 411 | 0xb0f9a0f9, | ||
| 412 | 0xe0f9d0f9, | ||
| 413 | 0x04bdf0f9, | ||
| 414 | 0x0200a7f1, | ||
| 415 | 0xcf00a3f0, | ||
| 416 | 0xabc400aa, | ||
| 417 | 0x2c0bf404, | ||
| 418 | 0xf124d7f0, | ||
| 419 | 0xf01a00e7, | ||
| 420 | 0xeecf00e3, | ||
| 421 | 0x00f7f100, | ||
| 422 | 0x00f3f019, | ||
| 423 | 0xf400ffcf, | ||
| 424 | 0xe7f00421, | ||
| 425 | 0x0007f101, | ||
| 426 | 0x0003f01d, | ||
| 427 | 0xbd000ed0, | ||
| 428 | /* 0x056d: ih_no_fifo */ | ||
| 429 | 0x0007f104, | ||
| 430 | 0x0003f001, | ||
| 431 | 0xbd000ad0, | ||
| 432 | 0xfcf0fc04, | ||
| 433 | 0xfcd0fce0, | ||
| 434 | 0xfca0fcb0, | ||
| 435 | 0xfe80fc90, | ||
| 436 | 0x80fc0088, | ||
| 437 | 0xf80032f4, | ||
| 438 | /* 0x0591: hub_barrier_done */ | ||
| 439 | 0x01f7f001, | ||
| 440 | 0xbb040e98, | ||
| 441 | 0xffb904fe, | ||
| 442 | 0x18e7f102, | ||
| 443 | 0x40e3f094, | ||
| 444 | 0xf89d21f4, | ||
| 445 | /* 0x05a9: ctx_redswitch */ | ||
| 446 | 0x20f7f000, | ||
| 447 | 0x850007f1, | ||
| 448 | 0xd00103f0, | ||
| 449 | 0x04bd000f, | ||
| 450 | /* 0x05bb: ctx_redswitch_delay */ | ||
| 451 | 0xb608e7f0, | ||
| 452 | 0x1bf401e2, | ||
| 453 | 0x00f5f1fd, | ||
| 454 | 0x00f5f108, | ||
| 455 | 0x0007f102, | ||
| 456 | 0x0103f085, | ||
| 457 | 0xbd000fd0, | ||
| 458 | /* 0x05d7: ctx_xfer */ | ||
| 459 | 0xf100f804, | ||
| 460 | 0xf0810007, | ||
| 461 | 0x0fd00203, | ||
| 349 | 0xf404bd00, | 462 | 0xf404bd00, |
| 350 | 0x28f40031, | 463 | 0x21f50711, |
| 351 | 0x24d7f000, | 464 | /* 0x05ea: ctx_xfer_not_load */ |
| 352 | 0xf43921f4, | 465 | 0x21f505a9, |
| 353 | 0xe4b0f401, | 466 | 0x24bd026a, |
| 354 | 0x1e18f404, | 467 | 0x47fc07f1, |
| 355 | 0xf00181fe, | 468 | 0xd00203f0, |
| 356 | 0x20bd0627, | 469 | 0x04bd0002, |
| 357 | 0xb60412fd, | 470 | 0xb6012cf0, |
| 358 | 0x1efd01e4, | 471 | 0x07f10320, |
| 359 | 0x0018fe05, | 472 | 0x03f04afc, |
| 360 | 0x04f721f5, | 473 | 0x0002d002, |
| 361 | /* 0x0463: main_not_ctx_xfer */ | 474 | 0xacf004bd, |
| 362 | 0x94d30ef4, | 475 | 0x02a5f001, |
| 363 | 0xf5f010ef, | 476 | 0x0000b7f1, |
| 364 | 0xfe21f501, | ||
| 365 | 0xc60ef402, | ||
| 366 | /* 0x0470: ih */ | ||
| 367 | 0x88fe80f9, | ||
| 368 | 0xf980f901, | ||
| 369 | 0xf9a0f990, | ||
| 370 | 0xf9d0f9b0, | ||
| 371 | 0xbdf0f9e0, | ||
| 372 | 0x800acf04, | ||
| 373 | 0xf404abc4, | ||
| 374 | 0xb7f11d0b, | ||
| 375 | 0xd7f01900, | ||
| 376 | 0x40becf24, | ||
| 377 | 0xf400bfcf, | ||
| 378 | 0xb0b70421, | ||
| 379 | 0xe7f00400, | ||
| 380 | 0x00bed001, | ||
| 381 | /* 0x04a8: ih_no_fifo */ | ||
| 382 | 0xfc400ad0, | ||
| 383 | 0xfce0fcf0, | ||
| 384 | 0xfcb0fcd0, | ||
| 385 | 0xfc90fca0, | ||
| 386 | 0x0088fe80, | ||
| 387 | 0x32f480fc, | ||
| 388 | /* 0x04c3: hub_barrier_done */ | ||
| 389 | 0xf001f800, | ||
| 390 | 0x0e9801f7, | ||
| 391 | 0x04febb04, | ||
| 392 | 0x9418e7f1, | ||
| 393 | 0xf440e3f0, | ||
| 394 | 0x00f88d21, | ||
| 395 | /* 0x04d8: ctx_redswitch */ | ||
| 396 | 0x0614e7f1, | ||
| 397 | 0xf006e4b6, | ||
| 398 | 0xefd020f7, | ||
| 399 | 0x08f7f000, | ||
| 400 | /* 0x04e8: ctx_redswitch_delay */ | ||
| 401 | 0xf401f2b6, | ||
| 402 | 0xf7f1fd1b, | ||
| 403 | 0xefd00a20, | ||
| 404 | /* 0x04f7: ctx_xfer */ | ||
| 405 | 0xf100f800, | ||
| 406 | 0xb60a0417, | ||
| 407 | 0x1fd00614, | ||
| 408 | 0x0711f400, | ||
| 409 | 0x04d821f5, | ||
| 410 | /* 0x0508: ctx_xfer_not_load */ | ||
| 411 | 0x4afc17f1, | ||
| 412 | 0xf00213f0, | ||
| 413 | 0x12d00c27, | ||
| 414 | 0x1521f500, | ||
| 415 | 0xfc27f102, | ||
| 416 | 0x0223f047, | ||
| 417 | 0xf00020d0, | ||
| 418 | 0x20b6012c, | ||
| 419 | 0x0012d003, | ||
| 420 | 0xf001acf0, | ||
| 421 | 0xb7f002a5, | ||
| 422 | 0x50b3f000, | ||
| 423 | 0xb6040c98, | ||
| 424 | 0xbcbb0fc4, | ||
| 425 | 0x000c9800, | ||
| 426 | 0xf0010d98, | ||
| 427 | 0x21f500e7, | ||
| 428 | 0xacf00166, | ||
| 429 | 0x00b7f101, | ||
| 430 | 0x50b3f040, | ||
| 431 | 0xb6040c98, | ||
| 432 | 0xbcbb0fc4, | ||
| 433 | 0x010c9800, | ||
| 434 | 0x98020d98, | ||
| 435 | 0xe7f1060f, | ||
| 436 | 0x21f50800, | ||
| 437 | 0xacf00166, | ||
| 438 | 0x04a5f001, | ||
| 439 | 0x3000b7f1, | ||
| 440 | 0x9850b3f0, | 477 | 0x9850b3f0, |
| 441 | 0xc4b6040c, | 478 | 0xc4b6040c, |
| 442 | 0x00bcbb0f, | 479 | 0x00bcbb0f, |
| 443 | 0x98020c98, | 480 | 0x98000c98, |
| 444 | 0x0f98030d, | 481 | 0xe7f0010d, |
| 445 | 0x00e7f108, | 482 | 0x6f21f500, |
| 446 | 0x6621f502, | 483 | 0x01acf001, |
| 447 | 0x1521f501, | 484 | 0x4000b7f1, |
| 448 | 0x0601f402, | 485 | 0x9850b3f0, |
| 449 | /* 0x05a3: ctx_xfer_post */ | 486 | 0xc4b6040c, |
| 450 | 0xf11412f4, | 487 | 0x00bcbb0f, |
| 451 | 0xf04afc17, | 488 | 0x98010c98, |
| 452 | 0x27f00213, | 489 | 0x0f98020d, |
| 453 | 0x0012d00d, | 490 | 0x00e7f106, |
| 454 | 0x021521f5, | 491 | 0x6f21f508, |
| 455 | /* 0x05b4: ctx_xfer_done */ | 492 | 0x01acf001, |
| 456 | 0x04c321f5, | 493 | 0xf104a5f0, |
| 457 | 0x000000f8, | 494 | 0xf03000b7, |
| 495 | 0x0c9850b3, | ||
| 496 | 0x0fc4b604, | ||
| 497 | 0x9800bcbb, | ||
| 498 | 0x0d98020c, | ||
| 499 | 0x080f9803, | ||
| 500 | 0x0200e7f1, | ||
| 501 | 0x016f21f5, | ||
| 502 | 0x025e21f5, | ||
| 503 | 0xf40601f4, | ||
| 504 | /* 0x0686: ctx_xfer_post */ | ||
| 505 | 0x21f50712, | ||
| 506 | /* 0x068a: ctx_xfer_done */ | ||
| 507 | 0x21f5027f, | ||
| 508 | 0x00f80591, | ||
| 509 | 0x00000000, | ||
| 510 | 0x00000000, | ||
| 511 | 0x00000000, | ||
| 512 | 0x00000000, | ||
| 513 | 0x00000000, | ||
| 514 | 0x00000000, | ||
| 515 | 0x00000000, | ||
| 516 | 0x00000000, | ||
| 517 | 0x00000000, | ||
| 518 | 0x00000000, | ||
| 519 | 0x00000000, | ||
| 458 | 0x00000000, | 520 | 0x00000000, |
| 459 | 0x00000000, | 521 | 0x00000000, |
| 460 | 0x00000000, | 522 | 0x00000000, |
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hub.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hub.fuc index b82d2ae89917..c8ddb8d71b91 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hub.fuc +++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hub.fuc | |||
| @@ -68,60 +68,57 @@ error: | |||
| 68 | // | 68 | // |
| 69 | init: | 69 | init: |
| 70 | clear b32 $r0 | 70 | clear b32 $r0 |
| 71 | mov $sp $r0 | ||
| 72 | mov $xdbase $r0 | 71 | mov $xdbase $r0 |
| 73 | 72 | ||
| 73 | // setup stack | ||
| 74 | nv_iord($r1, NV_PGRAPH_FECS_CAPS, 0) | ||
| 75 | extr $r1 $r1 9:17 | ||
| 76 | shl b32 $r1 8 | ||
| 77 | mov $sp $r1 | ||
| 78 | |||
| 74 | // enable fifo access | 79 | // enable fifo access |
| 75 | mov $r1 0x1200 | 80 | mov $r2 NV_PGRAPH_FECS_ACCESS_FIFO |
| 76 | mov $r2 2 | 81 | nv_iowr(NV_PGRAPH_FECS_ACCESS, 0, $r2) |
| 77 | iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE | ||
| 78 | 82 | ||
| 79 | // setup i0 handler, and route all interrupts to it | 83 | // setup i0 handler, and route all interrupts to it |
| 80 | mov $r1 #ih | 84 | mov $r1 #ih |
| 81 | mov $iv0 $r1 | 85 | mov $iv0 $r1 |
| 82 | mov $r1 0x400 | ||
| 83 | iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH | ||
| 84 | 86 | ||
| 85 | // route HUB_CHANNEL_SWITCH to fuc interrupt 8 | 87 | clear b32 $r2 |
| 86 | mov $r3 0x404 | 88 | nv_iowr(NV_PGRAPH_FECS_INTR_ROUTE, 0, $r2) |
| 87 | shl b32 $r3 6 | 89 | |
| 88 | mov $r2 0x2003 // { HUB_CHANNEL_SWITCH, ZERO } -> intr 8 | 90 | // route HUB_CHSW_PULSE to fuc interrupt 8 |
| 89 | iowr I[$r3 + 0x000] $r2 | 91 | mov $r2 0x2003 // { HUB_CHSW_PULSE, ZERO } -> intr 8 |
| 92 | nv_iowr(NV_PGRAPH_FECS_IROUTE, 0, $r2) | ||
| 90 | 93 | ||
| 91 | // not sure what these are, route them because NVIDIA does, and | 94 | // not sure what these are, route them because NVIDIA does, and |
| 92 | // the IRQ handler will signal the host if we ever get one.. we | 95 | // the IRQ handler will signal the host if we ever get one.. we |
| 93 | // may find out if/why we need to handle these if so.. | 96 | // may find out if/why we need to handle these if so.. |
| 94 | // | 97 | // |
| 95 | mov $r2 0x2004 | 98 | mov $r2 0x2004 // { 0x04, ZERO } -> intr 9 |
| 96 | iowr I[$r3 + 0x004] $r2 // { 0x04, ZERO } -> intr 9 | 99 | nv_iowr(NV_PGRAPH_FECS_IROUTE, 1, $r2) |
| 97 | mov $r2 0x200b | 100 | mov $r2 0x200b // { HUB_FIRMWARE_MTHD, ZERO } -> intr 10 |
| 98 | iowr I[$r3 + 0x008] $r2 // { 0x0b, ZERO } -> intr 10 | 101 | nv_iowr(NV_PGRAPH_FECS_IROUTE, 2, $r2) |
| 99 | mov $r2 0x200c | 102 | mov $r2 0x200c // { 0x0c, ZERO } -> intr 15 |
| 100 | iowr I[$r3 + 0x01c] $r2 // { 0x0c, ZERO } -> intr 15 | 103 | nv_iowr(NV_PGRAPH_FECS_IROUTE, 7, $r2) |
| 101 | 104 | ||
| 102 | // enable all INTR_UP interrupts | 105 | // enable all INTR_UP interrupts |
| 103 | mov $r2 0xc24 | 106 | sub b32 $r3 $r0 1 |
| 104 | shl b32 $r2 6 | 107 | nv_iowr(NV_PGRAPH_FECS_INTR_UP_EN, 0, $r3) |
| 105 | not b32 $r3 $r0 | ||
| 106 | iowr I[$r2] $r3 | ||
| 107 | 108 | ||
| 108 | // enable fifo, ctxsw, 9, 10, 15 interrupts | 109 | // enable fifo, ctxsw, 9, fwmthd, 15 interrupts |
| 109 | mov $r2 -0x78fc // 0x8704 | 110 | imm32($r2, 0x8704) |
| 110 | sethi $r2 0 | 111 | nv_iowr(NV_PGRAPH_FECS_INTR_EN_SET, 0, $r2) |
| 111 | iowr I[$r1 + 0x000] $r2 // INTR_EN_SET | ||
| 112 | 112 | ||
| 113 | // fifo level triggered, rest edge | 113 | // fifo level triggered, rest edge |
| 114 | sub b32 $r1 0x100 | 114 | mov $r2 NV_PGRAPH_FECS_INTR_MODE_FIFO_LEVEL |
| 115 | mov $r2 4 | 115 | nv_iowr(NV_PGRAPH_FECS_INTR_MODE, 0, $r2) |
| 116 | iowr I[$r1] $r2 | ||
| 117 | 116 | ||
| 118 | // enable interrupts | 117 | // enable interrupts |
| 119 | bset $flags ie0 | 118 | bset $flags ie0 |
| 120 | 119 | ||
| 121 | // fetch enabled GPC/ROP counts | 120 | // fetch enabled GPC/ROP counts |
| 122 | mov $r14 -0x69fc // 0x409604 | 121 | nv_rd32($r14, 0x409604) |
| 123 | sethi $r14 0x400000 | ||
| 124 | call #nv_rd32 | ||
| 125 | extr $r1 $r15 16:20 | 122 | extr $r1 $r15 16:20 |
| 126 | st b32 D[$r0 + #rop_count] $r1 | 123 | st b32 D[$r0 + #rop_count] $r1 |
| 127 | and $r15 0x1f | 124 | and $r15 0x1f |
| @@ -131,37 +128,40 @@ init: | |||
| 131 | mov $r1 1 | 128 | mov $r1 1 |
| 132 | shl b32 $r1 $r15 | 129 | shl b32 $r1 $r15 |
| 133 | sub b32 $r1 1 | 130 | sub b32 $r1 1 |
| 134 | mov $r2 0x40c | 131 | nv_iowr(NV_PGRAPH_FECS_BAR_MASK0, 0, $r1) |
| 135 | shl b32 $r2 6 | 132 | nv_iowr(NV_PGRAPH_FECS_BAR_MASK1, 0, $r1) |
| 136 | iowr I[$r2 + 0x000] $r1 | ||
| 137 | iowr I[$r2 + 0x100] $r1 | ||
| 138 | 133 | ||
| 139 | // context size calculation, reserve first 256 bytes for use by fuc | 134 | // context size calculation, reserve first 256 bytes for use by fuc |
| 140 | mov $r1 256 | 135 | mov $r1 256 |
| 141 | 136 | ||
| 137 | // | ||
| 138 | mov $r15 2 | ||
| 139 | call(ctx_4170s) | ||
| 140 | call(ctx_4170w) | ||
| 141 | mov $r15 0x10 | ||
| 142 | call(ctx_86c) | ||
| 143 | |||
| 142 | // calculate size of mmio context data | 144 | // calculate size of mmio context data |
| 143 | ld b32 $r14 D[$r0 + #hub_mmio_list_head] | 145 | ld b32 $r14 D[$r0 + #hub_mmio_list_head] |
| 144 | ld b32 $r15 D[$r0 + #hub_mmio_list_tail] | 146 | ld b32 $r15 D[$r0 + #hub_mmio_list_tail] |
| 145 | call #mmctx_size | 147 | call(mmctx_size) |
| 146 | 148 | ||
| 147 | // set mmctx base addresses now so we don't have to do it later, | 149 | // set mmctx base addresses now so we don't have to do it later, |
| 148 | // they don't (currently) ever change | 150 | // they don't (currently) ever change |
| 149 | mov $r3 0x700 | ||
| 150 | shl b32 $r3 6 | ||
| 151 | shr b32 $r4 $r1 8 | 151 | shr b32 $r4 $r1 8 |
| 152 | iowr I[$r3 + 0x000] $r4 // MMCTX_SAVE_SWBASE | 152 | nv_iowr(NV_PGRAPH_FECS_MMCTX_SAVE_SWBASE, 0, $r4) |
| 153 | iowr I[$r3 + 0x100] $r4 // MMCTX_LOAD_SWBASE | 153 | nv_iowr(NV_PGRAPH_FECS_MMCTX_LOAD_SWBASE, 0, $r4) |
| 154 | add b32 $r3 0x1300 | 154 | add b32 $r3 0x1300 |
| 155 | add b32 $r1 $r15 | 155 | add b32 $r1 $r15 |
| 156 | shr b32 $r15 2 | 156 | shr b32 $r15 2 |
| 157 | iowr I[$r3 + 0x000] $r15 // MMCTX_LOAD_COUNT, wtf for?!? | 157 | nv_iowr(NV_PGRAPH_FECS_MMCTX_LOAD_COUNT, 0, $r15) // wtf?? |
| 158 | 158 | ||
| 159 | // strands, base offset needs to be aligned to 256 bytes | 159 | // strands, base offset needs to be aligned to 256 bytes |
| 160 | shr b32 $r1 8 | 160 | shr b32 $r1 8 |
| 161 | add b32 $r1 1 | 161 | add b32 $r1 1 |
| 162 | shl b32 $r1 8 | 162 | shl b32 $r1 8 |
| 163 | mov b32 $r15 $r1 | 163 | mov b32 $r15 $r1 |
| 164 | call #strand_ctx_init | 164 | call(strand_ctx_init) |
| 165 | add b32 $r1 $r15 | 165 | add b32 $r1 $r15 |
| 166 | 166 | ||
| 167 | // initialise each GPC in sequence by passing in the offset of its | 167 | // initialise each GPC in sequence by passing in the offset of its |
| @@ -173,30 +173,29 @@ init: | |||
| 173 | // in GPCn_CC_SCRATCH[1] | 173 | // in GPCn_CC_SCRATCH[1] |
| 174 | // | 174 | // |
| 175 | ld b32 $r3 D[$r0 + #gpc_count] | 175 | ld b32 $r3 D[$r0 + #gpc_count] |
| 176 | mov $r4 0x2000 | 176 | imm32($r4, 0x502000) |
| 177 | sethi $r4 0x500000 | ||
| 178 | init_gpc: | 177 | init_gpc: |
| 179 | // setup, and start GPC ucode running | 178 | // setup, and start GPC ucode running |
| 180 | add b32 $r14 $r4 0x804 | 179 | add b32 $r14 $r4 0x804 |
| 181 | mov b32 $r15 $r1 | 180 | mov b32 $r15 $r1 |
| 182 | call #nv_wr32 // CC_SCRATCH[1] = ctx offset | 181 | call(nv_wr32) // CC_SCRATCH[1] = ctx offset |
| 183 | add b32 $r14 $r4 0x10c | 182 | add b32 $r14 $r4 0x10c |
| 184 | clear b32 $r15 | 183 | clear b32 $r15 |
| 185 | call #nv_wr32 | 184 | call(nv_wr32) |
| 186 | add b32 $r14 $r4 0x104 | 185 | add b32 $r14 $r4 0x104 |
| 187 | call #nv_wr32 // ENTRY | 186 | call(nv_wr32) // ENTRY |
| 188 | add b32 $r14 $r4 0x100 | 187 | add b32 $r14 $r4 0x100 |
| 189 | mov $r15 2 // CTRL_START_TRIGGER | 188 | mov $r15 2 // CTRL_START_TRIGGER |
| 190 | call #nv_wr32 // CTRL | 189 | call(nv_wr32) // CTRL |
| 191 | 190 | ||
| 192 | // wait for it to complete, and adjust context size | 191 | // wait for it to complete, and adjust context size |
| 193 | add b32 $r14 $r4 0x800 | 192 | add b32 $r14 $r4 0x800 |
| 194 | init_gpc_wait: | 193 | init_gpc_wait: |
| 195 | call #nv_rd32 | 194 | call(nv_rd32) |
| 196 | xbit $r15 $r15 31 | 195 | xbit $r15 $r15 31 |
| 197 | bra e #init_gpc_wait | 196 | bra e #init_gpc_wait |
| 198 | add b32 $r14 $r4 0x804 | 197 | add b32 $r14 $r4 0x804 |
| 199 | call #nv_rd32 | 198 | call(nv_rd32) |
| 200 | add b32 $r1 $r15 | 199 | add b32 $r1 $r15 |
| 201 | 200 | ||
| 202 | // next! | 201 | // next! |
| @@ -204,6 +203,12 @@ init: | |||
| 204 | sub b32 $r3 1 | 203 | sub b32 $r3 1 |
| 205 | bra ne #init_gpc | 204 | bra ne #init_gpc |
| 206 | 205 | ||
| 206 | // | ||
| 207 | mov $r15 0 | ||
| 208 | call(ctx_86c) | ||
| 209 | mov $r15 0 | ||
| 210 | call(ctx_4170s) | ||
| 211 | |||
| 207 | // save context size, and tell host we're ready | 212 | // save context size, and tell host we're ready |
| 208 | nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(1), 0, $r1) | 213 | nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(1), 0, $r1) |
| 209 | clear b32 $r1 | 214 | clear b32 $r1 |
| @@ -218,17 +223,15 @@ main: | |||
| 218 | bset $flags $p0 | 223 | bset $flags $p0 |
| 219 | sleep $p0 | 224 | sleep $p0 |
| 220 | mov $r13 #cmd_queue | 225 | mov $r13 #cmd_queue |
| 221 | call #queue_get | 226 | call(queue_get) |
| 222 | bra $p1 #main | 227 | bra $p1 #main |
| 223 | 228 | ||
| 224 | // context switch, requested by GPU? | 229 | // context switch, requested by GPU? |
| 225 | cmpu b32 $r14 0x4001 | 230 | cmpu b32 $r14 0x4001 |
| 226 | bra ne #main_not_ctx_switch | 231 | bra ne #main_not_ctx_switch |
| 227 | trace_set(T_AUTO) | 232 | trace_set(T_AUTO) |
| 228 | mov $r1 0xb00 | 233 | nv_iord($r1, NV_PGRAPH_FECS_CHAN_ADDR, 0) |
| 229 | shl b32 $r1 6 | 234 | nv_iord($r2, NV_PGRAPH_FECS_CHAN_NEXT, 0) |
| 230 | iord $r2 I[$r1 + 0x100] // CHAN_NEXT | ||
| 231 | iord $r1 I[$r1 + 0x000] // CHAN_CUR | ||
| 232 | 235 | ||
| 233 | xbit $r3 $r1 31 | 236 | xbit $r3 $r1 31 |
| 234 | bra e #chsw_no_prev | 237 | bra e #chsw_no_prev |
| @@ -239,12 +242,12 @@ main: | |||
| 239 | trace_set(T_SAVE) | 242 | trace_set(T_SAVE) |
| 240 | bclr $flags $p1 | 243 | bclr $flags $p1 |
| 241 | bset $flags $p2 | 244 | bset $flags $p2 |
| 242 | call #ctx_xfer | 245 | call(ctx_xfer) |
| 243 | trace_clr(T_SAVE); | 246 | trace_clr(T_SAVE); |
| 244 | pop $r2 | 247 | pop $r2 |
| 245 | trace_set(T_LOAD); | 248 | trace_set(T_LOAD); |
| 246 | bset $flags $p1 | 249 | bset $flags $p1 |
| 247 | call #ctx_xfer | 250 | call(ctx_xfer) |
| 248 | trace_clr(T_LOAD); | 251 | trace_clr(T_LOAD); |
| 249 | bra #chsw_done | 252 | bra #chsw_done |
| 250 | chsw_prev_no_next: | 253 | chsw_prev_no_next: |
| @@ -252,25 +255,21 @@ main: | |||
| 252 | mov b32 $r2 $r1 | 255 | mov b32 $r2 $r1 |
| 253 | bclr $flags $p1 | 256 | bclr $flags $p1 |
| 254 | bclr $flags $p2 | 257 | bclr $flags $p2 |
| 255 | call #ctx_xfer | 258 | call(ctx_xfer) |
| 256 | pop $r2 | 259 | pop $r2 |
| 257 | mov $r1 0xb00 | 260 | nv_iowr(NV_PGRAPH_FECS_CHAN_ADDR, 0, $r2) |
| 258 | shl b32 $r1 6 | ||
| 259 | iowr I[$r1] $r2 | ||
| 260 | bra #chsw_done | 261 | bra #chsw_done |
| 261 | chsw_no_prev: | 262 | chsw_no_prev: |
| 262 | xbit $r3 $r2 31 | 263 | xbit $r3 $r2 31 |
| 263 | bra e #chsw_done | 264 | bra e #chsw_done |
| 264 | bset $flags $p1 | 265 | bset $flags $p1 |
| 265 | bclr $flags $p2 | 266 | bclr $flags $p2 |
| 266 | call #ctx_xfer | 267 | call(ctx_xfer) |
| 267 | 268 | ||
| 268 | // ack the context switch request | 269 | // ack the context switch request |
| 269 | chsw_done: | 270 | chsw_done: |
| 270 | mov $r1 0xb0c | 271 | mov $r2 NV_PGRAPH_FECS_CHSW_ACK |
| 271 | shl b32 $r1 6 | 272 | nv_iowr(NV_PGRAPH_FECS_CHSW, 0, $r2) |
| 272 | mov $r2 1 | ||
| 273 | iowr I[$r1 + 0x000] $r2 // 0x409b0c | ||
| 274 | trace_clr(T_AUTO) | 273 | trace_clr(T_AUTO) |
| 275 | bra #main | 274 | bra #main |
| 276 | 275 | ||
| @@ -279,7 +278,7 @@ main: | |||
| 279 | cmpu b32 $r14 0x0001 | 278 | cmpu b32 $r14 0x0001 |
| 280 | bra ne #main_not_ctx_chan | 279 | bra ne #main_not_ctx_chan |
| 281 | mov b32 $r2 $r15 | 280 | mov b32 $r2 $r15 |
| 282 | call #ctx_chan | 281 | call(ctx_chan) |
| 283 | bra #main_done | 282 | bra #main_done |
| 284 | 283 | ||
| 285 | // request to store current channel context? | 284 | // request to store current channel context? |
| @@ -289,14 +288,14 @@ main: | |||
| 289 | trace_set(T_SAVE) | 288 | trace_set(T_SAVE) |
| 290 | bclr $flags $p1 | 289 | bclr $flags $p1 |
| 291 | bclr $flags $p2 | 290 | bclr $flags $p2 |
| 292 | call #ctx_xfer | 291 | call(ctx_xfer) |
| 293 | trace_clr(T_SAVE) | 292 | trace_clr(T_SAVE) |
| 294 | bra #main_done | 293 | bra #main_done |
| 295 | 294 | ||
| 296 | main_not_ctx_save: | 295 | main_not_ctx_save: |
| 297 | shl b32 $r15 $r14 16 | 296 | shl b32 $r15 $r14 16 |
| 298 | or $r15 E_BAD_COMMAND | 297 | or $r15 E_BAD_COMMAND |
| 299 | call #error | 298 | call(error) |
| 300 | bra #main | 299 | bra #main |
| 301 | 300 | ||
| 302 | main_done: | 301 | main_done: |
| @@ -319,41 +318,46 @@ ih: | |||
| 319 | clear b32 $r0 | 318 | clear b32 $r0 |
| 320 | 319 | ||
| 321 | // incoming fifo command? | 320 | // incoming fifo command? |
| 322 | iord $r10 I[$r0 + 0x200] // INTR | 321 | nv_iord($r10, NV_PGRAPH_FECS_INTR, 0) |
| 323 | and $r11 $r10 0x00000004 | 322 | and $r11 $r10 NV_PGRAPH_FECS_INTR_FIFO |
| 324 | bra e #ih_no_fifo | 323 | bra e #ih_no_fifo |
| 325 | // queue incoming fifo command for later processing | 324 | // queue incoming fifo command for later processing |
| 326 | mov $r11 0x1900 | ||
| 327 | mov $r13 #cmd_queue | 325 | mov $r13 #cmd_queue |
| 328 | iord $r14 I[$r11 + 0x100] // FIFO_CMD | 326 | nv_iord($r14, NV_PGRAPH_FECS_FIFO_CMD, 0) |
| 329 | iord $r15 I[$r11 + 0x000] // FIFO_DATA | 327 | nv_iord($r15, NV_PGRAPH_FECS_FIFO_DATA, 0) |
| 330 | call #queue_put | 328 | call(queue_put) |
| 331 | add b32 $r11 0x400 | 329 | add b32 $r11 0x400 |
| 332 | mov $r14 1 | 330 | mov $r14 1 |
| 333 | iowr I[$r11 + 0x000] $r14 // FIFO_ACK | 331 | nv_iowr(NV_PGRAPH_FECS_FIFO_ACK, 0, $r14) |
| 334 | 332 | ||
| 335 | // context switch request? | 333 | // context switch request? |
| 336 | ih_no_fifo: | 334 | ih_no_fifo: |
| 337 | and $r11 $r10 0x00000100 | 335 | and $r11 $r10 NV_PGRAPH_FECS_INTR_CHSW |
| 338 | bra e #ih_no_ctxsw | 336 | bra e #ih_no_ctxsw |
| 339 | // enqueue a context switch for later processing | 337 | // enqueue a context switch for later processing |
| 340 | mov $r13 #cmd_queue | 338 | mov $r13 #cmd_queue |
| 341 | mov $r14 0x4001 | 339 | mov $r14 0x4001 |
| 342 | call #queue_put | 340 | call(queue_put) |
| 343 | 341 | ||
| 344 | // anything we didn't handle, bring it to the host's attention | 342 | // firmware method? |
| 345 | ih_no_ctxsw: | 343 | ih_no_ctxsw: |
| 346 | mov $r11 0x104 | 344 | and $r11 $r10 NV_PGRAPH_FECS_INTR_FWMTHD |
| 345 | bra e #ih_no_fwmthd | ||
| 346 | // none we handle, ack, and fall-through to unhandled | ||
| 347 | mov $r11 0x100 | ||
| 348 | nv_wr32(0x400144, $r11) | ||
| 349 | |||
| 350 | // anything we didn't handle, bring it to the host's attention | ||
| 351 | ih_no_fwmthd: | ||
| 352 | mov $r11 0x104 // FIFO | CHSW | ||
| 347 | not b32 $r11 | 353 | not b32 $r11 |
| 348 | and $r11 $r10 $r11 | 354 | and $r11 $r10 $r11 |
| 349 | bra e #ih_no_other | 355 | bra e #ih_no_other |
| 350 | mov $r10 0xc1c | 356 | nv_iowr(NV_PGRAPH_FECS_INTR_UP_SET, 0, $r11) |
| 351 | shl b32 $r10 6 | ||
| 352 | iowr I[$r10] $r11 // INTR_UP_SET | ||
| 353 | 357 | ||
| 354 | // ack, and wake up main() | 358 | // ack, and wake up main() |
| 355 | ih_no_other: | 359 | ih_no_other: |
| 356 | iowr I[$r0 + 0x100] $r10 // INTR_ACK | 360 | nv_iowr(NV_PGRAPH_FECS_INTR_ACK, 0, $r10) |
| 357 | 361 | ||
| 358 | pop $r15 | 362 | pop $r15 |
| 359 | pop $r14 | 363 | pop $r14 |
| @@ -370,12 +374,10 @@ ih: | |||
| 370 | #if CHIPSET < GK100 | 374 | #if CHIPSET < GK100 |
| 371 | // Not real sure, but, MEM_CMD 7 will hang forever if this isn't done | 375 | // Not real sure, but, MEM_CMD 7 will hang forever if this isn't done |
| 372 | ctx_4160s: | 376 | ctx_4160s: |
| 373 | mov $r14 0x4160 | ||
| 374 | sethi $r14 0x400000 | ||
| 375 | mov $r15 1 | 377 | mov $r15 1 |
| 376 | call #nv_wr32 | 378 | nv_wr32(0x404160, $r15) |
| 377 | ctx_4160s_wait: | 379 | ctx_4160s_wait: |
| 378 | call #nv_rd32 | 380 | nv_rd32($r15, 0x404160) |
| 379 | xbit $r15 $r15 4 | 381 | xbit $r15 $r15 4 |
| 380 | bra e #ctx_4160s_wait | 382 | bra e #ctx_4160s_wait |
| 381 | ret | 383 | ret |
| @@ -384,10 +386,8 @@ ctx_4160s: | |||
| 384 | // to hang with STATUS=0x00000007 until it's cleared.. fbcon can | 386 | // to hang with STATUS=0x00000007 until it's cleared.. fbcon can |
| 385 | // still function with it set however... | 387 | // still function with it set however... |
| 386 | ctx_4160c: | 388 | ctx_4160c: |
| 387 | mov $r14 0x4160 | ||
| 388 | sethi $r14 0x400000 | ||
| 389 | clear b32 $r15 | 389 | clear b32 $r15 |
| 390 | call #nv_wr32 | 390 | nv_wr32(0x404160, $r15) |
| 391 | ret | 391 | ret |
| 392 | #endif | 392 | #endif |
| 393 | 393 | ||
| @@ -396,18 +396,14 @@ ctx_4160c: | |||
| 396 | // In: $r15 value to set 0x404170 to | 396 | // In: $r15 value to set 0x404170 to |
| 397 | // | 397 | // |
| 398 | ctx_4170s: | 398 | ctx_4170s: |
| 399 | mov $r14 0x4170 | ||
| 400 | sethi $r14 0x400000 | ||
| 401 | or $r15 0x10 | 399 | or $r15 0x10 |
| 402 | call #nv_wr32 | 400 | nv_wr32(0x404170, $r15) |
| 403 | ret | 401 | ret |
| 404 | 402 | ||
| 405 | // Waits for a ctx_4170s() call to complete | 403 | // Waits for a ctx_4170s() call to complete |
| 406 | // | 404 | // |
| 407 | ctx_4170w: | 405 | ctx_4170w: |
| 408 | mov $r14 0x4170 | 406 | nv_rd32($r15, 0x404170) |
| 409 | sethi $r14 0x400000 | ||
| 410 | call #nv_rd32 | ||
| 411 | and $r15 0x10 | 407 | and $r15 0x10 |
| 412 | bra ne #ctx_4170w | 408 | bra ne #ctx_4170w |
| 413 | ret | 409 | ret |
| @@ -419,16 +415,18 @@ ctx_4170w: | |||
| 419 | // funny things happen. | 415 | // funny things happen. |
| 420 | // | 416 | // |
| 421 | ctx_redswitch: | 417 | ctx_redswitch: |
| 422 | mov $r14 0x614 | 418 | mov $r14 NV_PGRAPH_FECS_RED_SWITCH_ENABLE_GPC |
| 423 | shl b32 $r14 6 | 419 | or $r14 NV_PGRAPH_FECS_RED_SWITCH_POWER_ROP |
| 424 | mov $r15 0x270 | 420 | or $r14 NV_PGRAPH_FECS_RED_SWITCH_POWER_GPC |
| 425 | iowr I[$r14] $r15 // HUB_RED_SWITCH = ENABLE_GPC, POWER_ALL | 421 | or $r14 NV_PGRAPH_FECS_RED_SWITCH_POWER_MAIN |
| 422 | nv_iowr(NV_PGRAPH_FECS_RED_SWITCH, 0, $r14) | ||
| 426 | mov $r15 8 | 423 | mov $r15 8 |
| 427 | ctx_redswitch_delay: | 424 | ctx_redswitch_delay: |
| 428 | sub b32 $r15 1 | 425 | sub b32 $r15 1 |
| 429 | bra ne #ctx_redswitch_delay | 426 | bra ne #ctx_redswitch_delay |
| 430 | mov $r15 0x770 | 427 | or $r14 NV_PGRAPH_FECS_RED_SWITCH_ENABLE_ROP |
| 431 | iowr I[$r14] $r15 // HUB_RED_SWITCH = ENABLE_ALL, POWER_ALL | 428 | or $r14 NV_PGRAPH_FECS_RED_SWITCH_ENABLE_MAIN |
| 429 | nv_iowr(NV_PGRAPH_FECS_RED_SWITCH, 0, $r14) | ||
| 432 | ret | 430 | ret |
| 433 | 431 | ||
| 434 | // Not a clue what this is for, except that unless the value is 0x10, the | 432 | // Not a clue what this is for, except that unless the value is 0x10, the |
| @@ -437,15 +435,18 @@ ctx_redswitch: | |||
| 437 | // In: $r15 value to set to (0x00/0x10 are used) | 435 | // In: $r15 value to set to (0x00/0x10 are used) |
| 438 | // | 436 | // |
| 439 | ctx_86c: | 437 | ctx_86c: |
| 440 | mov $r14 0x86c | 438 | nv_iowr(NV_PGRAPH_FECS_UNK86C, 0, $r15) |
| 441 | shl b32 $r14 6 | 439 | nv_wr32(0x408a14, $r15) |
| 442 | iowr I[$r14] $r15 // HUB(0x86c) = val | 440 | nv_wr32(NV_PGRAPH_GPCX_GPCCS_UNK86C, $r15) |
| 443 | mov $r14 -0x75ec | 441 | ret |
| 444 | sethi $r14 0x400000 | 442 | |
| 445 | call #nv_wr32 // ROP(0xa14) = val | 443 | // In: $r15 NV_PGRAPH_FECS_MEM_CMD_* |
| 446 | mov $r14 -0x5794 | 444 | ctx_mem: |
| 447 | sethi $r14 0x410000 | 445 | nv_iowr(NV_PGRAPH_FECS_MEM_CMD, 0, $r15) |
| 448 | call #nv_wr32 // GPC(0x86c) = val | 446 | ctx_mem_wait: |
| 447 | nv_iord($r15, NV_PGRAPH_FECS_MEM_CMD, 0) | ||
| 448 | or $r15 $r15 | ||
| 449 | bra ne #ctx_mem_wait | ||
| 449 | ret | 450 | ret |
| 450 | 451 | ||
| 451 | // ctx_load - load's a channel's ctxctl data, and selects its vm | 452 | // ctx_load - load's a channel's ctxctl data, and selects its vm |
| @@ -457,23 +458,14 @@ ctx_load: | |||
| 457 | 458 | ||
| 458 | // switch to channel, somewhat magic in parts.. | 459 | // switch to channel, somewhat magic in parts.. |
| 459 | mov $r10 12 // DONE_UNK12 | 460 | mov $r10 12 // DONE_UNK12 |
| 460 | call #wait_donez | 461 | call(wait_donez) |
| 461 | mov $r1 0xa24 | 462 | clear b32 $r15 |
| 462 | shl b32 $r1 6 | 463 | nv_iowr(0x409a24, 0, $r15) |
| 463 | iowr I[$r1 + 0x000] $r0 // 0x409a24 | 464 | nv_iowr(NV_PGRAPH_FECS_CHAN_NEXT, 0, $r2) |
| 464 | mov $r3 0xb00 | 465 | nv_iowr(NV_PGRAPH_FECS_MEM_CHAN, 0, $r2) |
| 465 | shl b32 $r3 6 | 466 | mov $r15 NV_PGRAPH_FECS_MEM_CMD_LOAD_CHAN |
| 466 | iowr I[$r3 + 0x100] $r2 // CHAN_NEXT | 467 | call(ctx_mem) |
| 467 | mov $r1 0xa0c | 468 | nv_iowr(NV_PGRAPH_FECS_CHAN_ADDR, 0, $r2) |
| 468 | shl b32 $r1 6 | ||
| 469 | mov $r4 7 | ||
| 470 | iowr I[$r1 + 0x000] $r2 // MEM_CHAN | ||
| 471 | iowr I[$r1 + 0x100] $r4 // MEM_CMD | ||
| 472 | ctx_chan_wait_0: | ||
| 473 | iord $r4 I[$r1 + 0x100] | ||
| 474 | and $r4 0x1f | ||
| 475 | bra ne #ctx_chan_wait_0 | ||
| 476 | iowr I[$r3 + 0x000] $r2 // CHAN_CUR | ||
| 477 | 469 | ||
| 478 | // load channel header, fetch PGRAPH context pointer | 470 | // load channel header, fetch PGRAPH context pointer |
| 479 | mov $xtargets $r0 | 471 | mov $xtargets $r0 |
| @@ -482,14 +474,10 @@ ctx_load: | |||
| 482 | add b32 $r2 2 | 474 | add b32 $r2 2 |
| 483 | 475 | ||
| 484 | trace_set(T_LCHAN) | 476 | trace_set(T_LCHAN) |
| 485 | mov $r1 0xa04 | 477 | nv_iowr(NV_PGRAPH_FECS_MEM_BASE, 0, $r2) |
| 486 | shl b32 $r1 6 | 478 | imm32($r2, NV_PGRAPH_FECS_MEM_TARGET_UNK31) |
| 487 | iowr I[$r1 + 0x000] $r2 // MEM_BASE | 479 | or $r2 NV_PGRAPH_FECS_MEM_TARGET_AS_VRAM |
| 488 | mov $r1 0xa20 | 480 | nv_iowr(NV_PGRAPH_FECS_MEM_TARGET, 0, $r2) |
| 489 | shl b32 $r1 6 | ||
| 490 | mov $r2 0x0002 | ||
| 491 | sethi $r2 0x80000000 | ||
| 492 | iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vram | ||
| 493 | mov $r1 0x10 // chan + 0x0210 | 481 | mov $r1 0x10 // chan + 0x0210 |
| 494 | mov $r2 #xfer_data | 482 | mov $r2 #xfer_data |
| 495 | sethi $r2 0x00020000 // 16 bytes | 483 | sethi $r2 0x00020000 // 16 bytes |
| @@ -507,13 +495,9 @@ ctx_load: | |||
| 507 | 495 | ||
| 508 | // set transfer base to start of context, and fetch context header | 496 | // set transfer base to start of context, and fetch context header |
| 509 | trace_set(T_LCTXH) | 497 | trace_set(T_LCTXH) |
| 510 | mov $r2 0xa04 | 498 | nv_iowr(NV_PGRAPH_FECS_MEM_BASE, 0, $r1) |
| 511 | shl b32 $r2 6 | 499 | mov $r2 NV_PGRAPH_FECS_MEM_TARGET_AS_VM |
| 512 | iowr I[$r2 + 0x000] $r1 // MEM_BASE | 500 | nv_iowr(NV_PGRAPH_FECS_MEM_TARGET, 0, $r2) |
| 513 | mov $r2 1 | ||
| 514 | mov $r1 0xa20 | ||
| 515 | shl b32 $r1 6 | ||
| 516 | iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vm | ||
| 517 | mov $r1 #chan_data | 501 | mov $r1 #chan_data |
| 518 | sethi $r1 0x00060000 // 256 bytes | 502 | sethi $r1 0x00060000 // 256 bytes |
| 519 | xdld $r0 $r1 | 503 | xdld $r0 $r1 |
| @@ -532,21 +516,15 @@ ctx_load: | |||
| 532 | // | 516 | // |
| 533 | ctx_chan: | 517 | ctx_chan: |
| 534 | #if CHIPSET < GK100 | 518 | #if CHIPSET < GK100 |
| 535 | call #ctx_4160s | 519 | call(ctx_4160s) |
| 536 | #endif | 520 | #endif |
| 537 | call #ctx_load | 521 | call(ctx_load) |
| 538 | mov $r10 12 // DONE_UNK12 | 522 | mov $r10 12 // DONE_UNK12 |
| 539 | call #wait_donez | 523 | call(wait_donez) |
| 540 | mov $r1 0xa10 | 524 | mov $r15 5 // MEM_CMD 5 ??? |
| 541 | shl b32 $r1 6 | 525 | call(ctx_mem) |
| 542 | mov $r2 5 | ||
| 543 | iowr I[$r1 + 0x000] $r2 // MEM_CMD = 5 (???) | ||
| 544 | ctx_chan_wait: | ||
| 545 | iord $r2 I[$r1 + 0x000] | ||
| 546 | or $r2 $r2 | ||
| 547 | bra ne #ctx_chan_wait | ||
| 548 | #if CHIPSET < GK100 | 526 | #if CHIPSET < GK100 |
| 549 | call #ctx_4160c | 527 | call(ctx_4160c) |
| 550 | #endif | 528 | #endif |
| 551 | ret | 529 | ret |
| 552 | 530 | ||
| @@ -562,9 +540,7 @@ ctx_chan: | |||
| 562 | ctx_mmio_exec: | 540 | ctx_mmio_exec: |
| 563 | // set transfer base to be the mmio list | 541 | // set transfer base to be the mmio list |
| 564 | ld b32 $r3 D[$r0 + #chan_mmio_address] | 542 | ld b32 $r3 D[$r0 + #chan_mmio_address] |
| 565 | mov $r2 0xa04 | 543 | nv_iowr(NV_PGRAPH_FECS_MEM_BASE, 0, $r3) |
| 566 | shl b32 $r2 6 | ||
| 567 | iowr I[$r2 + 0x000] $r3 // MEM_BASE | ||
| 568 | 544 | ||
| 569 | clear b32 $r3 | 545 | clear b32 $r3 |
| 570 | ctx_mmio_loop: | 546 | ctx_mmio_loop: |
| @@ -580,7 +556,7 @@ ctx_mmio_exec: | |||
| 580 | ctx_mmio_pull: | 556 | ctx_mmio_pull: |
| 581 | ld b32 $r14 D[$r4 + #xfer_data + 0x00] | 557 | ld b32 $r14 D[$r4 + #xfer_data + 0x00] |
| 582 | ld b32 $r15 D[$r4 + #xfer_data + 0x04] | 558 | ld b32 $r15 D[$r4 + #xfer_data + 0x04] |
| 583 | call #nv_wr32 | 559 | call(nv_wr32) |
| 584 | 560 | ||
| 585 | // next! | 561 | // next! |
| 586 | add b32 $r3 8 | 562 | add b32 $r3 8 |
| @@ -590,7 +566,7 @@ ctx_mmio_exec: | |||
| 590 | // set transfer base back to the current context | 566 | // set transfer base back to the current context |
| 591 | ctx_mmio_done: | 567 | ctx_mmio_done: |
| 592 | ld b32 $r3 D[$r0 + #ctx_current] | 568 | ld b32 $r3 D[$r0 + #ctx_current] |
| 593 | iowr I[$r2 + 0x000] $r3 // MEM_BASE | 569 | nv_iowr(NV_PGRAPH_FECS_MEM_BASE, 0, $r3) |
| 594 | 570 | ||
| 595 | // disable the mmio list now, we don't need/want to execute it again | 571 | // disable the mmio list now, we don't need/want to execute it again |
| 596 | st b32 D[$r0 + #chan_mmio_count] $r0 | 572 | st b32 D[$r0 + #chan_mmio_count] $r0 |
| @@ -610,12 +586,10 @@ ctx_mmio_exec: | |||
| 610 | // | 586 | // |
| 611 | ctx_xfer: | 587 | ctx_xfer: |
| 612 | // according to mwk, some kind of wait for idle | 588 | // according to mwk, some kind of wait for idle |
| 613 | mov $r15 0xc00 | ||
| 614 | shl b32 $r15 6 | ||
| 615 | mov $r14 4 | 589 | mov $r14 4 |
| 616 | iowr I[$r15 + 0x200] $r14 | 590 | nv_iowr(0x409c08, 0, $r14) |
| 617 | ctx_xfer_idle: | 591 | ctx_xfer_idle: |
| 618 | iord $r14 I[$r15 + 0x000] | 592 | nv_iord($r14, 0x409c00, 0) |
| 619 | and $r14 0x2000 | 593 | and $r14 0x2000 |
| 620 | bra ne #ctx_xfer_idle | 594 | bra ne #ctx_xfer_idle |
| 621 | 595 | ||
| @@ -623,50 +597,42 @@ ctx_xfer: | |||
| 623 | bra $p2 #ctx_xfer_pre_load | 597 | bra $p2 #ctx_xfer_pre_load |
| 624 | ctx_xfer_pre: | 598 | ctx_xfer_pre: |
| 625 | mov $r15 0x10 | 599 | mov $r15 0x10 |
| 626 | call #ctx_86c | 600 | call(ctx_86c) |
| 627 | #if CHIPSET < GK100 | 601 | #if CHIPSET < GK100 |
| 628 | call #ctx_4160s | 602 | call(ctx_4160s) |
| 629 | #endif | 603 | #endif |
| 630 | bra not $p1 #ctx_xfer_exec | 604 | bra not $p1 #ctx_xfer_exec |
| 631 | 605 | ||
| 632 | ctx_xfer_pre_load: | 606 | ctx_xfer_pre_load: |
| 633 | mov $r15 2 | 607 | mov $r15 2 |
| 634 | call #ctx_4170s | 608 | call(ctx_4170s) |
| 635 | call #ctx_4170w | 609 | call(ctx_4170w) |
| 636 | call #ctx_redswitch | 610 | call(ctx_redswitch) |
| 637 | clear b32 $r15 | 611 | clear b32 $r15 |
| 638 | call #ctx_4170s | 612 | call(ctx_4170s) |
| 639 | call #ctx_load | 613 | call(ctx_load) |
| 640 | 614 | ||
| 641 | // fetch context pointer, and initiate xfer on all GPCs | 615 | // fetch context pointer, and initiate xfer on all GPCs |
| 642 | ctx_xfer_exec: | 616 | ctx_xfer_exec: |
| 643 | ld b32 $r1 D[$r0 + #ctx_current] | 617 | ld b32 $r1 D[$r0 + #ctx_current] |
| 644 | mov $r2 0x414 | 618 | |
| 645 | shl b32 $r2 6 | 619 | clear b32 $r2 |
| 646 | iowr I[$r2 + 0x000] $r0 // BAR_STATUS = reset | 620 | nv_iowr(NV_PGRAPH_FECS_BAR, 0, $r2) |
| 647 | mov $r14 -0x5b00 | 621 | |
| 648 | sethi $r14 0x410000 | 622 | nv_wr32(0x41a500, $r1) // GPC_BCAST_WRCMD_DATA = ctx pointer |
| 649 | mov b32 $r15 $r1 | ||
| 650 | call #nv_wr32 // GPC_BCAST_WRCMD_DATA = ctx pointer | ||
| 651 | add b32 $r14 4 | ||
| 652 | xbit $r15 $flags $p1 | 623 | xbit $r15 $flags $p1 |
| 653 | xbit $r2 $flags $p2 | 624 | xbit $r2 $flags $p2 |
| 654 | shl b32 $r2 1 | 625 | shl b32 $r2 1 |
| 655 | or $r15 $r2 | 626 | or $r15 $r2 |
| 656 | call #nv_wr32 // GPC_BCAST_WRCMD_CMD = GPC_XFER(type) | 627 | nv_wr32(0x41a504, $r15) // GPC_BCAST_WRCMD_CMD = GPC_XFER(type) |
| 657 | 628 | ||
| 658 | // strands | 629 | // strands |
| 659 | mov $r1 0x4afc | 630 | call(strand_pre) |
| 660 | sethi $r1 0x20000 | 631 | clear b32 $r2 |
| 661 | mov $r2 0xc | 632 | nv_iowr(NV_PGRAPH_FECS_STRAND_SELECT, 0x3f, $r2) |
| 662 | iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c | 633 | xbit $r2 $flags $p1 // SAVE/LOAD |
| 663 | call #strand_wait | 634 | add b32 $r2 NV_PGRAPH_FECS_STRAND_CMD_SAVE |
| 664 | mov $r2 0x47fc | 635 | nv_iowr(NV_PGRAPH_FECS_STRAND_CMD, 0x3f, $r2) |
| 665 | sethi $r2 0x20000 | ||
| 666 | iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00 | ||
| 667 | xbit $r2 $flags $p1 | ||
| 668 | add b32 $r2 3 | ||
| 669 | iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD) | ||
| 670 | 636 | ||
| 671 | // mmio context | 637 | // mmio context |
| 672 | xbit $r10 $flags $p1 // direction | 638 | xbit $r10 $flags $p1 // direction |
| @@ -675,48 +641,42 @@ ctx_xfer: | |||
| 675 | ld b32 $r12 D[$r0 + #hub_mmio_list_head] | 641 | ld b32 $r12 D[$r0 + #hub_mmio_list_head] |
| 676 | ld b32 $r13 D[$r0 + #hub_mmio_list_tail] | 642 | ld b32 $r13 D[$r0 + #hub_mmio_list_tail] |
| 677 | mov $r14 0 // not multi | 643 | mov $r14 0 // not multi |
| 678 | call #mmctx_xfer | 644 | call(mmctx_xfer) |
| 679 | 645 | ||
| 680 | // wait for GPCs to all complete | 646 | // wait for GPCs to all complete |
| 681 | mov $r10 8 // DONE_BAR | 647 | mov $r10 8 // DONE_BAR |
| 682 | call #wait_doneo | 648 | call(wait_doneo) |
| 683 | 649 | ||
| 684 | // wait for strand xfer to complete | 650 | // wait for strand xfer to complete |
| 685 | call #strand_wait | 651 | call(strand_wait) |
| 686 | 652 | ||
| 687 | // post-op | 653 | // post-op |
| 688 | bra $p1 #ctx_xfer_post | 654 | bra $p1 #ctx_xfer_post |
| 689 | mov $r10 12 // DONE_UNK12 | 655 | mov $r10 12 // DONE_UNK12 |
| 690 | call #wait_donez | 656 | call(wait_donez) |
| 691 | mov $r1 0xa10 | 657 | mov $r15 5 // MEM_CMD 5 ??? |
| 692 | shl b32 $r1 6 | 658 | call(ctx_mem) |
| 693 | mov $r2 5 | ||
| 694 | iowr I[$r1] $r2 // MEM_CMD | ||
| 695 | ctx_xfer_post_save_wait: | ||
| 696 | iord $r2 I[$r1] | ||
| 697 | or $r2 $r2 | ||
| 698 | bra ne #ctx_xfer_post_save_wait | ||
| 699 | 659 | ||
| 700 | bra $p2 #ctx_xfer_done | 660 | bra $p2 #ctx_xfer_done |
| 701 | ctx_xfer_post: | 661 | ctx_xfer_post: |
| 702 | mov $r15 2 | 662 | mov $r15 2 |
| 703 | call #ctx_4170s | 663 | call(ctx_4170s) |
| 704 | clear b32 $r15 | 664 | clear b32 $r15 |
| 705 | call #ctx_86c | 665 | call(ctx_86c) |
| 706 | call #strand_post | 666 | call(strand_post) |
| 707 | call #ctx_4170w | 667 | call(ctx_4170w) |
| 708 | clear b32 $r15 | 668 | clear b32 $r15 |
| 709 | call #ctx_4170s | 669 | call(ctx_4170s) |
| 710 | 670 | ||
| 711 | bra not $p1 #ctx_xfer_no_post_mmio | 671 | bra not $p1 #ctx_xfer_no_post_mmio |
| 712 | ld b32 $r1 D[$r0 + #chan_mmio_count] | 672 | ld b32 $r1 D[$r0 + #chan_mmio_count] |
| 713 | or $r1 $r1 | 673 | or $r1 $r1 |
| 714 | bra e #ctx_xfer_no_post_mmio | 674 | bra e #ctx_xfer_no_post_mmio |
| 715 | call #ctx_mmio_exec | 675 | call(ctx_mmio_exec) |
| 716 | 676 | ||
| 717 | ctx_xfer_no_post_mmio: | 677 | ctx_xfer_no_post_mmio: |
| 718 | #if CHIPSET < GK100 | 678 | #if CHIPSET < GK100 |
| 719 | call #ctx_4160c | 679 | call(ctx_4160c) |
| 720 | #endif | 680 | #endif |
| 721 | 681 | ||
| 722 | ctx_xfer_done: | 682 | ctx_xfer_done: |
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5 b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5 new file mode 100644 index 000000000000..7c5d25630fa8 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5 | |||
| @@ -0,0 +1,40 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2013 Red Hat Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | * Authors: Ben Skeggs <bskeggs@redhat.com> | ||
| 23 | */ | ||
| 24 | |||
| 25 | #define CHIPSET GK208 | ||
| 26 | #include "macros.fuc" | ||
| 27 | |||
| 28 | .section #nv108_grhub_data | ||
| 29 | #define INCLUDE_DATA | ||
| 30 | #include "com.fuc" | ||
| 31 | #include "hub.fuc" | ||
| 32 | #undef INCLUDE_DATA | ||
| 33 | |||
| 34 | .section #nv108_grhub_code | ||
| 35 | #define INCLUDE_CODE | ||
| 36 | bra #init | ||
| 37 | #include "com.fuc" | ||
| 38 | #include "hub.fuc" | ||
| 39 | .align 256 | ||
| 40 | #undef INCLUDE_CODE | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h new file mode 100644 index 000000000000..4750984bf380 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h | |||
| @@ -0,0 +1,916 @@ | |||
| 1 | uint32_t nv108_grhub_data[] = { | ||
| 2 | /* 0x0000: hub_mmio_list_head */ | ||
| 3 | 0x00000300, | ||
| 4 | /* 0x0004: hub_mmio_list_tail */ | ||
| 5 | 0x00000304, | ||
| 6 | /* 0x0008: gpc_count */ | ||
| 7 | 0x00000000, | ||
| 8 | /* 0x000c: rop_count */ | ||
| 9 | 0x00000000, | ||
| 10 | /* 0x0010: cmd_queue */ | ||
| 11 | 0x00000000, | ||
| 12 | 0x00000000, | ||
| 13 | 0x00000000, | ||
| 14 | 0x00000000, | ||
| 15 | 0x00000000, | ||
| 16 | 0x00000000, | ||
| 17 | 0x00000000, | ||
| 18 | 0x00000000, | ||
| 19 | 0x00000000, | ||
| 20 | 0x00000000, | ||
| 21 | 0x00000000, | ||
| 22 | 0x00000000, | ||
| 23 | 0x00000000, | ||
| 24 | 0x00000000, | ||
| 25 | 0x00000000, | ||
| 26 | 0x00000000, | ||
| 27 | 0x00000000, | ||
| 28 | 0x00000000, | ||
| 29 | /* 0x0058: ctx_current */ | ||
| 30 | 0x00000000, | ||
| 31 | 0x00000000, | ||
| 32 | 0x00000000, | ||
| 33 | 0x00000000, | ||
| 34 | 0x00000000, | ||
| 35 | 0x00000000, | ||
| 36 | 0x00000000, | ||
| 37 | 0x00000000, | ||
| 38 | 0x00000000, | ||
| 39 | 0x00000000, | ||
| 40 | 0x00000000, | ||
| 41 | 0x00000000, | ||
| 42 | 0x00000000, | ||
| 43 | 0x00000000, | ||
| 44 | 0x00000000, | ||
| 45 | 0x00000000, | ||
| 46 | 0x00000000, | ||
| 47 | 0x00000000, | ||
| 48 | 0x00000000, | ||
| 49 | 0x00000000, | ||
| 50 | 0x00000000, | ||
| 51 | 0x00000000, | ||
| 52 | 0x00000000, | ||
| 53 | 0x00000000, | ||
| 54 | 0x00000000, | ||
| 55 | 0x00000000, | ||
| 56 | 0x00000000, | ||
| 57 | 0x00000000, | ||
| 58 | 0x00000000, | ||
| 59 | 0x00000000, | ||
| 60 | 0x00000000, | ||
| 61 | 0x00000000, | ||
| 62 | 0x00000000, | ||
| 63 | 0x00000000, | ||
| 64 | 0x00000000, | ||
| 65 | 0x00000000, | ||
| 66 | 0x00000000, | ||
| 67 | 0x00000000, | ||
| 68 | 0x00000000, | ||
| 69 | 0x00000000, | ||
| 70 | 0x00000000, | ||
| 71 | 0x00000000, | ||
| 72 | /* 0x0100: chan_data */ | ||
| 73 | /* 0x0100: chan_mmio_count */ | ||
| 74 | 0x00000000, | ||
| 75 | /* 0x0104: chan_mmio_address */ | ||
| 76 | 0x00000000, | ||
| 77 | 0x00000000, | ||
| 78 | 0x00000000, | ||
| 79 | 0x00000000, | ||
| 80 | 0x00000000, | ||
| 81 | 0x00000000, | ||
| 82 | 0x00000000, | ||
| 83 | 0x00000000, | ||
| 84 | 0x00000000, | ||
| 85 | 0x00000000, | ||
| 86 | 0x00000000, | ||
| 87 | 0x00000000, | ||
| 88 | 0x00000000, | ||
| 89 | 0x00000000, | ||
| 90 | 0x00000000, | ||
| 91 | 0x00000000, | ||
| 92 | 0x00000000, | ||
| 93 | 0x00000000, | ||
| 94 | 0x00000000, | ||
| 95 | 0x00000000, | ||
| 96 | 0x00000000, | ||
| 97 | 0x00000000, | ||
| 98 | 0x00000000, | ||
| 99 | 0x00000000, | ||
| 100 | 0x00000000, | ||
| 101 | 0x00000000, | ||
| 102 | 0x00000000, | ||
| 103 | 0x00000000, | ||
| 104 | 0x00000000, | ||
| 105 | 0x00000000, | ||
| 106 | 0x00000000, | ||
| 107 | 0x00000000, | ||
| 108 | 0x00000000, | ||
| 109 | 0x00000000, | ||
| 110 | 0x00000000, | ||
| 111 | 0x00000000, | ||
| 112 | 0x00000000, | ||
| 113 | 0x00000000, | ||
| 114 | 0x00000000, | ||
| 115 | 0x00000000, | ||
| 116 | 0x00000000, | ||
| 117 | 0x00000000, | ||
| 118 | 0x00000000, | ||
| 119 | 0x00000000, | ||
| 120 | 0x00000000, | ||
| 121 | 0x00000000, | ||
| 122 | 0x00000000, | ||
| 123 | 0x00000000, | ||
| 124 | 0x00000000, | ||
| 125 | 0x00000000, | ||
| 126 | 0x00000000, | ||
| 127 | 0x00000000, | ||
| 128 | 0x00000000, | ||
| 129 | 0x00000000, | ||
| 130 | 0x00000000, | ||
| 131 | 0x00000000, | ||
| 132 | 0x00000000, | ||
| 133 | 0x00000000, | ||
| 134 | 0x00000000, | ||
| 135 | 0x00000000, | ||
| 136 | 0x00000000, | ||
| 137 | 0x00000000, | ||
| 138 | 0x00000000, | ||
| 139 | /* 0x0200: xfer_data */ | ||
| 140 | 0x00000000, | ||
| 141 | 0x00000000, | ||
| 142 | 0x00000000, | ||
| 143 | 0x00000000, | ||
| 144 | 0x00000000, | ||
| 145 | 0x00000000, | ||
| 146 | 0x00000000, | ||
| 147 | 0x00000000, | ||
| 148 | 0x00000000, | ||
| 149 | 0x00000000, | ||
| 150 | 0x00000000, | ||
| 151 | 0x00000000, | ||
| 152 | 0x00000000, | ||
| 153 | 0x00000000, | ||
| 154 | 0x00000000, | ||
| 155 | 0x00000000, | ||
| 156 | 0x00000000, | ||
| 157 | 0x00000000, | ||
| 158 | 0x00000000, | ||
| 159 | 0x00000000, | ||
| 160 | 0x00000000, | ||
| 161 | 0x00000000, | ||
| 162 | 0x00000000, | ||
| 163 | 0x00000000, | ||
| 164 | 0x00000000, | ||
| 165 | 0x00000000, | ||
| 166 | 0x00000000, | ||
| 167 | 0x00000000, | ||
| 168 | 0x00000000, | ||
| 169 | 0x00000000, | ||
| 170 | 0x00000000, | ||
| 171 | 0x00000000, | ||
| 172 | 0x00000000, | ||
| 173 | 0x00000000, | ||
| 174 | 0x00000000, | ||
| 175 | 0x00000000, | ||
| 176 | 0x00000000, | ||
| 177 | 0x00000000, | ||
| 178 | 0x00000000, | ||
| 179 | 0x00000000, | ||
| 180 | 0x00000000, | ||
| 181 | 0x00000000, | ||
| 182 | 0x00000000, | ||
| 183 | 0x00000000, | ||
| 184 | 0x00000000, | ||
| 185 | 0x00000000, | ||
| 186 | 0x00000000, | ||
| 187 | 0x00000000, | ||
| 188 | 0x00000000, | ||
| 189 | 0x00000000, | ||
| 190 | 0x00000000, | ||
| 191 | 0x00000000, | ||
| 192 | 0x00000000, | ||
| 193 | 0x00000000, | ||
| 194 | 0x00000000, | ||
| 195 | 0x00000000, | ||
| 196 | 0x00000000, | ||
| 197 | 0x00000000, | ||
| 198 | 0x00000000, | ||
| 199 | 0x00000000, | ||
| 200 | 0x00000000, | ||
| 201 | 0x00000000, | ||
| 202 | 0x00000000, | ||
| 203 | 0x00000000, | ||
| 204 | /* 0x0300: hub_mmio_list_base */ | ||
| 205 | 0x0417e91c, | ||
| 206 | }; | ||
| 207 | |||
| 208 | uint32_t nv108_grhub_code[] = { | ||
| 209 | 0x030e0ef5, | ||
| 210 | /* 0x0004: queue_put */ | ||
| 211 | 0x9800d898, | ||
| 212 | 0x86f001d9, | ||
| 213 | 0xf489a408, | ||
| 214 | 0x020f0b1b, | ||
| 215 | 0x0002f87e, | ||
| 216 | /* 0x001a: queue_put_next */ | ||
| 217 | 0x98c400f8, | ||
| 218 | 0x0384b607, | ||
| 219 | 0xb6008dbb, | ||
| 220 | 0x8eb50880, | ||
| 221 | 0x018fb500, | ||
| 222 | 0xf00190b6, | ||
| 223 | 0xd9b50f94, | ||
| 224 | /* 0x0037: queue_get */ | ||
| 225 | 0xf400f801, | ||
| 226 | 0xd8980131, | ||
| 227 | 0x01d99800, | ||
| 228 | 0x0bf489a4, | ||
| 229 | 0x0789c421, | ||
| 230 | 0xbb0394b6, | ||
| 231 | 0x90b6009d, | ||
| 232 | 0x009e9808, | ||
| 233 | 0xb6019f98, | ||
| 234 | 0x84f00180, | ||
| 235 | 0x00d8b50f, | ||
| 236 | /* 0x0063: queue_get_done */ | ||
| 237 | 0xf80132f4, | ||
| 238 | /* 0x0065: nv_rd32 */ | ||
| 239 | 0xf0ecb200, | ||
| 240 | 0x00801fc9, | ||
| 241 | 0x0cf601ca, | ||
| 242 | /* 0x0073: nv_rd32_wait */ | ||
| 243 | 0x8c04bd00, | ||
| 244 | 0xcf01ca00, | ||
| 245 | 0xccc800cc, | ||
| 246 | 0xf61bf41f, | ||
| 247 | 0xec7e060a, | ||
| 248 | 0x008f0000, | ||
| 249 | 0xffcf01cb, | ||
| 250 | /* 0x008f: nv_wr32 */ | ||
| 251 | 0x8000f800, | ||
| 252 | 0xf601cc00, | ||
| 253 | 0x04bd000f, | ||
| 254 | 0xc9f0ecb2, | ||
| 255 | 0x1ec9f01f, | ||
| 256 | 0x01ca0080, | ||
| 257 | 0xbd000cf6, | ||
| 258 | /* 0x00a9: nv_wr32_wait */ | ||
| 259 | 0xca008c04, | ||
| 260 | 0x00cccf01, | ||
| 261 | 0xf41fccc8, | ||
| 262 | 0x00f8f61b, | ||
| 263 | /* 0x00b8: wait_donez */ | ||
| 264 | 0x99f094bd, | ||
| 265 | 0x37008000, | ||
| 266 | 0x0009f602, | ||
| 267 | 0x008004bd, | ||
| 268 | 0x0af60206, | ||
| 269 | /* 0x00cf: wait_donez_ne */ | ||
| 270 | 0x8804bd00, | ||
| 271 | 0xcf010000, | ||
| 272 | 0x8aff0088, | ||
| 273 | 0xf61bf488, | ||
| 274 | 0x99f094bd, | ||
| 275 | 0x17008000, | ||
| 276 | 0x0009f602, | ||
| 277 | 0x00f804bd, | ||
| 278 | /* 0x00ec: wait_doneo */ | ||
| 279 | 0x99f094bd, | ||
| 280 | 0x37008000, | ||
| 281 | 0x0009f602, | ||
| 282 | 0x008004bd, | ||
| 283 | 0x0af60206, | ||
| 284 | /* 0x0103: wait_doneo_e */ | ||
| 285 | 0x8804bd00, | ||
| 286 | 0xcf010000, | ||
| 287 | 0x8aff0088, | ||
| 288 | 0xf60bf488, | ||
| 289 | 0x99f094bd, | ||
| 290 | 0x17008000, | ||
| 291 | 0x0009f602, | ||
| 292 | 0x00f804bd, | ||
| 293 | /* 0x0120: mmctx_size */ | ||
| 294 | /* 0x0122: nv_mmctx_size_loop */ | ||
| 295 | 0xe89894bd, | ||
| 296 | 0x1a85b600, | ||
| 297 | 0xb60180b6, | ||
| 298 | 0x98bb0284, | ||
| 299 | 0x04e0b600, | ||
| 300 | 0x1bf4efa4, | ||
| 301 | 0xf89fb2ec, | ||
| 302 | /* 0x013d: mmctx_xfer */ | ||
| 303 | 0xf094bd00, | ||
| 304 | 0x00800199, | ||
| 305 | 0x09f60237, | ||
| 306 | 0xbd04bd00, | ||
| 307 | 0x05bbfd94, | ||
| 308 | 0x800f0bf4, | ||
| 309 | 0xf601c400, | ||
| 310 | 0x04bd000b, | ||
| 311 | /* 0x015f: mmctx_base_disabled */ | ||
| 312 | 0xfd0099f0, | ||
| 313 | 0x0bf405ee, | ||
| 314 | 0xc6008018, | ||
| 315 | 0x000ef601, | ||
| 316 | 0x008004bd, | ||
| 317 | 0x0ff601c7, | ||
| 318 | 0xf004bd00, | ||
| 319 | /* 0x017a: mmctx_multi_disabled */ | ||
| 320 | 0xabc80199, | ||
| 321 | 0x10b4b600, | ||
| 322 | 0xc80cb9f0, | ||
| 323 | 0xe4b601ae, | ||
| 324 | 0x05befd11, | ||
| 325 | 0x01c50080, | ||
| 326 | 0xbd000bf6, | ||
| 327 | /* 0x0195: mmctx_exec_loop */ | ||
| 328 | /* 0x0195: mmctx_wait_free */ | ||
| 329 | 0xc5008e04, | ||
| 330 | 0x00eecf01, | ||
| 331 | 0xf41fe4f0, | ||
| 332 | 0xce98f60b, | ||
| 333 | 0x05e9fd00, | ||
| 334 | 0x01c80080, | ||
| 335 | 0xbd000ef6, | ||
| 336 | 0x04c0b604, | ||
| 337 | 0x1bf4cda4, | ||
| 338 | 0x02abc8df, | ||
| 339 | /* 0x01bf: mmctx_fini_wait */ | ||
| 340 | 0x8b1c1bf4, | ||
| 341 | 0xcf01c500, | ||
| 342 | 0xb4f000bb, | ||
| 343 | 0x10b4b01f, | ||
| 344 | 0x0af31bf4, | ||
| 345 | 0x00b87e02, | ||
| 346 | 0x250ef400, | ||
| 347 | /* 0x01d8: mmctx_stop */ | ||
| 348 | 0xb600abc8, | ||
| 349 | 0xb9f010b4, | ||
| 350 | 0x12b9f00c, | ||
| 351 | 0x01c50080, | ||
| 352 | 0xbd000bf6, | ||
| 353 | /* 0x01ed: mmctx_stop_wait */ | ||
| 354 | 0xc5008b04, | ||
| 355 | 0x00bbcf01, | ||
| 356 | 0xf412bbc8, | ||
| 357 | /* 0x01fa: mmctx_done */ | ||
| 358 | 0x94bdf61b, | ||
| 359 | 0x800199f0, | ||
| 360 | 0xf6021700, | ||
| 361 | 0x04bd0009, | ||
| 362 | /* 0x020a: strand_wait */ | ||
| 363 | 0xa0f900f8, | ||
| 364 | 0xb87e020a, | ||
| 365 | 0xa0fc0000, | ||
| 366 | /* 0x0216: strand_pre */ | ||
| 367 | 0x0c0900f8, | ||
| 368 | 0x024afc80, | ||
| 369 | 0xbd0009f6, | ||
| 370 | 0x020a7e04, | ||
| 371 | /* 0x0227: strand_post */ | ||
| 372 | 0x0900f800, | ||
| 373 | 0x4afc800d, | ||
| 374 | 0x0009f602, | ||
| 375 | 0x0a7e04bd, | ||
| 376 | 0x00f80002, | ||
| 377 | /* 0x0238: strand_set */ | ||
| 378 | 0xfc800f0c, | ||
| 379 | 0x0cf6024f, | ||
| 380 | 0x0c04bd00, | ||
| 381 | 0x4afc800b, | ||
| 382 | 0x000cf602, | ||
| 383 | 0xfc8004bd, | ||
| 384 | 0x0ef6024f, | ||
| 385 | 0x0c04bd00, | ||
| 386 | 0x4afc800a, | ||
| 387 | 0x000cf602, | ||
| 388 | 0x0a7e04bd, | ||
| 389 | 0x00f80002, | ||
| 390 | /* 0x0268: strand_ctx_init */ | ||
| 391 | 0x99f094bd, | ||
| 392 | 0x37008003, | ||
| 393 | 0x0009f602, | ||
| 394 | 0x167e04bd, | ||
| 395 | 0x030e0002, | ||
| 396 | 0x0002387e, | ||
| 397 | 0xfc80c4bd, | ||
| 398 | 0x0cf60247, | ||
| 399 | 0x0c04bd00, | ||
| 400 | 0x4afc8001, | ||
| 401 | 0x000cf602, | ||
| 402 | 0x0a7e04bd, | ||
| 403 | 0x0c920002, | ||
| 404 | 0x46fc8001, | ||
| 405 | 0x000cf602, | ||
| 406 | 0x020c04bd, | ||
| 407 | 0x024afc80, | ||
| 408 | 0xbd000cf6, | ||
| 409 | 0x020a7e04, | ||
| 410 | 0x02277e00, | ||
| 411 | 0x42008800, | ||
| 412 | 0x20008902, | ||
| 413 | 0x0099cf02, | ||
| 414 | /* 0x02c7: ctx_init_strand_loop */ | ||
| 415 | 0xf608fe95, | ||
| 416 | 0x8ef6008e, | ||
| 417 | 0x808acf40, | ||
| 418 | 0xb606a5b6, | ||
| 419 | 0xeabb01a0, | ||
| 420 | 0x0480b600, | ||
| 421 | 0xf40192b6, | ||
| 422 | 0xe4b6e81b, | ||
| 423 | 0xf2efbc08, | ||
| 424 | 0x99f094bd, | ||
| 425 | 0x17008003, | ||
| 426 | 0x0009f602, | ||
| 427 | 0x00f804bd, | ||
| 428 | /* 0x02f8: error */ | ||
| 429 | 0x02050080, | ||
| 430 | 0xbd000ff6, | ||
| 431 | 0x80010f04, | ||
| 432 | 0xf6030700, | ||
| 433 | 0x04bd000f, | ||
| 434 | /* 0x030e: init */ | ||
| 435 | 0x04bd00f8, | ||
| 436 | 0x410007fe, | ||
| 437 | 0x11cf4200, | ||
| 438 | 0x0911e700, | ||
| 439 | 0x0814b601, | ||
| 440 | 0x020014fe, | ||
| 441 | 0x12004002, | ||
| 442 | 0xbd0002f6, | ||
| 443 | 0x05c94104, | ||
| 444 | 0xbd0010fe, | ||
| 445 | 0x07004024, | ||
| 446 | 0xbd0002f6, | ||
| 447 | 0x20034204, | ||
| 448 | 0x01010080, | ||
| 449 | 0xbd0002f6, | ||
| 450 | 0x20044204, | ||
| 451 | 0x01010480, | ||
| 452 | 0xbd0002f6, | ||
| 453 | 0x200b4204, | ||
| 454 | 0x01010880, | ||
| 455 | 0xbd0002f6, | ||
| 456 | 0x200c4204, | ||
| 457 | 0x01011c80, | ||
| 458 | 0xbd0002f6, | ||
| 459 | 0x01039204, | ||
| 460 | 0x03090080, | ||
| 461 | 0xbd0003f6, | ||
| 462 | 0x87044204, | ||
| 463 | 0xf6040040, | ||
| 464 | 0x04bd0002, | ||
| 465 | 0x00400402, | ||
| 466 | 0x0002f603, | ||
| 467 | 0x31f404bd, | ||
| 468 | 0x96048e10, | ||
| 469 | 0x00657e40, | ||
| 470 | 0xc7feb200, | ||
| 471 | 0x01b590f1, | ||
| 472 | 0x1ff4f003, | ||
| 473 | 0x01020fb5, | ||
| 474 | 0x041fbb01, | ||
| 475 | 0x800112b6, | ||
| 476 | 0xf6010300, | ||
| 477 | 0x04bd0001, | ||
| 478 | 0x01040080, | ||
| 479 | 0xbd0001f6, | ||
| 480 | 0x01004104, | ||
| 481 | 0x627e020f, | ||
| 482 | 0x717e0006, | ||
| 483 | 0x100f0006, | ||
| 484 | 0x0006b37e, | ||
| 485 | 0x98000e98, | ||
| 486 | 0x207e010f, | ||
| 487 | 0x14950001, | ||
| 488 | 0xc0008008, | ||
| 489 | 0x0004f601, | ||
| 490 | 0x008004bd, | ||
| 491 | 0x04f601c1, | ||
| 492 | 0xb704bd00, | ||
| 493 | 0xbb130030, | ||
| 494 | 0xf5b6001f, | ||
| 495 | 0xd3008002, | ||
| 496 | 0x000ff601, | ||
| 497 | 0x15b604bd, | ||
| 498 | 0x0110b608, | ||
| 499 | 0xb20814b6, | ||
| 500 | 0x02687e1f, | ||
| 501 | 0x001fbb00, | ||
| 502 | 0x84020398, | ||
| 503 | /* 0x041f: init_gpc */ | ||
| 504 | 0xb8502000, | ||
| 505 | 0x0008044e, | ||
| 506 | 0x8f7e1fb2, | ||
| 507 | 0x4eb80000, | ||
| 508 | 0xbd00010c, | ||
| 509 | 0x008f7ef4, | ||
| 510 | 0x044eb800, | ||
| 511 | 0x8f7e0001, | ||
| 512 | 0x4eb80000, | ||
| 513 | 0x0f000100, | ||
| 514 | 0x008f7e02, | ||
| 515 | 0x004eb800, | ||
| 516 | /* 0x044e: init_gpc_wait */ | ||
| 517 | 0x657e0008, | ||
| 518 | 0xffc80000, | ||
| 519 | 0xf90bf41f, | ||
| 520 | 0x08044eb8, | ||
| 521 | 0x00657e00, | ||
| 522 | 0x001fbb00, | ||
| 523 | 0x800040b7, | ||
| 524 | 0xf40132b6, | ||
| 525 | 0x000fb41b, | ||
| 526 | 0x0006b37e, | ||
| 527 | 0x627e000f, | ||
| 528 | 0x00800006, | ||
| 529 | 0x01f60201, | ||
| 530 | 0xbd04bd00, | ||
| 531 | 0x1f19f014, | ||
| 532 | 0x02300080, | ||
| 533 | 0xbd0001f6, | ||
| 534 | /* 0x0491: main */ | ||
| 535 | 0x0031f404, | ||
| 536 | 0x0d0028f4, | ||
| 537 | 0x00377e10, | ||
| 538 | 0xf401f400, | ||
| 539 | 0x4001e4b1, | ||
| 540 | 0x00c71bf5, | ||
| 541 | 0x99f094bd, | ||
| 542 | 0x37008004, | ||
| 543 | 0x0009f602, | ||
| 544 | 0x008104bd, | ||
| 545 | 0x11cf02c0, | ||
| 546 | 0xc1008200, | ||
| 547 | 0x0022cf02, | ||
| 548 | 0xf41f13c8, | ||
| 549 | 0x23c8770b, | ||
| 550 | 0x550bf41f, | ||
| 551 | 0x12b220f9, | ||
| 552 | 0x99f094bd, | ||
| 553 | 0x37008007, | ||
| 554 | 0x0009f602, | ||
| 555 | 0x32f404bd, | ||
| 556 | 0x0231f401, | ||
| 557 | 0x0008367e, | ||
| 558 | 0x99f094bd, | ||
| 559 | 0x17008007, | ||
| 560 | 0x0009f602, | ||
| 561 | 0x20fc04bd, | ||
| 562 | 0x99f094bd, | ||
| 563 | 0x37008006, | ||
| 564 | 0x0009f602, | ||
| 565 | 0x31f404bd, | ||
| 566 | 0x08367e01, | ||
| 567 | 0xf094bd00, | ||
| 568 | 0x00800699, | ||
| 569 | 0x09f60217, | ||
| 570 | 0xf404bd00, | ||
| 571 | /* 0x0522: chsw_prev_no_next */ | ||
| 572 | 0x20f92f0e, | ||
| 573 | 0x32f412b2, | ||
| 574 | 0x0232f401, | ||
| 575 | 0x0008367e, | ||
| 576 | 0x008020fc, | ||
| 577 | 0x02f602c0, | ||
| 578 | 0xf404bd00, | ||
| 579 | /* 0x053e: chsw_no_prev */ | ||
| 580 | 0x23c8130e, | ||
| 581 | 0x0d0bf41f, | ||
| 582 | 0xf40131f4, | ||
| 583 | 0x367e0232, | ||
| 584 | /* 0x054e: chsw_done */ | ||
| 585 | 0x01020008, | ||
| 586 | 0x02c30080, | ||
| 587 | 0xbd0002f6, | ||
| 588 | 0xf094bd04, | ||
| 589 | 0x00800499, | ||
| 590 | 0x09f60217, | ||
| 591 | 0xf504bd00, | ||
| 592 | /* 0x056b: main_not_ctx_switch */ | ||
| 593 | 0xb0ff2a0e, | ||
| 594 | 0x1bf401e4, | ||
| 595 | 0x7ef2b20c, | ||
| 596 | 0xf40007d6, | ||
| 597 | /* 0x057a: main_not_ctx_chan */ | ||
| 598 | 0xe4b0400e, | ||
| 599 | 0x2c1bf402, | ||
| 600 | 0x99f094bd, | ||
| 601 | 0x37008007, | ||
| 602 | 0x0009f602, | ||
| 603 | 0x32f404bd, | ||
| 604 | 0x0232f401, | ||
| 605 | 0x0008367e, | ||
| 606 | 0x99f094bd, | ||
| 607 | 0x17008007, | ||
| 608 | 0x0009f602, | ||
| 609 | 0x0ef404bd, | ||
| 610 | /* 0x05a9: main_not_ctx_save */ | ||
| 611 | 0x10ef9411, | ||
| 612 | 0x7e01f5f0, | ||
| 613 | 0xf50002f8, | ||
| 614 | /* 0x05b7: main_done */ | ||
| 615 | 0xbdfede0e, | ||
| 616 | 0x1f29f024, | ||
| 617 | 0x02300080, | ||
| 618 | 0xbd0002f6, | ||
| 619 | 0xcc0ef504, | ||
| 620 | /* 0x05c9: ih */ | ||
| 621 | 0xfe80f9fe, | ||
| 622 | 0x80f90188, | ||
| 623 | 0xa0f990f9, | ||
| 624 | 0xd0f9b0f9, | ||
| 625 | 0xf0f9e0f9, | ||
| 626 | 0x004a04bd, | ||
| 627 | 0x00aacf02, | ||
| 628 | 0xf404abc4, | ||
| 629 | 0x100d230b, | ||
| 630 | 0xcf1a004e, | ||
| 631 | 0x004f00ee, | ||
| 632 | 0x00ffcf19, | ||
| 633 | 0x0000047e, | ||
| 634 | 0x0400b0b7, | ||
| 635 | 0x0040010e, | ||
| 636 | 0x000ef61d, | ||
| 637 | /* 0x060a: ih_no_fifo */ | ||
| 638 | 0xabe404bd, | ||
| 639 | 0x0bf40100, | ||
| 640 | 0x4e100d0c, | ||
| 641 | 0x047e4001, | ||
| 642 | /* 0x061a: ih_no_ctxsw */ | ||
| 643 | 0xabe40000, | ||
| 644 | 0x0bf40400, | ||
| 645 | 0x01004b10, | ||
| 646 | 0x448ebfb2, | ||
| 647 | 0x8f7e4001, | ||
| 648 | /* 0x062e: ih_no_fwmthd */ | ||
| 649 | 0x044b0000, | ||
| 650 | 0xffb0bd01, | ||
| 651 | 0x0bf4b4ab, | ||
| 652 | 0x0700800c, | ||
| 653 | 0x000bf603, | ||
| 654 | /* 0x0642: ih_no_other */ | ||
| 655 | 0x004004bd, | ||
| 656 | 0x000af601, | ||
| 657 | 0xf0fc04bd, | ||
| 658 | 0xd0fce0fc, | ||
| 659 | 0xa0fcb0fc, | ||
| 660 | 0x80fc90fc, | ||
| 661 | 0xfc0088fe, | ||
| 662 | 0x0032f480, | ||
| 663 | /* 0x0662: ctx_4170s */ | ||
| 664 | 0xf5f001f8, | ||
| 665 | 0x8effb210, | ||
| 666 | 0x7e404170, | ||
| 667 | 0xf800008f, | ||
| 668 | /* 0x0671: ctx_4170w */ | ||
| 669 | 0x41708e00, | ||
| 670 | 0x00657e40, | ||
| 671 | 0xf0ffb200, | ||
| 672 | 0x1bf410f4, | ||
| 673 | /* 0x0683: ctx_redswitch */ | ||
| 674 | 0x4e00f8f3, | ||
| 675 | 0xe5f00200, | ||
| 676 | 0x20e5f040, | ||
| 677 | 0x8010e5f0, | ||
| 678 | 0xf6018500, | ||
| 679 | 0x04bd000e, | ||
| 680 | /* 0x069a: ctx_redswitch_delay */ | ||
| 681 | 0xf2b6080f, | ||
| 682 | 0xfd1bf401, | ||
| 683 | 0x0400e5f1, | ||
| 684 | 0x0100e5f1, | ||
| 685 | 0x01850080, | ||
| 686 | 0xbd000ef6, | ||
| 687 | /* 0x06b3: ctx_86c */ | ||
| 688 | 0x8000f804, | ||
| 689 | 0xf6022300, | ||
| 690 | 0x04bd000f, | ||
| 691 | 0x148effb2, | ||
| 692 | 0x8f7e408a, | ||
| 693 | 0xffb20000, | ||
| 694 | 0x41a88c8e, | ||
| 695 | 0x00008f7e, | ||
| 696 | /* 0x06d2: ctx_mem */ | ||
| 697 | 0x008000f8, | ||
| 698 | 0x0ff60284, | ||
| 699 | /* 0x06db: ctx_mem_wait */ | ||
| 700 | 0x8f04bd00, | ||
| 701 | 0xcf028400, | ||
| 702 | 0xfffd00ff, | ||
| 703 | 0xf61bf405, | ||
| 704 | /* 0x06ea: ctx_load */ | ||
| 705 | 0x94bd00f8, | ||
| 706 | 0x800599f0, | ||
| 707 | 0xf6023700, | ||
| 708 | 0x04bd0009, | ||
| 709 | 0xb87e0c0a, | ||
| 710 | 0xf4bd0000, | ||
| 711 | 0x02890080, | ||
| 712 | 0xbd000ff6, | ||
| 713 | 0xc1008004, | ||
| 714 | 0x0002f602, | ||
| 715 | 0x008004bd, | ||
| 716 | 0x02f60283, | ||
| 717 | 0x0f04bd00, | ||
| 718 | 0x06d27e07, | ||
| 719 | 0xc0008000, | ||
| 720 | 0x0002f602, | ||
| 721 | 0x0bfe04bd, | ||
| 722 | 0x1f2af000, | ||
| 723 | 0xb60424b6, | ||
| 724 | 0x94bd0220, | ||
| 725 | 0x800899f0, | ||
| 726 | 0xf6023700, | ||
| 727 | 0x04bd0009, | ||
| 728 | 0x02810080, | ||
| 729 | 0xbd0002f6, | ||
| 730 | 0x0000d204, | ||
| 731 | 0x25f08000, | ||
| 732 | 0x88008002, | ||
| 733 | 0x0002f602, | ||
| 734 | 0x100104bd, | ||
| 735 | 0xf0020042, | ||
| 736 | 0x12fa0223, | ||
| 737 | 0xbd03f805, | ||
| 738 | 0x0899f094, | ||
| 739 | 0x02170080, | ||
| 740 | 0xbd0009f6, | ||
| 741 | 0x81019804, | ||
| 742 | 0x981814b6, | ||
| 743 | 0x25b68002, | ||
| 744 | 0x0512fd08, | ||
| 745 | 0xbd1601b5, | ||
| 746 | 0x0999f094, | ||
| 747 | 0x02370080, | ||
| 748 | 0xbd0009f6, | ||
| 749 | 0x81008004, | ||
| 750 | 0x0001f602, | ||
| 751 | 0x010204bd, | ||
| 752 | 0x02880080, | ||
| 753 | 0xbd0002f6, | ||
| 754 | 0x01004104, | ||
| 755 | 0xfa0613f0, | ||
| 756 | 0x03f80501, | ||
| 757 | 0x99f094bd, | ||
| 758 | 0x17008009, | ||
| 759 | 0x0009f602, | ||
| 760 | 0x94bd04bd, | ||
| 761 | 0x800599f0, | ||
| 762 | 0xf6021700, | ||
| 763 | 0x04bd0009, | ||
| 764 | /* 0x07d6: ctx_chan */ | ||
| 765 | 0xea7e00f8, | ||
| 766 | 0x0c0a0006, | ||
| 767 | 0x0000b87e, | ||
| 768 | 0xd27e050f, | ||
| 769 | 0x00f80006, | ||
| 770 | /* 0x07e8: ctx_mmio_exec */ | ||
| 771 | 0x80410398, | ||
| 772 | 0xf6028100, | ||
| 773 | 0x04bd0003, | ||
| 774 | /* 0x07f6: ctx_mmio_loop */ | ||
| 775 | 0x34c434bd, | ||
| 776 | 0x0e1bf4ff, | ||
| 777 | 0xf0020045, | ||
| 778 | 0x35fa0653, | ||
| 779 | /* 0x0807: ctx_mmio_pull */ | ||
| 780 | 0x9803f805, | ||
| 781 | 0x4f98804e, | ||
| 782 | 0x008f7e81, | ||
| 783 | 0x0830b600, | ||
| 784 | 0xf40112b6, | ||
| 785 | /* 0x081a: ctx_mmio_done */ | ||
| 786 | 0x0398df1b, | ||
| 787 | 0x81008016, | ||
| 788 | 0x0003f602, | ||
| 789 | 0x00b504bd, | ||
| 790 | 0x01004140, | ||
| 791 | 0xfa0613f0, | ||
| 792 | 0x03f80601, | ||
| 793 | /* 0x0836: ctx_xfer */ | ||
| 794 | 0x040e00f8, | ||
| 795 | 0x03020080, | ||
| 796 | 0xbd000ef6, | ||
| 797 | /* 0x0841: ctx_xfer_idle */ | ||
| 798 | 0x00008e04, | ||
| 799 | 0x00eecf03, | ||
| 800 | 0x2000e4f1, | ||
| 801 | 0xf4f51bf4, | ||
| 802 | 0x02f40611, | ||
| 803 | /* 0x0855: ctx_xfer_pre */ | ||
| 804 | 0x7e100f0c, | ||
| 805 | 0xf40006b3, | ||
| 806 | /* 0x085e: ctx_xfer_pre_load */ | ||
| 807 | 0x020f1b11, | ||
| 808 | 0x0006627e, | ||
| 809 | 0x0006717e, | ||
| 810 | 0x0006837e, | ||
| 811 | 0x627ef4bd, | ||
| 812 | 0xea7e0006, | ||
| 813 | /* 0x0876: ctx_xfer_exec */ | ||
| 814 | 0x01980006, | ||
| 815 | 0x8024bd16, | ||
| 816 | 0xf6010500, | ||
| 817 | 0x04bd0002, | ||
| 818 | 0x008e1fb2, | ||
| 819 | 0x8f7e41a5, | ||
| 820 | 0xfcf00000, | ||
| 821 | 0x022cf001, | ||
| 822 | 0xfd0124b6, | ||
| 823 | 0xffb205f2, | ||
| 824 | 0x41a5048e, | ||
| 825 | 0x00008f7e, | ||
| 826 | 0x0002167e, | ||
| 827 | 0xfc8024bd, | ||
| 828 | 0x02f60247, | ||
| 829 | 0xf004bd00, | ||
| 830 | 0x20b6012c, | ||
| 831 | 0x4afc8003, | ||
| 832 | 0x0002f602, | ||
| 833 | 0xacf004bd, | ||
| 834 | 0x06a5f001, | ||
| 835 | 0x0c98000b, | ||
| 836 | 0x010d9800, | ||
| 837 | 0x3d7e000e, | ||
| 838 | 0x080a0001, | ||
| 839 | 0x0000ec7e, | ||
| 840 | 0x00020a7e, | ||
| 841 | 0x0a1201f4, | ||
| 842 | 0x00b87e0c, | ||
| 843 | 0x7e050f00, | ||
| 844 | 0xf40006d2, | ||
| 845 | /* 0x08f2: ctx_xfer_post */ | ||
| 846 | 0x020f2d02, | ||
| 847 | 0x0006627e, | ||
| 848 | 0xb37ef4bd, | ||
| 849 | 0x277e0006, | ||
| 850 | 0x717e0002, | ||
| 851 | 0xf4bd0006, | ||
| 852 | 0x0006627e, | ||
| 853 | 0x981011f4, | ||
| 854 | 0x11fd4001, | ||
| 855 | 0x070bf405, | ||
| 856 | 0x0007e87e, | ||
| 857 | /* 0x091c: ctx_xfer_no_post_mmio */ | ||
| 858 | /* 0x091c: ctx_xfer_done */ | ||
| 859 | 0x000000f8, | ||
| 860 | 0x00000000, | ||
| 861 | 0x00000000, | ||
| 862 | 0x00000000, | ||
| 863 | 0x00000000, | ||
| 864 | 0x00000000, | ||
| 865 | 0x00000000, | ||
| 866 | 0x00000000, | ||
| 867 | 0x00000000, | ||
| 868 | 0x00000000, | ||
| 869 | 0x00000000, | ||
| 870 | 0x00000000, | ||
| 871 | 0x00000000, | ||
| 872 | 0x00000000, | ||
| 873 | 0x00000000, | ||
| 874 | 0x00000000, | ||
| 875 | 0x00000000, | ||
| 876 | 0x00000000, | ||
| 877 | 0x00000000, | ||
| 878 | 0x00000000, | ||
| 879 | 0x00000000, | ||
| 880 | 0x00000000, | ||
| 881 | 0x00000000, | ||
| 882 | 0x00000000, | ||
| 883 | 0x00000000, | ||
| 884 | 0x00000000, | ||
| 885 | 0x00000000, | ||
| 886 | 0x00000000, | ||
| 887 | 0x00000000, | ||
| 888 | 0x00000000, | ||
| 889 | 0x00000000, | ||
| 890 | 0x00000000, | ||
| 891 | 0x00000000, | ||
| 892 | 0x00000000, | ||
| 893 | 0x00000000, | ||
| 894 | 0x00000000, | ||
| 895 | 0x00000000, | ||
| 896 | 0x00000000, | ||
| 897 | 0x00000000, | ||
| 898 | 0x00000000, | ||
| 899 | 0x00000000, | ||
| 900 | 0x00000000, | ||
| 901 | 0x00000000, | ||
| 902 | 0x00000000, | ||
| 903 | 0x00000000, | ||
| 904 | 0x00000000, | ||
| 905 | 0x00000000, | ||
| 906 | 0x00000000, | ||
| 907 | 0x00000000, | ||
| 908 | 0x00000000, | ||
| 909 | 0x00000000, | ||
| 910 | 0x00000000, | ||
| 911 | 0x00000000, | ||
| 912 | 0x00000000, | ||
| 913 | 0x00000000, | ||
| 914 | 0x00000000, | ||
| 915 | 0x00000000, | ||
| 916 | }; | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h index b59f694c0423..132f684b1946 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h +++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h | |||
| @@ -206,14 +206,14 @@ uint32_t nvc0_grhub_data[] = { | |||
| 206 | }; | 206 | }; |
| 207 | 207 | ||
| 208 | uint32_t nvc0_grhub_code[] = { | 208 | uint32_t nvc0_grhub_code[] = { |
| 209 | 0x031b0ef5, | 209 | 0x039b0ef5, |
| 210 | /* 0x0004: queue_put */ | 210 | /* 0x0004: queue_put */ |
| 211 | 0x9800d898, | 211 | 0x9800d898, |
| 212 | 0x86f001d9, | 212 | 0x86f001d9, |
| 213 | 0x0489b808, | 213 | 0x0489b808, |
| 214 | 0xf00c1bf4, | 214 | 0xf00c1bf4, |
| 215 | 0x21f502f7, | 215 | 0x21f502f7, |
| 216 | 0x00f802fe, | 216 | 0x00f8037e, |
| 217 | /* 0x001c: queue_put_next */ | 217 | /* 0x001c: queue_put_next */ |
| 218 | 0xb60798c4, | 218 | 0xb60798c4, |
| 219 | 0x8dbb0384, | 219 | 0x8dbb0384, |
| @@ -237,184 +237,214 @@ uint32_t nvc0_grhub_code[] = { | |||
| 237 | /* 0x0066: queue_get_done */ | 237 | /* 0x0066: queue_get_done */ |
| 238 | 0x00f80132, | 238 | 0x00f80132, |
| 239 | /* 0x0068: nv_rd32 */ | 239 | /* 0x0068: nv_rd32 */ |
| 240 | 0x0728b7f1, | 240 | 0xf002ecb9, |
| 241 | 0xb906b4b6, | 241 | 0x07f11fc9, |
| 242 | 0xc9f002ec, | 242 | 0x03f0ca00, |
| 243 | 0x00bcd01f, | 243 | 0x000cd001, |
| 244 | /* 0x0078: nv_rd32_wait */ | 244 | /* 0x007a: nv_rd32_wait */ |
| 245 | 0xc800bccf, | 245 | 0xc7f104bd, |
| 246 | 0x1bf41fcc, | 246 | 0xc3f0ca00, |
| 247 | 0x06a7f0fa, | 247 | 0x00cccf01, |
| 248 | 0x010921f5, | 248 | 0xf41fccc8, |
| 249 | 0xf840bfcf, | 249 | 0xa7f0f31b, |
| 250 | /* 0x008d: nv_wr32 */ | 250 | 0x1021f506, |
| 251 | 0x28b7f100, | 251 | 0x00f7f101, |
| 252 | 0x06b4b607, | 252 | 0x01f3f0cb, |
| 253 | 0xb980bfd0, | 253 | 0xf800ffcf, |
| 254 | 0xc9f002ec, | 254 | /* 0x009d: nv_wr32 */ |
| 255 | 0x1ec9f01f, | 255 | 0x0007f100, |
| 256 | /* 0x00a3: nv_wr32_wait */ | 256 | 0x0103f0cc, |
| 257 | 0xcf00bcd0, | 257 | 0xbd000fd0, |
| 258 | 0xccc800bc, | 258 | 0x02ecb904, |
| 259 | 0xfa1bf41f, | 259 | 0xf01fc9f0, |
| 260 | /* 0x00ae: watchdog_reset */ | 260 | 0x07f11ec9, |
| 261 | 0x87f100f8, | 261 | 0x03f0ca00, |
| 262 | 0x84b60430, | 262 | 0x000cd001, |
| 263 | 0x1ff9f006, | 263 | /* 0x00be: nv_wr32_wait */ |
| 264 | 0xf8008fd0, | 264 | 0xc7f104bd, |
| 265 | /* 0x00bd: watchdog_clear */ | 265 | 0xc3f0ca00, |
| 266 | 0x3087f100, | 266 | 0x00cccf01, |
| 267 | 0x0684b604, | 267 | 0xf41fccc8, |
| 268 | 0xf80080d0, | 268 | 0x00f8f31b, |
| 269 | /* 0x00c9: wait_donez */ | 269 | /* 0x00d0: wait_donez */ |
| 270 | 0xf094bd00, | 270 | 0x99f094bd, |
| 271 | 0x07f10099, | 271 | 0x0007f100, |
| 272 | 0x03f00f00, | 272 | 0x0203f00f, |
| 273 | 0x0009d002, | 273 | 0xbd0009d0, |
| 274 | 0x07f104bd, | 274 | 0x0007f104, |
| 275 | 0x03f00600, | 275 | 0x0203f006, |
| 276 | 0x000ad002, | 276 | 0xbd000ad0, |
| 277 | /* 0x00e6: wait_donez_ne */ | 277 | /* 0x00ed: wait_donez_ne */ |
| 278 | 0x87f104bd, | 278 | 0x0087f104, |
| 279 | 0x83f00000, | 279 | 0x0183f000, |
| 280 | 0x0088cf01, | 280 | 0xff0088cf, |
| 281 | 0xf4888aff, | 281 | 0x1bf4888a, |
| 282 | 0x94bdf31b, | 282 | 0xf094bdf3, |
| 283 | 0xf10099f0, | ||
| 284 | 0xf0170007, | ||
| 285 | 0x09d00203, | ||
| 286 | 0xf804bd00, | ||
| 287 | /* 0x0109: wait_doneo */ | ||
| 288 | 0xf094bd00, | ||
| 289 | 0x07f10099, | 283 | 0x07f10099, |
| 290 | 0x03f00f00, | 284 | 0x03f01700, |
| 291 | 0x0009d002, | 285 | 0x0009d002, |
| 292 | 0x87f104bd, | 286 | 0x00f804bd, |
| 293 | 0x84b60818, | 287 | /* 0x0110: wait_doneo */ |
| 294 | 0x008ad006, | ||
| 295 | /* 0x0124: wait_doneo_e */ | ||
| 296 | 0x040087f1, | ||
| 297 | 0xcf0684b6, | ||
| 298 | 0x8aff0088, | ||
| 299 | 0xf30bf488, | ||
| 300 | 0x99f094bd, | 288 | 0x99f094bd, |
| 301 | 0x0007f100, | 289 | 0x0007f100, |
| 302 | 0x0203f017, | 290 | 0x0203f00f, |
| 303 | 0xbd0009d0, | 291 | 0xbd0009d0, |
| 304 | /* 0x0147: mmctx_size */ | 292 | 0x0007f104, |
| 305 | 0xbd00f804, | 293 | 0x0203f006, |
| 306 | /* 0x0149: nv_mmctx_size_loop */ | 294 | 0xbd000ad0, |
| 307 | 0x00e89894, | 295 | /* 0x012d: wait_doneo_e */ |
| 308 | 0xb61a85b6, | 296 | 0x0087f104, |
| 309 | 0x84b60180, | 297 | 0x0183f000, |
| 310 | 0x0098bb02, | 298 | 0xff0088cf, |
| 311 | 0xb804e0b6, | 299 | 0x0bf4888a, |
| 312 | 0x1bf404ef, | 300 | 0xf094bdf3, |
| 313 | 0x029fb9eb, | 301 | 0x07f10099, |
| 314 | /* 0x0166: mmctx_xfer */ | 302 | 0x03f01700, |
| 315 | 0x94bd00f8, | 303 | 0x0009d002, |
| 316 | 0xf10199f0, | 304 | 0x00f804bd, |
| 317 | 0xf00f0007, | 305 | /* 0x0150: mmctx_size */ |
| 318 | 0x09d00203, | 306 | /* 0x0152: nv_mmctx_size_loop */ |
| 319 | 0xf104bd00, | 307 | 0xe89894bd, |
| 320 | 0xb6071087, | 308 | 0x1a85b600, |
| 321 | 0x94bd0684, | 309 | 0xb60180b6, |
| 322 | 0xf405bbfd, | 310 | 0x98bb0284, |
| 323 | 0x8bd0090b, | 311 | 0x04e0b600, |
| 324 | 0x0099f000, | 312 | 0xf404efb8, |
| 325 | /* 0x018c: mmctx_base_disabled */ | 313 | 0x9fb9eb1b, |
| 326 | 0xf405eefd, | 314 | /* 0x016f: mmctx_xfer */ |
| 327 | 0x8ed00c0b, | 315 | 0xbd00f802, |
| 328 | 0xc08fd080, | 316 | 0x0199f094, |
| 329 | /* 0x019b: mmctx_multi_disabled */ | 317 | 0x0f0007f1, |
| 330 | 0xb70199f0, | 318 | 0xd00203f0, |
| 331 | 0xc8010080, | 319 | 0x04bd0009, |
| 320 | 0xbbfd94bd, | ||
| 321 | 0x120bf405, | ||
| 322 | 0xc40007f1, | ||
| 323 | 0xd00103f0, | ||
| 324 | 0x04bd000b, | ||
| 325 | /* 0x0197: mmctx_base_disabled */ | ||
| 326 | 0xfd0099f0, | ||
| 327 | 0x0bf405ee, | ||
| 328 | 0x0007f11e, | ||
| 329 | 0x0103f0c6, | ||
| 330 | 0xbd000ed0, | ||
| 331 | 0x0007f104, | ||
| 332 | 0x0103f0c7, | ||
| 333 | 0xbd000fd0, | ||
| 334 | 0x0199f004, | ||
| 335 | /* 0x01b8: mmctx_multi_disabled */ | ||
| 336 | 0xb600abc8, | ||
| 337 | 0xb9f010b4, | ||
| 338 | 0x01aec80c, | ||
| 339 | 0xfd11e4b6, | ||
| 340 | 0x07f105be, | ||
| 341 | 0x03f0c500, | ||
| 342 | 0x000bd001, | ||
| 343 | /* 0x01d6: mmctx_exec_loop */ | ||
| 344 | /* 0x01d6: mmctx_wait_free */ | ||
| 345 | 0xe7f104bd, | ||
| 346 | 0xe3f0c500, | ||
| 347 | 0x00eecf01, | ||
| 348 | 0xf41fe4f0, | ||
| 349 | 0xce98f30b, | ||
| 350 | 0x05e9fd00, | ||
| 351 | 0xc80007f1, | ||
| 352 | 0xd00103f0, | ||
| 353 | 0x04bd000e, | ||
| 354 | 0xb804c0b6, | ||
| 355 | 0x1bf404cd, | ||
| 356 | 0x02abc8d8, | ||
| 357 | /* 0x0207: mmctx_fini_wait */ | ||
| 358 | 0xf11f1bf4, | ||
| 359 | 0xf0c500b7, | ||
| 360 | 0xbbcf01b3, | ||
| 361 | 0x1fb4f000, | ||
| 362 | 0xf410b4b0, | ||
| 363 | 0xa7f0f01b, | ||
| 364 | 0xd021f402, | ||
| 365 | /* 0x0223: mmctx_stop */ | ||
| 366 | 0xc82b0ef4, | ||
| 332 | 0xb4b600ab, | 367 | 0xb4b600ab, |
| 333 | 0x0cb9f010, | 368 | 0x0cb9f010, |
| 334 | 0xb601aec8, | 369 | 0xf112b9f0, |
| 335 | 0xbefd11e4, | 370 | 0xf0c50007, |
| 336 | 0x008bd005, | 371 | 0x0bd00103, |
| 337 | /* 0x01b4: mmctx_exec_loop */ | 372 | /* 0x023b: mmctx_stop_wait */ |
| 338 | /* 0x01b4: mmctx_wait_free */ | 373 | 0xf104bd00, |
| 339 | 0xf0008ecf, | 374 | 0xf0c500b7, |
| 340 | 0x0bf41fe4, | 375 | 0xbbcf01b3, |
| 341 | 0x00ce98fa, | 376 | 0x12bbc800, |
| 342 | 0xd005e9fd, | 377 | /* 0x024b: mmctx_done */ |
| 343 | 0xc0b6c08e, | 378 | 0xbdf31bf4, |
| 344 | 0x04cdb804, | 379 | 0x0199f094, |
| 345 | 0xc8e81bf4, | 380 | 0x170007f1, |
| 346 | 0x1bf402ab, | 381 | 0xd00203f0, |
| 347 | /* 0x01d5: mmctx_fini_wait */ | 382 | 0x04bd0009, |
| 348 | 0x008bcf18, | 383 | /* 0x025e: strand_wait */ |
| 349 | 0xb01fb4f0, | 384 | 0xa0f900f8, |
| 350 | 0x1bf410b4, | 385 | 0xf402a7f0, |
| 351 | 0x02a7f0f7, | 386 | 0xa0fcd021, |
| 352 | 0xf4c921f4, | 387 | /* 0x026a: strand_pre */ |
| 353 | /* 0x01ea: mmctx_stop */ | 388 | 0x97f000f8, |
| 354 | 0xabc81b0e, | 389 | 0xfc07f10c, |
| 355 | 0x10b4b600, | 390 | 0x0203f04a, |
| 356 | 0xf00cb9f0, | 391 | 0xbd0009d0, |
| 357 | 0x8bd012b9, | 392 | 0x5e21f504, |
| 358 | /* 0x01f9: mmctx_stop_wait */ | 393 | /* 0x027f: strand_post */ |
| 359 | 0x008bcf00, | 394 | 0xf000f802, |
| 360 | 0xf412bbc8, | 395 | 0x07f10d97, |
| 361 | /* 0x0202: mmctx_done */ | 396 | 0x03f04afc, |
| 362 | 0x94bdfa1b, | ||
| 363 | 0xf10199f0, | ||
| 364 | 0xf0170007, | ||
| 365 | 0x09d00203, | ||
| 366 | 0xf804bd00, | ||
| 367 | /* 0x0215: strand_wait */ | ||
| 368 | 0xf0a0f900, | ||
| 369 | 0x21f402a7, | ||
| 370 | 0xf8a0fcc9, | ||
| 371 | /* 0x0221: strand_pre */ | ||
| 372 | 0xfc87f100, | ||
| 373 | 0x0283f04a, | ||
| 374 | 0xd00c97f0, | ||
| 375 | 0x21f50089, | ||
| 376 | 0x00f80215, | ||
| 377 | /* 0x0234: strand_post */ | ||
| 378 | 0x4afc87f1, | ||
| 379 | 0xf00283f0, | ||
| 380 | 0x89d00d97, | ||
| 381 | 0x1521f500, | ||
| 382 | /* 0x0247: strand_set */ | ||
| 383 | 0xf100f802, | ||
| 384 | 0xf04ffca7, | ||
| 385 | 0xaba202a3, | ||
| 386 | 0xc7f00500, | ||
| 387 | 0x00acd00f, | ||
| 388 | 0xd00bc7f0, | ||
| 389 | 0x21f500bc, | ||
| 390 | 0xaed00215, | ||
| 391 | 0x0ac7f000, | ||
| 392 | 0xf500bcd0, | ||
| 393 | 0xf8021521, | ||
| 394 | /* 0x0271: strand_ctx_init */ | ||
| 395 | 0xf094bd00, | ||
| 396 | 0x07f10399, | ||
| 397 | 0x03f00f00, | ||
| 398 | 0x0009d002, | 397 | 0x0009d002, |
| 399 | 0x21f504bd, | 398 | 0x21f504bd, |
| 400 | 0xe7f00221, | 399 | 0x00f8025e, |
| 401 | 0x4721f503, | 400 | /* 0x0294: strand_set */ |
| 402 | 0xfca7f102, | 401 | 0xf10fc7f0, |
| 403 | 0x02a3f046, | 402 | 0xf04ffc07, |
| 404 | 0x0400aba0, | 403 | 0x0cd00203, |
| 405 | 0xf040a0d0, | 404 | 0xf004bd00, |
| 406 | 0xbcd001c7, | 405 | 0x07f10bc7, |
| 407 | 0x1521f500, | 406 | 0x03f04afc, |
| 408 | 0x010c9202, | 407 | 0x000cd002, |
| 409 | 0xf000acd0, | 408 | 0x07f104bd, |
| 410 | 0xbcd002c7, | 409 | 0x03f04ffc, |
| 411 | 0x1521f500, | 410 | 0x000ed002, |
| 412 | 0x3421f502, | 411 | 0xc7f004bd, |
| 413 | 0x8087f102, | 412 | 0xfc07f10a, |
| 414 | 0x0684b608, | 413 | 0x0203f04a, |
| 415 | 0xb70089cf, | 414 | 0xbd000cd0, |
| 416 | 0x95220080, | 415 | 0x5e21f504, |
| 417 | /* 0x02ca: ctx_init_strand_loop */ | 416 | /* 0x02d3: strand_ctx_init */ |
| 417 | 0xbd00f802, | ||
| 418 | 0x0399f094, | ||
| 419 | 0x0f0007f1, | ||
| 420 | 0xd00203f0, | ||
| 421 | 0x04bd0009, | ||
| 422 | 0x026a21f5, | ||
| 423 | 0xf503e7f0, | ||
| 424 | 0xbd029421, | ||
| 425 | 0xfc07f1c4, | ||
| 426 | 0x0203f047, | ||
| 427 | 0xbd000cd0, | ||
| 428 | 0x01c7f004, | ||
| 429 | 0x4afc07f1, | ||
| 430 | 0xd00203f0, | ||
| 431 | 0x04bd000c, | ||
| 432 | 0x025e21f5, | ||
| 433 | 0xf1010c92, | ||
| 434 | 0xf046fc07, | ||
| 435 | 0x0cd00203, | ||
| 436 | 0xf004bd00, | ||
| 437 | 0x07f102c7, | ||
| 438 | 0x03f04afc, | ||
| 439 | 0x000cd002, | ||
| 440 | 0x21f504bd, | ||
| 441 | 0x21f5025e, | ||
| 442 | 0x87f1027f, | ||
| 443 | 0x83f04200, | ||
| 444 | 0x0097f102, | ||
| 445 | 0x0293f020, | ||
| 446 | 0x950099cf, | ||
| 447 | /* 0x034a: ctx_init_strand_loop */ | ||
| 418 | 0x8ed008fe, | 448 | 0x8ed008fe, |
| 419 | 0x408ed000, | 449 | 0x408ed000, |
| 420 | 0xb6808acf, | 450 | 0xb6808acf, |
| @@ -428,7 +458,7 @@ uint32_t nvc0_grhub_code[] = { | |||
| 428 | 0x170007f1, | 458 | 0x170007f1, |
| 429 | 0xd00203f0, | 459 | 0xd00203f0, |
| 430 | 0x04bd0009, | 460 | 0x04bd0009, |
| 431 | /* 0x02fe: error */ | 461 | /* 0x037e: error */ |
| 432 | 0x07f100f8, | 462 | 0x07f100f8, |
| 433 | 0x03f00500, | 463 | 0x03f00500, |
| 434 | 0x000fd002, | 464 | 0x000fd002, |
| @@ -436,82 +466,117 @@ uint32_t nvc0_grhub_code[] = { | |||
| 436 | 0x0007f101, | 466 | 0x0007f101, |
| 437 | 0x0303f007, | 467 | 0x0303f007, |
| 438 | 0xbd000fd0, | 468 | 0xbd000fd0, |
| 439 | /* 0x031b: init */ | 469 | /* 0x039b: init */ |
| 440 | 0xbd00f804, | 470 | 0xbd00f804, |
| 441 | 0x0004fe04, | 471 | 0x0007fe04, |
| 442 | 0xf10007fe, | 472 | 0x420017f1, |
| 443 | 0xf0120017, | 473 | 0xcf0013f0, |
| 444 | 0x12d00227, | 474 | 0x11e70011, |
| 445 | 0xb117f100, | 475 | 0x14b60109, |
| 446 | 0x0010fe05, | 476 | 0x0014fe08, |
| 447 | 0x040017f1, | 477 | 0xf10227f0, |
| 448 | 0xf1c010d0, | 478 | 0xf0120007, |
| 449 | 0xb6040437, | 479 | 0x02d00003, |
| 450 | 0x27f10634, | 480 | 0xf104bd00, |
| 451 | 0x32d02003, | 481 | 0xfe06c817, |
| 452 | 0x0427f100, | 482 | 0x24bd0010, |
| 453 | 0x0132d020, | 483 | 0x070007f1, |
| 484 | 0xd00003f0, | ||
| 485 | 0x04bd0002, | ||
| 486 | 0x200327f1, | ||
| 487 | 0x010007f1, | ||
| 488 | 0xd00103f0, | ||
| 489 | 0x04bd0002, | ||
| 490 | 0x200427f1, | ||
| 491 | 0x010407f1, | ||
| 492 | 0xd00103f0, | ||
| 493 | 0x04bd0002, | ||
| 454 | 0x200b27f1, | 494 | 0x200b27f1, |
| 455 | 0xf10232d0, | 495 | 0x010807f1, |
| 456 | 0xd0200c27, | 496 | 0xd00103f0, |
| 457 | 0x27f10732, | 497 | 0x04bd0002, |
| 458 | 0x24b60c24, | 498 | 0x200c27f1, |
| 459 | 0x0003b906, | 499 | 0x011c07f1, |
| 460 | 0xf10023d0, | 500 | 0xd00103f0, |
| 501 | 0x04bd0002, | ||
| 502 | 0xf1010392, | ||
| 503 | 0xf0090007, | ||
| 504 | 0x03d00303, | ||
| 505 | 0xf104bd00, | ||
| 461 | 0xf0870427, | 506 | 0xf0870427, |
| 462 | 0x12d00023, | 507 | 0x07f10023, |
| 463 | 0x0012b700, | 508 | 0x03f00400, |
| 464 | 0x0427f001, | 509 | 0x0002d000, |
| 465 | 0xf40012d0, | 510 | 0x27f004bd, |
| 466 | 0xe7f11031, | 511 | 0x0007f104, |
| 467 | 0xe3f09604, | 512 | 0x0003f003, |
| 468 | 0x6821f440, | 513 | 0xbd0002d0, |
| 469 | 0x8090f1c7, | 514 | 0x1031f404, |
| 470 | 0xf4f00301, | 515 | 0x9604e7f1, |
| 471 | 0x020f801f, | 516 | 0xf440e3f0, |
| 472 | 0xbb0117f0, | 517 | 0xfeb96821, |
| 473 | 0x12b6041f, | 518 | 0x90f1c702, |
| 474 | 0x0c27f101, | 519 | 0xf0030180, |
| 475 | 0x0624b604, | 520 | 0x0f801ff4, |
| 476 | 0xd00021d0, | 521 | 0x0117f002, |
| 477 | 0x17f14021, | 522 | 0xb6041fbb, |
| 478 | 0x0e980100, | 523 | 0x07f10112, |
| 479 | 0x010f9800, | 524 | 0x03f00300, |
| 480 | 0x014721f5, | 525 | 0x0001d001, |
| 481 | 0x070037f1, | 526 | 0x07f104bd, |
| 482 | 0x950634b6, | 527 | 0x03f00400, |
| 483 | 0x34d00814, | 528 | 0x0001d001, |
| 484 | 0x4034d000, | 529 | 0x17f104bd, |
| 485 | 0x130030b7, | 530 | 0xf7f00100, |
| 486 | 0xb6001fbb, | 531 | 0xb521f502, |
| 487 | 0x3fd002f5, | 532 | 0xc721f507, |
| 488 | 0x0815b600, | 533 | 0x10f7f007, |
| 489 | 0xb60110b6, | 534 | 0x081421f5, |
| 490 | 0x1fb90814, | 535 | 0x98000e98, |
| 491 | 0x7121f502, | 536 | 0x21f5010f, |
| 492 | 0x001fbb02, | 537 | 0x14950150, |
| 493 | 0xf1020398, | 538 | 0x0007f108, |
| 494 | 0xf0200047, | 539 | 0x0103f0c0, |
| 495 | /* 0x03f6: init_gpc */ | 540 | 0xbd0004d0, |
| 496 | 0x4ea05043, | 541 | 0x0007f104, |
| 497 | 0x1fb90804, | 542 | 0x0103f0c1, |
| 498 | 0x8d21f402, | 543 | 0xbd0004d0, |
| 499 | 0x010c4ea0, | 544 | 0x0030b704, |
| 500 | 0x21f4f4bd, | 545 | 0x001fbb13, |
| 501 | 0x044ea08d, | 546 | 0xf102f5b6, |
| 502 | 0x8d21f401, | 547 | 0xf0d30007, |
| 503 | 0x01004ea0, | 548 | 0x0fd00103, |
| 504 | 0xf402f7f0, | 549 | 0xb604bd00, |
| 505 | 0x4ea08d21, | 550 | 0x10b60815, |
| 506 | /* 0x041e: init_gpc_wait */ | 551 | 0x0814b601, |
| 507 | 0x21f40800, | 552 | 0xf5021fb9, |
| 508 | 0x1fffc868, | 553 | 0xbb02d321, |
| 509 | 0xa0fa0bf4, | 554 | 0x0398001f, |
| 510 | 0xf408044e, | 555 | 0x0047f102, |
| 511 | 0x1fbb6821, | 556 | 0x5043f020, |
| 512 | 0x0040b700, | 557 | /* 0x04f4: init_gpc */ |
| 513 | 0x0132b680, | 558 | 0x08044ea0, |
| 514 | 0xf1be1bf4, | 559 | 0xf4021fb9, |
| 560 | 0x4ea09d21, | ||
| 561 | 0xf4bd010c, | ||
| 562 | 0xa09d21f4, | ||
| 563 | 0xf401044e, | ||
| 564 | 0x4ea09d21, | ||
| 565 | 0xf7f00100, | ||
| 566 | 0x9d21f402, | ||
| 567 | 0x08004ea0, | ||
| 568 | /* 0x051c: init_gpc_wait */ | ||
| 569 | 0xc86821f4, | ||
| 570 | 0x0bf41fff, | ||
| 571 | 0x044ea0fa, | ||
| 572 | 0x6821f408, | ||
| 573 | 0xb7001fbb, | ||
| 574 | 0xb6800040, | ||
| 575 | 0x1bf40132, | ||
| 576 | 0x00f7f0be, | ||
| 577 | 0x081421f5, | ||
| 578 | 0xf500f7f0, | ||
| 579 | 0xf107b521, | ||
| 515 | 0xf0010007, | 580 | 0xf0010007, |
| 516 | 0x01d00203, | 581 | 0x01d00203, |
| 517 | 0xbd04bd00, | 582 | 0xbd04bd00, |
| @@ -519,402 +584,399 @@ uint32_t nvc0_grhub_code[] = { | |||
| 519 | 0x080007f1, | 584 | 0x080007f1, |
| 520 | 0xd00203f0, | 585 | 0xd00203f0, |
| 521 | 0x04bd0001, | 586 | 0x04bd0001, |
| 522 | /* 0x0458: main */ | 587 | /* 0x0564: main */ |
| 523 | 0xf40031f4, | 588 | 0xf40031f4, |
| 524 | 0xd7f00028, | 589 | 0xd7f00028, |
| 525 | 0x3921f410, | 590 | 0x3921f410, |
| 526 | 0xb1f401f4, | 591 | 0xb1f401f4, |
| 527 | 0xf54001e4, | 592 | 0xf54001e4, |
| 528 | 0xbd00de1b, | 593 | 0xbd00e91b, |
| 529 | 0x0499f094, | 594 | 0x0499f094, |
| 530 | 0x0f0007f1, | 595 | 0x0f0007f1, |
| 531 | 0xd00203f0, | 596 | 0xd00203f0, |
| 532 | 0x04bd0009, | 597 | 0x04bd0009, |
| 533 | 0x0b0017f1, | 598 | 0xc00017f1, |
| 534 | 0xcf0614b6, | 599 | 0xcf0213f0, |
| 535 | 0x11cf4012, | 600 | 0x27f10011, |
| 536 | 0x1f13c800, | 601 | 0x23f0c100, |
| 537 | 0x00870bf5, | 602 | 0x0022cf02, |
| 538 | 0xf41f23c8, | 603 | 0xf51f13c8, |
| 539 | 0x20f9620b, | 604 | 0xc800890b, |
| 540 | 0xbd0212b9, | 605 | 0x0bf41f23, |
| 541 | 0x0799f094, | 606 | 0xb920f962, |
| 542 | 0x0f0007f1, | 607 | 0x94bd0212, |
| 543 | 0xd00203f0, | ||
| 544 | 0x04bd0009, | ||
| 545 | 0xf40132f4, | ||
| 546 | 0x21f50231, | ||
| 547 | 0x94bd082f, | ||
| 548 | 0xf10799f0, | 608 | 0xf10799f0, |
| 549 | 0xf0170007, | 609 | 0xf00f0007, |
| 550 | 0x09d00203, | 610 | 0x09d00203, |
| 551 | 0xfc04bd00, | 611 | 0xf404bd00, |
| 552 | 0xf094bd20, | 612 | 0x31f40132, |
| 553 | 0x07f10699, | 613 | 0xe821f502, |
| 554 | 0x03f00f00, | 614 | 0xf094bd09, |
| 555 | 0x0009d002, | 615 | 0x07f10799, |
| 556 | 0x31f404bd, | ||
| 557 | 0x2f21f501, | ||
| 558 | 0xf094bd08, | ||
| 559 | 0x07f10699, | ||
| 560 | 0x03f01700, | 616 | 0x03f01700, |
| 561 | 0x0009d002, | 617 | 0x0009d002, |
| 562 | 0x0ef404bd, | 618 | 0x20fc04bd, |
| 563 | /* 0x04f9: chsw_prev_no_next */ | ||
| 564 | 0xb920f931, | ||
| 565 | 0x32f40212, | ||
| 566 | 0x0232f401, | ||
| 567 | 0x082f21f5, | ||
| 568 | 0x17f120fc, | ||
| 569 | 0x14b60b00, | ||
| 570 | 0x0012d006, | ||
| 571 | /* 0x0517: chsw_no_prev */ | ||
| 572 | 0xc8130ef4, | ||
| 573 | 0x0bf41f23, | ||
| 574 | 0x0131f40d, | ||
| 575 | 0xf50232f4, | ||
| 576 | /* 0x0527: chsw_done */ | ||
| 577 | 0xf1082f21, | ||
| 578 | 0xb60b0c17, | ||
| 579 | 0x27f00614, | ||
| 580 | 0x0012d001, | ||
| 581 | 0x99f094bd, | 619 | 0x99f094bd, |
| 582 | 0x0007f104, | 620 | 0x0007f106, |
| 621 | 0x0203f00f, | ||
| 622 | 0xbd0009d0, | ||
| 623 | 0x0131f404, | ||
| 624 | 0x09e821f5, | ||
| 625 | 0x99f094bd, | ||
| 626 | 0x0007f106, | ||
| 583 | 0x0203f017, | 627 | 0x0203f017, |
| 584 | 0xbd0009d0, | 628 | 0xbd0009d0, |
| 585 | 0x130ef504, | 629 | 0x330ef404, |
| 586 | /* 0x0549: main_not_ctx_switch */ | 630 | /* 0x060c: chsw_prev_no_next */ |
| 587 | 0x01e4b0ff, | 631 | 0x12b920f9, |
| 588 | 0xb90d1bf4, | 632 | 0x0132f402, |
| 589 | 0x21f502f2, | 633 | 0xf50232f4, |
| 590 | 0x0ef407bb, | 634 | 0xfc09e821, |
| 591 | /* 0x0559: main_not_ctx_chan */ | 635 | 0x0007f120, |
| 592 | 0x02e4b046, | 636 | 0x0203f0c0, |
| 593 | 0xbd321bf4, | 637 | 0xbd0002d0, |
| 594 | 0x0799f094, | 638 | 0x130ef404, |
| 595 | 0x0f0007f1, | 639 | /* 0x062c: chsw_no_prev */ |
| 640 | 0xf41f23c8, | ||
| 641 | 0x31f40d0b, | ||
| 642 | 0x0232f401, | ||
| 643 | 0x09e821f5, | ||
| 644 | /* 0x063c: chsw_done */ | ||
| 645 | 0xf10127f0, | ||
| 646 | 0xf0c30007, | ||
| 647 | 0x02d00203, | ||
| 648 | 0xbd04bd00, | ||
| 649 | 0x0499f094, | ||
| 650 | 0x170007f1, | ||
| 596 | 0xd00203f0, | 651 | 0xd00203f0, |
| 597 | 0x04bd0009, | 652 | 0x04bd0009, |
| 598 | 0xf40132f4, | 653 | 0xff080ef5, |
| 599 | 0x21f50232, | 654 | /* 0x0660: main_not_ctx_switch */ |
| 600 | 0x94bd082f, | 655 | 0xf401e4b0, |
| 656 | 0xf2b90d1b, | ||
| 657 | 0x7821f502, | ||
| 658 | 0x460ef409, | ||
| 659 | /* 0x0670: main_not_ctx_chan */ | ||
| 660 | 0xf402e4b0, | ||
| 661 | 0x94bd321b, | ||
| 601 | 0xf10799f0, | 662 | 0xf10799f0, |
| 602 | 0xf0170007, | 663 | 0xf00f0007, |
| 603 | 0x09d00203, | 664 | 0x09d00203, |
| 604 | 0xf404bd00, | 665 | 0xf404bd00, |
| 605 | /* 0x058e: main_not_ctx_save */ | 666 | 0x32f40132, |
| 606 | 0xef94110e, | 667 | 0xe821f502, |
| 607 | 0x01f5f010, | 668 | 0xf094bd09, |
| 608 | 0x02fe21f5, | 669 | 0x07f10799, |
| 609 | 0xfec00ef5, | 670 | 0x03f01700, |
| 610 | /* 0x059c: main_done */ | 671 | 0x0009d002, |
| 611 | 0x29f024bd, | 672 | 0x0ef404bd, |
| 612 | 0x0007f11f, | 673 | /* 0x06a5: main_not_ctx_save */ |
| 613 | 0x0203f008, | 674 | 0x10ef9411, |
| 614 | 0xbd0002d0, | 675 | 0xf501f5f0, |
| 615 | 0xab0ef504, | 676 | 0xf5037e21, |
| 616 | /* 0x05b1: ih */ | 677 | /* 0x06b3: main_done */ |
| 617 | 0xfe80f9fe, | 678 | 0xbdfeb50e, |
| 618 | 0x80f90188, | 679 | 0x1f29f024, |
| 619 | 0xa0f990f9, | 680 | 0x080007f1, |
| 620 | 0xd0f9b0f9, | 681 | 0xd00203f0, |
| 621 | 0xf0f9e0f9, | 682 | 0x04bd0002, |
| 622 | 0x0acf04bd, | 683 | 0xfea00ef5, |
| 623 | 0x04abc480, | 684 | /* 0x06c8: ih */ |
| 624 | 0xf11d0bf4, | 685 | 0x88fe80f9, |
| 625 | 0xf01900b7, | 686 | 0xf980f901, |
| 626 | 0xbecf10d7, | 687 | 0xf9a0f990, |
| 627 | 0x00bfcf40, | 688 | 0xf9d0f9b0, |
| 689 | 0xbdf0f9e0, | ||
| 690 | 0x00a7f104, | ||
| 691 | 0x00a3f002, | ||
| 692 | 0xc400aacf, | ||
| 693 | 0x0bf404ab, | ||
| 694 | 0x10d7f030, | ||
| 695 | 0x1a00e7f1, | ||
| 696 | 0xcf00e3f0, | ||
| 697 | 0xf7f100ee, | ||
| 698 | 0xf3f01900, | ||
| 699 | 0x00ffcf00, | ||
| 628 | 0xb70421f4, | 700 | 0xb70421f4, |
| 629 | 0xf00400b0, | 701 | 0xf00400b0, |
| 630 | 0xbed001e7, | 702 | 0x07f101e7, |
| 631 | /* 0x05e9: ih_no_fifo */ | 703 | 0x03f01d00, |
| 632 | 0x00abe400, | 704 | 0x000ed000, |
| 633 | 0x0d0bf401, | 705 | /* 0x071a: ih_no_fifo */ |
| 634 | 0xf110d7f0, | 706 | 0xabe404bd, |
| 635 | 0xf44001e7, | 707 | 0x0bf40100, |
| 636 | /* 0x05fa: ih_no_ctxsw */ | 708 | 0x10d7f00d, |
| 637 | 0xb7f10421, | 709 | 0x4001e7f1, |
| 638 | 0xb0bd0104, | 710 | /* 0x072b: ih_no_ctxsw */ |
| 639 | 0xf4b4abff, | 711 | 0xe40421f4, |
| 640 | 0xa7f10d0b, | 712 | 0xf40400ab, |
| 641 | 0xa4b60c1c, | 713 | 0xb7f1140b, |
| 642 | 0x00abd006, | 714 | 0xbfb90100, |
| 643 | /* 0x0610: ih_no_other */ | 715 | 0x44e7f102, |
| 644 | 0xfc400ad0, | 716 | 0x40e3f001, |
| 717 | /* 0x0743: ih_no_fwmthd */ | ||
| 718 | 0xf19d21f4, | ||
| 719 | 0xbd0104b7, | ||
| 720 | 0xb4abffb0, | ||
| 721 | 0xf10f0bf4, | ||
| 722 | 0xf0070007, | ||
| 723 | 0x0bd00303, | ||
| 724 | /* 0x075b: ih_no_other */ | ||
| 725 | 0xf104bd00, | ||
| 726 | 0xf0010007, | ||
| 727 | 0x0ad00003, | ||
| 728 | 0xfc04bd00, | ||
| 645 | 0xfce0fcf0, | 729 | 0xfce0fcf0, |
| 646 | 0xfcb0fcd0, | 730 | 0xfcb0fcd0, |
| 647 | 0xfc90fca0, | 731 | 0xfc90fca0, |
| 648 | 0x0088fe80, | 732 | 0x0088fe80, |
| 649 | 0x32f480fc, | 733 | 0x32f480fc, |
| 650 | /* 0x062b: ctx_4160s */ | 734 | /* 0x077f: ctx_4160s */ |
| 651 | 0xf101f800, | 735 | 0xf001f800, |
| 652 | 0xf04160e7, | 736 | 0xffb901f7, |
| 653 | 0xf7f040e3, | 737 | 0x60e7f102, |
| 654 | 0x8d21f401, | 738 | 0x40e3f041, |
| 655 | /* 0x0638: ctx_4160s_wait */ | 739 | /* 0x078f: ctx_4160s_wait */ |
| 656 | 0xc86821f4, | 740 | 0xf19d21f4, |
| 657 | 0x0bf404ff, | ||
| 658 | /* 0x0643: ctx_4160c */ | ||
| 659 | 0xf100f8fa, | ||
| 660 | 0xf04160e7, | 741 | 0xf04160e7, |
| 661 | 0xf4bd40e3, | 742 | 0x21f440e3, |
| 662 | 0xf88d21f4, | 743 | 0x02ffb968, |
| 663 | /* 0x0651: ctx_4170s */ | 744 | 0xf404ffc8, |
| 664 | 0x70e7f100, | 745 | 0x00f8f00b, |
| 746 | /* 0x07a4: ctx_4160c */ | ||
| 747 | 0xffb9f4bd, | ||
| 748 | 0x60e7f102, | ||
| 665 | 0x40e3f041, | 749 | 0x40e3f041, |
| 666 | 0xf410f5f0, | 750 | 0xf89d21f4, |
| 667 | 0x00f88d21, | 751 | /* 0x07b5: ctx_4170s */ |
| 668 | /* 0x0660: ctx_4170w */ | 752 | 0x10f5f000, |
| 669 | 0x4170e7f1, | 753 | 0xf102ffb9, |
| 670 | 0xf440e3f0, | 754 | 0xf04170e7, |
| 671 | 0xf4f06821, | 755 | 0x21f440e3, |
| 672 | 0xf31bf410, | 756 | /* 0x07c7: ctx_4170w */ |
| 673 | /* 0x0672: ctx_redswitch */ | 757 | 0xf100f89d, |
| 674 | 0xe7f100f8, | 758 | 0xf04170e7, |
| 675 | 0xe4b60614, | 759 | 0x21f440e3, |
| 676 | 0x70f7f106, | 760 | 0x02ffb968, |
| 677 | 0x00efd002, | 761 | 0xf410f4f0, |
| 678 | /* 0x0683: ctx_redswitch_delay */ | 762 | 0x00f8f01b, |
| 679 | 0xb608f7f0, | 763 | /* 0x07dc: ctx_redswitch */ |
| 680 | 0x1bf401f2, | 764 | 0x0200e7f1, |
| 681 | 0x70f7f1fd, | 765 | 0xf040e5f0, |
| 682 | 0x00efd007, | 766 | 0xe5f020e5, |
| 683 | /* 0x0692: ctx_86c */ | 767 | 0x0007f110, |
| 684 | 0xe7f100f8, | 768 | 0x0103f085, |
| 685 | 0xe4b6086c, | 769 | 0xbd000ed0, |
| 686 | 0x00efd006, | 770 | 0x08f7f004, |
| 687 | 0x8a14e7f1, | 771 | /* 0x07f8: ctx_redswitch_delay */ |
| 688 | 0xf440e3f0, | 772 | 0xf401f2b6, |
| 689 | 0xe7f18d21, | 773 | 0xe5f1fd1b, |
| 690 | 0xe3f0a86c, | 774 | 0xe5f10400, |
| 691 | 0x8d21f441, | 775 | 0x07f10100, |
| 692 | /* 0x06b2: ctx_load */ | 776 | 0x03f08500, |
| 777 | 0x000ed001, | ||
| 778 | 0x00f804bd, | ||
| 779 | /* 0x0814: ctx_86c */ | ||
| 780 | 0x1b0007f1, | ||
| 781 | 0xd00203f0, | ||
| 782 | 0x04bd000f, | ||
| 783 | 0xf102ffb9, | ||
| 784 | 0xf08a14e7, | ||
| 785 | 0x21f440e3, | ||
| 786 | 0x02ffb99d, | ||
| 787 | 0xa86ce7f1, | ||
| 788 | 0xf441e3f0, | ||
| 789 | 0x00f89d21, | ||
| 790 | /* 0x083c: ctx_mem */ | ||
| 791 | 0x840007f1, | ||
| 792 | 0xd00203f0, | ||
| 793 | 0x04bd000f, | ||
| 794 | /* 0x0848: ctx_mem_wait */ | ||
| 795 | 0x8400f7f1, | ||
| 796 | 0xcf02f3f0, | ||
| 797 | 0xfffd00ff, | ||
| 798 | 0xf31bf405, | ||
| 799 | /* 0x085a: ctx_load */ | ||
| 693 | 0x94bd00f8, | 800 | 0x94bd00f8, |
| 694 | 0xf10599f0, | 801 | 0xf10599f0, |
| 695 | 0xf00f0007, | 802 | 0xf00f0007, |
| 696 | 0x09d00203, | 803 | 0x09d00203, |
| 697 | 0xf004bd00, | 804 | 0xf004bd00, |
| 698 | 0x21f40ca7, | 805 | 0x21f40ca7, |
| 699 | 0x2417f1c9, | 806 | 0xf1f4bdd0, |
| 700 | 0x0614b60a, | 807 | 0xf0890007, |
| 701 | 0xf10010d0, | 808 | 0x0fd00203, |
| 702 | 0xb60b0037, | 809 | 0xf104bd00, |
| 703 | 0x32d00634, | 810 | 0xf0c10007, |
| 704 | 0x0c17f140, | 811 | 0x02d00203, |
| 705 | 0x0614b60a, | 812 | 0xf104bd00, |
| 706 | 0xd00747f0, | 813 | 0xf0830007, |
| 707 | 0x14d00012, | 814 | 0x02d00203, |
| 708 | /* 0x06ed: ctx_chan_wait_0 */ | 815 | 0xf004bd00, |
| 709 | 0x4014cf40, | 816 | 0x21f507f7, |
| 710 | 0xf41f44f0, | 817 | 0x07f1083c, |
| 711 | 0x32d0fa1b, | 818 | 0x03f0c000, |
| 712 | 0x000bfe00, | 819 | 0x0002d002, |
| 713 | 0xb61f2af0, | 820 | 0x0bfe04bd, |
| 714 | 0x20b60424, | 821 | 0x1f2af000, |
| 715 | 0xf094bd02, | 822 | 0xb60424b6, |
| 823 | 0x94bd0220, | ||
| 824 | 0xf10899f0, | ||
| 825 | 0xf00f0007, | ||
| 826 | 0x09d00203, | ||
| 827 | 0xf104bd00, | ||
| 828 | 0xf0810007, | ||
| 829 | 0x02d00203, | ||
| 830 | 0xf104bd00, | ||
| 831 | 0xf1000027, | ||
| 832 | 0xf0800023, | ||
| 833 | 0x07f10225, | ||
| 834 | 0x03f08800, | ||
| 835 | 0x0002d002, | ||
| 836 | 0x17f004bd, | ||
| 837 | 0x0027f110, | ||
| 838 | 0x0223f002, | ||
| 839 | 0xf80512fa, | ||
| 840 | 0xf094bd03, | ||
| 716 | 0x07f10899, | 841 | 0x07f10899, |
| 717 | 0x03f00f00, | 842 | 0x03f01700, |
| 718 | 0x0009d002, | 843 | 0x0009d002, |
| 719 | 0x17f104bd, | 844 | 0x019804bd, |
| 720 | 0x14b60a04, | 845 | 0x1814b681, |
| 721 | 0x0012d006, | 846 | 0xb6800298, |
| 722 | 0x0a2017f1, | 847 | 0x12fd0825, |
| 723 | 0xf00614b6, | 848 | 0x16018005, |
| 724 | 0x23f10227, | ||
| 725 | 0x12d08000, | ||
| 726 | 0x1017f000, | ||
| 727 | 0x020027f1, | ||
| 728 | 0xfa0223f0, | ||
| 729 | 0x03f80512, | ||
| 730 | 0x99f094bd, | 849 | 0x99f094bd, |
| 731 | 0x0007f108, | 850 | 0x0007f109, |
| 732 | 0x0203f017, | 851 | 0x0203f00f, |
| 733 | 0xbd0009d0, | 852 | 0xbd0009d0, |
| 734 | 0x81019804, | 853 | 0x0007f104, |
| 735 | 0x981814b6, | 854 | 0x0203f081, |
| 736 | 0x25b68002, | 855 | 0xbd0001d0, |
| 737 | 0x0512fd08, | 856 | 0x0127f004, |
| 738 | 0xbd160180, | 857 | 0x880007f1, |
| 739 | 0x0999f094, | ||
| 740 | 0x0f0007f1, | ||
| 741 | 0xd00203f0, | ||
| 742 | 0x04bd0009, | ||
| 743 | 0x0a0427f1, | ||
| 744 | 0xd00624b6, | ||
| 745 | 0x27f00021, | ||
| 746 | 0x2017f101, | ||
| 747 | 0x0614b60a, | ||
| 748 | 0xf10012d0, | ||
| 749 | 0xf0010017, | ||
| 750 | 0x01fa0613, | ||
| 751 | 0xbd03f805, | ||
| 752 | 0x0999f094, | ||
| 753 | 0x170007f1, | ||
| 754 | 0xd00203f0, | 858 | 0xd00203f0, |
| 755 | 0x04bd0009, | 859 | 0x04bd0002, |
| 860 | 0x010017f1, | ||
| 861 | 0xfa0613f0, | ||
| 862 | 0x03f80501, | ||
| 756 | 0x99f094bd, | 863 | 0x99f094bd, |
| 757 | 0x0007f105, | 864 | 0x0007f109, |
| 758 | 0x0203f017, | 865 | 0x0203f017, |
| 759 | 0xbd0009d0, | 866 | 0xbd0009d0, |
| 760 | /* 0x07bb: ctx_chan */ | 867 | 0xf094bd04, |
| 761 | 0xf500f804, | 868 | 0x07f10599, |
| 762 | 0xf5062b21, | 869 | 0x03f01700, |
| 763 | 0xf006b221, | 870 | 0x0009d002, |
| 764 | 0x21f40ca7, | 871 | 0x00f804bd, |
| 765 | 0x1017f1c9, | 872 | /* 0x0978: ctx_chan */ |
| 766 | 0x0614b60a, | 873 | 0x077f21f5, |
| 767 | 0xd00527f0, | 874 | 0x085a21f5, |
| 768 | /* 0x07d6: ctx_chan_wait */ | 875 | 0xf40ca7f0, |
| 769 | 0x12cf0012, | 876 | 0xf7f0d021, |
| 770 | 0x0522fd00, | 877 | 0x3c21f505, |
| 771 | 0xf5fa1bf4, | 878 | 0xa421f508, |
| 772 | 0xf8064321, | 879 | /* 0x0993: ctx_mmio_exec */ |
| 773 | /* 0x07e5: ctx_mmio_exec */ | 880 | 0x9800f807, |
| 774 | 0x41039800, | 881 | 0x07f14103, |
| 775 | 0x0a0427f1, | 882 | 0x03f08100, |
| 776 | 0xd00624b6, | 883 | 0x0003d002, |
| 777 | 0x34bd0023, | 884 | 0x34bd04bd, |
| 778 | /* 0x07f4: ctx_mmio_loop */ | 885 | /* 0x09a4: ctx_mmio_loop */ |
| 779 | 0xf4ff34c4, | 886 | 0xf4ff34c4, |
| 780 | 0x57f10f1b, | 887 | 0x57f10f1b, |
| 781 | 0x53f00200, | 888 | 0x53f00200, |
| 782 | 0x0535fa06, | 889 | 0x0535fa06, |
| 783 | /* 0x0806: ctx_mmio_pull */ | 890 | /* 0x09b6: ctx_mmio_pull */ |
| 784 | 0x4e9803f8, | 891 | 0x4e9803f8, |
| 785 | 0x814f9880, | 892 | 0x814f9880, |
| 786 | 0xb68d21f4, | 893 | 0xb69d21f4, |
| 787 | 0x12b60830, | 894 | 0x12b60830, |
| 788 | 0xdf1bf401, | 895 | 0xdf1bf401, |
| 789 | /* 0x0818: ctx_mmio_done */ | 896 | /* 0x09c8: ctx_mmio_done */ |
| 790 | 0xd0160398, | 897 | 0xf1160398, |
| 791 | 0x00800023, | 898 | 0xf0810007, |
| 792 | 0x0017f140, | 899 | 0x03d00203, |
| 793 | 0x0613f001, | 900 | 0x8004bd00, |
| 794 | 0xf80601fa, | 901 | 0x17f14000, |
| 795 | /* 0x082f: ctx_xfer */ | 902 | 0x13f00100, |
| 796 | 0xf100f803, | 903 | 0x0601fa06, |
| 797 | 0xb60c00f7, | 904 | 0x00f803f8, |
| 798 | 0xe7f006f4, | 905 | /* 0x09e8: ctx_xfer */ |
| 799 | 0x80fed004, | 906 | 0xf104e7f0, |
| 800 | /* 0x083c: ctx_xfer_idle */ | 907 | 0xf0020007, |
| 801 | 0xf100fecf, | 908 | 0x0ed00303, |
| 802 | 0xf42000e4, | 909 | /* 0x09f7: ctx_xfer_idle */ |
| 803 | 0x11f4f91b, | 910 | 0xf104bd00, |
| 804 | 0x1102f406, | 911 | 0xf00000e7, |
| 805 | /* 0x084c: ctx_xfer_pre */ | 912 | 0xeecf03e3, |
| 806 | 0xf510f7f0, | 913 | 0x00e4f100, |
| 807 | 0xf5069221, | 914 | 0xf21bf420, |
| 808 | 0xf4062b21, | 915 | 0xf40611f4, |
| 809 | /* 0x085a: ctx_xfer_pre_load */ | 916 | /* 0x0a0e: ctx_xfer_pre */ |
| 810 | 0xf7f01c11, | 917 | 0xf7f01102, |
| 811 | 0x5121f502, | 918 | 0x1421f510, |
| 812 | 0x6021f506, | 919 | 0x7f21f508, |
| 813 | 0x7221f506, | 920 | 0x1c11f407, |
| 814 | 0xf5f4bd06, | 921 | /* 0x0a1c: ctx_xfer_pre_load */ |
| 815 | 0xf5065121, | 922 | 0xf502f7f0, |
| 816 | /* 0x0873: ctx_xfer_exec */ | 923 | 0xf507b521, |
| 817 | 0x9806b221, | 924 | 0xf507c721, |
| 818 | 0x27f11601, | 925 | 0xbd07dc21, |
| 819 | 0x24b60414, | 926 | 0xb521f5f4, |
| 820 | 0x0020d006, | 927 | 0x5a21f507, |
| 821 | 0xa500e7f1, | 928 | /* 0x0a35: ctx_xfer_exec */ |
| 822 | 0xb941e3f0, | 929 | 0x16019808, |
| 823 | 0x21f4021f, | 930 | 0x07f124bd, |
| 824 | 0x04e0b68d, | 931 | 0x03f00500, |
| 825 | 0xf001fcf0, | 932 | 0x0002d001, |
| 826 | 0x24b6022c, | 933 | 0x1fb904bd, |
| 827 | 0x05f2fd01, | 934 | 0x00e7f102, |
| 828 | 0xf18d21f4, | 935 | 0x41e3f0a5, |
| 829 | 0xf04afc17, | 936 | 0xf09d21f4, |
| 830 | 0x27f00213, | 937 | 0x2cf001fc, |
| 831 | 0x0012d00c, | 938 | 0x0124b602, |
| 832 | 0x021521f5, | 939 | 0xb905f2fd, |
| 833 | 0x47fc27f1, | 940 | 0xe7f102ff, |
| 834 | 0xd00223f0, | 941 | 0xe3f0a504, |
| 835 | 0x2cf00020, | 942 | 0x9d21f441, |
| 943 | 0x026a21f5, | ||
| 944 | 0x07f124bd, | ||
| 945 | 0x03f047fc, | ||
| 946 | 0x0002d002, | ||
| 947 | 0x2cf004bd, | ||
| 836 | 0x0320b601, | 948 | 0x0320b601, |
| 837 | 0xf00012d0, | 949 | 0x4afc07f1, |
| 838 | 0xa5f001ac, | 950 | 0xd00203f0, |
| 839 | 0x00b7f006, | 951 | 0x04bd0002, |
| 840 | 0x98000c98, | 952 | 0xf001acf0, |
| 841 | 0xe7f0010d, | 953 | 0xb7f006a5, |
| 842 | 0x6621f500, | 954 | 0x000c9800, |
| 843 | 0x08a7f001, | 955 | 0xf0010d98, |
| 844 | 0x010921f5, | 956 | 0x21f500e7, |
| 845 | 0x021521f5, | 957 | 0xa7f0016f, |
| 846 | 0xf02201f4, | 958 | 0x1021f508, |
| 847 | 0x21f40ca7, | 959 | 0x5e21f501, |
| 848 | 0x1017f1c9, | 960 | 0x1301f402, |
| 849 | 0x0614b60a, | 961 | 0xf40ca7f0, |
| 850 | 0xd00527f0, | 962 | 0xf7f0d021, |
| 851 | /* 0x08fa: ctx_xfer_post_save_wait */ | 963 | 0x3c21f505, |
| 852 | 0x12cf0012, | 964 | 0x3202f408, |
| 853 | 0x0522fd00, | 965 | /* 0x0ac4: ctx_xfer_post */ |
| 854 | 0xf4fa1bf4, | 966 | 0xf502f7f0, |
| 855 | /* 0x0906: ctx_xfer_post */ | 967 | 0xbd07b521, |
| 856 | 0xf7f03202, | 968 | 0x1421f5f4, |
| 857 | 0x5121f502, | 969 | 0x7f21f508, |
| 858 | 0xf5f4bd06, | 970 | 0xc721f502, |
| 859 | 0xf5069221, | 971 | 0xf5f4bd07, |
| 860 | 0xf5023421, | 972 | 0xf407b521, |
| 861 | 0xbd066021, | 973 | 0x01981011, |
| 862 | 0x5121f5f4, | 974 | 0x0511fd40, |
| 863 | 0x1011f406, | 975 | 0xf5070bf4, |
| 864 | 0xfd400198, | 976 | /* 0x0aef: ctx_xfer_no_post_mmio */ |
| 865 | 0x0bf40511, | 977 | 0xf5099321, |
| 866 | 0xe521f507, | 978 | /* 0x0af3: ctx_xfer_done */ |
| 867 | /* 0x0931: ctx_xfer_no_post_mmio */ | 979 | 0xf807a421, |
| 868 | 0x4321f507, | ||
| 869 | /* 0x0935: ctx_xfer_done */ | ||
| 870 | 0x0000f806, | ||
| 871 | 0x00000000, | ||
| 872 | 0x00000000, | ||
| 873 | 0x00000000, | ||
| 874 | 0x00000000, | ||
| 875 | 0x00000000, | ||
| 876 | 0x00000000, | ||
| 877 | 0x00000000, | ||
| 878 | 0x00000000, | ||
| 879 | 0x00000000, | ||
| 880 | 0x00000000, | ||
| 881 | 0x00000000, | ||
| 882 | 0x00000000, | ||
| 883 | 0x00000000, | ||
| 884 | 0x00000000, | ||
| 885 | 0x00000000, | ||
| 886 | 0x00000000, | ||
| 887 | 0x00000000, | ||
| 888 | 0x00000000, | ||
| 889 | 0x00000000, | ||
| 890 | 0x00000000, | ||
| 891 | 0x00000000, | ||
| 892 | 0x00000000, | ||
| 893 | 0x00000000, | ||
| 894 | 0x00000000, | ||
| 895 | 0x00000000, | ||
| 896 | 0x00000000, | ||
| 897 | 0x00000000, | ||
| 898 | 0x00000000, | ||
| 899 | 0x00000000, | ||
| 900 | 0x00000000, | ||
| 901 | 0x00000000, | ||
| 902 | 0x00000000, | ||
| 903 | 0x00000000, | ||
| 904 | 0x00000000, | ||
| 905 | 0x00000000, | ||
| 906 | 0x00000000, | ||
| 907 | 0x00000000, | ||
| 908 | 0x00000000, | ||
| 909 | 0x00000000, | ||
| 910 | 0x00000000, | ||
| 911 | 0x00000000, | ||
| 912 | 0x00000000, | ||
| 913 | 0x00000000, | ||
| 914 | 0x00000000, | ||
| 915 | 0x00000000, | ||
| 916 | 0x00000000, | ||
| 917 | 0x00000000, | ||
| 918 | 0x00000000, | 980 | 0x00000000, |
| 919 | 0x00000000, | 981 | 0x00000000, |
| 920 | 0x00000000, | 982 | 0x00000000, |
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h index a1b9f763996a..84af82418987 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h +++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h | |||
| @@ -206,14 +206,14 @@ uint32_t nvd7_grhub_data[] = { | |||
| 206 | }; | 206 | }; |
| 207 | 207 | ||
| 208 | uint32_t nvd7_grhub_code[] = { | 208 | uint32_t nvd7_grhub_code[] = { |
| 209 | 0x031b0ef5, | 209 | 0x039b0ef5, |
| 210 | /* 0x0004: queue_put */ | 210 | /* 0x0004: queue_put */ |
| 211 | 0x9800d898, | 211 | 0x9800d898, |
| 212 | 0x86f001d9, | 212 | 0x86f001d9, |
| 213 | 0x0489b808, | 213 | 0x0489b808, |
| 214 | 0xf00c1bf4, | 214 | 0xf00c1bf4, |
| 215 | 0x21f502f7, | 215 | 0x21f502f7, |
| 216 | 0x00f802fe, | 216 | 0x00f8037e, |
| 217 | /* 0x001c: queue_put_next */ | 217 | /* 0x001c: queue_put_next */ |
| 218 | 0xb60798c4, | 218 | 0xb60798c4, |
| 219 | 0x8dbb0384, | 219 | 0x8dbb0384, |
| @@ -237,184 +237,214 @@ uint32_t nvd7_grhub_code[] = { | |||
| 237 | /* 0x0066: queue_get_done */ | 237 | /* 0x0066: queue_get_done */ |
| 238 | 0x00f80132, | 238 | 0x00f80132, |
| 239 | /* 0x0068: nv_rd32 */ | 239 | /* 0x0068: nv_rd32 */ |
| 240 | 0x0728b7f1, | 240 | 0xf002ecb9, |
| 241 | 0xb906b4b6, | 241 | 0x07f11fc9, |
| 242 | 0xc9f002ec, | 242 | 0x03f0ca00, |
| 243 | 0x00bcd01f, | 243 | 0x000cd001, |
| 244 | /* 0x0078: nv_rd32_wait */ | 244 | /* 0x007a: nv_rd32_wait */ |
| 245 | 0xc800bccf, | 245 | 0xc7f104bd, |
| 246 | 0x1bf41fcc, | 246 | 0xc3f0ca00, |
| 247 | 0x06a7f0fa, | 247 | 0x00cccf01, |
| 248 | 0x010921f5, | 248 | 0xf41fccc8, |
| 249 | 0xf840bfcf, | 249 | 0xa7f0f31b, |
| 250 | /* 0x008d: nv_wr32 */ | 250 | 0x1021f506, |
| 251 | 0x28b7f100, | 251 | 0x00f7f101, |
| 252 | 0x06b4b607, | 252 | 0x01f3f0cb, |
| 253 | 0xb980bfd0, | 253 | 0xf800ffcf, |
| 254 | 0xc9f002ec, | 254 | /* 0x009d: nv_wr32 */ |
| 255 | 0x1ec9f01f, | 255 | 0x0007f100, |
| 256 | /* 0x00a3: nv_wr32_wait */ | 256 | 0x0103f0cc, |
| 257 | 0xcf00bcd0, | 257 | 0xbd000fd0, |
| 258 | 0xccc800bc, | 258 | 0x02ecb904, |
| 259 | 0xfa1bf41f, | 259 | 0xf01fc9f0, |
| 260 | /* 0x00ae: watchdog_reset */ | 260 | 0x07f11ec9, |
| 261 | 0x87f100f8, | 261 | 0x03f0ca00, |
| 262 | 0x84b60430, | 262 | 0x000cd001, |
| 263 | 0x1ff9f006, | 263 | /* 0x00be: nv_wr32_wait */ |
| 264 | 0xf8008fd0, | 264 | 0xc7f104bd, |
| 265 | /* 0x00bd: watchdog_clear */ | 265 | 0xc3f0ca00, |
| 266 | 0x3087f100, | 266 | 0x00cccf01, |
| 267 | 0x0684b604, | 267 | 0xf41fccc8, |
| 268 | 0xf80080d0, | 268 | 0x00f8f31b, |
| 269 | /* 0x00c9: wait_donez */ | 269 | /* 0x00d0: wait_donez */ |
| 270 | 0xf094bd00, | 270 | 0x99f094bd, |
| 271 | 0x07f10099, | 271 | 0x0007f100, |
| 272 | 0x03f00f00, | 272 | 0x0203f00f, |
| 273 | 0x0009d002, | 273 | 0xbd0009d0, |
| 274 | 0x07f104bd, | 274 | 0x0007f104, |
| 275 | 0x03f00600, | 275 | 0x0203f006, |
| 276 | 0x000ad002, | 276 | 0xbd000ad0, |
| 277 | /* 0x00e6: wait_donez_ne */ | 277 | /* 0x00ed: wait_donez_ne */ |
| 278 | 0x87f104bd, | 278 | 0x0087f104, |
| 279 | 0x83f00000, | 279 | 0x0183f000, |
| 280 | 0x0088cf01, | 280 | 0xff0088cf, |
| 281 | 0xf4888aff, | 281 | 0x1bf4888a, |
| 282 | 0x94bdf31b, | 282 | 0xf094bdf3, |
| 283 | 0xf10099f0, | ||
| 284 | 0xf0170007, | ||
| 285 | 0x09d00203, | ||
| 286 | 0xf804bd00, | ||
| 287 | /* 0x0109: wait_doneo */ | ||
| 288 | 0xf094bd00, | ||
| 289 | 0x07f10099, | 283 | 0x07f10099, |
| 290 | 0x03f00f00, | 284 | 0x03f01700, |
| 291 | 0x0009d002, | 285 | 0x0009d002, |
| 292 | 0x87f104bd, | 286 | 0x00f804bd, |
| 293 | 0x84b60818, | 287 | /* 0x0110: wait_doneo */ |
| 294 | 0x008ad006, | ||
| 295 | /* 0x0124: wait_doneo_e */ | ||
| 296 | 0x040087f1, | ||
| 297 | 0xcf0684b6, | ||
| 298 | 0x8aff0088, | ||
| 299 | 0xf30bf488, | ||
| 300 | 0x99f094bd, | 288 | 0x99f094bd, |
| 301 | 0x0007f100, | 289 | 0x0007f100, |
| 302 | 0x0203f017, | 290 | 0x0203f00f, |
| 303 | 0xbd0009d0, | 291 | 0xbd0009d0, |
| 304 | /* 0x0147: mmctx_size */ | 292 | 0x0007f104, |
| 305 | 0xbd00f804, | 293 | 0x0203f006, |
| 306 | /* 0x0149: nv_mmctx_size_loop */ | 294 | 0xbd000ad0, |
| 307 | 0x00e89894, | 295 | /* 0x012d: wait_doneo_e */ |
| 308 | 0xb61a85b6, | 296 | 0x0087f104, |
| 309 | 0x84b60180, | 297 | 0x0183f000, |
| 310 | 0x0098bb02, | 298 | 0xff0088cf, |
| 311 | 0xb804e0b6, | 299 | 0x0bf4888a, |
| 312 | 0x1bf404ef, | 300 | 0xf094bdf3, |
| 313 | 0x029fb9eb, | 301 | 0x07f10099, |
| 314 | /* 0x0166: mmctx_xfer */ | 302 | 0x03f01700, |
| 315 | 0x94bd00f8, | 303 | 0x0009d002, |
| 316 | 0xf10199f0, | 304 | 0x00f804bd, |
| 317 | 0xf00f0007, | 305 | /* 0x0150: mmctx_size */ |
| 318 | 0x09d00203, | 306 | /* 0x0152: nv_mmctx_size_loop */ |
| 319 | 0xf104bd00, | 307 | 0xe89894bd, |
| 320 | 0xb6071087, | 308 | 0x1a85b600, |
| 321 | 0x94bd0684, | 309 | 0xb60180b6, |
| 322 | 0xf405bbfd, | 310 | 0x98bb0284, |
| 323 | 0x8bd0090b, | 311 | 0x04e0b600, |
| 324 | 0x0099f000, | 312 | 0xf404efb8, |
| 325 | /* 0x018c: mmctx_base_disabled */ | 313 | 0x9fb9eb1b, |
| 326 | 0xf405eefd, | 314 | /* 0x016f: mmctx_xfer */ |
| 327 | 0x8ed00c0b, | 315 | 0xbd00f802, |
| 328 | 0xc08fd080, | 316 | 0x0199f094, |
| 329 | /* 0x019b: mmctx_multi_disabled */ | 317 | 0x0f0007f1, |
| 330 | 0xb70199f0, | 318 | 0xd00203f0, |
| 331 | 0xc8010080, | 319 | 0x04bd0009, |
| 320 | 0xbbfd94bd, | ||
| 321 | 0x120bf405, | ||
| 322 | 0xc40007f1, | ||
| 323 | 0xd00103f0, | ||
| 324 | 0x04bd000b, | ||
| 325 | /* 0x0197: mmctx_base_disabled */ | ||
| 326 | 0xfd0099f0, | ||
| 327 | 0x0bf405ee, | ||
| 328 | 0x0007f11e, | ||
| 329 | 0x0103f0c6, | ||
| 330 | 0xbd000ed0, | ||
| 331 | 0x0007f104, | ||
| 332 | 0x0103f0c7, | ||
| 333 | 0xbd000fd0, | ||
| 334 | 0x0199f004, | ||
| 335 | /* 0x01b8: mmctx_multi_disabled */ | ||
| 336 | 0xb600abc8, | ||
| 337 | 0xb9f010b4, | ||
| 338 | 0x01aec80c, | ||
| 339 | 0xfd11e4b6, | ||
| 340 | 0x07f105be, | ||
| 341 | 0x03f0c500, | ||
| 342 | 0x000bd001, | ||
| 343 | /* 0x01d6: mmctx_exec_loop */ | ||
| 344 | /* 0x01d6: mmctx_wait_free */ | ||
| 345 | 0xe7f104bd, | ||
| 346 | 0xe3f0c500, | ||
| 347 | 0x00eecf01, | ||
| 348 | 0xf41fe4f0, | ||
| 349 | 0xce98f30b, | ||
| 350 | 0x05e9fd00, | ||
| 351 | 0xc80007f1, | ||
| 352 | 0xd00103f0, | ||
| 353 | 0x04bd000e, | ||
| 354 | 0xb804c0b6, | ||
| 355 | 0x1bf404cd, | ||
| 356 | 0x02abc8d8, | ||
| 357 | /* 0x0207: mmctx_fini_wait */ | ||
| 358 | 0xf11f1bf4, | ||
| 359 | 0xf0c500b7, | ||
| 360 | 0xbbcf01b3, | ||
| 361 | 0x1fb4f000, | ||
| 362 | 0xf410b4b0, | ||
| 363 | 0xa7f0f01b, | ||
| 364 | 0xd021f402, | ||
| 365 | /* 0x0223: mmctx_stop */ | ||
| 366 | 0xc82b0ef4, | ||
| 332 | 0xb4b600ab, | 367 | 0xb4b600ab, |
| 333 | 0x0cb9f010, | 368 | 0x0cb9f010, |
| 334 | 0xb601aec8, | 369 | 0xf112b9f0, |
| 335 | 0xbefd11e4, | 370 | 0xf0c50007, |
| 336 | 0x008bd005, | 371 | 0x0bd00103, |
| 337 | /* 0x01b4: mmctx_exec_loop */ | 372 | /* 0x023b: mmctx_stop_wait */ |
| 338 | /* 0x01b4: mmctx_wait_free */ | 373 | 0xf104bd00, |
| 339 | 0xf0008ecf, | 374 | 0xf0c500b7, |
| 340 | 0x0bf41fe4, | 375 | 0xbbcf01b3, |
| 341 | 0x00ce98fa, | 376 | 0x12bbc800, |
| 342 | 0xd005e9fd, | 377 | /* 0x024b: mmctx_done */ |
| 343 | 0xc0b6c08e, | 378 | 0xbdf31bf4, |
| 344 | 0x04cdb804, | 379 | 0x0199f094, |
| 345 | 0xc8e81bf4, | 380 | 0x170007f1, |
| 346 | 0x1bf402ab, | 381 | 0xd00203f0, |
| 347 | /* 0x01d5: mmctx_fini_wait */ | 382 | 0x04bd0009, |
| 348 | 0x008bcf18, | 383 | /* 0x025e: strand_wait */ |
| 349 | 0xb01fb4f0, | 384 | 0xa0f900f8, |
| 350 | 0x1bf410b4, | 385 | 0xf402a7f0, |
| 351 | 0x02a7f0f7, | 386 | 0xa0fcd021, |
| 352 | 0xf4c921f4, | 387 | /* 0x026a: strand_pre */ |
| 353 | /* 0x01ea: mmctx_stop */ | 388 | 0x97f000f8, |
| 354 | 0xabc81b0e, | 389 | 0xfc07f10c, |
| 355 | 0x10b4b600, | 390 | 0x0203f04a, |
| 356 | 0xf00cb9f0, | 391 | 0xbd0009d0, |
| 357 | 0x8bd012b9, | 392 | 0x5e21f504, |
| 358 | /* 0x01f9: mmctx_stop_wait */ | 393 | /* 0x027f: strand_post */ |
| 359 | 0x008bcf00, | 394 | 0xf000f802, |
| 360 | 0xf412bbc8, | 395 | 0x07f10d97, |
| 361 | /* 0x0202: mmctx_done */ | 396 | 0x03f04afc, |
| 362 | 0x94bdfa1b, | ||
| 363 | 0xf10199f0, | ||
| 364 | 0xf0170007, | ||
| 365 | 0x09d00203, | ||
| 366 | 0xf804bd00, | ||
| 367 | /* 0x0215: strand_wait */ | ||
| 368 | 0xf0a0f900, | ||
| 369 | 0x21f402a7, | ||
| 370 | 0xf8a0fcc9, | ||
| 371 | /* 0x0221: strand_pre */ | ||
| 372 | 0xfc87f100, | ||
| 373 | 0x0283f04a, | ||
| 374 | 0xd00c97f0, | ||
| 375 | 0x21f50089, | ||
| 376 | 0x00f80215, | ||
| 377 | /* 0x0234: strand_post */ | ||
| 378 | 0x4afc87f1, | ||
| 379 | 0xf00283f0, | ||
| 380 | 0x89d00d97, | ||
| 381 | 0x1521f500, | ||
| 382 | /* 0x0247: strand_set */ | ||
| 383 | 0xf100f802, | ||
| 384 | 0xf04ffca7, | ||
| 385 | 0xaba202a3, | ||
| 386 | 0xc7f00500, | ||
| 387 | 0x00acd00f, | ||
| 388 | 0xd00bc7f0, | ||
| 389 | 0x21f500bc, | ||
| 390 | 0xaed00215, | ||
| 391 | 0x0ac7f000, | ||
| 392 | 0xf500bcd0, | ||
| 393 | 0xf8021521, | ||
| 394 | /* 0x0271: strand_ctx_init */ | ||
| 395 | 0xf094bd00, | ||
| 396 | 0x07f10399, | ||
| 397 | 0x03f00f00, | ||
| 398 | 0x0009d002, | 397 | 0x0009d002, |
| 399 | 0x21f504bd, | 398 | 0x21f504bd, |
| 400 | 0xe7f00221, | 399 | 0x00f8025e, |
| 401 | 0x4721f503, | 400 | /* 0x0294: strand_set */ |
| 402 | 0xfca7f102, | 401 | 0xf10fc7f0, |
| 403 | 0x02a3f046, | 402 | 0xf04ffc07, |
| 404 | 0x0400aba0, | 403 | 0x0cd00203, |
| 405 | 0xf040a0d0, | 404 | 0xf004bd00, |
| 406 | 0xbcd001c7, | 405 | 0x07f10bc7, |
| 407 | 0x1521f500, | 406 | 0x03f04afc, |
| 408 | 0x010c9202, | 407 | 0x000cd002, |
| 409 | 0xf000acd0, | 408 | 0x07f104bd, |
| 410 | 0xbcd002c7, | 409 | 0x03f04ffc, |
| 411 | 0x1521f500, | 410 | 0x000ed002, |
| 412 | 0x3421f502, | 411 | 0xc7f004bd, |
| 413 | 0x8087f102, | 412 | 0xfc07f10a, |
| 414 | 0x0684b608, | 413 | 0x0203f04a, |
| 415 | 0xb70089cf, | 414 | 0xbd000cd0, |
| 416 | 0x95220080, | 415 | 0x5e21f504, |
| 417 | /* 0x02ca: ctx_init_strand_loop */ | 416 | /* 0x02d3: strand_ctx_init */ |
| 417 | 0xbd00f802, | ||
| 418 | 0x0399f094, | ||
| 419 | 0x0f0007f1, | ||
| 420 | 0xd00203f0, | ||
| 421 | 0x04bd0009, | ||
| 422 | 0x026a21f5, | ||
| 423 | 0xf503e7f0, | ||
| 424 | 0xbd029421, | ||
| 425 | 0xfc07f1c4, | ||
| 426 | 0x0203f047, | ||
| 427 | 0xbd000cd0, | ||
| 428 | 0x01c7f004, | ||
| 429 | 0x4afc07f1, | ||
| 430 | 0xd00203f0, | ||
| 431 | 0x04bd000c, | ||
| 432 | 0x025e21f5, | ||
| 433 | 0xf1010c92, | ||
| 434 | 0xf046fc07, | ||
| 435 | 0x0cd00203, | ||
| 436 | 0xf004bd00, | ||
| 437 | 0x07f102c7, | ||
| 438 | 0x03f04afc, | ||
| 439 | 0x000cd002, | ||
| 440 | 0x21f504bd, | ||
| 441 | 0x21f5025e, | ||
| 442 | 0x87f1027f, | ||
| 443 | 0x83f04200, | ||
| 444 | 0x0097f102, | ||
| 445 | 0x0293f020, | ||
| 446 | 0x950099cf, | ||
| 447 | /* 0x034a: ctx_init_strand_loop */ | ||
| 418 | 0x8ed008fe, | 448 | 0x8ed008fe, |
| 419 | 0x408ed000, | 449 | 0x408ed000, |
| 420 | 0xb6808acf, | 450 | 0xb6808acf, |
| @@ -428,7 +458,7 @@ uint32_t nvd7_grhub_code[] = { | |||
| 428 | 0x170007f1, | 458 | 0x170007f1, |
| 429 | 0xd00203f0, | 459 | 0xd00203f0, |
| 430 | 0x04bd0009, | 460 | 0x04bd0009, |
| 431 | /* 0x02fe: error */ | 461 | /* 0x037e: error */ |
| 432 | 0x07f100f8, | 462 | 0x07f100f8, |
| 433 | 0x03f00500, | 463 | 0x03f00500, |
| 434 | 0x000fd002, | 464 | 0x000fd002, |
| @@ -436,82 +466,117 @@ uint32_t nvd7_grhub_code[] = { | |||
| 436 | 0x0007f101, | 466 | 0x0007f101, |
| 437 | 0x0303f007, | 467 | 0x0303f007, |
| 438 | 0xbd000fd0, | 468 | 0xbd000fd0, |
| 439 | /* 0x031b: init */ | 469 | /* 0x039b: init */ |
| 440 | 0xbd00f804, | 470 | 0xbd00f804, |
| 441 | 0x0004fe04, | 471 | 0x0007fe04, |
| 442 | 0xf10007fe, | 472 | 0x420017f1, |
| 443 | 0xf0120017, | 473 | 0xcf0013f0, |
| 444 | 0x12d00227, | 474 | 0x11e70011, |
| 445 | 0xb117f100, | 475 | 0x14b60109, |
| 446 | 0x0010fe05, | 476 | 0x0014fe08, |
| 447 | 0x040017f1, | 477 | 0xf10227f0, |
| 448 | 0xf1c010d0, | 478 | 0xf0120007, |
| 449 | 0xb6040437, | 479 | 0x02d00003, |
| 450 | 0x27f10634, | 480 | 0xf104bd00, |
| 451 | 0x32d02003, | 481 | 0xfe06c817, |
| 452 | 0x0427f100, | 482 | 0x24bd0010, |
| 453 | 0x0132d020, | 483 | 0x070007f1, |
| 484 | 0xd00003f0, | ||
| 485 | 0x04bd0002, | ||
| 486 | 0x200327f1, | ||
| 487 | 0x010007f1, | ||
| 488 | 0xd00103f0, | ||
| 489 | 0x04bd0002, | ||
| 490 | 0x200427f1, | ||
| 491 | 0x010407f1, | ||
| 492 | 0xd00103f0, | ||
| 493 | 0x04bd0002, | ||
| 454 | 0x200b27f1, | 494 | 0x200b27f1, |
| 455 | 0xf10232d0, | 495 | 0x010807f1, |
| 456 | 0xd0200c27, | 496 | 0xd00103f0, |
| 457 | 0x27f10732, | 497 | 0x04bd0002, |
| 458 | 0x24b60c24, | 498 | 0x200c27f1, |
| 459 | 0x0003b906, | 499 | 0x011c07f1, |
| 460 | 0xf10023d0, | 500 | 0xd00103f0, |
| 501 | 0x04bd0002, | ||
| 502 | 0xf1010392, | ||
| 503 | 0xf0090007, | ||
| 504 | 0x03d00303, | ||
| 505 | 0xf104bd00, | ||
| 461 | 0xf0870427, | 506 | 0xf0870427, |
| 462 | 0x12d00023, | 507 | 0x07f10023, |
| 463 | 0x0012b700, | 508 | 0x03f00400, |
| 464 | 0x0427f001, | 509 | 0x0002d000, |
| 465 | 0xf40012d0, | 510 | 0x27f004bd, |
| 466 | 0xe7f11031, | 511 | 0x0007f104, |
| 467 | 0xe3f09604, | 512 | 0x0003f003, |
| 468 | 0x6821f440, | 513 | 0xbd0002d0, |
| 469 | 0x8090f1c7, | 514 | 0x1031f404, |
| 470 | 0xf4f00301, | 515 | 0x9604e7f1, |
| 471 | 0x020f801f, | 516 | 0xf440e3f0, |
| 472 | 0xbb0117f0, | 517 | 0xfeb96821, |
| 473 | 0x12b6041f, | 518 | 0x90f1c702, |
| 474 | 0x0c27f101, | 519 | 0xf0030180, |
| 475 | 0x0624b604, | 520 | 0x0f801ff4, |
| 476 | 0xd00021d0, | 521 | 0x0117f002, |
| 477 | 0x17f14021, | 522 | 0xb6041fbb, |
| 478 | 0x0e980100, | 523 | 0x07f10112, |
| 479 | 0x010f9800, | 524 | 0x03f00300, |
| 480 | 0x014721f5, | 525 | 0x0001d001, |
| 481 | 0x070037f1, | 526 | 0x07f104bd, |
| 482 | 0x950634b6, | 527 | 0x03f00400, |
| 483 | 0x34d00814, | 528 | 0x0001d001, |
| 484 | 0x4034d000, | 529 | 0x17f104bd, |
| 485 | 0x130030b7, | 530 | 0xf7f00100, |
| 486 | 0xb6001fbb, | 531 | 0xb521f502, |
| 487 | 0x3fd002f5, | 532 | 0xc721f507, |
| 488 | 0x0815b600, | 533 | 0x10f7f007, |
| 489 | 0xb60110b6, | 534 | 0x081421f5, |
| 490 | 0x1fb90814, | 535 | 0x98000e98, |
| 491 | 0x7121f502, | 536 | 0x21f5010f, |
| 492 | 0x001fbb02, | 537 | 0x14950150, |
| 493 | 0xf1020398, | 538 | 0x0007f108, |
| 494 | 0xf0200047, | 539 | 0x0103f0c0, |
| 495 | /* 0x03f6: init_gpc */ | 540 | 0xbd0004d0, |
| 496 | 0x4ea05043, | 541 | 0x0007f104, |
| 497 | 0x1fb90804, | 542 | 0x0103f0c1, |
| 498 | 0x8d21f402, | 543 | 0xbd0004d0, |
| 499 | 0x010c4ea0, | 544 | 0x0030b704, |
| 500 | 0x21f4f4bd, | 545 | 0x001fbb13, |
| 501 | 0x044ea08d, | 546 | 0xf102f5b6, |
| 502 | 0x8d21f401, | 547 | 0xf0d30007, |
| 503 | 0x01004ea0, | 548 | 0x0fd00103, |
| 504 | 0xf402f7f0, | 549 | 0xb604bd00, |
| 505 | 0x4ea08d21, | 550 | 0x10b60815, |
| 506 | /* 0x041e: init_gpc_wait */ | 551 | 0x0814b601, |
| 507 | 0x21f40800, | 552 | 0xf5021fb9, |
| 508 | 0x1fffc868, | 553 | 0xbb02d321, |
| 509 | 0xa0fa0bf4, | 554 | 0x0398001f, |
| 510 | 0xf408044e, | 555 | 0x0047f102, |
| 511 | 0x1fbb6821, | 556 | 0x5043f020, |
| 512 | 0x0040b700, | 557 | /* 0x04f4: init_gpc */ |
| 513 | 0x0132b680, | 558 | 0x08044ea0, |
| 514 | 0xf1be1bf4, | 559 | 0xf4021fb9, |
| 560 | 0x4ea09d21, | ||
| 561 | 0xf4bd010c, | ||
| 562 | 0xa09d21f4, | ||
| 563 | 0xf401044e, | ||
| 564 | 0x4ea09d21, | ||
| 565 | 0xf7f00100, | ||
| 566 | 0x9d21f402, | ||
| 567 | 0x08004ea0, | ||
| 568 | /* 0x051c: init_gpc_wait */ | ||
| 569 | 0xc86821f4, | ||
| 570 | 0x0bf41fff, | ||
| 571 | 0x044ea0fa, | ||
| 572 | 0x6821f408, | ||
| 573 | 0xb7001fbb, | ||
| 574 | 0xb6800040, | ||
| 575 | 0x1bf40132, | ||
| 576 | 0x00f7f0be, | ||
| 577 | 0x081421f5, | ||
| 578 | 0xf500f7f0, | ||
| 579 | 0xf107b521, | ||
| 515 | 0xf0010007, | 580 | 0xf0010007, |
| 516 | 0x01d00203, | 581 | 0x01d00203, |
| 517 | 0xbd04bd00, | 582 | 0xbd04bd00, |
| @@ -519,402 +584,399 @@ uint32_t nvd7_grhub_code[] = { | |||
| 519 | 0x080007f1, | 584 | 0x080007f1, |
| 520 | 0xd00203f0, | 585 | 0xd00203f0, |
| 521 | 0x04bd0001, | 586 | 0x04bd0001, |
| 522 | /* 0x0458: main */ | 587 | /* 0x0564: main */ |
| 523 | 0xf40031f4, | 588 | 0xf40031f4, |
| 524 | 0xd7f00028, | 589 | 0xd7f00028, |
| 525 | 0x3921f410, | 590 | 0x3921f410, |
| 526 | 0xb1f401f4, | 591 | 0xb1f401f4, |
| 527 | 0xf54001e4, | 592 | 0xf54001e4, |
| 528 | 0xbd00de1b, | 593 | 0xbd00e91b, |
| 529 | 0x0499f094, | 594 | 0x0499f094, |
| 530 | 0x0f0007f1, | 595 | 0x0f0007f1, |
| 531 | 0xd00203f0, | 596 | 0xd00203f0, |
| 532 | 0x04bd0009, | 597 | 0x04bd0009, |
| 533 | 0x0b0017f1, | 598 | 0xc00017f1, |
| 534 | 0xcf0614b6, | 599 | 0xcf0213f0, |
| 535 | 0x11cf4012, | 600 | 0x27f10011, |
| 536 | 0x1f13c800, | 601 | 0x23f0c100, |
| 537 | 0x00870bf5, | 602 | 0x0022cf02, |
| 538 | 0xf41f23c8, | 603 | 0xf51f13c8, |
| 539 | 0x20f9620b, | 604 | 0xc800890b, |
| 540 | 0xbd0212b9, | 605 | 0x0bf41f23, |
| 541 | 0x0799f094, | 606 | 0xb920f962, |
| 542 | 0x0f0007f1, | 607 | 0x94bd0212, |
| 543 | 0xd00203f0, | ||
| 544 | 0x04bd0009, | ||
| 545 | 0xf40132f4, | ||
| 546 | 0x21f50231, | ||
| 547 | 0x94bd082f, | ||
| 548 | 0xf10799f0, | 608 | 0xf10799f0, |
| 549 | 0xf0170007, | 609 | 0xf00f0007, |
| 550 | 0x09d00203, | 610 | 0x09d00203, |
| 551 | 0xfc04bd00, | 611 | 0xf404bd00, |
| 552 | 0xf094bd20, | 612 | 0x31f40132, |
| 553 | 0x07f10699, | 613 | 0xe821f502, |
| 554 | 0x03f00f00, | 614 | 0xf094bd09, |
| 555 | 0x0009d002, | 615 | 0x07f10799, |
| 556 | 0x31f404bd, | ||
| 557 | 0x2f21f501, | ||
| 558 | 0xf094bd08, | ||
| 559 | 0x07f10699, | ||
| 560 | 0x03f01700, | 616 | 0x03f01700, |
| 561 | 0x0009d002, | 617 | 0x0009d002, |
| 562 | 0x0ef404bd, | 618 | 0x20fc04bd, |
| 563 | /* 0x04f9: chsw_prev_no_next */ | ||
| 564 | 0xb920f931, | ||
| 565 | 0x32f40212, | ||
| 566 | 0x0232f401, | ||
| 567 | 0x082f21f5, | ||
| 568 | 0x17f120fc, | ||
| 569 | 0x14b60b00, | ||
| 570 | 0x0012d006, | ||
| 571 | /* 0x0517: chsw_no_prev */ | ||
| 572 | 0xc8130ef4, | ||
| 573 | 0x0bf41f23, | ||
| 574 | 0x0131f40d, | ||
| 575 | 0xf50232f4, | ||
| 576 | /* 0x0527: chsw_done */ | ||
| 577 | 0xf1082f21, | ||
| 578 | 0xb60b0c17, | ||
| 579 | 0x27f00614, | ||
| 580 | 0x0012d001, | ||
| 581 | 0x99f094bd, | 619 | 0x99f094bd, |
| 582 | 0x0007f104, | 620 | 0x0007f106, |
| 621 | 0x0203f00f, | ||
| 622 | 0xbd0009d0, | ||
| 623 | 0x0131f404, | ||
| 624 | 0x09e821f5, | ||
| 625 | 0x99f094bd, | ||
| 626 | 0x0007f106, | ||
| 583 | 0x0203f017, | 627 | 0x0203f017, |
| 584 | 0xbd0009d0, | 628 | 0xbd0009d0, |
| 585 | 0x130ef504, | 629 | 0x330ef404, |
| 586 | /* 0x0549: main_not_ctx_switch */ | 630 | /* 0x060c: chsw_prev_no_next */ |
| 587 | 0x01e4b0ff, | 631 | 0x12b920f9, |
| 588 | 0xb90d1bf4, | 632 | 0x0132f402, |
| 589 | 0x21f502f2, | 633 | 0xf50232f4, |
| 590 | 0x0ef407bb, | 634 | 0xfc09e821, |
| 591 | /* 0x0559: main_not_ctx_chan */ | 635 | 0x0007f120, |
| 592 | 0x02e4b046, | 636 | 0x0203f0c0, |
| 593 | 0xbd321bf4, | 637 | 0xbd0002d0, |
| 594 | 0x0799f094, | 638 | 0x130ef404, |
| 595 | 0x0f0007f1, | 639 | /* 0x062c: chsw_no_prev */ |
| 640 | 0xf41f23c8, | ||
| 641 | 0x31f40d0b, | ||
| 642 | 0x0232f401, | ||
| 643 | 0x09e821f5, | ||
| 644 | /* 0x063c: chsw_done */ | ||
| 645 | 0xf10127f0, | ||
| 646 | 0xf0c30007, | ||
| 647 | 0x02d00203, | ||
| 648 | 0xbd04bd00, | ||
| 649 | 0x0499f094, | ||
| 650 | 0x170007f1, | ||
| 596 | 0xd00203f0, | 651 | 0xd00203f0, |
| 597 | 0x04bd0009, | 652 | 0x04bd0009, |
| 598 | 0xf40132f4, | 653 | 0xff080ef5, |
| 599 | 0x21f50232, | 654 | /* 0x0660: main_not_ctx_switch */ |
| 600 | 0x94bd082f, | 655 | 0xf401e4b0, |
| 656 | 0xf2b90d1b, | ||
| 657 | 0x7821f502, | ||
| 658 | 0x460ef409, | ||
| 659 | /* 0x0670: main_not_ctx_chan */ | ||
| 660 | 0xf402e4b0, | ||
| 661 | 0x94bd321b, | ||
| 601 | 0xf10799f0, | 662 | 0xf10799f0, |
| 602 | 0xf0170007, | 663 | 0xf00f0007, |
| 603 | 0x09d00203, | 664 | 0x09d00203, |
| 604 | 0xf404bd00, | 665 | 0xf404bd00, |
| 605 | /* 0x058e: main_not_ctx_save */ | 666 | 0x32f40132, |
| 606 | 0xef94110e, | 667 | 0xe821f502, |
| 607 | 0x01f5f010, | 668 | 0xf094bd09, |
| 608 | 0x02fe21f5, | 669 | 0x07f10799, |
| 609 | 0xfec00ef5, | 670 | 0x03f01700, |
| 610 | /* 0x059c: main_done */ | 671 | 0x0009d002, |
| 611 | 0x29f024bd, | 672 | 0x0ef404bd, |
| 612 | 0x0007f11f, | 673 | /* 0x06a5: main_not_ctx_save */ |
| 613 | 0x0203f008, | 674 | 0x10ef9411, |
| 614 | 0xbd0002d0, | 675 | 0xf501f5f0, |
| 615 | 0xab0ef504, | 676 | 0xf5037e21, |
| 616 | /* 0x05b1: ih */ | 677 | /* 0x06b3: main_done */ |
| 617 | 0xfe80f9fe, | 678 | 0xbdfeb50e, |
| 618 | 0x80f90188, | 679 | 0x1f29f024, |
| 619 | 0xa0f990f9, | 680 | 0x080007f1, |
| 620 | 0xd0f9b0f9, | 681 | 0xd00203f0, |
| 621 | 0xf0f9e0f9, | 682 | 0x04bd0002, |
| 622 | 0x0acf04bd, | 683 | 0xfea00ef5, |
| 623 | 0x04abc480, | 684 | /* 0x06c8: ih */ |
| 624 | 0xf11d0bf4, | 685 | 0x88fe80f9, |
| 625 | 0xf01900b7, | 686 | 0xf980f901, |
| 626 | 0xbecf10d7, | 687 | 0xf9a0f990, |
| 627 | 0x00bfcf40, | 688 | 0xf9d0f9b0, |
| 689 | 0xbdf0f9e0, | ||
| 690 | 0x00a7f104, | ||
| 691 | 0x00a3f002, | ||
| 692 | 0xc400aacf, | ||
| 693 | 0x0bf404ab, | ||
| 694 | 0x10d7f030, | ||
| 695 | 0x1a00e7f1, | ||
| 696 | 0xcf00e3f0, | ||
| 697 | 0xf7f100ee, | ||
| 698 | 0xf3f01900, | ||
| 699 | 0x00ffcf00, | ||
| 628 | 0xb70421f4, | 700 | 0xb70421f4, |
| 629 | 0xf00400b0, | 701 | 0xf00400b0, |
| 630 | 0xbed001e7, | 702 | 0x07f101e7, |
| 631 | /* 0x05e9: ih_no_fifo */ | 703 | 0x03f01d00, |
| 632 | 0x00abe400, | 704 | 0x000ed000, |
| 633 | 0x0d0bf401, | 705 | /* 0x071a: ih_no_fifo */ |
| 634 | 0xf110d7f0, | 706 | 0xabe404bd, |
| 635 | 0xf44001e7, | 707 | 0x0bf40100, |
| 636 | /* 0x05fa: ih_no_ctxsw */ | 708 | 0x10d7f00d, |
| 637 | 0xb7f10421, | 709 | 0x4001e7f1, |
| 638 | 0xb0bd0104, | 710 | /* 0x072b: ih_no_ctxsw */ |
| 639 | 0xf4b4abff, | 711 | 0xe40421f4, |
| 640 | 0xa7f10d0b, | 712 | 0xf40400ab, |
| 641 | 0xa4b60c1c, | 713 | 0xb7f1140b, |
| 642 | 0x00abd006, | 714 | 0xbfb90100, |
| 643 | /* 0x0610: ih_no_other */ | 715 | 0x44e7f102, |
| 644 | 0xfc400ad0, | 716 | 0x40e3f001, |
| 717 | /* 0x0743: ih_no_fwmthd */ | ||
| 718 | 0xf19d21f4, | ||
| 719 | 0xbd0104b7, | ||
| 720 | 0xb4abffb0, | ||
| 721 | 0xf10f0bf4, | ||
| 722 | 0xf0070007, | ||
| 723 | 0x0bd00303, | ||
| 724 | /* 0x075b: ih_no_other */ | ||
| 725 | 0xf104bd00, | ||
| 726 | 0xf0010007, | ||
| 727 | 0x0ad00003, | ||
| 728 | 0xfc04bd00, | ||
| 645 | 0xfce0fcf0, | 729 | 0xfce0fcf0, |
| 646 | 0xfcb0fcd0, | 730 | 0xfcb0fcd0, |
| 647 | 0xfc90fca0, | 731 | 0xfc90fca0, |
| 648 | 0x0088fe80, | 732 | 0x0088fe80, |
| 649 | 0x32f480fc, | 733 | 0x32f480fc, |
| 650 | /* 0x062b: ctx_4160s */ | 734 | /* 0x077f: ctx_4160s */ |
| 651 | 0xf101f800, | 735 | 0xf001f800, |
| 652 | 0xf04160e7, | 736 | 0xffb901f7, |
| 653 | 0xf7f040e3, | 737 | 0x60e7f102, |
| 654 | 0x8d21f401, | 738 | 0x40e3f041, |
| 655 | /* 0x0638: ctx_4160s_wait */ | 739 | /* 0x078f: ctx_4160s_wait */ |
| 656 | 0xc86821f4, | 740 | 0xf19d21f4, |
| 657 | 0x0bf404ff, | ||
| 658 | /* 0x0643: ctx_4160c */ | ||
| 659 | 0xf100f8fa, | ||
| 660 | 0xf04160e7, | 741 | 0xf04160e7, |
| 661 | 0xf4bd40e3, | 742 | 0x21f440e3, |
| 662 | 0xf88d21f4, | 743 | 0x02ffb968, |
| 663 | /* 0x0651: ctx_4170s */ | 744 | 0xf404ffc8, |
| 664 | 0x70e7f100, | 745 | 0x00f8f00b, |
| 746 | /* 0x07a4: ctx_4160c */ | ||
| 747 | 0xffb9f4bd, | ||
| 748 | 0x60e7f102, | ||
| 665 | 0x40e3f041, | 749 | 0x40e3f041, |
| 666 | 0xf410f5f0, | 750 | 0xf89d21f4, |
| 667 | 0x00f88d21, | 751 | /* 0x07b5: ctx_4170s */ |
| 668 | /* 0x0660: ctx_4170w */ | 752 | 0x10f5f000, |
| 669 | 0x4170e7f1, | 753 | 0xf102ffb9, |
| 670 | 0xf440e3f0, | 754 | 0xf04170e7, |
| 671 | 0xf4f06821, | 755 | 0x21f440e3, |
| 672 | 0xf31bf410, | 756 | /* 0x07c7: ctx_4170w */ |
| 673 | /* 0x0672: ctx_redswitch */ | 757 | 0xf100f89d, |
| 674 | 0xe7f100f8, | 758 | 0xf04170e7, |
| 675 | 0xe4b60614, | 759 | 0x21f440e3, |
| 676 | 0x70f7f106, | 760 | 0x02ffb968, |
| 677 | 0x00efd002, | 761 | 0xf410f4f0, |
| 678 | /* 0x0683: ctx_redswitch_delay */ | 762 | 0x00f8f01b, |
| 679 | 0xb608f7f0, | 763 | /* 0x07dc: ctx_redswitch */ |
| 680 | 0x1bf401f2, | 764 | 0x0200e7f1, |
| 681 | 0x70f7f1fd, | 765 | 0xf040e5f0, |
| 682 | 0x00efd007, | 766 | 0xe5f020e5, |
| 683 | /* 0x0692: ctx_86c */ | 767 | 0x0007f110, |
| 684 | 0xe7f100f8, | 768 | 0x0103f085, |
| 685 | 0xe4b6086c, | 769 | 0xbd000ed0, |
| 686 | 0x00efd006, | 770 | 0x08f7f004, |
| 687 | 0x8a14e7f1, | 771 | /* 0x07f8: ctx_redswitch_delay */ |
| 688 | 0xf440e3f0, | 772 | 0xf401f2b6, |
| 689 | 0xe7f18d21, | 773 | 0xe5f1fd1b, |
| 690 | 0xe3f0a86c, | 774 | 0xe5f10400, |
| 691 | 0x8d21f441, | 775 | 0x07f10100, |
| 692 | /* 0x06b2: ctx_load */ | 776 | 0x03f08500, |
| 777 | 0x000ed001, | ||
| 778 | 0x00f804bd, | ||
| 779 | /* 0x0814: ctx_86c */ | ||
| 780 | 0x1b0007f1, | ||
| 781 | 0xd00203f0, | ||
| 782 | 0x04bd000f, | ||
| 783 | 0xf102ffb9, | ||
| 784 | 0xf08a14e7, | ||
| 785 | 0x21f440e3, | ||
| 786 | 0x02ffb99d, | ||
| 787 | 0xa86ce7f1, | ||
| 788 | 0xf441e3f0, | ||
| 789 | 0x00f89d21, | ||
| 790 | /* 0x083c: ctx_mem */ | ||
| 791 | 0x840007f1, | ||
| 792 | 0xd00203f0, | ||
| 793 | 0x04bd000f, | ||
| 794 | /* 0x0848: ctx_mem_wait */ | ||
| 795 | 0x8400f7f1, | ||
| 796 | 0xcf02f3f0, | ||
| 797 | 0xfffd00ff, | ||
| 798 | 0xf31bf405, | ||
| 799 | /* 0x085a: ctx_load */ | ||
| 693 | 0x94bd00f8, | 800 | 0x94bd00f8, |
| 694 | 0xf10599f0, | 801 | 0xf10599f0, |
| 695 | 0xf00f0007, | 802 | 0xf00f0007, |
| 696 | 0x09d00203, | 803 | 0x09d00203, |
| 697 | 0xf004bd00, | 804 | 0xf004bd00, |
| 698 | 0x21f40ca7, | 805 | 0x21f40ca7, |
| 699 | 0x2417f1c9, | 806 | 0xf1f4bdd0, |
| 700 | 0x0614b60a, | 807 | 0xf0890007, |
| 701 | 0xf10010d0, | 808 | 0x0fd00203, |
| 702 | 0xb60b0037, | 809 | 0xf104bd00, |
| 703 | 0x32d00634, | 810 | 0xf0c10007, |
| 704 | 0x0c17f140, | 811 | 0x02d00203, |
| 705 | 0x0614b60a, | 812 | 0xf104bd00, |
| 706 | 0xd00747f0, | 813 | 0xf0830007, |
| 707 | 0x14d00012, | 814 | 0x02d00203, |
| 708 | /* 0x06ed: ctx_chan_wait_0 */ | 815 | 0xf004bd00, |
| 709 | 0x4014cf40, | 816 | 0x21f507f7, |
| 710 | 0xf41f44f0, | 817 | 0x07f1083c, |
| 711 | 0x32d0fa1b, | 818 | 0x03f0c000, |
| 712 | 0x000bfe00, | 819 | 0x0002d002, |
| 713 | 0xb61f2af0, | 820 | 0x0bfe04bd, |
| 714 | 0x20b60424, | 821 | 0x1f2af000, |
| 715 | 0xf094bd02, | 822 | 0xb60424b6, |
| 823 | 0x94bd0220, | ||
| 824 | 0xf10899f0, | ||
| 825 | 0xf00f0007, | ||
| 826 | 0x09d00203, | ||
| 827 | 0xf104bd00, | ||
| 828 | 0xf0810007, | ||
| 829 | 0x02d00203, | ||
| 830 | 0xf104bd00, | ||
| 831 | 0xf1000027, | ||
| 832 | 0xf0800023, | ||
| 833 | 0x07f10225, | ||
| 834 | 0x03f08800, | ||
| 835 | 0x0002d002, | ||
| 836 | 0x17f004bd, | ||
| 837 | 0x0027f110, | ||
| 838 | 0x0223f002, | ||
| 839 | 0xf80512fa, | ||
| 840 | 0xf094bd03, | ||
| 716 | 0x07f10899, | 841 | 0x07f10899, |
| 717 | 0x03f00f00, | 842 | 0x03f01700, |
| 718 | 0x0009d002, | 843 | 0x0009d002, |
| 719 | 0x17f104bd, | 844 | 0x019804bd, |
| 720 | 0x14b60a04, | 845 | 0x1814b681, |
| 721 | 0x0012d006, | 846 | 0xb6800298, |
| 722 | 0x0a2017f1, | 847 | 0x12fd0825, |
| 723 | 0xf00614b6, | 848 | 0x16018005, |
| 724 | 0x23f10227, | ||
| 725 | 0x12d08000, | ||
| 726 | 0x1017f000, | ||
| 727 | 0x020027f1, | ||
| 728 | 0xfa0223f0, | ||
| 729 | 0x03f80512, | ||
| 730 | 0x99f094bd, | 849 | 0x99f094bd, |
| 731 | 0x0007f108, | 850 | 0x0007f109, |
| 732 | 0x0203f017, | 851 | 0x0203f00f, |
| 733 | 0xbd0009d0, | 852 | 0xbd0009d0, |
| 734 | 0x81019804, | 853 | 0x0007f104, |
| 735 | 0x981814b6, | 854 | 0x0203f081, |
| 736 | 0x25b68002, | 855 | 0xbd0001d0, |
| 737 | 0x0512fd08, | 856 | 0x0127f004, |
| 738 | 0xbd160180, | 857 | 0x880007f1, |
| 739 | 0x0999f094, | ||
| 740 | 0x0f0007f1, | ||
| 741 | 0xd00203f0, | ||
| 742 | 0x04bd0009, | ||
| 743 | 0x0a0427f1, | ||
| 744 | 0xd00624b6, | ||
| 745 | 0x27f00021, | ||
| 746 | 0x2017f101, | ||
| 747 | 0x0614b60a, | ||
| 748 | 0xf10012d0, | ||
| 749 | 0xf0010017, | ||
| 750 | 0x01fa0613, | ||
| 751 | 0xbd03f805, | ||
| 752 | 0x0999f094, | ||
| 753 | 0x170007f1, | ||
| 754 | 0xd00203f0, | 858 | 0xd00203f0, |
| 755 | 0x04bd0009, | 859 | 0x04bd0002, |
| 860 | 0x010017f1, | ||
| 861 | 0xfa0613f0, | ||
| 862 | 0x03f80501, | ||
| 756 | 0x99f094bd, | 863 | 0x99f094bd, |
| 757 | 0x0007f105, | 864 | 0x0007f109, |
| 758 | 0x0203f017, | 865 | 0x0203f017, |
| 759 | 0xbd0009d0, | 866 | 0xbd0009d0, |
| 760 | /* 0x07bb: ctx_chan */ | 867 | 0xf094bd04, |
| 761 | 0xf500f804, | 868 | 0x07f10599, |
| 762 | 0xf5062b21, | 869 | 0x03f01700, |
| 763 | 0xf006b221, | 870 | 0x0009d002, |
| 764 | 0x21f40ca7, | 871 | 0x00f804bd, |
| 765 | 0x1017f1c9, | 872 | /* 0x0978: ctx_chan */ |
| 766 | 0x0614b60a, | 873 | 0x077f21f5, |
| 767 | 0xd00527f0, | 874 | 0x085a21f5, |
| 768 | /* 0x07d6: ctx_chan_wait */ | 875 | 0xf40ca7f0, |
| 769 | 0x12cf0012, | 876 | 0xf7f0d021, |
| 770 | 0x0522fd00, | 877 | 0x3c21f505, |
| 771 | 0xf5fa1bf4, | 878 | 0xa421f508, |
| 772 | 0xf8064321, | 879 | /* 0x0993: ctx_mmio_exec */ |
| 773 | /* 0x07e5: ctx_mmio_exec */ | 880 | 0x9800f807, |
| 774 | 0x41039800, | 881 | 0x07f14103, |
| 775 | 0x0a0427f1, | 882 | 0x03f08100, |
| 776 | 0xd00624b6, | 883 | 0x0003d002, |
| 777 | 0x34bd0023, | 884 | 0x34bd04bd, |
| 778 | /* 0x07f4: ctx_mmio_loop */ | 885 | /* 0x09a4: ctx_mmio_loop */ |
| 779 | 0xf4ff34c4, | 886 | 0xf4ff34c4, |
| 780 | 0x57f10f1b, | 887 | 0x57f10f1b, |
| 781 | 0x53f00200, | 888 | 0x53f00200, |
| 782 | 0x0535fa06, | 889 | 0x0535fa06, |
| 783 | /* 0x0806: ctx_mmio_pull */ | 890 | /* 0x09b6: ctx_mmio_pull */ |
| 784 | 0x4e9803f8, | 891 | 0x4e9803f8, |
| 785 | 0x814f9880, | 892 | 0x814f9880, |
| 786 | 0xb68d21f4, | 893 | 0xb69d21f4, |
| 787 | 0x12b60830, | 894 | 0x12b60830, |
| 788 | 0xdf1bf401, | 895 | 0xdf1bf401, |
| 789 | /* 0x0818: ctx_mmio_done */ | 896 | /* 0x09c8: ctx_mmio_done */ |
| 790 | 0xd0160398, | 897 | 0xf1160398, |
| 791 | 0x00800023, | 898 | 0xf0810007, |
| 792 | 0x0017f140, | 899 | 0x03d00203, |
| 793 | 0x0613f001, | 900 | 0x8004bd00, |
| 794 | 0xf80601fa, | 901 | 0x17f14000, |
| 795 | /* 0x082f: ctx_xfer */ | 902 | 0x13f00100, |
| 796 | 0xf100f803, | 903 | 0x0601fa06, |
| 797 | 0xb60c00f7, | 904 | 0x00f803f8, |
| 798 | 0xe7f006f4, | 905 | /* 0x09e8: ctx_xfer */ |
| 799 | 0x80fed004, | 906 | 0xf104e7f0, |
| 800 | /* 0x083c: ctx_xfer_idle */ | 907 | 0xf0020007, |
| 801 | 0xf100fecf, | 908 | 0x0ed00303, |
| 802 | 0xf42000e4, | 909 | /* 0x09f7: ctx_xfer_idle */ |
| 803 | 0x11f4f91b, | 910 | 0xf104bd00, |
| 804 | 0x1102f406, | 911 | 0xf00000e7, |
| 805 | /* 0x084c: ctx_xfer_pre */ | 912 | 0xeecf03e3, |
| 806 | 0xf510f7f0, | 913 | 0x00e4f100, |
| 807 | 0xf5069221, | 914 | 0xf21bf420, |
| 808 | 0xf4062b21, | 915 | 0xf40611f4, |
| 809 | /* 0x085a: ctx_xfer_pre_load */ | 916 | /* 0x0a0e: ctx_xfer_pre */ |
| 810 | 0xf7f01c11, | 917 | 0xf7f01102, |
| 811 | 0x5121f502, | 918 | 0x1421f510, |
| 812 | 0x6021f506, | 919 | 0x7f21f508, |
| 813 | 0x7221f506, | 920 | 0x1c11f407, |
| 814 | 0xf5f4bd06, | 921 | /* 0x0a1c: ctx_xfer_pre_load */ |
| 815 | 0xf5065121, | 922 | 0xf502f7f0, |
| 816 | /* 0x0873: ctx_xfer_exec */ | 923 | 0xf507b521, |
| 817 | 0x9806b221, | 924 | 0xf507c721, |
| 818 | 0x27f11601, | 925 | 0xbd07dc21, |
| 819 | 0x24b60414, | 926 | 0xb521f5f4, |
| 820 | 0x0020d006, | 927 | 0x5a21f507, |
| 821 | 0xa500e7f1, | 928 | /* 0x0a35: ctx_xfer_exec */ |
| 822 | 0xb941e3f0, | 929 | 0x16019808, |
| 823 | 0x21f4021f, | 930 | 0x07f124bd, |
| 824 | 0x04e0b68d, | 931 | 0x03f00500, |
| 825 | 0xf001fcf0, | 932 | 0x0002d001, |
| 826 | 0x24b6022c, | 933 | 0x1fb904bd, |
| 827 | 0x05f2fd01, | 934 | 0x00e7f102, |
| 828 | 0xf18d21f4, | 935 | 0x41e3f0a5, |
| 829 | 0xf04afc17, | 936 | 0xf09d21f4, |
| 830 | 0x27f00213, | 937 | 0x2cf001fc, |
| 831 | 0x0012d00c, | 938 | 0x0124b602, |
| 832 | 0x021521f5, | 939 | 0xb905f2fd, |
| 833 | 0x47fc27f1, | 940 | 0xe7f102ff, |
| 834 | 0xd00223f0, | 941 | 0xe3f0a504, |
| 835 | 0x2cf00020, | 942 | 0x9d21f441, |
| 943 | 0x026a21f5, | ||
| 944 | 0x07f124bd, | ||
| 945 | 0x03f047fc, | ||
| 946 | 0x0002d002, | ||
| 947 | 0x2cf004bd, | ||
| 836 | 0x0320b601, | 948 | 0x0320b601, |
| 837 | 0xf00012d0, | 949 | 0x4afc07f1, |
| 838 | 0xa5f001ac, | 950 | 0xd00203f0, |
| 839 | 0x00b7f006, | 951 | 0x04bd0002, |
| 840 | 0x98000c98, | 952 | 0xf001acf0, |
| 841 | 0xe7f0010d, | 953 | 0xb7f006a5, |
| 842 | 0x6621f500, | 954 | 0x000c9800, |
| 843 | 0x08a7f001, | 955 | 0xf0010d98, |
| 844 | 0x010921f5, | 956 | 0x21f500e7, |
| 845 | 0x021521f5, | 957 | 0xa7f0016f, |
| 846 | 0xf02201f4, | 958 | 0x1021f508, |
| 847 | 0x21f40ca7, | 959 | 0x5e21f501, |
| 848 | 0x1017f1c9, | 960 | 0x1301f402, |
| 849 | 0x0614b60a, | 961 | 0xf40ca7f0, |
| 850 | 0xd00527f0, | 962 | 0xf7f0d021, |
| 851 | /* 0x08fa: ctx_xfer_post_save_wait */ | 963 | 0x3c21f505, |
| 852 | 0x12cf0012, | 964 | 0x3202f408, |
| 853 | 0x0522fd00, | 965 | /* 0x0ac4: ctx_xfer_post */ |
| 854 | 0xf4fa1bf4, | 966 | 0xf502f7f0, |
| 855 | /* 0x0906: ctx_xfer_post */ | 967 | 0xbd07b521, |
| 856 | 0xf7f03202, | 968 | 0x1421f5f4, |
| 857 | 0x5121f502, | 969 | 0x7f21f508, |
| 858 | 0xf5f4bd06, | 970 | 0xc721f502, |
| 859 | 0xf5069221, | 971 | 0xf5f4bd07, |
| 860 | 0xf5023421, | 972 | 0xf407b521, |
| 861 | 0xbd066021, | 973 | 0x01981011, |
| 862 | 0x5121f5f4, | 974 | 0x0511fd40, |
| 863 | 0x1011f406, | 975 | 0xf5070bf4, |
| 864 | 0xfd400198, | 976 | /* 0x0aef: ctx_xfer_no_post_mmio */ |
| 865 | 0x0bf40511, | 977 | 0xf5099321, |
| 866 | 0xe521f507, | 978 | /* 0x0af3: ctx_xfer_done */ |
| 867 | /* 0x0931: ctx_xfer_no_post_mmio */ | 979 | 0xf807a421, |
| 868 | 0x4321f507, | ||
| 869 | /* 0x0935: ctx_xfer_done */ | ||
| 870 | 0x0000f806, | ||
| 871 | 0x00000000, | ||
| 872 | 0x00000000, | ||
| 873 | 0x00000000, | ||
| 874 | 0x00000000, | ||
| 875 | 0x00000000, | ||
| 876 | 0x00000000, | ||
| 877 | 0x00000000, | ||
| 878 | 0x00000000, | ||
| 879 | 0x00000000, | ||
| 880 | 0x00000000, | ||
| 881 | 0x00000000, | ||
| 882 | 0x00000000, | ||
| 883 | 0x00000000, | ||
| 884 | 0x00000000, | ||
| 885 | 0x00000000, | ||
| 886 | 0x00000000, | ||
| 887 | 0x00000000, | ||
| 888 | 0x00000000, | ||
| 889 | 0x00000000, | ||
| 890 | 0x00000000, | ||
| 891 | 0x00000000, | ||
| 892 | 0x00000000, | ||
| 893 | 0x00000000, | ||
| 894 | 0x00000000, | ||
| 895 | 0x00000000, | ||
| 896 | 0x00000000, | ||
| 897 | 0x00000000, | ||
| 898 | 0x00000000, | ||
| 899 | 0x00000000, | ||
| 900 | 0x00000000, | ||
| 901 | 0x00000000, | ||
| 902 | 0x00000000, | ||
| 903 | 0x00000000, | ||
| 904 | 0x00000000, | ||
| 905 | 0x00000000, | ||
| 906 | 0x00000000, | ||
| 907 | 0x00000000, | ||
| 908 | 0x00000000, | ||
| 909 | 0x00000000, | ||
| 910 | 0x00000000, | ||
| 911 | 0x00000000, | ||
| 912 | 0x00000000, | ||
| 913 | 0x00000000, | ||
| 914 | 0x00000000, | ||
| 915 | 0x00000000, | ||
| 916 | 0x00000000, | ||
| 917 | 0x00000000, | ||
| 918 | 0x00000000, | 980 | 0x00000000, |
| 919 | 0x00000000, | 981 | 0x00000000, |
| 920 | 0x00000000, | 982 | 0x00000000, |
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h index eb7bc0e9576e..1c179bdd48cc 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h +++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h | |||
| @@ -206,14 +206,14 @@ uint32_t nve0_grhub_data[] = { | |||
| 206 | }; | 206 | }; |
| 207 | 207 | ||
| 208 | uint32_t nve0_grhub_code[] = { | 208 | uint32_t nve0_grhub_code[] = { |
| 209 | 0x031b0ef5, | 209 | 0x039b0ef5, |
| 210 | /* 0x0004: queue_put */ | 210 | /* 0x0004: queue_put */ |
| 211 | 0x9800d898, | 211 | 0x9800d898, |
| 212 | 0x86f001d9, | 212 | 0x86f001d9, |
| 213 | 0x0489b808, | 213 | 0x0489b808, |
| 214 | 0xf00c1bf4, | 214 | 0xf00c1bf4, |
| 215 | 0x21f502f7, | 215 | 0x21f502f7, |
| 216 | 0x00f802fe, | 216 | 0x00f8037e, |
| 217 | /* 0x001c: queue_put_next */ | 217 | /* 0x001c: queue_put_next */ |
| 218 | 0xb60798c4, | 218 | 0xb60798c4, |
| 219 | 0x8dbb0384, | 219 | 0x8dbb0384, |
| @@ -237,184 +237,214 @@ uint32_t nve0_grhub_code[] = { | |||
| 237 | /* 0x0066: queue_get_done */ | 237 | /* 0x0066: queue_get_done */ |
| 238 | 0x00f80132, | 238 | 0x00f80132, |
| 239 | /* 0x0068: nv_rd32 */ | 239 | /* 0x0068: nv_rd32 */ |
| 240 | 0x0728b7f1, | 240 | 0xf002ecb9, |
| 241 | 0xb906b4b6, | 241 | 0x07f11fc9, |
| 242 | 0xc9f002ec, | 242 | 0x03f0ca00, |
| 243 | 0x00bcd01f, | 243 | 0x000cd001, |
| 244 | /* 0x0078: nv_rd32_wait */ | 244 | /* 0x007a: nv_rd32_wait */ |
| 245 | 0xc800bccf, | 245 | 0xc7f104bd, |
| 246 | 0x1bf41fcc, | 246 | 0xc3f0ca00, |
| 247 | 0x06a7f0fa, | 247 | 0x00cccf01, |
| 248 | 0x010921f5, | 248 | 0xf41fccc8, |
| 249 | 0xf840bfcf, | 249 | 0xa7f0f31b, |
| 250 | /* 0x008d: nv_wr32 */ | 250 | 0x1021f506, |
| 251 | 0x28b7f100, | 251 | 0x00f7f101, |
| 252 | 0x06b4b607, | 252 | 0x01f3f0cb, |
| 253 | 0xb980bfd0, | 253 | 0xf800ffcf, |
| 254 | 0xc9f002ec, | 254 | /* 0x009d: nv_wr32 */ |
| 255 | 0x1ec9f01f, | 255 | 0x0007f100, |
| 256 | /* 0x00a3: nv_wr32_wait */ | 256 | 0x0103f0cc, |
| 257 | 0xcf00bcd0, | 257 | 0xbd000fd0, |
| 258 | 0xccc800bc, | 258 | 0x02ecb904, |
| 259 | 0xfa1bf41f, | 259 | 0xf01fc9f0, |
| 260 | /* 0x00ae: watchdog_reset */ | 260 | 0x07f11ec9, |
| 261 | 0x87f100f8, | 261 | 0x03f0ca00, |
| 262 | 0x84b60430, | 262 | 0x000cd001, |
| 263 | 0x1ff9f006, | 263 | /* 0x00be: nv_wr32_wait */ |
| 264 | 0xf8008fd0, | 264 | 0xc7f104bd, |
| 265 | /* 0x00bd: watchdog_clear */ | 265 | 0xc3f0ca00, |
| 266 | 0x3087f100, | 266 | 0x00cccf01, |
| 267 | 0x0684b604, | 267 | 0xf41fccc8, |
| 268 | 0xf80080d0, | 268 | 0x00f8f31b, |
| 269 | /* 0x00c9: wait_donez */ | 269 | /* 0x00d0: wait_donez */ |
| 270 | 0xf094bd00, | 270 | 0x99f094bd, |
| 271 | 0x07f10099, | 271 | 0x0007f100, |
| 272 | 0x03f00f00, | 272 | 0x0203f00f, |
| 273 | 0x0009d002, | 273 | 0xbd0009d0, |
| 274 | 0x07f104bd, | 274 | 0x0007f104, |
| 275 | 0x03f00600, | 275 | 0x0203f006, |
| 276 | 0x000ad002, | 276 | 0xbd000ad0, |
| 277 | /* 0x00e6: wait_donez_ne */ | 277 | /* 0x00ed: wait_donez_ne */ |
| 278 | 0x87f104bd, | 278 | 0x0087f104, |
| 279 | 0x83f00000, | 279 | 0x0183f000, |
| 280 | 0x0088cf01, | 280 | 0xff0088cf, |
| 281 | 0xf4888aff, | 281 | 0x1bf4888a, |
| 282 | 0x94bdf31b, | 282 | 0xf094bdf3, |
| 283 | 0xf10099f0, | ||
| 284 | 0xf0170007, | ||
| 285 | 0x09d00203, | ||
| 286 | 0xf804bd00, | ||
| 287 | /* 0x0109: wait_doneo */ | ||
| 288 | 0xf094bd00, | ||
| 289 | 0x07f10099, | 283 | 0x07f10099, |
| 290 | 0x03f00f00, | 284 | 0x03f01700, |
| 291 | 0x0009d002, | 285 | 0x0009d002, |
| 292 | 0x87f104bd, | 286 | 0x00f804bd, |
| 293 | 0x84b60818, | 287 | /* 0x0110: wait_doneo */ |
| 294 | 0x008ad006, | ||
| 295 | /* 0x0124: wait_doneo_e */ | ||
| 296 | 0x040087f1, | ||
| 297 | 0xcf0684b6, | ||
| 298 | 0x8aff0088, | ||
| 299 | 0xf30bf488, | ||
| 300 | 0x99f094bd, | 288 | 0x99f094bd, |
| 301 | 0x0007f100, | 289 | 0x0007f100, |
| 302 | 0x0203f017, | 290 | 0x0203f00f, |
| 303 | 0xbd0009d0, | 291 | 0xbd0009d0, |
| 304 | /* 0x0147: mmctx_size */ | 292 | 0x0007f104, |
| 305 | 0xbd00f804, | 293 | 0x0203f006, |
| 306 | /* 0x0149: nv_mmctx_size_loop */ | 294 | 0xbd000ad0, |
| 307 | 0x00e89894, | 295 | /* 0x012d: wait_doneo_e */ |
| 308 | 0xb61a85b6, | 296 | 0x0087f104, |
| 309 | 0x84b60180, | 297 | 0x0183f000, |
| 310 | 0x0098bb02, | 298 | 0xff0088cf, |
| 311 | 0xb804e0b6, | 299 | 0x0bf4888a, |
| 312 | 0x1bf404ef, | 300 | 0xf094bdf3, |
| 313 | 0x029fb9eb, | 301 | 0x07f10099, |
| 314 | /* 0x0166: mmctx_xfer */ | 302 | 0x03f01700, |
| 315 | 0x94bd00f8, | 303 | 0x0009d002, |
| 316 | 0xf10199f0, | 304 | 0x00f804bd, |
| 317 | 0xf00f0007, | 305 | /* 0x0150: mmctx_size */ |
| 318 | 0x09d00203, | 306 | /* 0x0152: nv_mmctx_size_loop */ |
| 319 | 0xf104bd00, | 307 | 0xe89894bd, |
| 320 | 0xb6071087, | 308 | 0x1a85b600, |
| 321 | 0x94bd0684, | 309 | 0xb60180b6, |
| 322 | 0xf405bbfd, | 310 | 0x98bb0284, |
| 323 | 0x8bd0090b, | 311 | 0x04e0b600, |
| 324 | 0x0099f000, | 312 | 0xf404efb8, |
| 325 | /* 0x018c: mmctx_base_disabled */ | 313 | 0x9fb9eb1b, |
| 326 | 0xf405eefd, | 314 | /* 0x016f: mmctx_xfer */ |
| 327 | 0x8ed00c0b, | 315 | 0xbd00f802, |
| 328 | 0xc08fd080, | 316 | 0x0199f094, |
| 329 | /* 0x019b: mmctx_multi_disabled */ | 317 | 0x0f0007f1, |
| 330 | 0xb70199f0, | 318 | 0xd00203f0, |
| 331 | 0xc8010080, | 319 | 0x04bd0009, |
| 320 | 0xbbfd94bd, | ||
| 321 | 0x120bf405, | ||
| 322 | 0xc40007f1, | ||
| 323 | 0xd00103f0, | ||
| 324 | 0x04bd000b, | ||
| 325 | /* 0x0197: mmctx_base_disabled */ | ||
| 326 | 0xfd0099f0, | ||
| 327 | 0x0bf405ee, | ||
| 328 | 0x0007f11e, | ||
| 329 | 0x0103f0c6, | ||
| 330 | 0xbd000ed0, | ||
| 331 | 0x0007f104, | ||
| 332 | 0x0103f0c7, | ||
| 333 | 0xbd000fd0, | ||
| 334 | 0x0199f004, | ||
| 335 | /* 0x01b8: mmctx_multi_disabled */ | ||
| 336 | 0xb600abc8, | ||
| 337 | 0xb9f010b4, | ||
| 338 | 0x01aec80c, | ||
| 339 | 0xfd11e4b6, | ||
| 340 | 0x07f105be, | ||
| 341 | 0x03f0c500, | ||
| 342 | 0x000bd001, | ||
| 343 | /* 0x01d6: mmctx_exec_loop */ | ||
| 344 | /* 0x01d6: mmctx_wait_free */ | ||
| 345 | 0xe7f104bd, | ||
| 346 | 0xe3f0c500, | ||
| 347 | 0x00eecf01, | ||
| 348 | 0xf41fe4f0, | ||
| 349 | 0xce98f30b, | ||
| 350 | 0x05e9fd00, | ||
| 351 | 0xc80007f1, | ||
| 352 | 0xd00103f0, | ||
| 353 | 0x04bd000e, | ||
| 354 | 0xb804c0b6, | ||
| 355 | 0x1bf404cd, | ||
| 356 | 0x02abc8d8, | ||
| 357 | /* 0x0207: mmctx_fini_wait */ | ||
| 358 | 0xf11f1bf4, | ||
| 359 | 0xf0c500b7, | ||
| 360 | 0xbbcf01b3, | ||
| 361 | 0x1fb4f000, | ||
| 362 | 0xf410b4b0, | ||
| 363 | 0xa7f0f01b, | ||
| 364 | 0xd021f402, | ||
| 365 | /* 0x0223: mmctx_stop */ | ||
| 366 | 0xc82b0ef4, | ||
| 332 | 0xb4b600ab, | 367 | 0xb4b600ab, |
| 333 | 0x0cb9f010, | 368 | 0x0cb9f010, |
| 334 | 0xb601aec8, | 369 | 0xf112b9f0, |
| 335 | 0xbefd11e4, | 370 | 0xf0c50007, |
| 336 | 0x008bd005, | 371 | 0x0bd00103, |
| 337 | /* 0x01b4: mmctx_exec_loop */ | 372 | /* 0x023b: mmctx_stop_wait */ |
| 338 | /* 0x01b4: mmctx_wait_free */ | 373 | 0xf104bd00, |
| 339 | 0xf0008ecf, | 374 | 0xf0c500b7, |
| 340 | 0x0bf41fe4, | 375 | 0xbbcf01b3, |
| 341 | 0x00ce98fa, | 376 | 0x12bbc800, |
| 342 | 0xd005e9fd, | 377 | /* 0x024b: mmctx_done */ |
| 343 | 0xc0b6c08e, | 378 | 0xbdf31bf4, |
| 344 | 0x04cdb804, | 379 | 0x0199f094, |
| 345 | 0xc8e81bf4, | 380 | 0x170007f1, |
| 346 | 0x1bf402ab, | 381 | 0xd00203f0, |
| 347 | /* 0x01d5: mmctx_fini_wait */ | 382 | 0x04bd0009, |
| 348 | 0x008bcf18, | 383 | /* 0x025e: strand_wait */ |
| 349 | 0xb01fb4f0, | 384 | 0xa0f900f8, |
| 350 | 0x1bf410b4, | 385 | 0xf402a7f0, |
| 351 | 0x02a7f0f7, | 386 | 0xa0fcd021, |
| 352 | 0xf4c921f4, | 387 | /* 0x026a: strand_pre */ |
| 353 | /* 0x01ea: mmctx_stop */ | 388 | 0x97f000f8, |
| 354 | 0xabc81b0e, | 389 | 0xfc07f10c, |
| 355 | 0x10b4b600, | 390 | 0x0203f04a, |
| 356 | 0xf00cb9f0, | 391 | 0xbd0009d0, |
| 357 | 0x8bd012b9, | 392 | 0x5e21f504, |
| 358 | /* 0x01f9: mmctx_stop_wait */ | 393 | /* 0x027f: strand_post */ |
| 359 | 0x008bcf00, | 394 | 0xf000f802, |
| 360 | 0xf412bbc8, | 395 | 0x07f10d97, |
| 361 | /* 0x0202: mmctx_done */ | 396 | 0x03f04afc, |
| 362 | 0x94bdfa1b, | ||
| 363 | 0xf10199f0, | ||
| 364 | 0xf0170007, | ||
| 365 | 0x09d00203, | ||
| 366 | 0xf804bd00, | ||
| 367 | /* 0x0215: strand_wait */ | ||
| 368 | 0xf0a0f900, | ||
| 369 | 0x21f402a7, | ||
| 370 | 0xf8a0fcc9, | ||
| 371 | /* 0x0221: strand_pre */ | ||
| 372 | 0xfc87f100, | ||
| 373 | 0x0283f04a, | ||
| 374 | 0xd00c97f0, | ||
| 375 | 0x21f50089, | ||
| 376 | 0x00f80215, | ||
| 377 | /* 0x0234: strand_post */ | ||
| 378 | 0x4afc87f1, | ||
| 379 | 0xf00283f0, | ||
| 380 | 0x89d00d97, | ||
| 381 | 0x1521f500, | ||
| 382 | /* 0x0247: strand_set */ | ||
| 383 | 0xf100f802, | ||
| 384 | 0xf04ffca7, | ||
| 385 | 0xaba202a3, | ||
| 386 | 0xc7f00500, | ||
| 387 | 0x00acd00f, | ||
| 388 | 0xd00bc7f0, | ||
| 389 | 0x21f500bc, | ||
| 390 | 0xaed00215, | ||
| 391 | 0x0ac7f000, | ||
| 392 | 0xf500bcd0, | ||
| 393 | 0xf8021521, | ||
| 394 | /* 0x0271: strand_ctx_init */ | ||
| 395 | 0xf094bd00, | ||
| 396 | 0x07f10399, | ||
| 397 | 0x03f00f00, | ||
| 398 | 0x0009d002, | 397 | 0x0009d002, |
| 399 | 0x21f504bd, | 398 | 0x21f504bd, |
| 400 | 0xe7f00221, | 399 | 0x00f8025e, |
| 401 | 0x4721f503, | 400 | /* 0x0294: strand_set */ |
| 402 | 0xfca7f102, | 401 | 0xf10fc7f0, |
| 403 | 0x02a3f046, | 402 | 0xf04ffc07, |
| 404 | 0x0400aba0, | 403 | 0x0cd00203, |
| 405 | 0xf040a0d0, | 404 | 0xf004bd00, |
| 406 | 0xbcd001c7, | 405 | 0x07f10bc7, |
| 407 | 0x1521f500, | 406 | 0x03f04afc, |
| 408 | 0x010c9202, | 407 | 0x000cd002, |
| 409 | 0xf000acd0, | 408 | 0x07f104bd, |
| 410 | 0xbcd002c7, | 409 | 0x03f04ffc, |
| 411 | 0x1521f500, | 410 | 0x000ed002, |
| 412 | 0x3421f502, | 411 | 0xc7f004bd, |
| 413 | 0x8087f102, | 412 | 0xfc07f10a, |
| 414 | 0x0684b608, | 413 | 0x0203f04a, |
| 415 | 0xb70089cf, | 414 | 0xbd000cd0, |
| 416 | 0x95220080, | 415 | 0x5e21f504, |
| 417 | /* 0x02ca: ctx_init_strand_loop */ | 416 | /* 0x02d3: strand_ctx_init */ |
| 417 | 0xbd00f802, | ||
| 418 | 0x0399f094, | ||
| 419 | 0x0f0007f1, | ||
| 420 | 0xd00203f0, | ||
| 421 | 0x04bd0009, | ||
| 422 | 0x026a21f5, | ||
| 423 | 0xf503e7f0, | ||
| 424 | 0xbd029421, | ||
| 425 | 0xfc07f1c4, | ||
| 426 | 0x0203f047, | ||
| 427 | 0xbd000cd0, | ||
| 428 | 0x01c7f004, | ||
| 429 | 0x4afc07f1, | ||
| 430 | 0xd00203f0, | ||
| 431 | 0x04bd000c, | ||
| 432 | 0x025e21f5, | ||
| 433 | 0xf1010c92, | ||
| 434 | 0xf046fc07, | ||
| 435 | 0x0cd00203, | ||
| 436 | 0xf004bd00, | ||
| 437 | 0x07f102c7, | ||
| 438 | 0x03f04afc, | ||
| 439 | 0x000cd002, | ||
| 440 | 0x21f504bd, | ||
| 441 | 0x21f5025e, | ||
| 442 | 0x87f1027f, | ||
| 443 | 0x83f04200, | ||
| 444 | 0x0097f102, | ||
| 445 | 0x0293f020, | ||
| 446 | 0x950099cf, | ||
| 447 | /* 0x034a: ctx_init_strand_loop */ | ||
| 418 | 0x8ed008fe, | 448 | 0x8ed008fe, |
| 419 | 0x408ed000, | 449 | 0x408ed000, |
| 420 | 0xb6808acf, | 450 | 0xb6808acf, |
| @@ -428,7 +458,7 @@ uint32_t nve0_grhub_code[] = { | |||
| 428 | 0x170007f1, | 458 | 0x170007f1, |
| 429 | 0xd00203f0, | 459 | 0xd00203f0, |
| 430 | 0x04bd0009, | 460 | 0x04bd0009, |
| 431 | /* 0x02fe: error */ | 461 | /* 0x037e: error */ |
| 432 | 0x07f100f8, | 462 | 0x07f100f8, |
| 433 | 0x03f00500, | 463 | 0x03f00500, |
| 434 | 0x000fd002, | 464 | 0x000fd002, |
| @@ -436,82 +466,117 @@ uint32_t nve0_grhub_code[] = { | |||
| 436 | 0x0007f101, | 466 | 0x0007f101, |
| 437 | 0x0303f007, | 467 | 0x0303f007, |
| 438 | 0xbd000fd0, | 468 | 0xbd000fd0, |
| 439 | /* 0x031b: init */ | 469 | /* 0x039b: init */ |
| 440 | 0xbd00f804, | 470 | 0xbd00f804, |
| 441 | 0x0004fe04, | 471 | 0x0007fe04, |
| 442 | 0xf10007fe, | 472 | 0x420017f1, |
| 443 | 0xf0120017, | 473 | 0xcf0013f0, |
| 444 | 0x12d00227, | 474 | 0x11e70011, |
| 445 | 0xb117f100, | 475 | 0x14b60109, |
| 446 | 0x0010fe05, | 476 | 0x0014fe08, |
| 447 | 0x040017f1, | 477 | 0xf10227f0, |
| 448 | 0xf1c010d0, | 478 | 0xf0120007, |
| 449 | 0xb6040437, | 479 | 0x02d00003, |
| 450 | 0x27f10634, | 480 | 0xf104bd00, |
| 451 | 0x32d02003, | 481 | 0xfe06c817, |
| 452 | 0x0427f100, | 482 | 0x24bd0010, |
| 453 | 0x0132d020, | 483 | 0x070007f1, |
| 484 | 0xd00003f0, | ||
| 485 | 0x04bd0002, | ||
| 486 | 0x200327f1, | ||
| 487 | 0x010007f1, | ||
| 488 | 0xd00103f0, | ||
| 489 | 0x04bd0002, | ||
| 490 | 0x200427f1, | ||
| 491 | 0x010407f1, | ||
| 492 | 0xd00103f0, | ||
| 493 | 0x04bd0002, | ||
| 454 | 0x200b27f1, | 494 | 0x200b27f1, |
| 455 | 0xf10232d0, | 495 | 0x010807f1, |
| 456 | 0xd0200c27, | 496 | 0xd00103f0, |
| 457 | 0x27f10732, | 497 | 0x04bd0002, |
| 458 | 0x24b60c24, | 498 | 0x200c27f1, |
| 459 | 0x0003b906, | 499 | 0x011c07f1, |
| 460 | 0xf10023d0, | 500 | 0xd00103f0, |
| 501 | 0x04bd0002, | ||
| 502 | 0xf1010392, | ||
| 503 | 0xf0090007, | ||
| 504 | 0x03d00303, | ||
| 505 | 0xf104bd00, | ||
| 461 | 0xf0870427, | 506 | 0xf0870427, |
| 462 | 0x12d00023, | 507 | 0x07f10023, |
| 463 | 0x0012b700, | 508 | 0x03f00400, |
| 464 | 0x0427f001, | 509 | 0x0002d000, |
| 465 | 0xf40012d0, | 510 | 0x27f004bd, |
| 466 | 0xe7f11031, | 511 | 0x0007f104, |
| 467 | 0xe3f09604, | 512 | 0x0003f003, |
| 468 | 0x6821f440, | 513 | 0xbd0002d0, |
| 469 | 0x8090f1c7, | 514 | 0x1031f404, |
| 470 | 0xf4f00301, | 515 | 0x9604e7f1, |
| 471 | 0x020f801f, | 516 | 0xf440e3f0, |
| 472 | 0xbb0117f0, | 517 | 0xfeb96821, |
| 473 | 0x12b6041f, | 518 | 0x90f1c702, |
| 474 | 0x0c27f101, | 519 | 0xf0030180, |
| 475 | 0x0624b604, | 520 | 0x0f801ff4, |
| 476 | 0xd00021d0, | 521 | 0x0117f002, |
| 477 | 0x17f14021, | 522 | 0xb6041fbb, |
| 478 | 0x0e980100, | 523 | 0x07f10112, |
| 479 | 0x010f9800, | 524 | 0x03f00300, |
| 480 | 0x014721f5, | 525 | 0x0001d001, |
| 481 | 0x070037f1, | 526 | 0x07f104bd, |
| 482 | 0x950634b6, | 527 | 0x03f00400, |
| 483 | 0x34d00814, | 528 | 0x0001d001, |
| 484 | 0x4034d000, | 529 | 0x17f104bd, |
| 485 | 0x130030b7, | 530 | 0xf7f00100, |
| 486 | 0xb6001fbb, | 531 | 0x7f21f502, |
| 487 | 0x3fd002f5, | 532 | 0x9121f507, |
| 488 | 0x0815b600, | 533 | 0x10f7f007, |
| 489 | 0xb60110b6, | 534 | 0x07de21f5, |
| 490 | 0x1fb90814, | 535 | 0x98000e98, |
| 491 | 0x7121f502, | 536 | 0x21f5010f, |
| 492 | 0x001fbb02, | 537 | 0x14950150, |
| 493 | 0xf1020398, | 538 | 0x0007f108, |
| 494 | 0xf0200047, | 539 | 0x0103f0c0, |
| 495 | /* 0x03f6: init_gpc */ | 540 | 0xbd0004d0, |
| 496 | 0x4ea05043, | 541 | 0x0007f104, |
| 497 | 0x1fb90804, | 542 | 0x0103f0c1, |
| 498 | 0x8d21f402, | 543 | 0xbd0004d0, |
| 499 | 0x010c4ea0, | 544 | 0x0030b704, |
| 500 | 0x21f4f4bd, | 545 | 0x001fbb13, |
| 501 | 0x044ea08d, | 546 | 0xf102f5b6, |
| 502 | 0x8d21f401, | 547 | 0xf0d30007, |
| 503 | 0x01004ea0, | 548 | 0x0fd00103, |
| 504 | 0xf402f7f0, | 549 | 0xb604bd00, |
| 505 | 0x4ea08d21, | 550 | 0x10b60815, |
| 506 | /* 0x041e: init_gpc_wait */ | 551 | 0x0814b601, |
| 507 | 0x21f40800, | 552 | 0xf5021fb9, |
| 508 | 0x1fffc868, | 553 | 0xbb02d321, |
| 509 | 0xa0fa0bf4, | 554 | 0x0398001f, |
| 510 | 0xf408044e, | 555 | 0x0047f102, |
| 511 | 0x1fbb6821, | 556 | 0x5043f020, |
| 512 | 0x0040b700, | 557 | /* 0x04f4: init_gpc */ |
| 513 | 0x0132b680, | 558 | 0x08044ea0, |
| 514 | 0xf1be1bf4, | 559 | 0xf4021fb9, |
| 560 | 0x4ea09d21, | ||
| 561 | 0xf4bd010c, | ||
| 562 | 0xa09d21f4, | ||
| 563 | 0xf401044e, | ||
| 564 | 0x4ea09d21, | ||
| 565 | 0xf7f00100, | ||
| 566 | 0x9d21f402, | ||
| 567 | 0x08004ea0, | ||
| 568 | /* 0x051c: init_gpc_wait */ | ||
| 569 | 0xc86821f4, | ||
| 570 | 0x0bf41fff, | ||
| 571 | 0x044ea0fa, | ||
| 572 | 0x6821f408, | ||
| 573 | 0xb7001fbb, | ||
| 574 | 0xb6800040, | ||
| 575 | 0x1bf40132, | ||
| 576 | 0x00f7f0be, | ||
| 577 | 0x07de21f5, | ||
| 578 | 0xf500f7f0, | ||
| 579 | 0xf1077f21, | ||
| 515 | 0xf0010007, | 580 | 0xf0010007, |
| 516 | 0x01d00203, | 581 | 0x01d00203, |
| 517 | 0xbd04bd00, | 582 | 0xbd04bd00, |
| @@ -519,382 +584,379 @@ uint32_t nve0_grhub_code[] = { | |||
| 519 | 0x080007f1, | 584 | 0x080007f1, |
| 520 | 0xd00203f0, | 585 | 0xd00203f0, |
| 521 | 0x04bd0001, | 586 | 0x04bd0001, |
| 522 | /* 0x0458: main */ | 587 | /* 0x0564: main */ |
| 523 | 0xf40031f4, | 588 | 0xf40031f4, |
| 524 | 0xd7f00028, | 589 | 0xd7f00028, |
| 525 | 0x3921f410, | 590 | 0x3921f410, |
| 526 | 0xb1f401f4, | 591 | 0xb1f401f4, |
| 527 | 0xf54001e4, | 592 | 0xf54001e4, |
| 528 | 0xbd00de1b, | 593 | 0xbd00e91b, |
| 529 | 0x0499f094, | 594 | 0x0499f094, |
| 530 | 0x0f0007f1, | 595 | 0x0f0007f1, |
| 531 | 0xd00203f0, | 596 | 0xd00203f0, |
| 532 | 0x04bd0009, | 597 | 0x04bd0009, |
| 533 | 0x0b0017f1, | 598 | 0xc00017f1, |
| 534 | 0xcf0614b6, | 599 | 0xcf0213f0, |
| 535 | 0x11cf4012, | 600 | 0x27f10011, |
| 536 | 0x1f13c800, | 601 | 0x23f0c100, |
| 537 | 0x00870bf5, | 602 | 0x0022cf02, |
| 538 | 0xf41f23c8, | 603 | 0xf51f13c8, |
| 539 | 0x20f9620b, | 604 | 0xc800890b, |
| 540 | 0xbd0212b9, | 605 | 0x0bf41f23, |
| 541 | 0x0799f094, | 606 | 0xb920f962, |
| 542 | 0x0f0007f1, | 607 | 0x94bd0212, |
| 543 | 0xd00203f0, | ||
| 544 | 0x04bd0009, | ||
| 545 | 0xf40132f4, | ||
| 546 | 0x21f50231, | ||
| 547 | 0x94bd0801, | ||
| 548 | 0xf10799f0, | 608 | 0xf10799f0, |
| 549 | 0xf0170007, | 609 | 0xf00f0007, |
| 550 | 0x09d00203, | 610 | 0x09d00203, |
| 551 | 0xfc04bd00, | 611 | 0xf404bd00, |
| 552 | 0xf094bd20, | 612 | 0x31f40132, |
| 553 | 0x07f10699, | 613 | 0xaa21f502, |
| 554 | 0x03f00f00, | 614 | 0xf094bd09, |
| 555 | 0x0009d002, | 615 | 0x07f10799, |
| 556 | 0x31f404bd, | ||
| 557 | 0x0121f501, | ||
| 558 | 0xf094bd08, | ||
| 559 | 0x07f10699, | ||
| 560 | 0x03f01700, | 616 | 0x03f01700, |
| 561 | 0x0009d002, | 617 | 0x0009d002, |
| 562 | 0x0ef404bd, | 618 | 0x20fc04bd, |
| 563 | /* 0x04f9: chsw_prev_no_next */ | ||
| 564 | 0xb920f931, | ||
| 565 | 0x32f40212, | ||
| 566 | 0x0232f401, | ||
| 567 | 0x080121f5, | ||
| 568 | 0x17f120fc, | ||
| 569 | 0x14b60b00, | ||
| 570 | 0x0012d006, | ||
| 571 | /* 0x0517: chsw_no_prev */ | ||
| 572 | 0xc8130ef4, | ||
| 573 | 0x0bf41f23, | ||
| 574 | 0x0131f40d, | ||
| 575 | 0xf50232f4, | ||
| 576 | /* 0x0527: chsw_done */ | ||
| 577 | 0xf1080121, | ||
| 578 | 0xb60b0c17, | ||
| 579 | 0x27f00614, | ||
| 580 | 0x0012d001, | ||
| 581 | 0x99f094bd, | 619 | 0x99f094bd, |
| 582 | 0x0007f104, | 620 | 0x0007f106, |
| 621 | 0x0203f00f, | ||
| 622 | 0xbd0009d0, | ||
| 623 | 0x0131f404, | ||
| 624 | 0x09aa21f5, | ||
| 625 | 0x99f094bd, | ||
| 626 | 0x0007f106, | ||
| 583 | 0x0203f017, | 627 | 0x0203f017, |
| 584 | 0xbd0009d0, | 628 | 0xbd0009d0, |
| 585 | 0x130ef504, | 629 | 0x330ef404, |
| 586 | /* 0x0549: main_not_ctx_switch */ | 630 | /* 0x060c: chsw_prev_no_next */ |
| 587 | 0x01e4b0ff, | 631 | 0x12b920f9, |
| 588 | 0xb90d1bf4, | 632 | 0x0132f402, |
| 589 | 0x21f502f2, | 633 | 0xf50232f4, |
| 590 | 0x0ef40795, | 634 | 0xfc09aa21, |
| 591 | /* 0x0559: main_not_ctx_chan */ | 635 | 0x0007f120, |
| 592 | 0x02e4b046, | 636 | 0x0203f0c0, |
| 593 | 0xbd321bf4, | 637 | 0xbd0002d0, |
| 594 | 0x0799f094, | 638 | 0x130ef404, |
| 595 | 0x0f0007f1, | 639 | /* 0x062c: chsw_no_prev */ |
| 640 | 0xf41f23c8, | ||
| 641 | 0x31f40d0b, | ||
| 642 | 0x0232f401, | ||
| 643 | 0x09aa21f5, | ||
| 644 | /* 0x063c: chsw_done */ | ||
| 645 | 0xf10127f0, | ||
| 646 | 0xf0c30007, | ||
| 647 | 0x02d00203, | ||
| 648 | 0xbd04bd00, | ||
| 649 | 0x0499f094, | ||
| 650 | 0x170007f1, | ||
| 596 | 0xd00203f0, | 651 | 0xd00203f0, |
| 597 | 0x04bd0009, | 652 | 0x04bd0009, |
| 598 | 0xf40132f4, | 653 | 0xff080ef5, |
| 599 | 0x21f50232, | 654 | /* 0x0660: main_not_ctx_switch */ |
| 600 | 0x94bd0801, | 655 | 0xf401e4b0, |
| 656 | 0xf2b90d1b, | ||
| 657 | 0x4221f502, | ||
| 658 | 0x460ef409, | ||
| 659 | /* 0x0670: main_not_ctx_chan */ | ||
| 660 | 0xf402e4b0, | ||
| 661 | 0x94bd321b, | ||
| 601 | 0xf10799f0, | 662 | 0xf10799f0, |
| 602 | 0xf0170007, | 663 | 0xf00f0007, |
| 603 | 0x09d00203, | 664 | 0x09d00203, |
| 604 | 0xf404bd00, | 665 | 0xf404bd00, |
| 605 | /* 0x058e: main_not_ctx_save */ | 666 | 0x32f40132, |
| 606 | 0xef94110e, | 667 | 0xaa21f502, |
| 607 | 0x01f5f010, | 668 | 0xf094bd09, |
| 608 | 0x02fe21f5, | 669 | 0x07f10799, |
| 609 | 0xfec00ef5, | 670 | 0x03f01700, |
| 610 | /* 0x059c: main_done */ | 671 | 0x0009d002, |
| 611 | 0x29f024bd, | 672 | 0x0ef404bd, |
| 612 | 0x0007f11f, | 673 | /* 0x06a5: main_not_ctx_save */ |
| 613 | 0x0203f008, | 674 | 0x10ef9411, |
| 614 | 0xbd0002d0, | 675 | 0xf501f5f0, |
| 615 | 0xab0ef504, | 676 | 0xf5037e21, |
| 616 | /* 0x05b1: ih */ | 677 | /* 0x06b3: main_done */ |
| 617 | 0xfe80f9fe, | 678 | 0xbdfeb50e, |
| 618 | 0x80f90188, | 679 | 0x1f29f024, |
| 619 | 0xa0f990f9, | 680 | 0x080007f1, |
| 620 | 0xd0f9b0f9, | 681 | 0xd00203f0, |
| 621 | 0xf0f9e0f9, | 682 | 0x04bd0002, |
| 622 | 0x0acf04bd, | 683 | 0xfea00ef5, |
| 623 | 0x04abc480, | 684 | /* 0x06c8: ih */ |
| 624 | 0xf11d0bf4, | 685 | 0x88fe80f9, |
| 625 | 0xf01900b7, | 686 | 0xf980f901, |
| 626 | 0xbecf10d7, | 687 | 0xf9a0f990, |
| 627 | 0x00bfcf40, | 688 | 0xf9d0f9b0, |
| 689 | 0xbdf0f9e0, | ||
| 690 | 0x00a7f104, | ||
| 691 | 0x00a3f002, | ||
| 692 | 0xc400aacf, | ||
| 693 | 0x0bf404ab, | ||
| 694 | 0x10d7f030, | ||
| 695 | 0x1a00e7f1, | ||
| 696 | 0xcf00e3f0, | ||
| 697 | 0xf7f100ee, | ||
| 698 | 0xf3f01900, | ||
| 699 | 0x00ffcf00, | ||
| 628 | 0xb70421f4, | 700 | 0xb70421f4, |
| 629 | 0xf00400b0, | 701 | 0xf00400b0, |
| 630 | 0xbed001e7, | 702 | 0x07f101e7, |
| 631 | /* 0x05e9: ih_no_fifo */ | 703 | 0x03f01d00, |
| 632 | 0x00abe400, | 704 | 0x000ed000, |
| 633 | 0x0d0bf401, | 705 | /* 0x071a: ih_no_fifo */ |
| 634 | 0xf110d7f0, | 706 | 0xabe404bd, |
| 635 | 0xf44001e7, | 707 | 0x0bf40100, |
| 636 | /* 0x05fa: ih_no_ctxsw */ | 708 | 0x10d7f00d, |
| 637 | 0xb7f10421, | 709 | 0x4001e7f1, |
| 638 | 0xb0bd0104, | 710 | /* 0x072b: ih_no_ctxsw */ |
| 639 | 0xf4b4abff, | 711 | 0xe40421f4, |
| 640 | 0xa7f10d0b, | 712 | 0xf40400ab, |
| 641 | 0xa4b60c1c, | 713 | 0xb7f1140b, |
| 642 | 0x00abd006, | 714 | 0xbfb90100, |
| 643 | /* 0x0610: ih_no_other */ | 715 | 0x44e7f102, |
| 644 | 0xfc400ad0, | 716 | 0x40e3f001, |
| 717 | /* 0x0743: ih_no_fwmthd */ | ||
| 718 | 0xf19d21f4, | ||
| 719 | 0xbd0104b7, | ||
| 720 | 0xb4abffb0, | ||
| 721 | 0xf10f0bf4, | ||
| 722 | 0xf0070007, | ||
| 723 | 0x0bd00303, | ||
| 724 | /* 0x075b: ih_no_other */ | ||
| 725 | 0xf104bd00, | ||
| 726 | 0xf0010007, | ||
| 727 | 0x0ad00003, | ||
| 728 | 0xfc04bd00, | ||
| 645 | 0xfce0fcf0, | 729 | 0xfce0fcf0, |
| 646 | 0xfcb0fcd0, | 730 | 0xfcb0fcd0, |
| 647 | 0xfc90fca0, | 731 | 0xfc90fca0, |
| 648 | 0x0088fe80, | 732 | 0x0088fe80, |
| 649 | 0x32f480fc, | 733 | 0x32f480fc, |
| 650 | /* 0x062b: ctx_4170s */ | 734 | /* 0x077f: ctx_4170s */ |
| 651 | 0xf101f800, | 735 | 0xf001f800, |
| 652 | 0xf04170e7, | 736 | 0xffb910f5, |
| 653 | 0xf5f040e3, | 737 | 0x70e7f102, |
| 654 | 0x8d21f410, | 738 | 0x40e3f041, |
| 655 | /* 0x063a: ctx_4170w */ | 739 | 0xf89d21f4, |
| 740 | /* 0x0791: ctx_4170w */ | ||
| 741 | 0x70e7f100, | ||
| 742 | 0x40e3f041, | ||
| 743 | 0xb96821f4, | ||
| 744 | 0xf4f002ff, | ||
| 745 | 0xf01bf410, | ||
| 746 | /* 0x07a6: ctx_redswitch */ | ||
| 656 | 0xe7f100f8, | 747 | 0xe7f100f8, |
| 657 | 0xe3f04170, | 748 | 0xe5f00200, |
| 658 | 0x6821f440, | 749 | 0x20e5f040, |
| 659 | 0xf410f4f0, | 750 | 0xf110e5f0, |
| 751 | 0xf0850007, | ||
| 752 | 0x0ed00103, | ||
| 753 | 0xf004bd00, | ||
| 754 | /* 0x07c2: ctx_redswitch_delay */ | ||
| 755 | 0xf2b608f7, | ||
| 756 | 0xfd1bf401, | ||
| 757 | 0x0400e5f1, | ||
| 758 | 0x0100e5f1, | ||
| 759 | 0x850007f1, | ||
| 760 | 0xd00103f0, | ||
| 761 | 0x04bd000e, | ||
| 762 | /* 0x07de: ctx_86c */ | ||
| 763 | 0x07f100f8, | ||
| 764 | 0x03f01b00, | ||
| 765 | 0x000fd002, | ||
| 766 | 0xffb904bd, | ||
| 767 | 0x14e7f102, | ||
| 768 | 0x40e3f08a, | ||
| 769 | 0xb99d21f4, | ||
| 770 | 0xe7f102ff, | ||
| 771 | 0xe3f0a86c, | ||
| 772 | 0x9d21f441, | ||
| 773 | /* 0x0806: ctx_mem */ | ||
| 774 | 0x07f100f8, | ||
| 775 | 0x03f08400, | ||
| 776 | 0x000fd002, | ||
| 777 | /* 0x0812: ctx_mem_wait */ | ||
| 778 | 0xf7f104bd, | ||
| 779 | 0xf3f08400, | ||
| 780 | 0x00ffcf02, | ||
| 781 | 0xf405fffd, | ||
| 660 | 0x00f8f31b, | 782 | 0x00f8f31b, |
| 661 | /* 0x064c: ctx_redswitch */ | 783 | /* 0x0824: ctx_load */ |
| 662 | 0x0614e7f1, | ||
| 663 | 0xf106e4b6, | ||
| 664 | 0xd00270f7, | ||
| 665 | 0xf7f000ef, | ||
| 666 | /* 0x065d: ctx_redswitch_delay */ | ||
| 667 | 0x01f2b608, | ||
| 668 | 0xf1fd1bf4, | ||
| 669 | 0xd00770f7, | ||
| 670 | 0x00f800ef, | ||
| 671 | /* 0x066c: ctx_86c */ | ||
| 672 | 0x086ce7f1, | ||
| 673 | 0xd006e4b6, | ||
| 674 | 0xe7f100ef, | ||
| 675 | 0xe3f08a14, | ||
| 676 | 0x8d21f440, | ||
| 677 | 0xa86ce7f1, | ||
| 678 | 0xf441e3f0, | ||
| 679 | 0x00f88d21, | ||
| 680 | /* 0x068c: ctx_load */ | ||
| 681 | 0x99f094bd, | 784 | 0x99f094bd, |
| 682 | 0x0007f105, | 785 | 0x0007f105, |
| 683 | 0x0203f00f, | 786 | 0x0203f00f, |
| 684 | 0xbd0009d0, | 787 | 0xbd0009d0, |
| 685 | 0x0ca7f004, | 788 | 0x0ca7f004, |
| 686 | 0xf1c921f4, | 789 | 0xbdd021f4, |
| 687 | 0xb60a2417, | 790 | 0x0007f1f4, |
| 688 | 0x10d00614, | 791 | 0x0203f089, |
| 689 | 0x0037f100, | 792 | 0xbd000fd0, |
| 690 | 0x0634b60b, | 793 | 0x0007f104, |
| 691 | 0xf14032d0, | 794 | 0x0203f0c1, |
| 692 | 0xb60a0c17, | 795 | 0xbd0002d0, |
| 693 | 0x47f00614, | 796 | 0x0007f104, |
| 694 | 0x0012d007, | 797 | 0x0203f083, |
| 695 | /* 0x06c7: ctx_chan_wait_0 */ | 798 | 0xbd0002d0, |
| 696 | 0xcf4014d0, | 799 | 0x07f7f004, |
| 697 | 0x44f04014, | 800 | 0x080621f5, |
| 698 | 0xfa1bf41f, | 801 | 0xc00007f1, |
| 699 | 0xfe0032d0, | 802 | 0xd00203f0, |
| 700 | 0x2af0000b, | 803 | 0x04bd0002, |
| 701 | 0x0424b61f, | 804 | 0xf0000bfe, |
| 702 | 0xbd0220b6, | 805 | 0x24b61f2a, |
| 806 | 0x0220b604, | ||
| 807 | 0x99f094bd, | ||
| 808 | 0x0007f108, | ||
| 809 | 0x0203f00f, | ||
| 810 | 0xbd0009d0, | ||
| 811 | 0x0007f104, | ||
| 812 | 0x0203f081, | ||
| 813 | 0xbd0002d0, | ||
| 814 | 0x0027f104, | ||
| 815 | 0x0023f100, | ||
| 816 | 0x0225f080, | ||
| 817 | 0x880007f1, | ||
| 818 | 0xd00203f0, | ||
| 819 | 0x04bd0002, | ||
| 820 | 0xf11017f0, | ||
| 821 | 0xf0020027, | ||
| 822 | 0x12fa0223, | ||
| 823 | 0xbd03f805, | ||
| 703 | 0x0899f094, | 824 | 0x0899f094, |
| 704 | 0x0f0007f1, | 825 | 0x170007f1, |
| 705 | 0xd00203f0, | 826 | 0xd00203f0, |
| 706 | 0x04bd0009, | 827 | 0x04bd0009, |
| 707 | 0x0a0417f1, | 828 | 0xb6810198, |
| 708 | 0xd00614b6, | 829 | 0x02981814, |
| 709 | 0x17f10012, | 830 | 0x0825b680, |
| 710 | 0x14b60a20, | 831 | 0x800512fd, |
| 711 | 0x0227f006, | 832 | 0x94bd1601, |
| 712 | 0x800023f1, | 833 | 0xf10999f0, |
| 713 | 0xf00012d0, | 834 | 0xf00f0007, |
| 714 | 0x27f11017, | ||
| 715 | 0x23f00200, | ||
| 716 | 0x0512fa02, | ||
| 717 | 0x94bd03f8, | ||
| 718 | 0xf10899f0, | ||
| 719 | 0xf0170007, | ||
| 720 | 0x09d00203, | 835 | 0x09d00203, |
| 721 | 0x9804bd00, | 836 | 0xf104bd00, |
| 722 | 0x14b68101, | 837 | 0xf0810007, |
| 723 | 0x80029818, | 838 | 0x01d00203, |
| 724 | 0xfd0825b6, | 839 | 0xf004bd00, |
| 725 | 0x01800512, | 840 | 0x07f10127, |
| 726 | 0xf094bd16, | 841 | 0x03f08800, |
| 727 | 0x07f10999, | 842 | 0x0002d002, |
| 728 | 0x03f00f00, | 843 | 0x17f104bd, |
| 729 | 0x0009d002, | 844 | 0x13f00100, |
| 730 | 0x27f104bd, | 845 | 0x0501fa06, |
| 731 | 0x24b60a04, | 846 | 0x94bd03f8, |
| 732 | 0x0021d006, | 847 | 0xf10999f0, |
| 733 | 0xf10127f0, | ||
| 734 | 0xb60a2017, | ||
| 735 | 0x12d00614, | ||
| 736 | 0x0017f100, | ||
| 737 | 0x0613f001, | ||
| 738 | 0xf80501fa, | ||
| 739 | 0xf094bd03, | ||
| 740 | 0x07f10999, | ||
| 741 | 0x03f01700, | ||
| 742 | 0x0009d002, | ||
| 743 | 0x94bd04bd, | ||
| 744 | 0xf10599f0, | ||
| 745 | 0xf0170007, | 848 | 0xf0170007, |
| 746 | 0x09d00203, | 849 | 0x09d00203, |
| 747 | 0xf804bd00, | 850 | 0xbd04bd00, |
| 748 | /* 0x0795: ctx_chan */ | 851 | 0x0599f094, |
| 749 | 0x8c21f500, | 852 | 0x170007f1, |
| 750 | 0x0ca7f006, | 853 | 0xd00203f0, |
| 751 | 0xf1c921f4, | 854 | 0x04bd0009, |
| 752 | 0xb60a1017, | 855 | /* 0x0942: ctx_chan */ |
| 753 | 0x27f00614, | 856 | 0x21f500f8, |
| 754 | 0x0012d005, | 857 | 0xa7f00824, |
| 755 | /* 0x07ac: ctx_chan_wait */ | 858 | 0xd021f40c, |
| 756 | 0xfd0012cf, | 859 | 0xf505f7f0, |
| 757 | 0x1bf40522, | 860 | 0xf8080621, |
| 758 | /* 0x07b7: ctx_mmio_exec */ | 861 | /* 0x0955: ctx_mmio_exec */ |
| 759 | 0x9800f8fa, | 862 | 0x41039800, |
| 760 | 0x27f14103, | 863 | 0x810007f1, |
| 761 | 0x24b60a04, | 864 | 0xd00203f0, |
| 762 | 0x0023d006, | 865 | 0x04bd0003, |
| 763 | /* 0x07c6: ctx_mmio_loop */ | 866 | /* 0x0966: ctx_mmio_loop */ |
| 764 | 0x34c434bd, | 867 | 0x34c434bd, |
| 765 | 0x0f1bf4ff, | 868 | 0x0f1bf4ff, |
| 766 | 0x020057f1, | 869 | 0x020057f1, |
| 767 | 0xfa0653f0, | 870 | 0xfa0653f0, |
| 768 | 0x03f80535, | 871 | 0x03f80535, |
| 769 | /* 0x07d8: ctx_mmio_pull */ | 872 | /* 0x0978: ctx_mmio_pull */ |
| 770 | 0x98804e98, | 873 | 0x98804e98, |
| 771 | 0x21f4814f, | 874 | 0x21f4814f, |
| 772 | 0x0830b68d, | 875 | 0x0830b69d, |
| 773 | 0xf40112b6, | 876 | 0xf40112b6, |
| 774 | /* 0x07ea: ctx_mmio_done */ | 877 | /* 0x098a: ctx_mmio_done */ |
| 775 | 0x0398df1b, | 878 | 0x0398df1b, |
| 776 | 0x0023d016, | 879 | 0x0007f116, |
| 777 | 0xf1400080, | 880 | 0x0203f081, |
| 778 | 0xf0010017, | 881 | 0xbd0003d0, |
| 779 | 0x01fa0613, | 882 | 0x40008004, |
| 780 | 0xf803f806, | 883 | 0x010017f1, |
| 781 | /* 0x0801: ctx_xfer */ | 884 | 0xfa0613f0, |
| 782 | 0x00f7f100, | 885 | 0x03f80601, |
| 783 | 0x06f4b60c, | 886 | /* 0x09aa: ctx_xfer */ |
| 784 | 0xd004e7f0, | 887 | 0xe7f000f8, |
| 785 | /* 0x080e: ctx_xfer_idle */ | 888 | 0x0007f104, |
| 786 | 0xfecf80fe, | 889 | 0x0303f002, |
| 787 | 0x00e4f100, | 890 | 0xbd000ed0, |
| 788 | 0xf91bf420, | 891 | /* 0x09b9: ctx_xfer_idle */ |
| 789 | 0xf40611f4, | 892 | 0x00e7f104, |
| 790 | /* 0x081e: ctx_xfer_pre */ | 893 | 0x03e3f000, |
| 791 | 0xf7f00d02, | 894 | 0xf100eecf, |
| 792 | 0x6c21f510, | 895 | 0xf42000e4, |
| 793 | 0x1c11f406, | 896 | 0x11f4f21b, |
| 794 | /* 0x0828: ctx_xfer_pre_load */ | 897 | 0x0d02f406, |
| 795 | 0xf502f7f0, | 898 | /* 0x09d0: ctx_xfer_pre */ |
| 796 | 0xf5062b21, | 899 | 0xf510f7f0, |
| 797 | 0xf5063a21, | 900 | 0xf407de21, |
| 798 | 0xbd064c21, | 901 | /* 0x09da: ctx_xfer_pre_load */ |
| 799 | 0x2b21f5f4, | 902 | 0xf7f01c11, |
| 800 | 0x8c21f506, | 903 | 0x7f21f502, |
| 801 | /* 0x0841: ctx_xfer_exec */ | 904 | 0x9121f507, |
| 802 | 0x16019806, | 905 | 0xa621f507, |
| 803 | 0x041427f1, | 906 | 0xf5f4bd07, |
| 804 | 0xd00624b6, | 907 | 0xf5077f21, |
| 805 | 0xe7f10020, | 908 | /* 0x09f3: ctx_xfer_exec */ |
| 806 | 0xe3f0a500, | 909 | 0x98082421, |
| 807 | 0x021fb941, | 910 | 0x24bd1601, |
| 808 | 0xb68d21f4, | 911 | 0x050007f1, |
| 809 | 0xfcf004e0, | 912 | 0xd00103f0, |
| 810 | 0x022cf001, | 913 | 0x04bd0002, |
| 811 | 0xfd0124b6, | 914 | 0xf1021fb9, |
| 812 | 0x21f405f2, | 915 | 0xf0a500e7, |
| 813 | 0xfc17f18d, | 916 | 0x21f441e3, |
| 814 | 0x0213f04a, | 917 | 0x01fcf09d, |
| 815 | 0xd00c27f0, | 918 | 0xb6022cf0, |
| 816 | 0x21f50012, | 919 | 0xf2fd0124, |
| 817 | 0x27f10215, | 920 | 0x02ffb905, |
| 818 | 0x23f047fc, | 921 | 0xa504e7f1, |
| 819 | 0x0020d002, | 922 | 0xf441e3f0, |
| 923 | 0x21f59d21, | ||
| 924 | 0x24bd026a, | ||
| 925 | 0x47fc07f1, | ||
| 926 | 0xd00203f0, | ||
| 927 | 0x04bd0002, | ||
| 820 | 0xb6012cf0, | 928 | 0xb6012cf0, |
| 821 | 0x12d00320, | 929 | 0x07f10320, |
| 822 | 0x01acf000, | 930 | 0x03f04afc, |
| 823 | 0xf006a5f0, | 931 | 0x0002d002, |
| 824 | 0x0c9800b7, | 932 | 0xacf004bd, |
| 825 | 0x010d9800, | 933 | 0x06a5f001, |
| 826 | 0xf500e7f0, | 934 | 0x9800b7f0, |
| 827 | 0xf0016621, | 935 | 0x0d98000c, |
| 828 | 0x21f508a7, | 936 | 0x00e7f001, |
| 829 | 0x21f50109, | 937 | 0x016f21f5, |
| 830 | 0x01f40215, | 938 | 0xf508a7f0, |
| 831 | 0x0ca7f022, | 939 | 0xf5011021, |
| 832 | 0xf1c921f4, | 940 | 0xf4025e21, |
| 833 | 0xb60a1017, | 941 | 0xa7f01301, |
| 834 | 0x27f00614, | 942 | 0xd021f40c, |
| 835 | 0x0012d005, | 943 | 0xf505f7f0, |
| 836 | /* 0x08c8: ctx_xfer_post_save_wait */ | 944 | 0xf4080621, |
| 837 | 0xfd0012cf, | 945 | /* 0x0a82: ctx_xfer_post */ |
| 838 | 0x1bf40522, | 946 | 0xf7f02e02, |
| 839 | 0x2e02f4fa, | 947 | 0x7f21f502, |
| 840 | /* 0x08d4: ctx_xfer_post */ | 948 | 0xf5f4bd07, |
| 841 | 0xf502f7f0, | 949 | 0xf507de21, |
| 842 | 0xbd062b21, | 950 | 0xf5027f21, |
| 843 | 0x6c21f5f4, | 951 | 0xbd079121, |
| 844 | 0x3421f506, | 952 | 0x7f21f5f4, |
| 845 | 0x3a21f502, | 953 | 0x1011f407, |
| 846 | 0xf5f4bd06, | 954 | 0xfd400198, |
| 847 | 0xf4062b21, | 955 | 0x0bf40511, |
| 848 | 0x01981011, | 956 | 0x5521f507, |
| 849 | 0x0511fd40, | 957 | /* 0x0aad: ctx_xfer_no_post_mmio */ |
| 850 | 0xf5070bf4, | 958 | /* 0x0aad: ctx_xfer_done */ |
| 851 | /* 0x08ff: ctx_xfer_no_post_mmio */ | 959 | 0x0000f809, |
| 852 | /* 0x08ff: ctx_xfer_done */ | ||
| 853 | 0xf807b721, | ||
| 854 | 0x00000000, | ||
| 855 | 0x00000000, | ||
| 856 | 0x00000000, | ||
| 857 | 0x00000000, | ||
| 858 | 0x00000000, | ||
| 859 | 0x00000000, | ||
| 860 | 0x00000000, | ||
| 861 | 0x00000000, | ||
| 862 | 0x00000000, | ||
| 863 | 0x00000000, | ||
| 864 | 0x00000000, | ||
| 865 | 0x00000000, | ||
| 866 | 0x00000000, | ||
| 867 | 0x00000000, | ||
| 868 | 0x00000000, | ||
| 869 | 0x00000000, | ||
| 870 | 0x00000000, | ||
| 871 | 0x00000000, | ||
| 872 | 0x00000000, | ||
| 873 | 0x00000000, | ||
| 874 | 0x00000000, | ||
| 875 | 0x00000000, | ||
| 876 | 0x00000000, | ||
| 877 | 0x00000000, | ||
| 878 | 0x00000000, | ||
| 879 | 0x00000000, | ||
| 880 | 0x00000000, | ||
| 881 | 0x00000000, | ||
| 882 | 0x00000000, | ||
| 883 | 0x00000000, | ||
| 884 | 0x00000000, | ||
| 885 | 0x00000000, | ||
| 886 | 0x00000000, | ||
| 887 | 0x00000000, | ||
| 888 | 0x00000000, | ||
| 889 | 0x00000000, | ||
| 890 | 0x00000000, | ||
| 891 | 0x00000000, | ||
| 892 | 0x00000000, | ||
| 893 | 0x00000000, | ||
| 894 | 0x00000000, | ||
| 895 | 0x00000000, | ||
| 896 | 0x00000000, | ||
| 897 | 0x00000000, | ||
| 898 | 0x00000000, | 960 | 0x00000000, |
| 899 | 0x00000000, | 961 | 0x00000000, |
| 900 | 0x00000000, | 962 | 0x00000000, |
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h index 438506d14749..229c0ae37228 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h +++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h | |||
| @@ -206,14 +206,14 @@ uint32_t nvf0_grhub_data[] = { | |||
| 206 | }; | 206 | }; |
| 207 | 207 | ||
| 208 | uint32_t nvf0_grhub_code[] = { | 208 | uint32_t nvf0_grhub_code[] = { |
| 209 | 0x031b0ef5, | 209 | 0x039b0ef5, |
| 210 | /* 0x0004: queue_put */ | 210 | /* 0x0004: queue_put */ |
| 211 | 0x9800d898, | 211 | 0x9800d898, |
| 212 | 0x86f001d9, | 212 | 0x86f001d9, |
| 213 | 0x0489b808, | 213 | 0x0489b808, |
| 214 | 0xf00c1bf4, | 214 | 0xf00c1bf4, |
| 215 | 0x21f502f7, | 215 | 0x21f502f7, |
| 216 | 0x00f802fe, | 216 | 0x00f8037e, |
| 217 | /* 0x001c: queue_put_next */ | 217 | /* 0x001c: queue_put_next */ |
| 218 | 0xb60798c4, | 218 | 0xb60798c4, |
| 219 | 0x8dbb0384, | 219 | 0x8dbb0384, |
| @@ -237,184 +237,214 @@ uint32_t nvf0_grhub_code[] = { | |||
| 237 | /* 0x0066: queue_get_done */ | 237 | /* 0x0066: queue_get_done */ |
| 238 | 0x00f80132, | 238 | 0x00f80132, |
| 239 | /* 0x0068: nv_rd32 */ | 239 | /* 0x0068: nv_rd32 */ |
| 240 | 0x0728b7f1, | 240 | 0xf002ecb9, |
| 241 | 0xb906b4b6, | 241 | 0x07f11fc9, |
| 242 | 0xc9f002ec, | 242 | 0x03f0ca00, |
| 243 | 0x00bcd01f, | 243 | 0x000cd001, |
| 244 | /* 0x0078: nv_rd32_wait */ | 244 | /* 0x007a: nv_rd32_wait */ |
| 245 | 0xc800bccf, | 245 | 0xc7f104bd, |
| 246 | 0x1bf41fcc, | 246 | 0xc3f0ca00, |
| 247 | 0x06a7f0fa, | 247 | 0x00cccf01, |
| 248 | 0x010921f5, | 248 | 0xf41fccc8, |
| 249 | 0xf840bfcf, | 249 | 0xa7f0f31b, |
| 250 | /* 0x008d: nv_wr32 */ | 250 | 0x1021f506, |
| 251 | 0x28b7f100, | 251 | 0x00f7f101, |
| 252 | 0x06b4b607, | 252 | 0x01f3f0cb, |
| 253 | 0xb980bfd0, | 253 | 0xf800ffcf, |
| 254 | 0xc9f002ec, | 254 | /* 0x009d: nv_wr32 */ |
| 255 | 0x1ec9f01f, | 255 | 0x0007f100, |
| 256 | /* 0x00a3: nv_wr32_wait */ | 256 | 0x0103f0cc, |
| 257 | 0xcf00bcd0, | 257 | 0xbd000fd0, |
| 258 | 0xccc800bc, | 258 | 0x02ecb904, |
| 259 | 0xfa1bf41f, | 259 | 0xf01fc9f0, |
| 260 | /* 0x00ae: watchdog_reset */ | 260 | 0x07f11ec9, |
| 261 | 0x87f100f8, | 261 | 0x03f0ca00, |
| 262 | 0x84b60430, | 262 | 0x000cd001, |
| 263 | 0x1ff9f006, | 263 | /* 0x00be: nv_wr32_wait */ |
| 264 | 0xf8008fd0, | 264 | 0xc7f104bd, |
| 265 | /* 0x00bd: watchdog_clear */ | 265 | 0xc3f0ca00, |
| 266 | 0x3087f100, | 266 | 0x00cccf01, |
| 267 | 0x0684b604, | 267 | 0xf41fccc8, |
| 268 | 0xf80080d0, | 268 | 0x00f8f31b, |
| 269 | /* 0x00c9: wait_donez */ | 269 | /* 0x00d0: wait_donez */ |
| 270 | 0xf094bd00, | 270 | 0x99f094bd, |
| 271 | 0x07f10099, | 271 | 0x0007f100, |
| 272 | 0x03f03700, | 272 | 0x0203f037, |
| 273 | 0x0009d002, | 273 | 0xbd0009d0, |
| 274 | 0x07f104bd, | 274 | 0x0007f104, |
| 275 | 0x03f00600, | 275 | 0x0203f006, |
| 276 | 0x000ad002, | 276 | 0xbd000ad0, |
| 277 | /* 0x00e6: wait_donez_ne */ | 277 | /* 0x00ed: wait_donez_ne */ |
| 278 | 0x87f104bd, | 278 | 0x0087f104, |
| 279 | 0x83f00000, | 279 | 0x0183f000, |
| 280 | 0x0088cf01, | 280 | 0xff0088cf, |
| 281 | 0xf4888aff, | 281 | 0x1bf4888a, |
| 282 | 0x94bdf31b, | 282 | 0xf094bdf3, |
| 283 | 0xf10099f0, | ||
| 284 | 0xf0170007, | ||
| 285 | 0x09d00203, | ||
| 286 | 0xf804bd00, | ||
| 287 | /* 0x0109: wait_doneo */ | ||
| 288 | 0xf094bd00, | ||
| 289 | 0x07f10099, | 283 | 0x07f10099, |
| 290 | 0x03f03700, | 284 | 0x03f01700, |
| 291 | 0x0009d002, | 285 | 0x0009d002, |
| 292 | 0x87f104bd, | 286 | 0x00f804bd, |
| 293 | 0x84b60818, | 287 | /* 0x0110: wait_doneo */ |
| 294 | 0x008ad006, | ||
| 295 | /* 0x0124: wait_doneo_e */ | ||
| 296 | 0x040087f1, | ||
| 297 | 0xcf0684b6, | ||
| 298 | 0x8aff0088, | ||
| 299 | 0xf30bf488, | ||
| 300 | 0x99f094bd, | 288 | 0x99f094bd, |
| 301 | 0x0007f100, | 289 | 0x0007f100, |
| 302 | 0x0203f017, | 290 | 0x0203f037, |
| 303 | 0xbd0009d0, | 291 | 0xbd0009d0, |
| 304 | /* 0x0147: mmctx_size */ | 292 | 0x0007f104, |
| 305 | 0xbd00f804, | 293 | 0x0203f006, |
| 306 | /* 0x0149: nv_mmctx_size_loop */ | 294 | 0xbd000ad0, |
| 307 | 0x00e89894, | 295 | /* 0x012d: wait_doneo_e */ |
| 308 | 0xb61a85b6, | 296 | 0x0087f104, |
| 309 | 0x84b60180, | 297 | 0x0183f000, |
| 310 | 0x0098bb02, | 298 | 0xff0088cf, |
| 311 | 0xb804e0b6, | 299 | 0x0bf4888a, |
| 312 | 0x1bf404ef, | 300 | 0xf094bdf3, |
| 313 | 0x029fb9eb, | 301 | 0x07f10099, |
| 314 | /* 0x0166: mmctx_xfer */ | 302 | 0x03f01700, |
| 315 | 0x94bd00f8, | 303 | 0x0009d002, |
| 316 | 0xf10199f0, | 304 | 0x00f804bd, |
| 317 | 0xf0370007, | 305 | /* 0x0150: mmctx_size */ |
| 318 | 0x09d00203, | 306 | /* 0x0152: nv_mmctx_size_loop */ |
| 319 | 0xf104bd00, | 307 | 0xe89894bd, |
| 320 | 0xb6071087, | 308 | 0x1a85b600, |
| 321 | 0x94bd0684, | 309 | 0xb60180b6, |
| 322 | 0xf405bbfd, | 310 | 0x98bb0284, |
| 323 | 0x8bd0090b, | 311 | 0x04e0b600, |
| 324 | 0x0099f000, | 312 | 0xf404efb8, |
| 325 | /* 0x018c: mmctx_base_disabled */ | 313 | 0x9fb9eb1b, |
| 326 | 0xf405eefd, | 314 | /* 0x016f: mmctx_xfer */ |
| 327 | 0x8ed00c0b, | 315 | 0xbd00f802, |
| 328 | 0xc08fd080, | 316 | 0x0199f094, |
| 329 | /* 0x019b: mmctx_multi_disabled */ | 317 | 0x370007f1, |
| 330 | 0xb70199f0, | 318 | 0xd00203f0, |
| 331 | 0xc8010080, | 319 | 0x04bd0009, |
| 320 | 0xbbfd94bd, | ||
| 321 | 0x120bf405, | ||
| 322 | 0xc40007f1, | ||
| 323 | 0xd00103f0, | ||
| 324 | 0x04bd000b, | ||
| 325 | /* 0x0197: mmctx_base_disabled */ | ||
| 326 | 0xfd0099f0, | ||
| 327 | 0x0bf405ee, | ||
| 328 | 0x0007f11e, | ||
| 329 | 0x0103f0c6, | ||
| 330 | 0xbd000ed0, | ||
| 331 | 0x0007f104, | ||
| 332 | 0x0103f0c7, | ||
| 333 | 0xbd000fd0, | ||
| 334 | 0x0199f004, | ||
| 335 | /* 0x01b8: mmctx_multi_disabled */ | ||
| 336 | 0xb600abc8, | ||
| 337 | 0xb9f010b4, | ||
| 338 | 0x01aec80c, | ||
| 339 | 0xfd11e4b6, | ||
| 340 | 0x07f105be, | ||
| 341 | 0x03f0c500, | ||
| 342 | 0x000bd001, | ||
| 343 | /* 0x01d6: mmctx_exec_loop */ | ||
| 344 | /* 0x01d6: mmctx_wait_free */ | ||
| 345 | 0xe7f104bd, | ||
| 346 | 0xe3f0c500, | ||
| 347 | 0x00eecf01, | ||
| 348 | 0xf41fe4f0, | ||
| 349 | 0xce98f30b, | ||
| 350 | 0x05e9fd00, | ||
| 351 | 0xc80007f1, | ||
| 352 | 0xd00103f0, | ||
| 353 | 0x04bd000e, | ||
| 354 | 0xb804c0b6, | ||
| 355 | 0x1bf404cd, | ||
| 356 | 0x02abc8d8, | ||
| 357 | /* 0x0207: mmctx_fini_wait */ | ||
| 358 | 0xf11f1bf4, | ||
| 359 | 0xf0c500b7, | ||
| 360 | 0xbbcf01b3, | ||
| 361 | 0x1fb4f000, | ||
| 362 | 0xf410b4b0, | ||
| 363 | 0xa7f0f01b, | ||
| 364 | 0xd021f402, | ||
| 365 | /* 0x0223: mmctx_stop */ | ||
| 366 | 0xc82b0ef4, | ||
| 332 | 0xb4b600ab, | 367 | 0xb4b600ab, |
| 333 | 0x0cb9f010, | 368 | 0x0cb9f010, |
| 334 | 0xb601aec8, | 369 | 0xf112b9f0, |
| 335 | 0xbefd11e4, | 370 | 0xf0c50007, |
| 336 | 0x008bd005, | 371 | 0x0bd00103, |
| 337 | /* 0x01b4: mmctx_exec_loop */ | 372 | /* 0x023b: mmctx_stop_wait */ |
| 338 | /* 0x01b4: mmctx_wait_free */ | 373 | 0xf104bd00, |
| 339 | 0xf0008ecf, | 374 | 0xf0c500b7, |
| 340 | 0x0bf41fe4, | 375 | 0xbbcf01b3, |
| 341 | 0x00ce98fa, | 376 | 0x12bbc800, |
| 342 | 0xd005e9fd, | 377 | /* 0x024b: mmctx_done */ |
| 343 | 0xc0b6c08e, | 378 | 0xbdf31bf4, |
| 344 | 0x04cdb804, | 379 | 0x0199f094, |
| 345 | 0xc8e81bf4, | 380 | 0x170007f1, |
| 346 | 0x1bf402ab, | 381 | 0xd00203f0, |
| 347 | /* 0x01d5: mmctx_fini_wait */ | 382 | 0x04bd0009, |
| 348 | 0x008bcf18, | 383 | /* 0x025e: strand_wait */ |
| 349 | 0xb01fb4f0, | 384 | 0xa0f900f8, |
| 350 | 0x1bf410b4, | 385 | 0xf402a7f0, |
| 351 | 0x02a7f0f7, | 386 | 0xa0fcd021, |
| 352 | 0xf4c921f4, | 387 | /* 0x026a: strand_pre */ |
| 353 | /* 0x01ea: mmctx_stop */ | 388 | 0x97f000f8, |
| 354 | 0xabc81b0e, | 389 | 0xfc07f10c, |
| 355 | 0x10b4b600, | 390 | 0x0203f04a, |
| 356 | 0xf00cb9f0, | 391 | 0xbd0009d0, |
| 357 | 0x8bd012b9, | 392 | 0x5e21f504, |
| 358 | /* 0x01f9: mmctx_stop_wait */ | 393 | /* 0x027f: strand_post */ |
| 359 | 0x008bcf00, | 394 | 0xf000f802, |
| 360 | 0xf412bbc8, | 395 | 0x07f10d97, |
| 361 | /* 0x0202: mmctx_done */ | 396 | 0x03f04afc, |
| 362 | 0x94bdfa1b, | ||
| 363 | 0xf10199f0, | ||
| 364 | 0xf0170007, | ||
| 365 | 0x09d00203, | ||
| 366 | 0xf804bd00, | ||
| 367 | /* 0x0215: strand_wait */ | ||
| 368 | 0xf0a0f900, | ||
| 369 | 0x21f402a7, | ||
| 370 | 0xf8a0fcc9, | ||
| 371 | /* 0x0221: strand_pre */ | ||
| 372 | 0xfc87f100, | ||
| 373 | 0x0283f04a, | ||
| 374 | 0xd00c97f0, | ||
| 375 | 0x21f50089, | ||
| 376 | 0x00f80215, | ||
| 377 | /* 0x0234: strand_post */ | ||
| 378 | 0x4afc87f1, | ||
| 379 | 0xf00283f0, | ||
| 380 | 0x89d00d97, | ||
| 381 | 0x1521f500, | ||
| 382 | /* 0x0247: strand_set */ | ||
| 383 | 0xf100f802, | ||
| 384 | 0xf04ffca7, | ||
| 385 | 0xaba202a3, | ||
| 386 | 0xc7f00500, | ||
| 387 | 0x00acd00f, | ||
| 388 | 0xd00bc7f0, | ||
| 389 | 0x21f500bc, | ||
| 390 | 0xaed00215, | ||
| 391 | 0x0ac7f000, | ||
| 392 | 0xf500bcd0, | ||
| 393 | 0xf8021521, | ||
| 394 | /* 0x0271: strand_ctx_init */ | ||
| 395 | 0xf094bd00, | ||
| 396 | 0x07f10399, | ||
| 397 | 0x03f03700, | ||
| 398 | 0x0009d002, | 397 | 0x0009d002, |
| 399 | 0x21f504bd, | 398 | 0x21f504bd, |
| 400 | 0xe7f00221, | 399 | 0x00f8025e, |
| 401 | 0x4721f503, | 400 | /* 0x0294: strand_set */ |
| 402 | 0xfca7f102, | 401 | 0xf10fc7f0, |
| 403 | 0x02a3f046, | 402 | 0xf04ffc07, |
| 404 | 0x0400aba0, | 403 | 0x0cd00203, |
| 405 | 0xf040a0d0, | 404 | 0xf004bd00, |
| 406 | 0xbcd001c7, | 405 | 0x07f10bc7, |
| 407 | 0x1521f500, | 406 | 0x03f04afc, |
| 408 | 0x010c9202, | 407 | 0x000cd002, |
| 409 | 0xf000acd0, | 408 | 0x07f104bd, |
| 410 | 0xbcd002c7, | 409 | 0x03f04ffc, |
| 411 | 0x1521f500, | 410 | 0x000ed002, |
| 412 | 0x3421f502, | 411 | 0xc7f004bd, |
| 413 | 0x8087f102, | 412 | 0xfc07f10a, |
| 414 | 0x0684b608, | 413 | 0x0203f04a, |
| 415 | 0xb70089cf, | 414 | 0xbd000cd0, |
| 416 | 0x95220080, | 415 | 0x5e21f504, |
| 417 | /* 0x02ca: ctx_init_strand_loop */ | 416 | /* 0x02d3: strand_ctx_init */ |
| 417 | 0xbd00f802, | ||
| 418 | 0x0399f094, | ||
| 419 | 0x370007f1, | ||
| 420 | 0xd00203f0, | ||
| 421 | 0x04bd0009, | ||
| 422 | 0x026a21f5, | ||
| 423 | 0xf503e7f0, | ||
| 424 | 0xbd029421, | ||
| 425 | 0xfc07f1c4, | ||
| 426 | 0x0203f047, | ||
| 427 | 0xbd000cd0, | ||
| 428 | 0x01c7f004, | ||
| 429 | 0x4afc07f1, | ||
| 430 | 0xd00203f0, | ||
| 431 | 0x04bd000c, | ||
| 432 | 0x025e21f5, | ||
| 433 | 0xf1010c92, | ||
| 434 | 0xf046fc07, | ||
| 435 | 0x0cd00203, | ||
| 436 | 0xf004bd00, | ||
| 437 | 0x07f102c7, | ||
| 438 | 0x03f04afc, | ||
| 439 | 0x000cd002, | ||
| 440 | 0x21f504bd, | ||
| 441 | 0x21f5025e, | ||
| 442 | 0x87f1027f, | ||
| 443 | 0x83f04200, | ||
| 444 | 0x0097f102, | ||
| 445 | 0x0293f020, | ||
| 446 | 0x950099cf, | ||
| 447 | /* 0x034a: ctx_init_strand_loop */ | ||
| 418 | 0x8ed008fe, | 448 | 0x8ed008fe, |
| 419 | 0x408ed000, | 449 | 0x408ed000, |
| 420 | 0xb6808acf, | 450 | 0xb6808acf, |
| @@ -428,7 +458,7 @@ uint32_t nvf0_grhub_code[] = { | |||
| 428 | 0x170007f1, | 458 | 0x170007f1, |
| 429 | 0xd00203f0, | 459 | 0xd00203f0, |
| 430 | 0x04bd0009, | 460 | 0x04bd0009, |
| 431 | /* 0x02fe: error */ | 461 | /* 0x037e: error */ |
| 432 | 0x07f100f8, | 462 | 0x07f100f8, |
| 433 | 0x03f00500, | 463 | 0x03f00500, |
| 434 | 0x000fd002, | 464 | 0x000fd002, |
| @@ -436,82 +466,117 @@ uint32_t nvf0_grhub_code[] = { | |||
| 436 | 0x0007f101, | 466 | 0x0007f101, |
| 437 | 0x0303f007, | 467 | 0x0303f007, |
| 438 | 0xbd000fd0, | 468 | 0xbd000fd0, |
| 439 | /* 0x031b: init */ | 469 | /* 0x039b: init */ |
| 440 | 0xbd00f804, | 470 | 0xbd00f804, |
| 441 | 0x0004fe04, | 471 | 0x0007fe04, |
| 442 | 0xf10007fe, | 472 | 0x420017f1, |
| 443 | 0xf0120017, | 473 | 0xcf0013f0, |
| 444 | 0x12d00227, | 474 | 0x11e70011, |
| 445 | 0xb117f100, | 475 | 0x14b60109, |
| 446 | 0x0010fe05, | 476 | 0x0014fe08, |
| 447 | 0x040017f1, | 477 | 0xf10227f0, |
| 448 | 0xf1c010d0, | 478 | 0xf0120007, |
| 449 | 0xb6040437, | 479 | 0x02d00003, |
| 450 | 0x27f10634, | 480 | 0xf104bd00, |
| 451 | 0x32d02003, | 481 | 0xfe06c817, |
| 452 | 0x0427f100, | 482 | 0x24bd0010, |
| 453 | 0x0132d020, | 483 | 0x070007f1, |
| 484 | 0xd00003f0, | ||
| 485 | 0x04bd0002, | ||
| 486 | 0x200327f1, | ||
| 487 | 0x010007f1, | ||
| 488 | 0xd00103f0, | ||
| 489 | 0x04bd0002, | ||
| 490 | 0x200427f1, | ||
| 491 | 0x010407f1, | ||
| 492 | 0xd00103f0, | ||
| 493 | 0x04bd0002, | ||
| 454 | 0x200b27f1, | 494 | 0x200b27f1, |
| 455 | 0xf10232d0, | 495 | 0x010807f1, |
| 456 | 0xd0200c27, | 496 | 0xd00103f0, |
| 457 | 0x27f10732, | 497 | 0x04bd0002, |
| 458 | 0x24b60c24, | 498 | 0x200c27f1, |
| 459 | 0x0003b906, | 499 | 0x011c07f1, |
| 460 | 0xf10023d0, | 500 | 0xd00103f0, |
| 501 | 0x04bd0002, | ||
| 502 | 0xf1010392, | ||
| 503 | 0xf0090007, | ||
| 504 | 0x03d00303, | ||
| 505 | 0xf104bd00, | ||
| 461 | 0xf0870427, | 506 | 0xf0870427, |
| 462 | 0x12d00023, | 507 | 0x07f10023, |
| 463 | 0x0012b700, | 508 | 0x03f00400, |
| 464 | 0x0427f001, | 509 | 0x0002d000, |
| 465 | 0xf40012d0, | 510 | 0x27f004bd, |
| 466 | 0xe7f11031, | 511 | 0x0007f104, |
| 467 | 0xe3f09604, | 512 | 0x0003f003, |
| 468 | 0x6821f440, | 513 | 0xbd0002d0, |
| 469 | 0x8090f1c7, | 514 | 0x1031f404, |
| 470 | 0xf4f00301, | 515 | 0x9604e7f1, |
| 471 | 0x020f801f, | 516 | 0xf440e3f0, |
| 472 | 0xbb0117f0, | 517 | 0xfeb96821, |
| 473 | 0x12b6041f, | 518 | 0x90f1c702, |
| 474 | 0x0c27f101, | 519 | 0xf0030180, |
| 475 | 0x0624b604, | 520 | 0x0f801ff4, |
| 476 | 0xd00021d0, | 521 | 0x0117f002, |
| 477 | 0x17f14021, | 522 | 0xb6041fbb, |
| 478 | 0x0e980100, | 523 | 0x07f10112, |
| 479 | 0x010f9800, | 524 | 0x03f00300, |
| 480 | 0x014721f5, | 525 | 0x0001d001, |
| 481 | 0x070037f1, | 526 | 0x07f104bd, |
| 482 | 0x950634b6, | 527 | 0x03f00400, |
| 483 | 0x34d00814, | 528 | 0x0001d001, |
| 484 | 0x4034d000, | 529 | 0x17f104bd, |
| 485 | 0x130030b7, | 530 | 0xf7f00100, |
| 486 | 0xb6001fbb, | 531 | 0x7f21f502, |
| 487 | 0x3fd002f5, | 532 | 0x9121f507, |
| 488 | 0x0815b600, | 533 | 0x10f7f007, |
| 489 | 0xb60110b6, | 534 | 0x07de21f5, |
| 490 | 0x1fb90814, | 535 | 0x98000e98, |
| 491 | 0x7121f502, | 536 | 0x21f5010f, |
| 492 | 0x001fbb02, | 537 | 0x14950150, |
| 493 | 0xf1020398, | 538 | 0x0007f108, |
| 494 | 0xf0200047, | 539 | 0x0103f0c0, |
| 495 | /* 0x03f6: init_gpc */ | 540 | 0xbd0004d0, |
| 496 | 0x4ea05043, | 541 | 0x0007f104, |
| 497 | 0x1fb90804, | 542 | 0x0103f0c1, |
| 498 | 0x8d21f402, | 543 | 0xbd0004d0, |
| 499 | 0x010c4ea0, | 544 | 0x0030b704, |
| 500 | 0x21f4f4bd, | 545 | 0x001fbb13, |
| 501 | 0x044ea08d, | 546 | 0xf102f5b6, |
| 502 | 0x8d21f401, | 547 | 0xf0d30007, |
| 503 | 0x01004ea0, | 548 | 0x0fd00103, |
| 504 | 0xf402f7f0, | 549 | 0xb604bd00, |
| 505 | 0x4ea08d21, | 550 | 0x10b60815, |
| 506 | /* 0x041e: init_gpc_wait */ | 551 | 0x0814b601, |
| 507 | 0x21f40800, | 552 | 0xf5021fb9, |
| 508 | 0x1fffc868, | 553 | 0xbb02d321, |
| 509 | 0xa0fa0bf4, | 554 | 0x0398001f, |
| 510 | 0xf408044e, | 555 | 0x0047f102, |
| 511 | 0x1fbb6821, | 556 | 0x5043f020, |
| 512 | 0x0040b700, | 557 | /* 0x04f4: init_gpc */ |
| 513 | 0x0132b680, | 558 | 0x08044ea0, |
| 514 | 0xf1be1bf4, | 559 | 0xf4021fb9, |
| 560 | 0x4ea09d21, | ||
| 561 | 0xf4bd010c, | ||
| 562 | 0xa09d21f4, | ||
| 563 | 0xf401044e, | ||
| 564 | 0x4ea09d21, | ||
| 565 | 0xf7f00100, | ||
| 566 | 0x9d21f402, | ||
| 567 | 0x08004ea0, | ||
| 568 | /* 0x051c: init_gpc_wait */ | ||
| 569 | 0xc86821f4, | ||
| 570 | 0x0bf41fff, | ||
| 571 | 0x044ea0fa, | ||
| 572 | 0x6821f408, | ||
| 573 | 0xb7001fbb, | ||
| 574 | 0xb6800040, | ||
| 575 | 0x1bf40132, | ||
| 576 | 0x00f7f0be, | ||
| 577 | 0x07de21f5, | ||
| 578 | 0xf500f7f0, | ||
| 579 | 0xf1077f21, | ||
| 515 | 0xf0010007, | 580 | 0xf0010007, |
| 516 | 0x01d00203, | 581 | 0x01d00203, |
| 517 | 0xbd04bd00, | 582 | 0xbd04bd00, |
| @@ -519,382 +584,379 @@ uint32_t nvf0_grhub_code[] = { | |||
| 519 | 0x300007f1, | 584 | 0x300007f1, |
| 520 | 0xd00203f0, | 585 | 0xd00203f0, |
| 521 | 0x04bd0001, | 586 | 0x04bd0001, |
| 522 | /* 0x0458: main */ | 587 | /* 0x0564: main */ |
| 523 | 0xf40031f4, | 588 | 0xf40031f4, |
| 524 | 0xd7f00028, | 589 | 0xd7f00028, |
| 525 | 0x3921f410, | 590 | 0x3921f410, |
| 526 | 0xb1f401f4, | 591 | 0xb1f401f4, |
| 527 | 0xf54001e4, | 592 | 0xf54001e4, |
| 528 | 0xbd00de1b, | 593 | 0xbd00e91b, |
| 529 | 0x0499f094, | 594 | 0x0499f094, |
| 530 | 0x370007f1, | 595 | 0x370007f1, |
| 531 | 0xd00203f0, | 596 | 0xd00203f0, |
| 532 | 0x04bd0009, | 597 | 0x04bd0009, |
| 533 | 0x0b0017f1, | 598 | 0xc00017f1, |
| 534 | 0xcf0614b6, | 599 | 0xcf0213f0, |
| 535 | 0x11cf4012, | 600 | 0x27f10011, |
| 536 | 0x1f13c800, | 601 | 0x23f0c100, |
| 537 | 0x00870bf5, | 602 | 0x0022cf02, |
| 538 | 0xf41f23c8, | 603 | 0xf51f13c8, |
| 539 | 0x20f9620b, | 604 | 0xc800890b, |
| 540 | 0xbd0212b9, | 605 | 0x0bf41f23, |
| 541 | 0x0799f094, | 606 | 0xb920f962, |
| 542 | 0x370007f1, | 607 | 0x94bd0212, |
| 543 | 0xd00203f0, | ||
| 544 | 0x04bd0009, | ||
| 545 | 0xf40132f4, | ||
| 546 | 0x21f50231, | ||
| 547 | 0x94bd0801, | ||
| 548 | 0xf10799f0, | 608 | 0xf10799f0, |
| 549 | 0xf0170007, | 609 | 0xf0370007, |
| 550 | 0x09d00203, | 610 | 0x09d00203, |
| 551 | 0xfc04bd00, | 611 | 0xf404bd00, |
| 552 | 0xf094bd20, | 612 | 0x31f40132, |
| 553 | 0x07f10699, | 613 | 0xaa21f502, |
| 554 | 0x03f03700, | 614 | 0xf094bd09, |
| 555 | 0x0009d002, | 615 | 0x07f10799, |
| 556 | 0x31f404bd, | ||
| 557 | 0x0121f501, | ||
| 558 | 0xf094bd08, | ||
| 559 | 0x07f10699, | ||
| 560 | 0x03f01700, | 616 | 0x03f01700, |
| 561 | 0x0009d002, | 617 | 0x0009d002, |
| 562 | 0x0ef404bd, | 618 | 0x20fc04bd, |
| 563 | /* 0x04f9: chsw_prev_no_next */ | ||
| 564 | 0xb920f931, | ||
| 565 | 0x32f40212, | ||
| 566 | 0x0232f401, | ||
| 567 | 0x080121f5, | ||
| 568 | 0x17f120fc, | ||
| 569 | 0x14b60b00, | ||
| 570 | 0x0012d006, | ||
| 571 | /* 0x0517: chsw_no_prev */ | ||
| 572 | 0xc8130ef4, | ||
| 573 | 0x0bf41f23, | ||
| 574 | 0x0131f40d, | ||
| 575 | 0xf50232f4, | ||
| 576 | /* 0x0527: chsw_done */ | ||
| 577 | 0xf1080121, | ||
| 578 | 0xb60b0c17, | ||
| 579 | 0x27f00614, | ||
| 580 | 0x0012d001, | ||
| 581 | 0x99f094bd, | 619 | 0x99f094bd, |
| 582 | 0x0007f104, | 620 | 0x0007f106, |
| 621 | 0x0203f037, | ||
| 622 | 0xbd0009d0, | ||
| 623 | 0x0131f404, | ||
| 624 | 0x09aa21f5, | ||
| 625 | 0x99f094bd, | ||
| 626 | 0x0007f106, | ||
| 583 | 0x0203f017, | 627 | 0x0203f017, |
| 584 | 0xbd0009d0, | 628 | 0xbd0009d0, |
| 585 | 0x130ef504, | 629 | 0x330ef404, |
| 586 | /* 0x0549: main_not_ctx_switch */ | 630 | /* 0x060c: chsw_prev_no_next */ |
| 587 | 0x01e4b0ff, | 631 | 0x12b920f9, |
| 588 | 0xb90d1bf4, | 632 | 0x0132f402, |
| 589 | 0x21f502f2, | 633 | 0xf50232f4, |
| 590 | 0x0ef40795, | 634 | 0xfc09aa21, |
| 591 | /* 0x0559: main_not_ctx_chan */ | 635 | 0x0007f120, |
| 592 | 0x02e4b046, | 636 | 0x0203f0c0, |
| 593 | 0xbd321bf4, | 637 | 0xbd0002d0, |
| 594 | 0x0799f094, | 638 | 0x130ef404, |
| 595 | 0x370007f1, | 639 | /* 0x062c: chsw_no_prev */ |
| 640 | 0xf41f23c8, | ||
| 641 | 0x31f40d0b, | ||
| 642 | 0x0232f401, | ||
| 643 | 0x09aa21f5, | ||
| 644 | /* 0x063c: chsw_done */ | ||
| 645 | 0xf10127f0, | ||
| 646 | 0xf0c30007, | ||
| 647 | 0x02d00203, | ||
| 648 | 0xbd04bd00, | ||
| 649 | 0x0499f094, | ||
| 650 | 0x170007f1, | ||
| 596 | 0xd00203f0, | 651 | 0xd00203f0, |
| 597 | 0x04bd0009, | 652 | 0x04bd0009, |
| 598 | 0xf40132f4, | 653 | 0xff080ef5, |
| 599 | 0x21f50232, | 654 | /* 0x0660: main_not_ctx_switch */ |
| 600 | 0x94bd0801, | 655 | 0xf401e4b0, |
| 656 | 0xf2b90d1b, | ||
| 657 | 0x4221f502, | ||
| 658 | 0x460ef409, | ||
| 659 | /* 0x0670: main_not_ctx_chan */ | ||
| 660 | 0xf402e4b0, | ||
| 661 | 0x94bd321b, | ||
| 601 | 0xf10799f0, | 662 | 0xf10799f0, |
| 602 | 0xf0170007, | 663 | 0xf0370007, |
| 603 | 0x09d00203, | 664 | 0x09d00203, |
| 604 | 0xf404bd00, | 665 | 0xf404bd00, |
| 605 | /* 0x058e: main_not_ctx_save */ | 666 | 0x32f40132, |
| 606 | 0xef94110e, | 667 | 0xaa21f502, |
| 607 | 0x01f5f010, | 668 | 0xf094bd09, |
| 608 | 0x02fe21f5, | 669 | 0x07f10799, |
| 609 | 0xfec00ef5, | 670 | 0x03f01700, |
| 610 | /* 0x059c: main_done */ | 671 | 0x0009d002, |
| 611 | 0x29f024bd, | 672 | 0x0ef404bd, |
| 612 | 0x0007f11f, | 673 | /* 0x06a5: main_not_ctx_save */ |
| 613 | 0x0203f030, | 674 | 0x10ef9411, |
| 614 | 0xbd0002d0, | 675 | 0xf501f5f0, |
| 615 | 0xab0ef504, | 676 | 0xf5037e21, |
| 616 | /* 0x05b1: ih */ | 677 | /* 0x06b3: main_done */ |
| 617 | 0xfe80f9fe, | 678 | 0xbdfeb50e, |
| 618 | 0x80f90188, | 679 | 0x1f29f024, |
| 619 | 0xa0f990f9, | 680 | 0x300007f1, |
| 620 | 0xd0f9b0f9, | 681 | 0xd00203f0, |
| 621 | 0xf0f9e0f9, | 682 | 0x04bd0002, |
| 622 | 0x0acf04bd, | 683 | 0xfea00ef5, |
| 623 | 0x04abc480, | 684 | /* 0x06c8: ih */ |
| 624 | 0xf11d0bf4, | 685 | 0x88fe80f9, |
| 625 | 0xf01900b7, | 686 | 0xf980f901, |
| 626 | 0xbecf10d7, | 687 | 0xf9a0f990, |
| 627 | 0x00bfcf40, | 688 | 0xf9d0f9b0, |
| 689 | 0xbdf0f9e0, | ||
| 690 | 0x00a7f104, | ||
| 691 | 0x00a3f002, | ||
| 692 | 0xc400aacf, | ||
| 693 | 0x0bf404ab, | ||
| 694 | 0x10d7f030, | ||
| 695 | 0x1a00e7f1, | ||
| 696 | 0xcf00e3f0, | ||
| 697 | 0xf7f100ee, | ||
| 698 | 0xf3f01900, | ||
| 699 | 0x00ffcf00, | ||
| 628 | 0xb70421f4, | 700 | 0xb70421f4, |
| 629 | 0xf00400b0, | 701 | 0xf00400b0, |
| 630 | 0xbed001e7, | 702 | 0x07f101e7, |
| 631 | /* 0x05e9: ih_no_fifo */ | 703 | 0x03f01d00, |
| 632 | 0x00abe400, | 704 | 0x000ed000, |
| 633 | 0x0d0bf401, | 705 | /* 0x071a: ih_no_fifo */ |
| 634 | 0xf110d7f0, | 706 | 0xabe404bd, |
| 635 | 0xf44001e7, | 707 | 0x0bf40100, |
| 636 | /* 0x05fa: ih_no_ctxsw */ | 708 | 0x10d7f00d, |
| 637 | 0xb7f10421, | 709 | 0x4001e7f1, |
| 638 | 0xb0bd0104, | 710 | /* 0x072b: ih_no_ctxsw */ |
| 639 | 0xf4b4abff, | 711 | 0xe40421f4, |
| 640 | 0xa7f10d0b, | 712 | 0xf40400ab, |
| 641 | 0xa4b60c1c, | 713 | 0xb7f1140b, |
| 642 | 0x00abd006, | 714 | 0xbfb90100, |
| 643 | /* 0x0610: ih_no_other */ | 715 | 0x44e7f102, |
| 644 | 0xfc400ad0, | 716 | 0x40e3f001, |
| 717 | /* 0x0743: ih_no_fwmthd */ | ||
| 718 | 0xf19d21f4, | ||
| 719 | 0xbd0104b7, | ||
| 720 | 0xb4abffb0, | ||
| 721 | 0xf10f0bf4, | ||
| 722 | 0xf0070007, | ||
| 723 | 0x0bd00303, | ||
| 724 | /* 0x075b: ih_no_other */ | ||
| 725 | 0xf104bd00, | ||
| 726 | 0xf0010007, | ||
| 727 | 0x0ad00003, | ||
| 728 | 0xfc04bd00, | ||
| 645 | 0xfce0fcf0, | 729 | 0xfce0fcf0, |
| 646 | 0xfcb0fcd0, | 730 | 0xfcb0fcd0, |
| 647 | 0xfc90fca0, | 731 | 0xfc90fca0, |
| 648 | 0x0088fe80, | 732 | 0x0088fe80, |
| 649 | 0x32f480fc, | 733 | 0x32f480fc, |
| 650 | /* 0x062b: ctx_4170s */ | 734 | /* 0x077f: ctx_4170s */ |
| 651 | 0xf101f800, | 735 | 0xf001f800, |
| 652 | 0xf04170e7, | 736 | 0xffb910f5, |
| 653 | 0xf5f040e3, | 737 | 0x70e7f102, |
| 654 | 0x8d21f410, | 738 | 0x40e3f041, |
| 655 | /* 0x063a: ctx_4170w */ | 739 | 0xf89d21f4, |
| 740 | /* 0x0791: ctx_4170w */ | ||
| 741 | 0x70e7f100, | ||
| 742 | 0x40e3f041, | ||
| 743 | 0xb96821f4, | ||
| 744 | 0xf4f002ff, | ||
| 745 | 0xf01bf410, | ||
| 746 | /* 0x07a6: ctx_redswitch */ | ||
| 656 | 0xe7f100f8, | 747 | 0xe7f100f8, |
| 657 | 0xe3f04170, | 748 | 0xe5f00200, |
| 658 | 0x6821f440, | 749 | 0x20e5f040, |
| 659 | 0xf410f4f0, | 750 | 0xf110e5f0, |
| 751 | 0xf0850007, | ||
| 752 | 0x0ed00103, | ||
| 753 | 0xf004bd00, | ||
| 754 | /* 0x07c2: ctx_redswitch_delay */ | ||
| 755 | 0xf2b608f7, | ||
| 756 | 0xfd1bf401, | ||
| 757 | 0x0400e5f1, | ||
| 758 | 0x0100e5f1, | ||
| 759 | 0x850007f1, | ||
| 760 | 0xd00103f0, | ||
| 761 | 0x04bd000e, | ||
| 762 | /* 0x07de: ctx_86c */ | ||
| 763 | 0x07f100f8, | ||
| 764 | 0x03f02300, | ||
| 765 | 0x000fd002, | ||
| 766 | 0xffb904bd, | ||
| 767 | 0x14e7f102, | ||
| 768 | 0x40e3f08a, | ||
| 769 | 0xb99d21f4, | ||
| 770 | 0xe7f102ff, | ||
| 771 | 0xe3f0a88c, | ||
| 772 | 0x9d21f441, | ||
| 773 | /* 0x0806: ctx_mem */ | ||
| 774 | 0x07f100f8, | ||
| 775 | 0x03f08400, | ||
| 776 | 0x000fd002, | ||
| 777 | /* 0x0812: ctx_mem_wait */ | ||
| 778 | 0xf7f104bd, | ||
| 779 | 0xf3f08400, | ||
| 780 | 0x00ffcf02, | ||
| 781 | 0xf405fffd, | ||
| 660 | 0x00f8f31b, | 782 | 0x00f8f31b, |
| 661 | /* 0x064c: ctx_redswitch */ | 783 | /* 0x0824: ctx_load */ |
| 662 | 0x0614e7f1, | ||
| 663 | 0xf106e4b6, | ||
| 664 | 0xd00270f7, | ||
| 665 | 0xf7f000ef, | ||
| 666 | /* 0x065d: ctx_redswitch_delay */ | ||
| 667 | 0x01f2b608, | ||
| 668 | 0xf1fd1bf4, | ||
| 669 | 0xd00770f7, | ||
| 670 | 0x00f800ef, | ||
| 671 | /* 0x066c: ctx_86c */ | ||
| 672 | 0x086ce7f1, | ||
| 673 | 0xd006e4b6, | ||
| 674 | 0xe7f100ef, | ||
| 675 | 0xe3f08a14, | ||
| 676 | 0x8d21f440, | ||
| 677 | 0xa86ce7f1, | ||
| 678 | 0xf441e3f0, | ||
| 679 | 0x00f88d21, | ||
| 680 | /* 0x068c: ctx_load */ | ||
| 681 | 0x99f094bd, | 784 | 0x99f094bd, |
| 682 | 0x0007f105, | 785 | 0x0007f105, |
| 683 | 0x0203f037, | 786 | 0x0203f037, |
| 684 | 0xbd0009d0, | 787 | 0xbd0009d0, |
| 685 | 0x0ca7f004, | 788 | 0x0ca7f004, |
| 686 | 0xf1c921f4, | 789 | 0xbdd021f4, |
| 687 | 0xb60a2417, | 790 | 0x0007f1f4, |
| 688 | 0x10d00614, | 791 | 0x0203f089, |
| 689 | 0x0037f100, | 792 | 0xbd000fd0, |
| 690 | 0x0634b60b, | 793 | 0x0007f104, |
| 691 | 0xf14032d0, | 794 | 0x0203f0c1, |
| 692 | 0xb60a0c17, | 795 | 0xbd0002d0, |
| 693 | 0x47f00614, | 796 | 0x0007f104, |
| 694 | 0x0012d007, | 797 | 0x0203f083, |
| 695 | /* 0x06c7: ctx_chan_wait_0 */ | 798 | 0xbd0002d0, |
| 696 | 0xcf4014d0, | 799 | 0x07f7f004, |
| 697 | 0x44f04014, | 800 | 0x080621f5, |
| 698 | 0xfa1bf41f, | 801 | 0xc00007f1, |
| 699 | 0xfe0032d0, | 802 | 0xd00203f0, |
| 700 | 0x2af0000b, | 803 | 0x04bd0002, |
| 701 | 0x0424b61f, | 804 | 0xf0000bfe, |
| 702 | 0xbd0220b6, | 805 | 0x24b61f2a, |
| 806 | 0x0220b604, | ||
| 807 | 0x99f094bd, | ||
| 808 | 0x0007f108, | ||
| 809 | 0x0203f037, | ||
| 810 | 0xbd0009d0, | ||
| 811 | 0x0007f104, | ||
| 812 | 0x0203f081, | ||
| 813 | 0xbd0002d0, | ||
| 814 | 0x0027f104, | ||
| 815 | 0x0023f100, | ||
| 816 | 0x0225f080, | ||
| 817 | 0x880007f1, | ||
| 818 | 0xd00203f0, | ||
| 819 | 0x04bd0002, | ||
| 820 | 0xf11017f0, | ||
| 821 | 0xf0020027, | ||
| 822 | 0x12fa0223, | ||
| 823 | 0xbd03f805, | ||
| 703 | 0x0899f094, | 824 | 0x0899f094, |
| 704 | 0x370007f1, | 825 | 0x170007f1, |
| 705 | 0xd00203f0, | 826 | 0xd00203f0, |
| 706 | 0x04bd0009, | 827 | 0x04bd0009, |
| 707 | 0x0a0417f1, | 828 | 0xb6810198, |
| 708 | 0xd00614b6, | 829 | 0x02981814, |
| 709 | 0x17f10012, | 830 | 0x0825b680, |
| 710 | 0x14b60a20, | 831 | 0x800512fd, |
| 711 | 0x0227f006, | 832 | 0x94bd1601, |
| 712 | 0x800023f1, | 833 | 0xf10999f0, |
| 713 | 0xf00012d0, | 834 | 0xf0370007, |
| 714 | 0x27f11017, | ||
| 715 | 0x23f00200, | ||
| 716 | 0x0512fa02, | ||
| 717 | 0x94bd03f8, | ||
| 718 | 0xf10899f0, | ||
| 719 | 0xf0170007, | ||
| 720 | 0x09d00203, | 835 | 0x09d00203, |
| 721 | 0x9804bd00, | 836 | 0xf104bd00, |
| 722 | 0x14b68101, | 837 | 0xf0810007, |
| 723 | 0x80029818, | 838 | 0x01d00203, |
| 724 | 0xfd0825b6, | 839 | 0xf004bd00, |
| 725 | 0x01800512, | 840 | 0x07f10127, |
| 726 | 0xf094bd16, | 841 | 0x03f08800, |
| 727 | 0x07f10999, | 842 | 0x0002d002, |
| 728 | 0x03f03700, | 843 | 0x17f104bd, |
| 729 | 0x0009d002, | 844 | 0x13f00100, |
| 730 | 0x27f104bd, | 845 | 0x0501fa06, |
| 731 | 0x24b60a04, | 846 | 0x94bd03f8, |
| 732 | 0x0021d006, | 847 | 0xf10999f0, |
| 733 | 0xf10127f0, | ||
| 734 | 0xb60a2017, | ||
| 735 | 0x12d00614, | ||
| 736 | 0x0017f100, | ||
| 737 | 0x0613f001, | ||
| 738 | 0xf80501fa, | ||
| 739 | 0xf094bd03, | ||
| 740 | 0x07f10999, | ||
| 741 | 0x03f01700, | ||
| 742 | 0x0009d002, | ||
| 743 | 0x94bd04bd, | ||
| 744 | 0xf10599f0, | ||
| 745 | 0xf0170007, | 848 | 0xf0170007, |
| 746 | 0x09d00203, | 849 | 0x09d00203, |
| 747 | 0xf804bd00, | 850 | 0xbd04bd00, |
| 748 | /* 0x0795: ctx_chan */ | 851 | 0x0599f094, |
| 749 | 0x8c21f500, | 852 | 0x170007f1, |
| 750 | 0x0ca7f006, | 853 | 0xd00203f0, |
| 751 | 0xf1c921f4, | 854 | 0x04bd0009, |
| 752 | 0xb60a1017, | 855 | /* 0x0942: ctx_chan */ |
| 753 | 0x27f00614, | 856 | 0x21f500f8, |
| 754 | 0x0012d005, | 857 | 0xa7f00824, |
| 755 | /* 0x07ac: ctx_chan_wait */ | 858 | 0xd021f40c, |
| 756 | 0xfd0012cf, | 859 | 0xf505f7f0, |
| 757 | 0x1bf40522, | 860 | 0xf8080621, |
| 758 | /* 0x07b7: ctx_mmio_exec */ | 861 | /* 0x0955: ctx_mmio_exec */ |
| 759 | 0x9800f8fa, | 862 | 0x41039800, |
| 760 | 0x27f14103, | 863 | 0x810007f1, |
| 761 | 0x24b60a04, | 864 | 0xd00203f0, |
| 762 | 0x0023d006, | 865 | 0x04bd0003, |
| 763 | /* 0x07c6: ctx_mmio_loop */ | 866 | /* 0x0966: ctx_mmio_loop */ |
| 764 | 0x34c434bd, | 867 | 0x34c434bd, |
| 765 | 0x0f1bf4ff, | 868 | 0x0f1bf4ff, |
| 766 | 0x020057f1, | 869 | 0x020057f1, |
| 767 | 0xfa0653f0, | 870 | 0xfa0653f0, |
| 768 | 0x03f80535, | 871 | 0x03f80535, |
| 769 | /* 0x07d8: ctx_mmio_pull */ | 872 | /* 0x0978: ctx_mmio_pull */ |
| 770 | 0x98804e98, | 873 | 0x98804e98, |
| 771 | 0x21f4814f, | 874 | 0x21f4814f, |
| 772 | 0x0830b68d, | 875 | 0x0830b69d, |
| 773 | 0xf40112b6, | 876 | 0xf40112b6, |
| 774 | /* 0x07ea: ctx_mmio_done */ | 877 | /* 0x098a: ctx_mmio_done */ |
| 775 | 0x0398df1b, | 878 | 0x0398df1b, |
| 776 | 0x0023d016, | 879 | 0x0007f116, |
| 777 | 0xf1400080, | 880 | 0x0203f081, |
| 778 | 0xf0010017, | 881 | 0xbd0003d0, |
| 779 | 0x01fa0613, | 882 | 0x40008004, |
| 780 | 0xf803f806, | 883 | 0x010017f1, |
| 781 | /* 0x0801: ctx_xfer */ | 884 | 0xfa0613f0, |
| 782 | 0x00f7f100, | 885 | 0x03f80601, |
| 783 | 0x06f4b60c, | 886 | /* 0x09aa: ctx_xfer */ |
| 784 | 0xd004e7f0, | 887 | 0xe7f000f8, |
| 785 | /* 0x080e: ctx_xfer_idle */ | 888 | 0x0007f104, |
| 786 | 0xfecf80fe, | 889 | 0x0303f002, |
| 787 | 0x00e4f100, | 890 | 0xbd000ed0, |
| 788 | 0xf91bf420, | 891 | /* 0x09b9: ctx_xfer_idle */ |
| 789 | 0xf40611f4, | 892 | 0x00e7f104, |
| 790 | /* 0x081e: ctx_xfer_pre */ | 893 | 0x03e3f000, |
| 791 | 0xf7f00d02, | 894 | 0xf100eecf, |
| 792 | 0x6c21f510, | 895 | 0xf42000e4, |
| 793 | 0x1c11f406, | 896 | 0x11f4f21b, |
| 794 | /* 0x0828: ctx_xfer_pre_load */ | 897 | 0x0d02f406, |
| 795 | 0xf502f7f0, | 898 | /* 0x09d0: ctx_xfer_pre */ |
| 796 | 0xf5062b21, | 899 | 0xf510f7f0, |
| 797 | 0xf5063a21, | 900 | 0xf407de21, |
| 798 | 0xbd064c21, | 901 | /* 0x09da: ctx_xfer_pre_load */ |
| 799 | 0x2b21f5f4, | 902 | 0xf7f01c11, |
| 800 | 0x8c21f506, | 903 | 0x7f21f502, |
| 801 | /* 0x0841: ctx_xfer_exec */ | 904 | 0x9121f507, |
| 802 | 0x16019806, | 905 | 0xa621f507, |
| 803 | 0x041427f1, | 906 | 0xf5f4bd07, |
| 804 | 0xd00624b6, | 907 | 0xf5077f21, |
| 805 | 0xe7f10020, | 908 | /* 0x09f3: ctx_xfer_exec */ |
| 806 | 0xe3f0a500, | 909 | 0x98082421, |
| 807 | 0x021fb941, | 910 | 0x24bd1601, |
| 808 | 0xb68d21f4, | 911 | 0x050007f1, |
| 809 | 0xfcf004e0, | 912 | 0xd00103f0, |
| 810 | 0x022cf001, | 913 | 0x04bd0002, |
| 811 | 0xfd0124b6, | 914 | 0xf1021fb9, |
| 812 | 0x21f405f2, | 915 | 0xf0a500e7, |
| 813 | 0xfc17f18d, | 916 | 0x21f441e3, |
| 814 | 0x0213f04a, | 917 | 0x01fcf09d, |
| 815 | 0xd00c27f0, | 918 | 0xb6022cf0, |
| 816 | 0x21f50012, | 919 | 0xf2fd0124, |
| 817 | 0x27f10215, | 920 | 0x02ffb905, |
| 818 | 0x23f047fc, | 921 | 0xa504e7f1, |
| 819 | 0x0020d002, | 922 | 0xf441e3f0, |
| 923 | 0x21f59d21, | ||
| 924 | 0x24bd026a, | ||
| 925 | 0x47fc07f1, | ||
| 926 | 0xd00203f0, | ||
| 927 | 0x04bd0002, | ||
| 820 | 0xb6012cf0, | 928 | 0xb6012cf0, |
| 821 | 0x12d00320, | 929 | 0x07f10320, |
| 822 | 0x01acf000, | 930 | 0x03f04afc, |
| 823 | 0xf006a5f0, | 931 | 0x0002d002, |
| 824 | 0x0c9800b7, | 932 | 0xacf004bd, |
| 825 | 0x010d9800, | 933 | 0x06a5f001, |
| 826 | 0xf500e7f0, | 934 | 0x9800b7f0, |
| 827 | 0xf0016621, | 935 | 0x0d98000c, |
| 828 | 0x21f508a7, | 936 | 0x00e7f001, |
| 829 | 0x21f50109, | 937 | 0x016f21f5, |
| 830 | 0x01f40215, | 938 | 0xf508a7f0, |
| 831 | 0x0ca7f022, | 939 | 0xf5011021, |
| 832 | 0xf1c921f4, | 940 | 0xf4025e21, |
| 833 | 0xb60a1017, | 941 | 0xa7f01301, |
| 834 | 0x27f00614, | 942 | 0xd021f40c, |
| 835 | 0x0012d005, | 943 | 0xf505f7f0, |
| 836 | /* 0x08c8: ctx_xfer_post_save_wait */ | 944 | 0xf4080621, |
| 837 | 0xfd0012cf, | 945 | /* 0x0a82: ctx_xfer_post */ |
| 838 | 0x1bf40522, | 946 | 0xf7f02e02, |
| 839 | 0x2e02f4fa, | 947 | 0x7f21f502, |
| 840 | /* 0x08d4: ctx_xfer_post */ | 948 | 0xf5f4bd07, |
| 841 | 0xf502f7f0, | 949 | 0xf507de21, |
| 842 | 0xbd062b21, | 950 | 0xf5027f21, |
| 843 | 0x6c21f5f4, | 951 | 0xbd079121, |
| 844 | 0x3421f506, | 952 | 0x7f21f5f4, |
| 845 | 0x3a21f502, | 953 | 0x1011f407, |
| 846 | 0xf5f4bd06, | 954 | 0xfd400198, |
| 847 | 0xf4062b21, | 955 | 0x0bf40511, |
| 848 | 0x01981011, | 956 | 0x5521f507, |
| 849 | 0x0511fd40, | 957 | /* 0x0aad: ctx_xfer_no_post_mmio */ |
| 850 | 0xf5070bf4, | 958 | /* 0x0aad: ctx_xfer_done */ |
| 851 | /* 0x08ff: ctx_xfer_no_post_mmio */ | 959 | 0x0000f809, |
| 852 | /* 0x08ff: ctx_xfer_done */ | ||
| 853 | 0xf807b721, | ||
| 854 | 0x00000000, | ||
| 855 | 0x00000000, | ||
| 856 | 0x00000000, | ||
| 857 | 0x00000000, | ||
| 858 | 0x00000000, | ||
| 859 | 0x00000000, | ||
| 860 | 0x00000000, | ||
| 861 | 0x00000000, | ||
| 862 | 0x00000000, | ||
| 863 | 0x00000000, | ||
| 864 | 0x00000000, | ||
| 865 | 0x00000000, | ||
| 866 | 0x00000000, | ||
| 867 | 0x00000000, | ||
| 868 | 0x00000000, | ||
| 869 | 0x00000000, | ||
| 870 | 0x00000000, | ||
| 871 | 0x00000000, | ||
| 872 | 0x00000000, | ||
| 873 | 0x00000000, | ||
| 874 | 0x00000000, | ||
| 875 | 0x00000000, | ||
| 876 | 0x00000000, | ||
| 877 | 0x00000000, | ||
| 878 | 0x00000000, | ||
| 879 | 0x00000000, | ||
| 880 | 0x00000000, | ||
| 881 | 0x00000000, | ||
| 882 | 0x00000000, | ||
| 883 | 0x00000000, | ||
| 884 | 0x00000000, | ||
| 885 | 0x00000000, | ||
| 886 | 0x00000000, | ||
| 887 | 0x00000000, | ||
| 888 | 0x00000000, | ||
| 889 | 0x00000000, | ||
| 890 | 0x00000000, | ||
| 891 | 0x00000000, | ||
| 892 | 0x00000000, | ||
| 893 | 0x00000000, | ||
| 894 | 0x00000000, | ||
| 895 | 0x00000000, | ||
| 896 | 0x00000000, | ||
| 897 | 0x00000000, | ||
| 898 | 0x00000000, | 960 | 0x00000000, |
| 899 | 0x00000000, | 961 | 0x00000000, |
| 900 | 0x00000000, | 962 | 0x00000000, |
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc index 33a5a82eccbd..6ffe28307dbd 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc +++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc | |||
| @@ -28,28 +28,135 @@ | |||
| 28 | #define GF117 0xd7 | 28 | #define GF117 0xd7 |
| 29 | #define GK100 0xe0 | 29 | #define GK100 0xe0 |
| 30 | #define GK110 0xf0 | 30 | #define GK110 0xf0 |
| 31 | #define GK208 0x108 | ||
| 31 | 32 | ||
| 33 | #define NV_PGRAPH_FECS_INTR_ACK 0x409004 | ||
| 34 | #define NV_PGRAPH_FECS_INTR 0x409008 | ||
| 35 | #define NV_PGRAPH_FECS_INTR_FWMTHD 0x00000400 | ||
| 36 | #define NV_PGRAPH_FECS_INTR_CHSW 0x00000100 | ||
| 37 | #define NV_PGRAPH_FECS_INTR_FIFO 0x00000004 | ||
| 38 | #define NV_PGRAPH_FECS_INTR_MODE 0x40900c | ||
| 39 | #define NV_PGRAPH_FECS_INTR_MODE_FIFO 0x00000004 | ||
| 40 | #define NV_PGRAPH_FECS_INTR_MODE_FIFO_LEVEL 0x00000004 | ||
| 41 | #define NV_PGRAPH_FECS_INTR_MODE_FIFO_EDGE 0x00000000 | ||
| 42 | #define NV_PGRAPH_FECS_INTR_EN_SET 0x409010 | ||
| 43 | #define NV_PGRAPH_FECS_INTR_EN_SET_FIFO 0x00000004 | ||
| 44 | #define NV_PGRAPH_FECS_INTR_ROUTE 0x40901c | ||
| 45 | #define NV_PGRAPH_FECS_ACCESS 0x409048 | ||
| 46 | #define NV_PGRAPH_FECS_ACCESS_FIFO 0x00000002 | ||
| 47 | #define NV_PGRAPH_FECS_FIFO_DATA 0x409064 | ||
| 48 | #define NV_PGRAPH_FECS_FIFO_CMD 0x409068 | ||
| 49 | #define NV_PGRAPH_FECS_FIFO_ACK 0x409074 | ||
| 50 | #define NV_PGRAPH_FECS_CAPS 0x409108 | ||
| 32 | #define NV_PGRAPH_FECS_SIGNAL 0x409400 | 51 | #define NV_PGRAPH_FECS_SIGNAL 0x409400 |
| 52 | #define NV_PGRAPH_FECS_IROUTE 0x409404 | ||
| 53 | #define NV_PGRAPH_FECS_BAR_MASK0 0x40940c | ||
| 54 | #define NV_PGRAPH_FECS_BAR_MASK1 0x409410 | ||
| 55 | #define NV_PGRAPH_FECS_BAR 0x409414 | ||
| 56 | #define NV_PGRAPH_FECS_BAR_SET 0x409418 | ||
| 57 | #define NV_PGRAPH_FECS_RED_SWITCH 0x409614 | ||
| 58 | #define NV_PGRAPH_FECS_RED_SWITCH_ENABLE_ROP 0x00000400 | ||
| 59 | #define NV_PGRAPH_FECS_RED_SWITCH_ENABLE_GPC 0x00000200 | ||
| 60 | #define NV_PGRAPH_FECS_RED_SWITCH_ENABLE_MAIN 0x00000100 | ||
| 61 | #define NV_PGRAPH_FECS_RED_SWITCH_POWER_ROP 0x00000040 | ||
| 62 | #define NV_PGRAPH_FECS_RED_SWITCH_POWER_GPC 0x00000020 | ||
| 63 | #define NV_PGRAPH_FECS_RED_SWITCH_POWER_MAIN 0x00000010 | ||
| 64 | #define NV_PGRAPH_FECS_RED_SWITCH_PAUSE_GPC 0x00000002 | ||
| 65 | #define NV_PGRAPH_FECS_RED_SWITCH_PAUSE_MAIN 0x00000001 | ||
| 66 | #define NV_PGRAPH_FECS_MMCTX_SAVE_SWBASE 0x409700 | ||
| 67 | #define NV_PGRAPH_FECS_MMCTX_LOAD_SWBASE 0x409704 | ||
| 68 | #define NV_PGRAPH_FECS_MMCTX_LOAD_COUNT 0x40974c | ||
| 69 | #define NV_PGRAPH_FECS_MMCTX_SAVE_SWBASE 0x409700 | ||
| 70 | #define NV_PGRAPH_FECS_MMCTX_LOAD_SWBASE 0x409704 | ||
| 71 | #define NV_PGRAPH_FECS_MMCTX_BASE 0x409710 | ||
| 72 | #define NV_PGRAPH_FECS_MMCTX_CTRL 0x409714 | ||
| 73 | #define NV_PGRAPH_FECS_MMCTX_MULTI_STRIDE 0x409718 | ||
| 74 | #define NV_PGRAPH_FECS_MMCTX_MULTI_MASK 0x40971c | ||
| 75 | #define NV_PGRAPH_FECS_MMCTX_QUEUE 0x409720 | ||
| 76 | #define NV_PGRAPH_FECS_MMIO_CTRL 0x409728 | ||
| 77 | #define NV_PGRAPH_FECS_MMIO_RDVAL 0x40972c | ||
| 78 | #define NV_PGRAPH_FECS_MMIO_WRVAL 0x409730 | ||
| 79 | #define NV_PGRAPH_FECS_MMCTX_LOAD_COUNT 0x40974c | ||
| 33 | #if CHIPSET < GK110 | 80 | #if CHIPSET < GK110 |
| 34 | #define NV_PGRAPH_FECS_CC_SCRATCH_VAL(n) ((n) * 4 + 0x409800) | 81 | #define NV_PGRAPH_FECS_CC_SCRATCH_VAL(n) ((n) * 4 + 0x409800) |
| 35 | #define NV_PGRAPH_FECS_CC_SCRATCH_SET(n) ((n) * 4 + 0x409820) | 82 | #define NV_PGRAPH_FECS_CC_SCRATCH_SET(n) ((n) * 4 + 0x409820) |
| 36 | #define NV_PGRAPH_FECS_CC_SCRATCH_CLR(n) ((n) * 4 + 0x409840) | 83 | #define NV_PGRAPH_FECS_CC_SCRATCH_CLR(n) ((n) * 4 + 0x409840) |
| 84 | #define NV_PGRAPH_FECS_UNK86C 0x40986c | ||
| 37 | #else | 85 | #else |
| 38 | #define NV_PGRAPH_FECS_CC_SCRATCH_VAL(n) ((n) * 4 + 0x409800) | 86 | #define NV_PGRAPH_FECS_CC_SCRATCH_VAL(n) ((n) * 4 + 0x409800) |
| 39 | #define NV_PGRAPH_FECS_CC_SCRATCH_CLR(n) ((n) * 4 + 0x409840) | 87 | #define NV_PGRAPH_FECS_CC_SCRATCH_CLR(n) ((n) * 4 + 0x409840) |
| 88 | #define NV_PGRAPH_FECS_UNK86C 0x40988c | ||
| 40 | #define NV_PGRAPH_FECS_CC_SCRATCH_SET(n) ((n) * 4 + 0x4098c0) | 89 | #define NV_PGRAPH_FECS_CC_SCRATCH_SET(n) ((n) * 4 + 0x4098c0) |
| 41 | #endif | 90 | #endif |
| 91 | #define NV_PGRAPH_FECS_STRANDS_CNT 0x409880 | ||
| 92 | #define NV_PGRAPH_FECS_STRAND_SAVE_SWBASE 0x409908 | ||
| 93 | #define NV_PGRAPH_FECS_STRAND_LOAD_SWBASE 0x40990c | ||
| 94 | #define NV_PGRAPH_FECS_STRAND_WORDS 0x409910 | ||
| 95 | #define NV_PGRAPH_FECS_STRAND_DATA 0x409918 | ||
| 96 | #define NV_PGRAPH_FECS_STRAND_SELECT 0x40991c | ||
| 97 | #define NV_PGRAPH_FECS_STRAND_CMD 0x409928 | ||
| 98 | #define NV_PGRAPH_FECS_STRAND_CMD_SEEK 0x00000001 | ||
| 99 | #define NV_PGRAPH_FECS_STRAND_CMD_GET_INFO 0x00000002 | ||
| 100 | #define NV_PGRAPH_FECS_STRAND_CMD_SAVE 0x00000003 | ||
| 101 | #define NV_PGRAPH_FECS_STRAND_CMD_LOAD 0x00000004 | ||
| 102 | #define NV_PGRAPH_FECS_STRAND_CMD_ACTIVATE_FILTER 0x0000000a | ||
| 103 | #define NV_PGRAPH_FECS_STRAND_CMD_DEACTIVATE_FILTER 0x0000000b | ||
| 104 | #define NV_PGRAPH_FECS_STRAND_CMD_ENABLE 0x0000000c | ||
| 105 | #define NV_PGRAPH_FECS_STRAND_CMD_DISABLE 0x0000000d | ||
| 106 | #define NV_PGRAPH_FECS_STRAND_FILTER 0x40993c | ||
| 107 | #define NV_PGRAPH_FECS_MEM_BASE 0x409a04 | ||
| 108 | #define NV_PGRAPH_FECS_MEM_CHAN 0x409a0c | ||
| 109 | #define NV_PGRAPH_FECS_MEM_CMD 0x409a10 | ||
| 110 | #define NV_PGRAPH_FECS_MEM_CMD_LOAD_CHAN 0x00000007 | ||
| 111 | #define NV_PGRAPH_FECS_MEM_TARGET 0x409a20 | ||
| 112 | #define NV_PGRAPH_FECS_MEM_TARGET_UNK31 0x80000000 | ||
| 113 | #define NV_PGRAPH_FECS_MEM_TARGET_AS 0x0000001f | ||
| 114 | #define NV_PGRAPH_FECS_MEM_TARGET_AS_VM 0x00000001 | ||
| 115 | #define NV_PGRAPH_FECS_MEM_TARGET_AS_VRAM 0x00000002 | ||
| 116 | #define NV_PGRAPH_FECS_CHAN_ADDR 0x409b00 | ||
| 117 | #define NV_PGRAPH_FECS_CHAN_NEXT 0x409b04 | ||
| 118 | #define NV_PGRAPH_FECS_CHSW 0x409b0c | ||
| 119 | #define NV_PGRAPH_FECS_CHSW_ACK 0x00000001 | ||
| 42 | #define NV_PGRAPH_FECS_INTR_UP_SET 0x409c1c | 120 | #define NV_PGRAPH_FECS_INTR_UP_SET 0x409c1c |
| 121 | #define NV_PGRAPH_FECS_INTR_UP_EN 0x409c24 | ||
| 43 | 122 | ||
| 123 | #define NV_PGRAPH_GPCX_GPCCS_INTR_ACK 0x41a004 | ||
| 124 | #define NV_PGRAPH_GPCX_GPCCS_INTR 0x41a008 | ||
| 125 | #define NV_PGRAPH_GPCX_GPCCS_INTR_FIFO 0x00000004 | ||
| 126 | #define NV_PGRAPH_GPCX_GPCCS_INTR_EN_SET 0x41a010 | ||
| 127 | #define NV_PGRAPH_GPCX_GPCCS_INTR_EN_SET_FIFO 0x00000004 | ||
| 128 | #define NV_PGRAPH_GPCX_GPCCS_INTR_ROUTE 0x41a01c | ||
| 129 | #define NV_PGRAPH_GPCX_GPCCS_ACCESS 0x41a048 | ||
| 130 | #define NV_PGRAPH_GPCX_GPCCS_ACCESS_FIFO 0x00000002 | ||
| 131 | #define NV_PGRAPH_GPCX_GPCCS_FIFO_DATA 0x41a064 | ||
| 132 | #define NV_PGRAPH_GPCX_GPCCS_FIFO_CMD 0x41a068 | ||
| 133 | #define NV_PGRAPH_GPCX_GPCCS_FIFO_ACK 0x41a074 | ||
| 134 | #define NV_PGRAPH_GPCX_GPCCS_UNITS 0x41a608 | ||
| 135 | #define NV_PGRAPH_GPCX_GPCCS_RED_SWITCH 0x41a614 | ||
| 136 | #define NV_PGRAPH_GPCX_GPCCS_RED_SWITCH_UNK11 0x00000800 | ||
| 137 | #define NV_PGRAPH_GPCX_GPCCS_RED_SWITCH_ENABLE 0x00000200 | ||
| 138 | #define NV_PGRAPH_GPCX_GPCCS_RED_SWITCH_POWER 0x00000020 | ||
| 139 | #define NV_PGRAPH_GPCX_GPCCS_RED_SWITCH_PAUSE 0x00000002 | ||
| 140 | #define NV_PGRAPH_GPCX_GPCCS_MYINDEX 0x41a618 | ||
| 141 | #define NV_PGRAPH_GPCX_GPCCS_MMCTX_SAVE_SWBASE 0x41a700 | ||
| 142 | #define NV_PGRAPH_GPCX_GPCCS_MMCTX_LOAD_SWBASE 0x41a704 | ||
| 143 | #define NV_PGRAPH_GPCX_GPCCS_MMCTX_LOAD_COUNT 0x41a74c | ||
| 44 | #if CHIPSET < GK110 | 144 | #if CHIPSET < GK110 |
| 45 | #define NV_PGRAPH_GPCX_GPCCS_CC_SCRATCH_VAL(n) ((n) * 4 + 0x41a800) | 145 | #define NV_PGRAPH_GPCX_GPCCS_CC_SCRATCH_VAL(n) ((n) * 4 + 0x41a800) |
| 46 | #define NV_PGRAPH_GPCX_GPCCS_CC_SCRATCH_SET(n) ((n) * 4 + 0x41a820) | 146 | #define NV_PGRAPH_GPCX_GPCCS_CC_SCRATCH_SET(n) ((n) * 4 + 0x41a820) |
| 47 | #define NV_PGRAPH_GPCX_GPCCS_CC_SCRATCH_CLR(n) ((n) * 4 + 0x41a840) | 147 | #define NV_PGRAPH_GPCX_GPCCS_CC_SCRATCH_CLR(n) ((n) * 4 + 0x41a840) |
| 148 | #define NV_PGRAPH_GPCX_GPCCS_UNK86C 0x41a86c | ||
| 48 | #else | 149 | #else |
| 49 | #define NV_PGRAPH_GPCX_GPCCS_CC_SCRATCH_VAL(n) ((n) * 4 + 0x41a800) | 150 | #define NV_PGRAPH_GPCX_GPCCS_CC_SCRATCH_VAL(n) ((n) * 4 + 0x41a800) |
| 50 | #define NV_PGRAPH_GPCX_GPCCS_CC_SCRATCH_CLR(n) ((n) * 4 + 0x41a840) | 151 | #define NV_PGRAPH_GPCX_GPCCS_CC_SCRATCH_CLR(n) ((n) * 4 + 0x41a840) |
| 152 | #define NV_PGRAPH_GPCX_GPCCS_UNK86C 0x41a88c | ||
| 51 | #define NV_PGRAPH_GPCX_GPCCS_CC_SCRATCH_SET(n) ((n) * 4 + 0x41a8c0) | 153 | #define NV_PGRAPH_GPCX_GPCCS_CC_SCRATCH_SET(n) ((n) * 4 + 0x41a8c0) |
| 52 | #endif | 154 | #endif |
| 155 | #define NV_PGRAPH_GPCX_GPCCS_STRAND_SELECT 0x41a91c | ||
| 156 | #define NV_PGRAPH_GPCX_GPCCS_STRAND_CMD 0x41a928 | ||
| 157 | #define NV_PGRAPH_GPCX_GPCCS_STRAND_CMD_SAVE 0x00000003 | ||
| 158 | #define NV_PGRAPH_GPCX_GPCCS_STRAND_CMD_LOAD 0x00000004 | ||
| 159 | #define NV_PGRAPH_GPCX_GPCCS_MEM_BASE 0x41aa04 | ||
| 53 | 160 | ||
| 54 | #define mmctx_data(r,c) .b32 (((c - 1) << 26) | r) | 161 | #define mmctx_data(r,c) .b32 (((c - 1) << 26) | r) |
| 55 | #define queue_init .skip 72 // (2 * 4) + ((8 * 4) * 2) | 162 | #define queue_init .skip 72 // (2 * 4) + ((8 * 4) * 2) |
| @@ -65,24 +172,50 @@ | |||
| 65 | #define T_LCHAN 8 | 172 | #define T_LCHAN 8 |
| 66 | #define T_LCTXH 9 | 173 | #define T_LCTXH 9 |
| 67 | 174 | ||
| 68 | #define nv_mkmm(rv,r) /* | 175 | #if CHIPSET < GK208 |
| 69 | */ movw rv ((r) & 0x0000fffc) /* | 176 | #define imm32(reg,val) /* |
| 70 | */ sethi rv ((r) & 0x00ff0000) | 177 | */ movw reg ((val) & 0x0000ffff) /* |
| 178 | */ sethi reg ((val) & 0xffff0000) | ||
| 179 | #else | ||
| 180 | #define imm32(reg,val) /* | ||
| 181 | */ mov reg (val) | ||
| 182 | #endif | ||
| 183 | |||
| 71 | #define nv_mkio(rv,r,i) /* | 184 | #define nv_mkio(rv,r,i) /* |
| 72 | */ nv_mkmm(rv, (((r) & 0xffc) << 6) | ((i) << 2)) | 185 | */ imm32(rv, (((r) & 0xffc) << 6) | ((i) << 2)) |
| 186 | |||
| 187 | #define hash # | ||
| 188 | #define fn(a) a | ||
| 189 | #if CHIPSET < GK208 | ||
| 190 | #define call(a) call fn(hash)a | ||
| 191 | #else | ||
| 192 | #define call(a) lcall fn(hash)a | ||
| 193 | #endif | ||
| 73 | 194 | ||
| 74 | #define nv_iord(rv,r,i) /* | 195 | #define nv_iord(rv,r,i) /* |
| 75 | */ nv_mkio(rv,r,i) /* | 196 | */ nv_mkio(rv,r,i) /* |
| 76 | */ iord rv I[rv] | 197 | */ iord rv I[rv] |
| 198 | |||
| 77 | #define nv_iowr(r,i,rv) /* | 199 | #define nv_iowr(r,i,rv) /* |
| 78 | */ nv_mkio($r0,r,i) /* | 200 | */ nv_mkio($r0,r,i) /* |
| 79 | */ iowr I[$r0] rv /* | 201 | */ iowr I[$r0] rv /* |
| 80 | */ clear b32 $r0 | 202 | */ clear b32 $r0 |
| 81 | 203 | ||
| 204 | #define nv_rd32(reg,addr) /* | ||
| 205 | */ imm32($r14, addr) /* | ||
| 206 | */ call(nv_rd32) /* | ||
| 207 | */ mov b32 reg $r15 | ||
| 208 | |||
| 209 | #define nv_wr32(addr,reg) /* | ||
| 210 | */ mov b32 $r15 reg /* | ||
| 211 | */ imm32($r14, addr) /* | ||
| 212 | */ call(nv_wr32) | ||
| 213 | |||
| 82 | #define trace_set(bit) /* | 214 | #define trace_set(bit) /* |
| 83 | */ clear b32 $r9 /* | 215 | */ clear b32 $r9 /* |
| 84 | */ bset $r9 bit /* | 216 | */ bset $r9 bit /* |
| 85 | */ nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_SET(7), 0, $r9) | 217 | */ nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_SET(7), 0, $r9) |
| 218 | |||
| 86 | #define trace_clr(bit) /* | 219 | #define trace_clr(bit) /* |
| 87 | */ clear b32 $r9 /* | 220 | */ clear b32 $r9 /* |
| 88 | */ bset $r9 bit /* | 221 | */ bset $r9 bit /* |
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv108.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv108.c new file mode 100644 index 000000000000..e1af65ead379 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv108.c | |||
| @@ -0,0 +1,236 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2013 Red Hat Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | * Authors: Ben Skeggs <bskeggs@redhat.com> | ||
| 23 | */ | ||
| 24 | |||
| 25 | #include "nvc0.h" | ||
| 26 | |||
| 27 | /******************************************************************************* | ||
| 28 | * Graphics object classes | ||
| 29 | ******************************************************************************/ | ||
| 30 | |||
| 31 | static struct nouveau_oclass | ||
| 32 | nv108_graph_sclass[] = { | ||
| 33 | { 0x902d, &nouveau_object_ofuncs }, | ||
| 34 | { 0xa140, &nouveau_object_ofuncs }, | ||
| 35 | { 0xa197, &nouveau_object_ofuncs }, | ||
| 36 | { 0xa1c0, &nouveau_object_ofuncs }, | ||
| 37 | {} | ||
| 38 | }; | ||
| 39 | |||
| 40 | /******************************************************************************* | ||
| 41 | * PGRAPH engine/subdev functions | ||
| 42 | ******************************************************************************/ | ||
| 43 | |||
| 44 | static struct nvc0_graph_init | ||
| 45 | nv108_graph_init_regs[] = { | ||
| 46 | { 0x400080, 1, 0x04, 0x003083c2 }, | ||
| 47 | { 0x400088, 1, 0x04, 0x0001bfe7 }, | ||
| 48 | { 0x40008c, 1, 0x04, 0x00000000 }, | ||
| 49 | { 0x400090, 1, 0x04, 0x00000030 }, | ||
| 50 | { 0x40013c, 1, 0x04, 0x003901f7 }, | ||
| 51 | { 0x400140, 1, 0x04, 0x00000100 }, | ||
| 52 | { 0x400144, 1, 0x04, 0x00000000 }, | ||
| 53 | { 0x400148, 1, 0x04, 0x00000110 }, | ||
| 54 | { 0x400138, 1, 0x04, 0x00000000 }, | ||
| 55 | { 0x400130, 2, 0x04, 0x00000000 }, | ||
| 56 | { 0x400124, 1, 0x04, 0x00000002 }, | ||
| 57 | {} | ||
| 58 | }; | ||
| 59 | |||
| 60 | struct nvc0_graph_init | ||
| 61 | nv108_graph_init_unk58xx[] = { | ||
| 62 | { 0x405844, 1, 0x04, 0x00ffffff }, | ||
| 63 | { 0x405850, 1, 0x04, 0x00000000 }, | ||
| 64 | { 0x405900, 1, 0x04, 0x00000000 }, | ||
| 65 | { 0x405908, 1, 0x04, 0x00000000 }, | ||
| 66 | { 0x405928, 1, 0x04, 0x00000000 }, | ||
| 67 | { 0x40592c, 1, 0x04, 0x00000000 }, | ||
| 68 | {} | ||
| 69 | }; | ||
| 70 | |||
| 71 | static struct nvc0_graph_init | ||
| 72 | nv108_graph_init_gpc[] = { | ||
| 73 | { 0x418408, 1, 0x04, 0x00000000 }, | ||
| 74 | { 0x4184a0, 3, 0x04, 0x00000000 }, | ||
| 75 | { 0x418604, 1, 0x04, 0x00000000 }, | ||
| 76 | { 0x418680, 1, 0x04, 0x00000000 }, | ||
| 77 | { 0x418714, 1, 0x04, 0x00000000 }, | ||
| 78 | { 0x418384, 2, 0x04, 0x00000000 }, | ||
| 79 | { 0x418814, 3, 0x04, 0x00000000 }, | ||
| 80 | { 0x418b04, 1, 0x04, 0x00000000 }, | ||
| 81 | { 0x4188c8, 2, 0x04, 0x00000000 }, | ||
| 82 | { 0x4188d0, 1, 0x04, 0x00010000 }, | ||
| 83 | { 0x4188d4, 1, 0x04, 0x00000201 }, | ||
| 84 | { 0x418910, 1, 0x04, 0x00010001 }, | ||
| 85 | { 0x418914, 1, 0x04, 0x00000301 }, | ||
| 86 | { 0x418918, 1, 0x04, 0x00800000 }, | ||
| 87 | { 0x418980, 1, 0x04, 0x77777770 }, | ||
| 88 | { 0x418984, 3, 0x04, 0x77777777 }, | ||
| 89 | { 0x418c04, 1, 0x04, 0x00000000 }, | ||
| 90 | { 0x418c64, 2, 0x04, 0x00000000 }, | ||
| 91 | { 0x418c88, 1, 0x04, 0x00000000 }, | ||
| 92 | { 0x418cb4, 2, 0x04, 0x00000000 }, | ||
| 93 | { 0x418d00, 1, 0x04, 0x00000000 }, | ||
| 94 | { 0x418d28, 2, 0x04, 0x00000000 }, | ||
| 95 | { 0x418f00, 1, 0x04, 0x00000400 }, | ||
| 96 | { 0x418f08, 1, 0x04, 0x00000000 }, | ||
| 97 | { 0x418f20, 2, 0x04, 0x00000000 }, | ||
| 98 | { 0x418e00, 1, 0x04, 0x00000000 }, | ||
| 99 | { 0x418e08, 1, 0x04, 0x00000000 }, | ||
| 100 | { 0x418e1c, 2, 0x04, 0x00000000 }, | ||
| 101 | { 0x41900c, 1, 0x04, 0x00000000 }, | ||
| 102 | { 0x419018, 1, 0x04, 0x00000000 }, | ||
| 103 | {} | ||
| 104 | }; | ||
| 105 | |||
| 106 | static struct nvc0_graph_init | ||
| 107 | nv108_graph_init_tpc[] = { | ||
| 108 | { 0x419d0c, 1, 0x04, 0x00000000 }, | ||
| 109 | { 0x419d10, 1, 0x04, 0x00000014 }, | ||
| 110 | { 0x419ab0, 1, 0x04, 0x00000000 }, | ||
| 111 | { 0x419ac8, 1, 0x04, 0x00000000 }, | ||
| 112 | { 0x419ab8, 1, 0x04, 0x000000e7 }, | ||
| 113 | { 0x419abc, 2, 0x04, 0x00000000 }, | ||
| 114 | { 0x419ab4, 1, 0x04, 0x00000000 }, | ||
| 115 | { 0x419aa8, 2, 0x04, 0x00000000 }, | ||
| 116 | { 0x41980c, 1, 0x04, 0x00000010 }, | ||
| 117 | { 0x419844, 1, 0x04, 0x00000000 }, | ||
| 118 | { 0x419850, 1, 0x04, 0x00000004 }, | ||
| 119 | { 0x419854, 2, 0x04, 0x00000000 }, | ||
| 120 | { 0x419c98, 1, 0x04, 0x00000000 }, | ||
| 121 | { 0x419ca8, 1, 0x04, 0x00000000 }, | ||
| 122 | { 0x419cb0, 1, 0x04, 0x01000000 }, | ||
| 123 | { 0x419cb4, 1, 0x04, 0x00000000 }, | ||
| 124 | { 0x419cb8, 1, 0x04, 0x00b08bea }, | ||
| 125 | { 0x419c84, 1, 0x04, 0x00010384 }, | ||
| 126 | { 0x419cbc, 1, 0x04, 0x281b3646 }, | ||
| 127 | { 0x419cc0, 2, 0x04, 0x00000000 }, | ||
| 128 | { 0x419c80, 1, 0x04, 0x00000230 }, | ||
| 129 | { 0x419ccc, 2, 0x04, 0x00000000 }, | ||
| 130 | { 0x419c0c, 1, 0x04, 0x00000000 }, | ||
| 131 | { 0x419e00, 1, 0x04, 0x00000080 }, | ||
| 132 | { 0x419ea0, 1, 0x04, 0x00000000 }, | ||
| 133 | { 0x419ee4, 1, 0x04, 0x00000000 }, | ||
| 134 | { 0x419ea4, 1, 0x04, 0x00000100 }, | ||
| 135 | { 0x419ea8, 1, 0x04, 0x00000000 }, | ||
| 136 | { 0x419eb4, 1, 0x04, 0x00000000 }, | ||
| 137 | { 0x419ebc, 2, 0x04, 0x00000000 }, | ||
| 138 | { 0x419edc, 1, 0x04, 0x00000000 }, | ||
| 139 | { 0x419f00, 1, 0x04, 0x00000000 }, | ||
| 140 | { 0x419ed0, 1, 0x04, 0x00003234 }, | ||
| 141 | { 0x419f74, 1, 0x04, 0x00015555 }, | ||
| 142 | { 0x419f80, 4, 0x04, 0x00000000 }, | ||
| 143 | {} | ||
| 144 | }; | ||
| 145 | |||
| 146 | static int | ||
| 147 | nv108_graph_fini(struct nouveau_object *object, bool suspend) | ||
| 148 | { | ||
| 149 | struct nvc0_graph_priv *priv = (void *)object; | ||
| 150 | static const struct { | ||
| 151 | u32 addr; | ||
| 152 | u32 data; | ||
| 153 | } magic[] = { | ||
| 154 | { 0x020520, 0xfffffffc }, | ||
| 155 | { 0x020524, 0xfffffffe }, | ||
| 156 | { 0x020524, 0xfffffffc }, | ||
| 157 | { 0x020524, 0xfffffff8 }, | ||
| 158 | { 0x020524, 0xffffffe0 }, | ||
| 159 | { 0x020530, 0xfffffffe }, | ||
| 160 | { 0x02052c, 0xfffffffa }, | ||
| 161 | { 0x02052c, 0xfffffff0 }, | ||
| 162 | { 0x02052c, 0xffffffc0 }, | ||
| 163 | { 0x02052c, 0xffffff00 }, | ||
| 164 | { 0x02052c, 0xfffffc00 }, | ||
| 165 | { 0x02052c, 0xfffcfc00 }, | ||
| 166 | { 0x02052c, 0xfff0fc00 }, | ||
| 167 | { 0x02052c, 0xff80fc00 }, | ||
| 168 | { 0x020528, 0xfffffffe }, | ||
| 169 | { 0x020528, 0xfffffffc }, | ||
| 170 | }; | ||
| 171 | int i; | ||
| 172 | |||
| 173 | nv_mask(priv, 0x000200, 0x08001000, 0x00000000); | ||
| 174 | nv_mask(priv, 0x0206b4, 0x00000000, 0x00000000); | ||
| 175 | for (i = 0; i < ARRAY_SIZE(magic); i++) { | ||
| 176 | nv_wr32(priv, magic[i].addr, magic[i].data); | ||
| 177 | nv_wait(priv, magic[i].addr, 0x80000000, 0x00000000); | ||
| 178 | } | ||
| 179 | |||
| 180 | return nouveau_graph_fini(&priv->base, suspend); | ||
| 181 | } | ||
| 182 | |||
| 183 | static struct nvc0_graph_init * | ||
| 184 | nv108_graph_init_mmio[] = { | ||
| 185 | nv108_graph_init_regs, | ||
| 186 | nvf0_graph_init_unk40xx, | ||
| 187 | nvc0_graph_init_unk44xx, | ||
| 188 | nvc0_graph_init_unk78xx, | ||
| 189 | nvc0_graph_init_unk60xx, | ||
| 190 | nvd9_graph_init_unk64xx, | ||
| 191 | nv108_graph_init_unk58xx, | ||
| 192 | nvc0_graph_init_unk80xx, | ||
| 193 | nvf0_graph_init_unk70xx, | ||
| 194 | nvf0_graph_init_unk5bxx, | ||
| 195 | nv108_graph_init_gpc, | ||
| 196 | nv108_graph_init_tpc, | ||
| 197 | nve4_graph_init_unk, | ||
| 198 | nve4_graph_init_unk88xx, | ||
| 199 | NULL | ||
| 200 | }; | ||
| 201 | |||
| 202 | #include "fuc/hubnv108.fuc5.h" | ||
| 203 | |||
| 204 | static struct nvc0_graph_ucode | ||
| 205 | nv108_graph_fecs_ucode = { | ||
| 206 | .code.data = nv108_grhub_code, | ||
| 207 | .code.size = sizeof(nv108_grhub_code), | ||
| 208 | .data.data = nv108_grhub_data, | ||
| 209 | .data.size = sizeof(nv108_grhub_data), | ||
| 210 | }; | ||
| 211 | |||
| 212 | #include "fuc/gpcnv108.fuc5.h" | ||
| 213 | |||
| 214 | static struct nvc0_graph_ucode | ||
| 215 | nv108_graph_gpccs_ucode = { | ||
| 216 | .code.data = nv108_grgpc_code, | ||
| 217 | .code.size = sizeof(nv108_grgpc_code), | ||
| 218 | .data.data = nv108_grgpc_data, | ||
| 219 | .data.size = sizeof(nv108_grgpc_data), | ||
| 220 | }; | ||
| 221 | |||
| 222 | struct nouveau_oclass * | ||
| 223 | nv108_graph_oclass = &(struct nvc0_graph_oclass) { | ||
| 224 | .base.handle = NV_ENGINE(GR, 0x08), | ||
| 225 | .base.ofuncs = &(struct nouveau_ofuncs) { | ||
| 226 | .ctor = nvc0_graph_ctor, | ||
| 227 | .dtor = nvc0_graph_dtor, | ||
| 228 | .init = nve4_graph_init, | ||
| 229 | .fini = nv108_graph_fini, | ||
| 230 | }, | ||
| 231 | .cclass = &nv108_grctx_oclass, | ||
| 232 | .sclass = nv108_graph_sclass, | ||
| 233 | .mmio = nv108_graph_init_mmio, | ||
| 234 | .fecs.ucode = &nv108_graph_fecs_ucode, | ||
| 235 | .gpccs.ucode = &nv108_graph_gpccs_ucode, | ||
| 236 | }.base; | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c index 03de5175dd9f..30ed19c52e05 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c | |||
| @@ -304,12 +304,28 @@ nv84_graph_tlb_flush(struct nouveau_engine *engine) | |||
| 304 | return timeout ? -EBUSY : 0; | 304 | return timeout ? -EBUSY : 0; |
| 305 | } | 305 | } |
| 306 | 306 | ||
| 307 | static const struct nouveau_enum nv50_mp_exec_error_names[] = { | 307 | static const struct nouveau_bitfield nv50_mp_exec_errors[] = { |
| 308 | { 3, "STACK_UNDERFLOW", NULL }, | 308 | { 0x01, "STACK_UNDERFLOW" }, |
| 309 | { 4, "QUADON_ACTIVE", NULL }, | 309 | { 0x02, "STACK_MISMATCH" }, |
| 310 | { 8, "TIMEOUT", NULL }, | 310 | { 0x04, "QUADON_ACTIVE" }, |
| 311 | { 0x10, "INVALID_OPCODE", NULL }, | 311 | { 0x08, "TIMEOUT" }, |
| 312 | { 0x40, "BREAKPOINT", NULL }, | 312 | { 0x10, "INVALID_OPCODE" }, |
| 313 | { 0x20, "PM_OVERFLOW" }, | ||
| 314 | { 0x40, "BREAKPOINT" }, | ||
| 315 | {} | ||
| 316 | }; | ||
| 317 | |||
| 318 | static const struct nouveau_bitfield nv50_mpc_traps[] = { | ||
| 319 | { 0x0000001, "LOCAL_LIMIT_READ" }, | ||
| 320 | { 0x0000010, "LOCAL_LIMIT_WRITE" }, | ||
| 321 | { 0x0000040, "STACK_LIMIT" }, | ||
| 322 | { 0x0000100, "GLOBAL_LIMIT_READ" }, | ||
| 323 | { 0x0001000, "GLOBAL_LIMIT_WRITE" }, | ||
| 324 | { 0x0010000, "MP0" }, | ||
| 325 | { 0x0020000, "MP1" }, | ||
| 326 | { 0x0040000, "GLOBAL_LIMIT_RED" }, | ||
| 327 | { 0x0400000, "GLOBAL_LIMIT_ATOM" }, | ||
| 328 | { 0x4000000, "MP2" }, | ||
| 313 | {} | 329 | {} |
| 314 | }; | 330 | }; |
| 315 | 331 | ||
| @@ -396,6 +412,60 @@ static const struct nouveau_bitfield nv50_graph_intr_name[] = { | |||
| 396 | {} | 412 | {} |
| 397 | }; | 413 | }; |
| 398 | 414 | ||
| 415 | static const struct nouveau_bitfield nv50_graph_trap_prop[] = { | ||
| 416 | { 0x00000004, "SURF_WIDTH_OVERRUN" }, | ||
| 417 | { 0x00000008, "SURF_HEIGHT_OVERRUN" }, | ||
| 418 | { 0x00000010, "DST2D_FAULT" }, | ||
| 419 | { 0x00000020, "ZETA_FAULT" }, | ||
| 420 | { 0x00000040, "RT_FAULT" }, | ||
| 421 | { 0x00000080, "CUDA_FAULT" }, | ||
| 422 | { 0x00000100, "DST2D_STORAGE_TYPE_MISMATCH" }, | ||
| 423 | { 0x00000200, "ZETA_STORAGE_TYPE_MISMATCH" }, | ||
| 424 | { 0x00000400, "RT_STORAGE_TYPE_MISMATCH" }, | ||
| 425 | { 0x00000800, "DST2D_LINEAR_MISMATCH" }, | ||
| 426 | { 0x00001000, "RT_LINEAR_MISMATCH" }, | ||
| 427 | {} | ||
| 428 | }; | ||
| 429 | |||
| 430 | static void | ||
| 431 | nv50_priv_prop_trap(struct nv50_graph_priv *priv, | ||
| 432 | u32 ustatus_addr, u32 ustatus, u32 tp) | ||
| 433 | { | ||
| 434 | u32 e0c = nv_rd32(priv, ustatus_addr + 0x04); | ||
| 435 | u32 e10 = nv_rd32(priv, ustatus_addr + 0x08); | ||
| 436 | u32 e14 = nv_rd32(priv, ustatus_addr + 0x0c); | ||
| 437 | u32 e18 = nv_rd32(priv, ustatus_addr + 0x10); | ||
| 438 | u32 e1c = nv_rd32(priv, ustatus_addr + 0x14); | ||
| 439 | u32 e20 = nv_rd32(priv, ustatus_addr + 0x18); | ||
| 440 | u32 e24 = nv_rd32(priv, ustatus_addr + 0x1c); | ||
| 441 | |||
| 442 | /* CUDA memory: l[], g[] or stack. */ | ||
| 443 | if (ustatus & 0x00000080) { | ||
| 444 | if (e18 & 0x80000000) { | ||
| 445 | /* g[] read fault? */ | ||
| 446 | nv_error(priv, "TRAP_PROP - TP %d - CUDA_FAULT - Global read fault at address %02x%08x\n", | ||
| 447 | tp, e14, e10 | ((e18 >> 24) & 0x1f)); | ||
| 448 | e18 &= ~0x1f000000; | ||
| 449 | } else if (e18 & 0xc) { | ||
| 450 | /* g[] write fault? */ | ||
| 451 | nv_error(priv, "TRAP_PROP - TP %d - CUDA_FAULT - Global write fault at address %02x%08x\n", | ||
| 452 | tp, e14, e10 | ((e18 >> 7) & 0x1f)); | ||
| 453 | e18 &= ~0x00000f80; | ||
| 454 | } else { | ||
| 455 | nv_error(priv, "TRAP_PROP - TP %d - Unknown CUDA fault at address %02x%08x\n", | ||
| 456 | tp, e14, e10); | ||
| 457 | } | ||
| 458 | ustatus &= ~0x00000080; | ||
| 459 | } | ||
| 460 | if (ustatus) { | ||
| 461 | nv_error(priv, "TRAP_PROP - TP %d -", tp); | ||
| 462 | nouveau_bitfield_print(nv50_graph_trap_prop, ustatus); | ||
| 463 | pr_cont(" - Address %02x%08x\n", e14, e10); | ||
| 464 | } | ||
| 465 | nv_error(priv, "TRAP_PROP - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", | ||
| 466 | tp, e0c, e18, e1c, e20, e24); | ||
| 467 | } | ||
| 468 | |||
| 399 | static void | 469 | static void |
| 400 | nv50_priv_mp_trap(struct nv50_graph_priv *priv, int tpid, int display) | 470 | nv50_priv_mp_trap(struct nv50_graph_priv *priv, int tpid, int display) |
| 401 | { | 471 | { |
| @@ -420,8 +490,8 @@ nv50_priv_mp_trap(struct nv50_graph_priv *priv, int tpid, int display) | |||
| 420 | oplow = nv_rd32(priv, addr + 0x70); | 490 | oplow = nv_rd32(priv, addr + 0x70); |
| 421 | ophigh = nv_rd32(priv, addr + 0x74); | 491 | ophigh = nv_rd32(priv, addr + 0x74); |
| 422 | nv_error(priv, "TRAP_MP_EXEC - " | 492 | nv_error(priv, "TRAP_MP_EXEC - " |
| 423 | "TP %d MP %d: ", tpid, i); | 493 | "TP %d MP %d:", tpid, i); |
| 424 | nouveau_enum_print(nv50_mp_exec_error_names, status); | 494 | nouveau_bitfield_print(nv50_mp_exec_errors, status); |
| 425 | pr_cont(" at %06x warp %d, opcode %08x %08x\n", | 495 | pr_cont(" at %06x warp %d, opcode %08x %08x\n", |
| 426 | pc&0xffffff, pc >> 24, | 496 | pc&0xffffff, pc >> 24, |
| 427 | oplow, ophigh); | 497 | oplow, ophigh); |
| @@ -468,60 +538,19 @@ nv50_priv_tp_trap(struct nv50_graph_priv *priv, int type, u32 ustatus_old, | |||
| 468 | nv50_priv_mp_trap(priv, i, display); | 538 | nv50_priv_mp_trap(priv, i, display); |
| 469 | ustatus &= ~0x04030000; | 539 | ustatus &= ~0x04030000; |
| 470 | } | 540 | } |
| 471 | break; | 541 | if (ustatus && display) { |
| 472 | case 8: /* TPDMA error */ | 542 | nv_error("%s - TP%d:", name, i); |
| 473 | { | 543 | nouveau_bitfield_print(nv50_mpc_traps, ustatus); |
| 474 | u32 e0c = nv_rd32(priv, ustatus_addr + 4); | 544 | pr_cont("\n"); |
| 475 | u32 e10 = nv_rd32(priv, ustatus_addr + 8); | 545 | ustatus = 0; |
| 476 | u32 e14 = nv_rd32(priv, ustatus_addr + 0xc); | ||
| 477 | u32 e18 = nv_rd32(priv, ustatus_addr + 0x10); | ||
| 478 | u32 e1c = nv_rd32(priv, ustatus_addr + 0x14); | ||
| 479 | u32 e20 = nv_rd32(priv, ustatus_addr + 0x18); | ||
| 480 | u32 e24 = nv_rd32(priv, ustatus_addr + 0x1c); | ||
| 481 | /* 2d engine destination */ | ||
| 482 | if (ustatus & 0x00000010) { | ||
| 483 | if (display) { | ||
| 484 | nv_error(priv, "TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n", | ||
| 485 | i, e14, e10); | ||
| 486 | nv_error(priv, "TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", | ||
| 487 | i, e0c, e18, e1c, e20, e24); | ||
| 488 | } | ||
| 489 | ustatus &= ~0x00000010; | ||
| 490 | } | ||
| 491 | /* Render target */ | ||
| 492 | if (ustatus & 0x00000040) { | ||
| 493 | if (display) { | ||
| 494 | nv_error(priv, "TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n", | ||
| 495 | i, e14, e10); | ||
| 496 | nv_error(priv, "TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", | ||
| 497 | i, e0c, e18, e1c, e20, e24); | ||
| 498 | } | ||
| 499 | ustatus &= ~0x00000040; | ||
| 500 | } | ||
| 501 | /* CUDA memory: l[], g[] or stack. */ | ||
| 502 | if (ustatus & 0x00000080) { | ||
| 503 | if (display) { | ||
| 504 | if (e18 & 0x80000000) { | ||
| 505 | /* g[] read fault? */ | ||
| 506 | nv_error(priv, "TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n", | ||
| 507 | i, e14, e10 | ((e18 >> 24) & 0x1f)); | ||
| 508 | e18 &= ~0x1f000000; | ||
| 509 | } else if (e18 & 0xc) { | ||
| 510 | /* g[] write fault? */ | ||
| 511 | nv_error(priv, "TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n", | ||
| 512 | i, e14, e10 | ((e18 >> 7) & 0x1f)); | ||
| 513 | e18 &= ~0x00000f80; | ||
| 514 | } else { | ||
| 515 | nv_error(priv, "TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n", | ||
| 516 | i, e14, e10); | ||
| 517 | } | ||
| 518 | nv_error(priv, "TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", | ||
| 519 | i, e0c, e18, e1c, e20, e24); | ||
| 520 | } | ||
| 521 | ustatus &= ~0x00000080; | ||
| 522 | } | ||
| 523 | } | 546 | } |
| 524 | break; | 547 | break; |
| 548 | case 8: /* PROP error */ | ||
| 549 | if (display) | ||
| 550 | nv50_priv_prop_trap( | ||
| 551 | priv, ustatus_addr, ustatus, i); | ||
| 552 | ustatus = 0; | ||
| 553 | break; | ||
| 525 | } | 554 | } |
| 526 | if (ustatus) { | 555 | if (ustatus) { |
| 527 | if (display) | 556 | if (display) |
| @@ -727,11 +756,11 @@ nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display, | |||
| 727 | status &= ~0x080; | 756 | status &= ~0x080; |
| 728 | } | 757 | } |
| 729 | 758 | ||
| 730 | /* TPDMA: Handles TP-initiated uncached memory accesses: | 759 | /* PROP: Handles TP-initiated uncached memory accesses: |
| 731 | * l[], g[], stack, 2d surfaces, render targets. */ | 760 | * l[], g[], stack, 2d surfaces, render targets. */ |
| 732 | if (status & 0x100) { | 761 | if (status & 0x100) { |
| 733 | nv50_priv_tp_trap(priv, 8, 0x408e08, 0x408708, display, | 762 | nv50_priv_tp_trap(priv, 8, 0x408e08, 0x408708, display, |
| 734 | "TRAP_TPDMA"); | 763 | "TRAP_PROP"); |
| 735 | nv_wr32(priv, 0x400108, 0x100); | 764 | nv_wr32(priv, 0x400108, 0x100); |
| 736 | status &= ~0x100; | 765 | status &= ~0x100; |
| 737 | } | 766 | } |
| @@ -760,7 +789,7 @@ nv50_graph_intr(struct nouveau_subdev *subdev) | |||
| 760 | u32 mthd = (addr & 0x00001ffc); | 789 | u32 mthd = (addr & 0x00001ffc); |
| 761 | u32 data = nv_rd32(priv, 0x400708); | 790 | u32 data = nv_rd32(priv, 0x400708); |
| 762 | u32 class = nv_rd32(priv, 0x400814); | 791 | u32 class = nv_rd32(priv, 0x400814); |
| 763 | u32 show = stat; | 792 | u32 show = stat, show_bitfield = stat; |
| 764 | int chid; | 793 | int chid; |
| 765 | 794 | ||
| 766 | engctx = nouveau_engctx_get(engine, inst); | 795 | engctx = nouveau_engctx_get(engine, inst); |
| @@ -778,21 +807,26 @@ nv50_graph_intr(struct nouveau_subdev *subdev) | |||
| 778 | nv_error(priv, "DATA_ERROR "); | 807 | nv_error(priv, "DATA_ERROR "); |
| 779 | nouveau_enum_print(nv50_data_error_names, ecode); | 808 | nouveau_enum_print(nv50_data_error_names, ecode); |
| 780 | pr_cont("\n"); | 809 | pr_cont("\n"); |
| 810 | show_bitfield &= ~0x00100000; | ||
| 781 | } | 811 | } |
| 782 | 812 | ||
| 783 | if (stat & 0x00200000) { | 813 | if (stat & 0x00200000) { |
| 784 | if (!nv50_graph_trap_handler(priv, show, chid, (u64)inst << 12, | 814 | if (!nv50_graph_trap_handler(priv, show, chid, (u64)inst << 12, |
| 785 | engctx)) | 815 | engctx)) |
| 786 | show &= ~0x00200000; | 816 | show &= ~0x00200000; |
| 817 | show_bitfield &= ~0x00200000; | ||
| 787 | } | 818 | } |
| 788 | 819 | ||
| 789 | nv_wr32(priv, 0x400100, stat); | 820 | nv_wr32(priv, 0x400100, stat); |
| 790 | nv_wr32(priv, 0x400500, 0x00010001); | 821 | nv_wr32(priv, 0x400500, 0x00010001); |
| 791 | 822 | ||
| 792 | if (show) { | 823 | if (show) { |
| 793 | nv_error(priv, "%s", ""); | 824 | show &= show_bitfield; |
| 794 | nouveau_bitfield_print(nv50_graph_intr_name, show); | 825 | if (show) { |
| 795 | pr_cont("\n"); | 826 | nv_error(priv, "%s", ""); |
| 827 | nouveau_bitfield_print(nv50_graph_intr_name, show); | ||
| 828 | pr_cont("\n"); | ||
| 829 | } | ||
| 796 | nv_error(priv, | 830 | nv_error(priv, |
| 797 | "ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n", | 831 | "ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n", |
| 798 | chid, (u64)inst << 12, nouveau_client_name(engctx), | 832 | chid, (u64)inst << 12, nouveau_client_name(engctx), |
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c index 5c8a63dc506a..a73ab209ea88 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c | |||
| @@ -901,6 +901,9 @@ nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv) | |||
| 901 | } | 901 | } |
| 902 | 902 | ||
| 903 | return 0; | 903 | return 0; |
| 904 | } else | ||
| 905 | if (!oclass->fecs.ucode) { | ||
| 906 | return -ENOSYS; | ||
| 904 | } | 907 | } |
| 905 | 908 | ||
| 906 | /* load HUB microcode */ | 909 | /* load HUB microcode */ |
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h index ea17a80ad7fc..b0ab6de270b2 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h | |||
| @@ -205,6 +205,11 @@ extern struct nvc0_graph_init nve4_graph_init_regs[]; | |||
| 205 | extern struct nvc0_graph_init nve4_graph_init_unk[]; | 205 | extern struct nvc0_graph_init nve4_graph_init_unk[]; |
| 206 | extern struct nvc0_graph_init nve4_graph_init_unk88xx[]; | 206 | extern struct nvc0_graph_init nve4_graph_init_unk88xx[]; |
| 207 | 207 | ||
| 208 | extern struct nvc0_graph_init nvf0_graph_init_unk40xx[]; | ||
| 209 | extern struct nvc0_graph_init nvf0_graph_init_unk70xx[]; | ||
| 210 | extern struct nvc0_graph_init nvf0_graph_init_unk5bxx[]; | ||
| 211 | extern struct nvc0_graph_init nvf0_graph_init_tpc[]; | ||
| 212 | |||
| 208 | int nvc0_grctx_generate(struct nvc0_graph_priv *); | 213 | int nvc0_grctx_generate(struct nvc0_graph_priv *); |
| 209 | void nvc0_grctx_generate_main(struct nvc0_graph_priv *, struct nvc0_grctx *); | 214 | void nvc0_grctx_generate_main(struct nvc0_graph_priv *, struct nvc0_grctx *); |
| 210 | void nvc0_grctx_generate_mods(struct nvc0_graph_priv *, struct nvc0_grctx *); | 215 | void nvc0_grctx_generate_mods(struct nvc0_graph_priv *, struct nvc0_grctx *); |
| @@ -266,6 +271,11 @@ extern struct nvc0_graph_init nve4_grctx_init_unk80xx[]; | |||
| 266 | extern struct nvc0_graph_init nve4_grctx_init_unk90xx[]; | 271 | extern struct nvc0_graph_init nve4_grctx_init_unk90xx[]; |
| 267 | 272 | ||
| 268 | extern struct nouveau_oclass *nvf0_grctx_oclass; | 273 | extern struct nouveau_oclass *nvf0_grctx_oclass; |
| 274 | extern struct nvc0_graph_init nvf0_grctx_init_unk44xx[]; | ||
| 275 | extern struct nvc0_graph_init nvf0_grctx_init_unk5bxx[]; | ||
| 276 | extern struct nvc0_graph_init nvf0_grctx_init_unk60xx[]; | ||
| 277 | |||
| 278 | extern struct nouveau_oclass *nv108_grctx_oclass; | ||
| 269 | 279 | ||
| 270 | #define mmio_data(s,a,p) do { \ | 280 | #define mmio_data(s,a,p) do { \ |
| 271 | info->buffer[info->buffer_nr] = round_up(info->addr, (a)); \ | 281 | info->buffer[info->buffer_nr] = round_up(info->addr, (a)); \ |
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c index 2f0ac7832234..b1acb9939d95 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c | |||
| @@ -41,7 +41,7 @@ nvf0_graph_sclass[] = { | |||
| 41 | * PGRAPH engine/subdev functions | 41 | * PGRAPH engine/subdev functions |
| 42 | ******************************************************************************/ | 42 | ******************************************************************************/ |
| 43 | 43 | ||
| 44 | static struct nvc0_graph_init | 44 | struct nvc0_graph_init |
| 45 | nvf0_graph_init_unk40xx[] = { | 45 | nvf0_graph_init_unk40xx[] = { |
| 46 | { 0x40415c, 1, 0x04, 0x00000000 }, | 46 | { 0x40415c, 1, 0x04, 0x00000000 }, |
| 47 | { 0x404170, 1, 0x04, 0x00000000 }, | 47 | { 0x404170, 1, 0x04, 0x00000000 }, |
| @@ -60,7 +60,7 @@ nvf0_graph_init_unk58xx[] = { | |||
| 60 | {} | 60 | {} |
| 61 | }; | 61 | }; |
| 62 | 62 | ||
| 63 | static struct nvc0_graph_init | 63 | struct nvc0_graph_init |
| 64 | nvf0_graph_init_unk70xx[] = { | 64 | nvf0_graph_init_unk70xx[] = { |
| 65 | { 0x407010, 1, 0x04, 0x00000000 }, | 65 | { 0x407010, 1, 0x04, 0x00000000 }, |
| 66 | { 0x407040, 1, 0x04, 0x80440424 }, | 66 | { 0x407040, 1, 0x04, 0x80440424 }, |
| @@ -68,7 +68,7 @@ nvf0_graph_init_unk70xx[] = { | |||
| 68 | {} | 68 | {} |
| 69 | }; | 69 | }; |
| 70 | 70 | ||
| 71 | static struct nvc0_graph_init | 71 | struct nvc0_graph_init |
| 72 | nvf0_graph_init_unk5bxx[] = { | 72 | nvf0_graph_init_unk5bxx[] = { |
| 73 | { 0x405b44, 1, 0x04, 0x00000000 }, | 73 | { 0x405b44, 1, 0x04, 0x00000000 }, |
| 74 | { 0x405b50, 1, 0x04, 0x00000000 }, | 74 | { 0x405b50, 1, 0x04, 0x00000000 }, |
| @@ -114,7 +114,7 @@ nvf0_graph_init_gpc[] = { | |||
| 114 | {} | 114 | {} |
| 115 | }; | 115 | }; |
| 116 | 116 | ||
| 117 | static struct nvc0_graph_init | 117 | struct nvc0_graph_init |
| 118 | nvf0_graph_init_tpc[] = { | 118 | nvf0_graph_init_tpc[] = { |
| 119 | { 0x419d0c, 1, 0x04, 0x00000000 }, | 119 | { 0x419d0c, 1, 0x04, 0x00000000 }, |
| 120 | { 0x419d10, 1, 0x04, 0x00000014 }, | 120 | { 0x419d10, 1, 0x04, 0x00000014 }, |
| @@ -243,6 +243,6 @@ nvf0_graph_oclass = &(struct nvc0_graph_oclass) { | |||
| 243 | .cclass = &nvf0_grctx_oclass, | 243 | .cclass = &nvf0_grctx_oclass, |
| 244 | .sclass = nvf0_graph_sclass, | 244 | .sclass = nvf0_graph_sclass, |
| 245 | .mmio = nvf0_graph_init_mmio, | 245 | .mmio = nvf0_graph_init_mmio, |
| 246 | .fecs.ucode = 0 ? &nvf0_graph_fecs_ucode : NULL, | 246 | .fecs.ucode = &nvf0_graph_fecs_ucode, |
| 247 | .gpccs.ucode = &nvf0_graph_gpccs_ucode, | 247 | .gpccs.ucode = &nvf0_graph_gpccs_ucode, |
| 248 | }.base; | 248 | }.base; |
diff --git a/drivers/gpu/drm/nouveau/core/include/core/class.h b/drivers/gpu/drm/nouveau/core/include/core/class.h index 560c3593dae7..e71a4325e670 100644 --- a/drivers/gpu/drm/nouveau/core/include/core/class.h +++ b/drivers/gpu/drm/nouveau/core/include/core/class.h | |||
| @@ -230,9 +230,26 @@ struct nve0_channel_ind_class { | |||
| 230 | 230 | ||
| 231 | #define NV04_DISP_CLASS 0x00000046 | 231 | #define NV04_DISP_CLASS 0x00000046 |
| 232 | 232 | ||
| 233 | #define NV04_DISP_MTHD 0x00000000 | ||
| 234 | #define NV04_DISP_MTHD_HEAD 0x00000001 | ||
| 235 | |||
| 236 | #define NV04_DISP_SCANOUTPOS 0x00000000 | ||
| 237 | |||
| 233 | struct nv04_display_class { | 238 | struct nv04_display_class { |
| 234 | }; | 239 | }; |
| 235 | 240 | ||
| 241 | struct nv04_display_scanoutpos { | ||
| 242 | s64 time[2]; | ||
| 243 | u32 vblanks; | ||
| 244 | u32 vblanke; | ||
| 245 | u32 vtotal; | ||
| 246 | u32 vline; | ||
| 247 | u32 hblanks; | ||
| 248 | u32 hblanke; | ||
| 249 | u32 htotal; | ||
| 250 | u32 hline; | ||
| 251 | }; | ||
| 252 | |||
| 236 | /* 5070: NV50_DISP | 253 | /* 5070: NV50_DISP |
| 237 | * 8270: NV84_DISP | 254 | * 8270: NV84_DISP |
| 238 | * 8370: NVA0_DISP | 255 | * 8370: NVA0_DISP |
| @@ -252,6 +269,11 @@ struct nv04_display_class { | |||
| 252 | #define NVE0_DISP_CLASS 0x00009170 | 269 | #define NVE0_DISP_CLASS 0x00009170 |
| 253 | #define NVF0_DISP_CLASS 0x00009270 | 270 | #define NVF0_DISP_CLASS 0x00009270 |
| 254 | 271 | ||
| 272 | #define NV50_DISP_MTHD 0x00000000 | ||
| 273 | #define NV50_DISP_MTHD_HEAD 0x00000003 | ||
| 274 | |||
| 275 | #define NV50_DISP_SCANOUTPOS 0x00000000 | ||
| 276 | |||
| 255 | #define NV50_DISP_SOR_MTHD 0x00010000 | 277 | #define NV50_DISP_SOR_MTHD 0x00010000 |
| 256 | #define NV50_DISP_SOR_MTHD_TYPE 0x0000f000 | 278 | #define NV50_DISP_SOR_MTHD_TYPE 0x0000f000 |
| 257 | #define NV50_DISP_SOR_MTHD_HEAD 0x00000018 | 279 | #define NV50_DISP_SOR_MTHD_HEAD 0x00000018 |
diff --git a/drivers/gpu/drm/nouveau/core/include/core/device.h b/drivers/gpu/drm/nouveau/core/include/core/device.h index ac2881d1776a..7b8ea221b00d 100644 --- a/drivers/gpu/drm/nouveau/core/include/core/device.h +++ b/drivers/gpu/drm/nouveau/core/include/core/device.h | |||
| @@ -38,7 +38,8 @@ enum nv_subdev_type { | |||
| 38 | NVDEV_SUBDEV_THERM, | 38 | NVDEV_SUBDEV_THERM, |
| 39 | NVDEV_SUBDEV_CLOCK, | 39 | NVDEV_SUBDEV_CLOCK, |
| 40 | 40 | ||
| 41 | NVDEV_ENGINE_DMAOBJ, | 41 | NVDEV_ENGINE_FIRST, |
| 42 | NVDEV_ENGINE_DMAOBJ = NVDEV_ENGINE_FIRST, | ||
| 42 | NVDEV_ENGINE_FIFO, | 43 | NVDEV_ENGINE_FIFO, |
| 43 | NVDEV_ENGINE_SW, | 44 | NVDEV_ENGINE_SW, |
| 44 | NVDEV_ENGINE_GR, | 45 | NVDEV_ENGINE_GR, |
| @@ -70,6 +71,7 @@ struct nouveau_device { | |||
| 70 | const char *dbgopt; | 71 | const char *dbgopt; |
| 71 | const char *name; | 72 | const char *name; |
| 72 | const char *cname; | 73 | const char *cname; |
| 74 | u64 disable_mask; | ||
| 73 | 75 | ||
| 74 | enum { | 76 | enum { |
| 75 | NV_04 = 0x04, | 77 | NV_04 = 0x04, |
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h index 8c32cf4d83c7..26b6b2bb1112 100644 --- a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h +++ b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h | |||
| @@ -109,6 +109,7 @@ extern struct nouveau_oclass *nv50_fifo_oclass; | |||
| 109 | extern struct nouveau_oclass *nv84_fifo_oclass; | 109 | extern struct nouveau_oclass *nv84_fifo_oclass; |
| 110 | extern struct nouveau_oclass *nvc0_fifo_oclass; | 110 | extern struct nouveau_oclass *nvc0_fifo_oclass; |
| 111 | extern struct nouveau_oclass *nve0_fifo_oclass; | 111 | extern struct nouveau_oclass *nve0_fifo_oclass; |
| 112 | extern struct nouveau_oclass *nv108_fifo_oclass; | ||
| 112 | 113 | ||
| 113 | void nv04_fifo_intr(struct nouveau_subdev *); | 114 | void nv04_fifo_intr(struct nouveau_subdev *); |
| 114 | int nv04_fifo_context_attach(struct nouveau_object *, struct nouveau_object *); | 115 | int nv04_fifo_context_attach(struct nouveau_object *, struct nouveau_object *); |
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/graph.h b/drivers/gpu/drm/nouveau/core/include/engine/graph.h index 8e1b52312ddc..97705618de97 100644 --- a/drivers/gpu/drm/nouveau/core/include/engine/graph.h +++ b/drivers/gpu/drm/nouveau/core/include/engine/graph.h | |||
| @@ -69,6 +69,7 @@ extern struct nouveau_oclass *nvd7_graph_oclass; | |||
| 69 | extern struct nouveau_oclass *nvd9_graph_oclass; | 69 | extern struct nouveau_oclass *nvd9_graph_oclass; |
| 70 | extern struct nouveau_oclass *nve4_graph_oclass; | 70 | extern struct nouveau_oclass *nve4_graph_oclass; |
| 71 | extern struct nouveau_oclass *nvf0_graph_oclass; | 71 | extern struct nouveau_oclass *nvf0_graph_oclass; |
| 72 | extern struct nouveau_oclass *nv108_graph_oclass; | ||
| 72 | 73 | ||
| 73 | extern const struct nouveau_bitfield nv04_graph_nsource[]; | 74 | extern const struct nouveau_bitfield nv04_graph_nsource[]; |
| 74 | extern struct nouveau_ofuncs nv04_graph_ofuncs; | 75 | extern struct nouveau_ofuncs nv04_graph_ofuncs; |
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bar.h b/drivers/gpu/drm/nouveau/core/include/subdev/bar.h index 4f4ff4502c3d..9faa98e67ad8 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/bar.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/bar.h | |||
| @@ -4,8 +4,7 @@ | |||
| 4 | #include <core/subdev.h> | 4 | #include <core/subdev.h> |
| 5 | #include <core/device.h> | 5 | #include <core/device.h> |
| 6 | 6 | ||
| 7 | #include <subdev/fb.h> | 7 | struct nouveau_mem; |
| 8 | |||
| 9 | struct nouveau_vma; | 8 | struct nouveau_vma; |
| 10 | 9 | ||
| 11 | struct nouveau_bar { | 10 | struct nouveau_bar { |
| @@ -29,27 +28,7 @@ nouveau_bar(void *obj) | |||
| 29 | return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_BAR]; | 28 | return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_BAR]; |
| 30 | } | 29 | } |
| 31 | 30 | ||
| 32 | #define nouveau_bar_create(p,e,o,d) \ | ||
| 33 | nouveau_bar_create_((p), (e), (o), sizeof(**d), (void **)d) | ||
| 34 | #define nouveau_bar_init(p) \ | ||
| 35 | nouveau_subdev_init(&(p)->base) | ||
| 36 | #define nouveau_bar_fini(p,s) \ | ||
| 37 | nouveau_subdev_fini(&(p)->base, (s)) | ||
| 38 | |||
| 39 | int nouveau_bar_create_(struct nouveau_object *, struct nouveau_object *, | ||
| 40 | struct nouveau_oclass *, int, void **); | ||
| 41 | void nouveau_bar_destroy(struct nouveau_bar *); | ||
| 42 | |||
| 43 | void _nouveau_bar_dtor(struct nouveau_object *); | ||
| 44 | #define _nouveau_bar_init _nouveau_subdev_init | ||
| 45 | #define _nouveau_bar_fini _nouveau_subdev_fini | ||
| 46 | |||
| 47 | extern struct nouveau_oclass nv50_bar_oclass; | 31 | extern struct nouveau_oclass nv50_bar_oclass; |
| 48 | extern struct nouveau_oclass nvc0_bar_oclass; | 32 | extern struct nouveau_oclass nvc0_bar_oclass; |
| 49 | 33 | ||
| 50 | int nouveau_bar_alloc(struct nouveau_bar *, struct nouveau_object *, | ||
| 51 | struct nouveau_mem *, struct nouveau_object **); | ||
| 52 | |||
| 53 | void nv84_bar_flush(struct nouveau_bar *); | ||
| 54 | |||
| 55 | #endif | 34 | #endif |
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h new file mode 100644 index 000000000000..c5e6d1e6ac1d --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h | |||
| @@ -0,0 +1,66 @@ | |||
| 1 | #ifndef __NVBIOS_RAMCFG_H__ | ||
| 2 | #define __NVBIOS_RAMCFG_H__ | ||
| 3 | |||
| 4 | struct nouveau_bios; | ||
| 5 | |||
| 6 | struct nvbios_ramcfg { | ||
| 7 | unsigned rammap_11_08_01:1; | ||
| 8 | unsigned rammap_11_08_0c:2; | ||
| 9 | unsigned rammap_11_08_10:1; | ||
| 10 | unsigned rammap_11_11_0c:2; | ||
| 11 | |||
| 12 | unsigned ramcfg_11_01_01:1; | ||
| 13 | unsigned ramcfg_11_01_02:1; | ||
| 14 | unsigned ramcfg_11_01_04:1; | ||
| 15 | unsigned ramcfg_11_01_08:1; | ||
| 16 | unsigned ramcfg_11_01_10:1; | ||
| 17 | unsigned ramcfg_11_01_20:1; | ||
| 18 | unsigned ramcfg_11_01_40:1; | ||
| 19 | unsigned ramcfg_11_01_80:1; | ||
| 20 | unsigned ramcfg_11_02_03:2; | ||
| 21 | unsigned ramcfg_11_02_04:1; | ||
| 22 | unsigned ramcfg_11_02_08:1; | ||
| 23 | unsigned ramcfg_11_02_10:1; | ||
| 24 | unsigned ramcfg_11_02_40:1; | ||
| 25 | unsigned ramcfg_11_02_80:1; | ||
| 26 | unsigned ramcfg_11_03_0f:4; | ||
| 27 | unsigned ramcfg_11_03_30:2; | ||
| 28 | unsigned ramcfg_11_03_c0:2; | ||
| 29 | unsigned ramcfg_11_03_f0:4; | ||
| 30 | unsigned ramcfg_11_04:8; | ||
| 31 | unsigned ramcfg_11_06:8; | ||
| 32 | unsigned ramcfg_11_07_02:1; | ||
| 33 | unsigned ramcfg_11_07_04:1; | ||
| 34 | unsigned ramcfg_11_07_08:1; | ||
| 35 | unsigned ramcfg_11_07_10:1; | ||
| 36 | unsigned ramcfg_11_07_40:1; | ||
| 37 | unsigned ramcfg_11_07_80:1; | ||
| 38 | unsigned ramcfg_11_08_01:1; | ||
| 39 | unsigned ramcfg_11_08_02:1; | ||
| 40 | unsigned ramcfg_11_08_04:1; | ||
| 41 | unsigned ramcfg_11_08_08:1; | ||
| 42 | unsigned ramcfg_11_08_10:1; | ||
| 43 | unsigned ramcfg_11_08_20:1; | ||
| 44 | unsigned ramcfg_11_09:8; | ||
| 45 | |||
| 46 | unsigned timing[11]; | ||
| 47 | unsigned timing_20_2e_03:2; | ||
| 48 | unsigned timing_20_2e_30:2; | ||
| 49 | unsigned timing_20_2e_c0:2; | ||
| 50 | unsigned timing_20_2f_03:2; | ||
| 51 | unsigned timing_20_2c_003f:6; | ||
| 52 | unsigned timing_20_2c_1fc0:7; | ||
| 53 | unsigned timing_20_30_f8:5; | ||
| 54 | unsigned timing_20_30_07:3; | ||
| 55 | unsigned timing_20_31_0007:3; | ||
| 56 | unsigned timing_20_31_0078:4; | ||
| 57 | unsigned timing_20_31_0780:4; | ||
| 58 | unsigned timing_20_31_0800:1; | ||
| 59 | unsigned timing_20_31_7000:3; | ||
| 60 | unsigned timing_20_31_8000:1; | ||
| 61 | }; | ||
| 62 | |||
| 63 | u8 nvbios_ramcfg_count(struct nouveau_bios *); | ||
| 64 | u8 nvbios_ramcfg_index(struct nouveau_bios *); | ||
| 65 | |||
| 66 | #endif | ||
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/rammap.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/rammap.h index bc15e0320877..5bdf8e4db40a 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/rammap.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/rammap.h | |||
| @@ -1,11 +1,25 @@ | |||
| 1 | #ifndef __NVBIOS_RAMMAP_H__ | 1 | #ifndef __NVBIOS_RAMMAP_H__ |
| 2 | #define __NVBIOS_RAMMAP_H__ | 2 | #define __NVBIOS_RAMMAP_H__ |
| 3 | 3 | ||
| 4 | u16 nvbios_rammap_table(struct nouveau_bios *, u8 *ver, u8 *hdr, | 4 | struct nvbios_ramcfg; |
| 5 | u8 *cnt, u8 *len, u8 *snr, u8 *ssz); | 5 | |
| 6 | u16 nvbios_rammap_entry(struct nouveau_bios *, int idx, | 6 | u32 nvbios_rammapTe(struct nouveau_bios *, u8 *ver, u8 *hdr, |
| 7 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len); | 7 | u8 *cnt, u8 *len, u8 *snr, u8 *ssz); |
| 8 | u16 nvbios_rammap_match(struct nouveau_bios *, u16 khz, | 8 | |
| 9 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len); | 9 | u32 nvbios_rammapEe(struct nouveau_bios *, int idx, |
| 10 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len); | ||
| 11 | u32 nvbios_rammapEm(struct nouveau_bios *, u16 mhz, | ||
| 12 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len); | ||
| 13 | u32 nvbios_rammapEp(struct nouveau_bios *, u16 mhz, | ||
| 14 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len, | ||
| 15 | struct nvbios_ramcfg *); | ||
| 16 | |||
| 17 | u32 nvbios_rammapSe(struct nouveau_bios *, u32 data, | ||
| 18 | u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx, | ||
| 19 | u8 *ver, u8 *hdr); | ||
| 20 | u32 nvbios_rammapSp(struct nouveau_bios *, u32 data, | ||
| 21 | u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx, | ||
| 22 | u8 *ver, u8 *hdr, | ||
| 23 | struct nvbios_ramcfg *); | ||
| 10 | 24 | ||
| 11 | #endif | 25 | #endif |
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/timing.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/timing.h index 963694b54224..76d914b67ab5 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/timing.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/timing.h | |||
| @@ -1,8 +1,14 @@ | |||
| 1 | #ifndef __NVBIOS_TIMING_H__ | 1 | #ifndef __NVBIOS_TIMING_H__ |
| 2 | #define __NVBIOS_TIMING_H__ | 2 | #define __NVBIOS_TIMING_H__ |
| 3 | 3 | ||
| 4 | u16 nvbios_timing_table(struct nouveau_bios *, | 4 | struct nvbios_ramcfg; |
| 5 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len); | 5 | |
| 6 | u16 nvbios_timing_entry(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr); | 6 | u16 nvbios_timingTe(struct nouveau_bios *, |
| 7 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz); | ||
| 8 | u16 nvbios_timingEe(struct nouveau_bios *, int idx, | ||
| 9 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len); | ||
| 10 | u16 nvbios_timingEp(struct nouveau_bios *, int idx, | ||
| 11 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len, | ||
| 12 | struct nvbios_ramcfg *); | ||
| 7 | 13 | ||
| 8 | #endif | 14 | #endif |
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h b/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h index 685c9b12ee4c..ed1ac68c38b3 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h | |||
| @@ -9,7 +9,6 @@ struct nouveau_devinit { | |||
| 9 | bool post; | 9 | bool post; |
| 10 | void (*meminit)(struct nouveau_devinit *); | 10 | void (*meminit)(struct nouveau_devinit *); |
| 11 | int (*pll_set)(struct nouveau_devinit *, u32 type, u32 freq); | 11 | int (*pll_set)(struct nouveau_devinit *, u32 type, u32 freq); |
| 12 | |||
| 13 | }; | 12 | }; |
| 14 | 13 | ||
| 15 | static inline struct nouveau_devinit * | 14 | static inline struct nouveau_devinit * |
| @@ -18,32 +17,16 @@ nouveau_devinit(void *obj) | |||
| 18 | return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_DEVINIT]; | 17 | return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_DEVINIT]; |
| 19 | } | 18 | } |
| 20 | 19 | ||
| 21 | #define nouveau_devinit_create(p,e,o,d) \ | 20 | extern struct nouveau_oclass *nv04_devinit_oclass; |
| 22 | nouveau_devinit_create_((p), (e), (o), sizeof(**d), (void **)d) | 21 | extern struct nouveau_oclass *nv05_devinit_oclass; |
| 23 | #define nouveau_devinit_destroy(p) \ | 22 | extern struct nouveau_oclass *nv10_devinit_oclass; |
| 24 | nouveau_subdev_destroy(&(p)->base) | 23 | extern struct nouveau_oclass *nv1a_devinit_oclass; |
| 25 | #define nouveau_devinit_init(p) ({ \ | 24 | extern struct nouveau_oclass *nv20_devinit_oclass; |
| 26 | struct nouveau_devinit *d = (p); \ | 25 | extern struct nouveau_oclass *nv50_devinit_oclass; |
| 27 | _nouveau_devinit_init(nv_object(d)); \ | 26 | extern struct nouveau_oclass *nv84_devinit_oclass; |
| 28 | }) | 27 | extern struct nouveau_oclass *nv98_devinit_oclass; |
| 29 | #define nouveau_devinit_fini(p,s) ({ \ | 28 | extern struct nouveau_oclass *nva3_devinit_oclass; |
| 30 | struct nouveau_devinit *d = (p); \ | 29 | extern struct nouveau_oclass *nvaf_devinit_oclass; |
| 31 | _nouveau_devinit_fini(nv_object(d), (s)); \ | 30 | extern struct nouveau_oclass *nvc0_devinit_oclass; |
| 32 | }) | ||
| 33 | |||
| 34 | int nouveau_devinit_create_(struct nouveau_object *, struct nouveau_object *, | ||
| 35 | struct nouveau_oclass *, int, void **); | ||
| 36 | #define _nouveau_devinit_dtor _nouveau_subdev_dtor | ||
| 37 | int _nouveau_devinit_init(struct nouveau_object *); | ||
| 38 | int _nouveau_devinit_fini(struct nouveau_object *, bool suspend); | ||
| 39 | |||
| 40 | extern struct nouveau_oclass nv04_devinit_oclass; | ||
| 41 | extern struct nouveau_oclass nv05_devinit_oclass; | ||
| 42 | extern struct nouveau_oclass nv10_devinit_oclass; | ||
| 43 | extern struct nouveau_oclass nv1a_devinit_oclass; | ||
| 44 | extern struct nouveau_oclass nv20_devinit_oclass; | ||
| 45 | extern struct nouveau_oclass nv50_devinit_oclass; | ||
| 46 | extern struct nouveau_oclass nva3_devinit_oclass; | ||
| 47 | extern struct nouveau_oclass nvc0_devinit_oclass; | ||
| 48 | 31 | ||
| 49 | #endif | 32 | #endif |
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h index d89dbdf39b0d..d7ecafbae1ca 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h | |||
| @@ -106,6 +106,13 @@ extern struct nouveau_oclass *nvaf_fb_oclass; | |||
| 106 | extern struct nouveau_oclass *nvc0_fb_oclass; | 106 | extern struct nouveau_oclass *nvc0_fb_oclass; |
| 107 | extern struct nouveau_oclass *nve0_fb_oclass; | 107 | extern struct nouveau_oclass *nve0_fb_oclass; |
| 108 | 108 | ||
| 109 | #include <subdev/bios/ramcfg.h> | ||
| 110 | |||
| 111 | struct nouveau_ram_data { | ||
| 112 | struct nvbios_ramcfg bios; | ||
| 113 | u32 freq; | ||
| 114 | }; | ||
| 115 | |||
| 109 | struct nouveau_ram { | 116 | struct nouveau_ram { |
| 110 | struct nouveau_object base; | 117 | struct nouveau_object base; |
| 111 | enum { | 118 | enum { |
| @@ -142,6 +149,12 @@ struct nouveau_ram { | |||
| 142 | } rammap, ramcfg, timing; | 149 | } rammap, ramcfg, timing; |
| 143 | u32 freq; | 150 | u32 freq; |
| 144 | u32 mr[16]; | 151 | u32 mr[16]; |
| 152 | u32 mr1_nuts; | ||
| 153 | |||
| 154 | struct nouveau_ram_data *next; | ||
| 155 | struct nouveau_ram_data former; | ||
| 156 | struct nouveau_ram_data xition; | ||
| 157 | struct nouveau_ram_data target; | ||
| 145 | }; | 158 | }; |
| 146 | 159 | ||
| 147 | #endif | 160 | #endif |
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h b/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h index 4aca33887aaa..c1df26f3230c 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h | |||
| @@ -23,21 +23,6 @@ nv_memobj(void *obj) | |||
| 23 | return obj; | 23 | return obj; |
| 24 | } | 24 | } |
| 25 | 25 | ||
| 26 | #define nouveau_instobj_create(p,e,o,d) \ | ||
| 27 | nouveau_instobj_create_((p), (e), (o), sizeof(**d), (void **)d) | ||
| 28 | #define nouveau_instobj_init(p) \ | ||
| 29 | nouveau_object_init(&(p)->base) | ||
| 30 | #define nouveau_instobj_fini(p,s) \ | ||
| 31 | nouveau_object_fini(&(p)->base, (s)) | ||
| 32 | |||
| 33 | int nouveau_instobj_create_(struct nouveau_object *, struct nouveau_object *, | ||
| 34 | struct nouveau_oclass *, int, void **); | ||
| 35 | void nouveau_instobj_destroy(struct nouveau_instobj *); | ||
| 36 | |||
| 37 | void _nouveau_instobj_dtor(struct nouveau_object *); | ||
| 38 | #define _nouveau_instobj_init nouveau_object_init | ||
| 39 | #define _nouveau_instobj_fini nouveau_object_fini | ||
| 40 | |||
| 41 | struct nouveau_instmem { | 26 | struct nouveau_instmem { |
| 42 | struct nouveau_subdev base; | 27 | struct nouveau_subdev base; |
| 43 | struct list_head list; | 28 | struct list_head list; |
| @@ -60,21 +45,8 @@ nouveau_instmem(void *obj) | |||
| 60 | return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_INSTMEM]; | 45 | return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_INSTMEM]; |
| 61 | } | 46 | } |
| 62 | 47 | ||
| 63 | #define nouveau_instmem_create(p,e,o,d) \ | 48 | extern struct nouveau_oclass *nv04_instmem_oclass; |
| 64 | nouveau_instmem_create_((p), (e), (o), sizeof(**d), (void **)d) | 49 | extern struct nouveau_oclass *nv40_instmem_oclass; |
| 65 | #define nouveau_instmem_destroy(p) \ | 50 | extern struct nouveau_oclass *nv50_instmem_oclass; |
| 66 | nouveau_subdev_destroy(&(p)->base) | ||
| 67 | int nouveau_instmem_create_(struct nouveau_object *, struct nouveau_object *, | ||
| 68 | struct nouveau_oclass *, int, void **); | ||
| 69 | int nouveau_instmem_init(struct nouveau_instmem *); | ||
| 70 | int nouveau_instmem_fini(struct nouveau_instmem *, bool); | ||
| 71 | |||
| 72 | #define _nouveau_instmem_dtor _nouveau_subdev_dtor | ||
| 73 | int _nouveau_instmem_init(struct nouveau_object *); | ||
| 74 | int _nouveau_instmem_fini(struct nouveau_object *, bool); | ||
| 75 | |||
| 76 | extern struct nouveau_oclass nv04_instmem_oclass; | ||
| 77 | extern struct nouveau_oclass nv40_instmem_oclass; | ||
| 78 | extern struct nouveau_oclass nv50_instmem_oclass; | ||
| 79 | 51 | ||
| 80 | #endif | 52 | #endif |
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/vm.h b/drivers/gpu/drm/nouveau/core/include/subdev/vm.h index fcf57fa309bf..c9509039f94b 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/vm.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/vm.h | |||
| @@ -131,9 +131,5 @@ void nouveau_vm_map(struct nouveau_vma *, struct nouveau_mem *); | |||
| 131 | void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_mem *); | 131 | void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_mem *); |
| 132 | void nouveau_vm_unmap(struct nouveau_vma *); | 132 | void nouveau_vm_unmap(struct nouveau_vma *); |
| 133 | void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length); | 133 | void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length); |
| 134 | void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length, | ||
| 135 | struct nouveau_mem *); | ||
| 136 | void nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length, | ||
| 137 | struct nouveau_mem *mem); | ||
| 138 | 134 | ||
| 139 | #endif | 135 | #endif |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/base.c b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c index d70ba342aa2e..7098ddd54678 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/bar/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c | |||
| @@ -23,7 +23,11 @@ | |||
| 23 | */ | 23 | */ |
| 24 | 24 | ||
| 25 | #include <core/object.h> | 25 | #include <core/object.h> |
| 26 | #include <subdev/bar.h> | 26 | |
| 27 | #include <subdev/fb.h> | ||
| 28 | #include <subdev/vm.h> | ||
| 29 | |||
| 30 | #include "priv.h" | ||
| 27 | 31 | ||
| 28 | struct nouveau_barobj { | 32 | struct nouveau_barobj { |
| 29 | struct nouveau_object base; | 33 | struct nouveau_object base; |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c index 160d27f3c7b4..090d594a21b3 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c +++ b/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c | |||
| @@ -25,10 +25,11 @@ | |||
| 25 | #include <core/gpuobj.h> | 25 | #include <core/gpuobj.h> |
| 26 | 26 | ||
| 27 | #include <subdev/timer.h> | 27 | #include <subdev/timer.h> |
| 28 | #include <subdev/bar.h> | ||
| 29 | #include <subdev/fb.h> | 28 | #include <subdev/fb.h> |
| 30 | #include <subdev/vm.h> | 29 | #include <subdev/vm.h> |
| 31 | 30 | ||
| 31 | #include "priv.h" | ||
| 32 | |||
| 32 | struct nv50_bar_priv { | 33 | struct nv50_bar_priv { |
| 33 | struct nouveau_bar base; | 34 | struct nouveau_bar base; |
| 34 | spinlock_t lock; | 35 | spinlock_t lock; |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c index b2ec7411eb2e..bac5e754de35 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c | |||
| @@ -25,10 +25,11 @@ | |||
| 25 | #include <core/gpuobj.h> | 25 | #include <core/gpuobj.h> |
| 26 | 26 | ||
| 27 | #include <subdev/timer.h> | 27 | #include <subdev/timer.h> |
| 28 | #include <subdev/bar.h> | ||
| 29 | #include <subdev/fb.h> | 28 | #include <subdev/fb.h> |
| 30 | #include <subdev/vm.h> | 29 | #include <subdev/vm.h> |
| 31 | 30 | ||
| 31 | #include "priv.h" | ||
| 32 | |||
| 32 | struct nvc0_bar_priv { | 33 | struct nvc0_bar_priv { |
| 33 | struct nouveau_bar base; | 34 | struct nouveau_bar base; |
| 34 | spinlock_t lock; | 35 | spinlock_t lock; |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/priv.h b/drivers/gpu/drm/nouveau/core/subdev/bar/priv.h new file mode 100644 index 000000000000..ffad8f337ead --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/bar/priv.h | |||
| @@ -0,0 +1,26 @@ | |||
| 1 | #ifndef __NVKM_BAR_PRIV_H__ | ||
| 2 | #define __NVKM_BAR_PRIV_H__ | ||
| 3 | |||
| 4 | #include <subdev/bar.h> | ||
| 5 | |||
| 6 | #define nouveau_bar_create(p,e,o,d) \ | ||
| 7 | nouveau_bar_create_((p), (e), (o), sizeof(**d), (void **)d) | ||
| 8 | #define nouveau_bar_init(p) \ | ||
| 9 | nouveau_subdev_init(&(p)->base) | ||
| 10 | #define nouveau_bar_fini(p,s) \ | ||
| 11 | nouveau_subdev_fini(&(p)->base, (s)) | ||
| 12 | |||
| 13 | int nouveau_bar_create_(struct nouveau_object *, struct nouveau_object *, | ||
| 14 | struct nouveau_oclass *, int, void **); | ||
| 15 | void nouveau_bar_destroy(struct nouveau_bar *); | ||
| 16 | |||
| 17 | void _nouveau_bar_dtor(struct nouveau_object *); | ||
| 18 | #define _nouveau_bar_init _nouveau_subdev_init | ||
| 19 | #define _nouveau_bar_fini _nouveau_subdev_fini | ||
| 20 | |||
| 21 | int nouveau_bar_alloc(struct nouveau_bar *, struct nouveau_object *, | ||
| 22 | struct nouveau_mem *, struct nouveau_object **); | ||
| 23 | |||
| 24 | void nv84_bar_flush(struct nouveau_bar *); | ||
| 25 | |||
| 26 | #endif | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c index df1b1b423093..de201baeb053 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | #include <subdev/bios/dp.h> | 9 | #include <subdev/bios/dp.h> |
| 10 | #include <subdev/bios/gpio.h> | 10 | #include <subdev/bios/gpio.h> |
| 11 | #include <subdev/bios/init.h> | 11 | #include <subdev/bios/init.h> |
| 12 | #include <subdev/bios/ramcfg.h> | ||
| 12 | #include <subdev/devinit.h> | 13 | #include <subdev/devinit.h> |
| 13 | #include <subdev/i2c.h> | 14 | #include <subdev/i2c.h> |
| 14 | #include <subdev/vga.h> | 15 | #include <subdev/vga.h> |
| @@ -391,43 +392,14 @@ init_unknown_script(struct nouveau_bios *bios) | |||
| 391 | return 0x0000; | 392 | return 0x0000; |
| 392 | } | 393 | } |
| 393 | 394 | ||
| 394 | static u16 | ||
| 395 | init_ram_restrict_table(struct nvbios_init *init) | ||
| 396 | { | ||
| 397 | struct nouveau_bios *bios = init->bios; | ||
| 398 | struct bit_entry bit_M; | ||
| 399 | u16 data = 0x0000; | ||
| 400 | |||
| 401 | if (!bit_entry(bios, 'M', &bit_M)) { | ||
| 402 | if (bit_M.version == 1 && bit_M.length >= 5) | ||
| 403 | data = nv_ro16(bios, bit_M.offset + 3); | ||
| 404 | if (bit_M.version == 2 && bit_M.length >= 3) | ||
| 405 | data = nv_ro16(bios, bit_M.offset + 1); | ||
| 406 | } | ||
| 407 | |||
| 408 | if (data == 0x0000) | ||
| 409 | warn("ram restrict table not found\n"); | ||
| 410 | return data; | ||
| 411 | } | ||
| 412 | |||
| 413 | static u8 | 395 | static u8 |
| 414 | init_ram_restrict_group_count(struct nvbios_init *init) | 396 | init_ram_restrict_group_count(struct nvbios_init *init) |
| 415 | { | 397 | { |
| 416 | struct nouveau_bios *bios = init->bios; | 398 | return nvbios_ramcfg_count(init->bios); |
| 417 | struct bit_entry bit_M; | ||
| 418 | |||
| 419 | if (!bit_entry(bios, 'M', &bit_M)) { | ||
| 420 | if (bit_M.version == 1 && bit_M.length >= 5) | ||
| 421 | return nv_ro08(bios, bit_M.offset + 2); | ||
| 422 | if (bit_M.version == 2 && bit_M.length >= 3) | ||
| 423 | return nv_ro08(bios, bit_M.offset + 0); | ||
| 424 | } | ||
| 425 | |||
| 426 | return 0x00; | ||
| 427 | } | 399 | } |
| 428 | 400 | ||
| 429 | static u8 | 401 | static u8 |
| 430 | init_ram_restrict_strap(struct nvbios_init *init) | 402 | init_ram_restrict(struct nvbios_init *init) |
| 431 | { | 403 | { |
| 432 | /* This appears to be the behaviour of the VBIOS parser, and *is* | 404 | /* This appears to be the behaviour of the VBIOS parser, and *is* |
| 433 | * important to cache the NV_PEXTDEV_BOOT0 on later chipsets to | 405 | * important to cache the NV_PEXTDEV_BOOT0 on later chipsets to |
| @@ -438,18 +410,8 @@ init_ram_restrict_strap(struct nvbios_init *init) | |||
| 438 | * in case *not* re-reading the strap causes similar breakage. | 410 | * in case *not* re-reading the strap causes similar breakage. |
| 439 | */ | 411 | */ |
| 440 | if (!init->ramcfg || init->bios->version.major < 0x70) | 412 | if (!init->ramcfg || init->bios->version.major < 0x70) |
| 441 | init->ramcfg = init_rd32(init, 0x101000); | 413 | init->ramcfg = 0x80000000 | nvbios_ramcfg_index(init->bios); |
| 442 | return (init->ramcfg & 0x00000003c) >> 2; | 414 | return (init->ramcfg & 0x7fffffff); |
| 443 | } | ||
| 444 | |||
| 445 | static u8 | ||
| 446 | init_ram_restrict(struct nvbios_init *init) | ||
| 447 | { | ||
| 448 | u8 strap = init_ram_restrict_strap(init); | ||
| 449 | u16 table = init_ram_restrict_table(init); | ||
| 450 | if (table) | ||
| 451 | return nv_ro08(init->bios, table + strap); | ||
| 452 | return 0x00; | ||
| 453 | } | 415 | } |
| 454 | 416 | ||
| 455 | static u8 | 417 | static u8 |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/ramcfg.c b/drivers/gpu/drm/nouveau/core/subdev/bios/ramcfg.c new file mode 100644 index 000000000000..991aedda999b --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/ramcfg.c | |||
| @@ -0,0 +1,67 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2013 Red Hat Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | * Authors: Ben Skeggs <bskeggs@redhat.com> | ||
| 23 | */ | ||
| 24 | |||
| 25 | #include <subdev/bios.h> | ||
| 26 | #include <subdev/bios/bit.h> | ||
| 27 | #include <subdev/bios/ramcfg.h> | ||
| 28 | |||
| 29 | static u8 | ||
| 30 | nvbios_ramcfg_strap(struct nouveau_bios *bios) | ||
| 31 | { | ||
| 32 | return (nv_rd32(bios, 0x101000) & 0x0000003c) >> 2; | ||
| 33 | } | ||
| 34 | |||
| 35 | u8 | ||
| 36 | nvbios_ramcfg_count(struct nouveau_bios *bios) | ||
| 37 | { | ||
| 38 | struct bit_entry bit_M; | ||
| 39 | |||
| 40 | if (!bit_entry(bios, 'M', &bit_M)) { | ||
| 41 | if (bit_M.version == 1 && bit_M.length >= 5) | ||
| 42 | return nv_ro08(bios, bit_M.offset + 2); | ||
| 43 | if (bit_M.version == 2 && bit_M.length >= 3) | ||
| 44 | return nv_ro08(bios, bit_M.offset + 0); | ||
| 45 | } | ||
| 46 | |||
| 47 | return 0x00; | ||
| 48 | } | ||
| 49 | |||
| 50 | u8 | ||
| 51 | nvbios_ramcfg_index(struct nouveau_bios *bios) | ||
| 52 | { | ||
| 53 | u8 strap = nvbios_ramcfg_strap(bios); | ||
| 54 | u32 xlat = 0x00000000; | ||
| 55 | struct bit_entry bit_M; | ||
| 56 | |||
| 57 | if (!bit_entry(bios, 'M', &bit_M)) { | ||
| 58 | if (bit_M.version == 1 && bit_M.length >= 5) | ||
| 59 | xlat = nv_ro16(bios, bit_M.offset + 3); | ||
| 60 | if (bit_M.version == 2 && bit_M.length >= 3) | ||
| 61 | xlat = nv_ro16(bios, bit_M.offset + 1); | ||
| 62 | } | ||
| 63 | |||
| 64 | if (xlat) | ||
| 65 | strap = nv_ro08(bios, xlat + strap); | ||
| 66 | return strap; | ||
| 67 | } | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c b/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c index 916fa9d302b7..1811b2cb0472 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c | |||
| @@ -24,11 +24,12 @@ | |||
| 24 | 24 | ||
| 25 | #include <subdev/bios.h> | 25 | #include <subdev/bios.h> |
| 26 | #include <subdev/bios/bit.h> | 26 | #include <subdev/bios/bit.h> |
| 27 | #include <subdev/bios/ramcfg.h> | ||
| 27 | #include <subdev/bios/rammap.h> | 28 | #include <subdev/bios/rammap.h> |
| 28 | 29 | ||
| 29 | u16 | 30 | u32 |
| 30 | nvbios_rammap_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, | 31 | nvbios_rammapTe(struct nouveau_bios *bios, u8 *ver, u8 *hdr, |
| 31 | u8 *cnt, u8 *len, u8 *snr, u8 *ssz) | 32 | u8 *cnt, u8 *len, u8 *snr, u8 *ssz) |
| 32 | { | 33 | { |
| 33 | struct bit_entry bit_P; | 34 | struct bit_entry bit_P; |
| 34 | u16 rammap = 0x0000; | 35 | u16 rammap = 0x0000; |
| @@ -57,12 +58,12 @@ nvbios_rammap_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, | |||
| 57 | return 0x0000; | 58 | return 0x0000; |
| 58 | } | 59 | } |
| 59 | 60 | ||
| 60 | u16 | 61 | u32 |
| 61 | nvbios_rammap_entry(struct nouveau_bios *bios, int idx, | 62 | nvbios_rammapEe(struct nouveau_bios *bios, int idx, |
| 62 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len) | 63 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len) |
| 63 | { | 64 | { |
| 64 | u8 snr, ssz; | 65 | u8 snr, ssz; |
| 65 | u16 rammap = nvbios_rammap_table(bios, ver, hdr, cnt, len, &snr, &ssz); | 66 | u16 rammap = nvbios_rammapTe(bios, ver, hdr, cnt, len, &snr, &ssz); |
| 66 | if (rammap && idx < *cnt) { | 67 | if (rammap && idx < *cnt) { |
| 67 | rammap = rammap + *hdr + (idx * (*len + (snr * ssz))); | 68 | rammap = rammap + *hdr + (idx * (*len + (snr * ssz))); |
| 68 | *hdr = *len; | 69 | *hdr = *len; |
| @@ -73,16 +74,100 @@ nvbios_rammap_entry(struct nouveau_bios *bios, int idx, | |||
| 73 | return 0x0000; | 74 | return 0x0000; |
| 74 | } | 75 | } |
| 75 | 76 | ||
| 76 | u16 | 77 | u32 |
| 77 | nvbios_rammap_match(struct nouveau_bios *bios, u16 khz, | 78 | nvbios_rammapEm(struct nouveau_bios *bios, u16 khz, |
| 78 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len) | 79 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len) |
| 79 | { | 80 | { |
| 80 | int idx = 0; | 81 | int idx = 0; |
| 81 | u32 data; | 82 | u32 data; |
| 82 | while ((data = nvbios_rammap_entry(bios, idx++, ver, hdr, cnt, len))) { | 83 | while ((data = nvbios_rammapEe(bios, idx++, ver, hdr, cnt, len))) { |
| 83 | if (khz >= nv_ro16(bios, data + 0x00) && | 84 | if (khz >= nv_ro16(bios, data + 0x00) && |
| 84 | khz <= nv_ro16(bios, data + 0x02)) | 85 | khz <= nv_ro16(bios, data + 0x02)) |
| 85 | break; | 86 | break; |
| 86 | } | 87 | } |
| 87 | return data; | 88 | return data; |
| 88 | } | 89 | } |
| 90 | |||
| 91 | u32 | ||
| 92 | nvbios_rammapEp(struct nouveau_bios *bios, u16 khz, | ||
| 93 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len, | ||
| 94 | struct nvbios_ramcfg *p) | ||
| 95 | { | ||
| 96 | u32 data = nvbios_rammapEm(bios, khz, ver, hdr, cnt, len); | ||
| 97 | memset(p, 0x00, sizeof(*p)); | ||
| 98 | switch (!!data * *ver) { | ||
| 99 | case 0x11: | ||
| 100 | p->rammap_11_08_01 = (nv_ro08(bios, data + 0x08) & 0x01) >> 0; | ||
| 101 | p->rammap_11_08_0c = (nv_ro08(bios, data + 0x08) & 0x0c) >> 2; | ||
| 102 | p->rammap_11_08_10 = (nv_ro08(bios, data + 0x08) & 0x10) >> 4; | ||
| 103 | p->rammap_11_11_0c = (nv_ro08(bios, data + 0x11) & 0x0c) >> 2; | ||
| 104 | break; | ||
| 105 | default: | ||
| 106 | data = 0; | ||
| 107 | break; | ||
| 108 | } | ||
| 109 | return data; | ||
| 110 | } | ||
| 111 | |||
| 112 | u32 | ||
| 113 | nvbios_rammapSe(struct nouveau_bios *bios, u32 data, | ||
| 114 | u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx, | ||
| 115 | u8 *ver, u8 *hdr) | ||
| 116 | { | ||
| 117 | if (idx < ecnt) { | ||
| 118 | data = data + ehdr + (idx * elen); | ||
| 119 | *ver = ever; | ||
| 120 | *hdr = elen; | ||
| 121 | return data; | ||
| 122 | } | ||
| 123 | return 0; | ||
| 124 | } | ||
| 125 | |||
| 126 | u32 | ||
| 127 | nvbios_rammapSp(struct nouveau_bios *bios, u32 data, | ||
| 128 | u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx, | ||
| 129 | u8 *ver, u8 *hdr, struct nvbios_ramcfg *p) | ||
| 130 | { | ||
| 131 | data = nvbios_rammapSe(bios, data, ever, ehdr, ecnt, elen, idx, ver, hdr); | ||
| 132 | switch (!!data * *ver) { | ||
| 133 | case 0x11: | ||
| 134 | p->ramcfg_11_01_01 = (nv_ro08(bios, data + 0x01) & 0x01) >> 0; | ||
| 135 | p->ramcfg_11_01_02 = (nv_ro08(bios, data + 0x01) & 0x02) >> 1; | ||
| 136 | p->ramcfg_11_01_04 = (nv_ro08(bios, data + 0x01) & 0x04) >> 2; | ||
| 137 | p->ramcfg_11_01_08 = (nv_ro08(bios, data + 0x01) & 0x08) >> 3; | ||
| 138 | p->ramcfg_11_01_10 = (nv_ro08(bios, data + 0x01) & 0x10) >> 4; | ||
| 139 | p->ramcfg_11_01_20 = (nv_ro08(bios, data + 0x01) & 0x20) >> 5; | ||
| 140 | p->ramcfg_11_01_40 = (nv_ro08(bios, data + 0x01) & 0x40) >> 6; | ||
| 141 | p->ramcfg_11_01_80 = (nv_ro08(bios, data + 0x01) & 0x80) >> 7; | ||
| 142 | p->ramcfg_11_02_03 = (nv_ro08(bios, data + 0x02) & 0x03) >> 0; | ||
| 143 | p->ramcfg_11_02_04 = (nv_ro08(bios, data + 0x02) & 0x04) >> 2; | ||
| 144 | p->ramcfg_11_02_08 = (nv_ro08(bios, data + 0x02) & 0x08) >> 3; | ||
| 145 | p->ramcfg_11_02_10 = (nv_ro08(bios, data + 0x02) & 0x10) >> 4; | ||
| 146 | p->ramcfg_11_02_40 = (nv_ro08(bios, data + 0x02) & 0x40) >> 6; | ||
| 147 | p->ramcfg_11_02_80 = (nv_ro08(bios, data + 0x02) & 0x80) >> 7; | ||
| 148 | p->ramcfg_11_03_0f = (nv_ro08(bios, data + 0x03) & 0x0f) >> 0; | ||
| 149 | p->ramcfg_11_03_30 = (nv_ro08(bios, data + 0x03) & 0x30) >> 4; | ||
| 150 | p->ramcfg_11_03_c0 = (nv_ro08(bios, data + 0x03) & 0xc0) >> 6; | ||
| 151 | p->ramcfg_11_03_f0 = (nv_ro08(bios, data + 0x03) & 0xf0) >> 4; | ||
| 152 | p->ramcfg_11_04 = (nv_ro08(bios, data + 0x04) & 0xff) >> 0; | ||
| 153 | p->ramcfg_11_06 = (nv_ro08(bios, data + 0x06) & 0xff) >> 0; | ||
| 154 | p->ramcfg_11_07_02 = (nv_ro08(bios, data + 0x07) & 0x02) >> 1; | ||
| 155 | p->ramcfg_11_07_04 = (nv_ro08(bios, data + 0x07) & 0x04) >> 2; | ||
| 156 | p->ramcfg_11_07_08 = (nv_ro08(bios, data + 0x07) & 0x08) >> 3; | ||
| 157 | p->ramcfg_11_07_10 = (nv_ro08(bios, data + 0x07) & 0x10) >> 4; | ||
| 158 | p->ramcfg_11_07_40 = (nv_ro08(bios, data + 0x07) & 0x40) >> 6; | ||
| 159 | p->ramcfg_11_07_80 = (nv_ro08(bios, data + 0x07) & 0x80) >> 7; | ||
| 160 | p->ramcfg_11_08_01 = (nv_ro08(bios, data + 0x08) & 0x01) >> 0; | ||
| 161 | p->ramcfg_11_08_02 = (nv_ro08(bios, data + 0x08) & 0x02) >> 1; | ||
| 162 | p->ramcfg_11_08_04 = (nv_ro08(bios, data + 0x08) & 0x04) >> 2; | ||
| 163 | p->ramcfg_11_08_08 = (nv_ro08(bios, data + 0x08) & 0x08) >> 3; | ||
| 164 | p->ramcfg_11_08_10 = (nv_ro08(bios, data + 0x08) & 0x10) >> 4; | ||
| 165 | p->ramcfg_11_08_20 = (nv_ro08(bios, data + 0x08) & 0x20) >> 5; | ||
| 166 | p->ramcfg_11_09 = (nv_ro08(bios, data + 0x09) & 0xff) >> 0; | ||
| 167 | break; | ||
| 168 | default: | ||
| 169 | data = 0; | ||
| 170 | break; | ||
| 171 | } | ||
| 172 | return data; | ||
| 173 | } | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c b/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c index 151c2d6aaee8..350d44ab2ba2 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c | |||
| @@ -24,11 +24,12 @@ | |||
| 24 | 24 | ||
| 25 | #include <subdev/bios.h> | 25 | #include <subdev/bios.h> |
| 26 | #include <subdev/bios/bit.h> | 26 | #include <subdev/bios/bit.h> |
| 27 | #include <subdev/bios/ramcfg.h> | ||
| 27 | #include <subdev/bios/timing.h> | 28 | #include <subdev/bios/timing.h> |
| 28 | 29 | ||
| 29 | u16 | 30 | u16 |
| 30 | nvbios_timing_table(struct nouveau_bios *bios, | 31 | nvbios_timingTe(struct nouveau_bios *bios, |
| 31 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len) | 32 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz) |
| 32 | { | 33 | { |
| 33 | struct bit_entry bit_P; | 34 | struct bit_entry bit_P; |
| 34 | u16 timing = 0x0000; | 35 | u16 timing = 0x0000; |
| @@ -47,11 +48,15 @@ nvbios_timing_table(struct nouveau_bios *bios, | |||
| 47 | *hdr = nv_ro08(bios, timing + 1); | 48 | *hdr = nv_ro08(bios, timing + 1); |
| 48 | *cnt = nv_ro08(bios, timing + 2); | 49 | *cnt = nv_ro08(bios, timing + 2); |
| 49 | *len = nv_ro08(bios, timing + 3); | 50 | *len = nv_ro08(bios, timing + 3); |
| 51 | *snr = 0; | ||
| 52 | *ssz = 0; | ||
| 50 | return timing; | 53 | return timing; |
| 51 | case 0x20: | 54 | case 0x20: |
| 52 | *hdr = nv_ro08(bios, timing + 1); | 55 | *hdr = nv_ro08(bios, timing + 1); |
| 53 | *cnt = nv_ro08(bios, timing + 3); | 56 | *cnt = nv_ro08(bios, timing + 5); |
| 54 | *len = nv_ro08(bios, timing + 2); | 57 | *len = nv_ro08(bios, timing + 2); |
| 58 | *snr = nv_ro08(bios, timing + 4); | ||
| 59 | *ssz = nv_ro08(bios, timing + 3); | ||
| 55 | return timing; | 60 | return timing; |
| 56 | default: | 61 | default: |
| 57 | break; | 62 | break; |
| @@ -63,11 +68,60 @@ nvbios_timing_table(struct nouveau_bios *bios, | |||
| 63 | } | 68 | } |
| 64 | 69 | ||
| 65 | u16 | 70 | u16 |
| 66 | nvbios_timing_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len) | 71 | nvbios_timingEe(struct nouveau_bios *bios, int idx, |
| 72 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len) | ||
| 67 | { | 73 | { |
| 68 | u8 hdr, cnt; | 74 | u8 snr, ssz; |
| 69 | u16 timing = nvbios_timing_table(bios, ver, &hdr, &cnt, len); | 75 | u16 timing = nvbios_timingTe(bios, ver, hdr, cnt, len, &snr, &ssz); |
| 70 | if (timing && idx < cnt) | 76 | if (timing && idx < *cnt) { |
| 71 | return timing + hdr + (idx * *len); | 77 | timing += *hdr + idx * (*len + (snr * ssz)); |
| 78 | *hdr = *len; | ||
| 79 | *cnt = snr; | ||
| 80 | *len = ssz; | ||
| 81 | return timing; | ||
| 82 | } | ||
| 72 | return 0x0000; | 83 | return 0x0000; |
| 73 | } | 84 | } |
| 85 | |||
| 86 | u16 | ||
| 87 | nvbios_timingEp(struct nouveau_bios *bios, int idx, | ||
| 88 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len, | ||
| 89 | struct nvbios_ramcfg *p) | ||
| 90 | { | ||
| 91 | u16 data = nvbios_timingEe(bios, idx, ver, hdr, cnt, len), temp; | ||
| 92 | switch (!!data * *ver) { | ||
| 93 | case 0x20: | ||
| 94 | p->timing[0] = nv_ro32(bios, data + 0x00); | ||
| 95 | p->timing[1] = nv_ro32(bios, data + 0x04); | ||
| 96 | p->timing[2] = nv_ro32(bios, data + 0x08); | ||
| 97 | p->timing[3] = nv_ro32(bios, data + 0x0c); | ||
| 98 | p->timing[4] = nv_ro32(bios, data + 0x10); | ||
| 99 | p->timing[5] = nv_ro32(bios, data + 0x14); | ||
| 100 | p->timing[6] = nv_ro32(bios, data + 0x18); | ||
| 101 | p->timing[7] = nv_ro32(bios, data + 0x1c); | ||
| 102 | p->timing[8] = nv_ro32(bios, data + 0x20); | ||
| 103 | p->timing[9] = nv_ro32(bios, data + 0x24); | ||
| 104 | p->timing[10] = nv_ro32(bios, data + 0x28); | ||
| 105 | p->timing_20_2e_03 = (nv_ro08(bios, data + 0x2e) & 0x03) >> 0; | ||
| 106 | p->timing_20_2e_30 = (nv_ro08(bios, data + 0x2e) & 0x30) >> 4; | ||
| 107 | p->timing_20_2e_c0 = (nv_ro08(bios, data + 0x2e) & 0xc0) >> 6; | ||
| 108 | p->timing_20_2f_03 = (nv_ro08(bios, data + 0x2f) & 0x03) >> 0; | ||
| 109 | temp = nv_ro16(bios, data + 0x2c); | ||
| 110 | p->timing_20_2c_003f = (temp & 0x003f) >> 0; | ||
| 111 | p->timing_20_2c_1fc0 = (temp & 0x1fc0) >> 6; | ||
| 112 | p->timing_20_30_07 = (nv_ro08(bios, data + 0x30) & 0x07) >> 0; | ||
| 113 | p->timing_20_30_f8 = (nv_ro08(bios, data + 0x30) & 0xf8) >> 3; | ||
| 114 | temp = nv_ro16(bios, data + 0x31); | ||
| 115 | p->timing_20_31_0007 = (temp & 0x0007) >> 0; | ||
| 116 | p->timing_20_31_0078 = (temp & 0x0078) >> 3; | ||
| 117 | p->timing_20_31_0780 = (temp & 0x0780) >> 7; | ||
| 118 | p->timing_20_31_0800 = (temp & 0x0800) >> 11; | ||
| 119 | p->timing_20_31_7000 = (temp & 0x7000) >> 12; | ||
| 120 | p->timing_20_31_8000 = (temp & 0x8000) >> 15; | ||
| 121 | break; | ||
| 122 | default: | ||
| 123 | data = 0; | ||
| 124 | break; | ||
| 125 | } | ||
| 126 | return data; | ||
| 127 | } | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/base.c b/drivers/gpu/drm/nouveau/core/subdev/clock/base.c index e2938a21b06f..dd62baead39c 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/clock/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/clock/base.c | |||
| @@ -182,9 +182,12 @@ nouveau_pstate_prog(struct nouveau_clock *clk, int pstatei) | |||
| 182 | clk->pstate = pstatei; | 182 | clk->pstate = pstatei; |
| 183 | 183 | ||
| 184 | if (pfb->ram->calc) { | 184 | if (pfb->ram->calc) { |
| 185 | ret = pfb->ram->calc(pfb, pstate->base.domain[nv_clk_src_mem]); | 185 | int khz = pstate->base.domain[nv_clk_src_mem]; |
| 186 | if (ret == 0) | 186 | do { |
| 187 | ret = pfb->ram->prog(pfb); | 187 | ret = pfb->ram->calc(pfb, khz); |
| 188 | if (ret == 0) | ||
| 189 | ret = pfb->ram->prog(pfb); | ||
| 190 | } while (ret > 0); | ||
| 188 | pfb->ram->tidy(pfb); | 191 | pfb->ram->tidy(pfb); |
| 189 | } | 192 | } |
| 190 | 193 | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c index 30c1f3a4158e..b74db6cfc4e2 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c +++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c | |||
| @@ -25,7 +25,7 @@ | |||
| 25 | #include <subdev/bios.h> | 25 | #include <subdev/bios.h> |
| 26 | #include <subdev/bios/pll.h> | 26 | #include <subdev/bios/pll.h> |
| 27 | #include <subdev/clock.h> | 27 | #include <subdev/clock.h> |
| 28 | #include <subdev/devinit/priv.h> | 28 | #include <subdev/devinit/nv04.h> |
| 29 | 29 | ||
| 30 | #include "pll.h" | 30 | #include "pll.h" |
| 31 | 31 | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c index 4c62e84b96f5..d3c37c96f0e7 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c | |||
| @@ -457,7 +457,7 @@ nve0_domain[] = { | |||
| 457 | { nv_clk_src_gpc , 0x00, NVKM_CLK_DOM_FLAG_CORE, "core", 2000 }, | 457 | { nv_clk_src_gpc , 0x00, NVKM_CLK_DOM_FLAG_CORE, "core", 2000 }, |
| 458 | { nv_clk_src_hubk07 , 0x01, NVKM_CLK_DOM_FLAG_CORE }, | 458 | { nv_clk_src_hubk07 , 0x01, NVKM_CLK_DOM_FLAG_CORE }, |
| 459 | { nv_clk_src_rop , 0x02, NVKM_CLK_DOM_FLAG_CORE }, | 459 | { nv_clk_src_rop , 0x02, NVKM_CLK_DOM_FLAG_CORE }, |
| 460 | { nv_clk_src_mem , 0x03, 0, "memory", 1000 }, | 460 | { nv_clk_src_mem , 0x03, 0, "memory", 500 }, |
| 461 | { nv_clk_src_hubk06 , 0x04, NVKM_CLK_DOM_FLAG_CORE }, | 461 | { nv_clk_src_hubk06 , 0x04, NVKM_CLK_DOM_FLAG_CORE }, |
| 462 | { nv_clk_src_hubk01 , 0x05 }, | 462 | { nv_clk_src_hubk01 , 0x05 }, |
| 463 | { nv_clk_src_vdec , 0x06 }, | 463 | { nv_clk_src_vdec , 0x06 }, |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c index 79c81d3d9bac..8fa34e8152c2 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c | |||
| @@ -24,9 +24,11 @@ | |||
| 24 | 24 | ||
| 25 | #include <core/option.h> | 25 | #include <core/option.h> |
| 26 | 26 | ||
| 27 | #include <subdev/devinit.h> | ||
| 28 | #include <subdev/bios.h> | 27 | #include <subdev/bios.h> |
| 29 | #include <subdev/bios/init.h> | 28 | #include <subdev/bios/init.h> |
| 29 | #include <subdev/vga.h> | ||
| 30 | |||
| 31 | #include "priv.h" | ||
| 30 | 32 | ||
| 31 | int | 33 | int |
| 32 | _nouveau_devinit_fini(struct nouveau_object *object, bool suspend) | 34 | _nouveau_devinit_fini(struct nouveau_object *object, bool suspend) |
| @@ -37,18 +39,41 @@ _nouveau_devinit_fini(struct nouveau_object *object, bool suspend) | |||
| 37 | if (suspend) | 39 | if (suspend) |
| 38 | devinit->post = true; | 40 | devinit->post = true; |
| 39 | 41 | ||
| 42 | /* unlock the extended vga crtc regs */ | ||
| 43 | nv_lockvgac(devinit, false); | ||
| 44 | |||
| 40 | return nouveau_subdev_fini(&devinit->base, suspend); | 45 | return nouveau_subdev_fini(&devinit->base, suspend); |
| 41 | } | 46 | } |
| 42 | 47 | ||
| 43 | int | 48 | int |
| 44 | _nouveau_devinit_init(struct nouveau_object *object) | 49 | _nouveau_devinit_init(struct nouveau_object *object) |
| 45 | { | 50 | { |
| 51 | struct nouveau_devinit_impl *impl = (void *)object->oclass; | ||
| 46 | struct nouveau_devinit *devinit = (void *)object; | 52 | struct nouveau_devinit *devinit = (void *)object; |
| 47 | int ret = nouveau_subdev_init(&devinit->base); | 53 | int ret; |
| 54 | |||
| 55 | ret = nouveau_subdev_init(&devinit->base); | ||
| 56 | if (ret) | ||
| 57 | return ret; | ||
| 58 | |||
| 59 | ret = nvbios_init(&devinit->base, devinit->post); | ||
| 48 | if (ret) | 60 | if (ret) |
| 49 | return ret; | 61 | return ret; |
| 50 | 62 | ||
| 51 | return nvbios_init(&devinit->base, devinit->post); | 63 | if (impl->disable) |
| 64 | nv_device(devinit)->disable_mask |= impl->disable(devinit); | ||
| 65 | return 0; | ||
| 66 | } | ||
| 67 | |||
| 68 | void | ||
| 69 | _nouveau_devinit_dtor(struct nouveau_object *object) | ||
| 70 | { | ||
| 71 | struct nouveau_devinit *devinit = (void *)object; | ||
| 72 | |||
| 73 | /* lock crtc regs */ | ||
| 74 | nv_lockvgac(devinit, true); | ||
| 75 | |||
| 76 | nouveau_subdev_destroy(&devinit->base); | ||
| 52 | } | 77 | } |
| 53 | 78 | ||
| 54 | int | 79 | int |
| @@ -57,6 +82,7 @@ nouveau_devinit_create_(struct nouveau_object *parent, | |||
| 57 | struct nouveau_oclass *oclass, | 82 | struct nouveau_oclass *oclass, |
| 58 | int size, void **pobject) | 83 | int size, void **pobject) |
| 59 | { | 84 | { |
| 85 | struct nouveau_devinit_impl *impl = (void *)oclass; | ||
| 60 | struct nouveau_device *device = nv_device(parent); | 86 | struct nouveau_device *device = nv_device(parent); |
| 61 | struct nouveau_devinit *devinit; | 87 | struct nouveau_devinit *devinit; |
| 62 | int ret; | 88 | int ret; |
| @@ -68,5 +94,7 @@ nouveau_devinit_create_(struct nouveau_object *parent, | |||
| 68 | return ret; | 94 | return ret; |
| 69 | 95 | ||
| 70 | devinit->post = nouveau_boolopt(device->cfgopt, "NvForcePost", false); | 96 | devinit->post = nouveau_boolopt(device->cfgopt, "NvForcePost", false); |
| 97 | devinit->meminit = impl->meminit; | ||
| 98 | devinit->pll_set = impl->pll_set; | ||
| 71 | return 0; | 99 | return 0; |
| 72 | } | 100 | } |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c index 27c8235f1a85..7037eae46e44 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c +++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c | |||
| @@ -27,12 +27,7 @@ | |||
| 27 | #include <subdev/vga.h> | 27 | #include <subdev/vga.h> |
| 28 | 28 | ||
| 29 | #include "fbmem.h" | 29 | #include "fbmem.h" |
| 30 | #include "priv.h" | 30 | #include "nv04.h" |
| 31 | |||
| 32 | struct nv04_devinit_priv { | ||
| 33 | struct nouveau_devinit base; | ||
| 34 | int owner; | ||
| 35 | }; | ||
| 36 | 31 | ||
| 37 | static void | 32 | static void |
| 38 | nv04_devinit_meminit(struct nouveau_devinit *devinit) | 33 | nv04_devinit_meminit(struct nouveau_devinit *devinit) |
| @@ -393,17 +388,21 @@ int | |||
| 393 | nv04_devinit_fini(struct nouveau_object *object, bool suspend) | 388 | nv04_devinit_fini(struct nouveau_object *object, bool suspend) |
| 394 | { | 389 | { |
| 395 | struct nv04_devinit_priv *priv = (void *)object; | 390 | struct nv04_devinit_priv *priv = (void *)object; |
| 391 | int ret; | ||
| 396 | 392 | ||
| 397 | /* make i2c busses accessible */ | 393 | /* make i2c busses accessible */ |
| 398 | nv_mask(priv, 0x000200, 0x00000001, 0x00000001); | 394 | nv_mask(priv, 0x000200, 0x00000001, 0x00000001); |
| 399 | 395 | ||
| 400 | /* unlock extended vga crtc regs, and unslave crtcs */ | 396 | ret = nouveau_devinit_fini(&priv->base, suspend); |
| 401 | nv_lockvgac(priv, false); | 397 | if (ret) |
| 398 | return ret; | ||
| 399 | |||
| 400 | /* unslave crtcs */ | ||
| 402 | if (priv->owner < 0) | 401 | if (priv->owner < 0) |
| 403 | priv->owner = nv_rdvgaowner(priv); | 402 | priv->owner = nv_rdvgaowner(priv); |
| 404 | nv_wrvgaowner(priv, 0); | 403 | nv_wrvgaowner(priv, 0); |
| 405 | 404 | ||
| 406 | return nouveau_devinit_fini(&priv->base, suspend); | 405 | return 0; |
| 407 | } | 406 | } |
| 408 | 407 | ||
| 409 | int | 408 | int |
| @@ -431,14 +430,13 @@ nv04_devinit_dtor(struct nouveau_object *object) | |||
| 431 | { | 430 | { |
| 432 | struct nv04_devinit_priv *priv = (void *)object; | 431 | struct nv04_devinit_priv *priv = (void *)object; |
| 433 | 432 | ||
| 434 | /* restore vga owner saved at first init, and lock crtc regs */ | 433 | /* restore vga owner saved at first init */ |
| 435 | nv_wrvgaowner(priv, priv->owner); | 434 | nv_wrvgaowner(priv, priv->owner); |
| 436 | nv_lockvgac(priv, true); | ||
| 437 | 435 | ||
| 438 | nouveau_devinit_destroy(&priv->base); | 436 | nouveau_devinit_destroy(&priv->base); |
| 439 | } | 437 | } |
| 440 | 438 | ||
| 441 | static int | 439 | int |
| 442 | nv04_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | 440 | nv04_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine, |
| 443 | struct nouveau_oclass *oclass, void *data, u32 size, | 441 | struct nouveau_oclass *oclass, void *data, u32 size, |
| 444 | struct nouveau_object **pobject) | 442 | struct nouveau_object **pobject) |
| @@ -451,19 +449,19 @@ nv04_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 451 | if (ret) | 449 | if (ret) |
| 452 | return ret; | 450 | return ret; |
| 453 | 451 | ||
| 454 | priv->base.meminit = nv04_devinit_meminit; | ||
| 455 | priv->base.pll_set = nv04_devinit_pll_set; | ||
| 456 | priv->owner = -1; | 452 | priv->owner = -1; |
| 457 | return 0; | 453 | return 0; |
| 458 | } | 454 | } |
| 459 | 455 | ||
| 460 | struct nouveau_oclass | 456 | struct nouveau_oclass * |
| 461 | nv04_devinit_oclass = { | 457 | nv04_devinit_oclass = &(struct nouveau_devinit_impl) { |
| 462 | .handle = NV_SUBDEV(DEVINIT, 0x04), | 458 | .base.handle = NV_SUBDEV(DEVINIT, 0x04), |
| 463 | .ofuncs = &(struct nouveau_ofuncs) { | 459 | .base.ofuncs = &(struct nouveau_ofuncs) { |
| 464 | .ctor = nv04_devinit_ctor, | 460 | .ctor = nv04_devinit_ctor, |
| 465 | .dtor = nv04_devinit_dtor, | 461 | .dtor = nv04_devinit_dtor, |
| 466 | .init = nv04_devinit_init, | 462 | .init = nv04_devinit_init, |
| 467 | .fini = nv04_devinit_fini, | 463 | .fini = nv04_devinit_fini, |
| 468 | }, | 464 | }, |
| 469 | }; | 465 | .meminit = nv04_devinit_meminit, |
| 466 | .pll_set = nv04_devinit_pll_set, | ||
| 467 | }.base; | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.h new file mode 100644 index 000000000000..23470a57510c --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.h | |||
| @@ -0,0 +1,23 @@ | |||
| 1 | #ifndef __NVKM_DEVINIT_NV04_H__ | ||
| 2 | #define __NVKM_DEVINIT_NV04_H__ | ||
| 3 | |||
| 4 | #include "priv.h" | ||
| 5 | |||
| 6 | struct nv04_devinit_priv { | ||
| 7 | struct nouveau_devinit base; | ||
| 8 | u8 owner; | ||
| 9 | }; | ||
| 10 | |||
| 11 | int nv04_devinit_ctor(struct nouveau_object *, struct nouveau_object *, | ||
| 12 | struct nouveau_oclass *, void *, u32, | ||
| 13 | struct nouveau_object **); | ||
| 14 | void nv04_devinit_dtor(struct nouveau_object *); | ||
| 15 | int nv04_devinit_init(struct nouveau_object *); | ||
| 16 | int nv04_devinit_fini(struct nouveau_object *, bool); | ||
| 17 | int nv04_devinit_pll_set(struct nouveau_devinit *, u32, u32); | ||
| 18 | |||
| 19 | void setPLL_single(struct nouveau_devinit *, u32, struct nouveau_pll_vals *); | ||
| 20 | void setPLL_double_highregs(struct nouveau_devinit *, u32, struct nouveau_pll_vals *); | ||
| 21 | void setPLL_double_lowregs(struct nouveau_devinit *, u32, struct nouveau_pll_vals *); | ||
| 22 | |||
| 23 | #endif | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c index b1912a8a8942..98b7e6780dc7 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c +++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c | |||
| @@ -29,12 +29,7 @@ | |||
| 29 | #include <subdev/vga.h> | 29 | #include <subdev/vga.h> |
| 30 | 30 | ||
| 31 | #include "fbmem.h" | 31 | #include "fbmem.h" |
| 32 | #include "priv.h" | 32 | #include "nv04.h" |
| 33 | |||
| 34 | struct nv05_devinit_priv { | ||
| 35 | struct nouveau_devinit base; | ||
| 36 | u8 owner; | ||
| 37 | }; | ||
| 38 | 33 | ||
| 39 | static void | 34 | static void |
| 40 | nv05_devinit_meminit(struct nouveau_devinit *devinit) | 35 | nv05_devinit_meminit(struct nouveau_devinit *devinit) |
| @@ -49,7 +44,7 @@ nv05_devinit_meminit(struct nouveau_devinit *devinit) | |||
| 49 | { 0x06, 0x00 }, | 44 | { 0x06, 0x00 }, |
| 50 | { 0x00, 0x00 } | 45 | { 0x00, 0x00 } |
| 51 | }; | 46 | }; |
| 52 | struct nv05_devinit_priv *priv = (void *)devinit; | 47 | struct nv04_devinit_priv *priv = (void *)devinit; |
| 53 | struct nouveau_bios *bios = nouveau_bios(priv); | 48 | struct nouveau_bios *bios = nouveau_bios(priv); |
| 54 | struct io_mapping *fb; | 49 | struct io_mapping *fb; |
| 55 | u32 patt = 0xdeadbeef; | 50 | u32 patt = 0xdeadbeef; |
| @@ -130,31 +125,15 @@ out: | |||
| 130 | fbmem_fini(fb); | 125 | fbmem_fini(fb); |
| 131 | } | 126 | } |
| 132 | 127 | ||
| 133 | static int | 128 | struct nouveau_oclass * |
| 134 | nv05_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | 129 | nv05_devinit_oclass = &(struct nouveau_devinit_impl) { |
| 135 | struct nouveau_oclass *oclass, void *data, u32 size, | 130 | .base.handle = NV_SUBDEV(DEVINIT, 0x05), |
| 136 | struct nouveau_object **pobject) | 131 | .base.ofuncs = &(struct nouveau_ofuncs) { |
| 137 | { | 132 | .ctor = nv04_devinit_ctor, |
| 138 | struct nv05_devinit_priv *priv; | ||
| 139 | int ret; | ||
| 140 | |||
| 141 | ret = nouveau_devinit_create(parent, engine, oclass, &priv); | ||
| 142 | *pobject = nv_object(priv); | ||
| 143 | if (ret) | ||
| 144 | return ret; | ||
| 145 | |||
| 146 | priv->base.meminit = nv05_devinit_meminit; | ||
| 147 | priv->base.pll_set = nv04_devinit_pll_set; | ||
| 148 | return 0; | ||
| 149 | } | ||
| 150 | |||
| 151 | struct nouveau_oclass | ||
| 152 | nv05_devinit_oclass = { | ||
| 153 | .handle = NV_SUBDEV(DEVINIT, 0x05), | ||
| 154 | .ofuncs = &(struct nouveau_ofuncs) { | ||
| 155 | .ctor = nv05_devinit_ctor, | ||
| 156 | .dtor = nv04_devinit_dtor, | 133 | .dtor = nv04_devinit_dtor, |
| 157 | .init = nv04_devinit_init, | 134 | .init = nv04_devinit_init, |
| 158 | .fini = nv04_devinit_fini, | 135 | .fini = nv04_devinit_fini, |
| 159 | }, | 136 | }, |
| 160 | }; | 137 | .meminit = nv05_devinit_meminit, |
| 138 | .pll_set = nv04_devinit_pll_set, | ||
| 139 | }.base; | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c index 8d274dba1ef1..32b3d2131a7f 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c +++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c | |||
| @@ -27,17 +27,12 @@ | |||
| 27 | #include <subdev/vga.h> | 27 | #include <subdev/vga.h> |
| 28 | 28 | ||
| 29 | #include "fbmem.h" | 29 | #include "fbmem.h" |
| 30 | #include "priv.h" | 30 | #include "nv04.h" |
| 31 | |||
| 32 | struct nv10_devinit_priv { | ||
| 33 | struct nouveau_devinit base; | ||
| 34 | u8 owner; | ||
| 35 | }; | ||
| 36 | 31 | ||
| 37 | static void | 32 | static void |
| 38 | nv10_devinit_meminit(struct nouveau_devinit *devinit) | 33 | nv10_devinit_meminit(struct nouveau_devinit *devinit) |
| 39 | { | 34 | { |
| 40 | struct nv10_devinit_priv *priv = (void *)devinit; | 35 | struct nv04_devinit_priv *priv = (void *)devinit; |
| 41 | static const int mem_width[] = { 0x10, 0x00, 0x20 }; | 36 | static const int mem_width[] = { 0x10, 0x00, 0x20 }; |
| 42 | int mem_width_count; | 37 | int mem_width_count; |
| 43 | uint32_t patt = 0xdeadbeef; | 38 | uint32_t patt = 0xdeadbeef; |
| @@ -101,31 +96,15 @@ amount_found: | |||
| 101 | fbmem_fini(fb); | 96 | fbmem_fini(fb); |
| 102 | } | 97 | } |
| 103 | 98 | ||
| 104 | static int | 99 | struct nouveau_oclass * |
| 105 | nv10_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | 100 | nv10_devinit_oclass = &(struct nouveau_devinit_impl) { |
| 106 | struct nouveau_oclass *oclass, void *data, u32 size, | 101 | .base.handle = NV_SUBDEV(DEVINIT, 0x10), |
| 107 | struct nouveau_object **pobject) | 102 | .base.ofuncs = &(struct nouveau_ofuncs) { |
| 108 | { | 103 | .ctor = nv04_devinit_ctor, |
| 109 | struct nv10_devinit_priv *priv; | ||
| 110 | int ret; | ||
| 111 | |||
| 112 | ret = nouveau_devinit_create(parent, engine, oclass, &priv); | ||
| 113 | *pobject = nv_object(priv); | ||
| 114 | if (ret) | ||
| 115 | return ret; | ||
| 116 | |||
| 117 | priv->base.meminit = nv10_devinit_meminit; | ||
| 118 | priv->base.pll_set = nv04_devinit_pll_set; | ||
| 119 | return 0; | ||
| 120 | } | ||
| 121 | |||
| 122 | struct nouveau_oclass | ||
| 123 | nv10_devinit_oclass = { | ||
| 124 | .handle = NV_SUBDEV(DEVINIT, 0x10), | ||
| 125 | .ofuncs = &(struct nouveau_ofuncs) { | ||
| 126 | .ctor = nv10_devinit_ctor, | ||
| 127 | .dtor = nv04_devinit_dtor, | 104 | .dtor = nv04_devinit_dtor, |
| 128 | .init = nv04_devinit_init, | 105 | .init = nv04_devinit_init, |
| 129 | .fini = nv04_devinit_fini, | 106 | .fini = nv04_devinit_fini, |
| 130 | }, | 107 | }, |
| 131 | }; | 108 | .meminit = nv10_devinit_meminit, |
| 109 | .pll_set = nv04_devinit_pll_set, | ||
| 110 | }.base; | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c index e9743cdabe75..526d0c6faacd 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c +++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c | |||
| @@ -22,37 +22,16 @@ | |||
| 22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
| 23 | */ | 23 | */ |
| 24 | 24 | ||
| 25 | #include "priv.h" | 25 | #include "nv04.h" |
| 26 | 26 | ||
| 27 | struct nv1a_devinit_priv { | 27 | struct nouveau_oclass * |
| 28 | struct nouveau_devinit base; | 28 | nv1a_devinit_oclass = &(struct nouveau_devinit_impl) { |
| 29 | u8 owner; | 29 | .base.handle = NV_SUBDEV(DEVINIT, 0x1a), |
| 30 | }; | 30 | .base.ofuncs = &(struct nouveau_ofuncs) { |
| 31 | 31 | .ctor = nv04_devinit_ctor, | |
| 32 | static int | ||
| 33 | nv1a_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | ||
| 34 | struct nouveau_oclass *oclass, void *data, u32 size, | ||
| 35 | struct nouveau_object **pobject) | ||
| 36 | { | ||
| 37 | struct nv1a_devinit_priv *priv; | ||
| 38 | int ret; | ||
| 39 | |||
| 40 | ret = nouveau_devinit_create(parent, engine, oclass, &priv); | ||
| 41 | *pobject = nv_object(priv); | ||
| 42 | if (ret) | ||
| 43 | return ret; | ||
| 44 | |||
| 45 | priv->base.pll_set = nv04_devinit_pll_set; | ||
| 46 | return 0; | ||
| 47 | } | ||
| 48 | |||
| 49 | struct nouveau_oclass | ||
| 50 | nv1a_devinit_oclass = { | ||
| 51 | .handle = NV_SUBDEV(DEVINIT, 0x1a), | ||
| 52 | .ofuncs = &(struct nouveau_ofuncs) { | ||
| 53 | .ctor = nv1a_devinit_ctor, | ||
| 54 | .dtor = nv04_devinit_dtor, | 32 | .dtor = nv04_devinit_dtor, |
| 55 | .init = nv04_devinit_init, | 33 | .init = nv04_devinit_init, |
| 56 | .fini = nv04_devinit_fini, | 34 | .fini = nv04_devinit_fini, |
| 57 | }, | 35 | }, |
| 58 | }; | 36 | .pll_set = nv04_devinit_pll_set, |
| 37 | }.base; | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c index 6cc6080d3bc0..4689ba303b0b 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c +++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c | |||
| @@ -24,18 +24,13 @@ | |||
| 24 | * | 24 | * |
| 25 | */ | 25 | */ |
| 26 | 26 | ||
| 27 | #include "priv.h" | 27 | #include "nv04.h" |
| 28 | #include "fbmem.h" | 28 | #include "fbmem.h" |
| 29 | 29 | ||
| 30 | struct nv20_devinit_priv { | ||
| 31 | struct nouveau_devinit base; | ||
| 32 | u8 owner; | ||
| 33 | }; | ||
| 34 | |||
| 35 | static void | 30 | static void |
| 36 | nv20_devinit_meminit(struct nouveau_devinit *devinit) | 31 | nv20_devinit_meminit(struct nouveau_devinit *devinit) |
| 37 | { | 32 | { |
| 38 | struct nv20_devinit_priv *priv = (void *)devinit; | 33 | struct nv04_devinit_priv *priv = (void *)devinit; |
| 39 | struct nouveau_device *device = nv_device(priv); | 34 | struct nouveau_device *device = nv_device(priv); |
| 40 | uint32_t mask = (device->chipset >= 0x25 ? 0x300 : 0x900); | 35 | uint32_t mask = (device->chipset >= 0x25 ? 0x300 : 0x900); |
| 41 | uint32_t amount, off; | 36 | uint32_t amount, off; |
| @@ -65,31 +60,15 @@ nv20_devinit_meminit(struct nouveau_devinit *devinit) | |||
| 65 | fbmem_fini(fb); | 60 | fbmem_fini(fb); |
| 66 | } | 61 | } |
| 67 | 62 | ||
| 68 | static int | 63 | struct nouveau_oclass * |
| 69 | nv20_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | 64 | nv20_devinit_oclass = &(struct nouveau_devinit_impl) { |
| 70 | struct nouveau_oclass *oclass, void *data, u32 size, | 65 | .base.handle = NV_SUBDEV(DEVINIT, 0x20), |
| 71 | struct nouveau_object **pobject) | 66 | .base.ofuncs = &(struct nouveau_ofuncs) { |
| 72 | { | 67 | .ctor = nv04_devinit_ctor, |
| 73 | struct nv20_devinit_priv *priv; | ||
| 74 | int ret; | ||
| 75 | |||
| 76 | ret = nouveau_devinit_create(parent, engine, oclass, &priv); | ||
| 77 | *pobject = nv_object(priv); | ||
| 78 | if (ret) | ||
| 79 | return ret; | ||
| 80 | |||
| 81 | priv->base.meminit = nv20_devinit_meminit; | ||
| 82 | priv->base.pll_set = nv04_devinit_pll_set; | ||
| 83 | return 0; | ||
| 84 | } | ||
| 85 | |||
| 86 | struct nouveau_oclass | ||
| 87 | nv20_devinit_oclass = { | ||
| 88 | .handle = NV_SUBDEV(DEVINIT, 0x20), | ||
| 89 | .ofuncs = &(struct nouveau_ofuncs) { | ||
| 90 | .ctor = nv20_devinit_ctor, | ||
| 91 | .dtor = nv04_devinit_dtor, | 68 | .dtor = nv04_devinit_dtor, |
| 92 | .init = nv04_devinit_init, | 69 | .init = nv04_devinit_init, |
| 93 | .fini = nv04_devinit_fini, | 70 | .fini = nv04_devinit_fini, |
| 94 | }, | 71 | }, |
| 95 | }; | 72 | .meminit = nv20_devinit_meminit, |
| 73 | .pll_set = nv04_devinit_pll_set, | ||
| 74 | }.base; | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c index 6df72247c477..b46c62a1d5d8 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c +++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c | |||
| @@ -28,9 +28,9 @@ | |||
| 28 | #include <subdev/bios/init.h> | 28 | #include <subdev/bios/init.h> |
| 29 | #include <subdev/vga.h> | 29 | #include <subdev/vga.h> |
| 30 | 30 | ||
| 31 | #include "priv.h" | 31 | #include "nv50.h" |
| 32 | 32 | ||
| 33 | static int | 33 | int |
| 34 | nv50_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq) | 34 | nv50_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq) |
| 35 | { | 35 | { |
| 36 | struct nv50_devinit_priv *priv = (void *)devinit; | 36 | struct nv50_devinit_priv *priv = (void *)devinit; |
| @@ -74,6 +74,19 @@ nv50_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq) | |||
| 74 | return 0; | 74 | return 0; |
| 75 | } | 75 | } |
| 76 | 76 | ||
| 77 | static u64 | ||
| 78 | nv50_devinit_disable(struct nouveau_devinit *devinit) | ||
| 79 | { | ||
| 80 | struct nv50_devinit_priv *priv = (void *)devinit; | ||
| 81 | u32 r001540 = nv_rd32(priv, 0x001540); | ||
| 82 | u64 disable = 0ULL; | ||
| 83 | |||
| 84 | if (!(r001540 & 0x40000000)) | ||
| 85 | disable |= (1ULL << NVDEV_ENGINE_MPEG); | ||
| 86 | |||
| 87 | return disable; | ||
| 88 | } | ||
| 89 | |||
| 77 | int | 90 | int |
| 78 | nv50_devinit_init(struct nouveau_object *object) | 91 | nv50_devinit_init(struct nouveau_object *object) |
| 79 | { | 92 | { |
| @@ -120,7 +133,7 @@ nv50_devinit_init(struct nouveau_object *object) | |||
| 120 | return 0; | 133 | return 0; |
| 121 | } | 134 | } |
| 122 | 135 | ||
| 123 | static int | 136 | int |
| 124 | nv50_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | 137 | nv50_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine, |
| 125 | struct nouveau_oclass *oclass, void *data, u32 size, | 138 | struct nouveau_oclass *oclass, void *data, u32 size, |
| 126 | struct nouveau_object **pobject) | 139 | struct nouveau_object **pobject) |
| @@ -133,17 +146,18 @@ nv50_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 133 | if (ret) | 146 | if (ret) |
| 134 | return ret; | 147 | return ret; |
| 135 | 148 | ||
| 136 | priv->base.pll_set = nv50_devinit_pll_set; | ||
| 137 | return 0; | 149 | return 0; |
| 138 | } | 150 | } |
| 139 | 151 | ||
| 140 | struct nouveau_oclass | 152 | struct nouveau_oclass * |
| 141 | nv50_devinit_oclass = { | 153 | nv50_devinit_oclass = &(struct nouveau_devinit_impl) { |
| 142 | .handle = NV_SUBDEV(DEVINIT, 0x50), | 154 | .base.handle = NV_SUBDEV(DEVINIT, 0x50), |
| 143 | .ofuncs = &(struct nouveau_ofuncs) { | 155 | .base.ofuncs = &(struct nouveau_ofuncs) { |
| 144 | .ctor = nv50_devinit_ctor, | 156 | .ctor = nv50_devinit_ctor, |
| 145 | .dtor = _nouveau_devinit_dtor, | 157 | .dtor = _nouveau_devinit_dtor, |
| 146 | .init = nv50_devinit_init, | 158 | .init = nv50_devinit_init, |
| 147 | .fini = _nouveau_devinit_fini, | 159 | .fini = _nouveau_devinit_fini, |
| 148 | }, | 160 | }, |
| 149 | }; | 161 | .pll_set = nv50_devinit_pll_set, |
| 162 | .disable = nv50_devinit_disable, | ||
| 163 | }.base; | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h new file mode 100644 index 000000000000..141c27e9f182 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h | |||
| @@ -0,0 +1,18 @@ | |||
| 1 | #ifndef __NVKM_DEVINIT_NV50_H__ | ||
| 2 | #define __NVKM_DEVINIT_NV50_H__ | ||
| 3 | |||
| 4 | #include "priv.h" | ||
| 5 | |||
| 6 | struct nv50_devinit_priv { | ||
| 7 | struct nouveau_devinit base; | ||
| 8 | }; | ||
| 9 | |||
| 10 | int nv50_devinit_ctor(struct nouveau_object *, struct nouveau_object *, | ||
| 11 | struct nouveau_oclass *, void *, u32, | ||
| 12 | struct nouveau_object **); | ||
| 13 | int nv50_devinit_init(struct nouveau_object *); | ||
| 14 | int nv50_devinit_pll_set(struct nouveau_devinit *, u32, u32); | ||
| 15 | |||
| 16 | int nva3_devinit_pll_set(struct nouveau_devinit *, u32, u32); | ||
| 17 | |||
| 18 | #endif | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv84.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv84.c new file mode 100644 index 000000000000..787422505d87 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv84.c | |||
| @@ -0,0 +1,63 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2013 Red Hat Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | * Authors: Ben Skeggs | ||
| 23 | */ | ||
| 24 | |||
| 25 | #include "nv50.h" | ||
| 26 | |||
| 27 | static u64 | ||
| 28 | nv84_devinit_disable(struct nouveau_devinit *devinit) | ||
| 29 | { | ||
| 30 | struct nv50_devinit_priv *priv = (void *)devinit; | ||
| 31 | u32 r001540 = nv_rd32(priv, 0x001540); | ||
| 32 | u32 r00154c = nv_rd32(priv, 0x00154c); | ||
| 33 | u64 disable = 0ULL; | ||
| 34 | |||
| 35 | if (!(r001540 & 0x40000000)) { | ||
| 36 | disable |= (1ULL << NVDEV_ENGINE_MPEG); | ||
| 37 | disable |= (1ULL << NVDEV_ENGINE_VP); | ||
| 38 | disable |= (1ULL << NVDEV_ENGINE_BSP); | ||
| 39 | disable |= (1ULL << NVDEV_ENGINE_CRYPT); | ||
| 40 | } | ||
| 41 | |||
| 42 | if (!(r00154c & 0x00000004)) | ||
| 43 | disable |= (1ULL << NVDEV_ENGINE_DISP); | ||
| 44 | if (!(r00154c & 0x00000020)) | ||
| 45 | disable |= (1ULL << NVDEV_ENGINE_BSP); | ||
| 46 | if (!(r00154c & 0x00000040)) | ||
| 47 | disable |= (1ULL << NVDEV_ENGINE_CRYPT); | ||
| 48 | |||
| 49 | return disable; | ||
| 50 | } | ||
| 51 | |||
| 52 | struct nouveau_oclass * | ||
| 53 | nv84_devinit_oclass = &(struct nouveau_devinit_impl) { | ||
| 54 | .base.handle = NV_SUBDEV(DEVINIT, 0x84), | ||
| 55 | .base.ofuncs = &(struct nouveau_ofuncs) { | ||
| 56 | .ctor = nv50_devinit_ctor, | ||
| 57 | .dtor = _nouveau_devinit_dtor, | ||
| 58 | .init = nv50_devinit_init, | ||
| 59 | .fini = _nouveau_devinit_fini, | ||
| 60 | }, | ||
| 61 | .pll_set = nv50_devinit_pll_set, | ||
| 62 | .disable = nv84_devinit_disable, | ||
| 63 | }.base; | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv98.c new file mode 100644 index 000000000000..2b0e963fc6f0 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv98.c | |||
| @@ -0,0 +1,62 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2013 Red Hat Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | * Authors: Ben Skeggs | ||
| 23 | */ | ||
| 24 | |||
| 25 | #include "nv50.h" | ||
| 26 | |||
| 27 | static u64 | ||
| 28 | nv98_devinit_disable(struct nouveau_devinit *devinit) | ||
| 29 | { | ||
| 30 | struct nv50_devinit_priv *priv = (void *)devinit; | ||
| 31 | u32 r001540 = nv_rd32(priv, 0x001540); | ||
| 32 | u32 r00154c = nv_rd32(priv, 0x00154c); | ||
| 33 | u64 disable = 0ULL; | ||
| 34 | |||
| 35 | if (!(r001540 & 0x40000000)) { | ||
| 36 | disable |= (1ULL << NVDEV_ENGINE_VP); | ||
| 37 | disable |= (1ULL << NVDEV_ENGINE_BSP); | ||
| 38 | disable |= (1ULL << NVDEV_ENGINE_PPP); | ||
| 39 | } | ||
| 40 | |||
| 41 | if (!(r00154c & 0x00000004)) | ||
| 42 | disable |= (1ULL << NVDEV_ENGINE_DISP); | ||
| 43 | if (!(r00154c & 0x00000020)) | ||
| 44 | disable |= (1ULL << NVDEV_ENGINE_BSP); | ||
| 45 | if (!(r00154c & 0x00000040)) | ||
| 46 | disable |= (1ULL << NVDEV_ENGINE_CRYPT); | ||
| 47 | |||
| 48 | return disable; | ||
| 49 | } | ||
| 50 | |||
| 51 | struct nouveau_oclass * | ||
| 52 | nv98_devinit_oclass = &(struct nouveau_devinit_impl) { | ||
| 53 | .base.handle = NV_SUBDEV(DEVINIT, 0x98), | ||
| 54 | .base.ofuncs = &(struct nouveau_ofuncs) { | ||
| 55 | .ctor = nv50_devinit_ctor, | ||
| 56 | .dtor = _nouveau_devinit_dtor, | ||
| 57 | .init = nv50_devinit_init, | ||
| 58 | .fini = _nouveau_devinit_fini, | ||
| 59 | }, | ||
| 60 | .pll_set = nv50_devinit_pll_set, | ||
| 61 | .disable = nv98_devinit_disable, | ||
| 62 | }.base; | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c index 76a68b290141..6dedf1dad7f7 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c +++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c | |||
| @@ -22,12 +22,12 @@ | |||
| 22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
| 23 | */ | 23 | */ |
| 24 | 24 | ||
| 25 | #include "priv.h" | 25 | #include "nv50.h" |
| 26 | 26 | ||
| 27 | static int | 27 | int |
| 28 | nva3_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq) | 28 | nva3_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq) |
| 29 | { | 29 | { |
| 30 | struct nva3_devinit_priv *priv = (void *)devinit; | 30 | struct nv50_devinit_priv *priv = (void *)devinit; |
| 31 | struct nouveau_bios *bios = nouveau_bios(priv); | 31 | struct nouveau_bios *bios = nouveau_bios(priv); |
| 32 | struct nvbios_pll info; | 32 | struct nvbios_pll info; |
| 33 | int N, fN, M, P; | 33 | int N, fN, M, P; |
| @@ -58,30 +58,38 @@ nva3_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq) | |||
| 58 | return ret; | 58 | return ret; |
| 59 | } | 59 | } |
| 60 | 60 | ||
| 61 | static int | 61 | static u64 |
| 62 | nva3_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | 62 | nva3_devinit_disable(struct nouveau_devinit *devinit) |
| 63 | struct nouveau_oclass *oclass, void *data, u32 size, | ||
| 64 | struct nouveau_object **pobject) | ||
| 65 | { | 63 | { |
| 66 | struct nv50_devinit_priv *priv; | 64 | struct nv50_devinit_priv *priv = (void *)devinit; |
| 67 | int ret; | 65 | u32 r001540 = nv_rd32(priv, 0x001540); |
| 66 | u32 r00154c = nv_rd32(priv, 0x00154c); | ||
| 67 | u64 disable = 0ULL; | ||
| 68 | 68 | ||
| 69 | ret = nouveau_devinit_create(parent, engine, oclass, &priv); | 69 | if (!(r001540 & 0x40000000)) { |
| 70 | *pobject = nv_object(priv); | 70 | disable |= (1ULL << NVDEV_ENGINE_VP); |
| 71 | if (ret) | 71 | disable |= (1ULL << NVDEV_ENGINE_PPP); |
| 72 | return ret; | 72 | } |
| 73 | |||
| 74 | if (!(r00154c & 0x00000004)) | ||
| 75 | disable |= (1ULL << NVDEV_ENGINE_DISP); | ||
| 76 | if (!(r00154c & 0x00000020)) | ||
| 77 | disable |= (1ULL << NVDEV_ENGINE_BSP); | ||
| 78 | if (!(r00154c & 0x00000200)) | ||
| 79 | disable |= (1ULL << NVDEV_ENGINE_COPY0); | ||
| 73 | 80 | ||
| 74 | priv->base.pll_set = nva3_devinit_pll_set; | 81 | return disable; |
| 75 | return 0; | ||
| 76 | } | 82 | } |
| 77 | 83 | ||
| 78 | struct nouveau_oclass | 84 | struct nouveau_oclass * |
| 79 | nva3_devinit_oclass = { | 85 | nva3_devinit_oclass = &(struct nouveau_devinit_impl) { |
| 80 | .handle = NV_SUBDEV(DEVINIT, 0xa3), | 86 | .base.handle = NV_SUBDEV(DEVINIT, 0xa3), |
| 81 | .ofuncs = &(struct nouveau_ofuncs) { | 87 | .base.ofuncs = &(struct nouveau_ofuncs) { |
| 82 | .ctor = nva3_devinit_ctor, | 88 | .ctor = nv50_devinit_ctor, |
| 83 | .dtor = _nouveau_devinit_dtor, | 89 | .dtor = _nouveau_devinit_dtor, |
| 84 | .init = nv50_devinit_init, | 90 | .init = nv50_devinit_init, |
| 85 | .fini = _nouveau_devinit_fini, | 91 | .fini = _nouveau_devinit_fini, |
| 86 | }, | 92 | }, |
| 87 | }; | 93 | .pll_set = nva3_devinit_pll_set, |
| 94 | .disable = nva3_devinit_disable, | ||
| 95 | }.base; | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nvaf.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nvaf.c new file mode 100644 index 000000000000..4fc68d27eff3 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nvaf.c | |||
| @@ -0,0 +1,63 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2013 Red Hat Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | * Authors: Ben Skeggs | ||
| 23 | */ | ||
| 24 | |||
| 25 | #include "nv50.h" | ||
| 26 | |||
| 27 | static u64 | ||
| 28 | nvaf_devinit_disable(struct nouveau_devinit *devinit) | ||
| 29 | { | ||
| 30 | struct nv50_devinit_priv *priv = (void *)devinit; | ||
| 31 | u32 r001540 = nv_rd32(priv, 0x001540); | ||
| 32 | u32 r00154c = nv_rd32(priv, 0x00154c); | ||
| 33 | u64 disable = 0; | ||
| 34 | |||
| 35 | if (!(r001540 & 0x40000000)) { | ||
| 36 | disable |= (1ULL << NVDEV_ENGINE_VP); | ||
| 37 | disable |= (1ULL << NVDEV_ENGINE_PPP); | ||
| 38 | } | ||
| 39 | |||
| 40 | if (!(r00154c & 0x00000004)) | ||
| 41 | disable |= (1ULL << NVDEV_ENGINE_DISP); | ||
| 42 | if (!(r00154c & 0x00000020)) | ||
| 43 | disable |= (1ULL << NVDEV_ENGINE_BSP); | ||
| 44 | if (!(r00154c & 0x00000040)) | ||
| 45 | disable |= (1ULL << NVDEV_ENGINE_VIC); | ||
| 46 | if (!(r00154c & 0x00000200)) | ||
| 47 | disable |= (1ULL << NVDEV_ENGINE_COPY0); | ||
| 48 | |||
| 49 | return disable; | ||
| 50 | } | ||
| 51 | |||
| 52 | struct nouveau_oclass * | ||
| 53 | nvaf_devinit_oclass = &(struct nouveau_devinit_impl) { | ||
| 54 | .base.handle = NV_SUBDEV(DEVINIT, 0xaf), | ||
| 55 | .base.ofuncs = &(struct nouveau_ofuncs) { | ||
| 56 | .ctor = nv50_devinit_ctor, | ||
| 57 | .dtor = _nouveau_devinit_dtor, | ||
| 58 | .init = nv50_devinit_init, | ||
| 59 | .fini = _nouveau_devinit_fini, | ||
| 60 | }, | ||
| 61 | .pll_set = nva3_devinit_pll_set, | ||
| 62 | .disable = nvaf_devinit_disable, | ||
| 63 | }.base; | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c index 19e265bf4574..fa7e63766b1b 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c | |||
| @@ -22,12 +22,12 @@ | |||
| 22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
| 23 | */ | 23 | */ |
| 24 | 24 | ||
| 25 | #include "priv.h" | 25 | #include "nv50.h" |
| 26 | 26 | ||
| 27 | static int | 27 | static int |
| 28 | nvc0_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq) | 28 | nvc0_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq) |
| 29 | { | 29 | { |
| 30 | struct nvc0_devinit_priv *priv = (void *)devinit; | 30 | struct nv50_devinit_priv *priv = (void *)devinit; |
| 31 | struct nouveau_bios *bios = nouveau_bios(priv); | 31 | struct nouveau_bios *bios = nouveau_bios(priv); |
| 32 | struct nvbios_pll info; | 32 | struct nvbios_pll info; |
| 33 | int N, fN, M, P; | 33 | int N, fN, M, P; |
| @@ -59,6 +59,33 @@ nvc0_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq) | |||
| 59 | return ret; | 59 | return ret; |
| 60 | } | 60 | } |
| 61 | 61 | ||
| 62 | static u64 | ||
| 63 | nvc0_devinit_disable(struct nouveau_devinit *devinit) | ||
| 64 | { | ||
| 65 | struct nv50_devinit_priv *priv = (void *)devinit; | ||
| 66 | u32 r022500 = nv_rd32(priv, 0x022500); | ||
| 67 | u64 disable = 0ULL; | ||
| 68 | |||
| 69 | if (r022500 & 0x00000001) | ||
| 70 | disable |= (1ULL << NVDEV_ENGINE_DISP); | ||
| 71 | |||
| 72 | if (r022500 & 0x00000002) { | ||
| 73 | disable |= (1ULL << NVDEV_ENGINE_VP); | ||
| 74 | disable |= (1ULL << NVDEV_ENGINE_PPP); | ||
| 75 | } | ||
| 76 | |||
| 77 | if (r022500 & 0x00000004) | ||
| 78 | disable |= (1ULL << NVDEV_ENGINE_BSP); | ||
| 79 | if (r022500 & 0x00000008) | ||
| 80 | disable |= (1ULL << NVDEV_ENGINE_VENC); | ||
| 81 | if (r022500 & 0x00000100) | ||
| 82 | disable |= (1ULL << NVDEV_ENGINE_COPY0); | ||
| 83 | if (r022500 & 0x00000200) | ||
| 84 | disable |= (1ULL << NVDEV_ENGINE_COPY1); | ||
| 85 | |||
| 86 | return disable; | ||
| 87 | } | ||
| 88 | |||
| 62 | static int | 89 | static int |
| 63 | nvc0_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | 90 | nvc0_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine, |
| 64 | struct nouveau_oclass *oclass, void *data, u32 size, | 91 | struct nouveau_oclass *oclass, void *data, u32 size, |
| @@ -72,19 +99,20 @@ nvc0_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 72 | if (ret) | 99 | if (ret) |
| 73 | return ret; | 100 | return ret; |
| 74 | 101 | ||
| 75 | priv->base.pll_set = nvc0_devinit_pll_set; | ||
| 76 | if (nv_rd32(priv, 0x022500) & 0x00000001) | 102 | if (nv_rd32(priv, 0x022500) & 0x00000001) |
| 77 | priv->base.post = true; | 103 | priv->base.post = true; |
| 78 | return 0; | 104 | return 0; |
| 79 | } | 105 | } |
| 80 | 106 | ||
| 81 | struct nouveau_oclass | 107 | struct nouveau_oclass * |
| 82 | nvc0_devinit_oclass = { | 108 | nvc0_devinit_oclass = &(struct nouveau_devinit_impl) { |
| 83 | .handle = NV_SUBDEV(DEVINIT, 0xc0), | 109 | .base.handle = NV_SUBDEV(DEVINIT, 0xc0), |
| 84 | .ofuncs = &(struct nouveau_ofuncs) { | 110 | .base.ofuncs = &(struct nouveau_ofuncs) { |
| 85 | .ctor = nvc0_devinit_ctor, | 111 | .ctor = nvc0_devinit_ctor, |
| 86 | .dtor = _nouveau_devinit_dtor, | 112 | .dtor = _nouveau_devinit_dtor, |
| 87 | .init = nv50_devinit_init, | 113 | .init = nv50_devinit_init, |
| 88 | .fini = _nouveau_devinit_fini, | 114 | .fini = _nouveau_devinit_fini, |
| 89 | }, | 115 | }, |
| 90 | }; | 116 | .pll_set = nvc0_devinit_pll_set, |
| 117 | .disable = nvc0_devinit_disable, | ||
| 118 | }.base; | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h b/drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h index 7d622e2b0171..822a2fbf44a5 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h +++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h | |||
| @@ -6,20 +6,32 @@ | |||
| 6 | #include <subdev/clock/pll.h> | 6 | #include <subdev/clock/pll.h> |
| 7 | #include <subdev/devinit.h> | 7 | #include <subdev/devinit.h> |
| 8 | 8 | ||
| 9 | void nv04_devinit_dtor(struct nouveau_object *); | 9 | struct nouveau_devinit_impl { |
| 10 | int nv04_devinit_init(struct nouveau_object *); | 10 | struct nouveau_oclass base; |
| 11 | int nv04_devinit_fini(struct nouveau_object *, bool); | 11 | void (*meminit)(struct nouveau_devinit *); |
| 12 | int nv04_devinit_pll_set(struct nouveau_devinit *, u32, u32); | 12 | int (*pll_set)(struct nouveau_devinit *, u32 type, u32 freq); |
| 13 | 13 | u64 (*disable)(struct nouveau_devinit *); | |
| 14 | void setPLL_single(struct nouveau_devinit *, u32, struct nouveau_pll_vals *); | ||
| 15 | void setPLL_double_highregs(struct nouveau_devinit *, u32, struct nouveau_pll_vals *); | ||
| 16 | void setPLL_double_lowregs(struct nouveau_devinit *, u32, struct nouveau_pll_vals *); | ||
| 17 | |||
| 18 | |||
| 19 | struct nv50_devinit_priv { | ||
| 20 | struct nouveau_devinit base; | ||
| 21 | }; | 14 | }; |
| 22 | 15 | ||
| 23 | int nv50_devinit_init(struct nouveau_object *); | 16 | #define nouveau_devinit_create(p,e,o,d) \ |
| 17 | nouveau_devinit_create_((p), (e), (o), sizeof(**d), (void **)d) | ||
| 18 | #define nouveau_devinit_destroy(p) ({ \ | ||
| 19 | struct nouveau_devinit *d = (p); \ | ||
| 20 | _nouveau_devinit_dtor(nv_object(d)); \ | ||
| 21 | }) | ||
| 22 | #define nouveau_devinit_init(p) ({ \ | ||
| 23 | struct nouveau_devinit *d = (p); \ | ||
| 24 | _nouveau_devinit_init(nv_object(d)); \ | ||
| 25 | }) | ||
| 26 | #define nouveau_devinit_fini(p,s) ({ \ | ||
| 27 | struct nouveau_devinit *d = (p); \ | ||
| 28 | _nouveau_devinit_fini(nv_object(d), (s)); \ | ||
| 29 | }) | ||
| 30 | |||
| 31 | int nouveau_devinit_create_(struct nouveau_object *, struct nouveau_object *, | ||
| 32 | struct nouveau_oclass *, int, void **); | ||
| 33 | void _nouveau_devinit_dtor(struct nouveau_object *); | ||
| 34 | int _nouveau_devinit_init(struct nouveau_object *); | ||
| 35 | int _nouveau_devinit_fini(struct nouveau_object *, bool suspend); | ||
| 24 | 36 | ||
| 25 | #endif | 37 | #endif |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/gddr5.c b/drivers/gpu/drm/nouveau/core/subdev/fb/gddr5.c index 34f9605ffee6..66fe959b4f74 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/gddr5.c +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/gddr5.c | |||
| @@ -25,35 +25,44 @@ | |||
| 25 | #include <subdev/bios.h> | 25 | #include <subdev/bios.h> |
| 26 | #include "priv.h" | 26 | #include "priv.h" |
| 27 | 27 | ||
| 28 | /* binary driver only executes this path if the condition (a) is true | ||
| 29 | * for any configuration (combination of rammap+ramcfg+timing) that | ||
| 30 | * can be reached on a given card. for now, we will execute the branch | ||
| 31 | * unconditionally in the hope that a "false everywhere" in the bios | ||
| 32 | * tables doesn't actually mean "don't touch this". | ||
| 33 | */ | ||
| 34 | #define NOTE00(a) 1 | ||
| 35 | |||
| 28 | int | 36 | int |
| 29 | nouveau_gddr5_calc(struct nouveau_ram *ram) | 37 | nouveau_gddr5_calc(struct nouveau_ram *ram, bool nuts) |
| 30 | { | 38 | { |
| 31 | struct nouveau_bios *bios = nouveau_bios(ram); | 39 | int pd, lf, xd, vh, vr, vo, l3; |
| 32 | int pd, lf, xd, vh, vr, vo; | 40 | int WL, CL, WR, at[2], dt, ds; |
| 33 | int WL, CL, WR, at, dt, ds; | ||
| 34 | int rq = ram->freq < 1000000; /* XXX */ | 41 | int rq = ram->freq < 1000000; /* XXX */ |
| 35 | 42 | ||
| 36 | switch (!!ram->ramcfg.data * ram->ramcfg.version) { | 43 | switch (ram->ramcfg.version) { |
| 37 | case 0x11: | 44 | case 0x11: |
| 38 | pd = (nv_ro08(bios, ram->ramcfg.data + 0x01) & 0x80) >> 7; | 45 | pd = ram->next->bios.ramcfg_11_01_80; |
| 39 | lf = (nv_ro08(bios, ram->ramcfg.data + 0x01) & 0x40) >> 6; | 46 | lf = ram->next->bios.ramcfg_11_01_40; |
| 40 | xd = !(nv_ro08(bios, ram->ramcfg.data + 0x01) & 0x20); | 47 | xd = !ram->next->bios.ramcfg_11_01_20; |
| 41 | vh = (nv_ro08(bios, ram->ramcfg.data + 0x02) & 0x10) >> 4; | 48 | vh = ram->next->bios.ramcfg_11_02_10; |
| 42 | vr = (nv_ro08(bios, ram->ramcfg.data + 0x02) & 0x04) >> 2; | 49 | vr = ram->next->bios.ramcfg_11_02_04; |
| 43 | vo = nv_ro08(bios, ram->ramcfg.data + 0x06) & 0xff; | 50 | vo = ram->next->bios.ramcfg_11_06; |
| 51 | l3 = !ram->next->bios.ramcfg_11_07_02; | ||
| 44 | break; | 52 | break; |
| 45 | default: | 53 | default: |
| 46 | return -ENOSYS; | 54 | return -ENOSYS; |
| 47 | } | 55 | } |
| 48 | 56 | ||
| 49 | switch (!!ram->timing.data * ram->timing.version) { | 57 | switch (ram->timing.version) { |
| 50 | case 0x20: | 58 | case 0x20: |
| 51 | WL = (nv_ro16(bios, ram->timing.data + 0x04) & 0x0f80) >> 7; | 59 | WL = (ram->next->bios.timing[1] & 0x00000f80) >> 7; |
| 52 | CL = nv_ro08(bios, ram->timing.data + 0x04) & 0x1f; | 60 | CL = (ram->next->bios.timing[1] & 0x0000001f); |
| 53 | WR = nv_ro08(bios, ram->timing.data + 0x0a) & 0x7f; | 61 | WR = (ram->next->bios.timing[2] & 0x007f0000) >> 16; |
| 54 | at = (nv_ro08(bios, ram->timing.data + 0x2e) & 0xc0) >> 6; | 62 | at[0] = ram->next->bios.timing_20_2e_c0; |
| 55 | dt = nv_ro08(bios, ram->timing.data + 0x2e) & 0x03; | 63 | at[1] = ram->next->bios.timing_20_2e_30; |
| 56 | ds = nv_ro08(bios, ram->timing.data + 0x2f) & 0x03; | 64 | dt = ram->next->bios.timing_20_2e_03; |
| 65 | ds = ram->next->bios.timing_20_2f_03; | ||
| 57 | break; | 66 | break; |
| 58 | default: | 67 | default: |
| 59 | return -ENOSYS; | 68 | return -ENOSYS; |
| @@ -71,13 +80,25 @@ nouveau_gddr5_calc(struct nouveau_ram *ram) | |||
| 71 | 80 | ||
| 72 | ram->mr[1] &= ~0x0bf; | 81 | ram->mr[1] &= ~0x0bf; |
| 73 | ram->mr[1] |= (xd & 0x01) << 7; | 82 | ram->mr[1] |= (xd & 0x01) << 7; |
| 74 | ram->mr[1] |= (at & 0x03) << 4; | 83 | ram->mr[1] |= (at[0] & 0x03) << 4; |
| 75 | ram->mr[1] |= (dt & 0x03) << 2; | 84 | ram->mr[1] |= (dt & 0x03) << 2; |
| 76 | ram->mr[1] |= (ds & 0x03) << 0; | 85 | ram->mr[1] |= (ds & 0x03) << 0; |
| 77 | 86 | ||
| 87 | /* this seems wrong, alternate field used for the broadcast | ||
| 88 | * on nuts vs non-nuts configs.. meh, it matches for now. | ||
| 89 | */ | ||
| 90 | ram->mr1_nuts = ram->mr[1]; | ||
| 91 | if (nuts) { | ||
| 92 | ram->mr[1] &= ~0x030; | ||
| 93 | ram->mr[1] |= (at[1] & 0x03) << 4; | ||
| 94 | } | ||
| 95 | |||
| 78 | ram->mr[3] &= ~0x020; | 96 | ram->mr[3] &= ~0x020; |
| 79 | ram->mr[3] |= (rq & 0x01) << 5; | 97 | ram->mr[3] |= (rq & 0x01) << 5; |
| 80 | 98 | ||
| 99 | ram->mr[5] &= ~0x004; | ||
| 100 | ram->mr[5] |= (l3 << 2); | ||
| 101 | |||
| 81 | if (!vo) | 102 | if (!vo) |
| 82 | vo = (ram->mr[6] & 0xff0) >> 4; | 103 | vo = (ram->mr[6] & 0xff0) >> 4; |
| 83 | if (ram->mr[6] & 0x001) | 104 | if (ram->mr[6] & 0x001) |
| @@ -86,11 +107,16 @@ nouveau_gddr5_calc(struct nouveau_ram *ram) | |||
| 86 | ram->mr[6] |= (vo & 0xff) << 4; | 107 | ram->mr[6] |= (vo & 0xff) << 4; |
| 87 | ram->mr[6] |= (pd & 0x01) << 0; | 108 | ram->mr[6] |= (pd & 0x01) << 0; |
| 88 | 109 | ||
| 89 | if (!(ram->mr[7] & 0x100)) | 110 | if (NOTE00(vr)) { |
| 90 | vr = 0; /* binary driver does this.. bug? */ | 111 | ram->mr[7] &= ~0x300; |
| 91 | ram->mr[7] &= ~0x188; | 112 | ram->mr[7] |= (vr & 0x03) << 8; |
| 92 | ram->mr[7] |= (vr & 0x01) << 8; | 113 | } |
| 114 | ram->mr[7] &= ~0x088; | ||
| 93 | ram->mr[7] |= (vh & 0x01) << 7; | 115 | ram->mr[7] |= (vh & 0x01) << 7; |
| 94 | ram->mr[7] |= (lf & 0x01) << 3; | 116 | ram->mr[7] |= (lf & 0x01) << 3; |
| 117 | |||
| 118 | ram->mr[8] &= ~0x003; | ||
| 119 | ram->mr[8] |= (WR & 0x10) >> 3; | ||
| 120 | ram->mr[8] |= (CL & 0x10) >> 4; | ||
| 95 | return 0; | 121 | return 0; |
| 96 | } | 122 | } |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c index e5fc37c4caac..45470e1f0385 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c | |||
| @@ -33,6 +33,21 @@ nvc0_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags) | |||
| 33 | return likely((nvc0_pte_storage_type_map[memtype] != 0xff)); | 33 | return likely((nvc0_pte_storage_type_map[memtype] != 0xff)); |
| 34 | } | 34 | } |
| 35 | 35 | ||
| 36 | static void | ||
| 37 | nvc0_fb_intr(struct nouveau_subdev *subdev) | ||
| 38 | { | ||
| 39 | struct nvc0_fb_priv *priv = (void *)subdev; | ||
| 40 | u32 intr = nv_rd32(priv, 0x000100); | ||
| 41 | if (intr & 0x08000000) { | ||
| 42 | nv_debug(priv, "PFFB intr\n"); | ||
| 43 | intr &= ~0x08000000; | ||
| 44 | } | ||
| 45 | if (intr & 0x00002000) { | ||
| 46 | nv_debug(priv, "PBFB intr\n"); | ||
| 47 | intr &= ~0x00002000; | ||
| 48 | } | ||
| 49 | } | ||
| 50 | |||
| 36 | int | 51 | int |
| 37 | nvc0_fb_init(struct nouveau_object *object) | 52 | nvc0_fb_init(struct nouveau_object *object) |
| 38 | { | 53 | { |
| @@ -86,6 +101,7 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 86 | return -EFAULT; | 101 | return -EFAULT; |
| 87 | } | 102 | } |
| 88 | 103 | ||
| 104 | nv_subdev(priv)->intr = nvc0_fb_intr; | ||
| 89 | return 0; | 105 | return 0; |
| 90 | } | 106 | } |
| 91 | 107 | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h index 493125214e88..edaf95dee612 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h | |||
| @@ -34,7 +34,7 @@ extern struct nouveau_oclass nvc0_ram_oclass; | |||
| 34 | extern struct nouveau_oclass nve0_ram_oclass; | 34 | extern struct nouveau_oclass nve0_ram_oclass; |
| 35 | 35 | ||
| 36 | int nouveau_sddr3_calc(struct nouveau_ram *ram); | 36 | int nouveau_sddr3_calc(struct nouveau_ram *ram); |
| 37 | int nouveau_gddr5_calc(struct nouveau_ram *ram); | 37 | int nouveau_gddr5_calc(struct nouveau_ram *ram, bool nuts); |
| 38 | 38 | ||
| 39 | #define nouveau_fb_create(p,e,c,d) \ | 39 | #define nouveau_fb_create(p,e,c,d) \ |
| 40 | nouveau_fb_create_((p), (e), (c), sizeof(**d), (void **)d) | 40 | nouveau_fb_create_((p), (e), (c), sizeof(**d), (void **)d) |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c index 76762a17d89c..c7fdb3a9e88b 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c | |||
| @@ -70,13 +70,11 @@ nv50_ram_calc(struct nouveau_fb *pfb, u32 freq) | |||
| 70 | struct nv50_ramseq *hwsq = &ram->hwsq; | 70 | struct nv50_ramseq *hwsq = &ram->hwsq; |
| 71 | struct nvbios_perfE perfE; | 71 | struct nvbios_perfE perfE; |
| 72 | struct nvbios_pll mpll; | 72 | struct nvbios_pll mpll; |
| 73 | struct bit_entry M; | ||
| 74 | struct { | 73 | struct { |
| 75 | u32 data; | 74 | u32 data; |
| 76 | u8 size; | 75 | u8 size; |
| 77 | } ramcfg, timing; | 76 | } ramcfg, timing; |
| 78 | u8 ver, hdr, cnt, strap; | 77 | u8 ver, hdr, cnt, len, strap; |
| 79 | u32 data; | ||
| 80 | int N1, M1, N2, M2, P; | 78 | int N1, M1, N2, M2, P; |
| 81 | int ret, i; | 79 | int ret, i; |
| 82 | 80 | ||
| @@ -93,16 +91,7 @@ nv50_ram_calc(struct nouveau_fb *pfb, u32 freq) | |||
| 93 | } while (perfE.memory < freq); | 91 | } while (perfE.memory < freq); |
| 94 | 92 | ||
| 95 | /* locate specific data set for the attached memory */ | 93 | /* locate specific data set for the attached memory */ |
| 96 | if (bit_entry(bios, 'M', &M) || M.version != 1 || M.length < 5) { | 94 | strap = nvbios_ramcfg_index(bios); |
| 97 | nv_error(pfb, "invalid/missing memory table\n"); | ||
| 98 | return -EINVAL; | ||
| 99 | } | ||
| 100 | |||
| 101 | strap = (nv_rd32(pfb, 0x101000) & 0x0000003c) >> 2; | ||
| 102 | data = nv_ro16(bios, M.offset + 3); | ||
| 103 | if (data) | ||
| 104 | strap = nv_ro08(bios, data + strap); | ||
| 105 | |||
| 106 | if (strap >= cnt) { | 95 | if (strap >= cnt) { |
| 107 | nv_error(pfb, "invalid ramcfg strap\n"); | 96 | nv_error(pfb, "invalid ramcfg strap\n"); |
| 108 | return -EINVAL; | 97 | return -EINVAL; |
| @@ -113,7 +102,8 @@ nv50_ram_calc(struct nouveau_fb *pfb, u32 freq) | |||
| 113 | /* lookup memory timings, if bios says they're present */ | 102 | /* lookup memory timings, if bios says they're present */ |
| 114 | strap = nv_ro08(bios, ramcfg.data + 0x01); | 103 | strap = nv_ro08(bios, ramcfg.data + 0x01); |
| 115 | if (strap != 0xff) { | 104 | if (strap != 0xff) { |
| 116 | timing.data = nvbios_timing_entry(bios, strap, &ver, &hdr); | 105 | timing.data = nvbios_timingEe(bios, strap, &ver, &hdr, |
| 106 | &cnt, &len); | ||
| 117 | if (!timing.data || ver != 0x10 || hdr < 0x12) { | 107 | if (!timing.data || ver != 0x10 || hdr < 0x12) { |
| 118 | nv_error(pfb, "invalid/missing timing entry " | 108 | nv_error(pfb, "invalid/missing timing entry " |
| 119 | "%02x %04x %02x %02x\n", | 109 | "%02x %04x %02x %02x\n", |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c index f6292cd9207c..f4ae8aa46a25 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c | |||
| @@ -79,8 +79,7 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq) | |||
| 79 | struct nva3_ram *ram = (void *)pfb->ram; | 79 | struct nva3_ram *ram = (void *)pfb->ram; |
| 80 | struct nva3_ramfuc *fuc = &ram->fuc; | 80 | struct nva3_ramfuc *fuc = &ram->fuc; |
| 81 | struct nva3_clock_info mclk; | 81 | struct nva3_clock_info mclk; |
| 82 | struct bit_entry M; | 82 | u8 ver, cnt, len, strap; |
| 83 | u8 ver, cnt, strap; | ||
| 84 | u32 data; | 83 | u32 data; |
| 85 | struct { | 84 | struct { |
| 86 | u32 data; | 85 | u32 data; |
| @@ -91,24 +90,15 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq) | |||
| 91 | int ret; | 90 | int ret; |
| 92 | 91 | ||
| 93 | /* lookup memory config data relevant to the target frequency */ | 92 | /* lookup memory config data relevant to the target frequency */ |
| 94 | rammap.data = nvbios_rammap_match(bios, freq / 1000, &ver, &rammap.size, | 93 | rammap.data = nvbios_rammapEm(bios, freq / 1000, &ver, &rammap.size, |
| 95 | &cnt, &ramcfg.size); | 94 | &cnt, &ramcfg.size); |
| 96 | if (!rammap.data || ver != 0x10 || rammap.size < 0x0e) { | 95 | if (!rammap.data || ver != 0x10 || rammap.size < 0x0e) { |
| 97 | nv_error(pfb, "invalid/missing rammap entry\n"); | 96 | nv_error(pfb, "invalid/missing rammap entry\n"); |
| 98 | return -EINVAL; | 97 | return -EINVAL; |
| 99 | } | 98 | } |
| 100 | 99 | ||
| 101 | /* locate specific data set for the attached memory */ | 100 | /* locate specific data set for the attached memory */ |
| 102 | if (bit_entry(bios, 'M', &M) || M.version != 2 || M.length < 3) { | 101 | strap = nvbios_ramcfg_index(bios); |
| 103 | nv_error(pfb, "invalid/missing memory table\n"); | ||
| 104 | return -EINVAL; | ||
| 105 | } | ||
| 106 | |||
| 107 | strap = (nv_rd32(pfb, 0x101000) & 0x0000003c) >> 2; | ||
| 108 | data = nv_ro16(bios, M.offset + 1); | ||
| 109 | if (data) | ||
| 110 | strap = nv_ro08(bios, data + strap); | ||
| 111 | |||
| 112 | if (strap >= cnt) { | 102 | if (strap >= cnt) { |
| 113 | nv_error(pfb, "invalid ramcfg strap\n"); | 103 | nv_error(pfb, "invalid ramcfg strap\n"); |
| 114 | return -EINVAL; | 104 | return -EINVAL; |
| @@ -123,8 +113,8 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq) | |||
| 123 | /* lookup memory timings, if bios says they're present */ | 113 | /* lookup memory timings, if bios says they're present */ |
| 124 | strap = nv_ro08(bios, ramcfg.data + 0x01); | 114 | strap = nv_ro08(bios, ramcfg.data + 0x01); |
| 125 | if (strap != 0xff) { | 115 | if (strap != 0xff) { |
| 126 | timing.data = nvbios_timing_entry(bios, strap, &ver, | 116 | timing.data = nvbios_timingEe(bios, strap, &ver, &timing.size, |
| 127 | &timing.size); | 117 | &cnt, &len); |
| 128 | if (!timing.data || ver != 0x10 || timing.size < 0x19) { | 118 | if (!timing.data || ver != 0x10 || timing.size < 0x19) { |
| 129 | nv_error(pfb, "invalid/missing timing entry\n"); | 119 | nv_error(pfb, "invalid/missing timing entry\n"); |
| 130 | return -EINVAL; | 120 | return -EINVAL; |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c index f464547c6bab..0391b824ee76 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c | |||
| @@ -23,7 +23,6 @@ | |||
| 23 | */ | 23 | */ |
| 24 | 24 | ||
| 25 | #include <subdev/bios.h> | 25 | #include <subdev/bios.h> |
| 26 | #include <subdev/bios/bit.h> | ||
| 27 | #include <subdev/bios/pll.h> | 26 | #include <subdev/bios/pll.h> |
| 28 | #include <subdev/bios/rammap.h> | 27 | #include <subdev/bios/rammap.h> |
| 29 | #include <subdev/bios/timing.h> | 28 | #include <subdev/bios/timing.h> |
| @@ -134,9 +133,7 @@ nvc0_ram_calc(struct nouveau_fb *pfb, u32 freq) | |||
| 134 | struct nouveau_bios *bios = nouveau_bios(pfb); | 133 | struct nouveau_bios *bios = nouveau_bios(pfb); |
| 135 | struct nvc0_ram *ram = (void *)pfb->ram; | 134 | struct nvc0_ram *ram = (void *)pfb->ram; |
| 136 | struct nvc0_ramfuc *fuc = &ram->fuc; | 135 | struct nvc0_ramfuc *fuc = &ram->fuc; |
| 137 | struct bit_entry M; | 136 | u8 ver, cnt, len, strap; |
| 138 | u8 ver, cnt, strap; | ||
| 139 | u32 data; | ||
| 140 | struct { | 137 | struct { |
| 141 | u32 data; | 138 | u32 data; |
| 142 | u8 size; | 139 | u8 size; |
| @@ -147,24 +144,15 @@ nvc0_ram_calc(struct nouveau_fb *pfb, u32 freq) | |||
| 147 | int ret; | 144 | int ret; |
| 148 | 145 | ||
| 149 | /* lookup memory config data relevant to the target frequency */ | 146 | /* lookup memory config data relevant to the target frequency */ |
| 150 | rammap.data = nvbios_rammap_match(bios, freq / 1000, &ver, &rammap.size, | 147 | rammap.data = nvbios_rammapEm(bios, freq / 1000, &ver, &rammap.size, |
| 151 | &cnt, &ramcfg.size); | 148 | &cnt, &ramcfg.size); |
| 152 | if (!rammap.data || ver != 0x10 || rammap.size < 0x0e) { | 149 | if (!rammap.data || ver != 0x10 || rammap.size < 0x0e) { |
| 153 | nv_error(pfb, "invalid/missing rammap entry\n"); | 150 | nv_error(pfb, "invalid/missing rammap entry\n"); |
| 154 | return -EINVAL; | 151 | return -EINVAL; |
| 155 | } | 152 | } |
| 156 | 153 | ||
| 157 | /* locate specific data set for the attached memory */ | 154 | /* locate specific data set for the attached memory */ |
| 158 | if (bit_entry(bios, 'M', &M) || M.version != 2 || M.length < 3) { | 155 | strap = nvbios_ramcfg_index(bios); |
| 159 | nv_error(pfb, "invalid/missing memory table\n"); | ||
| 160 | return -EINVAL; | ||
| 161 | } | ||
| 162 | |||
| 163 | strap = (nv_rd32(pfb, 0x101000) & 0x0000003c) >> 2; | ||
| 164 | data = nv_ro16(bios, M.offset + 1); | ||
| 165 | if (data) | ||
| 166 | strap = nv_ro08(bios, data + strap); | ||
| 167 | |||
| 168 | if (strap >= cnt) { | 156 | if (strap >= cnt) { |
| 169 | nv_error(pfb, "invalid ramcfg strap\n"); | 157 | nv_error(pfb, "invalid ramcfg strap\n"); |
| 170 | return -EINVAL; | 158 | return -EINVAL; |
| @@ -179,8 +167,8 @@ nvc0_ram_calc(struct nouveau_fb *pfb, u32 freq) | |||
| 179 | /* lookup memory timings, if bios says they're present */ | 167 | /* lookup memory timings, if bios says they're present */ |
| 180 | strap = nv_ro08(bios, ramcfg.data + 0x01); | 168 | strap = nv_ro08(bios, ramcfg.data + 0x01); |
| 181 | if (strap != 0xff) { | 169 | if (strap != 0xff) { |
| 182 | timing.data = nvbios_timing_entry(bios, strap, &ver, | 170 | timing.data = nvbios_timingEe(bios, strap, &ver, &timing.size, |
| 183 | &timing.size); | 171 | &cnt, &len); |
| 184 | if (!timing.data || ver != 0x10 || timing.size < 0x19) { | 172 | if (!timing.data || ver != 0x10 || timing.size < 0x19) { |
| 185 | nv_error(pfb, "invalid/missing timing entry\n"); | 173 | nv_error(pfb, "invalid/missing timing entry\n"); |
| 186 | return -EINVAL; | 174 | return -EINVAL; |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c index bc86cfd084f6..3257c522a021 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c | |||
| @@ -25,7 +25,6 @@ | |||
| 25 | #include <subdev/gpio.h> | 25 | #include <subdev/gpio.h> |
| 26 | 26 | ||
| 27 | #include <subdev/bios.h> | 27 | #include <subdev/bios.h> |
| 28 | #include <subdev/bios/bit.h> | ||
| 29 | #include <subdev/bios/pll.h> | 28 | #include <subdev/bios/pll.h> |
| 30 | #include <subdev/bios/init.h> | 29 | #include <subdev/bios/init.h> |
| 31 | #include <subdev/bios/rammap.h> | 30 | #include <subdev/bios/rammap.h> |
| @@ -42,6 +41,14 @@ | |||
| 42 | 41 | ||
| 43 | #include "ramfuc.h" | 42 | #include "ramfuc.h" |
| 44 | 43 | ||
| 44 | /* binary driver only executes this path if the condition (a) is true | ||
| 45 | * for any configuration (combination of rammap+ramcfg+timing) that | ||
| 46 | * can be reached on a given card. for now, we will execute the branch | ||
| 47 | * unconditionally in the hope that a "false everywhere" in the bios | ||
| 48 | * tables doesn't actually mean "don't touch this". | ||
| 49 | */ | ||
| 50 | #define NOTE00(a) 1 | ||
| 51 | |||
| 45 | struct nve0_ramfuc { | 52 | struct nve0_ramfuc { |
| 46 | struct ramfuc base; | 53 | struct ramfuc base; |
| 47 | 54 | ||
| @@ -104,7 +111,9 @@ struct nve0_ramfuc { | |||
| 104 | struct ramfuc_reg r_mr[16]; /* MR0 - MR8, MR15 */ | 111 | struct ramfuc_reg r_mr[16]; /* MR0 - MR8, MR15 */ |
| 105 | 112 | ||
| 106 | struct ramfuc_reg r_0x62c000; | 113 | struct ramfuc_reg r_0x62c000; |
| 114 | |||
| 107 | struct ramfuc_reg r_0x10f200; | 115 | struct ramfuc_reg r_0x10f200; |
| 116 | |||
| 108 | struct ramfuc_reg r_0x10f210; | 117 | struct ramfuc_reg r_0x10f210; |
| 109 | struct ramfuc_reg r_0x10f310; | 118 | struct ramfuc_reg r_0x10f310; |
| 110 | struct ramfuc_reg r_0x10f314; | 119 | struct ramfuc_reg r_0x10f314; |
| @@ -118,12 +127,17 @@ struct nve0_ramfuc { | |||
| 118 | struct ramfuc_reg r_0x10f65c; | 127 | struct ramfuc_reg r_0x10f65c; |
| 119 | struct ramfuc_reg r_0x10f6bc; | 128 | struct ramfuc_reg r_0x10f6bc; |
| 120 | struct ramfuc_reg r_0x100710; | 129 | struct ramfuc_reg r_0x100710; |
| 121 | struct ramfuc_reg r_0x10f750; | 130 | struct ramfuc_reg r_0x100750; |
| 122 | }; | 131 | }; |
| 123 | 132 | ||
| 124 | struct nve0_ram { | 133 | struct nve0_ram { |
| 125 | struct nouveau_ram base; | 134 | struct nouveau_ram base; |
| 126 | struct nve0_ramfuc fuc; | 135 | struct nve0_ramfuc fuc; |
| 136 | |||
| 137 | u32 parts; | ||
| 138 | u32 pmask; | ||
| 139 | u32 pnuts; | ||
| 140 | |||
| 127 | int from; | 141 | int from; |
| 128 | int mode; | 142 | int mode; |
| 129 | int N1, fN1, M1, P1; | 143 | int N1, fN1, M1, P1; |
| @@ -134,17 +148,17 @@ struct nve0_ram { | |||
| 134 | * GDDR5 | 148 | * GDDR5 |
| 135 | ******************************************************************************/ | 149 | ******************************************************************************/ |
| 136 | static void | 150 | static void |
| 137 | train(struct nve0_ramfuc *fuc, u32 magic) | 151 | nve0_ram_train(struct nve0_ramfuc *fuc, u32 mask, u32 data) |
| 138 | { | 152 | { |
| 139 | struct nve0_ram *ram = container_of(fuc, typeof(*ram), fuc); | 153 | struct nve0_ram *ram = container_of(fuc, typeof(*ram), fuc); |
| 140 | struct nouveau_fb *pfb = nouveau_fb(ram); | 154 | u32 addr = 0x110974, i; |
| 141 | const int mc = nv_rd32(pfb, 0x02243c); | 155 | |
| 142 | int i; | 156 | ram_mask(fuc, 0x10f910, mask, data); |
| 143 | 157 | ram_mask(fuc, 0x10f914, mask, data); | |
| 144 | ram_mask(fuc, 0x10f910, 0xbc0e0000, magic); | 158 | |
| 145 | ram_mask(fuc, 0x10f914, 0xbc0e0000, magic); | 159 | for (i = 0; (data & 0x80000000) && i < ram->parts; addr += 0x1000, i++) { |
| 146 | for (i = 0; i < mc; i++) { | 160 | if (ram->pmask & (1 << i)) |
| 147 | const u32 addr = 0x110974 + (i * 0x1000); | 161 | continue; |
| 148 | ram_wait(fuc, addr, 0x0000000f, 0x00000000, 500000); | 162 | ram_wait(fuc, addr, 0x0000000f, 0x00000000, 500000); |
| 149 | } | 163 | } |
| 150 | } | 164 | } |
| @@ -199,12 +213,12 @@ r1373f4_init(struct nve0_ramfuc *fuc) | |||
| 199 | } | 213 | } |
| 200 | 214 | ||
| 201 | static void | 215 | static void |
| 202 | r1373f4_fini(struct nve0_ramfuc *fuc, u32 ramcfg) | 216 | r1373f4_fini(struct nve0_ramfuc *fuc) |
| 203 | { | 217 | { |
| 204 | struct nve0_ram *ram = container_of(fuc, typeof(*ram), fuc); | 218 | struct nve0_ram *ram = container_of(fuc, typeof(*ram), fuc); |
| 205 | struct nouveau_bios *bios = nouveau_bios(ram); | 219 | struct nouveau_ram_data *next = ram->base.next; |
| 206 | u8 v0 = (nv_ro08(bios, ramcfg + 0x03) & 0xc0) >> 6; | 220 | u8 v0 = next->bios.ramcfg_11_03_c0; |
| 207 | u8 v1 = (nv_ro08(bios, ramcfg + 0x03) & 0x30) >> 4; | 221 | u8 v1 = next->bios.ramcfg_11_03_30; |
| 208 | u32 tmp; | 222 | u32 tmp; |
| 209 | 223 | ||
| 210 | tmp = ram_rd32(fuc, 0x1373ec) & ~0x00030000; | 224 | tmp = ram_rd32(fuc, 0x1373ec) & ~0x00030000; |
| @@ -220,25 +234,46 @@ r1373f4_fini(struct nve0_ramfuc *fuc, u32 ramcfg) | |||
| 220 | ram_mask(fuc, 0x10f800, 0x00000030, (v0 ^ v1) << 4); | 234 | ram_mask(fuc, 0x10f800, 0x00000030, (v0 ^ v1) << 4); |
| 221 | } | 235 | } |
| 222 | 236 | ||
| 237 | static void | ||
| 238 | nve0_ram_nuts(struct nve0_ram *ram, struct ramfuc_reg *reg, | ||
| 239 | u32 _mask, u32 _data, u32 _copy) | ||
| 240 | { | ||
| 241 | struct nve0_fb_priv *priv = (void *)nouveau_fb(ram); | ||
| 242 | struct ramfuc *fuc = &ram->fuc.base; | ||
| 243 | u32 addr = 0x110000 + (reg->addr[0] & 0xfff); | ||
| 244 | u32 mask = _mask | _copy; | ||
| 245 | u32 data = (_data & _mask) | (reg->data & _copy); | ||
| 246 | u32 i; | ||
| 247 | |||
| 248 | for (i = 0; i < 16; i++, addr += 0x1000) { | ||
| 249 | if (ram->pnuts & (1 << i)) { | ||
| 250 | u32 prev = nv_rd32(priv, addr); | ||
| 251 | u32 next = (prev & ~mask) | data; | ||
| 252 | nouveau_memx_wr32(fuc->memx, addr, next); | ||
| 253 | } | ||
| 254 | } | ||
| 255 | } | ||
| 256 | #define ram_nuts(s,r,m,d,c) \ | ||
| 257 | nve0_ram_nuts((s), &(s)->fuc.r_##r, (m), (d), (c)) | ||
| 258 | |||
| 223 | static int | 259 | static int |
| 224 | nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq) | 260 | nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq) |
| 225 | { | 261 | { |
| 226 | struct nouveau_bios *bios = nouveau_bios(pfb); | ||
| 227 | struct nve0_ram *ram = (void *)pfb->ram; | 262 | struct nve0_ram *ram = (void *)pfb->ram; |
| 228 | struct nve0_ramfuc *fuc = &ram->fuc; | 263 | struct nve0_ramfuc *fuc = &ram->fuc; |
| 229 | const u32 rammap = ram->base.rammap.data; | 264 | struct nouveau_ram_data *next = ram->base.next; |
| 230 | const u32 ramcfg = ram->base.ramcfg.data; | 265 | int vc = !(next->bios.ramcfg_11_02_08); |
| 231 | const u32 timing = ram->base.timing.data; | 266 | int mv = !(next->bios.ramcfg_11_02_04); |
| 232 | int vc = !(nv_ro08(bios, ramcfg + 0x02) & 0x08); | ||
| 233 | int mv = 1; /*XXX*/ | ||
| 234 | u32 mask, data; | 267 | u32 mask, data; |
| 235 | 268 | ||
| 236 | ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000); | 269 | ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000); |
| 237 | ram_wr32(fuc, 0x62c000, 0x0f0f0000); | 270 | ram_wr32(fuc, 0x62c000, 0x0f0f0000); |
| 238 | 271 | ||
| 239 | /* MR1: turn termination on early, for some reason.. */ | 272 | /* MR1: turn termination on early, for some reason.. */ |
| 240 | if ((ram->base.mr[1] & 0x03c) != 0x030) | 273 | if ((ram->base.mr[1] & 0x03c) != 0x030) { |
| 241 | ram_mask(fuc, mr[1], 0x03c, ram->base.mr[1] & 0x03c); | 274 | ram_mask(fuc, mr[1], 0x03c, ram->base.mr[1] & 0x03c); |
| 275 | ram_nuts(ram, mr[1], 0x03c, ram->base.mr1_nuts & 0x03c, 0x000); | ||
| 276 | } | ||
| 242 | 277 | ||
| 243 | if (vc == 1 && ram_have(fuc, gpio2E)) { | 278 | if (vc == 1 && ram_have(fuc, gpio2E)) { |
| 244 | u32 temp = ram_mask(fuc, gpio2E, 0x3000, fuc->r_func2E[1]); | 279 | u32 temp = ram_mask(fuc, gpio2E, 0x3000, fuc->r_func2E[1]); |
| @@ -250,8 +285,7 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq) | |||
| 250 | 285 | ||
| 251 | ram_mask(fuc, 0x10f200, 0x00000800, 0x00000000); | 286 | ram_mask(fuc, 0x10f200, 0x00000800, 0x00000000); |
| 252 | 287 | ||
| 253 | ram_mask(fuc, 0x10f914, 0x01020000, 0x000c0000); | 288 | nve0_ram_train(fuc, 0x01020000, 0x000c0000); |
| 254 | ram_mask(fuc, 0x10f910, 0x01020000, 0x000c0000); | ||
| 255 | 289 | ||
| 256 | ram_wr32(fuc, 0x10f210, 0x00000000); /* REFRESH_AUTO = 0 */ | 290 | ram_wr32(fuc, 0x10f210, 0x00000000); /* REFRESH_AUTO = 0 */ |
| 257 | ram_nsec(fuc, 1000); | 291 | ram_nsec(fuc, 1000); |
| @@ -280,28 +314,28 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq) | |||
| 280 | 314 | ||
| 281 | if (1) { | 315 | if (1) { |
| 282 | data |= 0x800807e0; | 316 | data |= 0x800807e0; |
| 283 | switch (nv_ro08(bios, ramcfg + 0x03) & 0xc0) { | 317 | switch (next->bios.ramcfg_11_03_c0) { |
| 284 | case 0xc0: data &= ~0x00000040; break; | 318 | case 3: data &= ~0x00000040; break; |
| 285 | case 0x80: data &= ~0x00000100; break; | 319 | case 2: data &= ~0x00000100; break; |
| 286 | case 0x40: data &= ~0x80000000; break; | 320 | case 1: data &= ~0x80000000; break; |
| 287 | case 0x00: data &= ~0x00000400; break; | 321 | case 0: data &= ~0x00000400; break; |
| 288 | } | 322 | } |
| 289 | 323 | ||
| 290 | switch (nv_ro08(bios, ramcfg + 0x03) & 0x30) { | 324 | switch (next->bios.ramcfg_11_03_30) { |
| 291 | case 0x30: data &= ~0x00000020; break; | 325 | case 3: data &= ~0x00000020; break; |
| 292 | case 0x20: data &= ~0x00000080; break; | 326 | case 2: data &= ~0x00000080; break; |
| 293 | case 0x10: data &= ~0x00080000; break; | 327 | case 1: data &= ~0x00080000; break; |
| 294 | case 0x00: data &= ~0x00000200; break; | 328 | case 0: data &= ~0x00000200; break; |
| 295 | } | 329 | } |
| 296 | } | 330 | } |
| 297 | 331 | ||
| 298 | if (nv_ro08(bios, ramcfg + 0x02) & 0x80) | 332 | if (next->bios.ramcfg_11_02_80) |
| 299 | mask |= 0x03000000; | 333 | mask |= 0x03000000; |
| 300 | if (nv_ro08(bios, ramcfg + 0x02) & 0x40) | 334 | if (next->bios.ramcfg_11_02_40) |
| 301 | mask |= 0x00002000; | 335 | mask |= 0x00002000; |
| 302 | if (nv_ro08(bios, ramcfg + 0x07) & 0x10) | 336 | if (next->bios.ramcfg_11_07_10) |
| 303 | mask |= 0x00004000; | 337 | mask |= 0x00004000; |
| 304 | if (nv_ro08(bios, ramcfg + 0x07) & 0x08) | 338 | if (next->bios.ramcfg_11_07_08) |
| 305 | mask |= 0x00000003; | 339 | mask |= 0x00000003; |
| 306 | else { | 340 | else { |
| 307 | mask |= 0x34000000; | 341 | mask |= 0x34000000; |
| @@ -314,18 +348,18 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq) | |||
| 314 | 348 | ||
| 315 | if (ram->from == 2 && ram->mode != 2) { | 349 | if (ram->from == 2 && ram->mode != 2) { |
| 316 | ram_mask(fuc, 0x10f808, 0x00080000, 0x00000000); | 350 | ram_mask(fuc, 0x10f808, 0x00080000, 0x00000000); |
| 317 | ram_mask(fuc, 0x10f200, 0x00008000, 0x00008000); | 351 | ram_mask(fuc, 0x10f200, 0x18008000, 0x00008000); |
| 318 | ram_mask(fuc, 0x10f800, 0x00000000, 0x00000004); | 352 | ram_mask(fuc, 0x10f800, 0x00000000, 0x00000004); |
| 319 | ram_mask(fuc, 0x10f830, 0x00008000, 0x01040010); | 353 | ram_mask(fuc, 0x10f830, 0x00008000, 0x01040010); |
| 320 | ram_mask(fuc, 0x10f830, 0x01000000, 0x00000000); | 354 | ram_mask(fuc, 0x10f830, 0x01000000, 0x00000000); |
| 321 | r1373f4_init(fuc); | 355 | r1373f4_init(fuc); |
| 322 | ram_mask(fuc, 0x1373f0, 0x00000002, 0x00000001); | 356 | ram_mask(fuc, 0x1373f0, 0x00000002, 0x00000001); |
| 323 | r1373f4_fini(fuc, ramcfg); | 357 | r1373f4_fini(fuc); |
| 324 | ram_mask(fuc, 0x10f830, 0x00c00000, 0x00240001); | 358 | ram_mask(fuc, 0x10f830, 0x00c00000, 0x00240001); |
| 325 | } else | 359 | } else |
| 326 | if (ram->from != 2 && ram->mode != 2) { | 360 | if (ram->from != 2 && ram->mode != 2) { |
| 327 | r1373f4_init(fuc); | 361 | r1373f4_init(fuc); |
| 328 | r1373f4_fini(fuc, ramcfg); | 362 | r1373f4_fini(fuc); |
| 329 | } | 363 | } |
| 330 | 364 | ||
| 331 | if (ram_have(fuc, gpioMV)) { | 365 | if (ram_have(fuc, gpioMV)) { |
| @@ -336,49 +370,54 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq) | |||
| 336 | } | 370 | } |
| 337 | } | 371 | } |
| 338 | 372 | ||
| 339 | if ( (nv_ro08(bios, ramcfg + 0x02) & 0x40) || | 373 | if ( (next->bios.ramcfg_11_02_40) || |
| 340 | (nv_ro08(bios, ramcfg + 0x07) & 0x10)) { | 374 | (next->bios.ramcfg_11_07_10)) { |
| 341 | ram_mask(fuc, 0x132040, 0x00010000, 0x00010000); | 375 | ram_mask(fuc, 0x132040, 0x00010000, 0x00010000); |
| 342 | ram_nsec(fuc, 20000); | 376 | ram_nsec(fuc, 20000); |
| 343 | } | 377 | } |
| 344 | 378 | ||
| 345 | if (ram->from != 2 && ram->mode == 2) { | 379 | if (ram->from != 2 && ram->mode == 2) { |
| 380 | if (0 /*XXX: Titan */) | ||
| 381 | ram_mask(fuc, 0x10f200, 0x18000000, 0x18000000); | ||
| 346 | ram_mask(fuc, 0x10f800, 0x00000004, 0x00000000); | 382 | ram_mask(fuc, 0x10f800, 0x00000004, 0x00000000); |
| 347 | ram_mask(fuc, 0x1373f0, 0x00000000, 0x00000002); | 383 | ram_mask(fuc, 0x1373f0, 0x00000000, 0x00000002); |
| 348 | ram_mask(fuc, 0x10f830, 0x00800001, 0x00408010); | 384 | ram_mask(fuc, 0x10f830, 0x00800001, 0x00408010); |
| 349 | r1373f4_init(fuc); | 385 | r1373f4_init(fuc); |
| 350 | r1373f4_fini(fuc, ramcfg); | 386 | r1373f4_fini(fuc); |
| 351 | ram_mask(fuc, 0x10f808, 0x00000000, 0x00080000); | 387 | ram_mask(fuc, 0x10f808, 0x00000000, 0x00080000); |
| 352 | ram_mask(fuc, 0x10f200, 0x00808000, 0x00800000); | 388 | ram_mask(fuc, 0x10f200, 0x00808000, 0x00800000); |
| 353 | } else | 389 | } else |
| 354 | if (ram->from == 2 && ram->mode == 2) { | 390 | if (ram->from == 2 && ram->mode == 2) { |
| 355 | ram_mask(fuc, 0x10f800, 0x00000004, 0x00000000); | 391 | ram_mask(fuc, 0x10f800, 0x00000004, 0x00000000); |
| 356 | r1373f4_init(fuc); | 392 | r1373f4_init(fuc); |
| 357 | r1373f4_fini(fuc, ramcfg); | 393 | r1373f4_fini(fuc); |
| 358 | } | 394 | } |
| 359 | 395 | ||
| 360 | if (ram->mode != 2) /*XXX*/ { | 396 | if (ram->mode != 2) /*XXX*/ { |
| 361 | if (nv_ro08(bios, ramcfg + 0x07) & 0x40) | 397 | if (next->bios.ramcfg_11_07_40) |
| 362 | ram_mask(fuc, 0x10f670, 0x80000000, 0x80000000); | 398 | ram_mask(fuc, 0x10f670, 0x80000000, 0x80000000); |
| 363 | } | 399 | } |
| 364 | 400 | ||
| 365 | data = (nv_ro08(bios, rammap + 0x11) & 0x0c) >> 2; | 401 | ram_wr32(fuc, 0x10f65c, 0x00000011 * next->bios.rammap_11_11_0c); |
| 366 | ram_wr32(fuc, 0x10f65c, 0x00000011 * data); | 402 | ram_wr32(fuc, 0x10f6b8, 0x01010101 * next->bios.ramcfg_11_09); |
| 367 | ram_wr32(fuc, 0x10f6b8, 0x01010101 * nv_ro08(bios, ramcfg + 0x09)); | 403 | ram_wr32(fuc, 0x10f6bc, 0x01010101 * next->bios.ramcfg_11_09); |
| 368 | ram_wr32(fuc, 0x10f6bc, 0x01010101 * nv_ro08(bios, ramcfg + 0x09)); | ||
| 369 | 404 | ||
| 370 | data = nv_ro08(bios, ramcfg + 0x04); | 405 | if (!next->bios.ramcfg_11_07_08 && !next->bios.ramcfg_11_07_04) { |
| 371 | if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08)) { | 406 | ram_wr32(fuc, 0x10f698, 0x01010101 * next->bios.ramcfg_11_04); |
| 372 | ram_wr32(fuc, 0x10f698, 0x01010101 * data); | 407 | ram_wr32(fuc, 0x10f69c, 0x01010101 * next->bios.ramcfg_11_04); |
| 373 | ram_wr32(fuc, 0x10f69c, 0x01010101 * data); | 408 | } else |
| 409 | if (!next->bios.ramcfg_11_07_08) { | ||
| 410 | ram_wr32(fuc, 0x10f698, 0x00000000); | ||
| 411 | ram_wr32(fuc, 0x10f69c, 0x00000000); | ||
| 374 | } | 412 | } |
| 375 | 413 | ||
| 376 | if (ram->mode != 2) { | 414 | if (ram->mode != 2) { |
| 377 | u32 temp = ram_rd32(fuc, 0x10f694) & ~0xff00ff00; | 415 | u32 data = 0x01000100 * next->bios.ramcfg_11_04; |
| 378 | ram_wr32(fuc, 0x10f694, temp | (0x01000100 * data)); | 416 | ram_nuke(fuc, 0x10f694); |
| 417 | ram_mask(fuc, 0x10f694, 0xff00ff00, data); | ||
| 379 | } | 418 | } |
| 380 | 419 | ||
| 381 | if (ram->mode == 2 && (nv_ro08(bios, ramcfg + 0x08) & 0x10)) | 420 | if (ram->mode == 2 && (next->bios.ramcfg_11_08_10)) |
| 382 | data = 0x00000080; | 421 | data = 0x00000080; |
| 383 | else | 422 | else |
| 384 | data = 0x00000000; | 423 | data = 0x00000000; |
| @@ -386,19 +425,19 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq) | |||
| 386 | 425 | ||
| 387 | mask = 0x00070000; | 426 | mask = 0x00070000; |
| 388 | data = 0x00000000; | 427 | data = 0x00000000; |
| 389 | if (!(nv_ro08(bios, ramcfg + 0x02) & 0x80)) | 428 | if (!(next->bios.ramcfg_11_02_80)) |
| 390 | data |= 0x03000000; | 429 | data |= 0x03000000; |
| 391 | if (!(nv_ro08(bios, ramcfg + 0x02) & 0x40)) | 430 | if (!(next->bios.ramcfg_11_02_40)) |
| 392 | data |= 0x00002000; | 431 | data |= 0x00002000; |
| 393 | if (!(nv_ro08(bios, ramcfg + 0x07) & 0x10)) | 432 | if (!(next->bios.ramcfg_11_07_10)) |
| 394 | data |= 0x00004000; | 433 | data |= 0x00004000; |
| 395 | if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08)) | 434 | if (!(next->bios.ramcfg_11_07_08)) |
| 396 | data |= 0x00000003; | 435 | data |= 0x00000003; |
| 397 | else | 436 | else |
| 398 | data |= 0x74000000; | 437 | data |= 0x74000000; |
| 399 | ram_mask(fuc, 0x10f824, mask, data); | 438 | ram_mask(fuc, 0x10f824, mask, data); |
| 400 | 439 | ||
| 401 | if (nv_ro08(bios, ramcfg + 0x01) & 0x08) | 440 | if (next->bios.ramcfg_11_01_08) |
| 402 | data = 0x00000000; | 441 | data = 0x00000000; |
| 403 | else | 442 | else |
| 404 | data = 0x00001000; | 443 | data = 0x00001000; |
| @@ -409,61 +448,90 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq) | |||
| 409 | ram_mask(fuc, 0x10f670, 0x80000000, 0x00000000); | 448 | ram_mask(fuc, 0x10f670, 0x80000000, 0x00000000); |
| 410 | } | 449 | } |
| 411 | 450 | ||
| 412 | if (nv_ro08(bios, ramcfg + 0x08) & 0x01) | 451 | if (next->bios.ramcfg_11_08_01) |
| 413 | data = 0x00100000; | 452 | data = 0x00100000; |
| 414 | else | 453 | else |
| 415 | data = 0x00000000; | 454 | data = 0x00000000; |
| 416 | ram_mask(fuc, 0x10f82c, 0x00100000, data); | 455 | ram_mask(fuc, 0x10f82c, 0x00100000, data); |
| 417 | 456 | ||
| 418 | data = 0x00000000; | 457 | data = 0x00000000; |
| 419 | if (nv_ro08(bios, ramcfg + 0x08) & 0x08) | 458 | if (next->bios.ramcfg_11_08_08) |
| 420 | data |= 0x00002000; | 459 | data |= 0x00002000; |
| 421 | if (nv_ro08(bios, ramcfg + 0x08) & 0x04) | 460 | if (next->bios.ramcfg_11_08_04) |
| 422 | data |= 0x00001000; | 461 | data |= 0x00001000; |
| 423 | if (nv_ro08(bios, ramcfg + 0x08) & 0x02) | 462 | if (next->bios.ramcfg_11_08_02) |
| 424 | data |= 0x00004000; | 463 | data |= 0x00004000; |
| 425 | ram_mask(fuc, 0x10f830, 0x00007000, data); | 464 | ram_mask(fuc, 0x10f830, 0x00007000, data); |
| 426 | 465 | ||
| 427 | /* PFB timing */ | 466 | /* PFB timing */ |
| 428 | ram_mask(fuc, 0x10f248, 0xffffffff, nv_ro32(bios, timing + 0x28)); | 467 | ram_mask(fuc, 0x10f248, 0xffffffff, next->bios.timing[10]); |
| 429 | ram_mask(fuc, 0x10f290, 0xffffffff, nv_ro32(bios, timing + 0x00)); | 468 | ram_mask(fuc, 0x10f290, 0xffffffff, next->bios.timing[0]); |
| 430 | ram_mask(fuc, 0x10f294, 0xffffffff, nv_ro32(bios, timing + 0x04)); | 469 | ram_mask(fuc, 0x10f294, 0xffffffff, next->bios.timing[1]); |
| 431 | ram_mask(fuc, 0x10f298, 0xffffffff, nv_ro32(bios, timing + 0x08)); | 470 | ram_mask(fuc, 0x10f298, 0xffffffff, next->bios.timing[2]); |
| 432 | ram_mask(fuc, 0x10f29c, 0xffffffff, nv_ro32(bios, timing + 0x0c)); | 471 | ram_mask(fuc, 0x10f29c, 0xffffffff, next->bios.timing[3]); |
| 433 | ram_mask(fuc, 0x10f2a0, 0xffffffff, nv_ro32(bios, timing + 0x10)); | 472 | ram_mask(fuc, 0x10f2a0, 0xffffffff, next->bios.timing[4]); |
| 434 | ram_mask(fuc, 0x10f2a4, 0xffffffff, nv_ro32(bios, timing + 0x14)); | 473 | ram_mask(fuc, 0x10f2a4, 0xffffffff, next->bios.timing[5]); |
| 435 | ram_mask(fuc, 0x10f2a8, 0xffffffff, nv_ro32(bios, timing + 0x18)); | 474 | ram_mask(fuc, 0x10f2a8, 0xffffffff, next->bios.timing[6]); |
| 436 | ram_mask(fuc, 0x10f2ac, 0xffffffff, nv_ro32(bios, timing + 0x1c)); | 475 | ram_mask(fuc, 0x10f2ac, 0xffffffff, next->bios.timing[7]); |
| 437 | ram_mask(fuc, 0x10f2cc, 0xffffffff, nv_ro32(bios, timing + 0x20)); | 476 | ram_mask(fuc, 0x10f2cc, 0xffffffff, next->bios.timing[8]); |
| 438 | ram_mask(fuc, 0x10f2e8, 0xffffffff, nv_ro32(bios, timing + 0x24)); | 477 | ram_mask(fuc, 0x10f2e8, 0xffffffff, next->bios.timing[9]); |
| 439 | 478 | ||
| 440 | data = (nv_ro08(bios, ramcfg + 0x02) & 0x03) << 8; | 479 | data = mask = 0x00000000; |
| 441 | if (nv_ro08(bios, ramcfg + 0x01) & 0x10) | 480 | if (NOTE00(ramcfg_08_20)) { |
| 442 | data |= 0x70000000; | 481 | if (next->bios.ramcfg_11_08_20) |
| 443 | ram_mask(fuc, 0x10f604, 0x70000300, data); | 482 | data |= 0x01000000; |
| 444 | 483 | mask |= 0x01000000; | |
| 445 | data = (nv_ro08(bios, timing + 0x30) & 0x07) << 28; | 484 | } |
| 446 | if (nv_ro08(bios, ramcfg + 0x01) & 0x01) | 485 | ram_mask(fuc, 0x10f200, mask, data); |
| 447 | data |= 0x00000100; | 486 | |
| 448 | ram_mask(fuc, 0x10f614, 0x70000000, data); | 487 | data = mask = 0x00000000; |
| 449 | 488 | if (NOTE00(ramcfg_02_03 != 0)) { | |
| 450 | data = (nv_ro08(bios, timing + 0x30) & 0x07) << 28; | 489 | data |= (next->bios.ramcfg_11_02_03) << 8; |
| 451 | if (nv_ro08(bios, ramcfg + 0x01) & 0x02) | 490 | mask |= 0x00000300; |
| 452 | data |= 0x00000100; | 491 | } |
| 453 | ram_mask(fuc, 0x10f610, 0x70000000, data); | 492 | if (NOTE00(ramcfg_01_10)) { |
| 493 | if (next->bios.ramcfg_11_01_10) | ||
| 494 | data |= 0x70000000; | ||
| 495 | mask |= 0x70000000; | ||
| 496 | } | ||
| 497 | ram_mask(fuc, 0x10f604, mask, data); | ||
| 498 | |||
| 499 | data = mask = 0x00000000; | ||
| 500 | if (NOTE00(timing_30_07 != 0)) { | ||
| 501 | data |= (next->bios.timing_20_30_07) << 28; | ||
| 502 | mask |= 0x70000000; | ||
| 503 | } | ||
| 504 | if (NOTE00(ramcfg_01_01)) { | ||
| 505 | if (next->bios.ramcfg_11_01_01) | ||
| 506 | data |= 0x00000100; | ||
| 507 | mask |= 0x00000100; | ||
| 508 | } | ||
| 509 | ram_mask(fuc, 0x10f614, mask, data); | ||
| 510 | |||
| 511 | data = mask = 0x00000000; | ||
| 512 | if (NOTE00(timing_30_07 != 0)) { | ||
| 513 | data |= (next->bios.timing_20_30_07) << 28; | ||
| 514 | mask |= 0x70000000; | ||
| 515 | } | ||
| 516 | if (NOTE00(ramcfg_01_02)) { | ||
| 517 | if (next->bios.ramcfg_11_01_02) | ||
| 518 | data |= 0x00000100; | ||
| 519 | mask |= 0x00000100; | ||
| 520 | } | ||
| 521 | ram_mask(fuc, 0x10f610, mask, data); | ||
| 454 | 522 | ||
| 455 | mask = 0x33f00000; | 523 | mask = 0x33f00000; |
| 456 | data = 0x00000000; | 524 | data = 0x00000000; |
| 457 | if (!(nv_ro08(bios, ramcfg + 0x01) & 0x04)) | 525 | if (!(next->bios.ramcfg_11_01_04)) |
| 458 | data |= 0x20200000; | 526 | data |= 0x20200000; |
| 459 | if (!(nv_ro08(bios, ramcfg + 0x07) & 0x80)) | 527 | if (!(next->bios.ramcfg_11_07_80)) |
| 460 | data |= 0x12800000; | 528 | data |= 0x12800000; |
| 461 | /*XXX: see note above about there probably being some condition | 529 | /*XXX: see note above about there probably being some condition |
| 462 | * for the 10f824 stuff that uses ramcfg 3... | 530 | * for the 10f824 stuff that uses ramcfg 3... |
| 463 | */ | 531 | */ |
| 464 | if ( (nv_ro08(bios, ramcfg + 0x03) & 0xf0)) { | 532 | if ( (next->bios.ramcfg_11_03_f0)) { |
| 465 | if (nv_ro08(bios, rammap + 0x08) & 0x0c) { | 533 | if (next->bios.rammap_11_08_0c) { |
| 466 | if (!(nv_ro08(bios, ramcfg + 0x07) & 0x80)) | 534 | if (!(next->bios.ramcfg_11_07_80)) |
| 467 | mask |= 0x00000020; | 535 | mask |= 0x00000020; |
| 468 | else | 536 | else |
| 469 | data |= 0x00000020; | 537 | data |= 0x00000020; |
| @@ -476,49 +544,53 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq) | |||
| 476 | 544 | ||
| 477 | ram_mask(fuc, 0x10f808, mask, data); | 545 | ram_mask(fuc, 0x10f808, mask, data); |
| 478 | 546 | ||
| 479 | data = nv_ro08(bios, ramcfg + 0x03) & 0x0f; | 547 | ram_wr32(fuc, 0x10f870, 0x11111111 * next->bios.ramcfg_11_03_0f); |
| 480 | ram_wr32(fuc, 0x10f870, 0x11111111 * data); | ||
| 481 | 548 | ||
| 482 | data = nv_ro08(bios, ramcfg + 0x02) & 0x03; | 549 | data = mask = 0x00000000; |
| 483 | if (nv_ro08(bios, ramcfg + 0x01) & 0x10) | 550 | if (NOTE00(ramcfg_02_03 != 0)) { |
| 484 | data |= 0x00000004; | 551 | data |= next->bios.ramcfg_11_02_03; |
| 485 | if ((nv_rd32(bios, 0x100770) & 0x00000004) != (data & 0x00000004)) { | 552 | mask |= 0x00000003; |
| 486 | ram_wr32(fuc, 0x10f750, 0x04000009); | 553 | } |
| 554 | if (NOTE00(ramcfg_01_10)) { | ||
| 555 | if (next->bios.ramcfg_11_01_10) | ||
| 556 | data |= 0x00000004; | ||
| 557 | mask |= 0x00000004; | ||
| 558 | } | ||
| 559 | |||
| 560 | if ((ram_mask(fuc, 0x100770, mask, data) & mask & 4) != (data & 4)) { | ||
| 561 | ram_mask(fuc, 0x100750, 0x00000008, 0x00000008); | ||
| 487 | ram_wr32(fuc, 0x100710, 0x00000000); | 562 | ram_wr32(fuc, 0x100710, 0x00000000); |
| 488 | ram_wait(fuc, 0x100710, 0x80000000, 0x80000000, 200000); | 563 | ram_wait(fuc, 0x100710, 0x80000000, 0x80000000, 200000); |
| 489 | } | 564 | } |
| 490 | ram_mask(fuc, 0x100770, 0x00000007, data); | ||
| 491 | 565 | ||
| 492 | data = (nv_ro08(bios, timing + 0x30) & 0x07) << 8; | 566 | data = (next->bios.timing_20_30_07) << 8; |
| 493 | if (nv_ro08(bios, ramcfg + 0x01) & 0x01) | 567 | if (next->bios.ramcfg_11_01_01) |
| 494 | data |= 0x80000000; | 568 | data |= 0x80000000; |
| 495 | ram_mask(fuc, 0x100778, 0x00000700, data); | 569 | ram_mask(fuc, 0x100778, 0x00000700, data); |
| 496 | 570 | ||
| 497 | data = nv_ro16(bios, timing + 0x2c); | 571 | ram_mask(fuc, 0x10f250, 0x000003f0, next->bios.timing_20_2c_003f << 4); |
| 498 | ram_mask(fuc, 0x10f250, 0x000003f0, (data & 0x003f) << 4); | 572 | data = (next->bios.timing[10] & 0x7f000000) >> 24; |
| 499 | ram_mask(fuc, 0x10f24c, 0x7f000000, (data & 0x1fc0) << 18); | 573 | if (data < next->bios.timing_20_2c_1fc0) |
| 500 | 574 | data = next->bios.timing_20_2c_1fc0; | |
| 501 | data = nv_ro08(bios, timing + 0x30); | 575 | ram_mask(fuc, 0x10f24c, 0x7f000000, data << 24); |
| 502 | ram_mask(fuc, 0x10f224, 0x001f0000, (data & 0xf8) << 13); | 576 | ram_mask(fuc, 0x10f224, 0x001f0000, next->bios.timing_20_30_f8 << 16); |
| 503 | 577 | ||
| 504 | data = nv_ro16(bios, timing + 0x31); | 578 | ram_mask(fuc, 0x10fec4, 0x041e0f07, next->bios.timing_20_31_0800 << 26 | |
| 505 | ram_mask(fuc, 0x10fec4, 0x041e0f07, (data & 0x0800) << 15 | | 579 | next->bios.timing_20_31_0780 << 17 | |
| 506 | (data & 0x0780) << 10 | | 580 | next->bios.timing_20_31_0078 << 8 | |
| 507 | (data & 0x0078) << 5 | | 581 | next->bios.timing_20_31_0007); |
| 508 | (data & 0x0007)); | 582 | ram_mask(fuc, 0x10fec8, 0x00000027, next->bios.timing_20_31_8000 << 5 | |
| 509 | ram_mask(fuc, 0x10fec8, 0x00000027, (data & 0x8000) >> 10 | | 583 | next->bios.timing_20_31_7000); |
| 510 | (data & 0x7000) >> 12); | ||
| 511 | 584 | ||
| 512 | ram_wr32(fuc, 0x10f090, 0x4000007e); | 585 | ram_wr32(fuc, 0x10f090, 0x4000007e); |
| 513 | ram_nsec(fuc, 1000); | 586 | ram_nsec(fuc, 2000); |
| 514 | ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */ | 587 | ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */ |
| 515 | ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */ | 588 | ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */ |
| 516 | ram_nsec(fuc, 2000); | ||
| 517 | ram_wr32(fuc, 0x10f210, 0x80000000); /* REFRESH_AUTO = 1 */ | 589 | ram_wr32(fuc, 0x10f210, 0x80000000); /* REFRESH_AUTO = 1 */ |
| 518 | 590 | ||
| 519 | if ((nv_ro08(bios, ramcfg + 0x08) & 0x10) && (ram->mode == 2) /*XXX*/) { | 591 | if ((next->bios.ramcfg_11_08_10) && (ram->mode == 2) /*XXX*/) { |
| 520 | u32 temp = ram_mask(fuc, 0x10f294, 0xff000000, 0x24000000); | 592 | u32 temp = ram_mask(fuc, 0x10f294, 0xff000000, 0x24000000); |
| 521 | train(fuc, 0xa4010000); /*XXX*/ | 593 | nve0_ram_train(fuc, 0xbc0e0000, 0xa4010000); /*XXX*/ |
| 522 | ram_nsec(fuc, 1000); | 594 | ram_nsec(fuc, 1000); |
| 523 | ram_wr32(fuc, 0x10f294, temp); | 595 | ram_wr32(fuc, 0x10f294, temp); |
| 524 | } | 596 | } |
| @@ -528,7 +600,7 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq) | |||
| 528 | ram_mask(fuc, mr[8], 0xfff, ram->base.mr[8]); | 600 | ram_mask(fuc, mr[8], 0xfff, ram->base.mr[8]); |
| 529 | ram_nsec(fuc, 1000); | 601 | ram_nsec(fuc, 1000); |
| 530 | ram_mask(fuc, mr[1], 0xfff, ram->base.mr[1]); | 602 | ram_mask(fuc, mr[1], 0xfff, ram->base.mr[1]); |
| 531 | ram_mask(fuc, mr[5], 0xfff, ram->base.mr[5]); | 603 | ram_mask(fuc, mr[5], 0xfff, ram->base.mr[5] & ~0x004); /* LP3 later */ |
| 532 | ram_mask(fuc, mr[6], 0xfff, ram->base.mr[6]); | 604 | ram_mask(fuc, mr[6], 0xfff, ram->base.mr[6]); |
| 533 | ram_mask(fuc, mr[7], 0xfff, ram->base.mr[7]); | 605 | ram_mask(fuc, mr[7], 0xfff, ram->base.mr[7]); |
| 534 | 606 | ||
| @@ -544,12 +616,13 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq) | |||
| 544 | ram_wr32(fuc, 0x10f318, 0x00000001); /* NOP? */ | 616 | ram_wr32(fuc, 0x10f318, 0x00000001); /* NOP? */ |
| 545 | ram_mask(fuc, 0x10f200, 0x80000000, 0x00000000); | 617 | ram_mask(fuc, 0x10f200, 0x80000000, 0x00000000); |
| 546 | ram_nsec(fuc, 1000); | 618 | ram_nsec(fuc, 1000); |
| 619 | ram_nuts(ram, 0x10f200, 0x18808800, 0x00000000, 0x18808800); | ||
| 547 | 620 | ||
| 548 | data = ram_rd32(fuc, 0x10f978); | 621 | data = ram_rd32(fuc, 0x10f978); |
| 549 | data &= ~0x00046144; | 622 | data &= ~0x00046144; |
| 550 | data |= 0x0000000b; | 623 | data |= 0x0000000b; |
| 551 | if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08)) { | 624 | if (!(next->bios.ramcfg_11_07_08)) { |
| 552 | if (!(nv_ro08(bios, ramcfg + 0x07) & 0x04)) | 625 | if (!(next->bios.ramcfg_11_07_04)) |
| 553 | data |= 0x0000200c; | 626 | data |= 0x0000200c; |
| 554 | else | 627 | else |
| 555 | data |= 0x00000000; | 628 | data |= 0x00000000; |
| @@ -563,44 +636,43 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq) | |||
| 563 | ram_wr32(fuc, 0x10f830, data); | 636 | ram_wr32(fuc, 0x10f830, data); |
| 564 | } | 637 | } |
| 565 | 638 | ||
| 566 | if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08)) { | 639 | if (!(next->bios.ramcfg_11_07_08)) { |
| 567 | data = 0x88020000; | 640 | data = 0x88020000; |
| 568 | if ( (nv_ro08(bios, ramcfg + 0x07) & 0x04)) | 641 | if ( (next->bios.ramcfg_11_07_04)) |
| 569 | data |= 0x10000000; | 642 | data |= 0x10000000; |
| 570 | if (!(nv_ro08(bios, rammap + 0x08) & 0x10)) | 643 | if (!(next->bios.rammap_11_08_10)) |
| 571 | data |= 0x00080000; | 644 | data |= 0x00080000; |
| 572 | } else { | 645 | } else { |
| 573 | data = 0xa40e0000; | 646 | data = 0xa40e0000; |
| 574 | } | 647 | } |
| 575 | train(fuc, data); | 648 | nve0_ram_train(fuc, 0xbc0f0000, data); |
| 576 | ram_nsec(fuc, 1000); | 649 | if (1) /* XXX: not always? */ |
| 650 | ram_nsec(fuc, 1000); | ||
| 577 | 651 | ||
| 578 | if (ram->mode == 2) { /*XXX*/ | 652 | if (ram->mode == 2) { /*XXX*/ |
| 579 | ram_mask(fuc, 0x10f800, 0x00000004, 0x00000004); | 653 | ram_mask(fuc, 0x10f800, 0x00000004, 0x00000004); |
| 580 | } | 654 | } |
| 581 | 655 | ||
| 582 | /* MR5: (re)enable LP3 if necessary | 656 | /* LP3 */ |
| 583 | * XXX: need to find the switch, keeping off for now | 657 | if (ram_mask(fuc, mr[5], 0x004, ram->base.mr[5]) != ram->base.mr[5]) |
| 584 | */ | 658 | ram_nsec(fuc, 1000); |
| 585 | ram_mask(fuc, mr[5], 0x00000004, 0x00000000); | ||
| 586 | 659 | ||
| 587 | if (ram->mode != 2) { | 660 | if (ram->mode != 2) { |
| 588 | ram_mask(fuc, 0x10f830, 0x01000000, 0x01000000); | 661 | ram_mask(fuc, 0x10f830, 0x01000000, 0x01000000); |
| 589 | ram_mask(fuc, 0x10f830, 0x01000000, 0x00000000); | 662 | ram_mask(fuc, 0x10f830, 0x01000000, 0x00000000); |
| 590 | } | 663 | } |
| 591 | 664 | ||
| 592 | if (nv_ro08(bios, ramcfg + 0x07) & 0x02) { | 665 | if (next->bios.ramcfg_11_07_02) |
| 593 | ram_mask(fuc, 0x10f910, 0x80020000, 0x01000000); | 666 | nve0_ram_train(fuc, 0x80020000, 0x01000000); |
| 594 | ram_mask(fuc, 0x10f914, 0x80020000, 0x01000000); | ||
| 595 | } | ||
| 596 | 667 | ||
| 597 | ram_wr32(fuc, 0x62c000, 0x0f0f0f00); | 668 | ram_wr32(fuc, 0x62c000, 0x0f0f0f00); |
| 598 | 669 | ||
| 599 | if (nv_ro08(bios, rammap + 0x08) & 0x01) | 670 | if (next->bios.rammap_11_08_01) |
| 600 | data = 0x00000800; | 671 | data = 0x00000800; |
| 601 | else | 672 | else |
| 602 | data = 0x00000000; | 673 | data = 0x00000000; |
| 603 | ram_mask(fuc, 0x10f200, 0x00000800, data); | 674 | ram_mask(fuc, 0x10f200, 0x00000800, data); |
| 675 | ram_nuts(ram, 0x10f200, 0x18808800, data, 0x18808800); | ||
| 604 | return 0; | 676 | return 0; |
| 605 | } | 677 | } |
| 606 | 678 | ||
| @@ -611,17 +683,14 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq) | |||
| 611 | static int | 683 | static int |
| 612 | nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq) | 684 | nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq) |
| 613 | { | 685 | { |
| 614 | struct nouveau_bios *bios = nouveau_bios(pfb); | ||
| 615 | struct nve0_ram *ram = (void *)pfb->ram; | 686 | struct nve0_ram *ram = (void *)pfb->ram; |
| 616 | struct nve0_ramfuc *fuc = &ram->fuc; | 687 | struct nve0_ramfuc *fuc = &ram->fuc; |
| 617 | const u32 rcoef = (( ram->P1 << 16) | (ram->N1 << 8) | ram->M1); | 688 | const u32 rcoef = (( ram->P1 << 16) | (ram->N1 << 8) | ram->M1); |
| 618 | const u32 runk0 = ram->fN1 << 16; | 689 | const u32 runk0 = ram->fN1 << 16; |
| 619 | const u32 runk1 = ram->fN1; | 690 | const u32 runk1 = ram->fN1; |
| 620 | const u32 rammap = ram->base.rammap.data; | 691 | struct nouveau_ram_data *next = ram->base.next; |
| 621 | const u32 ramcfg = ram->base.ramcfg.data; | 692 | int vc = !(next->bios.ramcfg_11_02_08); |
| 622 | const u32 timing = ram->base.timing.data; | 693 | int mv = !(next->bios.ramcfg_11_02_04); |
| 623 | int vc = !(nv_ro08(bios, ramcfg + 0x02) & 0x08); | ||
| 624 | int mv = 1; /*XXX*/ | ||
| 625 | u32 mask, data; | 694 | u32 mask, data; |
| 626 | 695 | ||
| 627 | ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000); | 696 | ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000); |
| @@ -636,7 +705,7 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq) | |||
| 636 | } | 705 | } |
| 637 | 706 | ||
| 638 | ram_mask(fuc, 0x10f200, 0x00000800, 0x00000000); | 707 | ram_mask(fuc, 0x10f200, 0x00000800, 0x00000000); |
| 639 | if ((nv_ro08(bios, ramcfg + 0x03) & 0xf0)) | 708 | if ((next->bios.ramcfg_11_03_f0)) |
| 640 | ram_mask(fuc, 0x10f808, 0x04000000, 0x04000000); | 709 | ram_mask(fuc, 0x10f808, 0x04000000, 0x04000000); |
| 641 | 710 | ||
| 642 | ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */ | 711 | ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */ |
| @@ -661,28 +730,28 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq) | |||
| 661 | if (1) { | 730 | if (1) { |
| 662 | mask |= 0x800807e0; | 731 | mask |= 0x800807e0; |
| 663 | data |= 0x800807e0; | 732 | data |= 0x800807e0; |
| 664 | switch (nv_ro08(bios, ramcfg + 0x03) & 0xc0) { | 733 | switch (next->bios.ramcfg_11_03_c0) { |
| 665 | case 0xc0: data &= ~0x00000040; break; | 734 | case 3: data &= ~0x00000040; break; |
| 666 | case 0x80: data &= ~0x00000100; break; | 735 | case 2: data &= ~0x00000100; break; |
| 667 | case 0x40: data &= ~0x80000000; break; | 736 | case 1: data &= ~0x80000000; break; |
| 668 | case 0x00: data &= ~0x00000400; break; | 737 | case 0: data &= ~0x00000400; break; |
| 669 | } | 738 | } |
| 670 | 739 | ||
| 671 | switch (nv_ro08(bios, ramcfg + 0x03) & 0x30) { | 740 | switch (next->bios.ramcfg_11_03_30) { |
| 672 | case 0x30: data &= ~0x00000020; break; | 741 | case 3: data &= ~0x00000020; break; |
| 673 | case 0x20: data &= ~0x00000080; break; | 742 | case 2: data &= ~0x00000080; break; |
| 674 | case 0x10: data &= ~0x00080000; break; | 743 | case 1: data &= ~0x00080000; break; |
| 675 | case 0x00: data &= ~0x00000200; break; | 744 | case 0: data &= ~0x00000200; break; |
| 676 | } | 745 | } |
| 677 | } | 746 | } |
| 678 | 747 | ||
| 679 | if (nv_ro08(bios, ramcfg + 0x02) & 0x80) | 748 | if (next->bios.ramcfg_11_02_80) |
| 680 | mask |= 0x03000000; | 749 | mask |= 0x03000000; |
| 681 | if (nv_ro08(bios, ramcfg + 0x02) & 0x40) | 750 | if (next->bios.ramcfg_11_02_40) |
| 682 | mask |= 0x00002000; | 751 | mask |= 0x00002000; |
| 683 | if (nv_ro08(bios, ramcfg + 0x07) & 0x10) | 752 | if (next->bios.ramcfg_11_07_10) |
| 684 | mask |= 0x00004000; | 753 | mask |= 0x00004000; |
| 685 | if (nv_ro08(bios, ramcfg + 0x07) & 0x08) | 754 | if (next->bios.ramcfg_11_07_08) |
| 686 | mask |= 0x00000003; | 755 | mask |= 0x00000003; |
| 687 | else | 756 | else |
| 688 | mask |= 0x14000000; | 757 | mask |= 0x14000000; |
| @@ -692,7 +761,7 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq) | |||
| 692 | 761 | ||
| 693 | ram_mask(fuc, 0x1373f4, 0x00000000, 0x00010010); | 762 | ram_mask(fuc, 0x1373f4, 0x00000000, 0x00010010); |
| 694 | data = ram_rd32(fuc, 0x1373ec) & ~0x00030000; | 763 | data = ram_rd32(fuc, 0x1373ec) & ~0x00030000; |
| 695 | data |= (nv_ro08(bios, ramcfg + 0x03) & 0x30) << 12; | 764 | data |= (next->bios.ramcfg_11_03_30) << 12; |
| 696 | ram_wr32(fuc, 0x1373ec, data); | 765 | ram_wr32(fuc, 0x1373ec, data); |
| 697 | ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000000); | 766 | ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000000); |
| 698 | ram_mask(fuc, 0x1373f4, 0x00000010, 0x00000000); | 767 | ram_mask(fuc, 0x1373f4, 0x00000010, 0x00000000); |
| @@ -724,68 +793,67 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq) | |||
| 724 | } | 793 | } |
| 725 | } | 794 | } |
| 726 | 795 | ||
| 727 | if ( (nv_ro08(bios, ramcfg + 0x02) & 0x40) || | 796 | if ( (next->bios.ramcfg_11_02_40) || |
| 728 | (nv_ro08(bios, ramcfg + 0x07) & 0x10)) { | 797 | (next->bios.ramcfg_11_07_10)) { |
| 729 | ram_mask(fuc, 0x132040, 0x00010000, 0x00010000); | 798 | ram_mask(fuc, 0x132040, 0x00010000, 0x00010000); |
| 730 | ram_nsec(fuc, 20000); | 799 | ram_nsec(fuc, 20000); |
| 731 | } | 800 | } |
| 732 | 801 | ||
| 733 | if (ram->mode != 2) /*XXX*/ { | 802 | if (ram->mode != 2) /*XXX*/ { |
| 734 | if (nv_ro08(bios, ramcfg + 0x07) & 0x40) | 803 | if (next->bios.ramcfg_11_07_40) |
| 735 | ram_mask(fuc, 0x10f670, 0x80000000, 0x80000000); | 804 | ram_mask(fuc, 0x10f670, 0x80000000, 0x80000000); |
| 736 | } | 805 | } |
| 737 | 806 | ||
| 738 | data = (nv_ro08(bios, rammap + 0x11) & 0x0c) >> 2; | 807 | ram_wr32(fuc, 0x10f65c, 0x00000011 * next->bios.rammap_11_11_0c); |
| 739 | ram_wr32(fuc, 0x10f65c, 0x00000011 * data); | 808 | ram_wr32(fuc, 0x10f6b8, 0x01010101 * next->bios.ramcfg_11_09); |
| 740 | ram_wr32(fuc, 0x10f6b8, 0x01010101 * nv_ro08(bios, ramcfg + 0x09)); | 809 | ram_wr32(fuc, 0x10f6bc, 0x01010101 * next->bios.ramcfg_11_09); |
| 741 | ram_wr32(fuc, 0x10f6bc, 0x01010101 * nv_ro08(bios, ramcfg + 0x09)); | ||
| 742 | 810 | ||
| 743 | mask = 0x00010000; | 811 | mask = 0x00010000; |
| 744 | data = 0x00000000; | 812 | data = 0x00000000; |
| 745 | if (!(nv_ro08(bios, ramcfg + 0x02) & 0x80)) | 813 | if (!(next->bios.ramcfg_11_02_80)) |
| 746 | data |= 0x03000000; | 814 | data |= 0x03000000; |
| 747 | if (!(nv_ro08(bios, ramcfg + 0x02) & 0x40)) | 815 | if (!(next->bios.ramcfg_11_02_40)) |
| 748 | data |= 0x00002000; | 816 | data |= 0x00002000; |
| 749 | if (!(nv_ro08(bios, ramcfg + 0x07) & 0x10)) | 817 | if (!(next->bios.ramcfg_11_07_10)) |
| 750 | data |= 0x00004000; | 818 | data |= 0x00004000; |
| 751 | if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08)) | 819 | if (!(next->bios.ramcfg_11_07_08)) |
| 752 | data |= 0x00000003; | 820 | data |= 0x00000003; |
| 753 | else | 821 | else |
| 754 | data |= 0x14000000; | 822 | data |= 0x14000000; |
| 755 | ram_mask(fuc, 0x10f824, mask, data); | 823 | ram_mask(fuc, 0x10f824, mask, data); |
| 756 | ram_nsec(fuc, 1000); | 824 | ram_nsec(fuc, 1000); |
| 757 | 825 | ||
| 758 | if (nv_ro08(bios, ramcfg + 0x08) & 0x01) | 826 | if (next->bios.ramcfg_11_08_01) |
| 759 | data = 0x00100000; | 827 | data = 0x00100000; |
| 760 | else | 828 | else |
| 761 | data = 0x00000000; | 829 | data = 0x00000000; |
| 762 | ram_mask(fuc, 0x10f82c, 0x00100000, data); | 830 | ram_mask(fuc, 0x10f82c, 0x00100000, data); |
| 763 | 831 | ||
| 764 | /* PFB timing */ | 832 | /* PFB timing */ |
| 765 | ram_mask(fuc, 0x10f248, 0xffffffff, nv_ro32(bios, timing + 0x28)); | 833 | ram_mask(fuc, 0x10f248, 0xffffffff, next->bios.timing[10]); |
| 766 | ram_mask(fuc, 0x10f290, 0xffffffff, nv_ro32(bios, timing + 0x00)); | 834 | ram_mask(fuc, 0x10f290, 0xffffffff, next->bios.timing[0]); |
| 767 | ram_mask(fuc, 0x10f294, 0xffffffff, nv_ro32(bios, timing + 0x04)); | 835 | ram_mask(fuc, 0x10f294, 0xffffffff, next->bios.timing[1]); |
| 768 | ram_mask(fuc, 0x10f298, 0xffffffff, nv_ro32(bios, timing + 0x08)); | 836 | ram_mask(fuc, 0x10f298, 0xffffffff, next->bios.timing[2]); |
| 769 | ram_mask(fuc, 0x10f29c, 0xffffffff, nv_ro32(bios, timing + 0x0c)); | 837 | ram_mask(fuc, 0x10f29c, 0xffffffff, next->bios.timing[3]); |
| 770 | ram_mask(fuc, 0x10f2a0, 0xffffffff, nv_ro32(bios, timing + 0x10)); | 838 | ram_mask(fuc, 0x10f2a0, 0xffffffff, next->bios.timing[4]); |
| 771 | ram_mask(fuc, 0x10f2a4, 0xffffffff, nv_ro32(bios, timing + 0x14)); | 839 | ram_mask(fuc, 0x10f2a4, 0xffffffff, next->bios.timing[5]); |
| 772 | ram_mask(fuc, 0x10f2a8, 0xffffffff, nv_ro32(bios, timing + 0x18)); | 840 | ram_mask(fuc, 0x10f2a8, 0xffffffff, next->bios.timing[6]); |
| 773 | ram_mask(fuc, 0x10f2ac, 0xffffffff, nv_ro32(bios, timing + 0x1c)); | 841 | ram_mask(fuc, 0x10f2ac, 0xffffffff, next->bios.timing[7]); |
| 774 | ram_mask(fuc, 0x10f2cc, 0xffffffff, nv_ro32(bios, timing + 0x20)); | 842 | ram_mask(fuc, 0x10f2cc, 0xffffffff, next->bios.timing[8]); |
| 775 | ram_mask(fuc, 0x10f2e8, 0xffffffff, nv_ro32(bios, timing + 0x24)); | 843 | ram_mask(fuc, 0x10f2e8, 0xffffffff, next->bios.timing[9]); |
| 776 | 844 | ||
| 777 | mask = 0x33f00000; | 845 | mask = 0x33f00000; |
| 778 | data = 0x00000000; | 846 | data = 0x00000000; |
| 779 | if (!(nv_ro08(bios, ramcfg + 0x01) & 0x04)) | 847 | if (!(next->bios.ramcfg_11_01_04)) |
| 780 | data |= 0x20200000; | 848 | data |= 0x20200000; |
| 781 | if (!(nv_ro08(bios, ramcfg + 0x07) & 0x80)) | 849 | if (!(next->bios.ramcfg_11_07_80)) |
| 782 | data |= 0x12800000; | 850 | data |= 0x12800000; |
| 783 | /*XXX: see note above about there probably being some condition | 851 | /*XXX: see note above about there probably being some condition |
| 784 | * for the 10f824 stuff that uses ramcfg 3... | 852 | * for the 10f824 stuff that uses ramcfg 3... |
| 785 | */ | 853 | */ |
| 786 | if ( (nv_ro08(bios, ramcfg + 0x03) & 0xf0)) { | 854 | if ( (next->bios.ramcfg_11_03_f0)) { |
| 787 | if (nv_ro08(bios, rammap + 0x08) & 0x0c) { | 855 | if (next->bios.rammap_11_08_0c) { |
| 788 | if (!(nv_ro08(bios, ramcfg + 0x07) & 0x80)) | 856 | if (!(next->bios.ramcfg_11_07_80)) |
| 789 | mask |= 0x00000020; | 857 | mask |= 0x00000020; |
| 790 | else | 858 | else |
| 791 | data |= 0x00000020; | 859 | data |= 0x00000020; |
| @@ -799,21 +867,16 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq) | |||
| 799 | 867 | ||
| 800 | ram_mask(fuc, 0x10f808, mask, data); | 868 | ram_mask(fuc, 0x10f808, mask, data); |
| 801 | 869 | ||
| 802 | data = nv_ro08(bios, ramcfg + 0x03) & 0x0f; | 870 | ram_wr32(fuc, 0x10f870, 0x11111111 * next->bios.ramcfg_11_03_0f); |
| 803 | ram_wr32(fuc, 0x10f870, 0x11111111 * data); | ||
| 804 | 871 | ||
| 805 | data = nv_ro16(bios, timing + 0x2c); | 872 | ram_mask(fuc, 0x10f250, 0x000003f0, next->bios.timing_20_2c_003f << 4); |
| 806 | ram_mask(fuc, 0x10f250, 0x000003f0, (data & 0x003f) << 4); | ||
| 807 | 873 | ||
| 808 | if (((nv_ro32(bios, timing + 0x2c) & 0x00001fc0) >> 6) > | 874 | data = (next->bios.timing[10] & 0x7f000000) >> 24; |
| 809 | ((nv_ro32(bios, timing + 0x28) & 0x7f000000) >> 24)) | 875 | if (data < next->bios.timing_20_2c_1fc0) |
| 810 | data = (nv_ro32(bios, timing + 0x2c) & 0x00001fc0) >> 6; | 876 | data = next->bios.timing_20_2c_1fc0; |
| 811 | else | ||
| 812 | data = (nv_ro32(bios, timing + 0x28) & 0x1f000000) >> 24; | ||
| 813 | ram_mask(fuc, 0x10f24c, 0x7f000000, data << 24); | 877 | ram_mask(fuc, 0x10f24c, 0x7f000000, data << 24); |
| 814 | 878 | ||
| 815 | data = nv_ro08(bios, timing + 0x30); | 879 | ram_mask(fuc, 0x10f224, 0x001f0000, next->bios.timing_20_30_f8); |
| 816 | ram_mask(fuc, 0x10f224, 0x001f0000, (data & 0xf8) << 13); | ||
| 817 | 880 | ||
| 818 | ram_wr32(fuc, 0x10f090, 0x4000007f); | 881 | ram_wr32(fuc, 0x10f090, 0x4000007f); |
| 819 | ram_nsec(fuc, 1000); | 882 | ram_nsec(fuc, 1000); |
| @@ -855,7 +918,7 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq) | |||
| 855 | 918 | ||
| 856 | ram_wr32(fuc, 0x62c000, 0x0f0f0f00); | 919 | ram_wr32(fuc, 0x62c000, 0x0f0f0f00); |
| 857 | 920 | ||
| 858 | if (nv_ro08(bios, rammap + 0x08) & 0x01) | 921 | if (next->bios.rammap_11_08_01) |
| 859 | data = 0x00000800; | 922 | data = 0x00000800; |
| 860 | else | 923 | else |
| 861 | data = 0x00000000; | 924 | data = 0x00000000; |
| @@ -868,21 +931,18 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq) | |||
| 868 | ******************************************************************************/ | 931 | ******************************************************************************/ |
| 869 | 932 | ||
| 870 | static int | 933 | static int |
| 871 | nve0_ram_calc(struct nouveau_fb *pfb, u32 freq) | 934 | nve0_ram_calc_data(struct nouveau_fb *pfb, u32 freq, |
| 935 | struct nouveau_ram_data *data) | ||
| 872 | { | 936 | { |
| 873 | struct nouveau_bios *bios = nouveau_bios(pfb); | 937 | struct nouveau_bios *bios = nouveau_bios(pfb); |
| 874 | struct nve0_ram *ram = (void *)pfb->ram; | 938 | struct nve0_ram *ram = (void *)pfb->ram; |
| 875 | struct nve0_ramfuc *fuc = &ram->fuc; | 939 | u8 strap, cnt, len; |
| 876 | struct bit_entry M; | ||
| 877 | int ret, refclk, strap, i; | ||
| 878 | u32 data; | ||
| 879 | u8 cnt; | ||
| 880 | 940 | ||
| 881 | /* lookup memory config data relevant to the target frequency */ | 941 | /* lookup memory config data relevant to the target frequency */ |
| 882 | ram->base.rammap.data = nvbios_rammap_match(bios, freq / 1000, | 942 | ram->base.rammap.data = nvbios_rammapEp(bios, freq / 1000, |
| 883 | &ram->base.rammap.version, | 943 | &ram->base.rammap.version, |
| 884 | &ram->base.rammap.size, &cnt, | 944 | &ram->base.rammap.size, |
| 885 | &ram->base.ramcfg.size); | 945 | &cnt, &len, &data->bios); |
| 886 | if (!ram->base.rammap.data || ram->base.rammap.version != 0x11 || | 946 | if (!ram->base.rammap.data || ram->base.rammap.version != 0x11 || |
| 887 | ram->base.rammap.size < 0x09) { | 947 | ram->base.rammap.size < 0x09) { |
| 888 | nv_error(pfb, "invalid/missing rammap entry\n"); | 948 | nv_error(pfb, "invalid/missing rammap entry\n"); |
| @@ -890,24 +950,13 @@ nve0_ram_calc(struct nouveau_fb *pfb, u32 freq) | |||
| 890 | } | 950 | } |
| 891 | 951 | ||
| 892 | /* locate specific data set for the attached memory */ | 952 | /* locate specific data set for the attached memory */ |
| 893 | if (bit_entry(bios, 'M', &M) || M.version != 2 || M.length < 3) { | 953 | ram->base.ramcfg.data = nvbios_rammapSp(bios, ram->base.rammap.data, |
| 894 | nv_error(pfb, "invalid/missing memory table\n"); | 954 | ram->base.rammap.version, |
| 895 | return -EINVAL; | 955 | ram->base.rammap.size, cnt, len, |
| 896 | } | 956 | nvbios_ramcfg_index(bios), |
| 897 | 957 | &ram->base.ramcfg.version, | |
| 898 | strap = (nv_rd32(pfb, 0x101000) & 0x0000003c) >> 2; | 958 | &ram->base.ramcfg.size, |
| 899 | data = nv_ro16(bios, M.offset + 1); | 959 | &data->bios); |
| 900 | if (data) | ||
| 901 | strap = nv_ro08(bios, data + strap); | ||
| 902 | |||
| 903 | if (strap >= cnt) { | ||
| 904 | nv_error(pfb, "invalid ramcfg strap\n"); | ||
| 905 | return -EINVAL; | ||
| 906 | } | ||
| 907 | |||
| 908 | ram->base.ramcfg.version = ram->base.rammap.version; | ||
| 909 | ram->base.ramcfg.data = ram->base.rammap.data + ram->base.rammap.size + | ||
| 910 | (ram->base.ramcfg.size * strap); | ||
| 911 | if (!ram->base.ramcfg.data || ram->base.ramcfg.version != 0x11 || | 960 | if (!ram->base.ramcfg.data || ram->base.ramcfg.version != 0x11 || |
| 912 | ram->base.ramcfg.size < 0x08) { | 961 | ram->base.ramcfg.size < 0x08) { |
| 913 | nv_error(pfb, "invalid/missing ramcfg entry\n"); | 962 | nv_error(pfb, "invalid/missing ramcfg entry\n"); |
| @@ -918,9 +967,9 @@ nve0_ram_calc(struct nouveau_fb *pfb, u32 freq) | |||
| 918 | strap = nv_ro08(bios, ram->base.ramcfg.data + 0x00); | 967 | strap = nv_ro08(bios, ram->base.ramcfg.data + 0x00); |
| 919 | if (strap != 0xff) { | 968 | if (strap != 0xff) { |
| 920 | ram->base.timing.data = | 969 | ram->base.timing.data = |
| 921 | nvbios_timing_entry(bios, strap, | 970 | nvbios_timingEp(bios, strap, &ram->base.timing.version, |
| 922 | &ram->base.timing.version, | 971 | &ram->base.timing.size, &cnt, &len, |
| 923 | &ram->base.timing.size); | 972 | &data->bios); |
| 924 | if (!ram->base.timing.data || | 973 | if (!ram->base.timing.data || |
| 925 | ram->base.timing.version != 0x20 || | 974 | ram->base.timing.version != 0x20 || |
| 926 | ram->base.timing.size < 0x33) { | 975 | ram->base.timing.size < 0x33) { |
| @@ -931,11 +980,23 @@ nve0_ram_calc(struct nouveau_fb *pfb, u32 freq) | |||
| 931 | ram->base.timing.data = 0; | 980 | ram->base.timing.data = 0; |
| 932 | } | 981 | } |
| 933 | 982 | ||
| 983 | data->freq = freq; | ||
| 984 | return 0; | ||
| 985 | } | ||
| 986 | |||
| 987 | static int | ||
| 988 | nve0_ram_calc_xits(struct nouveau_fb *pfb, struct nouveau_ram_data *next) | ||
| 989 | { | ||
| 990 | struct nve0_ram *ram = (void *)pfb->ram; | ||
| 991 | struct nve0_ramfuc *fuc = &ram->fuc; | ||
| 992 | int refclk, i; | ||
| 993 | int ret; | ||
| 994 | |||
| 934 | ret = ram_init(fuc, pfb); | 995 | ret = ram_init(fuc, pfb); |
| 935 | if (ret) | 996 | if (ret) |
| 936 | return ret; | 997 | return ret; |
| 937 | 998 | ||
| 938 | ram->mode = (freq > fuc->refpll.vco1.max_freq) ? 2 : 1; | 999 | ram->mode = (next->freq > fuc->refpll.vco1.max_freq) ? 2 : 1; |
| 939 | ram->from = ram_rd32(fuc, 0x1373f4) & 0x0000000f; | 1000 | ram->from = ram_rd32(fuc, 0x1373f4) & 0x0000000f; |
| 940 | 1001 | ||
| 941 | /* XXX: this is *not* what nvidia do. on fermi nvidia generally | 1002 | /* XXX: this is *not* what nvidia do. on fermi nvidia generally |
| @@ -946,7 +1007,7 @@ nve0_ram_calc(struct nouveau_fb *pfb, u32 freq) | |||
| 946 | * so far, i've seen very weird values being chosen by nvidia on | 1007 | * so far, i've seen very weird values being chosen by nvidia on |
| 947 | * kepler boards, no idea how/why they're chosen. | 1008 | * kepler boards, no idea how/why they're chosen. |
| 948 | */ | 1009 | */ |
| 949 | refclk = freq; | 1010 | refclk = next->freq; |
| 950 | if (ram->mode == 2) | 1011 | if (ram->mode == 2) |
| 951 | refclk = fuc->mempll.refclk; | 1012 | refclk = fuc->mempll.refclk; |
| 952 | 1013 | ||
| @@ -968,7 +1029,7 @@ nve0_ram_calc(struct nouveau_fb *pfb, u32 freq) | |||
| 968 | fuc->mempll.min_p = 1; | 1029 | fuc->mempll.min_p = 1; |
| 969 | fuc->mempll.max_p = 2; | 1030 | fuc->mempll.max_p = 2; |
| 970 | 1031 | ||
| 971 | ret = nva3_pll_calc(nv_subdev(pfb), &fuc->mempll, freq, | 1032 | ret = nva3_pll_calc(nv_subdev(pfb), &fuc->mempll, next->freq, |
| 972 | &ram->N2, NULL, &ram->M2, &ram->P2); | 1033 | &ram->N2, NULL, &ram->M2, &ram->P2); |
| 973 | if (ret <= 0) { | 1034 | if (ret <= 0) { |
| 974 | nv_error(pfb, "unable to calc mempll\n"); | 1035 | nv_error(pfb, "unable to calc mempll\n"); |
| @@ -980,17 +1041,18 @@ nve0_ram_calc(struct nouveau_fb *pfb, u32 freq) | |||
| 980 | if (ram_have(fuc, mr[i])) | 1041 | if (ram_have(fuc, mr[i])) |
| 981 | ram->base.mr[i] = ram_rd32(fuc, mr[i]); | 1042 | ram->base.mr[i] = ram_rd32(fuc, mr[i]); |
| 982 | } | 1043 | } |
| 1044 | ram->base.freq = next->freq; | ||
| 983 | 1045 | ||
| 984 | switch (ram->base.type) { | 1046 | switch (ram->base.type) { |
| 985 | case NV_MEM_TYPE_DDR3: | 1047 | case NV_MEM_TYPE_DDR3: |
| 986 | ret = nouveau_sddr3_calc(&ram->base); | 1048 | ret = nouveau_sddr3_calc(&ram->base); |
| 987 | if (ret == 0) | 1049 | if (ret == 0) |
| 988 | ret = nve0_ram_calc_sddr3(pfb, freq); | 1050 | ret = nve0_ram_calc_sddr3(pfb, next->freq); |
| 989 | break; | 1051 | break; |
| 990 | case NV_MEM_TYPE_GDDR5: | 1052 | case NV_MEM_TYPE_GDDR5: |
| 991 | ret = nouveau_gddr5_calc(&ram->base); | 1053 | ret = nouveau_gddr5_calc(&ram->base, ram->pnuts != 0); |
| 992 | if (ret == 0) | 1054 | if (ret == 0) |
| 993 | ret = nve0_ram_calc_gddr5(pfb, freq); | 1055 | ret = nve0_ram_calc_gddr5(pfb, next->freq); |
| 994 | break; | 1056 | break; |
| 995 | default: | 1057 | default: |
| 996 | ret = -ENOSYS; | 1058 | ret = -ENOSYS; |
| @@ -1001,13 +1063,55 @@ nve0_ram_calc(struct nouveau_fb *pfb, u32 freq) | |||
| 1001 | } | 1063 | } |
| 1002 | 1064 | ||
| 1003 | static int | 1065 | static int |
| 1066 | nve0_ram_calc(struct nouveau_fb *pfb, u32 freq) | ||
| 1067 | { | ||
| 1068 | struct nouveau_clock *clk = nouveau_clock(pfb); | ||
| 1069 | struct nve0_ram *ram = (void *)pfb->ram; | ||
| 1070 | struct nouveau_ram_data *xits = &ram->base.xition; | ||
| 1071 | struct nouveau_ram_data *copy; | ||
| 1072 | int ret; | ||
| 1073 | |||
| 1074 | if (ram->base.next == NULL) { | ||
| 1075 | ret = nve0_ram_calc_data(pfb, clk->read(clk, nv_clk_src_mem), | ||
| 1076 | &ram->base.former); | ||
| 1077 | if (ret) | ||
| 1078 | return ret; | ||
| 1079 | |||
| 1080 | ret = nve0_ram_calc_data(pfb, freq, &ram->base.target); | ||
| 1081 | if (ret) | ||
| 1082 | return ret; | ||
| 1083 | |||
| 1084 | if (ram->base.target.freq < ram->base.former.freq) { | ||
| 1085 | *xits = ram->base.target; | ||
| 1086 | copy = &ram->base.former; | ||
| 1087 | } else { | ||
| 1088 | *xits = ram->base.former; | ||
| 1089 | copy = &ram->base.target; | ||
| 1090 | } | ||
| 1091 | |||
| 1092 | xits->bios.ramcfg_11_02_04 = copy->bios.ramcfg_11_02_04; | ||
| 1093 | xits->bios.ramcfg_11_02_03 = copy->bios.ramcfg_11_02_03; | ||
| 1094 | xits->bios.timing_20_30_07 = copy->bios.timing_20_30_07; | ||
| 1095 | |||
| 1096 | ram->base.next = &ram->base.target; | ||
| 1097 | if (memcmp(xits, &ram->base.former, sizeof(xits->bios))) | ||
| 1098 | ram->base.next = &ram->base.xition; | ||
| 1099 | } else { | ||
| 1100 | BUG_ON(ram->base.next != &ram->base.xition); | ||
| 1101 | ram->base.next = &ram->base.target; | ||
| 1102 | } | ||
| 1103 | |||
| 1104 | return nve0_ram_calc_xits(pfb, ram->base.next); | ||
| 1105 | } | ||
| 1106 | |||
| 1107 | static int | ||
| 1004 | nve0_ram_prog(struct nouveau_fb *pfb) | 1108 | nve0_ram_prog(struct nouveau_fb *pfb) |
| 1005 | { | 1109 | { |
| 1006 | struct nouveau_device *device = nv_device(pfb); | 1110 | struct nouveau_device *device = nv_device(pfb); |
| 1007 | struct nve0_ram *ram = (void *)pfb->ram; | 1111 | struct nve0_ram *ram = (void *)pfb->ram; |
| 1008 | struct nve0_ramfuc *fuc = &ram->fuc; | 1112 | struct nve0_ramfuc *fuc = &ram->fuc; |
| 1009 | ram_exec(fuc, nouveau_boolopt(device->cfgopt, "NvMemExec", false)); | 1113 | ram_exec(fuc, nouveau_boolopt(device->cfgopt, "NvMemExec", false)); |
| 1010 | return 0; | 1114 | return (ram->base.next == &ram->base.xition); |
| 1011 | } | 1115 | } |
| 1012 | 1116 | ||
| 1013 | static void | 1117 | static void |
| @@ -1015,6 +1119,7 @@ nve0_ram_tidy(struct nouveau_fb *pfb) | |||
| 1015 | { | 1119 | { |
| 1016 | struct nve0_ram *ram = (void *)pfb->ram; | 1120 | struct nve0_ram *ram = (void *)pfb->ram; |
| 1017 | struct nve0_ramfuc *fuc = &ram->fuc; | 1121 | struct nve0_ramfuc *fuc = &ram->fuc; |
| 1122 | ram->base.next = NULL; | ||
| 1018 | ram_exec(fuc, false); | 1123 | ram_exec(fuc, false); |
| 1019 | } | 1124 | } |
| 1020 | 1125 | ||
| @@ -1055,7 +1160,7 @@ nve0_ram_init(struct nouveau_object *object) | |||
| 1055 | * binary driver skips the one that's already been setup by | 1160 | * binary driver skips the one that's already been setup by |
| 1056 | * the init tables. | 1161 | * the init tables. |
| 1057 | */ | 1162 | */ |
| 1058 | data = nvbios_rammap_table(bios, &ver, &hdr, &cnt, &len, &snr, &ssz); | 1163 | data = nvbios_rammapTe(bios, &ver, &hdr, &cnt, &len, &snr, &ssz); |
| 1059 | if (!data || hdr < 0x15) | 1164 | if (!data || hdr < 0x15) |
| 1060 | return -EINVAL; | 1165 | return -EINVAL; |
| 1061 | 1166 | ||
| @@ -1073,6 +1178,7 @@ nve0_ram_init(struct nouveau_object *object) | |||
| 1073 | data += 4; | 1178 | data += 4; |
| 1074 | } | 1179 | } |
| 1075 | nv_wr32(pfb, 0x10f65c, save); | 1180 | nv_wr32(pfb, 0x10f65c, save); |
| 1181 | nv_mask(pfb, 0x10f584, 0x11000000, 0x00000000); | ||
| 1076 | 1182 | ||
| 1077 | switch (ram->base.type) { | 1183 | switch (ram->base.type) { |
| 1078 | case NV_MEM_TYPE_GDDR5: | 1184 | case NV_MEM_TYPE_GDDR5: |
| @@ -1117,7 +1223,8 @@ nve0_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 1117 | struct nouveau_gpio *gpio = nouveau_gpio(pfb); | 1223 | struct nouveau_gpio *gpio = nouveau_gpio(pfb); |
| 1118 | struct dcb_gpio_func func; | 1224 | struct dcb_gpio_func func; |
| 1119 | struct nve0_ram *ram; | 1225 | struct nve0_ram *ram; |
| 1120 | int ret; | 1226 | int ret, i; |
| 1227 | u32 tmp; | ||
| 1121 | 1228 | ||
| 1122 | ret = nvc0_ram_create(parent, engine, oclass, &ram); | 1229 | ret = nvc0_ram_create(parent, engine, oclass, &ram); |
| 1123 | *pobject = nv_object(ram); | 1230 | *pobject = nv_object(ram); |
| @@ -1136,6 +1243,25 @@ nve0_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 1136 | break; | 1243 | break; |
| 1137 | } | 1244 | } |
| 1138 | 1245 | ||
| 1246 | /* calculate a mask of differently configured memory partitions, | ||
| 1247 | * because, of course reclocking wasn't complicated enough | ||
| 1248 | * already without having to treat some of them differently to | ||
| 1249 | * the others.... | ||
| 1250 | */ | ||
| 1251 | ram->parts = nv_rd32(pfb, 0x022438); | ||
| 1252 | ram->pmask = nv_rd32(pfb, 0x022554); | ||
| 1253 | ram->pnuts = 0; | ||
| 1254 | for (i = 0, tmp = 0; i < ram->parts; i++) { | ||
| 1255 | if (!(ram->pmask & (1 << i))) { | ||
| 1256 | u32 cfg1 = nv_rd32(pfb, 0x110204 + (i * 0x1000)); | ||
| 1257 | if (tmp && tmp != cfg1) { | ||
| 1258 | ram->pnuts |= (1 << i); | ||
| 1259 | continue; | ||
| 1260 | } | ||
| 1261 | tmp = cfg1; | ||
| 1262 | } | ||
| 1263 | } | ||
| 1264 | |||
| 1139 | // parse bios data for both pll's | 1265 | // parse bios data for both pll's |
| 1140 | ret = nvbios_pll_parse(bios, 0x0c, &ram->fuc.refpll); | 1266 | ret = nvbios_pll_parse(bios, 0x0c, &ram->fuc.refpll); |
| 1141 | if (ret) { | 1267 | if (ret) { |
| @@ -1248,7 +1374,7 @@ nve0_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 1248 | ram->fuc.r_0x10f65c = ramfuc_reg(0x10f65c); | 1374 | ram->fuc.r_0x10f65c = ramfuc_reg(0x10f65c); |
| 1249 | ram->fuc.r_0x10f6bc = ramfuc_reg(0x10f6bc); | 1375 | ram->fuc.r_0x10f6bc = ramfuc_reg(0x10f6bc); |
| 1250 | ram->fuc.r_0x100710 = ramfuc_reg(0x100710); | 1376 | ram->fuc.r_0x100710 = ramfuc_reg(0x100710); |
| 1251 | ram->fuc.r_0x10f750 = ramfuc_reg(0x10f750); | 1377 | ram->fuc.r_0x100750 = ramfuc_reg(0x100750); |
| 1252 | return 0; | 1378 | return 0; |
| 1253 | } | 1379 | } |
| 1254 | 1380 | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c index 6565f3dbbe04..14706d9842ca 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c | |||
| @@ -22,7 +22,24 @@ | |||
| 22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
| 23 | */ | 23 | */ |
| 24 | 24 | ||
| 25 | #include <subdev/instmem.h> | 25 | #include "priv.h" |
| 26 | |||
| 27 | /****************************************************************************** | ||
| 28 | * instmem object base implementation | ||
| 29 | *****************************************************************************/ | ||
| 30 | |||
| 31 | void | ||
| 32 | _nouveau_instobj_dtor(struct nouveau_object *object) | ||
| 33 | { | ||
| 34 | struct nouveau_instmem *imem = (void *)object->engine; | ||
| 35 | struct nouveau_instobj *iobj = (void *)object; | ||
| 36 | |||
| 37 | mutex_lock(&nv_subdev(imem)->mutex); | ||
| 38 | list_del(&iobj->head); | ||
| 39 | mutex_unlock(&nv_subdev(imem)->mutex); | ||
| 40 | |||
| 41 | return nouveau_object_destroy(&iobj->base); | ||
| 42 | } | ||
| 26 | 43 | ||
| 27 | int | 44 | int |
| 28 | nouveau_instobj_create_(struct nouveau_object *parent, | 45 | nouveau_instobj_create_(struct nouveau_object *parent, |
| @@ -46,73 +63,26 @@ nouveau_instobj_create_(struct nouveau_object *parent, | |||
| 46 | return 0; | 63 | return 0; |
| 47 | } | 64 | } |
| 48 | 65 | ||
| 49 | void | 66 | /****************************************************************************** |
| 50 | nouveau_instobj_destroy(struct nouveau_instobj *iobj) | 67 | * instmem subdev base implementation |
| 51 | { | 68 | *****************************************************************************/ |
| 52 | struct nouveau_subdev *subdev = nv_subdev(iobj->base.engine); | ||
| 53 | 69 | ||
| 54 | mutex_lock(&subdev->mutex); | 70 | static int |
| 55 | list_del(&iobj->head); | 71 | nouveau_instmem_alloc(struct nouveau_instmem *imem, |
| 56 | mutex_unlock(&subdev->mutex); | 72 | struct nouveau_object *parent, u32 size, u32 align, |
| 57 | 73 | struct nouveau_object **pobject) | |
| 58 | return nouveau_object_destroy(&iobj->base); | ||
| 59 | } | ||
| 60 | |||
| 61 | void | ||
| 62 | _nouveau_instobj_dtor(struct nouveau_object *object) | ||
| 63 | { | 74 | { |
| 64 | struct nouveau_instobj *iobj = (void *)object; | 75 | struct nouveau_object *engine = nv_object(imem); |
| 65 | return nouveau_instobj_destroy(iobj); | 76 | struct nouveau_instmem_impl *impl = (void *)engine->oclass; |
| 77 | struct nouveau_instobj_args args = { .size = size, .align = align }; | ||
| 78 | return nouveau_object_ctor(parent, engine, impl->instobj, &args, | ||
| 79 | sizeof(args), pobject); | ||
| 66 | } | 80 | } |
| 67 | 81 | ||
| 68 | int | 82 | int |
| 69 | nouveau_instmem_create_(struct nouveau_object *parent, | 83 | _nouveau_instmem_fini(struct nouveau_object *object, bool suspend) |
| 70 | struct nouveau_object *engine, | ||
| 71 | struct nouveau_oclass *oclass, | ||
| 72 | int length, void **pobject) | ||
| 73 | { | ||
| 74 | struct nouveau_instmem *imem; | ||
| 75 | int ret; | ||
| 76 | |||
| 77 | ret = nouveau_subdev_create_(parent, engine, oclass, 0, | ||
| 78 | "INSTMEM", "instmem", length, pobject); | ||
| 79 | imem = *pobject; | ||
| 80 | if (ret) | ||
| 81 | return ret; | ||
| 82 | |||
| 83 | INIT_LIST_HEAD(&imem->list); | ||
| 84 | return 0; | ||
| 85 | } | ||
| 86 | |||
| 87 | int | ||
| 88 | nouveau_instmem_init(struct nouveau_instmem *imem) | ||
| 89 | { | ||
| 90 | struct nouveau_instobj *iobj; | ||
| 91 | int ret, i; | ||
| 92 | |||
| 93 | ret = nouveau_subdev_init(&imem->base); | ||
| 94 | if (ret) | ||
| 95 | return ret; | ||
| 96 | |||
| 97 | mutex_lock(&imem->base.mutex); | ||
| 98 | |||
| 99 | list_for_each_entry(iobj, &imem->list, head) { | ||
| 100 | if (iobj->suspend) { | ||
| 101 | for (i = 0; i < iobj->size; i += 4) | ||
| 102 | nv_wo32(iobj, i, iobj->suspend[i / 4]); | ||
| 103 | vfree(iobj->suspend); | ||
| 104 | iobj->suspend = NULL; | ||
| 105 | } | ||
| 106 | } | ||
| 107 | |||
| 108 | mutex_unlock(&imem->base.mutex); | ||
| 109 | |||
| 110 | return 0; | ||
| 111 | } | ||
| 112 | |||
| 113 | int | ||
| 114 | nouveau_instmem_fini(struct nouveau_instmem *imem, bool suspend) | ||
| 115 | { | 84 | { |
| 85 | struct nouveau_instmem *imem = (void *)object; | ||
| 116 | struct nouveau_instobj *iobj; | 86 | struct nouveau_instobj *iobj; |
| 117 | int i, ret = 0; | 87 | int i, ret = 0; |
| 118 | 88 | ||
| @@ -143,12 +113,45 @@ int | |||
| 143 | _nouveau_instmem_init(struct nouveau_object *object) | 113 | _nouveau_instmem_init(struct nouveau_object *object) |
| 144 | { | 114 | { |
| 145 | struct nouveau_instmem *imem = (void *)object; | 115 | struct nouveau_instmem *imem = (void *)object; |
| 146 | return nouveau_instmem_init(imem); | 116 | struct nouveau_instobj *iobj; |
| 117 | int ret, i; | ||
| 118 | |||
| 119 | ret = nouveau_subdev_init(&imem->base); | ||
| 120 | if (ret) | ||
| 121 | return ret; | ||
| 122 | |||
| 123 | mutex_lock(&imem->base.mutex); | ||
| 124 | |||
| 125 | list_for_each_entry(iobj, &imem->list, head) { | ||
| 126 | if (iobj->suspend) { | ||
| 127 | for (i = 0; i < iobj->size; i += 4) | ||
| 128 | nv_wo32(iobj, i, iobj->suspend[i / 4]); | ||
| 129 | vfree(iobj->suspend); | ||
| 130 | iobj->suspend = NULL; | ||
| 131 | } | ||
| 132 | } | ||
| 133 | |||
| 134 | mutex_unlock(&imem->base.mutex); | ||
| 135 | |||
| 136 | return 0; | ||
| 147 | } | 137 | } |
| 148 | 138 | ||
| 149 | int | 139 | int |
| 150 | _nouveau_instmem_fini(struct nouveau_object *object, bool suspend) | 140 | nouveau_instmem_create_(struct nouveau_object *parent, |
| 141 | struct nouveau_object *engine, | ||
| 142 | struct nouveau_oclass *oclass, | ||
| 143 | int length, void **pobject) | ||
| 151 | { | 144 | { |
| 152 | struct nouveau_instmem *imem = (void *)object; | 145 | struct nouveau_instmem *imem; |
| 153 | return nouveau_instmem_fini(imem, suspend); | 146 | int ret; |
| 147 | |||
| 148 | ret = nouveau_subdev_create_(parent, engine, oclass, 0, | ||
| 149 | "INSTMEM", "instmem", length, pobject); | ||
| 150 | imem = *pobject; | ||
| 151 | if (ret) | ||
| 152 | return ret; | ||
| 153 | |||
| 154 | INIT_LIST_HEAD(&imem->list); | ||
| 155 | imem->alloc = nouveau_instmem_alloc; | ||
| 156 | return 0; | ||
| 154 | } | 157 | } |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c index 795393d7b2f5..7b64befee48f 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c +++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c | |||
| @@ -22,10 +22,35 @@ | |||
| 22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
| 23 | */ | 23 | */ |
| 24 | 24 | ||
| 25 | #include <subdev/fb.h> | ||
| 26 | |||
| 27 | #include "nv04.h" | 25 | #include "nv04.h" |
| 28 | 26 | ||
| 27 | /****************************************************************************** | ||
| 28 | * instmem object implementation | ||
| 29 | *****************************************************************************/ | ||
| 30 | |||
| 31 | static u32 | ||
| 32 | nv04_instobj_rd32(struct nouveau_object *object, u64 addr) | ||
| 33 | { | ||
| 34 | struct nv04_instobj_priv *node = (void *)object; | ||
| 35 | return nv_ro32(object->engine, node->mem->offset + addr); | ||
| 36 | } | ||
| 37 | |||
| 38 | static void | ||
| 39 | nv04_instobj_wr32(struct nouveau_object *object, u64 addr, u32 data) | ||
| 40 | { | ||
| 41 | struct nv04_instobj_priv *node = (void *)object; | ||
| 42 | nv_wo32(object->engine, node->mem->offset + addr, data); | ||
| 43 | } | ||
| 44 | |||
| 45 | static void | ||
| 46 | nv04_instobj_dtor(struct nouveau_object *object) | ||
| 47 | { | ||
| 48 | struct nv04_instmem_priv *priv = (void *)object->engine; | ||
| 49 | struct nv04_instobj_priv *node = (void *)object; | ||
| 50 | nouveau_mm_free(&priv->heap, &node->mem); | ||
| 51 | nouveau_instobj_destroy(&node->base); | ||
| 52 | } | ||
| 53 | |||
| 29 | static int | 54 | static int |
| 30 | nv04_instobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | 55 | nv04_instobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine, |
| 31 | struct nouveau_oclass *oclass, void *data, u32 size, | 56 | struct nouveau_oclass *oclass, void *data, u32 size, |
| @@ -33,18 +58,19 @@ nv04_instobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 33 | { | 58 | { |
| 34 | struct nv04_instmem_priv *priv = (void *)engine; | 59 | struct nv04_instmem_priv *priv = (void *)engine; |
| 35 | struct nv04_instobj_priv *node; | 60 | struct nv04_instobj_priv *node; |
| 36 | int ret, align; | 61 | struct nouveau_instobj_args *args = data; |
| 62 | int ret; | ||
| 37 | 63 | ||
| 38 | align = (unsigned long)data; | 64 | if (!args->align) |
| 39 | if (!align) | 65 | args->align = 1; |
| 40 | align = 1; | ||
| 41 | 66 | ||
| 42 | ret = nouveau_instobj_create(parent, engine, oclass, &node); | 67 | ret = nouveau_instobj_create(parent, engine, oclass, &node); |
| 43 | *pobject = nv_object(node); | 68 | *pobject = nv_object(node); |
| 44 | if (ret) | 69 | if (ret) |
| 45 | return ret; | 70 | return ret; |
| 46 | 71 | ||
| 47 | ret = nouveau_mm_head(&priv->heap, 1, size, size, align, &node->mem); | 72 | ret = nouveau_mm_head(&priv->heap, 1, args->size, args->size, |
| 73 | args->align, &node->mem); | ||
| 48 | if (ret) | 74 | if (ret) |
| 49 | return ret; | 75 | return ret; |
| 50 | 76 | ||
| @@ -53,32 +79,9 @@ nv04_instobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 53 | return 0; | 79 | return 0; |
| 54 | } | 80 | } |
| 55 | 81 | ||
| 56 | static void | 82 | struct nouveau_instobj_impl |
| 57 | nv04_instobj_dtor(struct nouveau_object *object) | ||
| 58 | { | ||
| 59 | struct nv04_instmem_priv *priv = (void *)object->engine; | ||
| 60 | struct nv04_instobj_priv *node = (void *)object; | ||
| 61 | nouveau_mm_free(&priv->heap, &node->mem); | ||
| 62 | nouveau_instobj_destroy(&node->base); | ||
| 63 | } | ||
| 64 | |||
| 65 | static u32 | ||
| 66 | nv04_instobj_rd32(struct nouveau_object *object, u64 addr) | ||
| 67 | { | ||
| 68 | struct nv04_instobj_priv *node = (void *)object; | ||
| 69 | return nv_ro32(object->engine, node->mem->offset + addr); | ||
| 70 | } | ||
| 71 | |||
| 72 | static void | ||
| 73 | nv04_instobj_wr32(struct nouveau_object *object, u64 addr, u32 data) | ||
| 74 | { | ||
| 75 | struct nv04_instobj_priv *node = (void *)object; | ||
| 76 | nv_wo32(object->engine, node->mem->offset + addr, data); | ||
| 77 | } | ||
| 78 | |||
| 79 | static struct nouveau_oclass | ||
| 80 | nv04_instobj_oclass = { | 83 | nv04_instobj_oclass = { |
| 81 | .ofuncs = &(struct nouveau_ofuncs) { | 84 | .base.ofuncs = &(struct nouveau_ofuncs) { |
| 82 | .ctor = nv04_instobj_ctor, | 85 | .ctor = nv04_instobj_ctor, |
| 83 | .dtor = nv04_instobj_dtor, | 86 | .dtor = nv04_instobj_dtor, |
| 84 | .init = _nouveau_instobj_init, | 87 | .init = _nouveau_instobj_init, |
| @@ -88,19 +91,34 @@ nv04_instobj_oclass = { | |||
| 88 | }, | 91 | }, |
| 89 | }; | 92 | }; |
| 90 | 93 | ||
| 91 | int | 94 | /****************************************************************************** |
| 92 | nv04_instmem_alloc(struct nouveau_instmem *imem, struct nouveau_object *parent, | 95 | * instmem subdev implementation |
| 93 | u32 size, u32 align, struct nouveau_object **pobject) | 96 | *****************************************************************************/ |
| 97 | |||
| 98 | static u32 | ||
| 99 | nv04_instmem_rd32(struct nouveau_object *object, u64 addr) | ||
| 94 | { | 100 | { |
| 95 | struct nouveau_object *engine = nv_object(imem); | 101 | return nv_rd32(object, 0x700000 + addr); |
| 96 | int ret; | 102 | } |
| 97 | 103 | ||
| 98 | ret = nouveau_object_ctor(parent, engine, &nv04_instobj_oclass, | 104 | static void |
| 99 | (void *)(unsigned long)align, size, pobject); | 105 | nv04_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data) |
| 100 | if (ret) | 106 | { |
| 101 | return ret; | 107 | return nv_wr32(object, 0x700000 + addr, data); |
| 108 | } | ||
| 102 | 109 | ||
| 103 | return 0; | 110 | void |
| 111 | nv04_instmem_dtor(struct nouveau_object *object) | ||
| 112 | { | ||
| 113 | struct nv04_instmem_priv *priv = (void *)object; | ||
| 114 | nouveau_gpuobj_ref(NULL, &priv->ramfc); | ||
| 115 | nouveau_gpuobj_ref(NULL, &priv->ramro); | ||
| 116 | nouveau_ramht_ref(NULL, &priv->ramht); | ||
| 117 | nouveau_gpuobj_ref(NULL, &priv->vbios); | ||
| 118 | nouveau_mm_fini(&priv->heap); | ||
| 119 | if (priv->iomem) | ||
| 120 | iounmap(priv->iomem); | ||
| 121 | nouveau_instmem_destroy(&priv->base); | ||
| 104 | } | 122 | } |
| 105 | 123 | ||
| 106 | static int | 124 | static int |
| @@ -118,7 +136,6 @@ nv04_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 118 | 136 | ||
| 119 | /* PRAMIN aperture maps over the end of VRAM, reserve it */ | 137 | /* PRAMIN aperture maps over the end of VRAM, reserve it */ |
| 120 | priv->base.reserved = 512 * 1024; | 138 | priv->base.reserved = 512 * 1024; |
| 121 | priv->base.alloc = nv04_instmem_alloc; | ||
| 122 | 139 | ||
| 123 | ret = nouveau_mm_init(&priv->heap, 0, priv->base.reserved, 1); | 140 | ret = nouveau_mm_init(&priv->heap, 0, priv->base.reserved, 1); |
| 124 | if (ret) | 141 | if (ret) |
| @@ -150,36 +167,10 @@ nv04_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 150 | return 0; | 167 | return 0; |
| 151 | } | 168 | } |
| 152 | 169 | ||
| 153 | void | 170 | struct nouveau_oclass * |
| 154 | nv04_instmem_dtor(struct nouveau_object *object) | 171 | nv04_instmem_oclass = &(struct nouveau_instmem_impl) { |
| 155 | { | 172 | .base.handle = NV_SUBDEV(INSTMEM, 0x04), |
| 156 | struct nv04_instmem_priv *priv = (void *)object; | 173 | .base.ofuncs = &(struct nouveau_ofuncs) { |
| 157 | nouveau_gpuobj_ref(NULL, &priv->ramfc); | ||
| 158 | nouveau_gpuobj_ref(NULL, &priv->ramro); | ||
| 159 | nouveau_ramht_ref(NULL, &priv->ramht); | ||
| 160 | nouveau_gpuobj_ref(NULL, &priv->vbios); | ||
| 161 | nouveau_mm_fini(&priv->heap); | ||
| 162 | if (priv->iomem) | ||
| 163 | iounmap(priv->iomem); | ||
| 164 | nouveau_instmem_destroy(&priv->base); | ||
| 165 | } | ||
| 166 | |||
| 167 | static u32 | ||
| 168 | nv04_instmem_rd32(struct nouveau_object *object, u64 addr) | ||
| 169 | { | ||
| 170 | return nv_rd32(object, 0x700000 + addr); | ||
| 171 | } | ||
| 172 | |||
| 173 | static void | ||
| 174 | nv04_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data) | ||
| 175 | { | ||
| 176 | return nv_wr32(object, 0x700000 + addr, data); | ||
| 177 | } | ||
| 178 | |||
| 179 | struct nouveau_oclass | ||
| 180 | nv04_instmem_oclass = { | ||
| 181 | .handle = NV_SUBDEV(INSTMEM, 0x04), | ||
| 182 | .ofuncs = &(struct nouveau_ofuncs) { | ||
| 183 | .ctor = nv04_instmem_ctor, | 174 | .ctor = nv04_instmem_ctor, |
| 184 | .dtor = nv04_instmem_dtor, | 175 | .dtor = nv04_instmem_dtor, |
| 185 | .init = _nouveau_instmem_init, | 176 | .init = _nouveau_instmem_init, |
| @@ -187,4 +178,5 @@ nv04_instmem_oclass = { | |||
| 187 | .rd32 = nv04_instmem_rd32, | 178 | .rd32 = nv04_instmem_rd32, |
| 188 | .wr32 = nv04_instmem_wr32, | 179 | .wr32 = nv04_instmem_wr32, |
| 189 | }, | 180 | }, |
| 190 | }; | 181 | .instobj = &nv04_instobj_oclass.base, |
| 182 | }.base; | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h index b15b61310236..095fbc6fc099 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h +++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h | |||
| @@ -5,7 +5,9 @@ | |||
| 5 | #include <core/ramht.h> | 5 | #include <core/ramht.h> |
| 6 | #include <core/mm.h> | 6 | #include <core/mm.h> |
| 7 | 7 | ||
| 8 | #include <subdev/instmem.h> | 8 | #include "priv.h" |
| 9 | |||
| 10 | extern struct nouveau_instobj_impl nv04_instobj_oclass; | ||
| 9 | 11 | ||
| 10 | struct nv04_instmem_priv { | 12 | struct nv04_instmem_priv { |
| 11 | struct nouveau_instmem base; | 13 | struct nouveau_instmem base; |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c index b10a143787a7..ec0b9661d614 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c +++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c | |||
| @@ -26,6 +26,24 @@ | |||
| 26 | 26 | ||
| 27 | #include "nv04.h" | 27 | #include "nv04.h" |
| 28 | 28 | ||
| 29 | /****************************************************************************** | ||
| 30 | * instmem subdev implementation | ||
| 31 | *****************************************************************************/ | ||
| 32 | |||
| 33 | static u32 | ||
| 34 | nv40_instmem_rd32(struct nouveau_object *object, u64 addr) | ||
| 35 | { | ||
| 36 | struct nv04_instmem_priv *priv = (void *)object; | ||
| 37 | return ioread32_native(priv->iomem + addr); | ||
| 38 | } | ||
| 39 | |||
| 40 | static void | ||
| 41 | nv40_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data) | ||
| 42 | { | ||
| 43 | struct nv04_instmem_priv *priv = (void *)object; | ||
| 44 | iowrite32_native(data, priv->iomem + addr); | ||
| 45 | } | ||
| 46 | |||
| 29 | static int | 47 | static int |
| 30 | nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | 48 | nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine, |
| 31 | struct nouveau_oclass *oclass, void *data, u32 size, | 49 | struct nouveau_oclass *oclass, void *data, u32 size, |
| @@ -69,7 +87,6 @@ nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 69 | priv->base.reserved += 512 * 1024; /* object storage */ | 87 | priv->base.reserved += 512 * 1024; /* object storage */ |
| 70 | 88 | ||
| 71 | priv->base.reserved = round_up(priv->base.reserved, 4096); | 89 | priv->base.reserved = round_up(priv->base.reserved, 4096); |
| 72 | priv->base.alloc = nv04_instmem_alloc; | ||
| 73 | 90 | ||
| 74 | ret = nouveau_mm_init(&priv->heap, 0, priv->base.reserved, 1); | 91 | ret = nouveau_mm_init(&priv->heap, 0, priv->base.reserved, 1); |
| 75 | if (ret) | 92 | if (ret) |
| @@ -106,24 +123,10 @@ nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 106 | return 0; | 123 | return 0; |
| 107 | } | 124 | } |
| 108 | 125 | ||
| 109 | static u32 | 126 | struct nouveau_oclass * |
| 110 | nv40_instmem_rd32(struct nouveau_object *object, u64 addr) | 127 | nv40_instmem_oclass = &(struct nouveau_instmem_impl) { |
| 111 | { | 128 | .base.handle = NV_SUBDEV(INSTMEM, 0x40), |
| 112 | struct nv04_instmem_priv *priv = (void *)object; | 129 | .base.ofuncs = &(struct nouveau_ofuncs) { |
| 113 | return ioread32_native(priv->iomem + addr); | ||
| 114 | } | ||
| 115 | |||
| 116 | static void | ||
| 117 | nv40_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data) | ||
| 118 | { | ||
| 119 | struct nv04_instmem_priv *priv = (void *)object; | ||
| 120 | iowrite32_native(data, priv->iomem + addr); | ||
| 121 | } | ||
| 122 | |||
| 123 | struct nouveau_oclass | ||
| 124 | nv40_instmem_oclass = { | ||
| 125 | .handle = NV_SUBDEV(INSTMEM, 0x40), | ||
| 126 | .ofuncs = &(struct nouveau_ofuncs) { | ||
| 127 | .ctor = nv40_instmem_ctor, | 130 | .ctor = nv40_instmem_ctor, |
| 128 | .dtor = nv04_instmem_dtor, | 131 | .dtor = nv04_instmem_dtor, |
| 129 | .init = _nouveau_instmem_init, | 132 | .init = _nouveau_instmem_init, |
| @@ -131,4 +134,5 @@ nv40_instmem_oclass = { | |||
| 131 | .rd32 = nv40_instmem_rd32, | 134 | .rd32 = nv40_instmem_rd32, |
| 132 | .wr32 = nv40_instmem_wr32, | 135 | .wr32 = nv40_instmem_wr32, |
| 133 | }, | 136 | }, |
| 134 | }; | 137 | .instobj = &nv04_instobj_oclass.base, |
| 138 | }.base; | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c index 97bc5dff93e7..7cb3b098a08d 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c +++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c | |||
| @@ -22,11 +22,11 @@ | |||
| 22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
| 23 | */ | 23 | */ |
| 24 | 24 | ||
| 25 | #include <subdev/instmem.h> | ||
| 26 | #include <subdev/fb.h> | 25 | #include <subdev/fb.h> |
| 27 | |||
| 28 | #include <core/mm.h> | 26 | #include <core/mm.h> |
| 29 | 27 | ||
| 28 | #include "priv.h" | ||
| 29 | |||
| 30 | struct nv50_instmem_priv { | 30 | struct nv50_instmem_priv { |
| 31 | struct nouveau_instmem base; | 31 | struct nouveau_instmem base; |
| 32 | spinlock_t lock; | 32 | spinlock_t lock; |
| @@ -38,42 +38,9 @@ struct nv50_instobj_priv { | |||
| 38 | struct nouveau_mem *mem; | 38 | struct nouveau_mem *mem; |
| 39 | }; | 39 | }; |
| 40 | 40 | ||
| 41 | static int | 41 | /****************************************************************************** |
| 42 | nv50_instobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | 42 | * instmem object implementation |
| 43 | struct nouveau_oclass *oclass, void *data, u32 size, | 43 | *****************************************************************************/ |
| 44 | struct nouveau_object **pobject) | ||
| 45 | { | ||
| 46 | struct nouveau_fb *pfb = nouveau_fb(parent); | ||
| 47 | struct nv50_instobj_priv *node; | ||
| 48 | u32 align = (unsigned long)data; | ||
| 49 | int ret; | ||
| 50 | |||
| 51 | size = max((size + 4095) & ~4095, (u32)4096); | ||
| 52 | align = max((align + 4095) & ~4095, (u32)4096); | ||
| 53 | |||
| 54 | ret = nouveau_instobj_create(parent, engine, oclass, &node); | ||
| 55 | *pobject = nv_object(node); | ||
| 56 | if (ret) | ||
| 57 | return ret; | ||
| 58 | |||
| 59 | ret = pfb->ram->get(pfb, size, align, 0, 0x800, &node->mem); | ||
| 60 | if (ret) | ||
| 61 | return ret; | ||
| 62 | |||
| 63 | node->base.addr = node->mem->offset; | ||
| 64 | node->base.size = node->mem->size << 12; | ||
| 65 | node->mem->page_shift = 12; | ||
| 66 | return 0; | ||
| 67 | } | ||
| 68 | |||
| 69 | static void | ||
| 70 | nv50_instobj_dtor(struct nouveau_object *object) | ||
| 71 | { | ||
| 72 | struct nv50_instobj_priv *node = (void *)object; | ||
| 73 | struct nouveau_fb *pfb = nouveau_fb(object); | ||
| 74 | pfb->ram->put(pfb, &node->mem); | ||
| 75 | nouveau_instobj_destroy(&node->base); | ||
| 76 | } | ||
| 77 | 44 | ||
| 78 | static u32 | 45 | static u32 |
| 79 | nv50_instobj_rd32(struct nouveau_object *object, u64 offset) | 46 | nv50_instobj_rd32(struct nouveau_object *object, u64 offset) |
| @@ -113,9 +80,46 @@ nv50_instobj_wr32(struct nouveau_object *object, u64 offset, u32 data) | |||
| 113 | spin_unlock_irqrestore(&priv->lock, flags); | 80 | spin_unlock_irqrestore(&priv->lock, flags); |
| 114 | } | 81 | } |
| 115 | 82 | ||
| 116 | static struct nouveau_oclass | 83 | static void |
| 84 | nv50_instobj_dtor(struct nouveau_object *object) | ||
| 85 | { | ||
| 86 | struct nv50_instobj_priv *node = (void *)object; | ||
| 87 | struct nouveau_fb *pfb = nouveau_fb(object); | ||
| 88 | pfb->ram->put(pfb, &node->mem); | ||
| 89 | nouveau_instobj_destroy(&node->base); | ||
| 90 | } | ||
| 91 | |||
| 92 | static int | ||
| 93 | nv50_instobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | ||
| 94 | struct nouveau_oclass *oclass, void *data, u32 size, | ||
| 95 | struct nouveau_object **pobject) | ||
| 96 | { | ||
| 97 | struct nouveau_fb *pfb = nouveau_fb(parent); | ||
| 98 | struct nouveau_instobj_args *args = data; | ||
| 99 | struct nv50_instobj_priv *node; | ||
| 100 | int ret; | ||
| 101 | |||
| 102 | args->size = max((args->size + 4095) & ~4095, (u32)4096); | ||
| 103 | args->align = max((args->align + 4095) & ~4095, (u32)4096); | ||
| 104 | |||
| 105 | ret = nouveau_instobj_create(parent, engine, oclass, &node); | ||
| 106 | *pobject = nv_object(node); | ||
| 107 | if (ret) | ||
| 108 | return ret; | ||
| 109 | |||
| 110 | ret = pfb->ram->get(pfb, args->size, args->align, 0, 0x800, &node->mem); | ||
| 111 | if (ret) | ||
| 112 | return ret; | ||
| 113 | |||
| 114 | node->base.addr = node->mem->offset; | ||
| 115 | node->base.size = node->mem->size << 12; | ||
| 116 | node->mem->page_shift = 12; | ||
| 117 | return 0; | ||
| 118 | } | ||
| 119 | |||
| 120 | static struct nouveau_instobj_impl | ||
| 117 | nv50_instobj_oclass = { | 121 | nv50_instobj_oclass = { |
| 118 | .ofuncs = &(struct nouveau_ofuncs) { | 122 | .base.ofuncs = &(struct nouveau_ofuncs) { |
| 119 | .ctor = nv50_instobj_ctor, | 123 | .ctor = nv50_instobj_ctor, |
| 120 | .dtor = nv50_instobj_dtor, | 124 | .dtor = nv50_instobj_dtor, |
| 121 | .init = _nouveau_instobj_init, | 125 | .init = _nouveau_instobj_init, |
| @@ -125,13 +129,16 @@ nv50_instobj_oclass = { | |||
| 125 | }, | 129 | }, |
| 126 | }; | 130 | }; |
| 127 | 131 | ||
| 132 | /****************************************************************************** | ||
| 133 | * instmem subdev implementation | ||
| 134 | *****************************************************************************/ | ||
| 135 | |||
| 128 | static int | 136 | static int |
| 129 | nv50_instmem_alloc(struct nouveau_instmem *imem, struct nouveau_object *parent, | 137 | nv50_instmem_fini(struct nouveau_object *object, bool suspend) |
| 130 | u32 size, u32 align, struct nouveau_object **pobject) | ||
| 131 | { | 138 | { |
| 132 | struct nouveau_object *engine = nv_object(imem); | 139 | struct nv50_instmem_priv *priv = (void *)object; |
| 133 | return nouveau_object_ctor(parent, engine, &nv50_instobj_oclass, | 140 | priv->addr = ~0ULL; |
| 134 | (void *)(unsigned long)align, size, pobject); | 141 | return nouveau_instmem_fini(&priv->base, suspend); |
| 135 | } | 142 | } |
| 136 | 143 | ||
| 137 | static int | 144 | static int |
| @@ -148,25 +155,17 @@ nv50_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 148 | return ret; | 155 | return ret; |
| 149 | 156 | ||
| 150 | spin_lock_init(&priv->lock); | 157 | spin_lock_init(&priv->lock); |
| 151 | priv->base.alloc = nv50_instmem_alloc; | ||
| 152 | return 0; | 158 | return 0; |
| 153 | } | 159 | } |
| 154 | 160 | ||
| 155 | static int | 161 | struct nouveau_oclass * |
| 156 | nv50_instmem_fini(struct nouveau_object *object, bool suspend) | 162 | nv50_instmem_oclass = &(struct nouveau_instmem_impl) { |
| 157 | { | 163 | .base.handle = NV_SUBDEV(INSTMEM, 0x50), |
| 158 | struct nv50_instmem_priv *priv = (void *)object; | 164 | .base.ofuncs = &(struct nouveau_ofuncs) { |
| 159 | priv->addr = ~0ULL; | ||
| 160 | return nouveau_instmem_fini(&priv->base, suspend); | ||
| 161 | } | ||
| 162 | |||
| 163 | struct nouveau_oclass | ||
| 164 | nv50_instmem_oclass = { | ||
| 165 | .handle = NV_SUBDEV(INSTMEM, 0x50), | ||
| 166 | .ofuncs = &(struct nouveau_ofuncs) { | ||
| 167 | .ctor = nv50_instmem_ctor, | 165 | .ctor = nv50_instmem_ctor, |
| 168 | .dtor = _nouveau_instmem_dtor, | 166 | .dtor = _nouveau_instmem_dtor, |
| 169 | .init = _nouveau_instmem_init, | 167 | .init = _nouveau_instmem_init, |
| 170 | .fini = nv50_instmem_fini, | 168 | .fini = nv50_instmem_fini, |
| 171 | }, | 169 | }, |
| 172 | }; | 170 | .instobj = &nv50_instobj_oclass.base, |
| 171 | }.base; | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/priv.h b/drivers/gpu/drm/nouveau/core/subdev/instmem/priv.h new file mode 100644 index 000000000000..8d67dedc5bb2 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/priv.h | |||
| @@ -0,0 +1,56 @@ | |||
| 1 | #ifndef __NVKM_INSTMEM_PRIV_H__ | ||
| 2 | #define __NVKM_INSTMEM_PRIV_H__ | ||
| 3 | |||
| 4 | #include <subdev/instmem.h> | ||
| 5 | |||
| 6 | struct nouveau_instobj_impl { | ||
| 7 | struct nouveau_oclass base; | ||
| 8 | }; | ||
| 9 | |||
| 10 | struct nouveau_instobj_args { | ||
| 11 | u32 size; | ||
| 12 | u32 align; | ||
| 13 | }; | ||
| 14 | |||
| 15 | #define nouveau_instobj_create(p,e,o,d) \ | ||
| 16 | nouveau_instobj_create_((p), (e), (o), sizeof(**d), (void **)d) | ||
| 17 | #define nouveau_instobj_destroy(p) ({ \ | ||
| 18 | struct nouveau_instobj *iobj = (p); \ | ||
| 19 | _nouveau_instobj_dtor(nv_object(iobj)); \ | ||
| 20 | }) | ||
| 21 | #define nouveau_instobj_init(p) \ | ||
| 22 | nouveau_object_init(&(p)->base) | ||
| 23 | #define nouveau_instobj_fini(p,s) \ | ||
| 24 | nouveau_object_fini(&(p)->base, (s)) | ||
| 25 | |||
| 26 | int nouveau_instobj_create_(struct nouveau_object *, struct nouveau_object *, | ||
| 27 | struct nouveau_oclass *, int, void **); | ||
| 28 | void _nouveau_instobj_dtor(struct nouveau_object *); | ||
| 29 | #define _nouveau_instobj_init nouveau_object_init | ||
| 30 | #define _nouveau_instobj_fini nouveau_object_fini | ||
| 31 | |||
| 32 | struct nouveau_instmem_impl { | ||
| 33 | struct nouveau_oclass base; | ||
| 34 | struct nouveau_oclass *instobj; | ||
| 35 | }; | ||
| 36 | |||
| 37 | #define nouveau_instmem_create(p,e,o,d) \ | ||
| 38 | nouveau_instmem_create_((p), (e), (o), sizeof(**d), (void **)d) | ||
| 39 | #define nouveau_instmem_destroy(p) \ | ||
| 40 | nouveau_subdev_destroy(&(p)->base) | ||
| 41 | #define nouveau_instmem_init(p) ({ \ | ||
| 42 | struct nouveau_instmem *imem = (p); \ | ||
| 43 | _nouveau_instmem_init(nv_object(imem)); \ | ||
| 44 | }) | ||
| 45 | #define nouveau_instmem_fini(p,s) ({ \ | ||
| 46 | struct nouveau_instmem *imem = (p); \ | ||
| 47 | _nouveau_instmem_fini(nv_object(imem), (s)); \ | ||
| 48 | }) | ||
| 49 | |||
| 50 | int nouveau_instmem_create_(struct nouveau_object *, struct nouveau_object *, | ||
| 51 | struct nouveau_oclass *, int, void **); | ||
| 52 | #define _nouveau_instmem_dtor _nouveau_subdev_dtor | ||
| 53 | int _nouveau_instmem_init(struct nouveau_object *); | ||
| 54 | int _nouveau_instmem_fini(struct nouveau_object *, bool); | ||
| 55 | |||
| 56 | #endif | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c index c02b4763a2d5..34472d317097 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c | |||
| @@ -32,6 +32,7 @@ nvc0_mc_intr[] = { | |||
| 32 | { 0x00000080, NVDEV_ENGINE_COPY2 }, | 32 | { 0x00000080, NVDEV_ENGINE_COPY2 }, |
| 33 | { 0x00000100, NVDEV_ENGINE_FIFO }, | 33 | { 0x00000100, NVDEV_ENGINE_FIFO }, |
| 34 | { 0x00001000, NVDEV_ENGINE_GR }, | 34 | { 0x00001000, NVDEV_ENGINE_GR }, |
| 35 | { 0x00002000, NVDEV_SUBDEV_FB }, | ||
| 35 | { 0x00008000, NVDEV_ENGINE_BSP }, | 36 | { 0x00008000, NVDEV_ENGINE_BSP }, |
| 36 | { 0x00040000, NVDEV_SUBDEV_THERM }, | 37 | { 0x00040000, NVDEV_SUBDEV_THERM }, |
| 37 | { 0x00020000, NVDEV_ENGINE_VP }, | 38 | { 0x00020000, NVDEV_ENGINE_VP }, |
| @@ -40,6 +41,7 @@ nvc0_mc_intr[] = { | |||
| 40 | { 0x01000000, NVDEV_SUBDEV_PWR }, | 41 | { 0x01000000, NVDEV_SUBDEV_PWR }, |
| 41 | { 0x02000000, NVDEV_SUBDEV_LTCG }, | 42 | { 0x02000000, NVDEV_SUBDEV_LTCG }, |
| 42 | { 0x04000000, NVDEV_ENGINE_DISP }, | 43 | { 0x04000000, NVDEV_ENGINE_DISP }, |
| 44 | { 0x08000000, NVDEV_SUBDEV_FB }, | ||
| 43 | { 0x10000000, NVDEV_SUBDEV_BUS }, | 45 | { 0x10000000, NVDEV_SUBDEV_BUS }, |
| 44 | { 0x40000000, NVDEV_SUBDEV_IBUS }, | 46 | { 0x40000000, NVDEV_SUBDEV_IBUS }, |
| 45 | { 0x80000000, NVDEV_ENGINE_SW }, | 47 | { 0x80000000, NVDEV_ENGINE_SW }, |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/i2c_.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/i2c_.fuc new file mode 100644 index 000000000000..757dda700024 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/i2c_.fuc | |||
| @@ -0,0 +1,393 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2013 Red Hat Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | * Authors: Ben Skeggs | ||
| 23 | */ | ||
| 24 | |||
| 25 | #define T_TIMEOUT 2200000 | ||
| 26 | #define T_RISEFALL 1000 | ||
| 27 | #define T_HOLD 5000 | ||
| 28 | |||
| 29 | #ifdef INCLUDE_PROC | ||
| 30 | process(PROC_I2C_, #i2c_init, #i2c_recv) | ||
| 31 | #endif | ||
| 32 | |||
| 33 | /****************************************************************************** | ||
| 34 | * I2C_ data segment | ||
| 35 | *****************************************************************************/ | ||
| 36 | #ifdef INCLUDE_DATA | ||
| 37 | i2c_scl_map: | ||
| 38 | .b32 NV_PPWR_OUTPUT_I2C_0_SCL | ||
| 39 | .b32 NV_PPWR_OUTPUT_I2C_1_SCL | ||
| 40 | .b32 NV_PPWR_OUTPUT_I2C_2_SCL | ||
| 41 | .b32 NV_PPWR_OUTPUT_I2C_3_SCL | ||
| 42 | .b32 NV_PPWR_OUTPUT_I2C_4_SCL | ||
| 43 | .b32 NV_PPWR_OUTPUT_I2C_5_SCL | ||
| 44 | .b32 NV_PPWR_OUTPUT_I2C_6_SCL | ||
| 45 | .b32 NV_PPWR_OUTPUT_I2C_7_SCL | ||
| 46 | .b32 NV_PPWR_OUTPUT_I2C_8_SCL | ||
| 47 | .b32 NV_PPWR_OUTPUT_I2C_9_SCL | ||
| 48 | i2c_sda_map: | ||
| 49 | .b32 NV_PPWR_OUTPUT_I2C_0_SDA | ||
| 50 | .b32 NV_PPWR_OUTPUT_I2C_1_SDA | ||
| 51 | .b32 NV_PPWR_OUTPUT_I2C_2_SDA | ||
| 52 | .b32 NV_PPWR_OUTPUT_I2C_3_SDA | ||
| 53 | .b32 NV_PPWR_OUTPUT_I2C_4_SDA | ||
| 54 | .b32 NV_PPWR_OUTPUT_I2C_5_SDA | ||
| 55 | .b32 NV_PPWR_OUTPUT_I2C_6_SDA | ||
| 56 | .b32 NV_PPWR_OUTPUT_I2C_7_SDA | ||
| 57 | .b32 NV_PPWR_OUTPUT_I2C_8_SDA | ||
| 58 | .b32 NV_PPWR_OUTPUT_I2C_9_SDA | ||
| 59 | #if NVKM_PPWR_CHIPSET < GF119 | ||
| 60 | i2c_ctrl: | ||
| 61 | .b32 0x00e138 | ||
| 62 | .b32 0x00e150 | ||
| 63 | .b32 0x00e168 | ||
| 64 | .b32 0x00e180 | ||
| 65 | .b32 0x00e254 | ||
| 66 | .b32 0x00e274 | ||
| 67 | .b32 0x00e764 | ||
| 68 | .b32 0x00e780 | ||
| 69 | .b32 0x00e79c | ||
| 70 | .b32 0x00e7b8 | ||
| 71 | #endif | ||
| 72 | #endif | ||
| 73 | |||
| 74 | /****************************************************************************** | ||
| 75 | * I2C_ code segment | ||
| 76 | *****************************************************************************/ | ||
| 77 | #ifdef INCLUDE_CODE | ||
| 78 | |||
| 79 | // $r3 - value | ||
| 80 | // $r2 - sda line | ||
| 81 | // $r1 - scl line | ||
| 82 | // $r0 - zero | ||
| 83 | i2c_drive_scl: | ||
| 84 | cmp b32 $r3 0 | ||
| 85 | bra e #i2c_drive_scl_lo | ||
| 86 | nv_iowr(NV_PPWR_OUTPUT_SET, $r1) | ||
| 87 | ret | ||
| 88 | i2c_drive_scl_lo: | ||
| 89 | nv_iowr(NV_PPWR_OUTPUT_CLR, $r1) | ||
| 90 | ret | ||
| 91 | |||
| 92 | i2c_drive_sda: | ||
| 93 | cmp b32 $r3 0 | ||
| 94 | bra e #i2c_drive_sda_lo | ||
| 95 | nv_iowr(NV_PPWR_OUTPUT_SET, $r2) | ||
| 96 | ret | ||
| 97 | i2c_drive_sda_lo: | ||
| 98 | nv_iowr(NV_PPWR_OUTPUT_CLR, $r2) | ||
| 99 | ret | ||
| 100 | |||
| 101 | i2c_sense_scl: | ||
| 102 | bclr $flags $p1 | ||
| 103 | nv_iord($r3, NV_PPWR_INPUT) | ||
| 104 | and $r3 $r1 | ||
| 105 | bra z #i2c_sense_scl_done | ||
| 106 | bset $flags $p1 | ||
| 107 | i2c_sense_scl_done: | ||
| 108 | ret | ||
| 109 | |||
| 110 | i2c_sense_sda: | ||
| 111 | bclr $flags $p1 | ||
| 112 | nv_iord($r3, NV_PPWR_INPUT) | ||
| 113 | and $r3 $r2 | ||
| 114 | bra z #i2c_sense_sda_done | ||
| 115 | bset $flags $p1 | ||
| 116 | i2c_sense_sda_done: | ||
| 117 | ret | ||
| 118 | |||
| 119 | #define i2c_drive_scl(v) /* | ||
| 120 | */ mov $r3 (v) /* | ||
| 121 | */ call(i2c_drive_scl) | ||
| 122 | #define i2c_drive_sda(v) /* | ||
| 123 | */ mov $r3 (v) /* | ||
| 124 | */ call(i2c_drive_sda) | ||
| 125 | #define i2c_sense_scl() /* | ||
| 126 | */ call(i2c_sense_scl) | ||
| 127 | #define i2c_sense_sda() /* | ||
| 128 | */ call(i2c_sense_sda) | ||
| 129 | #define i2c_delay(v) /* | ||
| 130 | */ mov $r14 (v) /* | ||
| 131 | */ call(nsec) | ||
| 132 | |||
| 133 | #define i2c_trace_init() /* | ||
| 134 | */ imm32($r6, 0x10000000) /* | ||
| 135 | */ sub b32 $r7 $r6 1 /* | ||
| 136 | */ | ||
| 137 | #define i2c_trace_down() /* | ||
| 138 | */ shr b32 $r6 4 /* | ||
| 139 | */ push $r5 /* | ||
| 140 | */ shl b32 $r5 $r6 4 /* | ||
| 141 | */ sub b32 $r5 $r6 /* | ||
| 142 | */ not b32 $r5 /* | ||
| 143 | */ and $r7 $r5 /* | ||
| 144 | */ pop $r5 /* | ||
| 145 | */ | ||
| 146 | #define i2c_trace_exit() /* | ||
| 147 | */ shl b32 $r6 4 /* | ||
| 148 | */ | ||
| 149 | #define i2c_trace_next() /* | ||
| 150 | */ add b32 $r7 $r6 /* | ||
| 151 | */ | ||
| 152 | #define i2c_trace_call(func) /* | ||
| 153 | */ i2c_trace_next() /* | ||
| 154 | */ i2c_trace_down() /* | ||
| 155 | */ call(func) /* | ||
| 156 | */ i2c_trace_exit() /* | ||
| 157 | */ | ||
| 158 | |||
| 159 | i2c_raise_scl: | ||
| 160 | push $r4 | ||
| 161 | mov $r4 (T_TIMEOUT / T_RISEFALL) | ||
| 162 | i2c_drive_scl(1) | ||
| 163 | i2c_raise_scl_wait: | ||
| 164 | i2c_delay(T_RISEFALL) | ||
| 165 | i2c_sense_scl() | ||
| 166 | bra $p1 #i2c_raise_scl_done | ||
| 167 | sub b32 $r4 1 | ||
| 168 | bra nz #i2c_raise_scl_wait | ||
| 169 | i2c_raise_scl_done: | ||
| 170 | pop $r4 | ||
| 171 | ret | ||
| 172 | |||
| 173 | i2c_start: | ||
| 174 | i2c_sense_scl() | ||
| 175 | bra not $p1 #i2c_start_rep | ||
| 176 | i2c_sense_sda() | ||
| 177 | bra not $p1 #i2c_start_rep | ||
| 178 | bra #i2c_start_send | ||
| 179 | i2c_start_rep: | ||
| 180 | i2c_drive_scl(0) | ||
| 181 | i2c_drive_sda(1) | ||
| 182 | i2c_trace_call(i2c_raise_scl) | ||
| 183 | bra not $p1 #i2c_start_out | ||
| 184 | i2c_start_send: | ||
| 185 | i2c_drive_sda(0) | ||
| 186 | i2c_delay(T_HOLD) | ||
| 187 | i2c_drive_scl(0) | ||
| 188 | i2c_delay(T_HOLD) | ||
| 189 | i2c_start_out: | ||
| 190 | ret | ||
| 191 | |||
| 192 | i2c_stop: | ||
| 193 | i2c_drive_scl(0) | ||
| 194 | i2c_drive_sda(0) | ||
| 195 | i2c_delay(T_RISEFALL) | ||
| 196 | i2c_drive_scl(1) | ||
| 197 | i2c_delay(T_HOLD) | ||
| 198 | i2c_drive_sda(1) | ||
| 199 | i2c_delay(T_HOLD) | ||
| 200 | ret | ||
| 201 | |||
| 202 | // $r3 - value | ||
| 203 | // $r2 - sda line | ||
| 204 | // $r1 - scl line | ||
| 205 | // $r0 - zero | ||
| 206 | i2c_bitw: | ||
| 207 | call(i2c_drive_sda) | ||
| 208 | i2c_delay(T_RISEFALL) | ||
| 209 | i2c_trace_call(i2c_raise_scl) | ||
| 210 | bra not $p1 #i2c_bitw_out | ||
| 211 | i2c_delay(T_HOLD) | ||
| 212 | i2c_drive_scl(0) | ||
| 213 | i2c_delay(T_HOLD) | ||
| 214 | i2c_bitw_out: | ||
| 215 | ret | ||
| 216 | |||
| 217 | // $r3 - value (out) | ||
| 218 | // $r2 - sda line | ||
| 219 | // $r1 - scl line | ||
| 220 | // $r0 - zero | ||
| 221 | i2c_bitr: | ||
| 222 | i2c_drive_sda(1) | ||
| 223 | i2c_delay(T_RISEFALL) | ||
| 224 | i2c_trace_call(i2c_raise_scl) | ||
| 225 | bra not $p1 #i2c_bitr_done | ||
| 226 | i2c_sense_sda() | ||
| 227 | i2c_drive_scl(0) | ||
| 228 | i2c_delay(T_HOLD) | ||
| 229 | xbit $r3 $flags $p1 | ||
| 230 | bset $flags $p1 | ||
| 231 | i2c_bitr_done: | ||
| 232 | ret | ||
| 233 | |||
| 234 | i2c_get_byte: | ||
| 235 | mov $r5 0 | ||
| 236 | mov $r4 8 | ||
| 237 | i2c_get_byte_next: | ||
| 238 | shl b32 $r5 1 | ||
| 239 | i2c_trace_call(i2c_bitr) | ||
| 240 | bra not $p1 #i2c_get_byte_done | ||
| 241 | or $r5 $r3 | ||
| 242 | sub b32 $r4 1 | ||
| 243 | bra nz #i2c_get_byte_next | ||
| 244 | mov $r3 1 | ||
| 245 | i2c_trace_call(i2c_bitw) | ||
| 246 | i2c_get_byte_done: | ||
| 247 | ret | ||
| 248 | |||
| 249 | i2c_put_byte: | ||
| 250 | mov $r4 8 | ||
| 251 | i2c_put_byte_next: | ||
| 252 | sub b32 $r4 1 | ||
| 253 | xbit $r3 $r5 $r4 | ||
| 254 | i2c_trace_call(i2c_bitw) | ||
| 255 | bra not $p1 #i2c_put_byte_done | ||
| 256 | cmp b32 $r4 0 | ||
| 257 | bra ne #i2c_put_byte_next | ||
| 258 | i2c_trace_call(i2c_bitr) | ||
| 259 | bra not $p1 #i2c_put_byte_done | ||
| 260 | i2c_trace_next() | ||
| 261 | cmp b32 $r3 1 | ||
| 262 | bra ne #i2c_put_byte_done | ||
| 263 | bclr $flags $p1 // nack | ||
| 264 | i2c_put_byte_done: | ||
| 265 | ret | ||
| 266 | |||
| 267 | i2c_addr: | ||
| 268 | i2c_trace_call(i2c_start) | ||
| 269 | bra not $p1 #i2c_addr_done | ||
| 270 | extr $r3 $r12 I2C__MSG_DATA0_ADDR | ||
| 271 | shl b32 $r3 1 | ||
| 272 | or $r5 $r3 | ||
| 273 | i2c_trace_call(i2c_put_byte) | ||
| 274 | i2c_addr_done: | ||
| 275 | ret | ||
| 276 | |||
| 277 | i2c_acquire_addr: | ||
| 278 | extr $r14 $r12 I2C__MSG_DATA0_PORT | ||
| 279 | #if NVKM_PPWR_CHIPSET < GF119 | ||
| 280 | shl b32 $r14 2 | ||
| 281 | add b32 $r14 #i2c_ctrl | ||
| 282 | ld b32 $r14 D[$r14] | ||
| 283 | #else | ||
| 284 | shl b32 $r14 5 | ||
| 285 | add b32 $r14 0x00d014 | ||
| 286 | #endif | ||
| 287 | ret | ||
| 288 | |||
| 289 | i2c_acquire: | ||
| 290 | call(i2c_acquire_addr) | ||
| 291 | call(rd32) | ||
| 292 | bset $r13 3 | ||
| 293 | call(wr32) | ||
| 294 | ret | ||
| 295 | |||
| 296 | i2c_release: | ||
| 297 | call(i2c_acquire_addr) | ||
| 298 | call(rd32) | ||
| 299 | bclr $r13 3 | ||
| 300 | call(wr32) | ||
| 301 | ret | ||
| 302 | |||
| 303 | // description | ||
| 304 | // | ||
| 305 | // $r15 - current (i2c) | ||
| 306 | // $r14 - sender process name | ||
| 307 | // $r13 - message | ||
| 308 | // $r12 - data0 | ||
| 309 | // $r11 - data1 | ||
| 310 | // $r0 - zero | ||
| 311 | i2c_recv: | ||
| 312 | bclr $flags $p1 | ||
| 313 | extr $r1 $r12 I2C__MSG_DATA0_PORT | ||
| 314 | shl b32 $r1 2 | ||
| 315 | cmp b32 $r1 (#i2c_sda_map - #i2c_scl_map) | ||
| 316 | bra ge #i2c_recv_done | ||
| 317 | add b32 $r3 $r1 #i2c_sda_map | ||
| 318 | ld b32 $r2 D[$r3] | ||
| 319 | add b32 $r3 $r1 #i2c_scl_map | ||
| 320 | ld b32 $r1 D[$r3] | ||
| 321 | |||
| 322 | bset $flags $p2 | ||
| 323 | push $r13 | ||
| 324 | push $r14 | ||
| 325 | |||
| 326 | push $r13 | ||
| 327 | i2c_trace_init() | ||
| 328 | i2c_trace_call(i2c_acquire) | ||
| 329 | pop $r13 | ||
| 330 | |||
| 331 | cmp b32 $r13 I2C__MSG_RD08 | ||
| 332 | bra ne #i2c_recv_not_rd08 | ||
| 333 | mov $r5 0 | ||
| 334 | i2c_trace_call(i2c_addr) | ||
| 335 | bra not $p1 #i2c_recv_done | ||
| 336 | extr $r5 $r12 I2C__MSG_DATA0_RD08_REG | ||
| 337 | i2c_trace_call(i2c_put_byte) | ||
| 338 | bra not $p1 #i2c_recv_done | ||
| 339 | mov $r5 1 | ||
| 340 | i2c_trace_call(i2c_addr) | ||
| 341 | bra not $p1 #i2c_recv_done | ||
| 342 | i2c_trace_call(i2c_get_byte) | ||
| 343 | bra not $p1 #i2c_recv_done | ||
| 344 | ins $r11 $r5 I2C__MSG_DATA1_RD08_VAL | ||
| 345 | i2c_trace_call(i2c_stop) | ||
| 346 | mov b32 $r11 $r5 | ||
| 347 | clear b32 $r7 | ||
| 348 | bra #i2c_recv_done | ||
| 349 | |||
| 350 | i2c_recv_not_rd08: | ||
| 351 | cmp b32 $r13 I2C__MSG_WR08 | ||
| 352 | bra ne #i2c_recv_not_wr08 | ||
| 353 | mov $r5 0 | ||
| 354 | call(i2c_addr) | ||
| 355 | bra not $p1 #i2c_recv_done | ||
| 356 | extr $r5 $r12 I2C__MSG_DATA0_WR08_REG | ||
| 357 | call(i2c_put_byte) | ||
| 358 | bra not $p1 #i2c_recv_done | ||
| 359 | mov $r5 0 | ||
| 360 | call(i2c_addr) | ||
| 361 | bra not $p1 #i2c_recv_done | ||
| 362 | extr $r5 $r11 I2C__MSG_DATA1_WR08_VAL | ||
| 363 | call(i2c_put_byte) | ||
| 364 | bra not $p1 #i2c_recv_done | ||
| 365 | call(i2c_stop) | ||
| 366 | clear b32 $r7 | ||
| 367 | extr $r5 $r12 I2C__MSG_DATA0_WR08_SYNC | ||
| 368 | bra nz #i2c_recv_done | ||
| 369 | bclr $flags $p2 | ||
| 370 | bra #i2c_recv_done | ||
| 371 | |||
| 372 | i2c_recv_not_wr08: | ||
| 373 | |||
| 374 | i2c_recv_done: | ||
| 375 | extr $r14 $r12 I2C__MSG_DATA0_PORT | ||
| 376 | call(i2c_release) | ||
| 377 | |||
| 378 | pop $r14 | ||
| 379 | pop $r13 | ||
| 380 | bra not $p2 #i2c_recv_exit | ||
| 381 | mov b32 $r12 $r7 | ||
| 382 | call(send) | ||
| 383 | |||
| 384 | i2c_recv_exit: | ||
| 385 | ret | ||
| 386 | |||
| 387 | // description | ||
| 388 | // | ||
| 389 | // $r15 - current (i2c) | ||
| 390 | // $r0 - zero | ||
| 391 | i2c_init: | ||
| 392 | ret | ||
| 393 | #endif | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/kernel.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/kernel.fuc index 0a7b05fa5c11..8f29badd785f 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/kernel.fuc +++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/kernel.fuc | |||
| @@ -51,12 +51,12 @@ time_next: .b32 0 | |||
| 51 | // $r0 - zero | 51 | // $r0 - zero |
| 52 | rd32: | 52 | rd32: |
| 53 | nv_iowr(NV_PPWR_MMIO_ADDR, $r14) | 53 | nv_iowr(NV_PPWR_MMIO_ADDR, $r14) |
| 54 | mov $r14 NV_PPWR_MMIO_CTRL_OP_RD | 54 | mov $r13 NV_PPWR_MMIO_CTRL_OP_RD |
| 55 | sethi $r14 NV_PPWR_MMIO_CTRL_TRIGGER | 55 | sethi $r13 NV_PPWR_MMIO_CTRL_TRIGGER |
| 56 | nv_iowr(NV_PPWR_MMIO_CTRL, $r14) | 56 | nv_iowr(NV_PPWR_MMIO_CTRL, $r13) |
| 57 | rd32_wait: | 57 | rd32_wait: |
| 58 | nv_iord($r14, NV_PPWR_MMIO_CTRL) | 58 | nv_iord($r13, NV_PPWR_MMIO_CTRL) |
| 59 | and $r14 NV_PPWR_MMIO_CTRL_STATUS | 59 | and $r13 NV_PPWR_MMIO_CTRL_STATUS |
| 60 | bra nz #rd32_wait | 60 | bra nz #rd32_wait |
| 61 | nv_iord($r13, NV_PPWR_MMIO_DATA) | 61 | nv_iord($r13, NV_PPWR_MMIO_DATA) |
| 62 | ret | 62 | ret |
| @@ -70,23 +70,25 @@ rd32: | |||
| 70 | wr32: | 70 | wr32: |
| 71 | nv_iowr(NV_PPWR_MMIO_ADDR, $r14) | 71 | nv_iowr(NV_PPWR_MMIO_ADDR, $r14) |
| 72 | nv_iowr(NV_PPWR_MMIO_DATA, $r13) | 72 | nv_iowr(NV_PPWR_MMIO_DATA, $r13) |
| 73 | mov $r14 NV_PPWR_MMIO_CTRL_OP_WR | 73 | mov $r13 NV_PPWR_MMIO_CTRL_OP_WR |
| 74 | or $r14 NV_PPWR_MMIO_CTRL_MASK_B32_0 | 74 | or $r13 NV_PPWR_MMIO_CTRL_MASK_B32_0 |
| 75 | sethi $r14 NV_PPWR_MMIO_CTRL_TRIGGER | 75 | sethi $r13 NV_PPWR_MMIO_CTRL_TRIGGER |
| 76 | 76 | ||
| 77 | #ifdef NVKM_FALCON_MMIO_TRAP | 77 | #ifdef NVKM_FALCON_MMIO_TRAP |
| 78 | mov $r8 NV_PPWR_INTR_TRIGGER_USER1 | 78 | push $r13 |
| 79 | nv_iowr(NV_PPWR_INTR_TRIGGER, $r8) | 79 | mov $r13 NV_PPWR_INTR_TRIGGER_USER1 |
| 80 | nv_iowr(NV_PPWR_INTR_TRIGGER, $r13) | ||
| 80 | wr32_host: | 81 | wr32_host: |
| 81 | nv_iord($r8, NV_PPWR_INTR) | 82 | nv_iord($r13, NV_PPWR_INTR) |
| 82 | and $r8 NV_PPWR_INTR_USER1 | 83 | and $r13 NV_PPWR_INTR_USER1 |
| 83 | bra nz #wr32_host | 84 | bra nz #wr32_host |
| 85 | pop $r13 | ||
| 84 | #endif | 86 | #endif |
| 85 | 87 | ||
| 86 | nv_iowr(NV_PPWR_MMIO_CTRL, $r14) | 88 | nv_iowr(NV_PPWR_MMIO_CTRL, $r13) |
| 87 | wr32_wait: | 89 | wr32_wait: |
| 88 | nv_iord($r14, NV_PPWR_MMIO_CTRL) | 90 | nv_iord($r13, NV_PPWR_MMIO_CTRL) |
| 89 | and $r14 NV_PPWR_MMIO_CTRL_STATUS | 91 | and $r13 NV_PPWR_MMIO_CTRL_STATUS |
| 90 | bra nz #wr32_wait | 92 | bra nz #wr32_wait |
| 91 | ret | 93 | ret |
| 92 | 94 | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc index 2a74ea907604..e2a63ac5422b 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc +++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc | |||
| @@ -83,6 +83,50 @@ | |||
| 83 | #define NV_PPWR_MMIO_CTRL_OP_WR 0x00000002 | 83 | #define NV_PPWR_MMIO_CTRL_OP_WR 0x00000002 |
| 84 | #define NV_PPWR_OUTPUT 0x07c0 | 84 | #define NV_PPWR_OUTPUT 0x07c0 |
| 85 | #define NV_PPWR_OUTPUT_FB_PAUSE 0x00000004 | 85 | #define NV_PPWR_OUTPUT_FB_PAUSE 0x00000004 |
| 86 | #if NVKM_PPWR_CHIPSET < GF119 | ||
| 87 | #define NV_PPWR_OUTPUT_I2C_3_SCL 0x00000100 | ||
| 88 | #define NV_PPWR_OUTPUT_I2C_3_SDA 0x00000200 | ||
| 89 | #define NV_PPWR_OUTPUT_I2C_0_SCL 0x00001000 | ||
| 90 | #define NV_PPWR_OUTPUT_I2C_0_SDA 0x00002000 | ||
| 91 | #define NV_PPWR_OUTPUT_I2C_1_SCL 0x00004000 | ||
| 92 | #define NV_PPWR_OUTPUT_I2C_1_SDA 0x00008000 | ||
| 93 | #define NV_PPWR_OUTPUT_I2C_2_SCL 0x00010000 | ||
| 94 | #define NV_PPWR_OUTPUT_I2C_2_SDA 0x00020000 | ||
| 95 | #define NV_PPWR_OUTPUT_I2C_4_SCL 0x00040000 | ||
| 96 | #define NV_PPWR_OUTPUT_I2C_4_SDA 0x00080000 | ||
| 97 | #define NV_PPWR_OUTPUT_I2C_5_SCL 0x00100000 | ||
| 98 | #define NV_PPWR_OUTPUT_I2C_5_SDA 0x00200000 | ||
| 99 | #define NV_PPWR_OUTPUT_I2C_6_SCL 0x00400000 | ||
| 100 | #define NV_PPWR_OUTPUT_I2C_6_SDA 0x00800000 | ||
| 101 | #define NV_PPWR_OUTPUT_I2C_7_SCL 0x01000000 | ||
| 102 | #define NV_PPWR_OUTPUT_I2C_7_SDA 0x02000000 | ||
| 103 | #define NV_PPWR_OUTPUT_I2C_8_SCL 0x04000000 | ||
| 104 | #define NV_PPWR_OUTPUT_I2C_8_SDA 0x08000000 | ||
| 105 | #define NV_PPWR_OUTPUT_I2C_9_SCL 0x10000000 | ||
| 106 | #define NV_PPWR_OUTPUT_I2C_9_SDA 0x20000000 | ||
| 107 | #else | ||
| 108 | #define NV_PPWR_OUTPUT_I2C_0_SCL 0x00000400 | ||
| 109 | #define NV_PPWR_OUTPUT_I2C_1_SCL 0x00000800 | ||
| 110 | #define NV_PPWR_OUTPUT_I2C_2_SCL 0x00001000 | ||
| 111 | #define NV_PPWR_OUTPUT_I2C_3_SCL 0x00002000 | ||
| 112 | #define NV_PPWR_OUTPUT_I2C_4_SCL 0x00004000 | ||
| 113 | #define NV_PPWR_OUTPUT_I2C_5_SCL 0x00008000 | ||
| 114 | #define NV_PPWR_OUTPUT_I2C_6_SCL 0x00010000 | ||
| 115 | #define NV_PPWR_OUTPUT_I2C_7_SCL 0x00020000 | ||
| 116 | #define NV_PPWR_OUTPUT_I2C_8_SCL 0x00040000 | ||
| 117 | #define NV_PPWR_OUTPUT_I2C_9_SCL 0x00080000 | ||
| 118 | #define NV_PPWR_OUTPUT_I2C_0_SDA 0x00100000 | ||
| 119 | #define NV_PPWR_OUTPUT_I2C_1_SDA 0x00200000 | ||
| 120 | #define NV_PPWR_OUTPUT_I2C_2_SDA 0x00400000 | ||
| 121 | #define NV_PPWR_OUTPUT_I2C_3_SDA 0x00800000 | ||
| 122 | #define NV_PPWR_OUTPUT_I2C_4_SDA 0x01000000 | ||
| 123 | #define NV_PPWR_OUTPUT_I2C_5_SDA 0x02000000 | ||
| 124 | #define NV_PPWR_OUTPUT_I2C_6_SDA 0x04000000 | ||
| 125 | #define NV_PPWR_OUTPUT_I2C_7_SDA 0x08000000 | ||
| 126 | #define NV_PPWR_OUTPUT_I2C_8_SDA 0x10000000 | ||
| 127 | #define NV_PPWR_OUTPUT_I2C_9_SDA 0x20000000 | ||
| 128 | #endif | ||
| 129 | #define NV_PPWR_INPUT 0x07c4 | ||
| 86 | #define NV_PPWR_OUTPUT_SET 0x07e0 | 130 | #define NV_PPWR_OUTPUT_SET 0x07e0 |
| 87 | #define NV_PPWR_OUTPUT_SET_FB_PAUSE 0x00000004 | 131 | #define NV_PPWR_OUTPUT_SET_FB_PAUSE 0x00000004 |
| 88 | #define NV_PPWR_OUTPUT_CLR 0x07e4 | 132 | #define NV_PPWR_OUTPUT_CLR 0x07e4 |
| @@ -125,6 +169,15 @@ | |||
| 125 | */ .b32 0 /* | 169 | */ .b32 0 /* |
| 126 | */ .skip 64 | 170 | */ .skip 64 |
| 127 | 171 | ||
| 172 | #if NV_PPWR_CHIPSET < GK208 | ||
| 173 | #define imm32(reg,val) /* | ||
| 174 | */ movw reg ((val) & 0x0000ffff) /* | ||
| 175 | */ sethi reg ((val) & 0xffff0000) | ||
| 176 | #else | ||
| 177 | #define imm32(reg,val) /* | ||
| 178 | */ mov reg (val) | ||
| 179 | #endif | ||
| 180 | |||
| 128 | #ifndef NVKM_FALCON_UNSHIFTED_IO | 181 | #ifndef NVKM_FALCON_UNSHIFTED_IO |
| 129 | #define nv_iord(reg,ior) /* | 182 | #define nv_iord(reg,ior) /* |
| 130 | */ mov reg ior /* | 183 | */ mov reg ior /* |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc index 947be536daef..17a8a383d91a 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc +++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc | |||
| @@ -37,6 +37,7 @@ | |||
| 37 | #include "host.fuc" | 37 | #include "host.fuc" |
| 38 | #include "memx.fuc" | 38 | #include "memx.fuc" |
| 39 | #include "perf.fuc" | 39 | #include "perf.fuc" |
| 40 | #include "i2c_.fuc" | ||
| 40 | #include "test.fuc" | 41 | #include "test.fuc" |
| 41 | #include "idle.fuc" | 42 | #include "idle.fuc" |
| 42 | #undef INCLUDE_PROC | 43 | #undef INCLUDE_PROC |
| @@ -46,6 +47,7 @@ | |||
| 46 | #include "host.fuc" | 47 | #include "host.fuc" |
| 47 | #include "memx.fuc" | 48 | #include "memx.fuc" |
| 48 | #include "perf.fuc" | 49 | #include "perf.fuc" |
| 50 | #include "i2c_.fuc" | ||
| 49 | #include "test.fuc" | 51 | #include "test.fuc" |
| 50 | #include "idle.fuc" | 52 | #include "idle.fuc" |
| 51 | #undef INCLUDE_DATA | 53 | #undef INCLUDE_DATA |
| @@ -57,6 +59,7 @@ | |||
| 57 | #include "host.fuc" | 59 | #include "host.fuc" |
| 58 | #include "memx.fuc" | 60 | #include "memx.fuc" |
| 59 | #include "perf.fuc" | 61 | #include "perf.fuc" |
| 62 | #include "i2c_.fuc" | ||
| 60 | #include "test.fuc" | 63 | #include "test.fuc" |
| 61 | #include "idle.fuc" | 64 | #include "idle.fuc" |
| 62 | #undef INCLUDE_CODE | 65 | #undef INCLUDE_CODE |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h index 9342e2d7d3b7..4bd43a99fdcc 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h +++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h | |||
| @@ -89,16 +89,9 @@ uint32_t nv108_pwr_data[] = { | |||
| 89 | 0x00000000, | 89 | 0x00000000, |
| 90 | 0x00000000, | 90 | 0x00000000, |
| 91 | 0x00000000, | 91 | 0x00000000, |
| 92 | 0x54534554, | 92 | 0x5f433249, |
| 93 | 0x00000494, | 93 | 0x00000877, |
| 94 | 0x00000475, | 94 | 0x0000071e, |
| 95 | 0x00000000, | ||
| 96 | 0x00000000, | ||
| 97 | 0x00000000, | ||
| 98 | 0x00000000, | ||
| 99 | 0x00000000, | ||
| 100 | 0x00000000, | ||
| 101 | 0x00000000, | ||
| 102 | 0x00000000, | 95 | 0x00000000, |
| 103 | 0x00000000, | 96 | 0x00000000, |
| 104 | 0x00000000, | 97 | 0x00000000, |
| @@ -111,16 +104,6 @@ uint32_t nv108_pwr_data[] = { | |||
| 111 | 0x00000000, | 104 | 0x00000000, |
| 112 | 0x00000000, | 105 | 0x00000000, |
| 113 | 0x00000000, | 106 | 0x00000000, |
| 114 | 0x454c4449, | ||
| 115 | 0x0000049f, | ||
| 116 | 0x0000049d, | ||
| 117 | 0x00000000, | ||
| 118 | 0x00000000, | ||
| 119 | 0x00000000, | ||
| 120 | 0x00000000, | ||
| 121 | 0x00000000, | ||
| 122 | 0x00000000, | ||
| 123 | 0x00000000, | ||
| 124 | 0x00000000, | 107 | 0x00000000, |
| 125 | 0x00000000, | 108 | 0x00000000, |
| 126 | 0x00000000, | 109 | 0x00000000, |
| @@ -128,17 +111,16 @@ uint32_t nv108_pwr_data[] = { | |||
| 128 | 0x00000000, | 111 | 0x00000000, |
| 129 | 0x00000000, | 112 | 0x00000000, |
| 130 | 0x00000000, | 113 | 0x00000000, |
| 114 | 0x54534554, | ||
| 115 | 0x00000898, | ||
| 116 | 0x00000879, | ||
| 131 | 0x00000000, | 117 | 0x00000000, |
| 132 | 0x00000000, | 118 | 0x00000000, |
| 133 | 0x00000000, | 119 | 0x00000000, |
| 134 | 0x00000000, | 120 | 0x00000000, |
| 135 | 0x00000000, | 121 | 0x00000000, |
| 136 | /* 0x0210: proc_list_tail */ | ||
| 137 | /* 0x0210: time_prev */ | ||
| 138 | 0x00000000, | 122 | 0x00000000, |
| 139 | /* 0x0214: time_next */ | ||
| 140 | 0x00000000, | 123 | 0x00000000, |
| 141 | /* 0x0218: fifo_queue */ | ||
| 142 | 0x00000000, | 124 | 0x00000000, |
| 143 | 0x00000000, | 125 | 0x00000000, |
| 144 | 0x00000000, | 126 | 0x00000000, |
| @@ -151,6 +133,9 @@ uint32_t nv108_pwr_data[] = { | |||
| 151 | 0x00000000, | 133 | 0x00000000, |
| 152 | 0x00000000, | 134 | 0x00000000, |
| 153 | 0x00000000, | 135 | 0x00000000, |
| 136 | 0x454c4449, | ||
| 137 | 0x000008a3, | ||
| 138 | 0x000008a1, | ||
| 154 | 0x00000000, | 139 | 0x00000000, |
| 155 | 0x00000000, | 140 | 0x00000000, |
| 156 | 0x00000000, | 141 | 0x00000000, |
| @@ -170,9 +155,12 @@ uint32_t nv108_pwr_data[] = { | |||
| 170 | 0x00000000, | 155 | 0x00000000, |
| 171 | 0x00000000, | 156 | 0x00000000, |
| 172 | 0x00000000, | 157 | 0x00000000, |
| 158 | /* 0x0268: proc_list_tail */ | ||
| 159 | /* 0x0268: time_prev */ | ||
| 173 | 0x00000000, | 160 | 0x00000000, |
| 174 | /* 0x0298: rfifo_queue */ | 161 | /* 0x026c: time_next */ |
| 175 | 0x00000000, | 162 | 0x00000000, |
| 163 | /* 0x0270: fifo_queue */ | ||
| 176 | 0x00000000, | 164 | 0x00000000, |
| 177 | 0x00000000, | 165 | 0x00000000, |
| 178 | 0x00000000, | 166 | 0x00000000, |
| @@ -204,31 +192,8 @@ uint32_t nv108_pwr_data[] = { | |||
| 204 | 0x00000000, | 192 | 0x00000000, |
| 205 | 0x00000000, | 193 | 0x00000000, |
| 206 | 0x00000000, | 194 | 0x00000000, |
| 207 | /* 0x0318: memx_func_head */ | ||
| 208 | 0x00010000, | ||
| 209 | 0x00000000, | ||
| 210 | 0x000003a9, | ||
| 211 | /* 0x0324: memx_func_next */ | ||
| 212 | 0x00000001, | ||
| 213 | 0x00000000, | ||
| 214 | 0x000003c7, | ||
| 215 | 0x00000002, | ||
| 216 | 0x00000002, | ||
| 217 | 0x000003df, | ||
| 218 | 0x00040003, | ||
| 219 | 0x00000000, | ||
| 220 | 0x00000407, | ||
| 221 | 0x00010004, | ||
| 222 | 0x00000000, | ||
| 223 | 0x00000421, | ||
| 224 | /* 0x0354: memx_func_tail */ | ||
| 225 | /* 0x0354: memx_data_head */ | ||
| 226 | 0x00000000, | ||
| 227 | 0x00000000, | ||
| 228 | 0x00000000, | ||
| 229 | 0x00000000, | ||
| 230 | 0x00000000, | ||
| 231 | 0x00000000, | 195 | 0x00000000, |
| 196 | /* 0x02f0: rfifo_queue */ | ||
| 232 | 0x00000000, | 197 | 0x00000000, |
| 233 | 0x00000000, | 198 | 0x00000000, |
| 234 | 0x00000000, | 199 | 0x00000000, |
| @@ -261,10 +226,25 @@ uint32_t nv108_pwr_data[] = { | |||
| 261 | 0x00000000, | 226 | 0x00000000, |
| 262 | 0x00000000, | 227 | 0x00000000, |
| 263 | 0x00000000, | 228 | 0x00000000, |
| 229 | /* 0x0370: memx_func_head */ | ||
| 230 | 0x00010000, | ||
| 264 | 0x00000000, | 231 | 0x00000000, |
| 232 | 0x000003a9, | ||
| 233 | /* 0x037c: memx_func_next */ | ||
| 234 | 0x00000001, | ||
| 265 | 0x00000000, | 235 | 0x00000000, |
| 236 | 0x000003c7, | ||
| 237 | 0x00000002, | ||
| 238 | 0x00000002, | ||
| 239 | 0x000003df, | ||
| 240 | 0x00040003, | ||
| 266 | 0x00000000, | 241 | 0x00000000, |
| 242 | 0x00000407, | ||
| 243 | 0x00010004, | ||
| 267 | 0x00000000, | 244 | 0x00000000, |
| 245 | 0x00000421, | ||
| 246 | /* 0x03ac: memx_func_tail */ | ||
| 247 | /* 0x03ac: memx_data_head */ | ||
| 268 | 0x00000000, | 248 | 0x00000000, |
| 269 | 0x00000000, | 249 | 0x00000000, |
| 270 | 0x00000000, | 250 | 0x00000000, |
| @@ -735,7 +715,6 @@ uint32_t nv108_pwr_data[] = { | |||
| 735 | 0x00000000, | 715 | 0x00000000, |
| 736 | 0x00000000, | 716 | 0x00000000, |
| 737 | 0x00000000, | 717 | 0x00000000, |
| 738 | /* 0x0b54: memx_data_tail */ | ||
| 739 | 0x00000000, | 718 | 0x00000000, |
| 740 | 0x00000000, | 719 | 0x00000000, |
| 741 | 0x00000000, | 720 | 0x00000000, |
| @@ -778,6 +757,29 @@ uint32_t nv108_pwr_data[] = { | |||
| 778 | 0x00000000, | 757 | 0x00000000, |
| 779 | 0x00000000, | 758 | 0x00000000, |
| 780 | 0x00000000, | 759 | 0x00000000, |
| 760 | /* 0x0bac: memx_data_tail */ | ||
| 761 | /* 0x0bac: i2c_scl_map */ | ||
| 762 | 0x00000400, | ||
| 763 | 0x00000800, | ||
| 764 | 0x00001000, | ||
| 765 | 0x00002000, | ||
| 766 | 0x00004000, | ||
| 767 | 0x00008000, | ||
| 768 | 0x00010000, | ||
| 769 | 0x00020000, | ||
| 770 | 0x00040000, | ||
| 771 | 0x00080000, | ||
| 772 | /* 0x0bd4: i2c_sda_map */ | ||
| 773 | 0x00100000, | ||
| 774 | 0x00200000, | ||
| 775 | 0x00400000, | ||
| 776 | 0x00800000, | ||
| 777 | 0x01000000, | ||
| 778 | 0x02000000, | ||
| 779 | 0x04000000, | ||
| 780 | 0x08000000, | ||
| 781 | 0x10000000, | ||
| 782 | 0x20000000, | ||
| 781 | 0x00000000, | 783 | 0x00000000, |
| 782 | }; | 784 | }; |
| 783 | 785 | ||
| @@ -786,13 +788,13 @@ uint32_t nv108_pwr_code[] = { | |||
| 786 | /* 0x0004: rd32 */ | 788 | /* 0x0004: rd32 */ |
| 787 | 0xf607a040, | 789 | 0xf607a040, |
| 788 | 0x04bd000e, | 790 | 0x04bd000e, |
| 789 | 0xe3f0010e, | 791 | 0xd3f0010d, |
| 790 | 0x07ac4001, | 792 | 0x07ac4001, |
| 791 | 0xbd000ef6, | 793 | 0xbd000df6, |
| 792 | /* 0x0019: rd32_wait */ | 794 | /* 0x0019: rd32_wait */ |
| 793 | 0x07ac4e04, | 795 | 0x07ac4d04, |
| 794 | 0xf100eecf, | 796 | 0xf100ddcf, |
| 795 | 0xf47000e4, | 797 | 0xf47000d4, |
| 796 | 0xa44df61b, | 798 | 0xa44df61b, |
| 797 | 0x00ddcf07, | 799 | 0x00ddcf07, |
| 798 | /* 0x002e: wr32 */ | 800 | /* 0x002e: wr32 */ |
| @@ -800,14 +802,14 @@ uint32_t nv108_pwr_code[] = { | |||
| 800 | 0x000ef607, | 802 | 0x000ef607, |
| 801 | 0xa44004bd, | 803 | 0xa44004bd, |
| 802 | 0x000df607, | 804 | 0x000df607, |
| 803 | 0x020e04bd, | 805 | 0x020d04bd, |
| 804 | 0xf0f0e5f0, | 806 | 0xf0f0d5f0, |
| 805 | 0xac4001e3, | 807 | 0xac4001d3, |
| 806 | 0x000ef607, | 808 | 0x000df607, |
| 807 | /* 0x004e: wr32_wait */ | 809 | /* 0x004e: wr32_wait */ |
| 808 | 0xac4e04bd, | 810 | 0xac4d04bd, |
| 809 | 0x00eecf07, | 811 | 0x00ddcf07, |
| 810 | 0x7000e4f1, | 812 | 0x7000d4f1, |
| 811 | 0xf8f61bf4, | 813 | 0xf8f61bf4, |
| 812 | /* 0x005d: nsec */ | 814 | /* 0x005d: nsec */ |
| 813 | 0xcf2c0800, | 815 | 0xcf2c0800, |
| @@ -832,20 +834,20 @@ uint32_t nv108_pwr_code[] = { | |||
| 832 | 0x03e99800, | 834 | 0x03e99800, |
| 833 | 0xf40096b0, | 835 | 0xf40096b0, |
| 834 | 0x0a98280b, | 836 | 0x0a98280b, |
| 835 | 0x029abb84, | 837 | 0x029abb9a, |
| 836 | 0x0d0e1cf4, | 838 | 0x0d0e1cf4, |
| 837 | 0x01de7e01, | 839 | 0x01de7e01, |
| 838 | 0xf494bd00, | 840 | 0xf494bd00, |
| 839 | /* 0x00b2: intr_watchdog_next_time */ | 841 | /* 0x00b2: intr_watchdog_next_time */ |
| 840 | 0x0a98140e, | 842 | 0x0a98140e, |
| 841 | 0x00a6b085, | 843 | 0x00a6b09b, |
| 842 | 0xa6080bf4, | 844 | 0xa6080bf4, |
| 843 | 0x061cf49a, | 845 | 0x061cf49a, |
| 844 | /* 0x00c0: intr_watchdog_next_time_set */ | 846 | /* 0x00c0: intr_watchdog_next_time_set */ |
| 845 | /* 0x00c3: intr_watchdog_next_proc */ | 847 | /* 0x00c3: intr_watchdog_next_proc */ |
| 846 | 0xb58509b5, | 848 | 0xb59b09b5, |
| 847 | 0xe0b603e9, | 849 | 0xe0b603e9, |
| 848 | 0x10e6b158, | 850 | 0x68e6b158, |
| 849 | 0xc81bf402, | 851 | 0xc81bf402, |
| 850 | /* 0x00d2: intr */ | 852 | /* 0x00d2: intr */ |
| 851 | 0x00f900f8, | 853 | 0x00f900f8, |
| @@ -862,15 +864,15 @@ uint32_t nv108_pwr_code[] = { | |||
| 862 | 0x080804bd, | 864 | 0x080804bd, |
| 863 | 0xc40088cf, | 865 | 0xc40088cf, |
| 864 | 0x0bf40289, | 866 | 0x0bf40289, |
| 865 | 0x8500b51f, | 867 | 0x9b00b51f, |
| 866 | 0x957e580e, | 868 | 0x957e580e, |
| 867 | 0x09980000, | 869 | 0x09980000, |
| 868 | 0x0096b085, | 870 | 0x0096b09b, |
| 869 | 0x000d0bf4, | 871 | 0x000d0bf4, |
| 870 | 0x0009f634, | 872 | 0x0009f634, |
| 871 | 0x09b504bd, | 873 | 0x09b504bd, |
| 872 | /* 0x0125: intr_skip_watchdog */ | 874 | /* 0x0125: intr_skip_watchdog */ |
| 873 | 0x0089e484, | 875 | 0x0089e49a, |
| 874 | 0x360bf408, | 876 | 0x360bf408, |
| 875 | 0xcf068849, | 877 | 0xcf068849, |
| 876 | 0x9ac40099, | 878 | 0x9ac40099, |
| @@ -918,7 +920,7 @@ uint32_t nv108_pwr_code[] = { | |||
| 918 | /* 0x01c6: timer_reset */ | 920 | /* 0x01c6: timer_reset */ |
| 919 | 0x3400161e, | 921 | 0x3400161e, |
| 920 | 0xbd000ef6, | 922 | 0xbd000ef6, |
| 921 | 0x840eb504, | 923 | 0x9a0eb504, |
| 922 | /* 0x01d0: timer_enable */ | 924 | /* 0x01d0: timer_enable */ |
| 923 | 0x38000108, | 925 | 0x38000108, |
| 924 | 0xbd0008f6, | 926 | 0xbd0008f6, |
| @@ -949,7 +951,7 @@ uint32_t nv108_pwr_code[] = { | |||
| 949 | 0xa6008a98, | 951 | 0xa6008a98, |
| 950 | 0x100bf4ae, | 952 | 0x100bf4ae, |
| 951 | 0xb15880b6, | 953 | 0xb15880b6, |
| 952 | 0xf4021086, | 954 | 0xf4026886, |
| 953 | 0x32f4f11b, | 955 | 0x32f4f11b, |
| 954 | /* 0x0239: find_done */ | 956 | /* 0x0239: find_done */ |
| 955 | 0xfc8eb201, | 957 | 0xfc8eb201, |
| @@ -1009,7 +1011,7 @@ uint32_t nv108_pwr_code[] = { | |||
| 1009 | 0x0bf412a6, | 1011 | 0x0bf412a6, |
| 1010 | 0x071ec42e, | 1012 | 0x071ec42e, |
| 1011 | 0xb704ee94, | 1013 | 0xb704ee94, |
| 1012 | 0x980218e0, | 1014 | 0x980270e0, |
| 1013 | 0xec9803eb, | 1015 | 0xec9803eb, |
| 1014 | 0x01ed9802, | 1016 | 0x01ed9802, |
| 1015 | 0x7e00ee98, | 1017 | 0x7e00ee98, |
| @@ -1031,7 +1033,7 @@ uint32_t nv108_pwr_code[] = { | |||
| 1031 | 0xf412a608, | 1033 | 0xf412a608, |
| 1032 | 0x23c4ef0b, | 1034 | 0x23c4ef0b, |
| 1033 | 0x0434b607, | 1035 | 0x0434b607, |
| 1034 | 0x029830b7, | 1036 | 0x02f030b7, |
| 1035 | 0xb5033bb5, | 1037 | 0xb5033bb5, |
| 1036 | 0x3db5023c, | 1038 | 0x3db5023c, |
| 1037 | 0x003eb501, | 1039 | 0x003eb501, |
| @@ -1044,11 +1046,11 @@ uint32_t nv108_pwr_code[] = { | |||
| 1044 | /* 0x0379: host_init */ | 1046 | /* 0x0379: host_init */ |
| 1045 | 0x00804100, | 1047 | 0x00804100, |
| 1046 | 0xf11014b6, | 1048 | 0xf11014b6, |
| 1047 | 0x40021815, | 1049 | 0x40027015, |
| 1048 | 0x01f604d0, | 1050 | 0x01f604d0, |
| 1049 | 0x4104bd00, | 1051 | 0x4104bd00, |
| 1050 | 0x14b60080, | 1052 | 0x14b60080, |
| 1051 | 0x9815f110, | 1053 | 0xf015f110, |
| 1052 | 0x04dc4002, | 1054 | 0x04dc4002, |
| 1053 | 0xbd0001f6, | 1055 | 0xbd0001f6, |
| 1054 | 0x40010104, | 1056 | 0x40010104, |
| @@ -1101,13 +1103,13 @@ uint32_t nv108_pwr_code[] = { | |||
| 1101 | 0x001398b2, | 1103 | 0x001398b2, |
| 1102 | 0x950410b6, | 1104 | 0x950410b6, |
| 1103 | 0x30f01034, | 1105 | 0x30f01034, |
| 1104 | 0xc835980c, | 1106 | 0xde35980c, |
| 1105 | 0x12a655f9, | 1107 | 0x12a655f9, |
| 1106 | 0xfced1ef4, | 1108 | 0xfced1ef4, |
| 1107 | 0x7ee0fcd0, | 1109 | 0x7ee0fcd0, |
| 1108 | 0xf800023f, | 1110 | 0xf800023f, |
| 1109 | /* 0x0455: memx_info */ | 1111 | /* 0x0455: memx_info */ |
| 1110 | 0x03544c00, | 1112 | 0x03ac4c00, |
| 1111 | 0x7e08004b, | 1113 | 0x7e08004b, |
| 1112 | 0xf800023f, | 1114 | 0xf800023f, |
| 1113 | /* 0x0461: memx_recv */ | 1115 | /* 0x0461: memx_recv */ |
| @@ -1119,7 +1121,301 @@ uint32_t nv108_pwr_code[] = { | |||
| 1119 | /* 0x0471: perf_recv */ | 1121 | /* 0x0471: perf_recv */ |
| 1120 | /* 0x0473: perf_init */ | 1122 | /* 0x0473: perf_init */ |
| 1121 | 0xf800f800, | 1123 | 0xf800f800, |
| 1122 | /* 0x0475: test_recv */ | 1124 | /* 0x0475: i2c_drive_scl */ |
| 1125 | 0x0036b000, | ||
| 1126 | 0x400d0bf4, | ||
| 1127 | 0x01f607e0, | ||
| 1128 | 0xf804bd00, | ||
| 1129 | /* 0x0485: i2c_drive_scl_lo */ | ||
| 1130 | 0x07e44000, | ||
| 1131 | 0xbd0001f6, | ||
| 1132 | /* 0x048f: i2c_drive_sda */ | ||
| 1133 | 0xb000f804, | ||
| 1134 | 0x0bf40036, | ||
| 1135 | 0x07e0400d, | ||
| 1136 | 0xbd0002f6, | ||
| 1137 | /* 0x049f: i2c_drive_sda_lo */ | ||
| 1138 | 0x4000f804, | ||
| 1139 | 0x02f607e4, | ||
| 1140 | 0xf804bd00, | ||
| 1141 | /* 0x04a9: i2c_sense_scl */ | ||
| 1142 | 0x0132f400, | ||
| 1143 | 0xcf07c443, | ||
| 1144 | 0x31fd0033, | ||
| 1145 | 0x060bf404, | ||
| 1146 | /* 0x04bb: i2c_sense_scl_done */ | ||
| 1147 | 0xf80131f4, | ||
| 1148 | /* 0x04bd: i2c_sense_sda */ | ||
| 1149 | 0x0132f400, | ||
| 1150 | 0xcf07c443, | ||
| 1151 | 0x32fd0033, | ||
| 1152 | 0x060bf404, | ||
| 1153 | /* 0x04cf: i2c_sense_sda_done */ | ||
| 1154 | 0xf80131f4, | ||
| 1155 | /* 0x04d1: i2c_raise_scl */ | ||
| 1156 | 0x4440f900, | ||
| 1157 | 0x01030898, | ||
| 1158 | 0x0004757e, | ||
| 1159 | /* 0x04dc: i2c_raise_scl_wait */ | ||
| 1160 | 0x7e03e84e, | ||
| 1161 | 0x7e00005d, | ||
| 1162 | 0xf40004a9, | ||
| 1163 | 0x42b60901, | ||
| 1164 | 0xef1bf401, | ||
| 1165 | /* 0x04f0: i2c_raise_scl_done */ | ||
| 1166 | 0x00f840fc, | ||
| 1167 | /* 0x04f4: i2c_start */ | ||
| 1168 | 0x0004a97e, | ||
| 1169 | 0x7e0d11f4, | ||
| 1170 | 0xf40004bd, | ||
| 1171 | 0x0ef40611, | ||
| 1172 | /* 0x0505: i2c_start_rep */ | ||
| 1173 | 0x7e00032e, | ||
| 1174 | 0x03000475, | ||
| 1175 | 0x048f7e01, | ||
| 1176 | 0x0076bb00, | ||
| 1177 | 0xf90465b6, | ||
| 1178 | 0x04659450, | ||
| 1179 | 0xbd0256bb, | ||
| 1180 | 0x0475fd50, | ||
| 1181 | 0xd17e50fc, | ||
| 1182 | 0x64b60004, | ||
| 1183 | 0x1d11f404, | ||
| 1184 | /* 0x0530: i2c_start_send */ | ||
| 1185 | 0x8f7e0003, | ||
| 1186 | 0x884e0004, | ||
| 1187 | 0x005d7e13, | ||
| 1188 | 0x7e000300, | ||
| 1189 | 0x4e000475, | ||
| 1190 | 0x5d7e1388, | ||
| 1191 | /* 0x054a: i2c_start_out */ | ||
| 1192 | 0x00f80000, | ||
| 1193 | /* 0x054c: i2c_stop */ | ||
| 1194 | 0x757e0003, | ||
| 1195 | 0x00030004, | ||
| 1196 | 0x00048f7e, | ||
| 1197 | 0x7e03e84e, | ||
| 1198 | 0x0300005d, | ||
| 1199 | 0x04757e01, | ||
| 1200 | 0x13884e00, | ||
| 1201 | 0x00005d7e, | ||
| 1202 | 0x8f7e0103, | ||
| 1203 | 0x884e0004, | ||
| 1204 | 0x005d7e13, | ||
| 1205 | /* 0x057b: i2c_bitw */ | ||
| 1206 | 0x7e00f800, | ||
| 1207 | 0x4e00048f, | ||
| 1208 | 0x5d7e03e8, | ||
| 1209 | 0x76bb0000, | ||
| 1210 | 0x0465b600, | ||
| 1211 | 0x659450f9, | ||
| 1212 | 0x0256bb04, | ||
| 1213 | 0x75fd50bd, | ||
| 1214 | 0x7e50fc04, | ||
| 1215 | 0xb60004d1, | ||
| 1216 | 0x11f40464, | ||
| 1217 | 0x13884e17, | ||
| 1218 | 0x00005d7e, | ||
| 1219 | 0x757e0003, | ||
| 1220 | 0x884e0004, | ||
| 1221 | 0x005d7e13, | ||
| 1222 | /* 0x05b9: i2c_bitw_out */ | ||
| 1223 | /* 0x05bb: i2c_bitr */ | ||
| 1224 | 0x0300f800, | ||
| 1225 | 0x048f7e01, | ||
| 1226 | 0x03e84e00, | ||
| 1227 | 0x00005d7e, | ||
| 1228 | 0xb60076bb, | ||
| 1229 | 0x50f90465, | ||
| 1230 | 0xbb046594, | ||
| 1231 | 0x50bd0256, | ||
| 1232 | 0xfc0475fd, | ||
| 1233 | 0x04d17e50, | ||
| 1234 | 0x0464b600, | ||
| 1235 | 0x7e1a11f4, | ||
| 1236 | 0x030004bd, | ||
| 1237 | 0x04757e00, | ||
| 1238 | 0x13884e00, | ||
| 1239 | 0x00005d7e, | ||
| 1240 | 0xf4013cf0, | ||
| 1241 | /* 0x05fe: i2c_bitr_done */ | ||
| 1242 | 0x00f80131, | ||
| 1243 | /* 0x0600: i2c_get_byte */ | ||
| 1244 | 0x08040005, | ||
| 1245 | /* 0x0604: i2c_get_byte_next */ | ||
| 1246 | 0xbb0154b6, | ||
| 1247 | 0x65b60076, | ||
| 1248 | 0x9450f904, | ||
| 1249 | 0x56bb0465, | ||
| 1250 | 0xfd50bd02, | ||
| 1251 | 0x50fc0475, | ||
| 1252 | 0x0005bb7e, | ||
| 1253 | 0xf40464b6, | ||
| 1254 | 0x53fd2a11, | ||
| 1255 | 0x0142b605, | ||
| 1256 | 0x03d81bf4, | ||
| 1257 | 0x0076bb01, | ||
| 1258 | 0xf90465b6, | ||
| 1259 | 0x04659450, | ||
| 1260 | 0xbd0256bb, | ||
| 1261 | 0x0475fd50, | ||
| 1262 | 0x7b7e50fc, | ||
| 1263 | 0x64b60005, | ||
| 1264 | /* 0x064d: i2c_get_byte_done */ | ||
| 1265 | /* 0x064f: i2c_put_byte */ | ||
| 1266 | 0x0400f804, | ||
| 1267 | /* 0x0651: i2c_put_byte_next */ | ||
| 1268 | 0x0142b608, | ||
| 1269 | 0xbb3854ff, | ||
| 1270 | 0x65b60076, | ||
| 1271 | 0x9450f904, | ||
| 1272 | 0x56bb0465, | ||
| 1273 | 0xfd50bd02, | ||
| 1274 | 0x50fc0475, | ||
| 1275 | 0x00057b7e, | ||
| 1276 | 0xf40464b6, | ||
| 1277 | 0x46b03411, | ||
| 1278 | 0xd81bf400, | ||
| 1279 | 0xb60076bb, | ||
| 1280 | 0x50f90465, | ||
| 1281 | 0xbb046594, | ||
| 1282 | 0x50bd0256, | ||
| 1283 | 0xfc0475fd, | ||
| 1284 | 0x05bb7e50, | ||
| 1285 | 0x0464b600, | ||
| 1286 | 0xbb0f11f4, | ||
| 1287 | 0x36b00076, | ||
| 1288 | 0x061bf401, | ||
| 1289 | /* 0x06a7: i2c_put_byte_done */ | ||
| 1290 | 0xf80132f4, | ||
| 1291 | /* 0x06a9: i2c_addr */ | ||
| 1292 | 0x0076bb00, | ||
| 1293 | 0xf90465b6, | ||
| 1294 | 0x04659450, | ||
| 1295 | 0xbd0256bb, | ||
| 1296 | 0x0475fd50, | ||
| 1297 | 0xf47e50fc, | ||
| 1298 | 0x64b60004, | ||
| 1299 | 0x2911f404, | ||
| 1300 | 0x012ec3e7, | ||
| 1301 | 0xfd0134b6, | ||
| 1302 | 0x76bb0553, | ||
| 1303 | 0x0465b600, | ||
| 1304 | 0x659450f9, | ||
| 1305 | 0x0256bb04, | ||
| 1306 | 0x75fd50bd, | ||
| 1307 | 0x7e50fc04, | ||
| 1308 | 0xb600064f, | ||
| 1309 | /* 0x06ee: i2c_addr_done */ | ||
| 1310 | 0x00f80464, | ||
| 1311 | /* 0x06f0: i2c_acquire_addr */ | ||
| 1312 | 0xb6f8cec7, | ||
| 1313 | 0xe0b705e4, | ||
| 1314 | 0x00f8d014, | ||
| 1315 | /* 0x06fc: i2c_acquire */ | ||
| 1316 | 0x0006f07e, | ||
| 1317 | 0x0000047e, | ||
| 1318 | 0x7e03d9f0, | ||
| 1319 | 0xf800002e, | ||
| 1320 | /* 0x070d: i2c_release */ | ||
| 1321 | 0x06f07e00, | ||
| 1322 | 0x00047e00, | ||
| 1323 | 0x03daf000, | ||
| 1324 | 0x00002e7e, | ||
| 1325 | /* 0x071e: i2c_recv */ | ||
| 1326 | 0x32f400f8, | ||
| 1327 | 0xf8c1c701, | ||
| 1328 | 0xb00214b6, | ||
| 1329 | 0x1ff52816, | ||
| 1330 | 0x13b80137, | ||
| 1331 | 0x98000bd4, | ||
| 1332 | 0x13b80032, | ||
| 1333 | 0x98000bac, | ||
| 1334 | 0x31f40031, | ||
| 1335 | 0xf9d0f902, | ||
| 1336 | 0xf1d0f9e0, | ||
| 1337 | 0xf1000067, | ||
| 1338 | 0x92100063, | ||
| 1339 | 0x76bb0167, | ||
| 1340 | 0x0465b600, | ||
| 1341 | 0x659450f9, | ||
| 1342 | 0x0256bb04, | ||
| 1343 | 0x75fd50bd, | ||
| 1344 | 0x7e50fc04, | ||
| 1345 | 0xb60006fc, | ||
| 1346 | 0xd0fc0464, | ||
| 1347 | 0xf500d6b0, | ||
| 1348 | 0x0500b01b, | ||
| 1349 | 0x0076bb00, | ||
| 1350 | 0xf90465b6, | ||
| 1351 | 0x04659450, | ||
| 1352 | 0xbd0256bb, | ||
| 1353 | 0x0475fd50, | ||
| 1354 | 0xa97e50fc, | ||
| 1355 | 0x64b60006, | ||
| 1356 | 0xcc11f504, | ||
| 1357 | 0xe0c5c700, | ||
| 1358 | 0xb60076bb, | ||
| 1359 | 0x50f90465, | ||
| 1360 | 0xbb046594, | ||
| 1361 | 0x50bd0256, | ||
| 1362 | 0xfc0475fd, | ||
| 1363 | 0x064f7e50, | ||
| 1364 | 0x0464b600, | ||
| 1365 | 0x00a911f5, | ||
| 1366 | 0x76bb0105, | ||
| 1367 | 0x0465b600, | ||
| 1368 | 0x659450f9, | ||
| 1369 | 0x0256bb04, | ||
| 1370 | 0x75fd50bd, | ||
| 1371 | 0x7e50fc04, | ||
| 1372 | 0xb60006a9, | ||
| 1373 | 0x11f50464, | ||
| 1374 | 0x76bb0087, | ||
| 1375 | 0x0465b600, | ||
| 1376 | 0x659450f9, | ||
| 1377 | 0x0256bb04, | ||
| 1378 | 0x75fd50bd, | ||
| 1379 | 0x7e50fc04, | ||
| 1380 | 0xb6000600, | ||
| 1381 | 0x11f40464, | ||
| 1382 | 0xe05bcb67, | ||
| 1383 | 0xb60076bb, | ||
| 1384 | 0x50f90465, | ||
| 1385 | 0xbb046594, | ||
| 1386 | 0x50bd0256, | ||
| 1387 | 0xfc0475fd, | ||
| 1388 | 0x054c7e50, | ||
| 1389 | 0x0464b600, | ||
| 1390 | 0x74bd5bb2, | ||
| 1391 | /* 0x0823: i2c_recv_not_rd08 */ | ||
| 1392 | 0xb0410ef4, | ||
| 1393 | 0x1bf401d6, | ||
| 1394 | 0x7e00053b, | ||
| 1395 | 0xf40006a9, | ||
| 1396 | 0xc5c73211, | ||
| 1397 | 0x064f7ee0, | ||
| 1398 | 0x2811f400, | ||
| 1399 | 0xa97e0005, | ||
| 1400 | 0x11f40006, | ||
| 1401 | 0xe0b5c71f, | ||
| 1402 | 0x00064f7e, | ||
| 1403 | 0x7e1511f4, | ||
| 1404 | 0xbd00054c, | ||
| 1405 | 0x08c5c774, | ||
| 1406 | 0xf4091bf4, | ||
| 1407 | 0x0ef40232, | ||
| 1408 | /* 0x0861: i2c_recv_not_wr08 */ | ||
| 1409 | /* 0x0861: i2c_recv_done */ | ||
| 1410 | 0xf8cec703, | ||
| 1411 | 0x00070d7e, | ||
| 1412 | 0xd0fce0fc, | ||
| 1413 | 0xb20912f4, | ||
| 1414 | 0x023f7e7c, | ||
| 1415 | /* 0x0875: i2c_recv_exit */ | ||
| 1416 | /* 0x0877: i2c_init */ | ||
| 1417 | 0xf800f800, | ||
| 1418 | /* 0x0879: test_recv */ | ||
| 1123 | 0x04584100, | 1419 | 0x04584100, |
| 1124 | 0xb60011cf, | 1420 | 0xb60011cf, |
| 1125 | 0x58400110, | 1421 | 0x58400110, |
| @@ -1128,26 +1424,26 @@ uint32_t nv108_pwr_code[] = { | |||
| 1128 | 0xe3f1d900, | 1424 | 0xe3f1d900, |
| 1129 | 0x967e134f, | 1425 | 0x967e134f, |
| 1130 | 0x00f80001, | 1426 | 0x00f80001, |
| 1131 | /* 0x0494: test_init */ | 1427 | /* 0x0898: test_init */ |
| 1132 | 0x7e08004e, | 1428 | 0x7e08004e, |
| 1133 | 0xf8000196, | 1429 | 0xf8000196, |
| 1134 | /* 0x049d: idle_recv */ | 1430 | /* 0x08a1: idle_recv */ |
| 1135 | /* 0x049f: idle */ | 1431 | /* 0x08a3: idle */ |
| 1136 | 0xf400f800, | 1432 | 0xf400f800, |
| 1137 | 0x54410031, | 1433 | 0x54410031, |
| 1138 | 0x0011cf04, | 1434 | 0x0011cf04, |
| 1139 | 0x400110b6, | 1435 | 0x400110b6, |
| 1140 | 0x01f60454, | 1436 | 0x01f60454, |
| 1141 | /* 0x04b3: idle_loop */ | 1437 | /* 0x08b7: idle_loop */ |
| 1142 | 0x0104bd00, | 1438 | 0x0104bd00, |
| 1143 | 0x0232f458, | 1439 | 0x0232f458, |
| 1144 | /* 0x04b8: idle_proc */ | 1440 | /* 0x08bc: idle_proc */ |
| 1145 | /* 0x04b8: idle_proc_exec */ | 1441 | /* 0x08bc: idle_proc_exec */ |
| 1146 | 0x1eb210f9, | 1442 | 0x1eb210f9, |
| 1147 | 0x0002487e, | 1443 | 0x0002487e, |
| 1148 | 0x11f410fc, | 1444 | 0x11f410fc, |
| 1149 | 0x0231f409, | 1445 | 0x0231f409, |
| 1150 | /* 0x04cb: idle_proc_next */ | 1446 | /* 0x08cf: idle_proc_next */ |
| 1151 | 0xb6f00ef4, | 1447 | 0xb6f00ef4, |
| 1152 | 0x1fa65810, | 1448 | 0x1fa65810, |
| 1153 | 0xf4e81bf4, | 1449 | 0xf4e81bf4, |
| @@ -1161,5 +1457,4 @@ uint32_t nv108_pwr_code[] = { | |||
| 1161 | 0x00000000, | 1457 | 0x00000000, |
| 1162 | 0x00000000, | 1458 | 0x00000000, |
| 1163 | 0x00000000, | 1459 | 0x00000000, |
| 1164 | 0x00000000, | ||
| 1165 | }; | 1460 | }; |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc index 6fde0b89e5aa..6744fcc06151 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc +++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc | |||
| @@ -37,6 +37,7 @@ | |||
| 37 | #include "host.fuc" | 37 | #include "host.fuc" |
| 38 | #include "memx.fuc" | 38 | #include "memx.fuc" |
| 39 | #include "perf.fuc" | 39 | #include "perf.fuc" |
| 40 | #include "i2c_.fuc" | ||
| 40 | #include "test.fuc" | 41 | #include "test.fuc" |
| 41 | #include "idle.fuc" | 42 | #include "idle.fuc" |
| 42 | #undef INCLUDE_PROC | 43 | #undef INCLUDE_PROC |
| @@ -46,6 +47,7 @@ | |||
| 46 | #include "host.fuc" | 47 | #include "host.fuc" |
| 47 | #include "memx.fuc" | 48 | #include "memx.fuc" |
| 48 | #include "perf.fuc" | 49 | #include "perf.fuc" |
| 50 | #include "i2c_.fuc" | ||
| 49 | #include "test.fuc" | 51 | #include "test.fuc" |
| 50 | #include "idle.fuc" | 52 | #include "idle.fuc" |
| 51 | #undef INCLUDE_DATA | 53 | #undef INCLUDE_DATA |
| @@ -57,6 +59,7 @@ | |||
| 57 | #include "host.fuc" | 59 | #include "host.fuc" |
| 58 | #include "memx.fuc" | 60 | #include "memx.fuc" |
| 59 | #include "perf.fuc" | 61 | #include "perf.fuc" |
| 62 | #include "i2c_.fuc" | ||
| 60 | #include "test.fuc" | 63 | #include "test.fuc" |
| 61 | #include "idle.fuc" | 64 | #include "idle.fuc" |
| 62 | #undef INCLUDE_CODE | 65 | #undef INCLUDE_CODE |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h index 0fa4d7dcd407..5a73fa620978 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h +++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h | |||
| @@ -89,9 +89,31 @@ uint32_t nva3_pwr_data[] = { | |||
| 89 | 0x00000000, | 89 | 0x00000000, |
| 90 | 0x00000000, | 90 | 0x00000000, |
| 91 | 0x00000000, | 91 | 0x00000000, |
| 92 | 0x5f433249, | ||
| 93 | 0x00000982, | ||
| 94 | 0x00000825, | ||
| 95 | 0x00000000, | ||
| 96 | 0x00000000, | ||
| 97 | 0x00000000, | ||
| 98 | 0x00000000, | ||
| 99 | 0x00000000, | ||
| 100 | 0x00000000, | ||
| 101 | 0x00000000, | ||
| 102 | 0x00000000, | ||
| 103 | 0x00000000, | ||
| 104 | 0x00000000, | ||
| 105 | 0x00000000, | ||
| 106 | 0x00000000, | ||
| 107 | 0x00000000, | ||
| 108 | 0x00000000, | ||
| 109 | 0x00000000, | ||
| 110 | 0x00000000, | ||
| 111 | 0x00000000, | ||
| 112 | 0x00000000, | ||
| 113 | 0x00000000, | ||
| 92 | 0x54534554, | 114 | 0x54534554, |
| 93 | 0x0000057b, | 115 | 0x000009ab, |
| 94 | 0x00000554, | 116 | 0x00000984, |
| 95 | 0x00000000, | 117 | 0x00000000, |
| 96 | 0x00000000, | 118 | 0x00000000, |
| 97 | 0x00000000, | 119 | 0x00000000, |
| @@ -112,8 +134,8 @@ uint32_t nva3_pwr_data[] = { | |||
| 112 | 0x00000000, | 134 | 0x00000000, |
| 113 | 0x00000000, | 135 | 0x00000000, |
| 114 | 0x454c4449, | 136 | 0x454c4449, |
| 115 | 0x00000587, | 137 | 0x000009b7, |
| 116 | 0x00000585, | 138 | 0x000009b5, |
| 117 | 0x00000000, | 139 | 0x00000000, |
| 118 | 0x00000000, | 140 | 0x00000000, |
| 119 | 0x00000000, | 141 | 0x00000000, |
| @@ -133,12 +155,12 @@ uint32_t nva3_pwr_data[] = { | |||
| 133 | 0x00000000, | 155 | 0x00000000, |
| 134 | 0x00000000, | 156 | 0x00000000, |
| 135 | 0x00000000, | 157 | 0x00000000, |
| 136 | /* 0x0210: proc_list_tail */ | 158 | /* 0x0268: proc_list_tail */ |
| 137 | /* 0x0210: time_prev */ | 159 | /* 0x0268: time_prev */ |
| 138 | 0x00000000, | 160 | 0x00000000, |
| 139 | /* 0x0214: time_next */ | 161 | /* 0x026c: time_next */ |
| 140 | 0x00000000, | 162 | 0x00000000, |
| 141 | /* 0x0218: fifo_queue */ | 163 | /* 0x0270: fifo_queue */ |
| 142 | 0x00000000, | 164 | 0x00000000, |
| 143 | 0x00000000, | 165 | 0x00000000, |
| 144 | 0x00000000, | 166 | 0x00000000, |
| @@ -171,7 +193,7 @@ uint32_t nva3_pwr_data[] = { | |||
| 171 | 0x00000000, | 193 | 0x00000000, |
| 172 | 0x00000000, | 194 | 0x00000000, |
| 173 | 0x00000000, | 195 | 0x00000000, |
| 174 | /* 0x0298: rfifo_queue */ | 196 | /* 0x02f0: rfifo_queue */ |
| 175 | 0x00000000, | 197 | 0x00000000, |
| 176 | 0x00000000, | 198 | 0x00000000, |
| 177 | 0x00000000, | 199 | 0x00000000, |
| @@ -204,11 +226,11 @@ uint32_t nva3_pwr_data[] = { | |||
| 204 | 0x00000000, | 226 | 0x00000000, |
| 205 | 0x00000000, | 227 | 0x00000000, |
| 206 | 0x00000000, | 228 | 0x00000000, |
| 207 | /* 0x0318: memx_func_head */ | 229 | /* 0x0370: memx_func_head */ |
| 208 | 0x00010000, | 230 | 0x00010000, |
| 209 | 0x00000000, | 231 | 0x00000000, |
| 210 | 0x0000046f, | 232 | 0x0000046f, |
| 211 | /* 0x0324: memx_func_next */ | 233 | /* 0x037c: memx_func_next */ |
| 212 | 0x00000001, | 234 | 0x00000001, |
| 213 | 0x00000000, | 235 | 0x00000000, |
| 214 | 0x00000496, | 236 | 0x00000496, |
| @@ -221,8 +243,18 @@ uint32_t nva3_pwr_data[] = { | |||
| 221 | 0x00010004, | 243 | 0x00010004, |
| 222 | 0x00000000, | 244 | 0x00000000, |
| 223 | 0x000004fc, | 245 | 0x000004fc, |
| 224 | /* 0x0354: memx_func_tail */ | 246 | /* 0x03ac: memx_func_tail */ |
| 225 | /* 0x0354: memx_data_head */ | 247 | /* 0x03ac: memx_data_head */ |
| 248 | 0x00000000, | ||
| 249 | 0x00000000, | ||
| 250 | 0x00000000, | ||
| 251 | 0x00000000, | ||
| 252 | 0x00000000, | ||
| 253 | 0x00000000, | ||
| 254 | 0x00000000, | ||
| 255 | 0x00000000, | ||
| 256 | 0x00000000, | ||
| 257 | 0x00000000, | ||
| 226 | 0x00000000, | 258 | 0x00000000, |
| 227 | 0x00000000, | 259 | 0x00000000, |
| 228 | 0x00000000, | 260 | 0x00000000, |
| @@ -725,6 +757,42 @@ uint32_t nva3_pwr_data[] = { | |||
| 725 | 0x00000000, | 757 | 0x00000000, |
| 726 | 0x00000000, | 758 | 0x00000000, |
| 727 | 0x00000000, | 759 | 0x00000000, |
| 760 | /* 0x0bac: memx_data_tail */ | ||
| 761 | /* 0x0bac: i2c_scl_map */ | ||
| 762 | 0x00001000, | ||
| 763 | 0x00004000, | ||
| 764 | 0x00010000, | ||
| 765 | 0x00000100, | ||
| 766 | 0x00040000, | ||
| 767 | 0x00100000, | ||
| 768 | 0x00400000, | ||
| 769 | 0x01000000, | ||
| 770 | 0x04000000, | ||
| 771 | 0x10000000, | ||
| 772 | /* 0x0bd4: i2c_sda_map */ | ||
| 773 | 0x00002000, | ||
| 774 | 0x00008000, | ||
| 775 | 0x00020000, | ||
| 776 | 0x00000200, | ||
| 777 | 0x00080000, | ||
| 778 | 0x00200000, | ||
| 779 | 0x00800000, | ||
| 780 | 0x02000000, | ||
| 781 | 0x08000000, | ||
| 782 | 0x20000000, | ||
| 783 | /* 0x0bfc: i2c_ctrl */ | ||
| 784 | 0x0000e138, | ||
| 785 | 0x0000e150, | ||
| 786 | 0x0000e168, | ||
| 787 | 0x0000e180, | ||
| 788 | 0x0000e254, | ||
| 789 | 0x0000e274, | ||
| 790 | 0x0000e764, | ||
| 791 | 0x0000e780, | ||
| 792 | 0x0000e79c, | ||
| 793 | 0x0000e7b8, | ||
| 794 | 0x00000000, | ||
| 795 | 0x00000000, | ||
| 728 | 0x00000000, | 796 | 0x00000000, |
| 729 | 0x00000000, | 797 | 0x00000000, |
| 730 | 0x00000000, | 798 | 0x00000000, |
| @@ -735,7 +803,6 @@ uint32_t nva3_pwr_data[] = { | |||
| 735 | 0x00000000, | 803 | 0x00000000, |
| 736 | 0x00000000, | 804 | 0x00000000, |
| 737 | 0x00000000, | 805 | 0x00000000, |
| 738 | /* 0x0b54: memx_data_tail */ | ||
| 739 | 0x00000000, | 806 | 0x00000000, |
| 740 | 0x00000000, | 807 | 0x00000000, |
| 741 | 0x00000000, | 808 | 0x00000000, |
| @@ -787,15 +854,15 @@ uint32_t nva3_pwr_code[] = { | |||
| 787 | 0x07a007f1, | 854 | 0x07a007f1, |
| 788 | 0xd00604b6, | 855 | 0xd00604b6, |
| 789 | 0x04bd000e, | 856 | 0x04bd000e, |
| 790 | 0xf001e7f0, | 857 | 0xf001d7f0, |
| 791 | 0x07f101e3, | 858 | 0x07f101d3, |
| 792 | 0x04b607ac, | 859 | 0x04b607ac, |
| 793 | 0x000ed006, | 860 | 0x000dd006, |
| 794 | /* 0x0022: rd32_wait */ | 861 | /* 0x0022: rd32_wait */ |
| 795 | 0xe7f104bd, | 862 | 0xd7f104bd, |
| 796 | 0xe4b607ac, | 863 | 0xd4b607ac, |
| 797 | 0x00eecf06, | 864 | 0x00ddcf06, |
| 798 | 0x7000e4f1, | 865 | 0x7000d4f1, |
| 799 | 0xf1f21bf4, | 866 | 0xf1f21bf4, |
| 800 | 0xb607a4d7, | 867 | 0xb607a4d7, |
| 801 | 0xddcf06d4, | 868 | 0xddcf06d4, |
| @@ -807,15 +874,15 @@ uint32_t nva3_pwr_code[] = { | |||
| 807 | 0xb607a407, | 874 | 0xb607a407, |
| 808 | 0x0dd00604, | 875 | 0x0dd00604, |
| 809 | 0xf004bd00, | 876 | 0xf004bd00, |
| 810 | 0xe5f002e7, | 877 | 0xd5f002d7, |
| 811 | 0x01e3f0f0, | 878 | 0x01d3f0f0, |
| 812 | 0x07ac07f1, | 879 | 0x07ac07f1, |
| 813 | 0xd00604b6, | 880 | 0xd00604b6, |
| 814 | 0x04bd000e, | 881 | 0x04bd000d, |
| 815 | /* 0x006c: wr32_wait */ | 882 | /* 0x006c: wr32_wait */ |
| 816 | 0x07ace7f1, | 883 | 0x07acd7f1, |
| 817 | 0xcf06e4b6, | 884 | 0xcf06d4b6, |
| 818 | 0xe4f100ee, | 885 | 0xd4f100dd, |
| 819 | 0x1bf47000, | 886 | 0x1bf47000, |
| 820 | /* 0x007f: nsec */ | 887 | /* 0x007f: nsec */ |
| 821 | 0xf000f8f2, | 888 | 0xf000f8f2, |
| @@ -845,21 +912,21 @@ uint32_t nva3_pwr_code[] = { | |||
| 845 | 0x9800f8df, | 912 | 0x9800f8df, |
| 846 | 0x96b003e9, | 913 | 0x96b003e9, |
| 847 | 0x2a0bf400, | 914 | 0x2a0bf400, |
| 848 | 0xbb840a98, | 915 | 0xbb9a0a98, |
| 849 | 0x1cf4029a, | 916 | 0x1cf4029a, |
| 850 | 0x01d7f00f, | 917 | 0x01d7f00f, |
| 851 | 0x025421f5, | 918 | 0x025421f5, |
| 852 | 0x0ef494bd, | 919 | 0x0ef494bd, |
| 853 | /* 0x00e9: intr_watchdog_next_time */ | 920 | /* 0x00e9: intr_watchdog_next_time */ |
| 854 | 0x850a9815, | 921 | 0x9b0a9815, |
| 855 | 0xf400a6b0, | 922 | 0xf400a6b0, |
| 856 | 0x9ab8090b, | 923 | 0x9ab8090b, |
| 857 | 0x061cf406, | 924 | 0x061cf406, |
| 858 | /* 0x00f8: intr_watchdog_next_time_set */ | 925 | /* 0x00f8: intr_watchdog_next_time_set */ |
| 859 | /* 0x00fb: intr_watchdog_next_proc */ | 926 | /* 0x00fb: intr_watchdog_next_proc */ |
| 860 | 0x80850980, | 927 | 0x809b0980, |
| 861 | 0xe0b603e9, | 928 | 0xe0b603e9, |
| 862 | 0x10e6b158, | 929 | 0x68e6b158, |
| 863 | 0xc61bf402, | 930 | 0xc61bf402, |
| 864 | /* 0x010a: intr */ | 931 | /* 0x010a: intr */ |
| 865 | 0x00f900f8, | 932 | 0x00f900f8, |
| @@ -880,15 +947,15 @@ uint32_t nva3_pwr_code[] = { | |||
| 880 | 0x0088cf06, | 947 | 0x0088cf06, |
| 881 | 0xf40289c4, | 948 | 0xf40289c4, |
| 882 | 0x0080230b, | 949 | 0x0080230b, |
| 883 | 0x58e7f085, | 950 | 0x58e7f09b, |
| 884 | 0x98cb21f4, | 951 | 0x98cb21f4, |
| 885 | 0x96b08509, | 952 | 0x96b09b09, |
| 886 | 0x110bf400, | 953 | 0x110bf400, |
| 887 | 0xb63407f0, | 954 | 0xb63407f0, |
| 888 | 0x09d00604, | 955 | 0x09d00604, |
| 889 | 0x8004bd00, | 956 | 0x8004bd00, |
| 890 | /* 0x016e: intr_skip_watchdog */ | 957 | /* 0x016e: intr_skip_watchdog */ |
| 891 | 0x89e48409, | 958 | 0x89e49a09, |
| 892 | 0x0bf40800, | 959 | 0x0bf40800, |
| 893 | 0x8897f148, | 960 | 0x8897f148, |
| 894 | 0x0694b606, | 961 | 0x0694b606, |
| @@ -948,7 +1015,7 @@ uint32_t nva3_pwr_code[] = { | |||
| 948 | 0x000ed006, | 1015 | 0x000ed006, |
| 949 | 0x0e8004bd, | 1016 | 0x0e8004bd, |
| 950 | /* 0x0241: timer_enable */ | 1017 | /* 0x0241: timer_enable */ |
| 951 | 0x0187f084, | 1018 | 0x0187f09a, |
| 952 | 0xb63807f0, | 1019 | 0xb63807f0, |
| 953 | 0x08d00604, | 1020 | 0x08d00604, |
| 954 | /* 0x024f: timer_done */ | 1021 | /* 0x024f: timer_done */ |
| @@ -979,7 +1046,7 @@ uint32_t nva3_pwr_code[] = { | |||
| 979 | 0xb8008a98, | 1046 | 0xb8008a98, |
| 980 | 0x0bf406ae, | 1047 | 0x0bf406ae, |
| 981 | 0x5880b610, | 1048 | 0x5880b610, |
| 982 | 0x021086b1, | 1049 | 0x026886b1, |
| 983 | 0xf4f01bf4, | 1050 | 0xf4f01bf4, |
| 984 | /* 0x02b2: find_done */ | 1051 | /* 0x02b2: find_done */ |
| 985 | 0x8eb90132, | 1052 | 0x8eb90132, |
| @@ -1049,7 +1116,7 @@ uint32_t nva3_pwr_code[] = { | |||
| 1049 | 0x320bf406, | 1116 | 0x320bf406, |
| 1050 | 0x94071ec4, | 1117 | 0x94071ec4, |
| 1051 | 0xe0b704ee, | 1118 | 0xe0b704ee, |
| 1052 | 0xeb980218, | 1119 | 0xeb980270, |
| 1053 | 0x02ec9803, | 1120 | 0x02ec9803, |
| 1054 | 0x9801ed98, | 1121 | 0x9801ed98, |
| 1055 | 0x21f500ee, | 1122 | 0x21f500ee, |
| @@ -1075,7 +1142,7 @@ uint32_t nva3_pwr_code[] = { | |||
| 1075 | 0xe60bf406, | 1142 | 0xe60bf406, |
| 1076 | 0xb60723c4, | 1143 | 0xb60723c4, |
| 1077 | 0x30b70434, | 1144 | 0x30b70434, |
| 1078 | 0x3b800298, | 1145 | 0x3b8002f0, |
| 1079 | 0x023c8003, | 1146 | 0x023c8003, |
| 1080 | 0x80013d80, | 1147 | 0x80013d80, |
| 1081 | 0x20b6003e, | 1148 | 0x20b6003e, |
| @@ -1090,13 +1157,13 @@ uint32_t nva3_pwr_code[] = { | |||
| 1090 | /* 0x0430: host_init */ | 1157 | /* 0x0430: host_init */ |
| 1091 | 0x008017f1, | 1158 | 0x008017f1, |
| 1092 | 0xf11014b6, | 1159 | 0xf11014b6, |
| 1093 | 0xf1021815, | 1160 | 0xf1027015, |
| 1094 | 0xb604d007, | 1161 | 0xb604d007, |
| 1095 | 0x01d00604, | 1162 | 0x01d00604, |
| 1096 | 0xf104bd00, | 1163 | 0xf104bd00, |
| 1097 | 0xb6008017, | 1164 | 0xb6008017, |
| 1098 | 0x15f11014, | 1165 | 0x15f11014, |
| 1099 | 0x07f10298, | 1166 | 0x07f102f0, |
| 1100 | 0x04b604dc, | 1167 | 0x04b604dc, |
| 1101 | 0x0001d006, | 1168 | 0x0001d006, |
| 1102 | 0x17f004bd, | 1169 | 0x17f004bd, |
| @@ -1156,14 +1223,14 @@ uint32_t nva3_pwr_code[] = { | |||
| 1156 | 0x00139802, | 1223 | 0x00139802, |
| 1157 | 0x950410b6, | 1224 | 0x950410b6, |
| 1158 | 0x30f01034, | 1225 | 0x30f01034, |
| 1159 | 0xc835980c, | 1226 | 0xde35980c, |
| 1160 | 0x12b855f9, | 1227 | 0x12b855f9, |
| 1161 | 0xec1ef406, | 1228 | 0xec1ef406, |
| 1162 | 0xe0fcd0fc, | 1229 | 0xe0fcd0fc, |
| 1163 | 0x02b921f5, | 1230 | 0x02b921f5, |
| 1164 | /* 0x0532: memx_info */ | 1231 | /* 0x0532: memx_info */ |
| 1165 | 0xc7f100f8, | 1232 | 0xc7f100f8, |
| 1166 | 0xb7f10354, | 1233 | 0xb7f103ac, |
| 1167 | 0x21f50800, | 1234 | 0x21f50800, |
| 1168 | 0x00f802b9, | 1235 | 0x00f802b9, |
| 1169 | /* 0x0540: memx_recv */ | 1236 | /* 0x0540: memx_recv */ |
| @@ -1175,7 +1242,312 @@ uint32_t nva3_pwr_code[] = { | |||
| 1175 | /* 0x0550: perf_recv */ | 1242 | /* 0x0550: perf_recv */ |
| 1176 | /* 0x0552: perf_init */ | 1243 | /* 0x0552: perf_init */ |
| 1177 | 0x00f800f8, | 1244 | 0x00f800f8, |
| 1178 | /* 0x0554: test_recv */ | 1245 | /* 0x0554: i2c_drive_scl */ |
| 1246 | 0xf40036b0, | ||
| 1247 | 0x07f1110b, | ||
| 1248 | 0x04b607e0, | ||
| 1249 | 0x0001d006, | ||
| 1250 | 0x00f804bd, | ||
| 1251 | /* 0x0568: i2c_drive_scl_lo */ | ||
| 1252 | 0x07e407f1, | ||
| 1253 | 0xd00604b6, | ||
| 1254 | 0x04bd0001, | ||
| 1255 | /* 0x0576: i2c_drive_sda */ | ||
| 1256 | 0x36b000f8, | ||
| 1257 | 0x110bf400, | ||
| 1258 | 0x07e007f1, | ||
| 1259 | 0xd00604b6, | ||
| 1260 | 0x04bd0002, | ||
| 1261 | /* 0x058a: i2c_drive_sda_lo */ | ||
| 1262 | 0x07f100f8, | ||
| 1263 | 0x04b607e4, | ||
| 1264 | 0x0002d006, | ||
| 1265 | 0x00f804bd, | ||
| 1266 | /* 0x0598: i2c_sense_scl */ | ||
| 1267 | 0xf10132f4, | ||
| 1268 | 0xb607c437, | ||
| 1269 | 0x33cf0634, | ||
| 1270 | 0x0431fd00, | ||
| 1271 | 0xf4060bf4, | ||
| 1272 | /* 0x05ae: i2c_sense_scl_done */ | ||
| 1273 | 0x00f80131, | ||
| 1274 | /* 0x05b0: i2c_sense_sda */ | ||
| 1275 | 0xf10132f4, | ||
| 1276 | 0xb607c437, | ||
| 1277 | 0x33cf0634, | ||
| 1278 | 0x0432fd00, | ||
| 1279 | 0xf4060bf4, | ||
| 1280 | /* 0x05c6: i2c_sense_sda_done */ | ||
| 1281 | 0x00f80131, | ||
| 1282 | /* 0x05c8: i2c_raise_scl */ | ||
| 1283 | 0x47f140f9, | ||
| 1284 | 0x37f00898, | ||
| 1285 | 0x5421f501, | ||
| 1286 | /* 0x05d5: i2c_raise_scl_wait */ | ||
| 1287 | 0xe8e7f105, | ||
| 1288 | 0x7f21f403, | ||
| 1289 | 0x059821f5, | ||
| 1290 | 0xb60901f4, | ||
| 1291 | 0x1bf40142, | ||
| 1292 | /* 0x05e9: i2c_raise_scl_done */ | ||
| 1293 | 0xf840fcef, | ||
| 1294 | /* 0x05ed: i2c_start */ | ||
| 1295 | 0x9821f500, | ||
| 1296 | 0x0d11f405, | ||
| 1297 | 0x05b021f5, | ||
| 1298 | 0xf40611f4, | ||
| 1299 | /* 0x05fe: i2c_start_rep */ | ||
| 1300 | 0x37f0300e, | ||
| 1301 | 0x5421f500, | ||
| 1302 | 0x0137f005, | ||
| 1303 | 0x057621f5, | ||
| 1304 | 0xb60076bb, | ||
| 1305 | 0x50f90465, | ||
| 1306 | 0xbb046594, | ||
| 1307 | 0x50bd0256, | ||
| 1308 | 0xfc0475fd, | ||
| 1309 | 0xc821f550, | ||
| 1310 | 0x0464b605, | ||
| 1311 | /* 0x062b: i2c_start_send */ | ||
| 1312 | 0xf01f11f4, | ||
| 1313 | 0x21f50037, | ||
| 1314 | 0xe7f10576, | ||
| 1315 | 0x21f41388, | ||
| 1316 | 0x0037f07f, | ||
| 1317 | 0x055421f5, | ||
| 1318 | 0x1388e7f1, | ||
| 1319 | /* 0x0647: i2c_start_out */ | ||
| 1320 | 0xf87f21f4, | ||
| 1321 | /* 0x0649: i2c_stop */ | ||
| 1322 | 0x0037f000, | ||
| 1323 | 0x055421f5, | ||
| 1324 | 0xf50037f0, | ||
| 1325 | 0xf1057621, | ||
| 1326 | 0xf403e8e7, | ||
| 1327 | 0x37f07f21, | ||
| 1328 | 0x5421f501, | ||
| 1329 | 0x88e7f105, | ||
| 1330 | 0x7f21f413, | ||
| 1331 | 0xf50137f0, | ||
| 1332 | 0xf1057621, | ||
| 1333 | 0xf41388e7, | ||
| 1334 | 0x00f87f21, | ||
| 1335 | /* 0x067c: i2c_bitw */ | ||
| 1336 | 0x057621f5, | ||
| 1337 | 0x03e8e7f1, | ||
| 1338 | 0xbb7f21f4, | ||
| 1339 | 0x65b60076, | ||
| 1340 | 0x9450f904, | ||
| 1341 | 0x56bb0465, | ||
| 1342 | 0xfd50bd02, | ||
| 1343 | 0x50fc0475, | ||
| 1344 | 0x05c821f5, | ||
| 1345 | 0xf40464b6, | ||
| 1346 | 0xe7f11811, | ||
| 1347 | 0x21f41388, | ||
| 1348 | 0x0037f07f, | ||
| 1349 | 0x055421f5, | ||
| 1350 | 0x1388e7f1, | ||
| 1351 | /* 0x06bb: i2c_bitw_out */ | ||
| 1352 | 0xf87f21f4, | ||
| 1353 | /* 0x06bd: i2c_bitr */ | ||
| 1354 | 0x0137f000, | ||
| 1355 | 0x057621f5, | ||
| 1356 | 0x03e8e7f1, | ||
| 1357 | 0xbb7f21f4, | ||
| 1358 | 0x65b60076, | ||
| 1359 | 0x9450f904, | ||
| 1360 | 0x56bb0465, | ||
| 1361 | 0xfd50bd02, | ||
| 1362 | 0x50fc0475, | ||
| 1363 | 0x05c821f5, | ||
| 1364 | 0xf40464b6, | ||
| 1365 | 0x21f51b11, | ||
| 1366 | 0x37f005b0, | ||
| 1367 | 0x5421f500, | ||
| 1368 | 0x88e7f105, | ||
| 1369 | 0x7f21f413, | ||
| 1370 | 0xf4013cf0, | ||
| 1371 | /* 0x0702: i2c_bitr_done */ | ||
| 1372 | 0x00f80131, | ||
| 1373 | /* 0x0704: i2c_get_byte */ | ||
| 1374 | 0xf00057f0, | ||
| 1375 | /* 0x070a: i2c_get_byte_next */ | ||
| 1376 | 0x54b60847, | ||
| 1377 | 0x0076bb01, | ||
| 1378 | 0xf90465b6, | ||
| 1379 | 0x04659450, | ||
| 1380 | 0xbd0256bb, | ||
| 1381 | 0x0475fd50, | ||
| 1382 | 0x21f550fc, | ||
| 1383 | 0x64b606bd, | ||
| 1384 | 0x2b11f404, | ||
| 1385 | 0xb60553fd, | ||
| 1386 | 0x1bf40142, | ||
| 1387 | 0x0137f0d8, | ||
| 1388 | 0xb60076bb, | ||
| 1389 | 0x50f90465, | ||
| 1390 | 0xbb046594, | ||
| 1391 | 0x50bd0256, | ||
| 1392 | 0xfc0475fd, | ||
| 1393 | 0x7c21f550, | ||
| 1394 | 0x0464b606, | ||
| 1395 | /* 0x0754: i2c_get_byte_done */ | ||
| 1396 | /* 0x0756: i2c_put_byte */ | ||
| 1397 | 0x47f000f8, | ||
| 1398 | /* 0x0759: i2c_put_byte_next */ | ||
| 1399 | 0x0142b608, | ||
| 1400 | 0xbb3854ff, | ||
| 1401 | 0x65b60076, | ||
| 1402 | 0x9450f904, | ||
| 1403 | 0x56bb0465, | ||
| 1404 | 0xfd50bd02, | ||
| 1405 | 0x50fc0475, | ||
| 1406 | 0x067c21f5, | ||
| 1407 | 0xf40464b6, | ||
| 1408 | 0x46b03411, | ||
| 1409 | 0xd81bf400, | ||
| 1410 | 0xb60076bb, | ||
| 1411 | 0x50f90465, | ||
| 1412 | 0xbb046594, | ||
| 1413 | 0x50bd0256, | ||
| 1414 | 0xfc0475fd, | ||
| 1415 | 0xbd21f550, | ||
| 1416 | 0x0464b606, | ||
| 1417 | 0xbb0f11f4, | ||
| 1418 | 0x36b00076, | ||
| 1419 | 0x061bf401, | ||
| 1420 | /* 0x07af: i2c_put_byte_done */ | ||
| 1421 | 0xf80132f4, | ||
| 1422 | /* 0x07b1: i2c_addr */ | ||
| 1423 | 0x0076bb00, | ||
| 1424 | 0xf90465b6, | ||
| 1425 | 0x04659450, | ||
| 1426 | 0xbd0256bb, | ||
| 1427 | 0x0475fd50, | ||
| 1428 | 0x21f550fc, | ||
| 1429 | 0x64b605ed, | ||
| 1430 | 0x2911f404, | ||
| 1431 | 0x012ec3e7, | ||
| 1432 | 0xfd0134b6, | ||
| 1433 | 0x76bb0553, | ||
| 1434 | 0x0465b600, | ||
| 1435 | 0x659450f9, | ||
| 1436 | 0x0256bb04, | ||
| 1437 | 0x75fd50bd, | ||
| 1438 | 0xf550fc04, | ||
| 1439 | 0xb6075621, | ||
| 1440 | /* 0x07f6: i2c_addr_done */ | ||
| 1441 | 0x00f80464, | ||
| 1442 | /* 0x07f8: i2c_acquire_addr */ | ||
| 1443 | 0xb6f8cec7, | ||
| 1444 | 0xe0b702e4, | ||
| 1445 | 0xee980bfc, | ||
| 1446 | /* 0x0807: i2c_acquire */ | ||
| 1447 | 0xf500f800, | ||
| 1448 | 0xf407f821, | ||
| 1449 | 0xd9f00421, | ||
| 1450 | 0x3f21f403, | ||
| 1451 | /* 0x0816: i2c_release */ | ||
| 1452 | 0x21f500f8, | ||
| 1453 | 0x21f407f8, | ||
| 1454 | 0x03daf004, | ||
| 1455 | 0xf83f21f4, | ||
| 1456 | /* 0x0825: i2c_recv */ | ||
| 1457 | 0x0132f400, | ||
| 1458 | 0xb6f8c1c7, | ||
| 1459 | 0x16b00214, | ||
| 1460 | 0x3a1ff528, | ||
| 1461 | 0xd413a001, | ||
| 1462 | 0x0032980b, | ||
| 1463 | 0x0bac13a0, | ||
| 1464 | 0xf4003198, | ||
| 1465 | 0xd0f90231, | ||
| 1466 | 0xd0f9e0f9, | ||
| 1467 | 0x000067f1, | ||
| 1468 | 0x100063f1, | ||
| 1469 | 0xbb016792, | ||
| 1470 | 0x65b60076, | ||
| 1471 | 0x9450f904, | ||
| 1472 | 0x56bb0465, | ||
| 1473 | 0xfd50bd02, | ||
| 1474 | 0x50fc0475, | ||
| 1475 | 0x080721f5, | ||
| 1476 | 0xfc0464b6, | ||
| 1477 | 0x00d6b0d0, | ||
| 1478 | 0x00b31bf5, | ||
| 1479 | 0xbb0057f0, | ||
| 1480 | 0x65b60076, | ||
| 1481 | 0x9450f904, | ||
| 1482 | 0x56bb0465, | ||
| 1483 | 0xfd50bd02, | ||
| 1484 | 0x50fc0475, | ||
| 1485 | 0x07b121f5, | ||
| 1486 | 0xf50464b6, | ||
| 1487 | 0xc700d011, | ||
| 1488 | 0x76bbe0c5, | ||
| 1489 | 0x0465b600, | ||
| 1490 | 0x659450f9, | ||
| 1491 | 0x0256bb04, | ||
| 1492 | 0x75fd50bd, | ||
| 1493 | 0xf550fc04, | ||
| 1494 | 0xb6075621, | ||
| 1495 | 0x11f50464, | ||
| 1496 | 0x57f000ad, | ||
| 1497 | 0x0076bb01, | ||
| 1498 | 0xf90465b6, | ||
| 1499 | 0x04659450, | ||
| 1500 | 0xbd0256bb, | ||
| 1501 | 0x0475fd50, | ||
| 1502 | 0x21f550fc, | ||
| 1503 | 0x64b607b1, | ||
| 1504 | 0x8a11f504, | ||
| 1505 | 0x0076bb00, | ||
| 1506 | 0xf90465b6, | ||
| 1507 | 0x04659450, | ||
| 1508 | 0xbd0256bb, | ||
| 1509 | 0x0475fd50, | ||
| 1510 | 0x21f550fc, | ||
| 1511 | 0x64b60704, | ||
| 1512 | 0x6a11f404, | ||
| 1513 | 0xbbe05bcb, | ||
| 1514 | 0x65b60076, | ||
| 1515 | 0x9450f904, | ||
| 1516 | 0x56bb0465, | ||
| 1517 | 0xfd50bd02, | ||
| 1518 | 0x50fc0475, | ||
| 1519 | 0x064921f5, | ||
| 1520 | 0xb90464b6, | ||
| 1521 | 0x74bd025b, | ||
| 1522 | /* 0x092b: i2c_recv_not_rd08 */ | ||
| 1523 | 0xb0430ef4, | ||
| 1524 | 0x1bf401d6, | ||
| 1525 | 0x0057f03d, | ||
| 1526 | 0x07b121f5, | ||
| 1527 | 0xc73311f4, | ||
| 1528 | 0x21f5e0c5, | ||
| 1529 | 0x11f40756, | ||
| 1530 | 0x0057f029, | ||
| 1531 | 0x07b121f5, | ||
| 1532 | 0xc71f11f4, | ||
| 1533 | 0x21f5e0b5, | ||
| 1534 | 0x11f40756, | ||
| 1535 | 0x4921f515, | ||
| 1536 | 0xc774bd06, | ||
| 1537 | 0x1bf408c5, | ||
| 1538 | 0x0232f409, | ||
| 1539 | /* 0x096b: i2c_recv_not_wr08 */ | ||
| 1540 | /* 0x096b: i2c_recv_done */ | ||
| 1541 | 0xc7030ef4, | ||
| 1542 | 0x21f5f8ce, | ||
| 1543 | 0xe0fc0816, | ||
| 1544 | 0x12f4d0fc, | ||
| 1545 | 0x027cb90a, | ||
| 1546 | 0x02b921f5, | ||
| 1547 | /* 0x0980: i2c_recv_exit */ | ||
| 1548 | /* 0x0982: i2c_init */ | ||
| 1549 | 0x00f800f8, | ||
| 1550 | /* 0x0984: test_recv */ | ||
| 1179 | 0x05d817f1, | 1551 | 0x05d817f1, |
| 1180 | 0xcf0614b6, | 1552 | 0xcf0614b6, |
| 1181 | 0x10b60011, | 1553 | 0x10b60011, |
| @@ -1185,12 +1557,12 @@ uint32_t nva3_pwr_code[] = { | |||
| 1185 | 0x00e7f104, | 1557 | 0x00e7f104, |
| 1186 | 0x4fe3f1d9, | 1558 | 0x4fe3f1d9, |
| 1187 | 0xf521f513, | 1559 | 0xf521f513, |
| 1188 | /* 0x057b: test_init */ | 1560 | /* 0x09ab: test_init */ |
| 1189 | 0xf100f801, | 1561 | 0xf100f801, |
| 1190 | 0xf50800e7, | 1562 | 0xf50800e7, |
| 1191 | 0xf801f521, | 1563 | 0xf801f521, |
| 1192 | /* 0x0585: idle_recv */ | 1564 | /* 0x09b5: idle_recv */ |
| 1193 | /* 0x0587: idle */ | 1565 | /* 0x09b7: idle */ |
| 1194 | 0xf400f800, | 1566 | 0xf400f800, |
| 1195 | 0x17f10031, | 1567 | 0x17f10031, |
| 1196 | 0x14b605d4, | 1568 | 0x14b605d4, |
| @@ -1198,32 +1570,20 @@ uint32_t nva3_pwr_code[] = { | |||
| 1198 | 0xf10110b6, | 1570 | 0xf10110b6, |
| 1199 | 0xb605d407, | 1571 | 0xb605d407, |
| 1200 | 0x01d00604, | 1572 | 0x01d00604, |
| 1201 | /* 0x05a3: idle_loop */ | 1573 | /* 0x09d3: idle_loop */ |
| 1202 | 0xf004bd00, | 1574 | 0xf004bd00, |
| 1203 | 0x32f45817, | 1575 | 0x32f45817, |
| 1204 | /* 0x05a9: idle_proc */ | 1576 | /* 0x09d9: idle_proc */ |
| 1205 | /* 0x05a9: idle_proc_exec */ | 1577 | /* 0x09d9: idle_proc_exec */ |
| 1206 | 0xb910f902, | 1578 | 0xb910f902, |
| 1207 | 0x21f5021e, | 1579 | 0x21f5021e, |
| 1208 | 0x10fc02c2, | 1580 | 0x10fc02c2, |
| 1209 | 0xf40911f4, | 1581 | 0xf40911f4, |
| 1210 | 0x0ef40231, | 1582 | 0x0ef40231, |
| 1211 | /* 0x05bd: idle_proc_next */ | 1583 | /* 0x09ed: idle_proc_next */ |
| 1212 | 0x5810b6ef, | 1584 | 0x5810b6ef, |
| 1213 | 0xf4061fb8, | 1585 | 0xf4061fb8, |
| 1214 | 0x02f4e61b, | 1586 | 0x02f4e61b, |
| 1215 | 0x0028f4dd, | 1587 | 0x0028f4dd, |
| 1216 | 0x00bb0ef4, | 1588 | 0x00bb0ef4, |
| 1217 | 0x00000000, | ||
| 1218 | 0x00000000, | ||
| 1219 | 0x00000000, | ||
| 1220 | 0x00000000, | ||
| 1221 | 0x00000000, | ||
| 1222 | 0x00000000, | ||
| 1223 | 0x00000000, | ||
| 1224 | 0x00000000, | ||
| 1225 | 0x00000000, | ||
| 1226 | 0x00000000, | ||
| 1227 | 0x00000000, | ||
| 1228 | 0x00000000, | ||
| 1229 | }; | 1589 | }; |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc index eaa64da68e36..48f79434a449 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc +++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc | |||
| @@ -37,6 +37,7 @@ | |||
| 37 | #include "host.fuc" | 37 | #include "host.fuc" |
| 38 | #include "memx.fuc" | 38 | #include "memx.fuc" |
| 39 | #include "perf.fuc" | 39 | #include "perf.fuc" |
| 40 | #include "i2c_.fuc" | ||
| 40 | #include "test.fuc" | 41 | #include "test.fuc" |
| 41 | #include "idle.fuc" | 42 | #include "idle.fuc" |
| 42 | #undef INCLUDE_PROC | 43 | #undef INCLUDE_PROC |
| @@ -46,6 +47,7 @@ | |||
| 46 | #include "host.fuc" | 47 | #include "host.fuc" |
| 47 | #include "memx.fuc" | 48 | #include "memx.fuc" |
| 48 | #include "perf.fuc" | 49 | #include "perf.fuc" |
| 50 | #include "i2c_.fuc" | ||
| 49 | #include "test.fuc" | 51 | #include "test.fuc" |
| 50 | #include "idle.fuc" | 52 | #include "idle.fuc" |
| 51 | #undef INCLUDE_DATA | 53 | #undef INCLUDE_DATA |
| @@ -57,6 +59,7 @@ | |||
| 57 | #include "host.fuc" | 59 | #include "host.fuc" |
| 58 | #include "memx.fuc" | 60 | #include "memx.fuc" |
| 59 | #include "perf.fuc" | 61 | #include "perf.fuc" |
| 62 | #include "i2c_.fuc" | ||
| 60 | #include "test.fuc" | 63 | #include "test.fuc" |
| 61 | #include "idle.fuc" | 64 | #include "idle.fuc" |
| 62 | #undef INCLUDE_CODE | 65 | #undef INCLUDE_CODE |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h index 82c8e8b88917..4dba00d2dd1a 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h +++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h | |||
| @@ -89,9 +89,31 @@ uint32_t nvc0_pwr_data[] = { | |||
| 89 | 0x00000000, | 89 | 0x00000000, |
| 90 | 0x00000000, | 90 | 0x00000000, |
| 91 | 0x00000000, | 91 | 0x00000000, |
| 92 | 0x5f433249, | ||
| 93 | 0x00000982, | ||
| 94 | 0x00000825, | ||
| 95 | 0x00000000, | ||
| 96 | 0x00000000, | ||
| 97 | 0x00000000, | ||
| 98 | 0x00000000, | ||
| 99 | 0x00000000, | ||
| 100 | 0x00000000, | ||
| 101 | 0x00000000, | ||
| 102 | 0x00000000, | ||
| 103 | 0x00000000, | ||
| 104 | 0x00000000, | ||
| 105 | 0x00000000, | ||
| 106 | 0x00000000, | ||
| 107 | 0x00000000, | ||
| 108 | 0x00000000, | ||
| 109 | 0x00000000, | ||
| 110 | 0x00000000, | ||
| 111 | 0x00000000, | ||
| 112 | 0x00000000, | ||
| 113 | 0x00000000, | ||
| 92 | 0x54534554, | 114 | 0x54534554, |
| 93 | 0x0000057b, | 115 | 0x000009ab, |
| 94 | 0x00000554, | 116 | 0x00000984, |
| 95 | 0x00000000, | 117 | 0x00000000, |
| 96 | 0x00000000, | 118 | 0x00000000, |
| 97 | 0x00000000, | 119 | 0x00000000, |
| @@ -112,8 +134,8 @@ uint32_t nvc0_pwr_data[] = { | |||
| 112 | 0x00000000, | 134 | 0x00000000, |
| 113 | 0x00000000, | 135 | 0x00000000, |
| 114 | 0x454c4449, | 136 | 0x454c4449, |
| 115 | 0x00000587, | 137 | 0x000009b7, |
| 116 | 0x00000585, | 138 | 0x000009b5, |
| 117 | 0x00000000, | 139 | 0x00000000, |
| 118 | 0x00000000, | 140 | 0x00000000, |
| 119 | 0x00000000, | 141 | 0x00000000, |
| @@ -133,12 +155,12 @@ uint32_t nvc0_pwr_data[] = { | |||
| 133 | 0x00000000, | 155 | 0x00000000, |
| 134 | 0x00000000, | 156 | 0x00000000, |
| 135 | 0x00000000, | 157 | 0x00000000, |
| 136 | /* 0x0210: proc_list_tail */ | 158 | /* 0x0268: proc_list_tail */ |
| 137 | /* 0x0210: time_prev */ | 159 | /* 0x0268: time_prev */ |
| 138 | 0x00000000, | 160 | 0x00000000, |
| 139 | /* 0x0214: time_next */ | 161 | /* 0x026c: time_next */ |
| 140 | 0x00000000, | 162 | 0x00000000, |
| 141 | /* 0x0218: fifo_queue */ | 163 | /* 0x0270: fifo_queue */ |
| 142 | 0x00000000, | 164 | 0x00000000, |
| 143 | 0x00000000, | 165 | 0x00000000, |
| 144 | 0x00000000, | 166 | 0x00000000, |
| @@ -171,7 +193,7 @@ uint32_t nvc0_pwr_data[] = { | |||
| 171 | 0x00000000, | 193 | 0x00000000, |
| 172 | 0x00000000, | 194 | 0x00000000, |
| 173 | 0x00000000, | 195 | 0x00000000, |
| 174 | /* 0x0298: rfifo_queue */ | 196 | /* 0x02f0: rfifo_queue */ |
| 175 | 0x00000000, | 197 | 0x00000000, |
| 176 | 0x00000000, | 198 | 0x00000000, |
| 177 | 0x00000000, | 199 | 0x00000000, |
| @@ -204,11 +226,11 @@ uint32_t nvc0_pwr_data[] = { | |||
| 204 | 0x00000000, | 226 | 0x00000000, |
| 205 | 0x00000000, | 227 | 0x00000000, |
| 206 | 0x00000000, | 228 | 0x00000000, |
| 207 | /* 0x0318: memx_func_head */ | 229 | /* 0x0370: memx_func_head */ |
| 208 | 0x00010000, | 230 | 0x00010000, |
| 209 | 0x00000000, | 231 | 0x00000000, |
| 210 | 0x0000046f, | 232 | 0x0000046f, |
| 211 | /* 0x0324: memx_func_next */ | 233 | /* 0x037c: memx_func_next */ |
| 212 | 0x00000001, | 234 | 0x00000001, |
| 213 | 0x00000000, | 235 | 0x00000000, |
| 214 | 0x00000496, | 236 | 0x00000496, |
| @@ -221,8 +243,18 @@ uint32_t nvc0_pwr_data[] = { | |||
| 221 | 0x00010004, | 243 | 0x00010004, |
| 222 | 0x00000000, | 244 | 0x00000000, |
| 223 | 0x000004fc, | 245 | 0x000004fc, |
| 224 | /* 0x0354: memx_func_tail */ | 246 | /* 0x03ac: memx_func_tail */ |
| 225 | /* 0x0354: memx_data_head */ | 247 | /* 0x03ac: memx_data_head */ |
| 248 | 0x00000000, | ||
| 249 | 0x00000000, | ||
| 250 | 0x00000000, | ||
| 251 | 0x00000000, | ||
| 252 | 0x00000000, | ||
| 253 | 0x00000000, | ||
| 254 | 0x00000000, | ||
| 255 | 0x00000000, | ||
| 256 | 0x00000000, | ||
| 257 | 0x00000000, | ||
| 226 | 0x00000000, | 258 | 0x00000000, |
| 227 | 0x00000000, | 259 | 0x00000000, |
| 228 | 0x00000000, | 260 | 0x00000000, |
| @@ -725,6 +757,42 @@ uint32_t nvc0_pwr_data[] = { | |||
| 725 | 0x00000000, | 757 | 0x00000000, |
| 726 | 0x00000000, | 758 | 0x00000000, |
| 727 | 0x00000000, | 759 | 0x00000000, |
| 760 | /* 0x0bac: memx_data_tail */ | ||
| 761 | /* 0x0bac: i2c_scl_map */ | ||
| 762 | 0x00001000, | ||
| 763 | 0x00004000, | ||
| 764 | 0x00010000, | ||
| 765 | 0x00000100, | ||
| 766 | 0x00040000, | ||
| 767 | 0x00100000, | ||
| 768 | 0x00400000, | ||
| 769 | 0x01000000, | ||
| 770 | 0x04000000, | ||
| 771 | 0x10000000, | ||
| 772 | /* 0x0bd4: i2c_sda_map */ | ||
| 773 | 0x00002000, | ||
| 774 | 0x00008000, | ||
| 775 | 0x00020000, | ||
| 776 | 0x00000200, | ||
| 777 | 0x00080000, | ||
| 778 | 0x00200000, | ||
| 779 | 0x00800000, | ||
| 780 | 0x02000000, | ||
| 781 | 0x08000000, | ||
| 782 | 0x20000000, | ||
| 783 | /* 0x0bfc: i2c_ctrl */ | ||
| 784 | 0x0000e138, | ||
| 785 | 0x0000e150, | ||
| 786 | 0x0000e168, | ||
| 787 | 0x0000e180, | ||
| 788 | 0x0000e254, | ||
| 789 | 0x0000e274, | ||
| 790 | 0x0000e764, | ||
| 791 | 0x0000e780, | ||
| 792 | 0x0000e79c, | ||
| 793 | 0x0000e7b8, | ||
| 794 | 0x00000000, | ||
| 795 | 0x00000000, | ||
| 728 | 0x00000000, | 796 | 0x00000000, |
| 729 | 0x00000000, | 797 | 0x00000000, |
| 730 | 0x00000000, | 798 | 0x00000000, |
| @@ -735,7 +803,6 @@ uint32_t nvc0_pwr_data[] = { | |||
| 735 | 0x00000000, | 803 | 0x00000000, |
| 736 | 0x00000000, | 804 | 0x00000000, |
| 737 | 0x00000000, | 805 | 0x00000000, |
| 738 | /* 0x0b54: memx_data_tail */ | ||
| 739 | 0x00000000, | 806 | 0x00000000, |
| 740 | 0x00000000, | 807 | 0x00000000, |
| 741 | 0x00000000, | 808 | 0x00000000, |
| @@ -787,15 +854,15 @@ uint32_t nvc0_pwr_code[] = { | |||
| 787 | 0x07a007f1, | 854 | 0x07a007f1, |
| 788 | 0xd00604b6, | 855 | 0xd00604b6, |
| 789 | 0x04bd000e, | 856 | 0x04bd000e, |
| 790 | 0xf001e7f0, | 857 | 0xf001d7f0, |
| 791 | 0x07f101e3, | 858 | 0x07f101d3, |
| 792 | 0x04b607ac, | 859 | 0x04b607ac, |
| 793 | 0x000ed006, | 860 | 0x000dd006, |
| 794 | /* 0x0022: rd32_wait */ | 861 | /* 0x0022: rd32_wait */ |
| 795 | 0xe7f104bd, | 862 | 0xd7f104bd, |
| 796 | 0xe4b607ac, | 863 | 0xd4b607ac, |
| 797 | 0x00eecf06, | 864 | 0x00ddcf06, |
| 798 | 0x7000e4f1, | 865 | 0x7000d4f1, |
| 799 | 0xf1f21bf4, | 866 | 0xf1f21bf4, |
| 800 | 0xb607a4d7, | 867 | 0xb607a4d7, |
| 801 | 0xddcf06d4, | 868 | 0xddcf06d4, |
| @@ -807,15 +874,15 @@ uint32_t nvc0_pwr_code[] = { | |||
| 807 | 0xb607a407, | 874 | 0xb607a407, |
| 808 | 0x0dd00604, | 875 | 0x0dd00604, |
| 809 | 0xf004bd00, | 876 | 0xf004bd00, |
| 810 | 0xe5f002e7, | 877 | 0xd5f002d7, |
| 811 | 0x01e3f0f0, | 878 | 0x01d3f0f0, |
| 812 | 0x07ac07f1, | 879 | 0x07ac07f1, |
| 813 | 0xd00604b6, | 880 | 0xd00604b6, |
| 814 | 0x04bd000e, | 881 | 0x04bd000d, |
| 815 | /* 0x006c: wr32_wait */ | 882 | /* 0x006c: wr32_wait */ |
| 816 | 0x07ace7f1, | 883 | 0x07acd7f1, |
| 817 | 0xcf06e4b6, | 884 | 0xcf06d4b6, |
| 818 | 0xe4f100ee, | 885 | 0xd4f100dd, |
| 819 | 0x1bf47000, | 886 | 0x1bf47000, |
| 820 | /* 0x007f: nsec */ | 887 | /* 0x007f: nsec */ |
| 821 | 0xf000f8f2, | 888 | 0xf000f8f2, |
| @@ -845,21 +912,21 @@ uint32_t nvc0_pwr_code[] = { | |||
| 845 | 0x9800f8df, | 912 | 0x9800f8df, |
| 846 | 0x96b003e9, | 913 | 0x96b003e9, |
| 847 | 0x2a0bf400, | 914 | 0x2a0bf400, |
| 848 | 0xbb840a98, | 915 | 0xbb9a0a98, |
| 849 | 0x1cf4029a, | 916 | 0x1cf4029a, |
| 850 | 0x01d7f00f, | 917 | 0x01d7f00f, |
| 851 | 0x025421f5, | 918 | 0x025421f5, |
| 852 | 0x0ef494bd, | 919 | 0x0ef494bd, |
| 853 | /* 0x00e9: intr_watchdog_next_time */ | 920 | /* 0x00e9: intr_watchdog_next_time */ |
| 854 | 0x850a9815, | 921 | 0x9b0a9815, |
| 855 | 0xf400a6b0, | 922 | 0xf400a6b0, |
| 856 | 0x9ab8090b, | 923 | 0x9ab8090b, |
| 857 | 0x061cf406, | 924 | 0x061cf406, |
| 858 | /* 0x00f8: intr_watchdog_next_time_set */ | 925 | /* 0x00f8: intr_watchdog_next_time_set */ |
| 859 | /* 0x00fb: intr_watchdog_next_proc */ | 926 | /* 0x00fb: intr_watchdog_next_proc */ |
| 860 | 0x80850980, | 927 | 0x809b0980, |
| 861 | 0xe0b603e9, | 928 | 0xe0b603e9, |
| 862 | 0x10e6b158, | 929 | 0x68e6b158, |
| 863 | 0xc61bf402, | 930 | 0xc61bf402, |
| 864 | /* 0x010a: intr */ | 931 | /* 0x010a: intr */ |
| 865 | 0x00f900f8, | 932 | 0x00f900f8, |
| @@ -880,15 +947,15 @@ uint32_t nvc0_pwr_code[] = { | |||
| 880 | 0x0088cf06, | 947 | 0x0088cf06, |
| 881 | 0xf40289c4, | 948 | 0xf40289c4, |
| 882 | 0x0080230b, | 949 | 0x0080230b, |
| 883 | 0x58e7f085, | 950 | 0x58e7f09b, |
| 884 | 0x98cb21f4, | 951 | 0x98cb21f4, |
| 885 | 0x96b08509, | 952 | 0x96b09b09, |
| 886 | 0x110bf400, | 953 | 0x110bf400, |
| 887 | 0xb63407f0, | 954 | 0xb63407f0, |
| 888 | 0x09d00604, | 955 | 0x09d00604, |
| 889 | 0x8004bd00, | 956 | 0x8004bd00, |
| 890 | /* 0x016e: intr_skip_watchdog */ | 957 | /* 0x016e: intr_skip_watchdog */ |
| 891 | 0x89e48409, | 958 | 0x89e49a09, |
| 892 | 0x0bf40800, | 959 | 0x0bf40800, |
| 893 | 0x8897f148, | 960 | 0x8897f148, |
| 894 | 0x0694b606, | 961 | 0x0694b606, |
| @@ -948,7 +1015,7 @@ uint32_t nvc0_pwr_code[] = { | |||
| 948 | 0x000ed006, | 1015 | 0x000ed006, |
| 949 | 0x0e8004bd, | 1016 | 0x0e8004bd, |
| 950 | /* 0x0241: timer_enable */ | 1017 | /* 0x0241: timer_enable */ |
| 951 | 0x0187f084, | 1018 | 0x0187f09a, |
| 952 | 0xb63807f0, | 1019 | 0xb63807f0, |
| 953 | 0x08d00604, | 1020 | 0x08d00604, |
| 954 | /* 0x024f: timer_done */ | 1021 | /* 0x024f: timer_done */ |
| @@ -979,7 +1046,7 @@ uint32_t nvc0_pwr_code[] = { | |||
| 979 | 0xb8008a98, | 1046 | 0xb8008a98, |
| 980 | 0x0bf406ae, | 1047 | 0x0bf406ae, |
| 981 | 0x5880b610, | 1048 | 0x5880b610, |
| 982 | 0x021086b1, | 1049 | 0x026886b1, |
| 983 | 0xf4f01bf4, | 1050 | 0xf4f01bf4, |
| 984 | /* 0x02b2: find_done */ | 1051 | /* 0x02b2: find_done */ |
| 985 | 0x8eb90132, | 1052 | 0x8eb90132, |
| @@ -1049,7 +1116,7 @@ uint32_t nvc0_pwr_code[] = { | |||
| 1049 | 0x320bf406, | 1116 | 0x320bf406, |
| 1050 | 0x94071ec4, | 1117 | 0x94071ec4, |
| 1051 | 0xe0b704ee, | 1118 | 0xe0b704ee, |
| 1052 | 0xeb980218, | 1119 | 0xeb980270, |
| 1053 | 0x02ec9803, | 1120 | 0x02ec9803, |
| 1054 | 0x9801ed98, | 1121 | 0x9801ed98, |
| 1055 | 0x21f500ee, | 1122 | 0x21f500ee, |
| @@ -1075,7 +1142,7 @@ uint32_t nvc0_pwr_code[] = { | |||
| 1075 | 0xe60bf406, | 1142 | 0xe60bf406, |
| 1076 | 0xb60723c4, | 1143 | 0xb60723c4, |
| 1077 | 0x30b70434, | 1144 | 0x30b70434, |
| 1078 | 0x3b800298, | 1145 | 0x3b8002f0, |
| 1079 | 0x023c8003, | 1146 | 0x023c8003, |
| 1080 | 0x80013d80, | 1147 | 0x80013d80, |
| 1081 | 0x20b6003e, | 1148 | 0x20b6003e, |
| @@ -1090,13 +1157,13 @@ uint32_t nvc0_pwr_code[] = { | |||
| 1090 | /* 0x0430: host_init */ | 1157 | /* 0x0430: host_init */ |
| 1091 | 0x008017f1, | 1158 | 0x008017f1, |
| 1092 | 0xf11014b6, | 1159 | 0xf11014b6, |
| 1093 | 0xf1021815, | 1160 | 0xf1027015, |
| 1094 | 0xb604d007, | 1161 | 0xb604d007, |
| 1095 | 0x01d00604, | 1162 | 0x01d00604, |
| 1096 | 0xf104bd00, | 1163 | 0xf104bd00, |
| 1097 | 0xb6008017, | 1164 | 0xb6008017, |
| 1098 | 0x15f11014, | 1165 | 0x15f11014, |
| 1099 | 0x07f10298, | 1166 | 0x07f102f0, |
| 1100 | 0x04b604dc, | 1167 | 0x04b604dc, |
| 1101 | 0x0001d006, | 1168 | 0x0001d006, |
| 1102 | 0x17f004bd, | 1169 | 0x17f004bd, |
| @@ -1156,14 +1223,14 @@ uint32_t nvc0_pwr_code[] = { | |||
| 1156 | 0x00139802, | 1223 | 0x00139802, |
| 1157 | 0x950410b6, | 1224 | 0x950410b6, |
| 1158 | 0x30f01034, | 1225 | 0x30f01034, |
| 1159 | 0xc835980c, | 1226 | 0xde35980c, |
| 1160 | 0x12b855f9, | 1227 | 0x12b855f9, |
| 1161 | 0xec1ef406, | 1228 | 0xec1ef406, |
| 1162 | 0xe0fcd0fc, | 1229 | 0xe0fcd0fc, |
| 1163 | 0x02b921f5, | 1230 | 0x02b921f5, |
| 1164 | /* 0x0532: memx_info */ | 1231 | /* 0x0532: memx_info */ |
| 1165 | 0xc7f100f8, | 1232 | 0xc7f100f8, |
| 1166 | 0xb7f10354, | 1233 | 0xb7f103ac, |
| 1167 | 0x21f50800, | 1234 | 0x21f50800, |
| 1168 | 0x00f802b9, | 1235 | 0x00f802b9, |
| 1169 | /* 0x0540: memx_recv */ | 1236 | /* 0x0540: memx_recv */ |
| @@ -1175,7 +1242,312 @@ uint32_t nvc0_pwr_code[] = { | |||
| 1175 | /* 0x0550: perf_recv */ | 1242 | /* 0x0550: perf_recv */ |
| 1176 | /* 0x0552: perf_init */ | 1243 | /* 0x0552: perf_init */ |
| 1177 | 0x00f800f8, | 1244 | 0x00f800f8, |
| 1178 | /* 0x0554: test_recv */ | 1245 | /* 0x0554: i2c_drive_scl */ |
| 1246 | 0xf40036b0, | ||
| 1247 | 0x07f1110b, | ||
| 1248 | 0x04b607e0, | ||
| 1249 | 0x0001d006, | ||
| 1250 | 0x00f804bd, | ||
| 1251 | /* 0x0568: i2c_drive_scl_lo */ | ||
| 1252 | 0x07e407f1, | ||
| 1253 | 0xd00604b6, | ||
| 1254 | 0x04bd0001, | ||
| 1255 | /* 0x0576: i2c_drive_sda */ | ||
| 1256 | 0x36b000f8, | ||
| 1257 | 0x110bf400, | ||
| 1258 | 0x07e007f1, | ||
| 1259 | 0xd00604b6, | ||
| 1260 | 0x04bd0002, | ||
| 1261 | /* 0x058a: i2c_drive_sda_lo */ | ||
| 1262 | 0x07f100f8, | ||
| 1263 | 0x04b607e4, | ||
| 1264 | 0x0002d006, | ||
| 1265 | 0x00f804bd, | ||
| 1266 | /* 0x0598: i2c_sense_scl */ | ||
| 1267 | 0xf10132f4, | ||
| 1268 | 0xb607c437, | ||
| 1269 | 0x33cf0634, | ||
| 1270 | 0x0431fd00, | ||
| 1271 | 0xf4060bf4, | ||
| 1272 | /* 0x05ae: i2c_sense_scl_done */ | ||
| 1273 | 0x00f80131, | ||
| 1274 | /* 0x05b0: i2c_sense_sda */ | ||
| 1275 | 0xf10132f4, | ||
| 1276 | 0xb607c437, | ||
| 1277 | 0x33cf0634, | ||
| 1278 | 0x0432fd00, | ||
| 1279 | 0xf4060bf4, | ||
| 1280 | /* 0x05c6: i2c_sense_sda_done */ | ||
| 1281 | 0x00f80131, | ||
| 1282 | /* 0x05c8: i2c_raise_scl */ | ||
| 1283 | 0x47f140f9, | ||
| 1284 | 0x37f00898, | ||
| 1285 | 0x5421f501, | ||
| 1286 | /* 0x05d5: i2c_raise_scl_wait */ | ||
| 1287 | 0xe8e7f105, | ||
| 1288 | 0x7f21f403, | ||
| 1289 | 0x059821f5, | ||
| 1290 | 0xb60901f4, | ||
| 1291 | 0x1bf40142, | ||
| 1292 | /* 0x05e9: i2c_raise_scl_done */ | ||
| 1293 | 0xf840fcef, | ||
| 1294 | /* 0x05ed: i2c_start */ | ||
| 1295 | 0x9821f500, | ||
| 1296 | 0x0d11f405, | ||
| 1297 | 0x05b021f5, | ||
| 1298 | 0xf40611f4, | ||
| 1299 | /* 0x05fe: i2c_start_rep */ | ||
| 1300 | 0x37f0300e, | ||
| 1301 | 0x5421f500, | ||
| 1302 | 0x0137f005, | ||
| 1303 | 0x057621f5, | ||
| 1304 | 0xb60076bb, | ||
| 1305 | 0x50f90465, | ||
| 1306 | 0xbb046594, | ||
| 1307 | 0x50bd0256, | ||
| 1308 | 0xfc0475fd, | ||
| 1309 | 0xc821f550, | ||
| 1310 | 0x0464b605, | ||
| 1311 | /* 0x062b: i2c_start_send */ | ||
| 1312 | 0xf01f11f4, | ||
| 1313 | 0x21f50037, | ||
| 1314 | 0xe7f10576, | ||
| 1315 | 0x21f41388, | ||
| 1316 | 0x0037f07f, | ||
| 1317 | 0x055421f5, | ||
| 1318 | 0x1388e7f1, | ||
| 1319 | /* 0x0647: i2c_start_out */ | ||
| 1320 | 0xf87f21f4, | ||
| 1321 | /* 0x0649: i2c_stop */ | ||
| 1322 | 0x0037f000, | ||
| 1323 | 0x055421f5, | ||
| 1324 | 0xf50037f0, | ||
| 1325 | 0xf1057621, | ||
| 1326 | 0xf403e8e7, | ||
| 1327 | 0x37f07f21, | ||
| 1328 | 0x5421f501, | ||
| 1329 | 0x88e7f105, | ||
| 1330 | 0x7f21f413, | ||
| 1331 | 0xf50137f0, | ||
| 1332 | 0xf1057621, | ||
| 1333 | 0xf41388e7, | ||
| 1334 | 0x00f87f21, | ||
| 1335 | /* 0x067c: i2c_bitw */ | ||
| 1336 | 0x057621f5, | ||
| 1337 | 0x03e8e7f1, | ||
| 1338 | 0xbb7f21f4, | ||
| 1339 | 0x65b60076, | ||
| 1340 | 0x9450f904, | ||
| 1341 | 0x56bb0465, | ||
| 1342 | 0xfd50bd02, | ||
| 1343 | 0x50fc0475, | ||
| 1344 | 0x05c821f5, | ||
| 1345 | 0xf40464b6, | ||
| 1346 | 0xe7f11811, | ||
| 1347 | 0x21f41388, | ||
| 1348 | 0x0037f07f, | ||
| 1349 | 0x055421f5, | ||
| 1350 | 0x1388e7f1, | ||
| 1351 | /* 0x06bb: i2c_bitw_out */ | ||
| 1352 | 0xf87f21f4, | ||
| 1353 | /* 0x06bd: i2c_bitr */ | ||
| 1354 | 0x0137f000, | ||
| 1355 | 0x057621f5, | ||
| 1356 | 0x03e8e7f1, | ||
| 1357 | 0xbb7f21f4, | ||
| 1358 | 0x65b60076, | ||
| 1359 | 0x9450f904, | ||
| 1360 | 0x56bb0465, | ||
| 1361 | 0xfd50bd02, | ||
| 1362 | 0x50fc0475, | ||
| 1363 | 0x05c821f5, | ||
| 1364 | 0xf40464b6, | ||
| 1365 | 0x21f51b11, | ||
| 1366 | 0x37f005b0, | ||
| 1367 | 0x5421f500, | ||
| 1368 | 0x88e7f105, | ||
| 1369 | 0x7f21f413, | ||
| 1370 | 0xf4013cf0, | ||
| 1371 | /* 0x0702: i2c_bitr_done */ | ||
| 1372 | 0x00f80131, | ||
| 1373 | /* 0x0704: i2c_get_byte */ | ||
| 1374 | 0xf00057f0, | ||
| 1375 | /* 0x070a: i2c_get_byte_next */ | ||
| 1376 | 0x54b60847, | ||
| 1377 | 0x0076bb01, | ||
| 1378 | 0xf90465b6, | ||
| 1379 | 0x04659450, | ||
| 1380 | 0xbd0256bb, | ||
| 1381 | 0x0475fd50, | ||
| 1382 | 0x21f550fc, | ||
| 1383 | 0x64b606bd, | ||
| 1384 | 0x2b11f404, | ||
| 1385 | 0xb60553fd, | ||
| 1386 | 0x1bf40142, | ||
| 1387 | 0x0137f0d8, | ||
| 1388 | 0xb60076bb, | ||
| 1389 | 0x50f90465, | ||
| 1390 | 0xbb046594, | ||
| 1391 | 0x50bd0256, | ||
| 1392 | 0xfc0475fd, | ||
| 1393 | 0x7c21f550, | ||
| 1394 | 0x0464b606, | ||
| 1395 | /* 0x0754: i2c_get_byte_done */ | ||
| 1396 | /* 0x0756: i2c_put_byte */ | ||
| 1397 | 0x47f000f8, | ||
| 1398 | /* 0x0759: i2c_put_byte_next */ | ||
| 1399 | 0x0142b608, | ||
| 1400 | 0xbb3854ff, | ||
| 1401 | 0x65b60076, | ||
| 1402 | 0x9450f904, | ||
| 1403 | 0x56bb0465, | ||
| 1404 | 0xfd50bd02, | ||
| 1405 | 0x50fc0475, | ||
| 1406 | 0x067c21f5, | ||
| 1407 | 0xf40464b6, | ||
| 1408 | 0x46b03411, | ||
| 1409 | 0xd81bf400, | ||
| 1410 | 0xb60076bb, | ||
| 1411 | 0x50f90465, | ||
| 1412 | 0xbb046594, | ||
| 1413 | 0x50bd0256, | ||
| 1414 | 0xfc0475fd, | ||
| 1415 | 0xbd21f550, | ||
| 1416 | 0x0464b606, | ||
| 1417 | 0xbb0f11f4, | ||
| 1418 | 0x36b00076, | ||
| 1419 | 0x061bf401, | ||
| 1420 | /* 0x07af: i2c_put_byte_done */ | ||
| 1421 | 0xf80132f4, | ||
| 1422 | /* 0x07b1: i2c_addr */ | ||
| 1423 | 0x0076bb00, | ||
| 1424 | 0xf90465b6, | ||
| 1425 | 0x04659450, | ||
| 1426 | 0xbd0256bb, | ||
| 1427 | 0x0475fd50, | ||
| 1428 | 0x21f550fc, | ||
| 1429 | 0x64b605ed, | ||
| 1430 | 0x2911f404, | ||
| 1431 | 0x012ec3e7, | ||
| 1432 | 0xfd0134b6, | ||
| 1433 | 0x76bb0553, | ||
| 1434 | 0x0465b600, | ||
| 1435 | 0x659450f9, | ||
| 1436 | 0x0256bb04, | ||
| 1437 | 0x75fd50bd, | ||
| 1438 | 0xf550fc04, | ||
| 1439 | 0xb6075621, | ||
| 1440 | /* 0x07f6: i2c_addr_done */ | ||
| 1441 | 0x00f80464, | ||
| 1442 | /* 0x07f8: i2c_acquire_addr */ | ||
| 1443 | 0xb6f8cec7, | ||
| 1444 | 0xe0b702e4, | ||
| 1445 | 0xee980bfc, | ||
| 1446 | /* 0x0807: i2c_acquire */ | ||
| 1447 | 0xf500f800, | ||
| 1448 | 0xf407f821, | ||
| 1449 | 0xd9f00421, | ||
| 1450 | 0x3f21f403, | ||
| 1451 | /* 0x0816: i2c_release */ | ||
| 1452 | 0x21f500f8, | ||
| 1453 | 0x21f407f8, | ||
| 1454 | 0x03daf004, | ||
| 1455 | 0xf83f21f4, | ||
| 1456 | /* 0x0825: i2c_recv */ | ||
| 1457 | 0x0132f400, | ||
| 1458 | 0xb6f8c1c7, | ||
| 1459 | 0x16b00214, | ||
| 1460 | 0x3a1ff528, | ||
| 1461 | 0xd413a001, | ||
| 1462 | 0x0032980b, | ||
| 1463 | 0x0bac13a0, | ||
| 1464 | 0xf4003198, | ||
| 1465 | 0xd0f90231, | ||
| 1466 | 0xd0f9e0f9, | ||
| 1467 | 0x000067f1, | ||
| 1468 | 0x100063f1, | ||
| 1469 | 0xbb016792, | ||
| 1470 | 0x65b60076, | ||
| 1471 | 0x9450f904, | ||
| 1472 | 0x56bb0465, | ||
| 1473 | 0xfd50bd02, | ||
| 1474 | 0x50fc0475, | ||
| 1475 | 0x080721f5, | ||
| 1476 | 0xfc0464b6, | ||
| 1477 | 0x00d6b0d0, | ||
| 1478 | 0x00b31bf5, | ||
| 1479 | 0xbb0057f0, | ||
| 1480 | 0x65b60076, | ||
| 1481 | 0x9450f904, | ||
| 1482 | 0x56bb0465, | ||
| 1483 | 0xfd50bd02, | ||
| 1484 | 0x50fc0475, | ||
| 1485 | 0x07b121f5, | ||
| 1486 | 0xf50464b6, | ||
| 1487 | 0xc700d011, | ||
| 1488 | 0x76bbe0c5, | ||
| 1489 | 0x0465b600, | ||
| 1490 | 0x659450f9, | ||
| 1491 | 0x0256bb04, | ||
| 1492 | 0x75fd50bd, | ||
| 1493 | 0xf550fc04, | ||
| 1494 | 0xb6075621, | ||
| 1495 | 0x11f50464, | ||
| 1496 | 0x57f000ad, | ||
| 1497 | 0x0076bb01, | ||
| 1498 | 0xf90465b6, | ||
| 1499 | 0x04659450, | ||
| 1500 | 0xbd0256bb, | ||
| 1501 | 0x0475fd50, | ||
| 1502 | 0x21f550fc, | ||
| 1503 | 0x64b607b1, | ||
| 1504 | 0x8a11f504, | ||
| 1505 | 0x0076bb00, | ||
| 1506 | 0xf90465b6, | ||
| 1507 | 0x04659450, | ||
| 1508 | 0xbd0256bb, | ||
| 1509 | 0x0475fd50, | ||
| 1510 | 0x21f550fc, | ||
| 1511 | 0x64b60704, | ||
| 1512 | 0x6a11f404, | ||
| 1513 | 0xbbe05bcb, | ||
| 1514 | 0x65b60076, | ||
| 1515 | 0x9450f904, | ||
| 1516 | 0x56bb0465, | ||
| 1517 | 0xfd50bd02, | ||
| 1518 | 0x50fc0475, | ||
| 1519 | 0x064921f5, | ||
| 1520 | 0xb90464b6, | ||
| 1521 | 0x74bd025b, | ||
| 1522 | /* 0x092b: i2c_recv_not_rd08 */ | ||
| 1523 | 0xb0430ef4, | ||
| 1524 | 0x1bf401d6, | ||
| 1525 | 0x0057f03d, | ||
| 1526 | 0x07b121f5, | ||
| 1527 | 0xc73311f4, | ||
| 1528 | 0x21f5e0c5, | ||
| 1529 | 0x11f40756, | ||
| 1530 | 0x0057f029, | ||
| 1531 | 0x07b121f5, | ||
| 1532 | 0xc71f11f4, | ||
| 1533 | 0x21f5e0b5, | ||
| 1534 | 0x11f40756, | ||
| 1535 | 0x4921f515, | ||
| 1536 | 0xc774bd06, | ||
| 1537 | 0x1bf408c5, | ||
| 1538 | 0x0232f409, | ||
| 1539 | /* 0x096b: i2c_recv_not_wr08 */ | ||
| 1540 | /* 0x096b: i2c_recv_done */ | ||
| 1541 | 0xc7030ef4, | ||
| 1542 | 0x21f5f8ce, | ||
| 1543 | 0xe0fc0816, | ||
| 1544 | 0x12f4d0fc, | ||
| 1545 | 0x027cb90a, | ||
| 1546 | 0x02b921f5, | ||
| 1547 | /* 0x0980: i2c_recv_exit */ | ||
| 1548 | /* 0x0982: i2c_init */ | ||
| 1549 | 0x00f800f8, | ||
| 1550 | /* 0x0984: test_recv */ | ||
| 1179 | 0x05d817f1, | 1551 | 0x05d817f1, |
| 1180 | 0xcf0614b6, | 1552 | 0xcf0614b6, |
| 1181 | 0x10b60011, | 1553 | 0x10b60011, |
| @@ -1185,12 +1557,12 @@ uint32_t nvc0_pwr_code[] = { | |||
| 1185 | 0x00e7f104, | 1557 | 0x00e7f104, |
| 1186 | 0x4fe3f1d9, | 1558 | 0x4fe3f1d9, |
| 1187 | 0xf521f513, | 1559 | 0xf521f513, |
| 1188 | /* 0x057b: test_init */ | 1560 | /* 0x09ab: test_init */ |
| 1189 | 0xf100f801, | 1561 | 0xf100f801, |
| 1190 | 0xf50800e7, | 1562 | 0xf50800e7, |
| 1191 | 0xf801f521, | 1563 | 0xf801f521, |
| 1192 | /* 0x0585: idle_recv */ | 1564 | /* 0x09b5: idle_recv */ |
| 1193 | /* 0x0587: idle */ | 1565 | /* 0x09b7: idle */ |
| 1194 | 0xf400f800, | 1566 | 0xf400f800, |
| 1195 | 0x17f10031, | 1567 | 0x17f10031, |
| 1196 | 0x14b605d4, | 1568 | 0x14b605d4, |
| @@ -1198,32 +1570,20 @@ uint32_t nvc0_pwr_code[] = { | |||
| 1198 | 0xf10110b6, | 1570 | 0xf10110b6, |
| 1199 | 0xb605d407, | 1571 | 0xb605d407, |
| 1200 | 0x01d00604, | 1572 | 0x01d00604, |
| 1201 | /* 0x05a3: idle_loop */ | 1573 | /* 0x09d3: idle_loop */ |
| 1202 | 0xf004bd00, | 1574 | 0xf004bd00, |
| 1203 | 0x32f45817, | 1575 | 0x32f45817, |
| 1204 | /* 0x05a9: idle_proc */ | 1576 | /* 0x09d9: idle_proc */ |
| 1205 | /* 0x05a9: idle_proc_exec */ | 1577 | /* 0x09d9: idle_proc_exec */ |
| 1206 | 0xb910f902, | 1578 | 0xb910f902, |
| 1207 | 0x21f5021e, | 1579 | 0x21f5021e, |
| 1208 | 0x10fc02c2, | 1580 | 0x10fc02c2, |
| 1209 | 0xf40911f4, | 1581 | 0xf40911f4, |
| 1210 | 0x0ef40231, | 1582 | 0x0ef40231, |
| 1211 | /* 0x05bd: idle_proc_next */ | 1583 | /* 0x09ed: idle_proc_next */ |
| 1212 | 0x5810b6ef, | 1584 | 0x5810b6ef, |
| 1213 | 0xf4061fb8, | 1585 | 0xf4061fb8, |
| 1214 | 0x02f4e61b, | 1586 | 0x02f4e61b, |
| 1215 | 0x0028f4dd, | 1587 | 0x0028f4dd, |
| 1216 | 0x00bb0ef4, | 1588 | 0x00bb0ef4, |
| 1217 | 0x00000000, | ||
| 1218 | 0x00000000, | ||
| 1219 | 0x00000000, | ||
| 1220 | 0x00000000, | ||
| 1221 | 0x00000000, | ||
| 1222 | 0x00000000, | ||
| 1223 | 0x00000000, | ||
| 1224 | 0x00000000, | ||
| 1225 | 0x00000000, | ||
| 1226 | 0x00000000, | ||
| 1227 | 0x00000000, | ||
| 1228 | 0x00000000, | ||
| 1229 | }; | 1589 | }; |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc index 32d65ea254dd..8a89dfe41ce1 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc +++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc | |||
| @@ -37,6 +37,7 @@ | |||
| 37 | #include "host.fuc" | 37 | #include "host.fuc" |
| 38 | #include "memx.fuc" | 38 | #include "memx.fuc" |
| 39 | #include "perf.fuc" | 39 | #include "perf.fuc" |
| 40 | #include "i2c_.fuc" | ||
| 40 | #include "test.fuc" | 41 | #include "test.fuc" |
| 41 | #include "idle.fuc" | 42 | #include "idle.fuc" |
| 42 | #undef INCLUDE_PROC | 43 | #undef INCLUDE_PROC |
| @@ -46,6 +47,7 @@ | |||
| 46 | #include "host.fuc" | 47 | #include "host.fuc" |
| 47 | #include "memx.fuc" | 48 | #include "memx.fuc" |
| 48 | #include "perf.fuc" | 49 | #include "perf.fuc" |
| 50 | #include "i2c_.fuc" | ||
| 49 | #include "test.fuc" | 51 | #include "test.fuc" |
| 50 | #include "idle.fuc" | 52 | #include "idle.fuc" |
| 51 | #undef INCLUDE_DATA | 53 | #undef INCLUDE_DATA |
| @@ -57,6 +59,7 @@ | |||
| 57 | #include "host.fuc" | 59 | #include "host.fuc" |
| 58 | #include "memx.fuc" | 60 | #include "memx.fuc" |
| 59 | #include "perf.fuc" | 61 | #include "perf.fuc" |
| 62 | #include "i2c_.fuc" | ||
| 60 | #include "test.fuc" | 63 | #include "test.fuc" |
| 61 | #include "idle.fuc" | 64 | #include "idle.fuc" |
| 62 | #undef INCLUDE_CODE | 65 | #undef INCLUDE_CODE |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h index ce65e2a4b789..5e24c6bc041d 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h +++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h | |||
| @@ -89,33 +89,13 @@ uint32_t nvd0_pwr_data[] = { | |||
| 89 | 0x00000000, | 89 | 0x00000000, |
| 90 | 0x00000000, | 90 | 0x00000000, |
| 91 | 0x00000000, | 91 | 0x00000000, |
| 92 | 0x54534554, | 92 | 0x5f433249, |
| 93 | 0x000004eb, | 93 | 0x000008e3, |
| 94 | 0x000004ca, | 94 | 0x00000786, |
| 95 | 0x00000000, | ||
| 96 | 0x00000000, | ||
| 97 | 0x00000000, | ||
| 98 | 0x00000000, | ||
| 99 | 0x00000000, | ||
| 100 | 0x00000000, | ||
| 101 | 0x00000000, | ||
| 102 | 0x00000000, | ||
| 103 | 0x00000000, | ||
| 104 | 0x00000000, | ||
| 105 | 0x00000000, | ||
| 106 | 0x00000000, | ||
| 107 | 0x00000000, | ||
| 108 | 0x00000000, | ||
| 109 | 0x00000000, | ||
| 110 | 0x00000000, | 95 | 0x00000000, |
| 111 | 0x00000000, | 96 | 0x00000000, |
| 112 | 0x00000000, | 97 | 0x00000000, |
| 113 | 0x00000000, | 98 | 0x00000000, |
| 114 | 0x454c4449, | ||
| 115 | 0x000004f7, | ||
| 116 | 0x000004f5, | ||
| 117 | 0x00000000, | ||
| 118 | 0x00000000, | ||
| 119 | 0x00000000, | 99 | 0x00000000, |
| 120 | 0x00000000, | 100 | 0x00000000, |
| 121 | 0x00000000, | 101 | 0x00000000, |
| @@ -131,14 +111,13 @@ uint32_t nvd0_pwr_data[] = { | |||
| 131 | 0x00000000, | 111 | 0x00000000, |
| 132 | 0x00000000, | 112 | 0x00000000, |
| 133 | 0x00000000, | 113 | 0x00000000, |
| 114 | 0x54534554, | ||
| 115 | 0x00000906, | ||
| 116 | 0x000008e5, | ||
| 134 | 0x00000000, | 117 | 0x00000000, |
| 135 | 0x00000000, | 118 | 0x00000000, |
| 136 | /* 0x0210: proc_list_tail */ | ||
| 137 | /* 0x0210: time_prev */ | ||
| 138 | 0x00000000, | 119 | 0x00000000, |
| 139 | /* 0x0214: time_next */ | ||
| 140 | 0x00000000, | 120 | 0x00000000, |
| 141 | /* 0x0218: fifo_queue */ | ||
| 142 | 0x00000000, | 121 | 0x00000000, |
| 143 | 0x00000000, | 122 | 0x00000000, |
| 144 | 0x00000000, | 123 | 0x00000000, |
| @@ -154,6 +133,9 @@ uint32_t nvd0_pwr_data[] = { | |||
| 154 | 0x00000000, | 133 | 0x00000000, |
| 155 | 0x00000000, | 134 | 0x00000000, |
| 156 | 0x00000000, | 135 | 0x00000000, |
| 136 | 0x454c4449, | ||
| 137 | 0x00000912, | ||
| 138 | 0x00000910, | ||
| 157 | 0x00000000, | 139 | 0x00000000, |
| 158 | 0x00000000, | 140 | 0x00000000, |
| 159 | 0x00000000, | 141 | 0x00000000, |
| @@ -171,11 +153,14 @@ uint32_t nvd0_pwr_data[] = { | |||
| 171 | 0x00000000, | 153 | 0x00000000, |
| 172 | 0x00000000, | 154 | 0x00000000, |
| 173 | 0x00000000, | 155 | 0x00000000, |
| 174 | /* 0x0298: rfifo_queue */ | ||
| 175 | 0x00000000, | 156 | 0x00000000, |
| 176 | 0x00000000, | 157 | 0x00000000, |
| 158 | /* 0x0268: proc_list_tail */ | ||
| 159 | /* 0x0268: time_prev */ | ||
| 177 | 0x00000000, | 160 | 0x00000000, |
| 161 | /* 0x026c: time_next */ | ||
| 178 | 0x00000000, | 162 | 0x00000000, |
| 163 | /* 0x0270: fifo_queue */ | ||
| 179 | 0x00000000, | 164 | 0x00000000, |
| 180 | 0x00000000, | 165 | 0x00000000, |
| 181 | 0x00000000, | 166 | 0x00000000, |
| @@ -204,31 +189,11 @@ uint32_t nvd0_pwr_data[] = { | |||
| 204 | 0x00000000, | 189 | 0x00000000, |
| 205 | 0x00000000, | 190 | 0x00000000, |
| 206 | 0x00000000, | 191 | 0x00000000, |
| 207 | /* 0x0318: memx_func_head */ | ||
| 208 | 0x00010000, | ||
| 209 | 0x00000000, | ||
| 210 | 0x000003f4, | ||
| 211 | /* 0x0324: memx_func_next */ | ||
| 212 | 0x00000001, | ||
| 213 | 0x00000000, | ||
| 214 | 0x00000415, | ||
| 215 | 0x00000002, | ||
| 216 | 0x00000002, | ||
| 217 | 0x00000430, | ||
| 218 | 0x00040003, | ||
| 219 | 0x00000000, | ||
| 220 | 0x00000458, | ||
| 221 | 0x00010004, | ||
| 222 | 0x00000000, | ||
| 223 | 0x00000472, | ||
| 224 | /* 0x0354: memx_func_tail */ | ||
| 225 | /* 0x0354: memx_data_head */ | ||
| 226 | 0x00000000, | ||
| 227 | 0x00000000, | ||
| 228 | 0x00000000, | 192 | 0x00000000, |
| 229 | 0x00000000, | 193 | 0x00000000, |
| 230 | 0x00000000, | 194 | 0x00000000, |
| 231 | 0x00000000, | 195 | 0x00000000, |
| 196 | /* 0x02f0: rfifo_queue */ | ||
| 232 | 0x00000000, | 197 | 0x00000000, |
| 233 | 0x00000000, | 198 | 0x00000000, |
| 234 | 0x00000000, | 199 | 0x00000000, |
| @@ -261,10 +226,25 @@ uint32_t nvd0_pwr_data[] = { | |||
| 261 | 0x00000000, | 226 | 0x00000000, |
| 262 | 0x00000000, | 227 | 0x00000000, |
| 263 | 0x00000000, | 228 | 0x00000000, |
| 229 | /* 0x0370: memx_func_head */ | ||
| 230 | 0x00010000, | ||
| 264 | 0x00000000, | 231 | 0x00000000, |
| 232 | 0x000003f4, | ||
| 233 | /* 0x037c: memx_func_next */ | ||
| 234 | 0x00000001, | ||
| 265 | 0x00000000, | 235 | 0x00000000, |
| 236 | 0x00000415, | ||
| 237 | 0x00000002, | ||
| 238 | 0x00000002, | ||
| 239 | 0x00000430, | ||
| 240 | 0x00040003, | ||
| 266 | 0x00000000, | 241 | 0x00000000, |
| 242 | 0x00000458, | ||
| 243 | 0x00010004, | ||
| 267 | 0x00000000, | 244 | 0x00000000, |
| 245 | 0x00000472, | ||
| 246 | /* 0x03ac: memx_func_tail */ | ||
| 247 | /* 0x03ac: memx_data_head */ | ||
| 268 | 0x00000000, | 248 | 0x00000000, |
| 269 | 0x00000000, | 249 | 0x00000000, |
| 270 | 0x00000000, | 250 | 0x00000000, |
| @@ -735,7 +715,6 @@ uint32_t nvd0_pwr_data[] = { | |||
| 735 | 0x00000000, | 715 | 0x00000000, |
| 736 | 0x00000000, | 716 | 0x00000000, |
| 737 | 0x00000000, | 717 | 0x00000000, |
| 738 | /* 0x0b54: memx_data_tail */ | ||
| 739 | 0x00000000, | 718 | 0x00000000, |
| 740 | 0x00000000, | 719 | 0x00000000, |
| 741 | 0x00000000, | 720 | 0x00000000, |
| @@ -778,6 +757,29 @@ uint32_t nvd0_pwr_data[] = { | |||
| 778 | 0x00000000, | 757 | 0x00000000, |
| 779 | 0x00000000, | 758 | 0x00000000, |
| 780 | 0x00000000, | 759 | 0x00000000, |
| 760 | /* 0x0bac: memx_data_tail */ | ||
| 761 | /* 0x0bac: i2c_scl_map */ | ||
| 762 | 0x00000400, | ||
| 763 | 0x00000800, | ||
| 764 | 0x00001000, | ||
| 765 | 0x00002000, | ||
| 766 | 0x00004000, | ||
| 767 | 0x00008000, | ||
| 768 | 0x00010000, | ||
| 769 | 0x00020000, | ||
| 770 | 0x00040000, | ||
| 771 | 0x00080000, | ||
| 772 | /* 0x0bd4: i2c_sda_map */ | ||
| 773 | 0x00100000, | ||
| 774 | 0x00200000, | ||
| 775 | 0x00400000, | ||
| 776 | 0x00800000, | ||
| 777 | 0x01000000, | ||
| 778 | 0x02000000, | ||
| 779 | 0x04000000, | ||
| 780 | 0x08000000, | ||
| 781 | 0x10000000, | ||
| 782 | 0x20000000, | ||
| 781 | 0x00000000, | 783 | 0x00000000, |
| 782 | }; | 784 | }; |
| 783 | 785 | ||
| @@ -786,14 +788,14 @@ uint32_t nvd0_pwr_code[] = { | |||
| 786 | /* 0x0004: rd32 */ | 788 | /* 0x0004: rd32 */ |
| 787 | 0x07a007f1, | 789 | 0x07a007f1, |
| 788 | 0xbd000ed0, | 790 | 0xbd000ed0, |
| 789 | 0x01e7f004, | 791 | 0x01d7f004, |
| 790 | 0xf101e3f0, | 792 | 0xf101d3f0, |
| 791 | 0xd007ac07, | 793 | 0xd007ac07, |
| 792 | 0x04bd000e, | 794 | 0x04bd000d, |
| 793 | /* 0x001c: rd32_wait */ | 795 | /* 0x001c: rd32_wait */ |
| 794 | 0x07ace7f1, | 796 | 0x07acd7f1, |
| 795 | 0xf100eecf, | 797 | 0xf100ddcf, |
| 796 | 0xf47000e4, | 798 | 0xf47000d4, |
| 797 | 0xd7f1f51b, | 799 | 0xd7f1f51b, |
| 798 | 0xddcf07a4, | 800 | 0xddcf07a4, |
| 799 | /* 0x0033: wr32 */ | 801 | /* 0x0033: wr32 */ |
| @@ -802,14 +804,14 @@ uint32_t nvd0_pwr_code[] = { | |||
| 802 | 0x04bd000e, | 804 | 0x04bd000e, |
| 803 | 0x07a407f1, | 805 | 0x07a407f1, |
| 804 | 0xbd000dd0, | 806 | 0xbd000dd0, |
| 805 | 0x02e7f004, | 807 | 0x02d7f004, |
| 806 | 0xf0f0e5f0, | 808 | 0xf0f0d5f0, |
| 807 | 0x07f101e3, | 809 | 0x07f101d3, |
| 808 | 0x0ed007ac, | 810 | 0x0dd007ac, |
| 809 | /* 0x0057: wr32_wait */ | 811 | /* 0x0057: wr32_wait */ |
| 810 | 0xf104bd00, | 812 | 0xf104bd00, |
| 811 | 0xcf07ace7, | 813 | 0xcf07acd7, |
| 812 | 0xe4f100ee, | 814 | 0xd4f100dd, |
| 813 | 0x1bf47000, | 815 | 0x1bf47000, |
| 814 | /* 0x0067: nsec */ | 816 | /* 0x0067: nsec */ |
| 815 | 0xf000f8f5, | 817 | 0xf000f8f5, |
| @@ -836,21 +838,21 @@ uint32_t nvd0_pwr_code[] = { | |||
| 836 | 0x9800f8e2, | 838 | 0x9800f8e2, |
| 837 | 0x96b003e9, | 839 | 0x96b003e9, |
| 838 | 0x2a0bf400, | 840 | 0x2a0bf400, |
| 839 | 0xbb840a98, | 841 | 0xbb9a0a98, |
| 840 | 0x1cf4029a, | 842 | 0x1cf4029a, |
| 841 | 0x01d7f00f, | 843 | 0x01d7f00f, |
| 842 | 0x020621f5, | 844 | 0x020621f5, |
| 843 | 0x0ef494bd, | 845 | 0x0ef494bd, |
| 844 | /* 0x00c5: intr_watchdog_next_time */ | 846 | /* 0x00c5: intr_watchdog_next_time */ |
| 845 | 0x850a9815, | 847 | 0x9b0a9815, |
| 846 | 0xf400a6b0, | 848 | 0xf400a6b0, |
| 847 | 0x9ab8090b, | 849 | 0x9ab8090b, |
| 848 | 0x061cf406, | 850 | 0x061cf406, |
| 849 | /* 0x00d4: intr_watchdog_next_time_set */ | 851 | /* 0x00d4: intr_watchdog_next_time_set */ |
| 850 | /* 0x00d7: intr_watchdog_next_proc */ | 852 | /* 0x00d7: intr_watchdog_next_proc */ |
| 851 | 0x80850980, | 853 | 0x809b0980, |
| 852 | 0xe0b603e9, | 854 | 0xe0b603e9, |
| 853 | 0x10e6b158, | 855 | 0x68e6b158, |
| 854 | 0xc61bf402, | 856 | 0xc61bf402, |
| 855 | /* 0x00e6: intr */ | 857 | /* 0x00e6: intr */ |
| 856 | 0x00f900f8, | 858 | 0x00f900f8, |
| @@ -868,15 +870,15 @@ uint32_t nvd0_pwr_code[] = { | |||
| 868 | 0x0887f004, | 870 | 0x0887f004, |
| 869 | 0xc40088cf, | 871 | 0xc40088cf, |
| 870 | 0x0bf40289, | 872 | 0x0bf40289, |
| 871 | 0x85008020, | 873 | 0x9b008020, |
| 872 | 0xf458e7f0, | 874 | 0xf458e7f0, |
| 873 | 0x0998a721, | 875 | 0x0998a721, |
| 874 | 0x0096b085, | 876 | 0x0096b09b, |
| 875 | 0xf00e0bf4, | 877 | 0xf00e0bf4, |
| 876 | 0x09d03407, | 878 | 0x09d03407, |
| 877 | 0x8004bd00, | 879 | 0x8004bd00, |
| 878 | /* 0x013e: intr_skip_watchdog */ | 880 | /* 0x013e: intr_skip_watchdog */ |
| 879 | 0x89e48409, | 881 | 0x89e49a09, |
| 880 | 0x0bf40800, | 882 | 0x0bf40800, |
| 881 | 0x8897f13c, | 883 | 0x8897f13c, |
| 882 | 0x0099cf06, | 884 | 0x0099cf06, |
| @@ -929,7 +931,7 @@ uint32_t nvd0_pwr_code[] = { | |||
| 929 | 0x0ed03407, | 931 | 0x0ed03407, |
| 930 | 0x8004bd00, | 932 | 0x8004bd00, |
| 931 | /* 0x01f6: timer_enable */ | 933 | /* 0x01f6: timer_enable */ |
| 932 | 0x87f0840e, | 934 | 0x87f09a0e, |
| 933 | 0x3807f001, | 935 | 0x3807f001, |
| 934 | 0xbd0008d0, | 936 | 0xbd0008d0, |
| 935 | /* 0x0201: timer_done */ | 937 | /* 0x0201: timer_done */ |
| @@ -960,7 +962,7 @@ uint32_t nvd0_pwr_code[] = { | |||
| 960 | 0x06aeb800, | 962 | 0x06aeb800, |
| 961 | 0xb6100bf4, | 963 | 0xb6100bf4, |
| 962 | 0x86b15880, | 964 | 0x86b15880, |
| 963 | 0x1bf40210, | 965 | 0x1bf40268, |
| 964 | 0x0132f4f0, | 966 | 0x0132f4f0, |
| 965 | /* 0x0264: find_done */ | 967 | /* 0x0264: find_done */ |
| 966 | 0xfc028eb9, | 968 | 0xfc028eb9, |
| @@ -1024,7 +1026,7 @@ uint32_t nvd0_pwr_code[] = { | |||
| 1024 | 0x0bf40612, | 1026 | 0x0bf40612, |
| 1025 | 0x071ec42f, | 1027 | 0x071ec42f, |
| 1026 | 0xb704ee94, | 1028 | 0xb704ee94, |
| 1027 | 0x980218e0, | 1029 | 0x980270e0, |
| 1028 | 0xec9803eb, | 1030 | 0xec9803eb, |
| 1029 | 0x01ed9802, | 1031 | 0x01ed9802, |
| 1030 | 0xf500ee98, | 1032 | 0xf500ee98, |
| @@ -1048,7 +1050,7 @@ uint32_t nvd0_pwr_code[] = { | |||
| 1048 | 0xec0bf406, | 1050 | 0xec0bf406, |
| 1049 | 0xb60723c4, | 1051 | 0xb60723c4, |
| 1050 | 0x30b70434, | 1052 | 0x30b70434, |
| 1051 | 0x3b800298, | 1053 | 0x3b8002f0, |
| 1052 | 0x023c8003, | 1054 | 0x023c8003, |
| 1053 | 0x80013d80, | 1055 | 0x80013d80, |
| 1054 | 0x20b6003e, | 1056 | 0x20b6003e, |
| @@ -1061,12 +1063,12 @@ uint32_t nvd0_pwr_code[] = { | |||
| 1061 | /* 0x03be: host_init */ | 1063 | /* 0x03be: host_init */ |
| 1062 | 0x17f100f8, | 1064 | 0x17f100f8, |
| 1063 | 0x14b60080, | 1065 | 0x14b60080, |
| 1064 | 0x1815f110, | 1066 | 0x7015f110, |
| 1065 | 0xd007f102, | 1067 | 0xd007f102, |
| 1066 | 0x0001d004, | 1068 | 0x0001d004, |
| 1067 | 0x17f104bd, | 1069 | 0x17f104bd, |
| 1068 | 0x14b60080, | 1070 | 0x14b60080, |
| 1069 | 0x9815f110, | 1071 | 0xf015f110, |
| 1070 | 0xdc07f102, | 1072 | 0xdc07f102, |
| 1071 | 0x0001d004, | 1073 | 0x0001d004, |
| 1072 | 0x17f004bd, | 1074 | 0x17f004bd, |
| @@ -1122,13 +1124,13 @@ uint32_t nvd0_pwr_code[] = { | |||
| 1122 | 0x10b60013, | 1124 | 0x10b60013, |
| 1123 | 0x10349504, | 1125 | 0x10349504, |
| 1124 | 0x980c30f0, | 1126 | 0x980c30f0, |
| 1125 | 0x55f9c835, | 1127 | 0x55f9de35, |
| 1126 | 0xf40612b8, | 1128 | 0xf40612b8, |
| 1127 | 0xd0fcec1e, | 1129 | 0xd0fcec1e, |
| 1128 | 0x21f5e0fc, | 1130 | 0x21f5e0fc, |
| 1129 | 0x00f8026b, | 1131 | 0x00f8026b, |
| 1130 | /* 0x04a8: memx_info */ | 1132 | /* 0x04a8: memx_info */ |
| 1131 | 0x0354c7f1, | 1133 | 0x03acc7f1, |
| 1132 | 0x0800b7f1, | 1134 | 0x0800b7f1, |
| 1133 | 0x026b21f5, | 1135 | 0x026b21f5, |
| 1134 | /* 0x04b6: memx_recv */ | 1136 | /* 0x04b6: memx_recv */ |
| @@ -1140,49 +1142,342 @@ uint32_t nvd0_pwr_code[] = { | |||
| 1140 | /* 0x04c6: perf_recv */ | 1142 | /* 0x04c6: perf_recv */ |
| 1141 | 0x00f800f8, | 1143 | 0x00f800f8, |
| 1142 | /* 0x04c8: perf_init */ | 1144 | /* 0x04c8: perf_init */ |
| 1143 | /* 0x04ca: test_recv */ | 1145 | /* 0x04ca: i2c_drive_scl */ |
| 1144 | 0x17f100f8, | 1146 | 0x36b000f8, |
| 1145 | 0x11cf05d8, | 1147 | 0x0e0bf400, |
| 1146 | 0x0110b600, | 1148 | 0x07e007f1, |
| 1147 | 0x05d807f1, | ||
| 1148 | 0xbd0001d0, | 1149 | 0xbd0001d0, |
| 1149 | 0x00e7f104, | 1150 | /* 0x04db: i2c_drive_scl_lo */ |
| 1150 | 0x4fe3f1d9, | 1151 | 0xf100f804, |
| 1151 | 0xb621f513, | 1152 | 0xd007e407, |
| 1152 | /* 0x04eb: test_init */ | 1153 | 0x04bd0001, |
| 1153 | 0xf100f801, | 1154 | /* 0x04e6: i2c_drive_sda */ |
| 1154 | 0xf50800e7, | 1155 | 0x36b000f8, |
| 1155 | 0xf801b621, | 1156 | 0x0e0bf400, |
| 1156 | /* 0x04f5: idle_recv */ | 1157 | 0x07e007f1, |
| 1157 | /* 0x04f7: idle */ | 1158 | 0xbd0002d0, |
| 1158 | 0xf400f800, | 1159 | /* 0x04f7: i2c_drive_sda_lo */ |
| 1159 | 0x17f10031, | 1160 | 0xf100f804, |
| 1160 | 0x11cf05d4, | 1161 | 0xd007e407, |
| 1161 | 0x0110b600, | 1162 | 0x04bd0002, |
| 1162 | 0x05d407f1, | 1163 | /* 0x0502: i2c_sense_scl */ |
| 1163 | 0xbd0001d0, | 1164 | 0x32f400f8, |
| 1164 | /* 0x050d: idle_loop */ | 1165 | 0xc437f101, |
| 1165 | 0x5817f004, | 1166 | 0x0033cf07, |
| 1166 | /* 0x0513: idle_proc */ | 1167 | 0xf40431fd, |
| 1167 | /* 0x0513: idle_proc_exec */ | 1168 | 0x31f4060b, |
| 1168 | 0xf90232f4, | 1169 | /* 0x0515: i2c_sense_scl_done */ |
| 1169 | 0x021eb910, | 1170 | /* 0x0517: i2c_sense_sda */ |
| 1170 | 0x027421f5, | 1171 | 0xf400f801, |
| 1171 | 0x11f410fc, | 1172 | 0x37f10132, |
| 1172 | 0x0231f409, | 1173 | 0x33cf07c4, |
| 1173 | /* 0x0527: idle_proc_next */ | 1174 | 0x0432fd00, |
| 1174 | 0xb6ef0ef4, | 1175 | 0xf4060bf4, |
| 1175 | 0x1fb85810, | 1176 | /* 0x052a: i2c_sense_sda_done */ |
| 1176 | 0xe61bf406, | 1177 | 0x00f80131, |
| 1177 | 0xf4dd02f4, | 1178 | /* 0x052c: i2c_raise_scl */ |
| 1178 | 0x0ef40028, | 1179 | 0x47f140f9, |
| 1179 | 0x000000c1, | 1180 | 0x37f00898, |
| 1180 | 0x00000000, | 1181 | 0xca21f501, |
| 1181 | 0x00000000, | 1182 | /* 0x0539: i2c_raise_scl_wait */ |
| 1182 | 0x00000000, | 1183 | 0xe8e7f104, |
| 1183 | 0x00000000, | 1184 | 0x6721f403, |
| 1184 | 0x00000000, | 1185 | 0x050221f5, |
| 1185 | 0x00000000, | 1186 | 0xb60901f4, |
| 1187 | 0x1bf40142, | ||
| 1188 | /* 0x054d: i2c_raise_scl_done */ | ||
| 1189 | 0xf840fcef, | ||
| 1190 | /* 0x0551: i2c_start */ | ||
| 1191 | 0x0221f500, | ||
| 1192 | 0x0d11f405, | ||
| 1193 | 0x051721f5, | ||
| 1194 | 0xf40611f4, | ||
| 1195 | /* 0x0562: i2c_start_rep */ | ||
| 1196 | 0x37f0300e, | ||
| 1197 | 0xca21f500, | ||
| 1198 | 0x0137f004, | ||
| 1199 | 0x04e621f5, | ||
| 1200 | 0xb60076bb, | ||
| 1201 | 0x50f90465, | ||
| 1202 | 0xbb046594, | ||
| 1203 | 0x50bd0256, | ||
| 1204 | 0xfc0475fd, | ||
| 1205 | 0x2c21f550, | ||
| 1206 | 0x0464b605, | ||
| 1207 | /* 0x058f: i2c_start_send */ | ||
| 1208 | 0xf01f11f4, | ||
| 1209 | 0x21f50037, | ||
| 1210 | 0xe7f104e6, | ||
| 1211 | 0x21f41388, | ||
| 1212 | 0x0037f067, | ||
| 1213 | 0x04ca21f5, | ||
| 1214 | 0x1388e7f1, | ||
| 1215 | /* 0x05ab: i2c_start_out */ | ||
| 1216 | 0xf86721f4, | ||
| 1217 | /* 0x05ad: i2c_stop */ | ||
| 1218 | 0x0037f000, | ||
| 1219 | 0x04ca21f5, | ||
| 1220 | 0xf50037f0, | ||
| 1221 | 0xf104e621, | ||
| 1222 | 0xf403e8e7, | ||
| 1223 | 0x37f06721, | ||
| 1224 | 0xca21f501, | ||
| 1225 | 0x88e7f104, | ||
| 1226 | 0x6721f413, | ||
| 1227 | 0xf50137f0, | ||
| 1228 | 0xf104e621, | ||
| 1229 | 0xf41388e7, | ||
| 1230 | 0x00f86721, | ||
| 1231 | /* 0x05e0: i2c_bitw */ | ||
| 1232 | 0x04e621f5, | ||
| 1233 | 0x03e8e7f1, | ||
| 1234 | 0xbb6721f4, | ||
| 1235 | 0x65b60076, | ||
| 1236 | 0x9450f904, | ||
| 1237 | 0x56bb0465, | ||
| 1238 | 0xfd50bd02, | ||
| 1239 | 0x50fc0475, | ||
| 1240 | 0x052c21f5, | ||
| 1241 | 0xf40464b6, | ||
| 1242 | 0xe7f11811, | ||
| 1243 | 0x21f41388, | ||
| 1244 | 0x0037f067, | ||
| 1245 | 0x04ca21f5, | ||
| 1246 | 0x1388e7f1, | ||
| 1247 | /* 0x061f: i2c_bitw_out */ | ||
| 1248 | 0xf86721f4, | ||
| 1249 | /* 0x0621: i2c_bitr */ | ||
| 1250 | 0x0137f000, | ||
| 1251 | 0x04e621f5, | ||
| 1252 | 0x03e8e7f1, | ||
| 1253 | 0xbb6721f4, | ||
| 1254 | 0x65b60076, | ||
| 1255 | 0x9450f904, | ||
| 1256 | 0x56bb0465, | ||
| 1257 | 0xfd50bd02, | ||
| 1258 | 0x50fc0475, | ||
| 1259 | 0x052c21f5, | ||
| 1260 | 0xf40464b6, | ||
| 1261 | 0x21f51b11, | ||
| 1262 | 0x37f00517, | ||
| 1263 | 0xca21f500, | ||
| 1264 | 0x88e7f104, | ||
| 1265 | 0x6721f413, | ||
| 1266 | 0xf4013cf0, | ||
| 1267 | /* 0x0666: i2c_bitr_done */ | ||
| 1268 | 0x00f80131, | ||
| 1269 | /* 0x0668: i2c_get_byte */ | ||
| 1270 | 0xf00057f0, | ||
| 1271 | /* 0x066e: i2c_get_byte_next */ | ||
| 1272 | 0x54b60847, | ||
| 1273 | 0x0076bb01, | ||
| 1274 | 0xf90465b6, | ||
| 1275 | 0x04659450, | ||
| 1276 | 0xbd0256bb, | ||
| 1277 | 0x0475fd50, | ||
| 1278 | 0x21f550fc, | ||
| 1279 | 0x64b60621, | ||
| 1280 | 0x2b11f404, | ||
| 1281 | 0xb60553fd, | ||
| 1282 | 0x1bf40142, | ||
| 1283 | 0x0137f0d8, | ||
| 1284 | 0xb60076bb, | ||
| 1285 | 0x50f90465, | ||
| 1286 | 0xbb046594, | ||
| 1287 | 0x50bd0256, | ||
| 1288 | 0xfc0475fd, | ||
| 1289 | 0xe021f550, | ||
| 1290 | 0x0464b605, | ||
| 1291 | /* 0x06b8: i2c_get_byte_done */ | ||
| 1292 | /* 0x06ba: i2c_put_byte */ | ||
| 1293 | 0x47f000f8, | ||
| 1294 | /* 0x06bd: i2c_put_byte_next */ | ||
| 1295 | 0x0142b608, | ||
| 1296 | 0xbb3854ff, | ||
| 1297 | 0x65b60076, | ||
| 1298 | 0x9450f904, | ||
| 1299 | 0x56bb0465, | ||
| 1300 | 0xfd50bd02, | ||
| 1301 | 0x50fc0475, | ||
| 1302 | 0x05e021f5, | ||
| 1303 | 0xf40464b6, | ||
| 1304 | 0x46b03411, | ||
| 1305 | 0xd81bf400, | ||
| 1306 | 0xb60076bb, | ||
| 1307 | 0x50f90465, | ||
| 1308 | 0xbb046594, | ||
| 1309 | 0x50bd0256, | ||
| 1310 | 0xfc0475fd, | ||
| 1311 | 0x2121f550, | ||
| 1312 | 0x0464b606, | ||
| 1313 | 0xbb0f11f4, | ||
| 1314 | 0x36b00076, | ||
| 1315 | 0x061bf401, | ||
| 1316 | /* 0x0713: i2c_put_byte_done */ | ||
| 1317 | 0xf80132f4, | ||
| 1318 | /* 0x0715: i2c_addr */ | ||
| 1319 | 0x0076bb00, | ||
| 1320 | 0xf90465b6, | ||
| 1321 | 0x04659450, | ||
| 1322 | 0xbd0256bb, | ||
| 1323 | 0x0475fd50, | ||
| 1324 | 0x21f550fc, | ||
| 1325 | 0x64b60551, | ||
| 1326 | 0x2911f404, | ||
| 1327 | 0x012ec3e7, | ||
| 1328 | 0xfd0134b6, | ||
| 1329 | 0x76bb0553, | ||
| 1330 | 0x0465b600, | ||
| 1331 | 0x659450f9, | ||
| 1332 | 0x0256bb04, | ||
| 1333 | 0x75fd50bd, | ||
| 1334 | 0xf550fc04, | ||
| 1335 | 0xb606ba21, | ||
| 1336 | /* 0x075a: i2c_addr_done */ | ||
| 1337 | 0x00f80464, | ||
| 1338 | /* 0x075c: i2c_acquire_addr */ | ||
| 1339 | 0xb6f8cec7, | ||
| 1340 | 0xe0b705e4, | ||
| 1341 | 0x00f8d014, | ||
| 1342 | /* 0x0768: i2c_acquire */ | ||
| 1343 | 0x075c21f5, | ||
| 1344 | 0xf00421f4, | ||
| 1345 | 0x21f403d9, | ||
| 1346 | /* 0x0777: i2c_release */ | ||
| 1347 | 0xf500f833, | ||
| 1348 | 0xf4075c21, | ||
| 1349 | 0xdaf00421, | ||
| 1350 | 0x3321f403, | ||
| 1351 | /* 0x0786: i2c_recv */ | ||
| 1352 | 0x32f400f8, | ||
| 1353 | 0xf8c1c701, | ||
| 1354 | 0xb00214b6, | ||
| 1355 | 0x1ff52816, | ||
| 1356 | 0x13a0013a, | ||
| 1357 | 0x32980bd4, | ||
| 1358 | 0xac13a000, | ||
| 1359 | 0x0031980b, | ||
| 1360 | 0xf90231f4, | ||
| 1361 | 0xf9e0f9d0, | ||
| 1362 | 0x0067f1d0, | ||
| 1363 | 0x0063f100, | ||
| 1364 | 0x01679210, | ||
| 1365 | 0xb60076bb, | ||
| 1366 | 0x50f90465, | ||
| 1367 | 0xbb046594, | ||
| 1368 | 0x50bd0256, | ||
| 1369 | 0xfc0475fd, | ||
| 1370 | 0x6821f550, | ||
| 1371 | 0x0464b607, | ||
| 1372 | 0xd6b0d0fc, | ||
| 1373 | 0xb31bf500, | ||
| 1374 | 0x0057f000, | ||
| 1375 | 0xb60076bb, | ||
| 1376 | 0x50f90465, | ||
| 1377 | 0xbb046594, | ||
| 1378 | 0x50bd0256, | ||
| 1379 | 0xfc0475fd, | ||
| 1380 | 0x1521f550, | ||
| 1381 | 0x0464b607, | ||
| 1382 | 0x00d011f5, | ||
| 1383 | 0xbbe0c5c7, | ||
| 1384 | 0x65b60076, | ||
| 1385 | 0x9450f904, | ||
| 1386 | 0x56bb0465, | ||
| 1387 | 0xfd50bd02, | ||
| 1388 | 0x50fc0475, | ||
| 1389 | 0x06ba21f5, | ||
| 1390 | 0xf50464b6, | ||
| 1391 | 0xf000ad11, | ||
| 1392 | 0x76bb0157, | ||
| 1393 | 0x0465b600, | ||
| 1394 | 0x659450f9, | ||
| 1395 | 0x0256bb04, | ||
| 1396 | 0x75fd50bd, | ||
| 1397 | 0xf550fc04, | ||
| 1398 | 0xb6071521, | ||
| 1399 | 0x11f50464, | ||
| 1400 | 0x76bb008a, | ||
| 1401 | 0x0465b600, | ||
| 1402 | 0x659450f9, | ||
| 1403 | 0x0256bb04, | ||
| 1404 | 0x75fd50bd, | ||
| 1405 | 0xf550fc04, | ||
| 1406 | 0xb6066821, | ||
| 1407 | 0x11f40464, | ||
| 1408 | 0xe05bcb6a, | ||
| 1409 | 0xb60076bb, | ||
| 1410 | 0x50f90465, | ||
| 1411 | 0xbb046594, | ||
| 1412 | 0x50bd0256, | ||
| 1413 | 0xfc0475fd, | ||
| 1414 | 0xad21f550, | ||
| 1415 | 0x0464b605, | ||
| 1416 | 0xbd025bb9, | ||
| 1417 | 0x430ef474, | ||
| 1418 | /* 0x088c: i2c_recv_not_rd08 */ | ||
| 1419 | 0xf401d6b0, | ||
| 1420 | 0x57f03d1b, | ||
| 1421 | 0x1521f500, | ||
| 1422 | 0x3311f407, | ||
| 1423 | 0xf5e0c5c7, | ||
| 1424 | 0xf406ba21, | ||
| 1425 | 0x57f02911, | ||
| 1426 | 0x1521f500, | ||
| 1427 | 0x1f11f407, | ||
| 1428 | 0xf5e0b5c7, | ||
| 1429 | 0xf406ba21, | ||
| 1430 | 0x21f51511, | ||
| 1431 | 0x74bd05ad, | ||
| 1432 | 0xf408c5c7, | ||
| 1433 | 0x32f4091b, | ||
| 1434 | 0x030ef402, | ||
| 1435 | /* 0x08cc: i2c_recv_not_wr08 */ | ||
| 1436 | /* 0x08cc: i2c_recv_done */ | ||
| 1437 | 0xf5f8cec7, | ||
| 1438 | 0xfc077721, | ||
| 1439 | 0xf4d0fce0, | ||
| 1440 | 0x7cb90a12, | ||
| 1441 | 0x6b21f502, | ||
| 1442 | /* 0x08e1: i2c_recv_exit */ | ||
| 1443 | /* 0x08e3: i2c_init */ | ||
| 1444 | 0xf800f802, | ||
| 1445 | /* 0x08e5: test_recv */ | ||
| 1446 | 0xd817f100, | ||
| 1447 | 0x0011cf05, | ||
| 1448 | 0xf10110b6, | ||
| 1449 | 0xd005d807, | ||
| 1450 | 0x04bd0001, | ||
| 1451 | 0xd900e7f1, | ||
| 1452 | 0x134fe3f1, | ||
| 1453 | 0x01b621f5, | ||
| 1454 | /* 0x0906: test_init */ | ||
| 1455 | 0xe7f100f8, | ||
| 1456 | 0x21f50800, | ||
| 1457 | 0x00f801b6, | ||
| 1458 | /* 0x0910: idle_recv */ | ||
| 1459 | /* 0x0912: idle */ | ||
| 1460 | 0x31f400f8, | ||
| 1461 | 0xd417f100, | ||
| 1462 | 0x0011cf05, | ||
| 1463 | 0xf10110b6, | ||
| 1464 | 0xd005d407, | ||
| 1465 | 0x04bd0001, | ||
| 1466 | /* 0x0928: idle_loop */ | ||
| 1467 | 0xf45817f0, | ||
| 1468 | /* 0x092e: idle_proc */ | ||
| 1469 | /* 0x092e: idle_proc_exec */ | ||
| 1470 | 0x10f90232, | ||
| 1471 | 0xf5021eb9, | ||
| 1472 | 0xfc027421, | ||
| 1473 | 0x0911f410, | ||
| 1474 | 0xf40231f4, | ||
| 1475 | /* 0x0942: idle_proc_next */ | ||
| 1476 | 0x10b6ef0e, | ||
| 1477 | 0x061fb858, | ||
| 1478 | 0xf4e61bf4, | ||
| 1479 | 0x28f4dd02, | ||
| 1480 | 0xc10ef400, | ||
| 1186 | 0x00000000, | 1481 | 0x00000000, |
| 1187 | 0x00000000, | 1482 | 0x00000000, |
| 1188 | 0x00000000, | 1483 | 0x00000000, |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h index 5fb0cccc6c64..574acfa44c8c 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h +++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h | |||
| @@ -7,6 +7,7 @@ | |||
| 7 | #define PROC_HOST 0x54534f48 | 7 | #define PROC_HOST 0x54534f48 |
| 8 | #define PROC_MEMX 0x584d454d | 8 | #define PROC_MEMX 0x584d454d |
| 9 | #define PROC_PERF 0x46524550 | 9 | #define PROC_PERF 0x46524550 |
| 10 | #define PROC_I2C_ 0x5f433249 | ||
| 10 | #define PROC_TEST 0x54534554 | 11 | #define PROC_TEST 0x54534554 |
| 11 | 12 | ||
| 12 | /* KERN: message identifiers */ | 13 | /* KERN: message identifiers */ |
| @@ -24,4 +25,22 @@ | |||
| 24 | #define MEMX_WAIT 3 | 25 | #define MEMX_WAIT 3 |
| 25 | #define MEMX_DELAY 4 | 26 | #define MEMX_DELAY 4 |
| 26 | 27 | ||
| 28 | /* I2C_: message identifiers */ | ||
| 29 | #define I2C__MSG_RD08 0 | ||
| 30 | #define I2C__MSG_WR08 1 | ||
| 31 | |||
| 32 | #define I2C__MSG_DATA0_PORT 24:31 | ||
| 33 | #define I2C__MSG_DATA0_ADDR 14:23 | ||
| 34 | |||
| 35 | #define I2C__MSG_DATA0_RD08_PORT I2C__MSG_DATA0_PORT | ||
| 36 | #define I2C__MSG_DATA0_RD08_ADDR I2C__MSG_DATA0_ADDR | ||
| 37 | #define I2C__MSG_DATA0_RD08_REG 0:7 | ||
| 38 | #define I2C__MSG_DATA1_RD08_VAL 0:7 | ||
| 39 | |||
| 40 | #define I2C__MSG_DATA0_WR08_PORT I2C__MSG_DATA0_PORT | ||
| 41 | #define I2C__MSG_DATA0_WR08_ADDR I2C__MSG_DATA0_ADDR | ||
| 42 | #define I2C__MSG_DATA0_WR08_SYNC 8:8 | ||
| 43 | #define I2C__MSG_DATA0_WR08_REG 0:7 | ||
| 44 | #define I2C__MSG_DATA1_WR08_VAL 0:7 | ||
| 45 | |||
| 27 | #endif | 46 | #endif |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c index ef3133e7575c..7dd680ff2f6f 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c | |||
| @@ -72,13 +72,7 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node) | |||
| 72 | vmm->flush(vm); | 72 | vmm->flush(vm); |
| 73 | } | 73 | } |
| 74 | 74 | ||
| 75 | void | 75 | static void |
| 76 | nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node) | ||
| 77 | { | ||
| 78 | nouveau_vm_map_at(vma, 0, node); | ||
| 79 | } | ||
| 80 | |||
| 81 | void | ||
| 82 | nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length, | 76 | nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length, |
| 83 | struct nouveau_mem *mem) | 77 | struct nouveau_mem *mem) |
| 84 | { | 78 | { |
| @@ -136,7 +130,7 @@ finish: | |||
| 136 | vmm->flush(vm); | 130 | vmm->flush(vm); |
| 137 | } | 131 | } |
| 138 | 132 | ||
| 139 | void | 133 | static void |
| 140 | nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length, | 134 | nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length, |
| 141 | struct nouveau_mem *mem) | 135 | struct nouveau_mem *mem) |
| 142 | { | 136 | { |
| @@ -175,6 +169,18 @@ nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length, | |||
| 175 | } | 169 | } |
| 176 | 170 | ||
| 177 | void | 171 | void |
| 172 | nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node) | ||
| 173 | { | ||
| 174 | if (node->sg) | ||
| 175 | nouveau_vm_map_sg_table(vma, 0, node->size << 12, node); | ||
| 176 | else | ||
| 177 | if (node->pages) | ||
| 178 | nouveau_vm_map_sg(vma, 0, node->size << 12, node); | ||
| 179 | else | ||
| 180 | nouveau_vm_map_at(vma, 0, node); | ||
| 181 | } | ||
| 182 | |||
| 183 | void | ||
| 178 | nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length) | 184 | nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length) |
| 179 | { | 185 | { |
| 180 | struct nouveau_vm *vm = vma->vm; | 186 | struct nouveau_vm *vm = vma->vm; |
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c index b13ff0fc42de..2f1ed61f7c8c 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c | |||
| @@ -77,11 +77,6 @@ nv04_display_create(struct drm_device *dev) | |||
| 77 | 77 | ||
| 78 | nouveau_hw_save_vga_fonts(dev, 1); | 78 | nouveau_hw_save_vga_fonts(dev, 1); |
| 79 | 79 | ||
| 80 | ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE, 0xd1500000, | ||
| 81 | NV04_DISP_CLASS, NULL, 0, &disp->core); | ||
| 82 | if (ret) | ||
| 83 | return ret; | ||
| 84 | |||
| 85 | nv04_crtc_create(dev, 0); | 80 | nv04_crtc_create(dev, 0); |
| 86 | if (nv_two_heads(dev)) | 81 | if (nv_two_heads(dev)) |
| 87 | nv04_crtc_create(dev, 1); | 82 | nv04_crtc_create(dev, 1); |
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h index 56a28db04000..4245fc3dab70 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/disp.h +++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h | |||
| @@ -80,7 +80,6 @@ struct nv04_display { | |||
| 80 | struct nv04_mode_state saved_reg; | 80 | struct nv04_mode_state saved_reg; |
| 81 | uint32_t saved_vga_font[4][16384]; | 81 | uint32_t saved_vga_font[4][16384]; |
| 82 | uint32_t dac_users[4]; | 82 | uint32_t dac_users[4]; |
| 83 | struct nouveau_object *core; | ||
| 84 | struct nouveau_bo *image[2]; | 83 | struct nouveau_bo *image[2]; |
| 85 | }; | 84 | }; |
| 86 | 85 | ||
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c index 32e7064b819b..ab03f7719d2d 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c +++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c | |||
| @@ -55,9 +55,12 @@ struct nouveau_plane { | |||
| 55 | int hue; | 55 | int hue; |
| 56 | int saturation; | 56 | int saturation; |
| 57 | int iturbt_709; | 57 | int iturbt_709; |
| 58 | |||
| 59 | void (*set_params)(struct nouveau_plane *); | ||
| 58 | }; | 60 | }; |
| 59 | 61 | ||
| 60 | static uint32_t formats[] = { | 62 | static uint32_t formats[] = { |
| 63 | DRM_FORMAT_YUYV, | ||
| 61 | DRM_FORMAT_UYVY, | 64 | DRM_FORMAT_UYVY, |
| 62 | DRM_FORMAT_NV12, | 65 | DRM_FORMAT_NV12, |
| 63 | }; | 66 | }; |
| @@ -140,10 +143,10 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
| 140 | nv_wr32(dev, NV_PVIDEO_POINT_OUT(flip), crtc_y << 16 | crtc_x); | 143 | nv_wr32(dev, NV_PVIDEO_POINT_OUT(flip), crtc_y << 16 | crtc_x); |
| 141 | nv_wr32(dev, NV_PVIDEO_SIZE_OUT(flip), crtc_h << 16 | crtc_w); | 144 | nv_wr32(dev, NV_PVIDEO_SIZE_OUT(flip), crtc_h << 16 | crtc_w); |
| 142 | 145 | ||
| 143 | if (fb->pixel_format == DRM_FORMAT_NV12) { | 146 | if (fb->pixel_format != DRM_FORMAT_UYVY) |
| 144 | format |= NV_PVIDEO_FORMAT_COLOR_LE_CR8YB8CB8YA8; | 147 | format |= NV_PVIDEO_FORMAT_COLOR_LE_CR8YB8CB8YA8; |
| 148 | if (fb->pixel_format == DRM_FORMAT_NV12) | ||
| 145 | format |= NV_PVIDEO_FORMAT_PLANAR; | 149 | format |= NV_PVIDEO_FORMAT_PLANAR; |
| 146 | } | ||
| 147 | if (nv_plane->iturbt_709) | 150 | if (nv_plane->iturbt_709) |
| 148 | format |= NV_PVIDEO_FORMAT_MATRIX_ITURBT709; | 151 | format |= NV_PVIDEO_FORMAT_MATRIX_ITURBT709; |
| 149 | if (nv_plane->colorkey & (1 << 24)) | 152 | if (nv_plane->colorkey & (1 << 24)) |
| @@ -182,9 +185,9 @@ nv10_disable_plane(struct drm_plane *plane) | |||
| 182 | } | 185 | } |
| 183 | 186 | ||
| 184 | static void | 187 | static void |
| 185 | nv10_destroy_plane(struct drm_plane *plane) | 188 | nv_destroy_plane(struct drm_plane *plane) |
| 186 | { | 189 | { |
| 187 | nv10_disable_plane(plane); | 190 | plane->funcs->disable_plane(plane); |
| 188 | drm_plane_cleanup(plane); | 191 | drm_plane_cleanup(plane); |
| 189 | kfree(plane); | 192 | kfree(plane); |
| 190 | } | 193 | } |
| @@ -217,9 +220,9 @@ nv10_set_params(struct nouveau_plane *plane) | |||
| 217 | } | 220 | } |
| 218 | 221 | ||
| 219 | static int | 222 | static int |
| 220 | nv10_set_property(struct drm_plane *plane, | 223 | nv_set_property(struct drm_plane *plane, |
| 221 | struct drm_property *property, | 224 | struct drm_property *property, |
| 222 | uint64_t value) | 225 | uint64_t value) |
| 223 | { | 226 | { |
| 224 | struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane; | 227 | struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane; |
| 225 | 228 | ||
| @@ -238,15 +241,16 @@ nv10_set_property(struct drm_plane *plane, | |||
| 238 | else | 241 | else |
| 239 | return -EINVAL; | 242 | return -EINVAL; |
| 240 | 243 | ||
| 241 | nv10_set_params(nv_plane); | 244 | if (nv_plane->set_params) |
| 245 | nv_plane->set_params(nv_plane); | ||
| 242 | return 0; | 246 | return 0; |
| 243 | } | 247 | } |
| 244 | 248 | ||
| 245 | static const struct drm_plane_funcs nv10_plane_funcs = { | 249 | static const struct drm_plane_funcs nv10_plane_funcs = { |
| 246 | .update_plane = nv10_update_plane, | 250 | .update_plane = nv10_update_plane, |
| 247 | .disable_plane = nv10_disable_plane, | 251 | .disable_plane = nv10_disable_plane, |
| 248 | .set_property = nv10_set_property, | 252 | .set_property = nv_set_property, |
| 249 | .destroy = nv10_destroy_plane, | 253 | .destroy = nv_destroy_plane, |
| 250 | }; | 254 | }; |
| 251 | 255 | ||
| 252 | static void | 256 | static void |
| @@ -266,7 +270,7 @@ nv10_overlay_init(struct drm_device *device) | |||
| 266 | case 0x15: | 270 | case 0x15: |
| 267 | case 0x1a: | 271 | case 0x1a: |
| 268 | case 0x20: | 272 | case 0x20: |
| 269 | num_formats = 1; | 273 | num_formats = 2; |
| 270 | break; | 274 | break; |
| 271 | } | 275 | } |
| 272 | 276 | ||
| @@ -321,8 +325,159 @@ nv10_overlay_init(struct drm_device *device) | |||
| 321 | drm_object_attach_property(&plane->base.base, | 325 | drm_object_attach_property(&plane->base.base, |
| 322 | plane->props.iturbt_709, plane->iturbt_709); | 326 | plane->props.iturbt_709, plane->iturbt_709); |
| 323 | 327 | ||
| 328 | plane->set_params = nv10_set_params; | ||
| 324 | nv10_set_params(plane); | 329 | nv10_set_params(plane); |
| 325 | nv_wr32(dev, NV_PVIDEO_STOP, 1); | 330 | nv10_disable_plane(&plane->base); |
| 331 | return; | ||
| 332 | cleanup: | ||
| 333 | drm_plane_cleanup(&plane->base); | ||
| 334 | err: | ||
| 335 | kfree(plane); | ||
| 336 | nv_error(dev, "Failed to create plane\n"); | ||
| 337 | } | ||
| 338 | |||
| 339 | static int | ||
| 340 | nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | ||
| 341 | struct drm_framebuffer *fb, int crtc_x, int crtc_y, | ||
| 342 | unsigned int crtc_w, unsigned int crtc_h, | ||
| 343 | uint32_t src_x, uint32_t src_y, | ||
| 344 | uint32_t src_w, uint32_t src_h) | ||
| 345 | { | ||
| 346 | struct nouveau_device *dev = nouveau_dev(plane->dev); | ||
| 347 | struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane; | ||
| 348 | struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); | ||
| 349 | struct nouveau_bo *cur = nv_plane->cur; | ||
| 350 | uint32_t overlay = 1; | ||
| 351 | int brightness = (nv_plane->brightness - 512) * 62 / 512; | ||
| 352 | int pitch, ret, i; | ||
| 353 | |||
| 354 | /* Source parameters given in 16.16 fixed point, ignore fractional. */ | ||
| 355 | src_x >>= 16; | ||
| 356 | src_y >>= 16; | ||
| 357 | src_w >>= 16; | ||
| 358 | src_h >>= 16; | ||
| 359 | |||
| 360 | pitch = ALIGN(src_w * 4, 0x100); | ||
| 361 | |||
| 362 | if (pitch > 0xffff) | ||
| 363 | return -ERANGE; | ||
| 364 | |||
| 365 | /* TODO: Compute an offset? Not sure how to do this for YUYV. */ | ||
| 366 | if (src_x != 0 || src_y != 0) | ||
| 367 | return -ERANGE; | ||
| 368 | |||
| 369 | if (crtc_w < src_w || crtc_h < src_h) | ||
| 370 | return -ERANGE; | ||
| 371 | |||
| 372 | ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM); | ||
| 373 | if (ret) | ||
| 374 | return ret; | ||
| 375 | |||
| 376 | nv_plane->cur = nv_fb->nvbo; | ||
| 377 | |||
| 378 | nv_wr32(dev, NV_PVIDEO_OE_STATE, 0); | ||
| 379 | nv_wr32(dev, NV_PVIDEO_SU_STATE, 0); | ||
| 380 | nv_wr32(dev, NV_PVIDEO_RM_STATE, 0); | ||
| 381 | |||
| 382 | for (i = 0; i < 2; i++) { | ||
| 383 | nv_wr32(dev, NV_PVIDEO_BUFF0_START_ADDRESS + 4 * i, | ||
| 384 | nv_fb->nvbo->bo.offset); | ||
| 385 | nv_wr32(dev, NV_PVIDEO_BUFF0_PITCH_LENGTH + 4 * i, pitch); | ||
| 386 | nv_wr32(dev, NV_PVIDEO_BUFF0_OFFSET + 4 * i, 0); | ||
| 387 | } | ||
| 388 | nv_wr32(dev, NV_PVIDEO_WINDOW_START, crtc_y << 16 | crtc_x); | ||
| 389 | nv_wr32(dev, NV_PVIDEO_WINDOW_SIZE, crtc_h << 16 | crtc_w); | ||
| 390 | nv_wr32(dev, NV_PVIDEO_STEP_SIZE, | ||
| 391 | (uint32_t)(((src_h - 1) << 11) / (crtc_h - 1)) << 16 | (uint32_t)(((src_w - 1) << 11) / (crtc_w - 1))); | ||
| 392 | |||
| 393 | /* It should be possible to convert hue/contrast to this */ | ||
| 394 | nv_wr32(dev, NV_PVIDEO_RED_CSC_OFFSET, 0x69 - brightness); | ||
| 395 | nv_wr32(dev, NV_PVIDEO_GREEN_CSC_OFFSET, 0x3e + brightness); | ||
| 396 | nv_wr32(dev, NV_PVIDEO_BLUE_CSC_OFFSET, 0x89 - brightness); | ||
| 397 | nv_wr32(dev, NV_PVIDEO_CSC_ADJUST, 0); | ||
| 398 | |||
| 399 | nv_wr32(dev, NV_PVIDEO_CONTROL_Y, 0x001); /* (BLUR_ON, LINE_HALF) */ | ||
| 400 | nv_wr32(dev, NV_PVIDEO_CONTROL_X, 0x111); /* (WEIGHT_HEAVY, SHARPENING_ON, SMOOTHING_ON) */ | ||
| 401 | |||
| 402 | nv_wr32(dev, NV_PVIDEO_FIFO_BURST_LENGTH, 0x03); | ||
| 403 | nv_wr32(dev, NV_PVIDEO_FIFO_THRES_SIZE, 0x38); | ||
| 404 | |||
| 405 | nv_wr32(dev, NV_PVIDEO_KEY, nv_plane->colorkey); | ||
| 406 | |||
| 407 | if (nv_plane->colorkey & (1 << 24)) | ||
| 408 | overlay |= 0x10; | ||
| 409 | if (fb->pixel_format == DRM_FORMAT_YUYV) | ||
| 410 | overlay |= 0x100; | ||
| 411 | |||
| 412 | nv_wr32(dev, NV_PVIDEO_OVERLAY, overlay); | ||
| 413 | |||
| 414 | nv_wr32(dev, NV_PVIDEO_SU_STATE, nv_rd32(dev, NV_PVIDEO_SU_STATE) ^ (1 << 16)); | ||
| 415 | |||
| 416 | if (cur) | ||
| 417 | nouveau_bo_unpin(cur); | ||
| 418 | |||
| 419 | return 0; | ||
| 420 | } | ||
| 421 | |||
| 422 | static int | ||
| 423 | nv04_disable_plane(struct drm_plane *plane) | ||
| 424 | { | ||
| 425 | struct nouveau_device *dev = nouveau_dev(plane->dev); | ||
| 426 | struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane; | ||
| 427 | |||
| 428 | nv_mask(dev, NV_PVIDEO_OVERLAY, 1, 0); | ||
| 429 | nv_wr32(dev, NV_PVIDEO_OE_STATE, 0); | ||
| 430 | nv_wr32(dev, NV_PVIDEO_SU_STATE, 0); | ||
| 431 | nv_wr32(dev, NV_PVIDEO_RM_STATE, 0); | ||
| 432 | if (nv_plane->cur) { | ||
| 433 | nouveau_bo_unpin(nv_plane->cur); | ||
| 434 | nv_plane->cur = NULL; | ||
| 435 | } | ||
| 436 | |||
| 437 | return 0; | ||
| 438 | } | ||
| 439 | |||
| 440 | static const struct drm_plane_funcs nv04_plane_funcs = { | ||
| 441 | .update_plane = nv04_update_plane, | ||
| 442 | .disable_plane = nv04_disable_plane, | ||
| 443 | .set_property = nv_set_property, | ||
| 444 | .destroy = nv_destroy_plane, | ||
| 445 | }; | ||
| 446 | |||
| 447 | static void | ||
| 448 | nv04_overlay_init(struct drm_device *device) | ||
| 449 | { | ||
| 450 | struct nouveau_device *dev = nouveau_dev(device); | ||
| 451 | struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL); | ||
| 452 | int ret; | ||
| 453 | |||
| 454 | if (!plane) | ||
| 455 | return; | ||
| 456 | |||
| 457 | ret = drm_plane_init(device, &plane->base, 1 /* single crtc */, | ||
| 458 | &nv04_plane_funcs, | ||
| 459 | formats, 2, false); | ||
| 460 | if (ret) | ||
| 461 | goto err; | ||
| 462 | |||
| 463 | /* Set up the plane properties */ | ||
| 464 | plane->props.colorkey = drm_property_create_range( | ||
| 465 | device, 0, "colorkey", 0, 0x01ffffff); | ||
| 466 | plane->props.brightness = drm_property_create_range( | ||
| 467 | device, 0, "brightness", 0, 1024); | ||
| 468 | if (!plane->props.colorkey || | ||
| 469 | !plane->props.brightness) | ||
| 470 | goto cleanup; | ||
| 471 | |||
| 472 | plane->colorkey = 0; | ||
| 473 | drm_object_attach_property(&plane->base.base, | ||
| 474 | plane->props.colorkey, plane->colorkey); | ||
| 475 | |||
| 476 | plane->brightness = 512; | ||
| 477 | drm_object_attach_property(&plane->base.base, | ||
| 478 | plane->props.brightness, plane->brightness); | ||
| 479 | |||
| 480 | nv04_disable_plane(&plane->base); | ||
| 326 | return; | 481 | return; |
| 327 | cleanup: | 482 | cleanup: |
| 328 | drm_plane_cleanup(&plane->base); | 483 | drm_plane_cleanup(&plane->base); |
| @@ -335,6 +490,8 @@ void | |||
| 335 | nouveau_overlay_init(struct drm_device *device) | 490 | nouveau_overlay_init(struct drm_device *device) |
| 336 | { | 491 | { |
| 337 | struct nouveau_device *dev = nouveau_dev(device); | 492 | struct nouveau_device *dev = nouveau_dev(device); |
| 338 | if (dev->chipset >= 0x10 && dev->chipset <= 0x40) | 493 | if (dev->chipset < 0x10) |
| 494 | nv04_overlay_init(device); | ||
| 495 | else if (dev->chipset <= 0x40) | ||
| 339 | nv10_overlay_init(device); | 496 | nv10_overlay_init(device); |
| 340 | } | 497 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index 3c149617cfcb..4ef83df2b246 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c | |||
| @@ -61,6 +61,7 @@ bool nouveau_is_v1_dsm(void) { | |||
| 61 | #define NOUVEAU_DSM_HAS_MUX 0x1 | 61 | #define NOUVEAU_DSM_HAS_MUX 0x1 |
| 62 | #define NOUVEAU_DSM_HAS_OPT 0x2 | 62 | #define NOUVEAU_DSM_HAS_OPT 0x2 |
| 63 | 63 | ||
| 64 | #ifdef CONFIG_VGA_SWITCHEROO | ||
| 64 | static const char nouveau_dsm_muid[] = { | 65 | static const char nouveau_dsm_muid[] = { |
| 65 | 0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D, | 66 | 0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D, |
| 66 | 0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4, | 67 | 0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4, |
| @@ -326,6 +327,11 @@ void nouveau_unregister_dsm_handler(void) | |||
| 326 | if (nouveau_dsm_priv.optimus_detected || nouveau_dsm_priv.dsm_detected) | 327 | if (nouveau_dsm_priv.optimus_detected || nouveau_dsm_priv.dsm_detected) |
| 327 | vga_switcheroo_unregister_handler(); | 328 | vga_switcheroo_unregister_handler(); |
| 328 | } | 329 | } |
| 330 | #else | ||
| 331 | void nouveau_register_dsm_handler(void) {} | ||
| 332 | void nouveau_unregister_dsm_handler(void) {} | ||
| 333 | void nouveau_switcheroo_optimus_dsm(void) {} | ||
| 334 | #endif | ||
| 329 | 335 | ||
| 330 | /* retrieve the ROM in 4k blocks */ | 336 | /* retrieve the ROM in 4k blocks */ |
| 331 | static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios, | 337 | static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios, |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index c0fde6b9393c..488686d490c0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c | |||
| @@ -560,28 +560,6 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl) | |||
| 560 | } | 560 | } |
| 561 | 561 | ||
| 562 | 562 | ||
| 563 | /* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access | ||
| 564 | * TTM_PL_{VRAM,TT} directly. | ||
| 565 | */ | ||
| 566 | |||
| 567 | static int | ||
| 568 | nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan, | ||
| 569 | struct nouveau_bo *nvbo, bool evict, | ||
| 570 | bool no_wait_gpu, struct ttm_mem_reg *new_mem) | ||
| 571 | { | ||
| 572 | struct nouveau_fence *fence = NULL; | ||
| 573 | int ret; | ||
| 574 | |||
| 575 | ret = nouveau_fence_new(chan, false, &fence); | ||
| 576 | if (ret) | ||
| 577 | return ret; | ||
| 578 | |||
| 579 | ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, evict, | ||
| 580 | no_wait_gpu, new_mem); | ||
| 581 | nouveau_fence_unref(&fence); | ||
| 582 | return ret; | ||
| 583 | } | ||
| 584 | |||
| 585 | static int | 563 | static int |
| 586 | nve0_bo_move_init(struct nouveau_channel *chan, u32 handle) | 564 | nve0_bo_move_init(struct nouveau_channel *chan, u32 handle) |
| 587 | { | 565 | { |
| @@ -798,25 +776,25 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, | |||
| 798 | struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) | 776 | struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) |
| 799 | { | 777 | { |
| 800 | struct nouveau_mem *node = old_mem->mm_node; | 778 | struct nouveau_mem *node = old_mem->mm_node; |
| 801 | struct nouveau_bo *nvbo = nouveau_bo(bo); | ||
| 802 | u64 length = (new_mem->num_pages << PAGE_SHIFT); | 779 | u64 length = (new_mem->num_pages << PAGE_SHIFT); |
| 803 | u64 src_offset = node->vma[0].offset; | 780 | u64 src_offset = node->vma[0].offset; |
| 804 | u64 dst_offset = node->vma[1].offset; | 781 | u64 dst_offset = node->vma[1].offset; |
| 782 | int src_tiled = !!node->memtype; | ||
| 783 | int dst_tiled = !!((struct nouveau_mem *)new_mem->mm_node)->memtype; | ||
| 805 | int ret; | 784 | int ret; |
| 806 | 785 | ||
| 807 | while (length) { | 786 | while (length) { |
| 808 | u32 amount, stride, height; | 787 | u32 amount, stride, height; |
| 809 | 788 | ||
| 789 | ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled)); | ||
| 790 | if (ret) | ||
| 791 | return ret; | ||
| 792 | |||
| 810 | amount = min(length, (u64)(4 * 1024 * 1024)); | 793 | amount = min(length, (u64)(4 * 1024 * 1024)); |
| 811 | stride = 16 * 4; | 794 | stride = 16 * 4; |
| 812 | height = amount / stride; | 795 | height = amount / stride; |
| 813 | 796 | ||
| 814 | if (old_mem->mem_type == TTM_PL_VRAM && | 797 | if (src_tiled) { |
| 815 | nouveau_bo_tile_layout(nvbo)) { | ||
| 816 | ret = RING_SPACE(chan, 8); | ||
| 817 | if (ret) | ||
| 818 | return ret; | ||
| 819 | |||
| 820 | BEGIN_NV04(chan, NvSubCopy, 0x0200, 7); | 798 | BEGIN_NV04(chan, NvSubCopy, 0x0200, 7); |
| 821 | OUT_RING (chan, 0); | 799 | OUT_RING (chan, 0); |
| 822 | OUT_RING (chan, 0); | 800 | OUT_RING (chan, 0); |
| @@ -826,19 +804,10 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, | |||
| 826 | OUT_RING (chan, 0); | 804 | OUT_RING (chan, 0); |
| 827 | OUT_RING (chan, 0); | 805 | OUT_RING (chan, 0); |
| 828 | } else { | 806 | } else { |
| 829 | ret = RING_SPACE(chan, 2); | ||
| 830 | if (ret) | ||
| 831 | return ret; | ||
| 832 | |||
| 833 | BEGIN_NV04(chan, NvSubCopy, 0x0200, 1); | 807 | BEGIN_NV04(chan, NvSubCopy, 0x0200, 1); |
| 834 | OUT_RING (chan, 1); | 808 | OUT_RING (chan, 1); |
| 835 | } | 809 | } |
| 836 | if (new_mem->mem_type == TTM_PL_VRAM && | 810 | if (dst_tiled) { |
| 837 | nouveau_bo_tile_layout(nvbo)) { | ||
| 838 | ret = RING_SPACE(chan, 8); | ||
| 839 | if (ret) | ||
| 840 | return ret; | ||
| 841 | |||
| 842 | BEGIN_NV04(chan, NvSubCopy, 0x021c, 7); | 811 | BEGIN_NV04(chan, NvSubCopy, 0x021c, 7); |
| 843 | OUT_RING (chan, 0); | 812 | OUT_RING (chan, 0); |
| 844 | OUT_RING (chan, 0); | 813 | OUT_RING (chan, 0); |
| @@ -848,18 +817,10 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, | |||
| 848 | OUT_RING (chan, 0); | 817 | OUT_RING (chan, 0); |
| 849 | OUT_RING (chan, 0); | 818 | OUT_RING (chan, 0); |
| 850 | } else { | 819 | } else { |
| 851 | ret = RING_SPACE(chan, 2); | ||
| 852 | if (ret) | ||
| 853 | return ret; | ||
| 854 | |||
| 855 | BEGIN_NV04(chan, NvSubCopy, 0x021c, 1); | 820 | BEGIN_NV04(chan, NvSubCopy, 0x021c, 1); |
| 856 | OUT_RING (chan, 1); | 821 | OUT_RING (chan, 1); |
| 857 | } | 822 | } |
| 858 | 823 | ||
| 859 | ret = RING_SPACE(chan, 14); | ||
| 860 | if (ret) | ||
| 861 | return ret; | ||
| 862 | |||
| 863 | BEGIN_NV04(chan, NvSubCopy, 0x0238, 2); | 824 | BEGIN_NV04(chan, NvSubCopy, 0x0238, 2); |
| 864 | OUT_RING (chan, upper_32_bits(src_offset)); | 825 | OUT_RING (chan, upper_32_bits(src_offset)); |
| 865 | OUT_RING (chan, upper_32_bits(dst_offset)); | 826 | OUT_RING (chan, upper_32_bits(dst_offset)); |
| @@ -953,23 +914,28 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, | |||
| 953 | } | 914 | } |
| 954 | 915 | ||
| 955 | static int | 916 | static int |
| 956 | nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo, | 917 | nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo, |
| 957 | struct ttm_mem_reg *mem, struct nouveau_vma *vma) | 918 | struct ttm_mem_reg *mem) |
| 958 | { | 919 | { |
| 959 | struct nouveau_mem *node = mem->mm_node; | 920 | struct nouveau_mem *old_node = bo->mem.mm_node; |
| 921 | struct nouveau_mem *new_node = mem->mm_node; | ||
| 922 | u64 size = (u64)mem->num_pages << PAGE_SHIFT; | ||
| 960 | int ret; | 923 | int ret; |
| 961 | 924 | ||
| 962 | ret = nouveau_vm_get(nv_client(chan->cli)->vm, mem->num_pages << | 925 | ret = nouveau_vm_get(nv_client(drm)->vm, size, old_node->page_shift, |
| 963 | PAGE_SHIFT, node->page_shift, | 926 | NV_MEM_ACCESS_RW, &old_node->vma[0]); |
| 964 | NV_MEM_ACCESS_RW, vma); | ||
| 965 | if (ret) | 927 | if (ret) |
| 966 | return ret; | 928 | return ret; |
| 967 | 929 | ||
| 968 | if (mem->mem_type == TTM_PL_VRAM) | 930 | ret = nouveau_vm_get(nv_client(drm)->vm, size, new_node->page_shift, |
| 969 | nouveau_vm_map(vma, node); | 931 | NV_MEM_ACCESS_RW, &old_node->vma[1]); |
| 970 | else | 932 | if (ret) { |
| 971 | nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT, node); | 933 | nouveau_vm_put(&old_node->vma[0]); |
| 934 | return ret; | ||
| 935 | } | ||
| 972 | 936 | ||
| 937 | nouveau_vm_map(&old_node->vma[0], old_node); | ||
| 938 | nouveau_vm_map(&old_node->vma[1], new_node); | ||
| 973 | return 0; | 939 | return 0; |
| 974 | } | 940 | } |
| 975 | 941 | ||
| @@ -979,35 +945,34 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, | |||
| 979 | { | 945 | { |
| 980 | struct nouveau_drm *drm = nouveau_bdev(bo->bdev); | 946 | struct nouveau_drm *drm = nouveau_bdev(bo->bdev); |
| 981 | struct nouveau_channel *chan = drm->ttm.chan; | 947 | struct nouveau_channel *chan = drm->ttm.chan; |
| 982 | struct nouveau_bo *nvbo = nouveau_bo(bo); | 948 | struct nouveau_fence *fence; |
| 983 | struct ttm_mem_reg *old_mem = &bo->mem; | ||
| 984 | int ret; | 949 | int ret; |
| 985 | 950 | ||
| 986 | mutex_lock_nested(&chan->cli->mutex, SINGLE_DEPTH_NESTING); | ||
| 987 | |||
| 988 | /* create temporary vmas for the transfer and attach them to the | 951 | /* create temporary vmas for the transfer and attach them to the |
| 989 | * old nouveau_mem node, these will get cleaned up after ttm has | 952 | * old nouveau_mem node, these will get cleaned up after ttm has |
| 990 | * destroyed the ttm_mem_reg | 953 | * destroyed the ttm_mem_reg |
| 991 | */ | 954 | */ |
| 992 | if (nv_device(drm->device)->card_type >= NV_50) { | 955 | if (nv_device(drm->device)->card_type >= NV_50) { |
| 993 | struct nouveau_mem *node = old_mem->mm_node; | 956 | ret = nouveau_bo_move_prep(drm, bo, new_mem); |
| 994 | |||
| 995 | ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]); | ||
| 996 | if (ret) | ||
| 997 | goto out; | ||
| 998 | |||
| 999 | ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]); | ||
| 1000 | if (ret) | 957 | if (ret) |
| 1001 | goto out; | 958 | return ret; |
| 1002 | } | 959 | } |
| 1003 | 960 | ||
| 1004 | ret = drm->ttm.move(chan, bo, &bo->mem, new_mem); | 961 | mutex_lock_nested(&chan->cli->mutex, SINGLE_DEPTH_NESTING); |
| 962 | ret = nouveau_fence_sync(bo->sync_obj, chan); | ||
| 1005 | if (ret == 0) { | 963 | if (ret == 0) { |
| 1006 | ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict, | 964 | ret = drm->ttm.move(chan, bo, &bo->mem, new_mem); |
| 1007 | no_wait_gpu, new_mem); | 965 | if (ret == 0) { |
| 966 | ret = nouveau_fence_new(chan, false, &fence); | ||
| 967 | if (ret == 0) { | ||
| 968 | ret = ttm_bo_move_accel_cleanup(bo, fence, | ||
| 969 | evict, | ||
| 970 | no_wait_gpu, | ||
| 971 | new_mem); | ||
| 972 | nouveau_fence_unref(&fence); | ||
| 973 | } | ||
| 974 | } | ||
| 1008 | } | 975 | } |
| 1009 | |||
| 1010 | out: | ||
| 1011 | mutex_unlock(&chan->cli->mutex); | 976 | mutex_unlock(&chan->cli->mutex); |
| 1012 | return ret; | 977 | return ret; |
| 1013 | } | 978 | } |
| @@ -1147,19 +1112,10 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem) | |||
| 1147 | return; | 1112 | return; |
| 1148 | 1113 | ||
| 1149 | list_for_each_entry(vma, &nvbo->vma_list, head) { | 1114 | list_for_each_entry(vma, &nvbo->vma_list, head) { |
| 1150 | if (new_mem && new_mem->mem_type == TTM_PL_VRAM) { | 1115 | if (new_mem && new_mem->mem_type != TTM_PL_SYSTEM && |
| 1116 | (new_mem->mem_type == TTM_PL_VRAM || | ||
| 1117 | nvbo->page_shift != vma->vm->vmm->lpg_shift)) { | ||
| 1151 | nouveau_vm_map(vma, new_mem->mm_node); | 1118 | nouveau_vm_map(vma, new_mem->mm_node); |
| 1152 | } else | ||
| 1153 | if (new_mem && new_mem->mem_type == TTM_PL_TT && | ||
| 1154 | nvbo->page_shift == vma->vm->vmm->spg_shift) { | ||
| 1155 | if (((struct nouveau_mem *)new_mem->mm_node)->sg) | ||
| 1156 | nouveau_vm_map_sg_table(vma, 0, new_mem-> | ||
| 1157 | num_pages << PAGE_SHIFT, | ||
| 1158 | new_mem->mm_node); | ||
| 1159 | else | ||
| 1160 | nouveau_vm_map_sg(vma, 0, new_mem-> | ||
| 1161 | num_pages << PAGE_SHIFT, | ||
| 1162 | new_mem->mm_node); | ||
| 1163 | } else { | 1119 | } else { |
| 1164 | nouveau_vm_unmap(vma); | 1120 | nouveau_vm_unmap(vma); |
| 1165 | } | 1121 | } |
| @@ -1224,28 +1180,27 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, | |||
| 1224 | goto out; | 1180 | goto out; |
| 1225 | } | 1181 | } |
| 1226 | 1182 | ||
| 1227 | /* CPU copy if we have no accelerated method available */ | ||
| 1228 | if (!drm->ttm.move) { | ||
| 1229 | ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); | ||
| 1230 | goto out; | ||
| 1231 | } | ||
| 1232 | |||
| 1233 | /* Hardware assisted copy. */ | 1183 | /* Hardware assisted copy. */ |
| 1234 | if (new_mem->mem_type == TTM_PL_SYSTEM) | 1184 | if (drm->ttm.move) { |
| 1235 | ret = nouveau_bo_move_flipd(bo, evict, intr, | 1185 | if (new_mem->mem_type == TTM_PL_SYSTEM) |
| 1236 | no_wait_gpu, new_mem); | 1186 | ret = nouveau_bo_move_flipd(bo, evict, intr, |
| 1237 | else if (old_mem->mem_type == TTM_PL_SYSTEM) | 1187 | no_wait_gpu, new_mem); |
| 1238 | ret = nouveau_bo_move_flips(bo, evict, intr, | 1188 | else if (old_mem->mem_type == TTM_PL_SYSTEM) |
| 1239 | no_wait_gpu, new_mem); | 1189 | ret = nouveau_bo_move_flips(bo, evict, intr, |
| 1240 | else | 1190 | no_wait_gpu, new_mem); |
| 1241 | ret = nouveau_bo_move_m2mf(bo, evict, intr, | 1191 | else |
| 1242 | no_wait_gpu, new_mem); | 1192 | ret = nouveau_bo_move_m2mf(bo, evict, intr, |
| 1243 | 1193 | no_wait_gpu, new_mem); | |
| 1244 | if (!ret) | 1194 | if (!ret) |
| 1245 | goto out; | 1195 | goto out; |
| 1196 | } | ||
| 1246 | 1197 | ||
| 1247 | /* Fallback to software copy. */ | 1198 | /* Fallback to software copy. */ |
| 1248 | ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); | 1199 | spin_lock(&bo->bdev->fence_lock); |
| 1200 | ret = ttm_bo_wait(bo, true, intr, no_wait_gpu); | ||
| 1201 | spin_unlock(&bo->bdev->fence_lock); | ||
| 1202 | if (ret == 0) | ||
| 1203 | ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); | ||
| 1249 | 1204 | ||
| 1250 | out: | 1205 | out: |
| 1251 | if (nv_device(drm->device)->card_type < NV_50) { | 1206 | if (nv_device(drm->device)->card_type < NV_50) { |
| @@ -1271,6 +1226,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) | |||
| 1271 | { | 1226 | { |
| 1272 | struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; | 1227 | struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; |
| 1273 | struct nouveau_drm *drm = nouveau_bdev(bdev); | 1228 | struct nouveau_drm *drm = nouveau_bdev(bdev); |
| 1229 | struct nouveau_mem *node = mem->mm_node; | ||
| 1274 | struct drm_device *dev = drm->dev; | 1230 | struct drm_device *dev = drm->dev; |
| 1275 | int ret; | 1231 | int ret; |
| 1276 | 1232 | ||
| @@ -1293,14 +1249,16 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) | |||
| 1293 | mem->bus.is_iomem = !dev->agp->cant_use_aperture; | 1249 | mem->bus.is_iomem = !dev->agp->cant_use_aperture; |
| 1294 | } | 1250 | } |
| 1295 | #endif | 1251 | #endif |
| 1296 | break; | 1252 | if (!node->memtype) |
| 1253 | /* untiled */ | ||
| 1254 | break; | ||
| 1255 | /* fallthrough, tiled memory */ | ||
| 1297 | case TTM_PL_VRAM: | 1256 | case TTM_PL_VRAM: |
| 1298 | mem->bus.offset = mem->start << PAGE_SHIFT; | 1257 | mem->bus.offset = mem->start << PAGE_SHIFT; |
| 1299 | mem->bus.base = pci_resource_start(dev->pdev, 1); | 1258 | mem->bus.base = pci_resource_start(dev->pdev, 1); |
| 1300 | mem->bus.is_iomem = true; | 1259 | mem->bus.is_iomem = true; |
| 1301 | if (nv_device(drm->device)->card_type >= NV_50) { | 1260 | if (nv_device(drm->device)->card_type >= NV_50) { |
| 1302 | struct nouveau_bar *bar = nouveau_bar(drm->device); | 1261 | struct nouveau_bar *bar = nouveau_bar(drm->device); |
| 1303 | struct nouveau_mem *node = mem->mm_node; | ||
| 1304 | 1262 | ||
| 1305 | ret = bar->umap(bar, node, NV_MEM_ACCESS_RW, | 1263 | ret = bar->umap(bar, node, NV_MEM_ACCESS_RW, |
| 1306 | &node->bar_vma); | 1264 | &node->bar_vma); |
| @@ -1336,6 +1294,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) | |||
| 1336 | struct nouveau_bo *nvbo = nouveau_bo(bo); | 1294 | struct nouveau_bo *nvbo = nouveau_bo(bo); |
| 1337 | struct nouveau_device *device = nv_device(drm->device); | 1295 | struct nouveau_device *device = nv_device(drm->device); |
| 1338 | u32 mappable = pci_resource_len(device->pdev, 1) >> PAGE_SHIFT; | 1296 | u32 mappable = pci_resource_len(device->pdev, 1) >> PAGE_SHIFT; |
| 1297 | int ret; | ||
| 1339 | 1298 | ||
| 1340 | /* as long as the bo isn't in vram, and isn't tiled, we've got | 1299 | /* as long as the bo isn't in vram, and isn't tiled, we've got |
| 1341 | * nothing to do here. | 1300 | * nothing to do here. |
| @@ -1344,10 +1303,20 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) | |||
| 1344 | if (nv_device(drm->device)->card_type < NV_50 || | 1303 | if (nv_device(drm->device)->card_type < NV_50 || |
| 1345 | !nouveau_bo_tile_layout(nvbo)) | 1304 | !nouveau_bo_tile_layout(nvbo)) |
| 1346 | return 0; | 1305 | return 0; |
| 1306 | |||
| 1307 | if (bo->mem.mem_type == TTM_PL_SYSTEM) { | ||
| 1308 | nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0); | ||
| 1309 | |||
| 1310 | ret = nouveau_bo_validate(nvbo, false, false); | ||
| 1311 | if (ret) | ||
| 1312 | return ret; | ||
| 1313 | } | ||
| 1314 | return 0; | ||
| 1347 | } | 1315 | } |
| 1348 | 1316 | ||
| 1349 | /* make sure bo is in mappable vram */ | 1317 | /* make sure bo is in mappable vram */ |
| 1350 | if (bo->mem.start + bo->mem.num_pages < mappable) | 1318 | if (nv_device(drm->device)->card_type >= NV_50 || |
| 1319 | bo->mem.start + bo->mem.num_pages < mappable) | ||
| 1351 | return 0; | 1320 | return 0; |
| 1352 | 1321 | ||
| 1353 | 1322 | ||
| @@ -1535,7 +1504,6 @@ nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm, | |||
| 1535 | struct nouveau_vma *vma) | 1504 | struct nouveau_vma *vma) |
| 1536 | { | 1505 | { |
| 1537 | const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT; | 1506 | const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT; |
| 1538 | struct nouveau_mem *node = nvbo->bo.mem.mm_node; | ||
| 1539 | int ret; | 1507 | int ret; |
| 1540 | 1508 | ||
| 1541 | ret = nouveau_vm_get(vm, size, nvbo->page_shift, | 1509 | ret = nouveau_vm_get(vm, size, nvbo->page_shift, |
| @@ -1543,15 +1511,10 @@ nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm, | |||
| 1543 | if (ret) | 1511 | if (ret) |
| 1544 | return ret; | 1512 | return ret; |
| 1545 | 1513 | ||
| 1546 | if (nvbo->bo.mem.mem_type == TTM_PL_VRAM) | 1514 | if ( nvbo->bo.mem.mem_type != TTM_PL_SYSTEM && |
| 1515 | (nvbo->bo.mem.mem_type == TTM_PL_VRAM || | ||
| 1516 | nvbo->page_shift != vma->vm->vmm->lpg_shift)) | ||
| 1547 | nouveau_vm_map(vma, nvbo->bo.mem.mm_node); | 1517 | nouveau_vm_map(vma, nvbo->bo.mem.mm_node); |
| 1548 | else if (nvbo->bo.mem.mem_type == TTM_PL_TT && | ||
| 1549 | nvbo->page_shift == vma->vm->vmm->spg_shift) { | ||
| 1550 | if (node->sg) | ||
| 1551 | nouveau_vm_map_sg_table(vma, 0, size, node); | ||
| 1552 | else | ||
| 1553 | nouveau_vm_map_sg(vma, 0, size, node); | ||
| 1554 | } | ||
| 1555 | 1518 | ||
| 1556 | list_add_tail(&vma->head, &nvbo->vma_list); | 1519 | list_add_tail(&vma->head, &nvbo->vma_list); |
| 1557 | vma->refcount = 1; | 1520 | vma->refcount = 1; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 25ea82f8def3..24011596af43 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c | |||
| @@ -68,20 +68,100 @@ nouveau_display_vblank_disable(struct drm_device *dev, int head) | |||
| 68 | nouveau_event_put(disp->vblank[head]); | 68 | nouveau_event_put(disp->vblank[head]); |
| 69 | } | 69 | } |
| 70 | 70 | ||
| 71 | static inline int | ||
| 72 | calc(int blanks, int blanke, int total, int line) | ||
| 73 | { | ||
| 74 | if (blanke >= blanks) { | ||
| 75 | if (line >= blanks) | ||
| 76 | line -= total; | ||
| 77 | } else { | ||
| 78 | if (line >= blanks) | ||
| 79 | line -= total; | ||
| 80 | line -= blanke + 1; | ||
| 81 | } | ||
| 82 | return line; | ||
| 83 | } | ||
| 84 | |||
| 85 | int | ||
| 86 | nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos, | ||
| 87 | ktime_t *stime, ktime_t *etime) | ||
| 88 | { | ||
| 89 | const u32 mthd = NV04_DISP_SCANOUTPOS + nouveau_crtc(crtc)->index; | ||
| 90 | struct nouveau_display *disp = nouveau_display(crtc->dev); | ||
| 91 | struct nv04_display_scanoutpos args; | ||
| 92 | int ret, retry = 1; | ||
| 93 | |||
| 94 | do { | ||
| 95 | ret = nv_exec(disp->core, mthd, &args, sizeof(args)); | ||
| 96 | if (ret != 0) | ||
| 97 | return 0; | ||
| 98 | |||
| 99 | if (args.vline) { | ||
| 100 | ret |= DRM_SCANOUTPOS_ACCURATE; | ||
| 101 | ret |= DRM_SCANOUTPOS_VALID; | ||
| 102 | break; | ||
| 103 | } | ||
| 104 | |||
| 105 | if (retry) ndelay(crtc->linedur_ns); | ||
| 106 | } while (retry--); | ||
| 107 | |||
| 108 | *hpos = calc(args.hblanks, args.hblanke, args.htotal, args.hline); | ||
| 109 | *vpos = calc(args.vblanks, args.vblanke, args.vtotal, args.vline); | ||
| 110 | if (stime) *stime = ns_to_ktime(args.time[0]); | ||
| 111 | if (etime) *etime = ns_to_ktime(args.time[1]); | ||
| 112 | |||
| 113 | if (*vpos < 0) | ||
| 114 | ret |= DRM_SCANOUTPOS_INVBL; | ||
| 115 | return ret; | ||
| 116 | } | ||
| 117 | |||
| 118 | int | ||
| 119 | nouveau_display_scanoutpos(struct drm_device *dev, int head, unsigned int flags, | ||
| 120 | int *vpos, int *hpos, ktime_t *stime, ktime_t *etime) | ||
| 121 | { | ||
| 122 | struct drm_crtc *crtc; | ||
| 123 | |||
| 124 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
| 125 | if (nouveau_crtc(crtc)->index == head) { | ||
| 126 | return nouveau_display_scanoutpos_head(crtc, vpos, hpos, | ||
| 127 | stime, etime); | ||
| 128 | } | ||
| 129 | } | ||
| 130 | |||
| 131 | return 0; | ||
| 132 | } | ||
| 133 | |||
| 134 | int | ||
| 135 | nouveau_display_vblstamp(struct drm_device *dev, int head, int *max_error, | ||
| 136 | struct timeval *time, unsigned flags) | ||
| 137 | { | ||
| 138 | struct drm_crtc *crtc; | ||
| 139 | |||
| 140 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
| 141 | if (nouveau_crtc(crtc)->index == head) { | ||
| 142 | return drm_calc_vbltimestamp_from_scanoutpos(dev, | ||
| 143 | head, max_error, time, flags, crtc, | ||
| 144 | &crtc->hwmode); | ||
| 145 | } | ||
| 146 | } | ||
| 147 | |||
| 148 | return -EINVAL; | ||
| 149 | } | ||
| 150 | |||
| 71 | static void | 151 | static void |
| 72 | nouveau_display_vblank_fini(struct drm_device *dev) | 152 | nouveau_display_vblank_fini(struct drm_device *dev) |
| 73 | { | 153 | { |
| 74 | struct nouveau_display *disp = nouveau_display(dev); | 154 | struct nouveau_display *disp = nouveau_display(dev); |
| 75 | int i; | 155 | int i; |
| 76 | 156 | ||
| 157 | drm_vblank_cleanup(dev); | ||
| 158 | |||
| 77 | if (disp->vblank) { | 159 | if (disp->vblank) { |
| 78 | for (i = 0; i < dev->mode_config.num_crtc; i++) | 160 | for (i = 0; i < dev->mode_config.num_crtc; i++) |
| 79 | nouveau_event_ref(NULL, &disp->vblank[i]); | 161 | nouveau_event_ref(NULL, &disp->vblank[i]); |
| 80 | kfree(disp->vblank); | 162 | kfree(disp->vblank); |
| 81 | disp->vblank = NULL; | 163 | disp->vblank = NULL; |
| 82 | } | 164 | } |
| 83 | |||
| 84 | drm_vblank_cleanup(dev); | ||
| 85 | } | 165 | } |
| 86 | 166 | ||
| 87 | static int | 167 | static int |
| @@ -407,10 +487,31 @@ nouveau_display_create(struct drm_device *dev) | |||
| 407 | drm_kms_helper_poll_disable(dev); | 487 | drm_kms_helper_poll_disable(dev); |
| 408 | 488 | ||
| 409 | if (drm->vbios.dcb.entries) { | 489 | if (drm->vbios.dcb.entries) { |
| 410 | if (nv_device(drm->device)->card_type < NV_50) | 490 | static const u16 oclass[] = { |
| 411 | ret = nv04_display_create(dev); | 491 | NVF0_DISP_CLASS, |
| 412 | else | 492 | NVE0_DISP_CLASS, |
| 413 | ret = nv50_display_create(dev); | 493 | NVD0_DISP_CLASS, |
| 494 | NVA3_DISP_CLASS, | ||
| 495 | NV94_DISP_CLASS, | ||
| 496 | NVA0_DISP_CLASS, | ||
| 497 | NV84_DISP_CLASS, | ||
| 498 | NV50_DISP_CLASS, | ||
| 499 | NV04_DISP_CLASS, | ||
| 500 | }; | ||
| 501 | int i; | ||
| 502 | |||
| 503 | for (i = 0, ret = -ENODEV; ret && i < ARRAY_SIZE(oclass); i++) { | ||
| 504 | ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE, | ||
| 505 | NVDRM_DISPLAY, oclass[i], | ||
| 506 | NULL, 0, &disp->core); | ||
| 507 | } | ||
| 508 | |||
| 509 | if (ret == 0) { | ||
| 510 | if (nv_mclass(disp->core) < NV50_DISP_CLASS) | ||
| 511 | ret = nv04_display_create(dev); | ||
| 512 | else | ||
| 513 | ret = nv50_display_create(dev); | ||
| 514 | } | ||
| 414 | } else { | 515 | } else { |
| 415 | ret = 0; | 516 | ret = 0; |
| 416 | } | 517 | } |
| @@ -439,6 +540,7 @@ void | |||
| 439 | nouveau_display_destroy(struct drm_device *dev) | 540 | nouveau_display_destroy(struct drm_device *dev) |
| 440 | { | 541 | { |
| 441 | struct nouveau_display *disp = nouveau_display(dev); | 542 | struct nouveau_display *disp = nouveau_display(dev); |
| 543 | struct nouveau_drm *drm = nouveau_drm(dev); | ||
| 442 | 544 | ||
| 443 | nouveau_backlight_exit(dev); | 545 | nouveau_backlight_exit(dev); |
| 444 | nouveau_display_vblank_fini(dev); | 546 | nouveau_display_vblank_fini(dev); |
| @@ -449,6 +551,8 @@ nouveau_display_destroy(struct drm_device *dev) | |||
| 449 | if (disp->dtor) | 551 | if (disp->dtor) |
| 450 | disp->dtor(dev); | 552 | disp->dtor(dev); |
| 451 | 553 | ||
| 554 | nouveau_object_del(nv_object(drm), NVDRM_DEVICE, NVDRM_DISPLAY); | ||
| 555 | |||
| 452 | nouveau_drm(dev)->display = NULL; | 556 | nouveau_drm(dev)->display = NULL; |
| 453 | kfree(disp); | 557 | kfree(disp); |
| 454 | } | 558 | } |
| @@ -603,6 +707,14 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
| 603 | if (!s) | 707 | if (!s) |
| 604 | return -ENOMEM; | 708 | return -ENOMEM; |
| 605 | 709 | ||
| 710 | if (new_bo != old_bo) { | ||
| 711 | ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM); | ||
| 712 | if (ret) | ||
| 713 | goto fail_free; | ||
| 714 | } | ||
| 715 | |||
| 716 | mutex_lock(&chan->cli->mutex); | ||
| 717 | |||
| 606 | /* synchronise rendering channel with the kernel's channel */ | 718 | /* synchronise rendering channel with the kernel's channel */ |
| 607 | spin_lock(&new_bo->bo.bdev->fence_lock); | 719 | spin_lock(&new_bo->bo.bdev->fence_lock); |
| 608 | fence = nouveau_fence_ref(new_bo->bo.sync_obj); | 720 | fence = nouveau_fence_ref(new_bo->bo.sync_obj); |
| @@ -610,15 +722,8 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
| 610 | ret = nouveau_fence_sync(fence, chan); | 722 | ret = nouveau_fence_sync(fence, chan); |
| 611 | nouveau_fence_unref(&fence); | 723 | nouveau_fence_unref(&fence); |
| 612 | if (ret) | 724 | if (ret) |
| 613 | goto fail_free; | 725 | goto fail_unpin; |
| 614 | |||
| 615 | if (new_bo != old_bo) { | ||
| 616 | ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM); | ||
| 617 | if (ret) | ||
| 618 | goto fail_free; | ||
| 619 | } | ||
| 620 | 726 | ||
| 621 | mutex_lock(&chan->cli->mutex); | ||
| 622 | ret = ttm_bo_reserve(&old_bo->bo, true, false, false, NULL); | 727 | ret = ttm_bo_reserve(&old_bo->bo, true, false, false, NULL); |
| 623 | if (ret) | 728 | if (ret) |
| 624 | goto fail_unpin; | 729 | goto fail_unpin; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h index 8bc8bab90e8d..a71cf77e55b2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.h +++ b/drivers/gpu/drm/nouveau/nouveau_display.h | |||
| @@ -36,6 +36,7 @@ struct nouveau_display { | |||
| 36 | int (*init)(struct drm_device *); | 36 | int (*init)(struct drm_device *); |
| 37 | void (*fini)(struct drm_device *); | 37 | void (*fini)(struct drm_device *); |
| 38 | 38 | ||
| 39 | struct nouveau_object *core; | ||
| 39 | struct nouveau_eventh **vblank; | 40 | struct nouveau_eventh **vblank; |
| 40 | 41 | ||
| 41 | struct drm_property *dithering_mode; | 42 | struct drm_property *dithering_mode; |
| @@ -63,6 +64,10 @@ void nouveau_display_repin(struct drm_device *dev); | |||
| 63 | void nouveau_display_resume(struct drm_device *dev); | 64 | void nouveau_display_resume(struct drm_device *dev); |
| 64 | int nouveau_display_vblank_enable(struct drm_device *, int); | 65 | int nouveau_display_vblank_enable(struct drm_device *, int); |
| 65 | void nouveau_display_vblank_disable(struct drm_device *, int); | 66 | void nouveau_display_vblank_disable(struct drm_device *, int); |
| 67 | int nouveau_display_scanoutpos(struct drm_device *, int, unsigned int, | ||
| 68 | int *, int *, ktime_t *, ktime_t *); | ||
| 69 | int nouveau_display_vblstamp(struct drm_device *, int, int *, | ||
| 70 | struct timeval *, unsigned); | ||
| 66 | 71 | ||
| 67 | int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, | 72 | int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, |
| 68 | struct drm_pending_vblank_event *event, | 73 | struct drm_pending_vblank_event *event, |
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c index 40f91e1e5842..c177272152e2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dma.c +++ b/drivers/gpu/drm/nouveau/nouveau_dma.c | |||
| @@ -100,7 +100,7 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo, | |||
| 100 | 100 | ||
| 101 | chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max; | 101 | chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max; |
| 102 | 102 | ||
| 103 | DRM_MEMORYBARRIER(); | 103 | mb(); |
| 104 | /* Flush writes. */ | 104 | /* Flush writes. */ |
| 105 | nouveau_bo_rd32(pb, 0); | 105 | nouveau_bo_rd32(pb, 0); |
| 106 | 106 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h index 984004d66a6d..dc0e0c5cadb4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dma.h +++ b/drivers/gpu/drm/nouveau/nouveau_dma.h | |||
| @@ -155,7 +155,7 @@ BEGIN_IMC0(struct nouveau_channel *chan, int subc, int mthd, u16 data) | |||
| 155 | } | 155 | } |
| 156 | 156 | ||
| 157 | #define WRITE_PUT(val) do { \ | 157 | #define WRITE_PUT(val) do { \ |
| 158 | DRM_MEMORYBARRIER(); \ | 158 | mb(); \ |
| 159 | nouveau_bo_rd32(chan->push.buffer, 0); \ | 159 | nouveau_bo_rd32(chan->push.buffer, 0); \ |
| 160 | nv_wo32(chan->object, chan->user_put, ((val) << 2) + chan->push.vma.offset); \ | 160 | nv_wo32(chan->object, chan->user_put, ((val) << 2) + chan->push.vma.offset); \ |
| 161 | } while (0) | 161 | } while (0) |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 98a22e6e27a1..78c8e7146d56 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c | |||
| @@ -503,19 +503,21 @@ nouveau_do_suspend(struct drm_device *dev) | |||
| 503 | if (drm->cechan) { | 503 | if (drm->cechan) { |
| 504 | ret = nouveau_channel_idle(drm->cechan); | 504 | ret = nouveau_channel_idle(drm->cechan); |
| 505 | if (ret) | 505 | if (ret) |
| 506 | return ret; | 506 | goto fail_display; |
| 507 | } | 507 | } |
| 508 | 508 | ||
| 509 | if (drm->channel) { | 509 | if (drm->channel) { |
| 510 | ret = nouveau_channel_idle(drm->channel); | 510 | ret = nouveau_channel_idle(drm->channel); |
| 511 | if (ret) | 511 | if (ret) |
| 512 | return ret; | 512 | goto fail_display; |
| 513 | } | 513 | } |
| 514 | 514 | ||
| 515 | NV_INFO(drm, "suspending client object trees...\n"); | 515 | NV_INFO(drm, "suspending client object trees...\n"); |
| 516 | if (drm->fence && nouveau_fence(drm)->suspend) { | 516 | if (drm->fence && nouveau_fence(drm)->suspend) { |
| 517 | if (!nouveau_fence(drm)->suspend(drm)) | 517 | if (!nouveau_fence(drm)->suspend(drm)) { |
| 518 | return -ENOMEM; | 518 | ret = -ENOMEM; |
| 519 | goto fail_display; | ||
| 520 | } | ||
| 519 | } | 521 | } |
| 520 | 522 | ||
| 521 | list_for_each_entry(cli, &drm->clients, head) { | 523 | list_for_each_entry(cli, &drm->clients, head) { |
| @@ -537,6 +539,10 @@ fail_client: | |||
| 537 | nouveau_client_init(&cli->base); | 539 | nouveau_client_init(&cli->base); |
| 538 | } | 540 | } |
| 539 | 541 | ||
| 542 | if (drm->fence && nouveau_fence(drm)->resume) | ||
| 543 | nouveau_fence(drm)->resume(drm); | ||
| 544 | |||
| 545 | fail_display: | ||
| 540 | if (dev->mode_config.num_crtc) { | 546 | if (dev->mode_config.num_crtc) { |
| 541 | NV_INFO(drm, "resuming display...\n"); | 547 | NV_INFO(drm, "resuming display...\n"); |
| 542 | nouveau_display_resume(dev); | 548 | nouveau_display_resume(dev); |
| @@ -798,6 +804,8 @@ driver = { | |||
| 798 | .get_vblank_counter = drm_vblank_count, | 804 | .get_vblank_counter = drm_vblank_count, |
| 799 | .enable_vblank = nouveau_display_vblank_enable, | 805 | .enable_vblank = nouveau_display_vblank_enable, |
| 800 | .disable_vblank = nouveau_display_vblank_disable, | 806 | .disable_vblank = nouveau_display_vblank_disable, |
| 807 | .get_scanout_position = nouveau_display_scanoutpos, | ||
| 808 | .get_vblank_timestamp = nouveau_display_vblstamp, | ||
| 801 | 809 | ||
| 802 | .ioctls = nouveau_ioctls, | 810 | .ioctls = nouveau_ioctls, |
| 803 | .num_ioctls = ARRAY_SIZE(nouveau_ioctls), | 811 | .num_ioctls = ARRAY_SIZE(nouveau_ioctls), |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h index 4b0fb6c66be9..23ca7a517246 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.h +++ b/drivers/gpu/drm/nouveau/nouveau_drm.h | |||
| @@ -54,6 +54,7 @@ enum nouveau_drm_handle { | |||
| 54 | NVDRM_CLIENT = 0xffffffff, | 54 | NVDRM_CLIENT = 0xffffffff, |
| 55 | NVDRM_DEVICE = 0xdddddddd, | 55 | NVDRM_DEVICE = 0xdddddddd, |
| 56 | NVDRM_CONTROL = 0xdddddddc, | 56 | NVDRM_CONTROL = 0xdddddddc, |
| 57 | NVDRM_DISPLAY = 0xd1500000, | ||
| 57 | NVDRM_PUSH = 0xbbbb0000, /* |= client chid */ | 58 | NVDRM_PUSH = 0xbbbb0000, /* |= client chid */ |
| 58 | NVDRM_CHAN = 0xcccc0000, /* |= client chid */ | 59 | NVDRM_CHAN = 0xcccc0000, /* |= client chid */ |
| 59 | NVDRM_NVSW = 0x55550000, | 60 | NVDRM_NVSW = 0x55550000, |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 40cf52e6d6d2..90074d620e31 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c | |||
| @@ -143,7 +143,7 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan) | |||
| 143 | int ret; | 143 | int ret; |
| 144 | 144 | ||
| 145 | fence->channel = chan; | 145 | fence->channel = chan; |
| 146 | fence->timeout = jiffies + (15 * DRM_HZ); | 146 | fence->timeout = jiffies + (15 * HZ); |
| 147 | fence->sequence = ++fctx->sequence; | 147 | fence->sequence = ++fctx->sequence; |
| 148 | 148 | ||
| 149 | ret = fctx->emit(fence); | 149 | ret = fctx->emit(fence); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 78a27f8ad7d9..27c3fd89e8ce 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c | |||
| @@ -463,12 +463,6 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli, | |||
| 463 | list_for_each_entry(nvbo, list, entry) { | 463 | list_for_each_entry(nvbo, list, entry) { |
| 464 | struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index]; | 464 | struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index]; |
| 465 | 465 | ||
| 466 | ret = validate_sync(chan, nvbo); | ||
| 467 | if (unlikely(ret)) { | ||
| 468 | NV_ERROR(cli, "fail pre-validate sync\n"); | ||
| 469 | return ret; | ||
| 470 | } | ||
| 471 | |||
| 472 | ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains, | 466 | ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains, |
| 473 | b->write_domains, | 467 | b->write_domains, |
| 474 | b->valid_domains); | 468 | b->valid_domains); |
| @@ -506,7 +500,7 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli, | |||
| 506 | b->presumed.valid = 0; | 500 | b->presumed.valid = 0; |
| 507 | relocs++; | 501 | relocs++; |
| 508 | 502 | ||
| 509 | if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed, | 503 | if (copy_to_user(&upbbo[nvbo->pbbo_index].presumed, |
| 510 | &b->presumed, sizeof(b->presumed))) | 504 | &b->presumed, sizeof(b->presumed))) |
| 511 | return -EFAULT; | 505 | return -EFAULT; |
| 512 | } | 506 | } |
| @@ -593,7 +587,7 @@ u_memcpya(uint64_t user, unsigned nmemb, unsigned size) | |||
| 593 | if (!mem) | 587 | if (!mem) |
| 594 | return ERR_PTR(-ENOMEM); | 588 | return ERR_PTR(-ENOMEM); |
| 595 | 589 | ||
| 596 | if (DRM_COPY_FROM_USER(mem, userptr, size)) { | 590 | if (copy_from_user(mem, userptr, size)) { |
| 597 | u_free(mem); | 591 | u_free(mem); |
| 598 | return ERR_PTR(-EFAULT); | 592 | return ERR_PTR(-EFAULT); |
| 599 | } | 593 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index 0843ebc910d4..a4d22e5eb176 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c | |||
| @@ -31,16 +31,17 @@ nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem) | |||
| 31 | { | 31 | { |
| 32 | struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; | 32 | struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; |
| 33 | struct nouveau_mem *node = mem->mm_node; | 33 | struct nouveau_mem *node = mem->mm_node; |
| 34 | u64 size = mem->num_pages << 12; | ||
| 35 | 34 | ||
| 36 | if (ttm->sg) { | 35 | if (ttm->sg) { |
| 37 | node->sg = ttm->sg; | 36 | node->sg = ttm->sg; |
| 38 | nouveau_vm_map_sg_table(&node->vma[0], 0, size, node); | 37 | node->pages = NULL; |
| 39 | } else { | 38 | } else { |
| 39 | node->sg = NULL; | ||
| 40 | node->pages = nvbe->ttm.dma_address; | 40 | node->pages = nvbe->ttm.dma_address; |
| 41 | nouveau_vm_map_sg(&node->vma[0], 0, size, node); | ||
| 42 | } | 41 | } |
| 42 | node->size = (mem->num_pages << PAGE_SHIFT) >> 12; | ||
| 43 | 43 | ||
| 44 | nouveau_vm_map(&node->vma[0], node); | ||
| 44 | nvbe->node = node; | 45 | nvbe->node = node; |
| 45 | return 0; | 46 | return 0; |
| 46 | } | 47 | } |
| @@ -67,9 +68,13 @@ nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem) | |||
| 67 | 68 | ||
| 68 | /* noop: bound in move_notify() */ | 69 | /* noop: bound in move_notify() */ |
| 69 | if (ttm->sg) { | 70 | if (ttm->sg) { |
| 70 | node->sg = ttm->sg; | 71 | node->sg = ttm->sg; |
| 71 | } else | 72 | node->pages = NULL; |
| 73 | } else { | ||
| 74 | node->sg = NULL; | ||
| 72 | node->pages = nvbe->ttm.dma_address; | 75 | node->pages = nvbe->ttm.dma_address; |
| 76 | } | ||
| 77 | node->size = (mem->num_pages << PAGE_SHIFT) >> 12; | ||
| 73 | return 0; | 78 | return 0; |
| 74 | } | 79 | } |
| 75 | 80 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index 19e3757291fb..d45d50da978f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c | |||
| @@ -171,6 +171,7 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man, | |||
| 171 | node = kzalloc(sizeof(*node), GFP_KERNEL); | 171 | node = kzalloc(sizeof(*node), GFP_KERNEL); |
| 172 | if (!node) | 172 | if (!node) |
| 173 | return -ENOMEM; | 173 | return -ENOMEM; |
| 174 | |||
| 174 | node->page_shift = 12; | 175 | node->page_shift = 12; |
| 175 | 176 | ||
| 176 | switch (nv_device(drm->device)->card_type) { | 177 | switch (nv_device(drm->device)->card_type) { |
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 4e384a2f99c3..2dccafc6e9db 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c | |||
| @@ -1035,6 +1035,7 @@ static bool | |||
| 1035 | nv50_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode, | 1035 | nv50_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode, |
| 1036 | struct drm_display_mode *adjusted_mode) | 1036 | struct drm_display_mode *adjusted_mode) |
| 1037 | { | 1037 | { |
| 1038 | drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); | ||
| 1038 | return true; | 1039 | return true; |
| 1039 | } | 1040 | } |
| 1040 | 1041 | ||
| @@ -2199,16 +2200,6 @@ nv50_display_destroy(struct drm_device *dev) | |||
| 2199 | int | 2200 | int |
| 2200 | nv50_display_create(struct drm_device *dev) | 2201 | nv50_display_create(struct drm_device *dev) |
| 2201 | { | 2202 | { |
| 2202 | static const u16 oclass[] = { | ||
| 2203 | NVF0_DISP_CLASS, | ||
| 2204 | NVE0_DISP_CLASS, | ||
| 2205 | NVD0_DISP_CLASS, | ||
| 2206 | NVA3_DISP_CLASS, | ||
| 2207 | NV94_DISP_CLASS, | ||
| 2208 | NVA0_DISP_CLASS, | ||
| 2209 | NV84_DISP_CLASS, | ||
| 2210 | NV50_DISP_CLASS, | ||
| 2211 | }; | ||
| 2212 | struct nouveau_device *device = nouveau_dev(dev); | 2203 | struct nouveau_device *device = nouveau_dev(dev); |
| 2213 | struct nouveau_drm *drm = nouveau_drm(dev); | 2204 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 2214 | struct dcb_table *dcb = &drm->vbios.dcb; | 2205 | struct dcb_table *dcb = &drm->vbios.dcb; |
| @@ -2225,6 +2216,7 @@ nv50_display_create(struct drm_device *dev) | |||
| 2225 | nouveau_display(dev)->dtor = nv50_display_destroy; | 2216 | nouveau_display(dev)->dtor = nv50_display_destroy; |
| 2226 | nouveau_display(dev)->init = nv50_display_init; | 2217 | nouveau_display(dev)->init = nv50_display_init; |
| 2227 | nouveau_display(dev)->fini = nv50_display_fini; | 2218 | nouveau_display(dev)->fini = nv50_display_fini; |
| 2219 | disp->core = nouveau_display(dev)->core; | ||
| 2228 | 2220 | ||
| 2229 | /* small shared memory area we use for notifiers and semaphores */ | 2221 | /* small shared memory area we use for notifiers and semaphores */ |
| 2230 | ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, | 2222 | ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, |
| @@ -2243,17 +2235,6 @@ nv50_display_create(struct drm_device *dev) | |||
| 2243 | if (ret) | 2235 | if (ret) |
| 2244 | goto out; | 2236 | goto out; |
| 2245 | 2237 | ||
| 2246 | /* attempt to allocate a supported evo display class */ | ||
| 2247 | ret = -ENODEV; | ||
| 2248 | for (i = 0; ret && i < ARRAY_SIZE(oclass); i++) { | ||
| 2249 | ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE, | ||
| 2250 | 0xd1500000, oclass[i], NULL, 0, | ||
| 2251 | &disp->core); | ||
| 2252 | } | ||
| 2253 | |||
| 2254 | if (ret) | ||
| 2255 | goto out; | ||
| 2256 | |||
| 2257 | /* allocate master evo channel */ | 2238 | /* allocate master evo channel */ |
| 2258 | ret = nv50_dmac_create(disp->core, NV50_DISP_MAST_CLASS, 0, | 2239 | ret = nv50_dmac_create(disp->core, NV50_DISP_MAST_CLASS, 0, |
| 2259 | &(struct nv50_display_mast_class) { | 2240 | &(struct nv50_display_mast_class) { |
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c index 0fd2eb139f6e..4313bb0a49a6 100644 --- a/drivers/gpu/drm/omapdrm/omap_crtc.c +++ b/drivers/gpu/drm/omapdrm/omap_crtc.c | |||
| @@ -411,7 +411,7 @@ static void omap_crtc_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus) | |||
| 411 | struct drm_crtc *crtc = &omap_crtc->base; | 411 | struct drm_crtc *crtc = &omap_crtc->base; |
| 412 | DRM_ERROR("%s: errors: %08x\n", omap_crtc->name, irqstatus); | 412 | DRM_ERROR("%s: errors: %08x\n", omap_crtc->name, irqstatus); |
| 413 | /* avoid getting in a flood, unregister the irq until next vblank */ | 413 | /* avoid getting in a flood, unregister the irq until next vblank */ |
| 414 | omap_irq_unregister(crtc->dev, &omap_crtc->error_irq); | 414 | __omap_irq_unregister(crtc->dev, &omap_crtc->error_irq); |
| 415 | } | 415 | } |
| 416 | 416 | ||
| 417 | static void omap_crtc_apply_irq(struct omap_drm_irq *irq, uint32_t irqstatus) | 417 | static void omap_crtc_apply_irq(struct omap_drm_irq *irq, uint32_t irqstatus) |
| @@ -421,13 +421,13 @@ static void omap_crtc_apply_irq(struct omap_drm_irq *irq, uint32_t irqstatus) | |||
| 421 | struct drm_crtc *crtc = &omap_crtc->base; | 421 | struct drm_crtc *crtc = &omap_crtc->base; |
| 422 | 422 | ||
| 423 | if (!omap_crtc->error_irq.registered) | 423 | if (!omap_crtc->error_irq.registered) |
| 424 | omap_irq_register(crtc->dev, &omap_crtc->error_irq); | 424 | __omap_irq_register(crtc->dev, &omap_crtc->error_irq); |
| 425 | 425 | ||
| 426 | if (!dispc_mgr_go_busy(omap_crtc->channel)) { | 426 | if (!dispc_mgr_go_busy(omap_crtc->channel)) { |
| 427 | struct omap_drm_private *priv = | 427 | struct omap_drm_private *priv = |
| 428 | crtc->dev->dev_private; | 428 | crtc->dev->dev_private; |
| 429 | DBG("%s: apply done", omap_crtc->name); | 429 | DBG("%s: apply done", omap_crtc->name); |
| 430 | omap_irq_unregister(crtc->dev, &omap_crtc->apply_irq); | 430 | __omap_irq_unregister(crtc->dev, &omap_crtc->apply_irq); |
| 431 | queue_work(priv->wq, &omap_crtc->apply_work); | 431 | queue_work(priv->wq, &omap_crtc->apply_work); |
| 432 | } | 432 | } |
| 433 | } | 433 | } |
| @@ -623,6 +623,11 @@ void omap_crtc_pre_init(void) | |||
| 623 | dss_install_mgr_ops(&mgr_ops); | 623 | dss_install_mgr_ops(&mgr_ops); |
| 624 | } | 624 | } |
| 625 | 625 | ||
| 626 | void omap_crtc_pre_uninit(void) | ||
| 627 | { | ||
| 628 | dss_uninstall_mgr_ops(); | ||
| 629 | } | ||
| 630 | |||
| 626 | /* initialize crtc */ | 631 | /* initialize crtc */ |
| 627 | struct drm_crtc *omap_crtc_init(struct drm_device *dev, | 632 | struct drm_crtc *omap_crtc_init(struct drm_device *dev, |
| 628 | struct drm_plane *plane, enum omap_channel channel, int id) | 633 | struct drm_plane *plane, enum omap_channel channel, int id) |
diff --git a/drivers/gpu/drm/omapdrm/omap_debugfs.c b/drivers/gpu/drm/omapdrm/omap_debugfs.c index c27f59da7f29..d4c04d69fc4d 100644 --- a/drivers/gpu/drm/omapdrm/omap_debugfs.c +++ b/drivers/gpu/drm/omapdrm/omap_debugfs.c | |||
| @@ -48,7 +48,7 @@ static int mm_show(struct seq_file *m, void *arg) | |||
| 48 | { | 48 | { |
| 49 | struct drm_info_node *node = (struct drm_info_node *) m->private; | 49 | struct drm_info_node *node = (struct drm_info_node *) m->private; |
| 50 | struct drm_device *dev = node->minor->dev; | 50 | struct drm_device *dev = node->minor->dev; |
| 51 | return drm_mm_dump_table(m, dev->mm_private); | 51 | return drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm); |
| 52 | } | 52 | } |
| 53 | 53 | ||
| 54 | static int fb_show(struct seq_file *m, void *arg) | 54 | static int fb_show(struct seq_file *m, void *arg) |
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c index 701c4c10e08b..f926b4caf449 100644 --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c | |||
| @@ -969,12 +969,21 @@ static const struct dev_pm_ops omap_dmm_pm_ops = { | |||
| 969 | }; | 969 | }; |
| 970 | #endif | 970 | #endif |
| 971 | 971 | ||
| 972 | #if defined(CONFIG_OF) | ||
| 973 | static const struct of_device_id dmm_of_match[] = { | ||
| 974 | { .compatible = "ti,omap4-dmm", }, | ||
| 975 | { .compatible = "ti,omap5-dmm", }, | ||
| 976 | {}, | ||
| 977 | }; | ||
| 978 | #endif | ||
| 979 | |||
| 972 | struct platform_driver omap_dmm_driver = { | 980 | struct platform_driver omap_dmm_driver = { |
| 973 | .probe = omap_dmm_probe, | 981 | .probe = omap_dmm_probe, |
| 974 | .remove = omap_dmm_remove, | 982 | .remove = omap_dmm_remove, |
| 975 | .driver = { | 983 | .driver = { |
| 976 | .owner = THIS_MODULE, | 984 | .owner = THIS_MODULE, |
| 977 | .name = DMM_DRIVER_NAME, | 985 | .name = DMM_DRIVER_NAME, |
| 986 | .of_match_table = of_match_ptr(dmm_of_match), | ||
| 978 | #ifdef CONFIG_PM | 987 | #ifdef CONFIG_PM |
| 979 | .pm = &omap_dmm_pm_ops, | 988 | .pm = &omap_dmm_pm_ops, |
| 980 | #endif | 989 | #endif |
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index e7fa3cd96743..bf39fcc49e0f 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c | |||
| @@ -86,6 +86,47 @@ static bool channel_used(struct drm_device *dev, enum omap_channel channel) | |||
| 86 | 86 | ||
| 87 | return false; | 87 | return false; |
| 88 | } | 88 | } |
| 89 | static void omap_disconnect_dssdevs(void) | ||
| 90 | { | ||
| 91 | struct omap_dss_device *dssdev = NULL; | ||
| 92 | |||
| 93 | for_each_dss_dev(dssdev) | ||
| 94 | dssdev->driver->disconnect(dssdev); | ||
| 95 | } | ||
| 96 | |||
| 97 | static int omap_connect_dssdevs(void) | ||
| 98 | { | ||
| 99 | int r; | ||
| 100 | struct omap_dss_device *dssdev = NULL; | ||
| 101 | bool no_displays = true; | ||
| 102 | |||
| 103 | for_each_dss_dev(dssdev) { | ||
| 104 | r = dssdev->driver->connect(dssdev); | ||
| 105 | if (r == -EPROBE_DEFER) { | ||
| 106 | omap_dss_put_device(dssdev); | ||
| 107 | goto cleanup; | ||
| 108 | } else if (r) { | ||
| 109 | dev_warn(dssdev->dev, "could not connect display: %s\n", | ||
| 110 | dssdev->name); | ||
| 111 | } else { | ||
| 112 | no_displays = false; | ||
| 113 | } | ||
| 114 | } | ||
| 115 | |||
| 116 | if (no_displays) | ||
| 117 | return -EPROBE_DEFER; | ||
| 118 | |||
| 119 | return 0; | ||
| 120 | |||
| 121 | cleanup: | ||
| 122 | /* | ||
| 123 | * if we are deferring probe, we disconnect the devices we previously | ||
| 124 | * connected | ||
| 125 | */ | ||
| 126 | omap_disconnect_dssdevs(); | ||
| 127 | |||
| 128 | return r; | ||
| 129 | } | ||
| 89 | 130 | ||
| 90 | static int omap_modeset_init(struct drm_device *dev) | 131 | static int omap_modeset_init(struct drm_device *dev) |
| 91 | { | 132 | { |
| @@ -95,9 +136,6 @@ static int omap_modeset_init(struct drm_device *dev) | |||
| 95 | int num_mgrs = dss_feat_get_num_mgrs(); | 136 | int num_mgrs = dss_feat_get_num_mgrs(); |
| 96 | int num_crtcs; | 137 | int num_crtcs; |
| 97 | int i, id = 0; | 138 | int i, id = 0; |
| 98 | int r; | ||
| 99 | |||
| 100 | omap_crtc_pre_init(); | ||
| 101 | 139 | ||
| 102 | drm_mode_config_init(dev); | 140 | drm_mode_config_init(dev); |
| 103 | 141 | ||
| @@ -119,26 +157,8 @@ static int omap_modeset_init(struct drm_device *dev) | |||
| 119 | enum omap_channel channel; | 157 | enum omap_channel channel; |
| 120 | struct omap_overlay_manager *mgr; | 158 | struct omap_overlay_manager *mgr; |
| 121 | 159 | ||
| 122 | if (!dssdev->driver) { | 160 | if (!omapdss_device_is_connected(dssdev)) |
| 123 | dev_warn(dev->dev, "%s has no driver.. skipping it\n", | ||
| 124 | dssdev->name); | ||
| 125 | continue; | ||
| 126 | } | ||
| 127 | |||
| 128 | if (!(dssdev->driver->get_timings || | ||
| 129 | dssdev->driver->read_edid)) { | ||
| 130 | dev_warn(dev->dev, "%s driver does not support " | ||
| 131 | "get_timings or read_edid.. skipping it!\n", | ||
| 132 | dssdev->name); | ||
| 133 | continue; | ||
| 134 | } | ||
| 135 | |||
| 136 | r = dssdev->driver->connect(dssdev); | ||
| 137 | if (r) { | ||
| 138 | dev_err(dev->dev, "could not connect display: %s\n", | ||
| 139 | dssdev->name); | ||
| 140 | continue; | 161 | continue; |
| 141 | } | ||
| 142 | 162 | ||
| 143 | encoder = omap_encoder_init(dev, dssdev); | 163 | encoder = omap_encoder_init(dev, dssdev); |
| 144 | 164 | ||
| @@ -497,16 +517,16 @@ static int dev_unload(struct drm_device *dev) | |||
| 497 | DBG("unload: dev=%p", dev); | 517 | DBG("unload: dev=%p", dev); |
| 498 | 518 | ||
| 499 | drm_kms_helper_poll_fini(dev); | 519 | drm_kms_helper_poll_fini(dev); |
| 500 | drm_vblank_cleanup(dev); | ||
| 501 | omap_drm_irq_uninstall(dev); | ||
| 502 | 520 | ||
| 503 | omap_fbdev_free(dev); | 521 | omap_fbdev_free(dev); |
| 504 | omap_modeset_free(dev); | 522 | omap_modeset_free(dev); |
| 505 | omap_gem_deinit(dev); | 523 | omap_gem_deinit(dev); |
| 506 | 524 | ||
| 507 | flush_workqueue(priv->wq); | ||
| 508 | destroy_workqueue(priv->wq); | 525 | destroy_workqueue(priv->wq); |
| 509 | 526 | ||
| 527 | drm_vblank_cleanup(dev); | ||
| 528 | omap_drm_irq_uninstall(dev); | ||
| 529 | |||
| 510 | kfree(dev->dev_private); | 530 | kfree(dev->dev_private); |
| 511 | dev->dev_private = NULL; | 531 | dev->dev_private = NULL; |
| 512 | 532 | ||
| @@ -655,9 +675,19 @@ static void pdev_shutdown(struct platform_device *device) | |||
| 655 | 675 | ||
| 656 | static int pdev_probe(struct platform_device *device) | 676 | static int pdev_probe(struct platform_device *device) |
| 657 | { | 677 | { |
| 678 | int r; | ||
| 679 | |||
| 658 | if (omapdss_is_initialized() == false) | 680 | if (omapdss_is_initialized() == false) |
| 659 | return -EPROBE_DEFER; | 681 | return -EPROBE_DEFER; |
| 660 | 682 | ||
| 683 | omap_crtc_pre_init(); | ||
| 684 | |||
| 685 | r = omap_connect_dssdevs(); | ||
| 686 | if (r) { | ||
| 687 | omap_crtc_pre_uninit(); | ||
| 688 | return r; | ||
| 689 | } | ||
| 690 | |||
| 661 | DBG("%s", device->name); | 691 | DBG("%s", device->name); |
| 662 | return drm_platform_init(&omap_drm_driver, device); | 692 | return drm_platform_init(&omap_drm_driver, device); |
| 663 | } | 693 | } |
| @@ -665,9 +695,11 @@ static int pdev_probe(struct platform_device *device) | |||
| 665 | static int pdev_remove(struct platform_device *device) | 695 | static int pdev_remove(struct platform_device *device) |
| 666 | { | 696 | { |
| 667 | DBG(""); | 697 | DBG(""); |
| 668 | drm_platform_exit(&omap_drm_driver, device); | ||
| 669 | 698 | ||
| 670 | platform_driver_unregister(&omap_dmm_driver); | 699 | omap_disconnect_dssdevs(); |
| 700 | omap_crtc_pre_uninit(); | ||
| 701 | |||
| 702 | drm_put_dev(platform_get_drvdata(device)); | ||
| 671 | return 0; | 703 | return 0; |
| 672 | } | 704 | } |
| 673 | 705 | ||
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h index 07847693cf49..428b2981fd68 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.h +++ b/drivers/gpu/drm/omapdrm/omap_drv.h | |||
| @@ -141,10 +141,12 @@ int omap_gem_resume(struct device *dev); | |||
| 141 | 141 | ||
| 142 | int omap_irq_enable_vblank(struct drm_device *dev, int crtc_id); | 142 | int omap_irq_enable_vblank(struct drm_device *dev, int crtc_id); |
| 143 | void omap_irq_disable_vblank(struct drm_device *dev, int crtc_id); | 143 | void omap_irq_disable_vblank(struct drm_device *dev, int crtc_id); |
| 144 | irqreturn_t omap_irq_handler(DRM_IRQ_ARGS); | 144 | irqreturn_t omap_irq_handler(int irq, void *arg); |
| 145 | void omap_irq_preinstall(struct drm_device *dev); | 145 | void omap_irq_preinstall(struct drm_device *dev); |
| 146 | int omap_irq_postinstall(struct drm_device *dev); | 146 | int omap_irq_postinstall(struct drm_device *dev); |
| 147 | void omap_irq_uninstall(struct drm_device *dev); | 147 | void omap_irq_uninstall(struct drm_device *dev); |
| 148 | void __omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq); | ||
| 149 | void __omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq); | ||
| 148 | void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq); | 150 | void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq); |
| 149 | void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq); | 151 | void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq); |
| 150 | int omap_drm_irq_uninstall(struct drm_device *dev); | 152 | int omap_drm_irq_uninstall(struct drm_device *dev); |
| @@ -158,6 +160,7 @@ enum omap_channel omap_crtc_channel(struct drm_crtc *crtc); | |||
| 158 | int omap_crtc_apply(struct drm_crtc *crtc, | 160 | int omap_crtc_apply(struct drm_crtc *crtc, |
| 159 | struct omap_drm_apply *apply); | 161 | struct omap_drm_apply *apply); |
| 160 | void omap_crtc_pre_init(void); | 162 | void omap_crtc_pre_init(void); |
| 163 | void omap_crtc_pre_uninit(void); | ||
| 161 | struct drm_crtc *omap_crtc_init(struct drm_device *dev, | 164 | struct drm_crtc *omap_crtc_init(struct drm_device *dev, |
| 162 | struct drm_plane *plane, enum omap_channel channel, int id); | 165 | struct drm_plane *plane, enum omap_channel channel, int id); |
| 163 | 166 | ||
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c index 6a12e899235b..5290a88c681d 100644 --- a/drivers/gpu/drm/omapdrm/omap_encoder.c +++ b/drivers/gpu/drm/omapdrm/omap_encoder.c | |||
| @@ -51,6 +51,9 @@ struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder) | |||
| 51 | static void omap_encoder_destroy(struct drm_encoder *encoder) | 51 | static void omap_encoder_destroy(struct drm_encoder *encoder) |
| 52 | { | 52 | { |
| 53 | struct omap_encoder *omap_encoder = to_omap_encoder(encoder); | 53 | struct omap_encoder *omap_encoder = to_omap_encoder(encoder); |
| 54 | |||
| 55 | omap_encoder_set_enabled(encoder, false); | ||
| 56 | |||
| 54 | drm_encoder_cleanup(encoder); | 57 | drm_encoder_cleanup(encoder); |
| 55 | kfree(omap_encoder); | 58 | kfree(omap_encoder); |
| 56 | } | 59 | } |
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c index f2b8f0668c0c..f466c4aaee94 100644 --- a/drivers/gpu/drm/omapdrm/omap_fb.c +++ b/drivers/gpu/drm/omapdrm/omap_fb.c | |||
| @@ -123,12 +123,16 @@ static int omap_framebuffer_dirty(struct drm_framebuffer *fb, | |||
| 123 | { | 123 | { |
| 124 | int i; | 124 | int i; |
| 125 | 125 | ||
| 126 | drm_modeset_lock_all(fb->dev); | ||
| 127 | |||
| 126 | for (i = 0; i < num_clips; i++) { | 128 | for (i = 0; i < num_clips; i++) { |
| 127 | omap_framebuffer_flush(fb, clips[i].x1, clips[i].y1, | 129 | omap_framebuffer_flush(fb, clips[i].x1, clips[i].y1, |
| 128 | clips[i].x2 - clips[i].x1, | 130 | clips[i].x2 - clips[i].x1, |
| 129 | clips[i].y2 - clips[i].y1); | 131 | clips[i].y2 - clips[i].y1); |
| 130 | } | 132 | } |
| 131 | 133 | ||
| 134 | drm_modeset_unlock_all(fb->dev); | ||
| 135 | |||
| 132 | return 0; | 136 | return 0; |
| 133 | } | 137 | } |
| 134 | 138 | ||
diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c index cb858600185f..f035d2bceae7 100644 --- a/drivers/gpu/drm/omapdrm/omap_irq.c +++ b/drivers/gpu/drm/omapdrm/omap_irq.c | |||
| @@ -45,12 +45,11 @@ static void omap_irq_update(struct drm_device *dev) | |||
| 45 | dispc_read_irqenable(); /* flush posted write */ | 45 | dispc_read_irqenable(); /* flush posted write */ |
| 46 | } | 46 | } |
| 47 | 47 | ||
| 48 | void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq) | 48 | void __omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq) |
| 49 | { | 49 | { |
| 50 | struct omap_drm_private *priv = dev->dev_private; | 50 | struct omap_drm_private *priv = dev->dev_private; |
| 51 | unsigned long flags; | 51 | unsigned long flags; |
| 52 | 52 | ||
| 53 | dispc_runtime_get(); | ||
| 54 | spin_lock_irqsave(&list_lock, flags); | 53 | spin_lock_irqsave(&list_lock, flags); |
| 55 | 54 | ||
| 56 | if (!WARN_ON(irq->registered)) { | 55 | if (!WARN_ON(irq->registered)) { |
| @@ -60,14 +59,21 @@ void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq) | |||
| 60 | } | 59 | } |
| 61 | 60 | ||
| 62 | spin_unlock_irqrestore(&list_lock, flags); | 61 | spin_unlock_irqrestore(&list_lock, flags); |
| 62 | } | ||
| 63 | |||
| 64 | void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq) | ||
| 65 | { | ||
| 66 | dispc_runtime_get(); | ||
| 67 | |||
| 68 | __omap_irq_register(dev, irq); | ||
| 69 | |||
| 63 | dispc_runtime_put(); | 70 | dispc_runtime_put(); |
| 64 | } | 71 | } |
| 65 | 72 | ||
| 66 | void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq) | 73 | void __omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq) |
| 67 | { | 74 | { |
| 68 | unsigned long flags; | 75 | unsigned long flags; |
| 69 | 76 | ||
| 70 | dispc_runtime_get(); | ||
| 71 | spin_lock_irqsave(&list_lock, flags); | 77 | spin_lock_irqsave(&list_lock, flags); |
| 72 | 78 | ||
| 73 | if (!WARN_ON(!irq->registered)) { | 79 | if (!WARN_ON(!irq->registered)) { |
| @@ -77,6 +83,14 @@ void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq) | |||
| 77 | } | 83 | } |
| 78 | 84 | ||
| 79 | spin_unlock_irqrestore(&list_lock, flags); | 85 | spin_unlock_irqrestore(&list_lock, flags); |
| 86 | } | ||
| 87 | |||
| 88 | void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq) | ||
| 89 | { | ||
| 90 | dispc_runtime_get(); | ||
| 91 | |||
| 92 | __omap_irq_unregister(dev, irq); | ||
| 93 | |||
| 80 | dispc_runtime_put(); | 94 | dispc_runtime_put(); |
| 81 | } | 95 | } |
| 82 | 96 | ||
| @@ -173,7 +187,7 @@ void omap_irq_disable_vblank(struct drm_device *dev, int crtc_id) | |||
| 173 | dispc_runtime_put(); | 187 | dispc_runtime_put(); |
| 174 | } | 188 | } |
| 175 | 189 | ||
| 176 | irqreturn_t omap_irq_handler(DRM_IRQ_ARGS) | 190 | irqreturn_t omap_irq_handler(int irq, void *arg) |
| 177 | { | 191 | { |
| 178 | struct drm_device *dev = (struct drm_device *) arg; | 192 | struct drm_device *dev = (struct drm_device *) arg; |
| 179 | struct omap_drm_private *priv = dev->dev_private; | 193 | struct omap_drm_private *priv = dev->dev_private; |
| @@ -308,7 +322,7 @@ int omap_drm_irq_uninstall(struct drm_device *dev) | |||
| 308 | if (dev->num_crtcs) { | 322 | if (dev->num_crtcs) { |
| 309 | spin_lock_irqsave(&dev->vbl_lock, irqflags); | 323 | spin_lock_irqsave(&dev->vbl_lock, irqflags); |
| 310 | for (i = 0; i < dev->num_crtcs; i++) { | 324 | for (i = 0; i < dev->num_crtcs; i++) { |
| 311 | DRM_WAKEUP(&dev->vblank[i].queue); | 325 | wake_up(&dev->vblank[i].queue); |
| 312 | dev->vblank[i].enabled = false; | 326 | dev->vblank[i].enabled = false; |
| 313 | dev->vblank[i].last = | 327 | dev->vblank[i].last = |
| 314 | dev->driver->get_vblank_counter(dev, i); | 328 | dev->driver->get_vblank_counter(dev, i); |
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig new file mode 100644 index 000000000000..3e0f13d1bc84 --- /dev/null +++ b/drivers/gpu/drm/panel/Kconfig | |||
| @@ -0,0 +1,19 @@ | |||
| 1 | config DRM_PANEL | ||
| 2 | bool | ||
| 3 | depends on DRM | ||
| 4 | help | ||
| 5 | Panel registration and lookup framework. | ||
| 6 | |||
| 7 | menu "Display Panels" | ||
| 8 | depends on DRM_PANEL | ||
| 9 | |||
| 10 | config DRM_PANEL_SIMPLE | ||
| 11 | tristate "support for simple panels" | ||
| 12 | depends on OF | ||
| 13 | help | ||
| 14 | DRM panel driver for dumb panels that need at most a regulator and | ||
| 15 | a GPIO to be powered up. Optionally a backlight can be attached so | ||
| 16 | that it can be automatically turned off when the panel goes into a | ||
| 17 | low power state. | ||
| 18 | |||
| 19 | endmenu | ||
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile new file mode 100644 index 000000000000..af9dfa235b94 --- /dev/null +++ b/drivers/gpu/drm/panel/Makefile | |||
| @@ -0,0 +1 @@ | |||
| obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o | |||
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c new file mode 100644 index 000000000000..59d52ca2c67f --- /dev/null +++ b/drivers/gpu/drm/panel/panel-simple.c | |||
| @@ -0,0 +1,548 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2013, NVIDIA Corporation. All rights reserved. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sub license, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice (including the | ||
| 12 | * next paragraph) shall be included in all copies or substantial portions | ||
| 13 | * of the Software. | ||
| 14 | * | ||
| 15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 17 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
| 18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
| 19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
| 20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
| 21 | * DEALINGS IN THE SOFTWARE. | ||
| 22 | */ | ||
| 23 | |||
| 24 | #include <linux/backlight.h> | ||
| 25 | #include <linux/gpio.h> | ||
| 26 | #include <linux/module.h> | ||
| 27 | #include <linux/of_gpio.h> | ||
| 28 | #include <linux/of_platform.h> | ||
| 29 | #include <linux/platform_device.h> | ||
| 30 | #include <linux/regulator/consumer.h> | ||
| 31 | |||
| 32 | #include <drm/drmP.h> | ||
| 33 | #include <drm/drm_crtc.h> | ||
| 34 | #include <drm/drm_mipi_dsi.h> | ||
| 35 | #include <drm/drm_panel.h> | ||
| 36 | |||
| 37 | struct panel_desc { | ||
| 38 | const struct drm_display_mode *modes; | ||
| 39 | unsigned int num_modes; | ||
| 40 | |||
| 41 | struct { | ||
| 42 | unsigned int width; | ||
| 43 | unsigned int height; | ||
| 44 | } size; | ||
| 45 | }; | ||
| 46 | |||
| 47 | /* TODO: convert to gpiod_*() API once it's been merged */ | ||
| 48 | #define GPIO_ACTIVE_LOW (1 << 0) | ||
| 49 | |||
| 50 | struct panel_simple { | ||
| 51 | struct drm_panel base; | ||
| 52 | bool enabled; | ||
| 53 | |||
| 54 | const struct panel_desc *desc; | ||
| 55 | |||
| 56 | struct backlight_device *backlight; | ||
| 57 | struct regulator *supply; | ||
| 58 | struct i2c_adapter *ddc; | ||
| 59 | |||
| 60 | unsigned long enable_gpio_flags; | ||
| 61 | int enable_gpio; | ||
| 62 | }; | ||
| 63 | |||
| 64 | static inline struct panel_simple *to_panel_simple(struct drm_panel *panel) | ||
| 65 | { | ||
| 66 | return container_of(panel, struct panel_simple, base); | ||
| 67 | } | ||
| 68 | |||
| 69 | static int panel_simple_get_fixed_modes(struct panel_simple *panel) | ||
| 70 | { | ||
| 71 | struct drm_connector *connector = panel->base.connector; | ||
| 72 | struct drm_device *drm = panel->base.drm; | ||
| 73 | struct drm_display_mode *mode; | ||
| 74 | unsigned int i, num = 0; | ||
| 75 | |||
| 76 | if (!panel->desc) | ||
| 77 | return 0; | ||
| 78 | |||
| 79 | for (i = 0; i < panel->desc->num_modes; i++) { | ||
| 80 | const struct drm_display_mode *m = &panel->desc->modes[i]; | ||
| 81 | |||
| 82 | mode = drm_mode_duplicate(drm, m); | ||
| 83 | if (!mode) { | ||
| 84 | dev_err(drm->dev, "failed to add mode %ux%u@%u\n", | ||
| 85 | m->hdisplay, m->vdisplay, m->vrefresh); | ||
| 86 | continue; | ||
| 87 | } | ||
| 88 | |||
| 89 | drm_mode_set_name(mode); | ||
| 90 | |||
| 91 | drm_mode_probed_add(connector, mode); | ||
| 92 | num++; | ||
| 93 | } | ||
| 94 | |||
| 95 | connector->display_info.width_mm = panel->desc->size.width; | ||
| 96 | connector->display_info.height_mm = panel->desc->size.height; | ||
| 97 | |||
| 98 | return num; | ||
| 99 | } | ||
| 100 | |||
| 101 | static int panel_simple_disable(struct drm_panel *panel) | ||
| 102 | { | ||
| 103 | struct panel_simple *p = to_panel_simple(panel); | ||
| 104 | |||
| 105 | if (!p->enabled) | ||
| 106 | return 0; | ||
| 107 | |||
| 108 | if (p->backlight) { | ||
| 109 | p->backlight->props.power = FB_BLANK_POWERDOWN; | ||
| 110 | backlight_update_status(p->backlight); | ||
| 111 | } | ||
| 112 | |||
| 113 | if (gpio_is_valid(p->enable_gpio)) { | ||
| 114 | if (p->enable_gpio_flags & GPIO_ACTIVE_LOW) | ||
| 115 | gpio_set_value(p->enable_gpio, 1); | ||
| 116 | else | ||
| 117 | gpio_set_value(p->enable_gpio, 0); | ||
| 118 | } | ||
| 119 | |||
| 120 | regulator_disable(p->supply); | ||
| 121 | p->enabled = false; | ||
| 122 | |||
| 123 | return 0; | ||
| 124 | } | ||
| 125 | |||
| 126 | static int panel_simple_enable(struct drm_panel *panel) | ||
| 127 | { | ||
| 128 | struct panel_simple *p = to_panel_simple(panel); | ||
| 129 | int err; | ||
| 130 | |||
| 131 | if (p->enabled) | ||
| 132 | return 0; | ||
| 133 | |||
| 134 | err = regulator_enable(p->supply); | ||
| 135 | if (err < 0) { | ||
| 136 | dev_err(panel->dev, "failed to enable supply: %d\n", err); | ||
| 137 | return err; | ||
| 138 | } | ||
| 139 | |||
| 140 | if (gpio_is_valid(p->enable_gpio)) { | ||
| 141 | if (p->enable_gpio_flags & GPIO_ACTIVE_LOW) | ||
| 142 | gpio_set_value(p->enable_gpio, 0); | ||
| 143 | else | ||
| 144 | gpio_set_value(p->enable_gpio, 1); | ||
| 145 | } | ||
| 146 | |||
| 147 | if (p->backlight) { | ||
| 148 | p->backlight->props.power = FB_BLANK_UNBLANK; | ||
| 149 | backlight_update_status(p->backlight); | ||
| 150 | } | ||
| 151 | |||
| 152 | p->enabled = true; | ||
| 153 | |||
| 154 | return 0; | ||
| 155 | } | ||
| 156 | |||
| 157 | static int panel_simple_get_modes(struct drm_panel *panel) | ||
| 158 | { | ||
| 159 | struct panel_simple *p = to_panel_simple(panel); | ||
| 160 | int num = 0; | ||
| 161 | |||
| 162 | /* probe EDID if a DDC bus is available */ | ||
| 163 | if (p->ddc) { | ||
| 164 | struct edid *edid = drm_get_edid(panel->connector, p->ddc); | ||
| 165 | drm_mode_connector_update_edid_property(panel->connector, edid); | ||
| 166 | if (edid) { | ||
| 167 | num += drm_add_edid_modes(panel->connector, edid); | ||
| 168 | kfree(edid); | ||
| 169 | } | ||
| 170 | } | ||
| 171 | |||
| 172 | /* add hard-coded panel modes */ | ||
| 173 | num += panel_simple_get_fixed_modes(p); | ||
| 174 | |||
| 175 | return num; | ||
| 176 | } | ||
| 177 | |||
| 178 | static const struct drm_panel_funcs panel_simple_funcs = { | ||
| 179 | .disable = panel_simple_disable, | ||
| 180 | .enable = panel_simple_enable, | ||
| 181 | .get_modes = panel_simple_get_modes, | ||
| 182 | }; | ||
| 183 | |||
| 184 | static int panel_simple_probe(struct device *dev, const struct panel_desc *desc) | ||
| 185 | { | ||
| 186 | struct device_node *backlight, *ddc; | ||
| 187 | struct panel_simple *panel; | ||
| 188 | enum of_gpio_flags flags; | ||
| 189 | int err; | ||
| 190 | |||
| 191 | panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL); | ||
| 192 | if (!panel) | ||
| 193 | return -ENOMEM; | ||
| 194 | |||
| 195 | panel->enabled = false; | ||
| 196 | panel->desc = desc; | ||
| 197 | |||
| 198 | panel->supply = devm_regulator_get(dev, "power"); | ||
| 199 | if (IS_ERR(panel->supply)) | ||
| 200 | return PTR_ERR(panel->supply); | ||
| 201 | |||
| 202 | panel->enable_gpio = of_get_named_gpio_flags(dev->of_node, | ||
| 203 | "enable-gpios", 0, | ||
| 204 | &flags); | ||
| 205 | if (gpio_is_valid(panel->enable_gpio)) { | ||
| 206 | unsigned int value; | ||
| 207 | |||
| 208 | if (flags & OF_GPIO_ACTIVE_LOW) | ||
| 209 | panel->enable_gpio_flags |= GPIO_ACTIVE_LOW; | ||
| 210 | |||
| 211 | err = gpio_request(panel->enable_gpio, "enable"); | ||
| 212 | if (err < 0) { | ||
| 213 | dev_err(dev, "failed to request GPIO#%u: %d\n", | ||
| 214 | panel->enable_gpio, err); | ||
| 215 | return err; | ||
| 216 | } | ||
| 217 | |||
| 218 | value = (panel->enable_gpio_flags & GPIO_ACTIVE_LOW) != 0; | ||
| 219 | |||
| 220 | err = gpio_direction_output(panel->enable_gpio, value); | ||
| 221 | if (err < 0) { | ||
| 222 | dev_err(dev, "failed to setup GPIO%u: %d\n", | ||
| 223 | panel->enable_gpio, err); | ||
| 224 | goto free_gpio; | ||
| 225 | } | ||
| 226 | } | ||
| 227 | |||
| 228 | backlight = of_parse_phandle(dev->of_node, "backlight", 0); | ||
| 229 | if (backlight) { | ||
| 230 | panel->backlight = of_find_backlight_by_node(backlight); | ||
| 231 | of_node_put(backlight); | ||
| 232 | |||
| 233 | if (!panel->backlight) { | ||
| 234 | err = -EPROBE_DEFER; | ||
| 235 | goto free_gpio; | ||
| 236 | } | ||
| 237 | } | ||
| 238 | |||
| 239 | ddc = of_parse_phandle(dev->of_node, "ddc-i2c-bus", 0); | ||
| 240 | if (ddc) { | ||
| 241 | panel->ddc = of_find_i2c_adapter_by_node(ddc); | ||
| 242 | of_node_put(ddc); | ||
| 243 | |||
| 244 | if (!panel->ddc) { | ||
| 245 | err = -EPROBE_DEFER; | ||
| 246 | goto free_backlight; | ||
| 247 | } | ||
| 248 | } | ||
| 249 | |||
| 250 | drm_panel_init(&panel->base); | ||
| 251 | panel->base.dev = dev; | ||
| 252 | panel->base.funcs = &panel_simple_funcs; | ||
| 253 | |||
| 254 | err = drm_panel_add(&panel->base); | ||
| 255 | if (err < 0) | ||
| 256 | goto free_ddc; | ||
| 257 | |||
| 258 | dev_set_drvdata(dev, panel); | ||
| 259 | |||
| 260 | return 0; | ||
| 261 | |||
| 262 | free_ddc: | ||
| 263 | if (panel->ddc) | ||
| 264 | put_device(&panel->ddc->dev); | ||
| 265 | free_backlight: | ||
| 266 | if (panel->backlight) | ||
| 267 | put_device(&panel->backlight->dev); | ||
| 268 | free_gpio: | ||
| 269 | if (gpio_is_valid(panel->enable_gpio)) | ||
| 270 | gpio_free(panel->enable_gpio); | ||
| 271 | |||
| 272 | return err; | ||
| 273 | } | ||
| 274 | |||
| 275 | static int panel_simple_remove(struct device *dev) | ||
| 276 | { | ||
| 277 | struct panel_simple *panel = dev_get_drvdata(dev); | ||
| 278 | |||
| 279 | drm_panel_detach(&panel->base); | ||
| 280 | drm_panel_remove(&panel->base); | ||
| 281 | |||
| 282 | panel_simple_disable(&panel->base); | ||
| 283 | |||
| 284 | if (panel->ddc) | ||
| 285 | put_device(&panel->ddc->dev); | ||
| 286 | |||
| 287 | if (panel->backlight) | ||
| 288 | put_device(&panel->backlight->dev); | ||
| 289 | |||
| 290 | if (gpio_is_valid(panel->enable_gpio)) | ||
| 291 | gpio_free(panel->enable_gpio); | ||
| 292 | |||
| 293 | regulator_disable(panel->supply); | ||
| 294 | |||
| 295 | return 0; | ||
| 296 | } | ||
| 297 | |||
| 298 | static const struct drm_display_mode auo_b101aw03_mode = { | ||
| 299 | .clock = 51450, | ||
| 300 | .hdisplay = 1024, | ||
| 301 | .hsync_start = 1024 + 156, | ||
| 302 | .hsync_end = 1024 + 156 + 8, | ||
| 303 | .htotal = 1024 + 156 + 8 + 156, | ||
| 304 | .vdisplay = 600, | ||
| 305 | .vsync_start = 600 + 16, | ||
| 306 | .vsync_end = 600 + 16 + 6, | ||
| 307 | .vtotal = 600 + 16 + 6 + 16, | ||
| 308 | .vrefresh = 60, | ||
| 309 | }; | ||
| 310 | |||
| 311 | static const struct panel_desc auo_b101aw03 = { | ||
| 312 | .modes = &auo_b101aw03_mode, | ||
| 313 | .num_modes = 1, | ||
| 314 | .size = { | ||
| 315 | .width = 223, | ||
| 316 | .height = 125, | ||
| 317 | }, | ||
| 318 | }; | ||
| 319 | |||
| 320 | static const struct drm_display_mode chunghwa_claa101wa01a_mode = { | ||
| 321 | .clock = 72070, | ||
| 322 | .hdisplay = 1366, | ||
| 323 | .hsync_start = 1366 + 58, | ||
| 324 | .hsync_end = 1366 + 58 + 58, | ||
| 325 | .htotal = 1366 + 58 + 58 + 58, | ||
| 326 | .vdisplay = 768, | ||
| 327 | .vsync_start = 768 + 4, | ||
| 328 | .vsync_end = 768 + 4 + 4, | ||
| 329 | .vtotal = 768 + 4 + 4 + 4, | ||
| 330 | .vrefresh = 60, | ||
| 331 | }; | ||
| 332 | |||
| 333 | static const struct panel_desc chunghwa_claa101wa01a = { | ||
| 334 | .modes = &chunghwa_claa101wa01a_mode, | ||
| 335 | .num_modes = 1, | ||
| 336 | .size = { | ||
| 337 | .width = 220, | ||
| 338 | .height = 120, | ||
| 339 | }, | ||
| 340 | }; | ||
| 341 | |||
| 342 | static const struct drm_display_mode chunghwa_claa101wb01_mode = { | ||
| 343 | .clock = 69300, | ||
| 344 | .hdisplay = 1366, | ||
| 345 | .hsync_start = 1366 + 48, | ||
| 346 | .hsync_end = 1366 + 48 + 32, | ||
| 347 | .htotal = 1366 + 48 + 32 + 20, | ||
| 348 | .vdisplay = 768, | ||
| 349 | .vsync_start = 768 + 16, | ||
| 350 | .vsync_end = 768 + 16 + 8, | ||
| 351 | .vtotal = 768 + 16 + 8 + 16, | ||
| 352 | .vrefresh = 60, | ||
| 353 | }; | ||
| 354 | |||
| 355 | static const struct panel_desc chunghwa_claa101wb01 = { | ||
| 356 | .modes = &chunghwa_claa101wb01_mode, | ||
| 357 | .num_modes = 1, | ||
| 358 | .size = { | ||
| 359 | .width = 223, | ||
| 360 | .height = 125, | ||
| 361 | }, | ||
| 362 | }; | ||
| 363 | |||
| 364 | static const struct drm_display_mode samsung_ltn101nt05_mode = { | ||
| 365 | .clock = 54030, | ||
| 366 | .hdisplay = 1024, | ||
| 367 | .hsync_start = 1024 + 24, | ||
| 368 | .hsync_end = 1024 + 24 + 136, | ||
| 369 | .htotal = 1024 + 24 + 136 + 160, | ||
| 370 | .vdisplay = 600, | ||
| 371 | .vsync_start = 600 + 3, | ||
| 372 | .vsync_end = 600 + 3 + 6, | ||
| 373 | .vtotal = 600 + 3 + 6 + 61, | ||
| 374 | .vrefresh = 60, | ||
| 375 | }; | ||
| 376 | |||
| 377 | static const struct panel_desc samsung_ltn101nt05 = { | ||
| 378 | .modes = &samsung_ltn101nt05_mode, | ||
| 379 | .num_modes = 1, | ||
| 380 | .size = { | ||
| 381 | .width = 1024, | ||
| 382 | .height = 600, | ||
| 383 | }, | ||
| 384 | }; | ||
| 385 | |||
| 386 | static const struct of_device_id platform_of_match[] = { | ||
| 387 | { | ||
| 388 | .compatible = "auo,b101aw03", | ||
| 389 | .data = &auo_b101aw03, | ||
| 390 | }, { | ||
| 391 | .compatible = "chunghwa,claa101wa01a", | ||
| 392 | .data = &chunghwa_claa101wa01a | ||
| 393 | }, { | ||
| 394 | .compatible = "chunghwa,claa101wb01", | ||
| 395 | .data = &chunghwa_claa101wb01 | ||
| 396 | }, { | ||
| 397 | .compatible = "samsung,ltn101nt05", | ||
| 398 | .data = &samsung_ltn101nt05, | ||
| 399 | }, { | ||
| 400 | .compatible = "simple-panel", | ||
| 401 | }, { | ||
| 402 | /* sentinel */ | ||
| 403 | } | ||
| 404 | }; | ||
| 405 | MODULE_DEVICE_TABLE(of, platform_of_match); | ||
| 406 | |||
| 407 | static int panel_simple_platform_probe(struct platform_device *pdev) | ||
| 408 | { | ||
| 409 | const struct of_device_id *id; | ||
| 410 | |||
| 411 | id = of_match_node(platform_of_match, pdev->dev.of_node); | ||
| 412 | if (!id) | ||
| 413 | return -ENODEV; | ||
| 414 | |||
| 415 | return panel_simple_probe(&pdev->dev, id->data); | ||
| 416 | } | ||
| 417 | |||
| 418 | static int panel_simple_platform_remove(struct platform_device *pdev) | ||
| 419 | { | ||
| 420 | return panel_simple_remove(&pdev->dev); | ||
| 421 | } | ||
| 422 | |||
| 423 | static struct platform_driver panel_simple_platform_driver = { | ||
| 424 | .driver = { | ||
| 425 | .name = "panel-simple", | ||
| 426 | .owner = THIS_MODULE, | ||
| 427 | .of_match_table = platform_of_match, | ||
| 428 | }, | ||
| 429 | .probe = panel_simple_platform_probe, | ||
| 430 | .remove = panel_simple_platform_remove, | ||
| 431 | }; | ||
| 432 | |||
| 433 | struct panel_desc_dsi { | ||
| 434 | struct panel_desc desc; | ||
| 435 | |||
| 436 | enum mipi_dsi_pixel_format format; | ||
| 437 | unsigned int lanes; | ||
| 438 | }; | ||
| 439 | |||
| 440 | static const struct drm_display_mode panasonic_vvx10f004b00_mode = { | ||
| 441 | .clock = 157200, | ||
| 442 | .hdisplay = 1920, | ||
| 443 | .hsync_start = 1920 + 154, | ||
| 444 | .hsync_end = 1920 + 154 + 16, | ||
| 445 | .htotal = 1920 + 154 + 16 + 32, | ||
| 446 | .vdisplay = 1200, | ||
| 447 | .vsync_start = 1200 + 17, | ||
| 448 | .vsync_end = 1200 + 17 + 2, | ||
| 449 | .vtotal = 1200 + 17 + 2 + 16, | ||
| 450 | .vrefresh = 60, | ||
| 451 | }; | ||
| 452 | |||
| 453 | static const struct panel_desc_dsi panasonic_vvx10f004b00 = { | ||
| 454 | .desc = { | ||
| 455 | .modes = &panasonic_vvx10f004b00_mode, | ||
| 456 | .num_modes = 1, | ||
| 457 | .size = { | ||
| 458 | .width = 217, | ||
| 459 | .height = 136, | ||
| 460 | }, | ||
| 461 | }, | ||
| 462 | .format = MIPI_DSI_FMT_RGB888, | ||
| 463 | .lanes = 4, | ||
| 464 | }; | ||
| 465 | |||
| 466 | static const struct of_device_id dsi_of_match[] = { | ||
| 467 | { | ||
| 468 | .compatible = "panasonic,vvx10f004b00", | ||
| 469 | .data = &panasonic_vvx10f004b00 | ||
| 470 | }, { | ||
| 471 | /* sentinel */ | ||
| 472 | } | ||
| 473 | }; | ||
| 474 | MODULE_DEVICE_TABLE(of, dsi_of_match); | ||
| 475 | |||
| 476 | static int panel_simple_dsi_probe(struct mipi_dsi_device *dsi) | ||
| 477 | { | ||
| 478 | const struct panel_desc_dsi *desc; | ||
| 479 | const struct of_device_id *id; | ||
| 480 | int err; | ||
| 481 | |||
| 482 | id = of_match_node(dsi_of_match, dsi->dev.of_node); | ||
| 483 | if (!id) | ||
| 484 | return -ENODEV; | ||
| 485 | |||
| 486 | desc = id->data; | ||
| 487 | |||
| 488 | err = panel_simple_probe(&dsi->dev, &desc->desc); | ||
| 489 | if (err < 0) | ||
| 490 | return err; | ||
| 491 | |||
| 492 | dsi->format = desc->format; | ||
| 493 | dsi->lanes = desc->lanes; | ||
| 494 | |||
| 495 | return mipi_dsi_attach(dsi); | ||
| 496 | } | ||
| 497 | |||
| 498 | static int panel_simple_dsi_remove(struct mipi_dsi_device *dsi) | ||
| 499 | { | ||
| 500 | int err; | ||
| 501 | |||
| 502 | err = mipi_dsi_detach(dsi); | ||
| 503 | if (err < 0) | ||
| 504 | dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err); | ||
| 505 | |||
| 506 | return panel_simple_remove(&dsi->dev); | ||
| 507 | } | ||
| 508 | |||
| 509 | static struct mipi_dsi_driver panel_simple_dsi_driver = { | ||
| 510 | .driver = { | ||
| 511 | .name = "panel-simple-dsi", | ||
| 512 | .owner = THIS_MODULE, | ||
| 513 | .of_match_table = dsi_of_match, | ||
| 514 | }, | ||
| 515 | .probe = panel_simple_dsi_probe, | ||
| 516 | .remove = panel_simple_dsi_remove, | ||
| 517 | }; | ||
| 518 | |||
| 519 | static int __init panel_simple_init(void) | ||
| 520 | { | ||
| 521 | int err; | ||
| 522 | |||
| 523 | err = platform_driver_register(&panel_simple_platform_driver); | ||
| 524 | if (err < 0) | ||
| 525 | return err; | ||
| 526 | |||
| 527 | if (IS_ENABLED(CONFIG_DRM_MIPI_DSI)) { | ||
| 528 | err = mipi_dsi_driver_register(&panel_simple_dsi_driver); | ||
| 529 | if (err < 0) | ||
| 530 | return err; | ||
| 531 | } | ||
| 532 | |||
| 533 | return 0; | ||
| 534 | } | ||
| 535 | module_init(panel_simple_init); | ||
| 536 | |||
| 537 | static void __exit panel_simple_exit(void) | ||
| 538 | { | ||
| 539 | if (IS_ENABLED(CONFIG_DRM_MIPI_DSI)) | ||
| 540 | mipi_dsi_driver_unregister(&panel_simple_dsi_driver); | ||
| 541 | |||
| 542 | platform_driver_unregister(&panel_simple_platform_driver); | ||
| 543 | } | ||
| 544 | module_exit(panel_simple_exit); | ||
| 545 | |||
| 546 | MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>"); | ||
| 547 | MODULE_DESCRIPTION("DRM Driver for Simple Panels"); | ||
| 548 | MODULE_LICENSE("GPL and additional rights"); | ||
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index d70aafb83307..798bde2e5881 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c | |||
| @@ -399,10 +399,14 @@ static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb, | |||
| 399 | struct qxl_bo *qobj; | 399 | struct qxl_bo *qobj; |
| 400 | int inc = 1; | 400 | int inc = 1; |
| 401 | 401 | ||
| 402 | drm_modeset_lock_all(fb->dev); | ||
| 403 | |||
| 402 | qobj = gem_to_qxl_bo(qxl_fb->obj); | 404 | qobj = gem_to_qxl_bo(qxl_fb->obj); |
| 403 | /* if we aren't primary surface ignore this */ | 405 | /* if we aren't primary surface ignore this */ |
| 404 | if (!qobj->is_primary) | 406 | if (!qobj->is_primary) { |
| 407 | drm_modeset_unlock_all(fb->dev); | ||
| 405 | return 0; | 408 | return 0; |
| 409 | } | ||
| 406 | 410 | ||
| 407 | if (!num_clips) { | 411 | if (!num_clips) { |
| 408 | num_clips = 1; | 412 | num_clips = 1; |
| @@ -417,6 +421,9 @@ static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb, | |||
| 417 | 421 | ||
| 418 | qxl_draw_dirty_fb(qdev, qxl_fb, qobj, flags, color, | 422 | qxl_draw_dirty_fb(qdev, qxl_fb, qobj, flags, color, |
| 419 | clips, num_clips, inc); | 423 | clips, num_clips, inc); |
| 424 | |||
| 425 | drm_modeset_unlock_all(fb->dev); | ||
| 426 | |||
| 420 | return 0; | 427 | return 0; |
| 421 | } | 428 | } |
| 422 | 429 | ||
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 7bda32f68d3b..36ed40ba773f 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h | |||
| @@ -534,7 +534,7 @@ void qxl_debugfs_takedown(struct drm_minor *minor); | |||
| 534 | 534 | ||
| 535 | /* qxl_irq.c */ | 535 | /* qxl_irq.c */ |
| 536 | int qxl_irq_init(struct qxl_device *qdev); | 536 | int qxl_irq_init(struct qxl_device *qdev); |
| 537 | irqreturn_t qxl_irq_handler(DRM_IRQ_ARGS); | 537 | irqreturn_t qxl_irq_handler(int irq, void *arg); |
| 538 | 538 | ||
| 539 | /* qxl_fb.c */ | 539 | /* qxl_fb.c */ |
| 540 | int qxl_fb_init(struct qxl_device *qdev); | 540 | int qxl_fb_init(struct qxl_device *qdev); |
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c index 7b95c75e9626..0bb86e6d41b4 100644 --- a/drivers/gpu/drm/qxl/qxl_ioctl.c +++ b/drivers/gpu/drm/qxl/qxl_ioctl.c | |||
| @@ -200,7 +200,7 @@ static int qxl_process_single_command(struct qxl_device *qdev, | |||
| 200 | for (i = 0; i < cmd->relocs_num; ++i) { | 200 | for (i = 0; i < cmd->relocs_num; ++i) { |
| 201 | struct drm_qxl_reloc reloc; | 201 | struct drm_qxl_reloc reloc; |
| 202 | 202 | ||
| 203 | if (DRM_COPY_FROM_USER(&reloc, | 203 | if (copy_from_user(&reloc, |
| 204 | &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i], | 204 | &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i], |
| 205 | sizeof(reloc))) { | 205 | sizeof(reloc))) { |
| 206 | ret = -EFAULT; | 206 | ret = -EFAULT; |
| @@ -297,7 +297,7 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data, | |||
| 297 | struct drm_qxl_command *commands = | 297 | struct drm_qxl_command *commands = |
| 298 | (struct drm_qxl_command *)(uintptr_t)execbuffer->commands; | 298 | (struct drm_qxl_command *)(uintptr_t)execbuffer->commands; |
| 299 | 299 | ||
| 300 | if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num], | 300 | if (copy_from_user(&user_cmd, &commands[cmd_num], |
| 301 | sizeof(user_cmd))) | 301 | sizeof(user_cmd))) |
| 302 | return -EFAULT; | 302 | return -EFAULT; |
| 303 | 303 | ||
diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c index 21393dc4700a..28f84b4fce32 100644 --- a/drivers/gpu/drm/qxl/qxl_irq.c +++ b/drivers/gpu/drm/qxl/qxl_irq.c | |||
| @@ -25,7 +25,7 @@ | |||
| 25 | 25 | ||
| 26 | #include "qxl_drv.h" | 26 | #include "qxl_drv.h" |
| 27 | 27 | ||
| 28 | irqreturn_t qxl_irq_handler(DRM_IRQ_ARGS) | 28 | irqreturn_t qxl_irq_handler(int irq, void *arg) |
| 29 | { | 29 | { |
| 30 | struct drm_device *dev = (struct drm_device *) arg; | 30 | struct drm_device *dev = (struct drm_device *) arg; |
| 31 | struct qxl_device *qdev = (struct qxl_device *)dev->dev_private; | 31 | struct qxl_device *qdev = (struct qxl_device *)dev->dev_private; |
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c index e5ca498be920..fd88eb4a3f79 100644 --- a/drivers/gpu/drm/qxl/qxl_kms.c +++ b/drivers/gpu/drm/qxl/qxl_kms.c | |||
| @@ -115,7 +115,7 @@ static void qxl_gc_work(struct work_struct *work) | |||
| 115 | qxl_garbage_collect(qdev); | 115 | qxl_garbage_collect(qdev); |
| 116 | } | 116 | } |
| 117 | 117 | ||
| 118 | int qxl_device_init(struct qxl_device *qdev, | 118 | static int qxl_device_init(struct qxl_device *qdev, |
| 119 | struct drm_device *ddev, | 119 | struct drm_device *ddev, |
| 120 | struct pci_dev *pdev, | 120 | struct pci_dev *pdev, |
| 121 | unsigned long flags) | 121 | unsigned long flags) |
diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c index c451257f08fb..59459fe4e8c5 100644 --- a/drivers/gpu/drm/r128/r128_cce.c +++ b/drivers/gpu/drm/r128/r128_cce.c | |||
| @@ -892,10 +892,10 @@ static int r128_cce_get_buffers(struct drm_device *dev, | |||
| 892 | 892 | ||
| 893 | buf->file_priv = file_priv; | 893 | buf->file_priv = file_priv; |
| 894 | 894 | ||
| 895 | if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx, | 895 | if (copy_to_user(&d->request_indices[i], &buf->idx, |
| 896 | sizeof(buf->idx))) | 896 | sizeof(buf->idx))) |
| 897 | return -EFAULT; | 897 | return -EFAULT; |
| 898 | if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total, | 898 | if (copy_to_user(&d->request_sizes[i], &buf->total, |
| 899 | sizeof(buf->total))) | 899 | sizeof(buf->total))) |
| 900 | return -EFAULT; | 900 | return -EFAULT; |
| 901 | 901 | ||
diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h index 56eb5e3f5439..5bf3f5ff805d 100644 --- a/drivers/gpu/drm/r128/r128_drv.h +++ b/drivers/gpu/drm/r128/r128_drv.h | |||
| @@ -154,7 +154,7 @@ extern int r128_do_cleanup_cce(struct drm_device *dev); | |||
| 154 | extern int r128_enable_vblank(struct drm_device *dev, int crtc); | 154 | extern int r128_enable_vblank(struct drm_device *dev, int crtc); |
| 155 | extern void r128_disable_vblank(struct drm_device *dev, int crtc); | 155 | extern void r128_disable_vblank(struct drm_device *dev, int crtc); |
| 156 | extern u32 r128_get_vblank_counter(struct drm_device *dev, int crtc); | 156 | extern u32 r128_get_vblank_counter(struct drm_device *dev, int crtc); |
| 157 | extern irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS); | 157 | extern irqreturn_t r128_driver_irq_handler(int irq, void *arg); |
| 158 | extern void r128_driver_irq_preinstall(struct drm_device *dev); | 158 | extern void r128_driver_irq_preinstall(struct drm_device *dev); |
| 159 | extern int r128_driver_irq_postinstall(struct drm_device *dev); | 159 | extern int r128_driver_irq_postinstall(struct drm_device *dev); |
| 160 | extern void r128_driver_irq_uninstall(struct drm_device *dev); | 160 | extern void r128_driver_irq_uninstall(struct drm_device *dev); |
| @@ -514,7 +514,7 @@ do { \ | |||
| 514 | if (R128_VERBOSE) \ | 514 | if (R128_VERBOSE) \ |
| 515 | DRM_INFO("COMMIT_RING() tail=0x%06x\n", \ | 515 | DRM_INFO("COMMIT_RING() tail=0x%06x\n", \ |
| 516 | dev_priv->ring.tail); \ | 516 | dev_priv->ring.tail); \ |
| 517 | DRM_MEMORYBARRIER(); \ | 517 | mb(); \ |
| 518 | R128_WRITE(R128_PM4_BUFFER_DL_WPTR, dev_priv->ring.tail); \ | 518 | R128_WRITE(R128_PM4_BUFFER_DL_WPTR, dev_priv->ring.tail); \ |
| 519 | R128_READ(R128_PM4_BUFFER_DL_WPTR); \ | 519 | R128_READ(R128_PM4_BUFFER_DL_WPTR); \ |
| 520 | } while (0) | 520 | } while (0) |
diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c index a954c548201e..b0d0fd3e4376 100644 --- a/drivers/gpu/drm/r128/r128_ioc32.c +++ b/drivers/gpu/drm/r128/r128_ioc32.c | |||
| @@ -33,6 +33,7 @@ | |||
| 33 | 33 | ||
| 34 | #include <drm/drmP.h> | 34 | #include <drm/drmP.h> |
| 35 | #include <drm/r128_drm.h> | 35 | #include <drm/r128_drm.h> |
| 36 | #include "r128_drv.h" | ||
| 36 | 37 | ||
| 37 | typedef struct drm_r128_init32 { | 38 | typedef struct drm_r128_init32 { |
| 38 | int func; | 39 | int func; |
diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c index 2ea4f09d2691..c2ae496babb7 100644 --- a/drivers/gpu/drm/r128/r128_irq.c +++ b/drivers/gpu/drm/r128/r128_irq.c | |||
| @@ -44,7 +44,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc) | |||
| 44 | return atomic_read(&dev_priv->vbl_received); | 44 | return atomic_read(&dev_priv->vbl_received); |
| 45 | } | 45 | } |
| 46 | 46 | ||
| 47 | irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS) | 47 | irqreturn_t r128_driver_irq_handler(int irq, void *arg) |
| 48 | { | 48 | { |
| 49 | struct drm_device *dev = (struct drm_device *) arg; | 49 | struct drm_device *dev = (struct drm_device *) arg; |
| 50 | drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private; | 50 | drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private; |
diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c index 01dd9aef9f0e..e806dacd452f 100644 --- a/drivers/gpu/drm/r128/r128_state.c +++ b/drivers/gpu/drm/r128/r128_state.c | |||
| @@ -895,31 +895,22 @@ static int r128_cce_dispatch_write_span(struct drm_device *dev, | |||
| 895 | if (count > 4096 || count <= 0) | 895 | if (count > 4096 || count <= 0) |
| 896 | return -EMSGSIZE; | 896 | return -EMSGSIZE; |
| 897 | 897 | ||
| 898 | if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) | 898 | if (copy_from_user(&x, depth->x, sizeof(x))) |
| 899 | return -EFAULT; | 899 | return -EFAULT; |
| 900 | if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) | 900 | if (copy_from_user(&y, depth->y, sizeof(y))) |
| 901 | return -EFAULT; | 901 | return -EFAULT; |
| 902 | 902 | ||
| 903 | buffer_size = depth->n * sizeof(u32); | 903 | buffer_size = depth->n * sizeof(u32); |
| 904 | buffer = kmalloc(buffer_size, GFP_KERNEL); | 904 | buffer = memdup_user(depth->buffer, buffer_size); |
| 905 | if (buffer == NULL) | 905 | if (IS_ERR(buffer)) |
| 906 | return -ENOMEM; | 906 | return PTR_ERR(buffer); |
| 907 | if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) { | ||
| 908 | kfree(buffer); | ||
| 909 | return -EFAULT; | ||
| 910 | } | ||
| 911 | 907 | ||
| 912 | mask_size = depth->n * sizeof(u8); | 908 | mask_size = depth->n * sizeof(u8); |
| 913 | if (depth->mask) { | 909 | if (depth->mask) { |
| 914 | mask = kmalloc(mask_size, GFP_KERNEL); | 910 | mask = memdup_user(depth->mask, mask_size); |
| 915 | if (mask == NULL) { | 911 | if (IS_ERR(mask)) { |
| 916 | kfree(buffer); | 912 | kfree(buffer); |
| 917 | return -ENOMEM; | 913 | return PTR_ERR(mask); |
| 918 | } | ||
| 919 | if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) { | ||
| 920 | kfree(buffer); | ||
| 921 | kfree(mask); | ||
| 922 | return -EFAULT; | ||
| 923 | } | 914 | } |
| 924 | 915 | ||
| 925 | for (i = 0; i < count; i++, x++) { | 916 | for (i = 0; i < count; i++, x++) { |
| @@ -999,46 +990,33 @@ static int r128_cce_dispatch_write_pixels(struct drm_device *dev, | |||
| 999 | kfree(x); | 990 | kfree(x); |
| 1000 | return -ENOMEM; | 991 | return -ENOMEM; |
| 1001 | } | 992 | } |
| 1002 | if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) { | 993 | if (copy_from_user(x, depth->x, xbuf_size)) { |
| 1003 | kfree(x); | 994 | kfree(x); |
| 1004 | kfree(y); | 995 | kfree(y); |
| 1005 | return -EFAULT; | 996 | return -EFAULT; |
| 1006 | } | 997 | } |
| 1007 | if (DRM_COPY_FROM_USER(y, depth->y, xbuf_size)) { | 998 | if (copy_from_user(y, depth->y, xbuf_size)) { |
| 1008 | kfree(x); | 999 | kfree(x); |
| 1009 | kfree(y); | 1000 | kfree(y); |
| 1010 | return -EFAULT; | 1001 | return -EFAULT; |
| 1011 | } | 1002 | } |
| 1012 | 1003 | ||
| 1013 | buffer_size = depth->n * sizeof(u32); | 1004 | buffer_size = depth->n * sizeof(u32); |
| 1014 | buffer = kmalloc(buffer_size, GFP_KERNEL); | 1005 | buffer = memdup_user(depth->buffer, buffer_size); |
| 1015 | if (buffer == NULL) { | 1006 | if (IS_ERR(buffer)) { |
| 1016 | kfree(x); | ||
| 1017 | kfree(y); | ||
| 1018 | return -ENOMEM; | ||
| 1019 | } | ||
| 1020 | if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) { | ||
| 1021 | kfree(x); | 1007 | kfree(x); |
| 1022 | kfree(y); | 1008 | kfree(y); |
| 1023 | kfree(buffer); | 1009 | return PTR_ERR(buffer); |
| 1024 | return -EFAULT; | ||
| 1025 | } | 1010 | } |
| 1026 | 1011 | ||
| 1027 | if (depth->mask) { | 1012 | if (depth->mask) { |
| 1028 | mask_size = depth->n * sizeof(u8); | 1013 | mask_size = depth->n * sizeof(u8); |
| 1029 | mask = kmalloc(mask_size, GFP_KERNEL); | 1014 | mask = memdup_user(depth->mask, mask_size); |
| 1030 | if (mask == NULL) { | 1015 | if (IS_ERR(mask)) { |
| 1031 | kfree(x); | ||
| 1032 | kfree(y); | ||
| 1033 | kfree(buffer); | ||
| 1034 | return -ENOMEM; | ||
| 1035 | } | ||
| 1036 | if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) { | ||
| 1037 | kfree(x); | 1016 | kfree(x); |
| 1038 | kfree(y); | 1017 | kfree(y); |
| 1039 | kfree(buffer); | 1018 | kfree(buffer); |
| 1040 | kfree(mask); | 1019 | return PTR_ERR(mask); |
| 1041 | return -EFAULT; | ||
| 1042 | } | 1020 | } |
| 1043 | 1021 | ||
| 1044 | for (i = 0; i < count; i++) { | 1022 | for (i = 0; i < count; i++) { |
| @@ -1107,9 +1085,9 @@ static int r128_cce_dispatch_read_span(struct drm_device *dev, | |||
| 1107 | if (count > 4096 || count <= 0) | 1085 | if (count > 4096 || count <= 0) |
| 1108 | return -EMSGSIZE; | 1086 | return -EMSGSIZE; |
| 1109 | 1087 | ||
| 1110 | if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) | 1088 | if (copy_from_user(&x, depth->x, sizeof(x))) |
| 1111 | return -EFAULT; | 1089 | return -EFAULT; |
| 1112 | if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) | 1090 | if (copy_from_user(&y, depth->y, sizeof(y))) |
| 1113 | return -EFAULT; | 1091 | return -EFAULT; |
| 1114 | 1092 | ||
| 1115 | BEGIN_RING(7); | 1093 | BEGIN_RING(7); |
| @@ -1162,12 +1140,12 @@ static int r128_cce_dispatch_read_pixels(struct drm_device *dev, | |||
| 1162 | kfree(x); | 1140 | kfree(x); |
| 1163 | return -ENOMEM; | 1141 | return -ENOMEM; |
| 1164 | } | 1142 | } |
| 1165 | if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) { | 1143 | if (copy_from_user(x, depth->x, xbuf_size)) { |
| 1166 | kfree(x); | 1144 | kfree(x); |
| 1167 | kfree(y); | 1145 | kfree(y); |
| 1168 | return -EFAULT; | 1146 | return -EFAULT; |
| 1169 | } | 1147 | } |
| 1170 | if (DRM_COPY_FROM_USER(y, depth->y, ybuf_size)) { | 1148 | if (copy_from_user(y, depth->y, ybuf_size)) { |
| 1171 | kfree(x); | 1149 | kfree(x); |
| 1172 | kfree(y); | 1150 | kfree(y); |
| 1173 | return -EFAULT; | 1151 | return -EFAULT; |
| @@ -1524,7 +1502,7 @@ static int r128_cce_stipple(struct drm_device *dev, void *data, struct drm_file | |||
| 1524 | 1502 | ||
| 1525 | DEV_INIT_TEST_WITH_RETURN(dev_priv); | 1503 | DEV_INIT_TEST_WITH_RETURN(dev_priv); |
| 1526 | 1504 | ||
| 1527 | if (DRM_COPY_FROM_USER(&mask, stipple->mask, 32 * sizeof(u32))) | 1505 | if (copy_from_user(&mask, stipple->mask, 32 * sizeof(u32))) |
| 1528 | return -EFAULT; | 1506 | return -EFAULT; |
| 1529 | 1507 | ||
| 1530 | RING_SPACE_TEST_WITH_RETURN(dev_priv); | 1508 | RING_SPACE_TEST_WITH_RETURN(dev_priv); |
| @@ -1622,7 +1600,7 @@ static int r128_getparam(struct drm_device *dev, void *data, struct drm_file *fi | |||
| 1622 | return -EINVAL; | 1600 | return -EINVAL; |
| 1623 | } | 1601 | } |
| 1624 | 1602 | ||
| 1625 | if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) { | 1603 | if (copy_to_user(param->value, &value, sizeof(int))) { |
| 1626 | DRM_ERROR("copy_to_user\n"); | 1604 | DRM_ERROR("copy_to_user\n"); |
| 1627 | return -EFAULT; | 1605 | return -EFAULT; |
| 1628 | } | 1606 | } |
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 0b9621c9aeea..a9338c85630f 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
| @@ -209,6 +209,16 @@ static void atombios_enable_crtc_memreq(struct drm_crtc *crtc, int state) | |||
| 209 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 209 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
| 210 | } | 210 | } |
| 211 | 211 | ||
| 212 | static const u32 vga_control_regs[6] = | ||
| 213 | { | ||
| 214 | AVIVO_D1VGA_CONTROL, | ||
| 215 | AVIVO_D2VGA_CONTROL, | ||
| 216 | EVERGREEN_D3VGA_CONTROL, | ||
| 217 | EVERGREEN_D4VGA_CONTROL, | ||
| 218 | EVERGREEN_D5VGA_CONTROL, | ||
| 219 | EVERGREEN_D6VGA_CONTROL, | ||
| 220 | }; | ||
| 221 | |||
| 212 | static void atombios_blank_crtc(struct drm_crtc *crtc, int state) | 222 | static void atombios_blank_crtc(struct drm_crtc *crtc, int state) |
| 213 | { | 223 | { |
| 214 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | 224 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
| @@ -216,13 +226,23 @@ static void atombios_blank_crtc(struct drm_crtc *crtc, int state) | |||
| 216 | struct radeon_device *rdev = dev->dev_private; | 226 | struct radeon_device *rdev = dev->dev_private; |
| 217 | int index = GetIndexIntoMasterTable(COMMAND, BlankCRTC); | 227 | int index = GetIndexIntoMasterTable(COMMAND, BlankCRTC); |
| 218 | BLANK_CRTC_PS_ALLOCATION args; | 228 | BLANK_CRTC_PS_ALLOCATION args; |
| 229 | u32 vga_control = 0; | ||
| 219 | 230 | ||
| 220 | memset(&args, 0, sizeof(args)); | 231 | memset(&args, 0, sizeof(args)); |
| 221 | 232 | ||
| 233 | if (ASIC_IS_DCE8(rdev)) { | ||
| 234 | vga_control = RREG32(vga_control_regs[radeon_crtc->crtc_id]); | ||
| 235 | WREG32(vga_control_regs[radeon_crtc->crtc_id], vga_control | 1); | ||
| 236 | } | ||
| 237 | |||
| 222 | args.ucCRTC = radeon_crtc->crtc_id; | 238 | args.ucCRTC = radeon_crtc->crtc_id; |
| 223 | args.ucBlanking = state; | 239 | args.ucBlanking = state; |
| 224 | 240 | ||
| 225 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 241 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
| 242 | |||
| 243 | if (ASIC_IS_DCE8(rdev)) { | ||
| 244 | WREG32(vga_control_regs[radeon_crtc->crtc_id], vga_control); | ||
| 245 | } | ||
| 226 | } | 246 | } |
| 227 | 247 | ||
| 228 | static void atombios_powergate_crtc(struct drm_crtc *crtc, int state) | 248 | static void atombios_powergate_crtc(struct drm_crtc *crtc, int state) |
| @@ -423,7 +443,17 @@ static void atombios_crtc_program_ss(struct radeon_device *rdev, | |||
| 423 | int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL); | 443 | int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL); |
| 424 | union atom_enable_ss args; | 444 | union atom_enable_ss args; |
| 425 | 445 | ||
| 426 | if (!enable) { | 446 | if (enable) { |
| 447 | /* Don't mess with SS if percentage is 0 or external ss. | ||
| 448 | * SS is already disabled previously, and disabling it | ||
| 449 | * again can cause display problems if the pll is already | ||
| 450 | * programmed. | ||
| 451 | */ | ||
| 452 | if (ss->percentage == 0) | ||
| 453 | return; | ||
| 454 | if (ss->type & ATOM_EXTERNAL_SS_MASK) | ||
| 455 | return; | ||
| 456 | } else { | ||
| 427 | for (i = 0; i < rdev->num_crtc; i++) { | 457 | for (i = 0; i < rdev->num_crtc; i++) { |
| 428 | if (rdev->mode_info.crtcs[i] && | 458 | if (rdev->mode_info.crtcs[i] && |
| 429 | rdev->mode_info.crtcs[i]->enabled && | 459 | rdev->mode_info.crtcs[i]->enabled && |
| @@ -459,8 +489,6 @@ static void atombios_crtc_program_ss(struct radeon_device *rdev, | |||
| 459 | args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount); | 489 | args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount); |
| 460 | args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step); | 490 | args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step); |
| 461 | args.v3.ucEnable = enable; | 491 | args.v3.ucEnable = enable; |
| 462 | if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK) || ASIC_IS_DCE61(rdev)) | ||
| 463 | args.v3.ucEnable = ATOM_DISABLE; | ||
| 464 | } else if (ASIC_IS_DCE4(rdev)) { | 492 | } else if (ASIC_IS_DCE4(rdev)) { |
| 465 | args.v2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); | 493 | args.v2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); |
| 466 | args.v2.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK; | 494 | args.v2.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK; |
| @@ -480,8 +508,6 @@ static void atombios_crtc_program_ss(struct radeon_device *rdev, | |||
| 480 | args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount); | 508 | args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount); |
| 481 | args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step); | 509 | args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step); |
| 482 | args.v2.ucEnable = enable; | 510 | args.v2.ucEnable = enable; |
| 483 | if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK) || ASIC_IS_DCE41(rdev)) | ||
| 484 | args.v2.ucEnable = ATOM_DISABLE; | ||
| 485 | } else if (ASIC_IS_DCE3(rdev)) { | 511 | } else if (ASIC_IS_DCE3(rdev)) { |
| 486 | args.v1.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); | 512 | args.v1.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); |
| 487 | args.v1.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK; | 513 | args.v1.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK; |
| @@ -503,8 +529,7 @@ static void atombios_crtc_program_ss(struct radeon_device *rdev, | |||
| 503 | args.lvds_ss_2.ucSpreadSpectrumRange = ss->range; | 529 | args.lvds_ss_2.ucSpreadSpectrumRange = ss->range; |
| 504 | args.lvds_ss_2.ucEnable = enable; | 530 | args.lvds_ss_2.ucEnable = enable; |
| 505 | } else { | 531 | } else { |
| 506 | if ((enable == ATOM_DISABLE) || (ss->percentage == 0) || | 532 | if (enable == ATOM_DISABLE) { |
| 507 | (ss->type & ATOM_EXTERNAL_SS_MASK)) { | ||
| 508 | atombios_disable_ss(rdev, pll_id); | 533 | atombios_disable_ss(rdev, pll_id); |
| 509 | return; | 534 | return; |
| 510 | } | 535 | } |
| @@ -938,11 +963,14 @@ static bool atombios_crtc_prepare_pll(struct drm_crtc *crtc, struct drm_display_ | |||
| 938 | radeon_atombios_get_ppll_ss_info(rdev, | 963 | radeon_atombios_get_ppll_ss_info(rdev, |
| 939 | &radeon_crtc->ss, | 964 | &radeon_crtc->ss, |
| 940 | ATOM_DP_SS_ID1); | 965 | ATOM_DP_SS_ID1); |
| 941 | } else | 966 | } else { |
| 942 | radeon_crtc->ss_enabled = | 967 | radeon_crtc->ss_enabled = |
| 943 | radeon_atombios_get_ppll_ss_info(rdev, | 968 | radeon_atombios_get_ppll_ss_info(rdev, |
| 944 | &radeon_crtc->ss, | 969 | &radeon_crtc->ss, |
| 945 | ATOM_DP_SS_ID1); | 970 | ATOM_DP_SS_ID1); |
| 971 | } | ||
| 972 | /* disable spread spectrum on DCE3 DP */ | ||
| 973 | radeon_crtc->ss_enabled = false; | ||
| 946 | } | 974 | } |
| 947 | break; | 975 | break; |
| 948 | case ATOM_ENCODER_MODE_LVDS: | 976 | case ATOM_ENCODER_MODE_LVDS: |
| @@ -1039,15 +1067,17 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode | |||
| 1039 | /* calculate ss amount and step size */ | 1067 | /* calculate ss amount and step size */ |
| 1040 | if (ASIC_IS_DCE4(rdev)) { | 1068 | if (ASIC_IS_DCE4(rdev)) { |
| 1041 | u32 step_size; | 1069 | u32 step_size; |
| 1042 | u32 amount = (((fb_div * 10) + frac_fb_div) * radeon_crtc->ss.percentage) / 10000; | 1070 | u32 amount = (((fb_div * 10) + frac_fb_div) * |
| 1071 | (u32)radeon_crtc->ss.percentage) / | ||
| 1072 | (100 * (u32)radeon_crtc->ss.percentage_divider); | ||
| 1043 | radeon_crtc->ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK; | 1073 | radeon_crtc->ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK; |
| 1044 | radeon_crtc->ss.amount |= ((amount - (amount / 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) & | 1074 | radeon_crtc->ss.amount |= ((amount - (amount / 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) & |
| 1045 | ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK; | 1075 | ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK; |
| 1046 | if (radeon_crtc->ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD) | 1076 | if (radeon_crtc->ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD) |
| 1047 | step_size = (4 * amount * ref_div * (radeon_crtc->ss.rate * 2048)) / | 1077 | step_size = (4 * amount * ref_div * ((u32)radeon_crtc->ss.rate * 2048)) / |
| 1048 | (125 * 25 * pll->reference_freq / 100); | 1078 | (125 * 25 * pll->reference_freq / 100); |
| 1049 | else | 1079 | else |
| 1050 | step_size = (2 * amount * ref_div * (radeon_crtc->ss.rate * 2048)) / | 1080 | step_size = (2 * amount * ref_div * ((u32)radeon_crtc->ss.rate * 2048)) / |
| 1051 | (125 * 25 * pll->reference_freq / 100); | 1081 | (125 * 25 * pll->reference_freq / 100); |
| 1052 | radeon_crtc->ss.step = step_size; | 1082 | radeon_crtc->ss.step = step_size; |
| 1053 | } | 1083 | } |
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index fb3ae07a1469..4ad7643fce5f 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c | |||
| @@ -157,21 +157,22 @@ static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector, | |||
| 157 | 157 | ||
| 158 | msg[0] = address; | 158 | msg[0] = address; |
| 159 | msg[1] = address >> 8; | 159 | msg[1] = address >> 8; |
| 160 | msg[2] = AUX_NATIVE_WRITE << 4; | 160 | msg[2] = DP_AUX_NATIVE_WRITE << 4; |
| 161 | msg[3] = (msg_bytes << 4) | (send_bytes - 1); | 161 | msg[3] = (msg_bytes << 4) | (send_bytes - 1); |
| 162 | memcpy(&msg[4], send, send_bytes); | 162 | memcpy(&msg[4], send, send_bytes); |
| 163 | 163 | ||
| 164 | for (retry = 0; retry < 4; retry++) { | 164 | for (retry = 0; retry < 7; retry++) { |
| 165 | ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, | 165 | ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, |
| 166 | msg, msg_bytes, NULL, 0, delay, &ack); | 166 | msg, msg_bytes, NULL, 0, delay, &ack); |
| 167 | if (ret == -EBUSY) | 167 | if (ret == -EBUSY) |
| 168 | continue; | 168 | continue; |
| 169 | else if (ret < 0) | 169 | else if (ret < 0) |
| 170 | return ret; | 170 | return ret; |
| 171 | if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) | 171 | ack >>= 4; |
| 172 | if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK) | ||
| 172 | return send_bytes; | 173 | return send_bytes; |
| 173 | else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) | 174 | else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER) |
| 174 | udelay(400); | 175 | usleep_range(400, 500); |
| 175 | else | 176 | else |
| 176 | return -EIO; | 177 | return -EIO; |
| 177 | } | 178 | } |
| @@ -191,20 +192,21 @@ static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, | |||
| 191 | 192 | ||
| 192 | msg[0] = address; | 193 | msg[0] = address; |
| 193 | msg[1] = address >> 8; | 194 | msg[1] = address >> 8; |
| 194 | msg[2] = AUX_NATIVE_READ << 4; | 195 | msg[2] = DP_AUX_NATIVE_READ << 4; |
| 195 | msg[3] = (msg_bytes << 4) | (recv_bytes - 1); | 196 | msg[3] = (msg_bytes << 4) | (recv_bytes - 1); |
| 196 | 197 | ||
| 197 | for (retry = 0; retry < 4; retry++) { | 198 | for (retry = 0; retry < 7; retry++) { |
| 198 | ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, | 199 | ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, |
| 199 | msg, msg_bytes, recv, recv_bytes, delay, &ack); | 200 | msg, msg_bytes, recv, recv_bytes, delay, &ack); |
| 200 | if (ret == -EBUSY) | 201 | if (ret == -EBUSY) |
| 201 | continue; | 202 | continue; |
| 202 | else if (ret < 0) | 203 | else if (ret < 0) |
| 203 | return ret; | 204 | return ret; |
| 204 | if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) | 205 | ack >>= 4; |
| 206 | if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK) | ||
| 205 | return ret; | 207 | return ret; |
| 206 | else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) | 208 | else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER) |
| 207 | udelay(400); | 209 | usleep_range(400, 500); |
| 208 | else if (ret == 0) | 210 | else if (ret == 0) |
| 209 | return -EPROTO; | 211 | return -EPROTO; |
| 210 | else | 212 | else |
| @@ -246,12 +248,12 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | |||
| 246 | 248 | ||
| 247 | /* Set up the command byte */ | 249 | /* Set up the command byte */ |
| 248 | if (mode & MODE_I2C_READ) | 250 | if (mode & MODE_I2C_READ) |
| 249 | msg[2] = AUX_I2C_READ << 4; | 251 | msg[2] = DP_AUX_I2C_READ << 4; |
| 250 | else | 252 | else |
| 251 | msg[2] = AUX_I2C_WRITE << 4; | 253 | msg[2] = DP_AUX_I2C_WRITE << 4; |
| 252 | 254 | ||
| 253 | if (!(mode & MODE_I2C_STOP)) | 255 | if (!(mode & MODE_I2C_STOP)) |
| 254 | msg[2] |= AUX_I2C_MOT << 4; | 256 | msg[2] |= DP_AUX_I2C_MOT << 4; |
| 255 | 257 | ||
| 256 | msg[0] = address; | 258 | msg[0] = address; |
| 257 | msg[1] = address >> 8; | 259 | msg[1] = address >> 8; |
| @@ -272,7 +274,7 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | |||
| 272 | break; | 274 | break; |
| 273 | } | 275 | } |
| 274 | 276 | ||
| 275 | for (retry = 0; retry < 4; retry++) { | 277 | for (retry = 0; retry < 7; retry++) { |
| 276 | ret = radeon_process_aux_ch(auxch, | 278 | ret = radeon_process_aux_ch(auxch, |
| 277 | msg, msg_bytes, reply, reply_bytes, 0, &ack); | 279 | msg, msg_bytes, reply, reply_bytes, 0, &ack); |
| 278 | if (ret == -EBUSY) | 280 | if (ret == -EBUSY) |
| @@ -282,35 +284,35 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | |||
| 282 | return ret; | 284 | return ret; |
| 283 | } | 285 | } |
| 284 | 286 | ||
| 285 | switch (ack & AUX_NATIVE_REPLY_MASK) { | 287 | switch ((ack >> 4) & DP_AUX_NATIVE_REPLY_MASK) { |
| 286 | case AUX_NATIVE_REPLY_ACK: | 288 | case DP_AUX_NATIVE_REPLY_ACK: |
| 287 | /* I2C-over-AUX Reply field is only valid | 289 | /* I2C-over-AUX Reply field is only valid |
| 288 | * when paired with AUX ACK. | 290 | * when paired with AUX ACK. |
| 289 | */ | 291 | */ |
| 290 | break; | 292 | break; |
| 291 | case AUX_NATIVE_REPLY_NACK: | 293 | case DP_AUX_NATIVE_REPLY_NACK: |
| 292 | DRM_DEBUG_KMS("aux_ch native nack\n"); | 294 | DRM_DEBUG_KMS("aux_ch native nack\n"); |
| 293 | return -EREMOTEIO; | 295 | return -EREMOTEIO; |
| 294 | case AUX_NATIVE_REPLY_DEFER: | 296 | case DP_AUX_NATIVE_REPLY_DEFER: |
| 295 | DRM_DEBUG_KMS("aux_ch native defer\n"); | 297 | DRM_DEBUG_KMS("aux_ch native defer\n"); |
| 296 | udelay(400); | 298 | usleep_range(500, 600); |
| 297 | continue; | 299 | continue; |
| 298 | default: | 300 | default: |
| 299 | DRM_ERROR("aux_ch invalid native reply 0x%02x\n", ack); | 301 | DRM_ERROR("aux_ch invalid native reply 0x%02x\n", ack); |
| 300 | return -EREMOTEIO; | 302 | return -EREMOTEIO; |
| 301 | } | 303 | } |
| 302 | 304 | ||
| 303 | switch (ack & AUX_I2C_REPLY_MASK) { | 305 | switch ((ack >> 4) & DP_AUX_I2C_REPLY_MASK) { |
| 304 | case AUX_I2C_REPLY_ACK: | 306 | case DP_AUX_I2C_REPLY_ACK: |
| 305 | if (mode == MODE_I2C_READ) | 307 | if (mode == MODE_I2C_READ) |
| 306 | *read_byte = reply[0]; | 308 | *read_byte = reply[0]; |
| 307 | return ret; | 309 | return ret; |
| 308 | case AUX_I2C_REPLY_NACK: | 310 | case DP_AUX_I2C_REPLY_NACK: |
| 309 | DRM_DEBUG_KMS("aux_i2c nack\n"); | 311 | DRM_DEBUG_KMS("aux_i2c nack\n"); |
| 310 | return -EREMOTEIO; | 312 | return -EREMOTEIO; |
| 311 | case AUX_I2C_REPLY_DEFER: | 313 | case DP_AUX_I2C_REPLY_DEFER: |
| 312 | DRM_DEBUG_KMS("aux_i2c defer\n"); | 314 | DRM_DEBUG_KMS("aux_i2c defer\n"); |
| 313 | udelay(400); | 315 | usleep_range(400, 500); |
| 314 | break; | 316 | break; |
| 315 | default: | 317 | default: |
| 316 | DRM_ERROR("aux_i2c invalid reply 0x%02x\n", ack); | 318 | DRM_ERROR("aux_i2c invalid reply 0x%02x\n", ack); |
| @@ -671,9 +673,11 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info) | |||
| 671 | u8 tmp; | 673 | u8 tmp; |
| 672 | 674 | ||
| 673 | /* power up the sink */ | 675 | /* power up the sink */ |
| 674 | if (dp_info->dpcd[0] >= 0x11) | 676 | if (dp_info->dpcd[0] >= 0x11) { |
| 675 | radeon_write_dpcd_reg(dp_info->radeon_connector, | 677 | radeon_write_dpcd_reg(dp_info->radeon_connector, |
| 676 | DP_SET_POWER, DP_SET_POWER_D0); | 678 | DP_SET_POWER, DP_SET_POWER_D0); |
| 679 | usleep_range(1000, 2000); | ||
| 680 | } | ||
| 677 | 681 | ||
| 678 | /* possibly enable downspread on the sink */ | 682 | /* possibly enable downspread on the sink */ |
| 679 | if (dp_info->dpcd[3] & 0x1) | 683 | if (dp_info->dpcd[3] & 0x1) |
diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c index f685035dbe39..b5162c3b6111 100644 --- a/drivers/gpu/drm/radeon/atombios_i2c.c +++ b/drivers/gpu/drm/radeon/atombios_i2c.c | |||
| @@ -27,8 +27,6 @@ | |||
| 27 | #include "radeon.h" | 27 | #include "radeon.h" |
| 28 | #include "atom.h" | 28 | #include "atom.h" |
| 29 | 29 | ||
| 30 | extern void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le); | ||
| 31 | |||
| 32 | #define TARGET_HW_I2C_CLOCK 50 | 30 | #define TARGET_HW_I2C_CLOCK 50 |
| 33 | 31 | ||
| 34 | /* these are a limitation of ProcessI2cChannelTransaction not the hw */ | 32 | /* these are a limitation of ProcessI2cChannelTransaction not the hw */ |
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c index 9b6950d9b3c0..0fbd36f3d4e9 100644 --- a/drivers/gpu/drm/radeon/btc_dpm.c +++ b/drivers/gpu/drm/radeon/btc_dpm.c | |||
| @@ -49,6 +49,7 @@ struct rv7xx_ps *rv770_get_ps(struct radeon_ps *rps); | |||
| 49 | struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev); | 49 | struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev); |
| 50 | struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev); | 50 | struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev); |
| 51 | 51 | ||
| 52 | extern int ni_mc_load_microcode(struct radeon_device *rdev); | ||
| 52 | 53 | ||
| 53 | //********* BARTS **************// | 54 | //********* BARTS **************// |
| 54 | static const u32 barts_cgcg_cgls_default[] = | 55 | static const u32 barts_cgcg_cgls_default[] = |
| @@ -2510,21 +2511,6 @@ int btc_dpm_enable(struct radeon_device *rdev) | |||
| 2510 | if (eg_pi->ls_clock_gating) | 2511 | if (eg_pi->ls_clock_gating) |
| 2511 | btc_ls_clock_gating_enable(rdev, true); | 2512 | btc_ls_clock_gating_enable(rdev, true); |
| 2512 | 2513 | ||
| 2513 | if (rdev->irq.installed && | ||
| 2514 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { | ||
| 2515 | PPSMC_Result result; | ||
| 2516 | |||
| 2517 | ret = rv770_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); | ||
| 2518 | if (ret) | ||
| 2519 | return ret; | ||
| 2520 | rdev->irq.dpm_thermal = true; | ||
| 2521 | radeon_irq_set(rdev); | ||
| 2522 | result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt); | ||
| 2523 | |||
| 2524 | if (result != PPSMC_Result_OK) | ||
| 2525 | DRM_DEBUG_KMS("Could not enable thermal interrupts.\n"); | ||
| 2526 | } | ||
| 2527 | |||
| 2528 | rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true); | 2514 | rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true); |
| 2529 | 2515 | ||
| 2530 | btc_init_stutter_mode(rdev); | 2516 | btc_init_stutter_mode(rdev); |
| @@ -2576,7 +2562,11 @@ void btc_dpm_disable(struct radeon_device *rdev) | |||
| 2576 | void btc_dpm_setup_asic(struct radeon_device *rdev) | 2562 | void btc_dpm_setup_asic(struct radeon_device *rdev) |
| 2577 | { | 2563 | { |
| 2578 | struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); | 2564 | struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); |
| 2565 | int r; | ||
| 2579 | 2566 | ||
| 2567 | r = ni_mc_load_microcode(rdev); | ||
| 2568 | if (r) | ||
| 2569 | DRM_ERROR("Failed to load MC firmware!\n"); | ||
| 2580 | rv770_get_memory_type(rdev); | 2570 | rv770_get_memory_type(rdev); |
| 2581 | rv740_read_clock_registers(rdev); | 2571 | rv740_read_clock_registers(rdev); |
| 2582 | btc_read_arb_registers(rdev); | 2572 | btc_read_arb_registers(rdev); |
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index 1ed479976358..8d49104ca6c2 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c | |||
| @@ -171,8 +171,7 @@ extern void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev, | |||
| 171 | struct atom_voltage_table *voltage_table); | 171 | struct atom_voltage_table *voltage_table); |
| 172 | extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev); | 172 | extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev); |
| 173 | extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev); | 173 | extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev); |
| 174 | extern void cik_update_cg(struct radeon_device *rdev, | 174 | extern int ci_mc_load_microcode(struct radeon_device *rdev); |
| 175 | u32 block, bool enable); | ||
| 176 | 175 | ||
| 177 | static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev, | 176 | static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev, |
| 178 | struct atom_voltage_table_entry *voltage_table, | 177 | struct atom_voltage_table_entry *voltage_table, |
| @@ -4503,8 +4502,8 @@ static void ci_get_memory_type(struct radeon_device *rdev) | |||
| 4503 | 4502 | ||
| 4504 | } | 4503 | } |
| 4505 | 4504 | ||
| 4506 | void ci_update_current_ps(struct radeon_device *rdev, | 4505 | static void ci_update_current_ps(struct radeon_device *rdev, |
| 4507 | struct radeon_ps *rps) | 4506 | struct radeon_ps *rps) |
| 4508 | { | 4507 | { |
| 4509 | struct ci_ps *new_ps = ci_get_ps(rps); | 4508 | struct ci_ps *new_ps = ci_get_ps(rps); |
| 4510 | struct ci_power_info *pi = ci_get_pi(rdev); | 4509 | struct ci_power_info *pi = ci_get_pi(rdev); |
| @@ -4514,8 +4513,8 @@ void ci_update_current_ps(struct radeon_device *rdev, | |||
| 4514 | pi->current_rps.ps_priv = &pi->current_ps; | 4513 | pi->current_rps.ps_priv = &pi->current_ps; |
| 4515 | } | 4514 | } |
| 4516 | 4515 | ||
| 4517 | void ci_update_requested_ps(struct radeon_device *rdev, | 4516 | static void ci_update_requested_ps(struct radeon_device *rdev, |
| 4518 | struct radeon_ps *rps) | 4517 | struct radeon_ps *rps) |
| 4519 | { | 4518 | { |
| 4520 | struct ci_ps *new_ps = ci_get_ps(rps); | 4519 | struct ci_ps *new_ps = ci_get_ps(rps); |
| 4521 | struct ci_power_info *pi = ci_get_pi(rdev); | 4520 | struct ci_power_info *pi = ci_get_pi(rdev); |
| @@ -4549,6 +4548,11 @@ void ci_dpm_post_set_power_state(struct radeon_device *rdev) | |||
| 4549 | 4548 | ||
| 4550 | void ci_dpm_setup_asic(struct radeon_device *rdev) | 4549 | void ci_dpm_setup_asic(struct radeon_device *rdev) |
| 4551 | { | 4550 | { |
| 4551 | int r; | ||
| 4552 | |||
| 4553 | r = ci_mc_load_microcode(rdev); | ||
| 4554 | if (r) | ||
| 4555 | DRM_ERROR("Failed to load MC firmware!\n"); | ||
| 4552 | ci_read_clock_registers(rdev); | 4556 | ci_read_clock_registers(rdev); |
| 4553 | ci_get_memory_type(rdev); | 4557 | ci_get_memory_type(rdev); |
| 4554 | ci_enable_acpi_power_management(rdev); | 4558 | ci_enable_acpi_power_management(rdev); |
| @@ -4561,13 +4565,6 @@ int ci_dpm_enable(struct radeon_device *rdev) | |||
| 4561 | struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps; | 4565 | struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps; |
| 4562 | int ret; | 4566 | int ret; |
| 4563 | 4567 | ||
| 4564 | cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX | | ||
| 4565 | RADEON_CG_BLOCK_MC | | ||
| 4566 | RADEON_CG_BLOCK_SDMA | | ||
| 4567 | RADEON_CG_BLOCK_BIF | | ||
| 4568 | RADEON_CG_BLOCK_UVD | | ||
| 4569 | RADEON_CG_BLOCK_HDP), false); | ||
| 4570 | |||
| 4571 | if (ci_is_smc_running(rdev)) | 4568 | if (ci_is_smc_running(rdev)) |
| 4572 | return -EINVAL; | 4569 | return -EINVAL; |
| 4573 | if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) { | 4570 | if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) { |
| @@ -4665,6 +4662,18 @@ int ci_dpm_enable(struct radeon_device *rdev) | |||
| 4665 | DRM_ERROR("ci_enable_power_containment failed\n"); | 4662 | DRM_ERROR("ci_enable_power_containment failed\n"); |
| 4666 | return ret; | 4663 | return ret; |
| 4667 | } | 4664 | } |
| 4665 | |||
| 4666 | ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true); | ||
| 4667 | |||
| 4668 | ci_update_current_ps(rdev, boot_ps); | ||
| 4669 | |||
| 4670 | return 0; | ||
| 4671 | } | ||
| 4672 | |||
| 4673 | int ci_dpm_late_enable(struct radeon_device *rdev) | ||
| 4674 | { | ||
| 4675 | int ret; | ||
| 4676 | |||
| 4668 | if (rdev->irq.installed && | 4677 | if (rdev->irq.installed && |
| 4669 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { | 4678 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { |
| 4670 | #if 0 | 4679 | #if 0 |
| @@ -4685,19 +4694,8 @@ int ci_dpm_enable(struct radeon_device *rdev) | |||
| 4685 | #endif | 4694 | #endif |
| 4686 | } | 4695 | } |
| 4687 | 4696 | ||
| 4688 | ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true); | ||
| 4689 | |||
| 4690 | ci_dpm_powergate_uvd(rdev, true); | 4697 | ci_dpm_powergate_uvd(rdev, true); |
| 4691 | 4698 | ||
| 4692 | cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX | | ||
| 4693 | RADEON_CG_BLOCK_MC | | ||
| 4694 | RADEON_CG_BLOCK_SDMA | | ||
| 4695 | RADEON_CG_BLOCK_BIF | | ||
| 4696 | RADEON_CG_BLOCK_UVD | | ||
| 4697 | RADEON_CG_BLOCK_HDP), true); | ||
| 4698 | |||
| 4699 | ci_update_current_ps(rdev, boot_ps); | ||
| 4700 | |||
| 4701 | return 0; | 4699 | return 0; |
| 4702 | } | 4700 | } |
| 4703 | 4701 | ||
| @@ -4706,12 +4704,6 @@ void ci_dpm_disable(struct radeon_device *rdev) | |||
| 4706 | struct ci_power_info *pi = ci_get_pi(rdev); | 4704 | struct ci_power_info *pi = ci_get_pi(rdev); |
| 4707 | struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps; | 4705 | struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps; |
| 4708 | 4706 | ||
| 4709 | cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX | | ||
| 4710 | RADEON_CG_BLOCK_MC | | ||
| 4711 | RADEON_CG_BLOCK_SDMA | | ||
| 4712 | RADEON_CG_BLOCK_UVD | | ||
| 4713 | RADEON_CG_BLOCK_HDP), false); | ||
| 4714 | |||
| 4715 | ci_dpm_powergate_uvd(rdev, false); | 4707 | ci_dpm_powergate_uvd(rdev, false); |
| 4716 | 4708 | ||
| 4717 | if (!ci_is_smc_running(rdev)) | 4709 | if (!ci_is_smc_running(rdev)) |
| @@ -4742,13 +4734,6 @@ int ci_dpm_set_power_state(struct radeon_device *rdev) | |||
| 4742 | struct radeon_ps *old_ps = &pi->current_rps; | 4734 | struct radeon_ps *old_ps = &pi->current_rps; |
| 4743 | int ret; | 4735 | int ret; |
| 4744 | 4736 | ||
| 4745 | cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX | | ||
| 4746 | RADEON_CG_BLOCK_MC | | ||
| 4747 | RADEON_CG_BLOCK_SDMA | | ||
| 4748 | RADEON_CG_BLOCK_BIF | | ||
| 4749 | RADEON_CG_BLOCK_UVD | | ||
| 4750 | RADEON_CG_BLOCK_HDP), false); | ||
| 4751 | |||
| 4752 | ci_find_dpm_states_clocks_in_dpm_table(rdev, new_ps); | 4737 | ci_find_dpm_states_clocks_in_dpm_table(rdev, new_ps); |
| 4753 | if (pi->pcie_performance_request) | 4738 | if (pi->pcie_performance_request) |
| 4754 | ci_request_link_speed_change_before_state_change(rdev, new_ps, old_ps); | 4739 | ci_request_link_speed_change_before_state_change(rdev, new_ps, old_ps); |
| @@ -4804,13 +4789,6 @@ int ci_dpm_set_power_state(struct radeon_device *rdev) | |||
| 4804 | if (pi->pcie_performance_request) | 4789 | if (pi->pcie_performance_request) |
| 4805 | ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps); | 4790 | ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps); |
| 4806 | 4791 | ||
| 4807 | cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX | | ||
| 4808 | RADEON_CG_BLOCK_MC | | ||
| 4809 | RADEON_CG_BLOCK_SDMA | | ||
| 4810 | RADEON_CG_BLOCK_BIF | | ||
| 4811 | RADEON_CG_BLOCK_UVD | | ||
| 4812 | RADEON_CG_BLOCK_HDP), true); | ||
| 4813 | |||
| 4814 | return 0; | 4792 | return 0; |
| 4815 | } | 4793 | } |
| 4816 | 4794 | ||
| @@ -5023,8 +5001,8 @@ static int ci_parse_power_table(struct radeon_device *rdev) | |||
| 5023 | return 0; | 5001 | return 0; |
| 5024 | } | 5002 | } |
| 5025 | 5003 | ||
| 5026 | int ci_get_vbios_boot_values(struct radeon_device *rdev, | 5004 | static int ci_get_vbios_boot_values(struct radeon_device *rdev, |
| 5027 | struct ci_vbios_boot_state *boot_state) | 5005 | struct ci_vbios_boot_state *boot_state) |
| 5028 | { | 5006 | { |
| 5029 | struct radeon_mode_info *mode_info = &rdev->mode_info; | 5007 | struct radeon_mode_info *mode_info = &rdev->mode_info; |
| 5030 | int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); | 5008 | int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); |
diff --git a/drivers/gpu/drm/radeon/ci_smc.c b/drivers/gpu/drm/radeon/ci_smc.c index 9c745dd22438..8debc9d47362 100644 --- a/drivers/gpu/drm/radeon/ci_smc.c +++ b/drivers/gpu/drm/radeon/ci_smc.c | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #include "cikd.h" | 28 | #include "cikd.h" |
| 29 | #include "ppsmc.h" | 29 | #include "ppsmc.h" |
| 30 | #include "radeon_ucode.h" | 30 | #include "radeon_ucode.h" |
| 31 | #include "ci_dpm.h" | ||
| 31 | 32 | ||
| 32 | static int ci_set_smc_sram_address(struct radeon_device *rdev, | 33 | static int ci_set_smc_sram_address(struct radeon_device *rdev, |
| 33 | u32 smc_address, u32 limit) | 34 | u32 smc_address, u32 limit) |
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index e950fabd7f5e..e6419ca7cd37 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c | |||
| @@ -1697,7 +1697,7 @@ static void cik_srbm_select(struct radeon_device *rdev, | |||
| 1697 | * Load the GDDR MC ucode into the hw (CIK). | 1697 | * Load the GDDR MC ucode into the hw (CIK). |
| 1698 | * Returns 0 on success, error on failure. | 1698 | * Returns 0 on success, error on failure. |
| 1699 | */ | 1699 | */ |
| 1700 | static int ci_mc_load_microcode(struct radeon_device *rdev) | 1700 | int ci_mc_load_microcode(struct radeon_device *rdev) |
| 1701 | { | 1701 | { |
| 1702 | const __be32 *fw_data; | 1702 | const __be32 *fw_data; |
| 1703 | u32 running, blackout = 0; | 1703 | u32 running, blackout = 0; |
| @@ -3487,6 +3487,51 @@ int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) | |||
| 3487 | } | 3487 | } |
| 3488 | 3488 | ||
| 3489 | /** | 3489 | /** |
| 3490 | * cik_hdp_flush_cp_ring_emit - emit an hdp flush on the cp | ||
| 3491 | * | ||
| 3492 | * @rdev: radeon_device pointer | ||
| 3493 | * @ridx: radeon ring index | ||
| 3494 | * | ||
| 3495 | * Emits an hdp flush on the cp. | ||
| 3496 | */ | ||
| 3497 | static void cik_hdp_flush_cp_ring_emit(struct radeon_device *rdev, | ||
| 3498 | int ridx) | ||
| 3499 | { | ||
| 3500 | struct radeon_ring *ring = &rdev->ring[ridx]; | ||
| 3501 | u32 ref_and_mask; | ||
| 3502 | |||
| 3503 | switch (ring->idx) { | ||
| 3504 | case CAYMAN_RING_TYPE_CP1_INDEX: | ||
| 3505 | case CAYMAN_RING_TYPE_CP2_INDEX: | ||
| 3506 | default: | ||
| 3507 | switch (ring->me) { | ||
| 3508 | case 0: | ||
| 3509 | ref_and_mask = CP2 << ring->pipe; | ||
| 3510 | break; | ||
| 3511 | case 1: | ||
| 3512 | ref_and_mask = CP6 << ring->pipe; | ||
| 3513 | break; | ||
| 3514 | default: | ||
| 3515 | return; | ||
| 3516 | } | ||
| 3517 | break; | ||
| 3518 | case RADEON_RING_TYPE_GFX_INDEX: | ||
| 3519 | ref_and_mask = CP0; | ||
| 3520 | break; | ||
| 3521 | } | ||
| 3522 | |||
| 3523 | radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); | ||
| 3524 | radeon_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */ | ||
| 3525 | WAIT_REG_MEM_FUNCTION(3) | /* == */ | ||
| 3526 | WAIT_REG_MEM_ENGINE(1))); /* pfp */ | ||
| 3527 | radeon_ring_write(ring, GPU_HDP_FLUSH_REQ >> 2); | ||
| 3528 | radeon_ring_write(ring, GPU_HDP_FLUSH_DONE >> 2); | ||
| 3529 | radeon_ring_write(ring, ref_and_mask); | ||
| 3530 | radeon_ring_write(ring, ref_and_mask); | ||
| 3531 | radeon_ring_write(ring, 0x20); /* poll interval */ | ||
| 3532 | } | ||
| 3533 | |||
| 3534 | /** | ||
| 3490 | * cik_fence_gfx_ring_emit - emit a fence on the gfx ring | 3535 | * cik_fence_gfx_ring_emit - emit a fence on the gfx ring |
| 3491 | * | 3536 | * |
| 3492 | * @rdev: radeon_device pointer | 3537 | * @rdev: radeon_device pointer |
| @@ -3512,15 +3557,7 @@ void cik_fence_gfx_ring_emit(struct radeon_device *rdev, | |||
| 3512 | radeon_ring_write(ring, fence->seq); | 3557 | radeon_ring_write(ring, fence->seq); |
| 3513 | radeon_ring_write(ring, 0); | 3558 | radeon_ring_write(ring, 0); |
| 3514 | /* HDP flush */ | 3559 | /* HDP flush */ |
| 3515 | /* We should be using the new WAIT_REG_MEM special op packet here | 3560 | cik_hdp_flush_cp_ring_emit(rdev, fence->ring); |
| 3516 | * but it causes the CP to hang | ||
| 3517 | */ | ||
| 3518 | radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | ||
| 3519 | radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | | ||
| 3520 | WRITE_DATA_DST_SEL(0))); | ||
| 3521 | radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2); | ||
| 3522 | radeon_ring_write(ring, 0); | ||
| 3523 | radeon_ring_write(ring, 0); | ||
| 3524 | } | 3561 | } |
| 3525 | 3562 | ||
| 3526 | /** | 3563 | /** |
| @@ -3550,15 +3587,7 @@ void cik_fence_compute_ring_emit(struct radeon_device *rdev, | |||
| 3550 | radeon_ring_write(ring, fence->seq); | 3587 | radeon_ring_write(ring, fence->seq); |
| 3551 | radeon_ring_write(ring, 0); | 3588 | radeon_ring_write(ring, 0); |
| 3552 | /* HDP flush */ | 3589 | /* HDP flush */ |
| 3553 | /* We should be using the new WAIT_REG_MEM special op packet here | 3590 | cik_hdp_flush_cp_ring_emit(rdev, fence->ring); |
| 3554 | * but it causes the CP to hang | ||
| 3555 | */ | ||
| 3556 | radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | ||
| 3557 | radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | | ||
| 3558 | WRITE_DATA_DST_SEL(0))); | ||
| 3559 | radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2); | ||
| 3560 | radeon_ring_write(ring, 0); | ||
| 3561 | radeon_ring_write(ring, 0); | ||
| 3562 | } | 3591 | } |
| 3563 | 3592 | ||
| 3564 | bool cik_semaphore_ring_emit(struct radeon_device *rdev, | 3593 | bool cik_semaphore_ring_emit(struct radeon_device *rdev, |
| @@ -3566,8 +3595,6 @@ bool cik_semaphore_ring_emit(struct radeon_device *rdev, | |||
| 3566 | struct radeon_semaphore *semaphore, | 3595 | struct radeon_semaphore *semaphore, |
| 3567 | bool emit_wait) | 3596 | bool emit_wait) |
| 3568 | { | 3597 | { |
| 3569 | /* TODO: figure out why semaphore cause lockups */ | ||
| 3570 | #if 0 | ||
| 3571 | uint64_t addr = semaphore->gpu_addr; | 3598 | uint64_t addr = semaphore->gpu_addr; |
| 3572 | unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL; | 3599 | unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL; |
| 3573 | 3600 | ||
| @@ -3576,9 +3603,6 @@ bool cik_semaphore_ring_emit(struct radeon_device *rdev, | |||
| 3576 | radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel); | 3603 | radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel); |
| 3577 | 3604 | ||
| 3578 | return true; | 3605 | return true; |
| 3579 | #else | ||
| 3580 | return false; | ||
| 3581 | #endif | ||
| 3582 | } | 3606 | } |
| 3583 | 3607 | ||
| 3584 | /** | 3608 | /** |
| @@ -3816,6 +3840,8 @@ static void cik_cp_gfx_enable(struct radeon_device *rdev, bool enable) | |||
| 3816 | if (enable) | 3840 | if (enable) |
| 3817 | WREG32(CP_ME_CNTL, 0); | 3841 | WREG32(CP_ME_CNTL, 0); |
| 3818 | else { | 3842 | else { |
| 3843 | if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX) | ||
| 3844 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); | ||
| 3819 | WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT)); | 3845 | WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT)); |
| 3820 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; | 3846 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; |
| 3821 | } | 3847 | } |
| @@ -4014,18 +4040,50 @@ static int cik_cp_gfx_resume(struct radeon_device *rdev) | |||
| 4014 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; | 4040 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; |
| 4015 | return r; | 4041 | return r; |
| 4016 | } | 4042 | } |
| 4043 | |||
| 4044 | if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX) | ||
| 4045 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); | ||
| 4046 | |||
| 4017 | return 0; | 4047 | return 0; |
| 4018 | } | 4048 | } |
| 4019 | 4049 | ||
| 4020 | u32 cik_compute_ring_get_rptr(struct radeon_device *rdev, | 4050 | u32 cik_gfx_get_rptr(struct radeon_device *rdev, |
| 4021 | struct radeon_ring *ring) | 4051 | struct radeon_ring *ring) |
| 4022 | { | 4052 | { |
| 4023 | u32 rptr; | 4053 | u32 rptr; |
| 4024 | 4054 | ||
| 4055 | if (rdev->wb.enabled) | ||
| 4056 | rptr = rdev->wb.wb[ring->rptr_offs/4]; | ||
| 4057 | else | ||
| 4058 | rptr = RREG32(CP_RB0_RPTR); | ||
| 4059 | |||
| 4060 | return rptr; | ||
| 4061 | } | ||
| 4062 | |||
| 4063 | u32 cik_gfx_get_wptr(struct radeon_device *rdev, | ||
| 4064 | struct radeon_ring *ring) | ||
| 4065 | { | ||
| 4066 | u32 wptr; | ||
| 4067 | |||
| 4068 | wptr = RREG32(CP_RB0_WPTR); | ||
| 4069 | |||
| 4070 | return wptr; | ||
| 4071 | } | ||
| 4025 | 4072 | ||
| 4073 | void cik_gfx_set_wptr(struct radeon_device *rdev, | ||
| 4074 | struct radeon_ring *ring) | ||
| 4075 | { | ||
| 4076 | WREG32(CP_RB0_WPTR, ring->wptr); | ||
| 4077 | (void)RREG32(CP_RB0_WPTR); | ||
| 4078 | } | ||
| 4079 | |||
| 4080 | u32 cik_compute_get_rptr(struct radeon_device *rdev, | ||
| 4081 | struct radeon_ring *ring) | ||
| 4082 | { | ||
| 4083 | u32 rptr; | ||
| 4026 | 4084 | ||
| 4027 | if (rdev->wb.enabled) { | 4085 | if (rdev->wb.enabled) { |
| 4028 | rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]); | 4086 | rptr = rdev->wb.wb[ring->rptr_offs/4]; |
| 4029 | } else { | 4087 | } else { |
| 4030 | mutex_lock(&rdev->srbm_mutex); | 4088 | mutex_lock(&rdev->srbm_mutex); |
| 4031 | cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0); | 4089 | cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0); |
| @@ -4037,13 +4095,14 @@ u32 cik_compute_ring_get_rptr(struct radeon_device *rdev, | |||
| 4037 | return rptr; | 4095 | return rptr; |
| 4038 | } | 4096 | } |
| 4039 | 4097 | ||
| 4040 | u32 cik_compute_ring_get_wptr(struct radeon_device *rdev, | 4098 | u32 cik_compute_get_wptr(struct radeon_device *rdev, |
| 4041 | struct radeon_ring *ring) | 4099 | struct radeon_ring *ring) |
| 4042 | { | 4100 | { |
| 4043 | u32 wptr; | 4101 | u32 wptr; |
| 4044 | 4102 | ||
| 4045 | if (rdev->wb.enabled) { | 4103 | if (rdev->wb.enabled) { |
| 4046 | wptr = le32_to_cpu(rdev->wb.wb[ring->wptr_offs/4]); | 4104 | /* XXX check if swapping is necessary on BE */ |
| 4105 | wptr = rdev->wb.wb[ring->wptr_offs/4]; | ||
| 4047 | } else { | 4106 | } else { |
| 4048 | mutex_lock(&rdev->srbm_mutex); | 4107 | mutex_lock(&rdev->srbm_mutex); |
| 4049 | cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0); | 4108 | cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0); |
| @@ -4055,10 +4114,11 @@ u32 cik_compute_ring_get_wptr(struct radeon_device *rdev, | |||
| 4055 | return wptr; | 4114 | return wptr; |
| 4056 | } | 4115 | } |
| 4057 | 4116 | ||
| 4058 | void cik_compute_ring_set_wptr(struct radeon_device *rdev, | 4117 | void cik_compute_set_wptr(struct radeon_device *rdev, |
| 4059 | struct radeon_ring *ring) | 4118 | struct radeon_ring *ring) |
| 4060 | { | 4119 | { |
| 4061 | rdev->wb.wb[ring->wptr_offs/4] = cpu_to_le32(ring->wptr); | 4120 | /* XXX check if swapping is necessary on BE */ |
| 4121 | rdev->wb.wb[ring->wptr_offs/4] = ring->wptr; | ||
| 4062 | WDOORBELL32(ring->doorbell_index, ring->wptr); | 4122 | WDOORBELL32(ring->doorbell_index, ring->wptr); |
| 4063 | } | 4123 | } |
| 4064 | 4124 | ||
| @@ -4852,6 +4912,160 @@ static void cik_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask) | |||
| 4852 | cik_print_gpu_status_regs(rdev); | 4912 | cik_print_gpu_status_regs(rdev); |
| 4853 | } | 4913 | } |
| 4854 | 4914 | ||
| 4915 | struct kv_reset_save_regs { | ||
| 4916 | u32 gmcon_reng_execute; | ||
| 4917 | u32 gmcon_misc; | ||
| 4918 | u32 gmcon_misc3; | ||
| 4919 | }; | ||
| 4920 | |||
| 4921 | static void kv_save_regs_for_reset(struct radeon_device *rdev, | ||
| 4922 | struct kv_reset_save_regs *save) | ||
| 4923 | { | ||
| 4924 | save->gmcon_reng_execute = RREG32(GMCON_RENG_EXECUTE); | ||
| 4925 | save->gmcon_misc = RREG32(GMCON_MISC); | ||
| 4926 | save->gmcon_misc3 = RREG32(GMCON_MISC3); | ||
| 4927 | |||
| 4928 | WREG32(GMCON_RENG_EXECUTE, save->gmcon_reng_execute & ~RENG_EXECUTE_ON_PWR_UP); | ||
| 4929 | WREG32(GMCON_MISC, save->gmcon_misc & ~(RENG_EXECUTE_ON_REG_UPDATE | | ||
| 4930 | STCTRL_STUTTER_EN)); | ||
| 4931 | } | ||
| 4932 | |||
| 4933 | static void kv_restore_regs_for_reset(struct radeon_device *rdev, | ||
| 4934 | struct kv_reset_save_regs *save) | ||
| 4935 | { | ||
| 4936 | int i; | ||
| 4937 | |||
| 4938 | WREG32(GMCON_PGFSM_WRITE, 0); | ||
| 4939 | WREG32(GMCON_PGFSM_CONFIG, 0x200010ff); | ||
| 4940 | |||
| 4941 | for (i = 0; i < 5; i++) | ||
| 4942 | WREG32(GMCON_PGFSM_WRITE, 0); | ||
| 4943 | |||
| 4944 | WREG32(GMCON_PGFSM_WRITE, 0); | ||
| 4945 | WREG32(GMCON_PGFSM_CONFIG, 0x300010ff); | ||
| 4946 | |||
| 4947 | for (i = 0; i < 5; i++) | ||
| 4948 | WREG32(GMCON_PGFSM_WRITE, 0); | ||
| 4949 | |||
| 4950 | WREG32(GMCON_PGFSM_WRITE, 0x210000); | ||
| 4951 | WREG32(GMCON_PGFSM_CONFIG, 0xa00010ff); | ||
| 4952 | |||
| 4953 | for (i = 0; i < 5; i++) | ||
| 4954 | WREG32(GMCON_PGFSM_WRITE, 0); | ||
| 4955 | |||
| 4956 | WREG32(GMCON_PGFSM_WRITE, 0x21003); | ||
| 4957 | WREG32(GMCON_PGFSM_CONFIG, 0xb00010ff); | ||
| 4958 | |||
| 4959 | for (i = 0; i < 5; i++) | ||
| 4960 | WREG32(GMCON_PGFSM_WRITE, 0); | ||
| 4961 | |||
| 4962 | WREG32(GMCON_PGFSM_WRITE, 0x2b00); | ||
| 4963 | WREG32(GMCON_PGFSM_CONFIG, 0xc00010ff); | ||
| 4964 | |||
| 4965 | for (i = 0; i < 5; i++) | ||
| 4966 | WREG32(GMCON_PGFSM_WRITE, 0); | ||
| 4967 | |||
| 4968 | WREG32(GMCON_PGFSM_WRITE, 0); | ||
| 4969 | WREG32(GMCON_PGFSM_CONFIG, 0xd00010ff); | ||
| 4970 | |||
| 4971 | for (i = 0; i < 5; i++) | ||
| 4972 | WREG32(GMCON_PGFSM_WRITE, 0); | ||
| 4973 | |||
| 4974 | WREG32(GMCON_PGFSM_WRITE, 0x420000); | ||
| 4975 | WREG32(GMCON_PGFSM_CONFIG, 0x100010ff); | ||
| 4976 | |||
| 4977 | for (i = 0; i < 5; i++) | ||
| 4978 | WREG32(GMCON_PGFSM_WRITE, 0); | ||
| 4979 | |||
| 4980 | WREG32(GMCON_PGFSM_WRITE, 0x120202); | ||
| 4981 | WREG32(GMCON_PGFSM_CONFIG, 0x500010ff); | ||
| 4982 | |||
| 4983 | for (i = 0; i < 5; i++) | ||
| 4984 | WREG32(GMCON_PGFSM_WRITE, 0); | ||
| 4985 | |||
| 4986 | WREG32(GMCON_PGFSM_WRITE, 0x3e3e36); | ||
| 4987 | WREG32(GMCON_PGFSM_CONFIG, 0x600010ff); | ||
| 4988 | |||
| 4989 | for (i = 0; i < 5; i++) | ||
| 4990 | WREG32(GMCON_PGFSM_WRITE, 0); | ||
| 4991 | |||
| 4992 | WREG32(GMCON_PGFSM_WRITE, 0x373f3e); | ||
| 4993 | WREG32(GMCON_PGFSM_CONFIG, 0x700010ff); | ||
| 4994 | |||
| 4995 | for (i = 0; i < 5; i++) | ||
| 4996 | WREG32(GMCON_PGFSM_WRITE, 0); | ||
| 4997 | |||
| 4998 | WREG32(GMCON_PGFSM_WRITE, 0x3e1332); | ||
| 4999 | WREG32(GMCON_PGFSM_CONFIG, 0xe00010ff); | ||
| 5000 | |||
| 5001 | WREG32(GMCON_MISC3, save->gmcon_misc3); | ||
| 5002 | WREG32(GMCON_MISC, save->gmcon_misc); | ||
| 5003 | WREG32(GMCON_RENG_EXECUTE, save->gmcon_reng_execute); | ||
| 5004 | } | ||
| 5005 | |||
| 5006 | static void cik_gpu_pci_config_reset(struct radeon_device *rdev) | ||
| 5007 | { | ||
| 5008 | struct evergreen_mc_save save; | ||
| 5009 | struct kv_reset_save_regs kv_save = { 0 }; | ||
| 5010 | u32 tmp, i; | ||
| 5011 | |||
| 5012 | dev_info(rdev->dev, "GPU pci config reset\n"); | ||
| 5013 | |||
| 5014 | /* disable dpm? */ | ||
| 5015 | |||
| 5016 | /* disable cg/pg */ | ||
| 5017 | cik_fini_pg(rdev); | ||
| 5018 | cik_fini_cg(rdev); | ||
| 5019 | |||
| 5020 | /* Disable GFX parsing/prefetching */ | ||
| 5021 | WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT); | ||
| 5022 | |||
| 5023 | /* Disable MEC parsing/prefetching */ | ||
| 5024 | WREG32(CP_MEC_CNTL, MEC_ME1_HALT | MEC_ME2_HALT); | ||
| 5025 | |||
| 5026 | /* sdma0 */ | ||
| 5027 | tmp = RREG32(SDMA0_ME_CNTL + SDMA0_REGISTER_OFFSET); | ||
| 5028 | tmp |= SDMA_HALT; | ||
| 5029 | WREG32(SDMA0_ME_CNTL + SDMA0_REGISTER_OFFSET, tmp); | ||
| 5030 | /* sdma1 */ | ||
| 5031 | tmp = RREG32(SDMA0_ME_CNTL + SDMA1_REGISTER_OFFSET); | ||
| 5032 | tmp |= SDMA_HALT; | ||
| 5033 | WREG32(SDMA0_ME_CNTL + SDMA1_REGISTER_OFFSET, tmp); | ||
| 5034 | /* XXX other engines? */ | ||
| 5035 | |||
| 5036 | /* halt the rlc, disable cp internal ints */ | ||
| 5037 | cik_rlc_stop(rdev); | ||
| 5038 | |||
| 5039 | udelay(50); | ||
| 5040 | |||
| 5041 | /* disable mem access */ | ||
| 5042 | evergreen_mc_stop(rdev, &save); | ||
| 5043 | if (evergreen_mc_wait_for_idle(rdev)) { | ||
| 5044 | dev_warn(rdev->dev, "Wait for MC idle timed out !\n"); | ||
| 5045 | } | ||
| 5046 | |||
| 5047 | if (rdev->flags & RADEON_IS_IGP) | ||
| 5048 | kv_save_regs_for_reset(rdev, &kv_save); | ||
| 5049 | |||
| 5050 | /* disable BM */ | ||
| 5051 | pci_clear_master(rdev->pdev); | ||
| 5052 | /* reset */ | ||
| 5053 | radeon_pci_config_reset(rdev); | ||
| 5054 | |||
| 5055 | udelay(100); | ||
| 5056 | |||
| 5057 | /* wait for asic to come out of reset */ | ||
| 5058 | for (i = 0; i < rdev->usec_timeout; i++) { | ||
| 5059 | if (RREG32(CONFIG_MEMSIZE) != 0xffffffff) | ||
| 5060 | break; | ||
| 5061 | udelay(1); | ||
| 5062 | } | ||
| 5063 | |||
| 5064 | /* does asic init need to be run first??? */ | ||
| 5065 | if (rdev->flags & RADEON_IS_IGP) | ||
| 5066 | kv_restore_regs_for_reset(rdev, &kv_save); | ||
| 5067 | } | ||
| 5068 | |||
| 4855 | /** | 5069 | /** |
| 4856 | * cik_asic_reset - soft reset GPU | 5070 | * cik_asic_reset - soft reset GPU |
| 4857 | * | 5071 | * |
| @@ -4870,10 +5084,17 @@ int cik_asic_reset(struct radeon_device *rdev) | |||
| 4870 | if (reset_mask) | 5084 | if (reset_mask) |
| 4871 | r600_set_bios_scratch_engine_hung(rdev, true); | 5085 | r600_set_bios_scratch_engine_hung(rdev, true); |
| 4872 | 5086 | ||
| 5087 | /* try soft reset */ | ||
| 4873 | cik_gpu_soft_reset(rdev, reset_mask); | 5088 | cik_gpu_soft_reset(rdev, reset_mask); |
| 4874 | 5089 | ||
| 4875 | reset_mask = cik_gpu_check_soft_reset(rdev); | 5090 | reset_mask = cik_gpu_check_soft_reset(rdev); |
| 4876 | 5091 | ||
| 5092 | /* try pci config reset */ | ||
| 5093 | if (reset_mask && radeon_hard_reset) | ||
| 5094 | cik_gpu_pci_config_reset(rdev); | ||
| 5095 | |||
| 5096 | reset_mask = cik_gpu_check_soft_reset(rdev); | ||
| 5097 | |||
| 4877 | if (!reset_mask) | 5098 | if (!reset_mask) |
| 4878 | r600_set_bios_scratch_engine_hung(rdev, false); | 5099 | r600_set_bios_scratch_engine_hung(rdev, false); |
| 4879 | 5100 | ||
| @@ -5138,20 +5359,6 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev) | |||
| 5138 | WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT | | 5359 | WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT | |
| 5139 | WRITE_PROTECTION_FAULT_ENABLE_DEFAULT); | 5360 | WRITE_PROTECTION_FAULT_ENABLE_DEFAULT); |
| 5140 | 5361 | ||
| 5141 | /* TC cache setup ??? */ | ||
| 5142 | WREG32(TC_CFG_L1_LOAD_POLICY0, 0); | ||
| 5143 | WREG32(TC_CFG_L1_LOAD_POLICY1, 0); | ||
| 5144 | WREG32(TC_CFG_L1_STORE_POLICY, 0); | ||
| 5145 | |||
| 5146 | WREG32(TC_CFG_L2_LOAD_POLICY0, 0); | ||
| 5147 | WREG32(TC_CFG_L2_LOAD_POLICY1, 0); | ||
| 5148 | WREG32(TC_CFG_L2_STORE_POLICY0, 0); | ||
| 5149 | WREG32(TC_CFG_L2_STORE_POLICY1, 0); | ||
| 5150 | WREG32(TC_CFG_L2_ATOMIC_POLICY, 0); | ||
| 5151 | |||
| 5152 | WREG32(TC_CFG_L1_VOLATILE, 0); | ||
| 5153 | WREG32(TC_CFG_L2_VOLATILE, 0); | ||
| 5154 | |||
| 5155 | if (rdev->family == CHIP_KAVERI) { | 5362 | if (rdev->family == CHIP_KAVERI) { |
| 5156 | u32 tmp = RREG32(CHUB_CONTROL); | 5363 | u32 tmp = RREG32(CHUB_CONTROL); |
| 5157 | tmp &= ~BYPASS_VM; | 5364 | tmp &= ~BYPASS_VM; |
| @@ -5367,16 +5574,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) | |||
| 5367 | radeon_ring_write(ring, VMID(0)); | 5574 | radeon_ring_write(ring, VMID(0)); |
| 5368 | 5575 | ||
| 5369 | /* HDP flush */ | 5576 | /* HDP flush */ |
| 5370 | /* We should be using the WAIT_REG_MEM packet here like in | 5577 | cik_hdp_flush_cp_ring_emit(rdev, ridx); |
| 5371 | * cik_fence_ring_emit(), but it causes the CP to hang in this | ||
| 5372 | * context... | ||
| 5373 | */ | ||
| 5374 | radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | ||
| 5375 | radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | | ||
| 5376 | WRITE_DATA_DST_SEL(0))); | ||
| 5377 | radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2); | ||
| 5378 | radeon_ring_write(ring, 0); | ||
| 5379 | radeon_ring_write(ring, 0); | ||
| 5380 | 5578 | ||
| 5381 | /* bits 0-15 are the VM contexts0-15 */ | 5579 | /* bits 0-15 are the VM contexts0-15 */ |
| 5382 | radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | 5580 | radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); |
| @@ -7503,26 +7701,7 @@ static int cik_startup(struct radeon_device *rdev) | |||
| 7503 | 7701 | ||
| 7504 | cik_mc_program(rdev); | 7702 | cik_mc_program(rdev); |
| 7505 | 7703 | ||
| 7506 | if (rdev->flags & RADEON_IS_IGP) { | 7704 | if (!(rdev->flags & RADEON_IS_IGP) && !rdev->pm.dpm_enabled) { |
| 7507 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || | ||
| 7508 | !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) { | ||
| 7509 | r = cik_init_microcode(rdev); | ||
| 7510 | if (r) { | ||
| 7511 | DRM_ERROR("Failed to load firmware!\n"); | ||
| 7512 | return r; | ||
| 7513 | } | ||
| 7514 | } | ||
| 7515 | } else { | ||
| 7516 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || | ||
| 7517 | !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw || | ||
| 7518 | !rdev->mc_fw) { | ||
| 7519 | r = cik_init_microcode(rdev); | ||
| 7520 | if (r) { | ||
| 7521 | DRM_ERROR("Failed to load firmware!\n"); | ||
| 7522 | return r; | ||
| 7523 | } | ||
| 7524 | } | ||
| 7525 | |||
| 7526 | r = ci_mc_load_microcode(rdev); | 7705 | r = ci_mc_load_microcode(rdev); |
| 7527 | if (r) { | 7706 | if (r) { |
| 7528 | DRM_ERROR("Failed to load MC firmware!\n"); | 7707 | DRM_ERROR("Failed to load MC firmware!\n"); |
| @@ -7627,7 +7806,6 @@ static int cik_startup(struct radeon_device *rdev) | |||
| 7627 | 7806 | ||
| 7628 | ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; | 7807 | ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
| 7629 | r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, | 7808 | r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, |
| 7630 | CP_RB0_RPTR, CP_RB0_WPTR, | ||
| 7631 | PACKET3(PACKET3_NOP, 0x3FFF)); | 7809 | PACKET3(PACKET3_NOP, 0x3FFF)); |
| 7632 | if (r) | 7810 | if (r) |
| 7633 | return r; | 7811 | return r; |
| @@ -7636,7 +7814,6 @@ static int cik_startup(struct radeon_device *rdev) | |||
| 7636 | /* type-2 packets are deprecated on MEC, use type-3 instead */ | 7814 | /* type-2 packets are deprecated on MEC, use type-3 instead */ |
| 7637 | ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; | 7815 | ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; |
| 7638 | r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET, | 7816 | r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET, |
| 7639 | CP_HQD_PQ_RPTR, CP_HQD_PQ_WPTR, | ||
| 7640 | PACKET3(PACKET3_NOP, 0x3FFF)); | 7817 | PACKET3(PACKET3_NOP, 0x3FFF)); |
| 7641 | if (r) | 7818 | if (r) |
| 7642 | return r; | 7819 | return r; |
| @@ -7648,7 +7825,6 @@ static int cik_startup(struct radeon_device *rdev) | |||
| 7648 | /* type-2 packets are deprecated on MEC, use type-3 instead */ | 7825 | /* type-2 packets are deprecated on MEC, use type-3 instead */ |
| 7649 | ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; | 7826 | ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; |
| 7650 | r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET, | 7827 | r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET, |
| 7651 | CP_HQD_PQ_RPTR, CP_HQD_PQ_WPTR, | ||
| 7652 | PACKET3(PACKET3_NOP, 0x3FFF)); | 7828 | PACKET3(PACKET3_NOP, 0x3FFF)); |
| 7653 | if (r) | 7829 | if (r) |
| 7654 | return r; | 7830 | return r; |
| @@ -7660,16 +7836,12 @@ static int cik_startup(struct radeon_device *rdev) | |||
| 7660 | 7836 | ||
| 7661 | ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; | 7837 | ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; |
| 7662 | r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, | 7838 | r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, |
| 7663 | SDMA0_GFX_RB_RPTR + SDMA0_REGISTER_OFFSET, | ||
| 7664 | SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET, | ||
| 7665 | SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0)); | 7839 | SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0)); |
| 7666 | if (r) | 7840 | if (r) |
| 7667 | return r; | 7841 | return r; |
| 7668 | 7842 | ||
| 7669 | ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]; | 7843 | ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]; |
| 7670 | r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET, | 7844 | r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET, |
| 7671 | SDMA0_GFX_RB_RPTR + SDMA1_REGISTER_OFFSET, | ||
| 7672 | SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET, | ||
| 7673 | SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0)); | 7845 | SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0)); |
| 7674 | if (r) | 7846 | if (r) |
| 7675 | return r; | 7847 | return r; |
| @@ -7685,7 +7857,6 @@ static int cik_startup(struct radeon_device *rdev) | |||
| 7685 | ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; | 7857 | ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; |
| 7686 | if (ring->ring_size) { | 7858 | if (ring->ring_size) { |
| 7687 | r = radeon_ring_init(rdev, ring, ring->ring_size, 0, | 7859 | r = radeon_ring_init(rdev, ring, ring->ring_size, 0, |
| 7688 | UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR, | ||
| 7689 | RADEON_CP_PACKET2); | 7860 | RADEON_CP_PACKET2); |
| 7690 | if (!r) | 7861 | if (!r) |
| 7691 | r = uvd_v1_0_init(rdev); | 7862 | r = uvd_v1_0_init(rdev); |
| @@ -7731,6 +7902,8 @@ int cik_resume(struct radeon_device *rdev) | |||
| 7731 | /* init golden registers */ | 7902 | /* init golden registers */ |
| 7732 | cik_init_golden_registers(rdev); | 7903 | cik_init_golden_registers(rdev); |
| 7733 | 7904 | ||
| 7905 | radeon_pm_resume(rdev); | ||
| 7906 | |||
| 7734 | rdev->accel_working = true; | 7907 | rdev->accel_working = true; |
| 7735 | r = cik_startup(rdev); | 7908 | r = cik_startup(rdev); |
| 7736 | if (r) { | 7909 | if (r) { |
| @@ -7754,6 +7927,7 @@ int cik_resume(struct radeon_device *rdev) | |||
| 7754 | */ | 7927 | */ |
| 7755 | int cik_suspend(struct radeon_device *rdev) | 7928 | int cik_suspend(struct radeon_device *rdev) |
| 7756 | { | 7929 | { |
| 7930 | radeon_pm_suspend(rdev); | ||
| 7757 | dce6_audio_fini(rdev); | 7931 | dce6_audio_fini(rdev); |
| 7758 | radeon_vm_manager_fini(rdev); | 7932 | radeon_vm_manager_fini(rdev); |
| 7759 | cik_cp_enable(rdev, false); | 7933 | cik_cp_enable(rdev, false); |
| @@ -7835,6 +8009,30 @@ int cik_init(struct radeon_device *rdev) | |||
| 7835 | if (r) | 8009 | if (r) |
| 7836 | return r; | 8010 | return r; |
| 7837 | 8011 | ||
| 8012 | if (rdev->flags & RADEON_IS_IGP) { | ||
| 8013 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || | ||
| 8014 | !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) { | ||
| 8015 | r = cik_init_microcode(rdev); | ||
| 8016 | if (r) { | ||
| 8017 | DRM_ERROR("Failed to load firmware!\n"); | ||
| 8018 | return r; | ||
| 8019 | } | ||
| 8020 | } | ||
| 8021 | } else { | ||
| 8022 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || | ||
| 8023 | !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw || | ||
| 8024 | !rdev->mc_fw) { | ||
| 8025 | r = cik_init_microcode(rdev); | ||
| 8026 | if (r) { | ||
| 8027 | DRM_ERROR("Failed to load firmware!\n"); | ||
| 8028 | return r; | ||
| 8029 | } | ||
| 8030 | } | ||
| 8031 | } | ||
| 8032 | |||
| 8033 | /* Initialize power management */ | ||
| 8034 | radeon_pm_init(rdev); | ||
| 8035 | |||
| 7838 | ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; | 8036 | ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
| 7839 | ring->ring_obj = NULL; | 8037 | ring->ring_obj = NULL; |
| 7840 | r600_ring_init(rdev, ring, 1024 * 1024); | 8038 | r600_ring_init(rdev, ring, 1024 * 1024); |
| @@ -7915,6 +8113,7 @@ int cik_init(struct radeon_device *rdev) | |||
| 7915 | */ | 8113 | */ |
| 7916 | void cik_fini(struct radeon_device *rdev) | 8114 | void cik_fini(struct radeon_device *rdev) |
| 7917 | { | 8115 | { |
| 8116 | radeon_pm_fini(rdev); | ||
| 7918 | cik_cp_fini(rdev); | 8117 | cik_cp_fini(rdev); |
| 7919 | cik_sdma_fini(rdev); | 8118 | cik_sdma_fini(rdev); |
| 7920 | cik_fini_pg(rdev); | 8119 | cik_fini_pg(rdev); |
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c index d08b83c6267b..1ecb3f1070e3 100644 --- a/drivers/gpu/drm/radeon/cik_sdma.c +++ b/drivers/gpu/drm/radeon/cik_sdma.c | |||
| @@ -52,6 +52,75 @@ u32 cik_gpu_check_soft_reset(struct radeon_device *rdev); | |||
| 52 | */ | 52 | */ |
| 53 | 53 | ||
| 54 | /** | 54 | /** |
| 55 | * cik_sdma_get_rptr - get the current read pointer | ||
| 56 | * | ||
| 57 | * @rdev: radeon_device pointer | ||
| 58 | * @ring: radeon ring pointer | ||
| 59 | * | ||
| 60 | * Get the current rptr from the hardware (CIK+). | ||
| 61 | */ | ||
| 62 | uint32_t cik_sdma_get_rptr(struct radeon_device *rdev, | ||
| 63 | struct radeon_ring *ring) | ||
| 64 | { | ||
| 65 | u32 rptr, reg; | ||
| 66 | |||
| 67 | if (rdev->wb.enabled) { | ||
| 68 | rptr = rdev->wb.wb[ring->rptr_offs/4]; | ||
| 69 | } else { | ||
| 70 | if (ring->idx == R600_RING_TYPE_DMA_INDEX) | ||
| 71 | reg = SDMA0_GFX_RB_RPTR + SDMA0_REGISTER_OFFSET; | ||
| 72 | else | ||
| 73 | reg = SDMA0_GFX_RB_RPTR + SDMA1_REGISTER_OFFSET; | ||
| 74 | |||
| 75 | rptr = RREG32(reg); | ||
| 76 | } | ||
| 77 | |||
| 78 | return (rptr & 0x3fffc) >> 2; | ||
| 79 | } | ||
| 80 | |||
| 81 | /** | ||
| 82 | * cik_sdma_get_wptr - get the current write pointer | ||
| 83 | * | ||
| 84 | * @rdev: radeon_device pointer | ||
| 85 | * @ring: radeon ring pointer | ||
| 86 | * | ||
| 87 | * Get the current wptr from the hardware (CIK+). | ||
| 88 | */ | ||
| 89 | uint32_t cik_sdma_get_wptr(struct radeon_device *rdev, | ||
| 90 | struct radeon_ring *ring) | ||
| 91 | { | ||
| 92 | u32 reg; | ||
| 93 | |||
| 94 | if (ring->idx == R600_RING_TYPE_DMA_INDEX) | ||
| 95 | reg = SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET; | ||
| 96 | else | ||
| 97 | reg = SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET; | ||
| 98 | |||
| 99 | return (RREG32(reg) & 0x3fffc) >> 2; | ||
| 100 | } | ||
| 101 | |||
| 102 | /** | ||
| 103 | * cik_sdma_set_wptr - commit the write pointer | ||
| 104 | * | ||
| 105 | * @rdev: radeon_device pointer | ||
| 106 | * @ring: radeon ring pointer | ||
| 107 | * | ||
| 108 | * Write the wptr back to the hardware (CIK+). | ||
| 109 | */ | ||
| 110 | void cik_sdma_set_wptr(struct radeon_device *rdev, | ||
| 111 | struct radeon_ring *ring) | ||
| 112 | { | ||
| 113 | u32 reg; | ||
| 114 | |||
| 115 | if (ring->idx == R600_RING_TYPE_DMA_INDEX) | ||
| 116 | reg = SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET; | ||
| 117 | else | ||
| 118 | reg = SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET; | ||
| 119 | |||
| 120 | WREG32(reg, (ring->wptr << 2) & 0x3fffc); | ||
| 121 | } | ||
| 122 | |||
| 123 | /** | ||
| 55 | * cik_sdma_ring_ib_execute - Schedule an IB on the DMA engine | 124 | * cik_sdma_ring_ib_execute - Schedule an IB on the DMA engine |
| 56 | * | 125 | * |
| 57 | * @rdev: radeon_device pointer | 126 | * @rdev: radeon_device pointer |
| @@ -88,6 +157,35 @@ void cik_sdma_ring_ib_execute(struct radeon_device *rdev, | |||
| 88 | } | 157 | } |
| 89 | 158 | ||
| 90 | /** | 159 | /** |
| 160 | * cik_sdma_hdp_flush_ring_emit - emit an hdp flush on the DMA ring | ||
| 161 | * | ||
| 162 | * @rdev: radeon_device pointer | ||
| 163 | * @ridx: radeon ring index | ||
| 164 | * | ||
| 165 | * Emit an hdp flush packet on the requested DMA ring. | ||
| 166 | */ | ||
| 167 | static void cik_sdma_hdp_flush_ring_emit(struct radeon_device *rdev, | ||
| 168 | int ridx) | ||
| 169 | { | ||
| 170 | struct radeon_ring *ring = &rdev->ring[ridx]; | ||
| 171 | u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) | | ||
| 172 | SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */ | ||
| 173 | u32 ref_and_mask; | ||
| 174 | |||
| 175 | if (ridx == R600_RING_TYPE_DMA_INDEX) | ||
| 176 | ref_and_mask = SDMA0; | ||
| 177 | else | ||
| 178 | ref_and_mask = SDMA1; | ||
| 179 | |||
| 180 | radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits)); | ||
| 181 | radeon_ring_write(ring, GPU_HDP_FLUSH_DONE); | ||
| 182 | radeon_ring_write(ring, GPU_HDP_FLUSH_REQ); | ||
| 183 | radeon_ring_write(ring, ref_and_mask); /* reference */ | ||
| 184 | radeon_ring_write(ring, ref_and_mask); /* mask */ | ||
| 185 | radeon_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */ | ||
| 186 | } | ||
| 187 | |||
| 188 | /** | ||
| 91 | * cik_sdma_fence_ring_emit - emit a fence on the DMA ring | 189 | * cik_sdma_fence_ring_emit - emit a fence on the DMA ring |
| 92 | * | 190 | * |
| 93 | * @rdev: radeon_device pointer | 191 | * @rdev: radeon_device pointer |
| @@ -111,12 +209,7 @@ void cik_sdma_fence_ring_emit(struct radeon_device *rdev, | |||
| 111 | /* generate an interrupt */ | 209 | /* generate an interrupt */ |
| 112 | radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0)); | 210 | radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0)); |
| 113 | /* flush HDP */ | 211 | /* flush HDP */ |
| 114 | /* We should be using the new POLL_REG_MEM special op packet here | 212 | cik_sdma_hdp_flush_ring_emit(rdev, fence->ring); |
| 115 | * but it causes sDMA to hang sometimes | ||
| 116 | */ | ||
| 117 | radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); | ||
| 118 | radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2); | ||
| 119 | radeon_ring_write(ring, 0); | ||
| 120 | } | 213 | } |
| 121 | 214 | ||
| 122 | /** | 215 | /** |
| @@ -157,7 +250,9 @@ static void cik_sdma_gfx_stop(struct radeon_device *rdev) | |||
| 157 | u32 rb_cntl, reg_offset; | 250 | u32 rb_cntl, reg_offset; |
| 158 | int i; | 251 | int i; |
| 159 | 252 | ||
| 160 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); | 253 | if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) || |
| 254 | (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX)) | ||
| 255 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); | ||
| 161 | 256 | ||
| 162 | for (i = 0; i < 2; i++) { | 257 | for (i = 0; i < 2; i++) { |
| 163 | if (i == 0) | 258 | if (i == 0) |
| @@ -288,7 +383,9 @@ static int cik_sdma_gfx_resume(struct radeon_device *rdev) | |||
| 288 | } | 383 | } |
| 289 | } | 384 | } |
| 290 | 385 | ||
| 291 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); | 386 | if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) || |
| 387 | (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX)) | ||
| 388 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); | ||
| 292 | 389 | ||
| 293 | return 0; | 390 | return 0; |
| 294 | } | 391 | } |
| @@ -747,12 +844,7 @@ void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm | |||
| 747 | radeon_ring_write(ring, VMID(0)); | 844 | radeon_ring_write(ring, VMID(0)); |
| 748 | 845 | ||
| 749 | /* flush HDP */ | 846 | /* flush HDP */ |
| 750 | /* We should be using the new POLL_REG_MEM special op packet here | 847 | cik_sdma_hdp_flush_ring_emit(rdev, ridx); |
| 751 | * but it causes sDMA to hang sometimes | ||
| 752 | */ | ||
| 753 | radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); | ||
| 754 | radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2); | ||
| 755 | radeon_ring_write(ring, 0); | ||
| 756 | 848 | ||
| 757 | /* flush TLB */ | 849 | /* flush TLB */ |
| 758 | radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); | 850 | radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); |
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h index 5964af5e5b2d..98bae9d7b74d 100644 --- a/drivers/gpu/drm/radeon/cikd.h +++ b/drivers/gpu/drm/radeon/cikd.h | |||
| @@ -724,6 +724,17 @@ | |||
| 724 | 724 | ||
| 725 | #define ATC_MISC_CG 0x3350 | 725 | #define ATC_MISC_CG 0x3350 |
| 726 | 726 | ||
| 727 | #define GMCON_RENG_EXECUTE 0x3508 | ||
| 728 | #define RENG_EXECUTE_ON_PWR_UP (1 << 0) | ||
| 729 | #define GMCON_MISC 0x350c | ||
| 730 | #define RENG_EXECUTE_ON_REG_UPDATE (1 << 11) | ||
| 731 | #define STCTRL_STUTTER_EN (1 << 16) | ||
| 732 | |||
| 733 | #define GMCON_PGFSM_CONFIG 0x3538 | ||
| 734 | #define GMCON_PGFSM_WRITE 0x353c | ||
| 735 | #define GMCON_PGFSM_READ 0x3540 | ||
| 736 | #define GMCON_MISC3 0x3544 | ||
| 737 | |||
| 727 | #define MC_SEQ_CNTL_3 0x3600 | 738 | #define MC_SEQ_CNTL_3 0x3600 |
| 728 | # define CAC_EN (1 << 31) | 739 | # define CAC_EN (1 << 31) |
| 729 | #define MC_SEQ_G5PDX_CTRL 0x3604 | 740 | #define MC_SEQ_G5PDX_CTRL 0x3604 |
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c index 920e1e4a52c5..cf783fc0ef21 100644 --- a/drivers/gpu/drm/radeon/cypress_dpm.c +++ b/drivers/gpu/drm/radeon/cypress_dpm.c | |||
| @@ -1905,21 +1905,6 @@ int cypress_dpm_enable(struct radeon_device *rdev) | |||
| 1905 | if (pi->mg_clock_gating) | 1905 | if (pi->mg_clock_gating) |
| 1906 | cypress_mg_clock_gating_enable(rdev, true); | 1906 | cypress_mg_clock_gating_enable(rdev, true); |
| 1907 | 1907 | ||
| 1908 | if (rdev->irq.installed && | ||
| 1909 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { | ||
| 1910 | PPSMC_Result result; | ||
| 1911 | |||
| 1912 | ret = rv770_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); | ||
| 1913 | if (ret) | ||
| 1914 | return ret; | ||
| 1915 | rdev->irq.dpm_thermal = true; | ||
| 1916 | radeon_irq_set(rdev); | ||
| 1917 | result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt); | ||
| 1918 | |||
| 1919 | if (result != PPSMC_Result_OK) | ||
| 1920 | DRM_DEBUG_KMS("Could not enable thermal interrupts.\n"); | ||
| 1921 | } | ||
| 1922 | |||
| 1923 | rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true); | 1908 | rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true); |
| 1924 | 1909 | ||
| 1925 | return 0; | 1910 | return 0; |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 9702e55e924e..f2b9e21ce4da 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
| @@ -146,6 +146,7 @@ extern u32 si_get_csb_size(struct radeon_device *rdev); | |||
| 146 | extern void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer); | 146 | extern void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer); |
| 147 | extern u32 cik_get_csb_size(struct radeon_device *rdev); | 147 | extern u32 cik_get_csb_size(struct radeon_device *rdev); |
| 148 | extern void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer); | 148 | extern void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer); |
| 149 | extern void rv770_set_clk_bypass_mode(struct radeon_device *rdev); | ||
| 149 | 150 | ||
| 150 | static const u32 evergreen_golden_registers[] = | 151 | static const u32 evergreen_golden_registers[] = |
| 151 | { | 152 | { |
| @@ -3867,6 +3868,48 @@ static void evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask) | |||
| 3867 | evergreen_print_gpu_status_regs(rdev); | 3868 | evergreen_print_gpu_status_regs(rdev); |
| 3868 | } | 3869 | } |
| 3869 | 3870 | ||
| 3871 | void evergreen_gpu_pci_config_reset(struct radeon_device *rdev) | ||
| 3872 | { | ||
| 3873 | struct evergreen_mc_save save; | ||
| 3874 | u32 tmp, i; | ||
| 3875 | |||
| 3876 | dev_info(rdev->dev, "GPU pci config reset\n"); | ||
| 3877 | |||
| 3878 | /* disable dpm? */ | ||
| 3879 | |||
| 3880 | /* Disable CP parsing/prefetching */ | ||
| 3881 | WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT); | ||
| 3882 | udelay(50); | ||
| 3883 | /* Disable DMA */ | ||
| 3884 | tmp = RREG32(DMA_RB_CNTL); | ||
| 3885 | tmp &= ~DMA_RB_ENABLE; | ||
| 3886 | WREG32(DMA_RB_CNTL, tmp); | ||
| 3887 | /* XXX other engines? */ | ||
| 3888 | |||
| 3889 | /* halt the rlc */ | ||
| 3890 | r600_rlc_stop(rdev); | ||
| 3891 | |||
| 3892 | udelay(50); | ||
| 3893 | |||
| 3894 | /* set mclk/sclk to bypass */ | ||
| 3895 | rv770_set_clk_bypass_mode(rdev); | ||
| 3896 | /* disable BM */ | ||
| 3897 | pci_clear_master(rdev->pdev); | ||
| 3898 | /* disable mem access */ | ||
| 3899 | evergreen_mc_stop(rdev, &save); | ||
| 3900 | if (evergreen_mc_wait_for_idle(rdev)) { | ||
| 3901 | dev_warn(rdev->dev, "Wait for MC idle timed out !\n"); | ||
| 3902 | } | ||
| 3903 | /* reset */ | ||
| 3904 | radeon_pci_config_reset(rdev); | ||
| 3905 | /* wait for asic to come out of reset */ | ||
| 3906 | for (i = 0; i < rdev->usec_timeout; i++) { | ||
| 3907 | if (RREG32(CONFIG_MEMSIZE) != 0xffffffff) | ||
| 3908 | break; | ||
| 3909 | udelay(1); | ||
| 3910 | } | ||
| 3911 | } | ||
| 3912 | |||
| 3870 | int evergreen_asic_reset(struct radeon_device *rdev) | 3913 | int evergreen_asic_reset(struct radeon_device *rdev) |
| 3871 | { | 3914 | { |
| 3872 | u32 reset_mask; | 3915 | u32 reset_mask; |
| @@ -3876,10 +3919,17 @@ int evergreen_asic_reset(struct radeon_device *rdev) | |||
| 3876 | if (reset_mask) | 3919 | if (reset_mask) |
| 3877 | r600_set_bios_scratch_engine_hung(rdev, true); | 3920 | r600_set_bios_scratch_engine_hung(rdev, true); |
| 3878 | 3921 | ||
| 3922 | /* try soft reset */ | ||
| 3879 | evergreen_gpu_soft_reset(rdev, reset_mask); | 3923 | evergreen_gpu_soft_reset(rdev, reset_mask); |
| 3880 | 3924 | ||
| 3881 | reset_mask = evergreen_gpu_check_soft_reset(rdev); | 3925 | reset_mask = evergreen_gpu_check_soft_reset(rdev); |
| 3882 | 3926 | ||
| 3927 | /* try pci config reset */ | ||
| 3928 | if (reset_mask && radeon_hard_reset) | ||
| 3929 | evergreen_gpu_pci_config_reset(rdev); | ||
| 3930 | |||
| 3931 | reset_mask = evergreen_gpu_check_soft_reset(rdev); | ||
| 3932 | |||
| 3883 | if (!reset_mask) | 3933 | if (!reset_mask) |
| 3884 | r600_set_bios_scratch_engine_hung(rdev, false); | 3934 | r600_set_bios_scratch_engine_hung(rdev, false); |
| 3885 | 3935 | ||
| @@ -4298,8 +4348,8 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev) | |||
| 4298 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); | 4348 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); |
| 4299 | } | 4349 | } |
| 4300 | 4350 | ||
| 4301 | /* only one DAC on DCE6 */ | 4351 | /* only one DAC on DCE5 */ |
| 4302 | if (!ASIC_IS_DCE6(rdev)) | 4352 | if (!ASIC_IS_DCE5(rdev)) |
| 4303 | WREG32(DACA_AUTODETECT_INT_CONTROL, 0); | 4353 | WREG32(DACA_AUTODETECT_INT_CONTROL, 0); |
| 4304 | WREG32(DACB_AUTODETECT_INT_CONTROL, 0); | 4354 | WREG32(DACB_AUTODETECT_INT_CONTROL, 0); |
| 4305 | 4355 | ||
| @@ -5109,27 +5159,12 @@ static int evergreen_startup(struct radeon_device *rdev) | |||
| 5109 | 5159 | ||
| 5110 | evergreen_mc_program(rdev); | 5160 | evergreen_mc_program(rdev); |
| 5111 | 5161 | ||
| 5112 | if (ASIC_IS_DCE5(rdev)) { | 5162 | if (ASIC_IS_DCE5(rdev) && !rdev->pm.dpm_enabled) { |
| 5113 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) { | ||
| 5114 | r = ni_init_microcode(rdev); | ||
| 5115 | if (r) { | ||
| 5116 | DRM_ERROR("Failed to load firmware!\n"); | ||
| 5117 | return r; | ||
| 5118 | } | ||
| 5119 | } | ||
| 5120 | r = ni_mc_load_microcode(rdev); | 5163 | r = ni_mc_load_microcode(rdev); |
| 5121 | if (r) { | 5164 | if (r) { |
| 5122 | DRM_ERROR("Failed to load MC firmware!\n"); | 5165 | DRM_ERROR("Failed to load MC firmware!\n"); |
| 5123 | return r; | 5166 | return r; |
| 5124 | } | 5167 | } |
| 5125 | } else { | ||
| 5126 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { | ||
| 5127 | r = r600_init_microcode(rdev); | ||
| 5128 | if (r) { | ||
| 5129 | DRM_ERROR("Failed to load firmware!\n"); | ||
| 5130 | return r; | ||
| 5131 | } | ||
| 5132 | } | ||
| 5133 | } | 5168 | } |
| 5134 | 5169 | ||
| 5135 | if (rdev->flags & RADEON_IS_AGP) { | 5170 | if (rdev->flags & RADEON_IS_AGP) { |
| @@ -5199,14 +5234,12 @@ static int evergreen_startup(struct radeon_device *rdev) | |||
| 5199 | 5234 | ||
| 5200 | ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; | 5235 | ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
| 5201 | r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, | 5236 | r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, |
| 5202 | R600_CP_RB_RPTR, R600_CP_RB_WPTR, | ||
| 5203 | RADEON_CP_PACKET2); | 5237 | RADEON_CP_PACKET2); |
| 5204 | if (r) | 5238 | if (r) |
| 5205 | return r; | 5239 | return r; |
| 5206 | 5240 | ||
| 5207 | ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; | 5241 | ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; |
| 5208 | r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, | 5242 | r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, |
| 5209 | DMA_RB_RPTR, DMA_RB_WPTR, | ||
| 5210 | DMA_PACKET(DMA_PACKET_NOP, 0, 0)); | 5243 | DMA_PACKET(DMA_PACKET_NOP, 0, 0)); |
| 5211 | if (r) | 5244 | if (r) |
| 5212 | return r; | 5245 | return r; |
| @@ -5224,7 +5257,6 @@ static int evergreen_startup(struct radeon_device *rdev) | |||
| 5224 | ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; | 5257 | ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; |
| 5225 | if (ring->ring_size) { | 5258 | if (ring->ring_size) { |
| 5226 | r = radeon_ring_init(rdev, ring, ring->ring_size, 0, | 5259 | r = radeon_ring_init(rdev, ring, ring->ring_size, 0, |
| 5227 | UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR, | ||
| 5228 | RADEON_CP_PACKET2); | 5260 | RADEON_CP_PACKET2); |
| 5229 | if (!r) | 5261 | if (!r) |
| 5230 | r = uvd_v1_0_init(rdev); | 5262 | r = uvd_v1_0_init(rdev); |
| @@ -5267,6 +5299,8 @@ int evergreen_resume(struct radeon_device *rdev) | |||
| 5267 | /* init golden registers */ | 5299 | /* init golden registers */ |
| 5268 | evergreen_init_golden_registers(rdev); | 5300 | evergreen_init_golden_registers(rdev); |
| 5269 | 5301 | ||
| 5302 | radeon_pm_resume(rdev); | ||
| 5303 | |||
| 5270 | rdev->accel_working = true; | 5304 | rdev->accel_working = true; |
| 5271 | r = evergreen_startup(rdev); | 5305 | r = evergreen_startup(rdev); |
| 5272 | if (r) { | 5306 | if (r) { |
| @@ -5281,6 +5315,7 @@ int evergreen_resume(struct radeon_device *rdev) | |||
| 5281 | 5315 | ||
| 5282 | int evergreen_suspend(struct radeon_device *rdev) | 5316 | int evergreen_suspend(struct radeon_device *rdev) |
| 5283 | { | 5317 | { |
| 5318 | radeon_pm_suspend(rdev); | ||
| 5284 | r600_audio_fini(rdev); | 5319 | r600_audio_fini(rdev); |
| 5285 | uvd_v1_0_fini(rdev); | 5320 | uvd_v1_0_fini(rdev); |
| 5286 | radeon_uvd_suspend(rdev); | 5321 | radeon_uvd_suspend(rdev); |
| @@ -5357,6 +5392,27 @@ int evergreen_init(struct radeon_device *rdev) | |||
| 5357 | if (r) | 5392 | if (r) |
| 5358 | return r; | 5393 | return r; |
| 5359 | 5394 | ||
| 5395 | if (ASIC_IS_DCE5(rdev)) { | ||
| 5396 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) { | ||
| 5397 | r = ni_init_microcode(rdev); | ||
| 5398 | if (r) { | ||
| 5399 | DRM_ERROR("Failed to load firmware!\n"); | ||
| 5400 | return r; | ||
| 5401 | } | ||
| 5402 | } | ||
| 5403 | } else { | ||
| 5404 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { | ||
| 5405 | r = r600_init_microcode(rdev); | ||
| 5406 | if (r) { | ||
| 5407 | DRM_ERROR("Failed to load firmware!\n"); | ||
| 5408 | return r; | ||
| 5409 | } | ||
| 5410 | } | ||
| 5411 | } | ||
| 5412 | |||
| 5413 | /* Initialize power management */ | ||
| 5414 | radeon_pm_init(rdev); | ||
| 5415 | |||
| 5360 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; | 5416 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; |
| 5361 | r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); | 5417 | r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); |
| 5362 | 5418 | ||
| @@ -5409,6 +5465,7 @@ int evergreen_init(struct radeon_device *rdev) | |||
| 5409 | 5465 | ||
| 5410 | void evergreen_fini(struct radeon_device *rdev) | 5466 | void evergreen_fini(struct radeon_device *rdev) |
| 5411 | { | 5467 | { |
| 5468 | radeon_pm_fini(rdev); | ||
| 5412 | r600_audio_fini(rdev); | 5469 | r600_audio_fini(rdev); |
| 5413 | r700_cp_fini(rdev); | 5470 | r700_cp_fini(rdev); |
| 5414 | r600_dma_fini(rdev); | 5471 | r600_dma_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c index eb8ac315f92f..c7cac07f139b 100644 --- a/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/drivers/gpu/drm/radeon/evergreen_cs.c | |||
| @@ -967,7 +967,10 @@ static int evergreen_cs_track_check(struct radeon_cs_parser *p) | |||
| 967 | if (track->cb_dirty) { | 967 | if (track->cb_dirty) { |
| 968 | tmp = track->cb_target_mask; | 968 | tmp = track->cb_target_mask; |
| 969 | for (i = 0; i < 8; i++) { | 969 | for (i = 0; i < 8; i++) { |
| 970 | if ((tmp >> (i * 4)) & 0xF) { | 970 | u32 format = G_028C70_FORMAT(track->cb_color_info[i]); |
| 971 | |||
| 972 | if (format != V_028C70_COLOR_INVALID && | ||
| 973 | (tmp >> (i * 4)) & 0xF) { | ||
| 971 | /* at least one component is enabled */ | 974 | /* at least one component is enabled */ |
| 972 | if (track->cb_color_bo[i] == NULL) { | 975 | if (track->cb_color_bo[i] == NULL) { |
| 973 | dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n", | 976 | dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n", |
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h index 8a4e641f0e3c..a0f63ff5a5e9 100644 --- a/drivers/gpu/drm/radeon/evergreen_reg.h +++ b/drivers/gpu/drm/radeon/evergreen_reg.h | |||
| @@ -33,6 +33,7 @@ | |||
| 33 | #define EVERGREEN_PIF_PHY0_DATA 0xc | 33 | #define EVERGREEN_PIF_PHY0_DATA 0xc |
| 34 | #define EVERGREEN_PIF_PHY1_INDEX 0x10 | 34 | #define EVERGREEN_PIF_PHY1_INDEX 0x10 |
| 35 | #define EVERGREEN_PIF_PHY1_DATA 0x14 | 35 | #define EVERGREEN_PIF_PHY1_DATA 0x14 |
| 36 | #define EVERGREEN_MM_INDEX_HI 0x18 | ||
| 36 | 37 | ||
| 37 | #define EVERGREEN_VGA_MEMORY_BASE_ADDRESS 0x310 | 38 | #define EVERGREEN_VGA_MEMORY_BASE_ADDRESS 0x310 |
| 38 | #define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH 0x324 | 39 | #define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH 0x324 |
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index 17f990798992..f9c7963b3ee6 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h | |||
| @@ -82,12 +82,16 @@ | |||
| 82 | #define CG_SPLL_FUNC_CNTL_2 0x604 | 82 | #define CG_SPLL_FUNC_CNTL_2 0x604 |
| 83 | #define SCLK_MUX_SEL(x) ((x) << 0) | 83 | #define SCLK_MUX_SEL(x) ((x) << 0) |
| 84 | #define SCLK_MUX_SEL_MASK (0x1ff << 0) | 84 | #define SCLK_MUX_SEL_MASK (0x1ff << 0) |
| 85 | #define SCLK_MUX_UPDATE (1 << 26) | ||
| 85 | #define CG_SPLL_FUNC_CNTL_3 0x608 | 86 | #define CG_SPLL_FUNC_CNTL_3 0x608 |
| 86 | #define SPLL_FB_DIV(x) ((x) << 0) | 87 | #define SPLL_FB_DIV(x) ((x) << 0) |
| 87 | #define SPLL_FB_DIV_MASK (0x3ffffff << 0) | 88 | #define SPLL_FB_DIV_MASK (0x3ffffff << 0) |
| 88 | #define SPLL_DITHEN (1 << 28) | 89 | #define SPLL_DITHEN (1 << 28) |
| 90 | #define CG_SPLL_STATUS 0x60c | ||
| 91 | #define SPLL_CHG_STATUS (1 << 1) | ||
| 89 | 92 | ||
| 90 | #define MPLL_CNTL_MODE 0x61c | 93 | #define MPLL_CNTL_MODE 0x61c |
| 94 | # define MPLL_MCLK_SEL (1 << 11) | ||
| 91 | # define SS_SSEN (1 << 24) | 95 | # define SS_SSEN (1 << 24) |
| 92 | # define SS_DSMODE_EN (1 << 25) | 96 | # define SS_DSMODE_EN (1 << 25) |
| 93 | 97 | ||
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c index b41905573cd2..b6e01d5d2cce 100644 --- a/drivers/gpu/drm/radeon/kv_dpm.c +++ b/drivers/gpu/drm/radeon/kv_dpm.c | |||
| @@ -1126,11 +1126,6 @@ int kv_dpm_enable(struct radeon_device *rdev) | |||
| 1126 | struct kv_power_info *pi = kv_get_pi(rdev); | 1126 | struct kv_power_info *pi = kv_get_pi(rdev); |
| 1127 | int ret; | 1127 | int ret; |
| 1128 | 1128 | ||
| 1129 | cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX | | ||
| 1130 | RADEON_CG_BLOCK_SDMA | | ||
| 1131 | RADEON_CG_BLOCK_BIF | | ||
| 1132 | RADEON_CG_BLOCK_HDP), false); | ||
| 1133 | |||
| 1134 | ret = kv_process_firmware_header(rdev); | 1129 | ret = kv_process_firmware_header(rdev); |
| 1135 | if (ret) { | 1130 | if (ret) { |
| 1136 | DRM_ERROR("kv_process_firmware_header failed\n"); | 1131 | DRM_ERROR("kv_process_firmware_header failed\n"); |
| @@ -1215,6 +1210,21 @@ int kv_dpm_enable(struct radeon_device *rdev) | |||
| 1215 | 1210 | ||
| 1216 | kv_reset_acp_boot_level(rdev); | 1211 | kv_reset_acp_boot_level(rdev); |
| 1217 | 1212 | ||
| 1213 | ret = kv_smc_bapm_enable(rdev, false); | ||
| 1214 | if (ret) { | ||
| 1215 | DRM_ERROR("kv_smc_bapm_enable failed\n"); | ||
| 1216 | return ret; | ||
| 1217 | } | ||
| 1218 | |||
| 1219 | kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps); | ||
| 1220 | |||
| 1221 | return ret; | ||
| 1222 | } | ||
| 1223 | |||
| 1224 | int kv_dpm_late_enable(struct radeon_device *rdev) | ||
| 1225 | { | ||
| 1226 | int ret; | ||
| 1227 | |||
| 1218 | if (rdev->irq.installed && | 1228 | if (rdev->irq.installed && |
| 1219 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { | 1229 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { |
| 1220 | ret = kv_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); | 1230 | ret = kv_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); |
| @@ -1226,35 +1236,17 @@ int kv_dpm_enable(struct radeon_device *rdev) | |||
| 1226 | radeon_irq_set(rdev); | 1236 | radeon_irq_set(rdev); |
| 1227 | } | 1237 | } |
| 1228 | 1238 | ||
| 1229 | ret = kv_smc_bapm_enable(rdev, false); | ||
| 1230 | if (ret) { | ||
| 1231 | DRM_ERROR("kv_smc_bapm_enable failed\n"); | ||
| 1232 | return ret; | ||
| 1233 | } | ||
| 1234 | |||
| 1235 | /* powerdown unused blocks for now */ | 1239 | /* powerdown unused blocks for now */ |
| 1236 | kv_dpm_powergate_acp(rdev, true); | 1240 | kv_dpm_powergate_acp(rdev, true); |
| 1237 | kv_dpm_powergate_samu(rdev, true); | 1241 | kv_dpm_powergate_samu(rdev, true); |
| 1238 | kv_dpm_powergate_vce(rdev, true); | 1242 | kv_dpm_powergate_vce(rdev, true); |
| 1239 | kv_dpm_powergate_uvd(rdev, true); | 1243 | kv_dpm_powergate_uvd(rdev, true); |
| 1240 | 1244 | ||
| 1241 | cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX | | ||
| 1242 | RADEON_CG_BLOCK_SDMA | | ||
| 1243 | RADEON_CG_BLOCK_BIF | | ||
| 1244 | RADEON_CG_BLOCK_HDP), true); | ||
| 1245 | |||
| 1246 | kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps); | ||
| 1247 | |||
| 1248 | return ret; | 1245 | return ret; |
| 1249 | } | 1246 | } |
| 1250 | 1247 | ||
| 1251 | void kv_dpm_disable(struct radeon_device *rdev) | 1248 | void kv_dpm_disable(struct radeon_device *rdev) |
| 1252 | { | 1249 | { |
| 1253 | cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX | | ||
| 1254 | RADEON_CG_BLOCK_SDMA | | ||
| 1255 | RADEON_CG_BLOCK_BIF | | ||
| 1256 | RADEON_CG_BLOCK_HDP), false); | ||
| 1257 | |||
| 1258 | kv_smc_bapm_enable(rdev, false); | 1250 | kv_smc_bapm_enable(rdev, false); |
| 1259 | 1251 | ||
| 1260 | /* powerup blocks */ | 1252 | /* powerup blocks */ |
| @@ -1779,11 +1771,6 @@ int kv_dpm_set_power_state(struct radeon_device *rdev) | |||
| 1779 | /*struct radeon_ps *old_ps = &pi->current_rps;*/ | 1771 | /*struct radeon_ps *old_ps = &pi->current_rps;*/ |
| 1780 | int ret; | 1772 | int ret; |
| 1781 | 1773 | ||
| 1782 | cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX | | ||
| 1783 | RADEON_CG_BLOCK_SDMA | | ||
| 1784 | RADEON_CG_BLOCK_BIF | | ||
| 1785 | RADEON_CG_BLOCK_HDP), false); | ||
| 1786 | |||
| 1787 | if (pi->bapm_enable) { | 1774 | if (pi->bapm_enable) { |
| 1788 | ret = kv_smc_bapm_enable(rdev, rdev->pm.dpm.ac_power); | 1775 | ret = kv_smc_bapm_enable(rdev, rdev->pm.dpm.ac_power); |
| 1789 | if (ret) { | 1776 | if (ret) { |
| @@ -1849,11 +1836,6 @@ int kv_dpm_set_power_state(struct radeon_device *rdev) | |||
| 1849 | } | 1836 | } |
| 1850 | } | 1837 | } |
| 1851 | 1838 | ||
| 1852 | cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX | | ||
| 1853 | RADEON_CG_BLOCK_SDMA | | ||
| 1854 | RADEON_CG_BLOCK_BIF | | ||
| 1855 | RADEON_CG_BLOCK_HDP), true); | ||
| 1856 | |||
| 1857 | return 0; | 1839 | return 0; |
| 1858 | } | 1840 | } |
| 1859 | 1841 | ||
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index f59a9e9fccf8..ea932ac66fc6 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c | |||
| @@ -174,6 +174,7 @@ extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev); | |||
| 174 | extern void evergreen_program_aspm(struct radeon_device *rdev); | 174 | extern void evergreen_program_aspm(struct radeon_device *rdev); |
| 175 | extern void sumo_rlc_fini(struct radeon_device *rdev); | 175 | extern void sumo_rlc_fini(struct radeon_device *rdev); |
| 176 | extern int sumo_rlc_init(struct radeon_device *rdev); | 176 | extern int sumo_rlc_init(struct radeon_device *rdev); |
| 177 | extern void evergreen_gpu_pci_config_reset(struct radeon_device *rdev); | ||
| 177 | 178 | ||
| 178 | /* Firmware Names */ | 179 | /* Firmware Names */ |
| 179 | MODULE_FIRMWARE("radeon/BARTS_pfp.bin"); | 180 | MODULE_FIRMWARE("radeon/BARTS_pfp.bin"); |
| @@ -1330,13 +1331,12 @@ void cayman_fence_ring_emit(struct radeon_device *rdev, | |||
| 1330 | { | 1331 | { |
| 1331 | struct radeon_ring *ring = &rdev->ring[fence->ring]; | 1332 | struct radeon_ring *ring = &rdev->ring[fence->ring]; |
| 1332 | u64 addr = rdev->fence_drv[fence->ring].gpu_addr; | 1333 | u64 addr = rdev->fence_drv[fence->ring].gpu_addr; |
| 1334 | u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA | | ||
| 1335 | PACKET3_SH_ACTION_ENA; | ||
| 1333 | 1336 | ||
| 1334 | /* flush read cache over gart for this vmid */ | 1337 | /* flush read cache over gart for this vmid */ |
| 1335 | radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); | ||
| 1336 | radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2); | ||
| 1337 | radeon_ring_write(ring, 0); | ||
| 1338 | radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); | 1338 | radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); |
| 1339 | radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA); | 1339 | radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl); |
| 1340 | radeon_ring_write(ring, 0xFFFFFFFF); | 1340 | radeon_ring_write(ring, 0xFFFFFFFF); |
| 1341 | radeon_ring_write(ring, 0); | 1341 | radeon_ring_write(ring, 0); |
| 1342 | radeon_ring_write(ring, 10); /* poll interval */ | 1342 | radeon_ring_write(ring, 10); /* poll interval */ |
| @@ -1352,6 +1352,8 @@ void cayman_fence_ring_emit(struct radeon_device *rdev, | |||
| 1352 | void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) | 1352 | void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) |
| 1353 | { | 1353 | { |
| 1354 | struct radeon_ring *ring = &rdev->ring[ib->ring]; | 1354 | struct radeon_ring *ring = &rdev->ring[ib->ring]; |
| 1355 | u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA | | ||
| 1356 | PACKET3_SH_ACTION_ENA; | ||
| 1355 | 1357 | ||
| 1356 | /* set to DX10/11 mode */ | 1358 | /* set to DX10/11 mode */ |
| 1357 | radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0)); | 1359 | radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0)); |
| @@ -1376,14 +1378,11 @@ void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) | |||
| 1376 | (ib->vm ? (ib->vm->id << 24) : 0)); | 1378 | (ib->vm ? (ib->vm->id << 24) : 0)); |
| 1377 | 1379 | ||
| 1378 | /* flush read cache over gart for this vmid */ | 1380 | /* flush read cache over gart for this vmid */ |
| 1379 | radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); | ||
| 1380 | radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2); | ||
| 1381 | radeon_ring_write(ring, ib->vm ? ib->vm->id : 0); | ||
| 1382 | radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); | 1381 | radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); |
| 1383 | radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA); | 1382 | radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl); |
| 1384 | radeon_ring_write(ring, 0xFFFFFFFF); | 1383 | radeon_ring_write(ring, 0xFFFFFFFF); |
| 1385 | radeon_ring_write(ring, 0); | 1384 | radeon_ring_write(ring, 0); |
| 1386 | radeon_ring_write(ring, 10); /* poll interval */ | 1385 | radeon_ring_write(ring, ((ib->vm ? ib->vm->id : 0) << 24) | 10); /* poll interval */ |
| 1387 | } | 1386 | } |
| 1388 | 1387 | ||
| 1389 | static void cayman_cp_enable(struct radeon_device *rdev, bool enable) | 1388 | static void cayman_cp_enable(struct radeon_device *rdev, bool enable) |
| @@ -1391,13 +1390,63 @@ static void cayman_cp_enable(struct radeon_device *rdev, bool enable) | |||
| 1391 | if (enable) | 1390 | if (enable) |
| 1392 | WREG32(CP_ME_CNTL, 0); | 1391 | WREG32(CP_ME_CNTL, 0); |
| 1393 | else { | 1392 | else { |
| 1394 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); | 1393 | if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX) |
| 1394 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); | ||
| 1395 | WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT)); | 1395 | WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT)); |
| 1396 | WREG32(SCRATCH_UMSK, 0); | 1396 | WREG32(SCRATCH_UMSK, 0); |
| 1397 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; | 1397 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; |
| 1398 | } | 1398 | } |
| 1399 | } | 1399 | } |
| 1400 | 1400 | ||
| 1401 | u32 cayman_gfx_get_rptr(struct radeon_device *rdev, | ||
| 1402 | struct radeon_ring *ring) | ||
| 1403 | { | ||
| 1404 | u32 rptr; | ||
| 1405 | |||
| 1406 | if (rdev->wb.enabled) | ||
| 1407 | rptr = rdev->wb.wb[ring->rptr_offs/4]; | ||
| 1408 | else { | ||
| 1409 | if (ring->idx == RADEON_RING_TYPE_GFX_INDEX) | ||
| 1410 | rptr = RREG32(CP_RB0_RPTR); | ||
| 1411 | else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX) | ||
| 1412 | rptr = RREG32(CP_RB1_RPTR); | ||
| 1413 | else | ||
| 1414 | rptr = RREG32(CP_RB2_RPTR); | ||
| 1415 | } | ||
| 1416 | |||
| 1417 | return rptr; | ||
| 1418 | } | ||
| 1419 | |||
| 1420 | u32 cayman_gfx_get_wptr(struct radeon_device *rdev, | ||
| 1421 | struct radeon_ring *ring) | ||
| 1422 | { | ||
| 1423 | u32 wptr; | ||
| 1424 | |||
| 1425 | if (ring->idx == RADEON_RING_TYPE_GFX_INDEX) | ||
| 1426 | wptr = RREG32(CP_RB0_WPTR); | ||
| 1427 | else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX) | ||
| 1428 | wptr = RREG32(CP_RB1_WPTR); | ||
| 1429 | else | ||
| 1430 | wptr = RREG32(CP_RB2_WPTR); | ||
| 1431 | |||
| 1432 | return wptr; | ||
| 1433 | } | ||
| 1434 | |||
| 1435 | void cayman_gfx_set_wptr(struct radeon_device *rdev, | ||
| 1436 | struct radeon_ring *ring) | ||
| 1437 | { | ||
| 1438 | if (ring->idx == RADEON_RING_TYPE_GFX_INDEX) { | ||
| 1439 | WREG32(CP_RB0_WPTR, ring->wptr); | ||
| 1440 | (void)RREG32(CP_RB0_WPTR); | ||
| 1441 | } else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX) { | ||
| 1442 | WREG32(CP_RB1_WPTR, ring->wptr); | ||
| 1443 | (void)RREG32(CP_RB1_WPTR); | ||
| 1444 | } else { | ||
| 1445 | WREG32(CP_RB2_WPTR, ring->wptr); | ||
| 1446 | (void)RREG32(CP_RB2_WPTR); | ||
| 1447 | } | ||
| 1448 | } | ||
| 1449 | |||
| 1401 | static int cayman_cp_load_microcode(struct radeon_device *rdev) | 1450 | static int cayman_cp_load_microcode(struct radeon_device *rdev) |
| 1402 | { | 1451 | { |
| 1403 | const __be32 *fw_data; | 1452 | const __be32 *fw_data; |
| @@ -1526,6 +1575,16 @@ static int cayman_cp_resume(struct radeon_device *rdev) | |||
| 1526 | CP_RB1_BASE, | 1575 | CP_RB1_BASE, |
| 1527 | CP_RB2_BASE | 1576 | CP_RB2_BASE |
| 1528 | }; | 1577 | }; |
| 1578 | static const unsigned cp_rb_rptr[] = { | ||
| 1579 | CP_RB0_RPTR, | ||
| 1580 | CP_RB1_RPTR, | ||
| 1581 | CP_RB2_RPTR | ||
| 1582 | }; | ||
| 1583 | static const unsigned cp_rb_wptr[] = { | ||
| 1584 | CP_RB0_WPTR, | ||
| 1585 | CP_RB1_WPTR, | ||
| 1586 | CP_RB2_WPTR | ||
| 1587 | }; | ||
| 1529 | struct radeon_ring *ring; | 1588 | struct radeon_ring *ring; |
| 1530 | int i, r; | 1589 | int i, r; |
| 1531 | 1590 | ||
| @@ -1584,8 +1643,8 @@ static int cayman_cp_resume(struct radeon_device *rdev) | |||
| 1584 | WREG32_P(cp_rb_cntl[i], RB_RPTR_WR_ENA, ~RB_RPTR_WR_ENA); | 1643 | WREG32_P(cp_rb_cntl[i], RB_RPTR_WR_ENA, ~RB_RPTR_WR_ENA); |
| 1585 | 1644 | ||
| 1586 | ring->rptr = ring->wptr = 0; | 1645 | ring->rptr = ring->wptr = 0; |
| 1587 | WREG32(ring->rptr_reg, ring->rptr); | 1646 | WREG32(cp_rb_rptr[i], ring->rptr); |
| 1588 | WREG32(ring->wptr_reg, ring->wptr); | 1647 | WREG32(cp_rb_wptr[i], ring->wptr); |
| 1589 | 1648 | ||
| 1590 | mdelay(1); | 1649 | mdelay(1); |
| 1591 | WREG32_P(cp_rb_cntl[i], 0, ~RB_RPTR_WR_ENA); | 1650 | WREG32_P(cp_rb_cntl[i], 0, ~RB_RPTR_WR_ENA); |
| @@ -1605,6 +1664,9 @@ static int cayman_cp_resume(struct radeon_device *rdev) | |||
| 1605 | return r; | 1664 | return r; |
| 1606 | } | 1665 | } |
| 1607 | 1666 | ||
| 1667 | if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX) | ||
| 1668 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); | ||
| 1669 | |||
| 1608 | return 0; | 1670 | return 0; |
| 1609 | } | 1671 | } |
| 1610 | 1672 | ||
| @@ -1831,8 +1893,10 @@ int cayman_asic_reset(struct radeon_device *rdev) | |||
| 1831 | 1893 | ||
| 1832 | reset_mask = cayman_gpu_check_soft_reset(rdev); | 1894 | reset_mask = cayman_gpu_check_soft_reset(rdev); |
| 1833 | 1895 | ||
| 1834 | if (!reset_mask) | 1896 | if (reset_mask) |
| 1835 | r600_set_bios_scratch_engine_hung(rdev, false); | 1897 | evergreen_gpu_pci_config_reset(rdev); |
| 1898 | |||
| 1899 | r600_set_bios_scratch_engine_hung(rdev, false); | ||
| 1836 | 1900 | ||
| 1837 | return 0; | 1901 | return 0; |
| 1838 | } | 1902 | } |
| @@ -1878,23 +1942,7 @@ static int cayman_startup(struct radeon_device *rdev) | |||
| 1878 | 1942 | ||
| 1879 | evergreen_mc_program(rdev); | 1943 | evergreen_mc_program(rdev); |
| 1880 | 1944 | ||
| 1881 | if (rdev->flags & RADEON_IS_IGP) { | 1945 | if (!(rdev->flags & RADEON_IS_IGP) && !rdev->pm.dpm_enabled) { |
| 1882 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { | ||
| 1883 | r = ni_init_microcode(rdev); | ||
| 1884 | if (r) { | ||
| 1885 | DRM_ERROR("Failed to load firmware!\n"); | ||
| 1886 | return r; | ||
| 1887 | } | ||
| 1888 | } | ||
| 1889 | } else { | ||
| 1890 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) { | ||
| 1891 | r = ni_init_microcode(rdev); | ||
| 1892 | if (r) { | ||
| 1893 | DRM_ERROR("Failed to load firmware!\n"); | ||
| 1894 | return r; | ||
| 1895 | } | ||
| 1896 | } | ||
| 1897 | |||
| 1898 | r = ni_mc_load_microcode(rdev); | 1946 | r = ni_mc_load_microcode(rdev); |
| 1899 | if (r) { | 1947 | if (r) { |
| 1900 | DRM_ERROR("Failed to load MC firmware!\n"); | 1948 | DRM_ERROR("Failed to load MC firmware!\n"); |
| @@ -1981,23 +2029,18 @@ static int cayman_startup(struct radeon_device *rdev) | |||
| 1981 | evergreen_irq_set(rdev); | 2029 | evergreen_irq_set(rdev); |
| 1982 | 2030 | ||
| 1983 | r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, | 2031 | r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, |
| 1984 | CP_RB0_RPTR, CP_RB0_WPTR, | ||
| 1985 | RADEON_CP_PACKET2); | 2032 | RADEON_CP_PACKET2); |
| 1986 | if (r) | 2033 | if (r) |
| 1987 | return r; | 2034 | return r; |
| 1988 | 2035 | ||
| 1989 | ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; | 2036 | ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; |
| 1990 | r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, | 2037 | r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, |
| 1991 | DMA_RB_RPTR + DMA0_REGISTER_OFFSET, | ||
| 1992 | DMA_RB_WPTR + DMA0_REGISTER_OFFSET, | ||
| 1993 | DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); | 2038 | DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); |
| 1994 | if (r) | 2039 | if (r) |
| 1995 | return r; | 2040 | return r; |
| 1996 | 2041 | ||
| 1997 | ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]; | 2042 | ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]; |
| 1998 | r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET, | 2043 | r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET, |
| 1999 | DMA_RB_RPTR + DMA1_REGISTER_OFFSET, | ||
| 2000 | DMA_RB_WPTR + DMA1_REGISTER_OFFSET, | ||
| 2001 | DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); | 2044 | DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); |
| 2002 | if (r) | 2045 | if (r) |
| 2003 | return r; | 2046 | return r; |
| @@ -2016,7 +2059,6 @@ static int cayman_startup(struct radeon_device *rdev) | |||
| 2016 | ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; | 2059 | ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; |
| 2017 | if (ring->ring_size) { | 2060 | if (ring->ring_size) { |
| 2018 | r = radeon_ring_init(rdev, ring, ring->ring_size, 0, | 2061 | r = radeon_ring_init(rdev, ring, ring->ring_size, 0, |
| 2019 | UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR, | ||
| 2020 | RADEON_CP_PACKET2); | 2062 | RADEON_CP_PACKET2); |
| 2021 | if (!r) | 2063 | if (!r) |
| 2022 | r = uvd_v1_0_init(rdev); | 2064 | r = uvd_v1_0_init(rdev); |
| @@ -2063,6 +2105,8 @@ int cayman_resume(struct radeon_device *rdev) | |||
| 2063 | /* init golden registers */ | 2105 | /* init golden registers */ |
| 2064 | ni_init_golden_registers(rdev); | 2106 | ni_init_golden_registers(rdev); |
| 2065 | 2107 | ||
| 2108 | radeon_pm_resume(rdev); | ||
| 2109 | |||
| 2066 | rdev->accel_working = true; | 2110 | rdev->accel_working = true; |
| 2067 | r = cayman_startup(rdev); | 2111 | r = cayman_startup(rdev); |
| 2068 | if (r) { | 2112 | if (r) { |
| @@ -2075,6 +2119,7 @@ int cayman_resume(struct radeon_device *rdev) | |||
| 2075 | 2119 | ||
| 2076 | int cayman_suspend(struct radeon_device *rdev) | 2120 | int cayman_suspend(struct radeon_device *rdev) |
| 2077 | { | 2121 | { |
| 2122 | radeon_pm_suspend(rdev); | ||
| 2078 | if (ASIC_IS_DCE6(rdev)) | 2123 | if (ASIC_IS_DCE6(rdev)) |
| 2079 | dce6_audio_fini(rdev); | 2124 | dce6_audio_fini(rdev); |
| 2080 | else | 2125 | else |
| @@ -2145,6 +2190,27 @@ int cayman_init(struct radeon_device *rdev) | |||
| 2145 | if (r) | 2190 | if (r) |
| 2146 | return r; | 2191 | return r; |
| 2147 | 2192 | ||
| 2193 | if (rdev->flags & RADEON_IS_IGP) { | ||
| 2194 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { | ||
| 2195 | r = ni_init_microcode(rdev); | ||
| 2196 | if (r) { | ||
| 2197 | DRM_ERROR("Failed to load firmware!\n"); | ||
| 2198 | return r; | ||
| 2199 | } | ||
| 2200 | } | ||
| 2201 | } else { | ||
| 2202 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) { | ||
| 2203 | r = ni_init_microcode(rdev); | ||
| 2204 | if (r) { | ||
| 2205 | DRM_ERROR("Failed to load firmware!\n"); | ||
| 2206 | return r; | ||
| 2207 | } | ||
| 2208 | } | ||
| 2209 | } | ||
| 2210 | |||
| 2211 | /* Initialize power management */ | ||
| 2212 | radeon_pm_init(rdev); | ||
| 2213 | |||
| 2148 | ring->ring_obj = NULL; | 2214 | ring->ring_obj = NULL; |
| 2149 | r600_ring_init(rdev, ring, 1024 * 1024); | 2215 | r600_ring_init(rdev, ring, 1024 * 1024); |
| 2150 | 2216 | ||
| @@ -2204,6 +2270,7 @@ int cayman_init(struct radeon_device *rdev) | |||
| 2204 | 2270 | ||
| 2205 | void cayman_fini(struct radeon_device *rdev) | 2271 | void cayman_fini(struct radeon_device *rdev) |
| 2206 | { | 2272 | { |
| 2273 | radeon_pm_fini(rdev); | ||
| 2207 | cayman_cp_fini(rdev); | 2274 | cayman_cp_fini(rdev); |
| 2208 | cayman_dma_fini(rdev); | 2275 | cayman_dma_fini(rdev); |
| 2209 | r600_irq_fini(rdev); | 2276 | r600_irq_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c index bdeb65ed3658..7cf96b15377f 100644 --- a/drivers/gpu/drm/radeon/ni_dma.c +++ b/drivers/gpu/drm/radeon/ni_dma.c | |||
| @@ -43,6 +43,75 @@ u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev); | |||
| 43 | */ | 43 | */ |
| 44 | 44 | ||
| 45 | /** | 45 | /** |
| 46 | * cayman_dma_get_rptr - get the current read pointer | ||
| 47 | * | ||
| 48 | * @rdev: radeon_device pointer | ||
| 49 | * @ring: radeon ring pointer | ||
| 50 | * | ||
| 51 | * Get the current rptr from the hardware (cayman+). | ||
| 52 | */ | ||
| 53 | uint32_t cayman_dma_get_rptr(struct radeon_device *rdev, | ||
| 54 | struct radeon_ring *ring) | ||
| 55 | { | ||
| 56 | u32 rptr, reg; | ||
| 57 | |||
| 58 | if (rdev->wb.enabled) { | ||
| 59 | rptr = rdev->wb.wb[ring->rptr_offs/4]; | ||
| 60 | } else { | ||
| 61 | if (ring->idx == R600_RING_TYPE_DMA_INDEX) | ||
| 62 | reg = DMA_RB_RPTR + DMA0_REGISTER_OFFSET; | ||
| 63 | else | ||
| 64 | reg = DMA_RB_RPTR + DMA1_REGISTER_OFFSET; | ||
| 65 | |||
| 66 | rptr = RREG32(reg); | ||
| 67 | } | ||
| 68 | |||
| 69 | return (rptr & 0x3fffc) >> 2; | ||
| 70 | } | ||
| 71 | |||
| 72 | /** | ||
| 73 | * cayman_dma_get_wptr - get the current write pointer | ||
| 74 | * | ||
| 75 | * @rdev: radeon_device pointer | ||
| 76 | * @ring: radeon ring pointer | ||
| 77 | * | ||
| 78 | * Get the current wptr from the hardware (cayman+). | ||
| 79 | */ | ||
| 80 | uint32_t cayman_dma_get_wptr(struct radeon_device *rdev, | ||
| 81 | struct radeon_ring *ring) | ||
| 82 | { | ||
| 83 | u32 reg; | ||
| 84 | |||
| 85 | if (ring->idx == R600_RING_TYPE_DMA_INDEX) | ||
| 86 | reg = DMA_RB_WPTR + DMA0_REGISTER_OFFSET; | ||
| 87 | else | ||
| 88 | reg = DMA_RB_WPTR + DMA1_REGISTER_OFFSET; | ||
| 89 | |||
| 90 | return (RREG32(reg) & 0x3fffc) >> 2; | ||
| 91 | } | ||
| 92 | |||
| 93 | /** | ||
| 94 | * cayman_dma_set_wptr - commit the write pointer | ||
| 95 | * | ||
| 96 | * @rdev: radeon_device pointer | ||
| 97 | * @ring: radeon ring pointer | ||
| 98 | * | ||
| 99 | * Write the wptr back to the hardware (cayman+). | ||
| 100 | */ | ||
| 101 | void cayman_dma_set_wptr(struct radeon_device *rdev, | ||
| 102 | struct radeon_ring *ring) | ||
| 103 | { | ||
| 104 | u32 reg; | ||
| 105 | |||
| 106 | if (ring->idx == R600_RING_TYPE_DMA_INDEX) | ||
| 107 | reg = DMA_RB_WPTR + DMA0_REGISTER_OFFSET; | ||
| 108 | else | ||
| 109 | reg = DMA_RB_WPTR + DMA1_REGISTER_OFFSET; | ||
| 110 | |||
| 111 | WREG32(reg, (ring->wptr << 2) & 0x3fffc); | ||
| 112 | } | ||
| 113 | |||
| 114 | /** | ||
| 46 | * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine | 115 | * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine |
| 47 | * | 116 | * |
| 48 | * @rdev: radeon_device pointer | 117 | * @rdev: radeon_device pointer |
| @@ -88,7 +157,9 @@ void cayman_dma_stop(struct radeon_device *rdev) | |||
| 88 | { | 157 | { |
| 89 | u32 rb_cntl; | 158 | u32 rb_cntl; |
| 90 | 159 | ||
| 91 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); | 160 | if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) || |
| 161 | (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX)) | ||
| 162 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); | ||
| 92 | 163 | ||
| 93 | /* dma0 */ | 164 | /* dma0 */ |
| 94 | rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET); | 165 | rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET); |
| @@ -190,7 +261,9 @@ int cayman_dma_resume(struct radeon_device *rdev) | |||
| 190 | } | 261 | } |
| 191 | } | 262 | } |
| 192 | 263 | ||
| 193 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); | 264 | if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) || |
| 265 | (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX)) | ||
| 266 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); | ||
| 194 | 267 | ||
| 195 | return 0; | 268 | return 0; |
| 196 | } | 269 | } |
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c index 49c4d48f54d6..c351226ecb31 100644 --- a/drivers/gpu/drm/radeon/ni_dpm.c +++ b/drivers/gpu/drm/radeon/ni_dpm.c | |||
| @@ -720,6 +720,8 @@ static const u32 cayman_sysls_enable[] = | |||
| 720 | struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev); | 720 | struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev); |
| 721 | struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev); | 721 | struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev); |
| 722 | 722 | ||
| 723 | extern int ni_mc_load_microcode(struct radeon_device *rdev); | ||
| 724 | |||
| 723 | struct ni_power_info *ni_get_pi(struct radeon_device *rdev) | 725 | struct ni_power_info *ni_get_pi(struct radeon_device *rdev) |
| 724 | { | 726 | { |
| 725 | struct ni_power_info *pi = rdev->pm.dpm.priv; | 727 | struct ni_power_info *pi = rdev->pm.dpm.priv; |
| @@ -3565,7 +3567,11 @@ void ni_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev, | |||
| 3565 | void ni_dpm_setup_asic(struct radeon_device *rdev) | 3567 | void ni_dpm_setup_asic(struct radeon_device *rdev) |
| 3566 | { | 3568 | { |
| 3567 | struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); | 3569 | struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); |
| 3570 | int r; | ||
| 3568 | 3571 | ||
| 3572 | r = ni_mc_load_microcode(rdev); | ||
| 3573 | if (r) | ||
| 3574 | DRM_ERROR("Failed to load MC firmware!\n"); | ||
| 3569 | ni_read_clock_registers(rdev); | 3575 | ni_read_clock_registers(rdev); |
| 3570 | btc_read_arb_registers(rdev); | 3576 | btc_read_arb_registers(rdev); |
| 3571 | rv770_get_memory_type(rdev); | 3577 | rv770_get_memory_type(rdev); |
| @@ -3710,21 +3716,6 @@ int ni_dpm_enable(struct radeon_device *rdev) | |||
| 3710 | if (eg_pi->ls_clock_gating) | 3716 | if (eg_pi->ls_clock_gating) |
| 3711 | ni_ls_clockgating_enable(rdev, true); | 3717 | ni_ls_clockgating_enable(rdev, true); |
| 3712 | 3718 | ||
| 3713 | if (rdev->irq.installed && | ||
| 3714 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { | ||
| 3715 | PPSMC_Result result; | ||
| 3716 | |||
| 3717 | ret = rv770_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, 0xff * 1000); | ||
| 3718 | if (ret) | ||
| 3719 | return ret; | ||
| 3720 | rdev->irq.dpm_thermal = true; | ||
| 3721 | radeon_irq_set(rdev); | ||
| 3722 | result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt); | ||
| 3723 | |||
| 3724 | if (result != PPSMC_Result_OK) | ||
| 3725 | DRM_DEBUG_KMS("Could not enable thermal interrupts.\n"); | ||
| 3726 | } | ||
| 3727 | |||
| 3728 | rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true); | 3719 | rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true); |
| 3729 | 3720 | ||
| 3730 | ni_update_current_ps(rdev, boot_ps); | 3721 | ni_update_current_ps(rdev, boot_ps); |
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h index 22421bc80c0d..d996033c243e 100644 --- a/drivers/gpu/drm/radeon/nid.h +++ b/drivers/gpu/drm/radeon/nid.h | |||
| @@ -1154,6 +1154,7 @@ | |||
| 1154 | # define PACKET3_DB_ACTION_ENA (1 << 26) | 1154 | # define PACKET3_DB_ACTION_ENA (1 << 26) |
| 1155 | # define PACKET3_SH_ACTION_ENA (1 << 27) | 1155 | # define PACKET3_SH_ACTION_ENA (1 << 27) |
| 1156 | # define PACKET3_SX_ACTION_ENA (1 << 28) | 1156 | # define PACKET3_SX_ACTION_ENA (1 << 28) |
| 1157 | # define PACKET3_ENGINE_ME (1 << 31) | ||
| 1157 | #define PACKET3_ME_INITIALIZE 0x44 | 1158 | #define PACKET3_ME_INITIALIZE 0x44 |
| 1158 | #define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16) | 1159 | #define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16) |
| 1159 | #define PACKET3_COND_WRITE 0x45 | 1160 | #define PACKET3_COND_WRITE 0x45 |
diff --git a/drivers/gpu/drm/radeon/pptable.h b/drivers/gpu/drm/radeon/pptable.h index da43ab328833..2d532996c697 100644 --- a/drivers/gpu/drm/radeon/pptable.h +++ b/drivers/gpu/drm/radeon/pptable.h | |||
| @@ -23,7 +23,7 @@ | |||
| 23 | #ifndef _PPTABLE_H | 23 | #ifndef _PPTABLE_H |
| 24 | #define _PPTABLE_H | 24 | #define _PPTABLE_H |
| 25 | 25 | ||
| 26 | #pragma pack(push, 1) | 26 | #pragma pack(1) |
| 27 | 27 | ||
| 28 | typedef struct _ATOM_PPLIB_THERMALCONTROLLER | 28 | typedef struct _ATOM_PPLIB_THERMALCONTROLLER |
| 29 | 29 | ||
| @@ -677,6 +677,6 @@ typedef struct _ATOM_PPLIB_PPM_Table | |||
| 677 | ULONG ulTjmax; | 677 | ULONG ulTjmax; |
| 678 | } ATOM_PPLIB_PPM_Table; | 678 | } ATOM_PPLIB_PPM_Table; |
| 679 | 679 | ||
| 680 | #pragma pack(pop) | 680 | #pragma pack() |
| 681 | 681 | ||
| 682 | #endif | 682 | #endif |
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 10abc4d5a6cc..ef024ce3f7cc 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
| @@ -1050,6 +1050,36 @@ static int r100_cp_init_microcode(struct radeon_device *rdev) | |||
| 1050 | return err; | 1050 | return err; |
| 1051 | } | 1051 | } |
| 1052 | 1052 | ||
| 1053 | u32 r100_gfx_get_rptr(struct radeon_device *rdev, | ||
| 1054 | struct radeon_ring *ring) | ||
| 1055 | { | ||
| 1056 | u32 rptr; | ||
| 1057 | |||
| 1058 | if (rdev->wb.enabled) | ||
| 1059 | rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]); | ||
| 1060 | else | ||
| 1061 | rptr = RREG32(RADEON_CP_RB_RPTR); | ||
| 1062 | |||
| 1063 | return rptr; | ||
| 1064 | } | ||
| 1065 | |||
| 1066 | u32 r100_gfx_get_wptr(struct radeon_device *rdev, | ||
| 1067 | struct radeon_ring *ring) | ||
| 1068 | { | ||
| 1069 | u32 wptr; | ||
| 1070 | |||
| 1071 | wptr = RREG32(RADEON_CP_RB_WPTR); | ||
| 1072 | |||
| 1073 | return wptr; | ||
| 1074 | } | ||
| 1075 | |||
| 1076 | void r100_gfx_set_wptr(struct radeon_device *rdev, | ||
| 1077 | struct radeon_ring *ring) | ||
| 1078 | { | ||
| 1079 | WREG32(RADEON_CP_RB_WPTR, ring->wptr); | ||
| 1080 | (void)RREG32(RADEON_CP_RB_WPTR); | ||
| 1081 | } | ||
| 1082 | |||
| 1053 | static void r100_cp_load_microcode(struct radeon_device *rdev) | 1083 | static void r100_cp_load_microcode(struct radeon_device *rdev) |
| 1054 | { | 1084 | { |
| 1055 | const __be32 *fw_data; | 1085 | const __be32 *fw_data; |
| @@ -1102,7 +1132,6 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) | |||
| 1102 | ring_size = (1 << (rb_bufsz + 1)) * 4; | 1132 | ring_size = (1 << (rb_bufsz + 1)) * 4; |
| 1103 | r100_cp_load_microcode(rdev); | 1133 | r100_cp_load_microcode(rdev); |
| 1104 | r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET, | 1134 | r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET, |
| 1105 | RADEON_CP_RB_RPTR, RADEON_CP_RB_WPTR, | ||
| 1106 | RADEON_CP_PACKET2); | 1135 | RADEON_CP_PACKET2); |
| 1107 | if (r) { | 1136 | if (r) { |
| 1108 | return r; | 1137 | return r; |
| @@ -3913,6 +3942,8 @@ int r100_resume(struct radeon_device *rdev) | |||
| 3913 | /* Initialize surface registers */ | 3942 | /* Initialize surface registers */ |
| 3914 | radeon_surface_init(rdev); | 3943 | radeon_surface_init(rdev); |
| 3915 | 3944 | ||
| 3945 | radeon_pm_resume(rdev); | ||
| 3946 | |||
| 3916 | rdev->accel_working = true; | 3947 | rdev->accel_working = true; |
| 3917 | r = r100_startup(rdev); | 3948 | r = r100_startup(rdev); |
| 3918 | if (r) { | 3949 | if (r) { |
| @@ -3923,6 +3954,7 @@ int r100_resume(struct radeon_device *rdev) | |||
| 3923 | 3954 | ||
| 3924 | int r100_suspend(struct radeon_device *rdev) | 3955 | int r100_suspend(struct radeon_device *rdev) |
| 3925 | { | 3956 | { |
| 3957 | radeon_pm_suspend(rdev); | ||
| 3926 | r100_cp_disable(rdev); | 3958 | r100_cp_disable(rdev); |
| 3927 | radeon_wb_disable(rdev); | 3959 | radeon_wb_disable(rdev); |
| 3928 | r100_irq_disable(rdev); | 3960 | r100_irq_disable(rdev); |
| @@ -3933,6 +3965,7 @@ int r100_suspend(struct radeon_device *rdev) | |||
| 3933 | 3965 | ||
| 3934 | void r100_fini(struct radeon_device *rdev) | 3966 | void r100_fini(struct radeon_device *rdev) |
| 3935 | { | 3967 | { |
| 3968 | radeon_pm_fini(rdev); | ||
| 3936 | r100_cp_fini(rdev); | 3969 | r100_cp_fini(rdev); |
| 3937 | radeon_wb_fini(rdev); | 3970 | radeon_wb_fini(rdev); |
| 3938 | radeon_ib_pool_fini(rdev); | 3971 | radeon_ib_pool_fini(rdev); |
| @@ -4039,6 +4072,9 @@ int r100_init(struct radeon_device *rdev) | |||
| 4039 | } | 4072 | } |
| 4040 | r100_set_safe_registers(rdev); | 4073 | r100_set_safe_registers(rdev); |
| 4041 | 4074 | ||
| 4075 | /* Initialize power management */ | ||
| 4076 | radeon_pm_init(rdev); | ||
| 4077 | |||
| 4042 | rdev->accel_working = true; | 4078 | rdev->accel_working = true; |
| 4043 | r = r100_startup(rdev); | 4079 | r = r100_startup(rdev); |
| 4044 | if (r) { | 4080 | if (r) { |
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index d8dd269b9159..7c63ef840e86 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
| @@ -1430,6 +1430,8 @@ int r300_resume(struct radeon_device *rdev) | |||
| 1430 | /* Initialize surface registers */ | 1430 | /* Initialize surface registers */ |
| 1431 | radeon_surface_init(rdev); | 1431 | radeon_surface_init(rdev); |
| 1432 | 1432 | ||
| 1433 | radeon_pm_resume(rdev); | ||
| 1434 | |||
| 1433 | rdev->accel_working = true; | 1435 | rdev->accel_working = true; |
| 1434 | r = r300_startup(rdev); | 1436 | r = r300_startup(rdev); |
| 1435 | if (r) { | 1437 | if (r) { |
| @@ -1440,6 +1442,7 @@ int r300_resume(struct radeon_device *rdev) | |||
| 1440 | 1442 | ||
| 1441 | int r300_suspend(struct radeon_device *rdev) | 1443 | int r300_suspend(struct radeon_device *rdev) |
| 1442 | { | 1444 | { |
| 1445 | radeon_pm_suspend(rdev); | ||
| 1443 | r100_cp_disable(rdev); | 1446 | r100_cp_disable(rdev); |
| 1444 | radeon_wb_disable(rdev); | 1447 | radeon_wb_disable(rdev); |
| 1445 | r100_irq_disable(rdev); | 1448 | r100_irq_disable(rdev); |
| @@ -1452,6 +1455,7 @@ int r300_suspend(struct radeon_device *rdev) | |||
| 1452 | 1455 | ||
| 1453 | void r300_fini(struct radeon_device *rdev) | 1456 | void r300_fini(struct radeon_device *rdev) |
| 1454 | { | 1457 | { |
| 1458 | radeon_pm_fini(rdev); | ||
| 1455 | r100_cp_fini(rdev); | 1459 | r100_cp_fini(rdev); |
| 1456 | radeon_wb_fini(rdev); | 1460 | radeon_wb_fini(rdev); |
| 1457 | radeon_ib_pool_fini(rdev); | 1461 | radeon_ib_pool_fini(rdev); |
| @@ -1538,6 +1542,9 @@ int r300_init(struct radeon_device *rdev) | |||
| 1538 | } | 1542 | } |
| 1539 | r300_set_reg_safe(rdev); | 1543 | r300_set_reg_safe(rdev); |
| 1540 | 1544 | ||
| 1545 | /* Initialize power management */ | ||
| 1546 | radeon_pm_init(rdev); | ||
| 1547 | |||
| 1541 | rdev->accel_working = true; | 1548 | rdev->accel_working = true; |
| 1542 | r = r300_startup(rdev); | 1549 | r = r300_startup(rdev); |
| 1543 | if (r) { | 1550 | if (r) { |
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c index 60170ea5e3a2..84b1d5367a11 100644 --- a/drivers/gpu/drm/radeon/r300_cmdbuf.c +++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c | |||
| @@ -75,7 +75,7 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv, | |||
| 75 | OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1)); | 75 | OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1)); |
| 76 | 76 | ||
| 77 | for (i = 0; i < nr; ++i) { | 77 | for (i = 0; i < nr; ++i) { |
| 78 | if (DRM_COPY_FROM_USER | 78 | if (copy_from_user |
| 79 | (&box, &cmdbuf->boxes[n + i], sizeof(box))) { | 79 | (&box, &cmdbuf->boxes[n + i], sizeof(box))) { |
| 80 | DRM_ERROR("copy cliprect faulted\n"); | 80 | DRM_ERROR("copy cliprect faulted\n"); |
| 81 | return -EFAULT; | 81 | return -EFAULT; |
| @@ -928,12 +928,12 @@ static int r300_scratch(drm_radeon_private_t *dev_priv, | |||
| 928 | buf_idx = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0); | 928 | buf_idx = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0); |
| 929 | *buf_idx *= 2; /* 8 bytes per buf */ | 929 | *buf_idx *= 2; /* 8 bytes per buf */ |
| 930 | 930 | ||
| 931 | if (DRM_COPY_TO_USER(ref_age_base + *buf_idx, | 931 | if (copy_to_user(ref_age_base + *buf_idx, |
| 932 | &dev_priv->scratch_ages[header.scratch.reg], | 932 | &dev_priv->scratch_ages[header.scratch.reg], |
| 933 | sizeof(u32))) | 933 | sizeof(u32))) |
| 934 | return -EINVAL; | 934 | return -EINVAL; |
| 935 | 935 | ||
| 936 | if (DRM_COPY_FROM_USER(&h_pending, | 936 | if (copy_from_user(&h_pending, |
| 937 | ref_age_base + *buf_idx + 1, | 937 | ref_age_base + *buf_idx + 1, |
| 938 | sizeof(u32))) | 938 | sizeof(u32))) |
| 939 | return -EINVAL; | 939 | return -EINVAL; |
| @@ -943,7 +943,7 @@ static int r300_scratch(drm_radeon_private_t *dev_priv, | |||
| 943 | 943 | ||
| 944 | h_pending--; | 944 | h_pending--; |
| 945 | 945 | ||
| 946 | if (DRM_COPY_TO_USER(ref_age_base + *buf_idx + 1, | 946 | if (copy_to_user(ref_age_base + *buf_idx + 1, |
| 947 | &h_pending, | 947 | &h_pending, |
| 948 | sizeof(u32))) | 948 | sizeof(u32))) |
| 949 | return -EINVAL; | 949 | return -EINVAL; |
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index 6edf2b3a52b4..3768aab2710b 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c | |||
| @@ -325,6 +325,8 @@ int r420_resume(struct radeon_device *rdev) | |||
| 325 | /* Initialize surface registers */ | 325 | /* Initialize surface registers */ |
| 326 | radeon_surface_init(rdev); | 326 | radeon_surface_init(rdev); |
| 327 | 327 | ||
| 328 | radeon_pm_resume(rdev); | ||
| 329 | |||
| 328 | rdev->accel_working = true; | 330 | rdev->accel_working = true; |
| 329 | r = r420_startup(rdev); | 331 | r = r420_startup(rdev); |
| 330 | if (r) { | 332 | if (r) { |
| @@ -335,6 +337,7 @@ int r420_resume(struct radeon_device *rdev) | |||
| 335 | 337 | ||
| 336 | int r420_suspend(struct radeon_device *rdev) | 338 | int r420_suspend(struct radeon_device *rdev) |
| 337 | { | 339 | { |
| 340 | radeon_pm_suspend(rdev); | ||
| 338 | r420_cp_errata_fini(rdev); | 341 | r420_cp_errata_fini(rdev); |
| 339 | r100_cp_disable(rdev); | 342 | r100_cp_disable(rdev); |
| 340 | radeon_wb_disable(rdev); | 343 | radeon_wb_disable(rdev); |
| @@ -348,6 +351,7 @@ int r420_suspend(struct radeon_device *rdev) | |||
| 348 | 351 | ||
| 349 | void r420_fini(struct radeon_device *rdev) | 352 | void r420_fini(struct radeon_device *rdev) |
| 350 | { | 353 | { |
| 354 | radeon_pm_fini(rdev); | ||
| 351 | r100_cp_fini(rdev); | 355 | r100_cp_fini(rdev); |
| 352 | radeon_wb_fini(rdev); | 356 | radeon_wb_fini(rdev); |
| 353 | radeon_ib_pool_fini(rdev); | 357 | radeon_ib_pool_fini(rdev); |
| @@ -444,6 +448,9 @@ int r420_init(struct radeon_device *rdev) | |||
| 444 | } | 448 | } |
| 445 | r420_set_reg_safe(rdev); | 449 | r420_set_reg_safe(rdev); |
| 446 | 450 | ||
| 451 | /* Initialize power management */ | ||
| 452 | radeon_pm_init(rdev); | ||
| 453 | |||
| 447 | rdev->accel_working = true; | 454 | rdev->accel_working = true; |
| 448 | r = r420_startup(rdev); | 455 | r = r420_startup(rdev); |
| 449 | if (r) { | 456 | if (r) { |
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index e1aece73b370..e209eb75024f 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c | |||
| @@ -240,6 +240,8 @@ int r520_resume(struct radeon_device *rdev) | |||
| 240 | /* Initialize surface registers */ | 240 | /* Initialize surface registers */ |
| 241 | radeon_surface_init(rdev); | 241 | radeon_surface_init(rdev); |
| 242 | 242 | ||
| 243 | radeon_pm_resume(rdev); | ||
| 244 | |||
| 243 | rdev->accel_working = true; | 245 | rdev->accel_working = true; |
| 244 | r = r520_startup(rdev); | 246 | r = r520_startup(rdev); |
| 245 | if (r) { | 247 | if (r) { |
| @@ -312,6 +314,9 @@ int r520_init(struct radeon_device *rdev) | |||
| 312 | return r; | 314 | return r; |
| 313 | rv515_set_safe_registers(rdev); | 315 | rv515_set_safe_registers(rdev); |
| 314 | 316 | ||
| 317 | /* Initialize power management */ | ||
| 318 | radeon_pm_init(rdev); | ||
| 319 | |||
| 315 | rdev->accel_working = true; | 320 | rdev->accel_working = true; |
| 316 | r = r520_startup(rdev); | 321 | r = r520_startup(rdev); |
| 317 | if (r) { | 322 | if (r) { |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 9ad06732a78b..56140b4e5bb2 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
| @@ -105,6 +105,7 @@ void r600_fini(struct radeon_device *rdev); | |||
| 105 | void r600_irq_disable(struct radeon_device *rdev); | 105 | void r600_irq_disable(struct radeon_device *rdev); |
| 106 | static void r600_pcie_gen2_enable(struct radeon_device *rdev); | 106 | static void r600_pcie_gen2_enable(struct radeon_device *rdev); |
| 107 | extern int evergreen_rlc_resume(struct radeon_device *rdev); | 107 | extern int evergreen_rlc_resume(struct radeon_device *rdev); |
| 108 | extern void rv770_set_clk_bypass_mode(struct radeon_device *rdev); | ||
| 108 | 109 | ||
| 109 | /** | 110 | /** |
| 110 | * r600_get_xclk - get the xclk | 111 | * r600_get_xclk - get the xclk |
| @@ -1644,6 +1645,67 @@ static void r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask) | |||
| 1644 | r600_print_gpu_status_regs(rdev); | 1645 | r600_print_gpu_status_regs(rdev); |
| 1645 | } | 1646 | } |
| 1646 | 1647 | ||
| 1648 | static void r600_gpu_pci_config_reset(struct radeon_device *rdev) | ||
| 1649 | { | ||
| 1650 | struct rv515_mc_save save; | ||
| 1651 | u32 tmp, i; | ||
| 1652 | |||
| 1653 | dev_info(rdev->dev, "GPU pci config reset\n"); | ||
| 1654 | |||
| 1655 | /* disable dpm? */ | ||
| 1656 | |||
| 1657 | /* Disable CP parsing/prefetching */ | ||
| 1658 | if (rdev->family >= CHIP_RV770) | ||
| 1659 | WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1)); | ||
| 1660 | else | ||
| 1661 | WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); | ||
| 1662 | |||
| 1663 | /* disable the RLC */ | ||
| 1664 | WREG32(RLC_CNTL, 0); | ||
| 1665 | |||
| 1666 | /* Disable DMA */ | ||
| 1667 | tmp = RREG32(DMA_RB_CNTL); | ||
| 1668 | tmp &= ~DMA_RB_ENABLE; | ||
| 1669 | WREG32(DMA_RB_CNTL, tmp); | ||
| 1670 | |||
| 1671 | mdelay(50); | ||
| 1672 | |||
| 1673 | /* set mclk/sclk to bypass */ | ||
| 1674 | if (rdev->family >= CHIP_RV770) | ||
| 1675 | rv770_set_clk_bypass_mode(rdev); | ||
| 1676 | /* disable BM */ | ||
| 1677 | pci_clear_master(rdev->pdev); | ||
| 1678 | /* disable mem access */ | ||
| 1679 | rv515_mc_stop(rdev, &save); | ||
| 1680 | if (r600_mc_wait_for_idle(rdev)) { | ||
| 1681 | dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); | ||
| 1682 | } | ||
| 1683 | |||
| 1684 | /* BIF reset workaround. Not sure if this is needed on 6xx */ | ||
| 1685 | tmp = RREG32(BUS_CNTL); | ||
| 1686 | tmp |= VGA_COHE_SPEC_TIMER_DIS; | ||
| 1687 | WREG32(BUS_CNTL, tmp); | ||
| 1688 | |||
| 1689 | tmp = RREG32(BIF_SCRATCH0); | ||
| 1690 | |||
| 1691 | /* reset */ | ||
| 1692 | radeon_pci_config_reset(rdev); | ||
| 1693 | mdelay(1); | ||
| 1694 | |||
| 1695 | /* BIF reset workaround. Not sure if this is needed on 6xx */ | ||
| 1696 | tmp = SOFT_RESET_BIF; | ||
| 1697 | WREG32(SRBM_SOFT_RESET, tmp); | ||
| 1698 | mdelay(1); | ||
| 1699 | WREG32(SRBM_SOFT_RESET, 0); | ||
| 1700 | |||
| 1701 | /* wait for asic to come out of reset */ | ||
| 1702 | for (i = 0; i < rdev->usec_timeout; i++) { | ||
| 1703 | if (RREG32(CONFIG_MEMSIZE) != 0xffffffff) | ||
| 1704 | break; | ||
| 1705 | udelay(1); | ||
| 1706 | } | ||
| 1707 | } | ||
| 1708 | |||
| 1647 | int r600_asic_reset(struct radeon_device *rdev) | 1709 | int r600_asic_reset(struct radeon_device *rdev) |
| 1648 | { | 1710 | { |
| 1649 | u32 reset_mask; | 1711 | u32 reset_mask; |
| @@ -1653,10 +1715,17 @@ int r600_asic_reset(struct radeon_device *rdev) | |||
| 1653 | if (reset_mask) | 1715 | if (reset_mask) |
| 1654 | r600_set_bios_scratch_engine_hung(rdev, true); | 1716 | r600_set_bios_scratch_engine_hung(rdev, true); |
| 1655 | 1717 | ||
| 1718 | /* try soft reset */ | ||
| 1656 | r600_gpu_soft_reset(rdev, reset_mask); | 1719 | r600_gpu_soft_reset(rdev, reset_mask); |
| 1657 | 1720 | ||
| 1658 | reset_mask = r600_gpu_check_soft_reset(rdev); | 1721 | reset_mask = r600_gpu_check_soft_reset(rdev); |
| 1659 | 1722 | ||
| 1723 | /* try pci config reset */ | ||
| 1724 | if (reset_mask && radeon_hard_reset) | ||
| 1725 | r600_gpu_pci_config_reset(rdev); | ||
| 1726 | |||
| 1727 | reset_mask = r600_gpu_check_soft_reset(rdev); | ||
| 1728 | |||
| 1660 | if (!reset_mask) | 1729 | if (!reset_mask) |
| 1661 | r600_set_bios_scratch_engine_hung(rdev, false); | 1730 | r600_set_bios_scratch_engine_hung(rdev, false); |
| 1662 | 1731 | ||
| @@ -2185,7 +2254,8 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v) | |||
| 2185 | */ | 2254 | */ |
| 2186 | void r600_cp_stop(struct radeon_device *rdev) | 2255 | void r600_cp_stop(struct radeon_device *rdev) |
| 2187 | { | 2256 | { |
| 2188 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); | 2257 | if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX) |
| 2258 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); | ||
| 2189 | WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); | 2259 | WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); |
| 2190 | WREG32(SCRATCH_UMSK, 0); | 2260 | WREG32(SCRATCH_UMSK, 0); |
| 2191 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; | 2261 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; |
| @@ -2382,6 +2452,36 @@ out: | |||
| 2382 | return err; | 2452 | return err; |
| 2383 | } | 2453 | } |
| 2384 | 2454 | ||
| 2455 | u32 r600_gfx_get_rptr(struct radeon_device *rdev, | ||
| 2456 | struct radeon_ring *ring) | ||
| 2457 | { | ||
| 2458 | u32 rptr; | ||
| 2459 | |||
| 2460 | if (rdev->wb.enabled) | ||
| 2461 | rptr = rdev->wb.wb[ring->rptr_offs/4]; | ||
| 2462 | else | ||
| 2463 | rptr = RREG32(R600_CP_RB_RPTR); | ||
| 2464 | |||
| 2465 | return rptr; | ||
| 2466 | } | ||
| 2467 | |||
| 2468 | u32 r600_gfx_get_wptr(struct radeon_device *rdev, | ||
| 2469 | struct radeon_ring *ring) | ||
| 2470 | { | ||
| 2471 | u32 wptr; | ||
| 2472 | |||
| 2473 | wptr = RREG32(R600_CP_RB_WPTR); | ||
| 2474 | |||
| 2475 | return wptr; | ||
| 2476 | } | ||
| 2477 | |||
| 2478 | void r600_gfx_set_wptr(struct radeon_device *rdev, | ||
| 2479 | struct radeon_ring *ring) | ||
| 2480 | { | ||
| 2481 | WREG32(R600_CP_RB_WPTR, ring->wptr); | ||
| 2482 | (void)RREG32(R600_CP_RB_WPTR); | ||
| 2483 | } | ||
| 2484 | |||
| 2385 | static int r600_cp_load_microcode(struct radeon_device *rdev) | 2485 | static int r600_cp_load_microcode(struct radeon_device *rdev) |
| 2386 | { | 2486 | { |
| 2387 | const __be32 *fw_data; | 2487 | const __be32 *fw_data; |
| @@ -2513,6 +2613,10 @@ int r600_cp_resume(struct radeon_device *rdev) | |||
| 2513 | ring->ready = false; | 2613 | ring->ready = false; |
| 2514 | return r; | 2614 | return r; |
| 2515 | } | 2615 | } |
| 2616 | |||
| 2617 | if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX) | ||
| 2618 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); | ||
| 2619 | |||
| 2516 | return 0; | 2620 | return 0; |
| 2517 | } | 2621 | } |
| 2518 | 2622 | ||
| @@ -2607,14 +2711,17 @@ void r600_fence_ring_emit(struct radeon_device *rdev, | |||
| 2607 | struct radeon_fence *fence) | 2711 | struct radeon_fence *fence) |
| 2608 | { | 2712 | { |
| 2609 | struct radeon_ring *ring = &rdev->ring[fence->ring]; | 2713 | struct radeon_ring *ring = &rdev->ring[fence->ring]; |
| 2714 | u32 cp_coher_cntl = PACKET3_TC_ACTION_ENA | PACKET3_VC_ACTION_ENA | | ||
| 2715 | PACKET3_SH_ACTION_ENA; | ||
| 2716 | |||
| 2717 | if (rdev->family >= CHIP_RV770) | ||
| 2718 | cp_coher_cntl |= PACKET3_FULL_CACHE_ENA; | ||
| 2610 | 2719 | ||
| 2611 | if (rdev->wb.use_event) { | 2720 | if (rdev->wb.use_event) { |
| 2612 | u64 addr = rdev->fence_drv[fence->ring].gpu_addr; | 2721 | u64 addr = rdev->fence_drv[fence->ring].gpu_addr; |
| 2613 | /* flush read cache over gart */ | 2722 | /* flush read cache over gart */ |
| 2614 | radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); | 2723 | radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); |
| 2615 | radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | | 2724 | radeon_ring_write(ring, cp_coher_cntl); |
| 2616 | PACKET3_VC_ACTION_ENA | | ||
| 2617 | PACKET3_SH_ACTION_ENA); | ||
| 2618 | radeon_ring_write(ring, 0xFFFFFFFF); | 2725 | radeon_ring_write(ring, 0xFFFFFFFF); |
| 2619 | radeon_ring_write(ring, 0); | 2726 | radeon_ring_write(ring, 0); |
| 2620 | radeon_ring_write(ring, 10); /* poll interval */ | 2727 | radeon_ring_write(ring, 10); /* poll interval */ |
| @@ -2628,9 +2735,7 @@ void r600_fence_ring_emit(struct radeon_device *rdev, | |||
| 2628 | } else { | 2735 | } else { |
| 2629 | /* flush read cache over gart */ | 2736 | /* flush read cache over gart */ |
| 2630 | radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); | 2737 | radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); |
| 2631 | radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | | 2738 | radeon_ring_write(ring, cp_coher_cntl); |
| 2632 | PACKET3_VC_ACTION_ENA | | ||
| 2633 | PACKET3_SH_ACTION_ENA); | ||
| 2634 | radeon_ring_write(ring, 0xFFFFFFFF); | 2739 | radeon_ring_write(ring, 0xFFFFFFFF); |
| 2635 | radeon_ring_write(ring, 0); | 2740 | radeon_ring_write(ring, 0); |
| 2636 | radeon_ring_write(ring, 10); /* poll interval */ | 2741 | radeon_ring_write(ring, 10); /* poll interval */ |
| @@ -2775,14 +2880,6 @@ static int r600_startup(struct radeon_device *rdev) | |||
| 2775 | 2880 | ||
| 2776 | r600_mc_program(rdev); | 2881 | r600_mc_program(rdev); |
| 2777 | 2882 | ||
| 2778 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { | ||
| 2779 | r = r600_init_microcode(rdev); | ||
| 2780 | if (r) { | ||
| 2781 | DRM_ERROR("Failed to load firmware!\n"); | ||
| 2782 | return r; | ||
| 2783 | } | ||
| 2784 | } | ||
| 2785 | |||
| 2786 | if (rdev->flags & RADEON_IS_AGP) { | 2883 | if (rdev->flags & RADEON_IS_AGP) { |
| 2787 | r600_agp_enable(rdev); | 2884 | r600_agp_enable(rdev); |
| 2788 | } else { | 2885 | } else { |
| @@ -2803,12 +2900,6 @@ static int r600_startup(struct radeon_device *rdev) | |||
| 2803 | return r; | 2900 | return r; |
| 2804 | } | 2901 | } |
| 2805 | 2902 | ||
| 2806 | r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX); | ||
| 2807 | if (r) { | ||
| 2808 | dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r); | ||
| 2809 | return r; | ||
| 2810 | } | ||
| 2811 | |||
| 2812 | /* Enable IRQ */ | 2903 | /* Enable IRQ */ |
| 2813 | if (!rdev->irq.installed) { | 2904 | if (!rdev->irq.installed) { |
| 2814 | r = radeon_irq_kms_init(rdev); | 2905 | r = radeon_irq_kms_init(rdev); |
| @@ -2826,18 +2917,10 @@ static int r600_startup(struct radeon_device *rdev) | |||
| 2826 | 2917 | ||
| 2827 | ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; | 2918 | ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
| 2828 | r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, | 2919 | r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, |
| 2829 | R600_CP_RB_RPTR, R600_CP_RB_WPTR, | ||
| 2830 | RADEON_CP_PACKET2); | 2920 | RADEON_CP_PACKET2); |
| 2831 | if (r) | 2921 | if (r) |
| 2832 | return r; | 2922 | return r; |
| 2833 | 2923 | ||
| 2834 | ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; | ||
| 2835 | r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, | ||
| 2836 | DMA_RB_RPTR, DMA_RB_WPTR, | ||
| 2837 | DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); | ||
| 2838 | if (r) | ||
| 2839 | return r; | ||
| 2840 | |||
| 2841 | r = r600_cp_load_microcode(rdev); | 2924 | r = r600_cp_load_microcode(rdev); |
| 2842 | if (r) | 2925 | if (r) |
| 2843 | return r; | 2926 | return r; |
| @@ -2845,10 +2928,6 @@ static int r600_startup(struct radeon_device *rdev) | |||
| 2845 | if (r) | 2928 | if (r) |
| 2846 | return r; | 2929 | return r; |
| 2847 | 2930 | ||
| 2848 | r = r600_dma_resume(rdev); | ||
| 2849 | if (r) | ||
| 2850 | return r; | ||
| 2851 | |||
| 2852 | r = radeon_ib_pool_init(rdev); | 2931 | r = radeon_ib_pool_init(rdev); |
| 2853 | if (r) { | 2932 | if (r) { |
| 2854 | dev_err(rdev->dev, "IB initialization failed (%d).\n", r); | 2933 | dev_err(rdev->dev, "IB initialization failed (%d).\n", r); |
| @@ -2889,6 +2968,8 @@ int r600_resume(struct radeon_device *rdev) | |||
| 2889 | /* post card */ | 2968 | /* post card */ |
| 2890 | atom_asic_init(rdev->mode_info.atom_context); | 2969 | atom_asic_init(rdev->mode_info.atom_context); |
| 2891 | 2970 | ||
| 2971 | radeon_pm_resume(rdev); | ||
| 2972 | |||
| 2892 | rdev->accel_working = true; | 2973 | rdev->accel_working = true; |
| 2893 | r = r600_startup(rdev); | 2974 | r = r600_startup(rdev); |
| 2894 | if (r) { | 2975 | if (r) { |
| @@ -2902,9 +2983,9 @@ int r600_resume(struct radeon_device *rdev) | |||
| 2902 | 2983 | ||
| 2903 | int r600_suspend(struct radeon_device *rdev) | 2984 | int r600_suspend(struct radeon_device *rdev) |
| 2904 | { | 2985 | { |
| 2986 | radeon_pm_suspend(rdev); | ||
| 2905 | r600_audio_fini(rdev); | 2987 | r600_audio_fini(rdev); |
| 2906 | r600_cp_stop(rdev); | 2988 | r600_cp_stop(rdev); |
| 2907 | r600_dma_stop(rdev); | ||
| 2908 | r600_irq_suspend(rdev); | 2989 | r600_irq_suspend(rdev); |
| 2909 | radeon_wb_disable(rdev); | 2990 | radeon_wb_disable(rdev); |
| 2910 | r600_pcie_gart_disable(rdev); | 2991 | r600_pcie_gart_disable(rdev); |
| @@ -2970,12 +3051,20 @@ int r600_init(struct radeon_device *rdev) | |||
| 2970 | if (r) | 3051 | if (r) |
| 2971 | return r; | 3052 | return r; |
| 2972 | 3053 | ||
| 3054 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { | ||
| 3055 | r = r600_init_microcode(rdev); | ||
| 3056 | if (r) { | ||
| 3057 | DRM_ERROR("Failed to load firmware!\n"); | ||
| 3058 | return r; | ||
| 3059 | } | ||
| 3060 | } | ||
| 3061 | |||
| 3062 | /* Initialize power management */ | ||
| 3063 | radeon_pm_init(rdev); | ||
| 3064 | |||
| 2973 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; | 3065 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; |
| 2974 | r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); | 3066 | r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); |
| 2975 | 3067 | ||
| 2976 | rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL; | ||
| 2977 | r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024); | ||
| 2978 | |||
| 2979 | rdev->ih.ring_obj = NULL; | 3068 | rdev->ih.ring_obj = NULL; |
| 2980 | r600_ih_ring_init(rdev, 64 * 1024); | 3069 | r600_ih_ring_init(rdev, 64 * 1024); |
| 2981 | 3070 | ||
| @@ -2988,7 +3077,6 @@ int r600_init(struct radeon_device *rdev) | |||
| 2988 | if (r) { | 3077 | if (r) { |
| 2989 | dev_err(rdev->dev, "disabling GPU acceleration\n"); | 3078 | dev_err(rdev->dev, "disabling GPU acceleration\n"); |
| 2990 | r600_cp_fini(rdev); | 3079 | r600_cp_fini(rdev); |
| 2991 | r600_dma_fini(rdev); | ||
| 2992 | r600_irq_fini(rdev); | 3080 | r600_irq_fini(rdev); |
| 2993 | radeon_wb_fini(rdev); | 3081 | radeon_wb_fini(rdev); |
| 2994 | radeon_ib_pool_fini(rdev); | 3082 | radeon_ib_pool_fini(rdev); |
| @@ -3002,9 +3090,9 @@ int r600_init(struct radeon_device *rdev) | |||
| 3002 | 3090 | ||
| 3003 | void r600_fini(struct radeon_device *rdev) | 3091 | void r600_fini(struct radeon_device *rdev) |
| 3004 | { | 3092 | { |
| 3093 | radeon_pm_fini(rdev); | ||
| 3005 | r600_audio_fini(rdev); | 3094 | r600_audio_fini(rdev); |
| 3006 | r600_cp_fini(rdev); | 3095 | r600_cp_fini(rdev); |
| 3007 | r600_dma_fini(rdev); | ||
| 3008 | r600_irq_fini(rdev); | 3096 | r600_irq_fini(rdev); |
| 3009 | radeon_wb_fini(rdev); | 3097 | radeon_wb_fini(rdev); |
| 3010 | radeon_ib_pool_fini(rdev); | 3098 | radeon_ib_pool_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c index d8eb48bff0ed..8c9b7e26533c 100644 --- a/drivers/gpu/drm/radeon/r600_cp.c +++ b/drivers/gpu/drm/radeon/r600_cp.c | |||
| @@ -2515,7 +2515,7 @@ int r600_cp_dispatch_texture(struct drm_device *dev, | |||
| 2515 | buf = radeon_freelist_get(dev); | 2515 | buf = radeon_freelist_get(dev); |
| 2516 | if (!buf) { | 2516 | if (!buf) { |
| 2517 | DRM_DEBUG("EAGAIN\n"); | 2517 | DRM_DEBUG("EAGAIN\n"); |
| 2518 | if (DRM_COPY_TO_USER(tex->image, image, sizeof(*image))) | 2518 | if (copy_to_user(tex->image, image, sizeof(*image))) |
| 2519 | return -EFAULT; | 2519 | return -EFAULT; |
| 2520 | return -EAGAIN; | 2520 | return -EAGAIN; |
| 2521 | } | 2521 | } |
| @@ -2528,7 +2528,7 @@ int r600_cp_dispatch_texture(struct drm_device *dev, | |||
| 2528 | buffer = | 2528 | buffer = |
| 2529 | (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset); | 2529 | (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset); |
| 2530 | 2530 | ||
| 2531 | if (DRM_COPY_FROM_USER(buffer, data, pass_size)) { | 2531 | if (copy_from_user(buffer, data, pass_size)) { |
| 2532 | DRM_ERROR("EFAULT on pad, %d bytes\n", pass_size); | 2532 | DRM_ERROR("EFAULT on pad, %d bytes\n", pass_size); |
| 2533 | return -EFAULT; | 2533 | return -EFAULT; |
| 2534 | } | 2534 | } |
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 5dceea6f71ae..7b399dc5fd54 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c | |||
| @@ -749,7 +749,10 @@ static int r600_cs_track_check(struct radeon_cs_parser *p) | |||
| 749 | } | 749 | } |
| 750 | 750 | ||
| 751 | for (i = 0; i < 8; i++) { | 751 | for (i = 0; i < 8; i++) { |
| 752 | if ((tmp >> (i * 4)) & 0xF) { | 752 | u32 format = G_0280A0_FORMAT(track->cb_color_info[i]); |
| 753 | |||
| 754 | if (format != V_0280A0_COLOR_INVALID && | ||
| 755 | (tmp >> (i * 4)) & 0xF) { | ||
| 753 | /* at least one component is enabled */ | 756 | /* at least one component is enabled */ |
| 754 | if (track->cb_color_bo[i] == NULL) { | 757 | if (track->cb_color_bo[i] == NULL) { |
| 755 | dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n", | 758 | dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n", |
| @@ -2386,7 +2389,7 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp, | |||
| 2386 | ib_chunk = &parser.chunks[parser.chunk_ib_idx]; | 2389 | ib_chunk = &parser.chunks[parser.chunk_ib_idx]; |
| 2387 | parser.ib.length_dw = ib_chunk->length_dw; | 2390 | parser.ib.length_dw = ib_chunk->length_dw; |
| 2388 | *l = parser.ib.length_dw; | 2391 | *l = parser.ib.length_dw; |
| 2389 | if (DRM_COPY_FROM_USER(ib, ib_chunk->user_ptr, ib_chunk->length_dw * 4)) { | 2392 | if (copy_from_user(ib, ib_chunk->user_ptr, ib_chunk->length_dw * 4)) { |
| 2390 | r = -EFAULT; | 2393 | r = -EFAULT; |
| 2391 | r600_cs_parser_fini(&parser, r); | 2394 | r600_cs_parser_fini(&parser, r); |
| 2392 | return r; | 2395 | return r; |
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c index 7844d15c139f..b2d4c91e6272 100644 --- a/drivers/gpu/drm/radeon/r600_dma.c +++ b/drivers/gpu/drm/radeon/r600_dma.c | |||
| @@ -51,7 +51,14 @@ u32 r600_gpu_check_soft_reset(struct radeon_device *rdev); | |||
| 51 | uint32_t r600_dma_get_rptr(struct radeon_device *rdev, | 51 | uint32_t r600_dma_get_rptr(struct radeon_device *rdev, |
| 52 | struct radeon_ring *ring) | 52 | struct radeon_ring *ring) |
| 53 | { | 53 | { |
| 54 | return (radeon_ring_generic_get_rptr(rdev, ring) & 0x3fffc) >> 2; | 54 | u32 rptr; |
| 55 | |||
| 56 | if (rdev->wb.enabled) | ||
| 57 | rptr = rdev->wb.wb[ring->rptr_offs/4]; | ||
| 58 | else | ||
| 59 | rptr = RREG32(DMA_RB_RPTR); | ||
| 60 | |||
| 61 | return (rptr & 0x3fffc) >> 2; | ||
| 55 | } | 62 | } |
| 56 | 63 | ||
| 57 | /** | 64 | /** |
| @@ -65,7 +72,7 @@ uint32_t r600_dma_get_rptr(struct radeon_device *rdev, | |||
| 65 | uint32_t r600_dma_get_wptr(struct radeon_device *rdev, | 72 | uint32_t r600_dma_get_wptr(struct radeon_device *rdev, |
| 66 | struct radeon_ring *ring) | 73 | struct radeon_ring *ring) |
| 67 | { | 74 | { |
| 68 | return (RREG32(ring->wptr_reg) & 0x3fffc) >> 2; | 75 | return (RREG32(DMA_RB_WPTR) & 0x3fffc) >> 2; |
| 69 | } | 76 | } |
| 70 | 77 | ||
| 71 | /** | 78 | /** |
| @@ -79,7 +86,7 @@ uint32_t r600_dma_get_wptr(struct radeon_device *rdev, | |||
| 79 | void r600_dma_set_wptr(struct radeon_device *rdev, | 86 | void r600_dma_set_wptr(struct radeon_device *rdev, |
| 80 | struct radeon_ring *ring) | 87 | struct radeon_ring *ring) |
| 81 | { | 88 | { |
| 82 | WREG32(ring->wptr_reg, (ring->wptr << 2) & 0x3fffc); | 89 | WREG32(DMA_RB_WPTR, (ring->wptr << 2) & 0x3fffc); |
| 83 | } | 90 | } |
| 84 | 91 | ||
| 85 | /** | 92 | /** |
| @@ -93,7 +100,8 @@ void r600_dma_stop(struct radeon_device *rdev) | |||
| 93 | { | 100 | { |
| 94 | u32 rb_cntl = RREG32(DMA_RB_CNTL); | 101 | u32 rb_cntl = RREG32(DMA_RB_CNTL); |
| 95 | 102 | ||
| 96 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); | 103 | if (rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) |
| 104 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); | ||
| 97 | 105 | ||
| 98 | rb_cntl &= ~DMA_RB_ENABLE; | 106 | rb_cntl &= ~DMA_RB_ENABLE; |
| 99 | WREG32(DMA_RB_CNTL, rb_cntl); | 107 | WREG32(DMA_RB_CNTL, rb_cntl); |
| @@ -180,7 +188,8 @@ int r600_dma_resume(struct radeon_device *rdev) | |||
| 180 | return r; | 188 | return r; |
| 181 | } | 189 | } |
| 182 | 190 | ||
| 183 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); | 191 | if (rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) |
| 192 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); | ||
| 184 | 193 | ||
| 185 | return 0; | 194 | return 0; |
| 186 | } | 195 | } |
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c index 5513d8f06252..e4cc9b314ce9 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.c +++ b/drivers/gpu/drm/radeon/r600_dpm.c | |||
| @@ -729,8 +729,8 @@ bool r600_is_uvd_state(u32 class, u32 class2) | |||
| 729 | return false; | 729 | return false; |
| 730 | } | 730 | } |
| 731 | 731 | ||
| 732 | int r600_set_thermal_temperature_range(struct radeon_device *rdev, | 732 | static int r600_set_thermal_temperature_range(struct radeon_device *rdev, |
| 733 | int min_temp, int max_temp) | 733 | int min_temp, int max_temp) |
| 734 | { | 734 | { |
| 735 | int low_temp = 0 * 1000; | 735 | int low_temp = 0 * 1000; |
| 736 | int high_temp = 255 * 1000; | 736 | int high_temp = 255 * 1000; |
| @@ -777,6 +777,22 @@ bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor) | |||
| 777 | } | 777 | } |
| 778 | } | 778 | } |
| 779 | 779 | ||
| 780 | int r600_dpm_late_enable(struct radeon_device *rdev) | ||
| 781 | { | ||
| 782 | int ret; | ||
| 783 | |||
| 784 | if (rdev->irq.installed && | ||
| 785 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { | ||
| 786 | ret = r600_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); | ||
| 787 | if (ret) | ||
| 788 | return ret; | ||
| 789 | rdev->irq.dpm_thermal = true; | ||
| 790 | radeon_irq_set(rdev); | ||
| 791 | } | ||
| 792 | |||
| 793 | return 0; | ||
| 794 | } | ||
| 795 | |||
| 780 | union power_info { | 796 | union power_info { |
| 781 | struct _ATOM_POWERPLAY_INFO info; | 797 | struct _ATOM_POWERPLAY_INFO info; |
| 782 | struct _ATOM_POWERPLAY_INFO_V2 info_2; | 798 | struct _ATOM_POWERPLAY_INFO_V2 info_2; |
diff --git a/drivers/gpu/drm/radeon/r600_dpm.h b/drivers/gpu/drm/radeon/r600_dpm.h index 1000bf9719f2..07eab2b04e81 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.h +++ b/drivers/gpu/drm/radeon/r600_dpm.h | |||
| @@ -213,8 +213,6 @@ void r600_wait_for_power_level(struct radeon_device *rdev, | |||
| 213 | void r600_start_dpm(struct radeon_device *rdev); | 213 | void r600_start_dpm(struct radeon_device *rdev); |
| 214 | void r600_stop_dpm(struct radeon_device *rdev); | 214 | void r600_stop_dpm(struct radeon_device *rdev); |
| 215 | 215 | ||
| 216 | int r600_set_thermal_temperature_range(struct radeon_device *rdev, | ||
| 217 | int min_temp, int max_temp); | ||
| 218 | bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor); | 216 | bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor); |
| 219 | 217 | ||
| 220 | int r600_parse_extended_power_table(struct radeon_device *rdev); | 218 | int r600_parse_extended_power_table(struct radeon_device *rdev); |
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index b7d3ecba43e3..3016fc14f502 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c | |||
| @@ -250,7 +250,7 @@ static void r600_hdmi_audio_workaround(struct drm_encoder *encoder) | |||
| 250 | value, ~HDMI0_AUDIO_TEST_EN); | 250 | value, ~HDMI0_AUDIO_TEST_EN); |
| 251 | } | 251 | } |
| 252 | 252 | ||
| 253 | void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock) | 253 | static void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock) |
| 254 | { | 254 | { |
| 255 | struct drm_device *dev = encoder->dev; | 255 | struct drm_device *dev = encoder->dev; |
| 256 | struct radeon_device *rdev = dev->dev_private; | 256 | struct radeon_device *rdev = dev->dev_private; |
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index ebe38724a976..37455f65107f 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h | |||
| @@ -701,11 +701,18 @@ | |||
| 701 | #define RLC_UCODE_DATA 0x3f30 | 701 | #define RLC_UCODE_DATA 0x3f30 |
| 702 | 702 | ||
| 703 | #define SRBM_SOFT_RESET 0xe60 | 703 | #define SRBM_SOFT_RESET 0xe60 |
| 704 | # define SOFT_RESET_BIF (1 << 1) | ||
| 704 | # define SOFT_RESET_DMA (1 << 12) | 705 | # define SOFT_RESET_DMA (1 << 12) |
| 705 | # define SOFT_RESET_RLC (1 << 13) | 706 | # define SOFT_RESET_RLC (1 << 13) |
| 706 | # define SOFT_RESET_UVD (1 << 18) | 707 | # define SOFT_RESET_UVD (1 << 18) |
| 707 | # define RV770_SOFT_RESET_DMA (1 << 20) | 708 | # define RV770_SOFT_RESET_DMA (1 << 20) |
| 708 | 709 | ||
| 710 | #define BIF_SCRATCH0 0x5438 | ||
| 711 | |||
| 712 | #define BUS_CNTL 0x5420 | ||
| 713 | # define BIOS_ROM_DIS (1 << 1) | ||
| 714 | # define VGA_COHE_SPEC_TIMER_DIS (1 << 9) | ||
| 715 | |||
| 709 | #define CP_INT_CNTL 0xc124 | 716 | #define CP_INT_CNTL 0xc124 |
| 710 | # define CNTX_BUSY_INT_ENABLE (1 << 19) | 717 | # define CNTX_BUSY_INT_ENABLE (1 << 19) |
| 711 | # define CNTX_EMPTY_INT_ENABLE (1 << 20) | 718 | # define CNTX_EMPTY_INT_ENABLE (1 << 20) |
| @@ -1575,6 +1582,7 @@ | |||
| 1575 | # define PACKET3_CP_DMA_CMD_DAIC (1 << 29) | 1582 | # define PACKET3_CP_DMA_CMD_DAIC (1 << 29) |
| 1576 | #define PACKET3_SURFACE_SYNC 0x43 | 1583 | #define PACKET3_SURFACE_SYNC 0x43 |
| 1577 | # define PACKET3_CB0_DEST_BASE_ENA (1 << 6) | 1584 | # define PACKET3_CB0_DEST_BASE_ENA (1 << 6) |
| 1585 | # define PACKET3_FULL_CACHE_ENA (1 << 20) /* r7xx+ only */ | ||
| 1578 | # define PACKET3_TC_ACTION_ENA (1 << 23) | 1586 | # define PACKET3_TC_ACTION_ENA (1 << 23) |
| 1579 | # define PACKET3_VC_ACTION_ENA (1 << 24) | 1587 | # define PACKET3_VC_ACTION_ENA (1 << 24) |
| 1580 | # define PACKET3_CB_ACTION_ENA (1 << 25) | 1588 | # define PACKET3_CB_ACTION_ENA (1 << 25) |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 45e1f447bc79..4a8ac1cd6b4c 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
| @@ -99,6 +99,7 @@ extern int radeon_fastfb; | |||
| 99 | extern int radeon_dpm; | 99 | extern int radeon_dpm; |
| 100 | extern int radeon_aspm; | 100 | extern int radeon_aspm; |
| 101 | extern int radeon_runtime_pm; | 101 | extern int radeon_runtime_pm; |
| 102 | extern int radeon_hard_reset; | ||
| 102 | 103 | ||
| 103 | /* | 104 | /* |
| 104 | * Copy from radeon_drv.h so we don't have to include both and have conflicting | 105 | * Copy from radeon_drv.h so we don't have to include both and have conflicting |
| @@ -139,6 +140,9 @@ extern int radeon_runtime_pm; | |||
| 139 | #define RADEON_VA_RESERVED_SIZE (8 << 20) | 140 | #define RADEON_VA_RESERVED_SIZE (8 << 20) |
| 140 | #define RADEON_IB_VM_MAX_SIZE (64 << 10) | 141 | #define RADEON_IB_VM_MAX_SIZE (64 << 10) |
| 141 | 142 | ||
| 143 | /* hard reset data */ | ||
| 144 | #define RADEON_ASIC_RESET_DATA 0x39d5e86b | ||
| 145 | |||
| 142 | /* reset flags */ | 146 | /* reset flags */ |
| 143 | #define RADEON_RESET_GFX (1 << 0) | 147 | #define RADEON_RESET_GFX (1 << 0) |
| 144 | #define RADEON_RESET_COMPUTE (1 << 1) | 148 | #define RADEON_RESET_COMPUTE (1 << 1) |
| @@ -252,6 +256,7 @@ struct radeon_clock { | |||
| 252 | * Power management | 256 | * Power management |
| 253 | */ | 257 | */ |
| 254 | int radeon_pm_init(struct radeon_device *rdev); | 258 | int radeon_pm_init(struct radeon_device *rdev); |
| 259 | int radeon_pm_late_init(struct radeon_device *rdev); | ||
| 255 | void radeon_pm_fini(struct radeon_device *rdev); | 260 | void radeon_pm_fini(struct radeon_device *rdev); |
| 256 | void radeon_pm_compute_clocks(struct radeon_device *rdev); | 261 | void radeon_pm_compute_clocks(struct radeon_device *rdev); |
| 257 | void radeon_pm_suspend(struct radeon_device *rdev); | 262 | void radeon_pm_suspend(struct radeon_device *rdev); |
| @@ -413,6 +418,11 @@ struct radeon_mman { | |||
| 413 | struct ttm_bo_device bdev; | 418 | struct ttm_bo_device bdev; |
| 414 | bool mem_global_referenced; | 419 | bool mem_global_referenced; |
| 415 | bool initialized; | 420 | bool initialized; |
| 421 | |||
| 422 | #if defined(CONFIG_DEBUG_FS) | ||
| 423 | struct dentry *vram; | ||
| 424 | struct dentry *gtt; | ||
| 425 | #endif | ||
| 416 | }; | 426 | }; |
| 417 | 427 | ||
| 418 | /* bo virtual address in a specific vm */ | 428 | /* bo virtual address in a specific vm */ |
| @@ -779,13 +789,11 @@ struct radeon_ring { | |||
| 779 | volatile uint32_t *ring; | 789 | volatile uint32_t *ring; |
| 780 | unsigned rptr; | 790 | unsigned rptr; |
| 781 | unsigned rptr_offs; | 791 | unsigned rptr_offs; |
| 782 | unsigned rptr_reg; | ||
| 783 | unsigned rptr_save_reg; | 792 | unsigned rptr_save_reg; |
| 784 | u64 next_rptr_gpu_addr; | 793 | u64 next_rptr_gpu_addr; |
| 785 | volatile u32 *next_rptr_cpu_addr; | 794 | volatile u32 *next_rptr_cpu_addr; |
| 786 | unsigned wptr; | 795 | unsigned wptr; |
| 787 | unsigned wptr_old; | 796 | unsigned wptr_old; |
| 788 | unsigned wptr_reg; | ||
| 789 | unsigned ring_size; | 797 | unsigned ring_size; |
| 790 | unsigned ring_free_dw; | 798 | unsigned ring_free_dw; |
| 791 | int count_dw; | 799 | int count_dw; |
| @@ -859,6 +867,8 @@ struct radeon_vm { | |||
| 859 | struct radeon_fence *fence; | 867 | struct radeon_fence *fence; |
| 860 | /* last flush or NULL if we still need to flush */ | 868 | /* last flush or NULL if we still need to flush */ |
| 861 | struct radeon_fence *last_flush; | 869 | struct radeon_fence *last_flush; |
| 870 | /* last use of vmid */ | ||
| 871 | struct radeon_fence *last_id_use; | ||
| 862 | }; | 872 | }; |
| 863 | 873 | ||
| 864 | struct radeon_vm_manager { | 874 | struct radeon_vm_manager { |
| @@ -949,7 +959,7 @@ unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring | |||
| 949 | int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring, | 959 | int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring, |
| 950 | unsigned size, uint32_t *data); | 960 | unsigned size, uint32_t *data); |
| 951 | int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size, | 961 | int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size, |
| 952 | unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg, u32 nop); | 962 | unsigned rptr_offs, u32 nop); |
| 953 | void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp); | 963 | void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp); |
| 954 | 964 | ||
| 955 | 965 | ||
| @@ -1775,6 +1785,7 @@ struct radeon_asic { | |||
| 1775 | int (*init)(struct radeon_device *rdev); | 1785 | int (*init)(struct radeon_device *rdev); |
| 1776 | void (*setup_asic)(struct radeon_device *rdev); | 1786 | void (*setup_asic)(struct radeon_device *rdev); |
| 1777 | int (*enable)(struct radeon_device *rdev); | 1787 | int (*enable)(struct radeon_device *rdev); |
| 1788 | int (*late_enable)(struct radeon_device *rdev); | ||
| 1778 | void (*disable)(struct radeon_device *rdev); | 1789 | void (*disable)(struct radeon_device *rdev); |
| 1779 | int (*pre_set_power_state)(struct radeon_device *rdev); | 1790 | int (*pre_set_power_state)(struct radeon_device *rdev); |
| 1780 | int (*set_power_state)(struct radeon_device *rdev); | 1791 | int (*set_power_state)(struct radeon_device *rdev); |
| @@ -2650,6 +2661,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v); | |||
| 2650 | #define radeon_dpm_init(rdev) rdev->asic->dpm.init((rdev)) | 2661 | #define radeon_dpm_init(rdev) rdev->asic->dpm.init((rdev)) |
| 2651 | #define radeon_dpm_setup_asic(rdev) rdev->asic->dpm.setup_asic((rdev)) | 2662 | #define radeon_dpm_setup_asic(rdev) rdev->asic->dpm.setup_asic((rdev)) |
| 2652 | #define radeon_dpm_enable(rdev) rdev->asic->dpm.enable((rdev)) | 2663 | #define radeon_dpm_enable(rdev) rdev->asic->dpm.enable((rdev)) |
| 2664 | #define radeon_dpm_late_enable(rdev) rdev->asic->dpm.late_enable((rdev)) | ||
| 2653 | #define radeon_dpm_disable(rdev) rdev->asic->dpm.disable((rdev)) | 2665 | #define radeon_dpm_disable(rdev) rdev->asic->dpm.disable((rdev)) |
| 2654 | #define radeon_dpm_pre_set_power_state(rdev) rdev->asic->dpm.pre_set_power_state((rdev)) | 2666 | #define radeon_dpm_pre_set_power_state(rdev) rdev->asic->dpm.pre_set_power_state((rdev)) |
| 2655 | #define radeon_dpm_set_power_state(rdev) rdev->asic->dpm.set_power_state((rdev)) | 2667 | #define radeon_dpm_set_power_state(rdev) rdev->asic->dpm.set_power_state((rdev)) |
| @@ -2668,6 +2680,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v); | |||
| 2668 | /* Common functions */ | 2680 | /* Common functions */ |
| 2669 | /* AGP */ | 2681 | /* AGP */ |
| 2670 | extern int radeon_gpu_reset(struct radeon_device *rdev); | 2682 | extern int radeon_gpu_reset(struct radeon_device *rdev); |
| 2683 | extern void radeon_pci_config_reset(struct radeon_device *rdev); | ||
| 2671 | extern void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung); | 2684 | extern void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung); |
| 2672 | extern void radeon_agp_disable(struct radeon_device *rdev); | 2685 | extern void radeon_agp_disable(struct radeon_device *rdev); |
| 2673 | extern int radeon_modeset_init(struct radeon_device *rdev); | 2686 | extern int radeon_modeset_init(struct radeon_device *rdev); |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index c0425bb6223a..f74db43346fd 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c | |||
| @@ -182,9 +182,9 @@ static struct radeon_asic_ring r100_gfx_ring = { | |||
| 182 | .ring_test = &r100_ring_test, | 182 | .ring_test = &r100_ring_test, |
| 183 | .ib_test = &r100_ib_test, | 183 | .ib_test = &r100_ib_test, |
| 184 | .is_lockup = &r100_gpu_is_lockup, | 184 | .is_lockup = &r100_gpu_is_lockup, |
| 185 | .get_rptr = &radeon_ring_generic_get_rptr, | 185 | .get_rptr = &r100_gfx_get_rptr, |
| 186 | .get_wptr = &radeon_ring_generic_get_wptr, | 186 | .get_wptr = &r100_gfx_get_wptr, |
| 187 | .set_wptr = &radeon_ring_generic_set_wptr, | 187 | .set_wptr = &r100_gfx_set_wptr, |
| 188 | }; | 188 | }; |
| 189 | 189 | ||
| 190 | static struct radeon_asic r100_asic = { | 190 | static struct radeon_asic r100_asic = { |
| @@ -330,9 +330,9 @@ static struct radeon_asic_ring r300_gfx_ring = { | |||
| 330 | .ring_test = &r100_ring_test, | 330 | .ring_test = &r100_ring_test, |
| 331 | .ib_test = &r100_ib_test, | 331 | .ib_test = &r100_ib_test, |
| 332 | .is_lockup = &r100_gpu_is_lockup, | 332 | .is_lockup = &r100_gpu_is_lockup, |
| 333 | .get_rptr = &radeon_ring_generic_get_rptr, | 333 | .get_rptr = &r100_gfx_get_rptr, |
| 334 | .get_wptr = &radeon_ring_generic_get_wptr, | 334 | .get_wptr = &r100_gfx_get_wptr, |
| 335 | .set_wptr = &radeon_ring_generic_set_wptr, | 335 | .set_wptr = &r100_gfx_set_wptr, |
| 336 | }; | 336 | }; |
| 337 | 337 | ||
| 338 | static struct radeon_asic r300_asic = { | 338 | static struct radeon_asic r300_asic = { |
| @@ -883,9 +883,9 @@ static struct radeon_asic_ring r600_gfx_ring = { | |||
| 883 | .ring_test = &r600_ring_test, | 883 | .ring_test = &r600_ring_test, |
| 884 | .ib_test = &r600_ib_test, | 884 | .ib_test = &r600_ib_test, |
| 885 | .is_lockup = &r600_gfx_is_lockup, | 885 | .is_lockup = &r600_gfx_is_lockup, |
| 886 | .get_rptr = &radeon_ring_generic_get_rptr, | 886 | .get_rptr = &r600_gfx_get_rptr, |
| 887 | .get_wptr = &radeon_ring_generic_get_wptr, | 887 | .get_wptr = &r600_gfx_get_wptr, |
| 888 | .set_wptr = &radeon_ring_generic_set_wptr, | 888 | .set_wptr = &r600_gfx_set_wptr, |
| 889 | }; | 889 | }; |
| 890 | 890 | ||
| 891 | static struct radeon_asic_ring r600_dma_ring = { | 891 | static struct radeon_asic_ring r600_dma_ring = { |
| @@ -1045,6 +1045,7 @@ static struct radeon_asic rv6xx_asic = { | |||
| 1045 | .init = &rv6xx_dpm_init, | 1045 | .init = &rv6xx_dpm_init, |
| 1046 | .setup_asic = &rv6xx_setup_asic, | 1046 | .setup_asic = &rv6xx_setup_asic, |
| 1047 | .enable = &rv6xx_dpm_enable, | 1047 | .enable = &rv6xx_dpm_enable, |
| 1048 | .late_enable = &r600_dpm_late_enable, | ||
| 1048 | .disable = &rv6xx_dpm_disable, | 1049 | .disable = &rv6xx_dpm_disable, |
| 1049 | .pre_set_power_state = &r600_dpm_pre_set_power_state, | 1050 | .pre_set_power_state = &r600_dpm_pre_set_power_state, |
| 1050 | .set_power_state = &rv6xx_dpm_set_power_state, | 1051 | .set_power_state = &rv6xx_dpm_set_power_state, |
| @@ -1135,6 +1136,7 @@ static struct radeon_asic rs780_asic = { | |||
| 1135 | .init = &rs780_dpm_init, | 1136 | .init = &rs780_dpm_init, |
| 1136 | .setup_asic = &rs780_dpm_setup_asic, | 1137 | .setup_asic = &rs780_dpm_setup_asic, |
| 1137 | .enable = &rs780_dpm_enable, | 1138 | .enable = &rs780_dpm_enable, |
| 1139 | .late_enable = &r600_dpm_late_enable, | ||
| 1138 | .disable = &rs780_dpm_disable, | 1140 | .disable = &rs780_dpm_disable, |
| 1139 | .pre_set_power_state = &r600_dpm_pre_set_power_state, | 1141 | .pre_set_power_state = &r600_dpm_pre_set_power_state, |
| 1140 | .set_power_state = &rs780_dpm_set_power_state, | 1142 | .set_power_state = &rs780_dpm_set_power_state, |
| @@ -1239,6 +1241,7 @@ static struct radeon_asic rv770_asic = { | |||
| 1239 | .init = &rv770_dpm_init, | 1241 | .init = &rv770_dpm_init, |
| 1240 | .setup_asic = &rv770_dpm_setup_asic, | 1242 | .setup_asic = &rv770_dpm_setup_asic, |
| 1241 | .enable = &rv770_dpm_enable, | 1243 | .enable = &rv770_dpm_enable, |
| 1244 | .late_enable = &rv770_dpm_late_enable, | ||
| 1242 | .disable = &rv770_dpm_disable, | 1245 | .disable = &rv770_dpm_disable, |
| 1243 | .pre_set_power_state = &r600_dpm_pre_set_power_state, | 1246 | .pre_set_power_state = &r600_dpm_pre_set_power_state, |
| 1244 | .set_power_state = &rv770_dpm_set_power_state, | 1247 | .set_power_state = &rv770_dpm_set_power_state, |
| @@ -1267,9 +1270,9 @@ static struct radeon_asic_ring evergreen_gfx_ring = { | |||
| 1267 | .ring_test = &r600_ring_test, | 1270 | .ring_test = &r600_ring_test, |
| 1268 | .ib_test = &r600_ib_test, | 1271 | .ib_test = &r600_ib_test, |
| 1269 | .is_lockup = &evergreen_gfx_is_lockup, | 1272 | .is_lockup = &evergreen_gfx_is_lockup, |
| 1270 | .get_rptr = &radeon_ring_generic_get_rptr, | 1273 | .get_rptr = &r600_gfx_get_rptr, |
| 1271 | .get_wptr = &radeon_ring_generic_get_wptr, | 1274 | .get_wptr = &r600_gfx_get_wptr, |
| 1272 | .set_wptr = &radeon_ring_generic_set_wptr, | 1275 | .set_wptr = &r600_gfx_set_wptr, |
| 1273 | }; | 1276 | }; |
| 1274 | 1277 | ||
| 1275 | static struct radeon_asic_ring evergreen_dma_ring = { | 1278 | static struct radeon_asic_ring evergreen_dma_ring = { |
| @@ -1357,6 +1360,7 @@ static struct radeon_asic evergreen_asic = { | |||
| 1357 | .init = &cypress_dpm_init, | 1360 | .init = &cypress_dpm_init, |
| 1358 | .setup_asic = &cypress_dpm_setup_asic, | 1361 | .setup_asic = &cypress_dpm_setup_asic, |
| 1359 | .enable = &cypress_dpm_enable, | 1362 | .enable = &cypress_dpm_enable, |
| 1363 | .late_enable = &rv770_dpm_late_enable, | ||
| 1360 | .disable = &cypress_dpm_disable, | 1364 | .disable = &cypress_dpm_disable, |
| 1361 | .pre_set_power_state = &r600_dpm_pre_set_power_state, | 1365 | .pre_set_power_state = &r600_dpm_pre_set_power_state, |
| 1362 | .set_power_state = &cypress_dpm_set_power_state, | 1366 | .set_power_state = &cypress_dpm_set_power_state, |
| @@ -1449,6 +1453,7 @@ static struct radeon_asic sumo_asic = { | |||
| 1449 | .init = &sumo_dpm_init, | 1453 | .init = &sumo_dpm_init, |
| 1450 | .setup_asic = &sumo_dpm_setup_asic, | 1454 | .setup_asic = &sumo_dpm_setup_asic, |
| 1451 | .enable = &sumo_dpm_enable, | 1455 | .enable = &sumo_dpm_enable, |
| 1456 | .late_enable = &sumo_dpm_late_enable, | ||
| 1452 | .disable = &sumo_dpm_disable, | 1457 | .disable = &sumo_dpm_disable, |
| 1453 | .pre_set_power_state = &sumo_dpm_pre_set_power_state, | 1458 | .pre_set_power_state = &sumo_dpm_pre_set_power_state, |
| 1454 | .set_power_state = &sumo_dpm_set_power_state, | 1459 | .set_power_state = &sumo_dpm_set_power_state, |
| @@ -1540,6 +1545,7 @@ static struct radeon_asic btc_asic = { | |||
| 1540 | .init = &btc_dpm_init, | 1545 | .init = &btc_dpm_init, |
| 1541 | .setup_asic = &btc_dpm_setup_asic, | 1546 | .setup_asic = &btc_dpm_setup_asic, |
| 1542 | .enable = &btc_dpm_enable, | 1547 | .enable = &btc_dpm_enable, |
| 1548 | .late_enable = &rv770_dpm_late_enable, | ||
| 1543 | .disable = &btc_dpm_disable, | 1549 | .disable = &btc_dpm_disable, |
| 1544 | .pre_set_power_state = &btc_dpm_pre_set_power_state, | 1550 | .pre_set_power_state = &btc_dpm_pre_set_power_state, |
| 1545 | .set_power_state = &btc_dpm_set_power_state, | 1551 | .set_power_state = &btc_dpm_set_power_state, |
| @@ -1570,9 +1576,9 @@ static struct radeon_asic_ring cayman_gfx_ring = { | |||
| 1570 | .ib_test = &r600_ib_test, | 1576 | .ib_test = &r600_ib_test, |
| 1571 | .is_lockup = &cayman_gfx_is_lockup, | 1577 | .is_lockup = &cayman_gfx_is_lockup, |
| 1572 | .vm_flush = &cayman_vm_flush, | 1578 | .vm_flush = &cayman_vm_flush, |
| 1573 | .get_rptr = &radeon_ring_generic_get_rptr, | 1579 | .get_rptr = &cayman_gfx_get_rptr, |
| 1574 | .get_wptr = &radeon_ring_generic_get_wptr, | 1580 | .get_wptr = &cayman_gfx_get_wptr, |
| 1575 | .set_wptr = &radeon_ring_generic_set_wptr, | 1581 | .set_wptr = &cayman_gfx_set_wptr, |
| 1576 | }; | 1582 | }; |
| 1577 | 1583 | ||
| 1578 | static struct radeon_asic_ring cayman_dma_ring = { | 1584 | static struct radeon_asic_ring cayman_dma_ring = { |
| @@ -1585,9 +1591,9 @@ static struct radeon_asic_ring cayman_dma_ring = { | |||
| 1585 | .ib_test = &r600_dma_ib_test, | 1591 | .ib_test = &r600_dma_ib_test, |
| 1586 | .is_lockup = &cayman_dma_is_lockup, | 1592 | .is_lockup = &cayman_dma_is_lockup, |
| 1587 | .vm_flush = &cayman_dma_vm_flush, | 1593 | .vm_flush = &cayman_dma_vm_flush, |
| 1588 | .get_rptr = &r600_dma_get_rptr, | 1594 | .get_rptr = &cayman_dma_get_rptr, |
| 1589 | .get_wptr = &r600_dma_get_wptr, | 1595 | .get_wptr = &cayman_dma_get_wptr, |
| 1590 | .set_wptr = &r600_dma_set_wptr | 1596 | .set_wptr = &cayman_dma_set_wptr |
| 1591 | }; | 1597 | }; |
| 1592 | 1598 | ||
| 1593 | static struct radeon_asic_ring cayman_uvd_ring = { | 1599 | static struct radeon_asic_ring cayman_uvd_ring = { |
| @@ -1683,6 +1689,7 @@ static struct radeon_asic cayman_asic = { | |||
| 1683 | .init = &ni_dpm_init, | 1689 | .init = &ni_dpm_init, |
| 1684 | .setup_asic = &ni_dpm_setup_asic, | 1690 | .setup_asic = &ni_dpm_setup_asic, |
| 1685 | .enable = &ni_dpm_enable, | 1691 | .enable = &ni_dpm_enable, |
| 1692 | .late_enable = &rv770_dpm_late_enable, | ||
| 1686 | .disable = &ni_dpm_disable, | 1693 | .disable = &ni_dpm_disable, |
| 1687 | .pre_set_power_state = &ni_dpm_pre_set_power_state, | 1694 | .pre_set_power_state = &ni_dpm_pre_set_power_state, |
| 1688 | .set_power_state = &ni_dpm_set_power_state, | 1695 | .set_power_state = &ni_dpm_set_power_state, |
| @@ -1783,6 +1790,7 @@ static struct radeon_asic trinity_asic = { | |||
| 1783 | .init = &trinity_dpm_init, | 1790 | .init = &trinity_dpm_init, |
| 1784 | .setup_asic = &trinity_dpm_setup_asic, | 1791 | .setup_asic = &trinity_dpm_setup_asic, |
| 1785 | .enable = &trinity_dpm_enable, | 1792 | .enable = &trinity_dpm_enable, |
| 1793 | .late_enable = &trinity_dpm_late_enable, | ||
| 1786 | .disable = &trinity_dpm_disable, | 1794 | .disable = &trinity_dpm_disable, |
| 1787 | .pre_set_power_state = &trinity_dpm_pre_set_power_state, | 1795 | .pre_set_power_state = &trinity_dpm_pre_set_power_state, |
| 1788 | .set_power_state = &trinity_dpm_set_power_state, | 1796 | .set_power_state = &trinity_dpm_set_power_state, |
| @@ -1813,9 +1821,9 @@ static struct radeon_asic_ring si_gfx_ring = { | |||
| 1813 | .ib_test = &r600_ib_test, | 1821 | .ib_test = &r600_ib_test, |
| 1814 | .is_lockup = &si_gfx_is_lockup, | 1822 | .is_lockup = &si_gfx_is_lockup, |
| 1815 | .vm_flush = &si_vm_flush, | 1823 | .vm_flush = &si_vm_flush, |
| 1816 | .get_rptr = &radeon_ring_generic_get_rptr, | 1824 | .get_rptr = &cayman_gfx_get_rptr, |
| 1817 | .get_wptr = &radeon_ring_generic_get_wptr, | 1825 | .get_wptr = &cayman_gfx_get_wptr, |
| 1818 | .set_wptr = &radeon_ring_generic_set_wptr, | 1826 | .set_wptr = &cayman_gfx_set_wptr, |
| 1819 | }; | 1827 | }; |
| 1820 | 1828 | ||
| 1821 | static struct radeon_asic_ring si_dma_ring = { | 1829 | static struct radeon_asic_ring si_dma_ring = { |
| @@ -1828,9 +1836,9 @@ static struct radeon_asic_ring si_dma_ring = { | |||
| 1828 | .ib_test = &r600_dma_ib_test, | 1836 | .ib_test = &r600_dma_ib_test, |
| 1829 | .is_lockup = &si_dma_is_lockup, | 1837 | .is_lockup = &si_dma_is_lockup, |
| 1830 | .vm_flush = &si_dma_vm_flush, | 1838 | .vm_flush = &si_dma_vm_flush, |
| 1831 | .get_rptr = &r600_dma_get_rptr, | 1839 | .get_rptr = &cayman_dma_get_rptr, |
| 1832 | .get_wptr = &r600_dma_get_wptr, | 1840 | .get_wptr = &cayman_dma_get_wptr, |
| 1833 | .set_wptr = &r600_dma_set_wptr, | 1841 | .set_wptr = &cayman_dma_set_wptr, |
| 1834 | }; | 1842 | }; |
| 1835 | 1843 | ||
| 1836 | static struct radeon_asic si_asic = { | 1844 | static struct radeon_asic si_asic = { |
| @@ -1913,6 +1921,7 @@ static struct radeon_asic si_asic = { | |||
| 1913 | .init = &si_dpm_init, | 1921 | .init = &si_dpm_init, |
| 1914 | .setup_asic = &si_dpm_setup_asic, | 1922 | .setup_asic = &si_dpm_setup_asic, |
| 1915 | .enable = &si_dpm_enable, | 1923 | .enable = &si_dpm_enable, |
| 1924 | .late_enable = &si_dpm_late_enable, | ||
| 1916 | .disable = &si_dpm_disable, | 1925 | .disable = &si_dpm_disable, |
| 1917 | .pre_set_power_state = &si_dpm_pre_set_power_state, | 1926 | .pre_set_power_state = &si_dpm_pre_set_power_state, |
| 1918 | .set_power_state = &si_dpm_set_power_state, | 1927 | .set_power_state = &si_dpm_set_power_state, |
| @@ -1943,9 +1952,9 @@ static struct radeon_asic_ring ci_gfx_ring = { | |||
| 1943 | .ib_test = &cik_ib_test, | 1952 | .ib_test = &cik_ib_test, |
| 1944 | .is_lockup = &cik_gfx_is_lockup, | 1953 | .is_lockup = &cik_gfx_is_lockup, |
| 1945 | .vm_flush = &cik_vm_flush, | 1954 | .vm_flush = &cik_vm_flush, |
| 1946 | .get_rptr = &radeon_ring_generic_get_rptr, | 1955 | .get_rptr = &cik_gfx_get_rptr, |
| 1947 | .get_wptr = &radeon_ring_generic_get_wptr, | 1956 | .get_wptr = &cik_gfx_get_wptr, |
| 1948 | .set_wptr = &radeon_ring_generic_set_wptr, | 1957 | .set_wptr = &cik_gfx_set_wptr, |
| 1949 | }; | 1958 | }; |
| 1950 | 1959 | ||
| 1951 | static struct radeon_asic_ring ci_cp_ring = { | 1960 | static struct radeon_asic_ring ci_cp_ring = { |
| @@ -1958,9 +1967,9 @@ static struct radeon_asic_ring ci_cp_ring = { | |||
| 1958 | .ib_test = &cik_ib_test, | 1967 | .ib_test = &cik_ib_test, |
| 1959 | .is_lockup = &cik_gfx_is_lockup, | 1968 | .is_lockup = &cik_gfx_is_lockup, |
| 1960 | .vm_flush = &cik_vm_flush, | 1969 | .vm_flush = &cik_vm_flush, |
| 1961 | .get_rptr = &cik_compute_ring_get_rptr, | 1970 | .get_rptr = &cik_compute_get_rptr, |
| 1962 | .get_wptr = &cik_compute_ring_get_wptr, | 1971 | .get_wptr = &cik_compute_get_wptr, |
| 1963 | .set_wptr = &cik_compute_ring_set_wptr, | 1972 | .set_wptr = &cik_compute_set_wptr, |
| 1964 | }; | 1973 | }; |
| 1965 | 1974 | ||
| 1966 | static struct radeon_asic_ring ci_dma_ring = { | 1975 | static struct radeon_asic_ring ci_dma_ring = { |
| @@ -1973,9 +1982,9 @@ static struct radeon_asic_ring ci_dma_ring = { | |||
| 1973 | .ib_test = &cik_sdma_ib_test, | 1982 | .ib_test = &cik_sdma_ib_test, |
| 1974 | .is_lockup = &cik_sdma_is_lockup, | 1983 | .is_lockup = &cik_sdma_is_lockup, |
| 1975 | .vm_flush = &cik_dma_vm_flush, | 1984 | .vm_flush = &cik_dma_vm_flush, |
| 1976 | .get_rptr = &r600_dma_get_rptr, | 1985 | .get_rptr = &cik_sdma_get_rptr, |
| 1977 | .get_wptr = &r600_dma_get_wptr, | 1986 | .get_wptr = &cik_sdma_get_wptr, |
| 1978 | .set_wptr = &r600_dma_set_wptr, | 1987 | .set_wptr = &cik_sdma_set_wptr, |
| 1979 | }; | 1988 | }; |
| 1980 | 1989 | ||
| 1981 | static struct radeon_asic ci_asic = { | 1990 | static struct radeon_asic ci_asic = { |
| @@ -2058,6 +2067,7 @@ static struct radeon_asic ci_asic = { | |||
| 2058 | .init = &ci_dpm_init, | 2067 | .init = &ci_dpm_init, |
| 2059 | .setup_asic = &ci_dpm_setup_asic, | 2068 | .setup_asic = &ci_dpm_setup_asic, |
| 2060 | .enable = &ci_dpm_enable, | 2069 | .enable = &ci_dpm_enable, |
| 2070 | .late_enable = &ci_dpm_late_enable, | ||
| 2061 | .disable = &ci_dpm_disable, | 2071 | .disable = &ci_dpm_disable, |
| 2062 | .pre_set_power_state = &ci_dpm_pre_set_power_state, | 2072 | .pre_set_power_state = &ci_dpm_pre_set_power_state, |
| 2063 | .set_power_state = &ci_dpm_set_power_state, | 2073 | .set_power_state = &ci_dpm_set_power_state, |
| @@ -2159,6 +2169,7 @@ static struct radeon_asic kv_asic = { | |||
| 2159 | .init = &kv_dpm_init, | 2169 | .init = &kv_dpm_init, |
| 2160 | .setup_asic = &kv_dpm_setup_asic, | 2170 | .setup_asic = &kv_dpm_setup_asic, |
| 2161 | .enable = &kv_dpm_enable, | 2171 | .enable = &kv_dpm_enable, |
| 2172 | .late_enable = &kv_dpm_late_enable, | ||
| 2162 | .disable = &kv_dpm_disable, | 2173 | .disable = &kv_dpm_disable, |
| 2163 | .pre_set_power_state = &kv_dpm_pre_set_power_state, | 2174 | .pre_set_power_state = &kv_dpm_pre_set_power_state, |
| 2164 | .set_power_state = &kv_dpm_set_power_state, | 2175 | .set_power_state = &kv_dpm_set_power_state, |
| @@ -2449,7 +2460,7 @@ int radeon_asic_init(struct radeon_device *rdev) | |||
| 2449 | rdev->cg_flags = | 2460 | rdev->cg_flags = |
| 2450 | RADEON_CG_SUPPORT_GFX_MGCG | | 2461 | RADEON_CG_SUPPORT_GFX_MGCG | |
| 2451 | RADEON_CG_SUPPORT_GFX_MGLS | | 2462 | RADEON_CG_SUPPORT_GFX_MGLS | |
| 2452 | /*RADEON_CG_SUPPORT_GFX_CGCG |*/ | 2463 | RADEON_CG_SUPPORT_GFX_CGCG | |
| 2453 | RADEON_CG_SUPPORT_GFX_CGLS | | 2464 | RADEON_CG_SUPPORT_GFX_CGLS | |
| 2454 | RADEON_CG_SUPPORT_GFX_CGTS | | 2465 | RADEON_CG_SUPPORT_GFX_CGTS | |
| 2455 | RADEON_CG_SUPPORT_GFX_CGTS_LS | | 2466 | RADEON_CG_SUPPORT_GFX_CGTS_LS | |
| @@ -2468,7 +2479,7 @@ int radeon_asic_init(struct radeon_device *rdev) | |||
| 2468 | rdev->cg_flags = | 2479 | rdev->cg_flags = |
| 2469 | RADEON_CG_SUPPORT_GFX_MGCG | | 2480 | RADEON_CG_SUPPORT_GFX_MGCG | |
| 2470 | RADEON_CG_SUPPORT_GFX_MGLS | | 2481 | RADEON_CG_SUPPORT_GFX_MGLS | |
| 2471 | /*RADEON_CG_SUPPORT_GFX_CGCG |*/ | 2482 | RADEON_CG_SUPPORT_GFX_CGCG | |
| 2472 | RADEON_CG_SUPPORT_GFX_CGLS | | 2483 | RADEON_CG_SUPPORT_GFX_CGLS | |
| 2473 | RADEON_CG_SUPPORT_GFX_CGTS | | 2484 | RADEON_CG_SUPPORT_GFX_CGTS | |
| 2474 | RADEON_CG_SUPPORT_GFX_CP_LS | | 2485 | RADEON_CG_SUPPORT_GFX_CP_LS | |
| @@ -2493,7 +2504,7 @@ int radeon_asic_init(struct radeon_device *rdev) | |||
| 2493 | rdev->cg_flags = | 2504 | rdev->cg_flags = |
| 2494 | RADEON_CG_SUPPORT_GFX_MGCG | | 2505 | RADEON_CG_SUPPORT_GFX_MGCG | |
| 2495 | RADEON_CG_SUPPORT_GFX_MGLS | | 2506 | RADEON_CG_SUPPORT_GFX_MGLS | |
| 2496 | /*RADEON_CG_SUPPORT_GFX_CGCG |*/ | 2507 | RADEON_CG_SUPPORT_GFX_CGCG | |
| 2497 | RADEON_CG_SUPPORT_GFX_CGLS | | 2508 | RADEON_CG_SUPPORT_GFX_CGLS | |
| 2498 | RADEON_CG_SUPPORT_GFX_CGTS | | 2509 | RADEON_CG_SUPPORT_GFX_CGTS | |
| 2499 | RADEON_CG_SUPPORT_GFX_CGTS_LS | | 2510 | RADEON_CG_SUPPORT_GFX_CGTS_LS | |
| @@ -2521,7 +2532,7 @@ int radeon_asic_init(struct radeon_device *rdev) | |||
| 2521 | rdev->cg_flags = | 2532 | rdev->cg_flags = |
| 2522 | RADEON_CG_SUPPORT_GFX_MGCG | | 2533 | RADEON_CG_SUPPORT_GFX_MGCG | |
| 2523 | RADEON_CG_SUPPORT_GFX_MGLS | | 2534 | RADEON_CG_SUPPORT_GFX_MGLS | |
| 2524 | /*RADEON_CG_SUPPORT_GFX_CGCG |*/ | 2535 | RADEON_CG_SUPPORT_GFX_CGCG | |
| 2525 | RADEON_CG_SUPPORT_GFX_CGLS | | 2536 | RADEON_CG_SUPPORT_GFX_CGLS | |
| 2526 | RADEON_CG_SUPPORT_GFX_CGTS | | 2537 | RADEON_CG_SUPPORT_GFX_CGTS | |
| 2527 | RADEON_CG_SUPPORT_GFX_CGTS_LS | | 2538 | RADEON_CG_SUPPORT_GFX_CGTS_LS | |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index c9fd97b58076..b3bc433eed4c 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
| @@ -47,13 +47,6 @@ u8 atombios_get_backlight_level(struct radeon_encoder *radeon_encoder); | |||
| 47 | void radeon_legacy_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level); | 47 | void radeon_legacy_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level); |
| 48 | u8 radeon_legacy_get_backlight_level(struct radeon_encoder *radeon_encoder); | 48 | u8 radeon_legacy_get_backlight_level(struct radeon_encoder *radeon_encoder); |
| 49 | 49 | ||
| 50 | u32 radeon_ring_generic_get_rptr(struct radeon_device *rdev, | ||
| 51 | struct radeon_ring *ring); | ||
| 52 | u32 radeon_ring_generic_get_wptr(struct radeon_device *rdev, | ||
| 53 | struct radeon_ring *ring); | ||
| 54 | void radeon_ring_generic_set_wptr(struct radeon_device *rdev, | ||
| 55 | struct radeon_ring *ring); | ||
| 56 | |||
| 57 | /* | 50 | /* |
| 58 | * r100,rv100,rs100,rv200,rs200 | 51 | * r100,rv100,rs100,rv200,rs200 |
| 59 | */ | 52 | */ |
| @@ -148,6 +141,13 @@ extern void r100_post_page_flip(struct radeon_device *rdev, int crtc); | |||
| 148 | extern void r100_wait_for_vblank(struct radeon_device *rdev, int crtc); | 141 | extern void r100_wait_for_vblank(struct radeon_device *rdev, int crtc); |
| 149 | extern int r100_mc_wait_for_idle(struct radeon_device *rdev); | 142 | extern int r100_mc_wait_for_idle(struct radeon_device *rdev); |
| 150 | 143 | ||
| 144 | u32 r100_gfx_get_rptr(struct radeon_device *rdev, | ||
| 145 | struct radeon_ring *ring); | ||
| 146 | u32 r100_gfx_get_wptr(struct radeon_device *rdev, | ||
| 147 | struct radeon_ring *ring); | ||
| 148 | void r100_gfx_set_wptr(struct radeon_device *rdev, | ||
| 149 | struct radeon_ring *ring); | ||
| 150 | |||
| 151 | /* | 151 | /* |
| 152 | * r200,rv250,rs300,rv280 | 152 | * r200,rv250,rs300,rv280 |
| 153 | */ | 153 | */ |
| @@ -368,6 +368,12 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev); | |||
| 368 | int r600_pcie_gart_init(struct radeon_device *rdev); | 368 | int r600_pcie_gart_init(struct radeon_device *rdev); |
| 369 | void r600_scratch_init(struct radeon_device *rdev); | 369 | void r600_scratch_init(struct radeon_device *rdev); |
| 370 | int r600_init_microcode(struct radeon_device *rdev); | 370 | int r600_init_microcode(struct radeon_device *rdev); |
| 371 | u32 r600_gfx_get_rptr(struct radeon_device *rdev, | ||
| 372 | struct radeon_ring *ring); | ||
| 373 | u32 r600_gfx_get_wptr(struct radeon_device *rdev, | ||
| 374 | struct radeon_ring *ring); | ||
| 375 | void r600_gfx_set_wptr(struct radeon_device *rdev, | ||
| 376 | struct radeon_ring *ring); | ||
| 371 | /* r600 irq */ | 377 | /* r600 irq */ |
| 372 | int r600_irq_process(struct radeon_device *rdev); | 378 | int r600_irq_process(struct radeon_device *rdev); |
| 373 | int r600_irq_init(struct radeon_device *rdev); | 379 | int r600_irq_init(struct radeon_device *rdev); |
| @@ -392,6 +398,7 @@ int rv6xx_get_temp(struct radeon_device *rdev); | |||
| 392 | int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); | 398 | int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); |
| 393 | int r600_dpm_pre_set_power_state(struct radeon_device *rdev); | 399 | int r600_dpm_pre_set_power_state(struct radeon_device *rdev); |
| 394 | void r600_dpm_post_set_power_state(struct radeon_device *rdev); | 400 | void r600_dpm_post_set_power_state(struct radeon_device *rdev); |
| 401 | int r600_dpm_late_enable(struct radeon_device *rdev); | ||
| 395 | /* r600 dma */ | 402 | /* r600 dma */ |
| 396 | uint32_t r600_dma_get_rptr(struct radeon_device *rdev, | 403 | uint32_t r600_dma_get_rptr(struct radeon_device *rdev, |
| 397 | struct radeon_ring *ring); | 404 | struct radeon_ring *ring); |
| @@ -454,6 +461,7 @@ int rv770_get_temp(struct radeon_device *rdev); | |||
| 454 | /* rv7xx pm */ | 461 | /* rv7xx pm */ |
| 455 | int rv770_dpm_init(struct radeon_device *rdev); | 462 | int rv770_dpm_init(struct radeon_device *rdev); |
| 456 | int rv770_dpm_enable(struct radeon_device *rdev); | 463 | int rv770_dpm_enable(struct radeon_device *rdev); |
| 464 | int rv770_dpm_late_enable(struct radeon_device *rdev); | ||
| 457 | void rv770_dpm_disable(struct radeon_device *rdev); | 465 | void rv770_dpm_disable(struct radeon_device *rdev); |
| 458 | int rv770_dpm_set_power_state(struct radeon_device *rdev); | 466 | int rv770_dpm_set_power_state(struct radeon_device *rdev); |
| 459 | void rv770_dpm_setup_asic(struct radeon_device *rdev); | 467 | void rv770_dpm_setup_asic(struct radeon_device *rdev); |
| @@ -545,6 +553,7 @@ u32 btc_dpm_get_mclk(struct radeon_device *rdev, bool low); | |||
| 545 | bool btc_dpm_vblank_too_short(struct radeon_device *rdev); | 553 | bool btc_dpm_vblank_too_short(struct radeon_device *rdev); |
| 546 | int sumo_dpm_init(struct radeon_device *rdev); | 554 | int sumo_dpm_init(struct radeon_device *rdev); |
| 547 | int sumo_dpm_enable(struct radeon_device *rdev); | 555 | int sumo_dpm_enable(struct radeon_device *rdev); |
| 556 | int sumo_dpm_late_enable(struct radeon_device *rdev); | ||
| 548 | void sumo_dpm_disable(struct radeon_device *rdev); | 557 | void sumo_dpm_disable(struct radeon_device *rdev); |
| 549 | int sumo_dpm_pre_set_power_state(struct radeon_device *rdev); | 558 | int sumo_dpm_pre_set_power_state(struct radeon_device *rdev); |
| 550 | int sumo_dpm_set_power_state(struct radeon_device *rdev); | 559 | int sumo_dpm_set_power_state(struct radeon_device *rdev); |
| @@ -591,6 +600,19 @@ void cayman_dma_vm_set_page(struct radeon_device *rdev, | |||
| 591 | 600 | ||
| 592 | void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); | 601 | void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); |
| 593 | 602 | ||
| 603 | u32 cayman_gfx_get_rptr(struct radeon_device *rdev, | ||
| 604 | struct radeon_ring *ring); | ||
| 605 | u32 cayman_gfx_get_wptr(struct radeon_device *rdev, | ||
| 606 | struct radeon_ring *ring); | ||
| 607 | void cayman_gfx_set_wptr(struct radeon_device *rdev, | ||
| 608 | struct radeon_ring *ring); | ||
| 609 | uint32_t cayman_dma_get_rptr(struct radeon_device *rdev, | ||
| 610 | struct radeon_ring *ring); | ||
| 611 | uint32_t cayman_dma_get_wptr(struct radeon_device *rdev, | ||
| 612 | struct radeon_ring *ring); | ||
| 613 | void cayman_dma_set_wptr(struct radeon_device *rdev, | ||
| 614 | struct radeon_ring *ring); | ||
| 615 | |||
| 594 | int ni_dpm_init(struct radeon_device *rdev); | 616 | int ni_dpm_init(struct radeon_device *rdev); |
| 595 | void ni_dpm_setup_asic(struct radeon_device *rdev); | 617 | void ni_dpm_setup_asic(struct radeon_device *rdev); |
| 596 | int ni_dpm_enable(struct radeon_device *rdev); | 618 | int ni_dpm_enable(struct radeon_device *rdev); |
| @@ -610,6 +632,7 @@ int ni_dpm_force_performance_level(struct radeon_device *rdev, | |||
| 610 | bool ni_dpm_vblank_too_short(struct radeon_device *rdev); | 632 | bool ni_dpm_vblank_too_short(struct radeon_device *rdev); |
| 611 | int trinity_dpm_init(struct radeon_device *rdev); | 633 | int trinity_dpm_init(struct radeon_device *rdev); |
| 612 | int trinity_dpm_enable(struct radeon_device *rdev); | 634 | int trinity_dpm_enable(struct radeon_device *rdev); |
| 635 | int trinity_dpm_late_enable(struct radeon_device *rdev); | ||
| 613 | void trinity_dpm_disable(struct radeon_device *rdev); | 636 | void trinity_dpm_disable(struct radeon_device *rdev); |
| 614 | int trinity_dpm_pre_set_power_state(struct radeon_device *rdev); | 637 | int trinity_dpm_pre_set_power_state(struct radeon_device *rdev); |
| 615 | int trinity_dpm_set_power_state(struct radeon_device *rdev); | 638 | int trinity_dpm_set_power_state(struct radeon_device *rdev); |
| @@ -669,6 +692,7 @@ int si_get_temp(struct radeon_device *rdev); | |||
| 669 | int si_dpm_init(struct radeon_device *rdev); | 692 | int si_dpm_init(struct radeon_device *rdev); |
| 670 | void si_dpm_setup_asic(struct radeon_device *rdev); | 693 | void si_dpm_setup_asic(struct radeon_device *rdev); |
| 671 | int si_dpm_enable(struct radeon_device *rdev); | 694 | int si_dpm_enable(struct radeon_device *rdev); |
| 695 | int si_dpm_late_enable(struct radeon_device *rdev); | ||
| 672 | void si_dpm_disable(struct radeon_device *rdev); | 696 | void si_dpm_disable(struct radeon_device *rdev); |
| 673 | int si_dpm_pre_set_power_state(struct radeon_device *rdev); | 697 | int si_dpm_pre_set_power_state(struct radeon_device *rdev); |
| 674 | int si_dpm_set_power_state(struct radeon_device *rdev); | 698 | int si_dpm_set_power_state(struct radeon_device *rdev); |
| @@ -739,17 +763,30 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev, | |||
| 739 | uint32_t incr, uint32_t flags); | 763 | uint32_t incr, uint32_t flags); |
| 740 | void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); | 764 | void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); |
| 741 | int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); | 765 | int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); |
| 742 | u32 cik_compute_ring_get_rptr(struct radeon_device *rdev, | 766 | u32 cik_gfx_get_rptr(struct radeon_device *rdev, |
| 743 | struct radeon_ring *ring); | 767 | struct radeon_ring *ring); |
| 744 | u32 cik_compute_ring_get_wptr(struct radeon_device *rdev, | 768 | u32 cik_gfx_get_wptr(struct radeon_device *rdev, |
| 745 | struct radeon_ring *ring); | 769 | struct radeon_ring *ring); |
| 746 | void cik_compute_ring_set_wptr(struct radeon_device *rdev, | 770 | void cik_gfx_set_wptr(struct radeon_device *rdev, |
| 747 | struct radeon_ring *ring); | 771 | struct radeon_ring *ring); |
| 772 | u32 cik_compute_get_rptr(struct radeon_device *rdev, | ||
| 773 | struct radeon_ring *ring); | ||
| 774 | u32 cik_compute_get_wptr(struct radeon_device *rdev, | ||
| 775 | struct radeon_ring *ring); | ||
| 776 | void cik_compute_set_wptr(struct radeon_device *rdev, | ||
| 777 | struct radeon_ring *ring); | ||
| 778 | u32 cik_sdma_get_rptr(struct radeon_device *rdev, | ||
| 779 | struct radeon_ring *ring); | ||
| 780 | u32 cik_sdma_get_wptr(struct radeon_device *rdev, | ||
| 781 | struct radeon_ring *ring); | ||
| 782 | void cik_sdma_set_wptr(struct radeon_device *rdev, | ||
| 783 | struct radeon_ring *ring); | ||
| 748 | int ci_get_temp(struct radeon_device *rdev); | 784 | int ci_get_temp(struct radeon_device *rdev); |
| 749 | int kv_get_temp(struct radeon_device *rdev); | 785 | int kv_get_temp(struct radeon_device *rdev); |
| 750 | 786 | ||
| 751 | int ci_dpm_init(struct radeon_device *rdev); | 787 | int ci_dpm_init(struct radeon_device *rdev); |
| 752 | int ci_dpm_enable(struct radeon_device *rdev); | 788 | int ci_dpm_enable(struct radeon_device *rdev); |
| 789 | int ci_dpm_late_enable(struct radeon_device *rdev); | ||
| 753 | void ci_dpm_disable(struct radeon_device *rdev); | 790 | void ci_dpm_disable(struct radeon_device *rdev); |
| 754 | int ci_dpm_pre_set_power_state(struct radeon_device *rdev); | 791 | int ci_dpm_pre_set_power_state(struct radeon_device *rdev); |
| 755 | int ci_dpm_set_power_state(struct radeon_device *rdev); | 792 | int ci_dpm_set_power_state(struct radeon_device *rdev); |
| @@ -770,6 +807,7 @@ void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate); | |||
| 770 | 807 | ||
| 771 | int kv_dpm_init(struct radeon_device *rdev); | 808 | int kv_dpm_init(struct radeon_device *rdev); |
| 772 | int kv_dpm_enable(struct radeon_device *rdev); | 809 | int kv_dpm_enable(struct radeon_device *rdev); |
| 810 | int kv_dpm_late_enable(struct radeon_device *rdev); | ||
| 773 | void kv_dpm_disable(struct radeon_device *rdev); | 811 | void kv_dpm_disable(struct radeon_device *rdev); |
| 774 | int kv_dpm_pre_set_power_state(struct radeon_device *rdev); | 812 | int kv_dpm_pre_set_power_state(struct radeon_device *rdev); |
| 775 | int kv_dpm_set_power_state(struct radeon_device *rdev); | 813 | int kv_dpm_set_power_state(struct radeon_device *rdev); |
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 5c39bf7c3d88..30844814c25a 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
| @@ -30,27 +30,10 @@ | |||
| 30 | #include "atom.h" | 30 | #include "atom.h" |
| 31 | #include "atom-bits.h" | 31 | #include "atom-bits.h" |
| 32 | 32 | ||
| 33 | /* from radeon_encoder.c */ | ||
| 34 | extern uint32_t | ||
| 35 | radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, | ||
| 36 | uint8_t dac); | ||
| 37 | extern void radeon_link_encoder_connector(struct drm_device *dev); | ||
| 38 | extern void | 33 | extern void |
| 39 | radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, | 34 | radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, |
| 40 | uint32_t supported_device, u16 caps); | 35 | uint32_t supported_device, u16 caps); |
| 41 | 36 | ||
| 42 | /* from radeon_connector.c */ | ||
| 43 | extern void | ||
| 44 | radeon_add_atom_connector(struct drm_device *dev, | ||
| 45 | uint32_t connector_id, | ||
| 46 | uint32_t supported_device, | ||
| 47 | int connector_type, | ||
| 48 | struct radeon_i2c_bus_rec *i2c_bus, | ||
| 49 | uint32_t igp_lane_info, | ||
| 50 | uint16_t connector_object_id, | ||
| 51 | struct radeon_hpd *hpd, | ||
| 52 | struct radeon_router *router); | ||
| 53 | |||
| 54 | /* from radeon_legacy_encoder.c */ | 37 | /* from radeon_legacy_encoder.c */ |
| 55 | extern void | 38 | extern void |
| 56 | radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, | 39 | radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, |
| @@ -1528,6 +1511,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, | |||
| 1528 | le16_to_cpu(ss_assign->v1.usSpreadSpectrumPercentage); | 1511 | le16_to_cpu(ss_assign->v1.usSpreadSpectrumPercentage); |
| 1529 | ss->type = ss_assign->v1.ucSpreadSpectrumMode; | 1512 | ss->type = ss_assign->v1.ucSpreadSpectrumMode; |
| 1530 | ss->rate = le16_to_cpu(ss_assign->v1.usSpreadRateInKhz); | 1513 | ss->rate = le16_to_cpu(ss_assign->v1.usSpreadRateInKhz); |
| 1514 | ss->percentage_divider = 100; | ||
| 1531 | return true; | 1515 | return true; |
| 1532 | } | 1516 | } |
| 1533 | ss_assign = (union asic_ss_assignment *) | 1517 | ss_assign = (union asic_ss_assignment *) |
| @@ -1545,6 +1529,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, | |||
| 1545 | le16_to_cpu(ss_assign->v2.usSpreadSpectrumPercentage); | 1529 | le16_to_cpu(ss_assign->v2.usSpreadSpectrumPercentage); |
| 1546 | ss->type = ss_assign->v2.ucSpreadSpectrumMode; | 1530 | ss->type = ss_assign->v2.ucSpreadSpectrumMode; |
| 1547 | ss->rate = le16_to_cpu(ss_assign->v2.usSpreadRateIn10Hz); | 1531 | ss->rate = le16_to_cpu(ss_assign->v2.usSpreadRateIn10Hz); |
| 1532 | ss->percentage_divider = 100; | ||
| 1548 | if ((crev == 2) && | 1533 | if ((crev == 2) && |
| 1549 | ((id == ASIC_INTERNAL_ENGINE_SS) || | 1534 | ((id == ASIC_INTERNAL_ENGINE_SS) || |
| 1550 | (id == ASIC_INTERNAL_MEMORY_SS))) | 1535 | (id == ASIC_INTERNAL_MEMORY_SS))) |
| @@ -1566,6 +1551,11 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, | |||
| 1566 | le16_to_cpu(ss_assign->v3.usSpreadSpectrumPercentage); | 1551 | le16_to_cpu(ss_assign->v3.usSpreadSpectrumPercentage); |
| 1567 | ss->type = ss_assign->v3.ucSpreadSpectrumMode; | 1552 | ss->type = ss_assign->v3.ucSpreadSpectrumMode; |
| 1568 | ss->rate = le16_to_cpu(ss_assign->v3.usSpreadRateIn10Hz); | 1553 | ss->rate = le16_to_cpu(ss_assign->v3.usSpreadRateIn10Hz); |
| 1554 | if (ss_assign->v3.ucSpreadSpectrumMode & | ||
| 1555 | SS_MODE_V3_PERCENTAGE_DIV_BY_1000_MASK) | ||
| 1556 | ss->percentage_divider = 1000; | ||
| 1557 | else | ||
| 1558 | ss->percentage_divider = 100; | ||
| 1569 | if ((id == ASIC_INTERNAL_ENGINE_SS) || | 1559 | if ((id == ASIC_INTERNAL_ENGINE_SS) || |
| 1570 | (id == ASIC_INTERNAL_MEMORY_SS)) | 1560 | (id == ASIC_INTERNAL_MEMORY_SS)) |
| 1571 | ss->rate /= 100; | 1561 | ss->rate /= 100; |
| @@ -1809,7 +1799,8 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index, | |||
| 1809 | if (misc & ATOM_DOUBLE_CLOCK_MODE) | 1799 | if (misc & ATOM_DOUBLE_CLOCK_MODE) |
| 1810 | mode->flags |= DRM_MODE_FLAG_DBLSCAN; | 1800 | mode->flags |= DRM_MODE_FLAG_DBLSCAN; |
| 1811 | 1801 | ||
| 1812 | mode->clock = le16_to_cpu(tv_info->aModeTimings[index].usPixelClock) * 10; | 1802 | mode->crtc_clock = mode->clock = |
| 1803 | le16_to_cpu(tv_info->aModeTimings[index].usPixelClock) * 10; | ||
| 1813 | 1804 | ||
| 1814 | if (index == 1) { | 1805 | if (index == 1) { |
| 1815 | /* PAL timings appear to have wrong values for totals */ | 1806 | /* PAL timings appear to have wrong values for totals */ |
| @@ -1852,7 +1843,8 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index, | |||
| 1852 | if (misc & ATOM_DOUBLE_CLOCK_MODE) | 1843 | if (misc & ATOM_DOUBLE_CLOCK_MODE) |
| 1853 | mode->flags |= DRM_MODE_FLAG_DBLSCAN; | 1844 | mode->flags |= DRM_MODE_FLAG_DBLSCAN; |
| 1854 | 1845 | ||
| 1855 | mode->clock = le16_to_cpu(dtd_timings->usPixClk) * 10; | 1846 | mode->crtc_clock = mode->clock = |
| 1847 | le16_to_cpu(dtd_timings->usPixClk) * 10; | ||
| 1856 | break; | 1848 | break; |
| 1857 | } | 1849 | } |
| 1858 | return true; | 1850 | return true; |
| @@ -3884,16 +3876,18 @@ int radeon_atom_init_mc_reg_table(struct radeon_device *rdev, | |||
| 3884 | ((u8 *)format + sizeof(ATOM_INIT_REG_INDEX_FORMAT)); | 3876 | ((u8 *)format + sizeof(ATOM_INIT_REG_INDEX_FORMAT)); |
| 3885 | } | 3877 | } |
| 3886 | reg_table->last = i; | 3878 | reg_table->last = i; |
| 3887 | while ((*(u32 *)reg_data != END_OF_REG_DATA_BLOCK) && | 3879 | while ((le32_to_cpu(*(u32 *)reg_data) != END_OF_REG_DATA_BLOCK) && |
| 3888 | (num_ranges < VBIOS_MAX_AC_TIMING_ENTRIES)) { | 3880 | (num_ranges < VBIOS_MAX_AC_TIMING_ENTRIES)) { |
| 3889 | t_mem_id = (u8)((*(u32 *)reg_data & MEM_ID_MASK) >> MEM_ID_SHIFT); | 3881 | t_mem_id = (u8)((le32_to_cpu(*(u32 *)reg_data) & MEM_ID_MASK) |
| 3882 | >> MEM_ID_SHIFT); | ||
| 3890 | if (module_index == t_mem_id) { | 3883 | if (module_index == t_mem_id) { |
| 3891 | reg_table->mc_reg_table_entry[num_ranges].mclk_max = | 3884 | reg_table->mc_reg_table_entry[num_ranges].mclk_max = |
| 3892 | (u32)((*(u32 *)reg_data & CLOCK_RANGE_MASK) >> CLOCK_RANGE_SHIFT); | 3885 | (u32)((le32_to_cpu(*(u32 *)reg_data) & CLOCK_RANGE_MASK) |
| 3886 | >> CLOCK_RANGE_SHIFT); | ||
| 3893 | for (i = 0, j = 1; i < reg_table->last; i++) { | 3887 | for (i = 0, j = 1; i < reg_table->last; i++) { |
| 3894 | if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_FROM_TABLE) { | 3888 | if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_FROM_TABLE) { |
| 3895 | reg_table->mc_reg_table_entry[num_ranges].mc_data[i] = | 3889 | reg_table->mc_reg_table_entry[num_ranges].mc_data[i] = |
| 3896 | (u32)*((u32 *)reg_data + j); | 3890 | (u32)le32_to_cpu(*((u32 *)reg_data + j)); |
| 3897 | j++; | 3891 | j++; |
| 3898 | } else if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_EQU_PREV) { | 3892 | } else if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_EQU_PREV) { |
| 3899 | reg_table->mc_reg_table_entry[num_ranges].mc_data[i] = | 3893 | reg_table->mc_reg_table_entry[num_ranges].mc_data[i] = |
| @@ -3905,7 +3899,7 @@ int radeon_atom_init_mc_reg_table(struct radeon_device *rdev, | |||
| 3905 | reg_data = (ATOM_MEMORY_SETTING_DATA_BLOCK *) | 3899 | reg_data = (ATOM_MEMORY_SETTING_DATA_BLOCK *) |
| 3906 | ((u8 *)reg_data + le16_to_cpu(reg_block->usRegDataBlkSize)); | 3900 | ((u8 *)reg_data + le16_to_cpu(reg_block->usRegDataBlkSize)); |
| 3907 | } | 3901 | } |
| 3908 | if (*(u32 *)reg_data != END_OF_REG_DATA_BLOCK) | 3902 | if (le32_to_cpu(*(u32 *)reg_data) != END_OF_REG_DATA_BLOCK) |
| 3909 | return -EINVAL; | 3903 | return -EINVAL; |
| 3910 | reg_table->num_entries = num_ranges; | 3904 | reg_table->num_entries = num_ranges; |
| 3911 | } else | 3905 | } else |
| @@ -3944,6 +3938,10 @@ void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev) | |||
| 3944 | /* tell the bios not to handle mode switching */ | 3938 | /* tell the bios not to handle mode switching */ |
| 3945 | bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH; | 3939 | bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH; |
| 3946 | 3940 | ||
| 3941 | /* clear the vbios dpms state */ | ||
| 3942 | if (ASIC_IS_DCE4(rdev)) | ||
| 3943 | bios_2_scratch &= ~ATOM_S2_DEVICE_DPMS_STATE; | ||
| 3944 | |||
| 3947 | if (rdev->family >= CHIP_R600) { | 3945 | if (rdev->family >= CHIP_R600) { |
| 3948 | WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch); | 3946 | WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch); |
| 3949 | WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch); | 3947 | WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch); |
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 68ce36056019..6651177110f0 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c | |||
| @@ -37,22 +37,6 @@ | |||
| 37 | #include <asm/pci-bridge.h> | 37 | #include <asm/pci-bridge.h> |
| 38 | #endif /* CONFIG_PPC_PMAC */ | 38 | #endif /* CONFIG_PPC_PMAC */ |
| 39 | 39 | ||
| 40 | /* from radeon_encoder.c */ | ||
| 41 | extern uint32_t | ||
| 42 | radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, | ||
| 43 | uint8_t dac); | ||
| 44 | extern void radeon_link_encoder_connector(struct drm_device *dev); | ||
| 45 | |||
| 46 | /* from radeon_connector.c */ | ||
| 47 | extern void | ||
| 48 | radeon_add_legacy_connector(struct drm_device *dev, | ||
| 49 | uint32_t connector_id, | ||
| 50 | uint32_t supported_device, | ||
| 51 | int connector_type, | ||
| 52 | struct radeon_i2c_bus_rec *i2c_bus, | ||
| 53 | uint16_t connector_object_id, | ||
| 54 | struct radeon_hpd *hpd); | ||
| 55 | |||
| 56 | /* from radeon_legacy_encoder.c */ | 40 | /* from radeon_legacy_encoder.c */ |
| 57 | extern void | 41 | extern void |
| 58 | radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, | 42 | radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, |
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 20a768ac89a8..82d4f865546e 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
| @@ -33,15 +33,6 @@ | |||
| 33 | 33 | ||
| 34 | #include <linux/pm_runtime.h> | 34 | #include <linux/pm_runtime.h> |
| 35 | 35 | ||
| 36 | extern void | ||
| 37 | radeon_combios_connected_scratch_regs(struct drm_connector *connector, | ||
| 38 | struct drm_encoder *encoder, | ||
| 39 | bool connected); | ||
| 40 | extern void | ||
| 41 | radeon_atombios_connected_scratch_regs(struct drm_connector *connector, | ||
| 42 | struct drm_encoder *encoder, | ||
| 43 | bool connected); | ||
| 44 | |||
| 45 | void radeon_connector_hotplug(struct drm_connector *connector) | 36 | void radeon_connector_hotplug(struct drm_connector *connector) |
| 46 | { | 37 | { |
| 47 | struct drm_device *dev = connector->dev; | 38 | struct drm_device *dev = connector->dev; |
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c index 3cae2bbc1854..bb0d5c3a8311 100644 --- a/drivers/gpu/drm/radeon/radeon_cp.c +++ b/drivers/gpu/drm/radeon/radeon_cp.c | |||
| @@ -2020,10 +2020,10 @@ static int radeon_cp_get_buffers(struct drm_device *dev, | |||
| 2020 | 2020 | ||
| 2021 | buf->file_priv = file_priv; | 2021 | buf->file_priv = file_priv; |
| 2022 | 2022 | ||
| 2023 | if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx, | 2023 | if (copy_to_user(&d->request_indices[i], &buf->idx, |
| 2024 | sizeof(buf->idx))) | 2024 | sizeof(buf->idx))) |
| 2025 | return -EFAULT; | 2025 | return -EFAULT; |
| 2026 | if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total, | 2026 | if (copy_to_user(&d->request_sizes[i], &buf->total, |
| 2027 | sizeof(buf->total))) | 2027 | sizeof(buf->total))) |
| 2028 | return -EFAULT; | 2028 | return -EFAULT; |
| 2029 | 2029 | ||
| @@ -2228,7 +2228,7 @@ void radeon_commit_ring(drm_radeon_private_t *dev_priv) | |||
| 2228 | 2228 | ||
| 2229 | dev_priv->ring.tail &= dev_priv->ring.tail_mask; | 2229 | dev_priv->ring.tail &= dev_priv->ring.tail_mask; |
| 2230 | 2230 | ||
| 2231 | DRM_MEMORYBARRIER(); | 2231 | mb(); |
| 2232 | GET_RING_HEAD( dev_priv ); | 2232 | GET_RING_HEAD( dev_priv ); |
| 2233 | 2233 | ||
| 2234 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) { | 2234 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) { |
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 0b366169d64d..dfb5a1db87d4 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c | |||
| @@ -138,7 +138,7 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority | |||
| 138 | p->ring = R600_RING_TYPE_DMA_INDEX; | 138 | p->ring = R600_RING_TYPE_DMA_INDEX; |
| 139 | else | 139 | else |
| 140 | p->ring = CAYMAN_RING_TYPE_DMA1_INDEX; | 140 | p->ring = CAYMAN_RING_TYPE_DMA1_INDEX; |
| 141 | } else if (p->rdev->family >= CHIP_R600) { | 141 | } else if (p->rdev->family >= CHIP_RV770) { |
| 142 | p->ring = R600_RING_TYPE_DMA_INDEX; | 142 | p->ring = R600_RING_TYPE_DMA_INDEX; |
| 143 | } else { | 143 | } else { |
| 144 | return -EINVAL; | 144 | return -EINVAL; |
| @@ -192,7 +192,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) | |||
| 192 | return -ENOMEM; | 192 | return -ENOMEM; |
| 193 | } | 193 | } |
| 194 | chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks); | 194 | chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks); |
| 195 | if (DRM_COPY_FROM_USER(p->chunks_array, chunk_array_ptr, | 195 | if (copy_from_user(p->chunks_array, chunk_array_ptr, |
| 196 | sizeof(uint64_t)*cs->num_chunks)) { | 196 | sizeof(uint64_t)*cs->num_chunks)) { |
| 197 | return -EFAULT; | 197 | return -EFAULT; |
| 198 | } | 198 | } |
| @@ -208,7 +208,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) | |||
| 208 | uint32_t __user *cdata; | 208 | uint32_t __user *cdata; |
| 209 | 209 | ||
| 210 | chunk_ptr = (void __user*)(unsigned long)p->chunks_array[i]; | 210 | chunk_ptr = (void __user*)(unsigned long)p->chunks_array[i]; |
| 211 | if (DRM_COPY_FROM_USER(&user_chunk, chunk_ptr, | 211 | if (copy_from_user(&user_chunk, chunk_ptr, |
| 212 | sizeof(struct drm_radeon_cs_chunk))) { | 212 | sizeof(struct drm_radeon_cs_chunk))) { |
| 213 | return -EFAULT; | 213 | return -EFAULT; |
| 214 | } | 214 | } |
| @@ -252,7 +252,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) | |||
| 252 | if (p->chunks[i].kdata == NULL) { | 252 | if (p->chunks[i].kdata == NULL) { |
| 253 | return -ENOMEM; | 253 | return -ENOMEM; |
| 254 | } | 254 | } |
| 255 | if (DRM_COPY_FROM_USER(p->chunks[i].kdata, cdata, size)) { | 255 | if (copy_from_user(p->chunks[i].kdata, cdata, size)) { |
| 256 | return -EFAULT; | 256 | return -EFAULT; |
| 257 | } | 257 | } |
| 258 | if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) { | 258 | if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) { |
| @@ -472,7 +472,7 @@ static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser | |||
| 472 | } | 472 | } |
| 473 | parser->const_ib.is_const_ib = true; | 473 | parser->const_ib.is_const_ib = true; |
| 474 | parser->const_ib.length_dw = ib_chunk->length_dw; | 474 | parser->const_ib.length_dw = ib_chunk->length_dw; |
| 475 | if (DRM_COPY_FROM_USER(parser->const_ib.ptr, | 475 | if (copy_from_user(parser->const_ib.ptr, |
| 476 | ib_chunk->user_ptr, | 476 | ib_chunk->user_ptr, |
| 477 | ib_chunk->length_dw * 4)) | 477 | ib_chunk->length_dw * 4)) |
| 478 | return -EFAULT; | 478 | return -EFAULT; |
| @@ -495,7 +495,7 @@ static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser | |||
| 495 | parser->ib.length_dw = ib_chunk->length_dw; | 495 | parser->ib.length_dw = ib_chunk->length_dw; |
| 496 | if (ib_chunk->kdata) | 496 | if (ib_chunk->kdata) |
| 497 | memcpy(parser->ib.ptr, ib_chunk->kdata, ib_chunk->length_dw * 4); | 497 | memcpy(parser->ib.ptr, ib_chunk->kdata, ib_chunk->length_dw * 4); |
| 498 | else if (DRM_COPY_FROM_USER(parser->ib.ptr, ib_chunk->user_ptr, ib_chunk->length_dw * 4)) | 498 | else if (copy_from_user(parser->ib.ptr, ib_chunk->user_ptr, ib_chunk->length_dw * 4)) |
| 499 | return -EFAULT; | 499 | return -EFAULT; |
| 500 | return 0; | 500 | return 0; |
| 501 | } | 501 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 39b033b441d2..b012cbbc3ed5 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
| @@ -144,6 +144,11 @@ void radeon_program_register_sequence(struct radeon_device *rdev, | |||
| 144 | } | 144 | } |
| 145 | } | 145 | } |
| 146 | 146 | ||
| 147 | void radeon_pci_config_reset(struct radeon_device *rdev) | ||
| 148 | { | ||
| 149 | pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA); | ||
| 150 | } | ||
| 151 | |||
| 147 | /** | 152 | /** |
| 148 | * radeon_surface_init - Clear GPU surface registers. | 153 | * radeon_surface_init - Clear GPU surface registers. |
| 149 | * | 154 | * |
| @@ -249,7 +254,7 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg) | |||
| 249 | * Init doorbell driver information (CIK) | 254 | * Init doorbell driver information (CIK) |
| 250 | * Returns 0 on success, error on failure. | 255 | * Returns 0 on success, error on failure. |
| 251 | */ | 256 | */ |
| 252 | int radeon_doorbell_init(struct radeon_device *rdev) | 257 | static int radeon_doorbell_init(struct radeon_device *rdev) |
| 253 | { | 258 | { |
| 254 | /* doorbell bar mapping */ | 259 | /* doorbell bar mapping */ |
| 255 | rdev->doorbell.base = pci_resource_start(rdev->pdev, 2); | 260 | rdev->doorbell.base = pci_resource_start(rdev->pdev, 2); |
| @@ -278,7 +283,7 @@ int radeon_doorbell_init(struct radeon_device *rdev) | |||
| 278 | * | 283 | * |
| 279 | * Tear down doorbell driver information (CIK) | 284 | * Tear down doorbell driver information (CIK) |
| 280 | */ | 285 | */ |
| 281 | void radeon_doorbell_fini(struct radeon_device *rdev) | 286 | static void radeon_doorbell_fini(struct radeon_device *rdev) |
| 282 | { | 287 | { |
| 283 | iounmap(rdev->doorbell.ptr); | 288 | iounmap(rdev->doorbell.ptr); |
| 284 | rdev->doorbell.ptr = NULL; | 289 | rdev->doorbell.ptr = NULL; |
| @@ -1330,6 +1335,7 @@ int radeon_device_init(struct radeon_device *rdev, | |||
| 1330 | if (r) | 1335 | if (r) |
| 1331 | return r; | 1336 | return r; |
| 1332 | } | 1337 | } |
| 1338 | |||
| 1333 | if ((radeon_testing & 1)) { | 1339 | if ((radeon_testing & 1)) { |
| 1334 | if (rdev->accel_working) | 1340 | if (rdev->accel_working) |
| 1335 | radeon_test_moves(rdev); | 1341 | radeon_test_moves(rdev); |
| @@ -1455,7 +1461,6 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon) | |||
| 1455 | 1461 | ||
| 1456 | radeon_save_bios_scratch_regs(rdev); | 1462 | radeon_save_bios_scratch_regs(rdev); |
| 1457 | 1463 | ||
| 1458 | radeon_pm_suspend(rdev); | ||
| 1459 | radeon_suspend(rdev); | 1464 | radeon_suspend(rdev); |
| 1460 | radeon_hpd_fini(rdev); | 1465 | radeon_hpd_fini(rdev); |
| 1461 | /* evict remaining vram memory */ | 1466 | /* evict remaining vram memory */ |
| @@ -1516,14 +1521,22 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) | |||
| 1516 | if (r) | 1521 | if (r) |
| 1517 | DRM_ERROR("ib ring test failed (%d).\n", r); | 1522 | DRM_ERROR("ib ring test failed (%d).\n", r); |
| 1518 | 1523 | ||
| 1519 | radeon_pm_resume(rdev); | 1524 | if (rdev->pm.dpm_enabled) { |
| 1525 | /* do dpm late init */ | ||
| 1526 | r = radeon_pm_late_init(rdev); | ||
| 1527 | if (r) { | ||
| 1528 | rdev->pm.dpm_enabled = false; | ||
| 1529 | DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n"); | ||
| 1530 | } | ||
| 1531 | } | ||
| 1532 | |||
| 1520 | radeon_restore_bios_scratch_regs(rdev); | 1533 | radeon_restore_bios_scratch_regs(rdev); |
| 1521 | 1534 | ||
| 1522 | if (fbcon) { | 1535 | if (fbcon) { |
| 1523 | radeon_fbdev_set_suspend(rdev, 0); | 1536 | radeon_fbdev_set_suspend(rdev, 0); |
| 1524 | console_unlock(); | 1537 | console_unlock(); |
| 1525 | } | 1538 | } |
| 1526 | 1539 | ||
| 1527 | /* init dig PHYs, disp eng pll */ | 1540 | /* init dig PHYs, disp eng pll */ |
| 1528 | if (rdev->is_atom_bios) { | 1541 | if (rdev->is_atom_bios) { |
| 1529 | radeon_atom_encoder_init(rdev); | 1542 | radeon_atom_encoder_init(rdev); |
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 7b253815a323..d680608f6f5b 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
| @@ -306,7 +306,7 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id) | |||
| 306 | * to complete in this vblank? | 306 | * to complete in this vblank? |
| 307 | */ | 307 | */ |
| 308 | if (update_pending && | 308 | if (update_pending && |
| 309 | (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id, | 309 | (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id, 0, |
| 310 | &vpos, &hpos, NULL, NULL)) && | 310 | &vpos, &hpos, NULL, NULL)) && |
| 311 | ((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) || | 311 | ((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) || |
| 312 | (vpos < 0 && !ASIC_IS_AVIVO(rdev)))) { | 312 | (vpos < 0 && !ASIC_IS_AVIVO(rdev)))) { |
| @@ -1464,12 +1464,22 @@ int radeon_modeset_init(struct radeon_device *rdev) | |||
| 1464 | /* setup afmt */ | 1464 | /* setup afmt */ |
| 1465 | radeon_afmt_init(rdev); | 1465 | radeon_afmt_init(rdev); |
| 1466 | 1466 | ||
| 1467 | /* Initialize power management */ | ||
| 1468 | radeon_pm_init(rdev); | ||
| 1469 | |||
| 1470 | radeon_fbdev_init(rdev); | 1467 | radeon_fbdev_init(rdev); |
| 1471 | drm_kms_helper_poll_init(rdev->ddev); | 1468 | drm_kms_helper_poll_init(rdev->ddev); |
| 1472 | 1469 | ||
| 1470 | if (rdev->pm.dpm_enabled) { | ||
| 1471 | /* do dpm late init */ | ||
| 1472 | ret = radeon_pm_late_init(rdev); | ||
| 1473 | if (ret) { | ||
| 1474 | rdev->pm.dpm_enabled = false; | ||
| 1475 | DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n"); | ||
| 1476 | } | ||
| 1477 | /* set the dpm state for PX since there won't be | ||
| 1478 | * a modeset to call this. | ||
| 1479 | */ | ||
| 1480 | radeon_pm_compute_clocks(rdev); | ||
| 1481 | } | ||
| 1482 | |||
| 1473 | return 0; | 1483 | return 0; |
| 1474 | } | 1484 | } |
| 1475 | 1485 | ||
| @@ -1477,7 +1487,6 @@ void radeon_modeset_fini(struct radeon_device *rdev) | |||
| 1477 | { | 1487 | { |
| 1478 | radeon_fbdev_fini(rdev); | 1488 | radeon_fbdev_fini(rdev); |
| 1479 | kfree(rdev->mode_info.bios_hardcoded_edid); | 1489 | kfree(rdev->mode_info.bios_hardcoded_edid); |
| 1480 | radeon_pm_fini(rdev); | ||
| 1481 | 1490 | ||
| 1482 | if (rdev->mode_info.mode_config_initialized) { | 1491 | if (rdev->mode_info.mode_config_initialized) { |
| 1483 | radeon_afmt_fini(rdev); | 1492 | radeon_afmt_fini(rdev); |
| @@ -1601,6 +1610,7 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, | |||
| 1601 | * | 1610 | * |
| 1602 | * \param dev Device to query. | 1611 | * \param dev Device to query. |
| 1603 | * \param crtc Crtc to query. | 1612 | * \param crtc Crtc to query. |
| 1613 | * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0). | ||
| 1604 | * \param *vpos Location where vertical scanout position should be stored. | 1614 | * \param *vpos Location where vertical scanout position should be stored. |
| 1605 | * \param *hpos Location where horizontal scanout position should go. | 1615 | * \param *hpos Location where horizontal scanout position should go. |
| 1606 | * \param *stime Target location for timestamp taken immediately before | 1616 | * \param *stime Target location for timestamp taken immediately before |
| @@ -1622,8 +1632,8 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, | |||
| 1622 | * unknown small number of scanlines wrt. real scanout position. | 1632 | * unknown small number of scanlines wrt. real scanout position. |
| 1623 | * | 1633 | * |
| 1624 | */ | 1634 | */ |
| 1625 | int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int *hpos, | 1635 | int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int flags, |
| 1626 | ktime_t *stime, ktime_t *etime) | 1636 | int *vpos, int *hpos, ktime_t *stime, ktime_t *etime) |
| 1627 | { | 1637 | { |
| 1628 | u32 stat_crtc = 0, vbl = 0, position = 0; | 1638 | u32 stat_crtc = 0, vbl = 0, position = 0; |
| 1629 | int vbl_start, vbl_end, vtotal, ret = 0; | 1639 | int vbl_start, vbl_end, vtotal, ret = 0; |
| @@ -1765,5 +1775,27 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int | |||
| 1765 | if (in_vbl) | 1775 | if (in_vbl) |
| 1766 | ret |= DRM_SCANOUTPOS_INVBL; | 1776 | ret |= DRM_SCANOUTPOS_INVBL; |
| 1767 | 1777 | ||
| 1778 | /* Is vpos outside nominal vblank area, but less than | ||
| 1779 | * 1/100 of a frame height away from start of vblank? | ||
| 1780 | * If so, assume this isn't a massively delayed vblank | ||
| 1781 | * interrupt, but a vblank interrupt that fired a few | ||
| 1782 | * microseconds before true start of vblank. Compensate | ||
| 1783 | * by adding a full frame duration to the final timestamp. | ||
| 1784 | * Happens, e.g., on ATI R500, R600. | ||
| 1785 | * | ||
| 1786 | * We only do this if DRM_CALLED_FROM_VBLIRQ. | ||
| 1787 | */ | ||
| 1788 | if ((flags & DRM_CALLED_FROM_VBLIRQ) && !in_vbl) { | ||
| 1789 | vbl_start = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay; | ||
| 1790 | vtotal = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal; | ||
| 1791 | |||
| 1792 | if (vbl_start - *vpos < vtotal / 100) { | ||
| 1793 | *vpos -= vtotal; | ||
| 1794 | |||
| 1795 | /* Signal this correction as "applied". */ | ||
| 1796 | ret |= 0x8; | ||
| 1797 | } | ||
| 1798 | } | ||
| 1799 | |||
| 1768 | return ret; | 1800 | return ret; |
| 1769 | } | 1801 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index db39ea36bf22..ec8c388eec17 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
| @@ -102,13 +102,14 @@ int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc, | |||
| 102 | void radeon_driver_irq_preinstall_kms(struct drm_device *dev); | 102 | void radeon_driver_irq_preinstall_kms(struct drm_device *dev); |
| 103 | int radeon_driver_irq_postinstall_kms(struct drm_device *dev); | 103 | int radeon_driver_irq_postinstall_kms(struct drm_device *dev); |
| 104 | void radeon_driver_irq_uninstall_kms(struct drm_device *dev); | 104 | void radeon_driver_irq_uninstall_kms(struct drm_device *dev); |
| 105 | irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS); | 105 | irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg); |
| 106 | void radeon_gem_object_free(struct drm_gem_object *obj); | 106 | void radeon_gem_object_free(struct drm_gem_object *obj); |
| 107 | int radeon_gem_object_open(struct drm_gem_object *obj, | 107 | int radeon_gem_object_open(struct drm_gem_object *obj, |
| 108 | struct drm_file *file_priv); | 108 | struct drm_file *file_priv); |
| 109 | void radeon_gem_object_close(struct drm_gem_object *obj, | 109 | void radeon_gem_object_close(struct drm_gem_object *obj, |
| 110 | struct drm_file *file_priv); | 110 | struct drm_file *file_priv); |
| 111 | extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, | 111 | extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, |
| 112 | unsigned int flags, | ||
| 112 | int *vpos, int *hpos, ktime_t *stime, | 113 | int *vpos, int *hpos, ktime_t *stime, |
| 113 | ktime_t *etime); | 114 | ktime_t *etime); |
| 114 | extern const struct drm_ioctl_desc radeon_ioctls_kms[]; | 115 | extern const struct drm_ioctl_desc radeon_ioctls_kms[]; |
| @@ -168,6 +169,7 @@ int radeon_fastfb = 0; | |||
| 168 | int radeon_dpm = -1; | 169 | int radeon_dpm = -1; |
| 169 | int radeon_aspm = -1; | 170 | int radeon_aspm = -1; |
| 170 | int radeon_runtime_pm = -1; | 171 | int radeon_runtime_pm = -1; |
| 172 | int radeon_hard_reset = 0; | ||
| 171 | 173 | ||
| 172 | MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); | 174 | MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); |
| 173 | module_param_named(no_wb, radeon_no_wb, int, 0444); | 175 | module_param_named(no_wb, radeon_no_wb, int, 0444); |
| @@ -232,6 +234,9 @@ module_param_named(aspm, radeon_aspm, int, 0444); | |||
| 232 | MODULE_PARM_DESC(runpm, "PX runtime pm (1 = force enable, 0 = disable, -1 = PX only default)"); | 234 | MODULE_PARM_DESC(runpm, "PX runtime pm (1 = force enable, 0 = disable, -1 = PX only default)"); |
| 233 | module_param_named(runpm, radeon_runtime_pm, int, 0444); | 235 | module_param_named(runpm, radeon_runtime_pm, int, 0444); |
| 234 | 236 | ||
| 237 | MODULE_PARM_DESC(hard_reset, "PCI config reset (1 = force enable, 0 = disable (default))"); | ||
| 238 | module_param_named(hard_reset, radeon_hard_reset, int, 0444); | ||
| 239 | |||
| 235 | static struct pci_device_id pciidlist[] = { | 240 | static struct pci_device_id pciidlist[] = { |
| 236 | radeon_PCI_IDS | 241 | radeon_PCI_IDS |
| 237 | }; | 242 | }; |
| @@ -400,6 +405,9 @@ static int radeon_pmops_runtime_suspend(struct device *dev) | |||
| 400 | if (radeon_runtime_pm == 0) | 405 | if (radeon_runtime_pm == 0) |
| 401 | return -EINVAL; | 406 | return -EINVAL; |
| 402 | 407 | ||
| 408 | if (radeon_runtime_pm == -1 && !radeon_is_px()) | ||
| 409 | return -EINVAL; | ||
| 410 | |||
| 403 | drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; | 411 | drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; |
| 404 | drm_kms_helper_poll_disable(drm_dev); | 412 | drm_kms_helper_poll_disable(drm_dev); |
| 405 | vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF); | 413 | vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF); |
| @@ -422,6 +430,9 @@ static int radeon_pmops_runtime_resume(struct device *dev) | |||
| 422 | if (radeon_runtime_pm == 0) | 430 | if (radeon_runtime_pm == 0) |
| 423 | return -EINVAL; | 431 | return -EINVAL; |
| 424 | 432 | ||
| 433 | if (radeon_runtime_pm == -1 && !radeon_is_px()) | ||
| 434 | return -EINVAL; | ||
| 435 | |||
| 425 | drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; | 436 | drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; |
| 426 | 437 | ||
| 427 | pci_set_power_state(pdev, PCI_D0); | 438 | pci_set_power_state(pdev, PCI_D0); |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h index 00e0d449021c..dafd812e4571 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.h +++ b/drivers/gpu/drm/radeon/radeon_drv.h | |||
| @@ -405,7 +405,7 @@ extern void radeon_do_release(struct drm_device * dev); | |||
| 405 | extern u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc); | 405 | extern u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc); |
| 406 | extern int radeon_enable_vblank(struct drm_device *dev, int crtc); | 406 | extern int radeon_enable_vblank(struct drm_device *dev, int crtc); |
| 407 | extern void radeon_disable_vblank(struct drm_device *dev, int crtc); | 407 | extern void radeon_disable_vblank(struct drm_device *dev, int crtc); |
| 408 | extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS); | 408 | extern irqreturn_t radeon_driver_irq_handler(int irq, void *arg); |
| 409 | extern void radeon_driver_irq_preinstall(struct drm_device * dev); | 409 | extern void radeon_driver_irq_preinstall(struct drm_device * dev); |
| 410 | extern int radeon_driver_irq_postinstall(struct drm_device *dev); | 410 | extern int radeon_driver_irq_postinstall(struct drm_device *dev); |
| 411 | extern void radeon_driver_irq_uninstall(struct drm_device * dev); | 411 | extern void radeon_driver_irq_uninstall(struct drm_device * dev); |
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index d3a86e43c012..c37cb79a9489 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c | |||
| @@ -121,7 +121,7 @@ int radeon_fence_emit(struct radeon_device *rdev, | |||
| 121 | (*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring]; | 121 | (*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring]; |
| 122 | (*fence)->ring = ring; | 122 | (*fence)->ring = ring; |
| 123 | radeon_fence_ring_emit(rdev, ring, *fence); | 123 | radeon_fence_ring_emit(rdev, ring, *fence); |
| 124 | trace_radeon_fence_emit(rdev->ddev, (*fence)->seq); | 124 | trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq); |
| 125 | return 0; | 125 | return 0; |
| 126 | } | 126 | } |
| 127 | 127 | ||
| @@ -313,7 +313,7 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq, | |||
| 313 | continue; | 313 | continue; |
| 314 | 314 | ||
| 315 | last_seq[i] = atomic64_read(&rdev->fence_drv[i].last_seq); | 315 | last_seq[i] = atomic64_read(&rdev->fence_drv[i].last_seq); |
| 316 | trace_radeon_fence_wait_begin(rdev->ddev, target_seq[i]); | 316 | trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]); |
| 317 | radeon_irq_kms_sw_irq_get(rdev, i); | 317 | radeon_irq_kms_sw_irq_get(rdev, i); |
| 318 | } | 318 | } |
| 319 | 319 | ||
| @@ -332,7 +332,7 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq, | |||
| 332 | continue; | 332 | continue; |
| 333 | 333 | ||
| 334 | radeon_irq_kms_sw_irq_put(rdev, i); | 334 | radeon_irq_kms_sw_irq_put(rdev, i); |
| 335 | trace_radeon_fence_wait_end(rdev->ddev, target_seq[i]); | 335 | trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]); |
| 336 | } | 336 | } |
| 337 | 337 | ||
| 338 | if (unlikely(r < 0)) | 338 | if (unlikely(r < 0)) |
| @@ -841,6 +841,8 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data) | |||
| 841 | if (!rdev->fence_drv[i].initialized) | 841 | if (!rdev->fence_drv[i].initialized) |
| 842 | continue; | 842 | continue; |
| 843 | 843 | ||
| 844 | radeon_fence_process(rdev, i); | ||
| 845 | |||
| 844 | seq_printf(m, "--- ring %d ---\n", i); | 846 | seq_printf(m, "--- ring %d ---\n", i); |
| 845 | seq_printf(m, "Last signaled fence 0x%016llx\n", | 847 | seq_printf(m, "Last signaled fence 0x%016llx\n", |
| 846 | (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq)); | 848 | (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq)); |
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index 96e440061bdb..a8f9b463bf2a 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c | |||
| @@ -713,7 +713,7 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev, | |||
| 713 | unsigned i; | 713 | unsigned i; |
| 714 | 714 | ||
| 715 | /* check if the id is still valid */ | 715 | /* check if the id is still valid */ |
| 716 | if (vm->fence && vm->fence == rdev->vm_manager.active[vm->id]) | 716 | if (vm->last_id_use && vm->last_id_use == rdev->vm_manager.active[vm->id]) |
| 717 | return NULL; | 717 | return NULL; |
| 718 | 718 | ||
| 719 | /* we definately need to flush */ | 719 | /* we definately need to flush */ |
| @@ -726,6 +726,7 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev, | |||
| 726 | if (fence == NULL) { | 726 | if (fence == NULL) { |
| 727 | /* found a free one */ | 727 | /* found a free one */ |
| 728 | vm->id = i; | 728 | vm->id = i; |
| 729 | trace_radeon_vm_grab_id(vm->id, ring); | ||
| 729 | return NULL; | 730 | return NULL; |
| 730 | } | 731 | } |
| 731 | 732 | ||
| @@ -769,6 +770,9 @@ void radeon_vm_fence(struct radeon_device *rdev, | |||
| 769 | 770 | ||
| 770 | radeon_fence_unref(&vm->fence); | 771 | radeon_fence_unref(&vm->fence); |
| 771 | vm->fence = radeon_fence_ref(fence); | 772 | vm->fence = radeon_fence_ref(fence); |
| 773 | |||
| 774 | radeon_fence_unref(&vm->last_id_use); | ||
| 775 | vm->last_id_use = radeon_fence_ref(fence); | ||
| 772 | } | 776 | } |
| 773 | 777 | ||
| 774 | /** | 778 | /** |
| @@ -1303,6 +1307,8 @@ void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm) | |||
| 1303 | { | 1307 | { |
| 1304 | vm->id = 0; | 1308 | vm->id = 0; |
| 1305 | vm->fence = NULL; | 1309 | vm->fence = NULL; |
| 1310 | vm->last_flush = NULL; | ||
| 1311 | vm->last_id_use = NULL; | ||
| 1306 | mutex_init(&vm->mutex); | 1312 | mutex_init(&vm->mutex); |
| 1307 | INIT_LIST_HEAD(&vm->list); | 1313 | INIT_LIST_HEAD(&vm->list); |
| 1308 | INIT_LIST_HEAD(&vm->va); | 1314 | INIT_LIST_HEAD(&vm->va); |
| @@ -1341,5 +1347,6 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm) | |||
| 1341 | } | 1347 | } |
| 1342 | radeon_fence_unref(&vm->fence); | 1348 | radeon_fence_unref(&vm->fence); |
| 1343 | radeon_fence_unref(&vm->last_flush); | 1349 | radeon_fence_unref(&vm->last_flush); |
| 1350 | radeon_fence_unref(&vm->last_id_use); | ||
| 1344 | mutex_unlock(&vm->mutex); | 1351 | mutex_unlock(&vm->mutex); |
| 1345 | } | 1352 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 805c5e566b9a..b96c819024b3 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c | |||
| @@ -86,7 +86,7 @@ retry: | |||
| 86 | return 0; | 86 | return 0; |
| 87 | } | 87 | } |
| 88 | 88 | ||
| 89 | int radeon_gem_set_domain(struct drm_gem_object *gobj, | 89 | static int radeon_gem_set_domain(struct drm_gem_object *gobj, |
| 90 | uint32_t rdomain, uint32_t wdomain) | 90 | uint32_t rdomain, uint32_t wdomain) |
| 91 | { | 91 | { |
| 92 | struct radeon_bo *robj; | 92 | struct radeon_bo *robj; |
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index fc60b74ee304..e24ca6ab96de 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c | |||
| @@ -1020,6 +1020,9 @@ void radeon_i2c_destroy(struct radeon_i2c_chan *i2c) | |||
| 1020 | /* Add the default buses */ | 1020 | /* Add the default buses */ |
| 1021 | void radeon_i2c_init(struct radeon_device *rdev) | 1021 | void radeon_i2c_init(struct radeon_device *rdev) |
| 1022 | { | 1022 | { |
| 1023 | if (radeon_hw_i2c) | ||
| 1024 | DRM_INFO("hw_i2c forced on, you may experience display detection problems!\n"); | ||
| 1025 | |||
| 1023 | if (rdev->is_atom_bios) | 1026 | if (rdev->is_atom_bios) |
| 1024 | radeon_atombios_i2c_init(rdev); | 1027 | radeon_atombios_i2c_init(rdev); |
| 1025 | else | 1028 | else |
diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c index 8d68e972789a..244b19bab2e7 100644 --- a/drivers/gpu/drm/radeon/radeon_irq.c +++ b/drivers/gpu/drm/radeon/radeon_irq.c | |||
| @@ -181,7 +181,7 @@ static u32 radeon_acknowledge_irqs(drm_radeon_private_t *dev_priv, u32 *r500_dis | |||
| 181 | * tied to dma at all, this is just a hangover from dri prehistory. | 181 | * tied to dma at all, this is just a hangover from dri prehistory. |
| 182 | */ | 182 | */ |
| 183 | 183 | ||
| 184 | irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS) | 184 | irqreturn_t radeon_driver_irq_handler(int irq, void *arg) |
| 185 | { | 185 | { |
| 186 | struct drm_device *dev = (struct drm_device *) arg; | 186 | struct drm_device *dev = (struct drm_device *) arg; |
| 187 | drm_radeon_private_t *dev_priv = | 187 | drm_radeon_private_t *dev_priv = |
| @@ -203,7 +203,7 @@ irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS) | |||
| 203 | 203 | ||
| 204 | /* SW interrupt */ | 204 | /* SW interrupt */ |
| 205 | if (stat & RADEON_SW_INT_TEST) | 205 | if (stat & RADEON_SW_INT_TEST) |
| 206 | DRM_WAKEUP(&dev_priv->swi_queue); | 206 | wake_up(&dev_priv->swi_queue); |
| 207 | 207 | ||
| 208 | /* VBLANK interrupt */ | 208 | /* VBLANK interrupt */ |
| 209 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) { | 209 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) { |
| @@ -249,7 +249,7 @@ static int radeon_wait_irq(struct drm_device * dev, int swi_nr) | |||
| 249 | 249 | ||
| 250 | dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; | 250 | dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; |
| 251 | 251 | ||
| 252 | DRM_WAIT_ON(ret, dev_priv->swi_queue, 3 * DRM_HZ, | 252 | DRM_WAIT_ON(ret, dev_priv->swi_queue, 3 * HZ, |
| 253 | RADEON_READ(RADEON_LAST_SWI_REG) >= swi_nr); | 253 | RADEON_READ(RADEON_LAST_SWI_REG) >= swi_nr); |
| 254 | 254 | ||
| 255 | return ret; | 255 | return ret; |
| @@ -302,7 +302,7 @@ int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_pr | |||
| 302 | 302 | ||
| 303 | result = radeon_emit_irq(dev); | 303 | result = radeon_emit_irq(dev); |
| 304 | 304 | ||
| 305 | if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) { | 305 | if (copy_to_user(emit->irq_seq, &result, sizeof(int))) { |
| 306 | DRM_ERROR("copy_to_user\n"); | 306 | DRM_ERROR("copy_to_user\n"); |
| 307 | return -EFAULT; | 307 | return -EFAULT; |
| 308 | } | 308 | } |
| @@ -354,7 +354,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev) | |||
| 354 | (drm_radeon_private_t *) dev->dev_private; | 354 | (drm_radeon_private_t *) dev->dev_private; |
| 355 | 355 | ||
| 356 | atomic_set(&dev_priv->swi_emitted, 0); | 356 | atomic_set(&dev_priv->swi_emitted, 0); |
| 357 | DRM_INIT_WAITQUEUE(&dev_priv->swi_queue); | 357 | init_waitqueue_head(&dev_priv->swi_queue); |
| 358 | 358 | ||
| 359 | dev->max_vblank_count = 0x001fffff; | 359 | dev->max_vblank_count = 0x001fffff; |
| 360 | 360 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index ec6240b00469..089c9ffb0aa9 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c | |||
| @@ -39,13 +39,13 @@ | |||
| 39 | /** | 39 | /** |
| 40 | * radeon_driver_irq_handler_kms - irq handler for KMS | 40 | * radeon_driver_irq_handler_kms - irq handler for KMS |
| 41 | * | 41 | * |
| 42 | * @DRM_IRQ_ARGS: args | 42 | * @int irq, void *arg: args |
| 43 | * | 43 | * |
| 44 | * This is the irq handler for the radeon KMS driver (all asics). | 44 | * This is the irq handler for the radeon KMS driver (all asics). |
| 45 | * radeon_irq_process is a macro that points to the per-asic | 45 | * radeon_irq_process is a macro that points to the per-asic |
| 46 | * irq handler callback. | 46 | * irq handler callback. |
| 47 | */ | 47 | */ |
| 48 | irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS) | 48 | irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg) |
| 49 | { | 49 | { |
| 50 | struct drm_device *dev = (struct drm_device *) arg; | 50 | struct drm_device *dev = (struct drm_device *) arg; |
| 51 | struct radeon_device *rdev = dev->dev_private; | 51 | struct radeon_device *rdev = dev->dev_private; |
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 21d593c0ecaf..114d1672d616 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c | |||
| @@ -191,7 +191,7 @@ static void radeon_set_filp_rights(struct drm_device *dev, | |||
| 191 | * etc. (all asics). | 191 | * etc. (all asics). |
| 192 | * Returns 0 on success, -EINVAL on failure. | 192 | * Returns 0 on success, -EINVAL on failure. |
| 193 | */ | 193 | */ |
| 194 | int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | 194 | static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) |
| 195 | { | 195 | { |
| 196 | struct radeon_device *rdev = dev->dev_private; | 196 | struct radeon_device *rdev = dev->dev_private; |
| 197 | struct drm_radeon_info *info = data; | 197 | struct drm_radeon_info *info = data; |
| @@ -223,7 +223,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
| 223 | *value = rdev->accel_working; | 223 | *value = rdev->accel_working; |
| 224 | break; | 224 | break; |
| 225 | case RADEON_INFO_CRTC_FROM_ID: | 225 | case RADEON_INFO_CRTC_FROM_ID: |
| 226 | if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) { | 226 | if (copy_from_user(value, value_ptr, sizeof(uint32_t))) { |
| 227 | DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__); | 227 | DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__); |
| 228 | return -EFAULT; | 228 | return -EFAULT; |
| 229 | } | 229 | } |
| @@ -269,7 +269,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
| 269 | * | 269 | * |
| 270 | * When returning, the value is 1 if filp owns hyper-z access, | 270 | * When returning, the value is 1 if filp owns hyper-z access, |
| 271 | * 0 otherwise. */ | 271 | * 0 otherwise. */ |
| 272 | if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) { | 272 | if (copy_from_user(value, value_ptr, sizeof(uint32_t))) { |
| 273 | DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__); | 273 | DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__); |
| 274 | return -EFAULT; | 274 | return -EFAULT; |
| 275 | } | 275 | } |
| @@ -281,7 +281,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
| 281 | break; | 281 | break; |
| 282 | case RADEON_INFO_WANT_CMASK: | 282 | case RADEON_INFO_WANT_CMASK: |
| 283 | /* The same logic as Hyper-Z. */ | 283 | /* The same logic as Hyper-Z. */ |
| 284 | if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) { | 284 | if (copy_from_user(value, value_ptr, sizeof(uint32_t))) { |
| 285 | DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__); | 285 | DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__); |
| 286 | return -EFAULT; | 286 | return -EFAULT; |
| 287 | } | 287 | } |
| @@ -417,7 +417,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
| 417 | *value = rdev->fastfb_working; | 417 | *value = rdev->fastfb_working; |
| 418 | break; | 418 | break; |
| 419 | case RADEON_INFO_RING_WORKING: | 419 | case RADEON_INFO_RING_WORKING: |
| 420 | if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) { | 420 | if (copy_from_user(value, value_ptr, sizeof(uint32_t))) { |
| 421 | DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__); | 421 | DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__); |
| 422 | return -EFAULT; | 422 | return -EFAULT; |
| 423 | } | 423 | } |
| @@ -470,11 +470,18 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
| 470 | DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n"); | 470 | DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n"); |
| 471 | } | 471 | } |
| 472 | break; | 472 | break; |
| 473 | case RADEON_INFO_MAX_SCLK: | ||
| 474 | if ((rdev->pm.pm_method == PM_METHOD_DPM) && | ||
| 475 | rdev->pm.dpm_enabled) | ||
| 476 | *value = rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk * 10; | ||
| 477 | else | ||
| 478 | *value = rdev->pm.default_sclk * 10; | ||
| 479 | break; | ||
| 473 | default: | 480 | default: |
| 474 | DRM_DEBUG_KMS("Invalid request %d\n", info->request); | 481 | DRM_DEBUG_KMS("Invalid request %d\n", info->request); |
| 475 | return -EINVAL; | 482 | return -EINVAL; |
| 476 | } | 483 | } |
| 477 | if (DRM_COPY_TO_USER(value_ptr, (char*)value, value_size)) { | 484 | if (copy_to_user(value_ptr, (char*)value, value_size)) { |
| 478 | DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__); | 485 | DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__); |
| 479 | return -EFAULT; | 486 | return -EFAULT; |
| 480 | } | 487 | } |
| @@ -712,11 +719,12 @@ int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc, | |||
| 712 | /* Helper routine in DRM core does all the work: */ | 719 | /* Helper routine in DRM core does all the work: */ |
| 713 | return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error, | 720 | return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error, |
| 714 | vblank_time, flags, | 721 | vblank_time, flags, |
| 715 | drmcrtc); | 722 | drmcrtc, &drmcrtc->hwmode); |
| 716 | } | 723 | } |
| 717 | 724 | ||
| 718 | #define KMS_INVALID_IOCTL(name) \ | 725 | #define KMS_INVALID_IOCTL(name) \ |
| 719 | int name(struct drm_device *dev, void *data, struct drm_file *file_priv)\ | 726 | static int name(struct drm_device *dev, void *data, struct drm_file \ |
| 727 | *file_priv) \ | ||
| 720 | { \ | 728 | { \ |
| 721 | DRM_ERROR("invalid ioctl with kms %s\n", __func__); \ | 729 | DRM_ERROR("invalid ioctl with kms %s\n", __func__); \ |
| 722 | return -EINVAL; \ | 730 | return -EINVAL; \ |
diff --git a/drivers/gpu/drm/radeon/radeon_mem.c b/drivers/gpu/drm/radeon/radeon_mem.c index d54d2d7c9031..146d253f1131 100644 --- a/drivers/gpu/drm/radeon/radeon_mem.c +++ b/drivers/gpu/drm/radeon/radeon_mem.c | |||
| @@ -243,7 +243,7 @@ int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_p | |||
| 243 | if (!block) | 243 | if (!block) |
| 244 | return -ENOMEM; | 244 | return -ENOMEM; |
| 245 | 245 | ||
| 246 | if (DRM_COPY_TO_USER(alloc->region_offset, &block->start, | 246 | if (copy_to_user(alloc->region_offset, &block->start, |
| 247 | sizeof(int))) { | 247 | sizeof(int))) { |
| 248 | DRM_ERROR("copy_to_user\n"); | 248 | DRM_ERROR("copy_to_user\n"); |
| 249 | return -EFAULT; | 249 | return -EFAULT; |
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 3f0dd664af90..402dbe32c234 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
| @@ -291,6 +291,7 @@ struct radeon_tv_regs { | |||
| 291 | 291 | ||
| 292 | struct radeon_atom_ss { | 292 | struct radeon_atom_ss { |
| 293 | uint16_t percentage; | 293 | uint16_t percentage; |
| 294 | uint16_t percentage_divider; | ||
| 294 | uint8_t type; | 295 | uint8_t type; |
| 295 | uint16_t step; | 296 | uint16_t step; |
| 296 | uint8_t delay; | 297 | uint8_t delay; |
| @@ -624,6 +625,30 @@ struct atom_voltage_table | |||
| 624 | struct atom_voltage_table_entry entries[MAX_VOLTAGE_ENTRIES]; | 625 | struct atom_voltage_table_entry entries[MAX_VOLTAGE_ENTRIES]; |
| 625 | }; | 626 | }; |
| 626 | 627 | ||
| 628 | |||
| 629 | extern void | ||
| 630 | radeon_add_atom_connector(struct drm_device *dev, | ||
| 631 | uint32_t connector_id, | ||
| 632 | uint32_t supported_device, | ||
| 633 | int connector_type, | ||
| 634 | struct radeon_i2c_bus_rec *i2c_bus, | ||
| 635 | uint32_t igp_lane_info, | ||
| 636 | uint16_t connector_object_id, | ||
| 637 | struct radeon_hpd *hpd, | ||
| 638 | struct radeon_router *router); | ||
| 639 | extern void | ||
| 640 | radeon_add_legacy_connector(struct drm_device *dev, | ||
| 641 | uint32_t connector_id, | ||
| 642 | uint32_t supported_device, | ||
| 643 | int connector_type, | ||
| 644 | struct radeon_i2c_bus_rec *i2c_bus, | ||
| 645 | uint16_t connector_object_id, | ||
| 646 | struct radeon_hpd *hpd); | ||
| 647 | extern uint32_t | ||
| 648 | radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, | ||
| 649 | uint8_t dac); | ||
| 650 | extern void radeon_link_encoder_connector(struct drm_device *dev); | ||
| 651 | |||
| 627 | extern enum radeon_tv_std | 652 | extern enum radeon_tv_std |
| 628 | radeon_combios_get_tv_info(struct radeon_device *rdev); | 653 | radeon_combios_get_tv_info(struct radeon_device *rdev); |
| 629 | extern enum radeon_tv_std | 654 | extern enum radeon_tv_std |
| @@ -631,6 +656,15 @@ radeon_atombios_get_tv_info(struct radeon_device *rdev); | |||
| 631 | extern void radeon_atombios_get_default_voltages(struct radeon_device *rdev, | 656 | extern void radeon_atombios_get_default_voltages(struct radeon_device *rdev, |
| 632 | u16 *vddc, u16 *vddci, u16 *mvdd); | 657 | u16 *vddc, u16 *vddci, u16 *mvdd); |
| 633 | 658 | ||
| 659 | extern void | ||
| 660 | radeon_combios_connected_scratch_regs(struct drm_connector *connector, | ||
| 661 | struct drm_encoder *encoder, | ||
| 662 | bool connected); | ||
| 663 | extern void | ||
| 664 | radeon_atombios_connected_scratch_regs(struct drm_connector *connector, | ||
| 665 | struct drm_encoder *encoder, | ||
| 666 | bool connected); | ||
| 667 | |||
| 634 | extern struct drm_connector * | 668 | extern struct drm_connector * |
| 635 | radeon_get_connector_for_encoder(struct drm_encoder *encoder); | 669 | radeon_get_connector_for_encoder(struct drm_encoder *encoder); |
| 636 | extern struct drm_connector * | 670 | extern struct drm_connector * |
| @@ -666,6 +700,7 @@ extern void radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder); | |||
| 666 | extern struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder); | 700 | extern struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder); |
| 667 | extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | 701 | extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, |
| 668 | u8 write_byte, u8 *read_byte); | 702 | u8 write_byte, u8 *read_byte); |
| 703 | void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le); | ||
| 669 | 704 | ||
| 670 | extern void radeon_i2c_init(struct radeon_device *rdev); | 705 | extern void radeon_i2c_init(struct radeon_device *rdev); |
| 671 | extern void radeon_i2c_fini(struct radeon_device *rdev); | 706 | extern void radeon_i2c_fini(struct radeon_device *rdev); |
| @@ -766,6 +801,7 @@ extern int radeon_crtc_cursor_move(struct drm_crtc *crtc, | |||
| 766 | int x, int y); | 801 | int x, int y); |
| 767 | 802 | ||
| 768 | extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, | 803 | extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, |
| 804 | unsigned int flags, | ||
| 769 | int *vpos, int *hpos, ktime_t *stime, | 805 | int *vpos, int *hpos, ktime_t *stime, |
| 770 | ktime_t *etime); | 806 | ktime_t *etime); |
| 771 | 807 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index c0fa4aa9ceea..08595cf90b01 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
| @@ -46,7 +46,7 @@ static void radeon_bo_clear_surface_reg(struct radeon_bo *bo); | |||
| 46 | * function are calling it. | 46 | * function are calling it. |
| 47 | */ | 47 | */ |
| 48 | 48 | ||
| 49 | void radeon_bo_clear_va(struct radeon_bo *bo) | 49 | static void radeon_bo_clear_va(struct radeon_bo *bo) |
| 50 | { | 50 | { |
| 51 | struct radeon_bo_va *bo_va, *tmp; | 51 | struct radeon_bo_va *bo_va, *tmp; |
| 52 | 52 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 984097b907ef..8e8153e471c2 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
| @@ -924,6 +924,10 @@ void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable) | |||
| 924 | 924 | ||
| 925 | if (rdev->asic->dpm.powergate_uvd) { | 925 | if (rdev->asic->dpm.powergate_uvd) { |
| 926 | mutex_lock(&rdev->pm.mutex); | 926 | mutex_lock(&rdev->pm.mutex); |
| 927 | /* don't powergate anything if we | ||
| 928 | have active but pause streams */ | ||
| 929 | enable |= rdev->pm.dpm.sd > 0; | ||
| 930 | enable |= rdev->pm.dpm.hd > 0; | ||
| 927 | /* enable/disable UVD */ | 931 | /* enable/disable UVD */ |
| 928 | radeon_dpm_powergate_uvd(rdev, !enable); | 932 | radeon_dpm_powergate_uvd(rdev, !enable); |
| 929 | mutex_unlock(&rdev->pm.mutex); | 933 | mutex_unlock(&rdev->pm.mutex); |
| @@ -1010,8 +1014,10 @@ static void radeon_pm_resume_old(struct radeon_device *rdev) | |||
| 1010 | rdev->pm.current_clock_mode_index = 0; | 1014 | rdev->pm.current_clock_mode_index = 0; |
| 1011 | rdev->pm.current_sclk = rdev->pm.default_sclk; | 1015 | rdev->pm.current_sclk = rdev->pm.default_sclk; |
| 1012 | rdev->pm.current_mclk = rdev->pm.default_mclk; | 1016 | rdev->pm.current_mclk = rdev->pm.default_mclk; |
| 1013 | rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; | 1017 | if (rdev->pm.power_state) { |
| 1014 | rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci; | 1018 | rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; |
| 1019 | rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci; | ||
| 1020 | } | ||
| 1015 | if (rdev->pm.pm_method == PM_METHOD_DYNPM | 1021 | if (rdev->pm.pm_method == PM_METHOD_DYNPM |
| 1016 | && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) { | 1022 | && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) { |
| 1017 | rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; | 1023 | rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; |
| @@ -1032,25 +1038,27 @@ static void radeon_pm_resume_dpm(struct radeon_device *rdev) | |||
| 1032 | radeon_dpm_setup_asic(rdev); | 1038 | radeon_dpm_setup_asic(rdev); |
| 1033 | ret = radeon_dpm_enable(rdev); | 1039 | ret = radeon_dpm_enable(rdev); |
| 1034 | mutex_unlock(&rdev->pm.mutex); | 1040 | mutex_unlock(&rdev->pm.mutex); |
| 1035 | if (ret) { | 1041 | if (ret) |
| 1036 | DRM_ERROR("radeon: dpm resume failed\n"); | 1042 | goto dpm_resume_fail; |
| 1037 | if ((rdev->family >= CHIP_BARTS) && | 1043 | rdev->pm.dpm_enabled = true; |
| 1038 | (rdev->family <= CHIP_CAYMAN) && | 1044 | radeon_pm_compute_clocks(rdev); |
| 1039 | rdev->mc_fw) { | 1045 | return; |
| 1040 | if (rdev->pm.default_vddc) | 1046 | |
| 1041 | radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, | 1047 | dpm_resume_fail: |
| 1042 | SET_VOLTAGE_TYPE_ASIC_VDDC); | 1048 | DRM_ERROR("radeon: dpm resume failed\n"); |
| 1043 | if (rdev->pm.default_vddci) | 1049 | if ((rdev->family >= CHIP_BARTS) && |
| 1044 | radeon_atom_set_voltage(rdev, rdev->pm.default_vddci, | 1050 | (rdev->family <= CHIP_CAYMAN) && |
| 1045 | SET_VOLTAGE_TYPE_ASIC_VDDCI); | 1051 | rdev->mc_fw) { |
| 1046 | if (rdev->pm.default_sclk) | 1052 | if (rdev->pm.default_vddc) |
| 1047 | radeon_set_engine_clock(rdev, rdev->pm.default_sclk); | 1053 | radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, |
| 1048 | if (rdev->pm.default_mclk) | 1054 | SET_VOLTAGE_TYPE_ASIC_VDDC); |
| 1049 | radeon_set_memory_clock(rdev, rdev->pm.default_mclk); | 1055 | if (rdev->pm.default_vddci) |
| 1050 | } | 1056 | radeon_atom_set_voltage(rdev, rdev->pm.default_vddci, |
| 1051 | } else { | 1057 | SET_VOLTAGE_TYPE_ASIC_VDDCI); |
| 1052 | rdev->pm.dpm_enabled = true; | 1058 | if (rdev->pm.default_sclk) |
| 1053 | radeon_pm_compute_clocks(rdev); | 1059 | radeon_set_engine_clock(rdev, rdev->pm.default_sclk); |
| 1060 | if (rdev->pm.default_mclk) | ||
| 1061 | radeon_set_memory_clock(rdev, rdev->pm.default_mclk); | ||
| 1054 | } | 1062 | } |
| 1055 | } | 1063 | } |
| 1056 | 1064 | ||
| @@ -1170,51 +1178,50 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev) | |||
| 1170 | radeon_dpm_setup_asic(rdev); | 1178 | radeon_dpm_setup_asic(rdev); |
| 1171 | ret = radeon_dpm_enable(rdev); | 1179 | ret = radeon_dpm_enable(rdev); |
| 1172 | mutex_unlock(&rdev->pm.mutex); | 1180 | mutex_unlock(&rdev->pm.mutex); |
| 1173 | if (ret) { | 1181 | if (ret) |
| 1174 | rdev->pm.dpm_enabled = false; | 1182 | goto dpm_failed; |
| 1175 | if ((rdev->family >= CHIP_BARTS) && | ||
| 1176 | (rdev->family <= CHIP_CAYMAN) && | ||
| 1177 | rdev->mc_fw) { | ||
| 1178 | if (rdev->pm.default_vddc) | ||
| 1179 | radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, | ||
| 1180 | SET_VOLTAGE_TYPE_ASIC_VDDC); | ||
| 1181 | if (rdev->pm.default_vddci) | ||
| 1182 | radeon_atom_set_voltage(rdev, rdev->pm.default_vddci, | ||
| 1183 | SET_VOLTAGE_TYPE_ASIC_VDDCI); | ||
| 1184 | if (rdev->pm.default_sclk) | ||
| 1185 | radeon_set_engine_clock(rdev, rdev->pm.default_sclk); | ||
| 1186 | if (rdev->pm.default_mclk) | ||
| 1187 | radeon_set_memory_clock(rdev, rdev->pm.default_mclk); | ||
| 1188 | } | ||
| 1189 | DRM_ERROR("radeon: dpm initialization failed\n"); | ||
| 1190 | return ret; | ||
| 1191 | } | ||
| 1192 | rdev->pm.dpm_enabled = true; | 1183 | rdev->pm.dpm_enabled = true; |
| 1193 | radeon_pm_compute_clocks(rdev); | ||
| 1194 | 1184 | ||
| 1195 | if (rdev->pm.num_power_states > 1) { | 1185 | ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state); |
| 1196 | ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state); | 1186 | if (ret) |
| 1197 | if (ret) | 1187 | DRM_ERROR("failed to create device file for dpm state\n"); |
| 1198 | DRM_ERROR("failed to create device file for dpm state\n"); | 1188 | ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level); |
| 1199 | ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level); | 1189 | if (ret) |
| 1200 | if (ret) | 1190 | DRM_ERROR("failed to create device file for dpm state\n"); |
| 1201 | DRM_ERROR("failed to create device file for dpm state\n"); | 1191 | /* XXX: these are noops for dpm but are here for backwards compat */ |
| 1202 | /* XXX: these are noops for dpm but are here for backwards compat */ | 1192 | ret = device_create_file(rdev->dev, &dev_attr_power_profile); |
| 1203 | ret = device_create_file(rdev->dev, &dev_attr_power_profile); | 1193 | if (ret) |
| 1204 | if (ret) | 1194 | DRM_ERROR("failed to create device file for power profile\n"); |
| 1205 | DRM_ERROR("failed to create device file for power profile\n"); | 1195 | ret = device_create_file(rdev->dev, &dev_attr_power_method); |
| 1206 | ret = device_create_file(rdev->dev, &dev_attr_power_method); | 1196 | if (ret) |
| 1207 | if (ret) | 1197 | DRM_ERROR("failed to create device file for power method\n"); |
| 1208 | DRM_ERROR("failed to create device file for power method\n"); | ||
| 1209 | |||
| 1210 | if (radeon_debugfs_pm_init(rdev)) { | ||
| 1211 | DRM_ERROR("Failed to register debugfs file for dpm!\n"); | ||
| 1212 | } | ||
| 1213 | 1198 | ||
| 1214 | DRM_INFO("radeon: dpm initialized\n"); | 1199 | if (radeon_debugfs_pm_init(rdev)) { |
| 1200 | DRM_ERROR("Failed to register debugfs file for dpm!\n"); | ||
| 1215 | } | 1201 | } |
| 1216 | 1202 | ||
| 1203 | DRM_INFO("radeon: dpm initialized\n"); | ||
| 1204 | |||
| 1217 | return 0; | 1205 | return 0; |
| 1206 | |||
| 1207 | dpm_failed: | ||
| 1208 | rdev->pm.dpm_enabled = false; | ||
| 1209 | if ((rdev->family >= CHIP_BARTS) && | ||
| 1210 | (rdev->family <= CHIP_CAYMAN) && | ||
| 1211 | rdev->mc_fw) { | ||
| 1212 | if (rdev->pm.default_vddc) | ||
| 1213 | radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, | ||
| 1214 | SET_VOLTAGE_TYPE_ASIC_VDDC); | ||
| 1215 | if (rdev->pm.default_vddci) | ||
| 1216 | radeon_atom_set_voltage(rdev, rdev->pm.default_vddci, | ||
| 1217 | SET_VOLTAGE_TYPE_ASIC_VDDCI); | ||
| 1218 | if (rdev->pm.default_sclk) | ||
| 1219 | radeon_set_engine_clock(rdev, rdev->pm.default_sclk); | ||
| 1220 | if (rdev->pm.default_mclk) | ||
| 1221 | radeon_set_memory_clock(rdev, rdev->pm.default_mclk); | ||
| 1222 | } | ||
| 1223 | DRM_ERROR("radeon: dpm initialization failed\n"); | ||
| 1224 | return ret; | ||
| 1218 | } | 1225 | } |
| 1219 | 1226 | ||
| 1220 | int radeon_pm_init(struct radeon_device *rdev) | 1227 | int radeon_pm_init(struct radeon_device *rdev) |
| @@ -1228,11 +1235,10 @@ int radeon_pm_init(struct radeon_device *rdev) | |||
| 1228 | case CHIP_RV670: | 1235 | case CHIP_RV670: |
| 1229 | case CHIP_RS780: | 1236 | case CHIP_RS780: |
| 1230 | case CHIP_RS880: | 1237 | case CHIP_RS880: |
| 1238 | case CHIP_BARTS: | ||
| 1239 | case CHIP_TURKS: | ||
| 1240 | case CHIP_CAICOS: | ||
| 1231 | case CHIP_CAYMAN: | 1241 | case CHIP_CAYMAN: |
| 1232 | case CHIP_BONAIRE: | ||
| 1233 | case CHIP_KABINI: | ||
| 1234 | case CHIP_KAVERI: | ||
| 1235 | case CHIP_HAWAII: | ||
| 1236 | /* DPM requires the RLC, RV770+ dGPU requires SMC */ | 1242 | /* DPM requires the RLC, RV770+ dGPU requires SMC */ |
| 1237 | if (!rdev->rlc_fw) | 1243 | if (!rdev->rlc_fw) |
| 1238 | rdev->pm.pm_method = PM_METHOD_PROFILE; | 1244 | rdev->pm.pm_method = PM_METHOD_PROFILE; |
| @@ -1257,15 +1263,16 @@ int radeon_pm_init(struct radeon_device *rdev) | |||
| 1257 | case CHIP_PALM: | 1263 | case CHIP_PALM: |
| 1258 | case CHIP_SUMO: | 1264 | case CHIP_SUMO: |
| 1259 | case CHIP_SUMO2: | 1265 | case CHIP_SUMO2: |
| 1260 | case CHIP_BARTS: | ||
| 1261 | case CHIP_TURKS: | ||
| 1262 | case CHIP_CAICOS: | ||
| 1263 | case CHIP_ARUBA: | 1266 | case CHIP_ARUBA: |
| 1264 | case CHIP_TAHITI: | 1267 | case CHIP_TAHITI: |
| 1265 | case CHIP_PITCAIRN: | 1268 | case CHIP_PITCAIRN: |
| 1266 | case CHIP_VERDE: | 1269 | case CHIP_VERDE: |
| 1267 | case CHIP_OLAND: | 1270 | case CHIP_OLAND: |
| 1268 | case CHIP_HAINAN: | 1271 | case CHIP_HAINAN: |
| 1272 | case CHIP_BONAIRE: | ||
| 1273 | case CHIP_KABINI: | ||
| 1274 | case CHIP_KAVERI: | ||
| 1275 | case CHIP_HAWAII: | ||
| 1269 | /* DPM requires the RLC, RV770+ dGPU requires SMC */ | 1276 | /* DPM requires the RLC, RV770+ dGPU requires SMC */ |
| 1270 | if (!rdev->rlc_fw) | 1277 | if (!rdev->rlc_fw) |
| 1271 | rdev->pm.pm_method = PM_METHOD_PROFILE; | 1278 | rdev->pm.pm_method = PM_METHOD_PROFILE; |
| @@ -1290,6 +1297,18 @@ int radeon_pm_init(struct radeon_device *rdev) | |||
| 1290 | return radeon_pm_init_old(rdev); | 1297 | return radeon_pm_init_old(rdev); |
| 1291 | } | 1298 | } |
| 1292 | 1299 | ||
| 1300 | int radeon_pm_late_init(struct radeon_device *rdev) | ||
| 1301 | { | ||
| 1302 | int ret = 0; | ||
| 1303 | |||
| 1304 | if (rdev->pm.pm_method == PM_METHOD_DPM) { | ||
| 1305 | mutex_lock(&rdev->pm.mutex); | ||
| 1306 | ret = radeon_dpm_late_enable(rdev); | ||
| 1307 | mutex_unlock(&rdev->pm.mutex); | ||
| 1308 | } | ||
| 1309 | return ret; | ||
| 1310 | } | ||
| 1311 | |||
| 1293 | static void radeon_pm_fini_old(struct radeon_device *rdev) | 1312 | static void radeon_pm_fini_old(struct radeon_device *rdev) |
| 1294 | { | 1313 | { |
| 1295 | if (rdev->pm.num_power_states > 1) { | 1314 | if (rdev->pm.num_power_states > 1) { |
| @@ -1420,6 +1439,9 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev) | |||
| 1420 | struct drm_crtc *crtc; | 1439 | struct drm_crtc *crtc; |
| 1421 | struct radeon_crtc *radeon_crtc; | 1440 | struct radeon_crtc *radeon_crtc; |
| 1422 | 1441 | ||
| 1442 | if (!rdev->pm.dpm_enabled) | ||
| 1443 | return; | ||
| 1444 | |||
| 1423 | mutex_lock(&rdev->pm.mutex); | 1445 | mutex_lock(&rdev->pm.mutex); |
| 1424 | 1446 | ||
| 1425 | /* update active crtc counts */ | 1447 | /* update active crtc counts */ |
| @@ -1464,7 +1486,7 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev) | |||
| 1464 | */ | 1486 | */ |
| 1465 | for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) { | 1487 | for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) { |
| 1466 | if (rdev->pm.active_crtcs & (1 << crtc)) { | 1488 | if (rdev->pm.active_crtcs & (1 << crtc)) { |
| 1467 | vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos, NULL, NULL); | 1489 | vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, 0, &vpos, &hpos, NULL, NULL); |
| 1468 | if ((vbl_status & DRM_SCANOUTPOS_VALID) && | 1490 | if ((vbl_status & DRM_SCANOUTPOS_VALID) && |
| 1469 | !(vbl_status & DRM_SCANOUTPOS_INVBL)) | 1491 | !(vbl_status & DRM_SCANOUTPOS_INVBL)) |
| 1470 | in_vbl = false; | 1492 | in_vbl = false; |
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 9214403ae173..1b783f0e6d3a 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c | |||
| @@ -332,36 +332,6 @@ bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev, | |||
| 332 | } | 332 | } |
| 333 | } | 333 | } |
| 334 | 334 | ||
| 335 | u32 radeon_ring_generic_get_rptr(struct radeon_device *rdev, | ||
| 336 | struct radeon_ring *ring) | ||
| 337 | { | ||
| 338 | u32 rptr; | ||
| 339 | |||
| 340 | if (rdev->wb.enabled) | ||
| 341 | rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]); | ||
| 342 | else | ||
| 343 | rptr = RREG32(ring->rptr_reg); | ||
| 344 | |||
| 345 | return rptr; | ||
| 346 | } | ||
| 347 | |||
| 348 | u32 radeon_ring_generic_get_wptr(struct radeon_device *rdev, | ||
| 349 | struct radeon_ring *ring) | ||
| 350 | { | ||
| 351 | u32 wptr; | ||
| 352 | |||
| 353 | wptr = RREG32(ring->wptr_reg); | ||
| 354 | |||
| 355 | return wptr; | ||
| 356 | } | ||
| 357 | |||
| 358 | void radeon_ring_generic_set_wptr(struct radeon_device *rdev, | ||
| 359 | struct radeon_ring *ring) | ||
| 360 | { | ||
| 361 | WREG32(ring->wptr_reg, ring->wptr); | ||
| 362 | (void)RREG32(ring->wptr_reg); | ||
| 363 | } | ||
| 364 | |||
| 365 | /** | 335 | /** |
| 366 | * radeon_ring_free_size - update the free size | 336 | * radeon_ring_free_size - update the free size |
| 367 | * | 337 | * |
| @@ -463,7 +433,7 @@ void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring) | |||
| 463 | while (ring->wptr & ring->align_mask) { | 433 | while (ring->wptr & ring->align_mask) { |
| 464 | radeon_ring_write(ring, ring->nop); | 434 | radeon_ring_write(ring, ring->nop); |
| 465 | } | 435 | } |
| 466 | DRM_MEMORYBARRIER(); | 436 | mb(); |
| 467 | radeon_ring_set_wptr(rdev, ring); | 437 | radeon_ring_set_wptr(rdev, ring); |
| 468 | } | 438 | } |
| 469 | 439 | ||
| @@ -689,22 +659,18 @@ int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring, | |||
| 689 | * @ring: radeon_ring structure holding ring information | 659 | * @ring: radeon_ring structure holding ring information |
| 690 | * @ring_size: size of the ring | 660 | * @ring_size: size of the ring |
| 691 | * @rptr_offs: offset of the rptr writeback location in the WB buffer | 661 | * @rptr_offs: offset of the rptr writeback location in the WB buffer |
| 692 | * @rptr_reg: MMIO offset of the rptr register | ||
| 693 | * @wptr_reg: MMIO offset of the wptr register | ||
| 694 | * @nop: nop packet for this ring | 662 | * @nop: nop packet for this ring |
| 695 | * | 663 | * |
| 696 | * Initialize the driver information for the selected ring (all asics). | 664 | * Initialize the driver information for the selected ring (all asics). |
| 697 | * Returns 0 on success, error on failure. | 665 | * Returns 0 on success, error on failure. |
| 698 | */ | 666 | */ |
| 699 | int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size, | 667 | int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size, |
| 700 | unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg, u32 nop) | 668 | unsigned rptr_offs, u32 nop) |
| 701 | { | 669 | { |
| 702 | int r; | 670 | int r; |
| 703 | 671 | ||
| 704 | ring->ring_size = ring_size; | 672 | ring->ring_size = ring_size; |
| 705 | ring->rptr_offs = rptr_offs; | 673 | ring->rptr_offs = rptr_offs; |
| 706 | ring->rptr_reg = rptr_reg; | ||
| 707 | ring->wptr_reg = wptr_reg; | ||
| 708 | ring->nop = nop; | 674 | ring->nop = nop; |
| 709 | /* Allocate ring buffer */ | 675 | /* Allocate ring buffer */ |
| 710 | if (ring->ring_obj == NULL) { | 676 | if (ring->ring_obj == NULL) { |
| @@ -790,34 +756,54 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data) | |||
| 790 | struct radeon_device *rdev = dev->dev_private; | 756 | struct radeon_device *rdev = dev->dev_private; |
| 791 | int ridx = *(int*)node->info_ent->data; | 757 | int ridx = *(int*)node->info_ent->data; |
| 792 | struct radeon_ring *ring = &rdev->ring[ridx]; | 758 | struct radeon_ring *ring = &rdev->ring[ridx]; |
| 759 | |||
| 760 | uint32_t rptr, wptr, rptr_next; | ||
| 793 | unsigned count, i, j; | 761 | unsigned count, i, j; |
| 794 | u32 tmp; | ||
| 795 | 762 | ||
| 796 | radeon_ring_free_size(rdev, ring); | 763 | radeon_ring_free_size(rdev, ring); |
| 797 | count = (ring->ring_size / 4) - ring->ring_free_dw; | 764 | count = (ring->ring_size / 4) - ring->ring_free_dw; |
| 798 | tmp = radeon_ring_get_wptr(rdev, ring); | 765 | |
| 799 | seq_printf(m, "wptr(0x%04x): 0x%08x [%5d]\n", ring->wptr_reg, tmp, tmp); | 766 | wptr = radeon_ring_get_wptr(rdev, ring); |
| 800 | tmp = radeon_ring_get_rptr(rdev, ring); | 767 | seq_printf(m, "wptr: 0x%08x [%5d]\n", |
| 801 | seq_printf(m, "rptr(0x%04x): 0x%08x [%5d]\n", ring->rptr_reg, tmp, tmp); | 768 | wptr, wptr); |
| 769 | |||
| 770 | rptr = radeon_ring_get_rptr(rdev, ring); | ||
| 771 | seq_printf(m, "rptr: 0x%08x [%5d]\n", | ||
| 772 | rptr, rptr); | ||
| 773 | |||
| 802 | if (ring->rptr_save_reg) { | 774 | if (ring->rptr_save_reg) { |
| 803 | seq_printf(m, "rptr next(0x%04x): 0x%08x\n", ring->rptr_save_reg, | 775 | rptr_next = RREG32(ring->rptr_save_reg); |
| 804 | RREG32(ring->rptr_save_reg)); | 776 | seq_printf(m, "rptr next(0x%04x): 0x%08x [%5d]\n", |
| 805 | } | 777 | ring->rptr_save_reg, rptr_next, rptr_next); |
| 806 | seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", ring->wptr, ring->wptr); | 778 | } else |
| 807 | seq_printf(m, "driver's copy of the rptr: 0x%08x [%5d]\n", ring->rptr, ring->rptr); | 779 | rptr_next = ~0; |
| 808 | seq_printf(m, "last semaphore signal addr : 0x%016llx\n", ring->last_semaphore_signal_addr); | 780 | |
| 809 | seq_printf(m, "last semaphore wait addr : 0x%016llx\n", ring->last_semaphore_wait_addr); | 781 | seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", |
| 782 | ring->wptr, ring->wptr); | ||
| 783 | seq_printf(m, "driver's copy of the rptr: 0x%08x [%5d]\n", | ||
| 784 | ring->rptr, ring->rptr); | ||
| 785 | seq_printf(m, "last semaphore signal addr : 0x%016llx\n", | ||
| 786 | ring->last_semaphore_signal_addr); | ||
| 787 | seq_printf(m, "last semaphore wait addr : 0x%016llx\n", | ||
| 788 | ring->last_semaphore_wait_addr); | ||
| 810 | seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw); | 789 | seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw); |
| 811 | seq_printf(m, "%u dwords in ring\n", count); | 790 | seq_printf(m, "%u dwords in ring\n", count); |
| 791 | |||
| 792 | if (!ring->ready) | ||
| 793 | return 0; | ||
| 794 | |||
| 812 | /* print 8 dw before current rptr as often it's the last executed | 795 | /* print 8 dw before current rptr as often it's the last executed |
| 813 | * packet that is the root issue | 796 | * packet that is the root issue |
| 814 | */ | 797 | */ |
| 815 | i = (ring->rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask; | 798 | i = (rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask; |
| 816 | if (ring->ready) { | 799 | for (j = 0; j <= (count + 32); j++) { |
| 817 | for (j = 0; j <= (count + 32); j++) { | 800 | seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]); |
| 818 | seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]); | 801 | if (rptr == i) |
| 819 | i = (i + 1) & ring->ptr_mask; | 802 | seq_puts(m, " *"); |
| 820 | } | 803 | if (rptr_next == i) |
| 804 | seq_puts(m, " #"); | ||
| 805 | seq_puts(m, "\n"); | ||
| 806 | i = (i + 1) & ring->ptr_mask; | ||
| 821 | } | 807 | } |
| 822 | return 0; | 808 | return 0; |
| 823 | } | 809 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c index f0bac68254b7..c0625805cdd7 100644 --- a/drivers/gpu/drm/radeon/radeon_sa.c +++ b/drivers/gpu/drm/radeon/radeon_sa.c | |||
| @@ -402,13 +402,15 @@ void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager, | |||
| 402 | 402 | ||
| 403 | spin_lock(&sa_manager->wq.lock); | 403 | spin_lock(&sa_manager->wq.lock); |
| 404 | list_for_each_entry(i, &sa_manager->olist, olist) { | 404 | list_for_each_entry(i, &sa_manager->olist, olist) { |
| 405 | uint64_t soffset = i->soffset + sa_manager->gpu_addr; | ||
| 406 | uint64_t eoffset = i->eoffset + sa_manager->gpu_addr; | ||
| 405 | if (&i->olist == sa_manager->hole) { | 407 | if (&i->olist == sa_manager->hole) { |
| 406 | seq_printf(m, ">"); | 408 | seq_printf(m, ">"); |
| 407 | } else { | 409 | } else { |
| 408 | seq_printf(m, " "); | 410 | seq_printf(m, " "); |
| 409 | } | 411 | } |
| 410 | seq_printf(m, "[0x%08x 0x%08x] size %8d", | 412 | seq_printf(m, "[0x%010llx 0x%010llx] size %8lld", |
| 411 | i->soffset, i->eoffset, i->eoffset - i->soffset); | 413 | soffset, eoffset, eoffset - soffset); |
| 412 | if (i->fence) { | 414 | if (i->fence) { |
| 413 | seq_printf(m, " protected by 0x%016llx on ring %d", | 415 | seq_printf(m, " protected by 0x%016llx on ring %d", |
| 414 | i->fence->seq, i->fence->ring); | 416 | i->fence->seq, i->fence->ring); |
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c index 4d20910899d4..956ab7f14e16 100644 --- a/drivers/gpu/drm/radeon/radeon_state.c +++ b/drivers/gpu/drm/radeon/radeon_state.c | |||
| @@ -1810,7 +1810,7 @@ static int radeon_cp_dispatch_texture(struct drm_device * dev, | |||
| 1810 | } | 1810 | } |
| 1811 | if (!buf) { | 1811 | if (!buf) { |
| 1812 | DRM_DEBUG("EAGAIN\n"); | 1812 | DRM_DEBUG("EAGAIN\n"); |
| 1813 | if (DRM_COPY_TO_USER(tex->image, image, sizeof(*image))) | 1813 | if (copy_to_user(tex->image, image, sizeof(*image))) |
| 1814 | return -EFAULT; | 1814 | return -EFAULT; |
| 1815 | return -EAGAIN; | 1815 | return -EAGAIN; |
| 1816 | } | 1816 | } |
| @@ -1823,7 +1823,7 @@ static int radeon_cp_dispatch_texture(struct drm_device * dev, | |||
| 1823 | 1823 | ||
| 1824 | #define RADEON_COPY_MT(_buf, _data, _width) \ | 1824 | #define RADEON_COPY_MT(_buf, _data, _width) \ |
| 1825 | do { \ | 1825 | do { \ |
| 1826 | if (DRM_COPY_FROM_USER(_buf, _data, (_width))) {\ | 1826 | if (copy_from_user(_buf, _data, (_width))) {\ |
| 1827 | DRM_ERROR("EFAULT on pad, %d bytes\n", (_width)); \ | 1827 | DRM_ERROR("EFAULT on pad, %d bytes\n", (_width)); \ |
| 1828 | return -EFAULT; \ | 1828 | return -EFAULT; \ |
| 1829 | } \ | 1829 | } \ |
| @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file * | |||
| 2168 | if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS) | 2168 | if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS) |
| 2169 | sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS; | 2169 | sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS; |
| 2170 | 2170 | ||
| 2171 | if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes, | 2171 | if (copy_from_user(&depth_boxes, clear->depth_boxes, |
| 2172 | sarea_priv->nbox * sizeof(depth_boxes[0]))) | 2172 | sarea_priv->nbox * sizeof(depth_boxes[0]))) |
| 2173 | return -EFAULT; | 2173 | return -EFAULT; |
| 2174 | 2174 | ||
| @@ -2436,7 +2436,7 @@ static int radeon_cp_texture(struct drm_device *dev, void *data, struct drm_file | |||
| 2436 | return -EINVAL; | 2436 | return -EINVAL; |
| 2437 | } | 2437 | } |
| 2438 | 2438 | ||
| 2439 | if (DRM_COPY_FROM_USER(&image, | 2439 | if (copy_from_user(&image, |
| 2440 | (drm_radeon_tex_image_t __user *) tex->image, | 2440 | (drm_radeon_tex_image_t __user *) tex->image, |
| 2441 | sizeof(image))) | 2441 | sizeof(image))) |
| 2442 | return -EFAULT; | 2442 | return -EFAULT; |
| @@ -2460,7 +2460,7 @@ static int radeon_cp_stipple(struct drm_device *dev, void *data, struct drm_file | |||
| 2460 | 2460 | ||
| 2461 | LOCK_TEST_WITH_RETURN(dev, file_priv); | 2461 | LOCK_TEST_WITH_RETURN(dev, file_priv); |
| 2462 | 2462 | ||
| 2463 | if (DRM_COPY_FROM_USER(&mask, stipple->mask, 32 * sizeof(u32))) | 2463 | if (copy_from_user(&mask, stipple->mask, 32 * sizeof(u32))) |
| 2464 | return -EFAULT; | 2464 | return -EFAULT; |
| 2465 | 2465 | ||
| 2466 | RING_SPACE_TEST_WITH_RETURN(dev_priv); | 2466 | RING_SPACE_TEST_WITH_RETURN(dev_priv); |
| @@ -2585,13 +2585,13 @@ static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file | |||
| 2585 | drm_radeon_prim_t prim; | 2585 | drm_radeon_prim_t prim; |
| 2586 | drm_radeon_tcl_prim_t tclprim; | 2586 | drm_radeon_tcl_prim_t tclprim; |
| 2587 | 2587 | ||
| 2588 | if (DRM_COPY_FROM_USER(&prim, &vertex->prim[i], sizeof(prim))) | 2588 | if (copy_from_user(&prim, &vertex->prim[i], sizeof(prim))) |
| 2589 | return -EFAULT; | 2589 | return -EFAULT; |
| 2590 | 2590 | ||
| 2591 | if (prim.stateidx != laststate) { | 2591 | if (prim.stateidx != laststate) { |
| 2592 | drm_radeon_state_t state; | 2592 | drm_radeon_state_t state; |
| 2593 | 2593 | ||
| 2594 | if (DRM_COPY_FROM_USER(&state, | 2594 | if (copy_from_user(&state, |
| 2595 | &vertex->state[prim.stateidx], | 2595 | &vertex->state[prim.stateidx], |
| 2596 | sizeof(state))) | 2596 | sizeof(state))) |
| 2597 | return -EFAULT; | 2597 | return -EFAULT; |
| @@ -2799,7 +2799,7 @@ static int radeon_emit_packet3_cliprect(struct drm_device *dev, | |||
| 2799 | 2799 | ||
| 2800 | do { | 2800 | do { |
| 2801 | if (i < cmdbuf->nbox) { | 2801 | if (i < cmdbuf->nbox) { |
| 2802 | if (DRM_COPY_FROM_USER(&box, &boxes[i], sizeof(box))) | 2802 | if (copy_from_user(&box, &boxes[i], sizeof(box))) |
| 2803 | return -EFAULT; | 2803 | return -EFAULT; |
| 2804 | /* FIXME The second and subsequent times round | 2804 | /* FIXME The second and subsequent times round |
| 2805 | * this loop, send a WAIT_UNTIL_3D_IDLE before | 2805 | * this loop, send a WAIT_UNTIL_3D_IDLE before |
| @@ -3116,7 +3116,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil | |||
| 3116 | return -EINVAL; | 3116 | return -EINVAL; |
| 3117 | } | 3117 | } |
| 3118 | 3118 | ||
| 3119 | if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) { | 3119 | if (copy_to_user(param->value, &value, sizeof(int))) { |
| 3120 | DRM_ERROR("copy_to_user\n"); | 3120 | DRM_ERROR("copy_to_user\n"); |
| 3121 | return -EFAULT; | 3121 | return -EFAULT; |
| 3122 | } | 3122 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h index 0473257d4078..f749f2c3bbdb 100644 --- a/drivers/gpu/drm/radeon/radeon_trace.h +++ b/drivers/gpu/drm/radeon/radeon_trace.h | |||
| @@ -106,42 +106,45 @@ TRACE_EVENT(radeon_vm_set_page, | |||
| 106 | 106 | ||
| 107 | DECLARE_EVENT_CLASS(radeon_fence_request, | 107 | DECLARE_EVENT_CLASS(radeon_fence_request, |
| 108 | 108 | ||
| 109 | TP_PROTO(struct drm_device *dev, u32 seqno), | 109 | TP_PROTO(struct drm_device *dev, int ring, u32 seqno), |
| 110 | 110 | ||
| 111 | TP_ARGS(dev, seqno), | 111 | TP_ARGS(dev, ring, seqno), |
| 112 | 112 | ||
| 113 | TP_STRUCT__entry( | 113 | TP_STRUCT__entry( |
| 114 | __field(u32, dev) | 114 | __field(u32, dev) |
| 115 | __field(int, ring) | ||
| 115 | __field(u32, seqno) | 116 | __field(u32, seqno) |
| 116 | ), | 117 | ), |
| 117 | 118 | ||
| 118 | TP_fast_assign( | 119 | TP_fast_assign( |
| 119 | __entry->dev = dev->primary->index; | 120 | __entry->dev = dev->primary->index; |
| 121 | __entry->ring = ring; | ||
| 120 | __entry->seqno = seqno; | 122 | __entry->seqno = seqno; |
| 121 | ), | 123 | ), |
| 122 | 124 | ||
| 123 | TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) | 125 | TP_printk("dev=%u, ring=%d, seqno=%u", |
| 126 | __entry->dev, __entry->ring, __entry->seqno) | ||
| 124 | ); | 127 | ); |
| 125 | 128 | ||
| 126 | DEFINE_EVENT(radeon_fence_request, radeon_fence_emit, | 129 | DEFINE_EVENT(radeon_fence_request, radeon_fence_emit, |
| 127 | 130 | ||
| 128 | TP_PROTO(struct drm_device *dev, u32 seqno), | 131 | TP_PROTO(struct drm_device *dev, int ring, u32 seqno), |
| 129 | 132 | ||
| 130 | TP_ARGS(dev, seqno) | 133 | TP_ARGS(dev, ring, seqno) |
| 131 | ); | 134 | ); |
| 132 | 135 | ||
| 133 | DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_begin, | 136 | DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_begin, |
| 134 | 137 | ||
| 135 | TP_PROTO(struct drm_device *dev, u32 seqno), | 138 | TP_PROTO(struct drm_device *dev, int ring, u32 seqno), |
| 136 | 139 | ||
| 137 | TP_ARGS(dev, seqno) | 140 | TP_ARGS(dev, ring, seqno) |
| 138 | ); | 141 | ); |
| 139 | 142 | ||
| 140 | DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_end, | 143 | DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_end, |
| 141 | 144 | ||
| 142 | TP_PROTO(struct drm_device *dev, u32 seqno), | 145 | TP_PROTO(struct drm_device *dev, int ring, u32 seqno), |
| 143 | 146 | ||
| 144 | TP_ARGS(dev, seqno) | 147 | TP_ARGS(dev, ring, seqno) |
| 145 | ); | 148 | ); |
| 146 | 149 | ||
| 147 | DECLARE_EVENT_CLASS(radeon_semaphore_request, | 150 | DECLARE_EVENT_CLASS(radeon_semaphore_request, |
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 71245d6f34a2..77f5b0c3edb8 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
| @@ -39,12 +39,14 @@ | |||
| 39 | #include <linux/seq_file.h> | 39 | #include <linux/seq_file.h> |
| 40 | #include <linux/slab.h> | 40 | #include <linux/slab.h> |
| 41 | #include <linux/swiotlb.h> | 41 | #include <linux/swiotlb.h> |
| 42 | #include <linux/debugfs.h> | ||
| 42 | #include "radeon_reg.h" | 43 | #include "radeon_reg.h" |
| 43 | #include "radeon.h" | 44 | #include "radeon.h" |
| 44 | 45 | ||
| 45 | #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT) | 46 | #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT) |
| 46 | 47 | ||
| 47 | static int radeon_ttm_debugfs_init(struct radeon_device *rdev); | 48 | static int radeon_ttm_debugfs_init(struct radeon_device *rdev); |
| 49 | static void radeon_ttm_debugfs_fini(struct radeon_device *rdev); | ||
| 48 | 50 | ||
| 49 | static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev) | 51 | static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev) |
| 50 | { | 52 | { |
| @@ -142,7 +144,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, | |||
| 142 | man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA; | 144 | man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA; |
| 143 | #if __OS_HAS_AGP | 145 | #if __OS_HAS_AGP |
| 144 | if (rdev->flags & RADEON_IS_AGP) { | 146 | if (rdev->flags & RADEON_IS_AGP) { |
| 145 | if (!(drm_core_has_AGP(rdev->ddev) && rdev->ddev->agp)) { | 147 | if (!rdev->ddev->agp) { |
| 146 | DRM_ERROR("AGP is not enabled for memory type %u\n", | 148 | DRM_ERROR("AGP is not enabled for memory type %u\n", |
| 147 | (unsigned)type); | 149 | (unsigned)type); |
| 148 | return -EINVAL; | 150 | return -EINVAL; |
| @@ -753,6 +755,7 @@ void radeon_ttm_fini(struct radeon_device *rdev) | |||
| 753 | 755 | ||
| 754 | if (!rdev->mman.initialized) | 756 | if (!rdev->mman.initialized) |
| 755 | return; | 757 | return; |
| 758 | radeon_ttm_debugfs_fini(rdev); | ||
| 756 | if (rdev->stollen_vga_memory) { | 759 | if (rdev->stollen_vga_memory) { |
| 757 | r = radeon_bo_reserve(rdev->stollen_vga_memory, false); | 760 | r = radeon_bo_reserve(rdev->stollen_vga_memory, false); |
| 758 | if (r == 0) { | 761 | if (r == 0) { |
| @@ -832,16 +835,15 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma) | |||
| 832 | return 0; | 835 | return 0; |
| 833 | } | 836 | } |
| 834 | 837 | ||
| 835 | |||
| 836 | #define RADEON_DEBUGFS_MEM_TYPES 2 | ||
| 837 | |||
| 838 | #if defined(CONFIG_DEBUG_FS) | 838 | #if defined(CONFIG_DEBUG_FS) |
| 839 | |||
| 839 | static int radeon_mm_dump_table(struct seq_file *m, void *data) | 840 | static int radeon_mm_dump_table(struct seq_file *m, void *data) |
| 840 | { | 841 | { |
| 841 | struct drm_info_node *node = (struct drm_info_node *)m->private; | 842 | struct drm_info_node *node = (struct drm_info_node *)m->private; |
| 842 | struct drm_mm *mm = (struct drm_mm *)node->info_ent->data; | 843 | unsigned ttm_pl = *(int *)node->info_ent->data; |
| 843 | struct drm_device *dev = node->minor->dev; | 844 | struct drm_device *dev = node->minor->dev; |
| 844 | struct radeon_device *rdev = dev->dev_private; | 845 | struct radeon_device *rdev = dev->dev_private; |
| 846 | struct drm_mm *mm = (struct drm_mm *)rdev->mman.bdev.man[ttm_pl].priv; | ||
| 845 | int ret; | 847 | int ret; |
| 846 | struct ttm_bo_global *glob = rdev->mman.bdev.glob; | 848 | struct ttm_bo_global *glob = rdev->mman.bdev.glob; |
| 847 | 849 | ||
| @@ -850,46 +852,169 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data) | |||
| 850 | spin_unlock(&glob->lru_lock); | 852 | spin_unlock(&glob->lru_lock); |
| 851 | return ret; | 853 | return ret; |
| 852 | } | 854 | } |
| 855 | |||
| 856 | static int ttm_pl_vram = TTM_PL_VRAM; | ||
| 857 | static int ttm_pl_tt = TTM_PL_TT; | ||
| 858 | |||
| 859 | static struct drm_info_list radeon_ttm_debugfs_list[] = { | ||
| 860 | {"radeon_vram_mm", radeon_mm_dump_table, 0, &ttm_pl_vram}, | ||
| 861 | {"radeon_gtt_mm", radeon_mm_dump_table, 0, &ttm_pl_tt}, | ||
| 862 | {"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL}, | ||
| 863 | #ifdef CONFIG_SWIOTLB | ||
| 864 | {"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL} | ||
| 853 | #endif | 865 | #endif |
| 866 | }; | ||
| 854 | 867 | ||
| 855 | static int radeon_ttm_debugfs_init(struct radeon_device *rdev) | 868 | static int radeon_ttm_vram_open(struct inode *inode, struct file *filep) |
| 856 | { | 869 | { |
| 857 | #if defined(CONFIG_DEBUG_FS) | 870 | struct radeon_device *rdev = inode->i_private; |
| 858 | static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2]; | 871 | i_size_write(inode, rdev->mc.mc_vram_size); |
| 859 | static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+2][32]; | 872 | filep->private_data = inode->i_private; |
| 860 | unsigned i; | 873 | return 0; |
| 874 | } | ||
| 861 | 875 | ||
| 862 | for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) { | 876 | static ssize_t radeon_ttm_vram_read(struct file *f, char __user *buf, |
| 863 | if (i == 0) | 877 | size_t size, loff_t *pos) |
| 864 | sprintf(radeon_mem_types_names[i], "radeon_vram_mm"); | 878 | { |
| 865 | else | 879 | struct radeon_device *rdev = f->private_data; |
| 866 | sprintf(radeon_mem_types_names[i], "radeon_gtt_mm"); | 880 | ssize_t result = 0; |
| 867 | radeon_mem_types_list[i].name = radeon_mem_types_names[i]; | 881 | int r; |
| 868 | radeon_mem_types_list[i].show = &radeon_mm_dump_table; | ||
| 869 | radeon_mem_types_list[i].driver_features = 0; | ||
| 870 | if (i == 0) | ||
| 871 | radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv; | ||
| 872 | else | ||
| 873 | radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv; | ||
| 874 | 882 | ||
| 883 | if (size & 0x3 || *pos & 0x3) | ||
| 884 | return -EINVAL; | ||
| 885 | |||
| 886 | while (size) { | ||
| 887 | unsigned long flags; | ||
| 888 | uint32_t value; | ||
| 889 | |||
| 890 | if (*pos >= rdev->mc.mc_vram_size) | ||
| 891 | return result; | ||
| 892 | |||
| 893 | spin_lock_irqsave(&rdev->mmio_idx_lock, flags); | ||
| 894 | WREG32(RADEON_MM_INDEX, ((uint32_t)*pos) | 0x80000000); | ||
| 895 | if (rdev->family >= CHIP_CEDAR) | ||
| 896 | WREG32(EVERGREEN_MM_INDEX_HI, *pos >> 31); | ||
| 897 | value = RREG32(RADEON_MM_DATA); | ||
| 898 | spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags); | ||
| 899 | |||
| 900 | r = put_user(value, (uint32_t *)buf); | ||
| 901 | if (r) | ||
| 902 | return r; | ||
| 903 | |||
| 904 | result += 4; | ||
| 905 | buf += 4; | ||
| 906 | *pos += 4; | ||
| 907 | size -= 4; | ||
| 875 | } | 908 | } |
| 876 | /* Add ttm page pool to debugfs */ | 909 | |
| 877 | sprintf(radeon_mem_types_names[i], "ttm_page_pool"); | 910 | return result; |
| 878 | radeon_mem_types_list[i].name = radeon_mem_types_names[i]; | 911 | } |
| 879 | radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs; | 912 | |
| 880 | radeon_mem_types_list[i].driver_features = 0; | 913 | static const struct file_operations radeon_ttm_vram_fops = { |
| 881 | radeon_mem_types_list[i++].data = NULL; | 914 | .owner = THIS_MODULE, |
| 882 | #ifdef CONFIG_SWIOTLB | 915 | .open = radeon_ttm_vram_open, |
| 883 | if (swiotlb_nr_tbl()) { | 916 | .read = radeon_ttm_vram_read, |
| 884 | sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool"); | 917 | .llseek = default_llseek |
| 885 | radeon_mem_types_list[i].name = radeon_mem_types_names[i]; | 918 | }; |
| 886 | radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs; | 919 | |
| 887 | radeon_mem_types_list[i].driver_features = 0; | 920 | static int radeon_ttm_gtt_open(struct inode *inode, struct file *filep) |
| 888 | radeon_mem_types_list[i++].data = NULL; | 921 | { |
| 922 | struct radeon_device *rdev = inode->i_private; | ||
| 923 | i_size_write(inode, rdev->mc.gtt_size); | ||
| 924 | filep->private_data = inode->i_private; | ||
| 925 | return 0; | ||
| 926 | } | ||
| 927 | |||
| 928 | static ssize_t radeon_ttm_gtt_read(struct file *f, char __user *buf, | ||
| 929 | size_t size, loff_t *pos) | ||
| 930 | { | ||
| 931 | struct radeon_device *rdev = f->private_data; | ||
| 932 | ssize_t result = 0; | ||
| 933 | int r; | ||
| 934 | |||
| 935 | while (size) { | ||
| 936 | loff_t p = *pos / PAGE_SIZE; | ||
| 937 | unsigned off = *pos & ~PAGE_MASK; | ||
| 938 | ssize_t cur_size = min(size, PAGE_SIZE - off); | ||
| 939 | struct page *page; | ||
| 940 | void *ptr; | ||
| 941 | |||
| 942 | if (p >= rdev->gart.num_cpu_pages) | ||
| 943 | return result; | ||
| 944 | |||
| 945 | page = rdev->gart.pages[p]; | ||
| 946 | if (page) { | ||
| 947 | ptr = kmap(page); | ||
| 948 | ptr += off; | ||
| 949 | |||
| 950 | r = copy_to_user(buf, ptr, cur_size); | ||
| 951 | kunmap(rdev->gart.pages[p]); | ||
| 952 | } else | ||
| 953 | r = clear_user(buf, cur_size); | ||
| 954 | |||
| 955 | if (r) | ||
| 956 | return -EFAULT; | ||
| 957 | |||
| 958 | result += cur_size; | ||
| 959 | buf += cur_size; | ||
| 960 | *pos += cur_size; | ||
| 961 | size -= cur_size; | ||
| 889 | } | 962 | } |
| 963 | |||
| 964 | return result; | ||
| 965 | } | ||
| 966 | |||
| 967 | static const struct file_operations radeon_ttm_gtt_fops = { | ||
| 968 | .owner = THIS_MODULE, | ||
| 969 | .open = radeon_ttm_gtt_open, | ||
| 970 | .read = radeon_ttm_gtt_read, | ||
| 971 | .llseek = default_llseek | ||
| 972 | }; | ||
| 973 | |||
| 890 | #endif | 974 | #endif |
| 891 | return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i); | ||
| 892 | 975 | ||
| 976 | static int radeon_ttm_debugfs_init(struct radeon_device *rdev) | ||
| 977 | { | ||
| 978 | #if defined(CONFIG_DEBUG_FS) | ||
| 979 | unsigned count; | ||
| 980 | |||
| 981 | struct drm_minor *minor = rdev->ddev->primary; | ||
| 982 | struct dentry *ent, *root = minor->debugfs_root; | ||
| 983 | |||
| 984 | ent = debugfs_create_file("radeon_vram", S_IFREG | S_IRUGO, root, | ||
| 985 | rdev, &radeon_ttm_vram_fops); | ||
| 986 | if (IS_ERR(ent)) | ||
| 987 | return PTR_ERR(ent); | ||
| 988 | rdev->mman.vram = ent; | ||
| 989 | |||
| 990 | ent = debugfs_create_file("radeon_gtt", S_IFREG | S_IRUGO, root, | ||
| 991 | rdev, &radeon_ttm_gtt_fops); | ||
| 992 | if (IS_ERR(ent)) | ||
| 993 | return PTR_ERR(ent); | ||
| 994 | rdev->mman.gtt = ent; | ||
| 995 | |||
| 996 | count = ARRAY_SIZE(radeon_ttm_debugfs_list); | ||
| 997 | |||
| 998 | #ifdef CONFIG_SWIOTLB | ||
| 999 | if (!swiotlb_nr_tbl()) | ||
| 1000 | --count; | ||
| 893 | #endif | 1001 | #endif |
| 1002 | |||
| 1003 | return radeon_debugfs_add_files(rdev, radeon_ttm_debugfs_list, count); | ||
| 1004 | #else | ||
| 1005 | |||
| 894 | return 0; | 1006 | return 0; |
| 1007 | #endif | ||
| 1008 | } | ||
| 1009 | |||
| 1010 | static void radeon_ttm_debugfs_fini(struct radeon_device *rdev) | ||
| 1011 | { | ||
| 1012 | #if defined(CONFIG_DEBUG_FS) | ||
| 1013 | |||
| 1014 | debugfs_remove(rdev->mman.vram); | ||
| 1015 | rdev->mman.vram = NULL; | ||
| 1016 | |||
| 1017 | debugfs_remove(rdev->mman.gtt); | ||
| 1018 | rdev->mman.gtt = NULL; | ||
| 1019 | #endif | ||
| 895 | } | 1020 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index b9c0529b4a2e..6781fee1eaad 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c | |||
| @@ -91,6 +91,7 @@ int radeon_uvd_init(struct radeon_device *rdev) | |||
| 91 | case CHIP_VERDE: | 91 | case CHIP_VERDE: |
| 92 | case CHIP_PITCAIRN: | 92 | case CHIP_PITCAIRN: |
| 93 | case CHIP_ARUBA: | 93 | case CHIP_ARUBA: |
| 94 | case CHIP_OLAND: | ||
| 94 | fw_name = FIRMWARE_TAHITI; | 95 | fw_name = FIRMWARE_TAHITI; |
| 95 | break; | 96 | break; |
| 96 | 97 | ||
| @@ -778,6 +779,8 @@ static void radeon_uvd_idle_work_handler(struct work_struct *work) | |||
| 778 | 779 | ||
| 779 | if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0) { | 780 | if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0) { |
| 780 | if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { | 781 | if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { |
| 782 | radeon_uvd_count_handles(rdev, &rdev->pm.dpm.sd, | ||
| 783 | &rdev->pm.dpm.hd); | ||
| 781 | radeon_dpm_enable_uvd(rdev, false); | 784 | radeon_dpm_enable_uvd(rdev, false); |
| 782 | } else { | 785 | } else { |
| 783 | radeon_set_uvd_clocks(rdev, 0, 0); | 786 | radeon_set_uvd_clocks(rdev, 0, 0); |
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index 9566b5940a5a..b5c2369cda2f 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c | |||
| @@ -474,6 +474,8 @@ int rs400_resume(struct radeon_device *rdev) | |||
| 474 | /* Initialize surface registers */ | 474 | /* Initialize surface registers */ |
| 475 | radeon_surface_init(rdev); | 475 | radeon_surface_init(rdev); |
| 476 | 476 | ||
| 477 | radeon_pm_resume(rdev); | ||
| 478 | |||
| 477 | rdev->accel_working = true; | 479 | rdev->accel_working = true; |
| 478 | r = rs400_startup(rdev); | 480 | r = rs400_startup(rdev); |
| 479 | if (r) { | 481 | if (r) { |
| @@ -484,6 +486,7 @@ int rs400_resume(struct radeon_device *rdev) | |||
| 484 | 486 | ||
| 485 | int rs400_suspend(struct radeon_device *rdev) | 487 | int rs400_suspend(struct radeon_device *rdev) |
| 486 | { | 488 | { |
| 489 | radeon_pm_suspend(rdev); | ||
| 487 | r100_cp_disable(rdev); | 490 | r100_cp_disable(rdev); |
| 488 | radeon_wb_disable(rdev); | 491 | radeon_wb_disable(rdev); |
| 489 | r100_irq_disable(rdev); | 492 | r100_irq_disable(rdev); |
| @@ -493,6 +496,7 @@ int rs400_suspend(struct radeon_device *rdev) | |||
| 493 | 496 | ||
| 494 | void rs400_fini(struct radeon_device *rdev) | 497 | void rs400_fini(struct radeon_device *rdev) |
| 495 | { | 498 | { |
| 499 | radeon_pm_fini(rdev); | ||
| 496 | r100_cp_fini(rdev); | 500 | r100_cp_fini(rdev); |
| 497 | radeon_wb_fini(rdev); | 501 | radeon_wb_fini(rdev); |
| 498 | radeon_ib_pool_fini(rdev); | 502 | radeon_ib_pool_fini(rdev); |
| @@ -560,6 +564,9 @@ int rs400_init(struct radeon_device *rdev) | |||
| 560 | return r; | 564 | return r; |
| 561 | r300_set_reg_safe(rdev); | 565 | r300_set_reg_safe(rdev); |
| 562 | 566 | ||
| 567 | /* Initialize power management */ | ||
| 568 | radeon_pm_init(rdev); | ||
| 569 | |||
| 563 | rdev->accel_working = true; | 570 | rdev->accel_working = true; |
| 564 | r = rs400_startup(rdev); | 571 | r = rs400_startup(rdev); |
| 565 | if (r) { | 572 | if (r) { |
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 76cc8d3aafec..fdcde7693032 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
| @@ -1048,6 +1048,8 @@ int rs600_resume(struct radeon_device *rdev) | |||
| 1048 | /* Initialize surface registers */ | 1048 | /* Initialize surface registers */ |
| 1049 | radeon_surface_init(rdev); | 1049 | radeon_surface_init(rdev); |
| 1050 | 1050 | ||
| 1051 | radeon_pm_resume(rdev); | ||
| 1052 | |||
| 1051 | rdev->accel_working = true; | 1053 | rdev->accel_working = true; |
| 1052 | r = rs600_startup(rdev); | 1054 | r = rs600_startup(rdev); |
| 1053 | if (r) { | 1055 | if (r) { |
| @@ -1058,6 +1060,7 @@ int rs600_resume(struct radeon_device *rdev) | |||
| 1058 | 1060 | ||
| 1059 | int rs600_suspend(struct radeon_device *rdev) | 1061 | int rs600_suspend(struct radeon_device *rdev) |
| 1060 | { | 1062 | { |
| 1063 | radeon_pm_suspend(rdev); | ||
| 1061 | r600_audio_fini(rdev); | 1064 | r600_audio_fini(rdev); |
| 1062 | r100_cp_disable(rdev); | 1065 | r100_cp_disable(rdev); |
| 1063 | radeon_wb_disable(rdev); | 1066 | radeon_wb_disable(rdev); |
| @@ -1068,6 +1071,7 @@ int rs600_suspend(struct radeon_device *rdev) | |||
| 1068 | 1071 | ||
| 1069 | void rs600_fini(struct radeon_device *rdev) | 1072 | void rs600_fini(struct radeon_device *rdev) |
| 1070 | { | 1073 | { |
| 1074 | radeon_pm_fini(rdev); | ||
| 1071 | r600_audio_fini(rdev); | 1075 | r600_audio_fini(rdev); |
| 1072 | r100_cp_fini(rdev); | 1076 | r100_cp_fini(rdev); |
| 1073 | radeon_wb_fini(rdev); | 1077 | radeon_wb_fini(rdev); |
| @@ -1136,6 +1140,9 @@ int rs600_init(struct radeon_device *rdev) | |||
| 1136 | return r; | 1140 | return r; |
| 1137 | rs600_set_safe_registers(rdev); | 1141 | rs600_set_safe_registers(rdev); |
| 1138 | 1142 | ||
| 1143 | /* Initialize power management */ | ||
| 1144 | radeon_pm_init(rdev); | ||
| 1145 | |||
| 1139 | rdev->accel_working = true; | 1146 | rdev->accel_working = true; |
| 1140 | r = rs600_startup(rdev); | 1147 | r = rs600_startup(rdev); |
| 1141 | if (r) { | 1148 | if (r) { |
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index e7dab069cccf..35950738bd5e 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c | |||
| @@ -756,6 +756,8 @@ int rs690_resume(struct radeon_device *rdev) | |||
| 756 | /* Initialize surface registers */ | 756 | /* Initialize surface registers */ |
| 757 | radeon_surface_init(rdev); | 757 | radeon_surface_init(rdev); |
| 758 | 758 | ||
| 759 | radeon_pm_resume(rdev); | ||
| 760 | |||
| 759 | rdev->accel_working = true; | 761 | rdev->accel_working = true; |
| 760 | r = rs690_startup(rdev); | 762 | r = rs690_startup(rdev); |
| 761 | if (r) { | 763 | if (r) { |
| @@ -766,6 +768,7 @@ int rs690_resume(struct radeon_device *rdev) | |||
| 766 | 768 | ||
| 767 | int rs690_suspend(struct radeon_device *rdev) | 769 | int rs690_suspend(struct radeon_device *rdev) |
| 768 | { | 770 | { |
| 771 | radeon_pm_suspend(rdev); | ||
| 769 | r600_audio_fini(rdev); | 772 | r600_audio_fini(rdev); |
| 770 | r100_cp_disable(rdev); | 773 | r100_cp_disable(rdev); |
| 771 | radeon_wb_disable(rdev); | 774 | radeon_wb_disable(rdev); |
| @@ -776,6 +779,7 @@ int rs690_suspend(struct radeon_device *rdev) | |||
| 776 | 779 | ||
| 777 | void rs690_fini(struct radeon_device *rdev) | 780 | void rs690_fini(struct radeon_device *rdev) |
| 778 | { | 781 | { |
| 782 | radeon_pm_fini(rdev); | ||
| 779 | r600_audio_fini(rdev); | 783 | r600_audio_fini(rdev); |
| 780 | r100_cp_fini(rdev); | 784 | r100_cp_fini(rdev); |
| 781 | radeon_wb_fini(rdev); | 785 | radeon_wb_fini(rdev); |
| @@ -845,6 +849,9 @@ int rs690_init(struct radeon_device *rdev) | |||
| 845 | return r; | 849 | return r; |
| 846 | rs600_set_safe_registers(rdev); | 850 | rs600_set_safe_registers(rdev); |
| 847 | 851 | ||
| 852 | /* Initialize power management */ | ||
| 853 | radeon_pm_init(rdev); | ||
| 854 | |||
| 848 | rdev->accel_working = true; | 855 | rdev->accel_working = true; |
| 849 | r = rs690_startup(rdev); | 856 | r = rs690_startup(rdev); |
| 850 | if (r) { | 857 | if (r) { |
diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c index 6af8505cf4d2..8512085b0aef 100644 --- a/drivers/gpu/drm/radeon/rs780_dpm.c +++ b/drivers/gpu/drm/radeon/rs780_dpm.c | |||
| @@ -623,14 +623,6 @@ int rs780_dpm_enable(struct radeon_device *rdev) | |||
| 623 | if (pi->gfx_clock_gating) | 623 | if (pi->gfx_clock_gating) |
| 624 | r600_gfx_clockgating_enable(rdev, true); | 624 | r600_gfx_clockgating_enable(rdev, true); |
| 625 | 625 | ||
| 626 | if (rdev->irq.installed && (rdev->pm.int_thermal_type == THERMAL_TYPE_RV6XX)) { | ||
| 627 | ret = r600_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); | ||
| 628 | if (ret) | ||
| 629 | return ret; | ||
| 630 | rdev->irq.dpm_thermal = true; | ||
| 631 | radeon_irq_set(rdev); | ||
| 632 | } | ||
| 633 | |||
| 634 | return 0; | 626 | return 0; |
| 635 | } | 627 | } |
| 636 | 628 | ||
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 5d1c316115ef..98e8138ff779 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c | |||
| @@ -586,6 +586,8 @@ int rv515_resume(struct radeon_device *rdev) | |||
| 586 | /* Initialize surface registers */ | 586 | /* Initialize surface registers */ |
| 587 | radeon_surface_init(rdev); | 587 | radeon_surface_init(rdev); |
| 588 | 588 | ||
| 589 | radeon_pm_resume(rdev); | ||
| 590 | |||
| 589 | rdev->accel_working = true; | 591 | rdev->accel_working = true; |
| 590 | r = rv515_startup(rdev); | 592 | r = rv515_startup(rdev); |
| 591 | if (r) { | 593 | if (r) { |
| @@ -596,6 +598,7 @@ int rv515_resume(struct radeon_device *rdev) | |||
| 596 | 598 | ||
| 597 | int rv515_suspend(struct radeon_device *rdev) | 599 | int rv515_suspend(struct radeon_device *rdev) |
| 598 | { | 600 | { |
| 601 | radeon_pm_suspend(rdev); | ||
| 599 | r100_cp_disable(rdev); | 602 | r100_cp_disable(rdev); |
| 600 | radeon_wb_disable(rdev); | 603 | radeon_wb_disable(rdev); |
| 601 | rs600_irq_disable(rdev); | 604 | rs600_irq_disable(rdev); |
| @@ -612,6 +615,7 @@ void rv515_set_safe_registers(struct radeon_device *rdev) | |||
| 612 | 615 | ||
| 613 | void rv515_fini(struct radeon_device *rdev) | 616 | void rv515_fini(struct radeon_device *rdev) |
| 614 | { | 617 | { |
| 618 | radeon_pm_fini(rdev); | ||
| 615 | r100_cp_fini(rdev); | 619 | r100_cp_fini(rdev); |
| 616 | radeon_wb_fini(rdev); | 620 | radeon_wb_fini(rdev); |
| 617 | radeon_ib_pool_fini(rdev); | 621 | radeon_ib_pool_fini(rdev); |
| @@ -685,6 +689,9 @@ int rv515_init(struct radeon_device *rdev) | |||
| 685 | return r; | 689 | return r; |
| 686 | rv515_set_safe_registers(rdev); | 690 | rv515_set_safe_registers(rdev); |
| 687 | 691 | ||
| 692 | /* Initialize power management */ | ||
| 693 | radeon_pm_init(rdev); | ||
| 694 | |||
| 688 | rdev->accel_working = true; | 695 | rdev->accel_working = true; |
| 689 | r = rv515_startup(rdev); | 696 | r = rv515_startup(rdev); |
| 690 | if (r) { | 697 | if (r) { |
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c index 26633a025252..bebf31c4d841 100644 --- a/drivers/gpu/drm/radeon/rv6xx_dpm.c +++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c | |||
| @@ -1546,7 +1546,6 @@ int rv6xx_dpm_enable(struct radeon_device *rdev) | |||
| 1546 | { | 1546 | { |
| 1547 | struct rv6xx_power_info *pi = rv6xx_get_pi(rdev); | 1547 | struct rv6xx_power_info *pi = rv6xx_get_pi(rdev); |
| 1548 | struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps; | 1548 | struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps; |
| 1549 | int ret; | ||
| 1550 | 1549 | ||
| 1551 | if (r600_dynamicpm_enabled(rdev)) | 1550 | if (r600_dynamicpm_enabled(rdev)) |
| 1552 | return -EINVAL; | 1551 | return -EINVAL; |
| @@ -1594,15 +1593,6 @@ int rv6xx_dpm_enable(struct radeon_device *rdev) | |||
| 1594 | r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, true); | 1593 | r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, true); |
| 1595 | r600_power_level_enable(rdev, R600_POWER_LEVEL_HIGH, true); | 1594 | r600_power_level_enable(rdev, R600_POWER_LEVEL_HIGH, true); |
| 1596 | 1595 | ||
| 1597 | if (rdev->irq.installed && | ||
| 1598 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { | ||
| 1599 | ret = r600_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); | ||
| 1600 | if (ret) | ||
| 1601 | return ret; | ||
| 1602 | rdev->irq.dpm_thermal = true; | ||
| 1603 | radeon_irq_set(rdev); | ||
| 1604 | } | ||
| 1605 | |||
| 1606 | rv6xx_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true); | 1596 | rv6xx_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true); |
| 1607 | 1597 | ||
| 1608 | r600_start_dpm(rdev); | 1598 | r600_start_dpm(rdev); |
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 9f5846743c9e..6c772e58c784 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
| @@ -1071,7 +1071,8 @@ static void rv770_mc_program(struct radeon_device *rdev) | |||
| 1071 | */ | 1071 | */ |
| 1072 | void r700_cp_stop(struct radeon_device *rdev) | 1072 | void r700_cp_stop(struct radeon_device *rdev) |
| 1073 | { | 1073 | { |
| 1074 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); | 1074 | if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX) |
| 1075 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); | ||
| 1075 | WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT)); | 1076 | WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT)); |
| 1076 | WREG32(SCRATCH_UMSK, 0); | 1077 | WREG32(SCRATCH_UMSK, 0); |
| 1077 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; | 1078 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; |
| @@ -1123,6 +1124,35 @@ void r700_cp_fini(struct radeon_device *rdev) | |||
| 1123 | radeon_scratch_free(rdev, ring->rptr_save_reg); | 1124 | radeon_scratch_free(rdev, ring->rptr_save_reg); |
| 1124 | } | 1125 | } |
| 1125 | 1126 | ||
| 1127 | void rv770_set_clk_bypass_mode(struct radeon_device *rdev) | ||
| 1128 | { | ||
| 1129 | u32 tmp, i; | ||
| 1130 | |||
| 1131 | if (rdev->flags & RADEON_IS_IGP) | ||
| 1132 | return; | ||
| 1133 | |||
| 1134 | tmp = RREG32(CG_SPLL_FUNC_CNTL_2); | ||
| 1135 | tmp &= SCLK_MUX_SEL_MASK; | ||
| 1136 | tmp |= SCLK_MUX_SEL(1) | SCLK_MUX_UPDATE; | ||
| 1137 | WREG32(CG_SPLL_FUNC_CNTL_2, tmp); | ||
| 1138 | |||
| 1139 | for (i = 0; i < rdev->usec_timeout; i++) { | ||
| 1140 | if (RREG32(CG_SPLL_STATUS) & SPLL_CHG_STATUS) | ||
| 1141 | break; | ||
| 1142 | udelay(1); | ||
| 1143 | } | ||
| 1144 | |||
| 1145 | tmp &= ~SCLK_MUX_UPDATE; | ||
| 1146 | WREG32(CG_SPLL_FUNC_CNTL_2, tmp); | ||
| 1147 | |||
| 1148 | tmp = RREG32(MPLL_CNTL_MODE); | ||
| 1149 | if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730)) | ||
| 1150 | tmp &= ~RV730_MPLL_MCLK_SEL; | ||
| 1151 | else | ||
| 1152 | tmp &= ~MPLL_MCLK_SEL; | ||
| 1153 | WREG32(MPLL_CNTL_MODE, tmp); | ||
| 1154 | } | ||
| 1155 | |||
| 1126 | /* | 1156 | /* |
| 1127 | * Core functions | 1157 | * Core functions |
| 1128 | */ | 1158 | */ |
| @@ -1665,14 +1695,6 @@ static int rv770_startup(struct radeon_device *rdev) | |||
| 1665 | 1695 | ||
| 1666 | rv770_mc_program(rdev); | 1696 | rv770_mc_program(rdev); |
| 1667 | 1697 | ||
| 1668 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { | ||
| 1669 | r = r600_init_microcode(rdev); | ||
| 1670 | if (r) { | ||
| 1671 | DRM_ERROR("Failed to load firmware!\n"); | ||
| 1672 | return r; | ||
| 1673 | } | ||
| 1674 | } | ||
| 1675 | |||
| 1676 | if (rdev->flags & RADEON_IS_AGP) { | 1698 | if (rdev->flags & RADEON_IS_AGP) { |
| 1677 | rv770_agp_enable(rdev); | 1699 | rv770_agp_enable(rdev); |
| 1678 | } else { | 1700 | } else { |
| @@ -1728,14 +1750,12 @@ static int rv770_startup(struct radeon_device *rdev) | |||
| 1728 | 1750 | ||
| 1729 | ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; | 1751 | ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
| 1730 | r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, | 1752 | r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, |
| 1731 | R600_CP_RB_RPTR, R600_CP_RB_WPTR, | ||
| 1732 | RADEON_CP_PACKET2); | 1753 | RADEON_CP_PACKET2); |
| 1733 | if (r) | 1754 | if (r) |
| 1734 | return r; | 1755 | return r; |
| 1735 | 1756 | ||
| 1736 | ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; | 1757 | ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; |
| 1737 | r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, | 1758 | r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, |
| 1738 | DMA_RB_RPTR, DMA_RB_WPTR, | ||
| 1739 | DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); | 1759 | DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); |
| 1740 | if (r) | 1760 | if (r) |
| 1741 | return r; | 1761 | return r; |
| @@ -1754,7 +1774,6 @@ static int rv770_startup(struct radeon_device *rdev) | |||
| 1754 | ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; | 1774 | ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; |
| 1755 | if (ring->ring_size) { | 1775 | if (ring->ring_size) { |
| 1756 | r = radeon_ring_init(rdev, ring, ring->ring_size, 0, | 1776 | r = radeon_ring_init(rdev, ring, ring->ring_size, 0, |
| 1757 | UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR, | ||
| 1758 | RADEON_CP_PACKET2); | 1777 | RADEON_CP_PACKET2); |
| 1759 | if (!r) | 1778 | if (!r) |
| 1760 | r = uvd_v1_0_init(rdev); | 1779 | r = uvd_v1_0_init(rdev); |
| @@ -1792,6 +1811,8 @@ int rv770_resume(struct radeon_device *rdev) | |||
| 1792 | /* init golden registers */ | 1811 | /* init golden registers */ |
| 1793 | rv770_init_golden_registers(rdev); | 1812 | rv770_init_golden_registers(rdev); |
| 1794 | 1813 | ||
| 1814 | radeon_pm_resume(rdev); | ||
| 1815 | |||
| 1795 | rdev->accel_working = true; | 1816 | rdev->accel_working = true; |
| 1796 | r = rv770_startup(rdev); | 1817 | r = rv770_startup(rdev); |
| 1797 | if (r) { | 1818 | if (r) { |
| @@ -1806,6 +1827,7 @@ int rv770_resume(struct radeon_device *rdev) | |||
| 1806 | 1827 | ||
| 1807 | int rv770_suspend(struct radeon_device *rdev) | 1828 | int rv770_suspend(struct radeon_device *rdev) |
| 1808 | { | 1829 | { |
| 1830 | radeon_pm_suspend(rdev); | ||
| 1809 | r600_audio_fini(rdev); | 1831 | r600_audio_fini(rdev); |
| 1810 | uvd_v1_0_fini(rdev); | 1832 | uvd_v1_0_fini(rdev); |
| 1811 | radeon_uvd_suspend(rdev); | 1833 | radeon_uvd_suspend(rdev); |
| @@ -1876,6 +1898,17 @@ int rv770_init(struct radeon_device *rdev) | |||
| 1876 | if (r) | 1898 | if (r) |
| 1877 | return r; | 1899 | return r; |
| 1878 | 1900 | ||
| 1901 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { | ||
| 1902 | r = r600_init_microcode(rdev); | ||
| 1903 | if (r) { | ||
| 1904 | DRM_ERROR("Failed to load firmware!\n"); | ||
| 1905 | return r; | ||
| 1906 | } | ||
| 1907 | } | ||
| 1908 | |||
| 1909 | /* Initialize power management */ | ||
| 1910 | radeon_pm_init(rdev); | ||
| 1911 | |||
| 1879 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; | 1912 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; |
| 1880 | r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); | 1913 | r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); |
| 1881 | 1914 | ||
| @@ -1915,6 +1948,7 @@ int rv770_init(struct radeon_device *rdev) | |||
| 1915 | 1948 | ||
| 1916 | void rv770_fini(struct radeon_device *rdev) | 1949 | void rv770_fini(struct radeon_device *rdev) |
| 1917 | { | 1950 | { |
| 1951 | radeon_pm_fini(rdev); | ||
| 1918 | r700_cp_fini(rdev); | 1952 | r700_cp_fini(rdev); |
| 1919 | r600_dma_fini(rdev); | 1953 | r600_dma_fini(rdev); |
| 1920 | r600_irq_fini(rdev); | 1954 | r600_irq_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c index 374499db20c7..80c595aba359 100644 --- a/drivers/gpu/drm/radeon/rv770_dpm.c +++ b/drivers/gpu/drm/radeon/rv770_dpm.c | |||
| @@ -1863,8 +1863,8 @@ void rv770_enable_auto_throttle_source(struct radeon_device *rdev, | |||
| 1863 | } | 1863 | } |
| 1864 | } | 1864 | } |
| 1865 | 1865 | ||
| 1866 | int rv770_set_thermal_temperature_range(struct radeon_device *rdev, | 1866 | static int rv770_set_thermal_temperature_range(struct radeon_device *rdev, |
| 1867 | int min_temp, int max_temp) | 1867 | int min_temp, int max_temp) |
| 1868 | { | 1868 | { |
| 1869 | int low_temp = 0 * 1000; | 1869 | int low_temp = 0 * 1000; |
| 1870 | int high_temp = 255 * 1000; | 1870 | int high_temp = 255 * 1000; |
| @@ -1966,6 +1966,15 @@ int rv770_dpm_enable(struct radeon_device *rdev) | |||
| 1966 | if (pi->mg_clock_gating) | 1966 | if (pi->mg_clock_gating) |
| 1967 | rv770_mg_clock_gating_enable(rdev, true); | 1967 | rv770_mg_clock_gating_enable(rdev, true); |
| 1968 | 1968 | ||
| 1969 | rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true); | ||
| 1970 | |||
| 1971 | return 0; | ||
| 1972 | } | ||
| 1973 | |||
| 1974 | int rv770_dpm_late_enable(struct radeon_device *rdev) | ||
| 1975 | { | ||
| 1976 | int ret; | ||
| 1977 | |||
| 1969 | if (rdev->irq.installed && | 1978 | if (rdev->irq.installed && |
| 1970 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { | 1979 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { |
| 1971 | PPSMC_Result result; | 1980 | PPSMC_Result result; |
| @@ -1981,8 +1990,6 @@ int rv770_dpm_enable(struct radeon_device *rdev) | |||
| 1981 | DRM_DEBUG_KMS("Could not enable thermal interrupts.\n"); | 1990 | DRM_DEBUG_KMS("Could not enable thermal interrupts.\n"); |
| 1982 | } | 1991 | } |
| 1983 | 1992 | ||
| 1984 | rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true); | ||
| 1985 | |||
| 1986 | return 0; | 1993 | return 0; |
| 1987 | } | 1994 | } |
| 1988 | 1995 | ||
| @@ -2244,14 +2251,12 @@ static void rv7xx_parse_pplib_clock_info(struct radeon_device *rdev, | |||
| 2244 | pl->vddci = vddci; | 2251 | pl->vddci = vddci; |
| 2245 | } | 2252 | } |
| 2246 | 2253 | ||
| 2247 | if (rdev->family >= CHIP_BARTS) { | 2254 | if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == |
| 2248 | if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == | 2255 | ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) { |
| 2249 | ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) { | 2256 | rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk; |
| 2250 | rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk; | 2257 | rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk; |
| 2251 | rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk; | 2258 | rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc; |
| 2252 | rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc; | 2259 | rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci; |
| 2253 | rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci; | ||
| 2254 | } | ||
| 2255 | } | 2260 | } |
| 2256 | } | 2261 | } |
| 2257 | 2262 | ||
| @@ -2531,6 +2536,12 @@ bool rv770_dpm_vblank_too_short(struct radeon_device *rdev) | |||
| 2531 | (rdev->pdev->subsystem_device == 0x1c42)) | 2536 | (rdev->pdev->subsystem_device == 0x1c42)) |
| 2532 | switch_limit = 200; | 2537 | switch_limit = 200; |
| 2533 | 2538 | ||
| 2539 | /* RV770 */ | ||
| 2540 | /* mclk switching doesn't seem to work reliably on desktop RV770s */ | ||
| 2541 | if ((rdev->family == CHIP_RV770) && | ||
| 2542 | !(rdev->flags & RADEON_IS_MOBILITY)) | ||
| 2543 | switch_limit = 0xffffffff; /* disable mclk switching */ | ||
| 2544 | |||
| 2534 | if (vblank_time < switch_limit) | 2545 | if (vblank_time < switch_limit) |
| 2535 | return true; | 2546 | return true; |
| 2536 | else | 2547 | else |
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.h b/drivers/gpu/drm/radeon/rv770_dpm.h index 9244effc6b59..f776634840c9 100644 --- a/drivers/gpu/drm/radeon/rv770_dpm.h +++ b/drivers/gpu/drm/radeon/rv770_dpm.h | |||
| @@ -283,8 +283,4 @@ int rv770_read_smc_soft_register(struct radeon_device *rdev, | |||
| 283 | int rv770_write_smc_soft_register(struct radeon_device *rdev, | 283 | int rv770_write_smc_soft_register(struct radeon_device *rdev, |
| 284 | u16 reg_offset, u32 value); | 284 | u16 reg_offset, u32 value); |
| 285 | 285 | ||
| 286 | /* thermal */ | ||
| 287 | int rv770_set_thermal_temperature_range(struct radeon_device *rdev, | ||
| 288 | int min_temp, int max_temp); | ||
| 289 | |||
| 290 | #endif | 286 | #endif |
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h index 1ae277152cc7..3cf1e2921545 100644 --- a/drivers/gpu/drm/radeon/rv770d.h +++ b/drivers/gpu/drm/radeon/rv770d.h | |||
| @@ -100,14 +100,21 @@ | |||
| 100 | #define CG_SPLL_FUNC_CNTL_2 0x604 | 100 | #define CG_SPLL_FUNC_CNTL_2 0x604 |
| 101 | #define SCLK_MUX_SEL(x) ((x) << 0) | 101 | #define SCLK_MUX_SEL(x) ((x) << 0) |
| 102 | #define SCLK_MUX_SEL_MASK (0x1ff << 0) | 102 | #define SCLK_MUX_SEL_MASK (0x1ff << 0) |
| 103 | #define SCLK_MUX_UPDATE (1 << 26) | ||
| 103 | #define CG_SPLL_FUNC_CNTL_3 0x608 | 104 | #define CG_SPLL_FUNC_CNTL_3 0x608 |
| 104 | #define SPLL_FB_DIV(x) ((x) << 0) | 105 | #define SPLL_FB_DIV(x) ((x) << 0) |
| 105 | #define SPLL_FB_DIV_MASK (0x3ffffff << 0) | 106 | #define SPLL_FB_DIV_MASK (0x3ffffff << 0) |
| 106 | #define SPLL_DITHEN (1 << 28) | 107 | #define SPLL_DITHEN (1 << 28) |
| 108 | #define CG_SPLL_STATUS 0x60c | ||
| 109 | #define SPLL_CHG_STATUS (1 << 1) | ||
| 107 | 110 | ||
| 108 | #define SPLL_CNTL_MODE 0x610 | 111 | #define SPLL_CNTL_MODE 0x610 |
| 109 | #define SPLL_DIV_SYNC (1 << 5) | 112 | #define SPLL_DIV_SYNC (1 << 5) |
| 110 | 113 | ||
| 114 | #define MPLL_CNTL_MODE 0x61c | ||
| 115 | # define MPLL_MCLK_SEL (1 << 11) | ||
| 116 | # define RV730_MPLL_MCLK_SEL (1 << 25) | ||
| 117 | |||
| 111 | #define MPLL_AD_FUNC_CNTL 0x624 | 118 | #define MPLL_AD_FUNC_CNTL 0x624 |
| 112 | #define CLKF(x) ((x) << 0) | 119 | #define CLKF(x) ((x) << 0) |
| 113 | #define CLKF_MASK (0x7f << 0) | 120 | #define CLKF_MASK (0x7f << 0) |
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 85e1edfaa3be..09ec4f6c53bb 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c | |||
| @@ -80,6 +80,8 @@ extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev); | |||
| 80 | extern bool evergreen_is_display_hung(struct radeon_device *rdev); | 80 | extern bool evergreen_is_display_hung(struct radeon_device *rdev); |
| 81 | static void si_enable_gui_idle_interrupt(struct radeon_device *rdev, | 81 | static void si_enable_gui_idle_interrupt(struct radeon_device *rdev, |
| 82 | bool enable); | 82 | bool enable); |
| 83 | static void si_init_pg(struct radeon_device *rdev); | ||
| 84 | static void si_init_cg(struct radeon_device *rdev); | ||
| 83 | static void si_fini_pg(struct radeon_device *rdev); | 85 | static void si_fini_pg(struct radeon_device *rdev); |
| 84 | static void si_fini_cg(struct radeon_device *rdev); | 86 | static void si_fini_cg(struct radeon_device *rdev); |
| 85 | static void si_rlc_stop(struct radeon_device *rdev); | 87 | static void si_rlc_stop(struct radeon_device *rdev); |
| @@ -1460,7 +1462,7 @@ static const u32 hainan_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = { | |||
| 1460 | }; | 1462 | }; |
| 1461 | 1463 | ||
| 1462 | /* ucode loading */ | 1464 | /* ucode loading */ |
| 1463 | static int si_mc_load_microcode(struct radeon_device *rdev) | 1465 | int si_mc_load_microcode(struct radeon_device *rdev) |
| 1464 | { | 1466 | { |
| 1465 | const __be32 *fw_data; | 1467 | const __be32 *fw_data; |
| 1466 | u32 running, blackout = 0; | 1468 | u32 running, blackout = 0; |
| @@ -3247,7 +3249,8 @@ static void si_cp_enable(struct radeon_device *rdev, bool enable) | |||
| 3247 | if (enable) | 3249 | if (enable) |
| 3248 | WREG32(CP_ME_CNTL, 0); | 3250 | WREG32(CP_ME_CNTL, 0); |
| 3249 | else { | 3251 | else { |
| 3250 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); | 3252 | if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX) |
| 3253 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); | ||
| 3251 | WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT)); | 3254 | WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT)); |
| 3252 | WREG32(SCRATCH_UMSK, 0); | 3255 | WREG32(SCRATCH_UMSK, 0); |
| 3253 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; | 3256 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; |
| @@ -3508,6 +3511,9 @@ static int si_cp_resume(struct radeon_device *rdev) | |||
| 3508 | 3511 | ||
| 3509 | si_enable_gui_idle_interrupt(rdev, true); | 3512 | si_enable_gui_idle_interrupt(rdev, true); |
| 3510 | 3513 | ||
| 3514 | if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX) | ||
| 3515 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); | ||
| 3516 | |||
| 3511 | return 0; | 3517 | return 0; |
| 3512 | } | 3518 | } |
| 3513 | 3519 | ||
| @@ -3724,6 +3730,106 @@ static void si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask) | |||
| 3724 | evergreen_print_gpu_status_regs(rdev); | 3730 | evergreen_print_gpu_status_regs(rdev); |
| 3725 | } | 3731 | } |
| 3726 | 3732 | ||
| 3733 | static void si_set_clk_bypass_mode(struct radeon_device *rdev) | ||
| 3734 | { | ||
| 3735 | u32 tmp, i; | ||
| 3736 | |||
| 3737 | tmp = RREG32(CG_SPLL_FUNC_CNTL); | ||
| 3738 | tmp |= SPLL_BYPASS_EN; | ||
| 3739 | WREG32(CG_SPLL_FUNC_CNTL, tmp); | ||
| 3740 | |||
| 3741 | tmp = RREG32(CG_SPLL_FUNC_CNTL_2); | ||
| 3742 | tmp |= SPLL_CTLREQ_CHG; | ||
| 3743 | WREG32(CG_SPLL_FUNC_CNTL_2, tmp); | ||
| 3744 | |||
| 3745 | for (i = 0; i < rdev->usec_timeout; i++) { | ||
| 3746 | if (RREG32(SPLL_STATUS) & SPLL_CHG_STATUS) | ||
| 3747 | break; | ||
| 3748 | udelay(1); | ||
| 3749 | } | ||
| 3750 | |||
| 3751 | tmp = RREG32(CG_SPLL_FUNC_CNTL_2); | ||
| 3752 | tmp &= ~(SPLL_CTLREQ_CHG | SCLK_MUX_UPDATE); | ||
| 3753 | WREG32(CG_SPLL_FUNC_CNTL_2, tmp); | ||
| 3754 | |||
| 3755 | tmp = RREG32(MPLL_CNTL_MODE); | ||
| 3756 | tmp &= ~MPLL_MCLK_SEL; | ||
| 3757 | WREG32(MPLL_CNTL_MODE, tmp); | ||
| 3758 | } | ||
| 3759 | |||
| 3760 | static void si_spll_powerdown(struct radeon_device *rdev) | ||
| 3761 | { | ||
| 3762 | u32 tmp; | ||
| 3763 | |||
| 3764 | tmp = RREG32(SPLL_CNTL_MODE); | ||
| 3765 | tmp |= SPLL_SW_DIR_CONTROL; | ||
| 3766 | WREG32(SPLL_CNTL_MODE, tmp); | ||
| 3767 | |||
| 3768 | tmp = RREG32(CG_SPLL_FUNC_CNTL); | ||
| 3769 | tmp |= SPLL_RESET; | ||
| 3770 | WREG32(CG_SPLL_FUNC_CNTL, tmp); | ||
| 3771 | |||
| 3772 | tmp = RREG32(CG_SPLL_FUNC_CNTL); | ||
| 3773 | tmp |= SPLL_SLEEP; | ||
| 3774 | WREG32(CG_SPLL_FUNC_CNTL, tmp); | ||
| 3775 | |||
| 3776 | tmp = RREG32(SPLL_CNTL_MODE); | ||
| 3777 | tmp &= ~SPLL_SW_DIR_CONTROL; | ||
| 3778 | WREG32(SPLL_CNTL_MODE, tmp); | ||
| 3779 | } | ||
| 3780 | |||
| 3781 | static void si_gpu_pci_config_reset(struct radeon_device *rdev) | ||
| 3782 | { | ||
| 3783 | struct evergreen_mc_save save; | ||
| 3784 | u32 tmp, i; | ||
| 3785 | |||
| 3786 | dev_info(rdev->dev, "GPU pci config reset\n"); | ||
| 3787 | |||
| 3788 | /* disable dpm? */ | ||
| 3789 | |||
| 3790 | /* disable cg/pg */ | ||
| 3791 | si_fini_pg(rdev); | ||
| 3792 | si_fini_cg(rdev); | ||
| 3793 | |||
| 3794 | /* Disable CP parsing/prefetching */ | ||
| 3795 | WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT); | ||
| 3796 | /* dma0 */ | ||
| 3797 | tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET); | ||
| 3798 | tmp &= ~DMA_RB_ENABLE; | ||
| 3799 | WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp); | ||
| 3800 | /* dma1 */ | ||
| 3801 | tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET); | ||
| 3802 | tmp &= ~DMA_RB_ENABLE; | ||
| 3803 | WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp); | ||
| 3804 | /* XXX other engines? */ | ||
| 3805 | |||
| 3806 | /* halt the rlc, disable cp internal ints */ | ||
| 3807 | si_rlc_stop(rdev); | ||
| 3808 | |||
| 3809 | udelay(50); | ||
| 3810 | |||
| 3811 | /* disable mem access */ | ||
| 3812 | evergreen_mc_stop(rdev, &save); | ||
| 3813 | if (evergreen_mc_wait_for_idle(rdev)) { | ||
| 3814 | dev_warn(rdev->dev, "Wait for MC idle timed out !\n"); | ||
| 3815 | } | ||
| 3816 | |||
| 3817 | /* set mclk/sclk to bypass */ | ||
| 3818 | si_set_clk_bypass_mode(rdev); | ||
| 3819 | /* powerdown spll */ | ||
| 3820 | si_spll_powerdown(rdev); | ||
| 3821 | /* disable BM */ | ||
| 3822 | pci_clear_master(rdev->pdev); | ||
| 3823 | /* reset */ | ||
| 3824 | radeon_pci_config_reset(rdev); | ||
| 3825 | /* wait for asic to come out of reset */ | ||
| 3826 | for (i = 0; i < rdev->usec_timeout; i++) { | ||
| 3827 | if (RREG32(CONFIG_MEMSIZE) != 0xffffffff) | ||
| 3828 | break; | ||
| 3829 | udelay(1); | ||
| 3830 | } | ||
| 3831 | } | ||
| 3832 | |||
| 3727 | int si_asic_reset(struct radeon_device *rdev) | 3833 | int si_asic_reset(struct radeon_device *rdev) |
| 3728 | { | 3834 | { |
| 3729 | u32 reset_mask; | 3835 | u32 reset_mask; |
| @@ -3733,10 +3839,17 @@ int si_asic_reset(struct radeon_device *rdev) | |||
| 3733 | if (reset_mask) | 3839 | if (reset_mask) |
| 3734 | r600_set_bios_scratch_engine_hung(rdev, true); | 3840 | r600_set_bios_scratch_engine_hung(rdev, true); |
| 3735 | 3841 | ||
| 3842 | /* try soft reset */ | ||
| 3736 | si_gpu_soft_reset(rdev, reset_mask); | 3843 | si_gpu_soft_reset(rdev, reset_mask); |
| 3737 | 3844 | ||
| 3738 | reset_mask = si_gpu_check_soft_reset(rdev); | 3845 | reset_mask = si_gpu_check_soft_reset(rdev); |
| 3739 | 3846 | ||
| 3847 | /* try pci config reset */ | ||
| 3848 | if (reset_mask && radeon_hard_reset) | ||
| 3849 | si_gpu_pci_config_reset(rdev); | ||
| 3850 | |||
| 3851 | reset_mask = si_gpu_check_soft_reset(rdev); | ||
| 3852 | |||
| 3740 | if (!reset_mask) | 3853 | if (!reset_mask) |
| 3741 | r600_set_bios_scratch_engine_hung(rdev, false); | 3854 | r600_set_bios_scratch_engine_hung(rdev, false); |
| 3742 | 3855 | ||
| @@ -5212,8 +5325,8 @@ static void si_enable_hdp_ls(struct radeon_device *rdev, | |||
| 5212 | WREG32(HDP_MEM_POWER_LS, data); | 5325 | WREG32(HDP_MEM_POWER_LS, data); |
| 5213 | } | 5326 | } |
| 5214 | 5327 | ||
| 5215 | void si_update_cg(struct radeon_device *rdev, | 5328 | static void si_update_cg(struct radeon_device *rdev, |
| 5216 | u32 block, bool enable) | 5329 | u32 block, bool enable) |
| 5217 | { | 5330 | { |
| 5218 | if (block & RADEON_CG_BLOCK_GFX) { | 5331 | if (block & RADEON_CG_BLOCK_GFX) { |
| 5219 | si_enable_gui_idle_interrupt(rdev, false); | 5332 | si_enable_gui_idle_interrupt(rdev, false); |
| @@ -5379,6 +5492,9 @@ static void si_init_pg(struct radeon_device *rdev) | |||
| 5379 | si_init_ao_cu_mask(rdev); | 5492 | si_init_ao_cu_mask(rdev); |
| 5380 | if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) { | 5493 | if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) { |
| 5381 | si_init_gfx_cgpg(rdev); | 5494 | si_init_gfx_cgpg(rdev); |
| 5495 | } else { | ||
| 5496 | WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8); | ||
| 5497 | WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8); | ||
| 5382 | } | 5498 | } |
| 5383 | si_enable_dma_pg(rdev, true); | 5499 | si_enable_dma_pg(rdev, true); |
| 5384 | si_enable_gfx_cgpg(rdev, true); | 5500 | si_enable_gfx_cgpg(rdev, true); |
| @@ -5566,7 +5682,7 @@ static void si_disable_interrupt_state(struct radeon_device *rdev) | |||
| 5566 | } | 5682 | } |
| 5567 | 5683 | ||
| 5568 | if (!ASIC_IS_NODCE(rdev)) { | 5684 | if (!ASIC_IS_NODCE(rdev)) { |
| 5569 | WREG32(DACA_AUTODETECT_INT_CONTROL, 0); | 5685 | WREG32(DAC_AUTODETECT_INT_CONTROL, 0); |
| 5570 | 5686 | ||
| 5571 | tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY; | 5687 | tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY; |
| 5572 | WREG32(DC_HPD1_INT_CONTROL, tmp); | 5688 | WREG32(DC_HPD1_INT_CONTROL, tmp); |
| @@ -6324,21 +6440,14 @@ static int si_startup(struct radeon_device *rdev) | |||
| 6324 | 6440 | ||
| 6325 | si_mc_program(rdev); | 6441 | si_mc_program(rdev); |
| 6326 | 6442 | ||
| 6327 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || | 6443 | if (!rdev->pm.dpm_enabled) { |
| 6328 | !rdev->rlc_fw || !rdev->mc_fw) { | 6444 | r = si_mc_load_microcode(rdev); |
| 6329 | r = si_init_microcode(rdev); | ||
| 6330 | if (r) { | 6445 | if (r) { |
| 6331 | DRM_ERROR("Failed to load firmware!\n"); | 6446 | DRM_ERROR("Failed to load MC firmware!\n"); |
| 6332 | return r; | 6447 | return r; |
| 6333 | } | 6448 | } |
| 6334 | } | 6449 | } |
| 6335 | 6450 | ||
| 6336 | r = si_mc_load_microcode(rdev); | ||
| 6337 | if (r) { | ||
| 6338 | DRM_ERROR("Failed to load MC firmware!\n"); | ||
| 6339 | return r; | ||
| 6340 | } | ||
| 6341 | |||
| 6342 | r = si_pcie_gart_enable(rdev); | 6451 | r = si_pcie_gart_enable(rdev); |
| 6343 | if (r) | 6452 | if (r) |
| 6344 | return r; | 6453 | return r; |
| @@ -6421,37 +6530,30 @@ static int si_startup(struct radeon_device *rdev) | |||
| 6421 | 6530 | ||
| 6422 | ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; | 6531 | ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
| 6423 | r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, | 6532 | r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, |
| 6424 | CP_RB0_RPTR, CP_RB0_WPTR, | ||
| 6425 | RADEON_CP_PACKET2); | 6533 | RADEON_CP_PACKET2); |
| 6426 | if (r) | 6534 | if (r) |
| 6427 | return r; | 6535 | return r; |
| 6428 | 6536 | ||
| 6429 | ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; | 6537 | ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; |
| 6430 | r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET, | 6538 | r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET, |
| 6431 | CP_RB1_RPTR, CP_RB1_WPTR, | ||
| 6432 | RADEON_CP_PACKET2); | 6539 | RADEON_CP_PACKET2); |
| 6433 | if (r) | 6540 | if (r) |
| 6434 | return r; | 6541 | return r; |
| 6435 | 6542 | ||
| 6436 | ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; | 6543 | ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; |
| 6437 | r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET, | 6544 | r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET, |
| 6438 | CP_RB2_RPTR, CP_RB2_WPTR, | ||
| 6439 | RADEON_CP_PACKET2); | 6545 | RADEON_CP_PACKET2); |
| 6440 | if (r) | 6546 | if (r) |
| 6441 | return r; | 6547 | return r; |
| 6442 | 6548 | ||
| 6443 | ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; | 6549 | ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; |
| 6444 | r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, | 6550 | r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, |
| 6445 | DMA_RB_RPTR + DMA0_REGISTER_OFFSET, | ||
| 6446 | DMA_RB_WPTR + DMA0_REGISTER_OFFSET, | ||
| 6447 | DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0)); | 6551 | DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0)); |
| 6448 | if (r) | 6552 | if (r) |
| 6449 | return r; | 6553 | return r; |
| 6450 | 6554 | ||
| 6451 | ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]; | 6555 | ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]; |
| 6452 | r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET, | 6556 | r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET, |
| 6453 | DMA_RB_RPTR + DMA1_REGISTER_OFFSET, | ||
| 6454 | DMA_RB_WPTR + DMA1_REGISTER_OFFSET, | ||
| 6455 | DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0)); | 6557 | DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0)); |
| 6456 | if (r) | 6558 | if (r) |
| 6457 | return r; | 6559 | return r; |
| @@ -6471,7 +6573,6 @@ static int si_startup(struct radeon_device *rdev) | |||
| 6471 | ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; | 6573 | ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; |
| 6472 | if (ring->ring_size) { | 6574 | if (ring->ring_size) { |
| 6473 | r = radeon_ring_init(rdev, ring, ring->ring_size, 0, | 6575 | r = radeon_ring_init(rdev, ring, ring->ring_size, 0, |
| 6474 | UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR, | ||
| 6475 | RADEON_CP_PACKET2); | 6576 | RADEON_CP_PACKET2); |
| 6476 | if (!r) | 6577 | if (!r) |
| 6477 | r = uvd_v1_0_init(rdev); | 6578 | r = uvd_v1_0_init(rdev); |
| @@ -6513,6 +6614,8 @@ int si_resume(struct radeon_device *rdev) | |||
| 6513 | /* init golden registers */ | 6614 | /* init golden registers */ |
| 6514 | si_init_golden_registers(rdev); | 6615 | si_init_golden_registers(rdev); |
| 6515 | 6616 | ||
| 6617 | radeon_pm_resume(rdev); | ||
| 6618 | |||
| 6516 | rdev->accel_working = true; | 6619 | rdev->accel_working = true; |
| 6517 | r = si_startup(rdev); | 6620 | r = si_startup(rdev); |
| 6518 | if (r) { | 6621 | if (r) { |
| @@ -6527,6 +6630,7 @@ int si_resume(struct radeon_device *rdev) | |||
| 6527 | 6630 | ||
| 6528 | int si_suspend(struct radeon_device *rdev) | 6631 | int si_suspend(struct radeon_device *rdev) |
| 6529 | { | 6632 | { |
| 6633 | radeon_pm_suspend(rdev); | ||
| 6530 | dce6_audio_fini(rdev); | 6634 | dce6_audio_fini(rdev); |
| 6531 | radeon_vm_manager_fini(rdev); | 6635 | radeon_vm_manager_fini(rdev); |
| 6532 | si_cp_enable(rdev, false); | 6636 | si_cp_enable(rdev, false); |
| @@ -6600,6 +6704,18 @@ int si_init(struct radeon_device *rdev) | |||
| 6600 | if (r) | 6704 | if (r) |
| 6601 | return r; | 6705 | return r; |
| 6602 | 6706 | ||
| 6707 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || | ||
| 6708 | !rdev->rlc_fw || !rdev->mc_fw) { | ||
| 6709 | r = si_init_microcode(rdev); | ||
| 6710 | if (r) { | ||
| 6711 | DRM_ERROR("Failed to load firmware!\n"); | ||
| 6712 | return r; | ||
| 6713 | } | ||
| 6714 | } | ||
| 6715 | |||
| 6716 | /* Initialize power management */ | ||
| 6717 | radeon_pm_init(rdev); | ||
| 6718 | |||
| 6603 | ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; | 6719 | ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
| 6604 | ring->ring_obj = NULL; | 6720 | ring->ring_obj = NULL; |
| 6605 | r600_ring_init(rdev, ring, 1024 * 1024); | 6721 | r600_ring_init(rdev, ring, 1024 * 1024); |
| @@ -6666,6 +6782,7 @@ int si_init(struct radeon_device *rdev) | |||
| 6666 | 6782 | ||
| 6667 | void si_fini(struct radeon_device *rdev) | 6783 | void si_fini(struct radeon_device *rdev) |
| 6668 | { | 6784 | { |
| 6785 | radeon_pm_fini(rdev); | ||
| 6669 | si_cp_fini(rdev); | 6786 | si_cp_fini(rdev); |
| 6670 | cayman_dma_fini(rdev); | 6787 | cayman_dma_fini(rdev); |
| 6671 | si_fini_pg(rdev); | 6788 | si_fini_pg(rdev); |
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 0b00c790fb77..0471501338fb 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c | |||
| @@ -1738,6 +1738,8 @@ struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev); | |||
| 1738 | struct ni_power_info *ni_get_pi(struct radeon_device *rdev); | 1738 | struct ni_power_info *ni_get_pi(struct radeon_device *rdev); |
| 1739 | struct ni_ps *ni_get_ps(struct radeon_ps *rps); | 1739 | struct ni_ps *ni_get_ps(struct radeon_ps *rps); |
| 1740 | 1740 | ||
| 1741 | extern int si_mc_load_microcode(struct radeon_device *rdev); | ||
| 1742 | |||
| 1741 | static int si_populate_voltage_value(struct radeon_device *rdev, | 1743 | static int si_populate_voltage_value(struct radeon_device *rdev, |
| 1742 | const struct atom_voltage_table *table, | 1744 | const struct atom_voltage_table *table, |
| 1743 | u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage); | 1745 | u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage); |
| @@ -1753,9 +1755,6 @@ static int si_calculate_sclk_params(struct radeon_device *rdev, | |||
| 1753 | u32 engine_clock, | 1755 | u32 engine_clock, |
| 1754 | SISLANDS_SMC_SCLK_VALUE *sclk); | 1756 | SISLANDS_SMC_SCLK_VALUE *sclk); |
| 1755 | 1757 | ||
| 1756 | extern void si_update_cg(struct radeon_device *rdev, | ||
| 1757 | u32 block, bool enable); | ||
| 1758 | |||
| 1759 | static struct si_power_info *si_get_pi(struct radeon_device *rdev) | 1758 | static struct si_power_info *si_get_pi(struct radeon_device *rdev) |
| 1760 | { | 1759 | { |
| 1761 | struct si_power_info *pi = rdev->pm.dpm.priv; | 1760 | struct si_power_info *pi = rdev->pm.dpm.priv; |
| @@ -2396,7 +2395,7 @@ static int si_populate_sq_ramping_values(struct radeon_device *rdev, | |||
| 2396 | if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT)) | 2395 | if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT)) |
| 2397 | enable_sq_ramping = false; | 2396 | enable_sq_ramping = false; |
| 2398 | 2397 | ||
| 2399 | if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT)) | 2398 | if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT)) |
| 2400 | enable_sq_ramping = false; | 2399 | enable_sq_ramping = false; |
| 2401 | 2400 | ||
| 2402 | for (i = 0; i < state->performance_level_count; i++) { | 2401 | for (i = 0; i < state->performance_level_count; i++) { |
| @@ -3591,10 +3590,9 @@ static void si_program_display_gap(struct radeon_device *rdev) | |||
| 3591 | 3590 | ||
| 3592 | /* Setting this to false forces the performance state to low if the crtcs are disabled. | 3591 | /* Setting this to false forces the performance state to low if the crtcs are disabled. |
| 3593 | * This can be a problem on PowerXpress systems or if you want to use the card | 3592 | * This can be a problem on PowerXpress systems or if you want to use the card |
| 3594 | * for offscreen rendering or compute if there are no crtcs enabled. Set it to | 3593 | * for offscreen rendering or compute if there are no crtcs enabled. |
| 3595 | * true for now so that performance scales even if the displays are off. | ||
| 3596 | */ | 3594 | */ |
| 3597 | si_notify_smc_display_change(rdev, true /*rdev->pm.dpm.new_active_crtc_count > 0*/); | 3595 | si_notify_smc_display_change(rdev, rdev->pm.dpm.new_active_crtc_count > 0); |
| 3598 | } | 3596 | } |
| 3599 | 3597 | ||
| 3600 | static void si_enable_spread_spectrum(struct radeon_device *rdev, bool enable) | 3598 | static void si_enable_spread_spectrum(struct radeon_device *rdev, bool enable) |
| @@ -5414,7 +5412,7 @@ static void si_populate_mc_reg_addresses(struct radeon_device *rdev, | |||
| 5414 | 5412 | ||
| 5415 | for (i = 0, j = 0; j < si_pi->mc_reg_table.last; j++) { | 5413 | for (i = 0, j = 0; j < si_pi->mc_reg_table.last; j++) { |
| 5416 | if (si_pi->mc_reg_table.valid_flag & (1 << j)) { | 5414 | if (si_pi->mc_reg_table.valid_flag & (1 << j)) { |
| 5417 | if (i >= SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE) | 5415 | if (i >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE) |
| 5418 | break; | 5416 | break; |
| 5419 | mc_reg_table->address[i].s0 = | 5417 | mc_reg_table->address[i].s0 = |
| 5420 | cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s0); | 5418 | cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s0); |
| @@ -5754,6 +5752,11 @@ static void si_set_pcie_lane_width_in_smc(struct radeon_device *rdev, | |||
| 5754 | 5752 | ||
| 5755 | void si_dpm_setup_asic(struct radeon_device *rdev) | 5753 | void si_dpm_setup_asic(struct radeon_device *rdev) |
| 5756 | { | 5754 | { |
| 5755 | int r; | ||
| 5756 | |||
| 5757 | r = si_mc_load_microcode(rdev); | ||
| 5758 | if (r) | ||
| 5759 | DRM_ERROR("Failed to load MC firmware!\n"); | ||
| 5757 | rv770_get_memory_type(rdev); | 5760 | rv770_get_memory_type(rdev); |
| 5758 | si_read_clock_registers(rdev); | 5761 | si_read_clock_registers(rdev); |
| 5759 | si_enable_acpi_power_management(rdev); | 5762 | si_enable_acpi_power_management(rdev); |
| @@ -5791,13 +5794,6 @@ int si_dpm_enable(struct radeon_device *rdev) | |||
| 5791 | struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps; | 5794 | struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps; |
| 5792 | int ret; | 5795 | int ret; |
| 5793 | 5796 | ||
| 5794 | si_update_cg(rdev, (RADEON_CG_BLOCK_GFX | | ||
| 5795 | RADEON_CG_BLOCK_MC | | ||
| 5796 | RADEON_CG_BLOCK_SDMA | | ||
| 5797 | RADEON_CG_BLOCK_BIF | | ||
| 5798 | RADEON_CG_BLOCK_UVD | | ||
| 5799 | RADEON_CG_BLOCK_HDP), false); | ||
| 5800 | |||
| 5801 | if (si_is_smc_running(rdev)) | 5797 | if (si_is_smc_running(rdev)) |
| 5802 | return -EINVAL; | 5798 | return -EINVAL; |
| 5803 | if (pi->voltage_control) | 5799 | if (pi->voltage_control) |
| @@ -5900,6 +5896,17 @@ int si_dpm_enable(struct radeon_device *rdev) | |||
| 5900 | si_enable_sclk_control(rdev, true); | 5896 | si_enable_sclk_control(rdev, true); |
| 5901 | si_start_dpm(rdev); | 5897 | si_start_dpm(rdev); |
| 5902 | 5898 | ||
| 5899 | si_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true); | ||
| 5900 | |||
| 5901 | ni_update_current_ps(rdev, boot_ps); | ||
| 5902 | |||
| 5903 | return 0; | ||
| 5904 | } | ||
| 5905 | |||
| 5906 | int si_dpm_late_enable(struct radeon_device *rdev) | ||
| 5907 | { | ||
| 5908 | int ret; | ||
| 5909 | |||
| 5903 | if (rdev->irq.installed && | 5910 | if (rdev->irq.installed && |
| 5904 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { | 5911 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { |
| 5905 | PPSMC_Result result; | 5912 | PPSMC_Result result; |
| @@ -5915,17 +5922,6 @@ int si_dpm_enable(struct radeon_device *rdev) | |||
| 5915 | DRM_DEBUG_KMS("Could not enable thermal interrupts.\n"); | 5922 | DRM_DEBUG_KMS("Could not enable thermal interrupts.\n"); |
| 5916 | } | 5923 | } |
| 5917 | 5924 | ||
| 5918 | si_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true); | ||
| 5919 | |||
| 5920 | si_update_cg(rdev, (RADEON_CG_BLOCK_GFX | | ||
| 5921 | RADEON_CG_BLOCK_MC | | ||
| 5922 | RADEON_CG_BLOCK_SDMA | | ||
| 5923 | RADEON_CG_BLOCK_BIF | | ||
| 5924 | RADEON_CG_BLOCK_UVD | | ||
| 5925 | RADEON_CG_BLOCK_HDP), true); | ||
| 5926 | |||
| 5927 | ni_update_current_ps(rdev, boot_ps); | ||
| 5928 | |||
| 5929 | return 0; | 5925 | return 0; |
| 5930 | } | 5926 | } |
| 5931 | 5927 | ||
| @@ -5934,13 +5930,6 @@ void si_dpm_disable(struct radeon_device *rdev) | |||
| 5934 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); | 5930 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
| 5935 | struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps; | 5931 | struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps; |
| 5936 | 5932 | ||
| 5937 | si_update_cg(rdev, (RADEON_CG_BLOCK_GFX | | ||
| 5938 | RADEON_CG_BLOCK_MC | | ||
| 5939 | RADEON_CG_BLOCK_SDMA | | ||
| 5940 | RADEON_CG_BLOCK_BIF | | ||
| 5941 | RADEON_CG_BLOCK_UVD | | ||
| 5942 | RADEON_CG_BLOCK_HDP), false); | ||
| 5943 | |||
| 5944 | if (!si_is_smc_running(rdev)) | 5933 | if (!si_is_smc_running(rdev)) |
| 5945 | return; | 5934 | return; |
| 5946 | si_disable_ulv(rdev); | 5935 | si_disable_ulv(rdev); |
| @@ -6005,13 +5994,6 @@ int si_dpm_set_power_state(struct radeon_device *rdev) | |||
| 6005 | struct radeon_ps *old_ps = &eg_pi->current_rps; | 5994 | struct radeon_ps *old_ps = &eg_pi->current_rps; |
| 6006 | int ret; | 5995 | int ret; |
| 6007 | 5996 | ||
| 6008 | si_update_cg(rdev, (RADEON_CG_BLOCK_GFX | | ||
| 6009 | RADEON_CG_BLOCK_MC | | ||
| 6010 | RADEON_CG_BLOCK_SDMA | | ||
| 6011 | RADEON_CG_BLOCK_BIF | | ||
| 6012 | RADEON_CG_BLOCK_UVD | | ||
| 6013 | RADEON_CG_BLOCK_HDP), false); | ||
| 6014 | |||
| 6015 | ret = si_disable_ulv(rdev); | 5997 | ret = si_disable_ulv(rdev); |
| 6016 | if (ret) { | 5998 | if (ret) { |
| 6017 | DRM_ERROR("si_disable_ulv failed\n"); | 5999 | DRM_ERROR("si_disable_ulv failed\n"); |
| @@ -6104,13 +6086,6 @@ int si_dpm_set_power_state(struct radeon_device *rdev) | |||
| 6104 | return ret; | 6086 | return ret; |
| 6105 | } | 6087 | } |
| 6106 | 6088 | ||
| 6107 | si_update_cg(rdev, (RADEON_CG_BLOCK_GFX | | ||
| 6108 | RADEON_CG_BLOCK_MC | | ||
| 6109 | RADEON_CG_BLOCK_SDMA | | ||
| 6110 | RADEON_CG_BLOCK_BIF | | ||
| 6111 | RADEON_CG_BLOCK_UVD | | ||
| 6112 | RADEON_CG_BLOCK_HDP), true); | ||
| 6113 | |||
| 6114 | return 0; | 6089 | return 0; |
| 6115 | } | 6090 | } |
| 6116 | 6091 | ||
diff --git a/drivers/gpu/drm/radeon/si_smc.c b/drivers/gpu/drm/radeon/si_smc.c index d422a1cbf727..e80efcf0c230 100644 --- a/drivers/gpu/drm/radeon/si_smc.c +++ b/drivers/gpu/drm/radeon/si_smc.c | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #include "sid.h" | 28 | #include "sid.h" |
| 29 | #include "ppsmc.h" | 29 | #include "ppsmc.h" |
| 30 | #include "radeon_ucode.h" | 30 | #include "radeon_ucode.h" |
| 31 | #include "sislands_smc.h" | ||
| 31 | 32 | ||
| 32 | static int si_set_smc_sram_address(struct radeon_device *rdev, | 33 | static int si_set_smc_sram_address(struct radeon_device *rdev, |
| 33 | u32 smc_address, u32 limit) | 34 | u32 smc_address, u32 limit) |
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h index b322acc48097..9239a6d29128 100644 --- a/drivers/gpu/drm/radeon/sid.h +++ b/drivers/gpu/drm/radeon/sid.h | |||
| @@ -94,6 +94,8 @@ | |||
| 94 | #define CG_SPLL_FUNC_CNTL_2 0x604 | 94 | #define CG_SPLL_FUNC_CNTL_2 0x604 |
| 95 | #define SCLK_MUX_SEL(x) ((x) << 0) | 95 | #define SCLK_MUX_SEL(x) ((x) << 0) |
| 96 | #define SCLK_MUX_SEL_MASK (0x1ff << 0) | 96 | #define SCLK_MUX_SEL_MASK (0x1ff << 0) |
| 97 | #define SPLL_CTLREQ_CHG (1 << 23) | ||
| 98 | #define SCLK_MUX_UPDATE (1 << 26) | ||
| 97 | #define CG_SPLL_FUNC_CNTL_3 0x608 | 99 | #define CG_SPLL_FUNC_CNTL_3 0x608 |
| 98 | #define SPLL_FB_DIV(x) ((x) << 0) | 100 | #define SPLL_FB_DIV(x) ((x) << 0) |
| 99 | #define SPLL_FB_DIV_MASK (0x3ffffff << 0) | 101 | #define SPLL_FB_DIV_MASK (0x3ffffff << 0) |
| @@ -101,7 +103,10 @@ | |||
| 101 | #define SPLL_DITHEN (1 << 28) | 103 | #define SPLL_DITHEN (1 << 28) |
| 102 | #define CG_SPLL_FUNC_CNTL_4 0x60c | 104 | #define CG_SPLL_FUNC_CNTL_4 0x60c |
| 103 | 105 | ||
| 106 | #define SPLL_STATUS 0x614 | ||
| 107 | #define SPLL_CHG_STATUS (1 << 1) | ||
| 104 | #define SPLL_CNTL_MODE 0x618 | 108 | #define SPLL_CNTL_MODE 0x618 |
| 109 | #define SPLL_SW_DIR_CONTROL (1 << 0) | ||
| 105 | # define SPLL_REFCLK_SEL(x) ((x) << 8) | 110 | # define SPLL_REFCLK_SEL(x) ((x) << 8) |
| 106 | # define SPLL_REFCLK_SEL_MASK 0xFF00 | 111 | # define SPLL_REFCLK_SEL_MASK 0xFF00 |
| 107 | 112 | ||
| @@ -559,6 +564,8 @@ | |||
| 559 | # define MRDCK0_BYPASS (1 << 24) | 564 | # define MRDCK0_BYPASS (1 << 24) |
| 560 | # define MRDCK1_BYPASS (1 << 25) | 565 | # define MRDCK1_BYPASS (1 << 25) |
| 561 | 566 | ||
| 567 | #define MPLL_CNTL_MODE 0x2bb0 | ||
| 568 | # define MPLL_MCLK_SEL (1 << 11) | ||
| 562 | #define MPLL_FUNC_CNTL 0x2bb4 | 569 | #define MPLL_FUNC_CNTL 0x2bb4 |
| 563 | #define BWCTRL(x) ((x) << 20) | 570 | #define BWCTRL(x) ((x) << 20) |
| 564 | #define BWCTRL_MASK (0xff << 20) | 571 | #define BWCTRL_MASK (0xff << 20) |
| @@ -815,7 +822,7 @@ | |||
| 815 | # define GRPH_PFLIP_INT_MASK (1 << 0) | 822 | # define GRPH_PFLIP_INT_MASK (1 << 0) |
| 816 | # define GRPH_PFLIP_INT_TYPE (1 << 8) | 823 | # define GRPH_PFLIP_INT_TYPE (1 << 8) |
| 817 | 824 | ||
| 818 | #define DACA_AUTODETECT_INT_CONTROL 0x66c8 | 825 | #define DAC_AUTODETECT_INT_CONTROL 0x67c8 |
| 819 | 826 | ||
| 820 | #define DC_HPD1_INT_STATUS 0x601c | 827 | #define DC_HPD1_INT_STATUS 0x601c |
| 821 | #define DC_HPD2_INT_STATUS 0x6028 | 828 | #define DC_HPD2_INT_STATUS 0x6028 |
diff --git a/drivers/gpu/drm/radeon/sislands_smc.h b/drivers/gpu/drm/radeon/sislands_smc.h index 5578e9837026..10e945a49479 100644 --- a/drivers/gpu/drm/radeon/sislands_smc.h +++ b/drivers/gpu/drm/radeon/sislands_smc.h | |||
| @@ -374,8 +374,6 @@ typedef struct Smc_SIslands_DTE_Configuration Smc_SIslands_DTE_Configuration; | |||
| 374 | 374 | ||
| 375 | #pragma pack(pop) | 375 | #pragma pack(pop) |
| 376 | 376 | ||
| 377 | int si_set_smc_sram_address(struct radeon_device *rdev, | ||
| 378 | u32 smc_address, u32 limit); | ||
| 379 | int si_copy_bytes_to_smc(struct radeon_device *rdev, | 377 | int si_copy_bytes_to_smc(struct radeon_device *rdev, |
| 380 | u32 smc_start_address, | 378 | u32 smc_start_address, |
| 381 | const u8 *src, u32 byte_count, u32 limit); | 379 | const u8 *src, u32 byte_count, u32 limit); |
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c index 96ea6db8bf57..f121efe12dc5 100644 --- a/drivers/gpu/drm/radeon/sumo_dpm.c +++ b/drivers/gpu/drm/radeon/sumo_dpm.c | |||
| @@ -71,7 +71,7 @@ static const u32 sumo_dtc[SUMO_PM_NUMBER_OF_TC] = | |||
| 71 | SUMO_DTC_DFLT_14, | 71 | SUMO_DTC_DFLT_14, |
| 72 | }; | 72 | }; |
| 73 | 73 | ||
| 74 | struct sumo_ps *sumo_get_ps(struct radeon_ps *rps) | 74 | static struct sumo_ps *sumo_get_ps(struct radeon_ps *rps) |
| 75 | { | 75 | { |
| 76 | struct sumo_ps *ps = rps->ps_priv; | 76 | struct sumo_ps *ps = rps->ps_priv; |
| 77 | 77 | ||
| @@ -1202,14 +1202,10 @@ static void sumo_update_requested_ps(struct radeon_device *rdev, | |||
| 1202 | int sumo_dpm_enable(struct radeon_device *rdev) | 1202 | int sumo_dpm_enable(struct radeon_device *rdev) |
| 1203 | { | 1203 | { |
| 1204 | struct sumo_power_info *pi = sumo_get_pi(rdev); | 1204 | struct sumo_power_info *pi = sumo_get_pi(rdev); |
| 1205 | int ret; | ||
| 1206 | 1205 | ||
| 1207 | if (sumo_dpm_enabled(rdev)) | 1206 | if (sumo_dpm_enabled(rdev)) |
| 1208 | return -EINVAL; | 1207 | return -EINVAL; |
| 1209 | 1208 | ||
| 1210 | ret = sumo_enable_clock_power_gating(rdev); | ||
| 1211 | if (ret) | ||
| 1212 | return ret; | ||
| 1213 | sumo_program_bootup_state(rdev); | 1209 | sumo_program_bootup_state(rdev); |
| 1214 | sumo_init_bsp(rdev); | 1210 | sumo_init_bsp(rdev); |
| 1215 | sumo_reset_am(rdev); | 1211 | sumo_reset_am(rdev); |
| @@ -1233,6 +1229,19 @@ int sumo_dpm_enable(struct radeon_device *rdev) | |||
| 1233 | if (pi->enable_boost) | 1229 | if (pi->enable_boost) |
| 1234 | sumo_enable_boost_timer(rdev); | 1230 | sumo_enable_boost_timer(rdev); |
| 1235 | 1231 | ||
| 1232 | sumo_update_current_ps(rdev, rdev->pm.dpm.boot_ps); | ||
| 1233 | |||
| 1234 | return 0; | ||
| 1235 | } | ||
| 1236 | |||
| 1237 | int sumo_dpm_late_enable(struct radeon_device *rdev) | ||
| 1238 | { | ||
| 1239 | int ret; | ||
| 1240 | |||
| 1241 | ret = sumo_enable_clock_power_gating(rdev); | ||
| 1242 | if (ret) | ||
| 1243 | return ret; | ||
| 1244 | |||
| 1236 | if (rdev->irq.installed && | 1245 | if (rdev->irq.installed && |
| 1237 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { | 1246 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { |
| 1238 | ret = sumo_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); | 1247 | ret = sumo_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); |
| @@ -1242,8 +1251,6 @@ int sumo_dpm_enable(struct radeon_device *rdev) | |||
| 1242 | radeon_irq_set(rdev); | 1251 | radeon_irq_set(rdev); |
| 1243 | } | 1252 | } |
| 1244 | 1253 | ||
| 1245 | sumo_update_current_ps(rdev, rdev->pm.dpm.boot_ps); | ||
| 1246 | |||
| 1247 | return 0; | 1254 | return 0; |
| 1248 | } | 1255 | } |
| 1249 | 1256 | ||
diff --git a/drivers/gpu/drm/radeon/sumo_smc.c b/drivers/gpu/drm/radeon/sumo_smc.c index 18abba5b5810..fb081d2ae374 100644 --- a/drivers/gpu/drm/radeon/sumo_smc.c +++ b/drivers/gpu/drm/radeon/sumo_smc.c | |||
| @@ -31,7 +31,6 @@ | |||
| 31 | #define SUMO_SMU_SERVICE_ROUTINE_ALTVDDNB_NOTIFY 27 | 31 | #define SUMO_SMU_SERVICE_ROUTINE_ALTVDDNB_NOTIFY 27 |
| 32 | #define SUMO_SMU_SERVICE_ROUTINE_GFX_SRV_ID_20 20 | 32 | #define SUMO_SMU_SERVICE_ROUTINE_GFX_SRV_ID_20 20 |
| 33 | 33 | ||
| 34 | struct sumo_ps *sumo_get_ps(struct radeon_ps *rps); | ||
| 35 | struct sumo_power_info *sumo_get_pi(struct radeon_device *rdev); | 34 | struct sumo_power_info *sumo_get_pi(struct radeon_device *rdev); |
| 36 | 35 | ||
| 37 | static void sumo_send_msg_to_smu(struct radeon_device *rdev, u32 id) | 36 | static void sumo_send_msg_to_smu(struct radeon_device *rdev, u32 id) |
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c index d700698a1f22..2d447192d6f7 100644 --- a/drivers/gpu/drm/radeon/trinity_dpm.c +++ b/drivers/gpu/drm/radeon/trinity_dpm.c | |||
| @@ -342,14 +342,14 @@ static void trinity_apply_state_adjust_rules(struct radeon_device *rdev, | |||
| 342 | struct radeon_ps *new_rps, | 342 | struct radeon_ps *new_rps, |
| 343 | struct radeon_ps *old_rps); | 343 | struct radeon_ps *old_rps); |
| 344 | 344 | ||
| 345 | struct trinity_ps *trinity_get_ps(struct radeon_ps *rps) | 345 | static struct trinity_ps *trinity_get_ps(struct radeon_ps *rps) |
| 346 | { | 346 | { |
| 347 | struct trinity_ps *ps = rps->ps_priv; | 347 | struct trinity_ps *ps = rps->ps_priv; |
| 348 | 348 | ||
| 349 | return ps; | 349 | return ps; |
| 350 | } | 350 | } |
| 351 | 351 | ||
| 352 | struct trinity_power_info *trinity_get_pi(struct radeon_device *rdev) | 352 | static struct trinity_power_info *trinity_get_pi(struct radeon_device *rdev) |
| 353 | { | 353 | { |
| 354 | struct trinity_power_info *pi = rdev->pm.dpm.priv; | 354 | struct trinity_power_info *pi = rdev->pm.dpm.priv; |
| 355 | 355 | ||
| @@ -1082,7 +1082,6 @@ void trinity_dpm_enable_bapm(struct radeon_device *rdev, bool enable) | |||
| 1082 | int trinity_dpm_enable(struct radeon_device *rdev) | 1082 | int trinity_dpm_enable(struct radeon_device *rdev) |
| 1083 | { | 1083 | { |
| 1084 | struct trinity_power_info *pi = trinity_get_pi(rdev); | 1084 | struct trinity_power_info *pi = trinity_get_pi(rdev); |
| 1085 | int ret; | ||
| 1086 | 1085 | ||
| 1087 | trinity_acquire_mutex(rdev); | 1086 | trinity_acquire_mutex(rdev); |
| 1088 | 1087 | ||
| @@ -1091,7 +1090,6 @@ int trinity_dpm_enable(struct radeon_device *rdev) | |||
| 1091 | return -EINVAL; | 1090 | return -EINVAL; |
| 1092 | } | 1091 | } |
| 1093 | 1092 | ||
| 1094 | trinity_enable_clock_power_gating(rdev); | ||
| 1095 | trinity_program_bootup_state(rdev); | 1093 | trinity_program_bootup_state(rdev); |
| 1096 | sumo_program_vc(rdev, 0x00C00033); | 1094 | sumo_program_vc(rdev, 0x00C00033); |
| 1097 | trinity_start_am(rdev); | 1095 | trinity_start_am(rdev); |
| @@ -1105,6 +1103,18 @@ int trinity_dpm_enable(struct radeon_device *rdev) | |||
| 1105 | trinity_dpm_bapm_enable(rdev, false); | 1103 | trinity_dpm_bapm_enable(rdev, false); |
| 1106 | trinity_release_mutex(rdev); | 1104 | trinity_release_mutex(rdev); |
| 1107 | 1105 | ||
| 1106 | trinity_update_current_ps(rdev, rdev->pm.dpm.boot_ps); | ||
| 1107 | |||
| 1108 | return 0; | ||
| 1109 | } | ||
| 1110 | |||
| 1111 | int trinity_dpm_late_enable(struct radeon_device *rdev) | ||
| 1112 | { | ||
| 1113 | int ret; | ||
| 1114 | |||
| 1115 | trinity_acquire_mutex(rdev); | ||
| 1116 | trinity_enable_clock_power_gating(rdev); | ||
| 1117 | |||
| 1108 | if (rdev->irq.installed && | 1118 | if (rdev->irq.installed && |
| 1109 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { | 1119 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { |
| 1110 | ret = trinity_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); | 1120 | ret = trinity_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); |
| @@ -1115,8 +1125,7 @@ int trinity_dpm_enable(struct radeon_device *rdev) | |||
| 1115 | rdev->irq.dpm_thermal = true; | 1125 | rdev->irq.dpm_thermal = true; |
| 1116 | radeon_irq_set(rdev); | 1126 | radeon_irq_set(rdev); |
| 1117 | } | 1127 | } |
| 1118 | 1128 | trinity_release_mutex(rdev); | |
| 1119 | trinity_update_current_ps(rdev, rdev->pm.dpm.boot_ps); | ||
| 1120 | 1129 | ||
| 1121 | return 0; | 1130 | return 0; |
| 1122 | } | 1131 | } |
diff --git a/drivers/gpu/drm/radeon/trinity_smc.c b/drivers/gpu/drm/radeon/trinity_smc.c index 9672bcbc7312..99dd0455334d 100644 --- a/drivers/gpu/drm/radeon/trinity_smc.c +++ b/drivers/gpu/drm/radeon/trinity_smc.c | |||
| @@ -27,9 +27,6 @@ | |||
| 27 | #include "trinity_dpm.h" | 27 | #include "trinity_dpm.h" |
| 28 | #include "ppsmc.h" | 28 | #include "ppsmc.h" |
| 29 | 29 | ||
| 30 | struct trinity_ps *trinity_get_ps(struct radeon_ps *rps); | ||
| 31 | struct trinity_power_info *trinity_get_pi(struct radeon_device *rdev); | ||
| 32 | |||
| 33 | static int trinity_notify_message_to_smu(struct radeon_device *rdev, u32 id) | 30 | static int trinity_notify_message_to_smu(struct radeon_device *rdev, u32 id) |
| 34 | { | 31 | { |
| 35 | int i; | 32 | int i; |
diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c index b19ef4951085..824550db3fed 100644 --- a/drivers/gpu/drm/radeon/uvd_v2_2.c +++ b/drivers/gpu/drm/radeon/uvd_v2_2.c | |||
| @@ -153,6 +153,7 @@ int uvd_v2_2_resume(struct radeon_device *rdev) | |||
| 153 | chip_id = 0x01000015; | 153 | chip_id = 0x01000015; |
| 154 | break; | 154 | break; |
| 155 | case CHIP_PITCAIRN: | 155 | case CHIP_PITCAIRN: |
| 156 | case CHIP_OLAND: | ||
| 156 | chip_id = 0x01000016; | 157 | chip_id = 0x01000016; |
| 157 | break; | 158 | break; |
| 158 | case CHIP_ARUBA: | 159 | case CHIP_ARUBA: |
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index a9d24e4bf792..fbf4be316d0b 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c | |||
| @@ -371,7 +371,6 @@ static int rcar_du_crtc_mode_set(struct drm_crtc *crtc, | |||
| 371 | goto error; | 371 | goto error; |
| 372 | 372 | ||
| 373 | rcrtc->plane->format = format; | 373 | rcrtc->plane->format = format; |
| 374 | rcrtc->plane->pitch = crtc->fb->pitches[0]; | ||
| 375 | 374 | ||
| 376 | rcrtc->plane->src_x = x; | 375 | rcrtc->plane->src_x = x; |
| 377 | rcrtc->plane->src_y = y; | 376 | rcrtc->plane->src_y = y; |
| @@ -413,7 +412,7 @@ static int rcar_du_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, | |||
| 413 | rcrtc->plane->src_x = x; | 412 | rcrtc->plane->src_x = x; |
| 414 | rcrtc->plane->src_y = y; | 413 | rcrtc->plane->src_y = y; |
| 415 | 414 | ||
| 416 | rcar_du_crtc_update_base(to_rcar_crtc(crtc)); | 415 | rcar_du_crtc_update_base(rcrtc); |
| 417 | 416 | ||
| 418 | return 0; | 417 | return 0; |
| 419 | } | 418 | } |
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index 0023f9719cf1..792fd1d20e86 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c | |||
| @@ -224,7 +224,9 @@ static int rcar_du_probe(struct platform_device *pdev) | |||
| 224 | 224 | ||
| 225 | static int rcar_du_remove(struct platform_device *pdev) | 225 | static int rcar_du_remove(struct platform_device *pdev) |
| 226 | { | 226 | { |
| 227 | drm_platform_exit(&rcar_du_driver, pdev); | 227 | struct rcar_du_device *rcdu = platform_get_drvdata(pdev); |
| 228 | |||
| 229 | drm_put_dev(rcdu->ddev); | ||
| 228 | 230 | ||
| 229 | return 0; | 231 | return 0; |
| 230 | } | 232 | } |
| @@ -249,8 +251,8 @@ static const struct rcar_du_device_info rcar_du_r8a7779_info = { | |||
| 249 | }; | 251 | }; |
| 250 | 252 | ||
| 251 | static const struct rcar_du_device_info rcar_du_r8a7790_info = { | 253 | static const struct rcar_du_device_info rcar_du_r8a7790_info = { |
| 252 | .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_ALIGN_128B | 254 | .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_DEFR8, |
| 253 | | RCAR_DU_FEATURE_DEFR8, | 255 | .quirks = RCAR_DU_QUIRK_ALIGN_128B | RCAR_DU_QUIRK_LVDS_LANES, |
| 254 | .num_crtcs = 3, | 256 | .num_crtcs = 3, |
| 255 | .routes = { | 257 | .routes = { |
| 256 | /* R8A7790 has one RGB output, two LVDS outputs and one | 258 | /* R8A7790 has one RGB output, two LVDS outputs and one |
| @@ -272,9 +274,29 @@ static const struct rcar_du_device_info rcar_du_r8a7790_info = { | |||
| 272 | .num_lvds = 2, | 274 | .num_lvds = 2, |
| 273 | }; | 275 | }; |
| 274 | 276 | ||
| 277 | static const struct rcar_du_device_info rcar_du_r8a7791_info = { | ||
| 278 | .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_DEFR8, | ||
| 279 | .num_crtcs = 2, | ||
| 280 | .routes = { | ||
| 281 | /* R8A7791 has one RGB output, one LVDS output and one | ||
| 282 | * (currently unsupported) TCON output. | ||
| 283 | */ | ||
| 284 | [RCAR_DU_OUTPUT_DPAD0] = { | ||
| 285 | .possible_crtcs = BIT(1), | ||
| 286 | .encoder_type = DRM_MODE_ENCODER_NONE, | ||
| 287 | }, | ||
| 288 | [RCAR_DU_OUTPUT_LVDS0] = { | ||
| 289 | .possible_crtcs = BIT(0), | ||
| 290 | .encoder_type = DRM_MODE_ENCODER_LVDS, | ||
| 291 | }, | ||
| 292 | }, | ||
| 293 | .num_lvds = 1, | ||
| 294 | }; | ||
| 295 | |||
| 275 | static const struct platform_device_id rcar_du_id_table[] = { | 296 | static const struct platform_device_id rcar_du_id_table[] = { |
| 276 | { "rcar-du-r8a7779", (kernel_ulong_t)&rcar_du_r8a7779_info }, | 297 | { "rcar-du-r8a7779", (kernel_ulong_t)&rcar_du_r8a7779_info }, |
| 277 | { "rcar-du-r8a7790", (kernel_ulong_t)&rcar_du_r8a7790_info }, | 298 | { "rcar-du-r8a7790", (kernel_ulong_t)&rcar_du_r8a7790_info }, |
| 299 | { "rcar-du-r8a7791", (kernel_ulong_t)&rcar_du_r8a7791_info }, | ||
| 278 | { } | 300 | { } |
| 279 | }; | 301 | }; |
| 280 | 302 | ||
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h index 65d2d636b002..e31b735d3f25 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h | |||
| @@ -28,8 +28,10 @@ struct rcar_du_device; | |||
| 28 | struct rcar_du_lvdsenc; | 28 | struct rcar_du_lvdsenc; |
| 29 | 29 | ||
| 30 | #define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK (1 << 0) /* Per-CRTC IRQ and clock */ | 30 | #define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK (1 << 0) /* Per-CRTC IRQ and clock */ |
| 31 | #define RCAR_DU_FEATURE_ALIGN_128B (1 << 1) /* Align pitches to 128 bytes */ | 31 | #define RCAR_DU_FEATURE_DEFR8 (1 << 1) /* Has DEFR8 register */ |
| 32 | #define RCAR_DU_FEATURE_DEFR8 (1 << 2) /* Has DEFR8 register */ | 32 | |
| 33 | #define RCAR_DU_QUIRK_ALIGN_128B (1 << 0) /* Align pitches to 128 bytes */ | ||
| 34 | #define RCAR_DU_QUIRK_LVDS_LANES (1 << 1) /* LVDS lanes 1 and 3 inverted */ | ||
| 33 | 35 | ||
| 34 | /* | 36 | /* |
| 35 | * struct rcar_du_output_routing - Output routing specification | 37 | * struct rcar_du_output_routing - Output routing specification |
| @@ -48,12 +50,14 @@ struct rcar_du_output_routing { | |||
| 48 | /* | 50 | /* |
| 49 | * struct rcar_du_device_info - DU model-specific information | 51 | * struct rcar_du_device_info - DU model-specific information |
| 50 | * @features: device features (RCAR_DU_FEATURE_*) | 52 | * @features: device features (RCAR_DU_FEATURE_*) |
| 53 | * @quirks: device quirks (RCAR_DU_QUIRK_*) | ||
| 51 | * @num_crtcs: total number of CRTCs | 54 | * @num_crtcs: total number of CRTCs |
| 52 | * @routes: array of CRTC to output routes, indexed by output (RCAR_DU_OUTPUT_*) | 55 | * @routes: array of CRTC to output routes, indexed by output (RCAR_DU_OUTPUT_*) |
| 53 | * @num_lvds: number of internal LVDS encoders | 56 | * @num_lvds: number of internal LVDS encoders |
| 54 | */ | 57 | */ |
| 55 | struct rcar_du_device_info { | 58 | struct rcar_du_device_info { |
| 56 | unsigned int features; | 59 | unsigned int features; |
| 60 | unsigned int quirks; | ||
| 57 | unsigned int num_crtcs; | 61 | unsigned int num_crtcs; |
| 58 | struct rcar_du_output_routing routes[RCAR_DU_OUTPUT_MAX]; | 62 | struct rcar_du_output_routing routes[RCAR_DU_OUTPUT_MAX]; |
| 59 | unsigned int num_lvds; | 63 | unsigned int num_lvds; |
| @@ -84,6 +88,12 @@ static inline bool rcar_du_has(struct rcar_du_device *rcdu, | |||
| 84 | return rcdu->info->features & feature; | 88 | return rcdu->info->features & feature; |
| 85 | } | 89 | } |
| 86 | 90 | ||
| 91 | static inline bool rcar_du_needs(struct rcar_du_device *rcdu, | ||
| 92 | unsigned int quirk) | ||
| 93 | { | ||
| 94 | return rcdu->info->quirks & quirk; | ||
| 95 | } | ||
| 96 | |||
| 87 | static inline u32 rcar_du_read(struct rcar_du_device *rcdu, u32 reg) | 97 | static inline u32 rcar_du_read(struct rcar_du_device *rcdu, u32 reg) |
| 88 | { | 98 | { |
| 89 | return ioread32(rcdu->mmio + reg); | 99 | return ioread32(rcdu->mmio + reg); |
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c index b31ac080c4a7..fbeabd9a281f 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c | |||
| @@ -119,7 +119,7 @@ int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev, | |||
| 119 | /* The R8A7779 DU requires a 16 pixels pitch alignment as documented, | 119 | /* The R8A7779 DU requires a 16 pixels pitch alignment as documented, |
| 120 | * but the R8A7790 DU seems to require a 128 bytes pitch alignment. | 120 | * but the R8A7790 DU seems to require a 128 bytes pitch alignment. |
| 121 | */ | 121 | */ |
| 122 | if (rcar_du_has(rcdu, RCAR_DU_FEATURE_ALIGN_128B)) | 122 | if (rcar_du_needs(rcdu, RCAR_DU_QUIRK_ALIGN_128B)) |
| 123 | align = 128; | 123 | align = 128; |
| 124 | else | 124 | else |
| 125 | align = 16 * args->bpp / 8; | 125 | align = 16 * args->bpp / 8; |
| @@ -144,7 +144,7 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv, | |||
| 144 | return ERR_PTR(-EINVAL); | 144 | return ERR_PTR(-EINVAL); |
| 145 | } | 145 | } |
| 146 | 146 | ||
| 147 | if (rcar_du_has(rcdu, RCAR_DU_FEATURE_ALIGN_128B)) | 147 | if (rcar_du_needs(rcdu, RCAR_DU_QUIRK_ALIGN_128B)) |
| 148 | align = 128; | 148 | align = 128; |
| 149 | else | 149 | else |
| 150 | align = 16 * format->bpp / 8; | 150 | align = 16 * format->bpp / 8; |
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c index a0f6a1781925..df30a075d793 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c | |||
| @@ -44,6 +44,7 @@ static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds, | |||
| 44 | const struct drm_display_mode *mode = &rcrtc->crtc.mode; | 44 | const struct drm_display_mode *mode = &rcrtc->crtc.mode; |
| 45 | unsigned int freq = mode->clock; | 45 | unsigned int freq = mode->clock; |
| 46 | u32 lvdcr0; | 46 | u32 lvdcr0; |
| 47 | u32 lvdhcr; | ||
| 47 | u32 pllcr; | 48 | u32 pllcr; |
| 48 | int ret; | 49 | int ret; |
| 49 | 50 | ||
| @@ -72,15 +73,19 @@ static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds, | |||
| 72 | * VSYNC -> CTRL1 | 73 | * VSYNC -> CTRL1 |
| 73 | * DISP -> CTRL2 | 74 | * DISP -> CTRL2 |
| 74 | * 0 -> CTRL3 | 75 | * 0 -> CTRL3 |
| 75 | * | ||
| 76 | * Channels 1 and 3 are switched on ES1. | ||
| 77 | */ | 76 | */ |
| 78 | rcar_lvds_write(lvds, LVDCTRCR, LVDCTRCR_CTR3SEL_ZERO | | 77 | rcar_lvds_write(lvds, LVDCTRCR, LVDCTRCR_CTR3SEL_ZERO | |
| 79 | LVDCTRCR_CTR2SEL_DISP | LVDCTRCR_CTR1SEL_VSYNC | | 78 | LVDCTRCR_CTR2SEL_DISP | LVDCTRCR_CTR1SEL_VSYNC | |
| 80 | LVDCTRCR_CTR0SEL_HSYNC); | 79 | LVDCTRCR_CTR0SEL_HSYNC); |
| 81 | rcar_lvds_write(lvds, LVDCHCR, | 80 | |
| 82 | LVDCHCR_CHSEL_CH(0, 0) | LVDCHCR_CHSEL_CH(1, 3) | | 81 | if (rcar_du_needs(lvds->dev, RCAR_DU_QUIRK_LVDS_LANES)) |
| 83 | LVDCHCR_CHSEL_CH(2, 2) | LVDCHCR_CHSEL_CH(3, 1)); | 82 | lvdhcr = LVDCHCR_CHSEL_CH(0, 0) | LVDCHCR_CHSEL_CH(1, 3) |
| 83 | | LVDCHCR_CHSEL_CH(2, 2) | LVDCHCR_CHSEL_CH(3, 1); | ||
| 84 | else | ||
| 85 | lvdhcr = LVDCHCR_CHSEL_CH(0, 0) | LVDCHCR_CHSEL_CH(1, 1) | ||
| 86 | | LVDCHCR_CHSEL_CH(2, 2) | LVDCHCR_CHSEL_CH(3, 3); | ||
| 87 | |||
| 88 | rcar_lvds_write(lvds, LVDCHCR, lvdhcr); | ||
| 84 | 89 | ||
| 85 | /* Select the input, hardcode mode 0, enable LVDS operation and turn | 90 | /* Select the input, hardcode mode 0, enable LVDS operation and turn |
| 86 | * bias circuitry on. | 91 | * bias circuitry on. |
| @@ -144,18 +149,9 @@ static int rcar_du_lvdsenc_get_resources(struct rcar_du_lvdsenc *lvds, | |||
| 144 | sprintf(name, "lvds.%u", lvds->index); | 149 | sprintf(name, "lvds.%u", lvds->index); |
| 145 | 150 | ||
| 146 | mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); | 151 | mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); |
| 147 | if (mem == NULL) { | ||
| 148 | dev_err(&pdev->dev, "failed to get memory resource for %s\n", | ||
| 149 | name); | ||
| 150 | return -EINVAL; | ||
| 151 | } | ||
| 152 | |||
| 153 | lvds->mmio = devm_ioremap_resource(&pdev->dev, mem); | 152 | lvds->mmio = devm_ioremap_resource(&pdev->dev, mem); |
| 154 | if (lvds->mmio == NULL) { | 153 | if (IS_ERR(lvds->mmio)) |
| 155 | dev_err(&pdev->dev, "failed to remap memory resource for %s\n", | 154 | return PTR_ERR(lvds->mmio); |
| 156 | name); | ||
| 157 | return -ENOMEM; | ||
| 158 | } | ||
| 159 | 155 | ||
| 160 | lvds->clock = devm_clk_get(&pdev->dev, name); | 156 | lvds->clock = devm_clk_get(&pdev->dev, name); |
| 161 | if (IS_ERR(lvds->clock)) { | 157 | if (IS_ERR(lvds->clock)) { |
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c index 53000644733f..3fb69d9ae61b 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c | |||
| @@ -104,6 +104,15 @@ void rcar_du_plane_update_base(struct rcar_du_plane *plane) | |||
| 104 | { | 104 | { |
| 105 | struct rcar_du_group *rgrp = plane->group; | 105 | struct rcar_du_group *rgrp = plane->group; |
| 106 | unsigned int index = plane->hwindex; | 106 | unsigned int index = plane->hwindex; |
| 107 | u32 mwr; | ||
| 108 | |||
| 109 | /* Memory pitch (expressed in pixels) */ | ||
| 110 | if (plane->format->planes == 2) | ||
| 111 | mwr = plane->pitch; | ||
| 112 | else | ||
| 113 | mwr = plane->pitch * 8 / plane->format->bpp; | ||
| 114 | |||
| 115 | rcar_du_plane_write(rgrp, index, PnMWR, mwr); | ||
| 107 | 116 | ||
| 108 | /* The Y position is expressed in raster line units and must be doubled | 117 | /* The Y position is expressed in raster line units and must be doubled |
| 109 | * for 32bpp formats, according to the R8A7790 datasheet. No mention of | 118 | * for 32bpp formats, according to the R8A7790 datasheet. No mention of |
| @@ -133,6 +142,8 @@ void rcar_du_plane_compute_base(struct rcar_du_plane *plane, | |||
| 133 | { | 142 | { |
| 134 | struct drm_gem_cma_object *gem; | 143 | struct drm_gem_cma_object *gem; |
| 135 | 144 | ||
| 145 | plane->pitch = fb->pitches[0]; | ||
| 146 | |||
| 136 | gem = drm_fb_cma_get_gem_obj(fb, 0); | 147 | gem = drm_fb_cma_get_gem_obj(fb, 0); |
| 137 | plane->dma[0] = gem->paddr + fb->offsets[0]; | 148 | plane->dma[0] = gem->paddr + fb->offsets[0]; |
| 138 | 149 | ||
| @@ -209,7 +220,6 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane, | |||
| 209 | struct rcar_du_group *rgrp = plane->group; | 220 | struct rcar_du_group *rgrp = plane->group; |
| 210 | u32 ddcr2 = PnDDCR2_CODE; | 221 | u32 ddcr2 = PnDDCR2_CODE; |
| 211 | u32 ddcr4; | 222 | u32 ddcr4; |
| 212 | u32 mwr; | ||
| 213 | 223 | ||
| 214 | /* Data format | 224 | /* Data format |
| 215 | * | 225 | * |
| @@ -240,14 +250,6 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane, | |||
| 240 | rcar_du_plane_write(rgrp, index, PnDDCR2, ddcr2); | 250 | rcar_du_plane_write(rgrp, index, PnDDCR2, ddcr2); |
| 241 | rcar_du_plane_write(rgrp, index, PnDDCR4, ddcr4); | 251 | rcar_du_plane_write(rgrp, index, PnDDCR4, ddcr4); |
| 242 | 252 | ||
| 243 | /* Memory pitch (expressed in pixels) */ | ||
| 244 | if (plane->format->planes == 2) | ||
| 245 | mwr = plane->pitch; | ||
| 246 | else | ||
| 247 | mwr = plane->pitch * 8 / plane->format->bpp; | ||
| 248 | |||
| 249 | rcar_du_plane_write(rgrp, index, PnMWR, mwr); | ||
| 250 | |||
| 251 | /* Destination position and size */ | 253 | /* Destination position and size */ |
| 252 | rcar_du_plane_write(rgrp, index, PnDSXR, plane->width); | 254 | rcar_du_plane_write(rgrp, index, PnDSXR, plane->width); |
| 253 | rcar_du_plane_write(rgrp, index, PnDSYR, plane->height); | 255 | rcar_du_plane_write(rgrp, index, PnDSYR, plane->height); |
| @@ -309,7 +311,6 @@ rcar_du_plane_update(struct drm_plane *plane, struct drm_crtc *crtc, | |||
| 309 | 311 | ||
| 310 | rplane->crtc = crtc; | 312 | rplane->crtc = crtc; |
| 311 | rplane->format = format; | 313 | rplane->format = format; |
| 312 | rplane->pitch = fb->pitches[0]; | ||
| 313 | 314 | ||
| 314 | rplane->src_x = src_x >> 16; | 315 | rplane->src_x = src_x >> 16; |
| 315 | rplane->src_y = src_y >> 16; | 316 | rplane->src_y = src_y >> 16; |
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c index b17d0710871a..d2b2df9e26f3 100644 --- a/drivers/gpu/drm/savage/savage_bci.c +++ b/drivers/gpu/drm/savage/savage_bci.c | |||
| @@ -49,7 +49,7 @@ savage_bci_wait_fifo_shadow(drm_savage_private_t * dev_priv, unsigned int n) | |||
| 49 | #endif | 49 | #endif |
| 50 | 50 | ||
| 51 | for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) { | 51 | for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) { |
| 52 | DRM_MEMORYBARRIER(); | 52 | mb(); |
| 53 | status = dev_priv->status_ptr[0]; | 53 | status = dev_priv->status_ptr[0]; |
| 54 | if ((status & mask) < threshold) | 54 | if ((status & mask) < threshold) |
| 55 | return 0; | 55 | return 0; |
| @@ -123,7 +123,7 @@ savage_bci_wait_event_shadow(drm_savage_private_t * dev_priv, uint16_t e) | |||
| 123 | int i; | 123 | int i; |
| 124 | 124 | ||
| 125 | for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) { | 125 | for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) { |
| 126 | DRM_MEMORYBARRIER(); | 126 | mb(); |
| 127 | status = dev_priv->status_ptr[1]; | 127 | status = dev_priv->status_ptr[1]; |
| 128 | if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff || | 128 | if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff || |
| 129 | (status & 0xffff) == 0) | 129 | (status & 0xffff) == 0) |
| @@ -449,7 +449,7 @@ static void savage_dma_flush(drm_savage_private_t * dev_priv) | |||
| 449 | } | 449 | } |
| 450 | } | 450 | } |
| 451 | 451 | ||
| 452 | DRM_MEMORYBARRIER(); | 452 | mb(); |
| 453 | 453 | ||
| 454 | /* do flush ... */ | 454 | /* do flush ... */ |
| 455 | phys_addr = dev_priv->cmd_dma->offset + | 455 | phys_addr = dev_priv->cmd_dma->offset + |
| @@ -990,10 +990,10 @@ static int savage_bci_get_buffers(struct drm_device *dev, | |||
| 990 | 990 | ||
| 991 | buf->file_priv = file_priv; | 991 | buf->file_priv = file_priv; |
| 992 | 992 | ||
| 993 | if (DRM_COPY_TO_USER(&d->request_indices[i], | 993 | if (copy_to_user(&d->request_indices[i], |
| 994 | &buf->idx, sizeof(buf->idx))) | 994 | &buf->idx, sizeof(buf->idx))) |
| 995 | return -EFAULT; | 995 | return -EFAULT; |
| 996 | if (DRM_COPY_TO_USER(&d->request_sizes[i], | 996 | if (copy_to_user(&d->request_sizes[i], |
| 997 | &buf->total, sizeof(buf->total))) | 997 | &buf->total, sizeof(buf->total))) |
| 998 | return -EFAULT; | 998 | return -EFAULT; |
| 999 | 999 | ||
diff --git a/drivers/gpu/drm/savage/savage_state.c b/drivers/gpu/drm/savage/savage_state.c index b35e75ed890c..c01ad0aeaa58 100644 --- a/drivers/gpu/drm/savage/savage_state.c +++ b/drivers/gpu/drm/savage/savage_state.c | |||
| @@ -992,7 +992,7 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_ | |||
| 992 | if (kcmd_addr == NULL) | 992 | if (kcmd_addr == NULL) |
| 993 | return -ENOMEM; | 993 | return -ENOMEM; |
| 994 | 994 | ||
| 995 | if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf->cmd_addr, | 995 | if (copy_from_user(kcmd_addr, cmdbuf->cmd_addr, |
| 996 | cmdbuf->size * 8)) | 996 | cmdbuf->size * 8)) |
| 997 | { | 997 | { |
| 998 | kfree(kcmd_addr); | 998 | kfree(kcmd_addr); |
| @@ -1007,7 +1007,7 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_ | |||
| 1007 | goto done; | 1007 | goto done; |
| 1008 | } | 1008 | } |
| 1009 | 1009 | ||
| 1010 | if (DRM_COPY_FROM_USER(kvb_addr, cmdbuf->vb_addr, | 1010 | if (copy_from_user(kvb_addr, cmdbuf->vb_addr, |
| 1011 | cmdbuf->vb_size)) { | 1011 | cmdbuf->vb_size)) { |
| 1012 | ret = -EFAULT; | 1012 | ret = -EFAULT; |
| 1013 | goto done; | 1013 | goto done; |
| @@ -1022,7 +1022,7 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_ | |||
| 1022 | goto done; | 1022 | goto done; |
| 1023 | } | 1023 | } |
| 1024 | 1024 | ||
| 1025 | if (DRM_COPY_FROM_USER(kbox_addr, cmdbuf->box_addr, | 1025 | if (copy_from_user(kbox_addr, cmdbuf->box_addr, |
| 1026 | cmdbuf->nbox * sizeof(struct drm_clip_rect))) { | 1026 | cmdbuf->nbox * sizeof(struct drm_clip_rect))) { |
| 1027 | ret = -EFAULT; | 1027 | ret = -EFAULT; |
| 1028 | goto done; | 1028 | goto done; |
| @@ -1032,7 +1032,7 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_ | |||
| 1032 | 1032 | ||
| 1033 | /* Make sure writes to DMA buffers are finished before sending | 1033 | /* Make sure writes to DMA buffers are finished before sending |
| 1034 | * DMA commands to the graphics hardware. */ | 1034 | * DMA commands to the graphics hardware. */ |
| 1035 | DRM_MEMORYBARRIER(); | 1035 | mb(); |
| 1036 | 1036 | ||
| 1037 | /* Coming from user space. Don't know if the Xserver has | 1037 | /* Coming from user space. Don't know if the Xserver has |
| 1038 | * emitted wait commands. Assuming the worst. */ | 1038 | * emitted wait commands. Assuming the worst. */ |
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c index 562f9a401cf6..0428076f1ce8 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c +++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c | |||
| @@ -37,14 +37,21 @@ | |||
| 37 | * Clock management | 37 | * Clock management |
| 38 | */ | 38 | */ |
| 39 | 39 | ||
| 40 | static void shmob_drm_clk_on(struct shmob_drm_device *sdev) | 40 | static int shmob_drm_clk_on(struct shmob_drm_device *sdev) |
| 41 | { | 41 | { |
| 42 | if (sdev->clock) | 42 | int ret; |
| 43 | clk_prepare_enable(sdev->clock); | 43 | |
| 44 | if (sdev->clock) { | ||
| 45 | ret = clk_prepare_enable(sdev->clock); | ||
| 46 | if (ret < 0) | ||
| 47 | return ret; | ||
| 48 | } | ||
| 44 | #if 0 | 49 | #if 0 |
| 45 | if (sdev->meram_dev && sdev->meram_dev->pdev) | 50 | if (sdev->meram_dev && sdev->meram_dev->pdev) |
| 46 | pm_runtime_get_sync(&sdev->meram_dev->pdev->dev); | 51 | pm_runtime_get_sync(&sdev->meram_dev->pdev->dev); |
| 47 | #endif | 52 | #endif |
| 53 | |||
| 54 | return 0; | ||
| 48 | } | 55 | } |
| 49 | 56 | ||
| 50 | static void shmob_drm_clk_off(struct shmob_drm_device *sdev) | 57 | static void shmob_drm_clk_off(struct shmob_drm_device *sdev) |
| @@ -161,6 +168,7 @@ static void shmob_drm_crtc_start(struct shmob_drm_crtc *scrtc) | |||
| 161 | struct drm_device *dev = sdev->ddev; | 168 | struct drm_device *dev = sdev->ddev; |
| 162 | struct drm_plane *plane; | 169 | struct drm_plane *plane; |
| 163 | u32 value; | 170 | u32 value; |
| 171 | int ret; | ||
| 164 | 172 | ||
| 165 | if (scrtc->started) | 173 | if (scrtc->started) |
| 166 | return; | 174 | return; |
| @@ -170,7 +178,9 @@ static void shmob_drm_crtc_start(struct shmob_drm_crtc *scrtc) | |||
| 170 | return; | 178 | return; |
| 171 | 179 | ||
| 172 | /* Enable clocks before accessing the hardware. */ | 180 | /* Enable clocks before accessing the hardware. */ |
| 173 | shmob_drm_clk_on(sdev); | 181 | ret = shmob_drm_clk_on(sdev); |
| 182 | if (ret < 0) | ||
| 183 | return; | ||
| 174 | 184 | ||
| 175 | /* Reset and enable the LCDC. */ | 185 | /* Reset and enable the LCDC. */ |
| 176 | lcdc_write(sdev, LDCNT2R, lcdc_read(sdev, LDCNT2R) | LDCNT2R_BR); | 186 | lcdc_write(sdev, LDCNT2R, lcdc_read(sdev, LDCNT2R) | LDCNT2R_BR); |
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c index 015551866b4a..c839c9c89efb 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c +++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c | |||
| @@ -336,7 +336,9 @@ static int shmob_drm_probe(struct platform_device *pdev) | |||
| 336 | 336 | ||
| 337 | static int shmob_drm_remove(struct platform_device *pdev) | 337 | static int shmob_drm_remove(struct platform_device *pdev) |
| 338 | { | 338 | { |
| 339 | drm_platform_exit(&shmob_drm_driver, pdev); | 339 | struct shmob_drm_device *sdev = platform_get_drvdata(pdev); |
| 340 | |||
| 341 | drm_put_dev(sdev->ddev); | ||
| 340 | 342 | ||
| 341 | return 0; | 343 | return 0; |
| 342 | } | 344 | } |
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c index 4383b74a3aa4..756f787b7143 100644 --- a/drivers/gpu/drm/sis/sis_drv.c +++ b/drivers/gpu/drm/sis/sis_drv.c | |||
| @@ -94,7 +94,7 @@ static int sis_driver_open(struct drm_device *dev, struct drm_file *file) | |||
| 94 | return 0; | 94 | return 0; |
| 95 | } | 95 | } |
| 96 | 96 | ||
| 97 | void sis_driver_postclose(struct drm_device *dev, struct drm_file *file) | 97 | static void sis_driver_postclose(struct drm_device *dev, struct drm_file *file) |
| 98 | { | 98 | { |
| 99 | struct sis_file_private *file_priv = file->driver_priv; | 99 | struct sis_file_private *file_priv = file->driver_priv; |
| 100 | 100 | ||
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c index 01857d836350..0573be0d2933 100644 --- a/drivers/gpu/drm/sis/sis_mm.c +++ b/drivers/gpu/drm/sis/sis_mm.c | |||
| @@ -266,7 +266,7 @@ int sis_idle(struct drm_device *dev) | |||
| 266 | * because its polling frequency is too low. | 266 | * because its polling frequency is too low. |
| 267 | */ | 267 | */ |
| 268 | 268 | ||
| 269 | end = jiffies + (DRM_HZ * 3); | 269 | end = jiffies + (HZ * 3); |
| 270 | 270 | ||
| 271 | for (i = 0; i < 4; ++i) { | 271 | for (i = 0; i < 4; ++i) { |
| 272 | do { | 272 | do { |
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig index 8db9b3bce001..354ddb29231f 100644 --- a/drivers/gpu/drm/tegra/Kconfig +++ b/drivers/gpu/drm/tegra/Kconfig | |||
| @@ -1,14 +1,12 @@ | |||
| 1 | config DRM_TEGRA | 1 | config DRM_TEGRA |
| 2 | bool "NVIDIA Tegra DRM" | 2 | tristate "NVIDIA Tegra DRM" |
| 3 | depends on ARCH_TEGRA || ARCH_MULTIPLATFORM | 3 | depends on ARCH_TEGRA || (ARM && COMPILE_TEST) |
| 4 | depends on DRM | 4 | depends on DRM |
| 5 | depends on RESET_CONTROLLER | 5 | depends on RESET_CONTROLLER |
| 6 | select TEGRA_HOST1X | ||
| 7 | select DRM_KMS_HELPER | 6 | select DRM_KMS_HELPER |
| 8 | select DRM_KMS_FB_HELPER | 7 | select DRM_MIPI_DSI |
| 9 | select FB_SYS_FILLRECT | 8 | select DRM_PANEL |
| 10 | select FB_SYS_COPYAREA | 9 | select TEGRA_HOST1X |
| 11 | select FB_SYS_IMAGEBLIT | ||
| 12 | help | 10 | help |
| 13 | Choose this option if you have an NVIDIA Tegra SoC. | 11 | Choose this option if you have an NVIDIA Tegra SoC. |
| 14 | 12 | ||
| @@ -17,6 +15,18 @@ config DRM_TEGRA | |||
| 17 | 15 | ||
| 18 | if DRM_TEGRA | 16 | if DRM_TEGRA |
| 19 | 17 | ||
| 18 | config DRM_TEGRA_FBDEV | ||
| 19 | bool "Enable legacy fbdev support" | ||
| 20 | select DRM_KMS_FB_HELPER | ||
| 21 | select FB_SYS_FILLRECT | ||
| 22 | select FB_SYS_COPYAREA | ||
| 23 | select FB_SYS_IMAGEBLIT | ||
| 24 | default y | ||
| 25 | help | ||
| 26 | Choose this option if you have a need for the legacy fbdev support. | ||
| 27 | Note that this support also provides the Linux console on top of | ||
| 28 | the Tegra modesetting driver. | ||
| 29 | |||
| 20 | config DRM_TEGRA_DEBUG | 30 | config DRM_TEGRA_DEBUG |
| 21 | bool "NVIDIA Tegra DRM debug support" | 31 | bool "NVIDIA Tegra DRM debug support" |
| 22 | help | 32 | help |
diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile index edc76abd58bb..8d220afbd85f 100644 --- a/drivers/gpu/drm/tegra/Makefile +++ b/drivers/gpu/drm/tegra/Makefile | |||
| @@ -9,6 +9,8 @@ tegra-drm-y := \ | |||
| 9 | output.o \ | 9 | output.o \ |
| 10 | rgb.o \ | 10 | rgb.o \ |
| 11 | hdmi.o \ | 11 | hdmi.o \ |
| 12 | mipi-phy.o \ | ||
| 13 | dsi.o \ | ||
| 12 | gr2d.o \ | 14 | gr2d.o \ |
| 13 | gr3d.o | 15 | gr3d.o |
| 14 | 16 | ||
diff --git a/drivers/gpu/drm/tegra/bus.c b/drivers/gpu/drm/tegra/bus.c index 565f8f7b9a47..e38e5967d77b 100644 --- a/drivers/gpu/drm/tegra/bus.c +++ b/drivers/gpu/drm/tegra/bus.c | |||
| @@ -46,7 +46,6 @@ int drm_host1x_init(struct drm_driver *driver, struct host1x_device *device) | |||
| 46 | struct drm_device *drm; | 46 | struct drm_device *drm; |
| 47 | int ret; | 47 | int ret; |
| 48 | 48 | ||
| 49 | INIT_LIST_HEAD(&driver->device_list); | ||
| 50 | driver->bus = &drm_host1x_bus; | 49 | driver->bus = &drm_host1x_bus; |
| 51 | 50 | ||
| 52 | drm = drm_dev_alloc(driver, &device->dev); | 51 | drm = drm_dev_alloc(driver, &device->dev); |
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index cd7f1e499616..9336006b475d 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c | |||
| @@ -15,6 +15,10 @@ | |||
| 15 | #include "drm.h" | 15 | #include "drm.h" |
| 16 | #include "gem.h" | 16 | #include "gem.h" |
| 17 | 17 | ||
| 18 | struct tegra_dc_soc_info { | ||
| 19 | bool supports_interlacing; | ||
| 20 | }; | ||
| 21 | |||
| 18 | struct tegra_plane { | 22 | struct tegra_plane { |
| 19 | struct drm_plane base; | 23 | struct drm_plane base; |
| 20 | unsigned int index; | 24 | unsigned int index; |
| @@ -658,19 +662,12 @@ static int tegra_crtc_mode_set(struct drm_crtc *crtc, | |||
| 658 | /* program display mode */ | 662 | /* program display mode */ |
| 659 | tegra_dc_set_timings(dc, mode); | 663 | tegra_dc_set_timings(dc, mode); |
| 660 | 664 | ||
| 661 | value = DE_SELECT_ACTIVE | DE_CONTROL_NORMAL; | 665 | /* interlacing isn't supported yet, so disable it */ |
| 662 | tegra_dc_writel(dc, value, DC_DISP_DATA_ENABLE_OPTIONS); | 666 | if (dc->soc->supports_interlacing) { |
| 663 | 667 | value = tegra_dc_readl(dc, DC_DISP_INTERLACE_CONTROL); | |
| 664 | value = tegra_dc_readl(dc, DC_COM_PIN_OUTPUT_POLARITY(1)); | 668 | value &= ~INTERLACE_ENABLE; |
| 665 | value &= ~LVS_OUTPUT_POLARITY_LOW; | 669 | tegra_dc_writel(dc, value, DC_DISP_INTERLACE_CONTROL); |
| 666 | value &= ~LHS_OUTPUT_POLARITY_LOW; | 670 | } |
| 667 | tegra_dc_writel(dc, value, DC_COM_PIN_OUTPUT_POLARITY(1)); | ||
| 668 | |||
| 669 | value = DISP_DATA_FORMAT_DF1P1C | DISP_ALIGNMENT_MSB | | ||
| 670 | DISP_ORDER_RED_BLUE; | ||
| 671 | tegra_dc_writel(dc, value, DC_DISP_DISP_INTERFACE_CONTROL); | ||
| 672 | |||
| 673 | tegra_dc_writel(dc, 0x00010001, DC_DISP_SHIFT_CLOCK_OPTIONS); | ||
| 674 | 671 | ||
| 675 | value = SHIFT_CLK_DIVIDER(div) | PIXEL_CLK_DIVIDER_PCD1; | 672 | value = SHIFT_CLK_DIVIDER(div) | PIXEL_CLK_DIVIDER_PCD1; |
| 676 | tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL); | 673 | tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL); |
| @@ -735,10 +732,6 @@ static void tegra_crtc_prepare(struct drm_crtc *crtc) | |||
| 735 | PW4_ENABLE | PM0_ENABLE | PM1_ENABLE; | 732 | PW4_ENABLE | PM0_ENABLE | PM1_ENABLE; |
| 736 | tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL); | 733 | tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL); |
| 737 | 734 | ||
| 738 | value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND); | ||
| 739 | value |= DISP_CTRL_MODE_C_DISPLAY; | ||
| 740 | tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND); | ||
| 741 | |||
| 742 | /* initialize timer */ | 735 | /* initialize timer */ |
| 743 | value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) | | 736 | value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) | |
| 744 | WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20); | 737 | WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20); |
| @@ -1107,8 +1100,6 @@ static int tegra_dc_init(struct host1x_client *client) | |||
| 1107 | struct tegra_dc *dc = host1x_client_to_dc(client); | 1100 | struct tegra_dc *dc = host1x_client_to_dc(client); |
| 1108 | int err; | 1101 | int err; |
| 1109 | 1102 | ||
| 1110 | dc->pipe = tegra->drm->mode_config.num_crtc; | ||
| 1111 | |||
| 1112 | drm_crtc_init(tegra->drm, &dc->base, &tegra_crtc_funcs); | 1103 | drm_crtc_init(tegra->drm, &dc->base, &tegra_crtc_funcs); |
| 1113 | drm_mode_crtc_set_gamma_size(&dc->base, 256); | 1104 | drm_mode_crtc_set_gamma_size(&dc->base, 256); |
| 1114 | drm_crtc_helper_add(&dc->base, &tegra_crtc_helper_funcs); | 1105 | drm_crtc_helper_add(&dc->base, &tegra_crtc_helper_funcs); |
| @@ -1167,8 +1158,71 @@ static const struct host1x_client_ops dc_client_ops = { | |||
| 1167 | .exit = tegra_dc_exit, | 1158 | .exit = tegra_dc_exit, |
| 1168 | }; | 1159 | }; |
| 1169 | 1160 | ||
| 1161 | static const struct tegra_dc_soc_info tegra20_dc_soc_info = { | ||
| 1162 | .supports_interlacing = false, | ||
| 1163 | }; | ||
| 1164 | |||
| 1165 | static const struct tegra_dc_soc_info tegra30_dc_soc_info = { | ||
| 1166 | .supports_interlacing = false, | ||
| 1167 | }; | ||
| 1168 | |||
| 1169 | static const struct tegra_dc_soc_info tegra124_dc_soc_info = { | ||
| 1170 | .supports_interlacing = true, | ||
| 1171 | }; | ||
| 1172 | |||
| 1173 | static const struct of_device_id tegra_dc_of_match[] = { | ||
| 1174 | { | ||
| 1175 | .compatible = "nvidia,tegra124-dc", | ||
| 1176 | .data = &tegra124_dc_soc_info, | ||
| 1177 | }, { | ||
| 1178 | .compatible = "nvidia,tegra30-dc", | ||
| 1179 | .data = &tegra30_dc_soc_info, | ||
| 1180 | }, { | ||
| 1181 | .compatible = "nvidia,tegra20-dc", | ||
| 1182 | .data = &tegra20_dc_soc_info, | ||
| 1183 | }, { | ||
| 1184 | /* sentinel */ | ||
| 1185 | } | ||
| 1186 | }; | ||
| 1187 | |||
| 1188 | static int tegra_dc_parse_dt(struct tegra_dc *dc) | ||
| 1189 | { | ||
| 1190 | struct device_node *np; | ||
| 1191 | u32 value = 0; | ||
| 1192 | int err; | ||
| 1193 | |||
| 1194 | err = of_property_read_u32(dc->dev->of_node, "nvidia,head", &value); | ||
| 1195 | if (err < 0) { | ||
| 1196 | dev_err(dc->dev, "missing \"nvidia,head\" property\n"); | ||
| 1197 | |||
| 1198 | /* | ||
| 1199 | * If the nvidia,head property isn't present, try to find the | ||
| 1200 | * correct head number by looking up the position of this | ||
| 1201 | * display controller's node within the device tree. Assuming | ||
| 1202 | * that the nodes are ordered properly in the DTS file and | ||
| 1203 | * that the translation into a flattened device tree blob | ||
| 1204 | * preserves that ordering this will actually yield the right | ||
| 1205 | * head number. | ||
| 1206 | * | ||
| 1207 | * If those assumptions don't hold, this will still work for | ||
| 1208 | * cases where only a single display controller is used. | ||
| 1209 | */ | ||
| 1210 | for_each_matching_node(np, tegra_dc_of_match) { | ||
| 1211 | if (np == dc->dev->of_node) | ||
| 1212 | break; | ||
| 1213 | |||
| 1214 | value++; | ||
| 1215 | } | ||
| 1216 | } | ||
| 1217 | |||
| 1218 | dc->pipe = value; | ||
| 1219 | |||
| 1220 | return 0; | ||
| 1221 | } | ||
| 1222 | |||
| 1170 | static int tegra_dc_probe(struct platform_device *pdev) | 1223 | static int tegra_dc_probe(struct platform_device *pdev) |
| 1171 | { | 1224 | { |
| 1225 | const struct of_device_id *id; | ||
| 1172 | struct resource *regs; | 1226 | struct resource *regs; |
| 1173 | struct tegra_dc *dc; | 1227 | struct tegra_dc *dc; |
| 1174 | int err; | 1228 | int err; |
| @@ -1177,9 +1231,18 @@ static int tegra_dc_probe(struct platform_device *pdev) | |||
| 1177 | if (!dc) | 1231 | if (!dc) |
| 1178 | return -ENOMEM; | 1232 | return -ENOMEM; |
| 1179 | 1233 | ||
| 1234 | id = of_match_node(tegra_dc_of_match, pdev->dev.of_node); | ||
| 1235 | if (!id) | ||
| 1236 | return -ENODEV; | ||
| 1237 | |||
| 1180 | spin_lock_init(&dc->lock); | 1238 | spin_lock_init(&dc->lock); |
| 1181 | INIT_LIST_HEAD(&dc->list); | 1239 | INIT_LIST_HEAD(&dc->list); |
| 1182 | dc->dev = &pdev->dev; | 1240 | dc->dev = &pdev->dev; |
| 1241 | dc->soc = id->data; | ||
| 1242 | |||
| 1243 | err = tegra_dc_parse_dt(dc); | ||
| 1244 | if (err < 0) | ||
| 1245 | return err; | ||
| 1183 | 1246 | ||
| 1184 | dc->clk = devm_clk_get(&pdev->dev, NULL); | 1247 | dc->clk = devm_clk_get(&pdev->dev, NULL); |
| 1185 | if (IS_ERR(dc->clk)) { | 1248 | if (IS_ERR(dc->clk)) { |
| @@ -1253,12 +1316,6 @@ static int tegra_dc_remove(struct platform_device *pdev) | |||
| 1253 | return 0; | 1316 | return 0; |
| 1254 | } | 1317 | } |
| 1255 | 1318 | ||
| 1256 | static struct of_device_id tegra_dc_of_match[] = { | ||
| 1257 | { .compatible = "nvidia,tegra30-dc", }, | ||
| 1258 | { .compatible = "nvidia,tegra20-dc", }, | ||
| 1259 | { }, | ||
| 1260 | }; | ||
| 1261 | |||
| 1262 | struct platform_driver tegra_dc_driver = { | 1319 | struct platform_driver tegra_dc_driver = { |
| 1263 | .driver = { | 1320 | .driver = { |
| 1264 | .name = "tegra-dc", | 1321 | .name = "tegra-dc", |
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h index 91bbda291470..3c2c0ea1cd87 100644 --- a/drivers/gpu/drm/tegra/dc.h +++ b/drivers/gpu/drm/tegra/dc.h | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #define DISP_CTRL_MODE_STOP (0 << 5) | 28 | #define DISP_CTRL_MODE_STOP (0 << 5) |
| 29 | #define DISP_CTRL_MODE_C_DISPLAY (1 << 5) | 29 | #define DISP_CTRL_MODE_C_DISPLAY (1 << 5) |
| 30 | #define DISP_CTRL_MODE_NC_DISPLAY (2 << 5) | 30 | #define DISP_CTRL_MODE_NC_DISPLAY (2 << 5) |
| 31 | #define DISP_CTRL_MODE_MASK (3 << 5) | ||
| 31 | #define DC_CMD_SIGNAL_RAISE 0x033 | 32 | #define DC_CMD_SIGNAL_RAISE 0x033 |
| 32 | #define DC_CMD_DISPLAY_POWER_CONTROL 0x036 | 33 | #define DC_CMD_DISPLAY_POWER_CONTROL 0x036 |
| 33 | #define PW0_ENABLE (1 << 0) | 34 | #define PW0_ENABLE (1 << 0) |
| @@ -116,6 +117,7 @@ | |||
| 116 | 117 | ||
| 117 | #define DC_DISP_DISP_WIN_OPTIONS 0x402 | 118 | #define DC_DISP_DISP_WIN_OPTIONS 0x402 |
| 118 | #define HDMI_ENABLE (1 << 30) | 119 | #define HDMI_ENABLE (1 << 30) |
| 120 | #define DSI_ENABLE (1 << 29) | ||
| 119 | 121 | ||
| 120 | #define DC_DISP_DISP_MEM_HIGH_PRIORITY 0x403 | 122 | #define DC_DISP_DISP_MEM_HIGH_PRIORITY 0x403 |
| 121 | #define CURSOR_THRESHOLD(x) (((x) & 0x03) << 24) | 123 | #define CURSOR_THRESHOLD(x) (((x) & 0x03) << 24) |
| @@ -238,6 +240,8 @@ | |||
| 238 | #define DITHER_CONTROL_ERRDIFF (3 << 8) | 240 | #define DITHER_CONTROL_ERRDIFF (3 << 8) |
| 239 | 241 | ||
| 240 | #define DC_DISP_SHIFT_CLOCK_OPTIONS 0x431 | 242 | #define DC_DISP_SHIFT_CLOCK_OPTIONS 0x431 |
| 243 | #define SC1_H_QUALIFIER_NONE (1 << 16) | ||
| 244 | #define SC0_H_QUALIFIER_NONE (1 << 0) | ||
| 241 | 245 | ||
| 242 | #define DC_DISP_DATA_ENABLE_OPTIONS 0x432 | 246 | #define DC_DISP_DATA_ENABLE_OPTIONS 0x432 |
| 243 | #define DE_SELECT_ACTIVE_BLANK (0 << 0) | 247 | #define DE_SELECT_ACTIVE_BLANK (0 << 0) |
| @@ -292,6 +296,11 @@ | |||
| 292 | #define DC_DISP_SD_HW_K_VALUES 0x4dd | 296 | #define DC_DISP_SD_HW_K_VALUES 0x4dd |
| 293 | #define DC_DISP_SD_MAN_K_VALUES 0x4de | 297 | #define DC_DISP_SD_MAN_K_VALUES 0x4de |
| 294 | 298 | ||
| 299 | #define DC_DISP_INTERLACE_CONTROL 0x4e5 | ||
| 300 | #define INTERLACE_STATUS (1 << 2) | ||
| 301 | #define INTERLACE_START (1 << 1) | ||
| 302 | #define INTERLACE_ENABLE (1 << 0) | ||
| 303 | |||
| 295 | #define DC_WIN_CSC_YOF 0x611 | 304 | #define DC_WIN_CSC_YOF 0x611 |
| 296 | #define DC_WIN_CSC_KYRGB 0x612 | 305 | #define DC_WIN_CSC_KYRGB 0x612 |
| 297 | #define DC_WIN_CSC_KUR 0x613 | 306 | #define DC_WIN_CSC_KUR 0x613 |
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 07eba596d458..88a529008ce0 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c | |||
| @@ -104,9 +104,11 @@ static void tegra_drm_context_free(struct tegra_drm_context *context) | |||
| 104 | 104 | ||
| 105 | static void tegra_drm_lastclose(struct drm_device *drm) | 105 | static void tegra_drm_lastclose(struct drm_device *drm) |
| 106 | { | 106 | { |
| 107 | #ifdef CONFIG_TEGRA_DRM_FBDEV | ||
| 107 | struct tegra_drm *tegra = drm->dev_private; | 108 | struct tegra_drm *tegra = drm->dev_private; |
| 108 | 109 | ||
| 109 | tegra_fbdev_restore_mode(tegra->fbdev); | 110 | tegra_fbdev_restore_mode(tegra->fbdev); |
| 111 | #endif | ||
| 110 | } | 112 | } |
| 111 | 113 | ||
| 112 | static struct host1x_bo * | 114 | static struct host1x_bo * |
| @@ -578,7 +580,7 @@ static void tegra_debugfs_cleanup(struct drm_minor *minor) | |||
| 578 | #endif | 580 | #endif |
| 579 | 581 | ||
| 580 | static struct drm_driver tegra_drm_driver = { | 582 | static struct drm_driver tegra_drm_driver = { |
| 581 | .driver_features = DRIVER_MODESET | DRIVER_GEM, | 583 | .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME, |
| 582 | .load = tegra_drm_load, | 584 | .load = tegra_drm_load, |
| 583 | .unload = tegra_drm_unload, | 585 | .unload = tegra_drm_unload, |
| 584 | .open = tegra_drm_open, | 586 | .open = tegra_drm_open, |
| @@ -596,6 +598,12 @@ static struct drm_driver tegra_drm_driver = { | |||
| 596 | 598 | ||
| 597 | .gem_free_object = tegra_bo_free_object, | 599 | .gem_free_object = tegra_bo_free_object, |
| 598 | .gem_vm_ops = &tegra_bo_vm_ops, | 600 | .gem_vm_ops = &tegra_bo_vm_ops, |
| 601 | |||
| 602 | .prime_handle_to_fd = drm_gem_prime_handle_to_fd, | ||
| 603 | .prime_fd_to_handle = drm_gem_prime_fd_to_handle, | ||
| 604 | .gem_prime_export = tegra_gem_prime_export, | ||
| 605 | .gem_prime_import = tegra_gem_prime_import, | ||
| 606 | |||
| 599 | .dumb_create = tegra_bo_dumb_create, | 607 | .dumb_create = tegra_bo_dumb_create, |
| 600 | .dumb_map_offset = tegra_bo_dumb_map_offset, | 608 | .dumb_map_offset = tegra_bo_dumb_map_offset, |
| 601 | .dumb_destroy = drm_gem_dumb_destroy, | 609 | .dumb_destroy = drm_gem_dumb_destroy, |
| @@ -653,8 +661,10 @@ static const struct of_device_id host1x_drm_subdevs[] = { | |||
| 653 | { .compatible = "nvidia,tegra30-hdmi", }, | 661 | { .compatible = "nvidia,tegra30-hdmi", }, |
| 654 | { .compatible = "nvidia,tegra30-gr2d", }, | 662 | { .compatible = "nvidia,tegra30-gr2d", }, |
| 655 | { .compatible = "nvidia,tegra30-gr3d", }, | 663 | { .compatible = "nvidia,tegra30-gr3d", }, |
| 664 | { .compatible = "nvidia,tegra114-dsi", }, | ||
| 656 | { .compatible = "nvidia,tegra114-hdmi", }, | 665 | { .compatible = "nvidia,tegra114-hdmi", }, |
| 657 | { .compatible = "nvidia,tegra114-gr3d", }, | 666 | { .compatible = "nvidia,tegra114-gr3d", }, |
| 667 | { .compatible = "nvidia,tegra124-dc", }, | ||
| 658 | { /* sentinel */ } | 668 | { /* sentinel */ } |
| 659 | }; | 669 | }; |
| 660 | 670 | ||
| @@ -677,10 +687,14 @@ static int __init host1x_drm_init(void) | |||
| 677 | if (err < 0) | 687 | if (err < 0) |
| 678 | goto unregister_host1x; | 688 | goto unregister_host1x; |
| 679 | 689 | ||
| 680 | err = platform_driver_register(&tegra_hdmi_driver); | 690 | err = platform_driver_register(&tegra_dsi_driver); |
| 681 | if (err < 0) | 691 | if (err < 0) |
| 682 | goto unregister_dc; | 692 | goto unregister_dc; |
| 683 | 693 | ||
| 694 | err = platform_driver_register(&tegra_hdmi_driver); | ||
| 695 | if (err < 0) | ||
| 696 | goto unregister_dsi; | ||
| 697 | |||
| 684 | err = platform_driver_register(&tegra_gr2d_driver); | 698 | err = platform_driver_register(&tegra_gr2d_driver); |
| 685 | if (err < 0) | 699 | if (err < 0) |
| 686 | goto unregister_hdmi; | 700 | goto unregister_hdmi; |
| @@ -695,6 +709,8 @@ unregister_gr2d: | |||
| 695 | platform_driver_unregister(&tegra_gr2d_driver); | 709 | platform_driver_unregister(&tegra_gr2d_driver); |
| 696 | unregister_hdmi: | 710 | unregister_hdmi: |
| 697 | platform_driver_unregister(&tegra_hdmi_driver); | 711 | platform_driver_unregister(&tegra_hdmi_driver); |
| 712 | unregister_dsi: | ||
| 713 | platform_driver_unregister(&tegra_dsi_driver); | ||
| 698 | unregister_dc: | 714 | unregister_dc: |
| 699 | platform_driver_unregister(&tegra_dc_driver); | 715 | platform_driver_unregister(&tegra_dc_driver); |
| 700 | unregister_host1x: | 716 | unregister_host1x: |
| @@ -708,6 +724,7 @@ static void __exit host1x_drm_exit(void) | |||
| 708 | platform_driver_unregister(&tegra_gr3d_driver); | 724 | platform_driver_unregister(&tegra_gr3d_driver); |
| 709 | platform_driver_unregister(&tegra_gr2d_driver); | 725 | platform_driver_unregister(&tegra_gr2d_driver); |
| 710 | platform_driver_unregister(&tegra_hdmi_driver); | 726 | platform_driver_unregister(&tegra_hdmi_driver); |
| 727 | platform_driver_unregister(&tegra_dsi_driver); | ||
| 711 | platform_driver_unregister(&tegra_dc_driver); | 728 | platform_driver_unregister(&tegra_dc_driver); |
| 712 | host1x_driver_unregister(&host1x_drm_driver); | 729 | host1x_driver_unregister(&host1x_drm_driver); |
| 713 | } | 730 | } |
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h index 266aae08a3bd..bf1cac7658f8 100644 --- a/drivers/gpu/drm/tegra/drm.h +++ b/drivers/gpu/drm/tegra/drm.h | |||
| @@ -27,10 +27,12 @@ struct tegra_fb { | |||
| 27 | unsigned int num_planes; | 27 | unsigned int num_planes; |
| 28 | }; | 28 | }; |
| 29 | 29 | ||
| 30 | #ifdef CONFIG_DRM_TEGRA_FBDEV | ||
| 30 | struct tegra_fbdev { | 31 | struct tegra_fbdev { |
| 31 | struct drm_fb_helper base; | 32 | struct drm_fb_helper base; |
| 32 | struct tegra_fb *fb; | 33 | struct tegra_fb *fb; |
| 33 | }; | 34 | }; |
| 35 | #endif | ||
| 34 | 36 | ||
| 35 | struct tegra_drm { | 37 | struct tegra_drm { |
| 36 | struct drm_device *drm; | 38 | struct drm_device *drm; |
| @@ -38,7 +40,9 @@ struct tegra_drm { | |||
| 38 | struct mutex clients_lock; | 40 | struct mutex clients_lock; |
| 39 | struct list_head clients; | 41 | struct list_head clients; |
| 40 | 42 | ||
| 43 | #ifdef CONFIG_DRM_TEGRA_FBDEV | ||
| 41 | struct tegra_fbdev *fbdev; | 44 | struct tegra_fbdev *fbdev; |
| 45 | #endif | ||
| 42 | }; | 46 | }; |
| 43 | 47 | ||
| 44 | struct tegra_drm_client; | 48 | struct tegra_drm_client; |
| @@ -84,6 +88,7 @@ extern int tegra_drm_unregister_client(struct tegra_drm *tegra, | |||
| 84 | extern int tegra_drm_init(struct tegra_drm *tegra, struct drm_device *drm); | 88 | extern int tegra_drm_init(struct tegra_drm *tegra, struct drm_device *drm); |
| 85 | extern int tegra_drm_exit(struct tegra_drm *tegra); | 89 | extern int tegra_drm_exit(struct tegra_drm *tegra); |
| 86 | 90 | ||
| 91 | struct tegra_dc_soc_info; | ||
| 87 | struct tegra_output; | 92 | struct tegra_output; |
| 88 | 93 | ||
| 89 | struct tegra_dc { | 94 | struct tegra_dc { |
| @@ -109,6 +114,8 @@ struct tegra_dc { | |||
| 109 | 114 | ||
| 110 | /* page-flip handling */ | 115 | /* page-flip handling */ |
| 111 | struct drm_pending_vblank_event *event; | 116 | struct drm_pending_vblank_event *event; |
| 117 | |||
| 118 | const struct tegra_dc_soc_info *soc; | ||
| 112 | }; | 119 | }; |
| 113 | 120 | ||
| 114 | static inline struct tegra_dc * | 121 | static inline struct tegra_dc * |
| @@ -177,6 +184,7 @@ struct tegra_output_ops { | |||
| 177 | enum tegra_output_type { | 184 | enum tegra_output_type { |
| 178 | TEGRA_OUTPUT_RGB, | 185 | TEGRA_OUTPUT_RGB, |
| 179 | TEGRA_OUTPUT_HDMI, | 186 | TEGRA_OUTPUT_HDMI, |
| 187 | TEGRA_OUTPUT_DSI, | ||
| 180 | }; | 188 | }; |
| 181 | 189 | ||
| 182 | struct tegra_output { | 190 | struct tegra_output { |
| @@ -186,6 +194,7 @@ struct tegra_output { | |||
| 186 | const struct tegra_output_ops *ops; | 194 | const struct tegra_output_ops *ops; |
| 187 | enum tegra_output_type type; | 195 | enum tegra_output_type type; |
| 188 | 196 | ||
| 197 | struct drm_panel *panel; | ||
| 189 | struct i2c_adapter *ddc; | 198 | struct i2c_adapter *ddc; |
| 190 | const struct edid *edid; | 199 | const struct edid *edid; |
| 191 | unsigned int hpd_irq; | 200 | unsigned int hpd_irq; |
| @@ -263,9 +272,12 @@ bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer); | |||
| 263 | bool tegra_fb_is_tiled(struct drm_framebuffer *framebuffer); | 272 | bool tegra_fb_is_tiled(struct drm_framebuffer *framebuffer); |
| 264 | extern int tegra_drm_fb_init(struct drm_device *drm); | 273 | extern int tegra_drm_fb_init(struct drm_device *drm); |
| 265 | extern void tegra_drm_fb_exit(struct drm_device *drm); | 274 | extern void tegra_drm_fb_exit(struct drm_device *drm); |
| 275 | #ifdef CONFIG_DRM_TEGRA_FBDEV | ||
| 266 | extern void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev); | 276 | extern void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev); |
| 277 | #endif | ||
| 267 | 278 | ||
| 268 | extern struct platform_driver tegra_dc_driver; | 279 | extern struct platform_driver tegra_dc_driver; |
| 280 | extern struct platform_driver tegra_dsi_driver; | ||
| 269 | extern struct platform_driver tegra_hdmi_driver; | 281 | extern struct platform_driver tegra_hdmi_driver; |
| 270 | extern struct platform_driver tegra_gr2d_driver; | 282 | extern struct platform_driver tegra_gr2d_driver; |
| 271 | extern struct platform_driver tegra_gr3d_driver; | 283 | extern struct platform_driver tegra_gr3d_driver; |
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c new file mode 100644 index 000000000000..d452faab0235 --- /dev/null +++ b/drivers/gpu/drm/tegra/dsi.c | |||
| @@ -0,0 +1,971 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2013 NVIDIA Corporation | ||
| 3 | * | ||
| 4 | * Permission to use, copy, modify, distribute, and sell this software and its | ||
| 5 | * documentation for any purpose is hereby granted without fee, provided that | ||
| 6 | * the above copyright notice appear in all copies and that both that copyright | ||
| 7 | * notice and this permission notice appear in supporting documentation, and | ||
| 8 | * that the name of the copyright holders not be used in advertising or | ||
| 9 | * publicity pertaining to distribution of the software without specific, | ||
| 10 | * written prior permission. The copyright holders make no representations | ||
| 11 | * about the suitability of this software for any purpose. It is provided "as | ||
| 12 | * is" without express or implied warranty. | ||
| 13 | * | ||
| 14 | * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, | ||
| 15 | * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO | ||
| 16 | * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR | ||
| 17 | * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, | ||
| 18 | * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER | ||
| 19 | * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE | ||
| 20 | * OF THIS SOFTWARE. | ||
| 21 | */ | ||
| 22 | |||
| 23 | #include <linux/clk.h> | ||
| 24 | #include <linux/debugfs.h> | ||
| 25 | #include <linux/host1x.h> | ||
| 26 | #include <linux/module.h> | ||
| 27 | #include <linux/of.h> | ||
| 28 | #include <linux/platform_device.h> | ||
| 29 | #include <linux/reset.h> | ||
| 30 | |||
| 31 | #include <drm/drm_mipi_dsi.h> | ||
| 32 | #include <drm/drm_panel.h> | ||
| 33 | |||
| 34 | #include <video/mipi_display.h> | ||
| 35 | |||
| 36 | #include "dc.h" | ||
| 37 | #include "drm.h" | ||
| 38 | #include "dsi.h" | ||
| 39 | #include "mipi-phy.h" | ||
| 40 | |||
| 41 | #define DSI_VIDEO_FIFO_DEPTH (1920 / 4) | ||
| 42 | #define DSI_HOST_FIFO_DEPTH 64 | ||
| 43 | |||
| 44 | struct tegra_dsi { | ||
| 45 | struct host1x_client client; | ||
| 46 | struct tegra_output output; | ||
| 47 | struct device *dev; | ||
| 48 | |||
| 49 | void __iomem *regs; | ||
| 50 | |||
| 51 | struct reset_control *rst; | ||
| 52 | struct clk *clk_parent; | ||
| 53 | struct clk *clk_lp; | ||
| 54 | struct clk *clk; | ||
| 55 | |||
| 56 | struct drm_info_list *debugfs_files; | ||
| 57 | struct drm_minor *minor; | ||
| 58 | struct dentry *debugfs; | ||
| 59 | |||
| 60 | enum mipi_dsi_pixel_format format; | ||
| 61 | unsigned int lanes; | ||
| 62 | |||
| 63 | struct tegra_mipi_device *mipi; | ||
| 64 | struct mipi_dsi_host host; | ||
| 65 | }; | ||
| 66 | |||
| 67 | static inline struct tegra_dsi * | ||
| 68 | host1x_client_to_dsi(struct host1x_client *client) | ||
| 69 | { | ||
| 70 | return container_of(client, struct tegra_dsi, client); | ||
| 71 | } | ||
| 72 | |||
| 73 | static inline struct tegra_dsi *host_to_tegra(struct mipi_dsi_host *host) | ||
| 74 | { | ||
| 75 | return container_of(host, struct tegra_dsi, host); | ||
| 76 | } | ||
| 77 | |||
| 78 | static inline struct tegra_dsi *to_dsi(struct tegra_output *output) | ||
| 79 | { | ||
| 80 | return container_of(output, struct tegra_dsi, output); | ||
| 81 | } | ||
| 82 | |||
| 83 | static inline unsigned long tegra_dsi_readl(struct tegra_dsi *dsi, | ||
| 84 | unsigned long reg) | ||
| 85 | { | ||
| 86 | return readl(dsi->regs + (reg << 2)); | ||
| 87 | } | ||
| 88 | |||
| 89 | static inline void tegra_dsi_writel(struct tegra_dsi *dsi, unsigned long value, | ||
| 90 | unsigned long reg) | ||
| 91 | { | ||
| 92 | writel(value, dsi->regs + (reg << 2)); | ||
| 93 | } | ||
| 94 | |||
/*
 * debugfs "regs" file: dump the name, word offset and current value of
 * every DSI register.  The tegra_dsi instance is passed through the
 * drm_info_node's data pointer (set up in tegra_dsi_debugfs_init()).
 */
static int tegra_dsi_show_regs(struct seq_file *s, void *data)
{
	struct drm_info_node *node = s->private;
	struct tegra_dsi *dsi = node->info_ent->data;

/* print "NAME  offset  value" for one register */
#define DUMP_REG(name) \
	seq_printf(s, "%-32s %#05x %08lx\n", #name, name, \
		   tegra_dsi_readl(dsi, name))

	DUMP_REG(DSI_INCR_SYNCPT);
	DUMP_REG(DSI_INCR_SYNCPT_CONTROL);
	DUMP_REG(DSI_INCR_SYNCPT_ERROR);
	DUMP_REG(DSI_CTXSW);
	DUMP_REG(DSI_RD_DATA);
	DUMP_REG(DSI_WR_DATA);
	DUMP_REG(DSI_POWER_CONTROL);
	DUMP_REG(DSI_INT_ENABLE);
	DUMP_REG(DSI_INT_STATUS);
	DUMP_REG(DSI_INT_MASK);
	DUMP_REG(DSI_HOST_CONTROL);
	DUMP_REG(DSI_CONTROL);
	DUMP_REG(DSI_SOL_DELAY);
	DUMP_REG(DSI_MAX_THRESHOLD);
	DUMP_REG(DSI_TRIGGER);
	DUMP_REG(DSI_TX_CRC);
	DUMP_REG(DSI_STATUS);

	DUMP_REG(DSI_INIT_SEQ_CONTROL);
	DUMP_REG(DSI_INIT_SEQ_DATA_0);
	DUMP_REG(DSI_INIT_SEQ_DATA_1);
	DUMP_REG(DSI_INIT_SEQ_DATA_2);
	DUMP_REG(DSI_INIT_SEQ_DATA_3);
	DUMP_REG(DSI_INIT_SEQ_DATA_4);
	DUMP_REG(DSI_INIT_SEQ_DATA_5);
	DUMP_REG(DSI_INIT_SEQ_DATA_6);
	DUMP_REG(DSI_INIT_SEQ_DATA_7);

	DUMP_REG(DSI_PKT_SEQ_0_LO);
	DUMP_REG(DSI_PKT_SEQ_0_HI);
	DUMP_REG(DSI_PKT_SEQ_1_LO);
	DUMP_REG(DSI_PKT_SEQ_1_HI);
	DUMP_REG(DSI_PKT_SEQ_2_LO);
	DUMP_REG(DSI_PKT_SEQ_2_HI);
	DUMP_REG(DSI_PKT_SEQ_3_LO);
	DUMP_REG(DSI_PKT_SEQ_3_HI);
	DUMP_REG(DSI_PKT_SEQ_4_LO);
	DUMP_REG(DSI_PKT_SEQ_4_HI);
	DUMP_REG(DSI_PKT_SEQ_5_LO);
	DUMP_REG(DSI_PKT_SEQ_5_HI);

	DUMP_REG(DSI_DCS_CMDS);

	DUMP_REG(DSI_PKT_LEN_0_1);
	DUMP_REG(DSI_PKT_LEN_2_3);
	DUMP_REG(DSI_PKT_LEN_4_5);
	DUMP_REG(DSI_PKT_LEN_6_7);

	DUMP_REG(DSI_PHY_TIMING_0);
	DUMP_REG(DSI_PHY_TIMING_1);
	DUMP_REG(DSI_PHY_TIMING_2);
	DUMP_REG(DSI_BTA_TIMING);

	DUMP_REG(DSI_TIMEOUT_0);
	DUMP_REG(DSI_TIMEOUT_1);
	DUMP_REG(DSI_TO_TALLY);

	DUMP_REG(DSI_PAD_CONTROL_0);
	DUMP_REG(DSI_PAD_CONTROL_CD);
	DUMP_REG(DSI_PAD_CD_STATUS);
	DUMP_REG(DSI_VIDEO_MODE_CONTROL);
	DUMP_REG(DSI_PAD_CONTROL_1);
	DUMP_REG(DSI_PAD_CONTROL_2);
	DUMP_REG(DSI_PAD_CONTROL_3);
	DUMP_REG(DSI_PAD_CONTROL_4);

	DUMP_REG(DSI_GANGED_MODE_CONTROL);
	DUMP_REG(DSI_GANGED_MODE_START);
	DUMP_REG(DSI_GANGED_MODE_SIZE);

	DUMP_REG(DSI_RAW_DATA_BYTE_COUNT);
	DUMP_REG(DSI_ULTRA_LOW_POWER_CONTROL);

	DUMP_REG(DSI_INIT_SEQ_DATA_8);
	DUMP_REG(DSI_INIT_SEQ_DATA_9);
	DUMP_REG(DSI_INIT_SEQ_DATA_10);
	DUMP_REG(DSI_INIT_SEQ_DATA_11);
	DUMP_REG(DSI_INIT_SEQ_DATA_12);
	DUMP_REG(DSI_INIT_SEQ_DATA_13);
	DUMP_REG(DSI_INIT_SEQ_DATA_14);
	DUMP_REG(DSI_INIT_SEQ_DATA_15);

#undef DUMP_REG

	return 0;
}
| 190 | |||
/*
 * Template for the per-instance debugfs entries; the .data field is
 * filled in with the tegra_dsi pointer in tegra_dsi_debugfs_init().
 */
static struct drm_info_list debugfs_files[] = {
	{ "regs", tegra_dsi_show_regs, 0, NULL },
};
| 194 | |||
/*
 * Create the per-instance debugfs directory (named after the device) and
 * populate it with the files from debugfs_files[].  The template array is
 * duplicated so that each DSI instance can carry its own data pointer.
 *
 * Returns 0 on success or a negative error code; all partially created
 * state is unwound on failure.
 */
static int tegra_dsi_debugfs_init(struct tegra_dsi *dsi,
				  struct drm_minor *minor)
{
	const char *name = dev_name(dsi->dev);
	unsigned int i;
	int err;

	dsi->debugfs = debugfs_create_dir(name, minor->debugfs_root);
	if (!dsi->debugfs)
		return -ENOMEM;

	/* per-instance copy of the template, so .data can differ per device */
	dsi->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
				     GFP_KERNEL);
	if (!dsi->debugfs_files) {
		err = -ENOMEM;
		goto remove;
	}

	for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
		dsi->debugfs_files[i].data = dsi;

	err = drm_debugfs_create_files(dsi->debugfs_files,
				       ARRAY_SIZE(debugfs_files),
				       dsi->debugfs, minor);
	if (err < 0)
		goto free;

	/* remembered for drm_debugfs_remove_files() at teardown */
	dsi->minor = minor;

	return 0;

free:
	kfree(dsi->debugfs_files);
	dsi->debugfs_files = NULL;
remove:
	debugfs_remove(dsi->debugfs);
	dsi->debugfs = NULL;

	return err;
}
| 235 | |||
/*
 * Tear down the debugfs state created by tegra_dsi_debugfs_init(), in
 * reverse order of creation: unregister the files first (they reference
 * the duplicated array), then free the array, then remove the directory.
 */
static int tegra_dsi_debugfs_exit(struct tegra_dsi *dsi)
{
	drm_debugfs_remove_files(dsi->debugfs_files, ARRAY_SIZE(debugfs_files),
				 dsi->minor);
	dsi->minor = NULL;

	kfree(dsi->debugfs_files);
	dsi->debugfs_files = NULL;

	debugfs_remove(dsi->debugfs);
	dsi->debugfs = NULL;

	return 0;
}
| 250 | |||
/*
 * Helpers to build 32-bit packet-sequencer register values; each register
 * encodes up to three packet slots.  PKT_IDx() places a 6-bit MIPI DSI
 * data type into slot x and sets that slot's enable bit; PKT_LENx() is a
 * 3-bit length selector for the slot (presumably indexing one of the
 * DSI_PKT_LEN_* length fields -- TODO confirm against the Tegra TRM).
 * PKT_LP marks the line as transmitted in low-power mode.
 */
#define PKT_ID0(id) ((((id) & 0x3f) << 3) | (1 << 9))
#define PKT_LEN0(len) (((len) & 0x07) << 0)
#define PKT_ID1(id) ((((id) & 0x3f) << 13) | (1 << 19))
#define PKT_LEN1(len) (((len) & 0x07) << 10)
#define PKT_ID2(id) ((((id) & 0x3f) << 23) | (1 << 29))
#define PKT_LEN2(len) (((len) & 0x07) << 20)

#define PKT_LP (1 << 30)
/* number of DSI_PKT_SEQ_* registers (6 LO/HI pairs) */
#define NUM_PKT_SEQ 12
| 260 | |||
/*
 * Packet sequence for non-burst video mode with sync-end packets.
 * Written verbatim into the DSI_PKT_SEQ_0_LO..DSI_PKT_SEQ_5_HI registers;
 * each LO/HI pair appears to describe one class of scanline (vsync start,
 * vsync end, vertical blank, active video, ...) -- TODO confirm the exact
 * line-to-entry mapping against the Tegra TRM.
 */
static const u32 pkt_seq_vnb_syne[NUM_PKT_SEQ] = {
	[ 0] = PKT_ID0(MIPI_DSI_V_SYNC_START) | PKT_LEN0(0) |
	       PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) |
	       PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0) |
	       PKT_LP,
	[ 1] = 0,
	[ 2] = PKT_ID0(MIPI_DSI_V_SYNC_END) | PKT_LEN0(0) |
	       PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) |
	       PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0) |
	       PKT_LP,
	[ 3] = 0,
	[ 4] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
	       PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) |
	       PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0) |
	       PKT_LP,
	[ 5] = 0,
	[ 6] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
	       PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) |
	       PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0),
	[ 7] = PKT_ID0(MIPI_DSI_BLANKING_PACKET) | PKT_LEN0(2) |
	       PKT_ID1(MIPI_DSI_PACKED_PIXEL_STREAM_24) | PKT_LEN1(3) |
	       PKT_ID2(MIPI_DSI_BLANKING_PACKET) | PKT_LEN2(4),
	[ 8] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
	       PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) |
	       PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0) |
	       PKT_LP,
	[ 9] = 0,
	[10] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
	       PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) |
	       PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0),
	[11] = PKT_ID0(MIPI_DSI_BLANKING_PACKET) | PKT_LEN0(2) |
	       PKT_ID1(MIPI_DSI_PACKED_PIXEL_STREAM_24) | PKT_LEN1(3) |
	       PKT_ID2(MIPI_DSI_BLANKING_PACKET) | PKT_LEN2(4),
};
| 296 | |||
/*
 * Compute default D-PHY timing parameters from the current DSI clock rate
 * and program them into the DSI_PHY_TIMING_* and DSI_BTA_TIMING registers.
 *
 * Returns 0 on success or a negative error code if the clock rate cannot
 * be read or the derived timings fail validation.
 */
static int tegra_dsi_set_phy_timing(struct tegra_dsi *dsi)
{
	struct mipi_dphy_timing timing;
	unsigned long value, period;
	long rate;
	int err;

	rate = clk_get_rate(dsi->clk);
	if (rate < 0)
		return rate;

	/* bit period in nanoseconds; the DSI link runs at twice clk (DDR) */
	period = DIV_ROUND_CLOSEST(1000000000UL, rate * 2);

	err = mipi_dphy_timing_get_default(&timing, period);
	if (err < 0)
		return err;

	err = mipi_dphy_timing_validate(&timing, period);
	if (err < 0) {
		dev_err(dsi->dev, "failed to validate D-PHY timing: %d\n", err);
		return err;
	}

	/*
	 * The D-PHY timing fields below are expressed in byte-clock cycles,
	 * so multiply the period by 8.
	 */
	period *= 8;

	value = DSI_TIMING_FIELD(timing.hsexit, period, 1) << 24 |
		DSI_TIMING_FIELD(timing.hstrail, period, 0) << 16 |
		DSI_TIMING_FIELD(timing.hszero, period, 3) << 8 |
		DSI_TIMING_FIELD(timing.hsprepare, period, 1);
	tegra_dsi_writel(dsi, value, DSI_PHY_TIMING_0);

	value = DSI_TIMING_FIELD(timing.clktrail, period, 1) << 24 |
		DSI_TIMING_FIELD(timing.clkpost, period, 1) << 16 |
		DSI_TIMING_FIELD(timing.clkzero, period, 1) << 8 |
		DSI_TIMING_FIELD(timing.lpx, period, 1);
	tegra_dsi_writel(dsi, value, DSI_PHY_TIMING_1);

	/* low byte: maximum value (0xff periods) -- matches the 0xff reset
	 * default written by tegra_dsi_initialize() */
	value = DSI_TIMING_FIELD(timing.clkprepare, period, 1) << 16 |
		DSI_TIMING_FIELD(timing.clkpre, period, 1) << 8 |
		DSI_TIMING_FIELD(0xff * period, period, 0) << 0;
	tegra_dsi_writel(dsi, value, DSI_PHY_TIMING_2);

	/* bus-turnaround timings */
	value = DSI_TIMING_FIELD(timing.taget, period, 1) << 16 |
		DSI_TIMING_FIELD(timing.tasure, period, 1) << 8 |
		DSI_TIMING_FIELD(timing.tago, period, 1);
	tegra_dsi_writel(dsi, value, DSI_BTA_TIMING);

	return 0;
}
| 350 | |||
| 351 | static int tegra_dsi_get_muldiv(enum mipi_dsi_pixel_format format, | ||
| 352 | unsigned int *mulp, unsigned int *divp) | ||
| 353 | { | ||
| 354 | switch (format) { | ||
| 355 | case MIPI_DSI_FMT_RGB666_PACKED: | ||
| 356 | case MIPI_DSI_FMT_RGB888: | ||
| 357 | *mulp = 3; | ||
| 358 | *divp = 1; | ||
| 359 | break; | ||
| 360 | |||
| 361 | case MIPI_DSI_FMT_RGB565: | ||
| 362 | *mulp = 2; | ||
| 363 | *divp = 1; | ||
| 364 | break; | ||
| 365 | |||
| 366 | case MIPI_DSI_FMT_RGB666: | ||
| 367 | *mulp = 9; | ||
| 368 | *divp = 4; | ||
| 369 | break; | ||
| 370 | |||
| 371 | default: | ||
| 372 | return -EINVAL; | ||
| 373 | } | ||
| 374 | |||
| 375 | return 0; | ||
| 376 | } | ||
| 377 | |||
| 378 | static int tegra_output_dsi_enable(struct tegra_output *output) | ||
| 379 | { | ||
| 380 | struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc); | ||
| 381 | struct drm_display_mode *mode = &dc->base.mode; | ||
| 382 | unsigned int hact, hsw, hbp, hfp, i, mul, div; | ||
| 383 | struct tegra_dsi *dsi = to_dsi(output); | ||
| 384 | /* FIXME: don't hardcode this */ | ||
| 385 | const u32 *pkt_seq = pkt_seq_vnb_syne; | ||
| 386 | unsigned long value; | ||
| 387 | int err; | ||
| 388 | |||
| 389 | err = tegra_dsi_get_muldiv(dsi->format, &mul, &div); | ||
| 390 | if (err < 0) | ||
| 391 | return err; | ||
| 392 | |||
| 393 | err = clk_enable(dsi->clk); | ||
| 394 | if (err < 0) | ||
| 395 | return err; | ||
| 396 | |||
| 397 | reset_control_deassert(dsi->rst); | ||
| 398 | |||
| 399 | value = DSI_CONTROL_CHANNEL(0) | DSI_CONTROL_FORMAT(dsi->format) | | ||
| 400 | DSI_CONTROL_LANES(dsi->lanes - 1) | | ||
| 401 | DSI_CONTROL_SOURCE(dc->pipe); | ||
| 402 | tegra_dsi_writel(dsi, value, DSI_CONTROL); | ||
| 403 | |||
| 404 | tegra_dsi_writel(dsi, DSI_VIDEO_FIFO_DEPTH, DSI_MAX_THRESHOLD); | ||
| 405 | |||
| 406 | value = DSI_HOST_CONTROL_HS | DSI_HOST_CONTROL_CS | | ||
| 407 | DSI_HOST_CONTROL_ECC; | ||
| 408 | tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL); | ||
| 409 | |||
| 410 | value = tegra_dsi_readl(dsi, DSI_CONTROL); | ||
| 411 | value |= DSI_CONTROL_HS_CLK_CTRL; | ||
| 412 | value &= ~DSI_CONTROL_TX_TRIG(3); | ||
| 413 | value &= ~DSI_CONTROL_DCS_ENABLE; | ||
| 414 | value |= DSI_CONTROL_VIDEO_ENABLE; | ||
| 415 | value &= ~DSI_CONTROL_HOST_ENABLE; | ||
| 416 | tegra_dsi_writel(dsi, value, DSI_CONTROL); | ||
| 417 | |||
| 418 | err = tegra_dsi_set_phy_timing(dsi); | ||
| 419 | if (err < 0) | ||
| 420 | return err; | ||
| 421 | |||
| 422 | for (i = 0; i < NUM_PKT_SEQ; i++) | ||
| 423 | tegra_dsi_writel(dsi, pkt_seq[i], DSI_PKT_SEQ_0_LO + i); | ||
| 424 | |||
| 425 | /* horizontal active pixels */ | ||
| 426 | hact = mode->hdisplay * mul / div; | ||
| 427 | |||
| 428 | /* horizontal sync width */ | ||
| 429 | hsw = (mode->hsync_end - mode->hsync_start) * mul / div; | ||
| 430 | hsw -= 10; | ||
| 431 | |||
| 432 | /* horizontal back porch */ | ||
| 433 | hbp = (mode->htotal - mode->hsync_end) * mul / div; | ||
| 434 | hbp -= 14; | ||
| 435 | |||
| 436 | /* horizontal front porch */ | ||
| 437 | hfp = (mode->hsync_start - mode->hdisplay) * mul / div; | ||
| 438 | hfp -= 8; | ||
| 439 | |||
| 440 | tegra_dsi_writel(dsi, hsw << 16 | 0, DSI_PKT_LEN_0_1); | ||
| 441 | tegra_dsi_writel(dsi, hact << 16 | hbp, DSI_PKT_LEN_2_3); | ||
| 442 | tegra_dsi_writel(dsi, hfp, DSI_PKT_LEN_4_5); | ||
| 443 | tegra_dsi_writel(dsi, 0x0f0f << 16, DSI_PKT_LEN_6_7); | ||
| 444 | |||
| 445 | /* set SOL delay */ | ||
| 446 | tegra_dsi_writel(dsi, 8 * mul / div, DSI_SOL_DELAY); | ||
| 447 | |||
| 448 | /* enable display controller */ | ||
| 449 | value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); | ||
| 450 | value |= DSI_ENABLE; | ||
| 451 | tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); | ||
| 452 | |||
| 453 | value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND); | ||
| 454 | value &= ~DISP_CTRL_MODE_MASK; | ||
| 455 | value |= DISP_CTRL_MODE_C_DISPLAY; | ||
| 456 | tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND); | ||
| 457 | |||
| 458 | value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL); | ||
| 459 | value |= PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE | | ||
| 460 | PW4_ENABLE | PM0_ENABLE | PM1_ENABLE; | ||
| 461 | tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL); | ||
| 462 | |||
| 463 | tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL); | ||
| 464 | tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL); | ||
| 465 | |||
| 466 | /* enable DSI controller */ | ||
| 467 | value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL); | ||
| 468 | value |= DSI_POWER_CONTROL_ENABLE; | ||
| 469 | tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL); | ||
| 470 | |||
| 471 | return 0; | ||
| 472 | } | ||
| 473 | |||
| 474 | static int tegra_output_dsi_disable(struct tegra_output *output) | ||
| 475 | { | ||
| 476 | struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc); | ||
| 477 | struct tegra_dsi *dsi = to_dsi(output); | ||
| 478 | unsigned long value; | ||
| 479 | |||
| 480 | /* disable DSI controller */ | ||
| 481 | value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL); | ||
| 482 | value &= DSI_POWER_CONTROL_ENABLE; | ||
| 483 | tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL); | ||
| 484 | |||
| 485 | /* | ||
| 486 | * The following accesses registers of the display controller, so make | ||
| 487 | * sure it's only executed when the output is attached to one. | ||
| 488 | */ | ||
| 489 | if (dc) { | ||
| 490 | value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL); | ||
| 491 | value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE | | ||
| 492 | PW4_ENABLE | PM0_ENABLE | PM1_ENABLE); | ||
| 493 | tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL); | ||
| 494 | |||
| 495 | value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND); | ||
| 496 | value &= ~DISP_CTRL_MODE_MASK; | ||
| 497 | tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND); | ||
| 498 | |||
| 499 | value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); | ||
| 500 | value &= ~DSI_ENABLE; | ||
| 501 | tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); | ||
| 502 | |||
| 503 | tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL); | ||
| 504 | tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL); | ||
| 505 | } | ||
| 506 | |||
| 507 | clk_disable(dsi->clk); | ||
| 508 | |||
| 509 | return 0; | ||
| 510 | } | ||
| 511 | |||
/*
 * Configure the DSI pixel/byte clocks for the current display mode and
 * program the controller's timeout registers derived from the byte clock.
 *
 * Note: the @pclk argument is ignored and recomputed from the mode; the
 * parameter name is reused as a local below.
 */
static int tegra_output_dsi_setup_clock(struct tegra_output *output,
					struct clk *clk, unsigned long pclk)
{
	struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
	struct drm_display_mode *mode = &dc->base.mode;
	unsigned int timeout, mul, div, vrefresh;
	struct tegra_dsi *dsi = to_dsi(output);
	unsigned long bclk, plld, value;
	struct clk *base;
	int err;

	/* bytes-per-pixel ratio for the peripheral's pixel format */
	err = tegra_dsi_get_muldiv(dsi->format, &mul, &div);
	if (err < 0)
		return err;

	vrefresh = drm_mode_vrefresh(mode);

	/* recompute the pixel clock, then derive the per-lane byte clock
	 * and round the PLL frequency up to whole MHz */
	pclk = mode->htotal * mode->vtotal * vrefresh;
	bclk = (pclk * mul) / (div * dsi->lanes);
	plld = DIV_ROUND_UP(bclk * 8, 1000000);
	pclk = (plld * 1000000) / 2;

	err = clk_set_parent(clk, dsi->clk_parent);
	if (err < 0) {
		dev_err(dsi->dev, "failed to set parent clock: %d\n", err);
		return err;
	}

	base = clk_get_parent(dsi->clk_parent);

	/*
	 * This assumes that the parent clock is pll_d_out0 or pll_d2_out
	 * respectively, each of which divides the base pll_d by 2.
	 */
	err = clk_set_rate(base, pclk * 2);
	if (err < 0) {
		dev_err(dsi->dev, "failed to set base clock rate to %lu Hz\n",
			pclk * 2);
		return err;
	}

	/*
	 * XXX: Move the below somewhere else so that we don't need to have
	 * access to the vrefresh in this function?
	 */

	/* one frame high-speed transmission timeout */
	timeout = (bclk / vrefresh) / 512;
	value = DSI_TIMEOUT_LRX(0x2000) | DSI_TIMEOUT_HTX(timeout);
	tegra_dsi_writel(dsi, value, DSI_TIMEOUT_0);

	/* 2 ms peripheral timeout for panel */
	timeout = 2 * bclk / 512 * 1000;
	value = DSI_TIMEOUT_PR(timeout) | DSI_TIMEOUT_TA(0x2000);
	tegra_dsi_writel(dsi, value, DSI_TIMEOUT_1);

	/* no extra turnaround/receive/transmit tallies */
	value = DSI_TALLY_TA(0) | DSI_TALLY_LRX(0) | DSI_TALLY_HTX(0);
	tegra_dsi_writel(dsi, value, DSI_TO_TALLY);

	return 0;
}
| 573 | |||
| 574 | static int tegra_output_dsi_check_mode(struct tegra_output *output, | ||
| 575 | struct drm_display_mode *mode, | ||
| 576 | enum drm_mode_status *status) | ||
| 577 | { | ||
| 578 | /* | ||
| 579 | * FIXME: For now, always assume that the mode is okay. | ||
| 580 | */ | ||
| 581 | |||
| 582 | *status = MODE_OK; | ||
| 583 | |||
| 584 | return 0; | ||
| 585 | } | ||
| 586 | |||
/* Tegra output callbacks for the DSI output type. */
static const struct tegra_output_ops dsi_ops = {
	.enable = tegra_output_dsi_enable,
	.disable = tegra_output_dsi_disable,
	.setup_clock = tegra_output_dsi_setup_clock,
	.check_mode = tegra_output_dsi_check_mode,
};
| 593 | |||
/*
 * Enable the DSI pads by releasing the VS1 pulldown and power-down
 * controls in DSI_PAD_CONTROL_0.  Always returns 0.
 */
static int tegra_dsi_pad_enable(struct tegra_dsi *dsi)
{
	unsigned long value;

	value = DSI_PAD_CONTROL_VS1_PULLDN(0) | DSI_PAD_CONTROL_VS1_PDIO(0);
	tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_0);

	return 0;
}
| 603 | |||
/*
 * Calibrate the DSI pads: clear all pad controls, enable the pads,
 * program the slew/low-power drive settings and trigger the shared MIPI
 * calibration logic.
 *
 * Returns the result of tegra_mipi_calibrate().
 */
static int tegra_dsi_pad_calibrate(struct tegra_dsi *dsi)
{
	unsigned long value;

	/* reset all pad controls to a known state before calibrating */
	tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_0);
	tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_1);
	tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_2);
	tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_3);
	tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_4);

	/* start calibration */
	tegra_dsi_pad_enable(dsi);

	value = DSI_PAD_SLEW_UP(0x7) | DSI_PAD_SLEW_DN(0x7) |
		DSI_PAD_LP_UP(0x1) | DSI_PAD_LP_DN(0x1) |
		DSI_PAD_OUT_CLK(0x0);
	tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_2);

	return tegra_mipi_calibrate(dsi->mipi);
}
| 624 | |||
/*
 * host1x client init callback: register the DRM output, set up debugfs
 * (best effort -- failures are logged but not fatal), bring the
 * controller registers to a known state, calibrate the pads and power
 * the controller up.
 */
static int tegra_dsi_init(struct host1x_client *client)
{
	struct tegra_drm *tegra = dev_get_drvdata(client->parent);
	struct tegra_dsi *dsi = host1x_client_to_dsi(client);
	unsigned long value, i;
	int err;

	dsi->output.type = TEGRA_OUTPUT_DSI;
	dsi->output.dev = client->dev;
	dsi->output.ops = &dsi_ops;

	err = tegra_output_init(tegra->drm, &dsi->output);
	if (err < 0) {
		dev_err(client->dev, "output setup failed: %d\n", err);
		return err;
	}

	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
		err = tegra_dsi_debugfs_init(dsi, tegra->drm->primary);
		if (err < 0)
			dev_err(dsi->dev, "debugfs setup failed: %d\n", err);
	}

	/*
	 * enable high-speed mode, checksum generation, ECC generation and
	 * disable raw mode
	 */
	value = tegra_dsi_readl(dsi, DSI_HOST_CONTROL);
	value |= DSI_HOST_CONTROL_ECC | DSI_HOST_CONTROL_CS |
		 DSI_HOST_CONTROL_HS;
	value &= ~DSI_HOST_CONTROL_RAW;
	tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL);

	tegra_dsi_writel(dsi, 0, DSI_SOL_DELAY);
	tegra_dsi_writel(dsi, 0, DSI_MAX_THRESHOLD);

	tegra_dsi_writel(dsi, 0, DSI_INIT_SEQ_CONTROL);

	/* clear all 16 initialization sequence data words (two banks of 8;
	 * register offsets are word indices, so +i steps registers) */
	for (i = 0; i < 8; i++) {
		tegra_dsi_writel(dsi, 0, DSI_INIT_SEQ_DATA_0 + i);
		tegra_dsi_writel(dsi, 0, DSI_INIT_SEQ_DATA_8 + i);
	}

	/* clear the 12 packet sequencer registers (6 LO/HI pairs) */
	for (i = 0; i < 12; i++)
		tegra_dsi_writel(dsi, 0, DSI_PKT_SEQ_0_LO + i);

	tegra_dsi_writel(dsi, 0, DSI_DCS_CMDS);

	err = tegra_dsi_pad_calibrate(dsi);
	if (err < 0) {
		dev_err(dsi->dev, "MIPI calibration failed: %d\n", err);
		return err;
	}

	/* power up and give the controller time to settle */
	tegra_dsi_writel(dsi, DSI_POWER_CONTROL_ENABLE, DSI_POWER_CONTROL);
	usleep_range(300, 1000);

	return 0;
}
| 684 | |||
/*
 * host1x client exit callback: tear down debugfs (best effort), then
 * disable and unregister the DRM output.
 */
static int tegra_dsi_exit(struct host1x_client *client)
{
	struct tegra_dsi *dsi = host1x_client_to_dsi(client);
	int err;

	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
		err = tegra_dsi_debugfs_exit(dsi);
		if (err < 0)
			dev_err(dsi->dev, "debugfs cleanup failed: %d\n", err);
	}

	err = tegra_output_disable(&dsi->output);
	if (err < 0) {
		dev_err(client->dev, "output failed to disable: %d\n", err);
		return err;
	}

	err = tegra_output_exit(&dsi->output);
	if (err < 0) {
		dev_err(client->dev, "output cleanup failed: %d\n", err);
		return err;
	}

	return 0;
}
| 710 | |||
/* host1x client callbacks, invoked when the DRM device comes and goes. */
static const struct host1x_client_ops dsi_client_ops = {
	.init = tegra_dsi_init,
	.exit = tegra_dsi_exit,
};
| 715 | |||
| 716 | static int tegra_dsi_setup_clocks(struct tegra_dsi *dsi) | ||
| 717 | { | ||
| 718 | struct clk *parent; | ||
| 719 | int err; | ||
| 720 | |||
| 721 | parent = clk_get_parent(dsi->clk); | ||
| 722 | if (!parent) | ||
| 723 | return -EINVAL; | ||
| 724 | |||
| 725 | err = clk_set_parent(parent, dsi->clk_parent); | ||
| 726 | if (err < 0) | ||
| 727 | return err; | ||
| 728 | |||
| 729 | return 0; | ||
| 730 | } | ||
| 731 | |||
/*
 * Bring all DSI controller registers to a quiescent, known state at probe
 * time: controller powered down, interrupts off, sequencers and timings
 * cleared (DSI_PHY_TIMING_2's low byte is set to its 0xff default).
 */
static void tegra_dsi_initialize(struct tegra_dsi *dsi)
{
	unsigned int i;

	/* power the controller down before touching anything else */
	tegra_dsi_writel(dsi, 0, DSI_POWER_CONTROL);

	/* mask and clear all interrupts */
	tegra_dsi_writel(dsi, 0, DSI_INT_ENABLE);
	tegra_dsi_writel(dsi, 0, DSI_INT_STATUS);
	tegra_dsi_writel(dsi, 0, DSI_INT_MASK);

	tegra_dsi_writel(dsi, 0, DSI_HOST_CONTROL);
	tegra_dsi_writel(dsi, 0, DSI_CONTROL);

	tegra_dsi_writel(dsi, 0, DSI_SOL_DELAY);
	tegra_dsi_writel(dsi, 0, DSI_MAX_THRESHOLD);

	tegra_dsi_writel(dsi, 0, DSI_INIT_SEQ_CONTROL);

	/* clear all 16 initialization sequence data words (two banks of 8;
	 * register offsets are word indices, so +i steps registers) */
	for (i = 0; i < 8; i++) {
		tegra_dsi_writel(dsi, 0, DSI_INIT_SEQ_DATA_0 + i);
		tegra_dsi_writel(dsi, 0, DSI_INIT_SEQ_DATA_8 + i);
	}

	/* clear the 12 packet sequencer registers (6 LO/HI pairs) */
	for (i = 0; i < 12; i++)
		tegra_dsi_writel(dsi, 0, DSI_PKT_SEQ_0_LO + i);

	tegra_dsi_writel(dsi, 0, DSI_DCS_CMDS);

	/* clear the four packet length registers */
	for (i = 0; i < 4; i++)
		tegra_dsi_writel(dsi, 0, DSI_PKT_LEN_0_1 + i);

	tegra_dsi_writel(dsi, 0x00000000, DSI_PHY_TIMING_0);
	tegra_dsi_writel(dsi, 0x00000000, DSI_PHY_TIMING_1);
	tegra_dsi_writel(dsi, 0x000000ff, DSI_PHY_TIMING_2);
	tegra_dsi_writel(dsi, 0x00000000, DSI_BTA_TIMING);

	tegra_dsi_writel(dsi, 0, DSI_TIMEOUT_0);
	tegra_dsi_writel(dsi, 0, DSI_TIMEOUT_1);
	tegra_dsi_writel(dsi, 0, DSI_TO_TALLY);

	tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_0);
	tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_CD);
	tegra_dsi_writel(dsi, 0, DSI_PAD_CD_STATUS);
	tegra_dsi_writel(dsi, 0, DSI_VIDEO_MODE_CONTROL);
	tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_1);
	tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_2);
	tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_3);
	tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_4);

	tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_CONTROL);
	tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_START);
	tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_SIZE);
}
| 785 | |||
/*
 * mipi_dsi_host attach callback: adopt the peripheral's pixel format and
 * lane count, look up its DRM panel and, if the connector already exists,
 * kick a hotplug event so the new panel is picked up.
 */
static int tegra_dsi_host_attach(struct mipi_dsi_host *host,
				 struct mipi_dsi_device *device)
{
	struct tegra_dsi *dsi = host_to_tegra(host);
	struct tegra_output *output = &dsi->output;

	dsi->format = device->format;
	dsi->lanes = device->lanes;

	output->panel = of_drm_find_panel(device->dev.of_node);
	if (output->panel) {
		/* connector may not exist yet if the DRM device is still
		 * initializing */
		if (output->connector.dev)
			drm_helper_hpd_irq_event(output->connector.dev);
	}

	return 0;
}
| 803 | |||
/*
 * mipi_dsi_host detach callback: if the departing device owns our panel,
 * drop the reference and signal a hotplug event so userspace sees the
 * disconnect.
 */
static int tegra_dsi_host_detach(struct mipi_dsi_host *host,
				 struct mipi_dsi_device *device)
{
	struct tegra_dsi *dsi = host_to_tegra(host);
	struct tegra_output *output = &dsi->output;

	/* only react if the detaching device is the one backing our panel */
	if (output->panel && &device->dev == output->panel->dev) {
		if (output->connector.dev)
			drm_helper_hpd_irq_event(output->connector.dev);

		output->panel = NULL;
	}

	return 0;
}
| 819 | |||
/* MIPI DSI host callbacks invoked when peripherals attach/detach. */
static const struct mipi_dsi_host_ops tegra_dsi_host_ops = {
	.attach = tegra_dsi_host_attach,
	.detach = tegra_dsi_host_detach,
};
| 824 | |||
| 825 | static int tegra_dsi_probe(struct platform_device *pdev) | ||
| 826 | { | ||
| 827 | struct tegra_dsi *dsi; | ||
| 828 | struct resource *regs; | ||
| 829 | int err; | ||
| 830 | |||
| 831 | dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL); | ||
| 832 | if (!dsi) | ||
| 833 | return -ENOMEM; | ||
| 834 | |||
| 835 | dsi->output.dev = dsi->dev = &pdev->dev; | ||
| 836 | |||
| 837 | err = tegra_output_probe(&dsi->output); | ||
| 838 | if (err < 0) | ||
| 839 | return err; | ||
| 840 | |||
| 841 | /* | ||
| 842 | * Assume these values by default. When a DSI peripheral driver | ||
| 843 | * attaches to the DSI host, the parameters will be taken from | ||
| 844 | * the attached device. | ||
| 845 | */ | ||
| 846 | dsi->format = MIPI_DSI_FMT_RGB888; | ||
| 847 | dsi->lanes = 4; | ||
| 848 | |||
| 849 | dsi->rst = devm_reset_control_get(&pdev->dev, "dsi"); | ||
| 850 | if (IS_ERR(dsi->rst)) | ||
| 851 | return PTR_ERR(dsi->rst); | ||
| 852 | |||
| 853 | dsi->clk = devm_clk_get(&pdev->dev, NULL); | ||
| 854 | if (IS_ERR(dsi->clk)) { | ||
| 855 | dev_err(&pdev->dev, "cannot get DSI clock\n"); | ||
| 856 | return PTR_ERR(dsi->clk); | ||
| 857 | } | ||
| 858 | |||
| 859 | err = clk_prepare_enable(dsi->clk); | ||
| 860 | if (err < 0) { | ||
| 861 | dev_err(&pdev->dev, "cannot enable DSI clock\n"); | ||
| 862 | return err; | ||
| 863 | } | ||
| 864 | |||
| 865 | dsi->clk_lp = devm_clk_get(&pdev->dev, "lp"); | ||
| 866 | if (IS_ERR(dsi->clk_lp)) { | ||
| 867 | dev_err(&pdev->dev, "cannot get low-power clock\n"); | ||
| 868 | return PTR_ERR(dsi->clk_lp); | ||
| 869 | } | ||
| 870 | |||
| 871 | err = clk_prepare_enable(dsi->clk_lp); | ||
| 872 | if (err < 0) { | ||
| 873 | dev_err(&pdev->dev, "cannot enable low-power clock\n"); | ||
| 874 | return err; | ||
| 875 | } | ||
| 876 | |||
| 877 | dsi->clk_parent = devm_clk_get(&pdev->dev, "parent"); | ||
| 878 | if (IS_ERR(dsi->clk_parent)) { | ||
| 879 | dev_err(&pdev->dev, "cannot get parent clock\n"); | ||
| 880 | return PTR_ERR(dsi->clk_parent); | ||
| 881 | } | ||
| 882 | |||
| 883 | err = clk_prepare_enable(dsi->clk_parent); | ||
| 884 | if (err < 0) { | ||
| 885 | dev_err(&pdev->dev, "cannot enable parent clock\n"); | ||
| 886 | return err; | ||
| 887 | } | ||
| 888 | |||
| 889 | err = tegra_dsi_setup_clocks(dsi); | ||
| 890 | if (err < 0) { | ||
| 891 | dev_err(&pdev->dev, "cannot setup clocks\n"); | ||
| 892 | return err; | ||
| 893 | } | ||
| 894 | |||
| 895 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 896 | dsi->regs = devm_ioremap_resource(&pdev->dev, regs); | ||
| 897 | if (IS_ERR(dsi->regs)) | ||
| 898 | return PTR_ERR(dsi->regs); | ||
| 899 | |||
| 900 | tegra_dsi_initialize(dsi); | ||
| 901 | |||
| 902 | dsi->mipi = tegra_mipi_request(&pdev->dev); | ||
| 903 | if (IS_ERR(dsi->mipi)) | ||
| 904 | return PTR_ERR(dsi->mipi); | ||
| 905 | |||
| 906 | dsi->host.ops = &tegra_dsi_host_ops; | ||
| 907 | dsi->host.dev = &pdev->dev; | ||
| 908 | |||
| 909 | err = mipi_dsi_host_register(&dsi->host); | ||
| 910 | if (err < 0) { | ||
| 911 | dev_err(&pdev->dev, "failed to register DSI host: %d\n", err); | ||
| 912 | return err; | ||
| 913 | } | ||
| 914 | |||
| 915 | INIT_LIST_HEAD(&dsi->client.list); | ||
| 916 | dsi->client.ops = &dsi_client_ops; | ||
| 917 | dsi->client.dev = &pdev->dev; | ||
| 918 | |||
| 919 | err = host1x_client_register(&dsi->client); | ||
| 920 | if (err < 0) { | ||
| 921 | dev_err(&pdev->dev, "failed to register host1x client: %d\n", | ||
| 922 | err); | ||
| 923 | return err; | ||
| 924 | } | ||
| 925 | |||
| 926 | platform_set_drvdata(pdev, dsi); | ||
| 927 | |||
| 928 | return 0; | ||
| 929 | } | ||
| 930 | |||
/*
 * Unbind the DSI platform device, releasing resources in reverse order
 * of acquisition in tegra_dsi_probe(): host1x client, DSI host, MIPI
 * calibration handle, clocks, and finally the output.
 */
static int tegra_dsi_remove(struct platform_device *pdev)
{
	struct tegra_dsi *dsi = platform_get_drvdata(pdev);
	int err;

	err = host1x_client_unregister(&dsi->client);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
			err);
		return err;
	}

	mipi_dsi_host_unregister(&dsi->host);
	tegra_mipi_free(dsi->mipi);

	clk_disable_unprepare(dsi->clk_parent);
	clk_disable_unprepare(dsi->clk_lp);
	clk_disable_unprepare(dsi->clk);

	err = tegra_output_remove(&dsi->output);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to remove output: %d\n", err);
		return err;
	}

	return 0;
}
| 958 | |||
/* Devicetree match table (referenced by the driver below). */
static const struct of_device_id tegra_dsi_of_match[] = {
	{ .compatible = "nvidia,tegra114-dsi", },
	{ },
};
| 963 | |||
/* Platform driver, registered by the Tegra DRM core (hence non-static). */
struct platform_driver tegra_dsi_driver = {
	.driver = {
		.name = "tegra-dsi",
		.of_match_table = tegra_dsi_of_match,
	},
	.probe = tegra_dsi_probe,
	.remove = tegra_dsi_remove,
};
diff --git a/drivers/gpu/drm/tegra/dsi.h b/drivers/gpu/drm/tegra/dsi.h new file mode 100644 index 000000000000..00e79c1f448c --- /dev/null +++ b/drivers/gpu/drm/tegra/dsi.h | |||
| @@ -0,0 +1,134 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2013 NVIDIA Corporation | ||
| 3 | * | ||
| 4 | * Permission to use, copy, modify, distribute, and sell this software and its | ||
| 5 | * documentation for any purpose is hereby granted without fee, provided that | ||
| 6 | * the above copyright notice appear in all copies and that both that copyright | ||
| 7 | * notice and this permission notice appear in supporting documentation, and | ||
| 8 | * that the name of the copyright holders not be used in advertising or | ||
| 9 | * publicity pertaining to distribution of the software without specific, | ||
| 10 | * written prior permission. The copyright holders make no representations | ||
| 11 | * about the suitability of this software for any purpose. It is provided "as | ||
| 12 | * is" without express or implied warranty. | ||
| 13 | * | ||
| 14 | * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, | ||
| 15 | * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO | ||
| 16 | * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR | ||
| 17 | * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, | ||
| 18 | * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER | ||
| 19 | * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE | ||
| 20 | * OF THIS SOFTWARE. | ||
| 21 | */ | ||
| 22 | |||
| 23 | #ifndef DRM_TEGRA_DSI_H | ||
| 24 | #define DRM_TEGRA_DSI_H | ||
| 25 | |||
| 26 | #define DSI_INCR_SYNCPT 0x00 | ||
| 27 | #define DSI_INCR_SYNCPT_CONTROL 0x01 | ||
| 28 | #define DSI_INCR_SYNCPT_ERROR 0x02 | ||
| 29 | #define DSI_CTXSW 0x08 | ||
| 30 | #define DSI_RD_DATA 0x09 | ||
| 31 | #define DSI_WR_DATA 0x0a | ||
| 32 | #define DSI_POWER_CONTROL 0x0b | ||
| 33 | #define DSI_POWER_CONTROL_ENABLE (1 << 0) | ||
| 34 | #define DSI_INT_ENABLE 0x0c | ||
| 35 | #define DSI_INT_STATUS 0x0d | ||
| 36 | #define DSI_INT_MASK 0x0e | ||
| 37 | #define DSI_HOST_CONTROL 0x0f | ||
| 38 | #define DSI_HOST_CONTROL_RAW (1 << 6) | ||
| 39 | #define DSI_HOST_CONTROL_HS (1 << 5) | ||
| 40 | #define DSI_HOST_CONTROL_BTA (1 << 2) | ||
| 41 | #define DSI_HOST_CONTROL_CS (1 << 1) | ||
| 42 | #define DSI_HOST_CONTROL_ECC (1 << 0) | ||
| 43 | #define DSI_CONTROL 0x10 | ||
| 44 | #define DSI_CONTROL_HS_CLK_CTRL (1 << 20) | ||
| 45 | #define DSI_CONTROL_CHANNEL(c) (((c) & 0x3) << 16) | ||
| 46 | #define DSI_CONTROL_FORMAT(f) (((f) & 0x3) << 12) | ||
| 47 | #define DSI_CONTROL_TX_TRIG(x) (((x) & 0x3) << 8) | ||
| 48 | #define DSI_CONTROL_LANES(n) (((n) & 0x3) << 4) | ||
| 49 | #define DSI_CONTROL_DCS_ENABLE (1 << 3) | ||
| 50 | #define DSI_CONTROL_SOURCE(s) (((s) & 0x1) << 2) | ||
| 51 | #define DSI_CONTROL_VIDEO_ENABLE (1 << 1) | ||
| 52 | #define DSI_CONTROL_HOST_ENABLE (1 << 0) | ||
| 53 | #define DSI_SOL_DELAY 0x11 | ||
| 54 | #define DSI_MAX_THRESHOLD 0x12 | ||
| 55 | #define DSI_TRIGGER 0x13 | ||
| 56 | #define DSI_TX_CRC 0x14 | ||
| 57 | #define DSI_STATUS 0x15 | ||
| 58 | #define DSI_STATUS_IDLE (1 << 10) | ||
| 59 | #define DSI_INIT_SEQ_CONTROL 0x1a | ||
| 60 | #define DSI_INIT_SEQ_DATA_0 0x1b | ||
| 61 | #define DSI_INIT_SEQ_DATA_1 0x1c | ||
| 62 | #define DSI_INIT_SEQ_DATA_2 0x1d | ||
| 63 | #define DSI_INIT_SEQ_DATA_3 0x1e | ||
| 64 | #define DSI_INIT_SEQ_DATA_4 0x1f | ||
| 65 | #define DSI_INIT_SEQ_DATA_5 0x20 | ||
| 66 | #define DSI_INIT_SEQ_DATA_6 0x21 | ||
| 67 | #define DSI_INIT_SEQ_DATA_7 0x22 | ||
| 68 | #define DSI_PKT_SEQ_0_LO 0x23 | ||
| 69 | #define DSI_PKT_SEQ_0_HI 0x24 | ||
| 70 | #define DSI_PKT_SEQ_1_LO 0x25 | ||
| 71 | #define DSI_PKT_SEQ_1_HI 0x26 | ||
| 72 | #define DSI_PKT_SEQ_2_LO 0x27 | ||
| 73 | #define DSI_PKT_SEQ_2_HI 0x28 | ||
| 74 | #define DSI_PKT_SEQ_3_LO 0x29 | ||
| 75 | #define DSI_PKT_SEQ_3_HI 0x2a | ||
| 76 | #define DSI_PKT_SEQ_4_LO 0x2b | ||
| 77 | #define DSI_PKT_SEQ_4_HI 0x2c | ||
| 78 | #define DSI_PKT_SEQ_5_LO 0x2d | ||
| 79 | #define DSI_PKT_SEQ_5_HI 0x2e | ||
| 80 | #define DSI_DCS_CMDS 0x33 | ||
| 81 | #define DSI_PKT_LEN_0_1 0x34 | ||
| 82 | #define DSI_PKT_LEN_2_3 0x35 | ||
| 83 | #define DSI_PKT_LEN_4_5 0x36 | ||
| 84 | #define DSI_PKT_LEN_6_7 0x37 | ||
| 85 | #define DSI_PHY_TIMING_0 0x3c | ||
| 86 | #define DSI_PHY_TIMING_1 0x3d | ||
| 87 | #define DSI_PHY_TIMING_2 0x3e | ||
| 88 | #define DSI_BTA_TIMING 0x3f | ||
| 89 | |||
| 90 | #define DSI_TIMING_FIELD(value, period, hwinc) \ | ||
| 91 | ((DIV_ROUND_CLOSEST(value, period) - (hwinc)) & 0xff) | ||
| 92 | |||
| 93 | #define DSI_TIMEOUT_0 0x44 | ||
| 94 | #define DSI_TIMEOUT_LRX(x) (((x) & 0xffff) << 16) | ||
| 95 | #define DSI_TIMEOUT_HTX(x) (((x) & 0xffff) << 0) | ||
| 96 | #define DSI_TIMEOUT_1 0x45 | ||
| 97 | #define DSI_TIMEOUT_PR(x) (((x) & 0xffff) << 16) | ||
| 98 | #define DSI_TIMEOUT_TA(x) (((x) & 0xffff) << 0) | ||
| 99 | #define DSI_TO_TALLY 0x46 | ||
| 100 | #define DSI_TALLY_TA(x) (((x) & 0xff) << 16) | ||
| 101 | #define DSI_TALLY_LRX(x) (((x) & 0xff) << 8) | ||
| 102 | #define DSI_TALLY_HTX(x) (((x) & 0xff) << 0) | ||
| 103 | #define DSI_PAD_CONTROL_0 0x4b | ||
| 104 | #define DSI_PAD_CONTROL_VS1_PDIO(x) (((x) & 0xf) << 0) | ||
| 105 | #define DSI_PAD_CONTROL_VS1_PDIO_CLK (1 << 8) | ||
| 106 | #define DSI_PAD_CONTROL_VS1_PULLDN(x) (((x) & 0xf) << 16) | ||
| 107 | #define DSI_PAD_CONTROL_VS1_PULLDN_CLK (1 << 24) | ||
| 108 | #define DSI_PAD_CONTROL_CD 0x4c | ||
| 109 | #define DSI_PAD_CD_STATUS 0x4d | ||
| 110 | #define DSI_VIDEO_MODE_CONTROL 0x4e | ||
| 111 | #define DSI_PAD_CONTROL_1 0x4f | ||
| 112 | #define DSI_PAD_CONTROL_2 0x50 | ||
| 113 | #define DSI_PAD_OUT_CLK(x) (((x) & 0x7) << 0) | ||
| 114 | #define DSI_PAD_LP_DN(x) (((x) & 0x7) << 4) | ||
| 115 | #define DSI_PAD_LP_UP(x) (((x) & 0x7) << 8) | ||
| 116 | #define DSI_PAD_SLEW_DN(x) (((x) & 0x7) << 12) | ||
| 117 | #define DSI_PAD_SLEW_UP(x) (((x) & 0x7) << 16) | ||
| 118 | #define DSI_PAD_CONTROL_3 0x51 | ||
| 119 | #define DSI_PAD_CONTROL_4 0x52 | ||
| 120 | #define DSI_GANGED_MODE_CONTROL 0x53 | ||
| 121 | #define DSI_GANGED_MODE_START 0x54 | ||
| 122 | #define DSI_GANGED_MODE_SIZE 0x55 | ||
| 123 | #define DSI_RAW_DATA_BYTE_COUNT 0x56 | ||
| 124 | #define DSI_ULTRA_LOW_POWER_CONTROL 0x57 | ||
| 125 | #define DSI_INIT_SEQ_DATA_8 0x58 | ||
| 126 | #define DSI_INIT_SEQ_DATA_9 0x59 | ||
| 127 | #define DSI_INIT_SEQ_DATA_10 0x5a | ||
| 128 | #define DSI_INIT_SEQ_DATA_11 0x5b | ||
| 129 | #define DSI_INIT_SEQ_DATA_12 0x5c | ||
| 130 | #define DSI_INIT_SEQ_DATA_13 0x5d | ||
| 131 | #define DSI_INIT_SEQ_DATA_14 0x5e | ||
| 132 | #define DSI_INIT_SEQ_DATA_15 0x5f | ||
| 133 | |||
| 134 | #endif | ||
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c index a3835e7de184..f7fca09d4921 100644 --- a/drivers/gpu/drm/tegra/fb.c +++ b/drivers/gpu/drm/tegra/fb.c | |||
| @@ -18,10 +18,12 @@ static inline struct tegra_fb *to_tegra_fb(struct drm_framebuffer *fb) | |||
| 18 | return container_of(fb, struct tegra_fb, base); | 18 | return container_of(fb, struct tegra_fb, base); |
| 19 | } | 19 | } |
| 20 | 20 | ||
| 21 | #ifdef CONFIG_DRM_TEGRA_FBDEV | ||
| 21 | static inline struct tegra_fbdev *to_tegra_fbdev(struct drm_fb_helper *helper) | 22 | static inline struct tegra_fbdev *to_tegra_fbdev(struct drm_fb_helper *helper) |
| 22 | { | 23 | { |
| 23 | return container_of(helper, struct tegra_fbdev, base); | 24 | return container_of(helper, struct tegra_fbdev, base); |
| 24 | } | 25 | } |
| 26 | #endif | ||
| 25 | 27 | ||
| 26 | struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer, | 28 | struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer, |
| 27 | unsigned int index) | 29 | unsigned int index) |
| @@ -98,8 +100,10 @@ static struct tegra_fb *tegra_fb_alloc(struct drm_device *drm, | |||
| 98 | return ERR_PTR(-ENOMEM); | 100 | return ERR_PTR(-ENOMEM); |
| 99 | 101 | ||
| 100 | fb->planes = kzalloc(num_planes * sizeof(*planes), GFP_KERNEL); | 102 | fb->planes = kzalloc(num_planes * sizeof(*planes), GFP_KERNEL); |
| 101 | if (!fb->planes) | 103 | if (!fb->planes) { |
| 104 | kfree(fb); | ||
| 102 | return ERR_PTR(-ENOMEM); | 105 | return ERR_PTR(-ENOMEM); |
| 106 | } | ||
| 103 | 107 | ||
| 104 | fb->num_planes = num_planes; | 108 | fb->num_planes = num_planes; |
| 105 | 109 | ||
| @@ -172,6 +176,7 @@ unreference: | |||
| 172 | return ERR_PTR(err); | 176 | return ERR_PTR(err); |
| 173 | } | 177 | } |
| 174 | 178 | ||
| 179 | #ifdef CONFIG_DRM_TEGRA_FBDEV | ||
| 175 | static struct fb_ops tegra_fb_ops = { | 180 | static struct fb_ops tegra_fb_ops = { |
| 176 | .owner = THIS_MODULE, | 181 | .owner = THIS_MODULE, |
| 177 | .fb_fillrect = sys_fillrect, | 182 | .fb_fillrect = sys_fillrect, |
| @@ -339,6 +344,15 @@ static void tegra_fbdev_free(struct tegra_fbdev *fbdev) | |||
| 339 | kfree(fbdev); | 344 | kfree(fbdev); |
| 340 | } | 345 | } |
| 341 | 346 | ||
| 347 | void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev) | ||
| 348 | { | ||
| 349 | if (fbdev) { | ||
| 350 | drm_modeset_lock_all(fbdev->base.dev); | ||
| 351 | drm_fb_helper_restore_fbdev_mode(&fbdev->base); | ||
| 352 | drm_modeset_unlock_all(fbdev->base.dev); | ||
| 353 | } | ||
| 354 | } | ||
| 355 | |||
| 342 | static void tegra_fb_output_poll_changed(struct drm_device *drm) | 356 | static void tegra_fb_output_poll_changed(struct drm_device *drm) |
| 343 | { | 357 | { |
| 344 | struct tegra_drm *tegra = drm->dev_private; | 358 | struct tegra_drm *tegra = drm->dev_private; |
| @@ -346,16 +360,20 @@ static void tegra_fb_output_poll_changed(struct drm_device *drm) | |||
| 346 | if (tegra->fbdev) | 360 | if (tegra->fbdev) |
| 347 | drm_fb_helper_hotplug_event(&tegra->fbdev->base); | 361 | drm_fb_helper_hotplug_event(&tegra->fbdev->base); |
| 348 | } | 362 | } |
| 363 | #endif | ||
| 349 | 364 | ||
| 350 | static const struct drm_mode_config_funcs tegra_drm_mode_funcs = { | 365 | static const struct drm_mode_config_funcs tegra_drm_mode_funcs = { |
| 351 | .fb_create = tegra_fb_create, | 366 | .fb_create = tegra_fb_create, |
| 367 | #ifdef CONFIG_DRM_TEGRA_FBDEV | ||
| 352 | .output_poll_changed = tegra_fb_output_poll_changed, | 368 | .output_poll_changed = tegra_fb_output_poll_changed, |
| 369 | #endif | ||
| 353 | }; | 370 | }; |
| 354 | 371 | ||
| 355 | int tegra_drm_fb_init(struct drm_device *drm) | 372 | int tegra_drm_fb_init(struct drm_device *drm) |
| 356 | { | 373 | { |
| 374 | #ifdef CONFIG_DRM_TEGRA_FBDEV | ||
| 357 | struct tegra_drm *tegra = drm->dev_private; | 375 | struct tegra_drm *tegra = drm->dev_private; |
| 358 | struct tegra_fbdev *fbdev; | 376 | #endif |
| 359 | 377 | ||
| 360 | drm->mode_config.min_width = 0; | 378 | drm->mode_config.min_width = 0; |
| 361 | drm->mode_config.min_height = 0; | 379 | drm->mode_config.min_height = 0; |
| @@ -365,28 +383,21 @@ int tegra_drm_fb_init(struct drm_device *drm) | |||
| 365 | 383 | ||
| 366 | drm->mode_config.funcs = &tegra_drm_mode_funcs; | 384 | drm->mode_config.funcs = &tegra_drm_mode_funcs; |
| 367 | 385 | ||
| 368 | fbdev = tegra_fbdev_create(drm, 32, drm->mode_config.num_crtc, | 386 | #ifdef CONFIG_DRM_TEGRA_FBDEV |
| 369 | drm->mode_config.num_connector); | 387 | tegra->fbdev = tegra_fbdev_create(drm, 32, drm->mode_config.num_crtc, |
| 370 | if (IS_ERR(fbdev)) | 388 | drm->mode_config.num_connector); |
| 371 | return PTR_ERR(fbdev); | 389 | if (IS_ERR(tegra->fbdev)) |
| 372 | 390 | return PTR_ERR(tegra->fbdev); | |
| 373 | tegra->fbdev = fbdev; | 391 | #endif |
| 374 | 392 | ||
| 375 | return 0; | 393 | return 0; |
| 376 | } | 394 | } |
| 377 | 395 | ||
| 378 | void tegra_drm_fb_exit(struct drm_device *drm) | 396 | void tegra_drm_fb_exit(struct drm_device *drm) |
| 379 | { | 397 | { |
| 398 | #ifdef CONFIG_DRM_TEGRA_FBDEV | ||
| 380 | struct tegra_drm *tegra = drm->dev_private; | 399 | struct tegra_drm *tegra = drm->dev_private; |
| 381 | 400 | ||
| 382 | tegra_fbdev_free(tegra->fbdev); | 401 | tegra_fbdev_free(tegra->fbdev); |
| 383 | } | 402 | #endif |
| 384 | |||
| 385 | void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev) | ||
| 386 | { | ||
| 387 | if (fbdev) { | ||
| 388 | drm_modeset_lock_all(fbdev->base.dev); | ||
| 389 | drm_fb_helper_restore_fbdev_mode(&fbdev->base); | ||
| 390 | drm_modeset_unlock_all(fbdev->base.dev); | ||
| 391 | } | ||
| 392 | } | 403 | } |
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c index 28a9cbc07ab9..ef853e558036 100644 --- a/drivers/gpu/drm/tegra/gem.c +++ b/drivers/gpu/drm/tegra/gem.c | |||
| @@ -18,6 +18,7 @@ | |||
| 18 | * GNU General Public License for more details. | 18 | * GNU General Public License for more details. |
| 19 | */ | 19 | */ |
| 20 | 20 | ||
| 21 | #include <linux/dma-buf.h> | ||
| 21 | #include <drm/tegra_drm.h> | 22 | #include <drm/tegra_drm.h> |
| 22 | 23 | ||
| 23 | #include "gem.h" | 24 | #include "gem.h" |
| @@ -83,7 +84,7 @@ static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo) | |||
| 83 | return bo; | 84 | return bo; |
| 84 | } | 85 | } |
| 85 | 86 | ||
| 86 | const struct host1x_bo_ops tegra_bo_ops = { | 87 | static const struct host1x_bo_ops tegra_bo_ops = { |
| 87 | .get = tegra_bo_get, | 88 | .get = tegra_bo_get, |
| 88 | .put = tegra_bo_put, | 89 | .put = tegra_bo_put, |
| 89 | .pin = tegra_bo_pin, | 90 | .pin = tegra_bo_pin, |
| @@ -145,7 +146,6 @@ err_dma: | |||
| 145 | kfree(bo); | 146 | kfree(bo); |
| 146 | 147 | ||
| 147 | return ERR_PTR(err); | 148 | return ERR_PTR(err); |
| 148 | |||
| 149 | } | 149 | } |
| 150 | 150 | ||
| 151 | struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file, | 151 | struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file, |
| @@ -174,13 +174,87 @@ err: | |||
| 174 | return ERR_PTR(ret); | 174 | return ERR_PTR(ret); |
| 175 | } | 175 | } |
| 176 | 176 | ||
| 177 | struct tegra_bo *tegra_bo_import(struct drm_device *drm, struct dma_buf *buf) | ||
| 178 | { | ||
| 179 | struct dma_buf_attachment *attach; | ||
| 180 | struct tegra_bo *bo; | ||
| 181 | ssize_t size; | ||
| 182 | int err; | ||
| 183 | |||
| 184 | bo = kzalloc(sizeof(*bo), GFP_KERNEL); | ||
| 185 | if (!bo) | ||
| 186 | return ERR_PTR(-ENOMEM); | ||
| 187 | |||
| 188 | host1x_bo_init(&bo->base, &tegra_bo_ops); | ||
| 189 | size = round_up(buf->size, PAGE_SIZE); | ||
| 190 | |||
| 191 | err = drm_gem_object_init(drm, &bo->gem, size); | ||
| 192 | if (err < 0) | ||
| 193 | goto free; | ||
| 194 | |||
| 195 | err = drm_gem_create_mmap_offset(&bo->gem); | ||
| 196 | if (err < 0) | ||
| 197 | goto release; | ||
| 198 | |||
| 199 | attach = dma_buf_attach(buf, drm->dev); | ||
| 200 | if (IS_ERR(attach)) { | ||
| 201 | err = PTR_ERR(attach); | ||
| 202 | goto free_mmap; | ||
| 203 | } | ||
| 204 | |||
| 205 | get_dma_buf(buf); | ||
| 206 | |||
| 207 | bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE); | ||
| 208 | if (!bo->sgt) { | ||
| 209 | err = -ENOMEM; | ||
| 210 | goto detach; | ||
| 211 | } | ||
| 212 | |||
| 213 | if (IS_ERR(bo->sgt)) { | ||
| 214 | err = PTR_ERR(bo->sgt); | ||
| 215 | goto detach; | ||
| 216 | } | ||
| 217 | |||
| 218 | if (bo->sgt->nents > 1) { | ||
| 219 | err = -EINVAL; | ||
| 220 | goto detach; | ||
| 221 | } | ||
| 222 | |||
| 223 | bo->paddr = sg_dma_address(bo->sgt->sgl); | ||
| 224 | bo->gem.import_attach = attach; | ||
| 225 | |||
| 226 | return bo; | ||
| 227 | |||
| 228 | detach: | ||
| 229 | if (!IS_ERR_OR_NULL(bo->sgt)) | ||
| 230 | dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE); | ||
| 231 | |||
| 232 | dma_buf_detach(buf, attach); | ||
| 233 | dma_buf_put(buf); | ||
| 234 | free_mmap: | ||
| 235 | drm_gem_free_mmap_offset(&bo->gem); | ||
| 236 | release: | ||
| 237 | drm_gem_object_release(&bo->gem); | ||
| 238 | free: | ||
| 239 | kfree(bo); | ||
| 240 | |||
| 241 | return ERR_PTR(err); | ||
| 242 | } | ||
| 243 | |||
| 177 | void tegra_bo_free_object(struct drm_gem_object *gem) | 244 | void tegra_bo_free_object(struct drm_gem_object *gem) |
| 178 | { | 245 | { |
| 179 | struct tegra_bo *bo = to_tegra_bo(gem); | 246 | struct tegra_bo *bo = to_tegra_bo(gem); |
| 180 | 247 | ||
| 248 | if (gem->import_attach) { | ||
| 249 | dma_buf_unmap_attachment(gem->import_attach, bo->sgt, | ||
| 250 | DMA_TO_DEVICE); | ||
| 251 | drm_prime_gem_destroy(gem, NULL); | ||
| 252 | } else { | ||
| 253 | tegra_bo_destroy(gem->dev, bo); | ||
| 254 | } | ||
| 255 | |||
| 181 | drm_gem_free_mmap_offset(gem); | 256 | drm_gem_free_mmap_offset(gem); |
| 182 | drm_gem_object_release(gem); | 257 | drm_gem_object_release(gem); |
| 183 | tegra_bo_destroy(gem->dev, bo); | ||
| 184 | 258 | ||
| 185 | kfree(bo); | 259 | kfree(bo); |
| 186 | } | 260 | } |
| @@ -256,3 +330,106 @@ int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma) | |||
| 256 | 330 | ||
| 257 | return ret; | 331 | return ret; |
| 258 | } | 332 | } |
| 333 | |||
| 334 | static struct sg_table * | ||
| 335 | tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach, | ||
| 336 | enum dma_data_direction dir) | ||
| 337 | { | ||
| 338 | struct drm_gem_object *gem = attach->dmabuf->priv; | ||
| 339 | struct tegra_bo *bo = to_tegra_bo(gem); | ||
| 340 | struct sg_table *sgt; | ||
| 341 | |||
| 342 | sgt = kmalloc(sizeof(*sgt), GFP_KERNEL); | ||
| 343 | if (!sgt) | ||
| 344 | return NULL; | ||
| 345 | |||
| 346 | if (sg_alloc_table(sgt, 1, GFP_KERNEL)) { | ||
| 347 | kfree(sgt); | ||
| 348 | return NULL; | ||
| 349 | } | ||
| 350 | |||
| 351 | sg_dma_address(sgt->sgl) = bo->paddr; | ||
| 352 | sg_dma_len(sgt->sgl) = gem->size; | ||
| 353 | |||
| 354 | return sgt; | ||
| 355 | } | ||
| 356 | |||
| 357 | static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach, | ||
| 358 | struct sg_table *sgt, | ||
| 359 | enum dma_data_direction dir) | ||
| 360 | { | ||
| 361 | sg_free_table(sgt); | ||
| 362 | kfree(sgt); | ||
| 363 | } | ||
| 364 | |||
| 365 | static void tegra_gem_prime_release(struct dma_buf *buf) | ||
| 366 | { | ||
| 367 | drm_gem_dmabuf_release(buf); | ||
| 368 | } | ||
| 369 | |||
| 370 | static void *tegra_gem_prime_kmap_atomic(struct dma_buf *buf, | ||
| 371 | unsigned long page) | ||
| 372 | { | ||
| 373 | return NULL; | ||
| 374 | } | ||
| 375 | |||
| 376 | static void tegra_gem_prime_kunmap_atomic(struct dma_buf *buf, | ||
| 377 | unsigned long page, | ||
| 378 | void *addr) | ||
| 379 | { | ||
| 380 | } | ||
| 381 | |||
| 382 | static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page) | ||
| 383 | { | ||
| 384 | return NULL; | ||
| 385 | } | ||
| 386 | |||
| 387 | static void tegra_gem_prime_kunmap(struct dma_buf *buf, unsigned long page, | ||
| 388 | void *addr) | ||
| 389 | { | ||
| 390 | } | ||
| 391 | |||
| 392 | static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma) | ||
| 393 | { | ||
| 394 | return -EINVAL; | ||
| 395 | } | ||
| 396 | |||
| 397 | static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = { | ||
| 398 | .map_dma_buf = tegra_gem_prime_map_dma_buf, | ||
| 399 | .unmap_dma_buf = tegra_gem_prime_unmap_dma_buf, | ||
| 400 | .release = tegra_gem_prime_release, | ||
| 401 | .kmap_atomic = tegra_gem_prime_kmap_atomic, | ||
| 402 | .kunmap_atomic = tegra_gem_prime_kunmap_atomic, | ||
| 403 | .kmap = tegra_gem_prime_kmap, | ||
| 404 | .kunmap = tegra_gem_prime_kunmap, | ||
| 405 | .mmap = tegra_gem_prime_mmap, | ||
| 406 | }; | ||
| 407 | |||
| 408 | struct dma_buf *tegra_gem_prime_export(struct drm_device *drm, | ||
| 409 | struct drm_gem_object *gem, | ||
| 410 | int flags) | ||
| 411 | { | ||
| 412 | return dma_buf_export(gem, &tegra_gem_prime_dmabuf_ops, gem->size, | ||
| 413 | flags); | ||
| 414 | } | ||
| 415 | |||
| 416 | struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm, | ||
| 417 | struct dma_buf *buf) | ||
| 418 | { | ||
| 419 | struct tegra_bo *bo; | ||
| 420 | |||
| 421 | if (buf->ops == &tegra_gem_prime_dmabuf_ops) { | ||
| 422 | struct drm_gem_object *gem = buf->priv; | ||
| 423 | |||
| 424 | if (gem->dev == drm) { | ||
| 425 | drm_gem_object_reference(gem); | ||
| 426 | return gem; | ||
| 427 | } | ||
| 428 | } | ||
| 429 | |||
| 430 | bo = tegra_bo_import(drm, buf); | ||
| 431 | if (IS_ERR(bo)) | ||
| 432 | return ERR_CAST(bo); | ||
| 433 | |||
| 434 | return &bo->gem; | ||
| 435 | } | ||
diff --git a/drivers/gpu/drm/tegra/gem.h b/drivers/gpu/drm/tegra/gem.h index 7674000bf47d..ffd4f792b410 100644 --- a/drivers/gpu/drm/tegra/gem.h +++ b/drivers/gpu/drm/tegra/gem.h | |||
| @@ -31,6 +31,7 @@ struct tegra_bo { | |||
| 31 | struct drm_gem_object gem; | 31 | struct drm_gem_object gem; |
| 32 | struct host1x_bo base; | 32 | struct host1x_bo base; |
| 33 | unsigned long flags; | 33 | unsigned long flags; |
| 34 | struct sg_table *sgt; | ||
| 34 | dma_addr_t paddr; | 35 | dma_addr_t paddr; |
| 35 | void *vaddr; | 36 | void *vaddr; |
| 36 | }; | 37 | }; |
| @@ -40,8 +41,6 @@ static inline struct tegra_bo *to_tegra_bo(struct drm_gem_object *gem) | |||
| 40 | return container_of(gem, struct tegra_bo, gem); | 41 | return container_of(gem, struct tegra_bo, gem); |
| 41 | } | 42 | } |
| 42 | 43 | ||
| 43 | extern const struct host1x_bo_ops tegra_bo_ops; | ||
| 44 | |||
| 45 | struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size, | 44 | struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size, |
| 46 | unsigned long flags); | 45 | unsigned long flags); |
| 47 | struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file, | 46 | struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file, |
| @@ -59,4 +58,10 @@ int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma); | |||
| 59 | 58 | ||
| 60 | extern const struct vm_operations_struct tegra_bo_vm_ops; | 59 | extern const struct vm_operations_struct tegra_bo_vm_ops; |
| 61 | 60 | ||
| 61 | struct dma_buf *tegra_gem_prime_export(struct drm_device *drm, | ||
| 62 | struct drm_gem_object *gem, | ||
| 63 | int flags); | ||
| 64 | struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm, | ||
| 65 | struct dma_buf *buf); | ||
| 66 | |||
| 62 | #endif | 67 | #endif |
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c index 7f6253ea5cb5..6928015d11a4 100644 --- a/drivers/gpu/drm/tegra/hdmi.c +++ b/drivers/gpu/drm/tegra/hdmi.c | |||
| @@ -40,6 +40,7 @@ struct tegra_hdmi { | |||
| 40 | struct host1x_client client; | 40 | struct host1x_client client; |
| 41 | struct tegra_output output; | 41 | struct tegra_output output; |
| 42 | struct device *dev; | 42 | struct device *dev; |
| 43 | bool enabled; | ||
| 43 | 44 | ||
| 44 | struct regulator *vdd; | 45 | struct regulator *vdd; |
| 45 | struct regulator *pll; | 46 | struct regulator *pll; |
| @@ -379,7 +380,7 @@ static void tegra_hdmi_setup_audio_fs_tables(struct tegra_hdmi *hdmi) | |||
| 379 | 380 | ||
| 380 | if (f > 96000) | 381 | if (f > 96000) |
| 381 | delta = 2; | 382 | delta = 2; |
| 382 | else if (f > 480000) | 383 | else if (f > 48000) |
| 383 | delta = 6; | 384 | delta = 6; |
| 384 | else | 385 | else |
| 385 | delta = 9; | 386 | delta = 9; |
| @@ -699,6 +700,9 @@ static int tegra_output_hdmi_enable(struct tegra_output *output) | |||
| 699 | int retries = 1000; | 700 | int retries = 1000; |
| 700 | int err; | 701 | int err; |
| 701 | 702 | ||
| 703 | if (hdmi->enabled) | ||
| 704 | return 0; | ||
| 705 | |||
| 702 | hdmi->dvi = !tegra_output_is_hdmi(output); | 706 | hdmi->dvi = !tegra_output_is_hdmi(output); |
| 703 | 707 | ||
| 704 | pclk = mode->clock * 1000; | 708 | pclk = mode->clock * 1000; |
| @@ -839,10 +843,6 @@ static int tegra_output_hdmi_enable(struct tegra_output *output) | |||
| 839 | value |= SOR_CSTM_ROTCLK(2); | 843 | value |= SOR_CSTM_ROTCLK(2); |
| 840 | tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_CSTM); | 844 | tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_CSTM); |
| 841 | 845 | ||
| 842 | tegra_dc_writel(dc, DISP_CTRL_MODE_STOP, DC_CMD_DISPLAY_COMMAND); | ||
| 843 | tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL); | ||
| 844 | tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL); | ||
| 845 | |||
| 846 | /* start SOR */ | 846 | /* start SOR */ |
| 847 | tegra_hdmi_writel(hdmi, | 847 | tegra_hdmi_writel(hdmi, |
| 848 | SOR_PWR_NORMAL_STATE_PU | | 848 | SOR_PWR_NORMAL_STATE_PU | |
| @@ -892,31 +892,67 @@ static int tegra_output_hdmi_enable(struct tegra_output *output) | |||
| 892 | HDMI_NV_PDISP_SOR_STATE1); | 892 | HDMI_NV_PDISP_SOR_STATE1); |
| 893 | tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_STATE0); | 893 | tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_STATE0); |
| 894 | 894 | ||
| 895 | tegra_dc_writel(dc, HDMI_ENABLE, DC_DISP_DISP_WIN_OPTIONS); | 895 | value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); |
| 896 | 896 | value |= HDMI_ENABLE; | |
| 897 | value = PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE | | 897 | tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); |
| 898 | PW4_ENABLE | PM0_ENABLE | PM1_ENABLE; | ||
| 899 | tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL); | ||
| 900 | 898 | ||
| 901 | value = DISP_CTRL_MODE_C_DISPLAY; | 899 | value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND); |
| 900 | value &= ~DISP_CTRL_MODE_MASK; | ||
| 901 | value |= DISP_CTRL_MODE_C_DISPLAY; | ||
| 902 | tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND); | 902 | tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND); |
| 903 | 903 | ||
| 904 | value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL); | ||
| 905 | value |= PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE | | ||
| 906 | PW4_ENABLE | PM0_ENABLE | PM1_ENABLE; | ||
| 907 | tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL); | ||
| 908 | |||
| 904 | tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL); | 909 | tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL); |
| 905 | tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL); | 910 | tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL); |
| 906 | 911 | ||
| 907 | /* TODO: add HDCP support */ | 912 | /* TODO: add HDCP support */ |
| 908 | 913 | ||
| 914 | hdmi->enabled = true; | ||
| 915 | |||
| 909 | return 0; | 916 | return 0; |
| 910 | } | 917 | } |
| 911 | 918 | ||
| 912 | static int tegra_output_hdmi_disable(struct tegra_output *output) | 919 | static int tegra_output_hdmi_disable(struct tegra_output *output) |
| 913 | { | 920 | { |
| 921 | struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc); | ||
| 914 | struct tegra_hdmi *hdmi = to_hdmi(output); | 922 | struct tegra_hdmi *hdmi = to_hdmi(output); |
| 923 | unsigned long value; | ||
| 924 | |||
| 925 | if (!hdmi->enabled) | ||
| 926 | return 0; | ||
| 927 | |||
| 928 | /* | ||
| 929 | * The following accesses registers of the display controller, so make | ||
| 930 | * sure it's only executed when the output is attached to one. | ||
| 931 | */ | ||
| 932 | if (dc) { | ||
| 933 | value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL); | ||
| 934 | value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE | | ||
| 935 | PW4_ENABLE | PM0_ENABLE | PM1_ENABLE); | ||
| 936 | tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL); | ||
| 937 | |||
| 938 | value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND); | ||
| 939 | value &= ~DISP_CTRL_MODE_MASK; | ||
| 940 | tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND); | ||
| 941 | |||
| 942 | value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); | ||
| 943 | value &= ~HDMI_ENABLE; | ||
| 944 | tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); | ||
| 945 | |||
| 946 | tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL); | ||
| 947 | tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL); | ||
| 948 | } | ||
| 915 | 949 | ||
| 916 | reset_control_assert(hdmi->rst); | 950 | reset_control_assert(hdmi->rst); |
| 917 | clk_disable(hdmi->clk); | 951 | clk_disable(hdmi->clk); |
| 918 | regulator_disable(hdmi->pll); | 952 | regulator_disable(hdmi->pll); |
| 919 | 953 | ||
| 954 | hdmi->enabled = false; | ||
| 955 | |||
| 920 | return 0; | 956 | return 0; |
| 921 | } | 957 | } |
| 922 | 958 | ||
| @@ -960,7 +996,7 @@ static int tegra_output_hdmi_check_mode(struct tegra_output *output, | |||
| 960 | parent = clk_get_parent(hdmi->clk_parent); | 996 | parent = clk_get_parent(hdmi->clk_parent); |
| 961 | 997 | ||
| 962 | err = clk_round_rate(parent, pclk * 4); | 998 | err = clk_round_rate(parent, pclk * 4); |
| 963 | if (err < 0) | 999 | if (err <= 0) |
| 964 | *status = MODE_NOCLOCK; | 1000 | *status = MODE_NOCLOCK; |
| 965 | else | 1001 | else |
| 966 | *status = MODE_OK; | 1002 | *status = MODE_OK; |
| @@ -1382,9 +1418,6 @@ static int tegra_hdmi_probe(struct platform_device *pdev) | |||
| 1382 | return err; | 1418 | return err; |
| 1383 | 1419 | ||
| 1384 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1420 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 1385 | if (!regs) | ||
| 1386 | return -ENXIO; | ||
| 1387 | |||
| 1388 | hdmi->regs = devm_ioremap_resource(&pdev->dev, regs); | 1421 | hdmi->regs = devm_ioremap_resource(&pdev->dev, regs); |
| 1389 | if (IS_ERR(hdmi->regs)) | 1422 | if (IS_ERR(hdmi->regs)) |
| 1390 | return PTR_ERR(hdmi->regs); | 1423 | return PTR_ERR(hdmi->regs); |
diff --git a/drivers/gpu/drm/tegra/mipi-phy.c b/drivers/gpu/drm/tegra/mipi-phy.c new file mode 100644 index 000000000000..e2c4aedaee78 --- /dev/null +++ b/drivers/gpu/drm/tegra/mipi-phy.c | |||
| @@ -0,0 +1,138 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2013 NVIDIA Corporation | ||
| 3 | * | ||
| 4 | * Permission to use, copy, modify, distribute, and sell this software and its | ||
| 5 | * documentation for any purpose is hereby granted without fee, provided that | ||
| 6 | * the above copyright notice appear in all copies and that both that copyright | ||
| 7 | * notice and this permission notice appear in supporting documentation, and | ||
| 8 | * that the name of the copyright holders not be used in advertising or | ||
| 9 | * publicity pertaining to distribution of the software without specific, | ||
| 10 | * written prior permission. The copyright holders make no representations | ||
| 11 | * about the suitability of this software for any purpose. It is provided "as | ||
| 12 | * is" without express or implied warranty. | ||
| 13 | * | ||
| 14 | * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, | ||
| 15 | * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO | ||
| 16 | * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR | ||
| 17 | * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, | ||
| 18 | * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER | ||
| 19 | * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE | ||
| 20 | * OF THIS SOFTWARE. | ||
| 21 | */ | ||
| 22 | |||
| 23 | #include <linux/errno.h> | ||
| 24 | #include <linux/kernel.h> | ||
| 25 | |||
| 26 | #include "mipi-phy.h" | ||
| 27 | |||
| 28 | /* | ||
| 29 | * Default D-PHY timings based on MIPI D-PHY specification. Derived from | ||
| 30 | * the valid ranges specified in Section 5.9 of the D-PHY specification | ||
| 31 | * with minor adjustments. | ||
| 32 | */ | ||
| 33 | int mipi_dphy_timing_get_default(struct mipi_dphy_timing *timing, | ||
| 34 | unsigned long period) | ||
| 35 | { | ||
| 36 | timing->clkmiss = 0; | ||
| 37 | timing->clkpost = 70 + 52 * period; | ||
| 38 | timing->clkpre = 8; | ||
| 39 | timing->clkprepare = 65; | ||
| 40 | timing->clksettle = 95; | ||
| 41 | timing->clktermen = 0; | ||
| 42 | timing->clktrail = 80; | ||
| 43 | timing->clkzero = 260; | ||
| 44 | timing->dtermen = 0; | ||
| 45 | timing->eot = 0; | ||
| 46 | timing->hsexit = 120; | ||
| 47 | timing->hsprepare = 65 + 5 * period; | ||
| 48 | timing->hszero = 145 + 5 * period; | ||
| 49 | timing->hssettle = 85 + 6 * period; | ||
| 50 | timing->hsskip = 40; | ||
| 51 | timing->hstrail = max(8 * period, 60 + 4 * period); | ||
| 52 | timing->init = 100000; | ||
| 53 | timing->lpx = 60; | ||
| 54 | timing->taget = 5 * timing->lpx; | ||
| 55 | timing->tago = 4 * timing->lpx; | ||
| 56 | timing->tasure = 2 * timing->lpx; | ||
| 57 | timing->wakeup = 1000000; | ||
| 58 | |||
| 59 | return 0; | ||
| 60 | } | ||
| 61 | |||
| 62 | /* | ||
| 63 | * Validate D-PHY timing according to MIPI Alliance Specification for D-PHY, | ||
| 64 | * Section 5.9 "Global Operation Timing Parameters". | ||
| 65 | */ | ||
| 66 | int mipi_dphy_timing_validate(struct mipi_dphy_timing *timing, | ||
| 67 | unsigned long period) | ||
| 68 | { | ||
| 69 | if (timing->clkmiss > 60) | ||
| 70 | return -EINVAL; | ||
| 71 | |||
| 72 | if (timing->clkpost < (60 + 52 * period)) | ||
| 73 | return -EINVAL; | ||
| 74 | |||
| 75 | if (timing->clkpre < 8) | ||
| 76 | return -EINVAL; | ||
| 77 | |||
| 78 | if (timing->clkprepare < 38 || timing->clkprepare > 95) | ||
| 79 | return -EINVAL; | ||
| 80 | |||
| 81 | if (timing->clksettle < 95 || timing->clksettle > 300) | ||
| 82 | return -EINVAL; | ||
| 83 | |||
| 84 | if (timing->clktermen > 38) | ||
| 85 | return -EINVAL; | ||
| 86 | |||
| 87 | if (timing->clktrail < 60) | ||
| 88 | return -EINVAL; | ||
| 89 | |||
| 90 | if (timing->clkprepare + timing->clkzero < 300) | ||
| 91 | return -EINVAL; | ||
| 92 | |||
| 93 | if (timing->dtermen > 35 + 4 * period) | ||
| 94 | return -EINVAL; | ||
| 95 | |||
| 96 | if (timing->eot > 105 + 12 * period) | ||
| 97 | return -EINVAL; | ||
| 98 | |||
| 99 | if (timing->hsexit < 100) | ||
| 100 | return -EINVAL; | ||
| 101 | |||
| 102 | if (timing->hsprepare < 40 + 4 * period || | ||
| 103 | timing->hsprepare > 85 + 6 * period) | ||
| 104 | return -EINVAL; | ||
| 105 | |||
| 106 | if (timing->hsprepare + timing->hszero < 145 + 10 * period) | ||
| 107 | return -EINVAL; | ||
| 108 | |||
| 109 | if ((timing->hssettle < 85 + 6 * period) || | ||
| 110 | (timing->hssettle > 145 + 10 * period)) | ||
| 111 | return -EINVAL; | ||
| 112 | |||
| 113 | if (timing->hsskip < 40 || timing->hsskip > 55 + 4 * period) | ||
| 114 | return -EINVAL; | ||
| 115 | |||
| 116 | if (timing->hstrail < max(8 * period, 60 + 4 * period)) | ||
| 117 | return -EINVAL; | ||
| 118 | |||
| 119 | if (timing->init < 100000) | ||
| 120 | return -EINVAL; | ||
| 121 | |||
| 122 | if (timing->lpx < 50) | ||
| 123 | return -EINVAL; | ||
| 124 | |||
| 125 | if (timing->taget != 5 * timing->lpx) | ||
| 126 | return -EINVAL; | ||
| 127 | |||
| 128 | if (timing->tago != 4 * timing->lpx) | ||
| 129 | return -EINVAL; | ||
| 130 | |||
| 131 | if (timing->tasure < timing->lpx || timing->tasure > 2 * timing->lpx) | ||
| 132 | return -EINVAL; | ||
| 133 | |||
| 134 | if (timing->wakeup < 1000000) | ||
| 135 | return -EINVAL; | ||
| 136 | |||
| 137 | return 0; | ||
| 138 | } | ||
diff --git a/drivers/gpu/drm/tegra/mipi-phy.h b/drivers/gpu/drm/tegra/mipi-phy.h new file mode 100644 index 000000000000..d3591694432d --- /dev/null +++ b/drivers/gpu/drm/tegra/mipi-phy.h | |||
| @@ -0,0 +1,65 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2013 NVIDIA Corporation | ||
| 3 | * | ||
| 4 | * Permission to use, copy, modify, distribute, and sell this software and its | ||
| 5 | * documentation for any purpose is hereby granted without fee, provided that | ||
| 6 | * the above copyright notice appear in all copies and that both that copyright | ||
| 7 | * notice and this permission notice appear in supporting documentation, and | ||
| 8 | * that the name of the copyright holders not be used in advertising or | ||
| 9 | * publicity pertaining to distribution of the software without specific, | ||
| 10 | * written prior permission. The copyright holders make no representations | ||
| 11 | * about the suitability of this software for any purpose. It is provided "as | ||
| 12 | * is" without express or implied warranty. | ||
| 13 | * | ||
| 14 | * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, | ||
| 15 | * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO | ||
| 16 | * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR | ||
| 17 | * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, | ||
| 18 | * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER | ||
| 19 | * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE | ||
| 20 | * OF THIS SOFTWARE. | ||
| 21 | */ | ||
| 22 | |||
| 23 | #ifndef DRM_TEGRA_MIPI_PHY_H | ||
| 24 | #define DRM_TEGRA_MIPI_PHY_H | ||
| 25 | |||
| 26 | /* | ||
| 27 | * D-PHY timing parameters | ||
| 28 | * | ||
| 29 | * A detailed description of these parameters can be found in the MIPI | ||
| 30 | * Alliance Specification for D-PHY, Section 5.9 "Global Operation Timing | ||
| 31 | * Parameters". | ||
| 32 | * | ||
| 33 | * All parameters are specified in nanoseconds. | ||
| 34 | */ | ||
/*
 * Field names correspond to the Section 5.9 parameter names with the
 * leading "T" and the dashes dropped (e.g. clkpost is TCLK-POST,
 * taget is TTA-GET).
 */
struct mipi_dphy_timing {
	unsigned int clkmiss;	/* TCLK-MISS */
	unsigned int clkpost;	/* TCLK-POST */
	unsigned int clkpre;	/* TCLK-PRE */
	unsigned int clkprepare;	/* TCLK-PREPARE */
	unsigned int clksettle;	/* TCLK-SETTLE */
	unsigned int clktermen;	/* TCLK-TERM-EN */
	unsigned int clktrail;	/* TCLK-TRAIL */
	unsigned int clkzero;	/* TCLK-ZERO */
	unsigned int dtermen;	/* TD-TERM-EN */
	unsigned int eot;	/* TEOT */
	unsigned int hsexit;	/* THS-EXIT */
	unsigned int hsprepare;	/* THS-PREPARE */
	unsigned int hszero;	/* THS-ZERO */
	unsigned int hssettle;	/* THS-SETTLE */
	unsigned int hsskip;	/* THS-SKIP */
	unsigned int hstrail;	/* THS-TRAIL */
	unsigned int init;	/* TINIT */
	unsigned int lpx;	/* TLPX */
	unsigned int taget;	/* TTA-GET (bus turnaround) */
	unsigned int tago;	/* TTA-GO (bus turnaround) */
	unsigned int tasure;	/* TTA-SURE (bus turnaround) */
	unsigned int wakeup;	/* TWAKEUP */
};

/* Fill @timing with specification-derived defaults; @period is the bit
 * clock period in nanoseconds.  Returns 0. */
int mipi_dphy_timing_get_default(struct mipi_dphy_timing *timing,
				 unsigned long period);
/* Check @timing against the Section 5.9 limits; @period is the bit
 * clock period in nanoseconds.  Returns 0 if valid, -EINVAL otherwise. */
int mipi_dphy_timing_validate(struct mipi_dphy_timing *timing,
			      unsigned long period);
| 64 | |||
| 65 | #endif | ||
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c index 2cb0065e0578..57cecbd18ca8 100644 --- a/drivers/gpu/drm/tegra/output.c +++ b/drivers/gpu/drm/tegra/output.c | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | 9 | ||
| 10 | #include <linux/of_gpio.h> | 10 | #include <linux/of_gpio.h> |
| 11 | 11 | ||
| 12 | #include <drm/drm_panel.h> | ||
| 12 | #include "drm.h" | 13 | #include "drm.h" |
| 13 | 14 | ||
| 14 | static int tegra_connector_get_modes(struct drm_connector *connector) | 15 | static int tegra_connector_get_modes(struct drm_connector *connector) |
| @@ -17,6 +18,16 @@ static int tegra_connector_get_modes(struct drm_connector *connector) | |||
| 17 | struct edid *edid = NULL; | 18 | struct edid *edid = NULL; |
| 18 | int err = 0; | 19 | int err = 0; |
| 19 | 20 | ||
| 21 | /* | ||
| 22 | * If the panel provides one or more modes, use them exclusively and | ||
| 23 | * ignore any other means of obtaining a mode. | ||
| 24 | */ | ||
| 25 | if (output->panel) { | ||
| 26 | err = output->panel->funcs->get_modes(output->panel); | ||
| 27 | if (err > 0) | ||
| 28 | return err; | ||
| 29 | } | ||
| 30 | |||
| 20 | if (output->edid) | 31 | if (output->edid) |
| 21 | edid = kmemdup(output->edid, sizeof(*edid), GFP_KERNEL); | 32 | edid = kmemdup(output->edid, sizeof(*edid), GFP_KERNEL); |
| 22 | else if (output->ddc) | 33 | else if (output->ddc) |
| @@ -72,6 +83,11 @@ tegra_connector_detect(struct drm_connector *connector, bool force) | |||
| 72 | else | 83 | else |
| 73 | status = connector_status_connected; | 84 | status = connector_status_connected; |
| 74 | } else { | 85 | } else { |
| 86 | if (!output->panel) | ||
| 87 | status = connector_status_disconnected; | ||
| 88 | else | ||
| 89 | status = connector_status_connected; | ||
| 90 | |||
| 75 | if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) | 91 | if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) |
| 76 | status = connector_status_connected; | 92 | status = connector_status_connected; |
| 77 | } | 93 | } |
| @@ -115,6 +131,16 @@ static const struct drm_encoder_funcs encoder_funcs = { | |||
| 115 | 131 | ||
| 116 | static void tegra_encoder_dpms(struct drm_encoder *encoder, int mode) | 132 | static void tegra_encoder_dpms(struct drm_encoder *encoder, int mode) |
| 117 | { | 133 | { |
| 134 | struct tegra_output *output = encoder_to_output(encoder); | ||
| 135 | struct drm_panel *panel = output->panel; | ||
| 136 | |||
| 137 | if (mode != DRM_MODE_DPMS_ON) { | ||
| 138 | drm_panel_disable(panel); | ||
| 139 | tegra_output_disable(output); | ||
| 140 | } else { | ||
| 141 | tegra_output_enable(output); | ||
| 142 | drm_panel_enable(panel); | ||
| 143 | } | ||
| 118 | } | 144 | } |
| 119 | 145 | ||
| 120 | static bool tegra_encoder_mode_fixup(struct drm_encoder *encoder, | 146 | static bool tegra_encoder_mode_fixup(struct drm_encoder *encoder, |
| @@ -163,14 +189,22 @@ static irqreturn_t hpd_irq(int irq, void *data) | |||
| 163 | 189 | ||
| 164 | int tegra_output_probe(struct tegra_output *output) | 190 | int tegra_output_probe(struct tegra_output *output) |
| 165 | { | 191 | { |
| 192 | struct device_node *ddc, *panel; | ||
| 166 | enum of_gpio_flags flags; | 193 | enum of_gpio_flags flags; |
| 167 | struct device_node *ddc; | 194 | int err, size; |
| 168 | size_t size; | ||
| 169 | int err; | ||
| 170 | 195 | ||
| 171 | if (!output->of_node) | 196 | if (!output->of_node) |
| 172 | output->of_node = output->dev->of_node; | 197 | output->of_node = output->dev->of_node; |
| 173 | 198 | ||
| 199 | panel = of_parse_phandle(output->of_node, "nvidia,panel", 0); | ||
| 200 | if (panel) { | ||
| 201 | output->panel = of_drm_find_panel(panel); | ||
| 202 | if (!output->panel) | ||
| 203 | return -EPROBE_DEFER; | ||
| 204 | |||
| 205 | of_node_put(panel); | ||
| 206 | } | ||
| 207 | |||
| 174 | output->edid = of_get_property(output->of_node, "nvidia,edid", &size); | 208 | output->edid = of_get_property(output->of_node, "nvidia,edid", &size); |
| 175 | 209 | ||
| 176 | ddc = of_parse_phandle(output->of_node, "nvidia,ddc-i2c-bus", 0); | 210 | ddc = of_parse_phandle(output->of_node, "nvidia,ddc-i2c-bus", 0); |
| @@ -185,9 +219,6 @@ int tegra_output_probe(struct tegra_output *output) | |||
| 185 | of_node_put(ddc); | 219 | of_node_put(ddc); |
| 186 | } | 220 | } |
| 187 | 221 | ||
| 188 | if (!output->edid && !output->ddc) | ||
| 189 | return -ENODEV; | ||
| 190 | |||
| 191 | output->hpd_gpio = of_get_named_gpio_flags(output->of_node, | 222 | output->hpd_gpio = of_get_named_gpio_flags(output->of_node, |
| 192 | "nvidia,hpd-gpio", 0, | 223 | "nvidia,hpd-gpio", 0, |
| 193 | &flags); | 224 | &flags); |
| @@ -256,6 +287,11 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output) | |||
| 256 | encoder = DRM_MODE_ENCODER_TMDS; | 287 | encoder = DRM_MODE_ENCODER_TMDS; |
| 257 | break; | 288 | break; |
| 258 | 289 | ||
| 290 | case TEGRA_OUTPUT_DSI: | ||
| 291 | connector = DRM_MODE_CONNECTOR_DSI; | ||
| 292 | encoder = DRM_MODE_ENCODER_DSI; | ||
| 293 | break; | ||
| 294 | |||
| 259 | default: | 295 | default: |
| 260 | connector = DRM_MODE_CONNECTOR_Unknown; | 296 | connector = DRM_MODE_CONNECTOR_Unknown; |
| 261 | encoder = DRM_MODE_ENCODER_NONE; | 297 | encoder = DRM_MODE_ENCODER_NONE; |
| @@ -267,6 +303,9 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output) | |||
| 267 | drm_connector_helper_add(&output->connector, &connector_helper_funcs); | 303 | drm_connector_helper_add(&output->connector, &connector_helper_funcs); |
| 268 | output->connector.dpms = DRM_MODE_DPMS_OFF; | 304 | output->connector.dpms = DRM_MODE_DPMS_OFF; |
| 269 | 305 | ||
| 306 | if (output->panel) | ||
| 307 | drm_panel_attach(output->panel, &output->connector); | ||
| 308 | |||
| 270 | drm_encoder_init(drm, &output->encoder, &encoder_funcs, encoder); | 309 | drm_encoder_init(drm, &output->encoder, &encoder_funcs, encoder); |
| 271 | drm_encoder_helper_add(&output->encoder, &encoder_helper_funcs); | 310 | drm_encoder_helper_add(&output->encoder, &encoder_helper_funcs); |
| 272 | 311 | ||
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c index 3b29018913a5..338f7f6561d7 100644 --- a/drivers/gpu/drm/tegra/rgb.c +++ b/drivers/gpu/drm/tegra/rgb.c | |||
| @@ -87,15 +87,60 @@ static void tegra_dc_write_regs(struct tegra_dc *dc, | |||
| 87 | static int tegra_output_rgb_enable(struct tegra_output *output) | 87 | static int tegra_output_rgb_enable(struct tegra_output *output) |
| 88 | { | 88 | { |
| 89 | struct tegra_rgb *rgb = to_rgb(output); | 89 | struct tegra_rgb *rgb = to_rgb(output); |
| 90 | unsigned long value; | ||
| 90 | 91 | ||
| 91 | tegra_dc_write_regs(rgb->dc, rgb_enable, ARRAY_SIZE(rgb_enable)); | 92 | tegra_dc_write_regs(rgb->dc, rgb_enable, ARRAY_SIZE(rgb_enable)); |
| 92 | 93 | ||
| 94 | value = DE_SELECT_ACTIVE | DE_CONTROL_NORMAL; | ||
| 95 | tegra_dc_writel(rgb->dc, value, DC_DISP_DATA_ENABLE_OPTIONS); | ||
| 96 | |||
| 97 | /* XXX: parameterize? */ | ||
| 98 | value = tegra_dc_readl(rgb->dc, DC_COM_PIN_OUTPUT_POLARITY(1)); | ||
| 99 | value &= ~LVS_OUTPUT_POLARITY_LOW; | ||
| 100 | value &= ~LHS_OUTPUT_POLARITY_LOW; | ||
| 101 | tegra_dc_writel(rgb->dc, value, DC_COM_PIN_OUTPUT_POLARITY(1)); | ||
| 102 | |||
| 103 | /* XXX: parameterize? */ | ||
| 104 | value = DISP_DATA_FORMAT_DF1P1C | DISP_ALIGNMENT_MSB | | ||
| 105 | DISP_ORDER_RED_BLUE; | ||
| 106 | tegra_dc_writel(rgb->dc, value, DC_DISP_DISP_INTERFACE_CONTROL); | ||
| 107 | |||
| 108 | /* XXX: parameterize? */ | ||
| 109 | value = SC0_H_QUALIFIER_NONE | SC1_H_QUALIFIER_NONE; | ||
| 110 | tegra_dc_writel(rgb->dc, value, DC_DISP_SHIFT_CLOCK_OPTIONS); | ||
| 111 | |||
| 112 | value = tegra_dc_readl(rgb->dc, DC_CMD_DISPLAY_COMMAND); | ||
| 113 | value &= ~DISP_CTRL_MODE_MASK; | ||
| 114 | value |= DISP_CTRL_MODE_C_DISPLAY; | ||
| 115 | tegra_dc_writel(rgb->dc, value, DC_CMD_DISPLAY_COMMAND); | ||
| 116 | |||
| 117 | value = tegra_dc_readl(rgb->dc, DC_CMD_DISPLAY_POWER_CONTROL); | ||
| 118 | value |= PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE | | ||
| 119 | PW4_ENABLE | PM0_ENABLE | PM1_ENABLE; | ||
| 120 | tegra_dc_writel(rgb->dc, value, DC_CMD_DISPLAY_POWER_CONTROL); | ||
| 121 | |||
| 122 | tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL); | ||
| 123 | tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL); | ||
| 124 | |||
| 93 | return 0; | 125 | return 0; |
| 94 | } | 126 | } |
| 95 | 127 | ||
| 96 | static int tegra_output_rgb_disable(struct tegra_output *output) | 128 | static int tegra_output_rgb_disable(struct tegra_output *output) |
| 97 | { | 129 | { |
| 98 | struct tegra_rgb *rgb = to_rgb(output); | 130 | struct tegra_rgb *rgb = to_rgb(output); |
| 131 | unsigned long value; | ||
| 132 | |||
| 133 | value = tegra_dc_readl(rgb->dc, DC_CMD_DISPLAY_POWER_CONTROL); | ||
| 134 | value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE | | ||
| 135 | PW4_ENABLE | PM0_ENABLE | PM1_ENABLE); | ||
| 136 | tegra_dc_writel(rgb->dc, value, DC_CMD_DISPLAY_POWER_CONTROL); | ||
| 137 | |||
| 138 | value = tegra_dc_readl(rgb->dc, DC_CMD_DISPLAY_COMMAND); | ||
| 139 | value &= ~DISP_CTRL_MODE_MASK; | ||
| 140 | tegra_dc_writel(rgb->dc, value, DC_CMD_DISPLAY_COMMAND); | ||
| 141 | |||
| 142 | tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL); | ||
| 143 | tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL); | ||
| 99 | 144 | ||
| 100 | tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable)); | 145 | tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable)); |
| 101 | 146 | ||
| @@ -213,7 +258,7 @@ int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc) | |||
| 213 | * RGB outputs are an exception, so we make sure they can be attached | 258 | * RGB outputs are an exception, so we make sure they can be attached |
| 214 | * to only their parent display controller. | 259 | * to only their parent display controller. |
| 215 | */ | 260 | */ |
| 216 | rgb->output.encoder.possible_crtcs = 1 << dc->pipe; | 261 | rgb->output.encoder.possible_crtcs = drm_crtc_mask(&dc->base); |
| 217 | 262 | ||
| 218 | return 0; | 263 | return 0; |
| 219 | } | 264 | } |
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c index 116da199b942..171a8203892c 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c | |||
| @@ -311,7 +311,7 @@ static void tilcdc_lastclose(struct drm_device *dev) | |||
| 311 | drm_fbdev_cma_restore_mode(priv->fbdev); | 311 | drm_fbdev_cma_restore_mode(priv->fbdev); |
| 312 | } | 312 | } |
| 313 | 313 | ||
| 314 | static irqreturn_t tilcdc_irq(DRM_IRQ_ARGS) | 314 | static irqreturn_t tilcdc_irq(int irq, void *arg) |
| 315 | { | 315 | { |
| 316 | struct drm_device *dev = arg; | 316 | struct drm_device *dev = arg; |
| 317 | struct tilcdc_drm_private *priv = dev->dev_private; | 317 | struct tilcdc_drm_private *priv = dev->dev_private; |
| @@ -444,7 +444,7 @@ static int tilcdc_mm_show(struct seq_file *m, void *arg) | |||
| 444 | { | 444 | { |
| 445 | struct drm_info_node *node = (struct drm_info_node *) m->private; | 445 | struct drm_info_node *node = (struct drm_info_node *) m->private; |
| 446 | struct drm_device *dev = node->minor->dev; | 446 | struct drm_device *dev = node->minor->dev; |
| 447 | return drm_mm_dump_table(m, dev->mm_private); | 447 | return drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm); |
| 448 | } | 448 | } |
| 449 | 449 | ||
| 450 | static struct drm_info_list tilcdc_debugfs_list[] = { | 450 | static struct drm_info_list tilcdc_debugfs_list[] = { |
| @@ -594,7 +594,7 @@ static int tilcdc_pdev_probe(struct platform_device *pdev) | |||
| 594 | 594 | ||
| 595 | static int tilcdc_pdev_remove(struct platform_device *pdev) | 595 | static int tilcdc_pdev_remove(struct platform_device *pdev) |
| 596 | { | 596 | { |
| 597 | drm_platform_exit(&tilcdc_driver, pdev); | 597 | drm_put_dev(platform_get_drvdata(pdev)); |
| 598 | 598 | ||
| 599 | return 0; | 599 | return 0; |
| 600 | } | 600 | } |
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 07e02c4bf5a8..a06651309388 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
| @@ -957,7 +957,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, | |||
| 957 | } | 957 | } |
| 958 | EXPORT_SYMBOL(ttm_bo_mem_space); | 958 | EXPORT_SYMBOL(ttm_bo_mem_space); |
| 959 | 959 | ||
| 960 | int ttm_bo_move_buffer(struct ttm_buffer_object *bo, | 960 | static int ttm_bo_move_buffer(struct ttm_buffer_object *bo, |
| 961 | struct ttm_placement *placement, | 961 | struct ttm_placement *placement, |
| 962 | bool interruptible, | 962 | bool interruptible, |
| 963 | bool no_wait_gpu) | 963 | bool no_wait_gpu) |
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 406152152315..1df856f78568 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c | |||
| @@ -187,7 +187,7 @@ void ttm_mem_io_free_vm(struct ttm_buffer_object *bo) | |||
| 187 | } | 187 | } |
| 188 | } | 188 | } |
| 189 | 189 | ||
| 190 | int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, | 190 | static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, |
| 191 | void **virtual) | 191 | void **virtual) |
| 192 | { | 192 | { |
| 193 | struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; | 193 | struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; |
| @@ -219,7 +219,7 @@ int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, | |||
| 219 | return 0; | 219 | return 0; |
| 220 | } | 220 | } |
| 221 | 221 | ||
| 222 | void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, | 222 | static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, |
| 223 | void *virtual) | 223 | void *virtual) |
| 224 | { | 224 | { |
| 225 | struct ttm_mem_type_manager *man; | 225 | struct ttm_mem_type_manager *man; |
| @@ -594,7 +594,7 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo, | |||
| 594 | if (start_page > bo->num_pages) | 594 | if (start_page > bo->num_pages) |
| 595 | return -EINVAL; | 595 | return -EINVAL; |
| 596 | #if 0 | 596 | #if 0 |
| 597 | if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC)) | 597 | if (num_pages > 1 && !capable(CAP_SYS_ADMIN)) |
| 598 | return -EPERM; | 598 | return -EPERM; |
| 599 | #endif | 599 | #endif |
| 600 | (void) ttm_mem_io_lock(man, false); | 600 | (void) ttm_mem_io_lock(man, false); |
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 6440eeac22d2..801231c9ae48 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c | |||
| @@ -132,6 +132,15 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
| 132 | return VM_FAULT_NOPAGE; | 132 | return VM_FAULT_NOPAGE; |
| 133 | } | 133 | } |
| 134 | 134 | ||
| 135 | /* | ||
| 136 | * Refuse to fault imported pages. This should be handled | ||
| 137 | * (if at all) by redirecting mmap to the exporter. | ||
| 138 | */ | ||
| 139 | if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) { | ||
| 140 | retval = VM_FAULT_SIGBUS; | ||
| 141 | goto out_unlock; | ||
| 142 | } | ||
| 143 | |||
| 135 | if (bdev->driver->fault_reserve_notify) { | 144 | if (bdev->driver->fault_reserve_notify) { |
| 136 | ret = bdev->driver->fault_reserve_notify(bo); | 145 | ret = bdev->driver->fault_reserve_notify(bo); |
| 137 | switch (ret) { | 146 | switch (ret) { |
| @@ -217,10 +226,17 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
| 217 | } else if (unlikely(!page)) { | 226 | } else if (unlikely(!page)) { |
| 218 | break; | 227 | break; |
| 219 | } | 228 | } |
| 229 | page->mapping = vma->vm_file->f_mapping; | ||
| 230 | page->index = drm_vma_node_start(&bo->vma_node) + | ||
| 231 | page_offset; | ||
| 220 | pfn = page_to_pfn(page); | 232 | pfn = page_to_pfn(page); |
| 221 | } | 233 | } |
| 222 | 234 | ||
| 223 | ret = vm_insert_mixed(&cvma, address, pfn); | 235 | if (vma->vm_flags & VM_MIXEDMAP) |
| 236 | ret = vm_insert_mixed(&cvma, address, pfn); | ||
| 237 | else | ||
| 238 | ret = vm_insert_pfn(&cvma, address, pfn); | ||
| 239 | |||
| 224 | /* | 240 | /* |
| 225 | * Somebody beat us to this PTE or prefaulting to | 241 | * Somebody beat us to this PTE or prefaulting to |
| 226 | * an already populated PTE, or prefaulting error. | 242 | * an already populated PTE, or prefaulting error. |
| @@ -250,6 +266,8 @@ static void ttm_bo_vm_open(struct vm_area_struct *vma) | |||
| 250 | struct ttm_buffer_object *bo = | 266 | struct ttm_buffer_object *bo = |
| 251 | (struct ttm_buffer_object *)vma->vm_private_data; | 267 | (struct ttm_buffer_object *)vma->vm_private_data; |
| 252 | 268 | ||
| 269 | WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping); | ||
| 270 | |||
| 253 | (void)ttm_bo_reference(bo); | 271 | (void)ttm_bo_reference(bo); |
| 254 | } | 272 | } |
| 255 | 273 | ||
| @@ -319,7 +337,14 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, | |||
| 319 | */ | 337 | */ |
| 320 | 338 | ||
| 321 | vma->vm_private_data = bo; | 339 | vma->vm_private_data = bo; |
| 322 | vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP; | 340 | |
| 341 | /* | ||
| 342 | * PFNMAP is faster than MIXEDMAP due to reduced page | ||
| 343 | * administration. So use MIXEDMAP only if private VMA, where | ||
| 344 | * we need to support COW. | ||
| 345 | */ | ||
| 346 | vma->vm_flags |= (vma->vm_flags & VM_SHARED) ? VM_PFNMAP : VM_MIXEDMAP; | ||
| 347 | vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; | ||
| 323 | return 0; | 348 | return 0; |
| 324 | out_unref: | 349 | out_unref: |
| 325 | ttm_bo_unref(&bo); | 350 | ttm_bo_unref(&bo); |
| @@ -334,7 +359,8 @@ int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo) | |||
| 334 | 359 | ||
| 335 | vma->vm_ops = &ttm_bo_vm_ops; | 360 | vma->vm_ops = &ttm_bo_vm_ops; |
| 336 | vma->vm_private_data = ttm_bo_reference(bo); | 361 | vma->vm_private_data = ttm_bo_reference(bo); |
| 337 | vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND; | 362 | vma->vm_flags |= (vma->vm_flags & VM_SHARED) ? VM_PFNMAP : VM_MIXEDMAP; |
| 363 | vma->vm_flags |= VM_IO | VM_DONTEXPAND; | ||
| 338 | return 0; | 364 | return 0; |
| 339 | } | 365 | } |
| 340 | EXPORT_SYMBOL(ttm_fbdev_mmap); | 366 | EXPORT_SYMBOL(ttm_fbdev_mmap); |
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c index 3daa9a3930b8..6a954544727f 100644 --- a/drivers/gpu/drm/ttm/ttm_lock.c +++ b/drivers/gpu/drm/ttm/ttm_lock.c | |||
| @@ -186,14 +186,6 @@ int ttm_write_lock(struct ttm_lock *lock, bool interruptible) | |||
| 186 | } | 186 | } |
| 187 | EXPORT_SYMBOL(ttm_write_lock); | 187 | EXPORT_SYMBOL(ttm_write_lock); |
| 188 | 188 | ||
| 189 | void ttm_write_lock_downgrade(struct ttm_lock *lock) | ||
| 190 | { | ||
| 191 | spin_lock(&lock->lock); | ||
| 192 | lock->rw = 1; | ||
| 193 | wake_up_all(&lock->queue); | ||
| 194 | spin_unlock(&lock->lock); | ||
| 195 | } | ||
| 196 | |||
| 197 | static int __ttm_vt_unlock(struct ttm_lock *lock) | 189 | static int __ttm_vt_unlock(struct ttm_lock *lock) |
| 198 | { | 190 | { |
| 199 | int ret = 0; | 191 | int ret = 0; |
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c index 6fe7b92a82d1..37079859afc8 100644 --- a/drivers/gpu/drm/ttm/ttm_object.c +++ b/drivers/gpu/drm/ttm/ttm_object.c | |||
| @@ -68,7 +68,7 @@ | |||
| 68 | 68 | ||
| 69 | struct ttm_object_file { | 69 | struct ttm_object_file { |
| 70 | struct ttm_object_device *tdev; | 70 | struct ttm_object_device *tdev; |
| 71 | rwlock_t lock; | 71 | spinlock_t lock; |
| 72 | struct list_head ref_list; | 72 | struct list_head ref_list; |
| 73 | struct drm_open_hash ref_hash[TTM_REF_NUM]; | 73 | struct drm_open_hash ref_hash[TTM_REF_NUM]; |
| 74 | struct kref refcount; | 74 | struct kref refcount; |
| @@ -118,6 +118,7 @@ struct ttm_object_device { | |||
| 118 | */ | 118 | */ |
| 119 | 119 | ||
| 120 | struct ttm_ref_object { | 120 | struct ttm_ref_object { |
| 121 | struct rcu_head rcu_head; | ||
| 121 | struct drm_hash_item hash; | 122 | struct drm_hash_item hash; |
| 122 | struct list_head head; | 123 | struct list_head head; |
| 123 | struct kref kref; | 124 | struct kref kref; |
| @@ -210,10 +211,9 @@ static void ttm_release_base(struct kref *kref) | |||
| 210 | * call_rcu() or ttm_base_object_kfree(). | 211 | * call_rcu() or ttm_base_object_kfree(). |
| 211 | */ | 212 | */ |
| 212 | 213 | ||
| 213 | if (base->refcount_release) { | 214 | ttm_object_file_unref(&base->tfile); |
| 214 | ttm_object_file_unref(&base->tfile); | 215 | if (base->refcount_release) |
| 215 | base->refcount_release(&base); | 216 | base->refcount_release(&base); |
| 216 | } | ||
| 217 | } | 217 | } |
| 218 | 218 | ||
| 219 | void ttm_base_object_unref(struct ttm_base_object **p_base) | 219 | void ttm_base_object_unref(struct ttm_base_object **p_base) |
| @@ -229,32 +229,46 @@ EXPORT_SYMBOL(ttm_base_object_unref); | |||
| 229 | struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile, | 229 | struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile, |
| 230 | uint32_t key) | 230 | uint32_t key) |
| 231 | { | 231 | { |
| 232 | struct ttm_object_device *tdev = tfile->tdev; | 232 | struct ttm_base_object *base = NULL; |
| 233 | struct ttm_base_object *uninitialized_var(base); | ||
| 234 | struct drm_hash_item *hash; | 233 | struct drm_hash_item *hash; |
| 234 | struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE]; | ||
| 235 | int ret; | 235 | int ret; |
| 236 | 236 | ||
| 237 | rcu_read_lock(); | 237 | rcu_read_lock(); |
| 238 | ret = drm_ht_find_item_rcu(&tdev->object_hash, key, &hash); | 238 | ret = drm_ht_find_item_rcu(ht, key, &hash); |
| 239 | 239 | ||
| 240 | if (likely(ret == 0)) { | 240 | if (likely(ret == 0)) { |
| 241 | base = drm_hash_entry(hash, struct ttm_base_object, hash); | 241 | base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj; |
| 242 | ret = kref_get_unless_zero(&base->refcount) ? 0 : -EINVAL; | 242 | if (!kref_get_unless_zero(&base->refcount)) |
| 243 | base = NULL; | ||
| 243 | } | 244 | } |
| 244 | rcu_read_unlock(); | 245 | rcu_read_unlock(); |
| 245 | 246 | ||
| 246 | if (unlikely(ret != 0)) | 247 | return base; |
| 247 | return NULL; | 248 | } |
| 249 | EXPORT_SYMBOL(ttm_base_object_lookup); | ||
| 248 | 250 | ||
| 249 | if (tfile != base->tfile && !base->shareable) { | 251 | struct ttm_base_object * |
| 250 | pr_err("Attempted access of non-shareable object\n"); | 252 | ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key) |
| 251 | ttm_base_object_unref(&base); | 253 | { |
| 252 | return NULL; | 254 | struct ttm_base_object *base = NULL; |
| 255 | struct drm_hash_item *hash; | ||
| 256 | struct drm_open_hash *ht = &tdev->object_hash; | ||
| 257 | int ret; | ||
| 258 | |||
| 259 | rcu_read_lock(); | ||
| 260 | ret = drm_ht_find_item_rcu(ht, key, &hash); | ||
| 261 | |||
| 262 | if (likely(ret == 0)) { | ||
| 263 | base = drm_hash_entry(hash, struct ttm_base_object, hash); | ||
| 264 | if (!kref_get_unless_zero(&base->refcount)) | ||
| 265 | base = NULL; | ||
| 253 | } | 266 | } |
| 267 | rcu_read_unlock(); | ||
| 254 | 268 | ||
| 255 | return base; | 269 | return base; |
| 256 | } | 270 | } |
| 257 | EXPORT_SYMBOL(ttm_base_object_lookup); | 271 | EXPORT_SYMBOL(ttm_base_object_lookup_for_ref); |
| 258 | 272 | ||
| 259 | int ttm_ref_object_add(struct ttm_object_file *tfile, | 273 | int ttm_ref_object_add(struct ttm_object_file *tfile, |
| 260 | struct ttm_base_object *base, | 274 | struct ttm_base_object *base, |
| @@ -266,21 +280,25 @@ int ttm_ref_object_add(struct ttm_object_file *tfile, | |||
| 266 | struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob; | 280 | struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob; |
| 267 | int ret = -EINVAL; | 281 | int ret = -EINVAL; |
| 268 | 282 | ||
| 283 | if (base->tfile != tfile && !base->shareable) | ||
| 284 | return -EPERM; | ||
| 285 | |||
| 269 | if (existed != NULL) | 286 | if (existed != NULL) |
| 270 | *existed = true; | 287 | *existed = true; |
| 271 | 288 | ||
| 272 | while (ret == -EINVAL) { | 289 | while (ret == -EINVAL) { |
| 273 | read_lock(&tfile->lock); | 290 | rcu_read_lock(); |
| 274 | ret = drm_ht_find_item(ht, base->hash.key, &hash); | 291 | ret = drm_ht_find_item_rcu(ht, base->hash.key, &hash); |
| 275 | 292 | ||
| 276 | if (ret == 0) { | 293 | if (ret == 0) { |
| 277 | ref = drm_hash_entry(hash, struct ttm_ref_object, hash); | 294 | ref = drm_hash_entry(hash, struct ttm_ref_object, hash); |
| 278 | kref_get(&ref->kref); | 295 | if (!kref_get_unless_zero(&ref->kref)) { |
| 279 | read_unlock(&tfile->lock); | 296 | rcu_read_unlock(); |
| 280 | break; | 297 | break; |
| 298 | } | ||
| 281 | } | 299 | } |
| 282 | 300 | ||
| 283 | read_unlock(&tfile->lock); | 301 | rcu_read_unlock(); |
| 284 | ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref), | 302 | ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref), |
| 285 | false, false); | 303 | false, false); |
| 286 | if (unlikely(ret != 0)) | 304 | if (unlikely(ret != 0)) |
| @@ -297,19 +315,19 @@ int ttm_ref_object_add(struct ttm_object_file *tfile, | |||
| 297 | ref->ref_type = ref_type; | 315 | ref->ref_type = ref_type; |
| 298 | kref_init(&ref->kref); | 316 | kref_init(&ref->kref); |
| 299 | 317 | ||
| 300 | write_lock(&tfile->lock); | 318 | spin_lock(&tfile->lock); |
| 301 | ret = drm_ht_insert_item(ht, &ref->hash); | 319 | ret = drm_ht_insert_item_rcu(ht, &ref->hash); |
| 302 | 320 | ||
| 303 | if (likely(ret == 0)) { | 321 | if (likely(ret == 0)) { |
| 304 | list_add_tail(&ref->head, &tfile->ref_list); | 322 | list_add_tail(&ref->head, &tfile->ref_list); |
| 305 | kref_get(&base->refcount); | 323 | kref_get(&base->refcount); |
| 306 | write_unlock(&tfile->lock); | 324 | spin_unlock(&tfile->lock); |
| 307 | if (existed != NULL) | 325 | if (existed != NULL) |
| 308 | *existed = false; | 326 | *existed = false; |
| 309 | break; | 327 | break; |
| 310 | } | 328 | } |
| 311 | 329 | ||
| 312 | write_unlock(&tfile->lock); | 330 | spin_unlock(&tfile->lock); |
| 313 | BUG_ON(ret != -EINVAL); | 331 | BUG_ON(ret != -EINVAL); |
| 314 | 332 | ||
| 315 | ttm_mem_global_free(mem_glob, sizeof(*ref)); | 333 | ttm_mem_global_free(mem_glob, sizeof(*ref)); |
| @@ -330,17 +348,17 @@ static void ttm_ref_object_release(struct kref *kref) | |||
| 330 | struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob; | 348 | struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob; |
| 331 | 349 | ||
| 332 | ht = &tfile->ref_hash[ref->ref_type]; | 350 | ht = &tfile->ref_hash[ref->ref_type]; |
| 333 | (void)drm_ht_remove_item(ht, &ref->hash); | 351 | (void)drm_ht_remove_item_rcu(ht, &ref->hash); |
| 334 | list_del(&ref->head); | 352 | list_del(&ref->head); |
| 335 | write_unlock(&tfile->lock); | 353 | spin_unlock(&tfile->lock); |
| 336 | 354 | ||
| 337 | if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release) | 355 | if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release) |
| 338 | base->ref_obj_release(base, ref->ref_type); | 356 | base->ref_obj_release(base, ref->ref_type); |
| 339 | 357 | ||
| 340 | ttm_base_object_unref(&ref->obj); | 358 | ttm_base_object_unref(&ref->obj); |
| 341 | ttm_mem_global_free(mem_glob, sizeof(*ref)); | 359 | ttm_mem_global_free(mem_glob, sizeof(*ref)); |
| 342 | kfree(ref); | 360 | kfree_rcu(ref, rcu_head); |
| 343 | write_lock(&tfile->lock); | 361 | spin_lock(&tfile->lock); |
| 344 | } | 362 | } |
| 345 | 363 | ||
| 346 | int ttm_ref_object_base_unref(struct ttm_object_file *tfile, | 364 | int ttm_ref_object_base_unref(struct ttm_object_file *tfile, |
| @@ -351,15 +369,15 @@ int ttm_ref_object_base_unref(struct ttm_object_file *tfile, | |||
| 351 | struct drm_hash_item *hash; | 369 | struct drm_hash_item *hash; |
| 352 | int ret; | 370 | int ret; |
| 353 | 371 | ||
| 354 | write_lock(&tfile->lock); | 372 | spin_lock(&tfile->lock); |
| 355 | ret = drm_ht_find_item(ht, key, &hash); | 373 | ret = drm_ht_find_item(ht, key, &hash); |
| 356 | if (unlikely(ret != 0)) { | 374 | if (unlikely(ret != 0)) { |
| 357 | write_unlock(&tfile->lock); | 375 | spin_unlock(&tfile->lock); |
| 358 | return -EINVAL; | 376 | return -EINVAL; |
| 359 | } | 377 | } |
| 360 | ref = drm_hash_entry(hash, struct ttm_ref_object, hash); | 378 | ref = drm_hash_entry(hash, struct ttm_ref_object, hash); |
| 361 | kref_put(&ref->kref, ttm_ref_object_release); | 379 | kref_put(&ref->kref, ttm_ref_object_release); |
| 362 | write_unlock(&tfile->lock); | 380 | spin_unlock(&tfile->lock); |
| 363 | return 0; | 381 | return 0; |
| 364 | } | 382 | } |
| 365 | EXPORT_SYMBOL(ttm_ref_object_base_unref); | 383 | EXPORT_SYMBOL(ttm_ref_object_base_unref); |
| @@ -372,7 +390,7 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile) | |||
| 372 | struct ttm_object_file *tfile = *p_tfile; | 390 | struct ttm_object_file *tfile = *p_tfile; |
| 373 | 391 | ||
| 374 | *p_tfile = NULL; | 392 | *p_tfile = NULL; |
| 375 | write_lock(&tfile->lock); | 393 | spin_lock(&tfile->lock); |
| 376 | 394 | ||
| 377 | /* | 395 | /* |
| 378 | * Since we release the lock within the loop, we have to | 396 | * Since we release the lock within the loop, we have to |
| @@ -388,7 +406,7 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile) | |||
| 388 | for (i = 0; i < TTM_REF_NUM; ++i) | 406 | for (i = 0; i < TTM_REF_NUM; ++i) |
| 389 | drm_ht_remove(&tfile->ref_hash[i]); | 407 | drm_ht_remove(&tfile->ref_hash[i]); |
| 390 | 408 | ||
| 391 | write_unlock(&tfile->lock); | 409 | spin_unlock(&tfile->lock); |
| 392 | ttm_object_file_unref(&tfile); | 410 | ttm_object_file_unref(&tfile); |
| 393 | } | 411 | } |
| 394 | EXPORT_SYMBOL(ttm_object_file_release); | 412 | EXPORT_SYMBOL(ttm_object_file_release); |
| @@ -404,7 +422,7 @@ struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev, | |||
| 404 | if (unlikely(tfile == NULL)) | 422 | if (unlikely(tfile == NULL)) |
| 405 | return NULL; | 423 | return NULL; |
| 406 | 424 | ||
| 407 | rwlock_init(&tfile->lock); | 425 | spin_lock_init(&tfile->lock); |
| 408 | tfile->tdev = tdev; | 426 | tfile->tdev = tdev; |
| 409 | kref_init(&tfile->refcount); | 427 | kref_init(&tfile->refcount); |
| 410 | INIT_LIST_HEAD(&tfile->ref_list); | 428 | INIT_LIST_HEAD(&tfile->ref_list); |
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 210d50365162..9af99084b344 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c | |||
| @@ -170,9 +170,8 @@ void ttm_tt_destroy(struct ttm_tt *ttm) | |||
| 170 | ttm_tt_unbind(ttm); | 170 | ttm_tt_unbind(ttm); |
| 171 | } | 171 | } |
| 172 | 172 | ||
| 173 | if (ttm->state == tt_unbound) { | 173 | if (ttm->state == tt_unbound) |
| 174 | ttm->bdev->driver->ttm_tt_unpopulate(ttm); | 174 | ttm_tt_unpopulate(ttm); |
| 175 | } | ||
| 176 | 175 | ||
| 177 | if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) && | 176 | if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) && |
| 178 | ttm->swap_storage) | 177 | ttm->swap_storage) |
| @@ -362,7 +361,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage) | |||
| 362 | page_cache_release(to_page); | 361 | page_cache_release(to_page); |
| 363 | } | 362 | } |
| 364 | 363 | ||
| 365 | ttm->bdev->driver->ttm_tt_unpopulate(ttm); | 364 | ttm_tt_unpopulate(ttm); |
| 366 | ttm->swap_storage = swap_storage; | 365 | ttm->swap_storage = swap_storage; |
| 367 | ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED; | 366 | ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED; |
| 368 | if (persistent_swap_storage) | 367 | if (persistent_swap_storage) |
| @@ -375,3 +374,23 @@ out_err: | |||
| 375 | 374 | ||
| 376 | return ret; | 375 | return ret; |
| 377 | } | 376 | } |
| 377 | |||
| 378 | static void ttm_tt_clear_mapping(struct ttm_tt *ttm) | ||
| 379 | { | ||
| 380 | pgoff_t i; | ||
| 381 | struct page **page = ttm->pages; | ||
| 382 | |||
| 383 | for (i = 0; i < ttm->num_pages; ++i) { | ||
| 384 | (*page)->mapping = NULL; | ||
| 385 | (*page++)->index = 0; | ||
| 386 | } | ||
| 387 | } | ||
| 388 | |||
| 389 | void ttm_tt_unpopulate(struct ttm_tt *ttm) | ||
| 390 | { | ||
| 391 | if (ttm->state == tt_unpopulated) | ||
| 392 | return; | ||
| 393 | |||
| 394 | ttm_tt_clear_mapping(ttm); | ||
| 395 | ttm->bdev->driver->ttm_tt_unpopulate(ttm); | ||
| 396 | } | ||
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c index 97e9d614700f..dbadd49e4c4a 100644 --- a/drivers/gpu/drm/udl/udl_fb.c +++ b/drivers/gpu/drm/udl/udl_fb.c | |||
| @@ -403,15 +403,17 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb, | |||
| 403 | int i; | 403 | int i; |
| 404 | int ret = 0; | 404 | int ret = 0; |
| 405 | 405 | ||
| 406 | drm_modeset_lock_all(fb->dev); | ||
| 407 | |||
| 406 | if (!ufb->active_16) | 408 | if (!ufb->active_16) |
| 407 | return 0; | 409 | goto unlock; |
| 408 | 410 | ||
| 409 | if (ufb->obj->base.import_attach) { | 411 | if (ufb->obj->base.import_attach) { |
| 410 | ret = dma_buf_begin_cpu_access(ufb->obj->base.import_attach->dmabuf, | 412 | ret = dma_buf_begin_cpu_access(ufb->obj->base.import_attach->dmabuf, |
| 411 | 0, ufb->obj->base.size, | 413 | 0, ufb->obj->base.size, |
| 412 | DMA_FROM_DEVICE); | 414 | DMA_FROM_DEVICE); |
| 413 | if (ret) | 415 | if (ret) |
| 414 | return ret; | 416 | goto unlock; |
| 415 | } | 417 | } |
| 416 | 418 | ||
| 417 | for (i = 0; i < num_clips; i++) { | 419 | for (i = 0; i < num_clips; i++) { |
| @@ -419,7 +421,7 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb, | |||
| 419 | clips[i].x2 - clips[i].x1, | 421 | clips[i].x2 - clips[i].x1, |
| 420 | clips[i].y2 - clips[i].y1); | 422 | clips[i].y2 - clips[i].y1); |
| 421 | if (ret) | 423 | if (ret) |
| 422 | break; | 424 | goto unlock; |
| 423 | } | 425 | } |
| 424 | 426 | ||
| 425 | if (ufb->obj->base.import_attach) { | 427 | if (ufb->obj->base.import_attach) { |
| @@ -427,6 +429,10 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb, | |||
| 427 | 0, ufb->obj->base.size, | 429 | 0, ufb->obj->base.size, |
| 428 | DMA_FROM_DEVICE); | 430 | DMA_FROM_DEVICE); |
| 429 | } | 431 | } |
| 432 | |||
| 433 | unlock: | ||
| 434 | drm_modeset_unlock_all(fb->dev); | ||
| 435 | |||
| 430 | return ret; | 436 | return ret; |
| 431 | } | 437 | } |
| 432 | 438 | ||
diff --git a/drivers/gpu/drm/via/via_dma.c b/drivers/gpu/drm/via/via_dma.c index 652f9b43ec9d..a18479c6b6da 100644 --- a/drivers/gpu/drm/via/via_dma.c +++ b/drivers/gpu/drm/via/via_dma.c | |||
| @@ -60,7 +60,7 @@ | |||
| 60 | dev_priv->dma_low += 8; \ | 60 | dev_priv->dma_low += 8; \ |
| 61 | } | 61 | } |
| 62 | 62 | ||
| 63 | #define via_flush_write_combine() DRM_MEMORYBARRIER() | 63 | #define via_flush_write_combine() mb() |
| 64 | 64 | ||
| 65 | #define VIA_OUT_RING_QW(w1, w2) do { \ | 65 | #define VIA_OUT_RING_QW(w1, w2) do { \ |
| 66 | *vb++ = (w1); \ | 66 | *vb++ = (w1); \ |
| @@ -234,13 +234,13 @@ static int via_dma_init(struct drm_device *dev, void *data, struct drm_file *fil | |||
| 234 | 234 | ||
| 235 | switch (init->func) { | 235 | switch (init->func) { |
| 236 | case VIA_INIT_DMA: | 236 | case VIA_INIT_DMA: |
| 237 | if (!DRM_SUSER(DRM_CURPROC)) | 237 | if (!capable(CAP_SYS_ADMIN)) |
| 238 | retcode = -EPERM; | 238 | retcode = -EPERM; |
| 239 | else | 239 | else |
| 240 | retcode = via_initialize(dev, dev_priv, init); | 240 | retcode = via_initialize(dev, dev_priv, init); |
| 241 | break; | 241 | break; |
| 242 | case VIA_CLEANUP_DMA: | 242 | case VIA_CLEANUP_DMA: |
| 243 | if (!DRM_SUSER(DRM_CURPROC)) | 243 | if (!capable(CAP_SYS_ADMIN)) |
| 244 | retcode = -EPERM; | 244 | retcode = -EPERM; |
| 245 | else | 245 | else |
| 246 | retcode = via_dma_cleanup(dev); | 246 | retcode = via_dma_cleanup(dev); |
| @@ -273,7 +273,7 @@ static int via_dispatch_cmdbuffer(struct drm_device *dev, drm_via_cmdbuffer_t *c | |||
| 273 | if (cmd->size > VIA_PCI_BUF_SIZE) | 273 | if (cmd->size > VIA_PCI_BUF_SIZE) |
| 274 | return -ENOMEM; | 274 | return -ENOMEM; |
| 275 | 275 | ||
| 276 | if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size)) | 276 | if (copy_from_user(dev_priv->pci_buf, cmd->buf, cmd->size)) |
| 277 | return -EFAULT; | 277 | return -EFAULT; |
| 278 | 278 | ||
| 279 | /* | 279 | /* |
| @@ -346,7 +346,7 @@ static int via_dispatch_pci_cmdbuffer(struct drm_device *dev, | |||
| 346 | 346 | ||
| 347 | if (cmd->size > VIA_PCI_BUF_SIZE) | 347 | if (cmd->size > VIA_PCI_BUF_SIZE) |
| 348 | return -ENOMEM; | 348 | return -ENOMEM; |
| 349 | if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size)) | 349 | if (copy_from_user(dev_priv->pci_buf, cmd->buf, cmd->size)) |
| 350 | return -EFAULT; | 350 | return -EFAULT; |
| 351 | 351 | ||
| 352 | if ((ret = | 352 | if ((ret = |
| @@ -543,7 +543,7 @@ static void via_cmdbuf_start(drm_via_private_t *dev_priv) | |||
| 543 | 543 | ||
| 544 | VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi); | 544 | VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi); |
| 545 | VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo); | 545 | VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo); |
| 546 | DRM_WRITEMEMORYBARRIER(); | 546 | wmb(); |
| 547 | VIA_WRITE(VIA_REG_TRANSPACE, command | HC_HAGPCMNT_MASK); | 547 | VIA_WRITE(VIA_REG_TRANSPACE, command | HC_HAGPCMNT_MASK); |
| 548 | VIA_READ(VIA_REG_TRANSPACE); | 548 | VIA_READ(VIA_REG_TRANSPACE); |
| 549 | 549 | ||
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c index 8b0f25904e6d..ba33cf679180 100644 --- a/drivers/gpu/drm/via/via_dmablit.c +++ b/drivers/gpu/drm/via/via_dmablit.c | |||
| @@ -217,7 +217,7 @@ via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine) | |||
| 217 | VIA_WRITE(VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE); | 217 | VIA_WRITE(VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE); |
| 218 | VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0); | 218 | VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0); |
| 219 | VIA_WRITE(VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start); | 219 | VIA_WRITE(VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start); |
| 220 | DRM_WRITEMEMORYBARRIER(); | 220 | wmb(); |
| 221 | VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS); | 221 | VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS); |
| 222 | VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04); | 222 | VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04); |
| 223 | } | 223 | } |
| @@ -338,7 +338,7 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq) | |||
| 338 | 338 | ||
| 339 | blitq->blits[cur]->aborted = blitq->aborting; | 339 | blitq->blits[cur]->aborted = blitq->aborting; |
| 340 | blitq->done_blit_handle++; | 340 | blitq->done_blit_handle++; |
| 341 | DRM_WAKEUP(blitq->blit_queue + cur); | 341 | wake_up(blitq->blit_queue + cur); |
| 342 | 342 | ||
| 343 | cur++; | 343 | cur++; |
| 344 | if (cur >= VIA_NUM_BLIT_SLOTS) | 344 | if (cur >= VIA_NUM_BLIT_SLOTS) |
| @@ -363,7 +363,7 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq) | |||
| 363 | 363 | ||
| 364 | via_abort_dmablit(dev, engine); | 364 | via_abort_dmablit(dev, engine); |
| 365 | blitq->aborting = 1; | 365 | blitq->aborting = 1; |
| 366 | blitq->end = jiffies + DRM_HZ; | 366 | blitq->end = jiffies + HZ; |
| 367 | } | 367 | } |
| 368 | 368 | ||
| 369 | if (!blitq->is_active) { | 369 | if (!blitq->is_active) { |
| @@ -372,7 +372,7 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq) | |||
| 372 | blitq->is_active = 1; | 372 | blitq->is_active = 1; |
| 373 | blitq->cur = cur; | 373 | blitq->cur = cur; |
| 374 | blitq->num_outstanding--; | 374 | blitq->num_outstanding--; |
| 375 | blitq->end = jiffies + DRM_HZ; | 375 | blitq->end = jiffies + HZ; |
| 376 | if (!timer_pending(&blitq->poll_timer)) | 376 | if (!timer_pending(&blitq->poll_timer)) |
| 377 | mod_timer(&blitq->poll_timer, jiffies + 1); | 377 | mod_timer(&blitq->poll_timer, jiffies + 1); |
| 378 | } else { | 378 | } else { |
| @@ -436,7 +436,7 @@ via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine) | |||
| 436 | int ret = 0; | 436 | int ret = 0; |
| 437 | 437 | ||
| 438 | if (via_dmablit_active(blitq, engine, handle, &queue)) { | 438 | if (via_dmablit_active(blitq, engine, handle, &queue)) { |
| 439 | DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ, | 439 | DRM_WAIT_ON(ret, *queue, 3 * HZ, |
| 440 | !via_dmablit_active(blitq, engine, handle, NULL)); | 440 | !via_dmablit_active(blitq, engine, handle, NULL)); |
| 441 | } | 441 | } |
| 442 | DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n", | 442 | DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n", |
| @@ -521,7 +521,7 @@ via_dmablit_workqueue(struct work_struct *work) | |||
| 521 | 521 | ||
| 522 | spin_unlock_irqrestore(&blitq->blit_lock, irqsave); | 522 | spin_unlock_irqrestore(&blitq->blit_lock, irqsave); |
| 523 | 523 | ||
| 524 | DRM_WAKEUP(&blitq->busy_queue); | 524 | wake_up(&blitq->busy_queue); |
| 525 | 525 | ||
| 526 | via_free_sg_info(dev->pdev, cur_sg); | 526 | via_free_sg_info(dev->pdev, cur_sg); |
| 527 | kfree(cur_sg); | 527 | kfree(cur_sg); |
| @@ -561,8 +561,8 @@ via_init_dmablit(struct drm_device *dev) | |||
| 561 | blitq->aborting = 0; | 561 | blitq->aborting = 0; |
| 562 | spin_lock_init(&blitq->blit_lock); | 562 | spin_lock_init(&blitq->blit_lock); |
| 563 | for (j = 0; j < VIA_NUM_BLIT_SLOTS; ++j) | 563 | for (j = 0; j < VIA_NUM_BLIT_SLOTS; ++j) |
| 564 | DRM_INIT_WAITQUEUE(blitq->blit_queue + j); | 564 | init_waitqueue_head(blitq->blit_queue + j); |
| 565 | DRM_INIT_WAITQUEUE(&blitq->busy_queue); | 565 | init_waitqueue_head(&blitq->busy_queue); |
| 566 | INIT_WORK(&blitq->wq, via_dmablit_workqueue); | 566 | INIT_WORK(&blitq->wq, via_dmablit_workqueue); |
| 567 | setup_timer(&blitq->poll_timer, via_dmablit_timer, | 567 | setup_timer(&blitq->poll_timer, via_dmablit_timer, |
| 568 | (unsigned long)blitq); | 568 | (unsigned long)blitq); |
| @@ -688,7 +688,7 @@ via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine) | |||
| 688 | while (blitq->num_free == 0) { | 688 | while (blitq->num_free == 0) { |
| 689 | spin_unlock_irqrestore(&blitq->blit_lock, irqsave); | 689 | spin_unlock_irqrestore(&blitq->blit_lock, irqsave); |
| 690 | 690 | ||
| 691 | DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0); | 691 | DRM_WAIT_ON(ret, blitq->busy_queue, HZ, blitq->num_free > 0); |
| 692 | if (ret) | 692 | if (ret) |
| 693 | return (-EINTR == ret) ? -EAGAIN : ret; | 693 | return (-EINTR == ret) ? -EAGAIN : ret; |
| 694 | 694 | ||
| @@ -713,7 +713,7 @@ via_dmablit_release_slot(drm_via_blitq_t *blitq) | |||
| 713 | spin_lock_irqsave(&blitq->blit_lock, irqsave); | 713 | spin_lock_irqsave(&blitq->blit_lock, irqsave); |
| 714 | blitq->num_free++; | 714 | blitq->num_free++; |
| 715 | spin_unlock_irqrestore(&blitq->blit_lock, irqsave); | 715 | spin_unlock_irqrestore(&blitq->blit_lock, irqsave); |
| 716 | DRM_WAKEUP(&blitq->busy_queue); | 716 | wake_up(&blitq->busy_queue); |
| 717 | } | 717 | } |
| 718 | 718 | ||
| 719 | /* | 719 | /* |
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c index 92684a9b7e34..50abc2adfaee 100644 --- a/drivers/gpu/drm/via/via_drv.c +++ b/drivers/gpu/drm/via/via_drv.c | |||
| @@ -46,7 +46,7 @@ static int via_driver_open(struct drm_device *dev, struct drm_file *file) | |||
| 46 | return 0; | 46 | return 0; |
| 47 | } | 47 | } |
| 48 | 48 | ||
| 49 | void via_driver_postclose(struct drm_device *dev, struct drm_file *file) | 49 | static void via_driver_postclose(struct drm_device *dev, struct drm_file *file) |
| 50 | { | 50 | { |
| 51 | struct via_file_private *file_priv = file->driver_priv; | 51 | struct via_file_private *file_priv = file->driver_priv; |
| 52 | 52 | ||
diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h index a811ef2b505f..ad0273256beb 100644 --- a/drivers/gpu/drm/via/via_drv.h +++ b/drivers/gpu/drm/via/via_drv.h | |||
| @@ -138,7 +138,7 @@ extern u32 via_get_vblank_counter(struct drm_device *dev, int crtc); | |||
| 138 | extern int via_enable_vblank(struct drm_device *dev, int crtc); | 138 | extern int via_enable_vblank(struct drm_device *dev, int crtc); |
| 139 | extern void via_disable_vblank(struct drm_device *dev, int crtc); | 139 | extern void via_disable_vblank(struct drm_device *dev, int crtc); |
| 140 | 140 | ||
| 141 | extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS); | 141 | extern irqreturn_t via_driver_irq_handler(int irq, void *arg); |
| 142 | extern void via_driver_irq_preinstall(struct drm_device *dev); | 142 | extern void via_driver_irq_preinstall(struct drm_device *dev); |
| 143 | extern int via_driver_irq_postinstall(struct drm_device *dev); | 143 | extern int via_driver_irq_postinstall(struct drm_device *dev); |
| 144 | extern void via_driver_irq_uninstall(struct drm_device *dev); | 144 | extern void via_driver_irq_uninstall(struct drm_device *dev); |
diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c index ac98964297cf..1319433816d3 100644 --- a/drivers/gpu/drm/via/via_irq.c +++ b/drivers/gpu/drm/via/via_irq.c | |||
| @@ -104,7 +104,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc) | |||
| 104 | return atomic_read(&dev_priv->vbl_received); | 104 | return atomic_read(&dev_priv->vbl_received); |
| 105 | } | 105 | } |
| 106 | 106 | ||
| 107 | irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS) | 107 | irqreturn_t via_driver_irq_handler(int irq, void *arg) |
| 108 | { | 108 | { |
| 109 | struct drm_device *dev = (struct drm_device *) arg; | 109 | struct drm_device *dev = (struct drm_device *) arg; |
| 110 | drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; | 110 | drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; |
| @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS) | |||
| 138 | for (i = 0; i < dev_priv->num_irqs; ++i) { | 138 | for (i = 0; i < dev_priv->num_irqs; ++i) { |
| 139 | if (status & cur_irq->pending_mask) { | 139 | if (status & cur_irq->pending_mask) { |
| 140 | atomic_inc(&cur_irq->irq_received); | 140 | atomic_inc(&cur_irq->irq_received); |
| 141 | DRM_WAKEUP(&cur_irq->irq_queue); | 141 | wake_up(&cur_irq->irq_queue); |
| 142 | handled = 1; | 142 | handled = 1; |
| 143 | if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) | 143 | if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) |
| 144 | via_dmablit_handler(dev, 0, 1); | 144 | via_dmablit_handler(dev, 0, 1); |
| @@ -239,12 +239,12 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence | |||
| 239 | cur_irq = dev_priv->via_irqs + real_irq; | 239 | cur_irq = dev_priv->via_irqs + real_irq; |
| 240 | 240 | ||
| 241 | if (masks[real_irq][2] && !force_sequence) { | 241 | if (masks[real_irq][2] && !force_sequence) { |
| 242 | DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ, | 242 | DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ, |
| 243 | ((VIA_READ(masks[irq][2]) & masks[irq][3]) == | 243 | ((VIA_READ(masks[irq][2]) & masks[irq][3]) == |
| 244 | masks[irq][4])); | 244 | masks[irq][4])); |
| 245 | cur_irq_sequence = atomic_read(&cur_irq->irq_received); | 245 | cur_irq_sequence = atomic_read(&cur_irq->irq_received); |
| 246 | } else { | 246 | } else { |
| 247 | DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ, | 247 | DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ, |
| 248 | (((cur_irq_sequence = | 248 | (((cur_irq_sequence = |
| 249 | atomic_read(&cur_irq->irq_received)) - | 249 | atomic_read(&cur_irq->irq_received)) - |
| 250 | *sequence) <= (1 << 23))); | 250 | *sequence) <= (1 << 23))); |
| @@ -287,7 +287,7 @@ void via_driver_irq_preinstall(struct drm_device *dev) | |||
| 287 | atomic_set(&cur_irq->irq_received, 0); | 287 | atomic_set(&cur_irq->irq_received, 0); |
| 288 | cur_irq->enable_mask = dev_priv->irq_masks[i][0]; | 288 | cur_irq->enable_mask = dev_priv->irq_masks[i][0]; |
| 289 | cur_irq->pending_mask = dev_priv->irq_masks[i][1]; | 289 | cur_irq->pending_mask = dev_priv->irq_masks[i][1]; |
| 290 | DRM_INIT_WAITQUEUE(&cur_irq->irq_queue); | 290 | init_waitqueue_head(&cur_irq->irq_queue); |
| 291 | dev_priv->irq_enable_mask |= cur_irq->enable_mask; | 291 | dev_priv->irq_enable_mask |= cur_irq->enable_mask; |
| 292 | dev_priv->irq_pending_mask |= cur_irq->pending_mask; | 292 | dev_priv->irq_pending_mask |= cur_irq->pending_mask; |
| 293 | cur_irq++; | 293 | cur_irq++; |
diff --git a/drivers/gpu/drm/via/via_video.c b/drivers/gpu/drm/via/via_video.c index 6569efa2ff6e..a9ffbad1cfdd 100644 --- a/drivers/gpu/drm/via/via_video.c +++ b/drivers/gpu/drm/via/via_video.c | |||
| @@ -36,7 +36,7 @@ void via_init_futex(drm_via_private_t *dev_priv) | |||
| 36 | DRM_DEBUG("\n"); | 36 | DRM_DEBUG("\n"); |
| 37 | 37 | ||
| 38 | for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) { | 38 | for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) { |
| 39 | DRM_INIT_WAITQUEUE(&(dev_priv->decoder_queue[i])); | 39 | init_waitqueue_head(&(dev_priv->decoder_queue[i])); |
| 40 | XVMCLOCKPTR(dev_priv->sarea_priv, i)->lock = 0; | 40 | XVMCLOCKPTR(dev_priv->sarea_priv, i)->lock = 0; |
| 41 | } | 41 | } |
| 42 | } | 42 | } |
| @@ -58,7 +58,7 @@ void via_release_futex(drm_via_private_t *dev_priv, int context) | |||
| 58 | if ((_DRM_LOCKING_CONTEXT(*lock) == context)) { | 58 | if ((_DRM_LOCKING_CONTEXT(*lock) == context)) { |
| 59 | if (_DRM_LOCK_IS_HELD(*lock) | 59 | if (_DRM_LOCK_IS_HELD(*lock) |
| 60 | && (*lock & _DRM_LOCK_CONT)) { | 60 | && (*lock & _DRM_LOCK_CONT)) { |
| 61 | DRM_WAKEUP(&(dev_priv->decoder_queue[i])); | 61 | wake_up(&(dev_priv->decoder_queue[i])); |
| 62 | } | 62 | } |
| 63 | *lock = 0; | 63 | *lock = 0; |
| 64 | } | 64 | } |
| @@ -83,10 +83,10 @@ int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_ | |||
| 83 | switch (fx->func) { | 83 | switch (fx->func) { |
| 84 | case VIA_FUTEX_WAIT: | 84 | case VIA_FUTEX_WAIT: |
| 85 | DRM_WAIT_ON(ret, dev_priv->decoder_queue[fx->lock], | 85 | DRM_WAIT_ON(ret, dev_priv->decoder_queue[fx->lock], |
| 86 | (fx->ms / 10) * (DRM_HZ / 100), *lock != fx->val); | 86 | (fx->ms / 10) * (HZ / 100), *lock != fx->val); |
| 87 | return ret; | 87 | return ret; |
| 88 | case VIA_FUTEX_WAKE: | 88 | case VIA_FUTEX_WAKE: |
| 89 | DRM_WAKEUP(&(dev_priv->decoder_queue[fx->lock])); | 89 | wake_up(&(dev_priv->decoder_queue[fx->lock])); |
| 90 | return 0; | 90 | return 0; |
| 91 | } | 91 | } |
| 92 | return 0; | 92 | return 0; |
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile index 9f8b690bcf52..458cdf6d81e8 100644 --- a/drivers/gpu/drm/vmwgfx/Makefile +++ b/drivers/gpu/drm/vmwgfx/Makefile | |||
| @@ -6,6 +6,6 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \ | |||
| 6 | vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \ | 6 | vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \ |
| 7 | vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \ | 7 | vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \ |
| 8 | vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \ | 8 | vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \ |
| 9 | vmwgfx_surface.o vmwgfx_prime.o | 9 | vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o |
| 10 | 10 | ||
| 11 | obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o | 11 | obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o |
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/svga3d_reg.h index d0e085ee8249..d95335cb90bd 100644 --- a/drivers/gpu/drm/vmwgfx/svga3d_reg.h +++ b/drivers/gpu/drm/vmwgfx/svga3d_reg.h | |||
| @@ -34,6 +34,8 @@ | |||
| 34 | 34 | ||
| 35 | #include "svga_reg.h" | 35 | #include "svga_reg.h" |
| 36 | 36 | ||
| 37 | typedef uint32 PPN; | ||
| 38 | typedef __le64 PPN64; | ||
| 37 | 39 | ||
| 38 | /* | 40 | /* |
| 39 | * 3D Hardware Version | 41 | * 3D Hardware Version |
| @@ -71,6 +73,9 @@ typedef uint32 SVGA3dBool; /* 32-bit Bool definition */ | |||
| 71 | #define SVGA3D_MAX_CONTEXT_IDS 256 | 73 | #define SVGA3D_MAX_CONTEXT_IDS 256 |
| 72 | #define SVGA3D_MAX_SURFACE_IDS (32 * 1024) | 74 | #define SVGA3D_MAX_SURFACE_IDS (32 * 1024) |
| 73 | 75 | ||
| 76 | #define SVGA3D_NUM_TEXTURE_UNITS 32 | ||
| 77 | #define SVGA3D_NUM_LIGHTS 8 | ||
| 78 | |||
| 74 | /* | 79 | /* |
| 75 | * Surface formats. | 80 | * Surface formats. |
| 76 | * | 81 | * |
| @@ -81,6 +86,7 @@ typedef uint32 SVGA3dBool; /* 32-bit Bool definition */ | |||
| 81 | */ | 86 | */ |
| 82 | 87 | ||
| 83 | typedef enum SVGA3dSurfaceFormat { | 88 | typedef enum SVGA3dSurfaceFormat { |
| 89 | SVGA3D_FORMAT_MIN = 0, | ||
| 84 | SVGA3D_FORMAT_INVALID = 0, | 90 | SVGA3D_FORMAT_INVALID = 0, |
| 85 | 91 | ||
| 86 | SVGA3D_X8R8G8B8 = 1, | 92 | SVGA3D_X8R8G8B8 = 1, |
| @@ -134,12 +140,6 @@ typedef enum SVGA3dSurfaceFormat { | |||
| 134 | SVGA3D_RG_S10E5 = 35, | 140 | SVGA3D_RG_S10E5 = 35, |
| 135 | SVGA3D_RG_S23E8 = 36, | 141 | SVGA3D_RG_S23E8 = 36, |
| 136 | 142 | ||
| 137 | /* | ||
| 138 | * Any surface can be used as a buffer object, but SVGA3D_BUFFER is | ||
| 139 | * the most efficient format to use when creating new surfaces | ||
| 140 | * expressly for index or vertex data. | ||
| 141 | */ | ||
| 142 | |||
| 143 | SVGA3D_BUFFER = 37, | 143 | SVGA3D_BUFFER = 37, |
| 144 | 144 | ||
| 145 | SVGA3D_Z_D24X8 = 38, | 145 | SVGA3D_Z_D24X8 = 38, |
| @@ -159,15 +159,114 @@ typedef enum SVGA3dSurfaceFormat { | |||
| 159 | /* Video format with alpha */ | 159 | /* Video format with alpha */ |
| 160 | SVGA3D_AYUV = 45, | 160 | SVGA3D_AYUV = 45, |
| 161 | 161 | ||
| 162 | SVGA3D_R32G32B32A32_TYPELESS = 46, | ||
| 163 | SVGA3D_R32G32B32A32_FLOAT = 25, | ||
| 164 | SVGA3D_R32G32B32A32_UINT = 47, | ||
| 165 | SVGA3D_R32G32B32A32_SINT = 48, | ||
| 166 | SVGA3D_R32G32B32_TYPELESS = 49, | ||
| 167 | SVGA3D_R32G32B32_FLOAT = 50, | ||
| 168 | SVGA3D_R32G32B32_UINT = 51, | ||
| 169 | SVGA3D_R32G32B32_SINT = 52, | ||
| 170 | SVGA3D_R16G16B16A16_TYPELESS = 53, | ||
| 171 | SVGA3D_R16G16B16A16_FLOAT = 24, | ||
| 172 | SVGA3D_R16G16B16A16_UNORM = 41, | ||
| 173 | SVGA3D_R16G16B16A16_UINT = 54, | ||
| 174 | SVGA3D_R16G16B16A16_SNORM = 55, | ||
| 175 | SVGA3D_R16G16B16A16_SINT = 56, | ||
| 176 | SVGA3D_R32G32_TYPELESS = 57, | ||
| 177 | SVGA3D_R32G32_FLOAT = 36, | ||
| 178 | SVGA3D_R32G32_UINT = 58, | ||
| 179 | SVGA3D_R32G32_SINT = 59, | ||
| 180 | SVGA3D_R32G8X24_TYPELESS = 60, | ||
| 181 | SVGA3D_D32_FLOAT_S8X24_UINT = 61, | ||
| 182 | SVGA3D_R32_FLOAT_X8X24_TYPELESS = 62, | ||
| 183 | SVGA3D_X32_TYPELESS_G8X24_UINT = 63, | ||
| 184 | SVGA3D_R10G10B10A2_TYPELESS = 64, | ||
| 185 | SVGA3D_R10G10B10A2_UNORM = 26, | ||
| 186 | SVGA3D_R10G10B10A2_UINT = 65, | ||
| 187 | SVGA3D_R11G11B10_FLOAT = 66, | ||
| 188 | SVGA3D_R8G8B8A8_TYPELESS = 67, | ||
| 189 | SVGA3D_R8G8B8A8_UNORM = 68, | ||
| 190 | SVGA3D_R8G8B8A8_UNORM_SRGB = 69, | ||
| 191 | SVGA3D_R8G8B8A8_UINT = 70, | ||
| 192 | SVGA3D_R8G8B8A8_SNORM = 28, | ||
| 193 | SVGA3D_R8G8B8A8_SINT = 71, | ||
| 194 | SVGA3D_R16G16_TYPELESS = 72, | ||
| 195 | SVGA3D_R16G16_FLOAT = 35, | ||
| 196 | SVGA3D_R16G16_UNORM = 40, | ||
| 197 | SVGA3D_R16G16_UINT = 73, | ||
| 198 | SVGA3D_R16G16_SNORM = 39, | ||
| 199 | SVGA3D_R16G16_SINT = 74, | ||
| 200 | SVGA3D_R32_TYPELESS = 75, | ||
| 201 | SVGA3D_D32_FLOAT = 76, | ||
| 202 | SVGA3D_R32_FLOAT = 34, | ||
| 203 | SVGA3D_R32_UINT = 77, | ||
| 204 | SVGA3D_R32_SINT = 78, | ||
| 205 | SVGA3D_R24G8_TYPELESS = 79, | ||
| 206 | SVGA3D_D24_UNORM_S8_UINT = 80, | ||
| 207 | SVGA3D_R24_UNORM_X8_TYPELESS = 81, | ||
| 208 | SVGA3D_X24_TYPELESS_G8_UINT = 82, | ||
| 209 | SVGA3D_R8G8_TYPELESS = 83, | ||
| 210 | SVGA3D_R8G8_UNORM = 84, | ||
| 211 | SVGA3D_R8G8_UINT = 85, | ||
| 212 | SVGA3D_R8G8_SNORM = 27, | ||
| 213 | SVGA3D_R8G8_SINT = 86, | ||
| 214 | SVGA3D_R16_TYPELESS = 87, | ||
| 215 | SVGA3D_R16_FLOAT = 33, | ||
| 216 | SVGA3D_D16_UNORM = 8, | ||
| 217 | SVGA3D_R16_UNORM = 88, | ||
| 218 | SVGA3D_R16_UINT = 89, | ||
| 219 | SVGA3D_R16_SNORM = 90, | ||
| 220 | SVGA3D_R16_SINT = 91, | ||
| 221 | SVGA3D_R8_TYPELESS = 92, | ||
| 222 | SVGA3D_R8_UNORM = 93, | ||
| 223 | SVGA3D_R8_UINT = 94, | ||
| 224 | SVGA3D_R8_SNORM = 95, | ||
| 225 | SVGA3D_R8_SINT = 96, | ||
| 226 | SVGA3D_A8_UNORM = 32, | ||
| 227 | SVGA3D_R1_UNORM = 97, | ||
| 228 | SVGA3D_R9G9B9E5_SHAREDEXP = 98, | ||
| 229 | SVGA3D_R8G8_B8G8_UNORM = 99, | ||
| 230 | SVGA3D_G8R8_G8B8_UNORM = 100, | ||
| 231 | SVGA3D_BC1_TYPELESS = 101, | ||
| 232 | SVGA3D_BC1_UNORM = 15, | ||
| 233 | SVGA3D_BC1_UNORM_SRGB = 102, | ||
| 234 | SVGA3D_BC2_TYPELESS = 103, | ||
| 235 | SVGA3D_BC2_UNORM = 17, | ||
| 236 | SVGA3D_BC2_UNORM_SRGB = 104, | ||
| 237 | SVGA3D_BC3_TYPELESS = 105, | ||
| 238 | SVGA3D_BC3_UNORM = 19, | ||
| 239 | SVGA3D_BC3_UNORM_SRGB = 106, | ||
| 240 | SVGA3D_BC4_TYPELESS = 107, | ||
| 162 | SVGA3D_BC4_UNORM = 108, | 241 | SVGA3D_BC4_UNORM = 108, |
| 242 | SVGA3D_BC4_SNORM = 109, | ||
| 243 | SVGA3D_BC5_TYPELESS = 110, | ||
| 163 | SVGA3D_BC5_UNORM = 111, | 244 | SVGA3D_BC5_UNORM = 111, |
| 245 | SVGA3D_BC5_SNORM = 112, | ||
| 246 | SVGA3D_B5G6R5_UNORM = 3, | ||
| 247 | SVGA3D_B5G5R5A1_UNORM = 5, | ||
| 248 | SVGA3D_B8G8R8A8_UNORM = 2, | ||
| 249 | SVGA3D_B8G8R8X8_UNORM = 1, | ||
| 250 | SVGA3D_R10G10B10_XR_BIAS_A2_UNORM = 113, | ||
| 251 | SVGA3D_B8G8R8A8_TYPELESS = 114, | ||
| 252 | SVGA3D_B8G8R8A8_UNORM_SRGB = 115, | ||
| 253 | SVGA3D_B8G8R8X8_TYPELESS = 116, | ||
| 254 | SVGA3D_B8G8R8X8_UNORM_SRGB = 117, | ||
| 164 | 255 | ||
| 165 | /* Advanced D3D9 depth formats. */ | 256 | /* Advanced D3D9 depth formats. */ |
| 166 | SVGA3D_Z_DF16 = 118, | 257 | SVGA3D_Z_DF16 = 118, |
| 167 | SVGA3D_Z_DF24 = 119, | 258 | SVGA3D_Z_DF24 = 119, |
| 168 | SVGA3D_Z_D24S8_INT = 120, | 259 | SVGA3D_Z_D24S8_INT = 120, |
| 169 | 260 | ||
| 170 | SVGA3D_FORMAT_MAX | 261 | /* Planar video formats. */ |
| 262 | SVGA3D_YV12 = 121, | ||
| 263 | |||
| 264 | /* Shader constant formats. */ | ||
| 265 | SVGA3D_SURFACE_SHADERCONST_FLOAT = 122, | ||
| 266 | SVGA3D_SURFACE_SHADERCONST_INT = 123, | ||
| 267 | SVGA3D_SURFACE_SHADERCONST_BOOL = 124, | ||
| 268 | |||
| 269 | SVGA3D_FORMAT_MAX = 125, | ||
| 171 | } SVGA3dSurfaceFormat; | 270 | } SVGA3dSurfaceFormat; |
| 172 | 271 | ||
| 173 | typedef uint32 SVGA3dColor; /* a, r, g, b */ | 272 | typedef uint32 SVGA3dColor; /* a, r, g, b */ |
| @@ -957,15 +1056,21 @@ typedef enum { | |||
| 957 | } SVGA3dCubeFace; | 1056 | } SVGA3dCubeFace; |
| 958 | 1057 | ||
| 959 | typedef enum { | 1058 | typedef enum { |
| 1059 | SVGA3D_SHADERTYPE_INVALID = 0, | ||
| 1060 | SVGA3D_SHADERTYPE_MIN = 1, | ||
| 960 | SVGA3D_SHADERTYPE_VS = 1, | 1061 | SVGA3D_SHADERTYPE_VS = 1, |
| 961 | SVGA3D_SHADERTYPE_PS = 2, | 1062 | SVGA3D_SHADERTYPE_PS = 2, |
| 962 | SVGA3D_SHADERTYPE_MAX | 1063 | SVGA3D_SHADERTYPE_MAX = 3, |
| 1064 | SVGA3D_SHADERTYPE_GS = 3, | ||
| 963 | } SVGA3dShaderType; | 1065 | } SVGA3dShaderType; |
| 964 | 1066 | ||
| 1067 | #define SVGA3D_NUM_SHADERTYPE (SVGA3D_SHADERTYPE_MAX - SVGA3D_SHADERTYPE_MIN) | ||
| 1068 | |||
| 965 | typedef enum { | 1069 | typedef enum { |
| 966 | SVGA3D_CONST_TYPE_FLOAT = 0, | 1070 | SVGA3D_CONST_TYPE_FLOAT = 0, |
| 967 | SVGA3D_CONST_TYPE_INT = 1, | 1071 | SVGA3D_CONST_TYPE_INT = 1, |
| 968 | SVGA3D_CONST_TYPE_BOOL = 2, | 1072 | SVGA3D_CONST_TYPE_BOOL = 2, |
| 1073 | SVGA3D_CONST_TYPE_MAX | ||
| 969 | } SVGA3dShaderConstType; | 1074 | } SVGA3dShaderConstType; |
| 970 | 1075 | ||
| 971 | #define SVGA3D_MAX_SURFACE_FACES 6 | 1076 | #define SVGA3D_MAX_SURFACE_FACES 6 |
| @@ -1056,9 +1161,74 @@ typedef enum { | |||
| 1056 | #define SVGA_3D_CMD_GENERATE_MIPMAPS SVGA_3D_CMD_BASE + 31 | 1161 | #define SVGA_3D_CMD_GENERATE_MIPMAPS SVGA_3D_CMD_BASE + 31 |
| 1057 | #define SVGA_3D_CMD_ACTIVATE_SURFACE SVGA_3D_CMD_BASE + 40 | 1162 | #define SVGA_3D_CMD_ACTIVATE_SURFACE SVGA_3D_CMD_BASE + 40 |
| 1058 | #define SVGA_3D_CMD_DEACTIVATE_SURFACE SVGA_3D_CMD_BASE + 41 | 1163 | #define SVGA_3D_CMD_DEACTIVATE_SURFACE SVGA_3D_CMD_BASE + 41 |
| 1059 | #define SVGA_3D_CMD_MAX SVGA_3D_CMD_BASE + 42 | 1164 | #define SVGA_3D_CMD_SCREEN_DMA 1082 |
| 1060 | 1165 | #define SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE 1083 | |
| 1061 | #define SVGA_3D_CMD_FUTURE_MAX 2000 | 1166 | #define SVGA_3D_CMD_OPEN_CONTEXT_SURFACE 1084 |
| 1167 | |||
| 1168 | #define SVGA_3D_CMD_LOGICOPS_BITBLT 1085 | ||
| 1169 | #define SVGA_3D_CMD_LOGICOPS_TRANSBLT 1086 | ||
| 1170 | #define SVGA_3D_CMD_LOGICOPS_STRETCHBLT 1087 | ||
| 1171 | #define SVGA_3D_CMD_LOGICOPS_COLORFILL 1088 | ||
| 1172 | #define SVGA_3D_CMD_LOGICOPS_ALPHABLEND 1089 | ||
| 1173 | #define SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND 1090 | ||
| 1174 | |||
| 1175 | #define SVGA_3D_CMD_SET_OTABLE_BASE 1091 | ||
| 1176 | #define SVGA_3D_CMD_READBACK_OTABLE 1092 | ||
| 1177 | |||
| 1178 | #define SVGA_3D_CMD_DEFINE_GB_MOB 1093 | ||
| 1179 | #define SVGA_3D_CMD_DESTROY_GB_MOB 1094 | ||
| 1180 | #define SVGA_3D_CMD_REDEFINE_GB_MOB 1095 | ||
| 1181 | #define SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING 1096 | ||
| 1182 | |||
| 1183 | #define SVGA_3D_CMD_DEFINE_GB_SURFACE 1097 | ||
| 1184 | #define SVGA_3D_CMD_DESTROY_GB_SURFACE 1098 | ||
| 1185 | #define SVGA_3D_CMD_BIND_GB_SURFACE 1099 | ||
| 1186 | #define SVGA_3D_CMD_COND_BIND_GB_SURFACE 1100 | ||
| 1187 | #define SVGA_3D_CMD_UPDATE_GB_IMAGE 1101 | ||
| 1188 | #define SVGA_3D_CMD_UPDATE_GB_SURFACE 1102 | ||
| 1189 | #define SVGA_3D_CMD_READBACK_GB_IMAGE 1103 | ||
| 1190 | #define SVGA_3D_CMD_READBACK_GB_SURFACE 1104 | ||
| 1191 | #define SVGA_3D_CMD_INVALIDATE_GB_IMAGE 1105 | ||
| 1192 | #define SVGA_3D_CMD_INVALIDATE_GB_SURFACE 1106 | ||
| 1193 | |||
| 1194 | #define SVGA_3D_CMD_DEFINE_GB_CONTEXT 1107 | ||
| 1195 | #define SVGA_3D_CMD_DESTROY_GB_CONTEXT 1108 | ||
| 1196 | #define SVGA_3D_CMD_BIND_GB_CONTEXT 1109 | ||
| 1197 | #define SVGA_3D_CMD_READBACK_GB_CONTEXT 1110 | ||
| 1198 | #define SVGA_3D_CMD_INVALIDATE_GB_CONTEXT 1111 | ||
| 1199 | |||
| 1200 | #define SVGA_3D_CMD_DEFINE_GB_SHADER 1112 | ||
| 1201 | #define SVGA_3D_CMD_DESTROY_GB_SHADER 1113 | ||
| 1202 | #define SVGA_3D_CMD_BIND_GB_SHADER 1114 | ||
| 1203 | |||
| 1204 | #define SVGA_3D_CMD_SET_OTABLE_BASE64 1115 | ||
| 1205 | |||
| 1206 | #define SVGA_3D_CMD_BEGIN_GB_QUERY 1116 | ||
| 1207 | #define SVGA_3D_CMD_END_GB_QUERY 1117 | ||
| 1208 | #define SVGA_3D_CMD_WAIT_FOR_GB_QUERY 1118 | ||
| 1209 | |||
| 1210 | #define SVGA_3D_CMD_NOP 1119 | ||
| 1211 | |||
| 1212 | #define SVGA_3D_CMD_ENABLE_GART 1120 | ||
| 1213 | #define SVGA_3D_CMD_DISABLE_GART 1121 | ||
| 1214 | #define SVGA_3D_CMD_MAP_MOB_INTO_GART 1122 | ||
| 1215 | #define SVGA_3D_CMD_UNMAP_GART_RANGE 1123 | ||
| 1216 | |||
| 1217 | #define SVGA_3D_CMD_DEFINE_GB_SCREENTARGET 1124 | ||
| 1218 | #define SVGA_3D_CMD_DESTROY_GB_SCREENTARGET 1125 | ||
| 1219 | #define SVGA_3D_CMD_BIND_GB_SCREENTARGET 1126 | ||
| 1220 | #define SVGA_3D_CMD_UPDATE_GB_SCREENTARGET 1127 | ||
| 1221 | |||
| 1222 | #define SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL 1128 | ||
| 1223 | #define SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL 1129 | ||
| 1224 | |||
| 1225 | #define SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE 1130 | ||
| 1226 | |||
| 1227 | #define SVGA_3D_CMD_DEFINE_GB_MOB64 1135 | ||
| 1228 | #define SVGA_3D_CMD_REDEFINE_GB_MOB64 1136 | ||
| 1229 | |||
| 1230 | #define SVGA_3D_CMD_MAX 1142 | ||
| 1231 | #define SVGA_3D_CMD_FUTURE_MAX 3000 | ||
| 1062 | 1232 | ||
| 1063 | /* | 1233 | /* |
| 1064 | * Common substructures used in multiple FIFO commands: | 1234 | * Common substructures used in multiple FIFO commands: |
| @@ -1750,6 +1920,495 @@ struct { | |||
| 1750 | 1920 | ||
| 1751 | 1921 | ||
| 1752 | /* | 1922 | /* |
| 1923 | * Guest-backed surface definitions. | ||
| 1924 | */ | ||
| 1925 | |||
| 1926 | typedef uint32 SVGAMobId; | ||
| 1927 | |||
| 1928 | typedef enum SVGAMobFormat { | ||
| 1929 | SVGA3D_MOBFMT_INVALID = SVGA3D_INVALID_ID, | ||
| 1930 | SVGA3D_MOBFMT_PTDEPTH_0 = 0, | ||
| 1931 | SVGA3D_MOBFMT_PTDEPTH_1 = 1, | ||
| 1932 | SVGA3D_MOBFMT_PTDEPTH_2 = 2, | ||
| 1933 | SVGA3D_MOBFMT_RANGE = 3, | ||
| 1934 | SVGA3D_MOBFMT_PTDEPTH64_0 = 4, | ||
| 1935 | SVGA3D_MOBFMT_PTDEPTH64_1 = 5, | ||
| 1936 | SVGA3D_MOBFMT_PTDEPTH64_2 = 6, | ||
| 1937 | SVGA3D_MOBFMT_MAX, | ||
| 1938 | } SVGAMobFormat; | ||
| 1939 | |||
| 1940 | /* | ||
| 1941 | * Sizes of opaque types. | ||
| 1942 | */ | ||
| 1943 | |||
| 1944 | #define SVGA3D_OTABLE_MOB_ENTRY_SIZE 16 | ||
| 1945 | #define SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE 8 | ||
| 1946 | #define SVGA3D_OTABLE_SURFACE_ENTRY_SIZE 64 | ||
| 1947 | #define SVGA3D_OTABLE_SHADER_ENTRY_SIZE 16 | ||
| 1948 | #define SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE 64 | ||
| 1949 | #define SVGA3D_CONTEXT_DATA_SIZE 16384 | ||
| 1950 | |||
| 1951 | /* | ||
| 1952 | * SVGA3dCmdSetOTableBase -- | ||
| 1953 | * | ||
| 1954 | * This command allows the guest to specify the base PPN of the | ||
| 1955 | * specified object table. | ||
| 1956 | */ | ||
| 1957 | |||
| 1958 | typedef enum { | ||
| 1959 | SVGA_OTABLE_MOB = 0, | ||
| 1960 | SVGA_OTABLE_MIN = 0, | ||
| 1961 | SVGA_OTABLE_SURFACE = 1, | ||
| 1962 | SVGA_OTABLE_CONTEXT = 2, | ||
| 1963 | SVGA_OTABLE_SHADER = 3, | ||
| 1964 | SVGA_OTABLE_SCREEN_TARGET = 4, | ||
| 1965 | SVGA_OTABLE_DX9_MAX = 5, | ||
| 1966 | SVGA_OTABLE_MAX = 8 | ||
| 1967 | } SVGAOTableType; | ||
| 1968 | |||
| 1969 | typedef | ||
| 1970 | struct { | ||
| 1971 | SVGAOTableType type; | ||
| 1972 | PPN baseAddress; | ||
| 1973 | uint32 sizeInBytes; | ||
| 1974 | uint32 validSizeInBytes; | ||
| 1975 | SVGAMobFormat ptDepth; | ||
| 1976 | } | ||
| 1977 | __attribute__((__packed__)) | ||
| 1978 | SVGA3dCmdSetOTableBase; /* SVGA_3D_CMD_SET_OTABLE_BASE */ | ||
| 1979 | |||
| 1980 | typedef | ||
| 1981 | struct { | ||
| 1982 | SVGAOTableType type; | ||
| 1983 | PPN64 baseAddress; | ||
| 1984 | uint32 sizeInBytes; | ||
| 1985 | uint32 validSizeInBytes; | ||
| 1986 | SVGAMobFormat ptDepth; | ||
| 1987 | } | ||
| 1988 | __attribute__((__packed__)) | ||
| 1989 | SVGA3dCmdSetOTableBase64; /* SVGA_3D_CMD_SET_OTABLE_BASE64 */ | ||
| 1990 | |||
| 1991 | typedef | ||
| 1992 | struct { | ||
| 1993 | SVGAOTableType type; | ||
| 1994 | } | ||
| 1995 | __attribute__((__packed__)) | ||
| 1996 | SVGA3dCmdReadbackOTable; /* SVGA_3D_CMD_READBACK_OTABLE */ | ||
| 1997 | |||
| 1998 | /* | ||
| 1999 | * Define a memory object (Mob) in the OTable. | ||
| 2000 | */ | ||
| 2001 | |||
| 2002 | typedef | ||
| 2003 | struct SVGA3dCmdDefineGBMob { | ||
| 2004 | SVGAMobId mobid; | ||
| 2005 | SVGAMobFormat ptDepth; | ||
| 2006 | PPN base; | ||
| 2007 | uint32 sizeInBytes; | ||
| 2008 | } | ||
| 2009 | __attribute__((__packed__)) | ||
| 2010 | SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */ | ||
| 2011 | |||
| 2012 | |||
| 2013 | /* | ||
| 2014 | * Destroys an object in the OTable. | ||
| 2015 | */ | ||
| 2016 | |||
| 2017 | typedef | ||
| 2018 | struct SVGA3dCmdDestroyGBMob { | ||
| 2019 | SVGAMobId mobid; | ||
| 2020 | } | ||
| 2021 | __attribute__((__packed__)) | ||
| 2022 | SVGA3dCmdDestroyGBMob; /* SVGA_3D_CMD_DESTROY_GB_MOB */ | ||
| 2023 | |||
| 2024 | /* | ||
| 2025 | * Redefine an object in the OTable. | ||
| 2026 | */ | ||
| 2027 | |||
| 2028 | typedef | ||
| 2029 | struct SVGA3dCmdRedefineGBMob { | ||
| 2030 | SVGAMobId mobid; | ||
| 2031 | SVGAMobFormat ptDepth; | ||
| 2032 | PPN base; | ||
| 2033 | uint32 sizeInBytes; | ||
| 2034 | } | ||
| 2035 | __attribute__((__packed__)) | ||
| 2036 | SVGA3dCmdRedefineGBMob; /* SVGA_3D_CMD_REDEFINE_GB_MOB */ | ||
| 2037 | |||
| 2038 | /* | ||
| 2039 | * Define a memory object (Mob) in the OTable with a PPN64 base. | ||
| 2040 | */ | ||
| 2041 | |||
| 2042 | typedef | ||
| 2043 | struct SVGA3dCmdDefineGBMob64 { | ||
| 2044 | SVGAMobId mobid; | ||
| 2045 | SVGAMobFormat ptDepth; | ||
| 2046 | PPN64 base; | ||
| 2047 | uint32 sizeInBytes; | ||
| 2048 | } | ||
| 2049 | __attribute__((__packed__)) | ||
| 2050 | SVGA3dCmdDefineGBMob64; /* SVGA_3D_CMD_DEFINE_GB_MOB64 */ | ||
| 2051 | |||
| 2052 | /* | ||
| 2053 | * Redefine an object in the OTable with PPN64 base. | ||
| 2054 | */ | ||
| 2055 | |||
| 2056 | typedef | ||
| 2057 | struct SVGA3dCmdRedefineGBMob64 { | ||
| 2058 | SVGAMobId mobid; | ||
| 2059 | SVGAMobFormat ptDepth; | ||
| 2060 | PPN64 base; | ||
| 2061 | uint32 sizeInBytes; | ||
| 2062 | } | ||
| 2063 | __attribute__((__packed__)) | ||
| 2064 | SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */ | ||
| 2065 | |||
| 2066 | /* | ||
| 2067 | * Notification that the page tables have been modified. | ||
| 2068 | */ | ||
| 2069 | |||
| 2070 | typedef | ||
| 2071 | struct SVGA3dCmdUpdateGBMobMapping { | ||
| 2072 | SVGAMobId mobid; | ||
| 2073 | } | ||
| 2074 | __attribute__((__packed__)) | ||
| 2075 | SVGA3dCmdUpdateGBMobMapping; /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */ | ||
| 2076 | |||
| 2077 | /* | ||
| 2078 | * Define a guest-backed surface. | ||
| 2079 | */ | ||
| 2080 | |||
| 2081 | typedef | ||
| 2082 | struct SVGA3dCmdDefineGBSurface { | ||
| 2083 | uint32 sid; | ||
| 2084 | SVGA3dSurfaceFlags surfaceFlags; | ||
| 2085 | SVGA3dSurfaceFormat format; | ||
| 2086 | uint32 numMipLevels; | ||
| 2087 | uint32 multisampleCount; | ||
| 2088 | SVGA3dTextureFilter autogenFilter; | ||
| 2089 | SVGA3dSize size; | ||
| 2090 | } SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */ | ||
| 2091 | |||
| 2092 | /* | ||
| 2093 | * Destroy a guest-backed surface. | ||
| 2094 | */ | ||
| 2095 | |||
| 2096 | typedef | ||
| 2097 | struct SVGA3dCmdDestroyGBSurface { | ||
| 2098 | uint32 sid; | ||
| 2099 | } SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */ | ||
| 2100 | |||
| 2101 | /* | ||
| 2102 | * Bind a guest-backed surface to an object. | ||
| 2103 | */ | ||
| 2104 | |||
| 2105 | typedef | ||
| 2106 | struct SVGA3dCmdBindGBSurface { | ||
| 2107 | uint32 sid; | ||
| 2108 | SVGAMobId mobid; | ||
| 2109 | } SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */ | ||
| 2110 | |||
| 2111 | /* | ||
| 2112 | * Conditionally bind a mob to a guest backed surface if testMobid | ||
| 2113 | * matches the currently bound mob. Optionally issue a readback on | ||
| 2114 | * the surface while it is still bound to the old mobid if the mobid | ||
| 2115 | * is changed by this command. | ||
| 2116 | */ | ||
| 2117 | |||
| 2118 | #define SVGA3D_COND_BIND_GB_SURFACE_FLAG_READBACK (1 << 0) | ||
| 2119 | |||
| 2120 | typedef | ||
| 2121 | struct{ | ||
| 2122 | uint32 sid; | ||
| 2123 | SVGAMobId testMobid; | ||
| 2124 | SVGAMobId mobid; | ||
| 2125 | uint32 flags; | ||
| 2126 | } | ||
| 2127 | SVGA3dCmdCondBindGBSurface; /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */ | ||
| 2128 | |||
| 2129 | /* | ||
| 2130 | * Update an image in a guest-backed surface. | ||
| 2131 | * (Inform the device that the guest-contents have been updated.) | ||
| 2132 | */ | ||
| 2133 | |||
| 2134 | typedef | ||
| 2135 | struct SVGA3dCmdUpdateGBImage { | ||
| 2136 | SVGA3dSurfaceImageId image; | ||
| 2137 | SVGA3dBox box; | ||
| 2138 | } SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */ | ||
| 2139 | |||
| 2140 | /* | ||
| 2141 | * Update an entire guest-backed surface. | ||
| 2142 | * (Inform the device that the guest-contents have been updated.) | ||
| 2143 | */ | ||
| 2144 | |||
| 2145 | typedef | ||
| 2146 | struct SVGA3dCmdUpdateGBSurface { | ||
| 2147 | uint32 sid; | ||
| 2148 | } SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */ | ||
| 2149 | |||
| 2150 | /* | ||
| 2151 | * Readback an image in a guest-backed surface. | ||
| 2152 | * (Request the device to flush the dirty contents into the guest.) | ||
| 2153 | */ | ||
| 2154 | |||
| 2155 | typedef | ||
| 2156 | struct SVGA3dCmdReadbackGBImage { | ||
| 2157 | SVGA3dSurfaceImageId image; | ||
| 2158 | } SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE*/ | ||
| 2159 | |||
| 2160 | /* | ||
| 2161 | * Readback an entire guest-backed surface. | ||
| 2162 | * (Request the device to flush the dirty contents into the guest.) | ||
| 2163 | */ | ||
| 2164 | |||
| 2165 | typedef | ||
| 2166 | struct SVGA3dCmdReadbackGBSurface { | ||
| 2167 | uint32 sid; | ||
| 2168 | } SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */ | ||
| 2169 | |||
| 2170 | /* | ||
| 2171 | * Readback a sub rect of an image in a guest-backed surface. After | ||
| 2172 | * issuing this command the driver is required to issue an update call | ||
| 2173 | * of the same region before issuing any other commands that reference | ||
| 2174 | * this surface or rendering is not guaranteed. | ||
| 2175 | */ | ||
| 2176 | |||
| 2177 | typedef | ||
| 2178 | struct SVGA3dCmdReadbackGBImagePartial { | ||
| 2179 | SVGA3dSurfaceImageId image; | ||
| 2180 | SVGA3dBox box; | ||
| 2181 | uint32 invertBox; | ||
| 2182 | } | ||
| 2183 | SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */ | ||
| 2184 | |||
| 2185 | /* | ||
| 2186 | * Invalidate an image in a guest-backed surface. | ||
| 2187 | * (Notify the device that the contents can be lost.) | ||
| 2188 | */ | ||
| 2189 | |||
| 2190 | typedef | ||
| 2191 | struct SVGA3dCmdInvalidateGBImage { | ||
| 2192 | SVGA3dSurfaceImageId image; | ||
| 2193 | } SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */ | ||
| 2194 | |||
| 2195 | /* | ||
| 2196 | * Invalidate an entire guest-backed surface. | ||
| 2197 | * (Notify the device that the contents if all images can be lost.) | ||
| 2198 | */ | ||
| 2199 | |||
| 2200 | typedef | ||
| 2201 | struct SVGA3dCmdInvalidateGBSurface { | ||
| 2202 | uint32 sid; | ||
| 2203 | } SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */ | ||
| 2204 | |||
| 2205 | /* | ||
| 2206 | * Invalidate a sub rect of an image in a guest-backed surface. After | ||
| 2207 | * issuing this command the driver is required to issue an update call | ||
| 2208 | * of the same region before issuing any other commands that reference | ||
| 2209 | * this surface or rendering is not guaranteed. | ||
| 2210 | */ | ||
| 2211 | |||
| 2212 | typedef | ||
| 2213 | struct SVGA3dCmdInvalidateGBImagePartial { | ||
| 2214 | SVGA3dSurfaceImageId image; | ||
| 2215 | SVGA3dBox box; | ||
| 2216 | uint32 invertBox; | ||
| 2217 | } | ||
| 2218 | SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */ | ||
| 2219 | |||
| 2220 | /* | ||
| 2221 | * Define a guest-backed context. | ||
| 2222 | */ | ||
| 2223 | |||
| 2224 | typedef | ||
| 2225 | struct SVGA3dCmdDefineGBContext { | ||
| 2226 | uint32 cid; | ||
| 2227 | } SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */ | ||
| 2228 | |||
| 2229 | /* | ||
| 2230 | * Destroy a guest-backed context. | ||
| 2231 | */ | ||
| 2232 | |||
| 2233 | typedef | ||
| 2234 | struct SVGA3dCmdDestroyGBContext { | ||
| 2235 | uint32 cid; | ||
| 2236 | } SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */ | ||
| 2237 | |||
| 2238 | /* | ||
| 2239 | * Bind a guest-backed context. | ||
| 2240 | * | ||
| 2241 | * validContents should be set to 0 for new contexts, | ||
| 2242 | * and 1 if this is an old context which is getting paged | ||
| 2243 | * back on to the device. | ||
| 2244 | * | ||
| 2245 | * For new contexts, it is recommended that the driver | ||
| 2246 | * issue commands to initialize all interesting state | ||
| 2247 | * prior to rendering. | ||
| 2248 | */ | ||
| 2249 | |||
| 2250 | typedef | ||
| 2251 | struct SVGA3dCmdBindGBContext { | ||
| 2252 | uint32 cid; | ||
| 2253 | SVGAMobId mobid; | ||
| 2254 | uint32 validContents; | ||
| 2255 | } SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */ | ||
| 2256 | |||
| 2257 | /* | ||
| 2258 | * Readback a guest-backed context. | ||
| 2259 | * (Request that the device flush the contents back into guest memory.) | ||
| 2260 | */ | ||
| 2261 | |||
| 2262 | typedef | ||
| 2263 | struct SVGA3dCmdReadbackGBContext { | ||
| 2264 | uint32 cid; | ||
| 2265 | } SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */ | ||
| 2266 | |||
| 2267 | /* | ||
| 2268 | * Invalidate a guest-backed context. | ||
| 2269 | */ | ||
| 2270 | typedef | ||
| 2271 | struct SVGA3dCmdInvalidateGBContext { | ||
| 2272 | uint32 cid; | ||
| 2273 | } SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */ | ||
| 2274 | |||
| 2275 | /* | ||
| 2276 | * Define a guest-backed shader. | ||
| 2277 | */ | ||
| 2278 | |||
| 2279 | typedef | ||
| 2280 | struct SVGA3dCmdDefineGBShader { | ||
| 2281 | uint32 shid; | ||
| 2282 | SVGA3dShaderType type; | ||
| 2283 | uint32 sizeInBytes; | ||
| 2284 | } SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */ | ||
| 2285 | |||
| 2286 | /* | ||
| 2287 | * Bind a guest-backed shader. | ||
| 2288 | */ | ||
| 2289 | |||
| 2290 | typedef struct SVGA3dCmdBindGBShader { | ||
| 2291 | uint32 shid; | ||
| 2292 | SVGAMobId mobid; | ||
| 2293 | uint32 offsetInBytes; | ||
| 2294 | } SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */ | ||
| 2295 | |||
| 2296 | /* | ||
| 2297 | * Destroy a guest-backed shader. | ||
| 2298 | */ | ||
| 2299 | |||
| 2300 | typedef struct SVGA3dCmdDestroyGBShader { | ||
| 2301 | uint32 shid; | ||
| 2302 | } SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */ | ||
| 2303 | |||
| 2304 | typedef | ||
| 2305 | struct { | ||
| 2306 | uint32 cid; | ||
| 2307 | uint32 regStart; | ||
| 2308 | SVGA3dShaderType shaderType; | ||
| 2309 | SVGA3dShaderConstType constType; | ||
| 2310 | |||
| 2311 | /* | ||
| 2312 | * Followed by a variable number of shader constants. | ||
| 2313 | * | ||
| 2314 | * Note that FLOAT and INT constants are 4-dwords in length, while | ||
| 2315 | * BOOL constants are 1-dword in length. | ||
| 2316 | */ | ||
| 2317 | } SVGA3dCmdSetGBShaderConstInline; | ||
| 2318 | /* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */ | ||
| 2319 | |||
| 2320 | typedef | ||
| 2321 | struct { | ||
| 2322 | uint32 cid; | ||
| 2323 | SVGA3dQueryType type; | ||
| 2324 | } SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */ | ||
| 2325 | |||
| 2326 | typedef | ||
| 2327 | struct { | ||
| 2328 | uint32 cid; | ||
| 2329 | SVGA3dQueryType type; | ||
| 2330 | SVGAMobId mobid; | ||
| 2331 | uint32 offset; | ||
| 2332 | } SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */ | ||
| 2333 | |||
| 2334 | |||
| 2335 | /* | ||
| 2336 | * SVGA_3D_CMD_WAIT_FOR_GB_QUERY -- | ||
| 2337 | * | ||
| 2338 | * The semantics of this command are identical to the | ||
| 2339 | * SVGA_3D_CMD_WAIT_FOR_QUERY except that the results are written | ||
| 2340 | * to a Mob instead of a GMR. | ||
| 2341 | */ | ||
| 2342 | |||
| 2343 | typedef | ||
| 2344 | struct { | ||
| 2345 | uint32 cid; | ||
| 2346 | SVGA3dQueryType type; | ||
| 2347 | SVGAMobId mobid; | ||
| 2348 | uint32 offset; | ||
| 2349 | } SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */ | ||
| 2350 | |||
| 2351 | typedef | ||
| 2352 | struct { | ||
| 2353 | SVGAMobId mobid; | ||
| 2354 | uint32 fbOffset; | ||
| 2355 | uint32 initalized; | ||
| 2356 | } | ||
| 2357 | SVGA3dCmdEnableGart; /* SVGA_3D_CMD_ENABLE_GART */ | ||
| 2358 | |||
| 2359 | typedef | ||
| 2360 | struct { | ||
| 2361 | SVGAMobId mobid; | ||
| 2362 | uint32 gartOffset; | ||
| 2363 | } | ||
| 2364 | SVGA3dCmdMapMobIntoGart; /* SVGA_3D_CMD_MAP_MOB_INTO_GART */ | ||
| 2365 | |||
| 2366 | |||
| 2367 | typedef | ||
| 2368 | struct { | ||
| 2369 | uint32 gartOffset; | ||
| 2370 | uint32 numPages; | ||
| 2371 | } | ||
| 2372 | SVGA3dCmdUnmapGartRange; /* SVGA_3D_CMD_UNMAP_GART_RANGE */ | ||
| 2373 | |||
| 2374 | |||
| 2375 | /* | ||
| 2376 | * Screen Targets | ||
| 2377 | */ | ||
| 2378 | #define SVGA_STFLAG_PRIMARY (1 << 0) | ||
| 2379 | |||
| 2380 | typedef | ||
| 2381 | struct { | ||
| 2382 | uint32 stid; | ||
| 2383 | uint32 width; | ||
| 2384 | uint32 height; | ||
| 2385 | int32 xRoot; | ||
| 2386 | int32 yRoot; | ||
| 2387 | uint32 flags; | ||
| 2388 | } | ||
| 2389 | SVGA3dCmdDefineGBScreenTarget; /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */ | ||
| 2390 | |||
| 2391 | typedef | ||
| 2392 | struct { | ||
| 2393 | uint32 stid; | ||
| 2394 | } | ||
| 2395 | SVGA3dCmdDestroyGBScreenTarget; /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */ | ||
| 2396 | |||
| 2397 | typedef | ||
| 2398 | struct { | ||
| 2399 | uint32 stid; | ||
| 2400 | SVGA3dSurfaceImageId image; | ||
| 2401 | } | ||
| 2402 | SVGA3dCmdBindGBScreenTarget; /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */ | ||
| 2403 | |||
| 2404 | typedef | ||
| 2405 | struct { | ||
| 2406 | uint32 stid; | ||
| 2407 | SVGA3dBox box; | ||
| 2408 | } | ||
| 2409 | SVGA3dCmdUpdateGBScreenTarget; /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */ | ||
| 2410 | |||
| 2411 | /* | ||
| 1753 | * Capability query index. | 2412 | * Capability query index. |
| 1754 | * | 2413 | * |
| 1755 | * Notes: | 2414 | * Notes: |
| @@ -1879,10 +2538,41 @@ typedef enum { | |||
| 1879 | SVGA3D_DEVCAP_SURFACEFMT_BC5_UNORM = 83, | 2538 | SVGA3D_DEVCAP_SURFACEFMT_BC5_UNORM = 83, |
| 1880 | 2539 | ||
| 1881 | /* | 2540 | /* |
| 1882 | * Don't add new caps into the previous section; the values in this | 2541 | * Deprecated. |
| 1883 | * enumeration must not change. You can put new values right before | ||
| 1884 | * SVGA3D_DEVCAP_MAX. | ||
| 1885 | */ | 2542 | */ |
| 2543 | SVGA3D_DEVCAP_VGPU10 = 84, | ||
| 2544 | |||
| 2545 | /* | ||
| 2546 | * This contains several SVGA_3D_CAPS_VIDEO_DECODE elements | ||
| 2547 | * ored together, one for every type of video decoding supported. | ||
| 2548 | */ | ||
| 2549 | SVGA3D_DEVCAP_VIDEO_DECODE = 85, | ||
| 2550 | |||
| 2551 | /* | ||
| 2552 | * This contains several SVGA_3D_CAPS_VIDEO_PROCESS elements | ||
| 2553 | * ored together, one for every type of video processing supported. | ||
| 2554 | */ | ||
| 2555 | SVGA3D_DEVCAP_VIDEO_PROCESS = 86, | ||
| 2556 | |||
| 2557 | SVGA3D_DEVCAP_LINE_AA = 87, /* boolean */ | ||
| 2558 | SVGA3D_DEVCAP_LINE_STIPPLE = 88, /* boolean */ | ||
| 2559 | SVGA3D_DEVCAP_MAX_LINE_WIDTH = 89, /* float */ | ||
| 2560 | SVGA3D_DEVCAP_MAX_AA_LINE_WIDTH = 90, /* float */ | ||
| 2561 | |||
| 2562 | SVGA3D_DEVCAP_SURFACEFMT_YV12 = 91, | ||
| 2563 | |||
| 2564 | /* | ||
| 2565 | * Does the host support the SVGA logic ops commands? | ||
| 2566 | */ | ||
| 2567 | SVGA3D_DEVCAP_LOGICOPS = 92, | ||
| 2568 | |||
| 2569 | /* | ||
| 2570 | * What support does the host have for screen targets? | ||
| 2571 | * | ||
| 2572 | * See the SVGA3D_SCREENTARGET_CAP bits below. | ||
| 2573 | */ | ||
| 2574 | SVGA3D_DEVCAP_SCREENTARGETS = 93, | ||
| 2575 | |||
| 1886 | SVGA3D_DEVCAP_MAX /* This must be the last index. */ | 2576 | SVGA3D_DEVCAP_MAX /* This must be the last index. */ |
| 1887 | } SVGA3dDevCapIndex; | 2577 | } SVGA3dDevCapIndex; |
| 1888 | 2578 | ||
diff --git a/drivers/gpu/drm/vmwgfx/svga_reg.h b/drivers/gpu/drm/vmwgfx/svga_reg.h index 01f63cb49678..71defa4d2d75 100644 --- a/drivers/gpu/drm/vmwgfx/svga_reg.h +++ b/drivers/gpu/drm/vmwgfx/svga_reg.h | |||
| @@ -169,7 +169,10 @@ enum { | |||
| 169 | SVGA_REG_TRACES = 45, /* Enable trace-based updates even when FIFO is on */ | 169 | SVGA_REG_TRACES = 45, /* Enable trace-based updates even when FIFO is on */ |
| 170 | SVGA_REG_GMRS_MAX_PAGES = 46, /* Maximum number of 4KB pages for all GMRs */ | 170 | SVGA_REG_GMRS_MAX_PAGES = 46, /* Maximum number of 4KB pages for all GMRs */ |
| 171 | SVGA_REG_MEMORY_SIZE = 47, /* Total dedicated device memory excluding FIFO */ | 171 | SVGA_REG_MEMORY_SIZE = 47, /* Total dedicated device memory excluding FIFO */ |
| 172 | SVGA_REG_TOP = 48, /* Must be 1 more than the last register */ | 172 | SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50, /* Max primary memory */ |
| 173 | SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */ | ||
| 174 | SVGA_REG_DEV_CAP = 52, /* Write dev cap index, read value */ | ||
| 175 | SVGA_REG_TOP = 53, /* Must be 1 more than the last register */ | ||
| 173 | 176 | ||
| 174 | SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */ | 177 | SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */ |
| 175 | /* Next 768 (== 256*3) registers exist for colormap */ | 178 | /* Next 768 (== 256*3) registers exist for colormap */ |
| @@ -431,7 +434,10 @@ struct SVGASignedPoint { | |||
| 431 | #define SVGA_CAP_TRACES 0x00200000 | 434 | #define SVGA_CAP_TRACES 0x00200000 |
| 432 | #define SVGA_CAP_GMR2 0x00400000 | 435 | #define SVGA_CAP_GMR2 0x00400000 |
| 433 | #define SVGA_CAP_SCREEN_OBJECT_2 0x00800000 | 436 | #define SVGA_CAP_SCREEN_OBJECT_2 0x00800000 |
| 434 | 437 | #define SVGA_CAP_COMMAND_BUFFERS 0x01000000 | |
| 438 | #define SVGA_CAP_DEAD1 0x02000000 | ||
| 439 | #define SVGA_CAP_CMD_BUFFERS_2 0x04000000 | ||
| 440 | #define SVGA_CAP_GBOBJECTS 0x08000000 | ||
| 435 | 441 | ||
| 436 | /* | 442 | /* |
| 437 | * FIFO register indices. | 443 | * FIFO register indices. |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c index 0489c6152482..6327cfc36805 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | |||
| @@ -40,6 +40,10 @@ static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM | | |||
| 40 | static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM | | 40 | static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM | |
| 41 | TTM_PL_FLAG_CACHED; | 41 | TTM_PL_FLAG_CACHED; |
| 42 | 42 | ||
| 43 | static uint32_t sys_ne_placement_flags = TTM_PL_FLAG_SYSTEM | | ||
| 44 | TTM_PL_FLAG_CACHED | | ||
| 45 | TTM_PL_FLAG_NO_EVICT; | ||
| 46 | |||
| 43 | static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR | | 47 | static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR | |
| 44 | TTM_PL_FLAG_CACHED; | 48 | TTM_PL_FLAG_CACHED; |
| 45 | 49 | ||
| @@ -47,6 +51,9 @@ static uint32_t gmr_ne_placement_flags = VMW_PL_FLAG_GMR | | |||
| 47 | TTM_PL_FLAG_CACHED | | 51 | TTM_PL_FLAG_CACHED | |
| 48 | TTM_PL_FLAG_NO_EVICT; | 52 | TTM_PL_FLAG_NO_EVICT; |
| 49 | 53 | ||
| 54 | static uint32_t mob_placement_flags = VMW_PL_FLAG_MOB | | ||
| 55 | TTM_PL_FLAG_CACHED; | ||
| 56 | |||
| 50 | struct ttm_placement vmw_vram_placement = { | 57 | struct ttm_placement vmw_vram_placement = { |
| 51 | .fpfn = 0, | 58 | .fpfn = 0, |
| 52 | .lpfn = 0, | 59 | .lpfn = 0, |
| @@ -116,16 +123,26 @@ struct ttm_placement vmw_sys_placement = { | |||
| 116 | .busy_placement = &sys_placement_flags | 123 | .busy_placement = &sys_placement_flags |
| 117 | }; | 124 | }; |
| 118 | 125 | ||
| 126 | struct ttm_placement vmw_sys_ne_placement = { | ||
| 127 | .fpfn = 0, | ||
| 128 | .lpfn = 0, | ||
| 129 | .num_placement = 1, | ||
| 130 | .placement = &sys_ne_placement_flags, | ||
| 131 | .num_busy_placement = 1, | ||
| 132 | .busy_placement = &sys_ne_placement_flags | ||
| 133 | }; | ||
| 134 | |||
| 119 | static uint32_t evictable_placement_flags[] = { | 135 | static uint32_t evictable_placement_flags[] = { |
| 120 | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED, | 136 | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED, |
| 121 | TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED, | 137 | TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED, |
| 122 | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | 138 | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED, |
| 139 | VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | ||
| 123 | }; | 140 | }; |
| 124 | 141 | ||
| 125 | struct ttm_placement vmw_evictable_placement = { | 142 | struct ttm_placement vmw_evictable_placement = { |
| 126 | .fpfn = 0, | 143 | .fpfn = 0, |
| 127 | .lpfn = 0, | 144 | .lpfn = 0, |
| 128 | .num_placement = 3, | 145 | .num_placement = 4, |
| 129 | .placement = evictable_placement_flags, | 146 | .placement = evictable_placement_flags, |
| 130 | .num_busy_placement = 1, | 147 | .num_busy_placement = 1, |
| 131 | .busy_placement = &sys_placement_flags | 148 | .busy_placement = &sys_placement_flags |
| @@ -140,10 +157,21 @@ struct ttm_placement vmw_srf_placement = { | |||
| 140 | .busy_placement = gmr_vram_placement_flags | 157 | .busy_placement = gmr_vram_placement_flags |
| 141 | }; | 158 | }; |
| 142 | 159 | ||
| 160 | struct ttm_placement vmw_mob_placement = { | ||
| 161 | .fpfn = 0, | ||
| 162 | .lpfn = 0, | ||
| 163 | .num_placement = 1, | ||
| 164 | .num_busy_placement = 1, | ||
| 165 | .placement = &mob_placement_flags, | ||
| 166 | .busy_placement = &mob_placement_flags | ||
| 167 | }; | ||
| 168 | |||
| 143 | struct vmw_ttm_tt { | 169 | struct vmw_ttm_tt { |
| 144 | struct ttm_dma_tt dma_ttm; | 170 | struct ttm_dma_tt dma_ttm; |
| 145 | struct vmw_private *dev_priv; | 171 | struct vmw_private *dev_priv; |
| 146 | int gmr_id; | 172 | int gmr_id; |
| 173 | struct vmw_mob *mob; | ||
| 174 | int mem_type; | ||
| 147 | struct sg_table sgt; | 175 | struct sg_table sgt; |
| 148 | struct vmw_sg_table vsgt; | 176 | struct vmw_sg_table vsgt; |
| 149 | uint64_t sg_alloc_size; | 177 | uint64_t sg_alloc_size; |
| @@ -244,6 +272,7 @@ void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt, | |||
| 244 | viter->dma_address = &__vmw_piter_dma_addr; | 272 | viter->dma_address = &__vmw_piter_dma_addr; |
| 245 | viter->page = &__vmw_piter_non_sg_page; | 273 | viter->page = &__vmw_piter_non_sg_page; |
| 246 | viter->addrs = vsgt->addrs; | 274 | viter->addrs = vsgt->addrs; |
| 275 | viter->pages = vsgt->pages; | ||
| 247 | break; | 276 | break; |
| 248 | case vmw_dma_map_populate: | 277 | case vmw_dma_map_populate: |
| 249 | case vmw_dma_map_bind: | 278 | case vmw_dma_map_bind: |
| @@ -424,6 +453,63 @@ static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt) | |||
| 424 | vmw_tt->mapped = false; | 453 | vmw_tt->mapped = false; |
| 425 | } | 454 | } |
| 426 | 455 | ||
| 456 | |||
| 457 | /** | ||
| 458 | * vmw_bo_map_dma - Make sure buffer object pages are visible to the device | ||
| 459 | * | ||
| 460 | * @bo: Pointer to a struct ttm_buffer_object | ||
| 461 | * | ||
| 462 | * Wrapper around vmw_ttm_map_dma, that takes a TTM buffer object pointer | ||
| 463 | * instead of a pointer to a struct vmw_ttm_backend as argument. | ||
| 464 | * Note that the buffer object must be either pinned or reserved before | ||
| 465 | * calling this function. | ||
| 466 | */ | ||
| 467 | int vmw_bo_map_dma(struct ttm_buffer_object *bo) | ||
| 468 | { | ||
| 469 | struct vmw_ttm_tt *vmw_tt = | ||
| 470 | container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm); | ||
| 471 | |||
| 472 | return vmw_ttm_map_dma(vmw_tt); | ||
| 473 | } | ||
| 474 | |||
| 475 | |||
| 476 | /** | ||
| 477 | * vmw_bo_unmap_dma - Make sure buffer object pages are visible to the device | ||
| 478 | * | ||
| 479 | * @bo: Pointer to a struct ttm_buffer_object | ||
| 480 | * | ||
| 481 | * Wrapper around vmw_ttm_unmap_dma, that takes a TTM buffer object pointer | ||
| 482 | * instead of a pointer to a struct vmw_ttm_backend as argument. | ||
| 483 | */ | ||
| 484 | void vmw_bo_unmap_dma(struct ttm_buffer_object *bo) | ||
| 485 | { | ||
| 486 | struct vmw_ttm_tt *vmw_tt = | ||
| 487 | container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm); | ||
| 488 | |||
| 489 | vmw_ttm_unmap_dma(vmw_tt); | ||
| 490 | } | ||
| 491 | |||
| 492 | |||
| 493 | /** | ||
| 494 | * vmw_bo_sg_table - Return a struct vmw_sg_table object for a | ||
| 495 | * TTM buffer object | ||
| 496 | * | ||
| 497 | * @bo: Pointer to a struct ttm_buffer_object | ||
| 498 | * | ||
| 499 | * Returns a pointer to a struct vmw_sg_table object. The object should | ||
| 500 | * not be freed after use. | ||
| 501 | * Note that for the device addresses to be valid, the buffer object must | ||
| 502 | * either be reserved or pinned. | ||
| 503 | */ | ||
| 504 | const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo) | ||
| 505 | { | ||
| 506 | struct vmw_ttm_tt *vmw_tt = | ||
| 507 | container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm); | ||
| 508 | |||
| 509 | return &vmw_tt->vsgt; | ||
| 510 | } | ||
| 511 | |||
| 512 | |||
| 427 | static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) | 513 | static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) |
| 428 | { | 514 | { |
| 429 | struct vmw_ttm_tt *vmw_be = | 515 | struct vmw_ttm_tt *vmw_be = |
| @@ -435,9 +521,27 @@ static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) | |||
| 435 | return ret; | 521 | return ret; |
| 436 | 522 | ||
| 437 | vmw_be->gmr_id = bo_mem->start; | 523 | vmw_be->gmr_id = bo_mem->start; |
| 524 | vmw_be->mem_type = bo_mem->mem_type; | ||
| 525 | |||
| 526 | switch (bo_mem->mem_type) { | ||
| 527 | case VMW_PL_GMR: | ||
| 528 | return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt, | ||
| 529 | ttm->num_pages, vmw_be->gmr_id); | ||
| 530 | case VMW_PL_MOB: | ||
| 531 | if (unlikely(vmw_be->mob == NULL)) { | ||
| 532 | vmw_be->mob = | ||
| 533 | vmw_mob_create(ttm->num_pages); | ||
| 534 | if (unlikely(vmw_be->mob == NULL)) | ||
| 535 | return -ENOMEM; | ||
| 536 | } | ||
| 438 | 537 | ||
| 439 | return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt, | 538 | return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob, |
| 440 | ttm->num_pages, vmw_be->gmr_id); | 539 | &vmw_be->vsgt, ttm->num_pages, |
| 540 | vmw_be->gmr_id); | ||
| 541 | default: | ||
| 542 | BUG(); | ||
| 543 | } | ||
| 544 | return 0; | ||
| 441 | } | 545 | } |
| 442 | 546 | ||
| 443 | static int vmw_ttm_unbind(struct ttm_tt *ttm) | 547 | static int vmw_ttm_unbind(struct ttm_tt *ttm) |
| @@ -445,7 +549,16 @@ static int vmw_ttm_unbind(struct ttm_tt *ttm) | |||
| 445 | struct vmw_ttm_tt *vmw_be = | 549 | struct vmw_ttm_tt *vmw_be = |
| 446 | container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm); | 550 | container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm); |
| 447 | 551 | ||
| 448 | vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id); | 552 | switch (vmw_be->mem_type) { |
| 553 | case VMW_PL_GMR: | ||
| 554 | vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id); | ||
| 555 | break; | ||
| 556 | case VMW_PL_MOB: | ||
| 557 | vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob); | ||
| 558 | break; | ||
| 559 | default: | ||
| 560 | BUG(); | ||
| 561 | } | ||
| 449 | 562 | ||
| 450 | if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind) | 563 | if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind) |
| 451 | vmw_ttm_unmap_dma(vmw_be); | 564 | vmw_ttm_unmap_dma(vmw_be); |
| @@ -453,6 +566,7 @@ static int vmw_ttm_unbind(struct ttm_tt *ttm) | |||
| 453 | return 0; | 566 | return 0; |
| 454 | } | 567 | } |
| 455 | 568 | ||
| 569 | |||
| 456 | static void vmw_ttm_destroy(struct ttm_tt *ttm) | 570 | static void vmw_ttm_destroy(struct ttm_tt *ttm) |
| 457 | { | 571 | { |
| 458 | struct vmw_ttm_tt *vmw_be = | 572 | struct vmw_ttm_tt *vmw_be = |
| @@ -463,9 +577,14 @@ static void vmw_ttm_destroy(struct ttm_tt *ttm) | |||
| 463 | ttm_dma_tt_fini(&vmw_be->dma_ttm); | 577 | ttm_dma_tt_fini(&vmw_be->dma_ttm); |
| 464 | else | 578 | else |
| 465 | ttm_tt_fini(ttm); | 579 | ttm_tt_fini(ttm); |
| 580 | |||
| 581 | if (vmw_be->mob) | ||
| 582 | vmw_mob_destroy(vmw_be->mob); | ||
| 583 | |||
| 466 | kfree(vmw_be); | 584 | kfree(vmw_be); |
| 467 | } | 585 | } |
| 468 | 586 | ||
| 587 | |||
| 469 | static int vmw_ttm_populate(struct ttm_tt *ttm) | 588 | static int vmw_ttm_populate(struct ttm_tt *ttm) |
| 470 | { | 589 | { |
| 471 | struct vmw_ttm_tt *vmw_tt = | 590 | struct vmw_ttm_tt *vmw_tt = |
| @@ -500,6 +619,12 @@ static void vmw_ttm_unpopulate(struct ttm_tt *ttm) | |||
| 500 | struct vmw_private *dev_priv = vmw_tt->dev_priv; | 619 | struct vmw_private *dev_priv = vmw_tt->dev_priv; |
| 501 | struct ttm_mem_global *glob = vmw_mem_glob(dev_priv); | 620 | struct ttm_mem_global *glob = vmw_mem_glob(dev_priv); |
| 502 | 621 | ||
| 622 | |||
| 623 | if (vmw_tt->mob) { | ||
| 624 | vmw_mob_destroy(vmw_tt->mob); | ||
| 625 | vmw_tt->mob = NULL; | ||
| 626 | } | ||
| 627 | |||
| 503 | vmw_ttm_unmap_dma(vmw_tt); | 628 | vmw_ttm_unmap_dma(vmw_tt); |
| 504 | if (dev_priv->map_mode == vmw_dma_alloc_coherent) { | 629 | if (dev_priv->map_mode == vmw_dma_alloc_coherent) { |
| 505 | size_t size = | 630 | size_t size = |
| @@ -517,7 +642,7 @@ static struct ttm_backend_func vmw_ttm_func = { | |||
| 517 | .destroy = vmw_ttm_destroy, | 642 | .destroy = vmw_ttm_destroy, |
| 518 | }; | 643 | }; |
| 519 | 644 | ||
| 520 | struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev, | 645 | static struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev, |
| 521 | unsigned long size, uint32_t page_flags, | 646 | unsigned long size, uint32_t page_flags, |
| 522 | struct page *dummy_read_page) | 647 | struct page *dummy_read_page) |
| 523 | { | 648 | { |
| @@ -530,6 +655,7 @@ struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev, | |||
| 530 | 655 | ||
| 531 | vmw_be->dma_ttm.ttm.func = &vmw_ttm_func; | 656 | vmw_be->dma_ttm.ttm.func = &vmw_ttm_func; |
| 532 | vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev); | 657 | vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev); |
| 658 | vmw_be->mob = NULL; | ||
| 533 | 659 | ||
| 534 | if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent) | 660 | if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent) |
| 535 | ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bdev, size, page_flags, | 661 | ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bdev, size, page_flags, |
| @@ -546,12 +672,12 @@ out_no_init: | |||
| 546 | return NULL; | 672 | return NULL; |
| 547 | } | 673 | } |
| 548 | 674 | ||
| 549 | int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) | 675 | static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) |
| 550 | { | 676 | { |
| 551 | return 0; | 677 | return 0; |
| 552 | } | 678 | } |
| 553 | 679 | ||
| 554 | int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, | 680 | static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, |
| 555 | struct ttm_mem_type_manager *man) | 681 | struct ttm_mem_type_manager *man) |
| 556 | { | 682 | { |
| 557 | switch (type) { | 683 | switch (type) { |
| @@ -571,6 +697,7 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, | |||
| 571 | man->default_caching = TTM_PL_FLAG_CACHED; | 697 | man->default_caching = TTM_PL_FLAG_CACHED; |
| 572 | break; | 698 | break; |
| 573 | case VMW_PL_GMR: | 699 | case VMW_PL_GMR: |
| 700 | case VMW_PL_MOB: | ||
| 574 | /* | 701 | /* |
| 575 | * "Guest Memory Regions" is an aperture like feature with | 702 | * "Guest Memory Regions" is an aperture like feature with |
| 576 | * one slot per bo. There is an upper limit of the number of | 703 | * one slot per bo. There is an upper limit of the number of |
| @@ -589,7 +716,7 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, | |||
| 589 | return 0; | 716 | return 0; |
| 590 | } | 717 | } |
| 591 | 718 | ||
| 592 | void vmw_evict_flags(struct ttm_buffer_object *bo, | 719 | static void vmw_evict_flags(struct ttm_buffer_object *bo, |
| 593 | struct ttm_placement *placement) | 720 | struct ttm_placement *placement) |
| 594 | { | 721 | { |
| 595 | *placement = vmw_sys_placement; | 722 | *placement = vmw_sys_placement; |
| @@ -618,6 +745,7 @@ static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg | |||
| 618 | switch (mem->mem_type) { | 745 | switch (mem->mem_type) { |
| 619 | case TTM_PL_SYSTEM: | 746 | case TTM_PL_SYSTEM: |
| 620 | case VMW_PL_GMR: | 747 | case VMW_PL_GMR: |
| 748 | case VMW_PL_MOB: | ||
| 621 | return 0; | 749 | return 0; |
| 622 | case TTM_PL_VRAM: | 750 | case TTM_PL_VRAM: |
| 623 | mem->bus.offset = mem->start << PAGE_SHIFT; | 751 | mem->bus.offset = mem->start << PAGE_SHIFT; |
| @@ -677,6 +805,38 @@ static int vmw_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible) | |||
| 677 | VMW_FENCE_WAIT_TIMEOUT); | 805 | VMW_FENCE_WAIT_TIMEOUT); |
| 678 | } | 806 | } |
| 679 | 807 | ||
| 808 | /** | ||
| 809 | * vmw_move_notify - TTM move_notify_callback | ||
| 810 | * | ||
| 811 | * @bo: The TTM buffer object about to move. | ||
| 812 | * @mem: The truct ttm_mem_reg indicating to what memory | ||
| 813 | * region the move is taking place. | ||
| 814 | * | ||
| 815 | * Calls move_notify for all subsystems needing it. | ||
| 816 | * (currently only resources). | ||
| 817 | */ | ||
| 818 | static void vmw_move_notify(struct ttm_buffer_object *bo, | ||
| 819 | struct ttm_mem_reg *mem) | ||
| 820 | { | ||
| 821 | vmw_resource_move_notify(bo, mem); | ||
| 822 | } | ||
| 823 | |||
| 824 | |||
| 825 | /** | ||
| 826 | * vmw_swap_notify - TTM move_notify_callback | ||
| 827 | * | ||
| 828 | * @bo: The TTM buffer object about to be swapped out. | ||
| 829 | */ | ||
| 830 | static void vmw_swap_notify(struct ttm_buffer_object *bo) | ||
| 831 | { | ||
| 832 | struct ttm_bo_device *bdev = bo->bdev; | ||
| 833 | |||
| 834 | spin_lock(&bdev->fence_lock); | ||
| 835 | ttm_bo_wait(bo, false, false, false); | ||
| 836 | spin_unlock(&bdev->fence_lock); | ||
| 837 | } | ||
| 838 | |||
| 839 | |||
| 680 | struct ttm_bo_driver vmw_bo_driver = { | 840 | struct ttm_bo_driver vmw_bo_driver = { |
| 681 | .ttm_tt_create = &vmw_ttm_tt_create, | 841 | .ttm_tt_create = &vmw_ttm_tt_create, |
| 682 | .ttm_tt_populate = &vmw_ttm_populate, | 842 | .ttm_tt_populate = &vmw_ttm_populate, |
| @@ -691,8 +851,8 @@ struct ttm_bo_driver vmw_bo_driver = { | |||
| 691 | .sync_obj_flush = vmw_sync_obj_flush, | 851 | .sync_obj_flush = vmw_sync_obj_flush, |
| 692 | .sync_obj_unref = vmw_sync_obj_unref, | 852 | .sync_obj_unref = vmw_sync_obj_unref, |
| 693 | .sync_obj_ref = vmw_sync_obj_ref, | 853 | .sync_obj_ref = vmw_sync_obj_ref, |
| 694 | .move_notify = NULL, | 854 | .move_notify = vmw_move_notify, |
| 695 | .swap_notify = NULL, | 855 | .swap_notify = vmw_swap_notify, |
| 696 | .fault_reserve_notify = &vmw_ttm_fault_reserve_notify, | 856 | .fault_reserve_notify = &vmw_ttm_fault_reserve_notify, |
| 697 | .io_mem_reserve = &vmw_ttm_io_mem_reserve, | 857 | .io_mem_reserve = &vmw_ttm_io_mem_reserve, |
| 698 | .io_mem_free = &vmw_ttm_io_mem_free, | 858 | .io_mem_free = &vmw_ttm_io_mem_free, |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c index 00ae0925aca8..82c41daebc0e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c | |||
| @@ -32,12 +32,28 @@ | |||
| 32 | struct vmw_user_context { | 32 | struct vmw_user_context { |
| 33 | struct ttm_base_object base; | 33 | struct ttm_base_object base; |
| 34 | struct vmw_resource res; | 34 | struct vmw_resource res; |
| 35 | struct vmw_ctx_binding_state cbs; | ||
| 35 | }; | 36 | }; |
| 36 | 37 | ||
| 38 | |||
| 39 | |||
| 40 | typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *); | ||
| 41 | |||
| 37 | static void vmw_user_context_free(struct vmw_resource *res); | 42 | static void vmw_user_context_free(struct vmw_resource *res); |
| 38 | static struct vmw_resource * | 43 | static struct vmw_resource * |
| 39 | vmw_user_context_base_to_res(struct ttm_base_object *base); | 44 | vmw_user_context_base_to_res(struct ttm_base_object *base); |
| 40 | 45 | ||
| 46 | static int vmw_gb_context_create(struct vmw_resource *res); | ||
| 47 | static int vmw_gb_context_bind(struct vmw_resource *res, | ||
| 48 | struct ttm_validate_buffer *val_buf); | ||
| 49 | static int vmw_gb_context_unbind(struct vmw_resource *res, | ||
| 50 | bool readback, | ||
| 51 | struct ttm_validate_buffer *val_buf); | ||
| 52 | static int vmw_gb_context_destroy(struct vmw_resource *res); | ||
| 53 | static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi); | ||
| 54 | static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi); | ||
| 55 | static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi); | ||
| 56 | static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs); | ||
| 41 | static uint64_t vmw_user_context_size; | 57 | static uint64_t vmw_user_context_size; |
| 42 | 58 | ||
| 43 | static const struct vmw_user_resource_conv user_context_conv = { | 59 | static const struct vmw_user_resource_conv user_context_conv = { |
| @@ -62,6 +78,23 @@ static const struct vmw_res_func vmw_legacy_context_func = { | |||
| 62 | .unbind = NULL | 78 | .unbind = NULL |
| 63 | }; | 79 | }; |
| 64 | 80 | ||
| 81 | static const struct vmw_res_func vmw_gb_context_func = { | ||
| 82 | .res_type = vmw_res_context, | ||
| 83 | .needs_backup = true, | ||
| 84 | .may_evict = true, | ||
| 85 | .type_name = "guest backed contexts", | ||
| 86 | .backup_placement = &vmw_mob_placement, | ||
| 87 | .create = vmw_gb_context_create, | ||
| 88 | .destroy = vmw_gb_context_destroy, | ||
| 89 | .bind = vmw_gb_context_bind, | ||
| 90 | .unbind = vmw_gb_context_unbind | ||
| 91 | }; | ||
| 92 | |||
| 93 | static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = { | ||
| 94 | [vmw_ctx_binding_shader] = vmw_context_scrub_shader, | ||
| 95 | [vmw_ctx_binding_rt] = vmw_context_scrub_render_target, | ||
| 96 | [vmw_ctx_binding_tex] = vmw_context_scrub_texture }; | ||
| 97 | |||
| 65 | /** | 98 | /** |
| 66 | * Context management: | 99 | * Context management: |
| 67 | */ | 100 | */ |
| @@ -76,6 +109,16 @@ static void vmw_hw_context_destroy(struct vmw_resource *res) | |||
| 76 | } *cmd; | 109 | } *cmd; |
| 77 | 110 | ||
| 78 | 111 | ||
| 112 | if (res->func->destroy == vmw_gb_context_destroy) { | ||
| 113 | mutex_lock(&dev_priv->cmdbuf_mutex); | ||
| 114 | (void) vmw_gb_context_destroy(res); | ||
| 115 | if (dev_priv->pinned_bo != NULL && | ||
| 116 | !dev_priv->query_cid_valid) | ||
| 117 | __vmw_execbuf_release_pinned_bo(dev_priv, NULL); | ||
| 118 | mutex_unlock(&dev_priv->cmdbuf_mutex); | ||
| 119 | return; | ||
| 120 | } | ||
| 121 | |||
| 79 | vmw_execbuf_release_pinned_bo(dev_priv); | 122 | vmw_execbuf_release_pinned_bo(dev_priv); |
| 80 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | 123 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
| 81 | if (unlikely(cmd == NULL)) { | 124 | if (unlikely(cmd == NULL)) { |
| @@ -92,6 +135,33 @@ static void vmw_hw_context_destroy(struct vmw_resource *res) | |||
| 92 | vmw_3d_resource_dec(dev_priv, false); | 135 | vmw_3d_resource_dec(dev_priv, false); |
| 93 | } | 136 | } |
| 94 | 137 | ||
| 138 | static int vmw_gb_context_init(struct vmw_private *dev_priv, | ||
| 139 | struct vmw_resource *res, | ||
| 140 | void (*res_free) (struct vmw_resource *res)) | ||
| 141 | { | ||
| 142 | int ret; | ||
| 143 | struct vmw_user_context *uctx = | ||
| 144 | container_of(res, struct vmw_user_context, res); | ||
| 145 | |||
| 146 | ret = vmw_resource_init(dev_priv, res, true, | ||
| 147 | res_free, &vmw_gb_context_func); | ||
| 148 | res->backup_size = SVGA3D_CONTEXT_DATA_SIZE; | ||
| 149 | |||
| 150 | if (unlikely(ret != 0)) { | ||
| 151 | if (res_free) | ||
| 152 | res_free(res); | ||
| 153 | else | ||
| 154 | kfree(res); | ||
| 155 | return ret; | ||
| 156 | } | ||
| 157 | |||
| 158 | memset(&uctx->cbs, 0, sizeof(uctx->cbs)); | ||
| 159 | INIT_LIST_HEAD(&uctx->cbs.list); | ||
| 160 | |||
| 161 | vmw_resource_activate(res, vmw_hw_context_destroy); | ||
| 162 | return 0; | ||
| 163 | } | ||
| 164 | |||
| 95 | static int vmw_context_init(struct vmw_private *dev_priv, | 165 | static int vmw_context_init(struct vmw_private *dev_priv, |
| 96 | struct vmw_resource *res, | 166 | struct vmw_resource *res, |
| 97 | void (*res_free) (struct vmw_resource *res)) | 167 | void (*res_free) (struct vmw_resource *res)) |
| @@ -103,6 +173,9 @@ static int vmw_context_init(struct vmw_private *dev_priv, | |||
| 103 | SVGA3dCmdDefineContext body; | 173 | SVGA3dCmdDefineContext body; |
| 104 | } *cmd; | 174 | } *cmd; |
| 105 | 175 | ||
| 176 | if (dev_priv->has_mob) | ||
| 177 | return vmw_gb_context_init(dev_priv, res, res_free); | ||
| 178 | |||
| 106 | ret = vmw_resource_init(dev_priv, res, false, | 179 | ret = vmw_resource_init(dev_priv, res, false, |
| 107 | res_free, &vmw_legacy_context_func); | 180 | res_free, &vmw_legacy_context_func); |
| 108 | 181 | ||
| @@ -154,6 +227,184 @@ struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv) | |||
| 154 | return (ret == 0) ? res : NULL; | 227 | return (ret == 0) ? res : NULL; |
| 155 | } | 228 | } |
| 156 | 229 | ||
| 230 | |||
| 231 | static int vmw_gb_context_create(struct vmw_resource *res) | ||
| 232 | { | ||
| 233 | struct vmw_private *dev_priv = res->dev_priv; | ||
| 234 | int ret; | ||
| 235 | struct { | ||
| 236 | SVGA3dCmdHeader header; | ||
| 237 | SVGA3dCmdDefineGBContext body; | ||
| 238 | } *cmd; | ||
| 239 | |||
| 240 | if (likely(res->id != -1)) | ||
| 241 | return 0; | ||
| 242 | |||
| 243 | ret = vmw_resource_alloc_id(res); | ||
| 244 | if (unlikely(ret != 0)) { | ||
| 245 | DRM_ERROR("Failed to allocate a context id.\n"); | ||
| 246 | goto out_no_id; | ||
| 247 | } | ||
| 248 | |||
| 249 | if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) { | ||
| 250 | ret = -EBUSY; | ||
| 251 | goto out_no_fifo; | ||
| 252 | } | ||
| 253 | |||
| 254 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
| 255 | if (unlikely(cmd == NULL)) { | ||
| 256 | DRM_ERROR("Failed reserving FIFO space for context " | ||
| 257 | "creation.\n"); | ||
| 258 | ret = -ENOMEM; | ||
| 259 | goto out_no_fifo; | ||
| 260 | } | ||
| 261 | |||
| 262 | cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT; | ||
| 263 | cmd->header.size = sizeof(cmd->body); | ||
| 264 | cmd->body.cid = res->id; | ||
| 265 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
| 266 | (void) vmw_3d_resource_inc(dev_priv, false); | ||
| 267 | |||
| 268 | return 0; | ||
| 269 | |||
| 270 | out_no_fifo: | ||
| 271 | vmw_resource_release_id(res); | ||
| 272 | out_no_id: | ||
| 273 | return ret; | ||
| 274 | } | ||
| 275 | |||
| 276 | static int vmw_gb_context_bind(struct vmw_resource *res, | ||
| 277 | struct ttm_validate_buffer *val_buf) | ||
| 278 | { | ||
| 279 | struct vmw_private *dev_priv = res->dev_priv; | ||
| 280 | struct { | ||
| 281 | SVGA3dCmdHeader header; | ||
| 282 | SVGA3dCmdBindGBContext body; | ||
| 283 | } *cmd; | ||
| 284 | struct ttm_buffer_object *bo = val_buf->bo; | ||
| 285 | |||
| 286 | BUG_ON(bo->mem.mem_type != VMW_PL_MOB); | ||
| 287 | |||
| 288 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
| 289 | if (unlikely(cmd == NULL)) { | ||
| 290 | DRM_ERROR("Failed reserving FIFO space for context " | ||
| 291 | "binding.\n"); | ||
| 292 | return -ENOMEM; | ||
| 293 | } | ||
| 294 | |||
| 295 | cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT; | ||
| 296 | cmd->header.size = sizeof(cmd->body); | ||
| 297 | cmd->body.cid = res->id; | ||
| 298 | cmd->body.mobid = bo->mem.start; | ||
| 299 | cmd->body.validContents = res->backup_dirty; | ||
| 300 | res->backup_dirty = false; | ||
| 301 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
| 302 | |||
| 303 | return 0; | ||
| 304 | } | ||
| 305 | |||
| 306 | static int vmw_gb_context_unbind(struct vmw_resource *res, | ||
| 307 | bool readback, | ||
| 308 | struct ttm_validate_buffer *val_buf) | ||
| 309 | { | ||
| 310 | struct vmw_private *dev_priv = res->dev_priv; | ||
| 311 | struct ttm_buffer_object *bo = val_buf->bo; | ||
| 312 | struct vmw_fence_obj *fence; | ||
| 313 | struct vmw_user_context *uctx = | ||
| 314 | container_of(res, struct vmw_user_context, res); | ||
| 315 | |||
| 316 | struct { | ||
| 317 | SVGA3dCmdHeader header; | ||
| 318 | SVGA3dCmdReadbackGBContext body; | ||
| 319 | } *cmd1; | ||
| 320 | struct { | ||
| 321 | SVGA3dCmdHeader header; | ||
| 322 | SVGA3dCmdBindGBContext body; | ||
| 323 | } *cmd2; | ||
| 324 | uint32_t submit_size; | ||
| 325 | uint8_t *cmd; | ||
| 326 | |||
| 327 | |||
| 328 | BUG_ON(bo->mem.mem_type != VMW_PL_MOB); | ||
| 329 | |||
| 330 | mutex_lock(&dev_priv->binding_mutex); | ||
| 331 | vmw_context_binding_state_kill(&uctx->cbs); | ||
| 332 | |||
| 333 | submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0); | ||
| 334 | |||
| 335 | cmd = vmw_fifo_reserve(dev_priv, submit_size); | ||
| 336 | if (unlikely(cmd == NULL)) { | ||
| 337 | DRM_ERROR("Failed reserving FIFO space for context " | ||
| 338 | "unbinding.\n"); | ||
| 339 | mutex_unlock(&dev_priv->binding_mutex); | ||
| 340 | return -ENOMEM; | ||
| 341 | } | ||
| 342 | |||
| 343 | cmd2 = (void *) cmd; | ||
| 344 | if (readback) { | ||
| 345 | cmd1 = (void *) cmd; | ||
| 346 | cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT; | ||
| 347 | cmd1->header.size = sizeof(cmd1->body); | ||
| 348 | cmd1->body.cid = res->id; | ||
| 349 | cmd2 = (void *) (&cmd1[1]); | ||
| 350 | } | ||
| 351 | cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT; | ||
| 352 | cmd2->header.size = sizeof(cmd2->body); | ||
| 353 | cmd2->body.cid = res->id; | ||
| 354 | cmd2->body.mobid = SVGA3D_INVALID_ID; | ||
| 355 | |||
| 356 | vmw_fifo_commit(dev_priv, submit_size); | ||
| 357 | mutex_unlock(&dev_priv->binding_mutex); | ||
| 358 | |||
| 359 | /* | ||
| 360 | * Create a fence object and fence the backup buffer. | ||
| 361 | */ | ||
| 362 | |||
| 363 | (void) vmw_execbuf_fence_commands(NULL, dev_priv, | ||
| 364 | &fence, NULL); | ||
| 365 | |||
| 366 | vmw_fence_single_bo(bo, fence); | ||
| 367 | |||
| 368 | if (likely(fence != NULL)) | ||
| 369 | vmw_fence_obj_unreference(&fence); | ||
| 370 | |||
| 371 | return 0; | ||
| 372 | } | ||
| 373 | |||
| 374 | static int vmw_gb_context_destroy(struct vmw_resource *res) | ||
| 375 | { | ||
| 376 | struct vmw_private *dev_priv = res->dev_priv; | ||
| 377 | struct { | ||
| 378 | SVGA3dCmdHeader header; | ||
| 379 | SVGA3dCmdDestroyGBContext body; | ||
| 380 | } *cmd; | ||
| 381 | struct vmw_user_context *uctx = | ||
| 382 | container_of(res, struct vmw_user_context, res); | ||
| 383 | |||
| 384 | BUG_ON(!list_empty(&uctx->cbs.list)); | ||
| 385 | |||
| 386 | if (likely(res->id == -1)) | ||
| 387 | return 0; | ||
| 388 | |||
| 389 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
| 390 | if (unlikely(cmd == NULL)) { | ||
| 391 | DRM_ERROR("Failed reserving FIFO space for context " | ||
| 392 | "destruction.\n"); | ||
| 393 | return -ENOMEM; | ||
| 394 | } | ||
| 395 | |||
| 396 | cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT; | ||
| 397 | cmd->header.size = sizeof(cmd->body); | ||
| 398 | cmd->body.cid = res->id; | ||
| 399 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
| 400 | if (dev_priv->query_cid == res->id) | ||
| 401 | dev_priv->query_cid_valid = false; | ||
| 402 | vmw_resource_release_id(res); | ||
| 403 | vmw_3d_resource_dec(dev_priv, false); | ||
| 404 | |||
| 405 | return 0; | ||
| 406 | } | ||
| 407 | |||
| 157 | /** | 408 | /** |
| 158 | * User-space context management: | 409 | * User-space context management: |
| 159 | */ | 410 | */ |
| @@ -272,3 +523,283 @@ out_unlock: | |||
| 272 | return ret; | 523 | return ret; |
| 273 | 524 | ||
| 274 | } | 525 | } |
| 526 | |||
| 527 | /** | ||
| 528 | * vmw_context_scrub_shader - scrub a shader binding from a context. | ||
| 529 | * | ||
| 530 | * @bi: single binding information. | ||
| 531 | */ | ||
| 532 | static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi) | ||
| 533 | { | ||
| 534 | struct vmw_private *dev_priv = bi->ctx->dev_priv; | ||
| 535 | struct { | ||
| 536 | SVGA3dCmdHeader header; | ||
| 537 | SVGA3dCmdSetShader body; | ||
| 538 | } *cmd; | ||
| 539 | |||
| 540 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
| 541 | if (unlikely(cmd == NULL)) { | ||
| 542 | DRM_ERROR("Failed reserving FIFO space for shader " | ||
| 543 | "unbinding.\n"); | ||
| 544 | return -ENOMEM; | ||
| 545 | } | ||
| 546 | |||
| 547 | cmd->header.id = SVGA_3D_CMD_SET_SHADER; | ||
| 548 | cmd->header.size = sizeof(cmd->body); | ||
| 549 | cmd->body.cid = bi->ctx->id; | ||
| 550 | cmd->body.type = bi->i1.shader_type; | ||
| 551 | cmd->body.shid = SVGA3D_INVALID_ID; | ||
| 552 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
| 553 | |||
| 554 | return 0; | ||
| 555 | } | ||
| 556 | |||
| 557 | /** | ||
| 558 | * vmw_context_scrub_render_target - scrub a render target binding | ||
| 559 | * from a context. | ||
| 560 | * | ||
| 561 | * @bi: single binding information. | ||
| 562 | */ | ||
| 563 | static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi) | ||
| 564 | { | ||
| 565 | struct vmw_private *dev_priv = bi->ctx->dev_priv; | ||
| 566 | struct { | ||
| 567 | SVGA3dCmdHeader header; | ||
| 568 | SVGA3dCmdSetRenderTarget body; | ||
| 569 | } *cmd; | ||
| 570 | |||
| 571 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
| 572 | if (unlikely(cmd == NULL)) { | ||
| 573 | DRM_ERROR("Failed reserving FIFO space for render target " | ||
| 574 | "unbinding.\n"); | ||
| 575 | return -ENOMEM; | ||
| 576 | } | ||
| 577 | |||
| 578 | cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET; | ||
| 579 | cmd->header.size = sizeof(cmd->body); | ||
| 580 | cmd->body.cid = bi->ctx->id; | ||
| 581 | cmd->body.type = bi->i1.rt_type; | ||
| 582 | cmd->body.target.sid = SVGA3D_INVALID_ID; | ||
| 583 | cmd->body.target.face = 0; | ||
| 584 | cmd->body.target.mipmap = 0; | ||
| 585 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
| 586 | |||
| 587 | return 0; | ||
| 588 | } | ||
| 589 | |||
| 590 | /** | ||
| 591 | * vmw_context_scrub_texture - scrub a texture binding from a context. | ||
| 592 | * | ||
| 593 | * @bi: single binding information. | ||
| 594 | * | ||
| 595 | * TODO: Possibly complement this function with a function that takes | ||
| 596 | * a list of texture bindings and combines them to a single command. | ||
| 597 | */ | ||
| 598 | static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi) | ||
| 599 | { | ||
| 600 | struct vmw_private *dev_priv = bi->ctx->dev_priv; | ||
| 601 | struct { | ||
| 602 | SVGA3dCmdHeader header; | ||
| 603 | struct { | ||
| 604 | SVGA3dCmdSetTextureState c; | ||
| 605 | SVGA3dTextureState s1; | ||
| 606 | } body; | ||
| 607 | } *cmd; | ||
| 608 | |||
| 609 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
| 610 | if (unlikely(cmd == NULL)) { | ||
| 611 | DRM_ERROR("Failed reserving FIFO space for texture " | ||
| 612 | "unbinding.\n"); | ||
| 613 | return -ENOMEM; | ||
| 614 | } | ||
| 615 | |||
| 616 | |||
| 617 | cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE; | ||
| 618 | cmd->header.size = sizeof(cmd->body); | ||
| 619 | cmd->body.c.cid = bi->ctx->id; | ||
| 620 | cmd->body.s1.stage = bi->i1.texture_stage; | ||
| 621 | cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE; | ||
| 622 | cmd->body.s1.value = (uint32) SVGA3D_INVALID_ID; | ||
| 623 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
| 624 | |||
| 625 | return 0; | ||
| 626 | } | ||
| 627 | |||
| 628 | /** | ||
| 629 | * vmw_context_binding_drop: Stop tracking a context binding | ||
| 630 | * | ||
| 631 | * @cb: Pointer to binding tracker storage. | ||
| 632 | * | ||
| 633 | * Stops tracking a context binding, and re-initializes its storage. | ||
| 634 | * Typically used when the context binding is replaced with a binding to | ||
| 635 | * another (or the same, for that matter) resource. | ||
| 636 | */ | ||
| 637 | static void vmw_context_binding_drop(struct vmw_ctx_binding *cb) | ||
| 638 | { | ||
| 639 | list_del(&cb->ctx_list); | ||
| 640 | if (!list_empty(&cb->res_list)) | ||
| 641 | list_del(&cb->res_list); | ||
| 642 | cb->bi.ctx = NULL; | ||
| 643 | } | ||
| 644 | |||
| 645 | /** | ||
| 646 | * vmw_context_binding_add: Start tracking a context binding | ||
| 647 | * | ||
| 648 | * @cbs: Pointer to the context binding state tracker. | ||
| 649 | * @bi: Information about the binding to track. | ||
| 650 | * | ||
| 651 | * Performs basic checks on the binding to make sure arguments are within | ||
| 652 | * bounds and then starts tracking the binding in the context binding | ||
| 653 | * state structure @cbs. | ||
| 654 | */ | ||
| 655 | int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs, | ||
| 656 | const struct vmw_ctx_bindinfo *bi) | ||
| 657 | { | ||
| 658 | struct vmw_ctx_binding *loc; | ||
| 659 | |||
| 660 | switch (bi->bt) { | ||
| 661 | case vmw_ctx_binding_rt: | ||
| 662 | if (unlikely((unsigned)bi->i1.rt_type >= SVGA3D_RT_MAX)) { | ||
| 663 | DRM_ERROR("Illegal render target type %u.\n", | ||
| 664 | (unsigned) bi->i1.rt_type); | ||
| 665 | return -EINVAL; | ||
| 666 | } | ||
| 667 | loc = &cbs->render_targets[bi->i1.rt_type]; | ||
| 668 | break; | ||
| 669 | case vmw_ctx_binding_tex: | ||
| 670 | if (unlikely((unsigned)bi->i1.texture_stage >= | ||
| 671 | SVGA3D_NUM_TEXTURE_UNITS)) { | ||
| 672 | DRM_ERROR("Illegal texture/sampler unit %u.\n", | ||
| 673 | (unsigned) bi->i1.texture_stage); | ||
| 674 | return -EINVAL; | ||
| 675 | } | ||
| 676 | loc = &cbs->texture_units[bi->i1.texture_stage]; | ||
| 677 | break; | ||
| 678 | case vmw_ctx_binding_shader: | ||
| 679 | if (unlikely((unsigned)bi->i1.shader_type >= | ||
| 680 | SVGA3D_SHADERTYPE_MAX)) { | ||
| 681 | DRM_ERROR("Illegal shader type %u.\n", | ||
| 682 | (unsigned) bi->i1.shader_type); | ||
| 683 | return -EINVAL; | ||
| 684 | } | ||
| 685 | loc = &cbs->shaders[bi->i1.shader_type]; | ||
| 686 | break; | ||
| 687 | default: | ||
| 688 | BUG(); | ||
| 689 | } | ||
| 690 | |||
| 691 | if (loc->bi.ctx != NULL) | ||
| 692 | vmw_context_binding_drop(loc); | ||
| 693 | |||
| 694 | loc->bi = *bi; | ||
| 695 | list_add_tail(&loc->ctx_list, &cbs->list); | ||
| 696 | INIT_LIST_HEAD(&loc->res_list); | ||
| 697 | |||
| 698 | return 0; | ||
| 699 | } | ||
| 700 | |||
| 701 | /** | ||
| 702 | * vmw_context_binding_transfer: Transfer a context binding tracking entry. | ||
| 703 | * | ||
| 704 | * @cbs: Pointer to the persistent context binding state tracker. | ||
| 705 | * @bi: Information about the binding to track. | ||
| 706 | * | ||
| 707 | */ | ||
| 708 | static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs, | ||
| 709 | const struct vmw_ctx_bindinfo *bi) | ||
| 710 | { | ||
| 711 | struct vmw_ctx_binding *loc; | ||
| 712 | |||
| 713 | switch (bi->bt) { | ||
| 714 | case vmw_ctx_binding_rt: | ||
| 715 | loc = &cbs->render_targets[bi->i1.rt_type]; | ||
| 716 | break; | ||
| 717 | case vmw_ctx_binding_tex: | ||
| 718 | loc = &cbs->texture_units[bi->i1.texture_stage]; | ||
| 719 | break; | ||
| 720 | case vmw_ctx_binding_shader: | ||
| 721 | loc = &cbs->shaders[bi->i1.shader_type]; | ||
| 722 | break; | ||
| 723 | default: | ||
| 724 | BUG(); | ||
| 725 | } | ||
| 726 | |||
| 727 | if (loc->bi.ctx != NULL) | ||
| 728 | vmw_context_binding_drop(loc); | ||
| 729 | |||
| 730 | loc->bi = *bi; | ||
| 731 | list_add_tail(&loc->ctx_list, &cbs->list); | ||
| 732 | if (bi->res != NULL) | ||
| 733 | list_add_tail(&loc->res_list, &bi->res->binding_head); | ||
| 734 | else | ||
| 735 | INIT_LIST_HEAD(&loc->res_list); | ||
| 736 | } | ||
| 737 | |||
| 738 | /** | ||
| 739 | * vmw_context_binding_kill - Kill a binding on the device | ||
| 740 | * and stop tracking it. | ||
| 741 | * | ||
| 742 | * @cb: Pointer to binding tracker storage. | ||
| 743 | * | ||
| 744 | * Emits FIFO commands to scrub a binding represented by @cb. | ||
| 745 | * Then stops tracking the binding and re-initializes its storage. | ||
| 746 | */ | ||
| 747 | static void vmw_context_binding_kill(struct vmw_ctx_binding *cb) | ||
| 748 | { | ||
| 749 | (void) vmw_scrub_funcs[cb->bi.bt](&cb->bi); | ||
| 750 | vmw_context_binding_drop(cb); | ||
| 751 | } | ||
| 752 | |||
| 753 | /** | ||
| 754 | * vmw_context_binding_state_kill - Kill all bindings associated with a | ||
| 755 | * struct vmw_ctx_binding state structure, and re-initialize the structure. | ||
| 756 | * | ||
| 757 | * @cbs: Pointer to the context binding state tracker. | ||
| 758 | * | ||
| 759 | * Emits commands to scrub all bindings associated with the | ||
| 760 | * context binding state tracker. Then re-initializes the whole structure. | ||
| 761 | */ | ||
| 762 | static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs) | ||
| 763 | { | ||
| 764 | struct vmw_ctx_binding *entry, *next; | ||
| 765 | |||
| 766 | list_for_each_entry_safe(entry, next, &cbs->list, ctx_list) | ||
| 767 | vmw_context_binding_kill(entry); | ||
| 768 | } | ||
| 769 | |||
| 770 | /** | ||
| 771 | * vmw_context_binding_res_list_kill - Kill all bindings on a | ||
| 772 | * resource binding list | ||
| 773 | * | ||
| 774 | * @head: list head of resource binding list | ||
| 775 | * | ||
| 776 | * Kills all bindings associated with a specific resource. Typically | ||
| 777 | * called before the resource is destroyed. | ||
| 778 | */ | ||
| 779 | void vmw_context_binding_res_list_kill(struct list_head *head) | ||
| 780 | { | ||
| 781 | struct vmw_ctx_binding *entry, *next; | ||
| 782 | |||
| 783 | list_for_each_entry_safe(entry, next, head, res_list) | ||
| 784 | vmw_context_binding_kill(entry); | ||
| 785 | } | ||
| 786 | |||
| 787 | /** | ||
| 788 | * vmw_context_binding_state_transfer - Commit staged binding info | ||
| 789 | * | ||
| 790 | * @ctx: Pointer to context to commit the staged binding info to. | ||
| 791 | * @from: Staged binding info built during execbuf. | ||
| 792 | * | ||
| 793 | * Transfers binding info from a temporary structure to the persistent | ||
| 794 | * structure in the context. This can be done once commands | ||
| 795 | */ | ||
| 796 | void vmw_context_binding_state_transfer(struct vmw_resource *ctx, | ||
| 797 | struct vmw_ctx_binding_state *from) | ||
| 798 | { | ||
| 799 | struct vmw_user_context *uctx = | ||
| 800 | container_of(ctx, struct vmw_user_context, res); | ||
| 801 | struct vmw_ctx_binding *entry, *next; | ||
| 802 | |||
| 803 | list_for_each_entry_safe(entry, next, &from->list, ctx_list) | ||
| 804 | vmw_context_binding_transfer(&uctx->cbs, &entry->bi); | ||
| 805 | } | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c index d4e54fcc0acd..a75840211b3c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c | |||
| @@ -290,8 +290,7 @@ void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo, | |||
| 290 | /** | 290 | /** |
| 291 | * vmw_bo_pin - Pin or unpin a buffer object without moving it. | 291 | * vmw_bo_pin - Pin or unpin a buffer object without moving it. |
| 292 | * | 292 | * |
| 293 | * @bo: The buffer object. Must be reserved, and present either in VRAM | 293 | * @bo: The buffer object. Must be reserved. |
| 294 | * or GMR memory. | ||
| 295 | * @pin: Whether to pin or unpin. | 294 | * @pin: Whether to pin or unpin. |
| 296 | * | 295 | * |
| 297 | */ | 296 | */ |
| @@ -303,10 +302,9 @@ void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin) | |||
| 303 | int ret; | 302 | int ret; |
| 304 | 303 | ||
| 305 | lockdep_assert_held(&bo->resv->lock.base); | 304 | lockdep_assert_held(&bo->resv->lock.base); |
| 306 | BUG_ON(old_mem_type != TTM_PL_VRAM && | ||
| 307 | old_mem_type != VMW_PL_GMR); | ||
| 308 | 305 | ||
| 309 | pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED; | 306 | pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB |
| 307 | | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED; | ||
| 310 | if (pin) | 308 | if (pin) |
| 311 | pl_flags |= TTM_PL_FLAG_NO_EVICT; | 309 | pl_flags |= TTM_PL_FLAG_NO_EVICT; |
| 312 | 310 | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index c7a549694e59..9893328f8fdc 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
| @@ -112,6 +112,21 @@ | |||
| 112 | #define DRM_IOCTL_VMW_UPDATE_LAYOUT \ | 112 | #define DRM_IOCTL_VMW_UPDATE_LAYOUT \ |
| 113 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \ | 113 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \ |
| 114 | struct drm_vmw_update_layout_arg) | 114 | struct drm_vmw_update_layout_arg) |
| 115 | #define DRM_IOCTL_VMW_CREATE_SHADER \ | ||
| 116 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER, \ | ||
| 117 | struct drm_vmw_shader_create_arg) | ||
| 118 | #define DRM_IOCTL_VMW_UNREF_SHADER \ | ||
| 119 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER, \ | ||
| 120 | struct drm_vmw_shader_arg) | ||
| 121 | #define DRM_IOCTL_VMW_GB_SURFACE_CREATE \ | ||
| 122 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \ | ||
| 123 | union drm_vmw_gb_surface_create_arg) | ||
| 124 | #define DRM_IOCTL_VMW_GB_SURFACE_REF \ | ||
| 125 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \ | ||
| 126 | union drm_vmw_gb_surface_reference_arg) | ||
| 127 | #define DRM_IOCTL_VMW_SYNCCPU \ | ||
| 128 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \ | ||
| 129 | struct drm_vmw_synccpu_arg) | ||
| 115 | 130 | ||
| 116 | /** | 131 | /** |
| 117 | * The core DRM version of this macro doesn't account for | 132 | * The core DRM version of this macro doesn't account for |
| @@ -177,6 +192,21 @@ static const struct drm_ioctl_desc vmw_ioctls[] = { | |||
| 177 | VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, | 192 | VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, |
| 178 | vmw_kms_update_layout_ioctl, | 193 | vmw_kms_update_layout_ioctl, |
| 179 | DRM_MASTER | DRM_UNLOCKED), | 194 | DRM_MASTER | DRM_UNLOCKED), |
| 195 | VMW_IOCTL_DEF(VMW_CREATE_SHADER, | ||
| 196 | vmw_shader_define_ioctl, | ||
| 197 | DRM_AUTH | DRM_UNLOCKED), | ||
| 198 | VMW_IOCTL_DEF(VMW_UNREF_SHADER, | ||
| 199 | vmw_shader_destroy_ioctl, | ||
| 200 | DRM_AUTH | DRM_UNLOCKED), | ||
| 201 | VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE, | ||
| 202 | vmw_gb_surface_define_ioctl, | ||
| 203 | DRM_AUTH | DRM_UNLOCKED), | ||
| 204 | VMW_IOCTL_DEF(VMW_GB_SURFACE_REF, | ||
| 205 | vmw_gb_surface_reference_ioctl, | ||
| 206 | DRM_AUTH | DRM_UNLOCKED), | ||
| 207 | VMW_IOCTL_DEF(VMW_SYNCCPU, | ||
| 208 | vmw_user_dmabuf_synccpu_ioctl, | ||
| 209 | DRM_AUTH | DRM_UNLOCKED), | ||
| 180 | }; | 210 | }; |
| 181 | 211 | ||
| 182 | static struct pci_device_id vmw_pci_id_list[] = { | 212 | static struct pci_device_id vmw_pci_id_list[] = { |
| @@ -189,6 +219,7 @@ static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON); | |||
| 189 | static int vmw_force_iommu; | 219 | static int vmw_force_iommu; |
| 190 | static int vmw_restrict_iommu; | 220 | static int vmw_restrict_iommu; |
| 191 | static int vmw_force_coherent; | 221 | static int vmw_force_coherent; |
| 222 | static int vmw_restrict_dma_mask; | ||
| 192 | 223 | ||
| 193 | static int vmw_probe(struct pci_dev *, const struct pci_device_id *); | 224 | static int vmw_probe(struct pci_dev *, const struct pci_device_id *); |
| 194 | static void vmw_master_init(struct vmw_master *); | 225 | static void vmw_master_init(struct vmw_master *); |
| @@ -203,6 +234,8 @@ MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages"); | |||
| 203 | module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600); | 234 | module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600); |
| 204 | MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages"); | 235 | MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages"); |
| 205 | module_param_named(force_coherent, vmw_force_coherent, int, 0600); | 236 | module_param_named(force_coherent, vmw_force_coherent, int, 0600); |
| 237 | MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU"); | ||
| 238 | module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600); | ||
| 206 | 239 | ||
| 207 | 240 | ||
| 208 | static void vmw_print_capabilities(uint32_t capabilities) | 241 | static void vmw_print_capabilities(uint32_t capabilities) |
| @@ -240,38 +273,52 @@ static void vmw_print_capabilities(uint32_t capabilities) | |||
| 240 | DRM_INFO(" GMR2.\n"); | 273 | DRM_INFO(" GMR2.\n"); |
| 241 | if (capabilities & SVGA_CAP_SCREEN_OBJECT_2) | 274 | if (capabilities & SVGA_CAP_SCREEN_OBJECT_2) |
| 242 | DRM_INFO(" Screen Object 2.\n"); | 275 | DRM_INFO(" Screen Object 2.\n"); |
| 276 | if (capabilities & SVGA_CAP_COMMAND_BUFFERS) | ||
| 277 | DRM_INFO(" Command Buffers.\n"); | ||
| 278 | if (capabilities & SVGA_CAP_CMD_BUFFERS_2) | ||
| 279 | DRM_INFO(" Command Buffers 2.\n"); | ||
| 280 | if (capabilities & SVGA_CAP_GBOBJECTS) | ||
| 281 | DRM_INFO(" Guest Backed Resources.\n"); | ||
| 243 | } | 282 | } |
| 244 | 283 | ||
| 245 | |||
| 246 | /** | 284 | /** |
| 247 | * vmw_execbuf_prepare_dummy_query - Initialize a query result structure at | 285 | * vmw_dummy_query_bo_create - create a bo to hold a dummy query result |
| 248 | * the start of a buffer object. | ||
| 249 | * | 286 | * |
| 250 | * @dev_priv: The device private structure. | 287 | * @dev_priv: A device private structure. |
| 251 | * | 288 | * |
| 252 | * This function will idle the buffer using an uninterruptible wait, then | 289 | * This function creates a small buffer object that holds the query |
| 253 | * map the first page and initialize a pending occlusion query result structure, | 290 | * result for dummy queries emitted as query barriers. |
| 254 | * Finally it will unmap the buffer. | 291 | * The function will then map the first page and initialize a pending |
| 292 | * occlusion query result structure, Finally it will unmap the buffer. | ||
| 293 | * No interruptible waits are done within this function. | ||
| 255 | * | 294 | * |
| 256 | * TODO: Since we're only mapping a single page, we should optimize the map | 295 | * Returns an error if bo creation or initialization fails. |
| 257 | * to use kmap_atomic / iomap_atomic. | ||
| 258 | */ | 296 | */ |
| 259 | static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv) | 297 | static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv) |
| 260 | { | 298 | { |
| 299 | int ret; | ||
| 300 | struct ttm_buffer_object *bo; | ||
| 261 | struct ttm_bo_kmap_obj map; | 301 | struct ttm_bo_kmap_obj map; |
| 262 | volatile SVGA3dQueryResult *result; | 302 | volatile SVGA3dQueryResult *result; |
| 263 | bool dummy; | 303 | bool dummy; |
| 264 | int ret; | ||
| 265 | struct ttm_bo_device *bdev = &dev_priv->bdev; | ||
| 266 | struct ttm_buffer_object *bo = dev_priv->dummy_query_bo; | ||
| 267 | 304 | ||
| 268 | ttm_bo_reserve(bo, false, false, false, 0); | 305 | /* |
| 269 | spin_lock(&bdev->fence_lock); | 306 | * Create the bo as pinned, so that a tryreserve will |
| 270 | ret = ttm_bo_wait(bo, false, false, false); | 307 | * immediately succeed. This is because we're the only |
| 271 | spin_unlock(&bdev->fence_lock); | 308 | * user of the bo currently. |
| 309 | */ | ||
| 310 | ret = ttm_bo_create(&dev_priv->bdev, | ||
| 311 | PAGE_SIZE, | ||
| 312 | ttm_bo_type_device, | ||
| 313 | &vmw_sys_ne_placement, | ||
| 314 | 0, false, NULL, | ||
| 315 | &bo); | ||
| 316 | |||
| 272 | if (unlikely(ret != 0)) | 317 | if (unlikely(ret != 0)) |
| 273 | (void) vmw_fallback_wait(dev_priv, false, true, 0, false, | 318 | return ret; |
| 274 | 10*HZ); | 319 | |
| 320 | ret = ttm_bo_reserve(bo, false, true, false, 0); | ||
| 321 | BUG_ON(ret != 0); | ||
| 275 | 322 | ||
| 276 | ret = ttm_bo_kmap(bo, 0, 1, &map); | 323 | ret = ttm_bo_kmap(bo, 0, 1, &map); |
| 277 | if (likely(ret == 0)) { | 324 | if (likely(ret == 0)) { |
| @@ -280,34 +327,19 @@ static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv) | |||
| 280 | result->state = SVGA3D_QUERYSTATE_PENDING; | 327 | result->state = SVGA3D_QUERYSTATE_PENDING; |
| 281 | result->result32 = 0xff; | 328 | result->result32 = 0xff; |
| 282 | ttm_bo_kunmap(&map); | 329 | ttm_bo_kunmap(&map); |
| 283 | } else | 330 | } |
| 284 | DRM_ERROR("Dummy query buffer map failed.\n"); | 331 | vmw_bo_pin(bo, false); |
| 285 | ttm_bo_unreserve(bo); | 332 | ttm_bo_unreserve(bo); |
| 286 | } | ||
| 287 | 333 | ||
| 334 | if (unlikely(ret != 0)) { | ||
| 335 | DRM_ERROR("Dummy query buffer map failed.\n"); | ||
| 336 | ttm_bo_unref(&bo); | ||
| 337 | } else | ||
| 338 | dev_priv->dummy_query_bo = bo; | ||
| 288 | 339 | ||
| 289 | /** | 340 | return ret; |
| 290 | * vmw_dummy_query_bo_create - create a bo to hold a dummy query result | ||
| 291 | * | ||
| 292 | * @dev_priv: A device private structure. | ||
| 293 | * | ||
| 294 | * This function creates a small buffer object that holds the query | ||
| 295 | * result for dummy queries emitted as query barriers. | ||
| 296 | * No interruptible waits are done within this function. | ||
| 297 | * | ||
| 298 | * Returns an error if bo creation fails. | ||
| 299 | */ | ||
| 300 | static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv) | ||
| 301 | { | ||
| 302 | return ttm_bo_create(&dev_priv->bdev, | ||
| 303 | PAGE_SIZE, | ||
| 304 | ttm_bo_type_device, | ||
| 305 | &vmw_vram_sys_placement, | ||
| 306 | 0, false, NULL, | ||
| 307 | &dev_priv->dummy_query_bo); | ||
| 308 | } | 341 | } |
| 309 | 342 | ||
| 310 | |||
| 311 | static int vmw_request_device(struct vmw_private *dev_priv) | 343 | static int vmw_request_device(struct vmw_private *dev_priv) |
| 312 | { | 344 | { |
| 313 | int ret; | 345 | int ret; |
| @@ -318,14 +350,24 @@ static int vmw_request_device(struct vmw_private *dev_priv) | |||
| 318 | return ret; | 350 | return ret; |
| 319 | } | 351 | } |
| 320 | vmw_fence_fifo_up(dev_priv->fman); | 352 | vmw_fence_fifo_up(dev_priv->fman); |
| 353 | if (dev_priv->has_mob) { | ||
| 354 | ret = vmw_otables_setup(dev_priv); | ||
| 355 | if (unlikely(ret != 0)) { | ||
| 356 | DRM_ERROR("Unable to initialize " | ||
| 357 | "guest Memory OBjects.\n"); | ||
| 358 | goto out_no_mob; | ||
| 359 | } | ||
| 360 | } | ||
| 321 | ret = vmw_dummy_query_bo_create(dev_priv); | 361 | ret = vmw_dummy_query_bo_create(dev_priv); |
| 322 | if (unlikely(ret != 0)) | 362 | if (unlikely(ret != 0)) |
| 323 | goto out_no_query_bo; | 363 | goto out_no_query_bo; |
| 324 | vmw_dummy_query_bo_prepare(dev_priv); | ||
| 325 | 364 | ||
| 326 | return 0; | 365 | return 0; |
| 327 | 366 | ||
| 328 | out_no_query_bo: | 367 | out_no_query_bo: |
| 368 | if (dev_priv->has_mob) | ||
| 369 | vmw_otables_takedown(dev_priv); | ||
| 370 | out_no_mob: | ||
| 329 | vmw_fence_fifo_down(dev_priv->fman); | 371 | vmw_fence_fifo_down(dev_priv->fman); |
| 330 | vmw_fifo_release(dev_priv, &dev_priv->fifo); | 372 | vmw_fifo_release(dev_priv, &dev_priv->fifo); |
| 331 | return ret; | 373 | return ret; |
| @@ -341,10 +383,13 @@ static void vmw_release_device(struct vmw_private *dev_priv) | |||
| 341 | BUG_ON(dev_priv->pinned_bo != NULL); | 383 | BUG_ON(dev_priv->pinned_bo != NULL); |
| 342 | 384 | ||
| 343 | ttm_bo_unref(&dev_priv->dummy_query_bo); | 385 | ttm_bo_unref(&dev_priv->dummy_query_bo); |
| 386 | if (dev_priv->has_mob) | ||
| 387 | vmw_otables_takedown(dev_priv); | ||
| 344 | vmw_fence_fifo_down(dev_priv->fman); | 388 | vmw_fence_fifo_down(dev_priv->fman); |
| 345 | vmw_fifo_release(dev_priv, &dev_priv->fifo); | 389 | vmw_fifo_release(dev_priv, &dev_priv->fifo); |
| 346 | } | 390 | } |
| 347 | 391 | ||
| 392 | |||
| 348 | /** | 393 | /** |
| 349 | * Increase the 3d resource refcount. | 394 | * Increase the 3d resource refcount. |
| 350 | * If the count was prevously zero, initialize the fifo, switching to svga | 395 | * If the count was prevously zero, initialize the fifo, switching to svga |
| @@ -510,6 +555,33 @@ out_fixup: | |||
| 510 | return 0; | 555 | return 0; |
| 511 | } | 556 | } |
| 512 | 557 | ||
| 558 | /** | ||
| 559 | * vmw_dma_masks - set required page- and dma masks | ||
| 560 | * | ||
| 561 | * @dev: Pointer to struct drm-device | ||
| 562 | * | ||
| 563 | * With 32-bit we can only handle 32 bit PFNs. Optionally set that | ||
| 564 | * restriction also for 64-bit systems. | ||
| 565 | */ | ||
| 566 | #ifdef CONFIG_INTEL_IOMMU | ||
| 567 | static int vmw_dma_masks(struct vmw_private *dev_priv) | ||
| 568 | { | ||
| 569 | struct drm_device *dev = dev_priv->dev; | ||
| 570 | |||
| 571 | if (intel_iommu_enabled && | ||
| 572 | (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) { | ||
| 573 | DRM_INFO("Restricting DMA addresses to 44 bits.\n"); | ||
| 574 | return dma_set_mask(dev->dev, DMA_BIT_MASK(44)); | ||
| 575 | } | ||
| 576 | return 0; | ||
| 577 | } | ||
| 578 | #else | ||
| 579 | static int vmw_dma_masks(struct vmw_private *dev_priv) | ||
| 580 | { | ||
| 581 | return 0; | ||
| 582 | } | ||
| 583 | #endif | ||
| 584 | |||
| 513 | static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | 585 | static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) |
| 514 | { | 586 | { |
| 515 | struct vmw_private *dev_priv; | 587 | struct vmw_private *dev_priv; |
| @@ -532,6 +604,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
| 532 | mutex_init(&dev_priv->hw_mutex); | 604 | mutex_init(&dev_priv->hw_mutex); |
| 533 | mutex_init(&dev_priv->cmdbuf_mutex); | 605 | mutex_init(&dev_priv->cmdbuf_mutex); |
| 534 | mutex_init(&dev_priv->release_mutex); | 606 | mutex_init(&dev_priv->release_mutex); |
| 607 | mutex_init(&dev_priv->binding_mutex); | ||
| 535 | rwlock_init(&dev_priv->resource_lock); | 608 | rwlock_init(&dev_priv->resource_lock); |
| 536 | 609 | ||
| 537 | for (i = vmw_res_context; i < vmw_res_max; ++i) { | 610 | for (i = vmw_res_context; i < vmw_res_max; ++i) { |
| @@ -578,14 +651,9 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
| 578 | 651 | ||
| 579 | vmw_get_initial_size(dev_priv); | 652 | vmw_get_initial_size(dev_priv); |
| 580 | 653 | ||
| 581 | if (dev_priv->capabilities & SVGA_CAP_GMR) { | 654 | if (dev_priv->capabilities & SVGA_CAP_GMR2) { |
| 582 | dev_priv->max_gmr_descriptors = | ||
| 583 | vmw_read(dev_priv, | ||
| 584 | SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH); | ||
| 585 | dev_priv->max_gmr_ids = | 655 | dev_priv->max_gmr_ids = |
| 586 | vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS); | 656 | vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS); |
| 587 | } | ||
| 588 | if (dev_priv->capabilities & SVGA_CAP_GMR2) { | ||
| 589 | dev_priv->max_gmr_pages = | 657 | dev_priv->max_gmr_pages = |
| 590 | vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES); | 658 | vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES); |
| 591 | dev_priv->memory_size = | 659 | dev_priv->memory_size = |
| @@ -598,23 +666,42 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
| 598 | */ | 666 | */ |
| 599 | dev_priv->memory_size = 512*1024*1024; | 667 | dev_priv->memory_size = 512*1024*1024; |
| 600 | } | 668 | } |
| 669 | dev_priv->max_mob_pages = 0; | ||
| 670 | if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { | ||
| 671 | uint64_t mem_size = | ||
| 672 | vmw_read(dev_priv, | ||
| 673 | SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB); | ||
| 674 | |||
| 675 | dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE; | ||
| 676 | dev_priv->prim_bb_mem = | ||
| 677 | vmw_read(dev_priv, | ||
| 678 | SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM); | ||
| 679 | } else | ||
| 680 | dev_priv->prim_bb_mem = dev_priv->vram_size; | ||
| 681 | |||
| 682 | ret = vmw_dma_masks(dev_priv); | ||
| 683 | if (unlikely(ret != 0)) { | ||
| 684 | mutex_unlock(&dev_priv->hw_mutex); | ||
| 685 | goto out_err0; | ||
| 686 | } | ||
| 687 | |||
| 688 | if (unlikely(dev_priv->prim_bb_mem < dev_priv->vram_size)) | ||
| 689 | dev_priv->prim_bb_mem = dev_priv->vram_size; | ||
| 601 | 690 | ||
| 602 | mutex_unlock(&dev_priv->hw_mutex); | 691 | mutex_unlock(&dev_priv->hw_mutex); |
| 603 | 692 | ||
| 604 | vmw_print_capabilities(dev_priv->capabilities); | 693 | vmw_print_capabilities(dev_priv->capabilities); |
| 605 | 694 | ||
| 606 | if (dev_priv->capabilities & SVGA_CAP_GMR) { | 695 | if (dev_priv->capabilities & SVGA_CAP_GMR2) { |
| 607 | DRM_INFO("Max GMR ids is %u\n", | 696 | DRM_INFO("Max GMR ids is %u\n", |
| 608 | (unsigned)dev_priv->max_gmr_ids); | 697 | (unsigned)dev_priv->max_gmr_ids); |
| 609 | DRM_INFO("Max GMR descriptors is %u\n", | ||
| 610 | (unsigned)dev_priv->max_gmr_descriptors); | ||
| 611 | } | ||
| 612 | if (dev_priv->capabilities & SVGA_CAP_GMR2) { | ||
| 613 | DRM_INFO("Max number of GMR pages is %u\n", | 698 | DRM_INFO("Max number of GMR pages is %u\n", |
| 614 | (unsigned)dev_priv->max_gmr_pages); | 699 | (unsigned)dev_priv->max_gmr_pages); |
| 615 | DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n", | 700 | DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n", |
| 616 | (unsigned)dev_priv->memory_size / 1024); | 701 | (unsigned)dev_priv->memory_size / 1024); |
| 617 | } | 702 | } |
| 703 | DRM_INFO("Maximum display memory size is %u kiB\n", | ||
| 704 | dev_priv->prim_bb_mem / 1024); | ||
| 618 | DRM_INFO("VRAM at 0x%08x size is %u kiB\n", | 705 | DRM_INFO("VRAM at 0x%08x size is %u kiB\n", |
| 619 | dev_priv->vram_start, dev_priv->vram_size / 1024); | 706 | dev_priv->vram_start, dev_priv->vram_size / 1024); |
| 620 | DRM_INFO("MMIO at 0x%08x size is %u kiB\n", | 707 | DRM_INFO("MMIO at 0x%08x size is %u kiB\n", |
| @@ -649,12 +736,22 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
| 649 | dev_priv->has_gmr = true; | 736 | dev_priv->has_gmr = true; |
| 650 | if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) || | 737 | if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) || |
| 651 | refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, | 738 | refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, |
| 652 | dev_priv->max_gmr_ids) != 0) { | 739 | VMW_PL_GMR) != 0) { |
| 653 | DRM_INFO("No GMR memory available. " | 740 | DRM_INFO("No GMR memory available. " |
| 654 | "Graphics memory resources are very limited.\n"); | 741 | "Graphics memory resources are very limited.\n"); |
| 655 | dev_priv->has_gmr = false; | 742 | dev_priv->has_gmr = false; |
| 656 | } | 743 | } |
| 657 | 744 | ||
| 745 | if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { | ||
| 746 | dev_priv->has_mob = true; | ||
| 747 | if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB, | ||
| 748 | VMW_PL_MOB) != 0) { | ||
| 749 | DRM_INFO("No MOB memory available. " | ||
| 750 | "3D will be disabled.\n"); | ||
| 751 | dev_priv->has_mob = false; | ||
| 752 | } | ||
| 753 | } | ||
| 754 | |||
| 658 | dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start, | 755 | dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start, |
| 659 | dev_priv->mmio_size); | 756 | dev_priv->mmio_size); |
| 660 | 757 | ||
| @@ -757,6 +854,8 @@ out_err4: | |||
| 757 | iounmap(dev_priv->mmio_virt); | 854 | iounmap(dev_priv->mmio_virt); |
| 758 | out_err3: | 855 | out_err3: |
| 759 | arch_phys_wc_del(dev_priv->mmio_mtrr); | 856 | arch_phys_wc_del(dev_priv->mmio_mtrr); |
| 857 | if (dev_priv->has_mob) | ||
| 858 | (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); | ||
| 760 | if (dev_priv->has_gmr) | 859 | if (dev_priv->has_gmr) |
| 761 | (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); | 860 | (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); |
| 762 | (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); | 861 | (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); |
| @@ -801,6 +900,8 @@ static int vmw_driver_unload(struct drm_device *dev) | |||
| 801 | ttm_object_device_release(&dev_priv->tdev); | 900 | ttm_object_device_release(&dev_priv->tdev); |
| 802 | iounmap(dev_priv->mmio_virt); | 901 | iounmap(dev_priv->mmio_virt); |
| 803 | arch_phys_wc_del(dev_priv->mmio_mtrr); | 902 | arch_phys_wc_del(dev_priv->mmio_mtrr); |
| 903 | if (dev_priv->has_mob) | ||
| 904 | (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); | ||
| 804 | if (dev_priv->has_gmr) | 905 | if (dev_priv->has_gmr) |
| 805 | (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); | 906 | (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); |
| 806 | (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); | 907 | (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 20890ad8408b..554e7fa33082 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | |||
| @@ -40,9 +40,9 @@ | |||
| 40 | #include <drm/ttm/ttm_module.h> | 40 | #include <drm/ttm/ttm_module.h> |
| 41 | #include "vmwgfx_fence.h" | 41 | #include "vmwgfx_fence.h" |
| 42 | 42 | ||
| 43 | #define VMWGFX_DRIVER_DATE "20120209" | 43 | #define VMWGFX_DRIVER_DATE "20121114" |
| 44 | #define VMWGFX_DRIVER_MAJOR 2 | 44 | #define VMWGFX_DRIVER_MAJOR 2 |
| 45 | #define VMWGFX_DRIVER_MINOR 4 | 45 | #define VMWGFX_DRIVER_MINOR 5 |
| 46 | #define VMWGFX_DRIVER_PATCHLEVEL 0 | 46 | #define VMWGFX_DRIVER_PATCHLEVEL 0 |
| 47 | #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 | 47 | #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 |
| 48 | #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) | 48 | #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) |
| @@ -50,14 +50,30 @@ | |||
| 50 | #define VMWGFX_MAX_VALIDATIONS 2048 | 50 | #define VMWGFX_MAX_VALIDATIONS 2048 |
| 51 | #define VMWGFX_MAX_DISPLAYS 16 | 51 | #define VMWGFX_MAX_DISPLAYS 16 |
| 52 | #define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768 | 52 | #define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768 |
| 53 | #define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 0 | ||
| 54 | |||
| 55 | /* | ||
| 56 | * Perhaps we should have sysfs entries for these. | ||
| 57 | */ | ||
| 58 | #define VMWGFX_NUM_GB_CONTEXT 256 | ||
| 59 | #define VMWGFX_NUM_GB_SHADER 20000 | ||
| 60 | #define VMWGFX_NUM_GB_SURFACE 32768 | ||
| 61 | #define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS | ||
| 62 | #define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\ | ||
| 63 | VMWGFX_NUM_GB_SHADER +\ | ||
| 64 | VMWGFX_NUM_GB_SURFACE +\ | ||
| 65 | VMWGFX_NUM_GB_SCREEN_TARGET) | ||
| 53 | 66 | ||
| 54 | #define VMW_PL_GMR TTM_PL_PRIV0 | 67 | #define VMW_PL_GMR TTM_PL_PRIV0 |
| 55 | #define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0 | 68 | #define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0 |
| 69 | #define VMW_PL_MOB TTM_PL_PRIV1 | ||
| 70 | #define VMW_PL_FLAG_MOB TTM_PL_FLAG_PRIV1 | ||
| 56 | 71 | ||
| 57 | #define VMW_RES_CONTEXT ttm_driver_type0 | 72 | #define VMW_RES_CONTEXT ttm_driver_type0 |
| 58 | #define VMW_RES_SURFACE ttm_driver_type1 | 73 | #define VMW_RES_SURFACE ttm_driver_type1 |
| 59 | #define VMW_RES_STREAM ttm_driver_type2 | 74 | #define VMW_RES_STREAM ttm_driver_type2 |
| 60 | #define VMW_RES_FENCE ttm_driver_type3 | 75 | #define VMW_RES_FENCE ttm_driver_type3 |
| 76 | #define VMW_RES_SHADER ttm_driver_type4 | ||
| 61 | 77 | ||
| 62 | struct vmw_fpriv { | 78 | struct vmw_fpriv { |
| 63 | struct drm_master *locked_master; | 79 | struct drm_master *locked_master; |
| @@ -82,6 +98,7 @@ struct vmw_dma_buffer { | |||
| 82 | struct vmw_validate_buffer { | 98 | struct vmw_validate_buffer { |
| 83 | struct ttm_validate_buffer base; | 99 | struct ttm_validate_buffer base; |
| 84 | struct drm_hash_item hash; | 100 | struct drm_hash_item hash; |
| 101 | bool validate_as_mob; | ||
| 85 | }; | 102 | }; |
| 86 | 103 | ||
| 87 | struct vmw_res_func; | 104 | struct vmw_res_func; |
| @@ -98,6 +115,7 @@ struct vmw_resource { | |||
| 98 | const struct vmw_res_func *func; | 115 | const struct vmw_res_func *func; |
| 99 | struct list_head lru_head; /* Protected by the resource lock */ | 116 | struct list_head lru_head; /* Protected by the resource lock */ |
| 100 | struct list_head mob_head; /* Protected by @backup reserved */ | 117 | struct list_head mob_head; /* Protected by @backup reserved */ |
| 118 | struct list_head binding_head; /* Protected by binding_mutex */ | ||
| 101 | void (*res_free) (struct vmw_resource *res); | 119 | void (*res_free) (struct vmw_resource *res); |
| 102 | void (*hw_destroy) (struct vmw_resource *res); | 120 | void (*hw_destroy) (struct vmw_resource *res); |
| 103 | }; | 121 | }; |
| @@ -106,6 +124,7 @@ enum vmw_res_type { | |||
| 106 | vmw_res_context, | 124 | vmw_res_context, |
| 107 | vmw_res_surface, | 125 | vmw_res_surface, |
| 108 | vmw_res_stream, | 126 | vmw_res_stream, |
| 127 | vmw_res_shader, | ||
| 109 | vmw_res_max | 128 | vmw_res_max |
| 110 | }; | 129 | }; |
| 111 | 130 | ||
| @@ -154,6 +173,7 @@ struct vmw_fifo_state { | |||
| 154 | }; | 173 | }; |
| 155 | 174 | ||
| 156 | struct vmw_relocation { | 175 | struct vmw_relocation { |
| 176 | SVGAMobId *mob_loc; | ||
| 157 | SVGAGuestPtr *location; | 177 | SVGAGuestPtr *location; |
| 158 | uint32_t index; | 178 | uint32_t index; |
| 159 | }; | 179 | }; |
| @@ -229,6 +249,71 @@ struct vmw_piter { | |||
| 229 | struct page *(*page)(struct vmw_piter *); | 249 | struct page *(*page)(struct vmw_piter *); |
| 230 | }; | 250 | }; |
| 231 | 251 | ||
| 252 | /* | ||
| 253 | * enum vmw_ctx_binding_type - abstract resource to context binding types | ||
| 254 | */ | ||
| 255 | enum vmw_ctx_binding_type { | ||
| 256 | vmw_ctx_binding_shader, | ||
| 257 | vmw_ctx_binding_rt, | ||
| 258 | vmw_ctx_binding_tex, | ||
| 259 | vmw_ctx_binding_max | ||
| 260 | }; | ||
| 261 | |||
| 262 | /** | ||
| 263 | * struct vmw_ctx_bindinfo - structure representing a single context binding | ||
| 264 | * | ||
| 265 | * @ctx: Pointer to the context structure. NULL means the binding is not | ||
| 266 | * active. | ||
| 267 | * @res: Non ref-counted pointer to the bound resource. | ||
| 268 | * @bt: The binding type. | ||
| 269 | * @i1: Union of information needed to unbind. | ||
| 270 | */ | ||
| 271 | struct vmw_ctx_bindinfo { | ||
| 272 | struct vmw_resource *ctx; | ||
| 273 | struct vmw_resource *res; | ||
| 274 | enum vmw_ctx_binding_type bt; | ||
| 275 | union { | ||
| 276 | SVGA3dShaderType shader_type; | ||
| 277 | SVGA3dRenderTargetType rt_type; | ||
| 278 | uint32 texture_stage; | ||
| 279 | } i1; | ||
| 280 | }; | ||
| 281 | |||
| 282 | /** | ||
| 283 | * struct vmw_ctx_binding - structure representing a single context binding | ||
| 284 | * - suitable for tracking in a context | ||
| 285 | * | ||
| 286 | * @ctx_list: List head for context. | ||
| 287 | * @res_list: List head for bound resource. | ||
| 288 | * @bi: Binding info | ||
| 289 | */ | ||
| 290 | struct vmw_ctx_binding { | ||
| 291 | struct list_head ctx_list; | ||
| 292 | struct list_head res_list; | ||
| 293 | struct vmw_ctx_bindinfo bi; | ||
| 294 | }; | ||
| 295 | |||
| 296 | |||
| 297 | /** | ||
| 298 | * struct vmw_ctx_binding_state - context binding state | ||
| 299 | * | ||
| 300 | * @list: linked list of individual bindings. | ||
| 301 | * @render_targets: Render target bindings. | ||
| 302 | * @texture_units: Texture units/samplers bindings. | ||
| 303 | * @shaders: Shader bindings. | ||
| 304 | * | ||
| 305 | * Note that this structure also provides storage space for the individual | ||
| 306 | * struct vmw_ctx_binding objects, so that no dynamic allocation is needed | ||
| 307 | * for individual bindings. | ||
| 308 | * | ||
| 309 | */ | ||
| 310 | struct vmw_ctx_binding_state { | ||
| 311 | struct list_head list; | ||
| 312 | struct vmw_ctx_binding render_targets[SVGA3D_RT_MAX]; | ||
| 313 | struct vmw_ctx_binding texture_units[SVGA3D_NUM_TEXTURE_UNITS]; | ||
| 314 | struct vmw_ctx_binding shaders[SVGA3D_SHADERTYPE_MAX]; | ||
| 315 | }; | ||
| 316 | |||
| 232 | struct vmw_sw_context{ | 317 | struct vmw_sw_context{ |
| 233 | struct drm_open_hash res_ht; | 318 | struct drm_open_hash res_ht; |
| 234 | bool res_ht_initialized; | 319 | bool res_ht_initialized; |
| @@ -250,6 +335,7 @@ struct vmw_sw_context{ | |||
| 250 | struct vmw_resource *last_query_ctx; | 335 | struct vmw_resource *last_query_ctx; |
| 251 | bool needs_post_query_barrier; | 336 | bool needs_post_query_barrier; |
| 252 | struct vmw_resource *error_resource; | 337 | struct vmw_resource *error_resource; |
| 338 | struct vmw_ctx_binding_state staged_bindings; | ||
| 253 | }; | 339 | }; |
| 254 | 340 | ||
| 255 | struct vmw_legacy_display; | 341 | struct vmw_legacy_display; |
| @@ -281,6 +367,7 @@ struct vmw_private { | |||
| 281 | unsigned int io_start; | 367 | unsigned int io_start; |
| 282 | uint32_t vram_start; | 368 | uint32_t vram_start; |
| 283 | uint32_t vram_size; | 369 | uint32_t vram_size; |
| 370 | uint32_t prim_bb_mem; | ||
| 284 | uint32_t mmio_start; | 371 | uint32_t mmio_start; |
| 285 | uint32_t mmio_size; | 372 | uint32_t mmio_size; |
| 286 | uint32_t fb_max_width; | 373 | uint32_t fb_max_width; |
| @@ -290,11 +377,12 @@ struct vmw_private { | |||
| 290 | __le32 __iomem *mmio_virt; | 377 | __le32 __iomem *mmio_virt; |
| 291 | int mmio_mtrr; | 378 | int mmio_mtrr; |
| 292 | uint32_t capabilities; | 379 | uint32_t capabilities; |
| 293 | uint32_t max_gmr_descriptors; | ||
| 294 | uint32_t max_gmr_ids; | 380 | uint32_t max_gmr_ids; |
| 295 | uint32_t max_gmr_pages; | 381 | uint32_t max_gmr_pages; |
| 382 | uint32_t max_mob_pages; | ||
| 296 | uint32_t memory_size; | 383 | uint32_t memory_size; |
| 297 | bool has_gmr; | 384 | bool has_gmr; |
| 385 | bool has_mob; | ||
| 298 | struct mutex hw_mutex; | 386 | struct mutex hw_mutex; |
| 299 | 387 | ||
| 300 | /* | 388 | /* |
| @@ -370,6 +458,7 @@ struct vmw_private { | |||
| 370 | 458 | ||
| 371 | struct vmw_sw_context ctx; | 459 | struct vmw_sw_context ctx; |
| 372 | struct mutex cmdbuf_mutex; | 460 | struct mutex cmdbuf_mutex; |
| 461 | struct mutex binding_mutex; | ||
| 373 | 462 | ||
| 374 | /** | 463 | /** |
| 375 | * Operating mode. | 464 | * Operating mode. |
| @@ -415,6 +504,12 @@ struct vmw_private { | |||
| 415 | * DMA mapping stuff. | 504 | * DMA mapping stuff. |
| 416 | */ | 505 | */ |
| 417 | enum vmw_dma_map_mode map_mode; | 506 | enum vmw_dma_map_mode map_mode; |
| 507 | |||
| 508 | /* | ||
| 509 | * Guest Backed stuff | ||
| 510 | */ | ||
| 511 | struct ttm_buffer_object *otable_bo; | ||
| 512 | struct vmw_otable *otables; | ||
| 418 | }; | 513 | }; |
| 419 | 514 | ||
| 420 | static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res) | 515 | static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res) |
| @@ -471,23 +566,12 @@ extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id); | |||
| 471 | * Resource utilities - vmwgfx_resource.c | 566 | * Resource utilities - vmwgfx_resource.c |
| 472 | */ | 567 | */ |
| 473 | struct vmw_user_resource_conv; | 568 | struct vmw_user_resource_conv; |
| 474 | extern const struct vmw_user_resource_conv *user_surface_converter; | ||
| 475 | extern const struct vmw_user_resource_conv *user_context_converter; | ||
| 476 | 569 | ||
| 477 | extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv); | ||
| 478 | extern void vmw_resource_unreference(struct vmw_resource **p_res); | 570 | extern void vmw_resource_unreference(struct vmw_resource **p_res); |
| 479 | extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res); | 571 | extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res); |
| 480 | extern int vmw_resource_validate(struct vmw_resource *res); | 572 | extern int vmw_resource_validate(struct vmw_resource *res); |
| 481 | extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup); | 573 | extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup); |
| 482 | extern bool vmw_resource_needs_backup(const struct vmw_resource *res); | 574 | extern bool vmw_resource_needs_backup(const struct vmw_resource *res); |
| 483 | extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data, | ||
| 484 | struct drm_file *file_priv); | ||
| 485 | extern int vmw_context_define_ioctl(struct drm_device *dev, void *data, | ||
| 486 | struct drm_file *file_priv); | ||
| 487 | extern int vmw_context_check(struct vmw_private *dev_priv, | ||
| 488 | struct ttm_object_file *tfile, | ||
| 489 | int id, | ||
| 490 | struct vmw_resource **p_res); | ||
| 491 | extern int vmw_user_lookup_handle(struct vmw_private *dev_priv, | 575 | extern int vmw_user_lookup_handle(struct vmw_private *dev_priv, |
| 492 | struct ttm_object_file *tfile, | 576 | struct ttm_object_file *tfile, |
| 493 | uint32_t handle, | 577 | uint32_t handle, |
| @@ -499,18 +583,6 @@ extern int vmw_user_resource_lookup_handle( | |||
| 499 | uint32_t handle, | 583 | uint32_t handle, |
| 500 | const struct vmw_user_resource_conv *converter, | 584 | const struct vmw_user_resource_conv *converter, |
| 501 | struct vmw_resource **p_res); | 585 | struct vmw_resource **p_res); |
| 502 | extern void vmw_surface_res_free(struct vmw_resource *res); | ||
| 503 | extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data, | ||
| 504 | struct drm_file *file_priv); | ||
| 505 | extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | ||
| 506 | struct drm_file *file_priv); | ||
| 507 | extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, | ||
| 508 | struct drm_file *file_priv); | ||
| 509 | extern int vmw_surface_check(struct vmw_private *dev_priv, | ||
| 510 | struct ttm_object_file *tfile, | ||
| 511 | uint32_t handle, int *id); | ||
| 512 | extern int vmw_surface_validate(struct vmw_private *dev_priv, | ||
| 513 | struct vmw_surface *srf); | ||
| 514 | extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo); | 586 | extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo); |
| 515 | extern int vmw_dmabuf_init(struct vmw_private *dev_priv, | 587 | extern int vmw_dmabuf_init(struct vmw_private *dev_priv, |
| 516 | struct vmw_dma_buffer *vmw_bo, | 588 | struct vmw_dma_buffer *vmw_bo, |
| @@ -519,10 +591,21 @@ extern int vmw_dmabuf_init(struct vmw_private *dev_priv, | |||
| 519 | void (*bo_free) (struct ttm_buffer_object *bo)); | 591 | void (*bo_free) (struct ttm_buffer_object *bo)); |
| 520 | extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo, | 592 | extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo, |
| 521 | struct ttm_object_file *tfile); | 593 | struct ttm_object_file *tfile); |
| 594 | extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv, | ||
| 595 | struct ttm_object_file *tfile, | ||
| 596 | uint32_t size, | ||
| 597 | bool shareable, | ||
| 598 | uint32_t *handle, | ||
| 599 | struct vmw_dma_buffer **p_dma_buf); | ||
| 600 | extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile, | ||
| 601 | struct vmw_dma_buffer *dma_buf, | ||
| 602 | uint32_t *handle); | ||
| 522 | extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, | 603 | extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, |
| 523 | struct drm_file *file_priv); | 604 | struct drm_file *file_priv); |
| 524 | extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data, | 605 | extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data, |
| 525 | struct drm_file *file_priv); | 606 | struct drm_file *file_priv); |
| 607 | extern int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data, | ||
| 608 | struct drm_file *file_priv); | ||
| 526 | extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo, | 609 | extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo, |
| 527 | uint32_t cur_validate_node); | 610 | uint32_t cur_validate_node); |
| 528 | extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo); | 611 | extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo); |
| @@ -622,10 +705,16 @@ extern struct ttm_placement vmw_vram_sys_placement; | |||
| 622 | extern struct ttm_placement vmw_vram_gmr_placement; | 705 | extern struct ttm_placement vmw_vram_gmr_placement; |
| 623 | extern struct ttm_placement vmw_vram_gmr_ne_placement; | 706 | extern struct ttm_placement vmw_vram_gmr_ne_placement; |
| 624 | extern struct ttm_placement vmw_sys_placement; | 707 | extern struct ttm_placement vmw_sys_placement; |
| 708 | extern struct ttm_placement vmw_sys_ne_placement; | ||
| 625 | extern struct ttm_placement vmw_evictable_placement; | 709 | extern struct ttm_placement vmw_evictable_placement; |
| 626 | extern struct ttm_placement vmw_srf_placement; | 710 | extern struct ttm_placement vmw_srf_placement; |
| 711 | extern struct ttm_placement vmw_mob_placement; | ||
| 627 | extern struct ttm_bo_driver vmw_bo_driver; | 712 | extern struct ttm_bo_driver vmw_bo_driver; |
| 628 | extern int vmw_dma_quiescent(struct drm_device *dev); | 713 | extern int vmw_dma_quiescent(struct drm_device *dev); |
| 714 | extern int vmw_bo_map_dma(struct ttm_buffer_object *bo); | ||
| 715 | extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo); | ||
| 716 | extern const struct vmw_sg_table * | ||
| 717 | vmw_bo_sg_table(struct ttm_buffer_object *bo); | ||
| 629 | extern void vmw_piter_start(struct vmw_piter *viter, | 718 | extern void vmw_piter_start(struct vmw_piter *viter, |
| 630 | const struct vmw_sg_table *vsgt, | 719 | const struct vmw_sg_table *vsgt, |
| 631 | unsigned long p_offs); | 720 | unsigned long p_offs); |
| @@ -701,7 +790,7 @@ extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, | |||
| 701 | * IRQs and waiting - vmwgfx_irq.c | 790 | * IRQs and waiting - vmwgfx_irq.c |
| 702 | */ | 791 | */ |
| 703 | 792 | ||
| 704 | extern irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS); | 793 | extern irqreturn_t vmw_irq_handler(int irq, void *arg); |
| 705 | extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy, | 794 | extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy, |
| 706 | uint32_t seqno, bool interruptible, | 795 | uint32_t seqno, bool interruptible, |
| 707 | unsigned long timeout); | 796 | unsigned long timeout); |
| @@ -832,6 +921,76 @@ extern int vmw_prime_handle_to_fd(struct drm_device *dev, | |||
| 832 | uint32_t handle, uint32_t flags, | 921 | uint32_t handle, uint32_t flags, |
| 833 | int *prime_fd); | 922 | int *prime_fd); |
| 834 | 923 | ||
| 924 | /* | ||
| 925 | * Memory object management - vmwgfx_mob.c | ||
| 926 | */ | ||
| 927 | struct vmw_mob; | ||
| 928 | extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob, | ||
| 929 | const struct vmw_sg_table *vsgt, | ||
| 930 | unsigned long num_data_pages, int32_t mob_id); | ||
| 931 | extern void vmw_mob_unbind(struct vmw_private *dev_priv, | ||
| 932 | struct vmw_mob *mob); | ||
| 933 | extern void vmw_mob_destroy(struct vmw_mob *mob); | ||
| 934 | extern struct vmw_mob *vmw_mob_create(unsigned long data_pages); | ||
| 935 | extern int vmw_otables_setup(struct vmw_private *dev_priv); | ||
| 936 | extern void vmw_otables_takedown(struct vmw_private *dev_priv); | ||
| 937 | |||
| 938 | /* | ||
| 939 | * Context management - vmwgfx_context.c | ||
| 940 | */ | ||
| 941 | |||
| 942 | extern const struct vmw_user_resource_conv *user_context_converter; | ||
| 943 | |||
| 944 | extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv); | ||
| 945 | |||
| 946 | extern int vmw_context_check(struct vmw_private *dev_priv, | ||
| 947 | struct ttm_object_file *tfile, | ||
| 948 | int id, | ||
| 949 | struct vmw_resource **p_res); | ||
| 950 | extern int vmw_context_define_ioctl(struct drm_device *dev, void *data, | ||
| 951 | struct drm_file *file_priv); | ||
| 952 | extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data, | ||
| 953 | struct drm_file *file_priv); | ||
| 954 | extern int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs, | ||
| 955 | const struct vmw_ctx_bindinfo *ci); | ||
| 956 | extern void | ||
| 957 | vmw_context_binding_state_transfer(struct vmw_resource *res, | ||
| 958 | struct vmw_ctx_binding_state *cbs); | ||
| 959 | extern void vmw_context_binding_res_list_kill(struct list_head *head); | ||
| 960 | |||
| 961 | /* | ||
| 962 | * Surface management - vmwgfx_surface.c | ||
| 963 | */ | ||
| 964 | |||
| 965 | extern const struct vmw_user_resource_conv *user_surface_converter; | ||
| 966 | |||
| 967 | extern void vmw_surface_res_free(struct vmw_resource *res); | ||
| 968 | extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data, | ||
| 969 | struct drm_file *file_priv); | ||
| 970 | extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | ||
| 971 | struct drm_file *file_priv); | ||
| 972 | extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, | ||
| 973 | struct drm_file *file_priv); | ||
| 974 | extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, | ||
| 975 | struct drm_file *file_priv); | ||
| 976 | extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data, | ||
| 977 | struct drm_file *file_priv); | ||
| 978 | extern int vmw_surface_check(struct vmw_private *dev_priv, | ||
| 979 | struct ttm_object_file *tfile, | ||
| 980 | uint32_t handle, int *id); | ||
| 981 | extern int vmw_surface_validate(struct vmw_private *dev_priv, | ||
| 982 | struct vmw_surface *srf); | ||
| 983 | |||
| 984 | /* | ||
| 985 | * Shader management - vmwgfx_shader.c | ||
| 986 | */ | ||
| 987 | |||
| 988 | extern const struct vmw_user_resource_conv *user_shader_converter; | ||
| 989 | |||
| 990 | extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data, | ||
| 991 | struct drm_file *file_priv); | ||
| 992 | extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data, | ||
| 993 | struct drm_file *file_priv); | ||
| 835 | 994 | ||
| 836 | /** | 995 | /** |
| 837 | * Inline helper functions | 996 | * Inline helper functions |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 599f6469a1eb..7a5f1eb55c5a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
| @@ -54,6 +54,8 @@ struct vmw_resource_relocation { | |||
| 54 | * @res: Ref-counted pointer to the resource. | 54 | * @res: Ref-counted pointer to the resource. |
| 55 | * @switch_backup: Boolean whether to switch backup buffer on unreserve. | 55 | * @switch_backup: Boolean whether to switch backup buffer on unreserve. |
| 56 | * @new_backup: Refcounted pointer to the new backup buffer. | 56 | * @new_backup: Refcounted pointer to the new backup buffer. |
| 57 | * @staged_bindings: If @res is a context, tracks bindings set up during | ||
| 58 | * the command batch. Otherwise NULL. | ||
| 57 | * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL. | 59 | * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL. |
| 58 | * @first_usage: Set to true the first time the resource is referenced in | 60 | * @first_usage: Set to true the first time the resource is referenced in |
| 59 | * the command stream. | 61 | * the command stream. |
| @@ -65,12 +67,32 @@ struct vmw_resource_val_node { | |||
| 65 | struct drm_hash_item hash; | 67 | struct drm_hash_item hash; |
| 66 | struct vmw_resource *res; | 68 | struct vmw_resource *res; |
| 67 | struct vmw_dma_buffer *new_backup; | 69 | struct vmw_dma_buffer *new_backup; |
| 70 | struct vmw_ctx_binding_state *staged_bindings; | ||
| 68 | unsigned long new_backup_offset; | 71 | unsigned long new_backup_offset; |
| 69 | bool first_usage; | 72 | bool first_usage; |
| 70 | bool no_buffer_needed; | 73 | bool no_buffer_needed; |
| 71 | }; | 74 | }; |
| 72 | 75 | ||
| 73 | /** | 76 | /** |
| 77 | * struct vmw_cmd_entry - Describe a command for the verifier | ||
| 78 | * | ||
| 79 | * @user_allow: Whether allowed from the execbuf ioctl. | ||
| 80 | * @gb_disable: Whether disabled if guest-backed objects are available. | ||
| 81 | * @gb_enable: Whether enabled iff guest-backed objects are available. | ||
| 82 | */ | ||
| 83 | struct vmw_cmd_entry { | ||
| 84 | int (*func) (struct vmw_private *, struct vmw_sw_context *, | ||
| 85 | SVGA3dCmdHeader *); | ||
| 86 | bool user_allow; | ||
| 87 | bool gb_disable; | ||
| 88 | bool gb_enable; | ||
| 89 | }; | ||
| 90 | |||
| 91 | #define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable) \ | ||
| 92 | [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\ | ||
| 93 | (_gb_disable), (_gb_enable)} | ||
| 94 | |||
| 95 | /** | ||
| 74 | * vmw_resource_unreserve - unreserve resources previously reserved for | 96 | * vmw_resource_unreserve - unreserve resources previously reserved for |
| 75 | * command submission. | 97 | * command submission. |
| 76 | * | 98 | * |
| @@ -87,6 +109,16 @@ static void vmw_resource_list_unreserve(struct list_head *list, | |||
| 87 | struct vmw_dma_buffer *new_backup = | 109 | struct vmw_dma_buffer *new_backup = |
| 88 | backoff ? NULL : val->new_backup; | 110 | backoff ? NULL : val->new_backup; |
| 89 | 111 | ||
| 112 | /* | ||
| 113 | * Transfer staged context bindings to the | ||
| 114 | * persistent context binding tracker. | ||
| 115 | */ | ||
| 116 | if (unlikely(val->staged_bindings)) { | ||
| 117 | vmw_context_binding_state_transfer | ||
| 118 | (val->res, val->staged_bindings); | ||
| 119 | kfree(val->staged_bindings); | ||
| 120 | val->staged_bindings = NULL; | ||
| 121 | } | ||
| 90 | vmw_resource_unreserve(res, new_backup, | 122 | vmw_resource_unreserve(res, new_backup, |
| 91 | val->new_backup_offset); | 123 | val->new_backup_offset); |
| 92 | vmw_dmabuf_unreference(&val->new_backup); | 124 | vmw_dmabuf_unreference(&val->new_backup); |
| @@ -224,6 +256,7 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv, | |||
| 224 | * | 256 | * |
| 225 | * @sw_context: The software context used for this command submission batch. | 257 | * @sw_context: The software context used for this command submission batch. |
| 226 | * @bo: The buffer object to add. | 258 | * @bo: The buffer object to add. |
| 259 | * @validate_as_mob: Validate this buffer as a MOB. | ||
| 227 | * @p_val_node: If non-NULL Will be updated with the validate node number | 260 | * @p_val_node: If non-NULL Will be updated with the validate node number |
| 228 | * on return. | 261 | * on return. |
| 229 | * | 262 | * |
| @@ -232,6 +265,7 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv, | |||
| 232 | */ | 265 | */ |
| 233 | static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, | 266 | static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, |
| 234 | struct ttm_buffer_object *bo, | 267 | struct ttm_buffer_object *bo, |
| 268 | bool validate_as_mob, | ||
| 235 | uint32_t *p_val_node) | 269 | uint32_t *p_val_node) |
| 236 | { | 270 | { |
| 237 | uint32_t val_node; | 271 | uint32_t val_node; |
| @@ -244,6 +278,10 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, | |||
| 244 | &hash) == 0)) { | 278 | &hash) == 0)) { |
| 245 | vval_buf = container_of(hash, struct vmw_validate_buffer, | 279 | vval_buf = container_of(hash, struct vmw_validate_buffer, |
| 246 | hash); | 280 | hash); |
| 281 | if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) { | ||
| 282 | DRM_ERROR("Inconsistent buffer usage.\n"); | ||
| 283 | return -EINVAL; | ||
| 284 | } | ||
| 247 | val_buf = &vval_buf->base; | 285 | val_buf = &vval_buf->base; |
| 248 | val_node = vval_buf - sw_context->val_bufs; | 286 | val_node = vval_buf - sw_context->val_bufs; |
| 249 | } else { | 287 | } else { |
| @@ -266,6 +304,7 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, | |||
| 266 | val_buf->bo = ttm_bo_reference(bo); | 304 | val_buf->bo = ttm_bo_reference(bo); |
| 267 | val_buf->reserved = false; | 305 | val_buf->reserved = false; |
| 268 | list_add_tail(&val_buf->head, &sw_context->validate_nodes); | 306 | list_add_tail(&val_buf->head, &sw_context->validate_nodes); |
| 307 | vval_buf->validate_as_mob = validate_as_mob; | ||
| 269 | } | 308 | } |
| 270 | 309 | ||
| 271 | sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC; | 310 | sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC; |
| @@ -302,7 +341,8 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context) | |||
| 302 | struct ttm_buffer_object *bo = &res->backup->base; | 341 | struct ttm_buffer_object *bo = &res->backup->base; |
| 303 | 342 | ||
| 304 | ret = vmw_bo_to_validate_list | 343 | ret = vmw_bo_to_validate_list |
| 305 | (sw_context, bo, NULL); | 344 | (sw_context, bo, |
| 345 | vmw_resource_needs_backup(res), NULL); | ||
| 306 | 346 | ||
| 307 | if (unlikely(ret != 0)) | 347 | if (unlikely(ret != 0)) |
| 308 | return ret; | 348 | return ret; |
| @@ -362,8 +402,15 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv, | |||
| 362 | struct vmw_resource_val_node *node; | 402 | struct vmw_resource_val_node *node; |
| 363 | int ret; | 403 | int ret; |
| 364 | 404 | ||
| 365 | if (*id == SVGA3D_INVALID_ID) | 405 | if (*id == SVGA3D_INVALID_ID) { |
| 406 | if (p_val) | ||
| 407 | *p_val = NULL; | ||
| 408 | if (res_type == vmw_res_context) { | ||
| 409 | DRM_ERROR("Illegal context invalid id.\n"); | ||
| 410 | return -EINVAL; | ||
| 411 | } | ||
| 366 | return 0; | 412 | return 0; |
| 413 | } | ||
| 367 | 414 | ||
| 368 | /* | 415 | /* |
| 369 | * Fastpath in case of repeated commands referencing the same | 416 | * Fastpath in case of repeated commands referencing the same |
| @@ -411,6 +458,18 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv, | |||
| 411 | rcache->node = node; | 458 | rcache->node = node; |
| 412 | if (p_val) | 459 | if (p_val) |
| 413 | *p_val = node; | 460 | *p_val = node; |
| 461 | |||
| 462 | if (node->first_usage && res_type == vmw_res_context) { | ||
| 463 | node->staged_bindings = | ||
| 464 | kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL); | ||
| 465 | if (node->staged_bindings == NULL) { | ||
| 466 | DRM_ERROR("Failed to allocate context binding " | ||
| 467 | "information.\n"); | ||
| 468 | goto out_no_reloc; | ||
| 469 | } | ||
| 470 | INIT_LIST_HEAD(&node->staged_bindings->list); | ||
| 471 | } | ||
| 472 | |||
| 414 | vmw_resource_unreference(&res); | 473 | vmw_resource_unreference(&res); |
| 415 | return 0; | 474 | return 0; |
| 416 | 475 | ||
| @@ -453,17 +512,35 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv, | |||
| 453 | SVGA3dCmdHeader header; | 512 | SVGA3dCmdHeader header; |
| 454 | SVGA3dCmdSetRenderTarget body; | 513 | SVGA3dCmdSetRenderTarget body; |
| 455 | } *cmd; | 514 | } *cmd; |
| 515 | struct vmw_resource_val_node *ctx_node; | ||
| 516 | struct vmw_resource_val_node *res_node; | ||
| 456 | int ret; | 517 | int ret; |
| 457 | 518 | ||
| 458 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | 519 | cmd = container_of(header, struct vmw_sid_cmd, header); |
| 520 | |||
| 521 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, | ||
| 522 | user_context_converter, &cmd->body.cid, | ||
| 523 | &ctx_node); | ||
| 459 | if (unlikely(ret != 0)) | 524 | if (unlikely(ret != 0)) |
| 460 | return ret; | 525 | return ret; |
| 461 | 526 | ||
| 462 | cmd = container_of(header, struct vmw_sid_cmd, header); | ||
| 463 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | 527 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
| 464 | user_surface_converter, | 528 | user_surface_converter, |
| 465 | &cmd->body.target.sid, NULL); | 529 | &cmd->body.target.sid, &res_node); |
| 466 | return ret; | 530 | if (unlikely(ret != 0)) |
| 531 | return ret; | ||
| 532 | |||
| 533 | if (dev_priv->has_mob) { | ||
| 534 | struct vmw_ctx_bindinfo bi; | ||
| 535 | |||
| 536 | bi.ctx = ctx_node->res; | ||
| 537 | bi.res = res_node ? res_node->res : NULL; | ||
| 538 | bi.bt = vmw_ctx_binding_rt; | ||
| 539 | bi.i1.rt_type = cmd->body.type; | ||
| 540 | return vmw_context_binding_add(ctx_node->staged_bindings, &bi); | ||
| 541 | } | ||
| 542 | |||
| 543 | return 0; | ||
| 467 | } | 544 | } |
| 468 | 545 | ||
| 469 | static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv, | 546 | static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv, |
| @@ -519,11 +596,6 @@ static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv, | |||
| 519 | 596 | ||
| 520 | cmd = container_of(header, struct vmw_sid_cmd, header); | 597 | cmd = container_of(header, struct vmw_sid_cmd, header); |
| 521 | 598 | ||
| 522 | if (unlikely(!sw_context->kernel)) { | ||
| 523 | DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id); | ||
| 524 | return -EPERM; | ||
| 525 | } | ||
| 526 | |||
| 527 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | 599 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
| 528 | user_surface_converter, | 600 | user_surface_converter, |
| 529 | &cmd->body.srcImage.sid, NULL); | 601 | &cmd->body.srcImage.sid, NULL); |
| @@ -541,11 +613,6 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv, | |||
| 541 | 613 | ||
| 542 | cmd = container_of(header, struct vmw_sid_cmd, header); | 614 | cmd = container_of(header, struct vmw_sid_cmd, header); |
| 543 | 615 | ||
| 544 | if (unlikely(!sw_context->kernel)) { | ||
| 545 | DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id); | ||
| 546 | return -EPERM; | ||
| 547 | } | ||
| 548 | |||
| 549 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | 616 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
| 550 | user_surface_converter, &cmd->body.sid, | 617 | user_surface_converter, &cmd->body.sid, |
| 551 | NULL); | 618 | NULL); |
| @@ -586,7 +653,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, | |||
| 586 | sw_context->needs_post_query_barrier = true; | 653 | sw_context->needs_post_query_barrier = true; |
| 587 | ret = vmw_bo_to_validate_list(sw_context, | 654 | ret = vmw_bo_to_validate_list(sw_context, |
| 588 | sw_context->cur_query_bo, | 655 | sw_context->cur_query_bo, |
| 589 | NULL); | 656 | dev_priv->has_mob, NULL); |
| 590 | if (unlikely(ret != 0)) | 657 | if (unlikely(ret != 0)) |
| 591 | return ret; | 658 | return ret; |
| 592 | } | 659 | } |
| @@ -594,7 +661,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, | |||
| 594 | 661 | ||
| 595 | ret = vmw_bo_to_validate_list(sw_context, | 662 | ret = vmw_bo_to_validate_list(sw_context, |
| 596 | dev_priv->dummy_query_bo, | 663 | dev_priv->dummy_query_bo, |
| 597 | NULL); | 664 | dev_priv->has_mob, NULL); |
| 598 | if (unlikely(ret != 0)) | 665 | if (unlikely(ret != 0)) |
| 599 | return ret; | 666 | return ret; |
| 600 | 667 | ||
| @@ -672,6 +739,66 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv, | |||
| 672 | } | 739 | } |
| 673 | 740 | ||
| 674 | /** | 741 | /** |
| 742 | * vmw_translate_mob_pointer - Prepare to translate a user-space buffer | ||
| 743 | * handle to a MOB id. | ||
| 744 | * | ||
| 745 | * @dev_priv: Pointer to a device private structure. | ||
| 746 | * @sw_context: The software context used for this command batch validation. | ||
| 747 | * @id: Pointer to the user-space handle to be translated. | ||
| 748 | * @vmw_bo_p: Points to a location that, on successful return will carry | ||
| 749 | * a reference-counted pointer to the DMA buffer identified by the | ||
| 750 | * user-space handle in @id. | ||
| 751 | * | ||
| 752 | * This function saves information needed to translate a user-space buffer | ||
| 753 | * handle to a MOB id. The translation does not take place immediately, but | ||
| 754 | * during a call to vmw_apply_relocations(). This function builds a relocation | ||
| 755 | * list and a list of buffers to validate. The former needs to be freed using | ||
| 756 | * either vmw_apply_relocations() or vmw_free_relocations(). The latter | ||
| 757 | * needs to be freed using vmw_clear_validations. | ||
| 758 | */ | ||
| 759 | static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, | ||
| 760 | struct vmw_sw_context *sw_context, | ||
| 761 | SVGAMobId *id, | ||
| 762 | struct vmw_dma_buffer **vmw_bo_p) | ||
| 763 | { | ||
| 764 | struct vmw_dma_buffer *vmw_bo = NULL; | ||
| 765 | struct ttm_buffer_object *bo; | ||
| 766 | uint32_t handle = *id; | ||
| 767 | struct vmw_relocation *reloc; | ||
| 768 | int ret; | ||
| 769 | |||
| 770 | ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo); | ||
| 771 | if (unlikely(ret != 0)) { | ||
| 772 | DRM_ERROR("Could not find or use MOB buffer.\n"); | ||
| 773 | return -EINVAL; | ||
| 774 | } | ||
| 775 | bo = &vmw_bo->base; | ||
| 776 | |||
| 777 | if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) { | ||
| 778 | DRM_ERROR("Max number relocations per submission" | ||
| 779 | " exceeded\n"); | ||
| 780 | ret = -EINVAL; | ||
| 781 | goto out_no_reloc; | ||
| 782 | } | ||
| 783 | |||
| 784 | reloc = &sw_context->relocs[sw_context->cur_reloc++]; | ||
| 785 | reloc->mob_loc = id; | ||
| 786 | reloc->location = NULL; | ||
| 787 | |||
| 788 | ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index); | ||
| 789 | if (unlikely(ret != 0)) | ||
| 790 | goto out_no_reloc; | ||
| 791 | |||
| 792 | *vmw_bo_p = vmw_bo; | ||
| 793 | return 0; | ||
| 794 | |||
| 795 | out_no_reloc: | ||
| 796 | vmw_dmabuf_unreference(&vmw_bo); | ||
| 797 | vmw_bo_p = NULL; | ||
| 798 | return ret; | ||
| 799 | } | ||
| 800 | |||
| 801 | /** | ||
| 675 | * vmw_translate_guest_pointer - Prepare to translate a user-space buffer | 802 | * vmw_translate_guest_pointer - Prepare to translate a user-space buffer |
| 676 | * handle to a valid SVGAGuestPtr | 803 | * handle to a valid SVGAGuestPtr |
| 677 | * | 804 | * |
| @@ -718,7 +845,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, | |||
| 718 | reloc = &sw_context->relocs[sw_context->cur_reloc++]; | 845 | reloc = &sw_context->relocs[sw_context->cur_reloc++]; |
| 719 | reloc->location = ptr; | 846 | reloc->location = ptr; |
| 720 | 847 | ||
| 721 | ret = vmw_bo_to_validate_list(sw_context, bo, &reloc->index); | 848 | ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index); |
| 722 | if (unlikely(ret != 0)) | 849 | if (unlikely(ret != 0)) |
| 723 | goto out_no_reloc; | 850 | goto out_no_reloc; |
| 724 | 851 | ||
| @@ -732,6 +859,30 @@ out_no_reloc: | |||
| 732 | } | 859 | } |
| 733 | 860 | ||
| 734 | /** | 861 | /** |
| 862 | * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command. | ||
| 863 | * | ||
| 864 | * @dev_priv: Pointer to a device private struct. | ||
| 865 | * @sw_context: The software context used for this command submission. | ||
| 866 | * @header: Pointer to the command header in the command stream. | ||
| 867 | */ | ||
| 868 | static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv, | ||
| 869 | struct vmw_sw_context *sw_context, | ||
| 870 | SVGA3dCmdHeader *header) | ||
| 871 | { | ||
| 872 | struct vmw_begin_gb_query_cmd { | ||
| 873 | SVGA3dCmdHeader header; | ||
| 874 | SVGA3dCmdBeginGBQuery q; | ||
| 875 | } *cmd; | ||
| 876 | |||
| 877 | cmd = container_of(header, struct vmw_begin_gb_query_cmd, | ||
| 878 | header); | ||
| 879 | |||
| 880 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, | ||
| 881 | user_context_converter, &cmd->q.cid, | ||
| 882 | NULL); | ||
| 883 | } | ||
| 884 | |||
| 885 | /** | ||
| 735 | * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command. | 886 | * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command. |
| 736 | * | 887 | * |
| 737 | * @dev_priv: Pointer to a device private struct. | 888 | * @dev_priv: Pointer to a device private struct. |
| @@ -750,12 +901,64 @@ static int vmw_cmd_begin_query(struct vmw_private *dev_priv, | |||
| 750 | cmd = container_of(header, struct vmw_begin_query_cmd, | 901 | cmd = container_of(header, struct vmw_begin_query_cmd, |
| 751 | header); | 902 | header); |
| 752 | 903 | ||
| 904 | if (unlikely(dev_priv->has_mob)) { | ||
| 905 | struct { | ||
| 906 | SVGA3dCmdHeader header; | ||
| 907 | SVGA3dCmdBeginGBQuery q; | ||
| 908 | } gb_cmd; | ||
| 909 | |||
| 910 | BUG_ON(sizeof(gb_cmd) != sizeof(*cmd)); | ||
| 911 | |||
| 912 | gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY; | ||
| 913 | gb_cmd.header.size = cmd->header.size; | ||
| 914 | gb_cmd.q.cid = cmd->q.cid; | ||
| 915 | gb_cmd.q.type = cmd->q.type; | ||
| 916 | |||
| 917 | memcpy(cmd, &gb_cmd, sizeof(*cmd)); | ||
| 918 | return vmw_cmd_begin_gb_query(dev_priv, sw_context, header); | ||
| 919 | } | ||
| 920 | |||
| 753 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, | 921 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, |
| 754 | user_context_converter, &cmd->q.cid, | 922 | user_context_converter, &cmd->q.cid, |
| 755 | NULL); | 923 | NULL); |
| 756 | } | 924 | } |
| 757 | 925 | ||
| 758 | /** | 926 | /** |
| 927 | * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command. | ||
| 928 | * | ||
| 929 | * @dev_priv: Pointer to a device private struct. | ||
| 930 | * @sw_context: The software context used for this command submission. | ||
| 931 | * @header: Pointer to the command header in the command stream. | ||
| 932 | */ | ||
| 933 | static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv, | ||
| 934 | struct vmw_sw_context *sw_context, | ||
| 935 | SVGA3dCmdHeader *header) | ||
| 936 | { | ||
| 937 | struct vmw_dma_buffer *vmw_bo; | ||
| 938 | struct vmw_query_cmd { | ||
| 939 | SVGA3dCmdHeader header; | ||
| 940 | SVGA3dCmdEndGBQuery q; | ||
| 941 | } *cmd; | ||
| 942 | int ret; | ||
| 943 | |||
| 944 | cmd = container_of(header, struct vmw_query_cmd, header); | ||
| 945 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | ||
| 946 | if (unlikely(ret != 0)) | ||
| 947 | return ret; | ||
| 948 | |||
| 949 | ret = vmw_translate_mob_ptr(dev_priv, sw_context, | ||
| 950 | &cmd->q.mobid, | ||
| 951 | &vmw_bo); | ||
| 952 | if (unlikely(ret != 0)) | ||
| 953 | return ret; | ||
| 954 | |||
| 955 | ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context); | ||
| 956 | |||
| 957 | vmw_dmabuf_unreference(&vmw_bo); | ||
| 958 | return ret; | ||
| 959 | } | ||
| 960 | |||
| 961 | /** | ||
| 759 | * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command. | 962 | * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command. |
| 760 | * | 963 | * |
| 761 | * @dev_priv: Pointer to a device private struct. | 964 | * @dev_priv: Pointer to a device private struct. |
| @@ -774,6 +977,25 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv, | |||
| 774 | int ret; | 977 | int ret; |
| 775 | 978 | ||
| 776 | cmd = container_of(header, struct vmw_query_cmd, header); | 979 | cmd = container_of(header, struct vmw_query_cmd, header); |
| 980 | if (dev_priv->has_mob) { | ||
| 981 | struct { | ||
| 982 | SVGA3dCmdHeader header; | ||
| 983 | SVGA3dCmdEndGBQuery q; | ||
| 984 | } gb_cmd; | ||
| 985 | |||
| 986 | BUG_ON(sizeof(gb_cmd) != sizeof(*cmd)); | ||
| 987 | |||
| 988 | gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY; | ||
| 989 | gb_cmd.header.size = cmd->header.size; | ||
| 990 | gb_cmd.q.cid = cmd->q.cid; | ||
| 991 | gb_cmd.q.type = cmd->q.type; | ||
| 992 | gb_cmd.q.mobid = cmd->q.guestResult.gmrId; | ||
| 993 | gb_cmd.q.offset = cmd->q.guestResult.offset; | ||
| 994 | |||
| 995 | memcpy(cmd, &gb_cmd, sizeof(*cmd)); | ||
| 996 | return vmw_cmd_end_gb_query(dev_priv, sw_context, header); | ||
| 997 | } | ||
| 998 | |||
| 777 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | 999 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); |
| 778 | if (unlikely(ret != 0)) | 1000 | if (unlikely(ret != 0)) |
| 779 | return ret; | 1001 | return ret; |
| @@ -790,7 +1012,40 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv, | |||
| 790 | return ret; | 1012 | return ret; |
| 791 | } | 1013 | } |
| 792 | 1014 | ||
| 793 | /* | 1015 | /** |
| 1016 | * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command. | ||
| 1017 | * | ||
| 1018 | * @dev_priv: Pointer to a device private struct. | ||
| 1019 | * @sw_context: The software context used for this command submission. | ||
| 1020 | * @header: Pointer to the command header in the command stream. | ||
| 1021 | */ | ||
| 1022 | static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv, | ||
| 1023 | struct vmw_sw_context *sw_context, | ||
| 1024 | SVGA3dCmdHeader *header) | ||
| 1025 | { | ||
| 1026 | struct vmw_dma_buffer *vmw_bo; | ||
| 1027 | struct vmw_query_cmd { | ||
| 1028 | SVGA3dCmdHeader header; | ||
| 1029 | SVGA3dCmdWaitForGBQuery q; | ||
| 1030 | } *cmd; | ||
| 1031 | int ret; | ||
| 1032 | |||
| 1033 | cmd = container_of(header, struct vmw_query_cmd, header); | ||
| 1034 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | ||
| 1035 | if (unlikely(ret != 0)) | ||
| 1036 | return ret; | ||
| 1037 | |||
| 1038 | ret = vmw_translate_mob_ptr(dev_priv, sw_context, | ||
| 1039 | &cmd->q.mobid, | ||
| 1040 | &vmw_bo); | ||
| 1041 | if (unlikely(ret != 0)) | ||
| 1042 | return ret; | ||
| 1043 | |||
| 1044 | vmw_dmabuf_unreference(&vmw_bo); | ||
| 1045 | return 0; | ||
| 1046 | } | ||
| 1047 | |||
| 1048 | /** | ||
| 794 | * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command. | 1049 | * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command. |
| 795 | * | 1050 | * |
| 796 | * @dev_priv: Pointer to a device private struct. | 1051 | * @dev_priv: Pointer to a device private struct. |
| @@ -809,6 +1064,25 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv, | |||
| 809 | int ret; | 1064 | int ret; |
| 810 | 1065 | ||
| 811 | cmd = container_of(header, struct vmw_query_cmd, header); | 1066 | cmd = container_of(header, struct vmw_query_cmd, header); |
| 1067 | if (dev_priv->has_mob) { | ||
| 1068 | struct { | ||
| 1069 | SVGA3dCmdHeader header; | ||
| 1070 | SVGA3dCmdWaitForGBQuery q; | ||
| 1071 | } gb_cmd; | ||
| 1072 | |||
| 1073 | BUG_ON(sizeof(gb_cmd) != sizeof(*cmd)); | ||
| 1074 | |||
| 1075 | gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY; | ||
| 1076 | gb_cmd.header.size = cmd->header.size; | ||
| 1077 | gb_cmd.q.cid = cmd->q.cid; | ||
| 1078 | gb_cmd.q.type = cmd->q.type; | ||
| 1079 | gb_cmd.q.mobid = cmd->q.guestResult.gmrId; | ||
| 1080 | gb_cmd.q.offset = cmd->q.guestResult.offset; | ||
| 1081 | |||
| 1082 | memcpy(cmd, &gb_cmd, sizeof(*cmd)); | ||
| 1083 | return vmw_cmd_wait_gb_query(dev_priv, sw_context, header); | ||
| 1084 | } | ||
| 1085 | |||
| 812 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | 1086 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); |
| 813 | if (unlikely(ret != 0)) | 1087 | if (unlikely(ret != 0)) |
| 814 | return ret; | 1088 | return ret; |
| @@ -921,15 +1195,22 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv, | |||
| 921 | struct vmw_tex_state_cmd { | 1195 | struct vmw_tex_state_cmd { |
| 922 | SVGA3dCmdHeader header; | 1196 | SVGA3dCmdHeader header; |
| 923 | SVGA3dCmdSetTextureState state; | 1197 | SVGA3dCmdSetTextureState state; |
| 924 | }; | 1198 | } *cmd; |
| 925 | 1199 | ||
| 926 | SVGA3dTextureState *last_state = (SVGA3dTextureState *) | 1200 | SVGA3dTextureState *last_state = (SVGA3dTextureState *) |
| 927 | ((unsigned long) header + header->size + sizeof(header)); | 1201 | ((unsigned long) header + header->size + sizeof(header)); |
| 928 | SVGA3dTextureState *cur_state = (SVGA3dTextureState *) | 1202 | SVGA3dTextureState *cur_state = (SVGA3dTextureState *) |
| 929 | ((unsigned long) header + sizeof(struct vmw_tex_state_cmd)); | 1203 | ((unsigned long) header + sizeof(struct vmw_tex_state_cmd)); |
| 1204 | struct vmw_resource_val_node *ctx_node; | ||
| 1205 | struct vmw_resource_val_node *res_node; | ||
| 930 | int ret; | 1206 | int ret; |
| 931 | 1207 | ||
| 932 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | 1208 | cmd = container_of(header, struct vmw_tex_state_cmd, |
| 1209 | header); | ||
| 1210 | |||
| 1211 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, | ||
| 1212 | user_context_converter, &cmd->state.cid, | ||
| 1213 | &ctx_node); | ||
| 933 | if (unlikely(ret != 0)) | 1214 | if (unlikely(ret != 0)) |
| 934 | return ret; | 1215 | return ret; |
| 935 | 1216 | ||
| @@ -939,9 +1220,20 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv, | |||
| 939 | 1220 | ||
| 940 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | 1221 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
| 941 | user_surface_converter, | 1222 | user_surface_converter, |
| 942 | &cur_state->value, NULL); | 1223 | &cur_state->value, &res_node); |
| 943 | if (unlikely(ret != 0)) | 1224 | if (unlikely(ret != 0)) |
| 944 | return ret; | 1225 | return ret; |
| 1226 | |||
| 1227 | if (dev_priv->has_mob) { | ||
| 1228 | struct vmw_ctx_bindinfo bi; | ||
| 1229 | |||
| 1230 | bi.ctx = ctx_node->res; | ||
| 1231 | bi.res = res_node ? res_node->res : NULL; | ||
| 1232 | bi.bt = vmw_ctx_binding_tex; | ||
| 1233 | bi.i1.texture_stage = cur_state->stage; | ||
| 1234 | vmw_context_binding_add(ctx_node->staged_bindings, | ||
| 1235 | &bi); | ||
| 1236 | } | ||
| 945 | } | 1237 | } |
| 946 | 1238 | ||
| 947 | return 0; | 1239 | return 0; |
| @@ -971,6 +1263,222 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv, | |||
| 971 | } | 1263 | } |
| 972 | 1264 | ||
| 973 | /** | 1265 | /** |
| 1266 | * vmw_cmd_switch_backup - Utility function to handle backup buffer switching | ||
| 1267 | * | ||
| 1268 | * @dev_priv: Pointer to a device private struct. | ||
| 1269 | * @sw_context: The software context being used for this batch. | ||
| 1270 | * @res_type: The resource type. | ||
| 1271 | * @converter: Information about user-space binding for this resource type. | ||
| 1272 | * @res_id: Pointer to the user-space resource handle in the command stream. | ||
| 1273 | * @buf_id: Pointer to the user-space backup buffer handle in the command | ||
| 1274 | * stream. | ||
| 1275 | * @backup_offset: Offset of backup into MOB. | ||
| 1276 | * | ||
| 1277 | * This function prepares for registering a switch of backup buffers | ||
| 1278 | * in the resource metadata just prior to unreserving. | ||
| 1279 | */ | ||
| 1280 | static int vmw_cmd_switch_backup(struct vmw_private *dev_priv, | ||
| 1281 | struct vmw_sw_context *sw_context, | ||
| 1282 | enum vmw_res_type res_type, | ||
| 1283 | const struct vmw_user_resource_conv | ||
| 1284 | *converter, | ||
| 1285 | uint32_t *res_id, | ||
| 1286 | uint32_t *buf_id, | ||
| 1287 | unsigned long backup_offset) | ||
| 1288 | { | ||
| 1289 | int ret; | ||
| 1290 | struct vmw_dma_buffer *dma_buf; | ||
| 1291 | struct vmw_resource_val_node *val_node; | ||
| 1292 | |||
| 1293 | ret = vmw_cmd_res_check(dev_priv, sw_context, res_type, | ||
| 1294 | converter, res_id, &val_node); | ||
| 1295 | if (unlikely(ret != 0)) | ||
| 1296 | return ret; | ||
| 1297 | |||
| 1298 | ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf); | ||
| 1299 | if (unlikely(ret != 0)) | ||
| 1300 | return ret; | ||
| 1301 | |||
| 1302 | if (val_node->first_usage) | ||
| 1303 | val_node->no_buffer_needed = true; | ||
| 1304 | |||
| 1305 | vmw_dmabuf_unreference(&val_node->new_backup); | ||
| 1306 | val_node->new_backup = dma_buf; | ||
| 1307 | val_node->new_backup_offset = backup_offset; | ||
| 1308 | |||
| 1309 | return 0; | ||
| 1310 | } | ||
| 1311 | |||
| 1312 | /** | ||
| 1313 | * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE | ||
| 1314 | * command | ||
| 1315 | * | ||
| 1316 | * @dev_priv: Pointer to a device private struct. | ||
| 1317 | * @sw_context: The software context being used for this batch. | ||
| 1318 | * @header: Pointer to the command header in the command stream. | ||
| 1319 | */ | ||
| 1320 | static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv, | ||
| 1321 | struct vmw_sw_context *sw_context, | ||
| 1322 | SVGA3dCmdHeader *header) | ||
| 1323 | { | ||
| 1324 | struct vmw_bind_gb_surface_cmd { | ||
| 1325 | SVGA3dCmdHeader header; | ||
| 1326 | SVGA3dCmdBindGBSurface body; | ||
| 1327 | } *cmd; | ||
| 1328 | |||
| 1329 | cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header); | ||
| 1330 | |||
| 1331 | return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface, | ||
| 1332 | user_surface_converter, | ||
| 1333 | &cmd->body.sid, &cmd->body.mobid, | ||
| 1334 | 0); | ||
| 1335 | } | ||
| 1336 | |||
| 1337 | /** | ||
| 1338 | * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE | ||
| 1339 | * command | ||
| 1340 | * | ||
| 1341 | * @dev_priv: Pointer to a device private struct. | ||
| 1342 | * @sw_context: The software context being used for this batch. | ||
| 1343 | * @header: Pointer to the command header in the command stream. | ||
| 1344 | */ | ||
| 1345 | static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv, | ||
| 1346 | struct vmw_sw_context *sw_context, | ||
| 1347 | SVGA3dCmdHeader *header) | ||
| 1348 | { | ||
| 1349 | struct vmw_gb_surface_cmd { | ||
| 1350 | SVGA3dCmdHeader header; | ||
| 1351 | SVGA3dCmdUpdateGBImage body; | ||
| 1352 | } *cmd; | ||
| 1353 | |||
| 1354 | cmd = container_of(header, struct vmw_gb_surface_cmd, header); | ||
| 1355 | |||
| 1356 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | ||
| 1357 | user_surface_converter, | ||
| 1358 | &cmd->body.image.sid, NULL); | ||
| 1359 | } | ||
| 1360 | |||
| 1361 | /** | ||
| 1362 | * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE | ||
| 1363 | * command | ||
| 1364 | * | ||
| 1365 | * @dev_priv: Pointer to a device private struct. | ||
| 1366 | * @sw_context: The software context being used for this batch. | ||
| 1367 | * @header: Pointer to the command header in the command stream. | ||
| 1368 | */ | ||
| 1369 | static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv, | ||
| 1370 | struct vmw_sw_context *sw_context, | ||
| 1371 | SVGA3dCmdHeader *header) | ||
| 1372 | { | ||
| 1373 | struct vmw_gb_surface_cmd { | ||
| 1374 | SVGA3dCmdHeader header; | ||
| 1375 | SVGA3dCmdUpdateGBSurface body; | ||
| 1376 | } *cmd; | ||
| 1377 | |||
| 1378 | cmd = container_of(header, struct vmw_gb_surface_cmd, header); | ||
| 1379 | |||
| 1380 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | ||
| 1381 | user_surface_converter, | ||
| 1382 | &cmd->body.sid, NULL); | ||
| 1383 | } | ||
| 1384 | |||
| 1385 | /** | ||
| 1386 | * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE | ||
| 1387 | * command | ||
| 1388 | * | ||
| 1389 | * @dev_priv: Pointer to a device private struct. | ||
| 1390 | * @sw_context: The software context being used for this batch. | ||
| 1391 | * @header: Pointer to the command header in the command stream. | ||
| 1392 | */ | ||
| 1393 | static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv, | ||
| 1394 | struct vmw_sw_context *sw_context, | ||
| 1395 | SVGA3dCmdHeader *header) | ||
| 1396 | { | ||
| 1397 | struct vmw_gb_surface_cmd { | ||
| 1398 | SVGA3dCmdHeader header; | ||
| 1399 | SVGA3dCmdReadbackGBImage body; | ||
| 1400 | } *cmd; | ||
| 1401 | |||
| 1402 | cmd = container_of(header, struct vmw_gb_surface_cmd, header); | ||
| 1403 | |||
| 1404 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | ||
| 1405 | user_surface_converter, | ||
| 1406 | &cmd->body.image.sid, NULL); | ||
| 1407 | } | ||
| 1408 | |||
| 1409 | /** | ||
| 1410 | * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE | ||
| 1411 | * command | ||
| 1412 | * | ||
| 1413 | * @dev_priv: Pointer to a device private struct. | ||
| 1414 | * @sw_context: The software context being used for this batch. | ||
| 1415 | * @header: Pointer to the command header in the command stream. | ||
| 1416 | */ | ||
| 1417 | static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv, | ||
| 1418 | struct vmw_sw_context *sw_context, | ||
| 1419 | SVGA3dCmdHeader *header) | ||
| 1420 | { | ||
| 1421 | struct vmw_gb_surface_cmd { | ||
| 1422 | SVGA3dCmdHeader header; | ||
| 1423 | SVGA3dCmdReadbackGBSurface body; | ||
| 1424 | } *cmd; | ||
| 1425 | |||
| 1426 | cmd = container_of(header, struct vmw_gb_surface_cmd, header); | ||
| 1427 | |||
| 1428 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | ||
| 1429 | user_surface_converter, | ||
| 1430 | &cmd->body.sid, NULL); | ||
| 1431 | } | ||
| 1432 | |||
| 1433 | /** | ||
| 1434 | * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE | ||
| 1435 | * command | ||
| 1436 | * | ||
| 1437 | * @dev_priv: Pointer to a device private struct. | ||
| 1438 | * @sw_context: The software context being used for this batch. | ||
| 1439 | * @header: Pointer to the command header in the command stream. | ||
| 1440 | */ | ||
| 1441 | static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv, | ||
| 1442 | struct vmw_sw_context *sw_context, | ||
| 1443 | SVGA3dCmdHeader *header) | ||
| 1444 | { | ||
| 1445 | struct vmw_gb_surface_cmd { | ||
| 1446 | SVGA3dCmdHeader header; | ||
| 1447 | SVGA3dCmdInvalidateGBImage body; | ||
| 1448 | } *cmd; | ||
| 1449 | |||
| 1450 | cmd = container_of(header, struct vmw_gb_surface_cmd, header); | ||
| 1451 | |||
| 1452 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | ||
| 1453 | user_surface_converter, | ||
| 1454 | &cmd->body.image.sid, NULL); | ||
| 1455 | } | ||
| 1456 | |||
| 1457 | /** | ||
| 1458 | * vmw_cmd_invalidate_gb_surface - Validate an | ||
| 1459 | * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command | ||
| 1460 | * | ||
| 1461 | * @dev_priv: Pointer to a device private struct. | ||
| 1462 | * @sw_context: The software context being used for this batch. | ||
| 1463 | * @header: Pointer to the command header in the command stream. | ||
| 1464 | */ | ||
| 1465 | static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv, | ||
| 1466 | struct vmw_sw_context *sw_context, | ||
| 1467 | SVGA3dCmdHeader *header) | ||
| 1468 | { | ||
| 1469 | struct vmw_gb_surface_cmd { | ||
| 1470 | SVGA3dCmdHeader header; | ||
| 1471 | SVGA3dCmdInvalidateGBSurface body; | ||
| 1472 | } *cmd; | ||
| 1473 | |||
| 1474 | cmd = container_of(header, struct vmw_gb_surface_cmd, header); | ||
| 1475 | |||
| 1476 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | ||
| 1477 | user_surface_converter, | ||
| 1478 | &cmd->body.sid, NULL); | ||
| 1479 | } | ||
| 1480 | |||
| 1481 | /** | ||
| 974 | * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER | 1482 | * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER |
| 975 | * command | 1483 | * command |
| 976 | * | 1484 | * |
| @@ -986,18 +1494,64 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv, | |||
| 986 | SVGA3dCmdHeader header; | 1494 | SVGA3dCmdHeader header; |
| 987 | SVGA3dCmdSetShader body; | 1495 | SVGA3dCmdSetShader body; |
| 988 | } *cmd; | 1496 | } *cmd; |
| 1497 | struct vmw_resource_val_node *ctx_node; | ||
| 989 | int ret; | 1498 | int ret; |
| 990 | 1499 | ||
| 991 | cmd = container_of(header, struct vmw_set_shader_cmd, | 1500 | cmd = container_of(header, struct vmw_set_shader_cmd, |
| 992 | header); | 1501 | header); |
| 993 | 1502 | ||
| 994 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | 1503 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, |
| 1504 | user_context_converter, &cmd->body.cid, | ||
| 1505 | &ctx_node); | ||
| 995 | if (unlikely(ret != 0)) | 1506 | if (unlikely(ret != 0)) |
| 996 | return ret; | 1507 | return ret; |
| 997 | 1508 | ||
| 1509 | if (dev_priv->has_mob) { | ||
| 1510 | struct vmw_ctx_bindinfo bi; | ||
| 1511 | struct vmw_resource_val_node *res_node; | ||
| 1512 | |||
| 1513 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader, | ||
| 1514 | user_shader_converter, | ||
| 1515 | &cmd->body.shid, &res_node); | ||
| 1516 | if (unlikely(ret != 0)) | ||
| 1517 | return ret; | ||
| 1518 | |||
| 1519 | bi.ctx = ctx_node->res; | ||
| 1520 | bi.res = res_node ? res_node->res : NULL; | ||
| 1521 | bi.bt = vmw_ctx_binding_shader; | ||
| 1522 | bi.i1.shader_type = cmd->body.type; | ||
| 1523 | return vmw_context_binding_add(ctx_node->staged_bindings, &bi); | ||
| 1524 | } | ||
| 1525 | |||
| 998 | return 0; | 1526 | return 0; |
| 999 | } | 1527 | } |
| 1000 | 1528 | ||
| 1529 | /** | ||
| 1530 | * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER | ||
| 1531 | * command | ||
| 1532 | * | ||
| 1533 | * @dev_priv: Pointer to a device private struct. | ||
| 1534 | * @sw_context: The software context being used for this batch. | ||
| 1535 | * @header: Pointer to the command header in the command stream. | ||
| 1536 | */ | ||
| 1537 | static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv, | ||
| 1538 | struct vmw_sw_context *sw_context, | ||
| 1539 | SVGA3dCmdHeader *header) | ||
| 1540 | { | ||
| 1541 | struct vmw_bind_gb_shader_cmd { | ||
| 1542 | SVGA3dCmdHeader header; | ||
| 1543 | SVGA3dCmdBindGBShader body; | ||
| 1544 | } *cmd; | ||
| 1545 | |||
| 1546 | cmd = container_of(header, struct vmw_bind_gb_shader_cmd, | ||
| 1547 | header); | ||
| 1548 | |||
| 1549 | return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader, | ||
| 1550 | user_shader_converter, | ||
| 1551 | &cmd->body.shid, &cmd->body.mobid, | ||
| 1552 | cmd->body.offsetInBytes); | ||
| 1553 | } | ||
| 1554 | |||
| 1001 | static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, | 1555 | static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, |
| 1002 | struct vmw_sw_context *sw_context, | 1556 | struct vmw_sw_context *sw_context, |
| 1003 | void *buf, uint32_t *size) | 1557 | void *buf, uint32_t *size) |
| @@ -1041,50 +1595,173 @@ static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, | |||
| 1041 | return 0; | 1595 | return 0; |
| 1042 | } | 1596 | } |
| 1043 | 1597 | ||
| 1044 | typedef int (*vmw_cmd_func) (struct vmw_private *, | 1598 | static const struct vmw_cmd_entry const vmw_cmd_entries[SVGA_3D_CMD_MAX] = { |
| 1045 | struct vmw_sw_context *, | 1599 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid, |
| 1046 | SVGA3dCmdHeader *); | 1600 | false, false, false), |
| 1047 | 1601 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid, | |
| 1048 | #define VMW_CMD_DEF(cmd, func) \ | 1602 | false, false, false), |
| 1049 | [cmd - SVGA_3D_CMD_BASE] = func | 1603 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check, |
| 1050 | 1604 | true, false, false), | |
| 1051 | static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = { | 1605 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check, |
| 1052 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid), | 1606 | true, false, false), |
| 1053 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid), | 1607 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma, |
| 1054 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check), | 1608 | true, false, false), |
| 1055 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check), | 1609 | VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid, |
| 1056 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma), | 1610 | false, false, false), |
| 1057 | VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid), | 1611 | VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid, |
| 1058 | VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid), | 1612 | false, false, false), |
| 1059 | VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check), | 1613 | VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check, |
| 1060 | VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check), | 1614 | true, false, false), |
| 1061 | VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check), | 1615 | VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check, |
| 1616 | true, false, false), | ||
| 1617 | VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check, | ||
| 1618 | true, false, false), | ||
| 1062 | VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET, | 1619 | VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET, |
| 1063 | &vmw_cmd_set_render_target_check), | 1620 | &vmw_cmd_set_render_target_check, true, false, false), |
| 1064 | VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state), | 1621 | VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state, |
| 1065 | VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check), | 1622 | true, false, false), |
| 1066 | VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check), | 1623 | VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check, |
| 1067 | VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check), | 1624 | true, false, false), |
| 1068 | VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check), | 1625 | VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check, |
| 1069 | VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check), | 1626 | true, false, false), |
| 1070 | VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check), | 1627 | VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check, |
| 1071 | VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check), | 1628 | true, false, false), |
| 1072 | VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check), | 1629 | VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check, |
| 1073 | VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check), | 1630 | true, false, false), |
| 1074 | VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader), | 1631 | VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check, |
| 1075 | VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check), | 1632 | true, false, false), |
| 1076 | VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw), | 1633 | VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check, |
| 1077 | VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check), | 1634 | true, false, false), |
| 1078 | VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query), | 1635 | VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check, |
| 1079 | VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query), | 1636 | false, false, false), |
| 1080 | VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query), | 1637 | VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check, |
| 1081 | VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok), | 1638 | true, true, false), |
| 1639 | VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check, | ||
| 1640 | true, true, false), | ||
| 1641 | VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader, | ||
| 1642 | true, false, false), | ||
| 1643 | VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check, | ||
| 1644 | true, true, false), | ||
| 1645 | VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw, | ||
| 1646 | true, false, false), | ||
| 1647 | VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check, | ||
| 1648 | true, false, false), | ||
| 1649 | VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query, | ||
| 1650 | true, false, false), | ||
| 1651 | VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query, | ||
| 1652 | true, false, false), | ||
| 1653 | VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query, | ||
| 1654 | true, false, false), | ||
| 1655 | VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok, | ||
| 1656 | true, false, false), | ||
| 1082 | VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN, | 1657 | VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN, |
| 1083 | &vmw_cmd_blt_surf_screen_check), | 1658 | &vmw_cmd_blt_surf_screen_check, false, false, false), |
| 1084 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid), | 1659 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid, |
| 1085 | VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid), | 1660 | false, false, false), |
| 1086 | VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid), | 1661 | VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid, |
| 1087 | VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid), | 1662 | false, false, false), |
| 1663 | VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid, | ||
| 1664 | false, false, false), | ||
| 1665 | VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid, | ||
| 1666 | false, false, false), | ||
| 1667 | VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid, | ||
| 1668 | false, false, false), | ||
| 1669 | VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid, | ||
| 1670 | false, false, false), | ||
| 1671 | VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid, | ||
| 1672 | false, false, false), | ||
| 1673 | VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid, | ||
| 1674 | false, false, false), | ||
| 1675 | VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid, | ||
| 1676 | false, false, false), | ||
| 1677 | VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid, | ||
| 1678 | false, false, false), | ||
| 1679 | VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid, | ||
| 1680 | false, false, false), | ||
| 1681 | VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid, | ||
| 1682 | false, false, false), | ||
| 1683 | VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid, | ||
| 1684 | false, false, false), | ||
| 1685 | VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid, | ||
| 1686 | false, false, true), | ||
| 1687 | VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid, | ||
| 1688 | false, false, true), | ||
| 1689 | VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid, | ||
| 1690 | false, false, true), | ||
| 1691 | VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid, | ||
| 1692 | false, false, true), | ||
| 1693 | VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid, | ||
| 1694 | false, false, true), | ||
| 1695 | VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid, | ||
| 1696 | false, false, true), | ||
| 1697 | VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid, | ||
| 1698 | false, false, true), | ||
| 1699 | VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid, | ||
| 1700 | false, false, true), | ||
| 1701 | VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface, | ||
| 1702 | true, false, true), | ||
| 1703 | VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid, | ||
| 1704 | false, false, true), | ||
| 1705 | VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image, | ||
| 1706 | true, false, true), | ||
| 1707 | VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE, | ||
| 1708 | &vmw_cmd_update_gb_surface, true, false, true), | ||
| 1709 | VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE, | ||
| 1710 | &vmw_cmd_readback_gb_image, true, false, true), | ||
| 1711 | VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE, | ||
| 1712 | &vmw_cmd_readback_gb_surface, true, false, true), | ||
| 1713 | VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE, | ||
| 1714 | &vmw_cmd_invalidate_gb_image, true, false, true), | ||
| 1715 | VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE, | ||
| 1716 | &vmw_cmd_invalidate_gb_surface, true, false, true), | ||
| 1717 | VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid, | ||
| 1718 | false, false, true), | ||
| 1719 | VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid, | ||
| 1720 | false, false, true), | ||
| 1721 | VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid, | ||
| 1722 | false, false, true), | ||
| 1723 | VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid, | ||
| 1724 | false, false, true), | ||
| 1725 | VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid, | ||
| 1726 | false, false, true), | ||
| 1727 | VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid, | ||
| 1728 | false, false, true), | ||
| 1729 | VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader, | ||
| 1730 | true, false, true), | ||
| 1731 | VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid, | ||
| 1732 | false, false, true), | ||
| 1733 | VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid, | ||
| 1734 | false, false, false), | ||
| 1735 | VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query, | ||
| 1736 | true, false, true), | ||
| 1737 | VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query, | ||
| 1738 | true, false, true), | ||
| 1739 | VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query, | ||
| 1740 | true, false, true), | ||
| 1741 | VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok, | ||
| 1742 | true, false, true), | ||
| 1743 | VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid, | ||
| 1744 | false, false, true), | ||
| 1745 | VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid, | ||
| 1746 | false, false, true), | ||
| 1747 | VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid, | ||
| 1748 | false, false, true), | ||
| 1749 | VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid, | ||
| 1750 | false, false, true), | ||
| 1751 | VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid, | ||
| 1752 | false, false, true), | ||
| 1753 | VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid, | ||
| 1754 | false, false, true), | ||
| 1755 | VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid, | ||
| 1756 | false, false, true), | ||
| 1757 | VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid, | ||
| 1758 | false, false, true), | ||
| 1759 | VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid, | ||
| 1760 | false, false, true), | ||
| 1761 | VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid, | ||
| 1762 | false, false, true), | ||
| 1763 | VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check, | ||
| 1764 | true, false, true) | ||
| 1088 | }; | 1765 | }; |
| 1089 | 1766 | ||
| 1090 | static int vmw_cmd_check(struct vmw_private *dev_priv, | 1767 | static int vmw_cmd_check(struct vmw_private *dev_priv, |
| @@ -1095,6 +1772,8 @@ static int vmw_cmd_check(struct vmw_private *dev_priv, | |||
| 1095 | uint32_t size_remaining = *size; | 1772 | uint32_t size_remaining = *size; |
| 1096 | SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf; | 1773 | SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf; |
| 1097 | int ret; | 1774 | int ret; |
| 1775 | const struct vmw_cmd_entry *entry; | ||
| 1776 | bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS; | ||
| 1098 | 1777 | ||
| 1099 | cmd_id = le32_to_cpu(((uint32_t *)buf)[0]); | 1778 | cmd_id = le32_to_cpu(((uint32_t *)buf)[0]); |
| 1100 | /* Handle any none 3D commands */ | 1779 | /* Handle any none 3D commands */ |
| @@ -1107,18 +1786,40 @@ static int vmw_cmd_check(struct vmw_private *dev_priv, | |||
| 1107 | 1786 | ||
| 1108 | cmd_id -= SVGA_3D_CMD_BASE; | 1787 | cmd_id -= SVGA_3D_CMD_BASE; |
| 1109 | if (unlikely(*size > size_remaining)) | 1788 | if (unlikely(*size > size_remaining)) |
| 1110 | goto out_err; | 1789 | goto out_invalid; |
| 1111 | 1790 | ||
| 1112 | if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE)) | 1791 | if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE)) |
| 1113 | goto out_err; | 1792 | goto out_invalid; |
| 1793 | |||
| 1794 | entry = &vmw_cmd_entries[cmd_id]; | ||
| 1795 | if (unlikely(!entry->user_allow && !sw_context->kernel)) | ||
| 1796 | goto out_privileged; | ||
| 1114 | 1797 | ||
| 1115 | ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header); | 1798 | if (unlikely(entry->gb_disable && gb)) |
| 1799 | goto out_old; | ||
| 1800 | |||
| 1801 | if (unlikely(entry->gb_enable && !gb)) | ||
| 1802 | goto out_new; | ||
| 1803 | |||
| 1804 | ret = entry->func(dev_priv, sw_context, header); | ||
| 1116 | if (unlikely(ret != 0)) | 1805 | if (unlikely(ret != 0)) |
| 1117 | goto out_err; | 1806 | goto out_invalid; |
| 1118 | 1807 | ||
| 1119 | return 0; | 1808 | return 0; |
| 1120 | out_err: | 1809 | out_invalid: |
| 1121 | DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n", | 1810 | DRM_ERROR("Invalid SVGA3D command: %d\n", |
| 1811 | cmd_id + SVGA_3D_CMD_BASE); | ||
| 1812 | return -EINVAL; | ||
| 1813 | out_privileged: | ||
| 1814 | DRM_ERROR("Privileged SVGA3D command: %d\n", | ||
| 1815 | cmd_id + SVGA_3D_CMD_BASE); | ||
| 1816 | return -EPERM; | ||
| 1817 | out_old: | ||
| 1818 | DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n", | ||
| 1819 | cmd_id + SVGA_3D_CMD_BASE); | ||
| 1820 | return -EINVAL; | ||
| 1821 | out_new: | ||
| 1822 | DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n", | ||
| 1122 | cmd_id + SVGA_3D_CMD_BASE); | 1823 | cmd_id + SVGA_3D_CMD_BASE); |
| 1123 | return -EINVAL; | 1824 | return -EINVAL; |
| 1124 | } | 1825 | } |
| @@ -1174,6 +1875,9 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context) | |||
| 1174 | case VMW_PL_GMR: | 1875 | case VMW_PL_GMR: |
| 1175 | reloc->location->gmrId = bo->mem.start; | 1876 | reloc->location->gmrId = bo->mem.start; |
| 1176 | break; | 1877 | break; |
| 1878 | case VMW_PL_MOB: | ||
| 1879 | *reloc->mob_loc = bo->mem.start; | ||
| 1880 | break; | ||
| 1177 | default: | 1881 | default: |
| 1178 | BUG(); | 1882 | BUG(); |
| 1179 | } | 1883 | } |
| @@ -1198,6 +1902,8 @@ static void vmw_resource_list_unreference(struct list_head *list) | |||
| 1198 | list_for_each_entry_safe(val, val_next, list, head) { | 1902 | list_for_each_entry_safe(val, val_next, list, head) { |
| 1199 | list_del_init(&val->head); | 1903 | list_del_init(&val->head); |
| 1200 | vmw_resource_unreference(&val->res); | 1904 | vmw_resource_unreference(&val->res); |
| 1905 | if (unlikely(val->staged_bindings)) | ||
| 1906 | kfree(val->staged_bindings); | ||
| 1201 | kfree(val); | 1907 | kfree(val); |
| 1202 | } | 1908 | } |
| 1203 | } | 1909 | } |
| @@ -1224,7 +1930,8 @@ static void vmw_clear_validations(struct vmw_sw_context *sw_context) | |||
| 1224 | } | 1930 | } |
| 1225 | 1931 | ||
| 1226 | static int vmw_validate_single_buffer(struct vmw_private *dev_priv, | 1932 | static int vmw_validate_single_buffer(struct vmw_private *dev_priv, |
| 1227 | struct ttm_buffer_object *bo) | 1933 | struct ttm_buffer_object *bo, |
| 1934 | bool validate_as_mob) | ||
| 1228 | { | 1935 | { |
| 1229 | int ret; | 1936 | int ret; |
| 1230 | 1937 | ||
| @@ -1238,6 +1945,9 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv, | |||
| 1238 | dev_priv->dummy_query_bo_pinned)) | 1945 | dev_priv->dummy_query_bo_pinned)) |
| 1239 | return 0; | 1946 | return 0; |
| 1240 | 1947 | ||
| 1948 | if (validate_as_mob) | ||
| 1949 | return ttm_bo_validate(bo, &vmw_mob_placement, true, false); | ||
| 1950 | |||
| 1241 | /** | 1951 | /** |
| 1242 | * Put BO in VRAM if there is space, otherwise as a GMR. | 1952 | * Put BO in VRAM if there is space, otherwise as a GMR. |
| 1243 | * If there is no space in VRAM and GMR ids are all used up, | 1953 | * If there is no space in VRAM and GMR ids are all used up, |
| @@ -1259,7 +1969,6 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv, | |||
| 1259 | return ret; | 1969 | return ret; |
| 1260 | } | 1970 | } |
| 1261 | 1971 | ||
| 1262 | |||
| 1263 | static int vmw_validate_buffers(struct vmw_private *dev_priv, | 1972 | static int vmw_validate_buffers(struct vmw_private *dev_priv, |
| 1264 | struct vmw_sw_context *sw_context) | 1973 | struct vmw_sw_context *sw_context) |
| 1265 | { | 1974 | { |
| @@ -1267,7 +1976,8 @@ static int vmw_validate_buffers(struct vmw_private *dev_priv, | |||
| 1267 | int ret; | 1976 | int ret; |
| 1268 | 1977 | ||
| 1269 | list_for_each_entry(entry, &sw_context->validate_nodes, base.head) { | 1978 | list_for_each_entry(entry, &sw_context->validate_nodes, base.head) { |
| 1270 | ret = vmw_validate_single_buffer(dev_priv, entry->base.bo); | 1979 | ret = vmw_validate_single_buffer(dev_priv, entry->base.bo, |
| 1980 | entry->validate_as_mob); | ||
| 1271 | if (unlikely(ret != 0)) | 1981 | if (unlikely(ret != 0)) |
| 1272 | return ret; | 1982 | return ret; |
| 1273 | } | 1983 | } |
| @@ -1509,11 +2219,17 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
| 1509 | goto out_err; | 2219 | goto out_err; |
| 1510 | } | 2220 | } |
| 1511 | 2221 | ||
| 2222 | ret = mutex_lock_interruptible(&dev_priv->binding_mutex); | ||
| 2223 | if (unlikely(ret != 0)) { | ||
| 2224 | ret = -ERESTARTSYS; | ||
| 2225 | goto out_err; | ||
| 2226 | } | ||
| 2227 | |||
| 1512 | cmd = vmw_fifo_reserve(dev_priv, command_size); | 2228 | cmd = vmw_fifo_reserve(dev_priv, command_size); |
| 1513 | if (unlikely(cmd == NULL)) { | 2229 | if (unlikely(cmd == NULL)) { |
| 1514 | DRM_ERROR("Failed reserving fifo space for commands.\n"); | 2230 | DRM_ERROR("Failed reserving fifo space for commands.\n"); |
| 1515 | ret = -ENOMEM; | 2231 | ret = -ENOMEM; |
| 1516 | goto out_err; | 2232 | goto out_unlock_binding; |
| 1517 | } | 2233 | } |
| 1518 | 2234 | ||
| 1519 | vmw_apply_relocations(sw_context); | 2235 | vmw_apply_relocations(sw_context); |
| @@ -1538,6 +2254,8 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
| 1538 | DRM_ERROR("Fence submission error. Syncing.\n"); | 2254 | DRM_ERROR("Fence submission error. Syncing.\n"); |
| 1539 | 2255 | ||
| 1540 | vmw_resource_list_unreserve(&sw_context->resource_list, false); | 2256 | vmw_resource_list_unreserve(&sw_context->resource_list, false); |
| 2257 | mutex_unlock(&dev_priv->binding_mutex); | ||
| 2258 | |||
| 1541 | ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes, | 2259 | ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes, |
| 1542 | (void *) fence); | 2260 | (void *) fence); |
| 1543 | 2261 | ||
| @@ -1568,6 +2286,8 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
| 1568 | 2286 | ||
| 1569 | return 0; | 2287 | return 0; |
| 1570 | 2288 | ||
| 2289 | out_unlock_binding: | ||
| 2290 | mutex_unlock(&dev_priv->binding_mutex); | ||
| 1571 | out_err: | 2291 | out_err: |
| 1572 | vmw_resource_relocations_free(&sw_context->res_relocations); | 2292 | vmw_resource_relocations_free(&sw_context->res_relocations); |
| 1573 | vmw_free_relocations(sw_context); | 2293 | vmw_free_relocations(sw_context); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index c62d20e8a6f1..436b013b4231 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | |||
| @@ -271,7 +271,7 @@ void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p) | |||
| 271 | spin_unlock_irq(&fman->lock); | 271 | spin_unlock_irq(&fman->lock); |
| 272 | } | 272 | } |
| 273 | 273 | ||
| 274 | void vmw_fences_perform_actions(struct vmw_fence_manager *fman, | 274 | static void vmw_fences_perform_actions(struct vmw_fence_manager *fman, |
| 275 | struct list_head *list) | 275 | struct list_head *list) |
| 276 | { | 276 | { |
| 277 | struct vmw_fence_action *action, *next_action; | 277 | struct vmw_fence_action *action, *next_action; |
| @@ -897,7 +897,7 @@ static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action) | |||
| 897 | * Note that the action callbacks may be executed before this function | 897 | * Note that the action callbacks may be executed before this function |
| 898 | * returns. | 898 | * returns. |
| 899 | */ | 899 | */ |
| 900 | void vmw_fence_obj_add_action(struct vmw_fence_obj *fence, | 900 | static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence, |
| 901 | struct vmw_fence_action *action) | 901 | struct vmw_fence_action *action) |
| 902 | { | 902 | { |
| 903 | struct vmw_fence_manager *fman = fence->fman; | 903 | struct vmw_fence_manager *fman = fence->fman; |
| @@ -993,7 +993,7 @@ struct vmw_event_fence_pending { | |||
| 993 | struct drm_vmw_event_fence event; | 993 | struct drm_vmw_event_fence event; |
| 994 | }; | 994 | }; |
| 995 | 995 | ||
| 996 | int vmw_event_fence_action_create(struct drm_file *file_priv, | 996 | static int vmw_event_fence_action_create(struct drm_file *file_priv, |
| 997 | struct vmw_fence_obj *fence, | 997 | struct vmw_fence_obj *fence, |
| 998 | uint32_t flags, | 998 | uint32_t flags, |
| 999 | uint64_t user_data, | 999 | uint64_t user_data, |
| @@ -1080,7 +1080,8 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data, | |||
| 1080 | */ | 1080 | */ |
| 1081 | if (arg->handle) { | 1081 | if (arg->handle) { |
| 1082 | struct ttm_base_object *base = | 1082 | struct ttm_base_object *base = |
| 1083 | ttm_base_object_lookup(vmw_fp->tfile, arg->handle); | 1083 | ttm_base_object_lookup_for_ref(dev_priv->tdev, |
| 1084 | arg->handle); | ||
| 1084 | 1085 | ||
| 1085 | if (unlikely(base == NULL)) { | 1086 | if (unlikely(base == NULL)) { |
| 1086 | DRM_ERROR("Fence event invalid fence object handle " | 1087 | DRM_ERROR("Fence event invalid fence object handle " |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c index 3eb148667d63..6ccd993e26bf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | |||
| @@ -35,6 +35,23 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv) | |||
| 35 | uint32_t fifo_min, hwversion; | 35 | uint32_t fifo_min, hwversion; |
| 36 | const struct vmw_fifo_state *fifo = &dev_priv->fifo; | 36 | const struct vmw_fifo_state *fifo = &dev_priv->fifo; |
| 37 | 37 | ||
| 38 | if (!(dev_priv->capabilities & SVGA_CAP_3D)) | ||
| 39 | return false; | ||
| 40 | |||
| 41 | if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { | ||
| 42 | uint32_t result; | ||
| 43 | |||
| 44 | if (!dev_priv->has_mob) | ||
| 45 | return false; | ||
| 46 | |||
| 47 | mutex_lock(&dev_priv->hw_mutex); | ||
| 48 | vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D); | ||
| 49 | result = vmw_read(dev_priv, SVGA_REG_DEV_CAP); | ||
| 50 | mutex_unlock(&dev_priv->hw_mutex); | ||
| 51 | |||
| 52 | return (result != 0); | ||
| 53 | } | ||
| 54 | |||
| 38 | if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)) | 55 | if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)) |
| 39 | return false; | 56 | return false; |
| 40 | 57 | ||
| @@ -511,24 +528,16 @@ out_err: | |||
| 511 | } | 528 | } |
| 512 | 529 | ||
| 513 | /** | 530 | /** |
| 514 | * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo. | 531 | * vmw_fifo_emit_dummy_legacy_query - emits a dummy query to the fifo using |
| 532 | * legacy query commands. | ||
| 515 | * | 533 | * |
| 516 | * @dev_priv: The device private structure. | 534 | * @dev_priv: The device private structure. |
| 517 | * @cid: The hardware context id used for the query. | 535 | * @cid: The hardware context id used for the query. |
| 518 | * | 536 | * |
| 519 | * This function is used to emit a dummy occlusion query with | 537 | * See the vmw_fifo_emit_dummy_query documentation. |
| 520 | * no primitives rendered between query begin and query end. | ||
| 521 | * It's used to provide a query barrier, in order to know that when | ||
| 522 | * this query is finished, all preceding queries are also finished. | ||
| 523 | * | ||
| 524 | * A Query results structure should have been initialized at the start | ||
| 525 | * of the dev_priv->dummy_query_bo buffer object. And that buffer object | ||
| 526 | * must also be either reserved or pinned when this function is called. | ||
| 527 | * | ||
| 528 | * Returns -ENOMEM on failure to reserve fifo space. | ||
| 529 | */ | 538 | */ |
| 530 | int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv, | 539 | static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv, |
| 531 | uint32_t cid) | 540 | uint32_t cid) |
| 532 | { | 541 | { |
| 533 | /* | 542 | /* |
| 534 | * A query wait without a preceding query end will | 543 | * A query wait without a preceding query end will |
| @@ -566,3 +575,75 @@ int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv, | |||
| 566 | 575 | ||
| 567 | return 0; | 576 | return 0; |
| 568 | } | 577 | } |
| 578 | |||
| 579 | /** | ||
| 580 | * vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using | ||
| 581 | * guest-backed resource query commands. | ||
| 582 | * | ||
| 583 | * @dev_priv: The device private structure. | ||
| 584 | * @cid: The hardware context id used for the query. | ||
| 585 | * | ||
| 586 | * See the vmw_fifo_emit_dummy_query documentation. | ||
| 587 | */ | ||
| 588 | static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv, | ||
| 589 | uint32_t cid) | ||
| 590 | { | ||
| 591 | /* | ||
| 592 | * A query wait without a preceding query end will | ||
| 593 | * actually finish all queries for this cid | ||
| 594 | * without writing to the query result structure. | ||
| 595 | */ | ||
| 596 | |||
| 597 | struct ttm_buffer_object *bo = dev_priv->dummy_query_bo; | ||
| 598 | struct { | ||
| 599 | SVGA3dCmdHeader header; | ||
| 600 | SVGA3dCmdWaitForGBQuery body; | ||
| 601 | } *cmd; | ||
| 602 | |||
| 603 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
| 604 | |||
| 605 | if (unlikely(cmd == NULL)) { | ||
| 606 | DRM_ERROR("Out of fifo space for dummy query.\n"); | ||
| 607 | return -ENOMEM; | ||
| 608 | } | ||
| 609 | |||
| 610 | cmd->header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY; | ||
| 611 | cmd->header.size = sizeof(cmd->body); | ||
| 612 | cmd->body.cid = cid; | ||
| 613 | cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION; | ||
| 614 | BUG_ON(bo->mem.mem_type != VMW_PL_MOB); | ||
| 615 | cmd->body.mobid = bo->mem.start; | ||
| 616 | cmd->body.offset = 0; | ||
| 617 | |||
| 618 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
| 619 | |||
| 620 | return 0; | ||
| 621 | } | ||
| 622 | |||
| 623 | |||
| 624 | /** | ||
| 625 | * vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using | ||
| 626 | * appropriate resource query commands. | ||
| 627 | * | ||
| 628 | * @dev_priv: The device private structure. | ||
| 629 | * @cid: The hardware context id used for the query. | ||
| 630 | * | ||
| 631 | * This function is used to emit a dummy occlusion query with | ||
| 632 | * no primitives rendered between query begin and query end. | ||
| 633 | * It's used to provide a query barrier, in order to know that when | ||
| 634 | * this query is finished, all preceding queries are also finished. | ||
| 635 | * | ||
| 636 | * A Query results structure should have been initialized at the start | ||
| 637 | * of the dev_priv->dummy_query_bo buffer object. And that buffer object | ||
| 638 | * must also be either reserved or pinned when this function is called. | ||
| 639 | * | ||
| 640 | * Returns -ENOMEM on failure to reserve fifo space. | ||
| 641 | */ | ||
| 642 | int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv, | ||
| 643 | uint32_t cid) | ||
| 644 | { | ||
| 645 | if (dev_priv->has_mob) | ||
| 646 | return vmw_fifo_emit_dummy_gb_query(dev_priv, cid); | ||
| 647 | |||
| 648 | return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid); | ||
| 649 | } | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c index 6ef0b035becb..61d8d803199f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c | |||
| @@ -125,181 +125,27 @@ static void vmw_gmr2_unbind(struct vmw_private *dev_priv, | |||
| 125 | } | 125 | } |
| 126 | 126 | ||
| 127 | 127 | ||
| 128 | static void vmw_gmr_free_descriptors(struct device *dev, dma_addr_t desc_dma, | ||
| 129 | struct list_head *desc_pages) | ||
| 130 | { | ||
| 131 | struct page *page, *next; | ||
| 132 | struct svga_guest_mem_descriptor *page_virtual; | ||
| 133 | unsigned int desc_per_page = PAGE_SIZE / | ||
| 134 | sizeof(struct svga_guest_mem_descriptor) - 1; | ||
| 135 | |||
| 136 | if (list_empty(desc_pages)) | ||
| 137 | return; | ||
| 138 | |||
| 139 | list_for_each_entry_safe(page, next, desc_pages, lru) { | ||
| 140 | list_del_init(&page->lru); | ||
| 141 | |||
| 142 | if (likely(desc_dma != DMA_ADDR_INVALID)) { | ||
| 143 | dma_unmap_page(dev, desc_dma, PAGE_SIZE, | ||
| 144 | DMA_TO_DEVICE); | ||
| 145 | } | ||
| 146 | |||
| 147 | page_virtual = kmap_atomic(page); | ||
| 148 | desc_dma = (dma_addr_t) | ||
| 149 | le32_to_cpu(page_virtual[desc_per_page].ppn) << | ||
| 150 | PAGE_SHIFT; | ||
| 151 | kunmap_atomic(page_virtual); | ||
| 152 | |||
| 153 | __free_page(page); | ||
| 154 | } | ||
| 155 | } | ||
| 156 | |||
| 157 | /** | ||
| 158 | * FIXME: Adjust to the ttm lowmem / highmem storage to minimize | ||
| 159 | * the number of used descriptors. | ||
| 160 | * | ||
| 161 | */ | ||
| 162 | |||
| 163 | static int vmw_gmr_build_descriptors(struct device *dev, | ||
| 164 | struct list_head *desc_pages, | ||
| 165 | struct vmw_piter *iter, | ||
| 166 | unsigned long num_pages, | ||
| 167 | dma_addr_t *first_dma) | ||
| 168 | { | ||
| 169 | struct page *page; | ||
| 170 | struct svga_guest_mem_descriptor *page_virtual = NULL; | ||
| 171 | struct svga_guest_mem_descriptor *desc_virtual = NULL; | ||
| 172 | unsigned int desc_per_page; | ||
| 173 | unsigned long prev_pfn; | ||
| 174 | unsigned long pfn; | ||
| 175 | int ret; | ||
| 176 | dma_addr_t desc_dma; | ||
| 177 | |||
| 178 | desc_per_page = PAGE_SIZE / | ||
| 179 | sizeof(struct svga_guest_mem_descriptor) - 1; | ||
| 180 | |||
| 181 | while (likely(num_pages != 0)) { | ||
| 182 | page = alloc_page(__GFP_HIGHMEM); | ||
| 183 | if (unlikely(page == NULL)) { | ||
| 184 | ret = -ENOMEM; | ||
| 185 | goto out_err; | ||
| 186 | } | ||
| 187 | |||
| 188 | list_add_tail(&page->lru, desc_pages); | ||
| 189 | page_virtual = kmap_atomic(page); | ||
| 190 | desc_virtual = page_virtual - 1; | ||
| 191 | prev_pfn = ~(0UL); | ||
| 192 | |||
| 193 | while (likely(num_pages != 0)) { | ||
| 194 | pfn = vmw_piter_dma_addr(iter) >> PAGE_SHIFT; | ||
| 195 | |||
| 196 | if (pfn != prev_pfn + 1) { | ||
| 197 | |||
| 198 | if (desc_virtual - page_virtual == | ||
| 199 | desc_per_page - 1) | ||
| 200 | break; | ||
| 201 | |||
| 202 | (++desc_virtual)->ppn = cpu_to_le32(pfn); | ||
| 203 | desc_virtual->num_pages = cpu_to_le32(1); | ||
| 204 | } else { | ||
| 205 | uint32_t tmp = | ||
| 206 | le32_to_cpu(desc_virtual->num_pages); | ||
| 207 | desc_virtual->num_pages = cpu_to_le32(tmp + 1); | ||
| 208 | } | ||
| 209 | prev_pfn = pfn; | ||
| 210 | --num_pages; | ||
| 211 | vmw_piter_next(iter); | ||
| 212 | } | ||
| 213 | |||
| 214 | (++desc_virtual)->ppn = DMA_PAGE_INVALID; | ||
| 215 | desc_virtual->num_pages = cpu_to_le32(0); | ||
| 216 | kunmap_atomic(page_virtual); | ||
| 217 | } | ||
| 218 | |||
| 219 | desc_dma = 0; | ||
| 220 | list_for_each_entry_reverse(page, desc_pages, lru) { | ||
| 221 | page_virtual = kmap_atomic(page); | ||
| 222 | page_virtual[desc_per_page].ppn = cpu_to_le32 | ||
| 223 | (desc_dma >> PAGE_SHIFT); | ||
| 224 | kunmap_atomic(page_virtual); | ||
| 225 | desc_dma = dma_map_page(dev, page, 0, PAGE_SIZE, | ||
| 226 | DMA_TO_DEVICE); | ||
| 227 | |||
| 228 | if (unlikely(dma_mapping_error(dev, desc_dma))) | ||
| 229 | goto out_err; | ||
| 230 | } | ||
| 231 | *first_dma = desc_dma; | ||
| 232 | |||
| 233 | return 0; | ||
| 234 | out_err: | ||
| 235 | vmw_gmr_free_descriptors(dev, DMA_ADDR_INVALID, desc_pages); | ||
| 236 | return ret; | ||
| 237 | } | ||
| 238 | |||
| 239 | static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv, | ||
| 240 | int gmr_id, dma_addr_t desc_dma) | ||
| 241 | { | ||
| 242 | mutex_lock(&dev_priv->hw_mutex); | ||
| 243 | |||
| 244 | vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id); | ||
| 245 | wmb(); | ||
| 246 | vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, desc_dma >> PAGE_SHIFT); | ||
| 247 | mb(); | ||
| 248 | |||
| 249 | mutex_unlock(&dev_priv->hw_mutex); | ||
| 250 | |||
| 251 | } | ||
| 252 | |||
| 253 | int vmw_gmr_bind(struct vmw_private *dev_priv, | 128 | int vmw_gmr_bind(struct vmw_private *dev_priv, |
| 254 | const struct vmw_sg_table *vsgt, | 129 | const struct vmw_sg_table *vsgt, |
| 255 | unsigned long num_pages, | 130 | unsigned long num_pages, |
| 256 | int gmr_id) | 131 | int gmr_id) |
| 257 | { | 132 | { |
| 258 | struct list_head desc_pages; | ||
| 259 | dma_addr_t desc_dma = 0; | ||
| 260 | struct device *dev = dev_priv->dev->dev; | ||
| 261 | struct vmw_piter data_iter; | 133 | struct vmw_piter data_iter; |
| 262 | int ret; | ||
| 263 | 134 | ||
| 264 | vmw_piter_start(&data_iter, vsgt, 0); | 135 | vmw_piter_start(&data_iter, vsgt, 0); |
| 265 | 136 | ||
| 266 | if (unlikely(!vmw_piter_next(&data_iter))) | 137 | if (unlikely(!vmw_piter_next(&data_iter))) |
| 267 | return 0; | 138 | return 0; |
| 268 | 139 | ||
| 269 | if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) | 140 | if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR2))) |
| 270 | return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id); | ||
| 271 | |||
| 272 | if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR))) | ||
| 273 | return -EINVAL; | ||
| 274 | |||
| 275 | if (vsgt->num_regions > dev_priv->max_gmr_descriptors) | ||
| 276 | return -EINVAL; | 141 | return -EINVAL; |
| 277 | 142 | ||
| 278 | INIT_LIST_HEAD(&desc_pages); | 143 | return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id); |
| 279 | |||
| 280 | ret = vmw_gmr_build_descriptors(dev, &desc_pages, &data_iter, | ||
| 281 | num_pages, &desc_dma); | ||
| 282 | if (unlikely(ret != 0)) | ||
| 283 | return ret; | ||
| 284 | |||
| 285 | vmw_gmr_fire_descriptors(dev_priv, gmr_id, desc_dma); | ||
| 286 | vmw_gmr_free_descriptors(dev, desc_dma, &desc_pages); | ||
| 287 | |||
| 288 | return 0; | ||
| 289 | } | 144 | } |
| 290 | 145 | ||
| 291 | 146 | ||
| 292 | void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id) | 147 | void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id) |
| 293 | { | 148 | { |
| 294 | if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) { | 149 | if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) |
| 295 | vmw_gmr2_unbind(dev_priv, gmr_id); | 150 | vmw_gmr2_unbind(dev_priv, gmr_id); |
| 296 | return; | ||
| 297 | } | ||
| 298 | |||
| 299 | mutex_lock(&dev_priv->hw_mutex); | ||
| 300 | vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id); | ||
| 301 | wmb(); | ||
| 302 | vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, 0); | ||
| 303 | mb(); | ||
| 304 | mutex_unlock(&dev_priv->hw_mutex); | ||
| 305 | } | 151 | } |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c index c5c054ae9056..b1273e8e9a69 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c | |||
| @@ -125,10 +125,21 @@ static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man, | |||
| 125 | return -ENOMEM; | 125 | return -ENOMEM; |
| 126 | 126 | ||
| 127 | spin_lock_init(&gman->lock); | 127 | spin_lock_init(&gman->lock); |
| 128 | gman->max_gmr_pages = dev_priv->max_gmr_pages; | ||
| 129 | gman->used_gmr_pages = 0; | 128 | gman->used_gmr_pages = 0; |
| 130 | ida_init(&gman->gmr_ida); | 129 | ida_init(&gman->gmr_ida); |
| 131 | gman->max_gmr_ids = p_size; | 130 | |
| 131 | switch (p_size) { | ||
| 132 | case VMW_PL_GMR: | ||
| 133 | gman->max_gmr_ids = dev_priv->max_gmr_ids; | ||
| 134 | gman->max_gmr_pages = dev_priv->max_gmr_pages; | ||
| 135 | break; | ||
| 136 | case VMW_PL_MOB: | ||
| 137 | gman->max_gmr_ids = VMWGFX_NUM_MOB; | ||
| 138 | gman->max_gmr_pages = dev_priv->max_mob_pages; | ||
| 139 | break; | ||
| 140 | default: | ||
| 141 | BUG(); | ||
| 142 | } | ||
| 132 | man->priv = (void *) gman; | 143 | man->priv = (void *) gman; |
| 133 | return 0; | 144 | return 0; |
| 134 | } | 145 | } |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index 45d5b5ab6ca9..116c49736763 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | |||
| @@ -53,7 +53,7 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, | |||
| 53 | param->value = dev_priv->fifo.capabilities; | 53 | param->value = dev_priv->fifo.capabilities; |
| 54 | break; | 54 | break; |
| 55 | case DRM_VMW_PARAM_MAX_FB_SIZE: | 55 | case DRM_VMW_PARAM_MAX_FB_SIZE: |
| 56 | param->value = dev_priv->vram_size; | 56 | param->value = dev_priv->prim_bb_mem; |
| 57 | break; | 57 | break; |
| 58 | case DRM_VMW_PARAM_FIFO_HW_VERSION: | 58 | case DRM_VMW_PARAM_FIFO_HW_VERSION: |
| 59 | { | 59 | { |
| @@ -71,6 +71,17 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, | |||
| 71 | case DRM_VMW_PARAM_MAX_SURF_MEMORY: | 71 | case DRM_VMW_PARAM_MAX_SURF_MEMORY: |
| 72 | param->value = dev_priv->memory_size; | 72 | param->value = dev_priv->memory_size; |
| 73 | break; | 73 | break; |
| 74 | case DRM_VMW_PARAM_3D_CAPS_SIZE: | ||
| 75 | if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) | ||
| 76 | param->value = SVGA3D_DEVCAP_MAX; | ||
| 77 | else | ||
| 78 | param->value = (SVGA_FIFO_3D_CAPS_LAST - | ||
| 79 | SVGA_FIFO_3D_CAPS + 1); | ||
| 80 | param->value *= sizeof(uint32_t); | ||
| 81 | break; | ||
| 82 | case DRM_VMW_PARAM_MAX_MOB_MEMORY: | ||
| 83 | param->value = dev_priv->max_mob_pages * PAGE_SIZE; | ||
| 84 | break; | ||
| 74 | default: | 85 | default: |
| 75 | DRM_ERROR("Illegal vmwgfx get param request: %d\n", | 86 | DRM_ERROR("Illegal vmwgfx get param request: %d\n", |
| 76 | param->param); | 87 | param->param); |
| @@ -92,13 +103,19 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, | |||
| 92 | void __user *buffer = (void __user *)((unsigned long)(arg->buffer)); | 103 | void __user *buffer = (void __user *)((unsigned long)(arg->buffer)); |
| 93 | void *bounce; | 104 | void *bounce; |
| 94 | int ret; | 105 | int ret; |
| 106 | bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS); | ||
| 95 | 107 | ||
| 96 | if (unlikely(arg->pad64 != 0)) { | 108 | if (unlikely(arg->pad64 != 0)) { |
| 97 | DRM_ERROR("Illegal GET_3D_CAP argument.\n"); | 109 | DRM_ERROR("Illegal GET_3D_CAP argument.\n"); |
| 98 | return -EINVAL; | 110 | return -EINVAL; |
| 99 | } | 111 | } |
| 100 | 112 | ||
| 101 | size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) << 2; | 113 | if (gb_objects) |
| 114 | size = SVGA3D_DEVCAP_MAX; | ||
| 115 | else | ||
| 116 | size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1); | ||
| 117 | |||
| 118 | size *= sizeof(uint32_t); | ||
| 102 | 119 | ||
| 103 | if (arg->max_size < size) | 120 | if (arg->max_size < size) |
| 104 | size = arg->max_size; | 121 | size = arg->max_size; |
| @@ -109,8 +126,22 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, | |||
| 109 | return -ENOMEM; | 126 | return -ENOMEM; |
| 110 | } | 127 | } |
| 111 | 128 | ||
| 112 | fifo_mem = dev_priv->mmio_virt; | 129 | if (gb_objects) { |
| 113 | memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size); | 130 | int i; |
| 131 | uint32_t *bounce32 = (uint32_t *) bounce; | ||
| 132 | |||
| 133 | mutex_lock(&dev_priv->hw_mutex); | ||
| 134 | for (i = 0; i < SVGA3D_DEVCAP_MAX; ++i) { | ||
| 135 | vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); | ||
| 136 | *bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP); | ||
| 137 | } | ||
| 138 | mutex_unlock(&dev_priv->hw_mutex); | ||
| 139 | |||
| 140 | } else { | ||
| 141 | |||
| 142 | fifo_mem = dev_priv->mmio_virt; | ||
| 143 | memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size); | ||
| 144 | } | ||
| 114 | 145 | ||
| 115 | ret = copy_to_user(buffer, bounce, size); | 146 | ret = copy_to_user(buffer, bounce, size); |
| 116 | if (ret) | 147 | if (ret) |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c index 4640adbcaf91..0c423766c441 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | |||
| @@ -30,7 +30,7 @@ | |||
| 30 | 30 | ||
| 31 | #define VMW_FENCE_WRAP (1 << 24) | 31 | #define VMW_FENCE_WRAP (1 << 24) |
| 32 | 32 | ||
| 33 | irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS) | 33 | irqreturn_t vmw_irq_handler(int irq, void *arg) |
| 34 | { | 34 | { |
| 35 | struct drm_device *dev = (struct drm_device *)arg; | 35 | struct drm_device *dev = (struct drm_device *)arg; |
| 36 | struct vmw_private *dev_priv = vmw_priv(dev); | 36 | struct vmw_private *dev_priv = vmw_priv(dev); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 03f1c2038631..8a650413dea5 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
| @@ -40,7 +40,7 @@ struct vmw_clip_rect { | |||
| 40 | * Clip @num_rects number of @rects against @clip storing the | 40 | * Clip @num_rects number of @rects against @clip storing the |
| 41 | * results in @out_rects and the number of passed rects in @out_num. | 41 | * results in @out_rects and the number of passed rects in @out_num. |
| 42 | */ | 42 | */ |
| 43 | void vmw_clip_cliprects(struct drm_clip_rect *rects, | 43 | static void vmw_clip_cliprects(struct drm_clip_rect *rects, |
| 44 | int num_rects, | 44 | int num_rects, |
| 45 | struct vmw_clip_rect clip, | 45 | struct vmw_clip_rect clip, |
| 46 | SVGASignedRect *out_rects, | 46 | SVGASignedRect *out_rects, |
| @@ -423,7 +423,7 @@ struct vmw_framebuffer_surface { | |||
| 423 | struct drm_master *master; | 423 | struct drm_master *master; |
| 424 | }; | 424 | }; |
| 425 | 425 | ||
| 426 | void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer) | 426 | static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer) |
| 427 | { | 427 | { |
| 428 | struct vmw_framebuffer_surface *vfbs = | 428 | struct vmw_framebuffer_surface *vfbs = |
| 429 | vmw_framebuffer_to_vfbs(framebuffer); | 429 | vmw_framebuffer_to_vfbs(framebuffer); |
| @@ -589,7 +589,7 @@ out_free_tmp: | |||
| 589 | return ret; | 589 | return ret; |
| 590 | } | 590 | } |
| 591 | 591 | ||
| 592 | int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, | 592 | static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, |
| 593 | struct drm_file *file_priv, | 593 | struct drm_file *file_priv, |
| 594 | unsigned flags, unsigned color, | 594 | unsigned flags, unsigned color, |
| 595 | struct drm_clip_rect *clips, | 595 | struct drm_clip_rect *clips, |
| @@ -609,9 +609,13 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, | |||
| 609 | if (!dev_priv->sou_priv) | 609 | if (!dev_priv->sou_priv) |
| 610 | return -EINVAL; | 610 | return -EINVAL; |
| 611 | 611 | ||
| 612 | drm_modeset_lock_all(dev_priv->dev); | ||
| 613 | |||
| 612 | ret = ttm_read_lock(&vmaster->lock, true); | 614 | ret = ttm_read_lock(&vmaster->lock, true); |
| 613 | if (unlikely(ret != 0)) | 615 | if (unlikely(ret != 0)) { |
| 616 | drm_modeset_unlock_all(dev_priv->dev); | ||
| 614 | return ret; | 617 | return ret; |
| 618 | } | ||
| 615 | 619 | ||
| 616 | if (!num_clips) { | 620 | if (!num_clips) { |
| 617 | num_clips = 1; | 621 | num_clips = 1; |
| @@ -629,6 +633,9 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, | |||
| 629 | clips, num_clips, inc, NULL); | 633 | clips, num_clips, inc, NULL); |
| 630 | 634 | ||
| 631 | ttm_read_unlock(&vmaster->lock); | 635 | ttm_read_unlock(&vmaster->lock); |
| 636 | |||
| 637 | drm_modeset_unlock_all(dev_priv->dev); | ||
| 638 | |||
| 632 | return 0; | 639 | return 0; |
| 633 | } | 640 | } |
| 634 | 641 | ||
| @@ -665,9 +672,9 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, | |||
| 665 | 672 | ||
| 666 | if (unlikely(surface->mip_levels[0] != 1 || | 673 | if (unlikely(surface->mip_levels[0] != 1 || |
| 667 | surface->num_sizes != 1 || | 674 | surface->num_sizes != 1 || |
| 668 | surface->sizes[0].width < mode_cmd->width || | 675 | surface->base_size.width < mode_cmd->width || |
| 669 | surface->sizes[0].height < mode_cmd->height || | 676 | surface->base_size.height < mode_cmd->height || |
| 670 | surface->sizes[0].depth != 1)) { | 677 | surface->base_size.depth != 1)) { |
| 671 | DRM_ERROR("Incompatible surface dimensions " | 678 | DRM_ERROR("Incompatible surface dimensions " |
| 672 | "for requested mode.\n"); | 679 | "for requested mode.\n"); |
| 673 | return -EINVAL; | 680 | return -EINVAL; |
| @@ -754,7 +761,7 @@ struct vmw_framebuffer_dmabuf { | |||
| 754 | struct vmw_dma_buffer *buffer; | 761 | struct vmw_dma_buffer *buffer; |
| 755 | }; | 762 | }; |
| 756 | 763 | ||
| 757 | void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer) | 764 | static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer) |
| 758 | { | 765 | { |
| 759 | struct vmw_framebuffer_dmabuf *vfbd = | 766 | struct vmw_framebuffer_dmabuf *vfbd = |
| 760 | vmw_framebuffer_to_vfbd(framebuffer); | 767 | vmw_framebuffer_to_vfbd(framebuffer); |
| @@ -940,7 +947,7 @@ static int do_dmabuf_dirty_sou(struct drm_file *file_priv, | |||
| 940 | return ret; | 947 | return ret; |
| 941 | } | 948 | } |
| 942 | 949 | ||
| 943 | int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, | 950 | static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, |
| 944 | struct drm_file *file_priv, | 951 | struct drm_file *file_priv, |
| 945 | unsigned flags, unsigned color, | 952 | unsigned flags, unsigned color, |
| 946 | struct drm_clip_rect *clips, | 953 | struct drm_clip_rect *clips, |
| @@ -953,9 +960,13 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, | |||
| 953 | struct drm_clip_rect norect; | 960 | struct drm_clip_rect norect; |
| 954 | int ret, increment = 1; | 961 | int ret, increment = 1; |
| 955 | 962 | ||
| 963 | drm_modeset_lock_all(dev_priv->dev); | ||
| 964 | |||
| 956 | ret = ttm_read_lock(&vmaster->lock, true); | 965 | ret = ttm_read_lock(&vmaster->lock, true); |
| 957 | if (unlikely(ret != 0)) | 966 | if (unlikely(ret != 0)) { |
| 967 | drm_modeset_unlock_all(dev_priv->dev); | ||
| 958 | return ret; | 968 | return ret; |
| 969 | } | ||
| 959 | 970 | ||
| 960 | if (!num_clips) { | 971 | if (!num_clips) { |
| 961 | num_clips = 1; | 972 | num_clips = 1; |
| @@ -979,6 +990,9 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, | |||
| 979 | } | 990 | } |
| 980 | 991 | ||
| 981 | ttm_read_unlock(&vmaster->lock); | 992 | ttm_read_unlock(&vmaster->lock); |
| 993 | |||
| 994 | drm_modeset_unlock_all(dev_priv->dev); | ||
| 995 | |||
| 982 | return ret; | 996 | return ret; |
| 983 | } | 997 | } |
| 984 | 998 | ||
| @@ -1631,7 +1645,7 @@ bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv, | |||
| 1631 | uint32_t pitch, | 1645 | uint32_t pitch, |
| 1632 | uint32_t height) | 1646 | uint32_t height) |
| 1633 | { | 1647 | { |
| 1634 | return ((u64) pitch * (u64) height) < (u64) dev_priv->vram_size; | 1648 | return ((u64) pitch * (u64) height) < (u64) dev_priv->prim_bb_mem; |
| 1635 | } | 1649 | } |
| 1636 | 1650 | ||
| 1637 | 1651 | ||
| @@ -1663,7 +1677,7 @@ void vmw_disable_vblank(struct drm_device *dev, int crtc) | |||
| 1663 | * Small shared kms functions. | 1677 | * Small shared kms functions. |
| 1664 | */ | 1678 | */ |
| 1665 | 1679 | ||
| 1666 | int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num, | 1680 | static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num, |
| 1667 | struct drm_vmw_rect *rects) | 1681 | struct drm_vmw_rect *rects) |
| 1668 | { | 1682 | { |
| 1669 | struct drm_device *dev = dev_priv->dev; | 1683 | struct drm_device *dev = dev_priv->dev; |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c new file mode 100644 index 000000000000..4910e7b81811 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c | |||
| @@ -0,0 +1,652 @@ | |||
| 1 | /************************************************************************** | ||
| 2 | * | ||
| 3 | * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA | ||
| 4 | * All Rights Reserved. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the | ||
| 8 | * "Software"), to deal in the Software without restriction, including | ||
| 9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
| 10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
| 11 | * permit persons to whom the Software is furnished to do so, subject to | ||
| 12 | * the following conditions: | ||
| 13 | * | ||
| 14 | * The above copyright notice and this permission notice (including the | ||
| 15 | * next paragraph) shall be included in all copies or substantial portions | ||
| 16 | * of the Software. | ||
| 17 | * | ||
| 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
| 21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
| 22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
| 23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
| 24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
| 25 | * | ||
| 26 | **************************************************************************/ | ||
| 27 | |||
| 28 | #include "vmwgfx_drv.h" | ||
| 29 | |||
| 30 | /* | ||
| 31 | * If we set up the screen target otable, screen objects stop working. | ||
| 32 | */ | ||
| 33 | |||
| 34 | #define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE) ? 0 : 1) | ||
| 35 | |||
| 36 | #ifdef CONFIG_64BIT | ||
| 37 | #define VMW_PPN_SIZE 8 | ||
| 38 | #define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH64_0 | ||
| 39 | #define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH64_1 | ||
| 40 | #define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH64_2 | ||
| 41 | #else | ||
| 42 | #define VMW_PPN_SIZE 4 | ||
| 43 | #define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH_0 | ||
| 44 | #define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH_1 | ||
| 45 | #define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH_2 | ||
| 46 | #endif | ||
| 47 | |||
| 48 | /* | ||
| 49 | * struct vmw_mob - Structure containing page table and metadata for a | ||
| 50 | * Guest Memory OBject. | ||
| 51 | * | ||
| 52 | * @num_pages Number of pages that make up the page table. | ||
| 53 | * @pt_level The indirection level of the page table. 0-2. | ||
| 54 | * @pt_root_page DMA address of the level 0 page of the page table. | ||
| 55 | */ | ||
| 56 | struct vmw_mob { | ||
| 57 | struct ttm_buffer_object *pt_bo; | ||
| 58 | unsigned long num_pages; | ||
| 59 | unsigned pt_level; | ||
| 60 | dma_addr_t pt_root_page; | ||
| 61 | uint32_t id; | ||
| 62 | }; | ||
| 63 | |||
| 64 | /* | ||
| 65 | * struct vmw_otable - Guest Memory OBject table metadata | ||
| 66 | * | ||
| 67 | * @size: Size of the table (page-aligned). | ||
| 68 | * @page_table: Pointer to a struct vmw_mob holding the page table. | ||
| 69 | */ | ||
| 70 | struct vmw_otable { | ||
| 71 | unsigned long size; | ||
| 72 | struct vmw_mob *page_table; | ||
| 73 | }; | ||
| 74 | |||
| 75 | static int vmw_mob_pt_populate(struct vmw_private *dev_priv, | ||
| 76 | struct vmw_mob *mob); | ||
| 77 | static void vmw_mob_pt_setup(struct vmw_mob *mob, | ||
| 78 | struct vmw_piter data_iter, | ||
| 79 | unsigned long num_data_pages); | ||
| 80 | |||
| 81 | /* | ||
| 82 | * vmw_setup_otable_base - Issue an object table base setup command to | ||
| 83 | * the device | ||
| 84 | * | ||
| 85 | * @dev_priv: Pointer to a device private structure | ||
| 86 | * @type: Type of object table base | ||
| 87 | * @offset Start of table offset into dev_priv::otable_bo | ||
| 88 | * @otable Pointer to otable metadata; | ||
| 89 | * | ||
| 90 | * This function returns -ENOMEM if it fails to reserve fifo space, | ||
| 91 | * and may block waiting for fifo space. | ||
| 92 | */ | ||
| 93 | static int vmw_setup_otable_base(struct vmw_private *dev_priv, | ||
| 94 | SVGAOTableType type, | ||
| 95 | unsigned long offset, | ||
| 96 | struct vmw_otable *otable) | ||
| 97 | { | ||
| 98 | struct { | ||
| 99 | SVGA3dCmdHeader header; | ||
| 100 | SVGA3dCmdSetOTableBase64 body; | ||
| 101 | } *cmd; | ||
| 102 | struct vmw_mob *mob; | ||
| 103 | const struct vmw_sg_table *vsgt; | ||
| 104 | struct vmw_piter iter; | ||
| 105 | int ret; | ||
| 106 | |||
| 107 | BUG_ON(otable->page_table != NULL); | ||
| 108 | |||
| 109 | vsgt = vmw_bo_sg_table(dev_priv->otable_bo); | ||
| 110 | vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT); | ||
| 111 | WARN_ON(!vmw_piter_next(&iter)); | ||
| 112 | |||
| 113 | mob = vmw_mob_create(otable->size >> PAGE_SHIFT); | ||
| 114 | if (unlikely(mob == NULL)) { | ||
| 115 | DRM_ERROR("Failed creating OTable page table.\n"); | ||
| 116 | return -ENOMEM; | ||
| 117 | } | ||
| 118 | |||
| 119 | if (otable->size <= PAGE_SIZE) { | ||
| 120 | mob->pt_level = VMW_MOBFMT_PTDEPTH_0; | ||
| 121 | mob->pt_root_page = vmw_piter_dma_addr(&iter); | ||
| 122 | } else if (vsgt->num_regions == 1) { | ||
| 123 | mob->pt_level = SVGA3D_MOBFMT_RANGE; | ||
| 124 | mob->pt_root_page = vmw_piter_dma_addr(&iter); | ||
| 125 | } else { | ||
| 126 | ret = vmw_mob_pt_populate(dev_priv, mob); | ||
| 127 | if (unlikely(ret != 0)) | ||
| 128 | goto out_no_populate; | ||
| 129 | |||
| 130 | vmw_mob_pt_setup(mob, iter, otable->size >> PAGE_SHIFT); | ||
| 131 | mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1; | ||
| 132 | } | ||
| 133 | |||
| 134 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
| 135 | if (unlikely(cmd == NULL)) { | ||
| 136 | DRM_ERROR("Failed reserving FIFO space for OTable setup.\n"); | ||
| 137 | goto out_no_fifo; | ||
| 138 | } | ||
| 139 | |||
| 140 | memset(cmd, 0, sizeof(*cmd)); | ||
| 141 | cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE64; | ||
| 142 | cmd->header.size = sizeof(cmd->body); | ||
| 143 | cmd->body.type = type; | ||
| 144 | cmd->body.baseAddress = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT); | ||
| 145 | cmd->body.sizeInBytes = otable->size; | ||
| 146 | cmd->body.validSizeInBytes = 0; | ||
| 147 | cmd->body.ptDepth = mob->pt_level; | ||
| 148 | |||
| 149 | /* | ||
| 150 | * The device doesn't support this, But the otable size is | ||
| 151 | * determined at compile-time, so this BUG shouldn't trigger | ||
| 152 | * randomly. | ||
| 153 | */ | ||
| 154 | BUG_ON(mob->pt_level == VMW_MOBFMT_PTDEPTH_2); | ||
| 155 | |||
| 156 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
| 157 | otable->page_table = mob; | ||
| 158 | |||
| 159 | return 0; | ||
| 160 | |||
| 161 | out_no_fifo: | ||
| 162 | out_no_populate: | ||
| 163 | vmw_mob_destroy(mob); | ||
| 164 | return ret; | ||
| 165 | } | ||
| 166 | |||
| 167 | /* | ||
| 168 | * vmw_takedown_otable_base - Issue an object table base takedown command | ||
| 169 | * to the device | ||
| 170 | * | ||
| 171 | * @dev_priv: Pointer to a device private structure | ||
| 172 | * @type: Type of object table base | ||
| 173 | * | ||
| 174 | */ | ||
| 175 | static void vmw_takedown_otable_base(struct vmw_private *dev_priv, | ||
| 176 | SVGAOTableType type, | ||
| 177 | struct vmw_otable *otable) | ||
| 178 | { | ||
| 179 | struct { | ||
| 180 | SVGA3dCmdHeader header; | ||
| 181 | SVGA3dCmdSetOTableBase body; | ||
| 182 | } *cmd; | ||
| 183 | struct ttm_buffer_object *bo; | ||
| 184 | |||
| 185 | if (otable->page_table == NULL) | ||
| 186 | return; | ||
| 187 | |||
| 188 | bo = otable->page_table->pt_bo; | ||
| 189 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
| 190 | if (unlikely(cmd == NULL)) | ||
| 191 | DRM_ERROR("Failed reserving FIFO space for OTable setup.\n"); | ||
| 192 | |||
| 193 | memset(cmd, 0, sizeof(*cmd)); | ||
| 194 | cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE; | ||
| 195 | cmd->header.size = sizeof(cmd->body); | ||
| 196 | cmd->body.type = type; | ||
| 197 | cmd->body.baseAddress = 0; | ||
| 198 | cmd->body.sizeInBytes = 0; | ||
| 199 | cmd->body.validSizeInBytes = 0; | ||
| 200 | cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID; | ||
| 201 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
| 202 | |||
| 203 | if (bo) { | ||
| 204 | int ret; | ||
| 205 | |||
| 206 | ret = ttm_bo_reserve(bo, false, true, false, NULL); | ||
| 207 | BUG_ON(ret != 0); | ||
| 208 | |||
| 209 | vmw_fence_single_bo(bo, NULL); | ||
| 210 | ttm_bo_unreserve(bo); | ||
| 211 | } | ||
| 212 | |||
| 213 | vmw_mob_destroy(otable->page_table); | ||
| 214 | otable->page_table = NULL; | ||
| 215 | } | ||
| 216 | |||
| 217 | /* | ||
| 218 | * vmw_otables_setup - Set up guest backed memory object tables | ||
| 219 | * | ||
| 220 | * @dev_priv: Pointer to a device private structure | ||
| 221 | * | ||
| 222 | * Takes care of the device guest backed surface | ||
| 223 | * initialization, by setting up the guest backed memory object tables. | ||
| 224 | * Returns 0 on success and various error codes on failure. A succesful return | ||
| 225 | * means the object tables can be taken down using the vmw_otables_takedown | ||
| 226 | * function. | ||
| 227 | */ | ||
| 228 | int vmw_otables_setup(struct vmw_private *dev_priv) | ||
| 229 | { | ||
| 230 | unsigned long offset; | ||
| 231 | unsigned long bo_size; | ||
| 232 | struct vmw_otable *otables; | ||
| 233 | SVGAOTableType i; | ||
| 234 | int ret; | ||
| 235 | |||
| 236 | otables = kzalloc(SVGA_OTABLE_DX9_MAX * sizeof(*otables), | ||
| 237 | GFP_KERNEL); | ||
| 238 | if (unlikely(otables == NULL)) { | ||
| 239 | DRM_ERROR("Failed to allocate space for otable " | ||
| 240 | "metadata.\n"); | ||
| 241 | return -ENOMEM; | ||
| 242 | } | ||
| 243 | |||
| 244 | otables[SVGA_OTABLE_MOB].size = | ||
| 245 | VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE; | ||
| 246 | otables[SVGA_OTABLE_SURFACE].size = | ||
| 247 | VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE; | ||
| 248 | otables[SVGA_OTABLE_CONTEXT].size = | ||
| 249 | VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE; | ||
| 250 | otables[SVGA_OTABLE_SHADER].size = | ||
| 251 | VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE; | ||
| 252 | otables[SVGA_OTABLE_SCREEN_TARGET].size = | ||
| 253 | VMWGFX_NUM_GB_SCREEN_TARGET * | ||
| 254 | SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE; | ||
| 255 | |||
| 256 | bo_size = 0; | ||
| 257 | for (i = 0; i < SVGA_OTABLE_DX9_MAX; ++i) { | ||
| 258 | otables[i].size = | ||
| 259 | (otables[i].size + PAGE_SIZE - 1) & PAGE_MASK; | ||
| 260 | bo_size += otables[i].size; | ||
| 261 | } | ||
| 262 | |||
| 263 | ret = ttm_bo_create(&dev_priv->bdev, bo_size, | ||
| 264 | ttm_bo_type_device, | ||
| 265 | &vmw_sys_ne_placement, | ||
| 266 | 0, false, NULL, | ||
| 267 | &dev_priv->otable_bo); | ||
| 268 | |||
| 269 | if (unlikely(ret != 0)) | ||
| 270 | goto out_no_bo; | ||
| 271 | |||
| 272 | ret = ttm_bo_reserve(dev_priv->otable_bo, false, true, false, NULL); | ||
| 273 | BUG_ON(ret != 0); | ||
| 274 | ret = vmw_bo_driver.ttm_tt_populate(dev_priv->otable_bo->ttm); | ||
| 275 | if (unlikely(ret != 0)) | ||
| 276 | goto out_unreserve; | ||
| 277 | ret = vmw_bo_map_dma(dev_priv->otable_bo); | ||
| 278 | if (unlikely(ret != 0)) | ||
| 279 | goto out_unreserve; | ||
| 280 | |||
| 281 | ttm_bo_unreserve(dev_priv->otable_bo); | ||
| 282 | |||
| 283 | offset = 0; | ||
| 284 | for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) { | ||
| 285 | ret = vmw_setup_otable_base(dev_priv, i, offset, | ||
| 286 | &otables[i]); | ||
| 287 | if (unlikely(ret != 0)) | ||
| 288 | goto out_no_setup; | ||
| 289 | offset += otables[i].size; | ||
| 290 | } | ||
| 291 | |||
| 292 | dev_priv->otables = otables; | ||
| 293 | return 0; | ||
| 294 | |||
| 295 | out_unreserve: | ||
| 296 | ttm_bo_unreserve(dev_priv->otable_bo); | ||
| 297 | out_no_setup: | ||
| 298 | for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) | ||
| 299 | vmw_takedown_otable_base(dev_priv, i, &otables[i]); | ||
| 300 | |||
| 301 | ttm_bo_unref(&dev_priv->otable_bo); | ||
| 302 | out_no_bo: | ||
| 303 | kfree(otables); | ||
| 304 | return ret; | ||
| 305 | } | ||
| 306 | |||
| 307 | |||
| 308 | /* | ||
| 309 | * vmw_otables_takedown - Take down guest backed memory object tables | ||
| 310 | * | ||
| 311 | * @dev_priv: Pointer to a device private structure | ||
| 312 | * | ||
| 313 | * Take down the Guest Memory Object tables. | ||
| 314 | */ | ||
| 315 | void vmw_otables_takedown(struct vmw_private *dev_priv) | ||
| 316 | { | ||
| 317 | SVGAOTableType i; | ||
| 318 | struct ttm_buffer_object *bo = dev_priv->otable_bo; | ||
| 319 | int ret; | ||
| 320 | |||
| 321 | for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) | ||
| 322 | vmw_takedown_otable_base(dev_priv, i, | ||
| 323 | &dev_priv->otables[i]); | ||
| 324 | |||
| 325 | ret = ttm_bo_reserve(bo, false, true, false, NULL); | ||
| 326 | BUG_ON(ret != 0); | ||
| 327 | |||
| 328 | vmw_fence_single_bo(bo, NULL); | ||
| 329 | ttm_bo_unreserve(bo); | ||
| 330 | |||
| 331 | ttm_bo_unref(&dev_priv->otable_bo); | ||
| 332 | kfree(dev_priv->otables); | ||
| 333 | dev_priv->otables = NULL; | ||
| 334 | } | ||
| 335 | |||
| 336 | |||
| 337 | /* | ||
| 338 | * vmw_mob_calculate_pt_pages - Calculate the number of page table pages | ||
| 339 | * needed for a guest backed memory object. | ||
| 340 | * | ||
| 341 | * @data_pages: Number of data pages in the memory object buffer. | ||
| 342 | */ | ||
| 343 | static unsigned long vmw_mob_calculate_pt_pages(unsigned long data_pages) | ||
| 344 | { | ||
| 345 | unsigned long data_size = data_pages * PAGE_SIZE; | ||
| 346 | unsigned long tot_size = 0; | ||
| 347 | |||
| 348 | while (likely(data_size > PAGE_SIZE)) { | ||
| 349 | data_size = DIV_ROUND_UP(data_size, PAGE_SIZE); | ||
| 350 | data_size *= VMW_PPN_SIZE; | ||
| 351 | tot_size += (data_size + PAGE_SIZE - 1) & PAGE_MASK; | ||
| 352 | } | ||
| 353 | |||
| 354 | return tot_size >> PAGE_SHIFT; | ||
| 355 | } | ||
| 356 | |||
| 357 | /* | ||
| 358 | * vmw_mob_create - Create a mob, but don't populate it. | ||
| 359 | * | ||
| 360 | * @data_pages: Number of data pages of the underlying buffer object. | ||
| 361 | */ | ||
| 362 | struct vmw_mob *vmw_mob_create(unsigned long data_pages) | ||
| 363 | { | ||
| 364 | struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL); | ||
| 365 | |||
| 366 | if (unlikely(mob == NULL)) | ||
| 367 | return NULL; | ||
| 368 | |||
| 369 | mob->num_pages = vmw_mob_calculate_pt_pages(data_pages); | ||
| 370 | |||
| 371 | return mob; | ||
| 372 | } | ||
| 373 | |||
| 374 | /* | ||
| 375 | * vmw_mob_pt_populate - Populate the mob pagetable | ||
| 376 | * | ||
| 377 | * @mob: Pointer to the mob the pagetable of which we want to | ||
| 378 | * populate. | ||
| 379 | * | ||
| 380 | * This function allocates memory to be used for the pagetable, and | ||
| 381 | * adjusts TTM memory accounting accordingly. Returns ENOMEM if | ||
| 382 | * memory resources aren't sufficient and may cause TTM buffer objects | ||
| 383 | * to be swapped out by using the TTM memory accounting function. | ||
| 384 | */ | ||
| 385 | static int vmw_mob_pt_populate(struct vmw_private *dev_priv, | ||
| 386 | struct vmw_mob *mob) | ||
| 387 | { | ||
| 388 | int ret; | ||
| 389 | BUG_ON(mob->pt_bo != NULL); | ||
| 390 | |||
| 391 | ret = ttm_bo_create(&dev_priv->bdev, mob->num_pages * PAGE_SIZE, | ||
| 392 | ttm_bo_type_device, | ||
| 393 | &vmw_sys_ne_placement, | ||
| 394 | 0, false, NULL, &mob->pt_bo); | ||
| 395 | if (unlikely(ret != 0)) | ||
| 396 | return ret; | ||
| 397 | |||
| 398 | ret = ttm_bo_reserve(mob->pt_bo, false, true, false, NULL); | ||
| 399 | |||
| 400 | BUG_ON(ret != 0); | ||
| 401 | ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm); | ||
| 402 | if (unlikely(ret != 0)) | ||
| 403 | goto out_unreserve; | ||
| 404 | ret = vmw_bo_map_dma(mob->pt_bo); | ||
| 405 | if (unlikely(ret != 0)) | ||
| 406 | goto out_unreserve; | ||
| 407 | |||
| 408 | ttm_bo_unreserve(mob->pt_bo); | ||
| 409 | |||
| 410 | return 0; | ||
| 411 | |||
| 412 | out_unreserve: | ||
| 413 | ttm_bo_unreserve(mob->pt_bo); | ||
| 414 | ttm_bo_unref(&mob->pt_bo); | ||
| 415 | |||
| 416 | return ret; | ||
| 417 | } | ||
| 418 | |||
| 419 | /** | ||
| 420 | * vmw_mob_assign_ppn - Assign a value to a page table entry | ||
| 421 | * | ||
| 422 | * @addr: Pointer to pointer to page table entry. | ||
| 423 | * @val: The page table entry | ||
| 424 | * | ||
| 425 | * Assigns a value to a page table entry pointed to by *@addr and increments | ||
| 426 | * *@addr according to the page table entry size. | ||
| 427 | */ | ||
| 428 | #if (VMW_PPN_SIZE == 8) | ||
| 429 | static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val) | ||
| 430 | { | ||
| 431 | *((__le64 *) *addr) = cpu_to_le64(val >> PAGE_SHIFT); | ||
| 432 | *addr += 2; | ||
| 433 | } | ||
| 434 | #else | ||
| 435 | static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val) | ||
| 436 | { | ||
| 437 | *(*addr)++ = cpu_to_le32(val >> PAGE_SHIFT); | ||
| 438 | } | ||
| 439 | #endif | ||
| 440 | |||
| 441 | /* | ||
| 442 | * vmw_mob_build_pt - Build a pagetable | ||
| 443 | * | ||
| 444 | * @data_addr: Array of DMA addresses to the underlying buffer | ||
| 445 | * object's data pages. | ||
| 446 | * @num_data_pages: Number of buffer object data pages. | ||
| 447 | * @pt_pages: Array of page pointers to the page table pages. | ||
| 448 | * | ||
| 449 | * Returns the number of page table pages actually used. | ||
| 450 | * Uses atomic kmaps of highmem pages to avoid TLB thrashing. | ||
| 451 | */ | ||
| 452 | static unsigned long vmw_mob_build_pt(struct vmw_piter *data_iter, | ||
| 453 | unsigned long num_data_pages, | ||
| 454 | struct vmw_piter *pt_iter) | ||
| 455 | { | ||
| 456 | unsigned long pt_size = num_data_pages * VMW_PPN_SIZE; | ||
| 457 | unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE); | ||
| 458 | unsigned long pt_page; | ||
| 459 | __le32 *addr, *save_addr; | ||
| 460 | unsigned long i; | ||
| 461 | struct page *page; | ||
| 462 | |||
| 463 | for (pt_page = 0; pt_page < num_pt_pages; ++pt_page) { | ||
| 464 | page = vmw_piter_page(pt_iter); | ||
| 465 | |||
| 466 | save_addr = addr = kmap_atomic(page); | ||
| 467 | |||
| 468 | for (i = 0; i < PAGE_SIZE / VMW_PPN_SIZE; ++i) { | ||
| 469 | vmw_mob_assign_ppn(&addr, | ||
| 470 | vmw_piter_dma_addr(data_iter)); | ||
| 471 | if (unlikely(--num_data_pages == 0)) | ||
| 472 | break; | ||
| 473 | WARN_ON(!vmw_piter_next(data_iter)); | ||
| 474 | } | ||
| 475 | kunmap_atomic(save_addr); | ||
| 476 | vmw_piter_next(pt_iter); | ||
| 477 | } | ||
| 478 | |||
| 479 | return num_pt_pages; | ||
| 480 | } | ||
| 481 | |||
| 482 | /* | ||
| 483 | * vmw_mob_build_pt - Set up a multilevel mob pagetable | ||
| 484 | * | ||
| 485 | * @mob: Pointer to a mob whose page table needs setting up. | ||
| 486 | * @data_addr Array of DMA addresses to the buffer object's data | ||
| 487 | * pages. | ||
| 488 | * @num_data_pages: Number of buffer object data pages. | ||
| 489 | * | ||
| 490 | * Uses tail recursion to set up a multilevel mob page table. | ||
| 491 | */ | ||
| 492 | static void vmw_mob_pt_setup(struct vmw_mob *mob, | ||
| 493 | struct vmw_piter data_iter, | ||
| 494 | unsigned long num_data_pages) | ||
| 495 | { | ||
| 496 | unsigned long num_pt_pages = 0; | ||
| 497 | struct ttm_buffer_object *bo = mob->pt_bo; | ||
| 498 | struct vmw_piter save_pt_iter; | ||
| 499 | struct vmw_piter pt_iter; | ||
| 500 | const struct vmw_sg_table *vsgt; | ||
| 501 | int ret; | ||
| 502 | |||
| 503 | ret = ttm_bo_reserve(bo, false, true, false, NULL); | ||
| 504 | BUG_ON(ret != 0); | ||
| 505 | |||
| 506 | vsgt = vmw_bo_sg_table(bo); | ||
| 507 | vmw_piter_start(&pt_iter, vsgt, 0); | ||
| 508 | BUG_ON(!vmw_piter_next(&pt_iter)); | ||
| 509 | mob->pt_level = 0; | ||
| 510 | while (likely(num_data_pages > 1)) { | ||
| 511 | ++mob->pt_level; | ||
| 512 | BUG_ON(mob->pt_level > 2); | ||
| 513 | save_pt_iter = pt_iter; | ||
| 514 | num_pt_pages = vmw_mob_build_pt(&data_iter, num_data_pages, | ||
| 515 | &pt_iter); | ||
| 516 | data_iter = save_pt_iter; | ||
| 517 | num_data_pages = num_pt_pages; | ||
| 518 | } | ||
| 519 | |||
| 520 | mob->pt_root_page = vmw_piter_dma_addr(&save_pt_iter); | ||
| 521 | ttm_bo_unreserve(bo); | ||
| 522 | } | ||
| 523 | |||
| 524 | /* | ||
| 525 | * vmw_mob_destroy - Destroy a mob, unpopulating first if necessary. | ||
| 526 | * | ||
| 527 | * @mob: Pointer to a mob to destroy. | ||
| 528 | */ | ||
| 529 | void vmw_mob_destroy(struct vmw_mob *mob) | ||
| 530 | { | ||
| 531 | if (mob->pt_bo) | ||
| 532 | ttm_bo_unref(&mob->pt_bo); | ||
| 533 | kfree(mob); | ||
| 534 | } | ||
| 535 | |||
| 536 | /* | ||
| 537 | * vmw_mob_unbind - Hide a mob from the device. | ||
| 538 | * | ||
| 539 | * @dev_priv: Pointer to a device private. | ||
| 540 | * @mob_id: Device id of the mob to unbind. | ||
| 541 | */ | ||
| 542 | void vmw_mob_unbind(struct vmw_private *dev_priv, | ||
| 543 | struct vmw_mob *mob) | ||
| 544 | { | ||
| 545 | struct { | ||
| 546 | SVGA3dCmdHeader header; | ||
| 547 | SVGA3dCmdDestroyGBMob body; | ||
| 548 | } *cmd; | ||
| 549 | int ret; | ||
| 550 | struct ttm_buffer_object *bo = mob->pt_bo; | ||
| 551 | |||
| 552 | if (bo) { | ||
| 553 | ret = ttm_bo_reserve(bo, false, true, false, NULL); | ||
| 554 | /* | ||
| 555 | * Noone else should be using this buffer. | ||
| 556 | */ | ||
| 557 | BUG_ON(ret != 0); | ||
| 558 | } | ||
| 559 | |||
| 560 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
| 561 | if (unlikely(cmd == NULL)) { | ||
| 562 | DRM_ERROR("Failed reserving FIFO space for Memory " | ||
| 563 | "Object unbinding.\n"); | ||
| 564 | } | ||
| 565 | cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB; | ||
| 566 | cmd->header.size = sizeof(cmd->body); | ||
| 567 | cmd->body.mobid = mob->id; | ||
| 568 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
| 569 | if (bo) { | ||
| 570 | vmw_fence_single_bo(bo, NULL); | ||
| 571 | ttm_bo_unreserve(bo); | ||
| 572 | } | ||
| 573 | vmw_3d_resource_dec(dev_priv, false); | ||
| 574 | } | ||
| 575 | |||
| 576 | /* | ||
| 577 | * vmw_mob_bind - Make a mob visible to the device after first | ||
| 578 | * populating it if necessary. | ||
| 579 | * | ||
| 580 | * @dev_priv: Pointer to a device private. | ||
| 581 | * @mob: Pointer to the mob we're making visible. | ||
| 582 | * @data_addr: Array of DMA addresses to the data pages of the underlying | ||
| 583 | * buffer object. | ||
| 584 | * @num_data_pages: Number of data pages of the underlying buffer | ||
| 585 | * object. | ||
| 586 | * @mob_id: Device id of the mob to bind | ||
| 587 | * | ||
| 588 | * This function is intended to be interfaced with the ttm_tt backend | ||
| 589 | * code. | ||
| 590 | */ | ||
| 591 | int vmw_mob_bind(struct vmw_private *dev_priv, | ||
| 592 | struct vmw_mob *mob, | ||
| 593 | const struct vmw_sg_table *vsgt, | ||
| 594 | unsigned long num_data_pages, | ||
| 595 | int32_t mob_id) | ||
| 596 | { | ||
| 597 | int ret; | ||
| 598 | bool pt_set_up = false; | ||
| 599 | struct vmw_piter data_iter; | ||
| 600 | struct { | ||
| 601 | SVGA3dCmdHeader header; | ||
| 602 | SVGA3dCmdDefineGBMob64 body; | ||
| 603 | } *cmd; | ||
| 604 | |||
| 605 | mob->id = mob_id; | ||
| 606 | vmw_piter_start(&data_iter, vsgt, 0); | ||
| 607 | if (unlikely(!vmw_piter_next(&data_iter))) | ||
| 608 | return 0; | ||
| 609 | |||
| 610 | if (likely(num_data_pages == 1)) { | ||
| 611 | mob->pt_level = VMW_MOBFMT_PTDEPTH_0; | ||
| 612 | mob->pt_root_page = vmw_piter_dma_addr(&data_iter); | ||
| 613 | } else if (vsgt->num_regions == 1) { | ||
| 614 | mob->pt_level = SVGA3D_MOBFMT_RANGE; | ||
| 615 | mob->pt_root_page = vmw_piter_dma_addr(&data_iter); | ||
| 616 | } else if (unlikely(mob->pt_bo == NULL)) { | ||
| 617 | ret = vmw_mob_pt_populate(dev_priv, mob); | ||
| 618 | if (unlikely(ret != 0)) | ||
| 619 | return ret; | ||
| 620 | |||
| 621 | vmw_mob_pt_setup(mob, data_iter, num_data_pages); | ||
| 622 | pt_set_up = true; | ||
| 623 | mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1; | ||
| 624 | } | ||
| 625 | |||
| 626 | (void) vmw_3d_resource_inc(dev_priv, false); | ||
| 627 | |||
| 628 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
| 629 | if (unlikely(cmd == NULL)) { | ||
| 630 | DRM_ERROR("Failed reserving FIFO space for Memory " | ||
| 631 | "Object binding.\n"); | ||
| 632 | goto out_no_cmd_space; | ||
| 633 | } | ||
| 634 | |||
| 635 | cmd->header.id = SVGA_3D_CMD_DEFINE_GB_MOB64; | ||
| 636 | cmd->header.size = sizeof(cmd->body); | ||
| 637 | cmd->body.mobid = mob_id; | ||
| 638 | cmd->body.ptDepth = mob->pt_level; | ||
| 639 | cmd->body.base = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT); | ||
| 640 | cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE; | ||
| 641 | |||
| 642 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
| 643 | |||
| 644 | return 0; | ||
| 645 | |||
| 646 | out_no_cmd_space: | ||
| 647 | vmw_3d_resource_dec(dev_priv, false); | ||
| 648 | if (pt_set_up) | ||
| 649 | ttm_bo_unref(&mob->pt_bo); | ||
| 650 | |||
| 651 | return -ENOMEM; | ||
| 652 | } | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 9b5ea2ac7ddf..6fdd82d42f65 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | |||
| @@ -215,6 +215,7 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res, | |||
| 215 | res->func = func; | 215 | res->func = func; |
| 216 | INIT_LIST_HEAD(&res->lru_head); | 216 | INIT_LIST_HEAD(&res->lru_head); |
| 217 | INIT_LIST_HEAD(&res->mob_head); | 217 | INIT_LIST_HEAD(&res->mob_head); |
| 218 | INIT_LIST_HEAD(&res->binding_head); | ||
| 218 | res->id = -1; | 219 | res->id = -1; |
| 219 | res->backup = NULL; | 220 | res->backup = NULL; |
| 220 | res->backup_offset = 0; | 221 | res->backup_offset = 0; |
| @@ -441,6 +442,21 @@ static void vmw_user_dmabuf_release(struct ttm_base_object **p_base) | |||
| 441 | ttm_bo_unref(&bo); | 442 | ttm_bo_unref(&bo); |
| 442 | } | 443 | } |
| 443 | 444 | ||
| 445 | static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base, | ||
| 446 | enum ttm_ref_type ref_type) | ||
| 447 | { | ||
| 448 | struct vmw_user_dma_buffer *user_bo; | ||
| 449 | user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base); | ||
| 450 | |||
| 451 | switch (ref_type) { | ||
| 452 | case TTM_REF_SYNCCPU_WRITE: | ||
| 453 | ttm_bo_synccpu_write_release(&user_bo->dma.base); | ||
| 454 | break; | ||
| 455 | default: | ||
| 456 | BUG(); | ||
| 457 | } | ||
| 458 | } | ||
| 459 | |||
| 444 | /** | 460 | /** |
| 445 | * vmw_user_dmabuf_alloc - Allocate a user dma buffer | 461 | * vmw_user_dmabuf_alloc - Allocate a user dma buffer |
| 446 | * | 462 | * |
| @@ -471,6 +487,8 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv, | |||
| 471 | } | 487 | } |
| 472 | 488 | ||
| 473 | ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size, | 489 | ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size, |
| 490 | (dev_priv->has_mob) ? | ||
| 491 | &vmw_sys_placement : | ||
| 474 | &vmw_vram_sys_placement, true, | 492 | &vmw_vram_sys_placement, true, |
| 475 | &vmw_user_dmabuf_destroy); | 493 | &vmw_user_dmabuf_destroy); |
| 476 | if (unlikely(ret != 0)) | 494 | if (unlikely(ret != 0)) |
| @@ -482,7 +500,8 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv, | |||
| 482 | &user_bo->prime, | 500 | &user_bo->prime, |
| 483 | shareable, | 501 | shareable, |
| 484 | ttm_buffer_type, | 502 | ttm_buffer_type, |
| 485 | &vmw_user_dmabuf_release, NULL); | 503 | &vmw_user_dmabuf_release, |
| 504 | &vmw_user_dmabuf_ref_obj_release); | ||
| 486 | if (unlikely(ret != 0)) { | 505 | if (unlikely(ret != 0)) { |
| 487 | ttm_bo_unref(&tmp); | 506 | ttm_bo_unref(&tmp); |
| 488 | goto out_no_base_object; | 507 | goto out_no_base_object; |
| @@ -515,6 +534,130 @@ int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo, | |||
| 515 | vmw_user_bo->prime.base.shareable) ? 0 : -EPERM; | 534 | vmw_user_bo->prime.base.shareable) ? 0 : -EPERM; |
| 516 | } | 535 | } |
| 517 | 536 | ||
| 537 | /** | ||
| 538 | * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu | ||
| 539 | * access, idling previous GPU operations on the buffer and optionally | ||
| 540 | * blocking it for further command submissions. | ||
| 541 | * | ||
| 542 | * @user_bo: Pointer to the buffer object being grabbed for CPU access | ||
| 543 | * @tfile: Identifying the caller. | ||
| 544 | * @flags: Flags indicating how the grab should be performed. | ||
| 545 | * | ||
| 546 | * A blocking grab will be automatically released when @tfile is closed. | ||
| 547 | */ | ||
| 548 | static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo, | ||
| 549 | struct ttm_object_file *tfile, | ||
| 550 | uint32_t flags) | ||
| 551 | { | ||
| 552 | struct ttm_buffer_object *bo = &user_bo->dma.base; | ||
| 553 | bool existed; | ||
| 554 | int ret; | ||
| 555 | |||
| 556 | if (flags & drm_vmw_synccpu_allow_cs) { | ||
| 557 | struct ttm_bo_device *bdev = bo->bdev; | ||
| 558 | |||
| 559 | spin_lock(&bdev->fence_lock); | ||
| 560 | ret = ttm_bo_wait(bo, false, true, | ||
| 561 | !!(flags & drm_vmw_synccpu_dontblock)); | ||
| 562 | spin_unlock(&bdev->fence_lock); | ||
| 563 | return ret; | ||
| 564 | } | ||
| 565 | |||
| 566 | ret = ttm_bo_synccpu_write_grab | ||
| 567 | (bo, !!(flags & drm_vmw_synccpu_dontblock)); | ||
| 568 | if (unlikely(ret != 0)) | ||
| 569 | return ret; | ||
| 570 | |||
| 571 | ret = ttm_ref_object_add(tfile, &user_bo->prime.base, | ||
| 572 | TTM_REF_SYNCCPU_WRITE, &existed); | ||
| 573 | if (ret != 0 || existed) | ||
| 574 | ttm_bo_synccpu_write_release(&user_bo->dma.base); | ||
| 575 | |||
| 576 | return ret; | ||
| 577 | } | ||
| 578 | |||
| 579 | /** | ||
| 580 | * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access, | ||
| 581 | * and unblock command submission on the buffer if blocked. | ||
| 582 | * | ||
| 583 | * @handle: Handle identifying the buffer object. | ||
| 584 | * @tfile: Identifying the caller. | ||
| 585 | * @flags: Flags indicating the type of release. | ||
| 586 | */ | ||
| 587 | static int vmw_user_dmabuf_synccpu_release(uint32_t handle, | ||
| 588 | struct ttm_object_file *tfile, | ||
| 589 | uint32_t flags) | ||
| 590 | { | ||
| 591 | if (!(flags & drm_vmw_synccpu_allow_cs)) | ||
| 592 | return ttm_ref_object_base_unref(tfile, handle, | ||
| 593 | TTM_REF_SYNCCPU_WRITE); | ||
| 594 | |||
| 595 | return 0; | ||
| 596 | } | ||
| 597 | |||
| 598 | /** | ||
| 599 | * vmw_user_dmabuf_synccpu_release - ioctl function implementing the synccpu | ||
| 600 | * functionality. | ||
| 601 | * | ||
| 602 | * @dev: Identifies the drm device. | ||
| 603 | * @data: Pointer to the ioctl argument. | ||
| 604 | * @file_priv: Identifies the caller. | ||
| 605 | * | ||
| 606 | * This function checks the ioctl arguments for validity and calls the | ||
| 607 | * relevant synccpu functions. | ||
| 608 | */ | ||
| 609 | int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data, | ||
| 610 | struct drm_file *file_priv) | ||
| 611 | { | ||
| 612 | struct drm_vmw_synccpu_arg *arg = | ||
| 613 | (struct drm_vmw_synccpu_arg *) data; | ||
| 614 | struct vmw_dma_buffer *dma_buf; | ||
| 615 | struct vmw_user_dma_buffer *user_bo; | ||
| 616 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
| 617 | int ret; | ||
| 618 | |||
| 619 | if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0 | ||
| 620 | || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write | | ||
| 621 | drm_vmw_synccpu_dontblock | | ||
| 622 | drm_vmw_synccpu_allow_cs)) != 0) { | ||
| 623 | DRM_ERROR("Illegal synccpu flags.\n"); | ||
| 624 | return -EINVAL; | ||
| 625 | } | ||
| 626 | |||
| 627 | switch (arg->op) { | ||
| 628 | case drm_vmw_synccpu_grab: | ||
| 629 | ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf); | ||
| 630 | if (unlikely(ret != 0)) | ||
| 631 | return ret; | ||
| 632 | |||
| 633 | user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, | ||
| 634 | dma); | ||
| 635 | ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags); | ||
| 636 | vmw_dmabuf_unreference(&dma_buf); | ||
| 637 | if (unlikely(ret != 0 && ret != -ERESTARTSYS && | ||
| 638 | ret != -EBUSY)) { | ||
| 639 | DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n", | ||
| 640 | (unsigned int) arg->handle); | ||
| 641 | return ret; | ||
| 642 | } | ||
| 643 | break; | ||
| 644 | case drm_vmw_synccpu_release: | ||
| 645 | ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile, | ||
| 646 | arg->flags); | ||
| 647 | if (unlikely(ret != 0)) { | ||
| 648 | DRM_ERROR("Failed synccpu release on handle 0x%08x.\n", | ||
| 649 | (unsigned int) arg->handle); | ||
| 650 | return ret; | ||
| 651 | } | ||
| 652 | break; | ||
| 653 | default: | ||
| 654 | DRM_ERROR("Invalid synccpu operation.\n"); | ||
| 655 | return -EINVAL; | ||
| 656 | } | ||
| 657 | |||
| 658 | return 0; | ||
| 659 | } | ||
| 660 | |||
| 518 | int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, | 661 | int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, |
| 519 | struct drm_file *file_priv) | 662 | struct drm_file *file_priv) |
| 520 | { | 663 | { |
| @@ -591,7 +734,8 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, | |||
| 591 | } | 734 | } |
| 592 | 735 | ||
| 593 | int vmw_user_dmabuf_reference(struct ttm_object_file *tfile, | 736 | int vmw_user_dmabuf_reference(struct ttm_object_file *tfile, |
| 594 | struct vmw_dma_buffer *dma_buf) | 737 | struct vmw_dma_buffer *dma_buf, |
| 738 | uint32_t *handle) | ||
| 595 | { | 739 | { |
| 596 | struct vmw_user_dma_buffer *user_bo; | 740 | struct vmw_user_dma_buffer *user_bo; |
| 597 | 741 | ||
| @@ -599,6 +743,8 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile, | |||
| 599 | return -EINVAL; | 743 | return -EINVAL; |
| 600 | 744 | ||
| 601 | user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma); | 745 | user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma); |
| 746 | |||
| 747 | *handle = user_bo->prime.base.hash.key; | ||
| 602 | return ttm_ref_object_add(tfile, &user_bo->prime.base, | 748 | return ttm_ref_object_add(tfile, &user_bo->prime.base, |
| 603 | TTM_REF_USAGE, NULL); | 749 | TTM_REF_USAGE, NULL); |
| 604 | } | 750 | } |
| @@ -1291,11 +1437,54 @@ void vmw_fence_single_bo(struct ttm_buffer_object *bo, | |||
| 1291 | * @mem: The truct ttm_mem_reg indicating to what memory | 1437 | * @mem: The truct ttm_mem_reg indicating to what memory |
| 1292 | * region the move is taking place. | 1438 | * region the move is taking place. |
| 1293 | * | 1439 | * |
| 1294 | * For now does nothing. | 1440 | * Evicts the Guest Backed hardware resource if the backup |
| 1441 | * buffer is being moved out of MOB memory. | ||
| 1442 | * Note that this function should not race with the resource | ||
| 1443 | * validation code as long as it accesses only members of struct | ||
| 1444 | * resource that remain static while bo::res is !NULL and | ||
| 1445 | * while we have @bo reserved. struct resource::backup is *not* a | ||
| 1446 | * static member. The resource validation code will take care | ||
| 1447 | * to set @bo::res to NULL, while having @bo reserved when the | ||
| 1448 | * buffer is no longer bound to the resource, so @bo:res can be | ||
| 1449 | * used to determine whether there is a need to unbind and whether | ||
| 1450 | * it is safe to unbind. | ||
| 1295 | */ | 1451 | */ |
| 1296 | void vmw_resource_move_notify(struct ttm_buffer_object *bo, | 1452 | void vmw_resource_move_notify(struct ttm_buffer_object *bo, |
| 1297 | struct ttm_mem_reg *mem) | 1453 | struct ttm_mem_reg *mem) |
| 1298 | { | 1454 | { |
| 1455 | struct vmw_dma_buffer *dma_buf; | ||
| 1456 | |||
| 1457 | if (mem == NULL) | ||
| 1458 | return; | ||
| 1459 | |||
| 1460 | if (bo->destroy != vmw_dmabuf_bo_free && | ||
| 1461 | bo->destroy != vmw_user_dmabuf_destroy) | ||
| 1462 | return; | ||
| 1463 | |||
| 1464 | dma_buf = container_of(bo, struct vmw_dma_buffer, base); | ||
| 1465 | |||
| 1466 | if (mem->mem_type != VMW_PL_MOB) { | ||
| 1467 | struct vmw_resource *res, *n; | ||
| 1468 | struct ttm_bo_device *bdev = bo->bdev; | ||
| 1469 | struct ttm_validate_buffer val_buf; | ||
| 1470 | |||
| 1471 | val_buf.bo = bo; | ||
| 1472 | |||
| 1473 | list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) { | ||
| 1474 | |||
| 1475 | if (unlikely(res->func->unbind == NULL)) | ||
| 1476 | continue; | ||
| 1477 | |||
| 1478 | (void) res->func->unbind(res, true, &val_buf); | ||
| 1479 | res->backup_dirty = true; | ||
| 1480 | res->res_dirty = false; | ||
| 1481 | list_del_init(&res->mob_head); | ||
| 1482 | } | ||
| 1483 | |||
| 1484 | spin_lock(&bdev->fence_lock); | ||
| 1485 | (void) ttm_bo_wait(bo, false, false, false); | ||
| 1486 | spin_unlock(&bdev->fence_lock); | ||
| 1487 | } | ||
| 1299 | } | 1488 | } |
| 1300 | 1489 | ||
| 1301 | /** | 1490 | /** |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c new file mode 100644 index 000000000000..1457ec4b7125 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | |||
| @@ -0,0 +1,441 @@ | |||
| 1 | /************************************************************************** | ||
| 2 | * | ||
| 3 | * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA | ||
| 4 | * All Rights Reserved. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the | ||
| 8 | * "Software"), to deal in the Software without restriction, including | ||
| 9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
| 10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
| 11 | * permit persons to whom the Software is furnished to do so, subject to | ||
| 12 | * the following conditions: | ||
| 13 | * | ||
| 14 | * The above copyright notice and this permission notice (including the | ||
| 15 | * next paragraph) shall be included in all copies or substantial portions | ||
| 16 | * of the Software. | ||
| 17 | * | ||
| 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
| 21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
| 22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
| 23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
| 24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
| 25 | * | ||
| 26 | **************************************************************************/ | ||
| 27 | |||
| 28 | #include "vmwgfx_drv.h" | ||
| 29 | #include "vmwgfx_resource_priv.h" | ||
| 30 | #include "ttm/ttm_placement.h" | ||
| 31 | |||
| 32 | struct vmw_shader { | ||
| 33 | struct vmw_resource res; | ||
| 34 | SVGA3dShaderType type; | ||
| 35 | uint32_t size; | ||
| 36 | }; | ||
| 37 | |||
| 38 | struct vmw_user_shader { | ||
| 39 | struct ttm_base_object base; | ||
| 40 | struct vmw_shader shader; | ||
| 41 | }; | ||
| 42 | |||
| 43 | static void vmw_user_shader_free(struct vmw_resource *res); | ||
| 44 | static struct vmw_resource * | ||
| 45 | vmw_user_shader_base_to_res(struct ttm_base_object *base); | ||
| 46 | |||
| 47 | static int vmw_gb_shader_create(struct vmw_resource *res); | ||
| 48 | static int vmw_gb_shader_bind(struct vmw_resource *res, | ||
| 49 | struct ttm_validate_buffer *val_buf); | ||
| 50 | static int vmw_gb_shader_unbind(struct vmw_resource *res, | ||
| 51 | bool readback, | ||
| 52 | struct ttm_validate_buffer *val_buf); | ||
| 53 | static int vmw_gb_shader_destroy(struct vmw_resource *res); | ||
| 54 | |||
| 55 | static uint64_t vmw_user_shader_size; | ||
| 56 | |||
| 57 | static const struct vmw_user_resource_conv user_shader_conv = { | ||
| 58 | .object_type = VMW_RES_SHADER, | ||
| 59 | .base_obj_to_res = vmw_user_shader_base_to_res, | ||
| 60 | .res_free = vmw_user_shader_free | ||
| 61 | }; | ||
| 62 | |||
| 63 | const struct vmw_user_resource_conv *user_shader_converter = | ||
| 64 | &user_shader_conv; | ||
| 65 | |||
| 66 | |||
| 67 | static const struct vmw_res_func vmw_gb_shader_func = { | ||
| 68 | .res_type = vmw_res_shader, | ||
| 69 | .needs_backup = true, | ||
| 70 | .may_evict = true, | ||
| 71 | .type_name = "guest backed shaders", | ||
| 72 | .backup_placement = &vmw_mob_placement, | ||
| 73 | .create = vmw_gb_shader_create, | ||
| 74 | .destroy = vmw_gb_shader_destroy, | ||
| 75 | .bind = vmw_gb_shader_bind, | ||
| 76 | .unbind = vmw_gb_shader_unbind | ||
| 77 | }; | ||
| 78 | |||
| 79 | /** | ||
| 80 | * Shader management: | ||
| 81 | */ | ||
| 82 | |||
| 83 | static inline struct vmw_shader * | ||
| 84 | vmw_res_to_shader(struct vmw_resource *res) | ||
| 85 | { | ||
| 86 | return container_of(res, struct vmw_shader, res); | ||
| 87 | } | ||
| 88 | |||
| 89 | static void vmw_hw_shader_destroy(struct vmw_resource *res) | ||
| 90 | { | ||
| 91 | (void) vmw_gb_shader_destroy(res); | ||
| 92 | } | ||
| 93 | |||
| 94 | static int vmw_gb_shader_init(struct vmw_private *dev_priv, | ||
| 95 | struct vmw_resource *res, | ||
| 96 | uint32_t size, | ||
| 97 | uint64_t offset, | ||
| 98 | SVGA3dShaderType type, | ||
| 99 | struct vmw_dma_buffer *byte_code, | ||
| 100 | void (*res_free) (struct vmw_resource *res)) | ||
| 101 | { | ||
| 102 | struct vmw_shader *shader = vmw_res_to_shader(res); | ||
| 103 | int ret; | ||
| 104 | |||
| 105 | ret = vmw_resource_init(dev_priv, res, true, | ||
| 106 | res_free, &vmw_gb_shader_func); | ||
| 107 | |||
| 108 | |||
| 109 | if (unlikely(ret != 0)) { | ||
| 110 | if (res_free) | ||
| 111 | res_free(res); | ||
| 112 | else | ||
| 113 | kfree(res); | ||
| 114 | return ret; | ||
| 115 | } | ||
| 116 | |||
| 117 | res->backup_size = size; | ||
| 118 | if (byte_code) { | ||
| 119 | res->backup = vmw_dmabuf_reference(byte_code); | ||
| 120 | res->backup_offset = offset; | ||
| 121 | } | ||
| 122 | shader->size = size; | ||
| 123 | shader->type = type; | ||
| 124 | |||
| 125 | vmw_resource_activate(res, vmw_hw_shader_destroy); | ||
| 126 | return 0; | ||
| 127 | } | ||
| 128 | |||
| 129 | static int vmw_gb_shader_create(struct vmw_resource *res) | ||
| 130 | { | ||
| 131 | struct vmw_private *dev_priv = res->dev_priv; | ||
| 132 | struct vmw_shader *shader = vmw_res_to_shader(res); | ||
| 133 | int ret; | ||
| 134 | struct { | ||
| 135 | SVGA3dCmdHeader header; | ||
| 136 | SVGA3dCmdDefineGBShader body; | ||
| 137 | } *cmd; | ||
| 138 | |||
| 139 | if (likely(res->id != -1)) | ||
| 140 | return 0; | ||
| 141 | |||
| 142 | ret = vmw_resource_alloc_id(res); | ||
| 143 | if (unlikely(ret != 0)) { | ||
| 144 | DRM_ERROR("Failed to allocate a shader id.\n"); | ||
| 145 | goto out_no_id; | ||
| 146 | } | ||
| 147 | |||
| 148 | if (unlikely(res->id >= VMWGFX_NUM_GB_SHADER)) { | ||
| 149 | ret = -EBUSY; | ||
| 150 | goto out_no_fifo; | ||
| 151 | } | ||
| 152 | |||
| 153 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
| 154 | if (unlikely(cmd == NULL)) { | ||
| 155 | DRM_ERROR("Failed reserving FIFO space for shader " | ||
| 156 | "creation.\n"); | ||
| 157 | ret = -ENOMEM; | ||
| 158 | goto out_no_fifo; | ||
| 159 | } | ||
| 160 | |||
| 161 | cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SHADER; | ||
| 162 | cmd->header.size = sizeof(cmd->body); | ||
| 163 | cmd->body.shid = res->id; | ||
| 164 | cmd->body.type = shader->type; | ||
| 165 | cmd->body.sizeInBytes = shader->size; | ||
| 166 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
| 167 | (void) vmw_3d_resource_inc(dev_priv, false); | ||
| 168 | |||
| 169 | return 0; | ||
| 170 | |||
| 171 | out_no_fifo: | ||
| 172 | vmw_resource_release_id(res); | ||
| 173 | out_no_id: | ||
| 174 | return ret; | ||
| 175 | } | ||
| 176 | |||
| 177 | static int vmw_gb_shader_bind(struct vmw_resource *res, | ||
| 178 | struct ttm_validate_buffer *val_buf) | ||
| 179 | { | ||
| 180 | struct vmw_private *dev_priv = res->dev_priv; | ||
| 181 | struct { | ||
| 182 | SVGA3dCmdHeader header; | ||
| 183 | SVGA3dCmdBindGBShader body; | ||
| 184 | } *cmd; | ||
| 185 | struct ttm_buffer_object *bo = val_buf->bo; | ||
| 186 | |||
| 187 | BUG_ON(bo->mem.mem_type != VMW_PL_MOB); | ||
| 188 | |||
| 189 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
| 190 | if (unlikely(cmd == NULL)) { | ||
| 191 | DRM_ERROR("Failed reserving FIFO space for shader " | ||
| 192 | "binding.\n"); | ||
| 193 | return -ENOMEM; | ||
| 194 | } | ||
| 195 | |||
| 196 | cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER; | ||
| 197 | cmd->header.size = sizeof(cmd->body); | ||
| 198 | cmd->body.shid = res->id; | ||
| 199 | cmd->body.mobid = bo->mem.start; | ||
| 200 | cmd->body.offsetInBytes = 0; | ||
| 201 | res->backup_dirty = false; | ||
| 202 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
| 203 | |||
| 204 | return 0; | ||
| 205 | } | ||
| 206 | |||
| 207 | static int vmw_gb_shader_unbind(struct vmw_resource *res, | ||
| 208 | bool readback, | ||
| 209 | struct ttm_validate_buffer *val_buf) | ||
| 210 | { | ||
| 211 | struct vmw_private *dev_priv = res->dev_priv; | ||
| 212 | struct { | ||
| 213 | SVGA3dCmdHeader header; | ||
| 214 | SVGA3dCmdBindGBShader body; | ||
| 215 | } *cmd; | ||
| 216 | struct vmw_fence_obj *fence; | ||
| 217 | |||
| 218 | BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB); | ||
| 219 | |||
| 220 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
| 221 | if (unlikely(cmd == NULL)) { | ||
| 222 | DRM_ERROR("Failed reserving FIFO space for shader " | ||
| 223 | "unbinding.\n"); | ||
| 224 | return -ENOMEM; | ||
| 225 | } | ||
| 226 | |||
| 227 | cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER; | ||
| 228 | cmd->header.size = sizeof(cmd->body); | ||
| 229 | cmd->body.shid = res->id; | ||
| 230 | cmd->body.mobid = SVGA3D_INVALID_ID; | ||
| 231 | cmd->body.offsetInBytes = 0; | ||
| 232 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
| 233 | |||
| 234 | /* | ||
| 235 | * Create a fence object and fence the backup buffer. | ||
| 236 | */ | ||
| 237 | |||
| 238 | (void) vmw_execbuf_fence_commands(NULL, dev_priv, | ||
| 239 | &fence, NULL); | ||
| 240 | |||
| 241 | vmw_fence_single_bo(val_buf->bo, fence); | ||
| 242 | |||
| 243 | if (likely(fence != NULL)) | ||
| 244 | vmw_fence_obj_unreference(&fence); | ||
| 245 | |||
| 246 | return 0; | ||
| 247 | } | ||
| 248 | |||
| 249 | static int vmw_gb_shader_destroy(struct vmw_resource *res) | ||
| 250 | { | ||
| 251 | struct vmw_private *dev_priv = res->dev_priv; | ||
| 252 | struct { | ||
| 253 | SVGA3dCmdHeader header; | ||
| 254 | SVGA3dCmdDestroyGBShader body; | ||
| 255 | } *cmd; | ||
| 256 | |||
| 257 | if (likely(res->id == -1)) | ||
| 258 | return 0; | ||
| 259 | |||
| 260 | mutex_lock(&dev_priv->binding_mutex); | ||
| 261 | vmw_context_binding_res_list_kill(&res->binding_head); | ||
| 262 | |||
| 263 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
| 264 | if (unlikely(cmd == NULL)) { | ||
| 265 | DRM_ERROR("Failed reserving FIFO space for shader " | ||
| 266 | "destruction.\n"); | ||
| 267 | mutex_unlock(&dev_priv->binding_mutex); | ||
| 268 | return -ENOMEM; | ||
| 269 | } | ||
| 270 | |||
| 271 | cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SHADER; | ||
| 272 | cmd->header.size = sizeof(cmd->body); | ||
| 273 | cmd->body.shid = res->id; | ||
| 274 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
| 275 | mutex_unlock(&dev_priv->binding_mutex); | ||
| 276 | vmw_resource_release_id(res); | ||
| 277 | vmw_3d_resource_dec(dev_priv, false); | ||
| 278 | |||
| 279 | return 0; | ||
| 280 | } | ||
| 281 | |||
| 282 | /** | ||
| 283 | * User-space shader management: | ||
| 284 | */ | ||
| 285 | |||
| 286 | static struct vmw_resource * | ||
| 287 | vmw_user_shader_base_to_res(struct ttm_base_object *base) | ||
| 288 | { | ||
| 289 | return &(container_of(base, struct vmw_user_shader, base)-> | ||
| 290 | shader.res); | ||
| 291 | } | ||
| 292 | |||
| 293 | static void vmw_user_shader_free(struct vmw_resource *res) | ||
| 294 | { | ||
| 295 | struct vmw_user_shader *ushader = | ||
| 296 | container_of(res, struct vmw_user_shader, shader.res); | ||
| 297 | struct vmw_private *dev_priv = res->dev_priv; | ||
| 298 | |||
| 299 | ttm_base_object_kfree(ushader, base); | ||
| 300 | ttm_mem_global_free(vmw_mem_glob(dev_priv), | ||
| 301 | vmw_user_shader_size); | ||
| 302 | } | ||
| 303 | |||
| 304 | /** | ||
| 305 | * This function is called when user space has no more references on the | ||
| 306 | * base object. It releases the base-object's reference on the resource object. | ||
| 307 | */ | ||
| 308 | |||
| 309 | static void vmw_user_shader_base_release(struct ttm_base_object **p_base) | ||
| 310 | { | ||
| 311 | struct ttm_base_object *base = *p_base; | ||
| 312 | struct vmw_resource *res = vmw_user_shader_base_to_res(base); | ||
| 313 | |||
| 314 | *p_base = NULL; | ||
| 315 | vmw_resource_unreference(&res); | ||
| 316 | } | ||
| 317 | |||
| 318 | int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data, | ||
| 319 | struct drm_file *file_priv) | ||
| 320 | { | ||
| 321 | struct drm_vmw_shader_arg *arg = (struct drm_vmw_shader_arg *)data; | ||
| 322 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
| 323 | |||
| 324 | return ttm_ref_object_base_unref(tfile, arg->handle, | ||
| 325 | TTM_REF_USAGE); | ||
| 326 | } | ||
| 327 | |||
| 328 | int vmw_shader_define_ioctl(struct drm_device *dev, void *data, | ||
| 329 | struct drm_file *file_priv) | ||
| 330 | { | ||
| 331 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
| 332 | struct vmw_user_shader *ushader; | ||
| 333 | struct vmw_resource *res; | ||
| 334 | struct vmw_resource *tmp; | ||
| 335 | struct drm_vmw_shader_create_arg *arg = | ||
| 336 | (struct drm_vmw_shader_create_arg *)data; | ||
| 337 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
| 338 | struct vmw_master *vmaster = vmw_master(file_priv->master); | ||
| 339 | struct vmw_dma_buffer *buffer = NULL; | ||
| 340 | SVGA3dShaderType shader_type; | ||
| 341 | int ret; | ||
| 342 | |||
| 343 | if (arg->buffer_handle != SVGA3D_INVALID_ID) { | ||
| 344 | ret = vmw_user_dmabuf_lookup(tfile, arg->buffer_handle, | ||
| 345 | &buffer); | ||
| 346 | if (unlikely(ret != 0)) { | ||
| 347 | DRM_ERROR("Could not find buffer for shader " | ||
| 348 | "creation.\n"); | ||
| 349 | return ret; | ||
| 350 | } | ||
| 351 | |||
| 352 | if ((u64)buffer->base.num_pages * PAGE_SIZE < | ||
| 353 | (u64)arg->size + (u64)arg->offset) { | ||
| 354 | DRM_ERROR("Illegal buffer- or shader size.\n"); | ||
| 355 | ret = -EINVAL; | ||
| 356 | goto out_bad_arg; | ||
| 357 | } | ||
| 358 | } | ||
| 359 | |||
| 360 | switch (arg->shader_type) { | ||
| 361 | case drm_vmw_shader_type_vs: | ||
| 362 | shader_type = SVGA3D_SHADERTYPE_VS; | ||
| 363 | break; | ||
| 364 | case drm_vmw_shader_type_ps: | ||
| 365 | shader_type = SVGA3D_SHADERTYPE_PS; | ||
| 366 | break; | ||
| 367 | case drm_vmw_shader_type_gs: | ||
| 368 | shader_type = SVGA3D_SHADERTYPE_GS; | ||
| 369 | break; | ||
| 370 | default: | ||
| 371 | DRM_ERROR("Illegal shader type.\n"); | ||
| 372 | ret = -EINVAL; | ||
| 373 | goto out_bad_arg; | ||
| 374 | } | ||
| 375 | |||
| 376 | /* | ||
| 377 | * Approximate idr memory usage with 128 bytes. It will be limited | ||
| 378 | * by maximum number_of shaders anyway. | ||
| 379 | */ | ||
| 380 | |||
| 381 | if (unlikely(vmw_user_shader_size == 0)) | ||
| 382 | vmw_user_shader_size = ttm_round_pot(sizeof(*ushader)) | ||
| 383 | + 128; | ||
| 384 | |||
| 385 | ret = ttm_read_lock(&vmaster->lock, true); | ||
| 386 | if (unlikely(ret != 0)) | ||
| 387 | return ret; | ||
| 388 | |||
| 389 | ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), | ||
| 390 | vmw_user_shader_size, | ||
| 391 | false, true); | ||
| 392 | if (unlikely(ret != 0)) { | ||
| 393 | if (ret != -ERESTARTSYS) | ||
| 394 | DRM_ERROR("Out of graphics memory for shader" | ||
| 395 | " creation.\n"); | ||
| 396 | goto out_unlock; | ||
| 397 | } | ||
| 398 | |||
| 399 | ushader = kzalloc(sizeof(*ushader), GFP_KERNEL); | ||
| 400 | if (unlikely(ushader == NULL)) { | ||
| 401 | ttm_mem_global_free(vmw_mem_glob(dev_priv), | ||
| 402 | vmw_user_shader_size); | ||
| 403 | ret = -ENOMEM; | ||
| 404 | goto out_unlock; | ||
| 405 | } | ||
| 406 | |||
| 407 | res = &ushader->shader.res; | ||
| 408 | ushader->base.shareable = false; | ||
| 409 | ushader->base.tfile = NULL; | ||
| 410 | |||
| 411 | /* | ||
| 412 | * From here on, the destructor takes over resource freeing. | ||
| 413 | */ | ||
| 414 | |||
| 415 | ret = vmw_gb_shader_init(dev_priv, res, arg->size, | ||
| 416 | arg->offset, shader_type, buffer, | ||
| 417 | vmw_user_shader_free); | ||
| 418 | if (unlikely(ret != 0)) | ||
| 419 | goto out_unlock; | ||
| 420 | |||
| 421 | tmp = vmw_resource_reference(res); | ||
| 422 | ret = ttm_base_object_init(tfile, &ushader->base, false, | ||
| 423 | VMW_RES_SHADER, | ||
| 424 | &vmw_user_shader_base_release, NULL); | ||
| 425 | |||
| 426 | if (unlikely(ret != 0)) { | ||
| 427 | vmw_resource_unreference(&tmp); | ||
| 428 | goto out_err; | ||
| 429 | } | ||
| 430 | |||
| 431 | arg->shader_handle = ushader->base.hash.key; | ||
| 432 | out_err: | ||
| 433 | vmw_resource_unreference(&res); | ||
| 434 | out_unlock: | ||
| 435 | ttm_read_unlock(&vmaster->lock); | ||
| 436 | out_bad_arg: | ||
| 437 | vmw_dmabuf_unreference(&buffer); | ||
| 438 | |||
| 439 | return ret; | ||
| 440 | |||
| 441 | } | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 7de2ea8bd553..979da1c246a5 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | |||
| @@ -41,7 +41,6 @@ struct vmw_user_surface { | |||
| 41 | struct ttm_prime_object prime; | 41 | struct ttm_prime_object prime; |
| 42 | struct vmw_surface srf; | 42 | struct vmw_surface srf; |
| 43 | uint32_t size; | 43 | uint32_t size; |
| 44 | uint32_t backup_handle; | ||
| 45 | }; | 44 | }; |
| 46 | 45 | ||
| 47 | /** | 46 | /** |
| @@ -68,6 +67,14 @@ static int vmw_legacy_srf_unbind(struct vmw_resource *res, | |||
| 68 | struct ttm_validate_buffer *val_buf); | 67 | struct ttm_validate_buffer *val_buf); |
| 69 | static int vmw_legacy_srf_create(struct vmw_resource *res); | 68 | static int vmw_legacy_srf_create(struct vmw_resource *res); |
| 70 | static int vmw_legacy_srf_destroy(struct vmw_resource *res); | 69 | static int vmw_legacy_srf_destroy(struct vmw_resource *res); |
| 70 | static int vmw_gb_surface_create(struct vmw_resource *res); | ||
| 71 | static int vmw_gb_surface_bind(struct vmw_resource *res, | ||
| 72 | struct ttm_validate_buffer *val_buf); | ||
| 73 | static int vmw_gb_surface_unbind(struct vmw_resource *res, | ||
| 74 | bool readback, | ||
| 75 | struct ttm_validate_buffer *val_buf); | ||
| 76 | static int vmw_gb_surface_destroy(struct vmw_resource *res); | ||
| 77 | |||
| 71 | 78 | ||
| 72 | static const struct vmw_user_resource_conv user_surface_conv = { | 79 | static const struct vmw_user_resource_conv user_surface_conv = { |
| 73 | .object_type = VMW_RES_SURFACE, | 80 | .object_type = VMW_RES_SURFACE, |
| @@ -93,6 +100,18 @@ static const struct vmw_res_func vmw_legacy_surface_func = { | |||
| 93 | .unbind = &vmw_legacy_srf_unbind | 100 | .unbind = &vmw_legacy_srf_unbind |
| 94 | }; | 101 | }; |
| 95 | 102 | ||
| 103 | static const struct vmw_res_func vmw_gb_surface_func = { | ||
| 104 | .res_type = vmw_res_surface, | ||
| 105 | .needs_backup = true, | ||
| 106 | .may_evict = true, | ||
| 107 | .type_name = "guest backed surfaces", | ||
| 108 | .backup_placement = &vmw_mob_placement, | ||
| 109 | .create = vmw_gb_surface_create, | ||
| 110 | .destroy = vmw_gb_surface_destroy, | ||
| 111 | .bind = vmw_gb_surface_bind, | ||
| 112 | .unbind = vmw_gb_surface_unbind | ||
| 113 | }; | ||
| 114 | |||
| 96 | /** | 115 | /** |
| 97 | * struct vmw_surface_dma - SVGA3D DMA command | 116 | * struct vmw_surface_dma - SVGA3D DMA command |
| 98 | */ | 117 | */ |
| @@ -291,6 +310,11 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res) | |||
| 291 | struct vmw_surface *srf; | 310 | struct vmw_surface *srf; |
| 292 | void *cmd; | 311 | void *cmd; |
| 293 | 312 | ||
| 313 | if (res->func->destroy == vmw_gb_surface_destroy) { | ||
| 314 | (void) vmw_gb_surface_destroy(res); | ||
| 315 | return; | ||
| 316 | } | ||
| 317 | |||
| 294 | if (res->id != -1) { | 318 | if (res->id != -1) { |
| 295 | 319 | ||
| 296 | cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size()); | 320 | cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size()); |
| @@ -549,12 +573,15 @@ static int vmw_surface_init(struct vmw_private *dev_priv, | |||
| 549 | struct vmw_resource *res = &srf->res; | 573 | struct vmw_resource *res = &srf->res; |
| 550 | 574 | ||
| 551 | BUG_ON(res_free == NULL); | 575 | BUG_ON(res_free == NULL); |
| 552 | (void) vmw_3d_resource_inc(dev_priv, false); | 576 | if (!dev_priv->has_mob) |
| 577 | (void) vmw_3d_resource_inc(dev_priv, false); | ||
| 553 | ret = vmw_resource_init(dev_priv, res, true, res_free, | 578 | ret = vmw_resource_init(dev_priv, res, true, res_free, |
| 579 | (dev_priv->has_mob) ? &vmw_gb_surface_func : | ||
| 554 | &vmw_legacy_surface_func); | 580 | &vmw_legacy_surface_func); |
| 555 | 581 | ||
| 556 | if (unlikely(ret != 0)) { | 582 | if (unlikely(ret != 0)) { |
| 557 | vmw_3d_resource_dec(dev_priv, false); | 583 | if (!dev_priv->has_mob) |
| 584 | vmw_3d_resource_dec(dev_priv, false); | ||
| 558 | res_free(res); | 585 | res_free(res); |
| 559 | return ret; | 586 | return ret; |
| 560 | } | 587 | } |
| @@ -750,7 +777,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
| 750 | 777 | ||
| 751 | srf->base_size = *srf->sizes; | 778 | srf->base_size = *srf->sizes; |
| 752 | srf->autogen_filter = SVGA3D_TEX_FILTER_NONE; | 779 | srf->autogen_filter = SVGA3D_TEX_FILTER_NONE; |
| 753 | srf->multisample_count = 1; | 780 | srf->multisample_count = 0; |
| 754 | 781 | ||
| 755 | cur_bo_offset = 0; | 782 | cur_bo_offset = 0; |
| 756 | cur_offset = srf->offsets; | 783 | cur_offset = srf->offsets; |
| @@ -843,6 +870,7 @@ out_unlock: | |||
| 843 | int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, | 870 | int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, |
| 844 | struct drm_file *file_priv) | 871 | struct drm_file *file_priv) |
| 845 | { | 872 | { |
| 873 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
| 846 | union drm_vmw_surface_reference_arg *arg = | 874 | union drm_vmw_surface_reference_arg *arg = |
| 847 | (union drm_vmw_surface_reference_arg *)data; | 875 | (union drm_vmw_surface_reference_arg *)data; |
| 848 | struct drm_vmw_surface_arg *req = &arg->req; | 876 | struct drm_vmw_surface_arg *req = &arg->req; |
| @@ -854,7 +882,7 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, | |||
| 854 | struct ttm_base_object *base; | 882 | struct ttm_base_object *base; |
| 855 | int ret = -EINVAL; | 883 | int ret = -EINVAL; |
| 856 | 884 | ||
| 857 | base = ttm_base_object_lookup(tfile, req->sid); | 885 | base = ttm_base_object_lookup_for_ref(dev_priv->tdev, req->sid); |
| 858 | if (unlikely(base == NULL)) { | 886 | if (unlikely(base == NULL)) { |
| 859 | DRM_ERROR("Could not find surface to reference.\n"); | 887 | DRM_ERROR("Could not find surface to reference.\n"); |
| 860 | return -EINVAL; | 888 | return -EINVAL; |
| @@ -893,3 +921,436 @@ out_no_reference: | |||
| 893 | 921 | ||
| 894 | return ret; | 922 | return ret; |
| 895 | } | 923 | } |
| 924 | |||
| 925 | /** | ||
| 926 | * vmw_surface_define_encode - Encode a surface_define command. | ||
| 927 | * | ||
| 928 | * @srf: Pointer to a struct vmw_surface object. | ||
| 929 | * @cmd_space: Pointer to memory area in which the commands should be encoded. | ||
| 930 | */ | ||
| 931 | static int vmw_gb_surface_create(struct vmw_resource *res) | ||
| 932 | { | ||
| 933 | struct vmw_private *dev_priv = res->dev_priv; | ||
| 934 | struct vmw_surface *srf = vmw_res_to_srf(res); | ||
| 935 | uint32_t cmd_len, submit_len; | ||
| 936 | int ret; | ||
| 937 | struct { | ||
| 938 | SVGA3dCmdHeader header; | ||
| 939 | SVGA3dCmdDefineGBSurface body; | ||
| 940 | } *cmd; | ||
| 941 | |||
| 942 | if (likely(res->id != -1)) | ||
| 943 | return 0; | ||
| 944 | |||
| 945 | (void) vmw_3d_resource_inc(dev_priv, false); | ||
| 946 | ret = vmw_resource_alloc_id(res); | ||
| 947 | if (unlikely(ret != 0)) { | ||
| 948 | DRM_ERROR("Failed to allocate a surface id.\n"); | ||
| 949 | goto out_no_id; | ||
| 950 | } | ||
| 951 | |||
| 952 | if (unlikely(res->id >= VMWGFX_NUM_GB_SURFACE)) { | ||
| 953 | ret = -EBUSY; | ||
| 954 | goto out_no_fifo; | ||
| 955 | } | ||
| 956 | |||
| 957 | cmd_len = sizeof(cmd->body); | ||
| 958 | submit_len = sizeof(*cmd); | ||
| 959 | cmd = vmw_fifo_reserve(dev_priv, submit_len); | ||
| 960 | if (unlikely(cmd == NULL)) { | ||
| 961 | DRM_ERROR("Failed reserving FIFO space for surface " | ||
| 962 | "creation.\n"); | ||
| 963 | ret = -ENOMEM; | ||
| 964 | goto out_no_fifo; | ||
| 965 | } | ||
| 966 | |||
| 967 | cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SURFACE; | ||
| 968 | cmd->header.size = cmd_len; | ||
| 969 | cmd->body.sid = srf->res.id; | ||
| 970 | cmd->body.surfaceFlags = srf->flags; | ||
| 971 | cmd->body.format = cpu_to_le32(srf->format); | ||
| 972 | cmd->body.numMipLevels = srf->mip_levels[0]; | ||
| 973 | cmd->body.multisampleCount = srf->multisample_count; | ||
| 974 | cmd->body.autogenFilter = srf->autogen_filter; | ||
| 975 | cmd->body.size.width = srf->base_size.width; | ||
| 976 | cmd->body.size.height = srf->base_size.height; | ||
| 977 | cmd->body.size.depth = srf->base_size.depth; | ||
| 978 | vmw_fifo_commit(dev_priv, submit_len); | ||
| 979 | |||
| 980 | return 0; | ||
| 981 | |||
| 982 | out_no_fifo: | ||
| 983 | vmw_resource_release_id(res); | ||
| 984 | out_no_id: | ||
| 985 | vmw_3d_resource_dec(dev_priv, false); | ||
| 986 | return ret; | ||
| 987 | } | ||
| 988 | |||
| 989 | |||
| 990 | static int vmw_gb_surface_bind(struct vmw_resource *res, | ||
| 991 | struct ttm_validate_buffer *val_buf) | ||
| 992 | { | ||
| 993 | struct vmw_private *dev_priv = res->dev_priv; | ||
| 994 | struct { | ||
| 995 | SVGA3dCmdHeader header; | ||
| 996 | SVGA3dCmdBindGBSurface body; | ||
| 997 | } *cmd1; | ||
| 998 | struct { | ||
| 999 | SVGA3dCmdHeader header; | ||
| 1000 | SVGA3dCmdUpdateGBSurface body; | ||
| 1001 | } *cmd2; | ||
| 1002 | uint32_t submit_size; | ||
| 1003 | struct ttm_buffer_object *bo = val_buf->bo; | ||
| 1004 | |||
| 1005 | BUG_ON(bo->mem.mem_type != VMW_PL_MOB); | ||
| 1006 | |||
| 1007 | submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0); | ||
| 1008 | |||
| 1009 | cmd1 = vmw_fifo_reserve(dev_priv, submit_size); | ||
| 1010 | if (unlikely(cmd1 == NULL)) { | ||
| 1011 | DRM_ERROR("Failed reserving FIFO space for surface " | ||
| 1012 | "binding.\n"); | ||
| 1013 | return -ENOMEM; | ||
| 1014 | } | ||
| 1015 | |||
| 1016 | cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE; | ||
| 1017 | cmd1->header.size = sizeof(cmd1->body); | ||
| 1018 | cmd1->body.sid = res->id; | ||
| 1019 | cmd1->body.mobid = bo->mem.start; | ||
| 1020 | if (res->backup_dirty) { | ||
| 1021 | cmd2 = (void *) &cmd1[1]; | ||
| 1022 | cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE; | ||
| 1023 | cmd2->header.size = sizeof(cmd2->body); | ||
| 1024 | cmd2->body.sid = res->id; | ||
| 1025 | res->backup_dirty = false; | ||
| 1026 | } | ||
| 1027 | vmw_fifo_commit(dev_priv, submit_size); | ||
| 1028 | |||
| 1029 | return 0; | ||
| 1030 | } | ||
| 1031 | |||
| 1032 | static int vmw_gb_surface_unbind(struct vmw_resource *res, | ||
| 1033 | bool readback, | ||
| 1034 | struct ttm_validate_buffer *val_buf) | ||
| 1035 | { | ||
| 1036 | struct vmw_private *dev_priv = res->dev_priv; | ||
| 1037 | struct ttm_buffer_object *bo = val_buf->bo; | ||
| 1038 | struct vmw_fence_obj *fence; | ||
| 1039 | |||
| 1040 | struct { | ||
| 1041 | SVGA3dCmdHeader header; | ||
| 1042 | SVGA3dCmdReadbackGBSurface body; | ||
| 1043 | } *cmd1; | ||
| 1044 | struct { | ||
| 1045 | SVGA3dCmdHeader header; | ||
| 1046 | SVGA3dCmdInvalidateGBSurface body; | ||
| 1047 | } *cmd2; | ||
| 1048 | struct { | ||
| 1049 | SVGA3dCmdHeader header; | ||
| 1050 | SVGA3dCmdBindGBSurface body; | ||
| 1051 | } *cmd3; | ||
| 1052 | uint32_t submit_size; | ||
| 1053 | uint8_t *cmd; | ||
| 1054 | |||
| 1055 | |||
| 1056 | BUG_ON(bo->mem.mem_type != VMW_PL_MOB); | ||
| 1057 | |||
| 1058 | submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2)); | ||
| 1059 | cmd = vmw_fifo_reserve(dev_priv, submit_size); | ||
| 1060 | if (unlikely(cmd == NULL)) { | ||
| 1061 | DRM_ERROR("Failed reserving FIFO space for surface " | ||
| 1062 | "unbinding.\n"); | ||
| 1063 | return -ENOMEM; | ||
| 1064 | } | ||
| 1065 | |||
| 1066 | if (readback) { | ||
| 1067 | cmd1 = (void *) cmd; | ||
| 1068 | cmd1->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE; | ||
| 1069 | cmd1->header.size = sizeof(cmd1->body); | ||
| 1070 | cmd1->body.sid = res->id; | ||
| 1071 | cmd3 = (void *) &cmd1[1]; | ||
| 1072 | } else { | ||
| 1073 | cmd2 = (void *) cmd; | ||
| 1074 | cmd2->header.id = SVGA_3D_CMD_INVALIDATE_GB_SURFACE; | ||
| 1075 | cmd2->header.size = sizeof(cmd2->body); | ||
| 1076 | cmd2->body.sid = res->id; | ||
| 1077 | cmd3 = (void *) &cmd2[1]; | ||
| 1078 | } | ||
| 1079 | |||
| 1080 | cmd3->header.id = SVGA_3D_CMD_BIND_GB_SURFACE; | ||
| 1081 | cmd3->header.size = sizeof(cmd3->body); | ||
| 1082 | cmd3->body.sid = res->id; | ||
| 1083 | cmd3->body.mobid = SVGA3D_INVALID_ID; | ||
| 1084 | |||
| 1085 | vmw_fifo_commit(dev_priv, submit_size); | ||
| 1086 | |||
| 1087 | /* | ||
| 1088 | * Create a fence object and fence the backup buffer. | ||
| 1089 | */ | ||
| 1090 | |||
| 1091 | (void) vmw_execbuf_fence_commands(NULL, dev_priv, | ||
| 1092 | &fence, NULL); | ||
| 1093 | |||
| 1094 | vmw_fence_single_bo(val_buf->bo, fence); | ||
| 1095 | |||
| 1096 | if (likely(fence != NULL)) | ||
| 1097 | vmw_fence_obj_unreference(&fence); | ||
| 1098 | |||
| 1099 | return 0; | ||
| 1100 | } | ||
| 1101 | |||
| 1102 | static int vmw_gb_surface_destroy(struct vmw_resource *res) | ||
| 1103 | { | ||
| 1104 | struct vmw_private *dev_priv = res->dev_priv; | ||
| 1105 | struct { | ||
| 1106 | SVGA3dCmdHeader header; | ||
| 1107 | SVGA3dCmdDestroyGBSurface body; | ||
| 1108 | } *cmd; | ||
| 1109 | |||
| 1110 | if (likely(res->id == -1)) | ||
| 1111 | return 0; | ||
| 1112 | |||
| 1113 | mutex_lock(&dev_priv->binding_mutex); | ||
| 1114 | vmw_context_binding_res_list_kill(&res->binding_head); | ||
| 1115 | |||
| 1116 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
| 1117 | if (unlikely(cmd == NULL)) { | ||
| 1118 | DRM_ERROR("Failed reserving FIFO space for surface " | ||
| 1119 | "destruction.\n"); | ||
| 1120 | mutex_unlock(&dev_priv->binding_mutex); | ||
| 1121 | return -ENOMEM; | ||
| 1122 | } | ||
| 1123 | |||
| 1124 | cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE; | ||
| 1125 | cmd->header.size = sizeof(cmd->body); | ||
| 1126 | cmd->body.sid = res->id; | ||
| 1127 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
| 1128 | mutex_unlock(&dev_priv->binding_mutex); | ||
| 1129 | vmw_resource_release_id(res); | ||
| 1130 | vmw_3d_resource_dec(dev_priv, false); | ||
| 1131 | |||
| 1132 | return 0; | ||
| 1133 | } | ||
| 1134 | |||
| 1135 | /** | ||
| 1136 | * vmw_gb_surface_define_ioctl - Ioctl function implementing | ||
| 1137 | * the user surface define functionality. | ||
| 1138 | * | ||
| 1139 | * @dev: Pointer to a struct drm_device. | ||
| 1140 | * @data: Pointer to data copied from / to user-space. | ||
| 1141 | * @file_priv: Pointer to a drm file private structure. | ||
| 1142 | */ | ||
| 1143 | int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, | ||
| 1144 | struct drm_file *file_priv) | ||
| 1145 | { | ||
| 1146 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
| 1147 | struct vmw_user_surface *user_srf; | ||
| 1148 | struct vmw_surface *srf; | ||
| 1149 | struct vmw_resource *res; | ||
| 1150 | struct vmw_resource *tmp; | ||
| 1151 | union drm_vmw_gb_surface_create_arg *arg = | ||
| 1152 | (union drm_vmw_gb_surface_create_arg *)data; | ||
| 1153 | struct drm_vmw_gb_surface_create_req *req = &arg->req; | ||
| 1154 | struct drm_vmw_gb_surface_create_rep *rep = &arg->rep; | ||
| 1155 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
| 1156 | int ret; | ||
| 1157 | uint32_t size; | ||
| 1158 | struct vmw_master *vmaster = vmw_master(file_priv->master); | ||
| 1159 | const struct svga3d_surface_desc *desc; | ||
| 1160 | uint32_t backup_handle; | ||
| 1161 | |||
| 1162 | if (unlikely(vmw_user_surface_size == 0)) | ||
| 1163 | vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) + | ||
| 1164 | 128; | ||
| 1165 | |||
| 1166 | size = vmw_user_surface_size + 128; | ||
| 1167 | |||
| 1168 | desc = svga3dsurface_get_desc(req->format); | ||
| 1169 | if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) { | ||
| 1170 | DRM_ERROR("Invalid surface format for surface creation.\n"); | ||
| 1171 | return -EINVAL; | ||
| 1172 | } | ||
| 1173 | |||
| 1174 | ret = ttm_read_lock(&vmaster->lock, true); | ||
| 1175 | if (unlikely(ret != 0)) | ||
| 1176 | return ret; | ||
| 1177 | |||
| 1178 | ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), | ||
| 1179 | size, false, true); | ||
| 1180 | if (unlikely(ret != 0)) { | ||
| 1181 | if (ret != -ERESTARTSYS) | ||
| 1182 | DRM_ERROR("Out of graphics memory for surface" | ||
| 1183 | " creation.\n"); | ||
| 1184 | goto out_unlock; | ||
| 1185 | } | ||
| 1186 | |||
| 1187 | user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL); | ||
| 1188 | if (unlikely(user_srf == NULL)) { | ||
| 1189 | ret = -ENOMEM; | ||
| 1190 | goto out_no_user_srf; | ||
| 1191 | } | ||
| 1192 | |||
| 1193 | srf = &user_srf->srf; | ||
| 1194 | res = &srf->res; | ||
| 1195 | |||
| 1196 | srf->flags = req->svga3d_flags; | ||
| 1197 | srf->format = req->format; | ||
| 1198 | srf->scanout = req->drm_surface_flags & drm_vmw_surface_flag_scanout; | ||
| 1199 | srf->mip_levels[0] = req->mip_levels; | ||
| 1200 | srf->num_sizes = 1; | ||
| 1201 | srf->sizes = NULL; | ||
| 1202 | srf->offsets = NULL; | ||
| 1203 | user_srf->size = size; | ||
| 1204 | srf->base_size = req->base_size; | ||
| 1205 | srf->autogen_filter = SVGA3D_TEX_FILTER_NONE; | ||
| 1206 | srf->multisample_count = req->multisample_count; | ||
| 1207 | res->backup_size = svga3dsurface_get_serialized_size | ||
| 1208 | (srf->format, srf->base_size, srf->mip_levels[0], | ||
| 1209 | srf->flags & SVGA3D_SURFACE_CUBEMAP); | ||
| 1210 | |||
| 1211 | user_srf->prime.base.shareable = false; | ||
| 1212 | user_srf->prime.base.tfile = NULL; | ||
| 1213 | |||
| 1214 | /** | ||
| 1215 | * From this point, the generic resource management functions | ||
| 1216 | * destroy the object on failure. | ||
| 1217 | */ | ||
| 1218 | |||
| 1219 | ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free); | ||
| 1220 | if (unlikely(ret != 0)) | ||
| 1221 | goto out_unlock; | ||
| 1222 | |||
| 1223 | if (req->buffer_handle != SVGA3D_INVALID_ID) { | ||
| 1224 | ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle, | ||
| 1225 | &res->backup); | ||
| 1226 | } else if (req->drm_surface_flags & | ||
| 1227 | drm_vmw_surface_flag_create_buffer) | ||
| 1228 | ret = vmw_user_dmabuf_alloc(dev_priv, tfile, | ||
| 1229 | res->backup_size, | ||
| 1230 | req->drm_surface_flags & | ||
| 1231 | drm_vmw_surface_flag_shareable, | ||
| 1232 | &backup_handle, | ||
| 1233 | &res->backup); | ||
| 1234 | |||
| 1235 | if (unlikely(ret != 0)) { | ||
| 1236 | vmw_resource_unreference(&res); | ||
| 1237 | goto out_unlock; | ||
| 1238 | } | ||
| 1239 | |||
| 1240 | tmp = vmw_resource_reference(&srf->res); | ||
| 1241 | ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime, | ||
| 1242 | req->drm_surface_flags & | ||
| 1243 | drm_vmw_surface_flag_shareable, | ||
| 1244 | VMW_RES_SURFACE, | ||
| 1245 | &vmw_user_surface_base_release, NULL); | ||
| 1246 | |||
| 1247 | if (unlikely(ret != 0)) { | ||
| 1248 | vmw_resource_unreference(&tmp); | ||
| 1249 | vmw_resource_unreference(&res); | ||
| 1250 | goto out_unlock; | ||
| 1251 | } | ||
| 1252 | |||
| 1253 | rep->handle = user_srf->prime.base.hash.key; | ||
| 1254 | rep->backup_size = res->backup_size; | ||
| 1255 | if (res->backup) { | ||
| 1256 | rep->buffer_map_handle = | ||
| 1257 | drm_vma_node_offset_addr(&res->backup->base.vma_node); | ||
| 1258 | rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE; | ||
| 1259 | rep->buffer_handle = backup_handle; | ||
| 1260 | } else { | ||
| 1261 | rep->buffer_map_handle = 0; | ||
| 1262 | rep->buffer_size = 0; | ||
| 1263 | rep->buffer_handle = SVGA3D_INVALID_ID; | ||
| 1264 | } | ||
| 1265 | |||
| 1266 | vmw_resource_unreference(&res); | ||
| 1267 | |||
| 1268 | ttm_read_unlock(&vmaster->lock); | ||
| 1269 | return 0; | ||
| 1270 | out_no_user_srf: | ||
| 1271 | ttm_mem_global_free(vmw_mem_glob(dev_priv), size); | ||
| 1272 | out_unlock: | ||
| 1273 | ttm_read_unlock(&vmaster->lock); | ||
| 1274 | return ret; | ||
| 1275 | } | ||
| 1276 | |||
| 1277 | /** | ||
| 1278 | * vmw_gb_surface_reference_ioctl - Ioctl function implementing | ||
| 1279 | * the user surface reference functionality. | ||
| 1280 | * | ||
| 1281 | * @dev: Pointer to a struct drm_device. | ||
| 1282 | * @data: Pointer to data copied from / to user-space. | ||
| 1283 | * @file_priv: Pointer to a drm file private structure. | ||
| 1284 | */ | ||
| 1285 | int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data, | ||
| 1286 | struct drm_file *file_priv) | ||
| 1287 | { | ||
| 1288 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
| 1289 | union drm_vmw_gb_surface_reference_arg *arg = | ||
| 1290 | (union drm_vmw_gb_surface_reference_arg *)data; | ||
| 1291 | struct drm_vmw_surface_arg *req = &arg->req; | ||
| 1292 | struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep; | ||
| 1293 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
| 1294 | struct vmw_surface *srf; | ||
| 1295 | struct vmw_user_surface *user_srf; | ||
| 1296 | struct ttm_base_object *base; | ||
| 1297 | uint32_t backup_handle; | ||
| 1298 | int ret = -EINVAL; | ||
| 1299 | |||
| 1300 | base = ttm_base_object_lookup_for_ref(dev_priv->tdev, req->sid); | ||
| 1301 | if (unlikely(base == NULL)) { | ||
| 1302 | DRM_ERROR("Could not find surface to reference.\n"); | ||
| 1303 | return -EINVAL; | ||
| 1304 | } | ||
| 1305 | |||
| 1306 | if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) | ||
| 1307 | goto out_bad_resource; | ||
| 1308 | |||
| 1309 | user_srf = container_of(base, struct vmw_user_surface, prime.base); | ||
| 1310 | srf = &user_srf->srf; | ||
| 1311 | if (srf->res.backup == NULL) { | ||
| 1312 | DRM_ERROR("Shared GB surface is missing a backup buffer.\n"); | ||
| 1313 | goto out_bad_resource; | ||
| 1314 | } | ||
| 1315 | |||
| 1316 | ret = ttm_ref_object_add(tfile, &user_srf->prime.base, | ||
| 1317 | TTM_REF_USAGE, NULL); | ||
| 1318 | if (unlikely(ret != 0)) { | ||
| 1319 | DRM_ERROR("Could not add a reference to a GB surface.\n"); | ||
| 1320 | goto out_bad_resource; | ||
| 1321 | } | ||
| 1322 | |||
| 1323 | mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */ | ||
| 1324 | ret = vmw_user_dmabuf_reference(tfile, srf->res.backup, | ||
| 1325 | &backup_handle); | ||
| 1326 | mutex_unlock(&dev_priv->cmdbuf_mutex); | ||
| 1327 | |||
| 1328 | if (unlikely(ret != 0)) { | ||
| 1329 | DRM_ERROR("Could not add a reference to a GB surface " | ||
| 1330 | "backup buffer.\n"); | ||
| 1331 | (void) ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, | ||
| 1332 | req->sid, | ||
| 1333 | TTM_REF_USAGE); | ||
| 1334 | goto out_bad_resource; | ||
| 1335 | } | ||
| 1336 | |||
| 1337 | rep->creq.svga3d_flags = srf->flags; | ||
| 1338 | rep->creq.format = srf->format; | ||
| 1339 | rep->creq.mip_levels = srf->mip_levels[0]; | ||
| 1340 | rep->creq.drm_surface_flags = 0; | ||
| 1341 | rep->creq.multisample_count = srf->multisample_count; | ||
| 1342 | rep->creq.autogen_filter = srf->autogen_filter; | ||
| 1343 | rep->creq.buffer_handle = backup_handle; | ||
| 1344 | rep->creq.base_size = srf->base_size; | ||
| 1345 | rep->crep.handle = user_srf->prime.base.hash.key; | ||
| 1346 | rep->crep.backup_size = srf->res.backup_size; | ||
| 1347 | rep->crep.buffer_handle = backup_handle; | ||
| 1348 | rep->crep.buffer_map_handle = | ||
| 1349 | drm_vma_node_offset_addr(&srf->res.backup->base.vma_node); | ||
| 1350 | rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE; | ||
| 1351 | |||
| 1352 | out_bad_resource: | ||
| 1353 | ttm_base_object_unref(&base); | ||
| 1354 | |||
| 1355 | return ret; | ||
| 1356 | } | ||
diff --git a/drivers/gpu/host1x/Kconfig b/drivers/gpu/host1x/Kconfig index 7d6bed222542..b2fd029d67b3 100644 --- a/drivers/gpu/host1x/Kconfig +++ b/drivers/gpu/host1x/Kconfig | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | config TEGRA_HOST1X | 1 | config TEGRA_HOST1X |
| 2 | tristate "NVIDIA Tegra host1x driver" | 2 | tristate "NVIDIA Tegra host1x driver" |
| 3 | depends on ARCH_TEGRA || ARCH_MULTIPLATFORM | 3 | depends on ARCH_TEGRA || (ARM && COMPILE_TEST) |
| 4 | help | 4 | help |
| 5 | Driver for the NVIDIA Tegra host1x hardware. | 5 | Driver for the NVIDIA Tegra host1x hardware. |
| 6 | 6 | ||
diff --git a/drivers/gpu/host1x/Makefile b/drivers/gpu/host1x/Makefile index afa1e9e4e512..c1189f004441 100644 --- a/drivers/gpu/host1x/Makefile +++ b/drivers/gpu/host1x/Makefile | |||
| @@ -7,7 +7,9 @@ host1x-y = \ | |||
| 7 | channel.o \ | 7 | channel.o \ |
| 8 | job.o \ | 8 | job.o \ |
| 9 | debug.o \ | 9 | debug.o \ |
| 10 | mipi.o \ | ||
| 10 | hw/host1x01.o \ | 11 | hw/host1x01.o \ |
| 11 | hw/host1x02.o | 12 | hw/host1x02.o \ |
| 13 | hw/host1x04.o | ||
| 12 | 14 | ||
| 13 | obj-$(CONFIG_TEGRA_HOST1X) += host1x.o | 15 | obj-$(CONFIG_TEGRA_HOST1X) += host1x.o |
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c index 6a929591aa73..ccdd2e6da5e3 100644 --- a/drivers/gpu/host1x/bus.c +++ b/drivers/gpu/host1x/bus.c | |||
| @@ -188,6 +188,7 @@ int host1x_device_init(struct host1x_device *device) | |||
| 188 | 188 | ||
| 189 | return 0; | 189 | return 0; |
| 190 | } | 190 | } |
| 191 | EXPORT_SYMBOL(host1x_device_init); | ||
| 191 | 192 | ||
| 192 | int host1x_device_exit(struct host1x_device *device) | 193 | int host1x_device_exit(struct host1x_device *device) |
| 193 | { | 194 | { |
| @@ -213,6 +214,7 @@ int host1x_device_exit(struct host1x_device *device) | |||
| 213 | 214 | ||
| 214 | return 0; | 215 | return 0; |
| 215 | } | 216 | } |
| 217 | EXPORT_SYMBOL(host1x_device_exit); | ||
| 216 | 218 | ||
| 217 | static int host1x_register_client(struct host1x *host1x, | 219 | static int host1x_register_client(struct host1x *host1x, |
| 218 | struct host1x_client *client) | 220 | struct host1x_client *client) |
diff --git a/drivers/gpu/host1x/channel.c b/drivers/gpu/host1x/channel.c index 83ea51b9f0fc..b4ae3affb987 100644 --- a/drivers/gpu/host1x/channel.c +++ b/drivers/gpu/host1x/channel.c | |||
| @@ -43,6 +43,7 @@ int host1x_job_submit(struct host1x_job *job) | |||
| 43 | 43 | ||
| 44 | return host1x_hw_channel_submit(host, job); | 44 | return host1x_hw_channel_submit(host, job); |
| 45 | } | 45 | } |
| 46 | EXPORT_SYMBOL(host1x_job_submit); | ||
| 46 | 47 | ||
| 47 | struct host1x_channel *host1x_channel_get(struct host1x_channel *channel) | 48 | struct host1x_channel *host1x_channel_get(struct host1x_channel *channel) |
| 48 | { | 49 | { |
| @@ -60,6 +61,7 @@ struct host1x_channel *host1x_channel_get(struct host1x_channel *channel) | |||
| 60 | 61 | ||
| 61 | return err ? NULL : channel; | 62 | return err ? NULL : channel; |
| 62 | } | 63 | } |
| 64 | EXPORT_SYMBOL(host1x_channel_get); | ||
| 63 | 65 | ||
| 64 | void host1x_channel_put(struct host1x_channel *channel) | 66 | void host1x_channel_put(struct host1x_channel *channel) |
| 65 | { | 67 | { |
| @@ -76,6 +78,7 @@ void host1x_channel_put(struct host1x_channel *channel) | |||
| 76 | 78 | ||
| 77 | mutex_unlock(&channel->reflock); | 79 | mutex_unlock(&channel->reflock); |
| 78 | } | 80 | } |
| 81 | EXPORT_SYMBOL(host1x_channel_put); | ||
| 79 | 82 | ||
| 80 | struct host1x_channel *host1x_channel_request(struct device *dev) | 83 | struct host1x_channel *host1x_channel_request(struct device *dev) |
| 81 | { | 84 | { |
| @@ -115,6 +118,7 @@ fail: | |||
| 115 | mutex_unlock(&host->chlist_mutex); | 118 | mutex_unlock(&host->chlist_mutex); |
| 116 | return NULL; | 119 | return NULL; |
| 117 | } | 120 | } |
| 121 | EXPORT_SYMBOL(host1x_channel_request); | ||
| 118 | 122 | ||
| 119 | void host1x_channel_free(struct host1x_channel *channel) | 123 | void host1x_channel_free(struct host1x_channel *channel) |
| 120 | { | 124 | { |
| @@ -124,3 +128,4 @@ void host1x_channel_free(struct host1x_channel *channel) | |||
| 124 | list_del(&channel->list); | 128 | list_del(&channel->list); |
| 125 | kfree(channel); | 129 | kfree(channel); |
| 126 | } | 130 | } |
| 131 | EXPORT_SYMBOL(host1x_channel_free); | ||
diff --git a/drivers/gpu/host1x/debug.c b/drivers/gpu/host1x/debug.c index 3ec7d77de24d..ee3d12b51c50 100644 --- a/drivers/gpu/host1x/debug.c +++ b/drivers/gpu/host1x/debug.c | |||
| @@ -96,7 +96,6 @@ static void show_all(struct host1x *m, struct output *o) | |||
| 96 | show_channels(ch, o, true); | 96 | show_channels(ch, o, true); |
| 97 | } | 97 | } |
| 98 | 98 | ||
| 99 | #ifdef CONFIG_DEBUG_FS | ||
| 100 | static void show_all_no_fifo(struct host1x *host1x, struct output *o) | 99 | static void show_all_no_fifo(struct host1x *host1x, struct output *o) |
| 101 | { | 100 | { |
| 102 | struct host1x_channel *ch; | 101 | struct host1x_channel *ch; |
| @@ -153,7 +152,7 @@ static const struct file_operations host1x_debug_fops = { | |||
| 153 | .release = single_release, | 152 | .release = single_release, |
| 154 | }; | 153 | }; |
| 155 | 154 | ||
| 156 | void host1x_debug_init(struct host1x *host1x) | 155 | static void host1x_debugfs_init(struct host1x *host1x) |
| 157 | { | 156 | { |
| 158 | struct dentry *de = debugfs_create_dir("tegra-host1x", NULL); | 157 | struct dentry *de = debugfs_create_dir("tegra-host1x", NULL); |
| 159 | 158 | ||
| @@ -180,18 +179,22 @@ void host1x_debug_init(struct host1x *host1x) | |||
| 180 | &host1x_debug_force_timeout_channel); | 179 | &host1x_debug_force_timeout_channel); |
| 181 | } | 180 | } |
| 182 | 181 | ||
| 183 | void host1x_debug_deinit(struct host1x *host1x) | 182 | static void host1x_debugfs_exit(struct host1x *host1x) |
| 184 | { | 183 | { |
| 185 | debugfs_remove_recursive(host1x->debugfs); | 184 | debugfs_remove_recursive(host1x->debugfs); |
| 186 | } | 185 | } |
| 187 | #else | 186 | |
| 188 | void host1x_debug_init(struct host1x *host1x) | 187 | void host1x_debug_init(struct host1x *host1x) |
| 189 | { | 188 | { |
| 189 | if (IS_ENABLED(CONFIG_DEBUG_FS)) | ||
| 190 | host1x_debugfs_init(host1x); | ||
| 190 | } | 191 | } |
| 192 | |||
| 191 | void host1x_debug_deinit(struct host1x *host1x) | 193 | void host1x_debug_deinit(struct host1x *host1x) |
| 192 | { | 194 | { |
| 195 | if (IS_ENABLED(CONFIG_DEBUG_FS)) | ||
| 196 | host1x_debugfs_exit(host1x); | ||
| 193 | } | 197 | } |
| 194 | #endif | ||
| 195 | 198 | ||
| 196 | void host1x_debug_dump(struct host1x *host1x) | 199 | void host1x_debug_dump(struct host1x *host1x) |
| 197 | { | 200 | { |
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index 80da003d63de..2529908d304b 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c | |||
| @@ -34,6 +34,7 @@ | |||
| 34 | #include "debug.h" | 34 | #include "debug.h" |
| 35 | #include "hw/host1x01.h" | 35 | #include "hw/host1x01.h" |
| 36 | #include "hw/host1x02.h" | 36 | #include "hw/host1x02.h" |
| 37 | #include "hw/host1x04.h" | ||
| 37 | 38 | ||
| 38 | void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r) | 39 | void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r) |
| 39 | { | 40 | { |
| @@ -77,7 +78,17 @@ static const struct host1x_info host1x02_info = { | |||
| 77 | .sync_offset = 0x3000, | 78 | .sync_offset = 0x3000, |
| 78 | }; | 79 | }; |
| 79 | 80 | ||
| 81 | static const struct host1x_info host1x04_info = { | ||
| 82 | .nb_channels = 12, | ||
| 83 | .nb_pts = 192, | ||
| 84 | .nb_mlocks = 16, | ||
| 85 | .nb_bases = 64, | ||
| 86 | .init = host1x04_init, | ||
| 87 | .sync_offset = 0x2100, | ||
| 88 | }; | ||
| 89 | |||
| 80 | static struct of_device_id host1x_of_match[] = { | 90 | static struct of_device_id host1x_of_match[] = { |
| 91 | { .compatible = "nvidia,tegra124-host1x", .data = &host1x04_info, }, | ||
| 81 | { .compatible = "nvidia,tegra114-host1x", .data = &host1x02_info, }, | 92 | { .compatible = "nvidia,tegra114-host1x", .data = &host1x02_info, }, |
| 82 | { .compatible = "nvidia,tegra30-host1x", .data = &host1x01_info, }, | 93 | { .compatible = "nvidia,tegra30-host1x", .data = &host1x01_info, }, |
| 83 | { .compatible = "nvidia,tegra20-host1x", .data = &host1x01_info, }, | 94 | { .compatible = "nvidia,tegra20-host1x", .data = &host1x01_info, }, |
| @@ -210,17 +221,26 @@ static int __init tegra_host1x_init(void) | |||
| 210 | return err; | 221 | return err; |
| 211 | 222 | ||
| 212 | err = platform_driver_register(&tegra_host1x_driver); | 223 | err = platform_driver_register(&tegra_host1x_driver); |
| 213 | if (err < 0) { | 224 | if (err < 0) |
| 214 | host1x_bus_exit(); | 225 | goto unregister_bus; |
| 215 | return err; | 226 | |
| 216 | } | 227 | err = platform_driver_register(&tegra_mipi_driver); |
| 228 | if (err < 0) | ||
| 229 | goto unregister_host1x; | ||
| 217 | 230 | ||
| 218 | return 0; | 231 | return 0; |
| 232 | |||
| 233 | unregister_host1x: | ||
| 234 | platform_driver_unregister(&tegra_host1x_driver); | ||
| 235 | unregister_bus: | ||
| 236 | host1x_bus_exit(); | ||
| 237 | return err; | ||
| 219 | } | 238 | } |
| 220 | module_init(tegra_host1x_init); | 239 | module_init(tegra_host1x_init); |
| 221 | 240 | ||
| 222 | static void __exit tegra_host1x_exit(void) | 241 | static void __exit tegra_host1x_exit(void) |
| 223 | { | 242 | { |
| 243 | platform_driver_unregister(&tegra_mipi_driver); | ||
| 224 | platform_driver_unregister(&tegra_host1x_driver); | 244 | platform_driver_unregister(&tegra_host1x_driver); |
| 225 | host1x_bus_exit(); | 245 | host1x_bus_exit(); |
| 226 | } | 246 | } |
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h index a61a976e7a42..0b6e8e9629c5 100644 --- a/drivers/gpu/host1x/dev.h +++ b/drivers/gpu/host1x/dev.h | |||
| @@ -306,4 +306,6 @@ static inline void host1x_hw_show_mlocks(struct host1x *host, struct output *o) | |||
| 306 | host->debug_op->show_mlocks(host, o); | 306 | host->debug_op->show_mlocks(host, o); |
| 307 | } | 307 | } |
| 308 | 308 | ||
| 309 | extern struct platform_driver tegra_mipi_driver; | ||
| 310 | |||
| 309 | #endif | 311 | #endif |
diff --git a/drivers/gpu/host1x/hw/host1x02.c b/drivers/gpu/host1x/hw/host1x02.c index e98caca0ca42..928946c2144b 100644 --- a/drivers/gpu/host1x/hw/host1x02.c +++ b/drivers/gpu/host1x/hw/host1x02.c | |||
| @@ -17,8 +17,8 @@ | |||
| 17 | */ | 17 | */ |
| 18 | 18 | ||
| 19 | /* include hw specification */ | 19 | /* include hw specification */ |
| 20 | #include "host1x01.h" | 20 | #include "host1x02.h" |
| 21 | #include "host1x01_hardware.h" | 21 | #include "host1x02_hardware.h" |
| 22 | 22 | ||
| 23 | /* include code */ | 23 | /* include code */ |
| 24 | #include "cdma_hw.c" | 24 | #include "cdma_hw.c" |
diff --git a/drivers/gpu/host1x/hw/host1x02_hardware.h b/drivers/gpu/host1x/hw/host1x02_hardware.h new file mode 100644 index 000000000000..154901860bc6 --- /dev/null +++ b/drivers/gpu/host1x/hw/host1x02_hardware.h | |||
| @@ -0,0 +1,142 @@ | |||
| 1 | /* | ||
| 2 | * Tegra host1x Register Offsets for Tegra114 | ||
| 3 | * | ||
| 4 | * Copyright (c) 2010-2013 NVIDIA Corporation. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms and conditions of the GNU General Public License, | ||
| 8 | * version 2, as published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 13 | * more details. | ||
| 14 | * | ||
| 15 | * You should have received a copy of the GNU General Public License | ||
| 16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 17 | */ | ||
| 18 | |||
| 19 | #ifndef __HOST1X_HOST1X02_HARDWARE_H | ||
| 20 | #define __HOST1X_HOST1X02_HARDWARE_H | ||
| 21 | |||
| 22 | #include <linux/types.h> | ||
| 23 | #include <linux/bitops.h> | ||
| 24 | |||
| 25 | #include "hw_host1x02_channel.h" | ||
| 26 | #include "hw_host1x02_sync.h" | ||
| 27 | #include "hw_host1x02_uclass.h" | ||
| 28 | |||
| 29 | static inline u32 host1x_class_host_wait_syncpt( | ||
| 30 | unsigned indx, unsigned threshold) | ||
| 31 | { | ||
| 32 | return host1x_uclass_wait_syncpt_indx_f(indx) | ||
| 33 | | host1x_uclass_wait_syncpt_thresh_f(threshold); | ||
| 34 | } | ||
| 35 | |||
| 36 | static inline u32 host1x_class_host_load_syncpt_base( | ||
| 37 | unsigned indx, unsigned threshold) | ||
| 38 | { | ||
| 39 | return host1x_uclass_load_syncpt_base_base_indx_f(indx) | ||
| 40 | | host1x_uclass_load_syncpt_base_value_f(threshold); | ||
| 41 | } | ||
| 42 | |||
| 43 | static inline u32 host1x_class_host_wait_syncpt_base( | ||
| 44 | unsigned indx, unsigned base_indx, unsigned offset) | ||
| 45 | { | ||
| 46 | return host1x_uclass_wait_syncpt_base_indx_f(indx) | ||
| 47 | | host1x_uclass_wait_syncpt_base_base_indx_f(base_indx) | ||
| 48 | | host1x_uclass_wait_syncpt_base_offset_f(offset); | ||
| 49 | } | ||
| 50 | |||
| 51 | static inline u32 host1x_class_host_incr_syncpt_base( | ||
| 52 | unsigned base_indx, unsigned offset) | ||
| 53 | { | ||
| 54 | return host1x_uclass_incr_syncpt_base_base_indx_f(base_indx) | ||
| 55 | | host1x_uclass_incr_syncpt_base_offset_f(offset); | ||
| 56 | } | ||
| 57 | |||
| 58 | static inline u32 host1x_class_host_incr_syncpt( | ||
| 59 | unsigned cond, unsigned indx) | ||
| 60 | { | ||
| 61 | return host1x_uclass_incr_syncpt_cond_f(cond) | ||
| 62 | | host1x_uclass_incr_syncpt_indx_f(indx); | ||
| 63 | } | ||
| 64 | |||
| 65 | static inline u32 host1x_class_host_indoff_reg_write( | ||
| 66 | unsigned mod_id, unsigned offset, bool auto_inc) | ||
| 67 | { | ||
| 68 | u32 v = host1x_uclass_indoff_indbe_f(0xf) | ||
| 69 | | host1x_uclass_indoff_indmodid_f(mod_id) | ||
| 70 | | host1x_uclass_indoff_indroffset_f(offset); | ||
| 71 | if (auto_inc) | ||
| 72 | v |= host1x_uclass_indoff_autoinc_f(1); | ||
| 73 | return v; | ||
| 74 | } | ||
| 75 | |||
| 76 | static inline u32 host1x_class_host_indoff_reg_read( | ||
| 77 | unsigned mod_id, unsigned offset, bool auto_inc) | ||
| 78 | { | ||
| 79 | u32 v = host1x_uclass_indoff_indmodid_f(mod_id) | ||
| 80 | | host1x_uclass_indoff_indroffset_f(offset) | ||
| 81 | | host1x_uclass_indoff_rwn_read_v(); | ||
| 82 | if (auto_inc) | ||
| 83 | v |= host1x_uclass_indoff_autoinc_f(1); | ||
| 84 | return v; | ||
| 85 | } | ||
| 86 | |||
| 87 | /* cdma opcodes */ | ||
| 88 | static inline u32 host1x_opcode_setclass( | ||
| 89 | unsigned class_id, unsigned offset, unsigned mask) | ||
| 90 | { | ||
| 91 | return (0 << 28) | (offset << 16) | (class_id << 6) | mask; | ||
| 92 | } | ||
| 93 | |||
| 94 | static inline u32 host1x_opcode_incr(unsigned offset, unsigned count) | ||
| 95 | { | ||
| 96 | return (1 << 28) | (offset << 16) | count; | ||
| 97 | } | ||
| 98 | |||
| 99 | static inline u32 host1x_opcode_nonincr(unsigned offset, unsigned count) | ||
| 100 | { | ||
| 101 | return (2 << 28) | (offset << 16) | count; | ||
| 102 | } | ||
| 103 | |||
| 104 | static inline u32 host1x_opcode_mask(unsigned offset, unsigned mask) | ||
| 105 | { | ||
| 106 | return (3 << 28) | (offset << 16) | mask; | ||
| 107 | } | ||
| 108 | |||
| 109 | static inline u32 host1x_opcode_imm(unsigned offset, unsigned value) | ||
| 110 | { | ||
| 111 | return (4 << 28) | (offset << 16) | value; | ||
| 112 | } | ||
| 113 | |||
| 114 | static inline u32 host1x_opcode_imm_incr_syncpt(unsigned cond, unsigned indx) | ||
| 115 | { | ||
| 116 | return host1x_opcode_imm(host1x_uclass_incr_syncpt_r(), | ||
| 117 | host1x_class_host_incr_syncpt(cond, indx)); | ||
| 118 | } | ||
| 119 | |||
| 120 | static inline u32 host1x_opcode_restart(unsigned address) | ||
| 121 | { | ||
| 122 | return (5 << 28) | (address >> 4); | ||
| 123 | } | ||
| 124 | |||
| 125 | static inline u32 host1x_opcode_gather(unsigned count) | ||
| 126 | { | ||
| 127 | return (6 << 28) | count; | ||
| 128 | } | ||
| 129 | |||
| 130 | static inline u32 host1x_opcode_gather_nonincr(unsigned offset, unsigned count) | ||
| 131 | { | ||
| 132 | return (6 << 28) | (offset << 16) | BIT(15) | count; | ||
| 133 | } | ||
| 134 | |||
| 135 | static inline u32 host1x_opcode_gather_incr(unsigned offset, unsigned count) | ||
| 136 | { | ||
| 137 | return (6 << 28) | (offset << 16) | BIT(15) | BIT(14) | count; | ||
| 138 | } | ||
| 139 | |||
| 140 | #define HOST1X_OPCODE_NOP host1x_opcode_nonincr(0, 0) | ||
| 141 | |||
| 142 | #endif | ||
diff --git a/drivers/gpu/host1x/hw/host1x04.c b/drivers/gpu/host1x/hw/host1x04.c new file mode 100644 index 000000000000..8007c70fa9c4 --- /dev/null +++ b/drivers/gpu/host1x/hw/host1x04.c | |||
| @@ -0,0 +1,42 @@ | |||
| 1 | /* | ||
| 2 | * Host1x init for Tegra124 SoCs | ||
| 3 | * | ||
| 4 | * Copyright (c) 2013 NVIDIA Corporation. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms and conditions of the GNU General Public License, | ||
| 8 | * version 2, as published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 13 | * more details. | ||
| 14 | * | ||
| 15 | * You should have received a copy of the GNU General Public License | ||
| 16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 17 | */ | ||
| 18 | |||
| 19 | /* include hw specification */ | ||
| 20 | #include "host1x04.h" | ||
| 21 | #include "host1x04_hardware.h" | ||
| 22 | |||
| 23 | /* include code */ | ||
| 24 | #include "cdma_hw.c" | ||
| 25 | #include "channel_hw.c" | ||
| 26 | #include "debug_hw.c" | ||
| 27 | #include "intr_hw.c" | ||
| 28 | #include "syncpt_hw.c" | ||
| 29 | |||
| 30 | #include "../dev.h" | ||
| 31 | |||
| 32 | int host1x04_init(struct host1x *host) | ||
| 33 | { | ||
| 34 | host->channel_op = &host1x_channel_ops; | ||
| 35 | host->cdma_op = &host1x_cdma_ops; | ||
| 36 | host->cdma_pb_op = &host1x_pushbuffer_ops; | ||
| 37 | host->syncpt_op = &host1x_syncpt_ops; | ||
| 38 | host->intr_op = &host1x_intr_ops; | ||
| 39 | host->debug_op = &host1x_debug_ops; | ||
| 40 | |||
| 41 | return 0; | ||
| 42 | } | ||
diff --git a/drivers/gpu/host1x/hw/host1x04.h b/drivers/gpu/host1x/hw/host1x04.h new file mode 100644 index 000000000000..a9ab7496c06e --- /dev/null +++ b/drivers/gpu/host1x/hw/host1x04.h | |||
| @@ -0,0 +1,26 @@ | |||
| 1 | /* | ||
| 2 | * Host1x init for Tegra124 SoCs | ||
| 3 | * | ||
| 4 | * Copyright (c) 2013 NVIDIA Corporation. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms and conditions of the GNU General Public License, | ||
| 8 | * version 2, as published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 13 | * more details. | ||
| 14 | * | ||
| 15 | * You should have received a copy of the GNU General Public License | ||
| 16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 17 | */ | ||
| 18 | |||
| 19 | #ifndef HOST1X_HOST1X04_H | ||
| 20 | #define HOST1X_HOST1X04_H | ||
| 21 | |||
| 22 | struct host1x; | ||
| 23 | |||
| 24 | int host1x04_init(struct host1x *host); | ||
| 25 | |||
| 26 | #endif | ||
diff --git a/drivers/gpu/host1x/hw/host1x04_hardware.h b/drivers/gpu/host1x/hw/host1x04_hardware.h new file mode 100644 index 000000000000..de1a38175328 --- /dev/null +++ b/drivers/gpu/host1x/hw/host1x04_hardware.h | |||
| @@ -0,0 +1,142 @@ | |||
| 1 | /* | ||
| 2 | * Tegra host1x Register Offsets for Tegra124 | ||
| 3 | * | ||
| 4 | * Copyright (c) 2010-2013 NVIDIA Corporation. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms and conditions of the GNU General Public License, | ||
| 8 | * version 2, as published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 13 | * more details. | ||
| 14 | * | ||
| 15 | * You should have received a copy of the GNU General Public License | ||
| 16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 17 | */ | ||
| 18 | |||
| 19 | #ifndef __HOST1X_HOST1X04_HARDWARE_H | ||
| 20 | #define __HOST1X_HOST1X04_HARDWARE_H | ||
| 21 | |||
| 22 | #include <linux/types.h> | ||
| 23 | #include <linux/bitops.h> | ||
| 24 | |||
| 25 | #include "hw_host1x04_channel.h" | ||
| 26 | #include "hw_host1x04_sync.h" | ||
| 27 | #include "hw_host1x04_uclass.h" | ||
| 28 | |||
| 29 | static inline u32 host1x_class_host_wait_syncpt( | ||
| 30 | unsigned indx, unsigned threshold) | ||
| 31 | { | ||
| 32 | return host1x_uclass_wait_syncpt_indx_f(indx) | ||
| 33 | | host1x_uclass_wait_syncpt_thresh_f(threshold); | ||
| 34 | } | ||
| 35 | |||
| 36 | static inline u32 host1x_class_host_load_syncpt_base( | ||
| 37 | unsigned indx, unsigned threshold) | ||
| 38 | { | ||
| 39 | return host1x_uclass_load_syncpt_base_base_indx_f(indx) | ||
| 40 | | host1x_uclass_load_syncpt_base_value_f(threshold); | ||
| 41 | } | ||
| 42 | |||
| 43 | static inline u32 host1x_class_host_wait_syncpt_base( | ||
| 44 | unsigned indx, unsigned base_indx, unsigned offset) | ||
| 45 | { | ||
| 46 | return host1x_uclass_wait_syncpt_base_indx_f(indx) | ||
| 47 | | host1x_uclass_wait_syncpt_base_base_indx_f(base_indx) | ||
| 48 | | host1x_uclass_wait_syncpt_base_offset_f(offset); | ||
| 49 | } | ||
| 50 | |||
| 51 | static inline u32 host1x_class_host_incr_syncpt_base( | ||
| 52 | unsigned base_indx, unsigned offset) | ||
| 53 | { | ||
| 54 | return host1x_uclass_incr_syncpt_base_base_indx_f(base_indx) | ||
| 55 | | host1x_uclass_incr_syncpt_base_offset_f(offset); | ||
| 56 | } | ||
| 57 | |||
| 58 | static inline u32 host1x_class_host_incr_syncpt( | ||
| 59 | unsigned cond, unsigned indx) | ||
| 60 | { | ||
| 61 | return host1x_uclass_incr_syncpt_cond_f(cond) | ||
| 62 | | host1x_uclass_incr_syncpt_indx_f(indx); | ||
| 63 | } | ||
| 64 | |||
| 65 | static inline u32 host1x_class_host_indoff_reg_write( | ||
| 66 | unsigned mod_id, unsigned offset, bool auto_inc) | ||
| 67 | { | ||
| 68 | u32 v = host1x_uclass_indoff_indbe_f(0xf) | ||
| 69 | | host1x_uclass_indoff_indmodid_f(mod_id) | ||
| 70 | | host1x_uclass_indoff_indroffset_f(offset); | ||
| 71 | if (auto_inc) | ||
| 72 | v |= host1x_uclass_indoff_autoinc_f(1); | ||
| 73 | return v; | ||
| 74 | } | ||
| 75 | |||
| 76 | static inline u32 host1x_class_host_indoff_reg_read( | ||
| 77 | unsigned mod_id, unsigned offset, bool auto_inc) | ||
| 78 | { | ||
| 79 | u32 v = host1x_uclass_indoff_indmodid_f(mod_id) | ||
| 80 | | host1x_uclass_indoff_indroffset_f(offset) | ||
| 81 | | host1x_uclass_indoff_rwn_read_v(); | ||
| 82 | if (auto_inc) | ||
| 83 | v |= host1x_uclass_indoff_autoinc_f(1); | ||
| 84 | return v; | ||
| 85 | } | ||
| 86 | |||
| 87 | /* cdma opcodes */ | ||
| 88 | static inline u32 host1x_opcode_setclass( | ||
| 89 | unsigned class_id, unsigned offset, unsigned mask) | ||
| 90 | { | ||
| 91 | return (0 << 28) | (offset << 16) | (class_id << 6) | mask; | ||
| 92 | } | ||
| 93 | |||
| 94 | static inline u32 host1x_opcode_incr(unsigned offset, unsigned count) | ||
| 95 | { | ||
| 96 | return (1 << 28) | (offset << 16) | count; | ||
| 97 | } | ||
| 98 | |||
| 99 | static inline u32 host1x_opcode_nonincr(unsigned offset, unsigned count) | ||
| 100 | { | ||
| 101 | return (2 << 28) | (offset << 16) | count; | ||
| 102 | } | ||
| 103 | |||
| 104 | static inline u32 host1x_opcode_mask(unsigned offset, unsigned mask) | ||
| 105 | { | ||
| 106 | return (3 << 28) | (offset << 16) | mask; | ||
| 107 | } | ||
| 108 | |||
| 109 | static inline u32 host1x_opcode_imm(unsigned offset, unsigned value) | ||
| 110 | { | ||
| 111 | return (4 << 28) | (offset << 16) | value; | ||
| 112 | } | ||
| 113 | |||
| 114 | static inline u32 host1x_opcode_imm_incr_syncpt(unsigned cond, unsigned indx) | ||
| 115 | { | ||
| 116 | return host1x_opcode_imm(host1x_uclass_incr_syncpt_r(), | ||
| 117 | host1x_class_host_incr_syncpt(cond, indx)); | ||
| 118 | } | ||
| 119 | |||
| 120 | static inline u32 host1x_opcode_restart(unsigned address) | ||
| 121 | { | ||
| 122 | return (5 << 28) | (address >> 4); | ||
| 123 | } | ||
| 124 | |||
| 125 | static inline u32 host1x_opcode_gather(unsigned count) | ||
| 126 | { | ||
| 127 | return (6 << 28) | count; | ||
| 128 | } | ||
| 129 | |||
| 130 | static inline u32 host1x_opcode_gather_nonincr(unsigned offset, unsigned count) | ||
| 131 | { | ||
| 132 | return (6 << 28) | (offset << 16) | BIT(15) | count; | ||
| 133 | } | ||
| 134 | |||
| 135 | static inline u32 host1x_opcode_gather_incr(unsigned offset, unsigned count) | ||
| 136 | { | ||
| 137 | return (6 << 28) | (offset << 16) | BIT(15) | BIT(14) | count; | ||
| 138 | } | ||
| 139 | |||
| 140 | #define HOST1X_OPCODE_NOP host1x_opcode_nonincr(0, 0) | ||
| 141 | |||
| 142 | #endif | ||
diff --git a/drivers/gpu/host1x/hw/hw_host1x02_uclass.h b/drivers/gpu/host1x/hw/hw_host1x02_uclass.h index a3b3c9874413..028e49d9bac9 100644 --- a/drivers/gpu/host1x/hw/hw_host1x02_uclass.h +++ b/drivers/gpu/host1x/hw/hw_host1x02_uclass.h | |||
| @@ -111,6 +111,12 @@ static inline u32 host1x_uclass_wait_syncpt_base_offset_f(u32 v) | |||
| 111 | } | 111 | } |
| 112 | #define HOST1X_UCLASS_WAIT_SYNCPT_BASE_OFFSET_F(v) \ | 112 | #define HOST1X_UCLASS_WAIT_SYNCPT_BASE_OFFSET_F(v) \ |
| 113 | host1x_uclass_wait_syncpt_base_offset_f(v) | 113 | host1x_uclass_wait_syncpt_base_offset_f(v) |
| 114 | static inline u32 host1x_uclass_load_syncpt_base_r(void) | ||
| 115 | { | ||
| 116 | return 0xb; | ||
| 117 | } | ||
| 118 | #define HOST1X_UCLASS_LOAD_SYNCPT_BASE \ | ||
| 119 | host1x_uclass_load_syncpt_base_r() | ||
| 114 | static inline u32 host1x_uclass_load_syncpt_base_base_indx_f(u32 v) | 120 | static inline u32 host1x_uclass_load_syncpt_base_base_indx_f(u32 v) |
| 115 | { | 121 | { |
| 116 | return (v & 0xff) << 24; | 122 | return (v & 0xff) << 24; |
diff --git a/drivers/gpu/host1x/hw/hw_host1x04_channel.h b/drivers/gpu/host1x/hw/hw_host1x04_channel.h new file mode 100644 index 000000000000..95e6f96142b9 --- /dev/null +++ b/drivers/gpu/host1x/hw/hw_host1x04_channel.h | |||
| @@ -0,0 +1,121 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2013 NVIDIA Corporation. | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify it | ||
| 5 | * under the terms and conditions of the GNU General Public License, | ||
| 6 | * version 2, as published by the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 11 | * more details. | ||
| 12 | * | ||
| 13 | * You should have received a copy of the GNU General Public License | ||
| 14 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 15 | * | ||
| 16 | */ | ||
| 17 | |||
| 18 | /* | ||
| 19 | * Function naming determines intended use: | ||
| 20 | * | ||
| 21 | * <x>_r(void) : Returns the offset for register <x>. | ||
| 22 | * | ||
| 23 | * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. | ||
| 24 | * | ||
| 25 | * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. | ||
| 26 | * | ||
| 27 | * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted | ||
| 28 | * and masked to place it at field <y> of register <x>. This value | ||
| 29 | * can be |'d with others to produce a full register value for | ||
| 30 | * register <x>. | ||
| 31 | * | ||
| 32 | * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This | ||
| 33 | * value can be ~'d and then &'d to clear the value of field <y> for | ||
| 34 | * register <x>. | ||
| 35 | * | ||
| 36 | * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted | ||
| 37 | * to place it at field <y> of register <x>. This value can be |'d | ||
| 38 | * with others to produce a full register value for <x>. | ||
| 39 | * | ||
| 40 | * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register | ||
| 41 | * <x> value 'r' after being shifted to place its LSB at bit 0. | ||
| 42 | * This value is suitable for direct comparison with other unshifted | ||
| 43 | * values appropriate for use in field <y> of register <x>. | ||
| 44 | * | ||
| 45 | * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for | ||
| 46 | * field <y> of register <x>. This value is suitable for direct | ||
| 47 | * comparison with unshifted values appropriate for use in field <y> | ||
| 48 | * of register <x>. | ||
| 49 | */ | ||
| 50 | |||
| 51 | #ifndef HOST1X_HW_HOST1X04_CHANNEL_H | ||
| 52 | #define HOST1X_HW_HOST1X04_CHANNEL_H | ||
| 53 | |||
| 54 | static inline u32 host1x_channel_fifostat_r(void) | ||
| 55 | { | ||
| 56 | return 0x0; | ||
| 57 | } | ||
| 58 | #define HOST1X_CHANNEL_FIFOSTAT \ | ||
| 59 | host1x_channel_fifostat_r() | ||
| 60 | static inline u32 host1x_channel_fifostat_cfempty_v(u32 r) | ||
| 61 | { | ||
| 62 | return (r >> 11) & 0x1; | ||
| 63 | } | ||
| 64 | #define HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_V(r) \ | ||
| 65 | host1x_channel_fifostat_cfempty_v(r) | ||
| 66 | static inline u32 host1x_channel_dmastart_r(void) | ||
| 67 | { | ||
| 68 | return 0x14; | ||
| 69 | } | ||
| 70 | #define HOST1X_CHANNEL_DMASTART \ | ||
| 71 | host1x_channel_dmastart_r() | ||
| 72 | static inline u32 host1x_channel_dmaput_r(void) | ||
| 73 | { | ||
| 74 | return 0x18; | ||
| 75 | } | ||
| 76 | #define HOST1X_CHANNEL_DMAPUT \ | ||
| 77 | host1x_channel_dmaput_r() | ||
| 78 | static inline u32 host1x_channel_dmaget_r(void) | ||
| 79 | { | ||
| 80 | return 0x1c; | ||
| 81 | } | ||
| 82 | #define HOST1X_CHANNEL_DMAGET \ | ||
| 83 | host1x_channel_dmaget_r() | ||
| 84 | static inline u32 host1x_channel_dmaend_r(void) | ||
| 85 | { | ||
| 86 | return 0x20; | ||
| 87 | } | ||
| 88 | #define HOST1X_CHANNEL_DMAEND \ | ||
| 89 | host1x_channel_dmaend_r() | ||
| 90 | static inline u32 host1x_channel_dmactrl_r(void) | ||
| 91 | { | ||
| 92 | return 0x24; | ||
| 93 | } | ||
| 94 | #define HOST1X_CHANNEL_DMACTRL \ | ||
| 95 | host1x_channel_dmactrl_r() | ||
| 96 | static inline u32 host1x_channel_dmactrl_dmastop(void) | ||
| 97 | { | ||
| 98 | return 1 << 0; | ||
| 99 | } | ||
| 100 | #define HOST1X_CHANNEL_DMACTRL_DMASTOP \ | ||
| 101 | host1x_channel_dmactrl_dmastop() | ||
| 102 | static inline u32 host1x_channel_dmactrl_dmastop_v(u32 r) | ||
| 103 | { | ||
| 104 | return (r >> 0) & 0x1; | ||
| 105 | } | ||
| 106 | #define HOST1X_CHANNEL_DMACTRL_DMASTOP_V(r) \ | ||
| 107 | host1x_channel_dmactrl_dmastop_v(r) | ||
| 108 | static inline u32 host1x_channel_dmactrl_dmagetrst(void) | ||
| 109 | { | ||
| 110 | return 1 << 1; | ||
| 111 | } | ||
| 112 | #define HOST1X_CHANNEL_DMACTRL_DMAGETRST \ | ||
| 113 | host1x_channel_dmactrl_dmagetrst() | ||
| 114 | static inline u32 host1x_channel_dmactrl_dmainitget(void) | ||
| 115 | { | ||
| 116 | return 1 << 2; | ||
| 117 | } | ||
| 118 | #define HOST1X_CHANNEL_DMACTRL_DMAINITGET \ | ||
| 119 | host1x_channel_dmactrl_dmainitget() | ||
| 120 | |||
| 121 | #endif | ||
diff --git a/drivers/gpu/host1x/hw/hw_host1x04_sync.h b/drivers/gpu/host1x/hw/hw_host1x04_sync.h new file mode 100644 index 000000000000..ef2275b5407a --- /dev/null +++ b/drivers/gpu/host1x/hw/hw_host1x04_sync.h | |||
| @@ -0,0 +1,243 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2013 NVIDIA Corporation. | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify it | ||
| 5 | * under the terms and conditions of the GNU General Public License, | ||
| 6 | * version 2, as published by the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 11 | * more details. | ||
| 12 | * | ||
| 13 | * You should have received a copy of the GNU General Public License | ||
| 14 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 15 | * | ||
| 16 | */ | ||
| 17 | |||
| 18 | /* | ||
| 19 | * Function naming determines intended use: | ||
| 20 | * | ||
| 21 | * <x>_r(void) : Returns the offset for register <x>. | ||
| 22 | * | ||
| 23 | * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. | ||
| 24 | * | ||
| 25 | * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. | ||
| 26 | * | ||
| 27 | * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted | ||
| 28 | * and masked to place it at field <y> of register <x>. This value | ||
| 29 | * can be |'d with others to produce a full register value for | ||
| 30 | * register <x>. | ||
| 31 | * | ||
| 32 | * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This | ||
| 33 | * value can be ~'d and then &'d to clear the value of field <y> for | ||
| 34 | * register <x>. | ||
| 35 | * | ||
| 36 | * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted | ||
| 37 | * to place it at field <y> of register <x>. This value can be |'d | ||
| 38 | * with others to produce a full register value for <x>. | ||
| 39 | * | ||
| 40 | * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register | ||
| 41 | * <x> value 'r' after being shifted to place its LSB at bit 0. | ||
| 42 | * This value is suitable for direct comparison with other unshifted | ||
| 43 | * values appropriate for use in field <y> of register <x>. | ||
| 44 | * | ||
| 45 | * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for | ||
| 46 | * field <y> of register <x>. This value is suitable for direct | ||
| 47 | * comparison with unshifted values appropriate for use in field <y> | ||
| 48 | * of register <x>. | ||
| 49 | */ | ||
| 50 | |||
| 51 | #ifndef HOST1X_HW_HOST1X04_SYNC_H | ||
| 52 | #define HOST1X_HW_HOST1X04_SYNC_H | ||
| 53 | |||
| 54 | #define REGISTER_STRIDE 4 | ||
| 55 | |||
| 56 | static inline u32 host1x_sync_syncpt_r(unsigned int id) | ||
| 57 | { | ||
| 58 | return 0xf80 + id * REGISTER_STRIDE; | ||
| 59 | } | ||
| 60 | #define HOST1X_SYNC_SYNCPT(id) \ | ||
| 61 | host1x_sync_syncpt_r(id) | ||
| 62 | static inline u32 host1x_sync_syncpt_thresh_cpu0_int_status_r(unsigned int id) | ||
| 63 | { | ||
| 64 | return 0xe80 + id * REGISTER_STRIDE; | ||
| 65 | } | ||
| 66 | #define HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(id) \ | ||
| 67 | host1x_sync_syncpt_thresh_cpu0_int_status_r(id) | ||
| 68 | static inline u32 host1x_sync_syncpt_thresh_int_disable_r(unsigned int id) | ||
| 69 | { | ||
| 70 | return 0xf00 + id * REGISTER_STRIDE; | ||
| 71 | } | ||
| 72 | #define HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(id) \ | ||
| 73 | host1x_sync_syncpt_thresh_int_disable_r(id) | ||
| 74 | static inline u32 host1x_sync_syncpt_thresh_int_enable_cpu0_r(unsigned int id) | ||
| 75 | { | ||
| 76 | return 0xf20 + id * REGISTER_STRIDE; | ||
| 77 | } | ||
| 78 | #define HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(id) \ | ||
| 79 | host1x_sync_syncpt_thresh_int_enable_cpu0_r(id) | ||
| 80 | static inline u32 host1x_sync_cf_setup_r(unsigned int channel) | ||
| 81 | { | ||
| 82 | return 0xc00 + channel * REGISTER_STRIDE; | ||
| 83 | } | ||
| 84 | #define HOST1X_SYNC_CF_SETUP(channel) \ | ||
| 85 | host1x_sync_cf_setup_r(channel) | ||
| 86 | static inline u32 host1x_sync_cf_setup_base_v(u32 r) | ||
| 87 | { | ||
| 88 | return (r >> 0) & 0x3ff; | ||
| 89 | } | ||
| 90 | #define HOST1X_SYNC_CF_SETUP_BASE_V(r) \ | ||
| 91 | host1x_sync_cf_setup_base_v(r) | ||
| 92 | static inline u32 host1x_sync_cf_setup_limit_v(u32 r) | ||
| 93 | { | ||
| 94 | return (r >> 16) & 0x3ff; | ||
| 95 | } | ||
| 96 | #define HOST1X_SYNC_CF_SETUP_LIMIT_V(r) \ | ||
| 97 | host1x_sync_cf_setup_limit_v(r) | ||
| 98 | static inline u32 host1x_sync_cmdproc_stop_r(void) | ||
| 99 | { | ||
| 100 | return 0xac; | ||
| 101 | } | ||
| 102 | #define HOST1X_SYNC_CMDPROC_STOP \ | ||
| 103 | host1x_sync_cmdproc_stop_r() | ||
| 104 | static inline u32 host1x_sync_ch_teardown_r(void) | ||
| 105 | { | ||
| 106 | return 0xb0; | ||
| 107 | } | ||
| 108 | #define HOST1X_SYNC_CH_TEARDOWN \ | ||
| 109 | host1x_sync_ch_teardown_r() | ||
| 110 | static inline u32 host1x_sync_usec_clk_r(void) | ||
| 111 | { | ||
| 112 | return 0x1a4; | ||
| 113 | } | ||
| 114 | #define HOST1X_SYNC_USEC_CLK \ | ||
| 115 | host1x_sync_usec_clk_r() | ||
| 116 | static inline u32 host1x_sync_ctxsw_timeout_cfg_r(void) | ||
| 117 | { | ||
| 118 | return 0x1a8; | ||
| 119 | } | ||
| 120 | #define HOST1X_SYNC_CTXSW_TIMEOUT_CFG \ | ||
| 121 | host1x_sync_ctxsw_timeout_cfg_r() | ||
| 122 | static inline u32 host1x_sync_ip_busy_timeout_r(void) | ||
| 123 | { | ||
| 124 | return 0x1bc; | ||
| 125 | } | ||
| 126 | #define HOST1X_SYNC_IP_BUSY_TIMEOUT \ | ||
| 127 | host1x_sync_ip_busy_timeout_r() | ||
| 128 | static inline u32 host1x_sync_mlock_owner_r(unsigned int id) | ||
| 129 | { | ||
| 130 | return 0x340 + id * REGISTER_STRIDE; | ||
| 131 | } | ||
| 132 | #define HOST1X_SYNC_MLOCK_OWNER(id) \ | ||
| 133 | host1x_sync_mlock_owner_r(id) | ||
| 134 | static inline u32 host1x_sync_mlock_owner_chid_f(u32 v) | ||
| 135 | { | ||
| 136 | return (v & 0xf) << 8; | ||
| 137 | } | ||
| 138 | #define HOST1X_SYNC_MLOCK_OWNER_CHID_F(v) \ | ||
| 139 | host1x_sync_mlock_owner_chid_f(v) | ||
| 140 | static inline u32 host1x_sync_mlock_owner_cpu_owns_v(u32 r) | ||
| 141 | { | ||
| 142 | return (r >> 1) & 0x1; | ||
| 143 | } | ||
| 144 | #define HOST1X_SYNC_MLOCK_OWNER_CPU_OWNS_V(r) \ | ||
| 145 | host1x_sync_mlock_owner_cpu_owns_v(r) | ||
| 146 | static inline u32 host1x_sync_mlock_owner_ch_owns_v(u32 r) | ||
| 147 | { | ||
| 148 | return (r >> 0) & 0x1; | ||
| 149 | } | ||
| 150 | #define HOST1X_SYNC_MLOCK_OWNER_CH_OWNS_V(r) \ | ||
| 151 | host1x_sync_mlock_owner_ch_owns_v(r) | ||
| 152 | static inline u32 host1x_sync_syncpt_int_thresh_r(unsigned int id) | ||
| 153 | { | ||
| 154 | return 0x1380 + id * REGISTER_STRIDE; | ||
| 155 | } | ||
| 156 | #define HOST1X_SYNC_SYNCPT_INT_THRESH(id) \ | ||
| 157 | host1x_sync_syncpt_int_thresh_r(id) | ||
| 158 | static inline u32 host1x_sync_syncpt_base_r(unsigned int id) | ||
| 159 | { | ||
| 160 | return 0x600 + id * REGISTER_STRIDE; | ||
| 161 | } | ||
| 162 | #define HOST1X_SYNC_SYNCPT_BASE(id) \ | ||
| 163 | host1x_sync_syncpt_base_r(id) | ||
| 164 | static inline u32 host1x_sync_syncpt_cpu_incr_r(unsigned int id) | ||
| 165 | { | ||
| 166 | return 0xf60 + id * REGISTER_STRIDE; | ||
| 167 | } | ||
| 168 | #define HOST1X_SYNC_SYNCPT_CPU_INCR(id) \ | ||
| 169 | host1x_sync_syncpt_cpu_incr_r(id) | ||
| 170 | static inline u32 host1x_sync_cbread_r(unsigned int channel) | ||
| 171 | { | ||
| 172 | return 0xc80 + channel * REGISTER_STRIDE; | ||
| 173 | } | ||
| 174 | #define HOST1X_SYNC_CBREAD(channel) \ | ||
| 175 | host1x_sync_cbread_r(channel) | ||
| 176 | static inline u32 host1x_sync_cfpeek_ctrl_r(void) | ||
| 177 | { | ||
| 178 | return 0x74c; | ||
| 179 | } | ||
| 180 | #define HOST1X_SYNC_CFPEEK_CTRL \ | ||
| 181 | host1x_sync_cfpeek_ctrl_r() | ||
| 182 | static inline u32 host1x_sync_cfpeek_ctrl_addr_f(u32 v) | ||
| 183 | { | ||
| 184 | return (v & 0x3ff) << 0; | ||
| 185 | } | ||
| 186 | #define HOST1X_SYNC_CFPEEK_CTRL_ADDR_F(v) \ | ||
| 187 | host1x_sync_cfpeek_ctrl_addr_f(v) | ||
| 188 | static inline u32 host1x_sync_cfpeek_ctrl_channr_f(u32 v) | ||
| 189 | { | ||
| 190 | return (v & 0xf) << 16; | ||
| 191 | } | ||
| 192 | #define HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(v) \ | ||
| 193 | host1x_sync_cfpeek_ctrl_channr_f(v) | ||
| 194 | static inline u32 host1x_sync_cfpeek_ctrl_ena_f(u32 v) | ||
| 195 | { | ||
| 196 | return (v & 0x1) << 31; | ||
| 197 | } | ||
| 198 | #define HOST1X_SYNC_CFPEEK_CTRL_ENA_F(v) \ | ||
| 199 | host1x_sync_cfpeek_ctrl_ena_f(v) | ||
| 200 | static inline u32 host1x_sync_cfpeek_read_r(void) | ||
| 201 | { | ||
| 202 | return 0x750; | ||
| 203 | } | ||
| 204 | #define HOST1X_SYNC_CFPEEK_READ \ | ||
| 205 | host1x_sync_cfpeek_read_r() | ||
| 206 | static inline u32 host1x_sync_cfpeek_ptrs_r(void) | ||
| 207 | { | ||
| 208 | return 0x754; | ||
| 209 | } | ||
| 210 | #define HOST1X_SYNC_CFPEEK_PTRS \ | ||
| 211 | host1x_sync_cfpeek_ptrs_r() | ||
| 212 | static inline u32 host1x_sync_cfpeek_ptrs_cf_rd_ptr_v(u32 r) | ||
| 213 | { | ||
| 214 | return (r >> 0) & 0x3ff; | ||
| 215 | } | ||
| 216 | #define HOST1X_SYNC_CFPEEK_PTRS_CF_RD_PTR_V(r) \ | ||
| 217 | host1x_sync_cfpeek_ptrs_cf_rd_ptr_v(r) | ||
| 218 | static inline u32 host1x_sync_cfpeek_ptrs_cf_wr_ptr_v(u32 r) | ||
| 219 | { | ||
| 220 | return (r >> 16) & 0x3ff; | ||
| 221 | } | ||
| 222 | #define HOST1X_SYNC_CFPEEK_PTRS_CF_WR_PTR_V(r) \ | ||
| 223 | host1x_sync_cfpeek_ptrs_cf_wr_ptr_v(r) | ||
| 224 | static inline u32 host1x_sync_cbstat_r(unsigned int channel) | ||
| 225 | { | ||
| 226 | return 0xcc0 + channel * REGISTER_STRIDE; | ||
| 227 | } | ||
| 228 | #define HOST1X_SYNC_CBSTAT(channel) \ | ||
| 229 | host1x_sync_cbstat_r(channel) | ||
| 230 | static inline u32 host1x_sync_cbstat_cboffset_v(u32 r) | ||
| 231 | { | ||
| 232 | return (r >> 0) & 0xffff; | ||
| 233 | } | ||
| 234 | #define HOST1X_SYNC_CBSTAT_CBOFFSET_V(r) \ | ||
| 235 | host1x_sync_cbstat_cboffset_v(r) | ||
| 236 | static inline u32 host1x_sync_cbstat_cbclass_v(u32 r) | ||
| 237 | { | ||
| 238 | return (r >> 16) & 0x3ff; | ||
| 239 | } | ||
| 240 | #define HOST1X_SYNC_CBSTAT_CBCLASS_V(r) \ | ||
| 241 | host1x_sync_cbstat_cbclass_v(r) | ||
| 242 | |||
| 243 | #endif | ||
diff --git a/drivers/gpu/host1x/hw/hw_host1x04_uclass.h b/drivers/gpu/host1x/hw/hw_host1x04_uclass.h new file mode 100644 index 000000000000..d1460e971493 --- /dev/null +++ b/drivers/gpu/host1x/hw/hw_host1x04_uclass.h | |||
| @@ -0,0 +1,181 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2013 NVIDIA Corporation. | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify it | ||
| 5 | * under the terms and conditions of the GNU General Public License, | ||
| 6 | * version 2, as published by the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 11 | * more details. | ||
| 12 | * | ||
| 13 | * You should have received a copy of the GNU General Public License | ||
| 14 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 15 | * | ||
| 16 | */ | ||
| 17 | |||
| 18 | /* | ||
| 19 | * Function naming determines intended use: | ||
| 20 | * | ||
| 21 | * <x>_r(void) : Returns the offset for register <x>. | ||
| 22 | * | ||
| 23 | * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. | ||
| 24 | * | ||
| 25 | * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. | ||
| 26 | * | ||
| 27 | * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted | ||
| 28 | * and masked to place it at field <y> of register <x>. This value | ||
| 29 | * can be |'d with others to produce a full register value for | ||
| 30 | * register <x>. | ||
| 31 | * | ||
| 32 | * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This | ||
| 33 | * value can be ~'d and then &'d to clear the value of field <y> for | ||
| 34 | * register <x>. | ||
| 35 | * | ||
| 36 | * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted | ||
| 37 | * to place it at field <y> of register <x>. This value can be |'d | ||
| 38 | * with others to produce a full register value for <x>. | ||
| 39 | * | ||
| 40 | * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register | ||
| 41 | * <x> value 'r' after being shifted to place its LSB at bit 0. | ||
| 42 | * This value is suitable for direct comparison with other unshifted | ||
| 43 | * values appropriate for use in field <y> of register <x>. | ||
| 44 | * | ||
| 45 | * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for | ||
| 46 | * field <y> of register <x>. This value is suitable for direct | ||
| 47 | * comparison with unshifted values appropriate for use in field <y> | ||
| 48 | * of register <x>. | ||
| 49 | */ | ||
| 50 | |||
| 51 | #ifndef HOST1X_HW_HOST1X04_UCLASS_H | ||
| 52 | #define HOST1X_HW_HOST1X04_UCLASS_H | ||
| 53 | |||
/* INCR_SYNCPT (opcode offset 0x00): increment a syncpoint from the unit. */
static inline u32 host1x_uclass_incr_syncpt_r(void)
{
	return 0x0;
}
#define HOST1X_UCLASS_INCR_SYNCPT \
	host1x_uclass_incr_syncpt_r()
/* Condition on which the increment fires, field at bits 15:8. */
static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v)
{
	u32 cond = v & 0xff;

	return cond << 8;
}
#define HOST1X_UCLASS_INCR_SYNCPT_COND_F(v) \
	host1x_uclass_incr_syncpt_cond_f(v)
/* Syncpoint index to increment, field at bits 7:0. */
static inline u32 host1x_uclass_incr_syncpt_indx_f(u32 v)
{
	return v & 0xff;
}
#define HOST1X_UCLASS_INCR_SYNCPT_INDX_F(v) \
	host1x_uclass_incr_syncpt_indx_f(v)
/* WAIT_SYNCPT (0x08): stall until a syncpoint reaches a threshold. */
static inline u32 host1x_uclass_wait_syncpt_r(void)
{
	return 0x8;
}
#define HOST1X_UCLASS_WAIT_SYNCPT \
	host1x_uclass_wait_syncpt_r()
/* Syncpoint index to wait on, field at bits 31:24. */
static inline u32 host1x_uclass_wait_syncpt_indx_f(u32 v)
{
	u32 indx = v & 0xff;

	return indx << 24;
}
#define HOST1X_UCLASS_WAIT_SYNCPT_INDX_F(v) \
	host1x_uclass_wait_syncpt_indx_f(v)
/* Threshold value to wait for, field at bits 23:0. */
static inline u32 host1x_uclass_wait_syncpt_thresh_f(u32 v)
{
	return v & 0xffffff;
}
#define HOST1X_UCLASS_WAIT_SYNCPT_THRESH_F(v) \
	host1x_uclass_wait_syncpt_thresh_f(v)
/* WAIT_SYNCPT_BASE (0x09): wait for (base register value + offset). */
static inline u32 host1x_uclass_wait_syncpt_base_r(void)
{
	return 0x9;
}
#define HOST1X_UCLASS_WAIT_SYNCPT_BASE \
	host1x_uclass_wait_syncpt_base_r()
/* Syncpoint index, field at bits 31:24. */
static inline u32 host1x_uclass_wait_syncpt_base_indx_f(u32 v)
{
	u32 indx = v & 0xff;

	return indx << 24;
}
#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_INDX_F(v) \
	host1x_uclass_wait_syncpt_base_indx_f(v)
/* Syncpoint base register index, field at bits 23:16. */
static inline u32 host1x_uclass_wait_syncpt_base_base_indx_f(u32 v)
{
	u32 base_indx = v & 0xff;

	return base_indx << 16;
}
#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_BASE_INDX_F(v) \
	host1x_uclass_wait_syncpt_base_base_indx_f(v)
/* Offset added to the base value, field at bits 15:0. */
static inline u32 host1x_uclass_wait_syncpt_base_offset_f(u32 v)
{
	return v & 0xffff;
}
#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_OFFSET_F(v) \
	host1x_uclass_wait_syncpt_base_offset_f(v)
/* LOAD_SYNCPT_BASE (0x0b): load an immediate value into a syncpoint base. */
static inline u32 host1x_uclass_load_syncpt_base_r(void)
{
	return 0xb;
}
#define HOST1X_UCLASS_LOAD_SYNCPT_BASE \
	host1x_uclass_load_syncpt_base_r()
/* Syncpoint base register index, field at bits 31:24. */
static inline u32 host1x_uclass_load_syncpt_base_base_indx_f(u32 v)
{
	u32 base_indx = v & 0xff;

	return base_indx << 24;
}
#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_BASE_INDX_F(v) \
	host1x_uclass_load_syncpt_base_base_indx_f(v)
/* Value to load into the base, field at bits 23:0. */
static inline u32 host1x_uclass_load_syncpt_base_value_f(u32 v)
{
	return v & 0xffffff;
}
#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_VALUE_F(v) \
	host1x_uclass_load_syncpt_base_value_f(v)
/* INCR_SYNCPT_BASE: add an offset to a syncpoint base register. */
static inline u32 host1x_uclass_incr_syncpt_base_base_indx_f(u32 v)
{
	u32 base_indx = v & 0xff;

	return base_indx << 24;
}
#define HOST1X_UCLASS_INCR_SYNCPT_BASE_BASE_INDX_F(v) \
	host1x_uclass_incr_syncpt_base_base_indx_f(v)
/* Amount to add to the base, field at bits 23:0. */
static inline u32 host1x_uclass_incr_syncpt_base_offset_f(u32 v)
{
	return v & 0xffffff;
}
#define HOST1X_UCLASS_INCR_SYNCPT_BASE_OFFSET_F(v) \
	host1x_uclass_incr_syncpt_base_offset_f(v)
/* INDOFF (0x2d): set up indirect register access to a client module. */
static inline u32 host1x_uclass_indoff_r(void)
{
	return 0x2d;
}
#define HOST1X_UCLASS_INDOFF \
	host1x_uclass_indoff_r()
/* Byte enables for indirect writes, field at bits 31:28. */
static inline u32 host1x_uclass_indoff_indbe_f(u32 v)
{
	u32 indbe = v & 0xf;

	return indbe << 28;
}
#define HOST1X_UCLASS_INDOFF_INDBE_F(v) \
	host1x_uclass_indoff_indbe_f(v)
/* Auto-increment of the indirect offset, flag at bit 27. */
static inline u32 host1x_uclass_indoff_autoinc_f(u32 v)
{
	u32 autoinc = v & 0x1;

	return autoinc << 27;
}
#define HOST1X_UCLASS_INDOFF_AUTOINC_F(v) \
	host1x_uclass_indoff_autoinc_f(v)
/* Target module ID, field at bits 25:18. */
static inline u32 host1x_uclass_indoff_indmodid_f(u32 v)
{
	u32 indmodid = v & 0xff;

	return indmodid << 18;
}
#define HOST1X_UCLASS_INDOFF_INDMODID_F(v) \
	host1x_uclass_indoff_indmodid_f(v)
/* Register offset within the module, field at bits 17:2 (word aligned). */
static inline u32 host1x_uclass_indoff_indroffset_f(u32 v)
{
	u32 indroffset = v & 0xffff;

	return indroffset << 2;
}
#define HOST1X_UCLASS_INDOFF_INDROFFSET_F(v) \
	host1x_uclass_indoff_indroffset_f(v)
/*
 * Constant for the INDOFF RWN field selecting a read transaction.
 *
 * Fix: the original block redefined HOST1X_UCLASS_INDOFF_INDROFFSET_F()
 * here a second time (copy/paste slip from the entry above) and never
 * exposed a macro for host1x_uclass_indoff_rwn_read_v(). Define the
 * intended HOST1X_UCLASS_INDOFF_RWN_READ_V macro instead; the first
 * (correct) INDROFFSET_F definition earlier in the file is untouched.
 */
static inline u32 host1x_uclass_indoff_rwn_read_v(void)
{
	return 1;
}
#define HOST1X_UCLASS_INDOFF_RWN_READ_V \
	host1x_uclass_indoff_rwn_read_v()
| 180 | |||
| 181 | #endif | ||
diff --git a/drivers/gpu/host1x/hw/intr_hw.c b/drivers/gpu/host1x/hw/intr_hw.c index b26dcc83bc1b..db9017adfe2b 100644 --- a/drivers/gpu/host1x/hw/intr_hw.c +++ b/drivers/gpu/host1x/hw/intr_hw.c | |||
| @@ -20,7 +20,6 @@ | |||
| 20 | #include <linux/interrupt.h> | 20 | #include <linux/interrupt.h> |
| 21 | #include <linux/irq.h> | 21 | #include <linux/irq.h> |
| 22 | #include <linux/io.h> | 22 | #include <linux/io.h> |
| 23 | #include <asm/mach/irq.h> | ||
| 24 | 23 | ||
| 25 | #include "../intr.h" | 24 | #include "../intr.h" |
| 26 | #include "../dev.h" | 25 | #include "../dev.h" |
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c index de5ec333ce1a..1146e3bba6e1 100644 --- a/drivers/gpu/host1x/job.c +++ b/drivers/gpu/host1x/job.c | |||
| @@ -75,12 +75,14 @@ struct host1x_job *host1x_job_alloc(struct host1x_channel *ch, | |||
| 75 | 75 | ||
| 76 | return job; | 76 | return job; |
| 77 | } | 77 | } |
| 78 | EXPORT_SYMBOL(host1x_job_alloc); | ||
| 78 | 79 | ||
| 79 | struct host1x_job *host1x_job_get(struct host1x_job *job) | 80 | struct host1x_job *host1x_job_get(struct host1x_job *job) |
| 80 | { | 81 | { |
| 81 | kref_get(&job->ref); | 82 | kref_get(&job->ref); |
| 82 | return job; | 83 | return job; |
| 83 | } | 84 | } |
| 85 | EXPORT_SYMBOL(host1x_job_get); | ||
| 84 | 86 | ||
| 85 | static void job_free(struct kref *ref) | 87 | static void job_free(struct kref *ref) |
| 86 | { | 88 | { |
| @@ -93,6 +95,7 @@ void host1x_job_put(struct host1x_job *job) | |||
| 93 | { | 95 | { |
| 94 | kref_put(&job->ref, job_free); | 96 | kref_put(&job->ref, job_free); |
| 95 | } | 97 | } |
| 98 | EXPORT_SYMBOL(host1x_job_put); | ||
| 96 | 99 | ||
| 97 | void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo, | 100 | void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo, |
| 98 | u32 words, u32 offset) | 101 | u32 words, u32 offset) |
| @@ -104,6 +107,7 @@ void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo, | |||
| 104 | cur_gather->offset = offset; | 107 | cur_gather->offset = offset; |
| 105 | job->num_gathers++; | 108 | job->num_gathers++; |
| 106 | } | 109 | } |
| 110 | EXPORT_SYMBOL(host1x_job_add_gather); | ||
| 107 | 111 | ||
| 108 | /* | 112 | /* |
| 109 | * NULL an already satisfied WAIT_SYNCPT host method, by patching its | 113 | * NULL an already satisfied WAIT_SYNCPT host method, by patching its |
| @@ -560,6 +564,7 @@ out: | |||
| 560 | 564 | ||
| 561 | return err; | 565 | return err; |
| 562 | } | 566 | } |
| 567 | EXPORT_SYMBOL(host1x_job_pin); | ||
| 563 | 568 | ||
| 564 | void host1x_job_unpin(struct host1x_job *job) | 569 | void host1x_job_unpin(struct host1x_job *job) |
| 565 | { | 570 | { |
| @@ -577,6 +582,7 @@ void host1x_job_unpin(struct host1x_job *job) | |||
| 577 | job->gather_copy_mapped, | 582 | job->gather_copy_mapped, |
| 578 | job->gather_copy); | 583 | job->gather_copy); |
| 579 | } | 584 | } |
| 585 | EXPORT_SYMBOL(host1x_job_unpin); | ||
| 580 | 586 | ||
| 581 | /* | 587 | /* |
| 582 | * Debug routine used to dump job entries | 588 | * Debug routine used to dump job entries |
diff --git a/drivers/gpu/host1x/mipi.c b/drivers/gpu/host1x/mipi.c new file mode 100644 index 000000000000..9882ea122024 --- /dev/null +++ b/drivers/gpu/host1x/mipi.c | |||
| @@ -0,0 +1,275 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2013 NVIDIA Corporation | ||
| 3 | * | ||
| 4 | * Permission to use, copy, modify, distribute, and sell this software and its | ||
| 5 | * documentation for any purpose is hereby granted without fee, provided that | ||
| 6 | * the above copyright notice appear in all copies and that both that copyright | ||
| 7 | * notice and this permission notice appear in supporting documentation, and | ||
| 8 | * that the name of the copyright holders not be used in advertising or | ||
| 9 | * publicity pertaining to distribution of the software without specific, | ||
| 10 | * written prior permission. The copyright holders make no representations | ||
| 11 | * about the suitability of this software for any purpose. It is provided "as | ||
| 12 | * is" without express or implied warranty. | ||
| 13 | * | ||
| 14 | * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, | ||
| 15 | * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO | ||
| 16 | * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR | ||
| 17 | * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, | ||
| 18 | * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER | ||
| 19 | * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE | ||
| 20 | * OF THIS SOFTWARE. | ||
| 21 | */ | ||
| 22 | |||
| 23 | #include <linux/clk.h> | ||
| 24 | #include <linux/delay.h> | ||
| 25 | #include <linux/host1x.h> | ||
| 26 | #include <linux/io.h> | ||
| 27 | #include <linux/of_platform.h> | ||
| 28 | #include <linux/platform_device.h> | ||
| 29 | #include <linux/slab.h> | ||
| 30 | |||
| 31 | #include "dev.h" | ||
| 32 | |||
/*
 * MIPI calibration controller registers. Offsets are in 32-bit words;
 * tegra_mipi_readl()/tegra_mipi_writel() shift them left by 2 to form
 * the byte offset into the mapped window.
 */
#define MIPI_CAL_CTRL			0x00
#define MIPI_CAL_CTRL_START		(1 << 0)	/* kick off a run */

#define MIPI_CAL_AUTOCAL_CTRL		0x01

#define MIPI_CAL_STATUS			0x02
#define MIPI_CAL_STATUS_DONE		(1 << 16)	/* run finished */
#define MIPI_CAL_STATUS_ACTIVE		(1 << 0)	/* run in progress */

/* Per-pad configuration registers, one per CSI/DSI pad block. */
#define MIPI_CAL_CONFIG_CSIA		0x05
#define MIPI_CAL_CONFIG_CSIB		0x06
#define MIPI_CAL_CONFIG_CSIC		0x07
#define MIPI_CAL_CONFIG_CSID		0x08
#define MIPI_CAL_CONFIG_CSIE		0x09
#define MIPI_CAL_CONFIG_DSIA		0x0e
#define MIPI_CAL_CONFIG_DSIB		0x0f
#define MIPI_CAL_CONFIG_DSIC		0x10
#define MIPI_CAL_CONFIG_DSID		0x11

/* Fields within the MIPI_CAL_CONFIG_* registers. */
#define MIPI_CAL_CONFIG_SELECT		(1 << 21)	/* include pad in run */
#define MIPI_CAL_CONFIG_HSPDOS(x)	(((x) & 0x1f) << 16)
#define MIPI_CAL_CONFIG_HSPUOS(x)	(((x) & 0x1f) << 8)
#define MIPI_CAL_CONFIG_TERMOS(x)	(((x) & 0x1f) << 0)

#define MIPI_CAL_BIAS_PAD_CFG0		0x16
#define MIPI_CAL_BIAS_PAD_PDVCLAMP	(1 << 1)
#define MIPI_CAL_BIAS_PAD_E_VCLAMP_REF	(1 << 0)

#define MIPI_CAL_BIAS_PAD_CFG1		0x17

#define MIPI_CAL_BIAS_PAD_CFG2		0x18
#define MIPI_CAL_BIAS_PAD_PDVREG	(1 << 1)

/*
 * Table of pad configuration registers; bit i of
 * tegra_mipi_device::pads selects modules[i] for calibration.
 */
static const struct module {
	unsigned long reg;
} modules[] = {
	{ .reg = MIPI_CAL_CONFIG_CSIA },
	{ .reg = MIPI_CAL_CONFIG_CSIB },
	{ .reg = MIPI_CAL_CONFIG_CSIC },
	{ .reg = MIPI_CAL_CONFIG_CSID },
	{ .reg = MIPI_CAL_CONFIG_CSIE },
	{ .reg = MIPI_CAL_CONFIG_DSIA },
	{ .reg = MIPI_CAL_CONFIG_DSIB },
	{ .reg = MIPI_CAL_CONFIG_DSIC },
	{ .reg = MIPI_CAL_CONFIG_DSID },
};

/* State for one MIPI calibration controller instance. */
struct tegra_mipi {
	void __iomem *regs;	/* mapped register window */
	struct mutex lock;	/* serializes calibration runs */
	struct clk *clk;	/* controller clock, prepared at probe time */
};

/* Per-client handle returned by tegra_mipi_request(). */
struct tegra_mipi_device {
	struct platform_device *pdev;	/* calibration controller device */
	struct tegra_mipi *mipi;	/* its driver data */
	struct device *device;		/* the requesting client device */
	unsigned long pads;		/* bitmask indexing modules[] */
};
| 92 | |||
| 93 | static inline unsigned long tegra_mipi_readl(struct tegra_mipi *mipi, | ||
| 94 | unsigned long reg) | ||
| 95 | { | ||
| 96 | return readl(mipi->regs + (reg << 2)); | ||
| 97 | } | ||
| 98 | |||
| 99 | static inline void tegra_mipi_writel(struct tegra_mipi *mipi, | ||
| 100 | unsigned long value, unsigned long reg) | ||
| 101 | { | ||
| 102 | writel(value, mipi->regs + (reg << 2)); | ||
| 103 | } | ||
| 104 | |||
/*
 * tegra_mipi_request() - obtain a MIPI calibration handle for a client
 * @device: client device; its DT node must carry a "nvidia,mipi-calibrate"
 *          phandle whose first argument cell names the pads to calibrate
 *
 * Resolves the phandle to the calibration controller's platform device,
 * looks up its driver data and returns a handle for use with
 * tegra_mipi_calibrate(). Returns an ERR_PTR() on failure; notably
 * -EPROBE_DEFER if the controller has not been probed yet.
 *
 * The returned handle holds a reference on the controller's platform
 * device; release it with tegra_mipi_free().
 */
struct tegra_mipi_device *tegra_mipi_request(struct device *device)
{
	struct device_node *np = device->of_node;
	struct tegra_mipi_device *dev;
	struct of_phandle_args args;
	int err;

	err = of_parse_phandle_with_args(np, "nvidia,mipi-calibrate",
					 "#nvidia,mipi-calibrate-cells", 0,
					 &args);
	if (err < 0)
		return ERR_PTR(err);

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		/* args.np was returned with a reference held; drop it */
		of_node_put(args.np);
		err = -ENOMEM;
		goto out;
	}

	/* takes a reference on the controller's platform device */
	dev->pdev = of_find_device_by_node(args.np);
	if (!dev->pdev) {
		of_node_put(args.np);
		err = -ENODEV;
		goto free;
	}

	of_node_put(args.np);

	/* drvdata is set at the end of tegra_mipi_probe() */
	dev->mipi = platform_get_drvdata(dev->pdev);
	if (!dev->mipi) {
		err = -EPROBE_DEFER;
		goto pdev_put;
	}

	dev->pads = args.args[0];
	dev->device = device;

	return dev;

pdev_put:
	platform_device_put(dev->pdev);
free:
	kfree(dev);
out:
	return ERR_PTR(err);
}
EXPORT_SYMBOL(tegra_mipi_request);
| 153 | |||
/*
 * tegra_mipi_free() - release a handle obtained from tegra_mipi_request()
 * @device: handle to release
 *
 * Drops the reference on the calibration controller's platform device
 * taken at request time, then frees the handle itself.
 */
void tegra_mipi_free(struct tegra_mipi_device *device)
{
	platform_device_put(device->pdev);
	kfree(device);
}
EXPORT_SYMBOL(tegra_mipi_free);
| 160 | |||
| 161 | static int tegra_mipi_wait(struct tegra_mipi *mipi) | ||
| 162 | { | ||
| 163 | unsigned long timeout = jiffies + msecs_to_jiffies(250); | ||
| 164 | unsigned long value; | ||
| 165 | |||
| 166 | while (time_before(jiffies, timeout)) { | ||
| 167 | value = tegra_mipi_readl(mipi, MIPI_CAL_STATUS); | ||
| 168 | if ((value & MIPI_CAL_STATUS_ACTIVE) == 0 && | ||
| 169 | (value & MIPI_CAL_STATUS_DONE) != 0) | ||
| 170 | return 0; | ||
| 171 | |||
| 172 | usleep_range(10, 50); | ||
| 173 | } | ||
| 174 | |||
| 175 | return -ETIMEDOUT; | ||
| 176 | } | ||
| 177 | |||
/*
 * tegra_mipi_calibrate() - run pad calibration for the client's pads
 * @device: handle from tegra_mipi_request()
 *
 * Powers up the bias pads, programs the configuration registers for the
 * pads named in @device->pads, starts one calibration cycle and waits for
 * it to complete. Returns 0 on success, -ETIMEDOUT if the hardware never
 * signals completion, or a clock-enable error code.
 */
int tegra_mipi_calibrate(struct tegra_mipi_device *device)
{
	unsigned long value;
	unsigned int i;
	int err;

	/* clock was prepared at probe time; only enable/disable here */
	err = clk_enable(device->mipi->clk);
	if (err < 0)
		return err;

	mutex_lock(&device->mipi->lock);

	/* enable the bias pad voltage clamp reference */
	value = tegra_mipi_readl(device->mipi, MIPI_CAL_BIAS_PAD_CFG0);
	value &= ~MIPI_CAL_BIAS_PAD_PDVCLAMP;
	value |= MIPI_CAL_BIAS_PAD_E_VCLAMP_REF;
	tegra_mipi_writel(device->mipi, value, MIPI_CAL_BIAS_PAD_CFG0);

	/* take the bias pad voltage regulator out of power-down */
	value = tegra_mipi_readl(device->mipi, MIPI_CAL_BIAS_PAD_CFG2);
	value &= ~MIPI_CAL_BIAS_PAD_PDVREG;
	tegra_mipi_writel(device->mipi, value, MIPI_CAL_BIAS_PAD_CFG2);

	/*
	 * Select the requested pads for this run; explicitly clear the
	 * configuration of every other pad so it is left out.
	 */
	for (i = 0; i < ARRAY_SIZE(modules); i++) {
		if (device->pads & BIT(i))
			value = MIPI_CAL_CONFIG_SELECT |
				MIPI_CAL_CONFIG_HSPDOS(0) |
				MIPI_CAL_CONFIG_HSPUOS(4) |
				MIPI_CAL_CONFIG_TERMOS(5);
		else
			value = 0;

		tegra_mipi_writel(device->mipi, value, modules[i].reg);
	}

	/* kick off the calibration cycle */
	tegra_mipi_writel(device->mipi, MIPI_CAL_CTRL_START, MIPI_CAL_CTRL);

	err = tegra_mipi_wait(device->mipi);

	mutex_unlock(&device->mipi->lock);
	clk_disable(device->mipi->clk);

	return err;
}
EXPORT_SYMBOL(tegra_mipi_calibrate);
| 221 | |||
/*
 * Probe: map the register window, initialize the lock and prepare the
 * controller clock. Setting drvdata last makes the device visible to
 * tegra_mipi_request(), which treats NULL drvdata as "not probed yet".
 */
static int tegra_mipi_probe(struct platform_device *pdev)
{
	struct tegra_mipi *mipi;
	struct resource *res;
	int err;

	mipi = devm_kzalloc(&pdev->dev, sizeof(*mipi), GFP_KERNEL);
	if (!mipi)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mipi->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mipi->regs))
		return PTR_ERR(mipi->regs);

	mutex_init(&mipi->lock);

	mipi->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(mipi->clk)) {
		dev_err(&pdev->dev, "failed to get clock\n");
		return PTR_ERR(mipi->clk);
	}

	/* prepare once; tegra_mipi_calibrate() only enables/disables */
	err = clk_prepare(mipi->clk);
	if (err < 0)
		return err;

	platform_set_drvdata(pdev, mipi);

	return 0;
}
| 253 | |||
/*
 * Remove: undo the clk_prepare() done at probe time; the mapping and
 * allocations are devm-managed and released automatically.
 */
static int tegra_mipi_remove(struct platform_device *pdev)
{
	struct tegra_mipi *mipi = platform_get_drvdata(pdev);

	clk_unprepare(mipi->clk);

	return 0;
}
| 262 | |||
| 263 | static struct of_device_id tegra_mipi_of_match[] = { | ||
| 264 | { .compatible = "nvidia,tegra114-mipi", }, | ||
| 265 | { }, | ||
| 266 | }; | ||
| 267 | |||
/*
 * Platform driver for the MIPI calibration controller.
 *
 * NOTE(review): deliberately non-static — presumably registered from
 * another file in this driver rather than via module_platform_driver();
 * confirm against the host1x core before changing linkage.
 */
struct platform_driver tegra_mipi_driver = {
	.driver = {
		.name = "tegra-mipi",
		.of_match_table = tegra_mipi_of_match,
	},
	.probe = tegra_mipi_probe,
	.remove = tegra_mipi_remove,
};
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c index 159c479829c9..bfb09d802abd 100644 --- a/drivers/gpu/host1x/syncpt.c +++ b/drivers/gpu/host1x/syncpt.c | |||
| @@ -93,6 +93,7 @@ u32 host1x_syncpt_id(struct host1x_syncpt *sp) | |||
| 93 | { | 93 | { |
| 94 | return sp->id; | 94 | return sp->id; |
| 95 | } | 95 | } |
| 96 | EXPORT_SYMBOL(host1x_syncpt_id); | ||
| 96 | 97 | ||
| 97 | /* | 98 | /* |
| 98 | * Updates the value sent to hardware. | 99 | * Updates the value sent to hardware. |
| @@ -168,6 +169,7 @@ int host1x_syncpt_incr(struct host1x_syncpt *sp) | |||
| 168 | { | 169 | { |
| 169 | return host1x_hw_syncpt_cpu_incr(sp->host, sp); | 170 | return host1x_hw_syncpt_cpu_incr(sp->host, sp); |
| 170 | } | 171 | } |
| 172 | EXPORT_SYMBOL(host1x_syncpt_incr); | ||
| 171 | 173 | ||
| 172 | /* | 174 | /* |
| 173 | * Updated sync point form hardware, and returns true if syncpoint is expired, | 175 | * Updated sync point form hardware, and returns true if syncpoint is expired, |
| @@ -377,6 +379,7 @@ struct host1x_syncpt *host1x_syncpt_request(struct device *dev, | |||
| 377 | struct host1x *host = dev_get_drvdata(dev->parent); | 379 | struct host1x *host = dev_get_drvdata(dev->parent); |
| 378 | return host1x_syncpt_alloc(host, dev, flags); | 380 | return host1x_syncpt_alloc(host, dev, flags); |
| 379 | } | 381 | } |
| 382 | EXPORT_SYMBOL(host1x_syncpt_request); | ||
| 380 | 383 | ||
| 381 | void host1x_syncpt_free(struct host1x_syncpt *sp) | 384 | void host1x_syncpt_free(struct host1x_syncpt *sp) |
| 382 | { | 385 | { |
| @@ -390,6 +393,7 @@ void host1x_syncpt_free(struct host1x_syncpt *sp) | |||
| 390 | sp->name = NULL; | 393 | sp->name = NULL; |
| 391 | sp->client_managed = false; | 394 | sp->client_managed = false; |
| 392 | } | 395 | } |
| 396 | EXPORT_SYMBOL(host1x_syncpt_free); | ||
| 393 | 397 | ||
| 394 | void host1x_syncpt_deinit(struct host1x *host) | 398 | void host1x_syncpt_deinit(struct host1x *host) |
| 395 | { | 399 | { |
| @@ -408,6 +412,7 @@ u32 host1x_syncpt_read_max(struct host1x_syncpt *sp) | |||
| 408 | smp_rmb(); | 412 | smp_rmb(); |
| 409 | return (u32)atomic_read(&sp->max_val); | 413 | return (u32)atomic_read(&sp->max_val); |
| 410 | } | 414 | } |
| 415 | EXPORT_SYMBOL(host1x_syncpt_read_max); | ||
| 411 | 416 | ||
| 412 | /* | 417 | /* |
| 413 | * Read min, which is a shadow of the current sync point value in hardware. | 418 | * Read min, which is a shadow of the current sync point value in hardware. |
| @@ -417,6 +422,7 @@ u32 host1x_syncpt_read_min(struct host1x_syncpt *sp) | |||
| 417 | smp_rmb(); | 422 | smp_rmb(); |
| 418 | return (u32)atomic_read(&sp->min_val); | 423 | return (u32)atomic_read(&sp->min_val); |
| 419 | } | 424 | } |
| 425 | EXPORT_SYMBOL(host1x_syncpt_read_min); | ||
| 420 | 426 | ||
| 421 | int host1x_syncpt_nb_pts(struct host1x *host) | 427 | int host1x_syncpt_nb_pts(struct host1x *host) |
| 422 | { | 428 | { |
| @@ -439,13 +445,16 @@ struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id) | |||
| 439 | return NULL; | 445 | return NULL; |
| 440 | return host->syncpt + id; | 446 | return host->syncpt + id; |
| 441 | } | 447 | } |
| 448 | EXPORT_SYMBOL(host1x_syncpt_get); | ||
| 442 | 449 | ||
| 443 | struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp) | 450 | struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp) |
| 444 | { | 451 | { |
| 445 | return sp ? sp->base : NULL; | 452 | return sp ? sp->base : NULL; |
| 446 | } | 453 | } |
| 454 | EXPORT_SYMBOL(host1x_syncpt_get_base); | ||
| 447 | 455 | ||
| 448 | u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base) | 456 | u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base) |
| 449 | { | 457 | { |
| 450 | return base->id; | 458 | return base->id; |
| 451 | } | 459 | } |
| 460 | EXPORT_SYMBOL(host1x_syncpt_base_id); | ||
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c index cde461932760..7309ac704e26 100644 --- a/drivers/video/fbmem.c +++ b/drivers/video/fbmem.c | |||
| @@ -1577,10 +1577,10 @@ static bool fb_do_apertures_overlap(struct apertures_struct *gena, | |||
| 1577 | static int do_unregister_framebuffer(struct fb_info *fb_info); | 1577 | static int do_unregister_framebuffer(struct fb_info *fb_info); |
| 1578 | 1578 | ||
| 1579 | #define VGA_FB_PHYS 0xA0000 | 1579 | #define VGA_FB_PHYS 0xA0000 |
| 1580 | static void do_remove_conflicting_framebuffers(struct apertures_struct *a, | 1580 | static int do_remove_conflicting_framebuffers(struct apertures_struct *a, |
| 1581 | const char *name, bool primary) | 1581 | const char *name, bool primary) |
| 1582 | { | 1582 | { |
| 1583 | int i; | 1583 | int i, ret; |
| 1584 | 1584 | ||
| 1585 | /* check all firmware fbs and kick off if the base addr overlaps */ | 1585 | /* check all firmware fbs and kick off if the base addr overlaps */ |
| 1586 | for (i = 0 ; i < FB_MAX; i++) { | 1586 | for (i = 0 ; i < FB_MAX; i++) { |
| @@ -1599,22 +1599,29 @@ static void do_remove_conflicting_framebuffers(struct apertures_struct *a, | |||
| 1599 | printk(KERN_INFO "fb: conflicting fb hw usage " | 1599 | printk(KERN_INFO "fb: conflicting fb hw usage " |
| 1600 | "%s vs %s - removing generic driver\n", | 1600 | "%s vs %s - removing generic driver\n", |
| 1601 | name, registered_fb[i]->fix.id); | 1601 | name, registered_fb[i]->fix.id); |
| 1602 | do_unregister_framebuffer(registered_fb[i]); | 1602 | ret = do_unregister_framebuffer(registered_fb[i]); |
| 1603 | if (ret) | ||
| 1604 | return ret; | ||
| 1603 | } | 1605 | } |
| 1604 | } | 1606 | } |
| 1607 | |||
| 1608 | return 0; | ||
| 1605 | } | 1609 | } |
| 1606 | 1610 | ||
| 1607 | static int do_register_framebuffer(struct fb_info *fb_info) | 1611 | static int do_register_framebuffer(struct fb_info *fb_info) |
| 1608 | { | 1612 | { |
| 1609 | int i; | 1613 | int i, ret; |
| 1610 | struct fb_event event; | 1614 | struct fb_event event; |
| 1611 | struct fb_videomode mode; | 1615 | struct fb_videomode mode; |
| 1612 | 1616 | ||
| 1613 | if (fb_check_foreignness(fb_info)) | 1617 | if (fb_check_foreignness(fb_info)) |
| 1614 | return -ENOSYS; | 1618 | return -ENOSYS; |
| 1615 | 1619 | ||
| 1616 | do_remove_conflicting_framebuffers(fb_info->apertures, fb_info->fix.id, | 1620 | ret = do_remove_conflicting_framebuffers(fb_info->apertures, |
| 1617 | fb_is_primary_device(fb_info)); | 1621 | fb_info->fix.id, |
| 1622 | fb_is_primary_device(fb_info)); | ||
| 1623 | if (ret) | ||
| 1624 | return ret; | ||
| 1618 | 1625 | ||
| 1619 | if (num_registered_fb == FB_MAX) | 1626 | if (num_registered_fb == FB_MAX) |
| 1620 | return -ENXIO; | 1627 | return -ENXIO; |
| @@ -1739,12 +1746,16 @@ int unlink_framebuffer(struct fb_info *fb_info) | |||
| 1739 | } | 1746 | } |
| 1740 | EXPORT_SYMBOL(unlink_framebuffer); | 1747 | EXPORT_SYMBOL(unlink_framebuffer); |
| 1741 | 1748 | ||
| 1742 | void remove_conflicting_framebuffers(struct apertures_struct *a, | 1749 | int remove_conflicting_framebuffers(struct apertures_struct *a, |
| 1743 | const char *name, bool primary) | 1750 | const char *name, bool primary) |
| 1744 | { | 1751 | { |
| 1752 | int ret; | ||
| 1753 | |||
| 1745 | mutex_lock(®istration_lock); | 1754 | mutex_lock(®istration_lock); |
| 1746 | do_remove_conflicting_framebuffers(a, name, primary); | 1755 | ret = do_remove_conflicting_framebuffers(a, name, primary); |
| 1747 | mutex_unlock(®istration_lock); | 1756 | mutex_unlock(®istration_lock); |
| 1757 | |||
| 1758 | return ret; | ||
| 1748 | } | 1759 | } |
| 1749 | EXPORT_SYMBOL(remove_conflicting_framebuffers); | 1760 | EXPORT_SYMBOL(remove_conflicting_framebuffers); |
| 1750 | 1761 | ||
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c index f51646f15cf2..bbeb8dd7f108 100644 --- a/drivers/video/omap2/dss/dispc.c +++ b/drivers/video/omap2/dss/dispc.c | |||
| @@ -3726,7 +3726,6 @@ static int __init omap_dispchw_probe(struct platform_device *pdev) | |||
| 3726 | } | 3726 | } |
| 3727 | 3727 | ||
| 3728 | pm_runtime_enable(&pdev->dev); | 3728 | pm_runtime_enable(&pdev->dev); |
| 3729 | pm_runtime_irq_safe(&pdev->dev); | ||
| 3730 | 3729 | ||
| 3731 | r = dispc_runtime_get(); | 3730 | r = dispc_runtime_get(); |
| 3732 | if (r) | 3731 | if (r) |
diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 1d4a920ef7ff..04086c5be930 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h | |||
| @@ -56,6 +56,7 @@ | |||
| 56 | #include <linux/mutex.h> | 56 | #include <linux/mutex.h> |
| 57 | #include <linux/io.h> | 57 | #include <linux/io.h> |
| 58 | #include <linux/slab.h> | 58 | #include <linux/slab.h> |
| 59 | #include <linux/ratelimit.h> | ||
| 59 | #if defined(__alpha__) || defined(__powerpc__) | 60 | #if defined(__alpha__) || defined(__powerpc__) |
| 60 | #include <asm/pgtable.h> /* For pte_wrprotect */ | 61 | #include <asm/pgtable.h> /* For pte_wrprotect */ |
| 61 | #endif | 62 | #endif |
| @@ -136,7 +137,6 @@ int drm_err(const char *func, const char *format, ...); | |||
| 136 | 137 | ||
| 137 | /* driver capabilities and requirements mask */ | 138 | /* driver capabilities and requirements mask */ |
| 138 | #define DRIVER_USE_AGP 0x1 | 139 | #define DRIVER_USE_AGP 0x1 |
| 139 | #define DRIVER_REQUIRE_AGP 0x2 | ||
| 140 | #define DRIVER_PCI_DMA 0x8 | 140 | #define DRIVER_PCI_DMA 0x8 |
| 141 | #define DRIVER_SG 0x10 | 141 | #define DRIVER_SG 0x10 |
| 142 | #define DRIVER_HAVE_DMA 0x20 | 142 | #define DRIVER_HAVE_DMA 0x20 |
| @@ -180,6 +180,22 @@ int drm_err(const char *func, const char *format, ...); | |||
| 180 | #define DRM_ERROR(fmt, ...) \ | 180 | #define DRM_ERROR(fmt, ...) \ |
| 181 | drm_err(__func__, fmt, ##__VA_ARGS__) | 181 | drm_err(__func__, fmt, ##__VA_ARGS__) |
| 182 | 182 | ||
| 183 | /** | ||
| 184 | * Rate limited error output. Like DRM_ERROR() but won't flood the log. | ||
| 185 | * | ||
| 186 | * \param fmt printf() like format string. | ||
| 187 | * \param arg arguments | ||
| 188 | */ | ||
| 189 | #define DRM_ERROR_RATELIMITED(fmt, ...) \ | ||
| 190 | ({ \ | ||
| 191 | static DEFINE_RATELIMIT_STATE(_rs, \ | ||
| 192 | DEFAULT_RATELIMIT_INTERVAL, \ | ||
| 193 | DEFAULT_RATELIMIT_BURST); \ | ||
| 194 | \ | ||
| 195 | if (__ratelimit(&_rs)) \ | ||
| 196 | drm_err(__func__, fmt, ##__VA_ARGS__); \ | ||
| 197 | }) | ||
| 198 | |||
| 183 | #define DRM_INFO(fmt, ...) \ | 199 | #define DRM_INFO(fmt, ...) \ |
| 184 | printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__) | 200 | printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__) |
| 185 | 201 | ||
| @@ -422,7 +438,6 @@ struct drm_file { | |||
| 422 | struct pid *pid; | 438 | struct pid *pid; |
| 423 | kuid_t uid; | 439 | kuid_t uid; |
| 424 | drm_magic_t magic; | 440 | drm_magic_t magic; |
| 425 | unsigned long ioctl_count; | ||
| 426 | struct list_head lhead; | 441 | struct list_head lhead; |
| 427 | struct drm_minor *minor; | 442 | struct drm_minor *minor; |
| 428 | unsigned long lock_count; | 443 | unsigned long lock_count; |
| @@ -511,7 +526,7 @@ struct drm_device_dma { | |||
| 511 | */ | 526 | */ |
| 512 | struct drm_agp_mem { | 527 | struct drm_agp_mem { |
| 513 | unsigned long handle; /**< handle */ | 528 | unsigned long handle; /**< handle */ |
| 514 | DRM_AGP_MEM *memory; | 529 | struct agp_memory *memory; |
| 515 | unsigned long bound; /**< address */ | 530 | unsigned long bound; /**< address */ |
| 516 | int pages; | 531 | int pages; |
| 517 | struct list_head head; | 532 | struct list_head head; |
| @@ -523,7 +538,7 @@ struct drm_agp_mem { | |||
| 523 | * \sa drm_agp_init() and drm_device::agp. | 538 | * \sa drm_agp_init() and drm_device::agp. |
| 524 | */ | 539 | */ |
| 525 | struct drm_agp_head { | 540 | struct drm_agp_head { |
| 526 | DRM_AGP_KERN agp_info; /**< AGP device information */ | 541 | struct agp_kern_info agp_info; /**< AGP device information */ |
| 527 | struct list_head memory; | 542 | struct list_head memory; |
| 528 | unsigned long mode; /**< AGP mode */ | 543 | unsigned long mode; /**< AGP mode */ |
| 529 | struct agp_bridge_data *bridge; | 544 | struct agp_bridge_data *bridge; |
| @@ -607,13 +622,6 @@ struct drm_ati_pcigart_info { | |||
| 607 | }; | 622 | }; |
| 608 | 623 | ||
| 609 | /** | 624 | /** |
| 610 | * GEM specific mm private for tracking GEM objects | ||
| 611 | */ | ||
| 612 | struct drm_gem_mm { | ||
| 613 | struct drm_vma_offset_manager vma_manager; | ||
| 614 | }; | ||
| 615 | |||
| 616 | /** | ||
| 617 | * This structure defines the drm_mm memory object, which will be used by the | 625 | * This structure defines the drm_mm memory object, which will be used by the |
| 618 | * DRM for its buffer objects. | 626 | * DRM for its buffer objects. |
| 619 | */ | 627 | */ |
| @@ -750,10 +758,6 @@ struct drm_bus { | |||
| 750 | int (*set_unique)(struct drm_device *dev, struct drm_master *master, | 758 | int (*set_unique)(struct drm_device *dev, struct drm_master *master, |
| 751 | struct drm_unique *unique); | 759 | struct drm_unique *unique); |
| 752 | int (*irq_by_busid)(struct drm_device *dev, struct drm_irq_busid *p); | 760 | int (*irq_by_busid)(struct drm_device *dev, struct drm_irq_busid *p); |
| 753 | /* hooks that are for PCI */ | ||
| 754 | int (*agp_init)(struct drm_device *dev); | ||
| 755 | void (*agp_destroy)(struct drm_device *dev); | ||
| 756 | |||
| 757 | }; | 761 | }; |
| 758 | 762 | ||
| 759 | /** | 763 | /** |
| @@ -841,6 +845,7 @@ struct drm_driver { | |||
| 841 | * | 845 | * |
| 842 | * \param dev DRM device. | 846 | * \param dev DRM device. |
| 843 | * \param crtc Id of the crtc to query. | 847 | * \param crtc Id of the crtc to query. |
| 848 | * \param flags Flags from the caller (DRM_CALLED_FROM_VBLIRQ or 0). | ||
| 844 | * \param *vpos Target location for current vertical scanout position. | 849 | * \param *vpos Target location for current vertical scanout position. |
| 845 | * \param *hpos Target location for current horizontal scanout position. | 850 | * \param *hpos Target location for current horizontal scanout position. |
| 846 | * \param *stime Target location for timestamp taken immediately before | 851 | * \param *stime Target location for timestamp taken immediately before |
| @@ -863,6 +868,7 @@ struct drm_driver { | |||
| 863 | * | 868 | * |
| 864 | */ | 869 | */ |
| 865 | int (*get_scanout_position) (struct drm_device *dev, int crtc, | 870 | int (*get_scanout_position) (struct drm_device *dev, int crtc, |
| 871 | unsigned int flags, | ||
| 866 | int *vpos, int *hpos, ktime_t *stime, | 872 | int *vpos, int *hpos, ktime_t *stime, |
| 867 | ktime_t *etime); | 873 | ktime_t *etime); |
| 868 | 874 | ||
| @@ -903,7 +909,7 @@ struct drm_driver { | |||
| 903 | 909 | ||
| 904 | /* these have to be filled in */ | 910 | /* these have to be filled in */ |
| 905 | 911 | ||
| 906 | irqreturn_t(*irq_handler) (DRM_IRQ_ARGS); | 912 | irqreturn_t(*irq_handler) (int irq, void *arg); |
| 907 | void (*irq_preinstall) (struct drm_device *dev); | 913 | void (*irq_preinstall) (struct drm_device *dev); |
| 908 | int (*irq_postinstall) (struct drm_device *dev); | 914 | int (*irq_postinstall) (struct drm_device *dev); |
| 909 | void (*irq_uninstall) (struct drm_device *dev); | 915 | void (*irq_uninstall) (struct drm_device *dev); |
| @@ -995,8 +1001,8 @@ struct drm_driver { | |||
| 995 | } kdriver; | 1001 | } kdriver; |
| 996 | struct drm_bus *bus; | 1002 | struct drm_bus *bus; |
| 997 | 1003 | ||
| 998 | /* List of devices hanging off this driver */ | 1004 | /* List of devices hanging off this driver with stealth attach. */ |
| 999 | struct list_head device_list; | 1005 | struct list_head legacy_dev_list; |
| 1000 | }; | 1006 | }; |
| 1001 | 1007 | ||
| 1002 | #define DRM_MINOR_UNASSIGNED 0 | 1008 | #define DRM_MINOR_UNASSIGNED 0 |
| @@ -1085,7 +1091,7 @@ struct drm_vblank_crtc { | |||
| 1085 | * may contain multiple heads. | 1091 | * may contain multiple heads. |
| 1086 | */ | 1092 | */ |
| 1087 | struct drm_device { | 1093 | struct drm_device { |
| 1088 | struct list_head driver_item; /**< list of devices per driver */ | 1094 | struct list_head legacy_dev_list;/**< list of devices per driver for stealth attach cleanup */ |
| 1089 | char *devname; /**< For /proc/interrupts */ | 1095 | char *devname; /**< For /proc/interrupts */ |
| 1090 | int if_version; /**< Highest interface version set */ | 1096 | int if_version; /**< Highest interface version set */ |
| 1091 | 1097 | ||
| @@ -1098,8 +1104,6 @@ struct drm_device { | |||
| 1098 | /** \name Usage Counters */ | 1104 | /** \name Usage Counters */ |
| 1099 | /*@{ */ | 1105 | /*@{ */ |
| 1100 | int open_count; /**< Outstanding files open */ | 1106 | int open_count; /**< Outstanding files open */ |
| 1101 | atomic_t ioctl_count; /**< Outstanding IOCTLs pending */ | ||
| 1102 | atomic_t vma_count; /**< Outstanding vma areas open */ | ||
| 1103 | int buf_use; /**< Buffers in use -- cannot alloc */ | 1107 | int buf_use; /**< Buffers in use -- cannot alloc */ |
| 1104 | atomic_t buf_alloc; /**< Buffer allocation in progress */ | 1108 | atomic_t buf_alloc; /**< Buffer allocation in progress */ |
| 1105 | /*@} */ | 1109 | /*@} */ |
| @@ -1176,7 +1180,6 @@ struct drm_device { | |||
| 1176 | struct drm_sg_mem *sg; /**< Scatter gather memory */ | 1180 | struct drm_sg_mem *sg; /**< Scatter gather memory */ |
| 1177 | unsigned int num_crtcs; /**< Number of CRTCs on this device */ | 1181 | unsigned int num_crtcs; /**< Number of CRTCs on this device */ |
| 1178 | void *dev_private; /**< device private data */ | 1182 | void *dev_private; /**< device private data */ |
| 1179 | void *mm_private; | ||
| 1180 | struct address_space *dev_mapping; | 1183 | struct address_space *dev_mapping; |
| 1181 | struct drm_sigdata sigdata; /**< For block_all_signals */ | 1184 | struct drm_sigdata sigdata; /**< For block_all_signals */ |
| 1182 | sigset_t sigmask; | 1185 | sigset_t sigmask; |
| @@ -1194,6 +1197,7 @@ struct drm_device { | |||
| 1194 | /*@{ */ | 1197 | /*@{ */ |
| 1195 | struct mutex object_name_lock; | 1198 | struct mutex object_name_lock; |
| 1196 | struct idr object_name_idr; | 1199 | struct idr object_name_idr; |
| 1200 | struct drm_vma_offset_manager *vma_offset_manager; | ||
| 1197 | /*@} */ | 1201 | /*@} */ |
| 1198 | int switch_power_state; | 1202 | int switch_power_state; |
| 1199 | 1203 | ||
| @@ -1268,6 +1272,7 @@ extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); | |||
| 1268 | /* Memory management support (drm_memory.h) */ | 1272 | /* Memory management support (drm_memory.h) */ |
| 1269 | #include <drm/drm_memory.h> | 1273 | #include <drm/drm_memory.h> |
| 1270 | 1274 | ||
| 1275 | |||
| 1271 | /* Misc. IOCTL support (drm_ioctl.h) */ | 1276 | /* Misc. IOCTL support (drm_ioctl.h) */ |
| 1272 | extern int drm_irq_by_busid(struct drm_device *dev, void *data, | 1277 | extern int drm_irq_by_busid(struct drm_device *dev, void *data, |
| 1273 | struct drm_file *file_priv); | 1278 | struct drm_file *file_priv); |
| @@ -1398,8 +1403,10 @@ extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, | |||
| 1398 | int crtc, int *max_error, | 1403 | int crtc, int *max_error, |
| 1399 | struct timeval *vblank_time, | 1404 | struct timeval *vblank_time, |
| 1400 | unsigned flags, | 1405 | unsigned flags, |
| 1401 | struct drm_crtc *refcrtc); | 1406 | const struct drm_crtc *refcrtc, |
| 1402 | extern void drm_calc_timestamping_constants(struct drm_crtc *crtc); | 1407 | const struct drm_display_mode *mode); |
| 1408 | extern void drm_calc_timestamping_constants(struct drm_crtc *crtc, | ||
| 1409 | const struct drm_display_mode *mode); | ||
| 1403 | 1410 | ||
| 1404 | extern bool | 1411 | extern bool |
| 1405 | drm_mode_parse_command_line_for_connector(const char *mode_option, | 1412 | drm_mode_parse_command_line_for_connector(const char *mode_option, |
| @@ -1461,6 +1468,30 @@ extern int drm_debugfs_create_files(const struct drm_info_list *files, | |||
| 1461 | extern int drm_debugfs_remove_files(const struct drm_info_list *files, | 1468 | extern int drm_debugfs_remove_files(const struct drm_info_list *files, |
| 1462 | int count, struct drm_minor *minor); | 1469 | int count, struct drm_minor *minor); |
| 1463 | extern int drm_debugfs_cleanup(struct drm_minor *minor); | 1470 | extern int drm_debugfs_cleanup(struct drm_minor *minor); |
| 1471 | #else | ||
| 1472 | static inline int drm_debugfs_init(struct drm_minor *minor, int minor_id, | ||
| 1473 | struct dentry *root) | ||
| 1474 | { | ||
| 1475 | return 0; | ||
| 1476 | } | ||
| 1477 | |||
| 1478 | static inline int drm_debugfs_create_files(const struct drm_info_list *files, | ||
| 1479 | int count, struct dentry *root, | ||
| 1480 | struct drm_minor *minor) | ||
| 1481 | { | ||
| 1482 | return 0; | ||
| 1483 | } | ||
| 1484 | |||
| 1485 | static inline int drm_debugfs_remove_files(const struct drm_info_list *files, | ||
| 1486 | int count, struct drm_minor *minor) | ||
| 1487 | { | ||
| 1488 | return 0; | ||
| 1489 | } | ||
| 1490 | |||
| 1491 | static inline int drm_debugfs_cleanup(struct drm_minor *minor) | ||
| 1492 | { | ||
| 1493 | return 0; | ||
| 1494 | } | ||
| 1464 | #endif | 1495 | #endif |
| 1465 | 1496 | ||
| 1466 | /* Info file support */ | 1497 | /* Info file support */ |
| @@ -1645,6 +1676,7 @@ static __inline__ int drm_pci_device_is_agp(struct drm_device *dev) | |||
| 1645 | 1676 | ||
| 1646 | return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP); | 1677 | return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP); |
| 1647 | } | 1678 | } |
| 1679 | void drm_pci_agp_destroy(struct drm_device *dev); | ||
| 1648 | 1680 | ||
| 1649 | extern int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver); | 1681 | extern int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver); |
| 1650 | extern void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver); | 1682 | extern void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver); |
| @@ -1660,7 +1692,6 @@ extern int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *speed_mask); | |||
| 1660 | 1692 | ||
| 1661 | /* platform section */ | 1693 | /* platform section */ |
| 1662 | extern int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device); | 1694 | extern int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device); |
| 1663 | extern void drm_platform_exit(struct drm_driver *driver, struct platform_device *platform_device); | ||
| 1664 | 1695 | ||
| 1665 | /* returns true if currently okay to sleep */ | 1696 | /* returns true if currently okay to sleep */ |
| 1666 | static __inline__ bool drm_can_sleep(void) | 1697 | static __inline__ bool drm_can_sleep(void) |
diff --git a/include/drm/drm_agpsupport.h b/include/drm/drm_agpsupport.h index a184eeee9c96..86a02188074b 100644 --- a/include/drm/drm_agpsupport.h +++ b/include/drm/drm_agpsupport.h | |||
| @@ -10,17 +10,16 @@ | |||
| 10 | 10 | ||
| 11 | #if __OS_HAS_AGP | 11 | #if __OS_HAS_AGP |
| 12 | 12 | ||
| 13 | void drm_free_agp(DRM_AGP_MEM * handle, int pages); | 13 | void drm_free_agp(struct agp_memory * handle, int pages); |
| 14 | int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start); | 14 | int drm_bind_agp(struct agp_memory * handle, unsigned int start); |
| 15 | int drm_unbind_agp(DRM_AGP_MEM * handle); | 15 | int drm_unbind_agp(struct agp_memory * handle); |
| 16 | DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev, | 16 | struct agp_memory *drm_agp_bind_pages(struct drm_device *dev, |
| 17 | struct page **pages, | 17 | struct page **pages, |
| 18 | unsigned long num_pages, | 18 | unsigned long num_pages, |
| 19 | uint32_t gtt_offset, | 19 | uint32_t gtt_offset, |
| 20 | uint32_t type); | 20 | uint32_t type); |
| 21 | 21 | ||
| 22 | struct drm_agp_head *drm_agp_init(struct drm_device *dev); | 22 | struct drm_agp_head *drm_agp_init(struct drm_device *dev); |
| 23 | void drm_agp_destroy(struct drm_agp_head *agp); | ||
| 24 | void drm_agp_clear(struct drm_device *dev); | 23 | void drm_agp_clear(struct drm_device *dev); |
| 25 | int drm_agp_acquire(struct drm_device *dev); | 24 | int drm_agp_acquire(struct drm_device *dev); |
| 26 | int drm_agp_acquire_ioctl(struct drm_device *dev, void *data, | 25 | int drm_agp_acquire_ioctl(struct drm_device *dev, void *data, |
| @@ -46,29 +45,23 @@ int drm_agp_unbind_ioctl(struct drm_device *dev, void *data, | |||
| 46 | int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request); | 45 | int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request); |
| 47 | int drm_agp_bind_ioctl(struct drm_device *dev, void *data, | 46 | int drm_agp_bind_ioctl(struct drm_device *dev, void *data, |
| 48 | struct drm_file *file_priv); | 47 | struct drm_file *file_priv); |
| 49 | |||
| 50 | static inline int drm_core_has_AGP(struct drm_device *dev) | ||
| 51 | { | ||
| 52 | return drm_core_check_feature(dev, DRIVER_USE_AGP); | ||
| 53 | } | ||
| 54 | |||
| 55 | #else /* __OS_HAS_AGP */ | 48 | #else /* __OS_HAS_AGP */ |
| 56 | 49 | ||
| 57 | static inline void drm_free_agp(DRM_AGP_MEM * handle, int pages) | 50 | static inline void drm_free_agp(struct agp_memory * handle, int pages) |
| 58 | { | 51 | { |
| 59 | } | 52 | } |
| 60 | 53 | ||
| 61 | static inline int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start) | 54 | static inline int drm_bind_agp(struct agp_memory * handle, unsigned int start) |
| 62 | { | 55 | { |
| 63 | return -ENODEV; | 56 | return -ENODEV; |
| 64 | } | 57 | } |
| 65 | 58 | ||
| 66 | static inline int drm_unbind_agp(DRM_AGP_MEM * handle) | 59 | static inline int drm_unbind_agp(struct agp_memory * handle) |
| 67 | { | 60 | { |
| 68 | return -ENODEV; | 61 | return -ENODEV; |
| 69 | } | 62 | } |
| 70 | 63 | ||
| 71 | static inline DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev, | 64 | static inline struct agp_memory *drm_agp_bind_pages(struct drm_device *dev, |
| 72 | struct page **pages, | 65 | struct page **pages, |
| 73 | unsigned long num_pages, | 66 | unsigned long num_pages, |
| 74 | uint32_t gtt_offset, | 67 | uint32_t gtt_offset, |
| @@ -82,10 +75,6 @@ static inline struct drm_agp_head *drm_agp_init(struct drm_device *dev) | |||
| 82 | return NULL; | 75 | return NULL; |
| 83 | } | 76 | } |
| 84 | 77 | ||
| 85 | static inline void drm_agp_destroy(struct drm_agp_head *agp) | ||
| 86 | { | ||
| 87 | } | ||
| 88 | |||
| 89 | static inline void drm_agp_clear(struct drm_device *dev) | 78 | static inline void drm_agp_clear(struct drm_device *dev) |
| 90 | { | 79 | { |
| 91 | } | 80 | } |
| @@ -183,12 +172,6 @@ static inline int drm_agp_bind_ioctl(struct drm_device *dev, void *data, | |||
| 183 | { | 172 | { |
| 184 | return -ENODEV; | 173 | return -ENODEV; |
| 185 | } | 174 | } |
| 186 | |||
| 187 | static inline int drm_core_has_AGP(struct drm_device *dev) | ||
| 188 | { | ||
| 189 | return 0; | ||
| 190 | } | ||
| 191 | |||
| 192 | #endif /* __OS_HAS_AGP */ | 175 | #endif /* __OS_HAS_AGP */ |
| 193 | 176 | ||
| 194 | #endif /* _DRM_AGPSUPPORT_H_ */ | 177 | #endif /* _DRM_AGPSUPPORT_H_ */ |
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index f32c5cd51f41..71727b6210ae 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h | |||
| @@ -30,6 +30,7 @@ | |||
| 30 | #include <linux/types.h> | 30 | #include <linux/types.h> |
| 31 | #include <linux/idr.h> | 31 | #include <linux/idr.h> |
| 32 | #include <linux/fb.h> | 32 | #include <linux/fb.h> |
| 33 | #include <linux/hdmi.h> | ||
| 33 | #include <drm/drm_mode.h> | 34 | #include <drm/drm_mode.h> |
| 34 | 35 | ||
| 35 | #include <drm/drm_fourcc.h> | 36 | #include <drm/drm_fourcc.h> |
| @@ -181,6 +182,7 @@ struct drm_display_mode { | |||
| 181 | 182 | ||
| 182 | int vrefresh; /* in Hz */ | 183 | int vrefresh; /* in Hz */ |
| 183 | int hsync; /* in kHz */ | 184 | int hsync; /* in kHz */ |
| 185 | enum hdmi_picture_aspect picture_aspect_ratio; | ||
| 184 | }; | 186 | }; |
| 185 | 187 | ||
| 186 | static inline bool drm_mode_is_stereo(const struct drm_display_mode *mode) | 188 | static inline bool drm_mode_is_stereo(const struct drm_display_mode *mode) |
| @@ -447,7 +449,7 @@ struct drm_crtc { | |||
| 447 | uint16_t *gamma_store; | 449 | uint16_t *gamma_store; |
| 448 | 450 | ||
| 449 | /* Constants needed for precise vblank and swap timestamping. */ | 451 | /* Constants needed for precise vblank and swap timestamping. */ |
| 450 | s64 framedur_ns, linedur_ns, pixeldur_ns; | 452 | int framedur_ns, linedur_ns, pixeldur_ns; |
| 451 | 453 | ||
| 452 | /* if you are using the helper */ | 454 | /* if you are using the helper */ |
| 453 | void *helper_private; | 455 | void *helper_private; |
| @@ -929,6 +931,19 @@ extern int drm_crtc_init(struct drm_device *dev, | |||
| 929 | struct drm_crtc *crtc, | 931 | struct drm_crtc *crtc, |
| 930 | const struct drm_crtc_funcs *funcs); | 932 | const struct drm_crtc_funcs *funcs); |
| 931 | extern void drm_crtc_cleanup(struct drm_crtc *crtc); | 933 | extern void drm_crtc_cleanup(struct drm_crtc *crtc); |
| 934 | extern unsigned int drm_crtc_index(struct drm_crtc *crtc); | ||
| 935 | |||
| 936 | /** | ||
| 937 | * drm_crtc_mask - find the mask of a registered CRTC | ||
| 938 | * @crtc: CRTC to find mask for | ||
| 939 | * | ||
| 940 | * Given a registered CRTC, return the mask bit of that CRTC for an | ||
| 941 | * encoder's possible_crtcs field. | ||
| 942 | */ | ||
| 943 | static inline uint32_t drm_crtc_mask(struct drm_crtc *crtc) | ||
| 944 | { | ||
| 945 | return 1 << drm_crtc_index(crtc); | ||
| 946 | } | ||
| 932 | 947 | ||
| 933 | extern void drm_connector_ida_init(void); | 948 | extern void drm_connector_ida_init(void); |
| 934 | extern void drm_connector_ida_destroy(void); | 949 | extern void drm_connector_ida_destroy(void); |
| @@ -950,6 +965,19 @@ extern int drm_encoder_init(struct drm_device *dev, | |||
| 950 | const struct drm_encoder_funcs *funcs, | 965 | const struct drm_encoder_funcs *funcs, |
| 951 | int encoder_type); | 966 | int encoder_type); |
| 952 | 967 | ||
| 968 | /** | ||
| 969 | * drm_encoder_crtc_ok - can a given crtc drive a given encoder? | ||
| 970 | * @encoder: encoder to test | ||
| 971 | * @crtc: crtc to test | ||
| 972 | * | ||
| 973 | * Return false if @encoder can't be driven by @crtc, true otherwise. | ||
| 974 | */ | ||
| 975 | static inline bool drm_encoder_crtc_ok(struct drm_encoder *encoder, | ||
| 976 | struct drm_crtc *crtc) | ||
| 977 | { | ||
| 978 | return !!(encoder->possible_crtcs & drm_crtc_mask(crtc)); | ||
| 979 | } | ||
| 980 | |||
| 953 | extern int drm_plane_init(struct drm_device *dev, | 981 | extern int drm_plane_init(struct drm_device *dev, |
| 954 | struct drm_plane *plane, | 982 | struct drm_plane *plane, |
| 955 | unsigned long possible_crtcs, | 983 | unsigned long possible_crtcs, |
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h index ef6ad3a8e58e..b1388b5fe7ac 100644 --- a/include/drm/drm_crtc_helper.h +++ b/include/drm/drm_crtc_helper.h | |||
| @@ -120,8 +120,8 @@ struct drm_encoder_helper_funcs { | |||
| 120 | */ | 120 | */ |
| 121 | struct drm_connector_helper_funcs { | 121 | struct drm_connector_helper_funcs { |
| 122 | int (*get_modes)(struct drm_connector *connector); | 122 | int (*get_modes)(struct drm_connector *connector); |
| 123 | int (*mode_valid)(struct drm_connector *connector, | 123 | enum drm_mode_status (*mode_valid)(struct drm_connector *connector, |
| 124 | struct drm_display_mode *mode); | 124 | struct drm_display_mode *mode); |
| 125 | struct drm_encoder *(*best_encoder)(struct drm_connector *connector); | 125 | struct drm_encoder *(*best_encoder)(struct drm_connector *connector); |
| 126 | }; | 126 | }; |
| 127 | 127 | ||
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index a92c3754e3bb..1d09050a8c00 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h | |||
| @@ -41,22 +41,22 @@ | |||
| 41 | * 1.2 formally includes both eDP and DPI definitions. | 41 | * 1.2 formally includes both eDP and DPI definitions. |
| 42 | */ | 42 | */ |
| 43 | 43 | ||
| 44 | #define AUX_NATIVE_WRITE 0x8 | 44 | #define DP_AUX_I2C_WRITE 0x0 |
| 45 | #define AUX_NATIVE_READ 0x9 | 45 | #define DP_AUX_I2C_READ 0x1 |
| 46 | #define AUX_I2C_WRITE 0x0 | 46 | #define DP_AUX_I2C_STATUS 0x2 |
| 47 | #define AUX_I2C_READ 0x1 | 47 | #define DP_AUX_I2C_MOT 0x4 |
| 48 | #define AUX_I2C_STATUS 0x2 | 48 | #define DP_AUX_NATIVE_WRITE 0x8 |
| 49 | #define AUX_I2C_MOT 0x4 | 49 | #define DP_AUX_NATIVE_READ 0x9 |
| 50 | 50 | ||
| 51 | #define AUX_NATIVE_REPLY_ACK (0x0 << 4) | 51 | #define DP_AUX_NATIVE_REPLY_ACK (0x0 << 0) |
| 52 | #define AUX_NATIVE_REPLY_NACK (0x1 << 4) | 52 | #define DP_AUX_NATIVE_REPLY_NACK (0x1 << 0) |
| 53 | #define AUX_NATIVE_REPLY_DEFER (0x2 << 4) | 53 | #define DP_AUX_NATIVE_REPLY_DEFER (0x2 << 0) |
| 54 | #define AUX_NATIVE_REPLY_MASK (0x3 << 4) | 54 | #define DP_AUX_NATIVE_REPLY_MASK (0x3 << 0) |
| 55 | 55 | ||
| 56 | #define AUX_I2C_REPLY_ACK (0x0 << 6) | 56 | #define DP_AUX_I2C_REPLY_ACK (0x0 << 2) |
| 57 | #define AUX_I2C_REPLY_NACK (0x1 << 6) | 57 | #define DP_AUX_I2C_REPLY_NACK (0x1 << 2) |
| 58 | #define AUX_I2C_REPLY_DEFER (0x2 << 6) | 58 | #define DP_AUX_I2C_REPLY_DEFER (0x2 << 2) |
| 59 | #define AUX_I2C_REPLY_MASK (0x3 << 6) | 59 | #define DP_AUX_I2C_REPLY_MASK (0x3 << 2) |
| 60 | 60 | ||
| 61 | /* AUX CH addresses */ | 61 | /* AUX CH addresses */ |
| 62 | /* DPCD */ | 62 | /* DPCD */ |
| @@ -266,9 +266,10 @@ | |||
| 266 | 266 | ||
| 267 | #define DP_TEST_REQUEST 0x218 | 267 | #define DP_TEST_REQUEST 0x218 |
| 268 | # define DP_TEST_LINK_TRAINING (1 << 0) | 268 | # define DP_TEST_LINK_TRAINING (1 << 0) |
| 269 | # define DP_TEST_LINK_PATTERN (1 << 1) | 269 | # define DP_TEST_LINK_VIDEO_PATTERN (1 << 1) |
| 270 | # define DP_TEST_LINK_EDID_READ (1 << 2) | 270 | # define DP_TEST_LINK_EDID_READ (1 << 2) |
| 271 | # define DP_TEST_LINK_PHY_TEST_PATTERN (1 << 3) /* DPCD >= 1.1 */ | 271 | # define DP_TEST_LINK_PHY_TEST_PATTERN (1 << 3) /* DPCD >= 1.1 */ |
| 272 | # define DP_TEST_LINK_FAUX_PATTERN (1 << 4) /* DPCD >= 1.2 */ | ||
| 272 | 273 | ||
| 273 | #define DP_TEST_LINK_RATE 0x219 | 274 | #define DP_TEST_LINK_RATE 0x219 |
| 274 | # define DP_LINK_RATE_162 (0x6) | 275 | # define DP_LINK_RATE_162 (0x6) |
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h new file mode 100644 index 000000000000..d32628acdd90 --- /dev/null +++ b/include/drm/drm_mipi_dsi.h | |||
| @@ -0,0 +1,158 @@ | |||
| 1 | /* | ||
| 2 | * MIPI DSI Bus | ||
| 3 | * | ||
| 4 | * Copyright (C) 2012-2013, Samsung Electronics, Co., Ltd. | ||
| 5 | * Andrzej Hajda <a.hajda@samsung.com> | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify | ||
| 8 | * it under the terms of the GNU General Public License version 2 as | ||
| 9 | * published by the Free Software Foundation. | ||
| 10 | */ | ||
| 11 | |||
| 12 | #ifndef __DRM_MIPI_DSI_H__ | ||
| 13 | #define __DRM_MIPI_DSI_H__ | ||
| 14 | |||
| 15 | #include <linux/device.h> | ||
| 16 | |||
| 17 | struct mipi_dsi_host; | ||
| 18 | struct mipi_dsi_device; | ||
| 19 | |||
| 20 | /** | ||
| 21 | * struct mipi_dsi_msg - read/write DSI buffer | ||
| 22 | * @channel: virtual channel id | ||
| 23 | * @type: payload data type | ||
| 24 | * @tx_len: length of @tx_buf | ||
| 25 | * @tx_buf: data to be written | ||
| 26 | * @rx_len: length of @rx_buf | ||
| 27 | * @rx_buf: data to be read, or NULL | ||
| 28 | */ | ||
| 29 | struct mipi_dsi_msg { | ||
| 30 | u8 channel; | ||
| 31 | u8 type; | ||
| 32 | |||
| 33 | size_t tx_len; | ||
| 34 | const void *tx_buf; | ||
| 35 | |||
| 36 | size_t rx_len; | ||
| 37 | void *rx_buf; | ||
| 38 | }; | ||
| 39 | |||
| 40 | /** | ||
| 41 | * struct mipi_dsi_host_ops - DSI bus operations | ||
| 42 | * @attach: attach DSI device to DSI host | ||
| 43 | * @detach: detach DSI device from DSI host | ||
| 44 | * @transfer: send and/or receive DSI packet, return number of received bytes, | ||
| 45 | * or error | ||
| 46 | */ | ||
| 47 | struct mipi_dsi_host_ops { | ||
| 48 | int (*attach)(struct mipi_dsi_host *host, | ||
| 49 | struct mipi_dsi_device *dsi); | ||
| 50 | int (*detach)(struct mipi_dsi_host *host, | ||
| 51 | struct mipi_dsi_device *dsi); | ||
| 52 | ssize_t (*transfer)(struct mipi_dsi_host *host, | ||
| 53 | struct mipi_dsi_msg *msg); | ||
| 54 | }; | ||
| 55 | |||
| 56 | /** | ||
| 57 | * struct mipi_dsi_host - DSI host device | ||
| 58 | * @dev: driver model device node for this DSI host | ||
| 59 | * @ops: DSI host operations | ||
| 60 | */ | ||
| 61 | struct mipi_dsi_host { | ||
| 62 | struct device *dev; | ||
| 63 | const struct mipi_dsi_host_ops *ops; | ||
| 64 | }; | ||
| 65 | |||
| 66 | int mipi_dsi_host_register(struct mipi_dsi_host *host); | ||
| 67 | void mipi_dsi_host_unregister(struct mipi_dsi_host *host); | ||
| 68 | |||
| 69 | /* DSI mode flags */ | ||
| 70 | |||
| 71 | /* video mode */ | ||
| 72 | #define MIPI_DSI_MODE_VIDEO BIT(0) | ||
| 73 | /* video burst mode */ | ||
| 74 | #define MIPI_DSI_MODE_VIDEO_BURST BIT(1) | ||
| 75 | /* video pulse mode */ | ||
| 76 | #define MIPI_DSI_MODE_VIDEO_SYNC_PULSE BIT(2) | ||
| 77 | /* enable auto vertical count mode */ | ||
| 78 | #define MIPI_DSI_MODE_VIDEO_AUTO_VERT BIT(3) | ||
| 79 | /* enable hsync-end packets in vsync-pulse and v-porch area */ | ||
| 80 | #define MIPI_DSI_MODE_VIDEO_HSE BIT(4) | ||
| 81 | /* disable hfront-porch area */ | ||
| 82 | #define MIPI_DSI_MODE_VIDEO_HFP BIT(5) | ||
| 83 | /* disable hback-porch area */ | ||
| 84 | #define MIPI_DSI_MODE_VIDEO_HBP BIT(6) | ||
| 85 | /* disable hsync-active area */ | ||
| 86 | #define MIPI_DSI_MODE_VIDEO_HSA BIT(7) | ||
| 87 | /* flush display FIFO on vsync pulse */ | ||
| 88 | #define MIPI_DSI_MODE_VSYNC_FLUSH BIT(8) | ||
| 89 | /* disable EoT packets in HS mode */ | ||
| 90 | #define MIPI_DSI_MODE_EOT_PACKET BIT(9) | ||
| 91 | |||
| 92 | enum mipi_dsi_pixel_format { | ||
| 93 | MIPI_DSI_FMT_RGB888, | ||
| 94 | MIPI_DSI_FMT_RGB666, | ||
| 95 | MIPI_DSI_FMT_RGB666_PACKED, | ||
| 96 | MIPI_DSI_FMT_RGB565, | ||
| 97 | }; | ||
| 98 | |||
| 99 | /** | ||
| 100 | * struct mipi_dsi_device - DSI peripheral device | ||
| 101 | * @host: DSI host for this peripheral | ||
| 102 | * @dev: driver model device node for this peripheral | ||
| 103 | * @channel: virtual channel assigned to the peripheral | ||
| 104 | * @format: pixel format for video mode | ||
| 105 | * @lanes: number of active data lanes | ||
| 106 | * @mode_flags: DSI operation mode related flags | ||
| 107 | */ | ||
| 108 | struct mipi_dsi_device { | ||
| 109 | struct mipi_dsi_host *host; | ||
| 110 | struct device dev; | ||
| 111 | |||
| 112 | unsigned int channel; | ||
| 113 | unsigned int lanes; | ||
| 114 | enum mipi_dsi_pixel_format format; | ||
| 115 | unsigned long mode_flags; | ||
| 116 | }; | ||
| 117 | |||
| 118 | #define to_mipi_dsi_device(d) container_of(d, struct mipi_dsi_device, dev) | ||
| 119 | |||
| 120 | int mipi_dsi_attach(struct mipi_dsi_device *dsi); | ||
| 121 | int mipi_dsi_detach(struct mipi_dsi_device *dsi); | ||
| 122 | int mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, unsigned int channel, | ||
| 123 | const void *data, size_t len); | ||
| 124 | ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, unsigned int channel, | ||
| 125 | u8 cmd, void *data, size_t len); | ||
| 126 | |||
| 127 | /** | ||
| 128 | * struct mipi_dsi_driver - DSI driver | ||
| 129 | * @driver: device driver model driver | ||
| 130 | * @probe: callback for device binding | ||
| 131 | * @remove: callback for device unbinding | ||
| 132 | */ | ||
| 133 | struct mipi_dsi_driver { | ||
| 134 | struct device_driver driver; | ||
| 135 | int(*probe)(struct mipi_dsi_device *dsi); | ||
| 136 | int(*remove)(struct mipi_dsi_device *dsi); | ||
| 137 | }; | ||
| 138 | |||
| 139 | #define to_mipi_dsi_driver(d) container_of(d, struct mipi_dsi_driver, driver) | ||
| 140 | |||
| 141 | static inline void *mipi_dsi_get_drvdata(const struct mipi_dsi_device *dsi) | ||
| 142 | { | ||
| 143 | return dev_get_drvdata(&dsi->dev); | ||
| 144 | } | ||
| 145 | |||
| 146 | static inline void mipi_dsi_set_drvdata(struct mipi_dsi_device *dsi, void *data) | ||
| 147 | { | ||
| 148 | dev_set_drvdata(&dsi->dev, data); | ||
| 149 | } | ||
| 150 | |||
| 151 | int mipi_dsi_driver_register(struct mipi_dsi_driver *driver); | ||
| 152 | void mipi_dsi_driver_unregister(struct mipi_dsi_driver *driver); | ||
| 153 | |||
| 154 | #define module_mipi_dsi_driver(__mipi_dsi_driver) \ | ||
| 155 | module_driver(__mipi_dsi_driver, mipi_dsi_driver_register, \ | ||
| 156 | mipi_dsi_driver_unregister) | ||
| 157 | |||
| 158 | #endif /* __DRM_MIPI_DSI__ */ | ||
diff --git a/include/drm/drm_os_linux.h b/include/drm/drm_os_linux.h index 815fafc6b4ad..86ab99bc0ac5 100644 --- a/include/drm/drm_os_linux.h +++ b/include/drm/drm_os_linux.h | |||
| @@ -21,7 +21,6 @@ static inline void writeq(u64 val, void __iomem *reg) | |||
| 21 | 21 | ||
| 22 | /** Current process ID */ | 22 | /** Current process ID */ |
| 23 | #define DRM_CURRENTPID task_pid_nr(current) | 23 | #define DRM_CURRENTPID task_pid_nr(current) |
| 24 | #define DRM_SUSER(p) capable(CAP_SYS_ADMIN) | ||
| 25 | #define DRM_UDELAY(d) udelay(d) | 24 | #define DRM_UDELAY(d) udelay(d) |
| 26 | /** Read a byte from a MMIO region */ | 25 | /** Read a byte from a MMIO region */ |
| 27 | #define DRM_READ8(map, offset) readb(((void __iomem *)(map)->handle) + (offset)) | 26 | #define DRM_READ8(map, offset) readb(((void __iomem *)(map)->handle) + (offset)) |
| @@ -35,45 +34,12 @@ static inline void writeq(u64 val, void __iomem *reg) | |||
| 35 | #define DRM_WRITE16(map, offset, val) writew(val, ((void __iomem *)(map)->handle) + (offset)) | 34 | #define DRM_WRITE16(map, offset, val) writew(val, ((void __iomem *)(map)->handle) + (offset)) |
| 36 | /** Write a dword into a MMIO region */ | 35 | /** Write a dword into a MMIO region */ |
| 37 | #define DRM_WRITE32(map, offset, val) writel(val, ((void __iomem *)(map)->handle) + (offset)) | 36 | #define DRM_WRITE32(map, offset, val) writel(val, ((void __iomem *)(map)->handle) + (offset)) |
| 38 | /** Read memory barrier */ | ||
| 39 | 37 | ||
| 40 | /** Read a qword from a MMIO region - be careful using these unless you really understand them */ | 38 | /** Read a qword from a MMIO region - be careful using these unless you really understand them */ |
| 41 | #define DRM_READ64(map, offset) readq(((void __iomem *)(map)->handle) + (offset)) | 39 | #define DRM_READ64(map, offset) readq(((void __iomem *)(map)->handle) + (offset)) |
| 42 | /** Write a qword into a MMIO region */ | 40 | /** Write a qword into a MMIO region */ |
| 43 | #define DRM_WRITE64(map, offset, val) writeq(val, ((void __iomem *)(map)->handle) + (offset)) | 41 | #define DRM_WRITE64(map, offset, val) writeq(val, ((void __iomem *)(map)->handle) + (offset)) |
| 44 | 42 | ||
| 45 | #define DRM_READMEMORYBARRIER() rmb() | ||
| 46 | /** Write memory barrier */ | ||
| 47 | #define DRM_WRITEMEMORYBARRIER() wmb() | ||
| 48 | /** Read/write memory barrier */ | ||
| 49 | #define DRM_MEMORYBARRIER() mb() | ||
| 50 | |||
| 51 | /** IRQ handler arguments and return type and values */ | ||
| 52 | #define DRM_IRQ_ARGS int irq, void *arg | ||
| 53 | |||
| 54 | /** AGP types */ | ||
| 55 | #if __OS_HAS_AGP | ||
| 56 | #define DRM_AGP_MEM struct agp_memory | ||
| 57 | #define DRM_AGP_KERN struct agp_kern_info | ||
| 58 | #else | ||
| 59 | /* define some dummy types for non AGP supporting kernels */ | ||
| 60 | struct no_agp_kern { | ||
| 61 | unsigned long aper_base; | ||
| 62 | unsigned long aper_size; | ||
| 63 | }; | ||
| 64 | #define DRM_AGP_MEM int | ||
| 65 | #define DRM_AGP_KERN struct no_agp_kern | ||
| 66 | #endif | ||
| 67 | |||
| 68 | /** Other copying of data to kernel space */ | ||
| 69 | #define DRM_COPY_FROM_USER(arg1, arg2, arg3) \ | ||
| 70 | copy_from_user(arg1, arg2, arg3) | ||
| 71 | /** Other copying of data from kernel space */ | ||
| 72 | #define DRM_COPY_TO_USER(arg1, arg2, arg3) \ | ||
| 73 | copy_to_user(arg1, arg2, arg3) | ||
| 74 | |||
| 75 | #define DRM_HZ HZ | ||
| 76 | |||
| 77 | #define DRM_WAIT_ON( ret, queue, timeout, condition ) \ | 43 | #define DRM_WAIT_ON( ret, queue, timeout, condition ) \ |
| 78 | do { \ | 44 | do { \ |
| 79 | DECLARE_WAITQUEUE(entry, current); \ | 45 | DECLARE_WAITQUEUE(entry, current); \ |
| @@ -97,6 +63,3 @@ do { \ | |||
| 97 | __set_current_state(TASK_RUNNING); \ | 63 | __set_current_state(TASK_RUNNING); \ |
| 98 | remove_wait_queue(&(queue), &entry); \ | 64 | remove_wait_queue(&(queue), &entry); \ |
| 99 | } while (0) | 65 | } while (0) |
| 100 | |||
| 101 | #define DRM_WAKEUP( queue ) wake_up( queue ) | ||
| 102 | #define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue ) | ||
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h new file mode 100644 index 000000000000..c2ab77add67c --- /dev/null +++ b/include/drm/drm_panel.h | |||
| @@ -0,0 +1,82 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2013, NVIDIA Corporation. All rights reserved. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sub license, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice (including the | ||
| 12 | * next paragraph) shall be included in all copies or substantial portions | ||
| 13 | * of the Software. | ||
| 14 | * | ||
| 15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 17 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
| 18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
| 19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
| 20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
| 21 | * DEALINGS IN THE SOFTWARE. | ||
| 22 | */ | ||
| 23 | |||
| 24 | #ifndef __DRM_PANEL_H__ | ||
| 25 | #define __DRM_PANEL_H__ | ||
| 26 | |||
| 27 | #include <linux/list.h> | ||
| 28 | |||
| 29 | struct drm_connector; | ||
| 30 | struct drm_device; | ||
| 31 | struct drm_panel; | ||
| 32 | |||
| 33 | struct drm_panel_funcs { | ||
| 34 | int (*disable)(struct drm_panel *panel); | ||
| 35 | int (*enable)(struct drm_panel *panel); | ||
| 36 | int (*get_modes)(struct drm_panel *panel); | ||
| 37 | }; | ||
| 38 | |||
| 39 | struct drm_panel { | ||
| 40 | struct drm_device *drm; | ||
| 41 | struct drm_connector *connector; | ||
| 42 | struct device *dev; | ||
| 43 | |||
| 44 | const struct drm_panel_funcs *funcs; | ||
| 45 | |||
| 46 | struct list_head list; | ||
| 47 | }; | ||
| 48 | |||
| 49 | static inline int drm_panel_disable(struct drm_panel *panel) | ||
| 50 | { | ||
| 51 | if (panel && panel->funcs && panel->funcs->disable) | ||
| 52 | return panel->funcs->disable(panel); | ||
| 53 | |||
| 54 | return panel ? -ENOSYS : -EINVAL; | ||
| 55 | } | ||
| 56 | |||
| 57 | static inline int drm_panel_enable(struct drm_panel *panel) | ||
| 58 | { | ||
| 59 | if (panel && panel->funcs && panel->funcs->enable) | ||
| 60 | return panel->funcs->enable(panel); | ||
| 61 | |||
| 62 | return panel ? -ENOSYS : -EINVAL; | ||
| 63 | } | ||
| 64 | |||
| 65 | void drm_panel_init(struct drm_panel *panel); | ||
| 66 | |||
| 67 | int drm_panel_add(struct drm_panel *panel); | ||
| 68 | void drm_panel_remove(struct drm_panel *panel); | ||
| 69 | |||
| 70 | int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector); | ||
| 71 | int drm_panel_detach(struct drm_panel *panel); | ||
| 72 | |||
| 73 | #ifdef CONFIG_OF | ||
| 74 | struct drm_panel *of_drm_find_panel(struct device_node *np); | ||
| 75 | #else | ||
| 76 | static inline struct drm_panel *of_drm_find_panel(struct device_node *np) | ||
| 77 | { | ||
| 78 | return NULL; | ||
| 79 | } | ||
| 80 | #endif | ||
| 81 | |||
| 82 | #endif | ||
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 8639c85d61c4..32d34ebf0706 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h | |||
| @@ -681,6 +681,15 @@ extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement); | |||
| 681 | extern int ttm_tt_swapout(struct ttm_tt *ttm, | 681 | extern int ttm_tt_swapout(struct ttm_tt *ttm, |
| 682 | struct file *persistent_swap_storage); | 682 | struct file *persistent_swap_storage); |
| 683 | 683 | ||
| 684 | /** | ||
| 685 | * ttm_tt_unpopulate - free pages from a ttm | ||
| 686 | * | ||
| 687 | * @ttm: Pointer to the ttm_tt structure | ||
| 688 | * | ||
| 689 | * Calls the driver method to free all pages from a ttm | ||
| 690 | */ | ||
| 691 | extern void ttm_tt_unpopulate(struct ttm_tt *ttm); | ||
| 692 | |||
| 684 | /* | 693 | /* |
| 685 | * ttm_bo.c | 694 | * ttm_bo.c |
| 686 | */ | 695 | */ |
diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h index 58b029894eb3..0097cc03034e 100644 --- a/include/drm/ttm/ttm_object.h +++ b/include/drm/ttm/ttm_object.h | |||
| @@ -190,14 +190,26 @@ extern int ttm_base_object_init(struct ttm_object_file *tfile, | |||
| 190 | * @key: Hash key | 190 | * @key: Hash key |
| 191 | * | 191 | * |
| 192 | * Looks up a struct ttm_base_object with the key @key. | 192 | * Looks up a struct ttm_base_object with the key @key. |
| 193 | * Also verifies that the object is visible to the application, by | ||
| 194 | * comparing the @tfile argument and checking the object shareable flag. | ||
| 195 | */ | 193 | */ |
| 196 | 194 | ||
| 197 | extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file | 195 | extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file |
| 198 | *tfile, uint32_t key); | 196 | *tfile, uint32_t key); |
| 199 | 197 | ||
| 200 | /** | 198 | /** |
| 199 | * ttm_base_object_lookup_for_ref | ||
| 200 | * | ||
| 201 | * @tdev: Pointer to a struct ttm_object_device. | ||
| 202 | * @key: Hash key | ||
| 203 | * | ||
| 204 | * Looks up a struct ttm_base_object with the key @key. | ||
| 205 | * This function should only be used when the struct tfile associated with the | ||
| 206 | * caller doesn't yet have a reference to the base object. | ||
| 207 | */ | ||
| 208 | |||
| 209 | extern struct ttm_base_object * | ||
| 210 | ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key); | ||
| 211 | |||
| 212 | /** | ||
| 201 | * ttm_base_object_unref | 213 | * ttm_base_object_unref |
| 202 | * | 214 | * |
| 203 | * @p_base: Pointer to a pointer referencing a struct ttm_base_object. | 215 | * @p_base: Pointer to a pointer referencing a struct ttm_base_object. |
| @@ -218,6 +230,8 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base); | |||
| 218 | * @existed: Upon completion, indicates that an identical reference object | 230 | * @existed: Upon completion, indicates that an identical reference object |
| 219 | * already existed, and the refcount was upped on that object instead. | 231 | * already existed, and the refcount was upped on that object instead. |
| 220 | * | 232 | * |
| 233 | * Checks that the base object is shareable and adds a ref object to it. | ||
| 234 | * | ||
| 221 | * Adding a ref object to a base object is basically like referencing the | 235 | * Adding a ref object to a base object is basically like referencing the |
| 222 | * base object, but a user-space application holds the reference. When the | 236 | * base object, but a user-space application holds the reference. When the |
| 223 | * file corresponding to @tfile is closed, all its reference objects are | 237 | * file corresponding to @tfile is closed, all its reference objects are |
diff --git a/include/linux/fb.h b/include/linux/fb.h index 70c4836e4a9f..fe6ac956550e 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h | |||
| @@ -613,8 +613,8 @@ extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf, | |||
| 613 | extern int register_framebuffer(struct fb_info *fb_info); | 613 | extern int register_framebuffer(struct fb_info *fb_info); |
| 614 | extern int unregister_framebuffer(struct fb_info *fb_info); | 614 | extern int unregister_framebuffer(struct fb_info *fb_info); |
| 615 | extern int unlink_framebuffer(struct fb_info *fb_info); | 615 | extern int unlink_framebuffer(struct fb_info *fb_info); |
| 616 | extern void remove_conflicting_framebuffers(struct apertures_struct *a, | 616 | extern int remove_conflicting_framebuffers(struct apertures_struct *a, |
| 617 | const char *name, bool primary); | 617 | const char *name, bool primary); |
| 618 | extern int fb_prepare_logo(struct fb_info *fb_info, int rotate); | 618 | extern int fb_prepare_logo(struct fb_info *fb_info, int rotate); |
| 619 | extern int fb_show_logo(struct fb_info *fb_info, int rotate); | 619 | extern int fb_show_logo(struct fb_info *fb_info, int rotate); |
| 620 | extern char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size); | 620 | extern char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size); |
diff --git a/include/linux/host1x.h b/include/linux/host1x.h index f5b9b87ac9a9..3af847273277 100644 --- a/include/linux/host1x.h +++ b/include/linux/host1x.h | |||
| @@ -281,4 +281,10 @@ int host1x_device_exit(struct host1x_device *device); | |||
| 281 | int host1x_client_register(struct host1x_client *client); | 281 | int host1x_client_register(struct host1x_client *client); |
| 282 | int host1x_client_unregister(struct host1x_client *client); | 282 | int host1x_client_unregister(struct host1x_client *client); |
| 283 | 283 | ||
| 284 | struct tegra_mipi_device; | ||
| 285 | |||
| 286 | struct tegra_mipi_device *tegra_mipi_request(struct device *device); | ||
| 287 | void tegra_mipi_free(struct tegra_mipi_device *device); | ||
| 288 | int tegra_mipi_calibrate(struct tegra_mipi_device *device); | ||
| 289 | |||
| 284 | #endif | 290 | #endif |
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index 9b24d65fed72..3c9a833992e8 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h | |||
| @@ -181,7 +181,6 @@ enum drm_map_type { | |||
| 181 | _DRM_AGP = 3, /**< AGP/GART */ | 181 | _DRM_AGP = 3, /**< AGP/GART */ |
| 182 | _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */ | 182 | _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */ |
| 183 | _DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */ | 183 | _DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */ |
| 184 | _DRM_GEM = 6, /**< GEM object (obsolete) */ | ||
| 185 | }; | 184 | }; |
| 186 | 185 | ||
| 187 | /** | 186 | /** |
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index 3a4e97bd8607..126bfaa8bb6b 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h | |||
| @@ -222,6 +222,7 @@ typedef struct _drm_i915_sarea { | |||
| 222 | #define DRM_I915_GEM_SET_CACHING 0x2f | 222 | #define DRM_I915_GEM_SET_CACHING 0x2f |
| 223 | #define DRM_I915_GEM_GET_CACHING 0x30 | 223 | #define DRM_I915_GEM_GET_CACHING 0x30 |
| 224 | #define DRM_I915_REG_READ 0x31 | 224 | #define DRM_I915_REG_READ 0x31 |
| 225 | #define DRM_I915_GET_RESET_STATS 0x32 | ||
| 225 | 226 | ||
| 226 | #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) | 227 | #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) |
| 227 | #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) | 228 | #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) |
| @@ -271,6 +272,7 @@ typedef struct _drm_i915_sarea { | |||
| 271 | #define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create) | 272 | #define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create) |
| 272 | #define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy) | 273 | #define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy) |
| 273 | #define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read) | 274 | #define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read) |
| 275 | #define DRM_IOCTL_I915_GET_RESET_STATS DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats) | ||
| 274 | 276 | ||
| 275 | /* Allow drivers to submit batchbuffers directly to hardware, relying | 277 | /* Allow drivers to submit batchbuffers directly to hardware, relying |
| 276 | * on the security mechanisms provided by hardware. | 278 | * on the security mechanisms provided by hardware. |
| @@ -719,7 +721,7 @@ struct drm_i915_gem_execbuffer2 { | |||
| 719 | */ | 721 | */ |
| 720 | #define I915_EXEC_IS_PINNED (1<<10) | 722 | #define I915_EXEC_IS_PINNED (1<<10) |
| 721 | 723 | ||
| 722 | /** Provide a hint to the kernel that the command stream and auxilliary | 724 | /** Provide a hint to the kernel that the command stream and auxiliary |
| 723 | * state buffers already holds the correct presumed addresses and so the | 725 | * state buffers already holds the correct presumed addresses and so the |
| 724 | * relocation process may be skipped if no buffers need to be moved in | 726 | * relocation process may be skipped if no buffers need to be moved in |
| 725 | * preparation for the execbuffer. | 727 | * preparation for the execbuffer. |
| @@ -1030,4 +1032,21 @@ struct drm_i915_reg_read { | |||
| 1030 | __u64 offset; | 1032 | __u64 offset; |
| 1031 | __u64 val; /* Return value */ | 1033 | __u64 val; /* Return value */ |
| 1032 | }; | 1034 | }; |
| 1035 | |||
| 1036 | struct drm_i915_reset_stats { | ||
| 1037 | __u32 ctx_id; | ||
| 1038 | __u32 flags; | ||
| 1039 | |||
| 1040 | /* All resets since boot/module reload, for all contexts */ | ||
| 1041 | __u32 reset_count; | ||
| 1042 | |||
| 1043 | /* Number of batches lost when active in GPU, for this context */ | ||
| 1044 | __u32 batch_active; | ||
| 1045 | |||
| 1046 | /* Number of batches lost pending for execution, for this context */ | ||
| 1047 | __u32 batch_pending; | ||
| 1048 | |||
| 1049 | __u32 pad; | ||
| 1050 | }; | ||
| 1051 | |||
| 1033 | #endif /* _UAPI_I915_DRM_H_ */ | 1052 | #endif /* _UAPI_I915_DRM_H_ */ |
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h index fe421e8a431b..d9ea3a73afe2 100644 --- a/include/uapi/drm/radeon_drm.h +++ b/include/uapi/drm/radeon_drm.h | |||
| @@ -985,6 +985,8 @@ struct drm_radeon_cs { | |||
| 985 | #define RADEON_INFO_CIK_MACROTILE_MODE_ARRAY 0x18 | 985 | #define RADEON_INFO_CIK_MACROTILE_MODE_ARRAY 0x18 |
| 986 | /* query the number of render backends */ | 986 | /* query the number of render backends */ |
| 987 | #define RADEON_INFO_SI_BACKEND_ENABLED_MASK 0x19 | 987 | #define RADEON_INFO_SI_BACKEND_ENABLED_MASK 0x19 |
| 988 | /* max engine clock - needed for OpenCL */ | ||
| 989 | #define RADEON_INFO_MAX_SCLK 0x1a | ||
| 988 | 990 | ||
| 989 | 991 | ||
| 990 | struct drm_radeon_info { | 992 | struct drm_radeon_info { |
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h index f854ca4a1372..9971c560ed9a 100644 --- a/include/uapi/drm/vmwgfx_drm.h +++ b/include/uapi/drm/vmwgfx_drm.h | |||
| @@ -28,6 +28,10 @@ | |||
| 28 | #ifndef __VMWGFX_DRM_H__ | 28 | #ifndef __VMWGFX_DRM_H__ |
| 29 | #define __VMWGFX_DRM_H__ | 29 | #define __VMWGFX_DRM_H__ |
| 30 | 30 | ||
| 31 | #ifndef __KERNEL__ | ||
| 32 | #include <drm.h> | ||
| 33 | #endif | ||
| 34 | |||
| 31 | #define DRM_VMW_MAX_SURFACE_FACES 6 | 35 | #define DRM_VMW_MAX_SURFACE_FACES 6 |
| 32 | #define DRM_VMW_MAX_MIP_LEVELS 24 | 36 | #define DRM_VMW_MAX_MIP_LEVELS 24 |
| 33 | 37 | ||
| @@ -55,6 +59,11 @@ | |||
| 55 | #define DRM_VMW_PRESENT 18 | 59 | #define DRM_VMW_PRESENT 18 |
| 56 | #define DRM_VMW_PRESENT_READBACK 19 | 60 | #define DRM_VMW_PRESENT_READBACK 19 |
| 57 | #define DRM_VMW_UPDATE_LAYOUT 20 | 61 | #define DRM_VMW_UPDATE_LAYOUT 20 |
| 62 | #define DRM_VMW_CREATE_SHADER 21 | ||
| 63 | #define DRM_VMW_UNREF_SHADER 22 | ||
| 64 | #define DRM_VMW_GB_SURFACE_CREATE 23 | ||
| 65 | #define DRM_VMW_GB_SURFACE_REF 24 | ||
| 66 | #define DRM_VMW_SYNCCPU 25 | ||
| 58 | 67 | ||
| 59 | /*************************************************************************/ | 68 | /*************************************************************************/ |
| 60 | /** | 69 | /** |
| @@ -76,6 +85,8 @@ | |||
| 76 | #define DRM_VMW_PARAM_MAX_FB_SIZE 5 | 85 | #define DRM_VMW_PARAM_MAX_FB_SIZE 5 |
| 77 | #define DRM_VMW_PARAM_FIFO_HW_VERSION 6 | 86 | #define DRM_VMW_PARAM_FIFO_HW_VERSION 6 |
| 78 | #define DRM_VMW_PARAM_MAX_SURF_MEMORY 7 | 87 | #define DRM_VMW_PARAM_MAX_SURF_MEMORY 7 |
| 88 | #define DRM_VMW_PARAM_3D_CAPS_SIZE 8 | ||
| 89 | #define DRM_VMW_PARAM_MAX_MOB_MEMORY 9 | ||
| 79 | 90 | ||
| 80 | /** | 91 | /** |
| 81 | * struct drm_vmw_getparam_arg | 92 | * struct drm_vmw_getparam_arg |
| @@ -788,4 +799,253 @@ struct drm_vmw_update_layout_arg { | |||
| 788 | uint64_t rects; | 799 | uint64_t rects; |
| 789 | }; | 800 | }; |
| 790 | 801 | ||
| 802 | |||
| 803 | /*************************************************************************/ | ||
| 804 | /** | ||
| 805 | * DRM_VMW_CREATE_SHADER - Create shader | ||
| 806 | * | ||
| 807 | * Creates a shader and optionally binds it to a dma buffer containing | ||
| 808 | * the shader byte-code. | ||
| 809 | */ | ||
| 810 | |||
| 811 | /** | ||
| 812 | * enum drm_vmw_shader_type - Shader types | ||
| 813 | */ | ||
| 814 | enum drm_vmw_shader_type { | ||
| 815 | drm_vmw_shader_type_vs = 0, | ||
| 816 | drm_vmw_shader_type_ps, | ||
| 817 | drm_vmw_shader_type_gs | ||
| 818 | }; | ||
| 819 | |||
| 820 | |||
| 821 | /** | ||
| 822 | * struct drm_vmw_shader_create_arg | ||
| 823 | * | ||
| 824 | * @shader_type: Shader type of the shader to create. | ||
| 825 | * @size: Size of the byte-code in bytes. | ||
| 826 | * where the shader byte-code starts | ||
| 827 | * @buffer_handle: Buffer handle identifying the buffer containing the | ||
| 828 | * shader byte-code | ||
| 829 | * @shader_handle: On successful completion contains a handle that | ||
| 830 | * can be used to subsequently identify the shader. | ||
| 831 | * @offset: Offset in bytes into the buffer given by @buffer_handle, | ||
| 832 | * | ||
| 833 | * Input / Output argument to the DRM_VMW_CREATE_SHADER Ioctl. | ||
| 834 | */ | ||
| 835 | struct drm_vmw_shader_create_arg { | ||
| 836 | enum drm_vmw_shader_type shader_type; | ||
| 837 | uint32_t size; | ||
| 838 | uint32_t buffer_handle; | ||
| 839 | uint32_t shader_handle; | ||
| 840 | uint64_t offset; | ||
| 841 | }; | ||
| 842 | |||
| 843 | /*************************************************************************/ | ||
| 844 | /** | ||
| 845 | * DRM_VMW_UNREF_SHADER - Unreferences a shader | ||
| 846 | * | ||
| 847 | * Destroys a user-space reference to a shader, optionally destroying | ||
| 848 | * it. | ||
| 849 | */ | ||
| 850 | |||
| 851 | /** | ||
| 852 | * struct drm_vmw_shader_arg | ||
| 853 | * | ||
| 854 | * @handle: Handle identifying the shader to destroy. | ||
| 855 | * | ||
| 856 | * Input argument to the DRM_VMW_UNREF_SHADER ioctl. | ||
| 857 | */ | ||
| 858 | struct drm_vmw_shader_arg { | ||
| 859 | uint32_t handle; | ||
| 860 | uint32_t pad64; | ||
| 861 | }; | ||
| 862 | |||
| 863 | /*************************************************************************/ | ||
| 864 | /** | ||
| 865 | * DRM_VMW_GB_SURFACE_CREATE - Create a host guest-backed surface. | ||
| 866 | * | ||
| 867 | * Allocates a surface handle and queues a create surface command | ||
| 868 | * for the host on the first use of the surface. The surface ID can | ||
| 869 | * be used as the surface ID in commands referencing the surface. | ||
| 870 | */ | ||
| 871 | |||
| 872 | /** | ||
| 873 | * enum drm_vmw_surface_flags | ||
| 874 | * | ||
| 875 | * @drm_vmw_surface_flag_shareable: Whether the surface is shareable | ||
| 876 | * @drm_vmw_surface_flag_scanout: Whether the surface is a scanout | ||
| 877 | * surface. | ||
| 878 | * @drm_vmw_surface_flag_create_buffer: Create a backup buffer if none is | ||
| 879 | * given. | ||
| 880 | */ | ||
| 881 | enum drm_vmw_surface_flags { | ||
| 882 | drm_vmw_surface_flag_shareable = (1 << 0), | ||
| 883 | drm_vmw_surface_flag_scanout = (1 << 1), | ||
| 884 | drm_vmw_surface_flag_create_buffer = (1 << 2) | ||
| 885 | }; | ||
| 886 | |||
| 887 | /** | ||
| 888 | * struct drm_vmw_gb_surface_create_req | ||
| 889 | * | ||
| 890 | * @svga3d_flags: SVGA3d surface flags for the device. | ||
| 891 | * @format: SVGA3d format. | ||
| 892 | * @mip_level: Number of mip levels for all faces. | ||
| 893 | * @drm_surface_flags Flags as described above. | ||
| 894 | * @multisample_count Future use. Set to 0. | ||
| 895 | * @autogen_filter Future use. Set to 0. | ||
| 896 | * @buffer_handle Buffer handle of backup buffer. SVGA3D_INVALID_ID | ||
| 897 | * if none. | ||
| 898 | * @base_size Size of the base mip level for all faces. | ||
| 899 | * | ||
| 900 | * Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl. | ||
| 901 | * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl. | ||
| 902 | */ | ||
| 903 | struct drm_vmw_gb_surface_create_req { | ||
| 904 | uint32_t svga3d_flags; | ||
| 905 | uint32_t format; | ||
| 906 | uint32_t mip_levels; | ||
| 907 | enum drm_vmw_surface_flags drm_surface_flags; | ||
| 908 | uint32_t multisample_count; | ||
| 909 | uint32_t autogen_filter; | ||
| 910 | uint32_t buffer_handle; | ||
| 911 | uint32_t pad64; | ||
| 912 | struct drm_vmw_size base_size; | ||
| 913 | }; | ||
| 914 | |||
| 915 | /** | ||
| 916 | * struct drm_vmw_gb_surface_create_rep | ||
| 917 | * | ||
| 918 | * @handle: Surface handle. | ||
| 919 | * @backup_size: Size of backup buffers for this surface. | ||
| 920 | * @buffer_handle: Handle of backup buffer. SVGA3D_INVALID_ID if none. | ||
| 921 | * @buffer_size: Actual size of the buffer identified by | ||
| 922 | * @buffer_handle | ||
| 923 | * @buffer_map_handle: Offset into device address space for the buffer | ||
| 924 | * identified by @buffer_handle. | ||
| 925 | * | ||
| 926 | * Part of output argument for the DRM_VMW_GB_SURFACE_REF ioctl. | ||
| 927 | * Output argument for the DRM_VMW_GB_SURFACE_CREATE ioctl. | ||
| 928 | */ | ||
| 929 | struct drm_vmw_gb_surface_create_rep { | ||
| 930 | uint32_t handle; | ||
| 931 | uint32_t backup_size; | ||
| 932 | uint32_t buffer_handle; | ||
| 933 | uint32_t buffer_size; | ||
| 934 | uint64_t buffer_map_handle; | ||
| 935 | }; | ||
| 936 | |||
| 937 | /** | ||
| 938 | * union drm_vmw_gb_surface_create_arg | ||
| 939 | * | ||
| 940 | * @req: Input argument as described above. | ||
| 941 | * @rep: Output argument as described above. | ||
| 942 | * | ||
| 943 | * Argument to the DRM_VMW_GB_SURFACE_CREATE ioctl. | ||
| 944 | */ | ||
| 945 | union drm_vmw_gb_surface_create_arg { | ||
| 946 | struct drm_vmw_gb_surface_create_rep rep; | ||
| 947 | struct drm_vmw_gb_surface_create_req req; | ||
| 948 | }; | ||
| 949 | |||
| 950 | /*************************************************************************/ | ||
| 951 | /** | ||
| 952 | * DRM_VMW_GB_SURFACE_REF - Reference a host surface. | ||
| 953 | * | ||
| 954 | * Puts a reference on a host surface with a given handle, as previously | ||
| 955 | * returned by the DRM_VMW_GB_SURFACE_CREATE ioctl. | ||
| 956 | * A reference will make sure the surface isn't destroyed while we hold | ||
| 957 | * it and will allow the calling client to use the surface handle in | ||
| 958 | * the command stream. | ||
| 959 | * | ||
| 960 | * On successful return, the Ioctl returns the surface information given | ||
| 961 | * to and returned from the DRM_VMW_GB_SURFACE_CREATE ioctl. | ||
| 962 | */ | ||
| 963 | |||
| 964 | /** | ||
| 965 | * struct drm_vmw_gb_surface_reference_arg | ||
| 966 | * | ||
| 967 | * @creq: The data used as input when the surface was created, as described | ||
| 968 | * above at "struct drm_vmw_gb_surface_create_req" | ||
| 969 | * @crep: Additional data output when the surface was created, as described | ||
| 970 | * above at "struct drm_vmw_gb_surface_create_rep" | ||
| 971 | * | ||
| 972 | * Output Argument to the DRM_VMW_GB_SURFACE_REF ioctl. | ||
| 973 | */ | ||
| 974 | struct drm_vmw_gb_surface_ref_rep { | ||
| 975 | struct drm_vmw_gb_surface_create_req creq; | ||
| 976 | struct drm_vmw_gb_surface_create_rep crep; | ||
| 977 | }; | ||
| 978 | |||
| 979 | /** | ||
| 980 | * union drm_vmw_gb_surface_reference_arg | ||
| 981 | * | ||
| 982 | * @req: Input data as described above at "struct drm_vmw_surface_arg" | ||
| 983 | * @rep: Output data as described above at "struct drm_vmw_gb_surface_ref_rep" | ||
| 984 | * | ||
| 985 | * Argument to the DRM_VMW_GB_SURFACE_REF Ioctl. | ||
| 986 | */ | ||
| 987 | union drm_vmw_gb_surface_reference_arg { | ||
| 988 | struct drm_vmw_gb_surface_ref_rep rep; | ||
| 989 | struct drm_vmw_surface_arg req; | ||
| 990 | }; | ||
| 991 | |||
| 992 | |||
| 993 | /*************************************************************************/ | ||
| 994 | /** | ||
| 995 | * DRM_VMW_SYNCCPU - Sync a DMA buffer / MOB for CPU access. | ||
| 996 | * | ||
| 997 | * Idles any previously submitted GPU operations on the buffer and | ||
| 998 | * by default blocks command submissions that reference the buffer. | ||
| 999 | * If the file descriptor used to grab a blocking CPU sync is closed, the | ||
| 1000 | * cpu sync is released. | ||
| 1001 | * The flags argument indicates how the grab / release operation should be | ||
| 1002 | * performed: | ||
| 1003 | */ | ||
| 1004 | |||
| 1005 | /** | ||
| 1006 | * enum drm_vmw_synccpu_flags - Synccpu flags: | ||
| 1007 | * | ||
| 1008 | * @drm_vmw_synccpu_read: Sync for read. If sync is done for read only, it's a | ||
| 1009 | * hint to the kernel to allow command submissions that references the buffer | ||
| 1010 | * for read-only. | ||
| 1011 | * @drm_vmw_synccpu_write: Sync for write. Block all command submissions | ||
| 1012 | * referencing this buffer. | ||
| 1013 | * @drm_vmw_synccpu_dontblock: Dont wait for GPU idle, but rather return | ||
| 1014 | * -EBUSY should the buffer be busy. | ||
| 1015 | * @drm_vmw_synccpu_allow_cs: Allow command submission that touches the buffer | ||
| 1016 | * while the buffer is synced for CPU. This is similar to the GEM bo idle | ||
| 1017 | * behavior. | ||
| 1018 | */ | ||
| 1019 | enum drm_vmw_synccpu_flags { | ||
| 1020 | drm_vmw_synccpu_read = (1 << 0), | ||
| 1021 | drm_vmw_synccpu_write = (1 << 1), | ||
| 1022 | drm_vmw_synccpu_dontblock = (1 << 2), | ||
| 1023 | drm_vmw_synccpu_allow_cs = (1 << 3) | ||
| 1024 | }; | ||
| 1025 | |||
| 1026 | /** | ||
| 1027 | * enum drm_vmw_synccpu_op - Synccpu operations: | ||
| 1028 | * | ||
| 1029 | * @drm_vmw_synccpu_grab: Grab the buffer for CPU operations | ||
| 1030 | * @drm_vmw_synccpu_release: Release a previous grab. | ||
| 1031 | */ | ||
| 1032 | enum drm_vmw_synccpu_op { | ||
| 1033 | drm_vmw_synccpu_grab, | ||
| 1034 | drm_vmw_synccpu_release | ||
| 1035 | }; | ||
| 1036 | |||
| 1037 | /** | ||
| 1038 | * struct drm_vmw_synccpu_arg | ||
| 1039 | * | ||
| 1040 | * @op: The synccpu operation as described above. | ||
| 1041 | * @handle: Handle identifying the buffer object. | ||
| 1042 | * @flags: Flags as described above. | ||
| 1043 | */ | ||
| 1044 | struct drm_vmw_synccpu_arg { | ||
| 1045 | enum drm_vmw_synccpu_op op; | ||
| 1046 | enum drm_vmw_synccpu_flags flags; | ||
| 1047 | uint32_t handle; | ||
| 1048 | uint32_t pad64; | ||
| 1049 | }; | ||
| 1050 | |||
| 791 | #endif | 1051 | #endif |
