aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/nouveau
diff options
context:
space:
mode:
authorAndrea Bastoni <bastoni@cs.unc.edu>2010-05-30 19:16:45 -0400
committerAndrea Bastoni <bastoni@cs.unc.edu>2010-05-30 19:16:45 -0400
commitada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree644b88f8a71896307d71438e9b3af49126ffb22b /drivers/gpu/drm/nouveau
parent43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)
Merge branch 'wip-2.6.34' into old-private-masterarchived-private-master
Diffstat (limited to 'drivers/gpu/drm/nouveau')
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig44
-rw-r--r--drivers/gpu/drm/nouveau/Makefile32
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c206
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c159
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c6127
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.h301
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c778
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_calc.c478
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_channel.c447
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c881
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h55
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_crtc.h95
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c178
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c112
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c353
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.h166
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c581
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.c434
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h1344
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_encoder.h92
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fb.h47
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c422
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.h54
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c262
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c837
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_grctx.c162
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_grctx.h133
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hw.c1080
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hw.h455
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_i2c.c269
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_i2c.h52
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ioc32.c70
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_irq.c1256
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c727
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_notifier.c200
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_object.c1295
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_reg.h837
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c341
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c923
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c103
-rw-r--r--drivers/gpu/drm/nouveau/nv04_crtc.c1000
-rw-r--r--drivers/gpu/drm/nouveau/nv04_cursor.c70
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dac.c531
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dfp.c623
-rw-r--r--drivers/gpu/drm/nouveau/nv04_display.c244
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fb.c21
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fbcon.c312
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fifo.c310
-rw-r--r--drivers/gpu/drm/nouveau/nv04_graph.c584
-rw-r--r--drivers/gpu/drm/nouveau/nv04_instmem.c208
-rw-r--r--drivers/gpu/drm/nouveau/nv04_mc.c20
-rw-r--r--drivers/gpu/drm/nouveau/nv04_timer.c51
-rw-r--r--drivers/gpu/drm/nouveau/nv04_tv.c305
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fb.c44
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fifo.c260
-rw-r--r--drivers/gpu/drm/nouveau/nv10_graph.c1009
-rw-r--r--drivers/gpu/drm/nouveau/nv17_gpio.c92
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv.c778
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv.h156
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv_modes.c583
-rw-r--r--drivers/gpu/drm/nouveau/nv20_graph.c775
-rw-r--r--drivers/gpu/drm/nouveau/nv40_fb.c75
-rw-r--r--drivers/gpu/drm/nouveau/nv40_fifo.c319
-rw-r--r--drivers/gpu/drm/nouveau/nv40_graph.c427
-rw-r--r--drivers/gpu/drm/nouveau/nv40_grctx.c678
-rw-r--r--drivers/gpu/drm/nouveau/nv40_mc.c38
-rw-r--r--drivers/gpu/drm/nouveau/nv50_crtc.c790
-rw-r--r--drivers/gpu/drm/nouveau/nv50_cursor.c156
-rw-r--r--drivers/gpu/drm/nouveau/nv50_dac.c304
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c1004
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.h47
-rw-r--r--drivers/gpu/drm/nouveau/nv50_evo.h113
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fb.c32
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c268
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fifo.c500
-rw-r--r--drivers/gpu/drm/nouveau/nv50_gpio.c76
-rw-r--r--drivers/gpu/drm/nouveau/nv50_graph.c419
-rw-r--r--drivers/gpu/drm/nouveau/nv50_grctx.c2383
-rw-r--r--drivers/gpu/drm/nouveau/nv50_instmem.c527
-rw-r--r--drivers/gpu/drm/nouveau/nv50_mc.c40
-rw-r--r--drivers/gpu/drm/nouveau/nv50_sor.c346
-rw-r--r--drivers/gpu/drm/nouveau/nvreg.h535
82 files changed, 39441 insertions, 0 deletions
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
new file mode 100644
index 000000000000..1175429da102
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -0,0 +1,44 @@
1config DRM_NOUVEAU
2 tristate "Nouveau (nVidia) cards"
3 depends on DRM
4 select FW_LOADER
5 select DRM_KMS_HELPER
6 select DRM_TTM
7 select FB_CFB_FILLRECT
8 select FB_CFB_COPYAREA
9 select FB_CFB_IMAGEBLIT
10 select FB
11 select FRAMEBUFFER_CONSOLE if !EMBEDDED
12 select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
13 help
14 Choose this option for open-source nVidia support.
15
16config DRM_NOUVEAU_BACKLIGHT
17 bool "Support for backlight control"
18 depends on DRM_NOUVEAU
19 default y
20 help
21 Say Y here if you want to control the backlight of your display
22 (e.g. a laptop panel).
23
24config DRM_NOUVEAU_DEBUG
25 bool "Build in Nouveau's debugfs support"
26 depends on DRM_NOUVEAU && DEBUG_FS
27 default y
28 help
29 Say Y here if you want Nouveau to output debugging information
30 via debugfs.
31
32menu "I2C encoder or helper chips"
33 depends on DRM && DRM_KMS_HELPER && I2C
34
35config DRM_I2C_CH7006
36 tristate "Chrontel ch7006 TV encoder"
37 default m if DRM_NOUVEAU
38 help
39 Support for Chrontel ch7006 and similar TV encoders, found
40 on some nVidia video cards.
41
42 This driver is currently only useful if you're also using
43 the nouveau driver.
44endmenu
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
new file mode 100644
index 000000000000..453df3f6053f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -0,0 +1,32 @@
1#
2# Makefile for the drm device driver. This driver provides support for the
3# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
4
5ccflags-y := -Iinclude/drm
6nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
7 nouveau_object.o nouveau_irq.o nouveau_notifier.o \
8 nouveau_sgdma.o nouveau_dma.o \
9 nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
10 nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
11 nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
12 nouveau_dp.o nouveau_grctx.o \
13 nv04_timer.o \
14 nv04_mc.o nv40_mc.o nv50_mc.o \
15 nv04_fb.o nv10_fb.o nv40_fb.o nv50_fb.o \
16 nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \
17 nv04_graph.o nv10_graph.o nv20_graph.o \
18 nv40_graph.o nv50_graph.o \
19 nv40_grctx.o nv50_grctx.o \
20 nv04_instmem.o nv50_instmem.o \
21 nv50_crtc.o nv50_dac.o nv50_sor.o \
22 nv50_cursor.o nv50_display.o nv50_fbcon.o \
23 nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
24 nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \
25 nv17_gpio.o nv50_gpio.o
26
27nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
28nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
29nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o
30nouveau-$(CONFIG_ACPI) += nouveau_acpi.o
31
32obj-$(CONFIG_DRM_NOUVEAU)+= nouveau.o
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
new file mode 100644
index 000000000000..e13f6af0037a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -0,0 +1,206 @@
1#include <linux/pci.h>
2#include <linux/acpi.h>
3#include <linux/slab.h>
4#include <acpi/acpi_drivers.h>
5#include <acpi/acpi_bus.h>
6
7#include "drmP.h"
8#include "drm.h"
9#include "drm_sarea.h"
10#include "drm_crtc_helper.h"
11#include "nouveau_drv.h"
12#include "nouveau_drm.h"
13#include "nv50_display.h"
14
15#include <linux/vga_switcheroo.h>
16
17#define NOUVEAU_DSM_SUPPORTED 0x00
18#define NOUVEAU_DSM_SUPPORTED_FUNCTIONS 0x00
19
20#define NOUVEAU_DSM_ACTIVE 0x01
21#define NOUVEAU_DSM_ACTIVE_QUERY 0x00
22
23#define NOUVEAU_DSM_LED 0x02
24#define NOUVEAU_DSM_LED_STATE 0x00
25#define NOUVEAU_DSM_LED_OFF 0x10
26#define NOUVEAU_DSM_LED_STAMINA 0x11
27#define NOUVEAU_DSM_LED_SPEED 0x12
28
29#define NOUVEAU_DSM_POWER 0x03
30#define NOUVEAU_DSM_POWER_STATE 0x00
31#define NOUVEAU_DSM_POWER_SPEED 0x01
32#define NOUVEAU_DSM_POWER_STAMINA 0x02
33
34static struct nouveau_dsm_priv {
35 bool dsm_detected;
36 acpi_handle dhandle;
37 acpi_handle dsm_handle;
38} nouveau_dsm_priv;
39
40static const char nouveau_dsm_muid[] = {
41 0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D,
42 0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4,
43};
44
45static int nouveau_dsm(acpi_handle handle, int func, int arg, int *result)
46{
47 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
48 struct acpi_object_list input;
49 union acpi_object params[4];
50 union acpi_object *obj;
51 int err;
52
53 input.count = 4;
54 input.pointer = params;
55 params[0].type = ACPI_TYPE_BUFFER;
56 params[0].buffer.length = sizeof(nouveau_dsm_muid);
57 params[0].buffer.pointer = (char *)nouveau_dsm_muid;
58 params[1].type = ACPI_TYPE_INTEGER;
59 params[1].integer.value = 0x00000102;
60 params[2].type = ACPI_TYPE_INTEGER;
61 params[2].integer.value = func;
62 params[3].type = ACPI_TYPE_INTEGER;
63 params[3].integer.value = arg;
64
65 err = acpi_evaluate_object(handle, "_DSM", &input, &output);
66 if (err) {
67 printk(KERN_INFO "failed to evaluate _DSM: %d\n", err);
68 return err;
69 }
70
71 obj = (union acpi_object *)output.pointer;
72
73 if (obj->type == ACPI_TYPE_INTEGER)
74 if (obj->integer.value == 0x80000002)
75 return -ENODEV;
76
77 if (obj->type == ACPI_TYPE_BUFFER) {
78 if (obj->buffer.length == 4 && result) {
79 *result = 0;
80 *result |= obj->buffer.pointer[0];
81 *result |= (obj->buffer.pointer[1] << 8);
82 *result |= (obj->buffer.pointer[2] << 16);
83 *result |= (obj->buffer.pointer[3] << 24);
84 }
85 }
86
87 kfree(output.pointer);
88 return 0;
89}
90
91static int nouveau_dsm_switch_mux(acpi_handle handle, int mux_id)
92{
93 return nouveau_dsm(handle, NOUVEAU_DSM_LED, mux_id, NULL);
94}
95
96static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switcheroo_state state)
97{
98 int arg;
99 if (state == VGA_SWITCHEROO_ON)
100 arg = NOUVEAU_DSM_POWER_SPEED;
101 else
102 arg = NOUVEAU_DSM_POWER_STAMINA;
103 nouveau_dsm(handle, NOUVEAU_DSM_POWER, arg, NULL);
104 return 0;
105}
106
107static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id)
108{
109 if (id == VGA_SWITCHEROO_IGD)
110 return nouveau_dsm_switch_mux(nouveau_dsm_priv.dsm_handle, NOUVEAU_DSM_LED_STAMINA);
111 else
112 return nouveau_dsm_switch_mux(nouveau_dsm_priv.dsm_handle, NOUVEAU_DSM_LED_SPEED);
113}
114
115static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id,
116 enum vga_switcheroo_state state)
117{
118 if (id == VGA_SWITCHEROO_IGD)
119 return 0;
120
121 return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dsm_handle, state);
122}
123
124static int nouveau_dsm_init(void)
125{
126 return 0;
127}
128
129static int nouveau_dsm_get_client_id(struct pci_dev *pdev)
130{
131 if (nouveau_dsm_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
132 return VGA_SWITCHEROO_IGD;
133 else
134 return VGA_SWITCHEROO_DIS;
135}
136
137static struct vga_switcheroo_handler nouveau_dsm_handler = {
138 .switchto = nouveau_dsm_switchto,
139 .power_state = nouveau_dsm_power_state,
140 .init = nouveau_dsm_init,
141 .get_client_id = nouveau_dsm_get_client_id,
142};
143
144static bool nouveau_dsm_pci_probe(struct pci_dev *pdev)
145{
146 acpi_handle dhandle, nvidia_handle;
147 acpi_status status;
148 int ret;
149 uint32_t result;
150
151 dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
152 if (!dhandle)
153 return false;
154 status = acpi_get_handle(dhandle, "_DSM", &nvidia_handle);
155 if (ACPI_FAILURE(status)) {
156 return false;
157 }
158
159 ret= nouveau_dsm(nvidia_handle, NOUVEAU_DSM_SUPPORTED,
160 NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &result);
161 if (ret < 0)
162 return false;
163
164 nouveau_dsm_priv.dhandle = dhandle;
165 nouveau_dsm_priv.dsm_handle = nvidia_handle;
166 return true;
167}
168
169static bool nouveau_dsm_detect(void)
170{
171 char acpi_method_name[255] = { 0 };
172 struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
173 struct pci_dev *pdev = NULL;
174 int has_dsm = 0;
175 int vga_count = 0;
176 while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
177 vga_count++;
178
179 has_dsm |= (nouveau_dsm_pci_probe(pdev) == true);
180 }
181
182 if (vga_count == 2 && has_dsm) {
183 acpi_get_name(nouveau_dsm_priv.dsm_handle, ACPI_FULL_PATHNAME, &buffer);
184 printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
185 acpi_method_name);
186 nouveau_dsm_priv.dsm_detected = true;
187 return true;
188 }
189 return false;
190}
191
192void nouveau_register_dsm_handler(void)
193{
194 bool r;
195
196 r = nouveau_dsm_detect();
197 if (!r)
198 return;
199
200 vga_switcheroo_register_handler(&nouveau_dsm_handler);
201}
202
203void nouveau_unregister_dsm_handler(void)
204{
205 vga_switcheroo_unregister_handler();
206}
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
new file mode 100644
index 000000000000..406228f4a2a0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -0,0 +1,159 @@
1/*
2 * Copyright (C) 2009 Red Hat <mjg@redhat.com>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining
5 * a copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sublicense, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial
14 * portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
19 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
20 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
21 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
22 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23 *
24 */
25
26/*
27 * Authors:
28 * Matthew Garrett <mjg@redhat.com>
29 *
30 * Register locations derived from NVClock by Roderick Colenbrander
31 */
32
33#include <linux/backlight.h>
34
35#include "drmP.h"
36#include "nouveau_drv.h"
37#include "nouveau_drm.h"
38#include "nouveau_reg.h"
39
40static int nv40_get_intensity(struct backlight_device *bd)
41{
42 struct drm_device *dev = bl_get_data(bd);
43 int val = (nv_rd32(dev, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK)
44 >> 16;
45
46 return val;
47}
48
49static int nv40_set_intensity(struct backlight_device *bd)
50{
51 struct drm_device *dev = bl_get_data(bd);
52 int val = bd->props.brightness;
53 int reg = nv_rd32(dev, NV40_PMC_BACKLIGHT);
54
55 nv_wr32(dev, NV40_PMC_BACKLIGHT,
56 (val << 16) | (reg & ~NV40_PMC_BACKLIGHT_MASK));
57
58 return 0;
59}
60
61static struct backlight_ops nv40_bl_ops = {
62 .options = BL_CORE_SUSPENDRESUME,
63 .get_brightness = nv40_get_intensity,
64 .update_status = nv40_set_intensity,
65};
66
67static int nv50_get_intensity(struct backlight_device *bd)
68{
69 struct drm_device *dev = bl_get_data(bd);
70
71 return nv_rd32(dev, NV50_PDISPLAY_SOR_BACKLIGHT);
72}
73
74static int nv50_set_intensity(struct backlight_device *bd)
75{
76 struct drm_device *dev = bl_get_data(bd);
77 int val = bd->props.brightness;
78
79 nv_wr32(dev, NV50_PDISPLAY_SOR_BACKLIGHT,
80 val | NV50_PDISPLAY_SOR_BACKLIGHT_ENABLE);
81 return 0;
82}
83
84static struct backlight_ops nv50_bl_ops = {
85 .options = BL_CORE_SUSPENDRESUME,
86 .get_brightness = nv50_get_intensity,
87 .update_status = nv50_set_intensity,
88};
89
90static int nouveau_nv40_backlight_init(struct drm_device *dev)
91{
92 struct backlight_properties props;
93 struct drm_nouveau_private *dev_priv = dev->dev_private;
94 struct backlight_device *bd;
95
96 if (!(nv_rd32(dev, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK))
97 return 0;
98
99 memset(&props, 0, sizeof(struct backlight_properties));
100 props.max_brightness = 31;
101 bd = backlight_device_register("nv_backlight", &dev->pdev->dev, dev,
102 &nv40_bl_ops, &props);
103 if (IS_ERR(bd))
104 return PTR_ERR(bd);
105
106 dev_priv->backlight = bd;
107 bd->props.brightness = nv40_get_intensity(bd);
108 backlight_update_status(bd);
109
110 return 0;
111}
112
113static int nouveau_nv50_backlight_init(struct drm_device *dev)
114{
115 struct backlight_properties props;
116 struct drm_nouveau_private *dev_priv = dev->dev_private;
117 struct backlight_device *bd;
118
119 if (!nv_rd32(dev, NV50_PDISPLAY_SOR_BACKLIGHT))
120 return 0;
121
122 memset(&props, 0, sizeof(struct backlight_properties));
123 props.max_brightness = 1025;
124 bd = backlight_device_register("nv_backlight", &dev->pdev->dev, dev,
125 &nv50_bl_ops, &props);
126 if (IS_ERR(bd))
127 return PTR_ERR(bd);
128
129 dev_priv->backlight = bd;
130 bd->props.brightness = nv50_get_intensity(bd);
131 backlight_update_status(bd);
132 return 0;
133}
134
135int nouveau_backlight_init(struct drm_device *dev)
136{
137 struct drm_nouveau_private *dev_priv = dev->dev_private;
138
139 switch (dev_priv->card_type) {
140 case NV_40:
141 return nouveau_nv40_backlight_init(dev);
142 case NV_50:
143 return nouveau_nv50_backlight_init(dev);
144 default:
145 break;
146 }
147
148 return 0;
149}
150
151void nouveau_backlight_exit(struct drm_device *dev)
152{
153 struct drm_nouveau_private *dev_priv = dev->dev_private;
154
155 if (dev_priv->backlight) {
156 backlight_device_unregister(dev_priv->backlight);
157 dev_priv->backlight = NULL;
158 }
159}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
new file mode 100644
index 000000000000..abc382a9918b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -0,0 +1,6127 @@
1/*
2 * Copyright 2005-2006 Erik Waling
3 * Copyright 2006 Stephane Marchesin
4 * Copyright 2007-2009 Stuart Bennett
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
20 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
21 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24
25#include "drmP.h"
26#define NV_DEBUG_NOTRACE
27#include "nouveau_drv.h"
28#include "nouveau_hw.h"
29
30/* these defines are made up */
31#define NV_CIO_CRE_44_HEADA 0x0
32#define NV_CIO_CRE_44_HEADB 0x3
33#define FEATURE_MOBILE 0x10 /* also FEATURE_QUADRO for BMP */
34#define LEGACY_I2C_CRT 0x80
35#define LEGACY_I2C_PANEL 0x81
36#define LEGACY_I2C_TV 0x82
37
38#define EDID1_LEN 128
39
40#define BIOSLOG(sip, fmt, arg...) NV_DEBUG(sip->dev, fmt, ##arg)
41#define LOG_OLD_VALUE(x)
42
43#define ROM16(x) le16_to_cpu(*(uint16_t *)&(x))
44#define ROM32(x) le32_to_cpu(*(uint32_t *)&(x))
45
46struct init_exec {
47 bool execute;
48 bool repeat;
49};
50
51static bool nv_cksum(const uint8_t *data, unsigned int length)
52{
53 /*
54 * There's a few checksums in the BIOS, so here's a generic checking
55 * function.
56 */
57 int i;
58 uint8_t sum = 0;
59
60 for (i = 0; i < length; i++)
61 sum += data[i];
62
63 if (sum)
64 return true;
65
66 return false;
67}
68
69static int
70score_vbios(struct drm_device *dev, const uint8_t *data, const bool writeable)
71{
72 if (!(data[0] == 0x55 && data[1] == 0xAA)) {
73 NV_TRACEWARN(dev, "... BIOS signature not found\n");
74 return 0;
75 }
76
77 if (nv_cksum(data, data[2] * 512)) {
78 NV_TRACEWARN(dev, "... BIOS checksum invalid\n");
79 /* if a ro image is somewhat bad, it's probably all rubbish */
80 return writeable ? 2 : 1;
81 } else
82 NV_TRACE(dev, "... appears to be valid\n");
83
84 return 3;
85}
86
87static void load_vbios_prom(struct drm_device *dev, uint8_t *data)
88{
89 struct drm_nouveau_private *dev_priv = dev->dev_private;
90 uint32_t pci_nv_20, save_pci_nv_20;
91 int pcir_ptr;
92 int i;
93
94 if (dev_priv->card_type >= NV_50)
95 pci_nv_20 = 0x88050;
96 else
97 pci_nv_20 = NV_PBUS_PCI_NV_20;
98
99 /* enable ROM access */
100 save_pci_nv_20 = nvReadMC(dev, pci_nv_20);
101 nvWriteMC(dev, pci_nv_20,
102 save_pci_nv_20 & ~NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED);
103
104 /* bail if no rom signature */
105 if (nv_rd08(dev, NV_PROM_OFFSET) != 0x55 ||
106 nv_rd08(dev, NV_PROM_OFFSET + 1) != 0xaa)
107 goto out;
108
109 /* additional check (see note below) - read PCI record header */
110 pcir_ptr = nv_rd08(dev, NV_PROM_OFFSET + 0x18) |
111 nv_rd08(dev, NV_PROM_OFFSET + 0x19) << 8;
112 if (nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr) != 'P' ||
113 nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr + 1) != 'C' ||
114 nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr + 2) != 'I' ||
115 nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr + 3) != 'R')
116 goto out;
117
118 /* on some 6600GT/6800LE prom reads are messed up. nvclock alleges a
119 * a good read may be obtained by waiting or re-reading (cargocult: 5x)
120 * each byte. we'll hope pramin has something usable instead
121 */
122 for (i = 0; i < NV_PROM_SIZE; i++)
123 data[i] = nv_rd08(dev, NV_PROM_OFFSET + i);
124
125out:
126 /* disable ROM access */
127 nvWriteMC(dev, pci_nv_20,
128 save_pci_nv_20 | NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED);
129}
130
131static void load_vbios_pramin(struct drm_device *dev, uint8_t *data)
132{
133 struct drm_nouveau_private *dev_priv = dev->dev_private;
134 uint32_t old_bar0_pramin = 0;
135 int i;
136
137 if (dev_priv->card_type >= NV_50) {
138 uint32_t vbios_vram = (nv_rd32(dev, 0x619f04) & ~0xff) << 8;
139
140 if (!vbios_vram)
141 vbios_vram = (nv_rd32(dev, 0x1700) << 16) + 0xf0000;
142
143 old_bar0_pramin = nv_rd32(dev, 0x1700);
144 nv_wr32(dev, 0x1700, vbios_vram >> 16);
145 }
146
147 /* bail if no rom signature */
148 if (nv_rd08(dev, NV_PRAMIN_OFFSET) != 0x55 ||
149 nv_rd08(dev, NV_PRAMIN_OFFSET + 1) != 0xaa)
150 goto out;
151
152 for (i = 0; i < NV_PROM_SIZE; i++)
153 data[i] = nv_rd08(dev, NV_PRAMIN_OFFSET + i);
154
155out:
156 if (dev_priv->card_type >= NV_50)
157 nv_wr32(dev, 0x1700, old_bar0_pramin);
158}
159
160static void load_vbios_pci(struct drm_device *dev, uint8_t *data)
161{
162 void __iomem *rom = NULL;
163 size_t rom_len;
164 int ret;
165
166 ret = pci_enable_rom(dev->pdev);
167 if (ret)
168 return;
169
170 rom = pci_map_rom(dev->pdev, &rom_len);
171 if (!rom)
172 goto out;
173 memcpy_fromio(data, rom, rom_len);
174 pci_unmap_rom(dev->pdev, rom);
175
176out:
177 pci_disable_rom(dev->pdev);
178}
179
180struct methods {
181 const char desc[8];
182 void (*loadbios)(struct drm_device *, uint8_t *);
183 const bool rw;
184};
185
186static struct methods nv04_methods[] = {
187 { "PROM", load_vbios_prom, false },
188 { "PRAMIN", load_vbios_pramin, true },
189 { "PCIROM", load_vbios_pci, true },
190};
191
192static struct methods nv50_methods[] = {
193 { "PRAMIN", load_vbios_pramin, true },
194 { "PROM", load_vbios_prom, false },
195 { "PCIROM", load_vbios_pci, true },
196};
197
198#define METHODCNT 3
199
200static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data)
201{
202 struct drm_nouveau_private *dev_priv = dev->dev_private;
203 struct methods *methods;
204 int i;
205 int testscore = 3;
206 int scores[METHODCNT];
207
208 if (nouveau_vbios) {
209 methods = nv04_methods;
210 for (i = 0; i < METHODCNT; i++)
211 if (!strcasecmp(nouveau_vbios, methods[i].desc))
212 break;
213
214 if (i < METHODCNT) {
215 NV_INFO(dev, "Attempting to use BIOS image from %s\n",
216 methods[i].desc);
217
218 methods[i].loadbios(dev, data);
219 if (score_vbios(dev, data, methods[i].rw))
220 return true;
221 }
222
223 NV_ERROR(dev, "VBIOS source \'%s\' invalid\n", nouveau_vbios);
224 }
225
226 if (dev_priv->card_type < NV_50)
227 methods = nv04_methods;
228 else
229 methods = nv50_methods;
230
231 for (i = 0; i < METHODCNT; i++) {
232 NV_TRACE(dev, "Attempting to load BIOS image from %s\n",
233 methods[i].desc);
234 data[0] = data[1] = 0; /* avoid reuse of previous image */
235 methods[i].loadbios(dev, data);
236 scores[i] = score_vbios(dev, data, methods[i].rw);
237 if (scores[i] == testscore)
238 return true;
239 }
240
241 while (--testscore > 0) {
242 for (i = 0; i < METHODCNT; i++) {
243 if (scores[i] == testscore) {
244 NV_TRACE(dev, "Using BIOS image from %s\n",
245 methods[i].desc);
246 methods[i].loadbios(dev, data);
247 return true;
248 }
249 }
250 }
251
252 NV_ERROR(dev, "No valid BIOS image found\n");
253 return false;
254}
255
256struct init_tbl_entry {
257 char *name;
258 uint8_t id;
259 int (*handler)(struct nvbios *, uint16_t, struct init_exec *);
260};
261
262struct bit_entry {
263 uint8_t id[2];
264 uint16_t length;
265 uint16_t offset;
266};
267
268static int parse_init_table(struct nvbios *, unsigned int, struct init_exec *);
269
270#define MACRO_INDEX_SIZE 2
271#define MACRO_SIZE 8
272#define CONDITION_SIZE 12
273#define IO_FLAG_CONDITION_SIZE 9
274#define IO_CONDITION_SIZE 5
275#define MEM_INIT_SIZE 66
276
277static void still_alive(void)
278{
279#if 0
280 sync();
281 msleep(2);
282#endif
283}
284
285static uint32_t
286munge_reg(struct nvbios *bios, uint32_t reg)
287{
288 struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
289 struct dcb_entry *dcbent = bios->display.output;
290
291 if (dev_priv->card_type < NV_50)
292 return reg;
293
294 if (reg & 0x40000000) {
295 BUG_ON(!dcbent);
296
297 reg += (ffs(dcbent->or) - 1) * 0x800;
298 if ((reg & 0x20000000) && !(dcbent->sorconf.link & 1))
299 reg += 0x00000080;
300 }
301
302 reg &= ~0x60000000;
303 return reg;
304}
305
306static int
307valid_reg(struct nvbios *bios, uint32_t reg)
308{
309 struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
310 struct drm_device *dev = bios->dev;
311
312 /* C51 has misaligned regs on purpose. Marvellous */
313 if (reg & 0x2 ||
314 (reg & 0x1 && dev_priv->vbios.chip_version != 0x51))
315 NV_ERROR(dev, "======= misaligned reg 0x%08X =======\n", reg);
316
317 /* warn on C51 regs that haven't been verified accessible in tracing */
318 if (reg & 0x1 && dev_priv->vbios.chip_version == 0x51 &&
319 reg != 0x130d && reg != 0x1311 && reg != 0x60081d)
320 NV_WARN(dev, "=== C51 misaligned reg 0x%08X not verified ===\n",
321 reg);
322
323 if (reg >= (8*1024*1024)) {
324 NV_ERROR(dev, "=== reg 0x%08x out of mapped bounds ===\n", reg);
325 return 0;
326 }
327
328 return 1;
329}
330
331static bool
332valid_idx_port(struct nvbios *bios, uint16_t port)
333{
334 struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
335 struct drm_device *dev = bios->dev;
336
337 /*
338 * If adding more ports here, the read/write functions below will need
339 * updating so that the correct mmio range (PRMCIO, PRMDIO, PRMVIO) is
340 * used for the port in question
341 */
342 if (dev_priv->card_type < NV_50) {
343 if (port == NV_CIO_CRX__COLOR)
344 return true;
345 if (port == NV_VIO_SRX)
346 return true;
347 } else {
348 if (port == NV_CIO_CRX__COLOR)
349 return true;
350 }
351
352 NV_ERROR(dev, "========== unknown indexed io port 0x%04X ==========\n",
353 port);
354
355 return false;
356}
357
358static bool
359valid_port(struct nvbios *bios, uint16_t port)
360{
361 struct drm_device *dev = bios->dev;
362
363 /*
364 * If adding more ports here, the read/write functions below will need
365 * updating so that the correct mmio range (PRMCIO, PRMDIO, PRMVIO) is
366 * used for the port in question
367 */
368 if (port == NV_VIO_VSE2)
369 return true;
370
371 NV_ERROR(dev, "========== unknown io port 0x%04X ==========\n", port);
372
373 return false;
374}
375
376static uint32_t
377bios_rd32(struct nvbios *bios, uint32_t reg)
378{
379 uint32_t data;
380
381 reg = munge_reg(bios, reg);
382 if (!valid_reg(bios, reg))
383 return 0;
384
385 /*
386 * C51 sometimes uses regs with bit0 set in the address. For these
387 * cases there should exist a translation in a BIOS table to an IO
388 * port address which the BIOS uses for accessing the reg
389 *
390 * These only seem to appear for the power control regs to a flat panel,
391 * and the GPIO regs at 0x60081*. In C51 mmio traces the normal regs
392 * for 0x1308 and 0x1310 are used - hence the mask below. An S3
393 * suspend-resume mmio trace from a C51 will be required to see if this
394 * is true for the power microcode in 0x14.., or whether the direct IO
395 * port access method is needed
396 */
397 if (reg & 0x1)
398 reg &= ~0x1;
399
400 data = nv_rd32(bios->dev, reg);
401
402 BIOSLOG(bios, " Read: Reg: 0x%08X, Data: 0x%08X\n", reg, data);
403
404 return data;
405}
406
407static void
408bios_wr32(struct nvbios *bios, uint32_t reg, uint32_t data)
409{
410 struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
411
412 reg = munge_reg(bios, reg);
413 if (!valid_reg(bios, reg))
414 return;
415
416 /* see note in bios_rd32 */
417 if (reg & 0x1)
418 reg &= 0xfffffffe;
419
420 LOG_OLD_VALUE(bios_rd32(bios, reg));
421 BIOSLOG(bios, " Write: Reg: 0x%08X, Data: 0x%08X\n", reg, data);
422
423 if (dev_priv->vbios.execute) {
424 still_alive();
425 nv_wr32(bios->dev, reg, data);
426 }
427}
428
429static uint8_t
430bios_idxprt_rd(struct nvbios *bios, uint16_t port, uint8_t index)
431{
432 struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
433 struct drm_device *dev = bios->dev;
434 uint8_t data;
435
436 if (!valid_idx_port(bios, port))
437 return 0;
438
439 if (dev_priv->card_type < NV_50) {
440 if (port == NV_VIO_SRX)
441 data = NVReadVgaSeq(dev, bios->state.crtchead, index);
442 else /* assume NV_CIO_CRX__COLOR */
443 data = NVReadVgaCrtc(dev, bios->state.crtchead, index);
444 } else {
445 uint32_t data32;
446
447 data32 = bios_rd32(bios, NV50_PDISPLAY_VGACRTC(index & ~3));
448 data = (data32 >> ((index & 3) << 3)) & 0xff;
449 }
450
451 BIOSLOG(bios, " Indexed IO read: Port: 0x%04X, Index: 0x%02X, "
452 "Head: 0x%02X, Data: 0x%02X\n",
453 port, index, bios->state.crtchead, data);
454 return data;
455}
456
/*
 * Write "data" to indexed VGA I/O port "port"/"index", routing the access
 * either through the VGA sequencer/CRTC of the head tracked in
 * bios->state.crtchead (pre-NV50) or through the NV50 PDISPLAY mirror of
 * the VGA CRTC registers.  Writes to CR44 additionally update the tracked
 * head, so the ordering of the head updates around the write is critical.
 */
static void
bios_idxprt_wr(struct nvbios *bios, uint16_t port, uint8_t index, uint8_t data)
{
	struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
	struct drm_device *dev = bios->dev;

	if (!valid_idx_port(bios, port))
		return;

	/*
	 * The current head is maintained in the nvbios member state.crtchead.
	 * We trap changes to CR44 and update the head variable and hence the
	 * register set written.
	 * As CR44 only exists on CRTC0, we update crtchead to head0 in advance
	 * of the write, and to head1 after the write
	 */
	if (port == NV_CIO_CRX__COLOR && index == NV_CIO_CRE_44 &&
	    data != NV_CIO_CRE_44_HEADB)
		bios->state.crtchead = 0;

	LOG_OLD_VALUE(bios_idxprt_rd(bios, port, index));
	BIOSLOG(bios, "	Indexed IO write: Port: 0x%04X, Index: 0x%02X, "
		      "Head: 0x%02X, Data: 0x%02X\n",
		port, index, bios->state.crtchead, data);

	if (bios->execute && dev_priv->card_type < NV_50) {
		still_alive();
		if (port == NV_VIO_SRX)
			NVWriteVgaSeq(dev, bios->state.crtchead, index, data);
		else	/* assume NV_CIO_CRX__COLOR */
			NVWriteVgaCrtc(dev, bios->state.crtchead, index, data);
	} else
	if (bios->execute) {
		/* NV50+: read-modify-write the byte inside the packed 32-bit
		 * PDISPLAY mirror of the VGA CRTC registers */
		uint32_t data32, shift = (index & 3) << 3;

		still_alive();

		data32 = bios_rd32(bios, NV50_PDISPLAY_VGACRTC(index & ~3));
		data32 &= ~(0xff << shift);
		data32 |= (data << shift);
		bios_wr32(bios, NV50_PDISPLAY_VGACRTC(index & ~3), data32);
	}

	/* switch to head1 only after the write above went to head0's CR44 */
	if (port == NV_CIO_CRX__COLOR &&
	    index == NV_CIO_CRE_44 && data == NV_CIO_CRE_44_HEADB)
		bios->state.crtchead = 1;
}
504
505static uint8_t
506bios_port_rd(struct nvbios *bios, uint16_t port)
507{
508 uint8_t data, head = bios->state.crtchead;
509
510 if (!valid_port(bios, port))
511 return 0;
512
513 data = NVReadPRMVIO(bios->dev, head, NV_PRMVIO0_OFFSET + port);
514
515 BIOSLOG(bios, " IO read: Port: 0x%04X, Head: 0x%02X, Data: 0x%02X\n",
516 port, head, data);
517
518 return data;
519}
520
521static void
522bios_port_wr(struct nvbios *bios, uint16_t port, uint8_t data)
523{
524 int head = bios->state.crtchead;
525
526 if (!valid_port(bios, port))
527 return;
528
529 LOG_OLD_VALUE(bios_port_rd(bios, port));
530 BIOSLOG(bios, " IO write: Port: 0x%04X, Head: 0x%02X, Data: 0x%02X\n",
531 port, head, data);
532
533 if (!bios->execute)
534 return;
535
536 still_alive();
537 NVWritePRMVIO(bios->dev, head, NV_PRMVIO0_OFFSET + port, data);
538}
539
540static bool
541io_flag_condition_met(struct nvbios *bios, uint16_t offset, uint8_t cond)
542{
543 /*
544 * The IO flag condition entry has 2 bytes for the CRTC port; 1 byte
545 * for the CRTC index; 1 byte for the mask to apply to the value
546 * retrieved from the CRTC; 1 byte for the shift right to apply to the
547 * masked CRTC value; 2 bytes for the offset to the flag array, to
548 * which the shifted value is added; 1 byte for the mask applied to the
549 * value read from the flag array; and 1 byte for the value to compare
550 * against the masked byte from the flag table.
551 */
552
553 uint16_t condptr = bios->io_flag_condition_tbl_ptr + cond * IO_FLAG_CONDITION_SIZE;
554 uint16_t crtcport = ROM16(bios->data[condptr]);
555 uint8_t crtcindex = bios->data[condptr + 2];
556 uint8_t mask = bios->data[condptr + 3];
557 uint8_t shift = bios->data[condptr + 4];
558 uint16_t flagarray = ROM16(bios->data[condptr + 5]);
559 uint8_t flagarraymask = bios->data[condptr + 7];
560 uint8_t cmpval = bios->data[condptr + 8];
561 uint8_t data;
562
563 BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
564 "Shift: 0x%02X, FlagArray: 0x%04X, FAMask: 0x%02X, "
565 "Cmpval: 0x%02X\n",
566 offset, crtcport, crtcindex, mask, shift, flagarray, flagarraymask, cmpval);
567
568 data = bios_idxprt_rd(bios, crtcport, crtcindex);
569
570 data = bios->data[flagarray + ((data & mask) >> shift)];
571 data &= flagarraymask;
572
573 BIOSLOG(bios, "0x%04X: Checking if 0x%02X equals 0x%02X\n",
574 offset, data, cmpval);
575
576 return (data == cmpval);
577}
578
579static bool
580bios_condition_met(struct nvbios *bios, uint16_t offset, uint8_t cond)
581{
582 /*
583 * The condition table entry has 4 bytes for the address of the
584 * register to check, 4 bytes for a mask to apply to the register and
585 * 4 for a test comparison value
586 */
587
588 uint16_t condptr = bios->condition_tbl_ptr + cond * CONDITION_SIZE;
589 uint32_t reg = ROM32(bios->data[condptr]);
590 uint32_t mask = ROM32(bios->data[condptr + 4]);
591 uint32_t cmpval = ROM32(bios->data[condptr + 8]);
592 uint32_t data;
593
594 BIOSLOG(bios, "0x%04X: Cond: 0x%02X, Reg: 0x%08X, Mask: 0x%08X\n",
595 offset, cond, reg, mask);
596
597 data = bios_rd32(bios, reg) & mask;
598
599 BIOSLOG(bios, "0x%04X: Checking if 0x%08X equals 0x%08X\n",
600 offset, data, cmpval);
601
602 return (data == cmpval);
603}
604
605static bool
606io_condition_met(struct nvbios *bios, uint16_t offset, uint8_t cond)
607{
608 /*
609 * The IO condition entry has 2 bytes for the IO port address; 1 byte
610 * for the index to write to io_port; 1 byte for the mask to apply to
611 * the byte read from io_port+1; and 1 byte for the value to compare
612 * against the masked byte.
613 */
614
615 uint16_t condptr = bios->io_condition_tbl_ptr + cond * IO_CONDITION_SIZE;
616 uint16_t io_port = ROM16(bios->data[condptr]);
617 uint8_t port_index = bios->data[condptr + 2];
618 uint8_t mask = bios->data[condptr + 3];
619 uint8_t cmpval = bios->data[condptr + 4];
620
621 uint8_t data = bios_idxprt_rd(bios, io_port, port_index) & mask;
622
623 BIOSLOG(bios, "0x%04X: Checking if 0x%02X equals 0x%02X\n",
624 offset, data, cmpval);
625
626 return (data == cmpval);
627}
628
/*
 * Program the NV50-style PLL at "reg" to "clk" (kHz).
 *
 * Looks up the BIOS-provided limits for this PLL, computes N/M/P
 * coefficients, and (when script execution is enabled) writes the
 * coefficient register (reg + 4) before the control register (reg + 0).
 * Returns 0 on success, -ERANGE when no coefficients exist for "clk", or
 * the error from get_pll_limits().
 */
static int
nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t reg0 = nv_rd32(dev, reg + 0);
	uint32_t reg1 = nv_rd32(dev, reg + 4);
	struct nouveau_pll_vals pll;
	struct pll_lims pll_limits;
	int ret;

	ret = get_pll_limits(dev, reg, &pll_limits);
	if (ret)
		return ret;

	/* returns the achieved clock, or 0 on failure */
	clk = nouveau_calc_pll_mnp(dev, &pll_limits, clk, &pll);
	if (!clk)
		return -ERANGE;

	/* merge new log2P into reg+0, new N1/M1 into reg+4 */
	reg0 = (reg0 & 0xfff8ffff) | (pll.log2P << 16);
	reg1 = (reg1 & 0xffff0000) | (pll.N1 << 8) | pll.M1;

	if (dev_priv->vbios.execute) {
		still_alive();
		/* coefficients first, then the control word */
		nv_wr32(dev, reg + 4, reg1);
		nv_wr32(dev, reg + 0, reg0);
	}

	return 0;
}
658
659static int
660setPLL(struct nvbios *bios, uint32_t reg, uint32_t clk)
661{
662 struct drm_device *dev = bios->dev;
663 struct drm_nouveau_private *dev_priv = dev->dev_private;
664 /* clk in kHz */
665 struct pll_lims pll_lim;
666 struct nouveau_pll_vals pllvals;
667 int ret;
668
669 if (dev_priv->card_type >= NV_50)
670 return nv50_pll_set(dev, reg, clk);
671
672 /* high regs (such as in the mac g5 table) are not -= 4 */
673 ret = get_pll_limits(dev, reg > 0x405c ? reg : reg - 4, &pll_lim);
674 if (ret)
675 return ret;
676
677 clk = nouveau_calc_pll_mnp(dev, &pll_lim, clk, &pllvals);
678 if (!clk)
679 return -ERANGE;
680
681 if (bios->execute) {
682 still_alive();
683 nouveau_hw_setpll(dev, reg, &pllvals);
684 }
685
686 return 0;
687}
688
689static int dcb_entry_idx_from_crtchead(struct drm_device *dev)
690{
691 struct drm_nouveau_private *dev_priv = dev->dev_private;
692 struct nvbios *bios = &dev_priv->vbios;
693
694 /*
695 * For the results of this function to be correct, CR44 must have been
696 * set (using bios_idxprt_wr to set crtchead), CR58 set for CR57 = 0,
697 * and the DCB table parsed, before the script calling the function is
698 * run. run_digital_op_script is example of how to do such setup
699 */
700
701 uint8_t dcb_entry = NVReadVgaCrtc5758(dev, bios->state.crtchead, 0);
702
703 if (dcb_entry > bios->dcb.entries) {
704 NV_ERROR(dev, "CR58 doesn't have a valid DCB entry currently "
705 "(%02X)\n", dcb_entry);
706 dcb_entry = 0x7f; /* unused / invalid marker */
707 }
708
709 return dcb_entry;
710}
711
712static struct nouveau_i2c_chan *
713init_i2c_device_find(struct drm_device *dev, int i2c_index)
714{
715 struct drm_nouveau_private *dev_priv = dev->dev_private;
716 struct dcb_table *dcb = &dev_priv->vbios.dcb;
717
718 if (i2c_index == 0xff) {
719 /* note: dcb_entry_idx_from_crtchead needs pre-script set-up */
720 int idx = dcb_entry_idx_from_crtchead(dev), shift = 0;
721 int default_indices = dcb->i2c_default_indices;
722
723 if (idx != 0x7f && dcb->entry[idx].i2c_upper_default)
724 shift = 4;
725
726 i2c_index = (default_indices >> shift) & 0xf;
727 }
728 if (i2c_index == 0x80) /* g80+ */
729 i2c_index = dcb->i2c_default_indices & 0xf;
730
731 return nouveau_i2c_find(dev, i2c_index);
732}
733
/*
 * Translate the "magic lookup value" used by the TMDS opcodes into the
 * MMIO address of a TMDS index register.  Returns 0 on failure (mlv out
 * of range, or no valid DCB entry currently latched via CR58).
 */
static uint32_t
get_tmds_index_reg(struct drm_device *dev, uint8_t mlv)
{
	/*
	 * For mlv < 0x80, it is an index into a table of TMDS base addresses.
	 * For mlv == 0x80 use the "or" value of the dcb_entry indexed by
	 * CR58 for CR57 = 0 to index a table of offsets to the basic
	 * 0x6808b0 address.
	 * For mlv == 0x81 use the "or" value of the dcb_entry indexed by
	 * CR58 for CR57 = 0 to index a table of offsets to the basic
	 * 0x6808b0 address, and then flip the offset by 8.
	 */

	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvbios *bios = &dev_priv->vbios;
	/* offsets from 0x6808b0, indexed by the DCB entry's "or" value */
	const int pramdac_offset[13] = {
		0, 0, 0x8, 0, 0x2000, 0, 0, 0, 0x2008, 0, 0, 0, 0x2000 };
	const uint32_t pramdac_table[4] = {
		0x6808b0, 0x6808b8, 0x6828b0, 0x6828b8 };

	if (mlv >= 0x80) {
		int dcb_entry, dacoffset;

		/* note: dcb_entry_idx_from_crtchead needs pre-script set-up */
		dcb_entry = dcb_entry_idx_from_crtchead(dev);
		if (dcb_entry == 0x7f)
			return 0;
		dacoffset = pramdac_offset[bios->dcb.entry[dcb_entry].or];
		if (mlv == 0x81)
			dacoffset ^= 8;
		return 0x6808b0 + dacoffset;
	} else {
		if (mlv >= ARRAY_SIZE(pramdac_table)) {
			NV_ERROR(dev, "Magic Lookup Value too big (%02X)\n",
									mlv);
			return 0;
		}
		return pramdac_table[mlv];
	}
}
774
775static int
776init_io_restrict_prog(struct nvbios *bios, uint16_t offset,
777 struct init_exec *iexec)
778{
779 /*
780 * INIT_IO_RESTRICT_PROG opcode: 0x32 ('2')
781 *
782 * offset (8 bit): opcode
783 * offset + 1 (16 bit): CRTC port
784 * offset + 3 (8 bit): CRTC index
785 * offset + 4 (8 bit): mask
786 * offset + 5 (8 bit): shift
787 * offset + 6 (8 bit): count
788 * offset + 7 (32 bit): register
789 * offset + 11 (32 bit): configuration 1
790 * ...
791 *
792 * Starting at offset + 11 there are "count" 32 bit values.
793 * To find out which value to use read index "CRTC index" on "CRTC
794 * port", AND this value with "mask" and then bit shift right "shift"
795 * bits. Read the appropriate value using this index and write to
796 * "register"
797 */
798
799 uint16_t crtcport = ROM16(bios->data[offset + 1]);
800 uint8_t crtcindex = bios->data[offset + 3];
801 uint8_t mask = bios->data[offset + 4];
802 uint8_t shift = bios->data[offset + 5];
803 uint8_t count = bios->data[offset + 6];
804 uint32_t reg = ROM32(bios->data[offset + 7]);
805 uint8_t config;
806 uint32_t configval;
807 int len = 11 + count * 4;
808
809 if (!iexec->execute)
810 return len;
811
812 BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
813 "Shift: 0x%02X, Count: 0x%02X, Reg: 0x%08X\n",
814 offset, crtcport, crtcindex, mask, shift, count, reg);
815
816 config = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) >> shift;
817 if (config > count) {
818 NV_ERROR(bios->dev,
819 "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
820 offset, config, count);
821 return 0;
822 }
823
824 configval = ROM32(bios->data[offset + 11 + config * 4]);
825
826 BIOSLOG(bios, "0x%04X: Writing config %02X\n", offset, config);
827
828 bios_wr32(bios, reg, configval);
829
830 return len;
831}
832
static int
init_repeat(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
	/*
	 * INIT_REPEAT opcode: 0x33 ('3')
	 *
	 * offset      (8 bit): opcode
	 * offset + 1  (8 bit): count
	 *
	 * Execute script following this opcode up to INIT_REPEAT_END
	 * "count" times
	 */

	uint8_t count = bios->data[offset + 1];
	uint8_t i;

	/* no iexec->execute check by design */

	BIOSLOG(bios, "0x%04X: Repeating following segment %d times\n",
		offset, count);

	/* makes init_end_repeat() return 0, terminating each inner pass */
	iexec->repeat = true;

	/*
	 * count - 1, as the script block will execute once when we leave this
	 * opcode -- this is compatible with bios behaviour as:
	 * a) the block is always executed at least once, even if count == 0
	 * b) the bios interpreter skips to the op following INIT_END_REPEAT,
	 * while we don't
	 */
	/* note: count promotes to int, so count == 0 yields "i < -1" and the
	 * loop body never runs here (the single pass happens on return) */
	for (i = 0; i < count - 1; i++)
		parse_init_table(bios, offset + 2, iexec);

	iexec->repeat = false;

	/* only the opcode + count byte are consumed; the caller falls
	 * through into the repeated block for the final pass */
	return 2;
}
870
static int
init_io_restrict_pll(struct nvbios *bios, uint16_t offset,
		     struct init_exec *iexec)
{
	/*
	 * INIT_IO_RESTRICT_PLL opcode: 0x34 ('4')
	 *
	 * offset      (8  bit): opcode
	 * offset + 1  (16 bit): CRTC port
	 * offset + 3  (8  bit): CRTC index
	 * offset + 4  (8  bit): mask
	 * offset + 5  (8  bit): shift
	 * offset + 6  (8  bit): IO flag condition index
	 * offset + 7  (8  bit): count
	 * offset + 8  (32 bit): register
	 * offset + 12 (16 bit): frequency 1
	 * ...
	 *
	 * Starting at offset + 12 there are "count" 16 bit frequencies (10kHz).
	 * Set PLL register "register" to coefficients for frequency n,
	 * selected by reading index "CRTC index" of "CRTC port" ANDed with
	 * "mask" and shifted right by "shift".
	 *
	 * If "IO flag condition index" > 0, and condition met, double
	 * frequency before setting it.
	 */

	uint16_t crtcport = ROM16(bios->data[offset + 1]);
	uint8_t crtcindex = bios->data[offset + 3];
	uint8_t mask = bios->data[offset + 4];
	uint8_t shift = bios->data[offset + 5];
	/* signed: only strictly positive indices trigger the condition test */
	int8_t io_flag_condition_idx = bios->data[offset + 6];
	uint8_t count = bios->data[offset + 7];
	uint32_t reg = ROM32(bios->data[offset + 8]);
	uint8_t config;
	uint16_t freq;
	int len = 12 + count * 2;

	if (!iexec->execute)
		return len;

	BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
		      "Shift: 0x%02X, IO Flag Condition: 0x%02X, "
		      "Count: 0x%02X, Reg: 0x%08X\n",
		offset, crtcport, crtcindex, mask, shift,
		io_flag_condition_idx, count, reg);

	/* selector into the frequency table; abort the whole table when the
	 * hardware reports a value beyond what this opcode provides */
	config = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) >> shift;
	if (config > count) {
		NV_ERROR(bios->dev,
			 "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
			 offset, config, count);
		return 0;
	}

	/* frequencies are stored in units of 10 kHz */
	freq = ROM16(bios->data[offset + 12 + config * 2]);

	if (io_flag_condition_idx > 0) {
		if (io_flag_condition_met(bios, offset, io_flag_condition_idx)) {
			BIOSLOG(bios, "0x%04X: Condition fulfilled -- "
				      "frequency doubled\n", offset);
			freq *= 2;
		} else
			BIOSLOG(bios, "0x%04X: Condition not fulfilled -- "
				      "frequency unchanged\n", offset);
	}

	BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Config: 0x%02X, Freq: %d0kHz\n",
		offset, reg, config, freq);

	setPLL(bios, reg, freq * 10);

	return len;
}
945
946static int
947init_end_repeat(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
948{
949 /*
950 * INIT_END_REPEAT opcode: 0x36 ('6')
951 *
952 * offset (8 bit): opcode
953 *
954 * Marks the end of the block for INIT_REPEAT to repeat
955 */
956
957 /* no iexec->execute check by design */
958
959 /*
960 * iexec->repeat flag necessary to go past INIT_END_REPEAT opcode when
961 * we're not in repeat mode
962 */
963 if (iexec->repeat)
964 return 0;
965
966 return 1;
967}
968
969static int
970init_copy(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
971{
972 /*
973 * INIT_COPY opcode: 0x37 ('7')
974 *
975 * offset (8 bit): opcode
976 * offset + 1 (32 bit): register
977 * offset + 5 (8 bit): shift
978 * offset + 6 (8 bit): srcmask
979 * offset + 7 (16 bit): CRTC port
980 * offset + 9 (8 bit): CRTC index
981 * offset + 10 (8 bit): mask
982 *
983 * Read index "CRTC index" on "CRTC port", AND with "mask", OR with
984 * (REGVAL("register") >> "shift" & "srcmask") and write-back to CRTC
985 * port
986 */
987
988 uint32_t reg = ROM32(bios->data[offset + 1]);
989 uint8_t shift = bios->data[offset + 5];
990 uint8_t srcmask = bios->data[offset + 6];
991 uint16_t crtcport = ROM16(bios->data[offset + 7]);
992 uint8_t crtcindex = bios->data[offset + 9];
993 uint8_t mask = bios->data[offset + 10];
994 uint32_t data;
995 uint8_t crtcdata;
996
997 if (!iexec->execute)
998 return 11;
999
1000 BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Shift: 0x%02X, SrcMask: 0x%02X, "
1001 "Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X\n",
1002 offset, reg, shift, srcmask, crtcport, crtcindex, mask);
1003
1004 data = bios_rd32(bios, reg);
1005
1006 if (shift < 0x80)
1007 data >>= shift;
1008 else
1009 data <<= (0x100 - shift);
1010
1011 data &= srcmask;
1012
1013 crtcdata = bios_idxprt_rd(bios, crtcport, crtcindex) & mask;
1014 crtcdata |= (uint8_t)data;
1015 bios_idxprt_wr(bios, crtcport, crtcindex, crtcdata);
1016
1017 return 11;
1018}
1019
1020static int
1021init_not(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1022{
1023 /*
1024 * INIT_NOT opcode: 0x38 ('8')
1025 *
1026 * offset (8 bit): opcode
1027 *
1028 * Invert the current execute / no-execute condition (i.e. "else")
1029 */
1030 if (iexec->execute)
1031 BIOSLOG(bios, "0x%04X: ------ Skipping following commands ------\n", offset);
1032 else
1033 BIOSLOG(bios, "0x%04X: ------ Executing following commands ------\n", offset);
1034
1035 iexec->execute = !iexec->execute;
1036 return 1;
1037}
1038
1039static int
1040init_io_flag_condition(struct nvbios *bios, uint16_t offset,
1041 struct init_exec *iexec)
1042{
1043 /*
1044 * INIT_IO_FLAG_CONDITION opcode: 0x39 ('9')
1045 *
1046 * offset (8 bit): opcode
1047 * offset + 1 (8 bit): condition number
1048 *
1049 * Check condition "condition number" in the IO flag condition table.
1050 * If condition not met skip subsequent opcodes until condition is
1051 * inverted (INIT_NOT), or we hit INIT_RESUME
1052 */
1053
1054 uint8_t cond = bios->data[offset + 1];
1055
1056 if (!iexec->execute)
1057 return 2;
1058
1059 if (io_flag_condition_met(bios, offset, cond))
1060 BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset);
1061 else {
1062 BIOSLOG(bios, "0x%04X: Condition not fulfilled -- skipping following commands\n", offset);
1063 iexec->execute = false;
1064 }
1065
1066 return 2;
1067}
1068
1069static int
1070init_idx_addr_latched(struct nvbios *bios, uint16_t offset,
1071 struct init_exec *iexec)
1072{
1073 /*
1074 * INIT_INDEX_ADDRESS_LATCHED opcode: 0x49 ('I')
1075 *
1076 * offset (8 bit): opcode
1077 * offset + 1 (32 bit): control register
1078 * offset + 5 (32 bit): data register
1079 * offset + 9 (32 bit): mask
1080 * offset + 13 (32 bit): data
1081 * offset + 17 (8 bit): count
1082 * offset + 18 (8 bit): address 1
1083 * offset + 19 (8 bit): data 1
1084 * ...
1085 *
1086 * For each of "count" address and data pairs, write "data n" to
1087 * "data register", read the current value of "control register",
1088 * and write it back once ANDed with "mask", ORed with "data",
1089 * and ORed with "address n"
1090 */
1091
1092 uint32_t controlreg = ROM32(bios->data[offset + 1]);
1093 uint32_t datareg = ROM32(bios->data[offset + 5]);
1094 uint32_t mask = ROM32(bios->data[offset + 9]);
1095 uint32_t data = ROM32(bios->data[offset + 13]);
1096 uint8_t count = bios->data[offset + 17];
1097 int len = 18 + count * 2;
1098 uint32_t value;
1099 int i;
1100
1101 if (!iexec->execute)
1102 return len;
1103
1104 BIOSLOG(bios, "0x%04X: ControlReg: 0x%08X, DataReg: 0x%08X, "
1105 "Mask: 0x%08X, Data: 0x%08X, Count: 0x%02X\n",
1106 offset, controlreg, datareg, mask, data, count);
1107
1108 for (i = 0; i < count; i++) {
1109 uint8_t instaddress = bios->data[offset + 18 + i * 2];
1110 uint8_t instdata = bios->data[offset + 19 + i * 2];
1111
1112 BIOSLOG(bios, "0x%04X: Address: 0x%02X, Data: 0x%02X\n",
1113 offset, instaddress, instdata);
1114
1115 bios_wr32(bios, datareg, instdata);
1116 value = bios_rd32(bios, controlreg) & mask;
1117 value |= data;
1118 value |= instaddress;
1119 bios_wr32(bios, controlreg, value);
1120 }
1121
1122 return len;
1123}
1124
1125static int
1126init_io_restrict_pll2(struct nvbios *bios, uint16_t offset,
1127 struct init_exec *iexec)
1128{
1129 /*
1130 * INIT_IO_RESTRICT_PLL2 opcode: 0x4A ('J')
1131 *
1132 * offset (8 bit): opcode
1133 * offset + 1 (16 bit): CRTC port
1134 * offset + 3 (8 bit): CRTC index
1135 * offset + 4 (8 bit): mask
1136 * offset + 5 (8 bit): shift
1137 * offset + 6 (8 bit): count
1138 * offset + 7 (32 bit): register
1139 * offset + 11 (32 bit): frequency 1
1140 * ...
1141 *
1142 * Starting at offset + 11 there are "count" 32 bit frequencies (kHz).
1143 * Set PLL register "register" to coefficients for frequency n,
1144 * selected by reading index "CRTC index" of "CRTC port" ANDed with
1145 * "mask" and shifted right by "shift".
1146 */
1147
1148 uint16_t crtcport = ROM16(bios->data[offset + 1]);
1149 uint8_t crtcindex = bios->data[offset + 3];
1150 uint8_t mask = bios->data[offset + 4];
1151 uint8_t shift = bios->data[offset + 5];
1152 uint8_t count = bios->data[offset + 6];
1153 uint32_t reg = ROM32(bios->data[offset + 7]);
1154 int len = 11 + count * 4;
1155 uint8_t config;
1156 uint32_t freq;
1157
1158 if (!iexec->execute)
1159 return len;
1160
1161 BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
1162 "Shift: 0x%02X, Count: 0x%02X, Reg: 0x%08X\n",
1163 offset, crtcport, crtcindex, mask, shift, count, reg);
1164
1165 if (!reg)
1166 return len;
1167
1168 config = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) >> shift;
1169 if (config > count) {
1170 NV_ERROR(bios->dev,
1171 "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
1172 offset, config, count);
1173 return 0;
1174 }
1175
1176 freq = ROM32(bios->data[offset + 11 + config * 4]);
1177
1178 BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Config: 0x%02X, Freq: %dkHz\n",
1179 offset, reg, config, freq);
1180
1181 setPLL(bios, reg, freq);
1182
1183 return len;
1184}
1185
1186static int
1187init_pll2(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1188{
1189 /*
1190 * INIT_PLL2 opcode: 0x4B ('K')
1191 *
1192 * offset (8 bit): opcode
1193 * offset + 1 (32 bit): register
1194 * offset + 5 (32 bit): freq
1195 *
1196 * Set PLL register "register" to coefficients for frequency "freq"
1197 */
1198
1199 uint32_t reg = ROM32(bios->data[offset + 1]);
1200 uint32_t freq = ROM32(bios->data[offset + 5]);
1201
1202 if (!iexec->execute)
1203 return 9;
1204
1205 BIOSLOG(bios, "0x%04X: Reg: 0x%04X, Freq: %dkHz\n",
1206 offset, reg, freq);
1207
1208 setPLL(bios, reg, freq);
1209 return 9;
1210}
1211
static int
init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
	/*
	 * INIT_I2C_BYTE opcode: 0x4C ('L')
	 *
	 * offset     (8 bit): opcode
	 * offset + 1 (8 bit): DCB I2C table entry index
	 * offset + 2 (8 bit): I2C slave address
	 * offset + 3 (8 bit): count
	 * offset + 4 (8 bit): I2C register 1
	 * offset + 5 (8 bit): mask 1
	 * offset + 6 (8 bit): data 1
	 * ...
	 *
	 * For each of "count" registers given by "I2C register n" on the device
	 * addressed by "I2C slave address" on the I2C bus given by
	 * "DCB I2C table entry index", read the register, AND the result with
	 * "mask n" and OR it with "data n" before writing it back to the device
	 */

	uint8_t i2c_index = bios->data[offset + 1];
	uint8_t i2c_address = bios->data[offset + 2];
	uint8_t count = bios->data[offset + 3];
	int len = 4 + count * 3;	/* 3 bytes (reg, mask, data) per entry */
	struct nouveau_i2c_chan *chan;
	struct i2c_msg msg;
	int i;

	if (!iexec->execute)
		return len;

	BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, "
		      "Count: 0x%02X\n",
		offset, i2c_index, i2c_address, count);

	/* failure to resolve the bus aborts the whole init table (return 0) */
	chan = init_i2c_device_find(bios->dev, i2c_index);
	if (!chan)
		return 0;

	for (i = 0; i < count; i++) {
		uint8_t i2c_reg = bios->data[offset + 4 + i * 3];
		uint8_t mask = bios->data[offset + 5 + i * 3];
		uint8_t data = bios->data[offset + 6 + i * 3];
		uint8_t value;

		/* the read is performed even in dry-run mode (for logging);
		 * a failed transfer aborts the table */
		msg.addr = i2c_address;
		msg.flags = I2C_M_RD;
		msg.len = 1;
		msg.buf = &value;
		if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
			return 0;

		BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Value: 0x%02X, "
			      "Mask: 0x%02X, Data: 0x%02X\n",
			offset, i2c_reg, value, mask, data);

		value = (value & mask) | data;

		/* only commit the modified byte when actually executing */
		if (bios->execute) {
			msg.addr = i2c_address;
			msg.flags = 0;
			msg.len = 1;
			msg.buf = &value;
			if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
				return 0;
		}
	}

	return len;
}
1283
static int
init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
	/*
	 * INIT_ZM_I2C_BYTE opcode: 0x4D ('M')
	 *
	 * offset     (8 bit): opcode
	 * offset + 1 (8 bit): DCB I2C table entry index
	 * offset + 2 (8 bit): I2C slave address
	 * offset + 3 (8 bit): count
	 * offset + 4 (8 bit): I2C register 1
	 * offset + 5 (8 bit): data 1
	 * ...
	 *
	 * For each of "count" registers given by "I2C register n" on the device
	 * addressed by "I2C slave address" on the I2C bus given by
	 * "DCB I2C table entry index", set the register to "data n"
	 */

	uint8_t i2c_index = bios->data[offset + 1];
	uint8_t i2c_address = bios->data[offset + 2];
	uint8_t count = bios->data[offset + 3];
	int len = 4 + count * 2;	/* 2 bytes (reg, data) per entry */
	struct nouveau_i2c_chan *chan;
	struct i2c_msg msg;
	int i;

	if (!iexec->execute)
		return len;

	BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, "
		      "Count: 0x%02X\n",
		offset, i2c_index, i2c_address, count);

	/* failure to resolve the bus aborts the whole init table (return 0) */
	chan = init_i2c_device_find(bios->dev, i2c_index);
	if (!chan)
		return 0;

	for (i = 0; i < count; i++) {
		uint8_t i2c_reg = bios->data[offset + 4 + i * 2];
		uint8_t data = bios->data[offset + 5 + i * 2];

		BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Data: 0x%02X\n",
			offset, i2c_reg, data);

		/* only touch the hardware when actually executing; a failed
		 * transfer aborts the table */
		if (bios->execute) {
			msg.addr = i2c_address;
			msg.flags = 0;
			msg.len = 1;
			msg.buf = &data;
			if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
				return 0;
		}
	}

	return len;
}
1341
1342static int
1343init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1344{
1345 /*
1346 * INIT_ZM_I2C opcode: 0x4E ('N')
1347 *
1348 * offset (8 bit): opcode
1349 * offset + 1 (8 bit): DCB I2C table entry index
1350 * offset + 2 (8 bit): I2C slave address
1351 * offset + 3 (8 bit): count
1352 * offset + 4 (8 bit): data 1
1353 * ...
1354 *
1355 * Send "count" bytes ("data n") to the device addressed by "I2C slave
1356 * address" on the I2C bus given by "DCB I2C table entry index"
1357 */
1358
1359 uint8_t i2c_index = bios->data[offset + 1];
1360 uint8_t i2c_address = bios->data[offset + 2];
1361 uint8_t count = bios->data[offset + 3];
1362 int len = 4 + count;
1363 struct nouveau_i2c_chan *chan;
1364 struct i2c_msg msg;
1365 uint8_t data[256];
1366 int i;
1367
1368 if (!iexec->execute)
1369 return len;
1370
1371 BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, "
1372 "Count: 0x%02X\n",
1373 offset, i2c_index, i2c_address, count);
1374
1375 chan = init_i2c_device_find(bios->dev, i2c_index);
1376 if (!chan)
1377 return 0;
1378
1379 for (i = 0; i < count; i++) {
1380 data[i] = bios->data[offset + 4 + i];
1381
1382 BIOSLOG(bios, "0x%04X: Data: 0x%02X\n", offset, data[i]);
1383 }
1384
1385 if (bios->execute) {
1386 msg.addr = i2c_address;
1387 msg.flags = 0;
1388 msg.len = count;
1389 msg.buf = data;
1390 if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
1391 return 0;
1392 }
1393
1394 return len;
1395}
1396
static int
init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
	/*
	 * INIT_TMDS opcode: 0x4F ('O')	(non-canon name)
	 *
	 * offset     (8 bit): opcode
	 * offset + 1 (8 bit): magic lookup value
	 * offset + 2 (8 bit): TMDS address
	 * offset + 3 (8 bit): mask
	 * offset + 4 (8 bit): data
	 *
	 * Read the data reg for TMDS address "TMDS address", AND it with mask
	 * and OR it with data, then write it back
	 * "magic lookup value" determines which TMDS base address register is
	 * used -- see get_tmds_index_reg()
	 */

	uint8_t mlv = bios->data[offset + 1];
	uint32_t tmdsaddr = bios->data[offset + 2];
	uint8_t mask = bios->data[offset + 3];
	uint8_t data = bios->data[offset + 4];
	uint32_t reg, value;

	if (!iexec->execute)
		return 5;

	BIOSLOG(bios, "0x%04X: MagicLookupValue: 0x%02X, TMDSAddr: 0x%02X, "
		      "Mask: 0x%02X, Data: 0x%02X\n",
		offset, mlv, tmdsaddr, mask, data);

	/* failure to resolve the index reg aborts the table (return 0) */
	reg = get_tmds_index_reg(bios->dev, mlv);
	if (!reg)
		return 0;

	/* select the TMDS address with writes disabled, read-modify-write
	 * the data register (reg + 4), then re-select the address without
	 * the disable bit to latch the new value */
	bios_wr32(bios, reg,
		  tmdsaddr | NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE);
	value = (bios_rd32(bios, reg + 4) & mask) | data;
	bios_wr32(bios, reg + 4, value);
	bios_wr32(bios, reg, tmdsaddr);

	return 5;
}
1440
1441static int
1442init_zm_tmds_group(struct nvbios *bios, uint16_t offset,
1443 struct init_exec *iexec)
1444{
1445 /*
1446 * INIT_ZM_TMDS_GROUP opcode: 0x50 ('P') (non-canon name)
1447 *
1448 * offset (8 bit): opcode
1449 * offset + 1 (8 bit): magic lookup value
1450 * offset + 2 (8 bit): count
1451 * offset + 3 (8 bit): addr 1
1452 * offset + 4 (8 bit): data 1
1453 * ...
1454 *
1455 * For each of "count" TMDS address and data pairs write "data n" to
1456 * "addr n". "magic lookup value" determines which TMDS base address
1457 * register is used -- see get_tmds_index_reg()
1458 */
1459
1460 uint8_t mlv = bios->data[offset + 1];
1461 uint8_t count = bios->data[offset + 2];
1462 int len = 3 + count * 2;
1463 uint32_t reg;
1464 int i;
1465
1466 if (!iexec->execute)
1467 return len;
1468
1469 BIOSLOG(bios, "0x%04X: MagicLookupValue: 0x%02X, Count: 0x%02X\n",
1470 offset, mlv, count);
1471
1472 reg = get_tmds_index_reg(bios->dev, mlv);
1473 if (!reg)
1474 return 0;
1475
1476 for (i = 0; i < count; i++) {
1477 uint8_t tmdsaddr = bios->data[offset + 3 + i * 2];
1478 uint8_t tmdsdata = bios->data[offset + 4 + i * 2];
1479
1480 bios_wr32(bios, reg + 4, tmdsdata);
1481 bios_wr32(bios, reg, tmdsaddr);
1482 }
1483
1484 return len;
1485}
1486
1487static int
1488init_cr_idx_adr_latch(struct nvbios *bios, uint16_t offset,
1489 struct init_exec *iexec)
1490{
1491 /*
1492 * INIT_CR_INDEX_ADDRESS_LATCHED opcode: 0x51 ('Q')
1493 *
1494 * offset (8 bit): opcode
1495 * offset + 1 (8 bit): CRTC index1
1496 * offset + 2 (8 bit): CRTC index2
1497 * offset + 3 (8 bit): baseaddr
1498 * offset + 4 (8 bit): count
1499 * offset + 5 (8 bit): data 1
1500 * ...
1501 *
1502 * For each of "count" address and data pairs, write "baseaddr + n" to
1503 * "CRTC index1" and "data n" to "CRTC index2"
1504 * Once complete, restore initial value read from "CRTC index1"
1505 */
1506 uint8_t crtcindex1 = bios->data[offset + 1];
1507 uint8_t crtcindex2 = bios->data[offset + 2];
1508 uint8_t baseaddr = bios->data[offset + 3];
1509 uint8_t count = bios->data[offset + 4];
1510 int len = 5 + count;
1511 uint8_t oldaddr, data;
1512 int i;
1513
1514 if (!iexec->execute)
1515 return len;
1516
1517 BIOSLOG(bios, "0x%04X: Index1: 0x%02X, Index2: 0x%02X, "
1518 "BaseAddr: 0x%02X, Count: 0x%02X\n",
1519 offset, crtcindex1, crtcindex2, baseaddr, count);
1520
1521 oldaddr = bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, crtcindex1);
1522
1523 for (i = 0; i < count; i++) {
1524 bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex1,
1525 baseaddr + i);
1526 data = bios->data[offset + 5 + i];
1527 bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex2, data);
1528 }
1529
1530 bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex1, oldaddr);
1531
1532 return len;
1533}
1534
1535static int
1536init_cr(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1537{
1538 /*
1539 * INIT_CR opcode: 0x52 ('R')
1540 *
1541 * offset (8 bit): opcode
1542 * offset + 1 (8 bit): CRTC index
1543 * offset + 2 (8 bit): mask
1544 * offset + 3 (8 bit): data
1545 *
1546 * Assign the value of at "CRTC index" ANDed with mask and ORed with
1547 * data back to "CRTC index"
1548 */
1549
1550 uint8_t crtcindex = bios->data[offset + 1];
1551 uint8_t mask = bios->data[offset + 2];
1552 uint8_t data = bios->data[offset + 3];
1553 uint8_t value;
1554
1555 if (!iexec->execute)
1556 return 4;
1557
1558 BIOSLOG(bios, "0x%04X: Index: 0x%02X, Mask: 0x%02X, Data: 0x%02X\n",
1559 offset, crtcindex, mask, data);
1560
1561 value = bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, crtcindex) & mask;
1562 value |= data;
1563 bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex, value);
1564
1565 return 4;
1566}
1567
1568static int
1569init_zm_cr(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1570{
1571 /*
1572 * INIT_ZM_CR opcode: 0x53 ('S')
1573 *
1574 * offset (8 bit): opcode
1575 * offset + 1 (8 bit): CRTC index
1576 * offset + 2 (8 bit): value
1577 *
1578 * Assign "value" to CRTC register with index "CRTC index".
1579 */
1580
1581 uint8_t crtcindex = ROM32(bios->data[offset + 1]);
1582 uint8_t data = bios->data[offset + 2];
1583
1584 if (!iexec->execute)
1585 return 3;
1586
1587 bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex, data);
1588
1589 return 3;
1590}
1591
1592static int
1593init_zm_cr_group(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1594{
1595 /*
1596 * INIT_ZM_CR_GROUP opcode: 0x54 ('T')
1597 *
1598 * offset (8 bit): opcode
1599 * offset + 1 (8 bit): count
1600 * offset + 2 (8 bit): CRTC index 1
1601 * offset + 3 (8 bit): value 1
1602 * ...
1603 *
1604 * For "count", assign "value n" to CRTC register with index
1605 * "CRTC index n".
1606 */
1607
1608 uint8_t count = bios->data[offset + 1];
1609 int len = 2 + count * 2;
1610 int i;
1611
1612 if (!iexec->execute)
1613 return len;
1614
1615 for (i = 0; i < count; i++)
1616 init_zm_cr(bios, offset + 2 + 2 * i - 1, iexec);
1617
1618 return len;
1619}
1620
static int
init_condition_time(struct nvbios *bios, uint16_t offset,
		    struct init_exec *iexec)
{
	/*
	 * INIT_CONDITION_TIME opcode: 0x56 ('V')
	 *
	 * offset (8 bit): opcode
	 * offset + 1 (8 bit): condition number
	 * offset + 2 (8 bit): retries / 50
	 *
	 * Check condition "condition number" in the condition table.
	 * Bios code then sleeps for 2ms if the condition is not met, and
	 * repeats up to "retries" times, but on one C51 this has proved
	 * insufficient.  In mmiotraces the driver sleeps for 20ms, so we do
	 * this, and bail after "retries" times, or 2s, whichever is less.
	 * If still not met after retries, clear execution flag for this table.
	 */

	uint8_t cond = bios->data[offset + 1];
	/* the operand stores retries/50; scale back up */
	uint16_t retries = bios->data[offset + 2] * 50;
	unsigned cnt;

	if (!iexec->execute)
		return 3;

	/* cap at 100 iterations of 20ms = the 2s limit described above */
	if (retries > 100)
		retries = 100;

	BIOSLOG(bios, "0x%04X: Condition: 0x%02X, Retries: 0x%02X\n",
		offset, cond, retries);

	if (!bios->execute) /* avoid 2s delays when "faking" execution */
		retries = 1;

	for (cnt = 0; cnt < retries; cnt++) {
		if (bios_condition_met(bios, offset, cond)) {
			BIOSLOG(bios, "0x%04X: Condition met, continuing\n",
								offset);
			break;
		} else {
			BIOSLOG(bios, "0x%04X: "
				"Condition not met, sleeping for 20ms\n",
								offset);
			msleep(20);
		}
	}

	/* re-evaluate the condition rather than track loop state; this
	 * costs one extra register read but keeps the final verdict and
	 * the loop logic independent */
	if (!bios_condition_met(bios, offset, cond)) {
		NV_WARN(bios->dev,
			"0x%04X: Condition still not met after %dms, "
			"skipping following opcodes\n", offset, 20 * retries);
		iexec->execute = false;
	}

	return 3;
}
1678
1679static int
1680init_zm_reg_sequence(struct nvbios *bios, uint16_t offset,
1681 struct init_exec *iexec)
1682{
1683 /*
1684 * INIT_ZM_REG_SEQUENCE opcode: 0x58 ('X')
1685 *
1686 * offset (8 bit): opcode
1687 * offset + 1 (32 bit): base register
1688 * offset + 5 (8 bit): count
1689 * offset + 6 (32 bit): value 1
1690 * ...
1691 *
1692 * Starting at offset + 6 there are "count" 32 bit values.
1693 * For "count" iterations set "base register" + 4 * current_iteration
1694 * to "value current_iteration"
1695 */
1696
1697 uint32_t basereg = ROM32(bios->data[offset + 1]);
1698 uint32_t count = bios->data[offset + 5];
1699 int len = 6 + count * 4;
1700 int i;
1701
1702 if (!iexec->execute)
1703 return len;
1704
1705 BIOSLOG(bios, "0x%04X: BaseReg: 0x%08X, Count: 0x%02X\n",
1706 offset, basereg, count);
1707
1708 for (i = 0; i < count; i++) {
1709 uint32_t reg = basereg + i * 4;
1710 uint32_t data = ROM32(bios->data[offset + 6 + i * 4]);
1711
1712 bios_wr32(bios, reg, data);
1713 }
1714
1715 return len;
1716}
1717
1718static int
1719init_sub_direct(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1720{
1721 /*
1722 * INIT_SUB_DIRECT opcode: 0x5B ('[')
1723 *
1724 * offset (8 bit): opcode
1725 * offset + 1 (16 bit): subroutine offset (in bios)
1726 *
1727 * Calls a subroutine that will execute commands until INIT_DONE
1728 * is found.
1729 */
1730
1731 uint16_t sub_offset = ROM16(bios->data[offset + 1]);
1732
1733 if (!iexec->execute)
1734 return 3;
1735
1736 BIOSLOG(bios, "0x%04X: Executing subroutine at 0x%04X\n",
1737 offset, sub_offset);
1738
1739 parse_init_table(bios, sub_offset, iexec);
1740
1741 BIOSLOG(bios, "0x%04X: End of 0x%04X subroutine\n", offset, sub_offset);
1742
1743 return 3;
1744}
1745
1746static int
1747init_copy_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1748{
1749 /*
1750 * INIT_COPY_NV_REG opcode: 0x5F ('_')
1751 *
1752 * offset (8 bit): opcode
1753 * offset + 1 (32 bit): src reg
1754 * offset + 5 (8 bit): shift
1755 * offset + 6 (32 bit): src mask
1756 * offset + 10 (32 bit): xor
1757 * offset + 14 (32 bit): dst reg
1758 * offset + 18 (32 bit): dst mask
1759 *
1760 * Shift REGVAL("src reg") right by (signed) "shift", AND result with
1761 * "src mask", then XOR with "xor". Write this OR'd with
1762 * (REGVAL("dst reg") AND'd with "dst mask") to "dst reg"
1763 */
1764
1765 uint32_t srcreg = *((uint32_t *)(&bios->data[offset + 1]));
1766 uint8_t shift = bios->data[offset + 5];
1767 uint32_t srcmask = *((uint32_t *)(&bios->data[offset + 6]));
1768 uint32_t xor = *((uint32_t *)(&bios->data[offset + 10]));
1769 uint32_t dstreg = *((uint32_t *)(&bios->data[offset + 14]));
1770 uint32_t dstmask = *((uint32_t *)(&bios->data[offset + 18]));
1771 uint32_t srcvalue, dstvalue;
1772
1773 if (!iexec->execute)
1774 return 22;
1775
1776 BIOSLOG(bios, "0x%04X: SrcReg: 0x%08X, Shift: 0x%02X, SrcMask: 0x%08X, "
1777 "Xor: 0x%08X, DstReg: 0x%08X, DstMask: 0x%08X\n",
1778 offset, srcreg, shift, srcmask, xor, dstreg, dstmask);
1779
1780 srcvalue = bios_rd32(bios, srcreg);
1781
1782 if (shift < 0x80)
1783 srcvalue >>= shift;
1784 else
1785 srcvalue <<= (0x100 - shift);
1786
1787 srcvalue = (srcvalue & srcmask) ^ xor;
1788
1789 dstvalue = bios_rd32(bios, dstreg) & dstmask;
1790
1791 bios_wr32(bios, dstreg, dstvalue | srcvalue);
1792
1793 return 22;
1794}
1795
1796static int
1797init_zm_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1798{
1799 /*
1800 * INIT_ZM_INDEX_IO opcode: 0x62 ('b')
1801 *
1802 * offset (8 bit): opcode
1803 * offset + 1 (16 bit): CRTC port
1804 * offset + 3 (8 bit): CRTC index
1805 * offset + 4 (8 bit): data
1806 *
1807 * Write "data" to index "CRTC index" of "CRTC port"
1808 */
1809 uint16_t crtcport = ROM16(bios->data[offset + 1]);
1810 uint8_t crtcindex = bios->data[offset + 3];
1811 uint8_t data = bios->data[offset + 4];
1812
1813 if (!iexec->execute)
1814 return 5;
1815
1816 bios_idxprt_wr(bios, crtcport, crtcindex, data);
1817
1818 return 5;
1819}
1820
static int
init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
	/*
	 * INIT_COMPUTE_MEM opcode: 0x63 ('c')
	 *
	 * offset (8 bit): opcode
	 *
	 * This opcode is meant to set NV_PFB_CFG0 (0x100200) appropriately so
	 * that the hardware can correctly calculate how much VRAM it has
	 * (and subsequently report that value in NV_PFB_CSTATUS (0x10020C))
	 *
	 * The implementation of this opcode in general consists of two parts:
	 * 1) determination of the memory bus width
	 * 2) determination of how many of the card's RAM pads have ICs attached
	 *
	 * 1) is done by a cunning combination of writes to offsets 0x1c and
	 * 0x3c in the framebuffer, and seeing whether the written values are
	 * read back correctly. This then affects bits 4-7 of NV_PFB_CFG0
	 *
	 * 2) is done by a cunning combination of writes to an offset slightly
	 * less than the maximum memory reported by NV_PFB_CSTATUS, then seeing
	 * if the test pattern can be read back. This then affects bits 12-15 of
	 * NV_PFB_CFG0
	 *
	 * In this context a "cunning combination" may include multiple reads
	 * and writes to varying locations, often alternating the test pattern
	 * and 0, doubtless to make sure buffers are filled, residual charges
	 * on tracks are removed etc.
	 *
	 * Unfortunately, the "cunning combination"s mentioned above, and the
	 * changes to the bits in NV_PFB_CFG0 differ with nearly every bios
	 * trace I have.
	 *
	 * Therefore, we cheat and assume the value of NV_PFB_CFG0 with which
	 * we started was correct, and use that instead
	 */

	/* no iexec->execute check by design */

	/*
	 * This appears to be a NOP on G8x chipsets, both io logs of the VBIOS
	 * and kmmio traces of the binary driver POSTing the card show nothing
	 * being done for this opcode. why is it still listed in the table?!
	 */

	struct drm_nouveau_private *dev_priv = bios->dev->dev_private;

	/* NV40 and newer: treat as a NOP (see comment above) */
	if (dev_priv->card_type >= NV_40)
		return 1;

	/*
	 * On every card I've seen, this step gets done for us earlier in
	 * the init scripts
	uint8_t crdata = bios_idxprt_rd(dev, NV_VIO_SRX, 0x01);
	bios_idxprt_wr(dev, NV_VIO_SRX, 0x01, crdata | 0x20);
	 */

	/*
	 * This also has probably been done in the scripts, but an mmio trace of
	 * s3 resume shows nvidia doing it anyway (unlike the NV_VIO_SRX write)
	 */
	bios_wr32(bios, NV_PFB_REFCTRL, NV_PFB_REFCTRL_VALID_1);

	/* write back the saved configuration value */
	bios_wr32(bios, NV_PFB_CFG0, bios->state.saved_nv_pfb_cfg0);

	return 1;
}
1890
static int
init_reset(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
	/*
	 * INIT_RESET opcode: 0x65 ('e')
	 *
	 * offset (8 bit): opcode
	 * offset + 1 (32 bit): register
	 * offset + 5 (32 bit): value1
	 * offset + 9 (32 bit): value2
	 *
	 * Assign "value1" to "register", then assign "value2" to "register"
	 */

	uint32_t reg = ROM32(bios->data[offset + 1]);
	uint32_t value1 = ROM32(bios->data[offset + 5]);
	uint32_t value2 = ROM32(bios->data[offset + 9]);
	uint32_t pci_nv_19, pci_nv_20;

	/* no iexec->execute check by design */

	/* save PCI_NV_19 and zero it across the reset sequence, restoring
	 * it once both values have been written */
	pci_nv_19 = bios_rd32(bios, NV_PBUS_PCI_NV_19);
	bios_wr32(bios, NV_PBUS_PCI_NV_19, 0);
	bios_wr32(bios, reg, value1);

	/* brief settle time between the two writes */
	udelay(10);

	bios_wr32(bios, reg, value2);
	bios_wr32(bios, NV_PBUS_PCI_NV_19, pci_nv_19);

	/* turn off ROM shadowing so subsequent accesses hit the real ROM */
	pci_nv_20 = bios_rd32(bios, NV_PBUS_PCI_NV_20);
	pci_nv_20 &= ~NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED;	/* 0xfffffffe */
	bios_wr32(bios, NV_PBUS_PCI_NV_20, pci_nv_20);

	return 13;
}
1927
static int
init_configure_mem(struct nvbios *bios, uint16_t offset,
		   struct init_exec *iexec)
{
	/*
	 * INIT_CONFIGURE_MEM opcode: 0x66 ('f')
	 *
	 * offset (8 bit): opcode
	 *
	 * Equivalent to INIT_DONE on bios version 3 or greater.
	 * For early bios versions, sets up the memory registers, using values
	 * taken from the memory init table
	 */

	/* no iexec->execute check by design */

	/* CR scratch 4 high nibble selects which memory-init table entry
	 * applies to this board */
	uint16_t meminitoffs = bios->legacy.mem_init_tbl_ptr + MEM_INIT_SIZE * (bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_SCRATCH4__INDEX) >> 4);
	uint16_t seqtbloffs = bios->legacy.sdr_seq_tbl_ptr, meminitdata = meminitoffs + 6;
	uint32_t reg, data;

	/* later BIOSes: this opcode acts as INIT_DONE */
	if (bios->major_version > 2)
		return 0;

	bios_idxprt_wr(bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX, bios_idxprt_rd(
		       bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX) | 0x20);

	/* bit 0 of the mem-init entry selects DDR vs SDR sequence table */
	if (bios->data[meminitoffs] & 1)
		seqtbloffs = bios->legacy.ddr_seq_tbl_ptr;

	/* walk the register list; 0xffffffff terminates the table.  Note
	 * the update expression advances seqtbloffs by 4 and re-reads. */
	for (reg = ROM32(bios->data[seqtbloffs]);
	     reg != 0xffffffff;
	     reg = ROM32(bios->data[seqtbloffs += 4])) {

		switch (reg) {
		case NV_PFB_PRE:
			data = NV_PFB_PRE_CMD_PRECHARGE;
			break;
		case NV_PFB_PAD:
			data = NV_PFB_PAD_CKE_NORMAL;
			break;
		case NV_PFB_REF:
			data = NV_PFB_REF_CMD_REFRESH;
			break;
		default:
			/* other registers take their value from the
			 * mem-init data stream; 0xffffffff means "skip" */
			data = ROM32(bios->data[meminitdata]);
			meminitdata += 4;
			if (data == 0xffffffff)
				continue;
		}

		bios_wr32(bios, reg, data);
	}

	return 1;
}
1983
1984static int
1985init_configure_clk(struct nvbios *bios, uint16_t offset,
1986 struct init_exec *iexec)
1987{
1988 /*
1989 * INIT_CONFIGURE_CLK opcode: 0x67 ('g')
1990 *
1991 * offset (8 bit): opcode
1992 *
1993 * Equivalent to INIT_DONE on bios version 3 or greater.
1994 * For early bios versions, sets up the NVClk and MClk PLLs, using
1995 * values taken from the memory init table
1996 */
1997
1998 /* no iexec->execute check by design */
1999
2000 uint16_t meminitoffs = bios->legacy.mem_init_tbl_ptr + MEM_INIT_SIZE * (bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_SCRATCH4__INDEX) >> 4);
2001 int clock;
2002
2003 if (bios->major_version > 2)
2004 return 0;
2005
2006 clock = ROM16(bios->data[meminitoffs + 4]) * 10;
2007 setPLL(bios, NV_PRAMDAC_NVPLL_COEFF, clock);
2008
2009 clock = ROM16(bios->data[meminitoffs + 2]) * 10;
2010 if (bios->data[meminitoffs] & 1) /* DDR */
2011 clock *= 2;
2012 setPLL(bios, NV_PRAMDAC_MPLL_COEFF, clock);
2013
2014 return 1;
2015}
2016
static int
init_configure_preinit(struct nvbios *bios, uint16_t offset,
		       struct init_exec *iexec)
{
	/*
	 * INIT_CONFIGURE_PREINIT opcode: 0x68 ('h')
	 *
	 * offset (8 bit): opcode
	 *
	 * Equivalent to INIT_DONE on bios version 3 or greater.
	 * For early bios versions, does early init, loading ram and crystal
	 * configuration from straps into CR3C
	 */

	/* no iexec->execute check by design */

	uint32_t straps = bios_rd32(bios, NV_PEXTDEV_BOOT_0);
	/* strap bits 2-5 land in CR3C bits 4-7; strap bit 6 is kept in
	 * place (NOTE(review): exact bit meanings not visible here --
	 * per the opcode comment these are RAM/crystal config) */
	uint8_t cr3c = ((straps << 2) & 0xf0) | (straps & (1 << 6));

	/* later BIOSes: this opcode acts as INIT_DONE */
	if (bios->major_version > 2)
		return 0;

	bios_idxprt_wr(bios, NV_CIO_CRX__COLOR,
		       NV_CIO_CRE_SCRATCH4__INDEX, cr3c);

	return 1;
}
2044
static int
init_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
	/*
	 * INIT_IO opcode: 0x69 ('i')
	 *
	 * offset     (8 bit): opcode
	 * offset + 1 (16 bit): CRTC port
	 * offset + 3 (8 bit): mask
	 * offset + 4 (8 bit): data
	 *
	 * Assign ((IOVAL("crtc port") & "mask") | "data") to "crtc port"
	 */

	struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
	uint16_t crtcport = ROM16(bios->data[offset + 1]);
	uint8_t mask = bios->data[offset + 3];
	uint8_t data = bios->data[offset + 4];

	if (!iexec->execute)
		return 5;

	BIOSLOG(bios, "0x%04X: Port: 0x%04X, Mask: 0x%02X, Data: 0x%02X\n",
		offset, crtcport, mask, data);

	/*
	 * I have no idea what this does, but NVIDIA do this magic sequence
	 * in the places where this INIT_IO happens..
	 */
	/* Registers 0x614xxx appear to be NV50 display-block related and
	 * 0x000200 a master enable -- NOTE(review): semantics inferred
	 * from mmio traces only, per the comment above; the ordering and
	 * delays below must not be changed. */
	if (dev_priv->card_type >= NV_50 && crtcport == 0x3c3 && data == 1) {
		int i;

		bios_wr32(bios, 0x614100, (bios_rd32(
			  bios, 0x614100) & 0x0fffffff) | 0x00800000);

		bios_wr32(bios, 0x00e18c, bios_rd32(
			  bios, 0x00e18c) | 0x00020000);

		bios_wr32(bios, 0x614900, (bios_rd32(
			  bios, 0x614900) & 0x0fffffff) | 0x00800000);

		bios_wr32(bios, 0x000200, bios_rd32(
			  bios, 0x000200) & ~0x40000000);

		mdelay(10);

		bios_wr32(bios, 0x00e18c, bios_rd32(
			  bios, 0x00e18c) & ~0x00020000);

		bios_wr32(bios, 0x000200, bios_rd32(
			  bios, 0x000200) | 0x40000000);

		bios_wr32(bios, 0x614100, 0x00800018);
		bios_wr32(bios, 0x614900, 0x00800018);

		mdelay(10);

		bios_wr32(bios, 0x614100, 0x10000018);
		bios_wr32(bios, 0x614900, 0x10000018);

		for (i = 0; i < 3; i++)
			bios_wr32(bios, 0x614280 + (i*0x800), bios_rd32(
				  bios, 0x614280 + (i*0x800)) & 0xf0f0f0f0);

		for (i = 0; i < 2; i++)
			bios_wr32(bios, 0x614300 + (i*0x800), bios_rd32(
				  bios, 0x614300 + (i*0x800)) & 0xfffff0f0);

		for (i = 0; i < 3; i++)
			bios_wr32(bios, 0x614380 + (i*0x800), bios_rd32(
				  bios, 0x614380 + (i*0x800)) & 0xfffff0f0);

		for (i = 0; i < 2; i++)
			bios_wr32(bios, 0x614200 + (i*0x800), bios_rd32(
				  bios, 0x614200 + (i*0x800)) & 0xfffffff0);

		for (i = 0; i < 2; i++)
			bios_wr32(bios, 0x614108 + (i*0x800), bios_rd32(
				  bios, 0x614108 + (i*0x800)) & 0x0fffffff);
		return 5;
	}

	/* the ordinary case: read-modify-write of the I/O port */
	bios_port_wr(bios, crtcport, (bios_port_rd(bios, crtcport) & mask) |
		     data);
	return 5;
}
2131
2132static int
2133init_sub(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2134{
2135 /*
2136 * INIT_SUB opcode: 0x6B ('k')
2137 *
2138 * offset (8 bit): opcode
2139 * offset + 1 (8 bit): script number
2140 *
2141 * Execute script number "script number", as a subroutine
2142 */
2143
2144 uint8_t sub = bios->data[offset + 1];
2145
2146 if (!iexec->execute)
2147 return 2;
2148
2149 BIOSLOG(bios, "0x%04X: Calling script %d\n", offset, sub);
2150
2151 parse_init_table(bios,
2152 ROM16(bios->data[bios->init_script_tbls_ptr + sub * 2]),
2153 iexec);
2154
2155 BIOSLOG(bios, "0x%04X: End of script %d\n", offset, sub);
2156
2157 return 2;
2158}
2159
2160static int
2161init_ram_condition(struct nvbios *bios, uint16_t offset,
2162 struct init_exec *iexec)
2163{
2164 /*
2165 * INIT_RAM_CONDITION opcode: 0x6D ('m')
2166 *
2167 * offset (8 bit): opcode
2168 * offset + 1 (8 bit): mask
2169 * offset + 2 (8 bit): cmpval
2170 *
2171 * Test if (NV_PFB_BOOT_0 & "mask") equals "cmpval".
2172 * If condition not met skip subsequent opcodes until condition is
2173 * inverted (INIT_NOT), or we hit INIT_RESUME
2174 */
2175
2176 uint8_t mask = bios->data[offset + 1];
2177 uint8_t cmpval = bios->data[offset + 2];
2178 uint8_t data;
2179
2180 if (!iexec->execute)
2181 return 3;
2182
2183 data = bios_rd32(bios, NV_PFB_BOOT_0) & mask;
2184
2185 BIOSLOG(bios, "0x%04X: Checking if 0x%08X equals 0x%08X\n",
2186 offset, data, cmpval);
2187
2188 if (data == cmpval)
2189 BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset);
2190 else {
2191 BIOSLOG(bios, "0x%04X: Condition not fulfilled -- skipping following commands\n", offset);
2192 iexec->execute = false;
2193 }
2194
2195 return 3;
2196}
2197
2198static int
2199init_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2200{
2201 /*
2202 * INIT_NV_REG opcode: 0x6E ('n')
2203 *
2204 * offset (8 bit): opcode
2205 * offset + 1 (32 bit): register
2206 * offset + 5 (32 bit): mask
2207 * offset + 9 (32 bit): data
2208 *
2209 * Assign ((REGVAL("register") & "mask") | "data") to "register"
2210 */
2211
2212 uint32_t reg = ROM32(bios->data[offset + 1]);
2213 uint32_t mask = ROM32(bios->data[offset + 5]);
2214 uint32_t data = ROM32(bios->data[offset + 9]);
2215
2216 if (!iexec->execute)
2217 return 13;
2218
2219 BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Mask: 0x%08X, Data: 0x%08X\n",
2220 offset, reg, mask, data);
2221
2222 bios_wr32(bios, reg, (bios_rd32(bios, reg) & mask) | data);
2223
2224 return 13;
2225}
2226
2227static int
2228init_macro(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2229{
2230 /*
2231 * INIT_MACRO opcode: 0x6F ('o')
2232 *
2233 * offset (8 bit): opcode
2234 * offset + 1 (8 bit): macro number
2235 *
2236 * Look up macro index "macro number" in the macro index table.
2237 * The macro index table entry has 1 byte for the index in the macro
2238 * table, and 1 byte for the number of times to repeat the macro.
2239 * The macro table entry has 4 bytes for the register address and
2240 * 4 bytes for the value to write to that register
2241 */
2242
2243 uint8_t macro_index_tbl_idx = bios->data[offset + 1];
2244 uint16_t tmp = bios->macro_index_tbl_ptr + (macro_index_tbl_idx * MACRO_INDEX_SIZE);
2245 uint8_t macro_tbl_idx = bios->data[tmp];
2246 uint8_t count = bios->data[tmp + 1];
2247 uint32_t reg, data;
2248 int i;
2249
2250 if (!iexec->execute)
2251 return 2;
2252
2253 BIOSLOG(bios, "0x%04X: Macro: 0x%02X, MacroTableIndex: 0x%02X, "
2254 "Count: 0x%02X\n",
2255 offset, macro_index_tbl_idx, macro_tbl_idx, count);
2256
2257 for (i = 0; i < count; i++) {
2258 uint16_t macroentryptr = bios->macro_tbl_ptr + (macro_tbl_idx + i) * MACRO_SIZE;
2259
2260 reg = ROM32(bios->data[macroentryptr]);
2261 data = ROM32(bios->data[macroentryptr + 4]);
2262
2263 bios_wr32(bios, reg, data);
2264 }
2265
2266 return 2;
2267}
2268
2269static int
2270init_done(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2271{
2272 /*
2273 * INIT_DONE opcode: 0x71 ('q')
2274 *
2275 * offset (8 bit): opcode
2276 *
2277 * End the current script
2278 */
2279
2280 /* mild retval abuse to stop parsing this table */
2281 return 0;
2282}
2283
2284static int
2285init_resume(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2286{
2287 /*
2288 * INIT_RESUME opcode: 0x72 ('r')
2289 *
2290 * offset (8 bit): opcode
2291 *
2292 * End the current execute / no-execute condition
2293 */
2294
2295 if (iexec->execute)
2296 return 1;
2297
2298 iexec->execute = true;
2299 BIOSLOG(bios, "0x%04X: ---- Executing following commands ----\n", offset);
2300
2301 return 1;
2302}
2303
2304static int
2305init_time(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2306{
2307 /*
2308 * INIT_TIME opcode: 0x74 ('t')
2309 *
2310 * offset (8 bit): opcode
2311 * offset + 1 (16 bit): time
2312 *
2313 * Sleep for "time" microseconds.
2314 */
2315
2316 unsigned time = ROM16(bios->data[offset + 1]);
2317
2318 if (!iexec->execute)
2319 return 3;
2320
2321 BIOSLOG(bios, "0x%04X: Sleeping for 0x%04X microseconds\n",
2322 offset, time);
2323
2324 if (time < 1000)
2325 udelay(time);
2326 else
2327 msleep((time + 900) / 1000);
2328
2329 return 3;
2330}
2331
2332static int
2333init_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2334{
2335 /*
2336 * INIT_CONDITION opcode: 0x75 ('u')
2337 *
2338 * offset (8 bit): opcode
2339 * offset + 1 (8 bit): condition number
2340 *
2341 * Check condition "condition number" in the condition table.
2342 * If condition not met skip subsequent opcodes until condition is
2343 * inverted (INIT_NOT), or we hit INIT_RESUME
2344 */
2345
2346 uint8_t cond = bios->data[offset + 1];
2347
2348 if (!iexec->execute)
2349 return 2;
2350
2351 BIOSLOG(bios, "0x%04X: Condition: 0x%02X\n", offset, cond);
2352
2353 if (bios_condition_met(bios, offset, cond))
2354 BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset);
2355 else {
2356 BIOSLOG(bios, "0x%04X: Condition not fulfilled -- skipping following commands\n", offset);
2357 iexec->execute = false;
2358 }
2359
2360 return 2;
2361}
2362
2363static int
2364init_io_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2365{
2366 /*
2367 * INIT_IO_CONDITION opcode: 0x76
2368 *
2369 * offset (8 bit): opcode
2370 * offset + 1 (8 bit): condition number
2371 *
2372 * Check condition "condition number" in the io condition table.
2373 * If condition not met skip subsequent opcodes until condition is
2374 * inverted (INIT_NOT), or we hit INIT_RESUME
2375 */
2376
2377 uint8_t cond = bios->data[offset + 1];
2378
2379 if (!iexec->execute)
2380 return 2;
2381
2382 BIOSLOG(bios, "0x%04X: IO condition: 0x%02X\n", offset, cond);
2383
2384 if (io_condition_met(bios, offset, cond))
2385 BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset);
2386 else {
2387 BIOSLOG(bios, "0x%04X: Condition not fulfilled -- skipping following commands\n", offset);
2388 iexec->execute = false;
2389 }
2390
2391 return 2;
2392}
2393
2394static int
2395init_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2396{
2397 /*
2398 * INIT_INDEX_IO opcode: 0x78 ('x')
2399 *
2400 * offset (8 bit): opcode
2401 * offset + 1 (16 bit): CRTC port
2402 * offset + 3 (8 bit): CRTC index
2403 * offset + 4 (8 bit): mask
2404 * offset + 5 (8 bit): data
2405 *
2406 * Read value at index "CRTC index" on "CRTC port", AND with "mask",
2407 * OR with "data", write-back
2408 */
2409
2410 uint16_t crtcport = ROM16(bios->data[offset + 1]);
2411 uint8_t crtcindex = bios->data[offset + 3];
2412 uint8_t mask = bios->data[offset + 4];
2413 uint8_t data = bios->data[offset + 5];
2414 uint8_t value;
2415
2416 if (!iexec->execute)
2417 return 6;
2418
2419 BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
2420 "Data: 0x%02X\n",
2421 offset, crtcport, crtcindex, mask, data);
2422
2423 value = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) | data;
2424 bios_idxprt_wr(bios, crtcport, crtcindex, value);
2425
2426 return 6;
2427}
2428
2429static int
2430init_pll(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2431{
2432 /*
2433 * INIT_PLL opcode: 0x79 ('y')
2434 *
2435 * offset (8 bit): opcode
2436 * offset + 1 (32 bit): register
2437 * offset + 5 (16 bit): freq
2438 *
2439 * Set PLL register "register" to coefficients for frequency (10kHz)
2440 * "freq"
2441 */
2442
2443 uint32_t reg = ROM32(bios->data[offset + 1]);
2444 uint16_t freq = ROM16(bios->data[offset + 5]);
2445
2446 if (!iexec->execute)
2447 return 7;
2448
2449 BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Freq: %d0kHz\n", offset, reg, freq);
2450
2451 setPLL(bios, reg, freq * 10);
2452
2453 return 7;
2454}
2455
2456static int
2457init_zm_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2458{
2459 /*
2460 * INIT_ZM_REG opcode: 0x7A ('z')
2461 *
2462 * offset (8 bit): opcode
2463 * offset + 1 (32 bit): register
2464 * offset + 5 (32 bit): value
2465 *
2466 * Assign "value" to "register"
2467 */
2468
2469 uint32_t reg = ROM32(bios->data[offset + 1]);
2470 uint32_t value = ROM32(bios->data[offset + 5]);
2471
2472 if (!iexec->execute)
2473 return 9;
2474
2475 if (reg == 0x000200)
2476 value |= 1;
2477
2478 bios_wr32(bios, reg, value);
2479
2480 return 9;
2481}
2482
static int
init_ram_restrict_pll(struct nvbios *bios, uint16_t offset,
		      struct init_exec *iexec)
{
	/*
	 * INIT_RAM_RESTRICT_PLL opcode: 0x87 ('')
	 *
	 * offset (8 bit): opcode
	 * offset + 1 (8 bit): PLL type
	 * offset + 2 (32 bit): frequency 0
	 *
	 * Uses the RAMCFG strap of PEXTDEV_BOOT as an index into the table at
	 * ram_restrict_table_ptr. The value read from there is used to select
	 * a frequency from the table starting at 'frequency 0' to be
	 * programmed into the PLL corresponding to 'type'.
	 *
	 * The PLL limits table on cards using this opcode has a mapping of
	 * 'type' to the relevant registers.
	 */

	struct drm_device *dev = bios->dev;
	/* RAMCFG strap lives in bits 2-5 of PEXTDEV_BOOT_0 */
	uint32_t strap = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) & 0x0000003c) >> 2;
	uint8_t index = bios->data[bios->ram_restrict_tbl_ptr + strap];
	uint8_t type = bios->data[offset + 1];
	/* one 32-bit frequency per possible index value */
	uint32_t freq = ROM32(bios->data[offset + 2 + (index * 4)]);
	uint8_t *pll_limits = &bios->data[bios->pll_limit_tbl_ptr], *entry;
	int len = 2 + bios->ram_restrict_group_count * 4;
	int i;

	if (!iexec->execute)
		return len;

	/* only version 3.x limits tables carry the type->register map */
	if (!bios->pll_limit_tbl_ptr || (pll_limits[0] & 0xf0) != 0x30) {
		NV_ERROR(dev, "PLL limits table not version 3.x\n");
		return len; /* deliberate, allow default clocks to remain */
	}

	/* header: [1] = header length, [2] = entry size, [3] = entry count.
	 * Each entry starts with the type byte, register at +3. */
	entry = pll_limits + pll_limits[1];
	for (i = 0; i < pll_limits[3]; i++, entry += pll_limits[2]) {
		if (entry[0] == type) {
			uint32_t reg = ROM32(entry[3]);

			BIOSLOG(bios, "0x%04X: "
				      "Type %02x Reg 0x%08x Freq %dKHz\n",
				offset, type, reg, freq);

			setPLL(bios, reg, freq);
			return len;
		}
	}

	NV_ERROR(dev, "PLL type 0x%02x not found in PLL limits table", type);
	return len;
}
2537
2538static int
2539init_8c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2540{
2541 /*
2542 * INIT_8C opcode: 0x8C ('')
2543 *
2544 * NOP so far....
2545 *
2546 */
2547
2548 return 1;
2549}
2550
2551static int
2552init_8d(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2553{
2554 /*
2555 * INIT_8D opcode: 0x8D ('')
2556 *
2557 * NOP so far....
2558 *
2559 */
2560
2561 return 1;
2562}
2563
static int
init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
	/*
	 * INIT_GPIO opcode: 0x8E ('')
	 *
	 * offset (8 bit): opcode
	 *
	 * Loop over all entries in the DCB GPIO table, and initialise
	 * each GPIO according to various values listed in each entry
	 */

	struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
	/* one control register per bank of 16 GPIO lines */
	const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c };
	int i;

	/* this opcode is only implemented for NV50-class hardware */
	if (dev_priv->card_type != NV_50) {
		NV_ERROR(bios->dev, "INIT_GPIO on unsupported chipset\n");
		return -ENODEV;
	}

	if (!iexec->execute)
		return 1;

	for (i = 0; i < bios->dcb.gpio.entries; i++) {
		struct dcb_gpio_entry *gpio = &bios->dcb.gpio.entry[i];
		uint32_t r, s, v;

		BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, gpio->entry);

		/* drive the line to its default state from the DCB entry */
		nv50_gpio_set(bios->dev, gpio->tag, gpio->state_default);

		/* The NVIDIA binary driver doesn't appear to actually do
		 * any of this, my VBIOS does however.
		 */
		/* Not a clue, needs de-magicing */
		/* clear, then conditionally set, a bit pair for this line
		 * in the bank's control register, selected by bits 25-26
		 * of the DCB entry -- meaning unknown (see above) */
		r = nv50_gpio_ctl[gpio->line >> 4];
		s = (gpio->line & 0x0f);
		v = bios_rd32(bios, r) & ~(0x00010001 << s);
		switch ((gpio->entry & 0x06000000) >> 25) {
		case 1:
			v |= (0x00000001 << s);
			break;
		case 2:
			v |= (0x00010000 << s);
			break;
		default:
			break;
		}
		bios_wr32(bios, r, v);
	}

	return 1;
}
2618
static int
init_ram_restrict_zm_reg_group(struct nvbios *bios, uint16_t offset,
			       struct init_exec *iexec)
{
	/*
	 * INIT_RAM_RESTRICT_ZM_REG_GROUP opcode: 0x8F ('')
	 *
	 * offset (8 bit): opcode
	 * offset + 1 (32 bit): reg
	 * offset + 5 (8 bit): regincrement
	 * offset + 6 (8 bit): count
	 * offset + 7 (32 bit): value 1,1
	 * ...
	 *
	 * Use the RAMCFG strap of PEXTDEV_BOOT as an index into the table at
	 * ram_restrict_table_ptr. The value read from here is 'n', and
	 * "value 1,n" gets written to "reg". This repeats "count" times and on
	 * each iteration 'm', "reg" increases by "regincrement" and
	 * "value m,n" is used. The extent of n is limited by a number read
	 * from the 'M' BIT table, herein called "blocklen"
	 */

	uint32_t reg = ROM32(bios->data[offset + 1]);
	uint8_t regincrement = bios->data[offset + 5];
	uint8_t count = bios->data[offset + 6];
	uint32_t strap_ramcfg, data;
	/* previously set by 'M' BIT table */
	uint16_t blocklen = bios->ram_restrict_group_count * 4;
	int len = 7 + count * blocklen;
	uint8_t index;
	int i;


	if (!iexec->execute)
		return len;

	/* without a group count the opcode length is unknowable, so the
	 * whole table must be abandoned (return 0 stops the parser) */
	if (!blocklen) {
		NV_ERROR(bios->dev,
			 "0x%04X: Zero block length - has the M table "
			 "been parsed?\n", offset);
		return 0;
	}

	/* RAMCFG strap: bits 2-5 of PEXTDEV_BOOT_0 */
	strap_ramcfg = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 2) & 0xf;
	index = bios->data[bios->ram_restrict_tbl_ptr + strap_ramcfg];

	BIOSLOG(bios, "0x%04X: Reg: 0x%08X, RegIncrement: 0x%02X, "
		      "Count: 0x%02X, StrapRamCfg: 0x%02X, Index: 0x%02X\n",
		offset, reg, regincrement, count, strap_ramcfg, index);

	/* row i of the value matrix starts at offset + 7 + blocklen * i;
	 * pick column 'index' out of each row */
	for (i = 0; i < count; i++) {
		data = ROM32(bios->data[offset + 7 + index * 4 + blocklen * i]);

		bios_wr32(bios, reg, data);

		reg += regincrement;
	}

	return len;
}
2679
2680static int
2681init_copy_zm_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2682{
2683 /*
2684 * INIT_COPY_ZM_REG opcode: 0x90 ('')
2685 *
2686 * offset (8 bit): opcode
2687 * offset + 1 (32 bit): src reg
2688 * offset + 5 (32 bit): dst reg
2689 *
2690 * Put contents of "src reg" into "dst reg"
2691 */
2692
2693 uint32_t srcreg = ROM32(bios->data[offset + 1]);
2694 uint32_t dstreg = ROM32(bios->data[offset + 5]);
2695
2696 if (!iexec->execute)
2697 return 9;
2698
2699 bios_wr32(bios, dstreg, bios_rd32(bios, srcreg));
2700
2701 return 9;
2702}
2703
2704static int
2705init_zm_reg_group_addr_latched(struct nvbios *bios, uint16_t offset,
2706 struct init_exec *iexec)
2707{
2708 /*
2709 * INIT_ZM_REG_GROUP_ADDRESS_LATCHED opcode: 0x91 ('')
2710 *
2711 * offset (8 bit): opcode
2712 * offset + 1 (32 bit): dst reg
2713 * offset + 5 (8 bit): count
2714 * offset + 6 (32 bit): data 1
2715 * ...
2716 *
2717 * For each of "count" values write "data n" to "dst reg"
2718 */
2719
2720 uint32_t reg = ROM32(bios->data[offset + 1]);
2721 uint8_t count = bios->data[offset + 5];
2722 int len = 6 + count * 4;
2723 int i;
2724
2725 if (!iexec->execute)
2726 return len;
2727
2728 for (i = 0; i < count; i++) {
2729 uint32_t data = ROM32(bios->data[offset + 6 + 4 * i]);
2730 bios_wr32(bios, reg, data);
2731 }
2732
2733 return len;
2734}
2735
2736static int
2737init_reserved(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2738{
2739 /*
2740 * INIT_RESERVED opcode: 0x92 ('')
2741 *
2742 * offset (8 bit): opcode
2743 *
2744 * Seemingly does nothing
2745 */
2746
2747 return 1;
2748}
2749
static int
init_96(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
	/*
	 * INIT_96 opcode: 0x96 ('')
	 *
	 * offset (8 bit): opcode
	 * offset + 1 (32 bit): sreg
	 * offset + 5 (8 bit): sshift
	 * offset + 6 (8 bit): smask
	 * offset + 7 (8 bit): index
	 * offset + 8 (32 bit): reg
	 * offset + 12 (32 bit): mask
	 * offset + 16 (8 bit): shift
	 *
	 * Reads "sreg", shifts it right by "sshift" (values >= 0x80 mean a
	 * left shift of 0x100 - sshift) and masks with "smask" to form a
	 * lookup value.  The translation table selected by "index" via
	 * init96_tbl_ptr maps that value to a byte which, shifted left by
	 * "shift", is merged into "reg" under "mask".
	 */

	uint16_t xlatptr = bios->init96_tbl_ptr + (bios->data[offset + 7] * 2);
	uint32_t reg = ROM32(bios->data[offset + 8]);
	uint32_t mask = ROM32(bios->data[offset + 12]);
	uint32_t val;

	/* read source register and shift its value into position */
	val = bios_rd32(bios, ROM32(bios->data[offset + 1]));
	if (bios->data[offset + 5] < 0x80)
		val >>= bios->data[offset + 5];
	else
		val <<= (0x100 - bios->data[offset + 5]);
	val &= bios->data[offset + 6];

	/* translate through the table pointed at by xlatptr, then shift */
	val = bios->data[ROM16(bios->data[xlatptr]) + val];
	val <<= bios->data[offset + 16];

	/* note: the source register is read even when not executing */
	if (!iexec->execute)
		return 17;

	bios_wr32(bios, reg, (bios_rd32(bios, reg) & mask) | val);
	return 17;
}
2788
2789static int
2790init_97(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2791{
2792 /*
2793 * INIT_97 opcode: 0x97 ('')
2794 *
2795 * offset (8 bit): opcode
2796 * offset + 1 (32 bit): register
2797 * offset + 5 (32 bit): mask
2798 * offset + 9 (32 bit): value
2799 *
2800 * Adds "value" to "register" preserving the fields specified
2801 * by "mask"
2802 */
2803
2804 uint32_t reg = ROM32(bios->data[offset + 1]);
2805 uint32_t mask = ROM32(bios->data[offset + 5]);
2806 uint32_t add = ROM32(bios->data[offset + 9]);
2807 uint32_t val;
2808
2809 val = bios_rd32(bios, reg);
2810 val = (val & mask) | ((val + add) & ~mask);
2811
2812 if (!iexec->execute)
2813 return 13;
2814
2815 bios_wr32(bios, reg, val);
2816 return 13;
2817}
2818
static int
init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
	/*
	 * INIT_AUXCH opcode: 0x98 ('')
	 *
	 * offset (8 bit): opcode
	 * offset + 1 (32 bit): address
	 * offset + 5 (8 bit): count
	 * offset + 6 (8 bit): mask 0
	 * offset + 7 (8 bit): data 0
	 * ...
	 *
	 * For each of "count" (mask, data) byte pairs: read a byte from DP
	 * aux address "address", AND it with the mask, OR in the data byte,
	 * and write the result back.  Requires an active output so the aux
	 * channel can be located.
	 */

	struct drm_device *dev = bios->dev;
	struct nouveau_i2c_chan *auxch;
	uint32_t addr = ROM32(bios->data[offset + 1]);
	uint8_t count = bios->data[offset + 5];
	int len = 6 + count * 2;
	int ret, i;

	if (!bios->display.output) {
		NV_ERROR(dev, "INIT_AUXCH: no active output\n");
		return 0;
	}

	auxch = init_i2c_device_find(dev, bios->display.output->i2c_index);
	if (!auxch) {
		NV_ERROR(dev, "INIT_AUXCH: couldn't get auxch %d\n",
			 bios->display.output->i2c_index);
		return 0;
	}

	if (!iexec->execute)
		return len;

	offset += 6;
	for (i = 0; i < count; i++, offset += 2) {
		uint8_t data;

		/* aux read of one byte (cmd 9) */
		ret = nouveau_dp_auxch(auxch, 9, addr, &data, 1);
		if (ret) {
			NV_ERROR(dev, "INIT_AUXCH: rd auxch fail %d\n", ret);
			return 0;
		}

		/* read-modify-write under the per-entry mask */
		data &= bios->data[offset + 0];
		data |= bios->data[offset + 1];

		/* aux write of the merged byte (cmd 8) */
		ret = nouveau_dp_auxch(auxch, 8, addr, &data, 1);
		if (ret) {
			NV_ERROR(dev, "INIT_AUXCH: wr auxch fail %d\n", ret);
			return 0;
		}
	}

	return len;
}
2878
static int
init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
	/*
	 * INIT_ZM_AUXCH opcode: 0x99 ('')
	 *
	 * offset (8 bit): opcode
	 * offset + 1 (32 bit): address
	 * offset + 5 (8 bit): count
	 * offset + 6 (8 bit): data 0
	 * ...
	 *
	 * Write "count" data bytes, in sequence, to DP aux address
	 * "address".  Requires an active output so the aux channel can be
	 * located.
	 */

	struct drm_device *dev = bios->dev;
	struct nouveau_i2c_chan *auxch;
	uint32_t addr = ROM32(bios->data[offset + 1]);
	uint8_t count = bios->data[offset + 5];
	int len = 6 + count;
	int ret, i;

	if (!bios->display.output) {
		NV_ERROR(dev, "INIT_ZM_AUXCH: no active output\n");
		return 0;
	}

	auxch = init_i2c_device_find(dev, bios->display.output->i2c_index);
	if (!auxch) {
		NV_ERROR(dev, "INIT_ZM_AUXCH: couldn't get auxch %d\n",
			 bios->display.output->i2c_index);
		return 0;
	}

	if (!iexec->execute)
		return len;

	offset += 6;
	for (i = 0; i < count; i++, offset++) {
		/* aux write of one byte (cmd 8), straight from the table */
		ret = nouveau_dp_auxch(auxch, 8, addr, &bios->data[offset], 1);
		if (ret) {
			NV_ERROR(dev, "INIT_ZM_AUXCH: wr auxch fail %d\n", ret);
			return 0;
		}
	}

	return len;
}
2926
/* Opcode dispatch table for parse_init_table(); terminated by a NULL name. */
static struct init_tbl_entry itbl_entry[] = {
	/* command name , id , command handler */
	/* INIT_PROG (0x31, 15, 10, 4) removed due to no example of use */
	{ "INIT_IO_RESTRICT_PROG" , 0x32, init_io_restrict_prog },
	{ "INIT_REPEAT" , 0x33, init_repeat },
	{ "INIT_IO_RESTRICT_PLL" , 0x34, init_io_restrict_pll },
	{ "INIT_END_REPEAT" , 0x36, init_end_repeat },
	{ "INIT_COPY" , 0x37, init_copy },
	{ "INIT_NOT" , 0x38, init_not },
	{ "INIT_IO_FLAG_CONDITION" , 0x39, init_io_flag_condition },
	{ "INIT_INDEX_ADDRESS_LATCHED" , 0x49, init_idx_addr_latched },
	{ "INIT_IO_RESTRICT_PLL2" , 0x4A, init_io_restrict_pll2 },
	{ "INIT_PLL2" , 0x4B, init_pll2 },
	{ "INIT_I2C_BYTE" , 0x4C, init_i2c_byte },
	{ "INIT_ZM_I2C_BYTE" , 0x4D, init_zm_i2c_byte },
	{ "INIT_ZM_I2C" , 0x4E, init_zm_i2c },
	{ "INIT_TMDS" , 0x4F, init_tmds },
	{ "INIT_ZM_TMDS_GROUP" , 0x50, init_zm_tmds_group },
	{ "INIT_CR_INDEX_ADDRESS_LATCHED" , 0x51, init_cr_idx_adr_latch },
	{ "INIT_CR" , 0x52, init_cr },
	{ "INIT_ZM_CR" , 0x53, init_zm_cr },
	{ "INIT_ZM_CR_GROUP" , 0x54, init_zm_cr_group },
	{ "INIT_CONDITION_TIME" , 0x56, init_condition_time },
	{ "INIT_ZM_REG_SEQUENCE" , 0x58, init_zm_reg_sequence },
	/* INIT_INDIRECT_REG (0x5A, 7, 0, 0) removed due to no example of use */
	{ "INIT_SUB_DIRECT" , 0x5B, init_sub_direct },
	{ "INIT_COPY_NV_REG" , 0x5F, init_copy_nv_reg },
	{ "INIT_ZM_INDEX_IO" , 0x62, init_zm_index_io },
	{ "INIT_COMPUTE_MEM" , 0x63, init_compute_mem },
	{ "INIT_RESET" , 0x65, init_reset },
	{ "INIT_CONFIGURE_MEM" , 0x66, init_configure_mem },
	{ "INIT_CONFIGURE_CLK" , 0x67, init_configure_clk },
	{ "INIT_CONFIGURE_PREINIT" , 0x68, init_configure_preinit },
	{ "INIT_IO" , 0x69, init_io },
	{ "INIT_SUB" , 0x6B, init_sub },
	{ "INIT_RAM_CONDITION" , 0x6D, init_ram_condition },
	{ "INIT_NV_REG" , 0x6E, init_nv_reg },
	{ "INIT_MACRO" , 0x6F, init_macro },
	{ "INIT_DONE" , 0x71, init_done },
	{ "INIT_RESUME" , 0x72, init_resume },
	/* INIT_RAM_CONDITION2 (0x73, 9, 0, 0) removed due to no example of use */
	{ "INIT_TIME" , 0x74, init_time },
	{ "INIT_CONDITION" , 0x75, init_condition },
	{ "INIT_IO_CONDITION" , 0x76, init_io_condition },
	{ "INIT_INDEX_IO" , 0x78, init_index_io },
	{ "INIT_PLL" , 0x79, init_pll },
	{ "INIT_ZM_REG" , 0x7A, init_zm_reg },
	{ "INIT_RAM_RESTRICT_PLL" , 0x87, init_ram_restrict_pll },
	{ "INIT_8C" , 0x8C, init_8c },
	{ "INIT_8D" , 0x8D, init_8d },
	{ "INIT_GPIO" , 0x8E, init_gpio },
	{ "INIT_RAM_RESTRICT_ZM_REG_GROUP" , 0x8F, init_ram_restrict_zm_reg_group },
	{ "INIT_COPY_ZM_REG" , 0x90, init_copy_zm_reg },
	{ "INIT_ZM_REG_GROUP_ADDRESS_LATCHED" , 0x91, init_zm_reg_group_addr_latched },
	{ "INIT_RESERVED" , 0x92, init_reserved },
	{ "INIT_96" , 0x96, init_96 },
	{ "INIT_97" , 0x97, init_97 },
	{ "INIT_AUXCH" , 0x98, init_auxch },
	{ "INIT_ZM_AUXCH" , 0x99, init_zm_auxch },
	{ NULL , 0 , NULL }
};
2988
2989#define MAX_TABLE_OPS 1000
2990
2991static int
2992parse_init_table(struct nvbios *bios, unsigned int offset,
2993 struct init_exec *iexec)
2994{
2995 /*
2996 * Parses all commands in an init table.
2997 *
2998 * We start out executing all commands found in the init table. Some
2999 * opcodes may change the status of iexec->execute to SKIP, which will
3000 * cause the following opcodes to perform no operation until the value
3001 * is changed back to EXECUTE.
3002 */
3003
3004 int count = 0, i, res;
3005 uint8_t id;
3006
3007 /*
3008 * Loop until INIT_DONE causes us to break out of the loop
3009 * (or until offset > bios length just in case... )
3010 * (and no more than MAX_TABLE_OPS iterations, just in case... )
3011 */
3012 while ((offset < bios->length) && (count++ < MAX_TABLE_OPS)) {
3013 id = bios->data[offset];
3014
3015 /* Find matching id in itbl_entry */
3016 for (i = 0; itbl_entry[i].name && (itbl_entry[i].id != id); i++)
3017 ;
3018
3019 if (itbl_entry[i].name) {
3020 BIOSLOG(bios, "0x%04X: [ (0x%02X) - %s ]\n",
3021 offset, itbl_entry[i].id, itbl_entry[i].name);
3022
3023 /* execute eventual command handler */
3024 res = (*itbl_entry[i].handler)(bios, offset, iexec);
3025 if (!res)
3026 break;
3027 /*
3028 * Add the offset of the current command including all data
3029 * of that command. The offset will then be pointing on the
3030 * next op code.
3031 */
3032 offset += res;
3033 } else {
3034 NV_ERROR(bios->dev,
3035 "0x%04X: Init table command not found: "
3036 "0x%02X\n", offset, id);
3037 return -ENOENT;
3038 }
3039 }
3040
3041 if (offset >= bios->length)
3042 NV_WARN(bios->dev,
3043 "Offset 0x%04X greater than known bios image length. "
3044 "Corrupt image?\n", offset);
3045 if (count >= MAX_TABLE_OPS)
3046 NV_WARN(bios->dev,
3047 "More than %d opcodes to a table is unlikely, "
3048 "is the bios image corrupt?\n", MAX_TABLE_OPS);
3049
3050 return 0;
3051}
3052
3053static void
3054parse_init_tables(struct nvbios *bios)
3055{
3056 /* Loops and calls parse_init_table() for each present table. */
3057
3058 int i = 0;
3059 uint16_t table;
3060 struct init_exec iexec = {true, false};
3061
3062 if (bios->old_style_init) {
3063 if (bios->init_script_tbls_ptr)
3064 parse_init_table(bios, bios->init_script_tbls_ptr, &iexec);
3065 if (bios->extra_init_script_tbl_ptr)
3066 parse_init_table(bios, bios->extra_init_script_tbl_ptr, &iexec);
3067
3068 return;
3069 }
3070
3071 while ((table = ROM16(bios->data[bios->init_script_tbls_ptr + i]))) {
3072 NV_INFO(bios->dev,
3073 "Parsing VBIOS init table %d at offset 0x%04X\n",
3074 i / 2, table);
3075 BIOSLOG(bios, "0x%04X: ------ Executing following commands ------\n", table);
3076
3077 parse_init_table(bios, table, &iexec);
3078 i += 2;
3079 }
3080}
3081
3082static uint16_t clkcmptable(struct nvbios *bios, uint16_t clktable, int pxclk)
3083{
3084 int compare_record_len, i = 0;
3085 uint16_t compareclk, scriptptr = 0;
3086
3087 if (bios->major_version < 5) /* pre BIT */
3088 compare_record_len = 3;
3089 else
3090 compare_record_len = 4;
3091
3092 do {
3093 compareclk = ROM16(bios->data[clktable + compare_record_len * i]);
3094 if (pxclk >= compareclk * 10) {
3095 if (bios->major_version < 5) {
3096 uint8_t tmdssub = bios->data[clktable + 2 + compare_record_len * i];
3097 scriptptr = ROM16(bios->data[bios->init_script_tbls_ptr + tmdssub * 2]);
3098 } else
3099 scriptptr = ROM16(bios->data[clktable + 2 + compare_record_len * i]);
3100 break;
3101 }
3102 i++;
3103 } while (compareclk);
3104
3105 return scriptptr;
3106}
3107
static void
run_digital_op_script(struct drm_device *dev, uint16_t scriptptr,
		      struct dcb_entry *dcbent, int head, bool dl)
{
	/*
	 * Run a digital output init script at "scriptptr": select the
	 * target head via CR44, latch the DCB index into CR58/CR57=0,
	 * execute the script, then bind the head to the DFP.
	 */
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvbios *bios = &dev_priv->vbios;
	struct init_exec iexec = {true, false};

	NV_TRACE(dev, "0x%04X: Parsing digital output script table\n",
		 scriptptr);
	bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_44,
		       head ? NV_CIO_CRE_44_HEADB : NV_CIO_CRE_44_HEADA);
	/* note: if dcb entries have been merged, index may be misleading */
	NVWriteVgaCrtc5758(dev, head, 0, dcbent->index);
	parse_init_table(bios, scriptptr, &iexec);

	nv04_dfp_bind_head(dev, dcbent, head, dl);
}
3126
/* Run the BMP-style (pre-BIT) LVDS manufacturer script selected by "script". */
static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entry *dcbent, int head, enum LVDS_script script)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvbios *bios = &dev_priv->vbios;
	/* script index from the xlated LVDS entry; when link_c_increment is
	 * set, outputs on link C use the following script instead */
	uint8_t sub = bios->data[bios->fp.xlated_entry + script] + (bios->fp.link_c_increment && dcbent->or & OUTPUT_C ? 1 : 0);
	uint16_t scriptofs = ROM16(bios->data[bios->init_script_tbls_ptr + sub * 2]);

	if (!bios->fp.xlated_entry || !sub || !scriptofs)
		return -EINVAL;

	run_digital_op_script(dev, scriptofs, dcbent, head, bios->fp.dual_link);

	if (script == LVDS_PANEL_OFF) {
		/* off-on delay in ms */
		msleep(ROM16(bios->data[bios->fp.xlated_entry + 7]));
	}
#ifdef __powerpc__
	/* Powerbook specific quirks */
	if ((dev->pci_device & 0xffff) == 0x0179 ||
	    (dev->pci_device & 0xffff) == 0x0189 ||
	    (dev->pci_device & 0xffff) == 0x0329) {
		if (script == LVDS_RESET) {
			nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);

		} else if (script == LVDS_PANEL_ON) {
			bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL,
				  bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL)
				  | (1 << 31));
			bios_wr32(bios, NV_PCRTC_GPIO_EXT,
				  bios_rd32(bios, NV_PCRTC_GPIO_EXT) | 1);

		} else if (script == LVDS_PANEL_OFF) {
			bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL,
				  bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL)
				  & ~(1 << 31));
			bios_wr32(bios, NV_PCRTC_GPIO_EXT,
				  bios_rd32(bios, NV_PCRTC_GPIO_EXT) & ~3);
		}
	}
#endif

	return 0;
}
3170
static int run_lvds_table(struct drm_device *dev, struct dcb_entry *dcbent, int head, enum LVDS_script script, int pxclk)
{
	/*
	 * The BIT LVDS table's header has the information to setup the
	 * necessary registers. Following the standard 4 byte header are:
	 * A bitmask byte and a dual-link transition pxclk value for use in
	 * selecting the init script when not using straps; 4 script pointers
	 * for panel power, selected by output and on/off; and 8 table pointers
	 * for panel init, the needed one determined by output, and bits in the
	 * conf byte. These tables are similar to the TMDS tables, consisting
	 * of a list of pxclks and script pointers.
	 */
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvbios *bios = &dev_priv->vbios;
	/* an OR value of 4 selects the second set of power scripts/tables */
	unsigned int outputset = (dcbent->or == 4) ? 1 : 0;
	uint16_t scriptptr = 0, clktable;

	/*
	 * For now we assume version 3.0 table - g80 support will need some
	 * changes
	 */

	switch (script) {
	case LVDS_INIT:
		return -ENOSYS;
	case LVDS_BACKLIGHT_ON:
	case LVDS_PANEL_ON:
		/* "panel on" script pointer pair at header offset 7 */
		scriptptr = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 7 + outputset * 2]);
		break;
	case LVDS_BACKLIGHT_OFF:
	case LVDS_PANEL_OFF:
		/* "panel off" script pointer pair at header offset 11 */
		scriptptr = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 11 + outputset * 2]);
		break;
	case LVDS_RESET:
		/* select one of 8 clock comparison table pointers by output
		 * set, link count, and colour depth (straps or EDID) */
		clktable = bios->fp.lvdsmanufacturerpointer + 15;
		if (dcbent->or == 4)
			clktable += 8;

		if (dcbent->lvdsconf.use_straps_for_mode) {
			if (bios->fp.dual_link)
				clktable += 4;
			if (bios->fp.if_is_24bit)
				clktable += 2;
		} else {
			/* using EDID */
			int cmpval_24bit = (dcbent->or == 4) ? 4 : 1;

			if (bios->fp.dual_link) {
				clktable += 4;
				cmpval_24bit <<= 1;
			}

			if (bios->fp.strapless_is_24bit & cmpval_24bit)
				clktable += 2;
		}

		clktable = ROM16(bios->data[clktable]);
		if (!clktable) {
			NV_ERROR(dev, "Pixel clock comparison table not found\n");
			return -ENOENT;
		}
		scriptptr = clkcmptable(bios, clktable, pxclk);
	}

	if (!scriptptr) {
		NV_ERROR(dev, "LVDS output init script not found\n");
		return -ENOENT;
	}
	run_digital_op_script(dev, scriptptr, dcbent, head, bios->fp.dual_link);

	return 0;
}
3243
int call_lvds_script(struct drm_device *dev, struct dcb_entry *dcbent, int head, enum LVDS_script script, int pxclk)
{
	/*
	 * LVDS operations are multiplexed in an effort to present a single API
	 * which works with two vastly differing underlying structures.
	 * This acts as the demux
	 */

	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvbios *bios = &dev_priv->vbios;
	uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer];
	uint32_t sel_clk_binding, sel_clk;
	int ret;

	/* skip back-to-back repeats of the same script/head, and skip
	 * LVDS_INIT entirely on BIT (>= 0x30) tables */
	if (bios->fp.last_script_invoc == (script << 1 | head) || !lvds_ver ||
	    (lvds_ver >= 0x30 && script == LVDS_INIT))
		return 0;

	/* recursive pre-scripts: INIT once per boot, then RESET before
	 * PANEL_ON / PANEL_OFF before RESET when the table asks for them */
	if (!bios->fp.lvds_init_run) {
		bios->fp.lvds_init_run = true;
		call_lvds_script(dev, dcbent, head, LVDS_INIT, pxclk);
	}

	if (script == LVDS_PANEL_ON && bios->fp.reset_after_pclk_change)
		call_lvds_script(dev, dcbent, head, LVDS_RESET, pxclk);
	if (script == LVDS_RESET && bios->fp.power_off_for_reset)
		call_lvds_script(dev, dcbent, head, LVDS_PANEL_OFF, pxclk);

	NV_TRACE(dev, "Calling LVDS script %d:\n", script);

	/* don't let script change pll->head binding */
	sel_clk_binding = bios_rd32(bios, NV_PRAMDAC_SEL_CLK) & 0x50000;

	if (lvds_ver < 0x30)
		ret = call_lvds_manufacturer_script(dev, dcbent, head, script);
	else
		ret = run_lvds_table(dev, dcbent, head, script, pxclk);

	bios->fp.last_script_invoc = (script << 1 | head);

	/* restore the saved pll->head binding bits */
	sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK) & ~0x50000;
	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, sel_clk | sel_clk_binding);
	/* some scripts set a value in NV_PBUS_POWERCTRL_2 and break video overlay */
	nvWriteMC(dev, NV_PBUS_POWERCTRL_2, 0);

	return ret;
}
3291
/* Parsed LVDS manufacturer table header; filled in by
 * parse_lvds_manufacturer_table_header(), which documents the layout
 * differences between table versions. */
struct lvdstableheader {
	uint8_t lvds_ver, headerlen, recordlen;
};
3295
3296static int parse_lvds_manufacturer_table_header(struct drm_device *dev, struct nvbios *bios, struct lvdstableheader *lth)
3297{
3298 /*
3299 * BMP version (0xa) LVDS table has a simple header of version and
3300 * record length. The BIT LVDS table has the typical BIT table header:
3301 * version byte, header length byte, record length byte, and a byte for
3302 * the maximum number of records that can be held in the table.
3303 */
3304
3305 uint8_t lvds_ver, headerlen, recordlen;
3306
3307 memset(lth, 0, sizeof(struct lvdstableheader));
3308
3309 if (bios->fp.lvdsmanufacturerpointer == 0x0) {
3310 NV_ERROR(dev, "Pointer to LVDS manufacturer table invalid\n");
3311 return -EINVAL;
3312 }
3313
3314 lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer];
3315
3316 switch (lvds_ver) {
3317 case 0x0a: /* pre NV40 */
3318 headerlen = 2;
3319 recordlen = bios->data[bios->fp.lvdsmanufacturerpointer + 1];
3320 break;
3321 case 0x30: /* NV4x */
3322 headerlen = bios->data[bios->fp.lvdsmanufacturerpointer + 1];
3323 if (headerlen < 0x1f) {
3324 NV_ERROR(dev, "LVDS table header not understood\n");
3325 return -EINVAL;
3326 }
3327 recordlen = bios->data[bios->fp.lvdsmanufacturerpointer + 2];
3328 break;
3329 case 0x40: /* G80/G90 */
3330 headerlen = bios->data[bios->fp.lvdsmanufacturerpointer + 1];
3331 if (headerlen < 0x7) {
3332 NV_ERROR(dev, "LVDS table header not understood\n");
3333 return -EINVAL;
3334 }
3335 recordlen = bios->data[bios->fp.lvdsmanufacturerpointer + 2];
3336 break;
3337 default:
3338 NV_ERROR(dev,
3339 "LVDS table revision %d.%d not currently supported\n",
3340 lvds_ver >> 4, lvds_ver & 0xf);
3341 return -ENOSYS;
3342 }
3343
3344 lth->lvds_ver = lvds_ver;
3345 lth->headerlen = headerlen;
3346 lth->recordlen = recordlen;
3347
3348 return 0;
3349}
3350
3351static int
3352get_fp_strap(struct drm_device *dev, struct nvbios *bios)
3353{
3354 struct drm_nouveau_private *dev_priv = dev->dev_private;
3355
3356 /*
3357 * The fp strap is normally dictated by the "User Strap" in
3358 * PEXTDEV_BOOT_0[20:16], but on BMP cards when bit 2 of the
3359 * Internal_Flags struct at 0x48 is set, the user strap gets overriden
3360 * by the PCI subsystem ID during POST, but not before the previous user
3361 * strap has been committed to CR58 for CR57=0xf on head A, which may be
3362 * read and used instead
3363 */
3364
3365 if (bios->major_version < 5 && bios->data[0x48] & 0x4)
3366 return NVReadVgaCrtc5758(dev, 0, 0xf) & 0xf;
3367
3368 if (dev_priv->card_type >= NV_50)
3369 return (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 24) & 0xf;
3370 else
3371 return (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 16) & 0xf;
3372}
3373
/* Locate the BIOS flat panel mode for this board's fp strap, recording the
 * mode pointer and minimum front porch in *bios.  Returns 0 when no strap
 * mode exists (not an error) or a negative errno on malformed tables. */
static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
{
	uint8_t *fptable;
	uint8_t fptable_ver, headerlen = 0, recordlen, fpentries = 0xf, fpindex;
	int ret, ofs, fpstrapping;
	struct lvdstableheader lth;

	if (bios->fp.fptablepointer == 0x0) {
		/* Apple cards don't have the fp table; the laptops use DDC */
		/* The table is also missing on some x86 IGPs */
#ifndef __powerpc__
		NV_ERROR(dev, "Pointer to flat panel table invalid\n");
#endif
		bios->digital_min_front_porch = 0x4b;
		return 0;
	}

	fptable = &bios->data[bios->fp.fptablepointer];
	fptable_ver = fptable[0];

	switch (fptable_ver) {
	/*
	 * BMP version 0x5.0x11 BIOSen have version 1 like tables, but no
	 * version field, and miss one of the spread spectrum/PWM bytes.
	 * This could affect early GF2Go parts (not seen any appropriate ROMs
	 * though). Here we assume that a version of 0x05 matches this case
	 * (combining with a BMP version check would be better), as the
	 * common case for the panel type field is 0x0005, and that is in
	 * fact what we are reading the first byte of.
	 */
	case 0x05: /* some NV10, 11, 15, 16 */
		recordlen = 42;
		ofs = -1;	/* record offset compensates for missing byte */
		break;
	case 0x10: /* some NV15/16, and NV11+ */
		recordlen = 44;
		ofs = 0;
		break;
	case 0x20: /* NV40+ */
		headerlen = fptable[1];
		recordlen = fptable[2];
		fpentries = fptable[3];
		/*
		 * fptable[4] is the minimum
		 * RAMDAC_FP_HCRTC -> RAMDAC_FP_HSYNC_START gap
		 */
		bios->digital_min_front_porch = fptable[4];
		ofs = -7;
		break;
	default:
		NV_ERROR(dev,
			 "FP table revision %d.%d not currently supported\n",
			 fptable_ver >> 4, fptable_ver & 0xf);
		return -ENOSYS;
	}

	if (!bios->is_mobile) /* !mobile only needs digital_min_front_porch */
		return 0;

	ret = parse_lvds_manufacturer_table_header(dev, bios, &lth);
	if (ret)
		return ret;

	/* BIT-era LVDS tables double as the strap -> fp index xlat table */
	if (lth.lvds_ver == 0x30 || lth.lvds_ver == 0x40) {
		bios->fp.fpxlatetableptr = bios->fp.lvdsmanufacturerpointer +
							lth.headerlen + 1;
		bios->fp.xlatwidth = lth.recordlen;
	}
	if (bios->fp.fpxlatetableptr == 0x0) {
		NV_ERROR(dev, "Pointer to flat panel xlat table invalid\n");
		return -EINVAL;
	}

	fpstrapping = get_fp_strap(dev, bios);

	fpindex = bios->data[bios->fp.fpxlatetableptr +
					fpstrapping * bios->fp.xlatwidth];

	if (fpindex > fpentries) {
		NV_ERROR(dev, "Bad flat panel table index\n");
		return -ENOENT;
	}

	/* nv4x cards need both a strap value and fpindex of 0xf to use DDC */
	if (lth.lvds_ver > 0x10)
		bios->fp_no_ddc = fpstrapping != 0xf || fpindex != 0xf;

	/*
	 * If either the strap or xlated fpindex value are 0xf there is no
	 * panel using a strap-derived bios mode present. this condition
	 * includes, but is different from, the DDC panel indicator above
	 */
	if (fpstrapping == 0xf || fpindex == 0xf)
		return 0;

	bios->fp.mode_ptr = bios->fp.fptablepointer + headerlen +
			    recordlen * fpindex + ofs;

	NV_TRACE(dev, "BIOS FP mode: %dx%d (%dkHz pixel clock)\n",
		 ROM16(bios->data[bios->fp.mode_ptr + 11]) + 1,
		 ROM16(bios->data[bios->fp.mode_ptr + 25]) + 1,
		 ROM16(bios->data[bios->fp.mode_ptr + 7]) * 10);

	return 0;
}
3479
3480bool nouveau_bios_fp_mode(struct drm_device *dev, struct drm_display_mode *mode)
3481{
3482 struct drm_nouveau_private *dev_priv = dev->dev_private;
3483 struct nvbios *bios = &dev_priv->vbios;
3484 uint8_t *mode_entry = &bios->data[bios->fp.mode_ptr];
3485
3486 if (!mode) /* just checking whether we can produce a mode */
3487 return bios->fp.mode_ptr;
3488
3489 memset(mode, 0, sizeof(struct drm_display_mode));
3490 /*
3491 * For version 1.0 (version in byte 0):
3492 * bytes 1-2 are "panel type", including bits on whether Colour/mono,
3493 * single/dual link, and type (TFT etc.)
3494 * bytes 3-6 are bits per colour in RGBX
3495 */
3496 mode->clock = ROM16(mode_entry[7]) * 10;
3497 /* bytes 9-10 is HActive */
3498 mode->hdisplay = ROM16(mode_entry[11]) + 1;
3499 /*
3500 * bytes 13-14 is HValid Start
3501 * bytes 15-16 is HValid End
3502 */
3503 mode->hsync_start = ROM16(mode_entry[17]) + 1;
3504 mode->hsync_end = ROM16(mode_entry[19]) + 1;
3505 mode->htotal = ROM16(mode_entry[21]) + 1;
3506 /* bytes 23-24, 27-30 similarly, but vertical */
3507 mode->vdisplay = ROM16(mode_entry[25]) + 1;
3508 mode->vsync_start = ROM16(mode_entry[31]) + 1;
3509 mode->vsync_end = ROM16(mode_entry[33]) + 1;
3510 mode->vtotal = ROM16(mode_entry[35]) + 1;
3511 mode->flags |= (mode_entry[37] & 0x10) ?
3512 DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
3513 mode->flags |= (mode_entry[37] & 0x1) ?
3514 DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
3515 /*
3516 * bytes 38-39 relate to spread spectrum settings
3517 * bytes 40-43 are something to do with PWM
3518 */
3519
3520 mode->status = MODE_OK;
3521 mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
3522 drm_mode_set_name(mode);
3523 return bios->fp.mode_ptr;
3524}
3525
int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, bool *if_is_24bit)
{
	/*
	 * The LVDS table header is (mostly) described in
	 * parse_lvds_manufacturer_table_header(): the BIT header additionally
	 * contains the dual-link transition pxclk (in 10s kHz), at byte 5 - if
	 * straps are not being used for the panel, this specifies the frequency
	 * at which modes should be set up in the dual link style.
	 *
	 * Following the header, the BMP (ver 0xa) table has several records,
	 * indexed by a separate xlat table, indexed in turn by the fp strap in
	 * EXTDEV_BOOT. Each record had a config byte, followed by 6 script
	 * numbers for use by INIT_SUB which controlled panel init and power,
	 * and finally a dword of ms to sleep between power off and on
	 * operations.
	 *
	 * In the BIT versions, the table following the header serves as an
	 * integrated config and xlat table: the records in the table are
	 * indexed by the FP strap nibble in EXTDEV_BOOT, and each record has
	 * two bytes - the first as a config byte, the second for indexing the
	 * fp mode table pointed to by the BIT 'D' table
	 *
	 * DDC is not used until after card init, so selecting the correct table
	 * entry and setting the dual link flag for EDID equipped panels,
	 * requiring tests against the native-mode pixel clock, cannot be done
	 * until later, when this function should be called with non-zero pxclk
	 */
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvbios *bios = &dev_priv->vbios;
	int fpstrapping = get_fp_strap(dev, bios), lvdsmanufacturerindex = 0;
	struct lvdstableheader lth;
	uint16_t lvdsofs;
	int ret, chip_version = bios->chip_version;

	ret = parse_lvds_manufacturer_table_header(dev, bios, &lth);
	if (ret)
		return ret;

	/* first, select the record index for this table version / chip */
	switch (lth.lvds_ver) {
	case 0x0a: /* pre NV40 */
		lvdsmanufacturerindex = bios->data[
					bios->fp.fpxlatemanufacturertableptr +
					fpstrapping];

		/* we're done if this isn't the EDID panel case */
		if (!pxclk)
			break;

		if (chip_version < 0x25) {
			/* nv17 behaviour
			 *
			 * It seems the old style lvds script pointer is reused
			 * to select 18/24 bit colour depth for EDID panels.
			 */
			lvdsmanufacturerindex =
				(bios->legacy.lvds_single_a_script_ptr & 1) ?
									2 : 0;
			if (pxclk >= bios->fp.duallink_transition_clk)
				lvdsmanufacturerindex++;
		} else if (chip_version < 0x30) {
			/* nv28 behaviour (off-chip encoder)
			 *
			 * nv28 does a complex dance of first using byte 121 of
			 * the EDID to choose the lvdsmanufacturerindex, then
			 * later attempting to match the EDID manufacturer and
			 * product IDs in a table (signature 'pidt' (panel id
			 * table?)), setting an lvdsmanufacturerindex of 0 and
			 * an fp strap of the match index (or 0xf if none)
			 */
			lvdsmanufacturerindex = 0;
		} else {
			/* nv31, nv34 behaviour */
			lvdsmanufacturerindex = 0;
			if (pxclk >= bios->fp.duallink_transition_clk)
				lvdsmanufacturerindex = 2;
			if (pxclk >= 140000)
				lvdsmanufacturerindex = 3;
		}

		/*
		 * nvidia set the high nibble of (cr57=f, cr58) to
		 * lvdsmanufacturerindex in this case; we don't
		 */
		break;
	case 0x30: /* NV4x */
	case 0x40: /* G80/G90 */
		/* BIT tables are indexed directly by the fp strap */
		lvdsmanufacturerindex = fpstrapping;
		break;
	default:
		NV_ERROR(dev, "LVDS table revision not currently supported\n");
		return -ENOSYS;
	}

	/* then decode the config byte(s) of the selected record */
	lvdsofs = bios->fp.xlated_entry = bios->fp.lvdsmanufacturerpointer + lth.headerlen + lth.recordlen * lvdsmanufacturerindex;
	switch (lth.lvds_ver) {
	case 0x0a:
		bios->fp.power_off_for_reset = bios->data[lvdsofs] & 1;
		bios->fp.reset_after_pclk_change = bios->data[lvdsofs] & 2;
		bios->fp.dual_link = bios->data[lvdsofs] & 4;
		bios->fp.link_c_increment = bios->data[lvdsofs] & 8;
		*if_is_24bit = bios->data[lvdsofs] & 16;
		break;
	case 0x30:
	case 0x40:
		/*
		 * No sign of the "power off for reset" or "reset for panel
		 * on" bits, but it's safer to assume we should
		 */
		bios->fp.power_off_for_reset = true;
		bios->fp.reset_after_pclk_change = true;

		/*
		 * It's ok lvdsofs is wrong for nv4x edid case; dual_link is
		 * over-written, and if_is_24bit isn't used
		 */
		bios->fp.dual_link = bios->data[lvdsofs] & 1;
		bios->fp.if_is_24bit = bios->data[lvdsofs] & 2;
		bios->fp.strapless_is_24bit = bios->data[bios->fp.lvdsmanufacturerpointer + 4];
		bios->fp.duallink_transition_clk = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 5]) * 10;
		break;
	}

	/* Dell Latitude D620 reports a too-high value for the dual-link
	 * transition freq, causing us to program the panel incorrectly.
	 *
	 * It doesn't appear the VBIOS actually uses its transition freq
	 * (90000kHz), instead it uses the "Number of LVDS channels" field
	 * out of the panel ID structure (http://www.spwg.org/).
	 *
	 * For the moment, a quirk will do :)
	 */
	if ((dev->pdev->device == 0x01d7) &&
	    (dev->pdev->subsystem_vendor == 0x1028) &&
	    (dev->pdev->subsystem_device == 0x01c2)) {
		bios->fp.duallink_transition_clk = 80000;
	}

	/* set dual_link flag for EDID case */
	if (pxclk && (chip_version < 0x25 || chip_version > 0x28))
		bios->fp.dual_link = (pxclk >= bios->fp.duallink_transition_clk);

	*dl = bios->fp.dual_link;

	return 0;
}
3671
3672static uint8_t *
3673bios_output_config_match(struct drm_device *dev, struct dcb_entry *dcbent,
3674 uint16_t record, int record_len, int record_nr)
3675{
3676 struct drm_nouveau_private *dev_priv = dev->dev_private;
3677 struct nvbios *bios = &dev_priv->vbios;
3678 uint32_t entry;
3679 uint16_t table;
3680 int i, v;
3681
3682 for (i = 0; i < record_nr; i++, record += record_len) {
3683 table = ROM16(bios->data[record]);
3684 if (!table)
3685 continue;
3686 entry = ROM32(bios->data[table]);
3687
3688 v = (entry & 0x000f0000) >> 16;
3689 if (!(v & dcbent->or))
3690 continue;
3691
3692 v = (entry & 0x000000f0) >> 4;
3693 if (v != dcbent->location)
3694 continue;
3695
3696 v = (entry & 0x0000000f);
3697 if (v != dcbent->type)
3698 continue;
3699
3700 return &bios->data[table];
3701 }
3702
3703 return NULL;
3704}
3705
3706void *
3707nouveau_bios_dp_table(struct drm_device *dev, struct dcb_entry *dcbent,
3708 int *length)
3709{
3710 struct drm_nouveau_private *dev_priv = dev->dev_private;
3711 struct nvbios *bios = &dev_priv->vbios;
3712 uint8_t *table;
3713
3714 if (!bios->display.dp_table_ptr) {
3715 NV_ERROR(dev, "No pointer to DisplayPort table\n");
3716 return NULL;
3717 }
3718 table = &bios->data[bios->display.dp_table_ptr];
3719
3720 if (table[0] != 0x20 && table[0] != 0x21) {
3721 NV_ERROR(dev, "DisplayPort table version 0x%02x unknown\n",
3722 table[0]);
3723 return NULL;
3724 }
3725
3726 *length = table[4];
3727 return bios_output_config_match(dev, dcbent,
3728 bios->display.dp_table_ptr + table[1],
3729 table[2], table[3]);
3730}
3731
/*
 * Locate the output script table matching 'dcbent' via the BIT 'U' display
 * script table and run the script selected by 'pxclk':
 *
 *   pxclk ==  0  -> output script 0 (header offset +6)
 *   pxclk == -1  -> output script 1 (header offset +8)
 *   pxclk == -2  -> output script 2 (header offset +10, headerlen >= 12 only)
 *   pxclk >   0  -> clock script 0 matched against pxclk via clkcmptable()
 *   pxclk <  -2  -> clock script 1 matched against -pxclk via clkcmptable()
 *
 * 'sub' selects a configuration entry for the two clock-script cases.
 * Returns 0 on success, 1 on any lookup failure.
 */
int
nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
			       uint32_t sub, int pxclk)
{
	/*
	 * The display script table is located by the BIT 'U' table.
	 *
	 * It contains an array of pointers to various tables describing
	 * a particular output type. The first 32-bits of the output
	 * tables contains similar information to a DCB entry, and is
	 * used to decide whether that particular table is suitable for
	 * the output you want to access.
	 *
	 * The "record header length" field here seems to indicate the
	 * offset of the first configuration entry in the output tables.
	 * This is 10 on most cards I've seen, but 12 has been witnessed
	 * on DP cards, and there's another script pointer within the
	 * header.
	 *
	 * offset + 0 ( 8 bits): version
	 * offset + 1 ( 8 bits): header length
	 * offset + 2 ( 8 bits): record length
	 * offset + 3 ( 8 bits): number of records
	 * offset + 4 ( 8 bits): record header length
	 * offset + 5 (16 bits): pointer to first output script table
	 */

	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvbios *bios = &dev_priv->vbios;
	/* NOTE(review): computed before script_table_ptr is validated below;
	 * safe as written since it is only dereferenced after the check */
	uint8_t *table = &bios->data[bios->display.script_table_ptr];
	uint8_t *otable = NULL;
	uint16_t script;
	int i = 0;

	if (!bios->display.script_table_ptr) {
		NV_ERROR(dev, "No pointer to output script table\n");
		return 1;
	}

	/*
	 * Nothing useful has been in any of the pre-2.0 tables I've seen,
	 * so until they are, we really don't need to care.
	 */
	if (table[0] < 0x20)
		return 1;

	if (table[0] != 0x20 && table[0] != 0x21) {
		NV_ERROR(dev, "Output script table version 0x%02x unknown\n",
			 table[0]);
		return 1;
	}

	/*
	 * The output script tables describing a particular output type
	 * look as follows:
	 *
	 * offset + 0 (32 bits): output this table matches (hash of DCB)
	 * offset + 4 ( 8 bits): unknown
	 * offset + 5 ( 8 bits): number of configurations
	 * offset + 6 (16 bits): pointer to some script
	 * offset + 8 (16 bits): pointer to some script
	 *
	 * headerlen == 10
	 * offset + 10 : configuration 0
	 *
	 * headerlen == 12
	 * offset + 10 : pointer to some script
	 * offset + 12 : configuration 0
	 *
	 * Each config entry is as follows:
	 *
	 * offset + 0 (16 bits): unknown, assumed to be a match value
	 * offset + 2 (16 bits): pointer to script table (clock set?)
	 * offset + 4 (16 bits): pointer to script table (reset?)
	 *
	 * There doesn't appear to be a count value to say how many
	 * entries exist in each script table, instead, a 0 value in
	 * the first 16-bit word seems to indicate both the end of the
	 * list and the default entry. The second 16-bit word in the
	 * script tables is a pointer to the script to execute.
	 */

	NV_DEBUG_KMS(dev, "Searching for output entry for %d %d %d\n",
			dcbent->type, dcbent->location, dcbent->or);
	otable = bios_output_config_match(dev, dcbent, table[1] +
					  bios->display.script_table_ptr,
					  table[2], table[3]);
	if (!otable) {
		NV_ERROR(dev, "Couldn't find matching output script table\n");
		return 1;
	}

	/* clock-script cases: find the config entry whose match value is 'sub' */
	if (pxclk < -2 || pxclk > 0) {
		/* Try to find matching script table entry */
		for (i = 0; i < otable[5]; i++) {
			if (ROM16(otable[table[4] + i*6]) == sub)
				break;
		}

		if (i == otable[5]) {
			NV_ERROR(dev, "Table 0x%04x not found for %d/%d, "
					"using first\n",
				 sub, dcbent->type, dcbent->or);
			i = 0;
		}
	}

	if (pxclk == 0) {
		script = ROM16(otable[6]);
		if (!script) {
			NV_DEBUG_KMS(dev, "output script 0 not found\n");
			return 1;
		}

		NV_TRACE(dev, "0x%04X: parsing output script 0\n", script);
		nouveau_bios_run_init_table(dev, script, dcbent);
	} else
	if (pxclk == -1) {
		script = ROM16(otable[8]);
		if (!script) {
			NV_DEBUG_KMS(dev, "output script 1 not found\n");
			return 1;
		}

		NV_TRACE(dev, "0x%04X: parsing output script 1\n", script);
		nouveau_bios_run_init_table(dev, script, dcbent);
	} else
	if (pxclk == -2) {
		/* script 2 only exists when the record header is >= 12 bytes */
		if (table[4] >= 12)
			script = ROM16(otable[10]);
		else
			script = 0;
		if (!script) {
			NV_DEBUG_KMS(dev, "output script 2 not found\n");
			return 1;
		}

		NV_TRACE(dev, "0x%04X: parsing output script 2\n", script);
		nouveau_bios_run_init_table(dev, script, dcbent);
	} else
	if (pxclk > 0) {
		/* pick the clock-matched script from config entry i (offset +2) */
		script = ROM16(otable[table[4] + i*6 + 2]);
		if (script)
			script = clkcmptable(bios, script, pxclk);
		if (!script) {
			NV_ERROR(dev, "clock script 0 not found\n");
			return 1;
		}

		NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script);
		nouveau_bios_run_init_table(dev, script, dcbent);
	} else
	if (pxclk < 0) {
		/* reached only for pxclk < -2; match against the magnitude */
		script = ROM16(otable[table[4] + i*6 + 4]);
		if (script)
			script = clkcmptable(bios, script, -pxclk);
		if (!script) {
			NV_DEBUG_KMS(dev, "clock script 1 not found\n");
			return 1;
		}

		NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script);
		nouveau_bios_run_init_table(dev, script, dcbent);
	}

	return 0;
}
3899
3900
3901int run_tmds_table(struct drm_device *dev, struct dcb_entry *dcbent, int head, int pxclk)
3902{
3903 /*
3904 * the pxclk parameter is in kHz
3905 *
3906 * This runs the TMDS regs setting code found on BIT bios cards
3907 *
3908 * For ffs(or) == 1 use the first table, for ffs(or) == 2 and
3909 * ffs(or) == 3, use the second.
3910 */
3911
3912 struct drm_nouveau_private *dev_priv = dev->dev_private;
3913 struct nvbios *bios = &dev_priv->vbios;
3914 int cv = bios->chip_version;
3915 uint16_t clktable = 0, scriptptr;
3916 uint32_t sel_clk_binding, sel_clk;
3917
3918 /* pre-nv17 off-chip tmds uses scripts, post nv17 doesn't */
3919 if (cv >= 0x17 && cv != 0x1a && cv != 0x20 &&
3920 dcbent->location != DCB_LOC_ON_CHIP)
3921 return 0;
3922
3923 switch (ffs(dcbent->or)) {
3924 case 1:
3925 clktable = bios->tmds.output0_script_ptr;
3926 break;
3927 case 2:
3928 case 3:
3929 clktable = bios->tmds.output1_script_ptr;
3930 break;
3931 }
3932
3933 if (!clktable) {
3934 NV_ERROR(dev, "Pixel clock comparison table not found\n");
3935 return -EINVAL;
3936 }
3937
3938 scriptptr = clkcmptable(bios, clktable, pxclk);
3939
3940 if (!scriptptr) {
3941 NV_ERROR(dev, "TMDS output init script not found\n");
3942 return -ENOENT;
3943 }
3944
3945 /* don't let script change pll->head binding */
3946 sel_clk_binding = bios_rd32(bios, NV_PRAMDAC_SEL_CLK) & 0x50000;
3947 run_digital_op_script(dev, scriptptr, dcbent, head, pxclk >= 165000);
3948 sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK) & ~0x50000;
3949 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, sel_clk | sel_clk_binding);
3950
3951 return 0;
3952}
3953
/*
 * Fill *pll_lim with the PLL limits for the PLL selected by 'limit_match'
 * (either a pll type enum value <= MAX_PLL_TYPES, or a raw register address).
 * Falls back to hard-coded / crystal-strap-derived defaults for BIOSes
 * without a (usable) limits table.  Returns 0 on success, negative errno
 * on unusable/unknown tables.
 */
int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims *pll_lim)
{
	/*
	 * PLL limits table
	 *
	 * Version 0x10: NV30, NV31
	 * One byte header (version), one record of 24 bytes
	 * Version 0x11: NV36 - Not implemented
	 * Seems to have same record style as 0x10, but 3 records rather than 1
	 * Version 0x20: Found on Geforce 6 cards
	 * Trivial 4 byte BIT header. 31 (0x1f) byte record length
	 * Version 0x21: Found on Geforce 7, 8 and some Geforce 6 cards
	 * 5 byte header, fifth byte of unknown purpose. 35 (0x23) byte record
	 * length in general, some (integrated) have an extra configuration byte
	 * Version 0x30: Found on Geforce 8, separates the register mapping
	 * from the limits tables.
	 */

	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvbios *bios = &dev_priv->vbios;
	int cv = bios->chip_version, pllindex = 0;
	uint8_t pll_lim_ver = 0, headerlen = 0, recordlen = 0, entries = 0;
	uint32_t crystal_strap_mask, crystal_straps;

	/* a missing table is fatal only on chips known to require one */
	if (!bios->pll_limit_tbl_ptr) {
		if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 ||
		    cv >= 0x40) {
			NV_ERROR(dev, "Pointer to PLL limits table invalid\n");
			return -EINVAL;
		}
	} else
		pll_lim_ver = bios->data[bios->pll_limit_tbl_ptr];

	crystal_strap_mask = 1 << 6;
	/* open coded dev->twoHeads test */
	if (cv > 0x10 && cv != 0x15 && cv != 0x1a && cv != 0x20)
		crystal_strap_mask |= 1 << 22;
	crystal_straps = nvReadEXTDEV(dev, NV_PEXTDEV_BOOT_0) &
								crystal_strap_mask;

	/* determine header/record geometry per table revision */
	switch (pll_lim_ver) {
	/*
	 * We use version 0 to indicate a pre limit table bios (single stage
	 * pll) and load the hard coded limits instead.
	 */
	case 0:
		break;
	case 0x10:
	case 0x11:
		/*
		 * Strictly v0x11 has 3 entries, but the last two don't seem
		 * to get used.
		 */
		headerlen = 1;
		recordlen = 0x18;
		entries = 1;
		pllindex = 0;
		break;
	case 0x20:
	case 0x21:
	case 0x30:
	case 0x40:
		headerlen = bios->data[bios->pll_limit_tbl_ptr + 1];
		recordlen = bios->data[bios->pll_limit_tbl_ptr + 2];
		entries = bios->data[bios->pll_limit_tbl_ptr + 3];
		break;
	default:
		NV_ERROR(dev, "PLL limits table revision 0x%X not currently "
				"supported\n", pll_lim_ver);
		return -ENOSYS;
	}

	/* initialize all members to zero */
	memset(pll_lim, 0, sizeof(struct pll_lims));

	if (pll_lim_ver == 0x10 || pll_lim_ver == 0x11) {
		uint8_t *pll_rec = &bios->data[bios->pll_limit_tbl_ptr + headerlen + recordlen * pllindex];

		pll_lim->vco1.minfreq = ROM32(pll_rec[0]);
		pll_lim->vco1.maxfreq = ROM32(pll_rec[4]);
		pll_lim->vco2.minfreq = ROM32(pll_rec[8]);
		pll_lim->vco2.maxfreq = ROM32(pll_rec[12]);
		pll_lim->vco1.min_inputfreq = ROM32(pll_rec[16]);
		pll_lim->vco2.min_inputfreq = ROM32(pll_rec[20]);
		pll_lim->vco1.max_inputfreq = pll_lim->vco2.max_inputfreq = INT_MAX;

		/* these values taken from nv30/31/36 */
		pll_lim->vco1.min_n = 0x1;
		if (cv == 0x36)
			pll_lim->vco1.min_n = 0x5;
		pll_lim->vco1.max_n = 0xff;
		pll_lim->vco1.min_m = 0x1;
		pll_lim->vco1.max_m = 0xd;
		pll_lim->vco2.min_n = 0x4;
		/*
		 * On nv30, 31, 36 (i.e. all cards with two stage PLLs with this
		 * table version (apart from nv35)), N2 is compared to
		 * maxN2 (0x46) and 10 * maxM2 (0x4), so set maxN2 to 0x28 and
		 * save a comparison
		 */
		pll_lim->vco2.max_n = 0x28;
		if (cv == 0x30 || cv == 0x35)
			/* only 5 bits available for N2 on nv30/35 */
			pll_lim->vco2.max_n = 0x1f;
		pll_lim->vco2.min_m = 0x1;
		pll_lim->vco2.max_m = 0x4;
		pll_lim->max_log2p = 0x7;
		pll_lim->max_usable_log2p = 0x6;
	} else if (pll_lim_ver == 0x20 || pll_lim_ver == 0x21) {
		uint16_t plloffs = bios->pll_limit_tbl_ptr + headerlen;
		uint32_t reg = 0; /* default match */
		uint8_t *pll_rec;
		int i;

		/*
		 * First entry is default match, if nothing better. warn if
		 * reg field nonzero
		 */
		if (ROM32(bios->data[plloffs]))
			NV_WARN(dev, "Default PLL limit entry has non-zero "
					"register field\n");

		if (limit_match > MAX_PLL_TYPES)
			/* we've been passed a reg as the match */
			reg = limit_match;
		else /* limit match is a pll type */
			for (i = 1; i < entries && !reg; i++) {
				uint32_t cmpreg = ROM32(bios->data[plloffs + recordlen * i]);

				if (limit_match == NVPLL &&
				    (cmpreg == NV_PRAMDAC_NVPLL_COEFF || cmpreg == 0x4000))
					reg = cmpreg;
				if (limit_match == MPLL &&
				    (cmpreg == NV_PRAMDAC_MPLL_COEFF || cmpreg == 0x4020))
					reg = cmpreg;
				if (limit_match == VPLL1 &&
				    (cmpreg == NV_PRAMDAC_VPLL_COEFF || cmpreg == 0x4010))
					reg = cmpreg;
				if (limit_match == VPLL2 &&
				    (cmpreg == NV_RAMDAC_VPLL2 || cmpreg == 0x4018))
					reg = cmpreg;
			}

		/* entry 0 (the default) is used when no entry matches 'reg' */
		for (i = 1; i < entries; i++)
			if (ROM32(bios->data[plloffs + recordlen * i]) == reg) {
				pllindex = i;
				break;
			}

		pll_rec = &bios->data[plloffs + recordlen * pllindex];

		BIOSLOG(bios, "Loading PLL limits for reg 0x%08x\n",
			pllindex ? reg : 0);

		/*
		 * Frequencies are stored in tables in MHz, kHz are more
		 * useful, so we convert.
		 */

		/* What output frequencies can each VCO generate? */
		pll_lim->vco1.minfreq = ROM16(pll_rec[4]) * 1000;
		pll_lim->vco1.maxfreq = ROM16(pll_rec[6]) * 1000;
		pll_lim->vco2.minfreq = ROM16(pll_rec[8]) * 1000;
		pll_lim->vco2.maxfreq = ROM16(pll_rec[10]) * 1000;

		/* What input frequencies they accept (past the m-divider)? */
		pll_lim->vco1.min_inputfreq = ROM16(pll_rec[12]) * 1000;
		pll_lim->vco2.min_inputfreq = ROM16(pll_rec[14]) * 1000;
		pll_lim->vco1.max_inputfreq = ROM16(pll_rec[16]) * 1000;
		pll_lim->vco2.max_inputfreq = ROM16(pll_rec[18]) * 1000;

		/* What values are accepted as multiplier and divider? */
		pll_lim->vco1.min_n = pll_rec[20];
		pll_lim->vco1.max_n = pll_rec[21];
		pll_lim->vco1.min_m = pll_rec[22];
		pll_lim->vco1.max_m = pll_rec[23];
		pll_lim->vco2.min_n = pll_rec[24];
		pll_lim->vco2.max_n = pll_rec[25];
		pll_lim->vco2.min_m = pll_rec[26];
		pll_lim->vco2.max_m = pll_rec[27];

		pll_lim->max_usable_log2p = pll_lim->max_log2p = pll_rec[29];
		if (pll_lim->max_log2p > 0x7)
			/* pll decoding in nv_hw.c assumes never > 7 */
			NV_WARN(dev, "Max log2 P value greater than 7 (%d)\n",
				pll_lim->max_log2p);
		if (cv < 0x60)
			pll_lim->max_usable_log2p = 0x6;
		pll_lim->log2p_bias = pll_rec[30];

		if (recordlen > 0x22)
			pll_lim->refclk = ROM32(pll_rec[31]);

		if (recordlen > 0x23 && pll_rec[35])
			NV_WARN(dev,
				"Bits set in PLL configuration byte (%x)\n",
				pll_rec[35]);

		/* C51 special not seen elsewhere */
		if (cv == 0x51 && !pll_lim->refclk) {
			uint32_t sel_clk = bios_rd32(bios, NV_PRAMDAC_SEL_CLK);

			if (((limit_match == NV_PRAMDAC_VPLL_COEFF || limit_match == VPLL1) && sel_clk & 0x20) ||
			    ((limit_match == NV_RAMDAC_VPLL2 || limit_match == VPLL2) && sel_clk & 0x80)) {
				if (bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_CHIP_ID_INDEX) < 0xa3)
					pll_lim->refclk = 200000;
				else
					pll_lim->refclk = 25000;
			}
		}
	} else if (pll_lim_ver == 0x30) { /* ver 0x30 */
		uint8_t *entry = &bios->data[bios->pll_limit_tbl_ptr + headerlen];
		uint8_t *record = NULL;
		int i;

		BIOSLOG(bios, "Loading PLL limits for register 0x%08x\n",
			limit_match);

		/* entries map a register (+3) to a record pointer (+1) */
		for (i = 0; i < entries; i++, entry += recordlen) {
			if (ROM32(entry[3]) == limit_match) {
				record = &bios->data[ROM16(entry[1])];
				break;
			}
		}

		if (!record) {
			/* NOTE(review): message lacks a trailing '\n' */
			NV_ERROR(dev, "Register 0x%08x not found in PLL "
				 "limits table", limit_match);
			return -ENOENT;
		}

		pll_lim->vco1.minfreq = ROM16(record[0]) * 1000;
		pll_lim->vco1.maxfreq = ROM16(record[2]) * 1000;
		pll_lim->vco2.minfreq = ROM16(record[4]) * 1000;
		pll_lim->vco2.maxfreq = ROM16(record[6]) * 1000;
		pll_lim->vco1.min_inputfreq = ROM16(record[8]) * 1000;
		pll_lim->vco2.min_inputfreq = ROM16(record[10]) * 1000;
		pll_lim->vco1.max_inputfreq = ROM16(record[12]) * 1000;
		pll_lim->vco2.max_inputfreq = ROM16(record[14]) * 1000;
		pll_lim->vco1.min_n = record[16];
		pll_lim->vco1.max_n = record[17];
		pll_lim->vco1.min_m = record[18];
		pll_lim->vco1.max_m = record[19];
		pll_lim->vco2.min_n = record[20];
		pll_lim->vco2.max_n = record[21];
		pll_lim->vco2.min_m = record[22];
		pll_lim->vco2.max_m = record[23];
		pll_lim->max_usable_log2p = pll_lim->max_log2p = record[25];
		pll_lim->log2p_bias = record[27];
		pll_lim->refclk = ROM32(record[28]);
	} else if (pll_lim_ver) { /* ver 0x40 */
		uint8_t *entry = &bios->data[bios->pll_limit_tbl_ptr + headerlen];
		uint8_t *record = NULL;
		int i;

		BIOSLOG(bios, "Loading PLL limits for register 0x%08x\n",
			limit_match);

		for (i = 0; i < entries; i++, entry += recordlen) {
			if (ROM32(entry[3]) == limit_match) {
				record = &bios->data[ROM16(entry[1])];
				break;
			}
		}

		if (!record) {
			/* NOTE(review): message lacks a trailing '\n' */
			NV_ERROR(dev, "Register 0x%08x not found in PLL "
				 "limits table", limit_match);
			return -ENOENT;
		}

		/* single-stage PLL: only vco1 fields plus a linear P range */
		pll_lim->vco1.minfreq = ROM16(record[0]) * 1000;
		pll_lim->vco1.maxfreq = ROM16(record[2]) * 1000;
		pll_lim->vco1.min_inputfreq = ROM16(record[4]) * 1000;
		pll_lim->vco1.max_inputfreq = ROM16(record[6]) * 1000;
		pll_lim->vco1.min_m = record[8];
		pll_lim->vco1.max_m = record[9];
		pll_lim->vco1.min_n = record[10];
		pll_lim->vco1.max_n = record[11];
		pll_lim->min_p = record[12];
		pll_lim->max_p = record[13];
		/* where did this go to?? */
		if (limit_match == 0x00614100 || limit_match == 0x00614900)
			pll_lim->refclk = 27000;
		else
			pll_lim->refclk = 100000;
	}

	/*
	 * By now any valid limit table ought to have set a max frequency for
	 * vco1, so if it's zero it's either a pre limit table bios, or one
	 * with an empty limit table (seen on nv18)
	 */
	if (!pll_lim->vco1.maxfreq) {
		pll_lim->vco1.minfreq = bios->fminvco;
		pll_lim->vco1.maxfreq = bios->fmaxvco;
		pll_lim->vco1.min_inputfreq = 0;
		pll_lim->vco1.max_inputfreq = INT_MAX;
		pll_lim->vco1.min_n = 0x1;
		pll_lim->vco1.max_n = 0xff;
		pll_lim->vco1.min_m = 0x1;
		if (crystal_straps == 0) {
			/* nv05 does this, nv11 doesn't, nv10 unknown */
			if (cv < 0x11)
				pll_lim->vco1.min_m = 0x7;
			pll_lim->vco1.max_m = 0xd;
		} else {
			if (cv < 0x11)
				pll_lim->vco1.min_m = 0x8;
			pll_lim->vco1.max_m = 0xe;
		}
		if (cv < 0x17 || cv == 0x1a || cv == 0x20)
			pll_lim->max_log2p = 4;
		else
			pll_lim->max_log2p = 5;
		pll_lim->max_usable_log2p = pll_lim->max_log2p;
	}

	/* derive the reference clock from the crystal straps if unset */
	if (!pll_lim->refclk)
		switch (crystal_straps) {
		case 0:
			pll_lim->refclk = 13500;
			break;
		case (1 << 6):
			pll_lim->refclk = 14318;
			break;
		case (1 << 22):
			pll_lim->refclk = 27000;
			break;
		case (1 << 22 | 1 << 6):
			pll_lim->refclk = 25000;
			break;
		}

#if 0 /* for easy debugging */
	ErrorF("pll.vco1.minfreq: %d\n", pll_lim->vco1.minfreq);
	ErrorF("pll.vco1.maxfreq: %d\n", pll_lim->vco1.maxfreq);
	ErrorF("pll.vco2.minfreq: %d\n", pll_lim->vco2.minfreq);
	ErrorF("pll.vco2.maxfreq: %d\n", pll_lim->vco2.maxfreq);

	ErrorF("pll.vco1.min_inputfreq: %d\n", pll_lim->vco1.min_inputfreq);
	ErrorF("pll.vco1.max_inputfreq: %d\n", pll_lim->vco1.max_inputfreq);
	ErrorF("pll.vco2.min_inputfreq: %d\n", pll_lim->vco2.min_inputfreq);
	ErrorF("pll.vco2.max_inputfreq: %d\n", pll_lim->vco2.max_inputfreq);

	ErrorF("pll.vco1.min_n: %d\n", pll_lim->vco1.min_n);
	ErrorF("pll.vco1.max_n: %d\n", pll_lim->vco1.max_n);
	ErrorF("pll.vco1.min_m: %d\n", pll_lim->vco1.min_m);
	ErrorF("pll.vco1.max_m: %d\n", pll_lim->vco1.max_m);
	ErrorF("pll.vco2.min_n: %d\n", pll_lim->vco2.min_n);
	ErrorF("pll.vco2.max_n: %d\n", pll_lim->vco2.max_n);
	ErrorF("pll.vco2.min_m: %d\n", pll_lim->vco2.min_m);
	ErrorF("pll.vco2.max_m: %d\n", pll_lim->vco2.max_m);

	ErrorF("pll.max_log2p: %d\n", pll_lim->max_log2p);
	ErrorF("pll.log2p_bias: %d\n", pll_lim->log2p_bias);

	ErrorF("pll.refclk: %d\n", pll_lim->refclk);
#endif

	return 0;
}
4316
4317static void parse_bios_version(struct drm_device *dev, struct nvbios *bios, uint16_t offset)
4318{
4319 /*
4320 * offset + 0 (8 bits): Micro version
4321 * offset + 1 (8 bits): Minor version
4322 * offset + 2 (8 bits): Chip version
4323 * offset + 3 (8 bits): Major version
4324 */
4325
4326 bios->major_version = bios->data[offset + 3];
4327 bios->chip_version = bios->data[offset + 2];
4328 NV_TRACE(dev, "Bios version %02x.%02x.%02x.%02x\n",
4329 bios->data[offset + 3], bios->data[offset + 2],
4330 bios->data[offset + 1], bios->data[offset]);
4331}
4332
4333static void parse_script_table_pointers(struct nvbios *bios, uint16_t offset)
4334{
4335 /*
4336 * Parses the init table segment for pointers used in script execution.
4337 *
4338 * offset + 0 (16 bits): init script tables pointer
4339 * offset + 2 (16 bits): macro index table pointer
4340 * offset + 4 (16 bits): macro table pointer
4341 * offset + 6 (16 bits): condition table pointer
4342 * offset + 8 (16 bits): io condition table pointer
4343 * offset + 10 (16 bits): io flag condition table pointer
4344 * offset + 12 (16 bits): init function table pointer
4345 */
4346
4347 bios->init_script_tbls_ptr = ROM16(bios->data[offset]);
4348 bios->macro_index_tbl_ptr = ROM16(bios->data[offset + 2]);
4349 bios->macro_tbl_ptr = ROM16(bios->data[offset + 4]);
4350 bios->condition_tbl_ptr = ROM16(bios->data[offset + 6]);
4351 bios->io_condition_tbl_ptr = ROM16(bios->data[offset + 8]);
4352 bios->io_flag_condition_tbl_ptr = ROM16(bios->data[offset + 10]);
4353 bios->init_function_tbl_ptr = ROM16(bios->data[offset + 12]);
4354}
4355
4356static int parse_bit_A_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
4357{
4358 /*
4359 * Parses the load detect values for g80 cards.
4360 *
4361 * offset + 0 (16 bits): loadval table pointer
4362 */
4363
4364 uint16_t load_table_ptr;
4365 uint8_t version, headerlen, entrylen, num_entries;
4366
4367 if (bitentry->length != 3) {
4368 NV_ERROR(dev, "Do not understand BIT A table\n");
4369 return -EINVAL;
4370 }
4371
4372 load_table_ptr = ROM16(bios->data[bitentry->offset]);
4373
4374 if (load_table_ptr == 0x0) {
4375 NV_ERROR(dev, "Pointer to BIT loadval table invalid\n");
4376 return -EINVAL;
4377 }
4378
4379 version = bios->data[load_table_ptr];
4380
4381 if (version != 0x10) {
4382 NV_ERROR(dev, "BIT loadval table version %d.%d not supported\n",
4383 version >> 4, version & 0xF);
4384 return -ENOSYS;
4385 }
4386
4387 headerlen = bios->data[load_table_ptr + 1];
4388 entrylen = bios->data[load_table_ptr + 2];
4389 num_entries = bios->data[load_table_ptr + 3];
4390
4391 if (headerlen != 4 || entrylen != 4 || num_entries != 2) {
4392 NV_ERROR(dev, "Do not understand BIT loadval table\n");
4393 return -EINVAL;
4394 }
4395
4396 /* First entry is normal dac, 2nd tv-out perhaps? */
4397 bios->dactestval = ROM32(bios->data[load_table_ptr + headerlen]) & 0x3ff;
4398
4399 return 0;
4400}
4401
static int parse_bit_C_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
{
	/*
	 * offset + 8 (16 bits): PLL limits table pointer
	 *
	 * There's more in here, but that's unknown.
	 */

	/* need at least the 10 bytes covering the PLL limits pointer */
	if (bitentry->length < 10) {
		NV_ERROR(dev, "Do not understand BIT C table\n");
		return -EINVAL;
	}

	/* stashed for later consumption by get_pll_limits() */
	bios->pll_limit_tbl_ptr = ROM16(bios->data[bitentry->offset + 8]);

	return 0;
}
4419
static int parse_bit_display_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
{
	/*
	 * Parses the flat panel table segment that the bit entry points to.
	 * Starting at bitentry->offset:
	 *
	 * offset + 0 (16 bits): ??? table pointer - seems to have 18 byte
	 * records beginning with a freq.
	 * offset + 2 (16 bits): mode table pointer
	 */

	/* exactly 4 bytes: two 16-bit pointers */
	if (bitentry->length != 4) {
		NV_ERROR(dev, "Do not understand BIT display table\n");
		return -EINVAL;
	}

	/* only the mode table pointer (offset + 2) is used */
	bios->fp.fptablepointer = ROM16(bios->data[bitentry->offset + 2]);

	return 0;
}
4440
static int parse_bit_init_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
{
	/*
	 * Parses the init table segment that the bit entry points to.
	 *
	 * See parse_script_table_pointers for layout
	 */

	/* the seven 16-bit pointers parsed below need 14 bytes */
	if (bitentry->length < 14) {
		NV_ERROR(dev, "Do not understand init table\n");
		return -EINVAL;
	}

	parse_script_table_pointers(bios, bitentry->offset);

	/* longer tables carry two extra optional pointers */
	if (bitentry->length >= 16)
		bios->some_script_ptr = ROM16(bios->data[bitentry->offset + 14]);
	if (bitentry->length >= 18)
		bios->init96_tbl_ptr = ROM16(bios->data[bitentry->offset + 16]);

	return 0;
}
4463
4464static int parse_bit_i_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
4465{
4466 /*
4467 * BIT 'i' (info?) table
4468 *
4469 * offset + 0 (32 bits): BIOS version dword (as in B table)
4470 * offset + 5 (8 bits): BIOS feature byte (same as for BMP?)
4471 * offset + 13 (16 bits): pointer to table containing DAC load
4472 * detection comparison values
4473 *
4474 * There's other things in the table, purpose unknown
4475 */
4476
4477 uint16_t daccmpoffset;
4478 uint8_t dacver, dacheaderlen;
4479
4480 if (bitentry->length < 6) {
4481 NV_ERROR(dev, "BIT i table too short for needed information\n");
4482 return -EINVAL;
4483 }
4484
4485 parse_bios_version(dev, bios, bitentry->offset);
4486
4487 /*
4488 * bit 4 seems to indicate a mobile bios (doesn't suffer from BMP's
4489 * Quadro identity crisis), other bits possibly as for BMP feature byte
4490 */
4491 bios->feature_byte = bios->data[bitentry->offset + 5];
4492 bios->is_mobile = bios->feature_byte & FEATURE_MOBILE;
4493
4494 if (bitentry->length < 15) {
4495 NV_WARN(dev, "BIT i table not long enough for DAC load "
4496 "detection comparison table\n");
4497 return -EINVAL;
4498 }
4499
4500 daccmpoffset = ROM16(bios->data[bitentry->offset + 13]);
4501
4502 /* doesn't exist on g80 */
4503 if (!daccmpoffset)
4504 return 0;
4505
4506 /*
4507 * The first value in the table, following the header, is the
4508 * comparison value, the second entry is a comparison value for
4509 * TV load detection.
4510 */
4511
4512 dacver = bios->data[daccmpoffset];
4513 dacheaderlen = bios->data[daccmpoffset + 1];
4514
4515 if (dacver != 0x00 && dacver != 0x10) {
4516 NV_WARN(dev, "DAC load detection comparison table version "
4517 "%d.%d not known\n", dacver >> 4, dacver & 0xf);
4518 return -ENOSYS;
4519 }
4520
4521 bios->dactestval = ROM32(bios->data[daccmpoffset + dacheaderlen]);
4522 bios->tvdactestval = ROM32(bios->data[daccmpoffset + dacheaderlen + 4]);
4523
4524 return 0;
4525}
4526
static int parse_bit_lvds_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
{
	/*
	 * Parses the LVDS table segment that the bit entry points to.
	 * Starting at bitentry->offset:
	 *
	 * offset + 0 (16 bits): LVDS strap xlate table pointer
	 */

	/* exactly one 16-bit pointer */
	if (bitentry->length != 2) {
		NV_ERROR(dev, "Do not understand BIT LVDS table\n");
		return -EINVAL;
	}

	/*
	 * No idea if it's still called the LVDS manufacturer table, but
	 * the concept's close enough.
	 */
	bios->fp.lvdsmanufacturerpointer = ROM16(bios->data[bitentry->offset]);

	return 0;
}
4549
4550static int
4551parse_bit_M_tbl_entry(struct drm_device *dev, struct nvbios *bios,
4552 struct bit_entry *bitentry)
4553{
4554 /*
4555 * offset + 2 (8 bits): number of options in an
4556 * INIT_RAM_RESTRICT_ZM_REG_GROUP opcode option set
4557 * offset + 3 (16 bits): pointer to strap xlate table for RAM
4558 * restrict option selection
4559 *
4560 * There's a bunch of bits in this table other than the RAM restrict
4561 * stuff that we don't use - their use currently unknown
4562 */
4563
4564 /*
4565 * Older bios versions don't have a sufficiently long table for
4566 * what we want
4567 */
4568 if (bitentry->length < 0x5)
4569 return 0;
4570
4571 if (bitentry->id[1] < 2) {
4572 bios->ram_restrict_group_count = bios->data[bitentry->offset + 2];
4573 bios->ram_restrict_tbl_ptr = ROM16(bios->data[bitentry->offset + 3]);
4574 } else {
4575 bios->ram_restrict_group_count = bios->data[bitentry->offset + 0];
4576 bios->ram_restrict_tbl_ptr = ROM16(bios->data[bitentry->offset + 1]);
4577 }
4578
4579 return 0;
4580}
4581
static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
{
	/*
	 * Parses the pointer to the TMDS table
	 *
	 * Starting at bitentry->offset:
	 *
	 * offset + 0 (16 bits): TMDS table pointer
	 *
	 * The TMDS table is typically found just before the DCB table, with a
	 * characteristic signature of 0x11,0x13 (1.1 being version, 0x13 being
	 * length?)
	 *
	 * At offset +7 is a pointer to a script, which I don't know how to
	 * run yet.
	 * At offset +9 is a pointer to another script, likewise
	 * Offset +11 has a pointer to a table where the first word is a pxclk
	 * frequency and the second word a pointer to a script, which should be
	 * run if the comparison pxclk frequency is less than the pxclk desired.
	 * This repeats for decreasing comparison frequencies
	 * Offset +13 has a pointer to a similar table
	 * The selection of table (and possibly +7/+9 script) is dictated by
	 * "or" from the DCB.
	 */

	uint16_t tmdstableptr, script1, script2;

	if (bitentry->length != 2) {
		NV_ERROR(dev, "Do not understand BIT TMDS table\n");
		return -EINVAL;
	}

	tmdstableptr = ROM16(bios->data[bitentry->offset]);

	if (tmdstableptr == 0x0) {
		NV_ERROR(dev, "Pointer to TMDS table invalid\n");
		return -EINVAL;
	}

	/* nv50+ has v2.0, but we don't parse it atm */
	if (bios->data[tmdstableptr] != 0x11) {
		NV_WARN(dev,
			"TMDS table revision %d.%d not currently supported\n",
			bios->data[tmdstableptr] >> 4, bios->data[tmdstableptr] & 0xf);
		return -ENOSYS;
	}

	/*
	 * These two scripts are odd: they don't seem to get run even when
	 * they are not stubbed.
	 */
	/* NOTE(review): if either pointer is 0, the 'q' stub check below
	 * reads bios->data[0] - presumably benign, but worth confirming */
	script1 = ROM16(bios->data[tmdstableptr + 7]);
	script2 = ROM16(bios->data[tmdstableptr + 9]);
	if (bios->data[script1] != 'q' || bios->data[script2] != 'q')
		NV_WARN(dev, "TMDS table script pointers not stubbed\n");

	/* the two pxclk comparison tables consumed by run_tmds_table() */
	bios->tmds.output0_script_ptr = ROM16(bios->data[tmdstableptr + 11]);
	bios->tmds.output1_script_ptr = ROM16(bios->data[tmdstableptr + 13]);

	return 0;
}
4643
4644static int
4645parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
4646 struct bit_entry *bitentry)
4647{
4648 /*
4649 * Parses the pointer to the G80 output script tables
4650 *
4651 * Starting at bitentry->offset:
4652 *
4653 * offset + 0 (16 bits): output script table pointer
4654 */
4655
4656 uint16_t outputscripttableptr;
4657
4658 if (bitentry->length != 3) {
4659 NV_ERROR(dev, "Do not understand BIT U table\n");
4660 return -EINVAL;
4661 }
4662
4663 outputscripttableptr = ROM16(bios->data[bitentry->offset]);
4664 bios->display.script_table_ptr = outputscripttableptr;
4665 return 0;
4666}
4667
4668static int
4669parse_bit_displayport_tbl_entry(struct drm_device *dev, struct nvbios *bios,
4670 struct bit_entry *bitentry)
4671{
4672 bios->display.dp_table_ptr = ROM16(bios->data[bitentry->offset]);
4673 return 0;
4674}
4675
/*
 * Associates a one-character BIT table identifier with the function that
 * parses the matching BIT entry.
 */
struct bit_table {
	const char id;
	int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
};

/* Compound literal: BIT table 'id' handled by parse_bit_<funcid>_tbl_entry() */
#define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
4682
4683static int
4684parse_bit_table(struct nvbios *bios, const uint16_t bitoffset,
4685 struct bit_table *table)
4686{
4687 struct drm_device *dev = bios->dev;
4688 uint8_t maxentries = bios->data[bitoffset + 4];
4689 int i, offset;
4690 struct bit_entry bitentry;
4691
4692 for (i = 0, offset = bitoffset + 6; i < maxentries; i++, offset += 6) {
4693 bitentry.id[0] = bios->data[offset];
4694
4695 if (bitentry.id[0] != table->id)
4696 continue;
4697
4698 bitentry.id[1] = bios->data[offset + 1];
4699 bitentry.length = ROM16(bios->data[offset + 2]);
4700 bitentry.offset = ROM16(bios->data[offset + 4]);
4701
4702 return table->parse_fn(dev, bios, &bitentry);
4703 }
4704
4705 NV_INFO(dev, "BIT table '%c' not found\n", table->id);
4706 return -ENOSYS;
4707}
4708
static int
parse_bit_structure(struct nvbios *bios, const uint16_t bitoffset)
{
	/*
	 * Parse every BIT table we know about.  'i', 'C' and 'I' are
	 * treated as mandatory (their failure aborts parsing); the rest
	 * are parsed best-effort and their return values ignored.
	 */
	int ret;

	/*
	 * The only restriction on parsing order currently is having 'i' first
	 * for use of bios->*_version or bios->feature_byte while parsing;
	 * functions shouldn't be actually *doing* anything apart from pulling
	 * data from the image into the bios struct, thus no interdependencies
	 */
	ret = parse_bit_table(bios, bitoffset, &BIT_TABLE('i', i));
	if (ret) /* info? */
		return ret;
	if (bios->major_version >= 0x60) /* g80+ */
		parse_bit_table(bios, bitoffset, &BIT_TABLE('A', A));
	ret = parse_bit_table(bios, bitoffset, &BIT_TABLE('C', C));
	if (ret)
		return ret;
	parse_bit_table(bios, bitoffset, &BIT_TABLE('D', display));
	ret = parse_bit_table(bios, bitoffset, &BIT_TABLE('I', init));
	if (ret)
		return ret;
	parse_bit_table(bios, bitoffset, &BIT_TABLE('M', M)); /* memory? */
	parse_bit_table(bios, bitoffset, &BIT_TABLE('L', lvds));
	parse_bit_table(bios, bitoffset, &BIT_TABLE('T', tmds));
	parse_bit_table(bios, bitoffset, &BIT_TABLE('U', U));
	parse_bit_table(bios, bitoffset, &BIT_TABLE('d', displayport));

	return 0;
}
4740
static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsigned int offset)
{
	/*
	 * Parses the BMP structure for useful things, but does not act on them
	 *
	 * offset + 5: BMP major version
	 * offset + 6: BMP minor version
	 * offset + 9: BMP feature byte
	 * offset + 10: BCD encoded BIOS version
	 *
	 * offset + 18: init script table pointer (for bios versions < 5.10h)
	 * offset + 20: extra init script table pointer (for bios
	 * versions < 5.10h)
	 *
	 * offset + 24: memory init table pointer (used on early bios versions)
	 * offset + 26: SDR memory sequencing setup data table
	 * offset + 28: DDR memory sequencing setup data table
	 *
	 * offset + 54: index of I2C CRTC pair to use for CRT output
	 * offset + 55: index of I2C CRTC pair to use for TV output
	 * offset + 56: index of I2C CRTC pair to use for flat panel output
	 * offset + 58: write CRTC index for I2C pair 0
	 * offset + 59: read CRTC index for I2C pair 0
	 * offset + 60: write CRTC index for I2C pair 1
	 * offset + 61: read CRTC index for I2C pair 1
	 *
	 * offset + 67: maximum internal PLL frequency (single stage PLL)
	 * offset + 71: minimum internal PLL frequency (single stage PLL)
	 *
	 * offset + 75: script table pointers, as described in
	 * parse_script_table_pointers
	 *
	 * offset + 89: TMDS single link output A table pointer
	 * offset + 91: TMDS single link output B table pointer
	 * offset + 95: LVDS single link output A table pointer
	 * offset + 105: flat panel timings table pointer
	 * offset + 107: flat panel strapping translation table pointer
	 * offset + 117: LVDS manufacturer panel config table pointer
	 * offset + 119: LVDS manufacturer strapping translation table pointer
	 *
	 * offset + 142: PLL limits table pointer
	 *
	 * offset + 156: minimum pixel clock for LVDS dual link
	 */

	uint8_t *bmp = &bios->data[offset], bmp_version_major, bmp_version_minor;
	uint16_t bmplength;
	uint16_t legacy_scripts_offset, legacy_i2c_offset;

	/* load needed defaults in case we can't parse this info */
	bios->dcb.i2c[0].write = NV_CIO_CRE_DDC_WR__INDEX;
	bios->dcb.i2c[0].read = NV_CIO_CRE_DDC_STATUS__INDEX;
	bios->dcb.i2c[1].write = NV_CIO_CRE_DDC0_WR__INDEX;
	bios->dcb.i2c[1].read = NV_CIO_CRE_DDC0_STATUS__INDEX;
	bios->digital_min_front_porch = 0x4b;
	bios->fmaxvco = 256000;
	bios->fminvco = 128000;
	bios->fp.duallink_transition_clk = 90000;

	bmp_version_major = bmp[5];
	bmp_version_minor = bmp[6];

	NV_TRACE(dev, "BMP version %d.%d\n",
		 bmp_version_major, bmp_version_minor);

	/*
	 * Make sure that 0x36 is blank and can't be mistaken for a DCB
	 * pointer on early versions
	 */
	if (bmp_version_major < 5)
		*(uint16_t *)&bios->data[0x36] = 0;

	/*
	 * Seems that the minor version was 1 for all major versions prior
	 * to 5. Version 6 could theoretically exist, but I suspect BIT
	 * happened instead.
	 */
	if ((bmp_version_major < 5 && bmp_version_minor != 1) || bmp_version_major > 5) {
		NV_ERROR(dev, "You have an unsupported BMP version. "
				"Please send in your bios\n");
		return -ENOSYS;
	}

	/*
	 * Map the version to the structure length this BMP provides;
	 * fields below are only read when bmplength covers them.
	 */
	if (bmp_version_major == 0)
		/* nothing that's currently useful in this version */
		return 0;
	else if (bmp_version_major == 1)
		bmplength = 44; /* exact for 1.01 */
	else if (bmp_version_major == 2)
		bmplength = 48; /* exact for 2.01 */
	else if (bmp_version_major == 3)
		bmplength = 54;
		/* guessed - mem init tables added in this version */
	else if (bmp_version_major == 4 || bmp_version_minor < 0x1)
		/* don't know if 5.0 exists... */
		bmplength = 62;
		/* guessed - BMP I2C indices added in version 4*/
	else if (bmp_version_minor < 0x6)
		bmplength = 67; /* exact for 5.01 */
	else if (bmp_version_minor < 0x10)
		bmplength = 75; /* exact for 5.06 */
	else if (bmp_version_minor == 0x10)
		bmplength = 89; /* exact for 5.10h */
	else if (bmp_version_minor < 0x14)
		bmplength = 118; /* exact for 5.11h */
	else if (bmp_version_minor < 0x24)
		/*
		 * Not sure of version where pll limits came in;
		 * certainly exist by 0x24 though.
		 */
		/* length not exact: this is long enough to get lvds members */
		bmplength = 123;
	else if (bmp_version_minor < 0x27)
		/*
		 * Length not exact: this is long enough to get pll limit
		 * member
		 */
		bmplength = 144;
	else
		/*
		 * Length not exact: this is long enough to get dual link
		 * transition clock.
		 */
		bmplength = 158;

	/* checksum */
	if (nv_cksum(bmp, 8)) {
		NV_ERROR(dev, "Bad BMP checksum\n");
		return -EINVAL;
	}

	/*
	 * Bit 4 seems to indicate either a mobile bios or a quadro card --
	 * mobile behaviour consistent (nv11+), quadro only seen nv18gl-nv36gl
	 * (not nv10gl), bit 5 that the flat panel tables are present, and
	 * bit 6 a tv bios.
	 */
	bios->feature_byte = bmp[9];

	parse_bios_version(dev, bios, offset + 10);

	if (bmp_version_major < 5 || bmp_version_minor < 0x10)
		bios->old_style_init = true;
	/* BMP 1.x kept the init script pointers 4 bytes earlier (at +14) */
	legacy_scripts_offset = 18;
	if (bmp_version_major < 2)
		legacy_scripts_offset -= 4;
	bios->init_script_tbls_ptr = ROM16(bmp[legacy_scripts_offset]);
	bios->extra_init_script_tbl_ptr = ROM16(bmp[legacy_scripts_offset + 2]);

	if (bmp_version_major > 2) {	/* appears in BMP 3 */
		bios->legacy.mem_init_tbl_ptr = ROM16(bmp[24]);
		bios->legacy.sdr_seq_tbl_ptr = ROM16(bmp[26]);
		bios->legacy.ddr_seq_tbl_ptr = ROM16(bmp[28]);
	}

	/*
	 * Note the mixed addressing: 0x48 is a fixed image offset, while
	 * longer (BMP 4+) structures locate the indices relative to the
	 * BMP start (offset + 54).
	 */
	legacy_i2c_offset = 0x48; /* BMP version 2 & 3 */
	if (bmplength > 61)
		legacy_i2c_offset = offset + 54;
	bios->legacy.i2c_indices.crt = bios->data[legacy_i2c_offset];
	bios->legacy.i2c_indices.tv = bios->data[legacy_i2c_offset + 1];
	bios->legacy.i2c_indices.panel = bios->data[legacy_i2c_offset + 2];
	bios->dcb.i2c[0].write = bios->data[legacy_i2c_offset + 4];
	bios->dcb.i2c[0].read = bios->data[legacy_i2c_offset + 5];
	bios->dcb.i2c[1].write = bios->data[legacy_i2c_offset + 6];
	bios->dcb.i2c[1].read = bios->data[legacy_i2c_offset + 7];

	if (bmplength > 74) {
		bios->fmaxvco = ROM32(bmp[67]);
		bios->fminvco = ROM32(bmp[71]);
	}
	if (bmplength > 88)
		parse_script_table_pointers(bios, offset + 75);
	if (bmplength > 94) {
		bios->tmds.output0_script_ptr = ROM16(bmp[89]);
		bios->tmds.output1_script_ptr = ROM16(bmp[91]);
		/*
		 * Never observed in use with lvds scripts, but is reused for
		 * 18/24 bit panel interface default for EDID equipped panels
		 * (if_is_24bit not set directly to avoid any oscillation).
		 */
		bios->legacy.lvds_single_a_script_ptr = ROM16(bmp[95]);
	}
	if (bmplength > 108) {
		bios->fp.fptablepointer = ROM16(bmp[105]);
		bios->fp.fpxlatetableptr = ROM16(bmp[107]);
		bios->fp.xlatwidth = 1;
	}
	if (bmplength > 120) {
		bios->fp.lvdsmanufacturerpointer = ROM16(bmp[117]);
		bios->fp.fpxlatemanufacturertableptr = ROM16(bmp[119]);
	}
	if (bmplength > 143)
		bios->pll_limit_tbl_ptr = ROM16(bmp[142]);

	if (bmplength > 157)
		bios->fp.duallink_transition_clk = ROM16(bmp[156]) * 10;

	return 0;
}
4940
static uint16_t findstr(uint8_t *data, int n, const uint8_t *str, int len)
{
	/*
	 * Return the index of the first occurrence of the 'len'-byte
	 * pattern 'str' within the first 'n' bytes of 'data'.  Returns 0
	 * when not found (note: indistinguishable from a match at 0).
	 */
	int start, k;

	for (start = 0; start + len <= n; start++) {
		for (k = 0; k < len; k++) {
			if (data[start + k] != str[k])
				break;
		}
		if (k == len)
			return start;
	}

	return 0;
}
4955
static int
read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, int index, struct dcb_i2c_entry *i2c)
{
	/*
	 * Read entry 'index' of the DCB I2C port table into *i2c.
	 *
	 * DCB 3.0+ I2C tables carry their own header (version, header
	 * length, entry count, entry length); older DCBs use a fixed
	 * layout of 4-byte entries with no header.  Returns 0 on success
	 * (including index 0xf, "no port", which leaves *i2c untouched),
	 * or a negative errno.
	 */
	uint8_t dcb_i2c_ver = dcb_version, headerlen = 0, entry_len = 4;
	int i2c_entries = DCB_MAX_NUM_I2C_ENTRIES;
	int recordoffset = 0, rdofs = 1, wrofs = 0;
	uint8_t port_type = 0;

	if (!i2ctable)
		return -EINVAL;

	if (dcb_version >= 0x30) {
		if (i2ctable[0] != dcb_version) /* necessary? */
			NV_WARN(dev,
				"DCB I2C table version mismatch (%02X vs %02X)\n",
				i2ctable[0], dcb_version);
		dcb_i2c_ver = i2ctable[0];
		headerlen = i2ctable[1];
		if (i2ctable[2] <= DCB_MAX_NUM_I2C_ENTRIES)
			i2c_entries = i2ctable[2];
		else
			NV_WARN(dev,
				"DCB I2C table has more entries than indexable "
				"(%d entries, max %d)\n", i2ctable[2],
				DCB_MAX_NUM_I2C_ENTRIES);
		entry_len = i2ctable[3];
		/* [4] is i2c_default_indices, read in parse_dcb_table() */
	}
	/*
	 * It's your own fault if you call this function on a DCB 1.1 BIOS --
	 * the test below is for DCB 1.2
	 */
	if (dcb_version < 0x14) {
		recordoffset = 2;
		rdofs = 0;
		wrofs = 1;
	}

	/* index 0xf means "no I2C port"; not an error */
	if (index == 0xf)
		return 0;
	if (index >= i2c_entries) {
		NV_ERROR(dev, "DCB I2C index too big (%d >= %d)\n",
			 index, i2ctable[2]);
		return -ENOENT;
	}
	/* entry byte 3 set to 0xff marks an invalid/unpopulated port */
	if (i2ctable[headerlen + entry_len * index + 3] == 0xff) {
		NV_ERROR(dev, "DCB I2C entry invalid\n");
		return -EINVAL;
	}

	if (dcb_i2c_ver >= 0x30) {
		port_type = i2ctable[headerlen + recordoffset + 3 + entry_len * index];

		/*
		 * Fixup for chips using same address offset for read and
		 * write.
		 */
		if (port_type == 4) /* seen on C51 */
			rdofs = wrofs = 1;
		if (port_type >= 5) /* G80+ */
			rdofs = wrofs = 0;
	}

	if (dcb_i2c_ver >= 0x40 && port_type != 5 && port_type != 6)
		NV_WARN(dev, "DCB I2C table has port type %d\n", port_type);

	i2c->port_type = port_type;
	i2c->read = i2ctable[headerlen + recordoffset + rdofs + entry_len * index];
	i2c->write = i2ctable[headerlen + recordoffset + wrofs + entry_len * index];

	return 0;
}
5028
5029static struct dcb_gpio_entry *
5030new_gpio_entry(struct nvbios *bios)
5031{
5032 struct dcb_gpio_table *gpio = &bios->dcb.gpio;
5033
5034 return &gpio->entry[gpio->entries++];
5035}
5036
5037struct dcb_gpio_entry *
5038nouveau_bios_gpio_entry(struct drm_device *dev, enum dcb_gpio_tag tag)
5039{
5040 struct drm_nouveau_private *dev_priv = dev->dev_private;
5041 struct nvbios *bios = &dev_priv->vbios;
5042 int i;
5043
5044 for (i = 0; i < bios->dcb.gpio.entries; i++) {
5045 if (bios->dcb.gpio.entry[i].tag != tag)
5046 continue;
5047
5048 return &bios->dcb.gpio.entry[i];
5049 }
5050
5051 return NULL;
5052}
5053
5054static void
5055parse_dcb30_gpio_entry(struct nvbios *bios, uint16_t offset)
5056{
5057 struct dcb_gpio_entry *gpio;
5058 uint16_t ent = ROM16(bios->data[offset]);
5059 uint8_t line = ent & 0x1f,
5060 tag = ent >> 5 & 0x3f,
5061 flags = ent >> 11 & 0x1f;
5062
5063 if (tag == 0x3f)
5064 return;
5065
5066 gpio = new_gpio_entry(bios);
5067
5068 gpio->tag = tag;
5069 gpio->line = line;
5070 gpio->invert = flags != 4;
5071 gpio->entry = ent;
5072}
5073
5074static void
5075parse_dcb40_gpio_entry(struct nvbios *bios, uint16_t offset)
5076{
5077 uint32_t entry = ROM32(bios->data[offset]);
5078 struct dcb_gpio_entry *gpio;
5079
5080 if ((entry & 0x0000ff00) == 0x0000ff00)
5081 return;
5082
5083 gpio = new_gpio_entry(bios);
5084 gpio->tag = (entry & 0x0000ff00) >> 8;
5085 gpio->line = (entry & 0x0000001f) >> 0;
5086 gpio->state_default = (entry & 0x01000000) >> 24;
5087 gpio->state[0] = (entry & 0x18000000) >> 27;
5088 gpio->state[1] = (entry & 0x60000000) >> 29;
5089 gpio->entry = entry;
5090}
5091
static void
parse_dcb_gpio_table(struct nvbios *bios)
{
	/*
	 * Populate bios->dcb.gpio from the DCB GPIO table.  Table header:
	 * byte 1 = header length, byte 2 = entry count, byte 3 = entry
	 * length.  DCB 4.0 uses 4-byte entries, 3.0 uses 2-byte entries;
	 * pre-3.0 DCBs keep TV DAC GPIO info at a fixed offset instead of
	 * a real table.
	 */
	struct drm_device *dev = bios->dev;
	uint16_t gpio_table_ptr = bios->dcb.gpio_table_ptr;
	uint8_t *gpio_table = &bios->data[gpio_table_ptr];
	int header_len = gpio_table[1],
	    entries = gpio_table[2],
	    entry_len = gpio_table[3];
	void (*parse_entry)(struct nvbios *, uint16_t) = NULL;
	int i;

	if (bios->dcb.version >= 0x40) {
		/* DCB 4.0: expect 4-byte entries */
		if (gpio_table_ptr && entry_len != 4) {
			NV_WARN(dev, "Invalid DCB GPIO table entry length.\n");
			return;
		}

		parse_entry = parse_dcb40_gpio_entry;

	} else if (bios->dcb.version >= 0x30) {
		/* DCB 3.0: expect 2-byte entries */
		if (gpio_table_ptr && entry_len != 2) {
			NV_WARN(dev, "Invalid DCB GPIO table entry length.\n");
			return;
		}

		parse_entry = parse_dcb30_gpio_entry;

	} else if (bios->dcb.version >= 0x22) {
		/*
		 * DCBs older than v3.0 don't really have a GPIO
		 * table, instead they keep some GPIO info at fixed
		 * locations.
		 */
		uint16_t dcbptr = ROM16(bios->data[0x36]);
		uint8_t *tvdac_gpio = &bios->data[dcbptr - 5];

		if (tvdac_gpio[0] & 1) {
			struct dcb_gpio_entry *gpio = new_gpio_entry(bios);

			gpio->tag = DCB_GPIO_TVDAC0;
			gpio->line = tvdac_gpio[1] >> 4;
			gpio->invert = tvdac_gpio[0] & 2;
		}
	}

	/* no table to walk (the legacy path above may still have added one) */
	if (!gpio_table_ptr)
		return;

	if (entries > DCB_MAX_NUM_GPIO_ENTRIES) {
		NV_WARN(dev, "Too many entries in the DCB GPIO table.\n");
		entries = DCB_MAX_NUM_GPIO_ENTRIES;
	}

	for (i = 0; i < entries; i++)
		parse_entry(bios, gpio_table_ptr + header_len + entry_len * i);
}
5149
5150struct dcb_connector_table_entry *
5151nouveau_bios_connector_entry(struct drm_device *dev, int index)
5152{
5153 struct drm_nouveau_private *dev_priv = dev->dev_private;
5154 struct nvbios *bios = &dev_priv->vbios;
5155 struct dcb_connector_table_entry *cte;
5156
5157 if (index >= bios->dcb.connector.entries)
5158 return NULL;
5159
5160 cte = &bios->dcb.connector.entry[index];
5161 if (cte->type == 0xff)
5162 return NULL;
5163
5164 return cte;
5165}
5166
5167static enum dcb_connector_type
5168divine_connector_type(struct nvbios *bios, int index)
5169{
5170 struct dcb_table *dcb = &bios->dcb;
5171 unsigned encoders = 0, type = DCB_CONNECTOR_NONE;
5172 int i;
5173
5174 for (i = 0; i < dcb->entries; i++) {
5175 if (dcb->entry[i].connector == index)
5176 encoders |= (1 << dcb->entry[i].type);
5177 }
5178
5179 if (encoders & (1 << OUTPUT_DP)) {
5180 if (encoders & (1 << OUTPUT_TMDS))
5181 type = DCB_CONNECTOR_DP;
5182 else
5183 type = DCB_CONNECTOR_eDP;
5184 } else
5185 if (encoders & (1 << OUTPUT_TMDS)) {
5186 if (encoders & (1 << OUTPUT_ANALOG))
5187 type = DCB_CONNECTOR_DVI_I;
5188 else
5189 type = DCB_CONNECTOR_DVI_D;
5190 } else
5191 if (encoders & (1 << OUTPUT_ANALOG)) {
5192 type = DCB_CONNECTOR_VGA;
5193 } else
5194 if (encoders & (1 << OUTPUT_LVDS)) {
5195 type = DCB_CONNECTOR_LVDS;
5196 } else
5197 if (encoders & (1 << OUTPUT_TV)) {
5198 type = DCB_CONNECTOR_TV_0;
5199 }
5200
5201 return type;
5202}
5203
5204static void
5205apply_dcb_connector_quirks(struct nvbios *bios, int idx)
5206{
5207 struct dcb_connector_table_entry *cte = &bios->dcb.connector.entry[idx];
5208 struct drm_device *dev = bios->dev;
5209
5210 /* Gigabyte NX85T */
5211 if ((dev->pdev->device == 0x0421) &&
5212 (dev->pdev->subsystem_vendor == 0x1458) &&
5213 (dev->pdev->subsystem_device == 0x344c)) {
5214 if (cte->type == DCB_CONNECTOR_HDMI_1)
5215 cte->type = DCB_CONNECTOR_DVI_I;
5216 }
5217}
5218
5219static void
5220parse_dcb_connector_table(struct nvbios *bios)
5221{
5222 struct drm_device *dev = bios->dev;
5223 struct dcb_connector_table *ct = &bios->dcb.connector;
5224 struct dcb_connector_table_entry *cte;
5225 uint8_t *conntab = &bios->data[bios->dcb.connector_table_ptr];
5226 uint8_t *entry;
5227 int i;
5228
5229 if (!bios->dcb.connector_table_ptr) {
5230 NV_DEBUG_KMS(dev, "No DCB connector table present\n");
5231 return;
5232 }
5233
5234 NV_INFO(dev, "DCB connector table: VHER 0x%02x %d %d %d\n",
5235 conntab[0], conntab[1], conntab[2], conntab[3]);
5236 if ((conntab[0] != 0x30 && conntab[0] != 0x40) ||
5237 (conntab[3] != 2 && conntab[3] != 4)) {
5238 NV_ERROR(dev, " Unknown! Please report.\n");
5239 return;
5240 }
5241
5242 ct->entries = conntab[2];
5243
5244 entry = conntab + conntab[1];
5245 cte = &ct->entry[0];
5246 for (i = 0; i < conntab[2]; i++, entry += conntab[3], cte++) {
5247 cte->index = i;
5248 if (conntab[3] == 2)
5249 cte->entry = ROM16(entry[0]);
5250 else
5251 cte->entry = ROM32(entry[0]);
5252
5253 cte->type = (cte->entry & 0x000000ff) >> 0;
5254 cte->index2 = (cte->entry & 0x00000f00) >> 8;
5255 switch (cte->entry & 0x00033000) {
5256 case 0x00001000:
5257 cte->gpio_tag = 0x07;
5258 break;
5259 case 0x00002000:
5260 cte->gpio_tag = 0x08;
5261 break;
5262 case 0x00010000:
5263 cte->gpio_tag = 0x51;
5264 break;
5265 case 0x00020000:
5266 cte->gpio_tag = 0x52;
5267 break;
5268 default:
5269 cte->gpio_tag = 0xff;
5270 break;
5271 }
5272
5273 if (cte->type == 0xff)
5274 continue;
5275
5276 apply_dcb_connector_quirks(bios, i);
5277
5278 NV_INFO(dev, " %d: 0x%08x: type 0x%02x idx %d tag 0x%02x\n",
5279 i, cte->entry, cte->type, cte->index, cte->gpio_tag);
5280
5281 /* check for known types, fallback to guessing the type
5282 * from attached encoders if we hit an unknown.
5283 */
5284 switch (cte->type) {
5285 case DCB_CONNECTOR_VGA:
5286 case DCB_CONNECTOR_TV_0:
5287 case DCB_CONNECTOR_TV_1:
5288 case DCB_CONNECTOR_TV_3:
5289 case DCB_CONNECTOR_DVI_I:
5290 case DCB_CONNECTOR_DVI_D:
5291 case DCB_CONNECTOR_LVDS:
5292 case DCB_CONNECTOR_DP:
5293 case DCB_CONNECTOR_eDP:
5294 case DCB_CONNECTOR_HDMI_0:
5295 case DCB_CONNECTOR_HDMI_1:
5296 break;
5297 default:
5298 cte->type = divine_connector_type(bios, cte->index);
5299 NV_WARN(dev, "unknown type, using 0x%02x\n", cte->type);
5300 break;
5301 }
5302
5303 if (nouveau_override_conntype) {
5304 int type = divine_connector_type(bios, cte->index);
5305 if (type != cte->type)
5306 NV_WARN(dev, " -> type 0x%02x\n", cte->type);
5307 }
5308
5309 }
5310}
5311
5312static struct dcb_entry *new_dcb_entry(struct dcb_table *dcb)
5313{
5314 struct dcb_entry *entry = &dcb->entry[dcb->entries];
5315
5316 memset(entry, 0, sizeof(struct dcb_entry));
5317 entry->index = dcb->entries++;
5318
5319 return entry;
5320}
5321
5322static void fabricate_vga_output(struct dcb_table *dcb, int i2c, int heads)
5323{
5324 struct dcb_entry *entry = new_dcb_entry(dcb);
5325
5326 entry->type = 0;
5327 entry->i2c_index = i2c;
5328 entry->heads = heads;
5329 entry->location = DCB_LOC_ON_CHIP;
5330 /* "or" mostly unused in early gen crt modesetting, 0 is fine */
5331}
5332
static void fabricate_dvi_i_output(struct dcb_table *dcb, bool twoHeads)
{
	/*
	 * Add a synthetic DVI-I output entry (type 2, i.e. OUTPUT_TMDS) for
	 * BIOSes without a usable DCB, assuming an off-chip single-link
	 * transmitter on the panel I2C bus.
	 */
	struct dcb_entry *entry = new_dcb_entry(dcb);

	entry->type = 2;
	entry->i2c_index = LEGACY_I2C_PANEL;
	entry->heads = twoHeads ? 3 : 1;
	entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */
	entry->or = 1; /* means |0x10 gets set on CRE_LCD__INDEX */
	entry->duallink_possible = false; /* SiI164 and co. are single link */

#if 0
	/*
	 * For dvi-a either crtc probably works, but my card appears to only
	 * support dvi-d. "nvidia" still attempts to program it for dvi-a,
	 * doing the full fp output setup (program 0x6808.. fp dimension regs,
	 * setting 0x680848 to 0x10000111 to enable, maybe setting 0x680880);
	 * the monitor picks up the mode res ok and lights up, but no pixel
	 * data appears, so the board manufacturer probably connected up the
	 * sync lines, but missed the video traces / components
	 *
	 * with this introduction, dvi-a left as an exercise for the reader.
	 */
	fabricate_vga_output(dcb, LEGACY_I2C_PANEL, entry->heads);
#endif
}
5359
5360static void fabricate_tv_output(struct dcb_table *dcb, bool twoHeads)
5361{
5362 struct dcb_entry *entry = new_dcb_entry(dcb);
5363
5364 entry->type = 1;
5365 entry->i2c_index = LEGACY_I2C_TV;
5366 entry->heads = twoHeads ? 3 : 1;
5367 entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */
5368}
5369
static bool
parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
		  uint32_t conn, uint32_t conf, struct dcb_entry *entry)
{
	/*
	 * Decode a DCB 2.0+ entry from its connection dword 'conn' and
	 * config dword 'conf' into *entry.  Returns false only for type
	 * 0xe, which is treated as a table terminator (the just-allocated
	 * entry slot is then released via dcb->entries--); true otherwise.
	 */
	entry->type = conn & 0xf;
	entry->i2c_index = (conn >> 4) & 0xf;
	entry->heads = (conn >> 8) & 0xf;
	if (dcb->version >= 0x40)
		entry->connector = (conn >> 12) & 0xf;
	entry->bus = (conn >> 16) & 0xf;
	entry->location = (conn >> 20) & 0x3;
	entry->or = (conn >> 24) & 0xf;
	/*
	 * Normal entries consist of a single bit, but dual link has the
	 * next most significant bit set too
	 */
	entry->duallink_possible =
		((1 << (ffs(entry->or) - 1)) * 3 == entry->or);

	switch (entry->type) {
	case OUTPUT_ANALOG:
		/*
		 * Although the rest of a CRT conf dword is usually
		 * zeros, mac biosen have stuff there so we must mask
		 */
		entry->crtconf.maxfreq = (dcb->version < 0x30) ?
					 (conf & 0xffff) * 10 :
					 (conf & 0xff) * 10000;
		break;
	case OUTPUT_LVDS:
		{
		uint32_t mask;
		if (conf & 0x1)
			entry->lvdsconf.use_straps_for_mode = true;
		if (dcb->version < 0x22) {
			mask = ~0xd;
			/*
			 * The laptop in bug 14567 lies and claims to not use
			 * straps when it does, so assume all DCB 2.0 laptops
			 * use straps, until a broken EDID using one is produced
			 */
			entry->lvdsconf.use_straps_for_mode = true;
			/*
			 * Both 0x4 and 0x8 show up in v2.0 tables; assume they
			 * mean the same thing (probably wrong, but might work)
			 */
			if (conf & 0x4 || conf & 0x8)
				entry->lvdsconf.use_power_scripts = true;
		} else {
			mask = ~0x5;
			if (conf & 0x4)
				entry->lvdsconf.use_power_scripts = true;
		}
		if (conf & mask) {
			/*
			 * Until we even try to use these on G8x, it's
			 * useless reporting unknown bits. They all are.
			 */
			if (dcb->version >= 0x40)
				break;

			NV_ERROR(dev, "Unknown LVDS configuration bits, "
				      "please report\n");
		}
		break;
		}
	case OUTPUT_TV:
	{
		if (dcb->version >= 0x30)
			entry->tvconf.has_component_output = conf & (0x8 << 4);
		else
			entry->tvconf.has_component_output = false;

		break;
	}
	case OUTPUT_DP:
		entry->dpconf.sor.link = (conf & 0x00000030) >> 4;
		entry->dpconf.link_bw = (conf & 0x00e00000) >> 21;
		/* lane count field: 0xf = 4 lanes, 0x3 = 2 lanes, else 1 */
		switch ((conf & 0x0f000000) >> 24) {
		case 0xf:
			entry->dpconf.link_nr = 4;
			break;
		case 0x3:
			entry->dpconf.link_nr = 2;
			break;
		default:
			entry->dpconf.link_nr = 1;
			break;
		}
		break;
	case OUTPUT_TMDS:
		entry->tmdsconf.sor.link = (conf & 0x00000030) >> 4;
		break;
	case 0xe:
		/* weird g80 mobile type that "nv" treats as a terminator */
		dcb->entries--;
		return false;
	default:
		break;
	}

	/* unsure what DCB version introduces this, 3.0? */
	if (conf & 0x100000)
		entry->i2c_upper_default = true;

	return true;
}
5477
static bool
parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
		  uint32_t conn, uint32_t conf, struct dcb_entry *entry)
{
	/*
	 * Decode a DCB 1.5 entry into *entry.  The layout predates DCB
	 * 2.0: the output type is a code in the low nibble (type 4 has a
	 * further subtype nibble), and the i2c/heads/location/bus fields
	 * sit at different bit positions.  Returns false for unknown
	 * output types/subtypes.
	 */
	switch (conn & 0x0000000f) {
	case 0:
		entry->type = OUTPUT_ANALOG;
		break;
	case 1:
		entry->type = OUTPUT_TV;
		break;
	case 2:
	case 3:
		entry->type = OUTPUT_LVDS;
		break;
	case 4:
		switch ((conn & 0x000000f0) >> 4) {
		case 0:
			entry->type = OUTPUT_TMDS;
			break;
		case 1:
			entry->type = OUTPUT_LVDS;
			break;
		default:
			NV_ERROR(dev, "Unknown DCB subtype 4/%d\n",
				 (conn & 0x000000f0) >> 4);
			return false;
		}
		break;
	default:
		NV_ERROR(dev, "Unknown DCB type %d\n", conn & 0x0000000f);
		return false;
	}

	entry->i2c_index = (conn & 0x0003c000) >> 14;
	entry->heads = ((conn & 0x001c0000) >> 18) + 1;
	entry->or = entry->heads; /* same as heads, hopefully safe enough */
	entry->location = (conn & 0x01e00000) >> 21;
	entry->bus = (conn & 0x0e000000) >> 25;
	entry->duallink_possible = false;

	switch (entry->type) {
	case OUTPUT_ANALOG:
		entry->crtconf.maxfreq = (conf & 0xffff) * 10;
		break;
	case OUTPUT_TV:
		entry->tvconf.has_component_output = false;
		break;
	case OUTPUT_TMDS:
		/*
		 * Invent a DVI-A output, by copying the fields of the DVI-D
		 * output; reported to work by math_b on an NV20(!).
		 */
		fabricate_vga_output(dcb, entry->i2c_index, entry->heads);
		break;
	case OUTPUT_LVDS:
		if ((conn & 0x00003f00) != 0x10)
			entry->lvdsconf.use_straps_for_mode = true;
		entry->lvdsconf.use_power_scripts = true;
		break;
	default:
		break;
	}

	return true;
}
5544
5545static bool parse_dcb_entry(struct drm_device *dev, struct dcb_table *dcb,
5546 uint32_t conn, uint32_t conf)
5547{
5548 struct dcb_entry *entry = new_dcb_entry(dcb);
5549 bool ret;
5550
5551 if (dcb->version >= 0x20)
5552 ret = parse_dcb20_entry(dev, dcb, conn, conf, entry);
5553 else
5554 ret = parse_dcb15_entry(dev, dcb, conn, conf, entry);
5555 if (!ret)
5556 return ret;
5557
5558 read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table,
5559 entry->i2c_index, &dcb->i2c[entry->i2c_index]);
5560
5561 return true;
5562}
5563
static
void merge_like_dcb_entries(struct drm_device *dev, struct dcb_table *dcb)
{
	/*
	 * DCB v2.0 lists each output combination separately.
	 * Here we merge compatible entries to have fewer outputs, with
	 * more options
	 *
	 * Two entries are compatible when they match in everything except
	 * the heads field; the surviving entry gets the union of the heads.
	 * Merged-away entries are flagged with the sentinel type 100 and
	 * compacted out in a second pass.
	 */

	int i, newentries = 0;

	for (i = 0; i < dcb->entries; i++) {
		struct dcb_entry *ient = &dcb->entry[i];
		int j;

		for (j = i + 1; j < dcb->entries; j++) {
			struct dcb_entry *jent = &dcb->entry[j];

			if (jent->type == 100) /* already merged entry */
				continue;

			/* merge heads field when all other fields the same */
			if (jent->i2c_index == ient->i2c_index &&
			    jent->type == ient->type &&
			    jent->location == ient->location &&
			    jent->or == ient->or) {
				NV_TRACE(dev, "Merging DCB entries %d and %d\n",
					 i, j);
				ient->heads |= jent->heads;
				jent->type = 100; /* dummy value */
			}
		}
	}

	/* Compact entries merged into others out of dcb */
	for (i = 0; i < dcb->entries; i++) {
		if (dcb->entry[i].type == 100)
			continue;

		if (newentries != i) {
			dcb->entry[newentries] = dcb->entry[i];
			dcb->entry[newentries].index = newentries;
		}
		newentries++;
	}

	dcb->entries = newentries;
}
5612
/*
 * Locate and parse the VBIOS Display Configuration Block (DCB) into
 * bios->dcb.
 *
 * Handles the DCB >=2.0 (NV17+) and >=3.0 (NV40+) layouts, the older
 * 1.5 "DEV_REC" layout, and falls back to fabricating outputs for
 * BIOSes whose DCB is absent or known-useless (v1.1/1.2/1.4).
 *
 * Returns 0 on success (including the fabricated-output paths),
 * -EINVAL on a bad DCB signature, -ENXIO if no usable entries parsed.
 */
static int
parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct dcb_table *dcb = &bios->dcb;
	uint16_t dcbptr = 0, i2ctabptr = 0;
	uint8_t *dcbtable;
	uint8_t headerlen = 0x4, entries = DCB_MAX_NUM_ENTRIES;
	bool configblock = true;
	int recordlength = 8, confofs = 4;
	int i;

	/* get the offset from 0x36 */
	if (dev_priv->card_type > NV_04) {
		dcbptr = ROM16(bios->data[0x36]);
		if (dcbptr == 0x0000)
			NV_WARN(dev, "No output data (DCB) found in BIOS\n");
	}

	/* this situation likely means a really old card, pre DCB */
	if (dcbptr == 0x0) {
		NV_INFO(dev, "Assuming a CRT output exists\n");
		fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1);

		if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
			fabricate_tv_output(dcb, twoHeads);

		return 0;
	}

	dcbtable = &bios->data[dcbptr];

	/* get DCB version (BCD: high nibble major, low nibble minor) */
	dcb->version = dcbtable[0];
	NV_TRACE(dev, "Found Display Configuration Block version %d.%d\n",
		 dcb->version >> 4, dcb->version & 0xf);

	if (dcb->version >= 0x20) { /* NV17+ */
		uint32_t sig;

		if (dcb->version >= 0x30) { /* NV40+ */
			/* v3.0+ header describes its own geometry */
			headerlen = dcbtable[1];
			entries = dcbtable[2];
			recordlength = dcbtable[3];
			i2ctabptr = ROM16(dcbtable[4]);
			sig = ROM32(dcbtable[6]);
			dcb->gpio_table_ptr = ROM16(dcbtable[10]);
			dcb->connector_table_ptr = ROM16(dcbtable[20]);
		} else {
			/* v2.x: fixed 8-byte header */
			i2ctabptr = ROM16(dcbtable[2]);
			sig = ROM32(dcbtable[4]);
			headerlen = 8;
		}

		if (sig != 0x4edcbdcb) {
			NV_ERROR(dev, "Bad Display Configuration Block "
					"signature (%08X)\n", sig);
			return -EINVAL;
		}
	} else if (dcb->version >= 0x15) { /* some NV11 and NV20 */
		char sig[8] = { 0 };

		/* v1.5 is identified by "DEV_REC" just before the table */
		strncpy(sig, (char *)&dcbtable[-7], 7);
		i2ctabptr = ROM16(dcbtable[2]);
		recordlength = 10;
		confofs = 6;

		if (strcmp(sig, "DEV_REC")) {
			NV_ERROR(dev, "Bad Display Configuration Block "
					"signature (%s)\n", sig);
			return -EINVAL;
		}
	} else {
		/*
		 * v1.4 (some NV15/16, NV11+) seems the same as v1.5, but always
		 * has the same single (crt) entry, even when tv-out present, so
		 * the conclusion is this version cannot really be used.
		 * v1.2 tables (some NV6/10, and NV15+) normally have the same
		 * 5 entries, which are not specific to the card and so no use.
		 * v1.2 does have an I2C table that read_dcb_i2c_table can
		 * handle, but cards exist (nv11 in #14821) with a bad i2c table
		 * pointer, so use the indices parsed in parse_bmp_structure.
		 * v1.1 (NV5+, maybe some NV4) is entirely unhelpful
		 */
		NV_TRACEWARN(dev, "No useful information in BIOS output table; "
				  "adding all possible outputs\n");
		fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1);

		/*
		 * Attempt to detect TV before DVI because the test
		 * for the former is more accurate and it rules the
		 * latter out.
		 */
		if (nv04_tv_identify(dev,
				     bios->legacy.i2c_indices.tv) >= 0)
			fabricate_tv_output(dcb, twoHeads);

		else if (bios->tmds.output0_script_ptr ||
			 bios->tmds.output1_script_ptr)
			fabricate_dvi_i_output(dcb, twoHeads);

		return 0;
	}

	if (!i2ctabptr)
		NV_WARN(dev, "No pointer to DCB I2C port table\n");
	else {
		dcb->i2c_table = &bios->data[i2ctabptr];
		if (dcb->version >= 0x30)
			dcb->i2c_default_indices = dcb->i2c_table[4];
	}

	/* clamp so the loop below cannot overrun dcb->entry[] */
	if (entries > DCB_MAX_NUM_ENTRIES)
		entries = DCB_MAX_NUM_ENTRIES;

	for (i = 0; i < entries; i++) {
		uint32_t connection, config = 0;

		connection = ROM32(dcbtable[headerlen + recordlength * i]);
		if (configblock)
			config = ROM32(dcbtable[headerlen + confofs + recordlength * i]);

		/* seen on an NV11 with DCB v1.5 */
		if (connection == 0x00000000)
			break;

		/* seen on an NV17 with DCB v2.0 */
		if (connection == 0xffffffff)
			break;

		/* low nibble 0xf appears to mark an unused/skip entry */
		if ((connection & 0x0000000f) == 0x0000000f)
			continue;

		NV_TRACEWARN(dev, "Raw DCB entry %d: %08x %08x\n",
			     dcb->entries, connection, config);

		if (!parse_dcb_entry(dev, dcb, connection, config))
			break;
	}

	/*
	 * apart for v2.1+ not being known for requiring merging, this
	 * guarantees dcbent->index is the index of the entry in the rom image
	 */
	if (dcb->version < 0x21)
		merge_like_dcb_entries(dev, dcb);

	if (!dcb->entries)
		return -ENXIO;

	parse_dcb_gpio_table(bios);
	parse_dcb_connector_table(bios);
	return 0;
}
5767
/*
 * Fabricate connector indices (and a fake connector table) for DCB
 * versions that lack a usable connector table (< 4.0).
 *
 * Rule: outputs sharing an i2c bus share a connector; bus-less outputs
 * each get their own connector index.
 */
static void
fixup_legacy_connector(struct nvbios *bios)
{
	struct dcb_table *dcb = &bios->dcb;
	int i, i2c, i2c_conn[DCB_MAX_NUM_I2C_ENTRIES] = { };

	/*
	 * DCB 3.0 also has the table in most cases, but there are some cards
	 * where the table is filled with stub entries, and the DCB entry
	 * indices are all 0. We don't need the connector indices on pre-G80
	 * chips (yet?) so limit the use to DCB 4.0 and above.
	 */
	if (dcb->version >= 0x40)
		return;

	dcb->connector.entries = 0;

	/*
	 * No known connector info before v3.0, so make it up. the rule here
	 * is: anything on the same i2c bus is considered to be on the same
	 * connector. any output without an associated i2c bus is assigned
	 * its own unique connector index.
	 */
	for (i = 0; i < dcb->entries; i++) {
		/*
		 * Ignore the I2C index for on-chip TV-out, as there
		 * are cards with bogus values (nv31m in bug 23212),
		 * and it's otherwise useless.
		 */
		if (dcb->entry[i].type == OUTPUT_TV &&
		    dcb->entry[i].location == DCB_LOC_ON_CHIP)
			dcb->entry[i].i2c_index = 0xf;
		i2c = dcb->entry[i].i2c_index;

		/* i2c_conn[] stores connector-index + 1, 0 meaning unseen */
		if (i2c_conn[i2c]) {
			dcb->entry[i].connector = i2c_conn[i2c] - 1;
			continue;
		}

		dcb->entry[i].connector = dcb->connector.entries++;
		if (i2c != 0xf)
			i2c_conn[i2c] = dcb->connector.entries;
	}

	/* Fake the connector table as well as just connector indices */
	for (i = 0; i < dcb->connector.entries; i++) {
		dcb->connector.entry[i].index = i;
		dcb->connector.entry[i].type = divine_connector_type(bios, i);
		dcb->connector.entry[i].gpio_tag = 0xff;
	}
}
5819
5820static void
5821fixup_legacy_i2c(struct nvbios *bios)
5822{
5823 struct dcb_table *dcb = &bios->dcb;
5824 int i;
5825
5826 for (i = 0; i < dcb->entries; i++) {
5827 if (dcb->entry[i].i2c_index == LEGACY_I2C_CRT)
5828 dcb->entry[i].i2c_index = bios->legacy.i2c_indices.crt;
5829 if (dcb->entry[i].i2c_index == LEGACY_I2C_PANEL)
5830 dcb->entry[i].i2c_index = bios->legacy.i2c_indices.panel;
5831 if (dcb->entry[i].i2c_index == LEGACY_I2C_TV)
5832 dcb->entry[i].i2c_index = bios->legacy.i2c_indices.tv;
5833 }
5834}
5835
/*
 * Load one entry of the "HWSQ" (hardware sequencer) microcode table
 * into the sequencer registers.
 *
 * hwsq_offset points just past the "HWSQ" signature; entry selects
 * which table entry to load.  Returns 0 on success, -ENOENT if the
 * table has too few entries, -EINVAL on an unexpected entry size.
 */
static int load_nv17_hwsq_ucode_entry(struct drm_device *dev, struct nvbios *bios, uint16_t hwsq_offset, int entry)
{
	/*
	 * The header following the "HWSQ" signature has the number of entries,
	 * and the entry size
	 *
	 * An entry consists of a dword to write to the sequencer control reg
	 * (0x00001304), followed by the ucode bytes, written sequentially,
	 * starting at reg 0x00001400
	 */

	uint8_t bytes_to_write;
	uint16_t hwsq_entry_offset;
	int i;

	if (bios->data[hwsq_offset] <= entry) {
		NV_ERROR(dev, "Too few entries in HW sequencer table for "
				"requested entry\n");
		return -ENOENT;
	}

	bytes_to_write = bios->data[hwsq_offset + 1];

	/* only the 36-byte (4 control + 32 ucode) layout is understood */
	if (bytes_to_write != 36) {
		NV_ERROR(dev, "Unknown HW sequencer entry size\n");
		return -EINVAL;
	}

	NV_TRACE(dev, "Loading NV17 power sequencing microcode\n");

	/* skip the 2-byte header (entry count, entry size) */
	hwsq_entry_offset = hwsq_offset + 2 + entry * bytes_to_write;

	/* set sequencer control */
	bios_wr32(bios, 0x00001304, ROM32(bios->data[hwsq_entry_offset]));
	bytes_to_write -= 4;

	/* write ucode */
	for (i = 0; i < bytes_to_write; i += 4)
		bios_wr32(bios, 0x00001400 + i, ROM32(bios->data[hwsq_entry_offset + i + 4]));

	/* twiddle NV_PBUS_DEBUG_4 */
	bios_wr32(bios, NV_PBUS_DEBUG_4, bios_rd32(bios, NV_PBUS_DEBUG_4) | 0x18);

	return 0;
}
5881
5882static int load_nv17_hw_sequencer_ucode(struct drm_device *dev,
5883 struct nvbios *bios)
5884{
5885 /*
5886 * BMP based cards, from NV17, need a microcode loading to correctly
5887 * control the GPIO etc for LVDS panels
5888 *
5889 * BIT based cards seem to do this directly in the init scripts
5890 *
5891 * The microcode entries are found by the "HWSQ" signature.
5892 */
5893
5894 const uint8_t hwsq_signature[] = { 'H', 'W', 'S', 'Q' };
5895 const int sz = sizeof(hwsq_signature);
5896 int hwsq_offset;
5897
5898 hwsq_offset = findstr(bios->data, bios->length, hwsq_signature, sz);
5899 if (!hwsq_offset)
5900 return 0;
5901
5902 /* always use entry 0? */
5903 return load_nv17_hwsq_ucode_entry(dev, bios, hwsq_offset + sz, 0);
5904}
5905
5906uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev)
5907{
5908 struct drm_nouveau_private *dev_priv = dev->dev_private;
5909 struct nvbios *bios = &dev_priv->vbios;
5910 const uint8_t edid_sig[] = {
5911 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
5912 uint16_t offset = 0;
5913 uint16_t newoffset;
5914 int searchlen = NV_PROM_SIZE;
5915
5916 if (bios->fp.edid)
5917 return bios->fp.edid;
5918
5919 while (searchlen) {
5920 newoffset = findstr(&bios->data[offset], searchlen,
5921 edid_sig, 8);
5922 if (!newoffset)
5923 return NULL;
5924 offset += newoffset;
5925 if (!nv_cksum(&bios->data[offset], EDID1_LEN))
5926 break;
5927
5928 searchlen -= offset;
5929 offset++;
5930 }
5931
5932 NV_TRACE(dev, "Found EDID in BIOS\n");
5933
5934 return bios->fp.edid = &bios->data[offset];
5935}
5936
/*
 * Execute a VBIOS init-script table at `table`, with `dcbent` exposed
 * to the script interpreter as the current display output.
 *
 * bios->lock serialises script execution; display.output is only valid
 * while the lock is held and is cleared before release.
 */
void
nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
			    struct dcb_entry *dcbent)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvbios *bios = &dev_priv->vbios;
	/* { execute = true, repeat = false } */
	struct init_exec iexec = { true, false };

	mutex_lock(&bios->lock);
	bios->display.output = dcbent;
	parse_init_table(bios, table, &iexec);
	bios->display.output = NULL;
	mutex_unlock(&bios->lock);
}
5951
5952static bool NVInitVBIOS(struct drm_device *dev)
5953{
5954 struct drm_nouveau_private *dev_priv = dev->dev_private;
5955 struct nvbios *bios = &dev_priv->vbios;
5956
5957 memset(bios, 0, sizeof(struct nvbios));
5958 mutex_init(&bios->lock);
5959 bios->dev = dev;
5960
5961 if (!NVShadowVBIOS(dev, bios->data))
5962 return false;
5963
5964 bios->length = NV_PROM_SIZE;
5965 return true;
5966}
5967
5968static int nouveau_parse_vbios_struct(struct drm_device *dev)
5969{
5970 struct drm_nouveau_private *dev_priv = dev->dev_private;
5971 struct nvbios *bios = &dev_priv->vbios;
5972 const uint8_t bit_signature[] = { 0xff, 0xb8, 'B', 'I', 'T' };
5973 const uint8_t bmp_signature[] = { 0xff, 0x7f, 'N', 'V', 0x0 };
5974 int offset;
5975
5976 offset = findstr(bios->data, bios->length,
5977 bit_signature, sizeof(bit_signature));
5978 if (offset) {
5979 NV_TRACE(dev, "BIT BIOS found\n");
5980 return parse_bit_structure(bios, offset + 6);
5981 }
5982
5983 offset = findstr(bios->data, bios->length,
5984 bmp_signature, sizeof(bmp_signature));
5985 if (offset) {
5986 NV_TRACE(dev, "BMP BIOS found\n");
5987 return parse_bmp_structure(dev, bios, offset);
5988 }
5989
5990 NV_ERROR(dev, "No known BIOS signature found\n");
5991 return -ENODEV;
5992}
5993
/*
 * Run the VBIOS init tables (and related scripts) against the hardware.
 * Called at init and resume time; assumes the vbios structures have
 * already been parsed.  Always returns 0 in this version (ret is never
 * set by any of the calls below).
 */
int
nouveau_run_vbios_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvbios *bios = &dev_priv->vbios;
	int i, ret = 0;

	/* unlock the extended CRTC registers before poking them */
	NVLockVgaCrtcs(dev, false);
	if (nv_two_heads(dev))
		NVSetOwner(dev, bios->state.crtchead);

	if (bios->major_version < 5) /* BMP only */
		load_nv17_hw_sequencer_ucode(dev, bios);

	if (bios->execute) {
		/* reset per-run LVDS script bookkeeping */
		bios->fp.last_script_invoc = 0;
		bios->fp.lvds_init_run = false;
	}

	parse_init_tables(bios);

	/*
	 * Runs some additional script seen on G8x VBIOSen. The VBIOS'
	 * parser will run this right after the init tables, the binary
	 * driver appears to run it at some point later.
	 */
	if (bios->some_script_ptr) {
		struct init_exec iexec = {true, false};

		NV_INFO(dev, "Parsing VBIOS init table at offset 0x%04X\n",
			bios->some_script_ptr);
		parse_init_table(bios, bios->some_script_ptr, &iexec);
	}

	if (dev_priv->card_type >= NV_50) {
		for (i = 0; i < bios->dcb.entries; i++) {
			nouveau_bios_run_display_table(dev,
						       &bios->dcb.entry[i],
						       0, 0);
		}
	}

	/* re-lock the CRTC registers when done */
	NVLockVgaCrtcs(dev, true);

	return ret;
}
6040
6041static void
6042nouveau_bios_i2c_devices_takedown(struct drm_device *dev)
6043{
6044 struct drm_nouveau_private *dev_priv = dev->dev_private;
6045 struct nvbios *bios = &dev_priv->vbios;
6046 struct dcb_i2c_entry *entry;
6047 int i;
6048
6049 entry = &bios->dcb.i2c[0];
6050 for (i = 0; i < DCB_MAX_NUM_I2C_ENTRIES; i++, entry++)
6051 nouveau_i2c_fini(dev, entry);
6052}
6053
/*
 * Top-level VBIOS bring-up: shadow and parse the VBIOS, fix up legacy
 * tables, and (when the card has not been POSTed) execute the init
 * scripts.  Returns 0 on success or a negative errno.
 */
int
nouveau_bios_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvbios *bios = &dev_priv->vbios;
	uint32_t saved_nv_pextdev_boot_0;
	bool was_locked;
	int ret;

	if (!NVInitVBIOS(dev))
		return -ENODEV;

	ret = nouveau_parse_vbios_struct(dev);
	if (ret)
		return ret;

	ret = parse_dcb_table(dev, bios, nv_two_heads(dev));
	if (ret)
		return ret;

	/* replace placeholder indices, then fabricate connector info */
	fixup_legacy_i2c(bios);
	fixup_legacy_connector(bios);

	if (!bios->major_version)	/* we don't run version 0 bios */
		return 0;

	/* these will need remembering across a suspend */
	saved_nv_pextdev_boot_0 = bios_rd32(bios, NV_PEXTDEV_BOOT_0);
	bios->state.saved_nv_pfb_cfg0 = bios_rd32(bios, NV_PFB_CFG0);

	/* init script execution disabled */
	bios->execute = false;

	/* ... unless card isn't POSTed already */
	if (dev_priv->card_type >= NV_10 &&
	    NVReadVgaCrtc(dev, 0, 0x00) == 0 &&
	    NVReadVgaCrtc(dev, 0, 0x1a) == 0) {
		NV_INFO(dev, "Adaptor not initialised\n");
		if (dev_priv->card_type < NV_50) {
			NV_ERROR(dev, "Unable to POST this chipset\n");
			return -ENODEV;
		}

		NV_INFO(dev, "Running VBIOS init tables\n");
		bios->execute = true;
	}

	bios_wr32(bios, NV_PEXTDEV_BOOT_0, saved_nv_pextdev_boot_0);

	ret = nouveau_run_vbios_init(dev);
	if (ret)
		return ret;

	/* feature_byte on BMP is poor, but init always sets CR4B */
	was_locked = NVLockVgaCrtcs(dev, false);
	if (bios->major_version < 5)
		bios->is_mobile = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_4B) & 0x40;

	/* all BIT systems need p_f_m_t for digital_min_front_porch */
	if (bios->is_mobile || bios->major_version >= 5)
		ret = parse_fp_mode_table(dev, bios);
	NVLockVgaCrtcs(dev, was_locked);

	/* allow subsequent scripts to execute */
	bios->execute = true;

	return 0;
}
6122
/* Release resources acquired during nouveau_bios_init() (i2c channels). */
void
nouveau_bios_takedown(struct drm_device *dev)
{
	nouveau_bios_i2c_devices_takedown(dev);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
new file mode 100644
index 000000000000..c0d7b0a3ece0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -0,0 +1,301 @@
1/*
2 * Copyright 2007-2008 Nouveau Project
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
#ifndef __NOUVEAU_BIOS_H__
#define __NOUVEAU_BIOS_H__

#include "nvreg.h"
#include "nouveau_i2c.h"

/* Fixed capacities of the parsed-DCB tables held in struct dcb_table */
#define DCB_MAX_NUM_ENTRIES 16
#define DCB_MAX_NUM_I2C_ENTRIES 16
#define DCB_MAX_NUM_GPIO_ENTRIES 32
#define DCB_MAX_NUM_CONNECTOR_ENTRIES 16

/* dcb_entry.location value for outputs driven by the GPU itself */
#define DCB_LOC_ON_CHIP 0

/* One DCB i2c port; chan is created lazily and torn down at takedown */
struct dcb_i2c_entry {
	uint8_t port_type;
	uint8_t read, write;
	struct nouveau_i2c_chan *chan;
};

/* GPIO function tags found in the DCB gpio table */
enum dcb_gpio_tag {
	DCB_GPIO_TVDAC0 = 0xc,
	DCB_GPIO_TVDAC1 = 0x2d,
};

struct dcb_gpio_entry {
	enum dcb_gpio_tag tag;
	int line;
	bool invert;
	uint32_t entry;		/* raw table dword this was parsed from */
	uint8_t state_default;
	uint8_t state[2];
};

struct dcb_gpio_table {
	int entries;
	struct dcb_gpio_entry entry[DCB_MAX_NUM_GPIO_ENTRIES];
};

/* Physical connector types as encoded in the DCB connector table */
enum dcb_connector_type {
	DCB_CONNECTOR_VGA = 0x00,
	DCB_CONNECTOR_TV_0 = 0x10,
	DCB_CONNECTOR_TV_1 = 0x11,
	DCB_CONNECTOR_TV_3 = 0x13,
	DCB_CONNECTOR_DVI_I = 0x30,
	DCB_CONNECTOR_DVI_D = 0x31,
	DCB_CONNECTOR_LVDS = 0x40,
	DCB_CONNECTOR_DP = 0x46,
	DCB_CONNECTOR_eDP = 0x47,
	DCB_CONNECTOR_HDMI_0 = 0x60,
	DCB_CONNECTOR_HDMI_1 = 0x61,
	DCB_CONNECTOR_NONE = 0xff
};

struct dcb_connector_table_entry {
	uint8_t index;
	uint32_t entry;		/* raw table dword this was parsed from */
	enum dcb_connector_type type;
	uint8_t index2;
	uint8_t gpio_tag;
};

struct dcb_connector_table {
	int entries;
	struct dcb_connector_table_entry entry[DCB_MAX_NUM_CONNECTOR_ENTRIES];
};

/* Output types as encoded in the low nibble of a raw DCB entry */
enum dcb_type {
	OUTPUT_ANALOG = 0,
	OUTPUT_TV = 1,
	OUTPUT_TMDS = 2,
	OUTPUT_LVDS = 3,
	OUTPUT_DP = 6,
	OUTPUT_ANY = -1
};
98
/* One parsed DCB output entry; the union member in use is keyed by type */
struct dcb_entry {
	int index;	/* may not be raw dcb index if merging has happened */
	enum dcb_type type;
	uint8_t i2c_index;
	uint8_t heads;		/* bitmask of CRTCs this output may use */
	uint8_t connector;
	uint8_t bus;
	uint8_t location;	/* DCB_LOC_ON_CHIP or off-chip encoder */
	uint8_t or;
	bool duallink_possible;
	union {
		struct sor_conf {
			int link;
		} sorconf;
		struct {
			int maxfreq;
		} crtconf;
		struct {
			struct sor_conf sor;
			bool use_straps_for_mode;
			bool use_power_scripts;
		} lvdsconf;
		struct {
			bool has_component_output;
		} tvconf;
		struct {
			struct sor_conf sor;
			int link_nr;
			int link_bw;
		} dpconf;
		struct {
			struct sor_conf sor;
		} tmdsconf;
	};
	bool i2c_upper_default;
};

/* Fully parsed DCB: outputs plus the associated i2c/gpio/connector info */
struct dcb_table {
	uint8_t version;

	int entries;
	struct dcb_entry entry[DCB_MAX_NUM_ENTRIES];

	uint8_t *i2c_table;		/* points into the shadowed VBIOS */
	uint8_t i2c_default_indices;
	struct dcb_i2c_entry i2c[DCB_MAX_NUM_I2C_ENTRIES];

	uint16_t gpio_table_ptr;
	struct dcb_gpio_table gpio;
	uint16_t connector_table_ptr;
	struct dcb_connector_table connector;
};
151
/* Output-resource bitmask (which OR an encoder is wired to) */
enum nouveau_or {
	OUTPUT_A = (1 << 0),
	OUTPUT_B = (1 << 1),
	OUTPUT_C = (1 << 2)
};

enum LVDS_script {
	/* Order *does* matter here */
	LVDS_INIT = 1,
	LVDS_RESET,
	LVDS_BACKLIGHT_ON,
	LVDS_BACKLIGHT_OFF,
	LVDS_PANEL_ON,
	LVDS_PANEL_OFF
};

/* changing these requires matching changes to reg tables in nv_get_clock */
#define MAX_PLL_TYPES	4
enum pll_types {
	NVPLL,
	MPLL,
	VPLL1,
	VPLL2
};

/* PLL frequency/divider limits parsed from the VBIOS pll limits table */
struct pll_lims {
	struct {
		int minfreq;
		int maxfreq;
		int min_inputfreq;
		int max_inputfreq;

		uint8_t min_m;
		uint8_t max_m;
		uint8_t min_n;
		uint8_t max_n;
	} vco1, vco2;

	uint8_t max_log2p;
	/*
	 * for most pre nv50 cards setting a log2P of 7 (the common max_log2p
	 * value) is no different to 6 (at least for vplls) so allowing the MNP
	 * calc to use 7 causes the generated clock to be out by a factor of 2.
	 * however, max_log2p cannot be fixed-up during parsing as the
	 * unmodified max_log2p value is still needed for setting mplls, hence
	 * an additional max_usable_log2p member
	 */
	uint8_t max_usable_log2p;
	uint8_t log2p_bias;

	uint8_t min_p;
	uint8_t max_p;

	int refclk;
};
207
/*
 * All parsed VBIOS state for one device: the shadowed ROM image plus
 * every table pointer and derived value the rest of the driver needs.
 */
struct nvbios {
	struct drm_device *dev;

	uint8_t chip_version;

	uint32_t dactestval;
	uint32_t tvdactestval;
	uint8_t digital_min_front_porch;
	bool fp_no_ddc;

	struct mutex lock;	/* serialises init-script execution */

	uint8_t data[NV_PROM_SIZE];	/* shadowed VBIOS image */
	unsigned int length;
	bool execute;		/* false = parse-only, no hw writes */

	uint8_t major_version;
	uint8_t feature_byte;
	bool is_mobile;

	uint32_t fmaxvco, fminvco;

	bool old_style_init;
	/* offsets (into data[]) of the various init-script tables */
	uint16_t init_script_tbls_ptr;
	uint16_t extra_init_script_tbl_ptr;
	uint16_t macro_index_tbl_ptr;
	uint16_t macro_tbl_ptr;
	uint16_t condition_tbl_ptr;
	uint16_t io_condition_tbl_ptr;
	uint16_t io_flag_condition_tbl_ptr;
	uint16_t init_function_tbl_ptr;

	uint16_t pll_limit_tbl_ptr;
	uint16_t ram_restrict_tbl_ptr;
	uint8_t ram_restrict_group_count;

	uint16_t some_script_ptr; /* BIT I + 14 */
	uint16_t init96_tbl_ptr; /* BIT I + 16 */

	struct dcb_table dcb;

	struct {
		int crtchead;
		/* these need remembering across suspend */
		uint32_t saved_nv_pfb_cfg0;
	} state;

	struct {
		/* output the currently-running script acts on (under lock) */
		struct dcb_entry *output;
		uint16_t script_table_ptr;
		uint16_t dp_table_ptr;
	} display;

	struct {
		uint16_t fptablepointer;	/* also used by tmds */
		uint16_t fpxlatetableptr;
		int xlatwidth;
		uint16_t lvdsmanufacturerpointer;
		uint16_t fpxlatemanufacturertableptr;
		uint16_t mode_ptr;
		uint16_t xlated_entry;
		bool power_off_for_reset;
		bool reset_after_pclk_change;
		bool dual_link;
		bool link_c_increment;
		bool if_is_24bit;
		int duallink_transition_clk;
		uint8_t strapless_is_24bit;
		uint8_t *edid;		/* points into data[], found lazily */

		/* will need resetting after suspend */
		int last_script_invoc;
		bool lvds_init_run;
	} fp;

	struct {
		uint16_t output0_script_ptr;
		uint16_t output1_script_ptr;
	} tmds;

	struct {
		/* BMP-era (pre-BIT) table pointers and indices */
		uint16_t mem_init_tbl_ptr;
		uint16_t sdr_seq_tbl_ptr;
		uint16_t ddr_seq_tbl_ptr;

		struct {
			uint8_t crt, tv, panel;
		} i2c_indices;

		uint16_t lvds_single_a_script_ptr;
	} legacy;
};

#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
new file mode 100644
index 000000000000..957d17629840
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -0,0 +1,778 @@
1/*
2 * Copyright 2007 Dave Airlied
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 */
24/*
25 * Authors: Dave Airlied <airlied@linux.ie>
26 * Ben Skeggs <darktama@iinet.net.au>
27 * Jeremy Kolb <jkolb@brandeis.edu>
28 */
29
30#include "drmP.h"
31
32#include "nouveau_drm.h"
33#include "nouveau_drv.h"
34#include "nouveau_dma.h"
35
36#include <linux/log2.h>
37#include <linux/slab.h>
38
/*
 * TTM destroy callback for a nouveau bo: drop the kmap, release any
 * tiling region, unlink from the device bo list and free the wrapper.
 * Must not be reached while a GEM object still references the bo.
 */
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	/* safe even if the bo was never mapped */
	ttm_bo_kunmap(&nvbo->kmap);

	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);

	if (nvbo->tile)
		nv10_mem_expire_tiling(dev, nvbo->tile, NULL);

	spin_lock(&dev_priv->ttm.bo_list_lock);
	list_del(&nvbo->head);
	spin_unlock(&dev_priv->ttm.bo_list_lock);
	kfree(nvbo);
}
59
/*
 * Adjust a requested bo alignment and size in place to satisfy the
 * chipset's tiling requirements for the given tile mode/flags.
 */
static void
nouveau_bo_fixup_align(struct drm_device *dev,
		       uint32_t tile_mode, uint32_t tile_flags,
		       int *align, int *size)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/*
	 * Some of the tile_flags have a periodic structure of N*4096 bytes,
	 * align to that as well as the page size. Align the size to the
	 * appropriate boundaries. This does imply that sizes are rounded up
	 * 3-7 pages, so be aware of this and do not waste memory by allocating
	 * many small buffers.
	 */
	if (dev_priv->card_type == NV_50) {
		uint32_t block_size = dev_priv->vram_size >> 15;
		int i;

		/* tile_flags values with the periodic structure noted above */
		switch (tile_flags) {
		case 0x1800:
		case 0x2800:
		case 0x4800:
		case 0x7a00:
			/* search for the smallest multiple of block_size
			 * that is also 64KiB aligned */
			if (is_power_of_2(block_size)) {
				for (i = 1; i < 10; i++) {
					*align = 12 * i * block_size;
					if (!(*align % 65536))
						break;
				}
			} else {
				for (i = 1; i < 10; i++) {
					*align = 8 * i * block_size;
					if (!(*align % 65536))
						break;
				}
			}
			*size = roundup(*size, *align);
			break;
		default:
			break;
		}

	} else {
		/* pre-NV50: alignment depends only on chipset generation */
		if (tile_mode) {
			if (dev_priv->chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * tile_mode);

			} else if (dev_priv->chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * tile_mode);

			} else if (dev_priv->chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * tile_mode);

			} else if (dev_priv->chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * tile_mode);
			}
		}
	}

	/* ALIGN works only on powers of two. */
	*size = roundup(*size, PAGE_SIZE);

	if (dev_priv->card_type == NV_50) {
		*size = roundup(*size, 65536);
		*align = max(65536, *align);
	}
}
131
/*
 * Allocate and initialise a nouveau buffer object.
 *
 * size/align are fixed up for tiling constraints before the TTM object
 * is created; `chan` is only published in nvbo->channel for the
 * duration of ttm_bo_init() (eviction may use it).  On success the new
 * bo is added to the device list and returned via *pnvbo.
 *
 * Returns 0 or a negative errno; on ttm_bo_init() failure the wrapper
 * has already been freed via nouveau_bo_del_ttm.
 */
int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
	       int size, int align, uint32_t flags, uint32_t tile_mode,
	       uint32_t tile_flags, bool no_vm, bool mappable,
	       struct nouveau_bo **pnvbo)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;
	int ret = 0;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	nvbo->mappable = mappable;
	nvbo->no_vm = no_vm;
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;

	nouveau_bo_fixup_align(dev, tile_mode, tile_flags, &align, &size);
	align >>= PAGE_SHIFT;	/* ttm_bo_init takes alignment in pages */

	nvbo->placement.fpfn = 0;
	/* mappable bos must stay within the BAR-addressable VRAM range */
	nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0;
	nouveau_bo_placement_set(nvbo, flags, 0);

	nvbo->channel = chan;
	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
			  ttm_bo_type_device, &nvbo->placement, align, 0,
			  false, NULL, size, nouveau_bo_del_ttm);
	nvbo->channel = NULL;
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}

	spin_lock(&dev_priv->ttm.bo_list_lock);
	list_add_tail(&nvbo->head, &dev_priv->ttm.bo_list);
	spin_unlock(&dev_priv->ttm.bo_list_lock);
	*pnvbo = nvbo;
	return 0;
}
175
176static void
177set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
178{
179 *n = 0;
180
181 if (type & TTM_PL_FLAG_VRAM)
182 pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
183 if (type & TTM_PL_FLAG_TT)
184 pl[(*n)++] = TTM_PL_FLAG_TT | flags;
185 if (type & TTM_PL_FLAG_SYSTEM)
186 pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
187}
188
189void
190nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
191{
192 struct ttm_placement *pl = &nvbo->placement;
193 uint32_t flags = TTM_PL_MASK_CACHING |
194 (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);
195
196 pl->placement = nvbo->placements;
197 set_placement_list(nvbo->placements, &pl->num_placement,
198 type, flags);
199
200 pl->busy_placement = nvbo->busy_placements;
201 set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
202 type | busy, flags);
203}
204
/*
 * Pin a bo into the memory domain(s) given by memtype (refcounted).
 *
 * A bo already pinned into a different domain is refused with -EINVAL.
 * On the first successful pin the relevant aperture-free accounting is
 * decremented.  Returns 0 or a negative errno.
 */
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(nouveau_bdev(bo->bdev)->dev,
			 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		return -EINVAL;
	}

	/* already pinned: just take another reference */
	if (nvbo->pin_refcnt++)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		goto out;

	/* pin_refcnt is already non-zero, so this adds NO_EVICT */
	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free -= bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free -= bo->mem.size;
			break;
		default:
			break;
		}
	}
	ttm_bo_unreserve(bo);
out:
	/* roll back the refcount taken above on any failure */
	if (unlikely(ret))
		nvbo->pin_refcnt--;
	return ret;
}
247
/*
 * Drop one pin reference; on the last unpin, re-validate without
 * NO_EVICT and return the space to the aperture-free accounting.
 * NOTE(review): an unpaired unpin underflows pin_refcnt silently.
 */
int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	/* still pinned by someone else */
	if (--nvbo->pin_refcnt)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		return ret;

	/* refcnt is now zero, so this drops NO_EVICT */
	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free += bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free += bo->mem.size;
			break;
		default:
			break;
		}
	}

	ttm_bo_unreserve(bo);
	return ret;
}
281
/*
 * Kernel-map the whole bo into nvbo->kmap (for the rd/wr accessors
 * below).  Returns 0 or a negative errno from reserve/kmap.
 */
int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}
295
/* Undo nouveau_bo_map(); safe to call on an unmapped bo. */
void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	ttm_bo_kunmap(&nvbo->kmap);
}
301
302u16
303nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
304{
305 bool is_iomem;
306 u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
307 mem = &mem[index];
308 if (is_iomem)
309 return ioread16_native((void __force __iomem *)mem);
310 else
311 return *mem;
312}
313
314void
315nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
316{
317 bool is_iomem;
318 u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
319 mem = &mem[index];
320 if (is_iomem)
321 iowrite16_native(val, (void __force __iomem *)mem);
322 else
323 *mem = val;
324}
325
326u32
327nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
328{
329 bool is_iomem;
330 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
331 mem = &mem[index];
332 if (is_iomem)
333 return ioread32_native((void __force __iomem *)mem);
334 else
335 return *mem;
336}
337
338void
339nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
340{
341 bool is_iomem;
342 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
343 mem = &mem[index];
344 if (is_iomem)
345 iowrite32_native(val, (void __force __iomem *)mem);
346 else
347 *mem = val;
348}
349
/*
 * TTM callback: create a GART backend matching the device's GART type
 * (AGP when compiled in, otherwise SGDMA).  Returns NULL for an
 * unknown type.
 */
static struct ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
	case NOUVEAU_GART_AGP:
		return ttm_agp_backend_init(bdev, dev->agp->bridge);
#endif
	case NOUVEAU_GART_SGDMA:
		return nouveau_sgdma_init_ttm(dev);
	default:
		NV_ERROR(dev, "Unknown GART type %d\n",
			 dev_priv->gart_info.type);
		break;
	}

	return NULL;
}
371
/* TTM callback: cache invalidation is a no-op here. */
static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}
378
/*
 * TTM callback: describe the capabilities, caching options and
 * aperture geometry of each memory type (SYSTEM / VRAM / TT).
 * Returns 0, or -EINVAL for an unsupported type or GART flavour.
 */
static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE |
			     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;

		man->io_addr = NULL;
		/* VRAM aperture is PCI BAR 1; clamp to actual VRAM size */
		man->io_offset = drm_get_resource_start(dev, 1);
		man->io_size = drm_get_resource_len(dev, 1);
		if (man->io_size > dev_priv->vram_size)
			man->io_size = dev_priv->vram_size;

		man->gpu_offset = dev_priv->vm_vram_base;
		break;
	case TTM_PL_TT:
		/* caching/flags depend on which GART flavour backs TT */
		switch (dev_priv->gart_info.type) {
		case NOUVEAU_GART_AGP:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
			man->available_caching = TTM_PL_FLAG_UNCACHED;
			man->default_caching = TTM_PL_FLAG_UNCACHED;
			break;
		case NOUVEAU_GART_SGDMA:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
			break;
		default:
			NV_ERROR(dev, "Unknown GART type: %d\n",
				 dev_priv->gart_info.type);
			return -EINVAL;
		}

		man->io_offset = dev_priv->gart_info.aper_base;
		man->io_size = dev_priv->gart_info.aper_size;
		man->io_addr = NULL;
		man->gpu_offset = dev_priv->vm_gart_base;
		break;
	default:
		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}
439
440static void
441nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
442{
443 struct nouveau_bo *nvbo = nouveau_bo(bo);
444
445 switch (bo->mem.mem_type) {
446 case TTM_PL_VRAM:
447 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
448 TTM_PL_FLAG_SYSTEM);
449 break;
450 default:
451 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
452 break;
453 }
454
455 *pl = nvbo->placement;
456}
457
458
459/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
460 * TTM_PL_{VRAM,TT} directly.
461 */
462
/*
 * Emit a fence on @chan for the just-queued copy and hand it to TTM so
 * the old backing store is only released once the GPU copy completes.
 * If the bo is owned by a different channel than the one that did the
 * copy, wait for the fence here before returning.
 */
static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
			      struct nouveau_bo *nvbo, bool evict, bool no_wait,
			      struct ttm_mem_reg *new_mem)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, &fence, true);
	if (ret)
		return ret;

	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
					evict, no_wait, new_mem);
	/* NOTE(review): when the bo's channel differs from @chan, any
	 * error from ttm_bo_move_accel_cleanup() above is overwritten
	 * by the fence-wait result — confirm this is intended. */
	if (nvbo->channel && nvbo->channel != chan)
		ret = nouveau_fence_wait(fence, NULL, false, false);
	nouveau_fence_unref((void *)&fence);
	return ret;
}
482
483static inline uint32_t
484nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
485 struct ttm_mem_reg *mem)
486{
487 if (chan == nouveau_bdev(nvbo->bo.bdev)->channel) {
488 if (mem->mem_type == TTM_PL_TT)
489 return NvDmaGART;
490 return NvDmaVRAM;
491 }
492
493 if (mem->mem_type == TTM_PL_TT)
494 return chan->gart_handle;
495 return chan->vram_handle;
496}
497
/*
 * GPU-accelerated bo copy using the M2MF (memory-to-memory format)
 * engine.  Streams the copy in chunks of up to 2047 page-sized lines
 * per M2MF transfer, then fences the move via
 * nouveau_bo_move_accel_cleanup().
 */
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     int no_wait, struct ttm_mem_reg *new_mem)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_channel *chan;
	uint64_t src_offset, dst_offset;
	uint32_t page_count;
	int ret;

	/* Prefer the bo's own channel; fall back to the driver channel
	 * for tiled or no_vm buffers. */
	chan = nvbo->channel;
	if (!chan || nvbo->tile_flags || nvbo->no_vm)
		chan = dev_priv->channel;

	src_offset = old_mem->mm_node->start << PAGE_SHIFT;
	dst_offset = new_mem->mm_node->start << PAGE_SHIFT;
	if (chan != dev_priv->channel) {
		/* User channels address memory through the per-type VM
		 * bases rather than raw offsets. */
		if (old_mem->mem_type == TTM_PL_TT)
			src_offset += dev_priv->vm_gart_base;
		else
			src_offset += dev_priv->vm_vram_base;

		if (new_mem->mem_type == TTM_PL_TT)
			dst_offset += dev_priv->vm_gart_base;
		else
			dst_offset += dev_priv->vm_vram_base;
	}

	/* Bind source/destination DMA objects for the transfer. */
	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;
	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, old_mem));
	OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, new_mem));

	if (dev_priv->card_type >= NV_50) {
		/* NV50+ M2MF setup (methods 0x0200/0x021c); exact
		 * semantics of these methods not visible here. */
		ret = RING_SPACE(chan, 4);
		if (ret)
			return ret;
		BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
		OUT_RING(chan, 1);
		BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
		OUT_RING(chan, 1);
	}

	page_count = new_mem->num_pages;
	while (page_count) {
		/* M2MF line count is limited to 2047 per transfer. */
		int line_count = (page_count > 2047) ? 2047 : page_count;

		if (dev_priv->card_type >= NV_50) {
			/* High 32 bits of the 40-bit VM addresses. */
			ret = RING_SPACE(chan, 3);
			if (ret)
				return ret;
			BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
			OUT_RING(chan, upper_32_bits(src_offset));
			OUT_RING(chan, upper_32_bits(dst_offset));
		}
		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;
		BEGIN_RING(chan, NvSubM2MF,
			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING(chan, lower_32_bits(src_offset));
		OUT_RING(chan, lower_32_bits(dst_offset));
		OUT_RING(chan, PAGE_SIZE); /* src_pitch */
		OUT_RING(chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING(chan, PAGE_SIZE); /* line_length */
		OUT_RING(chan, line_count);
		OUT_RING(chan, (1<<8)|(1<<0));
		OUT_RING(chan, 0);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING(chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait, new_mem);
}
580
/*
 * Two-hop move used when the destination is system memory: first do an
 * accelerated M2MF copy into a temporary GART (TT) placement, then let
 * TTM complete the move into the final placement.  The temporary
 * mm_node is released under the LRU lock on all paths.
 */
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait, struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	/* Single TT placement, no range restriction. */
	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
	if (ret)
		return ret;

	/* Bind the pages so the GPU can reach the temporary copy. */
	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
out:
	if (tmp_mem.mm_node) {
		spin_lock(&bo->bdev->glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&bo->bdev->glob->lru_lock);
	}

	return ret;
}
618
619static int
620nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
621 bool no_wait, struct ttm_mem_reg *new_mem)
622{
623 u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
624 struct ttm_placement placement;
625 struct ttm_mem_reg tmp_mem;
626 int ret;
627
628 placement.fpfn = placement.lpfn = 0;
629 placement.num_placement = placement.num_busy_placement = 1;
630 placement.placement = placement.busy_placement = &placement_memtype;
631
632 tmp_mem = *new_mem;
633 tmp_mem.mm_node = NULL;
634 ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
635 if (ret)
636 return ret;
637
638 ret = ttm_bo_move_ttm(bo, evict, no_wait, &tmp_mem);
639 if (ret)
640 goto out;
641
642 ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
643 if (ret)
644 goto out;
645
646out:
647 if (tmp_mem.mm_node) {
648 spin_lock(&bo->bdev->glob->lru_lock);
649 drm_mm_put_block(tmp_mem.mm_node);
650 spin_unlock(&bo->bdev->glob->lru_lock);
651 }
652
653 return ret;
654}
655
/*
 * Set up VM/tiling state for a bo moving into VRAM.  On NV50 the
 * linear VM range is (re)bound with the bo's tile flags; on NV10+
 * a tile region is allocated and returned through @new_tile.
 *
 * NOTE(review): @new_tile is only written on the early-out path and
 * the NV10 path; the caller (nouveau_bo_move) pre-initialises it to
 * NULL, which the NV50 and pre-NV10 paths rely on.
 */
static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_tile_reg **new_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	uint64_t offset;
	int ret;

	if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
		/* Nothing to do. */
		*new_tile = NULL;
		return 0;
	}

	offset = new_mem->mm_node->start << PAGE_SHIFT;

	if (dev_priv->card_type == NV_50) {
		ret = nv50_mem_vm_bind_linear(dev,
					      offset + dev_priv->vm_vram_base,
					      new_mem->size, nvbo->tile_flags,
					      offset);
		if (ret)
			return ret;

	} else if (dev_priv->card_type >= NV_10) {
		*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
						nvbo->tile_mode);
	}

	return 0;
}
689
690static void
691nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
692 struct nouveau_tile_reg *new_tile,
693 struct nouveau_tile_reg **old_tile)
694{
695 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
696 struct drm_device *dev = dev_priv->dev;
697
698 if (dev_priv->card_type >= NV_10 &&
699 dev_priv->card_type < NV_50) {
700 if (*old_tile)
701 nv10_mem_expire_tiling(dev, *old_tile, bo->sync_obj);
702
703 *old_tile = new_tile;
704 }
705}
706
/*
 * TTM hook: move a bo between memory pools.  Binds VM/tiling state for
 * the destination first, then picks a strategy:
 *  - CPU memcpy while the card is not fully initialised,
 *  - a "fake" move (struct assignment) for unpopulated system bos,
 *  - a hardware copy (possibly via a temporary TT hop) otherwise,
 *    with memcpy as the fallback if the hardware path fails.
 * On success the bo's tile region is rotated to the new one; on
 * failure the new tile (if any) is discarded.
 */
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait, struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_tile_reg *new_tile = NULL;
	int ret = 0;

	ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
	if (ret)
		return ret;

	/* Software copy if the card isn't up and running yet. */
	if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE ||
	    !dev_priv->channel) {
		ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
		goto out;
	}

	/* Fake bo copy: no backing pages yet, so just adopt the new
	 * placement wholesale. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* Hardware assisted copy. */
	if (new_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem);
	else if (old_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem);
	else
		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);

	if (!ret)
		goto out;

	/* Fallback to software copy. */
	ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);

out:
	if (ret)
		nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
	else
		nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);

	return ret;
}
758
/* TTM hook: mmap access control.  No restrictions are enforced. */
static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}
764
/*
 * TTM driver vtable for nouveau buffer objects.  Moves are serviced by
 * nouveau_bo_move() (M2MF with memcpy fallback); synchronisation
 * objects are nouveau fences.
 */
struct ttm_bo_driver nouveau_bo_driver = {
	.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	/* sync_obj_* hooks operate on struct nouveau_fence pointers. */
	.sync_obj_signaled = nouveau_fence_signalled,
	.sync_obj_wait = nouveau_fence_wait,
	.sync_obj_flush = nouveau_fence_flush,
	.sync_obj_unref = nouveau_fence_unref,
	.sync_obj_ref = nouveau_fence_ref,
};
778
diff --git a/drivers/gpu/drm/nouveau/nouveau_calc.c b/drivers/gpu/drm/nouveau/nouveau_calc.c
new file mode 100644
index 000000000000..88f9bc0941eb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_calc.c
@@ -0,0 +1,478 @@
1/*
2 * Copyright 1993-2003 NVIDIA, Corporation
3 * Copyright 2007-2009 Stuart Bennett
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
19 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
20 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24#include "drmP.h"
25#include "nouveau_drv.h"
26#include "nouveau_hw.h"
27
28/****************************************************************************\
29* *
30* The video arbitration routines calculate some "magic" numbers. Fixes *
31* the snow seen when accessing the framebuffer without it. *
32* It just works (I hope). *
33* *
34\****************************************************************************/
35
/* Output of the arbitration calculation: CRTC FIFO settings. */
struct nv_fifo_info {
	int lwm;	/* FIFO low watermark */
	int burst;	/* FIFO burst size */
};
40
/* Inputs to the arbitration calculation: clocks and memory subsystem
 * characteristics of the card being programmed. */
struct nv_sim_state {
	int pclk_khz;		/* pixel clock */
	int mclk_khz;		/* memory clock */
	int nvclk_khz;		/* core clock */
	int bpp;		/* framebuffer bits per pixel */
	int mem_page_miss;	/* page-miss penalty, in memory clocks */
	int mem_latency;	/* CAS latency */
	int memory_type;	/* 0/1 — exact meaning board-specific */
	int memory_width;	/* memory bus width in bits (64 or 128) */
	int two_heads;		/* nonzero if the chip has two CRTCs */
};
52
/*
 * NV04-generation FIFO arbitration: iteratively compute a CRTC FIFO
 * low watermark (and fixed 128-byte burst) from the clock ratios and
 * memory latencies in @arb.  The loop relaxes mclk_extra until the
 * drain/fill balance check passes.  The magic constants are
 * empirically derived latency budgets inherited from the original
 * NVIDIA code — do not "simplify" them.
 */
static void
nv04_calc_arb(struct nv_fifo_info *fifo, struct nv_sim_state *arb)
{
	int pagemiss, cas, width, bpp;
	int nvclks, mclks, pclks, crtpagemiss;
	int found, mclk_extra, mclk_loop, cbs, m1, p1;
	int mclk_freq, pclk_freq, nvclk_freq;
	int us_m, us_n, us_p, crtc_drain_rate;
	int cpm_us, us_crt, clwm;

	pclk_freq = arb->pclk_khz;
	mclk_freq = arb->mclk_khz;
	nvclk_freq = arb->nvclk_khz;
	pagemiss = arb->mem_page_miss;
	cas = arb->mem_latency;
	width = arb->memory_width >> 6;		/* bus width in 64-bit units (unused below) */
	bpp = arb->bpp;
	cbs = 128;				/* fixed burst size */

	/* Fixed latency budgets in clocks of each domain. */
	pclks = 2;
	nvclks = 10;
	mclks = 13 + cas;
	mclk_extra = 3;
	found = 0;

	while (!found) {
		found = 1;

		/* Convert the clock-domain latencies to microseconds
		 * (values are kHz, hence the 1000*1000 scaling). */
		mclk_loop = mclks + mclk_extra;
		us_m = mclk_loop * 1000 * 1000 / mclk_freq;
		us_n = nvclks * 1000 * 1000 / nvclk_freq;
		us_p = nvclks * 1000 * 1000 / pclk_freq;

		/* CRTC drains the FIFO at pclk * bytes-per-pixel. */
		crtc_drain_rate = pclk_freq * bpp / 8;
		crtpagemiss = 2;
		crtpagemiss += 1;
		cpm_us = crtpagemiss * pagemiss * 1000 * 1000 / mclk_freq;
		us_crt = cpm_us + us_m + us_n + us_p;
		clwm = us_crt * crtc_drain_rate / (1000 * 1000);
		clwm++;			/* round up */

		/* Check that a burst refill can keep ahead of the
		 * drain; if not, retry with less mclk slack. */
		m1 = clwm + cbs - 512;
		p1 = m1 * pclk_freq / mclk_freq;
		p1 = p1 * bpp / 8;
		if ((p1 < m1 && m1 > 0) || clwm > 519) {
			found = !mclk_extra;
			mclk_extra--;
		}
		if (clwm < 384)
			clwm = 384;	/* enforce minimum watermark */

		fifo->lwm = clwm;
		fifo->burst = cbs;
	}
}
108
/*
 * NV10+ FIFO arbitration: model the CRTC FIFO fill/drain rates and the
 * worst-case refill latency to pick a burst size (power of two, capped
 * at 1024) and a low watermark between the computed min and max.
 * The per-stage clock counts below are the model, documented inline.
 */
static void
nv10_calc_arb(struct nv_fifo_info *fifo, struct nv_sim_state *arb)
{
	int fill_rate, drain_rate;
	int pclks, nvclks, mclks, xclks;
	int pclk_freq, nvclk_freq, mclk_freq;
	int fill_lat, extra_lat;
	int max_burst_o, max_burst_l;
	int fifo_len, min_lwm, max_lwm;
	const int burst_lat = 80; /* Maximum allowable latency due
				   * to the CRTC FIFO burst. (ns) */

	pclk_freq = arb->pclk_khz;
	nvclk_freq = arb->nvclk_khz;
	mclk_freq = arb->mclk_khz;

	fill_rate = mclk_freq * arb->memory_width / 8; /* kB/s */
	drain_rate = pclk_freq * arb->bpp / 8; /* kB/s */

	fifo_len = arb->two_heads ? 1536 : 1024; /* B */

	/* Fixed FIFO refill latency. */

	pclks = 4;	/* lwm detect. */

	nvclks = 3	/* lwm -> sync. */
		+ 2	/* fbi bus cycles (1 req + 1 busy) */
		+ 1	/* 2 edge sync.  may be very close to edge so
			 * just put one. */
		+ 1	/* fbi_d_rdv_n */
		+ 1	/* Fbi_d_rdata */
		+ 1;	/* crtfifo load */

	mclks = 1	/* 2 edge sync.  may be very close to edge so
			 * just put one. */
		+ 1	/* arb_hp_req */
		+ 5	/* tiling pipeline */
		+ 2	/* latency fifo */
		+ 2	/* memory request to fbio block */
		+ 7;	/* data returned from fbio block */

	/* Need to accumulate 256 bits for read */
	mclks += (arb->memory_type == 0 ? 2 : 1)
		* arb->memory_width / 32;

	fill_lat = mclks * 1000 * 1000 / mclk_freq   /* minimum mclk latency */
		+ nvclks * 1000 * 1000 / nvclk_freq  /* nvclk latency */
		+ pclks * 1000 * 1000 / pclk_freq;   /* pclk latency */

	/* Conditional FIFO refill latency. */

	xclks = 2 * arb->mem_page_miss + mclks /* Extra latency due to
						* the overlay. */
		+ 2 * arb->mem_page_miss       /* Extra pagemiss latency. */
		+ (arb->bpp == 32 ? 8 : 4);    /* Margin of error. */

	extra_lat = xclks * 1000 * 1000 / mclk_freq;

	if (arb->two_heads)
		/* Account for another CRTC. */
		extra_lat += fill_lat + extra_lat + burst_lat;

	/* FIFO burst */

	/* Max burst not leading to overflows. */
	max_burst_o = (1 + fifo_len - extra_lat * drain_rate / (1000 * 1000))
		* (fill_rate / 1000) / ((fill_rate - drain_rate) / 1000);
	fifo->burst = min(max_burst_o, 1024);

	/* Max burst value with an acceptable latency. */
	max_burst_l = burst_lat * fill_rate / (1000 * 1000);
	fifo->burst = min(max_burst_l, fifo->burst);

	fifo->burst = rounddown_pow_of_two(fifo->burst);

	/* FIFO low watermark */

	min_lwm = (fill_lat + extra_lat) * drain_rate / (1000 * 1000) + 1;
	max_lwm = fifo_len - fifo->burst
		+ fill_lat * drain_rate / (1000 * 1000)
		+ fifo->burst * drain_rate / fill_rate;

	fifo->lwm = min_lwm + 10 * (max_lwm - min_lwm) / 100; /* Empirical. */
}
193
/*
 * Gather clock/memory parameters and run the generation-appropriate
 * arbitration calculation, returning the CRTC register encodings:
 * *burst = log2(burst/16), *lwm = watermark/8.
 *
 * On nForce/nForce2 IGPs the memory characteristics come from the
 * host bridge (PCI device 0:1, config dword 0x7c) instead of PFB.
 */
static void
nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
		int *burst, int *lwm)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv_fifo_info fifo_data;
	struct nv_sim_state sim_data;
	int MClk = nouveau_hw_get_clock(dev, MPLL);
	int NVClk = nouveau_hw_get_clock(dev, NVPLL);
	uint32_t cfg1 = nvReadFB(dev, NV_PFB_CFG1);

	sim_data.pclk_khz = VClk;
	sim_data.mclk_khz = MClk;
	sim_data.nvclk_khz = NVClk;
	sim_data.bpp = bpp;
	sim_data.two_heads = nv_two_heads(dev);
	if ((dev->pci_device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ ||
	    (dev->pci_device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) {
		uint32_t type;

		/* IGP: memory config lives in the host bridge. */
		pci_read_config_dword(pci_get_bus_and_slot(0, 1), 0x7c, &type);

		sim_data.memory_type = (type >> 12) & 1;
		sim_data.memory_width = 64;
		sim_data.mem_latency = 3;
		sim_data.mem_page_miss = 10;
	} else {
		/* Discrete: decode memory config from PFB/straps. */
		sim_data.memory_type = nvReadFB(dev, NV_PFB_CFG0) & 0x1;
		sim_data.memory_width = (nvReadEXTDEV(dev, NV_PEXTDEV_BOOT_0) & 0x10) ? 128 : 64;
		sim_data.mem_latency = cfg1 & 0xf;
		sim_data.mem_page_miss = ((cfg1 >> 4) & 0xf) + ((cfg1 >> 31) & 0x1);
	}

	if (dev_priv->card_type == NV_04)
		nv04_calc_arb(&fifo_data, &sim_data);
	else
		nv10_calc_arb(&fifo_data, &sim_data);

	/* Convert to the register encodings used by the CRTC code. */
	*burst = ilog2(fifo_data.burst >> 4);
	*lwm = fifo_data.lwm >> 3;
}
235
/*
 * NV30-generation arbitration: fixed FIFO geometry, so the burst and
 * watermark encodings are constants (burst = ilog2(512 >> 5) = 4,
 * lwm = (2048 - 512) >> 3 = 192).
 */
static void
nv30_update_arb(int *burst, int *lwm)
{
	const unsigned int fifo_size = 2048;
	const unsigned int burst_size = 512;
	const unsigned int graphics_lwm = fifo_size - burst_size;

	*burst = ilog2(burst_size >> 5);
	*lwm = graphics_lwm >> 3;
}
248
249void
250nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm)
251{
252 struct drm_nouveau_private *dev_priv = dev->dev_private;
253
254 if (dev_priv->card_type < NV_30)
255 nv04_update_arb(dev, vclk, bpp, burst, lwm);
256 else if ((dev->pci_device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
257 (dev->pci_device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
258 *burst = 128;
259 *lwm = 0x0480;
260 } else
261 nv30_update_arb(burst, lwm);
262}
263
/*
 * Find M, N and P for a single stage PLL
 *
 * Note that some bioses (NV3x) have lookup tables of precomputed MNP
 * values, but we're too lazy to use those atm
 *
 * "clk" parameter in kHz
 * returns calculated clock (0 if nothing usable was found); the best
 * M1/N1/log2P found are written to @bestpv.
 *
 * Exhaustive search over P then M, computing the N that best matches,
 * tracking the smallest |calcclk - clk|.  Exact rounding order of the
 * integer math matters — do not refactor the arithmetic.
 */
static int
getMNP_single(struct drm_device *dev, struct pll_lims *pll_lim, int clk,
	      struct nouveau_pll_vals *bestpv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int cv = dev_priv->vbios.chip_version;
	int minvco = pll_lim->vco1.minfreq, maxvco = pll_lim->vco1.maxfreq;
	int minM = pll_lim->vco1.min_m, maxM = pll_lim->vco1.max_m;
	int minN = pll_lim->vco1.min_n, maxN = pll_lim->vco1.max_n;
	int minU = pll_lim->vco1.min_inputfreq;
	int maxU = pll_lim->vco1.max_inputfreq;
	/* max_p set => P is a plain divider; otherwise P is log2. */
	int minP = pll_lim->max_p ? pll_lim->min_p : 0;
	int maxP = pll_lim->max_p ? pll_lim->max_p : pll_lim->max_usable_log2p;
	int crystal = pll_lim->refclk;
	int M, N, thisP, P;
	int clkP, calcclk;
	int delta, bestdelta = INT_MAX;
	int bestclk = 0;

	/* this division verified for nv20, nv18, nv28 (Haiku), and nv34 */
	/* possibly correlated with introduction of 27MHz crystal */
	if (dev_priv->card_type < NV_50) {
		if (cv < 0x17 || cv == 0x1a || cv == 0x20) {
			if (clk > 250000)
				maxM = 6;
			if (clk > 340000)
				maxM = 2;
		} else if (cv < 0x40) {
			if (clk > 150000)
				maxM = 6;
			if (clk > 200000)
				maxM = 4;
			if (clk > 340000)
				maxM = 2;
		}
	}

	/* Widen the VCO window if the target can't reach it even at
	 * maximum post-divide. */
	P = pll_lim->max_p ? maxP : (1 << maxP);
	if ((clk * P) < minvco) {
		minvco = clk * maxP;
		maxvco = minvco * 2;
	}

	if (clk + clk/200 > maxvco)	/* +0.5% */
		maxvco = clk + clk/200;

	/* NV34 goes maxlog2P->0, NV20 goes 0->maxlog2P */
	for (thisP = minP; thisP <= maxP; thisP++) {
		P = pll_lim->max_p ? thisP : (1 << thisP);
		clkP = clk * P;

		if (clkP < minvco)
			continue;
		if (clkP > maxvco)
			return bestclk;

		for (M = minM; M <= maxM; M++) {
			/* crystal/M is the PLL input frequency; must
			 * stay inside [minU, maxU]. */
			if (crystal/M < minU)
				return bestclk;
			if (crystal/M > maxU)
				continue;

			/* add crystal/2 to round better */
			N = (clkP * M + crystal/2) / crystal;

			if (N < minN)
				continue;
			if (N > maxN)
				break;

			/* more rounding additions */
			calcclk = ((N * crystal + P/2) / P + M/2) / M;
			delta = abs(calcclk - clk);
			/* we do an exhaustive search rather than terminating
			 * on an optimality condition...
			 */
			if (delta < bestdelta) {
				bestdelta = delta;
				bestclk = calcclk;
				bestpv->N1 = N;
				bestpv->M1 = M;
				bestpv->log2P = thisP;
				if (delta == 0)	/* except this one */
					return bestclk;
			}
		}
	}

	return bestclk;
}
362
/*
 * Find M, N and P for a two stage PLL
 *
 * Note that some bioses (NV30+) have lookup tables of precomputed MNP
 * values, but we're too lazy to use those atm
 *
 * "clk" parameter in kHz
 * returns calculated clock (0 if nothing usable was found); the best
 * M1/N1/M2/N2/log2P found are written to @bestpv.
 *
 * The post-divider log2P is fixed up-front from the VCO2 range; then
 * the two multiplier/divider stages are searched exhaustively for the
 * smallest output-clock error.  Integer rounding order matters.
 */
static int
getMNP_double(struct drm_device *dev, struct pll_lims *pll_lim, int clk,
	      struct nouveau_pll_vals *bestpv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int chip_version = dev_priv->vbios.chip_version;
	int minvco1 = pll_lim->vco1.minfreq, maxvco1 = pll_lim->vco1.maxfreq;
	int minvco2 = pll_lim->vco2.minfreq, maxvco2 = pll_lim->vco2.maxfreq;
	int minU1 = pll_lim->vco1.min_inputfreq, minU2 = pll_lim->vco2.min_inputfreq;
	int maxU1 = pll_lim->vco1.max_inputfreq, maxU2 = pll_lim->vco2.max_inputfreq;
	int minM1 = pll_lim->vco1.min_m, maxM1 = pll_lim->vco1.max_m;
	int minN1 = pll_lim->vco1.min_n, maxN1 = pll_lim->vco1.max_n;
	int minM2 = pll_lim->vco2.min_m, maxM2 = pll_lim->vco2.max_m;
	int minN2 = pll_lim->vco2.min_n, maxN2 = pll_lim->vco2.max_n;
	int maxlog2P = pll_lim->max_usable_log2p;
	int crystal = pll_lim->refclk;
	/* A degenerate second stage (single legal M2/N2) acts as a
	 * fixed gain and skips the VCO2 range checks. */
	bool fixedgain2 = (minM2 == maxM2 && minN2 == maxN2);
	int M1, N1, M2, N2, log2P;
	int clkP, calcclk1, calcclk2, calcclkout;
	int delta, bestdelta = INT_MAX;
	int bestclk = 0;

	/* Pick the largest post-divide that keeps the VCO2 frequency
	 * (clk << log2P) no higher than ~half of maxvco2 - 0.5%. */
	int vco2 = (maxvco2 - maxvco2/200) / 2;
	for (log2P = 0; clk && log2P < maxlog2P && clk <= (vco2 >> log2P); log2P++)
		;
	clkP = clk << log2P;

	if (maxvco2 < clk + clk/200)	/* +0.5% */
		maxvco2 = clk + clk/200;

	for (M1 = minM1; M1 <= maxM1; M1++) {
		/* crystal/M1 is the first-stage input frequency. */
		if (crystal/M1 < minU1)
			return bestclk;
		if (crystal/M1 > maxU1)
			continue;

		for (N1 = minN1; N1 <= maxN1; N1++) {
			/* First-stage VCO output. */
			calcclk1 = crystal * N1 / M1;
			if (calcclk1 < minvco1)
				continue;
			if (calcclk1 > maxvco1)
				break;

			for (M2 = minM2; M2 <= maxM2; M2++) {
				/* calcclk1/M2 is the second-stage input. */
				if (calcclk1/M2 < minU2)
					break;
				if (calcclk1/M2 > maxU2)
					continue;

				/* add calcclk1/2 to round better */
				N2 = (clkP * M2 + calcclk1/2) / calcclk1;
				if (N2 < minN2)
					continue;
				if (N2 > maxN2)
					break;

				if (!fixedgain2) {
					/* Pre-NV60 constrains the stage-2
					 * gain to roughly [4, 10]. */
					if (chip_version < 0x60)
						if (N2/M2 < 4 || N2/M2 > 10)
							continue;

					calcclk2 = calcclk1 * N2 / M2;
					if (calcclk2 < minvco2)
						break;
					if (calcclk2 > maxvco2)
						continue;
				} else
					calcclk2 = calcclk1;

				calcclkout = calcclk2 >> log2P;
				delta = abs(calcclkout - clk);
				/* we do an exhaustive search rather than terminating
				 * on an optimality condition...
				 */
				if (delta < bestdelta) {
					bestdelta = delta;
					bestclk = calcclkout;
					bestpv->N1 = N1;
					bestpv->M1 = M1;
					bestpv->N2 = N2;
					bestpv->M2 = M2;
					bestpv->log2P = log2P;
					if (delta == 0)	/* except this one */
						return bestclk;
				}
			}
		}
	}

	return bestclk;
}
462
463int
464nouveau_calc_pll_mnp(struct drm_device *dev, struct pll_lims *pll_lim, int clk,
465 struct nouveau_pll_vals *pv)
466{
467 int outclk;
468
469 if (!pll_lim->vco2.maxfreq)
470 outclk = getMNP_single(dev, pll_lim, clk, pv);
471 else
472 outclk = getMNP_double(dev, pll_lim, clk, pv);
473
474 if (!outclk)
475 NV_ERROR(dev, "Could not find a compatible set of PLL values\n");
476
477 return outclk;
478}
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
new file mode 100644
index 000000000000..1fc57ef58295
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -0,0 +1,447 @@
1/*
2 * Copyright 2005-2006 Stephane Marchesin
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include "drmP.h"
26#include "drm.h"
27#include "nouveau_drv.h"
28#include "nouveau_drm.h"
29#include "nouveau_dma.h"
30
31static int
32nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
33{
34 struct drm_device *dev = chan->dev;
35 struct drm_nouveau_private *dev_priv = dev->dev_private;
36 struct nouveau_bo *pb = chan->pushbuf_bo;
37 struct nouveau_gpuobj *pushbuf = NULL;
38 int ret;
39
40 if (dev_priv->card_type >= NV_50) {
41 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
42 dev_priv->vm_end, NV_DMA_ACCESS_RO,
43 NV_DMA_TARGET_AGP, &pushbuf);
44 chan->pushbuf_base = pb->bo.offset;
45 } else
46 if (pb->bo.mem.mem_type == TTM_PL_TT) {
47 ret = nouveau_gpuobj_gart_dma_new(chan, 0,
48 dev_priv->gart_info.aper_size,
49 NV_DMA_ACCESS_RO, &pushbuf,
50 NULL);
51 chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
52 } else
53 if (dev_priv->card_type != NV_04) {
54 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
55 dev_priv->fb_available_size,
56 NV_DMA_ACCESS_RO,
57 NV_DMA_TARGET_VIDMEM, &pushbuf);
58 chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
59 } else {
60 /* NV04 cmdbuf hack, from original ddx.. not sure of it's
61 * exact reason for existing :) PCI access to cmdbuf in
62 * VRAM.
63 */
64 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
65 drm_get_resource_start(dev, 1),
66 dev_priv->fb_available_size,
67 NV_DMA_ACCESS_RO,
68 NV_DMA_TARGET_PCI, &pushbuf);
69 chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
70 }
71
72 ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf, &chan->pushbuf);
73 if (ret) {
74 NV_ERROR(dev, "Error referencing pushbuf ctxdma: %d\n", ret);
75 if (pushbuf != dev_priv->gart_info.sg_ctxdma)
76 nouveau_gpuobj_del(dev, &pushbuf);
77 return ret;
78 }
79
80 return 0;
81}
82
83static struct nouveau_bo *
84nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
85{
86 struct nouveau_bo *pushbuf = NULL;
87 int location, ret;
88
89 if (nouveau_vram_pushbuf)
90 location = TTM_PL_FLAG_VRAM;
91 else
92 location = TTM_PL_FLAG_TT;
93
94 ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, false,
95 true, &pushbuf);
96 if (ret) {
97 NV_ERROR(dev, "error allocating DMA push buffer: %d\n", ret);
98 return NULL;
99 }
100
101 ret = nouveau_bo_pin(pushbuf, location);
102 if (ret) {
103 NV_ERROR(dev, "error pinning DMA push buffer: %d\n", ret);
104 nouveau_bo_ref(NULL, &pushbuf);
105 return NULL;
106 }
107
108 return pushbuf;
109}
110
111/* allocates and initializes a fifo for user space consumption */
112int
113nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
114 struct drm_file *file_priv,
115 uint32_t vram_handle, uint32_t tt_handle)
116{
117 struct drm_nouveau_private *dev_priv = dev->dev_private;
118 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
119 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
120 struct nouveau_channel *chan;
121 int channel, user;
122 int ret;
123
124 /*
125 * Alright, here is the full story
126 * Nvidia cards have multiple hw fifo contexts (praise them for that,
127 * no complicated crash-prone context switches)
128 * We allocate a new context for each app and let it write to it
129 * directly (woo, full userspace command submission !)
130 * When there are no more contexts, you lost
131 */
132 for (channel = 0; channel < pfifo->channels; channel++) {
133 if (dev_priv->fifos[channel] == NULL)
134 break;
135 }
136
137 /* no more fifos. you lost. */
138 if (channel == pfifo->channels)
139 return -EINVAL;
140
141 dev_priv->fifos[channel] = kzalloc(sizeof(struct nouveau_channel),
142 GFP_KERNEL);
143 if (!dev_priv->fifos[channel])
144 return -ENOMEM;
145 chan = dev_priv->fifos[channel];
146 INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
147 INIT_LIST_HEAD(&chan->fence.pending);
148 chan->dev = dev;
149 chan->id = channel;
150 chan->file_priv = file_priv;
151 chan->vram_handle = vram_handle;
152 chan->gart_handle = tt_handle;
153
154 NV_INFO(dev, "Allocating FIFO number %d\n", channel);
155
156 /* Allocate DMA push buffer */
157 chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev);
158 if (!chan->pushbuf_bo) {
159 ret = -ENOMEM;
160 NV_ERROR(dev, "pushbuf %d\n", ret);
161 nouveau_channel_free(chan);
162 return ret;
163 }
164
165 nouveau_dma_pre_init(chan);
166
167 /* Locate channel's user control regs */
168 if (dev_priv->card_type < NV_40)
169 user = NV03_USER(channel);
170 else
171 if (dev_priv->card_type < NV_50)
172 user = NV40_USER(channel);
173 else
174 user = NV50_USER(channel);
175
176 chan->user = ioremap(pci_resource_start(dev->pdev, 0) + user,
177 PAGE_SIZE);
178 if (!chan->user) {
179 NV_ERROR(dev, "ioremap of regs failed.\n");
180 nouveau_channel_free(chan);
181 return -ENOMEM;
182 }
183 chan->user_put = 0x40;
184 chan->user_get = 0x44;
185
186 /* Allocate space for per-channel fixed notifier memory */
187 ret = nouveau_notifier_init_channel(chan);
188 if (ret) {
189 NV_ERROR(dev, "ntfy %d\n", ret);
190 nouveau_channel_free(chan);
191 return ret;
192 }
193
194 /* Setup channel's default objects */
195 ret = nouveau_gpuobj_channel_init(chan, vram_handle, tt_handle);
196 if (ret) {
197 NV_ERROR(dev, "gpuobj %d\n", ret);
198 nouveau_channel_free(chan);
199 return ret;
200 }
201
202 /* Create a dma object for the push buffer */
203 ret = nouveau_channel_pushbuf_ctxdma_init(chan);
204 if (ret) {
205 NV_ERROR(dev, "pbctxdma %d\n", ret);
206 nouveau_channel_free(chan);
207 return ret;
208 }
209
210 /* disable the fifo caches */
211 pfifo->reassign(dev, false);
212
213 /* Create a graphics context for new channel */
214 ret = pgraph->create_context(chan);
215 if (ret) {
216 nouveau_channel_free(chan);
217 return ret;
218 }
219
220 /* Construct inital RAMFC for new channel */
221 ret = pfifo->create_context(chan);
222 if (ret) {
223 nouveau_channel_free(chan);
224 return ret;
225 }
226
227 pfifo->reassign(dev, true);
228
229 ret = nouveau_dma_init(chan);
230 if (!ret)
231 ret = nouveau_fence_init(chan);
232 if (ret) {
233 nouveau_channel_free(chan);
234 return ret;
235 }
236
237 nouveau_debugfs_channel_init(chan);
238
239 NV_INFO(dev, "%s: initialised FIFO %d\n", __func__, channel);
240 *chan_ret = chan;
241 return 0;
242}
243
244/* stops a fifo */
245void
246nouveau_channel_free(struct nouveau_channel *chan)
247{
248 struct drm_device *dev = chan->dev;
249 struct drm_nouveau_private *dev_priv = dev->dev_private;
250 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
251 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
252 unsigned long flags;
253 int ret;
254
255 NV_INFO(dev, "%s: freeing fifo %d\n", __func__, chan->id);
256
257 nouveau_debugfs_channel_fini(chan);
258
259 /* Give outstanding push buffers a chance to complete */
260 spin_lock_irqsave(&chan->fence.lock, flags);
261 nouveau_fence_update(chan);
262 spin_unlock_irqrestore(&chan->fence.lock, flags);
263 if (chan->fence.sequence != chan->fence.sequence_ack) {
264 struct nouveau_fence *fence = NULL;
265
266 ret = nouveau_fence_new(chan, &fence, true);
267 if (ret == 0) {
268 ret = nouveau_fence_wait(fence, NULL, false, false);
269 nouveau_fence_unref((void *)&fence);
270 }
271
272 if (ret)
273 NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
274 }
275
276 /* Ensure all outstanding fences are signaled. They should be if the
277 * above attempts at idling were OK, but if we failed this'll tell TTM
278 * we're done with the buffers.
279 */
280 nouveau_fence_fini(chan);
281
282 /* This will prevent pfifo from switching channels. */
283 pfifo->reassign(dev, false);
284
285 /* We want to give pgraph a chance to idle and get rid of all potential
286 * errors. We need to do this before the lock, otherwise the irq handler
287 * is unable to process them.
288 */
289 if (pgraph->channel(dev) == chan)
290 nouveau_wait_for_idle(dev);
291
292 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
293
294 pgraph->fifo_access(dev, false);
295 if (pgraph->channel(dev) == chan)
296 pgraph->unload_context(dev);
297 pgraph->destroy_context(chan);
298 pgraph->fifo_access(dev, true);
299
300 if (pfifo->channel_id(dev) == chan->id) {
301 pfifo->disable(dev);
302 pfifo->unload_context(dev);
303 pfifo->enable(dev);
304 }
305 pfifo->destroy_context(chan);
306
307 pfifo->reassign(dev, true);
308
309 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
310
311 /* Release the channel's resources */
312 nouveau_gpuobj_ref_del(dev, &chan->pushbuf);
313 if (chan->pushbuf_bo) {
314 nouveau_bo_unpin(chan->pushbuf_bo);
315 nouveau_bo_ref(NULL, &chan->pushbuf_bo);
316 }
317 nouveau_gpuobj_channel_takedown(chan);
318 nouveau_notifier_takedown_channel(chan);
319 if (chan->user)
320 iounmap(chan->user);
321
322 dev_priv->fifos[chan->id] = NULL;
323 kfree(chan);
324}
325
326/* cleans up all the fifos from file_priv */
327void
328nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
329{
330 struct drm_nouveau_private *dev_priv = dev->dev_private;
331 struct nouveau_engine *engine = &dev_priv->engine;
332 int i;
333
334 NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
335 for (i = 0; i < engine->fifo.channels; i++) {
336 struct nouveau_channel *chan = dev_priv->fifos[i];
337
338 if (chan && chan->file_priv == file_priv)
339 nouveau_channel_free(chan);
340 }
341}
342
343int
344nouveau_channel_owner(struct drm_device *dev, struct drm_file *file_priv,
345 int channel)
346{
347 struct drm_nouveau_private *dev_priv = dev->dev_private;
348 struct nouveau_engine *engine = &dev_priv->engine;
349
350 if (channel >= engine->fifo.channels)
351 return 0;
352 if (dev_priv->fifos[channel] == NULL)
353 return 0;
354
355 return (dev_priv->fifos[channel]->file_priv == file_priv);
356}
357
358/***********************************
359 * ioctls wrapping the functions
360 ***********************************/
361
362static int
363nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
364 struct drm_file *file_priv)
365{
366 struct drm_nouveau_private *dev_priv = dev->dev_private;
367 struct drm_nouveau_channel_alloc *init = data;
368 struct nouveau_channel *chan;
369 int ret;
370
371 NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
372
373 if (dev_priv->engine.graph.accel_blocked)
374 return -ENODEV;
375
376 if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
377 return -EINVAL;
378
379 ret = nouveau_channel_alloc(dev, &chan, file_priv,
380 init->fb_ctxdma_handle,
381 init->tt_ctxdma_handle);
382 if (ret)
383 return ret;
384 init->channel = chan->id;
385
386 if (chan->dma.ib_max)
387 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
388 NOUVEAU_GEM_DOMAIN_GART;
389 else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM)
390 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
391 else
392 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
393
394 init->subchan[0].handle = NvM2MF;
395 if (dev_priv->card_type < NV_50)
396 init->subchan[0].grclass = 0x0039;
397 else
398 init->subchan[0].grclass = 0x5039;
399 init->subchan[1].handle = NvSw;
400 init->subchan[1].grclass = NV_SW;
401 init->nr_subchan = 2;
402
403 /* Named memory object area */
404 ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
405 &init->notifier_handle);
406 if (ret) {
407 nouveau_channel_free(chan);
408 return ret;
409 }
410
411 return 0;
412}
413
414static int
415nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
416 struct drm_file *file_priv)
417{
418 struct drm_nouveau_channel_free *cfree = data;
419 struct nouveau_channel *chan;
420
421 NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
422 NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree->channel, file_priv, chan);
423
424 nouveau_channel_free(chan);
425 return 0;
426}
427
428/***********************************
429 * finally, the ioctl table
430 ***********************************/
431
432struct drm_ioctl_desc nouveau_ioctls[] = {
433 DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
434 DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
435 DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
436 DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
437 DRM_IOCTL_DEF(DRM_NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
438 DRM_IOCTL_DEF(DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
439 DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
440 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
441 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
442 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
443 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
444 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
445};
446
447int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
new file mode 100644
index 000000000000..14afe1e47e57
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -0,0 +1,881 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <acpi/button.h>
28
29#include "drmP.h"
30#include "drm_edid.h"
31#include "drm_crtc_helper.h"
32
33#include "nouveau_reg.h"
34#include "nouveau_drv.h"
35#include "nouveau_encoder.h"
36#include "nouveau_crtc.h"
37#include "nouveau_connector.h"
38#include "nouveau_hw.h"
39
40static inline struct drm_encoder_slave_funcs *
41get_slave_funcs(struct nouveau_encoder *enc)
42{
43 return to_encoder_slave(to_drm_encoder(enc))->slave_funcs;
44}
45
46static struct nouveau_encoder *
47find_encoder_by_type(struct drm_connector *connector, int type)
48{
49 struct drm_device *dev = connector->dev;
50 struct nouveau_encoder *nv_encoder;
51 struct drm_mode_object *obj;
52 int i, id;
53
54 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
55 id = connector->encoder_ids[i];
56 if (!id)
57 break;
58
59 obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
60 if (!obj)
61 continue;
62 nv_encoder = nouveau_encoder(obj_to_encoder(obj));
63
64 if (type == OUTPUT_ANY || nv_encoder->dcb->type == type)
65 return nv_encoder;
66 }
67
68 return NULL;
69}
70
71struct nouveau_connector *
72nouveau_encoder_connector_get(struct nouveau_encoder *encoder)
73{
74 struct drm_device *dev = to_drm_encoder(encoder)->dev;
75 struct drm_connector *drm_connector;
76
77 list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head) {
78 if (drm_connector->encoder == to_drm_encoder(encoder))
79 return nouveau_connector(drm_connector);
80 }
81
82 return NULL;
83}
84
85
86static void
87nouveau_connector_destroy(struct drm_connector *drm_connector)
88{
89 struct nouveau_connector *nv_connector =
90 nouveau_connector(drm_connector);
91 struct drm_device *dev;
92
93 if (!nv_connector)
94 return;
95
96 dev = nv_connector->base.dev;
97 NV_DEBUG_KMS(dev, "\n");
98
99 kfree(nv_connector->edid);
100 drm_sysfs_connector_remove(drm_connector);
101 drm_connector_cleanup(drm_connector);
102 kfree(drm_connector);
103}
104
105static void
106nouveau_connector_ddc_prepare(struct drm_connector *connector, int *flags)
107{
108 struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
109
110 if (dev_priv->card_type >= NV_50)
111 return;
112
113 *flags = 0;
114 if (NVLockVgaCrtcs(dev_priv->dev, false))
115 *flags |= 1;
116 if (nv_heads_tied(dev_priv->dev))
117 *flags |= 2;
118
119 if (*flags & 2)
120 NVSetOwner(dev_priv->dev, 0); /* necessary? */
121}
122
123static void
124nouveau_connector_ddc_finish(struct drm_connector *connector, int flags)
125{
126 struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
127
128 if (dev_priv->card_type >= NV_50)
129 return;
130
131 if (flags & 2)
132 NVSetOwner(dev_priv->dev, 4);
133 if (flags & 1)
134 NVLockVgaCrtcs(dev_priv->dev, true);
135}
136
137static struct nouveau_i2c_chan *
138nouveau_connector_ddc_detect(struct drm_connector *connector,
139 struct nouveau_encoder **pnv_encoder)
140{
141 struct drm_device *dev = connector->dev;
142 uint8_t out_buf[] = { 0x0, 0x0}, buf[2];
143 int ret, flags, i;
144
145 struct i2c_msg msgs[] = {
146 {
147 .addr = 0x50,
148 .flags = 0,
149 .len = 1,
150 .buf = out_buf,
151 },
152 {
153 .addr = 0x50,
154 .flags = I2C_M_RD,
155 .len = 1,
156 .buf = buf,
157 }
158 };
159
160 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
161 struct nouveau_i2c_chan *i2c = NULL;
162 struct nouveau_encoder *nv_encoder;
163 struct drm_mode_object *obj;
164 int id;
165
166 id = connector->encoder_ids[i];
167 if (!id)
168 break;
169
170 obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
171 if (!obj)
172 continue;
173 nv_encoder = nouveau_encoder(obj_to_encoder(obj));
174
175 if (nv_encoder->dcb->i2c_index < 0xf)
176 i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
177 if (!i2c)
178 continue;
179
180 nouveau_connector_ddc_prepare(connector, &flags);
181 ret = i2c_transfer(&i2c->adapter, msgs, 2);
182 nouveau_connector_ddc_finish(connector, flags);
183
184 if (ret == 2) {
185 *pnv_encoder = nv_encoder;
186 return i2c;
187 }
188 }
189
190 return NULL;
191}
192
193static void
194nouveau_connector_set_encoder(struct drm_connector *connector,
195 struct nouveau_encoder *nv_encoder)
196{
197 struct nouveau_connector *nv_connector = nouveau_connector(connector);
198 struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
199 struct drm_device *dev = connector->dev;
200
201 if (nv_connector->detected_encoder == nv_encoder)
202 return;
203 nv_connector->detected_encoder = nv_encoder;
204
205 if (nv_encoder->dcb->type == OUTPUT_LVDS ||
206 nv_encoder->dcb->type == OUTPUT_TMDS) {
207 connector->doublescan_allowed = false;
208 connector->interlace_allowed = false;
209 } else {
210 connector->doublescan_allowed = true;
211 if (dev_priv->card_type == NV_20 ||
212 (dev_priv->card_type == NV_10 &&
213 (dev->pci_device & 0x0ff0) != 0x0100 &&
214 (dev->pci_device & 0x0ff0) != 0x0150))
215 /* HW is broken */
216 connector->interlace_allowed = false;
217 else
218 connector->interlace_allowed = true;
219 }
220
221 if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) {
222 drm_connector_property_set_value(connector,
223 dev->mode_config.dvi_i_subconnector_property,
224 nv_encoder->dcb->type == OUTPUT_TMDS ?
225 DRM_MODE_SUBCONNECTOR_DVID :
226 DRM_MODE_SUBCONNECTOR_DVIA);
227 }
228}
229
230static enum drm_connector_status
231nouveau_connector_detect(struct drm_connector *connector)
232{
233 struct drm_device *dev = connector->dev;
234 struct nouveau_connector *nv_connector = nouveau_connector(connector);
235 struct nouveau_encoder *nv_encoder = NULL;
236 struct nouveau_i2c_chan *i2c;
237 int type, flags;
238
239 if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS)
240 nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS);
241 if (nv_encoder && nv_connector->native_mode) {
242 unsigned status = connector_status_connected;
243
244#ifdef CONFIG_ACPI
245 if (!nouveau_ignorelid && !acpi_lid_open())
246 status = connector_status_unknown;
247#endif
248 nouveau_connector_set_encoder(connector, nv_encoder);
249 return status;
250 }
251
252 /* Cleanup the previous EDID block. */
253 if (nv_connector->edid) {
254 drm_mode_connector_update_edid_property(connector, NULL);
255 kfree(nv_connector->edid);
256 nv_connector->edid = NULL;
257 }
258
259 i2c = nouveau_connector_ddc_detect(connector, &nv_encoder);
260 if (i2c) {
261 nouveau_connector_ddc_prepare(connector, &flags);
262 nv_connector->edid = drm_get_edid(connector, &i2c->adapter);
263 nouveau_connector_ddc_finish(connector, flags);
264 drm_mode_connector_update_edid_property(connector,
265 nv_connector->edid);
266 if (!nv_connector->edid) {
267 NV_ERROR(dev, "DDC responded, but no EDID for %s\n",
268 drm_get_connector_name(connector));
269 goto detect_analog;
270 }
271
272 if (nv_encoder->dcb->type == OUTPUT_DP &&
273 !nouveau_dp_detect(to_drm_encoder(nv_encoder))) {
274 NV_ERROR(dev, "Detected %s, but failed init\n",
275 drm_get_connector_name(connector));
276 return connector_status_disconnected;
277 }
278
279 /* Override encoder type for DVI-I based on whether EDID
280 * says the display is digital or analog, both use the
281 * same i2c channel so the value returned from ddc_detect
282 * isn't necessarily correct.
283 */
284 if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) {
285 if (nv_connector->edid->input & DRM_EDID_INPUT_DIGITAL)
286 type = OUTPUT_TMDS;
287 else
288 type = OUTPUT_ANALOG;
289
290 nv_encoder = find_encoder_by_type(connector, type);
291 if (!nv_encoder) {
292 NV_ERROR(dev, "Detected %d encoder on %s, "
293 "but no object!\n", type,
294 drm_get_connector_name(connector));
295 return connector_status_disconnected;
296 }
297 }
298
299 nouveau_connector_set_encoder(connector, nv_encoder);
300 return connector_status_connected;
301 }
302
303detect_analog:
304 nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG);
305 if (!nv_encoder && !nouveau_tv_disable)
306 nv_encoder = find_encoder_by_type(connector, OUTPUT_TV);
307 if (nv_encoder) {
308 struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
309 struct drm_encoder_helper_funcs *helper =
310 encoder->helper_private;
311
312 if (helper->detect(encoder, connector) ==
313 connector_status_connected) {
314 nouveau_connector_set_encoder(connector, nv_encoder);
315 return connector_status_connected;
316 }
317
318 }
319
320 return connector_status_disconnected;
321}
322
323static void
324nouveau_connector_force(struct drm_connector *connector)
325{
326 struct nouveau_connector *nv_connector = nouveau_connector(connector);
327 struct nouveau_encoder *nv_encoder;
328 int type;
329
330 if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) {
331 if (connector->force == DRM_FORCE_ON_DIGITAL)
332 type = OUTPUT_TMDS;
333 else
334 type = OUTPUT_ANALOG;
335 } else
336 type = OUTPUT_ANY;
337
338 nv_encoder = find_encoder_by_type(connector, type);
339 if (!nv_encoder) {
340 NV_ERROR(connector->dev, "can't find encoder to force %s on!\n",
341 drm_get_connector_name(connector));
342 connector->status = connector_status_disconnected;
343 return;
344 }
345
346 nouveau_connector_set_encoder(connector, nv_encoder);
347}
348
349static int
350nouveau_connector_set_property(struct drm_connector *connector,
351 struct drm_property *property, uint64_t value)
352{
353 struct nouveau_connector *nv_connector = nouveau_connector(connector);
354 struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
355 struct drm_device *dev = connector->dev;
356 int ret;
357
358 /* Scaling mode */
359 if (property == dev->mode_config.scaling_mode_property) {
360 struct nouveau_crtc *nv_crtc = NULL;
361 bool modeset = false;
362
363 switch (value) {
364 case DRM_MODE_SCALE_NONE:
365 case DRM_MODE_SCALE_FULLSCREEN:
366 case DRM_MODE_SCALE_CENTER:
367 case DRM_MODE_SCALE_ASPECT:
368 break;
369 default:
370 return -EINVAL;
371 }
372
373 /* LVDS always needs gpu scaling */
374 if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS &&
375 value == DRM_MODE_SCALE_NONE)
376 return -EINVAL;
377
378 /* Changing between GPU and panel scaling requires a full
379 * modeset
380 */
381 if ((nv_connector->scaling_mode == DRM_MODE_SCALE_NONE) ||
382 (value == DRM_MODE_SCALE_NONE))
383 modeset = true;
384 nv_connector->scaling_mode = value;
385
386 if (connector->encoder && connector->encoder->crtc)
387 nv_crtc = nouveau_crtc(connector->encoder->crtc);
388 if (!nv_crtc)
389 return 0;
390
391 if (modeset || !nv_crtc->set_scale) {
392 ret = drm_crtc_helper_set_mode(&nv_crtc->base,
393 &nv_crtc->base.mode,
394 nv_crtc->base.x,
395 nv_crtc->base.y, NULL);
396 if (!ret)
397 return -EINVAL;
398 } else {
399 ret = nv_crtc->set_scale(nv_crtc, value, true);
400 if (ret)
401 return ret;
402 }
403
404 return 0;
405 }
406
407 /* Dithering */
408 if (property == dev->mode_config.dithering_mode_property) {
409 struct nouveau_crtc *nv_crtc = NULL;
410
411 if (value == DRM_MODE_DITHERING_ON)
412 nv_connector->use_dithering = true;
413 else
414 nv_connector->use_dithering = false;
415
416 if (connector->encoder && connector->encoder->crtc)
417 nv_crtc = nouveau_crtc(connector->encoder->crtc);
418
419 if (!nv_crtc || !nv_crtc->set_dither)
420 return 0;
421
422 return nv_crtc->set_dither(nv_crtc, nv_connector->use_dithering,
423 true);
424 }
425
426 if (nv_encoder && nv_encoder->dcb->type == OUTPUT_TV)
427 return get_slave_funcs(nv_encoder)->
428 set_property(to_drm_encoder(nv_encoder), connector, property, value);
429
430 return -EINVAL;
431}
432
433static struct drm_display_mode *
434nouveau_connector_native_mode(struct nouveau_connector *connector)
435{
436 struct drm_device *dev = connector->base.dev;
437 struct drm_display_mode *mode, *largest = NULL;
438 int high_w = 0, high_h = 0, high_v = 0;
439
440 /* Use preferred mode if there is one.. */
441 list_for_each_entry(mode, &connector->base.probed_modes, head) {
442 if (mode->type & DRM_MODE_TYPE_PREFERRED) {
443 NV_DEBUG_KMS(dev, "native mode from preferred\n");
444 return drm_mode_duplicate(dev, mode);
445 }
446 }
447
448 /* Otherwise, take the resolution with the largest width, then height,
449 * then vertical refresh
450 */
451 list_for_each_entry(mode, &connector->base.probed_modes, head) {
452 if (mode->hdisplay < high_w)
453 continue;
454
455 if (mode->hdisplay == high_w && mode->vdisplay < high_h)
456 continue;
457
458 if (mode->hdisplay == high_w && mode->vdisplay == high_h &&
459 mode->vrefresh < high_v)
460 continue;
461
462 high_w = mode->hdisplay;
463 high_h = mode->vdisplay;
464 high_v = mode->vrefresh;
465 largest = mode;
466 }
467
468 NV_DEBUG_KMS(dev, "native mode from largest: %dx%d@%d\n",
469 high_w, high_h, high_v);
470 return largest ? drm_mode_duplicate(dev, largest) : NULL;
471}
472
/* Candidate resolutions offered on scaled panels; the list is ordered
 * largest-first and terminated by a zeroed sentinel entry. */
struct moderec {
	int hdisplay;
	int vdisplay;
};

static struct moderec scaler_modes[] = {
	{ 1920, 1200 },
	{ 1920, 1080 },
	{ 1680, 1050 },
	{ 1600, 1200 },
	{ 1400, 1050 },
	{ 1280, 1024 },
	{ 1280, 960 },
	{ 1152, 864 },
	{ 1024, 768 },
	{ 800, 600 },
	{ 720, 400 },
	{ 640, 480 },
	{ 640, 400 },
	{ 640, 350 },
	{}
};
495
496static int
497nouveau_connector_scaler_modes_add(struct drm_connector *connector)
498{
499 struct nouveau_connector *nv_connector = nouveau_connector(connector);
500 struct drm_display_mode *native = nv_connector->native_mode, *m;
501 struct drm_device *dev = connector->dev;
502 struct moderec *mode = &scaler_modes[0];
503 int modes = 0;
504
505 if (!native)
506 return 0;
507
508 while (mode->hdisplay) {
509 if (mode->hdisplay <= native->hdisplay &&
510 mode->vdisplay <= native->vdisplay) {
511 m = drm_cvt_mode(dev, mode->hdisplay, mode->vdisplay,
512 drm_mode_vrefresh(native), false,
513 false, false);
514 if (!m)
515 continue;
516
517 m->type |= DRM_MODE_TYPE_DRIVER;
518
519 drm_mode_probed_add(connector, m);
520 modes++;
521 }
522
523 mode++;
524 }
525
526 return modes;
527}
528
529static int
530nouveau_connector_get_modes(struct drm_connector *connector)
531{
532 struct drm_device *dev = connector->dev;
533 struct nouveau_connector *nv_connector = nouveau_connector(connector);
534 struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
535 int ret = 0;
536
537 /* If we're not LVDS, destroy the previous native mode, the attached
538 * monitor could have changed.
539 */
540 if (nv_connector->dcb->type != DCB_CONNECTOR_LVDS &&
541 nv_connector->native_mode) {
542 drm_mode_destroy(dev, nv_connector->native_mode);
543 nv_connector->native_mode = NULL;
544 }
545
546 if (nv_connector->edid)
547 ret = drm_add_edid_modes(connector, nv_connector->edid);
548
549 /* Find the native mode if this is a digital panel, if we didn't
550 * find any modes through DDC previously add the native mode to
551 * the list of modes.
552 */
553 if (!nv_connector->native_mode)
554 nv_connector->native_mode =
555 nouveau_connector_native_mode(nv_connector);
556 if (ret == 0 && nv_connector->native_mode) {
557 struct drm_display_mode *mode;
558
559 mode = drm_mode_duplicate(dev, nv_connector->native_mode);
560 drm_mode_probed_add(connector, mode);
561 ret = 1;
562 }
563
564 if (nv_encoder->dcb->type == OUTPUT_TV)
565 ret = get_slave_funcs(nv_encoder)->
566 get_modes(to_drm_encoder(nv_encoder), connector);
567
568 if (nv_encoder->dcb->type == OUTPUT_LVDS)
569 ret += nouveau_connector_scaler_modes_add(connector);
570
571 return ret;
572}
573
/* drm_connector_helper_funcs.mode_valid: accept or reject a user-requested
 * mode based on the pixel-clock limits of the encoder found by the last
 * detect().  Returns a MODE_* status code.
 */
574static int
575nouveau_connector_mode_valid(struct drm_connector *connector,
576			     struct drm_display_mode *mode)
577{
578	struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
579	struct nouveau_connector *nv_connector = nouveau_connector(connector);
580	struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
	/* defaults; the per-output-type cases below override them */
581	unsigned min_clock = 25000, max_clock = min_clock;
582	unsigned clock = mode->clock;
583
584	switch (nv_encoder->dcb->type) {
585	case OUTPUT_LVDS:
		/* the panel scaler can only scale down, never up */
586		BUG_ON(!nv_connector->native_mode);
587		if (mode->hdisplay > nv_connector->native_mode->hdisplay ||
588		    mode->vdisplay > nv_connector->native_mode->vdisplay)
589			return MODE_PANEL;
590
591		min_clock = 0;
592		max_clock = 400000;
593		break;
594	case OUTPUT_TMDS:
		/* single-link TMDS is limited to 165MHz; dual-link doubles it */
595		if ((dev_priv->card_type >= NV_50 && !nouveau_duallink) ||
596		    (dev_priv->card_type < NV_50 &&
597		     !nv_encoder->dcb->duallink_possible))
598			max_clock = 165000;
599		else
600			max_clock = 330000;
601		break;
602	case OUTPUT_ANALOG:
		/* DCB may specify the DAC limit; fall back to 350MHz if not */
603		max_clock = nv_encoder->dcb->crtconf.maxfreq;
604		if (!max_clock)
605			max_clock = 350000;
606		break;
607	case OUTPUT_TV:
		/* TV-out is driven by a slave i2c encoder; let it decide */
608		return get_slave_funcs(nv_encoder)->
609			mode_valid(to_drm_encoder(nv_encoder), mode);
610	case OUTPUT_DP:
		/* per-lane symbol rate times the trained lane count */
611		if (nv_encoder->dp.link_bw == DP_LINK_BW_2_7)
612			max_clock = nv_encoder->dp.link_nr * 270000;
613		else
614			max_clock = nv_encoder->dp.link_nr * 162000;
615
		/* scale pixel clock to link bandwidth — presumably 3 bytes
		 * per pixel (24bpp); TODO confirm */
616		clock *= 3;
617		break;
618	default:
619		BUG_ON(1);
620		return MODE_BAD;
621	}
622
623	if (clock < min_clock)
624		return MODE_CLOCK_LOW;
625
626	if (clock > max_clock)
627		return MODE_CLOCK_HIGH;
628
629	return MODE_OK;
630}
631
/* drm_connector_helper_funcs.best_encoder: return the encoder chosen by the
 * last detect(), or NULL if nothing was detected on this connector.
 */
632static struct drm_encoder *
633nouveau_connector_best_encoder(struct drm_connector *connector)
634{
635	struct nouveau_connector *nv_connector = nouveau_connector(connector);
636
637	if (nv_connector->detected_encoder)
638		return to_drm_encoder(nv_connector->detected_encoder);
639
640	return NULL;
641}
642
/* Helper vtable consumed by drm_helper_probe_single_connector_modes(). */
643static const struct drm_connector_helper_funcs
644nouveau_connector_helper_funcs = {
645	.get_modes = nouveau_connector_get_modes,
646	.mode_valid = nouveau_connector_mode_valid,
647	.best_encoder = nouveau_connector_best_encoder,
648};
649
/* Core connector vtable; DPMS and mode probing are delegated to the
 * generic DRM helpers.  save/restore are intentionally unimplemented.
 */
650static const struct drm_connector_funcs
651nouveau_connector_funcs = {
652	.dpms = drm_helper_connector_dpms,
653	.save = NULL,
654	.restore = NULL,
655	.detect = nouveau_connector_detect,
656	.destroy = nouveau_connector_destroy,
657	.fill_modes = drm_helper_probe_single_connector_modes,
658	.set_property = nouveau_connector_set_property,
659	.force = nouveau_connector_force
660};
661
/* Determine the native mode of an LVDS panel, trying in order: EDID over
 * DDC, a VBIOS hardcoded modeline, and a VBIOS-embedded EDID block.
 * Returns 0 on success, -ENODEV if no native mode could be determined,
 * or a negative errno from VBIOS parsing.
 */
662static int
663nouveau_connector_create_lvds(struct drm_device *dev,
664			      struct drm_connector *connector)
665{
666	struct nouveau_connector *nv_connector = nouveau_connector(connector);
667	struct drm_nouveau_private *dev_priv = dev->dev_private;
668	struct nouveau_i2c_chan *i2c = NULL;
669	struct nouveau_encoder *nv_encoder;
670	struct drm_display_mode native, *mode, *temp;
671	bool dummy, if_is_24bit = false;
672	int ret, flags;
673
674	nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS);
675	if (!nv_encoder)
676		return -ENODEV;
677
678	ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &if_is_24bit);
679	if (ret) {
680		NV_ERROR(dev, "Error parsing LVDS table, disabling LVDS\n");
681		return ret;
682	}
	/* 18bpp panels want dithering to hide banding, 24bpp do not */
683	nv_connector->use_dithering = !if_is_24bit;
684
685	/* Firstly try getting EDID over DDC, if allowed and I2C channel
686	 * is available.
687	 */
688	if (!dev_priv->vbios.fp_no_ddc && nv_encoder->dcb->i2c_index < 0xf)
689		i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
690
691	if (i2c) {
692		nouveau_connector_ddc_prepare(connector, &flags);
693		nv_connector->edid = drm_get_edid(connector, &i2c->adapter);
694		nouveau_connector_ddc_finish(connector, flags);
695	}
696
697	/* If no EDID found above, and the VBIOS indicates a hardcoded
698	 * modeline is available for the panel, set it as the panel's
699	 * native mode and exit.
700	 */
701	if (!nv_connector->edid && nouveau_bios_fp_mode(dev, &native) &&
702	    (nv_encoder->dcb->lvdsconf.use_straps_for_mode ||
703	     dev_priv->vbios.fp_no_ddc)) {
704		nv_connector->native_mode = drm_mode_duplicate(dev, &native);
705		goto out;
706	}
707
708	/* Still nothing, some VBIOS images have a hardcoded EDID block
709	 * stored in them for the panel.
710	 */
711	if (!nv_connector->edid && !nv_connector->native_mode &&
712	    !dev_priv->vbios.fp_no_ddc) {
713		struct edid *edid =
714			(struct edid *)nouveau_bios_embedded_edid(dev);
715		if (edid) {
716			nv_connector->edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
			/* guard against allocation failure before copying;
			 * a failed kmalloc is handled like "no EDID found"
			 */
			if (nv_connector->edid)
717				*(nv_connector->edid) = *edid;
718		}
719	}
720
721	if (!nv_connector->edid)
722		goto out;
723
724	/* We didn't find/use a panel mode from the VBIOS, so parse the EDID
725	 * block and look for the preferred mode there.
726	 */
727	ret = drm_add_edid_modes(connector, nv_connector->edid);
728	if (ret == 0)
729		goto out;
730	nv_connector->detected_encoder = nv_encoder;
731	nv_connector->native_mode = nouveau_connector_native_mode(nv_connector);
	/* only the native mode is kept; probed modes are regenerated later */
732	list_for_each_entry_safe(mode, temp, &connector->probed_modes, head)
733		drm_mode_remove(connector, mode);
734
735out:
736	if (!nv_connector->native_mode) {
737		NV_ERROR(dev, "LVDS present in DCB table, but couldn't "
738			      "determine its native mode. Disabling.\n");
739		return -ENODEV;
740	}
741
742	drm_mode_connector_update_edid_property(connector, nv_connector->edid);
743	return 0;
744}
745
/* Create and register a DRM connector for one DCB connector-table entry.
 * Maps the DCB type to a DRM connector type, attaches all encoders whose
 * DCB entry references this connector, sets up type-specific properties
 * (DVI-I subconnector, scaling, dithering) and, for LVDS, resolves the
 * panel's native mode.  Returns 0 on success (including the "no encoders"
 * case, which is silently ignored) or a negative errno.
 */
746int
747nouveau_connector_create(struct drm_device *dev,
748			 struct dcb_connector_table_entry *dcb)
749{
750	struct drm_nouveau_private *dev_priv = dev->dev_private;
751	struct nouveau_connector *nv_connector = NULL;
752	struct drm_connector *connector;
753	struct drm_encoder *encoder;
754	int ret, type;
755
756	NV_DEBUG_KMS(dev, "\n");
757
	/* translate the DCB connector type into a DRM connector type */
758	switch (dcb->type) {
759	case DCB_CONNECTOR_NONE:
760		return 0;
761	case DCB_CONNECTOR_VGA:
762		NV_INFO(dev, "Detected a VGA connector\n");
763		type = DRM_MODE_CONNECTOR_VGA;
764		break;
765	case DCB_CONNECTOR_TV_0:
766	case DCB_CONNECTOR_TV_1:
767	case DCB_CONNECTOR_TV_3:
768		NV_INFO(dev, "Detected a TV connector\n");
769		type = DRM_MODE_CONNECTOR_TV;
770		break;
771	case DCB_CONNECTOR_DVI_I:
772		NV_INFO(dev, "Detected a DVI-I connector\n");
773		type = DRM_MODE_CONNECTOR_DVII;
774		break;
775	case DCB_CONNECTOR_DVI_D:
776		NV_INFO(dev, "Detected a DVI-D connector\n");
777		type = DRM_MODE_CONNECTOR_DVID;
778		break;
779	case DCB_CONNECTOR_HDMI_0:
780	case DCB_CONNECTOR_HDMI_1:
781		NV_INFO(dev, "Detected a HDMI connector\n");
782		type = DRM_MODE_CONNECTOR_HDMIA;
783		break;
784	case DCB_CONNECTOR_LVDS:
785		NV_INFO(dev, "Detected a LVDS connector\n");
786		type = DRM_MODE_CONNECTOR_LVDS;
787		break;
788	case DCB_CONNECTOR_DP:
789		NV_INFO(dev, "Detected a DisplayPort connector\n");
790		type = DRM_MODE_CONNECTOR_DisplayPort;
791		break;
792	case DCB_CONNECTOR_eDP:
793		NV_INFO(dev, "Detected an eDP connector\n");
794		type = DRM_MODE_CONNECTOR_eDP;
795		break;
796	default:
797		NV_ERROR(dev, "unknown connector type: 0x%02x!!\n", dcb->type);
798		return -EINVAL;
799	}
800
801	nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL);
802	if (!nv_connector)
803		return -ENOMEM;
804	nv_connector->dcb = dcb;
805	connector = &nv_connector->base;
806
807	/* defaults, will get overridden in detect() */
808	connector->interlace_allowed = false;
809	connector->doublescan_allowed = false;
810
811	drm_connector_init(dev, connector, &nouveau_connector_funcs, type);
812	drm_connector_helper_add(connector, &nouveau_connector_helper_funcs);
813
814	/* attach encoders */
815	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
816		struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
817
		/* only encoders that the DCB routes to this connector */
818		if (nv_encoder->dcb->connector != dcb->index)
819			continue;
820
821		if (get_slave_funcs(nv_encoder))
822			get_slave_funcs(nv_encoder)->create_resources(encoder, connector);
823
824		drm_mode_connector_attach_encoder(connector, encoder);
825	}
826
	/* a connector without any encoder is useless; drop it quietly */
827	if (!connector->encoder_ids[0]) {
828		NV_WARN(dev, "  no encoders, ignoring\n");
829		drm_connector_cleanup(connector);
830		kfree(connector);
831		return 0;
832	}
833
834	/* Init DVI-I specific properties */
835	if (dcb->type == DCB_CONNECTOR_DVI_I) {
836		drm_mode_create_dvi_i_properties(dev);
837		drm_connector_attach_property(connector, dev->mode_config.dvi_i_subconnector_property, 0);
838		drm_connector_attach_property(connector, dev->mode_config.dvi_i_select_subconnector_property, 0);
839	}
840
841	if (dcb->type != DCB_CONNECTOR_LVDS)
842		nv_connector->use_dithering = false;
843
	/* scaling/dithering properties depend on the connector type */
844	switch (dcb->type) {
845	case DCB_CONNECTOR_VGA:
846		if (dev_priv->card_type >= NV_50) {
847			drm_connector_attach_property(connector,
848					dev->mode_config.scaling_mode_property,
849					nv_connector->scaling_mode);
850		}
851		/* fall-through */
852	case DCB_CONNECTOR_TV_0:
853	case DCB_CONNECTOR_TV_1:
854	case DCB_CONNECTOR_TV_3:
855		nv_connector->scaling_mode = DRM_MODE_SCALE_NONE;
856		break;
857	default:
858		nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN;
859
860		drm_connector_attach_property(connector,
861				dev->mode_config.scaling_mode_property,
862				nv_connector->scaling_mode);
863		drm_connector_attach_property(connector,
864				dev->mode_config.dithering_mode_property,
865				nv_connector->use_dithering ?
866				DRM_MODE_DITHERING_ON : DRM_MODE_DITHERING_OFF);
867		break;
868	}
869
870	drm_sysfs_connector_add(connector);
871
	/* LVDS panels must have a determinable native mode to be usable */
872	if (dcb->type == DCB_CONNECTOR_LVDS) {
873		ret = nouveau_connector_create_lvds(dev, connector);
874		if (ret) {
875			connector->funcs->destroy(connector);
876			return ret;
877		}
878	}
879
880	return 0;
881}
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
new file mode 100644
index 000000000000..4ef38abc2d9c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -0,0 +1,55 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#ifndef __NOUVEAU_CONNECTOR_H__
28#define __NOUVEAU_CONNECTOR_H__
29
30#include "drm_edid.h"
31#include "nouveau_i2c.h"
32
/* Driver-private connector state wrapping the DRM connector. */
33struct nouveau_connector {
34	struct drm_connector base;
35
	/* DCB connector-table entry this connector was created from */
36	struct dcb_connector_table_entry *dcb;
37
	/* DRM_MODE_SCALE_* currently selected for this connector */
38	int scaling_mode;
	/* true for 18bpp LVDS panels that want dithering enabled */
39	bool use_dithering;
40
	/* encoder chosen by the last detect(); EDID/native mode caches */
41	struct nouveau_encoder *detected_encoder;
42	struct edid *edid;
43	struct drm_display_mode *native_mode;
44};
45
/* Upcast from the embedded drm_connector to the driver wrapper. */
46static inline struct nouveau_connector *nouveau_connector(
47		struct drm_connector *con)
48{
49	return container_of(con, struct nouveau_connector, base);
50}
51
/* Create/register a connector for one DCB table entry (nouveau_connector.c). */
52int nouveau_connector_create(struct drm_device *,
53			     struct dcb_connector_table_entry *);
54
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h
new file mode 100644
index 000000000000..49fa7b2d257e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h
@@ -0,0 +1,95 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#ifndef __NOUVEAU_CRTC_H__
28#define __NOUVEAU_CRTC_H__
29
/* Driver-private CRTC state wrapping the DRM CRTC, plus hw-specific
 * cursor/LUT resources and per-chipset function hooks.
 */
30struct nouveau_crtc {
31	struct drm_crtc base;
32
	/* hardware CRTC index (head number) */
33	int index;
34
35	struct drm_display_mode *mode;
36
	/* DPMS bookkeeping for flat-panel control and state caching */
37	uint32_t dpms_saved_fp_control;
38	uint32_t fp_users;
39	int saturation;
40	int sharpness;
41	int last_dpms;
42
	/* scanout framebuffer state */
43	struct {
44		int cpp;
45		bool blanked;
46		uint32_t offset;
47		uint32_t tile_flags;
48	} fb;
49
	/* hardware cursor image and per-chipset control hooks */
50	struct {
51		struct nouveau_bo *nvbo;
52		bool visible;
53		uint32_t offset;
54		void (*set_offset)(struct nouveau_crtc *, uint32_t offset);
55		void (*set_pos)(struct nouveau_crtc *, int x, int y);
56		void (*hide)(struct nouveau_crtc *, bool update);
57		void (*show)(struct nouveau_crtc *, bool update);
58	} cursor;
59
	/* gamma lookup table backing store and current ramp */
60	struct {
61		struct nouveau_bo *nvbo;
62		uint16_t r[256];
63		uint16_t g[256];
64		uint16_t b[256];
65		int depth;
66	} lut;
67
	/* per-chipset dither/scaler configuration hooks */
68	int (*set_dither)(struct nouveau_crtc *crtc, bool on, bool update);
69	int (*set_scale)(struct nouveau_crtc *crtc, int mode, bool update);
70};
71
/* Upcast from the embedded drm_crtc to the driver wrapper. */
72static inline struct nouveau_crtc *nouveau_crtc(struct drm_crtc *crtc)
73{
74	return container_of(crtc, struct nouveau_crtc, base);
75}
76
/* Downcast back to the embedded drm_crtc. */
77static inline struct drm_crtc *to_drm_crtc(struct nouveau_crtc *crtc)
78{
79	return &crtc->base;
80}
81
/* NV50 CRTC/cursor implementation (nv50_crtc.c / nv50_cursor.c). */
82int nv50_crtc_create(struct drm_device *dev, int index);
83int nv50_cursor_init(struct nouveau_crtc *);
84void nv50_cursor_fini(struct nouveau_crtc *);
85int nv50_crtc_cursor_set(struct drm_crtc *drm_crtc, struct drm_file *file_priv,
86			 uint32_t buffer_handle, uint32_t width,
87			 uint32_t height);
88int nv50_crtc_cursor_move(struct drm_crtc *drm_crtc, int x, int y);
89
/* NV04 cursor implementation (nv04_cursor.c). */
90int nv04_cursor_init(struct nouveau_crtc *);
91
92struct nouveau_connector *
93nouveau_crtc_connector_get(struct nouveau_crtc *crtc);
94
95#endif /* __NOUVEAU_CRTC_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
new file mode 100644
index 000000000000..a251886a0ce6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -0,0 +1,178 @@
1/*
2 * Copyright (C) 2009 Red Hat <bskeggs@redhat.com>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining
5 * a copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sublicense, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial
14 * portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
19 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
20 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
21 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
22 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23 *
24 */
25
26/*
27 * Authors:
28 * Ben Skeggs <bskeggs@redhat.com>
29 */
30
31#include <linux/debugfs.h>
32
33#include "drmP.h"
34#include "nouveau_drv.h"
35
/* seq_file show() callback: dump one channel's CPU- and GPU-side FIFO
 * state plus fence counters to its per-channel debugfs file.
 */
36static int
37nouveau_debugfs_channel_info(struct seq_file *m, void *data)
38{
39	struct drm_info_node *node = (struct drm_info_node *) m->private;
40	struct nouveau_channel *chan = node->info_ent->data;
41
42	seq_printf(m, "channel id : %d\n", chan->id);
43
	/* software view of the push buffer */
44	seq_printf(m, "cpu fifo state:\n");
45	seq_printf(m, " base: 0x%08x\n", chan->pushbuf_base);
46	seq_printf(m, " max: 0x%08x\n", chan->dma.max << 2);
47	seq_printf(m, " cur: 0x%08x\n", chan->dma.cur << 2);
48	seq_printf(m, " put: 0x%08x\n", chan->dma.put << 2);
49	seq_printf(m, " free: 0x%08x\n", chan->dma.free << 2);
	/* ib_max != 0 means the channel uses NV50-style indirect buffers */
50	if (chan->dma.ib_max) {
51		seq_printf(m, " ib max: 0x%08x\n", chan->dma.ib_max);
52		seq_printf(m, " ib put: 0x%08x\n", chan->dma.ib_put);
53		seq_printf(m, " ib free: 0x%08x\n", chan->dma.ib_free);
54	}
55
	/* hardware view, read from the channel's user registers */
56	seq_printf(m, "gpu fifo state:\n");
57	seq_printf(m, " get: 0x%08x\n",
58			nvchan_rd32(chan, chan->user_get));
59	seq_printf(m, " put: 0x%08x\n",
60			nvchan_rd32(chan, chan->user_put));
61	if (chan->dma.ib_max) {
62		seq_printf(m, " ib get: 0x%08x\n",
63				nvchan_rd32(chan, 0x88));
64		seq_printf(m, " ib put: 0x%08x\n",
65				nvchan_rd32(chan, 0x8c));
66	}
67
68	seq_printf(m, "last fence : %d\n", chan->fence.sequence);
69	seq_printf(m, "last signalled: %d\n", chan->fence.sequence_ack);
70	return 0;
71}
72
/* Register a debugfs file ("channel/<id>") for a newly created channel,
 * lazily creating the shared "channel" directory on first use.
 * Returns 0 on success or a negative errno.
 */
73int
74nouveau_debugfs_channel_init(struct nouveau_channel *chan)
75{
76	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
77	struct drm_minor *minor = chan->dev->primary;
78	int ret;
79
80	if (!dev_priv->debugfs.channel_root) {
81		dev_priv->debugfs.channel_root =
82			debugfs_create_dir("channel", minor->debugfs_root);
83		if (!dev_priv->debugfs.channel_root)
84			return -ENOENT;
85	}
86
	/* file is named after the channel id; name must outlive the file */
87	snprintf(chan->debugfs.name, 32, "%d", chan->id);
88	chan->debugfs.info.name = chan->debugfs.name;
89	chan->debugfs.info.show = nouveau_debugfs_channel_info;
90	chan->debugfs.info.driver_features = 0;
91	chan->debugfs.info.data = chan;
92
93	ret = drm_debugfs_create_files(&chan->debugfs.info, 1,
94				       dev_priv->debugfs.channel_root,
95				       chan->dev->primary);
96	if (ret == 0)
97		chan->debugfs.active = true;
98	return ret;
99}
100
/* Tear down a channel's debugfs file; when the kernel's own channel goes
 * away the shared "channel" directory is removed as well.
 */
101void
102nouveau_debugfs_channel_fini(struct nouveau_channel *chan)
103{
104	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
105
	/* nothing to do if init failed or was never called */
106	if (!chan->debugfs.active)
107		return;
108
109	drm_debugfs_remove_files(&chan->debugfs.info, 1, chan->dev->primary);
110	chan->debugfs.active = false;
111
112	if (chan == dev_priv->channel) {
113		debugfs_remove(dev_priv->debugfs.channel_root);
114		dev_priv->debugfs.channel_root = NULL;
115	}
116}
117
/* debugfs "chipset": print PMC_BOOT_0 and the PCI vendor/device id,
 * whose MMIO mirror location differs between pre- and post-NV40.
 */
118static int
119nouveau_debugfs_chipset_info(struct seq_file *m, void *data)
120{
121	struct drm_info_node *node = (struct drm_info_node *) m->private;
122	struct drm_minor *minor = node->minor;
123	struct drm_device *dev = minor->dev;
124	struct drm_nouveau_private *dev_priv = dev->dev_private;
125	uint32_t ppci_0;
126
127	ppci_0 = nv_rd32(dev, dev_priv->chipset >= 0x40 ? 0x88000 : 0x1800);
128
129	seq_printf(m, "PMC_BOOT_0: 0x%08x\n", nv_rd32(dev, NV03_PMC_BOOT_0));
130	seq_printf(m, "PCI ID : 0x%04x:0x%04x\n",
131		   ppci_0 & 0xffff, ppci_0 >> 16);
132	return 0;
133}
134
/* debugfs "memory": report total VRAM size in KiB. */
135static int
136nouveau_debugfs_memory_info(struct seq_file *m, void *data)
137{
138	struct drm_info_node *node = (struct drm_info_node *) m->private;
139	struct drm_minor *minor = node->minor;
140	struct drm_nouveau_private *dev_priv = minor->dev->dev_private;
141
142	seq_printf(m, "VRAM total: %dKiB\n", (int)(dev_priv->vram_size >> 10));
143	return 0;
144}
145
/* debugfs "vbios.rom": dump the raw VBIOS image byte by byte. */
146static int
147nouveau_debugfs_vbios_image(struct seq_file *m, void *data)
148{
149	struct drm_info_node *node = (struct drm_info_node *) m->private;
150	struct drm_nouveau_private *dev_priv = node->minor->dev->dev_private;
151	int i;
152
153	for (i = 0; i < dev_priv->vbios.length; i++)
154		seq_printf(m, "%c", dev_priv->vbios.data[i]);
155	return 0;
156}
157
/* Static per-device debugfs entries registered at minor init time. */
158static struct drm_info_list nouveau_debugfs_list[] = {
159	{ "chipset", nouveau_debugfs_chipset_info, 0, NULL },
160	{ "memory", nouveau_debugfs_memory_info, 0, NULL },
161	{ "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
162};
163#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
164
/* Register the static debugfs entries for a DRM minor.  Always succeeds. */
165int
166nouveau_debugfs_init(struct drm_minor *minor)
167{
168	drm_debugfs_create_files(nouveau_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES,
169				 minor->debugfs_root, minor);
170	return 0;
171}
172
/* Remove the static debugfs entries registered by nouveau_debugfs_init(). */
173void
174nouveau_debugfs_takedown(struct drm_minor *minor)
175{
176	drm_debugfs_remove_files(nouveau_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES,
177				 minor);
178}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
new file mode 100644
index 000000000000..cf1c5c0a0abe
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -0,0 +1,112 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm_crtc_helper.h"
29#include "nouveau_drv.h"
30#include "nouveau_fb.h"
31#include "nouveau_fbcon.h"
32
/* drm_framebuffer_funcs.destroy: detach from fbcon if attached, drop the
 * GEM reference on the backing buffer object, and free the wrapper.
 */
33static void
34nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
35{
36	struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
37	struct drm_device *dev = drm_fb->dev;
38
39	if (drm_fb->fbdev)
40		nouveau_fbcon_remove(dev, drm_fb);
41
42	if (fb->nvbo)
43		drm_gem_object_unreference_unlocked(fb->nvbo->gem);
44
45	drm_framebuffer_cleanup(drm_fb);
46	kfree(fb);
47}
48
/* drm_framebuffer_funcs.create_handle: expose the backing GEM object to
 * userspace as a handle in file_priv's handle space.
 */
49static int
50nouveau_user_framebuffer_create_handle(struct drm_framebuffer *drm_fb,
51				       struct drm_file *file_priv,
52				       unsigned int *handle)
53{
54	struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
55
56	return drm_gem_handle_create(file_priv, fb->nvbo->gem, handle);
57}
58
/* Framebuffer vtable shared by user- and fbcon-created framebuffers. */
59static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
60	.destroy = nouveau_user_framebuffer_destroy,
61	.create_handle = nouveau_user_framebuffer_create_handle,
62};
63
/* Allocate a nouveau framebuffer wrapping the given buffer object, filling
 * in pitch/size/format from mode_cmd.  Returns NULL on allocation or init
 * failure.  Ownership of nvbo's GEM reference passes to the framebuffer
 * (released in nouveau_user_framebuffer_destroy).
 */
64struct drm_framebuffer *
65nouveau_framebuffer_create(struct drm_device *dev, struct nouveau_bo *nvbo,
66			   struct drm_mode_fb_cmd *mode_cmd)
67{
68	struct nouveau_framebuffer *fb;
69	int ret;
70
71	fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL);
72	if (!fb)
73		return NULL;
74
75	ret = drm_framebuffer_init(dev, &fb->base, &nouveau_framebuffer_funcs);
76	if (ret) {
77		kfree(fb);
78		return NULL;
79	}
80
81	drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
82
83	fb->nvbo = nvbo;
84	return &fb->base;
85}
86
/* drm_mode_config_funcs.fb_create: resolve the userspace GEM handle and
 * wrap the buffer object in a framebuffer.  The lookup's reference is kept
 * by the framebuffer on success and dropped on failure.
 */
87static struct drm_framebuffer *
88nouveau_user_framebuffer_create(struct drm_device *dev,
89				struct drm_file *file_priv,
90				struct drm_mode_fb_cmd *mode_cmd)
91{
92	struct drm_framebuffer *fb;
93	struct drm_gem_object *gem;
94
95	gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
96	if (!gem)
97		return NULL;
98
99	fb = nouveau_framebuffer_create(dev, nouveau_gem_object(gem), mode_cmd);
100	if (!fb) {
101		drm_gem_object_unreference(gem);
102		return NULL;
103	}
104
105	return fb;
106}
107
/* Mode-config vtable hooked up at KMS init. */
108const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
109	.fb_create = nouveau_user_framebuffer_create,
110	.fb_changed = nouveau_fbcon_probe,
111};
112
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
new file mode 100644
index 000000000000..65c441a1999f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -0,0 +1,353 @@
1/*
2 * Copyright (C) 2007 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm.h"
29#include "nouveau_drv.h"
30#include "nouveau_dma.h"
31
/* Initialise the software DMA state for a channel's push buffer.  On NV50
 * the upper half of the buffer is carved out as the indirect buffer (IB)
 * ring; all dma.* offsets are in 32-bit words (hence the >> 2 shifts).
 */
32void
33nouveau_dma_pre_init(struct nouveau_channel *chan)
34{
35	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
36	struct nouveau_bo *pushbuf = chan->pushbuf_bo;
37
38	if (dev_priv->card_type == NV_50) {
39		const int ib_size = pushbuf->bo.mem.size / 2;
40
		/* IB ring lives after the push area; 8 bytes per IB entry */
41		chan->dma.ib_base = (pushbuf->bo.mem.size - ib_size) >> 2;
42		chan->dma.ib_max = (ib_size / 8) - 1;
43		chan->dma.ib_put = 0;
44		chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;
45
46		chan->dma.max = (pushbuf->bo.mem.size - ib_size) >> 2;
47	} else {
		/* reserve two words at the end for the wrap-around jump */
48		chan->dma.max = (pushbuf->bo.mem.size >> 2) - 2;
49	}
50
51	chan->dma.put = 0;
52	chan->dma.cur = chan->dma.put;
53	chan->dma.free = chan->dma.max - chan->dma.cur;
54}
55
/* Set up the kernel channel's standard objects (M2MF, NV_SW, notifier),
 * map the push buffer, and emit the initial commands binding those objects
 * to their subchannels.  Returns 0 on success or a negative errno.
 */
56int
57nouveau_dma_init(struct nouveau_channel *chan)
58{
59	struct drm_device *dev = chan->dev;
60	struct drm_nouveau_private *dev_priv = dev->dev_private;
61	struct nouveau_gpuobj *m2mf = NULL;
62	struct nouveau_gpuobj *nvsw = NULL;
63	int ret, i;
64
65	/* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
66	ret = nouveau_gpuobj_gr_new(chan, dev_priv->card_type < NV_50 ?
67				    0x0039 : 0x5039, &m2mf);
68	if (ret)
69		return ret;
70
71	ret = nouveau_gpuobj_ref_add(dev, chan, NvM2MF, m2mf, NULL);
72	if (ret)
73		return ret;
74
75	/* Create an NV_SW object for various sync purposes */
76	ret = nouveau_gpuobj_sw_new(chan, NV_SW, &nvsw);
77	if (ret)
78		return ret;
79
80	ret = nouveau_gpuobj_ref_add(dev, chan, NvSw, nvsw, NULL);
81	if (ret)
82		return ret;
83
84	/* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
85	ret = nouveau_notifier_alloc(chan, NvNotify0, 32, &chan->m2mf_ntfy);
86	if (ret)
87		return ret;
88
89	/* Map push buffer */
90	ret = nouveau_bo_map(chan->pushbuf_bo);
91	if (ret)
92		return ret;
93
94	/* Map M2MF notifier object - fbcon. */
95	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
96		ret = nouveau_bo_map(chan->notifier_bo);
97		if (ret)
98			return ret;
99	}
100
101	/* Insert NOPS for NOUVEAU_DMA_SKIPS */
102	ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
103	if (ret)
104		return ret;
105
106	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
107		OUT_RING(chan, 0);
108
109	/* Initialise NV_MEMORY_TO_MEMORY_FORMAT */
110	ret = RING_SPACE(chan, 4);
111	if (ret)
112		return ret;
113	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1);
114	OUT_RING(chan, NvM2MF);
115	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 1);
116	OUT_RING(chan, NvNotify0);
117
118	/* Initialise NV_SW */
119	ret = RING_SPACE(chan, 2);
120	if (ret)
121		return ret;
122	BEGIN_RING(chan, NvSubSw, 0, 1);
123	OUT_RING(chan, NvSw);
124
125	/* Sit back and pray the channel works.. */
126	FIRE_RING(chan);
127
128	return 0;
129}
130
/* Copy nr_dwords of command data into the push buffer at the current CPU
 * put position, using memcpy_toio when the mapping is I/O memory.
 */
131void
132OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
133{
134	bool is_iomem;
135	u32 *mem = ttm_kmap_obj_virtual(&chan->pushbuf_bo->kmap, &is_iomem);
136	mem = &mem[chan->dma.cur];
137	if (is_iomem)
138		memcpy_toio((void __force __iomem *)mem, data, nr_dwords * 4);
139	else
140		memcpy(mem, data, nr_dwords * 4);
141	chan->dma.cur += nr_dwords;
142}
143
144/* Fetch and adjust GPU GET pointer
145 *
146 * Returns:
147 *  value >= 0, the adjusted GET pointer (in words, relative to pushbuf)
148 *  -EINVAL if GET pointer currently outside main push buffer
149 *  -EBUSY if timeout exceeded
150 */
151static inline int
152READ_GET(struct nouveau_channel *chan, uint32_t *prev_get, uint32_t *timeout)
153{
154	uint32_t val;
155
156	val = nvchan_rd32(chan, chan->user_get);
157
158	/* reset counter as long as GET is still advancing, this is
159	 * to avoid misdetecting a GPU lockup if the GPU happens to
160	 * just be processing an operation that takes a long time
161	 */
162	if (val != *prev_get) {
163		*prev_get = val;
164		*timeout = 0;
165	}
166
	/* delay only every 256th poll; give up after ~100k iterations */
167	if ((++*timeout & 0xff) == 0) {
168		DRM_UDELAY(1);
169		if (*timeout > 100000)
170			return -EBUSY;
171	}
172
173	if (val < chan->pushbuf_base ||
174	    val > chan->pushbuf_base + (chan->dma.max << 2))
175		return -EINVAL;
176
177	return (val - chan->pushbuf_base) >> 2;
178}
179
/* Queue one IB entry pointing at (bo + delta, length bytes) and kick the
 * GPU by writing the new IB put pointer.  Caller must have reserved space
 * (dma.ib_free >= 1), which the BUG_ON enforces.
 */
180void
181nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
182	      int delta, int length)
183{
184	struct nouveau_bo *pb = chan->pushbuf_bo;
185	uint64_t offset = bo->bo.offset + delta;
186	int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
187
188	BUG_ON(chan->dma.ib_free < 1);
189	nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
190	nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8);
191
192	chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;
193
	/* order the IB entry writes before the put-pointer update */
194	DRM_MEMORYBARRIER();
195	/* Flush writes. */
196	nouveau_bo_rd32(pb, 0);
197
198	nvchan_wr32(chan, 0x8c, chan->dma.ib_put);
199	chan->dma.ib_free--;
200}
201
/* Spin until at least 'count' IB ring entries are free, recomputing
 * ib_free from the hardware IB get pointer (reg 0x88).  Returns 0 or
 * -EBUSY if the GPU makes no progress for ~100k polls.
 */
202static int
203nv50_dma_push_wait(struct nouveau_channel *chan, int count)
204{
205	uint32_t cnt = 0, prev_get = 0;
206
207	while (chan->dma.ib_free < count) {
208		uint32_t get = nvchan_rd32(chan, 0x88);
209		if (get != prev_get) {
210			prev_get = get;
211			cnt = 0;
212		}
213
214		if ((++cnt & 0xff) == 0) {
215			DRM_UDELAY(1);
216			if (cnt > 100000)
217				return -EBUSY;
218		}
219
		/* free entries wrap around the ring of ib_max + 1 slots */
220		chan->dma.ib_free = get - chan->dma.ib_put;
221		if (chan->dma.ib_free <= 0)
222			chan->dma.ib_free += chan->dma.ib_max + 1;
223	}
224
225	return 0;
226}
227
/* NV50 (IB-mode) variant of nouveau_dma_wait(): first secure 'slots'+1 IB
 * entries, then wait for 'count' free words in the push area, wrapping to
 * the start of the buffer when the tail has insufficient space.
 */
228static int
229nv50_dma_wait(struct nouveau_channel *chan, int slots, int count)
230{
231	uint32_t cnt = 0, prev_get = 0;
232	int ret;
233
	/* +1 keeps one IB slot spare for the wrap-around push */
234	ret = nv50_dma_push_wait(chan, slots + 1);
235	if (unlikely(ret))
236		return ret;
237
238	while (chan->dma.free < count) {
239		int get = READ_GET(chan, &prev_get, &cnt);
240		if (unlikely(get < 0)) {
			/* GET outside pushbuf: keep polling until sensible */
241			if (get == -EINVAL)
242				continue;
243
244			return get;
245		}
246
247		if (get <= chan->dma.cur) {
			/* GPU is behind us: free space runs to buffer end */
248			chan->dma.free = chan->dma.max - chan->dma.cur;
249			if (chan->dma.free >= count)
250				break;
251
			/* not enough tail space; submit what we have and
			 * wait for the GPU to leave the buffer start */
252			FIRE_RING(chan);
253			do {
254				get = READ_GET(chan, &prev_get, &cnt);
255				if (unlikely(get < 0)) {
256					if (get == -EINVAL)
257						continue;
258					return get;
259				}
260			} while (get == 0);
261			chan->dma.cur = 0;
262			chan->dma.put = 0;
263		}
264
		/* GPU ahead of us: space up to GET, minus one guard word */
265		chan->dma.free = get - chan->dma.cur - 1;
266	}
267
268	return 0;
269}
270
/* Wait until 'size' words are free in the channel's push buffer (and, on
 * IB-mode channels, 'slots' IB entries), wrapping the CPU put pointer back
 * past the SKIPS area when the tail is too small.  Returns 0 or -EBUSY on
 * an apparent GPU lockup.
 */
271int
272nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
273{
274	uint32_t prev_get = 0, cnt = 0;
275	int get;
276
	/* ib_max != 0 identifies an NV50-style IB channel */
277	if (chan->dma.ib_max)
278		return nv50_dma_wait(chan, slots, size);
279
280	while (chan->dma.free < size) {
281		get = READ_GET(chan, &prev_get, &cnt);
282		if (unlikely(get == -EBUSY))
283			return -EBUSY;
284
285		/* loop until we have a usable GET pointer.  the value
286		 * we read from the GPU may be outside the main ring if
287		 * PFIFO is processing a buffer called from the main ring,
288		 * discard these values until something sensible is seen.
289		 *
290		 * the other case we discard GET is while the GPU is fetching
291		 * from the SKIPS area, so the code below doesn't have to deal
292		 * with some fun corner cases.
293		 */
294		if (unlikely(get == -EINVAL) || get < NOUVEAU_DMA_SKIPS)
295			continue;
296
297		if (get <= chan->dma.cur) {
298			/* engine is fetching behind us, or is completely
299			 * idle (GET == PUT) so we have free space up until
300			 * the end of the push buffer
301			 *
302			 * we can only hit that path once per call due to
303			 * looping back to the beginning of the push buffer,
304			 * we'll hit the fetching-ahead-of-us path from that
305			 * point on.
306			 *
307			 * the *one* exception to that rule is if we read
308			 * GET==PUT, in which case the below conditional will
309			 * always succeed and break us out of the wait loop.
310			 */
311			chan->dma.free = chan->dma.max - chan->dma.cur;
312			if (chan->dma.free >= size)
313				break;
314
315			/* not enough space left at the end of the push buffer,
316			 * instruct the GPU to jump back to the start right
317			 * after processing the currently pending commands.
318			 */
319			OUT_RING(chan, chan->pushbuf_base | 0x20000000);
320
321			/* wait for GET to depart from the skips area.
322			 * prevents writing GET==PUT and causing a race
323			 * condition that causes us to think the GPU is
324			 * idle when it's not.
325			 */
326			do {
327				get = READ_GET(chan, &prev_get, &cnt);
328				if (unlikely(get == -EBUSY))
329					return -EBUSY;
330				if (unlikely(get == -EINVAL))
331					continue;
332			} while (get <= NOUVEAU_DMA_SKIPS);
333			WRITE_PUT(NOUVEAU_DMA_SKIPS);
334
335			/* we're now submitting commands at the start of
336			 * the push buffer.
337			 */
338			chan->dma.cur =
339				chan->dma.put = NOUVEAU_DMA_SKIPS;
340		}
341
342		/* engine fetching ahead of us, we have space up until the
343		 * current GET pointer.  the "- 1" is to ensure there's
344		 * space left to emit a jump back to the beginning of the
345		 * push buffer if we require it.  we can never get GET == PUT
346		 * here, so this is safe.
347		 */
348		chan->dma.free = get - chan->dma.cur - 1;
349	}
350
351	return 0;
352}
353
353
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
new file mode 100644
index 000000000000..8b05c15866d5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -0,0 +1,166 @@
1/*
2 * Copyright (C) 2007 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#ifndef __NOUVEAU_DMA_H__
28#define __NOUVEAU_DMA_H__
29
30#ifndef NOUVEAU_DMA_DEBUG
31#define NOUVEAU_DMA_DEBUG 0
32#endif
33
34void nv50_dma_push(struct nouveau_channel *, struct nouveau_bo *,
35 int delta, int length);
36
37/*
38 * There's a hw race condition where you can't jump to your PUT offset,
39 * to avoid this we jump to offset + SKIPS and fill the difference with
40 * NOPs.
41 *
42 * xf86-video-nv configures the DMA fetch size to 32 bytes, and uses
43 * a SKIPS value of 8. Lets assume that the race condition is to do
44 * with writing into the fetch area, we configure a fetch size of 128
45 * bytes so we need a larger SKIPS value.
46 */
47#define NOUVEAU_DMA_SKIPS (128 / 4)
48
/* Hardcoded object assignments to subchannels (subchannel id). */
enum {
	NvSubM2MF	= 0,	/* memory-to-memory format (copies) */
	NvSubSw		= 1,	/* software object (sync/fencing) */
	NvSub2D		= 2,	/* G80+ unified 2D engine */
	NvSubCtxSurf2D	= 2,	/* pre-G80 2D: shares subchannel 2 with Nv2D */
	NvSubGdiRect	= 3,
	NvSubImageBlit	= 4
};
58
/* Object handles.
 *
 * Handles in the 0x8xxxxxxx range are created by the kernel for its own
 * fbcon/accel channels; the 0x01xxxxxx handles are DMA objects referenced
 * by the G80+ EVO display channel.
 */
enum {
	NvM2MF		= 0x80000001,
	NvDmaFB		= 0x80000002,
	NvDmaTT		= 0x80000003,
	NvDmaVRAM	= 0x80000004,
	NvDmaGART	= 0x80000005,
	NvNotify0       = 0x80000006,
	Nv2D		= 0x80000007,
	NvCtxSurf2D	= 0x80000008,
	NvRop		= 0x80000009,
	NvImagePatt	= 0x8000000a,
	NvClipRect	= 0x8000000b,
	NvGdiRect	= 0x8000000c,
	NvImageBlit	= 0x8000000d,
	NvSw		= 0x8000000e,

	/* G80+ display objects */
	NvEvoVRAM	= 0x01000000,
	NvEvoFB16	= 0x01000001,
	NvEvoFB32	= 0x01000002
};
81
/* M2MF graphics class and method offsets (class 0x0039, and the G80+
 * variant 0x5039 which adds 40-bit address methods). */
#define NV_MEMORY_TO_MEMORY_FORMAT                                    0x00000039
#define NV_MEMORY_TO_MEMORY_FORMAT_NAME                               0x00000000
#define NV_MEMORY_TO_MEMORY_FORMAT_SET_REF                            0x00000050
#define NV_MEMORY_TO_MEMORY_FORMAT_NOP                                0x00000100
#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY                             0x00000104
#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE                 0x00000000
#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE_LE_AWAKEN       0x00000001
#define NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY                         0x00000180
#define NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE                         0x00000184
#define NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN                          0x0000030c

#define NV50_MEMORY_TO_MEMORY_FORMAT                                  0x00005039
#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK200                           0x00000200
#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK21C                           0x0000021c
#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN_HIGH                   0x00000238
#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_OUT_HIGH                  0x0000023c
98
99static __must_check inline int
100RING_SPACE(struct nouveau_channel *chan, int size)
101{
102 int ret;
103
104 ret = nouveau_dma_wait(chan, 1, size);
105 if (ret)
106 return ret;
107
108 chan->dma.free -= size;
109 return 0;
110}
111
112static inline void
113OUT_RING(struct nouveau_channel *chan, int data)
114{
115 if (NOUVEAU_DMA_DEBUG) {
116 NV_INFO(chan->dev, "Ch%d/0x%08x: 0x%08x\n",
117 chan->id, chan->dma.cur << 2, data);
118 }
119
120 nouveau_bo_wr32(chan->pushbuf_bo, chan->dma.cur++, data);
121}
122
123extern void
124OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords);
125
126static inline void
127BEGIN_RING(struct nouveau_channel *chan, int subc, int mthd, int size)
128{
129 OUT_RING(chan, (subc << 13) | (size << 18) | mthd);
130}
131
/* Update the hardware PUT pointer to dword offset "val" (the channel's
 * pushbuf_base is added in, and the value is converted to bytes).
 *
 * The memory barrier plus the read-back of the push buffer ensure all
 * previously written commands are visible before the GPU is told to
 * fetch them.  NOTE: expects a "chan" variable in the calling scope.
 */
#define WRITE_PUT(val) do { \
	DRM_MEMORYBARRIER(); \
	nouveau_bo_rd32(chan->pushbuf_bo, 0); \
	nvchan_wr32(chan, chan->user_put, ((val) << 2) + chan->pushbuf_base); \
} while (0)
137
138static inline void
139FIRE_RING(struct nouveau_channel *chan)
140{
141 if (NOUVEAU_DMA_DEBUG) {
142 NV_INFO(chan->dev, "Ch%d/0x%08x: PUSH!\n",
143 chan->id, chan->dma.cur << 2);
144 }
145
146 if (chan->dma.cur == chan->dma.put)
147 return;
148 chan->accel_done = true;
149
150 if (chan->dma.ib_max) {
151 nv50_dma_push(chan, chan->pushbuf_bo, chan->dma.put << 2,
152 (chan->dma.cur - chan->dma.put) << 2);
153 } else {
154 WRITE_PUT(chan->dma.cur);
155 }
156
157 chan->dma.put = chan->dma.cur;
158}
159
/* Discard any commands emitted since the last FIRE_RING() by rewinding
 * the CPU write pointer back to the last-submitted position. */
static inline void
WIND_RING(struct nouveau_channel *chan)
{
	chan->dma.cur = chan->dma.put;
}
165
166#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
new file mode 100644
index 000000000000..deeb21c6865c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -0,0 +1,581 @@
1/*
2 * Copyright 2009 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_i2c.h"
28#include "nouveau_encoder.h"
29
30static int
31auxch_rd(struct drm_encoder *encoder, int address, uint8_t *buf, int size)
32{
33 struct drm_device *dev = encoder->dev;
34 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
35 struct nouveau_i2c_chan *auxch;
36 int ret;
37
38 auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
39 if (!auxch)
40 return -ENODEV;
41
42 ret = nouveau_dp_auxch(auxch, 9, address, buf, size);
43 if (ret)
44 return ret;
45
46 return 0;
47}
48
49static int
50auxch_wr(struct drm_encoder *encoder, int address, uint8_t *buf, int size)
51{
52 struct drm_device *dev = encoder->dev;
53 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
54 struct nouveau_i2c_chan *auxch;
55 int ret;
56
57 auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
58 if (!auxch)
59 return -ENODEV;
60
61 ret = nouveau_dp_auxch(auxch, 8, address, buf, size);
62 return ret;
63}
64
65static int
66nouveau_dp_lane_count_set(struct drm_encoder *encoder, uint8_t cmd)
67{
68 struct drm_device *dev = encoder->dev;
69 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
70 uint32_t tmp;
71 int or = nv_encoder->or, link = !(nv_encoder->dcb->sorconf.link & 1);
72
73 tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
74 tmp &= ~(NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED |
75 NV50_SOR_DP_CTRL_LANE_MASK);
76 tmp |= ((1 << (cmd & DP_LANE_COUNT_MASK)) - 1) << 16;
77 if (cmd & DP_LANE_COUNT_ENHANCED_FRAME_EN)
78 tmp |= NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED;
79 nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp);
80
81 return auxch_wr(encoder, DP_LANE_COUNT_SET, &cmd, 1);
82}
83
84static int
85nouveau_dp_link_bw_set(struct drm_encoder *encoder, uint8_t cmd)
86{
87 struct drm_device *dev = encoder->dev;
88 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
89 uint32_t tmp;
90 int reg = 0x614300 + (nv_encoder->or * 0x800);
91
92 tmp = nv_rd32(dev, reg);
93 tmp &= 0xfff3ffff;
94 if (cmd == DP_LINK_BW_2_7)
95 tmp |= 0x00040000;
96 nv_wr32(dev, reg, tmp);
97
98 return auxch_wr(encoder, DP_LINK_BW_SET, &cmd, 1);
99}
100
/* Select training pattern "pattern" on both ends of the link: first in
 * the SOR's DP control register, then (read-modify-write) in the sink's
 * DPCD TRAINING_PATTERN_SET register so the sink's other bits (e.g.
 * scrambling control) are preserved. */
static int
nouveau_dp_link_train_set(struct drm_encoder *encoder, int pattern)
{
	struct drm_device *dev = encoder->dev;
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	uint32_t tmp;
	uint8_t cmd;
	int or = nv_encoder->or, link = !(nv_encoder->dcb->sorconf.link & 1);
	int ret;

	/* source side: pattern field lives at bits 24+ of the SOR ctrl reg */
	tmp  = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
	tmp &= ~NV50_SOR_DP_CTRL_TRAINING_PATTERN;
	tmp |= (pattern << 24);
	nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp);

	/* sink side: update only the pattern bits of TRAINING_PATTERN_SET */
	ret = auxch_rd(encoder, DP_TRAINING_PATTERN_SET, &cmd, 1);
	if (ret)
		return ret;
	cmd &= ~DP_TRAINING_PATTERN_MASK;
	cmd |= (pattern & DP_TRAINING_PATTERN_MASK);
	return auxch_wr(encoder, DP_TRAINING_PATTERN_SET, &cmd, 1);
}
123
124static int
125nouveau_dp_max_voltage_swing(struct drm_encoder *encoder)
126{
127 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
128 struct drm_device *dev = encoder->dev;
129 struct bit_displayport_encoder_table_entry *dpse;
130 struct bit_displayport_encoder_table *dpe;
131 int i, dpe_headerlen, max_vs = 0;
132
133 dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
134 if (!dpe)
135 return false;
136 dpse = (void *)((char *)dpe + dpe_headerlen);
137
138 for (i = 0; i < dpe_headerlen; i++, dpse++) {
139 if (dpse->vs_level > max_vs)
140 max_vs = dpse->vs_level;
141 }
142
143 return max_vs;
144}
145
146static int
147nouveau_dp_max_pre_emphasis(struct drm_encoder *encoder, int vs)
148{
149 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
150 struct drm_device *dev = encoder->dev;
151 struct bit_displayport_encoder_table_entry *dpse;
152 struct bit_displayport_encoder_table *dpe;
153 int i, dpe_headerlen, max_pre = 0;
154
155 dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
156 if (!dpe)
157 return false;
158 dpse = (void *)((char *)dpe + dpe_headerlen);
159
160 for (i = 0; i < dpe_headerlen; i++, dpse++) {
161 if (dpse->vs_level != vs)
162 continue;
163
164 if (dpse->pre_level > max_pre)
165 max_pre = dpse->pre_level;
166 }
167
168 return max_pre;
169}
170
/* Read the sink's per-lane voltage-swing / pre-emphasis adjustment
 * request (DPCD ADJUST_REQUEST_LANE0_1) and compute a new drive
 * "config[]" (one byte per lane, DP_TRAIN_* layout: vs in bits 0-1,
 * "max reached" in bit 2, pre-emphasis in bits 3-4).
 *
 * All lanes are driven at the level of the most-demanding lane, and
 * each value is clamped to the VBIOS table maximum (with the
 * corresponding MAX_*_REACHED bit set when clamped).
 * Returns false when the VBIOS table or the AUX read is unavailable.
 */
static bool
nouveau_dp_link_train_adjust(struct drm_encoder *encoder, uint8_t *config)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_device *dev = encoder->dev;
	struct bit_displayport_encoder_table_entry *dpse;
	struct bit_displayport_encoder_table *dpe;
	int ret, i, dpe_headerlen, vs = 0, pre = 0;
	uint8_t request[2];

	/* NOTE(review): dpse is computed here but never used; only the
	 * presence of the table is actually checked. */
	dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
	if (!dpe)
		return false;
	dpse = (void *)((char *)dpe + dpe_headerlen);

	ret = auxch_rd(encoder, DP_ADJUST_REQUEST_LANE0_1, request, 2);
	if (ret)
		return false;

	NV_DEBUG_KMS(dev, "\t\tadjust 0x%02x 0x%02x\n", request[0], request[1]);

	/* Keep all lanes at the same level.. */
	for (i = 0; i < nv_encoder->dp.link_nr; i++) {
		/* each request byte packs two lanes, 4 bits per lane */
		int lane_req = (request[i >> 1] >> ((i & 1) << 2)) & 0xf;
		int lane_vs = lane_req & 3;
		int lane_pre = (lane_req >> 2) & 3;

		if (lane_vs > vs)
			vs = lane_vs;
		if (lane_pre > pre)
			pre = lane_pre;
	}

	/* clamp to table max; bit 2 == DP_TRAIN_MAX_SWING_REACHED */
	if (vs >= nouveau_dp_max_voltage_swing(encoder)) {
		vs  = nouveau_dp_max_voltage_swing(encoder);
		vs |= 4;
	}

	/* clamp to table max; bit 2 (shifted) == MAX_PRE_EMPHASIS_REACHED */
	if (pre >= nouveau_dp_max_pre_emphasis(encoder, vs & 3)) {
		pre  = nouveau_dp_max_pre_emphasis(encoder, vs & 3);
		pre |= 4;
	}

	/* Update the configuration for all lanes.. */
	for (i = 0; i < nv_encoder->dp.link_nr; i++)
		config[i] = (pre << 3) | vs;

	return true;
}
220
/* Apply the drive configuration computed by
 * nouveau_dp_link_train_adjust(): look up the matching VBIOS encoder
 * table entry for lane 0's swing/pre-emphasis, program its register
 * values into the per-lane SOR drive registers, then write the DPCD
 * TRAINING_LANE0_SET..LANE3_SET block on the sink.
 * Returns false when the VBIOS table or the AUX write fails.
 */
static bool
nouveau_dp_link_train_commit(struct drm_encoder *encoder, uint8_t *config)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_device *dev = encoder->dev;
	struct bit_displayport_encoder_table_entry *dpse;
	struct bit_displayport_encoder_table *dpe;
	int or = nv_encoder->or, link = !(nv_encoder->dcb->sorconf.link & 1);
	int dpe_headerlen, ret, i;

	NV_DEBUG_KMS(dev, "\t\tconfig 0x%02x 0x%02x 0x%02x 0x%02x\n",
		     config[0], config[1], config[2], config[3]);

	dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
	if (!dpe)
		return false;
	dpse = (void *)((char *)dpe + dpe_headerlen);

	/* find the table entry matching lane 0's vs/pre levels; all lanes
	 * are driven identically (see link_train_adjust) */
	for (i = 0; i < dpe->record_nr; i++, dpse++) {
		if (dpse->vs_level == (config[0] & 3) &&
		    dpse->pre_level == ((config[0] >> 3) & 3))
			break;
	}
	/* adjust clamps to the table's maxima, so a miss means the VBIOS
	 * table and the clamping code disagree — treat as fatal */
	BUG_ON(i == dpe->record_nr);

	for (i = 0; i < nv_encoder->dp.link_nr; i++) {
		/* per-lane byte positions within the 32-bit drive regs */
		const int shift[4] = { 16, 8, 0, 24 };
		uint32_t mask = 0xff << shift[i];
		uint32_t reg0, reg1, reg2;

		reg0 = nv_rd32(dev, NV50_SOR_DP_UNK118(or, link)) & ~mask;
		reg0 |= (dpse->reg0 << shift[i]);
		reg1 = nv_rd32(dev, NV50_SOR_DP_UNK120(or, link)) & ~mask;
		reg1 |= (dpse->reg1 << shift[i]);
		reg2 = nv_rd32(dev, NV50_SOR_DP_UNK130(or, link)) & 0xffff00ff;
		reg2 |= (dpse->reg2 << 8);
		nv_wr32(dev, NV50_SOR_DP_UNK118(or, link), reg0);
		nv_wr32(dev, NV50_SOR_DP_UNK120(or, link), reg1);
		nv_wr32(dev, NV50_SOR_DP_UNK130(or, link), reg2);
	}

	ret = auxch_wr(encoder, DP_TRAINING_LANE0_SET, config, 4);
	if (ret)
		return false;

	return true;
}
268
/* Perform the full DisplayPort link-training sequence on "encoder":
 * program link rate and lane count, run clock recovery (TPS1) and then
 * channel equalisation (TPS2), adjusting drive levels per sink
 * feedback.  On failure at 2.7GHz, retries the whole sequence once at
 * the 1.62GHz rate.  Returns true iff channel equalisation succeeded.
 */
bool
nouveau_dp_link_train(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	uint8_t config[4];
	uint8_t status[3];
	bool cr_done, cr_max_vs, eq_done;
	int ret = 0, i, tries, voltage;

	NV_DEBUG_KMS(dev, "link training!!\n");
train:
	cr_done = eq_done = false;

	/* set link configuration */
	NV_DEBUG_KMS(dev, "\tbegin train: bw %d, lanes %d\n",
		     nv_encoder->dp.link_bw, nv_encoder->dp.link_nr);

	ret = nouveau_dp_link_bw_set(encoder, nv_encoder->dp.link_bw);
	if (ret)
		return false;

	config[0] = nv_encoder->dp.link_nr;
	/* enhanced framing is mandatory-capable from DPCD rev 1.1 */
	if (nv_encoder->dp.dpcd_version >= 0x11)
		config[0] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;

	ret = nouveau_dp_lane_count_set(encoder, config[0]);
	if (ret)
		return false;

	/* clock recovery */
	NV_DEBUG_KMS(dev, "\tbegin cr\n");
	ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_1);
	if (ret)
		goto stop;

	tries = 0;
	voltage = -1;
	memset(config, 0x00, sizeof(config));
	for (;;) {
		if (!nouveau_dp_link_train_commit(encoder, config))
			break;

		/* DP spec: wait >= 100us before reading CR status */
		udelay(100);

		ret = auxch_rd(encoder, DP_LANE0_1_STATUS, status, 2);
		if (ret)
			break;
		NV_DEBUG_KMS(dev, "\t\tstatus: 0x%02x 0x%02x\n",
			     status[0], status[1]);

		cr_done = true;
		cr_max_vs = false;
		for (i = 0; i < nv_encoder->dp.link_nr; i++) {
			/* 4 status bits per lane, two lanes per byte */
			int lane = (status[i >> 1] >> ((i & 1) * 4)) & 0xf;

			if (!(lane & DP_LANE_CR_DONE)) {
				cr_done = false;
				if (config[i] & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED)
					cr_max_vs = true;
				break;
			}
		}

		/* restart the retry counter whenever the swing changes */
		if ((config[0] & DP_TRAIN_VOLTAGE_SWING_MASK) != voltage) {
			voltage = config[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
			tries = 0;
		}

		/* give up after 5 tries at one swing, or when maxed out */
		if (cr_done || cr_max_vs || (++tries == 5))
			break;

		if (!nouveau_dp_link_train_adjust(encoder, config))
			break;
	}

	if (!cr_done)
		goto stop;

	/* channel equalisation */
	NV_DEBUG_KMS(dev, "\tbegin eq\n");
	ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_2);
	if (ret)
		goto stop;

	for (tries = 0; tries <= 5; tries++) {
		/* DP spec: wait >= 400us before reading EQ status */
		udelay(400);

		ret = auxch_rd(encoder, DP_LANE0_1_STATUS, status, 3);
		if (ret)
			break;
		NV_DEBUG_KMS(dev, "\t\tstatus: 0x%02x 0x%02x\n",
			     status[0], status[1]);

		eq_done = true;
		if (!(status[2] & DP_INTERLANE_ALIGN_DONE))
			eq_done = false;

		for (i = 0; eq_done && i < nv_encoder->dp.link_nr; i++) {
			int lane = (status[i >> 1] >> ((i & 1) * 4)) & 0xf;

			/* losing CR during EQ aborts training entirely */
			if (!(lane & DP_LANE_CR_DONE)) {
				cr_done = false;
				break;
			}

			if (!(lane & DP_LANE_CHANNEL_EQ_DONE) ||
			    !(lane & DP_LANE_SYMBOL_LOCKED)) {
				eq_done = false;
				break;
			}
		}

		if (eq_done || !cr_done)
			break;

		if (!nouveau_dp_link_train_adjust(encoder, config) ||
		    !nouveau_dp_link_train_commit(encoder, config))
			break;
	}

stop:
	/* end link training */
	ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_DISABLE);
	if (ret)
		return false;

	/* retry at a lower setting, if possible
	 * (NOTE(review): "!ret" is always true here — a non-zero ret
	 * already returned above) */
	if (!ret && !(eq_done && cr_done)) {
		NV_DEBUG_KMS(dev, "\twe failed\n");
		if (nv_encoder->dp.link_bw != DP_LINK_BW_1_62) {
			NV_DEBUG_KMS(dev, "retry link training at low rate\n");
			nv_encoder->dp.link_bw = DP_LINK_BW_1_62;
			goto train;
		}
	}

	return eq_done;
}
408
/* Probe the sink's DPCD receiver capabilities (first 4 bytes) and
 * record the link rate and lane count to train with, clamped to what
 * the board's DCB entry says the source supports.
 * Returns false when the AUX read fails (i.e. no sink present).
 */
bool
nouveau_dp_detect(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_device *dev = encoder->dev;
	uint8_t dpcd[4];
	int ret;

	/* DPCD 0x0000: rev, max link rate, max lane count, downspread */
	ret = auxch_rd(encoder, 0x0000, dpcd, 4);
	if (ret)
		return false;

	NV_DEBUG_KMS(dev, "encoder: link_bw %d, link_nr %d\n"
		     "display: link_bw %d, link_nr %d version 0x%02x\n",
		     nv_encoder->dcb->dpconf.link_bw,
		     nv_encoder->dcb->dpconf.link_nr,
		     dpcd[1], dpcd[2] & 0x0f, dpcd[0]);

	nv_encoder->dp.dpcd_version = dpcd[0];

	/* clamp the rate to 1.62GHz when the DCB says 2.7 is unsupported */
	nv_encoder->dp.link_bw = dpcd[1];
	if (nv_encoder->dp.link_bw != DP_LINK_BW_1_62 &&
	    !nv_encoder->dcb->dpconf.link_bw)
		nv_encoder->dp.link_bw = DP_LINK_BW_1_62;

	/* use no more lanes than the DCB allows */
	nv_encoder->dp.link_nr = dpcd[2] & 0xf;
	if (nv_encoder->dp.link_nr > nv_encoder->dcb->dpconf.link_nr)
		nv_encoder->dp.link_nr = nv_encoder->dcb->dpconf.link_nr;

	return true;
}
440
/* Execute a single AUX channel transaction of up to 16 bytes.
 *
 * "cmd" is the raw AUX command (bit 0 set == read); "addr" the AUX/DPCD
 * address; "data"/"data_nr" the payload buffer.  Retries transactions
 * the sink DEFERs, up to 16 times.
 *
 * Returns a negative errno on failure, otherwise the reply status bits
 * (stat & NV50_AUXCH_STAT_REPLY; 0 == native ACK).
 */
int
nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
		 uint8_t *data, int data_nr)
{
	struct drm_device *dev = auxch->dev;
	uint32_t tmp, ctrl, stat = 0, data32[4] = {};
	int ret = 0, i, index = auxch->rd;

	NV_DEBUG_KMS(dev, "ch %d cmd %d addr 0x%x len %d\n", index, cmd, addr, data_nr);

	/* enable the aux channel (bit 20) and confirm it reports ready
	 * (bit 24) before doing anything else */
	tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd));
	nv_wr32(dev, NV50_AUXCH_CTRL(auxch->rd), tmp | 0x00100000);
	tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd));
	if (!(tmp & 0x01000000)) {
		NV_ERROR(dev, "expected bit 24 == 1, got 0x%08x\n", tmp);
		ret = -EIO;
		goto out;
	}

	/* wait (up to 3 polls) for the channel state machine to go idle */
	for (i = 0; i < 3; i++) {
		tmp = nv_rd32(dev, NV50_AUXCH_STAT(auxch->rd));
		if (tmp & NV50_AUXCH_STAT_STATE_READY)
			break;
		udelay(100);
	}

	if (i == 3) {
		ret = -EBUSY;
		goto out;
	}

	/* writes (cmd bit 0 clear): stage payload into the 4 data regs */
	if (!(cmd & 1)) {
		memcpy(data32, data, data_nr);
		for (i = 0; i < 4; i++) {
			NV_DEBUG_KMS(dev, "wr %d: 0x%08x\n", i, data32[i]);
			nv_wr32(dev, NV50_AUXCH_DATA_OUT(index, i), data32[i]);
		}
	}

	nv_wr32(dev, NV50_AUXCH_ADDR(index), addr);
	ctrl  = nv_rd32(dev, NV50_AUXCH_CTRL(index));
	ctrl &= ~(NV50_AUXCH_CTRL_CMD | NV50_AUXCH_CTRL_LEN);
	ctrl |= (cmd << NV50_AUXCH_CTRL_CMD_SHIFT);
	ctrl |= ((data_nr - 1) << NV50_AUXCH_CTRL_LEN_SHIFT);

	/* fire the transaction, retrying while the sink replies DEFER */
	for (i = 0; i < 16; i++) {
		nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x80000000);
		nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl);
		nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x00010000);
		if (!nv_wait(NV50_AUXCH_CTRL(index), 0x00010000, 0x00000000)) {
			NV_ERROR(dev, "expected bit 16 == 0, got 0x%08x\n",
				 nv_rd32(dev, NV50_AUXCH_CTRL(index)));
			ret = -EBUSY;
			goto out;
		}

		udelay(400);

		stat = nv_rd32(dev, NV50_AUXCH_STAT(index));
		if ((stat & NV50_AUXCH_STAT_REPLY_AUX) !=
				NV50_AUXCH_STAT_REPLY_AUX_DEFER)
			break;
	}

	if (i == 16) {
		NV_ERROR(dev, "auxch DEFER too many times, bailing\n");
		ret = -EREMOTEIO;
		goto out;
	}

	/* reads: verify the sink returned the full length, then unload
	 * the data registers */
	if (cmd & 1) {
		if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) {
			ret = -EREMOTEIO;
			goto out;
		}

		for (i = 0; i < 4; i++) {
			data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i));
			NV_DEBUG_KMS(dev, "rd %d: 0x%08x\n", i, data32[i]);
		}
		memcpy(data, data32, data_nr);
	}

out:
	/* disable the channel again and check it reports not-ready */
	tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd));
	nv_wr32(dev, NV50_AUXCH_CTRL(auxch->rd), tmp & ~0x00100000);
	tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd));
	if (tmp & 0x01000000) {
		NV_ERROR(dev, "expected bit 24 == 0, got 0x%08x\n", tmp);
		ret = -EIO;
	}

	udelay(400);

	return ret ? ret : (stat & NV50_AUXCH_STAT_REPLY);
}
537
/* i2c-over-AUX single-byte transfer callback for the drm dp-aux i2c
 * algorithm: translates one MODE_I2C_* step into an AUX I2C command and
 * retries while the sink replies DEFER.
 * Returns 1 on ACK, a negative errno otherwise.
 */
int
nouveau_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
		      uint8_t write_byte, uint8_t *read_byte)
{
	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
	struct nouveau_i2c_chan *auxch = (struct nouveau_i2c_chan *)adapter;
	struct drm_device *dev = auxch->dev;
	int ret = 0, cmd, addr = algo_data->address;
	uint8_t *buf;

	if (mode == MODE_I2C_READ) {
		cmd = AUX_I2C_READ;
		buf = read_byte;
	} else {
		cmd = (mode & MODE_I2C_READ) ? AUX_I2C_READ : AUX_I2C_WRITE;
		buf = &write_byte;
	}

	/* MOT ("middle of transaction") is set for every step except
	 * the one carrying the i2c STOP */
	if (!(mode & MODE_I2C_STOP))
		cmd |= AUX_I2C_MOT;

	/* START needs no bus traffic of its own; report success */
	if (mode & MODE_I2C_START)
		return 1;

	for (;;) {
		ret = nouveau_dp_auxch(auxch, cmd, addr, buf, 1);
		if (ret < 0)
			return ret;

		switch (ret & NV50_AUXCH_STAT_REPLY_I2C) {
		case NV50_AUXCH_STAT_REPLY_I2C_ACK:
			return 1;
		case NV50_AUXCH_STAT_REPLY_I2C_NACK:
			return -EREMOTEIO;
		case NV50_AUXCH_STAT_REPLY_I2C_DEFER:
			/* sink busy: back off briefly and retry */
			udelay(100);
			break;
		default:
			NV_ERROR(dev, "invalid auxch status: 0x%08x\n", ret);
			return -EREMOTEIO;
		}
	}
}
581
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
new file mode 100644
index 000000000000..1de974acbc65
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -0,0 +1,434 @@
1/*
2 * Copyright 2005 Stephane Marchesin.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25#include <linux/console.h>
26
27#include "drmP.h"
28#include "drm.h"
29#include "drm_crtc_helper.h"
30#include "nouveau_drv.h"
31#include "nouveau_hw.h"
32#include "nouveau_fb.h"
33#include "nouveau_fbcon.h"
34#include "nv50_display.h"
35
36#include "drm_pciids.h"
37
38MODULE_PARM_DESC(ctxfw, "Use external firmware blob for grctx init (NV40)");
39int nouveau_ctxfw = 0;
40module_param_named(ctxfw, nouveau_ctxfw, int, 0400);
41
42MODULE_PARM_DESC(noagp, "Disable AGP");
43int nouveau_noagp;
44module_param_named(noagp, nouveau_noagp, int, 0400);
45
46MODULE_PARM_DESC(modeset, "Enable kernel modesetting");
47static int nouveau_modeset = -1; /* kms */
48module_param_named(modeset, nouveau_modeset, int, 0400);
49
50MODULE_PARM_DESC(vbios, "Override default VBIOS location");
51char *nouveau_vbios;
52module_param_named(vbios, nouveau_vbios, charp, 0400);
53
54MODULE_PARM_DESC(vram_pushbuf, "Force DMA push buffers to be in VRAM");
55int nouveau_vram_pushbuf;
56module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
57
58MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM");
59int nouveau_vram_notify = 1;
60module_param_named(vram_notify, nouveau_vram_notify, int, 0400);
61
62MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)");
63int nouveau_duallink = 1;
64module_param_named(duallink, nouveau_duallink, int, 0400);
65
66MODULE_PARM_DESC(uscript_lvds, "LVDS output script table ID (>=GeForce 8)");
67int nouveau_uscript_lvds = -1;
68module_param_named(uscript_lvds, nouveau_uscript_lvds, int, 0400);
69
70MODULE_PARM_DESC(uscript_tmds, "TMDS output script table ID (>=GeForce 8)");
71int nouveau_uscript_tmds = -1;
72module_param_named(uscript_tmds, nouveau_uscript_tmds, int, 0400);
73
74MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status");
75int nouveau_ignorelid = 0;
76module_param_named(ignorelid, nouveau_ignorelid, int, 0400);
77
78MODULE_PARM_DESC(noaccel, "Disable all acceleration");
79int nouveau_noaccel = 0;
80module_param_named(noaccel, nouveau_noaccel, int, 0400);
81
82MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
83int nouveau_nofbaccel = 0;
84module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
85
86MODULE_PARM_DESC(override_conntype, "Ignore DCB connector type");
87int nouveau_override_conntype = 0;
88module_param_named(override_conntype, nouveau_override_conntype, int, 0400);
89
90MODULE_PARM_DESC(tv_disable, "Disable TV-out detection\n");
91int nouveau_tv_disable = 0;
92module_param_named(tv_disable, nouveau_tv_disable, int, 0400);
93
94MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
95 "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n"
96 "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n"
97 "\t\tDefault: PAL\n"
98 "\t\t*NOTE* Ignored for cards with external TV encoders.");
99char *nouveau_tv_norm;
100module_param_named(tv_norm, nouveau_tv_norm, charp, 0400);
101
102MODULE_PARM_DESC(reg_debug, "Register access debug bitmask:\n"
103 "\t\t0x1 mc, 0x2 video, 0x4 fb, 0x8 extdev,\n"
104 "\t\t0x10 crtc, 0x20 ramdac, 0x40 vgacrtc, 0x80 rmvio,\n"
105 "\t\t0x100 vgaattr, 0x200 EVO (G80+). ");
106int nouveau_reg_debug;
107module_param_named(reg_debug, nouveau_reg_debug, int, 0600);
108
109int nouveau_fbpercrtc;
110#if 0
111module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400);
112#endif
113
/* Claim every PCI display-class device from NVIDIA (and the early
 * SGS-Thomson-badged parts); per-chipset handling happens in load(). */
static struct pci_device_id pciidlist[] = {
	{
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID),
		.class = PCI_BASE_CLASS_DISPLAY << 16,
		.class_mask  = 0xff << 16,
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA_SGS, PCI_ANY_ID),
		.class = PCI_BASE_CLASS_DISPLAY << 16,
		.class_mask  = 0xff << 16,
	},
	{}
};
127
128MODULE_DEVICE_TABLE(pci, pciidlist);
129
130static struct drm_driver driver;
131
/* PCI probe callback: hand the device to the DRM core, which will in
 * turn call driver.load (nouveau_load) to do the real setup. */
static int __devinit
nouveau_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_dev(pdev, ent, &driver);
}
137
/* PCI remove callback: release the DRM device registered at probe
 * time (the DRM core invokes driver.unload from here). */
static void
nouveau_pci_remove(struct pci_dev *pdev)
{
	drm_put_dev(pci_get_drvdata(pdev));
}
145
/* PCI suspend callback (KMS only): quiesce acceleration, evict VRAM,
 * idle every channel, save GPU object/instmem state and power the
 * device down.  Order matters: fbcon acceleration is disabled first so
 * nothing submits while we tear down, and the engines are re-enabled
 * on any failure path. */
int
nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_channel *chan;
	struct drm_crtc *crtc;
	uint32_t fbdev_flags;
	int ret, i;

	/* only the KMS path knows how to restore state on resume */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (pm_state.event == PM_EVENT_PRETHAW)
		return 0;

	NV_INFO(dev, "Disabling fbcon acceleration...\n");
	fbdev_flags = dev_priv->fbdev_info->flags;
	dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;

	NV_INFO(dev, "Unpinning framebuffer(s)...\n");
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct nouveau_framebuffer *nouveau_fb;

		nouveau_fb = nouveau_framebuffer(crtc->fb);
		if (!nouveau_fb || !nouveau_fb->nvbo)
			continue;

		nouveau_bo_unpin(nouveau_fb->nvbo);
	}

	/* push all remaining VRAM contents out to system memory */
	NV_INFO(dev, "Evicting buffers...\n");
	ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);

	/* fence each channel and wait, so no work is in flight; the
	 * NV50 fifos[0] is skipped (kernel channel shared slot) */
	NV_INFO(dev, "Idling channels...\n");
	for (i = 0; i < pfifo->channels; i++) {
		struct nouveau_fence *fence = NULL;

		chan = dev_priv->fifos[i];
		if (!chan || (dev_priv->card_type >= NV_50 &&
			      chan == dev_priv->fifos[0]))
			continue;

		ret = nouveau_fence_new(chan, &fence, true);
		if (ret == 0) {
			ret = nouveau_fence_wait(fence, NULL, false, false);
			nouveau_fence_unref((void *)&fence);
		}

		/* best-effort: log and continue rather than abort */
		if (ret) {
			NV_ERROR(dev, "Failed to idle channel %d for suspend\n",
				 chan->id);
		}
	}

	/* stop the engines and unload the active contexts */
	pgraph->fifo_access(dev, false);
	nouveau_wait_for_idle(dev);
	pfifo->reassign(dev, false);
	pfifo->disable(dev);
	pfifo->unload_context(dev);
	pgraph->unload_context(dev);

	NV_INFO(dev, "Suspending GPU objects...\n");
	ret = nouveau_gpuobj_suspend(dev);
	if (ret) {
		NV_ERROR(dev, "... failed: %d\n", ret);
		goto out_abort;
	}

	ret = pinstmem->suspend(dev);
	if (ret) {
		NV_ERROR(dev, "... failed: %d\n", ret);
		nouveau_gpuobj_suspend_cleanup(dev);
		goto out_abort;
	}

	NV_INFO(dev, "And we're gone!\n");
	pci_save_state(pdev);
	/* PRETHAW/HIBERNATE paths keep the device powered */
	if (pm_state.event == PM_EVENT_SUSPEND) {
		pci_disable_device(pdev);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	acquire_console_sem();
	fb_set_suspend(dev_priv->fbdev_info, 1);
	release_console_sem();
	dev_priv->fbdev_info->flags = fbdev_flags;
	return 0;

out_abort:
	/* undo the engine shutdown so the system remains usable */
	NV_INFO(dev, "Re-enabling acceleration..\n");
	pfifo->enable(dev);
	pfifo->reassign(dev, true);
	pgraph->fifo_access(dev, true);
	return ret;
}
245
246int
247nouveau_pci_resume(struct pci_dev *pdev)
248{
249 struct drm_device *dev = pci_get_drvdata(pdev);
250 struct drm_nouveau_private *dev_priv = dev->dev_private;
251 struct nouveau_engine *engine = &dev_priv->engine;
252 struct drm_crtc *crtc;
253 uint32_t fbdev_flags;
254 int ret, i;
255
256 if (!drm_core_check_feature(dev, DRIVER_MODESET))
257 return -ENODEV;
258
259 fbdev_flags = dev_priv->fbdev_info->flags;
260 dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
261
262 NV_INFO(dev, "We're back, enabling device...\n");
263 pci_set_power_state(pdev, PCI_D0);
264 pci_restore_state(pdev);
265 if (pci_enable_device(pdev))
266 return -1;
267 pci_set_master(dev->pdev);
268
269 NV_INFO(dev, "POSTing device...\n");
270 ret = nouveau_run_vbios_init(dev);
271 if (ret)
272 return ret;
273
274 if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
275 ret = nouveau_mem_init_agp(dev);
276 if (ret) {
277 NV_ERROR(dev, "error reinitialising AGP: %d\n", ret);
278 return ret;
279 }
280 }
281
282 NV_INFO(dev, "Reinitialising engines...\n");
283 engine->instmem.resume(dev);
284 engine->mc.init(dev);
285 engine->timer.init(dev);
286 engine->fb.init(dev);
287 engine->graph.init(dev);
288 engine->fifo.init(dev);
289
290 NV_INFO(dev, "Restoring GPU objects...\n");
291 nouveau_gpuobj_resume(dev);
292
293 nouveau_irq_postinstall(dev);
294
295 /* Re-write SKIPS, they'll have been lost over the suspend */
296 if (nouveau_vram_pushbuf) {
297 struct nouveau_channel *chan;
298 int j;
299
300 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
301 chan = dev_priv->fifos[i];
302 if (!chan || !chan->pushbuf_bo)
303 continue;
304
305 for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
306 nouveau_bo_wr32(chan->pushbuf_bo, i, 0);
307 }
308 }
309
310 NV_INFO(dev, "Restoring mode...\n");
311 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
312 struct nouveau_framebuffer *nouveau_fb;
313
314 nouveau_fb = nouveau_framebuffer(crtc->fb);
315 if (!nouveau_fb || !nouveau_fb->nvbo)
316 continue;
317
318 nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM);
319 }
320
321 if (dev_priv->card_type < NV_50) {
322 nv04_display_restore(dev);
323 NVLockVgaCrtcs(dev, false);
324 } else
325 nv50_display_init(dev);
326
327 /* Force CLUT to get re-loaded during modeset */
328 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
329 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
330
331 nv_crtc->lut.depth = 0;
332 }
333
334 acquire_console_sem();
335 fb_set_suspend(dev_priv->fbdev_info, 0);
336 release_console_sem();
337
338 nouveau_fbcon_zfill(dev);
339
340 drm_helper_resume_force_mode(dev);
341 dev_priv->fbdev_info->flags = fbdev_flags;
342 return 0;
343}
344
/* DRM driver descriptor: lifecycle hooks, irq handlers, file ops,
 * PCI glue and GEM callbacks for the nouveau driver. */
static struct drm_driver driver = {
	.driver_features =
		DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
		DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,
	.load = nouveau_load,
	.firstopen = nouveau_firstopen,
	.lastclose = nouveau_lastclose,
	.unload = nouveau_unload,
	.preclose = nouveau_preclose,
#if defined(CONFIG_DRM_NOUVEAU_DEBUG)
	.debugfs_init = nouveau_debugfs_init,
	.debugfs_cleanup = nouveau_debugfs_takedown,
#endif
	.irq_preinstall = nouveau_irq_preinstall,
	.irq_postinstall = nouveau_irq_postinstall,
	.irq_uninstall = nouveau_irq_uninstall,
	.irq_handler = nouveau_irq_handler,
	.reclaim_buffers = drm_core_reclaim_buffers,
	.get_map_ofs = drm_core_get_map_ofs,
	.get_reg_ofs = drm_core_get_reg_ofs,
	.ioctls = nouveau_ioctls,
	.fops = {
		.owner = THIS_MODULE,
		.open = drm_open,
		.release = drm_release,
		.unlocked_ioctl = drm_ioctl,
		/* mmap goes through TTM so GEM objects can be mapped */
		.mmap = nouveau_ttm_mmap,
		.poll = drm_poll,
		.fasync = drm_fasync,
#if defined(CONFIG_COMPAT)
		.compat_ioctl = nouveau_compat_ioctl,
#endif
	},
	.pci_driver = {
		.name = DRIVER_NAME,
		.id_table = pciidlist,
		.probe = nouveau_pci_probe,
		.remove = nouveau_pci_remove,
		.suspend = nouveau_pci_suspend,
		.resume = nouveau_pci_resume
	},

	.gem_init_object = nouveau_gem_object_new,
	.gem_free_object = nouveau_gem_object_del,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	/* prefer the git revision as the version date when available */
#ifdef GIT_REVISION
	.date = GIT_REVISION,
#else
	.date = DRIVER_DATE,
#endif
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
401
/* Module entry point: decide whether KMS is enabled (honouring the
 * nouveau.modeset= parameter and "nomodeset"-style vga text forcing),
 * register the ACPI DSM handler for hybrid setups, then register with
 * the DRM core. */
static int __init nouveau_init(void)
{
	driver.num_ioctls = nouveau_max_ioctl;

	/* modeset == -1 means "auto": default to KMS unless the vga
	 * console was explicitly forced to text mode */
	if (nouveau_modeset == -1) {
#ifdef CONFIG_VGA_CONSOLE
		if (vgacon_text_force())
			nouveau_modeset = 0;
		else
#endif
			nouveau_modeset = 1;
	}

	if (nouveau_modeset == 1) {
		driver.driver_features |= DRIVER_MODESET;
		nouveau_register_dsm_handler();
	}

	return drm_init(&driver);
}
422
/* Module exit: unregister from the DRM core and drop the ACPI DSM
 * handler.
 * NOTE(review): the DSM handler is registered only when modeset == 1
 * (see nouveau_init) but unregistered unconditionally here — presumably
 * unregistering an unregistered handler is a safe no-op; verify. */
static void __exit nouveau_exit(void)
{
	drm_exit(&driver);
	nouveau_unregister_dsm_handler();
}
428
429module_init(nouveau_init);
430module_exit(nouveau_exit);
431
432MODULE_AUTHOR(DRIVER_AUTHOR);
433MODULE_DESCRIPTION(DRIVER_DESC);
434MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
new file mode 100644
index 000000000000..ace630aa89e1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -0,0 +1,1344 @@
1/*
2 * Copyright 2005 Stephane Marchesin.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25#ifndef __NOUVEAU_DRV_H__
26#define __NOUVEAU_DRV_H__
27
28#define DRIVER_AUTHOR "Stephane Marchesin"
29#define DRIVER_EMAIL "dri-devel@lists.sourceforge.net"
30
31#define DRIVER_NAME "nouveau"
32#define DRIVER_DESC "nVidia Riva/TNT/GeForce"
33#define DRIVER_DATE "20090420"
34
35#define DRIVER_MAJOR 0
36#define DRIVER_MINOR 0
37#define DRIVER_PATCHLEVEL 16
38
39#define NOUVEAU_FAMILY 0x0000FFFF
40#define NOUVEAU_FLAGS 0xFFFF0000
41
42#include "ttm/ttm_bo_api.h"
43#include "ttm/ttm_bo_driver.h"
44#include "ttm/ttm_placement.h"
45#include "ttm/ttm_memory.h"
46#include "ttm/ttm_module.h"
47
/* Per-open-file driver state: just the TTM object-file handle. */
 48struct nouveau_fpriv {
 49	struct ttm_object_file *tfile;
 50};
51
52#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
53
54#include "nouveau_drm.h"
55#include "nouveau_reg.h"
56#include "nouveau_bios.h"
57struct nouveau_grctx;
58
59#define MAX_NUM_DCB_ENTRIES 16
60
61#define NOUVEAU_MAX_CHANNEL_NR 128
62#define NOUVEAU_MAX_TILE_NR 15
63
64#define NV50_VM_MAX_VRAM (2*1024*1024*1024ULL)
65#define NV50_VM_BLOCK (512*1024*1024ULL)
66#define NV50_VM_VRAM_NR (NV50_VM_MAX_VRAM / NV50_VM_BLOCK)
67
/*
 * Software state for one NV10-NV40 memory tiling region
 * (see nv10_mem_set_tiling()/nv10_mem_expire_tiling()).
 */
 68struct nouveau_tile_reg {
 69	struct nouveau_fence *fence;	/* fence of the last user, set on expiry */
 70	uint32_t addr;			/* start address of the tiled region */
 71	uint32_t size;
 72	bool used;			/* region currently owned by an allocation */
 73};
74
/*
 * Nouveau's buffer object: a TTM buffer object plus driver-side state
 * (placement lists, kmap, tiling, GEM linkage, pin accounting).
 */
 75struct nouveau_bo {
 76	struct ttm_buffer_object bo;	/* embedded TTM BO; see nouveau_bo() upcast */
 77	struct ttm_placement placement;
 78	u32 placements[3];
 79	u32 busy_placements[3];
 80	struct ttm_bo_kmap_obj kmap;	/* CPU mapping; see nvbo_kmap_obj_iovirtual() */
 81	struct list_head head;
 82
 83	/* protected by ttm_bo_reserve() */
 84	struct drm_file *reserved_by;
 85	struct list_head entry;
 86	int pbbo_index;
 87	bool validate_mapped;
 88
 89	struct nouveau_channel *channel;
 90
 91	bool mappable;
 92	bool no_vm;
 93
 94	uint32_t tile_mode;
 95	uint32_t tile_flags;
 96	struct nouveau_tile_reg *tile;	/* NV1x-NV4x tiling region, if any */
 97
 98	struct drm_gem_object *gem;	/* associated GEM object */
 99	struct drm_file *cpu_filp;
 100	int pin_refcnt;			/* pin reference count */
 101};
102
/* Upcast from an embedded TTM buffer object to its nouveau_bo wrapper. */
 103static inline struct nouveau_bo *
 104nouveau_bo(struct ttm_buffer_object *bo)
 105{
 106	return container_of(bo, struct nouveau_bo, bo);
 107}
108
/* GEM object -> nouveau_bo (stored in driver_private); NULL-safe. */
 109static inline struct nouveau_bo *
 110nouveau_gem_object(struct drm_gem_object *gem)
 111{
 112	return gem ? gem->driver_private : NULL;
 113}
114
 115/* TODO: submit equivalent to TTM generic API upstream? */
/*
 * Return the kernel virtual address of a kmap'd BO as an __iomem pointer.
 * Warns (once) if a mapping exists but is not actually I/O memory.
 */
 116static inline void __iomem *
 117nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo)
 118{
 119	bool is_iomem;
 120	void __iomem *ioptr = (void __force __iomem *)ttm_kmap_obj_virtual(
 121		&nvbo->kmap, &is_iomem);
 122	WARN_ON_ONCE(ioptr && !is_iomem);
 123	return ioptr;
 124}
125
/*
 * Node in the driver's simple doubly-linked range allocator, used for the
 * PRAMIN and notifier heaps (see the nouveau_mem_* heap functions below).
 */
 126struct mem_block {
 127	struct mem_block *next;
 128	struct mem_block *prev;
 129	uint64_t start;		/* offset of this range within the heap */
 130	uint64_t size;
 131	struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
 132};
133
/* Chipset quirk flags; values live in the NOUVEAU_FLAGS (high 16 bit) mask. */
 134enum nouveau_flags {
 135	NV_NFORCE = 0x10000000,
 136	NV_NFORCE2 = 0x20000000
 137};
138
139#define NVOBJ_ENGINE_SW 0
140#define NVOBJ_ENGINE_GR 1
141#define NVOBJ_ENGINE_DISPLAY 2
142#define NVOBJ_ENGINE_INT 0xdeadbeef
143
144#define NVOBJ_FLAG_ALLOW_NO_REFS (1 << 0)
145#define NVOBJ_FLAG_ZERO_ALLOC (1 << 1)
146#define NVOBJ_FLAG_ZERO_FREE (1 << 2)
147#define NVOBJ_FLAG_FAKE (1 << 3)
/*
 * An object living in instance memory (PRAMIN); the im_* fields track its
 * PRAMIN allocation, optional VRAM backing and suspend copy.
 */
 148struct nouveau_gpuobj {
 149	struct list_head list;		/* entry in the device gpuobj_list */
 150
 151	struct nouveau_channel *im_channel;
 152	struct mem_block *im_pramin;	/* allocation inside the PRAMIN heap */
 153	struct nouveau_bo *im_backing;	/* VRAM backing buffer, if any */
 154	uint32_t im_backing_start;
 155	uint32_t *im_backing_suspend;	/* contents copy made for suspend/resume */
 156	int im_bound;			/* set once bound by the instmem engine */
 157
 158	uint32_t flags;			/* NVOBJ_FLAG_* */
 159	int refcount;
 160
 161	uint32_t engine;		/* NVOBJ_ENGINE_* */
 162	uint32_t class;
 163
 164	void (*dtor)(struct drm_device *, struct nouveau_gpuobj *);
 165	void *priv;			/* private data for the owner/dtor */
 166};
167
/*
 * A per-channel reference to a gpuobj, findable by handle
 * (see nouveau_gpuobj_ref_add()/ref_find()).
 */
 168struct nouveau_gpuobj_ref {
 169	struct list_head list;
 170
 171	struct nouveau_gpuobj *gpuobj;
 172	uint32_t instance;		/* instance address of the object */
 173
 174	struct nouveau_channel *channel; /* owning channel, if any */
 175	int handle;			/* lookup handle for this reference */
 176};
177
/*
 * Per-FIFO-channel state: ownership, user-visible control regs, fencing,
 * push buffer, notifiers, per-engine contexts and private instance memory.
 */
 178struct nouveau_channel {
 179	struct drm_device *dev;
 180	int id;				/* FIFO channel number */
 181
 182	/* owner of this fifo */
 183	struct drm_file *file_priv;
 184	/* mapping of the fifo itself */
 185	struct drm_local_map *map;
 186
 187	/* mapping of the regs controling the fifo */
 188	void __iomem *user;
 189	uint32_t user_get;
 190	uint32_t user_put;
 191
 192	/* Fencing */
 193	struct {
 194		/* lock protects the pending list only */
 195		spinlock_t lock;
 196		struct list_head pending;
 197		uint32_t sequence;		/* last emitted sequence number */
 198		uint32_t sequence_ack;		/* last sequence seen completed */
 199		uint32_t last_sequence_irq;
 200	} fence;
 201
 202	/* DMA push buffer */
 203	struct nouveau_gpuobj_ref *pushbuf;
 204	struct nouveau_bo *pushbuf_bo;
 205	uint32_t pushbuf_base;
 206
 207	/* Notifier memory */
 208	struct nouveau_bo *notifier_bo;
 209	struct mem_block *notifier_heap;	/* sub-allocator over notifier_bo */
 210
 211	/* PFIFO context */
 212	struct nouveau_gpuobj_ref *ramfc;
 213	struct nouveau_gpuobj_ref *cache;
 214
 215	/* PGRAPH context */
 216	/* XXX may be merge 2 pointers as private data ??? */
 217	struct nouveau_gpuobj_ref *ramin_grctx;
 218	void *pgraph_ctx;
 219
 220	/* NV50 VM */
 221	struct nouveau_gpuobj *vm_pd;		/* page directory */
 222	struct nouveau_gpuobj_ref *vm_gart_pt;
 223	struct nouveau_gpuobj_ref *vm_vram_pt[NV50_VM_VRAM_NR];
 224
 225	/* Objects */
 226	struct nouveau_gpuobj_ref *ramin; /* Private instmem */
 227	struct mem_block *ramin_heap; /* Private PRAMIN heap */
 228	struct nouveau_gpuobj_ref *ramht; /* Hash table */
 229	struct list_head ramht_refs; /* Objects referenced by RAMHT */
 230
 231	/* GPU object info for stuff used in-kernel (mm_enabled) */
 232	uint32_t m2mf_ntfy;
 233	uint32_t vram_handle;
 234	uint32_t gart_handle;
 235	bool accel_done;
 236
 237	/* Push buffer state (only for drm's channel on !mm_enabled) */
 238	struct {
 239		int max;
 240		int free;
 241		int cur;
 242		int put;
 243		/* access via pushbuf_bo */
 244
 245		int ib_base;		/* indirect buffer mode state */
 246		int ib_max;
 247		int ib_free;
 248		int ib_put;
 249	} dma;
 250
 251	uint32_t sw_subchannel[8];	/* classes bound to SW subchannels */
 252
 253	struct {
 254		struct nouveau_gpuobj *vblsem;	/* vblank semaphore object */
 255		uint32_t vblsem_offset;
 256		uint32_t vblsem_rval;
 257		struct list_head vbl_wait;
 258	} nvsw;
 259
 260	struct {
 261		bool active;
 262		char name[32];
 263		struct drm_info_list info;	/* per-channel debugfs entry */
 264	} debugfs;
 265};
266
/* Vtable abstracting chipset differences in instance-memory handling. */
 267struct nouveau_instmem_engine {
 268	void *priv;
 269
 270	int	(*init)(struct drm_device *dev);
 271	void	(*takedown)(struct drm_device *dev);
 272	int	(*suspend)(struct drm_device *dev);
 273	void	(*resume)(struct drm_device *dev);
 274
 275	int	(*populate)(struct drm_device *, struct nouveau_gpuobj *,
 276			    uint32_t *size);
 277	void	(*clear)(struct drm_device *, struct nouveau_gpuobj *);
 278	int	(*bind)(struct drm_device *, struct nouveau_gpuobj *);
 279	int	(*unbind)(struct drm_device *, struct nouveau_gpuobj *);
 280	void	(*prepare_access)(struct drm_device *, bool write);
 281	void	(*finish_access)(struct drm_device *);
 282};
283
/* Vtable for the master-control (PMC) engine. */
 284struct nouveau_mc_engine {
 285	int  (*init)(struct drm_device *dev);
 286	void (*takedown)(struct drm_device *dev);
 287};
288
/* Vtable for the timer (PTIMER) engine; read() returns the current time. */
 289struct nouveau_timer_engine {
 290	int      (*init)(struct drm_device *dev);
 291	void     (*takedown)(struct drm_device *dev);
 292	uint64_t (*read)(struct drm_device *dev);
 293};
294
/* Vtable for the framebuffer (PFB) engine, incl. tiling-region setup. */
 295struct nouveau_fb_engine {
 296	int num_tiles;		/* number of hardware tiling regions */
 297
 298	int  (*init)(struct drm_device *dev);
 299	void (*takedown)(struct drm_device *dev);
 300
 301	void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr,
 302				 uint32_t size, uint32_t pitch);
 303};
304
/* Vtable for the PFIFO engine: channel lifecycle and context switching. */
 305struct nouveau_fifo_engine {
 306	void *priv;
 307
 308	int  channels;		/* number of hardware channels */
 309
 310	int  (*init)(struct drm_device *);
 311	void (*takedown)(struct drm_device *);
 312
 313	void (*disable)(struct drm_device *);
 314	void (*enable)(struct drm_device *);
 315	bool (*reassign)(struct drm_device *, bool enable);
 316	bool (*cache_flush)(struct drm_device *dev);
 317	bool (*cache_pull)(struct drm_device *dev, bool enable);
 318
 319	int  (*channel_id)(struct drm_device *);	/* currently-active channel */
 320
 321	int  (*create_context)(struct nouveau_channel *);
 322	void (*destroy_context)(struct nouveau_channel *);
 323	int  (*load_context)(struct nouveau_channel *);
 324	int  (*unload_context)(struct drm_device *);
 325};
326
/* Software implementation of one method (mthd) of a graphics class. */
 327struct nouveau_pgraph_object_method {
 328	int id;
 329	int (*exec)(struct nouveau_channel *chan, int grclass, int mthd,
 330		    uint32_t data);
 331};
332
/* One graphics object class; 'software' classes are emulated via methods. */
 333struct nouveau_pgraph_object_class {
 334	int id;
 335	bool software;
 336	struct nouveau_pgraph_object_method *methods;
 337};
338
/* Vtable for the PGRAPH engine: context management and tiling. */
 339struct nouveau_pgraph_engine {
 340	struct nouveau_pgraph_object_class *grclass;	/* supported classes table */
 341	bool accel_blocked;		/* true when acceleration is unusable */
 342	void *ctxprog;			/* loaded context program, if any */
 343	void *ctxvals;
 344	int grctx_size;
 345
 346	int  (*init)(struct drm_device *);
 347	void (*takedown)(struct drm_device *);
 348
 349	void (*fifo_access)(struct drm_device *, bool);
 350
 351	struct nouveau_channel *(*channel)(struct drm_device *);
 352	int  (*create_context)(struct nouveau_channel *);
 353	void (*destroy_context)(struct nouveau_channel *);
 354	int  (*load_context)(struct nouveau_channel *);
 355	int  (*unload_context)(struct drm_device *);
 356
 357	void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr,
 358				  uint32_t size, uint32_t pitch);
 359};
360
/* Aggregate of all per-subsystem engine vtables for one device. */
 361struct nouveau_engine {
 362	struct nouveau_instmem_engine instmem;
 363	struct nouveau_mc_engine      mc;
 364	struct nouveau_timer_engine   timer;
 365	struct nouveau_fb_engine      fb;
 366	struct nouveau_pgraph_engine  graph;
 367	struct nouveau_fifo_engine    fifo;
 368};
369
/*
 * PLL coefficients (N/M dividers per stage plus log2 of the post divider);
 * the union lets the two N/M pairs be accessed as 16-bit words.
 */
 370struct nouveau_pll_vals {
 371	union {
 372		struct {
 373#ifdef __BIG_ENDIAN
 374			uint8_t N1, M1, N2, M2;
 375#else
 376			uint8_t M1, N1, M2, N2;
 377#endif
 378		};
 379		struct {
 380			uint16_t NM1, NM2;
 381		} __attribute__((packed));
 382	};
 383	int log2P;
 384
 385	int refclk;		/* reference clock the coefficients apply to */
 386};
387
/* Indices into the fp_horiz_regs[]/fp_vert_regs[] arrays in nv04_crtc_reg. */
 388enum nv04_fp_display_regs {
 389	FP_DISPLAY_END,
 390	FP_TOTAL,
 391	FP_CRTC,
 392	FP_SYNC_START,
 393	FP_SYNC_END,
 394	FP_VALID_START,
 395	FP_VALID_END
 396};
397
/*
 * Saved/constructed register state for one NV04-style CRTC: legacy VGA
 * registers followed by PCRTC and PRAMDAC state.
 */
 398struct nv04_crtc_reg {
 399	unsigned char MiscOutReg;     /* */
 400	uint8_t CRTC[0x9f];
 401	uint8_t CR58[0x10];
 402	uint8_t Sequencer[5];
 403	uint8_t Graphics[9];
 404	uint8_t Attribute[21];
 405	unsigned char DAC[768];       /* Internal Colorlookuptable */
 406
 407	/* PCRTC regs */
 408	uint32_t fb_start;
 409	uint32_t crtc_cfg;
 410	uint32_t cursor_cfg;
 411	uint32_t gpio_ext;
 412	uint32_t crtc_830;
 413	uint32_t crtc_834;
 414	uint32_t crtc_850;
 415	uint32_t crtc_eng_ctrl;
 416
 417	/* PRAMDAC regs */
 418	uint32_t nv10_cursync;
 419	struct nouveau_pll_vals pllvals;
 420	uint32_t ramdac_gen_ctrl;
 421	uint32_t ramdac_630;
 422	uint32_t ramdac_634;
 423	uint32_t tv_setup;
 424	uint32_t tv_vtotal;
 425	uint32_t tv_vskew;
 426	uint32_t tv_vsync_delay;
 427	uint32_t tv_htotal;
 428	uint32_t tv_hskew;
 429	uint32_t tv_hsync_delay;
 430	uint32_t tv_hsync_delay2;
 431	uint32_t fp_horiz_regs[7];	/* indexed by enum nv04_fp_display_regs */
 432	uint32_t fp_vert_regs[7];	/* indexed by enum nv04_fp_display_regs */
 433	uint32_t dither;
 434	uint32_t fp_control;
 435	uint32_t dither_regs[6];
 436	uint32_t fp_debug_0;
 437	uint32_t fp_debug_1;
 438	uint32_t fp_debug_2;
 439	uint32_t fp_margin_color;
 440	uint32_t ramdac_8c0;
 441	uint32_t ramdac_a20;
 442	uint32_t ramdac_a24;
 443	uint32_t ramdac_a34;
 444	uint32_t ctv_regs[38];
 445};
446
/* Saved output register state plus the CRTC head it was routed to. */
 447struct nv04_output_reg {
 448	uint32_t output;
 449	int head;
 450};
451
/*
 * Complete NV04-era display mode state: shared/global registers plus the
 * per-head CRTC register blocks.
 */
 452struct nv04_mode_state {
 453	uint32_t bpp;
 454	uint32_t width;
 455	uint32_t height;
 456	uint32_t interlace;
 457	uint32_t repaint0;
 458	uint32_t repaint1;
 459	uint32_t screen;
 460	uint32_t scale;
 461	uint32_t dither;
 462	uint32_t extra;
 463	uint32_t fifo;
 464	uint32_t pixel;
 465	uint32_t horiz;
 466	int arbitration0;
 467	int arbitration1;
 468	uint32_t pll;
 469	uint32_t pllB;
 470	uint32_t vpll;
 471	uint32_t vpll2;
 472	uint32_t vpllB;
 473	uint32_t vpll2B;
 474	uint32_t pllsel;
 475	uint32_t sel_clk;
 476	uint32_t general;
 477	uint32_t crtcOwner;
 478	uint32_t head;
 479	uint32_t head2;
 480	uint32_t cursorConfig;
 481	uint32_t cursor0;
 482	uint32_t cursor1;
 483	uint32_t cursor2;
 484	uint32_t timingH;
 485	uint32_t timingV;
 486	uint32_t displayV;
 487	uint32_t crtcSync;
 488
 489	struct nv04_crtc_reg crtc_reg[2];	/* one block per CRTC head */
 490};
491
/* Major GPU generation, derived from the chipset id (NV_PMC_BOOT_0). */
 492enum nouveau_card_type {
 493	NV_04      = 0x00,
 494	NV_10      = 0x10,
 495	NV_20      = 0x20,
 496	NV_30      = 0x30,
 497	NV_40      = 0x40,
 498	NV_50      = 0x50,
 499};
500
/*
 * Top-level per-device driver state, reached via drm_device::dev_private.
 */
 501struct drm_nouveau_private {
 502	struct drm_device *dev;
 503	enum {
 504		NOUVEAU_CARD_INIT_DOWN,
 505		NOUVEAU_CARD_INIT_DONE,
 506		NOUVEAU_CARD_INIT_FAILED
 507	} init_state;			/* gates ioctls, see NOUVEAU_CHECK_INITIALISED_WITH_RETURN */
 508
 509	/* the card type, takes NV_* as values */
 510	enum nouveau_card_type card_type;
 511	/* exact chipset, derived from NV_PMC_BOOT_0 */
 512	int chipset;
 513	int flags;			/* enum nouveau_flags quirks */
 514
 515	void __iomem *mmio;		/* register BAR mapping */
 516	void __iomem *ramin;		/* instance memory mapping */
 517	uint32_t ramin_size;
 518
 519	struct nouveau_bo *vga_ram;
 520
 521	struct workqueue_struct *wq;
 522	struct work_struct irq_work;
 523	struct work_struct hpd_work;	/* hotplug-detect bottom half */
 524
 525	struct list_head vbl_waiting;
 526
 527	struct {
 528		struct ttm_global_reference mem_global_ref;
 529		struct ttm_bo_global_ref bo_global_ref;
 530		struct ttm_bo_device bdev;	/* see nouveau_bdev() upcast */
 531		spinlock_t bo_list_lock;
 532		struct list_head bo_list;
 533		atomic_t validate_sequence;
 534	} ttm;
 535
 536	struct fb_info *fbdev_info;
 537
 538	struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR];	/* indexed by channel id */
 539
 540	struct nouveau_engine engine;
 541	struct nouveau_channel *channel;	/* kernel's own channel */
 542
 543	/* For PFIFO and PGRAPH. */
 544	spinlock_t context_switch_lock;
 545
 546	/* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */
 547	struct nouveau_gpuobj *ramht;
 548	uint32_t ramin_rsvd_vram;
 549	uint32_t ramht_offset;
 550	uint32_t ramht_size;
 551	uint32_t ramht_bits;
 552	uint32_t ramfc_offset;
 553	uint32_t ramfc_size;
 554	uint32_t ramro_offset;
 555	uint32_t ramro_size;
 556
 557	struct {
 558		enum {
 559			NOUVEAU_GART_NONE = 0,
 560			NOUVEAU_GART_AGP,
 561			NOUVEAU_GART_SGDMA
 562		} type;
 563		uint64_t aper_base;
 564		uint64_t aper_size;
 565		uint64_t aper_free;
 566
 567		struct nouveau_gpuobj *sg_ctxdma;
 568		struct page *sg_dummy_page;
 569		dma_addr_t sg_dummy_bus;
 570	} gart_info;
 571
 572	/* nv10-nv40 tiling regions */
 573	struct {
 574		struct nouveau_tile_reg reg[NOUVEAU_MAX_TILE_NR];
 575		spinlock_t lock;
 576	} tile;
 577
 578	/* VRAM/fb configuration */
 579	uint64_t vram_size;
 580	uint64_t vram_sys_base;
 581
 582	uint64_t fb_phys;
 583	uint64_t fb_available_size;
 584	uint64_t fb_mappable_pages;
 585	uint64_t fb_aper_free;
 586	int fb_mtrr;
 587
 588	/* G8x/G9x virtual address space */
 589	uint64_t vm_gart_base;
 590	uint64_t vm_gart_size;
 591	uint64_t vm_vram_base;
 592	uint64_t vm_vram_size;
 593	uint64_t vm_end;
 594	struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
 595	int vm_vram_pt_nr;
 596
 597	struct mem_block *ramin_heap;
 598
 599	/* context table pointed to be NV_PGRAPH_CHANNEL_CTX_TABLE (0x400780) */
 600	uint32_t ctx_table_size;
 601	struct nouveau_gpuobj_ref *ctx_table;
 602
 603	struct list_head gpuobj_list;
 604
 605	struct nvbios vbios;
 606
 607	struct nv04_mode_state mode_reg;	/* state being programmed */
 608	struct nv04_mode_state saved_reg;	/* state saved at load time */
 609	uint32_t saved_vga_font[4][16384];
 610	uint32_t crtc_owner;
 611	uint32_t dac_users[4];
 612
 613	struct nouveau_suspend_resume {
 614		uint32_t *ramin_copy;
 615	} susres;
 616
 617	struct backlight_device *backlight;
 618
 619	struct nouveau_channel *evo;	/* NV50 display (EVO) channel */
 620
 621	struct {
 622		struct dentry *channel_root;
 623	} debugfs;
 624};
625
/* Upcast from the embedded TTM bo_device to the driver private struct. */
 626static inline struct drm_nouveau_private *
 627nouveau_bdev(struct ttm_bo_device *bd)
 628{
 629	return container_of(bd, struct drm_nouveau_private, ttm.bdev);
 630}
631
/*
 * Point *pnvbo at 'ref', taking a TTM reference on it, and drop the
 * reference held by the previous value of *pnvbo (if any).  'ref' may be
 * NULL to simply release.  Returns 0, or -EINVAL if pnvbo itself is NULL.
 */
 632static inline int
 633nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
 634{
 635	struct nouveau_bo *prev;
 636
 637	if (!pnvbo)
 638		return -EINVAL;
 639	prev = *pnvbo;
 640
 641	*pnvbo = ref ? nouveau_bo(ttm_bo_reference(&ref->bo)) : NULL;
 642	if (prev) {
 643		struct ttm_buffer_object *bo = &prev->bo;
 644
 645		ttm_bo_unref(&bo);	/* drop the old pointer's reference */
 646	}
 647
 648	return 0;
 649}
650
/*
 * Return -EINVAL from the enclosing ioctl unless card init completed.
 * Requires 'dev' (struct drm_device *) in scope; note: may 'return'.
 */
 651#define NOUVEAU_CHECK_INITIALISED_WITH_RETURN do {            \
 652	struct drm_nouveau_private *nv = dev->dev_private;    \
 653	if (nv->init_state != NOUVEAU_CARD_INIT_DONE) {       \
 654		NV_ERROR(dev, "called without init\n");       \
 655		return -EINVAL;                               \
 656	}                                                     \
 657} while (0)
658
/*
 * Fetch channel 'id' into 'ch' after verifying that drm_file 'cl' owns it;
 * returns -EPERM from the enclosing function on an ownership failure.
 * Requires 'dev' (struct drm_device *) in scope; note: may 'return'.
 */
 659#define NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(id, cl, ch) do {  \
 660	struct drm_nouveau_private *nv = dev->dev_private;     \
 661	if (!nouveau_channel_owner(dev, (cl), (id))) {         \
 662		NV_ERROR(dev, "pid %d doesn't own channel %d\n", \
 663			 DRM_CURRENTPID, (id));                \
 664		return -EPERM;                                 \
 665	}                                                      \
 666	(ch) = nv->fifos[(id)];                                \
 667} while (0)
668
669/* nouveau_drv.c */
670extern int nouveau_noagp;
671extern int nouveau_duallink;
672extern int nouveau_uscript_lvds;
673extern int nouveau_uscript_tmds;
674extern int nouveau_vram_pushbuf;
675extern int nouveau_vram_notify;
676extern int nouveau_fbpercrtc;
677extern int nouveau_tv_disable;
678extern char *nouveau_tv_norm;
679extern int nouveau_reg_debug;
680extern char *nouveau_vbios;
681extern int nouveau_ctxfw;
682extern int nouveau_ignorelid;
683extern int nouveau_nofbaccel;
684extern int nouveau_noaccel;
685extern int nouveau_override_conntype;
686
687extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
688extern int nouveau_pci_resume(struct pci_dev *pdev);
689
690/* nouveau_state.c */
691extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
692extern int nouveau_load(struct drm_device *, unsigned long flags);
693extern int nouveau_firstopen(struct drm_device *);
694extern void nouveau_lastclose(struct drm_device *);
695extern int nouveau_unload(struct drm_device *);
696extern int nouveau_ioctl_getparam(struct drm_device *, void *data,
697 struct drm_file *);
698extern int nouveau_ioctl_setparam(struct drm_device *, void *data,
699 struct drm_file *);
700extern bool nouveau_wait_until(struct drm_device *, uint64_t timeout,
701 uint32_t reg, uint32_t mask, uint32_t val);
702extern bool nouveau_wait_for_idle(struct drm_device *);
703extern int nouveau_card_init(struct drm_device *);
704
705/* nouveau_mem.c */
706extern int nouveau_mem_init_heap(struct mem_block **, uint64_t start,
707 uint64_t size);
708extern struct mem_block *nouveau_mem_alloc_block(struct mem_block *,
709 uint64_t size, int align2,
710 struct drm_file *, int tail);
711extern void nouveau_mem_takedown(struct mem_block **heap);
712extern void nouveau_mem_free_block(struct mem_block *);
713extern int nouveau_mem_detect(struct drm_device *dev);
714extern void nouveau_mem_release(struct drm_file *, struct mem_block *heap);
715extern int nouveau_mem_init(struct drm_device *);
716extern int nouveau_mem_init_agp(struct drm_device *);
717extern void nouveau_mem_close(struct drm_device *);
718extern struct nouveau_tile_reg *nv10_mem_set_tiling(struct drm_device *dev,
719 uint32_t addr,
720 uint32_t size,
721 uint32_t pitch);
722extern void nv10_mem_expire_tiling(struct drm_device *dev,
723 struct nouveau_tile_reg *tile,
724 struct nouveau_fence *fence);
725extern int nv50_mem_vm_bind_linear(struct drm_device *, uint64_t virt,
726 uint32_t size, uint32_t flags,
727 uint64_t phys);
728extern void nv50_mem_vm_unbind(struct drm_device *, uint64_t virt,
729 uint32_t size);
730
731/* nouveau_notifier.c */
732extern int nouveau_notifier_init_channel(struct nouveau_channel *);
733extern void nouveau_notifier_takedown_channel(struct nouveau_channel *);
734extern int nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle,
735 int cout, uint32_t *offset);
736extern int nouveau_notifier_offset(struct nouveau_gpuobj *, uint32_t *);
737extern int nouveau_ioctl_notifier_alloc(struct drm_device *, void *data,
738 struct drm_file *);
739extern int nouveau_ioctl_notifier_free(struct drm_device *, void *data,
740 struct drm_file *);
741
742/* nouveau_channel.c */
743extern struct drm_ioctl_desc nouveau_ioctls[];
744extern int nouveau_max_ioctl;
745extern void nouveau_channel_cleanup(struct drm_device *, struct drm_file *);
746extern int nouveau_channel_owner(struct drm_device *, struct drm_file *,
747 int channel);
748extern int nouveau_channel_alloc(struct drm_device *dev,
749 struct nouveau_channel **chan,
750 struct drm_file *file_priv,
751 uint32_t fb_ctxdma, uint32_t tt_ctxdma);
752extern void nouveau_channel_free(struct nouveau_channel *);
753
754/* nouveau_object.c */
755extern int nouveau_gpuobj_early_init(struct drm_device *);
756extern int nouveau_gpuobj_init(struct drm_device *);
757extern void nouveau_gpuobj_takedown(struct drm_device *);
758extern void nouveau_gpuobj_late_takedown(struct drm_device *);
759extern int nouveau_gpuobj_suspend(struct drm_device *dev);
760extern void nouveau_gpuobj_suspend_cleanup(struct drm_device *dev);
761extern void nouveau_gpuobj_resume(struct drm_device *dev);
762extern int nouveau_gpuobj_channel_init(struct nouveau_channel *,
763 uint32_t vram_h, uint32_t tt_h);
764extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *);
765extern int nouveau_gpuobj_new(struct drm_device *, struct nouveau_channel *,
766 uint32_t size, int align, uint32_t flags,
767 struct nouveau_gpuobj **);
768extern int nouveau_gpuobj_del(struct drm_device *, struct nouveau_gpuobj **);
769extern int nouveau_gpuobj_ref_add(struct drm_device *, struct nouveau_channel *,
770 uint32_t handle, struct nouveau_gpuobj *,
771 struct nouveau_gpuobj_ref **);
772extern int nouveau_gpuobj_ref_del(struct drm_device *,
773 struct nouveau_gpuobj_ref **);
774extern int nouveau_gpuobj_ref_find(struct nouveau_channel *, uint32_t handle,
775 struct nouveau_gpuobj_ref **ref_ret);
776extern int nouveau_gpuobj_new_ref(struct drm_device *,
777 struct nouveau_channel *alloc_chan,
778 struct nouveau_channel *ref_chan,
779 uint32_t handle, uint32_t size, int align,
780 uint32_t flags, struct nouveau_gpuobj_ref **);
781extern int nouveau_gpuobj_new_fake(struct drm_device *,
782 uint32_t p_offset, uint32_t b_offset,
783 uint32_t size, uint32_t flags,
784 struct nouveau_gpuobj **,
785 struct nouveau_gpuobj_ref**);
786extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class,
787 uint64_t offset, uint64_t size, int access,
788 int target, struct nouveau_gpuobj **);
789extern int nouveau_gpuobj_gart_dma_new(struct nouveau_channel *,
790 uint64_t offset, uint64_t size,
791 int access, struct nouveau_gpuobj **,
792 uint32_t *o_ret);
793extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class,
794 struct nouveau_gpuobj **);
795extern int nouveau_gpuobj_sw_new(struct nouveau_channel *, int class,
796 struct nouveau_gpuobj **);
797extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data,
798 struct drm_file *);
799extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data,
800 struct drm_file *);
801
802/* nouveau_irq.c */
803extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS);
804extern void nouveau_irq_preinstall(struct drm_device *);
805extern int nouveau_irq_postinstall(struct drm_device *);
806extern void nouveau_irq_uninstall(struct drm_device *);
807
808/* nouveau_sgdma.c */
809extern int nouveau_sgdma_init(struct drm_device *);
810extern void nouveau_sgdma_takedown(struct drm_device *);
811extern int nouveau_sgdma_get_page(struct drm_device *, uint32_t offset,
812 uint32_t *page);
813extern struct ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *);
814
815/* nouveau_debugfs.c */
816#if defined(CONFIG_DRM_NOUVEAU_DEBUG)
817extern int nouveau_debugfs_init(struct drm_minor *);
818extern void nouveau_debugfs_takedown(struct drm_minor *);
819extern int nouveau_debugfs_channel_init(struct nouveau_channel *);
820extern void nouveau_debugfs_channel_fini(struct nouveau_channel *);
821#else
822static inline int
823nouveau_debugfs_init(struct drm_minor *minor)
824{
825 return 0;
826}
827
828static inline void nouveau_debugfs_takedown(struct drm_minor *minor)
829{
830}
831
832static inline int
833nouveau_debugfs_channel_init(struct nouveau_channel *chan)
834{
835 return 0;
836}
837
838static inline void
839nouveau_debugfs_channel_fini(struct nouveau_channel *chan)
840{
841}
842#endif
843
844/* nouveau_dma.c */
845extern void nouveau_dma_pre_init(struct nouveau_channel *);
846extern int nouveau_dma_init(struct nouveau_channel *);
847extern int nouveau_dma_wait(struct nouveau_channel *, int slots, int size);
848
849/* nouveau_acpi.c */
850#if defined(CONFIG_ACPI)
851void nouveau_register_dsm_handler(void);
852void nouveau_unregister_dsm_handler(void);
853#else
854static inline void nouveau_register_dsm_handler(void) {}
855static inline void nouveau_unregister_dsm_handler(void) {}
856#endif
857
858/* nouveau_backlight.c */
859#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
860extern int nouveau_backlight_init(struct drm_device *);
861extern void nouveau_backlight_exit(struct drm_device *);
862#else
863static inline int nouveau_backlight_init(struct drm_device *dev)
864{
865 return 0;
866}
867
868static inline void nouveau_backlight_exit(struct drm_device *dev) { }
869#endif
870
871/* nouveau_bios.c */
872extern int nouveau_bios_init(struct drm_device *);
873extern void nouveau_bios_takedown(struct drm_device *dev);
874extern int nouveau_run_vbios_init(struct drm_device *);
875extern void nouveau_bios_run_init_table(struct drm_device *, uint16_t table,
876 struct dcb_entry *);
877extern struct dcb_gpio_entry *nouveau_bios_gpio_entry(struct drm_device *,
878 enum dcb_gpio_tag);
879extern struct dcb_connector_table_entry *
880nouveau_bios_connector_entry(struct drm_device *, int index);
881extern int get_pll_limits(struct drm_device *, uint32_t limit_match,
882 struct pll_lims *);
883extern int nouveau_bios_run_display_table(struct drm_device *,
884 struct dcb_entry *,
885 uint32_t script, int pxclk);
886extern void *nouveau_bios_dp_table(struct drm_device *, struct dcb_entry *,
887 int *length);
888extern bool nouveau_bios_fp_mode(struct drm_device *, struct drm_display_mode *);
889extern uint8_t *nouveau_bios_embedded_edid(struct drm_device *);
890extern int nouveau_bios_parse_lvds_table(struct drm_device *, int pxclk,
891 bool *dl, bool *if_is_24bit);
892extern int run_tmds_table(struct drm_device *, struct dcb_entry *,
893 int head, int pxclk);
894extern int call_lvds_script(struct drm_device *, struct dcb_entry *, int head,
895 enum LVDS_script, int pxclk);
896
897/* nouveau_ttm.c */
898int nouveau_ttm_global_init(struct drm_nouveau_private *);
899void nouveau_ttm_global_release(struct drm_nouveau_private *);
900int nouveau_ttm_mmap(struct file *, struct vm_area_struct *);
901
902/* nouveau_dp.c */
903int nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
904 uint8_t *data, int data_nr);
905bool nouveau_dp_detect(struct drm_encoder *);
906bool nouveau_dp_link_train(struct drm_encoder *);
907
908/* nv04_fb.c */
909extern int nv04_fb_init(struct drm_device *);
910extern void nv04_fb_takedown(struct drm_device *);
911
912/* nv10_fb.c */
913extern int nv10_fb_init(struct drm_device *);
914extern void nv10_fb_takedown(struct drm_device *);
915extern void nv10_fb_set_region_tiling(struct drm_device *, int, uint32_t,
916 uint32_t, uint32_t);
917
918/* nv40_fb.c */
919extern int nv40_fb_init(struct drm_device *);
920extern void nv40_fb_takedown(struct drm_device *);
921extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t,
922 uint32_t, uint32_t);
923
924/* nv50_fb.c */
925extern int nv50_fb_init(struct drm_device *);
926extern void nv50_fb_takedown(struct drm_device *);
927
928/* nv04_fifo.c */
929extern int nv04_fifo_init(struct drm_device *);
930extern void nv04_fifo_disable(struct drm_device *);
931extern void nv04_fifo_enable(struct drm_device *);
932extern bool nv04_fifo_reassign(struct drm_device *, bool);
933extern bool nv04_fifo_cache_flush(struct drm_device *);
934extern bool nv04_fifo_cache_pull(struct drm_device *, bool);
935extern int nv04_fifo_channel_id(struct drm_device *);
936extern int nv04_fifo_create_context(struct nouveau_channel *);
937extern void nv04_fifo_destroy_context(struct nouveau_channel *);
938extern int nv04_fifo_load_context(struct nouveau_channel *);
939extern int nv04_fifo_unload_context(struct drm_device *);
940
941/* nv10_fifo.c */
942extern int nv10_fifo_init(struct drm_device *);
943extern int nv10_fifo_channel_id(struct drm_device *);
944extern int nv10_fifo_create_context(struct nouveau_channel *);
945extern void nv10_fifo_destroy_context(struct nouveau_channel *);
946extern int nv10_fifo_load_context(struct nouveau_channel *);
947extern int nv10_fifo_unload_context(struct drm_device *);
948
949/* nv40_fifo.c */
950extern int nv40_fifo_init(struct drm_device *);
951extern int nv40_fifo_create_context(struct nouveau_channel *);
952extern void nv40_fifo_destroy_context(struct nouveau_channel *);
953extern int nv40_fifo_load_context(struct nouveau_channel *);
954extern int nv40_fifo_unload_context(struct drm_device *);
955
956/* nv50_fifo.c */
957extern int nv50_fifo_init(struct drm_device *);
958extern void nv50_fifo_takedown(struct drm_device *);
959extern int nv50_fifo_channel_id(struct drm_device *);
960extern int nv50_fifo_create_context(struct nouveau_channel *);
961extern void nv50_fifo_destroy_context(struct nouveau_channel *);
962extern int nv50_fifo_load_context(struct nouveau_channel *);
963extern int nv50_fifo_unload_context(struct drm_device *);
964
965/* nv04_graph.c */
966extern struct nouveau_pgraph_object_class nv04_graph_grclass[];
967extern int nv04_graph_init(struct drm_device *);
968extern void nv04_graph_takedown(struct drm_device *);
969extern void nv04_graph_fifo_access(struct drm_device *, bool);
970extern struct nouveau_channel *nv04_graph_channel(struct drm_device *);
971extern int nv04_graph_create_context(struct nouveau_channel *);
972extern void nv04_graph_destroy_context(struct nouveau_channel *);
973extern int nv04_graph_load_context(struct nouveau_channel *);
974extern int nv04_graph_unload_context(struct drm_device *);
975extern void nv04_graph_context_switch(struct drm_device *);
976
977/* nv10_graph.c */
978extern struct nouveau_pgraph_object_class nv10_graph_grclass[];
979extern int nv10_graph_init(struct drm_device *);
980extern void nv10_graph_takedown(struct drm_device *);
981extern struct nouveau_channel *nv10_graph_channel(struct drm_device *);
982extern int nv10_graph_create_context(struct nouveau_channel *);
983extern void nv10_graph_destroy_context(struct nouveau_channel *);
984extern int nv10_graph_load_context(struct nouveau_channel *);
985extern int nv10_graph_unload_context(struct drm_device *);
986extern void nv10_graph_context_switch(struct drm_device *);
987extern void nv10_graph_set_region_tiling(struct drm_device *, int, uint32_t,
988 uint32_t, uint32_t);
989
990/* nv20_graph.c */
991extern struct nouveau_pgraph_object_class nv20_graph_grclass[];
992extern struct nouveau_pgraph_object_class nv30_graph_grclass[];
993extern int nv20_graph_create_context(struct nouveau_channel *);
994extern void nv20_graph_destroy_context(struct nouveau_channel *);
995extern int nv20_graph_load_context(struct nouveau_channel *);
996extern int nv20_graph_unload_context(struct drm_device *);
997extern int nv20_graph_init(struct drm_device *);
998extern void nv20_graph_takedown(struct drm_device *);
999extern int nv30_graph_init(struct drm_device *);
1000extern void nv20_graph_set_region_tiling(struct drm_device *, int, uint32_t,
1001 uint32_t, uint32_t);
1002
1003/* nv40_graph.c */
1004extern struct nouveau_pgraph_object_class nv40_graph_grclass[];
1005extern int nv40_graph_init(struct drm_device *);
1006extern void nv40_graph_takedown(struct drm_device *);
1007extern struct nouveau_channel *nv40_graph_channel(struct drm_device *);
1008extern int nv40_graph_create_context(struct nouveau_channel *);
1009extern void nv40_graph_destroy_context(struct nouveau_channel *);
1010extern int nv40_graph_load_context(struct nouveau_channel *);
1011extern int nv40_graph_unload_context(struct drm_device *);
1012extern void nv40_grctx_init(struct nouveau_grctx *);
1013extern void nv40_graph_set_region_tiling(struct drm_device *, int, uint32_t,
1014 uint32_t, uint32_t);
1015
1016/* nv50_graph.c */
1017extern struct nouveau_pgraph_object_class nv50_graph_grclass[];
1018extern int nv50_graph_init(struct drm_device *);
1019extern void nv50_graph_takedown(struct drm_device *);
1020extern void nv50_graph_fifo_access(struct drm_device *, bool);
1021extern struct nouveau_channel *nv50_graph_channel(struct drm_device *);
1022extern int nv50_graph_create_context(struct nouveau_channel *);
1023extern void nv50_graph_destroy_context(struct nouveau_channel *);
1024extern int nv50_graph_load_context(struct nouveau_channel *);
1025extern int nv50_graph_unload_context(struct drm_device *);
1026extern void nv50_graph_context_switch(struct drm_device *);
1027extern int nv50_grctx_init(struct nouveau_grctx *);
1028
1029/* nouveau_grctx.c */
1030extern int nouveau_grctx_prog_load(struct drm_device *);
1031extern void nouveau_grctx_vals_load(struct drm_device *,
1032 struct nouveau_gpuobj *);
1033extern void nouveau_grctx_fini(struct drm_device *);
1034
1035/* nv04_instmem.c */
1036extern int nv04_instmem_init(struct drm_device *);
1037extern void nv04_instmem_takedown(struct drm_device *);
1038extern int nv04_instmem_suspend(struct drm_device *);
1039extern void nv04_instmem_resume(struct drm_device *);
1040extern int nv04_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
1041 uint32_t *size);
1042extern void nv04_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
1043extern int nv04_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
1044extern int nv04_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
1045extern void nv04_instmem_prepare_access(struct drm_device *, bool write);
1046extern void nv04_instmem_finish_access(struct drm_device *);
1047
1048/* nv50_instmem.c */
1049extern int nv50_instmem_init(struct drm_device *);
1050extern void nv50_instmem_takedown(struct drm_device *);
1051extern int nv50_instmem_suspend(struct drm_device *);
1052extern void nv50_instmem_resume(struct drm_device *);
1053extern int nv50_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
1054 uint32_t *size);
1055extern void nv50_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
1056extern int nv50_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
1057extern int nv50_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
1058extern void nv50_instmem_prepare_access(struct drm_device *, bool write);
1059extern void nv50_instmem_finish_access(struct drm_device *);
1060
1061/* nv04_mc.c */
1062extern int nv04_mc_init(struct drm_device *);
1063extern void nv04_mc_takedown(struct drm_device *);
1064
1065/* nv40_mc.c */
1066extern int nv40_mc_init(struct drm_device *);
1067extern void nv40_mc_takedown(struct drm_device *);
1068
1069/* nv50_mc.c */
1070extern int nv50_mc_init(struct drm_device *);
1071extern void nv50_mc_takedown(struct drm_device *);
1072
1073/* nv04_timer.c */
1074extern int nv04_timer_init(struct drm_device *);
1075extern uint64_t nv04_timer_read(struct drm_device *);
1076extern void nv04_timer_takedown(struct drm_device *);
1077
1078extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd,
1079 unsigned long arg);
1080
1081/* nv04_dac.c */
1082extern int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry);
1083extern uint32_t nv17_dac_sample_load(struct drm_encoder *encoder);
1084extern int nv04_dac_output_offset(struct drm_encoder *encoder);
1085extern void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable);
1086
1087/* nv04_dfp.c */
1088extern int nv04_dfp_create(struct drm_device *dev, struct dcb_entry *entry);
1089extern int nv04_dfp_get_bound_head(struct drm_device *dev, struct dcb_entry *dcbent);
1090extern void nv04_dfp_bind_head(struct drm_device *dev, struct dcb_entry *dcbent,
1091 int head, bool dl);
1092extern void nv04_dfp_disable(struct drm_device *dev, int head);
1093extern void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode);
1094
1095/* nv04_tv.c */
1096extern int nv04_tv_identify(struct drm_device *dev, int i2c_index);
1097extern int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry);
1098
1099/* nv17_tv.c */
1100extern int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry);
1101
1102/* nv04_display.c */
1103extern int nv04_display_create(struct drm_device *);
1104extern void nv04_display_destroy(struct drm_device *);
1105extern void nv04_display_restore(struct drm_device *);
1106
1107/* nv04_crtc.c */
1108extern int nv04_crtc_create(struct drm_device *, int index);
1109
1110/* nouveau_bo.c */
1111extern struct ttm_bo_driver nouveau_bo_driver;
1112extern int nouveau_bo_new(struct drm_device *, struct nouveau_channel *,
1113 int size, int align, uint32_t flags,
1114 uint32_t tile_mode, uint32_t tile_flags,
1115 bool no_vm, bool mappable, struct nouveau_bo **);
1116extern int nouveau_bo_pin(struct nouveau_bo *, uint32_t flags);
1117extern int nouveau_bo_unpin(struct nouveau_bo *);
1118extern int nouveau_bo_map(struct nouveau_bo *);
1119extern void nouveau_bo_unmap(struct nouveau_bo *);
1120extern void nouveau_bo_placement_set(struct nouveau_bo *, uint32_t type,
1121 uint32_t busy);
1122extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index);
1123extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val);
1124extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index);
1125extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val);
1126
1127/* nouveau_fence.c */
1128struct nouveau_fence;
1129extern int nouveau_fence_init(struct nouveau_channel *);
1130extern void nouveau_fence_fini(struct nouveau_channel *);
1131extern void nouveau_fence_update(struct nouveau_channel *);
1132extern int nouveau_fence_new(struct nouveau_channel *, struct nouveau_fence **,
1133 bool emit);
1134extern int nouveau_fence_emit(struct nouveau_fence *);
1135struct nouveau_channel *nouveau_fence_channel(struct nouveau_fence *);
1136extern bool nouveau_fence_signalled(void *obj, void *arg);
1137extern int nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr);
1138extern int nouveau_fence_flush(void *obj, void *arg);
1139extern void nouveau_fence_unref(void **obj);
1140extern void *nouveau_fence_ref(void *obj);
1141extern void nouveau_fence_handler(struct drm_device *dev, int channel);
1142
1143/* nouveau_gem.c */
1144extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *,
1145 int size, int align, uint32_t flags,
1146 uint32_t tile_mode, uint32_t tile_flags,
1147 bool no_vm, bool mappable, struct nouveau_bo **);
1148extern int nouveau_gem_object_new(struct drm_gem_object *);
1149extern void nouveau_gem_object_del(struct drm_gem_object *);
1150extern int nouveau_gem_ioctl_new(struct drm_device *, void *,
1151 struct drm_file *);
1152extern int nouveau_gem_ioctl_pushbuf(struct drm_device *, void *,
1153 struct drm_file *);
1154extern int nouveau_gem_ioctl_cpu_prep(struct drm_device *, void *,
1155 struct drm_file *);
1156extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *,
1157 struct drm_file *);
1158extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
1159 struct drm_file *);
1160
1161/* nv17_gpio.c */
1162int nv17_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
1163int nv17_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
1164
1165/* nv50_gpio.c */
1166int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
1167int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
1168
1169#ifndef ioread32_native
1170#ifdef __BIG_ENDIAN
1171#define ioread16_native ioread16be
1172#define iowrite16_native iowrite16be
1173#define ioread32_native ioread32be
1174#define iowrite32_native iowrite32be
1175#else /* def __BIG_ENDIAN */
1176#define ioread16_native ioread16
1177#define iowrite16_native iowrite16
1178#define ioread32_native ioread32
1179#define iowrite32_native iowrite32
1180#endif /* def __BIG_ENDIAN else */
1181#endif /* !ioread32_native */
1182
1183/* channel control reg access */
1184static inline u32 nvchan_rd32(struct nouveau_channel *chan, unsigned reg)
1185{
1186 return ioread32_native(chan->user + reg);
1187}
1188
1189static inline void nvchan_wr32(struct nouveau_channel *chan,
1190 unsigned reg, u32 val)
1191{
1192 iowrite32_native(val, chan->user + reg);
1193}
1194
1195/* register access */
1196static inline u32 nv_rd32(struct drm_device *dev, unsigned reg)
1197{
1198 struct drm_nouveau_private *dev_priv = dev->dev_private;
1199 return ioread32_native(dev_priv->mmio + reg);
1200}
1201
1202static inline void nv_wr32(struct drm_device *dev, unsigned reg, u32 val)
1203{
1204 struct drm_nouveau_private *dev_priv = dev->dev_private;
1205 iowrite32_native(val, dev_priv->mmio + reg);
1206}
1207
1208static inline u8 nv_rd08(struct drm_device *dev, unsigned reg)
1209{
1210 struct drm_nouveau_private *dev_priv = dev->dev_private;
1211 return ioread8(dev_priv->mmio + reg);
1212}
1213
1214static inline void nv_wr08(struct drm_device *dev, unsigned reg, u8 val)
1215{
1216 struct drm_nouveau_private *dev_priv = dev->dev_private;
1217 iowrite8(val, dev_priv->mmio + reg);
1218}
1219
1220#define nv_wait(reg, mask, val) \
1221 nouveau_wait_until(dev, 2000000000ULL, (reg), (mask), (val))
1222
1223/* PRAMIN access */
1224static inline u32 nv_ri32(struct drm_device *dev, unsigned offset)
1225{
1226 struct drm_nouveau_private *dev_priv = dev->dev_private;
1227 return ioread32_native(dev_priv->ramin + offset);
1228}
1229
1230static inline void nv_wi32(struct drm_device *dev, unsigned offset, u32 val)
1231{
1232 struct drm_nouveau_private *dev_priv = dev->dev_private;
1233 iowrite32_native(val, dev_priv->ramin + offset);
1234}
1235
1236/* object access */
1237static inline u32 nv_ro32(struct drm_device *dev, struct nouveau_gpuobj *obj,
1238 unsigned index)
1239{
1240 return nv_ri32(dev, obj->im_pramin->start + index * 4);
1241}
1242
1243static inline void nv_wo32(struct drm_device *dev, struct nouveau_gpuobj *obj,
1244 unsigned index, u32 val)
1245{
1246 nv_wi32(dev, obj->im_pramin->start + index * 4, val);
1247}
1248
1249/*
1250 * Logging
1251 * Argument d is (struct drm_device *).
1252 */
1253#define NV_PRINTK(level, d, fmt, arg...) \
1254 printk(level "[" DRM_NAME "] " DRIVER_NAME " %s: " fmt, \
1255 pci_name(d->pdev), ##arg)
1256#ifndef NV_DEBUG_NOTRACE
1257#define NV_DEBUG(d, fmt, arg...) do { \
1258 if (drm_debug & DRM_UT_DRIVER) { \
1259 NV_PRINTK(KERN_DEBUG, d, "%s:%d - " fmt, __func__, \
1260 __LINE__, ##arg); \
1261 } \
1262} while (0)
1263#define NV_DEBUG_KMS(d, fmt, arg...) do { \
1264 if (drm_debug & DRM_UT_KMS) { \
1265 NV_PRINTK(KERN_DEBUG, d, "%s:%d - " fmt, __func__, \
1266 __LINE__, ##arg); \
1267 } \
1268} while (0)
1269#else
1270#define NV_DEBUG(d, fmt, arg...) do { \
1271 if (drm_debug & DRM_UT_DRIVER) \
1272 NV_PRINTK(KERN_DEBUG, d, fmt, ##arg); \
1273} while (0)
1274#define NV_DEBUG_KMS(d, fmt, arg...) do { \
1275 if (drm_debug & DRM_UT_KMS) \
1276 NV_PRINTK(KERN_DEBUG, d, fmt, ##arg); \
1277} while (0)
1278#endif
1279#define NV_ERROR(d, fmt, arg...) NV_PRINTK(KERN_ERR, d, fmt, ##arg)
1280#define NV_INFO(d, fmt, arg...) NV_PRINTK(KERN_INFO, d, fmt, ##arg)
1281#define NV_TRACEWARN(d, fmt, arg...) NV_PRINTK(KERN_NOTICE, d, fmt, ##arg)
1282#define NV_TRACE(d, fmt, arg...) NV_PRINTK(KERN_INFO, d, fmt, ##arg)
1283#define NV_WARN(d, fmt, arg...) NV_PRINTK(KERN_WARNING, d, fmt, ##arg)
1284
1285/* nouveau_reg_debug bitmask */
1286enum {
1287 NOUVEAU_REG_DEBUG_MC = 0x1,
1288 NOUVEAU_REG_DEBUG_VIDEO = 0x2,
1289 NOUVEAU_REG_DEBUG_FB = 0x4,
1290 NOUVEAU_REG_DEBUG_EXTDEV = 0x8,
1291 NOUVEAU_REG_DEBUG_CRTC = 0x10,
1292 NOUVEAU_REG_DEBUG_RAMDAC = 0x20,
1293 NOUVEAU_REG_DEBUG_VGACRTC = 0x40,
1294 NOUVEAU_REG_DEBUG_RMVIO = 0x80,
1295 NOUVEAU_REG_DEBUG_VGAATTR = 0x100,
1296 NOUVEAU_REG_DEBUG_EVO = 0x200,
1297};
1298
1299#define NV_REG_DEBUG(type, dev, fmt, arg...) do { \
1300 if (nouveau_reg_debug & NOUVEAU_REG_DEBUG_##type) \
1301 NV_PRINTK(KERN_DEBUG, dev, "%s: " fmt, __func__, ##arg); \
1302} while (0)
1303
1304static inline bool
1305nv_two_heads(struct drm_device *dev)
1306{
1307 struct drm_nouveau_private *dev_priv = dev->dev_private;
1308 const int impl = dev->pci_device & 0x0ff0;
1309
1310 if (dev_priv->card_type >= NV_10 && impl != 0x0100 &&
1311 impl != 0x0150 && impl != 0x01a0 && impl != 0x0200)
1312 return true;
1313
1314 return false;
1315}
1316
1317static inline bool
1318nv_gf4_disp_arch(struct drm_device *dev)
1319{
1320 return nv_two_heads(dev) && (dev->pci_device & 0x0ff0) != 0x0110;
1321}
1322
1323static inline bool
1324nv_two_reg_pll(struct drm_device *dev)
1325{
1326 struct drm_nouveau_private *dev_priv = dev->dev_private;
1327 const int impl = dev->pci_device & 0x0ff0;
1328
1329 if (impl == 0x0310 || impl == 0x0340 || dev_priv->card_type >= NV_40)
1330 return true;
1331 return false;
1332}
1333
1334#define NV_SW 0x0000506e
1335#define NV_SW_DMA_SEMAPHORE 0x00000060
1336#define NV_SW_SEMAPHORE_OFFSET 0x00000064
1337#define NV_SW_SEMAPHORE_ACQUIRE 0x00000068
1338#define NV_SW_SEMAPHORE_RELEASE 0x0000006c
1339#define NV_SW_DMA_VBLSEM 0x0000018c
1340#define NV_SW_VBLSEM_OFFSET 0x00000400
1341#define NV_SW_VBLSEM_RELEASE_VALUE 0x00000404
1342#define NV_SW_VBLSEM_RELEASE 0x00000408
1343
1344#endif /* __NOUVEAU_DRV_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
new file mode 100644
index 000000000000..9f28b94e479b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -0,0 +1,92 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#ifndef __NOUVEAU_ENCODER_H__
28#define __NOUVEAU_ENCODER_H__
29
30#include "drm_encoder_slave.h"
31#include "nouveau_drv.h"
32
33#define NV_DPMS_CLEARED 0x80
34
/* Nouveau wrapper around a DRM slave encoder. */
struct nouveau_encoder {
	struct drm_encoder_slave base;	/* must stay first: container_of below */

	struct dcb_entry *dcb;		/* VBIOS display-config-block entry */
	int or;				/* output resource index */

	struct drm_display_mode mode;	/* mode last programmed on this output */
	int last_dpms;

	struct nv04_output_reg restore;	/* saved register state for restore */

	/* hw-specific disconnect hook */
	void (*disconnect)(struct nouveau_encoder *encoder);

	union {
		/* DisplayPort-only state */
		struct {
			int mc_unknown;
			int dpcd_version;
			int link_nr;	/* lane count */
			int link_bw;	/* link bandwidth */
		} dp;
	};
};
57
58static inline struct nouveau_encoder *nouveau_encoder(struct drm_encoder *enc)
59{
60 struct drm_encoder_slave *slave = to_encoder_slave(enc);
61
62 return container_of(slave, struct nouveau_encoder, base);
63}
64
65static inline struct drm_encoder *to_drm_encoder(struct nouveau_encoder *enc)
66{
67 return &enc->base.base;
68}
69
70struct nouveau_connector *
71nouveau_encoder_connector_get(struct nouveau_encoder *encoder);
72int nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry);
73int nv50_dac_create(struct drm_device *dev, struct dcb_entry *entry);
74
/* On-disk VBIOS DisplayPort encoder table header; layout fixed, hence packed. */
struct bit_displayport_encoder_table {
	uint32_t match;			/* key used to select this entry */
	uint8_t  record_nr;		/* number of following records */
	uint8_t  unknown;
	uint16_t script0;		/* VBIOS script pointers */
	uint16_t script1;
	uint16_t unknown_table;
} __attribute__ ((packed));

/* One per-lane drive-parameter record within the table above. */
struct bit_displayport_encoder_table_entry {
	uint8_t vs_level;		/* voltage-swing level */
	uint8_t pre_level;		/* pre-emphasis level */
	uint8_t reg0;
	uint8_t reg1;
	uint8_t reg2;
} __attribute__ ((packed));
91
92#endif /* __NOUVEAU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fb.h b/drivers/gpu/drm/nouveau/nouveau_fb.h
new file mode 100644
index 000000000000..4a3f31aa1949
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_fb.h
@@ -0,0 +1,47 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#ifndef __NOUVEAU_FB_H__
28#define __NOUVEAU_FB_H__
29
/* DRM framebuffer backed by a nouveau buffer object. */
struct nouveau_framebuffer {
	struct drm_framebuffer base;	/* must stay first: container_of below */
	struct nouveau_bo *nvbo;	/* backing storage */
};

/* Downcast a drm_framebuffer to its nouveau wrapper. */
static inline struct nouveau_framebuffer *
nouveau_framebuffer(struct drm_framebuffer *fb)
{
	return container_of(fb, struct nouveau_framebuffer, base);
}
40
41extern const struct drm_mode_config_funcs nouveau_mode_config_funcs;
42
43struct drm_framebuffer *
44nouveau_framebuffer_create(struct drm_device *, struct nouveau_bo *,
45 struct drm_mode_fb_cmd *);
46
47#endif /* __NOUVEAU_FB_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
new file mode 100644
index 000000000000..8e7dc1d4912a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -0,0 +1,422 @@
1/*
2 * Copyright © 2007 David Airlie
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * David Airlie
25 */
26
27#include <linux/module.h>
28#include <linux/kernel.h>
29#include <linux/errno.h>
30#include <linux/string.h>
31#include <linux/mm.h>
32#include <linux/tty.h>
33#include <linux/sysrq.h>
34#include <linux/delay.h>
35#include <linux/fb.h>
36#include <linux/init.h>
37#include <linux/screen_info.h>
38#include <linux/vga_switcheroo.h>
39
40#include "drmP.h"
41#include "drm.h"
42#include "drm_crtc.h"
43#include "drm_crtc_helper.h"
44#include "drm_fb_helper.h"
45#include "nouveau_drv.h"
46#include "nouveau_drm.h"
47#include "nouveau_crtc.h"
48#include "nouveau_fb.h"
49#include "nouveau_fbcon.h"
50#include "nouveau_dma.h"
51
52static int
53nouveau_fbcon_sync(struct fb_info *info)
54{
55 struct nouveau_fbcon_par *par = info->par;
56 struct drm_device *dev = par->dev;
57 struct drm_nouveau_private *dev_priv = dev->dev_private;
58 struct nouveau_channel *chan = dev_priv->channel;
59 int ret, i;
60
61 if (!chan || !chan->accel_done ||
62 info->state != FBINFO_STATE_RUNNING ||
63 info->flags & FBINFO_HWACCEL_DISABLED)
64 return 0;
65
66 if (RING_SPACE(chan, 4)) {
67 nouveau_fbcon_gpu_lockup(info);
68 return 0;
69 }
70
71 BEGIN_RING(chan, 0, 0x0104, 1);
72 OUT_RING(chan, 0);
73 BEGIN_RING(chan, 0, 0x0100, 1);
74 OUT_RING(chan, 0);
75 nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy + 3, 0xffffffff);
76 FIRE_RING(chan);
77
78 ret = -EBUSY;
79 for (i = 0; i < 100000; i++) {
80 if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy + 3)) {
81 ret = 0;
82 break;
83 }
84 DRM_UDELAY(1);
85 }
86
87 if (ret) {
88 nouveau_fbcon_gpu_lockup(info);
89 return 0;
90 }
91
92 chan->accel_done = false;
93 return 0;
94}
95
/* Unaccelerated fb_ops: cfb_* software drawing, used when accel is off
 * or the GPU has locked up. */
static struct fb_ops nouveau_fbcon_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = drm_fb_helper_check_var,
	.fb_set_par = drm_fb_helper_set_par,
	.fb_setcolreg = drm_fb_helper_setcolreg,
	.fb_fillrect = cfb_fillrect,
	.fb_copyarea = cfb_copyarea,
	.fb_imageblit = cfb_imageblit,
	.fb_sync = nouveau_fbcon_sync,
	.fb_pan_display = drm_fb_helper_pan_display,
	.fb_blank = drm_fb_helper_blank,
	.fb_setcmap = drm_fb_helper_setcmap,
};

/* NV04-family accelerated drawing hooks; everything else via DRM helpers. */
static struct fb_ops nv04_fbcon_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = drm_fb_helper_check_var,
	.fb_set_par = drm_fb_helper_set_par,
	.fb_setcolreg = drm_fb_helper_setcolreg,
	.fb_fillrect = nv04_fbcon_fillrect,
	.fb_copyarea = nv04_fbcon_copyarea,
	.fb_imageblit = nv04_fbcon_imageblit,
	.fb_sync = nouveau_fbcon_sync,
	.fb_pan_display = drm_fb_helper_pan_display,
	.fb_blank = drm_fb_helper_blank,
	.fb_setcmap = drm_fb_helper_setcmap,
};

/* NV50-family accelerated drawing hooks. */
static struct fb_ops nv50_fbcon_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = drm_fb_helper_check_var,
	.fb_set_par = drm_fb_helper_set_par,
	.fb_setcolreg = drm_fb_helper_setcolreg,
	.fb_fillrect = nv50_fbcon_fillrect,
	.fb_copyarea = nv50_fbcon_copyarea,
	.fb_imageblit = nv50_fbcon_imageblit,
	.fb_sync = nouveau_fbcon_sync,
	.fb_pan_display = drm_fb_helper_pan_display,
	.fb_blank = drm_fb_helper_blank,
	.fb_setcmap = drm_fb_helper_setcmap,
};
137
138static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
139 u16 blue, int regno)
140{
141 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
142
143 nv_crtc->lut.r[regno] = red;
144 nv_crtc->lut.g[regno] = green;
145 nv_crtc->lut.b[regno] = blue;
146}
147
148static void nouveau_fbcon_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
149 u16 *blue, int regno)
150{
151 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
152
153 *red = nv_crtc->lut.r[regno];
154 *green = nv_crtc->lut.g[regno];
155 *blue = nv_crtc->lut.b[regno];
156}
157
158static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
159 .gamma_set = nouveau_fbcon_gamma_set,
160 .gamma_get = nouveau_fbcon_gamma_get
161};
162
163#if defined(__i386__) || defined(__x86_64__)
164static bool
165nouveau_fbcon_has_vesafb_or_efifb(struct drm_device *dev)
166{
167 struct pci_dev *pdev = dev->pdev;
168 int ramin;
169
170 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB &&
171 screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
172 return false;
173
174 if (screen_info.lfb_base < pci_resource_start(pdev, 1))
175 goto not_fb;
176
177 if (screen_info.lfb_base + screen_info.lfb_size >=
178 pci_resource_start(pdev, 1) + pci_resource_len(pdev, 1))
179 goto not_fb;
180
181 return true;
182not_fb:
183 ramin = 2;
184 if (pci_resource_len(pdev, ramin) == 0) {
185 ramin = 3;
186 if (pci_resource_len(pdev, ramin) == 0)
187 return false;
188 }
189
190 if (screen_info.lfb_base < pci_resource_start(pdev, ramin))
191 return false;
192
193 if (screen_info.lfb_base + screen_info.lfb_size >=
194 pci_resource_start(pdev, ramin) + pci_resource_len(pdev, ramin))
195 return false;
196
197 return true;
198}
199#endif
200
201void
202nouveau_fbcon_zfill(struct drm_device *dev)
203{
204 struct drm_nouveau_private *dev_priv = dev->dev_private;
205 struct fb_info *info = dev_priv->fbdev_info;
206 struct fb_fillrect rect;
207
208 /* Clear the entire fbcon. The drm will program every connector
209 * with it's preferred mode. If the sizes differ, one display will
210 * quite likely have garbage around the console.
211 */
212 rect.dx = rect.dy = 0;
213 rect.width = info->var.xres_virtual;
214 rect.height = info->var.yres_virtual;
215 rect.color = 0;
216 rect.rop = ROP_COPY;
217 info->fbops->fb_fillrect(info, &rect);
218}
219
/*
 * drm_fb_helper callback that allocates and wires up the fbcon
 * framebuffer: VRAM buffer object, DRM framebuffer wrapper, fb_info,
 * and (optionally) per-chipset acceleration hooks.
 *
 * Called under no lock; takes dev->struct_mutex for the registration
 * phase. Returns 0 and stores the new framebuffer in *pfb, or a
 * negative errno.
 */
static int
nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
		     uint32_t fb_height, uint32_t surface_width,
		     uint32_t surface_height, uint32_t surface_depth,
		     uint32_t surface_bpp, struct drm_framebuffer **pfb)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct fb_info *info;
	struct nouveau_fbcon_par *par;
	struct drm_framebuffer *fb;
	struct nouveau_framebuffer *nouveau_fb;
	struct nouveau_bo *nvbo;
	struct drm_mode_fb_cmd mode_cmd;
	struct device *device = &dev->pdev->dev;
	int size, ret;

	mode_cmd.width = surface_width;
	mode_cmd.height = surface_height;

	/* Pitch is bytes-per-row rounded up to 256 bytes; total size is
	 * rounded up to a whole page for the backing allocation. */
	mode_cmd.bpp = surface_bpp;
	mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3);
	mode_cmd.pitch = roundup(mode_cmd.pitch, 256);
	mode_cmd.depth = surface_depth;

	size = mode_cmd.pitch * mode_cmd.height;
	size = roundup(size, PAGE_SIZE);

	/* Allocate the scanout buffer in VRAM, untiled, mappable. */
	ret = nouveau_gem_new(dev, dev_priv->channel, size, 0, TTM_PL_FLAG_VRAM,
			      0, 0x0000, false, true, &nvbo);
	if (ret) {
		NV_ERROR(dev, "failed to allocate framebuffer\n");
		goto out;
	}

	/* Pin it so scanout can't be evicted, then CPU-map it. */
	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
	if (ret) {
		NV_ERROR(dev, "failed to pin fb: %d\n", ret);
		nouveau_bo_ref(NULL, &nvbo);
		goto out;
	}

	ret = nouveau_bo_map(nvbo);
	if (ret) {
		NV_ERROR(dev, "failed to map fb: %d\n", ret);
		nouveau_bo_unpin(nvbo);
		nouveau_bo_ref(NULL, &nvbo);
		goto out;
	}

	mutex_lock(&dev->struct_mutex);

	fb = nouveau_framebuffer_create(dev, nvbo, &mode_cmd);
	if (!fb) {
		ret = -ENOMEM;
		NV_ERROR(dev, "failed to allocate fb.\n");
		goto out_unref;
	}

	list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list);

	nouveau_fb = nouveau_framebuffer(fb);
	*pfb = fb;

	/* fb_info with our private par area appended. */
	info = framebuffer_alloc(sizeof(struct nouveau_fbcon_par), device);
	if (!info) {
		ret = -ENOMEM;
		goto out_unref;
	}

	par = info->par;
	par->helper.funcs = &nouveau_fbcon_helper_funcs;
	par->helper.dev = dev;
	/* 2 CRTCs, 4 connectors per CRTC. */
	ret = drm_fb_helper_init_crtc_count(&par->helper, 2, 4);
	if (ret)
		goto out_unref;
	dev_priv->fbdev_info = info;

	strcpy(info->fix.id, "nouveaufb");
	if (nouveau_nofbaccel)
		info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_DISABLED;
	else
		info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
			      FBINFO_HWACCEL_FILLRECT |
			      FBINFO_HWACCEL_IMAGEBLIT;
	info->fbops = &nouveau_fbcon_ops;
	/* Physical address of the buffer within the BAR for smem. */
	info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset -
			       dev_priv->vm_vram_base;
	info->fix.smem_len = size;

	info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
	info->screen_size = size;

	drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
	drm_fb_helper_fill_var(info, fb, fb_width, fb_height);

	/* FIXME: we really shouldn't expose mmio space at all */
	info->fix.mmio_start = pci_resource_start(dev->pdev, 1);
	info->fix.mmio_len = pci_resource_len(dev->pdev, 1);

	/* Set aperture base/size for vesafb takeover */
#if defined(__i386__) || defined(__x86_64__)
	if (nouveau_fbcon_has_vesafb_or_efifb(dev)) {
		/* Some NVIDIA VBIOS' are stupid and decide to put the
		 * framebuffer in the middle of the PRAMIN BAR for
		 * whatever reason.  We need to know the exact lfb_base
		 * to get vesafb kicked off, and the only reliable way
		 * we have left is to find out lfb_base the same way
		 * vesafb did.
		 */
		info->aperture_base = screen_info.lfb_base;
		info->aperture_size = screen_info.lfb_size;
		if (screen_info.orig_video_isVGA == VIDEO_TYPE_VLFB)
			info->aperture_size *= 65536;
	} else
#endif
	{
		info->aperture_base = info->fix.mmio_start;
		info->aperture_size = info->fix.mmio_len;
	}

	/* Scratch pixmap for character blits. */
	info->pixmap.size = 64*1024;
	info->pixmap.buf_align = 8;
	info->pixmap.access_align = 32;
	info->pixmap.flags = FB_PIXMAP_SYSTEM;
	info->pixmap.scan_align = 1;

	fb->fbdev = info;

	par->nouveau_fb = nouveau_fb;
	par->dev = dev;

	/* Swap in accelerated fb_ops when a channel exists and accel
	 * hasn't been disabled on the command line. */
	if (dev_priv->channel && !nouveau_nofbaccel) {
		switch (dev_priv->card_type) {
		case NV_50:
			nv50_fbcon_accel_init(info);
			info->fbops = &nv50_fbcon_ops;
			break;
		default:
			nv04_fbcon_accel_init(info);
			info->fbops = &nv04_fbcon_ops;
			break;
		};
	}

	nouveau_fbcon_zfill(dev);

	/* To allow resizing without swapping buffers */
	NV_INFO(dev, "allocated %dx%d fb: 0x%lx, bo %p\n",
		nouveau_fb->base.width,
		nouveau_fb->base.height,
		nvbo->bo.offset, nvbo);

	mutex_unlock(&dev->struct_mutex);
	vga_switcheroo_client_fb_set(dev->pdev, info);
	return 0;

out_unref:
	mutex_unlock(&dev->struct_mutex);
out:
	return ret;
}
381
382int
383nouveau_fbcon_probe(struct drm_device *dev)
384{
385 NV_DEBUG_KMS(dev, "\n");
386
387 return drm_fb_helper_single_fb_probe(dev, 32, nouveau_fbcon_create);
388}
389
390int
391nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb)
392{
393 struct nouveau_framebuffer *nouveau_fb = nouveau_framebuffer(fb);
394 struct fb_info *info;
395
396 if (!fb)
397 return -EINVAL;
398
399 info = fb->fbdev;
400 if (info) {
401 struct nouveau_fbcon_par *par = info->par;
402
403 unregister_framebuffer(info);
404 nouveau_bo_unmap(nouveau_fb->nvbo);
405 drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
406 nouveau_fb->nvbo = NULL;
407 if (par)
408 drm_fb_helper_free(&par->helper);
409 framebuffer_release(info);
410 }
411
412 return 0;
413}
414
415void nouveau_fbcon_gpu_lockup(struct fb_info *info)
416{
417 struct nouveau_fbcon_par *par = info->par;
418 struct drm_device *dev = par->dev;
419
420 NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
421 info->flags |= FBINFO_HWACCEL_DISABLED;
422}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
new file mode 100644
index 000000000000..f9c34e1a8c11
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -0,0 +1,54 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#ifndef __NOUVEAU_FBCON_H__
28#define __NOUVEAU_FBCON_H__
29
30#include "drm_fb_helper.h"
31
/* Per-fb_info private data for the nouveau fbcon. */
32struct nouveau_fbcon_par {
33	struct drm_fb_helper helper;	/* must be first; DRM fb helper state */
34	struct drm_device *dev;
35	struct nouveau_framebuffer *nouveau_fb;
36};
37
38int nouveau_fbcon_probe(struct drm_device *dev);
39int nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb);
40void nouveau_fbcon_restore(void);
41void nouveau_fbcon_zfill(struct drm_device *dev);
42
/* Per-generation accelerated fbcon entry points (NV04 and NV50 paths). */
43void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
44void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
45void nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
46int nv04_fbcon_accel_init(struct fb_info *info);
47void nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
48void nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
49void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
50int nv50_fbcon_accel_init(struct fb_info *info);
51
52void nouveau_fbcon_gpu_lockup(struct fb_info *info);
53#endif /* __NOUVEAU_FBCON_H__ */
54
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
new file mode 100644
index 000000000000..faddf53ff9ed
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -0,0 +1,262 @@
1/*
2 * Copyright (C) 2007 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm.h"
29
30#include "nouveau_drv.h"
31#include "nouveau_dma.h"
32
/* True when the chip reports fence sequence numbers through a hardware
 * refcount register rather than an IRQ-updated software counter.
 * NOTE: expands 'dev_priv', which must be in scope at every use site. */
33#define USE_REFCNT (dev_priv->card_type >= NV_10)
34
/* A point in a channel's command stream.  Lives on chan->fence.pending
 * until its sequence number is acked, and is kref-refcounted because TTM
 * sync_obj pointers may outlive the pending list entry. */
35struct nouveau_fence {
36	struct nouveau_channel *channel;
37	struct kref refcount;
38	struct list_head entry;		/* link in chan->fence.pending */
39
40	uint32_t sequence;		/* value written to the ring */
41	bool signalled;			/* set once the GPU passed it */
42};
43
/* Convert TTM's opaque sync_obj cookie back to the fence it wraps. */
44static inline struct nouveau_fence *
45nouveau_fence(void *sync_obj)
46{
47	return (struct nouveau_fence *)sync_obj;
48}
49
/* kref release callback: frees the fence once the last reference drops.
 * The fence has already been removed from any pending list by then. */
50static void
51nouveau_fence_del(struct kref *ref)
52{
53	struct nouveau_fence *fence =
54		container_of(ref, struct nouveau_fence, refcount);
55
56	kfree(fence);
57}
58
/*
 * nouveau_fence_update - retire fences the GPU has passed.
 *
 * Reads the channel's current sequence (hardware refcount register on
 * NV10+, IRQ-maintained counter otherwise) and signals/removes every
 * pending fence up to and including that sequence, dropping the pending
 * list's reference on each.
 *
 * Callers in this file hold chan->fence.lock around this function;
 * the pending list walk is not otherwise protected.
 */
59void
60nouveau_fence_update(struct nouveau_channel *chan)
61{
62	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
63	struct list_head *entry, *tmp;
64	struct nouveau_fence *fence;
65	uint32_t sequence;
66
67	if (USE_REFCNT)
68		sequence = nvchan_rd32(chan, 0x48);
69	else
70		sequence = chan->fence.last_sequence_irq;
71
	/* Fast path: nothing new acked since last call. */
72	if (chan->fence.sequence_ack == sequence)
73		return;
74	chan->fence.sequence_ack = sequence;
75
76	list_for_each_safe(entry, tmp, &chan->fence.pending) {
77		fence = list_entry(entry, struct nouveau_fence, entry);
78
79		sequence = fence->sequence;
80		fence->signalled = true;
81		list_del(&fence->entry);
82		kref_put(&fence->refcount, nouveau_fence_del);
83
		/* pending is in emission order, so stop at the ack point */
84		if (sequence == chan->fence.sequence_ack)
85			break;
86	}
87}
88
/*
 * nouveau_fence_new - allocate a fence on @chan, optionally emitting it.
 *
 * On success *pfence holds one reference owned by the caller.  If @emit
 * is set and emission fails, the fence is released, *pfence is set to
 * NULL (nouveau_fence_unref NULLs the local pointer) and the error is
 * returned.
 */
89int
90nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence,
91		  bool emit)
92{
93	struct nouveau_fence *fence;
94	int ret = 0;
95
96	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
97	if (!fence)
98		return -ENOMEM;
99	kref_init(&fence->refcount);
100	fence->channel = chan;
101
102	if (emit)
103		ret = nouveau_fence_emit(fence);
104
105	if (ret)
106		nouveau_fence_unref((void *)&fence);
107	*pfence = fence;
108	return ret;
109}
110
111struct nouveau_channel *
112nouveau_fence_channel(struct nouveau_fence *fence)
113{
114 return fence ? fence->channel : NULL;
115}
116
/*
 * nouveau_fence_emit - write the fence's sequence number into the ring.
 *
 * Assigns the next channel sequence, links the fence onto the pending
 * list (taking an extra reference owned by that list) and emits a
 * software-method write the GPU executes when it reaches this point.
 */
117int
118nouveau_fence_emit(struct nouveau_fence *fence)
119{
120	struct drm_nouveau_private *dev_priv = fence->channel->dev->dev_private;
121	struct nouveau_channel *chan = fence->channel;
122	unsigned long flags;
123	int ret;
124
125	ret = RING_SPACE(chan, 2);
126	if (ret)
127		return ret;
128
	/* Sequence space is about to wrap onto the ack point; retire
	 * what we can first.  If nothing retired the counter state is
	 * unrecoverable, hence the BUG_ON. */
129	if (unlikely(chan->fence.sequence == chan->fence.sequence_ack - 1)) {
130		spin_lock_irqsave(&chan->fence.lock, flags);
131		nouveau_fence_update(chan);
132		spin_unlock_irqrestore(&chan->fence.lock, flags);
133
134		BUG_ON(chan->fence.sequence ==
135		       chan->fence.sequence_ack - 1);
136	}
137
138	fence->sequence = ++chan->fence.sequence;
139
	/* Reference owned by the pending list, dropped on retire. */
140	kref_get(&fence->refcount);
141	spin_lock_irqsave(&chan->fence.lock, flags);
142	list_add_tail(&fence->entry, &chan->fence.pending);
143	spin_unlock_irqrestore(&chan->fence.lock, flags);
144
	/* 0x0050 = hardware refcount method (NV10+), 0x0150 = sw fallback */
145	BEGIN_RING(chan, NvSubSw, USE_REFCNT ? 0x0050 : 0x0150, 1);
146	OUT_RING(chan, fence->sequence);
147	FIRE_RING(chan);
148
149	return 0;
150}
151
/* TTM sync_obj unref hook: drop one reference (tolerates *sync_obj ==
 * NULL) and NULL the caller's pointer. */
152void
153nouveau_fence_unref(void **sync_obj)
154{
155	struct nouveau_fence *fence = nouveau_fence(*sync_obj);
156
157	if (fence)
158		kref_put(&fence->refcount, nouveau_fence_del);
159	*sync_obj = NULL;
160}
161
/* TTM sync_obj ref hook: take a reference and return the same cookie.
 * NOTE(review): unlike unref, no NULL guard — callers must pass a valid
 * fence. */
162void *
163nouveau_fence_ref(void *sync_obj)
164{
165	struct nouveau_fence *fence = nouveau_fence(sync_obj);
166
167	kref_get(&fence->refcount);
168	return sync_obj;
169}
170
/*
 * nouveau_fence_signalled - TTM sync_obj "is it done?" hook.
 *
 * Fast-path check of the cached flag, then a locked retire pass to pick
 * up anything the GPU completed since the last update.
 */
171bool
172nouveau_fence_signalled(void *sync_obj, void *sync_arg)
173{
174	struct nouveau_fence *fence = nouveau_fence(sync_obj);
175	struct nouveau_channel *chan = fence->channel;
176	unsigned long flags;
177
178	if (fence->signalled)
179		return true;
180
181	spin_lock_irqsave(&chan->fence.lock, flags);
182	nouveau_fence_update(chan);
183	spin_unlock_irqrestore(&chan->fence.lock, flags);
184	return fence->signalled;
185}
186
187int
188nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
189{
190 unsigned long timeout = jiffies + (3 * DRM_HZ);
191 int ret = 0;
192
193 __set_current_state(intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
194
195 while (1) {
196 if (nouveau_fence_signalled(sync_obj, sync_arg))
197 break;
198
199 if (time_after_eq(jiffies, timeout)) {
200 ret = -EBUSY;
201 break;
202 }
203
204 if (lazy)
205 schedule_timeout(1);
206
207 if (intr && signal_pending(current)) {
208 ret = -ERESTARTSYS;
209 break;
210 }
211 }
212
213 __set_current_state(TASK_RUNNING);
214
215 return ret;
216}
217
/* TTM sync_obj flush hook: nothing to flush for nouveau fences. */
218int
219nouveau_fence_flush(void *sync_obj, void *sync_arg)
220{
221	return 0;
222}
223
/*
 * nouveau_fence_handler - IRQ-time fence retire for one channel.
 *
 * Bounds-checks @channel against the FIFO channel count, then retires
 * pending fences under the channel's fence lock.  Out-of-range or
 * unused channel ids are silently ignored.
 */
224void
225nouveau_fence_handler(struct drm_device *dev, int channel)
226{
227	struct drm_nouveau_private *dev_priv = dev->dev_private;
228	struct nouveau_channel *chan = NULL;
229
230	if (channel >= 0 && channel < dev_priv->engine.fifo.channels)
231		chan = dev_priv->fifos[channel];
232
233	if (chan) {
234		spin_lock_irq(&chan->fence.lock);
235		nouveau_fence_update(chan);
236		spin_unlock_irq(&chan->fence.lock);
237	}
238}
239
/* Initialise per-channel fence state (empty pending list + its lock).
 * Always succeeds; returns 0 for the caller's error-path convention. */
240int
241nouveau_fence_init(struct nouveau_channel *chan)
242{
243	INIT_LIST_HEAD(&chan->fence.pending);
244	spin_lock_init(&chan->fence.lock);
245	return 0;
246}
247
/*
 * nouveau_fence_fini - tear down per-channel fence state.
 *
 * Force-signals and drops every fence still pending on the dying
 * channel so waiters see them complete and the list references are
 * released.
 */
248void
249nouveau_fence_fini(struct nouveau_channel *chan)
250{
251	struct list_head *entry, *tmp;
252	struct nouveau_fence *fence;
253
254	list_for_each_safe(entry, tmp, &chan->fence.pending) {
255		fence = list_entry(entry, struct nouveau_fence, entry);
256
257		fence->signalled = true;
258		list_del(&fence->entry);
259		kref_put(&fence->refcount, nouveau_fence_del);
260	}
261}
262
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
new file mode 100644
index 000000000000..1bc0b38a5167
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -0,0 +1,837 @@
1/*
2 * Copyright (C) 2008 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26#include "drmP.h"
27#include "drm.h"
28
29#include "nouveau_drv.h"
30#include "nouveau_drm.h"
31#include "nouveau_dma.h"
32
/* Pushbuf sync is compiled out; evaluates to "no error". */
33#define nouveau_gem_pushbuf_sync(chan) 0
34
/* GEM object-creation hook: no per-object driver setup is needed here,
 * nouveau_gem_new() wires up driver_private itself. */
35int
36nouveau_gem_object_new(struct drm_gem_object *gem)
37{
38	return 0;
39}
40
41void
42nouveau_gem_object_del(struct drm_gem_object *gem)
43{
44 struct nouveau_bo *nvbo = gem->driver_private;
45 struct ttm_buffer_object *bo = &nvbo->bo;
46
47 if (!nvbo)
48 return;
49 nvbo->gem = NULL;
50
51 if (unlikely(nvbo->cpu_filp))
52 ttm_bo_synccpu_write_release(bo);
53
54 if (unlikely(nvbo->pin_refcnt)) {
55 nvbo->pin_refcnt = 1;
56 nouveau_bo_unpin(nvbo);
57 }
58
59 ttm_bo_unref(&bo);
60}
61
/*
 * nouveau_gem_new - allocate a nouveau BO and wrap it in a GEM object.
 *
 * On success *pnvbo holds the new BO with its ->gem pointer set and the
 * GEM object's driver_private pointing back at the BO.  On GEM
 * allocation failure the BO is released and -ENOMEM returned.
 */
62int
63nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
64		int size, int align, uint32_t flags, uint32_t tile_mode,
65		uint32_t tile_flags, bool no_vm, bool mappable,
66		struct nouveau_bo **pnvbo)
67{
68	struct nouveau_bo *nvbo;
69	int ret;
70
71	ret = nouveau_bo_new(dev, chan, size, align, flags, tile_mode,
72			     tile_flags, no_vm, mappable, pnvbo);
73	if (ret)
74		return ret;
75	nvbo = *pnvbo;
76
77	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
78	if (!nvbo->gem) {
79		nouveau_bo_ref(NULL, pnvbo);
80		return -ENOMEM;
81	}
82
	/* let TTM swap through the GEM object's shmem backing file */
83	nvbo->bo.persistant_swap_storage = nvbo->gem->filp;
84	nvbo->gem->driver_private = nvbo;
85	return 0;
86}
87
/* Fill a drm_nouveau_gem_info reply from the BO's current placement:
 * domain (GART vs VRAM), size, GPU offset, mmap handle (0 when the BO
 * is not CPU-mappable) and tiling state. */
88static int
89nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep)
90{
91	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
92
93	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
94		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
95	else
96		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
97
98	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
99	rep->offset = nvbo->bo.offset;
100	rep->map_handle = nvbo->mappable ? nvbo->bo.addr_space_offset : 0;
101	rep->tile_mode = nvbo->tile_mode;
102	rep->tile_flags = nvbo->tile_flags;
103	return 0;
104}
105
/* Whitelist of tile_flags values userspace may request.  The accepted
 * constants are hardware memory-type codes; presumably they match the
 * per-generation storage types the kernel knows how to set up — values
 * outside the list are rejected with an error log. */
106static bool
107nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags) {
108	switch (tile_flags) {
109	case 0x0000:
110	case 0x1800:
111	case 0x2800:
112	case 0x4800:
113	case 0x7000:
114	case 0x7400:
115	case 0x7a00:
116	case 0xe000:
117		break;
118	default:
119		NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags);
120		return false;
121	}
122
123	return true;
124}
125
/*
 * nouveau_gem_ioctl_new - DRM_NOUVEAU_GEM_NEW ioctl.
 *
 * Translates the requested domains into TTM placement flags, validates
 * tile flags, allocates the BO + GEM object and returns a handle plus
 * the object's info to userspace.
 */
126int
127nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
128		      struct drm_file *file_priv)
129{
130	struct drm_nouveau_private *dev_priv = dev->dev_private;
131	struct drm_nouveau_gem_new *req = data;
132	struct nouveau_bo *nvbo = NULL;
133	struct nouveau_channel *chan = NULL;
134	uint32_t flags = 0;
135	int ret = 0;
136
137	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
138
	/* lazily wire up TTM's unmap_mapping_range() backing mapping */
139	if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
140		dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;
141
142	if (req->channel_hint) {
143		NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel_hint,
144						     file_priv, chan);
145	}
146
147	if (req->info.domain & NOUVEAU_GEM_DOMAIN_VRAM)
148		flags |= TTM_PL_FLAG_VRAM;
149	if (req->info.domain & NOUVEAU_GEM_DOMAIN_GART)
150		flags |= TTM_PL_FLAG_TT;
151	if (!flags || req->info.domain & NOUVEAU_GEM_DOMAIN_CPU)
152		flags |= TTM_PL_FLAG_SYSTEM;
153
154	if (!nouveau_gem_tile_flags_valid(dev, req->info.tile_flags))
155		return -EINVAL;
156
157	ret = nouveau_gem_new(dev, chan, req->info.size, req->align, flags,
158			      req->info.tile_mode, req->info.tile_flags, false,
159			      (req->info.domain & NOUVEAU_GEM_DOMAIN_MAPPABLE),
160			      &nvbo);
161	if (ret)
162		return ret;
163
164	ret = nouveau_gem_info(nvbo->gem, &req->info);
165	if (ret)
166		goto out;
167
168	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
169out:
	/* drop the allocation reference; on error also drop the last ref
	 * so the object is destroyed */
170	drm_gem_object_handle_unreference_unlocked(nvbo->gem);
171
172	if (ret)
173		drm_gem_object_unreference_unlocked(nvbo->gem);
174	return ret;
175}
176
/*
 * nouveau_gem_set_domain - compute TTM placement for a pushbuf BO.
 *
 * Write domains take priority over read domains; within the resulting
 * set, the BO's current placement is preferred to avoid needless moves,
 * otherwise VRAM wins over GART.  The result is applied via
 * nouveau_bo_placement_set().
 */
177static int
178nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
179		       uint32_t write_domains, uint32_t valid_domains)
180{
181	struct nouveau_bo *nvbo = gem->driver_private;
182	struct ttm_buffer_object *bo = &nvbo->bo;
183	uint32_t domains = valid_domains &
184		(write_domains ? write_domains : read_domains);
185	uint32_t pref_flags = 0, valid_flags = 0;
186
187	if (!domains)
188		return -EINVAL;
189
190	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
191		valid_flags |= TTM_PL_FLAG_VRAM;
192
193	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
194		valid_flags |= TTM_PL_FLAG_TT;
195
	/* prefer wherever the BO already lives if that's allowed */
196	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
197	    bo->mem.mem_type == TTM_PL_VRAM)
198		pref_flags |= TTM_PL_FLAG_VRAM;
199
200	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
201		 bo->mem.mem_type == TTM_PL_TT)
202		pref_flags |= TTM_PL_FLAG_TT;
203
204	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
205		pref_flags |= TTM_PL_FLAG_VRAM;
206
207	else
208		pref_flags |= TTM_PL_FLAG_TT;
209
210	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);
211
212	return 0;
213}
214
/* Per-pushbuf bookkeeping: reserved BOs bucketed by which domains they
 * may be placed in (VRAM-only, GART-only, or either). */
215struct validate_op {
216	struct list_head vram_list;
217	struct list_head gart_list;
218	struct list_head both_list;
219};
220
/*
 * validate_fini_list - release one bucket of validated BOs.
 *
 * If @fence is non-NULL (success path) each BO's sync_obj is swapped to
 * a new reference on it, dropping the previous fence.  Any kmap taken
 * for reloc patching is undone, then the BO is unreserved and its GEM
 * lookup reference dropped.
 */
221static void
222validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
223{
224	struct list_head *entry, *tmp;
225	struct nouveau_bo *nvbo;
226
227	list_for_each_safe(entry, tmp, list) {
228		nvbo = list_entry(entry, struct nouveau_bo, entry);
229		if (likely(fence)) {
230			struct nouveau_fence *prev_fence;
231
232			spin_lock(&nvbo->bo.lock);
233			prev_fence = nvbo->bo.sync_obj;
234			nvbo->bo.sync_obj = nouveau_fence_ref(fence);
235			spin_unlock(&nvbo->bo.lock);
236			nouveau_fence_unref((void *)&prev_fence);
237		}
238
239		if (unlikely(nvbo->validate_mapped)) {
240			ttm_bo_kunmap(&nvbo->kmap);
241			nvbo->validate_mapped = false;
242		}
243
244		list_del(&nvbo->entry);
245		nvbo->reserved_by = NULL;
246		ttm_bo_unreserve(&nvbo->bo);
247		drm_gem_object_unreference(nvbo->gem);
248	}
249}
250
/* Release all three placement buckets; @fence as in validate_fini_list
 * (NULL on the error path). */
251static void
252validate_fini(struct validate_op *op, struct nouveau_fence* fence)
253{
254	validate_fini_list(&op->vram_list, fence);
255	validate_fini_list(&op->gart_list, fence);
256	validate_fini_list(&op->both_list, fence);
257}
258
/*
 * validate_init - look up and reserve every BO on the pushbuf list.
 *
 * Each buffer handle is resolved to a BO, reserved with a per-submit
 * ticket (for deadlock detection), and bucketed into op->{vram,gart,
 * both}_list by its valid domains.  On reservation contention or an
 * active user CPU grab, everything reserved so far is dropped and the
 * whole pass restarts (bounded by trycnt).  On any error all state is
 * unwound via validate_fini(op, NULL).
 */
259static int
260validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
261	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
262	      int nr_buffers, struct validate_op *op)
263{
264	struct drm_device *dev = chan->dev;
265	struct drm_nouveau_private *dev_priv = dev->dev_private;
266	uint32_t sequence;
267	int trycnt = 0;
268	int ret, i;
269
	/* ticket shared by all reservations of this submission */
270	sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
271retry:
272	if (++trycnt > 100000) {
273		NV_ERROR(dev, "%s failed and gave up.\n", __func__);
274		return -EINVAL;
275	}
276
277	for (i = 0; i < nr_buffers; i++) {
278		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
279		struct drm_gem_object *gem;
280		struct nouveau_bo *nvbo;
281
282		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
283		if (!gem) {
284			NV_ERROR(dev, "Unknown handle 0x%08x\n", b->handle);
285			validate_fini(op, NULL);
286			return -EINVAL;
287		}
288		nvbo = gem->driver_private;
289
		/* same BO listed twice by this client is a userspace bug */
290		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
291			NV_ERROR(dev, "multiple instances of buffer %d on "
292				      "validation list\n", b->handle);
293			validate_fini(op, NULL);
294			return -EINVAL;
295		}
296
297		ret = ttm_bo_reserve(&nvbo->bo, false, false, true, sequence);
298		if (ret) {
			/* contention: back everything out, wait for the
			 * holder if needed, and restart from scratch */
299			validate_fini(op, NULL);
300			if (ret == -EAGAIN)
301				ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
302			drm_gem_object_unreference(gem);
303			if (ret) {
304				NV_ERROR(dev, "fail reserve\n");
305				return ret;
306			}
307			goto retry;
308		}
309
310		b->user_priv = (uint64_t)(unsigned long)nvbo;
311		nvbo->reserved_by = file_priv;
312		nvbo->pbbo_index = i;
313		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
314		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
315			list_add_tail(&nvbo->entry, &op->both_list);
316		else
317		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
318			list_add_tail(&nvbo->entry, &op->vram_list);
319		else
320		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
321			list_add_tail(&nvbo->entry, &op->gart_list);
322		else {
323			NV_ERROR(dev, "invalid valid domains: 0x%08x\n",
324				 b->valid_domains);
			/* still enqueue so validate_fini() unwinds it */
325			list_add_tail(&nvbo->entry, &op->both_list);
326			validate_fini(op, NULL);
327			return -EINVAL;
328		}
329
330		if (unlikely(atomic_read(&nvbo->bo.cpu_writers) > 0)) {
331			validate_fini(op, NULL);
332
333			if (nvbo->cpu_filp == file_priv) {
334				NV_ERROR(dev, "bo %p mapped by process trying "
335					      "to validate it!\n", nvbo);
336				return -EINVAL;
337			}
338
339			ret = ttm_bo_wait_cpu(&nvbo->bo, false);
340			if (ret) {
341				NV_ERROR(dev, "fail wait_cpu\n");
342				return ret;
343			}
344			goto retry;
345		}
346	}
347
348	return 0;
349}
350
/*
 * validate_list - place each reserved BO and refresh presumed offsets.
 *
 * For every BO in @list: wait out a fence from a different channel,
 * compute and apply placement, validate through TTM, then — if the
 * BO moved relative to userspace's presumed offset/domain — write the
 * updated values back to the user's buffer array.
 *
 * Returns the number of buffers whose presumed state changed (so the
 * caller knows relocs must be re-applied), or a negative errno.
 */
351static int
352validate_list(struct nouveau_channel *chan, struct list_head *list,
353	      struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
354{
355	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
356				(void __force __user *)(uintptr_t)user_pbbo_ptr;
357	struct drm_device *dev = chan->dev;
358	struct nouveau_bo *nvbo;
359	int ret, relocs = 0;
360
361	list_for_each_entry(nvbo, list, entry) {
362		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
363		struct nouveau_fence *prev_fence = nvbo->bo.sync_obj;
364
		/* cross-channel dependency: idle the BO first */
365		if (prev_fence && nouveau_fence_channel(prev_fence) != chan) {
366			spin_lock(&nvbo->bo.lock);
367			ret = ttm_bo_wait(&nvbo->bo, false, false, false);
368			spin_unlock(&nvbo->bo.lock);
369			if (unlikely(ret)) {
370				NV_ERROR(dev, "fail wait other chan\n");
371				return ret;
372			}
373		}
374
375		ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
376					     b->write_domains,
377					     b->valid_domains);
378		if (unlikely(ret)) {
379			NV_ERROR(dev, "fail set_domain\n");
380			return ret;
381		}
382
383		nvbo->channel = chan;
384		ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
385				      false, false);
386		nvbo->channel = NULL;
387		if (unlikely(ret)) {
388			NV_ERROR(dev, "fail ttm_validate\n");
389			return ret;
390		}
391
		/* presumed state still accurate? then nothing to report */
392		if (nvbo->bo.offset == b->presumed.offset &&
393		    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
394		      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
395		     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
396		      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
397			continue;
398
399		if (nvbo->bo.mem.mem_type == TTM_PL_TT)
400			b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
401		else
402			b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
403		b->presumed.offset = nvbo->bo.offset;
404		b->presumed.valid = 0;
405		relocs++;
406
407		if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
408				     &b->presumed, sizeof(b->presumed)))
409			return -EFAULT;
410	}
411
412	return relocs;
413}
414
/*
 * nouveau_gem_pushbuf_validate - reserve and place all pushbuf BOs.
 *
 * Initialises the three placement buckets, reserves every buffer via
 * validate_init(), then validates each bucket in turn.  The total
 * number of buffers whose presumed offset changed is returned through
 * *apply_relocs.  Any failure after init unwinds via
 * validate_fini(op, NULL).
 */
415static int
416nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
417			     struct drm_file *file_priv,
418			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
419			     uint64_t user_buffers, int nr_buffers,
420			     struct validate_op *op, int *apply_relocs)
421{
422	struct drm_device *dev = chan->dev;
423	int ret, relocs = 0;
424
425	INIT_LIST_HEAD(&op->vram_list);
426	INIT_LIST_HEAD(&op->gart_list);
427	INIT_LIST_HEAD(&op->both_list);
428
429	if (nr_buffers == 0)
430		return 0;
431
432	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
433	if (unlikely(ret)) {
434		NV_ERROR(dev, "validate_init\n");
435		return ret;
436	}
437
438	ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
439	if (unlikely(ret < 0)) {
440		NV_ERROR(dev, "validate vram_list\n");
441		validate_fini(op, NULL);
442		return ret;
443	}
444	relocs += ret;
445
446	ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
447	if (unlikely(ret < 0)) {
448		NV_ERROR(dev, "validate gart_list\n");
449		validate_fini(op, NULL);
450		return ret;
451	}
452	relocs += ret;
453
454	ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
455	if (unlikely(ret < 0)) {
456		NV_ERROR(dev, "validate both_list\n");
457		validate_fini(op, NULL);
458		return ret;
459	}
460	relocs += ret;
461
462	*apply_relocs = relocs;
463	return 0;
464}
465
466static inline void *
467u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
468{
469 void *mem;
470 void __user *userptr = (void __force __user *)(uintptr_t)user;
471
472 mem = kmalloc(nmemb * size, GFP_KERNEL);
473 if (!mem)
474 return ERR_PTR(-ENOMEM);
475
476 if (DRM_COPY_FROM_USER(mem, userptr, nmemb * size)) {
477 kfree(mem);
478 return ERR_PTR(-EFAULT);
479 }
480
481 return mem;
482}
483
484static int
485nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
486 struct drm_nouveau_gem_pushbuf *req,
487 struct drm_nouveau_gem_pushbuf_bo *bo)
488{
489 struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
490 int ret = 0;
491 unsigned i;
492
493 reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
494 if (IS_ERR(reloc))
495 return PTR_ERR(reloc);
496
497 for (i = 0; i < req->nr_relocs; i++) {
498 struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
499 struct drm_nouveau_gem_pushbuf_bo *b;
500 struct nouveau_bo *nvbo;
501 uint32_t data;
502
503 if (unlikely(r->bo_index > req->nr_buffers)) {
504 NV_ERROR(dev, "reloc bo index invalid\n");
505 ret = -EINVAL;
506 break;
507 }
508
509 b = &bo[r->bo_index];
510 if (b->presumed.valid)
511 continue;
512
513 if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
514 NV_ERROR(dev, "reloc container bo index invalid\n");
515 ret = -EINVAL;
516 break;
517 }
518 nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;
519
520 if (unlikely(r->reloc_bo_offset + 4 >
521 nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
522 NV_ERROR(dev, "reloc outside of bo\n");
523 ret = -EINVAL;
524 break;
525 }
526
527 if (!nvbo->kmap.virtual) {
528 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
529 &nvbo->kmap);
530 if (ret) {
531 NV_ERROR(dev, "failed kmap for reloc\n");
532 break;
533 }
534 nvbo->validate_mapped = true;
535 }
536
537 if (r->flags & NOUVEAU_GEM_RELOC_LOW)
538 data = b->presumed.offset + r->data;
539 else
540 if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
541 data = (b->presumed.offset + r->data) >> 32;
542 else
543 data = r->data;
544
545 if (r->flags & NOUVEAU_GEM_RELOC_OR) {
546 if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
547 data |= r->tor;
548 else
549 data |= r->vor;
550 }
551
552 spin_lock(&nvbo->bo.lock);
553 ret = ttm_bo_wait(&nvbo->bo, false, false, false);
554 spin_unlock(&nvbo->bo.lock);
555 if (ret) {
556 NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
557 break;
558 }
559
560 nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
561 }
562
563 kfree(reloc);
564 return ret;
565}
566
/*
 * nouveau_gem_ioctl_pushbuf - DRM_NOUVEAU_GEM_PUSHBUF ioctl.
 *
 * Copies in the push/buffer arrays, validates and places all BOs,
 * applies relocations if any presumed offsets were stale, then submits
 * the push segments through whichever mechanism the channel supports:
 * indirect buffers (ib_max), CALL commands (NV20+), or JUMPs patched
 * into the pushbuf (older chips).  A fence is emitted behind the
 * submission and attached to every validated BO; suffix0/suffix1 tell
 * userspace what terminator the next submission must use.
 */
567int
568nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
569			  struct drm_file *file_priv)
570{
571	struct drm_nouveau_private *dev_priv = dev->dev_private;
572	struct drm_nouveau_gem_pushbuf *req = data;
573	struct drm_nouveau_gem_pushbuf_push *push;
574	struct drm_nouveau_gem_pushbuf_bo *bo;
575	struct nouveau_channel *chan;
576	struct validate_op op;
	/* NOTE(review): idiomatically this should be NULL, not 0 */
577	struct nouveau_fence *fence = 0;
578	int i, j, ret = 0, do_reloc = 0;
579
580	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
581	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
582
583	req->vram_available = dev_priv->fb_aper_free;
584	req->gart_available = dev_priv->gart_info.aper_free;
585	if (unlikely(req->nr_push == 0))
586		goto out_next;
587
	/* clamp user-supplied counts before u_memcpya() allocations */
588	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
589		NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n",
590			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
591		return -EINVAL;
592	}
593
594	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
595		NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n",
596			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
597		return -EINVAL;
598	}
599
600	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
601		NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n",
602			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
603		return -EINVAL;
604	}
605
606	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
607	if (IS_ERR(push))
608		return PTR_ERR(push);
609
610	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
611	if (IS_ERR(bo)) {
612		kfree(push);
613		return PTR_ERR(bo);
614	}
615
616	mutex_lock(&dev->struct_mutex);
617
618	/* Validate buffer list */
619	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
620					   req->nr_buffers, &op, &do_reloc);
621	if (ret) {
622		NV_ERROR(dev, "validate: %d\n", ret);
623		goto out;
624	}
625
626	/* Apply any relocations that are required */
627	if (do_reloc) {
628		ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo);
629		if (ret) {
630			NV_ERROR(dev, "reloc apply: %d\n", ret);
631			goto out;
632		}
633	}
634
	/* Submission path 1: NV50-style indirect buffers. */
635	if (chan->dma.ib_max) {
636		ret = nouveau_dma_wait(chan, req->nr_push + 1, 6);
637		if (ret) {
638			NV_INFO(dev, "nv50cal_space: %d\n", ret);
639			goto out;
640		}
641
642		for (i = 0; i < req->nr_push; i++) {
643			struct nouveau_bo *nvbo = (void *)(unsigned long)
644				bo[push[i].bo_index].user_priv;
645
646			nv50_dma_push(chan, nvbo, push[i].offset,
647				      push[i].length);
648		}
649	} else
	/* Submission path 2: CALL commands (low bit 2 = call). */
650	if (dev_priv->card_type >= NV_20) {
651		ret = RING_SPACE(chan, req->nr_push * 2);
652		if (ret) {
653			NV_ERROR(dev, "cal_space: %d\n", ret);
654			goto out;
655		}
656
657		for (i = 0; i < req->nr_push; i++) {
658			struct nouveau_bo *nvbo = (void *)(unsigned long)
659				bo[push[i].bo_index].user_priv;
660			struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
661
662			OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
663					push[i].offset) | 2);
664			OUT_RING(chan, 0);
665		}
666	} else {
	/* Submission path 3: patch a JUMP back into each segment. */
667		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
668		if (ret) {
669			NV_ERROR(dev, "jmp_space: %d\n", ret);
670			goto out;
671		}
672
673		for (i = 0; i < req->nr_push; i++) {
674			struct nouveau_bo *nvbo = (void *)(unsigned long)
675				bo[push[i].bo_index].user_priv;
676			struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
677			uint32_t cmd;
678
679			cmd = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
680			cmd |= 0x20000000;
			/* re-patch the trailing jump only if it differs
			 * from what userspace already wrote (suffix0) */
681			if (unlikely(cmd != req->suffix0)) {
682				if (!nvbo->kmap.virtual) {
683					ret = ttm_bo_kmap(&nvbo->bo, 0,
684							  nvbo->bo.mem.
685							  num_pages,
686							  &nvbo->kmap);
687					if (ret) {
688						WIND_RING(chan);
689						goto out;
690					}
691					nvbo->validate_mapped = true;
692				}
693
694				nouveau_bo_wr32(nvbo, (push[i].offset +
695						push[i].length - 8) / 4, cmd);
696			}
697
698			OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
699					push[i].offset) | 0x20000000);
700			OUT_RING(chan, 0);
701			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
702				OUT_RING(chan, 0);
703		}
704	}
705
706	ret = nouveau_fence_new(chan, &fence, true);
707	if (ret) {
708		NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
709		WIND_RING(chan);
710		goto out;
711	}
712
713out:
	/* fence is NULL on error: BOs are unreserved without fencing */
714	validate_fini(&op, fence);
715	nouveau_fence_unref((void**)&fence);
716	mutex_unlock(&dev->struct_mutex);
717	kfree(bo);
718	kfree(push);
719
720out_next:
721	if (chan->dma.ib_max) {
722		req->suffix0 = 0x00000000;
723		req->suffix1 = 0x00000000;
724	} else
725	if (dev_priv->card_type >= NV_20) {
726		req->suffix0 = 0x00020000;
727		req->suffix1 = 0x00000000;
728	} else {
729		req->suffix0 = 0x20000000 |
730			      (chan->pushbuf_base + ((chan->dma.cur + 2) << 2));
731		req->suffix1 = 0x00000000;
732	}
733
734	return ret;
735}
736
737static inline uint32_t
738domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
739{
740 uint32_t flags = 0;
741
742 if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
743 flags |= TTM_PL_FLAG_VRAM;
744 if (domain & NOUVEAU_GEM_DOMAIN_GART)
745 flags |= TTM_PL_FLAG_TT;
746
747 return flags;
748}
749
/*
 * nouveau_gem_ioctl_cpu_prep - DRM_NOUVEAU_GEM_CPU_PREP ioctl.
 *
 * Prepares a BO for CPU access.  NOBLOCK just waits for GPU idle;
 * otherwise a synccpu write grab is taken and the grabbing client is
 * recorded in cpu_filp (released via the CPU_FINI ioctl).  NOWAIT turns
 * both waits into non-blocking attempts.
 */
750int
751nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
752			   struct drm_file *file_priv)
753{
754	struct drm_nouveau_gem_cpu_prep *req = data;
755	struct drm_gem_object *gem;
756	struct nouveau_bo *nvbo;
757	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
758	int ret = -EINVAL;
759
760	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
761
762	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
763	if (!gem)
764		return ret;
765	nvbo = nouveau_gem_object(gem);
766
767	if (nvbo->cpu_filp) {
		/* same client grabbing again: returns -EINVAL via 'ret' */
768		if (nvbo->cpu_filp == file_priv)
769			goto out;
770
771		ret = ttm_bo_wait_cpu(&nvbo->bo, no_wait);
772		if (ret)
773			goto out;
774	}
775
776	if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) {
777		spin_lock(&nvbo->bo.lock);
778		ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
779		spin_unlock(&nvbo->bo.lock);
780	} else {
781		ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
782		if (ret == 0)
783			nvbo->cpu_filp = file_priv;
784	}
785
786out:
787	drm_gem_object_unreference_unlocked(gem);
788	return ret;
789}
790
/*
 * nouveau_gem_ioctl_cpu_fini - DRM_NOUVEAU_GEM_CPU_FINI ioctl.
 *
 * Releases the synccpu write grab taken by CPU_PREP.  Only the client
 * that holds the grab may release it; otherwise -EINVAL.
 */
791int
792nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
793			   struct drm_file *file_priv)
794{
795	struct drm_nouveau_gem_cpu_prep *req = data;
796	struct drm_gem_object *gem;
797	struct nouveau_bo *nvbo;
798	int ret = -EINVAL;
799
800	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
801
802	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
803	if (!gem)
804		return ret;
805	nvbo = nouveau_gem_object(gem);
806
807	if (nvbo->cpu_filp != file_priv)
808		goto out;
809	nvbo->cpu_filp = NULL;
810
811	ttm_bo_synccpu_write_release(&nvbo->bo);
812	ret = 0;
813
814out:
815	drm_gem_object_unreference_unlocked(gem);
816	return ret;
817}
818
/*
 * nouveau_gem_ioctl_info - DRM_NOUVEAU_GEM_INFO ioctl.
 *
 * Looks up the handle and reports the BO's current domain, size,
 * offset, map handle and tiling state via nouveau_gem_info().
 */
819int
820nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
821		       struct drm_file *file_priv)
822{
823	struct drm_nouveau_gem_info *req = data;
824	struct drm_gem_object *gem;
825	int ret;
826
827	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
828
829	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
830	if (!gem)
831		return -EINVAL;
832
833	ret = nouveau_gem_info(gem, req);
834	drm_gem_object_unreference_unlocked(gem);
835	return ret;
836}
837
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.c b/drivers/gpu/drm/nouveau/nouveau_grctx.c
new file mode 100644
index 000000000000..32f0e495464c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_grctx.c
@@ -0,0 +1,162 @@
1/*
2 * Copyright 2009 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <linux/firmware.h>
26#include <linux/slab.h>
27
28#include "drmP.h"
29#include "nouveau_drv.h"
30
31struct nouveau_ctxprog {
32 uint32_t signature;
33 uint8_t version;
34 uint16_t length;
35 uint32_t data[];
36} __attribute__ ((packed));
37
38struct nouveau_ctxvals {
39 uint32_t signature;
40 uint8_t version;
41 uint32_t length;
42 struct {
43 uint32_t offset;
44 uint32_t value;
45 } data[];
46} __attribute__ ((packed));
47
48int
49nouveau_grctx_prog_load(struct drm_device *dev)
50{
51 struct drm_nouveau_private *dev_priv = dev->dev_private;
52 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
53 const int chipset = dev_priv->chipset;
54 const struct firmware *fw;
55 const struct nouveau_ctxprog *cp;
56 const struct nouveau_ctxvals *cv;
57 char name[32];
58 int ret, i;
59
60 if (pgraph->accel_blocked)
61 return -ENODEV;
62
63 if (!pgraph->ctxprog) {
64 sprintf(name, "nouveau/nv%02x.ctxprog", chipset);
65 ret = request_firmware(&fw, name, &dev->pdev->dev);
66 if (ret) {
67 NV_ERROR(dev, "No ctxprog for NV%02x\n", chipset);
68 return ret;
69 }
70
71 pgraph->ctxprog = kmalloc(fw->size, GFP_KERNEL);
72 if (!pgraph->ctxprog) {
73 NV_ERROR(dev, "OOM copying ctxprog\n");
74 release_firmware(fw);
75 return -ENOMEM;
76 }
77 memcpy(pgraph->ctxprog, fw->data, fw->size);
78
79 cp = pgraph->ctxprog;
80 if (le32_to_cpu(cp->signature) != 0x5043564e ||
81 cp->version != 0 ||
82 le16_to_cpu(cp->length) != ((fw->size - 7) / 4)) {
83 NV_ERROR(dev, "ctxprog invalid\n");
84 release_firmware(fw);
85 nouveau_grctx_fini(dev);
86 return -EINVAL;
87 }
88 release_firmware(fw);
89 }
90
91 if (!pgraph->ctxvals) {
92 sprintf(name, "nouveau/nv%02x.ctxvals", chipset);
93 ret = request_firmware(&fw, name, &dev->pdev->dev);
94 if (ret) {
95 NV_ERROR(dev, "No ctxvals for NV%02x\n", chipset);
96 nouveau_grctx_fini(dev);
97 return ret;
98 }
99
100 pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL);
101 if (!pgraph->ctxvals) {
102 NV_ERROR(dev, "OOM copying ctxvals\n");
103 release_firmware(fw);
104 nouveau_grctx_fini(dev);
105 return -ENOMEM;
106 }
107 memcpy(pgraph->ctxvals, fw->data, fw->size);
108
109 cv = (void *)pgraph->ctxvals;
110 if (le32_to_cpu(cv->signature) != 0x5643564e ||
111 cv->version != 0 ||
112 le32_to_cpu(cv->length) != ((fw->size - 9) / 8)) {
113 NV_ERROR(dev, "ctxvals invalid\n");
114 release_firmware(fw);
115 nouveau_grctx_fini(dev);
116 return -EINVAL;
117 }
118 release_firmware(fw);
119 }
120
121 cp = pgraph->ctxprog;
122
123 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
124 for (i = 0; i < le16_to_cpu(cp->length); i++)
125 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA,
126 le32_to_cpu(cp->data[i]));
127
128 return 0;
129}
130
131void
132nouveau_grctx_fini(struct drm_device *dev)
133{
134 struct drm_nouveau_private *dev_priv = dev->dev_private;
135 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
136
137 if (pgraph->ctxprog) {
138 kfree(pgraph->ctxprog);
139 pgraph->ctxprog = NULL;
140 }
141
142 if (pgraph->ctxvals) {
143 kfree(pgraph->ctxprog);
144 pgraph->ctxvals = NULL;
145 }
146}
147
148void
149nouveau_grctx_vals_load(struct drm_device *dev, struct nouveau_gpuobj *ctx)
150{
151 struct drm_nouveau_private *dev_priv = dev->dev_private;
152 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
153 struct nouveau_ctxvals *cv = pgraph->ctxvals;
154 int i;
155
156 if (!cv)
157 return;
158
159 for (i = 0; i < le32_to_cpu(cv->length); i++)
160 nv_wo32(dev, ctx, le32_to_cpu(cv->data[i].offset),
161 le32_to_cpu(cv->data[i].value));
162}
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.h b/drivers/gpu/drm/nouveau/nouveau_grctx.h
new file mode 100644
index 000000000000..5d39c4ce8006
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_grctx.h
@@ -0,0 +1,133 @@
1#ifndef __NOUVEAU_GRCTX_H__
2#define __NOUVEAU_GRCTX_H__
3
4struct nouveau_grctx {
5 struct drm_device *dev;
6
7 enum {
8 NOUVEAU_GRCTX_PROG,
9 NOUVEAU_GRCTX_VALS
10 } mode;
11 void *data;
12
13 uint32_t ctxprog_max;
14 uint32_t ctxprog_len;
15 uint32_t ctxprog_reg;
16 int ctxprog_label[32];
17 uint32_t ctxvals_pos;
18 uint32_t ctxvals_base;
19};
20
21#ifdef CP_CTX
22static inline void
23cp_out(struct nouveau_grctx *ctx, uint32_t inst)
24{
25 uint32_t *ctxprog = ctx->data;
26
27 if (ctx->mode != NOUVEAU_GRCTX_PROG)
28 return;
29
30 BUG_ON(ctx->ctxprog_len == ctx->ctxprog_max);
31 ctxprog[ctx->ctxprog_len++] = inst;
32}
33
34static inline void
35cp_lsr(struct nouveau_grctx *ctx, uint32_t val)
36{
37 cp_out(ctx, CP_LOAD_SR | val);
38}
39
40static inline void
41cp_ctx(struct nouveau_grctx *ctx, uint32_t reg, uint32_t length)
42{
43 ctx->ctxprog_reg = (reg - 0x00400000) >> 2;
44
45 ctx->ctxvals_base = ctx->ctxvals_pos;
46 ctx->ctxvals_pos = ctx->ctxvals_base + length;
47
48 if (length > (CP_CTX_COUNT >> CP_CTX_COUNT_SHIFT)) {
49 cp_lsr(ctx, length);
50 length = 0;
51 }
52
53 cp_out(ctx, CP_CTX | (length << CP_CTX_COUNT_SHIFT) | ctx->ctxprog_reg);
54}
55
56static inline void
57cp_name(struct nouveau_grctx *ctx, int name)
58{
59 uint32_t *ctxprog = ctx->data;
60 int i;
61
62 if (ctx->mode != NOUVEAU_GRCTX_PROG)
63 return;
64
65 ctx->ctxprog_label[name] = ctx->ctxprog_len;
66 for (i = 0; i < ctx->ctxprog_len; i++) {
67 if ((ctxprog[i] & 0xfff00000) != 0xff400000)
68 continue;
69 if ((ctxprog[i] & CP_BRA_IP) != ((name) << CP_BRA_IP_SHIFT))
70 continue;
71 ctxprog[i] = (ctxprog[i] & 0x00ff00ff) |
72 (ctx->ctxprog_len << CP_BRA_IP_SHIFT);
73 }
74}
75
76static inline void
77_cp_bra(struct nouveau_grctx *ctx, u32 mod, int flag, int state, int name)
78{
79 int ip = 0;
80
81 if (mod != 2) {
82 ip = ctx->ctxprog_label[name] << CP_BRA_IP_SHIFT;
83 if (ip == 0)
84 ip = 0xff000000 | (name << CP_BRA_IP_SHIFT);
85 }
86
87 cp_out(ctx, CP_BRA | (mod << 18) | ip | flag |
88 (state ? 0 : CP_BRA_IF_CLEAR));
89}
90#define cp_bra(c,f,s,n) _cp_bra((c), 0, CP_FLAG_##f, CP_FLAG_##f##_##s, n)
91#ifdef CP_BRA_MOD
92#define cp_cal(c,f,s,n) _cp_bra((c), 1, CP_FLAG_##f, CP_FLAG_##f##_##s, n)
93#define cp_ret(c,f,s) _cp_bra((c), 2, CP_FLAG_##f, CP_FLAG_##f##_##s, 0)
94#endif
95
96static inline void
97_cp_wait(struct nouveau_grctx *ctx, int flag, int state)
98{
99 cp_out(ctx, CP_WAIT | flag | (state ? CP_WAIT_SET : 0));
100}
101#define cp_wait(c,f,s) _cp_wait((c), CP_FLAG_##f, CP_FLAG_##f##_##s)
102
103static inline void
104_cp_set(struct nouveau_grctx *ctx, int flag, int state)
105{
106 cp_out(ctx, CP_SET | flag | (state ? CP_SET_1 : 0));
107}
108#define cp_set(c,f,s) _cp_set((c), CP_FLAG_##f, CP_FLAG_##f##_##s)
109
110static inline void
111cp_pos(struct nouveau_grctx *ctx, int offset)
112{
113 ctx->ctxvals_pos = offset;
114 ctx->ctxvals_base = ctx->ctxvals_pos;
115
116 cp_lsr(ctx, ctx->ctxvals_pos);
117 cp_out(ctx, CP_SET_CONTEXT_POINTER);
118}
119
120static inline void
121gr_def(struct nouveau_grctx *ctx, uint32_t reg, uint32_t val)
122{
123 if (ctx->mode != NOUVEAU_GRCTX_VALS)
124 return;
125
126 reg = (reg - 0x00400000) / 4;
127 reg = (reg - ctx->ctxprog_reg) + ctx->ctxvals_base;
128
129 nv_wo32(ctx->dev, ctx->data, reg, val);
130}
131#endif
132
133#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c
new file mode 100644
index 000000000000..7855b35effc3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.c
@@ -0,0 +1,1080 @@
1/*
2 * Copyright 2006 Dave Airlie
3 * Copyright 2007 Maarten Maathuis
4 * Copyright 2007-2009 Stuart Bennett
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
20 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
21 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_hw.h"
28
29#define CHIPSET_NFORCE 0x01a0
30#define CHIPSET_NFORCE2 0x01f0
31
32/*
33 * misc hw access wrappers/control functions
34 */
35
36void
37NVWriteVgaSeq(struct drm_device *dev, int head, uint8_t index, uint8_t value)
38{
39 NVWritePRMVIO(dev, head, NV_PRMVIO_SRX, index);
40 NVWritePRMVIO(dev, head, NV_PRMVIO_SR, value);
41}
42
43uint8_t
44NVReadVgaSeq(struct drm_device *dev, int head, uint8_t index)
45{
46 NVWritePRMVIO(dev, head, NV_PRMVIO_SRX, index);
47 return NVReadPRMVIO(dev, head, NV_PRMVIO_SR);
48}
49
50void
51NVWriteVgaGr(struct drm_device *dev, int head, uint8_t index, uint8_t value)
52{
53 NVWritePRMVIO(dev, head, NV_PRMVIO_GRX, index);
54 NVWritePRMVIO(dev, head, NV_PRMVIO_GX, value);
55}
56
57uint8_t
58NVReadVgaGr(struct drm_device *dev, int head, uint8_t index)
59{
60 NVWritePRMVIO(dev, head, NV_PRMVIO_GRX, index);
61 return NVReadPRMVIO(dev, head, NV_PRMVIO_GX);
62}
63
64/* CR44 takes values 0 (head A), 3 (head B) and 4 (heads tied)
65 * it affects only the 8 bit vga io regs, which we access using mmio at
66 * 0xc{0,2}3c*, 0x60{1,3}3*, and 0x68{1,3}3d*
67 * in general, the set value of cr44 does not matter: reg access works as
68 * expected and values can be set for the appropriate head by using a 0x2000
69 * offset as required
70 * however:
71 * a) pre nv40, the head B range of PRMVIO regs at 0xc23c* was not exposed and
72 * cr44 must be set to 0 or 3 for accessing values on the correct head
73 * through the common 0xc03c* addresses
74 * b) in tied mode (4) head B is programmed to the values set on head A, and
75 * access using the head B addresses can have strange results, ergo we leave
76 * tied mode in init once we know to what cr44 should be restored on exit
77 *
78 * the owner parameter is slightly abused:
79 * 0 and 1 are treated as head values and so the set value is (owner * 3)
80 * other values are treated as literal values to set
81 */
82void
83NVSetOwner(struct drm_device *dev, int owner)
84{
85 struct drm_nouveau_private *dev_priv = dev->dev_private;
86
87 if (owner == 1)
88 owner *= 3;
89
90 if (dev_priv->chipset == 0x11) {
91 /* This might seem stupid, but the blob does it and
92 * omitting it often locks the system up.
93 */
94 NVReadVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX);
95 NVReadVgaCrtc(dev, 1, NV_CIO_SR_LOCK_INDEX);
96 }
97
98 /* CR44 is always changed on CRTC0 */
99 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_44, owner);
100
101 if (dev_priv->chipset == 0x11) { /* set me harder */
102 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner);
103 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner);
104 }
105}
106
107void
108NVBlankScreen(struct drm_device *dev, int head, bool blank)
109{
110 unsigned char seq1;
111
112 if (nv_two_heads(dev))
113 NVSetOwner(dev, head);
114
115 seq1 = NVReadVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX);
116
117 NVVgaSeqReset(dev, head, true);
118 if (blank)
119 NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 | 0x20);
120 else
121 NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 & ~0x20);
122 NVVgaSeqReset(dev, head, false);
123}
124
125/*
126 * PLL setting
127 */
128
129static int
130powerctrl_1_shift(int chip_version, int reg)
131{
132 int shift = -4;
133
134 if (chip_version < 0x17 || chip_version == 0x1a || chip_version == 0x20)
135 return shift;
136
137 switch (reg) {
138 case NV_RAMDAC_VPLL2:
139 shift += 4;
140 case NV_PRAMDAC_VPLL_COEFF:
141 shift += 4;
142 case NV_PRAMDAC_MPLL_COEFF:
143 shift += 4;
144 case NV_PRAMDAC_NVPLL_COEFF:
145 shift += 4;
146 }
147
148 /*
149 * the shift for vpll regs is only used for nv3x chips with a single
150 * stage pll
151 */
152 if (shift > 4 && (chip_version < 0x32 || chip_version == 0x35 ||
153 chip_version == 0x36 || chip_version >= 0x40))
154 shift = -4;
155
156 return shift;
157}
158
159static void
160setPLL_single(struct drm_device *dev, uint32_t reg, struct nouveau_pll_vals *pv)
161{
162 struct drm_nouveau_private *dev_priv = dev->dev_private;
163 int chip_version = dev_priv->vbios.chip_version;
164 uint32_t oldpll = NVReadRAMDAC(dev, 0, reg);
165 int oldN = (oldpll >> 8) & 0xff, oldM = oldpll & 0xff;
166 uint32_t pll = (oldpll & 0xfff80000) | pv->log2P << 16 | pv->NM1;
167 uint32_t saved_powerctrl_1 = 0;
168 int shift_powerctrl_1 = powerctrl_1_shift(chip_version, reg);
169
170 if (oldpll == pll)
171 return; /* already set */
172
173 if (shift_powerctrl_1 >= 0) {
174 saved_powerctrl_1 = nvReadMC(dev, NV_PBUS_POWERCTRL_1);
175 nvWriteMC(dev, NV_PBUS_POWERCTRL_1,
176 (saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) |
177 1 << shift_powerctrl_1);
178 }
179
180 if (oldM && pv->M1 && (oldN / oldM < pv->N1 / pv->M1))
181 /* upclock -- write new post divider first */
182 NVWriteRAMDAC(dev, 0, reg, pv->log2P << 16 | (oldpll & 0xffff));
183 else
184 /* downclock -- write new NM first */
185 NVWriteRAMDAC(dev, 0, reg, (oldpll & 0xffff0000) | pv->NM1);
186
187 if (chip_version < 0x17 && chip_version != 0x11)
188 /* wait a bit on older chips */
189 msleep(64);
190 NVReadRAMDAC(dev, 0, reg);
191
192 /* then write the other half as well */
193 NVWriteRAMDAC(dev, 0, reg, pll);
194
195 if (shift_powerctrl_1 >= 0)
196 nvWriteMC(dev, NV_PBUS_POWERCTRL_1, saved_powerctrl_1);
197}
198
199static uint32_t
200new_ramdac580(uint32_t reg1, bool ss, uint32_t ramdac580)
201{
202 bool head_a = (reg1 == NV_PRAMDAC_VPLL_COEFF);
203
204 if (ss) /* single stage pll mode */
205 ramdac580 |= head_a ? NV_RAMDAC_580_VPLL1_ACTIVE :
206 NV_RAMDAC_580_VPLL2_ACTIVE;
207 else
208 ramdac580 &= head_a ? ~NV_RAMDAC_580_VPLL1_ACTIVE :
209 ~NV_RAMDAC_580_VPLL2_ACTIVE;
210
211 return ramdac580;
212}
213
214static void
215setPLL_double_highregs(struct drm_device *dev, uint32_t reg1,
216 struct nouveau_pll_vals *pv)
217{
218 struct drm_nouveau_private *dev_priv = dev->dev_private;
219 int chip_version = dev_priv->vbios.chip_version;
220 bool nv3035 = chip_version == 0x30 || chip_version == 0x35;
221 uint32_t reg2 = reg1 + ((reg1 == NV_RAMDAC_VPLL2) ? 0x5c : 0x70);
222 uint32_t oldpll1 = NVReadRAMDAC(dev, 0, reg1);
223 uint32_t oldpll2 = !nv3035 ? NVReadRAMDAC(dev, 0, reg2) : 0;
224 uint32_t pll1 = (oldpll1 & 0xfff80000) | pv->log2P << 16 | pv->NM1;
225 uint32_t pll2 = (oldpll2 & 0x7fff0000) | 1 << 31 | pv->NM2;
226 uint32_t oldramdac580 = 0, ramdac580 = 0;
227 bool single_stage = !pv->NM2 || pv->N2 == pv->M2; /* nv41+ only */
228 uint32_t saved_powerctrl_1 = 0, savedc040 = 0;
229 int shift_powerctrl_1 = powerctrl_1_shift(chip_version, reg1);
230
231 /* model specific additions to generic pll1 and pll2 set up above */
232 if (nv3035) {
233 pll1 = (pll1 & 0xfcc7ffff) | (pv->N2 & 0x18) << 21 |
234 (pv->N2 & 0x7) << 19 | 8 << 4 | (pv->M2 & 7) << 4;
235 pll2 = 0;
236 }
237 if (chip_version > 0x40 && reg1 >= NV_PRAMDAC_VPLL_COEFF) { /* !nv40 */
238 oldramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580);
239 ramdac580 = new_ramdac580(reg1, single_stage, oldramdac580);
240 if (oldramdac580 != ramdac580)
241 oldpll1 = ~0; /* force mismatch */
242 if (single_stage)
243 /* magic value used by nvidia in single stage mode */
244 pll2 |= 0x011f;
245 }
246 if (chip_version > 0x70)
247 /* magic bits set by the blob (but not the bios) on g71-73 */
248 pll1 = (pll1 & 0x7fffffff) | (single_stage ? 0x4 : 0xc) << 28;
249
250 if (oldpll1 == pll1 && oldpll2 == pll2)
251 return; /* already set */
252
253 if (shift_powerctrl_1 >= 0) {
254 saved_powerctrl_1 = nvReadMC(dev, NV_PBUS_POWERCTRL_1);
255 nvWriteMC(dev, NV_PBUS_POWERCTRL_1,
256 (saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) |
257 1 << shift_powerctrl_1);
258 }
259
260 if (chip_version >= 0x40) {
261 int shift_c040 = 14;
262
263 switch (reg1) {
264 case NV_PRAMDAC_MPLL_COEFF:
265 shift_c040 += 2;
266 case NV_PRAMDAC_NVPLL_COEFF:
267 shift_c040 += 2;
268 case NV_RAMDAC_VPLL2:
269 shift_c040 += 2;
270 case NV_PRAMDAC_VPLL_COEFF:
271 shift_c040 += 2;
272 }
273
274 savedc040 = nvReadMC(dev, 0xc040);
275 if (shift_c040 != 14)
276 nvWriteMC(dev, 0xc040, savedc040 & ~(3 << shift_c040));
277 }
278
279 if (oldramdac580 != ramdac580)
280 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_580, ramdac580);
281
282 if (!nv3035)
283 NVWriteRAMDAC(dev, 0, reg2, pll2);
284 NVWriteRAMDAC(dev, 0, reg1, pll1);
285
286 if (shift_powerctrl_1 >= 0)
287 nvWriteMC(dev, NV_PBUS_POWERCTRL_1, saved_powerctrl_1);
288 if (chip_version >= 0x40)
289 nvWriteMC(dev, 0xc040, savedc040);
290}
291
292static void
293setPLL_double_lowregs(struct drm_device *dev, uint32_t NMNMreg,
294 struct nouveau_pll_vals *pv)
295{
296 /* When setting PLLs, there is a merry game of disabling and enabling
297 * various bits of hardware during the process. This function is a
298 * synthesis of six nv4x traces, nearly each card doing a subtly
299 * different thing. With luck all the necessary bits for each card are
300 * combined herein. Without luck it deviates from each card's formula
301 * so as to not work on any :)
302 */
303
304 uint32_t Preg = NMNMreg - 4;
305 bool mpll = Preg == 0x4020;
306 uint32_t oldPval = nvReadMC(dev, Preg);
307 uint32_t NMNM = pv->NM2 << 16 | pv->NM1;
308 uint32_t Pval = (oldPval & (mpll ? ~(0x11 << 16) : ~(1 << 16))) |
309 0xc << 28 | pv->log2P << 16;
310 uint32_t saved4600 = 0;
311 /* some cards have different maskc040s */
312 uint32_t maskc040 = ~(3 << 14), savedc040;
313 bool single_stage = !pv->NM2 || pv->N2 == pv->M2;
314
315 if (nvReadMC(dev, NMNMreg) == NMNM && (oldPval & 0xc0070000) == Pval)
316 return;
317
318 if (Preg == 0x4000)
319 maskc040 = ~0x333;
320 if (Preg == 0x4058)
321 maskc040 = ~(0xc << 24);
322
323 if (mpll) {
324 struct pll_lims pll_lim;
325 uint8_t Pval2;
326
327 if (get_pll_limits(dev, Preg, &pll_lim))
328 return;
329
330 Pval2 = pv->log2P + pll_lim.log2p_bias;
331 if (Pval2 > pll_lim.max_log2p)
332 Pval2 = pll_lim.max_log2p;
333 Pval |= 1 << 28 | Pval2 << 20;
334
335 saved4600 = nvReadMC(dev, 0x4600);
336 nvWriteMC(dev, 0x4600, saved4600 | 8 << 28);
337 }
338 if (single_stage)
339 Pval |= mpll ? 1 << 12 : 1 << 8;
340
341 nvWriteMC(dev, Preg, oldPval | 1 << 28);
342 nvWriteMC(dev, Preg, Pval & ~(4 << 28));
343 if (mpll) {
344 Pval |= 8 << 20;
345 nvWriteMC(dev, 0x4020, Pval & ~(0xc << 28));
346 nvWriteMC(dev, 0x4038, Pval & ~(0xc << 28));
347 }
348
349 savedc040 = nvReadMC(dev, 0xc040);
350 nvWriteMC(dev, 0xc040, savedc040 & maskc040);
351
352 nvWriteMC(dev, NMNMreg, NMNM);
353 if (NMNMreg == 0x4024)
354 nvWriteMC(dev, 0x403c, NMNM);
355
356 nvWriteMC(dev, Preg, Pval);
357 if (mpll) {
358 Pval &= ~(8 << 20);
359 nvWriteMC(dev, 0x4020, Pval);
360 nvWriteMC(dev, 0x4038, Pval);
361 nvWriteMC(dev, 0x4600, saved4600);
362 }
363
364 nvWriteMC(dev, 0xc040, savedc040);
365
366 if (mpll) {
367 nvWriteMC(dev, 0x4020, Pval & ~(1 << 28));
368 nvWriteMC(dev, 0x4038, Pval & ~(1 << 28));
369 }
370}
371
372void
373nouveau_hw_setpll(struct drm_device *dev, uint32_t reg1,
374 struct nouveau_pll_vals *pv)
375{
376 struct drm_nouveau_private *dev_priv = dev->dev_private;
377 int cv = dev_priv->vbios.chip_version;
378
379 if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 ||
380 cv >= 0x40) {
381 if (reg1 > 0x405c)
382 setPLL_double_highregs(dev, reg1, pv);
383 else
384 setPLL_double_lowregs(dev, reg1, pv);
385 } else
386 setPLL_single(dev, reg1, pv);
387}
388
389/*
390 * PLL getting
391 */
392
393static void
394nouveau_hw_decode_pll(struct drm_device *dev, uint32_t reg1, uint32_t pll1,
395 uint32_t pll2, struct nouveau_pll_vals *pllvals)
396{
397 struct drm_nouveau_private *dev_priv = dev->dev_private;
398
399 /* to force parsing as single stage (i.e. nv40 vplls) pass pll2 as 0 */
400
401 /* log2P is & 0x7 as never more than 7, and nv30/35 only uses 3 bits */
402 pllvals->log2P = (pll1 >> 16) & 0x7;
403 pllvals->N2 = pllvals->M2 = 1;
404
405 if (reg1 <= 0x405c) {
406 pllvals->NM1 = pll2 & 0xffff;
407 /* single stage NVPLL and VPLLs use 1 << 8, MPLL uses 1 << 12 */
408 if (!(pll1 & 0x1100))
409 pllvals->NM2 = pll2 >> 16;
410 } else {
411 pllvals->NM1 = pll1 & 0xffff;
412 if (nv_two_reg_pll(dev) && pll2 & NV31_RAMDAC_ENABLE_VCO2)
413 pllvals->NM2 = pll2 & 0xffff;
414 else if (dev_priv->chipset == 0x30 || dev_priv->chipset == 0x35) {
415 pllvals->M1 &= 0xf; /* only 4 bits */
416 if (pll1 & NV30_RAMDAC_ENABLE_VCO2) {
417 pllvals->M2 = (pll1 >> 4) & 0x7;
418 pllvals->N2 = ((pll1 >> 21) & 0x18) |
419 ((pll1 >> 19) & 0x7);
420 }
421 }
422 }
423}
424
425int
426nouveau_hw_get_pllvals(struct drm_device *dev, enum pll_types plltype,
427 struct nouveau_pll_vals *pllvals)
428{
429 struct drm_nouveau_private *dev_priv = dev->dev_private;
430 const uint32_t nv04_regs[MAX_PLL_TYPES] = { NV_PRAMDAC_NVPLL_COEFF,
431 NV_PRAMDAC_MPLL_COEFF,
432 NV_PRAMDAC_VPLL_COEFF,
433 NV_RAMDAC_VPLL2 };
434 const uint32_t nv40_regs[MAX_PLL_TYPES] = { 0x4000,
435 0x4020,
436 NV_PRAMDAC_VPLL_COEFF,
437 NV_RAMDAC_VPLL2 };
438 uint32_t reg1, pll1, pll2 = 0;
439 struct pll_lims pll_lim;
440 int ret;
441
442 if (dev_priv->card_type < NV_40)
443 reg1 = nv04_regs[plltype];
444 else
445 reg1 = nv40_regs[plltype];
446
447 pll1 = nvReadMC(dev, reg1);
448
449 if (reg1 <= 0x405c)
450 pll2 = nvReadMC(dev, reg1 + 4);
451 else if (nv_two_reg_pll(dev)) {
452 uint32_t reg2 = reg1 + (reg1 == NV_RAMDAC_VPLL2 ? 0x5c : 0x70);
453
454 pll2 = nvReadMC(dev, reg2);
455 }
456
457 if (dev_priv->card_type == 0x40 && reg1 >= NV_PRAMDAC_VPLL_COEFF) {
458 uint32_t ramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580);
459
460 /* check whether vpll has been forced into single stage mode */
461 if (reg1 == NV_PRAMDAC_VPLL_COEFF) {
462 if (ramdac580 & NV_RAMDAC_580_VPLL1_ACTIVE)
463 pll2 = 0;
464 } else
465 if (ramdac580 & NV_RAMDAC_580_VPLL2_ACTIVE)
466 pll2 = 0;
467 }
468
469 nouveau_hw_decode_pll(dev, reg1, pll1, pll2, pllvals);
470
471 ret = get_pll_limits(dev, plltype, &pll_lim);
472 if (ret)
473 return ret;
474
475 pllvals->refclk = pll_lim.refclk;
476
477 return 0;
478}
479
480int
481nouveau_hw_pllvals_to_clk(struct nouveau_pll_vals *pv)
482{
483 /* Avoid divide by zero if called at an inappropriate time */
484 if (!pv->M1 || !pv->M2)
485 return 0;
486
487 return pv->N1 * pv->N2 * pv->refclk / (pv->M1 * pv->M2) >> pv->log2P;
488}
489
490int
491nouveau_hw_get_clock(struct drm_device *dev, enum pll_types plltype)
492{
493 struct nouveau_pll_vals pllvals;
494
495 if (plltype == MPLL && (dev->pci_device & 0x0ff0) == CHIPSET_NFORCE) {
496 uint32_t mpllP;
497
498 pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
499 if (!mpllP)
500 mpllP = 4;
501
502 return 400000 / mpllP;
503 } else
504 if (plltype == MPLL && (dev->pci_device & 0xff0) == CHIPSET_NFORCE2) {
505 uint32_t clock;
506
507 pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
508 return clock;
509 }
510
511 nouveau_hw_get_pllvals(dev, plltype, &pllvals);
512
513 return nouveau_hw_pllvals_to_clk(&pllvals);
514}
515
516static void
517nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head)
518{
519 /* the vpll on an unused head can come up with a random value, way
520 * beyond the pll limits. for some reason this causes the chip to
521 * lock up when reading the dac palette regs, so set a valid pll here
522 * when such a condition detected. only seen on nv11 to date
523 */
524
525 struct pll_lims pll_lim;
526 struct nouveau_pll_vals pv;
527 uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF;
528
529 if (get_pll_limits(dev, head ? VPLL2 : VPLL1, &pll_lim))
530 return;
531 nouveau_hw_get_pllvals(dev, head ? VPLL2 : VPLL1, &pv);
532
533 if (pv.M1 >= pll_lim.vco1.min_m && pv.M1 <= pll_lim.vco1.max_m &&
534 pv.N1 >= pll_lim.vco1.min_n && pv.N1 <= pll_lim.vco1.max_n &&
535 pv.log2P <= pll_lim.max_log2p)
536 return;
537
538 NV_WARN(dev, "VPLL %d outwith limits, attempting to fix\n", head + 1);
539
540 /* set lowest clock within static limits */
541 pv.M1 = pll_lim.vco1.max_m;
542 pv.N1 = pll_lim.vco1.min_n;
543 pv.log2P = pll_lim.max_usable_log2p;
544 nouveau_hw_setpll(dev, pllreg, &pv);
545}
546
547/*
548 * vga font save/restore
549 */
550
551static void nouveau_vga_font_io(struct drm_device *dev,
552 void __iomem *iovram,
553 bool save, unsigned plane)
554{
555 struct drm_nouveau_private *dev_priv = dev->dev_private;
556 unsigned i;
557
558 NVWriteVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX, 1 << plane);
559 NVWriteVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX, plane);
560 for (i = 0; i < 16384; i++) {
561 if (save) {
562 dev_priv->saved_vga_font[plane][i] =
563 ioread32_native(iovram + i * 4);
564 } else {
565 iowrite32_native(dev_priv->saved_vga_font[plane][i],
566 iovram + i * 4);
567 }
568 }
569}
570
571void
572nouveau_hw_save_vga_fonts(struct drm_device *dev, bool save)
573{
574 uint8_t misc, gr4, gr5, gr6, seq2, seq4;
575 bool graphicsmode;
576 unsigned plane;
577 void __iomem *iovram;
578
579 if (nv_two_heads(dev))
580 NVSetOwner(dev, 0);
581
582 NVSetEnablePalette(dev, 0, true);
583 graphicsmode = NVReadVgaAttr(dev, 0, NV_CIO_AR_MODE_INDEX) & 1;
584 NVSetEnablePalette(dev, 0, false);
585
586 if (graphicsmode) /* graphics mode => framebuffer => no need to save */
587 return;
588
589 NV_INFO(dev, "%sing VGA fonts\n", save ? "Sav" : "Restor");
590
591 /* map first 64KiB of VRAM, holds VGA fonts etc */
592 iovram = ioremap(pci_resource_start(dev->pdev, 1), 65536);
593 if (!iovram) {
594 NV_ERROR(dev, "Failed to map VRAM, "
595 "cannot save/restore VGA fonts.\n");
596 return;
597 }
598
599 if (nv_two_heads(dev))
600 NVBlankScreen(dev, 1, true);
601 NVBlankScreen(dev, 0, true);
602
603 /* save control regs */
604 misc = NVReadPRMVIO(dev, 0, NV_PRMVIO_MISC__READ);
605 seq2 = NVReadVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX);
606 seq4 = NVReadVgaSeq(dev, 0, NV_VIO_SR_MEM_MODE_INDEX);
607 gr4 = NVReadVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX);
608 gr5 = NVReadVgaGr(dev, 0, NV_VIO_GX_MODE_INDEX);
609 gr6 = NVReadVgaGr(dev, 0, NV_VIO_GX_MISC_INDEX);
610
611 NVWritePRMVIO(dev, 0, NV_PRMVIO_MISC__WRITE, 0x67);
612 NVWriteVgaSeq(dev, 0, NV_VIO_SR_MEM_MODE_INDEX, 0x6);
613 NVWriteVgaGr(dev, 0, NV_VIO_GX_MODE_INDEX, 0x0);
614 NVWriteVgaGr(dev, 0, NV_VIO_GX_MISC_INDEX, 0x5);
615
616 /* store font in planes 0..3 */
617 for (plane = 0; plane < 4; plane++)
618 nouveau_vga_font_io(dev, iovram, save, plane);
619
620 /* restore control regs */
621 NVWritePRMVIO(dev, 0, NV_PRMVIO_MISC__WRITE, misc);
622 NVWriteVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX, gr4);
623 NVWriteVgaGr(dev, 0, NV_VIO_GX_MODE_INDEX, gr5);
624 NVWriteVgaGr(dev, 0, NV_VIO_GX_MISC_INDEX, gr6);
625 NVWriteVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX, seq2);
626 NVWriteVgaSeq(dev, 0, NV_VIO_SR_MEM_MODE_INDEX, seq4);
627
628 if (nv_two_heads(dev))
629 NVBlankScreen(dev, 1, false);
630 NVBlankScreen(dev, 0, false);
631
632 iounmap(iovram);
633}
634
635/*
636 * mode state save/load
637 */
638
639static void
640rd_cio_state(struct drm_device *dev, int head,
641 struct nv04_crtc_reg *crtcstate, int index)
642{
643 crtcstate->CRTC[index] = NVReadVgaCrtc(dev, head, index);
644}
645
646static void
647wr_cio_state(struct drm_device *dev, int head,
648 struct nv04_crtc_reg *crtcstate, int index)
649{
650 NVWriteVgaCrtc(dev, head, index, crtcstate->CRTC[index]);
651}
652
653static void
654nv_save_state_ramdac(struct drm_device *dev, int head,
655 struct nv04_mode_state *state)
656{
657 struct drm_nouveau_private *dev_priv = dev->dev_private;
658 struct nv04_crtc_reg *regp = &state->crtc_reg[head];
659 int i;
660
661 if (dev_priv->card_type >= NV_10)
662 regp->nv10_cursync = NVReadRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC);
663
664 nouveau_hw_get_pllvals(dev, head ? VPLL2 : VPLL1, &regp->pllvals);
665 state->pllsel = NVReadRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT);
666 if (nv_two_heads(dev))
667 state->sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK);
668 if (dev_priv->chipset == 0x11)
669 regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11);
670
671 regp->ramdac_gen_ctrl = NVReadRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL);
672
673 if (nv_gf4_disp_arch(dev))
674 regp->ramdac_630 = NVReadRAMDAC(dev, head, NV_PRAMDAC_630);
675 if (dev_priv->chipset >= 0x30)
676 regp->ramdac_634 = NVReadRAMDAC(dev, head, NV_PRAMDAC_634);
677
678 regp->tv_setup = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP);
679 regp->tv_vtotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VTOTAL);
680 regp->tv_vskew = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VSKEW);
681 regp->tv_vsync_delay = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VSYNC_DELAY);
682 regp->tv_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HTOTAL);
683 regp->tv_hskew = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSKEW);
684 regp->tv_hsync_delay = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY);
685 regp->tv_hsync_delay2 = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY2);
686
687 for (i = 0; i < 7; i++) {
688 uint32_t ramdac_reg = NV_PRAMDAC_FP_VDISPLAY_END + (i * 4);
689 regp->fp_vert_regs[i] = NVReadRAMDAC(dev, head, ramdac_reg);
690 regp->fp_horiz_regs[i] = NVReadRAMDAC(dev, head, ramdac_reg + 0x20);
691 }
692
693 if (nv_gf4_disp_arch(dev)) {
694 regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_FP_DITHER);
695 for (i = 0; i < 3; i++) {
696 regp->dither_regs[i] = NVReadRAMDAC(dev, head, NV_PRAMDAC_850 + i * 4);
697 regp->dither_regs[i + 3] = NVReadRAMDAC(dev, head, NV_PRAMDAC_85C + i * 4);
698 }
699 }
700
701 regp->fp_control = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL);
702 regp->fp_debug_0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_0);
703 if (!nv_gf4_disp_arch(dev) && head == 0) {
704 /* early chips don't allow access to PRAMDAC_TMDS_* without
705 * the head A FPCLK on (nv11 even locks up) */
706 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_FP_DEBUG_0, regp->fp_debug_0 &
707 ~NV_PRAMDAC_FP_DEBUG_0_PWRDOWN_FPCLK);
708 }
709 regp->fp_debug_1 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1);
710 regp->fp_debug_2 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_2);
711
712 regp->fp_margin_color = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_MARGIN_COLOR);
713
714 if (nv_gf4_disp_arch(dev))
715 regp->ramdac_8c0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_8C0);
716
717 if (dev_priv->card_type == NV_40) {
718 regp->ramdac_a20 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A20);
719 regp->ramdac_a24 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A24);
720 regp->ramdac_a34 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A34);
721
722 for (i = 0; i < 38; i++)
723 regp->ctv_regs[i] = NVReadRAMDAC(dev, head,
724 NV_PRAMDAC_CTV + 4*i);
725 }
726}
727
/*
 * Restore one head's RAMDAC state previously captured by the matching
 * save routine: pixel PLL, clock selection, TV-out timing, flat-panel
 * timing/debug registers and, on NV40, the CTV register block.
 */
static void
nv_load_state_ramdac(struct drm_device *dev, int head,
		     struct nv04_mode_state *state)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
	/* head 1's vpll lives at a separate register from head 0's */
	uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF;
	int i;

	if (dev_priv->card_type >= NV_10)
		NVWriteRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC, regp->nv10_cursync);

	/* reprogram the pixel PLL, then the (head-0-only) clock routing regs */
	nouveau_hw_setpll(dev, pllreg, &regp->pllvals);
	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel);
	if (nv_two_heads(dev))
		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, state->sel_clk);
	if (dev_priv->chipset == 0x11)
		NVWriteRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11, regp->dither);

	NVWriteRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL, regp->ramdac_gen_ctrl);

	if (nv_gf4_disp_arch(dev))
		NVWriteRAMDAC(dev, head, NV_PRAMDAC_630, regp->ramdac_630);
	if (dev_priv->chipset >= 0x30)
		NVWriteRAMDAC(dev, head, NV_PRAMDAC_634, regp->ramdac_634);

	/* TV encoder timing */
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP, regp->tv_setup);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VTOTAL, regp->tv_vtotal);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VSKEW, regp->tv_vskew);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VSYNC_DELAY, regp->tv_vsync_delay);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HTOTAL, regp->tv_htotal);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSKEW, regp->tv_hskew);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY, regp->tv_hsync_delay);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY2, regp->tv_hsync_delay2);

	/* 7 vertical FP regs, with the matching horizontal reg 0x20 above each */
	for (i = 0; i < 7; i++) {
		uint32_t ramdac_reg = NV_PRAMDAC_FP_VDISPLAY_END + (i * 4);

		NVWriteRAMDAC(dev, head, ramdac_reg, regp->fp_vert_regs[i]);
		NVWriteRAMDAC(dev, head, ramdac_reg + 0x20, regp->fp_horiz_regs[i]);
	}

	if (nv_gf4_disp_arch(dev)) {
		NVWriteRAMDAC(dev, head, NV_RAMDAC_FP_DITHER, regp->dither);
		for (i = 0; i < 3; i++) {
			NVWriteRAMDAC(dev, head, NV_PRAMDAC_850 + i * 4, regp->dither_regs[i]);
			NVWriteRAMDAC(dev, head, NV_PRAMDAC_85C + i * 4, regp->dither_regs[i + 3]);
		}
	}

	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, regp->fp_control);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_0, regp->fp_debug_0);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1, regp->fp_debug_1);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_2, regp->fp_debug_2);

	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_MARGIN_COLOR, regp->fp_margin_color);

	if (nv_gf4_disp_arch(dev))
		NVWriteRAMDAC(dev, head, NV_PRAMDAC_8C0, regp->ramdac_8c0);

	if (dev_priv->card_type == NV_40) {
		NVWriteRAMDAC(dev, head, NV_PRAMDAC_A20, regp->ramdac_a20);
		NVWriteRAMDAC(dev, head, NV_PRAMDAC_A24, regp->ramdac_a24);
		NVWriteRAMDAC(dev, head, NV_PRAMDAC_A34, regp->ramdac_a34);

		/* 38 consecutive CTV registers, 4 bytes apart */
		for (i = 0; i < 38; i++)
			NVWriteRAMDAC(dev, head,
				      NV_PRAMDAC_CTV + 4*i, regp->ctv_regs[i]);
	}
}
798
/*
 * Capture the standard VGA register set for one head into the mode
 * state: misc output, CRTC 0x00-0x18, attribute, graphics and
 * sequencer registers.
 */
static void
nv_save_state_vga(struct drm_device *dev, int head,
		  struct nv04_mode_state *state)
{
	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
	int i;

	regp->MiscOutReg = NVReadPRMVIO(dev, head, NV_PRMVIO_MISC__READ);

	/* 25 standard VGA CRTC registers (0x00-0x18) */
	for (i = 0; i < 25; i++)
		rd_cio_state(dev, head, regp, i);

	/* 21 attribute controller regs, bracketed by palette access enable;
	 * NVReadVgaAttr adjusts the index depending on this state */
	NVSetEnablePalette(dev, head, true);
	for (i = 0; i < 21; i++)
		regp->Attribute[i] = NVReadVgaAttr(dev, head, i);
	NVSetEnablePalette(dev, head, false);

	/* 9 graphics controller regs */
	for (i = 0; i < 9; i++)
		regp->Graphics[i] = NVReadVgaGr(dev, head, i);

	/* 5 sequencer regs */
	for (i = 0; i < 5; i++)
		regp->Sequencer[i] = NVReadVgaSeq(dev, head, i);
}
822
/*
 * Restore the standard VGA register set captured by nv_save_state_vga().
 * CRTC regs 0-7 are written with the write-protect (CR11 bit 7) dropped
 * and re-asserted afterwards.
 */
static void
nv_load_state_vga(struct drm_device *dev, int head,
		  struct nv04_mode_state *state)
{
	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
	int i;

	NVWritePRMVIO(dev, head, NV_PRMVIO_MISC__WRITE, regp->MiscOutReg);

	for (i = 0; i < 5; i++)
		NVWriteVgaSeq(dev, head, i, regp->Sequencer[i]);

	/* unlock cr0-7 so the timing registers actually take */
	nv_lock_vga_crtc_base(dev, head, false);
	for (i = 0; i < 25; i++)
		wr_cio_state(dev, head, regp, i);
	nv_lock_vga_crtc_base(dev, head, true);

	for (i = 0; i < 9; i++)
		NVWriteVgaGr(dev, head, i, regp->Graphics[i]);

	/* attribute regs need palette access enabled during the writes */
	NVSetEnablePalette(dev, head, true);
	for (i = 0; i < 21; i++)
		NVWriteVgaAttr(dev, head, i, regp->Attribute[i]);
	NVSetEnablePalette(dev, head, false);
}
848
/*
 * Capture the NVIDIA extended CRTC state for one head: the CRE (extended
 * CRTC index) registers, the PCRTC MMIO registers that vary by card
 * generation, and the framebuffer start address.
 */
static void
nv_save_state_ext(struct drm_device *dev, int head,
		  struct nv04_mode_state *state)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
	int i;

	rd_cio_state(dev, head, regp, NV_CIO_CRE_LCD__INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_RPC0_INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_RPC1_INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_LSR_INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_PIXEL_INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_HEB__INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_ENH_INDEX);

	rd_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_21);
	/* CRE_47 only exists from NV30 onwards */
	if (dev_priv->card_type >= NV_30)
		rd_cio_state(dev, head, regp, NV_CIO_CRE_47);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_49);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX);

	/* generation-dependent PCRTC MMIO registers */
	if (dev_priv->card_type >= NV_10) {
		regp->crtc_830 = NVReadCRTC(dev, head, NV_PCRTC_830);
		regp->crtc_834 = NVReadCRTC(dev, head, NV_PCRTC_834);

		if (dev_priv->card_type >= NV_30)
			regp->gpio_ext = NVReadCRTC(dev, head, NV_PCRTC_GPIO_EXT);

		if (dev_priv->card_type == NV_40)
			regp->crtc_850 = NVReadCRTC(dev, head, NV_PCRTC_850);

		if (nv_two_heads(dev))
			regp->crtc_eng_ctrl = NVReadCRTC(dev, head, NV_PCRTC_ENGINE_CTRL);
		regp->cursor_cfg = NVReadCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG);
	}

	regp->crtc_cfg = NVReadCRTC(dev, head, NV_PCRTC_CONFIG);

	rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX);
	if (dev_priv->card_type >= NV_10) {
		rd_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX);
		rd_cio_state(dev, head, regp, NV_CIO_CRE_CSB);
		rd_cio_state(dev, head, regp, NV_CIO_CRE_4B);
		rd_cio_state(dev, head, regp, NV_CIO_CRE_TVOUT_LATENCY);
	}
	/* NV11 and NV20 don't have this, they stop at 0x52. */
	if (nv_gf4_disp_arch(dev)) {
		rd_cio_state(dev, head, regp, NV_CIO_CRE_53);
		rd_cio_state(dev, head, regp, NV_CIO_CRE_54);

		/* CR58 is indexed through CR57; snapshot all 16 slots */
		for (i = 0; i < 0x10; i++)
			regp->CR58[i] = NVReadVgaCrtc5758(dev, head, i);
		rd_cio_state(dev, head, regp, NV_CIO_CRE_59);
		rd_cio_state(dev, head, regp, NV_CIO_CRE_5B);

		rd_cio_state(dev, head, regp, NV_CIO_CRE_85);
		rd_cio_state(dev, head, regp, NV_CIO_CRE_86);
	}

	regp->fb_start = NVReadCRTC(dev, head, NV_PCRTC_START);
}
917
/*
 * Restore the extended CRTC state captured by nv_save_state_ext().
 * Write ordering is significant in several places (see the inline
 * comments); it also quiesces the video overlay and re-arms the
 * vblank interrupt registers at the end.
 */
static void
nv_load_state_ext(struct drm_device *dev, int head,
		  struct nv04_mode_state *state)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
	uint32_t reg900;
	int i;

	if (dev_priv->card_type >= NV_10) {
		if (nv_two_heads(dev))
			/* setting ENGINE_CTRL (EC) *must* come before
			 * CIO_CRE_LCD, as writing CRE_LCD sets bits 16 & 17 in
			 * EC that should not be overwritten by writing stale EC
			 */
			NVWriteCRTC(dev, head, NV_PCRTC_ENGINE_CTRL, regp->crtc_eng_ctrl);

		/* stop the video overlay, mask its interrupts, and clamp its
		 * buffer offsets/limits to the available framebuffer */
		nvWriteVIDEO(dev, NV_PVIDEO_STOP, 1);
		nvWriteVIDEO(dev, NV_PVIDEO_INTR_EN, 0);
		nvWriteVIDEO(dev, NV_PVIDEO_OFFSET_BUFF(0), 0);
		nvWriteVIDEO(dev, NV_PVIDEO_OFFSET_BUFF(1), 0);
		nvWriteVIDEO(dev, NV_PVIDEO_LIMIT(0), dev_priv->fb_available_size - 1);
		nvWriteVIDEO(dev, NV_PVIDEO_LIMIT(1), dev_priv->fb_available_size - 1);
		nvWriteVIDEO(dev, NV_PVIDEO_UVPLANE_LIMIT(0), dev_priv->fb_available_size - 1);
		nvWriteVIDEO(dev, NV_PVIDEO_UVPLANE_LIMIT(1), dev_priv->fb_available_size - 1);
		nvWriteMC(dev, NV_PBUS_POWERCTRL_2, 0);

		NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg);
		NVWriteCRTC(dev, head, NV_PCRTC_830, regp->crtc_830);
		NVWriteCRTC(dev, head, NV_PCRTC_834, regp->crtc_834);

		if (dev_priv->card_type >= NV_30)
			NVWriteCRTC(dev, head, NV_PCRTC_GPIO_EXT, regp->gpio_ext);

		if (dev_priv->card_type == NV_40) {
			NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850);

			/* mirror the start-address-at-hsync config into
			 * PRAMDAC_900 bit 16 */
			reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900);
			if (regp->crtc_cfg == NV_PCRTC_CONFIG_START_ADDRESS_HSYNC)
				NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 | 0x10000);
			else
				NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 & ~0x10000);
		}
	}

	NVWriteCRTC(dev, head, NV_PCRTC_CONFIG, regp->crtc_cfg);

	wr_cio_state(dev, head, regp, NV_CIO_CRE_RPC0_INDEX);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_RPC1_INDEX);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_LSR_INDEX);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_PIXEL_INDEX);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_LCD__INDEX);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_HEB__INDEX);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_ENH_INDEX);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
	if (dev_priv->card_type >= NV_30)
		wr_cio_state(dev, head, regp, NV_CIO_CRE_47);

	wr_cio_state(dev, head, regp, NV_CIO_CRE_49);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
	/* nv40 needs a PRAMDAC cursor-pos poke for CURCTL changes to stick */
	if (dev_priv->card_type == NV_40)
		nv_fix_nv40_hw_cursor(dev, head);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX);

	wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX);
	if (dev_priv->card_type >= NV_10) {
		wr_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX);
		wr_cio_state(dev, head, regp, NV_CIO_CRE_CSB);
		wr_cio_state(dev, head, regp, NV_CIO_CRE_4B);
		wr_cio_state(dev, head, regp, NV_CIO_CRE_TVOUT_LATENCY);
	}
	/* NV11 and NV20 stop at 0x52. */
	if (nv_gf4_disp_arch(dev)) {
		if (dev_priv->card_type == NV_10) {
			/* Not waiting for vertical retrace before modifying
			   CRE_53/CRE_54 causes lockups. */
			nouveau_wait_until(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
			nouveau_wait_until(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0);
		}

		wr_cio_state(dev, head, regp, NV_CIO_CRE_53);
		wr_cio_state(dev, head, regp, NV_CIO_CRE_54);

		for (i = 0; i < 0x10; i++)
			NVWriteVgaCrtc5758(dev, head, i, regp->CR58[i]);
		wr_cio_state(dev, head, regp, NV_CIO_CRE_59);
		wr_cio_state(dev, head, regp, NV_CIO_CRE_5B);

		wr_cio_state(dev, head, regp, NV_CIO_CRE_85);
		wr_cio_state(dev, head, regp, NV_CIO_CRE_86);
	}

	NVWriteCRTC(dev, head, NV_PCRTC_START, regp->fb_start);

	/* Setting 1 on this value gives you interrupts for every vblank period. */
	NVWriteCRTC(dev, head, NV_PCRTC_INTR_EN_0, 0);
	NVWriteCRTC(dev, head, NV_PCRTC_INTR_0, NV_PCRTC_INTR_0_VBLANK);
}
1020
/*
 * Read one head's 256-entry (768-byte, RGB) DAC palette out through the
 * auto-incrementing PRMDIO data port and store it in the mode state.
 */
static void
nv_save_state_palette(struct drm_device *dev, int head,
		      struct nv04_mode_state *state)
{
	int head_offset = head * NV_PRMDIO_SIZE, i;

	nv_wr08(dev, NV_PRMDIO_PIXEL_MASK + head_offset,
				NV_PRMDIO_PIXEL_MASK_MASK);
	/* start reading from palette entry 0 */
	nv_wr08(dev, NV_PRMDIO_READ_MODE_ADDRESS + head_offset, 0x0);

	for (i = 0; i < 768; i++) {
		state->crtc_reg[head].DAC[i] = nv_rd08(dev,
				NV_PRMDIO_PALETTE_DATA + head_offset);
	}

	NVSetEnablePalette(dev, head, false);
}
1038
/*
 * Write one head's saved 768-byte DAC palette back through the
 * auto-incrementing PRMDIO data port.
 */
void
nouveau_hw_load_state_palette(struct drm_device *dev, int head,
			      struct nv04_mode_state *state)
{
	int head_offset = head * NV_PRMDIO_SIZE, i;

	nv_wr08(dev, NV_PRMDIO_PIXEL_MASK + head_offset,
				NV_PRMDIO_PIXEL_MASK_MASK);
	/* start writing at palette entry 0 */
	nv_wr08(dev, NV_PRMDIO_WRITE_MODE_ADDRESS + head_offset, 0x0);

	for (i = 0; i < 768; i++) {
		nv_wr08(dev, NV_PRMDIO_PALETTE_DATA + head_offset,
				state->crtc_reg[head].DAC[i]);
	}

	NVSetEnablePalette(dev, head, false);
}
1056
/*
 * Snapshot the full display state of one head (RAMDAC, VGA, palette and
 * extended CRTC registers) into @state.
 */
void nouveau_hw_save_state(struct drm_device *dev, int head,
			   struct nv04_mode_state *state)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (dev_priv->chipset == 0x11)
		/* NB: no attempt is made to restore the bad pll later on */
		nouveau_hw_fix_bad_vpll(dev, head);
	nv_save_state_ramdac(dev, head, state);
	nv_save_state_vga(dev, head, state);
	nv_save_state_palette(dev, head, state);
	nv_save_state_ext(dev, head, state);
}
1070
/*
 * Restore a full head state captured by nouveau_hw_save_state(), with
 * the screen/sequencer protected for the duration of the reprogramming.
 */
void nouveau_hw_load_state(struct drm_device *dev, int head,
			   struct nv04_mode_state *state)
{
	NVVgaProtect(dev, head, true);
	nv_load_state_ramdac(dev, head, state);
	nv_load_state_ext(dev, head, state);
	nouveau_hw_load_state_palette(dev, head, state);
	nv_load_state_vga(dev, head, state);
	NVVgaProtect(dev, head, false);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.h b/drivers/gpu/drm/nouveau/nouveau_hw.h
new file mode 100644
index 000000000000..869130f83602
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.h
@@ -0,0 +1,455 @@
1/*
2 * Copyright 2008 Stuart Bennett
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
18 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
19 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
20 * SOFTWARE.
21 */
22
23#ifndef __NOUVEAU_HW_H__
24#define __NOUVEAU_HW_H__
25
26#include "drmP.h"
27#include "nouveau_drv.h"
28
29#define MASK(field) ( \
30 (0xffffffff >> (31 - ((1 ? field) - (0 ? field)))) << (0 ? field))
31
32#define XLATE(src, srclowbit, outfield) ( \
33 (((src) >> (srclowbit)) << (0 ? outfield)) & MASK(outfield))
34
35void NVWriteVgaSeq(struct drm_device *, int head, uint8_t index, uint8_t value);
36uint8_t NVReadVgaSeq(struct drm_device *, int head, uint8_t index);
37void NVWriteVgaGr(struct drm_device *, int head, uint8_t index, uint8_t value);
38uint8_t NVReadVgaGr(struct drm_device *, int head, uint8_t index);
39void NVSetOwner(struct drm_device *, int owner);
40void NVBlankScreen(struct drm_device *, int head, bool blank);
41void nouveau_hw_setpll(struct drm_device *, uint32_t reg1,
42 struct nouveau_pll_vals *pv);
43int nouveau_hw_get_pllvals(struct drm_device *, enum pll_types plltype,
44 struct nouveau_pll_vals *pllvals);
45int nouveau_hw_pllvals_to_clk(struct nouveau_pll_vals *pllvals);
46int nouveau_hw_get_clock(struct drm_device *, enum pll_types plltype);
47void nouveau_hw_save_vga_fonts(struct drm_device *, bool save);
48void nouveau_hw_save_state(struct drm_device *, int head,
49 struct nv04_mode_state *state);
50void nouveau_hw_load_state(struct drm_device *, int head,
51 struct nv04_mode_state *state);
52void nouveau_hw_load_state_palette(struct drm_device *, int head,
53 struct nv04_mode_state *state);
54
55/* nouveau_calc.c */
56extern void nouveau_calc_arb(struct drm_device *, int vclk, int bpp,
57 int *burst, int *lwm);
58extern int nouveau_calc_pll_mnp(struct drm_device *, struct pll_lims *pll_lim,
59 int clk, struct nouveau_pll_vals *pv);
60
/* Read a 32-bit PMC (master control) register, with optional reg-debug log. */
static inline uint32_t
nvReadMC(struct drm_device *dev, uint32_t reg)
{
	uint32_t val = nv_rd32(dev, reg);
	NV_REG_DEBUG(MC, dev, "reg %08x val %08x\n", reg, val);
	return val;
}
68
/* Write a 32-bit PMC register, with optional reg-debug log. */
static inline void
nvWriteMC(struct drm_device *dev, uint32_t reg, uint32_t val)
{
	NV_REG_DEBUG(MC, dev, "reg %08x val %08x\n", reg, val);
	nv_wr32(dev, reg, val);
}
75
/* Read a 32-bit PVIDEO (overlay) register, with optional reg-debug log. */
static inline uint32_t
nvReadVIDEO(struct drm_device *dev, uint32_t reg)
{
	uint32_t val = nv_rd32(dev, reg);
	NV_REG_DEBUG(VIDEO, dev, "reg %08x val %08x\n", reg, val);
	return val;
}
83
/* Write a 32-bit PVIDEO (overlay) register, with optional reg-debug log. */
static inline void
nvWriteVIDEO(struct drm_device *dev, uint32_t reg, uint32_t val)
{
	NV_REG_DEBUG(VIDEO, dev, "reg %08x val %08x\n", reg, val);
	nv_wr32(dev, reg, val);
}
90
/* Read a 32-bit PFB (framebuffer controller) register, with debug log. */
static inline uint32_t
nvReadFB(struct drm_device *dev, uint32_t reg)
{
	uint32_t val = nv_rd32(dev, reg);
	NV_REG_DEBUG(FB, dev, "reg %08x val %08x\n", reg, val);
	return val;
}
98
/* Write a 32-bit PFB register, with debug log. */
static inline void
nvWriteFB(struct drm_device *dev, uint32_t reg, uint32_t val)
{
	NV_REG_DEBUG(FB, dev, "reg %08x val %08x\n", reg, val);
	nv_wr32(dev, reg, val);
}
105
/* Read a 32-bit PEXTDEV (external device/straps) register, with debug log. */
static inline uint32_t
nvReadEXTDEV(struct drm_device *dev, uint32_t reg)
{
	uint32_t val = nv_rd32(dev, reg);
	NV_REG_DEBUG(EXTDEV, dev, "reg %08x val %08x\n", reg, val);
	return val;
}
113
/* Write a 32-bit PEXTDEV register, with debug log. */
static inline void
nvWriteEXTDEV(struct drm_device *dev, uint32_t reg, uint32_t val)
{
	NV_REG_DEBUG(EXTDEV, dev, "reg %08x val %08x\n", reg, val);
	nv_wr32(dev, reg, val);
}
120
/* Read a PCRTC register; head 1's registers sit NV_PCRTC0_SIZE above head 0's. */
static inline uint32_t NVReadCRTC(struct drm_device *dev,
					int head, uint32_t reg)
{
	uint32_t val;
	if (head)
		reg += NV_PCRTC0_SIZE;
	val = nv_rd32(dev, reg);
	NV_REG_DEBUG(CRTC, dev, "head %d reg %08x val %08x\n", head, reg, val);
	return val;
}
131
/* Write a PCRTC register for the given head (head 1 offset by NV_PCRTC0_SIZE). */
static inline void NVWriteCRTC(struct drm_device *dev,
					int head, uint32_t reg, uint32_t val)
{
	if (head)
		reg += NV_PCRTC0_SIZE;
	NV_REG_DEBUG(CRTC, dev, "head %d reg %08x val %08x\n", head, reg, val);
	nv_wr32(dev, reg, val);
}
140
/* Read a PRAMDAC register; head 1's registers sit NV_PRAMDAC0_SIZE above head 0's. */
static inline uint32_t NVReadRAMDAC(struct drm_device *dev,
					int head, uint32_t reg)
{
	uint32_t val;
	if (head)
		reg += NV_PRAMDAC0_SIZE;
	val = nv_rd32(dev, reg);
	NV_REG_DEBUG(RAMDAC, dev, "head %d reg %08x val %08x\n",
							head, reg, val);
	return val;
}
152
/* Write a PRAMDAC register for the given head (head 1 offset by NV_PRAMDAC0_SIZE). */
static inline void NVWriteRAMDAC(struct drm_device *dev,
					int head, uint32_t reg, uint32_t val)
{
	if (head)
		reg += NV_PRAMDAC0_SIZE;
	NV_REG_DEBUG(RAMDAC, dev, "head %d reg %08x val %08x\n",
							head, reg, val);
	nv_wr32(dev, reg, val);
}
162
/*
 * Read an indexed TMDS register on link @dl of output @or.  The ramdac
 * is selected from the OUTPUT_C bit of the or; WRITE_DISABLE in the
 * control write makes the subsequent data access a read.
 */
static inline uint8_t nv_read_tmds(struct drm_device *dev,
					int or, int dl, uint8_t address)
{
	int ramdac = (or & OUTPUT_C) >> 2;

	NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_CONTROL + dl * 8,
	NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE | address);
	return NVReadRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_DATA + dl * 8);
}
172
/*
 * Write @data to an indexed TMDS register: data is latched first, then
 * the control write (without WRITE_DISABLE) commits it to @address.
 */
static inline void nv_write_tmds(struct drm_device *dev,
					int or, int dl, uint8_t address,
					uint8_t data)
{
	int ramdac = (or & OUTPUT_C) >> 2;

	NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_DATA + dl * 8, data);
	NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_CONTROL + dl * 8, address);
}
182
/* Write an indexed VGA CRTC register via the CRX/CR I/O pair for @head. */
static inline void NVWriteVgaCrtc(struct drm_device *dev,
					int head, uint8_t index, uint8_t value)
{
	NV_REG_DEBUG(VGACRTC, dev, "head %d index 0x%02x data 0x%02x\n",
							head, index, value);
	nv_wr08(dev, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
	nv_wr08(dev, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE, value);
}
191
/* Read an indexed VGA CRTC register via the CRX/CR I/O pair for @head. */
static inline uint8_t NVReadVgaCrtc(struct drm_device *dev,
					int head, uint8_t index)
{
	uint8_t val;
	nv_wr08(dev, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
	val = nv_rd08(dev, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE);
	NV_REG_DEBUG(VGACRTC, dev, "head %d index 0x%02x data 0x%02x\n",
							head, index, val);
	return val;
}
202
203/* CR57 and CR58 are a fun pair of regs. CR57 provides an index (0-0xf) for CR58
204 * I suspect they in fact do nothing, but are merely a way to carry useful
205 * per-head variables around
206 *
207 * Known uses:
208 * CR57 CR58
209 * 0x00 index to the appropriate dcb entry (or 7f for inactive)
210 * 0x02 dcb entry's "or" value (or 00 for inactive)
211 * 0x03 bit0 set for dual link (LVDS, possibly elsewhere too)
212 * 0x08 or 0x09 pxclk in MHz
213 * 0x0f laptop panel info - low nibble for PEXTDEV_BOOT_0 strap
214 * high nibble for xlat strap value
215 */
216
/* Select slot @index via CR57, then write @value to CR58 (see note above). */
static inline void
NVWriteVgaCrtc5758(struct drm_device *dev, int head, uint8_t index, uint8_t value)
{
	NVWriteVgaCrtc(dev, head, NV_CIO_CRE_57, index);
	NVWriteVgaCrtc(dev, head, NV_CIO_CRE_58, value);
}
223
/* Select slot @index via CR57, then read it back from CR58. */
static inline uint8_t NVReadVgaCrtc5758(struct drm_device *dev, int head, uint8_t index)
{
	NVWriteVgaCrtc(dev, head, NV_CIO_CRE_57, index);
	return NVReadVgaCrtc(dev, head, NV_CIO_CRE_58);
}
229
/* Read an 8-bit legacy VGA I/O (PRMVIO) register for @head. */
static inline uint8_t NVReadPRMVIO(struct drm_device *dev,
					int head, uint32_t reg)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint8_t val;

	/* Only NV4x have two pvio ranges; other twoHeads cards MUST call
	 * NVSetOwner for the relevant head to be programmed */
	if (head && dev_priv->card_type == NV_40)
		reg += NV_PRMVIO_SIZE;

	val = nv_rd08(dev, reg);
	NV_REG_DEBUG(RMVIO, dev, "head %d reg %08x val %02x\n", head, reg, val);
	return val;
}
245
/* Write an 8-bit legacy VGA I/O (PRMVIO) register for @head. */
static inline void NVWritePRMVIO(struct drm_device *dev,
					int head, uint32_t reg, uint8_t value)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/* Only NV4x have two pvio ranges; other twoHeads cards MUST call
	 * NVSetOwner for the relevant head to be programmed */
	if (head && dev_priv->card_type == NV_40)
		reg += NV_PRMVIO_SIZE;

	NV_REG_DEBUG(RMVIO, dev, "head %d reg %08x val %02x\n",
						head, reg, value);
	nv_wr08(dev, reg, value);
}
260
/*
 * Toggle VGA palette access via the attribute controller: the INP0 read
 * resets the ARX index/data flip-flop, then ARX is written with bit 5
 * clear (0x00) to enable palette access or set (0x20) to disable it.
 */
static inline void NVSetEnablePalette(struct drm_device *dev, int head, bool enable)
{
	nv_rd08(dev, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
	nv_wr08(dev, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, enable ? 0 : 0x20);
}
266
/* Return true if palette access is currently enabled (ARX bit 5 clear). */
static inline bool NVGetEnablePalette(struct drm_device *dev, int head)
{
	nv_rd08(dev, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
	return !(nv_rd08(dev, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE) & 0x20);
}
272
/*
 * Write a VGA attribute controller register.  The index's bit 5 must
 * mirror the current palette-access state, and the INP0 read resets the
 * ARX index/data flip-flop before the index/value write pair.
 */
static inline void NVWriteVgaAttr(struct drm_device *dev,
					int head, uint8_t index, uint8_t value)
{
	if (NVGetEnablePalette(dev, head))
		index &= ~0x20;
	else
		index |= 0x20;

	nv_rd08(dev, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
	NV_REG_DEBUG(VGAATTR, dev, "head %d index 0x%02x data 0x%02x\n",
							head, index, value);
	nv_wr08(dev, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index);
	nv_wr08(dev, NV_PRMCIO_AR__WRITE + head * NV_PRMCIO_SIZE, value);
}
287
/*
 * Read a VGA attribute controller register.  Same flip-flop and
 * palette-state handling as NVWriteVgaAttr, but the data comes back
 * through the AR read port.
 */
static inline uint8_t NVReadVgaAttr(struct drm_device *dev,
					int head, uint8_t index)
{
	uint8_t val;
	if (NVGetEnablePalette(dev, head))
		index &= ~0x20;
	else
		index |= 0x20;

	nv_rd08(dev, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
	nv_wr08(dev, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index);
	val = nv_rd08(dev, NV_PRMCIO_AR__READ + head * NV_PRMCIO_SIZE);
	NV_REG_DEBUG(VGAATTR, dev, "head %d index 0x%02x data 0x%02x\n",
							head, index, val);
	return val;
}
304
305static inline void NVVgaSeqReset(struct drm_device *dev, int head, bool start)
306{
307 NVWriteVgaSeq(dev, head, NV_VIO_SR_RESET_INDEX, start ? 0x1 : 0x3);
308}
309
/*
 * Blank/protect the screen around mode programming: on protect, reset
 * the sequencer and set the screen-off bit (SR01 bit 5); on unprotect,
 * reverse both.  Palette access is toggled to match.
 */
static inline void NVVgaProtect(struct drm_device *dev, int head, bool protect)
{
	uint8_t seq1 = NVReadVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX);

	if (protect) {
		NVVgaSeqReset(dev, head, true);
		NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 | 0x20);
	} else {
		/* Reenable sequencer, then turn on screen */
		NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 & ~0x20);   /* reenable display */
		NVVgaSeqReset(dev, head, false);
	}
	NVSetEnablePalette(dev, head, protect);
}
324
/*
 * Report whether the two heads are operating as one ("tied").  NV11
 * exposes this in PBUS_DEBUG_1 bit 28; other chips in CR44 bit 2.
 */
static inline bool
nv_heads_tied(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (dev_priv->chipset == 0x11)
		return !!(nvReadMC(dev, NV_PBUS_DEBUG_1) & (1 << 28));

	return NVReadVgaCrtc(dev, 0, NV_CIO_CRE_44) & 0x4;
}
335
/* makes cr0-7 on the specified head read-only */
static inline bool
nv_lock_vga_crtc_base(struct drm_device *dev, int head, bool lock)
{
	/* CR11 bit 7 write-protects CRTC regs 0-7; return the previous state
	 * so callers can restore it */
	uint8_t cr11 = NVReadVgaCrtc(dev, head, NV_CIO_CR_VRE_INDEX);
	bool waslocked = cr11 & 0x80;

	if (lock)
		cr11 |= 0x80;
	else
		cr11 &= ~0x80;
	NVWriteVgaCrtc(dev, head, NV_CIO_CR_VRE_INDEX, cr11);

	return waslocked;
}
351
/*
 * Program the CR21 shadow-lock register.  @lock >= 0 is written
 * verbatim; a negative @lock means "unlock everything" by OR-ing the
 * generic 0xfa mask into the current value.
 */
static inline void
nv_lock_vga_crtc_shadow(struct drm_device *dev, int head, int lock)
{
	/* shadow lock: connects 0x60?3d? regs to "real" 0x3d? regs
	 * bit7: unlocks HDT, HBS, HBE, HRS, HRE, HEB
	 * bit6: seems to have some effect on CR09 (double scan, VBS_9)
	 * bit5: unlocks HDE
	 * bit4: unlocks VDE
	 * bit3: unlocks VDT, OVL, VRS, ?VRE?, VBS, VBE, LSR, EBR
	 * bit2: same as bit 1 of 0x60?804
	 * bit0: same as bit 0 of 0x60?804
	 */

	uint8_t cr21 = lock;

	if (lock < 0)
		/* 0xfa is generic "unlock all" mask */
		cr21 = NVReadVgaCrtc(dev, head, NV_CIO_CRE_21) | 0xfa;

	NVWriteVgaCrtc(dev, head, NV_CIO_CRE_21, cr21);
}
373
/* renders the extended crtc regs (cr19+) on all crtcs impervious:
 * immutable and unreadable
 */
static inline bool
NVLockVgaCrtcs(struct drm_device *dev, bool lock)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	/* SR_LOCK reads back 0 when locked, hence the negation */
	bool waslocked = !NVReadVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX);

	NVWriteVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX,
					lock ? NV_CIO_SR_LOCK_VALUE : NV_CIO_SR_UNLOCK_RW_VALUE);
	/* NV11 has independently lockable extended crtcs, except when tied */
	if (dev_priv->chipset == 0x11 && !nv_heads_tied(dev))
		NVWriteVgaCrtc(dev, 1, NV_CIO_SR_LOCK_INDEX,
						lock ? NV_CIO_SR_LOCK_VALUE :
							NV_CIO_SR_UNLOCK_RW_VALUE);

	return waslocked;
}
393
394/* nv04 cursor max dimensions of 32x32 (A1R5G5B5) */
395#define NV04_CURSOR_SIZE 32
396/* limit nv10 cursors to 64x64 (ARGB8) (we could go to 64x255) */
397#define NV10_CURSOR_SIZE 64
398
399static inline int nv_cursor_width(struct drm_device *dev)
400{
401 struct drm_nouveau_private *dev_priv = dev->dev_private;
402
403 return dev_priv->card_type >= NV_10 ? NV10_CURSOR_SIZE : NV04_CURSOR_SIZE;
404}
405
static inline void
nv_fix_nv40_hw_cursor(struct drm_device *dev, int head)
{
	/* on some nv40 (such as the "true" (in the NV_PFB_BOOT_0 sense) nv40,
	 * the gf6800gt) a hardware bug requires a write to PRAMDAC_CURSOR_POS
	 * for changes to the CRTC CURCTL regs to take effect, whether changing
	 * the pixmap location, or just showing/hiding the cursor
	 */
	/* read-modify-write of the same value: the write itself is the fix */
	uint32_t curpos = NVReadRAMDAC(dev, head, NV_PRAMDAC_CU_START_POS);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_CU_START_POS, curpos);
}
417
/*
 * Show or hide the hardware cursor by toggling the enable bit in the
 * shadowed HCUR_ADDR1 CRTC register, then writing it to hardware.
 * NV40 additionally needs the cursor-pos poke (see above).
 */
static inline void
nv_show_cursor(struct drm_device *dev, int head, bool show)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint8_t *curctl1 =
		&dev_priv->mode_reg.crtc_reg[head].CRTC[NV_CIO_CRE_HCUR_ADDR1_INDEX];

	if (show)
		*curctl1 |= MASK(NV_CIO_CRE_HCUR_ADDR1_ENABLE);
	else
		*curctl1 &= ~MASK(NV_CIO_CRE_HCUR_ADDR1_ENABLE);
	NVWriteVgaCrtc(dev, head, NV_CIO_CRE_HCUR_ADDR1_INDEX, *curctl1);

	if (dev_priv->card_type == NV_40)
		nv_fix_nv40_hw_cursor(dev, head);
}
434
435static inline uint32_t
436nv_pitch_align(struct drm_device *dev, uint32_t width, int bpp)
437{
438 struct drm_nouveau_private *dev_priv = dev->dev_private;
439 int mask;
440
441 if (bpp == 15)
442 bpp = 16;
443 if (bpp == 24)
444 bpp = 8;
445
446 /* Alignment requirements taken from the Haiku driver */
447 if (dev_priv->card_type == NV_04)
448 mask = 128 / bpp - 1;
449 else
450 mask = 512 / bpp - 1;
451
452 return (width + mask) & ~mask;
453}
454
455#endif /* __NOUVEAU_HW_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
new file mode 100644
index 000000000000..88583e7bf651
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -0,0 +1,269 @@
1/*
2 * Copyright 2009 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_i2c.h"
28#include "nouveau_hw.h"
29
30static void
31nv04_i2c_setscl(void *data, int state)
32{
33 struct nouveau_i2c_chan *i2c = data;
34 struct drm_device *dev = i2c->dev;
35 uint8_t val;
36
37 val = (NVReadVgaCrtc(dev, 0, i2c->wr) & 0xd0) | (state ? 0x20 : 0);
38 NVWriteVgaCrtc(dev, 0, i2c->wr, val | 0x01);
39}
40
41static void
42nv04_i2c_setsda(void *data, int state)
43{
44 struct nouveau_i2c_chan *i2c = data;
45 struct drm_device *dev = i2c->dev;
46 uint8_t val;
47
48 val = (NVReadVgaCrtc(dev, 0, i2c->wr) & 0xe0) | (state ? 0x10 : 0);
49 NVWriteVgaCrtc(dev, 0, i2c->wr, val | 0x01);
50}
51
52static int
53nv04_i2c_getscl(void *data)
54{
55 struct nouveau_i2c_chan *i2c = data;
56 struct drm_device *dev = i2c->dev;
57
58 return !!(NVReadVgaCrtc(dev, 0, i2c->rd) & 4);
59}
60
61static int
62nv04_i2c_getsda(void *data)
63{
64 struct nouveau_i2c_chan *i2c = data;
65 struct drm_device *dev = i2c->dev;
66
67 return !!(NVReadVgaCrtc(dev, 0, i2c->rd) & 8);
68}
69
70static void
71nv4e_i2c_setscl(void *data, int state)
72{
73 struct nouveau_i2c_chan *i2c = data;
74 struct drm_device *dev = i2c->dev;
75 uint8_t val;
76
77 val = (nv_rd32(dev, i2c->wr) & 0xd0) | (state ? 0x20 : 0);
78 nv_wr32(dev, i2c->wr, val | 0x01);
79}
80
81static void
82nv4e_i2c_setsda(void *data, int state)
83{
84 struct nouveau_i2c_chan *i2c = data;
85 struct drm_device *dev = i2c->dev;
86 uint8_t val;
87
88 val = (nv_rd32(dev, i2c->wr) & 0xe0) | (state ? 0x10 : 0);
89 nv_wr32(dev, i2c->wr, val | 0x01);
90}
91
92static int
93nv4e_i2c_getscl(void *data)
94{
95 struct nouveau_i2c_chan *i2c = data;
96 struct drm_device *dev = i2c->dev;
97
98 return !!((nv_rd32(dev, i2c->rd) >> 16) & 4);
99}
100
101static int
102nv4e_i2c_getsda(void *data)
103{
104 struct nouveau_i2c_chan *i2c = data;
105 struct drm_device *dev = i2c->dev;
106
107 return !!((nv_rd32(dev, i2c->rd) >> 16) & 8);
108}
109
110static int
111nv50_i2c_getscl(void *data)
112{
113 struct nouveau_i2c_chan *i2c = data;
114 struct drm_device *dev = i2c->dev;
115
116 return !!(nv_rd32(dev, i2c->rd) & 1);
117}
118
119
120static int
121nv50_i2c_getsda(void *data)
122{
123 struct nouveau_i2c_chan *i2c = data;
124 struct drm_device *dev = i2c->dev;
125
126 return !!(nv_rd32(dev, i2c->rd) & 2);
127}
128
129static void
130nv50_i2c_setscl(void *data, int state)
131{
132 struct nouveau_i2c_chan *i2c = data;
133 struct drm_device *dev = i2c->dev;
134
135 nv_wr32(dev, i2c->wr, 4 | (i2c->data ? 2 : 0) | (state ? 1 : 0));
136}
137
138static void
139nv50_i2c_setsda(void *data, int state)
140{
141 struct nouveau_i2c_chan *i2c = data;
142 struct drm_device *dev = i2c->dev;
143
144 nv_wr32(dev, i2c->wr,
145 (nv_rd32(dev, i2c->rd) & 1) | 4 | (state ? 2 : 0));
146 i2c->data = state;
147}
148
149static const uint32_t nv50_i2c_port[] = {
150 0x00e138, 0x00e150, 0x00e168, 0x00e180,
151 0x00e254, 0x00e274, 0x00e764, 0x00e780,
152 0x00e79c, 0x00e7b8
153};
154#define NV50_I2C_PORTS ARRAY_SIZE(nv50_i2c_port)
155
156int
157nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index)
158{
159 struct drm_nouveau_private *dev_priv = dev->dev_private;
160 struct nouveau_i2c_chan *i2c;
161 int ret;
162
163 if (entry->chan)
164 return -EEXIST;
165
166 if (dev_priv->card_type == NV_50 && entry->read >= NV50_I2C_PORTS) {
167 NV_ERROR(dev, "unknown i2c port %d\n", entry->read);
168 return -EINVAL;
169 }
170
171 i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
172 if (i2c == NULL)
173 return -ENOMEM;
174
175 switch (entry->port_type) {
176 case 0:
177 i2c->algo.bit.setsda = nv04_i2c_setsda;
178 i2c->algo.bit.setscl = nv04_i2c_setscl;
179 i2c->algo.bit.getsda = nv04_i2c_getsda;
180 i2c->algo.bit.getscl = nv04_i2c_getscl;
181 i2c->rd = entry->read;
182 i2c->wr = entry->write;
183 break;
184 case 4:
185 i2c->algo.bit.setsda = nv4e_i2c_setsda;
186 i2c->algo.bit.setscl = nv4e_i2c_setscl;
187 i2c->algo.bit.getsda = nv4e_i2c_getsda;
188 i2c->algo.bit.getscl = nv4e_i2c_getscl;
189 i2c->rd = 0x600800 + entry->read;
190 i2c->wr = 0x600800 + entry->write;
191 break;
192 case 5:
193 i2c->algo.bit.setsda = nv50_i2c_setsda;
194 i2c->algo.bit.setscl = nv50_i2c_setscl;
195 i2c->algo.bit.getsda = nv50_i2c_getsda;
196 i2c->algo.bit.getscl = nv50_i2c_getscl;
197 i2c->rd = nv50_i2c_port[entry->read];
198 i2c->wr = i2c->rd;
199 break;
200 case 6:
201 i2c->rd = entry->read;
202 i2c->wr = entry->write;
203 break;
204 default:
205 NV_ERROR(dev, "DCB I2C port type %d unknown\n",
206 entry->port_type);
207 kfree(i2c);
208 return -EINVAL;
209 }
210
211 snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
212 "nouveau-%s-%d", pci_name(dev->pdev), index);
213 i2c->adapter.owner = THIS_MODULE;
214 i2c->adapter.dev.parent = &dev->pdev->dev;
215 i2c->dev = dev;
216 i2c_set_adapdata(&i2c->adapter, i2c);
217
218 if (entry->port_type < 6) {
219 i2c->adapter.algo_data = &i2c->algo.bit;
220 i2c->algo.bit.udelay = 40;
221 i2c->algo.bit.timeout = usecs_to_jiffies(5000);
222 i2c->algo.bit.data = i2c;
223 ret = i2c_bit_add_bus(&i2c->adapter);
224 } else {
225 i2c->adapter.algo_data = &i2c->algo.dp;
226 i2c->algo.dp.running = false;
227 i2c->algo.dp.address = 0;
228 i2c->algo.dp.aux_ch = nouveau_dp_i2c_aux_ch;
229 ret = i2c_dp_aux_add_bus(&i2c->adapter);
230 }
231
232 if (ret) {
233 NV_ERROR(dev, "Failed to register i2c %d\n", index);
234 kfree(i2c);
235 return ret;
236 }
237
238 entry->chan = i2c;
239 return 0;
240}
241
242void
243nouveau_i2c_fini(struct drm_device *dev, struct dcb_i2c_entry *entry)
244{
245 if (!entry->chan)
246 return;
247
248 i2c_del_adapter(&entry->chan->adapter);
249 kfree(entry->chan);
250 entry->chan = NULL;
251}
252
253struct nouveau_i2c_chan *
254nouveau_i2c_find(struct drm_device *dev, int index)
255{
256 struct drm_nouveau_private *dev_priv = dev->dev_private;
257 struct nvbios *bios = &dev_priv->vbios;
258
259 if (index >= DCB_MAX_NUM_I2C_ENTRIES)
260 return NULL;
261
262 if (!bios->dcb.i2c[index].chan) {
263 if (nouveau_i2c_init(dev, &bios->dcb.i2c[index], index))
264 return NULL;
265 }
266
267 return bios->dcb.i2c[index].chan;
268}
269
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.h b/drivers/gpu/drm/nouveau/nouveau_i2c.h
new file mode 100644
index 000000000000..c8eaf7a9fcbb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.h
@@ -0,0 +1,52 @@
1/*
2 * Copyright 2009 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef __NOUVEAU_I2C_H__
24#define __NOUVEAU_I2C_H__
25
26#include <linux/i2c.h>
27#include <linux/i2c-id.h>
28#include <linux/i2c-algo-bit.h>
29#include "drm_dp_helper.h"
30
31struct dcb_i2c_entry;
32
33struct nouveau_i2c_chan {
34 struct i2c_adapter adapter;
35 struct drm_device *dev;
36 union {
37 struct i2c_algo_bit_data bit;
38 struct i2c_algo_dp_aux_data dp;
39 } algo;
40 unsigned rd;
41 unsigned wr;
42 unsigned data;
43};
44
45int nouveau_i2c_init(struct drm_device *, struct dcb_i2c_entry *, int index);
46void nouveau_i2c_fini(struct drm_device *, struct dcb_i2c_entry *);
47struct nouveau_i2c_chan *nouveau_i2c_find(struct drm_device *, int index);
48
49int nouveau_dp_i2c_aux_ch(struct i2c_adapter *, int mode, uint8_t write_byte,
50 uint8_t *read_byte);
51
52#endif /* __NOUVEAU_I2C_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
new file mode 100644
index 000000000000..475ba810bba3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
@@ -0,0 +1,70 @@
1/**
2 * \file mga_ioc32.c
3 *
4 * 32-bit ioctl compatibility routines for the MGA DRM.
5 *
6 * \author Dave Airlie <airlied@linux.ie> with code from patches by Egbert Eich
7 *
8 *
9 * Copyright (C) Paul Mackerras 2005
10 * Copyright (C) Egbert Eich 2003,2004
11 * Copyright (C) Dave Airlie 2005
12 * All Rights Reserved.
13 *
14 * Permission is hereby granted, free of charge, to any person obtaining a
15 * copy of this software and associated documentation files (the "Software"),
16 * to deal in the Software without restriction, including without limitation
17 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
18 * and/or sell copies of the Software, and to permit persons to whom the
19 * Software is furnished to do so, subject to the following conditions:
20 *
21 * The above copyright notice and this permission notice (including the next
22 * paragraph) shall be included in all copies or substantial portions of the
23 * Software.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
26 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
27 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
28 * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
29 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
30 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
31 * IN THE SOFTWARE.
32 */
33
34#include <linux/compat.h>
35
36#include "drmP.h"
37#include "drm.h"
38
39#include "nouveau_drv.h"
40
41/**
42 * Called whenever a 32-bit process running under a 64-bit kernel
43 * performs an ioctl on /dev/dri/card<n>.
44 *
45 * \param filp file pointer.
46 * \param cmd command.
47 * \param arg user argument.
48 * \return zero on success or negative number on failure.
49 */
50long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
51 unsigned long arg)
52{
53 unsigned int nr = DRM_IOCTL_NR(cmd);
54 drm_ioctl_compat_t *fn = NULL;
55 int ret;
56
57 if (nr < DRM_COMMAND_BASE)
58 return drm_compat_ioctl(filp, cmd, arg);
59
60#if 0
61 if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
62 fn = nouveau_compat_ioctls[nr - DRM_COMMAND_BASE];
63#endif
64 if (fn != NULL)
65 ret = (*fn)(filp, cmd, arg);
66 else
67 ret = drm_ioctl(filp, cmd, arg);
68
69 return ret;
70}
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
new file mode 100644
index 000000000000..13e73cee4c44
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -0,0 +1,1256 @@
1/*
2 * Copyright (C) 2006 Ben Skeggs.
3 *
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sublicense, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial
16 * portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 */
27
28/*
29 * Authors:
30 * Ben Skeggs <darktama@iinet.net.au>
31 */
32
33#include "drmP.h"
34#include "drm.h"
35#include "nouveau_drm.h"
36#include "nouveau_drv.h"
37#include "nouveau_reg.h"
38#include <linux/ratelimit.h>
39
40/* needed for hotplug irq */
41#include "nouveau_connector.h"
42#include "nv50_display.h"
43
44void
45nouveau_irq_preinstall(struct drm_device *dev)
46{
47 struct drm_nouveau_private *dev_priv = dev->dev_private;
48
49 /* Master disable */
50 nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
51
52 if (dev_priv->card_type == NV_50) {
53 INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
54 INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh);
55 INIT_LIST_HEAD(&dev_priv->vbl_waiting);
56 }
57}
58
59int
60nouveau_irq_postinstall(struct drm_device *dev)
61{
62 /* Master enable */
63 nv_wr32(dev, NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE);
64 return 0;
65}
66
67void
68nouveau_irq_uninstall(struct drm_device *dev)
69{
70 /* Master disable */
71 nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
72}
73
74static int
75nouveau_call_method(struct nouveau_channel *chan, int class, int mthd, int data)
76{
77 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
78 struct nouveau_pgraph_object_method *grm;
79 struct nouveau_pgraph_object_class *grc;
80
81 grc = dev_priv->engine.graph.grclass;
82 while (grc->id) {
83 if (grc->id == class)
84 break;
85 grc++;
86 }
87
88 if (grc->id != class || !grc->methods)
89 return -ENOENT;
90
91 grm = grc->methods;
92 while (grm->id) {
93 if (grm->id == mthd)
94 return grm->exec(chan, class, mthd, data);
95 grm++;
96 }
97
98 return -ENOENT;
99}
100
101static bool
102nouveau_fifo_swmthd(struct nouveau_channel *chan, uint32_t addr, uint32_t data)
103{
104 struct drm_device *dev = chan->dev;
105 const int subc = (addr >> 13) & 0x7;
106 const int mthd = addr & 0x1ffc;
107
108 if (mthd == 0x0000) {
109 struct nouveau_gpuobj_ref *ref = NULL;
110
111 if (nouveau_gpuobj_ref_find(chan, data, &ref))
112 return false;
113
114 if (ref->gpuobj->engine != NVOBJ_ENGINE_SW)
115 return false;
116
117 chan->sw_subchannel[subc] = ref->gpuobj->class;
118 nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_rd32(dev,
119 NV04_PFIFO_CACHE1_ENGINE) & ~(0xf << subc * 4));
120 return true;
121 }
122
123 /* hw object */
124 if (nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE) & (1 << (subc*4)))
125 return false;
126
127 if (nouveau_call_method(chan, chan->sw_subchannel[subc], mthd, data))
128 return false;
129
130 return true;
131}
132
133static void
134nouveau_fifo_irq_handler(struct drm_device *dev)
135{
136 struct drm_nouveau_private *dev_priv = dev->dev_private;
137 struct nouveau_engine *engine = &dev_priv->engine;
138 uint32_t status, reassign;
139 int cnt = 0;
140
141 reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
142 while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
143 struct nouveau_channel *chan = NULL;
144 uint32_t chid, get;
145
146 nv_wr32(dev, NV03_PFIFO_CACHES, 0);
147
148 chid = engine->fifo.channel_id(dev);
149 if (chid >= 0 && chid < engine->fifo.channels)
150 chan = dev_priv->fifos[chid];
151 get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);
152
153 if (status & NV_PFIFO_INTR_CACHE_ERROR) {
154 uint32_t mthd, data;
155 int ptr;
156
157 /* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
158 * wrapping on my G80 chips, but CACHE1 isn't big
159 * enough for this much data.. Tests show that it
160 * wraps around to the start at GET=0x800.. No clue
161 * as to why..
162 */
163 ptr = (get & 0x7ff) >> 2;
164
165 if (dev_priv->card_type < NV_40) {
166 mthd = nv_rd32(dev,
167 NV04_PFIFO_CACHE1_METHOD(ptr));
168 data = nv_rd32(dev,
169 NV04_PFIFO_CACHE1_DATA(ptr));
170 } else {
171 mthd = nv_rd32(dev,
172 NV40_PFIFO_CACHE1_METHOD(ptr));
173 data = nv_rd32(dev,
174 NV40_PFIFO_CACHE1_DATA(ptr));
175 }
176
177 if (!chan || !nouveau_fifo_swmthd(chan, mthd, data)) {
178 NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
179 "Mthd 0x%04x Data 0x%08x\n",
180 chid, (mthd >> 13) & 7, mthd & 0x1ffc,
181 data);
182 }
183
184 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
185 nv_wr32(dev, NV03_PFIFO_INTR_0,
186 NV_PFIFO_INTR_CACHE_ERROR);
187
188 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
189 nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
190 nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
191 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
192 nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
193 nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
194
195 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
196 nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
197 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
198
199 status &= ~NV_PFIFO_INTR_CACHE_ERROR;
200 }
201
202 if (status & NV_PFIFO_INTR_DMA_PUSHER) {
203 NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d\n", chid);
204
205 status &= ~NV_PFIFO_INTR_DMA_PUSHER;
206 nv_wr32(dev, NV03_PFIFO_INTR_0,
207 NV_PFIFO_INTR_DMA_PUSHER);
208
209 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, 0x00000000);
210 if (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT) != get)
211 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET,
212 get + 4);
213 }
214
215 if (status & NV_PFIFO_INTR_SEMAPHORE) {
216 uint32_t sem;
217
218 status &= ~NV_PFIFO_INTR_SEMAPHORE;
219 nv_wr32(dev, NV03_PFIFO_INTR_0,
220 NV_PFIFO_INTR_SEMAPHORE);
221
222 sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
223 nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
224
225 nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
226 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
227 }
228
229 if (status) {
230 NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
231 status, chid);
232 nv_wr32(dev, NV03_PFIFO_INTR_0, status);
233 status = 0;
234 }
235
236 nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
237 }
238
239 if (status) {
240 NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
241 nv_wr32(dev, 0x2140, 0);
242 nv_wr32(dev, 0x140, 0);
243 }
244
245 nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
246}
247
248struct nouveau_bitfield_names {
249 uint32_t mask;
250 const char *name;
251};
252
253static struct nouveau_bitfield_names nstatus_names[] =
254{
255 { NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
256 { NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
257 { NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
258 { NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }
259};
260
261static struct nouveau_bitfield_names nstatus_names_nv10[] =
262{
263 { NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
264 { NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
265 { NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
266 { NV10_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }
267};
268
269static struct nouveau_bitfield_names nsource_names[] =
270{
271 { NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" },
272 { NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" },
273 { NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" },
274 { NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" },
275 { NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" },
276 { NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" },
277 { NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" },
278 { NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" },
279 { NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" },
280 { NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" },
281 { NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" },
282 { NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" },
283 { NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" },
284 { NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" },
285 { NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" },
286 { NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" },
287 { NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
288 { NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" },
289 { NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" },
290};
291
292static void
293nouveau_print_bitfield_names_(uint32_t value,
294 const struct nouveau_bitfield_names *namelist,
295 const int namelist_len)
296{
297 /*
298 * Caller must have already printed the KERN_* log level for us.
299 * Also the caller is responsible for adding the newline.
300 */
301 int i;
302 for (i = 0; i < namelist_len; ++i) {
303 uint32_t mask = namelist[i].mask;
304 if (value & mask) {
305 printk(" %s", namelist[i].name);
306 value &= ~mask;
307 }
308 }
309 if (value)
310 printk(" (unknown bits 0x%08x)", value);
311}
312#define nouveau_print_bitfield_names(val, namelist) \
313 nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist))
314
315struct nouveau_enum_names {
316 uint32_t value;
317 const char *name;
318};
319
320static void
321nouveau_print_enum_names_(uint32_t value,
322 const struct nouveau_enum_names *namelist,
323 const int namelist_len)
324{
325 /*
326 * Caller must have already printed the KERN_* log level for us.
327 * Also the caller is responsible for adding the newline.
328 */
329 int i;
330 for (i = 0; i < namelist_len; ++i) {
331 if (value == namelist[i].value) {
332 printk("%s", namelist[i].name);
333 return;
334 }
335 }
336 printk("unknown value 0x%08x", value);
337}
338#define nouveau_print_enum_names(val, namelist) \
339 nouveau_print_enum_names_((val), (namelist), ARRAY_SIZE(namelist))
340
341static int
342nouveau_graph_chid_from_grctx(struct drm_device *dev)
343{
344 struct drm_nouveau_private *dev_priv = dev->dev_private;
345 uint32_t inst;
346 int i;
347
348 if (dev_priv->card_type < NV_40)
349 return dev_priv->engine.fifo.channels;
350 else
351 if (dev_priv->card_type < NV_50) {
352 inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 4;
353
354 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
355 struct nouveau_channel *chan = dev_priv->fifos[i];
356
357 if (!chan || !chan->ramin_grctx)
358 continue;
359
360 if (inst == chan->ramin_grctx->instance)
361 break;
362 }
363 } else {
364 inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 12;
365
366 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
367 struct nouveau_channel *chan = dev_priv->fifos[i];
368
369 if (!chan || !chan->ramin)
370 continue;
371
372 if (inst == chan->ramin->instance)
373 break;
374 }
375 }
376
377
378 return i;
379}
380
381static int
382nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret)
383{
384 struct drm_nouveau_private *dev_priv = dev->dev_private;
385 struct nouveau_engine *engine = &dev_priv->engine;
386 int channel;
387
388 if (dev_priv->card_type < NV_10)
389 channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0xf;
390 else
391 if (dev_priv->card_type < NV_40)
392 channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
393 else
394 channel = nouveau_graph_chid_from_grctx(dev);
395
396 if (channel >= engine->fifo.channels || !dev_priv->fifos[channel]) {
397 NV_ERROR(dev, "AIII, invalid/inactive channel id %d\n", channel);
398 return -EINVAL;
399 }
400
401 *channel_ret = channel;
402 return 0;
403}
404
405struct nouveau_pgraph_trap {
406 int channel;
407 int class;
408 int subc, mthd, size;
409 uint32_t data, data2;
410 uint32_t nsource, nstatus;
411};
412
413static void
414nouveau_graph_trap_info(struct drm_device *dev,
415 struct nouveau_pgraph_trap *trap)
416{
417 struct drm_nouveau_private *dev_priv = dev->dev_private;
418 uint32_t address;
419
420 trap->nsource = trap->nstatus = 0;
421 if (dev_priv->card_type < NV_50) {
422 trap->nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
423 trap->nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
424 }
425
426 if (nouveau_graph_trapped_channel(dev, &trap->channel))
427 trap->channel = -1;
428 address = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
429
430 trap->mthd = address & 0x1FFC;
431 trap->data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
432 if (dev_priv->card_type < NV_10) {
433 trap->subc = (address >> 13) & 0x7;
434 } else {
435 trap->subc = (address >> 16) & 0x7;
436 trap->data2 = nv_rd32(dev, NV10_PGRAPH_TRAPPED_DATA_HIGH);
437 }
438
439 if (dev_priv->card_type < NV_10)
440 trap->class = nv_rd32(dev, 0x400180 + trap->subc*4) & 0xFF;
441 else if (dev_priv->card_type < NV_40)
442 trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFF;
443 else if (dev_priv->card_type < NV_50)
444 trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFFF;
445 else
446 trap->class = nv_rd32(dev, 0x400814);
447}
448
449static void
450nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id,
451 struct nouveau_pgraph_trap *trap)
452{
453 struct drm_nouveau_private *dev_priv = dev->dev_private;
454 uint32_t nsource = trap->nsource, nstatus = trap->nstatus;
455
456 if (dev_priv->card_type < NV_50) {
457 NV_INFO(dev, "%s - nSource:", id);
458 nouveau_print_bitfield_names(nsource, nsource_names);
459 printk(", nStatus:");
460 if (dev_priv->card_type < NV_10)
461 nouveau_print_bitfield_names(nstatus, nstatus_names);
462 else
463 nouveau_print_bitfield_names(nstatus, nstatus_names_nv10);
464 printk("\n");
465 }
466
467 NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x "
468 "Data 0x%08x:0x%08x\n",
469 id, trap->channel, trap->subc,
470 trap->class, trap->mthd,
471 trap->data2, trap->data);
472}
473
474static int
475nouveau_pgraph_intr_swmthd(struct drm_device *dev,
476 struct nouveau_pgraph_trap *trap)
477{
478 struct drm_nouveau_private *dev_priv = dev->dev_private;
479
480 if (trap->channel < 0 ||
481 trap->channel >= dev_priv->engine.fifo.channels ||
482 !dev_priv->fifos[trap->channel])
483 return -ENODEV;
484
485 return nouveau_call_method(dev_priv->fifos[trap->channel],
486 trap->class, trap->mthd, trap->data);
487}
488
489static inline void
490nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource)
491{
492 struct nouveau_pgraph_trap trap;
493 int unhandled = 0;
494
495 nouveau_graph_trap_info(dev, &trap);
496
497 if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
498 if (nouveau_pgraph_intr_swmthd(dev, &trap))
499 unhandled = 1;
500 } else {
501 unhandled = 1;
502 }
503
504 if (unhandled)
505 nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap);
506}
507
508static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);
509
510static int nouveau_ratelimit(void)
511{
512 return __ratelimit(&nouveau_ratelimit_state);
513}
514
515
516static inline void
517nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
518{
519 struct nouveau_pgraph_trap trap;
520 int unhandled = 0;
521
522 nouveau_graph_trap_info(dev, &trap);
523 trap.nsource = nsource;
524
525 if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
526 if (nouveau_pgraph_intr_swmthd(dev, &trap))
527 unhandled = 1;
528 } else if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
529 uint32_t v = nv_rd32(dev, 0x402000);
530 nv_wr32(dev, 0x402000, v);
531
532 /* dump the error anyway for now: it's useful for
533 Gallium development */
534 unhandled = 1;
535 } else {
536 unhandled = 1;
537 }
538
539 if (unhandled && nouveau_ratelimit())
540 nouveau_graph_dump_trap_info(dev, "PGRAPH_ERROR", &trap);
541}
542
543static inline void
544nouveau_pgraph_intr_context_switch(struct drm_device *dev)
545{
546 struct drm_nouveau_private *dev_priv = dev->dev_private;
547 struct nouveau_engine *engine = &dev_priv->engine;
548 uint32_t chid;
549
550 chid = engine->fifo.channel_id(dev);
551 NV_DEBUG(dev, "PGRAPH context switch interrupt channel %x\n", chid);
552
553 switch (dev_priv->card_type) {
554 case NV_04:
555 nv04_graph_context_switch(dev);
556 break;
557 case NV_10:
558 nv10_graph_context_switch(dev);
559 break;
560 default:
561 NV_ERROR(dev, "Context switch not implemented\n");
562 break;
563 }
564}
565
566static void
567nouveau_pgraph_irq_handler(struct drm_device *dev)
568{
569 uint32_t status;
570
571 while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
572 uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
573
574 if (status & NV_PGRAPH_INTR_NOTIFY) {
575 nouveau_pgraph_intr_notify(dev, nsource);
576
577 status &= ~NV_PGRAPH_INTR_NOTIFY;
578 nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY);
579 }
580
581 if (status & NV_PGRAPH_INTR_ERROR) {
582 nouveau_pgraph_intr_error(dev, nsource);
583
584 status &= ~NV_PGRAPH_INTR_ERROR;
585 nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_ERROR);
586 }
587
588 if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
589 nouveau_pgraph_intr_context_switch(dev);
590
591 status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
592 nv_wr32(dev, NV03_PGRAPH_INTR,
593 NV_PGRAPH_INTR_CONTEXT_SWITCH);
594 }
595
596 if (status) {
597 NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status);
598 nv_wr32(dev, NV03_PGRAPH_INTR, status);
599 }
600
601 if ((nv_rd32(dev, NV04_PGRAPH_FIFO) & (1 << 0)) == 0)
602 nv_wr32(dev, NV04_PGRAPH_FIFO, 1);
603 }
604
605 nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
606}
607
608static void
609nv50_pfb_vm_trap(struct drm_device *dev, int display, const char *name)
610{
611 struct drm_nouveau_private *dev_priv = dev->dev_private;
612 uint32_t trap[6];
613 int i, ch;
614 uint32_t idx = nv_rd32(dev, 0x100c90);
615 if (idx & 0x80000000) {
616 idx &= 0xffffff;
617 if (display) {
618 for (i = 0; i < 6; i++) {
619 nv_wr32(dev, 0x100c90, idx | i << 24);
620 trap[i] = nv_rd32(dev, 0x100c94);
621 }
622 for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) {
623 struct nouveau_channel *chan = dev_priv->fifos[ch];
624
625 if (!chan || !chan->ramin)
626 continue;
627
628 if (trap[1] == chan->ramin->instance >> 12)
629 break;
630 }
631 NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x %08x channel %d\n",
632 name, (trap[5]&0x100?"read":"write"),
633 trap[5]&0xff, trap[4]&0xffff,
634 trap[3]&0xffff, trap[0], trap[2], ch);
635 }
636 nv_wr32(dev, 0x100c90, idx | 0x80000000);
637 } else if (display) {
638 NV_INFO(dev, "%s - no VM fault?\n", name);
639 }
640}
641
642static struct nouveau_enum_names nv50_mp_exec_error_names[] =
643{
644 { 3, "STACK_UNDERFLOW" },
645 { 4, "QUADON_ACTIVE" },
646 { 8, "TIMEOUT" },
647 { 0x10, "INVALID_OPCODE" },
648 { 0x40, "BREAKPOINT" },
649};
650
/*
 * Report and acknowledge MP execution traps for one TP.
 *
 * Scans the (up to four) MPs of TP 'tpid' whose presence bits are set in
 * bits 24-27 of register 0x1540, decodes any pending execution error and
 * acknowledges it.
 *
 * tpid:    index of the TP whose MPs are examined
 * display: non-zero to print decoded errors (caller rate-limits)
 */
static void
nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t units = nv_rd32(dev, 0x1540);
	uint32_t addr, mp10, status, pc, oplow, ophigh;
	int i;
	int mps = 0;	/* number of MPs that actually reported an error */
	for (i = 0; i < 4; i++) {
		if (!(units & 1 << (i+24)))
			continue;
		/* MP register block layout differs before/after NVA0 */
		if (dev_priv->chipset < 0xa0)
			addr = 0x408200 + (tpid << 12) + (i << 7);
		else
			addr = 0x408100 + (tpid << 11) + (i << 7);
		mp10 = nv_rd32(dev, addr + 0x10);
		status = nv_rd32(dev, addr + 0x14);
		if (!status)
			continue;
		if (display) {
			/* read of +0x20 is discarded — presumably latches
			 * the trap info before reading pc/opcode; TODO confirm */
			nv_rd32(dev, addr + 0x20);
			pc = nv_rd32(dev, addr + 0x24);
			oplow = nv_rd32(dev, addr + 0x70);
			ophigh= nv_rd32(dev, addr + 0x74);
			NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
					"TP %d MP %d: ", tpid, i);
			nouveau_print_enum_names(status,
					nv50_mp_exec_error_names);
			printk(" at %06x warp %d, opcode %08x %08x\n",
					pc&0xffffff, pc >> 24,
					oplow, ophigh);
		}
		/* acknowledge the trap */
		nv_wr32(dev, addr + 0x10, mp10);
		nv_wr32(dev, addr + 0x14, 0);
		mps++;
	}
	if (!mps && display)
		NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
				"No MPs claiming errors?\n", tpid);
}
691
/*
 * Handle a per-TP trap of the given 'type' (6 = texture, 7 = MP,
 * 8 = TPDMA) for every TP whose presence bit is set in 0x1540 bits 0-15.
 *
 * ustatus_old / ustatus_new: per-TP ustatus register bases for chipsets
 * before NVA0 and from NVA0 onward, respectively.
 * display: non-zero to print decoded info (caller rate-limits).
 * name:    label used in log messages and VM-trap decoding.
 *
 * Each TP's ustatus register is read, the known bits for 'type' are
 * handled and cleared, anything left over is reported, and the register
 * is acknowledged with 0xc0000000.
 */
static void
nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
		uint32_t ustatus_new, int display, const char *name)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int tps = 0;	/* TPs that reported a non-zero ustatus */
	uint32_t units = nv_rd32(dev, 0x1540);
	int i, r;
	uint32_t ustatus_addr, ustatus;
	for (i = 0; i < 16; i++) {
		if (!(units & (1 << i)))
			continue;
		if (dev_priv->chipset < 0xa0)
			ustatus_addr = ustatus_old + (i << 12);
		else
			ustatus_addr = ustatus_new + (i << 11);
		ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
		if (!ustatus)
			continue;
		tps++;
		switch (type) {
		case 6: /* texture error... unknown for now */
			nv50_pfb_vm_trap(dev, display, name);
			if (display) {
				/* dump the four words following ustatus */
				NV_ERROR(dev, "magic set %d:\n", i);
				for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
					NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
						nv_rd32(dev, r));
			}
			break;
		case 7: /* MP error */
			if (ustatus & 0x00010000) {
				nv50_pgraph_mp_trap(dev, i, display);
				ustatus &= ~0x00010000;
			}
			break;
		case 8: /* TPDMA error */
			{
			/* e0c..e24: per-TP fault description registers */
			uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
			uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
			uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
			uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
			uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
			uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
			uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
			nv50_pfb_vm_trap(dev, display, name);
			/* 2d engine destination */
			if (ustatus & 0x00000010) {
				if (display) {
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
							i, e14, e10);
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000010;
			}
			/* Render target */
			if (ustatus & 0x00000040) {
				if (display) {
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
							i, e14, e10);
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000040;
			}
			/* CUDA memory: l[], g[] or stack. */
			if (ustatus & 0x00000080) {
				if (display) {
					if (e18 & 0x80000000) {
						/* g[] read fault? */
						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
								i, e14, e10 | ((e18 >> 24) & 0x1f));
						e18 &= ~0x1f000000;
					} else if (e18 & 0xc) {
						/* g[] write fault? */
						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
								i, e14, e10 | ((e18 >> 7) & 0x1f));
						e18 &= ~0x00000f80;
					} else {
						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
								i, e14, e10);
					}
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000080;
			}
			}
			break;
		}
		if (ustatus) {
			if (display)
				NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
		}
		/* acknowledge this TP's ustatus */
		nv_wr32(dev, ustatus_addr, 0xc0000000);
	}

	if (!tps && display)
		NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
}
793
/*
 * Handle a PGRAPH TRAP interrupt on NV50.
 *
 * Reads the trap summary from 0x400108, handles and acknowledges each
 * per-unit trap bit (DISPATCH, M2MF, VFETCH, STRMOUT, CCACHE, the
 * unknown 0x402000 unit, TEXTURE, MP, TPDMA), and logs anything left
 * over.  Printing is rate-limited via nouveau_ratelimit(); the hardware
 * acks happen regardless of whether anything is printed.
 */
static void
nv50_pgraph_trap_handler(struct drm_device *dev)
{
	struct nouveau_pgraph_trap trap;
	uint32_t status = nv_rd32(dev, 0x400108);
	uint32_t ustatus;
	int display = nouveau_ratelimit();

	if (!status && display) {
		nouveau_graph_trap_info(dev, &trap);
		nouveau_graph_dump_trap_info(dev, "PGRAPH_TRAP", &trap);
		NV_INFO(dev, "PGRAPH_TRAP - no units reporting traps?\n");
	}

	/* DISPATCH: Relays commands to other units and handles NOTIFY,
	 * COND, QUERY. If you get a trap from it, the command is still stuck
	 * in DISPATCH and you need to do something about it. */
	if (status & 0x001) {
		ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n");
		}

		/* Known to be triggered by screwed up NOTIFY and COND... */
		if (ustatus & 0x00000001) {
			nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT");
			nv_wr32(dev, 0x400500, 0);
			if (nv_rd32(dev, 0x400808) & 0x80000000) {
				if (display) {
					if (nouveau_graph_trapped_channel(dev, &trap.channel))
						trap.channel = -1;
					trap.class = nv_rd32(dev, 0x400814);
					trap.mthd = nv_rd32(dev, 0x400808) & 0x1ffc;
					trap.subc = (nv_rd32(dev, 0x400808) >> 16) & 0x7;
					trap.data = nv_rd32(dev, 0x40080c);
					trap.data2 = nv_rd32(dev, 0x400810);
					nouveau_graph_dump_trap_info(dev,
							"PGRAPH_TRAP_DISPATCH_FAULT", &trap);
					NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400808: %08x\n", nv_rd32(dev, 0x400808));
					NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400848: %08x\n", nv_rd32(dev, 0x400848));
				}
				/* drop the stuck command */
				nv_wr32(dev, 0x400808, 0);
			} else if (display) {
				NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - No stuck command?\n");
			}
			nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3);
			nv_wr32(dev, 0x400848, 0);
			ustatus &= ~0x00000001;
		}
		if (ustatus & 0x00000002) {
			nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY");
			nv_wr32(dev, 0x400500, 0);
			if (nv_rd32(dev, 0x40084c) & 0x80000000) {
				if (display) {
					if (nouveau_graph_trapped_channel(dev, &trap.channel))
						trap.channel = -1;
					trap.class = nv_rd32(dev, 0x400814);
					trap.mthd = nv_rd32(dev, 0x40084c) & 0x1ffc;
					trap.subc = (nv_rd32(dev, 0x40084c) >> 16) & 0x7;
					trap.data = nv_rd32(dev, 0x40085c);
					trap.data2 = 0;
					nouveau_graph_dump_trap_info(dev,
							"PGRAPH_TRAP_DISPATCH_QUERY", &trap);
					NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - 40084c: %08x\n", nv_rd32(dev, 0x40084c));
				}
				/* drop the stuck query */
				nv_wr32(dev, 0x40084c, 0);
			} else if (display) {
				NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - No stuck command?\n");
			}
			ustatus &= ~0x00000002;
		}
		if (ustatus && display)
			NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - Unhandled ustatus 0x%08x\n", ustatus);
		nv_wr32(dev, 0x400804, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x001);
		status &= ~0x001;
	}

	/* TRAPs other than dispatch use the "normal" trap regs. */
	if (status && display) {
		nouveau_graph_trap_info(dev, &trap);
		nouveau_graph_dump_trap_info(dev,
				"PGRAPH_TRAP", &trap);
	}

	/* M2MF: Memory to memory copy engine. */
	if (status & 0x002) {
		ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_M2MF - no ustatus?\n");
		}
		if (ustatus & 0x00000001) {
			nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY");
			ustatus &= ~0x00000001;
		}
		if (ustatus & 0x00000002) {
			nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN");
			ustatus &= ~0x00000002;
		}
		if (ustatus & 0x00000004) {
			nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT");
			ustatus &= ~0x00000004;
		}
		/* NOTE(review): this dump is not gated on 'display' — it
		 * prints even when rate-limited, unlike the other units */
		NV_INFO (dev, "PGRAPH_TRAP_M2MF - %08x %08x %08x %08x\n",
				nv_rd32(dev, 0x406804),
				nv_rd32(dev, 0x406808),
				nv_rd32(dev, 0x40680c),
				nv_rd32(dev, 0x406810));
		if (ustatus && display)
			NV_INFO(dev, "PGRAPH_TRAP_M2MF - Unhandled ustatus 0x%08x\n", ustatus);
		/* No sane way found yet -- just reset the bugger. */
		nv_wr32(dev, 0x400040, 2);
		nv_wr32(dev, 0x400040, 0);
		nv_wr32(dev, 0x406800, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x002);
		status &= ~0x002;
	}

	/* VFETCH: Fetches data from vertex buffers. */
	if (status & 0x004) {
		ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_VFETCH - no ustatus?\n");
		}
		if (ustatus & 0x00000001) {
			nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT");
			/* NOTE(review): also not gated on 'display' */
			NV_INFO (dev, "PGRAPH_TRAP_VFETCH_FAULT - %08x %08x %08x %08x\n",
					nv_rd32(dev, 0x400c00),
					nv_rd32(dev, 0x400c08),
					nv_rd32(dev, 0x400c0c),
					nv_rd32(dev, 0x400c10));
			ustatus &= ~0x00000001;
		}
		if (ustatus && display)
			NV_INFO(dev, "PGRAPH_TRAP_VFETCH - Unhandled ustatus 0x%08x\n", ustatus);
		nv_wr32(dev, 0x400c04, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x004);
		status &= ~0x004;
	}

	/* STRMOUT: DirectX streamout / OpenGL transform feedback. */
	if (status & 0x008) {
		ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - no ustatus?\n");
		}
		if (ustatus & 0x00000001) {
			nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT");
			NV_INFO (dev, "PGRAPH_TRAP_STRMOUT_FAULT - %08x %08x %08x %08x\n",
					nv_rd32(dev, 0x401804),
					nv_rd32(dev, 0x401808),
					nv_rd32(dev, 0x40180c),
					nv_rd32(dev, 0x401810));
			ustatus &= ~0x00000001;
		}
		if (ustatus && display)
			NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - Unhandled ustatus 0x%08x\n", ustatus);
		/* No sane way found yet -- just reset the bugger. */
		nv_wr32(dev, 0x400040, 0x80);
		nv_wr32(dev, 0x400040, 0);
		nv_wr32(dev, 0x401800, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x008);
		status &= ~0x008;
	}

	/* CCACHE: Handles code and c[] caches and fills them. */
	if (status & 0x010) {
		ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_CCACHE - no ustatus?\n");
		}
		if (ustatus & 0x00000001) {
			nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT");
			NV_INFO (dev, "PGRAPH_TRAP_CCACHE_FAULT - %08x %08x %08x %08x %08x %08x %08x\n",
					nv_rd32(dev, 0x405800),
					nv_rd32(dev, 0x405804),
					nv_rd32(dev, 0x405808),
					nv_rd32(dev, 0x40580c),
					nv_rd32(dev, 0x405810),
					nv_rd32(dev, 0x405814),
					nv_rd32(dev, 0x40581c));
			ustatus &= ~0x00000001;
		}
		if (ustatus && display)
			NV_INFO(dev, "PGRAPH_TRAP_CCACHE - Unhandled ustatus 0x%08x\n", ustatus);
		nv_wr32(dev, 0x405018, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x010);
		status &= ~0x010;
	}

	/* Unknown, not seen yet... 0x402000 is the only trap status reg
	 * remaining, so try to handle it anyway. Perhaps related to that
	 * unknown DMA slot on tesla? */
	if (status & 0x20) {
		nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04");
		ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
		if (display)
			NV_INFO(dev, "PGRAPH_TRAP_UNKC04 - Unhandled ustatus 0x%08x\n", ustatus);
		nv_wr32(dev, 0x402000, 0xc0000000);
		/* no status modifiction on purpose */
	}

	/* TEXTURE: CUDA texturing units */
	if (status & 0x040) {
		nv50_pgraph_tp_trap (dev, 6, 0x408900, 0x408600, display,
				"PGRAPH_TRAP_TEXTURE");
		nv_wr32(dev, 0x400108, 0x040);
		status &= ~0x040;
	}

	/* MP: CUDA execution engines. */
	if (status & 0x080) {
		nv50_pgraph_tp_trap (dev, 7, 0x408314, 0x40831c, display,
				"PGRAPH_TRAP_MP");
		nv_wr32(dev, 0x400108, 0x080);
		status &= ~0x080;
	}

	/* TPDMA: Handles TP-initiated uncached memory accesses:
	 * l[], g[], stack, 2d surfaces, render targets. */
	if (status & 0x100) {
		nv50_pgraph_tp_trap (dev, 8, 0x408e08, 0x408708, display,
				"PGRAPH_TRAP_TPDMA");
		nv_wr32(dev, 0x400108, 0x100);
		status &= ~0x100;
	}

	/* anything left is unknown — log and ack it anyway */
	if (status) {
		if (display)
			NV_INFO(dev, "PGRAPH_TRAP - Unknown trap 0x%08x\n",
				status);
		nv_wr32(dev, 0x400108, status);
	}
}
1029
/* There must be a *lot* of these. Will take some time to gather them up. */
/* DATA_ERROR reason codes (read from 0x400110), decoded via
 * nouveau_print_enum_names() in the PGRAPH DATA_ERROR interrupt path. */
static struct nouveau_enum_names nv50_data_error_names[] =
{
	{ 4, "INVALID_VALUE" },
	{ 5, "INVALID_ENUM" },
	{ 8, "INVALID_OBJECT" },
	{ 0xc, "INVALID_BITFIELD" },
	{ 0x28, "MP_NO_REG_SPACE" },
	{ 0x2b, "MP_BLOCK_SIZE_MISMATCH" },
};
1040
/*
 * PGRAPH interrupt handler for NV50+.
 *
 * Loops while NV03_PGRAPH_INTR is non-zero, handling and acknowledging
 * each known interrupt bit, logging via rate-limited dumps.  After each
 * pass the FIFO access bits in 0x400500 are re-enabled if needed, and on
 * exit the PMC PGRAPH-pending bit is acked and a context-switch busy bit
 * in 0x400824 is cleared.
 */
static void
nv50_pgraph_irq_handler(struct drm_device *dev)
{
	struct nouveau_pgraph_trap trap;
	int unhandled = 0;
	uint32_t status;

	while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
		/* NOTIFY: You've set a NOTIFY an a command and it's done. */
		if (status & 0x00000001) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_NOTIFY", &trap);
			status &= ~0x00000001;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
		}

		/* COMPUTE_QUERY: Purpose and exact cause unknown, happens
		 * when you write 0x200 to 0x50c0 method 0x31c. */
		if (status & 0x00000002) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_COMPUTE_QUERY", &trap);
			status &= ~0x00000002;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000002);
		}

		/* Unknown, never seen: 0x4 */

		/* ILLEGAL_MTHD: You used a wrong method for this class. */
		if (status & 0x00000010) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_pgraph_intr_swmthd(dev, &trap))
				unhandled = 1;
			/* NOTE(review): 'unhandled' is never reset inside the
			 * loop, so once set it stays set for later passes */
			if (unhandled && nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_ILLEGAL_MTHD", &trap);
			status &= ~0x00000010;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
		}

		/* ILLEGAL_CLASS: You used a wrong class. */
		if (status & 0x00000020) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_ILLEGAL_CLASS", &trap);
			status &= ~0x00000020;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000020);
		}

		/* DOUBLE_NOTIFY: You tried to set a NOTIFY on another NOTIFY. */
		if (status & 0x00000040) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_DOUBLE_NOTIFY", &trap);
			status &= ~0x00000040;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000040);
		}

		/* CONTEXT_SWITCH: PGRAPH needs us to load a new context */
		if (status & 0x00001000) {
			/* mask the interrupt while switching, then restore
			 * FIFO access */
			nv_wr32(dev, 0x400500, 0x00000000);
			nv_wr32(dev, NV03_PGRAPH_INTR,
				NV_PGRAPH_INTR_CONTEXT_SWITCH);
			nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
				NV40_PGRAPH_INTR_EN) &
				~NV_PGRAPH_INTR_CONTEXT_SWITCH);
			nv_wr32(dev, 0x400500, 0x00010001);

			nv50_graph_context_switch(dev);

			status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
		}

		/* BUFFER_NOTIFY: Your m2mf transfer finished */
		if (status & 0x00010000) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_BUFFER_NOTIFY", &trap);
			status &= ~0x00010000;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00010000);
		}

		/* DATA_ERROR: Invalid value for this method, or invalid
		 * state in current PGRAPH context for this operation */
		if (status & 0x00100000) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit()) {
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_DATA_ERROR", &trap);
				NV_INFO (dev, "PGRAPH_DATA_ERROR - ");
				nouveau_print_enum_names(nv_rd32(dev, 0x400110),
						nv50_data_error_names);
				printk("\n");
			}
			status &= ~0x00100000;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
		}

		/* TRAP: Something bad happened in the middle of command
		 * execution.  Has a billion types, subtypes, and even
		 * subsubtypes. */
		if (status & 0x00200000) {
			nv50_pgraph_trap_handler(dev);
			status &= ~0x00200000;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
		}

		/* Unknown, never seen: 0x00400000 */

		/* SINGLE_STEP: Happens on every method if you turned on
		 * single stepping in 40008c */
		if (status & 0x01000000) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_SINGLE_STEP", &trap);
			status &= ~0x01000000;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x01000000);
		}

		/* 0x02000000 happens when you pause a ctxprog...
		 * but the only way this can happen that I know is by
		 * poking the relevant MMIO register, and we don't
		 * do that. */

		if (status) {
			NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n",
				status);
			nv_wr32(dev, NV03_PGRAPH_INTR, status);
		}

		{
			/* re-enable FIFO access if either bit got cleared */
			const int isb = (1 << 16) | (1 << 0);

			if ((nv_rd32(dev, 0x400500) & isb) != isb)
				nv_wr32(dev, 0x400500,
					nv_rd32(dev, 0x400500) | isb);
		}
	}

	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
	/* clear the context-switch busy bit if it is set */
	if (nv_rd32(dev, 0x400824) & (1 << 31))
		nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
}
1191
1192static void
1193nouveau_crtc_irq_handler(struct drm_device *dev, int crtc)
1194{
1195 if (crtc & 1)
1196 nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);
1197
1198 if (crtc & 2)
1199 nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
1200}
1201
/*
 * Top-level PMC interrupt handler.
 *
 * Reads NV03_PMC_INTR_0 and dispatches to the FIFO, PGRAPH, CRTC and
 * (NV50) display/I2C sub-handlers, logging any status bits no handler
 * claimed.  The whole dispatch runs under context_switch_lock with
 * fbcon hardware acceleration temporarily disabled so the console does
 * not touch the channel mid-handling.
 *
 * Returns IRQ_NONE if no interrupt was pending, IRQ_HANDLED otherwise.
 */
irqreturn_t
nouveau_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *)arg;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t status, fbdev_flags = 0;
	unsigned long flags;

	status = nv_rd32(dev, NV03_PMC_INTR_0);
	if (!status)
		return IRQ_NONE;

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);

	/* disable fbcon HW acceleration for the duration; restored below */
	if (dev_priv->fbdev_info) {
		fbdev_flags = dev_priv->fbdev_info->flags;
		dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
	}

	if (status & NV_PMC_INTR_0_PFIFO_PENDING) {
		nouveau_fifo_irq_handler(dev);
		status &= ~NV_PMC_INTR_0_PFIFO_PENDING;
	}

	if (status & NV_PMC_INTR_0_PGRAPH_PENDING) {
		if (dev_priv->card_type >= NV_50)
			nv50_pgraph_irq_handler(dev);
		else
			nouveau_pgraph_irq_handler(dev);

		status &= ~NV_PMC_INTR_0_PGRAPH_PENDING;
	}

	if (status & NV_PMC_INTR_0_CRTCn_PENDING) {
		/* bits 24/25 select which CRTC(s) raised vblank */
		nouveau_crtc_irq_handler(dev, (status>>24)&3);
		status &= ~NV_PMC_INTR_0_CRTCn_PENDING;
	}

	if (status & (NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
		      NV_PMC_INTR_0_NV50_I2C_PENDING)) {
		nv50_display_irq_handler(dev);
		status &= ~(NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
			    NV_PMC_INTR_0_NV50_I2C_PENDING);
	}

	if (status)
		NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status);

	if (dev_priv->fbdev_info)
		dev_priv->fbdev_info->flags = fbdev_flags;

	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	return IRQ_HANDLED;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
new file mode 100644
index 000000000000..775a7017af64
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -0,0 +1,727 @@
1/*
2 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
3 * Copyright 2005 Stephane Marchesin
4 *
5 * The Weather Channel (TM) funded Tungsten Graphics to develop the
6 * initial release of the Radeon 8500 driver under the XFree86 license.
7 * This notice must be preserved.
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26 * DEALINGS IN THE SOFTWARE.
27 *
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 */
31
32
33#include "drmP.h"
34#include "drm.h"
35#include "drm_sarea.h"
36#include "nouveau_drv.h"
37
/*
 * Carve a [start, start + size) allocation out of free block 'p' and
 * mark it as owned by 'file_priv'.
 *
 * 'p' must already be known to contain the requested range.  A leading
 * remainder (before 'start') and a trailing remainder (after the
 * allocation) are each split off into new free blocks linked around the
 * allocation.  Returns the block representing the allocation.
 *
 * NOTE(review): if either kmalloc fails the corresponding trim is simply
 * skipped (goto out / fallthrough) — the caller then receives a block
 * larger than requested, still marked as owned.
 */
static struct mem_block *
split_block(struct mem_block *p, uint64_t start, uint64_t size,
	    struct drm_file *file_priv)
{
	/* Maybe cut off the start of an existing block */
	if (start > p->start) {
		struct mem_block *newblock =
			kmalloc(sizeof(*newblock), GFP_KERNEL);
		if (!newblock)
			goto out;
		/* 'newblock' becomes the tail part starting at 'start' */
		newblock->start = start;
		newblock->size = p->size - (start - p->start);
		newblock->file_priv = NULL;
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size -= newblock->size;
		p = newblock;
	}

	/* Maybe cut off the end of an existing block */
	if (size < p->size) {
		struct mem_block *newblock =
			kmalloc(sizeof(*newblock), GFP_KERNEL);
		if (!newblock)
			goto out;
		/* 'newblock' holds the free remainder after the allocation */
		newblock->start = start + size;
		newblock->size = p->size - size;
		newblock->file_priv = NULL;
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size = size;
	}

out:
	/* Our block is in the middle */
	p->file_priv = file_priv;
	return p;
}
80
/*
 * Allocate 'size' bytes from 'heap', aligned to (1 << align2) bytes.
 *
 * A block with file_priv == NULL is free.  With tail != 0 the heap is
 * scanned backwards and the allocation is aligned downward from the end
 * of a free block; otherwise the heap is scanned forwards with the start
 * aligned upward.  Returns the allocated block, or NULL if 'heap' is
 * NULL or no free block can satisfy the request.
 */
struct mem_block *
nouveau_mem_alloc_block(struct mem_block *heap, uint64_t size,
			int align2, struct drm_file *file_priv, int tail)
{
	struct mem_block *p;
	uint64_t mask = (1 << align2) - 1;

	if (!heap)
		return NULL;

	if (tail) {
		list_for_each_prev(p, heap) {
			/* align the end of the candidate range downward */
			uint64_t start = ((p->start + p->size) - size) & ~mask;

			if (p->file_priv == NULL && start >= p->start &&
			    start + size <= p->start + p->size)
				return split_block(p, start, size, file_priv);
		}
	} else {
		list_for_each(p, heap) {
			/* align the start of the candidate range upward */
			uint64_t start = (p->start + mask) & ~mask;

			if (p->file_priv == NULL &&
			    start + size <= p->start + p->size)
				return split_block(p, start, size, file_priv);
		}
	}

	return NULL;
}
111
/*
 * Mark block 'p' free and merge it with adjacent free blocks.
 * After a merge with the previous block, 'p' itself has been kfree'd.
 */
void nouveau_mem_free_block(struct mem_block *p)
{
	p->file_priv = NULL;

	/* Assumes a single contiguous range. Needs a special file_priv in
	 * 'heap' to stop it being subsumed.
	 */
	/* absorb the following block if it is free */
	if (p->next->file_priv == NULL) {
		struct mem_block *q = p->next;
		p->size += q->size;
		p->next = q->next;
		p->next->prev = p;
		kfree(q);
	}

	/* merge into the preceding block if it is free ('p' is freed) */
	if (p->prev->file_priv == NULL) {
		struct mem_block *q = p->prev;
		q->size += p->size;
		q->next = p->next;
		q->next->prev = q;
		kfree(p);
	}
}
135
136/* Initialize. How to check for an uninitialized heap?
137 */
138int nouveau_mem_init_heap(struct mem_block **heap, uint64_t start,
139 uint64_t size)
140{
141 struct mem_block *blocks = kmalloc(sizeof(*blocks), GFP_KERNEL);
142
143 if (!blocks)
144 return -ENOMEM;
145
146 *heap = kmalloc(sizeof(**heap), GFP_KERNEL);
147 if (!*heap) {
148 kfree(blocks);
149 return -ENOMEM;
150 }
151
152 blocks->start = start;
153 blocks->size = size;
154 blocks->file_priv = NULL;
155 blocks->next = blocks->prev = *heap;
156
157 memset(*heap, 0, sizeof(**heap));
158 (*heap)->file_priv = (struct drm_file *) -1;
159 (*heap)->next = (*heap)->prev = blocks;
160 return 0;
161}
162
/*
 * Free all blocks associated with the releasing file_priv
 */
void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap)
{
	struct mem_block *p;

	if (!heap || !heap->next)
		return;

	/* pass 1: mark every block owned by file_priv as free */
	list_for_each(p, heap) {
		if (p->file_priv == file_priv)
			p->file_priv = NULL;
	}

	/* Assumes a single contiguous range. Needs a special file_priv in
	 * 'heap' to stop it being subsumed.
	 */
	/* pass 2: coalesce runs of adjacent free blocks; the sentinel's
	 * non-NULL file_priv keeps it from being merged away */
	list_for_each(p, heap) {
		while ((p->file_priv == NULL) &&
		       (p->next->file_priv == NULL) &&
		       (p->next != heap)) {
			struct mem_block *q = p->next;
			p->size += q->size;
			p->next = q->next;
			p->next->prev = p;
			kfree(q);
		}
	}
}
193
194/*
195 * NV10-NV40 tiling helpers
196 */
197
/*
 * Program tile region 'i' to cover [addr, addr + size) with the given
 * pitch (pitch == 0 disables the region).
 *
 * The hardware is idled around the update: the FIFO is disabled and
 * flushed, the engines waited idle, then the PGRAPH and PFB tiling
 * registers are written and the FIFO re-enabled.  Also drops any fence
 * reference the region held.
 */
static void
nv10_mem_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
			   uint32_t size, uint32_t pitch)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];

	tile->addr = addr;
	tile->size = size;
	tile->used = !!pitch;		/* pitch == 0 frees the region */
	nouveau_fence_unref((void **)&tile->fence);

	/* NOTE(review): returning here leaves the software state above
	 * updated but the hardware untouched — presumably cache_flush
	 * only fails when there is nothing to flush; confirm */
	if (!pfifo->cache_flush(dev))
		return;

	pfifo->reassign(dev, false);
	pfifo->cache_flush(dev);
	pfifo->cache_pull(dev, false);

	nouveau_wait_for_idle(dev);

	pgraph->set_region_tiling(dev, i, addr, size, pitch);
	pfb->set_region_tiling(dev, i, addr, size, pitch);

	pfifo->cache_pull(dev, true);
	pfifo->reassign(dev, true);
}
228
/*
 * Find a tile region for [addr, addr + size) with the given pitch.
 *
 * Scans all tile regions under tile.lock: in-use and fence-pending
 * regions are skipped, any free region intersecting the requested range
 * is disabled, and (when pitch != 0) the first usable free region is
 * programmed for the range.  Returns the programmed region, or NULL if
 * none was available (or pitch == 0, i.e. only killing intersections).
 */
struct nouveau_tile_reg *
nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
		    uint32_t pitch)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	struct nouveau_tile_reg *tile = dev_priv->tile.reg, *found = NULL;
	int i;

	spin_lock(&dev_priv->tile.lock);

	for (i = 0; i < pfb->num_tiles; i++) {
		if (tile[i].used)
			/* Tile region in use. */
			continue;

		if (tile[i].fence &&
		    !nouveau_fence_signalled(tile[i].fence, NULL))
			/* Pending tile region. */
			continue;

		/* overlap test: max(starts) < min(ends) */
		if (max(tile[i].addr, addr) <
		    min(tile[i].addr + tile[i].size, addr + size))
			/* Kill an intersecting tile region. */
			nv10_mem_set_region_tiling(dev, i, 0, 0, 0);

		if (pitch && !found) {
			/* Free tile region. */
			nv10_mem_set_region_tiling(dev, i, addr, size, pitch);
			found = &tile[i];
		}
	}

	spin_unlock(&dev_priv->tile.lock);

	return found;
}
266
267void
268nv10_mem_expire_tiling(struct drm_device *dev, struct nouveau_tile_reg *tile,
269 struct nouveau_fence *fence)
270{
271 if (fence) {
272 /* Mark it as pending. */
273 tile->fence = fence;
274 nouveau_fence_ref(fence);
275 }
276
277 tile->used = false;
278}
279
280/*
281 * NV50 VM helpers
282 */
283int
284nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
285 uint32_t flags, uint64_t phys)
286{
287 struct drm_nouveau_private *dev_priv = dev->dev_private;
288 struct nouveau_gpuobj *pgt;
289 unsigned block;
290 int i;
291
292 virt = ((virt - dev_priv->vm_vram_base) >> 16) << 1;
293 size = (size >> 16) << 1;
294
295 phys |= ((uint64_t)flags << 32);
296 phys |= 1;
297 if (dev_priv->vram_sys_base) {
298 phys += dev_priv->vram_sys_base;
299 phys |= 0x30;
300 }
301
302 dev_priv->engine.instmem.prepare_access(dev, true);
303 while (size) {
304 unsigned offset_h = upper_32_bits(phys);
305 unsigned offset_l = lower_32_bits(phys);
306 unsigned pte, end;
307
308 for (i = 7; i >= 0; i--) {
309 block = 1 << (i + 1);
310 if (size >= block && !(virt & (block - 1)))
311 break;
312 }
313 offset_l |= (i << 7);
314
315 phys += block << 15;
316 size -= block;
317
318 while (block) {
319 pgt = dev_priv->vm_vram_pt[virt >> 14];
320 pte = virt & 0x3ffe;
321
322 end = pte + block;
323 if (end > 16384)
324 end = 16384;
325 block -= (end - pte);
326 virt += (end - pte);
327
328 while (pte < end) {
329 nv_wo32(dev, pgt, pte++, offset_l);
330 nv_wo32(dev, pgt, pte++, offset_h);
331 }
332 }
333 }
334 dev_priv->engine.instmem.finish_access(dev);
335
336 nv_wr32(dev, 0x100c80, 0x00050001);
337 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
338 NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
339 NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
340 return -EBUSY;
341 }
342
343 nv_wr32(dev, 0x100c80, 0x00000001);
344 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
345 NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
346 NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
347 return -EBUSY;
348 }
349
350 nv_wr32(dev, 0x100c80, 0x00040001);
351 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
352 NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
353 NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
354 return -EBUSY;
355 }
356
357 nv_wr32(dev, 0x100c80, 0x00060001);
358 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
359 NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
360 NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
361 return -EBUSY;
362 }
363
364 return 0;
365}
366
367void
368nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
369{
370 struct drm_nouveau_private *dev_priv = dev->dev_private;
371 struct nouveau_gpuobj *pgt;
372 unsigned pages, pte, end;
373
374 virt -= dev_priv->vm_vram_base;
375 pages = (size >> 16) << 1;
376
377 dev_priv->engine.instmem.prepare_access(dev, true);
378 while (pages) {
379 pgt = dev_priv->vm_vram_pt[virt >> 29];
380 pte = (virt & 0x1ffe0000ULL) >> 15;
381
382 end = pte + pages;
383 if (end > 16384)
384 end = 16384;
385 pages -= (end - pte);
386 virt += (end - pte) << 15;
387
388 while (pte < end)
389 nv_wo32(dev, pgt, pte++, 0);
390 }
391 dev_priv->engine.instmem.finish_access(dev);
392
393 nv_wr32(dev, 0x100c80, 0x00050001);
394 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
395 NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
396 NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
397 return;
398 }
399
400 nv_wr32(dev, 0x100c80, 0x00000001);
401 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
402 NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
403 NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
404 return;
405 }
406
407 nv_wr32(dev, 0x100c80, 0x00040001);
408 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
409 NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
410 NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
411 return;
412 }
413
414 nv_wr32(dev, 0x100c80, 0x00060001);
415 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
416 NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
417 NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
418 }
419}
420
421/*
422 * Cleanup everything
423 */
424void nouveau_mem_takedown(struct mem_block **heap)
425{
426 struct mem_block *p;
427
428 if (!*heap)
429 return;
430
431 for (p = (*heap)->next; p != *heap;) {
432 struct mem_block *q = p;
433 p = p->next;
434 kfree(q);
435 }
436
437 kfree(*heap);
438 *heap = NULL;
439}
440
/*
 * Tear down the device's memory-management state: drop the reserved VGA
 * buffer object, release the TTM bo device and global TTM references,
 * free any AGP memory entries, and remove the framebuffer MTRR.
 * The steps mirror, in reverse, the setup done in nouveau_mem_init().
 */
void nouveau_mem_close(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/* release the pinned VGA reservation made at init time */
	nouveau_bo_unpin(dev_priv->vga_ram);
	nouveau_bo_ref(NULL, &dev_priv->vga_ram);

	ttm_bo_device_release(&dev_priv->ttm.bdev);

	nouveau_ttm_global_release(dev_priv);

	if (drm_core_has_AGP(dev) && dev->agp &&
	    drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct drm_agp_mem *entry, *tempe;

		/* Remove AGP resources, but leave dev->agp
		   intact until drv_cleanup is called. */
		list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
			if (entry->bound)
				drm_unbind_agp(entry->memory);
			drm_free_agp(entry->memory, entry->pages);
			kfree(entry);
		}
		INIT_LIST_HEAD(&dev->agp->memory);

		if (dev->agp->acquired)
			drm_agp_release(dev);

		dev->agp->acquired = 0;
		dev->agp->enabled = 0;
	}

	if (dev_priv->fb_mtrr) {
		/* undo the write-combining MTRR added over the FB BAR (1) */
		drm_mtrr_del(dev_priv->fb_mtrr, drm_get_resource_start(dev, 1),
			     drm_get_resource_len(dev, 1), DRM_MTRR_WC);
		dev_priv->fb_mtrr = 0;
	}
}
479
480static uint32_t
481nouveau_mem_detect_nv04(struct drm_device *dev)
482{
483 uint32_t boot0 = nv_rd32(dev, NV03_BOOT_0);
484
485 if (boot0 & 0x00000100)
486 return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024 * 1024;
487
488 switch (boot0 & NV03_BOOT_0_RAM_AMOUNT) {
489 case NV04_BOOT_0_RAM_AMOUNT_32MB:
490 return 32 * 1024 * 1024;
491 case NV04_BOOT_0_RAM_AMOUNT_16MB:
492 return 16 * 1024 * 1024;
493 case NV04_BOOT_0_RAM_AMOUNT_8MB:
494 return 8 * 1024 * 1024;
495 case NV04_BOOT_0_RAM_AMOUNT_4MB:
496 return 4 * 1024 * 1024;
497 }
498
499 return 0;
500}
501
502static uint32_t
503nouveau_mem_detect_nforce(struct drm_device *dev)
504{
505 struct drm_nouveau_private *dev_priv = dev->dev_private;
506 struct pci_dev *bridge;
507 uint32_t mem;
508
509 bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
510 if (!bridge) {
511 NV_ERROR(dev, "no bridge device\n");
512 return 0;
513 }
514
515 if (dev_priv->flags & NV_NFORCE) {
516 pci_read_config_dword(bridge, 0x7C, &mem);
517 return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024;
518 } else
519 if (dev_priv->flags & NV_NFORCE2) {
520 pci_read_config_dword(bridge, 0x84, &mem);
521 return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024;
522 }
523
524 NV_ERROR(dev, "impossible!\n");
525 return 0;
526}
527
/* returns the amount of FB ram in bytes */
int
nouveau_mem_detect(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/* Detection method depends on the chip family: NV04 reads the
	 * BOOT_0 straps, nForce IGPs query the PCI host bridge, and
	 * everything newer reports the size in NV04_FIFO_DATA. */
	if (dev_priv->card_type == NV_04) {
		dev_priv->vram_size = nouveau_mem_detect_nv04(dev);
	} else
	if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) {
		dev_priv->vram_size = nouveau_mem_detect_nforce(dev);
	} else {
		dev_priv->vram_size = nv_rd32(dev, NV04_FIFO_DATA);
		dev_priv->vram_size &= NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK;
		/* chipsets 0xaa/0xac are IGPs using stolen system memory;
		 * 0x100e10 holds its base in units of 4KiB pages */
		if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac)
			dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10) << 12;
	}

	NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20));
	if (dev_priv->vram_sys_base) {
		NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
			dev_priv->vram_sys_base);
	}

	/* a detected size of zero means detection failed */
	if (dev_priv->vram_size)
		return 0;
	return -ENOMEM;
}
556
#if __OS_HAS_AGP
/*
 * Reset the card's AGP interface.  The sequence is order-sensitive:
 * save the relevant PCI config mirrors, clear busmastering and the
 * SBA/AGP enable bits, power-cycle PGRAPH if it was running, then
 * restore the saved values — the restore itself re-enables AGP from a
 * clean state.
 */
static void nouveau_mem_reset_agp(struct drm_device *dev)
{
	uint32_t saved_pci_nv_1, saved_pci_nv_19, pmc_enable;

	saved_pci_nv_1 = nv_rd32(dev, NV04_PBUS_PCI_NV_1);
	saved_pci_nv_19 = nv_rd32(dev, NV04_PBUS_PCI_NV_19);

	/* clear busmaster bit */
	nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~0x4);
	/* clear SBA and AGP bits */
	nv_wr32(dev, NV04_PBUS_PCI_NV_19, saved_pci_nv_19 & 0xfffff0ff);

	/* power cycle pgraph, if enabled */
	pmc_enable = nv_rd32(dev, NV03_PMC_ENABLE);
	if (pmc_enable & NV_PMC_ENABLE_PGRAPH) {
		nv_wr32(dev, NV03_PMC_ENABLE,
			pmc_enable & ~NV_PMC_ENABLE_PGRAPH);
		nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
				 NV_PMC_ENABLE_PGRAPH);
	}

	/* and restore (gives effect of resetting AGP) */
	nv_wr32(dev, NV04_PBUS_PCI_NV_19, saved_pci_nv_19);
	nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1);
}
#endif
584
/*
 * Bring up the AGP aperture for use as GART.  Honours the "noagp"
 * module option, resets the AGP interface, acquires it if not already
 * held, enables it with the mode reported by the bridge, and records
 * the aperture base/size in dev_priv->gart_info.
 *
 * Returns 0 on success (including when AGP support is compiled out or
 * disabled by the user) or a negative errno from the DRM AGP helpers.
 */
int
nouveau_mem_init_agp(struct drm_device *dev)
{
#if __OS_HAS_AGP
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_agp_info info;
	struct drm_agp_mode mode;
	int ret;

	/* user explicitly disabled AGP */
	if (nouveau_noagp)
		return 0;

	/* reset must happen before acquiring/enabling the interface */
	nouveau_mem_reset_agp(dev);

	if (!dev->agp->acquired) {
		ret = drm_agp_acquire(dev);
		if (ret) {
			NV_ERROR(dev, "Unable to acquire AGP: %d\n", ret);
			return ret;
		}
	}

	ret = drm_agp_info(dev, &info);
	if (ret) {
		NV_ERROR(dev, "Unable to get AGP info: %d\n", ret);
		return ret;
	}

	/* see agp.h for the AGPSTAT_* modes available */
	mode.mode = info.mode;
	ret = drm_agp_enable(dev, mode);
	if (ret) {
		NV_ERROR(dev, "Unable to enable AGP: %d\n", ret);
		return ret;
	}

	dev_priv->gart_info.type = NOUVEAU_GART_AGP;
	dev_priv->gart_info.aper_base = info.aperture_base;
	dev_priv->gart_info.aper_size = info.aperture_size;
#endif
	return 0;
}
627
/*
 * Initialise memory management for the device: DMA mask, TTM (global
 * state, bo device, VRAM and TT memory regions), a small VGA memory
 * reservation, the GART backend (AGP where available, SGDMA otherwise)
 * and a write-combining MTRR over the framebuffer BAR.
 *
 * Expects nouveau_mem_detect() to have filled in dev_priv->vram_size.
 * Returns 0 on success or a negative errno.
 */
int
nouveau_mem_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
	int ret, dma_bits = 32;

	dev_priv->fb_phys = drm_get_resource_start(dev, 1);
	dev_priv->gart_info.type = NOUVEAU_GART_NONE;

	/* NV50+ parts can address 40 bits if the platform allows it */
	if (dev_priv->card_type >= NV_50 &&
	    pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
		dma_bits = 40;

	ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
	if (ret) {
		NV_ERROR(dev, "Error setting DMA mask: %d\n", ret);
		return ret;
	}

	ret = nouveau_ttm_global_init(dev_priv);
	if (ret)
		return ret;

	/* last arg: need DMA32 pages only when the mask is 32-bit */
	ret = ttm_bo_device_init(&dev_priv->ttm.bdev,
				 dev_priv->ttm.bo_global_ref.ref.object,
				 &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
				 dma_bits <= 32 ? true : false);
	if (ret) {
		NV_ERROR(dev, "Error initialising bo driver: %d\n", ret);
		return ret;
	}

	INIT_LIST_HEAD(&dev_priv->ttm.bo_list);
	spin_lock_init(&dev_priv->ttm.bo_list_lock);
	spin_lock_init(&dev_priv->tile.lock);

	/* mappable pages are capped by the size of the FB BAR (1) */
	dev_priv->fb_available_size = dev_priv->vram_size;
	dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
	if (dev_priv->fb_mappable_pages > drm_get_resource_len(dev, 1))
		dev_priv->fb_mappable_pages = drm_get_resource_len(dev, 1);
	dev_priv->fb_mappable_pages >>= PAGE_SHIFT;

	/* remove reserved space at end of vram from available amount */
	dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
	dev_priv->fb_aper_free = dev_priv->fb_available_size;

	/* mappable vram */
	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
			     dev_priv->fb_available_size >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(dev, "Failed VRAM mm init: %d\n", ret);
		return ret;
	}

	/* reserve 256KiB of VRAM for VGA; failure is non-fatal */
	ret = nouveau_bo_new(dev, NULL, 256*1024, 0, TTM_PL_FLAG_VRAM,
			     0, 0, true, true, &dev_priv->vga_ram);
	if (ret == 0)
		ret = nouveau_bo_pin(dev_priv->vga_ram, TTM_PL_FLAG_VRAM);
	if (ret) {
		NV_WARN(dev, "failed to reserve VGA memory\n");
		nouveau_bo_ref(NULL, &dev_priv->vga_ram);
	}

	/* GART */
#if !defined(__powerpc__) && !defined(__ia64__)
	if (drm_device_is_agp(dev) && dev->agp) {
		ret = nouveau_mem_init_agp(dev);
		if (ret)
			NV_ERROR(dev, "Error initialising AGP: %d\n", ret);
	}
#endif

	/* fall back to SGDMA when AGP init didn't claim the GART */
	if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) {
		ret = nouveau_sgdma_init(dev);
		if (ret) {
			NV_ERROR(dev, "Error initialising PCI(E): %d\n", ret);
			return ret;
		}
	}

	NV_INFO(dev, "%d MiB GART (aperture)\n",
		(int)(dev_priv->gart_info.aper_size >> 20));
	dev_priv->gart_info.aper_free = dev_priv->gart_info.aper_size;

	ret = ttm_bo_init_mm(bdev, TTM_PL_TT,
			     dev_priv->gart_info.aper_size >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(dev, "Failed TT mm init: %d\n", ret);
		return ret;
	}

	dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1),
					 drm_get_resource_len(dev, 1),
					 DRM_MTRR_WC);

	return 0;
}
726
727
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
new file mode 100644
index 000000000000..9537f3e30115
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -0,0 +1,200 @@
1/*
2 * Copyright (C) 2007 Ben Skeggs.
3 *
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sublicense, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial
16 * portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 */
27
28#include "drmP.h"
29#include "drm.h"
30#include "nouveau_drv.h"
31
/*
 * Allocate, pin and map the per-channel notifier buffer object, then
 * set up a sub-allocation heap over it from which individual notifier
 * blocks are carved by nouveau_notifier_alloc().
 *
 * The BO lives in VRAM when the "vram_notify" module option is set,
 * otherwise in GART (TTM_PL_TT).  On any failure after the GEM object
 * was created, the object is released through its GEM reference.
 */
int
nouveau_notifier_init_channel(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct nouveau_bo *ntfy = NULL;
	uint32_t flags;
	int ret;

	if (nouveau_vram_notify)
		flags = TTM_PL_FLAG_VRAM;
	else
		flags = TTM_PL_FLAG_TT;

	ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags,
			      0, 0x0000, false, true, &ntfy);
	if (ret)
		return ret;

	ret = nouveau_bo_pin(ntfy, flags);
	if (ret)
		goto out_err;

	ret = nouveau_bo_map(ntfy);
	if (ret)
		goto out_err;

	/* heap spans the whole BO; blocks are handed out from it later */
	ret = nouveau_mem_init_heap(&chan->notifier_heap, 0, ntfy->bo.mem.size);
	if (ret)
		goto out_err;

	chan->notifier_bo = ntfy;
out_err:
	if (ret)
		drm_gem_object_unreference_unlocked(ntfy->gem);

	return ret;
}
69
/*
 * Release a channel's notifier resources: unmap and unpin the backing
 * BO (unpin is done under struct_mutex), drop its GEM reference and
 * destroy the sub-allocation heap.  No-op if the channel never had a
 * notifier buffer.
 */
void
nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;

	if (!chan->notifier_bo)
		return;

	nouveau_bo_unmap(chan->notifier_bo);
	mutex_lock(&dev->struct_mutex);
	nouveau_bo_unpin(chan->notifier_bo);
	mutex_unlock(&dev->struct_mutex);
	drm_gem_object_unreference_unlocked(chan->notifier_bo->gem);
	nouveau_mem_takedown(&chan->notifier_heap);
}
85
86static void
87nouveau_notifier_gpuobj_dtor(struct drm_device *dev,
88 struct nouveau_gpuobj *gpuobj)
89{
90 NV_DEBUG(dev, "\n");
91
92 if (gpuobj->priv)
93 nouveau_mem_free_block(gpuobj->priv);
94}
95
/*
 * Carve a "size"-byte notifier block out of the channel's notifier heap
 * and wrap it in an NV_CLASS_DMA_IN_MEMORY ctxdma the GPU can reach
 * under "handle".  On success *b_offset receives the block's offset
 * within the notifier buffer.
 *
 * Returns 0 on success, -EINVAL if the channel has no notifier heap or
 * the backing BO is in an unexpected placement, -ENOMEM if the heap is
 * full, or an error from ctxdma creation/referencing.
 */
int
nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
		       int size, uint32_t *b_offset)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *nobj = NULL;
	struct mem_block *mem;
	uint32_t offset;
	int target, ret;

	if (!chan->notifier_heap) {
		NV_ERROR(dev, "Channel %d doesn't have a notifier heap!\n",
			 chan->id);
		return -EINVAL;
	}

	/* (struct drm_file *)-2 — presumably a sentinel marking a
	 * kernel-internal allocation; TODO confirm against the heap code */
	mem = nouveau_mem_alloc_block(chan->notifier_heap, size, 0,
				      (struct drm_file *)-2, 0);
	if (!mem) {
		NV_ERROR(dev, "Channel %d notifier block full\n", chan->id);
		return -ENOMEM;
	}

	/* translate the BO's current placement into a DMA target and the
	 * base offset the ctxdma must point at */
	offset = chan->notifier_bo->bo.mem.mm_node->start << PAGE_SHIFT;
	if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM) {
		target = NV_DMA_TARGET_VIDMEM;
	} else
	if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_TT) {
		if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA &&
		    dev_priv->card_type < NV_50) {
			/* pre-NV50 SGDMA: resolve to the real PCI address */
			ret = nouveau_sgdma_get_page(dev, offset, &offset);
			if (ret)
				return ret;
			target = NV_DMA_TARGET_PCI;
		} else {
			target = NV_DMA_TARGET_AGP;
			/* NV50+ addresses GART through the VM window */
			if (dev_priv->card_type >= NV_50)
				offset += dev_priv->vm_gart_base;
		}
	} else {
		NV_ERROR(dev, "Bad DMA target, mem_type %d!\n",
			 chan->notifier_bo->bo.mem.mem_type);
		return -EINVAL;
	}
	offset += mem->start;

	ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset,
				     mem->size, NV_DMA_ACCESS_RW, target,
				     &nobj);
	if (ret) {
		nouveau_mem_free_block(mem);
		NV_ERROR(dev, "Error creating notifier ctxdma: %d\n", ret);
		return ret;
	}
	/* dtor returns the heap block when the gpuobj is destroyed */
	nobj->dtor = nouveau_notifier_gpuobj_dtor;
	nobj->priv = mem;

	ret = nouveau_gpuobj_ref_add(dev, chan, handle, nobj, NULL);
	if (ret) {
		nouveau_gpuobj_del(dev, &nobj);
		nouveau_mem_free_block(mem);
		NV_ERROR(dev, "Error referencing notifier ctxdma: %d\n", ret);
		return ret;
	}

	*b_offset = mem->start;
	return 0;
}
165
166int
167nouveau_notifier_offset(struct nouveau_gpuobj *nobj, uint32_t *poffset)
168{
169 if (!nobj || nobj->dtor != nouveau_notifier_gpuobj_dtor)
170 return -EINVAL;
171
172 if (poffset) {
173 struct mem_block *mem = nobj->priv;
174
175 if (*poffset >= mem->size)
176 return false;
177
178 *poffset += mem->start;
179 }
180
181 return 0;
182}
183
184int
185nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,
186 struct drm_file *file_priv)
187{
188 struct drm_nouveau_notifierobj_alloc *na = data;
189 struct nouveau_channel *chan;
190 int ret;
191
192 NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
193 NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(na->channel, file_priv, chan);
194
195 ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset);
196 if (ret)
197 return ret;
198
199 return 0;
200}
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
new file mode 100644
index 000000000000..e7c100ba63a1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -0,0 +1,1295 @@
1/*
2 * Copyright (C) 2006 Ben Skeggs.
3 *
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sublicense, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial
16 * portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 */
27
28/*
29 * Authors:
30 * Ben Skeggs <darktama@iinet.net.au>
31 */
32
33#include "drmP.h"
34#include "drm.h"
35#include "nouveau_drv.h"
36#include "nouveau_drm.h"
37
38/* NVidia uses context objects to drive drawing operations.
39
40 Context objects can be selected into 8 subchannels in the FIFO,
41 and then used via DMA command buffers.
42
43 A context object is referenced by a user defined handle (CARD32). The HW
44 looks up graphics objects in a hash table in the instance RAM.
45
46 An entry in the hash table consists of 2 CARD32. The first CARD32 contains
47 the handle, the second one a bitfield, that contains the address of the
48 object in instance RAM.
49
50 The format of the second CARD32 seems to be:
51
52 NV4 to NV30:
53
54 15: 0 instance_addr >> 4
55 17:16 engine (here uses 1 = graphics)
56 28:24 channel id (here uses 0)
57 31 valid (use 1)
58
59 NV40:
60
61 15: 0 instance_addr >> 4 (maybe 19-0)
62 21:20 engine (here uses 1 = graphics)
63 I'm unsure about the other bits, but using 0 seems to work.
64
65 The key into the hash table depends on the object handle and channel id and
66 is given as:
67*/
68static uint32_t
69nouveau_ramht_hash_handle(struct drm_device *dev, int channel, uint32_t handle)
70{
71 struct drm_nouveau_private *dev_priv = dev->dev_private;
72 uint32_t hash = 0;
73 int i;
74
75 NV_DEBUG(dev, "ch%d handle=0x%08x\n", channel, handle);
76
77 for (i = 32; i > 0; i -= dev_priv->ramht_bits) {
78 hash ^= (handle & ((1 << dev_priv->ramht_bits) - 1));
79 handle >>= dev_priv->ramht_bits;
80 }
81
82 if (dev_priv->card_type < NV_50)
83 hash ^= channel << (dev_priv->ramht_bits - 4);
84 hash <<= 3;
85
86 NV_DEBUG(dev, "hash=0x%08x\n", hash);
87 return hash;
88}
89
90static int
91nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht,
92 uint32_t offset)
93{
94 struct drm_nouveau_private *dev_priv = dev->dev_private;
95 uint32_t ctx = nv_ro32(dev, ramht, (offset + 4)/4);
96
97 if (dev_priv->card_type < NV_40)
98 return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0);
99 return (ctx != 0);
100}
101
/*
 * Insert "ref" into its channel's RAMHT so the hardware can resolve the
 * handle.  Collisions are resolved by linear probing in 8-byte steps,
 * wrapping at ramht_size; the context-word layout depends on the card
 * generation (see the format comment above this file's hash function).
 *
 * Returns 0 on success, -EINVAL if the channel has no hash table, or
 * -ENOMEM when every slot has been probed without finding a free one.
 */
static int
nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	struct nouveau_channel *chan = ref->channel;
	struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL;
	uint32_t ctx, co, ho;

	if (!ramht) {
		NV_ERROR(dev, "No hash table!\n");
		return -EINVAL;
	}

	/* build the generation-specific context word */
	if (dev_priv->card_type < NV_40) {
		ctx = NV_RAMHT_CONTEXT_VALID | (ref->instance >> 4) |
		      (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
		      (ref->gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
	} else
	if (dev_priv->card_type < NV_50) {
		ctx = (ref->instance >> 4) |
		      (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
		      (ref->gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
	} else {
		if (ref->gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
			ctx = (ref->instance << 10) | 2;
		} else {
			ctx = (ref->instance >> 4) |
			      ((ref->gpuobj->engine <<
				NV40_RAMHT_CONTEXT_ENGINE_SHIFT));
		}
	}

	/* probe from the hash slot until a free entry or full wrap; all
	 * RAMHT accesses happen inside a prepare/finish_access bracket */
	instmem->prepare_access(dev, true);
	co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle);
	do {
		if (!nouveau_ramht_entry_valid(dev, ramht, co)) {
			NV_DEBUG(dev,
				 "insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
				 chan->id, co, ref->handle, ctx);
			nv_wo32(dev, ramht, (co + 0)/4, ref->handle);
			nv_wo32(dev, ramht, (co + 4)/4, ctx);

			list_add_tail(&ref->list, &chan->ramht_refs);
			instmem->finish_access(dev);
			return 0;
		}
		NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n",
			 chan->id, co, nv_ro32(dev, ramht, co/4));

		co += 8;
		if (co >= dev_priv->ramht_size)
			co = 0;
	} while (co != ho);
	instmem->finish_access(dev);

	NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id);
	return -ENOMEM;
}
161
162static void
163nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
164{
165 struct drm_nouveau_private *dev_priv = dev->dev_private;
166 struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
167 struct nouveau_channel *chan = ref->channel;
168 struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL;
169 uint32_t co, ho;
170
171 if (!ramht) {
172 NV_ERROR(dev, "No hash table!\n");
173 return;
174 }
175
176 instmem->prepare_access(dev, true);
177 co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle);
178 do {
179 if (nouveau_ramht_entry_valid(dev, ramht, co) &&
180 (ref->handle == nv_ro32(dev, ramht, (co/4)))) {
181 NV_DEBUG(dev,
182 "remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
183 chan->id, co, ref->handle,
184 nv_ro32(dev, ramht, (co + 4)));
185 nv_wo32(dev, ramht, (co + 0)/4, 0x00000000);
186 nv_wo32(dev, ramht, (co + 4)/4, 0x00000000);
187
188 list_del(&ref->list);
189 instmem->finish_access(dev);
190 return;
191 }
192
193 co += 8;
194 if (co >= dev_priv->ramht_size)
195 co = 0;
196 } while (co != ho);
197 list_del(&ref->list);
198 instmem->finish_access(dev);
199
200 NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n",
201 chan->id, ref->handle);
202}
203
204int
205nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
206 uint32_t size, int align, uint32_t flags,
207 struct nouveau_gpuobj **gpuobj_ret)
208{
209 struct drm_nouveau_private *dev_priv = dev->dev_private;
210 struct nouveau_engine *engine = &dev_priv->engine;
211 struct nouveau_gpuobj *gpuobj;
212 struct mem_block *pramin = NULL;
213 int ret;
214
215 NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
216 chan ? chan->id : -1, size, align, flags);
217
218 if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL)
219 return -EINVAL;
220
221 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
222 if (!gpuobj)
223 return -ENOMEM;
224 NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
225 gpuobj->flags = flags;
226 gpuobj->im_channel = chan;
227
228 list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
229
230 /* Choose between global instmem heap, and per-channel private
231 * instmem heap. On <NV50 allow requests for private instmem
232 * to be satisfied from global heap if no per-channel area
233 * available.
234 */
235 if (chan) {
236 if (chan->ramin_heap) {
237 NV_DEBUG(dev, "private heap\n");
238 pramin = chan->ramin_heap;
239 } else
240 if (dev_priv->card_type < NV_50) {
241 NV_DEBUG(dev, "global heap fallback\n");
242 pramin = dev_priv->ramin_heap;
243 }
244 } else {
245 NV_DEBUG(dev, "global heap\n");
246 pramin = dev_priv->ramin_heap;
247 }
248
249 if (!pramin) {
250 NV_ERROR(dev, "No PRAMIN heap!\n");
251 return -EINVAL;
252 }
253
254 if (!chan) {
255 ret = engine->instmem.populate(dev, gpuobj, &size);
256 if (ret) {
257 nouveau_gpuobj_del(dev, &gpuobj);
258 return ret;
259 }
260 }
261
262 /* Allocate a chunk of the PRAMIN aperture */
263 gpuobj->im_pramin = nouveau_mem_alloc_block(pramin, size,
264 drm_order(align),
265 (struct drm_file *)-2, 0);
266 if (!gpuobj->im_pramin) {
267 nouveau_gpuobj_del(dev, &gpuobj);
268 return -ENOMEM;
269 }
270
271 if (!chan) {
272 ret = engine->instmem.bind(dev, gpuobj);
273 if (ret) {
274 nouveau_gpuobj_del(dev, &gpuobj);
275 return ret;
276 }
277 }
278
279 if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
280 int i;
281
282 engine->instmem.prepare_access(dev, true);
283 for (i = 0; i < gpuobj->im_pramin->size; i += 4)
284 nv_wo32(dev, gpuobj, i/4, 0);
285 engine->instmem.finish_access(dev);
286 }
287
288 *gpuobj_ret = gpuobj;
289 return 0;
290}
291
/*
 * Earliest gpuobj setup: just initialise the global object list so
 * later allocations can link into it.  Always succeeds.
 */
int
nouveau_gpuobj_early_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	NV_DEBUG(dev, "\n");

	INIT_LIST_HEAD(&dev_priv->gpuobj_list);

	return 0;
}
303
304int
305nouveau_gpuobj_init(struct drm_device *dev)
306{
307 struct drm_nouveau_private *dev_priv = dev->dev_private;
308 int ret;
309
310 NV_DEBUG(dev, "\n");
311
312 if (dev_priv->card_type < NV_50) {
313 ret = nouveau_gpuobj_new_fake(dev,
314 dev_priv->ramht_offset, ~0, dev_priv->ramht_size,
315 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ALLOW_NO_REFS,
316 &dev_priv->ramht, NULL);
317 if (ret)
318 return ret;
319 }
320
321 return 0;
322}
323
/*
 * gpuobj subsystem teardown: release the fake RAMHT object created by
 * nouveau_gpuobj_init() (a no-op pointer-wise on NV50+, where ramht
 * was never set here).
 */
void
nouveau_gpuobj_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	NV_DEBUG(dev, "\n");

	nouveau_gpuobj_del(dev, &dev_priv->ramht);
}
333
334void
335nouveau_gpuobj_late_takedown(struct drm_device *dev)
336{
337 struct drm_nouveau_private *dev_priv = dev->dev_private;
338 struct nouveau_gpuobj *gpuobj = NULL;
339 struct list_head *entry, *tmp;
340
341 NV_DEBUG(dev, "\n");
342
343 list_for_each_safe(entry, tmp, &dev_priv->gpuobj_list) {
344 gpuobj = list_entry(entry, struct nouveau_gpuobj, list);
345
346 NV_ERROR(dev, "gpuobj %p still exists at takedown, refs=%d\n",
347 gpuobj, gpuobj->refcount);
348 gpuobj->refcount = 0;
349 nouveau_gpuobj_del(dev, &gpuobj);
350 }
351}
352
/*
 * Destroy a gpuobj and clear the caller's pointer.  The object must
 * have no outstanding references.  Optionally zeroes its PRAMIN area
 * (NVOBJ_FLAG_ZERO_FREE), runs any destructor, releases the instmem
 * backing (unless the object is fake), frees the PRAMIN block and
 * unlinks the object from the global list.
 *
 * Returns 0 on success, -EINVAL on bad arguments or a still-referenced
 * object.
 */
int
nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	struct nouveau_gpuobj *gpuobj;
	int i;

	NV_DEBUG(dev, "gpuobj %p\n", pgpuobj ? *pgpuobj : NULL);

	if (!dev_priv || !pgpuobj || !(*pgpuobj))
		return -EINVAL;
	gpuobj = *pgpuobj;

	if (gpuobj->refcount != 0) {
		NV_ERROR(dev, "gpuobj refcount is %d\n", gpuobj->refcount);
		return -EINVAL;
	}

	/* scrub instance memory on request before giving it back */
	if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
		engine->instmem.prepare_access(dev, true);
		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
			nv_wo32(dev, gpuobj, i/4, 0);
		engine->instmem.finish_access(dev);
	}

	if (gpuobj->dtor)
		gpuobj->dtor(dev, gpuobj);

	/* fake objects only borrowed their backing; don't clear it */
	if (gpuobj->im_backing && !(gpuobj->flags & NVOBJ_FLAG_FAKE))
		engine->instmem.clear(dev, gpuobj);

	if (gpuobj->im_pramin) {
		/* fake objects kzalloc'd their mem_block directly */
		if (gpuobj->flags & NVOBJ_FLAG_FAKE)
			kfree(gpuobj->im_pramin);
		else
			nouveau_mem_free_block(gpuobj->im_pramin);
	}

	list_del(&gpuobj->list);

	*pgpuobj = NULL;
	kfree(gpuobj);
	return 0;
}
398
/*
 * Resolve the instance address of a gpuobj as seen by "chan" (or
 * globally when chan is NULL).  Pre-NV50 simply uses the PRAMIN start;
 * on NV50 the address is channel-relative or derived from the VRAM
 * backing, depending on where the object lives.
 *
 * Returns 0 with *inst filled in, or -EINVAL on a channel mismatch or
 * a global object without backing.
 */
static int
nouveau_gpuobj_instance_get(struct drm_device *dev,
			    struct nouveau_channel *chan,
			    struct nouveau_gpuobj *gpuobj, uint32_t *inst)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *cpramin;

	/* <NV50 use PRAMIN address everywhere */
	if (dev_priv->card_type < NV_50) {
		*inst = gpuobj->im_pramin->start;
		return 0;
	}

	/* an object allocated for one channel can't be resolved for another */
	if (chan && gpuobj->im_channel != chan) {
		NV_ERROR(dev, "Channel mismatch: obj %d, ref %d\n",
			 gpuobj->im_channel->id, chan->id);
		return -EINVAL;
	}

	/* NV50 channel-local instance */
	if (chan) {
		cpramin = chan->ramin->gpuobj;
		*inst = gpuobj->im_pramin->start - cpramin->im_pramin->start;
		return 0;
	}

	/* NV50 global (VRAM) instance */
	if (!gpuobj->im_channel) {
		/* ...from global heap */
		if (!gpuobj->im_backing) {
			NV_ERROR(dev, "AII, no VRAM backing gpuobj\n");
			return -EINVAL;
		}
		*inst = gpuobj->im_backing_start;
		return 0;
	} else {
		/* ...from local heap */
		cpramin = gpuobj->im_channel->ramin->gpuobj;
		*inst = cpramin->im_backing_start +
			(gpuobj->im_pramin->start - cpramin->im_pramin->start);
		return 0;
	}

	return -EINVAL;
}
445
/*
 * Take a reference on a gpuobj.  With a channel and no ref_ret, the
 * reference is published in the channel's RAMHT under "handle" so the
 * hardware can find it; with ref_ret set, an anonymous (handle ~0)
 * reference is returned to the caller instead.  Either way the object's
 * refcount is bumped.
 *
 * Returns 0 on success, -EINVAL on bad arguments, -ENOMEM on
 * allocation failure, or an error from RAMHT insertion/instance lookup.
 */
int
nouveau_gpuobj_ref_add(struct drm_device *dev, struct nouveau_channel *chan,
		       uint32_t handle, struct nouveau_gpuobj *gpuobj,
		       struct nouveau_gpuobj_ref **ref_ret)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj_ref *ref;
	uint32_t instance;
	int ret;

	NV_DEBUG(dev, "ch%d h=0x%08x gpuobj=%p\n",
		 chan ? chan->id : -1, handle, gpuobj);

	if (!dev_priv || !gpuobj || (ref_ret && *ref_ret != NULL))
		return -EINVAL;

	/* a reference must go somewhere: RAMHT (chan) or caller (ref_ret) */
	if (!chan && !ref_ret)
		return -EINVAL;

	if (gpuobj->engine == NVOBJ_ENGINE_SW && !gpuobj->im_pramin) {
		/* sw object */
		instance = 0x40;
	} else {
		ret = nouveau_gpuobj_instance_get(dev, chan, gpuobj, &instance);
		if (ret)
			return ret;
	}

	ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (!ref)
		return -ENOMEM;
	INIT_LIST_HEAD(&ref->list);
	ref->gpuobj = gpuobj;
	ref->channel = chan;
	ref->instance = instance;

	if (!ref_ret) {
		ref->handle = handle;

		ret = nouveau_ramht_insert(dev, ref);
		if (ret) {
			kfree(ref);
			return ret;
		}
	} else {
		/* anonymous reference: handle ~0 means "not in RAMHT" */
		ref->handle = ~0;
		*ref_ret = ref;
	}

	ref->gpuobj->refcount++;
	return 0;
}
498
/*
 * Drop a gpuobj reference and clear the caller's pointer.  RAMHT-backed
 * references (handle != ~0) are removed from the hash table first.
 * When the last reference goes away the object itself is destroyed,
 * unless it was created with NVOBJ_FLAG_ALLOW_NO_REFS.
 *
 * Returns 0 on success, -EINVAL on bad arguments.
 */
int nouveau_gpuobj_ref_del(struct drm_device *dev, struct nouveau_gpuobj_ref **pref)
{
	struct nouveau_gpuobj_ref *ref;

	NV_DEBUG(dev, "ref %p\n", pref ? *pref : NULL);

	if (!dev || !pref || *pref == NULL)
		return -EINVAL;
	ref = *pref;

	/* handle ~0 marks an anonymous ref that was never in RAMHT */
	if (ref->handle != ~0)
		nouveau_ramht_remove(dev, ref);

	if (ref->gpuobj) {
		ref->gpuobj->refcount--;

		if (ref->gpuobj->refcount == 0) {
			if (!(ref->gpuobj->flags & NVOBJ_FLAG_ALLOW_NO_REFS))
				nouveau_gpuobj_del(dev, &ref->gpuobj);
		}
	}

	*pref = NULL;
	kfree(ref);
	return 0;
}
525
526int
527nouveau_gpuobj_new_ref(struct drm_device *dev,
528 struct nouveau_channel *oc, struct nouveau_channel *rc,
529 uint32_t handle, uint32_t size, int align,
530 uint32_t flags, struct nouveau_gpuobj_ref **ref)
531{
532 struct nouveau_gpuobj *gpuobj = NULL;
533 int ret;
534
535 ret = nouveau_gpuobj_new(dev, oc, size, align, flags, &gpuobj);
536 if (ret)
537 return ret;
538
539 ret = nouveau_gpuobj_ref_add(dev, rc, handle, gpuobj, ref);
540 if (ret) {
541 nouveau_gpuobj_del(dev, &gpuobj);
542 return ret;
543 }
544
545 return 0;
546}
547
548int
549nouveau_gpuobj_ref_find(struct nouveau_channel *chan, uint32_t handle,
550 struct nouveau_gpuobj_ref **ref_ret)
551{
552 struct nouveau_gpuobj_ref *ref;
553 struct list_head *entry, *tmp;
554
555 list_for_each_safe(entry, tmp, &chan->ramht_refs) {
556 ref = list_entry(entry, struct nouveau_gpuobj_ref, list);
557
558 if (ref->handle == handle) {
559 if (ref_ret)
560 *ref_ret = ref;
561 return 0;
562 }
563 }
564
565 return -EINVAL;
566}
567
568int
569nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset,
570 uint32_t b_offset, uint32_t size,
571 uint32_t flags, struct nouveau_gpuobj **pgpuobj,
572 struct nouveau_gpuobj_ref **pref)
573{
574 struct drm_nouveau_private *dev_priv = dev->dev_private;
575 struct nouveau_gpuobj *gpuobj = NULL;
576 int i;
577
578 NV_DEBUG(dev,
579 "p_offset=0x%08x b_offset=0x%08x size=0x%08x flags=0x%08x\n",
580 p_offset, b_offset, size, flags);
581
582 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
583 if (!gpuobj)
584 return -ENOMEM;
585 NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
586 gpuobj->im_channel = NULL;
587 gpuobj->flags = flags | NVOBJ_FLAG_FAKE;
588
589 list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
590
591 if (p_offset != ~0) {
592 gpuobj->im_pramin = kzalloc(sizeof(struct mem_block),
593 GFP_KERNEL);
594 if (!gpuobj->im_pramin) {
595 nouveau_gpuobj_del(dev, &gpuobj);
596 return -ENOMEM;
597 }
598 gpuobj->im_pramin->start = p_offset;
599 gpuobj->im_pramin->size = size;
600 }
601
602 if (b_offset != ~0) {
603 gpuobj->im_backing = (struct nouveau_bo *)-1;
604 gpuobj->im_backing_start = b_offset;
605 }
606
607 if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
608 dev_priv->engine.instmem.prepare_access(dev, true);
609 for (i = 0; i < gpuobj->im_pramin->size; i += 4)
610 nv_wo32(dev, gpuobj, i/4, 0);
611 dev_priv->engine.instmem.finish_access(dev);
612 }
613
614 if (pref) {
615 i = nouveau_gpuobj_ref_add(dev, NULL, 0, gpuobj, pref);
616 if (i) {
617 nouveau_gpuobj_del(dev, &gpuobj);
618 return i;
619 }
620 }
621
622 if (pgpuobj)
623 *pgpuobj = gpuobj;
624 return 0;
625}
626
627
628static uint32_t
629nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class)
630{
631 struct drm_nouveau_private *dev_priv = dev->dev_private;
632
633 /*XXX: dodgy hack for now */
634 if (dev_priv->card_type >= NV_50)
635 return 24;
636 if (dev_priv->card_type >= NV_40)
637 return 32;
638 return 16;
639}
640
641/*
642 DMA objects are used to reference a piece of memory in the
643 framebuffer, PCI or AGP address space. Each object is 16 bytes big
644 and looks as follows:
645
646 entry[0]
647 11:0 class (seems like I can always use 0 here)
648 12 page table present?
649 13 page entry linear?
650 15:14 access: 0 rw, 1 ro, 2 wo
651 17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP
652 31:20 dma adjust (bits 0-11 of the address)
653 entry[1]
654 dma limit (size of transfer)
655 entry[X]
656 1 0 readonly, 1 readwrite
657 31:12 dma frame address of the page (bits 12-31 of the address)
658 entry[N]
659 page table terminator, same value as the first pte, as does nvidia
660 rivatv uses 0xffffffff
661
662 Non linear page tables need a list of frame addresses afterwards,
663 the rivatv project has some info on this.
664
665 The method below creates a DMA object in instance RAM and returns a handle
666 to it that can be used to set up context objects.
667*/
/* Create a DMA context object (ctxdma) describing the memory range
 * [offset, offset+size) in the given target address space (see the
 * layout comment above).  On success *gpuobj holds the new object,
 * marked as an SW-engine object of the requested class.
 *
 * Returns 0 on success, negative errno on failure.
 */
int
nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
		       uint64_t offset, uint64_t size, int access,
		       int target, struct nouveau_gpuobj **gpuobj)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	int ret;

	NV_DEBUG(dev, "ch%d class=0x%04x offset=0x%llx size=0x%llx\n",
		 chan->id, class, offset, size);
	NV_DEBUG(dev, "access=%d target=%d\n", access, target);

	/* AGP offsets are aperture-relative; rebase to the bus address. */
	switch (target) {
	case NV_DMA_TARGET_AGP:
		offset += dev_priv->gart_info.aper_base;
		break;
	default:
		break;
	}

	ret = nouveau_gpuobj_new(dev, chan,
				 nouveau_gpuobj_class_instmem_size(dev, class),
				 16, NVOBJ_FLAG_ZERO_ALLOC |
				 NVOBJ_FLAG_ZERO_FREE, gpuobj);
	if (ret) {
		NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
		return ret;
	}

	instmem->prepare_access(dev, true);

	if (dev_priv->card_type < NV_50) {
		uint32_t frame, adjust, pte_flags = 0;

		/* pte bit 1 set => page is writeable */
		if (access != NV_DMA_ACCESS_RO)
			pte_flags |= (1<<1);
		adjust = offset & 0x00000fff;	/* sub-page offset -> entry[0] bits 31:20 */
		frame = offset & ~0x00000fff;	/* page-aligned frame address */

		/* entry[0]: class + access/target + adjust, with page table
		 * present (bit 12) and linear (bit 13) set. */
		nv_wo32(dev, *gpuobj, 0, ((1<<12) | (1<<13) |
				(adjust << 20) |
				 (access << 14) |
				 (target << 16) |
				  class));
		/* entry[1]: dma limit */
		nv_wo32(dev, *gpuobj, 1, size - 1);
		/* single linear pte, written twice so the second copy acts
		 * as the page table terminator (see layout comment above). */
		nv_wo32(dev, *gpuobj, 2, frame | pte_flags);
		nv_wo32(dev, *gpuobj, 3, frame | pte_flags);
	} else {
		/* NV50+: 64-bit base/limit split across entries 1-3, with
		 * target-dependent magic in entries 0 and 5. */
		uint64_t limit = offset + size - 1;
		uint32_t flags0, flags5;

		if (target == NV_DMA_TARGET_VIDMEM) {
			flags0 = 0x00190000;
			flags5 = 0x00010000;
		} else {
			flags0 = 0x7fc00000;
			flags5 = 0x00080000;
		}

		nv_wo32(dev, *gpuobj, 0, flags0 | class);
		nv_wo32(dev, *gpuobj, 1, lower_32_bits(limit));
		nv_wo32(dev, *gpuobj, 2, lower_32_bits(offset));
		nv_wo32(dev, *gpuobj, 3, ((upper_32_bits(limit) & 0xff) << 24) |
					(upper_32_bits(offset) & 0xff));
		nv_wo32(dev, *gpuobj, 5, flags5);
	}

	instmem->finish_access(dev);

	(*gpuobj)->engine = NVOBJ_ENGINE_SW;
	(*gpuobj)->class = class;
	return 0;
}
743
744int
745nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
746 uint64_t offset, uint64_t size, int access,
747 struct nouveau_gpuobj **gpuobj,
748 uint32_t *o_ret)
749{
750 struct drm_device *dev = chan->dev;
751 struct drm_nouveau_private *dev_priv = dev->dev_private;
752 int ret;
753
754 if (dev_priv->gart_info.type == NOUVEAU_GART_AGP ||
755 (dev_priv->card_type >= NV_50 &&
756 dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) {
757 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
758 offset + dev_priv->vm_gart_base,
759 size, access, NV_DMA_TARGET_AGP,
760 gpuobj);
761 if (o_ret)
762 *o_ret = 0;
763 } else
764 if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
765 *gpuobj = dev_priv->gart_info.sg_ctxdma;
766 if (offset & ~0xffffffffULL) {
767 NV_ERROR(dev, "obj offset exceeds 32-bits\n");
768 return -EINVAL;
769 }
770 if (o_ret)
771 *o_ret = (uint32_t)offset;
772 ret = (*gpuobj != NULL) ? 0 : -EINVAL;
773 } else {
774 NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
775 return -EINVAL;
776 }
777
778 return ret;
779}
780
781/* Context objects in the instance RAM have the following structure.
782 * On NV40 they are 32 byte long, on NV30 and smaller 16 bytes.
783
784 NV4 - NV30:
785
786 entry[0]
787 11:0 class
788 12 chroma key enable
789 13 user clip enable
790 14 swizzle enable
791 17:15 patch config:
792 scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre
793 18 synchronize enable
794 19 endian: 1 big, 0 little
795 21:20 dither mode
796 23 single step enable
797 24 patch status: 0 invalid, 1 valid
798 25 context_surface 0: 1 valid
799 26 context surface 1: 1 valid
800 27 context pattern: 1 valid
801 28 context rop: 1 valid
802 29,30 context beta, beta4
803 entry[1]
804 7:0 mono format
805 15:8 color format
806 31:16 notify instance address
807 entry[2]
808 15:0 dma 0 instance address
809 31:16 dma 1 instance address
810 entry[3]
811 dma method traps
812
813 NV40:
   No idea what the exact format is. Here's what can be deduced:
815
816 entry[0]:
817 11:0 class (maybe uses more bits here?)
818 17 user clip enable
819 21:19 patch config
820 25 patch status valid ?
821 entry[1]:
822 15:0 DMA notifier (maybe 20:0)
823 entry[2]:
824 15:0 DMA 0 instance (maybe 20:0)
825 24 big endian
826 entry[3]:
827 15:0 DMA 1 instance (maybe 20:0)
828 entry[4]:
829 entry[5]:
830 set to 0?
831*/
/* Create a graphics-engine context object of the given class in the
 * channel's instance memory (layout described in the comment above).
 * On success *gpuobj holds the new object, tagged NVOBJ_ENGINE_GR.
 *
 * Returns 0 on success, negative errno on failure.
 */
int
nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
		      struct nouveau_gpuobj **gpuobj)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);

	ret = nouveau_gpuobj_new(dev, chan,
				 nouveau_gpuobj_class_instmem_size(dev, class),
				 16,
				 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
				 gpuobj);
	if (ret) {
		NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
		return ret;
	}

	dev_priv->engine.instmem.prepare_access(dev, true);
	if (dev_priv->card_type >= NV_50) {
		nv_wo32(dev, *gpuobj, 0, class);
		nv_wo32(dev, *gpuobj, 5, 0x00010000);
	} else {
		switch (class) {
		case NV_CLASS_NULL:
			nv_wo32(dev, *gpuobj, 0, 0x00001030);
			nv_wo32(dev, *gpuobj, 1, 0xFFFFFFFF);
			break;
		default:
			if (dev_priv->card_type >= NV_40) {
				nv_wo32(dev, *gpuobj, 0, class);
#ifdef __BIG_ENDIAN
				/* NV40: big-endian flag is entry[2] bit 24
				 * (see layout comment above). */
				nv_wo32(dev, *gpuobj, 2, 0x01000000);
#endif
			} else {
#ifdef __BIG_ENDIAN
				/* pre-NV40: endian flag is entry[0] bit 19. */
				nv_wo32(dev, *gpuobj, 0, class | 0x00080000);
#else
				nv_wo32(dev, *gpuobj, 0, class);
#endif
			}
		}
	}
	dev_priv->engine.instmem.finish_access(dev);

	(*gpuobj)->engine = NVOBJ_ENGINE_GR;
	(*gpuobj)->class = class;
	return 0;
}
883
884int
885nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
886 struct nouveau_gpuobj **gpuobj_ret)
887{
888 struct drm_nouveau_private *dev_priv;
889 struct nouveau_gpuobj *gpuobj;
890
891 if (!chan || !gpuobj_ret || *gpuobj_ret != NULL)
892 return -EINVAL;
893 dev_priv = chan->dev->dev_private;
894
895 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
896 if (!gpuobj)
897 return -ENOMEM;
898 gpuobj->engine = NVOBJ_ENGINE_SW;
899 gpuobj->class = class;
900
901 list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
902 *gpuobj_ret = gpuobj;
903 return 0;
904}
905
906static int
907nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
908{
909 struct drm_device *dev = chan->dev;
910 struct drm_nouveau_private *dev_priv = dev->dev_private;
911 struct nouveau_gpuobj *pramin = NULL;
912 uint32_t size;
913 uint32_t base;
914 int ret;
915
916 NV_DEBUG(dev, "ch%d\n", chan->id);
917
918 /* Base amount for object storage (4KiB enough?) */
919 size = 0x1000;
920 base = 0;
921
922 /* PGRAPH context */
923
924 if (dev_priv->card_type == NV_50) {
925 /* Various fixed table thingos */
926 size += 0x1400; /* mostly unknown stuff */
927 size += 0x4000; /* vm pd */
928 base = 0x6000;
929 /* RAMHT, not sure about setting size yet, 32KiB to be safe */
930 size += 0x8000;
931 /* RAMFC */
932 size += 0x1000;
933 /* PGRAPH context */
934 size += 0x70000;
935 }
936
937 NV_DEBUG(dev, "ch%d PRAMIN size: 0x%08x bytes, base alloc=0x%08x\n",
938 chan->id, size, base);
939 ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, size, 0x1000, 0,
940 &chan->ramin);
941 if (ret) {
942 NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret);
943 return ret;
944 }
945 pramin = chan->ramin->gpuobj;
946
947 ret = nouveau_mem_init_heap(&chan->ramin_heap,
948 pramin->im_pramin->start + base, size);
949 if (ret) {
950 NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
951 nouveau_gpuobj_ref_del(dev, &chan->ramin);
952 return ret;
953 }
954
955 return 0;
956}
957
/* Per-channel object setup: reserve PRAMIN (NV50), build the channel's
 * page directory and map the GART/VRAM page tables into it (NV50+),
 * attach RAMHT, and create the VRAM and TT ctxdmas referenced by the
 * caller-supplied handles vram_h and tt_h.
 *
 * Returns 0 on success, negative errno on failure.
 */
int
nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
			    uint32_t vram_h, uint32_t tt_h)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	struct nouveau_gpuobj *vram = NULL, *tt = NULL;
	int ret, i;

	INIT_LIST_HEAD(&chan->ramht_refs);

	NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);

	/* Reserve a block of PRAMIN for the channel
	 *XXX: maybe on <NV50 too at some point
	 */
	if (0 || dev_priv->card_type == NV_50) {
		ret = nouveau_gpuobj_channel_init_pramin(chan);
		if (ret) {
			NV_ERROR(dev, "init pramin\n");
			return ret;
		}
	}

	/* NV50 VM
	 * - Allocate per-channel page-directory
	 * - Map GART and VRAM into the channel's address space at the
	 *   locations determined during init.
	 */
	if (dev_priv->card_type >= NV_50) {
		uint32_t vm_offset, pde;

		instmem->prepare_access(dev, true);

		/* PD sits at a chipset-dependent offset within the
		 * channel's PRAMIN block. */
		vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 0x1400 : 0x200;
		vm_offset += chan->ramin->gpuobj->im_pramin->start;

		ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000,
							0, &chan->vm_pd, NULL);
		if (ret) {
			instmem->finish_access(dev);
			return ret;
		}
		/* Fill the PD with invalid entries; 0xdeadcafe marks the
		 * unused upper word of each 8-byte PDE. */
		for (i = 0; i < 0x4000; i += 8) {
			nv_wo32(dev, chan->vm_pd, (i+0)/4, 0x00000000);
			nv_wo32(dev, chan->vm_pd, (i+4)/4, 0xdeadcafe);
		}

		/* Each PDE covers 512MiB and occupies two 32-bit words;
		 * point the GART range at the global sg ctxdma. */
		pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 2;
		ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
					     dev_priv->gart_info.sg_ctxdma,
					     &chan->vm_gart_pt);
		if (ret) {
			instmem->finish_access(dev);
			return ret;
		}
		nv_wo32(dev, chan->vm_pd, pde++,
			    chan->vm_gart_pt->instance | 0x03);
		nv_wo32(dev, chan->vm_pd, pde++, 0x00000000);

		/* Map every global VRAM page table into this channel. */
		pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 2;
		for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
			ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
						     dev_priv->vm_vram_pt[i],
						     &chan->vm_vram_pt[i]);
			if (ret) {
				instmem->finish_access(dev);
				return ret;
			}

			nv_wo32(dev, chan->vm_pd, pde++,
				    chan->vm_vram_pt[i]->instance | 0x61);
			nv_wo32(dev, chan->vm_pd, pde++, 0x00000000);
		}

		instmem->finish_access(dev);
	}

	/* RAMHT: pre-NV50 shares the global hash table, NV50+ gets a
	 * private 32KiB one inside the channel's PRAMIN. */
	if (dev_priv->card_type < NV_50) {
		ret = nouveau_gpuobj_ref_add(dev, NULL, 0, dev_priv->ramht,
					     &chan->ramht);
		if (ret)
			return ret;
	} else {
		ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0,
					     0x8000, 16,
					     NVOBJ_FLAG_ZERO_ALLOC,
					     &chan->ramht);
		if (ret)
			return ret;
	}

	/* VRAM ctxdma; on NV50 this goes through the channel VM instead
	 * of addressing VRAM directly. */
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->vm_end,
					     NV_DMA_ACCESS_RW,
					     NV_DMA_TARGET_AGP, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	} else {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->fb_available_size,
					     NV_DMA_ACCESS_RW,
					     NV_DMA_TARGET_VIDMEM, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	}

	ret = nouveau_gpuobj_ref_add(dev, chan, vram_h, vram, NULL);
	if (ret) {
		NV_ERROR(dev, "Error referencing VRAM ctxdma: %d\n", ret);
		return ret;
	}

	/* TT memory ctxdma; on NV50 the VRAM ctxdma already covers the
	 * whole VM, so it is reused. */
	if (dev_priv->card_type >= NV_50) {
		tt = vram;
	} else
	if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
		ret = nouveau_gpuobj_gart_dma_new(chan, 0,
						  dev_priv->gart_info.aper_size,
						  NV_DMA_ACCESS_RW, &tt, NULL);
	} else {
		NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
		ret = -EINVAL;
	}

	if (ret) {
		NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
		return ret;
	}

	ret = nouveau_gpuobj_ref_add(dev, chan, tt_h, tt, NULL);
	if (ret) {
		NV_ERROR(dev, "Error referencing TT ctxdma: %d\n", ret);
		return ret;
	}

	return 0;
}
1105
1106void
1107nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
1108{
1109 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
1110 struct drm_device *dev = chan->dev;
1111 struct list_head *entry, *tmp;
1112 struct nouveau_gpuobj_ref *ref;
1113 int i;
1114
1115 NV_DEBUG(dev, "ch%d\n", chan->id);
1116
1117 if (!chan->ramht_refs.next)
1118 return;
1119
1120 list_for_each_safe(entry, tmp, &chan->ramht_refs) {
1121 ref = list_entry(entry, struct nouveau_gpuobj_ref, list);
1122
1123 nouveau_gpuobj_ref_del(dev, &ref);
1124 }
1125
1126 nouveau_gpuobj_ref_del(dev, &chan->ramht);
1127
1128 nouveau_gpuobj_del(dev, &chan->vm_pd);
1129 nouveau_gpuobj_ref_del(dev, &chan->vm_gart_pt);
1130 for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
1131 nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]);
1132
1133 if (chan->ramin_heap)
1134 nouveau_mem_takedown(&chan->ramin_heap);
1135 if (chan->ramin)
1136 nouveau_gpuobj_ref_del(dev, &chan->ramin);
1137
1138}
1139
/* Snapshot instance memory ahead of suspend so it can be restored by
 * nouveau_gpuobj_resume().  Pre-NV50 saves the whole reserved RAMIN
 * area in one buffer; NV50+ saves each backed, non-fake object
 * individually into im_backing_suspend.
 *
 * Returns 0 on success, -ENOMEM if a save buffer cannot be allocated.
 */
int
nouveau_gpuobj_suspend(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int i;

	if (dev_priv->card_type < NV_50) {
		dev_priv->susres.ramin_copy = vmalloc(dev_priv->ramin_rsvd_vram);
		if (!dev_priv->susres.ramin_copy)
			return -ENOMEM;

		for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
			dev_priv->susres.ramin_copy[i/4] = nv_ri32(dev, i);
		return 0;
	}

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		/* Only objects with real instmem backing need saving. */
		if (!gpuobj->im_backing || (gpuobj->flags & NVOBJ_FLAG_FAKE))
			continue;

		gpuobj->im_backing_suspend = vmalloc(gpuobj->im_pramin->size);
		if (!gpuobj->im_backing_suspend) {
			/* Out of memory part-way: write back what was
			 * already saved and free the partial copies. */
			nouveau_gpuobj_resume(dev);
			return -ENOMEM;
		}

		/* read-only instmem access window around the copy-out */
		dev_priv->engine.instmem.prepare_access(dev, false);
		for (i = 0; i < gpuobj->im_pramin->size / 4; i++)
			gpuobj->im_backing_suspend[i] = nv_ro32(dev, gpuobj, i);
		dev_priv->engine.instmem.finish_access(dev);
	}

	return 0;
}
1175
1176void
1177nouveau_gpuobj_suspend_cleanup(struct drm_device *dev)
1178{
1179 struct drm_nouveau_private *dev_priv = dev->dev_private;
1180 struct nouveau_gpuobj *gpuobj;
1181
1182 if (dev_priv->card_type < NV_50) {
1183 vfree(dev_priv->susres.ramin_copy);
1184 dev_priv->susres.ramin_copy = NULL;
1185 return;
1186 }
1187
1188 list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
1189 if (!gpuobj->im_backing_suspend)
1190 continue;
1191
1192 vfree(gpuobj->im_backing_suspend);
1193 gpuobj->im_backing_suspend = NULL;
1194 }
1195}
1196
/* Restore instance memory saved by nouveau_gpuobj_suspend() and free
 * the save buffers.  Mirrors the suspend path: one bulk RAMIN write-back
 * on pre-NV50, a per-object write-back on NV50+.
 */
void
nouveau_gpuobj_resume(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int i;

	if (dev_priv->card_type < NV_50) {
		for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
			nv_wi32(dev, i, dev_priv->susres.ramin_copy[i/4]);
		nouveau_gpuobj_suspend_cleanup(dev);
		return;
	}

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		/* Objects without a saved copy were skipped at suspend. */
		if (!gpuobj->im_backing_suspend)
			continue;

		dev_priv->engine.instmem.prepare_access(dev, true);
		for (i = 0; i < gpuobj->im_pramin->size / 4; i++)
			nv_wo32(dev, gpuobj, i, gpuobj->im_backing_suspend[i]);
		dev_priv->engine.instmem.finish_access(dev);
	}

	nouveau_gpuobj_suspend_cleanup(dev);
}
1223
1224int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
1225 struct drm_file *file_priv)
1226{
1227 struct drm_nouveau_private *dev_priv = dev->dev_private;
1228 struct drm_nouveau_grobj_alloc *init = data;
1229 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
1230 struct nouveau_pgraph_object_class *grc;
1231 struct nouveau_gpuobj *gr = NULL;
1232 struct nouveau_channel *chan;
1233 int ret;
1234
1235 NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
1236 NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan);
1237
1238 if (init->handle == ~0)
1239 return -EINVAL;
1240
1241 grc = pgraph->grclass;
1242 while (grc->id) {
1243 if (grc->id == init->class)
1244 break;
1245 grc++;
1246 }
1247
1248 if (!grc->id) {
1249 NV_ERROR(dev, "Illegal object class: 0x%x\n", init->class);
1250 return -EPERM;
1251 }
1252
1253 if (nouveau_gpuobj_ref_find(chan, init->handle, NULL) == 0)
1254 return -EEXIST;
1255
1256 if (!grc->software)
1257 ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr);
1258 else
1259 ret = nouveau_gpuobj_sw_new(chan, grc->id, &gr);
1260
1261 if (ret) {
1262 NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
1263 ret, init->channel, init->handle);
1264 return ret;
1265 }
1266
1267 ret = nouveau_gpuobj_ref_add(dev, chan, init->handle, gr, NULL);
1268 if (ret) {
1269 NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n",
1270 ret, init->channel, init->handle);
1271 nouveau_gpuobj_del(dev, &gr);
1272 return ret;
1273 }
1274
1275 return 0;
1276}
1277
1278int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
1279 struct drm_file *file_priv)
1280{
1281 struct drm_nouveau_gpuobj_free *objfree = data;
1282 struct nouveau_gpuobj_ref *ref;
1283 struct nouveau_channel *chan;
1284 int ret;
1285
1286 NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
1287 NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan);
1288
1289 ret = nouveau_gpuobj_ref_find(chan, objfree->handle, &ref);
1290 if (ret)
1291 return ret;
1292 nouveau_gpuobj_ref_del(dev, &ref);
1293
1294 return 0;
1295}
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
new file mode 100644
index 000000000000..aa9b310e41be
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -0,0 +1,837 @@
1
2
3#define NV03_BOOT_0 0x00100000
4# define NV03_BOOT_0_RAM_AMOUNT 0x00000003
5# define NV03_BOOT_0_RAM_AMOUNT_8MB 0x00000000
6# define NV03_BOOT_0_RAM_AMOUNT_2MB 0x00000001
7# define NV03_BOOT_0_RAM_AMOUNT_4MB 0x00000002
8# define NV03_BOOT_0_RAM_AMOUNT_8MB_SDRAM 0x00000003
9# define NV04_BOOT_0_RAM_AMOUNT_32MB 0x00000000
10# define NV04_BOOT_0_RAM_AMOUNT_4MB 0x00000001
11# define NV04_BOOT_0_RAM_AMOUNT_8MB 0x00000002
12# define NV04_BOOT_0_RAM_AMOUNT_16MB 0x00000003
13
14#define NV04_FIFO_DATA 0x0010020c
15# define NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK 0xfff00000
16# define NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT 20
17
18#define NV_RAMIN 0x00700000
19
20#define NV_RAMHT_HANDLE_OFFSET 0
21#define NV_RAMHT_CONTEXT_OFFSET 4
22# define NV_RAMHT_CONTEXT_VALID (1<<31)
23# define NV_RAMHT_CONTEXT_CHANNEL_SHIFT 24
24# define NV_RAMHT_CONTEXT_ENGINE_SHIFT 16
25# define NV_RAMHT_CONTEXT_ENGINE_SOFTWARE 0
26# define NV_RAMHT_CONTEXT_ENGINE_GRAPHICS 1
27# define NV_RAMHT_CONTEXT_INSTANCE_SHIFT 0
28# define NV40_RAMHT_CONTEXT_CHANNEL_SHIFT 23
29# define NV40_RAMHT_CONTEXT_ENGINE_SHIFT 20
30# define NV40_RAMHT_CONTEXT_INSTANCE_SHIFT 0
31
32/* DMA object defines */
33#define NV_DMA_ACCESS_RW 0
34#define NV_DMA_ACCESS_RO 1
35#define NV_DMA_ACCESS_WO 2
36#define NV_DMA_TARGET_VIDMEM 0
37#define NV_DMA_TARGET_PCI 2
38#define NV_DMA_TARGET_AGP 3
39/* The following is not a real value used by the card, it's changed by
40 * nouveau_object_dma_create */
41#define NV_DMA_TARGET_PCI_NONLINEAR 8
42
43/* Some object classes we care about in the drm */
44#define NV_CLASS_DMA_FROM_MEMORY 0x00000002
45#define NV_CLASS_DMA_TO_MEMORY 0x00000003
46#define NV_CLASS_NULL 0x00000030
47#define NV_CLASS_DMA_IN_MEMORY 0x0000003D
48
49#define NV03_USER(i) (0x00800000+(i*NV03_USER_SIZE))
50#define NV03_USER__SIZE 16
51#define NV10_USER__SIZE 32
52#define NV03_USER_SIZE 0x00010000
53#define NV03_USER_DMA_PUT(i) (0x00800040+(i*NV03_USER_SIZE))
54#define NV03_USER_DMA_PUT__SIZE 16
55#define NV10_USER_DMA_PUT__SIZE 32
56#define NV03_USER_DMA_GET(i) (0x00800044+(i*NV03_USER_SIZE))
57#define NV03_USER_DMA_GET__SIZE 16
58#define NV10_USER_DMA_GET__SIZE 32
59#define NV03_USER_REF_CNT(i) (0x00800048+(i*NV03_USER_SIZE))
60#define NV03_USER_REF_CNT__SIZE 16
61#define NV10_USER_REF_CNT__SIZE 32
62
63#define NV40_USER(i) (0x00c00000+(i*NV40_USER_SIZE))
64#define NV40_USER_SIZE 0x00001000
65#define NV40_USER_DMA_PUT(i) (0x00c00040+(i*NV40_USER_SIZE))
66#define NV40_USER_DMA_PUT__SIZE 32
67#define NV40_USER_DMA_GET(i) (0x00c00044+(i*NV40_USER_SIZE))
68#define NV40_USER_DMA_GET__SIZE 32
69#define NV40_USER_REF_CNT(i) (0x00c00048+(i*NV40_USER_SIZE))
70#define NV40_USER_REF_CNT__SIZE 32
71
72#define NV50_USER(i) (0x00c00000+(i*NV50_USER_SIZE))
73#define NV50_USER_SIZE 0x00002000
74#define NV50_USER_DMA_PUT(i) (0x00c00040+(i*NV50_USER_SIZE))
75#define NV50_USER_DMA_PUT__SIZE 128
76#define NV50_USER_DMA_GET(i) (0x00c00044+(i*NV50_USER_SIZE))
77#define NV50_USER_DMA_GET__SIZE 128
78#define NV50_USER_REF_CNT(i) (0x00c00048+(i*NV50_USER_SIZE))
79#define NV50_USER_REF_CNT__SIZE 128
80
81#define NV03_FIFO_SIZE 0x8000UL
82
83#define NV03_PMC_BOOT_0 0x00000000
84#define NV03_PMC_BOOT_1 0x00000004
85#define NV03_PMC_INTR_0 0x00000100
86# define NV_PMC_INTR_0_PFIFO_PENDING (1<<8)
87# define NV_PMC_INTR_0_PGRAPH_PENDING (1<<12)
88# define NV_PMC_INTR_0_NV50_I2C_PENDING (1<<21)
89# define NV_PMC_INTR_0_CRTC0_PENDING (1<<24)
90# define NV_PMC_INTR_0_CRTC1_PENDING (1<<25)
91# define NV_PMC_INTR_0_NV50_DISPLAY_PENDING (1<<26)
92# define NV_PMC_INTR_0_CRTCn_PENDING (3<<24)
93#define NV03_PMC_INTR_EN_0 0x00000140
94# define NV_PMC_INTR_EN_0_MASTER_ENABLE (1<<0)
95#define NV03_PMC_ENABLE 0x00000200
96# define NV_PMC_ENABLE_PFIFO (1<<8)
97# define NV_PMC_ENABLE_PGRAPH (1<<12)
98/* Disabling the below bit breaks newer (G7X only?) mobile chipsets,
99 * the card will hang early on in the X init process.
100 */
101# define NV_PMC_ENABLE_UNK13 (1<<13)
102#define NV40_PMC_GRAPH_UNITS 0x00001540
103#define NV40_PMC_BACKLIGHT 0x000015f0
104# define NV40_PMC_BACKLIGHT_MASK 0x001f0000
105#define NV40_PMC_1700 0x00001700
106#define NV40_PMC_1704 0x00001704
107#define NV40_PMC_1708 0x00001708
108#define NV40_PMC_170C 0x0000170C
109
110/* probably PMC ? */
111#define NV50_PUNK_BAR0_PRAMIN 0x00001700
112#define NV50_PUNK_BAR_CFG_BASE 0x00001704
113#define NV50_PUNK_BAR_CFG_BASE_VALID (1<<30)
114#define NV50_PUNK_BAR1_CTXDMA 0x00001708
115#define NV50_PUNK_BAR1_CTXDMA_VALID (1<<31)
116#define NV50_PUNK_BAR3_CTXDMA 0x0000170C
117#define NV50_PUNK_BAR3_CTXDMA_VALID (1<<31)
118#define NV50_PUNK_UNK1710 0x00001710
119
120#define NV04_PBUS_PCI_NV_1 0x00001804
121#define NV04_PBUS_PCI_NV_19 0x0000184C
122#define NV04_PBUS_PCI_NV_20 0x00001850
123# define NV04_PBUS_PCI_NV_20_ROM_SHADOW_DISABLED (0 << 0)
124# define NV04_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED (1 << 0)
125
126#define NV04_PTIMER_INTR_0 0x00009100
127#define NV04_PTIMER_INTR_EN_0 0x00009140
128#define NV04_PTIMER_NUMERATOR 0x00009200
129#define NV04_PTIMER_DENOMINATOR 0x00009210
130#define NV04_PTIMER_TIME_0 0x00009400
131#define NV04_PTIMER_TIME_1 0x00009410
132#define NV04_PTIMER_ALARM_0 0x00009420
133
134#define NV04_PFB_CFG0 0x00100200
135#define NV04_PFB_CFG1 0x00100204
136#define NV40_PFB_020C 0x0010020C
137#define NV10_PFB_TILE(i) (0x00100240 + (i*16))
138#define NV10_PFB_TILE__SIZE 8
139#define NV10_PFB_TLIMIT(i) (0x00100244 + (i*16))
140#define NV10_PFB_TSIZE(i) (0x00100248 + (i*16))
141#define NV10_PFB_TSTATUS(i) (0x0010024C + (i*16))
142#define NV10_PFB_CLOSE_PAGE2 0x0010033C
143#define NV40_PFB_TILE(i) (0x00100600 + (i*16))
144#define NV40_PFB_TILE__SIZE_0 12
145#define NV40_PFB_TILE__SIZE_1 15
146#define NV40_PFB_TLIMIT(i) (0x00100604 + (i*16))
147#define NV40_PFB_TSIZE(i) (0x00100608 + (i*16))
148#define NV40_PFB_TSTATUS(i) (0x0010060C + (i*16))
149#define NV40_PFB_UNK_800 0x00100800
150
151#define NV04_PGRAPH_DEBUG_0 0x00400080
152#define NV04_PGRAPH_DEBUG_1 0x00400084
153#define NV04_PGRAPH_DEBUG_2 0x00400088
154#define NV04_PGRAPH_DEBUG_3 0x0040008c
155#define NV10_PGRAPH_DEBUG_4 0x00400090
156#define NV03_PGRAPH_INTR 0x00400100
157#define NV03_PGRAPH_NSTATUS 0x00400104
158# define NV04_PGRAPH_NSTATUS_STATE_IN_USE (1<<11)
159# define NV04_PGRAPH_NSTATUS_INVALID_STATE (1<<12)
160# define NV04_PGRAPH_NSTATUS_BAD_ARGUMENT (1<<13)
161# define NV04_PGRAPH_NSTATUS_PROTECTION_FAULT (1<<14)
162# define NV10_PGRAPH_NSTATUS_STATE_IN_USE (1<<23)
163# define NV10_PGRAPH_NSTATUS_INVALID_STATE (1<<24)
164# define NV10_PGRAPH_NSTATUS_BAD_ARGUMENT (1<<25)
165# define NV10_PGRAPH_NSTATUS_PROTECTION_FAULT (1<<26)
166#define NV03_PGRAPH_NSOURCE 0x00400108
167# define NV03_PGRAPH_NSOURCE_NOTIFICATION (1<<0)
168# define NV03_PGRAPH_NSOURCE_DATA_ERROR (1<<1)
169# define NV03_PGRAPH_NSOURCE_PROTECTION_ERROR (1<<2)
170# define NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION (1<<3)
171# define NV03_PGRAPH_NSOURCE_LIMIT_COLOR (1<<4)
172# define NV03_PGRAPH_NSOURCE_LIMIT_ZETA (1<<5)
173# define NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD (1<<6)
174# define NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION (1<<7)
175# define NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION (1<<8)
176# define NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION (1<<9)
177# define NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION (1<<10)
178# define NV03_PGRAPH_NSOURCE_STATE_INVALID (1<<11)
179# define NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY (1<<12)
180# define NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE (1<<13)
181# define NV03_PGRAPH_NSOURCE_METHOD_CNT (1<<14)
182# define NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION (1<<15)
183# define NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION (1<<16)
184# define NV03_PGRAPH_NSOURCE_DMA_WIDTH_A (1<<17)
185# define NV03_PGRAPH_NSOURCE_DMA_WIDTH_B (1<<18)
186#define NV03_PGRAPH_INTR_EN 0x00400140
187#define NV40_PGRAPH_INTR_EN 0x0040013C
188# define NV_PGRAPH_INTR_NOTIFY (1<<0)
189# define NV_PGRAPH_INTR_MISSING_HW (1<<4)
190# define NV_PGRAPH_INTR_CONTEXT_SWITCH (1<<12)
191# define NV_PGRAPH_INTR_BUFFER_NOTIFY (1<<16)
192# define NV_PGRAPH_INTR_ERROR (1<<20)
193#define NV10_PGRAPH_CTX_CONTROL 0x00400144
194#define NV10_PGRAPH_CTX_USER 0x00400148
195#define NV10_PGRAPH_CTX_SWITCH1 0x0040014C
196#define NV10_PGRAPH_CTX_SWITCH2 0x00400150
197#define NV10_PGRAPH_CTX_SWITCH3 0x00400154
198#define NV10_PGRAPH_CTX_SWITCH4 0x00400158
199#define NV10_PGRAPH_CTX_SWITCH5 0x0040015C
200#define NV04_PGRAPH_CTX_SWITCH1 0x00400160
201#define NV10_PGRAPH_CTX_CACHE1 0x00400160
202#define NV04_PGRAPH_CTX_SWITCH2 0x00400164
203#define NV04_PGRAPH_CTX_SWITCH3 0x00400168
204#define NV04_PGRAPH_CTX_SWITCH4 0x0040016C
205#define NV04_PGRAPH_CTX_CONTROL 0x00400170
206#define NV04_PGRAPH_CTX_USER 0x00400174
207#define NV04_PGRAPH_CTX_CACHE1 0x00400180
208#define NV10_PGRAPH_CTX_CACHE2 0x00400180
209#define NV03_PGRAPH_CTX_CONTROL 0x00400190
210#define NV03_PGRAPH_CTX_USER 0x00400194
211#define NV04_PGRAPH_CTX_CACHE2 0x004001A0
212#define NV10_PGRAPH_CTX_CACHE3 0x004001A0
213#define NV04_PGRAPH_CTX_CACHE3 0x004001C0
214#define NV10_PGRAPH_CTX_CACHE4 0x004001C0
215#define NV04_PGRAPH_CTX_CACHE4 0x004001E0
216#define NV10_PGRAPH_CTX_CACHE5 0x004001E0
217#define NV40_PGRAPH_CTXCTL_0304 0x00400304
218#define NV40_PGRAPH_CTXCTL_0304_XFER_CTX 0x00000001
219#define NV40_PGRAPH_CTXCTL_UCODE_STAT 0x00400308
220#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_MASK 0xff000000
221#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT 24
222#define NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK 0x00ffffff
223#define NV40_PGRAPH_CTXCTL_0310 0x00400310
224#define NV40_PGRAPH_CTXCTL_0310_XFER_SAVE 0x00000020
225#define NV40_PGRAPH_CTXCTL_0310_XFER_LOAD 0x00000040
226#define NV40_PGRAPH_CTXCTL_030C 0x0040030c
227#define NV40_PGRAPH_CTXCTL_UCODE_INDEX 0x00400324
228#define NV40_PGRAPH_CTXCTL_UCODE_DATA 0x00400328
229#define NV40_PGRAPH_CTXCTL_CUR 0x0040032c
230#define NV40_PGRAPH_CTXCTL_CUR_LOADED 0x01000000
231#define NV40_PGRAPH_CTXCTL_CUR_INSTANCE 0x000FFFFF
232#define NV40_PGRAPH_CTXCTL_NEXT 0x00400330
233#define NV40_PGRAPH_CTXCTL_NEXT_INSTANCE 0x000fffff
234#define NV50_PGRAPH_CTXCTL_CUR 0x0040032c
235#define NV50_PGRAPH_CTXCTL_CUR_LOADED 0x80000000
236#define NV50_PGRAPH_CTXCTL_CUR_INSTANCE 0x00ffffff
237#define NV50_PGRAPH_CTXCTL_NEXT 0x00400330
238#define NV50_PGRAPH_CTXCTL_NEXT_INSTANCE 0x00ffffff
239#define NV03_PGRAPH_ABS_X_RAM 0x00400400
240#define NV03_PGRAPH_ABS_Y_RAM 0x00400480
241#define NV03_PGRAPH_X_MISC 0x00400500
242#define NV03_PGRAPH_Y_MISC 0x00400504
243#define NV04_PGRAPH_VALID1 0x00400508
244#define NV04_PGRAPH_SOURCE_COLOR 0x0040050C
245#define NV04_PGRAPH_MISC24_0 0x00400510
246#define NV03_PGRAPH_XY_LOGIC_MISC0 0x00400514
247#define NV03_PGRAPH_XY_LOGIC_MISC1 0x00400518
248#define NV03_PGRAPH_XY_LOGIC_MISC2 0x0040051C
249#define NV03_PGRAPH_XY_LOGIC_MISC3 0x00400520
250#define NV03_PGRAPH_CLIPX_0 0x00400524
251#define NV03_PGRAPH_CLIPX_1 0x00400528
252#define NV03_PGRAPH_CLIPY_0 0x0040052C
253#define NV03_PGRAPH_CLIPY_1 0x00400530
254#define NV03_PGRAPH_ABS_ICLIP_XMAX 0x00400534
255#define NV03_PGRAPH_ABS_ICLIP_YMAX 0x00400538
256#define NV03_PGRAPH_ABS_UCLIP_XMIN 0x0040053C
257#define NV03_PGRAPH_ABS_UCLIP_YMIN 0x00400540
258#define NV03_PGRAPH_ABS_UCLIP_XMAX 0x00400544
259#define NV03_PGRAPH_ABS_UCLIP_YMAX 0x00400548
260#define NV03_PGRAPH_ABS_UCLIPA_XMIN 0x00400560
261#define NV03_PGRAPH_ABS_UCLIPA_YMIN 0x00400564
262#define NV03_PGRAPH_ABS_UCLIPA_XMAX 0x00400568
263#define NV03_PGRAPH_ABS_UCLIPA_YMAX 0x0040056C
264#define NV04_PGRAPH_MISC24_1 0x00400570
265#define NV04_PGRAPH_MISC24_2 0x00400574
266#define NV04_PGRAPH_VALID2 0x00400578
267#define NV04_PGRAPH_PASSTHRU_0 0x0040057C
268#define NV04_PGRAPH_PASSTHRU_1 0x00400580
269#define NV04_PGRAPH_PASSTHRU_2 0x00400584
270#define NV10_PGRAPH_DIMX_TEXTURE 0x00400588
271#define NV10_PGRAPH_WDIMX_TEXTURE 0x0040058C
272#define NV04_PGRAPH_COMBINE_0_ALPHA 0x00400590
273#define NV04_PGRAPH_COMBINE_0_COLOR 0x00400594
274#define NV04_PGRAPH_COMBINE_1_ALPHA 0x00400598
275#define NV04_PGRAPH_COMBINE_1_COLOR 0x0040059C
276#define NV04_PGRAPH_FORMAT_0 0x004005A8
277#define NV04_PGRAPH_FORMAT_1 0x004005AC
278#define NV04_PGRAPH_FILTER_0 0x004005B0
279#define NV04_PGRAPH_FILTER_1 0x004005B4
280#define NV03_PGRAPH_MONO_COLOR0 0x00400600
281#define NV04_PGRAPH_ROP3 0x00400604
282#define NV04_PGRAPH_BETA_AND 0x00400608
283#define NV04_PGRAPH_BETA_PREMULT 0x0040060C
284#define NV04_PGRAPH_LIMIT_VIOL_PIX 0x00400610
285#define NV04_PGRAPH_FORMATS 0x00400618
286#define NV10_PGRAPH_DEBUG_2 0x00400620
287#define NV04_PGRAPH_BOFFSET0 0x00400640
288#define NV04_PGRAPH_BOFFSET1 0x00400644
289#define NV04_PGRAPH_BOFFSET2 0x00400648
290#define NV04_PGRAPH_BOFFSET3 0x0040064C
291#define NV04_PGRAPH_BOFFSET4 0x00400650
292#define NV04_PGRAPH_BOFFSET5 0x00400654
293#define NV04_PGRAPH_BBASE0 0x00400658
294#define NV04_PGRAPH_BBASE1 0x0040065C
295#define NV04_PGRAPH_BBASE2 0x00400660
296#define NV04_PGRAPH_BBASE3 0x00400664
297#define NV04_PGRAPH_BBASE4 0x00400668
298#define NV04_PGRAPH_BBASE5 0x0040066C
299#define NV04_PGRAPH_BPITCH0 0x00400670
300#define NV04_PGRAPH_BPITCH1 0x00400674
301#define NV04_PGRAPH_BPITCH2 0x00400678
302#define NV04_PGRAPH_BPITCH3 0x0040067C
303#define NV04_PGRAPH_BPITCH4 0x00400680
304#define NV04_PGRAPH_BLIMIT0 0x00400684
305#define NV04_PGRAPH_BLIMIT1 0x00400688
306#define NV04_PGRAPH_BLIMIT2 0x0040068C
307#define NV04_PGRAPH_BLIMIT3 0x00400690
308#define NV04_PGRAPH_BLIMIT4 0x00400694
309#define NV04_PGRAPH_BLIMIT5 0x00400698
310#define NV04_PGRAPH_BSWIZZLE2 0x0040069C
311#define NV04_PGRAPH_BSWIZZLE5 0x004006A0
312#define NV03_PGRAPH_STATUS 0x004006B0
313#define NV04_PGRAPH_STATUS 0x00400700
314#define NV04_PGRAPH_TRAPPED_ADDR 0x00400704
315#define NV04_PGRAPH_TRAPPED_DATA 0x00400708
316#define NV04_PGRAPH_SURFACE 0x0040070C
317#define NV10_PGRAPH_TRAPPED_DATA_HIGH 0x0040070C
318#define NV04_PGRAPH_STATE 0x00400710
319#define NV10_PGRAPH_SURFACE 0x00400710
320#define NV04_PGRAPH_NOTIFY 0x00400714
321#define NV10_PGRAPH_STATE 0x00400714
322#define NV10_PGRAPH_NOTIFY 0x00400718
323
324#define NV04_PGRAPH_FIFO 0x00400720
325
326#define NV04_PGRAPH_BPIXEL 0x00400724
327#define NV10_PGRAPH_RDI_INDEX 0x00400750
328#define NV04_PGRAPH_FFINTFC_ST2 0x00400754
329#define NV10_PGRAPH_RDI_DATA 0x00400754
330#define NV04_PGRAPH_DMA_PITCH 0x00400760
331#define NV10_PGRAPH_FFINTFC_ST2 0x00400764
332#define NV04_PGRAPH_DVD_COLORFMT 0x00400764
333#define NV04_PGRAPH_SCALED_FORMAT 0x00400768
334#define NV10_PGRAPH_DMA_PITCH 0x00400770
335#define NV10_PGRAPH_DVD_COLORFMT 0x00400774
336#define NV10_PGRAPH_SCALED_FORMAT 0x00400778
337#define NV20_PGRAPH_CHANNEL_CTX_TABLE 0x00400780
338#define NV20_PGRAPH_CHANNEL_CTX_POINTER 0x00400784
339#define NV20_PGRAPH_CHANNEL_CTX_XFER 0x00400788
340#define NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD 0x00000001
341#define NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE 0x00000002
342#define NV04_PGRAPH_PATT_COLOR0 0x00400800
343#define NV04_PGRAPH_PATT_COLOR1 0x00400804
344#define NV04_PGRAPH_PATTERN 0x00400808
345#define NV04_PGRAPH_PATTERN_SHAPE 0x00400810
346#define NV04_PGRAPH_CHROMA 0x00400814
347#define NV04_PGRAPH_CONTROL0 0x00400818
348#define NV04_PGRAPH_CONTROL1 0x0040081C
349#define NV04_PGRAPH_CONTROL2 0x00400820
350#define NV04_PGRAPH_BLEND 0x00400824
351#define NV04_PGRAPH_STORED_FMT 0x00400830
352#define NV04_PGRAPH_PATT_COLORRAM 0x00400900
353#define NV20_PGRAPH_TILE(i) (0x00400900 + (i*16))
354#define NV20_PGRAPH_TLIMIT(i) (0x00400904 + (i*16))
355#define NV20_PGRAPH_TSIZE(i) (0x00400908 + (i*16))
356#define NV20_PGRAPH_TSTATUS(i) (0x0040090C + (i*16))
357#define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16))
358#define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16))
359#define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16))
360#define NV10_PGRAPH_TSTATUS(i) (0x00400B0C + (i*16))
361#define NV04_PGRAPH_U_RAM 0x00400D00
362#define NV47_PGRAPH_TILE(i) (0x00400D00 + (i*16))
363#define NV47_PGRAPH_TLIMIT(i) (0x00400D04 + (i*16))
364#define NV47_PGRAPH_TSIZE(i) (0x00400D08 + (i*16))
365#define NV47_PGRAPH_TSTATUS(i) (0x00400D0C + (i*16))
366#define NV04_PGRAPH_V_RAM 0x00400D40
367#define NV04_PGRAPH_W_RAM 0x00400D80
368#define NV10_PGRAPH_COMBINER0_IN_ALPHA 0x00400E40
369#define NV10_PGRAPH_COMBINER1_IN_ALPHA 0x00400E44
370#define NV10_PGRAPH_COMBINER0_IN_RGB 0x00400E48
371#define NV10_PGRAPH_COMBINER1_IN_RGB 0x00400E4C
372#define NV10_PGRAPH_COMBINER_COLOR0 0x00400E50
373#define NV10_PGRAPH_COMBINER_COLOR1 0x00400E54
374#define NV10_PGRAPH_COMBINER0_OUT_ALPHA 0x00400E58
375#define NV10_PGRAPH_COMBINER1_OUT_ALPHA 0x00400E5C
376#define NV10_PGRAPH_COMBINER0_OUT_RGB 0x00400E60
377#define NV10_PGRAPH_COMBINER1_OUT_RGB 0x00400E64
378#define NV10_PGRAPH_COMBINER_FINAL0 0x00400E68
379#define NV10_PGRAPH_COMBINER_FINAL1 0x00400E6C
380#define NV10_PGRAPH_WINDOWCLIP_HORIZONTAL 0x00400F00
381#define NV10_PGRAPH_WINDOWCLIP_VERTICAL 0x00400F20
382#define NV10_PGRAPH_XFMODE0 0x00400F40
383#define NV10_PGRAPH_XFMODE1 0x00400F44
384#define NV10_PGRAPH_GLOBALSTATE0 0x00400F48
385#define NV10_PGRAPH_GLOBALSTATE1 0x00400F4C
386#define NV10_PGRAPH_PIPE_ADDRESS 0x00400F50
387#define NV10_PGRAPH_PIPE_DATA 0x00400F54
388#define NV04_PGRAPH_DMA_START_0 0x00401000
389#define NV04_PGRAPH_DMA_START_1 0x00401004
390#define NV04_PGRAPH_DMA_LENGTH 0x00401008
391#define NV04_PGRAPH_DMA_MISC 0x0040100C
392#define NV04_PGRAPH_DMA_DATA_0 0x00401020
393#define NV04_PGRAPH_DMA_DATA_1 0x00401024
394#define NV04_PGRAPH_DMA_RM 0x00401030
395#define NV04_PGRAPH_DMA_A_XLATE_INST 0x00401040
396#define NV04_PGRAPH_DMA_A_CONTROL 0x00401044
397#define NV04_PGRAPH_DMA_A_LIMIT 0x00401048
398#define NV04_PGRAPH_DMA_A_TLB_PTE 0x0040104C
399#define NV04_PGRAPH_DMA_A_TLB_TAG 0x00401050
400#define NV04_PGRAPH_DMA_A_ADJ_OFFSET 0x00401054
401#define NV04_PGRAPH_DMA_A_OFFSET 0x00401058
402#define NV04_PGRAPH_DMA_A_SIZE 0x0040105C
403#define NV04_PGRAPH_DMA_A_Y_SIZE 0x00401060
404#define NV04_PGRAPH_DMA_B_XLATE_INST 0x00401080
405#define NV04_PGRAPH_DMA_B_CONTROL 0x00401084
406#define NV04_PGRAPH_DMA_B_LIMIT 0x00401088
407#define NV04_PGRAPH_DMA_B_TLB_PTE 0x0040108C
408#define NV04_PGRAPH_DMA_B_TLB_TAG 0x00401090
409#define NV04_PGRAPH_DMA_B_ADJ_OFFSET 0x00401094
410#define NV04_PGRAPH_DMA_B_OFFSET 0x00401098
411#define NV04_PGRAPH_DMA_B_SIZE 0x0040109C
412#define NV04_PGRAPH_DMA_B_Y_SIZE 0x004010A0
413#define NV40_PGRAPH_TILE1(i) (0x00406900 + (i*16))
414#define NV40_PGRAPH_TLIMIT1(i) (0x00406904 + (i*16))
415#define NV40_PGRAPH_TSIZE1(i) (0x00406908 + (i*16))
416#define NV40_PGRAPH_TSTATUS1(i) (0x0040690C + (i*16))
417
418
419/* It's a guess that this works on NV03. Confirmed on NV04, though */
420#define NV04_PFIFO_DELAY_0 0x00002040
421#define NV04_PFIFO_DMA_TIMESLICE 0x00002044
422#define NV04_PFIFO_NEXT_CHANNEL 0x00002050
423#define NV03_PFIFO_INTR_0 0x00002100
424#define NV03_PFIFO_INTR_EN_0 0x00002140
425# define NV_PFIFO_INTR_CACHE_ERROR (1<<0)
426# define NV_PFIFO_INTR_RUNOUT (1<<4)
427# define NV_PFIFO_INTR_RUNOUT_OVERFLOW (1<<8)
428# define NV_PFIFO_INTR_DMA_PUSHER (1<<12)
429# define NV_PFIFO_INTR_DMA_PT (1<<16)
430# define NV_PFIFO_INTR_SEMAPHORE (1<<20)
431# define NV_PFIFO_INTR_ACQUIRE_TIMEOUT (1<<24)
432#define NV03_PFIFO_RAMHT 0x00002210
433#define NV03_PFIFO_RAMFC 0x00002214
434#define NV03_PFIFO_RAMRO 0x00002218
435#define NV40_PFIFO_RAMFC 0x00002220
436#define NV03_PFIFO_CACHES 0x00002500
437#define NV04_PFIFO_MODE 0x00002504
438#define NV04_PFIFO_DMA 0x00002508
439#define NV04_PFIFO_SIZE 0x0000250c
440#define NV50_PFIFO_CTX_TABLE(c) (0x2600+(c)*4)
441#define NV50_PFIFO_CTX_TABLE__SIZE 128
442#define NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED (1<<31)
443#define NV50_PFIFO_CTX_TABLE_UNK30_BAD (1<<30)
444#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80 0x0FFFFFFF
445#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84 0x00FFFFFF
446#define NV03_PFIFO_CACHE0_PUSH0 0x00003000
447#define NV03_PFIFO_CACHE0_PULL0 0x00003040
448#define NV04_PFIFO_CACHE0_PULL0 0x00003050
449#define NV04_PFIFO_CACHE0_PULL1 0x00003054
450#define NV03_PFIFO_CACHE1_PUSH0 0x00003200
451#define NV03_PFIFO_CACHE1_PUSH1 0x00003204
452#define NV03_PFIFO_CACHE1_PUSH1_DMA (1<<8)
453#define NV40_PFIFO_CACHE1_PUSH1_DMA (1<<16)
454#define NV03_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000000f
455#define NV10_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000001f
456#define NV50_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000007f
457#define NV03_PFIFO_CACHE1_PUT 0x00003210
458#define NV04_PFIFO_CACHE1_DMA_PUSH 0x00003220
459#define NV04_PFIFO_CACHE1_DMA_FETCH 0x00003224
460# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_8_BYTES 0x00000000
461# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_16_BYTES 0x00000008
462# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_24_BYTES 0x00000010
463# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_32_BYTES 0x00000018
464# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_40_BYTES 0x00000020
465# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_48_BYTES 0x00000028
466# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_56_BYTES 0x00000030
467# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_64_BYTES 0x00000038
468# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_72_BYTES 0x00000040
469# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_80_BYTES 0x00000048
470# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_88_BYTES 0x00000050
471# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_96_BYTES 0x00000058
472# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_104_BYTES 0x00000060
473# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES 0x00000068
474# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_120_BYTES 0x00000070
475# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES 0x00000078
476# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_136_BYTES 0x00000080
477# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_144_BYTES 0x00000088
478# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_152_BYTES 0x00000090
479# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_160_BYTES 0x00000098
480# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_168_BYTES 0x000000A0
481# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_176_BYTES 0x000000A8
482# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_184_BYTES 0x000000B0
483# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_192_BYTES 0x000000B8
484# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_200_BYTES 0x000000C0
485# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_208_BYTES 0x000000C8
486# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_216_BYTES 0x000000D0
487# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_224_BYTES 0x000000D8
488# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_232_BYTES 0x000000E0
489# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_240_BYTES 0x000000E8
490# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_248_BYTES 0x000000F0
491# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_256_BYTES 0x000000F8
492# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE 0x0000E000
493# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_32_BYTES 0x00000000
494# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_64_BYTES 0x00002000
495# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_96_BYTES 0x00004000
496# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES 0x00006000
497# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_160_BYTES 0x00008000
498# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_192_BYTES 0x0000A000
499# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_224_BYTES 0x0000C000
500# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_256_BYTES 0x0000E000
501# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS 0x001F0000
502# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_0 0x00000000
503# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_1 0x00010000
504# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_2 0x00020000
505# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_3 0x00030000
506# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 0x00040000
507# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_5 0x00050000
508# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_6 0x00060000
509# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_7 0x00070000
510# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 0x00080000
511# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_9 0x00090000
512# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_10 0x000A0000
513# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_11 0x000B0000
514# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_12 0x000C0000
515# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_13 0x000D0000
516# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_14 0x000E0000
517# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_15 0x000F0000
518# define NV_PFIFO_CACHE1_ENDIAN 0x80000000
519# define NV_PFIFO_CACHE1_LITTLE_ENDIAN 0x7FFFFFFF
520# define NV_PFIFO_CACHE1_BIG_ENDIAN 0x80000000
521#define NV04_PFIFO_CACHE1_DMA_STATE 0x00003228
522#define NV04_PFIFO_CACHE1_DMA_INSTANCE 0x0000322c
523#define NV04_PFIFO_CACHE1_DMA_CTL 0x00003230
524#define NV04_PFIFO_CACHE1_DMA_PUT 0x00003240
525#define NV04_PFIFO_CACHE1_DMA_GET 0x00003244
526#define NV10_PFIFO_CACHE1_REF_CNT 0x00003248
527#define NV10_PFIFO_CACHE1_DMA_SUBROUTINE 0x0000324C
528#define NV03_PFIFO_CACHE1_PULL0 0x00003240
529#define NV04_PFIFO_CACHE1_PULL0 0x00003250
530#define NV03_PFIFO_CACHE1_PULL1 0x00003250
531#define NV04_PFIFO_CACHE1_PULL1 0x00003254
532#define NV04_PFIFO_CACHE1_HASH 0x00003258
533#define NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT 0x00003260
534#define NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP 0x00003264
535#define NV10_PFIFO_CACHE1_ACQUIRE_VALUE 0x00003268
536#define NV10_PFIFO_CACHE1_SEMAPHORE 0x0000326C
537#define NV03_PFIFO_CACHE1_GET 0x00003270
538#define NV04_PFIFO_CACHE1_ENGINE 0x00003280
539#define NV04_PFIFO_CACHE1_DMA_DCOUNT 0x000032A0
540#define NV40_PFIFO_GRCTX_INSTANCE 0x000032E0
541#define NV40_PFIFO_UNK32E4 0x000032E4
542#define NV04_PFIFO_CACHE1_METHOD(i) (0x00003800+(i*8))
543#define NV04_PFIFO_CACHE1_DATA(i) (0x00003804+(i*8))
544#define NV40_PFIFO_CACHE1_METHOD(i) (0x00090000+(i*8))
545#define NV40_PFIFO_CACHE1_DATA(i) (0x00090004+(i*8))
546
547#define NV_CRTC0_INTSTAT 0x00600100
548#define NV_CRTC0_INTEN 0x00600140
549#define NV_CRTC1_INTSTAT 0x00602100
550#define NV_CRTC1_INTEN 0x00602140
551# define NV_CRTC_INTR_VBLANK (1<<0)
552
553#define NV04_PRAMIN 0x00700000
554
555/* Fifo commands. These are not regs, neither masks */
556#define NV03_FIFO_CMD_JUMP 0x20000000
557#define NV03_FIFO_CMD_JUMP_OFFSET_MASK 0x1ffffffc
558#define NV03_FIFO_CMD_REWIND (NV03_FIFO_CMD_JUMP | (0 & NV03_FIFO_CMD_JUMP_OFFSET_MASK))
559
560/* This is a partial import from rules-ng, a few things may be duplicated.
561 * Eventually we should completely import everything from rules-ng.
562 * For the moment check rules-ng for docs.
563 */
564
565#define NV50_PMC 0x00000000
566#define NV50_PMC__LEN 0x1
567#define NV50_PMC__ESIZE 0x2000
568# define NV50_PMC_BOOT_0 0x00000000
569# define NV50_PMC_BOOT_0_REVISION 0x000000ff
570# define NV50_PMC_BOOT_0_REVISION__SHIFT 0
571# define NV50_PMC_BOOT_0_ARCH 0x0ff00000
572# define NV50_PMC_BOOT_0_ARCH__SHIFT 20
573# define NV50_PMC_INTR_0 0x00000100
574# define NV50_PMC_INTR_0_PFIFO (1<<8)
575# define NV50_PMC_INTR_0_PGRAPH (1<<12)
576# define NV50_PMC_INTR_0_PTIMER (1<<20)
577# define NV50_PMC_INTR_0_HOTPLUG (1<<21)
578# define NV50_PMC_INTR_0_DISPLAY (1<<26)
579# define NV50_PMC_INTR_EN_0 0x00000140
580# define NV50_PMC_INTR_EN_0_MASTER (1<<0)
581# define NV50_PMC_INTR_EN_0_MASTER_DISABLED (0<<0)
582# define NV50_PMC_INTR_EN_0_MASTER_ENABLED (1<<0)
583# define NV50_PMC_ENABLE 0x00000200
584# define NV50_PMC_ENABLE_PFIFO (1<<8)
585# define NV50_PMC_ENABLE_PGRAPH (1<<12)
586
587#define NV50_PCONNECTOR 0x0000e000
588#define NV50_PCONNECTOR__LEN 0x1
589#define NV50_PCONNECTOR__ESIZE 0x1000
590# define NV50_PCONNECTOR_HOTPLUG_INTR 0x0000e050
591# define NV50_PCONNECTOR_HOTPLUG_INTR_PLUG_I2C0 (1<<0)
592# define NV50_PCONNECTOR_HOTPLUG_INTR_PLUG_I2C1 (1<<1)
593# define NV50_PCONNECTOR_HOTPLUG_INTR_PLUG_I2C2 (1<<2)
594# define NV50_PCONNECTOR_HOTPLUG_INTR_PLUG_I2C3 (1<<3)
595# define NV50_PCONNECTOR_HOTPLUG_INTR_UNPLUG_I2C0 (1<<16)
596# define NV50_PCONNECTOR_HOTPLUG_INTR_UNPLUG_I2C1 (1<<17)
597# define NV50_PCONNECTOR_HOTPLUG_INTR_UNPLUG_I2C2 (1<<18)
598# define NV50_PCONNECTOR_HOTPLUG_INTR_UNPLUG_I2C3 (1<<19)
599# define NV50_PCONNECTOR_HOTPLUG_CTRL 0x0000e054
600# define NV50_PCONNECTOR_HOTPLUG_CTRL_PLUG_I2C0 (1<<0)
601# define NV50_PCONNECTOR_HOTPLUG_CTRL_PLUG_I2C1 (1<<1)
602# define NV50_PCONNECTOR_HOTPLUG_CTRL_PLUG_I2C2 (1<<2)
603# define NV50_PCONNECTOR_HOTPLUG_CTRL_PLUG_I2C3 (1<<3)
604# define NV50_PCONNECTOR_HOTPLUG_CTRL_UNPLUG_I2C0 (1<<16)
605# define NV50_PCONNECTOR_HOTPLUG_CTRL_UNPLUG_I2C1 (1<<17)
606# define NV50_PCONNECTOR_HOTPLUG_CTRL_UNPLUG_I2C2 (1<<18)
607# define NV50_PCONNECTOR_HOTPLUG_CTRL_UNPLUG_I2C3 (1<<19)
608# define NV50_PCONNECTOR_HOTPLUG_STATE 0x0000e104
609# define NV50_PCONNECTOR_HOTPLUG_STATE_PIN_CONNECTED_I2C0 (1<<2)
610# define NV50_PCONNECTOR_HOTPLUG_STATE_PIN_CONNECTED_I2C1 (1<<6)
611# define NV50_PCONNECTOR_HOTPLUG_STATE_PIN_CONNECTED_I2C2 (1<<10)
612# define NV50_PCONNECTOR_HOTPLUG_STATE_PIN_CONNECTED_I2C3 (1<<14)
613# define NV50_PCONNECTOR_I2C_PORT_0 0x0000e138
614# define NV50_PCONNECTOR_I2C_PORT_1 0x0000e150
615# define NV50_PCONNECTOR_I2C_PORT_2 0x0000e168
616# define NV50_PCONNECTOR_I2C_PORT_3 0x0000e180
617# define NV50_PCONNECTOR_I2C_PORT_4 0x0000e240
618# define NV50_PCONNECTOR_I2C_PORT_5 0x0000e258
619
620#define NV50_AUXCH_DATA_OUT(i,n) ((n) * 4 + (i) * 0x50 + 0x0000e4c0)
621#define NV50_AUXCH_DATA_OUT__SIZE 4
622#define NV50_AUXCH_DATA_IN(i,n) ((n) * 4 + (i) * 0x50 + 0x0000e4d0)
623#define NV50_AUXCH_DATA_IN__SIZE 4
624#define NV50_AUXCH_ADDR(i) ((i) * 0x50 + 0x0000e4e0)
625#define NV50_AUXCH_CTRL(i) ((i) * 0x50 + 0x0000e4e4)
626#define NV50_AUXCH_CTRL_LINKSTAT 0x01000000
627#define NV50_AUXCH_CTRL_LINKSTAT_NOT_READY 0x00000000
628#define NV50_AUXCH_CTRL_LINKSTAT_READY 0x01000000
629#define NV50_AUXCH_CTRL_LINKEN 0x00100000
630#define NV50_AUXCH_CTRL_LINKEN_DISABLED 0x00000000
631#define NV50_AUXCH_CTRL_LINKEN_ENABLED 0x00100000
632#define NV50_AUXCH_CTRL_EXEC 0x00010000
633#define NV50_AUXCH_CTRL_EXEC_COMPLETE 0x00000000
634#define NV50_AUXCH_CTRL_EXEC_IN_PROCESS 0x00010000
635#define NV50_AUXCH_CTRL_CMD 0x0000f000
636#define NV50_AUXCH_CTRL_CMD_SHIFT 12
637#define NV50_AUXCH_CTRL_LEN 0x0000000f
638#define NV50_AUXCH_CTRL_LEN_SHIFT 0
639#define NV50_AUXCH_STAT(i) ((i) * 0x50 + 0x0000e4e8)
640#define NV50_AUXCH_STAT_STATE 0x10000000
641#define NV50_AUXCH_STAT_STATE_NOT_READY 0x00000000
642#define NV50_AUXCH_STAT_STATE_READY 0x10000000
643#define NV50_AUXCH_STAT_REPLY 0x000f0000
644#define NV50_AUXCH_STAT_REPLY_AUX 0x00030000
645#define NV50_AUXCH_STAT_REPLY_AUX_ACK 0x00000000
646#define NV50_AUXCH_STAT_REPLY_AUX_NACK 0x00010000
647#define NV50_AUXCH_STAT_REPLY_AUX_DEFER 0x00020000
648#define NV50_AUXCH_STAT_REPLY_I2C 0x000c0000
649#define NV50_AUXCH_STAT_REPLY_I2C_ACK 0x00000000
650#define NV50_AUXCH_STAT_REPLY_I2C_NACK 0x00040000
651#define NV50_AUXCH_STAT_REPLY_I2C_DEFER 0x00080000
652#define NV50_AUXCH_STAT_COUNT 0x0000001f
653
654#define NV50_PBUS 0x00088000
655#define NV50_PBUS__LEN 0x1
656#define NV50_PBUS__ESIZE 0x1000
657# define NV50_PBUS_PCI_ID 0x00088000
658# define NV50_PBUS_PCI_ID_VENDOR_ID 0x0000ffff
659# define NV50_PBUS_PCI_ID_VENDOR_ID__SHIFT 0
660# define NV50_PBUS_PCI_ID_DEVICE_ID 0xffff0000
661# define NV50_PBUS_PCI_ID_DEVICE_ID__SHIFT 16
662
663#define NV50_PFB 0x00100000
664#define NV50_PFB__LEN 0x1
665#define NV50_PFB__ESIZE 0x1000
666
667#define NV50_PEXTDEV 0x00101000
668#define NV50_PEXTDEV__LEN 0x1
669#define NV50_PEXTDEV__ESIZE 0x1000
670
671#define NV50_PROM 0x00300000
672#define NV50_PROM__LEN 0x1
673#define NV50_PROM__ESIZE 0x10000
674
675#define NV50_PGRAPH 0x00400000
676#define NV50_PGRAPH__LEN 0x1
677#define NV50_PGRAPH__ESIZE 0x10000
678
679#define NV50_PDISPLAY 0x00610000
680#define NV50_PDISPLAY_OBJECTS 0x00610010
681#define NV50_PDISPLAY_INTR_0 0x00610020
682#define NV50_PDISPLAY_INTR_1 0x00610024
683#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC 0x0000000c
684#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC_SHIFT 2
685#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(n) (1 << ((n) + 2))
686#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC_0 0x00000004
687#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC_1 0x00000008
688#define NV50_PDISPLAY_INTR_1_CLK_UNK10 0x00000010
689#define NV50_PDISPLAY_INTR_1_CLK_UNK20 0x00000020
690#define NV50_PDISPLAY_INTR_1_CLK_UNK40 0x00000040
691#define NV50_PDISPLAY_INTR_EN 0x0061002c
692#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC 0x0000000c
693#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(n) (1 << ((n) + 2))
694#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_0 0x00000004
695#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_1 0x00000008
696#define NV50_PDISPLAY_INTR_EN_CLK_UNK10 0x00000010
697#define NV50_PDISPLAY_INTR_EN_CLK_UNK20 0x00000020
698#define NV50_PDISPLAY_INTR_EN_CLK_UNK40 0x00000040
699#define NV50_PDISPLAY_UNK30_CTRL 0x00610030
700#define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK0 0x00000200
701#define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK1 0x00000400
702#define NV50_PDISPLAY_UNK30_CTRL_PENDING 0x80000000
703#define NV50_PDISPLAY_TRAPPED_ADDR 0x00610080
704#define NV50_PDISPLAY_TRAPPED_DATA 0x00610084
705#define NV50_PDISPLAY_CHANNEL_STAT(i) ((i) * 0x10 + 0x00610200)
706#define NV50_PDISPLAY_CHANNEL_STAT_DMA 0x00000010
707#define NV50_PDISPLAY_CHANNEL_STAT_DMA_DISABLED 0x00000000
708#define NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED 0x00000010
709#define NV50_PDISPLAY_CHANNEL_DMA_CB(i) ((i) * 0x10 + 0x00610204)
710#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION 0x00000002
711#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM 0x00000000
712#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_SYSTEM 0x00000002
713#define NV50_PDISPLAY_CHANNEL_DMA_CB_VALID 0x00000001
714#define NV50_PDISPLAY_CHANNEL_UNK2(i) ((i) * 0x10 + 0x00610208)
715#define NV50_PDISPLAY_CHANNEL_UNK3(i) ((i) * 0x10 + 0x0061020c)
716
717#define NV50_PDISPLAY_CURSOR 0x00610270
718#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i) ((i) * 0x10 + 0x00610270)
719#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_ON 0x00000001
720#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS 0x00030000
721#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE 0x00010000
722
723#define NV50_PDISPLAY_CTRL_STATE 0x00610300
724#define NV50_PDISPLAY_CTRL_STATE_PENDING 0x80000000
725#define NV50_PDISPLAY_CTRL_STATE_METHOD 0x00001ffc
726#define NV50_PDISPLAY_CTRL_STATE_ENABLE 0x00000001
727#define NV50_PDISPLAY_CTRL_VAL 0x00610304
728#define NV50_PDISPLAY_UNK_380 0x00610380
729#define NV50_PDISPLAY_RAM_AMOUNT 0x00610384
730#define NV50_PDISPLAY_UNK_388 0x00610388
731#define NV50_PDISPLAY_UNK_38C 0x0061038c
732
733#define NV50_PDISPLAY_CRTC_P(i, r) ((i) * 0x540 + NV50_PDISPLAY_CRTC_##r)
734#define NV50_PDISPLAY_CRTC_C(i, r) (4 + (i) * 0x540 + NV50_PDISPLAY_CRTC_##r)
735#define NV50_PDISPLAY_CRTC_UNK_0A18 /* mthd 0x0900 */ 0x00610a18
736#define NV50_PDISPLAY_CRTC_CLUT_MODE 0x00610a24
737#define NV50_PDISPLAY_CRTC_INTERLACE 0x00610a48
738#define NV50_PDISPLAY_CRTC_SCALE_CTRL 0x00610a50
739#define NV50_PDISPLAY_CRTC_CURSOR_CTRL 0x00610a58
740#define NV50_PDISPLAY_CRTC_UNK0A78 /* mthd 0x0904 */ 0x00610a78
741#define NV50_PDISPLAY_CRTC_UNK0AB8 0x00610ab8
742#define NV50_PDISPLAY_CRTC_DEPTH 0x00610ac8
743#define NV50_PDISPLAY_CRTC_CLOCK 0x00610ad0
744#define NV50_PDISPLAY_CRTC_COLOR_CTRL 0x00610ae0
745#define NV50_PDISPLAY_CRTC_SYNC_START_TO_BLANK_END 0x00610ae8
746#define NV50_PDISPLAY_CRTC_MODE_UNK1 0x00610af0
747#define NV50_PDISPLAY_CRTC_DISPLAY_TOTAL 0x00610af8
748#define NV50_PDISPLAY_CRTC_SYNC_DURATION 0x00610b00
749#define NV50_PDISPLAY_CRTC_MODE_UNK2 0x00610b08
750#define NV50_PDISPLAY_CRTC_UNK_0B10 /* mthd 0x0828 */ 0x00610b10
751#define NV50_PDISPLAY_CRTC_FB_SIZE 0x00610b18
752#define NV50_PDISPLAY_CRTC_FB_PITCH 0x00610b20
753#define NV50_PDISPLAY_CRTC_FB_PITCH_LINEAR 0x00100000
754#define NV50_PDISPLAY_CRTC_FB_POS 0x00610b28
755#define NV50_PDISPLAY_CRTC_SCALE_CENTER_OFFSET 0x00610b38
756#define NV50_PDISPLAY_CRTC_REAL_RES 0x00610b40
757#define NV50_PDISPLAY_CRTC_SCALE_RES1 0x00610b48
758#define NV50_PDISPLAY_CRTC_SCALE_RES2 0x00610b50
759
760#define NV50_PDISPLAY_DAC_MODE_CTRL_P(i) (0x00610b58 + (i) * 0x8)
761#define NV50_PDISPLAY_DAC_MODE_CTRL_C(i) (0x00610b5c + (i) * 0x8)
762#define NV50_PDISPLAY_SOR_MODE_CTRL_P(i) (0x00610b70 + (i) * 0x8)
763#define NV50_PDISPLAY_SOR_MODE_CTRL_C(i) (0x00610b74 + (i) * 0x8)
764#define NV50_PDISPLAY_DAC_MODE_CTRL2_P(i) (0x00610bdc + (i) * 0x8)
765#define NV50_PDISPLAY_DAC_MODE_CTRL2_C(i) (0x00610be0 + (i) * 0x8)
766
767#define NV90_PDISPLAY_SOR_MODE_CTRL_P(i) (0x00610794 + (i) * 0x8)
768#define NV90_PDISPLAY_SOR_MODE_CTRL_C(i) (0x00610798 + (i) * 0x8)
769#define NV90_PDISPLAY_DAC_MODE_CTRL_P(i) (0x00610b58 + (i) * 0x8)
770#define NV90_PDISPLAY_DAC_MODE_CTRL_C(i) (0x00610b5c + (i) * 0x8)
771#define NV90_PDISPLAY_DAC_MODE_CTRL2_P(i) (0x00610b80 + (i) * 0x8)
772#define NV90_PDISPLAY_DAC_MODE_CTRL2_C(i) (0x00610b84 + (i) * 0x8)
773
774#define NV50_PDISPLAY_CRTC_CLK 0x00614000
775#define NV50_PDISPLAY_CRTC_CLK_CTRL1(i) ((i) * 0x800 + 0x614100)
776#define NV50_PDISPLAY_CRTC_CLK_CTRL1_CONNECTED 0x00000600
777#define NV50_PDISPLAY_CRTC_CLK_VPLL_A(i) ((i) * 0x800 + 0x614104)
778#define NV50_PDISPLAY_CRTC_CLK_VPLL_B(i) ((i) * 0x800 + 0x614108)
779#define NV50_PDISPLAY_CRTC_CLK_CTRL2(i) ((i) * 0x800 + 0x614200)
780
781#define NV50_PDISPLAY_DAC_CLK 0x00614000
782#define NV50_PDISPLAY_DAC_CLK_CTRL2(i) ((i) * 0x800 + 0x614280)
783
784#define NV50_PDISPLAY_SOR_CLK 0x00614000
785#define NV50_PDISPLAY_SOR_CLK_CTRL2(i) ((i) * 0x800 + 0x614300)
786
787#define NV50_PDISPLAY_VGACRTC(r) ((r) + 0x619400)
788
789#define NV50_PDISPLAY_DAC 0x0061a000
790#define NV50_PDISPLAY_DAC_DPMS_CTRL(i) (0x0061a004 + (i) * 0x800)
791#define NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF 0x00000001
792#define NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF 0x00000004
793#define NV50_PDISPLAY_DAC_DPMS_CTRL_BLANKED 0x00000010
794#define NV50_PDISPLAY_DAC_DPMS_CTRL_OFF 0x00000040
795#define NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING 0x80000000
796#define NV50_PDISPLAY_DAC_LOAD_CTRL(i) (0x0061a00c + (i) * 0x800)
797#define NV50_PDISPLAY_DAC_LOAD_CTRL_ACTIVE 0x00100000
798#define NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT 0x38000000
799#define NV50_PDISPLAY_DAC_LOAD_CTRL_DONE 0x80000000
800#define NV50_PDISPLAY_DAC_CLK_CTRL1(i) (0x0061a010 + (i) * 0x800)
801#define NV50_PDISPLAY_DAC_CLK_CTRL1_CONNECTED 0x00000600
802
803#define NV50_PDISPLAY_SOR 0x0061c000
804#define NV50_PDISPLAY_SOR_DPMS_CTRL(i) (0x0061c004 + (i) * 0x800)
805#define NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING 0x80000000
806#define NV50_PDISPLAY_SOR_DPMS_CTRL_ON 0x00000001
807#define NV50_PDISPLAY_SOR_CLK_CTRL1(i) (0x0061c008 + (i) * 0x800)
808#define NV50_PDISPLAY_SOR_CLK_CTRL1_CONNECTED 0x00000600
809#define NV50_PDISPLAY_SOR_DPMS_STATE(i) (0x0061c030 + (i) * 0x800)
810#define NV50_PDISPLAY_SOR_DPMS_STATE_ACTIVE 0x00030000
811#define NV50_PDISPLAY_SOR_DPMS_STATE_BLANKED 0x00080000
812#define NV50_PDISPLAY_SOR_DPMS_STATE_WAIT 0x10000000
813#define NV50_PDISPLAY_SOR_BACKLIGHT 0x0061c084
814#define NV50_PDISPLAY_SOR_BACKLIGHT_ENABLE 0x80000000
815#define NV50_PDISPLAY_SOR_BACKLIGHT_LEVEL 0x00000fff
816#define NV50_SOR_DP_CTRL(i,l) (0x0061c10c + (i) * 0x800 + (l) * 0x80)
817#define NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED 0x00004000
818#define NV50_SOR_DP_CTRL_LANE_MASK 0x001f0000
819#define NV50_SOR_DP_CTRL_LANE_0_ENABLED 0x00010000
820#define NV50_SOR_DP_CTRL_LANE_1_ENABLED 0x00020000
821#define NV50_SOR_DP_CTRL_LANE_2_ENABLED 0x00040000
822#define NV50_SOR_DP_CTRL_LANE_3_ENABLED 0x00080000
823#define NV50_SOR_DP_CTRL_TRAINING_PATTERN 0x0f000000
824#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_DISABLED 0x00000000
825#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_1 0x01000000
826#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_2 0x02000000
827#define NV50_SOR_DP_UNK118(i,l) (0x0061c118 + (i) * 0x800 + (l) * 0x80)
828#define NV50_SOR_DP_UNK120(i,l) (0x0061c120 + (i) * 0x800 + (l) * 0x80)
829#define NV50_SOR_DP_UNK130(i,l) (0x0061c130 + (i) * 0x800 + (l) * 0x80)
830
831#define NV50_PDISPLAY_USER(i) ((i) * 0x1000 + 0x00640000)
832#define NV50_PDISPLAY_USER_PUT(i) ((i) * 0x1000 + 0x00640000)
833#define NV50_PDISPLAY_USER_GET(i) ((i) * 0x1000 + 0x00640004)
834
835#define NV50_PDISPLAY_CURSOR_USER 0x00647000
836#define NV50_PDISPLAY_CURSOR_USER_POS_CTRL(i) ((i) * 0x1000 + 0x00647080)
837#define NV50_PDISPLAY_CURSOR_USER_POS(i) ((i) * 0x1000 + 0x00647084)
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
new file mode 100644
index 000000000000..1d6ee8b55154
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -0,0 +1,341 @@
1#include "drmP.h"
2#include "nouveau_drv.h"
3#include <linux/pagemap.h>
4#include <linux/slab.h>
5
/* GART ctxdma page granularity: PTEs cover 4KiB (1 << 12) pages; the
 * mask selects the offset-within-page bits of a bus address. */
#define NV_CTXDMA_PAGE_SHIFT 12
#define NV_CTXDMA_PAGE_SIZE (1 << NV_CTXDMA_PAGE_SHIFT)
#define NV_CTXDMA_PAGE_MASK (NV_CTXDMA_PAGE_SIZE - 1)
9
/* Per-buffer TTM backend state for the SGDMA (PCI GART) path. */
struct nouveau_sgdma_be {
	/* Must stay first: callbacks cast the ttm_backend * back to this. */
	struct ttm_backend backend;
	struct drm_device *dev;

	/* Bus addresses of the PCI-mapped backing pages (see populate). */
	dma_addr_t *pages;
	unsigned nr_pages;	/* valid entries in pages[] */

	unsigned pte_start;	/* first GART PTE written by the last bind */
	bool bound;		/* true while GART PTEs point at our pages */
};
20
21static int
22nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
23 struct page **pages, struct page *dummy_read_page)
24{
25 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
26 struct drm_device *dev = nvbe->dev;
27
28 NV_DEBUG(nvbe->dev, "num_pages = %ld\n", num_pages);
29
30 if (nvbe->pages)
31 return -EINVAL;
32
33 nvbe->pages = kmalloc(sizeof(dma_addr_t) * num_pages, GFP_KERNEL);
34 if (!nvbe->pages)
35 return -ENOMEM;
36
37 nvbe->nr_pages = 0;
38 while (num_pages--) {
39 nvbe->pages[nvbe->nr_pages] =
40 pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
41 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
42 if (pci_dma_mapping_error(dev->pdev,
43 nvbe->pages[nvbe->nr_pages])) {
44 be->func->clear(be);
45 return -EFAULT;
46 }
47
48 nvbe->nr_pages++;
49 }
50
51 return 0;
52}
53
54static void
55nouveau_sgdma_clear(struct ttm_backend *be)
56{
57 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
58 struct drm_device *dev;
59
60 if (nvbe && nvbe->pages) {
61 dev = nvbe->dev;
62 NV_DEBUG(dev, "\n");
63
64 if (nvbe->bound)
65 be->func->unbind(be);
66
67 while (nvbe->nr_pages--) {
68 pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
69 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
70 }
71 kfree(nvbe->pages);
72 nvbe->pages = NULL;
73 nvbe->nr_pages = 0;
74 }
75}
76
77static inline unsigned
78nouveau_sgdma_pte(struct drm_device *dev, uint64_t offset)
79{
80 struct drm_nouveau_private *dev_priv = dev->dev_private;
81 unsigned pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
82
83 if (dev_priv->card_type < NV_50)
84 return pte + 2;
85
86 return pte << 1;
87}
88
/*
 * ttm_backend bind hook: write GART PTEs so the aperture range at
 * mem->mm_node->start maps the pages recorded by nouveau_sgdma_populate().
 * Returns 0 on success or -EBUSY if the NV50 post-write flush times out.
 */
static int
nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "pg=0x%lx\n", mem->mm_node->start);

	dev_priv->engine.instmem.prepare_access(nvbe->dev, true);
	/* First PTE for this aperture offset; remembered for unbind. */
	pte = nouveau_sgdma_pte(nvbe->dev, mem->mm_node->start << PAGE_SHIFT);
	nvbe->pte_start = pte;
	for (i = 0; i < nvbe->nr_pages; i++) {
		dma_addr_t dma_offset = nvbe->pages[i];
		uint32_t offset_l = lower_32_bits(dma_offset);
		uint32_t offset_h = upper_32_bits(dma_offset);

		/* A CPU page may span several 4KiB ctxdma pages. */
		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
			if (dev_priv->card_type < NV_50)
				/* one word per PTE; low bits 0x3 presumably
				 * valid/present flags — confirm vs rules-ng */
				nv_wo32(dev, gpuobj, pte++, offset_l | 3);
			else {
				/* two words per PTE on NV50 */
				nv_wo32(dev, gpuobj, pte++, offset_l | 0x21);
				nv_wo32(dev, gpuobj, pte++, offset_h & 0xff);
			}

			dma_offset += NV_CTXDMA_PAGE_SIZE;
		}
	}
	dev_priv->engine.instmem.finish_access(nvbe->dev);

	if (dev_priv->card_type == NV_50) {
		/* NOTE(review): the 0x100c80 write/poll pair looks like a
		 * VM/TLB flush sequence on NV50 — confirm against rules-ng.
		 * XXX both timeout messages say "(2)"; the first is likely
		 * a copy-paste and meant to read "(1)". */
		nv_wr32(dev, 0x100c80, 0x00050001);
		if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
			NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
			NV_ERROR(dev, "0x100c80 = 0x%08x\n",
				 nv_rd32(dev, 0x100c80));
			return -EBUSY;
		}

		nv_wr32(dev, 0x100c80, 0x00000001);
		if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
			NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
			NV_ERROR(dev, "0x100c80 = 0x%08x\n",
				 nv_rd32(dev, 0x100c80));
			return -EBUSY;
		}
	}

	nvbe->bound = true;
	return 0;
}
142
/*
 * ttm_backend unbind hook: repoint every PTE written by bind back at the
 * shared dummy page, then (on NV50) run the same flush sequence as bind.
 * Returns 0 on success or -EBUSY on flush timeout.
 */
static int
nouveau_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "\n");

	if (!nvbe->bound)
		return 0;

	dev_priv->engine.instmem.prepare_access(nvbe->dev, true);
	/* Walk exactly the PTE range bind filled in. */
	pte = nvbe->pte_start;
	for (i = 0; i < nvbe->nr_pages; i++) {
		/* Every entry maps the dummy page set up in
		 * nouveau_sgdma_init(). */
		dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus;

		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
			if (dev_priv->card_type < NV_50)
				nv_wo32(dev, gpuobj, pte++, dma_offset | 3);
			else {
				nv_wo32(dev, gpuobj, pte++, dma_offset | 0x21);
				nv_wo32(dev, gpuobj, pte++, 0x00000000);
			}

			dma_offset += NV_CTXDMA_PAGE_SIZE;
		}
	}
	dev_priv->engine.instmem.finish_access(nvbe->dev);

	if (dev_priv->card_type == NV_50) {
		/* NOTE(review): same 0x100c80 flush sequence as bind —
		 * presumably a VM/TLB flush; confirm against rules-ng. */
		nv_wr32(dev, 0x100c80, 0x00050001);
		if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
			NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
			NV_ERROR(dev, "0x100c80 = 0x%08x\n",
				 nv_rd32(dev, 0x100c80));
			return -EBUSY;
		}

		nv_wr32(dev, 0x100c80, 0x00000001);
		if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
			NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
			NV_ERROR(dev, "0x100c80 = 0x%08x\n",
				 nv_rd32(dev, 0x100c80));
			return -EBUSY;
		}
	}

	nvbe->bound = false;
	return 0;
}
196
197static void
198nouveau_sgdma_destroy(struct ttm_backend *be)
199{
200 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
201
202 if (be) {
203 NV_DEBUG(nvbe->dev, "\n");
204
205 if (nvbe) {
206 if (nvbe->pages)
207 be->func->clear(be);
208 kfree(nvbe);
209 }
210 }
211}
212
/* TTM backend vtable wiring the SGDMA callbacks above into TTM. */
static struct ttm_backend_func nouveau_sgdma_backend = {
	.populate = nouveau_sgdma_populate,
	.clear = nouveau_sgdma_clear,
	.bind = nouveau_sgdma_bind,
	.unbind = nouveau_sgdma_unbind,
	.destroy = nouveau_sgdma_destroy
};
220
221struct ttm_backend *
222nouveau_sgdma_init_ttm(struct drm_device *dev)
223{
224 struct drm_nouveau_private *dev_priv = dev->dev_private;
225 struct nouveau_sgdma_be *nvbe;
226
227 if (!dev_priv->gart_info.sg_ctxdma)
228 return NULL;
229
230 nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
231 if (!nvbe)
232 return NULL;
233
234 nvbe->dev = dev;
235
236 nvbe->backend.func = &nouveau_sgdma_backend;
237
238 return &nvbe->backend;
239}
240
241int
242nouveau_sgdma_init(struct drm_device *dev)
243{
244 struct drm_nouveau_private *dev_priv = dev->dev_private;
245 struct nouveau_gpuobj *gpuobj = NULL;
246 uint32_t aper_size, obj_size;
247 int i, ret;
248
249 if (dev_priv->card_type < NV_50) {
250 aper_size = (64 * 1024 * 1024);
251 obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
252 obj_size += 8; /* ctxdma header */
253 } else {
254 /* 1 entire VM page table */
255 aper_size = (512 * 1024 * 1024);
256 obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8;
257 }
258
259 ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
260 NVOBJ_FLAG_ALLOW_NO_REFS |
261 NVOBJ_FLAG_ZERO_ALLOC |
262 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
263 if (ret) {
264 NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
265 return ret;
266 }
267
268 dev_priv->gart_info.sg_dummy_page =
269 alloc_page(GFP_KERNEL|__GFP_DMA32);
270 set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags);
271 dev_priv->gart_info.sg_dummy_bus =
272 pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0,
273 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
274
275 dev_priv->engine.instmem.prepare_access(dev, true);
276 if (dev_priv->card_type < NV_50) {
277 /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
278 * confirmed to work on c51. Perhaps means NV_DMA_TARGET_PCIE
279 * on those cards? */
280 nv_wo32(dev, gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
281 (1 << 12) /* PT present */ |
282 (0 << 13) /* PT *not* linear */ |
283 (NV_DMA_ACCESS_RW << 14) |
284 (NV_DMA_TARGET_PCI << 16));
285 nv_wo32(dev, gpuobj, 1, aper_size - 1);
286 for (i = 2; i < 2 + (aper_size >> 12); i++) {
287 nv_wo32(dev, gpuobj, i,
288 dev_priv->gart_info.sg_dummy_bus | 3);
289 }
290 } else {
291 for (i = 0; i < obj_size; i += 8) {
292 nv_wo32(dev, gpuobj, (i+0)/4,
293 dev_priv->gart_info.sg_dummy_bus | 0x21);
294 nv_wo32(dev, gpuobj, (i+4)/4, 0);
295 }
296 }
297 dev_priv->engine.instmem.finish_access(dev);
298
299 dev_priv->gart_info.type = NOUVEAU_GART_SGDMA;
300 dev_priv->gart_info.aper_base = 0;
301 dev_priv->gart_info.aper_size = aper_size;
302 dev_priv->gart_info.sg_ctxdma = gpuobj;
303 return 0;
304}
305
306void
307nouveau_sgdma_takedown(struct drm_device *dev)
308{
309 struct drm_nouveau_private *dev_priv = dev->dev_private;
310
311 if (dev_priv->gart_info.sg_dummy_page) {
312 pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus,
313 NV_CTXDMA_PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
314 unlock_page(dev_priv->gart_info.sg_dummy_page);
315 __free_page(dev_priv->gart_info.sg_dummy_page);
316 dev_priv->gart_info.sg_dummy_page = NULL;
317 dev_priv->gart_info.sg_dummy_bus = 0;
318 }
319
320 nouveau_gpuobj_del(dev, &dev_priv->gart_info.sg_ctxdma);
321}
322
323int
324nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
325{
326 struct drm_nouveau_private *dev_priv = dev->dev_private;
327 struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
328 struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
329 int pte;
330
331 pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
332 if (dev_priv->card_type < NV_50) {
333 instmem->prepare_access(dev, false);
334 *page = nv_ro32(dev, gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK;
335 instmem->finish_access(dev);
336 return 0;
337 }
338
339 NV_ERROR(dev, "Unimplemented on NV50\n");
340 return -EINVAL;
341}
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
new file mode 100644
index 000000000000..e1710640a278
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -0,0 +1,923 @@
1/*
2 * Copyright 2005 Stephane Marchesin
3 * Copyright 2008 Stuart Bennett
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE.
24 */
25
26#include <linux/swab.h>
27#include <linux/slab.h>
28#include "drmP.h"
29#include "drm.h"
30#include "drm_sarea.h"
31#include "drm_crtc_helper.h"
32#include <linux/vgaarb.h>
33#include <linux/vga_switcheroo.h>
34
35#include "nouveau_drv.h"
36#include "nouveau_drm.h"
37#include "nv50_display.h"
38
39static void nouveau_stub_takedown(struct drm_device *dev) {}
40
/*
 * nouveau_init_engine_ptrs - select per-chipset engine implementations.
 *
 * Fills dev_priv->engine's function-pointer tables (instmem, mc, timer,
 * fb, graph, fifo) according to the chipset family, keyed on the high
 * nibble of dev_priv->chipset.  Returns 0 on success, 1 if the chipset
 * family is not supported.
 */
static int nouveau_init_engine_ptrs(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;

	switch (dev_priv->chipset & 0xf0) {
	case 0x00:
		/* NV04/NV05 */
		engine->instmem.init = nv04_instmem_init;
		engine->instmem.takedown = nv04_instmem_takedown;
		engine->instmem.suspend = nv04_instmem_suspend;
		engine->instmem.resume = nv04_instmem_resume;
		engine->instmem.populate = nv04_instmem_populate;
		engine->instmem.clear = nv04_instmem_clear;
		engine->instmem.bind = nv04_instmem_bind;
		engine->instmem.unbind = nv04_instmem_unbind;
		engine->instmem.prepare_access = nv04_instmem_prepare_access;
		engine->instmem.finish_access = nv04_instmem_finish_access;
		engine->mc.init = nv04_mc_init;
		engine->mc.takedown = nv04_mc_takedown;
		engine->timer.init = nv04_timer_init;
		engine->timer.read = nv04_timer_read;
		engine->timer.takedown = nv04_timer_takedown;
		engine->fb.init = nv04_fb_init;
		engine->fb.takedown = nv04_fb_takedown;
		engine->graph.grclass = nv04_graph_grclass;
		engine->graph.init = nv04_graph_init;
		engine->graph.takedown = nv04_graph_takedown;
		engine->graph.fifo_access = nv04_graph_fifo_access;
		engine->graph.channel = nv04_graph_channel;
		engine->graph.create_context = nv04_graph_create_context;
		engine->graph.destroy_context = nv04_graph_destroy_context;
		engine->graph.load_context = nv04_graph_load_context;
		engine->graph.unload_context = nv04_graph_unload_context;
		engine->fifo.channels = 16;
		engine->fifo.init = nv04_fifo_init;
		engine->fifo.takedown = nouveau_stub_takedown;
		engine->fifo.disable = nv04_fifo_disable;
		engine->fifo.enable = nv04_fifo_enable;
		engine->fifo.reassign = nv04_fifo_reassign;
		engine->fifo.cache_flush = nv04_fifo_cache_flush;
		engine->fifo.cache_pull = nv04_fifo_cache_pull;
		engine->fifo.channel_id = nv04_fifo_channel_id;
		engine->fifo.create_context = nv04_fifo_create_context;
		engine->fifo.destroy_context = nv04_fifo_destroy_context;
		engine->fifo.load_context = nv04_fifo_load_context;
		engine->fifo.unload_context = nv04_fifo_unload_context;
		break;
	case 0x10:
		/* NV1x: gains per-region tiling and 32 FIFO channels */
		engine->instmem.init = nv04_instmem_init;
		engine->instmem.takedown = nv04_instmem_takedown;
		engine->instmem.suspend = nv04_instmem_suspend;
		engine->instmem.resume = nv04_instmem_resume;
		engine->instmem.populate = nv04_instmem_populate;
		engine->instmem.clear = nv04_instmem_clear;
		engine->instmem.bind = nv04_instmem_bind;
		engine->instmem.unbind = nv04_instmem_unbind;
		engine->instmem.prepare_access = nv04_instmem_prepare_access;
		engine->instmem.finish_access = nv04_instmem_finish_access;
		engine->mc.init = nv04_mc_init;
		engine->mc.takedown = nv04_mc_takedown;
		engine->timer.init = nv04_timer_init;
		engine->timer.read = nv04_timer_read;
		engine->timer.takedown = nv04_timer_takedown;
		engine->fb.init = nv10_fb_init;
		engine->fb.takedown = nv10_fb_takedown;
		engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
		engine->graph.grclass = nv10_graph_grclass;
		engine->graph.init = nv10_graph_init;
		engine->graph.takedown = nv10_graph_takedown;
		engine->graph.channel = nv10_graph_channel;
		engine->graph.create_context = nv10_graph_create_context;
		engine->graph.destroy_context = nv10_graph_destroy_context;
		engine->graph.fifo_access = nv04_graph_fifo_access;
		engine->graph.load_context = nv10_graph_load_context;
		engine->graph.unload_context = nv10_graph_unload_context;
		engine->graph.set_region_tiling = nv10_graph_set_region_tiling;
		engine->fifo.channels = 32;
		engine->fifo.init = nv10_fifo_init;
		engine->fifo.takedown = nouveau_stub_takedown;
		engine->fifo.disable = nv04_fifo_disable;
		engine->fifo.enable = nv04_fifo_enable;
		engine->fifo.reassign = nv04_fifo_reassign;
		engine->fifo.cache_flush = nv04_fifo_cache_flush;
		engine->fifo.cache_pull = nv04_fifo_cache_pull;
		engine->fifo.channel_id = nv10_fifo_channel_id;
		engine->fifo.create_context = nv10_fifo_create_context;
		engine->fifo.destroy_context = nv10_fifo_destroy_context;
		engine->fifo.load_context = nv10_fifo_load_context;
		engine->fifo.unload_context = nv10_fifo_unload_context;
		break;
	case 0x20:
		/* NV2x: NV10 fb/fifo with NV20 graphics contexts */
		engine->instmem.init = nv04_instmem_init;
		engine->instmem.takedown = nv04_instmem_takedown;
		engine->instmem.suspend = nv04_instmem_suspend;
		engine->instmem.resume = nv04_instmem_resume;
		engine->instmem.populate = nv04_instmem_populate;
		engine->instmem.clear = nv04_instmem_clear;
		engine->instmem.bind = nv04_instmem_bind;
		engine->instmem.unbind = nv04_instmem_unbind;
		engine->instmem.prepare_access = nv04_instmem_prepare_access;
		engine->instmem.finish_access = nv04_instmem_finish_access;
		engine->mc.init = nv04_mc_init;
		engine->mc.takedown = nv04_mc_takedown;
		engine->timer.init = nv04_timer_init;
		engine->timer.read = nv04_timer_read;
		engine->timer.takedown = nv04_timer_takedown;
		engine->fb.init = nv10_fb_init;
		engine->fb.takedown = nv10_fb_takedown;
		engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
		engine->graph.grclass = nv20_graph_grclass;
		engine->graph.init = nv20_graph_init;
		engine->graph.takedown = nv20_graph_takedown;
		engine->graph.channel = nv10_graph_channel;
		engine->graph.create_context = nv20_graph_create_context;
		engine->graph.destroy_context = nv20_graph_destroy_context;
		engine->graph.fifo_access = nv04_graph_fifo_access;
		engine->graph.load_context = nv20_graph_load_context;
		engine->graph.unload_context = nv20_graph_unload_context;
		engine->graph.set_region_tiling = nv20_graph_set_region_tiling;
		engine->fifo.channels = 32;
		engine->fifo.init = nv10_fifo_init;
		engine->fifo.takedown = nouveau_stub_takedown;
		engine->fifo.disable = nv04_fifo_disable;
		engine->fifo.enable = nv04_fifo_enable;
		engine->fifo.reassign = nv04_fifo_reassign;
		engine->fifo.cache_flush = nv04_fifo_cache_flush;
		engine->fifo.cache_pull = nv04_fifo_cache_pull;
		engine->fifo.channel_id = nv10_fifo_channel_id;
		engine->fifo.create_context = nv10_fifo_create_context;
		engine->fifo.destroy_context = nv10_fifo_destroy_context;
		engine->fifo.load_context = nv10_fifo_load_context;
		engine->fifo.unload_context = nv10_fifo_unload_context;
		break;
	case 0x30:
		/* NV3x: NV30 graph init, otherwise shares NV20 graph code */
		engine->instmem.init = nv04_instmem_init;
		engine->instmem.takedown = nv04_instmem_takedown;
		engine->instmem.suspend = nv04_instmem_suspend;
		engine->instmem.resume = nv04_instmem_resume;
		engine->instmem.populate = nv04_instmem_populate;
		engine->instmem.clear = nv04_instmem_clear;
		engine->instmem.bind = nv04_instmem_bind;
		engine->instmem.unbind = nv04_instmem_unbind;
		engine->instmem.prepare_access = nv04_instmem_prepare_access;
		engine->instmem.finish_access = nv04_instmem_finish_access;
		engine->mc.init = nv04_mc_init;
		engine->mc.takedown = nv04_mc_takedown;
		engine->timer.init = nv04_timer_init;
		engine->timer.read = nv04_timer_read;
		engine->timer.takedown = nv04_timer_takedown;
		engine->fb.init = nv10_fb_init;
		engine->fb.takedown = nv10_fb_takedown;
		engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
		engine->graph.grclass = nv30_graph_grclass;
		engine->graph.init = nv30_graph_init;
		engine->graph.takedown = nv20_graph_takedown;
		engine->graph.fifo_access = nv04_graph_fifo_access;
		engine->graph.channel = nv10_graph_channel;
		engine->graph.create_context = nv20_graph_create_context;
		engine->graph.destroy_context = nv20_graph_destroy_context;
		engine->graph.load_context = nv20_graph_load_context;
		engine->graph.unload_context = nv20_graph_unload_context;
		engine->graph.set_region_tiling = nv20_graph_set_region_tiling;
		engine->fifo.channels = 32;
		engine->fifo.init = nv10_fifo_init;
		engine->fifo.takedown = nouveau_stub_takedown;
		engine->fifo.disable = nv04_fifo_disable;
		engine->fifo.enable = nv04_fifo_enable;
		engine->fifo.reassign = nv04_fifo_reassign;
		engine->fifo.cache_flush = nv04_fifo_cache_flush;
		engine->fifo.cache_pull = nv04_fifo_cache_pull;
		engine->fifo.channel_id = nv10_fifo_channel_id;
		engine->fifo.create_context = nv10_fifo_create_context;
		engine->fifo.destroy_context = nv10_fifo_destroy_context;
		engine->fifo.load_context = nv10_fifo_load_context;
		engine->fifo.unload_context = nv10_fifo_unload_context;
		break;
	case 0x40:
	case 0x60:
		/* NV4x family (0x6x chipsets are NV4x-derived) */
		engine->instmem.init = nv04_instmem_init;
		engine->instmem.takedown = nv04_instmem_takedown;
		engine->instmem.suspend = nv04_instmem_suspend;
		engine->instmem.resume = nv04_instmem_resume;
		engine->instmem.populate = nv04_instmem_populate;
		engine->instmem.clear = nv04_instmem_clear;
		engine->instmem.bind = nv04_instmem_bind;
		engine->instmem.unbind = nv04_instmem_unbind;
		engine->instmem.prepare_access = nv04_instmem_prepare_access;
		engine->instmem.finish_access = nv04_instmem_finish_access;
		engine->mc.init = nv40_mc_init;
		engine->mc.takedown = nv40_mc_takedown;
		engine->timer.init = nv04_timer_init;
		engine->timer.read = nv04_timer_read;
		engine->timer.takedown = nv04_timer_takedown;
		engine->fb.init = nv40_fb_init;
		engine->fb.takedown = nv40_fb_takedown;
		engine->fb.set_region_tiling = nv40_fb_set_region_tiling;
		engine->graph.grclass = nv40_graph_grclass;
		engine->graph.init = nv40_graph_init;
		engine->graph.takedown = nv40_graph_takedown;
		engine->graph.fifo_access = nv04_graph_fifo_access;
		engine->graph.channel = nv40_graph_channel;
		engine->graph.create_context = nv40_graph_create_context;
		engine->graph.destroy_context = nv40_graph_destroy_context;
		engine->graph.load_context = nv40_graph_load_context;
		engine->graph.unload_context = nv40_graph_unload_context;
		engine->graph.set_region_tiling = nv40_graph_set_region_tiling;
		engine->fifo.channels = 32;
		engine->fifo.init = nv40_fifo_init;
		engine->fifo.takedown = nouveau_stub_takedown;
		engine->fifo.disable = nv04_fifo_disable;
		engine->fifo.enable = nv04_fifo_enable;
		engine->fifo.reassign = nv04_fifo_reassign;
		engine->fifo.cache_flush = nv04_fifo_cache_flush;
		engine->fifo.cache_pull = nv04_fifo_cache_pull;
		engine->fifo.channel_id = nv10_fifo_channel_id;
		engine->fifo.create_context = nv40_fifo_create_context;
		engine->fifo.destroy_context = nv40_fifo_destroy_context;
		engine->fifo.load_context = nv40_fifo_load_context;
		engine->fifo.unload_context = nv40_fifo_unload_context;
		break;
	case 0x50:
	case 0x80: /* gotta love NVIDIA's consistency.. */
	case 0x90:
	case 0xA0:
		/* NV50 family: no tiling hooks or cache flush/pull, 128 channels */
		engine->instmem.init = nv50_instmem_init;
		engine->instmem.takedown = nv50_instmem_takedown;
		engine->instmem.suspend = nv50_instmem_suspend;
		engine->instmem.resume = nv50_instmem_resume;
		engine->instmem.populate = nv50_instmem_populate;
		engine->instmem.clear = nv50_instmem_clear;
		engine->instmem.bind = nv50_instmem_bind;
		engine->instmem.unbind = nv50_instmem_unbind;
		engine->instmem.prepare_access = nv50_instmem_prepare_access;
		engine->instmem.finish_access = nv50_instmem_finish_access;
		engine->mc.init = nv50_mc_init;
		engine->mc.takedown = nv50_mc_takedown;
		engine->timer.init = nv04_timer_init;
		engine->timer.read = nv04_timer_read;
		engine->timer.takedown = nv04_timer_takedown;
		engine->fb.init = nv50_fb_init;
		engine->fb.takedown = nv50_fb_takedown;
		engine->graph.grclass = nv50_graph_grclass;
		engine->graph.init = nv50_graph_init;
		engine->graph.takedown = nv50_graph_takedown;
		engine->graph.fifo_access = nv50_graph_fifo_access;
		engine->graph.channel = nv50_graph_channel;
		engine->graph.create_context = nv50_graph_create_context;
		engine->graph.destroy_context = nv50_graph_destroy_context;
		engine->graph.load_context = nv50_graph_load_context;
		engine->graph.unload_context = nv50_graph_unload_context;
		engine->fifo.channels = 128;
		engine->fifo.init = nv50_fifo_init;
		engine->fifo.takedown = nv50_fifo_takedown;
		engine->fifo.disable = nv04_fifo_disable;
		engine->fifo.enable = nv04_fifo_enable;
		engine->fifo.reassign = nv04_fifo_reassign;
		engine->fifo.channel_id = nv50_fifo_channel_id;
		engine->fifo.create_context = nv50_fifo_create_context;
		engine->fifo.destroy_context = nv50_fifo_destroy_context;
		engine->fifo.load_context = nv50_fifo_load_context;
		engine->fifo.unload_context = nv50_fifo_unload_context;
		break;
	default:
		NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset);
		return 1;
	}

	return 0;
}
310
311static unsigned int
312nouveau_vga_set_decode(void *priv, bool state)
313{
314 struct drm_device *dev = priv;
315 struct drm_nouveau_private *dev_priv = dev->dev_private;
316
317 if (dev_priv->chipset >= 0x40)
318 nv_wr32(dev, 0x88054, state);
319 else
320 nv_wr32(dev, 0x1854, state);
321
322 if (state)
323 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
324 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
325 else
326 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
327}
328
/*
 * nouveau_card_init_channel - create the kernel's own FIFO channel.
 *
 * Allocates dev_priv->channel and attaches DMA objects covering all of
 * VRAM (NvDmaVRAM) and the whole GART aperture (NvDmaGART).  On any
 * failure the partially-built channel is torn down and the error is
 * returned; dev_priv->channel is left NULL in that case.
 */
static int
nouveau_card_init_channel(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int ret;

	/* (struct drm_file *)-2 is a sentinel owner marking this as the
	 * in-kernel channel rather than one owned by a client file —
	 * NOTE(review): confirm against nouveau_channel_alloc(). */
	ret = nouveau_channel_alloc(dev, &dev_priv->channel,
				    (struct drm_file *)-2,
				    NvDmaFB, NvDmaTT);
	if (ret)
		return ret;

	/* DMA object spanning all of VRAM, published as NvDmaVRAM. */
	gpuobj = NULL;
	ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
				     0, dev_priv->vram_size,
				     NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM,
				     &gpuobj);
	if (ret)
		goto out_err;

	ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaVRAM,
				     gpuobj, NULL);
	if (ret)
		goto out_err;

	/* DMA object spanning the GART aperture, published as NvDmaGART. */
	gpuobj = NULL;
	ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0,
					  dev_priv->gart_info.aper_size,
					  NV_DMA_ACCESS_RW, &gpuobj, NULL);
	if (ret)
		goto out_err;

	ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaGART,
				     gpuobj, NULL);
	if (ret)
		goto out_err;

	return 0;
out_err:
	/* Free the unreferenced gpuobj (if any), then the whole channel. */
	nouveau_gpuobj_del(dev, &gpuobj);
	nouveau_channel_free(dev_priv->channel);
	dev_priv->channel = NULL;
	return ret;
}
374
375static void nouveau_switcheroo_set_state(struct pci_dev *pdev,
376 enum vga_switcheroo_state state)
377{
378 pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
379 if (state == VGA_SWITCHEROO_ON) {
380 printk(KERN_ERR "VGA switcheroo: switched nouveau on\n");
381 nouveau_pci_resume(pdev);
382 } else {
383 printk(KERN_ERR "VGA switcheroo: switched nouveau off\n");
384 nouveau_pci_suspend(pdev, pmm);
385 }
386}
387
388static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
389{
390 struct drm_device *dev = pci_get_drvdata(pdev);
391 bool can_switch;
392
393 spin_lock(&dev->count_lock);
394 can_switch = (dev->open_count == 0);
395 spin_unlock(&dev->count_lock);
396 return can_switch;
397}
398
399int
400nouveau_card_init(struct drm_device *dev)
401{
402 struct drm_nouveau_private *dev_priv = dev->dev_private;
403 struct nouveau_engine *engine;
404 int ret;
405
406 NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state);
407
408 if (dev_priv->init_state == NOUVEAU_CARD_INIT_DONE)
409 return 0;
410
411 vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
412 vga_switcheroo_register_client(dev->pdev, nouveau_switcheroo_set_state,
413 nouveau_switcheroo_can_switch);
414
415 /* Initialise internal driver API hooks */
416 ret = nouveau_init_engine_ptrs(dev);
417 if (ret)
418 goto out;
419 engine = &dev_priv->engine;
420 dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED;
421 spin_lock_init(&dev_priv->context_switch_lock);
422
423 /* Parse BIOS tables / Run init tables if card not POSTed */
424 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
425 ret = nouveau_bios_init(dev);
426 if (ret)
427 goto out;
428 }
429
430 ret = nouveau_mem_detect(dev);
431 if (ret)
432 goto out_bios;
433
434 ret = nouveau_gpuobj_early_init(dev);
435 if (ret)
436 goto out_bios;
437
438 /* Initialise instance memory, must happen before mem_init so we
439 * know exactly how much VRAM we're able to use for "normal"
440 * purposes.
441 */
442 ret = engine->instmem.init(dev);
443 if (ret)
444 goto out_gpuobj_early;
445
446 /* Setup the memory manager */
447 ret = nouveau_mem_init(dev);
448 if (ret)
449 goto out_instmem;
450
451 ret = nouveau_gpuobj_init(dev);
452 if (ret)
453 goto out_mem;
454
455 /* PMC */
456 ret = engine->mc.init(dev);
457 if (ret)
458 goto out_gpuobj;
459
460 /* PTIMER */
461 ret = engine->timer.init(dev);
462 if (ret)
463 goto out_mc;
464
465 /* PFB */
466 ret = engine->fb.init(dev);
467 if (ret)
468 goto out_timer;
469
470 if (nouveau_noaccel)
471 engine->graph.accel_blocked = true;
472 else {
473 /* PGRAPH */
474 ret = engine->graph.init(dev);
475 if (ret)
476 goto out_fb;
477
478 /* PFIFO */
479 ret = engine->fifo.init(dev);
480 if (ret)
481 goto out_graph;
482 }
483
484 /* this call irq_preinstall, register irq handler and
485 * call irq_postinstall
486 */
487 ret = drm_irq_install(dev);
488 if (ret)
489 goto out_fifo;
490
491 ret = drm_vblank_init(dev, 0);
492 if (ret)
493 goto out_irq;
494
495 /* what about PVIDEO/PCRTC/PRAMDAC etc? */
496
497 if (!engine->graph.accel_blocked) {
498 ret = nouveau_card_init_channel(dev);
499 if (ret)
500 goto out_irq;
501 }
502
503 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
504 if (dev_priv->card_type >= NV_50)
505 ret = nv50_display_create(dev);
506 else
507 ret = nv04_display_create(dev);
508 if (ret)
509 goto out_channel;
510 }
511
512 ret = nouveau_backlight_init(dev);
513 if (ret)
514 NV_ERROR(dev, "Error %d registering backlight\n", ret);
515
516 dev_priv->init_state = NOUVEAU_CARD_INIT_DONE;
517
518 if (drm_core_check_feature(dev, DRIVER_MODESET))
519 drm_helper_initial_config(dev);
520
521 return 0;
522
523out_channel:
524 if (dev_priv->channel) {
525 nouveau_channel_free(dev_priv->channel);
526 dev_priv->channel = NULL;
527 }
528out_irq:
529 drm_irq_uninstall(dev);
530out_fifo:
531 if (!nouveau_noaccel)
532 engine->fifo.takedown(dev);
533out_graph:
534 if (!nouveau_noaccel)
535 engine->graph.takedown(dev);
536out_fb:
537 engine->fb.takedown(dev);
538out_timer:
539 engine->timer.takedown(dev);
540out_mc:
541 engine->mc.takedown(dev);
542out_gpuobj:
543 nouveau_gpuobj_takedown(dev);
544out_mem:
545 nouveau_sgdma_takedown(dev);
546 nouveau_mem_close(dev);
547out_instmem:
548 engine->instmem.takedown(dev);
549out_gpuobj_early:
550 nouveau_gpuobj_late_takedown(dev);
551out_bios:
552 nouveau_bios_takedown(dev);
553out:
554 vga_client_register(dev->pdev, NULL, NULL, NULL);
555 return ret;
556}
557
/*
 * nouveau_card_takedown - tear down everything nouveau_card_init set up.
 *
 * Runs in reverse dependency order: backlight, kernel channel, accel
 * engines (if they were initialised), FB/timer/MC, TTM memory pools,
 * SG-DMA, gpuobjs, instance memory, IRQ (KMS only), and BIOS state.
 * No-op if the card was never brought past NOUVEAU_CARD_INIT_DOWN.
 */
static void nouveau_card_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;

	NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state);

	if (dev_priv->init_state != NOUVEAU_CARD_INIT_DOWN) {
		nouveau_backlight_exit(dev);

		if (dev_priv->channel) {
			nouveau_channel_free(dev_priv->channel);
			dev_priv->channel = NULL;
		}

		/* graph/fifo were only initialised when accel is enabled */
		if (!nouveau_noaccel) {
			engine->fifo.takedown(dev);
			engine->graph.takedown(dev);
		}
		engine->fb.takedown(dev);
		engine->timer.takedown(dev);
		engine->mc.takedown(dev);

		/* Drain the TTM memory pools before dropping the SG-DMA
		 * backing; bdev access is serialised by struct_mutex. */
		mutex_lock(&dev->struct_mutex);
		ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
		ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT);
		mutex_unlock(&dev->struct_mutex);
		nouveau_sgdma_takedown(dev);

		nouveau_gpuobj_takedown(dev);
		nouveau_mem_close(dev);
		engine->instmem.takedown(dev);

		/* For UMS the IRQ is handled by the drm core instead. */
		if (drm_core_check_feature(dev, DRIVER_MODESET))
			drm_irq_uninstall(dev);

		nouveau_gpuobj_late_takedown(dev);
		nouveau_bios_takedown(dev);

		vga_client_register(dev->pdev, NULL, NULL, NULL);

		dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN;
	}
}
602
/* here a client dies, release the stuff that was allocated for its
 * file_priv */
void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv)
{
	/* Free every FIFO channel still owned by this client. */
	nouveau_channel_cleanup(dev, file_priv);
}
609
/* first module load, setup the mmio/fb mapping */
/* KMS: we need mmio at load time, not when the first drm client opens. */
int nouveau_firstopen(struct drm_device *dev)
{
	/* All mapping work happens in nouveau_load(); nothing to do here. */
	return 0;
}
616
/* On Open Firmware machines the VBIOS may only be available as an OF
 * property; copy it into RAMIN so the BIOS parser can find it.  No-op
 * on non-powerpc builds and when the property is absent. */
static void nouveau_OF_copy_vbios_to_ramin(struct drm_device *dev)
{
#if defined(__powerpc__)
	struct device_node *of_node = pci_device_to_OF_node(dev->pdev);
	const uint32_t *vbios;
	int len, off;

	if (!of_node) {
		NV_INFO(dev, "Unable to get the OF node\n");
		return;
	}

	vbios = of_get_property(of_node, "NVDA,BMP", &len);
	if (!vbios) {
		NV_INFO(dev, "Unable to get the OF bios\n");
		return;
	}

	/* RAMIN is written one 32-bit word at a time. */
	for (off = 0; off < len; off += 4)
		nv_wi32(dev, off, vbios[off / 4]);
	NV_INFO(dev, "OF bios successfully copied (%d bytes)\n", len);
#endif
}
639
640int nouveau_load(struct drm_device *dev, unsigned long flags)
641{
642 struct drm_nouveau_private *dev_priv;
643 uint32_t reg0;
644 resource_size_t mmio_start_offs;
645
646 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
647 if (!dev_priv)
648 return -ENOMEM;
649 dev->dev_private = dev_priv;
650 dev_priv->dev = dev;
651
652 dev_priv->flags = flags & NOUVEAU_FLAGS;
653 dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN;
654
655 NV_DEBUG(dev, "vendor: 0x%X device: 0x%X class: 0x%X\n",
656 dev->pci_vendor, dev->pci_device, dev->pdev->class);
657
658 dev_priv->wq = create_workqueue("nouveau");
659 if (!dev_priv->wq)
660 return -EINVAL;
661
662 /* resource 0 is mmio regs */
663 /* resource 1 is linear FB */
664 /* resource 2 is RAMIN (mmio regs + 0x1000000) */
665 /* resource 6 is bios */
666
667 /* map the mmio regs */
668 mmio_start_offs = pci_resource_start(dev->pdev, 0);
669 dev_priv->mmio = ioremap(mmio_start_offs, 0x00800000);
670 if (!dev_priv->mmio) {
671 NV_ERROR(dev, "Unable to initialize the mmio mapping. "
672 "Please report your setup to " DRIVER_EMAIL "\n");
673 return -EINVAL;
674 }
675 NV_DEBUG(dev, "regs mapped ok at 0x%llx\n",
676 (unsigned long long)mmio_start_offs);
677
678#ifdef __BIG_ENDIAN
679 /* Put the card in BE mode if it's not */
680 if (nv_rd32(dev, NV03_PMC_BOOT_1))
681 nv_wr32(dev, NV03_PMC_BOOT_1, 0x00000001);
682
683 DRM_MEMORYBARRIER();
684#endif
685
686 /* Time to determine the card architecture */
687 reg0 = nv_rd32(dev, NV03_PMC_BOOT_0);
688
689 /* We're dealing with >=NV10 */
690 if ((reg0 & 0x0f000000) > 0) {
691 /* Bit 27-20 contain the architecture in hex */
692 dev_priv->chipset = (reg0 & 0xff00000) >> 20;
693 /* NV04 or NV05 */
694 } else if ((reg0 & 0xff00fff0) == 0x20004000) {
695 if (reg0 & 0x00f00000)
696 dev_priv->chipset = 0x05;
697 else
698 dev_priv->chipset = 0x04;
699 } else
700 dev_priv->chipset = 0xff;
701
702 switch (dev_priv->chipset & 0xf0) {
703 case 0x00:
704 case 0x10:
705 case 0x20:
706 case 0x30:
707 dev_priv->card_type = dev_priv->chipset & 0xf0;
708 break;
709 case 0x40:
710 case 0x60:
711 dev_priv->card_type = NV_40;
712 break;
713 case 0x50:
714 case 0x80:
715 case 0x90:
716 case 0xa0:
717 dev_priv->card_type = NV_50;
718 break;
719 default:
720 NV_INFO(dev, "Unsupported chipset 0x%08x\n", reg0);
721 return -EINVAL;
722 }
723
724 NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n",
725 dev_priv->card_type, reg0);
726
727 /* map larger RAMIN aperture on NV40 cards */
728 dev_priv->ramin = NULL;
729 if (dev_priv->card_type >= NV_40) {
730 int ramin_bar = 2;
731 if (pci_resource_len(dev->pdev, ramin_bar) == 0)
732 ramin_bar = 3;
733
734 dev_priv->ramin_size = pci_resource_len(dev->pdev, ramin_bar);
735 dev_priv->ramin = ioremap(
736 pci_resource_start(dev->pdev, ramin_bar),
737 dev_priv->ramin_size);
738 if (!dev_priv->ramin) {
739 NV_ERROR(dev, "Failed to init RAMIN mapping, "
740 "limited instance memory available\n");
741 }
742 }
743
744 /* On older cards (or if the above failed), create a map covering
745 * the BAR0 PRAMIN aperture */
746 if (!dev_priv->ramin) {
747 dev_priv->ramin_size = 1 * 1024 * 1024;
748 dev_priv->ramin = ioremap(mmio_start_offs + NV_RAMIN,
749 dev_priv->ramin_size);
750 if (!dev_priv->ramin) {
751 NV_ERROR(dev, "Failed to map BAR0 PRAMIN.\n");
752 return -ENOMEM;
753 }
754 }
755
756 nouveau_OF_copy_vbios_to_ramin(dev);
757
758 /* Special flags */
759 if (dev->pci_device == 0x01a0)
760 dev_priv->flags |= NV_NFORCE;
761 else if (dev->pci_device == 0x01f0)
762 dev_priv->flags |= NV_NFORCE2;
763
764 /* For kernel modesetting, init card now and bring up fbcon */
765 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
766 int ret = nouveau_card_init(dev);
767 if (ret)
768 return ret;
769 }
770
771 return 0;
772}
773
774static void nouveau_close(struct drm_device *dev)
775{
776 struct drm_nouveau_private *dev_priv = dev->dev_private;
777
778 /* In the case of an error dev_priv may not be allocated yet */
779 if (dev_priv)
780 nouveau_card_takedown(dev);
781}
782
783/* KMS: we need mmio at load time, not when the first drm client opens. */
784void nouveau_lastclose(struct drm_device *dev)
785{
786 if (drm_core_check_feature(dev, DRIVER_MODESET))
787 return;
788
789 nouveau_close(dev);
790}
791
792int nouveau_unload(struct drm_device *dev)
793{
794 struct drm_nouveau_private *dev_priv = dev->dev_private;
795
796 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
797 if (dev_priv->card_type >= NV_50)
798 nv50_display_destroy(dev);
799 else
800 nv04_display_destroy(dev);
801 nouveau_close(dev);
802 }
803
804 iounmap(dev_priv->mmio);
805 iounmap(dev_priv->ramin);
806
807 kfree(dev_priv);
808 dev->dev_private = NULL;
809 return 0;
810}
811
/*
 * nouveau_ioctl_getparam - DRM_IOCTL_NOUVEAU_GETPARAM handler.
 *
 * Answers userspace queries about the device: chipset id, PCI ids, bus
 * type, aperture bases/sizes, and (on >=NV40) the graph-units register.
 * Returns 0 with getparam->value filled in, or -EINVAL for unknown
 * parameters and for PCIGART queries when no PCIGART exists.
 */
int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_getparam *getparam = data;

	/* Reject the ioctl until card init has completed. */
	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

	switch (getparam->param) {
	case NOUVEAU_GETPARAM_CHIPSET_ID:
		getparam->value = dev_priv->chipset;
		break;
	case NOUVEAU_GETPARAM_PCI_VENDOR:
		getparam->value = dev->pci_vendor;
		break;
	case NOUVEAU_GETPARAM_PCI_DEVICE:
		getparam->value = dev->pci_device;
		break;
	case NOUVEAU_GETPARAM_BUS_TYPE:
		if (drm_device_is_agp(dev))
			getparam->value = NV_AGP;
		else if (drm_device_is_pcie(dev))
			getparam->value = NV_PCIE;
		else
			getparam->value = NV_PCI;
		break;
	case NOUVEAU_GETPARAM_FB_PHYSICAL:
		getparam->value = dev_priv->fb_phys;
		break;
	case NOUVEAU_GETPARAM_AGP_PHYSICAL:
		getparam->value = dev_priv->gart_info.aper_base;
		break;
	case NOUVEAU_GETPARAM_PCI_PHYSICAL:
		if (dev->sg) {
			getparam->value = (unsigned long)dev->sg->virtual;
		} else {
			NV_ERROR(dev, "Requested PCIGART address, "
					"while no PCIGART was created\n");
			return -EINVAL;
		}
		break;
	case NOUVEAU_GETPARAM_FB_SIZE:
		getparam->value = dev_priv->fb_available_size;
		break;
	case NOUVEAU_GETPARAM_AGP_SIZE:
		getparam->value = dev_priv->gart_info.aper_size;
		break;
	case NOUVEAU_GETPARAM_VM_VRAM_BASE:
		getparam->value = dev_priv->vm_vram_base;
		break;
	case NOUVEAU_GETPARAM_GRAPH_UNITS:
		/* NV40 and NV50 versions are quite different, but register
		 * address is the same. User is supposed to know the card
		 * family anyway... */
		if (dev_priv->chipset >= 0x40) {
			getparam->value = nv_rd32(dev, NV40_PMC_GRAPH_UNITS);
			break;
		}
		/* FALLTHRU */
	default:
		NV_ERROR(dev, "unknown parameter %lld\n", getparam->param);
		return -EINVAL;
	}

	return 0;
}
878
879int
880nouveau_ioctl_setparam(struct drm_device *dev, void *data,
881 struct drm_file *file_priv)
882{
883 struct drm_nouveau_setparam *setparam = data;
884
885 NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
886
887 switch (setparam->param) {
888 default:
889 NV_ERROR(dev, "unknown parameter %lld\n", setparam->param);
890 return -EINVAL;
891 }
892
893 return 0;
894}
895
896/* Wait until (value(reg) & mask) == val, up until timeout has hit */
897bool nouveau_wait_until(struct drm_device *dev, uint64_t timeout,
898 uint32_t reg, uint32_t mask, uint32_t val)
899{
900 struct drm_nouveau_private *dev_priv = dev->dev_private;
901 struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
902 uint64_t start = ptimer->read(dev);
903
904 do {
905 if ((nv_rd32(dev, reg) & mask) == val)
906 return true;
907 } while (ptimer->read(dev) - start < timeout);
908
909 return false;
910}
911
912/* Waits for PGRAPH to go completely idle */
913bool nouveau_wait_for_idle(struct drm_device *dev)
914{
915 if (!nv_wait(NV04_PGRAPH_STATUS, 0xffffffff, 0x00000000)) {
916 NV_ERROR(dev, "PGRAPH idle timed out with status 0x%08x\n",
917 nv_rd32(dev, NV04_PGRAPH_STATUS));
918 return false;
919 }
920
921 return true;
922}
923
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
new file mode 100644
index 000000000000..c385d50f041b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -0,0 +1,103 @@
1/*
2 * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
3 * All Rights Reserved.
4 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sub license,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 */
26
27#include "drmP.h"
28
29#include "nouveau_drv.h"
30
31int
32nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
33{
34 struct drm_file *file_priv = filp->private_data;
35 struct drm_nouveau_private *dev_priv =
36 file_priv->minor->dev->dev_private;
37
38 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
39 return drm_mmap(filp, vma);
40
41 return ttm_bo_mmap(filp, vma, &dev_priv->ttm.bdev);
42}
43
44static int
45nouveau_ttm_mem_global_init(struct ttm_global_reference *ref)
46{
47 return ttm_mem_global_init(ref->object);
48}
49
50static void
51nouveau_ttm_mem_global_release(struct ttm_global_reference *ref)
52{
53 ttm_mem_global_release(ref->object);
54}
55
56int
57nouveau_ttm_global_init(struct drm_nouveau_private *dev_priv)
58{
59 struct ttm_global_reference *global_ref;
60 int ret;
61
62 global_ref = &dev_priv->ttm.mem_global_ref;
63 global_ref->global_type = TTM_GLOBAL_TTM_MEM;
64 global_ref->size = sizeof(struct ttm_mem_global);
65 global_ref->init = &nouveau_ttm_mem_global_init;
66 global_ref->release = &nouveau_ttm_mem_global_release;
67
68 ret = ttm_global_item_ref(global_ref);
69 if (unlikely(ret != 0)) {
70 DRM_ERROR("Failed setting up TTM memory accounting\n");
71 dev_priv->ttm.mem_global_ref.release = NULL;
72 return ret;
73 }
74
75 dev_priv->ttm.bo_global_ref.mem_glob = global_ref->object;
76 global_ref = &dev_priv->ttm.bo_global_ref.ref;
77 global_ref->global_type = TTM_GLOBAL_TTM_BO;
78 global_ref->size = sizeof(struct ttm_bo_global);
79 global_ref->init = &ttm_bo_global_init;
80 global_ref->release = &ttm_bo_global_release;
81
82 ret = ttm_global_item_ref(global_ref);
83 if (unlikely(ret != 0)) {
84 DRM_ERROR("Failed setting up TTM BO subsystem\n");
85 ttm_global_item_unref(&dev_priv->ttm.mem_global_ref);
86 dev_priv->ttm.mem_global_ref.release = NULL;
87 return ret;
88 }
89
90 return 0;
91}
92
93void
94nouveau_ttm_global_release(struct drm_nouveau_private *dev_priv)
95{
96 if (dev_priv->ttm.mem_global_ref.release == NULL)
97 return;
98
99 ttm_global_item_unref(&dev_priv->ttm.bo_global_ref.ref);
100 ttm_global_item_unref(&dev_priv->ttm.mem_global_ref);
101 dev_priv->ttm.mem_global_ref.release = NULL;
102}
103
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
new file mode 100644
index 000000000000..eba687f1099e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -0,0 +1,1000 @@
1/*
2 * Copyright 1993-2003 NVIDIA, Corporation
3 * Copyright 2006 Dave Airlie
4 * Copyright 2007 Maarten Maathuis
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE.
24 */
25
26#include "drmP.h"
27#include "drm_crtc_helper.h"
28
29#include "nouveau_drv.h"
30#include "nouveau_encoder.h"
31#include "nouveau_connector.h"
32#include "nouveau_crtc.h"
33#include "nouveau_fb.h"
34#include "nouveau_hw.h"
35#include "nvreg.h"
36
37static int
38nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
39 struct drm_framebuffer *old_fb);
40
41static void
42crtc_wr_cio_state(struct drm_crtc *crtc, struct nv04_crtc_reg *crtcstate, int index)
43{
44 NVWriteVgaCrtc(crtc->dev, nouveau_crtc(crtc)->index, index,
45 crtcstate->CRTC[index]);
46}
47
/* Program "digital vibrance" (saturation) for this CRTC.  The raw level
 * is cached in nv_crtc->saturation and shadowed in the CRTC state; on
 * GeForce4+ display architecture a non-zero level is signalled through
 * CSB = 0x80 with the value itself in CRE_5B.  CRE_5B (if used) is
 * written to the hardware before CSB. */
48static void nv_crtc_set_digital_vibrance(struct drm_crtc *crtc, int level)
49{
50	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
51	struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
52	struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
53
54	regp->CRTC[NV_CIO_CRE_CSB] = nv_crtc->saturation = level;
55	if (nv_crtc->saturation && nv_gf4_disp_arch(crtc->dev)) {
		/* gf4+: CSB becomes a flag (0x80), CRE_5B holds the level */
56		regp->CRTC[NV_CIO_CRE_CSB] = 0x80;
57		regp->CRTC[NV_CIO_CRE_5B] = nv_crtc->saturation << 2;
58		crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_5B);
59	}
60	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_CSB);
61}
62
/* Program image sharpening for this CRTC.  The raw level is cached in
 * nv_crtc->sharpness; negative levels select blur and are biased by
 * +0x40 to land in the hardware's 0x3f..0x20 blur range before being
 * shadowed and written to PRAMDAC_634. */
63static void nv_crtc_set_image_sharpening(struct drm_crtc *crtc, int level)
64{
65	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
66	struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
67	struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
68
69	nv_crtc->sharpness = level;
70	if (level < 0)	/* blur is in hw range 0x3f -> 0x20 */
71		level += 0x40;
72	regp->ramdac_634 = level;
73	NVWriteRAMDAC(crtc->dev, nv_crtc->index, NV_PRAMDAC_634, regp->ramdac_634);
74}
75
76#define PLLSEL_VPLL1_MASK \
77 (NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_VPLL \
78 | NV_PRAMDAC_PLL_COEFF_SELECT_VCLK_RATIO_DB2)
79#define PLLSEL_VPLL2_MASK \
80 (NV_PRAMDAC_PLL_COEFF_SELECT_PLL_SOURCE_VPLL2 \
81 | NV_PRAMDAC_PLL_COEFF_SELECT_VCLK2_RATIO_DB2)
82#define PLLSEL_TV_MASK \
83 (NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK1 \
84 | NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK1 \
85 | NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK2 \
86 | NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK2)
87
88/* NV4x 0x40.. pll notes:
89 * gpu pll: 0x4000 + 0x4004
90 * ?gpu? pll: 0x4008 + 0x400c
91 * vpll1: 0x4010 + 0x4014
92 * vpll2: 0x4018 + 0x401c
93 * mpll: 0x4020 + 0x4024
94 * mpll: 0x4038 + 0x403c
95 *
96 * the first register of each pair has some unknown details:
97 * bits 0-7: redirected values from elsewhere? (similar to PLL_SETUP_CONTROL?)
98 * bits 20-23: (mpll) something to do with post divider?
99 * bits 28-31: related to single stage mode? (bit 8/12)
100 */
101
/* Compute the VPLL coefficients and PLL-select word for 'dot_clock' on
 * this head, staging the results in the mode state (regp->pllvals and
 * state->pllsel).  Bails out silently if the PLL limits cannot be read
 * or no valid M/N/P combination is found.  Finishes by re-staging the
 * cursor offset through its set_offset hook. */
102static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mode * mode, int dot_clock)
103{
104	struct drm_device *dev = crtc->dev;
105	struct drm_nouveau_private *dev_priv = dev->dev_private;
106	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
107	struct nv04_mode_state *state = &dev_priv->mode_reg;
108	struct nv04_crtc_reg *regp = &state->crtc_reg[nv_crtc->index];
109	struct nouveau_pll_vals *pv = &regp->pllvals;
110	struct pll_lims pll_lim;
111
112	if (get_pll_limits(dev, nv_crtc->index ? VPLL2 : VPLL1, &pll_lim))
113		return;
114
115	/* NM2 == 0 is used to determine single stage mode on two stage plls */
116	pv->NM2 = 0;
117
118	/* for newer nv4x the blob uses only the first stage of the vpll below a
119	 * certain clock. for a certain nv4b this is 150MHz. since the max
120	 * output frequency of the first stage for this card is 300MHz, it is
121	 * assumed the threshold is given by vco1 maxfreq/2
122	 */
123	/* for early nv4x, specifically nv40 and *some* nv43 (devids 0 and 6,
124	 * not 8, others unknown), the blob always uses both plls. no problem
125	 * has yet been observed in allowing the use a single stage pll on all
126	 * nv43 however. the behaviour of single stage use is untested on nv40
127	 */
128	if (dev_priv->chipset > 0x40 && dot_clock <= (pll_lim.vco1.maxfreq / 2))
129		memset(&pll_lim.vco2, 0, sizeof(pll_lim.vco2));
130
131	if (!nouveau_calc_pll_mnp(dev, &pll_lim, dot_clock, pv))
132		return;
133
	/* keep only the VPLL/TV routing bits of the previous select word */
134	state->pllsel &= PLLSEL_VPLL1_MASK | PLLSEL_VPLL2_MASK | PLLSEL_TV_MASK;
135
136	/* The blob uses this always, so let's do the same */
137	if (dev_priv->card_type == NV_40)
138		state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_USE_VPLL2_TRUE;
139	/* again nv40 and some nv43 act more like nv3x as described above */
140	if (dev_priv->chipset < 0x41)
141		state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_MPLL |
142				 NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_NVPLL;
143	state->pllsel |= nv_crtc->index ? PLLSEL_VPLL2_MASK : PLLSEL_VPLL1_MASK;
144
145	if (pv->NM2)
146		NV_DEBUG_KMS(dev, "vpll: n1 %d n2 %d m1 %d m2 %d log2p %d\n",
147			     pv->N1, pv->N2, pv->M1, pv->M2, pv->log2P);
148	else
149		NV_DEBUG_KMS(dev, "vpll: n %d m %d log2p %d\n",
150			     pv->N1, pv->M1, pv->log2P);
151
	/* re-stage the cursor offset via its callback */
152	nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset);
153}
154
/* DPMS for a VGA-style head: blanks the screen via the sequencer clock
 * register (0x20 bit), gates sync generation via CR17 bit 7, and
 * inhibits h/v sync individually via the top two CRE_RPC1 bits.  The
 * last applied mode is cached in nv_crtc->last_dpms so repeated calls
 * with the same mode are no-ops. */
155static void
156nv_crtc_dpms(struct drm_crtc *crtc, int mode)
157{
158	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
159	struct drm_device *dev = crtc->dev;
160	unsigned char seq1 = 0, crtc17 = 0;
161	unsigned char crtc1A;
162
163	NV_DEBUG_KMS(dev, "Setting dpms mode %d on CRTC %d\n", mode,
164		     nv_crtc->index);
165
166	if (nv_crtc->last_dpms == mode) /* Don't do unnecessary mode changes. */
167		return;
168
169	nv_crtc->last_dpms = mode;
170
171	if (nv_two_heads(dev))
172		NVSetOwner(dev, nv_crtc->index);
173
174	/* nv4ref indicates these two RPC1 bits inhibit h/v sync */
175	crtc1A = NVReadVgaCrtc(dev, nv_crtc->index,
176			       NV_CIO_CRE_RPC1_INDEX) & ~0xC0;
177	switch (mode) {
178	case DRM_MODE_DPMS_STANDBY:
179		/* Screen: Off; HSync: Off, VSync: On -- Not Supported */
180		seq1 = 0x20;
181		crtc17 = 0x80;
182		crtc1A |= 0x80;
183		break;
184	case DRM_MODE_DPMS_SUSPEND:
185		/* Screen: Off; HSync: On, VSync: Off -- Not Supported */
186		seq1 = 0x20;
187		crtc17 = 0x80;
188		crtc1A |= 0x40;
189		break;
190	case DRM_MODE_DPMS_OFF:
191		/* Screen: Off; HSync: Off, VSync: Off */
192		seq1 = 0x20;
193		crtc17 = 0x00;
194		crtc1A |= 0xC0;
195		break;
196	case DRM_MODE_DPMS_ON:
197	default:
198		/* Screen: On; HSync: On, VSync: On */
199		seq1 = 0x00;
200		crtc17 = 0x80;
201		break;
202	}
203
204	NVVgaSeqReset(dev, nv_crtc->index, true);
205	/* Each head has its own sequencer, so we can turn it off when we want */
206	seq1 |= (NVReadVgaSeq(dev, nv_crtc->index, NV_VIO_SR_CLOCK_INDEX) & ~0x20);
207	NVWriteVgaSeq(dev, nv_crtc->index, NV_VIO_SR_CLOCK_INDEX, seq1);
208	crtc17 |= (NVReadVgaCrtc(dev, nv_crtc->index, NV_CIO_CR_MODE_INDEX) & ~0x80);
209	mdelay(10);
210	NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CR_MODE_INDEX, crtc17);
211	NVVgaSeqReset(dev, nv_crtc->index, false);
212
213	NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RPC1_INDEX, crtc1A);
214}
215
216static bool
217nv_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode,
218 struct drm_display_mode *adjusted_mode)
219{
220 return true;
221}
222
/* Derive the standard-VGA portion of the shadow state (timings,
 * sequencer, CRTC, graphics controller and attribute registers) from
 * 'mode' into regp.  Everything here only fills the shadow state;
 * nothing is written to the hardware in this function.  Flat-panel
 * outputs get fixed blank/sync positions near total, matching what the
 * binary driver programs. */
223static void
224nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
225{
226	struct drm_device *dev = crtc->dev;
227	struct drm_nouveau_private *dev_priv = dev->dev_private;
228	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
229	struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
230	struct drm_framebuffer *fb = crtc->fb;
231
232	/* Calculate our timings */
233	int horizDisplay = (mode->crtc_hdisplay >> 3) - 1;
234	int horizStart = (mode->crtc_hsync_start >> 3) + 1;
235	int horizEnd = (mode->crtc_hsync_end >> 3) + 1;
236	int horizTotal = (mode->crtc_htotal >> 3) - 5;
237	int horizBlankStart = (mode->crtc_hdisplay >> 3) - 1;
238	int horizBlankEnd = (mode->crtc_htotal >> 3) - 1;
239	int vertDisplay = mode->crtc_vdisplay - 1;
240	int vertStart = mode->crtc_vsync_start - 1;
241	int vertEnd = mode->crtc_vsync_end - 1;
242	int vertTotal = mode->crtc_vtotal - 2;
243	int vertBlankStart = mode->crtc_vdisplay - 1;
244	int vertBlankEnd = mode->crtc_vtotal - 1;
245
246	struct drm_encoder *encoder;
247	bool fp_output = false;
248
	/* is any digital flat-panel encoder (LVDS/TMDS) driven by us? */
249	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
250		struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
251
252		if (encoder->crtc == crtc &&
253		    (nv_encoder->dcb->type == OUTPUT_LVDS ||
254		     nv_encoder->dcb->type == OUTPUT_TMDS))
255			fp_output = true;
256	}
257
258	if (fp_output) {
259		vertStart = vertTotal - 3;
260		vertEnd = vertTotal - 2;
261		vertBlankStart = vertStart;
262		horizStart = horizTotal - 5;
263		horizEnd = horizTotal - 2;
264		horizBlankEnd = horizTotal + 4;
265#if 0
266		if (dev->overlayAdaptor && dev_priv->card_type >= NV_10)
267			/* This reportedly works around some video overlay bandwidth problems */
268			horizTotal += 2;
269#endif
270	}
271
272	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
273		vertTotal |= 1;
274
275#if 0
276	ErrorF("horizDisplay: 0x%X \n", horizDisplay);
277	ErrorF("horizStart: 0x%X \n", horizStart);
278	ErrorF("horizEnd: 0x%X \n", horizEnd);
279	ErrorF("horizTotal: 0x%X \n", horizTotal);
280	ErrorF("horizBlankStart: 0x%X \n", horizBlankStart);
281	ErrorF("horizBlankEnd: 0x%X \n", horizBlankEnd);
282	ErrorF("vertDisplay: 0x%X \n", vertDisplay);
283	ErrorF("vertStart: 0x%X \n", vertStart);
284	ErrorF("vertEnd: 0x%X \n", vertEnd);
285	ErrorF("vertTotal: 0x%X \n", vertTotal);
286	ErrorF("vertBlankStart: 0x%X \n", vertBlankStart);
287	ErrorF("vertBlankEnd: 0x%X \n", vertBlankEnd);
288#endif
289
290	/*
291	 * compute correct Hsync & Vsync polarity
292	 */
293	if ((mode->flags & (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC))
294		&& (mode->flags & (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))) {
295
296		regp->MiscOutReg = 0x23;
297		if (mode->flags & DRM_MODE_FLAG_NHSYNC)
298			regp->MiscOutReg |= 0x40;
299		if (mode->flags & DRM_MODE_FLAG_NVSYNC)
300			regp->MiscOutReg |= 0x80;
301	} else {
		/* polarity unspecified: pick the classic VGA polarity
		 * encoding for the effective vertical resolution */
302		int vdisplay = mode->vdisplay;
303		if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
304			vdisplay *= 2;
305		if (mode->vscan > 1)
306			vdisplay *= mode->vscan;
307		if (vdisplay < 400)
308			regp->MiscOutReg = 0xA3;	/* +hsync -vsync */
309		else if (vdisplay < 480)
310			regp->MiscOutReg = 0x63;	/* -hsync +vsync */
311		else if (vdisplay < 768)
312			regp->MiscOutReg = 0xE3;	/* -hsync -vsync */
313		else
314			regp->MiscOutReg = 0x23;	/* +hsync +vsync */
315	}
316
317	regp->MiscOutReg |= (mode->clock_index & 0x03) << 2;
318
319	/*
320	 * Time Sequencer
321	 */
322	regp->Sequencer[NV_VIO_SR_RESET_INDEX] = 0x00;
323	/* 0x20 disables the sequencer */
324	if (mode->flags & DRM_MODE_FLAG_CLKDIV2)
325		regp->Sequencer[NV_VIO_SR_CLOCK_INDEX] = 0x29;
326	else
327		regp->Sequencer[NV_VIO_SR_CLOCK_INDEX] = 0x21;
328	regp->Sequencer[NV_VIO_SR_PLANE_MASK_INDEX] = 0x0F;
329	regp->Sequencer[NV_VIO_SR_CHAR_MAP_INDEX] = 0x00;
330	regp->Sequencer[NV_VIO_SR_MEM_MODE_INDEX] = 0x0E;
331
332	/*
333	 * CRTC
334	 */
335	regp->CRTC[NV_CIO_CR_HDT_INDEX] = horizTotal;
336	regp->CRTC[NV_CIO_CR_HDE_INDEX] = horizDisplay;
337	regp->CRTC[NV_CIO_CR_HBS_INDEX] = horizBlankStart;
338	regp->CRTC[NV_CIO_CR_HBE_INDEX] = (1 << 7) |
339		XLATE(horizBlankEnd, 0, NV_CIO_CR_HBE_4_0);
340	regp->CRTC[NV_CIO_CR_HRS_INDEX] = horizStart;
341	regp->CRTC[NV_CIO_CR_HRE_INDEX] = XLATE(horizBlankEnd, 5, NV_CIO_CR_HRE_HBE_5) |
342		XLATE(horizEnd, 0, NV_CIO_CR_HRE_4_0);
343	regp->CRTC[NV_CIO_CR_VDT_INDEX] = vertTotal;
344	regp->CRTC[NV_CIO_CR_OVL_INDEX] = XLATE(vertStart, 9, NV_CIO_CR_OVL_VRS_9) |
345		XLATE(vertDisplay, 9, NV_CIO_CR_OVL_VDE_9) |
346		XLATE(vertTotal, 9, NV_CIO_CR_OVL_VDT_9) |
347		(1 << 4) |
348		XLATE(vertBlankStart, 8, NV_CIO_CR_OVL_VBS_8) |
349		XLATE(vertStart, 8, NV_CIO_CR_OVL_VRS_8) |
350		XLATE(vertDisplay, 8, NV_CIO_CR_OVL_VDE_8) |
351		XLATE(vertTotal, 8, NV_CIO_CR_OVL_VDT_8);
352	regp->CRTC[NV_CIO_CR_RSAL_INDEX] = 0x00;
353	regp->CRTC[NV_CIO_CR_CELL_HT_INDEX] = ((mode->flags & DRM_MODE_FLAG_DBLSCAN) ? MASK(NV_CIO_CR_CELL_HT_SCANDBL) : 0) |
354		1 << 6 |
355		XLATE(vertBlankStart, 9, NV_CIO_CR_CELL_HT_VBS_9);
356	regp->CRTC[NV_CIO_CR_CURS_ST_INDEX] = 0x00;
357	regp->CRTC[NV_CIO_CR_CURS_END_INDEX] = 0x00;
358	regp->CRTC[NV_CIO_CR_SA_HI_INDEX] = 0x00;
359	regp->CRTC[NV_CIO_CR_SA_LO_INDEX] = 0x00;
360	regp->CRTC[NV_CIO_CR_TCOFF_HI_INDEX] = 0x00;
361	regp->CRTC[NV_CIO_CR_TCOFF_LO_INDEX] = 0x00;
362	regp->CRTC[NV_CIO_CR_VRS_INDEX] = vertStart;
363	regp->CRTC[NV_CIO_CR_VRE_INDEX] = 1 << 5 | XLATE(vertEnd, 0, NV_CIO_CR_VRE_3_0);
364	regp->CRTC[NV_CIO_CR_VDE_INDEX] = vertDisplay;
365	/* framebuffer can be larger than crtc scanout area. */
366	regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = fb->pitch / 8;
367	regp->CRTC[NV_CIO_CR_ULINE_INDEX] = 0x00;
368	regp->CRTC[NV_CIO_CR_VBS_INDEX] = vertBlankStart;
369	regp->CRTC[NV_CIO_CR_VBE_INDEX] = vertBlankEnd;
370	regp->CRTC[NV_CIO_CR_MODE_INDEX] = 0x43;
371	regp->CRTC[NV_CIO_CR_LCOMP_INDEX] = 0xff;
372
373	/*
374	 * Some extended CRTC registers (they are not saved with the rest of the vga regs).
375	 */
376
377	/* framebuffer can be larger than crtc scanout area. */
378	regp->CRTC[NV_CIO_CRE_RPC0_INDEX] = XLATE(fb->pitch / 8, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
379	regp->CRTC[NV_CIO_CRE_RPC1_INDEX] = mode->crtc_hdisplay < 1280 ?
380		MASK(NV_CIO_CRE_RPC1_LARGE) : 0x00;
381	regp->CRTC[NV_CIO_CRE_LSR_INDEX] = XLATE(horizBlankEnd, 6, NV_CIO_CRE_LSR_HBE_6) |
382		XLATE(vertBlankStart, 10, NV_CIO_CRE_LSR_VBS_10) |
383		XLATE(vertStart, 10, NV_CIO_CRE_LSR_VRS_10) |
384		XLATE(vertDisplay, 10, NV_CIO_CRE_LSR_VDE_10) |
385		XLATE(vertTotal, 10, NV_CIO_CRE_LSR_VDT_10);
386	regp->CRTC[NV_CIO_CRE_HEB__INDEX] = XLATE(horizStart, 8, NV_CIO_CRE_HEB_HRS_8) |
387		XLATE(horizBlankStart, 8, NV_CIO_CRE_HEB_HBS_8) |
388		XLATE(horizDisplay, 8, NV_CIO_CRE_HEB_HDE_8) |
389		XLATE(horizTotal, 8, NV_CIO_CRE_HEB_HDT_8);
390	regp->CRTC[NV_CIO_CRE_EBR_INDEX] = XLATE(vertBlankStart, 11, NV_CIO_CRE_EBR_VBS_11) |
391		XLATE(vertStart, 11, NV_CIO_CRE_EBR_VRS_11) |
392		XLATE(vertDisplay, 11, NV_CIO_CRE_EBR_VDE_11) |
393		XLATE(vertTotal, 11, NV_CIO_CRE_EBR_VDT_11);
394
395	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
396		horizTotal = (horizTotal >> 1) & ~1;
397		regp->CRTC[NV_CIO_CRE_ILACE__INDEX] = horizTotal;
398		regp->CRTC[NV_CIO_CRE_HEB__INDEX] |= XLATE(horizTotal, 8, NV_CIO_CRE_HEB_ILC_8);
399	} else
400		regp->CRTC[NV_CIO_CRE_ILACE__INDEX] = 0xff;  /* interlace off */
401
402	/*
403	 * Graphics Display Controller
404	 */
405	regp->Graphics[NV_VIO_GX_SR_INDEX] = 0x00;
406	regp->Graphics[NV_VIO_GX_SREN_INDEX] = 0x00;
407	regp->Graphics[NV_VIO_GX_CCOMP_INDEX] = 0x00;
408	regp->Graphics[NV_VIO_GX_ROP_INDEX] = 0x00;
409	regp->Graphics[NV_VIO_GX_READ_MAP_INDEX] = 0x00;
410	regp->Graphics[NV_VIO_GX_MODE_INDEX] = 0x40; /* 256 color mode */
411	regp->Graphics[NV_VIO_GX_MISC_INDEX] = 0x05; /* map 64k mem + graphic mode */
412	regp->Graphics[NV_VIO_GX_DONT_CARE_INDEX] = 0x0F;
413	regp->Graphics[NV_VIO_GX_BIT_MASK_INDEX] = 0xFF;
414
415	regp->Attribute[0] = 0x00; /* standard colormap translation */
416	regp->Attribute[1] = 0x01;
417	regp->Attribute[2] = 0x02;
418	regp->Attribute[3] = 0x03;
419	regp->Attribute[4] = 0x04;
420	regp->Attribute[5] = 0x05;
421	regp->Attribute[6] = 0x06;
422	regp->Attribute[7] = 0x07;
423	regp->Attribute[8] = 0x08;
424	regp->Attribute[9] = 0x09;
425	regp->Attribute[10] = 0x0A;
426	regp->Attribute[11] = 0x0B;
427	regp->Attribute[12] = 0x0C;
428	regp->Attribute[13] = 0x0D;
429	regp->Attribute[14] = 0x0E;
430	regp->Attribute[15] = 0x0F;
431	regp->Attribute[NV_CIO_AR_MODE_INDEX] = 0x01; /* Enable graphic mode */
432	/* Non-vga */
433	regp->Attribute[NV_CIO_AR_OSCAN_INDEX] = 0x00;
434	regp->Attribute[NV_CIO_AR_PLANE_INDEX] = 0x0F; /* enable all color planes */
435	regp->Attribute[NV_CIO_AR_HPP_INDEX] = 0x00;
436	regp->Attribute[NV_CIO_AR_CSEL_INDEX] = 0x00;
437}
438
439/**
440 * Sets up registers for the given mode/adjusted_mode pair.
441 *
442 * The clocks, CRTCs and outputs attached to this CRTC must be off.
443 *
444 * This shouldn't enable any clocks, CRTCs, or outputs, but they should
445 * be easily turned on/off after this.
446 */
/* Fill the extended (non-VGA) CRTC and PRAMDAC shadow registers for
 * 'mode'.  Mostly stages values in regp; the two helpers invoked here
 * (nv_crtc_set_digital_vibrance / nv_crtc_set_image_sharpening) also
 * write their registers to the hardware directly. */
447static void
448nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
449{
450	struct drm_device *dev = crtc->dev;
451	struct drm_nouveau_private *dev_priv = dev->dev_private;
452	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
453	struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
454	struct nv04_crtc_reg *savep = &dev_priv->saved_reg.crtc_reg[nv_crtc->index];
455	struct drm_encoder *encoder;
456	bool lvds_output = false, tmds_output = false, tv_output = false,
457		off_chip_digital = false;
458
	/* classify the encoders driven by this crtc */
459	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
460		struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
461		bool digital = false;
462
463		if (encoder->crtc != crtc)
464			continue;
465
466		if (nv_encoder->dcb->type == OUTPUT_LVDS)
467			digital = lvds_output = true;
468		if (nv_encoder->dcb->type == OUTPUT_TV)
469			tv_output = true;
470		if (nv_encoder->dcb->type == OUTPUT_TMDS)
471			digital = tmds_output = true;
472		if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP && digital)
473			off_chip_digital = true;
474	}
475
476	/* Registers not directly related to the (s)vga mode */
477
478	/* What is the meaning of this register? */
479	/* A few popular values are 0x18, 0x1c, 0x38, 0x3c */
480	regp->CRTC[NV_CIO_CRE_ENH_INDEX] = savep->CRTC[NV_CIO_CRE_ENH_INDEX] & ~(1<<5);
481
482	regp->crtc_eng_ctrl = 0;
483	/* Except for rare conditions I2C is enabled on the primary crtc */
484	if (nv_crtc->index == 0)
485		regp->crtc_eng_ctrl |= NV_CRTC_FSEL_I2C;
486#if 0
487	/* Set overlay to desired crtc. */
488	if (dev->overlayAdaptor) {
489		NVPortPrivPtr pPriv = GET_OVERLAY_PRIVATE(dev);
490		if (pPriv->overlayCRTC == nv_crtc->index)
491			regp->crtc_eng_ctrl |= NV_CRTC_FSEL_OVERLAY;
492	}
493#endif
494
495	/* ADDRESS_SPACE_PNVM is the same as setting HCUR_ASI */
496	regp->cursor_cfg = NV_PCRTC_CURSOR_CONFIG_CUR_LINES_64 |
497			   NV_PCRTC_CURSOR_CONFIG_CUR_PIXELS_64 |
498			   NV_PCRTC_CURSOR_CONFIG_ADDRESS_SPACE_PNVM;
499	if (dev_priv->chipset >= 0x11)
500		regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_CUR_BPP_32;
501	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
502		regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE;
503
504	/* Unblock some timings */
505	regp->CRTC[NV_CIO_CRE_53] = 0;
506	regp->CRTC[NV_CIO_CRE_54] = 0;
507
508	/* 0x00 is disabled, 0x11 is lvds, 0x22 crt and 0x88 tmds */
509	if (lvds_output)
510		regp->CRTC[NV_CIO_CRE_SCRATCH3__INDEX] = 0x11;
511	else if (tmds_output)
512		regp->CRTC[NV_CIO_CRE_SCRATCH3__INDEX] = 0x88;
513	else
514		regp->CRTC[NV_CIO_CRE_SCRATCH3__INDEX] = 0x22;
515
516	/* These values seem to vary */
517	/* This register seems to be used by the bios to make certain decisions on some G70 cards? */
518	regp->CRTC[NV_CIO_CRE_SCRATCH4__INDEX] = savep->CRTC[NV_CIO_CRE_SCRATCH4__INDEX];
519
520	nv_crtc_set_digital_vibrance(crtc, nv_crtc->saturation);
521
522	/* probably a scratch reg, but kept for cargo-cult purposes:
523	 * bit0: crtc0?, head A
524	 * bit6: lvds, head A
525	 * bit7: (only in X), head A
526	 */
527	if (nv_crtc->index == 0)
528		regp->CRTC[NV_CIO_CRE_4B] = savep->CRTC[NV_CIO_CRE_4B] | 0x80;
529
530	/* The blob seems to take the current value from crtc 0, add 4 to that
531	 * and reuse the old value for crtc 1 */
532	regp->CRTC[NV_CIO_CRE_TVOUT_LATENCY] = dev_priv->saved_reg.crtc_reg[0].CRTC[NV_CIO_CRE_TVOUT_LATENCY];
533	if (!nv_crtc->index)
534		regp->CRTC[NV_CIO_CRE_TVOUT_LATENCY] += 4;
535
536	/* the blob sometimes sets |= 0x10 (which is the same as setting |=
537	 * 1 << 30 on 0x60.830), for no apparent reason */
538	regp->CRTC[NV_CIO_CRE_59] = off_chip_digital;
539
540	regp->crtc_830 = mode->crtc_vdisplay - 3;
541	regp->crtc_834 = mode->crtc_vdisplay - 1;
542
543	if (dev_priv->card_type == NV_40)
544		/* This is what the blob does */
545		regp->crtc_850 = NVReadCRTC(dev, 0, NV_PCRTC_850);
546
547	if (dev_priv->card_type >= NV_30)
548		regp->gpio_ext = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT);
549
550	regp->crtc_cfg = NV_PCRTC_CONFIG_START_ADDRESS_HSYNC;
551
552	/* Some misc regs */
553	if (dev_priv->card_type == NV_40) {
554		regp->CRTC[NV_CIO_CRE_85] = 0xFF;
555		regp->CRTC[NV_CIO_CRE_86] = 0x1;
556	}
557
558	regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] = (crtc->fb->depth + 1) / 8;
559	/* Enable slaved mode (called MODE_TV in nv4ref.h) */
560	if (lvds_output || tmds_output || tv_output)
561		regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] |= (1 << 7);
562
563	/* Generic PRAMDAC regs */
564
565	if (dev_priv->card_type >= NV_10)
566		/* Only bit that bios and blob set. */
567		regp->nv10_cursync = (1 << 25);
568
569	regp->ramdac_gen_ctrl = NV_PRAMDAC_GENERAL_CONTROL_BPC_8BITS |
570				NV_PRAMDAC_GENERAL_CONTROL_VGA_STATE_SEL |
571				NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON;
572	if (crtc->fb->depth == 16)
573		regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
574	if (dev_priv->chipset >= 0x11)
575		regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_PIPE_LONG;
576
577	regp->ramdac_630 = 0; /* turn off green mode (tv test pattern?) */
578	regp->tv_setup = 0;
579
580	nv_crtc_set_image_sharpening(crtc, nv_crtc->sharpness);
581
582	/* Some values the blob sets */
583	regp->ramdac_8c0 = 0x100;
584	regp->ramdac_a20 = 0x0;
585	regp->ramdac_a24 = 0xfffff;
586	regp->ramdac_a34 = 0x1;
587}
588
589/**
590 * Sets up registers for the given mode/adjusted_mode pair.
591 *
592 * The clocks, CRTCs and outputs attached to this CRTC must be off.
593 *
594 * This shouldn't enable any clocks, CRTCs, or outputs, but they should
595 * be easily turned on/off after this.
596 */
597static int
598nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
599 struct drm_display_mode *adjusted_mode,
600 int x, int y, struct drm_framebuffer *old_fb)
601{
602 struct drm_device *dev = crtc->dev;
603 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
604 struct drm_nouveau_private *dev_priv = dev->dev_private;
605
606 NV_DEBUG_KMS(dev, "CTRC mode on CRTC %d:\n", nv_crtc->index);
607 drm_mode_debug_printmodeline(adjusted_mode);
608
609 /* unlock must come after turning off FP_TG_CONTROL in output_prepare */
610 nv_lock_vga_crtc_shadow(dev, nv_crtc->index, -1);
611
612 nv_crtc_mode_set_vga(crtc, adjusted_mode);
613 /* calculated in nv04_dfp_prepare, nv40 needs it written before calculating PLLs */
614 if (dev_priv->card_type == NV_40)
615 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, dev_priv->mode_reg.sel_clk);
616 nv_crtc_mode_set_regs(crtc, adjusted_mode);
617 nv_crtc_calc_state_ext(crtc, mode, adjusted_mode->clock);
618 return 0;
619}
620
/* Save this head's current hardware state into dev_priv->saved_reg and
 * seed parts of the staged mode state (sel_clk, LCD shadow, pllsel,
 * gpio_ext) from those saved values, with the routing bits masked. */
621static void nv_crtc_save(struct drm_crtc *crtc)
622{
623	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
624	struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
625	struct nv04_mode_state *state = &dev_priv->mode_reg;
626	struct nv04_crtc_reg *crtc_state = &state->crtc_reg[nv_crtc->index];
627	struct nv04_mode_state *saved = &dev_priv->saved_reg;
628	struct nv04_crtc_reg *crtc_saved = &saved->crtc_reg[nv_crtc->index];
629
630	if (nv_two_heads(crtc->dev))
631		NVSetOwner(crtc->dev, nv_crtc->index);
632
633	nouveau_hw_save_state(crtc->dev, nv_crtc->index, saved);
634
635	/* init some state to saved value */
636	state->sel_clk = saved->sel_clk & ~(0x5 << 16);
637	crtc_state->CRTC[NV_CIO_CRE_LCD__INDEX] = crtc_saved->CRTC[NV_CIO_CRE_LCD__INDEX];
638	state->pllsel = saved->pllsel & ~(PLLSEL_VPLL1_MASK | PLLSEL_VPLL2_MASK | PLLSEL_TV_MASK);
639	crtc_state->gpio_ext = crtc_saved->gpio_ext;
640}
641
/* Restore this head from dev_priv->saved_reg, re-apply the saved CR21
 * shadow lock, and invalidate the cached DPMS state so the next dpms
 * call is not short-circuited. */
642static void nv_crtc_restore(struct drm_crtc *crtc)
643{
644	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
645	struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
646	int head = nv_crtc->index;
647	uint8_t saved_cr21 = dev_priv->saved_reg.crtc_reg[head].CRTC[NV_CIO_CRE_21];
648
649	if (nv_two_heads(crtc->dev))
650		NVSetOwner(crtc->dev, head);
651
652	nouveau_hw_load_state(crtc->dev, head, &dev_priv->saved_reg);
653	nv_lock_vga_crtc_shadow(crtc->dev, head, saved_cr21);
654
655	nv_crtc->last_dpms = NV_DPMS_CLEARED;
656}
657
/* drm_crtc_helper prepare hook: take ownership of the head, turn it off
 * via dpms, blank the screen, and switch start-address handling to the
 * non-VGA mode in preparation for the mode set. */
658static void nv_crtc_prepare(struct drm_crtc *crtc)
659{
660	struct drm_device *dev = crtc->dev;
661	struct drm_nouveau_private *dev_priv = dev->dev_private;
662	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
663	struct drm_crtc_helper_funcs *funcs = crtc->helper_private;
664
665	if (nv_two_heads(dev))
666		NVSetOwner(dev, nv_crtc->index);
667
668	funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
669
670	NVBlankScreen(dev, nv_crtc->index, true);
671
672	/* Some more preparation. */
673	NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_CONFIG, NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA);
674	if (dev_priv->card_type == NV_40) {
		/* clear bit 16 of PRAMDAC_900 on nv40 before modesetting */
675		uint32_t reg900 = NVReadRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900);
676		NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900, reg900 & ~0x10000);
677	}
678}
679
/* drm_crtc_helper commit hook: flush the staged mode state to the
 * hardware, program the scanout base, enable big-endian LFB byte
 * swapping where the kernel is big-endian, and finally turn the head
 * back on via dpms. */
680static void nv_crtc_commit(struct drm_crtc *crtc)
681{
682	struct drm_device *dev = crtc->dev;
683	struct drm_crtc_helper_funcs *funcs = crtc->helper_private;
684	struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
685	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
686
687	nouveau_hw_load_state(dev, nv_crtc->index, &dev_priv->mode_reg);
688	nv04_crtc_mode_set_base(crtc, crtc->x, crtc->y, NULL);
689
690#ifdef __BIG_ENDIAN
691	/* turn on LFB swapping */
692	{
693		uint8_t tmp = NVReadVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RCR);
694		tmp |= MASK(NV_CIO_CRE_RCR_ENDIAN_BIG);
695		NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RCR, tmp);
696	}
697#endif
698
699	funcs->dpms(crtc, DRM_MODE_DPMS_ON);
700}
701
/* Destroy the CRTC: clean up DRM core state, drop the cursor BO
 * reference and free the wrapper object.
 * NOTE(review): crtc->dev is dereferenced by NV_DEBUG_KMS before the
 * !nv_crtc guard; since nv_crtc can only be NULL when crtc itself is,
 * the guard looks vestigial -- confirm before relying on it. */
702static void nv_crtc_destroy(struct drm_crtc *crtc)
703{
704	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
705
706	NV_DEBUG_KMS(crtc->dev, "\n");
707
708	if (!nv_crtc)
709		return;
710
711	drm_crtc_cleanup(crtc);
712
713	nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
714	kfree(nv_crtc);
715}
716
/* Pack the cached 16-bit-per-channel LUT into the shadow DAC palette
 * (taking the top 8 bits of each channel) and upload the palette to the
 * hardware. */
717static void
718nv_crtc_gamma_load(struct drm_crtc *crtc)
719{
720	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
721	struct drm_device *dev = nv_crtc->base.dev;
722	struct drm_nouveau_private *dev_priv = dev->dev_private;
	/* view the raw DAC byte array as packed r/g/b triplets */
723	struct rgb { uint8_t r, g, b; } __attribute__((packed)) *rgbs;
724	int i;
725
726	rgbs = (struct rgb *)dev_priv->mode_reg.crtc_reg[nv_crtc->index].DAC;
727	for (i = 0; i < 256; i++) {
728		rgbs[i].r = nv_crtc->lut.r[i] >> 8;
729		rgbs[i].g = nv_crtc->lut.g[i] >> 8;
730		rgbs[i].b = nv_crtc->lut.b[i] >> 8;
731	}
732
733	nouveau_hw_load_state_palette(dev, nv_crtc->index, &dev_priv->mode_reg);
734}
735
/* drm gamma_set hook: cache the 256-entry LUT in nv_crtc->lut.  The
 * hardware upload needs the framebuffer depth, so when no fb is bound
 * yet the values are only marked dirty (lut.depth = 0) and uploaded by
 * the first mode_set_base(); otherwise they are loaded immediately. */
736static void
737nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t size)
738{
739	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
740	int i;
741
	/* only full 256-entry tables are supported */
742	if (size != 256)
743		return;
744
745	for (i = 0; i < 256; i++) {
746		nv_crtc->lut.r[i] = r[i];
747		nv_crtc->lut.g[i] = g[i];
748		nv_crtc->lut.b[i] = b[i];
749	}
750
751	/* We need to know the depth before we upload, but it's possible to
752	 * get called before a framebuffer is bound. If this is the case,
753	 * mark the lut values as dirty by setting depth==0, and it'll be
754	 * uploaded on the first mode_set_base()
755	 */
756	if (!nv_crtc->base.fb) {
757		nv_crtc->lut.depth = 0;
758		return;
759	}
760
761	nv_crtc_gamma_load(crtc);
762}
763
/* Bind the CRTC's current framebuffer for scanout at (x, y): pin it in
 * VRAM, program pixel format, pitch, scanout address and the memory
 * arbitration (FIFO) parameters.  Returns 0 or a negative errno from the
 * pin.  NOTE(review): register write order below follows the hardware
 * programming sequence — do not reorder. */
static int
nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
			struct drm_framebuffer *old_fb)
{
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
	struct drm_framebuffer *drm_fb = nv_crtc->base.fb;
	struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
	int arb_burst, arb_lwm;
	int ret;

	/* Pin the new framebuffer into VRAM before touching scanout state. */
	ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
	if (ret)
		return ret;

	/* Drop the pin held on behalf of the previous framebuffer. */
	if (old_fb) {
		struct nouveau_framebuffer *ofb = nouveau_framebuffer(old_fb);
		nouveau_bo_unpin(ofb->nvbo);
	}

	nv_crtc->fb.offset = fb->nvbo->bo.offset;

	/* Re-upload the LUT if the depth changed, or if nv_crtc_gamma_set()
	 * marked it dirty (depth==0) before any fb was bound. */
	if (nv_crtc->lut.depth != drm_fb->depth) {
		nv_crtc->lut.depth = drm_fb->depth;
		nv_crtc_gamma_load(crtc);
	}

	/* Update the framebuffer format. */
	regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] &= ~3;
	regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] |= (crtc->fb->depth + 1) / 8;
	regp->ramdac_gen_ctrl &= ~NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
	if (crtc->fb->depth == 16)
		regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_PIXEL_INDEX);
	NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_GENERAL_CONTROL,
		      regp->ramdac_gen_ctrl);

	/* Pitch is programmed in units of 8 bytes; bits 10:8 overflow into
	 * the RPC0 extension register. */
	regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = drm_fb->pitch >> 3;
	regp->CRTC[NV_CIO_CRE_RPC0_INDEX] =
		XLATE(drm_fb->pitch >> 3, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_RPC0_INDEX);
	crtc_wr_cio_state(crtc, regp, NV_CIO_CR_OFFSET_INDEX);

	/* Update the framebuffer location. */
	regp->fb_start = nv_crtc->fb.offset & ~3;
	regp->fb_start += (y * drm_fb->pitch) + (x * drm_fb->bits_per_pixel / 8);
	NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_START, regp->fb_start);

	/* Update the arbitration parameters. */
	nouveau_calc_arb(dev, crtc->mode.clock, drm_fb->bits_per_pixel,
			 &arb_burst, &arb_lwm);

	regp->CRTC[NV_CIO_CRE_FF_INDEX] = arb_burst;
	regp->CRTC[NV_CIO_CRE_FFLWM__INDEX] = arb_lwm & 0xff;
	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FF_INDEX);
	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FFLWM__INDEX);

	/* NV30+ carries a ninth watermark bit in CRE_47. */
	if (dev_priv->card_type >= NV_30) {
		regp->CRTC[NV_CIO_CRE_47] = arb_lwm >> 8;
		crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_47);
	}

	return 0;
}
830
831static void nv04_cursor_upload(struct drm_device *dev, struct nouveau_bo *src,
832 struct nouveau_bo *dst)
833{
834 int width = nv_cursor_width(dev);
835 uint32_t pixel;
836 int i, j;
837
838 for (i = 0; i < width; i++) {
839 for (j = 0; j < width; j++) {
840 pixel = nouveau_bo_rd32(src, i*64 + j);
841
842 nouveau_bo_wr16(dst, i*width + j, (pixel & 0x80000000) >> 16
843 | (pixel & 0xf80000) >> 9
844 | (pixel & 0xf800) >> 6
845 | (pixel & 0xf8) >> 3);
846 }
847 }
848}
849
/* Copy a 64x64 ARGB8888 cursor image from 'src' to 'dst' for NV11+ parts,
 * fixing up the alpha channel (and byte order on big-endian NV11) along
 * the way. */
static void nv11_cursor_upload(struct drm_device *dev, struct nouveau_bo *src,
			       struct nouveau_bo *dst)
{
	uint32_t pixel;
	int alpha, i;

	/* nv11+ supports premultiplied (PM), or non-premultiplied (NPM) alpha
	 * cursors (though NPM in combination with fp dithering may not work on
	 * nv11, from "nv" driver history)
	 * NPM mode needs NV_PCRTC_CURSOR_CONFIG_ALPHA_BLEND set and is what the
	 * blob uses, however we get given PM cursors so we use PM mode
	 */
	for (i = 0; i < 64 * 64; i++) {
		pixel = nouveau_bo_rd32(src, i);

		/* hw gets unhappy if alpha <= rgb values. for a PM image "less
		 * than" shouldn't happen; fix "equal to" case by adding one to
		 * alpha channel (slightly inaccurate, but so is attempting to
		 * get back to NPM images, due to limits of integer precision)
		 */
		alpha = pixel >> 24;
		if (alpha > 0 && alpha < 255)
			pixel = (pixel & 0x00ffffff) | ((alpha + 1) << 24);

#ifdef __BIG_ENDIAN
		{
			struct drm_nouveau_private *dev_priv = dev->dev_private;

			/* NV11 wants the cursor data byte-swapped on
			 * big-endian hosts. */
			if (dev_priv->chipset == 0x11) {
				pixel = ((pixel & 0x000000ff) << 24) |
					((pixel & 0x0000ff00) << 8) |
					((pixel & 0x00ff0000) >> 8) |
					((pixel & 0xff000000) >> 24);
			}
		}
#endif

		nouveau_bo_wr32(dst, i, pixel);
	}
}
890
/* DRM cursor_set entry point: look up the user's 64x64 cursor buffer,
 * convert/upload it into the CRTC's private cursor bo, then show the
 * cursor.  A zero buffer_handle hides the cursor.  Returns 0 or a
 * negative errno. */
static int
nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
		     uint32_t buffer_handle, uint32_t width, uint32_t height)
{
	struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	struct nouveau_bo *cursor = NULL;
	struct drm_gem_object *gem;
	int ret = 0;

	/* Hardware cursor is fixed at 64x64. */
	if (width != 64 || height != 64)
		return -EINVAL;

	if (!buffer_handle) {
		nv_crtc->cursor.hide(nv_crtc, true);
		return 0;
	}

	/* Lookup takes a reference; dropped at 'out' below. */
	gem = drm_gem_object_lookup(dev, file_priv, buffer_handle);
	if (!gem)
		return -EINVAL;
	cursor = nouveau_gem_object(gem);

	ret = nouveau_bo_map(cursor);
	if (ret)
		goto out;

	/* NV11+ takes the image as-is (with alpha fixup); older parts need
	 * conversion to 16-bit. */
	if (dev_priv->chipset >= 0x11)
		nv11_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo);
	else
		nv04_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo);

	nouveau_bo_unmap(cursor);
	nv_crtc->cursor.offset = nv_crtc->cursor.nvbo->bo.offset;
	nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset);
	nv_crtc->cursor.show(nv_crtc, true);
out:
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}
932
933static int
934nv04_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
935{
936 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
937
938 nv_crtc->cursor.set_pos(nv_crtc, x, y);
939 return 0;
940}
941
/* Core CRTC callbacks for pre-NV50 hardware. */
static const struct drm_crtc_funcs nv04_crtc_funcs = {
	.save = nv_crtc_save,
	.restore = nv_crtc_restore,
	.cursor_set = nv04_crtc_cursor_set,
	.cursor_move = nv04_crtc_cursor_move,
	.gamma_set = nv_crtc_gamma_set,
	.set_config = drm_crtc_helper_set_config,
	.destroy = nv_crtc_destroy,
};
951
/* Modesetting helper callbacks for pre-NV50 CRTCs. */
static const struct drm_crtc_helper_funcs nv04_crtc_helper_funcs = {
	.dpms = nv_crtc_dpms,
	.prepare = nv_crtc_prepare,
	.commit = nv_crtc_commit,
	.mode_fixup = nv_crtc_mode_fixup,
	.mode_set = nv_crtc_mode_set,
	.mode_set_base = nv04_crtc_mode_set_base,
	.load_lut = nv_crtc_gamma_load,
};
961
/* Allocate and register CRTC number 'crtc_num' for a pre-NV50 device,
 * including its hardware-cursor VRAM buffer.  Returns 0 or -ENOMEM.
 * NOTE(review): cursor-bo allocation failure is deliberately non-fatal —
 * the bo pointer is simply dropped and the function still returns 0. */
int
nv04_crtc_create(struct drm_device *dev, int crtc_num)
{
	struct nouveau_crtc *nv_crtc;
	int ret, i;

	nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
	if (!nv_crtc)
		return -ENOMEM;

	/* Identity gamma ramp; depth==0 marks the LUT as not yet uploaded
	 * (it is pushed on the first mode_set_base()). */
	for (i = 0; i < 256; i++) {
		nv_crtc->lut.r[i] = i << 8;
		nv_crtc->lut.g[i] = i << 8;
		nv_crtc->lut.b[i] = i << 8;
	}
	nv_crtc->lut.depth = 0;

	nv_crtc->index = crtc_num;
	nv_crtc->last_dpms = NV_DPMS_CLEARED;

	drm_crtc_init(dev, &nv_crtc->base, &nv04_crtc_funcs);
	drm_crtc_helper_add(&nv_crtc->base, &nv04_crtc_helper_funcs);
	drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);

	/* 64x64x32bpp cursor image, pinned and kept mapped in VRAM. */
	ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
			     0, 0x0000, false, true, &nv_crtc->cursor.nvbo);
	if (!ret) {
		ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
		if (!ret)
			ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
		if (ret)
			nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
	}

	nv04_cursor_init(nv_crtc);

	return 0;
}
1000
diff --git a/drivers/gpu/drm/nouveau/nv04_cursor.c b/drivers/gpu/drm/nouveau/nv04_cursor.c
new file mode 100644
index 000000000000..89a91b9d8b25
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_cursor.c
@@ -0,0 +1,70 @@
1#include "drmP.h"
2#include "drm_mode.h"
3#include "nouveau_reg.h"
4#include "nouveau_drv.h"
5#include "nouveau_crtc.h"
6#include "nouveau_hw.h"
7
8static void
9nv04_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
10{
11 nv_show_cursor(nv_crtc->base.dev, nv_crtc->index, true);
12}
13
14static void
15nv04_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
16{
17 nv_show_cursor(nv_crtc->base.dev, nv_crtc->index, false);
18}
19
20static void
21nv04_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
22{
23 NVWriteRAMDAC(nv_crtc->base.dev, nv_crtc->index,
24 NV_PRAMDAC_CU_START_POS,
25 XLATE(y, 0, NV_PRAMDAC_CU_START_POS_Y) |
26 XLATE(x, 0, NV_PRAMDAC_CU_START_POS_X));
27}
28
29static void
30crtc_wr_cio_state(struct drm_crtc *crtc, struct nv04_crtc_reg *crtcstate, int index)
31{
32 NVWriteVgaCrtc(crtc->dev, nouveau_crtc(crtc)->index, index,
33 crtcstate->CRTC[index]);
34}
35
/* Program the VRAM offset of the cursor image.  The offset is split
 * across three HCUR_ADDR CRTC registers; doublescan modes additionally
 * need the CUR_DBL bit. */
static void
nv04_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
{
	struct drm_device *dev = nv_crtc->base.dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
	struct drm_crtc *crtc = &nv_crtc->base;

	regp->CRTC[NV_CIO_CRE_HCUR_ADDR0_INDEX] =
		MASK(NV_CIO_CRE_HCUR_ASI) |
		XLATE(offset, 17, NV_CIO_CRE_HCUR_ADDR0_ADR);
	regp->CRTC[NV_CIO_CRE_HCUR_ADDR1_INDEX] =
		XLATE(offset, 11, NV_CIO_CRE_HCUR_ADDR1_ADR);
	if (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)
		regp->CRTC[NV_CIO_CRE_HCUR_ADDR1_INDEX] |=
			MASK(NV_CIO_CRE_HCUR_ADDR1_CUR_DBL);
	regp->CRTC[NV_CIO_CRE_HCUR_ADDR2_INDEX] = offset >> 24;

	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
	/* NV40 needs an extra kick for cursor address changes to stick. */
	if (dev_priv->card_type == NV_40)
		nv_fix_nv40_hw_cursor(dev, nv_crtc->index);
}
60
/* Wire up the pre-NV50 hardware-cursor entry points.  Always returns 0. */
int
nv04_cursor_init(struct nouveau_crtc *crtc)
{
	crtc->cursor.set_offset = nv04_cursor_set_offset;
	crtc->cursor.set_pos = nv04_cursor_set_pos;
	crtc->cursor.hide = nv04_cursor_hide;
	crtc->cursor.show = nv04_cursor_show;
	return 0;
}
70
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
new file mode 100644
index 000000000000..1cb19e3acb55
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -0,0 +1,531 @@
1/*
2 * Copyright 2003 NVIDIA, Corporation
3 * Copyright 2006 Dave Airlie
4 * Copyright 2007 Maarten Maathuis
5 * Copyright 2007-2009 Stuart Bennett
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
16 * Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
23 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
24 * DEALINGS IN THE SOFTWARE.
25 */
26
27#include "drmP.h"
28#include "drm_crtc_helper.h"
29
30#include "nouveau_drv.h"
31#include "nouveau_encoder.h"
32#include "nouveau_connector.h"
33#include "nouveau_crtc.h"
34#include "nouveau_hw.h"
35#include "nvreg.h"
36
/* Return the register offset of this encoder's DAC relative to the first
 * one: +0x68 for output C, +0x2000 for output B, cumulative.
 * NOTE(review): the `8 |` in each mask appears in the original and in
 * upstream history — confirm against the DCB "or" bit layout before
 * changing. */
int nv04_dac_output_offset(struct drm_encoder *encoder)
{
	struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
	int offset = 0;

	if (dcb->or & (8 | OUTPUT_C))
		offset += 0x68;
	if (dcb->or & (8 | OUTPUT_B))
		offset += 0x2000;

	return offset;
}
49
/*
 * arbitrary limit to number of sense oscillations tolerated in one sample
 * period (observed to be at least 13 in "nvidia")
 */
#define MAX_HBLANK_OSC 20

/*
 * arbitrary limit to number of conflicting sample pairs to tolerate at a
 * voltage step (observed to be at least 5 in "nvidia")
 */
#define MAX_SAMPLE_PAIRS 10
61
/* Take two debounced readings of the DAC load-sense bit, one per hblank
 * period, storing them in sense[0..1].  Returns 0 on success or -EBUSY
 * if the CRTC never leaves/enters hblank (e.g. it is inactive). */
static int sample_load_twice(struct drm_device *dev, bool sense[2])
{
	int i;

	for (i = 0; i < 2; i++) {
		bool sense_a, sense_b, sense_b_prime;
		int j = 0;

		/*
		 * wait for bit 0 clear -- out of hblank -- (say reg value 0x4),
		 * then wait for transition 0x4->0x5->0x4: enter hblank, leave
		 * hblank again
		 * use a 10ms timeout (guards against crtc being inactive, in
		 * which case blank state would never change)
		 */
		if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR,
					0x00000001, 0x00000000))
			return -EBUSY;
		if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR,
					0x00000001, 0x00000001))
			return -EBUSY;
		if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR,
					0x00000001, 0x00000000))
			return -EBUSY;

		udelay(100);
		/* when level triggers, sense is _LO_ */
		sense_a = nv_rd08(dev, NV_PRMCIO_INP0) & 0x10;

		/* take another reading until it agrees with sense_a... */
		do {
			udelay(100);
			sense_b = nv_rd08(dev, NV_PRMCIO_INP0) & 0x10;
			if (sense_a != sense_b) {
				sense_b_prime =
					nv_rd08(dev, NV_PRMCIO_INP0) & 0x10;
				if (sense_b == sense_b_prime) {
					/* ... unless two consecutive subsequent
					 * samples agree; sense_a is replaced */
					sense_a = sense_b;
					/* force mis-match so we loop */
					sense_b = !sense_a;
				}
			}
		} while ((sense_a != sense_b) && ++j < MAX_HBLANK_OSC);

		if (j == MAX_HBLANK_OSC)
			/* with so much oscillation, default to sense:LO */
			sense[i] = false;
		else
			sense[i] = sense_a;
	}

	return 0;
}
117
/* Analog load detection for pre-GF4 parts: ramp the blue palette entry
 * while watching the DAC sense bit.  If sense drops before blue reaches
 * 0x18 no monitor is attached.  All disturbed VGA/RAMDAC state is saved
 * up front and restored before returning. */
static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
						 struct drm_connector *connector)
{
	struct drm_device *dev = encoder->dev;
	uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode;
	uint8_t saved_palette0[3], saved_palette_mask;
	uint32_t saved_rtest_ctrl, saved_rgen_ctrl;
	int i;
	uint8_t blue;
	bool sense = true;

	/*
	 * for this detection to work, there needs to be a mode set up on the
	 * CRTC. this is presumed to be the case
	 */

	if (nv_two_heads(dev))
		/* only implemented for head A for now */
		NVSetOwner(dev, 0);

	saved_cr_mode = NVReadVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX);
	NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode | 0x80);

	saved_seq1 = NVReadVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX);
	NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1 & ~0x20);

	/* Make sure the DAC is powered while we probe. */
	saved_rtest_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL);
	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL,
		      saved_rtest_ctrl & ~NV_PRAMDAC_TEST_CONTROL_PWRDWN_DAC_OFF);

	msleep(10);

	saved_pi = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX);
	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX,
		       saved_pi & ~(0x80 | MASK(NV_CIO_CRE_PIXEL_FORMAT)));
	saved_rpc1 = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX);
	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1 & ~0xc0);

	/* Save palette entry 0 and force all pixels through it. */
	nv_wr08(dev, NV_PRMDIO_READ_MODE_ADDRESS, 0x0);
	for (i = 0; i < 3; i++)
		saved_palette0[i] = nv_rd08(dev, NV_PRMDIO_PALETTE_DATA);
	saved_palette_mask = nv_rd08(dev, NV_PRMDIO_PIXEL_MASK);
	nv_wr08(dev, NV_PRMDIO_PIXEL_MASK, 0);

	saved_rgen_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL);
	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL,
		      (saved_rgen_ctrl & ~(NV_PRAMDAC_GENERAL_CONTROL_BPC_8BITS |
					   NV_PRAMDAC_GENERAL_CONTROL_TERMINATION_75OHM)) |
		      NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON);

	blue = 8;	/* start of test range */

	do {
		bool sense_pair[2];

		nv_wr08(dev, NV_PRMDIO_WRITE_MODE_ADDRESS, 0);
		nv_wr08(dev, NV_PRMDIO_PALETTE_DATA, 0);
		nv_wr08(dev, NV_PRMDIO_PALETTE_DATA, 0);
		/* testing blue won't find monochrome monitors. I don't care */
		nv_wr08(dev, NV_PRMDIO_PALETTE_DATA, blue);

		i = 0;
		/* take sample pairs until both samples in the pair agree */
		do {
			if (sample_load_twice(dev, sense_pair))
				goto out;
		} while ((sense_pair[0] != sense_pair[1]) &&
			 ++i < MAX_SAMPLE_PAIRS);

		if (i == MAX_SAMPLE_PAIRS)
			/* too much oscillation defaults to LO */
			sense = false;
		else
			sense = sense_pair[0];

		/*
		 * if sense goes LO before blue ramps to 0x18, monitor is not connected.
		 * ergo, if blue gets to 0x18, monitor must be connected
		 */
	} while (++blue < 0x18 && sense);

out:
	/* Restore everything in reverse order of modification. */
	nv_wr08(dev, NV_PRMDIO_PIXEL_MASK, saved_palette_mask);
	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL, saved_rgen_ctrl);
	nv_wr08(dev, NV_PRMDIO_WRITE_MODE_ADDRESS, 0);
	for (i = 0; i < 3; i++)
		nv_wr08(dev, NV_PRMDIO_PALETTE_DATA, saved_palette0[i]);
	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL, saved_rtest_ctrl);
	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi);
	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1);
	NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1);
	NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode);

	if (blue == 0x18) {
		NV_INFO(dev, "Load detected on head A\n");
		return connector_status_connected;
	}

	return connector_status_disconnected;
}
218
/* GF4+ load detection: drive a test value onto the DAC via the testpoint
 * register and sample the sense bits from TEST_CONTROL.  Disturbed
 * power/gpio/routing state is saved and restored around the probe.
 * Returns the raw TEST_CONTROL sample for the caller to interpret. */
uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
	uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder);
	uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput,
		saved_rtest_ctrl, saved_gpio0, saved_gpio1, temp, routput;
	int head;

#define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20)
	/* Prefer the VBIOS-provided test value when present. */
	if (dcb->type == OUTPUT_TV) {
		testval = RGB_TEST_DATA(0xa0, 0xa0, 0xa0);

		if (dev_priv->vbios.tvdactestval)
			testval = dev_priv->vbios.tvdactestval;
	} else {
		testval = RGB_TEST_DATA(0x140, 0x140, 0x140); /* 0x94050140 */

		if (dev_priv->vbios.dactestval)
			testval = dev_priv->vbios.dactestval;
	}

	/* Power the DAC up for the duration of the probe. */
	saved_rtest_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset,
		      saved_rtest_ctrl & ~NV_PRAMDAC_TEST_CONTROL_PWRDWN_DAC_OFF);

	saved_powerctrl_2 = nvReadMC(dev, NV_PBUS_POWERCTRL_2);

	nvWriteMC(dev, NV_PBUS_POWERCTRL_2, saved_powerctrl_2 & 0xd7ffffff);
	if (regoffset == 0x68) {
		saved_powerctrl_4 = nvReadMC(dev, NV_PBUS_POWERCTRL_4);
		nvWriteMC(dev, NV_PBUS_POWERCTRL_4, saved_powerctrl_4 & 0xffffffcf);
	}

	saved_gpio1 = nv17_gpio_get(dev, DCB_GPIO_TVDAC1);
	saved_gpio0 = nv17_gpio_get(dev, DCB_GPIO_TVDAC0);

	nv17_gpio_set(dev, DCB_GPIO_TVDAC1, dcb->type == OUTPUT_TV);
	nv17_gpio_set(dev, DCB_GPIO_TVDAC0, dcb->type == OUTPUT_TV);

	msleep(4);

	saved_routput = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset);
	head = (saved_routput & 0x100) >> 8;
#if 0
	/* if there's a spare crtc, using it will minimise flicker for the case
	 * where the in-use crtc is in use by an off-chip tmds encoder */
	if (xf86_config->crtc[head]->enabled && !xf86_config->crtc[head ^ 1]->enabled)
		head ^= 1;
#endif
	/* nv driver and nv31 use 0xfffffeee, nv34 and 6600 use 0xfffffece */
	routput = (saved_routput & 0xfffffece) | head << 8;

	if (dev_priv->card_type >= NV_40) {
		if (dcb->type == OUTPUT_TV)
			routput |= 0x1a << 16;
		else
			routput &= ~(0x1a << 16);
	}

	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, routput);
	msleep(1);

	temp = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset);
	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, temp | 1);

	/* Insert the test pattern and enable the testpoint. */
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TESTPOINT_DATA,
		      NV_PRAMDAC_TESTPOINT_DATA_NOTBLANK | testval);
	temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL,
		      temp | NV_PRAMDAC_TEST_CONTROL_TP_INS_EN_ASSERTED);
	msleep(5);

	sample = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);

	temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL,
		      temp & ~NV_PRAMDAC_TEST_CONTROL_TP_INS_EN_ASSERTED);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TESTPOINT_DATA, 0);

	/* bios does something more complex for restoring, but I think this is good enough */
	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, saved_routput);
	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, saved_rtest_ctrl);
	if (regoffset == 0x68)
		nvWriteMC(dev, NV_PBUS_POWERCTRL_4, saved_powerctrl_4);
	nvWriteMC(dev, NV_PBUS_POWERCTRL_2, saved_powerctrl_2);

	nv17_gpio_set(dev, DCB_GPIO_TVDAC1, saved_gpio1);
	nv17_gpio_set(dev, DCB_GPIO_TVDAC0, saved_gpio0);

	return sample;
}
312
313static enum drm_connector_status
314nv17_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
315{
316 struct drm_device *dev = encoder->dev;
317 struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
318 uint32_t sample = nv17_dac_sample_load(encoder);
319
320 if (sample & NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI) {
321 NV_INFO(dev, "Load detected on output %c\n",
322 '@' + ffs(dcb->or));
323 return connector_status_connected;
324 } else {
325 return connector_status_disconnected;
326 }
327}
328
/* Nothing to adjust for an analog output; accept every mode as-is. */
static bool nv04_dac_mode_fixup(struct drm_encoder *encoder,
				struct drm_display_mode *mode,
				struct drm_display_mode *adjusted_mode)
{
	return true;
}
335
/* Pre-modeset hook: power the encoder down, disable any digital flat
 * panel on this head, and clear the cached LCD register (when safe). */
static void nv04_dac_prepare(struct drm_encoder *encoder)
{
	struct drm_encoder_helper_funcs *helper = encoder->helper_private;
	struct drm_device *dev = encoder->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int head = nouveau_crtc(encoder->crtc)->index;
	struct nv04_crtc_reg *crtcstate = dev_priv->mode_reg.crtc_reg;

	helper->dpms(encoder, DRM_MODE_DPMS_OFF);

	nv04_dfp_disable(dev, head);

	/* Some NV4x have unknown values (0x3f, 0x50, 0x54, 0x6b, 0x79, 0x7f)
	 * at LCD__INDEX which we don't alter
	 */
	if (!(crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX] & 0x44))
		crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX] = 0;
}
354
355
/* Route this DAC to the chosen head and push any other analog encoder to
 * the opposite head, then program the bandwidth/test control value. */
static void nv04_dac_mode_set(struct drm_encoder *encoder,
			      struct drm_display_mode *mode,
			      struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int head = nouveau_crtc(encoder->crtc)->index;

	if (nv_gf4_disp_arch(dev)) {
		struct drm_encoder *rebind;
		uint32_t dac_offset = nv04_dac_output_offset(encoder);
		uint32_t otherdac;

		/* bit 16-19 are bits that are set on some G70 cards,
		 * but don't seem to have much effect */
		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + dac_offset,
			      head << 8 | NV_PRAMDAC_DACCLK_SEL_DACCLK);
		/* force any other vga encoders to bind to the other crtc */
		list_for_each_entry(rebind, &dev->mode_config.encoder_list, head) {
			if (rebind == encoder
			    || nouveau_encoder(rebind)->dcb->type != OUTPUT_ANALOG)
				continue;

			dac_offset = nv04_dac_output_offset(rebind);
			otherdac = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + dac_offset);
			NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + dac_offset,
				      (otherdac & ~0x0100) | (head ^ 1) << 8);
		}
	}

	/* This could use refinement for flatpanels, but it should work this way */
	if (dev_priv->chipset < 0x44)
		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000);
	else
		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);
}
392
393static void nv04_dac_commit(struct drm_encoder *encoder)
394{
395 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
396 struct drm_device *dev = encoder->dev;
397 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
398 struct drm_encoder_helper_funcs *helper = encoder->helper_private;
399
400 helper->dpms(encoder, DRM_MODE_DPMS_ON);
401
402 NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
403 drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
404 nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
405}
406
/* Enable or disable this encoder's DAC clock.  On GF4+ a per-OR user
 * bitmask (dac_users) tracks which DCB entries are driving the DAC, so
 * the clock is only gated once the last user goes away. */
void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable)
{
	struct drm_device *dev = encoder->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;

	if (nv_gf4_disp_arch(dev)) {
		uint32_t *dac_users = &dev_priv->dac_users[ffs(dcb->or) - 1];
		int dacclk_off = NV_PRAMDAC_DACCLK + nv04_dac_output_offset(encoder);
		uint32_t dacclk = NVReadRAMDAC(dev, 0, dacclk_off);

		if (enable) {
			*dac_users |= 1 << dcb->index;
			NVWriteRAMDAC(dev, 0, dacclk_off, dacclk | NV_PRAMDAC_DACCLK_SEL_DACCLK);

		} else {
			*dac_users &= ~(1 << dcb->index);
			/* Only gate the clock when no user remains. */
			if (!*dac_users)
				NVWriteRAMDAC(dev, 0, dacclk_off,
					      dacclk & ~NV_PRAMDAC_DACCLK_SEL_DACCLK);
		}
	}
}
430
431static void nv04_dac_dpms(struct drm_encoder *encoder, int mode)
432{
433 struct drm_device *dev = encoder->dev;
434 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
435
436 if (nv_encoder->last_dpms == mode)
437 return;
438 nv_encoder->last_dpms = mode;
439
440 NV_INFO(dev, "Setting dpms mode %d on vga encoder (output %d)\n",
441 mode, nv_encoder->dcb->index);
442
443 nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON);
444}
445
446static void nv04_dac_save(struct drm_encoder *encoder)
447{
448 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
449 struct drm_device *dev = encoder->dev;
450
451 if (nv_gf4_disp_arch(dev))
452 nv_encoder->restore.output = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK +
453 nv04_dac_output_offset(encoder));
454}
455
456static void nv04_dac_restore(struct drm_encoder *encoder)
457{
458 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
459 struct drm_device *dev = encoder->dev;
460
461 if (nv_gf4_disp_arch(dev))
462 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + nv04_dac_output_offset(encoder),
463 nv_encoder->restore.output);
464
465 nv_encoder->last_dpms = NV_DPMS_CLEARED;
466}
467
468static void nv04_dac_destroy(struct drm_encoder *encoder)
469{
470 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
471
472 NV_DEBUG_KMS(encoder->dev, "\n");
473
474 drm_encoder_cleanup(encoder);
475 kfree(nv_encoder);
476}
477
/* Helper callbacks for pre-GF4 analog outputs (palette-based detect). */
static const struct drm_encoder_helper_funcs nv04_dac_helper_funcs = {
	.dpms = nv04_dac_dpms,
	.save = nv04_dac_save,
	.restore = nv04_dac_restore,
	.mode_fixup = nv04_dac_mode_fixup,
	.prepare = nv04_dac_prepare,
	.commit = nv04_dac_commit,
	.mode_set = nv04_dac_mode_set,
	.detect = nv04_dac_detect
};
488
/* Helper callbacks for GF4+ analog outputs (sample-load detect); only
 * .detect differs from the nv04 table. */
static const struct drm_encoder_helper_funcs nv17_dac_helper_funcs = {
	.dpms = nv04_dac_dpms,
	.save = nv04_dac_save,
	.restore = nv04_dac_restore,
	.mode_fixup = nv04_dac_mode_fixup,
	.prepare = nv04_dac_prepare,
	.commit = nv04_dac_commit,
	.mode_set = nv04_dac_mode_set,
	.detect = nv17_dac_detect
};
499
/* Core encoder callbacks shared by both DAC generations. */
static const struct drm_encoder_funcs nv04_dac_funcs = {
	.destroy = nv04_dac_destroy,
};
503
/* Create and register an analog (VGA) encoder for the given DCB entry,
 * picking the GF4+ or pre-GF4 helper table as appropriate.  Returns 0
 * or -ENOMEM. */
int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry)
{
	const struct drm_encoder_helper_funcs *helper;
	struct drm_encoder *encoder;
	struct nouveau_encoder *nv_encoder = NULL;

	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
	if (!nv_encoder)
		return -ENOMEM;

	encoder = to_drm_encoder(nv_encoder);

	nv_encoder->dcb = entry;
	/* 'or' is a bitmask in the DCB; store it as an index. */
	nv_encoder->or = ffs(entry->or) - 1;

	if (nv_gf4_disp_arch(dev))
		helper = &nv17_dac_helper_funcs;
	else
		helper = &nv04_dac_helper_funcs;

	drm_encoder_init(dev, encoder, &nv04_dac_funcs, DRM_MODE_ENCODER_DAC);
	drm_encoder_helper_add(encoder, helper);

	encoder->possible_crtcs = entry->heads;
	encoder->possible_clones = 0;

	return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
new file mode 100644
index 000000000000..41634d4752fe
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -0,0 +1,623 @@
1/*
2 * Copyright 2003 NVIDIA, Corporation
3 * Copyright 2006 Dave Airlie
4 * Copyright 2007 Maarten Maathuis
5 * Copyright 2007-2009 Stuart Bennett
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
16 * Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
23 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
24 * DEALINGS IN THE SOFTWARE.
25 */
26
27#include "drmP.h"
28#include "drm_crtc_helper.h"
29
30#include "nouveau_drv.h"
31#include "nouveau_encoder.h"
32#include "nouveau_connector.h"
33#include "nouveau_crtc.h"
34#include "nouveau_hw.h"
35#include "nvreg.h"
36
/* Combined NV_PRAMDAC_FP_TG_CONTROL bitmasks: the display, hsync and vsync
 * enables are switched as a group when turning a panel output on or off. */
#define FP_TG_CONTROL_ON (NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS |	\
			  NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS |	\
			  NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS)
#define FP_TG_CONTROL_OFF (NV_PRAMDAC_FP_TG_CONTROL_DISPEN_DISABLE |	\
			   NV_PRAMDAC_FP_TG_CONTROL_HSYNC_DISABLE |	\
			   NV_PRAMDAC_FP_TG_CONTROL_VSYNC_DISABLE)
43
44static inline bool is_fpc_off(uint32_t fpc)
45{
46 return ((fpc & (FP_TG_CONTROL_ON | FP_TG_CONTROL_OFF)) ==
47 FP_TG_CONTROL_OFF);
48}
49
/*
 * Return the head (0 or 1) currently driving this digital output.
 *
 * Special case of nv_read_tmds used to find the crtc associated with an
 * output: write TMDS sub-register 0x4 through the output's ramdac and read
 * the cross-head bit back.  This does not give a correct answer for
 * off-chip DVI, but there's no use for such an answer anyway.
 */
int nv04_dfp_get_bound_head(struct drm_device *dev, struct dcb_entry *dcbent)
{
	/* which ramdac to go through comes from the OUTPUT_C bit of "or" */
	int ramdac = (dcbent->or & OUTPUT_C) >> 2;

	NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_CONTROL,
		      NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE | 0x4);
	/* bit 3 of the readback is relative to the ramdac used, hence the xor */
	return ((NVReadRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_DATA) & 0x8) >> 3) ^ ramdac;
}
62
/*
 * Bind a digital output to the given head by writing TMDS sub-register
 * 0x04 (the BIOS scripts don't do this for us, sadly; luckily we do know
 * the values).  For dual-link outputs the second link is programmed too,
 * with bit 3 inverted.
 *
 * NOTE(review): an earlier comment here described a "head < 0 ...
 * overrideval" convention that no longer matches this signature.
 */
void nv04_dfp_bind_head(struct drm_device *dev, struct dcb_entry *dcbent,
			int head, bool dl)
{
	int ramdac = (dcbent->or & OUTPUT_C) >> 2;
	uint8_t tmds04 = 0x80;

	/* set the cross-head bit when the target head isn't this ramdac's own */
	if (head != ramdac)
		tmds04 = 0x88;

	if (dcbent->type == OUTPUT_LVDS)
		tmds04 |= 0x01;

	nv_write_tmds(dev, dcbent->or, 0, 0x04, tmds04);

	if (dl) /* dual link */
		nv_write_tmds(dev, dcbent->or, 1, 0x04, tmds04 ^ 0x08);
}
87
/*
 * Force the flat-panel timing generator on the given head off, and make
 * sure the staged register state won't switch it back on later.
 */
void nv04_dfp_disable(struct drm_device *dev, int head)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv04_crtc_reg *crtcstate = dev_priv->mode_reg.crtc_reg;

	if (NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL) &
	    FP_TG_CONTROL_ON) {
		/* digital remnants must be cleaned before new crtc
		 * values programmed.  delay is time for the vga stuff
		 * to realise it's in control again
		 */
		NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL,
			      FP_TG_CONTROL_OFF);
		msleep(50);
	}
	/* don't inadvertently turn it on when state written later */
	crtcstate[head].fp_control = FP_TG_CONTROL_OFF;
}
106
/*
 * Switch flat-panel timing generator(s) for a DPMS change on this encoder.
 *
 * DPMS_ON: touch only the CRTC feeding this encoder — restore the value
 * saved by the "off" path if the generator was previously cut, mark this
 * encoder as a user of the CRTC, and write the register.
 *
 * Any other mode: drop this encoder from every CRTC's fp_users mask and
 * cut a CRTC's output only once it has no users left, saving the previous
 * value so DPMS_ON can bring it back.
 */
void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	struct nouveau_crtc *nv_crtc;
	uint32_t *fpc;

	if (mode == DRM_MODE_DPMS_ON) {
		nv_crtc = nouveau_crtc(encoder->crtc);
		fpc = &dev_priv->mode_reg.crtc_reg[nv_crtc->index].fp_control;

		if (is_fpc_off(*fpc)) {
			/* using saved value is ok, as (is_digital && dpms_on &&
			 * fp_control==OFF) is (at present) *only* true when
			 * fpc's most recent change was by below "off" code
			 */
			*fpc = nv_crtc->dpms_saved_fp_control;
		}

		nv_crtc->fp_users |= 1 << nouveau_encoder(encoder)->dcb->index;
		NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_FP_TG_CONTROL, *fpc);
	} else {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			nv_crtc = nouveau_crtc(crtc);
			fpc = &dev_priv->mode_reg.crtc_reg[nv_crtc->index].fp_control;

			nv_crtc->fp_users &= ~(1 << nouveau_encoder(encoder)->dcb->index);
			if (!is_fpc_off(*fpc) && !nv_crtc->fp_users) {
				nv_crtc->dpms_saved_fp_control = *fpc;
				/* cut the FP output */
				*fpc &= ~FP_TG_CONTROL_ON;
				*fpc |= FP_TG_CONTROL_OFF;
				NVWriteRAMDAC(dev, nv_crtc->index,
					      NV_PRAMDAC_FP_TG_CONTROL, *fpc);
			}
		}
	}
}
146
147static bool nv04_dfp_mode_fixup(struct drm_encoder *encoder,
148 struct drm_display_mode *mode,
149 struct drm_display_mode *adjusted_mode)
150{
151 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
152 struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder);
153
154 /* For internal panels and gpu scaling on DVI we need the native mode */
155 if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
156 if (!nv_connector->native_mode)
157 return false;
158 nv_encoder->mode = *nv_connector->native_mode;
159 adjusted_mode->clock = nv_connector->native_mode->clock;
160 } else {
161 nv_encoder->mode = *adjusted_mode;
162 }
163
164 return true;
165}
166
/*
 * Stage the SEL_CLK value (PLL-to-head bindings and spread spectrum bits)
 * for an on-chip digital output being bound to the given head.  Off-chip
 * outputs are untouched; the register itself is written later by callers.
 */
static void nv04_dfp_prepare_sel_clk(struct drm_device *dev,
				     struct nouveau_encoder *nv_encoder, int head)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv04_mode_state *state = &dev_priv->mode_reg;
	/* bit 16 or bit 18, selected by whether this output uses OUTPUT_A */
	uint32_t bits1618 = nv_encoder->dcb->or & OUTPUT_A ? 0x10000 : 0x40000;

	if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP)
		return;

	/* SEL_CLK is only used on the primary ramdac
	 * It toggles spread spectrum PLL output and sets the bindings of PLLs
	 * to heads on digital outputs
	 */
	if (head)
		state->sel_clk |= bits1618;
	else
		state->sel_clk &= ~bits1618;

	/* nv30:
	 *	bit 0		NVClk spread spectrum on/off
	 *	bit 2		MemClk spread spectrum on/off
	 *	bit 4		PixClk1 spread spectrum on/off toggle
	 *	bit 6		PixClk2 spread spectrum on/off toggle
	 *
	 * nv40 (observations from bios behaviour and mmio traces):
	 *	bits 4&6	as for nv30
	 *	bits 5&7	head dependent as for bits 4&6, but do not appear with 4&6;
	 *			maybe a different spread mode
	 *	bits 8&10	seen on dual-link dvi outputs, purpose unknown (set by POST scripts)
	 *	The logic behind turning spread spectrum on/off in the first place,
	 *	and which bit-pair to use, is unclear on nv40 (for earlier cards, the fp table
	 *	entry has the necessary info)
	 */
	if (nv_encoder->dcb->type == OUTPUT_LVDS && dev_priv->saved_reg.sel_clk & 0xf0) {
		int shift = (dev_priv->saved_reg.sel_clk & 0x50) ? 0 : 1;

		state->sel_clk &= ~0xf0;
		state->sel_clk |= (head ? 0x40 : 0x10) << shift;
	}
}
208
/*
 * drm_encoder_helper_funcs.prepare for DFPs: power the encoder down,
 * stage the SEL_CLK bindings, and set up the CRTC's LCD register so this
 * head is routed to the digital output before mode_set runs.
 */
static void nv04_dfp_prepare(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_encoder_helper_funcs *helper = encoder->helper_private;
	struct drm_device *dev = encoder->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int head = nouveau_crtc(encoder->crtc)->index;
	struct nv04_crtc_reg *crtcstate = dev_priv->mode_reg.crtc_reg;
	/* staged LCD register for this head and for the other head */
	uint8_t *cr_lcd = &crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX];
	uint8_t *cr_lcd_oth = &crtcstate[head ^ 1].CRTC[NV_CIO_CRE_LCD__INDEX];

	helper->dpms(encoder, DRM_MODE_DPMS_OFF);

	nv04_dfp_prepare_sel_clk(dev, nv_encoder, head);

	/* Some NV4x have unknown values (0x3f, 0x50, 0x54, 0x6b, 0x79, 0x7f)
	 * at LCD__INDEX which we don't alter
	 */
	if (!(*cr_lcd & 0x44)) {
		*cr_lcd = 0x3;

		if (nv_two_heads(dev)) {
			if (nv_encoder->dcb->location == DCB_LOC_ON_CHIP)
				*cr_lcd |= head ? 0x0 : 0x8;
			else {
				/* off-chip: route based on the DCB "or" lines */
				*cr_lcd |= (nv_encoder->dcb->or << 4) & 0x30;
				if (nv_encoder->dcb->type == OUTPUT_LVDS)
					*cr_lcd |= 0x30;
				if ((*cr_lcd & 0x30) == (*cr_lcd_oth & 0x30)) {
					/* avoid being connected to both crtcs */
					*cr_lcd_oth &= ~0x30;
					NVWriteVgaCrtc(dev, head ^ 1,
						       NV_CIO_CRE_LCD__INDEX,
						       *cr_lcd_oth);
				}
			}
		}
	}
}
248
249
/*
 * drm_encoder_helper_funcs.mode_set for DFPs: stage this CRTC's flat-panel
 * timing, control, scaling and dithering registers in dev_priv->mode_reg.
 * Nothing is written to hardware here; the staged state is flushed by the
 * CRTC code.  output_mode is the panel-side mode chosen by mode_fixup
 * (native mode when scaling, otherwise the adjusted mode).
 */
static void nv04_dfp_mode_set(struct drm_encoder *encoder,
			      struct drm_display_mode *mode,
			      struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
	struct nv04_crtc_reg *savep = &dev_priv->saved_reg.crtc_reg[nv_crtc->index];
	struct nouveau_connector *nv_connector = nouveau_crtc_connector_get(nv_crtc);
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_display_mode *output_mode = &nv_encoder->mode;
	uint32_t mode_ratio, panel_ratio;

	NV_DEBUG_KMS(dev, "Output mode on CRTC %d:\n", nv_crtc->index);
	drm_mode_debug_printmodeline(output_mode);

	/* Initialize the FP registers in this CRTC. */
	regp->fp_horiz_regs[FP_DISPLAY_END] = output_mode->hdisplay - 1;
	regp->fp_horiz_regs[FP_TOTAL] = output_mode->htotal - 1;
	/* on gf4+, keep at least digital_min_front_porch before hsync */
	if (!nv_gf4_disp_arch(dev) ||
	    (output_mode->hsync_start - output_mode->hdisplay) >=
					dev_priv->vbios.digital_min_front_porch)
		regp->fp_horiz_regs[FP_CRTC] = output_mode->hdisplay;
	else
		regp->fp_horiz_regs[FP_CRTC] = output_mode->hsync_start - dev_priv->vbios.digital_min_front_porch - 1;
	regp->fp_horiz_regs[FP_SYNC_START] = output_mode->hsync_start - 1;
	regp->fp_horiz_regs[FP_SYNC_END] = output_mode->hsync_end - 1;
	regp->fp_horiz_regs[FP_VALID_START] = output_mode->hskew;
	regp->fp_horiz_regs[FP_VALID_END] = output_mode->hdisplay - 1;

	regp->fp_vert_regs[FP_DISPLAY_END] = output_mode->vdisplay - 1;
	regp->fp_vert_regs[FP_TOTAL] = output_mode->vtotal - 1;
	regp->fp_vert_regs[FP_CRTC] = output_mode->vtotal - 5 - 1;
	regp->fp_vert_regs[FP_SYNC_START] = output_mode->vsync_start - 1;
	regp->fp_vert_regs[FP_SYNC_END] = output_mode->vsync_end - 1;
	regp->fp_vert_regs[FP_VALID_START] = 0;
	regp->fp_vert_regs[FP_VALID_END] = output_mode->vdisplay - 1;

	/* bit26: a bit seen on some g7x, no as yet discernable purpose */
	regp->fp_control = NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS |
			   (savep->fp_control & (1 << 26 | NV_PRAMDAC_FP_TG_CONTROL_READ_PROG));
	/* Deal with vsync/hsync polarity */
	/* LVDS screens do set this, but modes with +ve syncs are very rare */
	if (output_mode->flags & DRM_MODE_FLAG_PVSYNC)
		regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS;
	if (output_mode->flags & DRM_MODE_FLAG_PHSYNC)
		regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS;
	/* panel scaling first, as native would get set otherwise */
	if (nv_connector->scaling_mode == DRM_MODE_SCALE_NONE ||
	    nv_connector->scaling_mode == DRM_MODE_SCALE_CENTER)	/* panel handles it */
		regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_CENTER;
	else if (adjusted_mode->hdisplay == output_mode->hdisplay &&
		 adjusted_mode->vdisplay == output_mode->vdisplay)	/* native mode */
		regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_NATIVE;
	else /* gpu needs to scale */
		regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_SCALE;
	if (nvReadEXTDEV(dev, NV_PEXTDEV_BOOT_0) & NV_PEXTDEV_BOOT_0_STRAP_FP_IFACE_12BIT)
		regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12;
	/* NOTE(review): bits 24/28 below appear tied to >165 MHz (dual-link
	 * range) and off-chip outputs; exact register semantics unconfirmed */
	if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP &&
	    output_mode->clock > 165000)
		regp->fp_control |= (2 << 24);
	if (nv_encoder->dcb->type == OUTPUT_LVDS) {
		bool duallink, dummy;

		nouveau_bios_parse_lvds_table(dev, nv_connector->native_mode->
					      clock, &duallink, &dummy);
		if (duallink)
			regp->fp_control |= (8 << 28);
	} else
	if (output_mode->clock > 165000)
		regp->fp_control |= (8 << 28);

	regp->fp_debug_0 = NV_PRAMDAC_FP_DEBUG_0_YWEIGHT_ROUND |
			   NV_PRAMDAC_FP_DEBUG_0_XWEIGHT_ROUND |
			   NV_PRAMDAC_FP_DEBUG_0_YINTERP_BILINEAR |
			   NV_PRAMDAC_FP_DEBUG_0_XINTERP_BILINEAR |
			   NV_RAMDAC_FP_DEBUG_0_TMDS_ENABLED |
			   NV_PRAMDAC_FP_DEBUG_0_YSCALE_ENABLE |
			   NV_PRAMDAC_FP_DEBUG_0_XSCALE_ENABLE;

	/* We want automatic scaling */
	regp->fp_debug_1 = 0;
	/* This can override HTOTAL and VTOTAL */
	regp->fp_debug_2 = 0;

	/* Use 20.12 fixed point format to avoid floats */
	mode_ratio = (1 << 12) * adjusted_mode->hdisplay / adjusted_mode->vdisplay;
	panel_ratio = (1 << 12) * output_mode->hdisplay / output_mode->vdisplay;
	/* if ratios are equal, SCALE_ASPECT will automatically (and correctly)
	 * get treated the same as SCALE_FULLSCREEN */
	if (nv_connector->scaling_mode == DRM_MODE_SCALE_ASPECT &&
	    mode_ratio != panel_ratio) {
		uint32_t diff, scale;
		bool divide_by_2 = nv_gf4_disp_arch(dev);

		if (mode_ratio < panel_ratio) {
			/* vertical needs to expand to glass size (automatic)
			 * horizontal needs to be scaled at vertical scale factor
			 * to maintain aspect */

			scale = (1 << 12) * adjusted_mode->vdisplay / output_mode->vdisplay;
			regp->fp_debug_1 = NV_PRAMDAC_FP_DEBUG_1_XSCALE_TESTMODE_ENABLE |
					   XLATE(scale, divide_by_2, NV_PRAMDAC_FP_DEBUG_1_XSCALE_VALUE);

			/* restrict area of screen used, horizontally */
			diff = output_mode->hdisplay -
			       output_mode->vdisplay * mode_ratio / (1 << 12);
			regp->fp_horiz_regs[FP_VALID_START] += diff / 2;
			regp->fp_horiz_regs[FP_VALID_END] -= diff / 2;
		}

		if (mode_ratio > panel_ratio) {
			/* horizontal needs to expand to glass size (automatic)
			 * vertical needs to be scaled at horizontal scale factor
			 * to maintain aspect */

			scale = (1 << 12) * adjusted_mode->hdisplay / output_mode->hdisplay;
			regp->fp_debug_1 = NV_PRAMDAC_FP_DEBUG_1_YSCALE_TESTMODE_ENABLE |
					   XLATE(scale, divide_by_2, NV_PRAMDAC_FP_DEBUG_1_YSCALE_VALUE);

			/* restrict area of screen used, vertically */
			diff = output_mode->vdisplay -
			       (1 << 12) * output_mode->hdisplay / mode_ratio;
			regp->fp_vert_regs[FP_VALID_START] += diff / 2;
			regp->fp_vert_regs[FP_VALID_END] -= diff / 2;
		}
	}

	/* Output property. */
	if (nv_connector->use_dithering) {
		if (dev_priv->chipset == 0x11)
			regp->dither = savep->dither | 0x00010000;
		else {
			int i;
			regp->dither = savep->dither | 0x00000001;
			for (i = 0; i < 3; i++) {
				regp->dither_regs[i] = 0xe4e4e4e4;
				regp->dither_regs[i + 3] = 0x44444444;
			}
		}
	} else {
		if (dev_priv->chipset != 0x11) {
			/* reset them */
			int i;
			for (i = 0; i < 3; i++) {
				regp->dither_regs[i] = savep->dither_regs[i];
				regp->dither_regs[i + 3] = savep->dither_regs[i + 3];
			}
		}
		regp->dither = savep->dither;
	}

	regp->fp_margin_color = 0;
}
405
406static void nv04_dfp_commit(struct drm_encoder *encoder)
407{
408 struct drm_device *dev = encoder->dev;
409 struct drm_nouveau_private *dev_priv = dev->dev_private;
410 struct drm_encoder_helper_funcs *helper = encoder->helper_private;
411 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
412 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
413 struct dcb_entry *dcbe = nv_encoder->dcb;
414 int head = nouveau_crtc(encoder->crtc)->index;
415
416 NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
417 drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
418 nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
419
420 if (dcbe->type == OUTPUT_TMDS)
421 run_tmds_table(dev, dcbe, head, nv_encoder->mode.clock);
422 else if (dcbe->type == OUTPUT_LVDS)
423 call_lvds_script(dev, dcbe, head, LVDS_RESET, nv_encoder->mode.clock);
424
425 /* update fp_control state for any changes made by scripts,
426 * so correct value is written at DPMS on */
427 dev_priv->mode_reg.crtc_reg[head].fp_control =
428 NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL);
429
430 /* This could use refinement for flatpanels, but it should work this way */
431 if (dev_priv->chipset < 0x44)
432 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000);
433 else
434 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);
435
436 helper->dpms(encoder, DRM_MODE_DPMS_ON);
437
438 NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
439 drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
440 nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
441}
442
443static inline bool is_powersaving_dpms(int mode)
444{
445 return (mode != DRM_MODE_DPMS_ON);
446}
447
/*
 * DPMS hook for LVDS panels: run the VBIOS panel power scripts (when the
 * DCB says to use them), update the FP timing generator, and keep the
 * SEL_CLK state in sync.  Transitions between two power-saving states
 * are short-circuited since the panel is already down.
 */
static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_crtc *crtc = encoder->crtc;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	bool was_powersaving = is_powersaving_dpms(nv_encoder->last_dpms);

	if (nv_encoder->last_dpms == mode)
		return;
	nv_encoder->last_dpms = mode;

	NV_INFO(dev, "Setting dpms mode %d on lvds encoder (output %d)\n",
		     mode, nv_encoder->dcb->index);

	/* off -> off (in any combination of standby/suspend/off): no work */
	if (was_powersaving && is_powersaving_dpms(mode))
		return;

	if (nv_encoder->dcb->lvdsconf.use_power_scripts) {
		struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder);

		/* when removing an output, crtc may not be set, but PANEL_OFF
		 * must still be run
		 */
		int head = crtc ? nouveau_crtc(crtc)->index :
			   nv04_dfp_get_bound_head(dev, nv_encoder->dcb);

		if (mode == DRM_MODE_DPMS_ON) {
			if (!nv_connector->native_mode) {
				NV_ERROR(dev, "Not turning on LVDS without native mode\n");
				return;
			}
			call_lvds_script(dev, nv_encoder->dcb, head,
					 LVDS_PANEL_ON, nv_connector->native_mode->clock);
		} else
			/* pxclk of 0 is fine for PANEL_OFF, and for a
			 * disconnected LVDS encoder there is no native_mode
			 */
			call_lvds_script(dev, nv_encoder->dcb, head,
					 LVDS_PANEL_OFF, 0);
	}

	nv04_dfp_update_fp_control(encoder, mode);

	if (mode == DRM_MODE_DPMS_ON)
		nv04_dfp_prepare_sel_clk(dev, nv_encoder, nouveau_crtc(crtc)->index);
	else {
		/* powering down: reread the live value and drop spread bits */
		dev_priv->mode_reg.sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK);
		dev_priv->mode_reg.sel_clk &= ~0xf0;
	}
	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, dev_priv->mode_reg.sel_clk);
}
500
501static void nv04_tmds_dpms(struct drm_encoder *encoder, int mode)
502{
503 struct drm_device *dev = encoder->dev;
504 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
505
506 if (nv_encoder->last_dpms == mode)
507 return;
508 nv_encoder->last_dpms = mode;
509
510 NV_INFO(dev, "Setting dpms mode %d on tmds encoder (output %d)\n",
511 mode, nv_encoder->dcb->index);
512
513 nv04_dfp_update_fp_control(encoder, mode);
514}
515
516static void nv04_dfp_save(struct drm_encoder *encoder)
517{
518 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
519 struct drm_device *dev = encoder->dev;
520
521 if (nv_two_heads(dev))
522 nv_encoder->restore.head =
523 nv04_dfp_get_bound_head(dev, nv_encoder->dcb);
524}
525
/*
 * Put the output back into its pre-driver state: re-run the LVDS power-on
 * script or TMDS table against the head recorded by nv04_dfp_save.
 */
static void nv04_dfp_restore(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_device *dev = encoder->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int head = nv_encoder->restore.head;

	if (nv_encoder->dcb->type == OUTPUT_LVDS) {
		struct drm_display_mode *native_mode = nouveau_encoder_connector_get(nv_encoder)->native_mode;
		if (native_mode)
			call_lvds_script(dev, nv_encoder->dcb, head, LVDS_PANEL_ON,
					 native_mode->clock);
		else
			NV_ERROR(dev, "Not restoring LVDS without native mode\n");

	} else if (nv_encoder->dcb->type == OUTPUT_TMDS) {
		/* recover the pixel clock in use before the driver loaded */
		int clock = nouveau_hw_pllvals_to_clk
					(&dev_priv->saved_reg.crtc_reg[head].pllvals);

		run_tmds_table(dev, nv_encoder->dcb, head, clock);
	}

	/* force the next dpms call to reprogram regardless of cached mode */
	nv_encoder->last_dpms = NV_DPMS_CLEARED;
}
550
551static void nv04_dfp_destroy(struct drm_encoder *encoder)
552{
553 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
554
555 NV_DEBUG_KMS(encoder->dev, "\n");
556
557 drm_encoder_cleanup(encoder);
558 kfree(nv_encoder);
559}
560
/* LVDS helper vtable; differs from the TMDS table below only in .dpms.
 * No .detect: DFP presence comes from DDC/the DCB, not load detection. */
static const struct drm_encoder_helper_funcs nv04_lvds_helper_funcs = {
	.dpms = nv04_lvds_dpms,
	.save = nv04_dfp_save,
	.restore = nv04_dfp_restore,
	.mode_fixup = nv04_dfp_mode_fixup,
	.prepare = nv04_dfp_prepare,
	.commit = nv04_dfp_commit,
	.mode_set = nv04_dfp_mode_set,
	.detect = NULL,
};
571
/* TMDS helper vtable; identical to the LVDS table except for .dpms. */
static const struct drm_encoder_helper_funcs nv04_tmds_helper_funcs = {
	.dpms = nv04_tmds_dpms,
	.save = nv04_dfp_save,
	.restore = nv04_dfp_restore,
	.mode_fixup = nv04_dfp_mode_fixup,
	.prepare = nv04_dfp_prepare,
	.commit = nv04_dfp_commit,
	.mode_set = nv04_dfp_mode_set,
	.detect = NULL,
};
582
/* Core encoder ops shared by LVDS and TMDS; only destroy is needed. */
static const struct drm_encoder_funcs nv04_dfp_funcs = {
	.destroy = nv04_dfp_destroy,
};
586
587int nv04_dfp_create(struct drm_device *dev, struct dcb_entry *entry)
588{
589 const struct drm_encoder_helper_funcs *helper;
590 struct drm_encoder *encoder;
591 struct nouveau_encoder *nv_encoder = NULL;
592 int type;
593
594 switch (entry->type) {
595 case OUTPUT_TMDS:
596 type = DRM_MODE_ENCODER_TMDS;
597 helper = &nv04_tmds_helper_funcs;
598 break;
599 case OUTPUT_LVDS:
600 type = DRM_MODE_ENCODER_LVDS;
601 helper = &nv04_lvds_helper_funcs;
602 break;
603 default:
604 return -EINVAL;
605 }
606
607 nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
608 if (!nv_encoder)
609 return -ENOMEM;
610
611 encoder = to_drm_encoder(nv_encoder);
612
613 nv_encoder->dcb = entry;
614 nv_encoder->or = ffs(entry->or) - 1;
615
616 drm_encoder_init(dev, encoder, &nv04_dfp_funcs, type);
617 drm_encoder_helper_add(encoder, helper);
618
619 encoder->possible_crtcs = entry->heads;
620 encoder->possible_clones = 0;
621
622 return 0;
623}
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
new file mode 100644
index 000000000000..c7898b4f6dfb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -0,0 +1,244 @@
1/*
2 * Copyright 2009 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "drm.h"
27#include "drm_crtc_helper.h"
28
29#include "nouveau_drv.h"
30#include "nouveau_fb.h"
31#include "nouveau_hw.h"
32#include "nouveau_encoder.h"
33#include "nouveau_connector.h"
34
/* Non-zero when more than one bit is set in the encoder mask e. */
#define MULTIPLE_ENCODERS(e) (e & (e - 1))
36
/*
 * Record which head owned the VGA resources (CRTC_OWNER, CR44) before the
 * driver took over, so it can be put back on unload.
 *
 * CR44 can't be read reliably on nv11, so there ownership is inferred
 * from which heads have slaved (flat-panel) outputs, preferring a non-TV
 * head, falling back to head A.
 */
static void
nv04_display_store_initial_head_owner(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (dev_priv->chipset != 0x11) {
		dev_priv->crtc_owner = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_44);
		goto ownerknown;
	}

	/* reading CR44 is broken on nv11, so we attempt to infer it */
	if (nvReadMC(dev, NV_PBUS_DEBUG_1) & (1 << 28))	/* heads tied, restore both */
		dev_priv->crtc_owner = 0x4;
	else {
		uint8_t slaved_on_A, slaved_on_B;
		bool tvA = false;
		bool tvB = false;

		NVLockVgaCrtcs(dev, false);

		slaved_on_B = NVReadVgaCrtc(dev, 1, NV_CIO_CRE_PIXEL_INDEX) &
									0x80;
		if (slaved_on_B)
			tvB = !(NVReadVgaCrtc(dev, 1, NV_CIO_CRE_LCD__INDEX) &
					MASK(NV_CIO_CRE_LCD_LCD_SELECT));

		slaved_on_A = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX) &
									0x80;
		if (slaved_on_A)
			tvA = !(NVReadVgaCrtc(dev, 0, NV_CIO_CRE_LCD__INDEX) &
					MASK(NV_CIO_CRE_LCD_LCD_SELECT));

		NVLockVgaCrtcs(dev, true);

		/* prefer a head slaved to a panel over one slaved to a TV */
		if (slaved_on_A && !tvA)
			dev_priv->crtc_owner = 0x0;
		else if (slaved_on_B && !tvB)
			dev_priv->crtc_owner = 0x3;
		else if (slaved_on_A)
			dev_priv->crtc_owner = 0x0;
		else if (slaved_on_B)
			dev_priv->crtc_owner = 0x3;
		else
			dev_priv->crtc_owner = 0x0;
	}

ownerknown:
	NV_INFO(dev, "Initial CRTC_OWNER is %d\n", dev_priv->crtc_owner);

	/* we need to ensure the heads are not tied henceforth, or reading any
	 * 8 bit reg on head B will fail
	 * setting a single arbitrary head solves that */
	NVSetOwner(dev, 0);
}
91
/*
 * Set up KMS for pre-NV50 hardware: note initial head ownership, init the
 * DRM mode config, create the CRTC(s), one encoder per DCB output entry
 * and one connector per DCB connector entry, then snapshot the pre-load
 * hardware state via the per-object save hooks.  Always returns 0.
 */
int
nv04_display_create(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct dcb_table *dcb = &dev_priv->vbios.dcb;
	struct drm_encoder *encoder;
	struct drm_crtc *crtc;
	int i, ret;

	NV_DEBUG_KMS(dev, "\n");

	if (nv_two_heads(dev))
		nv04_display_store_initial_head_owner(dev);
	nouveau_hw_save_vga_fonts(dev, 1);

	drm_mode_config_init(dev);
	drm_mode_create_scaling_mode_property(dev);
	drm_mode_create_dithering_property(dev);

	dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs;

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	/* older cards have a smaller addressable framebuffer extent */
	switch (dev_priv->card_type) {
	case NV_04:
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
		break;
	default:
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
		break;
	}

	dev->mode_config.fb_base = dev_priv->fb_phys;

	nv04_crtc_create(dev, 0);
	if (nv_two_heads(dev))
		nv04_crtc_create(dev, 1);

	for (i = 0; i < dcb->entries; i++) {
		struct dcb_entry *dcbent = &dcb->entry[i];

		switch (dcbent->type) {
		case OUTPUT_ANALOG:
			ret = nv04_dac_create(dev, dcbent);
			break;
		case OUTPUT_LVDS:
		case OUTPUT_TMDS:
			ret = nv04_dfp_create(dev, dcbent);
			break;
		case OUTPUT_TV:
			if (dcbent->location == DCB_LOC_ON_CHIP)
				ret = nv17_tv_create(dev, dcbent);
			else
				ret = nv04_tv_create(dev, dcbent);
			break;
		default:
			NV_WARN(dev, "DCB type %d not known\n", dcbent->type);
			continue;
		}

		/* NOTE(review): this is a no-op at the end of the loop body;
		 * failed encoder creation is silently skipped */
		if (ret)
			continue;
	}

	for (i = 0; i < dcb->connector.entries; i++)
		nouveau_connector_create(dev, &dcb->connector.entry[i]);

	/* Save previous state */
	NVLockVgaCrtcs(dev, false);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		crtc->funcs->save(crtc);

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct drm_encoder_helper_funcs *func = encoder->helper_private;

		func->save(encoder);
	}

	return 0;
}
175
/*
 * Tear down KMS: switch every CRTC off (set_config with no mode/fb),
 * put the hardware back into its saved pre-driver state, clean up the
 * mode config, and restore the saved VGA fonts.
 */
void
nv04_display_destroy(struct drm_device *dev)
{
	struct drm_encoder *encoder;
	struct drm_crtc *crtc;

	NV_DEBUG_KMS(dev, "\n");

	/* Turn every CRTC off. */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct drm_mode_set modeset = {
			.crtc = crtc,
		};

		crtc->funcs->set_config(&modeset);
	}

	/* Restore state */
	NVLockVgaCrtcs(dev, false);

	/* encoders first, then CRTCs, mirroring nv04_display_restore */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct drm_encoder_helper_funcs *func = encoder->helper_private;

		func->restore(encoder);
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		crtc->funcs->restore(crtc);

	drm_mode_config_cleanup(dev);

	nouveau_hw_save_vga_fonts(dev, 0);
}
209
/*
 * Reload the hardware state captured at driver load (encoders then
 * CRTCs), and hand CRTC ownership back to its original head.
 */
void
nv04_display_restore(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_encoder *encoder;
	struct drm_crtc *crtc;

	NVLockVgaCrtcs(dev, false);

	/* meh.. modeset apparently doesn't setup all the regs and depends
	 * on pre-existing state, for now load the state of the card *before*
	 * nouveau was loaded, and then do a modeset.
	 *
	 * best thing to do probably is to make save/restore routines not
	 * save/restore "pre-load" state, but more general so we can save
	 * on suspend too.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct drm_encoder_helper_funcs *func = encoder->helper_private;

		func->restore(encoder);
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		crtc->funcs->restore(crtc);

	if (nv_two_heads(dev)) {
		NV_INFO(dev, "Restoring CRTC_OWNER to %d.\n",
			dev_priv->crtc_owner);
		NVSetOwner(dev, dev_priv->crtc_owner);
	}

	NVLockVgaCrtcs(dev, true);
}
244
diff --git a/drivers/gpu/drm/nouveau/nv04_fb.c b/drivers/gpu/drm/nouveau/nv04_fb.c
new file mode 100644
index 000000000000..638cf601c427
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_fb.c
@@ -0,0 +1,21 @@
1#include "drmP.h"
2#include "drm.h"
3#include "nouveau_drv.h"
4#include "nouveau_drm.h"
5
/* One-off PFB setup for NV04-class cards; always returns 0. */
int
nv04_fb_init(struct drm_device *dev)
{
	/* This is what the DDX did for NV_ARCH_04, but a mmio-trace shows
	 * nvidia reading PFB_CFG_0, then writing back its original value.
	 * (which was 0x701114 in this case)
	 */

	nv_wr32(dev, NV04_PFB_CFG0, 0x1114);
	return 0;
}
17
/* Nothing to undo: nv04_fb_init only writes a single config register. */
void
nv04_fb_takedown(struct drm_device *dev)
{
}
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
new file mode 100644
index 000000000000..813b25cec726
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -0,0 +1,312 @@
1/*
2 * Copyright 2009 Ben Skeggs
3 * Copyright 2008 Stuart Bennett
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_dma.h"
28#include "nouveau_fbcon.h"
29
/* fbcon copyarea hook: accelerated screen-to-screen blit via the
 * NvSubImageBlit object; falls back to cfb_copyarea() when acceleration
 * is disabled or the GPU has locked up. */
30void
31nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
32{
33 struct nouveau_fbcon_par *par = info->par;
34 struct drm_device *dev = par->dev;
35 struct drm_nouveau_private *dev_priv = dev->dev_private;
36 struct nouveau_channel *chan = dev_priv->channel;
37
38 if (info->state != FBINFO_STATE_RUNNING)
39 return;
40
 /* On ring-space failure, nouveau_fbcon_gpu_lockup() presumably sets
  * FBINFO_HWACCEL_DISABLED so the next test takes the software path —
  * confirm in nouveau_fbcon.c. */
41 if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 4)) {
42 nouveau_fbcon_gpu_lockup(info);
43 }
44
45 if (info->flags & FBINFO_HWACCEL_DISABLED) {
46 cfb_copyarea(info, region);
47 return;
48 }
49
 /* Methods 0x300-0x308: src point, dst point, size (y in high 16 bits). */
50 BEGIN_RING(chan, NvSubImageBlit, 0x0300, 3);
51 OUT_RING(chan, (region->sy << 16) | region->sx);
52 OUT_RING(chan, (region->dy << 16) | region->dx);
53 OUT_RING(chan, (region->height << 16) | region->width);
54 FIRE_RING(chan);
55}
56
/* fbcon fillrect hook: accelerated rectangle fill via the NvSubGdiRect
 * object; falls back to cfb_fillrect() when acceleration is disabled or
 * the GPU has locked up. */
57void
58nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
59{
60 struct nouveau_fbcon_par *par = info->par;
61 struct drm_device *dev = par->dev;
62 struct drm_nouveau_private *dev_priv = dev->dev_private;
63 struct nouveau_channel *chan = dev_priv->channel;
64
65 if (info->state != FBINFO_STATE_RUNNING)
66 return;
67
68 if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 7)) {
69 nouveau_fbcon_gpu_lockup(info);
70 }
71
72 if (info->flags & FBINFO_HWACCEL_DISABLED) {
73 cfb_fillrect(info, rect);
74 return;
75 }
76
 /* 0x02fc selects the raster operation: 1 for non-COPY rops, 3 for COPY. */
77 BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1);
78 OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3);
79 BEGIN_RING(chan, NvSubGdiRect, 0x03fc, 1);
 /* True/direct-color visuals index the pseudo palette; other visuals
  * pass the raw color value through. */
80 if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
81 info->fix.visual == FB_VISUAL_DIRECTCOLOR)
82 OUT_RING(chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
83 else
84 OUT_RING(chan, rect->color);
 /* 0x400/0x404: rect origin (x in high 16 bits) and size (w high). */
85 BEGIN_RING(chan, NvSubGdiRect, 0x0400, 2);
86 OUT_RING(chan, (rect->dx << 16) | rect->dy);
87 OUT_RING(chan, (rect->width << 16) | rect->height);
88 FIRE_RING(chan);
89}
90
/* fbcon imageblit hook: accelerated expansion of a 1-bpp glyph bitmap
 * through the GDI object (methods 0x0be4/0x0c00).  Color images and all
 * fallback cases go through cfb_imageblit(). */
91void
92nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
93{
94 struct nouveau_fbcon_par *par = info->par;
95 struct drm_device *dev = par->dev;
96 struct drm_nouveau_private *dev_priv = dev->dev_private;
97 struct nouveau_channel *chan = dev_priv->channel;
98 uint32_t fg;
99 uint32_t bg;
100 uint32_t dsize;
101 uint32_t width;
102 uint32_t *data = (uint32_t *)image->data;
103
104 if (info->state != FBINFO_STATE_RUNNING)
105 return;
106
 /* Only 1-bpp (monochrome/glyph) images are accelerated here. */
107 if (image->depth != 1) {
108 cfb_imageblit(info, image);
109 return;
110 }
111
112 if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 8)) {
113 nouveau_fbcon_gpu_lockup(info);
114 }
115
116 if (info->flags & FBINFO_HWACCEL_DISABLED) {
117 cfb_imageblit(info, image);
118 return;
119 }
120
 /* Source rows are padded to 8 pixels; dsize = bitmap size in 32-bit
  * words (rounded up). */
121 width = ALIGN(image->width, 8);
122 dsize = ALIGN(width * image->height, 32) >> 5;
123
124 if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
125 info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
126 fg = ((uint32_t *) info->pseudo_palette)[image->fg_color];
127 bg = ((uint32_t *) info->pseudo_palette)[image->bg_color];
128 } else {
129 fg = image->fg_color;
130 bg = image->bg_color;
131 }
132
 /* 0x0be4 block: clip point, clip extent, bg/fg colors, padded source
  * size, real source size, destination point. */
133 BEGIN_RING(chan, NvSubGdiRect, 0x0be4, 7);
134 OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
135 OUT_RING(chan, ((image->dy + image->height) << 16) |
136 ((image->dx + image->width) & 0xffff));
137 OUT_RING(chan, bg);
138 OUT_RING(chan, fg);
139 OUT_RING(chan, (image->height << 16) | width);
140 OUT_RING(chan, (image->height << 16) | image->width);
141 OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
142
 /* Stream the bitmap in chunks of at most 128 dwords (the method
  * count limit per BEGIN_RING here). */
143 while (dsize) {
144 int iter_len = dsize > 128 ? 128 : dsize;
145
146 if (RING_SPACE(chan, iter_len + 1)) {
147 nouveau_fbcon_gpu_lockup(info);
148 cfb_imageblit(info, image);
149 return;
150 }
151
152 BEGIN_RING(chan, NvSubGdiRect, 0x0c00, iter_len);
153 OUT_RINGp(chan, data, iter_len);
154 data += iter_len;
155 dsize -= iter_len;
156 }
157
158 FIRE_RING(chan);
159}
160
/* Allocate a graphics object of the given class on the kernel channel
 * and bind it to 'handle' in the channel's hash table.
 * Returns 0 on success or a negative errno. */
161static int
162nv04_fbcon_grobj_new(struct drm_device *dev, int class, uint32_t handle)
163{
164 struct drm_nouveau_private *dev_priv = dev->dev_private;
165 struct nouveau_gpuobj *obj = NULL;
166 int ret;
167
168 ret = nouveau_gpuobj_gr_new(dev_priv->channel, class, &obj);
169 if (ret)
170 return ret;
171
 /* NOTE(review): if ref_add fails, 'obj' appears to be leaked (no
  * gpuobj free on this path) — confirm whether channel teardown
  * reclaims it. */
172 ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, handle, obj, NULL);
173 if (ret)
174 return ret;
175
176 return 0;
177}
178
/* One-time setup of the 2D acceleration objects used by the fbcon hooks
 * above: creates surface/clip/rop/pattern/gdi/blit objects, then emits
 * the initial state for each.  Returns 0 (even if the initial push
 * fails — acceleration is just disabled) or a negative errno when an
 * object cannot be created or the pixel format is unsupported. */
179int
180nv04_fbcon_accel_init(struct fb_info *info)
181{
182 struct nouveau_fbcon_par *par = info->par;
183 struct drm_device *dev = par->dev;
184 struct drm_nouveau_private *dev_priv = dev->dev_private;
185 struct nouveau_channel *chan = dev_priv->channel;
186 const int sub = NvSubCtxSurf2D;
187 int surface_fmt, pattern_fmt, rect_fmt;
188 int ret;
189
 /* Map framebuffer depth to hardware surface/pattern/rect formats. */
190 switch (info->var.bits_per_pixel) {
191 case 8:
192 surface_fmt = 1;
193 pattern_fmt = 3;
194 rect_fmt = 3;
195 break;
196 case 16:
197 surface_fmt = 4;
198 pattern_fmt = 1;
199 rect_fmt = 1;
200 break;
201 case 32:
202 switch (info->var.transp.length) {
203 case 0: /* depth 24 */
204 case 8: /* depth 32 */
205 break;
206 default:
207 return -EINVAL;
208 }
209
210 surface_fmt = 6;
211 pattern_fmt = 3;
212 rect_fmt = 3;
213 break;
214 default:
215 return -EINVAL;
216 }
217
 /* NV10+ uses newer object classes for surf2d (0x62) and blit (0x9f). */
218 ret = nv04_fbcon_grobj_new(dev, dev_priv->card_type >= NV_10 ?
219 0x0062 : 0x0042, NvCtxSurf2D);
220 if (ret)
221 return ret;
222
223 ret = nv04_fbcon_grobj_new(dev, 0x0019, NvClipRect);
224 if (ret)
225 return ret;
226
227 ret = nv04_fbcon_grobj_new(dev, 0x0043, NvRop);
228 if (ret)
229 return ret;
230
231 ret = nv04_fbcon_grobj_new(dev, 0x0044, NvImagePatt);
232 if (ret)
233 return ret;
234
235 ret = nv04_fbcon_grobj_new(dev, 0x004a, NvGdiRect);
236 if (ret)
237 return ret;
238
239 ret = nv04_fbcon_grobj_new(dev, dev_priv->card_type >= NV_10 ?
240 0x009f : 0x005f, NvImageBlit);
241 if (ret)
242 return ret;
243
 /* The full initial state below is exactly 49 dwords. */
244 if (RING_SPACE(chan, 49)) {
245 nouveau_fbcon_gpu_lockup(info);
246 return 0;
247 }
248
 /* Surface2D: bind DMA objects, set format, pitch (src|dst), and both
  * offsets relative to the start of VRAM. */
249 BEGIN_RING(chan, sub, 0x0000, 1);
250 OUT_RING(chan, NvCtxSurf2D);
251 BEGIN_RING(chan, sub, 0x0184, 2);
252 OUT_RING(chan, NvDmaFB);
253 OUT_RING(chan, NvDmaFB);
254 BEGIN_RING(chan, sub, 0x0300, 4);
255 OUT_RING(chan, surface_fmt);
256 OUT_RING(chan, info->fix.line_length | (info->fix.line_length << 16));
257 OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
258 OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
259
 /* ROP: 0x55 (pattern invert / XOR family per GDI rop encoding). */
260 BEGIN_RING(chan, sub, 0x0000, 1);
261 OUT_RING(chan, NvRop);
262 BEGIN_RING(chan, sub, 0x0300, 1);
263 OUT_RING(chan, 0x55);
264
 /* Pattern: solid all-ones monochrome pattern in the fb's format. */
265 BEGIN_RING(chan, sub, 0x0000, 1);
266 OUT_RING(chan, NvImagePatt);
267 BEGIN_RING(chan, sub, 0x0300, 8);
268 OUT_RING(chan, pattern_fmt);
269#ifdef __BIG_ENDIAN
270 OUT_RING(chan, 2);
271#else
272 OUT_RING(chan, 1);
273#endif
274 OUT_RING(chan, 0);
275 OUT_RING(chan, 1);
276 OUT_RING(chan, ~0);
277 OUT_RING(chan, ~0);
278 OUT_RING(chan, ~0);
279 OUT_RING(chan, ~0);
280
 /* Clip: whole virtual screen. */
281 BEGIN_RING(chan, sub, 0x0000, 1);
282 OUT_RING(chan, NvClipRect);
283 BEGIN_RING(chan, sub, 0x0300, 2);
284 OUT_RING(chan, 0);
285 OUT_RING(chan, (info->var.yres_virtual << 16) | info->var.xres_virtual);
286
 /* ImageBlit: bind surface, set operation 3 (copy). */
287 BEGIN_RING(chan, NvSubImageBlit, 0x0000, 1);
288 OUT_RING(chan, NvImageBlit);
289 BEGIN_RING(chan, NvSubImageBlit, 0x019c, 1);
290 OUT_RING(chan, NvCtxSurf2D);
291 BEGIN_RING(chan, NvSubImageBlit, 0x02fc, 1);
292 OUT_RING(chan, 3);
293
 /* GdiRect: bind surface/pattern/rop, set mono format, color format
  * and operation 3 (copy). */
294 BEGIN_RING(chan, NvSubGdiRect, 0x0000, 1);
295 OUT_RING(chan, NvGdiRect);
296 BEGIN_RING(chan, NvSubGdiRect, 0x0198, 1);
297 OUT_RING(chan, NvCtxSurf2D);
298 BEGIN_RING(chan, NvSubGdiRect, 0x0188, 2);
299 OUT_RING(chan, NvImagePatt);
300 OUT_RING(chan, NvRop);
301 BEGIN_RING(chan, NvSubGdiRect, 0x0304, 1);
302 OUT_RING(chan, 1);
303 BEGIN_RING(chan, NvSubGdiRect, 0x0300, 1);
304 OUT_RING(chan, rect_fmt);
305 BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1);
306 OUT_RING(chan, 3);
307
308 FIRE_RING(chan);
309
310 return 0;
311}
312
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
new file mode 100644
index 000000000000..66fe55983b6e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_fifo.c
@@ -0,0 +1,310 @@
1/*
2 * Copyright (C) 2007 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm.h"
29#include "nouveau_drv.h"
30
31#define NV04_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV04_RAMFC__SIZE))
32#define NV04_RAMFC__SIZE 32
33#define NV04_RAMFC_DMA_PUT 0x00
34#define NV04_RAMFC_DMA_GET 0x04
35#define NV04_RAMFC_DMA_INSTANCE 0x08
36#define NV04_RAMFC_DMA_STATE 0x0C
37#define NV04_RAMFC_DMA_FETCH 0x10
38#define NV04_RAMFC_ENGINE 0x14
39#define NV04_RAMFC_PULL1_ENGINE 0x18
40
41#define RAMFC_WR(offset, val) nv_wo32(dev, chan->ramfc->gpuobj, \
42 NV04_RAMFC_##offset/4, (val))
43#define RAMFC_RD(offset) nv_ro32(dev, chan->ramfc->gpuobj, \
44 NV04_RAMFC_##offset/4)
45
/* Stop PFIFO processing: clear the DMA pusher enable bit, the CACHE1
 * pusher, and the CACHE1 puller. */
46void
47nv04_fifo_disable(struct drm_device *dev)
48{
49 uint32_t tmp;
50
51 tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH);
52 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, tmp & ~1);
53 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
 /* NOTE(review): reads PULL1 but writes PULL0 with that value — this
  * asymmetry looks deliberate in this codebase, but confirm against
  * the register documentation. */
54 tmp = nv_rd32(dev, NV03_PFIFO_CACHE1_PULL1);
55 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, tmp & ~1);
56}
57
/* Re-enable PFIFO processing: turn the CACHE1 pusher and puller back on. */
58void
59nv04_fifo_enable(struct drm_device *dev)
60{
61 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
62 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
63}
64
/* Enable/disable PFIFO channel reassignment (context switching).
 * Returns the previous state so the caller can restore it. */
64bool
65nv04_fifo_reassign(struct drm_device *dev, bool enable)
66{
67 uint32_t reassign = nv_rd32(dev, NV03_PFIFO_CACHES);
68
69 nv_wr32(dev, NV03_PFIFO_CACHES, enable ? 1 : 0);
70 return (reassign == 1);
71}
73
/* Wait for CACHE1 to drain (GET catches up with PUT).  Polls with a
 * PTIMER-based timeout of 100000000 ticks (presumably nanoseconds,
 * i.e. 100 ms — confirm PTIMER resolution).  Returns false and logs an
 * error on timeout. */
74bool
75nv04_fifo_cache_flush(struct drm_device *dev)
76{
77 struct drm_nouveau_private *dev_priv = dev->dev_private;
78 struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
79 uint64_t start = ptimer->read(dev);
80
81 do {
82 if (nv_rd32(dev, NV03_PFIFO_CACHE1_GET) ==
83 nv_rd32(dev, NV03_PFIFO_CACHE1_PUT))
84 return true;
85
86 } while (ptimer->read(dev) - start < 100000000);
87
88 NV_ERROR(dev, "Timeout flushing the PFIFO cache.\n");
89
90 return false;
91}
92
/* Enable/disable the CACHE1 puller; when disabling, also clear the
 * hash state.  Returns whether the puller was previously enabled. */
93bool
94nv04_fifo_cache_pull(struct drm_device *dev, bool enable)
95{
96 uint32_t pull = nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0);
97
98 if (enable) {
99 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, pull | 1);
100 } else {
101 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, pull & ~1);
102 nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
103 }
104
105 return !!(pull & 1);
106}
107
/* Return the id of the channel currently active on PFIFO CACHE1. */
108int
109nv04_fifo_channel_id(struct drm_device *dev)
110{
111 return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
112 NV03_PFIFO_CACHE1_PUSH1_CHID_MASK;
113}
114
/* Allocate and initialise the RAMFC entry for a new channel, then mark
 * the channel's FIFO as DMA-mode.  Returns 0 or a negative errno. */
115int
116nv04_fifo_create_context(struct nouveau_channel *chan)
117{
118 struct drm_device *dev = chan->dev;
119 struct drm_nouveau_private *dev_priv = dev->dev_private;
120 unsigned long flags;
121 int ret;
122
 /* RAMFC entries live at fixed offsets in instance memory; wrap the
  * slot for this channel in a fake gpuobj (zeroed on alloc and free). */
123 ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(chan->id), ~0,
124 NV04_RAMFC__SIZE,
125 NVOBJ_FLAG_ZERO_ALLOC |
126 NVOBJ_FLAG_ZERO_FREE,
127 NULL, &chan->ramfc);
128 if (ret)
129 return ret;
130
131 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
132
 /* Setup initial state; fields not written here (DMA_STATE, ENGINE,
  * PULL1_ENGINE) stay zero thanks to NVOBJ_FLAG_ZERO_ALLOC. */
133 /* Setup initial state */
134 dev_priv->engine.instmem.prepare_access(dev, true);
135 RAMFC_WR(DMA_PUT, chan->pushbuf_base);
136 RAMFC_WR(DMA_GET, chan->pushbuf_base);
137 RAMFC_WR(DMA_INSTANCE, chan->pushbuf->instance >> 4);
138 RAMFC_WR(DMA_FETCH, (NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
139 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
140 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
141#ifdef __BIG_ENDIAN
142 NV_PFIFO_CACHE1_BIG_ENDIAN |
143#endif
144 0));
145 dev_priv->engine.instmem.finish_access(dev);
146
147 /* enable the fifo dma operation */
148 nv_wr32(dev, NV04_PFIFO_MODE,
149 nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
150
151 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
152 return 0;
153}
154
/* Tear down a channel's FIFO state: drop its DMA-mode bit and release
 * the RAMFC entry (zeroed on free via NVOBJ_FLAG_ZERO_FREE). */
155void
156nv04_fifo_destroy_context(struct nouveau_channel *chan)
157{
158 struct drm_device *dev = chan->dev;
159
160 nv_wr32(dev, NV04_PFIFO_MODE,
161 nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
162
163 nouveau_gpuobj_ref_del(dev, &chan->ramfc);
164}
165
/* Copy a channel's RAMFC state into the live CACHE1 registers and reset
 * the cache pointers.  The fc+0/4/8/... offsets correspond to the
 * NV04_RAMFC_* layout defined at the top of this file. */
166static void
167nv04_fifo_do_load_context(struct drm_device *dev, int chid)
168{
169 struct drm_nouveau_private *dev_priv = dev->dev_private;
170 uint32_t fc = NV04_RAMFC(chid), tmp;
171
172 dev_priv->engine.instmem.prepare_access(dev, false);
173
174 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
175 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
 /* DMA_INSTANCE word packs instance (low 16) and dcount (high 16). */
176 tmp = nv_ri32(dev, fc + 8);
177 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF);
178 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, tmp >> 16);
179 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 12));
180 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, nv_ri32(dev, fc + 16));
181 nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 20));
182 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 24));
183
184 dev_priv->engine.instmem.finish_access(dev);
185
186 nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
187 nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
188}
189
/* Make 'chan' the active PFIFO channel: select it in PUSH1 (DMA mode),
 * load its RAMFC state, enable the DMA pusher.  Always returns 0. */
190int
191nv04_fifo_load_context(struct nouveau_channel *chan)
192{
193 uint32_t tmp;
194
195 nv_wr32(chan->dev, NV03_PFIFO_CACHE1_PUSH1,
196 NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id);
197 nv04_fifo_do_load_context(chan->dev, chan->id);
198 nv_wr32(chan->dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1);
199
200 /* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */
201 tmp = nv_rd32(chan->dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31);
202 nv_wr32(chan->dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp);
203
204 return 0;
205}
206
/* Save the currently-active channel's CACHE1 state back into its RAMFC
 * entry, then switch PFIFO to the reserved "null" channel (highest id).
 * Returns 0, or -EINVAL if the active channel id has no software state. */
207int
208nv04_fifo_unload_context(struct drm_device *dev)
209{
210 struct drm_nouveau_private *dev_priv = dev->dev_private;
211 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
212 struct nouveau_channel *chan = NULL;
213 uint32_t tmp;
214 int chid;
215
 /* Nothing to save when no valid channel is resident. */
216 chid = pfifo->channel_id(dev);
217 if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
218 return 0;
219
220 chan = dev_priv->fifos[chid];
221 if (!chan) {
222 NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
223 return -EINVAL;
224 }
225
 /* Mirror of do_load_context: write each CACHE1 register back into
  * the channel's RAMFC slot (RAMFC_WR uses 'chan' implicitly). */
226 dev_priv->engine.instmem.prepare_access(dev, true);
227 RAMFC_WR(DMA_PUT, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
228 RAMFC_WR(DMA_GET, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
229 tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16;
230 tmp |= nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE);
231 RAMFC_WR(DMA_INSTANCE, tmp);
232 RAMFC_WR(DMA_STATE, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE));
233 RAMFC_WR(DMA_FETCH, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH));
234 RAMFC_WR(ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
235 RAMFC_WR(PULL1_ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));
236 dev_priv->engine.instmem.finish_access(dev);
237
238 nv04_fifo_do_load_context(dev, pfifo->channels - 1);
239 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
240 return 0;
241}
242
/* Pulse the PFIFO enable bit in PMC to reset the engine, then zero the
 * raw PFIFO control/state registers (magic offsets taken from mmio
 * traces — meanings not all documented). */
243static void
244nv04_fifo_init_reset(struct drm_device *dev)
245{
246 nv_wr32(dev, NV03_PMC_ENABLE,
247 nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO);
248 nv_wr32(dev, NV03_PMC_ENABLE,
249 nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PFIFO);
250
251 nv_wr32(dev, 0x003224, 0x000f0078);
252 nv_wr32(dev, 0x002044, 0x0101ffff);
253 nv_wr32(dev, 0x002040, 0x000000ff);
254 nv_wr32(dev, 0x002500, 0x00000000);
255 nv_wr32(dev, 0x003000, 0x00000000);
256 nv_wr32(dev, 0x003050, 0x00000000);
257 nv_wr32(dev, 0x003200, 0x00000000);
258 nv_wr32(dev, 0x003250, 0x00000000);
259 nv_wr32(dev, 0x003220, 0x00000000);
260
 /* NOTE(review): 0x003250 is written twice (also above) — harmless,
  * but possibly a copy/paste slip in the original trace replay. */
261 nv_wr32(dev, 0x003250, 0x00000000);
262 nv_wr32(dev, 0x003270, 0x00000000);
263 nv_wr32(dev, 0x003210, 0x00000000);
264}
265
/* Point PFIFO at the instance-memory tables: RAMHT (with search stride
 * and size encoded), RAMRO, and RAMFC. */
266static void
267nv04_fifo_init_ramxx(struct drm_device *dev)
268{
269 struct drm_nouveau_private *dev_priv = dev->dev_private;
270
271 nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
272 ((dev_priv->ramht_bits - 9) << 16) |
273 (dev_priv->ramht_offset >> 8));
274 nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8);
275 nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc_offset >> 8);
276}
277
/* Acknowledge any pending PFIFO interrupts (0x2100) and unmask them
 * all (0x2140). */
278static void
279nv04_fifo_init_intr(struct drm_device *dev)
280{
281 nv_wr32(dev, 0x002100, 0xffffffff);
282 nv_wr32(dev, 0x002140, 0xffffffff);
283}
284
/* Full PFIFO bring-up: reset, table setup, load the reserved null
 * channel, enable interrupts and processing, then re-mark every
 * already-existing channel as DMA-mode.  Always returns 0. */
285int
286nv04_fifo_init(struct drm_device *dev)
287{
288 struct drm_nouveau_private *dev_priv = dev->dev_private;
289 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
290 int i;
291
292 nv04_fifo_init_reset(dev);
293 nv04_fifo_init_ramxx(dev);
294
 /* Park PFIFO on the reserved highest channel id. */
295 nv04_fifo_do_load_context(dev, pfifo->channels - 1);
296 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
297
298 nv04_fifo_init_intr(dev);
299 pfifo->enable(dev);
300
 /* Re-apply DMA mode for channels created before this (re)init. */
301 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
302 if (dev_priv->fifos[i]) {
303 uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
304 nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
305 }
306 }
307
308 return 0;
309}
310
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
new file mode 100644
index 000000000000..e260986ea65a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
@@ -0,0 +1,584 @@
1/*
2 * Copyright 2007 Stephane Marchesin
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include "drmP.h"
26#include "drm.h"
27#include "nouveau_drm.h"
28#include "nouveau_drv.h"
29
/* PGRAPH MMIO registers that make up an NV04 channel's graphics context.
 * The array order defines the layout of graph_state.nv04[]: on a channel
 * switch, nv04_graph_unload_context() reads each register into the
 * corresponding slot and nv04_graph_load_context() writes them back. */
30static uint32_t nv04_graph_ctx_regs[] = {
31 0x0040053c,
32 0x00400544,
33 0x00400540,
34 0x00400548,
35 NV04_PGRAPH_CTX_SWITCH1,
36 NV04_PGRAPH_CTX_SWITCH2,
37 NV04_PGRAPH_CTX_SWITCH3,
38 NV04_PGRAPH_CTX_SWITCH4,
39 NV04_PGRAPH_CTX_CACHE1,
40 NV04_PGRAPH_CTX_CACHE2,
41 NV04_PGRAPH_CTX_CACHE3,
42 NV04_PGRAPH_CTX_CACHE4,
43 0x00400184,
44 0x004001a4,
45 0x004001c4,
46 0x004001e4,
47 0x00400188,
48 0x004001a8,
49 0x004001c8,
50 0x004001e8,
51 0x0040018c,
52 0x004001ac,
53 0x004001cc,
54 0x004001ec,
55 0x00400190,
56 0x004001b0,
57 0x004001d0,
58 0x004001f0,
59 0x00400194,
60 0x004001b4,
61 0x004001d4,
62 0x004001f4,
63 0x00400198,
64 0x004001b8,
65 0x004001d8,
66 0x004001f8,
67 0x0040019c,
68 0x004001bc,
69 0x004001dc,
70 0x004001fc,
71 0x00400174,
72 NV04_PGRAPH_DMA_START_0,
73 NV04_PGRAPH_DMA_START_1,
74 NV04_PGRAPH_DMA_LENGTH,
75 NV04_PGRAPH_DMA_MISC,
76 NV04_PGRAPH_DMA_PITCH,
77 NV04_PGRAPH_BOFFSET0,
78 NV04_PGRAPH_BBASE0,
79 NV04_PGRAPH_BLIMIT0,
80 NV04_PGRAPH_BOFFSET1,
81 NV04_PGRAPH_BBASE1,
82 NV04_PGRAPH_BLIMIT1,
83 NV04_PGRAPH_BOFFSET2,
84 NV04_PGRAPH_BBASE2,
85 NV04_PGRAPH_BLIMIT2,
86 NV04_PGRAPH_BOFFSET3,
87 NV04_PGRAPH_BBASE3,
88 NV04_PGRAPH_BLIMIT3,
89 NV04_PGRAPH_BOFFSET4,
90 NV04_PGRAPH_BBASE4,
91 NV04_PGRAPH_BLIMIT4,
92 NV04_PGRAPH_BOFFSET5,
93 NV04_PGRAPH_BBASE5,
94 NV04_PGRAPH_BLIMIT5,
95 NV04_PGRAPH_BPITCH0,
96 NV04_PGRAPH_BPITCH1,
97 NV04_PGRAPH_BPITCH2,
98 NV04_PGRAPH_BPITCH3,
99 NV04_PGRAPH_BPITCH4,
100 NV04_PGRAPH_SURFACE,
101 NV04_PGRAPH_STATE,
102 NV04_PGRAPH_BSWIZZLE2,
103 NV04_PGRAPH_BSWIZZLE5,
104 NV04_PGRAPH_BPIXEL,
105 NV04_PGRAPH_NOTIFY,
106 NV04_PGRAPH_PATT_COLOR0,
107 NV04_PGRAPH_PATT_COLOR1,
108 NV04_PGRAPH_PATT_COLORRAM+0x00,
109 NV04_PGRAPH_PATT_COLORRAM+0x04,
110 NV04_PGRAPH_PATT_COLORRAM+0x08,
111 NV04_PGRAPH_PATT_COLORRAM+0x0c,
112 NV04_PGRAPH_PATT_COLORRAM+0x10,
113 NV04_PGRAPH_PATT_COLORRAM+0x14,
114 NV04_PGRAPH_PATT_COLORRAM+0x18,
115 NV04_PGRAPH_PATT_COLORRAM+0x1c,
116 NV04_PGRAPH_PATT_COLORRAM+0x20,
117 NV04_PGRAPH_PATT_COLORRAM+0x24,
118 NV04_PGRAPH_PATT_COLORRAM+0x28,
119 NV04_PGRAPH_PATT_COLORRAM+0x2c,
120 NV04_PGRAPH_PATT_COLORRAM+0x30,
121 NV04_PGRAPH_PATT_COLORRAM+0x34,
122 NV04_PGRAPH_PATT_COLORRAM+0x38,
123 NV04_PGRAPH_PATT_COLORRAM+0x3c,
124 NV04_PGRAPH_PATT_COLORRAM+0x40,
125 NV04_PGRAPH_PATT_COLORRAM+0x44,
126 NV04_PGRAPH_PATT_COLORRAM+0x48,
127 NV04_PGRAPH_PATT_COLORRAM+0x4c,
128 NV04_PGRAPH_PATT_COLORRAM+0x50,
129 NV04_PGRAPH_PATT_COLORRAM+0x54,
130 NV04_PGRAPH_PATT_COLORRAM+0x58,
131 NV04_PGRAPH_PATT_COLORRAM+0x5c,
132 NV04_PGRAPH_PATT_COLORRAM+0x60,
133 NV04_PGRAPH_PATT_COLORRAM+0x64,
134 NV04_PGRAPH_PATT_COLORRAM+0x68,
135 NV04_PGRAPH_PATT_COLORRAM+0x6c,
136 NV04_PGRAPH_PATT_COLORRAM+0x70,
137 NV04_PGRAPH_PATT_COLORRAM+0x74,
138 NV04_PGRAPH_PATT_COLORRAM+0x78,
139 NV04_PGRAPH_PATT_COLORRAM+0x7c,
140 NV04_PGRAPH_PATT_COLORRAM+0x80,
141 NV04_PGRAPH_PATT_COLORRAM+0x84,
142 NV04_PGRAPH_PATT_COLORRAM+0x88,
143 NV04_PGRAPH_PATT_COLORRAM+0x8c,
144 NV04_PGRAPH_PATT_COLORRAM+0x90,
145 NV04_PGRAPH_PATT_COLORRAM+0x94,
146 NV04_PGRAPH_PATT_COLORRAM+0x98,
147 NV04_PGRAPH_PATT_COLORRAM+0x9c,
148 NV04_PGRAPH_PATT_COLORRAM+0xa0,
149 NV04_PGRAPH_PATT_COLORRAM+0xa4,
150 NV04_PGRAPH_PATT_COLORRAM+0xa8,
151 NV04_PGRAPH_PATT_COLORRAM+0xac,
152 NV04_PGRAPH_PATT_COLORRAM+0xb0,
153 NV04_PGRAPH_PATT_COLORRAM+0xb4,
154 NV04_PGRAPH_PATT_COLORRAM+0xb8,
155 NV04_PGRAPH_PATT_COLORRAM+0xbc,
156 NV04_PGRAPH_PATT_COLORRAM+0xc0,
157 NV04_PGRAPH_PATT_COLORRAM+0xc4,
158 NV04_PGRAPH_PATT_COLORRAM+0xc8,
159 NV04_PGRAPH_PATT_COLORRAM+0xcc,
160 NV04_PGRAPH_PATT_COLORRAM+0xd0,
161 NV04_PGRAPH_PATT_COLORRAM+0xd4,
162 NV04_PGRAPH_PATT_COLORRAM+0xd8,
163 NV04_PGRAPH_PATT_COLORRAM+0xdc,
164 NV04_PGRAPH_PATT_COLORRAM+0xe0,
165 NV04_PGRAPH_PATT_COLORRAM+0xe4,
166 NV04_PGRAPH_PATT_COLORRAM+0xe8,
167 NV04_PGRAPH_PATT_COLORRAM+0xec,
168 NV04_PGRAPH_PATT_COLORRAM+0xf0,
169 NV04_PGRAPH_PATT_COLORRAM+0xf4,
170 NV04_PGRAPH_PATT_COLORRAM+0xf8,
171 NV04_PGRAPH_PATT_COLORRAM+0xfc,
172 NV04_PGRAPH_PATTERN,
173 0x0040080c,
174 NV04_PGRAPH_PATTERN_SHAPE,
175 0x00400600,
176 NV04_PGRAPH_ROP3,
177 NV04_PGRAPH_CHROMA,
178 NV04_PGRAPH_BETA_AND,
179 NV04_PGRAPH_BETA_PREMULT,
180 NV04_PGRAPH_CONTROL0,
181 NV04_PGRAPH_CONTROL1,
182 NV04_PGRAPH_CONTROL2,
183 NV04_PGRAPH_BLEND,
184 NV04_PGRAPH_STORED_FMT,
185 NV04_PGRAPH_SOURCE_COLOR,
186 0x00400560,
187 0x00400568,
188 0x00400564,
189 0x0040056c,
190 0x00400400,
191 0x00400480,
192 0x00400404,
193 0x00400484,
194 0x00400408,
195 0x00400488,
196 0x0040040c,
197 0x0040048c,
198 0x00400410,
199 0x00400490,
200 0x00400414,
201 0x00400494,
202 0x00400418,
203 0x00400498,
204 0x0040041c,
205 0x0040049c,
206 0x00400420,
207 0x004004a0,
208 0x00400424,
209 0x004004a4,
210 0x00400428,
211 0x004004a8,
212 0x0040042c,
213 0x004004ac,
214 0x00400430,
215 0x004004b0,
216 0x00400434,
217 0x004004b4,
218 0x00400438,
219 0x004004b8,
220 0x0040043c,
221 0x004004bc,
222 0x00400440,
223 0x004004c0,
224 0x00400444,
225 0x004004c4,
226 0x00400448,
227 0x004004c8,
228 0x0040044c,
229 0x004004cc,
230 0x00400450,
231 0x004004d0,
232 0x00400454,
233 0x004004d4,
234 0x00400458,
235 0x004004d8,
236 0x0040045c,
237 0x004004dc,
238 0x00400460,
239 0x004004e0,
240 0x00400464,
241 0x004004e4,
242 0x00400468,
243 0x004004e8,
244 0x0040046c,
245 0x004004ec,
246 0x00400470,
247 0x004004f0,
248 0x00400474,
249 0x004004f4,
250 0x00400478,
251 0x004004f8,
252 0x0040047c,
253 0x004004fc,
254 0x00400534,
255 0x00400538,
256 0x00400514,
257 0x00400518,
258 0x0040051c,
259 0x00400520,
260 0x00400524,
261 0x00400528,
262 0x0040052c,
263 0x00400530,
264 0x00400d00,
265 0x00400d40,
266 0x00400d80,
267 0x00400d04,
268 0x00400d44,
269 0x00400d84,
270 0x00400d08,
271 0x00400d48,
272 0x00400d88,
273 0x00400d0c,
274 0x00400d4c,
275 0x00400d8c,
276 0x00400d10,
277 0x00400d50,
278 0x00400d90,
279 0x00400d14,
280 0x00400d54,
281 0x00400d94,
282 0x00400d18,
283 0x00400d58,
284 0x00400d98,
285 0x00400d1c,
286 0x00400d5c,
287 0x00400d9c,
288 0x00400d20,
289 0x00400d60,
290 0x00400da0,
291 0x00400d24,
292 0x00400d64,
293 0x00400da4,
294 0x00400d28,
295 0x00400d68,
296 0x00400da8,
297 0x00400d2c,
298 0x00400d6c,
299 0x00400dac,
300 0x00400d30,
301 0x00400d70,
302 0x00400db0,
303 0x00400d34,
304 0x00400d74,
305 0x00400db4,
306 0x00400d38,
307 0x00400d78,
308 0x00400db8,
309 0x00400d3c,
310 0x00400d7c,
311 0x00400dbc,
312 0x00400590,
313 0x00400594,
314 0x00400598,
315 0x0040059c,
316 0x004005a8,
317 0x004005ac,
318 0x004005b0,
319 0x004005b4,
320 0x004005c0,
321 0x004005c4,
322 0x004005c8,
323 0x004005cc,
324 0x004005d0,
325 0x004005d4,
326 0x004005d8,
327 0x004005dc,
328 0x004005e0,
329 NV04_PGRAPH_PASSTHRU_0,
330 NV04_PGRAPH_PASSTHRU_1,
331 NV04_PGRAPH_PASSTHRU_2,
332 NV04_PGRAPH_DVD_COLORFMT,
333 NV04_PGRAPH_SCALED_FORMAT,
334 NV04_PGRAPH_MISC24_0,
335 NV04_PGRAPH_MISC24_1,
336 NV04_PGRAPH_MISC24_2,
337 0x00400500,
338 0x00400504,
339 NV04_PGRAPH_VALID1,
340 NV04_PGRAPH_VALID2,
341 NV04_PGRAPH_DEBUG_3
342};
343
/* Per-channel software copy of the PGRAPH context; one slot per entry
 * in nv04_graph_ctx_regs above. */
344struct graph_state {
345 int nv04[ARRAY_SIZE(nv04_graph_ctx_regs)];
346};
347
/* Return the channel currently resident on PGRAPH, or NULL when no
 * valid channel is loaded (CTX_CONTROL bit 16 clear, or the id is the
 * reserved out-of-range value). */
348struct nouveau_channel *
349nv04_graph_channel(struct drm_device *dev)
350{
351 struct drm_nouveau_private *dev_priv = dev->dev_private;
352 int chid = dev_priv->engine.fifo.channels;
353
354 if (nv_rd32(dev, NV04_PGRAPH_CTX_CONTROL) & 0x00010000)
355 chid = nv_rd32(dev, NV04_PGRAPH_CTX_USER) >> 24;
356
357 if (chid >= dev_priv->engine.fifo.channels)
358 return NULL;
359
360 return dev_priv->fifos[chid];
361}
362
/* Switch PGRAPH to the channel currently active on PFIFO: stall PGRAPH
 * FIFO access, save the outgoing context, load the incoming one, then
 * resume. */
363void
364nv04_graph_context_switch(struct drm_device *dev)
365{
366 struct drm_nouveau_private *dev_priv = dev->dev_private;
367 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
368 struct nouveau_channel *chan = NULL;
369 int chid;
370
371 pgraph->fifo_access(dev, false);
372 nouveau_wait_for_idle(dev);
373
374 /* If previous context is valid, we need to save it */
375 pgraph->unload_context(dev);
376
377 /* Load context for next channel */
378 chid = dev_priv->engine.fifo.channel_id(dev);
379 chan = dev_priv->fifos[chid];
380 if (chan)
381 nv04_graph_load_context(chan);
382
383 pgraph->fifo_access(dev, true);
384}
385
/* Return a pointer to the context slot that shadows MMIO register
 * 'reg', or NULL if the register is not part of the saved context.
 * NOTE(review): nv04[] is int while the return type is uint32_t* —
 * same size in practice, but the signedness mismatch is worth tidying. */
386static uint32_t *ctx_reg(struct graph_state *ctx, uint32_t reg)
387{
388 int i;
389
390 for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++) {
391 if (nv04_graph_ctx_regs[i] == reg)
392 return &ctx->nv04[i];
393 }
394
395 return NULL;
396}
397
/* Allocate the software PGRAPH context for a new channel and seed its
 * DEBUG_3 value (DEBUG_3 is guaranteed to be in nv04_graph_ctx_regs —
 * it is the table's last entry — so ctx_reg() cannot return NULL here).
 * Returns 0 or -ENOMEM. */
398int nv04_graph_create_context(struct nouveau_channel *chan)
399{
400 struct graph_state *pgraph_ctx;
401 NV_DEBUG(chan->dev, "nv04_graph_context_create %d\n", chan->id);
402
403 chan->pgraph_ctx = pgraph_ctx = kzalloc(sizeof(*pgraph_ctx),
404 GFP_KERNEL);
405 if (pgraph_ctx == NULL)
406 return -ENOMEM;
407
408 *ctx_reg(pgraph_ctx, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31;
409
410 return 0;
411}
412
/* Free the channel's software PGRAPH context (kfree(NULL) is safe). */
413void nv04_graph_destroy_context(struct nouveau_channel *chan)
414{
415 struct graph_state *pgraph_ctx = chan->pgraph_ctx;
416
417 kfree(pgraph_ctx);
418 chan->pgraph_ctx = NULL;
419}
420
/* Write the channel's saved context into the PGRAPH registers, mark the
 * loaded context valid in CTX_CONTROL, stamp the channel id into the
 * top byte of CTX_USER, and clear the high bits of FFINTFC_ST2.
 * Always returns 0. */
421int nv04_graph_load_context(struct nouveau_channel *chan)
422{
423 struct drm_device *dev = chan->dev;
424 struct graph_state *pgraph_ctx = chan->pgraph_ctx;
425 uint32_t tmp;
426 int i;
427
428 for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
429 nv_wr32(dev, nv04_graph_ctx_regs[i], pgraph_ctx->nv04[i]);
430
431 nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10010100);
432
433 tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
434 nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp | chan->id << 24);
435
436 tmp = nv_rd32(dev, NV04_PGRAPH_FFINTFC_ST2);
437 nv_wr32(dev, NV04_PGRAPH_FFINTFC_ST2, tmp & 0x000fffff);
438
439 return 0;
440}
441
/* Save the resident channel's PGRAPH registers into its software
 * context, then mark PGRAPH's context invalid and point CTX_USER at the
 * reserved highest channel id.  Returns 0 (also when no channel was
 * resident). */
442int
443nv04_graph_unload_context(struct drm_device *dev)
444{
445 struct drm_nouveau_private *dev_priv = dev->dev_private;
446 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
447 struct nouveau_channel *chan = NULL;
448 struct graph_state *ctx;
449 uint32_t tmp;
450 int i;
451
452 chan = pgraph->channel(dev);
453 if (!chan)
454 return 0;
455 ctx = chan->pgraph_ctx;
456
457 for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
458 ctx->nv04[i] = nv_rd32(dev, nv04_graph_ctx_regs[i]);
459
460 nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10000000);
461 tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
462 tmp |= (dev_priv->engine.fifo.channels - 1) << 24;
463 nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);
464 return 0;
465}
466
/* Bring up PGRAPH: pulse its PMC enable bit, unmask all interrupts, and
 * program the DEBUG_* registers with values derived from mmio traces of
 * the proprietary driver ("blob") and the Haiku driver, as annotated
 * inline.  Always returns 0. */
467int nv04_graph_init(struct drm_device *dev)
468{
469 struct drm_nouveau_private *dev_priv = dev->dev_private;
470 uint32_t tmp;
471
472 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
473 ~NV_PMC_ENABLE_PGRAPH);
474 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
475 NV_PMC_ENABLE_PGRAPH);
476
477 /* Enable PGRAPH interrupts */
478 nv_wr32(dev, NV03_PGRAPH_INTR, 0xFFFFFFFF);
479 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
480
481 nv_wr32(dev, NV04_PGRAPH_VALID1, 0);
482 nv_wr32(dev, NV04_PGRAPH_VALID2, 0);
483 /*nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x000001FF);
484 nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/
485 nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x1231c000);
486 /*1231C000 blob, 001 haiku*/
487 //*V_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100);*/
488 nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x72111100);
489 /*0x72111100 blob , 01 haiku*/
490 /*nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x11d5f870);*/
491 nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x11d5f071);
492 /*haiku same*/
493
494 /*nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xfad4ff31);*/
495 nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xf0d4ff31);
496 /*haiku and blob 10d4*/
497
 /* No context loaded yet: mark context invalid-but-enabled and park
  * CTX_USER on the reserved highest channel id. */
498 nv_wr32(dev, NV04_PGRAPH_STATE , 0xFFFFFFFF);
499 nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL , 0x10000100);
500 tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
501 tmp |= (dev_priv->engine.fifo.channels - 1) << 24;
502 nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);
503
504 /* These don't belong here, they're part of a per-channel context */
505 nv_wr32(dev, NV04_PGRAPH_PATTERN_SHAPE, 0x00000000);
506 nv_wr32(dev, NV04_PGRAPH_BETA_AND , 0xFFFFFFFF);
507
508 return 0;
509}
510
/* Intentionally empty: NV04 PGRAPH needs no explicit teardown. */
511void nv04_graph_takedown(struct drm_device *dev)
512{
513}
514
/* Gate PGRAPH's FIFO access by toggling bit 0 of NV04_PGRAPH_FIFO;
 * used to stall the engine around context switches. */
515void
516nv04_graph_fifo_access(struct drm_device *dev, bool enabled)
517{
518 if (enabled)
519 nv_wr32(dev, NV04_PGRAPH_FIFO,
520 nv_rd32(dev, NV04_PGRAPH_FIFO) | 1);
521 else
522 nv_wr32(dev, NV04_PGRAPH_FIFO,
523 nv_rd32(dev, NV04_PGRAPH_FIFO) & ~1);
524}
525
/* Software method 0x0150 (SET_REF) on the sw object: record the fence
 * sequence number and kick fence processing for this channel. */
526static int
527nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass,
528 int mthd, uint32_t data)
529{
530 chan->fence.last_sequence_irq = data;
531 nouveau_fence_handler(chan->dev, chan->id);
532 return 0;
533}
534
/* Software implementation of method 0x02fc (SET_OPERATION): patch the
 * operation field (bits 15-17) into the object's instance-memory copy
 * and into the live CTX_SWITCH1 / per-subchannel CTX_CACHE registers. */
535static int
536nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
537 int mthd, uint32_t data)
538{
539 struct drm_device *dev = chan->dev;
540 uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
541 int subc = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
542 uint32_t tmp;
543
544 tmp = nv_ri32(dev, instance);
545 tmp &= ~0x00038000;
546 tmp |= ((data & 7) << 15);
547
548 nv_wi32(dev, instance, tmp);
549 nv_wr32(dev, NV04_PGRAPH_CTX_SWITCH1, tmp);
550 nv_wr32(dev, NV04_PGRAPH_CTX_CACHE1 + (subc<<2), tmp);
551 return 0;
552}
553
/* Software-method table for the sw (0x506e) class. */
554static struct nouveau_pgraph_object_method nv04_graph_mthds_sw[] = {
555 { 0x0150, nv04_graph_mthd_set_ref },
556 {}
557};
558
/* Software-method table for classes whose 0x02fc (set-operation) method
 * must be emulated on NV04. */
559static struct nouveau_pgraph_object_method nv04_graph_mthds_set_operation[] = {
560 { 0x02fc, nv04_graph_mthd_set_operation },
561 {},
562};
563
/* Object classes supported on NV04 PGRAPH: { class id, software?,
 * software-method table }.  Terminated by an empty entry. */
564struct nouveau_pgraph_object_class nv04_graph_grclass[] = {
565 { 0x0039, false, NULL },
566 { 0x004a, false, nv04_graph_mthds_set_operation }, /* gdirect */
567 { 0x005f, false, nv04_graph_mthds_set_operation }, /* imageblit */
568 { 0x0061, false, nv04_graph_mthds_set_operation }, /* ifc */
569 { 0x0077, false, nv04_graph_mthds_set_operation }, /* sifm */
570 { 0x0030, false, NULL }, /* null */
571 { 0x0042, false, NULL }, /* surf2d */
572 { 0x0043, false, NULL }, /* rop */
573 { 0x0012, false, NULL }, /* beta1 */
574 { 0x0072, false, NULL }, /* beta4 */
575 { 0x0019, false, NULL }, /* cliprect */
576 { 0x0044, false, NULL }, /* pattern */
577 { 0x0052, false, NULL }, /* swzsurf */
578 { 0x0053, false, NULL }, /* surf3d */
579 { 0x0054, false, NULL }, /* tex_tri */
580 { 0x0055, false, NULL }, /* multitex_tri */
581 { 0x506e, true, nv04_graph_mthds_sw },
582 {}
583};
584
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
new file mode 100644
index 000000000000..a3b9563a6f60
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_instmem.c
@@ -0,0 +1,208 @@
1#include "drmP.h"
2#include "drm.h"
3#include "nouveau_drv.h"
4
5/* returns the size of fifo context */
6static int
7nouveau_fifo_ctx_size(struct drm_device *dev)
8{
9 struct drm_nouveau_private *dev_priv = dev->dev_private;
10
11 if (dev_priv->chipset >= 0x40)
12 return 128;
13 else
14 if (dev_priv->chipset >= 0x17)
15 return 64;
16
17 return 32;
18}
19
/* Decide how much VRAM to reserve for RAMIN (instance memory), record it
 * in dev_priv->ramin_rsvd_vram, and zero the area — except for the BIOS
 * image occupying the first 64KiB.
 */
static void
nv04_instmem_determine_amount(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int i;

	/* Figure out how much instance memory we need */
	if (dev_priv->card_type >= NV_40) {
		/* We'll want more instance memory than this on some NV4x cards.
		 * There's a 16MB aperture to play with that maps onto the end
		 * of vram. For now, only reserve a small piece until we know
		 * more about what each chipset requires.
		 */
		switch (dev_priv->chipset) {
		case 0x40:
		case 0x47:
		case 0x49:
		case 0x4b:
			dev_priv->ramin_rsvd_vram = (2 * 1024 * 1024);
			break;
		default:
			dev_priv->ramin_rsvd_vram = (1 * 1024 * 1024);
			break;
		}
	} else {
		/*XXX: what *are* the limits on <NV40 cards?
		 */
		dev_priv->ramin_rsvd_vram = (512 * 1024);
	}
	NV_DEBUG(dev, "RAMIN size: %dKiB\n", dev_priv->ramin_rsvd_vram >> 10);

	/* Clear all of it, except the BIOS image that's in the first 64KiB */
	dev_priv->engine.instmem.prepare_access(dev, true);
	for (i = 64 * 1024; i < dev_priv->ramin_rsvd_vram; i += 4)
		nv_wi32(dev, i, 0x00000000);
	dev_priv->engine.instmem.finish_access(dev);
}
57
/* Lay out the fixed FIFO tables (RAMHT, RAMRO, RAMFC) at their
 * traditional offsets within RAMIN and record offset/size in dev_priv.
 */
static void
nv04_instmem_configure_fixed_tables(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;

	/* FIFO hash table (RAMHT)
	 * use 4k hash table at RAMIN+0x10000
	 * TODO: extend the hash table
	 */
	dev_priv->ramht_offset = 0x10000;
	dev_priv->ramht_bits = 9;
	dev_priv->ramht_size = (1 << dev_priv->ramht_bits); /* nr entries */
	dev_priv->ramht_size *= 8; /* 2 32-bit values per entry in RAMHT */
	NV_DEBUG(dev, "RAMHT offset=0x%x, size=%d\n", dev_priv->ramht_offset,
		 dev_priv->ramht_size);

	/* FIFO runout table (RAMRO) - 512 bytes at 0x11200 */
	dev_priv->ramro_offset = 0x11200;
	dev_priv->ramro_size = 512;
	NV_DEBUG(dev, "RAMRO offset=0x%x, size=%d\n", dev_priv->ramro_offset,
		 dev_priv->ramro_size);

	/* FIFO context table (RAMFC)
	 * NV40  : Not sure exactly how to position RAMFC on some cards,
	 *         0x30002 seems to position it at RAMIN+0x20000 on these
	 *         cards. RAMFC is 4kb (32 fifos, 128byte entries).
	 * Others: Position RAMFC at RAMIN+0x11400
	 */
	dev_priv->ramfc_size = engine->fifo.channels *
			       nouveau_fifo_ctx_size(dev);
	switch (dev_priv->card_type) {
	case NV_40:
		dev_priv->ramfc_offset = 0x20000;
		break;
	case NV_30:
	case NV_20:
	case NV_10:
	case NV_04:
	default:
		dev_priv->ramfc_offset = 0x11400;
		break;
	}
	NV_DEBUG(dev, "RAMFC offset=0x%x, size=%d\n", dev_priv->ramfc_offset,
		 dev_priv->ramfc_size);
}
104
105int nv04_instmem_init(struct drm_device *dev)
106{
107 struct drm_nouveau_private *dev_priv = dev->dev_private;
108 uint32_t offset;
109 int ret = 0;
110
111 nv04_instmem_determine_amount(dev);
112 nv04_instmem_configure_fixed_tables(dev);
113
114 /* Create a heap to manage RAMIN allocations, we don't allocate
115 * the space that was reserved for RAMHT/FC/RO.
116 */
117 offset = dev_priv->ramfc_offset + dev_priv->ramfc_size;
118
119 /* It appears RAMRO (or something?) is controlled by 0x2220/0x2230
120 * on certain NV4x chipsets as well as RAMFC. When 0x2230 == 0
121 * ("new style" control) the upper 16-bits of 0x2220 points at this
122 * other mysterious table that's clobbering important things.
123 *
124 * We're now pointing this at RAMIN+0x30000 to avoid RAMFC getting
125 * smashed to pieces on us, so reserve 0x30000-0x40000 too..
126 */
127 if (dev_priv->card_type >= NV_40) {
128 if (offset < 0x40000)
129 offset = 0x40000;
130 }
131
132 ret = nouveau_mem_init_heap(&dev_priv->ramin_heap,
133 offset, dev_priv->ramin_rsvd_vram - offset);
134 if (ret) {
135 dev_priv->ramin_heap = NULL;
136 NV_ERROR(dev, "Failed to init RAMIN heap\n");
137 }
138
139 return ret;
140}
141
/* Nothing to tear down: RAMIN lives in statically reserved VRAM. */
void
nv04_instmem_takedown(struct drm_device *dev)
{
}
146
147int
148nv04_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, uint32_t *sz)
149{
150 if (gpuobj->im_backing)
151 return -EINVAL;
152
153 return 0;
154}
155
156void
157nv04_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
158{
159 struct drm_nouveau_private *dev_priv = dev->dev_private;
160
161 if (gpuobj && gpuobj->im_backing) {
162 if (gpuobj->im_bound)
163 dev_priv->engine.instmem.unbind(dev, gpuobj);
164 gpuobj->im_backing = NULL;
165 }
166}
167
168int
169nv04_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
170{
171 if (!gpuobj->im_pramin || gpuobj->im_bound)
172 return -EINVAL;
173
174 gpuobj->im_bound = 1;
175 return 0;
176}
177
178int
179nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
180{
181 if (gpuobj->im_bound == 0)
182 return -EINVAL;
183
184 gpuobj->im_bound = 0;
185 return 0;
186}
187
/* RAMIN is always CPU-accessible on NV04; no mapping work required. */
void
nv04_instmem_prepare_access(struct drm_device *dev, bool write)
{
}
192
/* Counterpart to prepare_access(); nothing to flush on NV04. */
void
nv04_instmem_finish_access(struct drm_device *dev)
{
}
197
/* No instmem state needs saving across suspend on NV04. */
int
nv04_instmem_suspend(struct drm_device *dev)
{
	return 0;
}
203
/* No instmem state needs restoring on resume on NV04. */
void
nv04_instmem_resume(struct drm_device *dev)
{
}
208
diff --git a/drivers/gpu/drm/nouveau/nv04_mc.c b/drivers/gpu/drm/nouveau/nv04_mc.c
new file mode 100644
index 000000000000..617ed1e05269
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_mc.c
@@ -0,0 +1,20 @@
1#include "drmP.h"
2#include "drm.h"
3#include "nouveau_drv.h"
4#include "nouveau_drm.h"
5
/* Initialise the master control (PMC) unit. */
int
nv04_mc_init(struct drm_device *dev)
{
	/* Power up everything, resetting each individual unit will
	 * be done later if needed.
	 */

	/* All-ones enables every engine's clock/power domain. */
	nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
	return 0;
}
16
/* Nothing to undo: engines are left powered for the next driver. */
void
nv04_mc_takedown(struct drm_device *dev)
{
}
diff --git a/drivers/gpu/drm/nouveau/nv04_timer.c b/drivers/gpu/drm/nouveau/nv04_timer.c
new file mode 100644
index 000000000000..1d09ddd57399
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_timer.c
@@ -0,0 +1,51 @@
1#include "drmP.h"
2#include "drm.h"
3#include "nouveau_drv.h"
4#include "nouveau_drm.h"
5
/* Initialise PTIMER: mask and ack all timer interrupts, and program the
 * clock ratio only if the registers look unprogrammed.
 */
int
nv04_timer_init(struct drm_device *dev)
{
	nv_wr32(dev, NV04_PTIMER_INTR_EN_0, 0x00000000);	/* mask all */
	nv_wr32(dev, NV04_PTIMER_INTR_0, 0xFFFFFFFF);		/* ack pending */

	/* Just use the pre-existing values when possible for now; these regs
	 * are not written in nv (driver writer missed a /4 on the address), and
	 * writing 8 and 3 to the correct regs breaks the timings on the LVDS
	 * hardware sequencing microcode.
	 * A correct solution (involving calculations with the GPU PLL) can
	 * be done when kernel modesetting lands
	 */
	if (!nv_rd32(dev, NV04_PTIMER_NUMERATOR) ||
	    !nv_rd32(dev, NV04_PTIMER_DENOMINATOR)) {
		nv_wr32(dev, NV04_PTIMER_NUMERATOR, 0x00000008);
		nv_wr32(dev, NV04_PTIMER_DENOMINATOR, 0x00000003);
	}

	return 0;
}
27
/* Read the free-running 64-bit PTIMER counter, retrying until the high
 * dword is stable so a carry between the two 32-bit reads cannot tear
 * the result.
 */
uint64_t
nv04_timer_read(struct drm_device *dev)
{
	uint32_t low;
	/* From kmmio dumps on nv28 this looks like how the blob does this.
	 * It reads the high dword twice, before and after.
	 * The only explanation seems to be that the 64-bit timer counter
	 * advances between high and low dword reads and may corrupt the
	 * result. Not confirmed.
	 */
	uint32_t high2 = nv_rd32(dev, NV04_PTIMER_TIME_1);
	uint32_t high1;
	do {
		high1 = high2;
		low = nv_rd32(dev, NV04_PTIMER_TIME_0);
		high2 = nv_rd32(dev, NV04_PTIMER_TIME_1);
	} while (high1 != high2);	/* high dword changed: retry */
	return (((uint64_t)high2) << 32) | (uint64_t)low;
}
47
/* PTIMER keeps running; nothing to tear down. */
void
nv04_timer_takedown(struct drm_device *dev)
{
}
diff --git a/drivers/gpu/drm/nouveau/nv04_tv.c b/drivers/gpu/drm/nouveau/nv04_tv.c
new file mode 100644
index 000000000000..c4e3404337d4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_tv.c
@@ -0,0 +1,305 @@
1/*
2 * Copyright (C) 2009 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "nouveau_drv.h"
29#include "nouveau_encoder.h"
30#include "nouveau_connector.h"
31#include "nouveau_crtc.h"
32#include "nouveau_hw.h"
33#include "drm_crtc_helper.h"
34
35#include "i2c/ch7006.h"
36
/* Table of supported external TV encoder chips.  Each entry carries the
 * I2C probe/board info plus per-instance encoder funcs storage that is
 * filled in by nv04_tv_create() once the slave driver is attached.
 */
static struct {
	struct i2c_board_info board_info;
	struct drm_encoder_funcs funcs;
	struct drm_encoder_helper_funcs hfuncs;
	void *params;	/* chip-specific configuration blob */

} nv04_tv_encoder_info[] = {
	{
		/* Chrontel CH7006 at I2C address 0x75 */
		.board_info = { I2C_BOARD_INFO("ch7006", 0x75) },
		.params = &(struct ch7006_encoder_params) {
			CH7006_FORMAT_RGB24m12I, CH7006_CLOCK_MASTER,
			0, 0, 0,
			CH7006_SYNC_SLAVE, CH7006_SYNC_SEPARATED,
			CH7006_POUT_3_3V, CH7006_ACTIVE_HSYNC
		},
	},
};
54
55static bool probe_i2c_addr(struct i2c_adapter *adapter, int addr)
56{
57 struct i2c_msg msg = {
58 .addr = addr,
59 .len = 0,
60 };
61
62 return i2c_transfer(adapter, &msg, 1) == 1;
63}
64
65int nv04_tv_identify(struct drm_device *dev, int i2c_index)
66{
67 struct nouveau_i2c_chan *i2c;
68 bool was_locked;
69 int i, ret;
70
71 NV_TRACE(dev, "Probing TV encoders on I2C bus: %d\n", i2c_index);
72
73 i2c = nouveau_i2c_find(dev, i2c_index);
74 if (!i2c)
75 return -ENODEV;
76
77 was_locked = NVLockVgaCrtcs(dev, false);
78
79 for (i = 0; i < ARRAY_SIZE(nv04_tv_encoder_info); i++) {
80 if (probe_i2c_addr(&i2c->adapter,
81 nv04_tv_encoder_info[i].board_info.addr)) {
82 ret = i;
83 break;
84 }
85 }
86
87 if (i < ARRAY_SIZE(nv04_tv_encoder_info)) {
88 NV_TRACE(dev, "Detected TV encoder: %s\n",
89 nv04_tv_encoder_info[i].board_info.type);
90
91 } else {
92 NV_TRACE(dev, "No TV encoders found.\n");
93 i = -ENODEV;
94 }
95
96 NVLockVgaCrtcs(dev, was_locked);
97 return i;
98}
99
/* PLL-select bits that route the TV VSCLK/PCLK source to head 0 / head 1. */
#define PLLSEL_TV_CRTC1_MASK				\
	(NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK1		\
	 | NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK1)
#define PLLSEL_TV_CRTC2_MASK				\
	(NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK2		\
	 | NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK2)
106
/* DPMS handler: reroute the TV PLL clocks to the driving head when
 * switching on (and inhibit that head's hsync), then forward the DPMS
 * request to the slave encoder driver.
 */
static void nv04_tv_dpms(struct drm_encoder *encoder, int mode)
{
	struct drm_device *dev = encoder->dev;
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv04_mode_state *state = &dev_priv->mode_reg;
	uint8_t crtc1A;

	NV_INFO(dev, "Setting dpms mode %d on TV encoder (output %d)\n",
		mode, nv_encoder->dcb->index);

	/* Detach TV clocks from both heads before (re)attaching below. */
	state->pllsel &= ~(PLLSEL_TV_CRTC1_MASK | PLLSEL_TV_CRTC2_MASK);

	if (mode == DRM_MODE_DPMS_ON) {
		int head = nouveau_crtc(encoder->crtc)->index;
		crtc1A = NVReadVgaCrtc(dev, head, NV_CIO_CRE_RPC1_INDEX);

		state->pllsel |= head ? PLLSEL_TV_CRTC2_MASK :
					PLLSEL_TV_CRTC1_MASK;

		/* Inhibit hsync */
		crtc1A |= 0x80;

		NVWriteVgaCrtc(dev, head, NV_CIO_CRE_RPC1_INDEX, crtc1A);
	}

	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel);

	/* Let the external encoder chip apply its own power state. */
	to_encoder_slave(encoder)->slave_funcs->dpms(encoder, mode);
}
137
/* Attach or detach the TV encoder from a head: clears the TV raster
 * setup and toggles bit 4 of CRTC extended register 0x49, then flushes
 * the affected state to the hardware.
 */
static void nv04_tv_bind(struct drm_device *dev, int head, bool bind)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv04_crtc_reg *state = &dev_priv->mode_reg.crtc_reg[head];

	state->tv_setup = 0;

	if (bind) {
		/* LCD path must be off while the head drives the TV out. */
		state->CRTC[NV_CIO_CRE_LCD__INDEX] = 0;
		state->CRTC[NV_CIO_CRE_49] |= 0x10;
	} else {
		state->CRTC[NV_CIO_CRE_49] &= ~0x10;
	}

	NVWriteVgaCrtc(dev, head, NV_CIO_CRE_LCD__INDEX,
		       state->CRTC[NV_CIO_CRE_LCD__INDEX]);
	NVWriteVgaCrtc(dev, head, NV_CIO_CRE_49,
		       state->CRTC[NV_CIO_CRE_49]);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP,
		      state->tv_setup);
}
159
/* Pre-modeset hook: power the encoder down, disable any DFP on the
 * target head, detach the TV output from the other head (dual-head
 * cards only) and bind it to ours.
 */
static void nv04_tv_prepare(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	int head = nouveau_crtc(encoder->crtc)->index;
	struct drm_encoder_helper_funcs *helper = encoder->helper_private;

	helper->dpms(encoder, DRM_MODE_DPMS_OFF);

	nv04_dfp_disable(dev, head);

	if (nv_two_heads(dev))
		nv04_tv_bind(dev, head ^ 1, false);	/* release the other head */

	nv04_tv_bind(dev, head, true);
}
175
/* Mode-set hook: program per-head TV raster totals and signal skew/delay
 * registers, then hand the mode to the slave encoder chip.
 */
static void nv04_tv_mode_set(struct drm_encoder *encoder,
			     struct drm_display_mode *mode,
			     struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];

	regp->tv_htotal = adjusted_mode->htotal;
	regp->tv_vtotal = adjusted_mode->vtotal;

	/* These delay the TV signals with respect to the VGA port,
	 * they might be useful if we ever allow a CRTC to drive
	 * multiple outputs.
	 */
	regp->tv_hskew = 1;
	regp->tv_hsync_delay = 1;
	regp->tv_hsync_delay2 = 64;
	regp->tv_vskew = 1;
	regp->tv_vsync_delay = 1;

	to_encoder_slave(encoder)->slave_funcs->mode_set(encoder, mode, adjusted_mode);
}
200
/* Post-modeset hook: power the output back on and log the routing. */
static void nv04_tv_commit(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_device *dev = encoder->dev;
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	struct drm_encoder_helper_funcs *helper = encoder->helper_private;

	helper->dpms(encoder, DRM_MODE_DPMS_ON);

	/* '@' + ffs(or) maps output-resource bit 0 -> 'A', bit 1 -> 'B', ... */
	NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
		drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), nv_crtc->index,
		'@' + ffs(nv_encoder->dcb->or));
}
214
/* Encoder destructor: let the slave driver release its state first,
 * then unregister from DRM and free our wrapper.
 */
static void nv04_tv_destroy(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);

	to_encoder_slave(encoder)->slave_funcs->destroy(encoder);

	drm_encoder_cleanup(encoder);

	kfree(nv_encoder);
}
225
/* Create a DRM encoder for an external TV chip described by a DCB entry.
 *
 * Probes the chip on the entry's I2C bus, registers a TVDAC encoder,
 * attaches the generic i2c slave-encoder driver, and wires our own
 * dpms/prepare/commit/mode_set hooks around the slave's callbacks.
 * Returns 0 on success or a negative errno (probe or slave init failure).
 */
int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry)
{
	struct nouveau_encoder *nv_encoder;
	struct drm_encoder *encoder;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct i2c_adapter *adap;
	struct drm_encoder_funcs *funcs = NULL;
	struct drm_encoder_helper_funcs *hfuncs = NULL;
	struct drm_encoder_slave_funcs *sfuncs = NULL;
	int i2c_index = entry->i2c_index;
	int type, ret;
	bool was_locked;

	/* Ensure that we can talk to this encoder */
	type = nv04_tv_identify(dev, i2c_index);
	if (type < 0)
		return type;

	/* Allocate the necessary memory */
	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
	if (!nv_encoder)
		return -ENOMEM;

	/* Initialize the common members */
	encoder = to_drm_encoder(nv_encoder);

	/* Per-chip funcs storage lives in the encoder-info table entry. */
	funcs = &nv04_tv_encoder_info[type].funcs;
	hfuncs = &nv04_tv_encoder_info[type].hfuncs;

	drm_encoder_init(dev, encoder, funcs, DRM_MODE_ENCODER_TVDAC);
	drm_encoder_helper_add(encoder, hfuncs);

	encoder->possible_crtcs = entry->heads;
	encoder->possible_clones = 0;

	nv_encoder->dcb = entry;
	nv_encoder->or = ffs(entry->or) - 1;

	/* Run the slave-specific initialization */
	adap = &dev_priv->vbios.dcb.i2c[i2c_index].chan->adapter;

	/* Unlock CRTCs so the slave driver can reach the chip over I2C. */
	was_locked = NVLockVgaCrtcs(dev, false);

	ret = drm_i2c_encoder_init(encoder->dev, to_encoder_slave(encoder), adap,
				   &nv04_tv_encoder_info[type].board_info);

	NVLockVgaCrtcs(dev, was_locked);

	if (ret < 0)
		goto fail;

	/* Fill the function pointers */
	sfuncs = to_encoder_slave(encoder)->slave_funcs;

	*funcs = (struct drm_encoder_funcs) {
		.destroy = nv04_tv_destroy,
	};

	/* Wrap the slave callbacks with our head-routing hooks. */
	*hfuncs = (struct drm_encoder_helper_funcs) {
		.dpms = nv04_tv_dpms,
		.save = sfuncs->save,
		.restore = sfuncs->restore,
		.mode_fixup = sfuncs->mode_fixup,
		.prepare = nv04_tv_prepare,
		.commit = nv04_tv_commit,
		.mode_set = nv04_tv_mode_set,
		.detect = sfuncs->detect,
	};

	/* Set the slave encoder configuration */
	sfuncs->set_config(encoder, nv04_tv_encoder_info[type].params);

	return 0;

fail:
	drm_encoder_cleanup(encoder);

	kfree(nv_encoder);
	return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nv10_fb.c b/drivers/gpu/drm/nouveau/nv10_fb.c
new file mode 100644
index 000000000000..cc5cda44e501
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv10_fb.c
@@ -0,0 +1,44 @@
1#include "drmP.h"
2#include "drm.h"
3#include "nouveau_drv.h"
4#include "nouveau_drm.h"
5
6void
7nv10_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
8 uint32_t size, uint32_t pitch)
9{
10 struct drm_nouveau_private *dev_priv = dev->dev_private;
11 uint32_t limit = max(1u, addr + size) - 1;
12
13 if (pitch) {
14 if (dev_priv->card_type >= NV_20)
15 addr |= 1;
16 else
17 addr |= 1 << 31;
18 }
19
20 nv_wr32(dev, NV10_PFB_TLIMIT(i), limit);
21 nv_wr32(dev, NV10_PFB_TSIZE(i), pitch);
22 nv_wr32(dev, NV10_PFB_TILE(i), addr);
23}
24
25int
26nv10_fb_init(struct drm_device *dev)
27{
28 struct drm_nouveau_private *dev_priv = dev->dev_private;
29 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
30 int i;
31
32 pfb->num_tiles = NV10_PFB_TILE__SIZE;
33
34 /* Turn all the tiling regions off. */
35 for (i = 0; i < pfb->num_tiles; i++)
36 pfb->set_region_tiling(dev, i, 0, 0, 0);
37
38 return 0;
39}
40
/* Nothing to tear down for the NV10 framebuffer engine. */
void
nv10_fb_takedown(struct drm_device *dev)
{
}
diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c
new file mode 100644
index 000000000000..7aeabf262bc0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv10_fifo.c
@@ -0,0 +1,260 @@
1/*
2 * Copyright (C) 2007 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm.h"
29#include "nouveau_drv.h"
30
/* Byte offset of channel (c)'s RAMFC entry within instance memory.
 * Entries are 64 bytes on NV17+ (extended context) and 32 bytes before.
 * Both macros expect a local `dev_priv` in scope.
 */
#define NV10_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV10_RAMFC__SIZE))
#define NV10_RAMFC__SIZE ((dev_priv->chipset) >= 0x17 ? 64 : 32)
33
34int
35nv10_fifo_channel_id(struct drm_device *dev)
36{
37 return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
38 NV10_PFIFO_CACHE1_PUSH1_CHID_MASK;
39}
40
/* Allocate and initialise a channel's RAMFC entry, then enable DMA mode
 * for that channel in PFIFO.  Returns 0 or the gpuobj allocation error.
 */
int
nv10_fifo_create_context(struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_device *dev = chan->dev;
	uint32_t fc = NV10_RAMFC(chan->id);
	int ret;

	/* Wrap the fixed RAMFC slot in a gpuobj so it is zeroed for us. */
	ret = nouveau_gpuobj_new_fake(dev, NV10_RAMFC(chan->id), ~0,
				      NV10_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC |
				      NVOBJ_FLAG_ZERO_FREE, NULL, &chan->ramfc);
	if (ret)
		return ret;

	/* Fill entries that are seen filled in dumps of nvidia driver just
	 * after channel's is put into DMA mode
	 */
	dev_priv->engine.instmem.prepare_access(dev, true);
	nv_wi32(dev, fc +  0, chan->pushbuf_base);		/* DMA_PUT */
	nv_wi32(dev, fc +  4, chan->pushbuf_base);		/* DMA_GET */
	nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4);	/* DMA_INSTANCE */
	nv_wi32(dev, fc + 20, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
			      NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
			      NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
#ifdef __BIG_ENDIAN
			      NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
			      0);
	dev_priv->engine.instmem.finish_access(dev);

	/* enable the fifo dma operation */
	nv_wr32(dev, NV04_PFIFO_MODE,
		nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
	return 0;
}
76
/* Take the channel out of DMA mode and release its RAMFC gpuobj. */
void
nv10_fifo_destroy_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;

	nv_wr32(dev, NV04_PFIFO_MODE,
		nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));

	nouveau_gpuobj_ref_del(dev, &chan->ramfc);
}
87
/* Restore a channel's FIFO state from its RAMFC entry into the live
 * CACHE1 registers.  NV17+ entries carry extra acquire/semaphore state
 * at offsets 32..48; earlier chipsets skip that part.
 */
static void
nv10_fifo_do_load_context(struct drm_device *dev, int chid)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t fc = NV10_RAMFC(chid), tmp;

	dev_priv->engine.instmem.prepare_access(dev, false);

	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
	nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8));

	/* fc+12 packs instance (low 16 bits) and dcount (high 16 bits). */
	tmp = nv_ri32(dev, fc + 12);
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF);
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, tmp >> 16);

	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 16));
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, nv_ri32(dev, fc + 20));
	nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 24));
	nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 28));

	if (dev_priv->chipset < 0x17)
		goto out;

	nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE, nv_ri32(dev, fc + 32));
	tmp = nv_ri32(dev, fc + 36);
	nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, tmp);
	nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT, nv_ri32(dev, fc + 40));
	nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, nv_ri32(dev, fc + 44));
	nv_wr32(dev, NV10_PFIFO_CACHE1_DMA_SUBROUTINE, nv_ri32(dev, fc + 48));

out:
	/* Reset the cache pointers: the new context starts fetching fresh. */
	dev_priv->engine.instmem.finish_access(dev);

	nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
}
125
/* Make @chan the channel resident in CACHE1: load its RAMFC state,
 * select it in PUSH1 (DMA mode), and re-enable DMA push.
 */
int
nv10_fifo_load_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	uint32_t tmp;

	nv10_fifo_do_load_context(dev, chan->id);

	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
		NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id);
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1);

	/* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */
	tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31);
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp);

	return 0;
}
144
/* Save the currently resident channel's CACHE1 state back into its
 * RAMFC entry, then park PFIFO on the reserved null channel
 * (channels-1).  A no-op if no real channel is resident.
 */
int
nv10_fifo_unload_context(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	uint32_t fc, tmp;
	int chid;

	chid = pfifo->channel_id(dev);
	if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
		return 0;
	fc = NV10_RAMFC(chid);

	dev_priv->engine.instmem.prepare_access(dev, true);

	nv_wi32(dev, fc +  0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
	nv_wi32(dev, fc +  4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
	nv_wi32(dev, fc +  8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT));
	/* Pack instance (low 16) and dcount (high 16) into one word. */
	tmp  = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE) & 0xFFFF;
	tmp |= (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16);
	nv_wi32(dev, fc + 12, tmp);
	nv_wi32(dev, fc + 16, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE));
	nv_wi32(dev, fc + 20, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH));
	nv_wi32(dev, fc + 24, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
	nv_wi32(dev, fc + 28, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));

	if (dev_priv->chipset < 0x17)
		goto out;

	/* NV17+: extended acquire/semaphore/subroutine state. */
	nv_wi32(dev, fc + 32, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE));
	tmp = nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP);
	nv_wi32(dev, fc + 36, tmp);
	nv_wi32(dev, fc + 40, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT));
	nv_wi32(dev, fc + 44, nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE));
	nv_wi32(dev, fc + 48, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));

out:
	dev_priv->engine.instmem.finish_access(dev);

	nv10_fifo_do_load_context(dev, pfifo->channels - 1);
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
	return 0;
}
188
/* Pulse the PFIFO enable bit in PMC to reset the engine, then clear a
 * set of PFIFO control/DMA registers to a known state.
 */
static void
nv10_fifo_init_reset(struct drm_device *dev)
{
	nv_wr32(dev, NV03_PMC_ENABLE,
		nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO);
	nv_wr32(dev, NV03_PMC_ENABLE,
		nv_rd32(dev, NV03_PMC_ENABLE) |  NV_PMC_ENABLE_PFIFO);

	nv_wr32(dev, 0x003224, 0x000f0078);
	nv_wr32(dev, 0x002044, 0x0101ffff);
	nv_wr32(dev, 0x002040, 0x000000ff);
	nv_wr32(dev, 0x002500, 0x00000000);
	nv_wr32(dev, 0x003000, 0x00000000);
	nv_wr32(dev, 0x003050, 0x00000000);

	nv_wr32(dev, 0x003258, 0x00000000);
	nv_wr32(dev, 0x003210, 0x00000000);
	nv_wr32(dev, 0x003270, 0x00000000);
}
208
/* Point PFIFO at the RAMHT/RAMRO/RAMFC tables laid out in RAMIN.
 * NV17+ uses 64-byte RAMFC entries, signalled by bit 16 of the RAMFC
 * register.
 */
static void
nv10_fifo_init_ramxx(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
				       ((dev_priv->ramht_bits - 9) << 16) |
				       (dev_priv->ramht_offset >> 8));
	nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8);

	if (dev_priv->chipset < 0x17) {
		nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc_offset >> 8);
	} else {
		nv_wr32(dev, NV03_PFIFO_RAMFC, (dev_priv->ramfc_offset >> 8) |
					       (1 << 16) /* 64 Bytes entry*/);
		/* XXX nvidia blob set bit 18, 21,23 for nv20 & nv30 */
	}
}
227
/* Acknowledge any pending PFIFO interrupts and enable all sources. */
static void
nv10_fifo_init_intr(struct drm_device *dev)
{
	nv_wr32(dev, 0x002100, 0xffffffff);	/* NV03_PFIFO_INTR_0: ack all */
	nv_wr32(dev, 0x002140, 0xffffffff);	/* NV03_PFIFO_INTR_EN_0: enable all */
}
234
/* Bring up PFIFO on NV10: reset the engine, program the RAM tables,
 * park CACHE1 on the null channel, enable interrupts and pulling, and
 * re-enable DMA mode for every channel that already exists.
 */
int
nv10_fifo_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	int i;

	nv10_fifo_init_reset(dev);
	nv10_fifo_init_ramxx(dev);

	/* Load the reserved null channel so CACHE1 is in a sane state. */
	nv10_fifo_do_load_context(dev, pfifo->channels - 1);
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);

	nv10_fifo_init_intr(dev);
	pfifo->enable(dev);
	pfifo->reassign(dev, true);

	/* Restore DMA mode for channels created before this (re)init. */
	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
		if (dev_priv->fifos[i]) {
			uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
			nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
		}
	}

	return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c
new file mode 100644
index 000000000000..fcf2cdd19493
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv10_graph.c
@@ -0,0 +1,1009 @@
1/*
2 * Copyright 2007 Matthieu CASTET <castet.matthieu@free.fr>
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include "drmP.h"
26#include "drm.h"
27#include "nouveau_drm.h"
28#include "nouveau_drv.h"
29
#define NV10_FIFO_NUMBER 32

/*
 * Per-channel snapshot of the PGRAPH "pipe" (transform/vertex) state.
 * Each member mirrors a window of pipe space, named after the pipe
 * address it is saved from / restored to via NV10_PGRAPH_PIPE_ADDRESS +
 * NV10_PGRAPH_PIPE_DATA; array sizes are the window length in bytes / 4.
 */
struct pipe_state {
	uint32_t pipe_0x0000[0x040/4];
	uint32_t pipe_0x0040[0x010/4];
	uint32_t pipe_0x0200[0x0c0/4];
	uint32_t pipe_0x4400[0x080/4];
	uint32_t pipe_0x6400[0x3b0/4];
	uint32_t pipe_0x6800[0x2f0/4];
	uint32_t pipe_0x6c00[0x030/4];
	uint32_t pipe_0x7000[0x130/4];
	uint32_t pipe_0x7400[0x0c0/4];
	uint32_t pipe_0x7800[0x0c0/4];
};
44
/*
 * MMIO registers that make up an NV10 PGRAPH channel context.  They are
 * saved into / restored from graph_state.nv10 on every channel switch,
 * and a register's position in this table IS its slot in that save area
 * (see nv10_graph_ctx_regs_find_offset()), so the ordering must not be
 * changed.
 */
static int nv10_graph_ctx_regs[] = {
	NV10_PGRAPH_CTX_SWITCH1,
	NV10_PGRAPH_CTX_SWITCH2,
	NV10_PGRAPH_CTX_SWITCH3,
	NV10_PGRAPH_CTX_SWITCH4,
	NV10_PGRAPH_CTX_SWITCH5,
	NV10_PGRAPH_CTX_CACHE1,	/* 8 values from 0x400160 to 0x40017c */
	NV10_PGRAPH_CTX_CACHE2,	/* 8 values from 0x400180 to 0x40019c */
	NV10_PGRAPH_CTX_CACHE3,	/* 8 values from 0x4001a0 to 0x4001bc */
	NV10_PGRAPH_CTX_CACHE4,	/* 8 values from 0x4001c0 to 0x4001dc */
	NV10_PGRAPH_CTX_CACHE5,	/* 8 values from 0x4001e0 to 0x4001fc */
	0x00400164,
	0x00400184,
	0x004001a4,
	0x004001c4,
	0x004001e4,
	0x00400168,
	0x00400188,
	0x004001a8,
	0x004001c8,
	0x004001e8,
	0x0040016c,
	0x0040018c,
	0x004001ac,
	0x004001cc,
	0x004001ec,
	0x00400170,
	0x00400190,
	0x004001b0,
	0x004001d0,
	0x004001f0,
	0x00400174,
	0x00400194,
	0x004001b4,
	0x004001d4,
	0x004001f4,
	0x00400178,
	0x00400198,
	0x004001b8,
	0x004001d8,
	0x004001f8,
	0x0040017c,
	0x0040019c,
	0x004001bc,
	0x004001dc,
	0x004001fc,
	NV10_PGRAPH_CTX_USER,
	NV04_PGRAPH_DMA_START_0,
	NV04_PGRAPH_DMA_START_1,
	NV04_PGRAPH_DMA_LENGTH,
	NV04_PGRAPH_DMA_MISC,
	NV10_PGRAPH_DMA_PITCH,
	NV04_PGRAPH_BOFFSET0,
	NV04_PGRAPH_BBASE0,
	NV04_PGRAPH_BLIMIT0,
	NV04_PGRAPH_BOFFSET1,
	NV04_PGRAPH_BBASE1,
	NV04_PGRAPH_BLIMIT1,
	NV04_PGRAPH_BOFFSET2,
	NV04_PGRAPH_BBASE2,
	NV04_PGRAPH_BLIMIT2,
	NV04_PGRAPH_BOFFSET3,
	NV04_PGRAPH_BBASE3,
	NV04_PGRAPH_BLIMIT3,
	NV04_PGRAPH_BOFFSET4,
	NV04_PGRAPH_BBASE4,
	NV04_PGRAPH_BLIMIT4,
	NV04_PGRAPH_BOFFSET5,
	NV04_PGRAPH_BBASE5,
	NV04_PGRAPH_BLIMIT5,
	NV04_PGRAPH_BPITCH0,
	NV04_PGRAPH_BPITCH1,
	NV04_PGRAPH_BPITCH2,
	NV04_PGRAPH_BPITCH3,
	NV04_PGRAPH_BPITCH4,
	NV10_PGRAPH_SURFACE,
	NV10_PGRAPH_STATE,
	NV04_PGRAPH_BSWIZZLE2,
	NV04_PGRAPH_BSWIZZLE5,
	NV04_PGRAPH_BPIXEL,
	NV10_PGRAPH_NOTIFY,
	NV04_PGRAPH_PATT_COLOR0,
	NV04_PGRAPH_PATT_COLOR1,
	NV04_PGRAPH_PATT_COLORRAM,	/* 64 values from 0x400900 to 0x4009fc */
	0x00400904,
	0x00400908,
	0x0040090c,
	0x00400910,
	0x00400914,
	0x00400918,
	0x0040091c,
	0x00400920,
	0x00400924,
	0x00400928,
	0x0040092c,
	0x00400930,
	0x00400934,
	0x00400938,
	0x0040093c,
	0x00400940,
	0x00400944,
	0x00400948,
	0x0040094c,
	0x00400950,
	0x00400954,
	0x00400958,
	0x0040095c,
	0x00400960,
	0x00400964,
	0x00400968,
	0x0040096c,
	0x00400970,
	0x00400974,
	0x00400978,
	0x0040097c,
	0x00400980,
	0x00400984,
	0x00400988,
	0x0040098c,
	0x00400990,
	0x00400994,
	0x00400998,
	0x0040099c,
	0x004009a0,
	0x004009a4,
	0x004009a8,
	0x004009ac,
	0x004009b0,
	0x004009b4,
	0x004009b8,
	0x004009bc,
	0x004009c0,
	0x004009c4,
	0x004009c8,
	0x004009cc,
	0x004009d0,
	0x004009d4,
	0x004009d8,
	0x004009dc,
	0x004009e0,
	0x004009e4,
	0x004009e8,
	0x004009ec,
	0x004009f0,
	0x004009f4,
	0x004009f8,
	0x004009fc,
	NV04_PGRAPH_PATTERN,	/* 2 values from 0x400808 to 0x40080c */
	0x0040080c,
	NV04_PGRAPH_PATTERN_SHAPE,
	NV03_PGRAPH_MONO_COLOR0,
	NV04_PGRAPH_ROP3,
	NV04_PGRAPH_CHROMA,
	NV04_PGRAPH_BETA_AND,
	NV04_PGRAPH_BETA_PREMULT,
	0x00400e70,
	0x00400e74,
	0x00400e78,
	0x00400e7c,
	0x00400e80,
	0x00400e84,
	0x00400e88,
	0x00400e8c,
	0x00400ea0,
	0x00400ea4,
	0x00400ea8,
	0x00400e90,
	0x00400e94,
	0x00400e98,
	0x00400e9c,
	NV10_PGRAPH_WINDOWCLIP_HORIZONTAL, /* 8 values from 0x400f00-0x400f1c */
	NV10_PGRAPH_WINDOWCLIP_VERTICAL,   /* 8 values from 0x400f20-0x400f3c */
	0x00400f04,
	0x00400f24,
	0x00400f08,
	0x00400f28,
	0x00400f0c,
	0x00400f2c,
	0x00400f10,
	0x00400f30,
	0x00400f14,
	0x00400f34,
	0x00400f18,
	0x00400f38,
	0x00400f1c,
	0x00400f3c,
	NV10_PGRAPH_XFMODE0,
	NV10_PGRAPH_XFMODE1,
	NV10_PGRAPH_GLOBALSTATE0,
	NV10_PGRAPH_GLOBALSTATE1,
	NV04_PGRAPH_STORED_FMT,
	NV04_PGRAPH_SOURCE_COLOR,
	NV03_PGRAPH_ABS_X_RAM,	/* 32 values from 0x400400 to 0x40047c */
	NV03_PGRAPH_ABS_Y_RAM,	/* 32 values from 0x400480 to 0x4004fc */
	0x00400404,
	0x00400484,
	0x00400408,
	0x00400488,
	0x0040040c,
	0x0040048c,
	0x00400410,
	0x00400490,
	0x00400414,
	0x00400494,
	0x00400418,
	0x00400498,
	0x0040041c,
	0x0040049c,
	0x00400420,
	0x004004a0,
	0x00400424,
	0x004004a4,
	0x00400428,
	0x004004a8,
	0x0040042c,
	0x004004ac,
	0x00400430,
	0x004004b0,
	0x00400434,
	0x004004b4,
	0x00400438,
	0x004004b8,
	0x0040043c,
	0x004004bc,
	0x00400440,
	0x004004c0,
	0x00400444,
	0x004004c4,
	0x00400448,
	0x004004c8,
	0x0040044c,
	0x004004cc,
	0x00400450,
	0x004004d0,
	0x00400454,
	0x004004d4,
	0x00400458,
	0x004004d8,
	0x0040045c,
	0x004004dc,
	0x00400460,
	0x004004e0,
	0x00400464,
	0x004004e4,
	0x00400468,
	0x004004e8,
	0x0040046c,
	0x004004ec,
	0x00400470,
	0x004004f0,
	0x00400474,
	0x004004f4,
	0x00400478,
	0x004004f8,
	0x0040047c,
	0x004004fc,
	NV03_PGRAPH_ABS_UCLIP_XMIN,
	NV03_PGRAPH_ABS_UCLIP_XMAX,
	NV03_PGRAPH_ABS_UCLIP_YMIN,
	NV03_PGRAPH_ABS_UCLIP_YMAX,
	0x00400550,
	0x00400558,
	0x00400554,
	0x0040055c,
	NV03_PGRAPH_ABS_UCLIPA_XMIN,
	NV03_PGRAPH_ABS_UCLIPA_XMAX,
	NV03_PGRAPH_ABS_UCLIPA_YMIN,
	NV03_PGRAPH_ABS_UCLIPA_YMAX,
	NV03_PGRAPH_ABS_ICLIP_XMAX,
	NV03_PGRAPH_ABS_ICLIP_YMAX,
	NV03_PGRAPH_XY_LOGIC_MISC0,
	NV03_PGRAPH_XY_LOGIC_MISC1,
	NV03_PGRAPH_XY_LOGIC_MISC2,
	NV03_PGRAPH_XY_LOGIC_MISC3,
	NV03_PGRAPH_CLIPX_0,
	NV03_PGRAPH_CLIPX_1,
	NV03_PGRAPH_CLIPY_0,
	NV03_PGRAPH_CLIPY_1,
	NV10_PGRAPH_COMBINER0_IN_ALPHA,
	NV10_PGRAPH_COMBINER1_IN_ALPHA,
	NV10_PGRAPH_COMBINER0_IN_RGB,
	NV10_PGRAPH_COMBINER1_IN_RGB,
	NV10_PGRAPH_COMBINER_COLOR0,
	NV10_PGRAPH_COMBINER_COLOR1,
	NV10_PGRAPH_COMBINER0_OUT_ALPHA,
	NV10_PGRAPH_COMBINER1_OUT_ALPHA,
	NV10_PGRAPH_COMBINER0_OUT_RGB,
	NV10_PGRAPH_COMBINER1_OUT_RGB,
	NV10_PGRAPH_COMBINER_FINAL0,
	NV10_PGRAPH_COMBINER_FINAL1,
	0x00400e00,
	0x00400e04,
	0x00400e08,
	0x00400e0c,
	0x00400e10,
	0x00400e14,
	0x00400e18,
	0x00400e1c,
	0x00400e20,
	0x00400e24,
	0x00400e28,
	0x00400e2c,
	0x00400e30,
	0x00400e34,
	0x00400e38,
	0x00400e3c,
	NV04_PGRAPH_PASSTHRU_0,
	NV04_PGRAPH_PASSTHRU_1,
	NV04_PGRAPH_PASSTHRU_2,
	NV10_PGRAPH_DIMX_TEXTURE,
	NV10_PGRAPH_WDIMX_TEXTURE,
	NV10_PGRAPH_DVD_COLORFMT,
	NV10_PGRAPH_SCALED_FORMAT,
	NV04_PGRAPH_MISC24_0,
	NV04_PGRAPH_MISC24_1,
	NV04_PGRAPH_MISC24_2,
	NV03_PGRAPH_X_MISC,
	NV03_PGRAPH_Y_MISC,
	NV04_PGRAPH_VALID1,
	NV04_PGRAPH_VALID2,
};
366
/*
 * Additional context registers present on NV17 and later chipsets
 * (dev_priv->chipset >= 0x17).  Saved into / restored from
 * graph_state.nv17; like nv10_graph_ctx_regs, table position is the
 * save-area slot (see nv17_graph_ctx_regs_find_offset()).
 */
static int nv17_graph_ctx_regs[] = {
	NV10_PGRAPH_DEBUG_4,
	0x004006b0,
	0x00400eac,
	0x00400eb0,
	0x00400eb4,
	0x00400eb8,
	0x00400ebc,
	0x00400ec0,
	0x00400ec4,
	0x00400ec8,
	0x00400ecc,
	0x00400ed0,
	0x00400ed4,
	0x00400ed8,
	0x00400edc,
	0x00400ee0,
	0x00400a00,
	0x00400a04,
};
387
/*
 * Full software copy of one channel's PGRAPH context, hung off
 * nouveau_channel::pgraph_ctx.  Filled by nv10_graph_unload_context()
 * and written back by nv10_graph_load_context().
 */
struct graph_state {
	int nv10[ARRAY_SIZE(nv10_graph_ctx_regs)];	/* common NV10 regs */
	int nv17[ARRAY_SIZE(nv17_graph_ctx_regs)];	/* NV17+ extra regs */
	struct pipe_state pipe_state;			/* transform pipe */
	uint32_t lma_window[4];				/* NV17 LMA method state */
};
394
/* Read ARRAY_SIZE(state) words of pipe space starting at 'addr' into
 * 'state'.  The pipe auto-increments after each PIPE_DATA access, so a
 * single address write suffices.  'state' must be a true array (its
 * size is taken with ARRAY_SIZE). */
#define PIPE_SAVE(dev, state, addr) \
	do { \
		int __i; \
		nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr); \
		for (__i = 0; __i < ARRAY_SIZE(state); __i++) \
			state[__i] = nv_rd32(dev, NV10_PGRAPH_PIPE_DATA); \
	} while (0)

/* Inverse of PIPE_SAVE: write 'state' back to pipe space at 'addr'. */
#define PIPE_RESTORE(dev, state, addr) \
	do { \
		int __i; \
		nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr); \
		for (__i = 0; __i < ARRAY_SIZE(state); __i++) \
			nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, state[__i]); \
	} while (0)
410
/* Snapshot every pipe-space window of 'chan' into its graph_state, for
 * later replay by nv10_graph_load_pipe(). */
static void nv10_graph_save_pipe(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct graph_state *pgraph_ctx = chan->pgraph_ctx;
	struct pipe_state *pipe = &pgraph_ctx->pipe_state;

	PIPE_SAVE(dev, pipe->pipe_0x4400, 0x4400);
	PIPE_SAVE(dev, pipe->pipe_0x0200, 0x0200);
	PIPE_SAVE(dev, pipe->pipe_0x6400, 0x6400);
	PIPE_SAVE(dev, pipe->pipe_0x6800, 0x6800);
	PIPE_SAVE(dev, pipe->pipe_0x6c00, 0x6c00);
	PIPE_SAVE(dev, pipe->pipe_0x7000, 0x7000);
	PIPE_SAVE(dev, pipe->pipe_0x7400, 0x7400);
	PIPE_SAVE(dev, pipe->pipe_0x7800, 0x7800);
	PIPE_SAVE(dev, pipe->pipe_0x0040, 0x0040);
	PIPE_SAVE(dev, pipe->pipe_0x0000, 0x0000);
}
428
/*
 * Restore the pipe-space snapshot taken by nv10_graph_save_pipe().
 * The transform engine is first parked (XFMODE forced to a known state,
 * a few pipe locations primed with identity-ish values) before the
 * saved windows are replayed; XFMODE is put back afterwards.  The
 * restore order of the windows is deliberate — do not rearrange.
 */
static void nv10_graph_load_pipe(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct graph_state *pgraph_ctx = chan->pgraph_ctx;
	struct pipe_state *pipe = &pgraph_ctx->pipe_state;
	uint32_t xfmode0, xfmode1;
	int i;

	nouveau_wait_for_idle(dev);
	/* XXX check haiku comments */
	xfmode0 = nv_rd32(dev, NV10_PGRAPH_XFMODE0);
	xfmode1 = nv_rd32(dev, NV10_PGRAPH_XFMODE1);
	nv_wr32(dev, NV10_PGRAPH_XFMODE0, 0x10000000);
	nv_wr32(dev, NV10_PGRAPH_XFMODE1, 0x00000000);
	/* Prime 0x64c0 with four 1.0f then four 0.0f values. */
	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
	for (i = 0; i < 4; i++)
		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
	for (i = 0; i < 4; i++)
		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);

	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
	for (i = 0; i < 3; i++)
		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);

	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
	for (i = 0; i < 3; i++)
		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);

	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
	nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000008);


	PIPE_RESTORE(dev, pipe->pipe_0x0200, 0x0200);
	nouveau_wait_for_idle(dev);

	/* restore XFMODE */
	nv_wr32(dev, NV10_PGRAPH_XFMODE0, xfmode0);
	nv_wr32(dev, NV10_PGRAPH_XFMODE1, xfmode1);
	PIPE_RESTORE(dev, pipe->pipe_0x6400, 0x6400);
	PIPE_RESTORE(dev, pipe->pipe_0x6800, 0x6800);
	PIPE_RESTORE(dev, pipe->pipe_0x6c00, 0x6c00);
	PIPE_RESTORE(dev, pipe->pipe_0x7000, 0x7000);
	PIPE_RESTORE(dev, pipe->pipe_0x7400, 0x7400);
	PIPE_RESTORE(dev, pipe->pipe_0x7800, 0x7800);
	PIPE_RESTORE(dev, pipe->pipe_0x4400, 0x4400);
	PIPE_RESTORE(dev, pipe->pipe_0x0000, 0x0000);
	PIPE_RESTORE(dev, pipe->pipe_0x0040, 0x0040);
	nouveau_wait_for_idle(dev);
}
478
/*
 * Fill a freshly created channel's pipe_state with the hardware's
 * default pipe contents, so the first nv10_graph_load_pipe() for the
 * channel programs sane state.  Nothing is written to the hardware
 * here; we only populate the in-memory snapshot.  0x3f800000 is float
 * 1.0f, 0x3f000000 is 0.5f, 0xbf800000 is -1.0f, 0x7149f2ca is
 * approximately 1e30f (effectively "infinity" for clip values).
 */
static void nv10_graph_create_pipe(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct graph_state *pgraph_ctx = chan->pgraph_ctx;
	struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state;
	uint32_t *fifo_pipe_state_addr;
	int i;
/* Select which pipe_state window the NV_WRITE_PIPE_INIT()s below fill. */
#define PIPE_INIT(addr) \
	do { \
		fifo_pipe_state_addr = fifo_pipe_state->pipe_##addr; \
	} while (0)
/* Sanity check: complain if the writes did not fill the window exactly. */
#define PIPE_INIT_END(addr) \
	do { \
		uint32_t *__end_addr = fifo_pipe_state->pipe_##addr + \
				ARRAY_SIZE(fifo_pipe_state->pipe_##addr); \
		if (fifo_pipe_state_addr != __end_addr) \
			NV_ERROR(dev, "incomplete pipe init for 0x%x :  %p/%p\n", \
				addr, fifo_pipe_state_addr, __end_addr); \
	} while (0)
#define NV_WRITE_PIPE_INIT(value) *(fifo_pipe_state_addr++) = value

	PIPE_INIT(0x0200);
	for (i = 0; i < 48; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x0200);

	PIPE_INIT(0x6400);
	for (i = 0; i < 211; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x40000000);
	NV_WRITE_PIPE_INIT(0x40000000);
	NV_WRITE_PIPE_INIT(0x40000000);
	NV_WRITE_PIPE_INIT(0x40000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f000000);
	NV_WRITE_PIPE_INIT(0x3f000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	PIPE_INIT_END(0x6400);

	PIPE_INIT(0x6800);
	for (i = 0; i < 162; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	for (i = 0; i < 25; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x6800);

	PIPE_INIT(0x6c00);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0xbf800000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x6c00);

	PIPE_INIT(0x7000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	for (i = 0; i < 35; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x7000);

	PIPE_INIT(0x7400);
	for (i = 0; i < 48; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x7400);

	PIPE_INIT(0x7800);
	for (i = 0; i < 48; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x7800);

	PIPE_INIT(0x4400);
	for (i = 0; i < 32; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x4400);

	PIPE_INIT(0x0000);
	for (i = 0; i < 16; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x0000);

	PIPE_INIT(0x0040);
	for (i = 0; i < 4; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x0040);

#undef PIPE_INIT
#undef PIPE_INIT_END
#undef NV_WRITE_PIPE_INIT
}
633
634static int nv10_graph_ctx_regs_find_offset(struct drm_device *dev, int reg)
635{
636 int i;
637 for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++) {
638 if (nv10_graph_ctx_regs[i] == reg)
639 return i;
640 }
641 NV_ERROR(dev, "unknow offset nv10_ctx_regs %d\n", reg);
642 return -1;
643}
644
645static int nv17_graph_ctx_regs_find_offset(struct drm_device *dev, int reg)
646{
647 int i;
648 for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++) {
649 if (nv17_graph_ctx_regs[i] == reg)
650 return i;
651 }
652 NV_ERROR(dev, "unknow offset nv17_ctx_regs %d\n", reg);
653 return -1;
654}
655
/*
 * Make 'chan' the channel whose context is resident in PGRAPH: replay
 * all saved context registers (plus the NV17 extras where present) and
 * the pipe state, then stamp the channel id into CTX_USER and flag the
 * context as valid via CTX_CONTROL.  Returns 0 (cannot fail).
 */
int nv10_graph_load_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct graph_state *pgraph_ctx = chan->pgraph_ctx;
	uint32_t tmp;
	int i;

	for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
		nv_wr32(dev, nv10_graph_ctx_regs[i], pgraph_ctx->nv10[i]);
	if (dev_priv->chipset >= 0x17) {
		for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
			nv_wr32(dev, nv17_graph_ctx_regs[i],
						pgraph_ctx->nv17[i]);
	}

	nv10_graph_load_pipe(chan);

	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
	/* Channel id lives in the top byte of CTX_USER. */
	tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER);
	nv_wr32(dev, NV10_PGRAPH_CTX_USER, (tmp & 0xffffff) | chan->id << 24);
	tmp = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2);
	nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, tmp & 0xcfffffff);
	return 0;
}
681
/*
 * Save the currently resident PGRAPH context (if any) into its
 * channel's graph_state, then park CTX_USER on an invalid channel id
 * and mark the context invalid via CTX_CONTROL.  Returns 0 both when a
 * context was saved and when none was resident.
 */
int
nv10_graph_unload_context(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_channel *chan;
	struct graph_state *ctx;
	uint32_t tmp;
	int i;

	/* No channel resident -> nothing to save. */
	chan = pgraph->channel(dev);
	if (!chan)
		return 0;
	ctx = chan->pgraph_ctx;

	for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
		ctx->nv10[i] = nv_rd32(dev, nv10_graph_ctx_regs[i]);

	if (dev_priv->chipset >= 0x17) {
		for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
			ctx->nv17[i] = nv_rd32(dev, nv17_graph_ctx_regs[i]);
	}

	nv10_graph_save_pipe(chan);

	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
	/* Point CTX_USER at a channel id that can never be active. */
	tmp  = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
	tmp |= (pfifo->channels - 1) << 24;
	nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
	return 0;
}
714
/*
 * Hardware-initiated context switch handler: save the outgoing
 * channel's context and load the one the chip trapped on.  PGRAPH
 * fifo access is disabled around the switch so no new methods arrive
 * mid-save.
 */
void
nv10_graph_context_switch(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_channel *chan = NULL;
	int chid;

	pgraph->fifo_access(dev, false);
	nouveau_wait_for_idle(dev);

	/* If previous context is valid, we need to save it */
	nv10_graph_unload_context(dev);

	/* Load context for next channel */
	chid = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
	chan = dev_priv->fifos[chid];
	if (chan)
		nv10_graph_load_context(chan);

	pgraph->fifo_access(dev, true);
}
737
/* Store 'val' into the software context slot for register 'reg'.
 *
 * The find_offset() helpers return -1 when the register is not in the
 * context table, and a zero-based index otherwise — index 0 is valid
 * (e.g. NV10_PGRAPH_DEBUG_4 is entry 0 of nv17_graph_ctx_regs).  The
 * guard must therefore be ">= 0", not "> 0"; the old "> 0" test
 * silently dropped writes to the first table entry. */
#define NV_WRITE_CTX(reg, val) do { \
	int offset = nv10_graph_ctx_regs_find_offset(dev, reg); \
	if (offset >= 0) \
		pgraph_ctx->nv10[offset] = val; \
	} while (0)

#define NV17_WRITE_CTX(reg, val) do { \
	int offset = nv17_graph_ctx_regs_find_offset(dev, reg); \
	if (offset >= 0) \
		pgraph_ctx->nv17[offset] = val; \
	} while (0)
749
750struct nouveau_channel *
751nv10_graph_channel(struct drm_device *dev)
752{
753 struct drm_nouveau_private *dev_priv = dev->dev_private;
754 int chid = dev_priv->engine.fifo.channels;
755
756 if (nv_rd32(dev, NV10_PGRAPH_CTX_CONTROL) & 0x00010000)
757 chid = nv_rd32(dev, NV10_PGRAPH_CTX_USER) >> 24;
758
759 if (chid >= dev_priv->engine.fifo.channels)
760 return NULL;
761
762 return dev_priv->fifos[chid];
763}
764
/*
 * Allocate and initialise the software PGRAPH context for a new
 * channel: zeroed graph_state with a handful of registers seeded to
 * their required defaults, plus the default pipe snapshot.
 *
 * Returns 0 on success, -ENOMEM if the context allocation fails.
 */
int nv10_graph_create_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct graph_state *pgraph_ctx;

	NV_DEBUG(dev, "nv10_graph_context_create %d\n", chan->id);

	chan->pgraph_ctx = pgraph_ctx = kzalloc(sizeof(*pgraph_ctx),
						GFP_KERNEL);
	if (pgraph_ctx == NULL)
		return -ENOMEM;


	NV_WRITE_CTX(0x00400e88, 0x08000000);
	NV_WRITE_CTX(0x00400e9c, 0x4b7fffff);
	NV_WRITE_CTX(NV03_PGRAPH_XY_LOGIC_MISC0, 0x0001ffff);
	NV_WRITE_CTX(0x00400e10, 0x00001000);
	NV_WRITE_CTX(0x00400e14, 0x00001000);
	NV_WRITE_CTX(0x00400e30, 0x00080008);
	NV_WRITE_CTX(0x00400e34, 0x00080008);
	if (dev_priv->chipset >= 0x17) {
		/* is it really needed ??? */
		/* Seed the NV17 slots from the current hardware values. */
		NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4,
					nv_rd32(dev, NV10_PGRAPH_DEBUG_4));
		NV17_WRITE_CTX(0x004006b0, nv_rd32(dev, 0x004006b0));
		NV17_WRITE_CTX(0x00400eac, 0x0fff0000);
		NV17_WRITE_CTX(0x00400eb0, 0x0fff0000);
		NV17_WRITE_CTX(0x00400ec0, 0x00000080);
		NV17_WRITE_CTX(0x00400ed0, 0x00000080);
	}
	/* Pre-stamp the channel id so load_context restores it. */
	NV_WRITE_CTX(NV10_PGRAPH_CTX_USER, chan->id << 24);

	nv10_graph_create_pipe(chan);
	return 0;
}
801
/* Free the software PGRAPH context allocated by
 * nv10_graph_create_context() and clear the channel's pointer to it. */
void nv10_graph_destroy_context(struct nouveau_channel *chan)
{
	struct graph_state *pgraph_ctx = chan->pgraph_ctx;

	kfree(pgraph_ctx);
	chan->pgraph_ctx = NULL;
}
809
/*
 * Program PGRAPH tiling region 'i' to cover [addr, addr + size).
 * A non-zero pitch enables the region (bit 31 of the address register);
 * pitch == 0 disables it.  The max(1u, ...) guards the limit
 * computation against addr + size == 0 (the all-disabled case).
 */
void
nv10_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
			     uint32_t size, uint32_t pitch)
{
	uint32_t limit = max(1u, addr + size) - 1;

	if (pitch)
		addr |= 1 << 31;

	nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), limit);
	nv_wr32(dev, NV10_PGRAPH_TSIZE(i), pitch);
	nv_wr32(dev, NV10_PGRAPH_TILE(i), addr);
}
823
/*
 * Bring up the NV10 PGRAPH engine: pulse its reset bit in PMC_ENABLE,
 * clear and unmask interrupts, program the DEBUG registers (with
 * NV17-specific extras), disable all tiling regions, and park the
 * context machinery on an invalid channel.  Returns 0 (cannot fail).
 */
int nv10_graph_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t tmp;
	int i;

	/* Reset PGRAPH by toggling its PMC enable bit off then on. */
	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
			~NV_PMC_ENABLE_PGRAPH);
	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
			 NV_PMC_ENABLE_PGRAPH);

	/* Ack any stale interrupts, then enable all sources. */
	nv_wr32(dev, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);

	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x00118700);
	/* nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x24E00810); */ /* 0x25f92ad9 */
	nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x25f92ad9);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0x55DE0830 |
				(1<<29) |
				(1<<31));
	if (dev_priv->chipset >= 0x17) {
		nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x1f000000);
		nv_wr32(dev, 0x400a10, 0x3ff3fb6);
		nv_wr32(dev, 0x400838, 0x2f8684);
		nv_wr32(dev, 0x40083c, 0x115f3f);
		nv_wr32(dev, 0x004006b0, 0x40000020);
	} else
		nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000);

	/* Turn all the tiling regions off. */
	for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
		nv10_graph_set_region_tiling(dev, i, 0, 0, 0);

	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH1, 0x00000000);
	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH2, 0x00000000);
	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH3, 0x00000000);
	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH4, 0x00000000);
	nv_wr32(dev, NV10_PGRAPH_STATE      , 0xFFFFFFFF);

	/* Park CTX_USER on a channel id that can never be active. */
	tmp  = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
	tmp |= (dev_priv->engine.fifo.channels - 1) << 24;
	nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
	nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, 0x08000000);

	return 0;
}
873
/* Engine teardown hook; NV10 PGRAPH needs no explicit shutdown. */
void nv10_graph_takedown(struct drm_device *dev)
{
}
877
/*
 * Software method handler for the NV17 celsius LMA_WINDOW methods
 * (0x1638-0x1644).  The four data words are accumulated in
 * ctx->lma_window; only when the last one (mthd 0x1644) arrives is the
 * window actually programmed into pipe space at 0x6790, bracketed by a
 * save/restore dance of the surrounding pipe and XFMODE state.
 * Always returns 0.
 */
static int
nv17_graph_mthd_lma_window(struct nouveau_channel *chan, int grclass,
			   int mthd, uint32_t data)
{
	struct drm_device *dev = chan->dev;
	struct graph_state *ctx = chan->pgraph_ctx;
	struct pipe_state *pipe = &ctx->pipe_state;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	uint32_t pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3];
	uint32_t xfmode0, xfmode1;
	int i;

	/* Methods arrive in order 0x1638..0x1644 -> indices 0..3. */
	ctx->lma_window[(mthd - 0x1638) / 4] = data;

	if (mthd != 0x1644)
		return 0;

	nouveau_wait_for_idle(dev);

	PIPE_SAVE(dev, pipe_0x0040, 0x0040);
	PIPE_SAVE(dev, pipe->pipe_0x0200, 0x0200);

	PIPE_RESTORE(dev, ctx->lma_window, 0x6790);

	nouveau_wait_for_idle(dev);

	xfmode0 = nv_rd32(dev, NV10_PGRAPH_XFMODE0);
	xfmode1 = nv_rd32(dev, NV10_PGRAPH_XFMODE1);

	PIPE_SAVE(dev, pipe->pipe_0x4400, 0x4400);
	PIPE_SAVE(dev, pipe_0x64c0, 0x64c0);
	PIPE_SAVE(dev, pipe_0x6ab0, 0x6ab0);
	PIPE_SAVE(dev, pipe_0x6a80, 0x6a80);

	nouveau_wait_for_idle(dev);

	/* Park the transform engine, as in nv10_graph_load_pipe(). */
	nv_wr32(dev, NV10_PGRAPH_XFMODE0, 0x10000000);
	nv_wr32(dev, NV10_PGRAPH_XFMODE1, 0x00000000);
	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
	for (i = 0; i < 4; i++)
		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
	for (i = 0; i < 4; i++)
		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);

	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
	for (i = 0; i < 3; i++)
		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);

	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
	for (i = 0; i < 3; i++)
		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);

	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
	nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000008);

	PIPE_RESTORE(dev, pipe->pipe_0x0200, 0x0200);

	nouveau_wait_for_idle(dev);

	PIPE_RESTORE(dev, pipe_0x0040, 0x0040);

	nv_wr32(dev, NV10_PGRAPH_XFMODE0, xfmode0);
	nv_wr32(dev, NV10_PGRAPH_XFMODE1, xfmode1);

	PIPE_RESTORE(dev, pipe_0x64c0, 0x64c0);
	PIPE_RESTORE(dev, pipe_0x6ab0, 0x6ab0);
	PIPE_RESTORE(dev, pipe_0x6a80, 0x6a80);
	PIPE_RESTORE(dev, pipe->pipe_0x4400, 0x4400);

	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000000c0);
	nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);

	nouveau_wait_for_idle(dev);

	/* NOTE(review): fifo access is re-enabled here but is not
	 * disabled in this function — presumably the method dispatcher
	 * disabled it before calling us; confirm against the caller. */
	pgraph->fifo_access(dev, true);

	return 0;
}
957
/*
 * Software method handler for the NV17 celsius LMA_ENABLE method
 * (0x1658): sets the LMA enable bits in PGRAPH_DEBUG_4 (bit 8) and
 * 0x4006b0 (bit 27).  Always returns 0.
 */
static int
nv17_graph_mthd_lma_enable(struct nouveau_channel *chan, int grclass,
			   int mthd, uint32_t data)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;

	nouveau_wait_for_idle(dev);

	nv_wr32(dev, NV10_PGRAPH_DEBUG_4,
		nv_rd32(dev, NV10_PGRAPH_DEBUG_4) | 0x1 << 8);
	nv_wr32(dev, 0x004006b0,
		nv_rd32(dev, 0x004006b0) | 0x8 << 24);

	/* NOTE(review): re-enables fifo access without disabling it
	 * here — presumably the dispatcher did; confirm at call site. */
	pgraph->fifo_access(dev, true);

	return 0;
}
977
/* Software methods of the NV17 celsius class (0x0099) that the driver
 * must emulate; terminated by an empty entry. */
static struct nouveau_pgraph_object_method nv17_graph_celsius_mthds[] = {
	{ 0x1638, nv17_graph_mthd_lma_window },
	{ 0x163c, nv17_graph_mthd_lma_window },
	{ 0x1640, nv17_graph_mthd_lma_window },
	{ 0x1644, nv17_graph_mthd_lma_window },
	{ 0x1658, nv17_graph_mthd_lma_enable },
	{}
};
986
/* Object classes supported by NV10-family PGRAPH; only the NV17
 * celsius class needs software methods.  Terminated by an empty
 * entry. */
struct nouveau_pgraph_object_class nv10_graph_grclass[] = {
	{ 0x0030, false, NULL }, /* null */
	{ 0x0039, false, NULL }, /* m2mf */
	{ 0x004a, false, NULL }, /* gdirect */
	{ 0x005f, false, NULL }, /* imageblit */
	{ 0x009f, false, NULL }, /* imageblit (nv12) */
	{ 0x008a, false, NULL }, /* ifc */
	{ 0x0089, false, NULL }, /* sifm */
	{ 0x0062, false, NULL }, /* surf2d */
	{ 0x0043, false, NULL }, /* rop */
	{ 0x0012, false, NULL }, /* beta1 */
	{ 0x0072, false, NULL }, /* beta4 */
	{ 0x0019, false, NULL }, /* cliprect */
	{ 0x0044, false, NULL }, /* pattern */
	{ 0x0052, false, NULL }, /* swzsurf */
	{ 0x0093, false, NULL }, /* surf3d */
	{ 0x0094, false, NULL }, /* tex_tri */
	{ 0x0095, false, NULL }, /* multitex_tri */
	{ 0x0056, false, NULL }, /* celsius (nv10) */
	{ 0x0096, false, NULL }, /* celsius (nv11) */
	{ 0x0099, false, nv17_graph_celsius_mthds }, /* celsius (nv17) */
	{}
};
diff --git a/drivers/gpu/drm/nouveau/nv17_gpio.c b/drivers/gpu/drm/nouveau/nv17_gpio.c
new file mode 100644
index 000000000000..2e58c331e9b7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv17_gpio.c
@@ -0,0 +1,92 @@
1/*
2 * Copyright (C) 2009 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "nouveau_drv.h"
29#include "nouveau_hw.h"
30
31static bool
32get_gpio_location(struct dcb_gpio_entry *ent, uint32_t *reg, uint32_t *shift,
33 uint32_t *mask)
34{
35 if (ent->line < 2) {
36 *reg = NV_PCRTC_GPIO;
37 *shift = ent->line * 16;
38 *mask = 0x11;
39
40 } else if (ent->line < 10) {
41 *reg = NV_PCRTC_GPIO_EXT;
42 *shift = (ent->line - 2) * 4;
43 *mask = 0x3;
44
45 } else if (ent->line < 14) {
46 *reg = NV_PCRTC_850;
47 *shift = (ent->line - 10) * 4;
48 *mask = 0x3;
49
50 } else {
51 return false;
52 }
53
54 return true;
55}
56
57int
58nv17_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag)
59{
60 struct dcb_gpio_entry *ent = nouveau_bios_gpio_entry(dev, tag);
61 uint32_t reg, shift, mask, value;
62
63 if (!ent)
64 return -ENODEV;
65
66 if (!get_gpio_location(ent, &reg, &shift, &mask))
67 return -ENODEV;
68
69 value = NVReadCRTC(dev, 0, reg) >> shift;
70
71 return (ent->invert ? 1 : 0) ^ (value & 1);
72}
73
74int
75nv17_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state)
76{
77 struct dcb_gpio_entry *ent = nouveau_bios_gpio_entry(dev, tag);
78 uint32_t reg, shift, mask, value;
79
80 if (!ent)
81 return -ENODEV;
82
83 if (!get_gpio_location(ent, &reg, &shift, &mask))
84 return -ENODEV;
85
86 value = ((ent->invert ? 1 : 0) ^ (state ? 1 : 0)) << shift;
87 mask = ~(mask << shift);
88
89 NVWriteCRTC(dev, 0, reg, value | (NVReadCRTC(dev, 0, reg) & mask));
90
91 return 0;
92}
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
new file mode 100644
index 000000000000..74c880374fb9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -0,0 +1,778 @@
1/*
2 * Copyright (C) 2009 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm_crtc_helper.h"
29#include "nouveau_drv.h"
30#include "nouveau_encoder.h"
31#include "nouveau_connector.h"
32#include "nouveau_crtc.h"
33#include "nouveau_hw.h"
34#include "nv17_tv.h"
35
/* TV DAC load detection for NV42/NV43 chipsets.
 *
 * Drives test levels onto the TV DAC pins and samples the load-detect
 * status bits, saving and restoring all the hardware state it touches.
 * Returns the raw sample word; the caller extracts the detected pins
 * from bits [31:28] (see nv17_tv_detect's ">> 28 & 0xe").
 *
 * NOTE(review): the 0x680cxx registers are undocumented here; comments
 * below describe only what the code visibly does.
 */
static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t testval, regoffset = nv04_dac_output_offset(encoder);
	uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end,
		fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c;
	uint32_t sample = 0;
	int head;

/* Pack 10-bit R/G/B test levels into a single word. */
#define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20)
	testval = RGB_TEST_DATA(0x82, 0xeb, 0x82);
	if (dev_priv->vbios.tvdactestval)
		testval = dev_priv->vbios.tvdactestval;

	dacclk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset);
	/* Bit 8 of DACCLK selects which head the DAC is bound to. */
	head = (dacclk & 0x100) >> 8;

	/* Save the previous state. */
	gpio1 = nv17_gpio_get(dev, DCB_GPIO_TVDAC1);
	gpio0 = nv17_gpio_get(dev, DCB_GPIO_TVDAC0);
	fp_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL);
	fp_hsync_start = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START);
	fp_hsync_end = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END);
	fp_control = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL);
	test_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
	ctv_1c = NVReadRAMDAC(dev, head, 0x680c1c);
	ctv_14 = NVReadRAMDAC(dev, head, 0x680c14);
	ctv_6c = NVReadRAMDAC(dev, head, 0x680c6c);

	/* Prepare the DAC for load detection. */
	nv17_gpio_set(dev, DCB_GPIO_TVDAC1, true);
	nv17_gpio_set(dev, DCB_GPIO_TVDAC0, true);

	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, 1343);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, 1047);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, 1183);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL,
		      NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS |
		      NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12 |
		      NV_PRAMDAC_FP_TG_CONTROL_READ_PROG |
		      NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS |
		      NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS);

	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, 0);

	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset,
		      (dacclk & ~0xff) | 0x22);
	msleep(1);
	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset,
		      (dacclk & ~0xff) | 0x21);

	NVWriteRAMDAC(dev, head, 0x680c1c, 1 << 20);
	NVWriteRAMDAC(dev, head, 0x680c14, 4 << 16);

	/* Sample pin 0x4 (usually S-video luma). */
	NVWriteRAMDAC(dev, head, 0x680c6c, testval >> 10 & 0x3ff);
	msleep(20);
	sample |= NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset)
		& 0x4 << 28;

	/* Sample the remaining pins. */
	NVWriteRAMDAC(dev, head, 0x680c6c, testval & 0x3ff);
	msleep(20);
	sample |= NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset)
		& 0xa << 28;

	/* Restore the previous state, in reverse order of the setup. */
	NVWriteRAMDAC(dev, head, 0x680c1c, ctv_1c);
	NVWriteRAMDAC(dev, head, 0x680c14, ctv_14);
	NVWriteRAMDAC(dev, head, 0x680c6c, ctv_6c);
	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, dacclk);
	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, test_ctrl);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, fp_control);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, fp_hsync_end);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, fp_hsync_start);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, fp_htotal);
	nv17_gpio_set(dev, DCB_GPIO_TVDAC1, gpio1);
	nv17_gpio_set(dev, DCB_GPIO_TVDAC0, gpio0);

	return sample;
}
118
/* Detect whether a TV is connected by load-sensing the DAC pins and
 * derive the subconnector type (composite/S-video/component/SCART)
 * from which pins saw a load.  Updates the connector's subconnector
 * property as a side effect. */
static enum drm_connector_status
nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
	struct drm_device *dev = encoder->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_mode_config *conf = &dev->mode_config;
	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
	struct dcb_entry *dcb = tv_enc->base.dcb;

	/* NV42/NV43 need the chip-specific sampler above; everything else
	 * uses the generic nv17 DAC load detection.  Only bits 1-3 of the
	 * pin mask are kept. */
	if (dev_priv->chipset == 0x42 ||
	    dev_priv->chipset == 0x43)
		tv_enc->pin_mask = nv42_tv_sample_load(encoder) >> 28 & 0xe;
	else
		tv_enc->pin_mask = nv17_dac_sample_load(encoder) >> 28 & 0xe;

	switch (tv_enc->pin_mask) {
	case 0x2:
	case 0x4:
		tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Composite;
		break;
	case 0xc:
		tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SVIDEO;
		break;
	case 0xe:
		/* All pins loaded: component if the board has it, else SCART. */
		if (dcb->tvconf.has_component_output)
			tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Component;
		else
			tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SCART;
		break;
	default:
		tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
		break;
	}

	drm_connector_property_set_value(connector,
					 conf->tv_subconnector_property,
					 tv_enc->subconnector);

	/* Unknown (0) means no load was recognized on any pin. */
	if (tv_enc->subconnector) {
		NV_INFO(dev, "Load detected on output %c\n",
			'@' + ffs(dcb->or));
		return connector_status_connected;
	} else {
		return connector_status_disconnected;
	}
}
165
/* Candidate scaled-mode sizes offered in CTV ("HD") encoder mode;
 * nv17_tv_get_modes() skips entries larger than the current output
 * mode and marks the exact match as preferred. */
static const struct {
	int hdisplay;
	int vdisplay;
} modes[] = {
	{ 640, 400 },
	{ 640, 480 },
	{ 720, 480 },
	{ 720, 576 },
	{ 800, 600 },
	{ 1024, 768 },
	{ 1280, 720 },
	{ 1280, 1024 },
	{ 1920, 1080 }
};
180
181static int nv17_tv_get_modes(struct drm_encoder *encoder,
182 struct drm_connector *connector)
183{
184 struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
185 struct drm_display_mode *mode;
186 struct drm_display_mode *output_mode;
187 int n = 0;
188 int i;
189
190 if (tv_norm->kind != CTV_ENC_MODE) {
191 struct drm_display_mode *tv_mode;
192
193 for (tv_mode = nv17_tv_modes; tv_mode->hdisplay; tv_mode++) {
194 mode = drm_mode_duplicate(encoder->dev, tv_mode);
195
196 mode->clock = tv_norm->tv_enc_mode.vrefresh *
197 mode->htotal / 1000 *
198 mode->vtotal / 1000;
199
200 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
201 mode->clock *= 2;
202
203 if (mode->hdisplay == tv_norm->tv_enc_mode.hdisplay &&
204 mode->vdisplay == tv_norm->tv_enc_mode.vdisplay)
205 mode->type |= DRM_MODE_TYPE_PREFERRED;
206
207 drm_mode_probed_add(connector, mode);
208 n++;
209 }
210 return n;
211 }
212
213 /* tv_norm->kind == CTV_ENC_MODE */
214 output_mode = &tv_norm->ctv_enc_mode.mode;
215 for (i = 0; i < ARRAY_SIZE(modes); i++) {
216 if (modes[i].hdisplay > output_mode->hdisplay ||
217 modes[i].vdisplay > output_mode->vdisplay)
218 continue;
219
220 if (modes[i].hdisplay == output_mode->hdisplay &&
221 modes[i].vdisplay == output_mode->vdisplay) {
222 mode = drm_mode_duplicate(encoder->dev, output_mode);
223 mode->type |= DRM_MODE_TYPE_PREFERRED;
224 } else {
225 mode = drm_cvt_mode(encoder->dev, modes[i].hdisplay,
226 modes[i].vdisplay, 60, false,
227 output_mode->flags & DRM_MODE_FLAG_INTERLACE,
228 false);
229 }
230
231 /* CVT modes are sometimes unsuitable... */
232 if (output_mode->hdisplay <= 720
233 || output_mode->hdisplay >= 1920) {
234 mode->htotal = output_mode->htotal;
235 mode->hsync_start = (mode->hdisplay + (mode->htotal
236 - mode->hdisplay) * 9 / 10) & ~7;
237 mode->hsync_end = mode->hsync_start + 8;
238 }
239 if (output_mode->vdisplay >= 1024) {
240 mode->vtotal = output_mode->vtotal;
241 mode->vsync_start = output_mode->vsync_start;
242 mode->vsync_end = output_mode->vsync_end;
243 }
244
245 mode->type |= DRM_MODE_TYPE_DRIVER;
246 drm_mode_probed_add(connector, mode);
247 n++;
248 }
249 return n;
250}
251
/* Validate a user-requested mode against the current TV norm.
 * CTV ("HD") norms require the mode to fit inside the fixed output
 * mode and match its interlace setting; plain TV norms only require a
 * vertical refresh close to the norm's (within 0.6 Hz) since the
 * encoder rescales the image itself.  Returns a MODE_* status. */
static int nv17_tv_mode_valid(struct drm_encoder *encoder,
			      struct drm_display_mode *mode)
{
	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);

	if (tv_norm->kind == CTV_ENC_MODE) {
		struct drm_display_mode *output_mode =
			&tv_norm->ctv_enc_mode.mode;

		if (mode->clock > 400000)
			return MODE_CLOCK_HIGH;

		if (mode->hdisplay > output_mode->hdisplay ||
		    mode->vdisplay > output_mode->vdisplay)
			return MODE_BAD;

		if ((mode->flags & DRM_MODE_FLAG_INTERLACE) !=
		    (output_mode->flags & DRM_MODE_FLAG_INTERLACE))
			return MODE_NO_INTERLACE;

		if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
			return MODE_NO_DBLESCAN;

	} else {
		const int vsync_tolerance = 600;	/* mHz */

		if (mode->clock > 70000)
			return MODE_CLOCK_HIGH;

		if (abs(drm_mode_vrefresh(mode) * 1000 -
			tv_norm->tv_enc_mode.vrefresh) > vsync_tolerance)
			return MODE_VSYNC;

		/* The encoder takes care of the actual interlacing */
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			return MODE_NO_INTERLACE;
	}

	return MODE_OK;
}
292
293static bool nv17_tv_mode_fixup(struct drm_encoder *encoder,
294 struct drm_display_mode *mode,
295 struct drm_display_mode *adjusted_mode)
296{
297 struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
298
299 if (tv_norm->kind == CTV_ENC_MODE)
300 adjusted_mode->clock = tv_norm->ctv_enc_mode.mode.clock;
301 else
302 adjusted_mode->clock = 90000;
303
304 return true;
305}
306
/* DPMS hook: power the TV encoder and its DACs up or down.  Caches the
 * last mode set to avoid redundant hardware programming. */
static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
{
	struct drm_device *dev = encoder->dev;
	struct nv17_tv_state *regs = &to_tv_enc(encoder)->state;
	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);

	if (nouveau_encoder(encoder)->last_dpms == mode)
		return;
	nouveau_encoder(encoder)->last_dpms = mode;

	NV_INFO(dev, "Setting dpms mode %d on TV encoder (output %d)\n",
		mode, nouveau_encoder(encoder)->dcb->index);

	/* Bit 0 of PTV 0x200 is set only when powering on in TV mode;
	 * presumably the TV encoder enable bit. */
	regs->ptv_200 &= ~1;

	if (tv_norm->kind == CTV_ENC_MODE) {
		/* CTV mode uses the FP (flatpanel) path; track DPMS there. */
		nv04_dfp_update_fp_control(encoder, mode);

	} else {
		nv04_dfp_update_fp_control(encoder, DRM_MODE_DPMS_OFF);

		if (mode == DRM_MODE_DPMS_ON)
			regs->ptv_200 |= 1;
	}

	nv_load_ptv(dev, regs, 200);

	/* Switch the TV DAC power GPIOs along with the encoder. */
	nv17_gpio_set(dev, DCB_GPIO_TVDAC1, mode == DRM_MODE_DPMS_ON);
	nv17_gpio_set(dev, DCB_GPIO_TVDAC0, mode == DRM_MODE_DPMS_ON);

	nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON);
}
339
/* Modeset prepare hook: power the encoder down, detach conflicting FP
 * encoders from this head (CTV mode needs the FP path for itself), and
 * program the CRE_LCD and DACCLK routing for the upcoming mode. */
static void nv17_tv_prepare(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_encoder_helper_funcs *helper = encoder->helper_private;
	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
	int head = nouveau_crtc(encoder->crtc)->index;
	uint8_t *cr_lcd = &dev_priv->mode_reg.crtc_reg[head].CRTC[
							NV_CIO_CRE_LCD__INDEX];
	uint32_t dacclk_off = NV_PRAMDAC_DACCLK +
					nv04_dac_output_offset(encoder);
	uint32_t dacclk;

	helper->dpms(encoder, DRM_MODE_DPMS_OFF);

	nv04_dfp_disable(dev, head);

	/* Unbind any FP encoders from this head if we need the FP
	 * stuff enabled. */
	if (tv_norm->kind == CTV_ENC_MODE) {
		struct drm_encoder *enc;

		list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
			struct dcb_entry *dcb = nouveau_encoder(enc)->dcb;

			/* Move idle TMDS/LVDS outputs bound to this head
			 * over to the other one. */
			if ((dcb->type == OUTPUT_TMDS ||
			     dcb->type == OUTPUT_LVDS) &&
			     !enc->crtc &&
			     nv04_dfp_get_bound_head(dev, dcb) == head) {
				nv04_dfp_bind_head(dev, dcb, head ^ 1,
						   dev_priv->vbios.fp.dual_link);
			}
		}

	}

	/* Some NV4x have unknown values (0x3f, 0x50, 0x54, 0x6b, 0x79, 0x7f)
	 * at LCD__INDEX which we don't alter
	 */
	if (!(*cr_lcd & 0x44)) {
		if (tv_norm->kind == CTV_ENC_MODE)
			*cr_lcd = 0x1 | (head ? 0x0 : 0x8);
		else
			*cr_lcd = 0;
	}

	/* Set the DACCLK register */
	dacclk = (NVReadRAMDAC(dev, 0, dacclk_off) & ~0x30) | 0x1;

	if (dev_priv->card_type == NV_40)
		dacclk |= 0x1a << 16;

	if (tv_norm->kind == CTV_ENC_MODE) {
		/* Bits [5:4] select the CTV path; bit 8 the bound head. */
		dacclk |=  0x20;

		if (head)
			dacclk |= 0x100;
		else
			dacclk &= ~0x100;

	} else {
		dacclk |= 0x10;

	}

	NVWriteRAMDAC(dev, 0, dacclk_off, dacclk);
}
407
/* Modeset hook: fill in the software register state (CRTC shadow regs
 * and the nv17_tv_state) for the selected TV norm; the values are
 * loaded into hardware later by nv17_tv_commit()/state_load.
 *
 * NOTE(review): most ptv_* magic constants are undocumented hardware
 * values taken from the norm tables; comments only mark structure. */
static void nv17_tv_mode_set(struct drm_encoder *encoder,
			     struct drm_display_mode *drm_mode,
			     struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int head = nouveau_crtc(encoder->crtc)->index;
	struct nv04_crtc_reg *regs = &dev_priv->mode_reg.crtc_reg[head];
	struct nv17_tv_state *tv_regs = &to_tv_enc(encoder)->state;
	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
	int i;

	regs->CRTC[NV_CIO_CRE_53] = 0x40; /* FP_HTIMING */
	regs->CRTC[NV_CIO_CRE_54] = 0; /* FP_VTIMING */
	regs->ramdac_630 = 0x2; /* turn off green mode (tv test pattern?) */
	regs->tv_setup = 1;
	regs->ramdac_8c0 = 0x0;

	if (tv_norm->kind == TV_ENC_MODE) {
		/* Standard-definition path: program the PTV encoder. */
		tv_regs->ptv_200 = 0x13111100;
		if (head)
			tv_regs->ptv_200 |= 0x10;

		tv_regs->ptv_20c = 0x808010;
		tv_regs->ptv_304 = 0x2d00000;
		tv_regs->ptv_600 = 0x0;
		tv_regs->ptv_60c = 0x0;
		tv_regs->ptv_610 = 0x1e00000;

		/* 576-line (PAL-ish) vs 480-line (NTSC-ish) values. */
		if (tv_norm->tv_enc_mode.vdisplay == 576) {
			tv_regs->ptv_508 = 0x1200000;
			tv_regs->ptv_614 = 0x33;

		} else if (tv_norm->tv_enc_mode.vdisplay == 480) {
			tv_regs->ptv_508 = 0xf00000;
			tv_regs->ptv_614 = 0x13;
		}

		if (dev_priv->card_type >= NV_30) {
			tv_regs->ptv_500 = 0xe8e0;
			tv_regs->ptv_504 = 0x1710;
			tv_regs->ptv_604 = 0x0;
			tv_regs->ptv_608 = 0x0;
		} else {
			if (tv_norm->tv_enc_mode.vdisplay == 576) {
				tv_regs->ptv_604 = 0x20;
				tv_regs->ptv_608 = 0x10;
				tv_regs->ptv_500 = 0x19710;
				tv_regs->ptv_504 = 0x68f0;

			} else if (tv_norm->tv_enc_mode.vdisplay == 480) {
				tv_regs->ptv_604 = 0x10;
				tv_regs->ptv_608 = 0x20;
				tv_regs->ptv_500 = 0x4b90;
				tv_regs->ptv_504 = 0x1b480;
			}
		}

		/* Copy the norm's 0x40-entry TV encoder register table. */
		for (i = 0; i < 0x40; i++)
			tv_regs->tv_enc[i] = tv_norm->tv_enc_mode.tv_enc[i];

	} else {
		struct drm_display_mode *output_mode =
			&tv_norm->ctv_enc_mode.mode;

		/* The registers in PRAMDAC+0xc00 control some timings and CSC
		 * parameters for the CTV encoder (It's only used for "HD" TV
		 * modes, I don't think I have enough working to guess what
		 * they exactly mean...), it's probably connected at the
		 * output of the FP encoder, but it also needs the analog
		 * encoder in its OR enabled and routed to the head it's
		 * using. It's enabled with the DACCLK register, bits [5:4].
		 */
		for (i = 0; i < 38; i++)
			regs->ctv_regs[i] = tv_norm->ctv_enc_mode.ctv_regs[i];

		/* FP timings are taken from the norm's output mode. */
		regs->fp_horiz_regs[FP_DISPLAY_END] = output_mode->hdisplay - 1;
		regs->fp_horiz_regs[FP_TOTAL] = output_mode->htotal - 1;
		regs->fp_horiz_regs[FP_SYNC_START] =
						output_mode->hsync_start - 1;
		regs->fp_horiz_regs[FP_SYNC_END] = output_mode->hsync_end - 1;
		regs->fp_horiz_regs[FP_CRTC] = output_mode->hdisplay +
			max((output_mode->hdisplay-600)/40 - 1, 1);

		regs->fp_vert_regs[FP_DISPLAY_END] = output_mode->vdisplay - 1;
		regs->fp_vert_regs[FP_TOTAL] = output_mode->vtotal - 1;
		regs->fp_vert_regs[FP_SYNC_START] =
						output_mode->vsync_start - 1;
		regs->fp_vert_regs[FP_SYNC_END] = output_mode->vsync_end - 1;
		regs->fp_vert_regs[FP_CRTC] = output_mode->vdisplay - 1;

		regs->fp_control = NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS |
			NV_PRAMDAC_FP_TG_CONTROL_READ_PROG |
			NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12;

		if (output_mode->flags & DRM_MODE_FLAG_PVSYNC)
			regs->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS;
		if (output_mode->flags & DRM_MODE_FLAG_PHSYNC)
			regs->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS;

		regs->fp_debug_0 = NV_PRAMDAC_FP_DEBUG_0_YWEIGHT_ROUND |
			NV_PRAMDAC_FP_DEBUG_0_XWEIGHT_ROUND |
			NV_PRAMDAC_FP_DEBUG_0_YINTERP_BILINEAR |
			NV_PRAMDAC_FP_DEBUG_0_XINTERP_BILINEAR |
			NV_RAMDAC_FP_DEBUG_0_TMDS_ENABLED |
			NV_PRAMDAC_FP_DEBUG_0_YSCALE_ENABLE |
			NV_PRAMDAC_FP_DEBUG_0_XSCALE_ENABLE;

		regs->fp_debug_2 = 0;

		regs->fp_margin_color = 0x801080;

	}
}
522
/* Modeset commit hook: update rescaler/properties for the active norm,
 * load the prepared register state into hardware and power the encoder
 * back on. */
static void nv17_tv_commit(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_encoder_helper_funcs *helper = encoder->helper_private;

	if (get_tv_norm(encoder)->kind == TV_ENC_MODE) {
		nv17_tv_update_rescaler(encoder);
		nv17_tv_update_properties(encoder);
	} else {
		nv17_ctv_update_rescaler(encoder);
	}

	nv17_tv_state_load(dev, &to_tv_enc(encoder)->state);

	/* This could use refinement for flatpanels, but it should work */
	if (dev_priv->chipset < 0x44)
		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL +
					nv04_dac_output_offset(encoder),
					0xf0000000);
	else
		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL +
					nv04_dac_output_offset(encoder),
					0x00100000);

	helper->dpms(encoder, DRM_MODE_DPMS_ON);

	NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
		drm_get_connector_name(
			&nouveau_encoder_connector_get(nv_encoder)->base),
		nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
}
557
/* Save hook: stash the DACCLK register and the full TV encoder state
 * so nv17_tv_restore() can put them back.  ptv_200 is copied into the
 * live state so DPMS logic starts from the hardware's current value. */
static void nv17_tv_save(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);

	nouveau_encoder(encoder)->restore.output =
					NVReadRAMDAC(dev, 0,
					NV_PRAMDAC_DACCLK +
					nv04_dac_output_offset(encoder));

	nv17_tv_state_save(dev, &tv_enc->saved_state);

	tv_enc->state.ptv_200 = tv_enc->saved_state.ptv_200;
}
572
/* Restore hook: write back the DACCLK register and TV encoder state
 * captured by nv17_tv_save(), and invalidate the cached DPMS mode so
 * the next dpms call reprograms the hardware. */
static void nv17_tv_restore(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;

	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK +
				nv04_dac_output_offset(encoder),
				nouveau_encoder(encoder)->restore.output);

	nv17_tv_state_load(dev, &to_tv_enc(encoder)->saved_state);

	nouveau_encoder(encoder)->last_dpms = NV_DPMS_CLEARED;
}
585
/* Create and attach the TV-related DRM properties for this connector.
 * Honors the "nouveau_tv_norm" module parameter as the initial norm if
 * it names a valid one.  Boards without component output only expose
 * the load-detectable (SD) norms.  Always returns 0. */
static int nv17_tv_create_resources(struct drm_encoder *encoder,
				    struct drm_connector *connector)
{
	struct drm_device *dev = encoder->dev;
	struct drm_mode_config *conf = &dev->mode_config;
	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
	struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
	int num_tv_norms = dcb->tvconf.has_component_output ? NUM_TV_NORMS :
							NUM_LD_TV_NORMS;
	int i;

	if (nouveau_tv_norm) {
		for (i = 0; i < num_tv_norms; i++) {
			if (!strcmp(nv17_tv_norm_names[i], nouveau_tv_norm)) {
				tv_enc->tv_norm = i;
				break;
			}
		}

		if (i == num_tv_norms)
			NV_WARN(dev, "Invalid TV norm setting \"%s\"\n",
				nouveau_tv_norm);
	}

	drm_mode_create_tv_properties(dev, num_tv_norms, nv17_tv_norm_names);

	drm_connector_attach_property(connector,
					conf->tv_select_subconnector_property,
					tv_enc->select_subconnector);
	drm_connector_attach_property(connector,
					conf->tv_subconnector_property,
					tv_enc->subconnector);
	drm_connector_attach_property(connector,
					conf->tv_mode_property,
					tv_enc->tv_norm);
	drm_connector_attach_property(connector,
					conf->tv_flicker_reduction_property,
					tv_enc->flicker);
	drm_connector_attach_property(connector,
					conf->tv_saturation_property,
					tv_enc->saturation);
	drm_connector_attach_property(connector,
					conf->tv_hue_property,
					tv_enc->hue);
	drm_connector_attach_property(connector,
					conf->tv_overscan_property,
					tv_enc->overscan);

	return 0;
}
636
/* Property-change hook for the TV properties attached above.  Stores
 * the new value and reprograms whatever part of the hardware depends
 * on it.  Several properties only make sense in SD (TV_ENC_MODE) mode
 * and return -EINVAL otherwise; changing the norm requires the
 * connector to be off and triggers a mode list re-probe. */
static int nv17_tv_set_property(struct drm_encoder *encoder,
				struct drm_connector *connector,
				struct drm_property *property,
				uint64_t val)
{
	struct drm_mode_config *conf = &encoder->dev->mode_config;
	struct drm_crtc *crtc = encoder->crtc;
	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
	bool modes_changed = false;

	if (property == conf->tv_overscan_property) {
		tv_enc->overscan = val;
		/* Only touch hardware if we're live on a CRTC. */
		if (encoder->crtc) {
			if (tv_norm->kind == CTV_ENC_MODE)
				nv17_ctv_update_rescaler(encoder);
			else
				nv17_tv_update_rescaler(encoder);
		}

	} else if (property == conf->tv_saturation_property) {
		if (tv_norm->kind != TV_ENC_MODE)
			return -EINVAL;

		tv_enc->saturation = val;
		nv17_tv_update_properties(encoder);

	} else if (property == conf->tv_hue_property) {
		if (tv_norm->kind != TV_ENC_MODE)
			return -EINVAL;

		tv_enc->hue = val;
		nv17_tv_update_properties(encoder);

	} else if (property == conf->tv_flicker_reduction_property) {
		if (tv_norm->kind != TV_ENC_MODE)
			return -EINVAL;

		tv_enc->flicker = val;
		if (encoder->crtc)
			nv17_tv_update_rescaler(encoder);

	} else if (property == conf->tv_mode_property) {
		if (connector->dpms != DRM_MODE_DPMS_OFF)
			return -EINVAL;

		tv_enc->tv_norm = val;

		modes_changed = true;

	} else if (property == conf->tv_select_subconnector_property) {
		if (tv_norm->kind != TV_ENC_MODE)
			return -EINVAL;

		tv_enc->select_subconnector = val;
		nv17_tv_update_properties(encoder);

	} else {
		return -EINVAL;
	}

	if (modes_changed) {
		drm_helper_probe_single_connector_modes(connector, 0, 0);

		/* Disable the crtc to ensure a full modeset is
		 * performed whenever it's turned on again. */
		if (crtc) {
			struct drm_mode_set modeset = {
				.crtc = crtc,
			};

			crtc->funcs->set_config(&modeset);
		}
	}

	return 0;
}
714
/* Encoder destroy hook: tear down the DRM encoder and free the
 * nv17_tv_encoder allocated in nv17_tv_create(). */
static void nv17_tv_destroy(struct drm_encoder *encoder)
{
	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);

	NV_DEBUG_KMS(encoder->dev, "\n");

	drm_encoder_cleanup(encoder);
	kfree(tv_enc);
}
724
/* Modeset helper callbacks for the TV encoder. */
static struct drm_encoder_helper_funcs nv17_tv_helper_funcs = {
	.dpms = nv17_tv_dpms,
	.save = nv17_tv_save,
	.restore = nv17_tv_restore,
	.mode_fixup = nv17_tv_mode_fixup,
	.prepare = nv17_tv_prepare,
	.commit = nv17_tv_commit,
	.mode_set = nv17_tv_mode_set,
	.detect = nv17_tv_detect,
};

/* Slave-encoder callbacks (mode enumeration and property handling). */
static struct drm_encoder_slave_funcs nv17_tv_slave_funcs = {
	.get_modes = nv17_tv_get_modes,
	.mode_valid = nv17_tv_mode_valid,
	.create_resources = nv17_tv_create_resources,
	.set_property = nv17_tv_set_property,
};

/* Core encoder callbacks. */
static struct drm_encoder_funcs nv17_tv_funcs = {
	.destroy = nv17_tv_destroy,
};
746
/* Allocate and register a TV encoder for the given DCB entry.
 * Initializes the property defaults (PAL norm, mid-range overscan/
 * flicker/saturation) and hooks up the helper/slave function tables.
 * Returns 0 on success or -ENOMEM on allocation failure. */
int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry)
{
	struct drm_encoder *encoder;
	struct nv17_tv_encoder *tv_enc = NULL;

	tv_enc = kzalloc(sizeof(*tv_enc), GFP_KERNEL);
	if (!tv_enc)
		return -ENOMEM;

	/* Property defaults; 0-100 ranges centered at 50 except hue. */
	tv_enc->overscan = 50;
	tv_enc->flicker = 50;
	tv_enc->saturation = 50;
	tv_enc->hue = 0;
	tv_enc->tv_norm = TV_NORM_PAL;
	tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
	tv_enc->select_subconnector = DRM_MODE_SUBCONNECTOR_Automatic;
	tv_enc->pin_mask = 0;

	encoder = to_drm_encoder(&tv_enc->base);

	tv_enc->base.dcb = entry;
	/* "or" bitmask -> zero-based output resource index. */
	tv_enc->base.or = ffs(entry->or) - 1;

	drm_encoder_init(dev, encoder, &nv17_tv_funcs, DRM_MODE_ENCODER_TVDAC);
	drm_encoder_helper_add(encoder, &nv17_tv_helper_funcs);
	to_encoder_slave(encoder)->slave_funcs = &nv17_tv_slave_funcs;

	encoder->possible_crtcs = entry->heads;
	encoder->possible_clones = 0;

	return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.h b/drivers/gpu/drm/nouveau/nv17_tv.h
new file mode 100644
index 000000000000..c00977cedabd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv17_tv.h
@@ -0,0 +1,156 @@
1/*
2 * Copyright (C) 2009 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#ifndef __NV17_TV_H__
28#define __NV17_TV_H__
29
/* Snapshot of the TV encoder's programmable state.  The ptv_* fields
 * mirror PTV registers at offset NV_PTV_OFFSET + 0x<name> (see the
 * nv_load_ptv/nv_save_ptv macros); tv_enc[] holds the 0x40 indexed
 * TV encoder registers accessed through NV_PTV_TV_INDEX/DATA. */
struct nv17_tv_state {
	uint8_t tv_enc[0x40];

	/* Horizontal/vertical filter coefficient tables. */
	uint32_t hfilter[4][7];
	uint32_t hfilter2[4][7];
	uint32_t vfilter[4][7];

	uint32_t ptv_200;
	uint32_t ptv_204;
	uint32_t ptv_208;
	uint32_t ptv_20c;
	uint32_t ptv_304;
	uint32_t ptv_500;
	uint32_t ptv_504;
	uint32_t ptv_508;
	uint32_t ptv_600;
	uint32_t ptv_604;
	uint32_t ptv_608;
	uint32_t ptv_60c;
	uint32_t ptv_610;
	uint32_t ptv_614;
};
52
/* Supported TV norms.  The first NUM_LD_TV_NORMS entries are the
 * standard-definition norms usable with load detection; the hd* CTV
 * norms that follow are only offered on boards with component out. */
enum nv17_tv_norm{
	TV_NORM_PAL,
	TV_NORM_PAL_M,
	TV_NORM_PAL_N,
	TV_NORM_PAL_NC,
	TV_NORM_NTSC_M,
	TV_NORM_NTSC_J,
	NUM_LD_TV_NORMS,
	TV_NORM_HD480I = NUM_LD_TV_NORMS,
	TV_NORM_HD480P,
	TV_NORM_HD576I,
	TV_NORM_HD576P,
	TV_NORM_HD720P,
	TV_NORM_HD1080I,
	NUM_TV_NORMS
};
69
/* TV encoder instance: the common nouveau encoder plus live/saved
 * register state and the current values of the TV properties. */
struct nv17_tv_encoder {
	struct nouveau_encoder base;

	struct nv17_tv_state state;		/* state being programmed */
	struct nv17_tv_state saved_state;	/* state captured at save() */

	/* Property values; 0-100 ranges — TODO confirm exact scales. */
	int overscan;
	int flicker;
	int saturation;
	int hue;
	enum nv17_tv_norm tv_norm;
	int subconnector;		/* last detected subconnector */
	int select_subconnector;	/* user-forced subconnector */
	uint32_t pin_mask;		/* DAC pins seen loaded at detect */
};
/* Convert a drm_encoder pointer to the containing nv17_tv_encoder. */
#define to_tv_enc(x) container_of(nouveau_encoder(x), \
				  struct nv17_tv_encoder, base)
87
/* Human-readable names for each entry of enum nv17_tv_norm. */
extern char *nv17_tv_norm_names[NUM_TV_NORMS];

/* Per-norm parameter table: either a plain TV encoder mode (fixed
 * resolution/refresh plus a raw 0x40-byte register dump) or a CTV
 * ("HD") mode described by a DRM display mode plus CTV registers. */
extern struct nv17_tv_norm_params {
	enum {
		TV_ENC_MODE,
		CTV_ENC_MODE,
	} kind;

	union {
		struct {
			int hdisplay;
			int vdisplay;
			int vrefresh; /* mHz */

			uint8_t tv_enc[0x40];
		} tv_enc_mode;

		struct {
			struct drm_display_mode mode;

			uint32_t ctv_regs[38];
		} ctv_enc_mode;
	};

} nv17_tv_norms[NUM_TV_NORMS];
/* Parameters of the norm currently selected on an encoder. */
#define get_tv_norm(enc) (&nv17_tv_norms[to_tv_enc(enc)->tv_norm])
114
115extern struct drm_display_mode nv17_tv_modes[];
116
/* Piecewise-linear interpolation through (0, y0), (50, y1), (100, y2),
 * evaluated at x with integer (truncating) division. */
static inline int interpolate(int y0, int y1, int y2, int x)
{
	int slope;

	if (x < 50)
		slope = y1 - y0;
	else
		slope = y2 - y1;

	return y1 + slope * (x - 50) / 50;
}
121
122void nv17_tv_state_save(struct drm_device *dev, struct nv17_tv_state *state);
123void nv17_tv_state_load(struct drm_device *dev, struct nv17_tv_state *state);
124void nv17_tv_update_properties(struct drm_encoder *encoder);
125void nv17_tv_update_rescaler(struct drm_encoder *encoder);
126void nv17_ctv_update_rescaler(struct drm_encoder *encoder);
127
128/* TV hardware access functions */
129
/* Write a 32-bit PTV register. */
static inline void nv_write_ptv(struct drm_device *dev, uint32_t reg, uint32_t val)
{
	nv_wr32(dev, reg, val);
}

/* Read a 32-bit PTV register. */
static inline uint32_t nv_read_ptv(struct drm_device *dev, uint32_t reg)
{
	return nv_rd32(dev, reg);
}

/* Write one of the indexed TV encoder registers via INDEX/DATA. */
static inline void nv_write_tv_enc(struct drm_device *dev, uint8_t reg, uint8_t val)
{
	nv_write_ptv(dev, NV_PTV_TV_INDEX, reg);
	nv_write_ptv(dev, NV_PTV_TV_DATA, val);
}

/* Read one of the indexed TV encoder registers via INDEX/DATA. */
static inline uint8_t nv_read_tv_enc(struct drm_device *dev, uint8_t reg)
{
	nv_write_ptv(dev, NV_PTV_TV_INDEX, reg);
	return nv_read_ptv(dev, NV_PTV_TV_DATA);
}

/* Shorthands tying nv17_tv_state fields to their hardware registers,
 * e.g. nv_load_ptv(dev, state, 200) writes state->ptv_200 to
 * NV_PTV_OFFSET + 0x200. */
#define nv_load_ptv(dev, state, reg) nv_write_ptv(dev, NV_PTV_OFFSET + 0x##reg, state->ptv_##reg)
#define nv_save_ptv(dev, state, reg) state->ptv_##reg = nv_read_ptv(dev, NV_PTV_OFFSET + 0x##reg)
#define nv_load_tv_enc(dev, state, reg) nv_write_tv_enc(dev, 0x##reg, state->tv_enc[0x##reg])
155
156#endif
diff --git a/drivers/gpu/drm/nouveau/nv17_tv_modes.c b/drivers/gpu/drm/nouveau/nv17_tv_modes.c
new file mode 100644
index 000000000000..d64683d97e0d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv17_tv_modes.c
@@ -0,0 +1,583 @@
1/*
2 * Copyright (C) 2009 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm_crtc_helper.h"
29#include "nouveau_drv.h"
30#include "nouveau_encoder.h"
31#include "nouveau_crtc.h"
32#include "nouveau_hw.h"
33#include "nv17_tv.h"
34
/* User-visible names of the supported TV norms, indexed by TV_NORM_*. */
char *nv17_tv_norm_names[NUM_TV_NORMS] = {
	[TV_NORM_PAL] = "PAL",
	[TV_NORM_PAL_M] = "PAL-M",
	[TV_NORM_PAL_N] = "PAL-N",
	[TV_NORM_PAL_NC] = "PAL-Nc",
	[TV_NORM_NTSC_M] = "NTSC-M",
	[TV_NORM_NTSC_J] = "NTSC-J",
	[TV_NORM_HD480I] = "hd480i",
	[TV_NORM_HD480P] = "hd480p",
	[TV_NORM_HD576I] = "hd576i",
	[TV_NORM_HD576P] = "hd576p",
	[TV_NORM_HD720P] = "hd720p",
	[TV_NORM_HD1080I] = "hd1080i"
};
49
50/* TV standard specific parameters */
51
/*
 * Per-norm hardware parameter table, indexed by TV_NORM_*.
 *
 * Interlaced SD norms use the internal TV encoder (TV_ENC_MODE): the
 * initializer appears to be { hdisplay, vdisplay, refresh, <64 TV
 * encoder register bytes> } — field names are declared in nv17_tv.h;
 * the refresh unit (50000/59940) looks like mHz — TODO confirm against
 * the struct definition.  Progressive/HD norms drive the encoder in
 * bypass (CTV_ENC_MODE) with a DRM mode plus 38 raw CTV register
 * values.  The register dumps are opaque hardware state; do not edit
 * individual values without a reference dump.
 */
struct nv17_tv_norm_params nv17_tv_norms[NUM_TV_NORMS] = {
	[TV_NORM_PAL] = { TV_ENC_MODE, {
			.tv_enc_mode = { 720, 576, 50000, {
					0x2a, 0x9, 0x8a, 0xcb, 0x0, 0x0, 0xb, 0x18,
					0x7e, 0x40, 0x8a, 0x35, 0x27, 0x0, 0x34, 0x3,
					0x3e, 0x3, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c,
					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x3,
					0xd3, 0x4, 0xd4, 0x1, 0x2, 0x0, 0xa, 0x5,
					0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0,
					0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b,
					0xbd, 0x15, 0x5, 0x15, 0x3e, 0x3, 0x0, 0x0
				} } } },

	[TV_NORM_PAL_M] = { TV_ENC_MODE, {
			.tv_enc_mode = { 720, 480, 59940, {
					0x21, 0xe6, 0xef, 0xe3, 0x0, 0x0, 0xb, 0x18,
					0x7e, 0x44, 0x76, 0x32, 0x25, 0x0, 0x3c, 0x0,
					0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83,
					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
					0xc5, 0x4, 0xc5, 0x1, 0x2, 0x0, 0xa, 0x5,
					0x0, 0x18, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0,
					0x0, 0xb4, 0x0, 0x15, 0x40, 0x10, 0x0, 0x9c,
					0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
				} } } },

	[TV_NORM_PAL_N] = { TV_ENC_MODE, {
			.tv_enc_mode = { 720, 576, 50000, {
					0x2a, 0x9, 0x8a, 0xcb, 0x0, 0x0, 0xb, 0x18,
					0x7e, 0x40, 0x8a, 0x32, 0x25, 0x0, 0x3c, 0x0,
					0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c,
					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
					0xc5, 0x4, 0xc5, 0x1, 0x2, 0x0, 0xa, 0x5,
					0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0,
					0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b,
					0xbd, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
				} } } },

	[TV_NORM_PAL_NC] = { TV_ENC_MODE, {
			.tv_enc_mode = { 720, 576, 50000, {
					0x21, 0xf6, 0x94, 0x46, 0x0, 0x0, 0xb, 0x18,
					0x7e, 0x44, 0x8a, 0x35, 0x27, 0x0, 0x34, 0x3,
					0x3e, 0x3, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c,
					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x3,
					0xd3, 0x4, 0xd4, 0x1, 0x2, 0x0, 0xa, 0x5,
					0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0,
					0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b,
					0xbd, 0x15, 0x5, 0x15, 0x3e, 0x3, 0x0, 0x0
				} } } },

	[TV_NORM_NTSC_M] = { TV_ENC_MODE, {
			.tv_enc_mode = { 720, 480, 59940, {
					0x21, 0xf0, 0x7c, 0x1f, 0x0, 0x0, 0xb, 0x18,
					0x7e, 0x44, 0x76, 0x48, 0x0, 0x0, 0x3c, 0x0,
					0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83,
					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
					0xc5, 0x4, 0xc5, 0x1, 0x2, 0x0, 0xa, 0x5,
					0x0, 0x16, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0,
					0x0, 0xb4, 0x0, 0x15, 0x4, 0x10, 0x0, 0x9c,
					0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
				} } } },

	[TV_NORM_NTSC_J] = { TV_ENC_MODE, {
			.tv_enc_mode = { 720, 480, 59940, {
					0x21, 0xf0, 0x7c, 0x1f, 0x0, 0x0, 0xb, 0x18,
					0x7e, 0x44, 0x76, 0x48, 0x0, 0x0, 0x32, 0x0,
					0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83,
					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
					0xcf, 0x4, 0xcf, 0x1, 0x2, 0x0, 0xa, 0x5,
					0x0, 0x16, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0,
					0x0, 0xb4, 0x0, 0x15, 0x4, 0x10, 0x0, 0xa4,
					0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
				} } } },

	/* Note: hd480i uses the same encoder register dump as NTSC-J. */
	[TV_NORM_HD480I] = { TV_ENC_MODE, {
			.tv_enc_mode = { 720, 480, 59940, {
					0x21, 0xf0, 0x7c, 0x1f, 0x0, 0x0, 0xb, 0x18,
					0x7e, 0x44, 0x76, 0x48, 0x0, 0x0, 0x32, 0x0,
					0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83,
					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
					0xcf, 0x4, 0xcf, 0x1, 0x2, 0x0, 0xa, 0x5,
					0x0, 0x16, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0,
					0x0, 0xb4, 0x0, 0x15, 0x4, 0x10, 0x0, 0xa4,
					0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
				} } } },

	/* Note: hd576i uses the same encoder register dump as PAL. */
	[TV_NORM_HD576I] = { TV_ENC_MODE, {
			.tv_enc_mode = { 720, 576, 50000, {
					0x2a, 0x9, 0x8a, 0xcb, 0x0, 0x0, 0xb, 0x18,
					0x7e, 0x40, 0x8a, 0x35, 0x27, 0x0, 0x34, 0x3,
					0x3e, 0x3, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c,
					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x3,
					0xd3, 0x4, 0xd4, 0x1, 0x2, 0x0, 0xa, 0x5,
					0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0,
					0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b,
					0xbd, 0x15, 0x5, 0x15, 0x3e, 0x3, 0x0, 0x0
				} } } },


	[TV_NORM_HD480P] = { CTV_ENC_MODE, {
			.ctv_enc_mode = {
				.mode = { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000,
						   720, 735, 743, 858, 0, 480, 490, 494, 525, 0,
						   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
				.ctv_regs = { 0x3540000, 0x0, 0x0, 0x314,
					      0x354003a, 0x40000, 0x6f0344, 0x18100000,
					      0x10160004, 0x10060005, 0x1006000c, 0x10060020,
					      0x10060021, 0x140e0022, 0x10060202, 0x1802020a,
					      0x1810020b, 0x10000fff, 0x10000fff, 0x10000fff,
					      0x10000fff, 0x10000fff, 0x10000fff, 0x70,
					      0x3ff0000, 0x57, 0x2e001e, 0x258012c,
					      0xa0aa04ec, 0x30, 0x80960019, 0x12c0300,
					      0x2019, 0x600, 0x32060019, 0x0, 0x0, 0x400
				} } } },

	[TV_NORM_HD576P] = { CTV_ENC_MODE, {
			.ctv_enc_mode = {
				.mode = { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000,
						   720, 730, 738, 864, 0, 576, 581, 585, 625, 0,
						   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
				.ctv_regs = { 0x3540000, 0x0, 0x0, 0x314,
					      0x354003a, 0x40000, 0x6f0344, 0x18100000,
					      0x10060001, 0x10060009, 0x10060026, 0x10060027,
					      0x140e0028, 0x10060268, 0x1810026d, 0x10000fff,
					      0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff,
					      0x10000fff, 0x10000fff, 0x10000fff, 0x69,
					      0x3ff0000, 0x57, 0x2e001e, 0x258012c,
					      0xa0aa04ec, 0x30, 0x80960019, 0x12c0300,
					      0x2019, 0x600, 0x32060019, 0x0, 0x0, 0x400
				} } } },

	[TV_NORM_HD720P] = { CTV_ENC_MODE, {
			.ctv_enc_mode = {
				.mode = { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250,
						   1280, 1349, 1357, 1650, 0, 720, 725, 730, 750, 0,
						   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
				.ctv_regs = { 0x1260394, 0x0, 0x0, 0x622,
					      0x66b0021, 0x6004a, 0x1210626, 0x8170000,
					      0x70004, 0x70016, 0x70017, 0x40f0018,
					      0x702e8, 0x81702ed, 0xfff, 0xfff,
					      0xfff, 0xfff, 0xfff, 0xfff,
					      0xfff, 0xfff, 0xfff, 0x0,
					      0x2e40001, 0x58, 0x2e001e, 0x258012c,
					      0xa0aa04ec, 0x30, 0x810c0039, 0x12c0300,
					      0xc0002039, 0x600, 0x32060039, 0x0, 0x0, 0x0
				} } } },

	[TV_NORM_HD1080I] = { CTV_ENC_MODE, {
			.ctv_enc_mode = {
				.mode = { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250,
						   1920, 1961, 2049, 2200, 0, 1080, 1084, 1088, 1125, 0,
						   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC
						   | DRM_MODE_FLAG_INTERLACE) },
				.ctv_regs = { 0xac0420, 0x44c0478, 0x4a4, 0x4fc0868,
					      0x8940028, 0x60054, 0xe80870, 0xbf70000,
					      0xbc70004, 0x70005, 0x70012, 0x70013,
					      0x40f0014, 0x70230, 0xbf70232, 0xbf70233,
					      0x1c70237, 0x70238, 0x70244, 0x70245,
					      0x40f0246, 0x70462, 0x1f70464, 0x0,
					      0x2e40001, 0x58, 0x2e001e, 0x258012c,
					      0xa0aa04ec, 0x30, 0x815f004c, 0x12c0300,
					      0xc000204c, 0x600, 0x3206004c, 0x0, 0x0, 0x0
				} } } }
};
215
216/*
217 * The following is some guesswork on how the TV encoder flicker
218 * filter/rescaler works:
219 *
220 * It seems to use some sort of resampling filter, it is controlled
221 * through the registers at NV_PTV_HFILTER and NV_PTV_VFILTER, they
222 * control the horizontal and vertical stage respectively, there is
223 * also NV_PTV_HFILTER2 the blob fills identically to NV_PTV_HFILTER,
224 * but they seem to do nothing. A rough guess might be that they could
225 * be used to independently control the filtering of each interlaced
226 * field, but I don't know how they are enabled. The whole filtering
227 * process seems to be disabled with bits 26:27 of PTV_200, but we
228 * aren't doing that.
229 *
230 * The layout of both register sets is the same:
231 *
232 * A: [BASE+0x18]...[BASE+0x0] [BASE+0x58]..[BASE+0x40]
233 * B: [BASE+0x34]...[BASE+0x1c] [BASE+0x74]..[BASE+0x5c]
234 *
235 * Each coefficient is stored in bits [31],[15:9] in two's complement
236 * format. They seem to be some kind of weights used in a low-pass
237 * filter. Both A and B coefficients are applied to the 14 nearest
238 * samples on each side (Listed from nearest to furthermost. They
239 * roughly cover 2 framebuffer pixels on each side). They are
240 * probably multiplied with some more hardwired weights before being
241 * used: B-coefficients are applied the same on both sides,
242 * A-coefficients are inverted before being applied to the opposite
243 * side.
244 *
245 * After all the hassle, I got the following formula by empirical
246 * means...
247 */
248
/* Map the 0..100 overscan property onto the 0xc1..0x100 hardware range. */
#define calc_overscan(o) interpolate(0x100, 0xe1, 0xc1, o)

/*
 * Fixed-point scale factors used to express the fractional filter
 * constants below as 64-bit integers (value * 2^8 .. 2^48).  Note the
 * jump from 2^32 to 2^48 between id4 and id5.
 */
#define id1 (1LL << 8)
#define id2 (1LL << 16)
#define id3 (1LL << 24)
#define id4 (1LL << 32)
#define id5 (1LL << 48)
256
/*
 * Empirically determined polynomial coefficients for the flicker
 * filter/rescaler (see the block comment above).  For each filter
 * stage ([0] = horizontal, [1] = vertical) and each of the 4
 * coefficient banks, the tap value is a cubic in the tap index i,
 * with additional terms scaled by the resampling ratio (suffix "r")
 * and the flicker setting (suffix "f") — see tv_setup_filter().
 * Constants are fixed-point, pre-scaled by id1..id5.
 */
static struct filter_params{
	int64_t k1;
	int64_t ki;
	int64_t ki2;
	int64_t ki3;
	int64_t kr;
	int64_t kir;
	int64_t ki2r;
	int64_t ki3r;
	int64_t kf;
	int64_t kif;
	int64_t ki2f;
	int64_t ki3f;
	int64_t krf;
	int64_t kirf;
	int64_t ki2rf;
	int64_t ki3rf;
} fparams[2][4] = {
	/* Horizontal filter parameters */
	{
		{64.311690 * id5, -39.516924 * id5, 6.586143 * id5, 0.000002 * id5,
		 0.051285 * id4, 26.168746 * id4, -4.361449 * id4, -0.000001 * id4,
		 9.308169 * id3, 78.180965 * id3, -13.030158 * id3, -0.000001 * id3,
		 -8.801540 * id1, -46.572890 * id1, 7.762145 * id1, -0.000000 * id1},
		{-44.565569 * id5, -68.081246 * id5, 39.812074 * id5, -4.009316 * id5,
		 29.832207 * id4, 50.047322 * id4, -25.380017 * id4, 2.546422 * id4,
		 104.605622 * id3, 141.908641 * id3, -74.322319 * id3, 7.484316 * id3,
		 -37.081621 * id1, -90.397510 * id1, 42.784229 * id1, -4.289952 * id1},
		{-56.793244 * id5, 31.153584 * id5, -5.192247 * id5, -0.000003 * id5,
		 33.541131 * id4, -34.149302 * id4, 5.691537 * id4, 0.000002 * id4,
		 87.196610 * id3, -88.995169 * id3, 14.832456 * id3, 0.000012 * id3,
		 17.288138 * id1, 71.864786 * id1, -11.977408 * id1, -0.000009 * id1},
		{51.787796 * id5, 21.211771 * id5, -18.993730 * id5, 1.853310 * id5,
		 -41.470726 * id4, -17.775823 * id4, 13.057821 * id4, -1.15823 * id4,
		 -154.235673 * id3, -44.878641 * id3, 40.656077 * id3, -3.695595 * id3,
		 112.201065 * id1, 39.992155 * id1, -25.155714 * id1, 2.113984 * id1},
	},

	/* Vertical filter parameters */
	{
		{67.601979 * id5, 0.428319 * id5, -0.071318 * id5, -0.000012 * id5,
		 -3.402339 * id4, 0.000209 * id4, -0.000092 * id4, 0.000010 * id4,
		 -9.180996 * id3, 6.111270 * id3, -1.024457 * id3, 0.001043 * id3,
		 6.060315 * id1, -0.017425 * id1, 0.007830 * id1, -0.000869 * id1},
		{6.755647 * id5, 5.841348 * id5, 1.469734 * id5, -0.149656 * id5,
		 8.293120 * id4, -1.192888 * id4, -0.947652 * id4, 0.094507 * id4,
		 37.526655 * id3, 10.257875 * id3, -10.823275 * id3, 1.081497 * id3,
		 -2.361928 * id1, -2.059432 * id1, 1.840671 * id1, -0.168100 * id1},
		{-14.780391 * id5, -16.042148 * id5, 2.673692 * id5, -0.000000 * id5,
		 39.541978 * id4, 5.680053 * id4, -0.946676 * id4, 0.000000 * id4,
		 152.994486 * id3, 12.625439 * id3, -2.119579 * id3, 0.002708 * id3,
		 -38.125089 * id1, -0.855880 * id1, 0.155359 * id1, -0.002245 * id1},
		{-27.476193 * id5, -1.454976 * id5, 1.286557 * id5, 0.025346 * id5,
		 20.687300 * id4, 3.014003 * id4, -0.557786 * id4, -0.01311 * id4,
		 60.008737 * id3, -0.738273 * id3, 5.408217 * id3, -0.796798 * id3,
		 -17.296835 * id1, 4.438577 * id1, -2.809420 * id1, 0.385491 * id1},
	}
};
315
/*
 * Compute the horizontal and vertical flicker-filter coefficient banks
 * for the current CRTC mode and store them in tv_enc->state (hfilter /
 * vfilter); nothing is written to hardware here.  Each of the 2x4x7
 * coefficients is a polynomial in the tap index i with terms scaled by
 * the resampling ratio rs[k] and the flicker setting (see fparams),
 * evaluated in 64-bit fixed point.
 */
static void tv_setup_filter(struct drm_encoder *encoder)
{
	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
	struct drm_display_mode *mode = &encoder->crtc->mode;
	uint32_t (*filters[])[4][7] = {&tv_enc->state.hfilter,
				       &tv_enc->state.vfilter};
	int i, j, k;
	int32_t overscan = calc_overscan(tv_enc->overscan);
	/* flicker property (0..100) recentered around 0 in id3 fixed point */
	int64_t flicker = (tv_enc->flicker - 50) * (id3 / 100);
	uint64_t rs[] = {mode->hdisplay * id3,
			 mode->vdisplay * id3};

	/* rs[k] = CRTC size / (overscan-scaled encoder size), id2-scaled
	 * after the two divisions (id3 numerator / overscan ~ id1). */
	do_div(rs[0], overscan * tv_norm->tv_enc_mode.hdisplay);
	do_div(rs[1], overscan * tv_norm->tv_enc_mode.vdisplay);

	for (k = 0; k < 2; k++) {
		/* Never downscale below 1:1. */
		rs[k] = max((int64_t)rs[k], id2);

		for (j = 0; j < 4; j++) {
			struct filter_params *p = &fparams[k][j];

			for (i = 0; i < 7; i++) {
				int64_t c = (p->k1 + p->ki*i + p->ki2*i*i + p->ki3*i*i*i)
					+ (p->kr + p->kir*i + p->ki2r*i*i + p->ki3r*i*i*i)*rs[k]
					+ (p->kf + p->kif*i + p->ki2f*i*i + p->ki3f*i*i*i)*flicker
					+ (p->krf + p->kirf*i + p->ki2rf*i*i + p->ki3rf*i*i*i)*flicker*rs[k];

				/* Round (+id5/2), rescale, and keep only the
				 * hardware coefficient bits [31] and [15:9]. */
				(*filters[k])[j][i] = (c + id5/2) >> 39 & (0x1 << 31 | 0x7f << 9);
			}
		}
	}
}
349
350/* Hardware state saving/restoring */
351
/*
 * Read back one filter register set (4 banks of 7 coefficients) from
 * the PTV registers at "base" into regs[][], bank by bank.
 */
static void tv_save_filter(struct drm_device *dev, uint32_t base, uint32_t regs[4][7])
{
	/* Fixed offsets of the four coefficient banks from the base. */
	uint32_t bank_base[] = { base, base + 0x1c, base + 0x40, base + 0x5c };
	int bank, n;

	for (bank = 0; bank < 4; bank++)
		for (n = 0; n < 7; n++)
			regs[bank][n] = nv_read_ptv(dev, bank_base[bank] + 4*n);
}
362
/*
 * Write one filter register set (4 banks of 7 coefficients) from
 * regs[][] to the PTV registers at "base", bank by bank.
 */
static void tv_load_filter(struct drm_device *dev, uint32_t base, uint32_t regs[4][7])
{
	/* Fixed offsets of the four coefficient banks from the base. */
	uint32_t bank_base[] = { base, base + 0x1c, base + 0x40, base + 0x5c };
	int bank, n;

	for (bank = 0; bank < 4; bank++)
		for (n = 0; n < 7; n++)
			nv_write_ptv(dev, bank_base[bank] + 4*n, regs[bank][n]);
}
373
/*
 * Snapshot the TV-out hardware state into *state: all 0x40 TV encoder
 * registers, the three filter register sets, and the named PTV control
 * registers (via the nv_save_ptv token-pasting helper).
 */
void nv17_tv_state_save(struct drm_device *dev, struct nv17_tv_state *state)
{
	int i;

	for (i = 0; i < 0x40; i++)
		state->tv_enc[i] = nv_read_tv_enc(dev, i);

	tv_save_filter(dev, NV_PTV_HFILTER, state->hfilter);
	tv_save_filter(dev, NV_PTV_HFILTER2, state->hfilter2);
	tv_save_filter(dev, NV_PTV_VFILTER, state->vfilter);

	nv_save_ptv(dev, state, 200);
	nv_save_ptv(dev, state, 204);
	nv_save_ptv(dev, state, 208);
	nv_save_ptv(dev, state, 20c);
	nv_save_ptv(dev, state, 304);
	nv_save_ptv(dev, state, 500);
	nv_save_ptv(dev, state, 504);
	nv_save_ptv(dev, state, 508);
	nv_save_ptv(dev, state, 600);
	nv_save_ptv(dev, state, 604);
	nv_save_ptv(dev, state, 608);
	nv_save_ptv(dev, state, 60c);
	nv_save_ptv(dev, state, 610);
	nv_save_ptv(dev, state, 614);
}
400
/*
 * Restore a previously saved TV-out hardware state: TV encoder
 * registers, filter sets, and named PTV control registers (mirror of
 * nv17_tv_state_save()), then pulse encoder register 0x3e to latch
 * the new settings.
 */
void nv17_tv_state_load(struct drm_device *dev, struct nv17_tv_state *state)
{
	int i;

	for (i = 0; i < 0x40; i++)
		nv_write_tv_enc(dev, i, state->tv_enc[i]);

	tv_load_filter(dev, NV_PTV_HFILTER, state->hfilter);
	tv_load_filter(dev, NV_PTV_HFILTER2, state->hfilter2);
	tv_load_filter(dev, NV_PTV_VFILTER, state->vfilter);

	nv_load_ptv(dev, state, 200);
	nv_load_ptv(dev, state, 204);
	nv_load_ptv(dev, state, 208);
	nv_load_ptv(dev, state, 20c);
	nv_load_ptv(dev, state, 304);
	nv_load_ptv(dev, state, 500);
	nv_load_ptv(dev, state, 504);
	nv_load_ptv(dev, state, 508);
	nv_load_ptv(dev, state, 600);
	nv_load_ptv(dev, state, 604);
	nv_load_ptv(dev, state, 608);
	nv_load_ptv(dev, state, 60c);
	nv_load_ptv(dev, state, 610);
	nv_load_ptv(dev, state, 614);

	/* This is required for some settings to kick in. */
	nv_write_tv_enc(dev, 0x3e, 1);
	nv_write_tv_enc(dev, 0x3e, 0);
}
431
432/* Timings similar to the ones the blob sets */
433
/*
 * CRTC mode list offered on the TV connector; zero-terminated.  The
 * pixel clocks are 0 — presumably filled in/derived elsewhere from the
 * selected norm (TODO confirm against nv17_tv.c).
 */
struct drm_display_mode nv17_tv_modes[] = {
	{ DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 0,
		   320, 344, 392, 560, 0, 200, 200, 202, 220, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC
		   | DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_CLKDIV2) },
	{ DRM_MODE("320x240", DRM_MODE_TYPE_DRIVER, 0,
		   320, 344, 392, 560, 0, 240, 240, 246, 263, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC
		   | DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_CLKDIV2) },
	{ DRM_MODE("400x300", DRM_MODE_TYPE_DRIVER, 0,
		   400, 432, 496, 640, 0, 300, 300, 303, 314, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC
		   | DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_CLKDIV2) },
	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 0,
		   640, 672, 768, 880, 0, 480, 480, 492, 525, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 0,
		   720, 752, 872, 960, 0, 480, 480, 493, 525, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 0,
		   720, 776, 856, 960, 0, 576, 576, 588, 597, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 0,
		   800, 840, 920, 1040, 0, 600, 600, 604, 618, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 0,
		   1024, 1064, 1200, 1344, 0, 768, 768, 777, 806, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	{}
};
464
/*
 * Reprogram the output routing (ptv_204 / tv_enc[0x7]) for the active
 * subconnector and the saturation/hue encoder registers from the
 * current property values, then write the affected registers to
 * hardware.  User-forced subconnector selection takes precedence over
 * the detected one.
 *
 * NOTE(review): the switch has no default case — an unknown
 * subconnector value leaves ptv_204/tv_enc[0x7] at whatever was in
 * the cached state; confirm that is intentional.
 */
void nv17_tv_update_properties(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
	struct nv17_tv_state *regs = &tv_enc->state;
	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
	int subconnector = tv_enc->select_subconnector ?
						tv_enc->select_subconnector :
						tv_enc->subconnector;

	switch (subconnector) {
	case DRM_MODE_SUBCONNECTOR_Composite:
	{
		regs->ptv_204 = 0x2;

		/* The composite connector may be found on either pin. */
		if (tv_enc->pin_mask & 0x4)
			regs->ptv_204 |= 0x010000;
		else if (tv_enc->pin_mask & 0x2)
			regs->ptv_204 |= 0x100000;
		else
			regs->ptv_204 |= 0x110000;

		regs->tv_enc[0x7] = 0x10;
		break;
	}
	case DRM_MODE_SUBCONNECTOR_SVIDEO:
		regs->ptv_204 = 0x11012;
		regs->tv_enc[0x7] = 0x18;
		break;

	case DRM_MODE_SUBCONNECTOR_Component:
		regs->ptv_204 = 0x111333;
		regs->tv_enc[0x7] = 0x14;
		break;

	case DRM_MODE_SUBCONNECTOR_SCART:
		regs->ptv_204 = 0x111012;
		regs->tv_enc[0x7] = 0x18;
		break;
	}

	/* Scale the norm's default saturation registers by the 0..100
	 * saturation property (50 = norm default, 100 = 255). */
	regs->tv_enc[0x20] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x20], 255,
					 tv_enc->saturation);
	regs->tv_enc[0x22] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x22], 255,
					 tv_enc->saturation);
	regs->tv_enc[0x25] = tv_enc->hue * 255 / 100;

	nv_load_ptv(dev, regs, 204);
	nv_load_tv_enc(dev, regs, 7);
	nv_load_tv_enc(dev, regs, 20);
	nv_load_tv_enc(dev, regs, 22);
	nv_load_tv_enc(dev, regs, 25);
}
519
/*
 * Recompute and program the TV encoder rescaler: the overscan scale
 * factor in PTV_208 and the three flicker-filter coefficient sets
 * (HFILTER2 is loaded with the same values as HFILTER, matching the
 * observation in the comment block above).
 */
void nv17_tv_update_rescaler(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
	struct nv17_tv_state *regs = &tv_enc->state;

	regs->ptv_208 = 0x40 | (calc_overscan(tv_enc->overscan) << 8);

	tv_setup_filter(encoder);

	nv_load_ptv(dev, regs, 208);
	tv_load_filter(dev, NV_PTV_HFILTER, regs->hfilter);
	tv_load_filter(dev, NV_PTV_HFILTER2, regs->hfilter2);
	tv_load_filter(dev, NV_PTV_VFILTER, regs->vfilter);
}
535
/*
 * Program the RAMDAC flat-panel scaler for component (CTV) output:
 * center the CRTC mode inside the encoder mode with overscan-dependent
 * margins, set the valid-pixel window, and enable X/Y test-mode
 * scaling with the computed 0x800-based ratios.
 */
void nv17_ctv_update_rescaler(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
	int head = nouveau_crtc(encoder->crtc)->index;
	struct nv04_crtc_reg *regs = &dev_priv->mode_reg.crtc_reg[head];
	struct drm_display_mode *crtc_mode = &encoder->crtc->mode;
	struct drm_display_mode *output_mode = &get_tv_norm(encoder)->ctv_enc_mode.mode;
	int overscan, hmargin, vmargin, hratio, vratio;

	/* The rescaler doesn't do the right thing for interlaced modes. */
	if (output_mode->flags & DRM_MODE_FLAG_INTERLACE)
		overscan = 100;
	else
		overscan = tv_enc->overscan;

	/* Margins needed to center the CRTC mode in the output mode. */
	hmargin = (output_mode->hdisplay - crtc_mode->hdisplay) / 2;
	vmargin = (output_mode->vdisplay - crtc_mode->vdisplay) / 2;

	/* Shrink the margins as overscan increases (at 50 the margin is
	 * capped to 5% of the output size, at 0 it vanishes). */
	hmargin = interpolate(0, min(hmargin, output_mode->hdisplay/20), hmargin,
			      overscan);
	vmargin = interpolate(0, min(vmargin, output_mode->vdisplay/20), vmargin,
			      overscan);

	/* Scale ratios in 1/0x800 units; vratio rounded down to a
	 * multiple of 4. */
	hratio = crtc_mode->hdisplay * 0x800 / (output_mode->hdisplay - 2*hmargin);
	vratio = crtc_mode->vdisplay * 0x800 / (output_mode->vdisplay - 2*vmargin) & ~3;

	regs->fp_horiz_regs[FP_VALID_START] = hmargin;
	regs->fp_horiz_regs[FP_VALID_END] = output_mode->hdisplay - hmargin - 1;
	regs->fp_vert_regs[FP_VALID_START] = vmargin;
	regs->fp_vert_regs[FP_VALID_END] = output_mode->vdisplay - vmargin - 1;

	regs->fp_debug_1 = NV_PRAMDAC_FP_DEBUG_1_YSCALE_TESTMODE_ENABLE |
		XLATE(vratio, 0, NV_PRAMDAC_FP_DEBUG_1_YSCALE_VALUE) |
		NV_PRAMDAC_FP_DEBUG_1_XSCALE_TESTMODE_ENABLE |
		XLATE(hratio, 0, NV_PRAMDAC_FP_DEBUG_1_XSCALE_VALUE);

	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HVALID_START,
		      regs->fp_horiz_regs[FP_VALID_START]);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HVALID_END,
		      regs->fp_horiz_regs[FP_VALID_END]);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_VVALID_START,
		      regs->fp_vert_regs[FP_VALID_START]);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_VVALID_END,
		      regs->fp_vert_regs[FP_VALID_END]);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1, regs->fp_debug_1);
}
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c
new file mode 100644
index 000000000000..d6fc0a82f03d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv20_graph.c
@@ -0,0 +1,775 @@
1#include "drmP.h"
2#include "drm.h"
3#include "nouveau_drv.h"
4#include "nouveau_drm.h"
5
6/*
7 * NV20
8 * -----
9 * There are 3 families :
10 * NV20 is 0x10de:0x020*
11 * NV25/28 is 0x10de:0x025* / 0x10de:0x028*
12 * NV2A is 0x10de:0x02A0
13 *
14 * NV30
15 * -----
16 * There are 3 families :
17 * NV30/31 is 0x10de:0x030* / 0x10de:0x031*
18 * NV34 is 0x10de:0x032*
19 * NV35/36 is 0x10de:0x033* / 0x10de:0x034*
20 *
21 * Not seen in the wild, no dumps (probably NV35) :
22 * NV37 is 0x10de:0x00fc, 0x10de:0x00fd
23 * NV38 is 0x10de:0x0333, 0x10de:0x00fe
24 *
25 */
26
/*
 * Size in bytes of the per-channel PGRAPH context image for each
 * chipset family (NV2x sizes written as 32-bit word count * 4).
 */
#define NV20_GRCTX_SIZE (3580*4)
#define NV25_GRCTX_SIZE (3529*4)
#define NV2A_GRCTX_SIZE (3500*4)

#define NV30_31_GRCTX_SIZE (24392)
#define NV34_GRCTX_SIZE (18140)
#define NV35_36_GRCTX_SIZE (22396)
34
/*
 * Fill a fresh NV20 PGRAPH context object with its non-zero default
 * values.  Offsets are byte offsets into the context image, divided by
 * 4 because nv_wo32() takes a 32-bit word index.  The values are an
 * opaque hardware state dump; do not change them without a reference.
 */
static void
nv20_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
	int i;

	nv_wo32(dev, ctx, 0x033c/4, 0xffff0000);
	nv_wo32(dev, ctx, 0x03a0/4, 0x0fff0000);
	nv_wo32(dev, ctx, 0x03a4/4, 0x0fff0000);
	nv_wo32(dev, ctx, 0x047c/4, 0x00000101);
	nv_wo32(dev, ctx, 0x0490/4, 0x00000111);
	nv_wo32(dev, ctx, 0x04a8/4, 0x44400000);
	for (i = 0x04d4; i <= 0x04e0; i += 4)
		nv_wo32(dev, ctx, i/4, 0x00030303);
	for (i = 0x04f4; i <= 0x0500; i += 4)
		nv_wo32(dev, ctx, i/4, 0x00080000);
	for (i = 0x050c; i <= 0x0518; i += 4)
		nv_wo32(dev, ctx, i/4, 0x01012000);
	for (i = 0x051c; i <= 0x0528; i += 4)
		nv_wo32(dev, ctx, i/4, 0x000105b8);
	for (i = 0x052c; i <= 0x0538; i += 4)
		nv_wo32(dev, ctx, i/4, 0x00080008);
	for (i = 0x055c; i <= 0x0598; i += 4)
		nv_wo32(dev, ctx, i/4, 0x07ff0000);
	nv_wo32(dev, ctx, 0x05a4/4, 0x4b7fffff);
	nv_wo32(dev, ctx, 0x05fc/4, 0x00000001);
	nv_wo32(dev, ctx, 0x0604/4, 0x00004000);
	nv_wo32(dev, ctx, 0x0610/4, 0x00000001);
	nv_wo32(dev, ctx, 0x0618/4, 0x00040000);
	nv_wo32(dev, ctx, 0x061c/4, 0x00010000);
	/* Repeating 16-byte pattern (3 words + 1 untouched). */
	for (i = 0x1c1c; i <= 0x248c; i += 16) {
		nv_wo32(dev, ctx, (i + 0)/4, 0x10700ff9);
		nv_wo32(dev, ctx, (i + 4)/4, 0x0436086c);
		nv_wo32(dev, ctx, (i + 8)/4, 0x000c001b);
	}
	nv_wo32(dev, ctx, 0x281c/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x2830/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x285c/4, 0x40000000);
	nv_wo32(dev, ctx, 0x2860/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x2864/4, 0x3f000000);
	nv_wo32(dev, ctx, 0x286c/4, 0x40000000);
	nv_wo32(dev, ctx, 0x2870/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x2878/4, 0xbf800000);
	nv_wo32(dev, ctx, 0x2880/4, 0xbf800000);
	nv_wo32(dev, ctx, 0x34a4/4, 0x000fe000);
	nv_wo32(dev, ctx, 0x3530/4, 0x000003f8);
	nv_wo32(dev, ctx, 0x3540/4, 0x002fe000);
	for (i = 0x355c; i <= 0x3578; i += 4)
		nv_wo32(dev, ctx, i/4, 0x001c527c);
}
84
/*
 * Fill a fresh NV25/NV28 PGRAPH context object with its non-zero
 * default values (same scheme as nv20_graph_context_init(): byte
 * offsets / 4 as word indices; opaque hardware state dump).
 */
static void
nv25_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
	int i;

	nv_wo32(dev, ctx, 0x035c/4, 0xffff0000);
	nv_wo32(dev, ctx, 0x03c0/4, 0x0fff0000);
	nv_wo32(dev, ctx, 0x03c4/4, 0x0fff0000);
	nv_wo32(dev, ctx, 0x049c/4, 0x00000101);
	nv_wo32(dev, ctx, 0x04b0/4, 0x00000111);
	nv_wo32(dev, ctx, 0x04c8/4, 0x00000080);
	nv_wo32(dev, ctx, 0x04cc/4, 0xffff0000);
	nv_wo32(dev, ctx, 0x04d0/4, 0x00000001);
	nv_wo32(dev, ctx, 0x04e4/4, 0x44400000);
	nv_wo32(dev, ctx, 0x04fc/4, 0x4b800000);
	for (i = 0x0510; i <= 0x051c; i += 4)
		nv_wo32(dev, ctx, i/4, 0x00030303);
	for (i = 0x0530; i <= 0x053c; i += 4)
		nv_wo32(dev, ctx, i/4, 0x00080000);
	for (i = 0x0548; i <= 0x0554; i += 4)
		nv_wo32(dev, ctx, i/4, 0x01012000);
	for (i = 0x0558; i <= 0x0564; i += 4)
		nv_wo32(dev, ctx, i/4, 0x000105b8);
	for (i = 0x0568; i <= 0x0574; i += 4)
		nv_wo32(dev, ctx, i/4, 0x00080008);
	for (i = 0x0598; i <= 0x05d4; i += 4)
		nv_wo32(dev, ctx, i/4, 0x07ff0000);
	nv_wo32(dev, ctx, 0x05e0/4, 0x4b7fffff);
	nv_wo32(dev, ctx, 0x0620/4, 0x00000080);
	nv_wo32(dev, ctx, 0x0624/4, 0x30201000);
	nv_wo32(dev, ctx, 0x0628/4, 0x70605040);
	nv_wo32(dev, ctx, 0x062c/4, 0xb0a09080);
	nv_wo32(dev, ctx, 0x0630/4, 0xf0e0d0c0);
	nv_wo32(dev, ctx, 0x0664/4, 0x00000001);
	nv_wo32(dev, ctx, 0x066c/4, 0x00004000);
	nv_wo32(dev, ctx, 0x0678/4, 0x00000001);
	nv_wo32(dev, ctx, 0x0680/4, 0x00040000);
	nv_wo32(dev, ctx, 0x0684/4, 0x00010000);
	/* Repeating 16-byte pattern (3 words + 1 untouched). */
	for (i = 0x1b04; i <= 0x2374; i += 16) {
		nv_wo32(dev, ctx, (i + 0)/4, 0x10700ff9);
		nv_wo32(dev, ctx, (i + 4)/4, 0x0436086c);
		nv_wo32(dev, ctx, (i + 8)/4, 0x000c001b);
	}
	nv_wo32(dev, ctx, 0x2704/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x2718/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x2744/4, 0x40000000);
	nv_wo32(dev, ctx, 0x2748/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x274c/4, 0x3f000000);
	nv_wo32(dev, ctx, 0x2754/4, 0x40000000);
	nv_wo32(dev, ctx, 0x2758/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x2760/4, 0xbf800000);
	nv_wo32(dev, ctx, 0x2768/4, 0xbf800000);
	nv_wo32(dev, ctx, 0x308c/4, 0x000fe000);
	nv_wo32(dev, ctx, 0x3108/4, 0x000003f8);
	nv_wo32(dev, ctx, 0x3468/4, 0x002fe000);
	for (i = 0x3484; i <= 0x34a0; i += 4)
		nv_wo32(dev, ctx, i/4, 0x001c527c);
}
143
/*
 * Fill a fresh NV2A PGRAPH context object with its non-zero default
 * values (same scheme as nv20_graph_context_init(); opaque hardware
 * state dump).
 */
static void
nv2a_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
	int i;

	nv_wo32(dev, ctx, 0x033c/4, 0xffff0000);
	nv_wo32(dev, ctx, 0x03a0/4, 0x0fff0000);
	nv_wo32(dev, ctx, 0x03a4/4, 0x0fff0000);
	nv_wo32(dev, ctx, 0x047c/4, 0x00000101);
	nv_wo32(dev, ctx, 0x0490/4, 0x00000111);
	nv_wo32(dev, ctx, 0x04a8/4, 0x44400000);
	for (i = 0x04d4; i <= 0x04e0; i += 4)
		nv_wo32(dev, ctx, i/4, 0x00030303);
	for (i = 0x04f4; i <= 0x0500; i += 4)
		nv_wo32(dev, ctx, i/4, 0x00080000);
	for (i = 0x050c; i <= 0x0518; i += 4)
		nv_wo32(dev, ctx, i/4, 0x01012000);
	for (i = 0x051c; i <= 0x0528; i += 4)
		nv_wo32(dev, ctx, i/4, 0x000105b8);
	for (i = 0x052c; i <= 0x0538; i += 4)
		nv_wo32(dev, ctx, i/4, 0x00080008);
	for (i = 0x055c; i <= 0x0598; i += 4)
		nv_wo32(dev, ctx, i/4, 0x07ff0000);
	nv_wo32(dev, ctx, 0x05a4/4, 0x4b7fffff);
	nv_wo32(dev, ctx, 0x05fc/4, 0x00000001);
	nv_wo32(dev, ctx, 0x0604/4, 0x00004000);
	nv_wo32(dev, ctx, 0x0610/4, 0x00000001);
	nv_wo32(dev, ctx, 0x0618/4, 0x00040000);
	nv_wo32(dev, ctx, 0x061c/4, 0x00010000);
	for (i = 0x1a9c; i <= 0x22fc; i += 16) { /*XXX: check!! */
		nv_wo32(dev, ctx, (i + 0)/4, 0x10700ff9);
		nv_wo32(dev, ctx, (i + 4)/4, 0x0436086c);
		nv_wo32(dev, ctx, (i + 8)/4, 0x000c001b);
	}
	nv_wo32(dev, ctx, 0x269c/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x26b0/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x26dc/4, 0x40000000);
	nv_wo32(dev, ctx, 0x26e0/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x26e4/4, 0x3f000000);
	nv_wo32(dev, ctx, 0x26ec/4, 0x40000000);
	nv_wo32(dev, ctx, 0x26f0/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x26f8/4, 0xbf800000);
	nv_wo32(dev, ctx, 0x2700/4, 0xbf800000);
	nv_wo32(dev, ctx, 0x3024/4, 0x000fe000);
	nv_wo32(dev, ctx, 0x30a0/4, 0x000003f8);
	nv_wo32(dev, ctx, 0x33fc/4, 0x002fe000);
	for (i = 0x341c; i <= 0x3438; i += 4)
		nv_wo32(dev, ctx, i/4, 0x001c527c);
}
193
/*
 * Fill a fresh NV30/NV31 PGRAPH context object with its non-zero
 * default values (byte offsets / 4 as word indices; opaque hardware
 * state dump).  Note the repeated-pattern loop here writes "i/4 + n"
 * word indices, unlike the "(i + 4*n)/4" form used for NV2x —
 * equivalent since i is 4-aligned.
 */
static void
nv30_31_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
	int i;

	nv_wo32(dev, ctx, 0x0410/4, 0x00000101);
	nv_wo32(dev, ctx, 0x0424/4, 0x00000111);
	nv_wo32(dev, ctx, 0x0428/4, 0x00000060);
	nv_wo32(dev, ctx, 0x0444/4, 0x00000080);
	nv_wo32(dev, ctx, 0x0448/4, 0xffff0000);
	nv_wo32(dev, ctx, 0x044c/4, 0x00000001);
	nv_wo32(dev, ctx, 0x0460/4, 0x44400000);
	nv_wo32(dev, ctx, 0x048c/4, 0xffff0000);
	for (i = 0x04e0; i < 0x04e8; i += 4)
		nv_wo32(dev, ctx, i/4, 0x0fff0000);
	nv_wo32(dev, ctx, 0x04ec/4, 0x00011100);
	for (i = 0x0508; i < 0x0548; i += 4)
		nv_wo32(dev, ctx, i/4, 0x07ff0000);
	nv_wo32(dev, ctx, 0x0550/4, 0x4b7fffff);
	nv_wo32(dev, ctx, 0x058c/4, 0x00000080);
	nv_wo32(dev, ctx, 0x0590/4, 0x30201000);
	nv_wo32(dev, ctx, 0x0594/4, 0x70605040);
	nv_wo32(dev, ctx, 0x0598/4, 0xb8a89888);
	nv_wo32(dev, ctx, 0x059c/4, 0xf8e8d8c8);
	nv_wo32(dev, ctx, 0x05b0/4, 0xb0000000);
	for (i = 0x0600; i < 0x0640; i += 4)
		nv_wo32(dev, ctx, i/4, 0x00010588);
	for (i = 0x0640; i < 0x0680; i += 4)
		nv_wo32(dev, ctx, i/4, 0x00030303);
	for (i = 0x06c0; i < 0x0700; i += 4)
		nv_wo32(dev, ctx, i/4, 0x0008aae4);
	for (i = 0x0700; i < 0x0740; i += 4)
		nv_wo32(dev, ctx, i/4, 0x01012000);
	for (i = 0x0740; i < 0x0780; i += 4)
		nv_wo32(dev, ctx, i/4, 0x00080008);
	nv_wo32(dev, ctx, 0x085c/4, 0x00040000);
	nv_wo32(dev, ctx, 0x0860/4, 0x00010000);
	for (i = 0x0864; i < 0x0874; i += 4)
		nv_wo32(dev, ctx, i/4, 0x00040004);
	for (i = 0x1f18; i <= 0x3088 ; i += 16) {
		nv_wo32(dev, ctx, i/4 + 0, 0x10700ff9);
		nv_wo32(dev, ctx, i/4 + 1, 0x0436086c);
		nv_wo32(dev, ctx, i/4 + 2, 0x000c001b);
	}
	for (i = 0x30b8; i < 0x30c8; i += 4)
		nv_wo32(dev, ctx, i/4, 0x0000ffff);
	nv_wo32(dev, ctx, 0x344c/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x3808/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x381c/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x3848/4, 0x40000000);
	nv_wo32(dev, ctx, 0x384c/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x3850/4, 0x3f000000);
	nv_wo32(dev, ctx, 0x3858/4, 0x40000000);
	nv_wo32(dev, ctx, 0x385c/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x3864/4, 0xbf800000);
	nv_wo32(dev, ctx, 0x386c/4, 0xbf800000);
}
251
/* Fill in the default PGRAPH context image for NV34 chipsets.
 *
 * The byte offsets and values below are undocumented magic (presumably
 * captured from the proprietary driver); the exact values and write
 * order must be preserved.  nv_wo32() takes a 32-bit word index, hence
 * the /4 on every byte offset.
 */
static void
nv34_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
	int i;	/* byte offset cursor for the ranged fills below */

	nv_wo32(dev, ctx, 0x040c/4, 0x01000101);
	nv_wo32(dev, ctx, 0x0420/4, 0x00000111);
	nv_wo32(dev, ctx, 0x0424/4, 0x00000060);
	nv_wo32(dev, ctx, 0x0440/4, 0x00000080);
	nv_wo32(dev, ctx, 0x0444/4, 0xffff0000);
	nv_wo32(dev, ctx, 0x0448/4, 0x00000001);
	nv_wo32(dev, ctx, 0x045c/4, 0x44400000);
	nv_wo32(dev, ctx, 0x0480/4, 0xffff0000);
	for (i = 0x04d4; i < 0x04dc; i += 4)
		nv_wo32(dev, ctx, i/4, 0x0fff0000);
	nv_wo32(dev, ctx, 0x04e0/4, 0x00011100);
	for (i = 0x04fc; i < 0x053c; i += 4)
		nv_wo32(dev, ctx, i/4, 0x07ff0000);
	nv_wo32(dev, ctx, 0x0544/4, 0x4b7fffff);
	nv_wo32(dev, ctx, 0x057c/4, 0x00000080);
	nv_wo32(dev, ctx, 0x0580/4, 0x30201000);
	nv_wo32(dev, ctx, 0x0584/4, 0x70605040);
	nv_wo32(dev, ctx, 0x0588/4, 0xb8a89888);
	nv_wo32(dev, ctx, 0x058c/4, 0xf8e8d8c8);
	nv_wo32(dev, ctx, 0x05a0/4, 0xb0000000);
	for (i = 0x05f0; i < 0x0630; i += 4)
		nv_wo32(dev, ctx, i/4, 0x00010588);
	for (i = 0x0630; i < 0x0670; i += 4)
		nv_wo32(dev, ctx, i/4, 0x00030303);
	for (i = 0x06b0; i < 0x06f0; i += 4)
		nv_wo32(dev, ctx, i/4, 0x0008aae4);
	for (i = 0x06f0; i < 0x0730; i += 4)
		nv_wo32(dev, ctx, i/4, 0x01012000);
	for (i = 0x0730; i < 0x0770; i += 4)
		nv_wo32(dev, ctx, i/4, 0x00080008);
	nv_wo32(dev, ctx, 0x0850/4, 0x00040000);
	nv_wo32(dev, ctx, 0x0854/4, 0x00010000);
	for (i = 0x0858; i < 0x0868; i += 4)
		nv_wo32(dev, ctx, i/4, 0x00040004);
	/* 16-byte-stride records: three words initialised per record. */
	for (i = 0x15ac; i <= 0x271c ; i += 16) {
		nv_wo32(dev, ctx, i/4 + 0, 0x10700ff9);
		nv_wo32(dev, ctx, i/4 + 1, 0x0436086c);
		nv_wo32(dev, ctx, i/4 + 2, 0x000c001b);
	}
	for (i = 0x274c; i < 0x275c; i += 4)
		nv_wo32(dev, ctx, i/4, 0x0000ffff);
	/* IEEE-754 bit patterns: 0x3f800000 = 1.0, 0x3f000000 = 0.5,
	 * 0x40000000 = 2.0, 0xbf800000 = -1.0. */
	nv_wo32(dev, ctx, 0x2ae0/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x2e9c/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x2eb0/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x2edc/4, 0x40000000);
	nv_wo32(dev, ctx, 0x2ee0/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x2ee4/4, 0x3f000000);
	nv_wo32(dev, ctx, 0x2eec/4, 0x40000000);
	nv_wo32(dev, ctx, 0x2ef0/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x2ef8/4, 0xbf800000);
	nv_wo32(dev, ctx, 0x2f00/4, 0xbf800000);
}
309
/* Fill in the default PGRAPH context image for NV35/NV36 chipsets.
 *
 * Same structure as the NV34 variant but with shifted offsets and a
 * different leading value at 0x040c.  Values are undocumented magic;
 * preserve them and their order exactly.  nv_wo32() takes a word
 * index, hence the /4 on byte offsets.
 */
static void
nv35_36_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
	int i;	/* byte offset cursor for the ranged fills below */

	nv_wo32(dev, ctx, 0x040c/4, 0x00000101);
	nv_wo32(dev, ctx, 0x0420/4, 0x00000111);
	nv_wo32(dev, ctx, 0x0424/4, 0x00000060);
	nv_wo32(dev, ctx, 0x0440/4, 0x00000080);
	nv_wo32(dev, ctx, 0x0444/4, 0xffff0000);
	nv_wo32(dev, ctx, 0x0448/4, 0x00000001);
	nv_wo32(dev, ctx, 0x045c/4, 0x44400000);
	nv_wo32(dev, ctx, 0x0488/4, 0xffff0000);
	for (i = 0x04dc; i < 0x04e4; i += 4)
		nv_wo32(dev, ctx, i/4, 0x0fff0000);
	nv_wo32(dev, ctx, 0x04e8/4, 0x00011100);
	for (i = 0x0504; i < 0x0544; i += 4)
		nv_wo32(dev, ctx, i/4, 0x07ff0000);
	nv_wo32(dev, ctx, 0x054c/4, 0x4b7fffff);
	nv_wo32(dev, ctx, 0x0588/4, 0x00000080);
	nv_wo32(dev, ctx, 0x058c/4, 0x30201000);
	nv_wo32(dev, ctx, 0x0590/4, 0x70605040);
	nv_wo32(dev, ctx, 0x0594/4, 0xb8a89888);
	nv_wo32(dev, ctx, 0x0598/4, 0xf8e8d8c8);
	nv_wo32(dev, ctx, 0x05ac/4, 0xb0000000);
	for (i = 0x0604; i < 0x0644; i += 4)
		nv_wo32(dev, ctx, i/4, 0x00010588);
	for (i = 0x0644; i < 0x0684; i += 4)
		nv_wo32(dev, ctx, i/4, 0x00030303);
	for (i = 0x06c4; i < 0x0704; i += 4)
		nv_wo32(dev, ctx, i/4, 0x0008aae4);
	for (i = 0x0704; i < 0x0744; i += 4)
		nv_wo32(dev, ctx, i/4, 0x01012000);
	for (i = 0x0744; i < 0x0784; i += 4)
		nv_wo32(dev, ctx, i/4, 0x00080008);
	nv_wo32(dev, ctx, 0x0860/4, 0x00040000);
	nv_wo32(dev, ctx, 0x0864/4, 0x00010000);
	for (i = 0x0868; i < 0x0878; i += 4)
		nv_wo32(dev, ctx, i/4, 0x00040004);
	/* 16-byte-stride records: three words initialised per record. */
	for (i = 0x1f1c; i <= 0x308c ; i += 16) {
		nv_wo32(dev, ctx, i/4 + 0, 0x10700ff9);
		nv_wo32(dev, ctx, i/4 + 1, 0x0436086c);
		nv_wo32(dev, ctx, i/4 + 2, 0x000c001b);
	}
	for (i = 0x30bc; i < 0x30cc; i += 4)
		nv_wo32(dev, ctx, i/4, 0x0000ffff);
	/* IEEE-754 bit patterns: 0x3f800000 = 1.0, 0x3f000000 = 0.5,
	 * 0x40000000 = 2.0, 0xbf800000 = -1.0. */
	nv_wo32(dev, ctx, 0x3450/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x380c/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x3820/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x384c/4, 0x40000000);
	nv_wo32(dev, ctx, 0x3850/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x3854/4, 0x3f000000);
	nv_wo32(dev, ctx, 0x385c/4, 0x40000000);
	nv_wo32(dev, ctx, 0x3860/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x3868/4, 0xbf800000);
	nv_wo32(dev, ctx, 0x3870/4, 0xbf800000);
}
367
368int
369nv20_graph_create_context(struct nouveau_channel *chan)
370{
371 struct drm_device *dev = chan->dev;
372 struct drm_nouveau_private *dev_priv = dev->dev_private;
373 void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *);
374 unsigned int ctx_size;
375 unsigned int idoffs = 0x28/4;
376 int ret;
377
378 switch (dev_priv->chipset) {
379 case 0x20:
380 ctx_size = NV20_GRCTX_SIZE;
381 ctx_init = nv20_graph_context_init;
382 idoffs = 0;
383 break;
384 case 0x25:
385 case 0x28:
386 ctx_size = NV25_GRCTX_SIZE;
387 ctx_init = nv25_graph_context_init;
388 break;
389 case 0x2a:
390 ctx_size = NV2A_GRCTX_SIZE;
391 ctx_init = nv2a_graph_context_init;
392 idoffs = 0;
393 break;
394 case 0x30:
395 case 0x31:
396 ctx_size = NV30_31_GRCTX_SIZE;
397 ctx_init = nv30_31_graph_context_init;
398 break;
399 case 0x34:
400 ctx_size = NV34_GRCTX_SIZE;
401 ctx_init = nv34_graph_context_init;
402 break;
403 case 0x35:
404 case 0x36:
405 ctx_size = NV35_36_GRCTX_SIZE;
406 ctx_init = nv35_36_graph_context_init;
407 break;
408 default:
409 ctx_size = 0;
410 ctx_init = nv35_36_graph_context_init;
411 NV_ERROR(dev, "Please contact the devs if you want your NV%x"
412 " card to work\n", dev_priv->chipset);
413 return -ENOSYS;
414 break;
415 }
416
417 ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, ctx_size, 16,
418 NVOBJ_FLAG_ZERO_ALLOC,
419 &chan->ramin_grctx);
420 if (ret)
421 return ret;
422
423 /* Initialise default context values */
424 dev_priv->engine.instmem.prepare_access(dev, true);
425 ctx_init(dev, chan->ramin_grctx->gpuobj);
426
427 /* nv20: nv_wo32(dev, chan->ramin_grctx->gpuobj, 10, chan->id<<24); */
428 nv_wo32(dev, chan->ramin_grctx->gpuobj, idoffs,
429 (chan->id << 24) | 0x1); /* CTX_USER */
430
431 nv_wo32(dev, dev_priv->ctx_table->gpuobj, chan->id,
432 chan->ramin_grctx->instance >> 4);
433
434 dev_priv->engine.instmem.finish_access(dev);
435 return 0;
436}
437
/* Tear down the PGRAPH context belonging to 'chan'. */
void
nv20_graph_destroy_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/* Drop the channel's context object, if one was ever created. */
	if (chan->ramin_grctx)
		nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);

	/* Clear this channel's slot in the context pointer table so the
	 * hardware can no longer reference the freed context. */
	dev_priv->engine.instmem.prepare_access(dev, true);
	nv_wo32(dev, dev_priv->ctx_table->gpuobj, chan->id, 0);
	dev_priv->engine.instmem.finish_access(dev);
}
451
/* Ask PGRAPH to load 'chan's context image into the engine.
 *
 * Returns -EINVAL if the channel has no context allocated, 0 otherwise.
 */
int
nv20_graph_load_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	uint32_t inst;

	if (!chan->ramin_grctx)
		return -EINVAL;
	/* Context pointer registers take the instance address >> 4. */
	inst = chan->ramin_grctx->instance >> 4;

	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER,
		     NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD);
	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);

	/* The transfer is asynchronous; wait for PGRAPH to go idle
	 * before letting anyone touch it again. */
	nouveau_wait_for_idle(dev);
	return 0;
}
470
/* Save the currently-loaded PGRAPH context back to memory and mark the
 * engine as owned by no real channel.
 *
 * Returns 0, including when no channel context is currently loaded.
 */
int
nv20_graph_unload_context(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_channel *chan;
	uint32_t inst, tmp;

	chan = pgraph->channel(dev);
	if (!chan)
		return 0;
	/* NOTE(review): assumes a channel returned by pgraph->channel()
	 * always has ramin_grctx set — confirm against the lookup. */
	inst = chan->ramin_grctx->instance >> 4;

	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER,
		     NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE);

	nouveau_wait_for_idle(dev);

	/* Point CTX_USER at the highest (reserved) channel id so the
	 * engine no longer claims to run a real channel. */
	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
	tmp  = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
	tmp |= (pfifo->channels - 1) << 24;
	nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
	return 0;
}
497
498static void
499nv20_graph_rdi(struct drm_device *dev)
500{
501 struct drm_nouveau_private *dev_priv = dev->dev_private;
502 int i, writecount = 32;
503 uint32_t rdi_index = 0x2c80000;
504
505 if (dev_priv->chipset == 0x20) {
506 rdi_index = 0x3d0000;
507 writecount = 15;
508 }
509
510 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, rdi_index);
511 for (i = 0; i < writecount; i++)
512 nv_wr32(dev, NV10_PGRAPH_RDI_DATA, 0);
513
514 nouveau_wait_for_idle(dev);
515}
516
/* Program tiling region 'i' in PGRAPH: limit, pitch and base address
 * are written both to the directly-mapped registers and, via RDI, to
 * the shadow copies PGRAPH keeps internally.
 *
 * A zero pitch disables the region (the valid bit in 'addr' stays
 * clear); max(1u, ...) keeps the limit computation from underflowing
 * when addr + size == 0.
 */
void
nv20_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
			     uint32_t size, uint32_t pitch)
{
	uint32_t limit = max(1u, addr + size) - 1;

	if (pitch)
		addr |= 1;	/* bit 0 = region enable */

	nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
	nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
	nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);

	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i);
	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, limit);
	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i);
	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, pitch);
	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, addr);
}
537
538int
539nv20_graph_init(struct drm_device *dev)
540{
541 struct drm_nouveau_private *dev_priv =
542 (struct drm_nouveau_private *)dev->dev_private;
543 uint32_t tmp, vramsz;
544 int ret, i;
545
546 nv_wr32(dev, NV03_PMC_ENABLE,
547 nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
548 nv_wr32(dev, NV03_PMC_ENABLE,
549 nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH);
550
551 if (!dev_priv->ctx_table) {
552 /* Create Context Pointer Table */
553 dev_priv->ctx_table_size = 32 * 4;
554 ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0,
555 dev_priv->ctx_table_size, 16,
556 NVOBJ_FLAG_ZERO_ALLOC,
557 &dev_priv->ctx_table);
558 if (ret)
559 return ret;
560 }
561
562 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE,
563 dev_priv->ctx_table->instance >> 4);
564
565 nv20_graph_rdi(dev);
566
567 nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
568 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
569
570 nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
571 nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
572 nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x00118700);
573 nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */
574 nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000);
575 nv_wr32(dev, 0x40009C , 0x00000040);
576
577 if (dev_priv->chipset >= 0x25) {
578 nv_wr32(dev, 0x400890, 0x00080000);
579 nv_wr32(dev, 0x400610, 0x304B1FB6);
580 nv_wr32(dev, 0x400B80, 0x18B82880);
581 nv_wr32(dev, 0x400B84, 0x44000000);
582 nv_wr32(dev, 0x400098, 0x40000080);
583 nv_wr32(dev, 0x400B88, 0x000000ff);
584 } else {
585 nv_wr32(dev, 0x400880, 0x00080000); /* 0x0008c7df */
586 nv_wr32(dev, 0x400094, 0x00000005);
587 nv_wr32(dev, 0x400B80, 0x45CAA208); /* 0x45eae20e */
588 nv_wr32(dev, 0x400B84, 0x24000000);
589 nv_wr32(dev, 0x400098, 0x00000040);
590 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00038);
591 nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000030);
592 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E10038);
593 nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000030);
594 }
595
596 /* Turn all the tiling regions off. */
597 for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
598 nv20_graph_set_region_tiling(dev, i, 0, 0, 0);
599
600 for (i = 0; i < 8; i++) {
601 nv_wr32(dev, 0x400980 + i * 4, nv_rd32(dev, 0x100300 + i * 4));
602 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0090 + i * 4);
603 nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
604 nv_rd32(dev, 0x100300 + i * 4));
605 }
606 nv_wr32(dev, 0x4009a0, nv_rd32(dev, 0x100324));
607 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
608 nv_wr32(dev, NV10_PGRAPH_RDI_DATA, nv_rd32(dev, 0x100324));
609
610 nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
611 nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF);
612
613 tmp = nv_rd32(dev, NV10_PGRAPH_SURFACE) & 0x0007ff00;
614 nv_wr32(dev, NV10_PGRAPH_SURFACE, tmp);
615 tmp = nv_rd32(dev, NV10_PGRAPH_SURFACE) | 0x00020100;
616 nv_wr32(dev, NV10_PGRAPH_SURFACE, tmp);
617
618 /* begin RAM config */
619 vramsz = drm_get_resource_len(dev, 0) - 1;
620 nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
621 nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
622 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
623 nv_wr32(dev, NV10_PGRAPH_RDI_DATA , nv_rd32(dev, NV04_PFB_CFG0));
624 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
625 nv_wr32(dev, NV10_PGRAPH_RDI_DATA , nv_rd32(dev, NV04_PFB_CFG1));
626 nv_wr32(dev, 0x400820, 0);
627 nv_wr32(dev, 0x400824, 0);
628 nv_wr32(dev, 0x400864, vramsz - 1);
629 nv_wr32(dev, 0x400868, vramsz - 1);
630
631 /* interesting.. the below overwrites some of the tile setup above.. */
632 nv_wr32(dev, 0x400B20, 0x00000000);
633 nv_wr32(dev, 0x400B04, 0xFFFFFFFF);
634
635 nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
636 nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
637 nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
638 nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);
639
640 return 0;
641}
642
/* Release the context pointer table allocated by nv20_graph_init(). */
void
nv20_graph_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	nouveau_gpuobj_ref_del(dev, &dev_priv->ctx_table);
}
650
/* Bring PGRAPH out of reset and program its initial state on NV3x.
 * Allocates the per-channel context pointer table on first call.
 *
 * Returns 0 on success or the error from gpuobj allocation.
 */
int
nv30_graph_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret, i;

	/* Pulse PGRAPH through a PMC reset. */
	nv_wr32(dev, NV03_PMC_ENABLE,
		nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
	nv_wr32(dev, NV03_PMC_ENABLE,
		nv_rd32(dev, NV03_PMC_ENABLE) |  NV_PMC_ENABLE_PGRAPH);

	if (!dev_priv->ctx_table) {
		/* Create Context Pointer Table (32 channel slots). */
		dev_priv->ctx_table_size = 32 * 4;
		ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0,
					     dev_priv->ctx_table_size, 16,
					     NVOBJ_FLAG_ZERO_ALLOC,
					     &dev_priv->ctx_table);
		if (ret)
			return ret;
	}

	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE,
			dev_priv->ctx_table->instance >> 4);

	/* Ack and unmask all PGRAPH interrupts. */
	nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);

	/* Debug/tuning values; mostly undocumented magic — keep as-is. */
	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x401287c0);
	nv_wr32(dev, 0x400890, 0x01b463ff);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xf2de0475);
	nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00008000);
	nv_wr32(dev, NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6);
	nv_wr32(dev, 0x400B80, 0x1003d888);
	nv_wr32(dev, 0x400B84, 0x0c000000);
	nv_wr32(dev, 0x400098, 0x00000000);
	nv_wr32(dev, 0x40009C, 0x0005ad00);
	nv_wr32(dev, 0x400B88, 0x62ff00ff); /* suspiciously like PGRAPH_DEBUG_2 */
	nv_wr32(dev, 0x4000a0, 0x00000000);
	nv_wr32(dev, 0x4000a4, 0x00000008);
	nv_wr32(dev, 0x4008a8, 0xb784a400);
	nv_wr32(dev, 0x400ba0, 0x002f8685);
	nv_wr32(dev, 0x400ba4, 0x00231f3f);
	nv_wr32(dev, 0x4008a4, 0x40000020);

	/* NV34-only extra RDI setup. */
	if (dev_priv->chipset == 0x34) {
		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00200201);
		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0008);
		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000008);
		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000032);
		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00004);
		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000002);
	}

	nv_wr32(dev, 0x4000c0, 0x00000016);

	/* Turn all the tiling regions off. */
	for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
		nv20_graph_set_region_tiling(dev, i, 0, 0, 0);

	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
	nv_wr32(dev, NV10_PGRAPH_STATE      , 0xFFFFFFFF);
	nv_wr32(dev, 0x0040075c             , 0x00000001);

	/* begin RAM config */
	/* vramsz = drm_get_resource_len(dev, 0) - 1; */
	nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
	nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
	if (dev_priv->chipset != 0x34) {
		/* Mirror the PFB config through 0x400750/0x400754. */
		nv_wr32(dev, 0x400750, 0x00EA0000);
		nv_wr32(dev, 0x400754, nv_rd32(dev, NV04_PFB_CFG0));
		nv_wr32(dev, 0x400750, 0x00EA0004);
		nv_wr32(dev, 0x400754, nv_rd32(dev, NV04_PFB_CFG1));
	}

	return 0;
}
732
/* Graphics object classes accepted on NV2x PGRAPH.  Each entry is
 * (class id, software-method flag, method handler); the table is
 * terminated by an empty entry. */
struct nouveau_pgraph_object_class nv20_graph_grclass[] = {
	{ 0x0030, false, NULL }, /* null */
	{ 0x0039, false, NULL }, /* m2mf */
	{ 0x004a, false, NULL }, /* gdirect */
	{ 0x009f, false, NULL }, /* imageblit (nv12) */
	{ 0x008a, false, NULL }, /* ifc */
	{ 0x0089, false, NULL }, /* sifm */
	{ 0x0062, false, NULL }, /* surf2d */
	{ 0x0043, false, NULL }, /* rop */
	{ 0x0012, false, NULL }, /* beta1 */
	{ 0x0072, false, NULL }, /* beta4 */
	{ 0x0019, false, NULL }, /* cliprect */
	{ 0x0044, false, NULL }, /* pattern */
	{ 0x009e, false, NULL }, /* swzsurf */
	{ 0x0096, false, NULL }, /* celcius */
	{ 0x0097, false, NULL }, /* kelvin (nv20) */
	{ 0x0597, false, NULL }, /* kelvin (nv25) */
	{}
};
752
/* Graphics object classes accepted on NV3x PGRAPH; same layout as
 * nv20_graph_grclass, with the NV30-era class variants added.  The
 * table is terminated by an empty entry. */
struct nouveau_pgraph_object_class nv30_graph_grclass[] = {
	{ 0x0030, false, NULL }, /* null */
	{ 0x0039, false, NULL }, /* m2mf */
	{ 0x004a, false, NULL }, /* gdirect */
	{ 0x009f, false, NULL }, /* imageblit (nv12) */
	{ 0x008a, false, NULL }, /* ifc */
	{ 0x038a, false, NULL }, /* ifc (nv30) */
	{ 0x0089, false, NULL }, /* sifm */
	{ 0x0389, false, NULL }, /* sifm (nv30) */
	{ 0x0062, false, NULL }, /* surf2d */
	{ 0x0362, false, NULL }, /* surf2d (nv30) */
	{ 0x0043, false, NULL }, /* rop */
	{ 0x0012, false, NULL }, /* beta1 */
	{ 0x0072, false, NULL }, /* beta4 */
	{ 0x0019, false, NULL }, /* cliprect */
	{ 0x0044, false, NULL }, /* pattern */
	{ 0x039e, false, NULL }, /* swzsurf */
	{ 0x0397, false, NULL }, /* rankine (nv30) */
	{ 0x0497, false, NULL }, /* rankine (nv35) */
	{ 0x0697, false, NULL }, /* rankine (nv34) */
	{}
};
775
diff --git a/drivers/gpu/drm/nouveau/nv40_fb.c b/drivers/gpu/drm/nouveau/nv40_fb.c
new file mode 100644
index 000000000000..3cd07d8d5bd7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv40_fb.c
@@ -0,0 +1,75 @@
1#include "drmP.h"
2#include "drm.h"
3#include "nouveau_drv.h"
4#include "nouveau_drm.h"
5
6void
7nv40_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
8 uint32_t size, uint32_t pitch)
9{
10 struct drm_nouveau_private *dev_priv = dev->dev_private;
11 uint32_t limit = max(1u, addr + size) - 1;
12
13 if (pitch)
14 addr |= 1;
15
16 switch (dev_priv->chipset) {
17 case 0x40:
18 nv_wr32(dev, NV10_PFB_TLIMIT(i), limit);
19 nv_wr32(dev, NV10_PFB_TSIZE(i), pitch);
20 nv_wr32(dev, NV10_PFB_TILE(i), addr);
21 break;
22
23 default:
24 nv_wr32(dev, NV40_PFB_TLIMIT(i), limit);
25 nv_wr32(dev, NV40_PFB_TSIZE(i), pitch);
26 nv_wr32(dev, NV40_PFB_TILE(i), addr);
27 break;
28 }
29}
30
/* Initialise the NV4x framebuffer engine: quirk registers, the number
 * of tiling regions for this chipset, and all regions disabled.
 *
 * Always returns 0.
 */
int
nv40_fb_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	uint32_t tmp;
	int i;

	/* This is strictly a NV4x register (don't know about NV5x). */
	/* The blob sets these to all kinds of values, and they mess up our setup. */
	/* I got value 0x52802 instead. For some cards the blob even sets it back to 0x1. */
	/* Note: the blob doesn't read this value, so i'm pretty sure this is safe for all cards. */
	/* Any idea what this is? */
	nv_wr32(dev, NV40_PFB_UNK_800, 0x1);

	/* Pick the tiling-region count (and a quirk) per chipset. */
	switch (dev_priv->chipset) {
	case 0x40:
	case 0x45:
		/* Clear bit 15 of CLOSE_PAGE2; purpose unknown. */
		tmp = nv_rd32(dev, NV10_PFB_CLOSE_PAGE2);
		nv_wr32(dev, NV10_PFB_CLOSE_PAGE2, tmp & ~(1 << 15));
		pfb->num_tiles = NV10_PFB_TILE__SIZE;
		break;
	case 0x46: /* G72 */
	case 0x47: /* G70 */
	case 0x49: /* G71 */
	case 0x4b: /* G73 */
	case 0x4c: /* C51 (G7X version) */
		pfb->num_tiles = NV40_PFB_TILE__SIZE_1;
		break;
	default:
		pfb->num_tiles = NV40_PFB_TILE__SIZE_0;
		break;
	}

	/* Turn all the tiling regions off. */
	for (i = 0; i < pfb->num_tiles; i++)
		pfb->set_region_tiling(dev, i, 0, 0, 0);

	return 0;
}
71
/* NV4x PFB has no teardown work; this exists to satisfy the engine
 * function table. */
void
nv40_fb_takedown(struct drm_device *dev)
{
}
diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c
new file mode 100644
index 000000000000..500ccfd3a0b8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv40_fifo.c
@@ -0,0 +1,319 @@
1/*
2 * Copyright (C) 2007 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "nouveau_drv.h"
29#include "nouveau_drm.h"
30
31#define NV40_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV40_RAMFC__SIZE))
32#define NV40_RAMFC__SIZE 128
33
/* Create the PFIFO (RAMFC) context for 'chan' and enable DMA mode for
 * that channel.
 *
 * Returns 0 on success or the error from gpuobj creation.
 */
int
nv40_fifo_create_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t fc = NV40_RAMFC(chan->id);	/* base of this channel's RAMFC entry */
	unsigned long flags;
	int ret;

	/* RAMFC lives at a fixed instmem offset; wrap it in a fake
	 * gpuobj so the usual accessors/refcounting work. */
	ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0,
				      NV40_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC |
				      NVOBJ_FLAG_ZERO_FREE, NULL, &chan->ramfc);
	if (ret)
		return ret;

	/* Block context switches while we fill in the entry. */
	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);

	/* Byte offsets below are the RAMFC field layout used by
	 * nv40_fifo_do_load_context()/nv40_fifo_unload_context(). */
	dev_priv->engine.instmem.prepare_access(dev, true);
	nv_wi32(dev, fc +  0, chan->pushbuf_base);
	nv_wi32(dev, fc +  4, chan->pushbuf_base);
	nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4);
	nv_wi32(dev, fc + 24, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
			      NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
			      NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
#ifdef __BIG_ENDIAN
			      NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
			      0x30000000 /* no idea.. */);
	nv_wi32(dev, fc + 56, chan->ramin_grctx->instance >> 4);
	nv_wi32(dev, fc + 60, 0x0001FFFF);
	dev_priv->engine.instmem.finish_access(dev);

	/* enable the fifo dma operation */
	nv_wr32(dev, NV04_PFIFO_MODE,
		nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));

	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
	return 0;
}
73
/* Disable DMA mode for 'chan' and release its RAMFC context object. */
void
nv40_fifo_destroy_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;

	/* Take the channel out of DMA mode before freeing its RAMFC. */
	nv_wr32(dev, NV04_PFIFO_MODE,
		nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));

	if (chan->ramfc)
		nouveau_gpuobj_ref_del(dev, &chan->ramfc);
}
85
/* Copy channel 'chid's RAMFC state into the live PFIFO CACHE1
 * registers.  Field offsets must match nv40_fifo_create_context()
 * and nv40_fifo_unload_context() exactly.
 */
static void
nv40_fifo_do_load_context(struct drm_device *dev, int chid)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t fc = NV40_RAMFC(chid), tmp, tmp2;

	dev_priv->engine.instmem.prepare_access(dev, false);

	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
	nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8));
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, nv_ri32(dev, fc + 12));
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, nv_ri32(dev, fc + 16));
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 20));

	/* No idea what 0x2058 is.. */
	/* The saved FETCH word carries two extra bits (0x30000000) that
	 * belong in 0x2058, not in DMA_FETCH; split them back out. */
	tmp   = nv_ri32(dev, fc + 24);
	tmp2  = nv_rd32(dev, 0x2058) & 0xFFF;
	tmp2 |= (tmp & 0x30000000);
	nv_wr32(dev, 0x2058, tmp2);
	tmp  &= ~0x30000000;
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, tmp);

	nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 28));
	nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 32));
	nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE, nv_ri32(dev, fc + 36));
	tmp = nv_ri32(dev, fc + 40);
	nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, tmp);
	nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT, nv_ri32(dev, fc + 44));
	nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, nv_ri32(dev, fc + 48));
	nv_wr32(dev, NV10_PFIFO_CACHE1_DMA_SUBROUTINE, nv_ri32(dev, fc + 52));
	nv_wr32(dev, NV40_PFIFO_GRCTX_INSTANCE, nv_ri32(dev, fc + 56));

	/* Don't clobber the TIMEOUT_ENABLED flag when restoring from RAMFC */
	tmp  = nv_rd32(dev, NV04_PFIFO_DMA_TIMESLICE) & ~0x1FFFF;
	tmp |= nv_ri32(dev, fc + 60) & 0x1FFFF;
	nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, tmp);

	nv_wr32(dev, 0x32e4, nv_ri32(dev, fc + 64));
	/* NVIDIA does this next line twice... */
	nv_wr32(dev, 0x32e8, nv_ri32(dev, fc + 68));
	nv_wr32(dev, 0x2088, nv_ri32(dev, fc + 76));
	nv_wr32(dev, 0x3300, nv_ri32(dev, fc + 80));

	dev_priv->engine.instmem.finish_access(dev);

	/* Reset the cache read/write pointers for the new channel. */
	nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
}
135
/* Load 'chan's RAMFC state into PFIFO and make the channel active in
 * DMA mode.
 *
 * Always returns 0.
 */
int
nv40_fifo_load_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	uint32_t tmp;

	nv40_fifo_do_load_context(dev, chan->id);

	/* Set channel active, and in DMA mode */
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
		     NV40_PFIFO_CACHE1_PUSH1_DMA | chan->id);
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1);

	/* Reset DMA_CTL_AT_INFO to INVALID */
	tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31);
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp);

	return 0;
}
155
/* Save the currently-active channel's PFIFO state back into its RAMFC
 * entry, then park PFIFO on the highest (reserved) channel id.
 *
 * Field offsets must mirror nv40_fifo_do_load_context().  Returns 0,
 * including when no valid channel is active.
 */
int
nv40_fifo_unload_context(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	uint32_t fc, tmp;
	int chid;

	chid = pfifo->channel_id(dev);
	if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
		return 0;
	fc = NV40_RAMFC(chid);

	dev_priv->engine.instmem.prepare_access(dev, true);
	nv_wi32(dev, fc + 0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
	nv_wi32(dev, fc + 4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
	nv_wi32(dev, fc + 8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT));
	nv_wi32(dev, fc + 12, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE));
	nv_wi32(dev, fc + 16, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT));
	nv_wi32(dev, fc + 20, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE));
	/* Fold the 0x2058 bits into the saved FETCH word; the load path
	 * splits them back apart. */
	tmp  = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH);
	tmp |= nv_rd32(dev, 0x2058) & 0x30000000;
	nv_wi32(dev, fc + 24, tmp);
	nv_wi32(dev, fc + 28, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
	nv_wi32(dev, fc + 32, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));
	nv_wi32(dev, fc + 36, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE));
	tmp = nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP);
	nv_wi32(dev, fc + 40, tmp);
	nv_wi32(dev, fc + 44, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT));
	nv_wi32(dev, fc + 48, nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE));
	/* NVIDIA read 0x3228 first, then write DMA_GET here.. maybe something
	 * more involved depending on the value of 0x3228?
	 */
	nv_wi32(dev, fc + 52, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
	nv_wi32(dev, fc + 56, nv_rd32(dev, NV40_PFIFO_GRCTX_INSTANCE));
	nv_wi32(dev, fc + 60, nv_rd32(dev, NV04_PFIFO_DMA_TIMESLICE) & 0x1ffff);
	/* No idea what the below is for exactly, ripped from a mmio-trace */
	nv_wi32(dev, fc + 64, nv_rd32(dev, NV40_PFIFO_UNK32E4));
	/* NVIDIA do this next line twice.. bug? */
	nv_wi32(dev, fc + 68, nv_rd32(dev, 0x32e8));
	nv_wi32(dev, fc + 76, nv_rd32(dev, 0x2088));
	nv_wi32(dev, fc + 80, nv_rd32(dev, 0x3300));
#if 0 /* no real idea which is PUT/GET in UNK_48.. */
	tmp  = nv_rd32(dev, NV04_PFIFO_CACHE1_GET);
	tmp |= (nv_rd32(dev, NV04_PFIFO_CACHE1_PUT) << 16);
	nv_wi32(dev, fc + 72, tmp);
#endif
	dev_priv->engine.instmem.finish_access(dev);

	/* Park PFIFO on the reserved last channel. */
	nv40_fifo_do_load_context(dev, pfifo->channels - 1);
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
		     NV40_PFIFO_CACHE1_PUSH1_DMA | (pfifo->channels - 1));
	return 0;
}
210
/* Reset PFIFO via PMC and zero/seed its control registers.  The raw
 * register offsets and values mirror what the binary driver programs;
 * most are undocumented — keep them and their order as-is.
 */
static void
nv40_fifo_init_reset(struct drm_device *dev)
{
	int i;

	/* Pulse PFIFO through a PMC reset. */
	nv_wr32(dev, NV03_PMC_ENABLE,
		nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO);
	nv_wr32(dev, NV03_PMC_ENABLE,
		nv_rd32(dev, NV03_PMC_ENABLE) |  NV_PMC_ENABLE_PFIFO);

	nv_wr32(dev, 0x003224, 0x000f0078);
	nv_wr32(dev, 0x003210, 0x00000000);
	nv_wr32(dev, 0x003270, 0x00000000);
	nv_wr32(dev, 0x003240, 0x00000000);
	nv_wr32(dev, 0x003244, 0x00000000);
	nv_wr32(dev, 0x003258, 0x00000000);
	nv_wr32(dev, 0x002504, 0x00000000);
	for (i = 0; i < 16; i++)
		nv_wr32(dev, 0x002510 + (i * 4), 0x00000000);
	nv_wr32(dev, 0x00250c, 0x0000ffff);
	nv_wr32(dev, 0x002048, 0x00000000);
	nv_wr32(dev, 0x003228, 0x00000000);
	nv_wr32(dev, 0x0032e8, 0x00000000);
	nv_wr32(dev, 0x002410, 0x00000000);
	nv_wr32(dev, 0x002420, 0x00000000);
	nv_wr32(dev, 0x002058, 0x00000001);
	nv_wr32(dev, 0x00221c, 0x00000000);
	/* something with 0x2084, read/modify/write, no change */
	nv_wr32(dev, 0x002040, 0x000000ff);
	nv_wr32(dev, 0x002500, 0x00000000);
	nv_wr32(dev, 0x003200, 0x00000000);

	nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x2101ffff);
}
245
/* Tell PFIFO where RAMHT, RAMRO and RAMFC live in instance memory. */
static void
nv40_fifo_init_ramxx(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/* RAMHT: search stride, hash-table size (log2, relative to 9)
	 * and base address. */
	nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
				       ((dev_priv->ramht_bits - 9) << 16) |
				       (dev_priv->ramht_offset >> 8));
	nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8);

	/* Quirk register for G70-class chips; purpose unknown. */
	switch (dev_priv->chipset) {
	case 0x47:
	case 0x49:
	case 0x4b:
		nv_wr32(dev, 0x2230, 1);
		break;
	default:
		break;
	}

	switch (dev_priv->chipset) {
	case 0x40:
	case 0x41:
	case 0x42:
	case 0x43:
	case 0x45:
	case 0x47:
	case 0x48:
	case 0x49:
	case 0x4b:
		nv_wr32(dev, NV40_PFIFO_RAMFC, 0x30002);
		break;
	default:
		/* NOTE(review): the '| (3 << 16)' after a '>> 16' looks
		 * odd but matches the original; confirm the RAMFC
		 * register encoding before changing it. */
		nv_wr32(dev, 0x2230, 0);
		nv_wr32(dev, NV40_PFIFO_RAMFC,
			((dev_priv->vram_size - 512 * 1024 +
			  dev_priv->ramfc_offset) >> 16) | (3 << 16));
		break;
	}
}
286
/* Ack any pending PFIFO interrupts (0x2100) and unmask all of them
 * (0x2140). */
static void
nv40_fifo_init_intr(struct drm_device *dev)
{
	nv_wr32(dev, 0x002100, 0xffffffff);
	nv_wr32(dev, 0x002140, 0xffffffff);
}
293
/* Full PFIFO bring-up for NV4x: reset, RAM area setup, park on the
 * reserved channel, enable interrupts and re-enable DMA mode for any
 * channels that already exist (e.g. across suspend/resume).
 *
 * Always returns 0.
 */
int
nv40_fifo_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	int i;

	nv40_fifo_init_reset(dev);
	nv40_fifo_init_ramxx(dev);

	/* Park PFIFO on the reserved last channel. */
	nv40_fifo_do_load_context(dev, pfifo->channels - 1);
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);

	nv40_fifo_init_intr(dev);
	pfifo->enable(dev);
	pfifo->reassign(dev, true);

	/* Restore DMA mode for every already-created channel. */
	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
		if (dev_priv->fifos[i]) {
			uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
			nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
		}
	}

	return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
new file mode 100644
index 000000000000..0616c96e4b67
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -0,0 +1,427 @@
1/*
2 * Copyright (C) 2007 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm.h"
29#include "nouveau_drv.h"
30#include "nouveau_grctx.h"
31
32struct nouveau_channel *
33nv40_graph_channel(struct drm_device *dev)
34{
	/* Return the channel whose PGRAPH context is currently loaded,
	 * or NULL if no context is loaded / no channel owns it.  The
	 * CTXCTL_CUR register holds the context's instance address in
	 * 16-byte units, which we match against each channel's grctx. */
35	struct drm_nouveau_private *dev_priv = dev->dev_private;
36	uint32_t inst;
37	int i;
38
39	inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR);
40	if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED))
41	 return NULL;
42	inst = (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) << 4;
43
44	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
45	 struct nouveau_channel *chan = dev_priv->fifos[i];
46
47	 if (chan && chan->ramin_grctx &&
48	 chan->ramin_grctx->instance == inst)
49	 return chan;
50	}
51
52	return NULL;
53}
54
55int
56nv40_graph_create_context(struct nouveau_channel *chan)
57{
	/* Allocate and pre-initialise a PGRAPH context for @chan.
	 * The context object lives in instance memory, 16-byte aligned and
	 * zero-filled.  Default values come either from our generated
	 * context-program tables or, if external ctxprog firmware is in
	 * use, from the firmware-provided value table.
	 * Returns 0 on success or a negative errno from allocation. */
58	struct drm_device *dev = chan->dev;
59	struct drm_nouveau_private *dev_priv = dev->dev_private;
60	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
61	int ret;
62
63	ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size,
64	 16, NVOBJ_FLAG_ZERO_ALLOC,
65	 &chan->ramin_grctx);
66	if (ret)
67	 return ret;
68
69	/* Initialise default context values */
70	dev_priv->engine.instmem.prepare_access(dev, true);
71	if (!pgraph->ctxprog) {
72	 struct nouveau_grctx ctx = {};
73
		/* No firmware ctxprog: run our generator in VALS mode so it
		 * writes default register values directly into the object. */
74	 ctx.dev = chan->dev;
75	 ctx.mode = NOUVEAU_GRCTX_VALS;
76	 ctx.data = chan->ramin_grctx->gpuobj;
77	 nv40_grctx_init(&ctx);
78	} else {
79	 nouveau_grctx_vals_load(dev, chan->ramin_grctx->gpuobj);
80	}
	/* Word 0 of the context holds its own PRAMIN offset; the ctxprog
	 * reads this back (see nv40_grctx.c notes on 0x408000). */
81	nv_wo32(dev, chan->ramin_grctx->gpuobj, 0,
82	 chan->ramin_grctx->gpuobj->im_pramin->start);
83	dev_priv->engine.instmem.finish_access(dev);
84	return 0;
85}
86
87void
88nv40_graph_destroy_context(struct nouveau_channel *chan)
89{
	/* Drop the channel's PGRAPH context object; the ref helper also
	 * clears chan->ramin_grctx. */
90	nouveau_gpuobj_ref_del(chan->dev, &chan->ramin_grctx);
91}
92
93static int
94nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save)
95{
	/* Ask the context-control microcode to transfer PGRAPH state
	 * between the engine and the context at instance address @inst
	 * (in 16-byte units).  @save selects direction: non-zero saves
	 * engine state to memory, zero loads it from memory.
	 * Returns 0 on success, -EBUSY if the transfer never completes. */
96	uint32_t old_cp, tv = 1000, tmp;
97	int i;
98
99	old_cp = nv_rd32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER);
100	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
101
102	tmp = nv_rd32(dev, NV40_PGRAPH_CTXCTL_0310);
103	tmp |= save ? NV40_PGRAPH_CTXCTL_0310_XFER_SAVE :
104	 NV40_PGRAPH_CTXCTL_0310_XFER_LOAD;
105	nv_wr32(dev, NV40_PGRAPH_CTXCTL_0310, tmp);
106
	/* Kick off the transfer. */
107	tmp = nv_rd32(dev, NV40_PGRAPH_CTXCTL_0304);
108	tmp |= NV40_PGRAPH_CTXCTL_0304_XFER_CTX;
109	nv_wr32(dev, NV40_PGRAPH_CTXCTL_0304, tmp);
110
111	nouveau_wait_for_idle(dev);
112
	/* Busy-poll 0x40030C until the microcode reports completion,
	 * bounded at 1000 reads. */
113	for (i = 0; i < tv; i++) {
114	 if (nv_rd32(dev, NV40_PGRAPH_CTXCTL_030C) == 0)
115	 break;
116	}
117
	/* Restore the caller's context pointer regardless of outcome. */
118	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, old_cp);
119
120	if (i == tv) {
		/* Timed out: dump the microcode IP/opcode to aid debugging. */
121	 uint32_t ucstat = nv_rd32(dev, NV40_PGRAPH_CTXCTL_UCODE_STAT);
122	 NV_ERROR(dev, "Failed: Instance=0x%08x Save=%d\n", inst, save);
123	 NV_ERROR(dev, "IP: 0x%02x, Opcode: 0x%08x\n",
124	 ucstat >> NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT,
125	 ucstat & NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK);
126	 NV_ERROR(dev, "0x40030C = 0x%08x\n",
127	 nv_rd32(dev, NV40_PGRAPH_CTXCTL_030C));
128	 return -EBUSY;
129	}
130
131	return 0;
132}
133
134/* Restore the context for a specific channel into PGRAPH */
135int
136nv40_graph_load_context(struct nouveau_channel *chan)
137{
	/* Returns -EINVAL if the channel has no grctx allocated, otherwise
	 * the result of the context transfer (0 or -EBUSY). */
138	struct drm_device *dev = chan->dev;
139	uint32_t inst;
140	int ret;
141
142	if (!chan->ramin_grctx)
143	 return -EINVAL;
144	inst = chan->ramin_grctx->instance >> 4;
145
146	ret = nv40_graph_transfer_context(dev, inst, 0);
147	if (ret)
148	 return ret;
149
150	/* 0x40032C, no idea of its exact function. Could simply be a
151	 * record of the currently active PGRAPH context. It's currently
152	 * unknown as to what bit 24 does. The nv ddx has it set, so we will
153	 * set it here too.
154	 */
155	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
156	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR,
157	 (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) |
158	 NV40_PGRAPH_CTXCTL_CUR_LOADED);
159	/* 0x32E0 records the instance address of the active FIFO's PGRAPH
160	 * context. If at any time this doesn't match 0x40032C, you will
161	 * receive PGRAPH_INTR_CONTEXT_SWITCH
162	 */
163	nv_wr32(dev, NV40_PFIFO_GRCTX_INSTANCE, inst);
164	return 0;
165}
166
167int
168nv40_graph_unload_context(struct drm_device *dev)
169{
	/* Save the currently loaded PGRAPH context (if any) back to memory
	 * and mark the engine as having no context loaded.  Returns 0 if
	 * nothing was loaded, otherwise the transfer result. */
170	uint32_t inst;
171	int ret;
172
173	inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR);
174	if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED))
175	 return 0;
176	inst &= NV40_PGRAPH_CTXCTL_CUR_INSTANCE;
177
178	ret = nv40_graph_transfer_context(dev, inst, 1);
179
	/* Writing CUR without the LOADED bit clears the loaded state. */
180	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, inst);
181	return ret;
182}
183
184void
185nv40_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
186 uint32_t size, uint32_t pitch)
187{
	/* Program PGRAPH's view of tiling region @i (address, limit,
	 * pitch).  A zero @pitch disables the region.  Which register
	 * bank(s) PGRAPH uses varies by chipset: NV44-class cores have
	 * only the NV20-style bank, G70-class cores use the NV47 layout
	 * plus a second bank, and the rest program both NV20 and the
	 * second NV40 bank. */
188	struct drm_nouveau_private *dev_priv = dev->dev_private;
	/* max(1u, ...) keeps the limit from wrapping to 0xffffffff when
	 * addr == size == 0 (the "region disabled" case). */
189	uint32_t limit = max(1u, addr + size) - 1;
190
	/* Bit 0 of the address register is the region-enable flag. */
191	if (pitch)
192	 addr |= 1;
193
194	switch (dev_priv->chipset) {
195	case 0x44:
196	case 0x4a:
197	case 0x4e:
198	 nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
199	 nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
200	 nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
201	 break;
202
203	case 0x46:
204	case 0x47:
205	case 0x49:
206	case 0x4b:
207	 nv_wr32(dev, NV47_PGRAPH_TSIZE(i), pitch);
208	 nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), limit);
209	 nv_wr32(dev, NV47_PGRAPH_TILE(i), addr);
210	 nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch);
211	 nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit);
212	 nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr);
213	 break;
214
215	default:
216	 nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
217	 nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
218	 nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
219	 nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch);
220	 nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit);
221	 nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr);
222	 break;
223	}
224}
225
226/*
227 * G70 0x47
228 * G71 0x49
229 * NV45 0x48
230 * G72[M] 0x46
231 * G73 0x4b
232 * C51_G7X 0x4c
233 * C51 0x4e
234 */
235int
236nv40_graph_init(struct drm_device *dev)
237{
	/* Bring up PGRAPH on NV4x: reset the engine, upload (or generate
	 * and upload) the context program, clear interrupts, then apply a
	 * long sequence of chipset-specific register defaults, disable all
	 * tiling regions, and finally program the VRAM size/config shadow
	 * registers.  Always returns 0.  Most magic values below come from
	 * register traces of the proprietary driver. */
238	struct drm_nouveau_private *dev_priv =
239	(struct drm_nouveau_private *)dev->dev_private;
240	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
241	uint32_t vramsz;
242	int i, j;
243
	/* Pulse PGRAPH's PMC enable bit low then high to reset the unit. */
244	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
245	 ~NV_PMC_ENABLE_PGRAPH);
246	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
247	 NV_PMC_ENABLE_PGRAPH);
248
	/* Prefer external ctxprog firmware when the module option asks
	 * for it; 175KiB is a safe upper bound for its context size. */
249	if (nouveau_ctxfw) {
250	 nouveau_grctx_prog_load(dev);
251	 dev_priv->engine.graph.grctx_size = 175 * 1024;
252	}
253
254	if (!dev_priv->engine.graph.ctxprog) {
		/* No firmware: generate a context program in-kernel and
		 * upload it through the UCODE index/data port pair. */
255	 struct nouveau_grctx ctx = {};
256	 uint32_t cp[256];
257
258	 ctx.dev = dev;
259	 ctx.mode = NOUVEAU_GRCTX_PROG;
260	 ctx.data = cp;
261	 ctx.ctxprog_max = 256;
262	 nv40_grctx_init(&ctx);
263	 dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4;
264
265	 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
266	 for (i = 0; i < ctx.ctxprog_len; i++)
267	 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);
268	}
269
270	/* No context present currently */
271	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
272
	/* Ack and unmask all PGRAPH interrupts. */
273	nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
274	nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
275
276	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
277	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
278	nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x401287c0);
279	nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xe0de8055);
280	nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00008000);
281	nv_wr32(dev, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);
282
283	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
284	nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF);
285
	/* 0x1540 is a unit-enable mask; find the index of its lowest set
	 * bit and write it to 0x405000.
	 * NOTE(review): presumably selects the first active vertex/shader
	 * unit — not documented beyond this usage. */
286	j = nv_rd32(dev, 0x1540) & 0xff;
287	if (j) {
288	 for (i = 0; !(j & 1); j >>= 1, i++)
289	 ;
290	 nv_wr32(dev, 0x405000, i);
291	}
292
	/* Same pair of magic values, but the register pair moved between
	 * NV40 and its successors. */
293	if (dev_priv->chipset == 0x40) {
294	 nv_wr32(dev, 0x4009b0, 0x83280fff);
295	 nv_wr32(dev, 0x4009b4, 0x000000a0);
296	} else {
297	 nv_wr32(dev, 0x400820, 0x83280eff);
298	 nv_wr32(dev, 0x400824, 0x000000a0);
299	}
300
301	switch (dev_priv->chipset) {
302	case 0x40:
303	case 0x45:
304	 nv_wr32(dev, 0x4009b8, 0x0078e366);
305	 nv_wr32(dev, 0x4009bc, 0x0000014c);
306	 break;
307	case 0x41:
308	case 0x42: /* pciid also 0x00Cx */
309	/* case 0x0120: XXX (pciid) */
310	 nv_wr32(dev, 0x400828, 0x007596ff);
311	 nv_wr32(dev, 0x40082c, 0x00000108);
312	 break;
313	case 0x43:
314	 nv_wr32(dev, 0x400828, 0x0072cb77);
315	 nv_wr32(dev, 0x40082c, 0x00000108);
316	 break;
317	case 0x44:
318	case 0x46: /* G72 */
319	case 0x4a:
320	case 0x4c: /* G7x-based C51 */
321	case 0x4e:
322	 nv_wr32(dev, 0x400860, 0);
323	 nv_wr32(dev, 0x400864, 0);
324	 break;
325	case 0x47: /* G70 */
326	case 0x49: /* G71 */
327	case 0x4b: /* G73 */
328	 nv_wr32(dev, 0x400828, 0x07830610);
329	 nv_wr32(dev, 0x40082c, 0x0000016A);
330	 break;
331	default:
332	 break;
333	}
334
335	nv_wr32(dev, 0x400b38, 0x2ffff800);
336	nv_wr32(dev, 0x400b3c, 0x00006000);
337
338	/* Tiling related stuff. */
339	switch (dev_priv->chipset) {
340	case 0x44:
341	case 0x4a:
342	 nv_wr32(dev, 0x400bc4, 0x1003d888);
343	 nv_wr32(dev, 0x400bbc, 0xb7a7b500);
344	 break;
345	case 0x46:
346	 nv_wr32(dev, 0x400bc4, 0x0000e024);
347	 nv_wr32(dev, 0x400bbc, 0xb7a7b520);
348	 break;
349	case 0x4c:
350	case 0x4e:
351	case 0x67:
352	 nv_wr32(dev, 0x400bc4, 0x1003d888);
353	 nv_wr32(dev, 0x400bbc, 0xb7a7b540);
354	 break;
355	default:
356	 break;
357	}
358
359	/* Turn all the tiling regions off. */
360	for (i = 0; i < pfb->num_tiles; i++)
361	 nv40_graph_set_region_tiling(dev, i, 0, 0, 0);
362
363	/* begin RAM config */
	/* NOTE(review): BAR0 resource length is used as a stand-in for the
	 * VRAM aperture size here, mirrored into PGRAPH's FB shadow regs. */
364	vramsz = drm_get_resource_len(dev, 0) - 1;
365	switch (dev_priv->chipset) {
366	case 0x40:
367	 nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
368	 nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
369	 nv_wr32(dev, 0x4069A4, nv_rd32(dev, NV04_PFB_CFG0));
370	 nv_wr32(dev, 0x4069A8, nv_rd32(dev, NV04_PFB_CFG1));
371	 nv_wr32(dev, 0x400820, 0);
372	 nv_wr32(dev, 0x400824, 0);
373	 nv_wr32(dev, 0x400864, vramsz);
374	 nv_wr32(dev, 0x400868, vramsz);
375	 break;
376	default:
377	 switch (dev_priv->chipset) {
378	 case 0x46:
379	 case 0x47:
380	 case 0x49:
381	 case 0x4b:
382	 nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0));
383	 nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1));
384	 break;
385	 default:
386	 nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0));
387	 nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1));
388	 break;
389	 }
390	 nv_wr32(dev, 0x4069F0, nv_rd32(dev, NV04_PFB_CFG0));
391	 nv_wr32(dev, 0x4069F4, nv_rd32(dev, NV04_PFB_CFG1));
392	 nv_wr32(dev, 0x400840, 0);
393	 nv_wr32(dev, 0x400844, 0);
394	 nv_wr32(dev, 0x4008A0, vramsz);
395	 nv_wr32(dev, 0x4008A4, vramsz);
396	 break;
397	}
398
399	return 0;
400}
401
/* Tear down PGRAPH: release any loaded ctxprog/ctxvals firmware. */
402void nv40_graph_takedown(struct drm_device *dev)
403{
404	nouveau_grctx_fini(dev);
405}
406
/* Graphics object classes accepted by NV4x PGRAPH.  None of them need
 * software methods (second field false, no handler).  Terminated by an
 * empty entry. */
407struct nouveau_pgraph_object_class nv40_graph_grclass[] = {
408	{ 0x0030, false, NULL }, /* null */
409	{ 0x0039, false, NULL }, /* m2mf */
410	{ 0x004a, false, NULL }, /* gdirect */
411	{ 0x009f, false, NULL }, /* imageblit (nv12) */
412	{ 0x008a, false, NULL }, /* ifc */
413	{ 0x0089, false, NULL }, /* sifm */
414	{ 0x3089, false, NULL }, /* sifm (nv40) */
415	{ 0x0062, false, NULL }, /* surf2d */
416	{ 0x3062, false, NULL }, /* surf2d (nv40) */
417	{ 0x0043, false, NULL }, /* rop */
418	{ 0x0012, false, NULL }, /* beta1 */
419	{ 0x0072, false, NULL }, /* beta4 */
420	{ 0x0019, false, NULL }, /* cliprect */
421	{ 0x0044, false, NULL }, /* pattern */
422	{ 0x309e, false, NULL }, /* swzsurf */
423	{ 0x4097, false, NULL }, /* curie (nv40) */
424	{ 0x4497, false, NULL }, /* curie (nv44) */
425	{}
426};
427
diff --git a/drivers/gpu/drm/nouveau/nv40_grctx.c b/drivers/gpu/drm/nouveau/nv40_grctx.c
new file mode 100644
index 000000000000..11b11c31f543
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv40_grctx.c
@@ -0,0 +1,678 @@
1/*
2 * Copyright 2009 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25/* NVIDIA context programs handle a number of other conditions which are
26 * not implemented in our versions. It's not clear why NVIDIA context
27 * programs have this code, nor whether it's strictly necessary for
28 * correct operation. We'll implement additional handling if/when we
29 * discover it's necessary.
30 *
31 * - On context save, NVIDIA set 0x400314 bit 0 to 1 if the "3D state"
32 * flag is set, this gets saved into the context.
33 * - On context save, the context program for all cards load nsource
34 * into a flag register and check for ILLEGAL_MTHD. If it's set,
35 * opcode 0x60000d is called before resuming normal operation.
36 * - Some context programs check more conditions than the above. NV44
37 * checks: ((nsource & 0x0857) || (0x400718 & 0x0100) || (intr & 0x0001))
38 * and calls 0x60000d before resuming normal operation.
39 * - At the very beginning of NVIDIA's context programs, flag 9 is checked
40 * and if true 0x800001 is called with count=0, pos=0, the flag is cleared
41 * and then the ctxprog is aborted. It looks like a complicated NOP,
42 * its purpose is unknown.
43 * - In the section of code that loads the per-vs state, NVIDIA check
44 * flag 10. If it's set, they only transfer the small 0x300 byte block
45 * of state + the state for a single vs as opposed to the state for
46 * all vs units. It doesn't seem likely that it'll occur in normal
47 * operation, especially seeing as it appears NVIDIA may have screwed
48 * up the ctxprogs for some cards and have an invalid instruction
49 * rather than a cp_lsr(ctx, dwords_for_1_vs_unit) instruction.
50 * - There's a number of places where context offset 0 (where we place
51 * the PRAMIN offset of the context) is loaded into either 0x408000,
52 * 0x408004 or 0x408008. Not sure what's up there either.
53 * - The ctxprogs for some cards save 0x400a00 again during the cleanup
54 * path for auto-loadctx.
55 */
56
57#define CP_FLAG_CLEAR 0
58#define CP_FLAG_SET 1
59#define CP_FLAG_SWAP_DIRECTION ((0 * 32) + 0)
60#define CP_FLAG_SWAP_DIRECTION_LOAD 0
61#define CP_FLAG_SWAP_DIRECTION_SAVE 1
62#define CP_FLAG_USER_SAVE ((0 * 32) + 5)
63#define CP_FLAG_USER_SAVE_NOT_PENDING 0
64#define CP_FLAG_USER_SAVE_PENDING 1
65#define CP_FLAG_USER_LOAD ((0 * 32) + 6)
66#define CP_FLAG_USER_LOAD_NOT_PENDING 0
67#define CP_FLAG_USER_LOAD_PENDING 1
68#define CP_FLAG_STATUS ((3 * 32) + 0)
69#define CP_FLAG_STATUS_IDLE 0
70#define CP_FLAG_STATUS_BUSY 1
71#define CP_FLAG_AUTO_SAVE ((3 * 32) + 4)
72#define CP_FLAG_AUTO_SAVE_NOT_PENDING 0
73#define CP_FLAG_AUTO_SAVE_PENDING 1
74#define CP_FLAG_AUTO_LOAD ((3 * 32) + 5)
75#define CP_FLAG_AUTO_LOAD_NOT_PENDING 0
76#define CP_FLAG_AUTO_LOAD_PENDING 1
77#define CP_FLAG_UNK54 ((3 * 32) + 6)
78#define CP_FLAG_UNK54_CLEAR 0
79#define CP_FLAG_UNK54_SET 1
80#define CP_FLAG_ALWAYS ((3 * 32) + 8)
81#define CP_FLAG_ALWAYS_FALSE 0
82#define CP_FLAG_ALWAYS_TRUE 1
83#define CP_FLAG_UNK57 ((3 * 32) + 9)
84#define CP_FLAG_UNK57_CLEAR 0
85#define CP_FLAG_UNK57_SET 1
86
87#define CP_CTX 0x00100000
88#define CP_CTX_COUNT 0x000fc000
89#define CP_CTX_COUNT_SHIFT 14
90#define CP_CTX_REG 0x00003fff
91#define CP_LOAD_SR 0x00200000
92#define CP_LOAD_SR_VALUE 0x000fffff
93#define CP_BRA 0x00400000
94#define CP_BRA_IP 0x0000ff00
95#define CP_BRA_IP_SHIFT 8
96#define CP_BRA_IF_CLEAR 0x00000080
97#define CP_BRA_FLAG 0x0000007f
98#define CP_WAIT 0x00500000
99#define CP_WAIT_SET 0x00000080
100#define CP_WAIT_FLAG 0x0000007f
101#define CP_SET 0x00700000
102#define CP_SET_1 0x00000080
103#define CP_SET_FLAG 0x0000007f
104#define CP_NEXT_TO_SWAP 0x00600007
105#define CP_NEXT_TO_CURRENT 0x00600009
106#define CP_SET_CONTEXT_POINTER 0x0060000a
107#define CP_END 0x0060000e
108#define CP_LOAD_MAGIC_UNK01 0x00800001 /* unknown */
109#define CP_LOAD_MAGIC_NV44TCL 0x00800029 /* per-vs state (0x4497) */
110#define CP_LOAD_MAGIC_NV40TCL 0x00800041 /* per-vs state (0x4097) */
111
112#include "drmP.h"
113#include "nouveau_drv.h"
114#include "nouveau_grctx.h"
115
116/* TODO:
117 * - get vs count from 0x1540
118 * - document unimplemented bits compared to nvidia
119 * - nsource handling
120 * - R0 & 0x0200 handling
121 * - single-vs handling
122 * - 400314 bit 0
123 */
124
125static int
126nv40_graph_4097(struct drm_device *dev)
127{
128 struct drm_nouveau_private *dev_priv = dev->dev_private;
129
130 if ((dev_priv->chipset & 0xf0) == 0x60)
131 return 0;
132
133 return !!(0x0baf & (1 << dev_priv->chipset));
134}
135
136static int
137nv40_graph_vs_count(struct drm_device *dev)
138{
139 struct drm_nouveau_private *dev_priv = dev->dev_private;
140
141 switch (dev_priv->chipset) {
142 case 0x47:
143 case 0x49:
144 case 0x4b:
145 return 8;
146 case 0x40:
147 return 6;
148 case 0x41:
149 case 0x42:
150 return 5;
151 case 0x43:
152 case 0x44:
153 case 0x46:
154 case 0x4a:
155 return 3;
156 case 0x4c:
157 case 0x4e:
158 case 0x67:
159 default:
160 return 1;
161 }
162}
163
164
/* Named branch targets used while emitting the context program; values
 * start at 1 so 0 can mean "unresolved". */
165enum cp_label {
166	cp_check_load = 1,
167	cp_setup_auto_load,
168	cp_setup_load,
169	cp_setup_save,
170	cp_swap_state,
171	cp_swap_state3d_3_is_save,
172	cp_prepare_exit,
173	cp_exit,
174};
175
176static void
177nv40_graph_construct_general(struct nouveau_grctx *ctx)
178{
	/* Emit the "general" PGRAPH state block of the context: declare
	 * which register ranges are context-switched (cp_ctx) and set
	 * non-zero default values (gr_def).  The ranges and defaults are
	 * derived from traces of the proprietary driver; do not reorder. */
179	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
180	int i;
181
182	cp_ctx(ctx, 0x4000a4, 1);
183	gr_def(ctx, 0x4000a4, 0x00000008);
184	cp_ctx(ctx, 0x400144, 58);
185	gr_def(ctx, 0x400144, 0x00000001);
186	cp_ctx(ctx, 0x400314, 1);
187	gr_def(ctx, 0x400314, 0x00000000);
188	cp_ctx(ctx, 0x400400, 10);
189	cp_ctx(ctx, 0x400480, 10);
190	cp_ctx(ctx, 0x400500, 19);
191	gr_def(ctx, 0x400514, 0x00040000);
192	gr_def(ctx, 0x400524, 0x55555555);
193	gr_def(ctx, 0x400528, 0x55555555);
194	gr_def(ctx, 0x40052c, 0x55555555);
195	gr_def(ctx, 0x400530, 0x55555555);
196	cp_ctx(ctx, 0x400560, 6);
197	gr_def(ctx, 0x400568, 0x0000ffff);
198	gr_def(ctx, 0x40056c, 0x0000ffff);
199	cp_ctx(ctx, 0x40057c, 5);
200	cp_ctx(ctx, 0x400710, 3);
201	gr_def(ctx, 0x400710, 0x20010001);
202	gr_def(ctx, 0x400714, 0x0f73ef00);
203	cp_ctx(ctx, 0x400724, 1);
204	gr_def(ctx, 0x400724, 0x02008821);
205	cp_ctx(ctx, 0x400770, 3);
	/* The 0x4008xx range layout differs between NV40 proper and the
	 * rest of the family. */
206	if (dev_priv->chipset == 0x40) {
207	 cp_ctx(ctx, 0x400814, 4);
208	 cp_ctx(ctx, 0x400828, 5);
209	 cp_ctx(ctx, 0x400840, 5);
210	 gr_def(ctx, 0x400850, 0x00000040);
211	 cp_ctx(ctx, 0x400858, 4);
212	 gr_def(ctx, 0x400858, 0x00000040);
213	 gr_def(ctx, 0x40085c, 0x00000040);
214	 gr_def(ctx, 0x400864, 0x80000000);
215	 cp_ctx(ctx, 0x40086c, 9);
216	 gr_def(ctx, 0x40086c, 0x80000000);
217	 gr_def(ctx, 0x400870, 0x80000000);
218	 gr_def(ctx, 0x400874, 0x80000000);
219	 gr_def(ctx, 0x400878, 0x80000000);
220	 gr_def(ctx, 0x400888, 0x00000040);
221	 gr_def(ctx, 0x40088c, 0x80000000);
222	 cp_ctx(ctx, 0x4009c0, 8);
223	 gr_def(ctx, 0x4009cc, 0x80000000);
224	 gr_def(ctx, 0x4009dc, 0x80000000);
225	} else {
226	 cp_ctx(ctx, 0x400840, 20);
227	 if (!nv40_graph_4097(ctx->dev)) {
228	 for (i = 0; i < 8; i++)
229	 gr_def(ctx, 0x400860 + (i * 4), 0x00000001);
230	 }
231	 gr_def(ctx, 0x400880, 0x00000040);
232	 gr_def(ctx, 0x400884, 0x00000040);
233	 gr_def(ctx, 0x400888, 0x00000040);
234	 cp_ctx(ctx, 0x400894, 11);
235	 gr_def(ctx, 0x400894, 0x00000040);
236	 if (nv40_graph_4097(ctx->dev)) {
237	 for (i = 0; i < 8; i++)
238	 gr_def(ctx, 0x4008a0 + (i * 4), 0x80000000);
239	 }
240	 cp_ctx(ctx, 0x4008e0, 2);
241	 cp_ctx(ctx, 0x4008f8, 2);
242	 if (dev_priv->chipset == 0x4c ||
243	 (dev_priv->chipset & 0xf0) == 0x60)
244	 cp_ctx(ctx, 0x4009f8, 1);
245	}
246	cp_ctx(ctx, 0x400a00, 73);
247	gr_def(ctx, 0x400b0c, 0x0b0b0b0c);
248	cp_ctx(ctx, 0x401000, 4);
249	cp_ctx(ctx, 0x405004, 1);
250	switch (dev_priv->chipset) {
251	case 0x47:
252	case 0x49:
253	case 0x4b:
254	 cp_ctx(ctx, 0x403448, 1);
255	 gr_def(ctx, 0x403448, 0x00001010);
256	 break;
257	default:
258	 cp_ctx(ctx, 0x403440, 1);
259	 switch (dev_priv->chipset) {
260	 case 0x40:
261	 gr_def(ctx, 0x403440, 0x00000010);
262	 break;
263	 case 0x44:
264	 case 0x46:
265	 case 0x4a:
266	 gr_def(ctx, 0x403440, 0x00003010);
267	 break;
268	 case 0x41:
269	 case 0x42:
270	 case 0x43:
271	 case 0x4c:
272	 case 0x4e:
273	 case 0x67:
274	 default:
275	 gr_def(ctx, 0x403440, 0x00001010);
276	 break;
277	 }
278	 break;
279	}
280}
281
282static void
283nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
284{
	/* Emit the first 3D-state block of the context (texture units,
	 * clipping, blend-related defaults).  Ranges and values are
	 * chipset-specific and trace-derived; do not reorder. */
285	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
286	int i;
287
288	if (dev_priv->chipset == 0x40) {
289	 cp_ctx(ctx, 0x401880, 51);
290	 gr_def(ctx, 0x401940, 0x00000100);
291	} else
292	if (dev_priv->chipset == 0x46 || dev_priv->chipset == 0x47 ||
293	 dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b) {
294	 cp_ctx(ctx, 0x401880, 32);
295	 for (i = 0; i < 16; i++)
296	 gr_def(ctx, 0x401880 + (i * 4), 0x00000111);
297	 if (dev_priv->chipset == 0x46)
298	 cp_ctx(ctx, 0x401900, 16);
299	 cp_ctx(ctx, 0x401940, 3);
300	}
301	cp_ctx(ctx, 0x40194c, 18);
302	gr_def(ctx, 0x401954, 0x00000111);
303	gr_def(ctx, 0x401958, 0x00080060);
304	gr_def(ctx, 0x401974, 0x00000080);
305	gr_def(ctx, 0x401978, 0xffff0000);
306	gr_def(ctx, 0x40197c, 0x00000001);
307	gr_def(ctx, 0x401990, 0x46400000);
308	if (dev_priv->chipset == 0x40) {
309	 cp_ctx(ctx, 0x4019a0, 2);
310	 cp_ctx(ctx, 0x4019ac, 5);
311	} else {
312	 cp_ctx(ctx, 0x4019a0, 1);
313	 cp_ctx(ctx, 0x4019b4, 3);
314	}
315	gr_def(ctx, 0x4019bc, 0xffff0000);
316	switch (dev_priv->chipset) {
317	case 0x46:
318	case 0x47:
319	case 0x49:
320	case 0x4b:
321	 cp_ctx(ctx, 0x4019c0, 18);
322	 for (i = 0; i < 16; i++)
323	 gr_def(ctx, 0x4019c0 + (i * 4), 0x88888888);
324	 break;
325	}
326	cp_ctx(ctx, 0x401a08, 8);
327	gr_def(ctx, 0x401a10, 0x0fff0000);
328	gr_def(ctx, 0x401a14, 0x0fff0000);
329	gr_def(ctx, 0x401a1c, 0x00011100);
330	cp_ctx(ctx, 0x401a2c, 4);
331	cp_ctx(ctx, 0x401a44, 26);
332	for (i = 0; i < 16; i++)
333	 gr_def(ctx, 0x401a44 + (i * 4), 0x07ff0000);
334	gr_def(ctx, 0x401a8c, 0x4b7fffff);
335	if (dev_priv->chipset == 0x40) {
336	 cp_ctx(ctx, 0x401ab8, 3);
337	} else {
338	 cp_ctx(ctx, 0x401ab8, 1);
339	 cp_ctx(ctx, 0x401ac0, 1);
340	}
341	cp_ctx(ctx, 0x401ad0, 8);
342	gr_def(ctx, 0x401ad0, 0x30201000);
343	gr_def(ctx, 0x401ad4, 0x70605040);
344	gr_def(ctx, 0x401ad8, 0xb8a89888);
345	gr_def(ctx, 0x401adc, 0xf8e8d8c8);
346	cp_ctx(ctx, 0x401b10, dev_priv->chipset == 0x40 ? 2 : 1);
347	gr_def(ctx, 0x401b10, 0x40100000);
348	cp_ctx(ctx, 0x401b18, dev_priv->chipset == 0x40 ? 6 : 5);
349	gr_def(ctx, 0x401b28, dev_priv->chipset == 0x40 ?
350	 0x00000004 : 0x00000000);
351	cp_ctx(ctx, 0x401b30, 25);
352	gr_def(ctx, 0x401b34, 0x0000ffff);
353	gr_def(ctx, 0x401b68, 0x435185d6);
354	gr_def(ctx, 0x401b6c, 0x2155b699);
355	gr_def(ctx, 0x401b70, 0xfedcba98);
356	gr_def(ctx, 0x401b74, 0x00000098);
357	gr_def(ctx, 0x401b84, 0xffffffff);
358	gr_def(ctx, 0x401b88, 0x00ff7000);
359	gr_def(ctx, 0x401b8c, 0x0000ffff);
360	if (dev_priv->chipset != 0x44 && dev_priv->chipset != 0x4a &&
361	 dev_priv->chipset != 0x4e)
362	 cp_ctx(ctx, 0x401b94, 1);
363	cp_ctx(ctx, 0x401b98, 8);
364	gr_def(ctx, 0x401b9c, 0x00ff0000);
365	cp_ctx(ctx, 0x401bc0, 9);
366	gr_def(ctx, 0x401be0, 0x00ffff00);
367	cp_ctx(ctx, 0x401c00, 192);
368	for (i = 0; i < 16; i++) { /* fragment texture units */
369	 gr_def(ctx, 0x401c40 + (i * 4), 0x00018488);
370	 gr_def(ctx, 0x401c80 + (i * 4), 0x00028202);
371	 gr_def(ctx, 0x401d00 + (i * 4), 0x0000aae4);
372	 gr_def(ctx, 0x401d40 + (i * 4), 0x01012000);
373	 gr_def(ctx, 0x401d80 + (i * 4), 0x00080008);
374	 gr_def(ctx, 0x401e00 + (i * 4), 0x00100008);
375	}
376	for (i = 0; i < 4; i++) { /* vertex texture units */
377	 gr_def(ctx, 0x401e90 + (i * 4), 0x0001bc80);
378	 gr_def(ctx, 0x401ea0 + (i * 4), 0x00000202);
379	 gr_def(ctx, 0x401ec0 + (i * 4), 0x00000008);
380	 gr_def(ctx, 0x401ee0 + (i * 4), 0x00080008);
381	}
382	cp_ctx(ctx, 0x400f5c, 3);
383	gr_def(ctx, 0x400f5c, 0x00000002);
384	cp_ctx(ctx, 0x400f84, 1);
385}
386
387static void
388nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
389{
	/* Emit the second 3D-state block of the context (rasterizer /
	 * shader-unit configuration).  Like the other construct_* helpers,
	 * the ranges and defaults are trace-derived and chipset-specific;
	 * do not reorder. */
390	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
391	int i;
392
393	cp_ctx(ctx, 0x402000, 1);
394	cp_ctx(ctx, 0x402404, dev_priv->chipset == 0x40 ? 1 : 2);
395	switch (dev_priv->chipset) {
396	case 0x40:
397	 gr_def(ctx, 0x402404, 0x00000001);
398	 break;
399	case 0x4c:
400	case 0x4e:
401	case 0x67:
402	 gr_def(ctx, 0x402404, 0x00000020);
403	 break;
404	case 0x46:
405	case 0x49:
406	case 0x4b:
407	 gr_def(ctx, 0x402404, 0x00000421);
408	 break;
409	default:
410	 gr_def(ctx, 0x402404, 0x00000021);
411	}
412	if (dev_priv->chipset != 0x40)
413	 gr_def(ctx, 0x402408, 0x030c30c3);
414	switch (dev_priv->chipset) {
415	case 0x44:
416	case 0x46:
417	case 0x4a:
418	case 0x4c:
419	case 0x4e:
420	case 0x67:
421	 cp_ctx(ctx, 0x402440, 1);
422	 gr_def(ctx, 0x402440, 0x00011001);
423	 break;
424	default:
425	 break;
426	}
427	cp_ctx(ctx, 0x402480, dev_priv->chipset == 0x40 ? 8 : 9);
428	gr_def(ctx, 0x402488, 0x3e020200);
429	gr_def(ctx, 0x40248c, 0x00ffffff);
430	switch (dev_priv->chipset) {
431	case 0x40:
432	 gr_def(ctx, 0x402490, 0x60103f00);
433	 break;
434	case 0x47:
435	 gr_def(ctx, 0x402490, 0x40103f00);
436	 break;
437	case 0x41:
438	case 0x42:
439	case 0x49:
440	case 0x4b:
441	 gr_def(ctx, 0x402490, 0x20103f00);
442	 break;
443	default:
444	 gr_def(ctx, 0x402490, 0x0c103f00);
445	 break;
446	}
447	gr_def(ctx, 0x40249c, dev_priv->chipset <= 0x43 ?
448	 0x00020000 : 0x00040000);
449	cp_ctx(ctx, 0x402500, 31);
450	gr_def(ctx, 0x402530, 0x00008100);
451	if (dev_priv->chipset == 0x40)
452	 cp_ctx(ctx, 0x40257c, 6);
453	cp_ctx(ctx, 0x402594, 16);
454	cp_ctx(ctx, 0x402800, 17);
455	gr_def(ctx, 0x402800, 0x00000001);
456	switch (dev_priv->chipset) {
457	case 0x47:
458	case 0x49:
459	case 0x4b:
460	 cp_ctx(ctx, 0x402864, 1);
461	 gr_def(ctx, 0x402864, 0x00001001);
462	 cp_ctx(ctx, 0x402870, 3);
463	 gr_def(ctx, 0x402878, 0x00000003);
464	 if (dev_priv->chipset != 0x47) { /* belong at end!! */
465	 cp_ctx(ctx, 0x402900, 1);
466	 cp_ctx(ctx, 0x402940, 1);
467	 cp_ctx(ctx, 0x402980, 1);
468	 cp_ctx(ctx, 0x4029c0, 1);
469	 cp_ctx(ctx, 0x402a00, 1);
470	 cp_ctx(ctx, 0x402a40, 1);
471	 cp_ctx(ctx, 0x402a80, 1);
472	 cp_ctx(ctx, 0x402ac0, 1);
473	 }
474	 break;
475	case 0x40:
476	 cp_ctx(ctx, 0x402844, 1);
477	 gr_def(ctx, 0x402844, 0x00000001);
478	 cp_ctx(ctx, 0x402850, 1);
479	 break;
480	default:
481	 cp_ctx(ctx, 0x402844, 1);
482	 gr_def(ctx, 0x402844, 0x00001001);
483	 cp_ctx(ctx, 0x402850, 2);
484	 gr_def(ctx, 0x402854, 0x00000003);
485	 break;
486	}
487
488	cp_ctx(ctx, 0x402c00, 4);
489	gr_def(ctx, 0x402c00, dev_priv->chipset == 0x40 ?
490	 0x80800001 : 0x00888001);
491	switch (dev_priv->chipset) {
492	case 0x47:
493	case 0x49:
494	case 0x4b:
495	 cp_ctx(ctx, 0x402c20, 40);
496	 for (i = 0; i < 32; i++)
497	 gr_def(ctx, 0x402c40 + (i * 4), 0xffffffff);
498	 cp_ctx(ctx, 0x4030b8, 13);
499	 gr_def(ctx, 0x4030dc, 0x00000005);
500	 gr_def(ctx, 0x4030e8, 0x0000ffff);
501	 break;
502	default:
503	 cp_ctx(ctx, 0x402c10, 4);
504	 if (dev_priv->chipset == 0x40)
505	 cp_ctx(ctx, 0x402c20, 36);
506	 else
507	 if (dev_priv->chipset <= 0x42)
508	 cp_ctx(ctx, 0x402c20, 24);
509	 else
510	 if (dev_priv->chipset <= 0x4a)
511	 cp_ctx(ctx, 0x402c20, 16);
512	 else
513	 cp_ctx(ctx, 0x402c20, 8);
514	 cp_ctx(ctx, 0x402cb0, dev_priv->chipset == 0x40 ? 12 : 13);
515	 gr_def(ctx, 0x402cd4, 0x00000005);
516	 if (dev_priv->chipset != 0x40)
517	 gr_def(ctx, 0x402ce0, 0x0000ffff);
518	 break;
519	}
520
	/* Per-vs-unit state: one dword per vertex shader unit. */
521	cp_ctx(ctx, 0x403400, dev_priv->chipset == 0x40 ? 4 : 3);
522	cp_ctx(ctx, 0x403410, dev_priv->chipset == 0x40 ? 4 : 3);
523	cp_ctx(ctx, 0x403420, nv40_graph_vs_count(ctx->dev));
524	for (i = 0; i < nv40_graph_vs_count(ctx->dev); i++)
525	 gr_def(ctx, 0x403420 + (i * 4), 0x00005555);
526
527	if (dev_priv->chipset != 0x40) {
528	 cp_ctx(ctx, 0x403600, 1);
529	 gr_def(ctx, 0x403600, 0x00000001);
530	}
531	cp_ctx(ctx, 0x403800, 1);
532
533	cp_ctx(ctx, 0x403c18, 1);
534	gr_def(ctx, 0x403c18, 0x00000001);
535	switch (dev_priv->chipset) {
536	case 0x46:
537	case 0x47:
538	case 0x49:
539	case 0x4b:
540	 cp_ctx(ctx, 0x405018, 1);
541	 gr_def(ctx, 0x405018, 0x08e00001);
542	 cp_ctx(ctx, 0x405c24, 1);
543	 gr_def(ctx, 0x405c24, 0x000e3000);
544	 break;
545	}
546	if (dev_priv->chipset != 0x4e)
547	 cp_ctx(ctx, 0x405800, 11);
548	cp_ctx(ctx, 0x407000, 1);
549}
550
551static void
552nv40_graph_construct_state3d_3(struct nouveau_grctx *ctx)
553{
	/* Emit the third (opaque) state block: a raw 0x300000-tagged
	 * transfer whose length depends on whether the chipset uses the
	 * 0x4097 class.  On save, 4 fewer dwords are transferred than on
	 * load (the branch skips the re-load of the length register). */
554	int len = nv40_graph_4097(ctx->dev) ? 0x0684 : 0x0084;
555
556	cp_out (ctx, 0x300000);
557	cp_lsr (ctx, len - 4);
558	cp_bra (ctx, SWAP_DIRECTION, SAVE, cp_swap_state3d_3_is_save);
559	cp_lsr (ctx, len);
560	cp_name(ctx, cp_swap_state3d_3_is_save);
561	cp_out (ctx, 0x800001);
562
	/* Reserve space for this block in the context image. */
563	ctx->ctxvals_pos += len;
564}
565
566static void
567nv40_graph_construct_shader(struct nouveau_grctx *ctx)
568{
	/* Emit the per-vertex-shader-unit state block: a shared 0x300-byte
	 * header followed by one vs_len-dword slice per unit.  In PROG
	 * mode this only emits the transfer opcode and reserves space; in
	 * VALS mode it additionally writes default values (1.0f identity
	 * constants and enable bits) into the context object. */
569	struct drm_device *dev = ctx->dev;
570	struct drm_nouveau_private *dev_priv = dev->dev_private;
571	struct nouveau_gpuobj *obj = ctx->data;
572	int vs, vs_nr, vs_len, vs_nr_b0, vs_nr_b1, b0_offset, b1_offset;
573	int offset, i;
574
575	vs_nr = nv40_graph_vs_count(ctx->dev);
576	vs_nr_b0 = 363;
577	vs_nr_b1 = dev_priv->chipset == 0x40 ? 128 : 64;
578	if (dev_priv->chipset == 0x40) {
579	 b0_offset = 0x2200/4; /* 33a0 */
580	 b1_offset = 0x55a0/4; /* 1500 */
581	 vs_len = 0x6aa0/4;
582	} else
583	if (dev_priv->chipset == 0x41 || dev_priv->chipset == 0x42) {
584	 b0_offset = 0x2200/4; /* 2200 */
585	 b1_offset = 0x4400/4; /* 0b00 */
586	 vs_len = 0x4f00/4;
587	} else {
588	 b0_offset = 0x1d40/4; /* 2200 */
589	 b1_offset = 0x3f40/4; /* 0b00 : 0a40 */
590	 vs_len = nv40_graph_4097(dev) ? 0x4a40/4 : 0x4980/4;
591	}
592
	/* 0x800029 / 0x800041 are the NV44TCL / NV40TCL per-vs transfer
	 * opcodes (see CP_LOAD_MAGIC_* above). */
593	cp_lsr(ctx, vs_len * vs_nr + 0x300/4);
594	cp_out(ctx, nv40_graph_4097(dev) ? 0x800041 : 0x800029);
595
596	offset = ctx->ctxvals_pos;
597	ctx->ctxvals_pos += (0x0300/4 + (vs_nr * vs_len));
598
599	if (ctx->mode != NOUVEAU_GRCTX_VALS)
600	 return;
601
	/* 16 pairs of 1.0f in the shared header... */
602	offset += 0x0280/4;
603	for (i = 0; i < 16; i++, offset += 2)
604	 nv_wo32(dev, obj, offset, 0x3f800000);
605
	/* ...then per-unit enable bits (b0) and 1.0f constants (b1). */
606	for (vs = 0; vs < vs_nr; vs++, offset += vs_len) {
607	 for (i = 0; i < vs_nr_b0 * 6; i += 6)
608	 nv_wo32(dev, obj, offset + b0_offset + i, 0x00000001);
609	 for (i = 0; i < vs_nr_b1 * 4; i += 4)
610	 nv_wo32(dev, obj, offset + b1_offset + i, 0x3f800000);
611	}
612}
613
614void
615nv40_grctx_init(struct nouveau_grctx *ctx)
616{
	/* Top-level context-program generator for NV4x.  In PROG mode this
	 * emits the ctxprog microcode into ctx->data; in VALS mode the same
	 * walk writes default context values into the context object.
	 * The program handles both save (engine -> memory) and load
	 * (memory -> engine) of PGRAPH state. */

617	/* decide whether we're loading/unloading the context */
618	cp_bra (ctx, AUTO_SAVE, PENDING, cp_setup_save);
619	cp_bra (ctx, USER_SAVE, PENDING, cp_setup_save);
620
621	cp_name(ctx, cp_check_load);
622	cp_bra (ctx, AUTO_LOAD, PENDING, cp_setup_auto_load);
623	cp_bra (ctx, USER_LOAD, PENDING, cp_setup_load);
624	cp_bra (ctx, ALWAYS, TRUE, cp_exit);
625
626	/* setup for context load */
627	cp_name(ctx, cp_setup_auto_load);
628	cp_wait(ctx, STATUS, IDLE);
629	cp_out (ctx, CP_NEXT_TO_SWAP);
630	cp_name(ctx, cp_setup_load);
631	cp_wait(ctx, STATUS, IDLE);
632	cp_set (ctx, SWAP_DIRECTION, LOAD);
633	cp_out (ctx, 0x00910880); /* ?? */
634	cp_out (ctx, 0x00901ffe); /* ?? */
635	cp_out (ctx, 0x01940000); /* ?? */
636	cp_lsr (ctx, 0x20);
637	cp_out (ctx, 0x0060000b); /* ?? */
638	cp_wait(ctx, UNK57, CLEAR);
639	cp_out (ctx, 0x0060000c); /* ?? */
640	cp_bra (ctx, ALWAYS, TRUE, cp_swap_state);
641
642	/* setup for context save */
643	cp_name(ctx, cp_setup_save);
644	cp_set (ctx, SWAP_DIRECTION, SAVE);
645
646	/* general PGRAPH state */
647	cp_name(ctx, cp_swap_state);
648	cp_pos (ctx, 0x00020/4);
649	nv40_graph_construct_general(ctx);
650	cp_wait(ctx, STATUS, IDLE);
651
652	/* 3D state, block 1 */
653	cp_bra (ctx, UNK54, CLEAR, cp_prepare_exit);
654	nv40_graph_construct_state3d(ctx);
655	cp_wait(ctx, STATUS, IDLE);
656
657	/* 3D state, block 2 */
658	nv40_graph_construct_state3d_2(ctx);
659
660	/* Some other block of "random" state */
661	nv40_graph_construct_state3d_3(ctx);
662
663	/* Per-vertex shader state */
664	cp_pos (ctx, ctx->ctxvals_pos);
665	nv40_graph_construct_shader(ctx);
666
667	/* pre-exit state updates */
668	cp_name(ctx, cp_prepare_exit);
669	cp_bra (ctx, SWAP_DIRECTION, SAVE, cp_check_load);
670	cp_bra (ctx, USER_SAVE, PENDING, cp_exit);
671	cp_out (ctx, CP_NEXT_TO_CURRENT);
672
673	cp_name(ctx, cp_exit);
674	cp_set (ctx, USER_SAVE, NOT_PENDING);
675	cp_set (ctx, USER_LOAD, NOT_PENDING);
676	cp_out (ctx, CP_END);
677}
678
diff --git a/drivers/gpu/drm/nouveau/nv40_mc.c b/drivers/gpu/drm/nouveau/nv40_mc.c
new file mode 100644
index 000000000000..2a3495e848e9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv40_mc.c
@@ -0,0 +1,38 @@
1#include "drmP.h"
2#include "drm.h"
3#include "nouveau_drv.h"
4#include "nouveau_drm.h"
5
/* Bring up the NV40 master control (PMC) engine.
 *
 * Writes all-ones to NV03_PMC_ENABLE to power up every unit; individual
 * units are reset later by their own init code if needed.  A few NV4x
 * chipsets additionally mirror the PFB 0x20c value into the PMC
 * 0x1700/0x170c registers (NOTE(review): the meaning of these registers
 * is not documented here — value copied as-is from PFB).
 *
 * Returns 0 (cannot fail).
 */
int
nv40_mc_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	/* Power up everything, resetting each individual unit will
	 * be done later if needed.
	 */
	nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);

	switch (dev_priv->chipset) {
	case 0x44:
	case 0x46: /* G72 */
	case 0x4e:
	case 0x4c: /* C51_G7X */
		/* propagate PFB 0x20c into PMC 0x1700/0x170c, zero the
		 * two registers in between */
		tmp = nv_rd32(dev, NV40_PFB_020C);
		nv_wr32(dev, NV40_PMC_1700, tmp);
		nv_wr32(dev, NV40_PMC_1704, 0);
		nv_wr32(dev, NV40_PMC_1708, 0);
		nv_wr32(dev, NV40_PMC_170C, tmp);
		break;
	default:
		break;
	}

	return 0;
}
34
/* PMC teardown counterpart of nv40_mc_init(): nothing to undo. */
void
nv40_mc_takedown(struct drm_device *dev)
{
}
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
new file mode 100644
index 000000000000..cfabeb974a56
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -0,0 +1,790 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm_mode.h"
29#include "drm_crtc_helper.h"
30
31#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
32#include "nouveau_reg.h"
33#include "nouveau_drv.h"
34#include "nouveau_hw.h"
35#include "nouveau_encoder.h"
36#include "nouveau_crtc.h"
37#include "nouveau_fb.h"
38#include "nouveau_connector.h"
39#include "nv50_display.h"
40
/* Upload this CRTC's gamma LUT into its pinned VRAM buffer object.
 *
 * Each of the 256 entries occupies 8 bytes: 16-bit red at +0, green at
 * +2, blue at +4 (last two bytes unused).  Stored 16-bit values are
 * shifted down by 2 before writing.  For 30-bit depth one extra entry,
 * duplicating entry 255, is written at index 256 (NOTE(review): the hw
 * presumably expects 257 entries in that mode — taken on faith here).
 */
static void
nv50_crtc_lut_load(struct drm_crtc *crtc)
{
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
	int i;

	NV_DEBUG_KMS(crtc->dev, "\n");

	for (i = 0; i < 256; i++) {
		writew(nv_crtc->lut.r[i] >> 2, lut + 8*i + 0);
		writew(nv_crtc->lut.g[i] >> 2, lut + 8*i + 2);
		writew(nv_crtc->lut.b[i] >> 2, lut + 8*i + 4);
	}

	/* i == 256 here: append a copy of the last entry */
	if (nv_crtc->lut.depth == 30) {
		writew(nv_crtc->lut.r[i - 1] >> 2, lut + 8*i + 0);
		writew(nv_crtc->lut.g[i - 1] >> 2, lut + 8*i + 2);
		writew(nv_crtc->lut.b[i - 1] >> 2, lut + 8*i + 4);
	}
}
62
/* Blank or unblank a CRTC via the EVO channel.
 *
 * Blanking detaches the CLUT and framebuffer DMA objects (and hides the
 * cursor); unblanking re-binds them and re-points the display at the
 * CRTC's current fb offset.  Methods are only queued here — no
 * NV50_EVO_UPDATE is pushed, so the caller decides when the change
 * latches.
 *
 * Returns 0 on success, or the RING_SPACE() error if the EVO channel
 * has no room.
 */
int
nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
{
	struct drm_device *dev = nv_crtc->base.dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *evo = dev_priv->evo;
	int index = nv_crtc->index, ret;

	NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
	NV_DEBUG_KMS(dev, "%s\n", blanked ? "blanked" : "unblanked");

	if (blanked) {
		nv_crtc->cursor.hide(nv_crtc, false);

		/* NV84+ needs two extra words for the CLUT_DMA method */
		ret = RING_SPACE(evo, dev_priv->chipset != 0x50 ? 7 : 5);
		if (ret) {
			NV_ERROR(dev, "no space while blanking crtc\n");
			return ret;
		}
		BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
		OUT_RING(evo, NV50_EVO_CRTC_CLUT_MODE_BLANK);
		OUT_RING(evo, 0);
		if (dev_priv->chipset != 0x50) {
			BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
			OUT_RING(evo, NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE);
		}

		/* detach the framebuffer DMA object */
		BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
		OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE);
	} else {
		/* restore previous cursor visibility state */
		if (nv_crtc->cursor.visible)
			nv_crtc->cursor.show(nv_crtc, false);
		else
			nv_crtc->cursor.hide(nv_crtc, false);

		ret = RING_SPACE(evo, dev_priv->chipset != 0x50 ? 10 : 8);
		if (ret) {
			NV_ERROR(dev, "no space while unblanking crtc\n");
			return ret;
		}
		/* CLUT off for 8bpp, on otherwise; second word is the
		 * 256-byte-aligned VRAM address of the LUT buffer */
		BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
		OUT_RING(evo, nv_crtc->lut.depth == 8 ?
			      NV50_EVO_CRTC_CLUT_MODE_OFF :
			      NV50_EVO_CRTC_CLUT_MODE_ON);
		OUT_RING(evo, (nv_crtc->lut.nvbo->bo.mem.mm_node->start <<
			       PAGE_SHIFT) >> 8);
		if (dev_priv->chipset != 0x50) {
			BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
			OUT_RING(evo, NvEvoVRAM);
		}

		BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_OFFSET), 2);
		OUT_RING(evo, nv_crtc->fb.offset >> 8);
		OUT_RING(evo, 0);
		/* pick the FB DMA object matching the bo's tiling format
		 * on NV84+; NV50 always uses the plain VRAM object */
		BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
		if (dev_priv->chipset != 0x50)
			if (nv_crtc->fb.tile_flags == 0x7a00)
				OUT_RING(evo, NvEvoFB32);
			else
			if (nv_crtc->fb.tile_flags == 0x7000)
				OUT_RING(evo, NvEvoFB16);
			else
				OUT_RING(evo, NvEvoVRAM);
		else
			OUT_RING(evo, NvEvoVRAM);
	}

	nv_crtc->fb.blanked = blanked;
	return 0;
}
133
/* Enable/disable dithering on this CRTC.
 *
 * Queues the DITHER_CTRL method; when @update is set, also pushes
 * NV50_EVO_UPDATE and fires the ring so the change takes effect
 * immediately.  Returns 0 or the RING_SPACE() error.
 */
static int
nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool on, bool update)
{
	struct drm_device *dev = nv_crtc->base.dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *evo = dev_priv->evo;
	int ret;

	NV_DEBUG_KMS(dev, "\n");

	/* two words for the method, two more if we flush an update */
	ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
	if (ret) {
		NV_ERROR(dev, "no space while setting dither\n");
		return ret;
	}

	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, DITHER_CTRL), 1);
	if (on)
		OUT_RING(evo, NV50_EVO_CRTC_DITHER_CTRL_ON);
	else
		OUT_RING(evo, NV50_EVO_CRTC_DITHER_CTRL_OFF);

	if (update) {
		BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
		OUT_RING(evo, 0);
		FIRE_RING(evo);
	}

	return 0;
}
164
165struct nouveau_connector *
166nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
167{
168 struct drm_device *dev = nv_crtc->base.dev;
169 struct drm_connector *connector;
170 struct drm_crtc *crtc = to_drm_crtc(nv_crtc);
171
172 /* The safest approach is to find an encoder with the right crtc, that
173 * is also linked to a connector. */
174 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
175 if (connector->encoder)
176 if (connector->encoder->crtc == crtc)
177 return nouveau_connector(connector);
178 }
179
180 return NULL;
181}
182
/* Program the CRTC scaler for the requested scaling mode.
 *
 * Computes the scan-out size (outX/outY) from the current mode and the
 * connector's native mode, then queues the SCALE_CTRL and SCALE_RES
 * methods.  If no native mode is available, panel scaling is forced to
 * DRM_MODE_SCALE_NONE.  When @update is set, an EVO update is pushed
 * and the ring fired.  Returns 0 or the RING_SPACE() error.
 */
static int
nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, int scaling_mode, bool update)
{
	struct nouveau_connector *nv_connector =
		nouveau_crtc_connector_get(nv_crtc);
	struct drm_device *dev = nv_crtc->base.dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *evo = dev_priv->evo;
	struct drm_display_mode *native_mode = NULL;
	struct drm_display_mode *mode = &nv_crtc->base.mode;
	uint32_t outX, outY, horiz, vert;
	int ret;

	NV_DEBUG_KMS(dev, "\n");

	/* any mode other than NONE requires a native mode to scale to */
	switch (scaling_mode) {
	case DRM_MODE_SCALE_NONE:
		break;
	default:
		if (!nv_connector || !nv_connector->native_mode) {
			NV_ERROR(dev, "No native mode, forcing panel scaling\n");
			scaling_mode = DRM_MODE_SCALE_NONE;
		} else {
			native_mode = nv_connector->native_mode;
		}
		break;
	}

	switch (scaling_mode) {
	case DRM_MODE_SCALE_ASPECT:
		/* 13.19 fixed-point scale factors; use the smaller one on
		 * both axes so the aspect ratio is preserved */
		horiz = (native_mode->hdisplay << 19) / mode->hdisplay;
		vert = (native_mode->vdisplay << 19) / mode->vdisplay;

		if (vert > horiz) {
			outX = (mode->hdisplay * horiz) >> 19;
			outY = (mode->vdisplay * horiz) >> 19;
		} else {
			outX = (mode->hdisplay * vert) >> 19;
			outY = (mode->vdisplay * vert) >> 19;
		}
		break;
	case DRM_MODE_SCALE_FULLSCREEN:
		outX = native_mode->hdisplay;
		outY = native_mode->vdisplay;
		break;
	case DRM_MODE_SCALE_CENTER:
	case DRM_MODE_SCALE_NONE:
	default:
		outX = mode->hdisplay;
		outY = mode->vdisplay;
		break;
	}

	ret = RING_SPACE(evo, update ? 7 : 5);
	if (ret)
		return ret;

	/* Got a better name for SCALER_ACTIVE? */
	/* One day i've got to really figure out why this is needed. */
	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CTRL), 1);
	if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) ||
	    (mode->flags & DRM_MODE_FLAG_INTERLACE) ||
	    mode->hdisplay != outX || mode->vdisplay != outY) {
		OUT_RING(evo, NV50_EVO_CRTC_SCALE_CTRL_ACTIVE);
	} else {
		OUT_RING(evo, NV50_EVO_CRTC_SCALE_CTRL_INACTIVE);
	}

	/* both resolution words get the same output size */
	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_RES1), 2);
	OUT_RING(evo, outY << 16 | outX);
	OUT_RING(evo, outY << 16 | outX);

	if (update) {
		BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
		OUT_RING(evo, 0);
		FIRE_RING(evo);
	}

	return 0;
}
263
/* Program a head's pixel clock PLL to @pclk (kHz).
 *
 * Looks up the PLL limits from the VBIOS, computes M/N/P coefficients,
 * then writes them.  Two register layouts exist: dual-VCO PLLs
 * (limits.vco2.maxfreq set) split the coefficients across pll_reg+4
 * and pll_reg+8; single-VCO PLLs pack log2P/M1/N1 into pll_reg+4.
 *
 * Returns 0 on success, a negative error from the limits lookup, or
 * the (<= 0) result of nouveau_calc_pll_mnp() when no coefficients
 * could be computed.
 */
int
nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
{
	uint32_t pll_reg = NV50_PDISPLAY_CRTC_CLK_CTRL1(head);
	struct nouveau_pll_vals pll;
	struct pll_lims limits;
	uint32_t reg1, reg2;
	int ret;

	ret = get_pll_limits(dev, pll_reg, &limits);
	if (ret)
		return ret;

	ret = nouveau_calc_pll_mnp(dev, &limits, pclk, &pll);
	if (ret <= 0)
		return ret;

	if (limits.vco2.maxfreq) {
		/* dual-VCO: preserve reserved bits, write M1/N1 and
		 * log2P/M2/N2 separately */
		reg1 = nv_rd32(dev, pll_reg + 4) & 0xff00ff00;
		reg2 = nv_rd32(dev, pll_reg + 8) & 0x8000ff00;
		nv_wr32(dev, pll_reg, 0x10000611);
		nv_wr32(dev, pll_reg + 4, reg1 | (pll.M1 << 16) | pll.N1);
		nv_wr32(dev, pll_reg + 8,
			reg2 | (pll.log2P << 28) | (pll.M2 << 16) | pll.N2);
	} else {
		/* single-VCO: everything in one coefficient register */
		reg1 = nv_rd32(dev, pll_reg + 4) & 0xffc00000;
		nv_wr32(dev, pll_reg, 0x50000610);
		nv_wr32(dev, pll_reg + 4, reg1 |
			(pll.log2P << 16) | (pll.M1 << 8) | pll.N1);
	}

	return 0;
}
297
/* drm_crtc_funcs.destroy hook: tear down a CRTC created by
 * nv50_crtc_create().
 *
 * Cleans up the DRM core state, shuts down the hardware cursor, drops
 * the LUT and cursor buffer objects, and frees the allocations made at
 * create time.  Safe to call with a NULL crtc.
 */
static void
nv50_crtc_destroy(struct drm_crtc *crtc)
{
	struct drm_device *dev;
	struct nouveau_crtc *nv_crtc;

	if (!crtc)
		return;

	dev = crtc->dev;
	nv_crtc = nouveau_crtc(crtc);

	NV_DEBUG_KMS(dev, "\n");

	drm_crtc_cleanup(&nv_crtc->base);

	nv50_cursor_fini(nv_crtc);

	/* dropping the last ref unmaps/frees the pinned bos */
	nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
	nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
	kfree(nv_crtc->mode);
	kfree(nv_crtc);
}
321
/* drm_crtc_funcs.cursor_set hook: set or clear the cursor image.
 *
 * Only 64x64 cursors are accepted.  A zero @buffer_handle hides the
 * cursor.  Otherwise the userspace bo is looked up, mapped, and its
 * 64*64 32-bit words copied into the CRTC's own cursor bo, after which
 * the hardware is pointed at the new offset and the cursor shown.
 *
 * Returns 0 on success, -EINVAL for bad size/handle, or the
 * nouveau_bo_map() error.
 */
int
nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
		     uint32_t buffer_handle, uint32_t width, uint32_t height)
{
	struct drm_device *dev = crtc->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	struct nouveau_bo *cursor = NULL;
	struct drm_gem_object *gem;
	int ret = 0, i;

	if (width != 64 || height != 64)
		return -EINVAL;

	if (!buffer_handle) {
		nv_crtc->cursor.hide(nv_crtc, true);
		return 0;
	}

	gem = drm_gem_object_lookup(dev, file_priv, buffer_handle);
	if (!gem)
		return -EINVAL;
	cursor = nouveau_gem_object(gem);

	ret = nouveau_bo_map(cursor);
	if (ret)
		goto out;

	/* The simple will do for now. */
	for (i = 0; i < 64 * 64; i++)
		nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, nouveau_bo_rd32(cursor, i));

	nouveau_bo_unmap(cursor);

	/* hardware wants the offset relative to the VRAM VM base */
	nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset -
					    dev_priv->vm_vram_base);
	nv_crtc->cursor.show(nv_crtc, true);

out:
	/* drop the lookup reference in all cases */
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}
364
365int
366nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
367{
368 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
369
370 nv_crtc->cursor.set_pos(nv_crtc, x, y);
371 return 0;
372}
373
374static void
375nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
376 uint32_t size)
377{
378 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
379 int i;
380
381 if (size != 256)
382 return;
383
384 for (i = 0; i < 256; i++) {
385 nv_crtc->lut.r[i] = r[i];
386 nv_crtc->lut.g[i] = g[i];
387 nv_crtc->lut.b[i] = b[i];
388 }
389
390 /* We need to know the depth before we upload, but it's possible to
391 * get called before a framebuffer is bound. If this is the case,
392 * mark the lut values as dirty by setting depth==0, and it'll be
393 * uploaded on the first mode_set_base()
394 */
395 if (!nv_crtc->base.fb) {
396 nv_crtc->lut.depth = 0;
397 return;
398 }
399
400 nv50_crtc_lut_load(crtc);
401}
402
/* drm_crtc_funcs.save hook: unimplemented on NV50, logs only. */
static void
nv50_crtc_save(struct drm_crtc *crtc)
{
	NV_ERROR(crtc->dev, "!!\n");
}
408
/* drm_crtc_funcs.restore hook: unimplemented on NV50, logs only. */
static void
nv50_crtc_restore(struct drm_crtc *crtc)
{
	NV_ERROR(crtc->dev, "!!\n");
}
414
/* DRM core entry points for NV50 CRTCs. */
static const struct drm_crtc_funcs nv50_crtc_funcs = {
	.save = nv50_crtc_save,
	.restore = nv50_crtc_restore,
	.cursor_set = nv50_crtc_cursor_set,
	.cursor_move = nv50_crtc_cursor_move,
	.gamma_set = nv50_crtc_gamma_set,
	.set_config = drm_crtc_helper_set_config,
	.destroy = nv50_crtc_destroy,
};
424
/* drm_crtc_helper_funcs.dpms hook: no-op; blanking is handled through
 * prepare()/commit() instead. */
static void
nv50_crtc_dpms(struct drm_crtc *crtc, int mode)
{
}
429
/* drm_crtc_helper_funcs.prepare hook: disconnect unused encoders and
 * blank the CRTC before a mode set.
 *
 * First pass collects the DAC and SOR "or" bits of every encoder still
 * in use; second pass calls ->disconnect() on every encoder whose "or"
 * was not marked in the matching mask.
 */
static void
nv50_crtc_prepare(struct drm_crtc *crtc)
{
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;
	uint32_t dac = 0, sor = 0;

	NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);

	/* Disconnect all unused encoders. */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);

		if (!drm_helper_encoder_in_use(encoder))
			continue;

		/* analog/TV outputs live on DACs, everything else on SORs */
		if (nv_encoder->dcb->type == OUTPUT_ANALOG ||
		    nv_encoder->dcb->type == OUTPUT_TV)
			dac |= (1 << nv_encoder->or);
		else
			sor |= (1 << nv_encoder->or);
	}

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);

		/* skip encoders whose "or" is still in use */
		if (nv_encoder->dcb->type == OUTPUT_ANALOG ||
		    nv_encoder->dcb->type == OUTPUT_TV) {
			if (dac & (1 << nv_encoder->or))
				continue;
		} else {
			if (sor & (1 << nv_encoder->or))
				continue;
		}

		nv_encoder->disconnect(nv_encoder);
	}

	nv50_crtc_blank(nv_crtc, true);
}
471
/* drm_crtc_helper_funcs.commit hook: unblank this CRTC after a mode
 * set, blank every CRTC no longer in use, and fire an EVO update so
 * all queued state takes effect. */
static void
nv50_crtc_commit(struct drm_crtc *crtc)
{
	struct drm_crtc *crtc2;
	struct drm_device *dev = crtc->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *evo = dev_priv->evo;
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	int ret;

	NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);

	nv50_crtc_blank(nv_crtc, false);

	/* Explicitly blank all unused crtc's. */
	list_for_each_entry(crtc2, &dev->mode_config.crtc_list, head) {
		if (!drm_helper_crtc_in_use(crtc2))
			nv50_crtc_blank(nouveau_crtc(crtc2), true);
	}

	ret = RING_SPACE(evo, 2);
	if (ret) {
		NV_ERROR(dev, "no space while committing crtc\n");
		return;
	}
	/* latch everything queued above */
	BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
	OUT_RING(evo, 0);
	FIRE_RING(evo);
}
501
/* drm_crtc_helper_funcs.mode_fixup hook: every mode is accepted
 * unchanged on NV50. */
static bool
nv50_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode,
		     struct drm_display_mode *adjusted_mode)
{
	return true;
}
508
/* Point the CRTC at its currently-bound framebuffer.
 *
 * Pins the new fb's bo in VRAM (unpinning @old_fb's bo), records the
 * offset/tiling/cpp in nv_crtc->fb, then queues the EVO methods that
 * describe the surface: DMA object, offset, size, pitch/tile mode,
 * depth format, CLUT mode, color control and pan position.  Reloads
 * the gamma LUT when the fb depth changed.  When @update is set, an
 * EVO update is pushed and the ring fired.
 *
 * Returns 0 on success, -EINVAL for an unsupported depth, or a
 * pin/ring error.
 */
static int
nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, int x, int y,
			   struct drm_framebuffer *old_fb, bool update)
{
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	struct drm_device *dev = nv_crtc->base.dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *evo = dev_priv->evo;
	struct drm_framebuffer *drm_fb = nv_crtc->base.fb;
	struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
	int ret, format;

	NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);

	/* translate DRM depth to the EVO surface format */
	switch (drm_fb->depth) {
	case 8:
		format = NV50_EVO_CRTC_FB_DEPTH_8;
		break;
	case 15:
		format = NV50_EVO_CRTC_FB_DEPTH_15;
		break;
	case 16:
		format = NV50_EVO_CRTC_FB_DEPTH_16;
		break;
	case 24:
	case 32:
		format = NV50_EVO_CRTC_FB_DEPTH_24;
		break;
	case 30:
		format = NV50_EVO_CRTC_FB_DEPTH_30;
		break;
	default:
		 NV_ERROR(dev, "unknown depth %d\n", drm_fb->depth);
		 return -EINVAL;
	}

	/* scanout surfaces must stay resident in VRAM */
	ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
	if (ret)
		return ret;

	if (old_fb) {
		struct nouveau_framebuffer *ofb = nouveau_framebuffer(old_fb);
		nouveau_bo_unpin(ofb->nvbo);
	}

	nv_crtc->fb.offset = fb->nvbo->bo.offset - dev_priv->vm_vram_base;
	nv_crtc->fb.tile_flags = fb->nvbo->tile_flags;
	nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
	/* NV84+: rebind the FB DMA object to match the bo's tiling,
	 * but only while unblanked (blank() detached it) */
	if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) {
		ret = RING_SPACE(evo, 2);
		if (ret)
			return ret;

		BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1);
		if (nv_crtc->fb.tile_flags == 0x7a00)
			OUT_RING(evo, NvEvoFB32);
		else
		if (nv_crtc->fb.tile_flags == 0x7000)
			OUT_RING(evo, NvEvoFB16);
		else
			OUT_RING(evo, NvEvoVRAM);
	}

	ret = RING_SPACE(evo, 12);
	if (ret)
		return ret;

	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_OFFSET), 5);
	OUT_RING(evo, nv_crtc->fb.offset >> 8);
	OUT_RING(evo, 0);
	OUT_RING(evo, (drm_fb->height << 16) | drm_fb->width);
	/* linear surfaces pass pitch | (1 << 20); tiled ones pass
	 * pitch-in-blocks plus the bo's tile mode */
	if (!nv_crtc->fb.tile_flags) {
		OUT_RING(evo, drm_fb->pitch | (1 << 20));
	} else {
		OUT_RING(evo, ((drm_fb->pitch / 4) << 4) |
				  fb->nvbo->tile_mode);
	}
	/* NV50 wants the tile flags merged into the format word */
	if (dev_priv->chipset == 0x50)
		OUT_RING(evo, (fb->nvbo->tile_flags << 8) | format);
	else
		OUT_RING(evo, format);

	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLUT_MODE), 1);
	OUT_RING(evo, fb->base.depth == 8 ?
		 NV50_EVO_CRTC_CLUT_MODE_OFF : NV50_EVO_CRTC_CLUT_MODE_ON);

	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, COLOR_CTRL), 1);
	OUT_RING(evo, NV50_EVO_CRTC_COLOR_CTRL_COLOR);
	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_POS), 1);
	OUT_RING(evo, (y << 16) | x);

	/* depth changed (or lut was marked dirty with depth 0):
	 * re-upload the gamma ramp for the new format */
	if (nv_crtc->lut.depth != fb->base.depth) {
		nv_crtc->lut.depth = fb->base.depth;
		nv50_crtc_lut_load(crtc);
	}

	if (update) {
		ret = RING_SPACE(evo, 2);
		if (ret)
			return ret;
		BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
		OUT_RING(evo, 0);
		FIRE_RING(evo);
	}

	return 0;
}
616
/* drm_crtc_helper_funcs.mode_set hook: program CRTC timings for
 * @adjusted_mode, then configure dither, scaler and framebuffer.
 *
 * Derives the sync-related timing words from the adjusted mode
 * (halved, with magic adjustments, for interlaced modes), queues the
 * CLOCK/DISPLAY_START/... EVO methods, and finishes by delegating to
 * nv50_crtc_do_mode_set_base() without an immediate update — commit()
 * latches everything.  Returns 0 or a ring/fb error.
 */
static int
nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
		   struct drm_display_mode *adjusted_mode, int x, int y,
		   struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *evo = dev_priv->evo;
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	struct nouveau_connector *nv_connector = NULL;
	uint32_t hsync_dur, vsync_dur, hsync_start_to_end, vsync_start_to_end;
	uint32_t hunk1, vunk1, vunk2a, vunk2b;
	int ret;

	/* Find the connector attached to this CRTC */
	nv_connector = nouveau_crtc_connector_get(nv_crtc);

	/* remember the mode for later (e.g. scaler reprogramming) */
	*nv_crtc->mode = *adjusted_mode;

	NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);

	hsync_dur = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
	vsync_dur = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
	hsync_start_to_end = adjusted_mode->htotal - adjusted_mode->hsync_start;
	vsync_start_to_end = adjusted_mode->vtotal - adjusted_mode->vsync_start;
	/* I can't give this a proper name, anyone else can? */
	hunk1 = adjusted_mode->htotal -
		adjusted_mode->hsync_start + adjusted_mode->hdisplay;
	vunk1 = adjusted_mode->vtotal -
		adjusted_mode->vsync_start + adjusted_mode->vdisplay;
	/* Another strange value, this time only for interlaced adjusted_modes. */
	vunk2a = 2 * adjusted_mode->vtotal -
		 adjusted_mode->vsync_start + adjusted_mode->vdisplay;
	vunk2b = adjusted_mode->vtotal -
		 adjusted_mode->vsync_start + adjusted_mode->vtotal;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* vertical values are per-field for interlaced modes */
		vsync_dur /= 2;
		vsync_start_to_end  /= 2;
		vunk1 /= 2;
		vunk2a /= 2;
		vunk2b /= 2;
		/* magic */
		if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) {
			vsync_start_to_end -= 1;
			vunk1 -= 1;
			vunk2a -= 1;
			vunk2b -= 1;
		}
	}

	ret = RING_SPACE(evo, 17);
	if (ret)
		return ret;

	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLOCK), 2);
	OUT_RING(evo, adjusted_mode->clock | 0x800000);
	OUT_RING(evo, (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 0);

	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, DISPLAY_START), 5);
	OUT_RING(evo, 0);
	OUT_RING(evo, (adjusted_mode->vtotal << 16) | adjusted_mode->htotal);
	OUT_RING(evo, (vsync_dur - 1) << 16 | (hsync_dur - 1));
	OUT_RING(evo, (vsync_start_to_end - 1) << 16 |
			(hsync_start_to_end - 1));
	OUT_RING(evo, (vunk1 - 1) << 16 | (hunk1 - 1));

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, UNK0824), 1);
		OUT_RING(evo, (vunk2b - 1) << 16 | (vunk2a - 1));
	} else {
		/* NOTE(review): no BEGIN_RING here — these two words pad
		 * the progressive case; confirm against EVO method docs */
		OUT_RING(evo, 0);
		OUT_RING(evo, 0);
	}

	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, UNK082C), 1);
	OUT_RING(evo, 0);

	/* This is the actual resolution of the mode. */
	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, REAL_RES), 1);
	OUT_RING(evo, (mode->vdisplay << 16) | mode->hdisplay);
	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CENTER_OFFSET), 1);
	OUT_RING(evo, NV50_EVO_CRTC_SCALE_CENTER_OFFSET_VAL(0, 0));

	/* no immediate update: commit() fires the final EVO update */
	nv_crtc->set_dither(nv_crtc, nv_connector->use_dithering, false);
	nv_crtc->set_scale(nv_crtc, nv_connector->scaling_mode, false);

	return nv50_crtc_do_mode_set_base(crtc, x, y, old_fb, false);
}
706
707static int
708nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
709 struct drm_framebuffer *old_fb)
710{
711 return nv50_crtc_do_mode_set_base(crtc, x, y, old_fb, true);
712}
713
/* CRTC helper entry points used by drm_crtc_helper_set_config(). */
static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = {
	.dpms = nv50_crtc_dpms,
	.prepare = nv50_crtc_prepare,
	.commit = nv50_crtc_commit,
	.mode_fixup = nv50_crtc_mode_fixup,
	.mode_set = nv50_crtc_mode_set,
	.mode_set_base = nv50_crtc_mode_set_base,
	.load_lut = nv50_crtc_lut_load,
};
723
/* Allocate and register CRTC @index with the DRM core.
 *
 * Sets up the default (identity) gamma ramp, allocates and pins a 4 KiB
 * VRAM bo for the LUT and a 64x64x4 bo for the cursor image, installs
 * the dither/scale function pointers, and registers the CRTC and its
 * helper funcs.  Returns 0 on success or -ENOMEM / a bo error.
 *
 * NOTE(review): a cursor-bo allocation failure is not treated as fatal
 * here, unlike the LUT bo — nv50_cursor_init() is still called.
 */
int
nv50_crtc_create(struct drm_device *dev, int index)
{
	struct nouveau_crtc *nv_crtc = NULL;
	int ret, i;

	NV_DEBUG_KMS(dev, "\n");

	nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
	if (!nv_crtc)
		return -ENOMEM;

	nv_crtc->mode = kzalloc(sizeof(*nv_crtc->mode), GFP_KERNEL);
	if (!nv_crtc->mode) {
		kfree(nv_crtc);
		return -ENOMEM;
	}

	/* Default CLUT parameters, will be activated on the hw upon
	 * first mode set.
	 */
	for (i = 0; i < 256; i++) {
		nv_crtc->lut.r[i] = i << 8;
		nv_crtc->lut.g[i] = i << 8;
		nv_crtc->lut.b[i] = i << 8;
	}
	/* depth 0 == dirty; uploaded on first mode_set_base() */
	nv_crtc->lut.depth = 0;

	ret = nouveau_bo_new(dev, NULL, 4096, 0x100, TTM_PL_FLAG_VRAM,
			     0, 0x0000, false, true, &nv_crtc->lut.nvbo);
	if (!ret) {
		ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
		if (!ret)
			ret = nouveau_bo_map(nv_crtc->lut.nvbo);
		if (ret)
			nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
	}

	/* LUT bo is mandatory: bail out on any failure above */
	if (ret) {
		kfree(nv_crtc->mode);
		kfree(nv_crtc);
		return ret;
	}

	nv_crtc->index = index;

	/* set function pointers */
	nv_crtc->set_dither = nv50_crtc_set_dither;
	nv_crtc->set_scale = nv50_crtc_set_scale;

	drm_crtc_init(dev, &nv_crtc->base, &nv50_crtc_funcs);
	drm_crtc_helper_add(&nv_crtc->base, &nv50_crtc_helper_funcs);
	drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);

	/* 64x64 ARGB cursor image buffer */
	ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
			     0, 0x0000, false, true, &nv_crtc->cursor.nvbo);
	if (!ret) {
		ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
		if (!ret)
			ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
		if (ret)
			nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
	}

	nv50_cursor_init(nv_crtc);
	return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c
new file mode 100644
index 000000000000..753e723adb3a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_cursor.c
@@ -0,0 +1,156 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm_mode.h"
29
30#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
31#include "nouveau_reg.h"
32#include "nouveau_drv.h"
33#include "nouveau_crtc.h"
34#include "nv50_display.h"
35
/* Show the hardware cursor on this CRTC.
 *
 * Queues the cursor DMA binding (NV84+ only) and CURSOR_CTRL/offset
 * methods.  When @update is set, pushes an EVO update, fires the ring
 * and marks the cursor visible; when clear, the change rides along
 * with a later update and cursor.visible is left untouched.
 */
static void
nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
{
	struct drm_nouveau_private *dev_priv = nv_crtc->base.dev->dev_private;
	struct nouveau_channel *evo = dev_priv->evo;
	struct drm_device *dev = nv_crtc->base.dev;
	int ret;

	NV_DEBUG_KMS(dev, "\n");

	/* nothing to do when an immediate show finds it already shown */
	if (update && nv_crtc->cursor.visible)
		return;

	ret = RING_SPACE(evo, (dev_priv->chipset != 0x50 ? 5 : 3) + update * 2);
	if (ret) {
		NV_ERROR(dev, "no space while unhiding cursor\n");
		return;
	}

	if (dev_priv->chipset != 0x50) {
		BEGIN_RING(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
		OUT_RING(evo, NvEvoVRAM);
	}
	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
	OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_SHOW);
	OUT_RING(evo, nv_crtc->cursor.offset >> 8);

	if (update) {
		BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
		OUT_RING(evo, 0);
		FIRE_RING(evo);
		nv_crtc->cursor.visible = true;
	}
}
70
/* Hide the hardware cursor on this CRTC.
 *
 * Mirror of nv50_cursor_show(): queues CURSOR_CTRL_HIDE and unbinds
 * the cursor DMA object on NV84+.  When @update is set, flushes the
 * ring immediately and marks the cursor hidden.
 */
static void
nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
{
	struct drm_nouveau_private *dev_priv = nv_crtc->base.dev->dev_private;
	struct nouveau_channel *evo = dev_priv->evo;
	struct drm_device *dev = nv_crtc->base.dev;
	int ret;

	NV_DEBUG_KMS(dev, "\n");

	/* nothing to do when an immediate hide finds it already hidden */
	if (update && !nv_crtc->cursor.visible)
		return;

	ret = RING_SPACE(evo, (dev_priv->chipset != 0x50 ? 5 : 3) + update * 2);
	if (ret) {
		NV_ERROR(dev, "no space while hiding cursor\n");
		return;
	}
	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
	OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_HIDE);
	OUT_RING(evo, 0);
	if (dev_priv->chipset != 0x50) {
		BEGIN_RING(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
		OUT_RING(evo, NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE);
	}

	if (update) {
		BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
		OUT_RING(evo, 0);
		FIRE_RING(evo);
		nv_crtc->cursor.visible = false;
	}
}
104
/* Move the hardware cursor; x/y are truncated to 16 bits and packed
 * into the CURSOR_USER_POS register. */
static void
nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
{
	struct drm_device *dev = nv_crtc->base.dev;

	nv_wr32(dev, NV50_PDISPLAY_CURSOR_USER_POS(nv_crtc->index),
		((y & 0xFFFF) << 16) | (x & 0xFFFF));
	/* Needed to make the cursor move. */
	nv_wr32(dev, NV50_PDISPLAY_CURSOR_USER_POS_CTRL(nv_crtc->index), 0);
}
115
116static void
117nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
118{
119 NV_DEBUG_KMS(nv_crtc->base.dev, "\n");
120 if (offset == nv_crtc->cursor.offset)
121 return;
122
123 nv_crtc->cursor.offset = offset;
124 if (nv_crtc->cursor.visible) {
125 nv_crtc->cursor.visible = false;
126 nv_crtc->cursor.show(nv_crtc, true);
127 }
128}
129
130int
131nv50_cursor_init(struct nouveau_crtc *nv_crtc)
132{
133 nv_crtc->cursor.set_offset = nv50_cursor_set_offset;
134 nv_crtc->cursor.set_pos = nv50_cursor_set_pos;
135 nv_crtc->cursor.hide = nv50_cursor_hide;
136 nv_crtc->cursor.show = nv50_cursor_show;
137 return 0;
138}
139
/* Shut down the hardware cursor for this CRTC.
 *
 * Writes 0 to CURSOR_CTRL2 and waits for the status field to clear;
 * logs the register contents if the wait times out.
 */
void
nv50_cursor_fini(struct nouveau_crtc *nv_crtc)
{
	struct drm_device *dev = nv_crtc->base.dev;
	int idx = nv_crtc->index;

	NV_DEBUG_KMS(dev, "\n");

	nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx), 0);
	if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx),
		     NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
		NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n");
		NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n",
			 nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx)));
	}
}
156
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
new file mode 100644
index 000000000000..1fd9537beff6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_dac.c
@@ -0,0 +1,304 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm_crtc_helper.h"
29
30#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
31#include "nouveau_reg.h"
32#include "nouveau_drv.h"
33#include "nouveau_dma.h"
34#include "nouveau_encoder.h"
35#include "nouveau_connector.h"
36#include "nouveau_crtc.h"
37#include "nv50_display.h"
38
39static void
40nv50_dac_disconnect(struct nouveau_encoder *nv_encoder)
41{
42 struct drm_device *dev = to_drm_encoder(nv_encoder)->dev;
43 struct drm_nouveau_private *dev_priv = dev->dev_private;
44 struct nouveau_channel *evo = dev_priv->evo;
45 int ret;
46
47 NV_DEBUG_KMS(dev, "Disconnecting DAC %d\n", nv_encoder->or);
48
49 ret = RING_SPACE(evo, 2);
50 if (ret) {
51 NV_ERROR(dev, "no space while disconnecting DAC\n");
52 return;
53 }
54 BEGIN_RING(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 1);
55 OUT_RING(evo, 0);
56}
57
/* Sense an analog load on a DAC output.  Powers the DAC up, drives a test
 * level (the VBIOS-provided one when available), samples the load-detect
 * status, then restores the DPMS state read on entry.  Returns
 * connector_status_connected only when every PRESENT bit is set.
 */
static enum drm_connector_status
nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_device *dev = encoder->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	enum drm_connector_status status = connector_status_disconnected;
	uint32_t dpms_state, load_pattern, load_state;
	int or = nv_encoder->or;

	/* Enable the DAC clock and remember the DPMS state we entered with,
	 * so it can be restored after the test.
	 */
	nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL1(or), 0x00000001);
	dpms_state = nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or));

	nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
		0x00150000 | NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
	if (!nv_wait(NV50_PDISPLAY_DAC_DPMS_CTRL(or),
		     NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
		NV_ERROR(dev, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
		NV_ERROR(dev, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
			 nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or)));
		return status;
	}

	/* Use bios provided value if possible. */
	if (dev_priv->vbios.dactestval) {
		load_pattern = dev_priv->vbios.dactestval;
		NV_DEBUG_KMS(dev, "Using bios provided load_pattern of %d\n",
			  load_pattern);
	} else {
		load_pattern = 340;
		NV_DEBUG_KMS(dev, "Using default load_pattern of %d\n",
			  load_pattern);
	}

	/* Drive the test pattern, let the sense circuitry settle, sample. */
	nv_wr32(dev, NV50_PDISPLAY_DAC_LOAD_CTRL(or),
		NV50_PDISPLAY_DAC_LOAD_CTRL_ACTIVE | load_pattern);
	mdelay(45); /* give it some time to process */
	load_state = nv_rd32(dev, NV50_PDISPLAY_DAC_LOAD_CTRL(or));

	/* Stop the test and restore the original DPMS state. */
	nv_wr32(dev, NV50_PDISPLAY_DAC_LOAD_CTRL(or), 0);
	nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or), dpms_state |
		NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);

	if ((load_state & NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT) ==
			  NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT)
		status = connector_status_connected;

	if (status == connector_status_connected)
		NV_DEBUG_KMS(dev, "Load was detected on output with or %d\n", or);
	else
		NV_DEBUG_KMS(dev, "Load was not detected on output with or %d\n", or);

	return status;
}
112
/* drm_encoder_helper_funcs.dpms for NV50 DACs: accumulate the blank and
 * hsync/vsync power-down bits for the requested level and submit them with
 * the PENDING bit, after any earlier request on this DAC has completed.
 */
static void
nv50_dac_dpms(struct drm_encoder *encoder, int mode)
{
	struct drm_device *dev = encoder->dev;
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	uint32_t val;
	int or = nv_encoder->or;

	NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode);

	/* wait for it to be done */
	if (!nv_wait(NV50_PDISPLAY_DAC_DPMS_CTRL(or),
		     NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
		NV_ERROR(dev, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
		NV_ERROR(dev, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
			 nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or)));
		return;
	}

	/* Preserve everything except the low power-control bits. */
	val = nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or)) & ~0x7F;

	/* Any state other than full-on blanks the output. */
	if (mode != DRM_MODE_DPMS_ON)
		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_BLANKED;

	switch (mode) {
	case DRM_MODE_DPMS_STANDBY:
		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF;
		break;
	case DRM_MODE_DPMS_SUSPEND:
		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF;
		break;
	case DRM_MODE_DPMS_OFF:
		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_OFF;
		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF;
		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF;
		break;
	default:
		break;
	}

	nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or), val |
		NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
}
156
/* Encoder state save is not implemented; shout in the log if the helper
 * framework ever calls it so the gap is noticed.
 */
static void
nv50_dac_save(struct drm_encoder *encoder)
{
	NV_ERROR(encoder->dev, "!!\n");
}
162
/* Encoder state restore is not implemented; shout in the log if the helper
 * framework ever calls it so the gap is noticed.
 */
static void
nv50_dac_restore(struct drm_encoder *encoder)
{
	NV_ERROR(encoder->dev, "!!\n");
}
168
169static bool
170nv50_dac_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
171 struct drm_display_mode *adjusted_mode)
172{
173 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
174 struct nouveau_connector *connector;
175
176 NV_DEBUG_KMS(encoder->dev, "or %d\n", nv_encoder->or);
177
178 connector = nouveau_encoder_connector_get(nv_encoder);
179 if (!connector) {
180 NV_ERROR(encoder->dev, "Encoder has no connector\n");
181 return false;
182 }
183
184 if (connector->scaling_mode != DRM_MODE_SCALE_NONE &&
185 connector->native_mode) {
186 int id = adjusted_mode->base.id;
187 *adjusted_mode = *connector->native_mode;
188 adjusted_mode->base.id = id;
189 }
190
191 return true;
192}
193
/* Intentionally empty: no pre-modeset work is needed for NV50 DACs. */
static void
nv50_dac_prepare(struct drm_encoder *encoder)
{
}
198
/* Intentionally empty: no post-modeset commit work is needed for NV50 DACs. */
static void
nv50_dac_commit(struct drm_encoder *encoder)
{
}
203
/* Attach this DAC to its CRTC via the EVO channel.  mode_ctl selects the
 * CRTC and output flavour, mode_ctl2 carries the sync polarities; both are
 * written through a single two-word MODE_CTRL method.
 */
static void
nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
		  struct drm_display_mode *adjusted_mode)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_device *dev = encoder->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *evo = dev_priv->evo;
	struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
	uint32_t mode_ctl = 0, mode_ctl2 = 0;
	int ret;

	NV_DEBUG_KMS(dev, "or %d\n", nv_encoder->or);

	/* Make sure the DAC is powered fully on before connecting it. */
	nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON);

	if (crtc->index == 1)
		mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC1;
	else
		mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC0;

	/* Lacking a working tv-out, this is not a 100% sure. */
	if (nv_encoder->dcb->type == OUTPUT_ANALOG)
		mode_ctl |= 0x40;
	else
	if (nv_encoder->dcb->type == OUTPUT_TV)
		mode_ctl |= 0x100;

	if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
		mode_ctl2 |= NV50_EVO_DAC_MODE_CTRL2_NHSYNC;

	if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
		mode_ctl2 |= NV50_EVO_DAC_MODE_CTRL2_NVSYNC;

	ret = RING_SPACE(evo, 3);
	if (ret) {
		NV_ERROR(dev, "no space while connecting DAC\n");
		return;
	}
	BEGIN_RING(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 2);
	OUT_RING(evo, mode_ctl);
	OUT_RING(evo, mode_ctl2);
}
247
/* Modeset helper vtable wiring the NV50 DAC implementation into the DRM
 * encoder helper framework.
 */
static const struct drm_encoder_helper_funcs nv50_dac_helper_funcs = {
	.dpms = nv50_dac_dpms,
	.save = nv50_dac_save,
	.restore = nv50_dac_restore,
	.mode_fixup = nv50_dac_mode_fixup,
	.prepare = nv50_dac_prepare,
	.commit = nv50_dac_commit,
	.mode_set = nv50_dac_mode_set,
	.detect = nv50_dac_detect
};
258
259static void
260nv50_dac_destroy(struct drm_encoder *encoder)
261{
262 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
263
264 if (!encoder)
265 return;
266
267 NV_DEBUG_KMS(encoder->dev, "\n");
268
269 drm_encoder_cleanup(encoder);
270 kfree(nv_encoder);
271}
272
/* Base encoder vtable; only destruction needs a hook. */
static const struct drm_encoder_funcs nv50_dac_encoder_funcs = {
	.destroy = nv50_dac_destroy,
};
276
277int
278nv50_dac_create(struct drm_device *dev, struct dcb_entry *entry)
279{
280 struct nouveau_encoder *nv_encoder;
281 struct drm_encoder *encoder;
282
283 NV_DEBUG_KMS(dev, "\n");
284 NV_INFO(dev, "Detected a DAC output\n");
285
286 nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
287 if (!nv_encoder)
288 return -ENOMEM;
289 encoder = to_drm_encoder(nv_encoder);
290
291 nv_encoder->dcb = entry;
292 nv_encoder->or = ffs(entry->or) - 1;
293
294 nv_encoder->disconnect = nv50_dac_disconnect;
295
296 drm_encoder_init(dev, encoder, &nv50_dac_encoder_funcs,
297 DRM_MODE_ENCODER_DAC);
298 drm_encoder_helper_add(encoder, &nv50_dac_helper_funcs);
299
300 encoder->possible_crtcs = entry->heads;
301 encoder->possible_clones = 0;
302 return 0;
303}
304
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
new file mode 100644
index 000000000000..649db4c1b690
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -0,0 +1,1004 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "nv50_display.h"
28#include "nouveau_crtc.h"
29#include "nouveau_encoder.h"
30#include "nouveau_connector.h"
31#include "nouveau_fb.h"
32#include "drm_crtc_helper.h"
33
34static void
35nv50_evo_channel_del(struct nouveau_channel **pchan)
36{
37 struct nouveau_channel *chan = *pchan;
38
39 if (!chan)
40 return;
41 *pchan = NULL;
42
43 nouveau_gpuobj_channel_takedown(chan);
44 nouveau_bo_ref(NULL, &chan->pushbuf_bo);
45
46 if (chan->user)
47 iounmap(chan->user);
48
49 kfree(chan);
50}
51
/* Create a DMA object for the EVO channel and hash it into the channel's
 * RAMHT under 'name'.  The six words written below are the NV50 DMA-object
 * layout: word 0 packs tile_flags<<22 | magic_flags<<16 | class, followed
 * by limit, offset, and three fixed words.  Returns 0 or a negative errno.
 */
static int
nv50_evo_dmaobj_new(struct nouveau_channel *evo, uint32_t class, uint32_t name,
		    uint32_t tile_flags, uint32_t magic_flags,
		    uint32_t offset, uint32_t limit)
{
	struct drm_nouveau_private *dev_priv = evo->dev->dev_private;
	struct drm_device *dev = evo->dev;
	struct nouveau_gpuobj *obj = NULL;
	int ret;

	/* Six 32-bit words, 32-byte aligned. */
	ret = nouveau_gpuobj_new(dev, evo, 6*4, 32, 0, &obj);
	if (ret)
		return ret;
	obj->engine = NVOBJ_ENGINE_DISPLAY;

	/* Make the object reachable by handle from the EVO channel. */
	ret = nouveau_gpuobj_ref_add(dev, evo, name, obj, NULL);
	if (ret) {
		nouveau_gpuobj_del(dev, &obj);
		return ret;
	}

	/* Instance memory writes must be bracketed by prepare/finish. */
	dev_priv->engine.instmem.prepare_access(dev, true);
	nv_wo32(dev, obj, 0, (tile_flags << 22) | (magic_flags << 16) | class);
	nv_wo32(dev, obj, 1, limit);
	nv_wo32(dev, obj, 2, offset);
	nv_wo32(dev, obj, 3, 0x00000000);
	nv_wo32(dev, obj, 4, 0x00000000);
	nv_wo32(dev, obj, 5, 0x00010000);
	dev_priv->engine.instmem.finish_access(dev);

	return 0;
}
84
/* Create the EVO display channel: allocate its instance memory, RAMHT and
 * framebuffer DMA objects, pin and map a 4 KiB VRAM push buffer, and map
 * the channel's USER control registers.  Any failure tears down whatever
 * was built so far via nv50_evo_channel_del().  Returns 0 or -errno.
 */
static int
nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan;
	int ret;

	chan = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;
	*pchan = chan;

	/* EVO is not a normal PFIFO channel, hence id -1. */
	chan->id = -1;
	chan->dev = dev;
	chan->user_get = 4;
	chan->user_put = 0;

	INIT_LIST_HEAD(&chan->ramht_refs);

	/* 32 KiB of instance memory backing the channel's objects. */
	ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 32768, 0x1000,
				     NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin);
	if (ret) {
		NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
		nv50_evo_channel_del(pchan);
		return ret;
	}

	ret = nouveau_mem_init_heap(&chan->ramin_heap, chan->ramin->gpuobj->
				    im_pramin->start, 32768);
	if (ret) {
		NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
		nv50_evo_channel_del(pchan);
		return ret;
	}

	/* Hash table used to look the DMA objects below up by handle. */
	ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 4096, 16,
				     0, &chan->ramht);
	if (ret) {
		NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
		nv50_evo_channel_del(pchan);
		return ret;
	}

	/* Framebuffer DMA objects; the tiled 16/32-bpp variants only exist
	 * on chipsets newer than the original NV50.
	 */
	if (dev_priv->chipset != 0x50) {
		ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB16, 0x70, 0x19,
					  0, 0xffffffff);
		if (ret) {
			nv50_evo_channel_del(pchan);
			return ret;
		}


		ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB32, 0x7a, 0x19,
					  0, 0xffffffff);
		if (ret) {
			nv50_evo_channel_del(pchan);
			return ret;
		}
	}

	/* Linear DMA object covering all of VRAM. */
	ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoVRAM, 0, 0x19,
				  0, dev_priv->vram_size);
	if (ret) {
		nv50_evo_channel_del(pchan);
		return ret;
	}

	/* One page of VRAM, pinned, used as the EVO push buffer. */
	ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
			     false, true, &chan->pushbuf_bo);
	if (ret == 0)
		ret = nouveau_bo_pin(chan->pushbuf_bo, TTM_PL_FLAG_VRAM);
	if (ret) {
		NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret);
		nv50_evo_channel_del(pchan);
		return ret;
	}

	ret = nouveau_bo_map(chan->pushbuf_bo);
	if (ret) {
		NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret);
		nv50_evo_channel_del(pchan);
		return ret;
	}

	/* Map the channel's USER control area (PUT/GET registers). */
	chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
					NV50_PDISPLAY_USER(0), PAGE_SIZE);
	if (!chan->user) {
		NV_ERROR(dev, "Error mapping EVO control regs.\n");
		nv50_evo_channel_del(pchan);
		return -ENOMEM;
	}

	return 0;
}
179
/* Bring the NV50 display engine up: copy per-unit config into the main
 * control area, un-wedge the hardware if needed, start the EVO channel's
 * DMA fifo, push an initial method stream, and enable clock-change and
 * hotplug interrupts.  Returns 0 or -EBUSY on a hardware timeout.
 */
int
nv50_display_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
	struct nouveau_channel *evo = dev_priv->evo;
	struct drm_connector *connector;
	uint32_t val, ram_amount, hpd_en[2];
	uint64_t start;
	int ret, i;

	NV_DEBUG_KMS(dev, "\n");

	nv_wr32(dev, 0x00610184, nv_rd32(dev, 0x00614004));
	/*
	 * I think the 0x006101XX range is some kind of main control area
	 * that enables things.
	 */
	/* CRTC? */
	for (i = 0; i < 2; i++) {
		val = nv_rd32(dev, 0x00616100 + (i * 0x800));
		nv_wr32(dev, 0x00610190 + (i * 0x10), val);
		val = nv_rd32(dev, 0x00616104 + (i * 0x800));
		nv_wr32(dev, 0x00610194 + (i * 0x10), val);
		val = nv_rd32(dev, 0x00616108 + (i * 0x800));
		nv_wr32(dev, 0x00610198 + (i * 0x10), val);
		val = nv_rd32(dev, 0x0061610c + (i * 0x800));
		nv_wr32(dev, 0x0061019c + (i * 0x10), val);
	}
	/* DAC */
	for (i = 0; i < 3; i++) {
		val = nv_rd32(dev, 0x0061a000 + (i * 0x800));
		nv_wr32(dev, 0x006101d0 + (i * 0x04), val);
	}
	/* SOR */
	for (i = 0; i < 4; i++) {
		val = nv_rd32(dev, 0x0061c000 + (i * 0x800));
		nv_wr32(dev, 0x006101e0 + (i * 0x04), val);
	}
	/* Something not yet in use, tv-out maybe. */
	for (i = 0; i < 3; i++) {
		val = nv_rd32(dev, 0x0061e000 + (i * 0x800));
		nv_wr32(dev, 0x006101f0 + (i * 0x04), val);
	}

	/* Put all DACs in a known power state and enable their clocks. */
	for (i = 0; i < 3; i++) {
		nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(i), 0x00550000 |
			NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
		nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL1(i), 0x00000001);
	}

	/* This used to be in crtc unblank, but seems out of place there. */
	nv_wr32(dev, NV50_PDISPLAY_UNK_380, 0);
	/* RAM is clamped to 256 MiB. */
	ram_amount = dev_priv->vram_size;
	NV_DEBUG_KMS(dev, "ram_amount %d\n", ram_amount);
	if (ram_amount > 256*1024*1024)
		ram_amount = 256*1024*1024;
	nv_wr32(dev, NV50_PDISPLAY_RAM_AMOUNT, ram_amount - 1);
	nv_wr32(dev, NV50_PDISPLAY_UNK_388, 0x150000);
	nv_wr32(dev, NV50_PDISPLAY_UNK_38C, 0);

	/* The precise purpose is unknown, i suspect it has something to do
	 * with text mode.
	 */
	if (nv_rd32(dev, NV50_PDISPLAY_INTR_1) & 0x100) {
		nv_wr32(dev, NV50_PDISPLAY_INTR_1, 0x100);
		nv_wr32(dev, 0x006194e8, nv_rd32(dev, 0x006194e8) & ~1);
		if (!nv_wait(0x006194e8, 2, 0)) {
			NV_ERROR(dev, "timeout: (0x6194e8 & 2) != 0\n");
			NV_ERROR(dev, "0x6194e8 = 0x%08x\n",
				 nv_rd32(dev, 0x6194e8));
			return -EBUSY;
		}
	}

	/* taken from nv bug #12637, attempts to un-wedge the hw if it's
	 * stuck in some unspecified state
	 */
	start = ptimer->read(dev);
	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x2b00);
	while ((val = nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0))) & 0x1e0000) {
		if ((val & 0x9f0000) == 0x20000)
			nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
							val | 0x800000);

		if ((val & 0x3f0000) == 0x30000)
			nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
							val | 0x200000);

		/* give up after one second of wall time */
		if (ptimer->read(dev) - start > 1000000000ULL) {
			NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) != 0\n");
			NV_ERROR(dev, "0x610200 = 0x%08x\n", val);
			return -EBUSY;
		}
	}

	nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, NV50_PDISPLAY_CTRL_STATE_ENABLE);
	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1000b03);
	if (!nv_wait(NV50_PDISPLAY_CHANNEL_STAT(0), 0x40000000, 0x40000000)) {
		NV_ERROR(dev, "timeout: (0x610200 & 0x40000000) == 0x40000000\n");
		NV_ERROR(dev, "0x610200 = 0x%08x\n",
			  nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)));
		return -EBUSY;
	}

	/* Enable the per-head hardware cursor channels. */
	for (i = 0; i < 2; i++) {
		nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000);
		if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
			     NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
			NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n");
			NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n",
				 nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
			return -EBUSY;
		}

		nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
			NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_ON);
		if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
			     NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS,
			     NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE)) {
			NV_ERROR(dev, "timeout: "
				      "CURSOR_CTRL2_STATUS_ACTIVE(%d)\n", i);
			NV_ERROR(dev, "CURSOR_CTRL2(%d) = 0x%08x\n", i,
				 nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
			return -EBUSY;
		}
	}

	/* Point the display engine at the EVO channel's object table. */
	nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->instance >> 8) | 9);

	/* initialise fifo */
	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_DMA_CB(0),
		((evo->pushbuf_bo->bo.mem.mm_node->start << PAGE_SHIFT) >> 8) |
		NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM |
		NV50_PDISPLAY_CHANNEL_DMA_CB_VALID);
	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK2(0), 0x00010000);
	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK3(0), 0x00000002);
	if (!nv_wait(0x610200, 0x80000000, 0x00000000)) {
		NV_ERROR(dev, "timeout: (0x610200 & 0x80000000) == 0\n");
		NV_ERROR(dev, "0x610200 = 0x%08x\n", nv_rd32(dev, 0x610200));
		return -EBUSY;
	}
	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
		(nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)) & ~0x00000003) |
		 NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED);
	nv_wr32(dev, NV50_PDISPLAY_USER_PUT(0), 0);
	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x01000003 |
		NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED);
	nv_wr32(dev, 0x610300, nv_rd32(dev, 0x610300) & ~1);

	/* Software-side ring state for the 4 KiB push buffer. */
	evo->dma.max = (4096/4) - 2;
	evo->dma.put = 0;
	evo->dma.cur = evo->dma.put;
	evo->dma.free = evo->dma.max - evo->dma.cur;

	ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS);
	if (ret)
		return ret;

	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
		OUT_RING(evo, 0);

	/* Initial method stream: disable notifies and detach CRTC 0's
	 * framebuffer/cursor state.
	 */
	ret = RING_SPACE(evo, 11);
	if (ret)
		return ret;
	BEGIN_RING(evo, 0, NV50_EVO_UNK84, 2);
	OUT_RING(evo, NV50_EVO_UNK84_NOTIFY_DISABLED);
	OUT_RING(evo, NV50_EVO_DMA_NOTIFY_HANDLE_NONE);
	BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, FB_DMA), 1);
	OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE);
	BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK0800), 1);
	OUT_RING(evo, 0);
	BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, DISPLAY_START), 1);
	OUT_RING(evo, 0);
	BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK082C), 1);
	OUT_RING(evo, 0);
	FIRE_RING(evo);
	if (!nv_wait(0x640004, 0xffffffff, evo->dma.put << 2))
		NV_ERROR(dev, "evo pushbuf stalled\n");

	/* enable clock change interrupts. */
	nv_wr32(dev, 0x610028, 0x00010001);
	nv_wr32(dev, NV50_PDISPLAY_INTR_EN, (NV50_PDISPLAY_INTR_EN_CLK_UNK10 |
					     NV50_PDISPLAY_INTR_EN_CLK_UNK20 |
					     NV50_PDISPLAY_INTR_EN_CLK_UNK40));

	/* enable hotplug interrupts */
	hpd_en[0] = hpd_en[1] = 0;
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct nouveau_connector *conn = nouveau_connector(connector);
		struct dcb_gpio_entry *gpio;

		if (conn->dcb->gpio_tag == 0xff)
			continue;

		gpio = nouveau_bios_gpio_entry(dev, conn->dcb->gpio_tag);
		if (!gpio)
			continue;

		/* One rise/fall bit pair per GPIO line, 16 lines per reg. */
		hpd_en[gpio->line >> 4] |= (0x00010001 << (gpio->line & 0xf));
	}

	nv_wr32(dev, 0xe054, 0xffffffff);
	nv_wr32(dev, 0xe050, hpd_en[0]);
	if (dev_priv->chipset >= 0x90) {
		nv_wr32(dev, 0xe074, 0xffffffff);
		nv_wr32(dev, 0xe070, hpd_en[1]);
	}

	return 0;
}
392
/* Shut the display engine down: blank every CRTC, flush a final EVO update,
 * wait for pending vblanks and SOR DPMS transitions, then disable the EVO
 * channel and all display/hotplug interrupts.  Always returns 0.
 */
static int nv50_display_disable(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_crtc *drm_crtc;
	int ret, i;

	NV_DEBUG_KMS(dev, "\n");

	list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) {
		struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc);

		nv50_crtc_blank(crtc, true);
	}

	/* Commit the blanking above with a final UPDATE method. */
	ret = RING_SPACE(dev_priv->evo, 2);
	if (ret == 0) {
		BEGIN_RING(dev_priv->evo, 0, NV50_EVO_UPDATE, 1);
		OUT_RING(dev_priv->evo, 0);
	}
	FIRE_RING(dev_priv->evo);

	/* Almost like ack'ing a vblank interrupt, maybe in the spirit of
	 * cleaning up?
	 */
	list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) {
		struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc);
		uint32_t mask = NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(crtc->index);

		if (!crtc->base.enabled)
			continue;

		nv_wr32(dev, NV50_PDISPLAY_INTR_1, mask);
		if (!nv_wait(NV50_PDISPLAY_INTR_1, mask, mask)) {
			NV_ERROR(dev, "timeout: (0x610024 & 0x%08x) == "
				      "0x%08x\n", mask, mask);
			NV_ERROR(dev, "0x610024 = 0x%08x\n",
				 nv_rd32(dev, NV50_PDISPLAY_INTR_1));
		}
	}

	/* Stop the EVO channel and wait for it to go idle. */
	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0);
	nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, 0);
	if (!nv_wait(NV50_PDISPLAY_CHANNEL_STAT(0), 0x1e0000, 0)) {
		NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) == 0\n");
		NV_ERROR(dev, "0x610200 = 0x%08x\n",
			  nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)));
	}

	/* Wait for any in-flight SOR DPMS transitions to settle. */
	for (i = 0; i < 3; i++) {
		if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_STATE(i),
			     NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
			NV_ERROR(dev, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", i);
			NV_ERROR(dev, "SOR_DPMS_STATE(%d) = 0x%08x\n", i,
				  nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_STATE(i)));
		}
	}

	/* disable interrupts. */
	nv_wr32(dev, NV50_PDISPLAY_INTR_EN, 0x00000000);

	/* disable hotplug interrupts */
	nv_wr32(dev, 0xe054, 0xffffffff);
	nv_wr32(dev, 0xe050, 0x00000000);
	if (dev_priv->chipset >= 0x90) {
		nv_wr32(dev, 0xe074, 0xffffffff);
		nv_wr32(dev, 0xe070, 0x00000000);
	}
	return 0;
}
462
/* Set up KMS for NV50: initialise the DRM mode config, create the EVO
 * channel, instantiate CRTCs, then encoders and connectors from the VBIOS
 * DCB table, and finally bring the hardware up via nv50_display_init().
 * Returns 0 or a negative errno (with everything torn down on failure).
 */
int nv50_display_create(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct dcb_table *dcb = &dev_priv->vbios.dcb;
	int ret, i;

	NV_DEBUG_KMS(dev, "\n");

	/* init basic kernel modesetting */
	drm_mode_config_init(dev);

	/* Initialise some optional connector properties. */
	drm_mode_create_scaling_mode_property(dev);
	drm_mode_create_dithering_property(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs;

	dev->mode_config.max_width = 8192;
	dev->mode_config.max_height = 8192;

	dev->mode_config.fb_base = dev_priv->fb_phys;

	/* Create EVO channel */
	ret = nv50_evo_channel_new(dev, &dev_priv->evo);
	if (ret) {
		NV_ERROR(dev, "Error creating EVO channel: %d\n", ret);
		return ret;
	}

	/* Create CRTC objects */
	for (i = 0; i < 2; i++)
		nv50_crtc_create(dev, i);

	/* We setup the encoders from the BIOS table */
	for (i = 0 ; i < dcb->entries; i++) {
		struct dcb_entry *entry = &dcb->entry[i];

		if (entry->location != DCB_LOC_ON_CHIP) {
			NV_WARN(dev, "Off-chip encoder %d/%d unsupported\n",
				entry->type, ffs(entry->or) - 1);
			continue;
		}

		switch (entry->type) {
		case OUTPUT_TMDS:
		case OUTPUT_LVDS:
		case OUTPUT_DP:
			nv50_sor_create(dev, entry);
			break;
		case OUTPUT_ANALOG:
			nv50_dac_create(dev, entry);
			break;
		default:
			NV_WARN(dev, "DCB encoder %d unknown\n", entry->type);
			continue;
		}
	}

	/* Skip consecutive duplicate connector table entries (same index2). */
	for (i = 0 ; i < dcb->connector.entries; i++) {
		if (i != 0 && dcb->connector.entry[i].index2 ==
			      dcb->connector.entry[i - 1].index2)
			continue;
		nouveau_connector_create(dev, &dcb->connector.entry[i]);
	}

	ret = nv50_display_init(dev);
	if (ret) {
		nv50_display_destroy(dev);
		return ret;
	}

	return 0;
}
539
/* Tear KMS down again: drop the DRM mode config, disable the display
 * hardware and delete the EVO channel.  Always returns 0.
 */
int nv50_display_destroy(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	NV_DEBUG_KMS(dev, "\n");

	/* NOTE(review): mode config is cleaned up before the hardware is
	 * disabled — confirm no CRTC/encoder destroy hook relies on the
	 * display engine still running at this point.
	 */
	drm_mode_config_cleanup(dev);

	nv50_display_disable(dev);
	nv50_evo_channel_del(&dev_priv->evo);

	return 0;
}
553
554static inline uint32_t
555nv50_display_mode_ctrl(struct drm_device *dev, bool sor, int or)
556{
557 struct drm_nouveau_private *dev_priv = dev->dev_private;
558 uint32_t mc;
559
560 if (sor) {
561 if (dev_priv->chipset < 0x90 ||
562 dev_priv->chipset == 0x92 || dev_priv->chipset == 0xa0)
563 mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_P(or));
564 else
565 mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_P(or));
566 } else {
567 mc = nv_rd32(dev, NV50_PDISPLAY_DAC_MODE_CTRL_P(or));
568 }
569
570 return mc;
571}
572
573static int
574nv50_display_irq_head(struct drm_device *dev, int *phead,
575 struct dcb_entry **pdcbent)
576{
577 struct drm_nouveau_private *dev_priv = dev->dev_private;
578 uint32_t unk30 = nv_rd32(dev, NV50_PDISPLAY_UNK30_CTRL);
579 uint32_t dac = 0, sor = 0;
580 int head, i, or = 0, type = OUTPUT_ANY;
581
582 /* We're assuming that head 0 *or* head 1 will be active here,
583 * and not both. I'm not sure if the hw will even signal both
584 * ever, but it definitely shouldn't for us as we commit each
585 * CRTC separately, and submission will be blocked by the GPU
586 * until we handle each in turn.
587 */
588 NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
589 head = ffs((unk30 >> 9) & 3) - 1;
590 if (head < 0)
591 return -EINVAL;
592
593 /* This assumes CRTCs are never bound to multiple encoders, which
594 * should be the case.
595 */
596 for (i = 0; i < 3 && type == OUTPUT_ANY; i++) {
597 uint32_t mc = nv50_display_mode_ctrl(dev, false, i);
598 if (!(mc & (1 << head)))
599 continue;
600
601 switch ((mc >> 8) & 0xf) {
602 case 0: type = OUTPUT_ANALOG; break;
603 case 1: type = OUTPUT_TV; break;
604 default:
605 NV_ERROR(dev, "unknown dac mode_ctrl: 0x%08x\n", dac);
606 return -1;
607 }
608
609 or = i;
610 }
611
612 for (i = 0; i < 4 && type == OUTPUT_ANY; i++) {
613 uint32_t mc = nv50_display_mode_ctrl(dev, true, i);
614 if (!(mc & (1 << head)))
615 continue;
616
617 switch ((mc >> 8) & 0xf) {
618 case 0: type = OUTPUT_LVDS; break;
619 case 1: type = OUTPUT_TMDS; break;
620 case 2: type = OUTPUT_TMDS; break;
621 case 5: type = OUTPUT_TMDS; break;
622 case 8: type = OUTPUT_DP; break;
623 case 9: type = OUTPUT_DP; break;
624 default:
625 NV_ERROR(dev, "unknown sor mode_ctrl: 0x%08x\n", sor);
626 return -1;
627 }
628
629 or = i;
630 }
631
632 NV_DEBUG_KMS(dev, "type %d, or %d\n", type, or);
633 if (type == OUTPUT_ANY) {
634 NV_ERROR(dev, "unknown encoder!!\n");
635 return -1;
636 }
637
638 for (i = 0; i < dev_priv->vbios.dcb.entries; i++) {
639 struct dcb_entry *dcbent = &dev_priv->vbios.dcb.entry[i];
640
641 if (dcbent->type != type)
642 continue;
643
644 if (!(dcbent->or & (1 << or)))
645 continue;
646
647 *phead = head;
648 *pdcbent = dcbent;
649 return 0;
650 }
651
652 NV_ERROR(dev, "no DCB entry for %d %d\n", dac != 0, or);
653 return 0;
654}
655
/* Select the VBIOS display-script id to run for an output at the given
 * pixel clock.  The base id comes from the committed mode control value;
 * extra bits encode dual-link (0x0100) and 24-bit panel (0x0200) for LVDS,
 * and high-clock (0x0100) for TMDS.  A module parameter can override the
 * LVDS/TMDS result.
 */
static uint32_t
nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent,
			   int pxclk)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_connector *nv_connector = NULL;
	struct drm_encoder *encoder;
	struct nvbios *bios = &dev_priv->vbios;
	uint32_t mc, script = 0, or;

	/* Find the connector attached to this DCB entry's encoder, for the
	 * EDID-based LVDS heuristics below.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);

		if (nv_encoder->dcb != dcbent)
			continue;

		nv_connector = nouveau_encoder_connector_get(nv_encoder);
		break;
	}

	or = ffs(dcbent->or) - 1;
	mc = nv50_display_mode_ctrl(dev, dcbent->type != OUTPUT_ANALOG, or);
	switch (dcbent->type) {
	case OUTPUT_LVDS:
		script = (mc >> 8) & 0xf;
		if (bios->fp_no_ddc) {
			/* No DDC: trust the VBIOS panel description. */
			if (bios->fp.dual_link)
				script |= 0x0100;
			if (bios->fp.if_is_24bit)
				script |= 0x0200;
		} else {
			/* Derive link count from the pixel clock and panel
			 * depth from straps and/or the EDID.
			 */
			if (pxclk >= bios->fp.duallink_transition_clk) {
				script |= 0x0100;
				if (bios->fp.strapless_is_24bit & 2)
					script |= 0x0200;
			} else
			if (bios->fp.strapless_is_24bit & 1)
				script |= 0x0200;

			if (nv_connector && nv_connector->edid &&
			    (nv_connector->edid->revision >= 4) &&
			    (nv_connector->edid->input & 0x70) >= 0x20)
				script |= 0x0200;
		}

		if (nouveau_uscript_lvds >= 0) {
			NV_INFO(dev, "override script 0x%04x with 0x%04x "
				      "for output LVDS-%d\n", script,
				      nouveau_uscript_lvds, or);
			script = nouveau_uscript_lvds;
		}
		break;
	case OUTPUT_TMDS:
		script = (mc >> 8) & 0xf;
		if (pxclk >= 165000)
			script |= 0x0100;

		if (nouveau_uscript_tmds >= 0) {
			NV_INFO(dev, "override script 0x%04x with 0x%04x "
				      "for output TMDS-%d\n", script,
				      nouveau_uscript_tmds, or);
			script = nouveau_uscript_tmds;
		}
		break;
	case OUTPUT_DP:
		script = (mc >> 8) & 0xf;
		break;
	case OUTPUT_ANALOG:
		script = 0xff;
		break;
	default:
		NV_ERROR(dev, "modeset on unsupported output type!\n");
		break;
	}

	return script;
}
733
734static void
735nv50_display_vblank_crtc_handler(struct drm_device *dev, int crtc)
736{
737 struct drm_nouveau_private *dev_priv = dev->dev_private;
738 struct nouveau_channel *chan;
739 struct list_head *entry, *tmp;
740
741 list_for_each_safe(entry, tmp, &dev_priv->vbl_waiting) {
742 chan = list_entry(entry, struct nouveau_channel, nvsw.vbl_wait);
743
744 nouveau_bo_wr32(chan->notifier_bo, chan->nvsw.vblsem_offset,
745 chan->nvsw.vblsem_rval);
746 list_del(&chan->nvsw.vbl_wait);
747 }
748}
749
/* Service the per-CRTC vblank interrupt bits in PDISPLAY_INTR_1:
 * release any channels waiting on a vblank semaphore, then mask and
 * ack the serviced sources.
 */
static void
nv50_display_vblank_handler(struct drm_device *dev, uint32_t intr)
{
	/* Only the vblank bits of INTR_1 are relevant here. */
	intr &= NV50_PDISPLAY_INTR_1_VBLANK_CRTC;

	if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_0)
		nv50_display_vblank_crtc_handler(dev, 0);

	if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_1)
		nv50_display_vblank_crtc_handler(dev, 1);

	/* Disable the serviced vblank sources in INTR_EN, then ack them
	 * by writing the same bits back to INTR_1. */
	nv_wr32(dev, NV50_PDISPLAY_INTR_EN, nv_rd32(dev,
		NV50_PDISPLAY_INTR_EN) & ~intr);
	nv_wr32(dev, NV50_PDISPLAY_INTR_1, intr);
}
765
/* First stage of the three-phase display clock-change sequence
 * (UNK10 -> UNK20 -> UNK40): run the BIOS display-table script for the
 * affected output with pxclk == -1 (pre-change), then ack the IRQ.
 */
static void
nv50_display_unk10_handler(struct drm_device *dev)
{
	struct dcb_entry *dcbent;
	int head, ret;

	/* Identify which head/output raised the interrupt; on failure
	 * just ack so the IRQ doesn't fire forever. */
	ret = nv50_display_irq_head(dev, &head, &dcbent);
	if (ret)
		goto ack;

	/* Clear bit 3 of 0x619494 — purpose unknown; UNK40 sets it back
	 * at the end of the sequence. */
	nv_wr32(dev, 0x619494, nv_rd32(dev, 0x619494) & ~8);

	nouveau_bios_run_display_table(dev, dcbent, 0, -1);

ack:
	/* Ack the interrupt and kick the state machine (0x610030). */
	nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK10);
	nv_wr32(dev, 0x610030, 0x80000000);
}
784
/* Second stage of the clock-change sequence: reprogram the CRTC PLL
 * for the new pixel clock and run the matching BIOS output scripts,
 * then update the CRTC/SOR/DAC clock-control registers.
 */
static void
nv50_display_unk20_handler(struct drm_device *dev)
{
	struct dcb_entry *dcbent;
	uint32_t tmp, pclk, script;
	int head, or, ret;

	ret = nv50_display_irq_head(dev, &head, &dcbent);
	if (ret)
		goto ack;
	/* Output resource index = lowest set bit of the DCB 'or' mask. */
	or = ffs(dcbent->or) - 1;
	/* Requested pixel clock in kHz, lower 22 bits of the CRTC
	 * clock register. */
	pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(head, CLOCK)) & 0x3fffff;
	script = nv50_display_script_select(dev, dcbent, pclk);

	NV_DEBUG_KMS(dev, "head %d pxclk: %dKHz\n", head, pclk);

	/* pxclk == -2 runs the pre-clock-change script; skipped for DP
	 * outputs. */
	if (dcbent->type != OUTPUT_DP)
		nouveau_bios_run_display_table(dev, dcbent, 0, -2);

	nv50_crtc_set_clock(dev, head, pclk);

	nouveau_bios_run_display_table(dev, dcbent, script, pclk);

	/* NOTE(review): mask literal has 7 nibbles; value is 0xf —
	 * presumably ~0x0000000f was meant, same result. */
	tmp = nv_rd32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head));
	tmp &= ~0x000000f;
	nv_wr32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head), tmp);

	if (dcbent->type != OUTPUT_ANALOG) {
		/* SOR (TMDS/LVDS/DP): bit pattern 0x101 selects the
		 * dual-link configuration when the script asked for it
		 * (script bit 0x0100). */
		tmp = nv_rd32(dev, NV50_PDISPLAY_SOR_CLK_CTRL2(or));
		tmp &= ~0x00000f0f;
		if (script & 0x0100)
			tmp |= 0x00000101;
		nv_wr32(dev, NV50_PDISPLAY_SOR_CLK_CTRL2(or), tmp);
	} else {
		nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL2(or), 0);
	}

ack:
	nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK20);
	nv_wr32(dev, 0x610030, 0x80000000);
}
826
/* Final stage of the clock-change sequence: run the post-change BIOS
 * script (negative pxclk marks the post-change invocation), ack the
 * IRQ, and restore bit 3 of 0x619494 cleared by the UNK10 stage.
 */
static void
nv50_display_unk40_handler(struct drm_device *dev)
{
	struct dcb_entry *dcbent;
	int head, pclk, script, ret;

	ret = nv50_display_irq_head(dev, &head, &dcbent);
	if (ret)
		goto ack;
	pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(head, CLOCK)) & 0x3fffff;
	script = nv50_display_script_select(dev, dcbent, pclk);

	nouveau_bios_run_display_table(dev, dcbent, script, -pclk);

ack:
	nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK40);
	nv_wr32(dev, 0x610030, 0x80000000);
	nv_wr32(dev, 0x619494, nv_rd32(dev, 0x619494) | 8);
}
846
/* Bottom half for the display clock-change interrupts, run from the
 * driver workqueue (queued by nv50_display_irq_handler, which disables
 * PMC interrupt delivery first).  Processes one pending stage per loop
 * iteration — at most one of UNK10/UNK20/UNK40, in that priority order
 * — until none remain, then re-enables PMC interrupts.
 */
void
nv50_display_irq_handler_bh(struct work_struct *work)
{
	struct drm_nouveau_private *dev_priv =
		container_of(work, struct drm_nouveau_private, irq_work);
	struct drm_device *dev = dev_priv->dev;

	for (;;) {
		uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0);
		uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1);

		NV_DEBUG_KMS(dev, "PDISPLAY_INTR_BH 0x%08x 0x%08x\n", intr0, intr1);

		/* Each handler acks its own bit, so re-reading INTR_1 on
		 * the next iteration picks up the next pending stage. */
		if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK10)
			nv50_display_unk10_handler(dev);
		else
		if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK20)
			nv50_display_unk20_handler(dev);
		else
		if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK40)
			nv50_display_unk40_handler(dev);
		else
			break;
	}

	/* Re-enable PMC interrupt delivery, disabled by the top half. */
	nv_wr32(dev, NV03_PMC_INTR_EN_0, 1);
}
874
/* Report and clear an EVO channel error (INTR_0 bit 16): log the
 * trapped method/data, then re-arm the trap registers.
 */
static void
nv50_display_error_handler(struct drm_device *dev)
{
	uint32_t addr, data;

	/* Ack the error interrupt before reading the trap state. */
	nv_wr32(dev, NV50_PDISPLAY_INTR_0, 0x00010000);
	addr = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_ADDR);
	data = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_DATA);

	/* Low 12 bits of addr = method offset; upper fields logged raw
	 * for debugging (channel id presumably in bits 12+ — confirm). */
	NV_ERROR(dev, "EvoCh %d Mthd 0x%04x Data 0x%08x (0x%04x 0x%02x)\n",
		 0, addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf);

	/* Writing bit 31 re-arms the trap logic. */
	nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR, 0x90000000);
}
889
/* Hotplug bottom half, run from the driver workqueue: decode which
 * hotplug GPIO lines changed, log plug/unplug per connector, and for
 * active DisplayPort outputs force the encoder DPMS state to match.
 * Finally acks the pending hotplug interrupt bits.
 */
void
nv50_display_irq_hotplug_bh(struct work_struct *work)
{
	struct drm_nouveau_private *dev_priv =
		container_of(work, struct drm_nouveau_private, hpd_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_connector *connector;
	/* GPIO state registers, indexed by (gpio line >> 3). */
	const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
	uint32_t unplug_mask, plug_mask, change_mask;
	uint32_t hpd0, hpd1 = 0;

	/* Pending events = status (0xe054/0xe074) masked by enabled
	 * sources (0xe050/0xe070); the second register pair only exists
	 * on chipsets >= 0x90. */
	hpd0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050);
	if (dev_priv->chipset >= 0x90)
		hpd1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070);

	/* Low 16 bits of each register report "plugged", high 16 bits
	 * "unplugged"; fold both registers into one mask of each. */
	plug_mask = (hpd0 & 0x0000ffff) | (hpd1 << 16);
	unplug_mask = (hpd0 >> 16) | (hpd1 & 0xffff0000);
	change_mask = plug_mask | unplug_mask;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct drm_encoder_helper_funcs *helper;
		struct nouveau_connector *nv_connector =
			nouveau_connector(connector);
		struct nouveau_encoder *nv_encoder;
		struct dcb_gpio_entry *gpio;
		uint32_t reg;
		bool plugged;

		if (!nv_connector->dcb)
			continue;

		/* Skip connectors whose hotplug GPIO didn't change. */
		gpio = nouveau_bios_gpio_entry(dev, nv_connector->dcb->gpio_tag);
		if (!gpio || !(change_mask & (1 << gpio->line)))
			continue;

		/* Current pin level: 4 bits of state per line, bit 2 of
		 * the nibble = "plugged" (presumably — confirm against
		 * GPIO register layout docs). */
		reg = nv_rd32(dev, gpio_reg[gpio->line >> 3]);
		plugged = !!(reg & (4 << ((gpio->line & 7) << 2)));
		NV_INFO(dev, "%splugged %s\n", plugged ? "" : "un",
			drm_get_connector_name(connector)) ;

		/* Only touch DPMS for outputs actively driving a CRTC. */
		if (!connector->encoder || !connector->encoder->crtc ||
		    !connector->encoder->crtc->enabled)
			continue;
		nv_encoder = nouveau_encoder(connector->encoder);
		helper = connector->encoder->helper_private;

		/* Only DP links need to be retrained/torn down here. */
		if (nv_encoder->dcb->type != OUTPUT_DP)
			continue;

		if (plugged)
			helper->dpms(connector->encoder, DRM_MODE_DPMS_ON);
		else
			helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF);
	}

	/* Ack by writing the pending bits back to the status registers. */
	nv_wr32(dev, 0xe054, nv_rd32(dev, 0xe054));
	if (dev_priv->chipset >= 0x90)
		nv_wr32(dev, 0xe074, nv_rd32(dev, 0xe074));
}
949
/* Top-half display interrupt handler.  Hotplug and clock-change
 * interrupts are deferred to workqueue bottom halves; vblank and EVO
 * errors are handled inline.  'delayed' accumulates the clock-change
 * bits handed to the bottom half so the loop doesn't spin on them.
 */
void
nv50_display_irq_handler(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t delayed = 0;

	if (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG) {
		if (!work_pending(&dev_priv->hpd_work))
			queue_work(dev_priv->wq, &dev_priv->hpd_work);
	}

	while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
		uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0);
		uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1);
		uint32_t clock;

		NV_DEBUG_KMS(dev, "PDISPLAY_INTR 0x%08x 0x%08x\n", intr0, intr1);

		/* Done when nothing is pending except bits already
		 * delegated to the bottom half. */
		if (!intr0 && !(intr1 & ~delayed))
			break;

		/* EVO channel error. */
		if (intr0 & 0x00010000) {
			nv50_display_error_handler(dev);
			intr0 &= ~0x00010000;
		}

		if (intr1 & NV50_PDISPLAY_INTR_1_VBLANK_CRTC) {
			nv50_display_vblank_handler(dev, intr1);
			intr1 &= ~NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
		}

		/* Clock-change stages need the modeset state machine:
		 * mask PMC interrupts and punt to the workqueue, which
		 * re-enables them when done. */
		clock = (intr1 & (NV50_PDISPLAY_INTR_1_CLK_UNK10 |
				  NV50_PDISPLAY_INTR_1_CLK_UNK20 |
				  NV50_PDISPLAY_INTR_1_CLK_UNK40));
		if (clock) {
			nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
			if (!work_pending(&dev_priv->irq_work))
				queue_work(dev_priv->wq, &dev_priv->irq_work);
			delayed |= clock;
			intr1 &= ~clock;
		}

		/* Anything left is unexpected: log and ack so we don't
		 * storm. */
		if (intr0) {
			NV_ERROR(dev, "unknown PDISPLAY_INTR_0: 0x%08x\n", intr0);
			nv_wr32(dev, NV50_PDISPLAY_INTR_0, intr0);
		}

		if (intr1) {
			NV_ERROR(dev,
				 "unknown PDISPLAY_INTR_1: 0x%08x\n", intr1);
			nv_wr32(dev, NV50_PDISPLAY_INTR_1, intr1);
		}
	}
}
1004
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
new file mode 100644
index 000000000000..581d405ac014
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -0,0 +1,47 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#ifndef __NV50_DISPLAY_H__
28#define __NV50_DISPLAY_H__
29
30#include "drmP.h"
31#include "drm.h"
32#include "nouveau_drv.h"
33#include "nouveau_dma.h"
34#include "nouveau_reg.h"
35#include "nouveau_crtc.h"
36#include "nv50_evo.h"
37
38void nv50_display_irq_handler(struct drm_device *dev);
39void nv50_display_irq_handler_bh(struct work_struct *work);
40void nv50_display_irq_hotplug_bh(struct work_struct *work);
41int nv50_display_init(struct drm_device *dev);
42int nv50_display_create(struct drm_device *dev);
43int nv50_display_destroy(struct drm_device *dev);
44int nv50_crtc_blank(struct nouveau_crtc *, bool blank);
45int nv50_crtc_set_clock(struct drm_device *, int head, int pclk);
46
47#endif /* __NV50_DISPLAY_H__ */
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.h b/drivers/gpu/drm/nouveau/nv50_evo.h
new file mode 100644
index 000000000000..aae13343bcec
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_evo.h
@@ -0,0 +1,113 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#define NV50_EVO_UPDATE 0x00000080
28#define NV50_EVO_UNK84 0x00000084
29#define NV50_EVO_UNK84_NOTIFY 0x40000000
30#define NV50_EVO_UNK84_NOTIFY_DISABLED 0x00000000
31#define NV50_EVO_UNK84_NOTIFY_ENABLED 0x40000000
32#define NV50_EVO_DMA_NOTIFY 0x00000088
33#define NV50_EVO_DMA_NOTIFY_HANDLE 0xffffffff
34#define NV50_EVO_DMA_NOTIFY_HANDLE_NONE 0x00000000
35#define NV50_EVO_UNK8C 0x0000008C
36
37#define NV50_EVO_DAC(n, r) ((n) * 0x80 + NV50_EVO_DAC_##r)
38#define NV50_EVO_DAC_MODE_CTRL 0x00000400
39#define NV50_EVO_DAC_MODE_CTRL_CRTC0 0x00000001
40#define NV50_EVO_DAC_MODE_CTRL_CRTC1 0x00000002
41#define NV50_EVO_DAC_MODE_CTRL2 0x00000404
42#define NV50_EVO_DAC_MODE_CTRL2_NHSYNC 0x00000001
43#define NV50_EVO_DAC_MODE_CTRL2_NVSYNC 0x00000002
44
45#define NV50_EVO_SOR(n, r) ((n) * 0x40 + NV50_EVO_SOR_##r)
46#define NV50_EVO_SOR_MODE_CTRL 0x00000600
47#define NV50_EVO_SOR_MODE_CTRL_CRTC0 0x00000001
48#define NV50_EVO_SOR_MODE_CTRL_CRTC1 0x00000002
49#define NV50_EVO_SOR_MODE_CTRL_TMDS 0x00000100
50#define NV50_EVO_SOR_MODE_CTRL_TMDS_DUAL_LINK 0x00000400
51#define NV50_EVO_SOR_MODE_CTRL_NHSYNC 0x00001000
52#define NV50_EVO_SOR_MODE_CTRL_NVSYNC 0x00002000
53
54#define NV50_EVO_CRTC(n, r) ((n) * 0x400 + NV50_EVO_CRTC_##r)
55#define NV84_EVO_CRTC(n, r) ((n) * 0x400 + NV84_EVO_CRTC_##r)
56#define NV50_EVO_CRTC_UNK0800 0x00000800
57#define NV50_EVO_CRTC_CLOCK 0x00000804
58#define NV50_EVO_CRTC_INTERLACE 0x00000808
59#define NV50_EVO_CRTC_DISPLAY_START 0x00000810
60#define NV50_EVO_CRTC_DISPLAY_TOTAL 0x00000814
61#define NV50_EVO_CRTC_SYNC_DURATION 0x00000818
62#define NV50_EVO_CRTC_SYNC_START_TO_BLANK_END 0x0000081c
63#define NV50_EVO_CRTC_UNK0820 0x00000820
64#define NV50_EVO_CRTC_UNK0824 0x00000824
65#define NV50_EVO_CRTC_UNK082C 0x0000082c
66#define NV50_EVO_CRTC_CLUT_MODE 0x00000840
67/* You can't have a palette in 8 bit mode (=OFF) */
68#define NV50_EVO_CRTC_CLUT_MODE_BLANK 0x00000000
69#define NV50_EVO_CRTC_CLUT_MODE_OFF 0x80000000
70#define NV50_EVO_CRTC_CLUT_MODE_ON 0xC0000000
71#define NV50_EVO_CRTC_CLUT_OFFSET 0x00000844
72#define NV84_EVO_CRTC_CLUT_DMA 0x0000085C
73#define NV84_EVO_CRTC_CLUT_DMA_HANDLE 0xffffffff
74#define NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE 0x00000000
75#define NV50_EVO_CRTC_FB_OFFSET 0x00000860
76#define NV50_EVO_CRTC_FB_SIZE 0x00000868
77#define NV50_EVO_CRTC_FB_CONFIG 0x0000086c
78#define NV50_EVO_CRTC_FB_CONFIG_MODE 0x00100000
79#define NV50_EVO_CRTC_FB_CONFIG_MODE_TILE 0x00000000
80#define NV50_EVO_CRTC_FB_CONFIG_MODE_PITCH 0x00100000
81#define NV50_EVO_CRTC_FB_DEPTH 0x00000870
82#define NV50_EVO_CRTC_FB_DEPTH_8 0x00001e00
83#define NV50_EVO_CRTC_FB_DEPTH_15 0x0000e900
84#define NV50_EVO_CRTC_FB_DEPTH_16 0x0000e800
85#define NV50_EVO_CRTC_FB_DEPTH_24 0x0000cf00
86#define NV50_EVO_CRTC_FB_DEPTH_30 0x0000d100
87#define NV50_EVO_CRTC_FB_DMA 0x00000874
88#define NV50_EVO_CRTC_FB_DMA_HANDLE 0xffffffff
89#define NV50_EVO_CRTC_FB_DMA_HANDLE_NONE 0x00000000
90#define NV50_EVO_CRTC_CURSOR_CTRL 0x00000880
91#define NV50_EVO_CRTC_CURSOR_CTRL_HIDE 0x05000000
92#define NV50_EVO_CRTC_CURSOR_CTRL_SHOW 0x85000000
93#define NV50_EVO_CRTC_CURSOR_OFFSET 0x00000884
94#define NV84_EVO_CRTC_CURSOR_DMA 0x0000089c
95#define NV84_EVO_CRTC_CURSOR_DMA_HANDLE 0xffffffff
96#define NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE 0x00000000
97#define NV50_EVO_CRTC_DITHER_CTRL 0x000008a0
98#define NV50_EVO_CRTC_DITHER_CTRL_OFF 0x00000000
99#define NV50_EVO_CRTC_DITHER_CTRL_ON 0x00000011
100#define NV50_EVO_CRTC_SCALE_CTRL 0x000008a4
101#define NV50_EVO_CRTC_SCALE_CTRL_INACTIVE 0x00000000
102#define NV50_EVO_CRTC_SCALE_CTRL_ACTIVE 0x00000009
103#define NV50_EVO_CRTC_COLOR_CTRL 0x000008a8
104#define NV50_EVO_CRTC_COLOR_CTRL_COLOR 0x00040000
105#define NV50_EVO_CRTC_FB_POS 0x000008c0
106#define NV50_EVO_CRTC_REAL_RES 0x000008c8
107#define NV50_EVO_CRTC_SCALE_CENTER_OFFSET 0x000008d4
108#define NV50_EVO_CRTC_SCALE_CENTER_OFFSET_VAL(x, y) \
109 ((((unsigned)y << 16) & 0xFFFF0000) | (((unsigned)x) & 0x0000FFFF))
110/* Both of these are needed, otherwise nothing happens. */
111#define NV50_EVO_CRTC_SCALE_RES1 0x000008d8
112#define NV50_EVO_CRTC_SCALE_RES2 0x000008dc
113
diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c
new file mode 100644
index 000000000000..a95e6941ba88
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_fb.c
@@ -0,0 +1,32 @@
1#include "drmP.h"
2#include "drm.h"
3#include "nouveau_drv.h"
4#include "nouveau_drm.h"
5
6int
7nv50_fb_init(struct drm_device *dev)
8{
9 /* This is needed to get meaningful information from 100c90
10 * on traps. No idea what these values mean exactly. */
11 struct drm_nouveau_private *dev_priv = dev->dev_private;
12
13 switch (dev_priv->chipset) {
14 case 0x50:
15 nv_wr32(dev, 0x100c90, 0x0707ff);
16 break;
17 case 0xa5:
18 case 0xa8:
19 nv_wr32(dev, 0x100c90, 0x0d0fff);
20 break;
21 default:
22 nv_wr32(dev, 0x100c90, 0x1d07ff);
23 break;
24 }
25
26 return 0;
27}
28
/* Teardown counterpart of nv50_fb_init; nothing to undo on nv50. */
void
nv50_fb_takedown(struct drm_device *dev)
{
}
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
new file mode 100644
index 000000000000..a8c70e7e9184
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -0,0 +1,268 @@
1#include "drmP.h"
2#include "nouveau_drv.h"
3#include "nouveau_dma.h"
4#include "nouveau_fbcon.h"
5
/* Accelerated rectangle fill for the fbcon, via the nv50 2D engine
 * (methods set up in nv50_fbcon_accel_init).  Falls back to
 * cfb_fillrect() when acceleration is disabled or becomes disabled
 * after a ring stall.
 */
void
nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	struct nouveau_fbcon_par *par = info->par;
	struct drm_device *dev = par->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = dev_priv->channel;

	if (info->state != FBINFO_STATE_RUNNING)
		return;

	/* Reserve all ring space up front (7 dwords for plain copy, 11
	 * with the ROP change).  On failure, flag a GPU lockup — which
	 * presumably sets FBINFO_HWACCEL_DISABLED, taking the software
	 * fallback below (confirm in nouveau_fbcon_gpu_lockup). */
	if (!(info->flags & FBINFO_HWACCEL_DISABLED) &&
	    RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11)) {
		nouveau_fbcon_gpu_lockup(info);
	}

	if (info->flags & FBINFO_HWACCEL_DISABLED) {
		cfb_fillrect(info, rect);
		return;
	}

	/* Non-copy ROP: temporarily switch method 0x02ac to mode 1,
	 * restored to 3 after the rect is emitted. */
	if (rect->rop != ROP_COPY) {
		BEGIN_RING(chan, NvSub2D, 0x02ac, 1);
		OUT_RING(chan, 1);
	}
	/* 0x0588 = fill colour; palettized visuals need the resolved
	 * pseudo-palette entry. */
	BEGIN_RING(chan, NvSub2D, 0x0588, 1);
	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
	    info->fix.visual == FB_VISUAL_DIRECTCOLOR)
		OUT_RING(chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
	else
		OUT_RING(chan, rect->color);
	/* 0x0600..0x060c = x0, y0, x1, y1 of the fill rectangle. */
	BEGIN_RING(chan, NvSub2D, 0x0600, 4);
	OUT_RING(chan, rect->dx);
	OUT_RING(chan, rect->dy);
	OUT_RING(chan, rect->dx + rect->width);
	OUT_RING(chan, rect->dy + rect->height);
	if (rect->rop != ROP_COPY) {
		BEGIN_RING(chan, NvSub2D, 0x02ac, 1);
		OUT_RING(chan, 3);
	}
	FIRE_RING(chan);
}
48
/* Accelerated screen-to-screen blit for the fbcon via the 2D engine.
 * Falls back to cfb_copyarea() when acceleration is (or becomes)
 * disabled.
 */
void
nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
	struct nouveau_fbcon_par *par = info->par;
	struct drm_device *dev = par->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = dev_priv->channel;

	if (info->state != FBINFO_STATE_RUNNING)
		return;

	/* Reserve the full 12 dwords up front; failure flags a lockup
	 * and we fall through to the software path below. */
	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 12)) {
		nouveau_fbcon_gpu_lockup(info);
	}

	if (info->flags & FBINFO_HWACCEL_DISABLED) {
		cfb_copyarea(info, region);
		return;
	}

	BEGIN_RING(chan, NvSub2D, 0x0110, 1);
	OUT_RING(chan, 0);
	/* 0x08b0..0x08bc = destination x, y, width, height. */
	BEGIN_RING(chan, NvSub2D, 0x08b0, 4);
	OUT_RING(chan, region->dx);
	OUT_RING(chan, region->dy);
	OUT_RING(chan, region->width);
	OUT_RING(chan, region->height);
	/* 0x08d0..0x08dc = source position as 32.32 fixed point
	 * (high word zero). */
	BEGIN_RING(chan, NvSub2D, 0x08d0, 4);
	OUT_RING(chan, 0);
	OUT_RING(chan, region->sx);
	OUT_RING(chan, 0);
	OUT_RING(chan, region->sy);
	FIRE_RING(chan);
}
83
/* Accelerated 1-bpp expansion blit (console glyphs) via the 2D engine.
 * Colour images and any failure path fall back to cfb_imageblit().
 */
void
nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
	struct nouveau_fbcon_par *par = info->par;
	struct drm_device *dev = par->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = dev_priv->channel;
	uint32_t width, dwords, *data = (uint32_t *)image->data;
	/* Mask of bits above the pixel depth, OR'd into palette colours
	 * (presumably to force the alpha/unused bits — confirm). */
	uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
	uint32_t *palette = info->pseudo_palette;

	if (info->state != FBINFO_STATE_RUNNING)
		return;

	/* Only monochrome (depth 1) expansion is accelerated. */
	if (image->depth != 1) {
		cfb_imageblit(info, image);
		return;
	}

	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 11)) {
		nouveau_fbcon_gpu_lockup(info);
	}

	if (info->flags & FBINFO_HWACCEL_DISABLED) {
		cfb_imageblit(info, image);
		return;
	}

	/* Source bitmap rows are padded to 32 bits; total payload in
	 * dwords = padded bits / 32. */
	width = ALIGN(image->width, 32);
	dwords = (width * image->height) >> 5;

	/* 0x0814/0x0818 = background/foreground colours. */
	BEGIN_RING(chan, NvSub2D, 0x0814, 2);
	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
		OUT_RING(chan, palette[image->bg_color] | mask);
		OUT_RING(chan, palette[image->fg_color] | mask);
	} else {
		OUT_RING(chan, image->bg_color);
		OUT_RING(chan, image->fg_color);
	}
	/* 0x0838/0x083c = source width/height. */
	BEGIN_RING(chan, NvSub2D, 0x0838, 2);
	OUT_RING(chan, image->width);
	OUT_RING(chan, image->height);
	/* 0x0850..0x085c = destination position (32.32 fixed point). */
	BEGIN_RING(chan, NvSub2D, 0x0850, 4);
	OUT_RING(chan, 0);
	OUT_RING(chan, image->dx);
	OUT_RING(chan, 0);
	OUT_RING(chan, image->dy);

	/* Stream the bitmap in chunks of at most 2047 dwords (max
	 * method count for one BEGIN_RING).  If ring space runs out
	 * mid-stream, give up and redo the whole blit in software. */
	while (dwords) {
		int push = dwords > 2047 ? 2047 : dwords;

		if (RING_SPACE(chan, push + 1)) {
			nouveau_fbcon_gpu_lockup(info);
			cfb_imageblit(info, image);
			return;
		}

		dwords -= push;

		/* 0x40000000 flag on the method: non-incrementing data
		 * method 0x0860 (presumably — confirm against nouveau
		 * DMA method encoding). */
		BEGIN_RING(chan, NvSub2D, 0x40000860, push);
		OUT_RINGp(chan, data, push);
		data += push;
	}

	FIRE_RING(chan);
}
151
/* One-time setup of the nv50 2D engine object for fbcon acceleration:
 * creates/binds the 0x502d class on the kernel channel, then emits the
 * static state (ROP, clip, source/destination surface format, pitch
 * and VRAM address of the framebuffer).
 * Returns 0 on success or a negative errno (unsupported pixel format,
 * object creation failure, or ring stall).
 */
int
nv50_fbcon_accel_init(struct fb_info *info)
{
	struct nouveau_fbcon_par *par = info->par;
	struct drm_device *dev = par->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = dev_priv->channel;
	struct nouveau_gpuobj *eng2d = NULL;
	uint64_t fb;
	int ret, format;

	/* Translate the framebuffer's physical address into the
	 * channel's VRAM virtual address space. */
	fb = info->fix.smem_start - dev_priv->fb_phys + dev_priv->vm_vram_base;

	/* Map fb bpp to the 2D engine surface format code. */
	switch (info->var.bits_per_pixel) {
	case 8:
		format = 0xf3;
		break;
	case 15:
		format = 0xf8;
		break;
	case 16:
		format = 0xe8;
		break;
	case 32:
		switch (info->var.transp.length) {
		case 0: /* depth 24 */
		case 8: /* depth 32, just use 24.. */
			format = 0xe6;
			break;
		case 2: /* depth 30 */
			format = 0xd1;
			break;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}

	/* Instantiate the 2D engine object (class 0x502d) and hash it
	 * into the channel's RAMHT under the Nv2D handle. */
	ret = nouveau_gpuobj_gr_new(dev_priv->channel, 0x502d, &eng2d);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, Nv2D, eng2d, NULL);
	if (ret)
		return ret;

	ret = RING_SPACE(chan, 59);
	if (ret) {
		nouveau_fbcon_gpu_lockup(info);
		return ret;
	}

	/* Bind the object to subchannel NvSub2D, then set its DMA
	 * objects (notifier + three VRAM contexts). */
	BEGIN_RING(chan, NvSub2D, 0x0000, 1);
	OUT_RING(chan, Nv2D);
	BEGIN_RING(chan, NvSub2D, 0x0180, 4);
	OUT_RING(chan, NvNotify0);
	OUT_RING(chan, chan->vram_handle);
	OUT_RING(chan, chan->vram_handle);
	OUT_RING(chan, chan->vram_handle);
	BEGIN_RING(chan, NvSub2D, 0x0290, 1);
	OUT_RING(chan, 0);
	BEGIN_RING(chan, NvSub2D, 0x0888, 1);
	OUT_RING(chan, 1);
	/* 0x02ac = operation (3), 0x02a0 = ROP value (0x55). */
	BEGIN_RING(chan, NvSub2D, 0x02ac, 1);
	OUT_RING(chan, 3);
	BEGIN_RING(chan, NvSub2D, 0x02a0, 1);
	OUT_RING(chan, 0x55);
	BEGIN_RING(chan, NvSub2D, 0x08c0, 4);
	OUT_RING(chan, 0);
	OUT_RING(chan, 1);
	OUT_RING(chan, 0);
	OUT_RING(chan, 1);
	/* Pattern/clip format state. */
	BEGIN_RING(chan, NvSub2D, 0x0580, 2);
	OUT_RING(chan, 4);
	OUT_RING(chan, format);
	BEGIN_RING(chan, NvSub2D, 0x02e8, 2);
	OUT_RING(chan, 2);
	OUT_RING(chan, 1);
	/* Monochrome expansion source format + little-endian bit order. */
	BEGIN_RING(chan, NvSub2D, 0x0804, 1);
	OUT_RING(chan, format);
	BEGIN_RING(chan, NvSub2D, 0x0800, 1);
	OUT_RING(chan, 1);
	BEGIN_RING(chan, NvSub2D, 0x0808, 3);
	OUT_RING(chan, 0);
	OUT_RING(chan, 0);
	OUT_RING(chan, 1);
	BEGIN_RING(chan, NvSub2D, 0x081c, 1);
	OUT_RING(chan, 1);
	BEGIN_RING(chan, NvSub2D, 0x0840, 4);
	OUT_RING(chan, 0);
	OUT_RING(chan, 1);
	OUT_RING(chan, 0);
	OUT_RING(chan, 1);
	/* Destination surface: format, linear layout, pitch, size and
	 * 64-bit VRAM offset. */
	BEGIN_RING(chan, NvSub2D, 0x0200, 2);
	OUT_RING(chan, format);
	OUT_RING(chan, 1);
	BEGIN_RING(chan, NvSub2D, 0x0214, 5);
	OUT_RING(chan, info->fix.line_length);
	OUT_RING(chan, info->var.xres_virtual);
	OUT_RING(chan, info->var.yres_virtual);
	OUT_RING(chan, upper_32_bits(fb));
	OUT_RING(chan, lower_32_bits(fb));
	/* Source surface: identical to the destination (screen-to-
	 * screen blits). */
	BEGIN_RING(chan, NvSub2D, 0x0230, 2);
	OUT_RING(chan, format);
	OUT_RING(chan, 1);
	BEGIN_RING(chan, NvSub2D, 0x0244, 5);
	OUT_RING(chan, info->fix.line_length);
	OUT_RING(chan, info->var.xres_virtual);
	OUT_RING(chan, info->var.yres_virtual);
	OUT_RING(chan, upper_32_bits(fb));
	OUT_RING(chan, lower_32_bits(fb));

	return 0;
}
268
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
new file mode 100644
index 000000000000..e20c0e2474f3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -0,0 +1,500 @@
1/*
2 * Copyright (C) 2007 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm.h"
29#include "nouveau_drv.h"
30
/* Per-device PFIFO state: two channel playlist buffers, used
 * double-buffered when the run list is rewritten. */
struct nv50_fifo_priv {
	struct nouveau_gpuobj_ref *thingo[2];	/* playlist gpuobjs */
	int cur_thingo;				/* index last submitted */
};
35
36#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50)
37
/* Rebuild and submit the PFIFO channel playlist ("thingo").  Writes
 * the ids of all channels that have a RAMFC into the inactive playlist
 * buffer, flips the double-buffer, and points the hardware at it.
 */
static void
nv50_fifo_init_thingo(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_fifo_priv *priv = dev_priv->engine.fifo.priv;
	struct nouveau_gpuobj_ref *cur;
	int i, nr;

	NV_DEBUG(dev, "\n");

	cur = priv->thingo[priv->cur_thingo];
	priv->cur_thingo = !priv->cur_thingo;

	/* We never schedule channel 0 or 127 */
	dev_priv->engine.instmem.prepare_access(dev, true);
	for (i = 1, nr = 0; i < 127; i++) {
		if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc)
			nv_wo32(dev, cur->gpuobj, nr++, i);
	}
	dev_priv->engine.instmem.finish_access(dev);

	/* 0x32f4 = playlist address >> 12, 0x32ec = entry count,
	 * 0x2500 write triggers the reload. */
	nv_wr32(dev, 0x32f4, cur->instance >> 12);
	nv_wr32(dev, 0x32ec, nr);
	nv_wr32(dev, 0x2500, 0x101);
}
63
/* Point the PFIFO context table entry for 'channel' at its RAMFC and
 * mark it enabled.  The RAMFC instance shift differs between G80
 * (>> 12) and later chips (>> 8).  'nt' ("no thingo") skips the
 * playlist rebuild, for batch use during init.
 * Returns -EINVAL if the channel has no RAMFC.
 */
static int
nv50_fifo_channel_enable(struct drm_device *dev, int channel, bool nt)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = dev_priv->fifos[channel];
	uint32_t inst;

	NV_DEBUG(dev, "ch%d\n", channel);

	if (!chan->ramfc)
		return -EINVAL;

	if (IS_G80)
		inst = chan->ramfc->instance >> 12;
	else
		inst = chan->ramfc->instance >> 8;
	nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel),
		inst | NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED);

	if (!nt)
		nv50_fifo_init_thingo(dev);
	return 0;
}
87
/* Disable a PFIFO channel by writing the per-generation "no instance"
 * mask into its context table entry.  'nt' skips the playlist rebuild,
 * as in nv50_fifo_channel_enable().
 */
static void
nv50_fifo_channel_disable(struct drm_device *dev, int channel, bool nt)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t inst;

	NV_DEBUG(dev, "ch%d, nt=%d\n", channel, nt);

	if (IS_G80)
		inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80;
	else
		inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84;
	nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst);

	if (!nt)
		nv50_fifo_init_thingo(dev);
}
105
/* Pulse the PFIFO enable bit in PMC to reset the engine. */
static void
nv50_fifo_init_reset(struct drm_device *dev)
{
	uint32_t pmc_e = NV_PMC_ENABLE_PFIFO;

	NV_DEBUG(dev, "\n");

	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e);
	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | pmc_e);
}
116
/* Ack any stale PFIFO interrupts, then enable all sources. */
static void
nv50_fifo_init_intr(struct drm_device *dev)
{
	NV_DEBUG(dev, "\n");

	nv_wr32(dev, NV03_PFIFO_INTR_0, 0xFFFFFFFF);
	nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF);
}
125
/* Program the full PFIFO context table from the current channel
 * array — enabled entries for channels that exist, disabled for the
 * rest — then rebuild the playlist once at the end (each per-channel
 * call passes nt=true to defer it).
 */
static void
nv50_fifo_init_context_table(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int i;

	NV_DEBUG(dev, "\n");

	for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) {
		if (dev_priv->fifos[i])
			nv50_fifo_channel_enable(dev, i, true);
		else
			nv50_fifo_channel_disable(dev, i, true);
	}

	nv50_fifo_init_thingo(dev);
}
143
/* Write the magic value NVIDIA's driver programs into 0x250c;
 * meaning unknown. */
static void
nv50_fifo_init_regs__nv(struct drm_device *dev)
{
	NV_DEBUG(dev, "\n");

	nv_wr32(dev, 0x250c, 0x6f3cfc34);
}
151
/* Zero assorted PFIFO state registers, then enable the two dummy
 * channels (0 and 127) set up by nv50_instmem.c.
 */
static void
nv50_fifo_init_regs(struct drm_device *dev)
{
	NV_DEBUG(dev, "\n");

	nv_wr32(dev, 0x2500, 0);
	nv_wr32(dev, 0x3250, 0);
	nv_wr32(dev, 0x3220, 0);
	nv_wr32(dev, 0x3204, 0);
	nv_wr32(dev, 0x3210, 0);
	nv_wr32(dev, 0x3270, 0);

	/* Enable dummy channels setup by nv50_instmem.c */
	nv50_fifo_channel_enable(dev, 0, true);
	nv50_fifo_channel_enable(dev, 127, true);
}
168
169int
170nv50_fifo_init(struct drm_device *dev)
171{
172 struct drm_nouveau_private *dev_priv = dev->dev_private;
173 struct nv50_fifo_priv *priv;
174 int ret;
175
176 NV_DEBUG(dev, "\n");
177
178 priv = dev_priv->engine.fifo.priv;
179 if (priv) {
180 priv->cur_thingo = !priv->cur_thingo;
181 goto just_reset;
182 }
183
184 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
185 if (!priv)
186 return -ENOMEM;
187 dev_priv->engine.fifo.priv = priv;
188
189 ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000,
190 NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[0]);
191 if (ret) {
192 NV_ERROR(dev, "error creating thingo0: %d\n", ret);
193 return ret;
194 }
195
196 ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000,
197 NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[1]);
198 if (ret) {
199 NV_ERROR(dev, "error creating thingo1: %d\n", ret);
200 return ret;
201 }
202
203just_reset:
204 nv50_fifo_init_reset(dev);
205 nv50_fifo_init_intr(dev);
206 nv50_fifo_init_context_table(dev);
207 nv50_fifo_init_regs__nv(dev);
208 nv50_fifo_init_regs(dev);
209 dev_priv->engine.fifo.enable(dev);
210 dev_priv->engine.fifo.reassign(dev, true);
211
212 return 0;
213}
214
/* Free the PFIFO private state and both playlist buffers.
 * Safe to call when init never ran (priv == NULL).
 */
void
nv50_fifo_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_fifo_priv *priv = dev_priv->engine.fifo.priv;

	NV_DEBUG(dev, "\n");

	if (!priv)
		return;

	nouveau_gpuobj_ref_del(dev, &priv->thingo[0]);
	nouveau_gpuobj_ref_del(dev, &priv->thingo[1]);

	dev_priv->engine.fifo.priv = NULL;
	kfree(priv);
}
232
233int
234nv50_fifo_channel_id(struct drm_device *dev)
235{
236 return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
237 NV50_PFIFO_CACHE1_PUSH1_CHID_MASK;
238}
239
/* Create the PFIFO context (RAMFC + engine cache) for a channel and
 * enable it in the context table.  On G80 the RAMFC lives at a fixed
 * offset inside the channel's RAMIN block (fake gpuobjs over it); on
 * G84+ both objects are allocated normally.  Returns 0 or a negative
 * errno.
 */
int
nv50_fifo_create_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramfc = NULL;
	unsigned long flags;
	int ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	if (IS_G80) {
		uint32_t ramin_poffset = chan->ramin->gpuobj->im_pramin->start;
		uint32_t ramin_voffset = chan->ramin->gpuobj->im_backing_start;

		/* RAMFC = first 0x100 bytes of the channel's RAMIN. */
		ret = nouveau_gpuobj_new_fake(dev, ramin_poffset, ramin_voffset,
					      0x100, NVOBJ_FLAG_ZERO_ALLOC |
					      NVOBJ_FLAG_ZERO_FREE, &ramfc,
					      &chan->ramfc);
		if (ret)
			return ret;

		/* Engine cache = 4 KiB at RAMIN + 0x400. */
		ret = nouveau_gpuobj_new_fake(dev, ramin_poffset + 0x0400,
					      ramin_voffset + 0x0400, 4096,
					      0, NULL, &chan->cache);
		if (ret)
			return ret;
	} else {
		ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 0x100, 256,
					     NVOBJ_FLAG_ZERO_ALLOC |
					     NVOBJ_FLAG_ZERO_FREE,
					     &chan->ramfc);
		if (ret)
			return ret;
		ramfc = chan->ramfc->gpuobj;

		ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 4096, 1024,
					     0, &chan->cache);
		if (ret)
			return ret;
	}

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);

	dev_priv->engine.instmem.prepare_access(dev, true);

	/* Fill in the RAMFC: pushbuf DMA object, RAMHT, default state
	 * words, and the indirect buffer base/size.  Offsets are the
	 * hardware RAMFC layout (magic values as used by the blob). */
	nv_wo32(dev, ramfc, 0x48/4, chan->pushbuf->instance >> 4);
	nv_wo32(dev, ramfc, 0x80/4, (0xc << 24) | (chan->ramht->instance >> 4));
	nv_wo32(dev, ramfc, 0x44/4, 0x2101ffff);
	nv_wo32(dev, ramfc, 0x60/4, 0x7fffffff);
	nv_wo32(dev, ramfc, 0x40/4, 0x00000000);
	nv_wo32(dev, ramfc, 0x7c/4, 0x30000001);
	nv_wo32(dev, ramfc, 0x78/4, 0x00000000);
	nv_wo32(dev, ramfc, 0x3c/4, 0x403f6078);
	nv_wo32(dev, ramfc, 0x50/4, chan->pushbuf_base +
				    chan->dma.ib_base * 4);
	nv_wo32(dev, ramfc, 0x54/4, drm_order(chan->dma.ib_max + 1) << 16);

	if (!IS_G80) {
		/* G84+: channel id and RAMFC pointer at the start of
		 * RAMIN, plus cache/RAMIN instances in the RAMFC. */
		nv_wo32(dev, chan->ramin->gpuobj, 0, chan->id);
		nv_wo32(dev, chan->ramin->gpuobj, 1,
						chan->ramfc->instance >> 8);

		nv_wo32(dev, ramfc, 0x88/4, chan->cache->instance >> 10);
		nv_wo32(dev, ramfc, 0x98/4, chan->ramin->instance >> 12);
	}

	dev_priv->engine.instmem.finish_access(dev);

	ret = nv50_fifo_channel_enable(dev, chan->id, false);
	if (ret) {
		NV_ERROR(dev, "error enabling ch%d: %d\n", chan->id, ret);
		spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
		nouveau_gpuobj_ref_del(dev, &chan->ramfc);
		return ret;
	}

	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
	return 0;
}
320
/*
 * Free a channel's fifo context.  chan->ramfc is cleared *before* the
 * hardware disable so the channel reads as disabled everywhere; the
 * gpuobj reference itself is released last.
 */
void
nv50_fifo_destroy_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj_ref *ramfc = chan->ramfc;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	/* This will ensure the channel is seen as disabled. */
	chan->ramfc = NULL;
	nv50_fifo_channel_disable(dev, chan->id, false);

	/* Dummy channel, also used on ch 127 */
	if (chan->id == 0)
		nv50_fifo_channel_disable(dev, 127, false);

	nouveau_gpuobj_ref_del(dev, &ramfc);
	nouveau_gpuobj_ref_del(dev, &chan->cache);
}
340
/*
 * Restore a channel's fifo state: copy the RAMFC image into the PFIFO
 * registers, replay any saved CACHE1 method/data pairs, then make the
 * channel current in CACHE1 PUSH1.
 *
 * NOTE(review): the register <-> RAMFC-offset table below comes from
 * reverse engineering and mirrors nv50_fifo_unload_context(); keep the
 * two in sync and do not reorder entries.
 */
int
nv50_fifo_load_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramfc = chan->ramfc->gpuobj;
	struct nouveau_gpuobj *cache = chan->cache->gpuobj;
	int ptr, cnt;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	dev_priv->engine.instmem.prepare_access(dev, false);

	/* RAMFC word offsets -> PFIFO registers. */
	nv_wr32(dev, 0x3330, nv_ro32(dev, ramfc, 0x00/4));
	nv_wr32(dev, 0x3334, nv_ro32(dev, ramfc, 0x04/4));
	nv_wr32(dev, 0x3240, nv_ro32(dev, ramfc, 0x08/4));
	nv_wr32(dev, 0x3320, nv_ro32(dev, ramfc, 0x0c/4));
	nv_wr32(dev, 0x3244, nv_ro32(dev, ramfc, 0x10/4));
	nv_wr32(dev, 0x3328, nv_ro32(dev, ramfc, 0x14/4));
	nv_wr32(dev, 0x3368, nv_ro32(dev, ramfc, 0x18/4));
	nv_wr32(dev, 0x336c, nv_ro32(dev, ramfc, 0x1c/4));
	nv_wr32(dev, 0x3370, nv_ro32(dev, ramfc, 0x20/4));
	nv_wr32(dev, 0x3374, nv_ro32(dev, ramfc, 0x24/4));
	nv_wr32(dev, 0x3378, nv_ro32(dev, ramfc, 0x28/4));
	nv_wr32(dev, 0x337c, nv_ro32(dev, ramfc, 0x2c/4));
	nv_wr32(dev, 0x3228, nv_ro32(dev, ramfc, 0x30/4));
	nv_wr32(dev, 0x3364, nv_ro32(dev, ramfc, 0x34/4));
	nv_wr32(dev, 0x32a0, nv_ro32(dev, ramfc, 0x38/4));
	nv_wr32(dev, 0x3224, nv_ro32(dev, ramfc, 0x3c/4));
	nv_wr32(dev, 0x324c, nv_ro32(dev, ramfc, 0x40/4));
	nv_wr32(dev, 0x2044, nv_ro32(dev, ramfc, 0x44/4));
	nv_wr32(dev, 0x322c, nv_ro32(dev, ramfc, 0x48/4));
	nv_wr32(dev, 0x3234, nv_ro32(dev, ramfc, 0x4c/4));
	nv_wr32(dev, 0x3340, nv_ro32(dev, ramfc, 0x50/4));
	nv_wr32(dev, 0x3344, nv_ro32(dev, ramfc, 0x54/4));
	nv_wr32(dev, 0x3280, nv_ro32(dev, ramfc, 0x58/4));
	nv_wr32(dev, 0x3254, nv_ro32(dev, ramfc, 0x5c/4));
	nv_wr32(dev, 0x3260, nv_ro32(dev, ramfc, 0x60/4));
	nv_wr32(dev, 0x3264, nv_ro32(dev, ramfc, 0x64/4));
	nv_wr32(dev, 0x3268, nv_ro32(dev, ramfc, 0x68/4));
	nv_wr32(dev, 0x326c, nv_ro32(dev, ramfc, 0x6c/4));
	nv_wr32(dev, 0x32e4, nv_ro32(dev, ramfc, 0x70/4));
	nv_wr32(dev, 0x3248, nv_ro32(dev, ramfc, 0x74/4));
	nv_wr32(dev, 0x2088, nv_ro32(dev, ramfc, 0x78/4));
	nv_wr32(dev, 0x2058, nv_ro32(dev, ramfc, 0x7c/4));
	nv_wr32(dev, 0x2210, nv_ro32(dev, ramfc, 0x80/4));

	/* Replay saved CACHE1 entries (method/data pairs).
	 * NOTE(review): 0x84 (entry count) is only written on non-G80
	 * in unload_context; on G80 this reads whatever the
	 * zero-initialised RAMFC holds — confirm intended. */
	cnt = nv_ro32(dev, ramfc, 0x84/4);
	for (ptr = 0; ptr < cnt; ptr++) {
		nv_wr32(dev, NV40_PFIFO_CACHE1_METHOD(ptr),
			nv_ro32(dev, cache, (ptr * 2) + 0));
		nv_wr32(dev, NV40_PFIFO_CACHE1_DATA(ptr),
			nv_ro32(dev, cache, (ptr * 2) + 1));
	}
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, cnt << 2);
	nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);

	/* guessing that all the 0x34xx regs aren't on NV50 */
	if (!IS_G80) {
		nv_wr32(dev, 0x340c, nv_ro32(dev, ramfc, 0x88/4));
		nv_wr32(dev, 0x3400, nv_ro32(dev, ramfc, 0x8c/4));
		nv_wr32(dev, 0x3404, nv_ro32(dev, ramfc, 0x90/4));
		nv_wr32(dev, 0x3408, nv_ro32(dev, ramfc, 0x94/4));
		nv_wr32(dev, 0x3410, nv_ro32(dev, ramfc, 0x98/4));
	}

	dev_priv->engine.instmem.finish_access(dev);

	/* Make the channel current; bit 16 is set alongside the chid. */
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16));
	return 0;
}
412
/*
 * Save the currently-active channel's fifo state back into its RAMFC
 * image (the inverse of nv50_fifo_load_context()), drain pending
 * CACHE1 method/data pairs into the channel's cache object, and switch
 * PFIFO to the dummy channel 127.
 *
 * Returns 0 if there was nothing to save or on success, -EINVAL if the
 * active chid has no channel struct behind it.
 */
int
nv50_fifo_unload_context(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_gpuobj *ramfc, *cache;
	struct nouveau_channel *chan = NULL;
	int chid, get, put, ptr;

	NV_DEBUG(dev, "\n");

	/* Channel 0 and the last channel need no save-out. */
	chid = pfifo->channel_id(dev);
	if (chid < 1 || chid >= dev_priv->engine.fifo.channels - 1)
		return 0;

	chan = dev_priv->fifos[chid];
	if (!chan) {
		NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
		return -EINVAL;
	}
	NV_DEBUG(dev, "ch%d\n", chan->id);
	ramfc = chan->ramfc->gpuobj;
	cache = chan->cache->gpuobj;

	dev_priv->engine.instmem.prepare_access(dev, true);

	/* PFIFO registers -> RAMFC word offsets (mirror of load). */
	nv_wo32(dev, ramfc, 0x00/4, nv_rd32(dev, 0x3330));
	nv_wo32(dev, ramfc, 0x04/4, nv_rd32(dev, 0x3334));
	nv_wo32(dev, ramfc, 0x08/4, nv_rd32(dev, 0x3240));
	nv_wo32(dev, ramfc, 0x0c/4, nv_rd32(dev, 0x3320));
	nv_wo32(dev, ramfc, 0x10/4, nv_rd32(dev, 0x3244));
	nv_wo32(dev, ramfc, 0x14/4, nv_rd32(dev, 0x3328));
	nv_wo32(dev, ramfc, 0x18/4, nv_rd32(dev, 0x3368));
	nv_wo32(dev, ramfc, 0x1c/4, nv_rd32(dev, 0x336c));
	nv_wo32(dev, ramfc, 0x20/4, nv_rd32(dev, 0x3370));
	nv_wo32(dev, ramfc, 0x24/4, nv_rd32(dev, 0x3374));
	nv_wo32(dev, ramfc, 0x28/4, nv_rd32(dev, 0x3378));
	nv_wo32(dev, ramfc, 0x2c/4, nv_rd32(dev, 0x337c));
	nv_wo32(dev, ramfc, 0x30/4, nv_rd32(dev, 0x3228));
	nv_wo32(dev, ramfc, 0x34/4, nv_rd32(dev, 0x3364));
	nv_wo32(dev, ramfc, 0x38/4, nv_rd32(dev, 0x32a0));
	nv_wo32(dev, ramfc, 0x3c/4, nv_rd32(dev, 0x3224));
	nv_wo32(dev, ramfc, 0x40/4, nv_rd32(dev, 0x324c));
	nv_wo32(dev, ramfc, 0x44/4, nv_rd32(dev, 0x2044));
	nv_wo32(dev, ramfc, 0x48/4, nv_rd32(dev, 0x322c));
	nv_wo32(dev, ramfc, 0x4c/4, nv_rd32(dev, 0x3234));
	nv_wo32(dev, ramfc, 0x50/4, nv_rd32(dev, 0x3340));
	nv_wo32(dev, ramfc, 0x54/4, nv_rd32(dev, 0x3344));
	nv_wo32(dev, ramfc, 0x58/4, nv_rd32(dev, 0x3280));
	nv_wo32(dev, ramfc, 0x5c/4, nv_rd32(dev, 0x3254));
	nv_wo32(dev, ramfc, 0x60/4, nv_rd32(dev, 0x3260));
	nv_wo32(dev, ramfc, 0x64/4, nv_rd32(dev, 0x3264));
	nv_wo32(dev, ramfc, 0x68/4, nv_rd32(dev, 0x3268));
	nv_wo32(dev, ramfc, 0x6c/4, nv_rd32(dev, 0x326c));
	nv_wo32(dev, ramfc, 0x70/4, nv_rd32(dev, 0x32e4));
	nv_wo32(dev, ramfc, 0x74/4, nv_rd32(dev, 0x3248));
	nv_wo32(dev, ramfc, 0x78/4, nv_rd32(dev, 0x2088));
	nv_wo32(dev, ramfc, 0x7c/4, nv_rd32(dev, 0x2058));
	nv_wo32(dev, ramfc, 0x80/4, nv_rd32(dev, 0x2210));

	/* Drain CACHE1 method/data pairs between GET and PUT into the
	 * cache object; indices are word-granular, ring wraps at 0x200. */
	put = (nv_rd32(dev, NV03_PFIFO_CACHE1_PUT) & 0x7ff) >> 2;
	get = (nv_rd32(dev, NV03_PFIFO_CACHE1_GET) & 0x7ff) >> 2;
	ptr = 0;
	while (put != get) {
		nv_wo32(dev, cache, ptr++,
			nv_rd32(dev, NV40_PFIFO_CACHE1_METHOD(get)));
		nv_wo32(dev, cache, ptr++,
			nv_rd32(dev, NV40_PFIFO_CACHE1_DATA(get)));
		get = (get + 1) & 0x1ff;
	}

	/* guessing that all the 0x34xx regs aren't on NV50 */
	if (!IS_G80) {
		/* NOTE(review): the saved-entry count (0x84) is only
		 * recorded here, yet load_context reads it on all
		 * chipsets — G80 never gets its count saved; confirm. */
		nv_wo32(dev, ramfc, 0x84/4, ptr >> 1);
		nv_wo32(dev, ramfc, 0x88/4, nv_rd32(dev, 0x340c));
		nv_wo32(dev, ramfc, 0x8c/4, nv_rd32(dev, 0x3400));
		nv_wo32(dev, ramfc, 0x90/4, nv_rd32(dev, 0x3404));
		nv_wo32(dev, ramfc, 0x94/4, nv_rd32(dev, 0x3408));
		nv_wo32(dev, ramfc, 0x98/4, nv_rd32(dev, 0x3410));
	}

	dev_priv->engine.instmem.finish_access(dev);

	/*XXX: probably reload ch127 (NULL) state back too */
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, 127);
	return 0;
}
500
diff --git a/drivers/gpu/drm/nouveau/nv50_gpio.c b/drivers/gpu/drm/nouveau/nv50_gpio.c
new file mode 100644
index 000000000000..c61782b314e7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_gpio.c
@@ -0,0 +1,76 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_hw.h"
28
29static int
30nv50_gpio_location(struct dcb_gpio_entry *gpio, uint32_t *reg, uint32_t *shift)
31{
32 const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
33
34 if (gpio->line > 32)
35 return -EINVAL;
36
37 *reg = nv50_gpio_reg[gpio->line >> 3];
38 *shift = (gpio->line & 7) << 2;
39 return 0;
40}
41
42int
43nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag)
44{
45 struct dcb_gpio_entry *gpio;
46 uint32_t r, s, v;
47
48 gpio = nouveau_bios_gpio_entry(dev, tag);
49 if (!gpio)
50 return -ENOENT;
51
52 if (nv50_gpio_location(gpio, &r, &s))
53 return -EINVAL;
54
55 v = nv_rd32(dev, r) >> (s + 2);
56 return ((v & 1) == (gpio->state[1] & 1));
57}
58
59int
60nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state)
61{
62 struct dcb_gpio_entry *gpio;
63 uint32_t r, s, v;
64
65 gpio = nouveau_bios_gpio_entry(dev, tag);
66 if (!gpio)
67 return -ENOENT;
68
69 if (nv50_gpio_location(gpio, &r, &s))
70 return -EINVAL;
71
72 v = nv_rd32(dev, r) & ~(0x3 << s);
73 v |= (gpio->state[state] ^ 2) << s;
74 nv_wr32(dev, r, v);
75 return 0;
76}
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
new file mode 100644
index 000000000000..b203d06f601f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -0,0 +1,419 @@
1/*
2 * Copyright (C) 2007 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm.h"
29#include "nouveau_drv.h"
30
31#include "nouveau_grctx.h"
32
33#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50)
34
35static void
36nv50_graph_init_reset(struct drm_device *dev)
37{
38 uint32_t pmc_e = NV_PMC_ENABLE_PGRAPH | (1 << 21);
39
40 NV_DEBUG(dev, "\n");
41
42 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e);
43 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | pmc_e);
44}
45
/* Acknowledge any pending PGRAPH interrupt/status bits and enable all
 * interrupt sources. */
static void
nv50_graph_init_intr(struct drm_device *dev)
{
	NV_DEBUG(dev, "\n");

	nv_wr32(dev, NV03_PGRAPH_INTR, 0xffffffff);	/* ack pending intrs */
	nv_wr32(dev, 0x400138, 0xffffffff);		/* clear 0x400138 status -- meaning unconfirmed */
	nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xffffffff);	/* enable everything */
}
55
/*
 * Arm PGRAPH error reporting.  Writing 0xc0000000 appears to ack and
 * enable error status per unit; per-TP registers are strided 0x1000
 * apart before chipset 0xa0 and 0x800 from 0xa0 on, gated by the
 * unit-present mask read from 0x1540.
 */
static void
nv50_graph_init_regs__nv(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t units = nv_rd32(dev, 0x1540);	/* mask of present units */
	int i;

	NV_DEBUG(dev, "\n");

	nv_wr32(dev, 0x400804, 0xc0000000);
	nv_wr32(dev, 0x406800, 0xc0000000);
	nv_wr32(dev, 0x400c04, 0xc0000000);
	nv_wr32(dev, 0x401800, 0xc0000000);
	nv_wr32(dev, 0x405018, 0xc0000000);
	nv_wr32(dev, 0x402000, 0xc0000000);

	/* Per-TP error registers, only for units actually present. */
	for (i = 0; i < 16; i++) {
		if (units & 1 << i) {
			if (dev_priv->chipset < 0xa0) {
				nv_wr32(dev, 0x408900 + (i << 12), 0xc0000000);
				nv_wr32(dev, 0x408e08 + (i << 12), 0xc0000000);
				nv_wr32(dev, 0x408314 + (i << 12), 0xc0000000);
			} else {
				nv_wr32(dev, 0x408600 + (i << 11), 0xc0000000);
				nv_wr32(dev, 0x408708 + (i << 11), 0xc0000000);
				nv_wr32(dev, 0x40831c + (i << 11), 0xc0000000);
			}
		}
	}

	nv_wr32(dev, 0x400108, 0xffffffff);

	nv_wr32(dev, 0x400824, 0x00004000);
	nv_wr32(dev, 0x400500, 0x00010001);
}
91
/* Enable hardware context switching and set 0x402ca8 (purpose
 * unconfirmed — value taken from reverse engineering). */
static void
nv50_graph_init_regs(struct drm_device *dev)
{
	NV_DEBUG(dev, "\n");

	nv_wr32(dev, NV04_PGRAPH_DEBUG_3,
		(1 << 2) /* HW_CONTEXT_SWITCH_ENABLED */);
	nv_wr32(dev, 0x402ca8, 0x800);
}
101
/*
 * Set up PGRAPH context-control microcode.  With the nouveau_ctxfw
 * option, external ctxprog/ctxvals firmware is tried first; if no
 * ctxprog ended up loaded, one is generated via nv50_grctx_init() and
 * uploaded to the 512-instruction ucode area.  Failures disable
 * acceleration instead of failing driver init (always returns 0 except
 * where noted).
 */
static int
nv50_graph_init_ctxctl(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	NV_DEBUG(dev, "\n");

	if (nouveau_ctxfw) {
		nouveau_grctx_prog_load(dev);
		dev_priv->engine.graph.grctx_size = 0x70000;	/* fixed size for fw ctx */
	}
	if (!dev_priv->engine.graph.ctxprog) {
		struct nouveau_grctx ctx = {};
		uint32_t *cp = kmalloc(512 * 4, GFP_KERNEL);	/* 512-insn ucode buffer */
		int i;
		if (!cp) {
			NV_ERROR(dev, "Couldn't alloc ctxprog! Disabling acceleration.\n");
			dev_priv->engine.graph.accel_blocked = true;
			return 0;
		}
		ctx.dev = dev;
		ctx.mode = NOUVEAU_GRCTX_PROG;
		ctx.data = cp;
		ctx.ctxprog_max = 512;
		if (!nv50_grctx_init(&ctx)) {
			/* Generation also sized the per-channel grctx. */
			dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4;

			/* Upload the generated ucode. */
			nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
			for (i = 0; i < ctx.ctxprog_len; i++)
				nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);
		} else {
			/* Unsupported chipset: run unaccelerated. */
			dev_priv->engine.graph.accel_blocked = true;
		}
		kfree(cp);
	}

	nv_wr32(dev, 0x400320, 4);
	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0);	/* no context loaded */
	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, 0);
	return 0;
}
143
/*
 * Bring up the PGRAPH engine: reset it, program its registers, enable
 * interrupts, then install the context-control ucode.
 * Returns 0 on success or the error from ctxctl setup.
 */
int
nv50_graph_init(struct drm_device *dev)
{
	NV_DEBUG(dev, "\n");

	nv50_graph_init_reset(dev);
	nv50_graph_init_regs__nv(dev);
	nv50_graph_init_regs(dev);
	nv50_graph_init_intr(dev);

	return nv50_graph_init_ctxctl(dev);
}
162
/* Tear down PGRAPH: release any loaded ctxprog/ctxvals state. */
void
nv50_graph_takedown(struct drm_device *dev)
{
	NV_DEBUG(dev, "\n");
	nouveau_grctx_fini(dev);
}
169
170void
171nv50_graph_fifo_access(struct drm_device *dev, bool enabled)
172{
173 const uint32_t mask = 0x00010001;
174
175 if (enabled)
176 nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | mask);
177 else
178 nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) & ~mask);
179}
180
181struct nouveau_channel *
182nv50_graph_channel(struct drm_device *dev)
183{
184 struct drm_nouveau_private *dev_priv = dev->dev_private;
185 uint32_t inst;
186 int i;
187
188 /* Be sure we're not in the middle of a context switch or bad things
189 * will happen, such as unloading the wrong pgraph context.
190 */
191 if (!nv_wait(0x400300, 0x00000001, 0x00000000))
192 NV_ERROR(dev, "Ctxprog is still running\n");
193
194 inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
195 if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
196 return NULL;
197 inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12;
198
199 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
200 struct nouveau_channel *chan = dev_priv->fifos[i];
201
202 if (chan && chan->ramin && chan->ramin->instance == inst)
203 return chan;
204 }
205
206 return NULL;
207}
208
209int
210nv50_graph_create_context(struct nouveau_channel *chan)
211{
212 struct drm_device *dev = chan->dev;
213 struct drm_nouveau_private *dev_priv = dev->dev_private;
214 struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
215 struct nouveau_gpuobj *ctx;
216 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
217 int hdr, ret;
218
219 NV_DEBUG(dev, "ch%d\n", chan->id);
220
221 ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size,
222 0x1000, NVOBJ_FLAG_ZERO_ALLOC |
223 NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx);
224 if (ret)
225 return ret;
226 ctx = chan->ramin_grctx->gpuobj;
227
228 hdr = IS_G80 ? 0x200 : 0x20;
229 dev_priv->engine.instmem.prepare_access(dev, true);
230 nv_wo32(dev, ramin, (hdr + 0x00)/4, 0x00190002);
231 nv_wo32(dev, ramin, (hdr + 0x04)/4, chan->ramin_grctx->instance +
232 pgraph->grctx_size - 1);
233 nv_wo32(dev, ramin, (hdr + 0x08)/4, chan->ramin_grctx->instance);
234 nv_wo32(dev, ramin, (hdr + 0x0c)/4, 0);
235 nv_wo32(dev, ramin, (hdr + 0x10)/4, 0);
236 nv_wo32(dev, ramin, (hdr + 0x14)/4, 0x00010000);
237 dev_priv->engine.instmem.finish_access(dev);
238
239 dev_priv->engine.instmem.prepare_access(dev, true);
240 if (!pgraph->ctxprog) {
241 struct nouveau_grctx ctx = {};
242 ctx.dev = chan->dev;
243 ctx.mode = NOUVEAU_GRCTX_VALS;
244 ctx.data = chan->ramin_grctx->gpuobj;
245 nv50_grctx_init(&ctx);
246 } else {
247 nouveau_grctx_vals_load(dev, ctx);
248 }
249 nv_wo32(dev, ctx, 0x00000/4, chan->ramin->instance >> 12);
250 dev_priv->engine.instmem.finish_access(dev);
251
252 return 0;
253}
254
255void
256nv50_graph_destroy_context(struct nouveau_channel *chan)
257{
258 struct drm_device *dev = chan->dev;
259 struct drm_nouveau_private *dev_priv = dev->dev_private;
260 int i, hdr = IS_G80 ? 0x200 : 0x20;
261
262 NV_DEBUG(dev, "ch%d\n", chan->id);
263
264 if (!chan->ramin || !chan->ramin->gpuobj)
265 return;
266
267 dev_priv->engine.instmem.prepare_access(dev, true);
268 for (i = hdr; i < hdr + 24; i += 4)
269 nv_wo32(dev, chan->ramin->gpuobj, i/4, 0);
270 dev_priv->engine.instmem.finish_access(dev);
271
272 nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
273}
274
/*
 * Force-load the PGRAPH context at instance address 'inst' by driving
 * the context-control registers directly.  PFIFO access to PGRAPH
 * (0x400500 bit 0) is blocked for the duration and restored at the
 * end.  Register sequence is from reverse engineering; keep the order.
 */
static int
nv50_graph_do_load_context(struct drm_device *dev, uint32_t inst)
{
	uint32_t fifo = nv_rd32(dev, 0x400500);

	nv_wr32(dev, 0x400500, fifo & ~1);	/* block PFIFO access */
	nv_wr32(dev, 0x400784, inst);		/* context pointer */
	nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x40);
	nv_wr32(dev, 0x400320, nv_rd32(dev, 0x400320) | 0x11);
	nv_wr32(dev, 0x400040, 0xffffffff);
	(void)nv_rd32(dev, 0x400040);		/* posting read */
	nv_wr32(dev, 0x400040, 0x00000000);
	nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 1);

	if (nouveau_wait_for_idle(dev))
		nv_wr32(dev, 0x40032c, inst | (1<<31));
	nv_wr32(dev, 0x400500, fifo);		/* restore PFIFO access */

	return 0;
}
295
296int
297nv50_graph_load_context(struct nouveau_channel *chan)
298{
299 uint32_t inst = chan->ramin->instance >> 12;
300
301 NV_DEBUG(chan->dev, "ch%d\n", chan->id);
302 return nv50_graph_do_load_context(chan->dev, inst);
303}
304
/*
 * Have the hardware save the currently-loaded PGRAPH context (if any)
 * back to its grctx, then clear the LOADED bit by rewriting CTXCTL_CUR
 * with just the instance address.  No-op when nothing is loaded.
 */
int
nv50_graph_unload_context(struct drm_device *dev)
{
	uint32_t inst;

	inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
	if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
		return 0;
	inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE;

	/* Trigger the save; register sequence from reverse engineering. */
	nouveau_wait_for_idle(dev);
	nv_wr32(dev, 0x400784, inst);
	nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20);
	nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01);
	nouveau_wait_for_idle(dev);

	nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst);
	return 0;
}
324
325void
326nv50_graph_context_switch(struct drm_device *dev)
327{
328 uint32_t inst;
329
330 nv50_graph_unload_context(dev);
331
332 inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_NEXT);
333 inst &= NV50_PGRAPH_CTXCTL_NEXT_INSTANCE;
334 nv50_graph_do_load_context(dev, inst);
335
336 nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
337 NV40_PGRAPH_INTR_EN) | NV_PGRAPH_INTR_CONTEXT_SWITCH);
338}
339
340static int
341nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan, int grclass,
342 int mthd, uint32_t data)
343{
344 struct nouveau_gpuobj_ref *ref = NULL;
345
346 if (nouveau_gpuobj_ref_find(chan, data, &ref))
347 return -ENOENT;
348
349 if (nouveau_notifier_offset(ref->gpuobj, NULL))
350 return -EINVAL;
351
352 chan->nvsw.vblsem = ref->gpuobj;
353 chan->nvsw.vblsem_offset = ~0;
354 return 0;
355}
356
357static int
358nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan, int grclass,
359 int mthd, uint32_t data)
360{
361 if (nouveau_notifier_offset(chan->nvsw.vblsem, &data))
362 return -ERANGE;
363
364 chan->nvsw.vblsem_offset = data >> 2;
365 return 0;
366}
367
/* NVSW method 0x0404: latch the value that will be written when the
 * vblank semaphore is released. */
static int
nv50_graph_nvsw_vblsem_release_val(struct nouveau_channel *chan, int grclass,
				   int mthd, uint32_t data)
{
	chan->nvsw.vblsem_rval = data;
	return 0;
}
375
376static int
377nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan, int grclass,
378 int mthd, uint32_t data)
379{
380 struct drm_device *dev = chan->dev;
381 struct drm_nouveau_private *dev_priv = dev->dev_private;
382
383 if (!chan->nvsw.vblsem || chan->nvsw.vblsem_offset == ~0 || data > 1)
384 return -EINVAL;
385
386 if (!(nv_rd32(dev, NV50_PDISPLAY_INTR_EN) &
387 NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(data))) {
388 nv_wr32(dev, NV50_PDISPLAY_INTR_1,
389 NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(data));
390 nv_wr32(dev, NV50_PDISPLAY_INTR_EN, nv_rd32(dev,
391 NV50_PDISPLAY_INTR_EN) |
392 NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(data));
393 }
394
395 list_add(&chan->nvsw.vbl_wait, &dev_priv->vbl_waiting);
396 return 0;
397}
398
/* Software (NVSW) object methods handled by the driver rather than
 * hardware; see the nv50_graph_nvsw_* handlers above. */
static struct nouveau_pgraph_object_method nv50_graph_nvsw_methods[] = {
	{ 0x018c, nv50_graph_nvsw_dma_vblsem },		/* bind semaphore notifier */
	{ 0x0400, nv50_graph_nvsw_vblsem_offset },	/* semaphore offset */
	{ 0x0404, nv50_graph_nvsw_vblsem_release_val },	/* release value */
	{ 0x0408, nv50_graph_nvsw_vblsem_release },	/* queue release on vblank */
	{}
};
406
/* Object classes accepted on NV50-family PGRAPH.  The flag marks
 * classes with software methods (only NVSW here). */
struct nouveau_pgraph_object_class nv50_graph_grclass[] = {
	{ 0x506e, true, nv50_graph_nvsw_methods }, /* nvsw */
	{ 0x0030, false, NULL }, /* null */
	{ 0x5039, false, NULL }, /* m2mf */
	{ 0x502d, false, NULL }, /* 2d */
	{ 0x50c0, false, NULL }, /* compute */
	{ 0x85c0, false, NULL }, /* compute (nva3, nva5, nva8) */
	{ 0x5097, false, NULL }, /* tesla (nv50) */
	{ 0x8297, false, NULL }, /* tesla (nv8x/nv9x) */
	{ 0x8397, false, NULL }, /* tesla (nva0, nvaa, nvac) */
	{ 0x8597, false, NULL }, /* tesla (nva3, nva5, nva8) */
	{}
};
diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c
new file mode 100644
index 000000000000..42a8fb20c1e6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_grctx.c
@@ -0,0 +1,2383 @@
1/*
2 * Copyright 2009 Marcin Kościelnicki
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
/*
 * Ctxprog flag indices, encoded as (flag_register * 32) + bit, each
 * followed by named values for its two states.  Registers 0 and 1 hold
 * software-controlled flags.
 */
#define CP_FLAG_CLEAR 0
#define CP_FLAG_SET 1
#define CP_FLAG_SWAP_DIRECTION ((0 * 32) + 0)
#define CP_FLAG_SWAP_DIRECTION_LOAD 0
#define CP_FLAG_SWAP_DIRECTION_SAVE 1
#define CP_FLAG_UNK01 ((0 * 32) + 1)
#define CP_FLAG_UNK01_CLEAR 0
#define CP_FLAG_UNK01_SET 1
#define CP_FLAG_UNK03 ((0 * 32) + 3)
#define CP_FLAG_UNK03_CLEAR 0
#define CP_FLAG_UNK03_SET 1
#define CP_FLAG_USER_SAVE ((0 * 32) + 5)
#define CP_FLAG_USER_SAVE_NOT_PENDING 0
#define CP_FLAG_USER_SAVE_PENDING 1
#define CP_FLAG_USER_LOAD ((0 * 32) + 6)
#define CP_FLAG_USER_LOAD_NOT_PENDING 0
#define CP_FLAG_USER_LOAD_PENDING 1
#define CP_FLAG_UNK0B ((0 * 32) + 0xb)
#define CP_FLAG_UNK0B_CLEAR 0
#define CP_FLAG_UNK0B_SET 1
#define CP_FLAG_UNK1D ((0 * 32) + 0x1d)
#define CP_FLAG_UNK1D_CLEAR 0
#define CP_FLAG_UNK1D_SET 1
#define CP_FLAG_UNK20 ((1 * 32) + 0)
#define CP_FLAG_UNK20_CLEAR 0
#define CP_FLAG_UNK20_SET 1
/* Flag register 2 holds status bits tested by cp_wait()/cp_bra(). */
#define CP_FLAG_STATUS ((2 * 32) + 0)
#define CP_FLAG_STATUS_BUSY 0
#define CP_FLAG_STATUS_IDLE 1
#define CP_FLAG_AUTO_SAVE ((2 * 32) + 4)
#define CP_FLAG_AUTO_SAVE_NOT_PENDING 0
#define CP_FLAG_AUTO_SAVE_PENDING 1
#define CP_FLAG_AUTO_LOAD ((2 * 32) + 5)
#define CP_FLAG_AUTO_LOAD_NOT_PENDING 0
#define CP_FLAG_AUTO_LOAD_PENDING 1
#define CP_FLAG_NEWCTX ((2 * 32) + 10)
#define CP_FLAG_NEWCTX_BUSY 0
#define CP_FLAG_NEWCTX_DONE 1
#define CP_FLAG_XFER ((2 * 32) + 11)
#define CP_FLAG_XFER_IDLE 0
#define CP_FLAG_XFER_BUSY 1
#define CP_FLAG_ALWAYS ((2 * 32) + 13)
#define CP_FLAG_ALWAYS_FALSE 0
#define CP_FLAG_ALWAYS_TRUE 1
#define CP_FLAG_INTR ((2 * 32) + 15)
#define CP_FLAG_INTR_NOT_PENDING 0
#define CP_FLAG_INTR_PENDING 1

/*
 * Ctxprog instruction encodings: opcode bits plus operand field masks
 * and shifts for the parameterised ops (CTX, LOAD_SR, BRA, WAIT, SET),
 * followed by complete fixed-encoding instructions.
 */
#define CP_CTX 0x00100000
#define CP_CTX_COUNT 0x000f0000
#define CP_CTX_COUNT_SHIFT 16
#define CP_CTX_REG 0x00003fff
#define CP_LOAD_SR 0x00200000
#define CP_LOAD_SR_VALUE 0x000fffff
#define CP_BRA 0x00400000
#define CP_BRA_IP 0x0001ff00
#define CP_BRA_IP_SHIFT 8
#define CP_BRA_IF_CLEAR 0x00000080
#define CP_BRA_FLAG 0x0000007f
#define CP_WAIT 0x00500000
#define CP_WAIT_SET 0x00000080
#define CP_WAIT_FLAG 0x0000007f
#define CP_SET 0x00700000
#define CP_SET_1 0x00000080
#define CP_SET_FLAG 0x0000007f
#define CP_NEWCTX 0x00600004
#define CP_NEXT_TO_SWAP 0x00600005
#define CP_SET_CONTEXT_POINTER 0x00600006
#define CP_SET_XFER_POINTER 0x00600007
#define CP_ENABLE 0x00600009
#define CP_END 0x0060000c
#define CP_NEXT_TO_CURRENT 0x0060000d
#define CP_DISABLE1 0x0090ffff
#define CP_DISABLE2 0x0091ffff
#define CP_XFER_1 0x008000ff
#define CP_XFER_2 0x008800ff
#define CP_SEEK_1 0x00c000ff
#define CP_SEEK_2 0x00c800ff
101
102#include "drmP.h"
103#include "nouveau_drv.h"
104#include "nouveau_grctx.h"
105
106/*
107 * This code deals with PGRAPH contexts on NV50 family cards. Like NV40, it's
108 * the GPU itself that does context-switching, but it needs a special
109 * microcode to do it. And it's the driver's task to supply this microcode,
110 * further known as ctxprog, as well as the initial context values, known
111 * as ctxvals.
112 *
113 * Without ctxprog, you cannot switch contexts. Not even in software, since
114 * the majority of context [xfer strands] isn't accessible directly. You're
115 * stuck with a single channel, and you also suffer all the problems resulting
116 * from missing ctxvals, since you cannot load them.
117 *
118 * Without ctxvals, you're stuck with PGRAPH's default context. It's enough to
119 * run 2d operations, but trying to utilise 3d or CUDA will just lock you up,
120 * since you don't have... some sort of needed setup.
121 *
122 * Nouveau will just disable acceleration if not given ctxprog + ctxvals, since
123 * it's too much hassle to handle no-ctxprog as a special case.
124 */
125
126/*
127 * How ctxprogs work.
128 *
129 * The ctxprog is written in its own kind of microcode, with very small and
130 * crappy set of available commands. You upload it to a small [512 insns]
131 * area of memory on PGRAPH, and it'll be run when PFIFO wants PGRAPH to
132 * switch channel. or when the driver explicitely requests it. Stuff visible
133 * to ctxprog consists of: PGRAPH MMIO registers, PGRAPH context strands,
134 * the per-channel context save area in VRAM [known as ctxvals or grctx],
135 * 4 flags registers, a scratch register, two grctx pointers, plus many
136 * random poorly-understood details.
137 *
138 * When ctxprog runs, it's supposed to check what operations are asked of it,
139 * save old context if requested, optionally reset PGRAPH and switch to the
140 * new channel, and load the new context. Context consists of three major
141 * parts: subset of MMIO registers and two "xfer areas".
142 */
143
144/* TODO:
145 * - document unimplemented bits compared to nvidia
146 * - NVAx: make a TP subroutine, use it.
147 * - use 0x4008fc instead of 0x1540?
148 */
149
/* Symbolic branch targets used by cp_name()/cp_bra() while emitting
 * the ctxprog skeleton in nv50_grctx_init().  Values start at 1 so 0
 * can mean "no label". */
enum cp_label {
	cp_check_load = 1,
	cp_setup_auto_load,
	cp_setup_load,
	cp_setup_save,
	cp_swap_state,
	cp_prepare_exit,
	cp_exit,
};
159
160static void nv50_graph_construct_mmio(struct nouveau_grctx *ctx);
161static void nv50_graph_construct_xfer1(struct nouveau_grctx *ctx);
162static void nv50_graph_construct_xfer2(struct nouveau_grctx *ctx);
163
164/* Main function: construct the ctxprog skeleton, call the other functions. */
165
/*
 * nv50_grctx_init() - emit the ctxprog skeleton and context layout.
 *
 * The cp_* helpers either assemble ctxprog microcode into ctx->data or
 * account/emit initial context values, depending on ctx->mode
 * (presumably NOUVEAU_GRCTX_PROG vs NOUVEAU_GRCTX_VALS — see
 * nouveau_grctx.h; both callers in nv50_graph.c use it this way).
 * Supported chipsets are whitelisted up front; anything else returns
 * -ENOSYS so the caller can disable acceleration.
 */
int
nv50_grctx_init(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;

	switch (dev_priv->chipset) {
	case 0x50:
	case 0x84:
	case 0x86:
	case 0x92:
	case 0x94:
	case 0x96:
	case 0x98:
	case 0xa0:
	case 0xa3:
	case 0xa5:
	case 0xa8:
	case 0xaa:
	case 0xac:
		break;
	default:
		NV_ERROR(ctx->dev, "I don't know how to make a ctxprog for "
				   "your NV%x card.\n", dev_priv->chipset);
		NV_ERROR(ctx->dev, "Disabling acceleration. Please contact "
				   "the devs.\n");
		return -ENOSYS;
	}
	/* decide whether we're loading/unloading the context */
	cp_bra (ctx, AUTO_SAVE, PENDING, cp_setup_save);
	cp_bra (ctx, USER_SAVE, PENDING, cp_setup_save);

	cp_name(ctx, cp_check_load);
	cp_bra (ctx, AUTO_LOAD, PENDING, cp_setup_auto_load);
	cp_bra (ctx, USER_LOAD, PENDING, cp_setup_load);
	cp_bra (ctx, ALWAYS, TRUE, cp_exit);

	/* setup for context load */
	cp_name(ctx, cp_setup_auto_load);
	cp_out (ctx, CP_DISABLE1);
	cp_out (ctx, CP_DISABLE2);
	cp_out (ctx, CP_ENABLE);
	cp_out (ctx, CP_NEXT_TO_SWAP);
	cp_set (ctx, UNK01, SET);
	cp_name(ctx, cp_setup_load);
	cp_out (ctx, CP_NEWCTX);
	cp_wait(ctx, NEWCTX, BUSY);
	cp_set (ctx, UNK1D, CLEAR);
	cp_set (ctx, SWAP_DIRECTION, LOAD);
	cp_bra (ctx, UNK0B, SET, cp_prepare_exit);
	cp_bra (ctx, ALWAYS, TRUE, cp_swap_state);

	/* setup for context save */
	cp_name(ctx, cp_setup_save);
	cp_set (ctx, UNK1D, SET);
	cp_wait(ctx, STATUS, BUSY);
	cp_wait(ctx, INTR, PENDING);
	cp_bra (ctx, STATUS, BUSY, cp_setup_save);
	cp_set (ctx, UNK01, SET);
	cp_set (ctx, SWAP_DIRECTION, SAVE);

	/* general PGRAPH state */
	cp_name(ctx, cp_swap_state);
	cp_set (ctx, UNK03, SET);
	cp_pos (ctx, 0x00004/4);
	cp_ctx (ctx, 0x400828, 1); /* needed. otherwise, flickering happens. */
	cp_pos (ctx, 0x00100/4);
	/* The bulk of the context: MMIO regs and both xfer areas. */
	nv50_graph_construct_mmio(ctx);
	nv50_graph_construct_xfer1(ctx);
	nv50_graph_construct_xfer2(ctx);

	cp_bra (ctx, SWAP_DIRECTION, SAVE, cp_check_load);

	cp_set (ctx, UNK20, SET);
	cp_set (ctx, SWAP_DIRECTION, SAVE); /* no idea why this is needed, but fixes at least one lockup. */
	cp_lsr (ctx, ctx->ctxvals_base);
	cp_out (ctx, CP_SET_XFER_POINTER);
	cp_lsr (ctx, 4);
	cp_out (ctx, CP_SEEK_1);
	cp_out (ctx, CP_XFER_1);
	cp_wait(ctx, XFER, BUSY);

	/* pre-exit state updates */
	cp_name(ctx, cp_prepare_exit);
	cp_set (ctx, UNK01, CLEAR);
	cp_set (ctx, UNK03, CLEAR);
	cp_set (ctx, UNK1D, CLEAR);

	cp_bra (ctx, USER_SAVE, PENDING, cp_exit);
	cp_out (ctx, CP_NEXT_TO_CURRENT);

	cp_name(ctx, cp_exit);
	cp_set (ctx, USER_SAVE, NOT_PENDING);
	cp_set (ctx, USER_LOAD, NOT_PENDING);
	cp_out (ctx, CP_END);
	ctx->ctxvals_pos += 0x400; /* padding... no idea why you need it */

	return 0;
}
264
265/*
266 * Constructs MMIO part of ctxprog and ctxvals. Just a matter of knowing which
267 * registers to save/restore and the default values for them.
268 */
269
/*
 * nv50_graph_construct_mmio - declare the MMIO part of ctxprog/ctxvals
 * @ctx: generation state; cp_ctx() adds register ranges to the ctxprog
 *       save/restore list, gr_def() records non-zero default values
 *
 * The ranges and default values are keyed on dev_priv->chipset throughout;
 * registers are grouped by PGRAPH unit (the 0x400800 DISPATCH block, VFETCH,
 * STREAMOUT, M2MF, per-ROP and per-TP/MP state, ...).  The running "offset"
 * variable is repeatedly nudged by +/-4 etc. because some chipsets insert or
 * omit individual words in otherwise-identical register layouts.  The bit
 * mask read from 0x1540 gates emission of the per-ROP (bits 16+), per-TP
 * (bits 0-9) and per-MP (bits 24+) groups below.
 */
static void
nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	int i, j;
	int offset, base;
	uint32_t units = nv_rd32 (ctx->dev, 0x1540);	/* enabled-unit mask */

	/* 0800: DISPATCH */
	cp_ctx(ctx, 0x400808, 7);
	gr_def(ctx, 0x400814, 0x00000030);
	cp_ctx(ctx, 0x400834, 0x32);
	if (dev_priv->chipset == 0x50) {
		gr_def(ctx, 0x400834, 0xff400040);
		gr_def(ctx, 0x400838, 0xfff00080);
		gr_def(ctx, 0x40083c, 0xfff70090);
		gr_def(ctx, 0x400840, 0xffe806a8);
	}
	gr_def(ctx, 0x400844, 0x00000002);
	if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
		gr_def(ctx, 0x400894, 0x00001000);
	gr_def(ctx, 0x4008e8, 0x00000003);
	gr_def(ctx, 0x4008ec, 0x00001000);
	if (dev_priv->chipset == 0x50)
		cp_ctx(ctx, 0x400908, 0xb);
	else if (dev_priv->chipset < 0xa0)
		cp_ctx(ctx, 0x400908, 0xc);
	else
		cp_ctx(ctx, 0x400908, 0xe);

	if (dev_priv->chipset >= 0xa0)
		cp_ctx(ctx, 0x400b00, 0x1);
	if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
		cp_ctx(ctx, 0x400b10, 0x1);
		gr_def(ctx, 0x400b10, 0x0001629d);
		cp_ctx(ctx, 0x400b20, 0x1);
		gr_def(ctx, 0x400b20, 0x0001629d);
	}

	/* 0C00: VFETCH */
	cp_ctx(ctx, 0x400c08, 0x2);
	gr_def(ctx, 0x400c08, 0x0000fe0c);

	/* 1000 */
	if (dev_priv->chipset < 0xa0) {
		cp_ctx(ctx, 0x401008, 0x4);
		gr_def(ctx, 0x401014, 0x00001000);
	} else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa) {
		cp_ctx(ctx, 0x401008, 0x5);
		gr_def(ctx, 0x401018, 0x00001000);
	} else {
		cp_ctx(ctx, 0x401008, 0x5);
		gr_def(ctx, 0x401018, 0x00004000);
	}

	/* 1400 */
	cp_ctx(ctx, 0x401400, 0x8);
	cp_ctx(ctx, 0x401424, 0x3);
	if (dev_priv->chipset == 0x50)
		gr_def(ctx, 0x40142c, 0x0001fd87);
	else
		gr_def(ctx, 0x40142c, 0x00000187);
	cp_ctx(ctx, 0x401540, 0x5);
	gr_def(ctx, 0x401550, 0x00001018);

	/* 1800: STREAMOUT */
	cp_ctx(ctx, 0x401814, 0x1);
	gr_def(ctx, 0x401814, 0x000000ff);
	if (dev_priv->chipset == 0x50) {
		cp_ctx(ctx, 0x40181c, 0xe);
		gr_def(ctx, 0x401850, 0x00000004);
	} else if (dev_priv->chipset < 0xa0) {
		cp_ctx(ctx, 0x40181c, 0xf);
		gr_def(ctx, 0x401854, 0x00000004);
	} else {
		cp_ctx(ctx, 0x40181c, 0x13);
		gr_def(ctx, 0x401864, 0x00000004);
	}

	/* 1C00 */
	cp_ctx(ctx, 0x401c00, 0x1);
	switch (dev_priv->chipset) {
	case 0x50:
		gr_def(ctx, 0x401c00, 0x0001005f);
		break;
	case 0x84:
	case 0x86:
	case 0x94:
		gr_def(ctx, 0x401c00, 0x044d00df);
		break;
	case 0x92:
	case 0x96:
	case 0x98:
	case 0xa0:
	case 0xaa:
	case 0xac:
		gr_def(ctx, 0x401c00, 0x042500df);
		break;
	case 0xa3:
	case 0xa5:
	case 0xa8:
		gr_def(ctx, 0x401c00, 0x142500df);
		break;
	}

	/* 2400 */
	cp_ctx(ctx, 0x402400, 0x1);
	if (dev_priv->chipset == 0x50)
		cp_ctx(ctx, 0x402408, 0x1);
	else
		cp_ctx(ctx, 0x402408, 0x2);
	gr_def(ctx, 0x402408, 0x00000600);

	/* 2800 */
	cp_ctx(ctx, 0x402800, 0x1);
	if (dev_priv->chipset == 0x50)
		gr_def(ctx, 0x402800, 0x00000006);

	/* 2C00 */
	cp_ctx(ctx, 0x402c08, 0x6);
	if (dev_priv->chipset != 0x50)
		gr_def(ctx, 0x402c14, 0x01000000);
	gr_def(ctx, 0x402c18, 0x000000ff);
	if (dev_priv->chipset == 0x50)
		cp_ctx(ctx, 0x402ca0, 0x1);
	else
		cp_ctx(ctx, 0x402ca0, 0x2);
	if (dev_priv->chipset < 0xa0)
		gr_def(ctx, 0x402ca0, 0x00000400);
	else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa)
		gr_def(ctx, 0x402ca0, 0x00000800);
	else
		gr_def(ctx, 0x402ca0, 0x00000400);
	cp_ctx(ctx, 0x402cac, 0x4);

	/* 3000 */
	cp_ctx(ctx, 0x403004, 0x1);
	gr_def(ctx, 0x403004, 0x00000001);

	/* 3404 */
	if (dev_priv->chipset >= 0xa0) {
		cp_ctx(ctx, 0x403404, 0x1);
		gr_def(ctx, 0x403404, 0x00000001);
	}

	/* 5000 */
	cp_ctx(ctx, 0x405000, 0x1);
	switch (dev_priv->chipset) {
	case 0x50:
		gr_def(ctx, 0x405000, 0x00300080);
		break;
	case 0x84:
	case 0xa0:
	case 0xa3:
	case 0xa5:
	case 0xa8:
	case 0xaa:
	case 0xac:
		gr_def(ctx, 0x405000, 0x000e0080);
		break;
	case 0x86:
	case 0x92:
	case 0x94:
	case 0x96:
	case 0x98:
		gr_def(ctx, 0x405000, 0x00000080);
		break;
	}
	cp_ctx(ctx, 0x405014, 0x1);
	gr_def(ctx, 0x405014, 0x00000004);
	cp_ctx(ctx, 0x40501c, 0x1);
	cp_ctx(ctx, 0x405024, 0x1);
	cp_ctx(ctx, 0x40502c, 0x1);

	/* 5400 or maybe 4800 */
	/* One large range whose base and length vary per chipset; "offset"
	 * tracks the current position and is corrected below wherever a
	 * chipset inserts or drops a word relative to the others. */
	if (dev_priv->chipset == 0x50) {
		offset = 0x405400;
		cp_ctx(ctx, 0x405400, 0xea);
	} else if (dev_priv->chipset < 0x94) {
		offset = 0x405400;
		cp_ctx(ctx, 0x405400, 0xcb);
	} else if (dev_priv->chipset < 0xa0) {
		offset = 0x405400;
		cp_ctx(ctx, 0x405400, 0xcc);
	} else if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
		offset = 0x404800;
		cp_ctx(ctx, 0x404800, 0xda);
	} else {
		offset = 0x405400;
		cp_ctx(ctx, 0x405400, 0xd4);
	}
	gr_def(ctx, offset + 0x0c, 0x00000002);
	gr_def(ctx, offset + 0x10, 0x00000001);
	if (dev_priv->chipset >= 0x94)
		offset += 4;
	gr_def(ctx, offset + 0x1c, 0x00000001);
	gr_def(ctx, offset + 0x20, 0x00000100);
	gr_def(ctx, offset + 0x38, 0x00000002);
	gr_def(ctx, offset + 0x3c, 0x00000001);
	gr_def(ctx, offset + 0x40, 0x00000001);
	gr_def(ctx, offset + 0x50, 0x00000001);
	gr_def(ctx, offset + 0x54, 0x003fffff);
	gr_def(ctx, offset + 0x58, 0x00001fff);
	gr_def(ctx, offset + 0x60, 0x00000001);
	gr_def(ctx, offset + 0x64, 0x00000001);
	gr_def(ctx, offset + 0x6c, 0x00000001);
	gr_def(ctx, offset + 0x70, 0x00000001);
	gr_def(ctx, offset + 0x74, 0x00000001);
	gr_def(ctx, offset + 0x78, 0x00000004);
	gr_def(ctx, offset + 0x7c, 0x00000001);
	if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
		offset += 4;
	gr_def(ctx, offset + 0x80, 0x00000001);
	gr_def(ctx, offset + 0x84, 0x00000001);
	gr_def(ctx, offset + 0x88, 0x00000007);
	gr_def(ctx, offset + 0x8c, 0x00000001);
	gr_def(ctx, offset + 0x90, 0x00000007);
	gr_def(ctx, offset + 0x94, 0x00000001);
	gr_def(ctx, offset + 0x98, 0x00000001);
	gr_def(ctx, offset + 0x9c, 0x00000001);
	if (dev_priv->chipset == 0x50) {
		gr_def(ctx, offset + 0xb0, 0x00000001);
		gr_def(ctx, offset + 0xb4, 0x00000001);
		gr_def(ctx, offset + 0xbc, 0x00000001);
		gr_def(ctx, offset + 0xc0, 0x0000000a);
		gr_def(ctx, offset + 0xd0, 0x00000040);
		gr_def(ctx, offset + 0xd8, 0x00000002);
		gr_def(ctx, offset + 0xdc, 0x00000100);
		gr_def(ctx, offset + 0xe0, 0x00000001);
		gr_def(ctx, offset + 0xe4, 0x00000100);
		gr_def(ctx, offset + 0x100, 0x00000001);
		gr_def(ctx, offset + 0x124, 0x00000004);
		gr_def(ctx, offset + 0x13c, 0x00000001);
		gr_def(ctx, offset + 0x140, 0x00000100);
		gr_def(ctx, offset + 0x148, 0x00000001);
		gr_def(ctx, offset + 0x154, 0x00000100);
		gr_def(ctx, offset + 0x158, 0x00000001);
		gr_def(ctx, offset + 0x15c, 0x00000100);
		gr_def(ctx, offset + 0x164, 0x00000001);
		gr_def(ctx, offset + 0x170, 0x00000100);
		gr_def(ctx, offset + 0x174, 0x00000001);
		gr_def(ctx, offset + 0x17c, 0x00000001);
		gr_def(ctx, offset + 0x188, 0x00000002);
		gr_def(ctx, offset + 0x190, 0x00000001);
		gr_def(ctx, offset + 0x198, 0x00000001);
		gr_def(ctx, offset + 0x1ac, 0x00000003);
		offset += 0xd0;
	} else {
		gr_def(ctx, offset + 0xb0, 0x00000001);
		gr_def(ctx, offset + 0xb4, 0x00000100);
		gr_def(ctx, offset + 0xbc, 0x00000001);
		gr_def(ctx, offset + 0xc8, 0x00000100);
		gr_def(ctx, offset + 0xcc, 0x00000001);
		gr_def(ctx, offset + 0xd0, 0x00000100);
		gr_def(ctx, offset + 0xd8, 0x00000001);
		gr_def(ctx, offset + 0xe4, 0x00000100);
	}
	gr_def(ctx, offset + 0xf8, 0x00000004);
	gr_def(ctx, offset + 0xfc, 0x00000070);
	gr_def(ctx, offset + 0x100, 0x00000080);
	if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
		offset += 4;
	gr_def(ctx, offset + 0x114, 0x0000000c);
	if (dev_priv->chipset == 0x50)
		offset -= 4;
	gr_def(ctx, offset + 0x11c, 0x00000008);
	gr_def(ctx, offset + 0x120, 0x00000014);
	if (dev_priv->chipset == 0x50) {
		gr_def(ctx, offset + 0x124, 0x00000026);
		offset -= 0x18;
	} else {
		gr_def(ctx, offset + 0x128, 0x00000029);
		gr_def(ctx, offset + 0x12c, 0x00000027);
		gr_def(ctx, offset + 0x130, 0x00000026);
		gr_def(ctx, offset + 0x134, 0x00000008);
		gr_def(ctx, offset + 0x138, 0x00000004);
		gr_def(ctx, offset + 0x13c, 0x00000027);
	}
	gr_def(ctx, offset + 0x148, 0x00000001);
	gr_def(ctx, offset + 0x14c, 0x00000002);
	gr_def(ctx, offset + 0x150, 0x00000003);
	gr_def(ctx, offset + 0x154, 0x00000004);
	gr_def(ctx, offset + 0x158, 0x00000005);
	gr_def(ctx, offset + 0x15c, 0x00000006);
	gr_def(ctx, offset + 0x160, 0x00000007);
	gr_def(ctx, offset + 0x164, 0x00000001);
	gr_def(ctx, offset + 0x1a8, 0x000000cf);
	if (dev_priv->chipset == 0x50)
		offset -= 4;
	gr_def(ctx, offset + 0x1d8, 0x00000080);
	gr_def(ctx, offset + 0x1dc, 0x00000004);
	gr_def(ctx, offset + 0x1e0, 0x00000004);
	if (dev_priv->chipset == 0x50)
		offset -= 4;
	else
		gr_def(ctx, offset + 0x1e4, 0x00000003);
	if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
		gr_def(ctx, offset + 0x1ec, 0x00000003);
		offset += 8;
	}
	gr_def(ctx, offset + 0x1e8, 0x00000001);
	if (dev_priv->chipset == 0x50)
		offset -= 4;
	gr_def(ctx, offset + 0x1f4, 0x00000012);
	gr_def(ctx, offset + 0x1f8, 0x00000010);
	gr_def(ctx, offset + 0x1fc, 0x0000000c);
	gr_def(ctx, offset + 0x200, 0x00000001);
	gr_def(ctx, offset + 0x210, 0x00000004);
	gr_def(ctx, offset + 0x214, 0x00000002);
	gr_def(ctx, offset + 0x218, 0x00000004);
	if (dev_priv->chipset >= 0xa0)
		offset += 4;
	gr_def(ctx, offset + 0x224, 0x003fffff);
	gr_def(ctx, offset + 0x228, 0x00001fff);
	if (dev_priv->chipset == 0x50)
		offset -= 0x20;
	else if (dev_priv->chipset >= 0xa0) {
		gr_def(ctx, offset + 0x250, 0x00000001);
		gr_def(ctx, offset + 0x254, 0x00000001);
		gr_def(ctx, offset + 0x258, 0x00000002);
		offset += 0x10;
	}
	gr_def(ctx, offset + 0x250, 0x00000004);
	gr_def(ctx, offset + 0x254, 0x00000014);
	gr_def(ctx, offset + 0x258, 0x00000001);
	if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
		offset += 4;
	gr_def(ctx, offset + 0x264, 0x00000002);
	if (dev_priv->chipset >= 0xa0)
		offset += 8;
	gr_def(ctx, offset + 0x270, 0x00000001);
	gr_def(ctx, offset + 0x278, 0x00000002);
	gr_def(ctx, offset + 0x27c, 0x00001000);
	if (dev_priv->chipset == 0x50)
		offset -= 0xc;
	else {
		gr_def(ctx, offset + 0x280, 0x00000e00);
		gr_def(ctx, offset + 0x284, 0x00001000);
		gr_def(ctx, offset + 0x288, 0x00001e00);
	}
	gr_def(ctx, offset + 0x290, 0x00000001);
	gr_def(ctx, offset + 0x294, 0x00000001);
	gr_def(ctx, offset + 0x298, 0x00000001);
	gr_def(ctx, offset + 0x29c, 0x00000001);
	gr_def(ctx, offset + 0x2a0, 0x00000001);
	gr_def(ctx, offset + 0x2b0, 0x00000200);
	if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
		gr_def(ctx, offset + 0x2b4, 0x00000200);
		offset += 4;
	}
	if (dev_priv->chipset < 0xa0) {
		gr_def(ctx, offset + 0x2b8, 0x00000001);
		gr_def(ctx, offset + 0x2bc, 0x00000070);
		gr_def(ctx, offset + 0x2c0, 0x00000080);
		gr_def(ctx, offset + 0x2cc, 0x00000001);
		gr_def(ctx, offset + 0x2d0, 0x00000070);
		gr_def(ctx, offset + 0x2d4, 0x00000080);
	} else {
		gr_def(ctx, offset + 0x2b8, 0x00000001);
		gr_def(ctx, offset + 0x2bc, 0x000000f0);
		gr_def(ctx, offset + 0x2c0, 0x000000ff);
		gr_def(ctx, offset + 0x2cc, 0x00000001);
		gr_def(ctx, offset + 0x2d0, 0x000000f0);
		gr_def(ctx, offset + 0x2d4, 0x000000ff);
		gr_def(ctx, offset + 0x2dc, 0x00000009);
		offset += 4;
	}
	gr_def(ctx, offset + 0x2e4, 0x00000001);
	gr_def(ctx, offset + 0x2e8, 0x000000cf);
	gr_def(ctx, offset + 0x2f0, 0x00000001);
	gr_def(ctx, offset + 0x300, 0x000000cf);
	gr_def(ctx, offset + 0x308, 0x00000002);
	gr_def(ctx, offset + 0x310, 0x00000001);
	gr_def(ctx, offset + 0x318, 0x00000001);
	gr_def(ctx, offset + 0x320, 0x000000cf);
	gr_def(ctx, offset + 0x324, 0x000000cf);
	gr_def(ctx, offset + 0x328, 0x00000001);

	/* 6000? */
	if (dev_priv->chipset == 0x50)
		cp_ctx(ctx, 0x4063e0, 0x1);

	/* 6800: M2MF */
	if (dev_priv->chipset < 0x90) {
		cp_ctx(ctx, 0x406814, 0x2b);
		gr_def(ctx, 0x406818, 0x00000f80);
		gr_def(ctx, 0x406860, 0x007f0080);
		gr_def(ctx, 0x40689c, 0x007f0080);
	} else {
		cp_ctx(ctx, 0x406814, 0x4);
		if (dev_priv->chipset == 0x98)
			gr_def(ctx, 0x406818, 0x00000f80);
		else
			gr_def(ctx, 0x406818, 0x00001f80);
		if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
			gr_def(ctx, 0x40681c, 0x00000030);
		cp_ctx(ctx, 0x406830, 0x3);
	}

	/* 7000: per-ROP group state */
	/* One 0x100-byte register block per enabled ROP group (units bits 16+). */
	for (i = 0; i < 8; i++) {
		if (units & (1<<(i+16))) {
			cp_ctx(ctx, 0x407000 + (i<<8), 3);
			if (dev_priv->chipset == 0x50)
				gr_def(ctx, 0x407000 + (i<<8), 0x1b74f820);
			else if (dev_priv->chipset != 0xa5)
				gr_def(ctx, 0x407000 + (i<<8), 0x3b74f821);
			else
				gr_def(ctx, 0x407000 + (i<<8), 0x7b74f821);
			gr_def(ctx, 0x407004 + (i<<8), 0x89058001);

			if (dev_priv->chipset == 0x50) {
				cp_ctx(ctx, 0x407010 + (i<<8), 1);
			} else if (dev_priv->chipset < 0xa0) {
				cp_ctx(ctx, 0x407010 + (i<<8), 2);
				gr_def(ctx, 0x407010 + (i<<8), 0x00001000);
				gr_def(ctx, 0x407014 + (i<<8), 0x0000001f);
			} else {
				cp_ctx(ctx, 0x407010 + (i<<8), 3);
				gr_def(ctx, 0x407010 + (i<<8), 0x00001000);
				if (dev_priv->chipset != 0xa5)
					gr_def(ctx, 0x407014 + (i<<8), 0x000000ff);
				else
					gr_def(ctx, 0x407014 + (i<<8), 0x000001ff);
			}

			cp_ctx(ctx, 0x407080 + (i<<8), 4);
			if (dev_priv->chipset != 0xa5)
				gr_def(ctx, 0x407080 + (i<<8), 0x027c10fa);
			else
				gr_def(ctx, 0x407080 + (i<<8), 0x827c10fa);
			if (dev_priv->chipset == 0x50)
				gr_def(ctx, 0x407084 + (i<<8), 0x000000c0);
			else
				gr_def(ctx, 0x407084 + (i<<8), 0x400000c0);
			gr_def(ctx, 0x407088 + (i<<8), 0xb7892080);

			if (dev_priv->chipset < 0xa0)
				cp_ctx(ctx, 0x407094 + (i<<8), 1);
			else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa)
				cp_ctx(ctx, 0x407094 + (i<<8), 3);
			else {
				cp_ctx(ctx, 0x407094 + (i<<8), 4);
				gr_def(ctx, 0x4070a0 + (i<<8), 1);
			}
		}
	}

	cp_ctx(ctx, 0x407c00, 0x3);
	if (dev_priv->chipset < 0x90)
		gr_def(ctx, 0x407c00, 0x00010040);
	else if (dev_priv->chipset < 0xa0)
		gr_def(ctx, 0x407c00, 0x00390040);
	else
		gr_def(ctx, 0x407c00, 0x003d0040);
	gr_def(ctx, 0x407c08, 0x00000022);
	if (dev_priv->chipset >= 0xa0) {
		cp_ctx(ctx, 0x407c10, 0x3);
		cp_ctx(ctx, 0x407c20, 0x1);
		cp_ctx(ctx, 0x407c2c, 0x1);
	}

	if (dev_priv->chipset < 0xa0) {
		cp_ctx(ctx, 0x407d00, 0x9);
	} else {
		cp_ctx(ctx, 0x407d00, 0x15);
	}
	if (dev_priv->chipset == 0x98)
		gr_def(ctx, 0x407d08, 0x00380040);
	else {
		if (dev_priv->chipset < 0x90)
			gr_def(ctx, 0x407d08, 0x00010040);
		else if (dev_priv->chipset < 0xa0)
			gr_def(ctx, 0x407d08, 0x00390040);
		else
			gr_def(ctx, 0x407d08, 0x003d0040);
		gr_def(ctx, 0x407d0c, 0x00000022);
	}

	/* 8000+: per-TP state */
	/* TP blocks are 0x1000 apart pre-NVA0, 0x800 apart from NVA0 on;
	 * units bits 0-9 select which TPs exist. */
	for (i = 0; i < 10; i++) {
		if (units & (1<<i)) {
			if (dev_priv->chipset < 0xa0)
				base = 0x408000 + (i<<12);
			else
				base = 0x408000 + (i<<11);
			if (dev_priv->chipset < 0xa0)
				offset = base + 0xc00;
			else
				offset = base + 0x80;
			cp_ctx(ctx, offset + 0x00, 1);
			gr_def(ctx, offset + 0x00, 0x0000ff0a);
			cp_ctx(ctx, offset + 0x08, 1);

			/* per-MP state */
			for (j = 0; j < (dev_priv->chipset < 0xa0 ? 2 : 4); j++) {
				if (!(units & (1 << (j+24)))) continue;
				if (dev_priv->chipset < 0xa0)
					offset = base + 0x200 + (j<<7);
				else
					offset = base + 0x100 + (j<<7);
				cp_ctx(ctx, offset, 0x20);
				gr_def(ctx, offset + 0x00, 0x01800000);
				gr_def(ctx, offset + 0x04, 0x00160000);
				gr_def(ctx, offset + 0x08, 0x01800000);
				gr_def(ctx, offset + 0x18, 0x0003ffff);
				switch (dev_priv->chipset) {
				case 0x50:
					gr_def(ctx, offset + 0x1c, 0x00080000);
					break;
				case 0x84:
					gr_def(ctx, offset + 0x1c, 0x00880000);
					break;
				case 0x86:
					gr_def(ctx, offset + 0x1c, 0x008c0000);
					break;
				case 0x92:
				case 0x96:
				case 0x98:
					gr_def(ctx, offset + 0x1c, 0x118c0000);
					break;
				case 0x94:
					gr_def(ctx, offset + 0x1c, 0x10880000);
					break;
				case 0xa0:
				case 0xa5:
					gr_def(ctx, offset + 0x1c, 0x310c0000);
					break;
				case 0xa3:
				case 0xa8:
				case 0xaa:
				case 0xac:
					gr_def(ctx, offset + 0x1c, 0x300c0000);
					break;
				}
				gr_def(ctx, offset + 0x40, 0x00010401);
				if (dev_priv->chipset == 0x50)
					gr_def(ctx, offset + 0x48, 0x00000040);
				else
					gr_def(ctx, offset + 0x48, 0x00000078);
				gr_def(ctx, offset + 0x50, 0x000000bf);
				gr_def(ctx, offset + 0x58, 0x00001210);
				if (dev_priv->chipset == 0x50)
					gr_def(ctx, offset + 0x5c, 0x00000080);
				else
					gr_def(ctx, offset + 0x5c, 0x08000080);
				if (dev_priv->chipset >= 0xa0)
					gr_def(ctx, offset + 0x68, 0x0000003e);
			}

			if (dev_priv->chipset < 0xa0)
				cp_ctx(ctx, base + 0x300, 0x4);
			else
				cp_ctx(ctx, base + 0x300, 0x5);
			if (dev_priv->chipset == 0x50)
				gr_def(ctx, base + 0x304, 0x00007070);
			else if (dev_priv->chipset < 0xa0)
				gr_def(ctx, base + 0x304, 0x00027070);
			else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa)
				gr_def(ctx, base + 0x304, 0x01127070);
			else
				gr_def(ctx, base + 0x304, 0x05127070);

			if (dev_priv->chipset < 0xa0)
				cp_ctx(ctx, base + 0x318, 1);
			else
				cp_ctx(ctx, base + 0x320, 1);
			if (dev_priv->chipset == 0x50)
				gr_def(ctx, base + 0x318, 0x0003ffff);
			else if (dev_priv->chipset < 0xa0)
				gr_def(ctx, base + 0x318, 0x03ffffff);
			else
				gr_def(ctx, base + 0x320, 0x07ffffff);

			if (dev_priv->chipset < 0xa0)
				cp_ctx(ctx, base + 0x324, 5);
			else
				cp_ctx(ctx, base + 0x328, 4);

			if (dev_priv->chipset < 0xa0) {
				cp_ctx(ctx, base + 0x340, 9);
				offset = base + 0x340;
			} else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) {
				cp_ctx(ctx, base + 0x33c, 0xb);
				offset = base + 0x344;
			} else {
				cp_ctx(ctx, base + 0x33c, 0xd);
				offset = base + 0x344;
			}
			gr_def(ctx, offset + 0x0, 0x00120407);
			gr_def(ctx, offset + 0x4, 0x05091507);
			if (dev_priv->chipset == 0x84)
				gr_def(ctx, offset + 0x8, 0x05100202);
			else
				gr_def(ctx, offset + 0x8, 0x05010202);
			gr_def(ctx, offset + 0xc, 0x00030201);
			if (dev_priv->chipset == 0xa3)
				cp_ctx(ctx, base + 0x36c, 1);

			cp_ctx(ctx, base + 0x400, 2);
			gr_def(ctx, base + 0x404, 0x00000040);
			cp_ctx(ctx, base + 0x40c, 2);
			gr_def(ctx, base + 0x40c, 0x0d0c0b0a);
			gr_def(ctx, base + 0x410, 0x00141210);

			if (dev_priv->chipset < 0xa0)
				offset = base + 0x800;
			else
				offset = base + 0x500;
			cp_ctx(ctx, offset, 6);
			gr_def(ctx, offset + 0x0, 0x000001f0);
			gr_def(ctx, offset + 0x4, 0x00000001);
			gr_def(ctx, offset + 0x8, 0x00000003);
			if (dev_priv->chipset == 0x50 || dev_priv->chipset >= 0xaa)
				gr_def(ctx, offset + 0xc, 0x00008000);
			gr_def(ctx, offset + 0x14, 0x00039e00);
			cp_ctx(ctx, offset + 0x1c, 2);
			if (dev_priv->chipset == 0x50)
				gr_def(ctx, offset + 0x1c, 0x00000040);
			else
				gr_def(ctx, offset + 0x1c, 0x00000100);
			gr_def(ctx, offset + 0x20, 0x00003800);

			if (dev_priv->chipset >= 0xa0) {
				cp_ctx(ctx, base + 0x54c, 2);
				if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa)
					gr_def(ctx, base + 0x54c, 0x003fe006);
				else
					gr_def(ctx, base + 0x54c, 0x003fe007);
				gr_def(ctx, base + 0x550, 0x003fe000);
			}

			if (dev_priv->chipset < 0xa0)
				offset = base + 0xa00;
			else
				offset = base + 0x680;
			cp_ctx(ctx, offset, 1);
			gr_def(ctx, offset, 0x00404040);

			if (dev_priv->chipset < 0xa0)
				offset = base + 0xe00;
			else
				offset = base + 0x700;
			cp_ctx(ctx, offset, 2);
			if (dev_priv->chipset < 0xa0)
				gr_def(ctx, offset, 0x0077f005);
			else if (dev_priv->chipset == 0xa5)
				gr_def(ctx, offset, 0x6cf7f007);
			else if (dev_priv->chipset == 0xa8)
				gr_def(ctx, offset, 0x6cfff007);
			else if (dev_priv->chipset == 0xac)
				gr_def(ctx, offset, 0x0cfff007);
			else
				gr_def(ctx, offset, 0x0cf7f007);
			if (dev_priv->chipset == 0x50)
				gr_def(ctx, offset + 0x4, 0x00007fff);
			else if (dev_priv->chipset < 0xa0)
				gr_def(ctx, offset + 0x4, 0x003f7fff);
			else
				gr_def(ctx, offset + 0x4, 0x02bf7fff);
			cp_ctx(ctx, offset + 0x2c, 1);
			if (dev_priv->chipset == 0x50) {
				cp_ctx(ctx, offset + 0x50, 9);
				gr_def(ctx, offset + 0x54, 0x000003ff);
				gr_def(ctx, offset + 0x58, 0x00000003);
				gr_def(ctx, offset + 0x5c, 0x00000003);
				gr_def(ctx, offset + 0x60, 0x000001ff);
				gr_def(ctx, offset + 0x64, 0x0000001f);
				gr_def(ctx, offset + 0x68, 0x0000000f);
				gr_def(ctx, offset + 0x6c, 0x0000000f);
			} else if(dev_priv->chipset < 0xa0) {
				cp_ctx(ctx, offset + 0x50, 1);
				cp_ctx(ctx, offset + 0x70, 1);
			} else {
				cp_ctx(ctx, offset + 0x50, 1);
				cp_ctx(ctx, offset + 0x60, 5);
			}
		}
	}
}
950
951/*
952 * xfer areas. These are a pain.
953 *
954 * There are 2 xfer areas: the first one is big and contains all sorts of
955 * stuff, the second is small and contains some per-TP context.
956 *
957 * Each area is split into 8 "strands". The areas, when saved to grctx,
958 * are made of 8-word blocks. Each block contains a single word from
959 * each strand. The strands are independent of each other, their
960 * addresses are unrelated to each other, and data in them is closely
961 * packed together. The strand layout varies a bit between cards: here
962 * and there, a single word is thrown out in the middle and the whole
963 * strand is offset by a bit from corresponding one on another chipset.
964 * For this reason, addresses of stuff in strands are almost useless.
965 * Knowing sequence of stuff and size of gaps between them is much more
966 * useful, and that's how we build the strands in our generator.
967 *
968 * NVA0 takes this mess to a whole new level by cutting the old strands
969 * into a few dozen pieces [known as genes], rearranging them randomly,
970 * and putting them back together to make new strands. Hopefully these
971 * genes correspond more or less directly to the same PGRAPH subunits
972 * as in 400040 register.
973 *
974 * The most common value in default context is 0, and when the genes
 * are separated by 0's, gene boundaries are quite speculative...
976 * some of them can be clearly deduced, others can be guessed, and yet
977 * others won't be resolved without figuring out the real meaning of
978 * given ctxval. For the same reason, ending point of each strand
979 * is unknown. Except for strand 0, which is the longest strand and
980 * its end corresponds to end of the whole xfer.
981 *
982 * An unsolved mystery is the seek instruction: it takes an argument
983 * in bits 8-18, and that argument is clearly the place in strands to
984 * seek to... but the offsets don't seem to correspond to offsets as
985 * seen in grctx. Perhaps there's another, real, not randomly-changing
986 * addressing in strands, and the xfer insn just happens to skip over
987 * the unused bits? NV10-NV30 PIPE comes to mind...
988 *
989 * As far as I know, there's no way to access the xfer areas directly
990 * without the help of ctxprog.
991 */
992
993static inline void
994xf_emit(struct nouveau_grctx *ctx, int num, uint32_t val) {
995 int i;
996 if (val && ctx->mode == NOUVEAU_GRCTX_VALS)
997 for (i = 0; i < num; i++)
998 nv_wo32(ctx->dev, ctx->data, ctx->ctxvals_pos + (i << 3), val);
999 ctx->ctxvals_pos += num << 3;
1000}
1001
1002/* Gene declarations... */
1003
1004static void nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx);
1005static void nv50_graph_construct_gene_unk1(struct nouveau_grctx *ctx);
1006static void nv50_graph_construct_gene_unk2(struct nouveau_grctx *ctx);
1007static void nv50_graph_construct_gene_unk3(struct nouveau_grctx *ctx);
1008static void nv50_graph_construct_gene_unk4(struct nouveau_grctx *ctx);
1009static void nv50_graph_construct_gene_unk5(struct nouveau_grctx *ctx);
1010static void nv50_graph_construct_gene_unk6(struct nouveau_grctx *ctx);
1011static void nv50_graph_construct_gene_unk7(struct nouveau_grctx *ctx);
1012static void nv50_graph_construct_gene_unk8(struct nouveau_grctx *ctx);
1013static void nv50_graph_construct_gene_unk9(struct nouveau_grctx *ctx);
1014static void nv50_graph_construct_gene_unk10(struct nouveau_grctx *ctx);
1015static void nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx);
1016static void nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx);
1017
1018static void
1019nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
1020{
1021 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
1022 int i;
1023 int offset;
1024 int size = 0;
1025 uint32_t units = nv_rd32 (ctx->dev, 0x1540);
1026
1027 offset = (ctx->ctxvals_pos+0x3f)&~0x3f;
1028 ctx->ctxvals_base = offset;
1029
1030 if (dev_priv->chipset < 0xa0) {
1031 /* Strand 0 */
1032 ctx->ctxvals_pos = offset;
1033 switch (dev_priv->chipset) {
1034 case 0x50:
1035 xf_emit(ctx, 0x99, 0);
1036 break;
1037 case 0x84:
1038 case 0x86:
1039 xf_emit(ctx, 0x384, 0);
1040 break;
1041 case 0x92:
1042 case 0x94:
1043 case 0x96:
1044 case 0x98:
1045 xf_emit(ctx, 0x380, 0);
1046 break;
1047 }
1048 nv50_graph_construct_gene_m2mf (ctx);
1049 switch (dev_priv->chipset) {
1050 case 0x50:
1051 case 0x84:
1052 case 0x86:
1053 case 0x98:
1054 xf_emit(ctx, 0x4c4, 0);
1055 break;
1056 case 0x92:
1057 case 0x94:
1058 case 0x96:
1059 xf_emit(ctx, 0x984, 0);
1060 break;
1061 }
1062 nv50_graph_construct_gene_unk5(ctx);
1063 if (dev_priv->chipset == 0x50)
1064 xf_emit(ctx, 0xa, 0);
1065 else
1066 xf_emit(ctx, 0xb, 0);
1067 nv50_graph_construct_gene_unk4(ctx);
1068 nv50_graph_construct_gene_unk3(ctx);
1069 if ((ctx->ctxvals_pos-offset)/8 > size)
1070 size = (ctx->ctxvals_pos-offset)/8;
1071
1072 /* Strand 1 */
1073 ctx->ctxvals_pos = offset + 0x1;
1074 nv50_graph_construct_gene_unk6(ctx);
1075 nv50_graph_construct_gene_unk7(ctx);
1076 nv50_graph_construct_gene_unk8(ctx);
1077 switch (dev_priv->chipset) {
1078 case 0x50:
1079 case 0x92:
1080 xf_emit(ctx, 0xfb, 0);
1081 break;
1082 case 0x84:
1083 xf_emit(ctx, 0xd3, 0);
1084 break;
1085 case 0x94:
1086 case 0x96:
1087 xf_emit(ctx, 0xab, 0);
1088 break;
1089 case 0x86:
1090 case 0x98:
1091 xf_emit(ctx, 0x6b, 0);
1092 break;
1093 }
1094 xf_emit(ctx, 2, 0x4e3bfdf);
1095 xf_emit(ctx, 4, 0);
1096 xf_emit(ctx, 1, 0x0fac6881);
1097 xf_emit(ctx, 0xb, 0);
1098 xf_emit(ctx, 2, 0x4e3bfdf);
1099 if ((ctx->ctxvals_pos-offset)/8 > size)
1100 size = (ctx->ctxvals_pos-offset)/8;
1101
1102 /* Strand 2 */
1103 ctx->ctxvals_pos = offset + 0x2;
1104 switch (dev_priv->chipset) {
1105 case 0x50:
1106 case 0x92:
1107 xf_emit(ctx, 0xa80, 0);
1108 break;
1109 case 0x84:
1110 xf_emit(ctx, 0xa7e, 0);
1111 break;
1112 case 0x94:
1113 case 0x96:
1114 xf_emit(ctx, 0xa7c, 0);
1115 break;
1116 case 0x86:
1117 case 0x98:
1118 xf_emit(ctx, 0xa7a, 0);
1119 break;
1120 }
1121 xf_emit(ctx, 1, 0x3fffff);
1122 xf_emit(ctx, 2, 0);
1123 xf_emit(ctx, 1, 0x1fff);
1124 xf_emit(ctx, 0xe, 0);
1125 nv50_graph_construct_gene_unk9(ctx);
1126 nv50_graph_construct_gene_unk2(ctx);
1127 nv50_graph_construct_gene_unk1(ctx);
1128 nv50_graph_construct_gene_unk10(ctx);
1129 if ((ctx->ctxvals_pos-offset)/8 > size)
1130 size = (ctx->ctxvals_pos-offset)/8;
1131
1132 /* Strand 3: per-ROP group state */
1133 ctx->ctxvals_pos = offset + 3;
1134 for (i = 0; i < 6; i++)
1135 if (units & (1 << (i + 16)))
1136 nv50_graph_construct_gene_ropc(ctx);
1137 if ((ctx->ctxvals_pos-offset)/8 > size)
1138 size = (ctx->ctxvals_pos-offset)/8;
1139
1140 /* Strands 4-7: per-TP state */
1141 for (i = 0; i < 4; i++) {
1142 ctx->ctxvals_pos = offset + 4 + i;
1143 if (units & (1 << (2 * i)))
1144 nv50_graph_construct_xfer_tp(ctx);
1145 if (units & (1 << (2 * i + 1)))
1146 nv50_graph_construct_xfer_tp(ctx);
1147 if ((ctx->ctxvals_pos-offset)/8 > size)
1148 size = (ctx->ctxvals_pos-offset)/8;
1149 }
1150 } else {
1151 /* Strand 0 */
1152 ctx->ctxvals_pos = offset;
1153 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1154 xf_emit(ctx, 0x385, 0);
1155 else
1156 xf_emit(ctx, 0x384, 0);
1157 nv50_graph_construct_gene_m2mf(ctx);
1158 xf_emit(ctx, 0x950, 0);
1159 nv50_graph_construct_gene_unk10(ctx);
1160 xf_emit(ctx, 1, 0x0fac6881);
1161 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
1162 xf_emit(ctx, 1, 1);
1163 xf_emit(ctx, 3, 0);
1164 }
1165 nv50_graph_construct_gene_unk8(ctx);
1166 if (dev_priv->chipset == 0xa0)
1167 xf_emit(ctx, 0x189, 0);
1168 else if (dev_priv->chipset == 0xa3)
1169 xf_emit(ctx, 0xd5, 0);
1170 else if (dev_priv->chipset == 0xa5)
1171 xf_emit(ctx, 0x99, 0);
1172 else if (dev_priv->chipset == 0xaa)
1173 xf_emit(ctx, 0x65, 0);
1174 else
1175 xf_emit(ctx, 0x6d, 0);
1176 nv50_graph_construct_gene_unk9(ctx);
1177 if ((ctx->ctxvals_pos-offset)/8 > size)
1178 size = (ctx->ctxvals_pos-offset)/8;
1179
1180 /* Strand 1 */
1181 ctx->ctxvals_pos = offset + 1;
1182 nv50_graph_construct_gene_unk1(ctx);
1183 if ((ctx->ctxvals_pos-offset)/8 > size)
1184 size = (ctx->ctxvals_pos-offset)/8;
1185
1186 /* Strand 2 */
1187 ctx->ctxvals_pos = offset + 2;
1188 if (dev_priv->chipset == 0xa0) {
1189 nv50_graph_construct_gene_unk2(ctx);
1190 }
1191 xf_emit(ctx, 0x36, 0);
1192 nv50_graph_construct_gene_unk5(ctx);
1193 if ((ctx->ctxvals_pos-offset)/8 > size)
1194 size = (ctx->ctxvals_pos-offset)/8;
1195
1196 /* Strand 3 */
1197 ctx->ctxvals_pos = offset + 3;
1198 xf_emit(ctx, 1, 0);
1199 xf_emit(ctx, 1, 1);
1200 nv50_graph_construct_gene_unk6(ctx);
1201 if ((ctx->ctxvals_pos-offset)/8 > size)
1202 size = (ctx->ctxvals_pos-offset)/8;
1203
1204 /* Strand 4 */
1205 ctx->ctxvals_pos = offset + 4;
1206 if (dev_priv->chipset == 0xa0)
1207 xf_emit(ctx, 0xa80, 0);
1208 else if (dev_priv->chipset == 0xa3)
1209 xf_emit(ctx, 0xa7c, 0);
1210 else
1211 xf_emit(ctx, 0xa7a, 0);
1212 xf_emit(ctx, 1, 0x3fffff);
1213 xf_emit(ctx, 2, 0);
1214 xf_emit(ctx, 1, 0x1fff);
1215 if ((ctx->ctxvals_pos-offset)/8 > size)
1216 size = (ctx->ctxvals_pos-offset)/8;
1217
1218 /* Strand 5 */
1219 ctx->ctxvals_pos = offset + 5;
1220 xf_emit(ctx, 1, 0);
1221 xf_emit(ctx, 1, 0x0fac6881);
1222 xf_emit(ctx, 0xb, 0);
1223 xf_emit(ctx, 2, 0x4e3bfdf);
1224 xf_emit(ctx, 3, 0);
1225 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1226 xf_emit(ctx, 1, 0x11);
1227 xf_emit(ctx, 1, 0);
1228 xf_emit(ctx, 2, 0x4e3bfdf);
1229 xf_emit(ctx, 2, 0);
1230 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1231 xf_emit(ctx, 1, 0x11);
1232 xf_emit(ctx, 1, 0);
1233 for (i = 0; i < 8; i++)
1234 if (units & (1<<(i+16)))
1235 nv50_graph_construct_gene_ropc(ctx);
1236 if ((ctx->ctxvals_pos-offset)/8 > size)
1237 size = (ctx->ctxvals_pos-offset)/8;
1238
1239 /* Strand 6 */
1240 ctx->ctxvals_pos = offset + 6;
1241 nv50_graph_construct_gene_unk3(ctx);
1242 xf_emit(ctx, 0xb, 0);
1243 nv50_graph_construct_gene_unk4(ctx);
1244 nv50_graph_construct_gene_unk7(ctx);
1245 if (units & (1 << 0))
1246 nv50_graph_construct_xfer_tp(ctx);
1247 if (units & (1 << 1))
1248 nv50_graph_construct_xfer_tp(ctx);
1249 if (units & (1 << 2))
1250 nv50_graph_construct_xfer_tp(ctx);
1251 if (units & (1 << 3))
1252 nv50_graph_construct_xfer_tp(ctx);
1253 if ((ctx->ctxvals_pos-offset)/8 > size)
1254 size = (ctx->ctxvals_pos-offset)/8;
1255
1256 /* Strand 7 */
1257 ctx->ctxvals_pos = offset + 7;
1258 if (dev_priv->chipset == 0xa0) {
1259 if (units & (1 << 4))
1260 nv50_graph_construct_xfer_tp(ctx);
1261 if (units & (1 << 5))
1262 nv50_graph_construct_xfer_tp(ctx);
1263 if (units & (1 << 6))
1264 nv50_graph_construct_xfer_tp(ctx);
1265 if (units & (1 << 7))
1266 nv50_graph_construct_xfer_tp(ctx);
1267 if (units & (1 << 8))
1268 nv50_graph_construct_xfer_tp(ctx);
1269 if (units & (1 << 9))
1270 nv50_graph_construct_xfer_tp(ctx);
1271 } else {
1272 nv50_graph_construct_gene_unk2(ctx);
1273 }
1274 if ((ctx->ctxvals_pos-offset)/8 > size)
1275 size = (ctx->ctxvals_pos-offset)/8;
1276 }
1277
1278 ctx->ctxvals_pos = offset + size * 8;
1279 ctx->ctxvals_pos = (ctx->ctxvals_pos+0x3f)&~0x3f;
1280 cp_lsr (ctx, offset);
1281 cp_out (ctx, CP_SET_XFER_POINTER);
1282 cp_lsr (ctx, size);
1283 cp_out (ctx, CP_SEEK_1);
1284 cp_out (ctx, CP_XFER_1);
1285 cp_wait(ctx, XFER, BUSY);
1286}
1287
1288/*
1289 * non-trivial demagiced parts of ctx init go here
1290 */
1291
/*
 * Emit the initial context values for the M2MF (memory-to-memory format)
 * engine's state.  One slot per hardware method, in the fixed order the
 * context image expects — do not reorder; the per-line comments name the
 * method each slot corresponds to.
 */
static void
nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx)
{
	/* m2mf state */
	xf_emit (ctx, 1, 0);		/* DMA_NOTIFY instance >> 4 */
	xf_emit (ctx, 1, 0);		/* DMA_BUFFER_IN instance >> 4 */
	xf_emit (ctx, 1, 0);		/* DMA_BUFFER_OUT instance >> 4 */
	xf_emit (ctx, 1, 0);		/* OFFSET_IN */
	xf_emit (ctx, 1, 0);		/* OFFSET_OUT */
	xf_emit (ctx, 1, 0);		/* PITCH_IN */
	xf_emit (ctx, 1, 0);		/* PITCH_OUT */
	xf_emit (ctx, 1, 0);		/* LINE_LENGTH */
	xf_emit (ctx, 1, 0);		/* LINE_COUNT */
	xf_emit (ctx, 1, 0x21);		/* FORMAT: bits 0-4 INPUT_INC, bits 5-9 OUTPUT_INC */
	xf_emit (ctx, 1, 1);		/* LINEAR_IN */
	xf_emit (ctx, 1, 0x2);		/* TILING_MODE_IN: bits 0-2 y tiling, bits 3-5 z tiling */
	xf_emit (ctx, 1, 0x100);	/* TILING_PITCH_IN */
	xf_emit (ctx, 1, 0x100);	/* TILING_HEIGHT_IN */
	xf_emit (ctx, 1, 1);		/* TILING_DEPTH_IN */
	xf_emit (ctx, 1, 0);		/* TILING_POSITION_IN_Z */
	xf_emit (ctx, 1, 0);		/* TILING_POSITION_IN */
	xf_emit (ctx, 1, 1);		/* LINEAR_OUT */
	xf_emit (ctx, 1, 0x2);		/* TILING_MODE_OUT: bits 0-2 y tiling, bits 3-5 z tiling */
	xf_emit (ctx, 1, 0x100);	/* TILING_PITCH_OUT */
	xf_emit (ctx, 1, 0x100);	/* TILING_HEIGHT_OUT */
	xf_emit (ctx, 1, 1);		/* TILING_DEPTH_OUT */
	xf_emit (ctx, 1, 0);		/* TILING_POSITION_OUT_Z */
	xf_emit (ctx, 1, 0);		/* TILING_POSITION_OUT */
	xf_emit (ctx, 1, 0);		/* OFFSET_IN_HIGH */
	xf_emit (ctx, 1, 0);		/* OFFSET_OUT_HIGH */
}
1323
/*
 * Emit context values for an unidentified ("unk1") state block.
 * Values and counts come from reverse engineering; the sequence is
 * order-sensitive and the big zero-fill in the middle is sized per
 * chipset.  Do not reorder.
 */
static void
nv50_graph_construct_gene_unk1(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	/* end of area 2 on pre-NVA0, area 1 on NVAx */
	xf_emit(ctx, 2, 4);
	xf_emit(ctx, 1, 0);
	xf_emit(ctx, 1, 0x80);
	xf_emit(ctx, 1, 4);
	xf_emit(ctx, 1, 0x80c14);
	xf_emit(ctx, 1, 0);
	/* original NV50 uses the smaller 0x3ff limit; later chips 0x7ff */
	if (dev_priv->chipset == 0x50)
		xf_emit(ctx, 1, 0x3ff);
	else
		xf_emit(ctx, 1, 0x7ff);
	/* chipset-dependent zero-filled region */
	switch (dev_priv->chipset) {
	case 0x50:
	case 0x86:
	case 0x98:
	case 0xaa:
	case 0xac:
		xf_emit(ctx, 0x542, 0);
		break;
	case 0x84:
	case 0x92:
	case 0x94:
	case 0x96:
		xf_emit(ctx, 0x942, 0);
		break;
	case 0xa0:
	case 0xa3:
		xf_emit(ctx, 0x2042, 0);
		break;
	case 0xa5:
	case 0xa8:
		xf_emit(ctx, 0x842, 0);
		break;
	}
	xf_emit(ctx, 2, 4);
	xf_emit(ctx, 1, 0);
	xf_emit(ctx, 1, 0x80);
	xf_emit(ctx, 1, 4);
	xf_emit(ctx, 1, 1);
	xf_emit(ctx, 1, 0);
	xf_emit(ctx, 1, 0x27);
	xf_emit(ctx, 1, 0);
	xf_emit(ctx, 1, 0x26);
	xf_emit(ctx, 3, 0);
}
1373
/*
 * Emit context values for an unidentified ("unk10") state block.
 * The sequence is fixed and chipset-independent, so it is expressed
 * as a count/value table pushed in order.
 */
static void
nv50_graph_construct_gene_unk10(struct nouveau_grctx *ctx)
{
	/* end of area 2 on pre-NVA0, area 1 on NVAx */
	static const struct {
		int count;
		unsigned value;
	} seq[] = {
		{ 0x10, 0x04000000 },
		{ 0x24, 0x00000000 },
		{ 0x02, 0x04e3bfdf },
		{ 0x02, 0x00000000 },
		{ 0x01, 0x0001fe21 },
	};
	int i;

	for (i = 0; i < (int)(sizeof(seq) / sizeof(seq[0])); i++)
		xf_emit(ctx, seq[i].count, seq[i].value);
}
1384
/*
 * Emit context values for an unidentified ("unk2") state block.
 * Reverse-engineered, order-sensitive sequence; several counts/values
 * vary between NV50, the NV84..NV98 group, NVA0 and the NVA3..NVA8
 * group (the recurring `> 0xa0 && < 0xaa` test).  Do not reorder.
 */
static void
nv50_graph_construct_gene_unk2(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	/* middle of area 2 on pre-NVA0, beginning of area 2 on NVA0, area 7 on >NVA0 */
	if (dev_priv->chipset != 0x50) {
		xf_emit(ctx, 5, 0);
		xf_emit(ctx, 1, 0x80c14);
		xf_emit(ctx, 2, 0);
		xf_emit(ctx, 1, 0x804);
		xf_emit(ctx, 1, 0);
		xf_emit(ctx, 2, 4);
		xf_emit(ctx, 1, 0x8100c12);
	}
	xf_emit(ctx, 1, 0);
	xf_emit(ctx, 2, 4);
	xf_emit(ctx, 1, 0);
	xf_emit(ctx, 1, 0x10);
	if (dev_priv->chipset == 0x50)
		xf_emit(ctx, 3, 0);
	else
		xf_emit(ctx, 4, 0);
	xf_emit(ctx, 1, 0x804);
	xf_emit(ctx, 1, 1);
	xf_emit(ctx, 1, 0x1a);
	if (dev_priv->chipset != 0x50)
		xf_emit(ctx, 1, 0x7f);
	xf_emit(ctx, 1, 0);
	xf_emit(ctx, 1, 1);
	xf_emit(ctx, 1, 0x80c14);
	xf_emit(ctx, 1, 0);
	xf_emit(ctx, 1, 0x8100c12);
	xf_emit(ctx, 2, 4);
	xf_emit(ctx, 1, 0);
	xf_emit(ctx, 1, 0x10);
	xf_emit(ctx, 3, 0);
	xf_emit(ctx, 1, 1);
	xf_emit(ctx, 1, 0x8100c12);
	xf_emit(ctx, 6, 0);
	if (dev_priv->chipset == 0x50)
		xf_emit(ctx, 1, 0x3ff);
	else
		xf_emit(ctx, 1, 0x7ff);
	xf_emit(ctx, 1, 0x80c14);
	xf_emit(ctx, 0x38, 0);
	xf_emit(ctx, 1, 1);
	xf_emit(ctx, 2, 0);
	xf_emit(ctx, 1, 0x10);
	xf_emit(ctx, 0x38, 0);
	xf_emit(ctx, 2, 0x88);
	xf_emit(ctx, 2, 0);
	xf_emit(ctx, 1, 4);
	xf_emit(ctx, 0x16, 0);
	xf_emit(ctx, 1, 0x26);
	xf_emit(ctx, 2, 0);
	xf_emit(ctx, 1, 0x3f800000);
	if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
		xf_emit(ctx, 4, 0);
	else
		xf_emit(ctx, 3, 0);
	xf_emit(ctx, 1, 0x1a);
	xf_emit(ctx, 1, 0x10);
	if (dev_priv->chipset != 0x50)
		xf_emit(ctx, 0x28, 0);
	else
		xf_emit(ctx, 0x25, 0);
	xf_emit(ctx, 1, 0x52);
	xf_emit(ctx, 1, 0);
	xf_emit(ctx, 1, 0x26);
	xf_emit(ctx, 1, 0);
	xf_emit(ctx, 2, 4);
	xf_emit(ctx, 1, 0);
	xf_emit(ctx, 1, 0x1a);
	xf_emit(ctx, 2, 0);
	xf_emit(ctx, 1, 0x00ffff00);
	xf_emit(ctx, 1, 0);
}
1462
/*
 * Emit context values for an unidentified ("unk3") state block.
 * Reverse-engineered, order-sensitive; NV50 differs from later chips
 * in the size of two zero-filled runs.  Do not reorder.
 */
static void
nv50_graph_construct_gene_unk3(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	/* end of area 0 on pre-NVA0, beginning of area 6 on NVAx */
	xf_emit(ctx, 1, 0x3f);
	xf_emit(ctx, 0xa, 0);
	xf_emit(ctx, 1, 2);
	xf_emit(ctx, 2, 0x04000000);
	xf_emit(ctx, 8, 0);
	xf_emit(ctx, 1, 4);
	xf_emit(ctx, 3, 0);
	xf_emit(ctx, 1, 4);
	if (dev_priv->chipset == 0x50)
		xf_emit(ctx, 0x10, 0);
	else
		xf_emit(ctx, 0x11, 0);
	xf_emit(ctx, 1, 1);
	xf_emit(ctx, 1, 0x1001);
	xf_emit(ctx, 4, 0xffff);
	xf_emit(ctx, 0x20, 0);
	xf_emit(ctx, 0x10, 0x3f800000);	/* sixteen copies of float 1.0 */
	xf_emit(ctx, 1, 0x10);
	if (dev_priv->chipset == 0x50)
		xf_emit(ctx, 1, 0);
	else
		xf_emit(ctx, 2, 0);
	xf_emit(ctx, 1, 3);
	xf_emit(ctx, 2, 0);
}
1493
/*
 * Emit context values for an unidentified ("unk4") state block.
 * Chipset-independent fixed sequence, expressed as a count/value
 * table pushed in order.
 */
static void
nv50_graph_construct_gene_unk4(struct nouveau_grctx *ctx)
{
	/* middle of area 0 on pre-NVA0, middle of area 6 on NVAx */
	static const struct {
		int count;
		unsigned value;
	} seq[] = {
		{ 2, 0x04000000 },
		{ 1, 0x00000000 },
		{ 1, 0x00000080 },
		{ 3, 0x00000000 },
		{ 1, 0x00000080 },
		{ 1, 0x00000000 },
	};
	int i;

	for (i = 0; i < (int)(sizeof(seq) / sizeof(seq[0])); i++)
		xf_emit(ctx, seq[i].count, seq[i].value);
}
1505
/*
 * Emit context values for an unidentified ("unk5") state block.
 * Reverse-engineered, order-sensitive; the large zero-fill near the
 * start grows by two slots on the NVA3..NVA8 group.  Do not reorder.
 */
static void
nv50_graph_construct_gene_unk5(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	/* middle of area 0 on pre-NVA0 [after m2mf], end of area 2 on NVAx */
	xf_emit(ctx, 2, 4);
	if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
		xf_emit(ctx, 0x1c4d, 0);
	else
		xf_emit(ctx, 0x1c4b, 0);
	xf_emit(ctx, 2, 4);
	xf_emit(ctx, 1, 0x8100c12);
	if (dev_priv->chipset != 0x50)
		xf_emit(ctx, 1, 3);
	xf_emit(ctx, 1, 0);
	xf_emit(ctx, 1, 0x8100c12);
	xf_emit(ctx, 1, 0);
	xf_emit(ctx, 1, 0x80c14);
	xf_emit(ctx, 1, 1);
	if (dev_priv->chipset >= 0xa0)
		xf_emit(ctx, 2, 4);
	xf_emit(ctx, 1, 0x80c14);
	xf_emit(ctx, 2, 0);
	xf_emit(ctx, 1, 0x8100c12);
	xf_emit(ctx, 1, 0x27);
	xf_emit(ctx, 2, 0);
	xf_emit(ctx, 1, 1);
	xf_emit(ctx, 0x3c1, 0);
	xf_emit(ctx, 1, 1);
	xf_emit(ctx, 0x16, 0);
	xf_emit(ctx, 1, 0x8100c12);
	xf_emit(ctx, 1, 0);
}
1539
/*
 * Emit context values for an unidentified ("unk6") state block.
 * Reverse-engineered, order-sensitive; this block has the most
 * chipset-dependent sizing of the "gene" helpers (NV50 / pre-NVA0 /
 * NVA0 / NVA3..NVA8 / NVAA+ all differ in places).  Do not reorder.
 */
static void
nv50_graph_construct_gene_unk6(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	/* beginning of area 1 on pre-NVA0 [after m2mf], area 3 on NVAx */
	xf_emit(ctx, 4, 0);
	xf_emit(ctx, 1, 0xf);
	if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
		xf_emit(ctx, 8, 0);
	else
		xf_emit(ctx, 4, 0);
	xf_emit(ctx, 1, 0x20);
	if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
		xf_emit(ctx, 0x11, 0);
	else if (dev_priv->chipset >= 0xa0)
		xf_emit(ctx, 0xf, 0);
	else
		xf_emit(ctx, 0xe, 0);
	xf_emit(ctx, 1, 0x1a);
	xf_emit(ctx, 0xd, 0);
	xf_emit(ctx, 2, 4);
	xf_emit(ctx, 1, 0);
	xf_emit(ctx, 1, 4);
	xf_emit(ctx, 1, 8);
	xf_emit(ctx, 1, 0);
	if (dev_priv->chipset == 0x50)
		xf_emit(ctx, 1, 0x3ff);
	else
		xf_emit(ctx, 1, 0x7ff);
	/* NVA8 gets one extra slot here */
	if (dev_priv->chipset == 0xa8)
		xf_emit(ctx, 1, 0x1e00);
	xf_emit(ctx, 0xc, 0);
	xf_emit(ctx, 1, 0xf);
	if (dev_priv->chipset == 0x50)
		xf_emit(ctx, 0x125, 0);
	else if (dev_priv->chipset < 0xa0)
		xf_emit(ctx, 0x126, 0);
	else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa)
		xf_emit(ctx, 0x124, 0);
	else
		xf_emit(ctx, 0x1f7, 0);
	xf_emit(ctx, 1, 0xf);
	if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
		xf_emit(ctx, 3, 0);
	else
		xf_emit(ctx, 1, 0);
	xf_emit(ctx, 1, 1);
	if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
		xf_emit(ctx, 0xa1, 0);
	else
		xf_emit(ctx, 0x5a, 0);
	xf_emit(ctx, 1, 0xf);
	if (dev_priv->chipset < 0xa0)
		xf_emit(ctx, 0x834, 0);
	else if (dev_priv->chipset == 0xa0)
		xf_emit(ctx, 0x1873, 0);
	else if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
		xf_emit(ctx, 0x8ba, 0);
	else
		xf_emit(ctx, 0x833, 0);
	xf_emit(ctx, 1, 0xf);
	xf_emit(ctx, 0xf, 0);
}
1603
/*
 * Emit context values for an unidentified ("unk7") state block.
 * Reverse-engineered, order-sensitive; the only chipset variation is
 * a pair of slots that are 1 on NV50 and 0 elsewhere.  Do not reorder.
 */
static void
nv50_graph_construct_gene_unk7(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	/* middle of area 1 on pre-NVA0 [after m2mf], middle of area 6 on NVAx */
	xf_emit(ctx, 2, 0);
	if (dev_priv->chipset == 0x50)
		xf_emit(ctx, 2, 1);
	else
		xf_emit(ctx, 2, 0);
	xf_emit(ctx, 1, 0);
	xf_emit(ctx, 1, 1);
	xf_emit(ctx, 2, 0x100);
	xf_emit(ctx, 1, 0x11);
	xf_emit(ctx, 1, 0);
	xf_emit(ctx, 1, 8);
	xf_emit(ctx, 5, 0);
	xf_emit(ctx, 1, 1);
	xf_emit(ctx, 1, 0);
	xf_emit(ctx, 3, 1);
	xf_emit(ctx, 1, 0xcf);
	xf_emit(ctx, 1, 2);
	xf_emit(ctx, 6, 0);
	xf_emit(ctx, 1, 1);
	xf_emit(ctx, 1, 0);
	xf_emit(ctx, 3, 1);
	xf_emit(ctx, 4, 0);
	xf_emit(ctx, 1, 4);
	xf_emit(ctx, 1, 0);
	xf_emit(ctx, 1, 1);
	xf_emit(ctx, 1, 0x15);
	xf_emit(ctx, 3, 0);
	xf_emit(ctx, 1, 0x4444480);
	xf_emit(ctx, 0x37, 0);
}
1639
/*
 * Emit context values for an unidentified ("unk8") state block.
 * Chipset-independent fixed sequence, expressed as a count/value
 * table pushed in order.
 */
static void
nv50_graph_construct_gene_unk8(struct nouveau_grctx *ctx)
{
	/* middle of area 1 on pre-NVA0 [after m2mf], middle of area 0 on NVAx */
	static const struct {
		int count;
		unsigned value;
	} seq[] = {
		{ 4, 0x0000000 },
		{ 1, 0x8100c12 },
		{ 4, 0x0000000 },
		{ 1, 0x0000100 },
		{ 2, 0x0000000 },
		{ 1, 0x0010001 },
		{ 1, 0x0000000 },
		{ 1, 0x0010001 },
		{ 1, 0x0000001 },
		{ 1, 0x0010001 },
		{ 1, 0x0000001 },
		{ 1, 0x0000004 },
		{ 1, 0x0000002 },
	};
	int i;

	for (i = 0; i < (int)(sizeof(seq) / sizeof(seq[0])); i++)
		xf_emit(ctx, seq[i].count, seq[i].value);
}
1658
/*
 * Emit context values for an unidentified ("unk9") state block.
 * Reverse-engineered, order-sensitive sequence with per-chipset-group
 * variations (NV50 / pre-NVA0 / NVA0 / NVA3..NVA8 / NVAA+).
 * Do not reorder.
 */
static void
nv50_graph_construct_gene_unk9(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	/* middle of area 2 on pre-NVA0 [after m2mf], end of area 0 on NVAx */
	xf_emit(ctx, 1, 0x3f800000);	/* float 1.0 */
	xf_emit(ctx, 6, 0);
	xf_emit(ctx, 1, 4);
	xf_emit(ctx, 1, 0x1a);
	xf_emit(ctx, 2, 0);
	xf_emit(ctx, 1, 1);
	xf_emit(ctx, 0x12, 0);
	xf_emit(ctx, 1, 0x00ffff00);
	xf_emit(ctx, 6, 0);
	xf_emit(ctx, 1, 0xf);
	xf_emit(ctx, 7, 0);
	xf_emit(ctx, 1, 0x0fac6881);
	xf_emit(ctx, 1, 0x11);
	xf_emit(ctx, 0xf, 0);
	xf_emit(ctx, 1, 4);
	xf_emit(ctx, 2, 0);
	/* slot only present on NVA0 and newer; value differs per group */
	if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
		xf_emit(ctx, 1, 3);
	else if (dev_priv->chipset >= 0xa0)
		xf_emit(ctx, 1, 1);
	xf_emit(ctx, 2, 0);
	xf_emit(ctx, 1, 2);
	xf_emit(ctx, 2, 0x04000000);
	xf_emit(ctx, 3, 0);
	xf_emit(ctx, 1, 5);
	xf_emit(ctx, 1, 0x52);
	if (dev_priv->chipset == 0x50) {
		xf_emit(ctx, 0x13, 0);
	} else {
		xf_emit(ctx, 4, 0);
		xf_emit(ctx, 1, 1);
		if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
			xf_emit(ctx, 0x11, 0);
		else
			xf_emit(ctx, 0x10, 0);
	}
	xf_emit(ctx, 0x10, 0x3f800000);	/* sixteen copies of float 1.0 */
	xf_emit(ctx, 1, 0x10);
	xf_emit(ctx, 0x26, 0);
	xf_emit(ctx, 1, 0x8100c12);
	xf_emit(ctx, 1, 5);
	xf_emit(ctx, 2, 0);
	xf_emit(ctx, 1, 1);
	xf_emit(ctx, 1, 0);
	xf_emit(ctx, 4, 0xffff);
	if (dev_priv->chipset != 0x50)
		xf_emit(ctx, 1, 3);
	if (dev_priv->chipset < 0xa0)
		xf_emit(ctx, 0x1f, 0);
	else if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
		xf_emit(ctx, 0xc, 0);
	else
		xf_emit(ctx, 3, 0);
	xf_emit(ctx, 1, 0x00ffff00);
	xf_emit(ctx, 1, 0x1a);
	if (dev_priv->chipset != 0x50) {
		xf_emit(ctx, 1, 0);
		xf_emit(ctx, 1, 3);
	}
	if (dev_priv->chipset < 0xa0)
		xf_emit(ctx, 0x26, 0);
	else
		xf_emit(ctx, 0x3c, 0);
	xf_emit(ctx, 1, 0x102);
	xf_emit(ctx, 1, 0);
	xf_emit(ctx, 4, 4);
	if (dev_priv->chipset >= 0xa0)
		xf_emit(ctx, 8, 0);
	xf_emit(ctx, 2, 4);
	xf_emit(ctx, 1, 0);
	if (dev_priv->chipset == 0x50)
		xf_emit(ctx, 1, 0x3ff);
	else
		xf_emit(ctx, 1, 0x7ff);
	xf_emit(ctx, 1, 0);
	xf_emit(ctx, 1, 0x102);
	xf_emit(ctx, 9, 0);
	xf_emit(ctx, 4, 4);
	xf_emit(ctx, 0x2c, 0);
}
1744
/*
 * Emit context values for one ROP cluster (called once per enabled ROP
 * unit by the xfer1 constructor).  Reverse-engineered, order-sensitive;
 * magic2 is a per-chipset-group constant re-emitted in two slots.
 * Do not reorder.
 */
static void
nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	int magic2;	/* group constant: NV50 / (pre-NVA0, NVA0, NVAA+) / NVA3..NVA8 */
	if (dev_priv->chipset == 0x50) {
		magic2 = 0x00003e60;
	} else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) {
		magic2 = 0x001ffe67;
	} else {
		magic2 = 0x00087e67;
	}
	xf_emit(ctx, 8, 0);
	xf_emit(ctx, 1, 2);
	xf_emit(ctx, 1, 0);
	xf_emit(ctx, 1, magic2);
	xf_emit(ctx, 4, 0);
	if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
		xf_emit(ctx, 1, 1);
	xf_emit(ctx, 7, 0);
	/* NOTE(review): this test uses >= 0xa0 (includes NVA0) where the
	 * surrounding code uses > 0xa0; looks deliberate but unconfirmed */
	if (dev_priv->chipset >= 0xa0 && dev_priv->chipset < 0xaa)
		xf_emit(ctx, 1, 0x15);
	xf_emit(ctx, 1, 0);
	xf_emit(ctx, 1, 1);
	xf_emit(ctx, 1, 0x10);
	xf_emit(ctx, 2, 0);
	xf_emit(ctx, 1, 1);
	xf_emit(ctx, 4, 0);
	if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x92 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa0) {
		xf_emit(ctx, 1, 4);
		xf_emit(ctx, 1, 0x400);
		xf_emit(ctx, 1, 0x300);
		xf_emit(ctx, 1, 0x1001);
		if (dev_priv->chipset != 0xa0) {
			if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
				xf_emit(ctx, 1, 0);
			else
				xf_emit(ctx, 1, 0x15);
		}
		xf_emit(ctx, 3, 0);
	}
	xf_emit(ctx, 2, 0);
	xf_emit(ctx, 1, 2);
	xf_emit(ctx, 8, 0);
	xf_emit(ctx, 1, 1);
	xf_emit(ctx, 1, 0x10);
	xf_emit(ctx, 1, 0);
	xf_emit(ctx, 1, 1);
	xf_emit(ctx, 0x13, 0);
	xf_emit(ctx, 1, 0x10);
	xf_emit(ctx, 0x10, 0);
	xf_emit(ctx, 0x10, 0x3f800000);	/* sixteen copies of float 1.0 */
	xf_emit(ctx, 0x19, 0);
	xf_emit(ctx, 1, 0x10);
	xf_emit(ctx, 1, 0);
	xf_emit(ctx, 1, 0x3f);
	xf_emit(ctx, 6, 0);
	xf_emit(ctx, 1, 1);
	xf_emit(ctx, 1, 0);
	xf_emit(ctx, 1, 1);
	xf_emit(ctx, 1, 0);
	xf_emit(ctx, 1, 1);
	if (dev_priv->chipset >= 0xa0) {
		xf_emit(ctx, 2, 0);
		xf_emit(ctx, 1, 0x1001);
		xf_emit(ctx, 0xb, 0);
	} else {
		xf_emit(ctx, 0xc, 0);
	}
	xf_emit(ctx, 1, 0x11);
	xf_emit(ctx, 7, 0);
	xf_emit(ctx, 1, 0xf);
	xf_emit(ctx, 7, 0);
	xf_emit(ctx, 1, 0x11);
	if (dev_priv->chipset == 0x50)
		xf_emit(ctx, 4, 0);
	else
		xf_emit(ctx, 6, 0);
	xf_emit(ctx, 3, 1);
	xf_emit(ctx, 1, 2);
	xf_emit(ctx, 1, 1);
	xf_emit(ctx, 1, 2);
	xf_emit(ctx, 1, 1);
	xf_emit(ctx, 1, 0);
	xf_emit(ctx, 1, magic2);
	xf_emit(ctx, 1, 0);
	xf_emit(ctx, 1, 0x0fac6881);
	/* NVA3..NVA8 carry a large extra run of 1/2-patterned state here */
	if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
		xf_emit(ctx, 1, 0);
		xf_emit(ctx, 0x18, 1);
		xf_emit(ctx, 8, 2);
		xf_emit(ctx, 8, 1);
		xf_emit(ctx, 8, 2);
		xf_emit(ctx, 8, 1);
		xf_emit(ctx, 3, 0);
		xf_emit(ctx, 1, 1);
		xf_emit(ctx, 5, 0);
		xf_emit(ctx, 1, 1);
		xf_emit(ctx, 0x16, 0);
	} else {
		if (dev_priv->chipset >= 0xa0)
			xf_emit(ctx, 0x1b, 0);
		else
			xf_emit(ctx, 0x15, 0);
	}
	xf_emit(ctx, 1, 1);
	xf_emit(ctx, 1, 2);
	xf_emit(ctx, 2, 1);
	xf_emit(ctx, 1, 2);
	xf_emit(ctx, 2, 1);
	if (dev_priv->chipset >= 0xa0)
		xf_emit(ctx, 4, 0);
	else
		xf_emit(ctx, 3, 0);
	if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
		xf_emit(ctx, 0x10, 1);
		xf_emit(ctx, 8, 2);
		xf_emit(ctx, 0x10, 1);
		xf_emit(ctx, 8, 2);
		xf_emit(ctx, 8, 1);
		xf_emit(ctx, 3, 0);
	}
	xf_emit(ctx, 1, 0x11);
	xf_emit(ctx, 1, 1);
	xf_emit(ctx, 0x5b, 0);
}
1871
/*
 * Emit the first unidentified per-TP state sub-block.  magic3 is a
 * per-chipset-group constant emitted (twice) only on the groups where
 * it is non-zero.  Order-sensitive; do not reorder.
 */
static void
nv50_graph_construct_xfer_tp_x1(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	int magic3;	/* 0x1000 on NV50, 0x1e00 on NV86/NV98/NVA8+, else absent */
	if (dev_priv->chipset == 0x50)
		magic3 = 0x1000;
	else if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa8)
		magic3 = 0x1e00;
	else
		magic3 = 0;
	xf_emit(ctx, 1, 0);
	xf_emit(ctx, 1, 4);
	if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
		xf_emit(ctx, 0x24, 0);
	else if (dev_priv->chipset >= 0xa0)
		xf_emit(ctx, 0x14, 0);
	else
		xf_emit(ctx, 0x15, 0);
	xf_emit(ctx, 2, 4);
	if (dev_priv->chipset >= 0xa0)
		xf_emit(ctx, 1, 0x03020100);
	else
		xf_emit(ctx, 1, 0x00608080);
	xf_emit(ctx, 4, 0);
	xf_emit(ctx, 1, 4);
	xf_emit(ctx, 2, 0);
	xf_emit(ctx, 2, 4);
	xf_emit(ctx, 1, 0x80);
	if (magic3)
		xf_emit(ctx, 1, magic3);
	xf_emit(ctx, 1, 4);
	xf_emit(ctx, 0x24, 0);
	xf_emit(ctx, 1, 4);
	xf_emit(ctx, 1, 0x80);
	xf_emit(ctx, 1, 4);
	xf_emit(ctx, 1, 0x03020100);
	xf_emit(ctx, 1, 3);
	if (magic3)
		xf_emit(ctx, 1, magic3);
	xf_emit(ctx, 1, 4);
	xf_emit(ctx, 4, 0);
	xf_emit(ctx, 1, 4);
	xf_emit(ctx, 1, 3);
	xf_emit(ctx, 3, 0);
	xf_emit(ctx, 1, 4);
	/* large zero-filled region, size depends on chipset group */
	if (dev_priv->chipset == 0x94 || dev_priv->chipset == 0x96)
		xf_emit(ctx, 0x1024, 0);
	else if (dev_priv->chipset < 0xa0)
		xf_emit(ctx, 0xa24, 0);
	else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa)
		xf_emit(ctx, 0x214, 0);
	else
		xf_emit(ctx, 0x414, 0);
	xf_emit(ctx, 1, 4);
	xf_emit(ctx, 1, 3);
	xf_emit(ctx, 2, 0);
}
1930
1931static void
1932nv50_graph_construct_xfer_tp_x2(struct nouveau_grctx *ctx)
1933{
1934 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
1935 int magic1, magic2;
1936 if (dev_priv->chipset == 0x50) {
1937 magic1 = 0x3ff;
1938 magic2 = 0x00003e60;
1939 } else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) {
1940 magic1 = 0x7ff;
1941 magic2 = 0x001ffe67;
1942 } else {
1943 magic1 = 0x7ff;
1944 magic2 = 0x00087e67;
1945 }
1946 xf_emit(ctx, 3, 0);
1947 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1948 xf_emit(ctx, 1, 1);
1949 xf_emit(ctx, 0xc, 0);
1950 xf_emit(ctx, 1, 0xf);
1951 xf_emit(ctx, 0xb, 0);
1952 xf_emit(ctx, 1, 4);
1953 xf_emit(ctx, 4, 0xffff);
1954 xf_emit(ctx, 8, 0);
1955 xf_emit(ctx, 1, 1);
1956 xf_emit(ctx, 3, 0);
1957 xf_emit(ctx, 1, 1);
1958 xf_emit(ctx, 5, 0);
1959 xf_emit(ctx, 1, 1);
1960 xf_emit(ctx, 2, 0);
1961 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
1962 xf_emit(ctx, 1, 3);
1963 xf_emit(ctx, 1, 0);
1964 } else if (dev_priv->chipset >= 0xa0)
1965 xf_emit(ctx, 1, 1);
1966 xf_emit(ctx, 0xa, 0);
1967 xf_emit(ctx, 2, 1);
1968 xf_emit(ctx, 1, 2);
1969 xf_emit(ctx, 2, 1);
1970 xf_emit(ctx, 1, 2);
1971 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
1972 xf_emit(ctx, 1, 0);
1973 xf_emit(ctx, 0x18, 1);
1974 xf_emit(ctx, 8, 2);
1975 xf_emit(ctx, 8, 1);
1976 xf_emit(ctx, 8, 2);
1977 xf_emit(ctx, 8, 1);
1978 xf_emit(ctx, 1, 0);
1979 }
1980 xf_emit(ctx, 1, 1);
1981 xf_emit(ctx, 1, 0);
1982 xf_emit(ctx, 1, 0x11);
1983 xf_emit(ctx, 7, 0);
1984 xf_emit(ctx, 1, 0x0fac6881);
1985 xf_emit(ctx, 2, 0);
1986 xf_emit(ctx, 1, 4);
1987 xf_emit(ctx, 3, 0);
1988 xf_emit(ctx, 1, 0x11);
1989 xf_emit(ctx, 1, 1);
1990 xf_emit(ctx, 1, 0);
1991 xf_emit(ctx, 3, 0xcf);
1992 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1993 xf_emit(ctx, 1, 1);
1994 xf_emit(ctx, 0xa, 0);
1995 xf_emit(ctx, 2, 1);
1996 xf_emit(ctx, 1, 2);
1997 xf_emit(ctx, 2, 1);
1998 xf_emit(ctx, 1, 2);
1999 xf_emit(ctx, 1, 1);
2000 xf_emit(ctx, 1, 0);
2001 xf_emit(ctx, 8, 1);
2002 xf_emit(ctx, 1, 0x11);
2003 xf_emit(ctx, 7, 0);
2004 xf_emit(ctx, 1, 0x0fac6881);
2005 xf_emit(ctx, 1, 0xf);
2006 xf_emit(ctx, 7, 0);
2007 xf_emit(ctx, 1, magic2);
2008 xf_emit(ctx, 2, 0);
2009 xf_emit(ctx, 1, 0x11);
2010 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
2011 xf_emit(ctx, 2, 1);
2012 else
2013 xf_emit(ctx, 1, 1);
2014 if(dev_priv->chipset == 0x50)
2015 xf_emit(ctx, 1, 0);
2016 else
2017 xf_emit(ctx, 3, 0);
2018 xf_emit(ctx, 1, 4);
2019 xf_emit(ctx, 5, 0);
2020 xf_emit(ctx, 1, 1);
2021 xf_emit(ctx, 4, 0);
2022 xf_emit(ctx, 1, 0x11);
2023 xf_emit(ctx, 7, 0);
2024 xf_emit(ctx, 1, 0x0fac6881);
2025 xf_emit(ctx, 3, 0);
2026 xf_emit(ctx, 1, 0x11);
2027 xf_emit(ctx, 1, 1);
2028 xf_emit(ctx, 1, 0);
2029 xf_emit(ctx, 1, 1);
2030 xf_emit(ctx, 1, 0);
2031 xf_emit(ctx, 1, 1);
2032 xf_emit(ctx, 1, 0);
2033 xf_emit(ctx, 1, magic1);
2034 xf_emit(ctx, 1, 0);
2035 xf_emit(ctx, 1, 1);
2036 xf_emit(ctx, 1, 0);
2037 xf_emit(ctx, 1, 1);
2038 xf_emit(ctx, 2, 0);
2039 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
2040 xf_emit(ctx, 1, 1);
2041 xf_emit(ctx, 0x28, 0);
2042 xf_emit(ctx, 8, 8);
2043 xf_emit(ctx, 1, 0x11);
2044 xf_emit(ctx, 7, 0);
2045 xf_emit(ctx, 1, 0x0fac6881);
2046 xf_emit(ctx, 8, 0x400);
2047 xf_emit(ctx, 8, 0x300);
2048 xf_emit(ctx, 1, 1);
2049 xf_emit(ctx, 1, 0xf);
2050 xf_emit(ctx, 7, 0);
2051 xf_emit(ctx, 1, 0x20);
2052 xf_emit(ctx, 1, 0x11);
2053 xf_emit(ctx, 1, 0x100);
2054 xf_emit(ctx, 1, 0);
2055 xf_emit(ctx, 1, 1);
2056 xf_emit(ctx, 2, 0);
2057 xf_emit(ctx, 1, 0x40);
2058 xf_emit(ctx, 1, 0x100);
2059 xf_emit(ctx, 1, 0);
2060 xf_emit(ctx, 1, 3);
2061 xf_emit(ctx, 4, 0);
2062 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
2063 xf_emit(ctx, 1, 1);
2064 xf_emit(ctx, 1, magic2);
2065 xf_emit(ctx, 3, 0);
2066 xf_emit(ctx, 1, 2);
2067 xf_emit(ctx, 1, 0x0fac6881);
2068 xf_emit(ctx, 9, 0);
2069 xf_emit(ctx, 1, 1);
2070 xf_emit(ctx, 4, 0);
2071 xf_emit(ctx, 1, 4);
2072 xf_emit(ctx, 1, 0);
2073 xf_emit(ctx, 1, 1);
2074 xf_emit(ctx, 1, 0x400);
2075 xf_emit(ctx, 1, 0x300);
2076 xf_emit(ctx, 1, 0x1001);
2077 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
2078 xf_emit(ctx, 4, 0);
2079 else
2080 xf_emit(ctx, 3, 0);
2081 xf_emit(ctx, 1, 0x11);
2082 xf_emit(ctx, 7, 0);
2083 xf_emit(ctx, 1, 0x0fac6881);
2084 xf_emit(ctx, 1, 0xf);
2085 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
2086 xf_emit(ctx, 0x15, 0);
2087 xf_emit(ctx, 1, 1);
2088 xf_emit(ctx, 3, 0);
2089 } else
2090 xf_emit(ctx, 0x17, 0);
2091 if (dev_priv->chipset >= 0xa0)
2092 xf_emit(ctx, 1, 0x0fac6881);
2093 xf_emit(ctx, 1, magic2);
2094 xf_emit(ctx, 3, 0);
2095 xf_emit(ctx, 1, 0x11);
2096 xf_emit(ctx, 2, 0);
2097 xf_emit(ctx, 1, 4);
2098 xf_emit(ctx, 1, 0);
2099 xf_emit(ctx, 2, 1);
2100 xf_emit(ctx, 3, 0);
2101 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
2102 xf_emit(ctx, 2, 1);
2103 else
2104 xf_emit(ctx, 1, 1);
2105 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
2106 xf_emit(ctx, 2, 0);
2107 else if (dev_priv->chipset != 0x50)
2108 xf_emit(ctx, 1, 0);
2109}
2110
/*
 * Emit the third unidentified per-TP state sub-block.  Mostly
 * chipset-independent; NV50 has one fewer zero slot near the start.
 * Order-sensitive; do not reorder.
 */
static void
nv50_graph_construct_xfer_tp_x3(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	xf_emit(ctx, 3, 0);
	xf_emit(ctx, 1, 1);
	xf_emit(ctx, 1, 0);
	xf_emit(ctx, 1, 1);
	if (dev_priv->chipset == 0x50)
		xf_emit(ctx, 2, 0);
	else
		xf_emit(ctx, 3, 0);
	xf_emit(ctx, 1, 0x2a712488);
	xf_emit(ctx, 1, 0);
	xf_emit(ctx, 1, 0x4085c000);
	xf_emit(ctx, 1, 0x40);
	xf_emit(ctx, 1, 0x100);
	xf_emit(ctx, 1, 0x10100);
	xf_emit(ctx, 1, 0x02800000);
}
2131
/*
 * Emit the fourth unidentified per-TP state sub-block.  The only
 * chipset variation is an extra 1-valued slot on NVA3..NVA8.
 * Order-sensitive; do not reorder.
 */
static void
nv50_graph_construct_xfer_tp_x4(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	xf_emit(ctx, 2, 0x04e3bfdf);
	xf_emit(ctx, 1, 1);
	xf_emit(ctx, 1, 0);
	xf_emit(ctx, 1, 0x00ffff00);
	if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
		xf_emit(ctx, 2, 1);
	else
		xf_emit(ctx, 1, 1);
	xf_emit(ctx, 2, 0);
	xf_emit(ctx, 1, 0x00ffff00);
	xf_emit(ctx, 8, 0);
	xf_emit(ctx, 1, 1);
	xf_emit(ctx, 1, 0);
	xf_emit(ctx, 1, 1);
	/* ascending nibble-pattern table, emitted as four packed words */
	xf_emit(ctx, 1, 0x30201000);
	xf_emit(ctx, 1, 0x70605040);
	xf_emit(ctx, 1, 0xb8a89888);
	xf_emit(ctx, 1, 0xf8e8d8c8);
	xf_emit(ctx, 1, 0);
	xf_emit(ctx, 1, 0x1a);
}
2157
/*
 * Emit the fifth unidentified per-TP state sub-block (NVA0+ only —
 * see nv50_graph_construct_xfer_tp).  Order-sensitive; do not reorder.
 */
static void
nv50_graph_construct_xfer_tp_x5(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	xf_emit(ctx, 3, 0);
	xf_emit(ctx, 1, 0xfac6881);
	xf_emit(ctx, 4, 0);
	xf_emit(ctx, 1, 4);
	xf_emit(ctx, 1, 0);
	xf_emit(ctx, 2, 1);
	xf_emit(ctx, 2, 0);
	xf_emit(ctx, 1, 1);
	if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
		xf_emit(ctx, 0xb, 0);
	else
		xf_emit(ctx, 0xa, 0);
	xf_emit(ctx, 8, 1);
	xf_emit(ctx, 1, 0x11);
	xf_emit(ctx, 7, 0);
	xf_emit(ctx, 1, 0xfac6881);
	xf_emit(ctx, 1, 0xf);
	xf_emit(ctx, 7, 0);
	xf_emit(ctx, 1, 0x11);
	xf_emit(ctx, 1, 1);
	if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
		xf_emit(ctx, 6, 0);
		xf_emit(ctx, 1, 1);
		xf_emit(ctx, 6, 0);
	} else {
		xf_emit(ctx, 0xb, 0);
	}
}
2190
/*
 * Build the per-TP portion of the xfer1 context area by dispatching to
 * the x1..x5 sub-blocks.  Pre-NVA0 and NVA0+ lay the sub-blocks out in
 * a different order (and x5 exists only on NVA0+); the call order here
 * defines the context image layout, so it must not be changed.
 */
static void
nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	if (dev_priv->chipset < 0xa0) {
		nv50_graph_construct_xfer_tp_x1(ctx);
		nv50_graph_construct_xfer_tp_x2(ctx);
		nv50_graph_construct_xfer_tp_x3(ctx);
		if (dev_priv->chipset == 0x50)
			xf_emit(ctx, 0xf, 0);
		else
			xf_emit(ctx, 0x12, 0);
		nv50_graph_construct_xfer_tp_x4(ctx);
	} else {
		nv50_graph_construct_xfer_tp_x3(ctx);
		if (dev_priv->chipset < 0xaa)
			xf_emit(ctx, 0xc, 0);
		else
			xf_emit(ctx, 0xa, 0);
		nv50_graph_construct_xfer_tp_x2(ctx);
		nv50_graph_construct_xfer_tp_x5(ctx);
		nv50_graph_construct_xfer_tp_x4(ctx);
		nv50_graph_construct_xfer_tp_x1(ctx);
	}
}
2216
/*
 * Build one TP's portion of the xfer2 context area: a per-MP block
 * repeated mpcnt times, followed by shared TP state.  mpcnt is
 * presumably the number of MPs per TP for the chipset — TODO confirm
 * against hardware docs.  Order-sensitive; do not reorder.
 */
static void
nv50_graph_construct_xfer_tp2(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	int i, mpcnt;	/* number of per-MP iterations for this chipset */
	if (dev_priv->chipset == 0x98 || dev_priv->chipset == 0xaa)
		mpcnt = 1;
	else if (dev_priv->chipset < 0xa0 || dev_priv->chipset >= 0xa8)
		mpcnt = 2;
	else
		mpcnt = 3;
	for (i = 0; i < mpcnt; i++) {
		xf_emit(ctx, 1, 0);
		xf_emit(ctx, 1, 0x80);
		xf_emit(ctx, 1, 0x80007004);
		xf_emit(ctx, 1, 0x04000400);
		if (dev_priv->chipset >= 0xa0)
			xf_emit(ctx, 1, 0xc0);
		xf_emit(ctx, 1, 0x1000);
		xf_emit(ctx, 2, 0);
		if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa8) {
			xf_emit(ctx, 1, 0xe00);
			xf_emit(ctx, 1, 0x1e00);
		}
		xf_emit(ctx, 1, 1);
		xf_emit(ctx, 2, 0);
		if (dev_priv->chipset == 0x50)
			xf_emit(ctx, 2, 0x1000);
		xf_emit(ctx, 1, 1);
		xf_emit(ctx, 1, 0);
		xf_emit(ctx, 1, 4);
		xf_emit(ctx, 1, 2);
		if (dev_priv->chipset >= 0xaa)
			xf_emit(ctx, 0xb, 0);
		else if (dev_priv->chipset >= 0xa0)
			xf_emit(ctx, 0xc, 0);
		else
			xf_emit(ctx, 0xa, 0);
	}
	/* shared (non-per-MP) TP state follows */
	xf_emit(ctx, 1, 0x08100c12);
	xf_emit(ctx, 1, 0);
	if (dev_priv->chipset >= 0xa0) {
		xf_emit(ctx, 1, 0x1fe21);
	}
	xf_emit(ctx, 5, 0);
	xf_emit(ctx, 4, 0xffff);
	xf_emit(ctx, 1, 1);
	xf_emit(ctx, 2, 0x10001);
	xf_emit(ctx, 1, 1);
	xf_emit(ctx, 1, 0);
	xf_emit(ctx, 1, 0x1fe21);
	xf_emit(ctx, 1, 0);
	if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
		xf_emit(ctx, 1, 1);
	xf_emit(ctx, 4, 0);
	xf_emit(ctx, 1, 0x08100c12);
	xf_emit(ctx, 1, 4);
	xf_emit(ctx, 1, 0);
	xf_emit(ctx, 1, 2);
	xf_emit(ctx, 1, 0x11);
	xf_emit(ctx, 8, 0);
	xf_emit(ctx, 1, 0xfac6881);
	xf_emit(ctx, 1, 0);
	if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
		xf_emit(ctx, 1, 3);
	xf_emit(ctx, 3, 0);
	xf_emit(ctx, 1, 4);
	xf_emit(ctx, 9, 0);
	xf_emit(ctx, 1, 2);
	xf_emit(ctx, 2, 1);
	xf_emit(ctx, 1, 2);
	xf_emit(ctx, 3, 1);
	xf_emit(ctx, 1, 0);
	if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
		xf_emit(ctx, 8, 2);
		xf_emit(ctx, 0x10, 1);
		xf_emit(ctx, 8, 2);
		xf_emit(ctx, 0x18, 1);
		xf_emit(ctx, 3, 0);
	}
	xf_emit(ctx, 1, 4);
	/* big zero-filled tail, sized per chipset group */
	if (dev_priv->chipset == 0x50)
		xf_emit(ctx, 0x3a0, 0);
	else if (dev_priv->chipset < 0x94)
		xf_emit(ctx, 0x3a2, 0);
	else if (dev_priv->chipset == 0x98 || dev_priv->chipset == 0xaa)
		xf_emit(ctx, 0x39f, 0);
	else
		xf_emit(ctx, 0x3a3, 0);
	xf_emit(ctx, 1, 0x11);
	xf_emit(ctx, 1, 0);
	xf_emit(ctx, 1, 1);
	xf_emit(ctx, 0x2d, 0);
}
2311
/*
 * Construct the xfer2 area of the context image.  Reads the TP enable
 * mask from register 0x1540 and emits per-TP state (via
 * nv50_graph_construct_xfer_tp2) for each enabled TP, distributing TPs
 * across the 8 context strands.  Pre-NVA0 uses one strand per TP;
 * NVA0+ packs 2-3 TPs per strand.  Afterwards, records the final
 * (64-byte aligned) size and emits the CP commands that perform the
 * actual transfer.
 */
static void
nv50_graph_construct_xfer2(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	int i;
	uint32_t offset;			/* aligned start of the xfer2 area */
	uint32_t units = nv_rd32 (ctx->dev, 0x1540);	/* TP enable bitmask */
	int size = 0;				/* max strand length seen, in 8-value units */

	offset = (ctx->ctxvals_pos+0x3f)&~0x3f;

	if (dev_priv->chipset < 0xa0) {
		/* one strand per TP */
		for (i = 0; i < 8; i++) {
			ctx->ctxvals_pos = offset + i;
			if (i == 0)
				xf_emit(ctx, 1, 0x08100c12);
			if (units & (1 << i))
				nv50_graph_construct_xfer_tp2(ctx);
			if ((ctx->ctxvals_pos-offset)/8 > size)
				size = (ctx->ctxvals_pos-offset)/8;
		}
	} else {
		/* Strand 0: TPs 0, 1 */
		ctx->ctxvals_pos = offset;
		xf_emit(ctx, 1, 0x08100c12);
		if (units & (1 << 0))
			nv50_graph_construct_xfer_tp2(ctx);
		if (units & (1 << 1))
			nv50_graph_construct_xfer_tp2(ctx);
		if ((ctx->ctxvals_pos-offset)/8 > size)
			size = (ctx->ctxvals_pos-offset)/8;

		/* Strand 0: TPs 2, 3 */
		ctx->ctxvals_pos = offset + 1;
		if (units & (1 << 2))
			nv50_graph_construct_xfer_tp2(ctx);
		if (units & (1 << 3))
			nv50_graph_construct_xfer_tp2(ctx);
		if ((ctx->ctxvals_pos-offset)/8 > size)
			size = (ctx->ctxvals_pos-offset)/8;

		/* Strand 0: TPs 4, 5, 6 */
		ctx->ctxvals_pos = offset + 2;
		if (units & (1 << 4))
			nv50_graph_construct_xfer_tp2(ctx);
		if (units & (1 << 5))
			nv50_graph_construct_xfer_tp2(ctx);
		if (units & (1 << 6))
			nv50_graph_construct_xfer_tp2(ctx);
		if ((ctx->ctxvals_pos-offset)/8 > size)
			size = (ctx->ctxvals_pos-offset)/8;

		/* Strand 0: TPs 7, 8, 9 */
		ctx->ctxvals_pos = offset + 3;
		if (units & (1 << 7))
			nv50_graph_construct_xfer_tp2(ctx);
		if (units & (1 << 8))
			nv50_graph_construct_xfer_tp2(ctx);
		if (units & (1 << 9))
			nv50_graph_construct_xfer_tp2(ctx);
		if ((ctx->ctxvals_pos-offset)/8 > size)
			size = (ctx->ctxvals_pos-offset)/8;
	}
	/* advance past the area and realign, then emit the CP transfer */
	ctx->ctxvals_pos = offset + size * 8;
	ctx->ctxvals_pos = (ctx->ctxvals_pos+0x3f)&~0x3f;
	cp_lsr (ctx, offset);
	cp_out (ctx, CP_SET_XFER_POINTER);
	cp_lsr (ctx, size);
	cp_out (ctx, CP_SEEK_2);
	cp_out (ctx, CP_XFER_2);
	cp_wait(ctx, XFER, BUSY);
}
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
new file mode 100644
index 000000000000..5f21df31f3aa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -0,0 +1,527 @@
1/*
2 * Copyright (C) 2007 Ben Skeggs.
3 *
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sublicense, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial
16 * portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 */
27
28#include "drmP.h"
29#include "drm.h"
30#include "nouveau_drv.h"
31
/* Private per-device state for the NV50 instance-memory (PRAMIN) engine. */
struct nv50_instmem_priv {
	uint32_t save1700[5]; /* 0x1700->0x1710 */ /* MMIO regs saved at init, restored at takedown */

	struct nouveau_gpuobj_ref *pramin_pt;	/* page table mapping PRAMIN into the VM */
	struct nouveau_gpuobj_ref *pramin_bar;	/* DMA object backing the PRAMIN BAR */
	struct nouveau_gpuobj_ref *fb_bar;	/* DMA object backing the framebuffer BAR */

	/* true if the last prepare_access() was for writing; finish_access()
	 * only flushes PRAMIN after a write session */
	bool last_access_wr;
};

#define NV50_INSTMEM_PAGE_SHIFT 12
#define NV50_INSTMEM_PAGE_SIZE  (1 << NV50_INSTMEM_PAGE_SHIFT)
/* Bytes of page table needed to cover (a) bytes: one 8-byte PTE per 4 KiB page */
#define NV50_INSTMEM_PT_SIZE(a)	(((a) >> 12) << 3)

/*NOTE: - Assumes 0x1700 already covers the correct MiB of PRAMIN
 */
/* Write 32-bit value (v) at byte offset (o) into gpuobj (g) through the
 * BAR0 PRAMIN window.  NOTE: implicitly captures locals `dev` and `chan`
 * from the expansion site; `chan` is only evaluated when (g) has no VRAM
 * backing of its own. */
#define BAR0_WI32(g, o, v) do {                                   \
	uint32_t offset;                                          \
	if ((g)->im_backing) {                                    \
		offset = (g)->im_backing_start;                   \
	} else {                                                  \
		offset = chan->ramin->gpuobj->im_backing_start;   \
		offset += (g)->im_pramin->start;                  \
	}                                                         \
	offset += (o);                                            \
	nv_wr32(dev, NV_RAMIN + (offset & 0xfffff), (v));         \
} while (0)
59
/*
 * nv50_instmem_init - bring up the NV50 instance-memory (PRAMIN) engine.
 *
 * Reserves the last MiB of VRAM, builds a fake channel used as channels
 * 0/127 containing RAMFC, a VM page directory and page tables, creates
 * DMA objects for the PRAMIN and FB BARs, then points the hardware BAR
 * config registers at them and verifies PRAMIN reads back correctly.
 *
 * Returns 0 on success, negative errno on failure.
 *
 * NOTE(review): the error paths below return without freeing `priv` or
 * `chan` / partially-created gpuobjs — presumably the caller invokes
 * takedown on failure; TODO confirm.
 */
int
nv50_instmem_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan;
	uint32_t c_offset, c_size, c_ramfc, c_vmpd, c_base, pt_size;
	uint32_t save_nv001700;
	uint64_t v;
	struct nv50_instmem_priv *priv;
	int ret, i;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	dev_priv->engine.instmem.priv = priv;

	/* Save state, will restore at takedown. */
	for (i = 0x1700; i <= 0x1710; i += 4)
		priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i);

	/* Reserve the last MiB of VRAM, we should probably try to avoid
	 * setting up the below tables over the top of the VBIOS image at
	 * some point.
	 */
	dev_priv->ramin_rsvd_vram = 1 << 20;
	c_offset = dev_priv->vram_size - dev_priv->ramin_rsvd_vram;
	c_size = 128 << 10;
	/* RAMFC / page-directory offsets differ between NV50 and later chips */
	c_vmpd = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x1400 : 0x200;
	c_ramfc = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x0 : 0x20;
	c_base = c_vmpd + 0x4000;
	pt_size = NV50_INSTMEM_PT_SIZE(dev_priv->ramin_size);

	NV_DEBUG(dev, " Rsvd VRAM base: 0x%08x\n", c_offset);
	NV_DEBUG(dev, " VBIOS image: 0x%08x\n",
		 (nv_rd32(dev, 0x619f04) & ~0xff) << 8);
	NV_DEBUG(dev, " Aperture size: %d MiB\n", dev_priv->ramin_size >> 20);
	NV_DEBUG(dev, " PT size: %d KiB\n", pt_size >> 10);

	/* Determine VM layout, we need to do this first to make sure
	 * we allocate enough memory for all the page tables.
	 */
	dev_priv->vm_gart_base = roundup(NV50_VM_BLOCK, NV50_VM_BLOCK);
	dev_priv->vm_gart_size = NV50_VM_BLOCK;

	dev_priv->vm_vram_base = dev_priv->vm_gart_base + dev_priv->vm_gart_size;
	dev_priv->vm_vram_size = dev_priv->vram_size;
	if (dev_priv->vm_vram_size > NV50_VM_MAX_VRAM)
		dev_priv->vm_vram_size = NV50_VM_MAX_VRAM;
	dev_priv->vm_vram_size = roundup(dev_priv->vm_vram_size, NV50_VM_BLOCK);
	dev_priv->vm_vram_pt_nr = dev_priv->vm_vram_size / NV50_VM_BLOCK;

	dev_priv->vm_end = dev_priv->vm_vram_base + dev_priv->vm_vram_size;

	NV_DEBUG(dev, "NV50VM: GART 0x%016llx-0x%016llx\n",
		 dev_priv->vm_gart_base,
		 dev_priv->vm_gart_base + dev_priv->vm_gart_size - 1);
	NV_DEBUG(dev, "NV50VM: VRAM 0x%016llx-0x%016llx\n",
		 dev_priv->vm_vram_base,
		 dev_priv->vm_vram_base + dev_priv->vm_vram_size - 1);

	/* Grow the reserved block to hold one VRAM page table per VM block */
	c_size += dev_priv->vm_vram_pt_nr * (NV50_VM_BLOCK / 65536 * 8);

	/* Map BAR0 PRAMIN aperture over the memory we want to use */
	save_nv001700 = nv_rd32(dev, NV50_PUNK_BAR0_PRAMIN);
	nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, (c_offset >> 16));

	/* Create a fake channel, and use it as our "dummy" channels 0/127.
	 * The main reason for creating a channel is so we can use the gpuobj
	 * code.  However, it's probably worth noting that NVIDIA also setup
	 * their channels 0/127 with the same values they configure here.
	 * So, there may be some other reason for doing this.
	 *
	 * Have to create the entire channel manually, as the real channel
	 * creation code assumes we have PRAMIN access, and we don't until
	 * we're done here.
	 */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;
	chan->id = 0;
	chan->dev = dev;
	chan->file_priv = (struct drm_file *)-2;
	dev_priv->fifos[0] = dev_priv->fifos[127] = chan;

	/* Channel's PRAMIN object + heap */
	ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, c_size, 0,
				      NULL, &chan->ramin);
	if (ret)
		return ret;

	if (nouveau_mem_init_heap(&chan->ramin_heap, c_base, c_size - c_base))
		return -ENOMEM;

	/* RAMFC + zero channel's PRAMIN up to start of VM pagedir */
	ret = nouveau_gpuobj_new_fake(dev, c_ramfc, c_offset + c_ramfc,
				      0x4000, 0, NULL, &chan->ramfc);
	if (ret)
		return ret;

	for (i = 0; i < c_vmpd; i += 4)
		BAR0_WI32(chan->ramin->gpuobj, i, 0);

	/* VM page directory */
	ret = nouveau_gpuobj_new_fake(dev, c_vmpd, c_offset + c_vmpd,
				      0x4000, 0, &chan->vm_pd, NULL);
	if (ret)
		return ret;
	for (i = 0; i < 0x4000; i += 8) {
		BAR0_WI32(chan->vm_pd, i + 0x00, 0x00000000);
		BAR0_WI32(chan->vm_pd, i + 0x04, 0x00000000);
	}

	/* PRAMIN page table, cheat and map into VM at 0x0000000000.
	 * We map the entire fake channel into the start of the PRAMIN BAR
	 */
	ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000,
				     0, &priv->pramin_pt);
	if (ret)
		return ret;

	/* Low bit marks the PTE present; 0x30 presumably flags the
	 * stolen-system-memory case on IGPs — values taken as-is. */
	v = c_offset | 1;
	if (dev_priv->vram_sys_base) {
		v += dev_priv->vram_sys_base;
		v |= 0x30;
	}

	/* Fill present PTEs covering the reserved block... */
	i = 0;
	while (v < dev_priv->vram_sys_base + c_offset + c_size) {
		BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, lower_32_bits(v));
		BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, upper_32_bits(v));
		v += 0x1000;
		i += 8;
	}

	/* ...and clear the remainder of the page table. */
	while (i < pt_size) {
		BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000000);
		BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
		i += 8;
	}

	BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63);
	BAR0_WI32(chan->vm_pd, 0x04, 0x00000000);

	/* VRAM page table(s), mapped into VM at +1GiB */
	for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
		ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0,
					     NV50_VM_BLOCK/65536*8, 0, 0,
					     &chan->vm_vram_pt[i]);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM page tables: %d\n",
				 ret);
			/* record how many were created so takedown frees them */
			dev_priv->vm_vram_pt_nr = i;
			return ret;
		}
		dev_priv->vm_vram_pt[i] = chan->vm_vram_pt[i]->gpuobj;

		for (v = 0; v < dev_priv->vm_vram_pt[i]->im_pramin->size;
		     v += 4)
			BAR0_WI32(dev_priv->vm_vram_pt[i], v, 0);

		BAR0_WI32(chan->vm_pd, 0x10 + (i*8),
			  chan->vm_vram_pt[i]->instance | 0x61);
		BAR0_WI32(chan->vm_pd, 0x14 + (i*8), 0);
	}

	/* DMA object for PRAMIN BAR */
	ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 6*4, 16, 0,
				     &priv->pramin_bar);
	if (ret)
		return ret;
	BAR0_WI32(priv->pramin_bar->gpuobj, 0x00, 0x7fc00000);
	BAR0_WI32(priv->pramin_bar->gpuobj, 0x04, dev_priv->ramin_size - 1);
	BAR0_WI32(priv->pramin_bar->gpuobj, 0x08, 0x00000000);
	BAR0_WI32(priv->pramin_bar->gpuobj, 0x0c, 0x00000000);
	BAR0_WI32(priv->pramin_bar->gpuobj, 0x10, 0x00000000);
	BAR0_WI32(priv->pramin_bar->gpuobj, 0x14, 0x00000000);

	/* DMA object for FB BAR */
	ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 6*4, 16, 0,
				     &priv->fb_bar);
	if (ret)
		return ret;
	BAR0_WI32(priv->fb_bar->gpuobj, 0x00, 0x7fc00000);
	BAR0_WI32(priv->fb_bar->gpuobj, 0x04, 0x40000000 +
					      drm_get_resource_len(dev, 1) - 1);
	BAR0_WI32(priv->fb_bar->gpuobj, 0x08, 0x40000000);
	BAR0_WI32(priv->fb_bar->gpuobj, 0x0c, 0x00000000);
	BAR0_WI32(priv->fb_bar->gpuobj, 0x10, 0x00000000);
	BAR0_WI32(priv->fb_bar->gpuobj, 0x14, 0x00000000);

	/* Poke the relevant regs, and pray it works :) */
	nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12));
	nv_wr32(dev, NV50_PUNK_UNK1710, 0);
	nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12) |
					 NV50_PUNK_BAR_CFG_BASE_VALID);
	nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->instance >> 4) |
					NV50_PUNK_BAR1_CTXDMA_VALID);
	nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->instance >> 4) |
					NV50_PUNK_BAR3_CTXDMA_VALID);

	for (i = 0; i < 8; i++)
		nv_wr32(dev, 0x1900 + (i*4), 0);

	/* Assume that praying isn't enough, check that we can re-read the
	 * entire fake channel back from the PRAMIN BAR */
	dev_priv->engine.instmem.prepare_access(dev, false);
	for (i = 0; i < c_size; i += 4) {
		if (nv_rd32(dev, NV_RAMIN + i) != nv_ri32(dev, i)) {
			NV_ERROR(dev, "Error reading back PRAMIN at 0x%08x\n",
				 i);
			dev_priv->engine.instmem.finish_access(dev);
			return -EINVAL;
		}
	}
	dev_priv->engine.instmem.finish_access(dev);

	nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, save_nv001700);

	/* Global PRAMIN heap */
	if (nouveau_mem_init_heap(&dev_priv->ramin_heap,
				  c_size, dev_priv->ramin_size - c_size)) {
		dev_priv->ramin_heap = NULL;
		NV_ERROR(dev, "Failed to init RAMIN heap\n");
	}

	/*XXX: incorrect, but needed to make hash func "work" */
	dev_priv->ramht_offset = 0x10000;
	dev_priv->ramht_bits = 9;
	dev_priv->ramht_size = (1 << dev_priv->ramht_bits);
	return 0;
}
291
292void
293nv50_instmem_takedown(struct drm_device *dev)
294{
295 struct drm_nouveau_private *dev_priv = dev->dev_private;
296 struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
297 struct nouveau_channel *chan = dev_priv->fifos[0];
298 int i;
299
300 NV_DEBUG(dev, "\n");
301
302 if (!priv)
303 return;
304
305 /* Restore state from before init */
306 for (i = 0x1700; i <= 0x1710; i += 4)
307 nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]);
308
309 nouveau_gpuobj_ref_del(dev, &priv->fb_bar);
310 nouveau_gpuobj_ref_del(dev, &priv->pramin_bar);
311 nouveau_gpuobj_ref_del(dev, &priv->pramin_pt);
312
313 /* Destroy dummy channel */
314 if (chan) {
315 for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
316 nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]);
317 dev_priv->vm_vram_pt[i] = NULL;
318 }
319 dev_priv->vm_vram_pt_nr = 0;
320
321 nouveau_gpuobj_del(dev, &chan->vm_pd);
322 nouveau_gpuobj_ref_del(dev, &chan->ramfc);
323 nouveau_gpuobj_ref_del(dev, &chan->ramin);
324 nouveau_mem_takedown(&chan->ramin_heap);
325
326 dev_priv->fifos[0] = dev_priv->fifos[127] = NULL;
327 kfree(chan);
328 }
329
330 dev_priv->engine.instmem.priv = NULL;
331 kfree(priv);
332}
333
334int
335nv50_instmem_suspend(struct drm_device *dev)
336{
337 struct drm_nouveau_private *dev_priv = dev->dev_private;
338 struct nouveau_channel *chan = dev_priv->fifos[0];
339 struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
340 int i;
341
342 ramin->im_backing_suspend = vmalloc(ramin->im_pramin->size);
343 if (!ramin->im_backing_suspend)
344 return -ENOMEM;
345
346 for (i = 0; i < ramin->im_pramin->size; i += 4)
347 ramin->im_backing_suspend[i/4] = nv_ri32(dev, i);
348 return 0;
349}
350
351void
352nv50_instmem_resume(struct drm_device *dev)
353{
354 struct drm_nouveau_private *dev_priv = dev->dev_private;
355 struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
356 struct nouveau_channel *chan = dev_priv->fifos[0];
357 struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
358 int i;
359
360 nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, (ramin->im_backing_start >> 16));
361 for (i = 0; i < ramin->im_pramin->size; i += 4)
362 BAR0_WI32(ramin, i, ramin->im_backing_suspend[i/4]);
363 vfree(ramin->im_backing_suspend);
364 ramin->im_backing_suspend = NULL;
365
366 /* Poke the relevant regs, and pray it works :) */
367 nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12));
368 nv_wr32(dev, NV50_PUNK_UNK1710, 0);
369 nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12) |
370 NV50_PUNK_BAR_CFG_BASE_VALID);
371 nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->instance >> 4) |
372 NV50_PUNK_BAR1_CTXDMA_VALID);
373 nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->instance >> 4) |
374 NV50_PUNK_BAR3_CTXDMA_VALID);
375
376 for (i = 0; i < 8; i++)
377 nv_wr32(dev, 0x1900 + (i*4), 0);
378}
379
380int
381nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
382 uint32_t *sz)
383{
384 int ret;
385
386 if (gpuobj->im_backing)
387 return -EINVAL;
388
389 *sz = ALIGN(*sz, NV50_INSTMEM_PAGE_SIZE);
390 if (*sz == 0)
391 return -EINVAL;
392
393 ret = nouveau_bo_new(dev, NULL, *sz, 0, TTM_PL_FLAG_VRAM, 0, 0x0000,
394 true, false, &gpuobj->im_backing);
395 if (ret) {
396 NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret);
397 return ret;
398 }
399
400 ret = nouveau_bo_pin(gpuobj->im_backing, TTM_PL_FLAG_VRAM);
401 if (ret) {
402 NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret);
403 nouveau_bo_ref(NULL, &gpuobj->im_backing);
404 return ret;
405 }
406
407 gpuobj->im_backing_start = gpuobj->im_backing->bo.mem.mm_node->start;
408 gpuobj->im_backing_start <<= PAGE_SHIFT;
409
410 return 0;
411}
412
413void
414nv50_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
415{
416 struct drm_nouveau_private *dev_priv = dev->dev_private;
417
418 if (gpuobj && gpuobj->im_backing) {
419 if (gpuobj->im_bound)
420 dev_priv->engine.instmem.unbind(dev, gpuobj);
421 nouveau_bo_unpin(gpuobj->im_backing);
422 nouveau_bo_ref(NULL, &gpuobj->im_backing);
423 gpuobj->im_backing = NULL;
424 }
425}
426
/*
 * Map a populated gpuobj's VRAM backing into the PRAMIN page table so
 * the object becomes accessible through the PRAMIN BAR.
 * Returns 0 on success, -EINVAL on bad state, -EBUSY on flush timeout.
 */
int
nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
	struct nouveau_gpuobj *pramin_pt = priv->pramin_pt->gpuobj;
	uint32_t pte, pte_end;
	uint64_t vram;

	/* Must have backing and a PRAMIN allocation, and not already be bound */
	if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
		return -EINVAL;

	NV_DEBUG(dev, "st=0x%0llx sz=0x%0llx\n",
		 gpuobj->im_pramin->start, gpuobj->im_pramin->size);

	/* Two 32-bit PTE words per 4 KiB page. */
	pte = (gpuobj->im_pramin->start >> 12) << 1;
	pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
	vram = gpuobj->im_backing_start;

	NV_DEBUG(dev, "pramin=0x%llx, pte=%d, pte_end=%d\n",
		 gpuobj->im_pramin->start, pte, pte_end);
	NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start);

	vram |= 1;	/* low bit marks the PTE present */
	if (dev_priv->vram_sys_base) {
		/* "VRAM" is carved out of system memory on this chip;
		 * 0x30 presumably selects the system-memory target —
		 * values taken as-is from hardware usage. */
		vram += dev_priv->vram_sys_base;
		vram |= 0x30;
	}

	/* Fill one 64-bit PTE (two writes) per page of backing. */
	dev_priv->engine.instmem.prepare_access(dev, true);
	while (pte < pte_end) {
		nv_wo32(dev, pramin_pt, pte++, lower_32_bits(vram));
		nv_wo32(dev, pramin_pt, pte++, upper_32_bits(vram));
		vram += NV50_INSTMEM_PAGE_SIZE;
	}
	dev_priv->engine.instmem.finish_access(dev);

	/* Kick 0x100c80 and wait for bit 0 to clear — NOTE(review):
	 * presumably a VM/TLB flush; register semantics undocumented here. */
	nv_wr32(dev, 0x100c80, 0x00040001);
	if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
		NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (1)\n");
		NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
		return -EBUSY;
	}

	nv_wr32(dev, 0x100c80, 0x00060001);
	if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
		NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
		NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
		return -EBUSY;
	}

	gpuobj->im_bound = 1;
	return 0;
}
481
482int
483nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
484{
485 struct drm_nouveau_private *dev_priv = dev->dev_private;
486 struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
487 uint32_t pte, pte_end;
488
489 if (gpuobj->im_bound == 0)
490 return -EINVAL;
491
492 pte = (gpuobj->im_pramin->start >> 12) << 1;
493 pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
494
495 dev_priv->engine.instmem.prepare_access(dev, true);
496 while (pte < pte_end) {
497 nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
498 nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
499 }
500 dev_priv->engine.instmem.finish_access(dev);
501
502 gpuobj->im_bound = 0;
503 return 0;
504}
505
506void
507nv50_instmem_prepare_access(struct drm_device *dev, bool write)
508{
509 struct drm_nouveau_private *dev_priv = dev->dev_private;
510 struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
511
512 priv->last_access_wr = write;
513}
514
515void
516nv50_instmem_finish_access(struct drm_device *dev)
517{
518 struct drm_nouveau_private *dev_priv = dev->dev_private;
519 struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
520
521 if (priv->last_access_wr) {
522 nv_wr32(dev, 0x070000, 0x00000001);
523 if (!nv_wait(0x070000, 0x00000001, 0x00000000))
524 NV_ERROR(dev, "PRAMIN flush timeout\n");
525 }
526}
527
diff --git a/drivers/gpu/drm/nouveau/nv50_mc.c b/drivers/gpu/drm/nouveau/nv50_mc.c
new file mode 100644
index 000000000000..e0a9c3faa202
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_mc.c
@@ -0,0 +1,40 @@
1/*
2 * Copyright (C) 2007 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm.h"
29#include "nouveau_drv.h"
30
/*
 * Master-control init for NV50: enable every engine in PMC.
 * Always returns 0.
 */
int
nv50_mc_init(struct drm_device *dev)
{
	nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
	return 0;
}
37
/* Nothing to undo: PMC engine enables are left in place on teardown. */
void nv50_mc_takedown(struct drm_device *dev)
{
}
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
new file mode 100644
index 000000000000..0c68698f23df
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -0,0 +1,346 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm_crtc_helper.h"
29
30#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
31#include "nouveau_reg.h"
32#include "nouveau_drv.h"
33#include "nouveau_dma.h"
34#include "nouveau_encoder.h"
35#include "nouveau_connector.h"
36#include "nouveau_crtc.h"
37#include "nv50_display.h"
38
39static void
40nv50_sor_disconnect(struct nouveau_encoder *nv_encoder)
41{
42 struct drm_device *dev = to_drm_encoder(nv_encoder)->dev;
43 struct drm_nouveau_private *dev_priv = dev->dev_private;
44 struct nouveau_channel *evo = dev_priv->evo;
45 int ret;
46
47 NV_DEBUG_KMS(dev, "Disconnecting SOR %d\n", nv_encoder->or);
48
49 ret = RING_SPACE(evo, 2);
50 if (ret) {
51 NV_ERROR(dev, "no space while disconnecting SOR\n");
52 return;
53 }
54 BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
55 OUT_RING(evo, 0);
56}
57
58static void
59nv50_sor_dp_link_train(struct drm_encoder *encoder)
60{
61 struct drm_device *dev = encoder->dev;
62 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
63 struct bit_displayport_encoder_table *dpe;
64 int dpe_headerlen;
65
66 dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
67 if (!dpe) {
68 NV_ERROR(dev, "SOR-%d: no DP encoder table!\n", nv_encoder->or);
69 return;
70 }
71
72 if (dpe->script0) {
73 NV_DEBUG_KMS(dev, "SOR-%d: running DP script 0\n", nv_encoder->or);
74 nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script0),
75 nv_encoder->dcb);
76 }
77
78 if (!nouveau_dp_link_train(encoder))
79 NV_ERROR(dev, "SOR-%d: link training failed\n", nv_encoder->or);
80
81 if (dpe->script1) {
82 NV_DEBUG_KMS(dev, "SOR-%d: running DP script 1\n", nv_encoder->or);
83 nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script1),
84 nv_encoder->dcb);
85 }
86}
87
/*
 * DPMS hook for SOR outputs.  Records the requested state, skips the
 * hardware entirely if another encoder sharing the same OR is still on,
 * then toggles the ON bit in SOR_DPMS_CTRL and waits for the state
 * machine to settle.  DP outputs are re-link-trained when switched on.
 */
static void
nv50_sor_dpms(struct drm_encoder *encoder, int mode)
{
	struct drm_device *dev = encoder->dev;
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_encoder *enc;
	uint32_t val;
	int or = nv_encoder->or;

	NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode);

	nv_encoder->last_dpms = mode;
	/* Don't touch the hardware while another SOR encoder on the same
	 * OR is still lit. */
	list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
		struct nouveau_encoder *nvenc = nouveau_encoder(enc);

		if (nvenc == nv_encoder ||
		    nvenc->disconnect != nv50_sor_disconnect ||
		    nvenc->dcb->or != nv_encoder->dcb->or)
			continue;

		if (nvenc->last_dpms == DRM_MODE_DPMS_ON)
			return;
	}

	/* wait for it to be done */
	/* i.e. wait for any previous DPMS transition to finish first */
	if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_CTRL(or),
		     NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING, 0)) {
		NV_ERROR(dev, "timeout: SOR_DPMS_CTRL_PENDING(%d) == 0\n", or);
		NV_ERROR(dev, "SOR_DPMS_CTRL(%d) = 0x%08x\n", or,
			 nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or)));
	}

	val = nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or));

	if (mode == DRM_MODE_DPMS_ON)
		val |= NV50_PDISPLAY_SOR_DPMS_CTRL_ON;
	else
		val &= ~NV50_PDISPLAY_SOR_DPMS_CTRL_ON;

	/* Write the new state with PENDING set, then wait for completion. */
	nv_wr32(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or), val |
		NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING);
	if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_STATE(or),
		     NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
		NV_ERROR(dev, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", or);
		NV_ERROR(dev, "SOR_DPMS_STATE(%d) = 0x%08x\n", or,
			 nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_STATE(or)));
	}

	if (nv_encoder->dcb->type == OUTPUT_DP && mode == DRM_MODE_DPMS_ON)
		nv50_sor_dp_link_train(encoder);
}
139
/* State-save hook: not implemented for SORs; complain loudly if called. */
static void
nv50_sor_save(struct drm_encoder *encoder)
{
	NV_ERROR(encoder->dev, "!!\n");
}
145
/* State-restore hook: not implemented for SORs; complain loudly if called. */
static void
nv50_sor_restore(struct drm_encoder *encoder)
{
	NV_ERROR(encoder->dev, "!!\n");
}
151
152static bool
153nv50_sor_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
154 struct drm_display_mode *adjusted_mode)
155{
156 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
157 struct nouveau_connector *connector;
158
159 NV_DEBUG_KMS(encoder->dev, "or %d\n", nv_encoder->or);
160
161 connector = nouveau_encoder_connector_get(nv_encoder);
162 if (!connector) {
163 NV_ERROR(encoder->dev, "Encoder has no connector\n");
164 return false;
165 }
166
167 if (connector->scaling_mode != DRM_MODE_SCALE_NONE &&
168 connector->native_mode) {
169 int id = adjusted_mode->base.id;
170 *adjusted_mode = *connector->native_mode;
171 adjusted_mode->base.id = id;
172 }
173
174 return true;
175}
176
/* No pre-mode-set work required for SOR outputs. */
static void
nv50_sor_prepare(struct drm_encoder *encoder)
{
}
181
/* No post-mode-set work required for SOR outputs. */
static void
nv50_sor_commit(struct drm_encoder *encoder)
{
}
186
/*
 * Program the SOR's MODE_CTRL method on the EVO channel for the given
 * adjusted mode: output-type/link selection bits, CRTC routing and sync
 * polarity flags.  Powers the output on first.
 */
static void
nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
		  struct drm_display_mode *adjusted_mode)
{
	struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
	struct nouveau_channel *evo = dev_priv->evo;
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_device *dev = encoder->dev;
	struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
	uint32_t mode_ctl = 0;
	int ret;

	NV_DEBUG_KMS(dev, "or %d\n", nv_encoder->or);

	nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);

	/* NOTE(review): mode_ctl magic values taken as-is; meanings
	 * presumably select SOR protocol/link — not documented here. */
	switch (nv_encoder->dcb->type) {
	case OUTPUT_TMDS:
		if (nv_encoder->dcb->sorconf.link & 1) {
			/* link A: switch encoding above 165 MHz pixel clock */
			if (adjusted_mode->clock < 165000)
				mode_ctl = 0x0100;
			else
				mode_ctl = 0x0500;
		} else
			mode_ctl = 0x0200;
		break;
	case OUTPUT_DP:
		mode_ctl |= (nv_encoder->dp.mc_unknown << 16);
		if (nv_encoder->dcb->sorconf.link & 1)
			mode_ctl |= 0x00000800;
		else
			mode_ctl |= 0x00000900;
		break;
	default:
		break;
	}

	/* Route the SOR to the CRTC driving this encoder. */
	if (crtc->index == 1)
		mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC1;
	else
		mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC0;

	if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
		mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NHSYNC;

	if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
		mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NVSYNC;

	ret = RING_SPACE(evo, 2);
	if (ret) {
		NV_ERROR(dev, "no space while connecting SOR\n");
		return;
	}
	BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
	OUT_RING(evo, mode_ctl);
}
243
/* drm_encoder helper vtable for NV50 SOR outputs. */
static const struct drm_encoder_helper_funcs nv50_sor_helper_funcs = {
	.dpms = nv50_sor_dpms,
	.save = nv50_sor_save,
	.restore = nv50_sor_restore,
	.mode_fixup = nv50_sor_mode_fixup,
	.prepare = nv50_sor_prepare,
	.commit = nv50_sor_commit,
	.mode_set = nv50_sor_mode_set,
	.detect = NULL
};
254
255static void
256nv50_sor_destroy(struct drm_encoder *encoder)
257{
258 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
259
260 if (!encoder)
261 return;
262
263 NV_DEBUG_KMS(encoder->dev, "\n");
264
265 drm_encoder_cleanup(encoder);
266
267 kfree(nv_encoder);
268}
269
/* drm_encoder core vtable; only the destroy hook is needed. */
static const struct drm_encoder_funcs nv50_sor_encoder_funcs = {
	.destroy = nv50_sor_destroy,
};
273
/*
 * Create and register a DRM encoder for a DCB SOR entry (TMDS, LVDS or
 * DP).  Returns 0 on success, -EINVAL for unsupported or unparseable
 * entries, -ENOMEM on allocation failure.
 */
int
nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_encoder *nv_encoder = NULL;
	struct drm_encoder *encoder;
	bool dum;	/* throwaway outputs for the LVDS table parse */
	int type;

	NV_DEBUG_KMS(dev, "\n");

	switch (entry->type) {
	case OUTPUT_TMDS:
		NV_INFO(dev, "Detected a TMDS output\n");
		type = DRM_MODE_ENCODER_TMDS;
		break;
	case OUTPUT_LVDS:
		NV_INFO(dev, "Detected a LVDS output\n");
		type = DRM_MODE_ENCODER_LVDS;

		/* Bail early if the VBIOS LVDS table can't be parsed. */
		if (nouveau_bios_parse_lvds_table(dev, 0, &dum, &dum)) {
			NV_ERROR(dev, "Failed parsing LVDS table\n");
			return -EINVAL;
		}
		break;
	case OUTPUT_DP:
		NV_INFO(dev, "Detected a DP output\n");
		type = DRM_MODE_ENCODER_TMDS;
		break;
	default:
		return -EINVAL;
	}

	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
	if (!nv_encoder)
		return -ENOMEM;
	encoder = to_drm_encoder(nv_encoder);

	nv_encoder->dcb = entry;
	/* OR index is the lowest bit set in the DCB `or` mask. */
	nv_encoder->or = ffs(entry->or) - 1;

	nv_encoder->disconnect = nv50_sor_disconnect;

	drm_encoder_init(dev, encoder, &nv50_sor_encoder_funcs, type);
	drm_encoder_helper_add(encoder, &nv50_sor_helper_funcs);

	encoder->possible_crtcs = entry->heads;
	encoder->possible_clones = 0;

	if (nv_encoder->dcb->type == OUTPUT_DP) {
		uint32_t mc, or = nv_encoder->or;

		/* MODE_CTRL register moved on NV90+ (except 0x92/0xa0). */
		if (dev_priv->chipset < 0x90 ||
		    dev_priv->chipset == 0x92 || dev_priv->chipset == 0xa0)
			mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_C(or));
		else
			mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_C(or));

		/* NOTE(review): field meanings undocumented here; the
		 * value is read back from current hardware state. */
		switch ((mc & 0x00000f00) >> 8) {
		case 8:
		case 9:
			nv_encoder->dp.mc_unknown = (mc & 0x000f0000) >> 16;
			break;
		default:
			break;
		}

		/* Fall back to a default when hardware gave us nothing. */
		if (!nv_encoder->dp.mc_unknown)
			nv_encoder->dp.mc_unknown = 5;
	}

	return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvreg.h b/drivers/gpu/drm/nouveau/nvreg.h
new file mode 100644
index 000000000000..5998c35237b0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvreg.h
@@ -0,0 +1,535 @@
/* $XConsortium: nvreg.h /main/2 1996/10/28 05:13:41 kaleb $ */
/*
 * Copyright 1996-1997 David J. McKay
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * DAVID J. MCKAY BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/* $XFree86: xc/programs/Xserver/hw/xfree86/drivers/nv/nvreg.h,v 1.6 2002/01/25 21:56:06 tsi Exp $ */

#ifndef __NVREG_H_
#define __NVREG_H_

/*
 * MMIO apertures (offset/size of each hardware unit within BAR0), followed
 * by individual register and bitfield definitions, inherited from the
 * xf86-video-nv X driver.  Definitions written as "hi:lo" (e.g. 4:0) are
 * bit-range tokens, presumably consumed by mask/shift helper macros
 * elsewhere in the driver -- they are not valid C expressions on their own.
 */

#define NV_PMC_OFFSET               0x00000000
#define NV_PMC_SIZE                 0x00001000

#define NV_PBUS_OFFSET              0x00001000
#define NV_PBUS_SIZE                0x00001000

#define NV_PFIFO_OFFSET             0x00002000
#define NV_PFIFO_SIZE               0x00002000

#define NV_HDIAG_OFFSET             0x00005000
#define NV_HDIAG_SIZE               0x00001000

#define NV_PRAM_OFFSET              0x00006000
#define NV_PRAM_SIZE                0x00001000

#define NV_PVIDEO_OFFSET            0x00008000
#define NV_PVIDEO_SIZE              0x00001000

#define NV_PTIMER_OFFSET            0x00009000
#define NV_PTIMER_SIZE              0x00001000

#define NV_PPM_OFFSET               0x0000A000
#define NV_PPM_SIZE                 0x00001000

#define NV_PTV_OFFSET               0x0000D000
#define NV_PTV_SIZE                 0x00001000

#define NV_PRMVGA_OFFSET            0x000A0000
#define NV_PRMVGA_SIZE              0x00020000

#define NV_PRMVIO0_OFFSET           0x000C0000
#define NV_PRMVIO_SIZE              0x00002000
#define NV_PRMVIO1_OFFSET           0x000C2000

#define NV_PFB_OFFSET               0x00100000
#define NV_PFB_SIZE                 0x00001000

#define NV_PEXTDEV_OFFSET           0x00101000
#define NV_PEXTDEV_SIZE             0x00001000

#define NV_PME_OFFSET               0x00200000
#define NV_PME_SIZE                 0x00001000

#define NV_PROM_OFFSET              0x00300000
#define NV_PROM_SIZE                0x00010000

#define NV_PGRAPH_OFFSET            0x00400000
#define NV_PGRAPH_SIZE              0x00010000

#define NV_PCRTC0_OFFSET            0x00600000
#define NV_PCRTC0_SIZE              0x00002000 /* empirical */

#define NV_PRMCIO0_OFFSET           0x00601000
#define NV_PRMCIO_SIZE              0x00002000
#define NV_PRMCIO1_OFFSET           0x00603000

#define NV50_DISPLAY_OFFSET         0x00610000
#define NV50_DISPLAY_SIZE           0x0000FFFF

#define NV_PRAMDAC0_OFFSET          0x00680000
#define NV_PRAMDAC0_SIZE            0x00002000

#define NV_PRMDIO0_OFFSET           0x00681000
#define NV_PRMDIO_SIZE              0x00002000
#define NV_PRMDIO1_OFFSET           0x00683000

#define NV_PRAMIN_OFFSET            0x00700000
#define NV_PRAMIN_SIZE              0x00100000

#define NV_FIFO_OFFSET              0x00800000
#define NV_FIFO_SIZE                0x00800000

#define NV_PMC_BOOT_0               0x00000000
#define NV_PMC_ENABLE               0x00000200

#define NV_VIO_VSE2                 0x000003c3
#define NV_VIO_SRX                  0x000003c4

#define NV_CIO_CRX__COLOR           0x000003d4
#define NV_CIO_CR__COLOR            0x000003d5

#define NV_PBUS_DEBUG_1             0x00001084
#define NV_PBUS_DEBUG_4             0x00001098
#define NV_PBUS_DEBUG_DUALHEAD_CTL  0x000010f0
#define NV_PBUS_POWERCTRL_1         0x00001584
#define NV_PBUS_POWERCTRL_2         0x00001588
#define NV_PBUS_POWERCTRL_4         0x00001590
#define NV_PBUS_PCI_NV_19           0x0000184C
#define NV_PBUS_PCI_NV_20           0x00001850
#	define NV_PBUS_PCI_NV_20_ROM_SHADOW_DISABLED   (0 << 0)
#	define NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED    (1 << 0)

#define NV_PFIFO_RAMHT              0x00002210

#define NV_PTV_TV_INDEX             0x0000d220
#define NV_PTV_TV_DATA              0x0000d224
#define NV_PTV_HFILTER              0x0000d310
#define NV_PTV_HFILTER2             0x0000d390
#define NV_PTV_VFILTER              0x0000d510

#define NV_PRMVIO_MISC__WRITE       0x000c03c2
#define NV_PRMVIO_SRX               0x000c03c4
#define NV_PRMVIO_SR                0x000c03c5
#	define NV_VIO_SR_RESET_INDEX           0x00
#	define NV_VIO_SR_CLOCK_INDEX           0x01
#	define NV_VIO_SR_PLANE_MASK_INDEX      0x02
#	define NV_VIO_SR_CHAR_MAP_INDEX        0x03
#	define NV_VIO_SR_MEM_MODE_INDEX        0x04
#define NV_PRMVIO_MISC__READ        0x000c03cc
#define NV_PRMVIO_GRX               0x000c03ce
#define NV_PRMVIO_GX                0x000c03cf
#	define NV_VIO_GX_SR_INDEX              0x00
#	define NV_VIO_GX_SREN_INDEX            0x01
#	define NV_VIO_GX_CCOMP_INDEX           0x02
#	define NV_VIO_GX_ROP_INDEX             0x03
#	define NV_VIO_GX_READ_MAP_INDEX        0x04
#	define NV_VIO_GX_MODE_INDEX            0x05
#	define NV_VIO_GX_MISC_INDEX            0x06
#	define NV_VIO_GX_DONT_CARE_INDEX       0x07
#	define NV_VIO_GX_BIT_MASK_INDEX        0x08

#define NV_PFB_BOOT_0               0x00100000
#define NV_PFB_CFG0                 0x00100200
#define NV_PFB_CFG1                 0x00100204
#define NV_PFB_CSTATUS              0x0010020C
#define NV_PFB_REFCTRL              0x00100210
#	define NV_PFB_REFCTRL_VALID_1          (1 << 31)
#define NV_PFB_PAD                  0x0010021C
#	define NV_PFB_PAD_CKE_NORMAL           (1 << 0)
#define NV_PFB_TILE_NV10            0x00100240
#define NV_PFB_TILE_SIZE_NV10       0x00100244
#define NV_PFB_REF                  0x001002D0
#	define NV_PFB_REF_CMD_REFRESH          (1 << 0)
#define NV_PFB_PRE                  0x001002D4
#	define NV_PFB_PRE_CMD_PRECHARGE        (1 << 0)
#define NV_PFB_CLOSE_PAGE2          0x0010033C
#define NV_PFB_TILE_NV40            0x00100600
#define NV_PFB_TILE_SIZE_NV40       0x00100604

#define NV_PEXTDEV_BOOT_0           0x00101000
#	define NV_PEXTDEV_BOOT_0_STRAP_FP_IFACE_12BIT  (8 << 12)
#define NV_PEXTDEV_BOOT_3           0x0010100c

#define NV_PCRTC_INTR_0             0x00600100
#	define NV_PCRTC_INTR_0_VBLANK          (1 << 0)
#define NV_PCRTC_INTR_EN_0          0x00600140
#define NV_PCRTC_START              0x00600800
#define NV_PCRTC_CONFIG             0x00600804
#	define NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA   (1 << 0)
#	define NV_PCRTC_CONFIG_START_ADDRESS_HSYNC     (2 << 0)
#define NV_PCRTC_CURSOR_CONFIG      0x00600810
#	define NV_PCRTC_CURSOR_CONFIG_ENABLE_ENABLE            (1 << 0)
#	define NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE       (1 << 4)
#	define NV_PCRTC_CURSOR_CONFIG_ADDRESS_SPACE_PNVM       (1 << 8)
#	define NV_PCRTC_CURSOR_CONFIG_CUR_BPP_32               (1 << 12)
#	define NV_PCRTC_CURSOR_CONFIG_CUR_PIXELS_64            (1 << 16)
#	define NV_PCRTC_CURSOR_CONFIG_CUR_LINES_32             (2 << 24)
#	define NV_PCRTC_CURSOR_CONFIG_CUR_LINES_64             (4 << 24)
#	define NV_PCRTC_CURSOR_CONFIG_CUR_BLEND_ALPHA          (1 << 28)

/* note: PCRTC_GPIO is not available on nv10, and in fact aliases 0x600810 */
#define NV_PCRTC_GPIO               0x00600818
#define NV_PCRTC_GPIO_EXT           0x0060081c
#define NV_PCRTC_830                0x00600830
#define NV_PCRTC_834                0x00600834
#define NV_PCRTC_850                0x00600850
#define NV_PCRTC_ENGINE_CTRL        0x00600860
#	define NV_CRTC_FSEL_I2C                (1 << 4)
#	define NV_CRTC_FSEL_OVERLAY            (1 << 12)

#define NV_PRMCIO_ARX               0x006013c0
#define NV_PRMCIO_AR__WRITE         0x006013c0
#define NV_PRMCIO_AR__READ          0x006013c1
#	define NV_CIO_AR_MODE_INDEX            0x10
#	define NV_CIO_AR_OSCAN_INDEX           0x11
#	define NV_CIO_AR_PLANE_INDEX           0x12
#	define NV_CIO_AR_HPP_INDEX             0x13
#	define NV_CIO_AR_CSEL_INDEX            0x14
#define NV_PRMCIO_INP0              0x006013c2
#define NV_PRMCIO_CRX__COLOR        0x006013d4
#define NV_PRMCIO_CR__COLOR         0x006013d5
	/* Standard VGA CRTC registers */
#	define NV_CIO_CR_HDT_INDEX             0x00    /* horizontal display total */
#	define NV_CIO_CR_HDE_INDEX             0x01    /* horizontal display end */
#	define NV_CIO_CR_HBS_INDEX             0x02    /* horizontal blanking start */
#	define NV_CIO_CR_HBE_INDEX             0x03    /* horizontal blanking end */
#		define NV_CIO_CR_HBE_4_0               4:0
#	define NV_CIO_CR_HRS_INDEX             0x04    /* horizontal retrace start */
#	define NV_CIO_CR_HRE_INDEX             0x05    /* horizontal retrace end */
#		define NV_CIO_CR_HRE_4_0               4:0
#		define NV_CIO_CR_HRE_HBE_5             7:7
#	define NV_CIO_CR_VDT_INDEX             0x06    /* vertical display total */
#	define NV_CIO_CR_OVL_INDEX             0x07    /* overflow bits */
#		define NV_CIO_CR_OVL_VDT_8             0:0
#		define NV_CIO_CR_OVL_VDE_8             1:1
#		define NV_CIO_CR_OVL_VRS_8             2:2
#		define NV_CIO_CR_OVL_VBS_8             3:3
#		define NV_CIO_CR_OVL_VDT_9             5:5
#		define NV_CIO_CR_OVL_VDE_9             6:6
#		define NV_CIO_CR_OVL_VRS_9             7:7
#	define NV_CIO_CR_RSAL_INDEX            0x08    /* normally "preset row scan" */
#	define NV_CIO_CR_CELL_HT_INDEX         0x09    /* cell height?! normally "max scan line" */
#		define NV_CIO_CR_CELL_HT_VBS_9         5:5
#		define NV_CIO_CR_CELL_HT_SCANDBL       7:7
#	define NV_CIO_CR_CURS_ST_INDEX         0x0a    /* cursor start */
#	define NV_CIO_CR_CURS_END_INDEX        0x0b    /* cursor end */
#	define NV_CIO_CR_SA_HI_INDEX           0x0c    /* screen start address high */
#	define NV_CIO_CR_SA_LO_INDEX           0x0d    /* screen start address low */
#	define NV_CIO_CR_TCOFF_HI_INDEX        0x0e    /* cursor offset high */
#	define NV_CIO_CR_TCOFF_LO_INDEX        0x0f    /* cursor offset low */
#	define NV_CIO_CR_VRS_INDEX             0x10    /* vertical retrace start */
#	define NV_CIO_CR_VRE_INDEX             0x11    /* vertical retrace end */
#		define NV_CIO_CR_VRE_3_0               3:0
#	define NV_CIO_CR_VDE_INDEX             0x12    /* vertical display end */
#	define NV_CIO_CR_OFFSET_INDEX          0x13    /* sets screen pitch */
#	define NV_CIO_CR_ULINE_INDEX           0x14    /* underline location */
#	define NV_CIO_CR_VBS_INDEX             0x15    /* vertical blank start */
#	define NV_CIO_CR_VBE_INDEX             0x16    /* vertical blank end */
#	define NV_CIO_CR_MODE_INDEX            0x17    /* crtc mode control */
#	define NV_CIO_CR_LCOMP_INDEX           0x18    /* line compare */
	/* Extended VGA CRTC registers */
#	define NV_CIO_CRE_RPC0_INDEX           0x19    /* repaint control 0 */
#		define NV_CIO_CRE_RPC0_OFFSET_10_8     7:5
#	define NV_CIO_CRE_RPC1_INDEX           0x1a    /* repaint control 1 */
#		define NV_CIO_CRE_RPC1_LARGE           2:2
#	define NV_CIO_CRE_FF_INDEX             0x1b    /* fifo control */
#	define NV_CIO_CRE_ENH_INDEX            0x1c    /* enhanced? */
#	define NV_CIO_SR_LOCK_INDEX            0x1f    /* crtc lock */
#		define NV_CIO_SR_UNLOCK_RW_VALUE       0x57
#		define NV_CIO_SR_LOCK_VALUE            0x99
#	define NV_CIO_CRE_FFLWM__INDEX         0x20    /* fifo low water mark */
#	define NV_CIO_CRE_21                   0x21    /* vga shadow crtc lock */
#	define NV_CIO_CRE_LSR_INDEX            0x25    /* ? */
#		define NV_CIO_CRE_LSR_VDT_10           0:0
#		define NV_CIO_CRE_LSR_VDE_10           1:1
#		define NV_CIO_CRE_LSR_VRS_10           2:2
#		define NV_CIO_CRE_LSR_VBS_10           3:3
#		define NV_CIO_CRE_LSR_HBE_6            4:4
#	define NV_CIO_CR_ARX_INDEX             0x26    /* attribute index -- ro copy of 0x60.3c0 */
#	define NV_CIO_CRE_CHIP_ID_INDEX        0x27    /* chip revision */
#	define NV_CIO_CRE_PIXEL_INDEX          0x28
#		define NV_CIO_CRE_PIXEL_FORMAT         1:0
#	define NV_CIO_CRE_HEB__INDEX           0x2d    /* horizontal extra bits? */
#		define NV_CIO_CRE_HEB_HDT_8            0:0
#		define NV_CIO_CRE_HEB_HDE_8            1:1
#		define NV_CIO_CRE_HEB_HBS_8            2:2
#		define NV_CIO_CRE_HEB_HRS_8            3:3
#		define NV_CIO_CRE_HEB_ILC_8            4:4
#	define NV_CIO_CRE_2E                   0x2e    /* some scratch or dummy reg to force writes to sink in */
#	define NV_CIO_CRE_HCUR_ADDR2_INDEX     0x2f    /* cursor */
#	define NV_CIO_CRE_HCUR_ADDR0_INDEX     0x30    /* pixmap */
#		define NV_CIO_CRE_HCUR_ADDR0_ADR       6:0
#		define NV_CIO_CRE_HCUR_ASI             7:7
#	define NV_CIO_CRE_HCUR_ADDR1_INDEX     0x31    /* address */
#		define NV_CIO_CRE_HCUR_ADDR1_ENABLE    0:0
#		define NV_CIO_CRE_HCUR_ADDR1_CUR_DBL   1:1
#		define NV_CIO_CRE_HCUR_ADDR1_ADR       7:2
#	define NV_CIO_CRE_LCD__INDEX           0x33
#		define NV_CIO_CRE_LCD_LCD_SELECT       0:0
#	define NV_CIO_CRE_DDC0_STATUS__INDEX   0x36
#	define NV_CIO_CRE_DDC0_WR__INDEX       0x37
#	define NV_CIO_CRE_ILACE__INDEX         0x39    /* interlace */
#	define NV_CIO_CRE_SCRATCH3__INDEX      0x3b
#	define NV_CIO_CRE_SCRATCH4__INDEX      0x3c
#	define NV_CIO_CRE_DDC_STATUS__INDEX    0x3e
#	define NV_CIO_CRE_DDC_WR__INDEX        0x3f
#	define NV_CIO_CRE_EBR_INDEX            0x41    /* extra bits ? (vertical) */
#		define NV_CIO_CRE_EBR_VDT_11           0:0
#		define NV_CIO_CRE_EBR_VDE_11           2:2
#		define NV_CIO_CRE_EBR_VRS_11           4:4
#		define NV_CIO_CRE_EBR_VBS_11           6:6
#	define NV_CIO_CRE_43                   0x43
#	define NV_CIO_CRE_44                   0x44    /* head control */
#	define NV_CIO_CRE_CSB                  0x45    /* colour saturation boost */
#	define NV_CIO_CRE_RCR                  0x46
#		define NV_CIO_CRE_RCR_ENDIAN_BIG       7:7
#	define NV_CIO_CRE_47                   0x47    /* extended fifo lwm, used on nv30+ */
#	define NV_CIO_CRE_49                   0x49
#	define NV_CIO_CRE_4B                   0x4b    /* given patterns in 0x[2-3][a-c] regs, probably scratch 6 */
#	define NV_CIO_CRE_TVOUT_LATENCY        0x52
#	define NV_CIO_CRE_53                   0x53    /* `fp_htiming' according to Haiku */
#	define NV_CIO_CRE_54                   0x54    /* `fp_vtiming' according to Haiku */
#	define NV_CIO_CRE_57                   0x57    /* index reg for cr58 */
#	define NV_CIO_CRE_58                   0x58    /* data reg for cr57 */
#	define NV_CIO_CRE_59                   0x59    /* related to on/off-chip-ness of digital outputs */
#	define NV_CIO_CRE_5B                   0x5B    /* newer colour saturation reg */
#	define NV_CIO_CRE_85                   0x85
#	define NV_CIO_CRE_86                   0x86
#define NV_PRMCIO_INP0__COLOR       0x006013da

#define NV_PRAMDAC_CU_START_POS     0x00680300
#	define NV_PRAMDAC_CU_START_POS_X       15:0
#	define NV_PRAMDAC_CU_START_POS_Y       31:16
#define NV_RAMDAC_NV10_CURSYNC      0x00680404

#define NV_PRAMDAC_NVPLL_COEFF      0x00680500
#define NV_PRAMDAC_MPLL_COEFF       0x00680504
#define NV_PRAMDAC_VPLL_COEFF       0x00680508
#	define NV30_RAMDAC_ENABLE_VCO2         (8 << 4)

#define NV_PRAMDAC_PLL_COEFF_SELECT 0x0068050c
#	define NV_PRAMDAC_PLL_COEFF_SELECT_USE_VPLL2_TRUE      (4 << 0)
#	define NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_MPLL    (1 << 8)
#	define NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_VPLL    (2 << 8)
#	define NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_NVPLL   (4 << 8)
#	define NV_PRAMDAC_PLL_COEFF_SELECT_PLL_SOURCE_VPLL2    (8 << 8)
#	define NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK1           (1 << 16)
#	define NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK1            (2 << 16)
#	define NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK2           (4 << 16)
#	define NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK2            (8 << 16)
#	define NV_PRAMDAC_PLL_COEFF_SELECT_TV_CLK_SOURCE_VIP   (1 << 20)
#	define NV_PRAMDAC_PLL_COEFF_SELECT_VCLK_RATIO_DB2      (1 << 28)
#	define NV_PRAMDAC_PLL_COEFF_SELECT_VCLK2_RATIO_DB2     (2 << 28)

#define NV_PRAMDAC_PLL_SETUP_CONTROL        0x00680510
#define NV_RAMDAC_VPLL2                     0x00680520
#define NV_PRAMDAC_SEL_CLK                  0x00680524
#define NV_RAMDAC_DITHER_NV11               0x00680528
#define NV_PRAMDAC_DACCLK                   0x0068052c
#	define NV_PRAMDAC_DACCLK_SEL_DACCLK    (1 << 0)

#define NV_RAMDAC_NVPLL_B           0x00680570
#define NV_RAMDAC_MPLL_B            0x00680574
#define NV_RAMDAC_VPLL_B            0x00680578
#define NV_RAMDAC_VPLL2_B           0x0068057c
#	define NV31_RAMDAC_ENABLE_VCO2         (8 << 28)
#define NV_PRAMDAC_580              0x00680580
#	define NV_RAMDAC_580_VPLL1_ACTIVE      (1 << 8)
#	define NV_RAMDAC_580_VPLL2_ACTIVE      (1 << 28)

#define NV_PRAMDAC_GENERAL_CONTROL  0x00680600
#	define NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON            (3 << 4)
#	define NV_PRAMDAC_GENERAL_CONTROL_VGA_STATE_SEL        (1 << 8)
#	define NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL         (1 << 12)
#	define NV_PRAMDAC_GENERAL_CONTROL_TERMINATION_75OHM    (2 << 16)
#	define NV_PRAMDAC_GENERAL_CONTROL_BPC_8BITS            (1 << 20)
#	define NV_PRAMDAC_GENERAL_CONTROL_PIPE_LONG            (2 << 28)
#define NV_PRAMDAC_TEST_CONTROL     0x00680608
#	define NV_PRAMDAC_TEST_CONTROL_TP_INS_EN_ASSERTED      (1 << 12)
#	define NV_PRAMDAC_TEST_CONTROL_PWRDWN_DAC_OFF          (1 << 16)
#	define NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI            (1 << 28)
#define NV_PRAMDAC_TESTPOINT_DATA   0x00680610
#	define NV_PRAMDAC_TESTPOINT_DATA_NOTBLANK              (8 << 28)
#define NV_PRAMDAC_630              0x00680630
#define NV_PRAMDAC_634              0x00680634

#define NV_PRAMDAC_TV_SETUP         0x00680700
#define NV_PRAMDAC_TV_VTOTAL        0x00680720
#define NV_PRAMDAC_TV_VSKEW         0x00680724
#define NV_PRAMDAC_TV_VSYNC_DELAY   0x00680728
#define NV_PRAMDAC_TV_HTOTAL        0x0068072c
#define NV_PRAMDAC_TV_HSKEW         0x00680730
#define NV_PRAMDAC_TV_HSYNC_DELAY   0x00680734
#define NV_PRAMDAC_TV_HSYNC_DELAY2  0x00680738

/* NOTE(review): a second, identical definition of NV_PRAMDAC_TV_SETUP
 * (0x00680700) used to follow here; it was redundant and has been dropped. */

#define NV_PRAMDAC_FP_VDISPLAY_END  0x00680800
#define NV_PRAMDAC_FP_VTOTAL        0x00680804
#define NV_PRAMDAC_FP_VCRTC         0x00680808
#define NV_PRAMDAC_FP_VSYNC_START   0x0068080c
#define NV_PRAMDAC_FP_VSYNC_END     0x00680810
#define NV_PRAMDAC_FP_VVALID_START  0x00680814
#define NV_PRAMDAC_FP_VVALID_END    0x00680818
#define NV_PRAMDAC_FP_HDISPLAY_END  0x00680820
#define NV_PRAMDAC_FP_HTOTAL        0x00680824
#define NV_PRAMDAC_FP_HCRTC         0x00680828
#define NV_PRAMDAC_FP_HSYNC_START   0x0068082c
#define NV_PRAMDAC_FP_HSYNC_END     0x00680830
#define NV_PRAMDAC_FP_HVALID_START  0x00680834
#define NV_PRAMDAC_FP_HVALID_END    0x00680838

#define NV_RAMDAC_FP_DITHER         0x0068083c
#define NV_PRAMDAC_FP_TG_CONTROL    0x00680848
#	define NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS      (1 << 0)
#	define NV_PRAMDAC_FP_TG_CONTROL_VSYNC_DISABLE  (2 << 0)
#	define NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS      (1 << 4)
#	define NV_PRAMDAC_FP_TG_CONTROL_HSYNC_DISABLE  (2 << 4)
#	define NV_PRAMDAC_FP_TG_CONTROL_MODE_SCALE     (0 << 8)
#	define NV_PRAMDAC_FP_TG_CONTROL_MODE_CENTER    (1 << 8)
#	define NV_PRAMDAC_FP_TG_CONTROL_MODE_NATIVE    (2 << 8)
#	define NV_PRAMDAC_FP_TG_CONTROL_READ_PROG      (1 << 20)
#	define NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12       (1 << 24)
#	define NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS     (1 << 28)
#	define NV_PRAMDAC_FP_TG_CONTROL_DISPEN_DISABLE (2 << 28)
#define NV_PRAMDAC_FP_MARGIN_COLOR  0x0068084c
#define NV_PRAMDAC_850              0x00680850
#define NV_PRAMDAC_85C              0x0068085c
#define NV_PRAMDAC_FP_DEBUG_0       0x00680880
#	define NV_PRAMDAC_FP_DEBUG_0_XSCALE_ENABLE     (1 << 0)
#	define NV_PRAMDAC_FP_DEBUG_0_YSCALE_ENABLE     (1 << 4)
/* This doesn't seem to be essential for tmds, but still often set */
#	define NV_RAMDAC_FP_DEBUG_0_TMDS_ENABLED       (8 << 4)
#	define NV_PRAMDAC_FP_DEBUG_0_XINTERP_BILINEAR  (1 << 8)
#	define NV_PRAMDAC_FP_DEBUG_0_YINTERP_BILINEAR  (1 << 12)
#	define NV_PRAMDAC_FP_DEBUG_0_XWEIGHT_ROUND     (1 << 20)
#	define NV_PRAMDAC_FP_DEBUG_0_YWEIGHT_ROUND     (1 << 24)
#	define NV_PRAMDAC_FP_DEBUG_0_PWRDOWN_FPCLK     (1 << 28)
#define NV_PRAMDAC_FP_DEBUG_1       0x00680884
#	define NV_PRAMDAC_FP_DEBUG_1_XSCALE_VALUE              11:0
#	define NV_PRAMDAC_FP_DEBUG_1_XSCALE_TESTMODE_ENABLE    (1 << 12)
#	define NV_PRAMDAC_FP_DEBUG_1_YSCALE_VALUE              27:16
#	define NV_PRAMDAC_FP_DEBUG_1_YSCALE_TESTMODE_ENABLE    (1 << 28)
#define NV_PRAMDAC_FP_DEBUG_2       0x00680888
#define NV_PRAMDAC_FP_DEBUG_3       0x0068088C

/* see NV_PRAMDAC_INDIR_TMDS in rules.xml */
#define NV_PRAMDAC_FP_TMDS_CONTROL  0x006808b0
#	define NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE        (1 << 16)
#define NV_PRAMDAC_FP_TMDS_DATA     0x006808b4

#define NV_PRAMDAC_8C0              0x006808c0

/* Some kind of switch */
#define NV_PRAMDAC_900              0x00680900
#define NV_PRAMDAC_A20              0x00680A20
#define NV_PRAMDAC_A24              0x00680A24
#define NV_PRAMDAC_A34              0x00680A34

#define NV_PRAMDAC_CTV              0x00680c00

/* names fabricated from NV_USER_DAC info */
#define NV_PRMDIO_PIXEL_MASK        0x006813c6
#	define NV_PRMDIO_PIXEL_MASK_MASK       0xff
#define NV_PRMDIO_READ_MODE_ADDRESS 0x006813c7
#define NV_PRMDIO_WRITE_MODE_ADDRESS 0x006813c8
#define NV_PRMDIO_PALETTE_DATA      0x006813c9

#define NV_PGRAPH_DEBUG_0           0x00400080
#define NV_PGRAPH_DEBUG_1           0x00400084
#define NV_PGRAPH_DEBUG_2_NV04      0x00400088
#define NV_PGRAPH_DEBUG_2           0x00400620
#define NV_PGRAPH_DEBUG_3           0x0040008c
#define NV_PGRAPH_DEBUG_4           0x00400090
#define NV_PGRAPH_INTR              0x00400100
#define NV_PGRAPH_INTR_EN           0x00400140
#define NV_PGRAPH_CTX_CONTROL       0x00400144
#define NV_PGRAPH_CTX_CONTROL_NV04  0x00400170
#define NV_PGRAPH_ABS_UCLIP_XMIN    0x0040053C
#define NV_PGRAPH_ABS_UCLIP_YMIN    0x00400540
#define NV_PGRAPH_ABS_UCLIP_XMAX    0x00400544
#define NV_PGRAPH_ABS_UCLIP_YMAX    0x00400548
#define NV_PGRAPH_BETA_AND          0x00400608
#define NV_PGRAPH_LIMIT_VIOL_PIX    0x00400610
#define NV_PGRAPH_BOFFSET0          0x00400640
#define NV_PGRAPH_BOFFSET1          0x00400644
#define NV_PGRAPH_BOFFSET2          0x00400648
#define NV_PGRAPH_BLIMIT0           0x00400684
#define NV_PGRAPH_BLIMIT1           0x00400688
#define NV_PGRAPH_BLIMIT2           0x0040068c
#define NV_PGRAPH_STATUS            0x00400700
#define NV_PGRAPH_SURFACE           0x00400710
#define NV_PGRAPH_STATE             0x00400714
#define NV_PGRAPH_FIFO              0x00400720
#define NV_PGRAPH_PATTERN_SHAPE     0x00400810
#define NV_PGRAPH_TILE              0x00400b00

#define NV_PVIDEO_INTR_EN           0x00008140
#define NV_PVIDEO_BUFFER            0x00008700
#define NV_PVIDEO_STOP              0x00008704
#define NV_PVIDEO_UVPLANE_BASE(buff)        (0x00008800+(buff)*4)
#define NV_PVIDEO_UVPLANE_LIMIT(buff)       (0x00008808+(buff)*4)
#define NV_PVIDEO_UVPLANE_OFFSET_BUFF(buff) (0x00008820+(buff)*4)
#define NV_PVIDEO_BASE(buff)        (0x00008900+(buff)*4)
#define NV_PVIDEO_LIMIT(buff)       (0x00008908+(buff)*4)
#define NV_PVIDEO_LUMINANCE(buff)   (0x00008910+(buff)*4)
#define NV_PVIDEO_CHROMINANCE(buff) (0x00008918+(buff)*4)
#define NV_PVIDEO_OFFSET_BUFF(buff) (0x00008920+(buff)*4)
#define NV_PVIDEO_SIZE_IN(buff)     (0x00008928+(buff)*4)
#define NV_PVIDEO_POINT_IN(buff)    (0x00008930+(buff)*4)
#define NV_PVIDEO_DS_DX(buff)       (0x00008938+(buff)*4)
#define NV_PVIDEO_DT_DY(buff)       (0x00008940+(buff)*4)
#define NV_PVIDEO_POINT_OUT(buff)   (0x00008948+(buff)*4)
#define NV_PVIDEO_SIZE_OUT(buff)    (0x00008950+(buff)*4)
#define NV_PVIDEO_FORMAT(buff)      (0x00008958+(buff)*4)
#	define NV_PVIDEO_FORMAT_PLANAR                 (1 << 0)
#	define NV_PVIDEO_FORMAT_COLOR_LE_CR8YB8CB8YA8  (1 << 16)
#	define NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY      (1 << 20)
#	define NV_PVIDEO_FORMAT_MATRIX_ITURBT709       (1 << 24)
#define NV_PVIDEO_COLOR_KEY         0x00008B00

/* NV04 overlay defines from VIDIX & Haiku */
#define NV_PVIDEO_INTR_EN_0         0x00680140
#define NV_PVIDEO_STEP_SIZE         0x00680200
#define NV_PVIDEO_CONTROL_Y         0x00680204
#define NV_PVIDEO_CONTROL_X         0x00680208
#define NV_PVIDEO_BUFF0_START_ADDRESS  0x0068020c
#define NV_PVIDEO_BUFF0_PITCH_LENGTH   0x00680214
#define NV_PVIDEO_BUFF0_OFFSET         0x0068021c
#define NV_PVIDEO_BUFF1_START_ADDRESS  0x00680210
#define NV_PVIDEO_BUFF1_PITCH_LENGTH   0x00680218
#define NV_PVIDEO_BUFF1_OFFSET         0x00680220
#define NV_PVIDEO_OE_STATE          0x00680224
#define NV_PVIDEO_SU_STATE          0x00680228
#define NV_PVIDEO_RM_STATE          0x0068022c
#define NV_PVIDEO_WINDOW_START      0x00680230
#define NV_PVIDEO_WINDOW_SIZE       0x00680234
#define NV_PVIDEO_FIFO_THRES_SIZE   0x00680238
#define NV_PVIDEO_FIFO_BURST_LENGTH 0x0068023c
#define NV_PVIDEO_KEY               0x00680240
#define NV_PVIDEO_OVERLAY           0x00680244
#define NV_PVIDEO_RED_CSC_OFFSET    0x00680280
#define NV_PVIDEO_GREEN_CSC_OFFSET  0x00680284
#define NV_PVIDEO_BLUE_CSC_OFFSET   0x00680288
#define NV_PVIDEO_CSC_ADJUST        0x0068028c

#endif