aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/nouveau
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/drm/nouveau')
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig44
-rw-r--r--drivers/gpu/drm/nouveau/Makefile31
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c125
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c155
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c6095
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.h289
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c671
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_calc.c478
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_channel.c468
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c824
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h54
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_crtc.h95
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c155
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c115
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c206
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.h157
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c569
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.c405
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h1286
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_encoder.h91
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fb.h47
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c380
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.h47
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c262
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c992
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hw.c1080
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hw.h455
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_i2c.c269
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_i2c.h52
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ioc32.c72
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_irq.c702
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c568
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_notifier.c196
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_object.c1294
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_reg.h836
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c321
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c811
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c131
-rw-r--r--drivers/gpu/drm/nouveau/nv04_crtc.c1002
-rw-r--r--drivers/gpu/drm/nouveau/nv04_cursor.c70
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dac.c528
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dfp.c621
-rw-r--r--drivers/gpu/drm/nouveau/nv04_display.c288
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fb.c21
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fbcon.c316
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fifo.c271
-rw-r--r--drivers/gpu/drm/nouveau/nv04_graph.c579
-rw-r--r--drivers/gpu/drm/nouveau/nv04_instmem.c208
-rw-r--r--drivers/gpu/drm/nouveau/nv04_mc.c20
-rw-r--r--drivers/gpu/drm/nouveau/nv04_timer.c51
-rw-r--r--drivers/gpu/drm/nouveau/nv04_tv.c305
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fb.c24
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fifo.c260
-rw-r--r--drivers/gpu/drm/nouveau/nv10_graph.c892
-rw-r--r--drivers/gpu/drm/nouveau/nv17_gpio.c92
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv.c681
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv.h156
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv_modes.c583
-rw-r--r--drivers/gpu/drm/nouveau/nv20_graph.c780
-rw-r--r--drivers/gpu/drm/nouveau/nv40_fb.c62
-rw-r--r--drivers/gpu/drm/nouveau/nv40_fifo.c314
-rw-r--r--drivers/gpu/drm/nouveau/nv40_graph.c560
-rw-r--r--drivers/gpu/drm/nouveau/nv40_mc.c38
-rw-r--r--drivers/gpu/drm/nouveau/nv50_crtc.c769
-rw-r--r--drivers/gpu/drm/nouveau/nv50_cursor.c156
-rw-r--r--drivers/gpu/drm/nouveau/nv50_dac.c304
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c1015
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.h46
-rw-r--r--drivers/gpu/drm/nouveau/nv50_evo.h113
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c273
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fifo.c494
-rw-r--r--drivers/gpu/drm/nouveau/nv50_graph.c385
-rw-r--r--drivers/gpu/drm/nouveau/nv50_instmem.c509
-rw-r--r--drivers/gpu/drm/nouveau/nv50_mc.c40
-rw-r--r--drivers/gpu/drm/nouveau/nv50_sor.c309
-rw-r--r--drivers/gpu/drm/nouveau/nvreg.h535
76 files changed, 34498 insertions, 0 deletions
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
new file mode 100644
index 000000000000..d823e6319516
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -0,0 +1,44 @@
1config DRM_NOUVEAU
2 tristate "Nouveau (nVidia) cards"
3 depends on DRM
4 select FW_LOADER
5 select DRM_KMS_HELPER
6 select DRM_TTM
7 select FB_CFB_FILLRECT
8 select FB_CFB_COPYAREA
9 select FB_CFB_IMAGEBLIT
10 select FB
11 select FRAMEBUFFER_CONSOLE if !EMBEDDED
12 select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
13 help
14 Choose this option for open-source nVidia support.
15
16config DRM_NOUVEAU_BACKLIGHT
17 bool "Support for backlight control"
18 depends on DRM_NOUVEAU
19 default y
20 help
21 Say Y here if you want to control the backlight of your display
22 (e.g. a laptop panel).
23
24config DRM_NOUVEAU_DEBUG
25 bool "Build in Nouveau's debugfs support"
26 depends on DRM_NOUVEAU && DEBUG_FS
27 default y
28 help
29 Say Y here if you want Nouveau to output debugging information
30 via debugfs.
31
32menu "I2C encoder or helper chips"
33 depends on DRM
34
35config DRM_I2C_CH7006
36 tristate "Chrontel ch7006 TV encoder"
37 default m if DRM_NOUVEAU
38 help
39 Support for Chrontel ch7006 and similar TV encoders, found
40 on some nVidia video cards.
41
42 This driver is currently only useful if you're also using
43 the nouveau driver.
44endmenu
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
new file mode 100644
index 000000000000..1d90d4d0144f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -0,0 +1,31 @@
1#
2# Makefile for the drm device driver. This driver provides support for the
3# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
4
5ccflags-y := -Iinclude/drm
6nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
7 nouveau_object.o nouveau_irq.o nouveau_notifier.o \
8 nouveau_sgdma.o nouveau_dma.o \
9 nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
10 nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
11 nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
12 nouveau_dp.o \
13 nv04_timer.o \
14 nv04_mc.o nv40_mc.o nv50_mc.o \
15 nv04_fb.o nv10_fb.o nv40_fb.o \
16 nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \
17 nv04_graph.o nv10_graph.o nv20_graph.o \
18 nv40_graph.o nv50_graph.o \
19 nv04_instmem.o nv50_instmem.o \
20 nv50_crtc.o nv50_dac.o nv50_sor.o \
21 nv50_cursor.o nv50_display.o nv50_fbcon.o \
22 nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
23 nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \
24 nv17_gpio.o
25
26nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
27nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
28nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o
29nouveau-$(CONFIG_ACPI) += nouveau_acpi.o
30
31obj-$(CONFIG_DRM_NOUVEAU)+= nouveau.o
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
new file mode 100644
index 000000000000..1cf488247a16
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -0,0 +1,125 @@
1#include <linux/pci.h>
2#include <linux/acpi.h>
3#include <acpi/acpi_drivers.h>
4#include <acpi/acpi_bus.h>
5
6#include "drmP.h"
7#include "drm.h"
8#include "drm_sarea.h"
9#include "drm_crtc_helper.h"
10#include "nouveau_drv.h"
11#include "nouveau_drm.h"
12#include "nv50_display.h"
13
14#define NOUVEAU_DSM_SUPPORTED 0x00
15#define NOUVEAU_DSM_SUPPORTED_FUNCTIONS 0x00
16
17#define NOUVEAU_DSM_ACTIVE 0x01
18#define NOUVEAU_DSM_ACTIVE_QUERY 0x00
19
20#define NOUVEAU_DSM_LED 0x02
21#define NOUVEAU_DSM_LED_STATE 0x00
22#define NOUVEAU_DSM_LED_OFF 0x10
23#define NOUVEAU_DSM_LED_STAMINA 0x11
24#define NOUVEAU_DSM_LED_SPEED 0x12
25
26#define NOUVEAU_DSM_POWER 0x03
27#define NOUVEAU_DSM_POWER_STATE 0x00
28#define NOUVEAU_DSM_POWER_SPEED 0x01
29#define NOUVEAU_DSM_POWER_STAMINA 0x02
30
31static int nouveau_dsm(struct drm_device *dev, int func, int arg, int *result)
32{
33 static char muid[] = {
34 0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D,
35 0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4,
36 };
37
38 struct pci_dev *pdev = dev->pdev;
39 struct acpi_handle *handle;
40 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
41 struct acpi_object_list input;
42 union acpi_object params[4];
43 union acpi_object *obj;
44 int err;
45
46 handle = DEVICE_ACPI_HANDLE(&pdev->dev);
47
48 if (!handle)
49 return -ENODEV;
50
51 input.count = 4;
52 input.pointer = params;
53 params[0].type = ACPI_TYPE_BUFFER;
54 params[0].buffer.length = sizeof(muid);
55 params[0].buffer.pointer = (char *)muid;
56 params[1].type = ACPI_TYPE_INTEGER;
57 params[1].integer.value = 0x00000102;
58 params[2].type = ACPI_TYPE_INTEGER;
59 params[2].integer.value = func;
60 params[3].type = ACPI_TYPE_INTEGER;
61 params[3].integer.value = arg;
62
63 err = acpi_evaluate_object(handle, "_DSM", &input, &output);
64 if (err) {
65 NV_INFO(dev, "failed to evaluate _DSM: %d\n", err);
66 return err;
67 }
68
69 obj = (union acpi_object *)output.pointer;
70
71 if (obj->type == ACPI_TYPE_INTEGER)
72 if (obj->integer.value == 0x80000002)
73 return -ENODEV;
74
75 if (obj->type == ACPI_TYPE_BUFFER) {
76 if (obj->buffer.length == 4 && result) {
77 *result = 0;
78 *result |= obj->buffer.pointer[0];
79 *result |= (obj->buffer.pointer[1] << 8);
80 *result |= (obj->buffer.pointer[2] << 16);
81 *result |= (obj->buffer.pointer[3] << 24);
82 }
83 }
84
85 kfree(output.pointer);
86 return 0;
87}
88
89int nouveau_hybrid_setup(struct drm_device *dev)
90{
91 int result;
92
93 if (nouveau_dsm(dev, NOUVEAU_DSM_ACTIVE, NOUVEAU_DSM_ACTIVE_QUERY,
94 &result))
95 return -ENODEV;
96
97 NV_INFO(dev, "_DSM hardware status gave 0x%x\n", result);
98
99 if (result & 0x1) { /* Stamina mode - disable the external GPU */
100 nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_STAMINA,
101 NULL);
102 nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STAMINA,
103 NULL);
104 } else { /* Ensure that the external GPU is enabled */
105 nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL);
106 nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED,
107 NULL);
108 }
109
110 return 0;
111}
112
113bool nouveau_dsm_probe(struct drm_device *dev)
114{
115 int support = 0;
116
117 if (nouveau_dsm(dev, NOUVEAU_DSM_SUPPORTED,
118 NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &support))
119 return false;
120
121 if (!support)
122 return false;
123
124 return true;
125}
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
new file mode 100644
index 000000000000..20564f8cb0ec
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -0,0 +1,155 @@
1/*
2 * Copyright (C) 2009 Red Hat <mjg@redhat.com>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining
5 * a copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sublicense, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial
14 * portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
19 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
20 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
21 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
22 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23 *
24 */
25
26/*
27 * Authors:
28 * Matthew Garrett <mjg@redhat.com>
29 *
30 * Register locations derived from NVClock by Roderick Colenbrander
31 */
32
33#include <linux/backlight.h>
34
35#include "drmP.h"
36#include "nouveau_drv.h"
37#include "nouveau_drm.h"
38#include "nouveau_reg.h"
39
40static int nv40_get_intensity(struct backlight_device *bd)
41{
42 struct drm_device *dev = bl_get_data(bd);
43 int val = (nv_rd32(dev, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK)
44 >> 16;
45
46 return val;
47}
48
49static int nv40_set_intensity(struct backlight_device *bd)
50{
51 struct drm_device *dev = bl_get_data(bd);
52 int val = bd->props.brightness;
53 int reg = nv_rd32(dev, NV40_PMC_BACKLIGHT);
54
55 nv_wr32(dev, NV40_PMC_BACKLIGHT,
56 (val << 16) | (reg & ~NV40_PMC_BACKLIGHT_MASK));
57
58 return 0;
59}
60
61static struct backlight_ops nv40_bl_ops = {
62 .options = BL_CORE_SUSPENDRESUME,
63 .get_brightness = nv40_get_intensity,
64 .update_status = nv40_set_intensity,
65};
66
67static int nv50_get_intensity(struct backlight_device *bd)
68{
69 struct drm_device *dev = bl_get_data(bd);
70
71 return nv_rd32(dev, NV50_PDISPLAY_SOR_BACKLIGHT);
72}
73
74static int nv50_set_intensity(struct backlight_device *bd)
75{
76 struct drm_device *dev = bl_get_data(bd);
77 int val = bd->props.brightness;
78
79 nv_wr32(dev, NV50_PDISPLAY_SOR_BACKLIGHT,
80 val | NV50_PDISPLAY_SOR_BACKLIGHT_ENABLE);
81 return 0;
82}
83
84static struct backlight_ops nv50_bl_ops = {
85 .options = BL_CORE_SUSPENDRESUME,
86 .get_brightness = nv50_get_intensity,
87 .update_status = nv50_set_intensity,
88};
89
90static int nouveau_nv40_backlight_init(struct drm_device *dev)
91{
92 struct drm_nouveau_private *dev_priv = dev->dev_private;
93 struct backlight_device *bd;
94
95 if (!(nv_rd32(dev, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK))
96 return 0;
97
98 bd = backlight_device_register("nv_backlight", &dev->pdev->dev, dev,
99 &nv40_bl_ops);
100 if (IS_ERR(bd))
101 return PTR_ERR(bd);
102
103 dev_priv->backlight = bd;
104 bd->props.max_brightness = 31;
105 bd->props.brightness = nv40_get_intensity(bd);
106 backlight_update_status(bd);
107
108 return 0;
109}
110
111static int nouveau_nv50_backlight_init(struct drm_device *dev)
112{
113 struct drm_nouveau_private *dev_priv = dev->dev_private;
114 struct backlight_device *bd;
115
116 if (!nv_rd32(dev, NV50_PDISPLAY_SOR_BACKLIGHT))
117 return 0;
118
119 bd = backlight_device_register("nv_backlight", &dev->pdev->dev, dev,
120 &nv50_bl_ops);
121 if (IS_ERR(bd))
122 return PTR_ERR(bd);
123
124 dev_priv->backlight = bd;
125 bd->props.max_brightness = 1025;
126 bd->props.brightness = nv50_get_intensity(bd);
127 backlight_update_status(bd);
128 return 0;
129}
130
131int nouveau_backlight_init(struct drm_device *dev)
132{
133 struct drm_nouveau_private *dev_priv = dev->dev_private;
134
135 switch (dev_priv->card_type) {
136 case NV_40:
137 return nouveau_nv40_backlight_init(dev);
138 case NV_50:
139 return nouveau_nv50_backlight_init(dev);
140 default:
141 break;
142 }
143
144 return 0;
145}
146
147void nouveau_backlight_exit(struct drm_device *dev)
148{
149 struct drm_nouveau_private *dev_priv = dev->dev_private;
150
151 if (dev_priv->backlight) {
152 backlight_device_unregister(dev_priv->backlight);
153 dev_priv->backlight = NULL;
154 }
155}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
new file mode 100644
index 000000000000..5eec5ed69489
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -0,0 +1,6095 @@
1/*
2 * Copyright 2005-2006 Erik Waling
3 * Copyright 2006 Stephane Marchesin
4 * Copyright 2007-2009 Stuart Bennett
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
20 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
21 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24
25#include "drmP.h"
26#define NV_DEBUG_NOTRACE
27#include "nouveau_drv.h"
28#include "nouveau_hw.h"
29
30/* these defines are made up */
31#define NV_CIO_CRE_44_HEADA 0x0
32#define NV_CIO_CRE_44_HEADB 0x3
33#define FEATURE_MOBILE 0x10 /* also FEATURE_QUADRO for BMP */
34#define LEGACY_I2C_CRT 0x80
35#define LEGACY_I2C_PANEL 0x81
36#define LEGACY_I2C_TV 0x82
37
38#define EDID1_LEN 128
39
40#define BIOSLOG(sip, fmt, arg...) NV_DEBUG(sip->dev, fmt, ##arg)
41#define LOG_OLD_VALUE(x)
42
43#define ROM16(x) le16_to_cpu(*(uint16_t *)&(x))
44#define ROM32(x) le32_to_cpu(*(uint32_t *)&(x))
45
46struct init_exec {
47 bool execute;
48 bool repeat;
49};
50
51static bool nv_cksum(const uint8_t *data, unsigned int length)
52{
53 /*
54 * There's a few checksums in the BIOS, so here's a generic checking
55 * function.
56 */
57 int i;
58 uint8_t sum = 0;
59
60 for (i = 0; i < length; i++)
61 sum += data[i];
62
63 if (sum)
64 return true;
65
66 return false;
67}
68
69static int
70score_vbios(struct drm_device *dev, const uint8_t *data, const bool writeable)
71{
72 if (!(data[0] == 0x55 && data[1] == 0xAA)) {
73 NV_TRACEWARN(dev, "... BIOS signature not found\n");
74 return 0;
75 }
76
77 if (nv_cksum(data, data[2] * 512)) {
78 NV_TRACEWARN(dev, "... BIOS checksum invalid\n");
79 /* if a ro image is somewhat bad, it's probably all rubbish */
80 return writeable ? 2 : 1;
81 } else
82 NV_TRACE(dev, "... appears to be valid\n");
83
84 return 3;
85}
86
87static void load_vbios_prom(struct drm_device *dev, uint8_t *data)
88{
89 struct drm_nouveau_private *dev_priv = dev->dev_private;
90 uint32_t pci_nv_20, save_pci_nv_20;
91 int pcir_ptr;
92 int i;
93
94 if (dev_priv->card_type >= NV_50)
95 pci_nv_20 = 0x88050;
96 else
97 pci_nv_20 = NV_PBUS_PCI_NV_20;
98
99 /* enable ROM access */
100 save_pci_nv_20 = nvReadMC(dev, pci_nv_20);
101 nvWriteMC(dev, pci_nv_20,
102 save_pci_nv_20 & ~NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED);
103
104 /* bail if no rom signature */
105 if (nv_rd08(dev, NV_PROM_OFFSET) != 0x55 ||
106 nv_rd08(dev, NV_PROM_OFFSET + 1) != 0xaa)
107 goto out;
108
109 /* additional check (see note below) - read PCI record header */
110 pcir_ptr = nv_rd08(dev, NV_PROM_OFFSET + 0x18) |
111 nv_rd08(dev, NV_PROM_OFFSET + 0x19) << 8;
112 if (nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr) != 'P' ||
113 nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr + 1) != 'C' ||
114 nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr + 2) != 'I' ||
115 nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr + 3) != 'R')
116 goto out;
117
118 /* on some 6600GT/6800LE prom reads are messed up. nvclock alleges a
119 * a good read may be obtained by waiting or re-reading (cargocult: 5x)
120 * each byte. we'll hope pramin has something usable instead
121 */
122 for (i = 0; i < NV_PROM_SIZE; i++)
123 data[i] = nv_rd08(dev, NV_PROM_OFFSET + i);
124
125out:
126 /* disable ROM access */
127 nvWriteMC(dev, pci_nv_20,
128 save_pci_nv_20 | NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED);
129}
130
131static void load_vbios_pramin(struct drm_device *dev, uint8_t *data)
132{
133 struct drm_nouveau_private *dev_priv = dev->dev_private;
134 uint32_t old_bar0_pramin = 0;
135 int i;
136
137 if (dev_priv->card_type >= NV_50) {
138 uint32_t vbios_vram = (nv_rd32(dev, 0x619f04) & ~0xff) << 8;
139
140 if (!vbios_vram)
141 vbios_vram = (nv_rd32(dev, 0x1700) << 16) + 0xf0000;
142
143 old_bar0_pramin = nv_rd32(dev, 0x1700);
144 nv_wr32(dev, 0x1700, vbios_vram >> 16);
145 }
146
147 /* bail if no rom signature */
148 if (nv_rd08(dev, NV_PRAMIN_OFFSET) != 0x55 ||
149 nv_rd08(dev, NV_PRAMIN_OFFSET + 1) != 0xaa)
150 goto out;
151
152 for (i = 0; i < NV_PROM_SIZE; i++)
153 data[i] = nv_rd08(dev, NV_PRAMIN_OFFSET + i);
154
155out:
156 if (dev_priv->card_type >= NV_50)
157 nv_wr32(dev, 0x1700, old_bar0_pramin);
158}
159
160static void load_vbios_pci(struct drm_device *dev, uint8_t *data)
161{
162 void __iomem *rom = NULL;
163 size_t rom_len;
164 int ret;
165
166 ret = pci_enable_rom(dev->pdev);
167 if (ret)
168 return;
169
170 rom = pci_map_rom(dev->pdev, &rom_len);
171 if (!rom)
172 goto out;
173 memcpy_fromio(data, rom, rom_len);
174 pci_unmap_rom(dev->pdev, rom);
175
176out:
177 pci_disable_rom(dev->pdev);
178}
179
180struct methods {
181 const char desc[8];
182 void (*loadbios)(struct drm_device *, uint8_t *);
183 const bool rw;
184 int score;
185};
186
187static struct methods nv04_methods[] = {
188 { "PROM", load_vbios_prom, false },
189 { "PRAMIN", load_vbios_pramin, true },
190 { "PCIROM", load_vbios_pci, true },
191 { }
192};
193
194static struct methods nv50_methods[] = {
195 { "PRAMIN", load_vbios_pramin, true },
196 { "PROM", load_vbios_prom, false },
197 { "PCIROM", load_vbios_pci, true },
198 { }
199};
200
201static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data)
202{
203 struct drm_nouveau_private *dev_priv = dev->dev_private;
204 struct methods *methods, *method;
205 int testscore = 3;
206
207 if (nouveau_vbios) {
208 method = nv04_methods;
209 while (method->loadbios) {
210 if (!strcasecmp(nouveau_vbios, method->desc))
211 break;
212 method++;
213 }
214
215 if (method->loadbios) {
216 NV_INFO(dev, "Attempting to use BIOS image from %s\n",
217 method->desc);
218
219 method->loadbios(dev, data);
220 if (score_vbios(dev, data, method->rw))
221 return true;
222 }
223
224 NV_ERROR(dev, "VBIOS source \'%s\' invalid\n", nouveau_vbios);
225 }
226
227 if (dev_priv->card_type < NV_50)
228 methods = nv04_methods;
229 else
230 methods = nv50_methods;
231
232 method = methods;
233 while (method->loadbios) {
234 NV_TRACE(dev, "Attempting to load BIOS image from %s\n",
235 method->desc);
236 data[0] = data[1] = 0; /* avoid reuse of previous image */
237 method->loadbios(dev, data);
238 method->score = score_vbios(dev, data, method->rw);
239 if (method->score == testscore)
240 return true;
241 method++;
242 }
243
244 while (--testscore > 0) {
245 method = methods;
246 while (method->loadbios) {
247 if (method->score == testscore) {
248 NV_TRACE(dev, "Using BIOS image from %s\n",
249 method->desc);
250 method->loadbios(dev, data);
251 return true;
252 }
253 method++;
254 }
255 }
256
257 NV_ERROR(dev, "No valid BIOS image found\n");
258 return false;
259}
260
261struct init_tbl_entry {
262 char *name;
263 uint8_t id;
264 int length;
265 int length_offset;
266 int length_multiplier;
267 bool (*handler)(struct nvbios *, uint16_t, struct init_exec *);
268};
269
270struct bit_entry {
271 uint8_t id[2];
272 uint16_t length;
273 uint16_t offset;
274};
275
276static int parse_init_table(struct nvbios *, unsigned int, struct init_exec *);
277
278#define MACRO_INDEX_SIZE 2
279#define MACRO_SIZE 8
280#define CONDITION_SIZE 12
281#define IO_FLAG_CONDITION_SIZE 9
282#define IO_CONDITION_SIZE 5
283#define MEM_INIT_SIZE 66
284
285static void still_alive(void)
286{
287#if 0
288 sync();
289 msleep(2);
290#endif
291}
292
293static uint32_t
294munge_reg(struct nvbios *bios, uint32_t reg)
295{
296 struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
297 struct dcb_entry *dcbent = bios->display.output;
298
299 if (dev_priv->card_type < NV_50)
300 return reg;
301
302 if (reg & 0x40000000) {
303 BUG_ON(!dcbent);
304
305 reg += (ffs(dcbent->or) - 1) * 0x800;
306 if ((reg & 0x20000000) && !(dcbent->sorconf.link & 1))
307 reg += 0x00000080;
308 }
309
310 reg &= ~0x60000000;
311 return reg;
312}
313
314static int
315valid_reg(struct nvbios *bios, uint32_t reg)
316{
317 struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
318 struct drm_device *dev = bios->dev;
319
320 /* C51 has misaligned regs on purpose. Marvellous */
321 if (reg & 0x2 || (reg & 0x1 && dev_priv->VBIOS.pub.chip_version != 0x51)) {
322 NV_ERROR(dev, "========== misaligned reg 0x%08X ==========\n",
323 reg);
324 return 0;
325 }
326 /*
327 * Warn on C51 regs that have not been verified accessible in
328 * mmiotracing
329 */
330 if (reg & 0x1 && dev_priv->VBIOS.pub.chip_version == 0x51 &&
331 reg != 0x130d && reg != 0x1311 && reg != 0x60081d)
332 NV_WARN(dev, "=== C51 misaligned reg 0x%08X not verified ===\n",
333 reg);
334
335 /* Trust the init scripts on G80 */
336 if (dev_priv->card_type >= NV_50)
337 return 1;
338
339 #define WITHIN(x, y, z) ((x >= y) && (x < y + z))
340 if (WITHIN(reg, NV_PMC_OFFSET, NV_PMC_SIZE))
341 return 1;
342 if (WITHIN(reg, NV_PBUS_OFFSET, NV_PBUS_SIZE))
343 return 1;
344 if (WITHIN(reg, NV_PFIFO_OFFSET, NV_PFIFO_SIZE))
345 return 1;
346 if (dev_priv->VBIOS.pub.chip_version >= 0x30 &&
347 (WITHIN(reg, 0x4000, 0x600) || reg == 0x00004600))
348 return 1;
349 if (dev_priv->VBIOS.pub.chip_version >= 0x40 &&
350 WITHIN(reg, 0xc000, 0x48))
351 return 1;
352 if (dev_priv->VBIOS.pub.chip_version >= 0x17 && reg == 0x0000d204)
353 return 1;
354 if (dev_priv->VBIOS.pub.chip_version >= 0x40) {
355 if (reg == 0x00011014 || reg == 0x00020328)
356 return 1;
357 if (WITHIN(reg, 0x88000, NV_PBUS_SIZE)) /* new PBUS */
358 return 1;
359 }
360 if (WITHIN(reg, NV_PFB_OFFSET, NV_PFB_SIZE))
361 return 1;
362 if (WITHIN(reg, NV_PEXTDEV_OFFSET, NV_PEXTDEV_SIZE))
363 return 1;
364 if (WITHIN(reg, NV_PCRTC0_OFFSET, NV_PCRTC0_SIZE * 2))
365 return 1;
366 if (WITHIN(reg, NV_PRAMDAC0_OFFSET, NV_PRAMDAC0_SIZE * 2))
367 return 1;
368 if (dev_priv->VBIOS.pub.chip_version >= 0x17 && reg == 0x0070fff0)
369 return 1;
370 if (dev_priv->VBIOS.pub.chip_version == 0x51 &&
371 WITHIN(reg, NV_PRAMIN_OFFSET, NV_PRAMIN_SIZE))
372 return 1;
373 #undef WITHIN
374
375 NV_ERROR(dev, "========== unknown reg 0x%08X ==========\n", reg);
376
377 return 0;
378}
379
380static bool
381valid_idx_port(struct nvbios *bios, uint16_t port)
382{
383 struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
384 struct drm_device *dev = bios->dev;
385
386 /*
387 * If adding more ports here, the read/write functions below will need
388 * updating so that the correct mmio range (PRMCIO, PRMDIO, PRMVIO) is
389 * used for the port in question
390 */
391 if (dev_priv->card_type < NV_50) {
392 if (port == NV_CIO_CRX__COLOR)
393 return true;
394 if (port == NV_VIO_SRX)
395 return true;
396 } else {
397 if (port == NV_CIO_CRX__COLOR)
398 return true;
399 }
400
401 NV_ERROR(dev, "========== unknown indexed io port 0x%04X ==========\n",
402 port);
403
404 return false;
405}
406
407static bool
408valid_port(struct nvbios *bios, uint16_t port)
409{
410 struct drm_device *dev = bios->dev;
411
412 /*
413 * If adding more ports here, the read/write functions below will need
414 * updating so that the correct mmio range (PRMCIO, PRMDIO, PRMVIO) is
415 * used for the port in question
416 */
417 if (port == NV_VIO_VSE2)
418 return true;
419
420 NV_ERROR(dev, "========== unknown io port 0x%04X ==========\n", port);
421
422 return false;
423}
424
425static uint32_t
426bios_rd32(struct nvbios *bios, uint32_t reg)
427{
428 uint32_t data;
429
430 reg = munge_reg(bios, reg);
431 if (!valid_reg(bios, reg))
432 return 0;
433
434 /*
435 * C51 sometimes uses regs with bit0 set in the address. For these
436 * cases there should exist a translation in a BIOS table to an IO
437 * port address which the BIOS uses for accessing the reg
438 *
439 * These only seem to appear for the power control regs to a flat panel,
440 * and the GPIO regs at 0x60081*. In C51 mmio traces the normal regs
441 * for 0x1308 and 0x1310 are used - hence the mask below. An S3
442 * suspend-resume mmio trace from a C51 will be required to see if this
443 * is true for the power microcode in 0x14.., or whether the direct IO
444 * port access method is needed
445 */
446 if (reg & 0x1)
447 reg &= ~0x1;
448
449 data = nv_rd32(bios->dev, reg);
450
451 BIOSLOG(bios, " Read: Reg: 0x%08X, Data: 0x%08X\n", reg, data);
452
453 return data;
454}
455
456static void
457bios_wr32(struct nvbios *bios, uint32_t reg, uint32_t data)
458{
459 struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
460
461 reg = munge_reg(bios, reg);
462 if (!valid_reg(bios, reg))
463 return;
464
465 /* see note in bios_rd32 */
466 if (reg & 0x1)
467 reg &= 0xfffffffe;
468
469 LOG_OLD_VALUE(bios_rd32(bios, reg));
470 BIOSLOG(bios, " Write: Reg: 0x%08X, Data: 0x%08X\n", reg, data);
471
472 if (dev_priv->VBIOS.execute) {
473 still_alive();
474 nv_wr32(bios->dev, reg, data);
475 }
476}
477
478static uint8_t
479bios_idxprt_rd(struct nvbios *bios, uint16_t port, uint8_t index)
480{
481 struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
482 struct drm_device *dev = bios->dev;
483 uint8_t data;
484
485 if (!valid_idx_port(bios, port))
486 return 0;
487
488 if (dev_priv->card_type < NV_50) {
489 if (port == NV_VIO_SRX)
490 data = NVReadVgaSeq(dev, bios->state.crtchead, index);
491 else /* assume NV_CIO_CRX__COLOR */
492 data = NVReadVgaCrtc(dev, bios->state.crtchead, index);
493 } else {
494 uint32_t data32;
495
496 data32 = bios_rd32(bios, NV50_PDISPLAY_VGACRTC(index & ~3));
497 data = (data32 >> ((index & 3) << 3)) & 0xff;
498 }
499
500 BIOSLOG(bios, " Indexed IO read: Port: 0x%04X, Index: 0x%02X, "
501 "Head: 0x%02X, Data: 0x%02X\n",
502 port, index, bios->state.crtchead, data);
503 return data;
504}
505
506static void
507bios_idxprt_wr(struct nvbios *bios, uint16_t port, uint8_t index, uint8_t data)
508{
509 struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
510 struct drm_device *dev = bios->dev;
511
512 if (!valid_idx_port(bios, port))
513 return;
514
515 /*
516 * The current head is maintained in the nvbios member state.crtchead.
517 * We trap changes to CR44 and update the head variable and hence the
518 * register set written.
519 * As CR44 only exists on CRTC0, we update crtchead to head0 in advance
520 * of the write, and to head1 after the write
521 */
522 if (port == NV_CIO_CRX__COLOR && index == NV_CIO_CRE_44 &&
523 data != NV_CIO_CRE_44_HEADB)
524 bios->state.crtchead = 0;
525
526 LOG_OLD_VALUE(bios_idxprt_rd(bios, port, index));
527 BIOSLOG(bios, " Indexed IO write: Port: 0x%04X, Index: 0x%02X, "
528 "Head: 0x%02X, Data: 0x%02X\n",
529 port, index, bios->state.crtchead, data);
530
531 if (bios->execute && dev_priv->card_type < NV_50) {
532 still_alive();
533 if (port == NV_VIO_SRX)
534 NVWriteVgaSeq(dev, bios->state.crtchead, index, data);
535 else /* assume NV_CIO_CRX__COLOR */
536 NVWriteVgaCrtc(dev, bios->state.crtchead, index, data);
537 } else
538 if (bios->execute) {
539 uint32_t data32, shift = (index & 3) << 3;
540
541 still_alive();
542
543 data32 = bios_rd32(bios, NV50_PDISPLAY_VGACRTC(index & ~3));
544 data32 &= ~(0xff << shift);
545 data32 |= (data << shift);
546 bios_wr32(bios, NV50_PDISPLAY_VGACRTC(index & ~3), data32);
547 }
548
549 if (port == NV_CIO_CRX__COLOR &&
550 index == NV_CIO_CRE_44 && data == NV_CIO_CRE_44_HEADB)
551 bios->state.crtchead = 1;
552}
553
554static uint8_t
555bios_port_rd(struct nvbios *bios, uint16_t port)
556{
557 uint8_t data, head = bios->state.crtchead;
558
559 if (!valid_port(bios, port))
560 return 0;
561
562 data = NVReadPRMVIO(bios->dev, head, NV_PRMVIO0_OFFSET + port);
563
564 BIOSLOG(bios, " IO read: Port: 0x%04X, Head: 0x%02X, Data: 0x%02X\n",
565 port, head, data);
566
567 return data;
568}
569
570static void
571bios_port_wr(struct nvbios *bios, uint16_t port, uint8_t data)
572{
573 int head = bios->state.crtchead;
574
575 if (!valid_port(bios, port))
576 return;
577
578 LOG_OLD_VALUE(bios_port_rd(bios, port));
579 BIOSLOG(bios, " IO write: Port: 0x%04X, Head: 0x%02X, Data: 0x%02X\n",
580 port, head, data);
581
582 if (!bios->execute)
583 return;
584
585 still_alive();
586 NVWritePRMVIO(bios->dev, head, NV_PRMVIO0_OFFSET + port, data);
587}
588
589static bool
590io_flag_condition_met(struct nvbios *bios, uint16_t offset, uint8_t cond)
591{
592 /*
593 * The IO flag condition entry has 2 bytes for the CRTC port; 1 byte
594 * for the CRTC index; 1 byte for the mask to apply to the value
595 * retrieved from the CRTC; 1 byte for the shift right to apply to the
596 * masked CRTC value; 2 bytes for the offset to the flag array, to
597 * which the shifted value is added; 1 byte for the mask applied to the
598 * value read from the flag array; and 1 byte for the value to compare
599 * against the masked byte from the flag table.
600 */
601
602 uint16_t condptr = bios->io_flag_condition_tbl_ptr + cond * IO_FLAG_CONDITION_SIZE;
603 uint16_t crtcport = ROM16(bios->data[condptr]);
604 uint8_t crtcindex = bios->data[condptr + 2];
605 uint8_t mask = bios->data[condptr + 3];
606 uint8_t shift = bios->data[condptr + 4];
607 uint16_t flagarray = ROM16(bios->data[condptr + 5]);
608 uint8_t flagarraymask = bios->data[condptr + 7];
609 uint8_t cmpval = bios->data[condptr + 8];
610 uint8_t data;
611
612 BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
613 "Shift: 0x%02X, FlagArray: 0x%04X, FAMask: 0x%02X, "
614 "Cmpval: 0x%02X\n",
615 offset, crtcport, crtcindex, mask, shift, flagarray, flagarraymask, cmpval);
616
617 data = bios_idxprt_rd(bios, crtcport, crtcindex);
618
619 data = bios->data[flagarray + ((data & mask) >> shift)];
620 data &= flagarraymask;
621
622 BIOSLOG(bios, "0x%04X: Checking if 0x%02X equals 0x%02X\n",
623 offset, data, cmpval);
624
625 return (data == cmpval);
626}
627
628static bool
629bios_condition_met(struct nvbios *bios, uint16_t offset, uint8_t cond)
630{
631 /*
632 * The condition table entry has 4 bytes for the address of the
633 * register to check, 4 bytes for a mask to apply to the register and
634 * 4 for a test comparison value
635 */
636
637 uint16_t condptr = bios->condition_tbl_ptr + cond * CONDITION_SIZE;
638 uint32_t reg = ROM32(bios->data[condptr]);
639 uint32_t mask = ROM32(bios->data[condptr + 4]);
640 uint32_t cmpval = ROM32(bios->data[condptr + 8]);
641 uint32_t data;
642
643 BIOSLOG(bios, "0x%04X: Cond: 0x%02X, Reg: 0x%08X, Mask: 0x%08X\n",
644 offset, cond, reg, mask);
645
646 data = bios_rd32(bios, reg) & mask;
647
648 BIOSLOG(bios, "0x%04X: Checking if 0x%08X equals 0x%08X\n",
649 offset, data, cmpval);
650
651 return (data == cmpval);
652}
653
654static bool
655io_condition_met(struct nvbios *bios, uint16_t offset, uint8_t cond)
656{
657 /*
658 * The IO condition entry has 2 bytes for the IO port address; 1 byte
659 * for the index to write to io_port; 1 byte for the mask to apply to
660 * the byte read from io_port+1; and 1 byte for the value to compare
661 * against the masked byte.
662 */
663
664 uint16_t condptr = bios->io_condition_tbl_ptr + cond * IO_CONDITION_SIZE;
665 uint16_t io_port = ROM16(bios->data[condptr]);
666 uint8_t port_index = bios->data[condptr + 2];
667 uint8_t mask = bios->data[condptr + 3];
668 uint8_t cmpval = bios->data[condptr + 4];
669
670 uint8_t data = bios_idxprt_rd(bios, io_port, port_index) & mask;
671
672 BIOSLOG(bios, "0x%04X: Checking if 0x%02X equals 0x%02X\n",
673 offset, data, cmpval);
674
675 return (data == cmpval);
676}
677
/*
 * Program an NV50-style PLL at MMIO address "reg" to produce "clk" (kHz).
 *
 * Coefficients are computed from the VBIOS PLL limits table; the hardware
 * is only touched when VBIOS execution is enabled.  Returns 0 on success,
 * a get_pll_limits() error, or -ERANGE if no valid coefficients exist.
 */
static int
nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	/* read-modify-write: preserve the control bits we don't own */
	uint32_t reg0 = nv_rd32(dev, reg + 0);
	uint32_t reg1 = nv_rd32(dev, reg + 4);
	struct nouveau_pll_vals pll;
	struct pll_lims pll_limits;
	int ret;

	ret = get_pll_limits(dev, reg, &pll_limits);
	if (ret)
		return ret;

	/* returns the achieved clock, or 0 if no valid M/N/P was found */
	clk = nouveau_calc_pll_mnp(dev, &pll_limits, clk, &pll);
	if (!clk)
		return -ERANGE;

	/* log2P goes into bits 18:16 of reg+0; N1:M1 into bits 15:0 of reg+4 */
	reg0 = (reg0 & 0xfff8ffff) | (pll.log2P << 16);
	reg1 = (reg1 & 0xffff0000) | (pll.N1 << 8) | pll.M1;

	if (dev_priv->VBIOS.execute) {
		still_alive();
		/* write N/M before P -- keep this ordering */
		nv_wr32(dev, reg + 4, reg1);
		nv_wr32(dev, reg + 0, reg0);
	}

	return 0;
}
707
708static int
709setPLL(struct nvbios *bios, uint32_t reg, uint32_t clk)
710{
711 struct drm_device *dev = bios->dev;
712 struct drm_nouveau_private *dev_priv = dev->dev_private;
713 /* clk in kHz */
714 struct pll_lims pll_lim;
715 struct nouveau_pll_vals pllvals;
716 int ret;
717
718 if (dev_priv->card_type >= NV_50)
719 return nv50_pll_set(dev, reg, clk);
720
721 /* high regs (such as in the mac g5 table) are not -= 4 */
722 ret = get_pll_limits(dev, reg > 0x405c ? reg : reg - 4, &pll_lim);
723 if (ret)
724 return ret;
725
726 clk = nouveau_calc_pll_mnp(dev, &pll_lim, clk, &pllvals);
727 if (!clk)
728 return -ERANGE;
729
730 if (bios->execute) {
731 still_alive();
732 nouveau_hw_setpll(dev, reg, &pllvals);
733 }
734
735 return 0;
736}
737
738static int dcb_entry_idx_from_crtchead(struct drm_device *dev)
739{
740 struct drm_nouveau_private *dev_priv = dev->dev_private;
741 struct nvbios *bios = &dev_priv->VBIOS;
742
743 /*
744 * For the results of this function to be correct, CR44 must have been
745 * set (using bios_idxprt_wr to set crtchead), CR58 set for CR57 = 0,
746 * and the DCB table parsed, before the script calling the function is
747 * run. run_digital_op_script is example of how to do such setup
748 */
749
750 uint8_t dcb_entry = NVReadVgaCrtc5758(dev, bios->state.crtchead, 0);
751
752 if (dcb_entry > bios->bdcb.dcb.entries) {
753 NV_ERROR(dev, "CR58 doesn't have a valid DCB entry currently "
754 "(%02X)\n", dcb_entry);
755 dcb_entry = 0x7f; /* unused / invalid marker */
756 }
757
758 return dcb_entry;
759}
760
761static struct nouveau_i2c_chan *
762init_i2c_device_find(struct drm_device *dev, int i2c_index)
763{
764 struct drm_nouveau_private *dev_priv = dev->dev_private;
765 struct bios_parsed_dcb *bdcb = &dev_priv->VBIOS.bdcb;
766
767 if (i2c_index == 0xff) {
768 /* note: dcb_entry_idx_from_crtchead needs pre-script set-up */
769 int idx = dcb_entry_idx_from_crtchead(dev), shift = 0;
770 int default_indices = bdcb->i2c_default_indices;
771
772 if (idx != 0x7f && bdcb->dcb.entry[idx].i2c_upper_default)
773 shift = 4;
774
775 i2c_index = (default_indices >> shift) & 0xf;
776 }
777 if (i2c_index == 0x80) /* g80+ */
778 i2c_index = bdcb->i2c_default_indices & 0xf;
779
780 return nouveau_i2c_find(dev, i2c_index);
781}
782
783static uint32_t get_tmds_index_reg(struct drm_device *dev, uint8_t mlv)
784{
785 /*
786 * For mlv < 0x80, it is an index into a table of TMDS base addresses.
787 * For mlv == 0x80 use the "or" value of the dcb_entry indexed by
788 * CR58 for CR57 = 0 to index a table of offsets to the basic
789 * 0x6808b0 address.
790 * For mlv == 0x81 use the "or" value of the dcb_entry indexed by
791 * CR58 for CR57 = 0 to index a table of offsets to the basic
792 * 0x6808b0 address, and then flip the offset by 8.
793 */
794
795 struct drm_nouveau_private *dev_priv = dev->dev_private;
796 const int pramdac_offset[13] = {
797 0, 0, 0x8, 0, 0x2000, 0, 0, 0, 0x2008, 0, 0, 0, 0x2000 };
798 const uint32_t pramdac_table[4] = {
799 0x6808b0, 0x6808b8, 0x6828b0, 0x6828b8 };
800
801 if (mlv >= 0x80) {
802 int dcb_entry, dacoffset;
803
804 /* note: dcb_entry_idx_from_crtchead needs pre-script set-up */
805 dcb_entry = dcb_entry_idx_from_crtchead(dev);
806 if (dcb_entry == 0x7f)
807 return 0;
808 dacoffset = pramdac_offset[
809 dev_priv->VBIOS.bdcb.dcb.entry[dcb_entry].or];
810 if (mlv == 0x81)
811 dacoffset ^= 8;
812 return 0x6808b0 + dacoffset;
813 } else {
814 if (mlv > ARRAY_SIZE(pramdac_table)) {
815 NV_ERROR(dev, "Magic Lookup Value too big (%02X)\n",
816 mlv);
817 return 0;
818 }
819 return pramdac_table[mlv];
820 }
821}
822
static bool
init_io_restrict_prog(struct nvbios *bios, uint16_t offset,
		      struct init_exec *iexec)
{
	/*
	 * INIT_IO_RESTRICT_PROG opcode: 0x32 ('2')
	 *
	 * offset      (8  bit): opcode
	 * offset + 1  (16 bit): CRTC port
	 * offset + 3  (8  bit): CRTC index
	 * offset + 4  (8  bit): mask
	 * offset + 5  (8  bit): shift
	 * offset + 6  (8  bit): count
	 * offset + 7  (32 bit): register
	 * offset + 11 (32 bit): configuration 1
	 * ...
	 *
	 * Starting at offset + 11 there are "count" 32 bit values.
	 * To find out which value to use read index "CRTC index" on "CRTC
	 * port", AND this value with "mask" and then bit shift right "shift"
	 * bits.  Read the appropriate value using this index and write to
	 * "register"
	 */

	uint16_t crtcport = ROM16(bios->data[offset + 1]);
	uint8_t crtcindex = bios->data[offset + 3];
	uint8_t mask = bios->data[offset + 4];
	uint8_t shift = bios->data[offset + 5];
	uint8_t count = bios->data[offset + 6];
	uint32_t reg = ROM32(bios->data[offset + 7]);
	uint8_t config;
	uint32_t configval;

	if (!iexec->execute)
		return true;

	BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
		      "Shift: 0x%02X, Count: 0x%02X, Reg: 0x%08X\n",
		offset, crtcport, crtcindex, mask, shift, count, reg);

	/* strap value read from the CRTC register selects the config entry */
	config = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) >> shift;
	if (config > count) {
		NV_ERROR(bios->dev,
			 "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
			 offset, config, count);
		return false;
	}

	configval = ROM32(bios->data[offset + 11 + config * 4]);

	BIOSLOG(bios, "0x%04X: Writing config %02X\n", offset, config);

	bios_wr32(bios, reg, configval);

	return true;
}
879
880static bool
881init_repeat(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
882{
883 /*
884 * INIT_REPEAT opcode: 0x33 ('3')
885 *
886 * offset (8 bit): opcode
887 * offset + 1 (8 bit): count
888 *
889 * Execute script following this opcode up to INIT_REPEAT_END
890 * "count" times
891 */
892
893 uint8_t count = bios->data[offset + 1];
894 uint8_t i;
895
896 /* no iexec->execute check by design */
897
898 BIOSLOG(bios, "0x%04X: Repeating following segment %d times\n",
899 offset, count);
900
901 iexec->repeat = true;
902
903 /*
904 * count - 1, as the script block will execute once when we leave this
905 * opcode -- this is compatible with bios behaviour as:
906 * a) the block is always executed at least once, even if count == 0
907 * b) the bios interpreter skips to the op following INIT_END_REPEAT,
908 * while we don't
909 */
910 for (i = 0; i < count - 1; i++)
911 parse_init_table(bios, offset + 2, iexec);
912
913 iexec->repeat = false;
914
915 return true;
916}
917
static bool
init_io_restrict_pll(struct nvbios *bios, uint16_t offset,
		     struct init_exec *iexec)
{
	/*
	 * INIT_IO_RESTRICT_PLL opcode: 0x34 ('4')
	 *
	 * offset      (8  bit): opcode
	 * offset + 1  (16 bit): CRTC port
	 * offset + 3  (8  bit): CRTC index
	 * offset + 4  (8  bit): mask
	 * offset + 5  (8  bit): shift
	 * offset + 6  (8  bit): IO flag condition index
	 * offset + 7  (8  bit): count
	 * offset + 8  (32 bit): register
	 * offset + 12 (16 bit): frequency 1
	 * ...
	 *
	 * Starting at offset + 12 there are "count" 16 bit frequencies (10kHz).
	 * Set PLL register "register" to coefficients for frequency n,
	 * selected by reading index "CRTC index" of "CRTC port" ANDed with
	 * "mask" and shifted right by "shift".
	 *
	 * If "IO flag condition index" > 0, and condition met, double
	 * frequency before setting it.
	 */

	uint16_t crtcport = ROM16(bios->data[offset + 1]);
	uint8_t crtcindex = bios->data[offset + 3];
	uint8_t mask = bios->data[offset + 4];
	uint8_t shift = bios->data[offset + 5];
	/* signed on purpose: values <= 0 mean "no condition check" */
	int8_t io_flag_condition_idx = bios->data[offset + 6];
	uint8_t count = bios->data[offset + 7];
	uint32_t reg = ROM32(bios->data[offset + 8]);
	uint8_t config;
	uint16_t freq;

	if (!iexec->execute)
		return true;

	BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
		      "Shift: 0x%02X, IO Flag Condition: 0x%02X, "
		      "Count: 0x%02X, Reg: 0x%08X\n",
		offset, crtcport, crtcindex, mask, shift,
		io_flag_condition_idx, count, reg);

	/* strap value read from the CRTC register selects the frequency entry */
	config = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) >> shift;
	if (config > count) {
		NV_ERROR(bios->dev,
			 "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
			 offset, config, count);
		return false;
	}

	freq = ROM16(bios->data[offset + 12 + config * 2]);

	if (io_flag_condition_idx > 0) {
		if (io_flag_condition_met(bios, offset, io_flag_condition_idx)) {
			BIOSLOG(bios, "0x%04X: Condition fulfilled -- "
				      "frequency doubled\n", offset);
			freq *= 2;
		} else
			BIOSLOG(bios, "0x%04X: Condition not fulfilled -- "
				      "frequency unchanged\n", offset);
	}

	BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Config: 0x%02X, Freq: %d0kHz\n",
		offset, reg, config, freq);

	/* table entries are in 10kHz units; setPLL() takes kHz */
	setPLL(bios, reg, freq * 10);

	return true;
}
991
992static bool
993init_end_repeat(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
994{
995 /*
996 * INIT_END_REPEAT opcode: 0x36 ('6')
997 *
998 * offset (8 bit): opcode
999 *
1000 * Marks the end of the block for INIT_REPEAT to repeat
1001 */
1002
1003 /* no iexec->execute check by design */
1004
1005 /*
1006 * iexec->repeat flag necessary to go past INIT_END_REPEAT opcode when
1007 * we're not in repeat mode
1008 */
1009 if (iexec->repeat)
1010 return false;
1011
1012 return true;
1013}
1014
1015static bool
1016init_copy(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1017{
1018 /*
1019 * INIT_COPY opcode: 0x37 ('7')
1020 *
1021 * offset (8 bit): opcode
1022 * offset + 1 (32 bit): register
1023 * offset + 5 (8 bit): shift
1024 * offset + 6 (8 bit): srcmask
1025 * offset + 7 (16 bit): CRTC port
1026 * offset + 9 (8 bit): CRTC index
1027 * offset + 10 (8 bit): mask
1028 *
1029 * Read index "CRTC index" on "CRTC port", AND with "mask", OR with
1030 * (REGVAL("register") >> "shift" & "srcmask") and write-back to CRTC
1031 * port
1032 */
1033
1034 uint32_t reg = ROM32(bios->data[offset + 1]);
1035 uint8_t shift = bios->data[offset + 5];
1036 uint8_t srcmask = bios->data[offset + 6];
1037 uint16_t crtcport = ROM16(bios->data[offset + 7]);
1038 uint8_t crtcindex = bios->data[offset + 9];
1039 uint8_t mask = bios->data[offset + 10];
1040 uint32_t data;
1041 uint8_t crtcdata;
1042
1043 if (!iexec->execute)
1044 return true;
1045
1046 BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Shift: 0x%02X, SrcMask: 0x%02X, "
1047 "Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X\n",
1048 offset, reg, shift, srcmask, crtcport, crtcindex, mask);
1049
1050 data = bios_rd32(bios, reg);
1051
1052 if (shift < 0x80)
1053 data >>= shift;
1054 else
1055 data <<= (0x100 - shift);
1056
1057 data &= srcmask;
1058
1059 crtcdata = bios_idxprt_rd(bios, crtcport, crtcindex) & mask;
1060 crtcdata |= (uint8_t)data;
1061 bios_idxprt_wr(bios, crtcport, crtcindex, crtcdata);
1062
1063 return true;
1064}
1065
1066static bool
1067init_not(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1068{
1069 /*
1070 * INIT_NOT opcode: 0x38 ('8')
1071 *
1072 * offset (8 bit): opcode
1073 *
1074 * Invert the current execute / no-execute condition (i.e. "else")
1075 */
1076 if (iexec->execute)
1077 BIOSLOG(bios, "0x%04X: ------ Skipping following commands ------\n", offset);
1078 else
1079 BIOSLOG(bios, "0x%04X: ------ Executing following commands ------\n", offset);
1080
1081 iexec->execute = !iexec->execute;
1082 return true;
1083}
1084
1085static bool
1086init_io_flag_condition(struct nvbios *bios, uint16_t offset,
1087 struct init_exec *iexec)
1088{
1089 /*
1090 * INIT_IO_FLAG_CONDITION opcode: 0x39 ('9')
1091 *
1092 * offset (8 bit): opcode
1093 * offset + 1 (8 bit): condition number
1094 *
1095 * Check condition "condition number" in the IO flag condition table.
1096 * If condition not met skip subsequent opcodes until condition is
1097 * inverted (INIT_NOT), or we hit INIT_RESUME
1098 */
1099
1100 uint8_t cond = bios->data[offset + 1];
1101
1102 if (!iexec->execute)
1103 return true;
1104
1105 if (io_flag_condition_met(bios, offset, cond))
1106 BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset);
1107 else {
1108 BIOSLOG(bios, "0x%04X: Condition not fulfilled -- skipping following commands\n", offset);
1109 iexec->execute = false;
1110 }
1111
1112 return true;
1113}
1114
static bool
init_idx_addr_latched(struct nvbios *bios, uint16_t offset,
		      struct init_exec *iexec)
{
	/*
	 * INIT_INDEX_ADDRESS_LATCHED opcode: 0x49 ('I')
	 *
	 * offset      (8  bit): opcode
	 * offset + 1  (32 bit): control register
	 * offset + 5  (32 bit): data register
	 * offset + 9  (32 bit): mask
	 * offset + 13 (32 bit): data
	 * offset + 17 (8  bit): count
	 * offset + 18 (8  bit): address 1
	 * offset + 19 (8  bit): data 1
	 * ...
	 *
	 * For each of "count" address and data pairs, write "data n" to
	 * "data register", read the current value of "control register",
	 * and write it back once ANDed with "mask", ORed with "data",
	 * and ORed with "address n"
	 */

	uint32_t controlreg = ROM32(bios->data[offset + 1]);
	uint32_t datareg = ROM32(bios->data[offset + 5]);
	uint32_t mask = ROM32(bios->data[offset + 9]);
	uint32_t data = ROM32(bios->data[offset + 13]);
	uint8_t count = bios->data[offset + 17];
	uint32_t value;
	int i;

	if (!iexec->execute)
		return true;

	BIOSLOG(bios, "0x%04X: ControlReg: 0x%08X, DataReg: 0x%08X, "
		      "Mask: 0x%08X, Data: 0x%08X, Count: 0x%02X\n",
		offset, controlreg, datareg, mask, data, count);

	for (i = 0; i < count; i++) {
		uint8_t instaddress = bios->data[offset + 18 + i * 2];
		uint8_t instdata = bios->data[offset + 19 + i * 2];

		BIOSLOG(bios, "0x%04X: Address: 0x%02X, Data: 0x%02X\n",
			offset, instaddress, instdata);

		/* data must be latched before the control write -- keep order */
		bios_wr32(bios, datareg, instdata);
		value = bios_rd32(bios, controlreg) & mask;
		value |= data;
		value |= instaddress;
		bios_wr32(bios, controlreg, value);
	}

	return true;
}
1169
static bool
init_io_restrict_pll2(struct nvbios *bios, uint16_t offset,
		      struct init_exec *iexec)
{
	/*
	 * INIT_IO_RESTRICT_PLL2 opcode: 0x4A ('J')
	 *
	 * offset      (8  bit): opcode
	 * offset + 1  (16 bit): CRTC port
	 * offset + 3  (8  bit): CRTC index
	 * offset + 4  (8  bit): mask
	 * offset + 5  (8  bit): shift
	 * offset + 6  (8  bit): count
	 * offset + 7  (32 bit): register
	 * offset + 11 (32 bit): frequency 1
	 * ...
	 *
	 * Starting at offset + 11 there are "count" 32 bit frequencies (kHz).
	 * Set PLL register "register" to coefficients for frequency n,
	 * selected by reading index "CRTC index" of "CRTC port" ANDed with
	 * "mask" and shifted right by "shift".
	 */

	uint16_t crtcport = ROM16(bios->data[offset + 1]);
	uint8_t crtcindex = bios->data[offset + 3];
	uint8_t mask = bios->data[offset + 4];
	uint8_t shift = bios->data[offset + 5];
	uint8_t count = bios->data[offset + 6];
	uint32_t reg = ROM32(bios->data[offset + 7]);
	uint8_t config;
	uint32_t freq;

	if (!iexec->execute)
		return true;

	BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
		      "Shift: 0x%02X, Count: 0x%02X, Reg: 0x%08X\n",
		offset, crtcport, crtcindex, mask, shift, count, reg);

	/* a zero register operand makes this opcode a no-op */
	if (!reg)
		return true;

	/* strap value read from the CRTC register selects the frequency entry */
	config = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) >> shift;
	if (config > count) {
		NV_ERROR(bios->dev,
			 "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
			 offset, config, count);
		return false;
	}

	freq = ROM32(bios->data[offset + 11 + config * 4]);

	BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Config: 0x%02X, Freq: %dkHz\n",
		offset, reg, config, freq);

	setPLL(bios, reg, freq);

	return true;
}
1229
1230static bool
1231init_pll2(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1232{
1233 /*
1234 * INIT_PLL2 opcode: 0x4B ('K')
1235 *
1236 * offset (8 bit): opcode
1237 * offset + 1 (32 bit): register
1238 * offset + 5 (32 bit): freq
1239 *
1240 * Set PLL register "register" to coefficients for frequency "freq"
1241 */
1242
1243 uint32_t reg = ROM32(bios->data[offset + 1]);
1244 uint32_t freq = ROM32(bios->data[offset + 5]);
1245
1246 if (!iexec->execute)
1247 return true;
1248
1249 BIOSLOG(bios, "0x%04X: Reg: 0x%04X, Freq: %dkHz\n",
1250 offset, reg, freq);
1251
1252 setPLL(bios, reg, freq);
1253 return true;
1254}
1255
static bool
init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
	/*
	 * INIT_I2C_BYTE opcode: 0x4C ('L')
	 *
	 * offset     (8 bit): opcode
	 * offset + 1 (8 bit): DCB I2C table entry index
	 * offset + 2 (8 bit): I2C slave address
	 * offset + 3 (8 bit): count
	 * offset + 4 (8 bit): I2C register 1
	 * offset + 5 (8 bit): mask 1
	 * offset + 6 (8 bit): data 1
	 * ...
	 *
	 * For each of "count" registers given by "I2C register n" on the device
	 * addressed by "I2C slave address" on the I2C bus given by
	 * "DCB I2C table entry index", read the register, AND the result with
	 * "mask n" and OR it with "data n" before writing it back to the device
	 */

	uint8_t i2c_index = bios->data[offset + 1];
	uint8_t i2c_address = bios->data[offset + 2];
	uint8_t count = bios->data[offset + 3];
	struct nouveau_i2c_chan *chan;
	struct i2c_msg msg;
	int i;

	if (!iexec->execute)
		return true;

	BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, "
		      "Count: 0x%02X\n",
		offset, i2c_index, i2c_address, count);

	chan = init_i2c_device_find(bios->dev, i2c_index);
	if (!chan)
		return false;

	for (i = 0; i < count; i++) {
		uint8_t i2c_reg = bios->data[offset + 4 + i * 3];
		uint8_t mask = bios->data[offset + 5 + i * 3];
		uint8_t data = bios->data[offset + 6 + i * 3];
		uint8_t value;

		/* read the current byte; note this happens even when
		 * bios->execute is false, so the old value can be logged */
		msg.addr = i2c_address;
		msg.flags = I2C_M_RD;
		msg.len = 1;
		msg.buf = &value;
		if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
			return false;

		BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Value: 0x%02X, "
			      "Mask: 0x%02X, Data: 0x%02X\n",
			offset, i2c_reg, value, mask, data);

		value = (value & mask) | data;

		/* only the write-back is gated on execution being enabled */
		if (bios->execute) {
			msg.addr = i2c_address;
			msg.flags = 0;
			msg.len = 1;
			msg.buf = &value;
			if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
				return false;
		}
	}

	return true;
}
1326
static bool
init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
	/*
	 * INIT_ZM_I2C_BYTE opcode: 0x4D ('M')
	 *
	 * offset     (8 bit): opcode
	 * offset + 1 (8 bit): DCB I2C table entry index
	 * offset + 2 (8 bit): I2C slave address
	 * offset + 3 (8 bit): count
	 * offset + 4 (8 bit): I2C register 1
	 * offset + 5 (8 bit): data 1
	 * ...
	 *
	 * For each of "count" registers given by "I2C register n" on the device
	 * addressed by "I2C slave address" on the I2C bus given by
	 * "DCB I2C table entry index", set the register to "data n"
	 */

	uint8_t i2c_index = bios->data[offset + 1];
	uint8_t i2c_address = bios->data[offset + 2];
	uint8_t count = bios->data[offset + 3];
	struct nouveau_i2c_chan *chan;
	struct i2c_msg msg;
	int i;

	if (!iexec->execute)
		return true;

	BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, "
		      "Count: 0x%02X\n",
		offset, i2c_index, i2c_address, count);

	chan = init_i2c_device_find(bios->dev, i2c_index);
	if (!chan)
		return false;

	for (i = 0; i < count; i++) {
		uint8_t i2c_reg = bios->data[offset + 4 + i * 2];
		uint8_t data = bios->data[offset + 5 + i * 2];

		BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Data: 0x%02X\n",
			offset, i2c_reg, data);

		/* each register write is its own single-byte transfer,
		 * skipped entirely when only "faking" execution */
		if (bios->execute) {
			msg.addr = i2c_address;
			msg.flags = 0;
			msg.len = 1;
			msg.buf = &data;
			if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
				return false;
		}
	}

	return true;
}
1383
static bool
init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
	/*
	 * INIT_ZM_I2C opcode: 0x4E ('N')
	 *
	 * offset     (8 bit): opcode
	 * offset + 1 (8 bit): DCB I2C table entry index
	 * offset + 2 (8 bit): I2C slave address
	 * offset + 3 (8 bit): count
	 * offset + 4 (8 bit): data 1
	 * ...
	 *
	 * Send "count" bytes ("data n") to the device addressed by "I2C slave
	 * address" on the I2C bus given by "DCB I2C table entry index"
	 */

	uint8_t i2c_index = bios->data[offset + 1];
	uint8_t i2c_address = bios->data[offset + 2];
	uint8_t count = bios->data[offset + 3];
	struct nouveau_i2c_chan *chan;
	struct i2c_msg msg;
	/* count is 8 bit, so at most 255 bytes -- always fits here */
	uint8_t data[256];
	int i;

	if (!iexec->execute)
		return true;

	BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, "
		      "Count: 0x%02X\n",
		offset, i2c_index, i2c_address, count);

	chan = init_i2c_device_find(bios->dev, i2c_index);
	if (!chan)
		return false;

	for (i = 0; i < count; i++) {
		data[i] = bios->data[offset + 4 + i];

		BIOSLOG(bios, "0x%04X: Data: 0x%02X\n", offset, data[i]);
	}

	/* all bytes go out as one transfer, gated on execution being enabled */
	if (bios->execute) {
		msg.addr = i2c_address;
		msg.flags = 0;
		msg.len = count;
		msg.buf = data;
		if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
			return false;
	}

	return true;
}
1437
static bool
init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
	/*
	 * INIT_TMDS opcode: 0x4F ('O')	(non-canon name)
	 *
	 * offset     (8 bit): opcode
	 * offset + 1 (8 bit): magic lookup value
	 * offset + 2 (8 bit): TMDS address
	 * offset + 3 (8 bit): mask
	 * offset + 4 (8 bit): data
	 *
	 * Read the data reg for TMDS address "TMDS address", AND it with mask
	 * and OR it with data, then write it back
	 * "magic lookup value" determines which TMDS base address register is
	 * used -- see get_tmds_index_reg()
	 */

	uint8_t mlv = bios->data[offset + 1];
	uint32_t tmdsaddr = bios->data[offset + 2];
	uint8_t mask = bios->data[offset + 3];
	uint8_t data = bios->data[offset + 4];
	uint32_t reg, value;

	if (!iexec->execute)
		return true;

	BIOSLOG(bios, "0x%04X: MagicLookupValue: 0x%02X, TMDSAddr: 0x%02X, "
		      "Mask: 0x%02X, Data: 0x%02X\n",
		offset, mlv, tmdsaddr, mask, data);

	reg = get_tmds_index_reg(bios->dev, mlv);
	if (!reg)
		return false;

	/*
	 * Latch the address with writes disabled, read-modify-write the data
	 * register at reg + 4, then re-latch with writes enabled to commit.
	 * The ordering of these three accesses is what the hardware expects.
	 */
	bios_wr32(bios, reg,
		  tmdsaddr | NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE);
	value = (bios_rd32(bios, reg + 4) & mask) | data;
	bios_wr32(bios, reg + 4, value);
	bios_wr32(bios, reg, tmdsaddr);

	return true;
}
1481
static bool
init_zm_tmds_group(struct nvbios *bios, uint16_t offset,
		   struct init_exec *iexec)
{
	/*
	 * INIT_ZM_TMDS_GROUP opcode: 0x50 ('P')	(non-canon name)
	 *
	 * offset     (8 bit): opcode
	 * offset + 1 (8 bit): magic lookup value
	 * offset + 2 (8 bit): count
	 * offset + 3 (8 bit): addr 1
	 * offset + 4 (8 bit): data 1
	 * ...
	 *
	 * For each of "count" TMDS address and data pairs write "data n" to
	 * "addr n".  "magic lookup value" determines which TMDS base address
	 * register is used -- see get_tmds_index_reg()
	 */

	uint8_t mlv = bios->data[offset + 1];
	uint8_t count = bios->data[offset + 2];
	uint32_t reg;
	int i;

	if (!iexec->execute)
		return true;

	BIOSLOG(bios, "0x%04X: MagicLookupValue: 0x%02X, Count: 0x%02X\n",
		offset, mlv, count);

	reg = get_tmds_index_reg(bios->dev, mlv);
	if (!reg)
		return false;

	for (i = 0; i < count; i++) {
		uint8_t tmdsaddr = bios->data[offset + 3 + i * 2];
		uint8_t tmdsdata = bios->data[offset + 4 + i * 2];

		/* data must be staged at reg + 4 before the address write
		 * latches it -- keep this ordering */
		bios_wr32(bios, reg + 4, tmdsdata);
		bios_wr32(bios, reg, tmdsaddr);
	}

	return true;
}
1526
static bool
init_cr_idx_adr_latch(struct nvbios *bios, uint16_t offset,
		      struct init_exec *iexec)
{
	/*
	 * INIT_CR_INDEX_ADDRESS_LATCHED opcode: 0x51 ('Q')
	 *
	 * offset     (8 bit): opcode
	 * offset + 1 (8 bit): CRTC index1
	 * offset + 2 (8 bit): CRTC index2
	 * offset + 3 (8 bit): baseaddr
	 * offset + 4 (8 bit): count
	 * offset + 5 (8 bit): data 1
	 * ...
	 *
	 * For each of "count" address and data pairs, write "baseaddr + n" to
	 * "CRTC index1" and "data n" to "CRTC index2"
	 * Once complete, restore initial value read from "CRTC index1"
	 */
	uint8_t crtcindex1 = bios->data[offset + 1];
	uint8_t crtcindex2 = bios->data[offset + 2];
	uint8_t baseaddr = bios->data[offset + 3];
	uint8_t count = bios->data[offset + 4];
	uint8_t oldaddr, data;
	int i;

	if (!iexec->execute)
		return true;

	BIOSLOG(bios, "0x%04X: Index1: 0x%02X, Index2: 0x%02X, "
		      "BaseAddr: 0x%02X, Count: 0x%02X\n",
		offset, crtcindex1, crtcindex2, baseaddr, count);

	/* remember the latch register's current value so it can be restored */
	oldaddr = bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, crtcindex1);

	for (i = 0; i < count; i++) {
		/* the address write to index1 selects where the following
		 * data write to index2 lands -- keep this ordering */
		bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex1,
				     baseaddr + i);
		data = bios->data[offset + 5 + i];
		bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex2, data);
	}

	bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex1, oldaddr);

	return true;
}
1573
1574static bool
1575init_cr(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1576{
1577 /*
1578 * INIT_CR opcode: 0x52 ('R')
1579 *
1580 * offset (8 bit): opcode
1581 * offset + 1 (8 bit): CRTC index
1582 * offset + 2 (8 bit): mask
1583 * offset + 3 (8 bit): data
1584 *
1585 * Assign the value of at "CRTC index" ANDed with mask and ORed with
1586 * data back to "CRTC index"
1587 */
1588
1589 uint8_t crtcindex = bios->data[offset + 1];
1590 uint8_t mask = bios->data[offset + 2];
1591 uint8_t data = bios->data[offset + 3];
1592 uint8_t value;
1593
1594 if (!iexec->execute)
1595 return true;
1596
1597 BIOSLOG(bios, "0x%04X: Index: 0x%02X, Mask: 0x%02X, Data: 0x%02X\n",
1598 offset, crtcindex, mask, data);
1599
1600 value = bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, crtcindex) & mask;
1601 value |= data;
1602 bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex, value);
1603
1604 return true;
1605}
1606
1607static bool
1608init_zm_cr(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1609{
1610 /*
1611 * INIT_ZM_CR opcode: 0x53 ('S')
1612 *
1613 * offset (8 bit): opcode
1614 * offset + 1 (8 bit): CRTC index
1615 * offset + 2 (8 bit): value
1616 *
1617 * Assign "value" to CRTC register with index "CRTC index".
1618 */
1619
1620 uint8_t crtcindex = ROM32(bios->data[offset + 1]);
1621 uint8_t data = bios->data[offset + 2];
1622
1623 if (!iexec->execute)
1624 return true;
1625
1626 bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex, data);
1627
1628 return true;
1629}
1630
1631static bool
1632init_zm_cr_group(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1633{
1634 /*
1635 * INIT_ZM_CR_GROUP opcode: 0x54 ('T')
1636 *
1637 * offset (8 bit): opcode
1638 * offset + 1 (8 bit): count
1639 * offset + 2 (8 bit): CRTC index 1
1640 * offset + 3 (8 bit): value 1
1641 * ...
1642 *
1643 * For "count", assign "value n" to CRTC register with index
1644 * "CRTC index n".
1645 */
1646
1647 uint8_t count = bios->data[offset + 1];
1648 int i;
1649
1650 if (!iexec->execute)
1651 return true;
1652
1653 for (i = 0; i < count; i++)
1654 init_zm_cr(bios, offset + 2 + 2 * i - 1, iexec);
1655
1656 return true;
1657}
1658
static bool
init_condition_time(struct nvbios *bios, uint16_t offset,
		    struct init_exec *iexec)
{
	/*
	 * INIT_CONDITION_TIME opcode: 0x56 ('V')
	 *
	 * offset     (8 bit): opcode
	 * offset + 1 (8 bit): condition number
	 * offset + 2 (8 bit): retries / 50
	 *
	 * Check condition "condition number" in the condition table.
	 * Bios code then sleeps for 2ms if the condition is not met, and
	 * repeats up to "retries" times, but on one C51 this has proved
	 * insufficient.  In mmiotraces the driver sleeps for 20ms, so we do
	 * this, and bail after "retries" times, or 2s, whichever is less.
	 * If still not met after retries, clear execution flag for this table.
	 */

	uint8_t cond = bios->data[offset + 1];
	uint16_t retries = bios->data[offset + 2] * 50;
	unsigned cnt;

	if (!iexec->execute)
		return true;

	/* cap at 100 iterations of 20ms = the 2s ceiling described above */
	if (retries > 100)
		retries = 100;

	BIOSLOG(bios, "0x%04X: Condition: 0x%02X, Retries: 0x%02X\n",
		offset, cond, retries);

	if (!bios->execute) /* avoid 2s delays when "faking" execution */
		retries = 1;

	for (cnt = 0; cnt < retries; cnt++) {
		if (bios_condition_met(bios, offset, cond)) {
			BIOSLOG(bios, "0x%04X: Condition met, continuing\n",
				offset);
			break;
		} else {
			BIOSLOG(bios, "0x%04X: "
				"Condition not met, sleeping for 20ms\n",
								offset);
			msleep(20);
		}
	}

	/* re-check after the loop; on timeout, disable this table's opcodes */
	if (!bios_condition_met(bios, offset, cond)) {
		NV_WARN(bios->dev,
			"0x%04X: Condition still not met after %dms, "
			"skipping following opcodes\n", offset, 20 * retries);
		iexec->execute = false;
	}

	return true;
}
1716
1717static bool
1718init_zm_reg_sequence(struct nvbios *bios, uint16_t offset,
1719 struct init_exec *iexec)
1720{
1721 /*
1722 * INIT_ZM_REG_SEQUENCE opcode: 0x58 ('X')
1723 *
1724 * offset (8 bit): opcode
1725 * offset + 1 (32 bit): base register
1726 * offset + 5 (8 bit): count
1727 * offset + 6 (32 bit): value 1
1728 * ...
1729 *
1730 * Starting at offset + 6 there are "count" 32 bit values.
1731 * For "count" iterations set "base register" + 4 * current_iteration
1732 * to "value current_iteration"
1733 */
1734
1735 uint32_t basereg = ROM32(bios->data[offset + 1]);
1736 uint32_t count = bios->data[offset + 5];
1737 int i;
1738
1739 if (!iexec->execute)
1740 return true;
1741
1742 BIOSLOG(bios, "0x%04X: BaseReg: 0x%08X, Count: 0x%02X\n",
1743 offset, basereg, count);
1744
1745 for (i = 0; i < count; i++) {
1746 uint32_t reg = basereg + i * 4;
1747 uint32_t data = ROM32(bios->data[offset + 6 + i * 4]);
1748
1749 bios_wr32(bios, reg, data);
1750 }
1751
1752 return true;
1753}
1754
1755static bool
1756init_sub_direct(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1757{
1758 /*
1759 * INIT_SUB_DIRECT opcode: 0x5B ('[')
1760 *
1761 * offset (8 bit): opcode
1762 * offset + 1 (16 bit): subroutine offset (in bios)
1763 *
1764 * Calls a subroutine that will execute commands until INIT_DONE
1765 * is found.
1766 */
1767
1768 uint16_t sub_offset = ROM16(bios->data[offset + 1]);
1769
1770 if (!iexec->execute)
1771 return true;
1772
1773 BIOSLOG(bios, "0x%04X: Executing subroutine at 0x%04X\n",
1774 offset, sub_offset);
1775
1776 parse_init_table(bios, sub_offset, iexec);
1777
1778 BIOSLOG(bios, "0x%04X: End of 0x%04X subroutine\n", offset, sub_offset);
1779
1780 return true;
1781}
1782
1783static bool
1784init_copy_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1785{
1786 /*
1787 * INIT_COPY_NV_REG opcode: 0x5F ('_')
1788 *
1789 * offset (8 bit): opcode
1790 * offset + 1 (32 bit): src reg
1791 * offset + 5 (8 bit): shift
1792 * offset + 6 (32 bit): src mask
1793 * offset + 10 (32 bit): xor
1794 * offset + 14 (32 bit): dst reg
1795 * offset + 18 (32 bit): dst mask
1796 *
1797 * Shift REGVAL("src reg") right by (signed) "shift", AND result with
1798 * "src mask", then XOR with "xor". Write this OR'd with
1799 * (REGVAL("dst reg") AND'd with "dst mask") to "dst reg"
1800 */
1801
1802 uint32_t srcreg = *((uint32_t *)(&bios->data[offset + 1]));
1803 uint8_t shift = bios->data[offset + 5];
1804 uint32_t srcmask = *((uint32_t *)(&bios->data[offset + 6]));
1805 uint32_t xor = *((uint32_t *)(&bios->data[offset + 10]));
1806 uint32_t dstreg = *((uint32_t *)(&bios->data[offset + 14]));
1807 uint32_t dstmask = *((uint32_t *)(&bios->data[offset + 18]));
1808 uint32_t srcvalue, dstvalue;
1809
1810 if (!iexec->execute)
1811 return true;
1812
1813 BIOSLOG(bios, "0x%04X: SrcReg: 0x%08X, Shift: 0x%02X, SrcMask: 0x%08X, "
1814 "Xor: 0x%08X, DstReg: 0x%08X, DstMask: 0x%08X\n",
1815 offset, srcreg, shift, srcmask, xor, dstreg, dstmask);
1816
1817 srcvalue = bios_rd32(bios, srcreg);
1818
1819 if (shift < 0x80)
1820 srcvalue >>= shift;
1821 else
1822 srcvalue <<= (0x100 - shift);
1823
1824 srcvalue = (srcvalue & srcmask) ^ xor;
1825
1826 dstvalue = bios_rd32(bios, dstreg) & dstmask;
1827
1828 bios_wr32(bios, dstreg, dstvalue | srcvalue);
1829
1830 return true;
1831}
1832
1833static bool
1834init_zm_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1835{
1836 /*
1837 * INIT_ZM_INDEX_IO opcode: 0x62 ('b')
1838 *
1839 * offset (8 bit): opcode
1840 * offset + 1 (16 bit): CRTC port
1841 * offset + 3 (8 bit): CRTC index
1842 * offset + 4 (8 bit): data
1843 *
1844 * Write "data" to index "CRTC index" of "CRTC port"
1845 */
1846 uint16_t crtcport = ROM16(bios->data[offset + 1]);
1847 uint8_t crtcindex = bios->data[offset + 3];
1848 uint8_t data = bios->data[offset + 4];
1849
1850 if (!iexec->execute)
1851 return true;
1852
1853 bios_idxprt_wr(bios, crtcport, crtcindex, data);
1854
1855 return true;
1856}
1857
1858static bool
1859init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1860{
1861 /*
1862 * INIT_COMPUTE_MEM opcode: 0x63 ('c')
1863 *
1864 * offset (8 bit): opcode
1865 *
1866 * This opcode is meant to set NV_PFB_CFG0 (0x100200) appropriately so
1867 * that the hardware can correctly calculate how much VRAM it has
1868 * (and subsequently report that value in NV_PFB_CSTATUS (0x10020C))
1869 *
1870 * The implementation of this opcode in general consists of two parts:
1871 * 1) determination of the memory bus width
1872 * 2) determination of how many of the card's RAM pads have ICs attached
1873 *
1874 * 1) is done by a cunning combination of writes to offsets 0x1c and
1875 * 0x3c in the framebuffer, and seeing whether the written values are
1876 * read back correctly. This then affects bits 4-7 of NV_PFB_CFG0
1877 *
1878 * 2) is done by a cunning combination of writes to an offset slightly
1879 * less than the maximum memory reported by NV_PFB_CSTATUS, then seeing
1880 * if the test pattern can be read back. This then affects bits 12-15 of
1881 * NV_PFB_CFG0
1882 *
1883 * In this context a "cunning combination" may include multiple reads
1884 * and writes to varying locations, often alternating the test pattern
1885 * and 0, doubtless to make sure buffers are filled, residual charges
1886 * on tracks are removed etc.
1887 *
1888 * Unfortunately, the "cunning combination"s mentioned above, and the
1889 * changes to the bits in NV_PFB_CFG0 differ with nearly every bios
1890 * trace I have.
1891 *
1892 * Therefore, we cheat and assume the value of NV_PFB_CFG0 with which
1893 * we started was correct, and use that instead
1894 */
1895
1896 /* no iexec->execute check by design */
1897
1898 /*
1899 * This appears to be a NOP on G8x chipsets, both io logs of the VBIOS
1900 * and kmmio traces of the binary driver POSTing the card show nothing
1901 * being done for this opcode. why is it still listed in the table?!
1902 */
1903
1904 struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
1905
1906 if (dev_priv->card_type >= NV_50)
1907 return true;
1908
1909 /*
1910 * On every card I've seen, this step gets done for us earlier in
1911 * the init scripts
1912 uint8_t crdata = bios_idxprt_rd(dev, NV_VIO_SRX, 0x01);
1913 bios_idxprt_wr(dev, NV_VIO_SRX, 0x01, crdata | 0x20);
1914 */
1915
1916 /*
1917 * This also has probably been done in the scripts, but an mmio trace of
1918 * s3 resume shows nvidia doing it anyway (unlike the NV_VIO_SRX write)
1919 */
1920 bios_wr32(bios, NV_PFB_REFCTRL, NV_PFB_REFCTRL_VALID_1);
1921
1922 /* write back the saved configuration value */
1923 bios_wr32(bios, NV_PFB_CFG0, bios->state.saved_nv_pfb_cfg0);
1924
1925 return true;
1926}
1927
1928static bool
1929init_reset(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1930{
1931 /*
1932 * INIT_RESET opcode: 0x65 ('e')
1933 *
1934 * offset (8 bit): opcode
1935 * offset + 1 (32 bit): register
1936 * offset + 5 (32 bit): value1
1937 * offset + 9 (32 bit): value2
1938 *
1939 * Assign "value1" to "register", then assign "value2" to "register"
1940 */
1941
1942 uint32_t reg = ROM32(bios->data[offset + 1]);
1943 uint32_t value1 = ROM32(bios->data[offset + 5]);
1944 uint32_t value2 = ROM32(bios->data[offset + 9]);
1945 uint32_t pci_nv_19, pci_nv_20;
1946
1947 /* no iexec->execute check by design */
1948
1949 pci_nv_19 = bios_rd32(bios, NV_PBUS_PCI_NV_19);
1950 bios_wr32(bios, NV_PBUS_PCI_NV_19, 0);
1951 bios_wr32(bios, reg, value1);
1952
1953 udelay(10);
1954
1955 bios_wr32(bios, reg, value2);
1956 bios_wr32(bios, NV_PBUS_PCI_NV_19, pci_nv_19);
1957
1958 pci_nv_20 = bios_rd32(bios, NV_PBUS_PCI_NV_20);
1959 pci_nv_20 &= ~NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED; /* 0xfffffffe */
1960 bios_wr32(bios, NV_PBUS_PCI_NV_20, pci_nv_20);
1961
1962 return true;
1963}
1964
static bool
init_configure_mem(struct nvbios *bios, uint16_t offset,
		   struct init_exec *iexec)
{
	/*
	 * INIT_CONFIGURE_MEM opcode: 0x66 ('f')
	 *
	 * offset      (8 bit): opcode
	 *
	 * Equivalent to INIT_DONE on bios version 3 or greater.
	 * For early bios versions, sets up the memory registers, using values
	 * taken from the memory init table
	 */

	/* no iexec->execute check by design */

	/*
	 * The high nibble of the CR3C scratch register selects which
	 * MEM_INIT_SIZE-byte entry of the legacy memory init table to use;
	 * its per-register data starts 6 bytes into the entry.
	 */
	uint16_t meminitoffs = bios->legacy.mem_init_tbl_ptr + MEM_INIT_SIZE * (bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_SCRATCH4__INDEX) >> 4);
	uint16_t seqtbloffs = bios->legacy.sdr_seq_tbl_ptr, meminitdata = meminitoffs + 6;
	uint32_t reg, data;

	/* returning false terminates parsing of this table, like INIT_DONE */
	if (bios->major_version > 2)
		return false;

	bios_idxprt_wr(bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX, bios_idxprt_rd(
		       bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX) | 0x20);

	/* bit 0 of the mem init entry selects the DDR register sequence */
	if (bios->data[meminitoffs] & 1)
		seqtbloffs = bios->legacy.ddr_seq_tbl_ptr;

	/* the sequence table is a 0xffffffff-terminated list of registers */
	for (reg = ROM32(bios->data[seqtbloffs]);
	     reg != 0xffffffff;
	     reg = ROM32(bios->data[seqtbloffs += 4])) {

		switch (reg) {
		case NV_PFB_PRE:
			data = NV_PFB_PRE_CMD_PRECHARGE;
			break;
		case NV_PFB_PAD:
			data = NV_PFB_PAD_CKE_NORMAL;
			break;
		case NV_PFB_REF:
			data = NV_PFB_REF_CMD_REFRESH;
			break;
		default:
			/*
			 * Any other register takes its value from the mem
			 * init entry; 0xffffffff there means "skip this
			 * register" (the data pointer still advances).
			 */
			data = ROM32(bios->data[meminitdata]);
			meminitdata += 4;
			if (data == 0xffffffff)
				continue;
		}

		bios_wr32(bios, reg, data);
	}

	return true;
}
2020
2021static bool
2022init_configure_clk(struct nvbios *bios, uint16_t offset,
2023 struct init_exec *iexec)
2024{
2025 /*
2026 * INIT_CONFIGURE_CLK opcode: 0x67 ('g')
2027 *
2028 * offset (8 bit): opcode
2029 *
2030 * Equivalent to INIT_DONE on bios version 3 or greater.
2031 * For early bios versions, sets up the NVClk and MClk PLLs, using
2032 * values taken from the memory init table
2033 */
2034
2035 /* no iexec->execute check by design */
2036
2037 uint16_t meminitoffs = bios->legacy.mem_init_tbl_ptr + MEM_INIT_SIZE * (bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_SCRATCH4__INDEX) >> 4);
2038 int clock;
2039
2040 if (bios->major_version > 2)
2041 return false;
2042
2043 clock = ROM16(bios->data[meminitoffs + 4]) * 10;
2044 setPLL(bios, NV_PRAMDAC_NVPLL_COEFF, clock);
2045
2046 clock = ROM16(bios->data[meminitoffs + 2]) * 10;
2047 if (bios->data[meminitoffs] & 1) /* DDR */
2048 clock *= 2;
2049 setPLL(bios, NV_PRAMDAC_MPLL_COEFF, clock);
2050
2051 return true;
2052}
2053
static bool
init_configure_preinit(struct nvbios *bios, uint16_t offset,
		       struct init_exec *iexec)
{
	/*
	 * INIT_CONFIGURE_PREINIT opcode: 0x68 ('h')
	 *
	 * offset      (8 bit): opcode
	 *
	 * Equivalent to INIT_DONE on bios version 3 or greater.
	 * For early bios versions, does early init, loading ram and crystal
	 * configuration from straps into CR3C
	 */

	/* no iexec->execute check by design */

	uint32_t straps = bios_rd32(bios, NV_PEXTDEV_BOOT_0);
	/*
	 * RAM config straps land in the high nibble of CR3C.
	 * NOTE(review): the crystal strap (bit 6) is OR'd in unshifted, so
	 * it ends up in bit 6 of cr3c rather than a low bit; later nouveau
	 * code uses ((straps & (1 << 6)) >> 6) here -- confirm whether
	 * keeping it at bit 6 is intentional.
	 */
	uint8_t cr3c = ((straps << 2) & 0xf0) | (straps & (1 << 6));

	/* returning false terminates parsing of this table, like INIT_DONE */
	if (bios->major_version > 2)
		return false;

	bios_idxprt_wr(bios, NV_CIO_CRX__COLOR,
		       NV_CIO_CRE_SCRATCH4__INDEX, cr3c);

	return true;
}
2081
static bool
init_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
	/*
	 * INIT_IO opcode: 0x69 ('i')
	 *
	 * offset      (8 bit): opcode
	 * offset + 1 (16 bit): CRTC port
	 * offset + 3  (8 bit): mask
	 * offset + 4  (8 bit): data
	 *
	 * Assign ((IOVAL("crtc port") & "mask") | "data") to "crtc port"
	 */

	struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
	uint16_t crtcport = ROM16(bios->data[offset + 1]);
	uint8_t mask = bios->data[offset + 3];
	uint8_t data = bios->data[offset + 4];

	if (!iexec->execute)
		return true;

	BIOSLOG(bios, "0x%04X: Port: 0x%04X, Mask: 0x%02X, Data: 0x%02X\n",
		offset, crtcport, mask, data);

	/*
	 * I have no idea what this does, but NVIDIA do this magic sequence
	 * in the places where this INIT_IO happens..
	 *
	 * NOTE(review): the sequence below was copied from mmio traces of
	 * the binary driver; the 0x614xxx/0xe18c register semantics are
	 * unknown, so both the values and the exact ordering (including
	 * the two 10ms delays) must be preserved as-is.
	 */
	if (dev_priv->card_type >= NV_50 && crtcport == 0x3c3 && data == 1) {
		int i;

		bios_wr32(bios, 0x614100, (bios_rd32(
			  bios, 0x614100) & 0x0fffffff) | 0x00800000);

		bios_wr32(bios, 0x00e18c, bios_rd32(
			  bios, 0x00e18c) | 0x00020000);

		bios_wr32(bios, 0x614900, (bios_rd32(
			  bios, 0x614900) & 0x0fffffff) | 0x00800000);

		bios_wr32(bios, 0x000200, bios_rd32(
			  bios, 0x000200) & ~0x40000000);

		mdelay(10);

		bios_wr32(bios, 0x00e18c, bios_rd32(
			  bios, 0x00e18c) & ~0x00020000);

		bios_wr32(bios, 0x000200, bios_rd32(
			  bios, 0x000200) | 0x40000000);

		bios_wr32(bios, 0x614100, 0x00800018);
		bios_wr32(bios, 0x614900, 0x00800018);

		mdelay(10);

		bios_wr32(bios, 0x614100, 0x10000018);
		bios_wr32(bios, 0x614900, 0x10000018);

		/* clear low nibbles of a bank of 0x6142xx/3xx registers */
		for (i = 0; i < 3; i++)
			bios_wr32(bios, 0x614280 + (i*0x800), bios_rd32(
				  bios, 0x614280 + (i*0x800)) & 0xf0f0f0f0);

		for (i = 0; i < 2; i++)
			bios_wr32(bios, 0x614300 + (i*0x800), bios_rd32(
				  bios, 0x614300 + (i*0x800)) & 0xfffff0f0);

		for (i = 0; i < 3; i++)
			bios_wr32(bios, 0x614380 + (i*0x800), bios_rd32(
				  bios, 0x614380 + (i*0x800)) & 0xfffff0f0);

		for (i = 0; i < 2; i++)
			bios_wr32(bios, 0x614200 + (i*0x800), bios_rd32(
				  bios, 0x614200 + (i*0x800)) & 0xfffffff0);

		for (i = 0; i < 2; i++)
			bios_wr32(bios, 0x614108 + (i*0x800), bios_rd32(
				  bios, 0x614108 + (i*0x800)) & 0x0fffffff);
		return true;
	}

	/* the ordinary (non-NV50-magic) case: plain read-modify-write */
	bios_port_wr(bios, crtcport, (bios_port_rd(bios, crtcport) & mask) |
		     data);
	return true;
}
2168
2169static bool
2170init_sub(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2171{
2172 /*
2173 * INIT_SUB opcode: 0x6B ('k')
2174 *
2175 * offset (8 bit): opcode
2176 * offset + 1 (8 bit): script number
2177 *
2178 * Execute script number "script number", as a subroutine
2179 */
2180
2181 uint8_t sub = bios->data[offset + 1];
2182
2183 if (!iexec->execute)
2184 return true;
2185
2186 BIOSLOG(bios, "0x%04X: Calling script %d\n", offset, sub);
2187
2188 parse_init_table(bios,
2189 ROM16(bios->data[bios->init_script_tbls_ptr + sub * 2]),
2190 iexec);
2191
2192 BIOSLOG(bios, "0x%04X: End of script %d\n", offset, sub);
2193
2194 return true;
2195}
2196
2197static bool
2198init_ram_condition(struct nvbios *bios, uint16_t offset,
2199 struct init_exec *iexec)
2200{
2201 /*
2202 * INIT_RAM_CONDITION opcode: 0x6D ('m')
2203 *
2204 * offset (8 bit): opcode
2205 * offset + 1 (8 bit): mask
2206 * offset + 2 (8 bit): cmpval
2207 *
2208 * Test if (NV_PFB_BOOT_0 & "mask") equals "cmpval".
2209 * If condition not met skip subsequent opcodes until condition is
2210 * inverted (INIT_NOT), or we hit INIT_RESUME
2211 */
2212
2213 uint8_t mask = bios->data[offset + 1];
2214 uint8_t cmpval = bios->data[offset + 2];
2215 uint8_t data;
2216
2217 if (!iexec->execute)
2218 return true;
2219
2220 data = bios_rd32(bios, NV_PFB_BOOT_0) & mask;
2221
2222 BIOSLOG(bios, "0x%04X: Checking if 0x%08X equals 0x%08X\n",
2223 offset, data, cmpval);
2224
2225 if (data == cmpval)
2226 BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset);
2227 else {
2228 BIOSLOG(bios, "0x%04X: Condition not fulfilled -- skipping following commands\n", offset);
2229 iexec->execute = false;
2230 }
2231
2232 return true;
2233}
2234
2235static bool
2236init_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2237{
2238 /*
2239 * INIT_NV_REG opcode: 0x6E ('n')
2240 *
2241 * offset (8 bit): opcode
2242 * offset + 1 (32 bit): register
2243 * offset + 5 (32 bit): mask
2244 * offset + 9 (32 bit): data
2245 *
2246 * Assign ((REGVAL("register") & "mask") | "data") to "register"
2247 */
2248
2249 uint32_t reg = ROM32(bios->data[offset + 1]);
2250 uint32_t mask = ROM32(bios->data[offset + 5]);
2251 uint32_t data = ROM32(bios->data[offset + 9]);
2252
2253 if (!iexec->execute)
2254 return true;
2255
2256 BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Mask: 0x%08X, Data: 0x%08X\n",
2257 offset, reg, mask, data);
2258
2259 bios_wr32(bios, reg, (bios_rd32(bios, reg) & mask) | data);
2260
2261 return true;
2262}
2263
2264static bool
2265init_macro(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2266{
2267 /*
2268 * INIT_MACRO opcode: 0x6F ('o')
2269 *
2270 * offset (8 bit): opcode
2271 * offset + 1 (8 bit): macro number
2272 *
2273 * Look up macro index "macro number" in the macro index table.
2274 * The macro index table entry has 1 byte for the index in the macro
2275 * table, and 1 byte for the number of times to repeat the macro.
2276 * The macro table entry has 4 bytes for the register address and
2277 * 4 bytes for the value to write to that register
2278 */
2279
2280 uint8_t macro_index_tbl_idx = bios->data[offset + 1];
2281 uint16_t tmp = bios->macro_index_tbl_ptr + (macro_index_tbl_idx * MACRO_INDEX_SIZE);
2282 uint8_t macro_tbl_idx = bios->data[tmp];
2283 uint8_t count = bios->data[tmp + 1];
2284 uint32_t reg, data;
2285 int i;
2286
2287 if (!iexec->execute)
2288 return true;
2289
2290 BIOSLOG(bios, "0x%04X: Macro: 0x%02X, MacroTableIndex: 0x%02X, "
2291 "Count: 0x%02X\n",
2292 offset, macro_index_tbl_idx, macro_tbl_idx, count);
2293
2294 for (i = 0; i < count; i++) {
2295 uint16_t macroentryptr = bios->macro_tbl_ptr + (macro_tbl_idx + i) * MACRO_SIZE;
2296
2297 reg = ROM32(bios->data[macroentryptr]);
2298 data = ROM32(bios->data[macroentryptr + 4]);
2299
2300 bios_wr32(bios, reg, data);
2301 }
2302
2303 return true;
2304}
2305
static bool
init_done(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
	/*
	 * INIT_DONE opcode: 0x71 ('q')
	 *
	 * offset      (8 bit): opcode
	 *
	 * Terminates the current script.  Returning false is a mild abuse
	 * of the return value, used purely to make the caller stop parsing
	 * this table.
	 */

	return false;
}
2320
2321static bool
2322init_resume(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2323{
2324 /*
2325 * INIT_RESUME opcode: 0x72 ('r')
2326 *
2327 * offset (8 bit): opcode
2328 *
2329 * End the current execute / no-execute condition
2330 */
2331
2332 if (iexec->execute)
2333 return true;
2334
2335 iexec->execute = true;
2336 BIOSLOG(bios, "0x%04X: ---- Executing following commands ----\n", offset);
2337
2338 return true;
2339}
2340
2341static bool
2342init_time(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2343{
2344 /*
2345 * INIT_TIME opcode: 0x74 ('t')
2346 *
2347 * offset (8 bit): opcode
2348 * offset + 1 (16 bit): time
2349 *
2350 * Sleep for "time" microseconds.
2351 */
2352
2353 unsigned time = ROM16(bios->data[offset + 1]);
2354
2355 if (!iexec->execute)
2356 return true;
2357
2358 BIOSLOG(bios, "0x%04X: Sleeping for 0x%04X microseconds\n",
2359 offset, time);
2360
2361 if (time < 1000)
2362 udelay(time);
2363 else
2364 msleep((time + 900) / 1000);
2365
2366 return true;
2367}
2368
2369static bool
2370init_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2371{
2372 /*
2373 * INIT_CONDITION opcode: 0x75 ('u')
2374 *
2375 * offset (8 bit): opcode
2376 * offset + 1 (8 bit): condition number
2377 *
2378 * Check condition "condition number" in the condition table.
2379 * If condition not met skip subsequent opcodes until condition is
2380 * inverted (INIT_NOT), or we hit INIT_RESUME
2381 */
2382
2383 uint8_t cond = bios->data[offset + 1];
2384
2385 if (!iexec->execute)
2386 return true;
2387
2388 BIOSLOG(bios, "0x%04X: Condition: 0x%02X\n", offset, cond);
2389
2390 if (bios_condition_met(bios, offset, cond))
2391 BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset);
2392 else {
2393 BIOSLOG(bios, "0x%04X: Condition not fulfilled -- skipping following commands\n", offset);
2394 iexec->execute = false;
2395 }
2396
2397 return true;
2398}
2399
2400static bool
2401init_io_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2402{
2403 /*
2404 * INIT_IO_CONDITION opcode: 0x76
2405 *
2406 * offset (8 bit): opcode
2407 * offset + 1 (8 bit): condition number
2408 *
2409 * Check condition "condition number" in the io condition table.
2410 * If condition not met skip subsequent opcodes until condition is
2411 * inverted (INIT_NOT), or we hit INIT_RESUME
2412 */
2413
2414 uint8_t cond = bios->data[offset + 1];
2415
2416 if (!iexec->execute)
2417 return true;
2418
2419 BIOSLOG(bios, "0x%04X: IO condition: 0x%02X\n", offset, cond);
2420
2421 if (io_condition_met(bios, offset, cond))
2422 BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset);
2423 else {
2424 BIOSLOG(bios, "0x%04X: Condition not fulfilled -- skipping following commands\n", offset);
2425 iexec->execute = false;
2426 }
2427
2428 return true;
2429}
2430
2431static bool
2432init_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2433{
2434 /*
2435 * INIT_INDEX_IO opcode: 0x78 ('x')
2436 *
2437 * offset (8 bit): opcode
2438 * offset + 1 (16 bit): CRTC port
2439 * offset + 3 (8 bit): CRTC index
2440 * offset + 4 (8 bit): mask
2441 * offset + 5 (8 bit): data
2442 *
2443 * Read value at index "CRTC index" on "CRTC port", AND with "mask",
2444 * OR with "data", write-back
2445 */
2446
2447 uint16_t crtcport = ROM16(bios->data[offset + 1]);
2448 uint8_t crtcindex = bios->data[offset + 3];
2449 uint8_t mask = bios->data[offset + 4];
2450 uint8_t data = bios->data[offset + 5];
2451 uint8_t value;
2452
2453 if (!iexec->execute)
2454 return true;
2455
2456 BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
2457 "Data: 0x%02X\n",
2458 offset, crtcport, crtcindex, mask, data);
2459
2460 value = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) | data;
2461 bios_idxprt_wr(bios, crtcport, crtcindex, value);
2462
2463 return true;
2464}
2465
2466static bool
2467init_pll(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2468{
2469 /*
2470 * INIT_PLL opcode: 0x79 ('y')
2471 *
2472 * offset (8 bit): opcode
2473 * offset + 1 (32 bit): register
2474 * offset + 5 (16 bit): freq
2475 *
2476 * Set PLL register "register" to coefficients for frequency (10kHz)
2477 * "freq"
2478 */
2479
2480 uint32_t reg = ROM32(bios->data[offset + 1]);
2481 uint16_t freq = ROM16(bios->data[offset + 5]);
2482
2483 if (!iexec->execute)
2484 return true;
2485
2486 BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Freq: %d0kHz\n", offset, reg, freq);
2487
2488 setPLL(bios, reg, freq * 10);
2489
2490 return true;
2491}
2492
2493static bool
2494init_zm_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2495{
2496 /*
2497 * INIT_ZM_REG opcode: 0x7A ('z')
2498 *
2499 * offset (8 bit): opcode
2500 * offset + 1 (32 bit): register
2501 * offset + 5 (32 bit): value
2502 *
2503 * Assign "value" to "register"
2504 */
2505
2506 uint32_t reg = ROM32(bios->data[offset + 1]);
2507 uint32_t value = ROM32(bios->data[offset + 5]);
2508
2509 if (!iexec->execute)
2510 return true;
2511
2512 if (reg == 0x000200)
2513 value |= 1;
2514
2515 bios_wr32(bios, reg, value);
2516
2517 return true;
2518}
2519
2520static bool
2521init_ram_restrict_pll(struct nvbios *bios, uint16_t offset,
2522 struct init_exec *iexec)
2523{
2524 /*
2525 * INIT_RAM_RESTRICT_PLL opcode: 0x87 ('')
2526 *
2527 * offset (8 bit): opcode
2528 * offset + 1 (8 bit): PLL type
2529 * offset + 2 (32 bit): frequency 0
2530 *
2531 * Uses the RAMCFG strap of PEXTDEV_BOOT as an index into the table at
2532 * ram_restrict_table_ptr. The value read from there is used to select
2533 * a frequency from the table starting at 'frequency 0' to be
2534 * programmed into the PLL corresponding to 'type'.
2535 *
2536 * The PLL limits table on cards using this opcode has a mapping of
2537 * 'type' to the relevant registers.
2538 */
2539
2540 struct drm_device *dev = bios->dev;
2541 uint32_t strap = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) & 0x0000003c) >> 2;
2542 uint8_t index = bios->data[bios->ram_restrict_tbl_ptr + strap];
2543 uint8_t type = bios->data[offset + 1];
2544 uint32_t freq = ROM32(bios->data[offset + 2 + (index * 4)]);
2545 uint8_t *pll_limits = &bios->data[bios->pll_limit_tbl_ptr], *entry;
2546 int i;
2547
2548 if (!iexec->execute)
2549 return true;
2550
2551 if (!bios->pll_limit_tbl_ptr || (pll_limits[0] & 0xf0) != 0x30) {
2552 NV_ERROR(dev, "PLL limits table not version 3.x\n");
2553 return true; /* deliberate, allow default clocks to remain */
2554 }
2555
2556 entry = pll_limits + pll_limits[1];
2557 for (i = 0; i < pll_limits[3]; i++, entry += pll_limits[2]) {
2558 if (entry[0] == type) {
2559 uint32_t reg = ROM32(entry[3]);
2560
2561 BIOSLOG(bios, "0x%04X: "
2562 "Type %02x Reg 0x%08x Freq %dKHz\n",
2563 offset, type, reg, freq);
2564
2565 setPLL(bios, reg, freq);
2566 return true;
2567 }
2568 }
2569
2570 NV_ERROR(dev, "PLL type 0x%02x not found in PLL limits table", type);
2571 return true;
2572}
2573
static bool
init_8c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
	/*
	 * INIT_8C opcode: 0x8C ('')
	 *
	 * Behaviour unknown; treated as a NOP until a BIOS that needs it
	 * turns up.
	 */

	return true;
}
2586
static bool
init_8d(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
	/*
	 * INIT_8D opcode: 0x8D ('')
	 *
	 * Behaviour unknown; treated as a NOP until a BIOS that needs it
	 * turns up.
	 */

	return true;
}
2599
static bool
init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
	/*
	 * INIT_GPIO opcode: 0x8E ('')
	 *
	 * offset      (8 bit): opcode
	 *
	 * Loop over all entries in the DCB GPIO table, and initialise
	 * each GPIO according to various values listed in each entry
	 */

	/* per-line state registers: 2 bits per line, 8 lines per register */
	const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
	/* per-line control registers: 1 bit pair per line, 16 lines each */
	const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c };
	const uint8_t *gpio_table = &bios->data[bios->bdcb.gpio_table_ptr];
	const uint8_t *gpio_entry;
	int i;

	/* only the DCB 4.0 GPIO table format is handled here */
	if (bios->bdcb.version != 0x40) {
		NV_ERROR(bios->dev, "DCB table not version 4.0\n");
		return false;
	}

	if (!bios->bdcb.gpio_table_ptr) {
		NV_WARN(bios->dev, "Invalid pointer to INIT_8E table\n");
		return false;
	}

	/*
	 * gpio_table[1] = header length, [2] = entry count, [3] = entry
	 * size; each entry is read as a packed 32 bit descriptor with the
	 * GPIO line number in its low 5 bits.
	 */
	gpio_entry = gpio_table + gpio_table[1];
	for (i = 0; i < gpio_table[2]; i++, gpio_entry += gpio_table[3]) {
		uint32_t entry = ROM32(gpio_entry[0]), r, s, v;
		int line = (entry & 0x0000001f);

		BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, entry);

		/* tag 0xff in bits 15:8 marks an unused entry */
		if ((entry & 0x0000ff00) == 0x0000ff00)
			continue;

		/*
		 * Program the 2 bit state field for this line.  Bit 24
		 * selects which of the two packed state fields (bits 30:29
		 * or 28:27, each XOR'd with 2) is used -- presumably an
		 * active-level / default-state choice; TODO confirm.
		 */
		r = nv50_gpio_reg[line >> 3];
		s = (line & 0x07) << 2;
		v = bios_rd32(bios, r) & ~(0x00000003 << s);
		if (entry & 0x01000000)
			v |= (((entry & 0x60000000) >> 29) ^ 2) << s;
		else
			v |= (((entry & 0x18000000) >> 27) ^ 2) << s;
		bios_wr32(bios, r, v);

		/*
		 * Program the control bit pair for this line from the
		 * 2 bit field at bits 26:25 (0: both clear, 1: low bit,
		 * 2: high bit).
		 */
		r = nv50_gpio_ctl[line >> 4];
		s = (line & 0x0f);
		v = bios_rd32(bios, r) & ~(0x00010001 << s);
		switch ((entry & 0x06000000) >> 25) {
		case 1:
			v |= (0x00000001 << s);
			break;
		case 2:
			v |= (0x00010000 << s);
			break;
		default:
			break;
		}
		bios_wr32(bios, r, v);
	}

	return true;
}
2665
/* hack to avoid moving the itbl_entry array before this function */
/*
 * Per-RAMCFG block length for INIT_RAM_RESTRICT_ZM_REG_GROUP; expected to
 * be filled in when the BIT 'M' table is parsed, zero until then.
 */
int init_ram_restrict_zm_reg_group_blocklen;
2668
static bool
init_ram_restrict_zm_reg_group(struct nvbios *bios, uint16_t offset,
			       struct init_exec *iexec)
{
	/*
	 * INIT_RAM_RESTRICT_ZM_REG_GROUP opcode: 0x8F ('')
	 *
	 * offset      (8 bit): opcode
	 * offset + 1 (32 bit): reg
	 * offset + 5  (8 bit): regincrement
	 * offset + 6  (8 bit): count
	 * offset + 7 (32 bit): value 1,1
	 * ...
	 *
	 * Use the RAMCFG strap of PEXTDEV_BOOT as an index into the table at
	 * ram_restrict_table_ptr. The value read from here is 'n', and
	 * "value 1,n" gets written to "reg". This repeats "count" times and on
	 * each iteration 'm', "reg" increases by "regincrement" and
	 * "value m,n" is used. The extent of n is limited by a number read
	 * from the 'M' BIT table, herein called "blocklen"
	 */

	uint32_t reg = ROM32(bios->data[offset + 1]);
	uint8_t regincrement = bios->data[offset + 5];
	uint8_t count = bios->data[offset + 6];
	uint32_t strap_ramcfg, data;
	uint16_t blocklen;
	uint8_t index;
	int i;

	/* previously set by 'M' BIT table */
	blocklen = init_ram_restrict_zm_reg_group_blocklen;

	if (!iexec->execute)
		return true;

	/* without blocklen the per-iteration value offsets are unknowable */
	if (!blocklen) {
		NV_ERROR(bios->dev,
			 "0x%04X: Zero block length - has the M table "
			 "been parsed?\n", offset);
		return false;
	}

	/* RAMCFG strap lives in bits 5:2 of PEXTDEV_BOOT_0 */
	strap_ramcfg = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 2) & 0xf;
	index = bios->data[bios->ram_restrict_tbl_ptr + strap_ramcfg];

	BIOSLOG(bios, "0x%04X: Reg: 0x%08X, RegIncrement: 0x%02X, "
		      "Count: 0x%02X, StrapRamCfg: 0x%02X, Index: 0x%02X\n",
		offset, reg, regincrement, count, strap_ramcfg, index);

	for (i = 0; i < count; i++) {
		/* row i of the value matrix, column selected by 'index' */
		data = ROM32(bios->data[offset + 7 + index * 4 + blocklen * i]);

		bios_wr32(bios, reg, data);

		reg += regincrement;
	}

	return true;
}
2729
2730static bool
2731init_copy_zm_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2732{
2733 /*
2734 * INIT_COPY_ZM_REG opcode: 0x90 ('')
2735 *
2736 * offset (8 bit): opcode
2737 * offset + 1 (32 bit): src reg
2738 * offset + 5 (32 bit): dst reg
2739 *
2740 * Put contents of "src reg" into "dst reg"
2741 */
2742
2743 uint32_t srcreg = ROM32(bios->data[offset + 1]);
2744 uint32_t dstreg = ROM32(bios->data[offset + 5]);
2745
2746 if (!iexec->execute)
2747 return true;
2748
2749 bios_wr32(bios, dstreg, bios_rd32(bios, srcreg));
2750
2751 return true;
2752}
2753
2754static bool
2755init_zm_reg_group_addr_latched(struct nvbios *bios, uint16_t offset,
2756 struct init_exec *iexec)
2757{
2758 /*
2759 * INIT_ZM_REG_GROUP_ADDRESS_LATCHED opcode: 0x91 ('')
2760 *
2761 * offset (8 bit): opcode
2762 * offset + 1 (32 bit): dst reg
2763 * offset + 5 (8 bit): count
2764 * offset + 6 (32 bit): data 1
2765 * ...
2766 *
2767 * For each of "count" values write "data n" to "dst reg"
2768 */
2769
2770 uint32_t reg = ROM32(bios->data[offset + 1]);
2771 uint8_t count = bios->data[offset + 5];
2772 int i;
2773
2774 if (!iexec->execute)
2775 return true;
2776
2777 for (i = 0; i < count; i++) {
2778 uint32_t data = ROM32(bios->data[offset + 6 + 4 * i]);
2779 bios_wr32(bios, reg, data);
2780 }
2781
2782 return true;
2783}
2784
static bool
init_reserved(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
	/*
	 * INIT_RESERVED   opcode: 0x92 ('')
	 *
	 * offset      (8 bit): opcode
	 *
	 * Seemingly does nothing.  The single-byte opcode is still consumed
	 * (length 1 in itbl_entry) so table parsing continues past it.
	 */

	return true;
}
2798
static bool
init_96(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
	/*
	 * INIT_96   opcode: 0x96 ('')
	 *
	 * offset      (8 bit): opcode
	 * offset + 1  (32 bit): sreg
	 * offset + 5  (8 bit): sshift
	 * offset + 6  (8 bit): smask
	 * offset + 7  (8 bit): index
	 * offset + 8  (32 bit): reg
	 * offset + 12 (32 bit): mask
	 * offset + 16 (8 bit): shift
	 *
	 * Extract a field from "sreg" (shifted by "sshift", masked with
	 * "smask"), translate it through the xlat table selected by "index"
	 * in the init96 table, shift the result left by "shift", and merge
	 * it into "reg" under "mask".
	 */

	uint16_t xlatptr = bios->init96_tbl_ptr + (bios->data[offset + 7] * 2);
	uint32_t reg = ROM32(bios->data[offset + 8]);
	uint32_t mask = ROM32(bios->data[offset + 12]);
	uint32_t val;

	/* NOTE(review): the source register is read even when
	 * iexec->execute is clear; only the final write is skipped */
	val = bios_rd32(bios, ROM32(bios->data[offset + 1]));
	if (bios->data[offset + 5] < 0x80)
		val >>= bios->data[offset + 5];
	else
		/* sshift >= 0x80 encodes a left shift of (0x100 - sshift) */
		val <<= (0x100 - bios->data[offset + 5]);
	val &= bios->data[offset + 6];

	/* look the field value up in the selected xlat table */
	val = bios->data[ROM16(bios->data[xlatptr]) + val];
	val <<= bios->data[offset + 16];

	if (!iexec->execute)
		return true;

	bios_wr32(bios, reg, (bios_rd32(bios, reg) & mask) | val);
	return true;
}
2837
2838static bool
2839init_97(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2840{
2841 /*
2842 * INIT_97 opcode: 0x97 ('')
2843 *
2844 * offset (8 bit): opcode
2845 * offset + 1 (32 bit): register
2846 * offset + 5 (32 bit): mask
2847 * offset + 9 (32 bit): value
2848 *
2849 * Adds "value" to "register" preserving the fields specified
2850 * by "mask"
2851 */
2852
2853 uint32_t reg = ROM32(bios->data[offset + 1]);
2854 uint32_t mask = ROM32(bios->data[offset + 5]);
2855 uint32_t add = ROM32(bios->data[offset + 9]);
2856 uint32_t val;
2857
2858 val = bios_rd32(bios, reg);
2859 val = (val & mask) | ((val + add) & ~mask);
2860
2861 if (!iexec->execute)
2862 return true;
2863
2864 bios_wr32(bios, reg, val);
2865 return true;
2866}
2867
static bool
init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
	/*
	 * INIT_AUXCH   opcode: 0x98 ('')
	 *
	 * offset      (8 bit): opcode
	 * offset + 1  (32 bit): address
	 * offset + 5  (8 bit): count
	 * offset + 6  (8 bit): mask 0
	 * offset + 7  (8 bit): data 0
	 * ...
	 *
	 * For each of "count" (mask, data) byte pairs: read a byte from aux
	 * channel address "address", AND it with the mask, OR in the data
	 * byte, and write it back.  Requires an active output so the right
	 * aux channel can be located.
	 */

	struct drm_device *dev = bios->dev;
	struct nouveau_i2c_chan *auxch;
	uint32_t addr = ROM32(bios->data[offset + 1]);
	uint8_t len = bios->data[offset + 5];
	int ret, i;

	if (!bios->display.output) {
		NV_ERROR(dev, "INIT_AUXCH: no active output\n");
		return false;
	}

	auxch = init_i2c_device_find(dev, bios->display.output->i2c_index);
	if (!auxch) {
		NV_ERROR(dev, "INIT_AUXCH: couldn't get auxch %d\n",
			 bios->display.output->i2c_index);
		return false;
	}

	if (!iexec->execute)
		return true;

	/* skip the opcode/address/count header; each record is 2 bytes */
	offset += 6;
	for (i = 0; i < len; i++, offset += 2) {
		uint8_t data;

		/* cmd 9: presumably a native aux read — TODO confirm against
		 * nouveau_dp_auxch() */
		ret = nouveau_dp_auxch(auxch, 9, addr, &data, 1);
		if (ret) {
			NV_ERROR(dev, "INIT_AUXCH: rd auxch fail %d\n", ret);
			return false;
		}

		data &= bios->data[offset + 0];
		data |= bios->data[offset + 1];

		/* cmd 8: presumably the matching aux write */
		ret = nouveau_dp_auxch(auxch, 8, addr, &data, 1);
		if (ret) {
			NV_ERROR(dev, "INIT_AUXCH: wr auxch fail %d\n", ret);
			return false;
		}
	}

	return true;
}
2926
static bool
init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
	/*
	 * INIT_ZM_AUXCH   opcode: 0x99 ('')
	 *
	 * offset      (8 bit): opcode
	 * offset + 1  (32 bit): address
	 * offset + 5  (8 bit): count
	 * offset + 6  (8 bit): data 0
	 * ...
	 *
	 * "Zero mask" variant of INIT_AUXCH: write each of the "count" data
	 * bytes straight to aux channel address "address", without reading
	 * or masking the current value first.
	 */

	struct drm_device *dev = bios->dev;
	struct nouveau_i2c_chan *auxch;
	uint32_t addr = ROM32(bios->data[offset + 1]);
	uint8_t len = bios->data[offset + 5];
	int ret, i;

	if (!bios->display.output) {
		NV_ERROR(dev, "INIT_ZM_AUXCH: no active output\n");
		return false;
	}

	auxch = init_i2c_device_find(dev, bios->display.output->i2c_index);
	if (!auxch) {
		NV_ERROR(dev, "INIT_ZM_AUXCH: couldn't get auxch %d\n",
			 bios->display.output->i2c_index);
		return false;
	}

	if (!iexec->execute)
		return true;

	/* skip the opcode/address/count header; one data byte per record */
	offset += 6;
	for (i = 0; i < len; i++, offset++) {
		/* cmd 8: presumably a native aux write — TODO confirm */
		ret = nouveau_dp_auxch(auxch, 8, addr, &bios->data[offset], 1);
		if (ret) {
			NV_ERROR(dev, "INIT_ZM_AUXCH: wr auxch fail %d\n", ret);
			return false;
		}
	}

	return true;
}
2973
/*
 * Dispatch table for VBIOS init-script opcodes.  Columns:
 *   id     - opcode byte
 *   length - fixed size of the command in bytes
 *   offset - position of the byte holding a variable repeat count
 *   mult   - size in bytes of each repeated element
 * The total size of a command is
 *   length + data[offset + length_offset] * length_multiplier
 * (see get_init_table_entry_length()).  The table is terminated by a
 * NULL-named sentinel entry.
 */
static struct init_tbl_entry itbl_entry[] = {
	/* command name , id , length , offset , mult , command handler */
	/* INIT_PROG (0x31, 15, 10, 4) removed due to no example of use */
	{ "INIT_IO_RESTRICT_PROG" , 0x32, 11 , 6 , 4 , init_io_restrict_prog },
	{ "INIT_REPEAT" , 0x33, 2 , 0 , 0 , init_repeat },
	{ "INIT_IO_RESTRICT_PLL" , 0x34, 12 , 7 , 2 , init_io_restrict_pll },
	{ "INIT_END_REPEAT" , 0x36, 1 , 0 , 0 , init_end_repeat },
	{ "INIT_COPY" , 0x37, 11 , 0 , 0 , init_copy },
	{ "INIT_NOT" , 0x38, 1 , 0 , 0 , init_not },
	{ "INIT_IO_FLAG_CONDITION" , 0x39, 2 , 0 , 0 , init_io_flag_condition },
	{ "INIT_INDEX_ADDRESS_LATCHED" , 0x49, 18 , 17 , 2 , init_idx_addr_latched },
	{ "INIT_IO_RESTRICT_PLL2" , 0x4A, 11 , 6 , 4 , init_io_restrict_pll2 },
	{ "INIT_PLL2" , 0x4B, 9 , 0 , 0 , init_pll2 },
	{ "INIT_I2C_BYTE" , 0x4C, 4 , 3 , 3 , init_i2c_byte },
	{ "INIT_ZM_I2C_BYTE" , 0x4D, 4 , 3 , 2 , init_zm_i2c_byte },
	{ "INIT_ZM_I2C" , 0x4E, 4 , 3 , 1 , init_zm_i2c },
	{ "INIT_TMDS" , 0x4F, 5 , 0 , 0 , init_tmds },
	{ "INIT_ZM_TMDS_GROUP" , 0x50, 3 , 2 , 2 , init_zm_tmds_group },
	{ "INIT_CR_INDEX_ADDRESS_LATCHED" , 0x51, 5 , 4 , 1 , init_cr_idx_adr_latch },
	{ "INIT_CR" , 0x52, 4 , 0 , 0 , init_cr },
	{ "INIT_ZM_CR" , 0x53, 3 , 0 , 0 , init_zm_cr },
	{ "INIT_ZM_CR_GROUP" , 0x54, 2 , 1 , 2 , init_zm_cr_group },
	{ "INIT_CONDITION_TIME" , 0x56, 3 , 0 , 0 , init_condition_time },
	{ "INIT_ZM_REG_SEQUENCE" , 0x58, 6 , 5 , 4 , init_zm_reg_sequence },
	/* INIT_INDIRECT_REG (0x5A, 7, 0, 0) removed due to no example of use */
	{ "INIT_SUB_DIRECT" , 0x5B, 3 , 0 , 0 , init_sub_direct },
	{ "INIT_COPY_NV_REG" , 0x5F, 22 , 0 , 0 , init_copy_nv_reg },
	{ "INIT_ZM_INDEX_IO" , 0x62, 5 , 0 , 0 , init_zm_index_io },
	{ "INIT_COMPUTE_MEM" , 0x63, 1 , 0 , 0 , init_compute_mem },
	{ "INIT_RESET" , 0x65, 13 , 0 , 0 , init_reset },
	{ "INIT_CONFIGURE_MEM" , 0x66, 1 , 0 , 0 , init_configure_mem },
	{ "INIT_CONFIGURE_CLK" , 0x67, 1 , 0 , 0 , init_configure_clk },
	{ "INIT_CONFIGURE_PREINIT" , 0x68, 1 , 0 , 0 , init_configure_preinit },
	{ "INIT_IO" , 0x69, 5 , 0 , 0 , init_io },
	{ "INIT_SUB" , 0x6B, 2 , 0 , 0 , init_sub },
	{ "INIT_RAM_CONDITION" , 0x6D, 3 , 0 , 0 , init_ram_condition },
	{ "INIT_NV_REG" , 0x6E, 13 , 0 , 0 , init_nv_reg },
	{ "INIT_MACRO" , 0x6F, 2 , 0 , 0 , init_macro },
	{ "INIT_DONE" , 0x71, 1 , 0 , 0 , init_done },
	{ "INIT_RESUME" , 0x72, 1 , 0 , 0 , init_resume },
	/* INIT_RAM_CONDITION2 (0x73, 9, 0, 0) removed due to no example of use */
	{ "INIT_TIME" , 0x74, 3 , 0 , 0 , init_time },
	{ "INIT_CONDITION" , 0x75, 2 , 0 , 0 , init_condition },
	{ "INIT_IO_CONDITION" , 0x76, 2 , 0 , 0 , init_io_condition },
	{ "INIT_INDEX_IO" , 0x78, 6 , 0 , 0 , init_index_io },
	{ "INIT_PLL" , 0x79, 7 , 0 , 0 , init_pll },
	{ "INIT_ZM_REG" , 0x7A, 9 , 0 , 0 , init_zm_reg },
	/* INIT_RAM_RESTRICT_PLL's length is adjusted by the BIT M table */
	{ "INIT_RAM_RESTRICT_PLL" , 0x87, 2 , 0 , 0 , init_ram_restrict_pll },
	{ "INIT_8C" , 0x8C, 1 , 0 , 0 , init_8c },
	{ "INIT_8D" , 0x8D, 1 , 0 , 0 , init_8d },
	{ "INIT_GPIO" , 0x8E, 1 , 0 , 0 , init_gpio },
	/* INIT_RAM_RESTRICT_ZM_REG_GROUP's mult is loaded by M table in BIT */
	{ "INIT_RAM_RESTRICT_ZM_REG_GROUP" , 0x8F, 7 , 6 , 0 , init_ram_restrict_zm_reg_group },
	{ "INIT_COPY_ZM_REG" , 0x90, 9 , 0 , 0 , init_copy_zm_reg },
	{ "INIT_ZM_REG_GROUP_ADDRESS_LATCHED" , 0x91, 6 , 5 , 4 , init_zm_reg_group_addr_latched },
	{ "INIT_RESERVED" , 0x92, 1 , 0 , 0 , init_reserved },
	{ "INIT_96" , 0x96, 17 , 0 , 0 , init_96 },
	{ "INIT_97" , 0x97, 13 , 0 , 0 , init_97 },
	{ "INIT_AUXCH" , 0x98, 6 , 5 , 2 , init_auxch },
	{ "INIT_ZM_AUXCH" , 0x99, 6 , 5 , 1 , init_zm_auxch },
	{ NULL , 0 , 0 , 0 , 0 , NULL }
};
3037
3038static unsigned int get_init_table_entry_length(struct nvbios *bios, unsigned int offset, int i)
3039{
3040 /* Calculates the length of a given init table entry. */
3041 return itbl_entry[i].length + bios->data[offset + itbl_entry[i].length_offset]*itbl_entry[i].length_multiplier;
3042}
3043
3044#define MAX_TABLE_OPS 1000
3045
static int
parse_init_table(struct nvbios *bios, unsigned int offset,
		 struct init_exec *iexec)
{
	/*
	 * Parses all commands in an init table.
	 *
	 * We start out executing all commands found in the init table. Some
	 * opcodes may change the status of iexec->execute to SKIP, which will
	 * cause the following opcodes to perform no operation until the value
	 * is changed back to EXECUTE.
	 *
	 * Returns 0 on success (including when a handler aborts the table
	 * early) and -ENOENT for an unknown opcode.
	 */

	int count = 0, i;
	uint8_t id;

	/*
	 * Loop until INIT_DONE causes us to break out of the loop
	 * (or until offset > bios length just in case... )
	 * (and no more than MAX_TABLE_OPS iterations, just in case... )
	 */
	while ((offset < bios->length) && (count++ < MAX_TABLE_OPS)) {
		id = bios->data[offset];

		/* Find matching id in itbl_entry */
		for (i = 0; itbl_entry[i].name && (itbl_entry[i].id != id); i++)
			;

		if (itbl_entry[i].name) {
			BIOSLOG(bios, "0x%04X: [ (0x%02X) - %s ]\n",
				offset, itbl_entry[i].id, itbl_entry[i].name);

			/* execute eventual command handler */
			if (itbl_entry[i].handler)
				/* a false return aborts the table, but is not
				 * reported as an error to the caller */
				if (!(*itbl_entry[i].handler)(bios, offset, iexec))
					break;
		} else {
			NV_ERROR(bios->dev,
				 "0x%04X: Init table command not found: "
				 "0x%02X\n", offset, id);
			return -ENOENT;
		}

		/*
		 * Add the offset of the current command including all data
		 * of that command. The offset will then be pointing on the
		 * next op code.
		 */
		offset += get_init_table_entry_length(bios, offset, i);
	}

	if (offset >= bios->length)
		NV_WARN(bios->dev,
			"Offset 0x%04X greater than known bios image length. "
			"Corrupt image?\n", offset);
	if (count >= MAX_TABLE_OPS)
		NV_WARN(bios->dev,
			"More than %d opcodes to a table is unlikely, "
			"is the bios image corrupt?\n", MAX_TABLE_OPS);

	return 0;
}
3108
3109static void
3110parse_init_tables(struct nvbios *bios)
3111{
3112 /* Loops and calls parse_init_table() for each present table. */
3113
3114 int i = 0;
3115 uint16_t table;
3116 struct init_exec iexec = {true, false};
3117
3118 if (bios->old_style_init) {
3119 if (bios->init_script_tbls_ptr)
3120 parse_init_table(bios, bios->init_script_tbls_ptr, &iexec);
3121 if (bios->extra_init_script_tbl_ptr)
3122 parse_init_table(bios, bios->extra_init_script_tbl_ptr, &iexec);
3123
3124 return;
3125 }
3126
3127 while ((table = ROM16(bios->data[bios->init_script_tbls_ptr + i]))) {
3128 NV_INFO(bios->dev,
3129 "Parsing VBIOS init table %d at offset 0x%04X\n",
3130 i / 2, table);
3131 BIOSLOG(bios, "0x%04X: ------ Executing following commands ------\n", table);
3132
3133 parse_init_table(bios, table, &iexec);
3134 i += 2;
3135 }
3136}
3137
static uint16_t clkcmptable(struct nvbios *bios, uint16_t clktable, int pxclk)
{
	/*
	 * Walk a pixel clock comparison table and return the script pointer
	 * from the first record whose clock (stored in 10s of kHz) does not
	 * exceed pxclk (in kHz).  Tables are terminated by a zero-clock
	 * record; NOTE(review): a non-negative pxclk always matches that
	 * terminator, so its pointer field is what gets returned when no
	 * earlier record matched — confirm terminator layout per table rev.
	 */
	int compare_record_len, i = 0;
	uint16_t compareclk, scriptptr = 0;

	if (bios->major_version < 5) /* pre BIT */
		compare_record_len = 3;
	else
		compare_record_len = 4;

	do {
		compareclk = ROM16(bios->data[clktable + compare_record_len * i]);
		if (pxclk >= compareclk * 10) {
			if (bios->major_version < 5) {
				/* BMP records hold an init script number,
				 * resolved via the init script table list */
				uint8_t tmdssub = bios->data[clktable + 2 + compare_record_len * i];
				scriptptr = ROM16(bios->data[bios->init_script_tbls_ptr + tmdssub * 2]);
			} else
				scriptptr = ROM16(bios->data[clktable + 2 + compare_record_len * i]);
			break;
		}
		i++;
	} while (compareclk);

	return scriptptr;
}
3163
static void
run_digital_op_script(struct drm_device *dev, uint16_t scriptptr,
		      struct dcb_entry *dcbent, int head, bool dl)
{
	/*
	 * Execute the digital output script at scriptptr for the given DCB
	 * entry and head: select the head via CR44, latch the DCB index
	 * (via NVWriteVgaCrtc5758), run the init table, then bind the DFP
	 * to the head.
	 */
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvbios *bios = &dev_priv->VBIOS;
	struct init_exec iexec = {true, false};

	NV_TRACE(dev, "0x%04X: Parsing digital output script table\n",
		 scriptptr);
	bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_44,
		       head ? NV_CIO_CRE_44_HEADB : NV_CIO_CRE_44_HEADA);
	/* note: if dcb entries have been merged, index may be misleading */
	NVWriteVgaCrtc5758(dev, head, 0, dcbent->index);
	parse_init_table(bios, scriptptr, &iexec);

	nv04_dfp_bind_head(dev, dcbent, head, dl);
}
3182
static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entry *dcbent, int head, enum LVDS_script script)
{
	/*
	 * Run the old-style (pre-BIT LVDS table) manufacturer script for
	 * "script".  The script number comes from the xlated LVDS entry,
	 * bumped by one for output C when link_c_increment is set, and is
	 * resolved through the init script table list.  Returns -EINVAL
	 * when no usable script exists.
	 */
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvbios *bios = &dev_priv->VBIOS;
	uint8_t sub = bios->data[bios->fp.xlated_entry + script] + (bios->fp.link_c_increment && dcbent->or & OUTPUT_C ? 1 : 0);
	uint16_t scriptofs = ROM16(bios->data[bios->init_script_tbls_ptr + sub * 2]);

	if (!bios->fp.xlated_entry || !sub || !scriptofs)
		return -EINVAL;

	run_digital_op_script(dev, scriptofs, dcbent, head, bios->fp.dual_link);

	if (script == LVDS_PANEL_OFF) {
		/* off-on delay in ms */
		msleep(ROM16(bios->data[bios->fp.xlated_entry + 7]));
	}
#ifdef __powerpc__
	/* Powerbook specific quirks, keyed off PCI device ID */
	if (script == LVDS_RESET && ((dev->pci_device & 0xffff) == 0x0179 || (dev->pci_device & 0xffff) == 0x0329))
		nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
	if ((dev->pci_device & 0xffff) == 0x0179 || (dev->pci_device & 0xffff) == 0x0189 || (dev->pci_device & 0xffff) == 0x0329) {
		if (script == LVDS_PANEL_ON) {
			bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL, bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL) | (1 << 31));
			bios_wr32(bios, NV_PCRTC_GPIO_EXT, bios_rd32(bios, NV_PCRTC_GPIO_EXT) | 1);
		}
		if (script == LVDS_PANEL_OFF) {
			bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL, bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL) & ~(1 << 31));
			bios_wr32(bios, NV_PCRTC_GPIO_EXT, bios_rd32(bios, NV_PCRTC_GPIO_EXT) & ~3);
		}
	}
#endif

	return 0;
}
3217
static int run_lvds_table(struct drm_device *dev, struct dcb_entry *dcbent, int head, enum LVDS_script script, int pxclk)
{
	/*
	 * The BIT LVDS table's header has the information to setup the
	 * necessary registers. Following the standard 4 byte header are:
	 * A bitmask byte and a dual-link transition pxclk value for use in
	 * selecting the init script when not using straps; 4 script pointers
	 * for panel power, selected by output and on/off; and 8 table pointers
	 * for panel init, the needed one determined by output, and bits in the
	 * conf byte. These tables are similar to the TMDS tables, consisting
	 * of a list of pxclks and script pointers.
	 *
	 * Returns 0 on success, -ENOSYS for LVDS_INIT (not supported for
	 * this table version), -ENOENT when a needed table/script is absent.
	 */
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvbios *bios = &dev_priv->VBIOS;
	/* outputset selects between the two per-output script/table sets */
	unsigned int outputset = (dcbent->or == 4) ? 1 : 0;
	uint16_t scriptptr = 0, clktable;
	uint8_t clktableptr = 0;

	/*
	 * For now we assume version 3.0 table - g80 support will need some
	 * changes
	 */

	switch (script) {
	case LVDS_INIT:
		return -ENOSYS;
	case LVDS_BACKLIGHT_ON:
	case LVDS_PANEL_ON:
		scriptptr = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 7 + outputset * 2]);
		break;
	case LVDS_BACKLIGHT_OFF:
	case LVDS_PANEL_OFF:
		scriptptr = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 11 + outputset * 2]);
		break;
	case LVDS_RESET:
		/* pick one of the 4 clock tables per output, based on
		 * dual-link and a conf bit (or the EDID fallback byte) */
		if (dcbent->lvdsconf.use_straps_for_mode) {
			if (bios->fp.dual_link)
				clktableptr += 2;
			if (bios->fp.BITbit1)
				clktableptr++;
		} else {
			/* using EDID */
			uint8_t fallback = bios->data[bios->fp.lvdsmanufacturerpointer + 4];
			int fallbackcmpval = (dcbent->or == 4) ? 4 : 1;

			if (bios->fp.dual_link) {
				clktableptr += 2;
				fallbackcmpval *= 2;
			}
			if (fallbackcmpval & fallback)
				clktableptr++;
		}

		/* adding outputset * 8 may not be correct */
		clktable = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 15 + clktableptr * 2 + outputset * 8]);
		if (!clktable) {
			NV_ERROR(dev, "Pixel clock comparison table not found\n");
			return -ENOENT;
		}
		scriptptr = clkcmptable(bios, clktable, pxclk);
	}

	if (!scriptptr) {
		NV_ERROR(dev, "LVDS output init script not found\n");
		return -ENOENT;
	}
	run_digital_op_script(dev, scriptptr, dcbent, head, bios->fp.dual_link);

	return 0;
}
3288
int call_lvds_script(struct drm_device *dev, struct dcb_entry *dcbent, int head, enum LVDS_script script, int pxclk)
{
	/*
	 * LVDS operations are multiplexed in an effort to present a single API
	 * which works with two vastly differing underlying structures.
	 * This acts as the demux: pre-0x30 LVDS tables go through
	 * call_lvds_manufacturer_script(), newer ones through
	 * run_lvds_table().  May recurse (bounded) to run prerequisite
	 * INIT/RESET/PANEL_OFF scripts first.
	 */

	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvbios *bios = &dev_priv->VBIOS;
	uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer];
	uint32_t sel_clk_binding, sel_clk;
	int ret;

	/* skip repeats of the last (script, head) pair, missing tables, and
	 * LVDS_INIT on table revs that don't support it */
	if (bios->fp.last_script_invoc == (script << 1 | head) || !lvds_ver ||
	    (lvds_ver >= 0x30 && script == LVDS_INIT))
		return 0;

	if (!bios->fp.lvds_init_run) {
		bios->fp.lvds_init_run = true;
		call_lvds_script(dev, dcbent, head, LVDS_INIT, pxclk);
	}

	if (script == LVDS_PANEL_ON && bios->fp.reset_after_pclk_change)
		call_lvds_script(dev, dcbent, head, LVDS_RESET, pxclk);
	if (script == LVDS_RESET && bios->fp.power_off_for_reset)
		call_lvds_script(dev, dcbent, head, LVDS_PANEL_OFF, pxclk);

	NV_TRACE(dev, "Calling LVDS script %d:\n", script);

	/* don't let script change pll->head binding */
	sel_clk_binding = bios_rd32(bios, NV_PRAMDAC_SEL_CLK) & 0x50000;

	if (lvds_ver < 0x30)
		ret = call_lvds_manufacturer_script(dev, dcbent, head, script);
	else
		ret = run_lvds_table(dev, dcbent, head, script, pxclk);

	bios->fp.last_script_invoc = (script << 1 | head);

	/* restore the saved pll->head binding bits */
	sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK) & ~0x50000;
	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, sel_clk | sel_clk_binding);
	/* some scripts set a value in NV_PBUS_POWERCTRL_2 and break video overlay */
	nvWriteMC(dev, NV_PBUS_POWERCTRL_2, 0);

	return ret;
}
3336
/* Parsed copy of the LVDS manufacturer table header; filled in by
 * parse_lvds_manufacturer_table_header(). */
struct lvdstableheader {
	uint8_t lvds_ver, headerlen, recordlen;
};
3340
3341static int parse_lvds_manufacturer_table_header(struct drm_device *dev, struct nvbios *bios, struct lvdstableheader *lth)
3342{
3343 /*
3344 * BMP version (0xa) LVDS table has a simple header of version and
3345 * record length. The BIT LVDS table has the typical BIT table header:
3346 * version byte, header length byte, record length byte, and a byte for
3347 * the maximum number of records that can be held in the table.
3348 */
3349
3350 uint8_t lvds_ver, headerlen, recordlen;
3351
3352 memset(lth, 0, sizeof(struct lvdstableheader));
3353
3354 if (bios->fp.lvdsmanufacturerpointer == 0x0) {
3355 NV_ERROR(dev, "Pointer to LVDS manufacturer table invalid\n");
3356 return -EINVAL;
3357 }
3358
3359 lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer];
3360
3361 switch (lvds_ver) {
3362 case 0x0a: /* pre NV40 */
3363 headerlen = 2;
3364 recordlen = bios->data[bios->fp.lvdsmanufacturerpointer + 1];
3365 break;
3366 case 0x30: /* NV4x */
3367 headerlen = bios->data[bios->fp.lvdsmanufacturerpointer + 1];
3368 if (headerlen < 0x1f) {
3369 NV_ERROR(dev, "LVDS table header not understood\n");
3370 return -EINVAL;
3371 }
3372 recordlen = bios->data[bios->fp.lvdsmanufacturerpointer + 2];
3373 break;
3374 case 0x40: /* G80/G90 */
3375 headerlen = bios->data[bios->fp.lvdsmanufacturerpointer + 1];
3376 if (headerlen < 0x7) {
3377 NV_ERROR(dev, "LVDS table header not understood\n");
3378 return -EINVAL;
3379 }
3380 recordlen = bios->data[bios->fp.lvdsmanufacturerpointer + 2];
3381 break;
3382 default:
3383 NV_ERROR(dev,
3384 "LVDS table revision %d.%d not currently supported\n",
3385 lvds_ver >> 4, lvds_ver & 0xf);
3386 return -ENOSYS;
3387 }
3388
3389 lth->lvds_ver = lvds_ver;
3390 lth->headerlen = headerlen;
3391 lth->recordlen = recordlen;
3392
3393 return 0;
3394}
3395
3396static int
3397get_fp_strap(struct drm_device *dev, struct nvbios *bios)
3398{
3399 struct drm_nouveau_private *dev_priv = dev->dev_private;
3400
3401 /*
3402 * The fp strap is normally dictated by the "User Strap" in
3403 * PEXTDEV_BOOT_0[20:16], but on BMP cards when bit 2 of the
3404 * Internal_Flags struct at 0x48 is set, the user strap gets overriden
3405 * by the PCI subsystem ID during POST, but not before the previous user
3406 * strap has been committed to CR58 for CR57=0xf on head A, which may be
3407 * read and used instead
3408 */
3409
3410 if (bios->major_version < 5 && bios->data[0x48] & 0x4)
3411 return NVReadVgaCrtc5758(dev, 0, 0xf) & 0xf;
3412
3413 if (dev_priv->card_type >= NV_50)
3414 return (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 24) & 0xf;
3415 else
3416 return (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 16) & 0xf;
3417}
3418
static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
{
	/*
	 * Locate the BIOS flat panel mode entry selected by the FP strap and
	 * record it in bios->fp.mode_ptr; also sets digital_min_front_porch
	 * and fp_no_ddc along the way.  Returns 0 when there is simply no
	 * strap-derived mode, negative errno for malformed tables.
	 */
	uint8_t *fptable;
	uint8_t fptable_ver, headerlen = 0, recordlen, fpentries = 0xf, fpindex;
	int ret, ofs, fpstrapping;
	struct lvdstableheader lth;

	if (bios->fp.fptablepointer == 0x0) {
		/* Apple cards don't have the fp table; the laptops use DDC */
		/* The table is also missing on some x86 IGPs */
#ifndef __powerpc__
		NV_ERROR(dev, "Pointer to flat panel table invalid\n");
#endif
		bios->pub.digital_min_front_porch = 0x4b;
		return 0;
	}

	fptable = &bios->data[bios->fp.fptablepointer];
	fptable_ver = fptable[0];

	switch (fptable_ver) {
	/*
	 * BMP version 0x5.0x11 BIOSen have version 1 like tables, but no
	 * version field, and miss one of the spread spectrum/PWM bytes.
	 * This could affect early GF2Go parts (not seen any appropriate ROMs
	 * though). Here we assume that a version of 0x05 matches this case
	 * (combining with a BMP version check would be better), as the
	 * common case for the panel type field is 0x0005, and that is in
	 * fact what we are reading the first byte of.
	 */
	case 0x05: /* some NV10, 11, 15, 16 */
		recordlen = 42;
		ofs = -1;
		break;
	case 0x10: /* some NV15/16, and NV11+ */
		recordlen = 44;
		ofs = 0;
		break;
	case 0x20: /* NV40+ */
		headerlen = fptable[1];
		recordlen = fptable[2];
		fpentries = fptable[3];
		/*
		 * fptable[4] is the minimum
		 * RAMDAC_FP_HCRTC -> RAMDAC_FP_HSYNC_START gap
		 */
		bios->pub.digital_min_front_porch = fptable[4];
		ofs = -7;
		break;
	default:
		NV_ERROR(dev,
			 "FP table revision %d.%d not currently supported\n",
			 fptable_ver >> 4, fptable_ver & 0xf);
		return -ENOSYS;
	}

	if (!bios->is_mobile) /* !mobile only needs digital_min_front_porch */
		return 0;

	ret = parse_lvds_manufacturer_table_header(dev, bios, &lth);
	if (ret)
		return ret;

	/* BIT-style LVDS tables double as the FP xlat table */
	if (lth.lvds_ver == 0x30 || lth.lvds_ver == 0x40) {
		bios->fp.fpxlatetableptr = bios->fp.lvdsmanufacturerpointer +
							lth.headerlen + 1;
		bios->fp.xlatwidth = lth.recordlen;
	}
	if (bios->fp.fpxlatetableptr == 0x0) {
		NV_ERROR(dev, "Pointer to flat panel xlat table invalid\n");
		return -EINVAL;
	}

	fpstrapping = get_fp_strap(dev, bios);

	/* translate the strap nibble into an index in the FP mode table */
	fpindex = bios->data[bios->fp.fpxlatetableptr +
					fpstrapping * bios->fp.xlatwidth];

	if (fpindex > fpentries) {
		NV_ERROR(dev, "Bad flat panel table index\n");
		return -ENOENT;
	}

	/* nv4x cards need both a strap value and fpindex of 0xf to use DDC */
	if (lth.lvds_ver > 0x10)
		bios->pub.fp_no_ddc = fpstrapping != 0xf || fpindex != 0xf;

	/*
	 * If either the strap or xlated fpindex value are 0xf there is no
	 * panel using a strap-derived bios mode present. this condition
	 * includes, but is different from, the DDC panel indicator above
	 */
	if (fpstrapping == 0xf || fpindex == 0xf)
		return 0;

	bios->fp.mode_ptr = bios->fp.fptablepointer + headerlen +
			    recordlen * fpindex + ofs;

	NV_TRACE(dev, "BIOS FP mode: %dx%d (%dkHz pixel clock)\n",
		 ROM16(bios->data[bios->fp.mode_ptr + 11]) + 1,
		 ROM16(bios->data[bios->fp.mode_ptr + 25]) + 1,
		 ROM16(bios->data[bios->fp.mode_ptr + 7]) * 10);

	return 0;
}
3524
bool nouveau_bios_fp_mode(struct drm_device *dev, struct drm_display_mode *mode)
{
	/*
	 * Fill *mode from the BIOS flat panel mode entry located earlier by
	 * parse_fp_mode_table().  Passing mode == NULL merely asks whether a
	 * BIOS mode is available.  Returns true iff bios->fp.mode_ptr is set.
	 */
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvbios *bios = &dev_priv->VBIOS;
	uint8_t *mode_entry = &bios->data[bios->fp.mode_ptr];

	if (!mode) /* just checking whether we can produce a mode */
		return bios->fp.mode_ptr;

	memset(mode, 0, sizeof(struct drm_display_mode));
	/*
	 * For version 1.0 (version in byte 0):
	 * bytes 1-2 are "panel type", including bits on whether Colour/mono,
	 * single/dual link, and type (TFT etc.)
	 * bytes 3-6 are bits per colour in RGBX
	 */
	mode->clock = ROM16(mode_entry[7]) * 10;	/* stored in 10s of kHz */
	/* bytes 9-10 is HActive */
	mode->hdisplay = ROM16(mode_entry[11]) + 1;
	/*
	 * bytes 13-14 is HValid Start
	 * bytes 15-16 is HValid End
	 */
	mode->hsync_start = ROM16(mode_entry[17]) + 1;
	mode->hsync_end = ROM16(mode_entry[19]) + 1;
	mode->htotal = ROM16(mode_entry[21]) + 1;
	/* bytes 23-24, 27-30 similarly, but vertical */
	mode->vdisplay = ROM16(mode_entry[25]) + 1;
	mode->vsync_start = ROM16(mode_entry[31]) + 1;
	mode->vsync_end = ROM16(mode_entry[33]) + 1;
	mode->vtotal = ROM16(mode_entry[35]) + 1;
	/* byte 37: bit 4 = hsync polarity, bit 0 = vsync polarity */
	mode->flags |= (mode_entry[37] & 0x10) ?
			DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
	mode->flags |= (mode_entry[37] & 0x1) ?
			DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
	/*
	 * bytes 38-39 relate to spread spectrum settings
	 * bytes 40-43 are something to do with PWM
	 */

	mode->status = MODE_OK;
	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
	drm_mode_set_name(mode);
	return bios->fp.mode_ptr;
}
3570
int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, bool *if_is_24bit)
{
	/*
	 * The LVDS table header is (mostly) described in
	 * parse_lvds_manufacturer_table_header(): the BIT header additionally
	 * contains the dual-link transition pxclk (in 10s kHz), at byte 5 - if
	 * straps are not being used for the panel, this specifies the frequency
	 * at which modes should be set up in the dual link style.
	 *
	 * Following the header, the BMP (ver 0xa) table has several records,
	 * indexed by a seperate xlat table, indexed in turn by the fp strap in
	 * EXTDEV_BOOT. Each record had a config byte, followed by 6 script
	 * numbers for use by INIT_SUB which controlled panel init and power,
	 * and finally a dword of ms to sleep between power off and on
	 * operations.
	 *
	 * In the BIT versions, the table following the header serves as an
	 * integrated config and xlat table: the records in the table are
	 * indexed by the FP strap nibble in EXTDEV_BOOT, and each record has
	 * two bytes - the first as a config byte, the second for indexing the
	 * fp mode table pointed to by the BIT 'D' table
	 *
	 * DDC is not used until after card init, so selecting the correct table
	 * entry and setting the dual link flag for EDID equipped panels,
	 * requiring tests against the native-mode pixel clock, cannot be done
	 * until later, when this function should be called with non-zero pxclk
	 *
	 * Outputs: *dl = dual link required; *if_is_24bit only written for
	 * BMP (0x0a) tables.  Returns 0 on success or negative errno.
	 */
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvbios *bios = &dev_priv->VBIOS;
	int fpstrapping = get_fp_strap(dev, bios), lvdsmanufacturerindex = 0;
	struct lvdstableheader lth;
	uint16_t lvdsofs;
	int ret, chip_version = bios->pub.chip_version;

	ret = parse_lvds_manufacturer_table_header(dev, bios, &lth);
	if (ret)
		return ret;

	switch (lth.lvds_ver) {
	case 0x0a: /* pre NV40 */
		lvdsmanufacturerindex = bios->data[
					bios->fp.fpxlatemanufacturertableptr +
					fpstrapping];

		/* we're done if this isn't the EDID panel case */
		if (!pxclk)
			break;

		if (chip_version < 0x25) {
			/* nv17 behaviour
			 *
			 * It seems the old style lvds script pointer is reused
			 * to select 18/24 bit colour depth for EDID panels.
			 */
			lvdsmanufacturerindex =
				(bios->legacy.lvds_single_a_script_ptr & 1) ?
									2 : 0;
			if (pxclk >= bios->fp.duallink_transition_clk)
				lvdsmanufacturerindex++;
		} else if (chip_version < 0x30) {
			/* nv28 behaviour (off-chip encoder)
			 *
			 * nv28 does a complex dance of first using byte 121 of
			 * the EDID to choose the lvdsmanufacturerindex, then
			 * later attempting to match the EDID manufacturer and
			 * product IDs in a table (signature 'pidt' (panel id
			 * table?)), setting an lvdsmanufacturerindex of 0 and
			 * an fp strap of the match index (or 0xf if none)
			 */
			lvdsmanufacturerindex = 0;
		} else {
			/* nv31, nv34 behaviour */
			lvdsmanufacturerindex = 0;
			if (pxclk >= bios->fp.duallink_transition_clk)
				lvdsmanufacturerindex = 2;
			if (pxclk >= 140000)
				lvdsmanufacturerindex = 3;
		}

		/*
		 * nvidia set the high nibble of (cr57=f, cr58) to
		 * lvdsmanufacturerindex in this case; we don't
		 */
		break;
	case 0x30: /* NV4x */
	case 0x40: /* G80/G90 */
		/* BIT tables: records are indexed directly by the strap */
		lvdsmanufacturerindex = fpstrapping;
		break;
	default:
		NV_ERROR(dev, "LVDS table revision not currently supported\n");
		return -ENOSYS;
	}

	lvdsofs = bios->fp.xlated_entry = bios->fp.lvdsmanufacturerpointer + lth.headerlen + lth.recordlen * lvdsmanufacturerindex;
	switch (lth.lvds_ver) {
	case 0x0a:
		/* BMP config byte: one flag bit per property */
		bios->fp.power_off_for_reset = bios->data[lvdsofs] & 1;
		bios->fp.reset_after_pclk_change = bios->data[lvdsofs] & 2;
		bios->fp.dual_link = bios->data[lvdsofs] & 4;
		bios->fp.link_c_increment = bios->data[lvdsofs] & 8;
		*if_is_24bit = bios->data[lvdsofs] & 16;
		break;
	case 0x30:
		/*
		 * My money would be on there being a 24 bit interface bit in
		 * this table, but I have no example of a laptop bios with a
		 * 24 bit panel to confirm that. Hence we shout loudly if any
		 * bit other than bit 0 is set (I've not even seen bit 1)
		 */
		if (bios->data[lvdsofs] > 1)
			NV_ERROR(dev,
				 "You have a very unusual laptop display; please report it\n");
		/*
		 * No sign of the "power off for reset" or "reset for panel
		 * on" bits, but it's safer to assume we should
		 */
		bios->fp.power_off_for_reset = true;
		bios->fp.reset_after_pclk_change = true;
		/*
		 * It's ok lvdsofs is wrong for nv4x edid case; dual_link is
		 * over-written, and BITbit1 isn't used
		 */
		bios->fp.dual_link = bios->data[lvdsofs] & 1;
		bios->fp.BITbit1 = bios->data[lvdsofs] & 2;
		bios->fp.duallink_transition_clk = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 5]) * 10;
		break;
	case 0x40:
		bios->fp.dual_link = bios->data[lvdsofs] & 1;
		bios->fp.if_is_24bit = bios->data[lvdsofs] & 2;
		bios->fp.strapless_is_24bit = bios->data[bios->fp.lvdsmanufacturerpointer + 4];
		bios->fp.duallink_transition_clk = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 5]) * 10;
		break;
	}

	/* set dual_link flag for EDID case */
	if (pxclk && (chip_version < 0x25 || chip_version > 0x28))
		bios->fp.dual_link = (pxclk >= bios->fp.duallink_transition_clk);

	*dl = bios->fp.dual_link;

	return 0;
}
3713
3714static uint8_t *
3715bios_output_config_match(struct drm_device *dev, struct dcb_entry *dcbent,
3716 uint16_t record, int record_len, int record_nr)
3717{
3718 struct drm_nouveau_private *dev_priv = dev->dev_private;
3719 struct nvbios *bios = &dev_priv->VBIOS;
3720 uint32_t entry;
3721 uint16_t table;
3722 int i, v;
3723
3724 for (i = 0; i < record_nr; i++, record += record_len) {
3725 table = ROM16(bios->data[record]);
3726 if (!table)
3727 continue;
3728 entry = ROM32(bios->data[table]);
3729
3730 v = (entry & 0x000f0000) >> 16;
3731 if (!(v & dcbent->or))
3732 continue;
3733
3734 v = (entry & 0x000000f0) >> 4;
3735 if (v != dcbent->location)
3736 continue;
3737
3738 v = (entry & 0x0000000f);
3739 if (v != dcbent->type)
3740 continue;
3741
3742 return &bios->data[table];
3743 }
3744
3745 return NULL;
3746}
3747
3748void *
3749nouveau_bios_dp_table(struct drm_device *dev, struct dcb_entry *dcbent,
3750 int *length)
3751{
3752 struct drm_nouveau_private *dev_priv = dev->dev_private;
3753 struct nvbios *bios = &dev_priv->VBIOS;
3754 uint8_t *table;
3755
3756 if (!bios->display.dp_table_ptr) {
3757 NV_ERROR(dev, "No pointer to DisplayPort table\n");
3758 return NULL;
3759 }
3760 table = &bios->data[bios->display.dp_table_ptr];
3761
3762 if (table[0] != 0x21) {
3763 NV_ERROR(dev, "DisplayPort table version 0x%02x unknown\n",
3764 table[0]);
3765 return NULL;
3766 }
3767
3768 *length = table[4];
3769 return bios_output_config_match(dev, dcbent,
3770 bios->display.dp_table_ptr + table[1],
3771 table[2], table[3]);
3772}
3773
int
nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
			       uint32_t sub, int pxclk)
{
	/*
	 * The display script table is located by the BIT 'U' table.
	 *
	 * It contains an array of pointers to various tables describing
	 * a particular output type. The first 32-bits of the output
	 * tables contains similar information to a DCB entry, and is
	 * used to decide whether that particular table is suitable for
	 * the output you want to access.
	 *
	 * The "record header length" field here seems to indicate the
	 * offset of the first configuration entry in the output tables.
	 * This is 10 on most cards I've seen, but 12 has been witnessed
	 * on DP cards, and there's another script pointer within the
	 * header.
	 *
	 * offset + 0 ( 8 bits): version
	 * offset + 1 ( 8 bits): header length
	 * offset + 2 ( 8 bits): record length
	 * offset + 3 ( 8 bits): number of records
	 * offset + 4 ( 8 bits): record header length
	 * offset + 5 (16 bits): pointer to first output script table
	 *
	 * The pxclk argument encodes which script gets run:
	 *    0  -> output script 0 (otable[6])
	 *   -1  -> output script 1 (otable[8])
	 *   -2  -> output script 2 (otable[10], only when the record
	 *          header length is >= 12)
	 *   >0  -> clock script 0 for that pixel clock (in kHz)
	 *   <-2 -> clock script 1 for pixel clock -pxclk
	 *
	 * Returns 0 on success, 1 on any failure (missing table, no
	 * matching output entry, or no script to run).
	 */

	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct init_exec iexec = {true, false};
	struct nvbios *bios = &dev_priv->VBIOS;
	uint8_t *table = &bios->data[bios->display.script_table_ptr];
	uint8_t *otable = NULL;
	uint16_t script;
	int i = 0;

	if (!bios->display.script_table_ptr) {
		NV_ERROR(dev, "No pointer to output script table\n");
		return 1;
	}

	/*
	 * Nothing useful has been in any of the pre-2.0 tables I've seen,
	 * so until they are, we really don't need to care.
	 */
	if (table[0] < 0x20)
		return 1;

	if (table[0] != 0x20 && table[0] != 0x21) {
		NV_ERROR(dev, "Output script table version 0x%02x unknown\n",
			 table[0]);
		return 1;
	}

	/*
	 * The output script tables describing a particular output type
	 * look as follows:
	 *
	 * offset + 0 (32 bits): output this table matches (hash of DCB)
	 * offset + 4 ( 8 bits): unknown
	 * offset + 5 ( 8 bits): number of configurations
	 * offset + 6 (16 bits): pointer to some script
	 * offset + 8 (16 bits): pointer to some script
	 *
	 * headerlen == 10
	 * offset + 10           : configuration 0
	 *
	 * headerlen == 12
	 * offset + 10           : pointer to some script
	 * offset + 12           : configuration 0
	 *
	 * Each config entry is as follows:
	 *
	 * offset + 0 (16 bits): unknown, assumed to be a match value
	 * offset + 2 (16 bits): pointer to script table (clock set?)
	 * offset + 4 (16 bits): pointer to script table (reset?)
	 *
	 * There doesn't appear to be a count value to say how many
	 * entries exist in each script table, instead, a 0 value in
	 * the first 16-bit word seems to indicate both the end of the
	 * list and the default entry.  The second 16-bit word in the
	 * script tables is a pointer to the script to execute.
	 */

	NV_DEBUG(dev, "Searching for output entry for %d %d %d\n",
			dcbent->type, dcbent->location, dcbent->or);
	otable = bios_output_config_match(dev, dcbent, table[1] +
					  bios->display.script_table_ptr,
					  table[2], table[3]);
	if (!otable) {
		NV_ERROR(dev, "Couldn't find matching output script table\n");
		return 1;
	}

	/*
	 * Only the clock scripts (pxclk > 0 or pxclk < -2) are selected
	 * per-configuration: walk the config entries (6 bytes each,
	 * starting table[4] bytes into the output table) for a match
	 * value equal to 'sub'.
	 */
	if (pxclk < -2 || pxclk > 0) {
		/* Try to find matching script table entry */
		for (i = 0; i < otable[5]; i++) {
			if (ROM16(otable[table[4] + i*6]) == sub)
				break;
		}

		/* fall back to config 0 when no entry matches 'sub' */
		if (i == otable[5]) {
			NV_ERROR(dev, "Table 0x%04x not found for %d/%d, "
				 "using first\n",
				 sub, dcbent->type, dcbent->or);
			i = 0;
		}
	}

	/* record which output the scripts below are run for */
	bios->display.output = dcbent;

	if (pxclk == 0) {
		script = ROM16(otable[6]);
		if (!script) {
			NV_DEBUG(dev, "output script 0 not found\n");
			return 1;
		}

		NV_TRACE(dev, "0x%04X: parsing output script 0\n", script);
		parse_init_table(bios, script, &iexec);
	} else
	if (pxclk == -1) {
		script = ROM16(otable[8]);
		if (!script) {
			NV_DEBUG(dev, "output script 1 not found\n");
			return 1;
		}

		NV_TRACE(dev, "0x%04X: parsing output script 1\n", script);
		parse_init_table(bios, script, &iexec);
	} else
	if (pxclk == -2) {
		/* script 2 only exists in the longer (>= 12 byte) header */
		if (table[4] >= 12)
			script = ROM16(otable[10]);
		else
			script = 0;
		if (!script) {
			NV_DEBUG(dev, "output script 2 not found\n");
			return 1;
		}

		NV_TRACE(dev, "0x%04X: parsing output script 2\n", script);
		parse_init_table(bios, script, &iexec);
	} else
	if (pxclk > 0) {
		/* clock script 0: second word of matched config entry */
		script = ROM16(otable[table[4] + i*6 + 2]);
		if (script)
			script = clkcmptable(bios, script, pxclk);
		if (!script) {
			NV_ERROR(dev, "clock script 0 not found\n");
			return 1;
		}

		NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script);
		parse_init_table(bios, script, &iexec);
	} else
	if (pxclk < 0) {
		/* clock script 1: third word of matched config entry */
		script = ROM16(otable[table[4] + i*6 + 4]);
		if (script)
			script = clkcmptable(bios, script, -pxclk);
		if (!script) {
			NV_DEBUG(dev, "clock script 1 not found\n");
			return 1;
		}

		NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script);
		parse_init_table(bios, script, &iexec);
	}

	return 0;
}
3944
3945
int run_tmds_table(struct drm_device *dev, struct dcb_entry *dcbent, int head, int pxclk)
{
	/*
	 * the pxclk parameter is in kHz
	 *
	 * This runs the TMDS regs setting code found on BIT bios cards
	 *
	 * For ffs(or) == 1 use the first table, for ffs(or) == 2 and
	 * ffs(or) == 3, use the second.
	 *
	 * Returns 0 on success (or when nothing needs doing), negative
	 * errno when the required tables/scripts are missing.
	 */

	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvbios *bios = &dev_priv->VBIOS;
	int cv = bios->pub.chip_version;
	uint16_t clktable = 0, scriptptr;
	uint32_t sel_clk_binding, sel_clk;

	/* pre-nv17 off-chip tmds uses scripts, post nv17 doesn't */
	if (cv >= 0x17 && cv != 0x1a && cv != 0x20 &&
	    dcbent->location != DCB_LOC_ON_CHIP)
		return 0;

	/* pick the pixel clock comparison table for this output's "or" */
	switch (ffs(dcbent->or)) {
	case 1:
		clktable = bios->tmds.output0_script_ptr;
		break;
	case 2:
	case 3:
		clktable = bios->tmds.output1_script_ptr;
		break;
	}

	if (!clktable) {
		NV_ERROR(dev, "Pixel clock comparison table not found\n");
		return -EINVAL;
	}

	/* walk the comparison table to the script matching pxclk */
	scriptptr = clkcmptable(bios, clktable, pxclk);

	if (!scriptptr) {
		NV_ERROR(dev, "TMDS output init script not found\n");
		return -ENOENT;
	}

	/* don't let script change pll->head binding */
	sel_clk_binding = bios_rd32(bios, NV_PRAMDAC_SEL_CLK) & 0x50000;
	/*
	 * NOTE(review): 165000 kHz is the usual single-link TMDS limit —
	 * presumably the last argument selects dual-link mode; confirm
	 * against run_digital_op_script.
	 */
	run_digital_op_script(dev, scriptptr, dcbent, head, pxclk >= 165000);
	/* restore the saved pll->head binding bits after the script ran */
	sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK) & ~0x50000;
	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, sel_clk | sel_clk_binding);

	return 0;
}
3998
3999int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims *pll_lim)
4000{
4001 /*
4002 * PLL limits table
4003 *
4004 * Version 0x10: NV30, NV31
4005 * One byte header (version), one record of 24 bytes
4006 * Version 0x11: NV36 - Not implemented
4007 * Seems to have same record style as 0x10, but 3 records rather than 1
4008 * Version 0x20: Found on Geforce 6 cards
4009 * Trivial 4 byte BIT header. 31 (0x1f) byte record length
4010 * Version 0x21: Found on Geforce 7, 8 and some Geforce 6 cards
4011 * 5 byte header, fifth byte of unknown purpose. 35 (0x23) byte record
4012 * length in general, some (integrated) have an extra configuration byte
4013 * Version 0x30: Found on Geforce 8, separates the register mapping
4014 * from the limits tables.
4015 */
4016
4017 struct drm_nouveau_private *dev_priv = dev->dev_private;
4018 struct nvbios *bios = &dev_priv->VBIOS;
4019 int cv = bios->pub.chip_version, pllindex = 0;
4020 uint8_t pll_lim_ver = 0, headerlen = 0, recordlen = 0, entries = 0;
4021 uint32_t crystal_strap_mask, crystal_straps;
4022
4023 if (!bios->pll_limit_tbl_ptr) {
4024 if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 ||
4025 cv >= 0x40) {
4026 NV_ERROR(dev, "Pointer to PLL limits table invalid\n");
4027 return -EINVAL;
4028 }
4029 } else
4030 pll_lim_ver = bios->data[bios->pll_limit_tbl_ptr];
4031
4032 crystal_strap_mask = 1 << 6;
4033 /* open coded dev->twoHeads test */
4034 if (cv > 0x10 && cv != 0x15 && cv != 0x1a && cv != 0x20)
4035 crystal_strap_mask |= 1 << 22;
4036 crystal_straps = nvReadEXTDEV(dev, NV_PEXTDEV_BOOT_0) &
4037 crystal_strap_mask;
4038
4039 switch (pll_lim_ver) {
4040 /*
4041 * We use version 0 to indicate a pre limit table bios (single stage
4042 * pll) and load the hard coded limits instead.
4043 */
4044 case 0:
4045 break;
4046 case 0x10:
4047 case 0x11:
4048 /*
4049 * Strictly v0x11 has 3 entries, but the last two don't seem
4050 * to get used.
4051 */
4052 headerlen = 1;
4053 recordlen = 0x18;
4054 entries = 1;
4055 pllindex = 0;
4056 break;
4057 case 0x20:
4058 case 0x21:
4059 case 0x30:
4060 case 0x40:
4061 headerlen = bios->data[bios->pll_limit_tbl_ptr + 1];
4062 recordlen = bios->data[bios->pll_limit_tbl_ptr + 2];
4063 entries = bios->data[bios->pll_limit_tbl_ptr + 3];
4064 break;
4065 default:
4066 NV_ERROR(dev, "PLL limits table revision 0x%X not currently "
4067 "supported\n", pll_lim_ver);
4068 return -ENOSYS;
4069 }
4070
4071 /* initialize all members to zero */
4072 memset(pll_lim, 0, sizeof(struct pll_lims));
4073
4074 if (pll_lim_ver == 0x10 || pll_lim_ver == 0x11) {
4075 uint8_t *pll_rec = &bios->data[bios->pll_limit_tbl_ptr + headerlen + recordlen * pllindex];
4076
4077 pll_lim->vco1.minfreq = ROM32(pll_rec[0]);
4078 pll_lim->vco1.maxfreq = ROM32(pll_rec[4]);
4079 pll_lim->vco2.minfreq = ROM32(pll_rec[8]);
4080 pll_lim->vco2.maxfreq = ROM32(pll_rec[12]);
4081 pll_lim->vco1.min_inputfreq = ROM32(pll_rec[16]);
4082 pll_lim->vco2.min_inputfreq = ROM32(pll_rec[20]);
4083 pll_lim->vco1.max_inputfreq = pll_lim->vco2.max_inputfreq = INT_MAX;
4084
4085 /* these values taken from nv30/31/36 */
4086 pll_lim->vco1.min_n = 0x1;
4087 if (cv == 0x36)
4088 pll_lim->vco1.min_n = 0x5;
4089 pll_lim->vco1.max_n = 0xff;
4090 pll_lim->vco1.min_m = 0x1;
4091 pll_lim->vco1.max_m = 0xd;
4092 pll_lim->vco2.min_n = 0x4;
4093 /*
4094 * On nv30, 31, 36 (i.e. all cards with two stage PLLs with this
4095 * table version (apart from nv35)), N2 is compared to
4096 * maxN2 (0x46) and 10 * maxM2 (0x4), so set maxN2 to 0x28 and
4097 * save a comparison
4098 */
4099 pll_lim->vco2.max_n = 0x28;
4100 if (cv == 0x30 || cv == 0x35)
4101 /* only 5 bits available for N2 on nv30/35 */
4102 pll_lim->vco2.max_n = 0x1f;
4103 pll_lim->vco2.min_m = 0x1;
4104 pll_lim->vco2.max_m = 0x4;
4105 pll_lim->max_log2p = 0x7;
4106 pll_lim->max_usable_log2p = 0x6;
4107 } else if (pll_lim_ver == 0x20 || pll_lim_ver == 0x21) {
4108 uint16_t plloffs = bios->pll_limit_tbl_ptr + headerlen;
4109 uint32_t reg = 0; /* default match */
4110 uint8_t *pll_rec;
4111 int i;
4112
4113 /*
4114 * First entry is default match, if nothing better. warn if
4115 * reg field nonzero
4116 */
4117 if (ROM32(bios->data[plloffs]))
4118 NV_WARN(dev, "Default PLL limit entry has non-zero "
4119 "register field\n");
4120
4121 if (limit_match > MAX_PLL_TYPES)
4122 /* we've been passed a reg as the match */
4123 reg = limit_match;
4124 else /* limit match is a pll type */
4125 for (i = 1; i < entries && !reg; i++) {
4126 uint32_t cmpreg = ROM32(bios->data[plloffs + recordlen * i]);
4127
4128 if (limit_match == NVPLL &&
4129 (cmpreg == NV_PRAMDAC_NVPLL_COEFF || cmpreg == 0x4000))
4130 reg = cmpreg;
4131 if (limit_match == MPLL &&
4132 (cmpreg == NV_PRAMDAC_MPLL_COEFF || cmpreg == 0x4020))
4133 reg = cmpreg;
4134 if (limit_match == VPLL1 &&
4135 (cmpreg == NV_PRAMDAC_VPLL_COEFF || cmpreg == 0x4010))
4136 reg = cmpreg;
4137 if (limit_match == VPLL2 &&
4138 (cmpreg == NV_RAMDAC_VPLL2 || cmpreg == 0x4018))
4139 reg = cmpreg;
4140 }
4141
4142 for (i = 1; i < entries; i++)
4143 if (ROM32(bios->data[plloffs + recordlen * i]) == reg) {
4144 pllindex = i;
4145 break;
4146 }
4147
4148 pll_rec = &bios->data[plloffs + recordlen * pllindex];
4149
4150 BIOSLOG(bios, "Loading PLL limits for reg 0x%08x\n",
4151 pllindex ? reg : 0);
4152
4153 /*
4154 * Frequencies are stored in tables in MHz, kHz are more
4155 * useful, so we convert.
4156 */
4157
4158 /* What output frequencies can each VCO generate? */
4159 pll_lim->vco1.minfreq = ROM16(pll_rec[4]) * 1000;
4160 pll_lim->vco1.maxfreq = ROM16(pll_rec[6]) * 1000;
4161 pll_lim->vco2.minfreq = ROM16(pll_rec[8]) * 1000;
4162 pll_lim->vco2.maxfreq = ROM16(pll_rec[10]) * 1000;
4163
4164 /* What input frequencies they accept (past the m-divider)? */
4165 pll_lim->vco1.min_inputfreq = ROM16(pll_rec[12]) * 1000;
4166 pll_lim->vco2.min_inputfreq = ROM16(pll_rec[14]) * 1000;
4167 pll_lim->vco1.max_inputfreq = ROM16(pll_rec[16]) * 1000;
4168 pll_lim->vco2.max_inputfreq = ROM16(pll_rec[18]) * 1000;
4169
4170 /* What values are accepted as multiplier and divider? */
4171 pll_lim->vco1.min_n = pll_rec[20];
4172 pll_lim->vco1.max_n = pll_rec[21];
4173 pll_lim->vco1.min_m = pll_rec[22];
4174 pll_lim->vco1.max_m = pll_rec[23];
4175 pll_lim->vco2.min_n = pll_rec[24];
4176 pll_lim->vco2.max_n = pll_rec[25];
4177 pll_lim->vco2.min_m = pll_rec[26];
4178 pll_lim->vco2.max_m = pll_rec[27];
4179
4180 pll_lim->max_usable_log2p = pll_lim->max_log2p = pll_rec[29];
4181 if (pll_lim->max_log2p > 0x7)
4182 /* pll decoding in nv_hw.c assumes never > 7 */
4183 NV_WARN(dev, "Max log2 P value greater than 7 (%d)\n",
4184 pll_lim->max_log2p);
4185 if (cv < 0x60)
4186 pll_lim->max_usable_log2p = 0x6;
4187 pll_lim->log2p_bias = pll_rec[30];
4188
4189 if (recordlen > 0x22)
4190 pll_lim->refclk = ROM32(pll_rec[31]);
4191
4192 if (recordlen > 0x23 && pll_rec[35])
4193 NV_WARN(dev,
4194 "Bits set in PLL configuration byte (%x)\n",
4195 pll_rec[35]);
4196
4197 /* C51 special not seen elsewhere */
4198 if (cv == 0x51 && !pll_lim->refclk) {
4199 uint32_t sel_clk = bios_rd32(bios, NV_PRAMDAC_SEL_CLK);
4200
4201 if (((limit_match == NV_PRAMDAC_VPLL_COEFF || limit_match == VPLL1) && sel_clk & 0x20) ||
4202 ((limit_match == NV_RAMDAC_VPLL2 || limit_match == VPLL2) && sel_clk & 0x80)) {
4203 if (bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_CHIP_ID_INDEX) < 0xa3)
4204 pll_lim->refclk = 200000;
4205 else
4206 pll_lim->refclk = 25000;
4207 }
4208 }
4209 } else if (pll_lim_ver == 0x30) { /* ver 0x30 */
4210 uint8_t *entry = &bios->data[bios->pll_limit_tbl_ptr + headerlen];
4211 uint8_t *record = NULL;
4212 int i;
4213
4214 BIOSLOG(bios, "Loading PLL limits for register 0x%08x\n",
4215 limit_match);
4216
4217 for (i = 0; i < entries; i++, entry += recordlen) {
4218 if (ROM32(entry[3]) == limit_match) {
4219 record = &bios->data[ROM16(entry[1])];
4220 break;
4221 }
4222 }
4223
4224 if (!record) {
4225 NV_ERROR(dev, "Register 0x%08x not found in PLL "
4226 "limits table", limit_match);
4227 return -ENOENT;
4228 }
4229
4230 pll_lim->vco1.minfreq = ROM16(record[0]) * 1000;
4231 pll_lim->vco1.maxfreq = ROM16(record[2]) * 1000;
4232 pll_lim->vco2.minfreq = ROM16(record[4]) * 1000;
4233 pll_lim->vco2.maxfreq = ROM16(record[6]) * 1000;
4234 pll_lim->vco1.min_inputfreq = ROM16(record[8]) * 1000;
4235 pll_lim->vco2.min_inputfreq = ROM16(record[10]) * 1000;
4236 pll_lim->vco1.max_inputfreq = ROM16(record[12]) * 1000;
4237 pll_lim->vco2.max_inputfreq = ROM16(record[14]) * 1000;
4238 pll_lim->vco1.min_n = record[16];
4239 pll_lim->vco1.max_n = record[17];
4240 pll_lim->vco1.min_m = record[18];
4241 pll_lim->vco1.max_m = record[19];
4242 pll_lim->vco2.min_n = record[20];
4243 pll_lim->vco2.max_n = record[21];
4244 pll_lim->vco2.min_m = record[22];
4245 pll_lim->vco2.max_m = record[23];
4246 pll_lim->max_usable_log2p = pll_lim->max_log2p = record[25];
4247 pll_lim->log2p_bias = record[27];
4248 pll_lim->refclk = ROM32(record[28]);
4249 } else if (pll_lim_ver) { /* ver 0x40 */
4250 uint8_t *entry = &bios->data[bios->pll_limit_tbl_ptr + headerlen];
4251 uint8_t *record = NULL;
4252 int i;
4253
4254 BIOSLOG(bios, "Loading PLL limits for register 0x%08x\n",
4255 limit_match);
4256
4257 for (i = 0; i < entries; i++, entry += recordlen) {
4258 if (ROM32(entry[3]) == limit_match) {
4259 record = &bios->data[ROM16(entry[1])];
4260 break;
4261 }
4262 }
4263
4264 if (!record) {
4265 NV_ERROR(dev, "Register 0x%08x not found in PLL "
4266 "limits table", limit_match);
4267 return -ENOENT;
4268 }
4269
4270 pll_lim->vco1.minfreq = ROM16(record[0]) * 1000;
4271 pll_lim->vco1.maxfreq = ROM16(record[2]) * 1000;
4272 pll_lim->vco1.min_inputfreq = ROM16(record[4]) * 1000;
4273 pll_lim->vco1.max_inputfreq = ROM16(record[6]) * 1000;
4274 pll_lim->vco1.min_m = record[8];
4275 pll_lim->vco1.max_m = record[9];
4276 pll_lim->vco1.min_n = record[10];
4277 pll_lim->vco1.max_n = record[11];
4278 pll_lim->min_p = record[12];
4279 pll_lim->max_p = record[13];
4280 /* where did this go to?? */
4281 if (limit_match == 0x00614100 || limit_match == 0x00614900)
4282 pll_lim->refclk = 27000;
4283 else
4284 pll_lim->refclk = 100000;
4285 }
4286
4287 /*
4288 * By now any valid limit table ought to have set a max frequency for
4289 * vco1, so if it's zero it's either a pre limit table bios, or one
4290 * with an empty limit table (seen on nv18)
4291 */
4292 if (!pll_lim->vco1.maxfreq) {
4293 pll_lim->vco1.minfreq = bios->fminvco;
4294 pll_lim->vco1.maxfreq = bios->fmaxvco;
4295 pll_lim->vco1.min_inputfreq = 0;
4296 pll_lim->vco1.max_inputfreq = INT_MAX;
4297 pll_lim->vco1.min_n = 0x1;
4298 pll_lim->vco1.max_n = 0xff;
4299 pll_lim->vco1.min_m = 0x1;
4300 if (crystal_straps == 0) {
4301 /* nv05 does this, nv11 doesn't, nv10 unknown */
4302 if (cv < 0x11)
4303 pll_lim->vco1.min_m = 0x7;
4304 pll_lim->vco1.max_m = 0xd;
4305 } else {
4306 if (cv < 0x11)
4307 pll_lim->vco1.min_m = 0x8;
4308 pll_lim->vco1.max_m = 0xe;
4309 }
4310 if (cv < 0x17 || cv == 0x1a || cv == 0x20)
4311 pll_lim->max_log2p = 4;
4312 else
4313 pll_lim->max_log2p = 5;
4314 pll_lim->max_usable_log2p = pll_lim->max_log2p;
4315 }
4316
4317 if (!pll_lim->refclk)
4318 switch (crystal_straps) {
4319 case 0:
4320 pll_lim->refclk = 13500;
4321 break;
4322 case (1 << 6):
4323 pll_lim->refclk = 14318;
4324 break;
4325 case (1 << 22):
4326 pll_lim->refclk = 27000;
4327 break;
4328 case (1 << 22 | 1 << 6):
4329 pll_lim->refclk = 25000;
4330 break;
4331 }
4332
4333#if 0 /* for easy debugging */
4334 ErrorF("pll.vco1.minfreq: %d\n", pll_lim->vco1.minfreq);
4335 ErrorF("pll.vco1.maxfreq: %d\n", pll_lim->vco1.maxfreq);
4336 ErrorF("pll.vco2.minfreq: %d\n", pll_lim->vco2.minfreq);
4337 ErrorF("pll.vco2.maxfreq: %d\n", pll_lim->vco2.maxfreq);
4338
4339 ErrorF("pll.vco1.min_inputfreq: %d\n", pll_lim->vco1.min_inputfreq);
4340 ErrorF("pll.vco1.max_inputfreq: %d\n", pll_lim->vco1.max_inputfreq);
4341 ErrorF("pll.vco2.min_inputfreq: %d\n", pll_lim->vco2.min_inputfreq);
4342 ErrorF("pll.vco2.max_inputfreq: %d\n", pll_lim->vco2.max_inputfreq);
4343
4344 ErrorF("pll.vco1.min_n: %d\n", pll_lim->vco1.min_n);
4345 ErrorF("pll.vco1.max_n: %d\n", pll_lim->vco1.max_n);
4346 ErrorF("pll.vco1.min_m: %d\n", pll_lim->vco1.min_m);
4347 ErrorF("pll.vco1.max_m: %d\n", pll_lim->vco1.max_m);
4348 ErrorF("pll.vco2.min_n: %d\n", pll_lim->vco2.min_n);
4349 ErrorF("pll.vco2.max_n: %d\n", pll_lim->vco2.max_n);
4350 ErrorF("pll.vco2.min_m: %d\n", pll_lim->vco2.min_m);
4351 ErrorF("pll.vco2.max_m: %d\n", pll_lim->vco2.max_m);
4352
4353 ErrorF("pll.max_log2p: %d\n", pll_lim->max_log2p);
4354 ErrorF("pll.log2p_bias: %d\n", pll_lim->log2p_bias);
4355
4356 ErrorF("pll.refclk: %d\n", pll_lim->refclk);
4357#endif
4358
4359 return 0;
4360}
4361
4362static void parse_bios_version(struct drm_device *dev, struct nvbios *bios, uint16_t offset)
4363{
4364 /*
4365 * offset + 0 (8 bits): Micro version
4366 * offset + 1 (8 bits): Minor version
4367 * offset + 2 (8 bits): Chip version
4368 * offset + 3 (8 bits): Major version
4369 */
4370
4371 bios->major_version = bios->data[offset + 3];
4372 bios->pub.chip_version = bios->data[offset + 2];
4373 NV_TRACE(dev, "Bios version %02x.%02x.%02x.%02x\n",
4374 bios->data[offset + 3], bios->data[offset + 2],
4375 bios->data[offset + 1], bios->data[offset]);
4376}
4377
4378static void parse_script_table_pointers(struct nvbios *bios, uint16_t offset)
4379{
4380 /*
4381 * Parses the init table segment for pointers used in script execution.
4382 *
4383 * offset + 0 (16 bits): init script tables pointer
4384 * offset + 2 (16 bits): macro index table pointer
4385 * offset + 4 (16 bits): macro table pointer
4386 * offset + 6 (16 bits): condition table pointer
4387 * offset + 8 (16 bits): io condition table pointer
4388 * offset + 10 (16 bits): io flag condition table pointer
4389 * offset + 12 (16 bits): init function table pointer
4390 */
4391
4392 bios->init_script_tbls_ptr = ROM16(bios->data[offset]);
4393 bios->macro_index_tbl_ptr = ROM16(bios->data[offset + 2]);
4394 bios->macro_tbl_ptr = ROM16(bios->data[offset + 4]);
4395 bios->condition_tbl_ptr = ROM16(bios->data[offset + 6]);
4396 bios->io_condition_tbl_ptr = ROM16(bios->data[offset + 8]);
4397 bios->io_flag_condition_tbl_ptr = ROM16(bios->data[offset + 10]);
4398 bios->init_function_tbl_ptr = ROM16(bios->data[offset + 12]);
4399}
4400
4401static int parse_bit_A_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
4402{
4403 /*
4404 * Parses the load detect values for g80 cards.
4405 *
4406 * offset + 0 (16 bits): loadval table pointer
4407 */
4408
4409 uint16_t load_table_ptr;
4410 uint8_t version, headerlen, entrylen, num_entries;
4411
4412 if (bitentry->length != 3) {
4413 NV_ERROR(dev, "Do not understand BIT A table\n");
4414 return -EINVAL;
4415 }
4416
4417 load_table_ptr = ROM16(bios->data[bitentry->offset]);
4418
4419 if (load_table_ptr == 0x0) {
4420 NV_ERROR(dev, "Pointer to BIT loadval table invalid\n");
4421 return -EINVAL;
4422 }
4423
4424 version = bios->data[load_table_ptr];
4425
4426 if (version != 0x10) {
4427 NV_ERROR(dev, "BIT loadval table version %d.%d not supported\n",
4428 version >> 4, version & 0xF);
4429 return -ENOSYS;
4430 }
4431
4432 headerlen = bios->data[load_table_ptr + 1];
4433 entrylen = bios->data[load_table_ptr + 2];
4434 num_entries = bios->data[load_table_ptr + 3];
4435
4436 if (headerlen != 4 || entrylen != 4 || num_entries != 2) {
4437 NV_ERROR(dev, "Do not understand BIT loadval table\n");
4438 return -EINVAL;
4439 }
4440
4441 /* First entry is normal dac, 2nd tv-out perhaps? */
4442 bios->pub.dactestval = ROM32(bios->data[load_table_ptr + headerlen]) & 0x3ff;
4443
4444 return 0;
4445}
4446
4447static int parse_bit_C_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
4448{
4449 /*
4450 * offset + 8 (16 bits): PLL limits table pointer
4451 *
4452 * There's more in here, but that's unknown.
4453 */
4454
4455 if (bitentry->length < 10) {
4456 NV_ERROR(dev, "Do not understand BIT C table\n");
4457 return -EINVAL;
4458 }
4459
4460 bios->pll_limit_tbl_ptr = ROM16(bios->data[bitentry->offset + 8]);
4461
4462 return 0;
4463}
4464
4465static int parse_bit_display_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
4466{
4467 /*
4468 * Parses the flat panel table segment that the bit entry points to.
4469 * Starting at bitentry->offset:
4470 *
4471 * offset + 0 (16 bits): ??? table pointer - seems to have 18 byte
4472 * records beginning with a freq.
4473 * offset + 2 (16 bits): mode table pointer
4474 */
4475
4476 if (bitentry->length != 4) {
4477 NV_ERROR(dev, "Do not understand BIT display table\n");
4478 return -EINVAL;
4479 }
4480
4481 bios->fp.fptablepointer = ROM16(bios->data[bitentry->offset + 2]);
4482
4483 return 0;
4484}
4485
4486static int parse_bit_init_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
4487{
4488 /*
4489 * Parses the init table segment that the bit entry points to.
4490 *
4491 * See parse_script_table_pointers for layout
4492 */
4493
4494 if (bitentry->length < 14) {
4495 NV_ERROR(dev, "Do not understand init table\n");
4496 return -EINVAL;
4497 }
4498
4499 parse_script_table_pointers(bios, bitentry->offset);
4500
4501 if (bitentry->length >= 16)
4502 bios->some_script_ptr = ROM16(bios->data[bitentry->offset + 14]);
4503 if (bitentry->length >= 18)
4504 bios->init96_tbl_ptr = ROM16(bios->data[bitentry->offset + 16]);
4505
4506 return 0;
4507}
4508
4509static int parse_bit_i_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
4510{
4511 /*
4512 * BIT 'i' (info?) table
4513 *
4514 * offset + 0 (32 bits): BIOS version dword (as in B table)
4515 * offset + 5 (8 bits): BIOS feature byte (same as for BMP?)
4516 * offset + 13 (16 bits): pointer to table containing DAC load
4517 * detection comparison values
4518 *
4519 * There's other things in the table, purpose unknown
4520 */
4521
4522 uint16_t daccmpoffset;
4523 uint8_t dacver, dacheaderlen;
4524
4525 if (bitentry->length < 6) {
4526 NV_ERROR(dev, "BIT i table too short for needed information\n");
4527 return -EINVAL;
4528 }
4529
4530 parse_bios_version(dev, bios, bitentry->offset);
4531
4532 /*
4533 * bit 4 seems to indicate a mobile bios (doesn't suffer from BMP's
4534 * Quadro identity crisis), other bits possibly as for BMP feature byte
4535 */
4536 bios->feature_byte = bios->data[bitentry->offset + 5];
4537 bios->is_mobile = bios->feature_byte & FEATURE_MOBILE;
4538
4539 if (bitentry->length < 15) {
4540 NV_WARN(dev, "BIT i table not long enough for DAC load "
4541 "detection comparison table\n");
4542 return -EINVAL;
4543 }
4544
4545 daccmpoffset = ROM16(bios->data[bitentry->offset + 13]);
4546
4547 /* doesn't exist on g80 */
4548 if (!daccmpoffset)
4549 return 0;
4550
4551 /*
4552 * The first value in the table, following the header, is the
4553 * comparison value, the second entry is a comparison value for
4554 * TV load detection.
4555 */
4556
4557 dacver = bios->data[daccmpoffset];
4558 dacheaderlen = bios->data[daccmpoffset + 1];
4559
4560 if (dacver != 0x00 && dacver != 0x10) {
4561 NV_WARN(dev, "DAC load detection comparison table version "
4562 "%d.%d not known\n", dacver >> 4, dacver & 0xf);
4563 return -ENOSYS;
4564 }
4565
4566 bios->pub.dactestval = ROM32(bios->data[daccmpoffset + dacheaderlen]);
4567 bios->pub.tvdactestval = ROM32(bios->data[daccmpoffset + dacheaderlen + 4]);
4568
4569 return 0;
4570}
4571
4572static int parse_bit_lvds_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
4573{
4574 /*
4575 * Parses the LVDS table segment that the bit entry points to.
4576 * Starting at bitentry->offset:
4577 *
4578 * offset + 0 (16 bits): LVDS strap xlate table pointer
4579 */
4580
4581 if (bitentry->length != 2) {
4582 NV_ERROR(dev, "Do not understand BIT LVDS table\n");
4583 return -EINVAL;
4584 }
4585
4586 /*
4587 * No idea if it's still called the LVDS manufacturer table, but
4588 * the concept's close enough.
4589 */
4590 bios->fp.lvdsmanufacturerpointer = ROM16(bios->data[bitentry->offset]);
4591
4592 return 0;
4593}
4594
4595static int
4596parse_bit_M_tbl_entry(struct drm_device *dev, struct nvbios *bios,
4597 struct bit_entry *bitentry)
4598{
4599 /*
4600 * offset + 2 (8 bits): number of options in an
4601 * INIT_RAM_RESTRICT_ZM_REG_GROUP opcode option set
4602 * offset + 3 (16 bits): pointer to strap xlate table for RAM
4603 * restrict option selection
4604 *
4605 * There's a bunch of bits in this table other than the RAM restrict
4606 * stuff that we don't use - their use currently unknown
4607 */
4608
4609 uint16_t rr_strap_xlat;
4610 uint8_t rr_group_count;
4611 int i;
4612
4613 /*
4614 * Older bios versions don't have a sufficiently long table for
4615 * what we want
4616 */
4617 if (bitentry->length < 0x5)
4618 return 0;
4619
4620 if (bitentry->id[1] < 2) {
4621 rr_group_count = bios->data[bitentry->offset + 2];
4622 rr_strap_xlat = ROM16(bios->data[bitentry->offset + 3]);
4623 } else {
4624 rr_group_count = bios->data[bitentry->offset + 0];
4625 rr_strap_xlat = ROM16(bios->data[bitentry->offset + 1]);
4626 }
4627
4628 /* adjust length of INIT_87 */
4629 for (i = 0; itbl_entry[i].name && (itbl_entry[i].id != 0x87); i++);
4630 itbl_entry[i].length += rr_group_count * 4;
4631
4632 /* set up multiplier for INIT_RAM_RESTRICT_ZM_REG_GROUP */
4633 for (; itbl_entry[i].name && (itbl_entry[i].id != 0x8f); i++);
4634 itbl_entry[i].length_multiplier = rr_group_count * 4;
4635
4636 init_ram_restrict_zm_reg_group_blocklen = itbl_entry[i].length_multiplier;
4637 bios->ram_restrict_tbl_ptr = rr_strap_xlat;
4638
4639 return 0;
4640}
4641
4642static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
4643{
4644 /*
4645 * Parses the pointer to the TMDS table
4646 *
4647 * Starting at bitentry->offset:
4648 *
4649 * offset + 0 (16 bits): TMDS table pointer
4650 *
4651 * The TMDS table is typically found just before the DCB table, with a
4652 * characteristic signature of 0x11,0x13 (1.1 being version, 0x13 being
4653 * length?)
4654 *
4655 * At offset +7 is a pointer to a script, which I don't know how to
4656 * run yet.
4657 * At offset +9 is a pointer to another script, likewise
4658 * Offset +11 has a pointer to a table where the first word is a pxclk
4659 * frequency and the second word a pointer to a script, which should be
4660 * run if the comparison pxclk frequency is less than the pxclk desired.
4661 * This repeats for decreasing comparison frequencies
4662 * Offset +13 has a pointer to a similar table
4663 * The selection of table (and possibly +7/+9 script) is dictated by
4664 * "or" from the DCB.
4665 */
4666
4667 uint16_t tmdstableptr, script1, script2;
4668
4669 if (bitentry->length != 2) {
4670 NV_ERROR(dev, "Do not understand BIT TMDS table\n");
4671 return -EINVAL;
4672 }
4673
4674 tmdstableptr = ROM16(bios->data[bitentry->offset]);
4675
4676 if (tmdstableptr == 0x0) {
4677 NV_ERROR(dev, "Pointer to TMDS table invalid\n");
4678 return -EINVAL;
4679 }
4680
4681 /* nv50+ has v2.0, but we don't parse it atm */
4682 if (bios->data[tmdstableptr] != 0x11) {
4683 NV_WARN(dev,
4684 "TMDS table revision %d.%d not currently supported\n",
4685 bios->data[tmdstableptr] >> 4, bios->data[tmdstableptr] & 0xf);
4686 return -ENOSYS;
4687 }
4688
4689 /*
4690 * These two scripts are odd: they don't seem to get run even when
4691 * they are not stubbed.
4692 */
4693 script1 = ROM16(bios->data[tmdstableptr + 7]);
4694 script2 = ROM16(bios->data[tmdstableptr + 9]);
4695 if (bios->data[script1] != 'q' || bios->data[script2] != 'q')
4696 NV_WARN(dev, "TMDS table script pointers not stubbed\n");
4697
4698 bios->tmds.output0_script_ptr = ROM16(bios->data[tmdstableptr + 11]);
4699 bios->tmds.output1_script_ptr = ROM16(bios->data[tmdstableptr + 13]);
4700
4701 return 0;
4702}
4703
4704static int
4705parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
4706 struct bit_entry *bitentry)
4707{
4708 /*
4709 * Parses the pointer to the G80 output script tables
4710 *
4711 * Starting at bitentry->offset:
4712 *
4713 * offset + 0 (16 bits): output script table pointer
4714 */
4715
4716 uint16_t outputscripttableptr;
4717
4718 if (bitentry->length != 3) {
4719 NV_ERROR(dev, "Do not understand BIT U table\n");
4720 return -EINVAL;
4721 }
4722
4723 outputscripttableptr = ROM16(bios->data[bitentry->offset]);
4724 bios->display.script_table_ptr = outputscripttableptr;
4725 return 0;
4726}
4727
4728static int
4729parse_bit_displayport_tbl_entry(struct drm_device *dev, struct nvbios *bios,
4730 struct bit_entry *bitentry)
4731{
4732 bios->display.dp_table_ptr = ROM16(bios->data[bitentry->offset]);
4733 return 0;
4734}
4735
/*
 * Associates a single-character BIT table id with the function that parses
 * that table's entry; consumed by parse_bit_table().
 */
struct bit_table {
	const char id;
	int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
};

/* Compound-literal shorthand: BIT_TABLE('T', tmds) -> { 'T', parse_bit_tmds_tbl_entry } */
#define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
4742
4743static int
4744parse_bit_table(struct nvbios *bios, const uint16_t bitoffset,
4745 struct bit_table *table)
4746{
4747 struct drm_device *dev = bios->dev;
4748 uint8_t maxentries = bios->data[bitoffset + 4];
4749 int i, offset;
4750 struct bit_entry bitentry;
4751
4752 for (i = 0, offset = bitoffset + 6; i < maxentries; i++, offset += 6) {
4753 bitentry.id[0] = bios->data[offset];
4754
4755 if (bitentry.id[0] != table->id)
4756 continue;
4757
4758 bitentry.id[1] = bios->data[offset + 1];
4759 bitentry.length = ROM16(bios->data[offset + 2]);
4760 bitentry.offset = ROM16(bios->data[offset + 4]);
4761
4762 return table->parse_fn(dev, bios, &bitentry);
4763 }
4764
4765 NV_INFO(dev, "BIT table '%c' not found\n", table->id);
4766 return -ENOSYS;
4767}
4768
static int
parse_bit_structure(struct nvbios *bios, const uint16_t bitoffset)
{
	/*
	 * Walks the BIT structure at 'bitoffset', parsing each table we know
	 * about.  Only the 'i' (info), 'C' and 'I' (init) tables are treated
	 * as mandatory; failure to find or parse any other table is ignored.
	 */
	int ret;

	/*
	 * The only restriction on parsing order currently is having 'i' first
	 * for use of bios->*_version or bios->feature_byte while parsing;
	 * functions shouldn't be actually *doing* anything apart from pulling
	 * data from the image into the bios struct, thus no interdependencies
	 */
	ret = parse_bit_table(bios, bitoffset, &BIT_TABLE('i', i));
	if (ret) /* info? */
		return ret;
	if (bios->major_version >= 0x60) /* g80+ */
		parse_bit_table(bios, bitoffset, &BIT_TABLE('A', A));
	ret = parse_bit_table(bios, bitoffset, &BIT_TABLE('C', C));
	if (ret)
		return ret;
	parse_bit_table(bios, bitoffset, &BIT_TABLE('D', display));
	ret = parse_bit_table(bios, bitoffset, &BIT_TABLE('I', init));
	if (ret)
		return ret;
	parse_bit_table(bios, bitoffset, &BIT_TABLE('M', M)); /* memory? */
	parse_bit_table(bios, bitoffset, &BIT_TABLE('L', lvds));
	parse_bit_table(bios, bitoffset, &BIT_TABLE('T', tmds));
	parse_bit_table(bios, bitoffset, &BIT_TABLE('U', U));
	parse_bit_table(bios, bitoffset, &BIT_TABLE('d', displayport));

	return 0;
}
4800
static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsigned int offset)
{
	/*
	 * Parses the BMP structure for useful things, but does not act on them
	 *
	 * offset + 5: BMP major version
	 * offset + 6: BMP minor version
	 * offset + 9: BMP feature byte
	 * offset + 10: BCD encoded BIOS version
	 *
	 * offset + 18: init script table pointer (for bios versions < 5.10h)
	 * offset + 20: extra init script table pointer (for bios
	 * versions < 5.10h)
	 *
	 * offset + 24: memory init table pointer (used on early bios versions)
	 * offset + 26: SDR memory sequencing setup data table
	 * offset + 28: DDR memory sequencing setup data table
	 *
	 * offset + 54: index of I2C CRTC pair to use for CRT output
	 * offset + 55: index of I2C CRTC pair to use for TV output
	 * offset + 56: index of I2C CRTC pair to use for flat panel output
	 * offset + 58: write CRTC index for I2C pair 0
	 * offset + 59: read CRTC index for I2C pair 0
	 * offset + 60: write CRTC index for I2C pair 1
	 * offset + 61: read CRTC index for I2C pair 1
	 *
	 * offset + 67: maximum internal PLL frequency (single stage PLL)
	 * offset + 71: minimum internal PLL frequency (single stage PLL)
	 *
	 * offset + 75: script table pointers, as described in
	 * parse_script_table_pointers
	 *
	 * offset + 89: TMDS single link output A table pointer
	 * offset + 91: TMDS single link output B table pointer
	 * offset + 95: LVDS single link output A table pointer
	 * offset + 105: flat panel timings table pointer
	 * offset + 107: flat panel strapping translation table pointer
	 * offset + 117: LVDS manufacturer panel config table pointer
	 * offset + 119: LVDS manufacturer strapping translation table pointer
	 *
	 * offset + 142: PLL limits table pointer
	 *
	 * offset + 156: minimum pixel clock for LVDS dual link
	 */

	uint8_t *bmp = &bios->data[offset], bmp_version_major, bmp_version_minor;
	uint16_t bmplength;
	uint16_t legacy_scripts_offset, legacy_i2c_offset;

	/* load needed defaults in case we can't parse this info */
	bios->bdcb.dcb.i2c[0].write = NV_CIO_CRE_DDC_WR__INDEX;
	bios->bdcb.dcb.i2c[0].read = NV_CIO_CRE_DDC_STATUS__INDEX;
	bios->bdcb.dcb.i2c[1].write = NV_CIO_CRE_DDC0_WR__INDEX;
	bios->bdcb.dcb.i2c[1].read = NV_CIO_CRE_DDC0_STATUS__INDEX;
	bios->pub.digital_min_front_porch = 0x4b;
	bios->fmaxvco = 256000;
	bios->fminvco = 128000;
	bios->fp.duallink_transition_clk = 90000;

	bmp_version_major = bmp[5];
	bmp_version_minor = bmp[6];

	NV_TRACE(dev, "BMP version %d.%d\n",
		 bmp_version_major, bmp_version_minor);

	/*
	 * Make sure that 0x36 is blank and can't be mistaken for a DCB
	 * pointer on early versions
	 */
	if (bmp_version_major < 5)
		*(uint16_t *)&bios->data[0x36] = 0;

	/*
	 * Seems that the minor version was 1 for all major versions prior
	 * to 5. Version 6 could theoretically exist, but I suspect BIT
	 * happened instead.
	 */
	if ((bmp_version_major < 5 && bmp_version_minor != 1) || bmp_version_major > 5) {
		NV_ERROR(dev, "You have an unsupported BMP version. "
				"Please send in your bios\n");
		return -ENOSYS;
	}

	/*
	 * Known (or guessed) structure length per BMP revision; gates which
	 * optional fields are safe to read further below.
	 */
	if (bmp_version_major == 0)
		/* nothing that's currently useful in this version */
		return 0;
	else if (bmp_version_major == 1)
		bmplength = 44; /* exact for 1.01 */
	else if (bmp_version_major == 2)
		bmplength = 48; /* exact for 2.01 */
	else if (bmp_version_major == 3)
		bmplength = 54;
		/* guessed - mem init tables added in this version */
	else if (bmp_version_major == 4 || bmp_version_minor < 0x1)
		/* don't know if 5.0 exists... */
		bmplength = 62;
		/* guessed - BMP I2C indices added in version 4*/
	else if (bmp_version_minor < 0x6)
		bmplength = 67; /* exact for 5.01 */
	else if (bmp_version_minor < 0x10)
		bmplength = 75; /* exact for 5.06 */
	else if (bmp_version_minor == 0x10)
		bmplength = 89; /* exact for 5.10h */
	else if (bmp_version_minor < 0x14)
		bmplength = 118; /* exact for 5.11h */
	else if (bmp_version_minor < 0x24)
		/*
		 * Not sure of version where pll limits came in;
		 * certainly exist by 0x24 though.
		 */
		/* length not exact: this is long enough to get lvds members */
		bmplength = 123;
	else if (bmp_version_minor < 0x27)
		/*
		 * Length not exact: this is long enough to get pll limit
		 * member
		 */
		bmplength = 144;
	else
		/*
		 * Length not exact: this is long enough to get dual link
		 * transition clock.
		 */
		bmplength = 158;

	/* checksum */
	if (nv_cksum(bmp, 8)) {
		NV_ERROR(dev, "Bad BMP checksum\n");
		return -EINVAL;
	}

	/*
	 * Bit 4 seems to indicate either a mobile bios or a quadro card --
	 * mobile behaviour consistent (nv11+), quadro only seen nv18gl-nv36gl
	 * (not nv10gl), bit 5 that the flat panel tables are present, and
	 * bit 6 a tv bios.
	 */
	bios->feature_byte = bmp[9];

	parse_bios_version(dev, bios, offset + 10);

	/* pre-5.10h BIOSes use the legacy init table format */
	if (bmp_version_major < 5 || bmp_version_minor < 0x10)
		bios->old_style_init = true;
	legacy_scripts_offset = 18;
	if (bmp_version_major < 2)
		legacy_scripts_offset -= 4; /* BMP 1.x keeps the scripts 4 bytes earlier */
	bios->init_script_tbls_ptr = ROM16(bmp[legacy_scripts_offset]);
	bios->extra_init_script_tbl_ptr = ROM16(bmp[legacy_scripts_offset + 2]);

	if (bmp_version_major > 2) {	/* appears in BMP 3 */
		bios->legacy.mem_init_tbl_ptr = ROM16(bmp[24]);
		bios->legacy.sdr_seq_tbl_ptr = ROM16(bmp[26]);
		bios->legacy.ddr_seq_tbl_ptr = ROM16(bmp[28]);
	}

	/*
	 * NOTE(review): 0x48 is an absolute image offset, while the >61 case
	 * below uses a BMP-relative offset (offset + 54) -- looks deliberate
	 * for BMP 2 & 3, but worth confirming against real images.
	 */
	legacy_i2c_offset = 0x48; /* BMP version 2 & 3 */
	if (bmplength > 61)
		legacy_i2c_offset = offset + 54;
	bios->legacy.i2c_indices.crt = bios->data[legacy_i2c_offset];
	bios->legacy.i2c_indices.tv = bios->data[legacy_i2c_offset + 1];
	bios->legacy.i2c_indices.panel = bios->data[legacy_i2c_offset + 2];
	bios->bdcb.dcb.i2c[0].write = bios->data[legacy_i2c_offset + 4];
	bios->bdcb.dcb.i2c[0].read = bios->data[legacy_i2c_offset + 5];
	bios->bdcb.dcb.i2c[1].write = bios->data[legacy_i2c_offset + 6];
	bios->bdcb.dcb.i2c[1].read = bios->data[legacy_i2c_offset + 7];

	/* every optional field below is gated on the length deduced above */
	if (bmplength > 74) {
		bios->fmaxvco = ROM32(bmp[67]);
		bios->fminvco = ROM32(bmp[71]);
	}
	if (bmplength > 88)
		parse_script_table_pointers(bios, offset + 75);
	if (bmplength > 94) {
		bios->tmds.output0_script_ptr = ROM16(bmp[89]);
		bios->tmds.output1_script_ptr = ROM16(bmp[91]);
		/*
		 * Never observed in use with lvds scripts, but is reused for
		 * 18/24 bit panel interface default for EDID equipped panels
		 * (if_is_24bit not set directly to avoid any oscillation).
		 */
		bios->legacy.lvds_single_a_script_ptr = ROM16(bmp[95]);
	}
	if (bmplength > 108) {
		bios->fp.fptablepointer = ROM16(bmp[105]);
		bios->fp.fpxlatetableptr = ROM16(bmp[107]);
		bios->fp.xlatwidth = 1;
	}
	if (bmplength > 120) {
		bios->fp.lvdsmanufacturerpointer = ROM16(bmp[117]);
		bios->fp.fpxlatemanufacturertableptr = ROM16(bmp[119]);
	}
	if (bmplength > 143)
		bios->pll_limit_tbl_ptr = ROM16(bmp[142]);

	if (bmplength > 157)
		bios->fp.duallink_transition_clk = ROM16(bmp[156]) * 10;

	return 0;
}
5000
static uint16_t findstr(uint8_t *data, int n, const uint8_t *str, int len)
{
	/*
	 * Naive search for the byte string 'str' (length 'len') within the
	 * first 'n' bytes of 'data'.  Returns the offset of the first match,
	 * or 0 when not found -- note a genuine match at offset 0 is
	 * indistinguishable from failure, so callers use this only for
	 * signatures that cannot sit at the start of the image.
	 */
	int start;

	for (start = 0; start + len <= n; start++) {
		int matched = 0;

		while (matched < len && data[start + matched] == str[matched])
			matched++;
		if (matched == len)
			return start;
	}

	return 0;
}
5015
5016static int
5017read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, int index, struct dcb_i2c_entry *i2c)
5018{
5019 uint8_t dcb_i2c_ver = dcb_version, headerlen = 0, entry_len = 4;
5020 int i2c_entries = DCB_MAX_NUM_I2C_ENTRIES;
5021 int recordoffset = 0, rdofs = 1, wrofs = 0;
5022 uint8_t port_type = 0;
5023
5024 if (!i2ctable)
5025 return -EINVAL;
5026
5027 if (dcb_version >= 0x30) {
5028 if (i2ctable[0] != dcb_version) /* necessary? */
5029 NV_WARN(dev,
5030 "DCB I2C table version mismatch (%02X vs %02X)\n",
5031 i2ctable[0], dcb_version);
5032 dcb_i2c_ver = i2ctable[0];
5033 headerlen = i2ctable[1];
5034 if (i2ctable[2] <= DCB_MAX_NUM_I2C_ENTRIES)
5035 i2c_entries = i2ctable[2];
5036 else
5037 NV_WARN(dev,
5038 "DCB I2C table has more entries than indexable "
5039 "(%d entries, max index 15)\n", i2ctable[2]);
5040 entry_len = i2ctable[3];
5041 /* [4] is i2c_default_indices, read in parse_dcb_table() */
5042 }
5043 /*
5044 * It's your own fault if you call this function on a DCB 1.1 BIOS --
5045 * the test below is for DCB 1.2
5046 */
5047 if (dcb_version < 0x14) {
5048 recordoffset = 2;
5049 rdofs = 0;
5050 wrofs = 1;
5051 }
5052
5053 if (index == 0xf)
5054 return 0;
5055 if (index > i2c_entries) {
5056 NV_ERROR(dev, "DCB I2C index too big (%d > %d)\n",
5057 index, i2ctable[2]);
5058 return -ENOENT;
5059 }
5060 if (i2ctable[headerlen + entry_len * index + 3] == 0xff) {
5061 NV_ERROR(dev, "DCB I2C entry invalid\n");
5062 return -EINVAL;
5063 }
5064
5065 if (dcb_i2c_ver >= 0x30) {
5066 port_type = i2ctable[headerlen + recordoffset + 3 + entry_len * index];
5067
5068 /*
5069 * Fixup for chips using same address offset for read and
5070 * write.
5071 */
5072 if (port_type == 4) /* seen on C51 */
5073 rdofs = wrofs = 1;
5074 if (port_type >= 5) /* G80+ */
5075 rdofs = wrofs = 0;
5076 }
5077
5078 if (dcb_i2c_ver >= 0x40 && port_type != 5 && port_type != 6)
5079 NV_WARN(dev, "DCB I2C table has port type %d\n", port_type);
5080
5081 i2c->port_type = port_type;
5082 i2c->read = i2ctable[headerlen + recordoffset + rdofs + entry_len * index];
5083 i2c->write = i2ctable[headerlen + recordoffset + wrofs + entry_len * index];
5084
5085 return 0;
5086}
5087
5088static struct dcb_gpio_entry *
5089new_gpio_entry(struct nvbios *bios)
5090{
5091 struct parsed_dcb_gpio *gpio = &bios->bdcb.gpio;
5092
5093 return &gpio->entry[gpio->entries++];
5094}
5095
5096struct dcb_gpio_entry *
5097nouveau_bios_gpio_entry(struct drm_device *dev, enum dcb_gpio_tag tag)
5098{
5099 struct drm_nouveau_private *dev_priv = dev->dev_private;
5100 struct nvbios *bios = &dev_priv->VBIOS;
5101 int i;
5102
5103 for (i = 0; i < bios->bdcb.gpio.entries; i++) {
5104 if (bios->bdcb.gpio.entry[i].tag != tag)
5105 continue;
5106
5107 return &bios->bdcb.gpio.entry[i];
5108 }
5109
5110 return NULL;
5111}
5112
5113static void
5114parse_dcb30_gpio_entry(struct nvbios *bios, uint16_t offset)
5115{
5116 struct dcb_gpio_entry *gpio;
5117 uint16_t ent = ROM16(bios->data[offset]);
5118 uint8_t line = ent & 0x1f,
5119 tag = ent >> 5 & 0x3f,
5120 flags = ent >> 11 & 0x1f;
5121
5122 if (tag == 0x3f)
5123 return;
5124
5125 gpio = new_gpio_entry(bios);
5126
5127 gpio->tag = tag;
5128 gpio->line = line;
5129 gpio->invert = flags != 4;
5130}
5131
5132static void
5133parse_dcb40_gpio_entry(struct nvbios *bios, uint16_t offset)
5134{
5135 struct dcb_gpio_entry *gpio;
5136 uint32_t ent = ROM32(bios->data[offset]);
5137 uint8_t line = ent & 0x1f,
5138 tag = ent >> 8 & 0xff;
5139
5140 if (tag == 0xff)
5141 return;
5142
5143 gpio = new_gpio_entry(bios);
5144
5145 /* Currently unused, we may need more fields parsed at some
5146 * point. */
5147 gpio->tag = tag;
5148 gpio->line = line;
5149}
5150
static void
parse_dcb_gpio_table(struct nvbios *bios)
{
	/*
	 * Fills bios->bdcb.gpio from the DCB GPIO table (DCB 3.0 uses 2-byte
	 * entries, 4.0 uses 4-byte entries).  DCB 2.2+ cards without a real
	 * GPIO table instead expose TVDAC GPIO info at a fixed location just
	 * before the DCB itself.
	 */
	struct drm_device *dev = bios->dev;
	uint16_t gpio_table_ptr = bios->bdcb.gpio_table_ptr;
	uint8_t *gpio_table = &bios->data[gpio_table_ptr];
	/* table header: [1] header length, [2] entry count, [3] entry size */
	int header_len = gpio_table[1],
	    entries = gpio_table[2],
	    entry_len = gpio_table[3];
	void (*parse_entry)(struct nvbios *, uint16_t) = NULL;
	int i;

	if (bios->bdcb.version >= 0x40) {
		if (gpio_table_ptr && entry_len != 4) {
			NV_WARN(dev, "Invalid DCB GPIO table entry length.\n");
			return;
		}

		parse_entry = parse_dcb40_gpio_entry;

	} else if (bios->bdcb.version >= 0x30) {
		if (gpio_table_ptr && entry_len != 2) {
			NV_WARN(dev, "Invalid DCB GPIO table entry length.\n");
			return;
		}

		parse_entry = parse_dcb30_gpio_entry;

	} else if (bios->bdcb.version >= 0x22) {
		/*
		 * DCBs older than v3.0 don't really have a GPIO
		 * table, instead they keep some GPIO info at fixed
		 * locations.
		 */
		uint16_t dcbptr = ROM16(bios->data[0x36]);
		/* NOTE(review): assumes dcbptr >= 5; the caller has only
		 * verified it is non-zero -- confirm against real images */
		uint8_t *tvdac_gpio = &bios->data[dcbptr - 5];

		if (tvdac_gpio[0] & 1) {
			struct dcb_gpio_entry *gpio = new_gpio_entry(bios);

			gpio->tag = DCB_GPIO_TVDAC0;
			gpio->line = tvdac_gpio[1] >> 4;
			gpio->invert = tvdac_gpio[0] & 2;
		}
	}

	if (!gpio_table_ptr)
		return;

	if (entries > DCB_MAX_NUM_GPIO_ENTRIES) {
		NV_WARN(dev, "Too many entries in the DCB GPIO table.\n");
		entries = DCB_MAX_NUM_GPIO_ENTRIES;
	}

	for (i = 0; i < entries; i++)
		parse_entry(bios, gpio_table_ptr + header_len + entry_len * i);
}
5208
5209struct dcb_connector_table_entry *
5210nouveau_bios_connector_entry(struct drm_device *dev, int index)
5211{
5212 struct drm_nouveau_private *dev_priv = dev->dev_private;
5213 struct nvbios *bios = &dev_priv->VBIOS;
5214 struct dcb_connector_table_entry *cte;
5215
5216 if (index >= bios->bdcb.connector.entries)
5217 return NULL;
5218
5219 cte = &bios->bdcb.connector.entry[index];
5220 if (cte->type == 0xff)
5221 return NULL;
5222
5223 return cte;
5224}
5225
static void
parse_dcb_connector_table(struct nvbios *bios)
{
	/*
	 * Parses the DCB connector table into bios->bdcb.connector.  Only
	 * table versions 0x30/0x40 with 2- or 4-byte entries are accepted.
	 * For each entry the raw value, type, index and a hotplug GPIO tag
	 * (decoded from the 0x00033000 bits) are recorded.
	 */
	struct drm_device *dev = bios->dev;
	struct dcb_connector_table *ct = &bios->bdcb.connector;
	struct dcb_connector_table_entry *cte;
	uint8_t *conntab = &bios->data[bios->bdcb.connector_table_ptr];
	uint8_t *entry;
	int i;

	if (!bios->bdcb.connector_table_ptr) {
		NV_DEBUG(dev, "No DCB connector table present\n");
		return;
	}

	/* header: [0] version, [1] header length, [2] entries, [3] entry size */
	NV_INFO(dev, "DCB connector table: VHER 0x%02x %d %d %d\n",
		conntab[0], conntab[1], conntab[2], conntab[3]);
	if ((conntab[0] != 0x30 && conntab[0] != 0x40) ||
	    (conntab[3] != 2 && conntab[3] != 4)) {
		NV_ERROR(dev, " Unknown! Please report.\n");
		return;
	}

	ct->entries = conntab[2];

	entry = conntab + conntab[1];
	cte = &ct->entry[0];
	for (i = 0; i < conntab[2]; i++, entry += conntab[3], cte++) {
		if (conntab[3] == 2)
			cte->entry = ROM16(entry[0]);
		else
			cte->entry = ROM32(entry[0]);
		cte->type = (cte->entry & 0x000000ff) >> 0;
		cte->index = (cte->entry & 0x00000f00) >> 8;
		/* map the hotplug bits to a GPIO tag; 0xff = no tag */
		switch (cte->entry & 0x00033000) {
		case 0x00001000:
			cte->gpio_tag = 0x07;
			break;
		case 0x00002000:
			cte->gpio_tag = 0x08;
			break;
		case 0x00010000:
			cte->gpio_tag = 0x51;
			break;
		case 0x00020000:
			cte->gpio_tag = 0x52;
			break;
		default:
			cte->gpio_tag = 0xff;
			break;
		}

		/* type 0xff entries are unpopulated; don't log them */
		if (cte->type == 0xff)
			continue;

		NV_INFO(dev, " %d: 0x%08x: type 0x%02x idx %d tag 0x%02x\n",
			i, cte->entry, cte->type, cte->index, cte->gpio_tag);
	}
}
5285
5286static struct dcb_entry *new_dcb_entry(struct parsed_dcb *dcb)
5287{
5288 struct dcb_entry *entry = &dcb->entry[dcb->entries];
5289
5290 memset(entry, 0, sizeof(struct dcb_entry));
5291 entry->index = dcb->entries++;
5292
5293 return entry;
5294}
5295
5296static void fabricate_vga_output(struct parsed_dcb *dcb, int i2c, int heads)
5297{
5298 struct dcb_entry *entry = new_dcb_entry(dcb);
5299
5300 entry->type = 0;
5301 entry->i2c_index = i2c;
5302 entry->heads = heads;
5303 entry->location = DCB_LOC_ON_CHIP;
5304 /* "or" mostly unused in early gen crt modesetting, 0 is fine */
5305}
5306
5307static void fabricate_dvi_i_output(struct parsed_dcb *dcb, bool twoHeads)
5308{
5309 struct dcb_entry *entry = new_dcb_entry(dcb);
5310
5311 entry->type = 2;
5312 entry->i2c_index = LEGACY_I2C_PANEL;
5313 entry->heads = twoHeads ? 3 : 1;
5314 entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */
5315 entry->or = 1; /* means |0x10 gets set on CRE_LCD__INDEX */
5316 entry->duallink_possible = false; /* SiI164 and co. are single link */
5317
5318#if 0
5319 /*
5320 * For dvi-a either crtc probably works, but my card appears to only
5321 * support dvi-d. "nvidia" still attempts to program it for dvi-a,
5322 * doing the full fp output setup (program 0x6808.. fp dimension regs,
5323 * setting 0x680848 to 0x10000111 to enable, maybe setting 0x680880);
5324 * the monitor picks up the mode res ok and lights up, but no pixel
5325 * data appears, so the board manufacturer probably connected up the
5326 * sync lines, but missed the video traces / components
5327 *
5328 * with this introduction, dvi-a left as an exercise for the reader.
5329 */
5330 fabricate_vga_output(dcb, LEGACY_I2C_PANEL, entry->heads);
5331#endif
5332}
5333
5334static void fabricate_tv_output(struct parsed_dcb *dcb, bool twoHeads)
5335{
5336 struct dcb_entry *entry = new_dcb_entry(dcb);
5337
5338 entry->type = 1;
5339 entry->i2c_index = LEGACY_I2C_TV;
5340 entry->heads = twoHeads ? 3 : 1;
5341 entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */
5342}
5343
static bool
parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
		  uint32_t conn, uint32_t conf, struct dcb_entry *entry)
{
	/*
	 * Decodes a DCB 2.0+ output entry: 'conn' is the connection dword,
	 * 'conf' the config dword.  Returns false only for the type 0xe
	 * pseudo-entry, which terminates parsing (its slot is released).
	 */

	/* connection dword: fields common to all output types */
	entry->type = conn & 0xf;
	entry->i2c_index = (conn >> 4) & 0xf;
	entry->heads = (conn >> 8) & 0xf;
	if (bdcb->version >= 0x40)
		entry->connector = (conn >> 12) & 0xf;	/* connector table index */
	entry->bus = (conn >> 16) & 0xf;
	entry->location = (conn >> 20) & 0x3;
	entry->or = (conn >> 24) & 0xf;
	/*
	 * Normal entries consist of a single bit, but dual link has the
	 * next most significant bit set too
	 */
	entry->duallink_possible =
			((1 << (ffs(entry->or) - 1)) * 3 == entry->or);

	/* config dword: interpretation depends on the output type */
	switch (entry->type) {
	case OUTPUT_ANALOG:
		/*
		 * Although the rest of a CRT conf dword is usually
		 * zeros, mac biosen have stuff there so we must mask
		 */
		entry->crtconf.maxfreq = (bdcb->version < 0x30) ?
					 (conf & 0xffff) * 10 :
					 (conf & 0xff) * 10000;
		break;
	case OUTPUT_LVDS:
	{
		uint32_t mask;
		if (conf & 0x1)
			entry->lvdsconf.use_straps_for_mode = true;
		if (bdcb->version < 0x22) {
			mask = ~0xd;
			/*
			 * The laptop in bug 14567 lies and claims to not use
			 * straps when it does, so assume all DCB 2.0 laptops
			 * use straps, until a broken EDID using one is produced
			 */
			entry->lvdsconf.use_straps_for_mode = true;
			/*
			 * Both 0x4 and 0x8 show up in v2.0 tables; assume they
			 * mean the same thing (probably wrong, but might work)
			 */
			if (conf & 0x4 || conf & 0x8)
				entry->lvdsconf.use_power_scripts = true;
		} else {
			mask = ~0x5;
			if (conf & 0x4)
				entry->lvdsconf.use_power_scripts = true;
		}
		if (conf & mask) {
			/*
			 * Until we even try to use these on G8x, it's
			 * useless reporting unknown bits. They all are.
			 */
			if (bdcb->version >= 0x40)
				break;

			NV_ERROR(dev, "Unknown LVDS configuration bits, "
				      "please report\n");
		}
		break;
	}
	case OUTPUT_TV:
	{
		if (bdcb->version >= 0x30)
			entry->tvconf.has_component_output = conf & (0x8 << 4);
		else
			entry->tvconf.has_component_output = false;

		break;
	}
	case OUTPUT_DP:
		entry->dpconf.sor.link = (conf & 0x00000030) >> 4;
		entry->dpconf.link_bw = (conf & 0x00e00000) >> 21;
		/* lane count: 0xf -> 4 lanes, 0x3 -> 2 lanes, else 1 */
		switch ((conf & 0x0f000000) >> 24) {
		case 0xf:
			entry->dpconf.link_nr = 4;
			break;
		case 0x3:
			entry->dpconf.link_nr = 2;
			break;
		default:
			entry->dpconf.link_nr = 1;
			break;
		}
		break;
	case OUTPUT_TMDS:
		entry->tmdsconf.sor.link = (conf & 0x00000030) >> 4;
		break;
	case 0xe:
		/* weird g80 mobile type that "nv" treats as a terminator */
		bdcb->dcb.entries--;
		return false;
	}

	/* unsure what DCB version introduces this, 3.0? */
	if (conf & 0x100000)
		entry->i2c_upper_default = true;

	return true;
}
5449
static bool
parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb,
		  uint32_t conn, uint32_t conf, struct dcb_entry *entry)
{
	/*
	 * Decodes a DCB 1.5 entry into *entry.  Only a fixed whitelist of
	 * known 'conn' values is accepted; an unknown non-TV value empties
	 * the table (so modesetting fails and the message gets noticed) and
	 * returns false.
	 */
	if (conn != 0xf0003f00 && conn != 0xf2247f10 && conn != 0xf2204001 &&
	    conn != 0xf2204301 && conn != 0xf2204311 && conn != 0xf2208001 &&
	    conn != 0xf2244001 && conn != 0xf2244301 && conn != 0xf2244311 &&
	    conn != 0xf4204011 && conn != 0xf4208011 && conn != 0xf4248011 &&
	    conn != 0xf2045ff2 && conn != 0xf2045f14 && conn != 0xf207df14 &&
	    conn != 0xf2205004 && conn != 0xf2209004) {
		NV_ERROR(dev, "Unknown DCB 1.5 entry, please report\n");

		/* cause output setting to fail for !TV, so message is seen */
		if ((conn & 0xf) != 0x1)
			dcb->entries = 0;

		return false;
	}
	/* most of the below is a "best guess" atm */
	entry->type = conn & 0xf;
	if (entry->type == 2)
		/* another way of specifying straps based lvds... */
		entry->type = OUTPUT_LVDS;
	if (entry->type == 4) { /* digital */
		if (conn & 0x10)
			entry->type = OUTPUT_LVDS;
		else
			entry->type = OUTPUT_TMDS;
	}
	/* what's in bits 5-13? could be some encoder maker thing, in tv case */
	entry->i2c_index = (conn >> 14) & 0xf;
	/* raw heads field is in range 0-1, so move to 1-2 */
	entry->heads = ((conn >> 18) & 0x7) + 1;
	entry->location = (conn >> 21) & 0xf;
	/* unused: entry->bus = (conn >> 25) & 0x7; */
	/* set or to be same as heads -- hopefully safe enough */
	entry->or = entry->heads;
	entry->duallink_possible = false;

	switch (entry->type) {
	case OUTPUT_ANALOG:
		entry->crtconf.maxfreq = (conf & 0xffff) * 10;
		break;
	case OUTPUT_LVDS:
		/*
		 * This is probably buried in conn's unknown bits.
		 * This will upset EDID-ful models, if they exist
		 */
		entry->lvdsconf.use_straps_for_mode = true;
		entry->lvdsconf.use_power_scripts = true;
		break;
	case OUTPUT_TMDS:
		/*
		 * Invent a DVI-A output, by copying the fields of the DVI-D
		 * output; reported to work by math_b on an NV20(!).
		 */
		fabricate_vga_output(dcb, entry->i2c_index, entry->heads);
		break;
	case OUTPUT_TV:
		entry->tvconf.has_component_output = false;
		break;
	}

	return true;
}
5515
5516static bool parse_dcb_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
5517 uint32_t conn, uint32_t conf)
5518{
5519 struct dcb_entry *entry = new_dcb_entry(&bdcb->dcb);
5520 bool ret;
5521
5522 if (bdcb->version >= 0x20)
5523 ret = parse_dcb20_entry(dev, bdcb, conn, conf, entry);
5524 else
5525 ret = parse_dcb15_entry(dev, &bdcb->dcb, conn, conf, entry);
5526 if (!ret)
5527 return ret;
5528
5529 read_dcb_i2c_entry(dev, bdcb->version, bdcb->i2c_table,
5530 entry->i2c_index, &bdcb->dcb.i2c[entry->i2c_index]);
5531
5532 return true;
5533}
5534
5535static
5536void merge_like_dcb_entries(struct drm_device *dev, struct parsed_dcb *dcb)
5537{
5538 /*
5539 * DCB v2.0 lists each output combination separately.
5540 * Here we merge compatible entries to have fewer outputs, with
5541 * more options
5542 */
5543
5544 int i, newentries = 0;
5545
5546 for (i = 0; i < dcb->entries; i++) {
5547 struct dcb_entry *ient = &dcb->entry[i];
5548 int j;
5549
5550 for (j = i + 1; j < dcb->entries; j++) {
5551 struct dcb_entry *jent = &dcb->entry[j];
5552
5553 if (jent->type == 100) /* already merged entry */
5554 continue;
5555
5556 /* merge heads field when all other fields the same */
5557 if (jent->i2c_index == ient->i2c_index &&
5558 jent->type == ient->type &&
5559 jent->location == ient->location &&
5560 jent->or == ient->or) {
5561 NV_TRACE(dev, "Merging DCB entries %d and %d\n",
5562 i, j);
5563 ient->heads |= jent->heads;
5564 jent->type = 100; /* dummy value */
5565 }
5566 }
5567 }
5568
5569 /* Compact entries merged into others out of dcb */
5570 for (i = 0; i < dcb->entries; i++) {
5571 if (dcb->entry[i].type == 100)
5572 continue;
5573
5574 if (newentries != i) {
5575 dcb->entry[newentries] = dcb->entry[i];
5576 dcb->entry[newentries].index = newentries;
5577 }
5578 newentries++;
5579 }
5580
5581 dcb->entries = newentries;
5582}
5583
static int parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
{
	/*
	 * Top-level DCB parse: locates the table via the pointer at image
	 * offset 0x36, validates its version/signature, pulls in the I2C,
	 * GPIO and connector sub-tables and then decodes each output entry.
	 * Pre-DCB and unusable DCB versions get fabricated outputs instead.
	 * Returns 0 unless no output entry could be produced.
	 */
	struct bios_parsed_dcb *bdcb = &bios->bdcb;
	struct parsed_dcb *dcb;
	uint16_t dcbptr, i2ctabptr = 0;
	uint8_t *dcbtable;
	uint8_t headerlen = 0x4, entries = DCB_MAX_NUM_ENTRIES;
	bool configblock = true;
	int recordlength = 8, confofs = 4;
	int i;

	dcb = bios->pub.dcb = &bdcb->dcb;
	dcb->entries = 0;

	/* get the offset from 0x36 */
	dcbptr = ROM16(bios->data[0x36]);

	if (dcbptr == 0x0) {
		NV_WARN(dev, "No output data (DCB) found in BIOS, "
			     "assuming a CRT output exists\n");
		/* this situation likely means a really old card, pre DCB */
		fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1);

		if (nv04_tv_identify(dev,
				     bios->legacy.i2c_indices.tv) >= 0)
			fabricate_tv_output(dcb, twoHeads);

		return 0;
	}

	dcbtable = &bios->data[dcbptr];

	/* get DCB version */
	bdcb->version = dcbtable[0];
	NV_TRACE(dev, "Found Display Configuration Block version %d.%d\n",
		 bdcb->version >> 4, bdcb->version & 0xf);

	if (bdcb->version >= 0x20) { /* NV17+ */
		uint32_t sig;

		if (bdcb->version >= 0x30) { /* NV40+ */
			headerlen = dcbtable[1];
			entries = dcbtable[2];
			recordlength = dcbtable[3];
			i2ctabptr = ROM16(dcbtable[4]);
			sig = ROM32(dcbtable[6]);
			bdcb->gpio_table_ptr = ROM16(dcbtable[10]);
			bdcb->connector_table_ptr = ROM16(dcbtable[20]);
		} else {
			/* v2.x: fixed 8-byte header, signature at +4 */
			i2ctabptr = ROM16(dcbtable[2]);
			sig = ROM32(dcbtable[4]);
			headerlen = 8;
		}

		if (sig != 0x4edcbdcb) {
			NV_ERROR(dev, "Bad Display Configuration Block "
				      "signature (%08X)\n", sig);
			return -EINVAL;
		}
	} else if (bdcb->version >= 0x15) { /* some NV11 and NV20 */
		char sig[8] = { 0 };

		/* v1.5 uses an ASCII "DEV_REC" signature just before the table */
		strncpy(sig, (char *)&dcbtable[-7], 7);
		i2ctabptr = ROM16(dcbtable[2]);
		recordlength = 10;
		confofs = 6;

		if (strcmp(sig, "DEV_REC")) {
			NV_ERROR(dev, "Bad Display Configuration Block "
				      "signature (%s)\n", sig);
			return -EINVAL;
		}
	} else {
		/*
		 * v1.4 (some NV15/16, NV11+) seems the same as v1.5, but always
		 * has the same single (crt) entry, even when tv-out present, so
		 * the conclusion is this version cannot really be used.
		 * v1.2 tables (some NV6/10, and NV15+) normally have the same
		 * 5 entries, which are not specific to the card and so no use.
		 * v1.2 does have an I2C table that read_dcb_i2c_table can
		 * handle, but cards exist (nv11 in #14821) with a bad i2c table
		 * pointer, so use the indices parsed in parse_bmp_structure.
		 * v1.1 (NV5+, maybe some NV4) is entirely unhelpful
		 */
		NV_TRACEWARN(dev, "No useful information in BIOS output table; "
				  "adding all possible outputs\n");
		fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1);

		/*
		 * Attempt to detect TV before DVI because the test
		 * for the former is more accurate and it rules the
		 * latter out.
		 */
		if (nv04_tv_identify(dev,
				     bios->legacy.i2c_indices.tv) >= 0)
			fabricate_tv_output(dcb, twoHeads);

		else if (bios->tmds.output0_script_ptr ||
			 bios->tmds.output1_script_ptr)
			fabricate_dvi_i_output(dcb, twoHeads);

		return 0;
	}

	if (!i2ctabptr)
		NV_WARN(dev, "No pointer to DCB I2C port table\n");
	else {
		bdcb->i2c_table = &bios->data[i2ctabptr];
		if (bdcb->version >= 0x30)
			bdcb->i2c_default_indices = bdcb->i2c_table[4];
	}

	parse_dcb_gpio_table(bios);
	parse_dcb_connector_table(bios);

	if (entries > DCB_MAX_NUM_ENTRIES)
		entries = DCB_MAX_NUM_ENTRIES;

	for (i = 0; i < entries; i++) {
		uint32_t connection, config = 0;

		connection = ROM32(dcbtable[headerlen + recordlength * i]);
		if (configblock)
			config = ROM32(dcbtable[headerlen + confofs + recordlength * i]);

		/* seen on an NV11 with DCB v1.5 */
		if (connection == 0x00000000)
			break;

		/* seen on an NV17 with DCB v2.0 */
		if (connection == 0xffffffff)
			break;

		/* low nibble 0xf: skip this slot */
		if ((connection & 0x0000000f) == 0x0000000f)
			continue;

		NV_TRACEWARN(dev, "Raw DCB entry %d: %08x %08x\n",
			     dcb->entries, connection, config);

		if (!parse_dcb_entry(dev, bdcb, connection, config))
			break;
	}

	/*
	 * apart for v2.1+ not being known for requiring merging, this
	 * guarantees dcbent->index is the index of the entry in the rom image
	 */
	if (bdcb->version < 0x21)
		merge_like_dcb_entries(dev, dcb);

	return dcb->entries ? 0 : -ENXIO;
}
5736
5737static void
5738fixup_legacy_connector(struct nvbios *bios)
5739{
5740 struct bios_parsed_dcb *bdcb = &bios->bdcb;
5741 struct parsed_dcb *dcb = &bdcb->dcb;
5742 int high = 0, i;
5743
5744 /*
5745 * DCB 3.0 also has the table in most cases, but there are some cards
5746 * where the table is filled with stub entries, and the DCB entriy
5747 * indices are all 0. We don't need the connector indices on pre-G80
5748 * chips (yet?) so limit the use to DCB 4.0 and above.
5749 */
5750 if (bdcb->version >= 0x40)
5751 return;
5752
5753 /*
5754 * No known connector info before v3.0, so make it up. the rule here
5755 * is: anything on the same i2c bus is considered to be on the same
5756 * connector. any output without an associated i2c bus is assigned
5757 * its own unique connector index.
5758 */
5759 for (i = 0; i < dcb->entries; i++) {
5760 if (dcb->entry[i].i2c_index == 0xf)
5761 continue;
5762
5763 /*
5764 * Ignore the I2C index for on-chip TV-out, as there
5765 * are cards with bogus values (nv31m in bug 23212),
5766 * and it's otherwise useless.
5767 */
5768 if (dcb->entry[i].type == OUTPUT_TV &&
5769 dcb->entry[i].location == DCB_LOC_ON_CHIP) {
5770 dcb->entry[i].i2c_index = 0xf;
5771 continue;
5772 }
5773
5774 dcb->entry[i].connector = dcb->entry[i].i2c_index;
5775 if (dcb->entry[i].connector > high)
5776 high = dcb->entry[i].connector;
5777 }
5778
5779 for (i = 0; i < dcb->entries; i++) {
5780 if (dcb->entry[i].i2c_index != 0xf)
5781 continue;
5782
5783 dcb->entry[i].connector = ++high;
5784 }
5785}
5786
5787static void
5788fixup_legacy_i2c(struct nvbios *bios)
5789{
5790 struct parsed_dcb *dcb = &bios->bdcb.dcb;
5791 int i;
5792
5793 for (i = 0; i < dcb->entries; i++) {
5794 if (dcb->entry[i].i2c_index == LEGACY_I2C_CRT)
5795 dcb->entry[i].i2c_index = bios->legacy.i2c_indices.crt;
5796 if (dcb->entry[i].i2c_index == LEGACY_I2C_PANEL)
5797 dcb->entry[i].i2c_index = bios->legacy.i2c_indices.panel;
5798 if (dcb->entry[i].i2c_index == LEGACY_I2C_TV)
5799 dcb->entry[i].i2c_index = bios->legacy.i2c_indices.tv;
5800 }
5801}
5802
static int load_nv17_hwsq_ucode_entry(struct drm_device *dev, struct nvbios *bios, uint16_t hwsq_offset, int entry)
{
	/*
	 * Upload one entry of the HW sequencer ("HWSQ") microcode table.
	 *
	 * The header following the "HWSQ" signature has the number of entries,
	 * and the entry size
	 *
	 * An entry consists of a dword to write to the sequencer control reg
	 * (0x00001304), followed by the ucode bytes, written sequentially,
	 * starting at reg 0x00001400
	 *
	 * Returns 0 on success, -ENOENT if @entry is out of range, -EINVAL
	 * if the table uses an entry size this code does not understand.
	 */

	uint8_t bytes_to_write;
	uint16_t hwsq_entry_offset;
	int i;

	/* header byte 0 = entry count */
	if (bios->data[hwsq_offset] <= entry) {
		NV_ERROR(dev, "Too few entries in HW sequencer table for "
				"requested entry\n");
		return -ENOENT;
	}

	/* header byte 1 = size of each entry in bytes */
	bytes_to_write = bios->data[hwsq_offset + 1];

	/* only the 36-byte layout (4 control + 32 ucode) is known */
	if (bytes_to_write != 36) {
		NV_ERROR(dev, "Unknown HW sequencer entry size\n");
		return -EINVAL;
	}

	NV_TRACE(dev, "Loading NV17 power sequencing microcode\n");

	/* entries follow the 2-byte header back to back */
	hwsq_entry_offset = hwsq_offset + 2 + entry * bytes_to_write;

	/* set sequencer control */
	bios_wr32(bios, 0x00001304, ROM32(bios->data[hwsq_entry_offset]));
	bytes_to_write -= 4;

	/* write ucode */
	for (i = 0; i < bytes_to_write; i += 4)
		bios_wr32(bios, 0x00001400 + i, ROM32(bios->data[hwsq_entry_offset + i + 4]));

	/* twiddle NV_PBUS_DEBUG_4 */
	bios_wr32(bios, NV_PBUS_DEBUG_4, bios_rd32(bios, NV_PBUS_DEBUG_4) | 0x18);

	return 0;
}
5848
5849static int load_nv17_hw_sequencer_ucode(struct drm_device *dev,
5850 struct nvbios *bios)
5851{
5852 /*
5853 * BMP based cards, from NV17, need a microcode loading to correctly
5854 * control the GPIO etc for LVDS panels
5855 *
5856 * BIT based cards seem to do this directly in the init scripts
5857 *
5858 * The microcode entries are found by the "HWSQ" signature.
5859 */
5860
5861 const uint8_t hwsq_signature[] = { 'H', 'W', 'S', 'Q' };
5862 const int sz = sizeof(hwsq_signature);
5863 int hwsq_offset;
5864
5865 hwsq_offset = findstr(bios->data, bios->length, hwsq_signature, sz);
5866 if (!hwsq_offset)
5867 return 0;
5868
5869 /* always use entry 0? */
5870 return load_nv17_hwsq_ucode_entry(dev, bios, hwsq_offset + sz, 0);
5871}
5872
5873uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev)
5874{
5875 struct drm_nouveau_private *dev_priv = dev->dev_private;
5876 struct nvbios *bios = &dev_priv->VBIOS;
5877 const uint8_t edid_sig[] = {
5878 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
5879 uint16_t offset = 0;
5880 uint16_t newoffset;
5881 int searchlen = NV_PROM_SIZE;
5882
5883 if (bios->fp.edid)
5884 return bios->fp.edid;
5885
5886 while (searchlen) {
5887 newoffset = findstr(&bios->data[offset], searchlen,
5888 edid_sig, 8);
5889 if (!newoffset)
5890 return NULL;
5891 offset += newoffset;
5892 if (!nv_cksum(&bios->data[offset], EDID1_LEN))
5893 break;
5894
5895 searchlen -= offset;
5896 offset++;
5897 }
5898
5899 NV_TRACE(dev, "Found EDID in BIOS\n");
5900
5901 return bios->fp.edid = &bios->data[offset];
5902}
5903
5904void
5905nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
5906 struct dcb_entry *dcbent)
5907{
5908 struct drm_nouveau_private *dev_priv = dev->dev_private;
5909 struct nvbios *bios = &dev_priv->VBIOS;
5910 struct init_exec iexec = { true, false };
5911
5912 bios->display.output = dcbent;
5913 parse_init_table(bios, table, &iexec);
5914 bios->display.output = NULL;
5915}
5916
5917static bool NVInitVBIOS(struct drm_device *dev)
5918{
5919 struct drm_nouveau_private *dev_priv = dev->dev_private;
5920 struct nvbios *bios = &dev_priv->VBIOS;
5921
5922 memset(bios, 0, sizeof(struct nvbios));
5923 bios->dev = dev;
5924
5925 if (!NVShadowVBIOS(dev, bios->data))
5926 return false;
5927
5928 bios->length = NV_PROM_SIZE;
5929 return true;
5930}
5931
5932static int nouveau_parse_vbios_struct(struct drm_device *dev)
5933{
5934 struct drm_nouveau_private *dev_priv = dev->dev_private;
5935 struct nvbios *bios = &dev_priv->VBIOS;
5936 const uint8_t bit_signature[] = { 0xff, 0xb8, 'B', 'I', 'T' };
5937 const uint8_t bmp_signature[] = { 0xff, 0x7f, 'N', 'V', 0x0 };
5938 int offset;
5939
5940 offset = findstr(bios->data, bios->length,
5941 bit_signature, sizeof(bit_signature));
5942 if (offset) {
5943 NV_TRACE(dev, "BIT BIOS found\n");
5944 return parse_bit_structure(bios, offset + 6);
5945 }
5946
5947 offset = findstr(bios->data, bios->length,
5948 bmp_signature, sizeof(bmp_signature));
5949 if (offset) {
5950 NV_TRACE(dev, "BMP BIOS found\n");
5951 return parse_bmp_structure(dev, bios, offset);
5952 }
5953
5954 NV_ERROR(dev, "No known BIOS signature found\n");
5955 return -ENODEV;
5956}
5957
/*
 * Execute the VBIOS init tables to bring the hardware to a POSTed state.
 * Must run with the VGA CRTC registers unlocked; the ordering of the
 * steps below follows what the VBIOS' own parser does.
 */
int
nouveau_run_vbios_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvbios *bios = &dev_priv->VBIOS;
	int i, ret = 0;

	/* unlock CRTC regs and select the head scripts should target */
	NVLockVgaCrtcs(dev, false);
	if (nv_two_heads(dev))
		NVSetOwner(dev, bios->state.crtchead);

	if (bios->major_version < 5)	/* BMP only */
		load_nv17_hw_sequencer_ucode(dev, bios);

	/* reset per-run LVDS script bookkeeping before executing anything */
	if (bios->execute) {
		bios->fp.last_script_invoc = 0;
		bios->fp.lvds_init_run = false;
	}

	parse_init_tables(bios);

	/*
	 * Runs some additional script seen on G8x VBIOSen.  The VBIOS'
	 * parser will run this right after the init tables, the binary
	 * driver appears to run it at some point later.
	 */
	if (bios->some_script_ptr) {
		struct init_exec iexec = {true, false};

		NV_INFO(dev, "Parsing VBIOS init table at offset 0x%04X\n",
			bios->some_script_ptr);
		parse_init_table(bios, bios->some_script_ptr, &iexec);
	}

	/* on G80+, run the display scripts for every parsed DCB output */
	if (dev_priv->card_type >= NV_50) {
		for (i = 0; i < bios->bdcb.dcb.entries; i++) {
			nouveau_bios_run_display_table(dev,
						       &bios->bdcb.dcb.entry[i],
						       0, 0);
		}
	}

	/* re-lock the CRTC regs now init is complete */
	NVLockVgaCrtcs(dev, true);

	return ret;
}
6004
6005static void
6006nouveau_bios_i2c_devices_takedown(struct drm_device *dev)
6007{
6008 struct drm_nouveau_private *dev_priv = dev->dev_private;
6009 struct nvbios *bios = &dev_priv->VBIOS;
6010 struct dcb_i2c_entry *entry;
6011 int i;
6012
6013 entry = &bios->bdcb.dcb.i2c[0];
6014 for (i = 0; i < DCB_MAX_NUM_I2C_ENTRIES; i++, entry++)
6015 nouveau_i2c_fini(dev, entry);
6016}
6017
/*
 * Top-level VBIOS bring-up: shadow the ROM, parse its structures, and —
 * if the card has not been POSTed — execute the init tables.  Returns 0
 * on success (including the "version 0 bios" early-out) or a negative
 * errno.
 */
int
nouveau_bios_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvbios *bios = &dev_priv->VBIOS;
	uint32_t saved_nv_pextdev_boot_0;
	bool was_locked;
	int ret;

	/* publish the public subset before parsing fills it in */
	dev_priv->vbios = &bios->pub;

	if (!NVInitVBIOS(dev))
		return -ENODEV;

	ret = nouveau_parse_vbios_struct(dev);
	if (ret)
		return ret;

	ret = parse_dcb_table(dev, bios, nv_two_heads(dev));
	if (ret)
		return ret;

	/* resolve placeholder i2c indices, then derive connector indices */
	fixup_legacy_i2c(bios);
	fixup_legacy_connector(bios);

	if (!bios->major_version) /* we don't run version 0 bios */
		return 0;

	/* these will need remembering across a suspend */
	saved_nv_pextdev_boot_0 = bios_rd32(bios, NV_PEXTDEV_BOOT_0);
	bios->state.saved_nv_pfb_cfg0 = bios_rd32(bios, NV_PFB_CFG0);

	/* init script execution disabled */
	bios->execute = false;

	/* ... unless card isn't POSTed already */
	if (dev_priv->card_type >= NV_10 &&
	    NVReadVgaCrtc(dev, 0, 0x00) == 0 &&
	    NVReadVgaCrtc(dev, 0, 0x1a) == 0) {
		NV_INFO(dev, "Adaptor not initialised\n");
		if (dev_priv->card_type < NV_50) {
			NV_ERROR(dev, "Unable to POST this chipset\n");
			return -ENODEV;
		}

		NV_INFO(dev, "Running VBIOS init tables\n");
		bios->execute = true;
	}

	/* restore the straps register before any scripts run */
	bios_wr32(bios, NV_PEXTDEV_BOOT_0, saved_nv_pextdev_boot_0);

	ret = nouveau_run_vbios_init(dev);
	if (ret) {
		/* leave no dangling public pointer on failure */
		dev_priv->vbios = NULL;
		return ret;
	}

	/* feature_byte on BMP is poor, but init always sets CR4B */
	was_locked = NVLockVgaCrtcs(dev, false);
	if (bios->major_version < 5)
		bios->is_mobile = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_4B) & 0x40;

	/* all BIT systems need p_f_m_t for digital_min_front_porch */
	if (bios->is_mobile || bios->major_version >= 5)
		ret = parse_fp_mode_table(dev, bios);
	NVLockVgaCrtcs(dev, was_locked);

	/* allow subsequent scripts to execute */
	bios->execute = true;

	return 0;
}
6090
/* Release VBIOS-owned resources: currently only the DCB i2c channels. */
void
nouveau_bios_takedown(struct drm_device *dev)
{
	nouveau_bios_i2c_devices_takedown(dev);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
new file mode 100644
index 000000000000..1d5f10bd78ed
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -0,0 +1,289 @@
1/*
2 * Copyright 2007-2008 Nouveau Project
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24#ifndef __NOUVEAU_BIOS_H__
25#define __NOUVEAU_BIOS_H__
26
27#include "nvreg.h"
28#include "nouveau_i2c.h"
29
30#define DCB_MAX_NUM_ENTRIES 16
31#define DCB_MAX_NUM_I2C_ENTRIES 16
32#define DCB_MAX_NUM_GPIO_ENTRIES 32
33#define DCB_MAX_NUM_CONNECTOR_ENTRIES 16
34
35#define DCB_LOC_ON_CHIP 0
36
/* One parsed DCB output entry — a display encoder on the board. */
struct dcb_entry {
	int index;	/* may not be raw dcb index if merging has happened */
	uint8_t type;		/* enum nouveau_encoder_type */
	uint8_t i2c_index;	/* DCB i2c table index; 0xf = no i2c bus */
	uint8_t heads;
	uint8_t connector;	/* synthesised pre-v3.0, see fixup_legacy_connector */
	uint8_t bus;
	uint8_t location;	/* DCB_LOC_ON_CHIP or an off-chip location code */
	uint8_t or;		/* enum nouveau_or bits */
	bool duallink_possible;
	/* type-specific configuration; which member is valid follows 'type' */
	union {
		struct sor_conf {
			int link;
		} sorconf;
		struct {
			int maxfreq;
		} crtconf;
		struct {
			struct sor_conf sor;
			bool use_straps_for_mode;
			bool use_power_scripts;
		} lvdsconf;
		struct {
			bool has_component_output;
		} tvconf;
		struct {
			struct sor_conf sor;
			int link_nr;
			int link_bw;
		} dpconf;
		struct {
			struct sor_conf sor;
		} tmdsconf;
	};
	bool i2c_upper_default;
};
73
/* One i2c bus from the DCB i2c table. */
struct dcb_i2c_entry {
	uint8_t port_type;
	uint8_t read, write;	/* port addresses; exact meaning per port_type — confirm in nouveau_i2c.c */
	struct nouveau_i2c_chan *chan;	/* torn down via nouveau_i2c_fini() */
};
79
/* The parsed DCB output entries plus the associated i2c buses. */
struct parsed_dcb {
	int entries;	/* number of valid slots in entry[] */
	struct dcb_entry entry[DCB_MAX_NUM_ENTRIES];
	struct dcb_i2c_entry i2c[DCB_MAX_NUM_I2C_ENTRIES];
};
85
/* Function tags identifying lines in the DCB GPIO table. */
enum dcb_gpio_tag {
	DCB_GPIO_TVDAC0 = 0xc,
	DCB_GPIO_TVDAC1 = 0x2d,
};
90
/* One parsed DCB GPIO table entry. */
struct dcb_gpio_entry {
	enum dcb_gpio_tag tag;	/* what the line is used for */
	int line;		/* GPIO line number */
	bool invert;		/* active-low if set — confirm against users */
};
96
/* All parsed DCB GPIO entries. */
struct parsed_dcb_gpio {
	int entries;	/* number of valid slots in entry[] */
	struct dcb_gpio_entry entry[DCB_MAX_NUM_GPIO_ENTRIES];
};
101
/* One parsed DCB connector table entry. */
struct dcb_connector_table_entry {
	uint32_t entry;		/* raw table dword */
	uint8_t type;
	uint8_t index;
	uint8_t gpio_tag;
};
108
/* All parsed DCB connector table entries. */
struct dcb_connector_table {
	int entries;	/* number of valid slots in entry[] */
	struct dcb_connector_table_entry entry[DCB_MAX_NUM_CONNECTOR_ENTRIES];
};
113
/* Everything parse_dcb_table() extracts from the BIOS image. */
struct bios_parsed_dcb {
	uint8_t version;	/* raw DCB version byte, e.g. 0x30 = 3.0 */

	struct parsed_dcb dcb;

	uint8_t *i2c_table;		/* points into the ROM shadow */
	uint8_t i2c_default_indices;	/* i2c_table[4], DCB 3.0+ only */

	uint16_t gpio_table_ptr;	/* ROM offset; DCB 3.0+ only */
	struct parsed_dcb_gpio gpio;
	uint16_t connector_table_ptr;	/* ROM offset; DCB 3.0+ only */
	struct dcb_connector_table connector;
};
127
/* Output types stored in dcb_entry.type; OUTPUT_ANY is a software wildcard. */
enum nouveau_encoder_type {
	OUTPUT_ANALOG = 0,
	OUTPUT_TV = 1,
	OUTPUT_TMDS = 2,
	OUTPUT_LVDS = 3,
	OUTPUT_DP = 6,
	OUTPUT_ANY = -1
};
136
/* Output-resource bits used in dcb_entry.or. */
enum nouveau_or {
	OUTPUT_A = (1 << 0),
	OUTPUT_B = (1 << 1),
	OUTPUT_C = (1 << 2)
};
142
/* LVDS panel script identifiers. */
enum LVDS_script {
	/* Order *does* matter here */
	LVDS_INIT = 1,
	LVDS_RESET,
	LVDS_BACKLIGHT_ON,
	LVDS_BACKLIGHT_OFF,
	LVDS_PANEL_ON,
	LVDS_PANEL_OFF
};
152
/* changing these requires matching changes to reg tables in nv_get_clock */
#define MAX_PLL_TYPES	4
enum pll_types {
	NVPLL,	/* core clock PLL */
	MPLL,	/* memory clock PLL */
	VPLL1,	/* pixel clock PLL, head 0 */
	VPLL2	/* pixel clock PLL, head 1 */
};
161
/* Operating limits for one PLL, parsed from the BIOS PLL limits table. */
struct pll_lims {
	struct {
		int minfreq;
		int maxfreq;
		int min_inputfreq;
		int max_inputfreq;

		uint8_t min_m;	/* divider limits */
		uint8_t max_m;
		uint8_t min_n;	/* multiplier limits */
		uint8_t max_n;
	} vco1, vco2;

	uint8_t max_log2p;
	/*
	 * for most pre nv50 cards setting a log2P of 7 (the common max_log2p
	 * value) is no different to 6 (at least for vplls) so allowing the MNP
	 * calc to use 7 causes the generated clock to be out by a factor of 2.
	 * however, max_log2p cannot be fixed-up during parsing as the
	 * unmodified max_log2p value is still needed for setting mplls, hence
	 * an additional max_usable_log2p member
	 */
	uint8_t max_usable_log2p;
	uint8_t log2p_bias;

	uint8_t min_p;
	uint8_t max_p;

	int refclk;	/* reference clock, presumably in Hz or kHz — confirm at callers */
};
192
/* Public subset of the parsed VBIOS, exported as dev_priv->vbios. */
struct nouveau_bios_info {
	struct parsed_dcb *dcb;

	uint8_t chip_version;

	uint32_t dactestval;	/* DAC load-detect comparison value */
	uint32_t tvdactestval;	/* ditto for the TV DAC */
	uint8_t digital_min_front_porch;
	bool fp_no_ddc;
};
203
/* Complete VBIOS state: the shadowed ROM image plus everything parsed from it. */
struct nvbios {
	struct drm_device *dev;
	struct nouveau_bios_info pub;	/* exported via dev_priv->vbios */

	uint8_t data[NV_PROM_SIZE];	/* shadow copy of the ROM */
	unsigned int length;
	bool execute;	/* false => init scripts parsed but side effects suppressed */

	uint8_t major_version;
	uint8_t feature_byte;
	bool is_mobile;

	uint32_t fmaxvco, fminvco;

	/* init-script table offsets (all are offsets into data[]) */
	bool old_style_init;
	uint16_t init_script_tbls_ptr;
	uint16_t extra_init_script_tbl_ptr;
	uint16_t macro_index_tbl_ptr;
	uint16_t macro_tbl_ptr;
	uint16_t condition_tbl_ptr;
	uint16_t io_condition_tbl_ptr;
	uint16_t io_flag_condition_tbl_ptr;
	uint16_t init_function_tbl_ptr;

	uint16_t pll_limit_tbl_ptr;
	uint16_t ram_restrict_tbl_ptr;

	uint16_t some_script_ptr; /* BIT I + 14 */
	uint16_t init96_tbl_ptr; /* BIT I + 16 */

	struct bios_parsed_dcb bdcb;

	struct {
		int crtchead;	/* head the init scripts target */
		/* these need remembering across suspend */
		uint32_t saved_nv_pfb_cfg0;
	} state;

	struct {
		struct dcb_entry *output;	/* output currently being scripted */
		uint16_t script_table_ptr;
		uint16_t dp_table_ptr;
	} display;

	struct {
		uint16_t fptablepointer;	/* also used by tmds */
		uint16_t fpxlatetableptr;
		int xlatwidth;
		uint16_t lvdsmanufacturerpointer;
		uint16_t fpxlatemanufacturertableptr;
		uint16_t mode_ptr;
		uint16_t xlated_entry;
		bool power_off_for_reset;
		bool reset_after_pclk_change;
		bool dual_link;
		bool link_c_increment;
		bool BITbit1;
		bool if_is_24bit;
		int duallink_transition_clk;
		uint8_t strapless_is_24bit;
		uint8_t *edid;	/* points into data[], set by nouveau_bios_embedded_edid */

		/* will need resetting after suspend */
		int last_script_invoc;
		bool lvds_init_run;
	} fp;

	struct {
		uint16_t output0_script_ptr;
		uint16_t output1_script_ptr;
	} tmds;

	/* pre-BIT ("legacy"/BMP) pointers and indices */
	struct {
		uint16_t mem_init_tbl_ptr;
		uint16_t sdr_seq_tbl_ptr;
		uint16_t ddr_seq_tbl_ptr;

		struct {
			uint8_t crt, tv, panel;
		} i2c_indices;

		uint16_t lvds_single_a_script_ptr;
	} legacy;
};
288
289#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
new file mode 100644
index 000000000000..320a14bceb99
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -0,0 +1,671 @@
1/*
2 * Copyright 2007 Dave Airlied
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 */
24/*
25 * Authors: Dave Airlied <airlied@linux.ie>
26 * Ben Skeggs <darktama@iinet.net.au>
27 * Jeremy Kolb <jkolb@brandeis.edu>
28 */
29
30#include "drmP.h"
31
32#include "nouveau_drm.h"
33#include "nouveau_drv.h"
34#include "nouveau_dma.h"
35
/*
 * TTM destroy callback for nouveau buffer objects: unmap, unlink from
 * the device-wide bo list, and free the wrapper.  A bo still attached
 * to a GEM object at this point indicates a refcounting bug upstream.
 */
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	/* drop any lingering kernel mapping first */
	ttm_bo_kunmap(&nvbo->kmap);

	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);

	spin_lock(&dev_priv->ttm.bo_list_lock);
	list_del(&nvbo->head);
	spin_unlock(&dev_priv->ttm.bo_list_lock);
	kfree(nvbo);
}
52
53int
54nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
55 int size, int align, uint32_t flags, uint32_t tile_mode,
56 uint32_t tile_flags, bool no_vm, bool mappable,
57 struct nouveau_bo **pnvbo)
58{
59 struct drm_nouveau_private *dev_priv = dev->dev_private;
60 struct nouveau_bo *nvbo;
61 int ret, n = 0;
62
63 nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
64 if (!nvbo)
65 return -ENOMEM;
66 INIT_LIST_HEAD(&nvbo->head);
67 INIT_LIST_HEAD(&nvbo->entry);
68 nvbo->mappable = mappable;
69 nvbo->no_vm = no_vm;
70 nvbo->tile_mode = tile_mode;
71 nvbo->tile_flags = tile_flags;
72
73 /*
74 * Some of the tile_flags have a periodic structure of N*4096 bytes,
75 * align to to that as well as the page size. Overallocate memory to
76 * avoid corruption of other buffer objects.
77 */
78 switch (tile_flags) {
79 case 0x1800:
80 case 0x2800:
81 case 0x4800:
82 case 0x7a00:
83 if (dev_priv->chipset >= 0xA0) {
84 /* This is based on high end cards with 448 bits
85 * memory bus, could be different elsewhere.*/
86 size += 6 * 28672;
87 /* 8 * 28672 is the actual alignment requirement,
88 * but we must also align to page size. */
89 align = 2 * 8 * 28672;
90 } else if (dev_priv->chipset >= 0x90) {
91 size += 3 * 16384;
92 align = 12 * 16834;
93 } else {
94 size += 3 * 8192;
95 /* 12 * 8192 is the actual alignment requirement,
96 * but we must also align to page size. */
97 align = 2 * 12 * 8192;
98 }
99 break;
100 default:
101 break;
102 }
103
104 align >>= PAGE_SHIFT;
105
106 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
107 if (dev_priv->card_type == NV_50) {
108 size = (size + 65535) & ~65535;
109 if (align < (65536 / PAGE_SIZE))
110 align = (65536 / PAGE_SIZE);
111 }
112
113 if (flags & TTM_PL_FLAG_VRAM)
114 nvbo->placements[n++] = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING;
115 if (flags & TTM_PL_FLAG_TT)
116 nvbo->placements[n++] = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
117 nvbo->placement.fpfn = 0;
118 nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0;
119 nvbo->placement.placement = nvbo->placements;
120 nvbo->placement.busy_placement = nvbo->placements;
121 nvbo->placement.num_placement = n;
122 nvbo->placement.num_busy_placement = n;
123
124 nvbo->channel = chan;
125 nouveau_bo_placement_set(nvbo, flags);
126 ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
127 ttm_bo_type_device, &nvbo->placement, align, 0,
128 false, NULL, size, nouveau_bo_del_ttm);
129 nvbo->channel = NULL;
130 if (ret) {
131 /* ttm will call nouveau_bo_del_ttm if it fails.. */
132 return ret;
133 }
134
135 spin_lock(&dev_priv->ttm.bo_list_lock);
136 list_add_tail(&nvbo->head, &dev_priv->ttm.bo_list);
137 spin_unlock(&dev_priv->ttm.bo_list_lock);
138 *pnvbo = nvbo;
139 return 0;
140}
141
142void
143nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t memtype)
144{
145 int n = 0;
146
147 if (memtype & TTM_PL_FLAG_VRAM)
148 nvbo->placements[n++] = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING;
149 if (memtype & TTM_PL_FLAG_TT)
150 nvbo->placements[n++] = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
151 if (memtype & TTM_PL_FLAG_SYSTEM)
152 nvbo->placements[n++] = TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;
153 nvbo->placement.placement = nvbo->placements;
154 nvbo->placement.busy_placement = nvbo->placements;
155 nvbo->placement.num_placement = n;
156 nvbo->placement.num_busy_placement = n;
157}
158
/*
 * Pin a bo into the memory domain(s) given by @memtype, preventing TTM
 * from evicting it.  Pins nest via pin_refcnt; pinning an already-pinned
 * bo into a different domain is rejected.  Returns 0 or negative errno.
 */
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret, i;

	/* already pinned somewhere the caller didn't ask for? */
	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(nouveau_bdev(bo->bdev)->dev,
			 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		return -EINVAL;
	}

	/* nested pin: just bump the count */
	if (nvbo->pin_refcnt++)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		goto out;

	/* mark every placement NO_EVICT so validate leaves the bo fixed */
	nouveau_bo_placement_set(nvbo, memtype);
	for (i = 0; i < nvbo->placement.num_placement; i++)
		nvbo->placements[i] |= TTM_PL_FLAG_NO_EVICT;

	ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
	if (ret == 0) {
		/* account the pinned space against the aperture pools */
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free -= bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free -= bo->mem.size;
			break;
		default:
			break;
		}
	}
	ttm_bo_unreserve(bo);
out:
	/* roll the refcount back if anything failed */
	if (unlikely(ret))
		nvbo->pin_refcnt--;
	return ret;
}
203
/*
 * Drop one pin reference on @nvbo; when the count reaches zero, clear
 * NO_EVICT from its placements, revalidate, and return the space to the
 * aperture accounting.  Returns 0 or negative errno.
 */
int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret, i;

	/* still pinned by someone else (no underflow check here) */
	if (--nvbo->pin_refcnt)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		return ret;

	/* make the bo evictable again */
	for (i = 0; i < nvbo->placement.num_placement; i++)
		nvbo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;

	ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
	if (ret == 0) {
		/* return the pinned space to the aperture pools */
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free += bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free += bo->mem.size;
			break;
		default:
			break;
		}
	}

	ttm_bo_unreserve(bo);
	return ret;
}
238
239int
240nouveau_bo_map(struct nouveau_bo *nvbo)
241{
242 int ret;
243
244 ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
245 if (ret)
246 return ret;
247
248 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
249 ttm_bo_unreserve(&nvbo->bo);
250 return ret;
251}
252
/* Release the kernel mapping created by nouveau_bo_map(). */
void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	ttm_bo_kunmap(&nvbo->kmap);
}
258
259u16
260nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
261{
262 bool is_iomem;
263 u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
264 mem = &mem[index];
265 if (is_iomem)
266 return ioread16_native((void __force __iomem *)mem);
267 else
268 return *mem;
269}
270
271void
272nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
273{
274 bool is_iomem;
275 u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
276 mem = &mem[index];
277 if (is_iomem)
278 iowrite16_native(val, (void __force __iomem *)mem);
279 else
280 *mem = val;
281}
282
283u32
284nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
285{
286 bool is_iomem;
287 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
288 mem = &mem[index];
289 if (is_iomem)
290 return ioread32_native((void __force __iomem *)mem);
291 else
292 return *mem;
293}
294
295void
296nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
297{
298 bool is_iomem;
299 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
300 mem = &mem[index];
301 if (is_iomem)
302 iowrite32_native(val, (void __force __iomem *)mem);
303 else
304 *mem = val;
305}
306
307static struct ttm_backend *
308nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
309{
310 struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
311 struct drm_device *dev = dev_priv->dev;
312
313 switch (dev_priv->gart_info.type) {
314 case NOUVEAU_GART_AGP:
315 return ttm_agp_backend_init(bdev, dev->agp->bridge);
316 case NOUVEAU_GART_SGDMA:
317 return nouveau_sgdma_init_ttm(dev);
318 default:
319 NV_ERROR(dev, "Unknown GART type %d\n",
320 dev_priv->gart_info.type);
321 break;
322 }
323
324 return NULL;
325}
326
/* Deliberate no-op stub required by the ttm_bo_driver interface. */
static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}
333
/*
 * TTM callback: describe the capabilities of each memory domain (system,
 * VRAM, GART) — caching modes, mappability and GPU-visible base offset.
 */
static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE |
			     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;

		/* BAR1 aperture; clamp to the actual amount of VRAM */
		man->io_addr = NULL;
		man->io_offset = drm_get_resource_start(dev, 1);
		man->io_size = drm_get_resource_len(dev, 1);
		if (man->io_size > nouveau_mem_fb_amount(dev))
			man->io_size = nouveau_mem_fb_amount(dev);

		man->gpu_offset = dev_priv->vm_vram_base;
		break;
	case TTM_PL_TT:
		/* capabilities depend on which GART flavour is active */
		switch (dev_priv->gart_info.type) {
		case NOUVEAU_GART_AGP:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
			man->available_caching = TTM_PL_FLAG_UNCACHED;
			man->default_caching = TTM_PL_FLAG_UNCACHED;
			break;
		case NOUVEAU_GART_SGDMA:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
			break;
		default:
			NV_ERROR(dev, "Unknown GART type: %d\n",
				 dev_priv->gart_info.type);
			return -EINVAL;
		}

		man->io_offset = dev_priv->gart_info.aper_base;
		man->io_size = dev_priv->gart_info.aper_size;
		man->io_addr = NULL;
		man->gpu_offset = dev_priv->vm_gart_base;
		break;
	default:
		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}
394
395static void
396nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
397{
398 struct nouveau_bo *nvbo = nouveau_bo(bo);
399
400 switch (bo->mem.mem_type) {
401 default:
402 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM);
403 break;
404 }
405}
406
407
408/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
409 * TTM_PL_{VRAM,TT} directly.
410 */
411static int
412nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
413 struct nouveau_bo *nvbo, bool evict, bool no_wait,
414 struct ttm_mem_reg *new_mem)
415{
416 struct nouveau_fence *fence = NULL;
417 int ret;
418
419 ret = nouveau_fence_new(chan, &fence, true);
420 if (ret)
421 return ret;
422
423 ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
424 evict, no_wait, new_mem);
425 nouveau_fence_unref((void *)&fence);
426 return ret;
427}
428
429static inline uint32_t
430nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
431 struct ttm_mem_reg *mem)
432{
433 if (chan == nouveau_bdev(nvbo->bo.bdev)->channel) {
434 if (mem->mem_type == TTM_PL_TT)
435 return NvDmaGART;
436 return NvDmaVRAM;
437 }
438
439 if (mem->mem_type == TTM_PL_TT)
440 return chan->gart_handle;
441 return chan->vram_handle;
442}
443
/* Copy a buffer's contents with the GPU via NV_MEMORY_TO_MEMORY_FORMAT.
 * Commands are emitted on the buffer's own channel when possible,
 * otherwise on the kernel channel.  The copy is done PAGE_SIZE bytes
 * per "line", at most 2047 lines per burst, then fenced through
 * nouveau_bo_move_accel_cleanup().  Returns 0 or a negative errno.
 */
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, int no_wait,
		     struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_channel *chan;
	uint64_t src_offset, dst_offset;
	uint32_t page_count;
	int ret;

	/* Tiled or no-VM buffers must go through the kernel channel. */
	chan = nvbo->channel;
	if (!chan || nvbo->tile_flags || nvbo->no_vm) {
		chan = dev_priv->channel;
		if (!chan)
			return -EINVAL;
	}

	src_offset = old_mem->mm_node->start << PAGE_SHIFT;
	dst_offset = new_mem->mm_node->start << PAGE_SHIFT;
	/* User channels address memory through the per-card virtual
	 * address space; rebase the offsets accordingly. */
	if (chan != dev_priv->channel) {
		if (old_mem->mem_type == TTM_PL_TT)
			src_offset += dev_priv->vm_gart_base;
		else
			src_offset += dev_priv->vm_vram_base;

		if (new_mem->mem_type == TTM_PL_TT)
			dst_offset += dev_priv->vm_gart_base;
		else
			dst_offset += dev_priv->vm_vram_base;
	}

	/* Point the M2MF object at the right source/dest ctxdmas. */
	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;
	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, old_mem));
	OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, new_mem));

	if (dev_priv->card_type >= NV_50) {
		/* NV50 M2MF: select linear layout for both in and out. */
		ret = RING_SPACE(chan, 4);
		if (ret)
			return ret;
		BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
		OUT_RING(chan, 1);
		BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
		OUT_RING(chan, 1);
	}

	page_count = new_mem->num_pages;
	while (page_count) {
		/* Hardware limit: at most 2047 lines per M2MF burst. */
		int line_count = (page_count > 2047) ? 2047 : page_count;

		if (dev_priv->card_type >= NV_50) {
			/* NV50 takes the high 32 bits of the 40-bit VM
			 * addresses through separate methods. */
			ret = RING_SPACE(chan, 3);
			if (ret)
				return ret;
			BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
			OUT_RING(chan, upper_32_bits(src_offset));
			OUT_RING(chan, upper_32_bits(dst_offset));
		}
		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;
		BEGIN_RING(chan, NvSubM2MF,
			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING(chan, lower_32_bits(src_offset));
		OUT_RING(chan, lower_32_bits(dst_offset));
		OUT_RING(chan, PAGE_SIZE); /* src_pitch */
		OUT_RING(chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING(chan, PAGE_SIZE); /* line_length */
		OUT_RING(chan, line_count);
		OUT_RING(chan, (1<<8)|(1<<0));
		OUT_RING(chan, 0);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING(chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	/* Fence the copy and let TTM release the old node when done. */
	return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait, new_mem);
}
528
529static int
530nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
531 bool no_wait, struct ttm_mem_reg *new_mem)
532{
533 u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
534 struct ttm_placement placement;
535 struct ttm_mem_reg tmp_mem;
536 int ret;
537
538 placement.fpfn = placement.lpfn = 0;
539 placement.num_placement = placement.num_busy_placement = 1;
540 placement.placement = &placement_memtype;
541
542 tmp_mem = *new_mem;
543 tmp_mem.mm_node = NULL;
544 ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
545 if (ret)
546 return ret;
547
548 ret = ttm_tt_bind(bo->ttm, &tmp_mem);
549 if (ret)
550 goto out;
551
552 ret = nouveau_bo_move_m2mf(bo, true, no_wait, &bo->mem, &tmp_mem);
553 if (ret)
554 goto out;
555
556 ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
557out:
558 if (tmp_mem.mm_node) {
559 spin_lock(&bo->bdev->glob->lru_lock);
560 drm_mm_put_block(tmp_mem.mm_node);
561 spin_unlock(&bo->bdev->glob->lru_lock);
562 }
563
564 return ret;
565}
566
567static int
568nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
569 bool no_wait, struct ttm_mem_reg *new_mem)
570{
571 u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
572 struct ttm_placement placement;
573 struct ttm_mem_reg tmp_mem;
574 int ret;
575
576 placement.fpfn = placement.lpfn = 0;
577 placement.num_placement = placement.num_busy_placement = 1;
578 placement.placement = &placement_memtype;
579
580 tmp_mem = *new_mem;
581 tmp_mem.mm_node = NULL;
582 ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
583 if (ret)
584 return ret;
585
586 ret = ttm_bo_move_ttm(bo, evict, no_wait, &tmp_mem);
587 if (ret)
588 goto out;
589
590 ret = nouveau_bo_move_m2mf(bo, true, no_wait, &bo->mem, new_mem);
591 if (ret)
592 goto out;
593
594out:
595 if (tmp_mem.mm_node) {
596 spin_lock(&bo->bdev->glob->lru_lock);
597 drm_mm_put_block(tmp_mem.mm_node);
598 spin_unlock(&bo->bdev->glob->lru_lock);
599 }
600
601 return ret;
602}
603
/* TTM move hook: dispatch a buffer move to the cheapest mechanism that
 * works for the old/new placement pair, falling back to a CPU memcpy
 * whenever the accelerated path fails or the card isn't initialised.
 * Returns 0 or a negative errno.
 */
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait, struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct drm_device *dev = dev_priv->dev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	/* NV50: map the new VRAM range into the card's linear VM first. */
	if (dev_priv->card_type == NV_50 && new_mem->mem_type == TTM_PL_VRAM &&
	    !nvbo->no_vm) {
		uint64_t offset = new_mem->mm_node->start << PAGE_SHIFT;

		ret = nv50_mem_vm_bind_linear(dev,
					      offset + dev_priv->vm_vram_base,
					      new_mem->size, nvbo->tile_flags,
					      offset);
		if (ret)
			return ret;
	}

	/* No GPU available yet -> plain CPU copy. */
	if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE)
		return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);

	/* System memory with no ttm backing: nothing to copy, just adopt
	 * the new placement. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		return 0;
	}

	/* Accelerated paths; each falls back to memcpy on failure. */
	if (new_mem->mem_type == TTM_PL_SYSTEM) {
		if (old_mem->mem_type == TTM_PL_SYSTEM)
			return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
		if (nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem))
			return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM) {
		if (nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem))
			return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
	} else {
		/* VRAM <-> TT: the M2MF engine can do it directly. */
		if (nouveau_bo_move_m2mf(bo, evict, no_wait, old_mem, new_mem))
			return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
	}

	return 0;
}
651
/* TTM mmap-access hook: nouveau performs no per-file access control,
 * so every mapping attempt is allowed. */
static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}
657
/* TTM driver vtable for nouveau.  Buffer synchronisation objects are
 * backed by nouveau_fence. */
struct ttm_bo_driver nouveau_bo_driver = {
	.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = nouveau_fence_signalled,
	.sync_obj_wait = nouveau_fence_wait,
	.sync_obj_flush = nouveau_fence_flush,
	.sync_obj_unref = nouveau_fence_unref,
	.sync_obj_ref = nouveau_fence_ref,
};
671
diff --git a/drivers/gpu/drm/nouveau/nouveau_calc.c b/drivers/gpu/drm/nouveau/nouveau_calc.c
new file mode 100644
index 000000000000..ee2b84504d05
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_calc.c
@@ -0,0 +1,478 @@
1/*
2 * Copyright 1993-2003 NVIDIA, Corporation
3 * Copyright 2007-2009 Stuart Bennett
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
19 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
20 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24#include "drmP.h"
25#include "nouveau_drv.h"
26#include "nouveau_hw.h"
27
28/****************************************************************************\
29* *
30* The video arbitration routines calculate some "magic" numbers. Fixes *
31* the snow seen when accessing the framebuffer without it. *
32* It just works (I hope). *
33* *
34\****************************************************************************/
35
/* Result of the arbitration calculation: the values programmed into
 * the CRTC FIFO registers (before the final shifts applied by the
 * nv*_update_arb() callers). */
struct nv_fifo_info {
	int lwm;	/* FIFO low watermark */
	int burst;	/* FIFO burst size */
};

/* Input parameters for the arbitration calculation. */
struct nv_sim_state {
	int pclk_khz;		/* pixel clock */
	int mclk_khz;		/* memory clock */
	int nvclk_khz;		/* core clock */
	int bpp;		/* framebuffer bits per pixel */
	int mem_page_miss;	/* page-miss penalty, in memory clocks */
	int mem_latency;	/* CAS latency, in memory clocks */
	int memory_type;	/* from NV_PFB_CFG0 bit 0 (or nForce strap) */
	int memory_width;	/* memory bus width in bits (64 or 128) */
	int two_heads;		/* non-zero if the chip has two CRTCs */
};
52
/* NV04-era FIFO arbitration: iterate on the CRTC low watermark until a
 * value is found that covers the worst-case refill latency without
 * overflowing, relaxing the extra memory-clock margin each pass.
 * All *_freq values are in kHz, so "x * 1000 * 1000 / freq" yields ns.
 */
static void
nv04_calc_arb(struct nv_fifo_info *fifo, struct nv_sim_state *arb)
{
	int pagemiss, cas, width, bpp;
	int nvclks, mclks, pclks, crtpagemiss;
	int found, mclk_extra, mclk_loop, cbs, m1, p1;
	int mclk_freq, pclk_freq, nvclk_freq;
	int us_m, us_n, us_p, crtc_drain_rate;
	int cpm_us, us_crt, clwm;

	pclk_freq = arb->pclk_khz;
	mclk_freq = arb->mclk_khz;
	nvclk_freq = arb->nvclk_khz;
	pagemiss = arb->mem_page_miss;
	cas = arb->mem_latency;
	width = arb->memory_width >> 6;	/* bus width in 64-bit units */
	bpp = arb->bpp;
	cbs = 128;			/* CRTC burst size (fixed) */

	/* Fixed per-domain latency contributions, in clocks. */
	pclks = 2;
	nvclks = 10;
	mclks = 13 + cas;
	mclk_extra = 3;
	found = 0;

	while (!found) {
		found = 1;

		/* Refill latency in ns for each clock domain. */
		mclk_loop = mclks + mclk_extra;
		us_m = mclk_loop * 1000 * 1000 / mclk_freq;
		us_n = nvclks * 1000 * 1000 / nvclk_freq;
		us_p = nvclks * 1000 * 1000 / pclk_freq;

		/* CRTC drains the FIFO at pclk * bytes-per-pixel. */
		crtc_drain_rate = pclk_freq * bpp / 8;
		crtpagemiss = 2;
		crtpagemiss += 1;
		cpm_us = crtpagemiss * pagemiss * 1000 * 1000 / mclk_freq;
		us_crt = cpm_us + us_m + us_n + us_p;
		/* Watermark = total latency * drain rate (+1 to round up). */
		clwm = us_crt * crtc_drain_rate / (1000 * 1000);
		clwm++;

		/* Check the burst can be refilled fast enough; if not,
		 * retry with less memory-clock margin until none is left. */
		m1 = clwm + cbs - 512;
		p1 = m1 * pclk_freq / mclk_freq;
		p1 = p1 * bpp / 8;
		if ((p1 < m1 && m1 > 0) || clwm > 519) {
			found = !mclk_extra;
			mclk_extra--;
		}
		if (clwm < 384)
			clwm = 384;

		fifo->lwm = clwm;
		fifo->burst = cbs;
	}
}
108
/* NV10+ FIFO arbitration: model the FIFO as fill (memory) vs drain
 * (CRTC) rates, derive the largest safe burst and a low watermark that
 * covers worst-case refill latency.  Clocks in kHz; latencies in ns;
 * rates in kB/s.
 */
static void
nv10_calc_arb(struct nv_fifo_info *fifo, struct nv_sim_state *arb)
{
	int fill_rate, drain_rate;
	int pclks, nvclks, mclks, xclks;
	int pclk_freq, nvclk_freq, mclk_freq;
	int fill_lat, extra_lat;
	int max_burst_o, max_burst_l;
	int fifo_len, min_lwm, max_lwm;
	const int burst_lat = 80; /* Maximum allowable latency due
				   * to the CRTC FIFO burst. (ns) */

	pclk_freq = arb->pclk_khz;
	nvclk_freq = arb->nvclk_khz;
	mclk_freq = arb->mclk_khz;

	fill_rate = mclk_freq * arb->memory_width / 8; /* kB/s */
	drain_rate = pclk_freq * arb->bpp / 8; /* kB/s */

	fifo_len = arb->two_heads ? 1536 : 1024; /* B */

	/* Fixed FIFO refill latency. */

	pclks = 4;	/* lwm detect. */

	nvclks = 3	/* lwm -> sync. */
		+ 2	/* fbi bus cycles (1 req + 1 busy) */
		+ 1	/* 2 edge sync.  may be very close to edge so
			 * just put one. */
		+ 1	/* fbi_d_rdv_n */
		+ 1	/* Fbi_d_rdata */
		+ 1;	/* crtfifo load */

	mclks = 1	/* 2 edge sync.  may be very close to edge so
			 * just put one. */
		+ 1	/* arb_hp_req */
		+ 5	/* tiling pipeline */
		+ 2	/* latency fifo */
		+ 2	/* memory request to fbio block */
		+ 7;	/* data returned from fbio block */

	/* Need to accumulate 256 bits for read */
	mclks += (arb->memory_type == 0 ? 2 : 1)
		* arb->memory_width / 32;

	fill_lat = mclks * 1000 * 1000 / mclk_freq   /* minimum mclk latency */
		+ nvclks * 1000 * 1000 / nvclk_freq  /* nvclk latency */
		+ pclks * 1000 * 1000 / pclk_freq;   /* pclk latency */

	/* Conditional FIFO refill latency. */

	xclks = 2 * arb->mem_page_miss + mclks /* Extra latency due to
						* the overlay. */
		+ 2 * arb->mem_page_miss       /* Extra pagemiss latency. */
		+ (arb->bpp == 32 ? 8 : 4);    /* Margin of error. */

	extra_lat = xclks * 1000 * 1000 / mclk_freq;

	if (arb->two_heads)
		/* Account for another CRTC. */
		extra_lat += fill_lat + extra_lat + burst_lat;

	/* FIFO burst */

	/* Max burst not leading to overflows. */
	max_burst_o = (1 + fifo_len - extra_lat * drain_rate / (1000 * 1000))
		* (fill_rate / 1000) / ((fill_rate - drain_rate) / 1000);
	fifo->burst = min(max_burst_o, 1024);

	/* Max burst value with an acceptable latency. */
	max_burst_l = burst_lat * fill_rate / (1000 * 1000);
	fifo->burst = min(max_burst_l, fifo->burst);

	/* Hardware wants a power-of-two burst. */
	fifo->burst = rounddown_pow_of_two(fifo->burst);

	/* FIFO low watermark */

	min_lwm = (fill_lat + extra_lat) * drain_rate / (1000 * 1000) + 1;
	max_lwm = fifo_len - fifo->burst
		+ fill_lat * drain_rate / (1000 * 1000)
		+ fifo->burst * drain_rate / fill_rate;

	fifo->lwm = min_lwm + 10 * (max_lwm - min_lwm) / 100; /* Empirical. */
}
193
194static void
195nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
196 int *burst, int *lwm)
197{
198 struct drm_nouveau_private *dev_priv = dev->dev_private;
199 struct nv_fifo_info fifo_data;
200 struct nv_sim_state sim_data;
201 int MClk = nouveau_hw_get_clock(dev, MPLL);
202 int NVClk = nouveau_hw_get_clock(dev, NVPLL);
203 uint32_t cfg1 = nvReadFB(dev, NV_PFB_CFG1);
204
205 sim_data.pclk_khz = VClk;
206 sim_data.mclk_khz = MClk;
207 sim_data.nvclk_khz = NVClk;
208 sim_data.bpp = bpp;
209 sim_data.two_heads = nv_two_heads(dev);
210 if ((dev->pci_device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ ||
211 (dev->pci_device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) {
212 uint32_t type;
213
214 pci_read_config_dword(pci_get_bus_and_slot(0, 1), 0x7c, &type);
215
216 sim_data.memory_type = (type >> 12) & 1;
217 sim_data.memory_width = 64;
218 sim_data.mem_latency = 3;
219 sim_data.mem_page_miss = 10;
220 } else {
221 sim_data.memory_type = nvReadFB(dev, NV_PFB_CFG0) & 0x1;
222 sim_data.memory_width = (nvReadEXTDEV(dev, NV_PEXTDEV_BOOT_0) & 0x10) ? 128 : 64;
223 sim_data.mem_latency = cfg1 & 0xf;
224 sim_data.mem_page_miss = ((cfg1 >> 4) & 0xf) + ((cfg1 >> 31) & 0x1);
225 }
226
227 if (dev_priv->card_type == NV_04)
228 nv04_calc_arb(&fifo_data, &sim_data);
229 else
230 nv10_calc_arb(&fifo_data, &sim_data);
231
232 *burst = ilog2(fifo_data.burst >> 4);
233 *lwm = fifo_data.lwm >> 3;
234}
235
/* NV30+ cards use a fixed FIFO configuration: 2048-byte FIFO with a
 * 512-byte burst, watermark set to the remaining space.  Outputs use
 * the same register encodings as nv04_update_arb().
 */
static void
nv30_update_arb(int *burst, int *lwm)
{
	const unsigned int fifo_bytes = 2048;
	const unsigned int burst_bytes = 512;

	*burst = ilog2(burst_bytes >> 5);
	*lwm = (fifo_bytes - burst_bytes) >> 3;
}
248
249void
250nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm)
251{
252 struct drm_nouveau_private *dev_priv = dev->dev_private;
253
254 if (dev_priv->card_type < NV_30)
255 nv04_update_arb(dev, vclk, bpp, burst, lwm);
256 else if ((dev->pci_device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
257 (dev->pci_device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
258 *burst = 128;
259 *lwm = 0x0480;
260 } else
261 nv30_update_arb(burst, lwm);
262}
263
static int
getMNP_single(struct drm_device *dev, struct pll_lims *pll_lim, int clk,
	      struct nouveau_pll_vals *bestpv)
{
	/* Find M, N and P for a single stage PLL
	 *
	 * Note that some bioses (NV3x) have lookup tables of precomputed MNP
	 * values, but we're too lazy to use those atm
	 *
	 * "clk" parameter in kHz
	 * returns calculated clock (0 if no usable M/N/P was found);
	 * on success fills bestpv->N1/M1/log2P
	 */
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int cv = dev_priv->vbios->chip_version;
	/* VCO, M, N and input-frequency limits come from the BIOS table. */
	int minvco = pll_lim->vco1.minfreq, maxvco = pll_lim->vco1.maxfreq;
	int minM = pll_lim->vco1.min_m, maxM = pll_lim->vco1.max_m;
	int minN = pll_lim->vco1.min_n, maxN = pll_lim->vco1.max_n;
	int minU = pll_lim->vco1.min_inputfreq;
	int maxU = pll_lim->vco1.max_inputfreq;
	/* max_p != 0 means P is a plain divider range; otherwise P is a
	 * power of two up to max_usable_log2p. */
	int minP = pll_lim->max_p ? pll_lim->min_p : 0;
	int maxP = pll_lim->max_p ? pll_lim->max_p : pll_lim->max_usable_log2p;
	int crystal = pll_lim->refclk;
	int M, N, thisP, P;
	int clkP, calcclk;
	int delta, bestdelta = INT_MAX;
	int bestclk = 0;

	/* this division verified for nv20, nv18, nv28 (Haiku), and nv34 */
	/* possibly correlated with introduction of 27MHz crystal */
	if (dev_priv->card_type < NV_50) {
		if (cv < 0x17 || cv == 0x1a || cv == 0x20) {
			if (clk > 250000)
				maxM = 6;
			if (clk > 340000)
				maxM = 2;
		} else if (cv < 0x40) {
			if (clk > 150000)
				maxM = 6;
			if (clk > 200000)
				maxM = 4;
			if (clk > 340000)
				maxM = 2;
		}
	}

	/* If even the largest P can't bring the VCO into range, widen the
	 * VCO window around the requested clock. */
	P = pll_lim->max_p ? maxP : (1 << maxP);
	if ((clk * P) < minvco) {
		minvco = clk * maxP;
		maxvco = minvco * 2;
	}

	if (clk + clk/200 > maxvco)	/* +0.5% */
		maxvco = clk + clk/200;

	/* NV34 goes maxlog2P->0, NV20 goes 0->maxlog2P */
	for (thisP = minP; thisP <= maxP; thisP++) {
		P = pll_lim->max_p ? thisP : (1 << thisP);
		clkP = clk * P;

		if (clkP < minvco)
			continue;
		if (clkP > maxvco)
			return bestclk;	/* clkP only grows; no point going on */

		for (M = minM; M <= maxM; M++) {
			/* crystal/M is the PLL input frequency. */
			if (crystal/M < minU)
				return bestclk;
			if (crystal/M > maxU)
				continue;

			/* add crystal/2 to round better */
			N = (clkP * M + crystal/2) / crystal;

			if (N < minN)
				continue;
			if (N > maxN)
				break;

			/* more rounding additions */
			calcclk = ((N * crystal + P/2) / P + M/2) / M;
			delta = abs(calcclk - clk);
			/* we do an exhaustive search rather than terminating
			 * on an optimality condition...
			 */
			if (delta < bestdelta) {
				bestdelta = delta;
				bestclk = calcclk;
				bestpv->N1 = N;
				bestpv->M1 = M;
				bestpv->log2P = thisP;
				if (delta == 0)	/* except this one */
					return bestclk;
			}
		}
	}

	return bestclk;
}
362
static int
getMNP_double(struct drm_device *dev, struct pll_lims *pll_lim, int clk,
	      struct nouveau_pll_vals *bestpv)
{
	/* Find M, N and P for a two stage PLL
	 *
	 * Note that some bioses (NV30+) have lookup tables of precomputed MNP
	 * values, but we're too lazy to use those atm
	 *
	 * "clk" parameter in kHz
	 * returns calculated clock (0 if no usable combination was found);
	 * on success fills bestpv->N1/M1/N2/M2/log2P
	 */
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int chip_version = dev_priv->vbios->chip_version;
	int minvco1 = pll_lim->vco1.minfreq, maxvco1 = pll_lim->vco1.maxfreq;
	int minvco2 = pll_lim->vco2.minfreq, maxvco2 = pll_lim->vco2.maxfreq;
	int minU1 = pll_lim->vco1.min_inputfreq, minU2 = pll_lim->vco2.min_inputfreq;
	int maxU1 = pll_lim->vco1.max_inputfreq, maxU2 = pll_lim->vco2.max_inputfreq;
	int minM1 = pll_lim->vco1.min_m, maxM1 = pll_lim->vco1.max_m;
	int minN1 = pll_lim->vco1.min_n, maxN1 = pll_lim->vco1.max_n;
	int minM2 = pll_lim->vco2.min_m, maxM2 = pll_lim->vco2.max_m;
	int minN2 = pll_lim->vco2.min_n, maxN2 = pll_lim->vco2.max_n;
	int maxlog2P = pll_lim->max_usable_log2p;
	int crystal = pll_lim->refclk;
	/* A degenerate second stage (single M2/N2 value) acts as a fixed
	 * gain and skips the VCO2 range checks below. */
	bool fixedgain2 = (minM2 == maxM2 && minN2 == maxN2);
	int M1, N1, M2, N2, log2P;
	int clkP, calcclk1, calcclk2, calcclkout;
	int delta, bestdelta = INT_MAX;
	int bestclk = 0;

	/* Pick the largest P that keeps the target VCO2 frequency below
	 * roughly half its maximum (with a 0.5% allowance). */
	int vco2 = (maxvco2 - maxvco2/200) / 2;
	for (log2P = 0; clk && log2P < maxlog2P && clk <= (vco2 >> log2P); log2P++)
		;
	clkP = clk << log2P;

	if (maxvco2 < clk + clk/200)	/* +0.5% */
		maxvco2 = clk + clk/200;

	for (M1 = minM1; M1 <= maxM1; M1++) {
		/* crystal/M1 is the first-stage input frequency. */
		if (crystal/M1 < minU1)
			return bestclk;
		if (crystal/M1 > maxU1)
			continue;

		for (N1 = minN1; N1 <= maxN1; N1++) {
			/* First-stage VCO output. */
			calcclk1 = crystal * N1 / M1;
			if (calcclk1 < minvco1)
				continue;
			if (calcclk1 > maxvco1)
				break;

			for (M2 = minM2; M2 <= maxM2; M2++) {
				/* calcclk1/M2 is the second-stage input. */
				if (calcclk1/M2 < minU2)
					break;
				if (calcclk1/M2 > maxU2)
					continue;

				/* add calcclk1/2 to round better */
				N2 = (clkP * M2 + calcclk1/2) / calcclk1;
				if (N2 < minN2)
					continue;
				if (N2 > maxN2)
					break;

				if (!fixedgain2) {
					if (chip_version < 0x60)
						if (N2/M2 < 4 || N2/M2 > 10)
							continue;

					calcclk2 = calcclk1 * N2 / M2;
					if (calcclk2 < minvco2)
						break;
					if (calcclk2 > maxvco2)
						continue;
				} else
					calcclk2 = calcclk1;

				calcclkout = calcclk2 >> log2P;
				delta = abs(calcclkout - clk);
				/* we do an exhaustive search rather than terminating
				 * on an optimality condition...
				 */
				if (delta < bestdelta) {
					bestdelta = delta;
					bestclk = calcclkout;
					bestpv->N1 = N1;
					bestpv->M1 = M1;
					bestpv->N2 = N2;
					bestpv->M2 = M2;
					bestpv->log2P = log2P;
					if (delta == 0)	/* except this one */
						return bestclk;
				}
			}
		}
	}

	return bestclk;
}
462
463int
464nouveau_calc_pll_mnp(struct drm_device *dev, struct pll_lims *pll_lim, int clk,
465 struct nouveau_pll_vals *pv)
466{
467 int outclk;
468
469 if (!pll_lim->vco2.maxfreq)
470 outclk = getMNP_single(dev, pll_lim, clk, pv);
471 else
472 outclk = getMNP_double(dev, pll_lim, clk, pv);
473
474 if (!outclk)
475 NV_ERROR(dev, "Could not find a compatible set of PLL values\n");
476
477 return outclk;
478}
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
new file mode 100644
index 000000000000..9aaa972f8822
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -0,0 +1,468 @@
1/*
2 * Copyright 2005-2006 Stephane Marchesin
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include "drmP.h"
26#include "drm.h"
27#include "nouveau_drv.h"
28#include "nouveau_drm.h"
29#include "nouveau_dma.h"
30
31static int
32nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
33{
34 struct drm_device *dev = chan->dev;
35 struct drm_nouveau_private *dev_priv = dev->dev_private;
36 struct nouveau_bo *pb = chan->pushbuf_bo;
37 struct nouveau_gpuobj *pushbuf = NULL;
38 uint32_t start = pb->bo.mem.mm_node->start << PAGE_SHIFT;
39 int ret;
40
41 if (pb->bo.mem.mem_type == TTM_PL_TT) {
42 ret = nouveau_gpuobj_gart_dma_new(chan, 0,
43 dev_priv->gart_info.aper_size,
44 NV_DMA_ACCESS_RO, &pushbuf,
45 NULL);
46 chan->pushbuf_base = start;
47 } else
48 if (dev_priv->card_type != NV_04) {
49 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
50 dev_priv->fb_available_size,
51 NV_DMA_ACCESS_RO,
52 NV_DMA_TARGET_VIDMEM, &pushbuf);
53 chan->pushbuf_base = start;
54 } else {
55 /* NV04 cmdbuf hack, from original ddx.. not sure of it's
56 * exact reason for existing :) PCI access to cmdbuf in
57 * VRAM.
58 */
59 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
60 drm_get_resource_start(dev, 1),
61 dev_priv->fb_available_size,
62 NV_DMA_ACCESS_RO,
63 NV_DMA_TARGET_PCI, &pushbuf);
64 chan->pushbuf_base = start;
65 }
66
67 ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf, &chan->pushbuf);
68 if (ret) {
69 NV_ERROR(dev, "Error referencing pushbuf ctxdma: %d\n", ret);
70 if (pushbuf != dev_priv->gart_info.sg_ctxdma)
71 nouveau_gpuobj_del(dev, &pushbuf);
72 return ret;
73 }
74
75 return 0;
76}
77
78static struct nouveau_bo *
79nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
80{
81 struct nouveau_bo *pushbuf = NULL;
82 int location, ret;
83
84 if (nouveau_vram_pushbuf)
85 location = TTM_PL_FLAG_VRAM;
86 else
87 location = TTM_PL_FLAG_TT;
88
89 ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, false,
90 true, &pushbuf);
91 if (ret) {
92 NV_ERROR(dev, "error allocating DMA push buffer: %d\n", ret);
93 return NULL;
94 }
95
96 ret = nouveau_bo_pin(pushbuf, location);
97 if (ret) {
98 NV_ERROR(dev, "error pinning DMA push buffer: %d\n", ret);
99 nouveau_bo_ref(NULL, &pushbuf);
100 return NULL;
101 }
102
103 return pushbuf;
104}
105
/* Allocates and initialises a FIFO channel for userspace consumption:
 * finds a free channel slot, allocates the push buffer, maps the
 * channel's control registers, sets up notifier memory, default
 * gpuobjs, the pushbuf ctxdma, and PGRAPH/PFIFO contexts, then brings
 * up DMA and fencing.  On any failure the partially-built channel is
 * torn down via nouveau_channel_free().  Returns 0 or a negative errno.
 */
int
nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
		      struct drm_file *file_priv,
		      uint32_t vram_handle, uint32_t tt_handle)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_channel *chan;
	int channel, user;
	int ret;

	/*
	 * Alright, here is the full story
	 * Nvidia cards have multiple hw fifo contexts (praise them for that,
	 * no complicated crash-prone context switches)
	 * We allocate a new context for each app and let it write to it
	 * directly (woo, full userspace command submission !)
	 * When there are no more contexts, you lost
	 */
	for (channel = 0; channel < pfifo->channels; channel++) {
		if (dev_priv->fifos[channel] == NULL)
			break;
	}

	/* no more fifos. you lost. */
	if (channel == pfifo->channels)
		return -EINVAL;

	dev_priv->fifos[channel] = kzalloc(sizeof(struct nouveau_channel),
					   GFP_KERNEL);
	if (!dev_priv->fifos[channel])
		return -ENOMEM;
	dev_priv->fifo_alloc_count++;
	chan = dev_priv->fifos[channel];
	INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
	INIT_LIST_HEAD(&chan->fence.pending);
	chan->dev = dev;
	chan->id = channel;
	chan->file_priv = file_priv;
	chan->vram_handle = vram_handle;
	chan->gart_handle = tt_handle;

	NV_INFO(dev, "Allocating FIFO number %d\n", channel);

	/* Allocate DMA push buffer */
	chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev);
	if (!chan->pushbuf_bo) {
		ret = -ENOMEM;
		NV_ERROR(dev, "pushbuf %d\n", ret);
		nouveau_channel_free(chan);
		return ret;
	}

	/* Locate channel's user control regs; the register block layout
	 * differs per card generation. */
	if (dev_priv->card_type < NV_40)
		user = NV03_USER(channel);
	else
	if (dev_priv->card_type < NV_50)
		user = NV40_USER(channel);
	else
		user = NV50_USER(channel);

	chan->user = ioremap(pci_resource_start(dev->pdev, 0) + user,
			     PAGE_SIZE);
	if (!chan->user) {
		NV_ERROR(dev, "ioremap of regs failed.\n");
		nouveau_channel_free(chan);
		return -ENOMEM;
	}
	/* Offsets of the DMA put/get registers within the user page. */
	chan->user_put = 0x40;
	chan->user_get = 0x44;

	/* Allocate space for per-channel fixed notifier memory */
	ret = nouveau_notifier_init_channel(chan);
	if (ret) {
		NV_ERROR(dev, "ntfy %d\n", ret);
		nouveau_channel_free(chan);
		return ret;
	}

	/* Setup channel's default objects */
	ret = nouveau_gpuobj_channel_init(chan, vram_handle, tt_handle);
	if (ret) {
		NV_ERROR(dev, "gpuobj %d\n", ret);
		nouveau_channel_free(chan);
		return ret;
	}

	/* Create a dma object for the push buffer */
	ret = nouveau_channel_pushbuf_ctxdma_init(chan);
	if (ret) {
		NV_ERROR(dev, "pbctxdma %d\n", ret);
		nouveau_channel_free(chan);
		return ret;
	}

	/* disable the fifo caches while contexts are created */
	pfifo->reassign(dev, false);

	/* Create a graphics context for new channel */
	ret = pgraph->create_context(chan);
	if (ret) {
		nouveau_channel_free(chan);
		return ret;
	}

	/* Construct initial RAMFC for new channel */
	ret = pfifo->create_context(chan);
	if (ret) {
		nouveau_channel_free(chan);
		return ret;
	}

	pfifo->reassign(dev, true);

	ret = nouveau_dma_init(chan);
	if (!ret)
		ret = nouveau_fence_init(chan);
	if (ret) {
		nouveau_channel_free(chan);
		return ret;
	}

	nouveau_debugfs_channel_init(chan);

	NV_INFO(dev, "%s: initialised FIFO %d\n", __func__, channel);
	*chan_ret = chan;
	return 0;
}
237
238int
239nouveau_channel_idle(struct nouveau_channel *chan)
240{
241 struct drm_device *dev = chan->dev;
242 struct drm_nouveau_private *dev_priv = dev->dev_private;
243 struct nouveau_engine *engine = &dev_priv->engine;
244 uint32_t caches;
245 int idle;
246
247 if (!chan) {
248 NV_ERROR(dev, "no channel...\n");
249 return 1;
250 }
251
252 caches = nv_rd32(dev, NV03_PFIFO_CACHES);
253 nv_wr32(dev, NV03_PFIFO_CACHES, caches & ~1);
254
255 if (engine->fifo.channel_id(dev) != chan->id) {
256 struct nouveau_gpuobj *ramfc =
257 chan->ramfc ? chan->ramfc->gpuobj : NULL;
258
259 if (!ramfc) {
260 NV_ERROR(dev, "No RAMFC for channel %d\n", chan->id);
261 return 1;
262 }
263
264 engine->instmem.prepare_access(dev, false);
265 if (nv_ro32(dev, ramfc, 0) != nv_ro32(dev, ramfc, 1))
266 idle = 0;
267 else
268 idle = 1;
269 engine->instmem.finish_access(dev);
270 } else {
271 idle = (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET) ==
272 nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
273 }
274
275 nv_wr32(dev, NV03_PFIFO_CACHES, caches);
276 return idle;
277}
278
/* Stops and destroys a FIFO channel: waits for outstanding work via a
 * final fence, unloads the channel from PFIFO/PGRAPH if it is current,
 * then releases its contexts, ctxdmas, push buffer, notifiers and
 * register mapping.  Safe to call on a partially-constructed channel
 * (used by nouveau_channel_alloc()'s error paths). */
void
nouveau_channel_free(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	unsigned long flags;
	int ret;

	NV_INFO(dev, "%s: freeing fifo %d\n", __func__, chan->id);

	nouveau_debugfs_channel_fini(chan);

	/* Give outstanding push buffers a chance to complete */
	spin_lock_irqsave(&chan->fence.lock, flags);
	nouveau_fence_update(chan);
	spin_unlock_irqrestore(&chan->fence.lock, flags);
	if (chan->fence.sequence != chan->fence.sequence_ack) {
		/* Still-unacked work: emit one more fence and wait on it. */
		struct nouveau_fence *fence = NULL;

		ret = nouveau_fence_new(chan, &fence, true);
		if (ret == 0) {
			ret = nouveau_fence_wait(fence, NULL, false, false);
			nouveau_fence_unref((void *)&fence);
		}

		if (ret)
			NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
	}

	/* Ensure all outstanding fences are signaled.  They should be if the
	 * above attempts at idling were OK, but if we failed this'll tell TTM
	 * we're done with the buffers.
	 */
	nouveau_fence_fini(chan);

	/* Ensure the channel is no longer active on the GPU */
	pfifo->reassign(dev, false);

	/* If this channel's graphics context is loaded, unload it first. */
	if (pgraph->channel(dev) == chan) {
		pgraph->fifo_access(dev, false);
		pgraph->unload_context(dev);
		pgraph->fifo_access(dev, true);
	}
	pgraph->destroy_context(chan);

	/* Likewise for the FIFO context. */
	if (pfifo->channel_id(dev) == chan->id) {
		pfifo->disable(dev);
		pfifo->unload_context(dev);
		pfifo->enable(dev);
	}
	pfifo->destroy_context(chan);

	pfifo->reassign(dev, true);

	/* Release the channel's resources */
	nouveau_gpuobj_ref_del(dev, &chan->pushbuf);
	if (chan->pushbuf_bo) {
		nouveau_bo_unpin(chan->pushbuf_bo);
		nouveau_bo_ref(NULL, &chan->pushbuf_bo);
	}
	nouveau_gpuobj_channel_takedown(chan);
	nouveau_notifier_takedown_channel(chan);
	if (chan->user)
		iounmap(chan->user);

	dev_priv->fifos[chan->id] = NULL;
	dev_priv->fifo_alloc_count--;
	kfree(chan);
}
351
352/* cleans up all the fifos from file_priv */
353void
354nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
355{
356 struct drm_nouveau_private *dev_priv = dev->dev_private;
357 struct nouveau_engine *engine = &dev_priv->engine;
358 int i;
359
360 NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
361 for (i = 0; i < engine->fifo.channels; i++) {
362 struct nouveau_channel *chan = dev_priv->fifos[i];
363
364 if (chan && chan->file_priv == file_priv)
365 nouveau_channel_free(chan);
366 }
367}
368
369int
370nouveau_channel_owner(struct drm_device *dev, struct drm_file *file_priv,
371 int channel)
372{
373 struct drm_nouveau_private *dev_priv = dev->dev_private;
374 struct nouveau_engine *engine = &dev_priv->engine;
375
376 if (channel >= engine->fifo.channels)
377 return 0;
378 if (dev_priv->fifos[channel] == NULL)
379 return 0;
380
381 return (dev_priv->fifos[channel]->file_priv == file_priv);
382}
383
384/***********************************
385 * ioctls wrapping the functions
386 ***********************************/
387
388static int
389nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
390 struct drm_file *file_priv)
391{
392 struct drm_nouveau_private *dev_priv = dev->dev_private;
393 struct drm_nouveau_channel_alloc *init = data;
394 struct nouveau_channel *chan;
395 int ret;
396
397 NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
398
399 if (dev_priv->engine.graph.accel_blocked)
400 return -ENODEV;
401
402 if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
403 return -EINVAL;
404
405 ret = nouveau_channel_alloc(dev, &chan, file_priv,
406 init->fb_ctxdma_handle,
407 init->tt_ctxdma_handle);
408 if (ret)
409 return ret;
410 init->channel = chan->id;
411
412 init->subchan[0].handle = NvM2MF;
413 if (dev_priv->card_type < NV_50)
414 init->subchan[0].grclass = 0x0039;
415 else
416 init->subchan[0].grclass = 0x5039;
417 init->nr_subchan = 1;
418
419 /* Named memory object area */
420 ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
421 &init->notifier_handle);
422 if (ret) {
423 nouveau_channel_free(chan);
424 return ret;
425 }
426
427 return 0;
428}
429
430static int
431nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
432 struct drm_file *file_priv)
433{
434 struct drm_nouveau_channel_free *cfree = data;
435 struct nouveau_channel *chan;
436
437 NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
438 NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree->channel, file_priv, chan);
439
440 nouveau_channel_free(chan);
441 return 0;
442}
443
/***********************************
 * finally, the ioctl table
 ***********************************/

/* NOTE(review): DRM dispatches driver ioctls by table index, so entry
 * order must stay in sync with the DRM_NOUVEAU_* numbering — do not
 * reorder; confirm against nouveau_drm.h.
 */
struct drm_ioctl_desc nouveau_ioctls[] = {
	DRM_IOCTL_DEF(DRM_NOUVEAU_CARD_INIT, nouveau_ioctl_card_init, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF_CALL, nouveau_gem_ioctl_pushbuf_call, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PIN, nouveau_gem_ioctl_pin, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_UNPIN, nouveau_gem_ioctl_unpin, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF_CALL2, nouveau_gem_ioctl_pushbuf_call2, DRM_AUTH),
};

/* Number of entries above, exported to the DRM core. */
int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
new file mode 100644
index 000000000000..032cf098fa1c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -0,0 +1,824 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm_edid.h"
29#include "drm_crtc_helper.h"
30#include "nouveau_reg.h"
31#include "nouveau_drv.h"
32#include "nouveau_encoder.h"
33#include "nouveau_crtc.h"
34#include "nouveau_connector.h"
35#include "nouveau_hw.h"
36
37static inline struct drm_encoder_slave_funcs *
38get_slave_funcs(struct nouveau_encoder *enc)
39{
40 return to_encoder_slave(to_drm_encoder(enc))->slave_funcs;
41}
42
43static struct nouveau_encoder *
44find_encoder_by_type(struct drm_connector *connector, int type)
45{
46 struct drm_device *dev = connector->dev;
47 struct nouveau_encoder *nv_encoder;
48 struct drm_mode_object *obj;
49 int i, id;
50
51 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
52 id = connector->encoder_ids[i];
53 if (!id)
54 break;
55
56 obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
57 if (!obj)
58 continue;
59 nv_encoder = nouveau_encoder(obj_to_encoder(obj));
60
61 if (type == OUTPUT_ANY || nv_encoder->dcb->type == type)
62 return nv_encoder;
63 }
64
65 return NULL;
66}
67
68struct nouveau_connector *
69nouveau_encoder_connector_get(struct nouveau_encoder *encoder)
70{
71 struct drm_device *dev = to_drm_encoder(encoder)->dev;
72 struct drm_connector *drm_connector;
73
74 list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head) {
75 if (drm_connector->encoder == to_drm_encoder(encoder))
76 return nouveau_connector(drm_connector);
77 }
78
79 return NULL;
80}
81
82
83static void
84nouveau_connector_destroy(struct drm_connector *drm_connector)
85{
86 struct nouveau_connector *connector = nouveau_connector(drm_connector);
87 struct drm_device *dev = connector->base.dev;
88
89 NV_DEBUG(dev, "\n");
90
91 if (!connector)
92 return;
93
94 drm_sysfs_connector_remove(drm_connector);
95 drm_connector_cleanup(drm_connector);
96 kfree(drm_connector);
97}
98
/* Prepare pre-NV50 hardware for a DDC transaction and record in *flags
 * what must be undone afterwards by nouveau_connector_ddc_finish():
 *   bit 0 - VGA CRTCs were locked and have been unlocked
 *   bit 1 - heads were tied; owner was switched to head 0
 *
 * NOTE(review): on card_type >= NV_50 this returns without touching
 * *flags, so callers must initialize it themselves.
 */
static void
nouveau_connector_ddc_prepare(struct drm_connector *connector, int *flags)
{
	struct drm_nouveau_private *dev_priv = connector->dev->dev_private;

	if (dev_priv->card_type >= NV_50)
		return;

	*flags = 0;
	if (NVLockVgaCrtcs(dev_priv->dev, false))
		*flags |= 1;
	if (nv_heads_tied(dev_priv->dev))
		*flags |= 2;

	if (*flags & 2)
		NVSetOwner(dev_priv->dev, 0); /* necessary? */
}
116
/* Undo nouveau_connector_ddc_prepare() according to the flags it set:
 * restore head ownership, then re-lock the VGA CRTCs.
 *
 * NOTE(review): owner value 4 presumably means "tied heads" — confirm
 * against NVSetOwner()'s definition in nouveau_hw.c.
 */
static void
nouveau_connector_ddc_finish(struct drm_connector *connector, int flags)
{
	struct drm_nouveau_private *dev_priv = connector->dev->dev_private;

	if (dev_priv->card_type >= NV_50)
		return;

	if (flags & 2)
		NVSetOwner(dev_priv->dev, 4);
	if (flags & 1)
		NVLockVgaCrtcs(dev_priv->dev, true);
}
130
131static struct nouveau_i2c_chan *
132nouveau_connector_ddc_detect(struct drm_connector *connector,
133 struct nouveau_encoder **pnv_encoder)
134{
135 struct drm_device *dev = connector->dev;
136 uint8_t out_buf[] = { 0x0, 0x0}, buf[2];
137 int ret, flags, i;
138
139 struct i2c_msg msgs[] = {
140 {
141 .addr = 0x50,
142 .flags = 0,
143 .len = 1,
144 .buf = out_buf,
145 },
146 {
147 .addr = 0x50,
148 .flags = I2C_M_RD,
149 .len = 1,
150 .buf = buf,
151 }
152 };
153
154 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
155 struct nouveau_i2c_chan *i2c = NULL;
156 struct nouveau_encoder *nv_encoder;
157 struct drm_mode_object *obj;
158 int id;
159
160 id = connector->encoder_ids[i];
161 if (!id)
162 break;
163
164 obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
165 if (!obj)
166 continue;
167 nv_encoder = nouveau_encoder(obj_to_encoder(obj));
168
169 if (nv_encoder->dcb->i2c_index < 0xf)
170 i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
171 if (!i2c)
172 continue;
173
174 nouveau_connector_ddc_prepare(connector, &flags);
175 ret = i2c_transfer(&i2c->adapter, msgs, 2);
176 nouveau_connector_ddc_finish(connector, flags);
177
178 if (ret == 2) {
179 *pnv_encoder = nv_encoder;
180 return i2c;
181 }
182 }
183
184 return NULL;
185}
186
/* Record nv_encoder as the encoder detected behind this connector and
 * update connector capabilities (interlace/doublescan) and the DVI-I
 * subconnector property to match the encoder type.
 */
static void
nouveau_connector_set_encoder(struct drm_connector *connector,
			      struct nouveau_encoder *nv_encoder)
{
	struct nouveau_connector *nv_connector = nouveau_connector(connector);
	struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
	struct drm_device *dev = connector->dev;

	/* Nothing to do if the detected encoder hasn't changed */
	if (nv_connector->detected_encoder == nv_encoder)
		return;
	nv_connector->detected_encoder = nv_encoder;

	/* Digital outputs support neither doublescan nor interlace */
	if (nv_encoder->dcb->type == OUTPUT_LVDS ||
	    nv_encoder->dcb->type == OUTPUT_TMDS) {
		connector->doublescan_allowed = false;
		connector->interlace_allowed = false;
	} else {
		connector->doublescan_allowed = true;
		/* Interlace is disabled on NV20, and on NV1x chips except
		 * NV10 (0x010x) and NV15 (0x015x) pci ids.
		 */
		if (dev_priv->card_type == NV_20 ||
		    (dev_priv->card_type == NV_10 &&
		     (dev->pci_device & 0x0ff0) != 0x0100 &&
		     (dev->pci_device & 0x0ff0) != 0x0150))
			/* HW is broken */
			connector->interlace_allowed = false;
		else
			connector->interlace_allowed = true;
	}

	/* Expose which half of a DVI-I connector is in use */
	if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) {
		drm_connector_property_set_value(connector,
			dev->mode_config.dvi_i_subconnector_property,
			nv_encoder->dcb->type == OUTPUT_TMDS ?
			DRM_MODE_SUBCONNECTOR_DVID :
			DRM_MODE_SUBCONNECTOR_DVIA);
	}
}
223
/* drm_connector_funcs.detect: determine whether a display is attached.
 * Probe order: LVDS with a known native mode, then DDC/EDID, then the
 * analog/TV load-detect fallback.
 */
static enum drm_connector_status
nouveau_connector_detect(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct nouveau_connector *nv_connector = nouveau_connector(connector);
	struct nouveau_encoder *nv_encoder = NULL;
	struct nouveau_i2c_chan *i2c;
	int type, flags;

	/* LVDS panels with a cached native mode are always connected */
	if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
		nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS);
	if (nv_encoder && nv_connector->native_mode) {
		nouveau_connector_set_encoder(connector, nv_encoder);
		return connector_status_connected;
	}

	/* Something answered on the DDC bus: fetch and validate its EDID.
	 * NOTE(review): any previously-cached nv_connector->edid is
	 * overwritten without being freed here — possible leak, confirm
	 * ownership with drm_get_edid()'s contract.
	 */
	i2c = nouveau_connector_ddc_detect(connector, &nv_encoder);
	if (i2c) {
		nouveau_connector_ddc_prepare(connector, &flags);
		nv_connector->edid = drm_get_edid(connector, &i2c->adapter);
		nouveau_connector_ddc_finish(connector, flags);
		drm_mode_connector_update_edid_property(connector,
							nv_connector->edid);
		if (!nv_connector->edid) {
			NV_ERROR(dev, "DDC responded, but no EDID for %s\n",
				 drm_get_connector_name(connector));
			return connector_status_disconnected;
		}

		/* DisplayPort needs a successful link init too */
		if (nv_encoder->dcb->type == OUTPUT_DP &&
		    !nouveau_dp_detect(to_drm_encoder(nv_encoder))) {
			NV_ERROR(dev, "Detected %s, but failed init\n",
				 drm_get_connector_name(connector));
			return connector_status_disconnected;
		}

		/* Override encoder type for DVI-I based on whether EDID
		 * says the display is digital or analog, both use the
		 * same i2c channel so the value returned from ddc_detect
		 * isn't necessarily correct.
		 */
		if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) {
			if (nv_connector->edid->input & DRM_EDID_INPUT_DIGITAL)
				type = OUTPUT_TMDS;
			else
				type = OUTPUT_ANALOG;

			nv_encoder = find_encoder_by_type(connector, type);
			if (!nv_encoder) {
				NV_ERROR(dev, "Detected %d encoder on %s, "
					      "but no object!\n", type,
					 drm_get_connector_name(connector));
				return connector_status_disconnected;
			}
		}

		nouveau_connector_set_encoder(connector, nv_encoder);
		return connector_status_connected;
	}

	/* No DDC: fall back to the encoder's own load/presence detect
	 * (analog VGA, then TV).
	 */
	nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG);
	if (!nv_encoder)
		nv_encoder = find_encoder_by_type(connector, OUTPUT_TV);
	if (nv_encoder) {
		struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
		struct drm_encoder_helper_funcs *helper =
						encoder->helper_private;

		if (helper->detect(encoder, connector) ==
						connector_status_connected) {
			nouveau_connector_set_encoder(connector, nv_encoder);
			return connector_status_connected;
		}

	}

	return connector_status_disconnected;
}
302
303static void
304nouveau_connector_force(struct drm_connector *connector)
305{
306 struct drm_device *dev = connector->dev;
307 struct nouveau_encoder *nv_encoder;
308 int type;
309
310 if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) {
311 if (connector->force == DRM_FORCE_ON_DIGITAL)
312 type = OUTPUT_TMDS;
313 else
314 type = OUTPUT_ANALOG;
315 } else
316 type = OUTPUT_ANY;
317
318 nv_encoder = find_encoder_by_type(connector, type);
319 if (!nv_encoder) {
320 NV_ERROR(dev, "can't find encoder to force %s on!\n",
321 drm_get_connector_name(connector));
322 connector->status = connector_status_disconnected;
323 return;
324 }
325
326 nouveau_connector_set_encoder(connector, nv_encoder);
327}
328
/* drm_connector_funcs.set_property: handle the scaling and dithering
 * connector properties; everything else is forwarded to the slave
 * encoder for TV outputs.
 */
static int
nouveau_connector_set_property(struct drm_connector *connector,
			       struct drm_property *property, uint64_t value)
{
	struct nouveau_connector *nv_connector = nouveau_connector(connector);
	struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
	struct drm_device *dev = connector->dev;
	int ret;

	/* Scaling mode */
	if (property == dev->mode_config.scaling_mode_property) {
		struct nouveau_crtc *nv_crtc = NULL;
		bool modeset = false;

		switch (value) {
		case DRM_MODE_SCALE_NONE:
		case DRM_MODE_SCALE_FULLSCREEN:
		case DRM_MODE_SCALE_CENTER:
		case DRM_MODE_SCALE_ASPECT:
			break;
		default:
			return -EINVAL;
		}

		/* LVDS always needs gpu scaling */
		if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS &&
		    value == DRM_MODE_SCALE_NONE)
			return -EINVAL;

		/* Changing between GPU and panel scaling requires a full
		 * modeset
		 */
		if ((nv_connector->scaling_mode == DRM_MODE_SCALE_NONE) ||
		    (value == DRM_MODE_SCALE_NONE))
			modeset = true;
		nv_connector->scaling_mode = value;

		if (connector->encoder && connector->encoder->crtc)
			nv_crtc = nouveau_crtc(connector->encoder->crtc);
		/* Property stored; applied on the next modeset */
		if (!nv_crtc)
			return 0;

		if (modeset || !nv_crtc->set_scale) {
			/* drm_crtc_helper_set_mode() returns true on
			 * success, hence the inverted check below.
			 */
			ret = drm_crtc_helper_set_mode(&nv_crtc->base,
							&nv_crtc->base.mode,
							nv_crtc->base.x,
							nv_crtc->base.y, NULL);
			if (!ret)
				return -EINVAL;
		} else {
			ret = nv_crtc->set_scale(nv_crtc, value, true);
			if (ret)
				return ret;
		}

		return 0;
	}

	/* Dithering */
	if (property == dev->mode_config.dithering_mode_property) {
		struct nouveau_crtc *nv_crtc = NULL;

		if (value == DRM_MODE_DITHERING_ON)
			nv_connector->use_dithering = true;
		else
			nv_connector->use_dithering = false;

		if (connector->encoder && connector->encoder->crtc)
			nv_crtc = nouveau_crtc(connector->encoder->crtc);

		/* No active crtc or no dither hook: stored for later */
		if (!nv_crtc || !nv_crtc->set_dither)
			return 0;

		return nv_crtc->set_dither(nv_crtc, nv_connector->use_dithering,
					   true);
	}

	/* Unknown property: give the TV slave encoder a chance */
	if (nv_encoder && nv_encoder->dcb->type == OUTPUT_TV)
		return get_slave_funcs(nv_encoder)->
			set_property(to_drm_encoder(nv_encoder), connector, property, value);

	return -EINVAL;
}
412
413static struct drm_display_mode *
414nouveau_connector_native_mode(struct nouveau_connector *connector)
415{
416 struct drm_device *dev = connector->base.dev;
417 struct drm_display_mode *mode, *largest = NULL;
418 int high_w = 0, high_h = 0, high_v = 0;
419
420 /* Use preferred mode if there is one.. */
421 list_for_each_entry(mode, &connector->base.probed_modes, head) {
422 if (mode->type & DRM_MODE_TYPE_PREFERRED) {
423 NV_DEBUG(dev, "native mode from preferred\n");
424 return drm_mode_duplicate(dev, mode);
425 }
426 }
427
428 /* Otherwise, take the resolution with the largest width, then height,
429 * then vertical refresh
430 */
431 list_for_each_entry(mode, &connector->base.probed_modes, head) {
432 if (mode->hdisplay < high_w)
433 continue;
434
435 if (mode->hdisplay == high_w && mode->vdisplay < high_h)
436 continue;
437
438 if (mode->hdisplay == high_w && mode->vdisplay == high_h &&
439 mode->vrefresh < high_v)
440 continue;
441
442 high_w = mode->hdisplay;
443 high_h = mode->vdisplay;
444 high_v = mode->vrefresh;
445 largest = mode;
446 }
447
448 NV_DEBUG(dev, "native mode from largest: %dx%d@%d\n",
449 high_w, high_h, high_v);
450 return largest ? drm_mode_duplicate(dev, largest) : NULL;
451}
452
/* A display resolution, used for the GPU-scaler mode list below. */
struct moderec {
	int hdisplay;
	int vdisplay;
};

/* Common resolutions offered on scaling-capable outputs, largest
 * first; the all-zero entry terminates the list.
 */
static struct moderec scaler_modes[] = {
	{ 1920, 1200 },
	{ 1920, 1080 },
	{ 1680, 1050 },
	{ 1600, 1200 },
	{ 1400, 1050 },
	{ 1280, 1024 },
	{ 1280, 960 },
	{ 1152, 864 },
	{ 1024, 768 },
	{ 800, 600 },
	{ 720, 400 },
	{ 640, 480 },
	{ 640, 400 },
	{ 640, 350 },
	{}
};
475
476static int
477nouveau_connector_scaler_modes_add(struct drm_connector *connector)
478{
479 struct nouveau_connector *nv_connector = nouveau_connector(connector);
480 struct drm_display_mode *native = nv_connector->native_mode, *m;
481 struct drm_device *dev = connector->dev;
482 struct moderec *mode = &scaler_modes[0];
483 int modes = 0;
484
485 if (!native)
486 return 0;
487
488 while (mode->hdisplay) {
489 if (mode->hdisplay <= native->hdisplay &&
490 mode->vdisplay <= native->vdisplay) {
491 m = drm_cvt_mode(dev, mode->hdisplay, mode->vdisplay,
492 drm_mode_vrefresh(native), false,
493 false, false);
494 if (!m)
495 continue;
496
497 m->type |= DRM_MODE_TYPE_DRIVER;
498
499 drm_mode_probed_add(connector, m);
500 modes++;
501 }
502
503 mode++;
504 }
505
506 return modes;
507}
508
/* drm_connector_helper_funcs.get_modes: populate the probed-mode list
 * from EDID, the panel's native mode, the TV slave encoder, and (for
 * LVDS) the GPU-scaler resolutions.  Returns the number of modes.
 *
 * NOTE(review): nv_encoder is dereferenced unconditionally below —
 * assumes detect() has run and set detected_encoder; confirm with the
 * drm probe-helper call order.
 */
static int
nouveau_connector_get_modes(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct nouveau_connector *nv_connector = nouveau_connector(connector);
	struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
	int ret = 0;

	/* If we're not LVDS, destroy the previous native mode, the attached
	 * monitor could have changed.
	 */
	if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS &&
	    nv_connector->native_mode) {
		drm_mode_destroy(dev, nv_connector->native_mode);
		nv_connector->native_mode = NULL;
	}

	if (nv_connector->edid)
		ret = drm_add_edid_modes(connector, nv_connector->edid);

	/* Find the native mode if this is a digital panel, if we didn't
	 * find any modes through DDC previously add the native mode to
	 * the list of modes.
	 */
	if (!nv_connector->native_mode)
		nv_connector->native_mode =
			nouveau_connector_native_mode(nv_connector);
	if (ret == 0 && nv_connector->native_mode) {
		struct drm_display_mode *mode;

		mode = drm_mode_duplicate(dev, nv_connector->native_mode);
		drm_mode_probed_add(connector, mode);
		ret = 1;
	}

	/* TV outputs get their mode list from the slave encoder */
	if (nv_encoder->dcb->type == OUTPUT_TV)
		ret = get_slave_funcs(nv_encoder)->
			get_modes(to_drm_encoder(nv_encoder), connector);

	if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
		ret += nouveau_connector_scaler_modes_add(connector);

	return ret;
}
553
/* drm_connector_helper_funcs.mode_valid: reject modes whose pixel
 * clock (kHz) falls outside the detected encoder's limits, or that
 * exceed an LVDS panel's native resolution.
 */
static int
nouveau_connector_mode_valid(struct drm_connector *connector,
			     struct drm_display_mode *mode)
{
	struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
	struct nouveau_connector *nv_connector = nouveau_connector(connector);
	struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
	unsigned min_clock = 25000, max_clock = min_clock;
	unsigned clock = mode->clock;

	switch (nv_encoder->dcb->type) {
	case OUTPUT_LVDS:
		/* Panel can't display anything larger than its native mode */
		BUG_ON(!nv_connector->native_mode);
		if (mode->hdisplay > nv_connector->native_mode->hdisplay ||
		    mode->vdisplay > nv_connector->native_mode->vdisplay)
			return MODE_PANEL;

		min_clock = 0;
		max_clock = 400000;
		break;
	case OUTPUT_TMDS:
		/* Single-link TMDS tops out at 165MHz; dual-link doubles it */
		if ((dev_priv->card_type >= NV_50 && !nouveau_duallink) ||
		    (dev_priv->card_type < NV_50 &&
		     !nv_encoder->dcb->duallink_possible))
			max_clock = 165000;
		else
			max_clock = 330000;
		break;
	case OUTPUT_ANALOG:
		/* Use the VBIOS DAC limit if present, else a 350MHz default */
		max_clock = nv_encoder->dcb->crtconf.maxfreq;
		if (!max_clock)
			max_clock = 350000;
		break;
	case OUTPUT_TV:
		return get_slave_funcs(nv_encoder)->
			mode_valid(to_drm_encoder(nv_encoder), mode);
	case OUTPUT_DP:
		/* Link capacity: lanes * per-lane symbol rate (kHz) */
		if (nv_encoder->dp.link_bw == DP_LINK_BW_2_7)
			max_clock = nv_encoder->dp.link_nr * 270000;
		else
			max_clock = nv_encoder->dp.link_nr * 162000;

		/* NOTE(review): the *3 presumably scales the pixel clock
		 * to link bandwidth for 24bpp — confirm against the DP
		 * link-rate math used elsewhere in nouveau_dp.c.
		 */
		clock *= 3;
		break;
	}

	if (clock < min_clock)
		return MODE_CLOCK_LOW;

	if (clock > max_clock)
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}
608
609static struct drm_encoder *
610nouveau_connector_best_encoder(struct drm_connector *connector)
611{
612 struct nouveau_connector *nv_connector = nouveau_connector(connector);
613
614 if (nv_connector->detected_encoder)
615 return to_drm_encoder(nv_connector->detected_encoder);
616
617 return NULL;
618}
619
/* Probe-helper callbacks for all nouveau connectors */
static const struct drm_connector_helper_funcs
nouveau_connector_helper_funcs = {
	.get_modes = nouveau_connector_get_modes,
	.mode_valid = nouveau_connector_mode_valid,
	.best_encoder = nouveau_connector_best_encoder,
};

/* Core connector callbacks; save/restore are unused by this driver */
static const struct drm_connector_funcs
nouveau_connector_funcs = {
	.dpms = drm_helper_connector_dpms,
	.save = NULL,
	.restore = NULL,
	.detect = nouveau_connector_detect,
	.destroy = nouveau_connector_destroy,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = nouveau_connector_set_property,
	.force = nouveau_connector_force
};
638
/* Determine the native mode of an LVDS panel, trying in order: DDC
 * EDID, a hardcoded VBIOS modeline, and a VBIOS-embedded EDID block.
 * Fails with -ENODEV if no native mode can be established, in which
 * case the connector should be torn down.
 */
static int
nouveau_connector_create_lvds(struct drm_device *dev,
			      struct drm_connector *connector)
{
	struct nouveau_connector *nv_connector = nouveau_connector(connector);
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_i2c_chan *i2c = NULL;
	struct nouveau_encoder *nv_encoder;
	struct drm_display_mode native, *mode, *temp;
	bool dummy, if_is_24bit = false;
	int ret, flags;

	nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS);
	if (!nv_encoder)
		return -ENODEV;

	ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &if_is_24bit);
	if (ret) {
		NV_ERROR(dev, "Error parsing LVDS table, disabling LVDS\n");
		return ret;
	}
	/* 18-bit panels need dithering to hide banding */
	nv_connector->use_dithering = !if_is_24bit;

	/* Firstly try getting EDID over DDC, if allowed and I2C channel
	 * is available.
	 */
	if (!dev_priv->VBIOS.pub.fp_no_ddc && nv_encoder->dcb->i2c_index < 0xf)
		i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);

	if (i2c) {
		nouveau_connector_ddc_prepare(connector, &flags);
		nv_connector->edid = drm_get_edid(connector, &i2c->adapter);
		nouveau_connector_ddc_finish(connector, flags);
	}

	/* If no EDID found above, and the VBIOS indicates a hardcoded
	 * modeline is available for the panel, set it as the panel's
	 * native mode and exit.
	 */
	if (!nv_connector->edid && nouveau_bios_fp_mode(dev, &native) &&
	    (nv_encoder->dcb->lvdsconf.use_straps_for_mode ||
	     dev_priv->VBIOS.pub.fp_no_ddc)) {
		nv_connector->native_mode = drm_mode_duplicate(dev, &native);
		goto out;
	}

	/* Still nothing, some VBIOS images have a hardcoded EDID block
	 * stored for the panel stored in them.
	 */
	if (!nv_connector->edid && !nv_connector->native_mode &&
	    !dev_priv->VBIOS.pub.fp_no_ddc) {
		nv_connector->edid =
			(struct edid *)nouveau_bios_embedded_edid(dev);
	}

	if (!nv_connector->edid)
		goto out;

	/* We didn't find/use a panel mode from the VBIOS, so parse the EDID
	 * block and look for the preferred mode there.
	 */
	ret = drm_add_edid_modes(connector, nv_connector->edid);
	if (ret == 0)
		goto out;
	nv_connector->detected_encoder = nv_encoder;
	nv_connector->native_mode = nouveau_connector_native_mode(nv_connector);
	/* The probed list was only needed to pick the native mode;
	 * clear it so get_modes() rebuilds it later.
	 */
	list_for_each_entry_safe(mode, temp, &connector->probed_modes, head)
		drm_mode_remove(connector, mode);

out:
	if (!nv_connector->native_mode) {
		NV_ERROR(dev, "LVDS present in DCB table, but couldn't "
			      "determine its native mode.  Disabling.\n");
		return -ENODEV;
	}

	drm_mode_connector_update_edid_property(connector, nv_connector->edid);
	return 0;
}
718
/* Create and register a connector of the given DRM type for DCB
 * connector-table entry 'index': initialise DRM core state, attach
 * the scaling/dithering/DVI-I properties appropriate for the type,
 * bind every encoder whose DCB entry references this connector, and
 * (for LVDS) resolve the panel's native mode.  Returns 0 or -errno.
 */
int
nouveau_connector_create(struct drm_device *dev, int index, int type)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_connector *nv_connector = NULL;
	struct drm_connector *connector;
	struct drm_encoder *encoder;
	int ret;

	NV_DEBUG(dev, "\n");

	nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL);
	if (!nv_connector)
		return -ENOMEM;
	nv_connector->dcb = nouveau_bios_connector_entry(dev, index);
	connector = &nv_connector->base;

	/* Log what we found; unknown types still get created */
	switch (type) {
	case DRM_MODE_CONNECTOR_VGA:
		NV_INFO(dev, "Detected a VGA connector\n");
		break;
	case DRM_MODE_CONNECTOR_DVID:
		NV_INFO(dev, "Detected a DVI-D connector\n");
		break;
	case DRM_MODE_CONNECTOR_DVII:
		NV_INFO(dev, "Detected a DVI-I connector\n");
		break;
	case DRM_MODE_CONNECTOR_LVDS:
		NV_INFO(dev, "Detected a LVDS connector\n");
		break;
	case DRM_MODE_CONNECTOR_TV:
		NV_INFO(dev, "Detected a TV connector\n");
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		NV_INFO(dev, "Detected a DisplayPort connector\n");
		break;
	default:
		NV_ERROR(dev, "Unknown connector, this is not good.\n");
		break;
	}

	/* defaults, will get overridden in detect() */
	connector->interlace_allowed = false;
	connector->doublescan_allowed = false;

	drm_connector_init(dev, connector, &nouveau_connector_funcs, type);
	drm_connector_helper_add(connector, &nouveau_connector_helper_funcs);

	/* Init DVI-I specific properties */
	if (type == DRM_MODE_CONNECTOR_DVII) {
		drm_mode_create_dvi_i_properties(dev);
		drm_connector_attach_property(connector, dev->mode_config.dvi_i_subconnector_property, 0);
		drm_connector_attach_property(connector, dev->mode_config.dvi_i_select_subconnector_property, 0);
	}

	/* LVDS dithering is decided later by create_lvds() */
	if (type != DRM_MODE_CONNECTOR_LVDS)
		nv_connector->use_dithering = false;

	/* Digital outputs: GPU scaling + dithering properties */
	if (type == DRM_MODE_CONNECTOR_DVID ||
	    type == DRM_MODE_CONNECTOR_DVII ||
	    type == DRM_MODE_CONNECTOR_LVDS ||
	    type == DRM_MODE_CONNECTOR_DisplayPort) {
		nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN;

		drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property,
					      nv_connector->scaling_mode);
		drm_connector_attach_property(connector, dev->mode_config.dithering_mode_property,
					      nv_connector->use_dithering ? DRM_MODE_DITHERING_ON
					      : DRM_MODE_DITHERING_OFF);

	} else {
		nv_connector->scaling_mode = DRM_MODE_SCALE_NONE;

		/* NV50+ can scale VGA too */
		if (type == DRM_MODE_CONNECTOR_VGA  &&
		    dev_priv->card_type >= NV_50) {
			drm_connector_attach_property(connector,
					dev->mode_config.scaling_mode_property,
					nv_connector->scaling_mode);
		}
	}

	/* attach encoders */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);

		if (nv_encoder->dcb->connector != index)
			continue;

		if (get_slave_funcs(nv_encoder))
			get_slave_funcs(nv_encoder)->create_resources(encoder, connector);

		drm_mode_connector_attach_encoder(connector, encoder);
	}

	drm_sysfs_connector_add(connector);

	/* LVDS needs its native mode resolved now; failure is fatal
	 * for this connector and tears it back down.
	 */
	if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
		ret = nouveau_connector_create_lvds(dev, connector);
		if (ret) {
			connector->funcs->destroy(connector);
			return ret;
		}
	}

	return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
new file mode 100644
index 000000000000..728b8090e5ff
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -0,0 +1,54 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#ifndef __NOUVEAU_CONNECTOR_H__
28#define __NOUVEAU_CONNECTOR_H__
29
30#include "drm_edid.h"
31#include "nouveau_i2c.h"
32
/* Driver-private connector state wrapping the DRM core connector. */
struct nouveau_connector {
	struct drm_connector base;	/* must stay first: container_of() */

	struct dcb_connector_table_entry *dcb;	/* VBIOS DCB entry, may be NULL */

	int scaling_mode;	/* DRM_MODE_SCALE_* currently selected */
	bool use_dithering;	/* dithering property state */

	struct nouveau_encoder *detected_encoder;	/* set by detect() */
	struct edid *edid;				/* last EDID read, or NULL */
	struct drm_display_mode *native_mode;		/* panel native mode, or NULL */
};
45
/* Upcast a DRM core connector to its enclosing nouveau_connector. */
static inline struct nouveau_connector *nouveau_connector(
						struct drm_connector *con)
{
	return container_of(con, struct nouveau_connector, base);
}
51
52int nouveau_connector_create(struct drm_device *dev, int i2c_index, int type);
53
54#endif /* __NOUVEAU_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h
new file mode 100644
index 000000000000..49fa7b2d257e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h
@@ -0,0 +1,95 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#ifndef __NOUVEAU_CRTC_H__
28#define __NOUVEAU_CRTC_H__
29
/* Driver-private wrapper around a DRM CRTC (one display head). */
struct nouveau_crtc {
	struct drm_crtc base;	/* must be first: nouveau_crtc() uses container_of() */

	int index;	/* head index */

	struct drm_display_mode *mode;

	uint32_t dpms_saved_fp_control;	/* NOTE(review): FP control saved across DPMS -- confirm */
	uint32_t fp_users;
	int saturation;
	int sharpness;
	int last_dpms;

	/* scanout surface state */
	struct {
		int cpp;		/* NOTE(review): presumably bytes per pixel -- confirm */
		bool blanked;
		uint32_t offset;
		uint32_t tile_flags;
	} fb;

	/* hardware cursor; ops filled in by nv04/nv50 cursor init (see below) */
	struct {
		struct nouveau_bo *nvbo;
		bool visible;
		uint32_t offset;
		void (*set_offset)(struct nouveau_crtc *, uint32_t offset);
		void (*set_pos)(struct nouveau_crtc *, int x, int y);
		void (*hide)(struct nouveau_crtc *, bool update);
		void (*show)(struct nouveau_crtc *, bool update);
	} cursor;

	/* 256-entry gamma lookup table */
	struct {
		struct nouveau_bo *nvbo;
		uint16_t r[256];
		uint16_t g[256];
		uint16_t b[256];
		int depth;
	} lut;

	int (*set_dither)(struct nouveau_crtc *crtc, bool on, bool update);
	int (*set_scale)(struct nouveau_crtc *crtc, int mode, bool update);
};
71
72static inline struct nouveau_crtc *nouveau_crtc(struct drm_crtc *crtc)
73{
74 return container_of(crtc, struct nouveau_crtc, base);
75}
76
77static inline struct drm_crtc *to_drm_crtc(struct nouveau_crtc *crtc)
78{
79 return &crtc->base;
80}
81
82int nv50_crtc_create(struct drm_device *dev, int index);
83int nv50_cursor_init(struct nouveau_crtc *);
84void nv50_cursor_fini(struct nouveau_crtc *);
85int nv50_crtc_cursor_set(struct drm_crtc *drm_crtc, struct drm_file *file_priv,
86 uint32_t buffer_handle, uint32_t width,
87 uint32_t height);
88int nv50_crtc_cursor_move(struct drm_crtc *drm_crtc, int x, int y);
89
90int nv04_cursor_init(struct nouveau_crtc *);
91
92struct nouveau_connector *
93nouveau_crtc_connector_get(struct nouveau_crtc *crtc);
94
95#endif /* __NOUVEAU_CRTC_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
new file mode 100644
index 000000000000..d79db3698f16
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -0,0 +1,155 @@
1/*
2 * Copyright (C) 2009 Red Hat <bskeggs@redhat.com>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining
5 * a copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sublicense, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial
14 * portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
19 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
20 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
21 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
22 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23 *
24 */
25
26/*
27 * Authors:
28 * Ben Skeggs <bskeggs@redhat.com>
29 */
30
31#include <linux/debugfs.h>
32
33#include "drmP.h"
34#include "nouveau_drv.h"
35
/*
 * debugfs show() callback: dump one channel's software and hardware
 * FIFO state.  node->info_ent->data is the channel pointer, set up in
 * nouveau_debugfs_channel_init().  The dma.* indices are dword counts,
 * hence the << 2 to print byte offsets.
 */
static int
nouveau_debugfs_channel_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct nouveau_channel *chan = node->info_ent->data;

	seq_printf(m, "channel id : %d\n", chan->id);

	seq_printf(m, "cpu fifo state:\n");
	seq_printf(m, " base: 0x%08x\n", chan->pushbuf_base);
	seq_printf(m, " max: 0x%08x\n", chan->dma.max << 2);
	seq_printf(m, " cur: 0x%08x\n", chan->dma.cur << 2);
	seq_printf(m, " put: 0x%08x\n", chan->dma.put << 2);
	seq_printf(m, " free: 0x%08x\n", chan->dma.free << 2);

	/* hardware view: read GET/PUT back from the channel's user regs */
	seq_printf(m, "gpu fifo state:\n");
	seq_printf(m, " get: 0x%08x\n",
			nvchan_rd32(chan, chan->user_get));
	seq_printf(m, " put: 0x%08x\n",
			nvchan_rd32(chan, chan->user_put));

	seq_printf(m, "last fence : %d\n", chan->fence.sequence);
	seq_printf(m, "last signalled: %d\n", chan->fence.sequence_ack);
	return 0;
}
61
62int
63nouveau_debugfs_channel_init(struct nouveau_channel *chan)
64{
65 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
66 struct drm_minor *minor = chan->dev->primary;
67 int ret;
68
69 if (!dev_priv->debugfs.channel_root) {
70 dev_priv->debugfs.channel_root =
71 debugfs_create_dir("channel", minor->debugfs_root);
72 if (!dev_priv->debugfs.channel_root)
73 return -ENOENT;
74 }
75
76 snprintf(chan->debugfs.name, 32, "%d", chan->id);
77 chan->debugfs.info.name = chan->debugfs.name;
78 chan->debugfs.info.show = nouveau_debugfs_channel_info;
79 chan->debugfs.info.driver_features = 0;
80 chan->debugfs.info.data = chan;
81
82 ret = drm_debugfs_create_files(&chan->debugfs.info, 1,
83 dev_priv->debugfs.channel_root,
84 chan->dev->primary);
85 if (ret == 0)
86 chan->debugfs.active = true;
87 return ret;
88}
89
90void
91nouveau_debugfs_channel_fini(struct nouveau_channel *chan)
92{
93 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
94
95 if (!chan->debugfs.active)
96 return;
97
98 drm_debugfs_remove_files(&chan->debugfs.info, 1, chan->dev->primary);
99 chan->debugfs.active = false;
100
101 if (chan == dev_priv->channel) {
102 debugfs_remove(dev_priv->debugfs.channel_root);
103 dev_priv->debugfs.channel_root = NULL;
104 }
105}
106
107static int
108nouveau_debugfs_chipset_info(struct seq_file *m, void *data)
109{
110 struct drm_info_node *node = (struct drm_info_node *) m->private;
111 struct drm_minor *minor = node->minor;
112 struct drm_device *dev = minor->dev;
113 struct drm_nouveau_private *dev_priv = dev->dev_private;
114 uint32_t ppci_0;
115
116 ppci_0 = nv_rd32(dev, dev_priv->chipset >= 0x40 ? 0x88000 : 0x1800);
117
118 seq_printf(m, "PMC_BOOT_0: 0x%08x\n", nv_rd32(dev, NV03_PMC_BOOT_0));
119 seq_printf(m, "PCI ID : 0x%04x:0x%04x\n",
120 ppci_0 & 0xffff, ppci_0 >> 16);
121 return 0;
122}
123
124static int
125nouveau_debugfs_memory_info(struct seq_file *m, void *data)
126{
127 struct drm_info_node *node = (struct drm_info_node *) m->private;
128 struct drm_minor *minor = node->minor;
129 struct drm_device *dev = minor->dev;
130
131 seq_printf(m, "VRAM total: %dKiB\n",
132 (int)(nouveau_mem_fb_amount(dev) >> 10));
133 return 0;
134}
135
/* Device-wide (non-per-channel) debugfs entries. */
static struct drm_info_list nouveau_debugfs_list[] = {
	{ "chipset", nouveau_debugfs_chipset_info, 0, NULL },
	{ "memory", nouveau_debugfs_memory_info, 0, NULL },
};
#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
141
142int
143nouveau_debugfs_init(struct drm_minor *minor)
144{
145 drm_debugfs_create_files(nouveau_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES,
146 minor->debugfs_root, minor);
147 return 0;
148}
149
/* Remove the device-wide debugfs entries registered in nouveau_debugfs_init(). */
void
nouveau_debugfs_takedown(struct drm_minor *minor)
{
	drm_debugfs_remove_files(nouveau_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES,
				 minor);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
new file mode 100644
index 000000000000..dfc94391d71e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -0,0 +1,115 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm_crtc_helper.h"
29#include "nouveau_drv.h"
30#include "nouveau_fb.h"
31#include "nouveau_fbcon.h"
32
/*
 * drm_framebuffer_funcs.destroy: tear down a nouveau framebuffer.
 * Removes the fbcon wrapper first if this fb backs the console, then
 * drops the reference on the backing GEM object (unreference is done
 * under struct_mutex here), and finally frees the wrapper itself.
 */
static void
nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
{
	struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
	struct drm_device *dev = drm_fb->dev;

	if (drm_fb->fbdev)
		nouveau_fbcon_remove(dev, drm_fb);

	if (fb->nvbo) {
		mutex_lock(&dev->struct_mutex);
		drm_gem_object_unreference(fb->nvbo->gem);
		mutex_unlock(&dev->struct_mutex);
	}

	drm_framebuffer_cleanup(drm_fb);
	kfree(fb);
}
51
52static int
53nouveau_user_framebuffer_create_handle(struct drm_framebuffer *drm_fb,
54 struct drm_file *file_priv,
55 unsigned int *handle)
56{
57 struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
58
59 return drm_gem_handle_create(file_priv, fb->nvbo->gem, handle);
60}
61
/* Framebuffer vtable shared by all nouveau framebuffers. */
static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
	.destroy = nouveau_user_framebuffer_destroy,
	.create_handle = nouveau_user_framebuffer_create_handle,
};
66
67struct drm_framebuffer *
68nouveau_framebuffer_create(struct drm_device *dev, struct nouveau_bo *nvbo,
69 struct drm_mode_fb_cmd *mode_cmd)
70{
71 struct nouveau_framebuffer *fb;
72 int ret;
73
74 fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL);
75 if (!fb)
76 return NULL;
77
78 ret = drm_framebuffer_init(dev, &fb->base, &nouveau_framebuffer_funcs);
79 if (ret) {
80 kfree(fb);
81 return NULL;
82 }
83
84 drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
85
86 fb->nvbo = nvbo;
87 return &fb->base;
88}
89
90static struct drm_framebuffer *
91nouveau_user_framebuffer_create(struct drm_device *dev,
92 struct drm_file *file_priv,
93 struct drm_mode_fb_cmd *mode_cmd)
94{
95 struct drm_framebuffer *fb;
96 struct drm_gem_object *gem;
97
98 gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
99 if (!gem)
100 return NULL;
101
102 fb = nouveau_framebuffer_create(dev, nouveau_gem_object(gem), mode_cmd);
103 if (!fb) {
104 drm_gem_object_unreference(gem);
105 return NULL;
106 }
107
108 return fb;
109}
110
/* Mode-config vtable: user fb creation + fbcon probe hook. */
const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
	.fb_create = nouveau_user_framebuffer_create,
	.fb_changed = nouveau_fbcon_probe,
};
115
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
new file mode 100644
index 000000000000..703553687b20
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -0,0 +1,206 @@
1/*
2 * Copyright (C) 2007 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm.h"
29#include "nouveau_drv.h"
30#include "nouveau_dma.h"
31
/*
 * One-time DMA setup for a freshly created channel: instantiate the
 * M2MF graph object and its notifier, map the push buffer, initialise
 * the software ring-state counters, and emit the initial commands that
 * bind M2MF to its subchannel.  Returns 0 or a negative errno; the
 * sequencing below is load-bearing (objects before ring writes).
 */
int
nouveau_dma_init(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *m2mf = NULL;
	int ret, i;

	/* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves
	 * (class 0x5039 on NV50+, 0x0039 before) */
	ret = nouveau_gpuobj_gr_new(chan, dev_priv->card_type < NV_50 ?
				    0x0039 : 0x5039, &m2mf);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_ref_add(dev, chan, NvM2MF, m2mf, NULL);
	if (ret)
		return ret;

	/* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
	ret = nouveau_notifier_alloc(chan, NvNotify0, 32, &chan->m2mf_ntfy);
	if (ret)
		return ret;

	/* Map push buffer */
	ret = nouveau_bo_map(chan->pushbuf_bo);
	if (ret)
		return ret;

	/* Map M2MF notifier object - fbcon. */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = nouveau_bo_map(chan->notifier_bo);
		if (ret)
			return ret;
	}

	/* Initialise DMA vars: dword-granular cursor/put/free counters.
	 * max leaves 2 dwords spare for the wrap-around jump command. */
	chan->dma.max = (chan->pushbuf_bo->bo.mem.size >> 2) - 2;
	chan->dma.put = 0;
	chan->dma.cur = chan->dma.put;
	chan->dma.free = chan->dma.max - chan->dma.cur;

	/* Insert NOPS for NOUVEAU_DMA_SKIPS (see nouveau_dma.h for why) */
	ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
	if (ret)
		return ret;

	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
		OUT_RING(chan, 0);

	/* Initialise NV_MEMORY_TO_MEMORY_FORMAT: bind the object and
	 * its notifier on the M2MF subchannel */
	ret = RING_SPACE(chan, 4);
	if (ret)
		return ret;
	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1);
	OUT_RING(chan, NvM2MF);
	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 1);
	OUT_RING(chan, NvNotify0);

	/* Sit back and pray the channel works.. */
	FIRE_RING(chan);

	return 0;
}
95
96void
97OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
98{
99 bool is_iomem;
100 u32 *mem = ttm_kmap_obj_virtual(&chan->pushbuf_bo->kmap, &is_iomem);
101 mem = &mem[chan->dma.cur];
102 if (is_iomem)
103 memcpy_toio((void __force __iomem *)mem, data, nr_dwords * 4);
104 else
105 memcpy(mem, data, nr_dwords * 4);
106 chan->dma.cur += nr_dwords;
107}
108
109static inline bool
110READ_GET(struct nouveau_channel *chan, uint32_t *get)
111{
112 uint32_t val;
113
114 val = nvchan_rd32(chan, chan->user_get);
115 if (val < chan->pushbuf_base ||
116 val >= chan->pushbuf_base + chan->pushbuf_bo->bo.mem.size) {
117 /* meaningless to dma_wait() except to know whether the
118 * GPU has stalled or not
119 */
120 *get = val;
121 return false;
122 }
123
124 *get = (val - chan->pushbuf_base) >> 2;
125 return true;
126}
127
/*
 * Wait until at least `size` dwords are free in the channel's push
 * buffer, emitting a wrap-around jump when the free space is at the
 * end of the buffer.  Returns 0 on success, -EBUSY if GET stops
 * advancing for ~100000 polls (GPU lockup).  The wrap handling below
 * is order-sensitive; treat it as a unit.
 */
int
nouveau_dma_wait(struct nouveau_channel *chan, int size)
{
	uint32_t get, prev_get = 0, cnt = 0;
	bool get_valid;

	while (chan->dma.free < size) {
		/* reset counter as long as GET is still advancing, this is
		 * to avoid misdetecting a GPU lockup if the GPU happens to
		 * just be processing an operation that takes a long time
		 */
		get_valid = READ_GET(chan, &get);
		if (get != prev_get) {
			prev_get = get;
			cnt = 0;
		}

		/* back off every 256 polls; give up after 100000 */
		if ((++cnt & 0xff) == 0) {
			DRM_UDELAY(1);
			if (cnt > 100000)
				return -EBUSY;
		}

		/* loop until we have a usable GET pointer. the value
		 * we read from the GPU may be outside the main ring if
		 * PFIFO is processing a buffer called from the main ring,
		 * discard these values until something sensible is seen.
		 *
		 * the other case we discard GET is while the GPU is fetching
		 * from the SKIPS area, so the code below doesn't have to deal
		 * with some fun corner cases.
		 */
		if (!get_valid || get < NOUVEAU_DMA_SKIPS)
			continue;

		if (get <= chan->dma.cur) {
			/* engine is fetching behind us, or is completely
			 * idle (GET == PUT) so we have free space up until
			 * the end of the push buffer
			 *
			 * we can only hit that path once per call due to
			 * looping back to the beginning of the push buffer,
			 * we'll hit the fetching-ahead-of-us path from that
			 * point on.
			 *
			 * the *one* exception to that rule is if we read
			 * GET==PUT, in which case the below conditional will
			 * always succeed and break us out of the wait loop.
			 */
			chan->dma.free = chan->dma.max - chan->dma.cur;
			if (chan->dma.free >= size)
				break;

			/* not enough space left at the end of the push buffer,
			 * instruct the GPU to jump back to the start right
			 * after processing the currently pending commands.
			 * (0x20000000 marks the dword as a jump command --
			 * NOTE(review): confirm against hw docs)
			 */
			OUT_RING(chan, chan->pushbuf_base | 0x20000000);
			WRITE_PUT(NOUVEAU_DMA_SKIPS);

			/* we're now submitting commands at the start of
			 * the push buffer.
			 */
			chan->dma.cur =
				chan->dma.put = NOUVEAU_DMA_SKIPS;
		}

		/* engine fetching ahead of us, we have space up until the
		 * current GET pointer. the "- 1" is to ensure there's
		 * space left to emit a jump back to the beginning of the
		 * push buffer if we require it. we can never get GET == PUT
		 * here, so this is safe.
		 */
		chan->dma.free = get - chan->dma.cur - 1;
	}

	return 0;
}
206
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
new file mode 100644
index 000000000000..04e85d8f757e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -0,0 +1,157 @@
1/*
2 * Copyright (C) 2007 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#ifndef __NOUVEAU_DMA_H__
28#define __NOUVEAU_DMA_H__
29
30#ifndef NOUVEAU_DMA_DEBUG
31#define NOUVEAU_DMA_DEBUG 0
32#endif
33
34/*
35 * There's a hw race condition where you can't jump to your PUT offset,
36 * to avoid this we jump to offset + SKIPS and fill the difference with
37 * NOPs.
38 *
39 * xf86-video-nv configures the DMA fetch size to 32 bytes, and uses
40 * a SKIPS value of 8. Lets assume that the race condition is to do
41 * with writing into the fetch area, we configure a fetch size of 128
42 * bytes so we need a larger SKIPS value.
43 */
44#define NOUVEAU_DMA_SKIPS (128 / 4)
45
/* Hardcoded object assignments to subchannels (subchannel id).
 * NvSub2D and NvSubCtxSurf2D deliberately share subchannel 1
 * (2D on NV50+, context surfaces on earlier chips -- NOTE(review):
 * confirm the split against the fbcon accel code). */
enum {
	NvSubM2MF	= 0,
	NvSub2D		= 1,
	NvSubCtxSurf2D	= 1,
	NvSubGdiRect	= 2,
	NvSubImageBlit	= 3
};

/* Object handles.  The 0x8000000x values are driver-chosen channel
 * object handles; the 0x010000xx values are G80+ display objects. */
enum {
	NvM2MF		= 0x80000001,
	NvDmaFB		= 0x80000002,
	NvDmaTT		= 0x80000003,
	NvDmaVRAM	= 0x80000004,
	NvDmaGART	= 0x80000005,
	NvNotify0	= 0x80000006,
	Nv2D		= 0x80000007,
	NvCtxSurf2D	= 0x80000008,
	NvRop		= 0x80000009,
	NvImagePatt	= 0x8000000a,
	NvClipRect	= 0x8000000b,
	NvGdiRect	= 0x8000000c,
	NvImageBlit	= 0x8000000d,

	/* G80+ display objects */
	NvEvoVRAM	= 0x01000000,
	NvEvoFB16	= 0x01000001,
	NvEvoFB32	= 0x01000002
};

/* NV_MEMORY_TO_MEMORY_FORMAT class and method offsets */
#define NV_MEMORY_TO_MEMORY_FORMAT                                  0x00000039
#define NV_MEMORY_TO_MEMORY_FORMAT_NAME                             0x00000000
#define NV_MEMORY_TO_MEMORY_FORMAT_SET_REF                          0x00000050
#define NV_MEMORY_TO_MEMORY_FORMAT_NOP                              0x00000100
#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY                           0x00000104
#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE               0x00000000
#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE_LE_AWAKEN     0x00000001
#define NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY                       0x00000180
#define NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE                       0x00000184
#define NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN                        0x0000030c

/* NV50 variant of the M2MF class */
#define NV50_MEMORY_TO_MEMORY_FORMAT                                0x00005039
#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK200                         0x00000200
#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK21C                         0x0000021c
#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN_HIGH                 0x00000238
#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_OUT_HIGH                0x0000023c
93
94static __must_check inline int
95RING_SPACE(struct nouveau_channel *chan, int size)
96{
97 if (chan->dma.free < size) {
98 int ret;
99
100 ret = nouveau_dma_wait(chan, size);
101 if (ret)
102 return ret;
103 }
104
105 chan->dma.free -= size;
106 return 0;
107}
108
109static inline void
110OUT_RING(struct nouveau_channel *chan, int data)
111{
112 if (NOUVEAU_DMA_DEBUG) {
113 NV_INFO(chan->dev, "Ch%d/0x%08x: 0x%08x\n",
114 chan->id, chan->dma.cur << 2, data);
115 }
116
117 nouveau_bo_wr32(chan->pushbuf_bo, chan->dma.cur++, data);
118}
119
120extern void
121OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords);
122
/* Emit a method header: method offset in the low bits, subchannel at
 * bit 13, dword count at bit 18. */
static inline void
BEGIN_RING(struct nouveau_channel *chan, int subc, int mthd, int size)
{
	OUT_RING(chan, mthd | (subc << 13) | (size << 18));
}
128
/* Advance the hardware PUT pointer to dword index `val`.  The barrier
 * plus the dummy pushbuf read come first so prior command writes are
 * ordered before PUT moves (NOTE(review): the read presumably flushes
 * posted writes -- confirm).  `val` is converted from a dword index to
 * a byte offset within the channel's pushbuf. */
#define WRITE_PUT(val) do { \
	DRM_MEMORYBARRIER(); \
	nouveau_bo_rd32(chan->pushbuf_bo, 0); \
	nvchan_wr32(chan, chan->user_put, ((val) << 2) + chan->pushbuf_base); \
} while (0)
134
/*
 * Submit everything emitted since the last FIRE_RING by advancing the
 * hardware PUT pointer to the current cursor.  No-op when nothing new
 * was emitted (cur == put).
 */
static inline void
FIRE_RING(struct nouveau_channel *chan)
{
	if (NOUVEAU_DMA_DEBUG) {
		NV_INFO(chan->dev, "Ch%d/0x%08x: PUSH!\n",
			chan->id, chan->dma.cur << 2);
	}

	if (chan->dma.cur == chan->dma.put)
		return;
	chan->accel_done = true;	/* remember the GPU was given work */

	WRITE_PUT(chan->dma.cur);
	chan->dma.put = chan->dma.cur;
}
150
/* Discard commands emitted since the last FIRE_RING by rewinding the
 * cursor to the last submitted position. */
static inline void
WIND_RING(struct nouveau_channel *chan)
{
	chan->dma.cur = chan->dma.put;
}
156
157#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
new file mode 100644
index 000000000000..de61f4640e12
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -0,0 +1,569 @@
1/*
2 * Copyright 2009 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_i2c.h"
28#include "nouveau_encoder.h"
29
30static int
31auxch_rd(struct drm_encoder *encoder, int address, uint8_t *buf, int size)
32{
33 struct drm_device *dev = encoder->dev;
34 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
35 struct nouveau_i2c_chan *auxch;
36 int ret;
37
38 auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
39 if (!auxch)
40 return -ENODEV;
41
42 ret = nouveau_dp_auxch(auxch, 9, address, buf, size);
43 if (ret)
44 return ret;
45
46 return 0;
47}
48
49static int
50auxch_wr(struct drm_encoder *encoder, int address, uint8_t *buf, int size)
51{
52 struct drm_device *dev = encoder->dev;
53 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
54 struct nouveau_i2c_chan *auxch;
55 int ret;
56
57 auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
58 if (!auxch)
59 return -ENODEV;
60
61 ret = nouveau_dp_auxch(auxch, 8, address, buf, size);
62 return ret;
63}
64
/*
 * Program the lane count on both the source (SOR control register)
 * and the sink (DPCD LANE_COUNT_SET).  `cmd` is the raw DPCD value:
 * lane count in the low bits, optionally OR'd with
 * DP_LANE_COUNT_ENHANCED_FRAME_EN.
 */
static int
nouveau_dp_lane_count_set(struct drm_encoder *encoder, uint8_t cmd)
{
	struct drm_device *dev = encoder->dev;
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	uint32_t tmp;
	int or = nv_encoder->or, link = !(nv_encoder->dcb->sorconf.link & 1);

	tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
	tmp &= ~(NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED |
		 NV50_SOR_DP_CTRL_LANE_MASK);
	/* ((1 << n) - 1) << 16: one enable bit per active lane */
	tmp |= ((1 << (cmd & DP_LANE_COUNT_MASK)) - 1) << 16;
	if (cmd & DP_LANE_COUNT_ENHANCED_FRAME_EN)
		tmp |= NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED;
	nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp);

	/* mirror the setting into the sink's DPCD */
	return auxch_wr(encoder, DP_LANE_COUNT_SET, &cmd, 1);
}
83
/*
 * Program the link rate on both the source and the sink (DPCD
 * LINK_BW_SET).  `cmd` is the raw DPCD value (e.g. DP_LINK_BW_2_7).
 */
static int
nouveau_dp_link_bw_set(struct drm_encoder *encoder, uint8_t cmd)
{
	struct drm_device *dev = encoder->dev;
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	uint32_t tmp;
	/* per-OR clock control register -- NOTE(review): 0x614300 is an
	 * undocumented NV50 display register, confirm against rnndb */
	int reg = 0x614300 + (nv_encoder->or * 0x800);

	tmp = nv_rd32(dev, reg);
	tmp &= 0xfff3ffff;		/* clear bits 19:18 */
	if (cmd == DP_LINK_BW_2_7)
		tmp |= 0x00040000;	/* bit 18 selects 2.7 Gbps */
	nv_wr32(dev, reg, tmp);

	return auxch_wr(encoder, DP_LINK_BW_SET, &cmd, 1);
}
100
/*
 * Select a link-training pattern on both the source (SOR control,
 * bits 25:24) and the sink (DPCD TRAINING_PATTERN_SET, preserving the
 * other bits of that register via read-modify-write).
 */
static int
nouveau_dp_link_train_set(struct drm_encoder *encoder, int pattern)
{
	struct drm_device *dev = encoder->dev;
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	uint32_t tmp;
	uint8_t cmd;
	int or = nv_encoder->or, link = !(nv_encoder->dcb->sorconf.link & 1);
	int ret;

	tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
	tmp &= ~NV50_SOR_DP_CTRL_TRAINING_PATTERN;
	tmp |= (pattern << 24);
	nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp);

	ret = auxch_rd(encoder, DP_TRAINING_PATTERN_SET, &cmd, 1);
	if (ret)
		return ret;
	cmd &= ~DP_TRAINING_PATTERN_MASK;
	cmd |= (pattern & DP_TRAINING_PATTERN_MASK);
	return auxch_wr(encoder, DP_TRAINING_PATTERN_SET, &cmd, 1);
}
123
124static int
125nouveau_dp_max_voltage_swing(struct drm_encoder *encoder)
126{
127 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
128 struct drm_device *dev = encoder->dev;
129 struct bit_displayport_encoder_table_entry *dpse;
130 struct bit_displayport_encoder_table *dpe;
131 int i, dpe_headerlen, max_vs = 0;
132
133 dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
134 if (!dpe)
135 return false;
136 dpse = (void *)((char *)dpe + dpe_headerlen);
137
138 for (i = 0; i < dpe_headerlen; i++, dpse++) {
139 if (dpse->vs_level > max_vs)
140 max_vs = dpse->vs_level;
141 }
142
143 return max_vs;
144}
145
146static int
147nouveau_dp_max_pre_emphasis(struct drm_encoder *encoder, int vs)
148{
149 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
150 struct drm_device *dev = encoder->dev;
151 struct bit_displayport_encoder_table_entry *dpse;
152 struct bit_displayport_encoder_table *dpe;
153 int i, dpe_headerlen, max_pre = 0;
154
155 dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
156 if (!dpe)
157 return false;
158 dpse = (void *)((char *)dpe + dpe_headerlen);
159
160 for (i = 0; i < dpe_headerlen; i++, dpse++) {
161 if (dpse->vs_level != vs)
162 continue;
163
164 if (dpse->pre_level > max_pre)
165 max_pre = dpse->pre_level;
166 }
167
168 return max_pre;
169}
170
171static bool
172nouveau_dp_link_train_adjust(struct drm_encoder *encoder, uint8_t *config)
173{
174 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
175 struct drm_device *dev = encoder->dev;
176 struct bit_displayport_encoder_table_entry *dpse;
177 struct bit_displayport_encoder_table *dpe;
178 int ret, i, dpe_headerlen, vs = 0, pre = 0;
179 uint8_t request[2];
180
181 dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
182 if (!dpe)
183 return false;
184 dpse = (void *)((char *)dpe + dpe_headerlen);
185
186 ret = auxch_rd(encoder, DP_ADJUST_REQUEST_LANE0_1, request, 2);
187 if (ret)
188 return false;
189
190 NV_DEBUG(dev, "\t\tadjust 0x%02x 0x%02x\n", request[0], request[1]);
191
192 /* Keep all lanes at the same level.. */
193 for (i = 0; i < nv_encoder->dp.link_nr; i++) {
194 int lane_req = (request[i >> 1] >> ((i & 1) << 2)) & 0xf;
195 int lane_vs = lane_req & 3;
196 int lane_pre = (lane_req >> 2) & 3;
197
198 if (lane_vs > vs)
199 vs = lane_vs;
200 if (lane_pre > pre)
201 pre = lane_pre;
202 }
203
204 if (vs >= nouveau_dp_max_voltage_swing(encoder)) {
205 vs = nouveau_dp_max_voltage_swing(encoder);
206 vs |= 4;
207 }
208
209 if (pre >= nouveau_dp_max_pre_emphasis(encoder, vs & 3)) {
210 pre = nouveau_dp_max_pre_emphasis(encoder, vs & 3);
211 pre |= 4;
212 }
213
214 /* Update the configuration for all lanes.. */
215 for (i = 0; i < nv_encoder->dp.link_nr; i++)
216 config[i] = (pre << 3) | vs;
217
218 return true;
219}
220
/*
 * Apply the per-lane drive settings in config[] to the hardware
 * (using the matching VBIOS DP-table record for the register values)
 * and to the sink (DPCD TRAINING_LANE0_SET..).  Returns false if the
 * VBIOS table is missing or the AUX write fails; BUGs if config[0]
 * names a vs/pre combination the table doesn't contain.
 */
static bool
nouveau_dp_link_train_commit(struct drm_encoder *encoder, uint8_t *config)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_device *dev = encoder->dev;
	struct bit_displayport_encoder_table_entry *dpse;
	struct bit_displayport_encoder_table *dpe;
	int or = nv_encoder->or, link = !(nv_encoder->dcb->sorconf.link & 1);
	int dpe_headerlen, ret, i;

	NV_DEBUG(dev, "\t\tconfig 0x%02x 0x%02x 0x%02x 0x%02x\n",
		 config[0], config[1], config[2], config[3]);

	dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
	if (!dpe)
		return false;
	dpse = (void *)((char *)dpe + dpe_headerlen);

	/* find the table record matching the requested vs/pre levels
	 * (all lanes use the same levels, so config[0] suffices) */
	for (i = 0; i < dpe->record_nr; i++, dpse++) {
		if (dpse->vs_level == (config[0] & 3) &&
		    dpse->pre_level == ((config[0] >> 3) & 3))
			break;
	}
	BUG_ON(i == dpe->record_nr);

	for (i = 0; i < nv_encoder->dp.link_nr; i++) {
		/* per-lane byte position within the 32-bit registers;
		 * NOTE(review): lane-to-byte mapping {16,8,0,24} is taken
		 * on faith from the hardware layout -- confirm */
		const int shift[4] = { 16, 8, 0, 24 };
		uint32_t mask = 0xff << shift[i];
		uint32_t reg0, reg1, reg2;

		reg0 = nv_rd32(dev, NV50_SOR_DP_UNK118(or, link)) & ~mask;
		reg0 |= (dpse->reg0 << shift[i]);
		reg1 = nv_rd32(dev, NV50_SOR_DP_UNK120(or, link)) & ~mask;
		reg1 |= (dpse->reg1 << shift[i]);
		reg2 = nv_rd32(dev, NV50_SOR_DP_UNK130(or, link)) & 0xffff00ff;
		reg2 |= (dpse->reg2 << 8);
		nv_wr32(dev, NV50_SOR_DP_UNK118(or, link), reg0);
		nv_wr32(dev, NV50_SOR_DP_UNK120(or, link), reg1);
		nv_wr32(dev, NV50_SOR_DP_UNK130(or, link), reg2);
	}

	/* mirror the settings into the sink's DPCD */
	ret = auxch_wr(encoder, DP_TRAINING_LANE0_SET, config, 4);
	if (ret)
		return false;

	return true;
}
268
/*
 * Perform DisplayPort link training: program link bandwidth and lane
 * count, run the clock-recovery phase (TRAINING_PATTERN_1), then
 * channel equalisation (TRAINING_PATTERN_2), adjusting the per-lane
 * drive configuration from the sink's feedback between iterations.
 * If training fails at a rate above 1.62Gbps, the whole sequence is
 * retried at the lower rate.
 *
 * Returns true if channel equalisation completed.
 */
bool
nouveau_dp_link_train(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	uint8_t config[4];
	uint8_t status[3];
	bool cr_done, cr_max_vs, eq_done;
	int ret = 0, i, tries, voltage;

	NV_DEBUG(dev, "link training!!\n");
train:
	cr_done = eq_done = false;

	/* set link configuration */
	NV_DEBUG(dev, "\tbegin train: bw %d, lanes %d\n",
		 nv_encoder->dp.link_bw, nv_encoder->dp.link_nr);

	ret = nouveau_dp_link_bw_set(encoder, nv_encoder->dp.link_bw);
	if (ret)
		return false;

	config[0] = nv_encoder->dp.link_nr;
	/* DPCD 1.1+ sinks support enhanced framing */
	if (nv_encoder->dp.dpcd_version >= 0x11)
		config[0] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;

	ret = nouveau_dp_lane_count_set(encoder, config[0]);
	if (ret)
		return false;

	/* clock recovery */
	NV_DEBUG(dev, "\tbegin cr\n");
	ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_1);
	if (ret)
		goto stop;

	tries = 0;
	voltage = -1;
	memset(config, 0x00, sizeof(config));	/* start at minimum drive levels */
	for (;;) {
		if (!nouveau_dp_link_train_commit(encoder, config))
			break;

		udelay(100);

		ret = auxch_rd(encoder, DP_LANE0_1_STATUS, status, 2);
		if (ret)
			break;
		NV_DEBUG(dev, "\t\tstatus: 0x%02x 0x%02x\n",
			 status[0], status[1]);

		/* Check every active lane for clock recovery; note whether
		 * a failing lane has already hit its maximum drive level,
		 * in which case further retries are pointless. */
		cr_done = true;
		cr_max_vs = false;
		for (i = 0; i < nv_encoder->dp.link_nr; i++) {
			int lane = (status[i >> 1] >> ((i & 1) * 4)) & 0xf;

			if (!(lane & DP_LANE_CR_DONE)) {
				cr_done = false;
				if (config[i] & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED)
					cr_max_vs = true;
				break;
			}
		}

		/* restart the retry counter whenever voltage swing changes */
		if ((config[0] & DP_TRAIN_VOLTAGE_SWING_MASK) != voltage) {
			voltage = config[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
			tries = 0;
		}

		if (cr_done || cr_max_vs || (++tries == 5))
			break;

		if (!nouveau_dp_link_train_adjust(encoder, config))
			break;
	}

	if (!cr_done)
		goto stop;

	/* channel equalisation */
	NV_DEBUG(dev, "\tbegin eq\n");
	ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_2);
	if (ret)
		goto stop;

	for (tries = 0; tries <= 5; tries++) {
		udelay(400);

		/* three status bytes: lanes 0/1, lanes 2/3, align status */
		ret = auxch_rd(encoder, DP_LANE0_1_STATUS, status, 3);
		if (ret)
			break;
		NV_DEBUG(dev, "\t\tstatus: 0x%02x 0x%02x\n",
			 status[0], status[1]);

		eq_done = true;
		if (!(status[2] & DP_INTERLANE_ALIGN_DONE))
			eq_done = false;

		/* A lane dropping clock recovery aborts equalisation
		 * entirely (cr_done false ends the loop below). */
		for (i = 0; eq_done && i < nv_encoder->dp.link_nr; i++) {
			int lane = (status[i >> 1] >> ((i & 1) * 4)) & 0xf;

			if (!(lane & DP_LANE_CR_DONE)) {
				cr_done = false;
				break;
			}

			if (!(lane & DP_LANE_CHANNEL_EQ_DONE) ||
			    !(lane & DP_LANE_SYMBOL_LOCKED)) {
				eq_done = false;
				break;
			}
		}

		if (eq_done || !cr_done)
			break;

		if (!nouveau_dp_link_train_adjust(encoder, config) ||
		    !nouveau_dp_link_train_commit(encoder, config))
			break;
	}

stop:
	/* end link training */
	ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_DISABLE);
	if (ret)
		return false;

	/* retry at a lower setting, if possible */
	if (!ret && !(eq_done && cr_done)) {
		NV_DEBUG(dev, "\twe failed\n");
		if (nv_encoder->dp.link_bw != DP_LINK_BW_1_62) {
			NV_DEBUG(dev, "retry link training at low rate\n");
			nv_encoder->dp.link_bw = DP_LINK_BW_1_62;
			goto train;
		}
	}

	return eq_done;
}
408
409bool
410nouveau_dp_detect(struct drm_encoder *encoder)
411{
412 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
413 struct drm_device *dev = encoder->dev;
414 uint8_t dpcd[4];
415 int ret;
416
417 ret = auxch_rd(encoder, 0x0000, dpcd, 4);
418 if (ret)
419 return false;
420
421 NV_DEBUG(dev, "encoder: link_bw %d, link_nr %d\n"
422 "display: link_bw %d, link_nr %d version 0x%02x\n",
423 nv_encoder->dcb->dpconf.link_bw,
424 nv_encoder->dcb->dpconf.link_nr,
425 dpcd[1], dpcd[2] & 0x0f, dpcd[0]);
426
427 nv_encoder->dp.dpcd_version = dpcd[0];
428
429 nv_encoder->dp.link_bw = dpcd[1];
430 if (nv_encoder->dp.link_bw != DP_LINK_BW_1_62 &&
431 !nv_encoder->dcb->dpconf.link_bw)
432 nv_encoder->dp.link_bw = DP_LINK_BW_1_62;
433
434 nv_encoder->dp.link_nr = dpcd[2] & 0xf;
435 if (nv_encoder->dp.link_nr > nv_encoder->dcb->dpconf.link_nr)
436 nv_encoder->dp.link_nr = nv_encoder->dcb->dpconf.link_nr;
437
438 return true;
439}
440
/*
 * Execute one transaction on an NV50 AUX channel.
 *
 * cmd encodes the AUX request type (bit 0 set = read, clear = write);
 * data/data_nr describe the payload, at most 16 bytes (the size of
 * the channel's four 32-bit data registers).  The request is
 * resubmitted for as long as the sink replies AUX_DEFER.
 *
 * Returns a negative errno on failure, otherwise the REPLY field of
 * the channel's status register.
 */
int
nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
		 uint8_t *data, int data_nr)
{
	struct drm_device *dev = auxch->dev;
	uint32_t tmp, ctrl, stat = 0, data32[4] = {};
	int ret = 0, i, index = auxch->rd;

	NV_DEBUG(dev, "ch %d cmd %d addr 0x%x len %d\n", index, cmd, addr, data_nr);

	/* Set bit 20 (presumably requests manual channel control — TODO
	 * confirm against NV50 docs); bit 24 should then read back set. */
	tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd));
	nv_wr32(dev, NV50_AUXCH_CTRL(auxch->rd), tmp | 0x00100000);
	tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd));
	if (!(tmp & 0x01000000)) {
		NV_ERROR(dev, "expected bit 24 == 1, got 0x%08x\n", tmp);
		ret = -EIO;
		goto out;
	}

	/* brief poll for the channel state machine to become ready */
	for (i = 0; i < 3; i++) {
		tmp = nv_rd32(dev, NV50_AUXCH_STAT(auxch->rd));
		if (tmp & NV50_AUXCH_STAT_STATE_READY)
			break;
		udelay(100);
	}

	if (i == 3) {
		ret = -EBUSY;
		goto out;
	}

	/* writes: load the payload into the DATA_OUT registers */
	if (!(cmd & 1)) {
		memcpy(data32, data, data_nr);
		for (i = 0; i < 4; i++) {
			NV_DEBUG(dev, "wr %d: 0x%08x\n", i, data32[i]);
			nv_wr32(dev, NV50_AUXCH_DATA_OUT(index, i), data32[i]);
		}
	}

	nv_wr32(dev, NV50_AUXCH_ADDR(index), addr);
	ctrl = nv_rd32(dev, NV50_AUXCH_CTRL(index));
	ctrl &= ~(NV50_AUXCH_CTRL_CMD | NV50_AUXCH_CTRL_LEN);
	ctrl |= (cmd << NV50_AUXCH_CTRL_CMD_SHIFT);
	ctrl |= ((data_nr - 1) << NV50_AUXCH_CTRL_LEN_SHIFT);

	/* kick the transaction, retrying while the sink defers */
	for (;;) {
		nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x80000000);
		nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl);
		/* bit 16 triggers the request and clears on completion */
		nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x00010000);
		if (!nv_wait(NV50_AUXCH_CTRL(index), 0x00010000, 0x00000000)) {
			NV_ERROR(dev, "expected bit 16 == 0, got 0x%08x\n",
				 nv_rd32(dev, NV50_AUXCH_CTRL(index)));
			return -EBUSY;
		}

		udelay(400);

		stat = nv_rd32(dev, NV50_AUXCH_STAT(index));
		if ((stat & NV50_AUXCH_STAT_REPLY_AUX) !=
		    NV50_AUXCH_STAT_REPLY_AUX_DEFER)
			break;
	}

	/* reads: fetch the payload from the DATA_IN registers */
	if (cmd & 1) {
		for (i = 0; i < 4; i++) {
			data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i));
			NV_DEBUG(dev, "rd %d: 0x%08x\n", i, data32[i]);
		}
		memcpy(data, data32, data_nr);
	}

out:
	/* drop bit 20 again; bit 24 is expected to clear in response */
	tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd));
	nv_wr32(dev, NV50_AUXCH_CTRL(auxch->rd), tmp & ~0x00100000);
	tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd));
	if (tmp & 0x01000000) {
		NV_ERROR(dev, "expected bit 24 == 0, got 0x%08x\n", tmp);
		ret = -EIO;
	}

	udelay(400);

	return ret ? ret : (stat & NV50_AUXCH_STAT_REPLY);
}
525
/*
 * i2c-over-AUX transfer hook for the i2c_algo_dp_aux adapter.
 * Translates a single-byte i2c read/write into an AUX transaction,
 * setting MOT (middle-of-transaction) unless this byte carries STOP,
 * and retrying for as long as the sink defers.
 *
 * Returns 1 on ACK, a negative errno otherwise.
 */
int
nouveau_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
		      uint8_t write_byte, uint8_t *read_byte)
{
	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
	struct nouveau_i2c_chan *auxch = (struct nouveau_i2c_chan *)adapter;
	struct drm_device *dev = auxch->dev;
	int ret = 0, cmd, addr = algo_data->address;
	uint8_t *buf;

	if (mode == MODE_I2C_READ) {
		cmd = AUX_I2C_READ;
		buf = read_byte;
	} else {
		/* mode may still carry MODE_I2C_READ combined with
		 * START/STOP flags, hence the second check */
		cmd = (mode & MODE_I2C_READ) ? AUX_I2C_READ : AUX_I2C_WRITE;
		buf = &write_byte;
	}

	if (!(mode & MODE_I2C_STOP))
		cmd |= AUX_I2C_MOT;

	/* an address-only START requires no transfer; report success */
	if (mode & MODE_I2C_START)
		return 1;

	for (;;) {
		ret = nouveau_dp_auxch(auxch, cmd, addr, buf, 1);
		if (ret < 0)
			return ret;

		switch (ret & NV50_AUXCH_STAT_REPLY_I2C) {
		case NV50_AUXCH_STAT_REPLY_I2C_ACK:
			return 1;
		case NV50_AUXCH_STAT_REPLY_I2C_NACK:
			return -EREMOTEIO;
		case NV50_AUXCH_STAT_REPLY_I2C_DEFER:
			udelay(100);
			break;
		default:
			NV_ERROR(dev, "invalid auxch status: 0x%08x\n", ret);
			return -EREMOTEIO;
		}
	}
}
569
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
new file mode 100644
index 000000000000..35249c35118f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -0,0 +1,405 @@
1/*
2 * Copyright 2005 Stephane Marchesin.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25#include <linux/console.h>
26
27#include "drmP.h"
28#include "drm.h"
29#include "drm_crtc_helper.h"
30#include "nouveau_drv.h"
31#include "nouveau_hw.h"
32#include "nouveau_fb.h"
33#include "nouveau_fbcon.h"
34#include "nv50_display.h"
35
36#include "drm_pciids.h"
37
/* Module parameters; the MODULE_PARM_DESC strings are the user-facing
 * documentation shown by modinfo.  All are read-only after load (0400)
 * except reg_debug, which may be changed at runtime (0600). */

MODULE_PARM_DESC(noagp, "Disable AGP");
int nouveau_noagp;
module_param_named(noagp, nouveau_noagp, int, 0400);

MODULE_PARM_DESC(modeset, "Enable kernel modesetting");
static int nouveau_modeset = -1; /* kms */
module_param_named(modeset, nouveau_modeset, int, 0400);

MODULE_PARM_DESC(vbios, "Override default VBIOS location");
char *nouveau_vbios;
module_param_named(vbios, nouveau_vbios, charp, 0400);

MODULE_PARM_DESC(vram_pushbuf, "Force DMA push buffers to be in VRAM");
int nouveau_vram_pushbuf;
module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);

MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM");
int nouveau_vram_notify;
module_param_named(vram_notify, nouveau_vram_notify, int, 0400);

MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)");
int nouveau_duallink = 1;
module_param_named(duallink, nouveau_duallink, int, 0400);

MODULE_PARM_DESC(uscript_lvds, "LVDS output script table ID (>=GeForce 8)");
int nouveau_uscript_lvds = -1;
module_param_named(uscript_lvds, nouveau_uscript_lvds, int, 0400);

MODULE_PARM_DESC(uscript_tmds, "TMDS output script table ID (>=GeForce 8)");
int nouveau_uscript_tmds = -1;
module_param_named(uscript_tmds, nouveau_uscript_tmds, int, 0400);

MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
		 "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n"
		 "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n"
		 "\t\tDefault: PAL\n"
		 "\t\t*NOTE* Ignored for cards with external TV encoders.");
char *nouveau_tv_norm;
module_param_named(tv_norm, nouveau_tv_norm, charp, 0400);

MODULE_PARM_DESC(reg_debug, "Register access debug bitmask:\n"
		"\t\t0x1 mc, 0x2 video, 0x4 fb, 0x8 extdev,\n"
		"\t\t0x10 crtc, 0x20 ramdac, 0x40 vgacrtc, 0x80 rmvio,\n"
		"\t\t0x100 vgaattr, 0x200 EVO (G80+). ");
int nouveau_reg_debug;
module_param_named(reg_debug, nouveau_reg_debug, int, 0600);

/* per-CRTC framebuffers: declared but currently disabled */
int nouveau_fbpercrtc;
#if 0
module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400);
#endif
89
/* Match every NVIDIA (and SGS-branded) display-class PCI device;
 * there is no per-chipset id list. */
static struct pci_device_id pciidlist[] = {
	{
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID),
		.class = PCI_BASE_CLASS_DISPLAY << 16,
		.class_mask  = 0xff << 16,
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA_SGS, PCI_ANY_ID),
		.class = PCI_BASE_CLASS_DISPLAY << 16,
		.class_mask  = 0xff << 16,
	},
	{}
};

MODULE_DEVICE_TABLE(pci, pciidlist);

/* forward declaration; the full definition is further down */
static struct drm_driver driver;
107
/* PCI probe callback: hand the device to the DRM core, which performs
 * the actual driver setup through the hooks in 'driver'. */
static int __devinit
nouveau_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_dev(pdev, ent, &driver);
}
113
/* PCI remove callback: tear down the DRM device bound to this PCI
 * device via its drvdata pointer. */
static void
nouveau_pci_remove(struct pci_dev *pdev)
{
	drm_put_dev(pci_get_drvdata(pdev));
}
121
/*
 * PCI suspend handler (KMS only).  Unpins scanout buffers, evicts
 * VRAM, idles every user channel by fencing it, saves PFIFO/PGRAPH
 * contexts and GPU object/instance-memory state, then powers the
 * device down.  On error, acceleration is re-enabled before failing.
 */
static int
nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_channel *chan;
	struct drm_crtc *crtc;
	uint32_t fbdev_flags;
	int ret, i;

	/* only the KMS path supports suspend/resume */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (pm_state.event == PM_EVENT_PRETHAW)
		return 0;

	/* keep fbcon from touching the hardware while we're down;
	 * restored from fbdev_flags on the way out */
	fbdev_flags = dev_priv->fbdev_info->flags;
	dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;

	/* unpin scanout buffers so they can be evicted from VRAM */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct nouveau_framebuffer *nouveau_fb;

		nouveau_fb = nouveau_framebuffer(crtc->fb);
		if (!nouveau_fb || !nouveau_fb->nvbo)
			continue;

		nouveau_bo_unpin(nouveau_fb->nvbo);
	}

	NV_INFO(dev, "Evicting buffers...\n");
	ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);

	/* wait for each channel to go idle by emitting and waiting on a
	 * fence; the kernel's own channel (fifos[0] on NV50+) is skipped */
	NV_INFO(dev, "Idling channels...\n");
	for (i = 0; i < pfifo->channels; i++) {
		struct nouveau_fence *fence = NULL;

		chan = dev_priv->fifos[i];
		if (!chan || (dev_priv->card_type >= NV_50 &&
			      chan == dev_priv->fifos[0]))
			continue;

		ret = nouveau_fence_new(chan, &fence, true);
		if (ret == 0) {
			ret = nouveau_fence_wait(fence, NULL, false, false);
			nouveau_fence_unref((void *)&fence);
		}

		/* failure to idle is logged but not fatal to the suspend */
		if (ret) {
			NV_ERROR(dev, "Failed to idle channel %d for suspend\n",
				 chan->id);
		}
	}

	/* stop PFIFO/PGRAPH and save the active contexts */
	pgraph->fifo_access(dev, false);
	nouveau_wait_for_idle(dev);
	pfifo->reassign(dev, false);
	pfifo->disable(dev);
	pfifo->unload_context(dev);
	pgraph->unload_context(dev);

	NV_INFO(dev, "Suspending GPU objects...\n");
	ret = nouveau_gpuobj_suspend(dev);
	if (ret) {
		NV_ERROR(dev, "... failed: %d\n", ret);
		goto out_abort;
	}

	ret = pinstmem->suspend(dev);
	if (ret) {
		NV_ERROR(dev, "... failed: %d\n", ret);
		nouveau_gpuobj_suspend_cleanup(dev);
		goto out_abort;
	}

	NV_INFO(dev, "And we're gone!\n");
	pci_save_state(pdev);
	/* only power down fully for a real suspend, not hibernate freeze */
	if (pm_state.event == PM_EVENT_SUSPEND) {
		pci_disable_device(pdev);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	acquire_console_sem();
	fb_set_suspend(dev_priv->fbdev_info, 1);
	release_console_sem();
	dev_priv->fbdev_info->flags = fbdev_flags;
	return 0;

out_abort:
	NV_INFO(dev, "Re-enabling acceleration..\n");
	pfifo->enable(dev);
	pfifo->reassign(dev, true);
	pgraph->fifo_access(dev, true);
	return ret;
}
219
220static int
221nouveau_pci_resume(struct pci_dev *pdev)
222{
223 struct drm_device *dev = pci_get_drvdata(pdev);
224 struct drm_nouveau_private *dev_priv = dev->dev_private;
225 struct nouveau_engine *engine = &dev_priv->engine;
226 struct drm_crtc *crtc;
227 uint32_t fbdev_flags;
228 int ret, i;
229
230 if (!drm_core_check_feature(dev, DRIVER_MODESET))
231 return -ENODEV;
232
233 fbdev_flags = dev_priv->fbdev_info->flags;
234 dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
235
236 NV_INFO(dev, "We're back, enabling device...\n");
237 pci_set_power_state(pdev, PCI_D0);
238 pci_restore_state(pdev);
239 if (pci_enable_device(pdev))
240 return -1;
241 pci_set_master(dev->pdev);
242
243 NV_INFO(dev, "POSTing device...\n");
244 ret = nouveau_run_vbios_init(dev);
245 if (ret)
246 return ret;
247
248 if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
249 ret = nouveau_mem_init_agp(dev);
250 if (ret) {
251 NV_ERROR(dev, "error reinitialising AGP: %d\n", ret);
252 return ret;
253 }
254 }
255
256 NV_INFO(dev, "Reinitialising engines...\n");
257 engine->instmem.resume(dev);
258 engine->mc.init(dev);
259 engine->timer.init(dev);
260 engine->fb.init(dev);
261 engine->graph.init(dev);
262 engine->fifo.init(dev);
263
264 NV_INFO(dev, "Restoring GPU objects...\n");
265 nouveau_gpuobj_resume(dev);
266
267 nouveau_irq_postinstall(dev);
268
269 /* Re-write SKIPS, they'll have been lost over the suspend */
270 if (nouveau_vram_pushbuf) {
271 struct nouveau_channel *chan;
272 int j;
273
274 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
275 chan = dev_priv->fifos[i];
276 if (!chan)
277 continue;
278
279 for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
280 nouveau_bo_wr32(chan->pushbuf_bo, i, 0);
281 }
282 }
283
284 NV_INFO(dev, "Restoring mode...\n");
285 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
286 struct nouveau_framebuffer *nouveau_fb;
287
288 nouveau_fb = nouveau_framebuffer(crtc->fb);
289 if (!nouveau_fb || !nouveau_fb->nvbo)
290 continue;
291
292 nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM);
293 }
294
295 if (dev_priv->card_type < NV_50) {
296 nv04_display_restore(dev);
297 NVLockVgaCrtcs(dev, false);
298 } else
299 nv50_display_init(dev);
300
301 /* Force CLUT to get re-loaded during modeset */
302 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
303 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
304
305 nv_crtc->lut.depth = 0;
306 }
307
308 acquire_console_sem();
309 fb_set_suspend(dev_priv->fbdev_info, 0);
310 release_console_sem();
311
312 nouveau_fbcon_zfill(dev);
313
314 drm_helper_resume_force_mode(dev);
315 dev_priv->fbdev_info->flags = fbdev_flags;
316 return 0;
317}
318
/* DRM driver description: wires the nouveau entry points into the DRM
 * core, including the PCI probe/remove/suspend/resume hooks above. */
static struct drm_driver driver = {
	.driver_features =
		DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
		DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,
	.load = nouveau_load,
	.firstopen = nouveau_firstopen,
	.lastclose = nouveau_lastclose,
	.unload = nouveau_unload,
	.preclose = nouveau_preclose,
#if defined(CONFIG_DRM_NOUVEAU_DEBUG)
	.debugfs_init = nouveau_debugfs_init,
	.debugfs_cleanup = nouveau_debugfs_takedown,
#endif
	.irq_preinstall = nouveau_irq_preinstall,
	.irq_postinstall = nouveau_irq_postinstall,
	.irq_uninstall = nouveau_irq_uninstall,
	.irq_handler = nouveau_irq_handler,
	.reclaim_buffers = drm_core_reclaim_buffers,
	.get_map_ofs = drm_core_get_map_ofs,
	.get_reg_ofs = drm_core_get_reg_ofs,
	.ioctls = nouveau_ioctls,
	.fops = {
		.owner = THIS_MODULE,
		.open = drm_open,
		.release = drm_release,
		.ioctl = drm_ioctl,
		.mmap = nouveau_ttm_mmap,
		.poll = drm_poll,
		.fasync = drm_fasync,
#if defined(CONFIG_COMPAT)
		.compat_ioctl = nouveau_compat_ioctl,
#endif
	},
	.pci_driver = {
		.name = DRIVER_NAME,
		.id_table = pciidlist,
		.probe = nouveau_pci_probe,
		.remove = nouveau_pci_remove,
		.suspend = nouveau_pci_suspend,
		.resume = nouveau_pci_resume
	},

	.gem_init_object = nouveau_gem_object_new,
	.gem_free_object = nouveau_gem_object_del,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
#ifdef GIT_REVISION
	.date = GIT_REVISION,
#else
	.date = DRIVER_DATE,
#endif
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
375
/*
 * Module entry point: decide whether to enable kernel modesetting
 * (default on, unless the VGA text console has been forced) and
 * register the driver with the DRM core.
 */
static int __init nouveau_init(void)
{
	driver.num_ioctls = nouveau_max_ioctl;

	/* modeset == -1 means "auto": enable KMS unless vgacon is forced */
	if (nouveau_modeset == -1) {
#ifdef CONFIG_VGA_CONSOLE
		if (vgacon_text_force())
			nouveau_modeset = 0;
		else
#endif
			nouveau_modeset = 1;
	}

	if (nouveau_modeset == 1)
		driver.driver_features |= DRIVER_MODESET;

	return drm_init(&driver);
}
394
/* Module exit point: unregister the driver from the DRM core. */
static void __exit nouveau_exit(void)
{
	drm_exit(&driver);
}

module_init(nouveau_init);
module_exit(nouveau_exit);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
new file mode 100644
index 000000000000..88b4c7b77e7f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -0,0 +1,1286 @@
1/*
2 * Copyright 2005 Stephane Marchesin.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25#ifndef __NOUVEAU_DRV_H__
26#define __NOUVEAU_DRV_H__
27
28#define DRIVER_AUTHOR "Stephane Marchesin"
29#define DRIVER_EMAIL "dri-devel@lists.sourceforge.net"
30
31#define DRIVER_NAME "nouveau"
32#define DRIVER_DESC "nVidia Riva/TNT/GeForce"
33#define DRIVER_DATE "20090420"
34
35#define DRIVER_MAJOR 0
36#define DRIVER_MINOR 0
37#define DRIVER_PATCHLEVEL 15
38
39#define NOUVEAU_FAMILY 0x0000FFFF
40#define NOUVEAU_FLAGS 0xFFFF0000
41
42#include "ttm/ttm_bo_api.h"
43#include "ttm/ttm_bo_driver.h"
44#include "ttm/ttm_placement.h"
45#include "ttm/ttm_memory.h"
46#include "ttm/ttm_module.h"
47
/* Per-open-file driver private state. */
struct nouveau_fpriv {
	struct ttm_object_file *tfile;
};
51
52#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
53
54#include "nouveau_drm.h"
55#include "nouveau_reg.h"
56#include "nouveau_bios.h"
57
58#define MAX_NUM_DCB_ENTRIES 16
59
60#define NOUVEAU_MAX_CHANNEL_NR 128
61
62#define NV50_VM_MAX_VRAM (2*1024*1024*1024ULL)
63#define NV50_VM_BLOCK (512*1024*1024ULL)
64#define NV50_VM_VRAM_NR (NV50_VM_MAX_VRAM / NV50_VM_BLOCK)
65
/* Nouveau wrapper around a TTM buffer object. */
struct nouveau_bo {
	struct ttm_buffer_object bo;
	struct ttm_placement placement;
	u32 placements[3];
	struct ttm_bo_kmap_obj kmap;	/* CPU mapping, see nvbo_kmap_obj_iovirtual() */
	struct list_head head;

	/* protected by ttm_bo_reserve() */
	struct drm_file *reserved_by;
	struct list_head entry;
	int pbbo_index;

	struct nouveau_channel *channel;

	bool mappable;
	bool no_vm;

	uint32_t tile_mode;
	uint32_t tile_flags;

	struct drm_gem_object *gem;	/* GEM object exposing this BO, if any */
	struct drm_file *cpu_filp;
	int pin_refcnt;			/* presumably pin nesting count — see nouveau_bo_pin/unpin */
};
90
/* Upcast a TTM buffer object to its containing nouveau_bo. */
static inline struct nouveau_bo *
nouveau_bo(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct nouveau_bo, bo);
}

/* Fetch the nouveau_bo backing a GEM object (NULL-safe). */
static inline struct nouveau_bo *
nouveau_gem_object(struct drm_gem_object *gem)
{
	return gem ? gem->driver_private : NULL;
}

/* TODO: submit equivalent to TTM generic API upstream? */
/* Return the kmap'd virtual address of a BO as an __iomem pointer;
 * warns once if the mapping unexpectedly isn't iomem. */
static inline void __iomem *
nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo)
{
	bool is_iomem;
	void __iomem *ioptr = (void __force __iomem *)ttm_kmap_obj_virtual(
		&nvbo->kmap, &is_iomem);
	WARN_ON_ONCE(ioptr && !is_iomem);
	return ioptr;
}
113
/* Node in the driver's simple doubly-linked range allocator
 * (used e.g. for the PRAMIN and notifier heaps). */
struct mem_block {
	struct mem_block *next;
	struct mem_block *prev;
	uint64_t start;
	uint64_t size;
	struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
};

/* nForce IGP board quirk flags. */
enum nouveau_flags {
	NV_NFORCE = 0x10000000,
	NV_NFORCE2 = 0x20000000
};

/* Engine a GPU object belongs to (nouveau_gpuobj.engine). */
#define NVOBJ_ENGINE_SW 0
#define NVOBJ_ENGINE_GR 1
#define NVOBJ_ENGINE_DISPLAY 2
#define NVOBJ_ENGINE_INT 0xdeadbeef

/* Allocation/lifetime flags (nouveau_gpuobj.flags). */
#define NVOBJ_FLAG_ALLOW_NO_REFS (1 << 0)
#define NVOBJ_FLAG_ZERO_ALLOC (1 << 1)
#define NVOBJ_FLAG_ZERO_FREE (1 << 2)
#define NVOBJ_FLAG_FAKE (1 << 3)
/* An object in GPU instance memory ("PRAMIN"). */
struct nouveau_gpuobj {
	struct list_head list;

	struct nouveau_channel *im_channel;
	struct mem_block *im_pramin;
	struct nouveau_bo *im_backing;
	uint32_t im_backing_start;
	uint32_t *im_backing_suspend;	/* presumably a CPU-side backup taken at suspend — see nouveau_gpuobj_suspend() */
	int im_bound;

	uint32_t flags;			/* NVOBJ_FLAG_* */
	int refcount;

	uint32_t engine;		/* NVOBJ_ENGINE_* */
	uint32_t class;

	void (*dtor)(struct drm_device *, struct nouveau_gpuobj *);
	void *priv;
};

/* A counted reference to a gpuobj, optionally held by a channel. */
struct nouveau_gpuobj_ref {
	struct list_head list;

	struct nouveau_gpuobj *gpuobj;
	uint32_t instance;

	struct nouveau_channel *channel;
	int handle;
};
165
/* State for a single GPU FIFO channel. */
struct nouveau_channel {
	struct drm_device *dev;
	int id;

	/* owner of this fifo */
	struct drm_file *file_priv;
	/* mapping of the fifo itself */
	struct drm_local_map *map;

	/* mapping of the registers controlling the fifo */
	void __iomem *user;
	uint32_t user_get;
	uint32_t user_put;

	/* Fencing */
	struct {
		/* lock protects the pending list only */
		spinlock_t lock;
		struct list_head pending;
		uint32_t sequence;
		uint32_t sequence_ack;
		uint32_t last_sequence_irq;
	} fence;

	/* DMA push buffer */
	struct nouveau_gpuobj_ref *pushbuf;
	struct nouveau_bo *pushbuf_bo;
	uint32_t pushbuf_base;

	/* Notifier memory */
	struct nouveau_bo *notifier_bo;
	struct mem_block *notifier_heap;

	/* PFIFO context */
	struct nouveau_gpuobj_ref *ramfc;
	struct nouveau_gpuobj_ref *cache;

	/* PGRAPH context */
	/* XXX may be merge 2 pointers as private data ??? */
	struct nouveau_gpuobj_ref *ramin_grctx;
	void *pgraph_ctx;

	/* NV50 VM */
	struct nouveau_gpuobj *vm_pd;
	struct nouveau_gpuobj_ref *vm_gart_pt;
	struct nouveau_gpuobj_ref *vm_vram_pt[NV50_VM_VRAM_NR];

	/* Objects */
	struct nouveau_gpuobj_ref *ramin; /* Private instmem */
	struct mem_block *ramin_heap; /* Private PRAMIN heap */
	struct nouveau_gpuobj_ref *ramht; /* Hash table */
	struct list_head ramht_refs; /* Objects referenced by RAMHT */

	/* GPU object info for stuff used in-kernel (mm_enabled) */
	uint32_t m2mf_ntfy;
	uint32_t vram_handle;
	uint32_t gart_handle;
	bool accel_done;

	/* Push buffer state (only for drm's channel on !mm_enabled) */
	struct {
		int max;
		int free;
		int cur;
		int put;
		/* access via pushbuf_bo */
	} dma;

	/* software-method subchannel bindings */
	uint32_t sw_subchannel[8];

	/* vblank semaphore-release state for the software engine */
	struct {
		struct nouveau_gpuobj *vblsem;
		uint32_t vblsem_offset;
		uint32_t vblsem_rval;
		struct list_head vbl_wait;
	} nvsw;

	/* per-channel debugfs entry */
	struct {
		bool active;
		char name[32];
		struct drm_info_list info;
	} debugfs;
};
249
/* Instance-memory backend hooks (filled per-chipset). */
struct nouveau_instmem_engine {
	void *priv;

	int (*init)(struct drm_device *dev);
	void (*takedown)(struct drm_device *dev);
	int (*suspend)(struct drm_device *dev);
	void (*resume)(struct drm_device *dev);

	int (*populate)(struct drm_device *, struct nouveau_gpuobj *,
			uint32_t *size);
	void (*clear)(struct drm_device *, struct nouveau_gpuobj *);
	int (*bind)(struct drm_device *, struct nouveau_gpuobj *);
	int (*unbind)(struct drm_device *, struct nouveau_gpuobj *);
	void (*prepare_access)(struct drm_device *, bool write);
	void (*finish_access)(struct drm_device *);
};

/* Master-control (MC) engine hooks. */
struct nouveau_mc_engine {
	int (*init)(struct drm_device *dev);
	void (*takedown)(struct drm_device *dev);
};

/* Timer engine hooks; read() returns the current timestamp. */
struct nouveau_timer_engine {
	int (*init)(struct drm_device *dev);
	void (*takedown)(struct drm_device *dev);
	uint64_t (*read)(struct drm_device *dev);
};

/* Framebuffer/memory-controller (FB) engine hooks. */
struct nouveau_fb_engine {
	int (*init)(struct drm_device *dev);
	void (*takedown)(struct drm_device *dev);
};

/* PFIFO engine hooks and state. */
struct nouveau_fifo_engine {
	void *priv;

	int channels;	/* number of hardware channels */

	int (*init)(struct drm_device *);
	void (*takedown)(struct drm_device *);

	void (*disable)(struct drm_device *);
	void (*enable)(struct drm_device *);
	bool (*reassign)(struct drm_device *, bool enable);

	int (*channel_id)(struct drm_device *);

	int (*create_context)(struct nouveau_channel *);
	void (*destroy_context)(struct nouveau_channel *);
	int (*load_context)(struct nouveau_channel *);
	int (*unload_context)(struct drm_device *);
};

/* A software-implemented method on a graphics object class. */
struct nouveau_pgraph_object_method {
	int id;
	int (*exec)(struct nouveau_channel *chan, int grclass, int mthd,
		    uint32_t data);
};

/* A graphics object class; 'software' classes are emulated via methods. */
struct nouveau_pgraph_object_class {
	int id;
	bool software;
	struct nouveau_pgraph_object_method *methods;
};

/* PGRAPH engine hooks and state. */
struct nouveau_pgraph_engine {
	struct nouveau_pgraph_object_class *grclass;
	bool accel_blocked;
	void *ctxprog;
	void *ctxvals;

	int (*init)(struct drm_device *);
	void (*takedown)(struct drm_device *);

	void (*fifo_access)(struct drm_device *, bool);

	struct nouveau_channel *(*channel)(struct drm_device *);
	int (*create_context)(struct nouveau_channel *);
	void (*destroy_context)(struct nouveau_channel *);
	int (*load_context)(struct nouveau_channel *);
	int (*unload_context)(struct drm_device *);
};

/* Aggregate of all per-chipset engine implementations. */
struct nouveau_engine {
	struct nouveau_instmem_engine instmem;
	struct nouveau_mc_engine mc;
	struct nouveau_timer_engine timer;
	struct nouveau_fb_engine fb;
	struct nouveau_pgraph_engine graph;
	struct nouveau_fifo_engine fifo;
};
341
/* PLL parameter set; the anonymous union lets the two N/M byte pairs
 * be accessed either individually or as 16-bit words (NM1/NM2), with
 * the byte layout fixed up for endianness. */
struct nouveau_pll_vals {
	union {
		struct {
#ifdef __BIG_ENDIAN
			uint8_t N1, M1, N2, M2;
#else
			uint8_t M1, N1, M2, N2;
#endif
		};
		struct {
			uint16_t NM1, NM2;
		} __attribute__((packed));
	};
	int log2P;	/* log2 of the post divider */

	int refclk;
};
359
360enum nv04_fp_display_regs {
361 FP_DISPLAY_END,
362 FP_TOTAL,
363 FP_CRTC,
364 FP_SYNC_START,
365 FP_SYNC_END,
366 FP_VALID_START,
367 FP_VALID_END
368};
369
/*
 * Per-head register shadow for pre-NV50 modesetting: legacy VGA state
 * followed by the extended PCRTC and PRAMDAC registers.  Used both as
 * the state to program and as the save area for restore on takedown.
 */
struct nv04_crtc_reg {
	unsigned char MiscOutReg;	/* VGA miscellaneous output register */
	uint8_t CRTC[0x9f];		/* VGA CRTC registers, incl. NV extensions */
	uint8_t CR58[0x10];		/* CR58-indexed extended registers */
	uint8_t Sequencer[5];		/* VGA sequencer registers */
	uint8_t Graphics[9];		/* VGA graphics controller registers */
	uint8_t Attribute[21];		/* VGA attribute controller registers */
	unsigned char DAC[768];		/* Internal Colorlookuptable (256 x RGB) */

	/* PCRTC regs */
	uint32_t fb_start;
	uint32_t crtc_cfg;
	uint32_t cursor_cfg;
	uint32_t gpio_ext;
	uint32_t crtc_830;
	uint32_t crtc_834;
	uint32_t crtc_850;
	uint32_t crtc_eng_ctrl;

	/* PRAMDAC regs */
	uint32_t nv10_cursync;
	struct nouveau_pll_vals pllvals;	/* VPLL coefficients for this head */
	uint32_t ramdac_gen_ctrl;
	uint32_t ramdac_630;
	uint32_t ramdac_634;
	uint32_t tv_setup;
	uint32_t tv_vtotal;
	uint32_t tv_vskew;
	uint32_t tv_vsync_delay;
	uint32_t tv_htotal;
	uint32_t tv_hskew;
	uint32_t tv_hsync_delay;
	uint32_t tv_hsync_delay2;
	uint32_t fp_horiz_regs[7];	/* indexed by enum nv04_fp_display_regs */
	uint32_t fp_vert_regs[7];	/* indexed by enum nv04_fp_display_regs */
	uint32_t dither;
	uint32_t fp_control;
	uint32_t dither_regs[6];
	uint32_t fp_debug_0;
	uint32_t fp_debug_1;
	uint32_t fp_debug_2;
	uint32_t fp_margin_color;
	uint32_t ramdac_8c0;
	uint32_t ramdac_a20;
	uint32_t ramdac_a24;
	uint32_t ramdac_a34;
	uint32_t ctv_regs[38];
};
418
/*
 * Saved output routing state for an encoder: the output register value
 * and the CRTC head it was bound to, so it can be restored later
 * (see the "restore" member of struct nouveau_encoder).
 */
struct nv04_output_reg {
	uint32_t output;
	int head;
};
423
/*
 * Full pre-NV50 display state: card-wide registers (PLL selection,
 * clock routing, arbitration, cursor/head ownership) plus the per-head
 * register shadows in crtc_reg[].  Two instances are kept in
 * drm_nouveau_private: mode_reg (current) and saved_reg (for restore).
 */
struct nv04_mode_state {
	uint32_t bpp;
	uint32_t width;
	uint32_t height;
	uint32_t interlace;
	uint32_t repaint0;
	uint32_t repaint1;
	uint32_t screen;
	uint32_t scale;
	uint32_t dither;
	uint32_t extra;
	uint32_t fifo;
	uint32_t pixel;
	uint32_t horiz;
	int arbitration0;	/* memory arbitration settings */
	int arbitration1;
	uint32_t pll;		/* core/memory/video PLL registers */
	uint32_t pllB;
	uint32_t vpll;		/* pixel clock PLLs, one per head */
	uint32_t vpll2;
	uint32_t vpllB;
	uint32_t vpll2B;
	uint32_t pllsel;
	uint32_t sel_clk;
	uint32_t general;
	uint32_t crtcOwner;
	uint32_t head;
	uint32_t head2;
	uint32_t cursorConfig;
	uint32_t cursor0;
	uint32_t cursor1;
	uint32_t cursor2;
	uint32_t timingH;
	uint32_t timingV;
	uint32_t displayV;
	uint32_t crtcSync;

	struct nv04_crtc_reg crtc_reg[2];	/* one shadow per CRTC head */
};
463
/*
 * Broad chipset family; values match the leading byte of the family's
 * chipset IDs so ordered comparisons (>= NV_10 etc.) work.
 */
enum nouveau_card_type {
	NV_04      = 0x00,
	NV_10      = 0x10,
	NV_20      = 0x20,
	NV_30      = 0x30,
	NV_40      = 0x40,
	NV_50      = 0x50,
};
472
/*
 * Driver-private device state, hung off drm_device::dev_private.
 * Groups the mapped BARs, engine vtables, channel bookkeeping, RAMIN
 * layout, GART configuration, BIOS data and saved display state.
 */
struct drm_nouveau_private {
	struct drm_device *dev;		/* back-pointer to the DRM device */
	enum {
		NOUVEAU_CARD_INIT_DOWN,
		NOUVEAU_CARD_INIT_DONE,
		NOUVEAU_CARD_INIT_FAILED
	} init_state;			/* gate for ioctls, see NOUVEAU_CHECK_INITIALISED_WITH_RETURN */

	/* the card type, takes NV_* as values */
	enum nouveau_card_type card_type;
	/* exact chipset, derived from NV_PMC_BOOT_0 */
	int chipset;
	int flags;

	void __iomem *mmio;		/* mapped register BAR */
	void __iomem *ramin;		/* mapped instance memory (PRAMIN) */
	uint32_t ramin_size;

	struct workqueue_struct *wq;	/* workqueue for irq bottom halves */
	struct work_struct irq_work;

	struct list_head vbl_waiting;	/* channels waiting on vblank semaphores */

	struct {
		struct ttm_global_reference mem_global_ref;
		struct ttm_bo_global_ref bo_global_ref;
		struct ttm_bo_device bdev;
		spinlock_t bo_list_lock;	/* protects bo_list */
		struct list_head bo_list;
		atomic_t validate_sequence;
	} ttm;

	struct fb_info *fbdev_info;

	int fifo_alloc_count;		/* number of allocated channels */
	struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR];

	struct nouveau_engine engine;	/* chipset-specific engine vtables */
	struct nouveau_channel *channel; /* kernel-internal channel */

	/* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */
	struct nouveau_gpuobj *ramht;
	uint32_t ramin_rsvd_vram;
	uint32_t ramht_offset;
	uint32_t ramht_size;
	uint32_t ramht_bits;
	uint32_t ramfc_offset;
	uint32_t ramfc_size;
	uint32_t ramro_offset;
	uint32_t ramro_size;

	/* base physical addresses */
	uint64_t fb_phys;
	uint64_t fb_available_size;
	uint64_t fb_mappable_pages;
	uint64_t fb_aper_free;

	struct {
		enum {
			NOUVEAU_GART_NONE = 0,
			NOUVEAU_GART_AGP,
			NOUVEAU_GART_SGDMA
		} type;
		uint64_t aper_base;
		uint64_t aper_size;
		uint64_t aper_free;

		struct nouveau_gpuobj *sg_ctxdma;
		struct page *sg_dummy_page;	/* backs unbound GART pages */
		dma_addr_t sg_dummy_bus;

		/* nottm hack */
		struct drm_ttm_backend *sg_be;
		unsigned long sg_handle;
	} gart_info;

	/* G8x/G9x virtual address space */
	uint64_t vm_gart_base;
	uint64_t vm_gart_size;
	uint64_t vm_vram_base;
	uint64_t vm_vram_size;
	uint64_t vm_end;
	struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
	int vm_vram_pt_nr;

	/* the mtrr covering the FB */
	int fb_mtrr;

	struct mem_block *ramin_heap;	/* allocator for RAMIN space */

	/* context table pointed to by NV_PGRAPH_CHANNEL_CTX_TABLE (0x400780) */
	uint32_t ctx_table_size;
	struct nouveau_gpuobj_ref *ctx_table;

	struct list_head gpuobj_list;	/* all gpuobjs, for suspend/resume */

	struct nvbios VBIOS;
	struct nouveau_bios_info *vbios;

	struct nv04_mode_state mode_reg;	/* current display state */
	struct nv04_mode_state saved_reg;	/* state saved at load for restore */
	uint32_t saved_vga_font[4][16384];
	uint32_t crtc_owner;
	uint32_t dac_users[4];

	struct nouveau_suspend_resume {
		uint32_t fifo_mode;
		uint32_t graph_ctx_control;
		uint32_t graph_state;
		uint32_t *ramin_copy;	/* RAMIN snapshot taken on suspend */
		uint64_t ramin_size;
	} susres;

	struct backlight_device *backlight;
	bool acpi_dsm;			/* _DSM method present (hybrid graphics) */

	struct nouveau_channel *evo;	/* NV50 display (EVO) channel */

	struct {
		struct dentry *channel_root;
	} debugfs;
};
595
/* Recover the driver-private struct from an embedded ttm_bo_device. */
static inline struct drm_nouveau_private *
nouveau_bdev(struct ttm_bo_device *bd)
{
	return container_of(bd, struct drm_nouveau_private, ttm.bdev);
}
601
/*
 * Update a nouveau_bo pointer with TTM reference accounting:
 * *pnvbo is replaced by a new reference to "ref" (or by NULL), and the
 * reference previously held through *pnvbo, if any, is then dropped.
 * Returns 0 on success, -EINVAL if pnvbo is NULL.
 */
static inline int
nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
{
	struct nouveau_bo *prev;

	if (!pnvbo)
		return -EINVAL;
	prev = *pnvbo;

	/* take the new reference before releasing the old one */
	*pnvbo = ref ? nouveau_bo(ttm_bo_reference(&ref->bo)) : NULL;
	if (prev) {
		struct ttm_buffer_object *bo = &prev->bo;

		ttm_bo_unref(&bo);
	}

	return 0;
}
620
/*
 * Return -EINVAL from the enclosing ioctl handler unless card
 * initialisation has completed.  Expects "dev" (struct drm_device *)
 * to be in scope and the function to return int.
 */
#define NOUVEAU_CHECK_INITIALISED_WITH_RETURN do {            \
	struct drm_nouveau_private *nv = dev->dev_private;    \
	if (nv->init_state != NOUVEAU_CARD_INIT_DONE) {       \
		NV_ERROR(dev, "called without init\n");       \
		return -EINVAL;                               \
	}                                                     \
} while (0)
628
/*
 * Look up channel "id" into "ch", returning -EPERM from the enclosing
 * function when the calling client "cl" does not own that channel.
 * Expects "dev" (struct drm_device *) to be in scope.
 */
#define NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(id, cl, ch) do {    \
	struct drm_nouveau_private *nv = dev->dev_private;       \
	if (!nouveau_channel_owner(dev, (cl), (id))) {           \
		NV_ERROR(dev, "pid %d doesn't own channel %d\n", \
			 DRM_CURRENTPID, (id));                  \
		return -EPERM;                                   \
	}                                                        \
	(ch) = nv->fifos[(id)];                                  \
} while (0)
638
639/* nouveau_drv.c */
640extern int nouveau_noagp;
641extern int nouveau_duallink;
642extern int nouveau_uscript_lvds;
643extern int nouveau_uscript_tmds;
644extern int nouveau_vram_pushbuf;
645extern int nouveau_vram_notify;
646extern int nouveau_fbpercrtc;
647extern char *nouveau_tv_norm;
648extern int nouveau_reg_debug;
649extern char *nouveau_vbios;
650
651/* nouveau_state.c */
652extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
653extern int nouveau_load(struct drm_device *, unsigned long flags);
654extern int nouveau_firstopen(struct drm_device *);
655extern void nouveau_lastclose(struct drm_device *);
656extern int nouveau_unload(struct drm_device *);
657extern int nouveau_ioctl_getparam(struct drm_device *, void *data,
658 struct drm_file *);
659extern int nouveau_ioctl_setparam(struct drm_device *, void *data,
660 struct drm_file *);
661extern bool nouveau_wait_until(struct drm_device *, uint64_t timeout,
662 uint32_t reg, uint32_t mask, uint32_t val);
663extern bool nouveau_wait_for_idle(struct drm_device *);
664extern int nouveau_card_init(struct drm_device *);
665extern int nouveau_ioctl_card_init(struct drm_device *, void *data,
666 struct drm_file *);
667extern int nouveau_ioctl_suspend(struct drm_device *, void *data,
668 struct drm_file *);
669extern int nouveau_ioctl_resume(struct drm_device *, void *data,
670 struct drm_file *);
671
672/* nouveau_mem.c */
673extern int nouveau_mem_init_heap(struct mem_block **, uint64_t start,
674 uint64_t size);
675extern struct mem_block *nouveau_mem_alloc_block(struct mem_block *,
676 uint64_t size, int align2,
677 struct drm_file *, int tail);
678extern void nouveau_mem_takedown(struct mem_block **heap);
679extern void nouveau_mem_free_block(struct mem_block *);
680extern uint64_t nouveau_mem_fb_amount(struct drm_device *);
681extern void nouveau_mem_release(struct drm_file *, struct mem_block *heap);
682extern int nouveau_mem_init(struct drm_device *);
683extern int nouveau_mem_init_agp(struct drm_device *);
684extern void nouveau_mem_close(struct drm_device *);
685extern int nv50_mem_vm_bind_linear(struct drm_device *, uint64_t virt,
686 uint32_t size, uint32_t flags,
687 uint64_t phys);
688extern void nv50_mem_vm_unbind(struct drm_device *, uint64_t virt,
689 uint32_t size);
690
691/* nouveau_notifier.c */
692extern int nouveau_notifier_init_channel(struct nouveau_channel *);
693extern void nouveau_notifier_takedown_channel(struct nouveau_channel *);
694extern int nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle,
695 int cout, uint32_t *offset);
696extern int nouveau_notifier_offset(struct nouveau_gpuobj *, uint32_t *);
697extern int nouveau_ioctl_notifier_alloc(struct drm_device *, void *data,
698 struct drm_file *);
699extern int nouveau_ioctl_notifier_free(struct drm_device *, void *data,
700 struct drm_file *);
701
702/* nouveau_channel.c */
703extern struct drm_ioctl_desc nouveau_ioctls[];
704extern int nouveau_max_ioctl;
705extern void nouveau_channel_cleanup(struct drm_device *, struct drm_file *);
706extern int nouveau_channel_owner(struct drm_device *, struct drm_file *,
707 int channel);
708extern int nouveau_channel_alloc(struct drm_device *dev,
709 struct nouveau_channel **chan,
710 struct drm_file *file_priv,
711 uint32_t fb_ctxdma, uint32_t tt_ctxdma);
712extern void nouveau_channel_free(struct nouveau_channel *);
713extern int nouveau_channel_idle(struct nouveau_channel *chan);
714
715/* nouveau_object.c */
716extern int nouveau_gpuobj_early_init(struct drm_device *);
717extern int nouveau_gpuobj_init(struct drm_device *);
718extern void nouveau_gpuobj_takedown(struct drm_device *);
719extern void nouveau_gpuobj_late_takedown(struct drm_device *);
720extern int nouveau_gpuobj_suspend(struct drm_device *dev);
721extern void nouveau_gpuobj_suspend_cleanup(struct drm_device *dev);
722extern void nouveau_gpuobj_resume(struct drm_device *dev);
723extern int nouveau_gpuobj_channel_init(struct nouveau_channel *,
724 uint32_t vram_h, uint32_t tt_h);
725extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *);
726extern int nouveau_gpuobj_new(struct drm_device *, struct nouveau_channel *,
727 uint32_t size, int align, uint32_t flags,
728 struct nouveau_gpuobj **);
729extern int nouveau_gpuobj_del(struct drm_device *, struct nouveau_gpuobj **);
730extern int nouveau_gpuobj_ref_add(struct drm_device *, struct nouveau_channel *,
731 uint32_t handle, struct nouveau_gpuobj *,
732 struct nouveau_gpuobj_ref **);
733extern int nouveau_gpuobj_ref_del(struct drm_device *,
734 struct nouveau_gpuobj_ref **);
735extern int nouveau_gpuobj_ref_find(struct nouveau_channel *, uint32_t handle,
736 struct nouveau_gpuobj_ref **ref_ret);
737extern int nouveau_gpuobj_new_ref(struct drm_device *,
738 struct nouveau_channel *alloc_chan,
739 struct nouveau_channel *ref_chan,
740 uint32_t handle, uint32_t size, int align,
741 uint32_t flags, struct nouveau_gpuobj_ref **);
742extern int nouveau_gpuobj_new_fake(struct drm_device *,
743 uint32_t p_offset, uint32_t b_offset,
744 uint32_t size, uint32_t flags,
745 struct nouveau_gpuobj **,
746 struct nouveau_gpuobj_ref**);
747extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class,
748 uint64_t offset, uint64_t size, int access,
749 int target, struct nouveau_gpuobj **);
750extern int nouveau_gpuobj_gart_dma_new(struct nouveau_channel *,
751 uint64_t offset, uint64_t size,
752 int access, struct nouveau_gpuobj **,
753 uint32_t *o_ret);
754extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class,
755 struct nouveau_gpuobj **);
756extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data,
757 struct drm_file *);
758extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data,
759 struct drm_file *);
760
761/* nouveau_irq.c */
762extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS);
763extern void nouveau_irq_preinstall(struct drm_device *);
764extern int nouveau_irq_postinstall(struct drm_device *);
765extern void nouveau_irq_uninstall(struct drm_device *);
766
767/* nouveau_sgdma.c */
768extern int nouveau_sgdma_init(struct drm_device *);
769extern void nouveau_sgdma_takedown(struct drm_device *);
770extern int nouveau_sgdma_get_page(struct drm_device *, uint32_t offset,
771 uint32_t *page);
772extern struct ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *);
773
/* nouveau_debugfs.c
 * When debugfs support is compiled out, the helpers collapse to no-op
 * stubs so callers need no #ifdefs of their own.
 */
#if defined(CONFIG_DRM_NOUVEAU_DEBUG)
extern int  nouveau_debugfs_init(struct drm_minor *);
extern void nouveau_debugfs_takedown(struct drm_minor *);
extern int  nouveau_debugfs_channel_init(struct nouveau_channel *);
extern void nouveau_debugfs_channel_fini(struct nouveau_channel *);
#else
/* no-op stub: debugfs support disabled */
static inline int
nouveau_debugfs_init(struct drm_minor *minor)
{
	return 0;
}

static inline void nouveau_debugfs_takedown(struct drm_minor *minor)
{
}

static inline int
nouveau_debugfs_channel_init(struct nouveau_channel *chan)
{
	return 0;
}

static inline void
nouveau_debugfs_channel_fini(struct nouveau_channel *chan)
{
}
#endif
802
803/* nouveau_dma.c */
804extern int nouveau_dma_init(struct nouveau_channel *);
805extern int nouveau_dma_wait(struct nouveau_channel *, int size);
806
/* nouveau_acpi.c
 * Without ACPI support the hybrid-graphics helpers become stubs:
 * setup trivially succeeds and DSM probing reports "not present".
 */
#ifdef CONFIG_ACPI
extern int nouveau_hybrid_setup(struct drm_device *dev);
extern bool nouveau_dsm_probe(struct drm_device *dev);
#else
static inline int nouveau_hybrid_setup(struct drm_device *dev)
{
	return 0;
}
static inline bool nouveau_dsm_probe(struct drm_device *dev)
{
	return false;
}
#endif
821
/* nouveau_backlight.c
 * Stubbed out to no-ops when backlight support is not configured.
 */
#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
extern int nouveau_backlight_init(struct drm_device *);
extern void nouveau_backlight_exit(struct drm_device *);
#else
static inline int nouveau_backlight_init(struct drm_device *dev)
{
	return 0;
}

static inline void nouveau_backlight_exit(struct drm_device *dev) { }
#endif
834
835/* nouveau_bios.c */
836extern int nouveau_bios_init(struct drm_device *);
837extern void nouveau_bios_takedown(struct drm_device *dev);
838extern int nouveau_run_vbios_init(struct drm_device *);
839extern void nouveau_bios_run_init_table(struct drm_device *, uint16_t table,
840 struct dcb_entry *);
841extern struct dcb_gpio_entry *nouveau_bios_gpio_entry(struct drm_device *,
842 enum dcb_gpio_tag);
843extern struct dcb_connector_table_entry *
844nouveau_bios_connector_entry(struct drm_device *, int index);
845extern int get_pll_limits(struct drm_device *, uint32_t limit_match,
846 struct pll_lims *);
847extern int nouveau_bios_run_display_table(struct drm_device *,
848 struct dcb_entry *,
849 uint32_t script, int pxclk);
850extern void *nouveau_bios_dp_table(struct drm_device *, struct dcb_entry *,
851 int *length);
852extern bool nouveau_bios_fp_mode(struct drm_device *, struct drm_display_mode *);
853extern uint8_t *nouveau_bios_embedded_edid(struct drm_device *);
854extern int nouveau_bios_parse_lvds_table(struct drm_device *, int pxclk,
855 bool *dl, bool *if_is_24bit);
856extern int run_tmds_table(struct drm_device *, struct dcb_entry *,
857 int head, int pxclk);
858extern int call_lvds_script(struct drm_device *, struct dcb_entry *, int head,
859 enum LVDS_script, int pxclk);
860
861/* nouveau_ttm.c */
862int nouveau_ttm_global_init(struct drm_nouveau_private *);
863void nouveau_ttm_global_release(struct drm_nouveau_private *);
864int nouveau_ttm_mmap(struct file *, struct vm_area_struct *);
865
866/* nouveau_dp.c */
867int nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
868 uint8_t *data, int data_nr);
869bool nouveau_dp_detect(struct drm_encoder *);
870bool nouveau_dp_link_train(struct drm_encoder *);
871
872/* nv04_fb.c */
873extern int nv04_fb_init(struct drm_device *);
874extern void nv04_fb_takedown(struct drm_device *);
875
876/* nv10_fb.c */
877extern int nv10_fb_init(struct drm_device *);
878extern void nv10_fb_takedown(struct drm_device *);
879
880/* nv40_fb.c */
881extern int nv40_fb_init(struct drm_device *);
882extern void nv40_fb_takedown(struct drm_device *);
883
884/* nv04_fifo.c */
885extern int nv04_fifo_init(struct drm_device *);
886extern void nv04_fifo_disable(struct drm_device *);
887extern void nv04_fifo_enable(struct drm_device *);
888extern bool nv04_fifo_reassign(struct drm_device *, bool);
889extern int nv04_fifo_channel_id(struct drm_device *);
890extern int nv04_fifo_create_context(struct nouveau_channel *);
891extern void nv04_fifo_destroy_context(struct nouveau_channel *);
892extern int nv04_fifo_load_context(struct nouveau_channel *);
893extern int nv04_fifo_unload_context(struct drm_device *);
894
895/* nv10_fifo.c */
896extern int nv10_fifo_init(struct drm_device *);
897extern int nv10_fifo_channel_id(struct drm_device *);
898extern int nv10_fifo_create_context(struct nouveau_channel *);
899extern void nv10_fifo_destroy_context(struct nouveau_channel *);
900extern int nv10_fifo_load_context(struct nouveau_channel *);
901extern int nv10_fifo_unload_context(struct drm_device *);
902
903/* nv40_fifo.c */
904extern int nv40_fifo_init(struct drm_device *);
905extern int nv40_fifo_create_context(struct nouveau_channel *);
906extern void nv40_fifo_destroy_context(struct nouveau_channel *);
907extern int nv40_fifo_load_context(struct nouveau_channel *);
908extern int nv40_fifo_unload_context(struct drm_device *);
909
910/* nv50_fifo.c */
911extern int nv50_fifo_init(struct drm_device *);
912extern void nv50_fifo_takedown(struct drm_device *);
913extern int nv50_fifo_channel_id(struct drm_device *);
914extern int nv50_fifo_create_context(struct nouveau_channel *);
915extern void nv50_fifo_destroy_context(struct nouveau_channel *);
916extern int nv50_fifo_load_context(struct nouveau_channel *);
917extern int nv50_fifo_unload_context(struct drm_device *);
918
919/* nv04_graph.c */
920extern struct nouveau_pgraph_object_class nv04_graph_grclass[];
921extern int nv04_graph_init(struct drm_device *);
922extern void nv04_graph_takedown(struct drm_device *);
923extern void nv04_graph_fifo_access(struct drm_device *, bool);
924extern struct nouveau_channel *nv04_graph_channel(struct drm_device *);
925extern int nv04_graph_create_context(struct nouveau_channel *);
926extern void nv04_graph_destroy_context(struct nouveau_channel *);
927extern int nv04_graph_load_context(struct nouveau_channel *);
928extern int nv04_graph_unload_context(struct drm_device *);
929extern void nv04_graph_context_switch(struct drm_device *);
930
931/* nv10_graph.c */
932extern struct nouveau_pgraph_object_class nv10_graph_grclass[];
933extern int nv10_graph_init(struct drm_device *);
934extern void nv10_graph_takedown(struct drm_device *);
935extern struct nouveau_channel *nv10_graph_channel(struct drm_device *);
936extern int nv10_graph_create_context(struct nouveau_channel *);
937extern void nv10_graph_destroy_context(struct nouveau_channel *);
938extern int nv10_graph_load_context(struct nouveau_channel *);
939extern int nv10_graph_unload_context(struct drm_device *);
940extern void nv10_graph_context_switch(struct drm_device *);
941
942/* nv20_graph.c */
943extern struct nouveau_pgraph_object_class nv20_graph_grclass[];
944extern struct nouveau_pgraph_object_class nv30_graph_grclass[];
945extern int nv20_graph_create_context(struct nouveau_channel *);
946extern void nv20_graph_destroy_context(struct nouveau_channel *);
947extern int nv20_graph_load_context(struct nouveau_channel *);
948extern int nv20_graph_unload_context(struct drm_device *);
949extern int nv20_graph_init(struct drm_device *);
950extern void nv20_graph_takedown(struct drm_device *);
951extern int nv30_graph_init(struct drm_device *);
952
953/* nv40_graph.c */
954extern struct nouveau_pgraph_object_class nv40_graph_grclass[];
955extern int nv40_graph_init(struct drm_device *);
956extern void nv40_graph_takedown(struct drm_device *);
957extern struct nouveau_channel *nv40_graph_channel(struct drm_device *);
958extern int nv40_graph_create_context(struct nouveau_channel *);
959extern void nv40_graph_destroy_context(struct nouveau_channel *);
960extern int nv40_graph_load_context(struct nouveau_channel *);
961extern int nv40_graph_unload_context(struct drm_device *);
962extern int nv40_grctx_init(struct drm_device *);
963extern void nv40_grctx_fini(struct drm_device *);
964extern void nv40_grctx_vals_load(struct drm_device *, struct nouveau_gpuobj *);
965
966/* nv50_graph.c */
967extern struct nouveau_pgraph_object_class nv50_graph_grclass[];
968extern int nv50_graph_init(struct drm_device *);
969extern void nv50_graph_takedown(struct drm_device *);
970extern void nv50_graph_fifo_access(struct drm_device *, bool);
971extern struct nouveau_channel *nv50_graph_channel(struct drm_device *);
972extern int nv50_graph_create_context(struct nouveau_channel *);
973extern void nv50_graph_destroy_context(struct nouveau_channel *);
974extern int nv50_graph_load_context(struct nouveau_channel *);
975extern int nv50_graph_unload_context(struct drm_device *);
976extern void nv50_graph_context_switch(struct drm_device *);
977
978/* nv04_instmem.c */
979extern int nv04_instmem_init(struct drm_device *);
980extern void nv04_instmem_takedown(struct drm_device *);
981extern int nv04_instmem_suspend(struct drm_device *);
982extern void nv04_instmem_resume(struct drm_device *);
983extern int nv04_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
984 uint32_t *size);
985extern void nv04_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
986extern int nv04_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
987extern int nv04_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
988extern void nv04_instmem_prepare_access(struct drm_device *, bool write);
989extern void nv04_instmem_finish_access(struct drm_device *);
990
991/* nv50_instmem.c */
992extern int nv50_instmem_init(struct drm_device *);
993extern void nv50_instmem_takedown(struct drm_device *);
994extern int nv50_instmem_suspend(struct drm_device *);
995extern void nv50_instmem_resume(struct drm_device *);
996extern int nv50_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
997 uint32_t *size);
998extern void nv50_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
999extern int nv50_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
1000extern int nv50_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
1001extern void nv50_instmem_prepare_access(struct drm_device *, bool write);
1002extern void nv50_instmem_finish_access(struct drm_device *);
1003
1004/* nv04_mc.c */
1005extern int nv04_mc_init(struct drm_device *);
1006extern void nv04_mc_takedown(struct drm_device *);
1007
1008/* nv40_mc.c */
1009extern int nv40_mc_init(struct drm_device *);
1010extern void nv40_mc_takedown(struct drm_device *);
1011
1012/* nv50_mc.c */
1013extern int nv50_mc_init(struct drm_device *);
1014extern void nv50_mc_takedown(struct drm_device *);
1015
1016/* nv04_timer.c */
1017extern int nv04_timer_init(struct drm_device *);
1018extern uint64_t nv04_timer_read(struct drm_device *);
1019extern void nv04_timer_takedown(struct drm_device *);
1020
1021extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd,
1022 unsigned long arg);
1023
1024/* nv04_dac.c */
1025extern int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry);
1026extern enum drm_connector_status nv17_dac_detect(struct drm_encoder *encoder,
1027 struct drm_connector *connector);
1028extern int nv04_dac_output_offset(struct drm_encoder *encoder);
1029extern void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable);
1030
1031/* nv04_dfp.c */
1032extern int nv04_dfp_create(struct drm_device *dev, struct dcb_entry *entry);
1033extern int nv04_dfp_get_bound_head(struct drm_device *dev, struct dcb_entry *dcbent);
1034extern void nv04_dfp_bind_head(struct drm_device *dev, struct dcb_entry *dcbent,
1035 int head, bool dl);
1036extern void nv04_dfp_disable(struct drm_device *dev, int head);
1037extern void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode);
1038
1039/* nv04_tv.c */
1040extern int nv04_tv_identify(struct drm_device *dev, int i2c_index);
1041extern int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry);
1042
1043/* nv17_tv.c */
1044extern int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry);
1045extern enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder,
1046 struct drm_connector *connector,
1047 uint32_t pin_mask);
1048
1049/* nv04_display.c */
1050extern int nv04_display_create(struct drm_device *);
1051extern void nv04_display_destroy(struct drm_device *);
1052extern void nv04_display_restore(struct drm_device *);
1053
1054/* nv04_crtc.c */
1055extern int nv04_crtc_create(struct drm_device *, int index);
1056
1057/* nouveau_bo.c */
1058extern struct ttm_bo_driver nouveau_bo_driver;
1059extern int nouveau_bo_new(struct drm_device *, struct nouveau_channel *,
1060 int size, int align, uint32_t flags,
1061 uint32_t tile_mode, uint32_t tile_flags,
1062 bool no_vm, bool mappable, struct nouveau_bo **);
1063extern int nouveau_bo_pin(struct nouveau_bo *, uint32_t flags);
1064extern int nouveau_bo_unpin(struct nouveau_bo *);
1065extern int nouveau_bo_map(struct nouveau_bo *);
1066extern void nouveau_bo_unmap(struct nouveau_bo *);
1067extern void nouveau_bo_placement_set(struct nouveau_bo *, uint32_t memtype);
1068extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index);
1069extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val);
1070extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index);
1071extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val);
1072
1073/* nouveau_fence.c */
1074struct nouveau_fence;
1075extern int nouveau_fence_init(struct nouveau_channel *);
1076extern void nouveau_fence_fini(struct nouveau_channel *);
1077extern void nouveau_fence_update(struct nouveau_channel *);
1078extern int nouveau_fence_new(struct nouveau_channel *, struct nouveau_fence **,
1079 bool emit);
1080extern int nouveau_fence_emit(struct nouveau_fence *);
1081struct nouveau_channel *nouveau_fence_channel(struct nouveau_fence *);
1082extern bool nouveau_fence_signalled(void *obj, void *arg);
1083extern int nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr);
1084extern int nouveau_fence_flush(void *obj, void *arg);
1085extern void nouveau_fence_unref(void **obj);
1086extern void *nouveau_fence_ref(void *obj);
1087extern void nouveau_fence_handler(struct drm_device *dev, int channel);
1088
1089/* nouveau_gem.c */
1090extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *,
1091 int size, int align, uint32_t flags,
1092 uint32_t tile_mode, uint32_t tile_flags,
1093 bool no_vm, bool mappable, struct nouveau_bo **);
1094extern int nouveau_gem_object_new(struct drm_gem_object *);
1095extern void nouveau_gem_object_del(struct drm_gem_object *);
1096extern int nouveau_gem_ioctl_new(struct drm_device *, void *,
1097 struct drm_file *);
1098extern int nouveau_gem_ioctl_pushbuf(struct drm_device *, void *,
1099 struct drm_file *);
1100extern int nouveau_gem_ioctl_pushbuf_call(struct drm_device *, void *,
1101 struct drm_file *);
1102extern int nouveau_gem_ioctl_pushbuf_call2(struct drm_device *, void *,
1103 struct drm_file *);
1104extern int nouveau_gem_ioctl_pin(struct drm_device *, void *,
1105 struct drm_file *);
1106extern int nouveau_gem_ioctl_unpin(struct drm_device *, void *,
1107 struct drm_file *);
1108extern int nouveau_gem_ioctl_tile(struct drm_device *, void *,
1109 struct drm_file *);
1110extern int nouveau_gem_ioctl_cpu_prep(struct drm_device *, void *,
1111 struct drm_file *);
1112extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *,
1113 struct drm_file *);
1114extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
1115 struct drm_file *);
1116
1117/* nv17_gpio.c */
1118int nv17_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
1119int nv17_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
1120
/*
 * Fallback definitions of the native-endian MMIO accessors for
 * platforms that do not already provide ioread32_native and friends:
 * big-endian builds map to the explicit *be variants, little-endian
 * builds to the plain accessors.
 */
#ifndef ioread32_native
#ifdef __BIG_ENDIAN
#define ioread16_native ioread16be
#define iowrite16_native iowrite16be
#define ioread32_native ioread32be
#define iowrite32_native iowrite32be
#else /* def __BIG_ENDIAN */
#define ioread16_native ioread16
#define iowrite16_native iowrite16
#define ioread32_native ioread32
#define iowrite32_native iowrite32
#endif /* def __BIG_ENDIAN else */
#endif /* !ioread32_native */
1134
/* channel control reg access */

/* Read a 32-bit register from the channel's control area (chan->user). */
static inline u32 nvchan_rd32(struct nouveau_channel *chan, unsigned reg)
{
	return ioread32_native(chan->user + reg);
}

/* Write a 32-bit register in the channel's control area (chan->user). */
static inline void nvchan_wr32(struct nouveau_channel *chan,
			       unsigned reg, u32 val)
{
	iowrite32_native(val, chan->user + reg);
}
1146
/* register access */

/* Read a 32-bit register at "reg" in the mapped register BAR. */
static inline u32 nv_rd32(struct drm_device *dev, unsigned reg)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	return ioread32_native(dev_priv->mmio + reg);
}

/* Write a 32-bit register at "reg" in the mapped register BAR. */
static inline void nv_wr32(struct drm_device *dev, unsigned reg, u32 val)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	iowrite32_native(val, dev_priv->mmio + reg);
}

/* Read a single byte at "reg" in the mapped register BAR. */
static inline u8 nv_rd08(struct drm_device *dev, unsigned reg)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	return ioread8(dev_priv->mmio + reg);
}

/* Write a single byte at "reg" in the mapped register BAR. */
static inline void nv_wr08(struct drm_device *dev, unsigned reg, u8 val)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	iowrite8(val, dev_priv->mmio + reg);
}
1171
/* Poll (reg & mask) == val with a 2 second timeout; "dev" must be in scope. */
#define nv_wait(reg, mask, val) \
	nouveau_wait_until(dev, 2000000000ULL, (reg), (mask), (val))
1174
/* PRAMIN access */

/* Read a 32-bit word at byte "offset" into the mapped instance memory. */
static inline u32 nv_ri32(struct drm_device *dev, unsigned offset)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	return ioread32_native(dev_priv->ramin + offset);
}

/* Write a 32-bit word at byte "offset" into the mapped instance memory. */
static inline void nv_wi32(struct drm_device *dev, unsigned offset, u32 val)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	iowrite32_native(val, dev_priv->ramin + offset);
}
1187
/* object access */

/* Read 32-bit word "index" of a gpuobj via its PRAMIN backing store. */
static inline u32 nv_ro32(struct drm_device *dev, struct nouveau_gpuobj *obj,
				unsigned index)
{
	return nv_ri32(dev, obj->im_pramin->start + index * 4);
}

/* Write 32-bit word "index" of a gpuobj via its PRAMIN backing store. */
static inline void nv_wo32(struct drm_device *dev, struct nouveau_gpuobj *obj,
				unsigned index, u32 val)
{
	nv_wi32(dev, obj->im_pramin->start + index * 4, val);
}
1200
1201/*
1202 * Logging
1203 * Argument d is (struct drm_device *).
1204 */
1205#define NV_PRINTK(level, d, fmt, arg...) \
1206 printk(level "[" DRM_NAME "] " DRIVER_NAME " %s: " fmt, \
1207 pci_name(d->pdev), ##arg)
1208#ifndef NV_DEBUG_NOTRACE
1209#define NV_DEBUG(d, fmt, arg...) do { \
1210 if (drm_debug) { \
1211 NV_PRINTK(KERN_DEBUG, d, "%s:%d - " fmt, __func__, \
1212 __LINE__, ##arg); \
1213 } \
1214} while (0)
1215#else
1216#define NV_DEBUG(d, fmt, arg...) do { \
1217 if (drm_debug) \
1218 NV_PRINTK(KERN_DEBUG, d, fmt, ##arg); \
1219} while (0)
1220#endif
1221#define NV_ERROR(d, fmt, arg...) NV_PRINTK(KERN_ERR, d, fmt, ##arg)
1222#define NV_INFO(d, fmt, arg...) NV_PRINTK(KERN_INFO, d, fmt, ##arg)
1223#define NV_TRACEWARN(d, fmt, arg...) NV_PRINTK(KERN_NOTICE, d, fmt, ##arg)
1224#define NV_TRACE(d, fmt, arg...) NV_PRINTK(KERN_INFO, d, fmt, ##arg)
1225#define NV_WARN(d, fmt, arg...) NV_PRINTK(KERN_WARNING, d, fmt, ##arg)
1226
/* nouveau_reg_debug bitmask
 * Each bit enables register-access tracing for one hardware block;
 * set via the nouveau_reg_debug module parameter.
 */
enum {
	NOUVEAU_REG_DEBUG_MC             = 0x1,
	NOUVEAU_REG_DEBUG_VIDEO          = 0x2,
	NOUVEAU_REG_DEBUG_FB             = 0x4,
	NOUVEAU_REG_DEBUG_EXTDEV         = 0x8,
	NOUVEAU_REG_DEBUG_CRTC           = 0x10,
	NOUVEAU_REG_DEBUG_RAMDAC         = 0x20,
	NOUVEAU_REG_DEBUG_VGACRTC        = 0x40,
	NOUVEAU_REG_DEBUG_RMVIO          = 0x80,
	NOUVEAU_REG_DEBUG_VGAATTR        = 0x100,
	NOUVEAU_REG_DEBUG_EVO            = 0x200,
};

/* Emit a debug message when tracing for hardware block "type" is enabled. */
#define NV_REG_DEBUG(type, dev, fmt, arg...) do { \
	if (nouveau_reg_debug & NOUVEAU_REG_DEBUG_##type) \
		NV_PRINTK(KERN_DEBUG, dev, "%s: " fmt, __func__, ##arg); \
} while (0)
1245
1246static inline bool
1247nv_two_heads(struct drm_device *dev)
1248{
1249 struct drm_nouveau_private *dev_priv = dev->dev_private;
1250 const int impl = dev->pci_device & 0x0ff0;
1251
1252 if (dev_priv->card_type >= NV_10 && impl != 0x0100 &&
1253 impl != 0x0150 && impl != 0x01a0 && impl != 0x0200)
1254 return true;
1255
1256 return false;
1257}
1258
1259static inline bool
1260nv_gf4_disp_arch(struct drm_device *dev)
1261{
1262 return nv_two_heads(dev) && (dev->pci_device & 0x0ff0) != 0x0110;
1263}
1264
1265static inline bool
1266nv_two_reg_pll(struct drm_device *dev)
1267{
1268 struct drm_nouveau_private *dev_priv = dev->dev_private;
1269 const int impl = dev->pci_device & 0x0ff0;
1270
1271 if (impl == 0x0310 || impl == 0x0340 || dev_priv->card_type >= NV_40)
1272 return true;
1273 return false;
1274}
1275
/* NV50 software object class and its semaphore / vblank-semaphore methods */
#define NV50_NVSW                                                    0x0000506e
#define NV50_NVSW_DMA_SEMAPHORE                                      0x00000060
#define NV50_NVSW_SEMAPHORE_OFFSET                                   0x00000064
#define NV50_NVSW_SEMAPHORE_ACQUIRE                                  0x00000068
#define NV50_NVSW_SEMAPHORE_RELEASE                                  0x0000006c
#define NV50_NVSW_DMA_VBLSEM                                         0x0000018c
#define NV50_NVSW_VBLSEM_OFFSET                                      0x00000400
#define NV50_NVSW_VBLSEM_RELEASE_VALUE                               0x00000404
#define NV50_NVSW_VBLSEM_RELEASE                                     0x00000408
1285
1286#endif /* __NOUVEAU_DRV_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
new file mode 100644
index 000000000000..bc4a24029ed1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -0,0 +1,91 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#ifndef __NOUVEAU_ENCODER_H__
28#define __NOUVEAU_ENCODER_H__
29
30#include "drm_encoder_slave.h"
31#include "nouveau_drv.h"
32
33#define NV_DPMS_CLEARED 0x80
34
/* Driver-private encoder state wrapped around the slave-encoder base. */
struct nouveau_encoder {
	struct drm_encoder_slave base;

	struct dcb_entry *dcb;	/* VBIOS DCB entry describing this output */
	int or;			/* presumably the output resource index -- TODO confirm */

	struct drm_display_mode mode;	/* mode currently programmed */
	int last_dpms;			/* last DPMS state applied */

	struct nv04_output_reg restore;	/* saved register state for restore */

	/* hook to shut the output down / detach it from its resources */
	void (*disconnect)(struct nouveau_encoder *encoder);

	union {
		struct {
			int dpcd_version;	/* DisplayPort DPCD revision */
			int link_nr;		/* number of DP lanes */
			int link_bw;		/* DP link bandwidth */
		} dp;
	};
};
56
57static inline struct nouveau_encoder *nouveau_encoder(struct drm_encoder *enc)
58{
59 struct drm_encoder_slave *slave = to_encoder_slave(enc);
60
61 return container_of(slave, struct nouveau_encoder, base);
62}
63
/* Return the generic DRM encoder embedded inside a nouveau_encoder. */
static inline struct drm_encoder *to_drm_encoder(struct nouveau_encoder *enc)
{
	return &enc->base.base;
}
68
69struct nouveau_connector *
70nouveau_encoder_connector_get(struct nouveau_encoder *encoder);
71int nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry);
72int nv50_dac_create(struct drm_device *dev, struct dcb_entry *entry);
73
/* On-disk layout of a DisplayPort encoder table from the VBIOS; parsed
 * byte-for-byte, hence packed. */
struct bit_displayport_encoder_table {
	uint32_t match;		/* key used to locate the right table -- TODO confirm against parser */
	uint8_t record_nr;	/* number of entries following the header */
	uint8_t unknown;
	uint16_t script0;	/* script pointers (VBIOS-relative) */
	uint16_t script1;
	uint16_t unknown_table;
} __attribute__ ((packed));

/* One record of the above table: register values for a given
 * voltage-swing / pre-emphasis combination. */
struct bit_displayport_encoder_table_entry {
	uint8_t vs_level;	/* voltage swing level this entry applies to */
	uint8_t pre_level;	/* pre-emphasis level this entry applies to */
	uint8_t reg0;
	uint8_t reg1;
	uint8_t reg2;
} __attribute__ ((packed));
90
91#endif /* __NOUVEAU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fb.h b/drivers/gpu/drm/nouveau/nouveau_fb.h
new file mode 100644
index 000000000000..4a3f31aa1949
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_fb.h
@@ -0,0 +1,47 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#ifndef __NOUVEAU_FB_H__
28#define __NOUVEAU_FB_H__
29
/* A DRM framebuffer whose pixel storage is a nouveau buffer object. */
struct nouveau_framebuffer {
	struct drm_framebuffer base;
	struct nouveau_bo *nvbo;	/* backing buffer object */
};
34
/* Upcast a drm_framebuffer to its containing nouveau_framebuffer. */
static inline struct nouveau_framebuffer *
nouveau_framebuffer(struct drm_framebuffer *fb)
{
	return container_of(fb, struct nouveau_framebuffer, base);
}
40
41extern const struct drm_mode_config_funcs nouveau_mode_config_funcs;
42
43struct drm_framebuffer *
44nouveau_framebuffer_create(struct drm_device *, struct nouveau_bo *,
45 struct drm_mode_fb_cmd *);
46
47#endif /* __NOUVEAU_FB_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
new file mode 100644
index 000000000000..36e8c5e4503a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -0,0 +1,380 @@
1/*
2 * Copyright © 2007 David Airlie
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * David Airlie
25 */
26
27#include <linux/module.h>
28#include <linux/kernel.h>
29#include <linux/errno.h>
30#include <linux/string.h>
31#include <linux/mm.h>
32#include <linux/tty.h>
33#include <linux/slab.h>
34#include <linux/sysrq.h>
35#include <linux/delay.h>
36#include <linux/fb.h>
37#include <linux/init.h>
38#include <linux/screen_info.h>
39
40#include "drmP.h"
41#include "drm.h"
42#include "drm_crtc.h"
43#include "drm_crtc_helper.h"
44#include "drm_fb_helper.h"
45#include "nouveau_drv.h"
46#include "nouveau_drm.h"
47#include "nouveau_crtc.h"
48#include "nouveau_fb.h"
49#include "nouveau_fbcon.h"
50#include "nouveau_dma.h"
51
/*
 * fb_ops.fb_sync: wait for previously queued accelerated drawing to
 * complete.  Emits two methods plus a notifier write on the DMA
 * channel, then busy-waits (up to 100000 x 1us) for the GPU to clear
 * the notifier dword.  On timeout (or if the ring has no space) the
 * GPU is treated as hung and fbcon is switched to software rendering.
 * Always returns 0 so the console keeps working either way.
 */
static int
nouveau_fbcon_sync(struct fb_info *info)
{
	struct nouveau_fbcon_par *par = info->par;
	struct drm_device *dev = par->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = dev_priv->channel;
	int ret, i;

	/* Nothing to sync if no accel work was queued, or accel is off. */
	if (!chan->accel_done ||
	    info->state != FBINFO_STATE_RUNNING ||
	    info->flags & FBINFO_HWACCEL_DISABLED)
		return 0;

	if (RING_SPACE(chan, 4)) {
		NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
		info->flags |= FBINFO_HWACCEL_DISABLED;
		return 0;
	}

	BEGIN_RING(chan, 0, 0x0104, 1);
	OUT_RING(chan, 0);
	BEGIN_RING(chan, 0, 0x0100, 1);
	OUT_RING(chan, 0);
	/* Arm the notifier dword; the GPU is expected to zero it when the
	 * methods above have been processed. */
	nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy + 3, 0xffffffff);
	FIRE_RING(chan);

	ret = -EBUSY;
	for (i = 0; i < 100000; i++) {
		if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy + 3)) {
			ret = 0;
			break;
		}
		DRM_UDELAY(1);
	}

	if (ret) {
		NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
		info->flags |= FBINFO_HWACCEL_DISABLED;
		return 0;
	}

	chan->accel_done = false;
	return 0;
}
97
/* fbcon entry points: drawing uses the software cfb_* helpers (with
 * nouveau_fbcon_sync draining pending accel work first); mode/color
 * handling is delegated to the generic drm_fb_helper callbacks. */
static struct fb_ops nouveau_fbcon_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = drm_fb_helper_check_var,
	.fb_set_par = drm_fb_helper_set_par,
	.fb_setcolreg = drm_fb_helper_setcolreg,
	.fb_fillrect = cfb_fillrect,
	.fb_copyarea = cfb_copyarea,
	.fb_imageblit = cfb_imageblit,
	.fb_sync = nouveau_fbcon_sync,
	.fb_pan_display = drm_fb_helper_pan_display,
	.fb_blank = drm_fb_helper_blank,
	.fb_setcmap = drm_fb_helper_setcmap,
};
111
112static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
113 u16 blue, int regno)
114{
115 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
116
117 nv_crtc->lut.r[regno] = red;
118 nv_crtc->lut.g[regno] = green;
119 nv_crtc->lut.b[regno] = blue;
120}
121
122static void nouveau_fbcon_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
123 u16 *blue, int regno)
124{
125 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
126
127 *red = nv_crtc->lut.r[regno];
128 *green = nv_crtc->lut.g[regno];
129 *blue = nv_crtc->lut.b[regno];
130}
131
/* Gamma hooks handed to the generic DRM fb helper. */
static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
	.gamma_set = nouveau_fbcon_gamma_set,
	.gamma_get = nouveau_fbcon_gamma_get
};
136
#if defined(__i386__) || defined(__x86_64__)
/*
 * Heuristic: is vesafb/efifb currently driving the console on this
 * device?  Checks whether the firmware framebuffer (screen_info.lfb_*)
 * falls inside one of this GPU's PCI BARs: BAR 1 first, then the
 * RAMIN BAR (2, or 3 when 2 has zero length).
 *
 * NOTE(review): for VIDEO_TYPE_VLFB, lfb_size appears to be in 64KiB
 * units (the caller scales it when filling aperture_size) but the raw
 * value is used here -- confirm these range checks are intended.
 */
static bool
nouveau_fbcon_has_vesafb_or_efifb(struct drm_device *dev)
{
	struct pci_dev *pdev = dev->pdev;
	int ramin;

	/* Only VESA/EFI linear framebuffers are of interest. */
	if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB &&
	    screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
		return false;

	if (screen_info.lfb_base < pci_resource_start(pdev, 1))
		goto not_fb;

	if (screen_info.lfb_base + screen_info.lfb_size >=
	    pci_resource_start(pdev, 1) + pci_resource_len(pdev, 1))
		goto not_fb;

	return true;
not_fb:
	/* Some VBIOSes place the firmware fb in the RAMIN BAR instead. */
	ramin = 2;
	if (pci_resource_len(pdev, ramin) == 0) {
		ramin = 3;
		if (pci_resource_len(pdev, ramin) == 0)
			return false;
	}

	if (screen_info.lfb_base < pci_resource_start(pdev, ramin))
		return false;

	if (screen_info.lfb_base + screen_info.lfb_size >=
	    pci_resource_start(pdev, ramin) + pci_resource_len(pdev, ramin))
		return false;

	return true;
}
#endif
174
175void
176nouveau_fbcon_zfill(struct drm_device *dev)
177{
178 struct drm_nouveau_private *dev_priv = dev->dev_private;
179 struct fb_info *info = dev_priv->fbdev_info;
180 struct fb_fillrect rect;
181
182 /* Clear the entire fbcon. The drm will program every connector
183 * with it's preferred mode. If the sizes differ, one display will
184 * quite likely have garbage around the console.
185 */
186 rect.dx = rect.dy = 0;
187 rect.width = info->var.xres_virtual;
188 rect.height = info->var.yres_virtual;
189 rect.color = 0;
190 rect.rop = ROP_COPY;
191 info->fbops->fb_fillrect(info, &rect);
192}
193
/*
 * Allocate and register the fbcon framebuffer:
 *   - allocate a VRAM GEM buffer sized for the surface, pin and map it,
 *   - wrap it in a drm_framebuffer and an fb_info,
 *   - hook up the accel functions for the chip generation,
 *   - clear the surface.
 *
 * Called with the sizes chosen by drm_fb_helper_single_fb_probe().
 * Returns 0 and stores the framebuffer in *pfb on success.
 */
static int
nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
		     uint32_t fb_height, uint32_t surface_width,
		     uint32_t surface_height, uint32_t surface_depth,
		     uint32_t surface_bpp, struct drm_framebuffer **pfb)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct fb_info *info;
	struct nouveau_fbcon_par *par;
	struct drm_framebuffer *fb;
	struct nouveau_framebuffer *nouveau_fb;
	struct nouveau_bo *nvbo;
	struct drm_mode_fb_cmd mode_cmd;
	struct device *device = &dev->pdev->dev;
	int size, ret;

	mode_cmd.width = surface_width;
	mode_cmd.height = surface_height;

	mode_cmd.bpp = surface_bpp;
	/* Pitch in bytes, rounded up to the 256-byte hw requirement. */
	mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3);
	mode_cmd.pitch = ALIGN(mode_cmd.pitch, 256);
	mode_cmd.depth = surface_depth;

	size = mode_cmd.pitch * mode_cmd.height;
	size = ALIGN(size, PAGE_SIZE);

	ret = nouveau_gem_new(dev, dev_priv->channel, size, 0, TTM_PL_FLAG_VRAM,
			      0, 0x0000, false, true, &nvbo);
	if (ret) {
		NV_ERROR(dev, "failed to allocate framebuffer\n");
		goto out;
	}

	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
	if (ret) {
		NV_ERROR(dev, "failed to pin fb: %d\n", ret);
		nouveau_bo_ref(NULL, &nvbo);
		goto out;
	}

	ret = nouveau_bo_map(nvbo);
	if (ret) {
		NV_ERROR(dev, "failed to map fb: %d\n", ret);
		nouveau_bo_unpin(nvbo);
		nouveau_bo_ref(NULL, &nvbo);
		goto out;
	}

	mutex_lock(&dev->struct_mutex);

	fb = nouveau_framebuffer_create(dev, nvbo, &mode_cmd);
	if (!fb) {
		ret = -ENOMEM;
		NV_ERROR(dev, "failed to allocate fb.\n");
		goto out_unref;
	}

	list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list);

	nouveau_fb = nouveau_framebuffer(fb);
	*pfb = fb;

	info = framebuffer_alloc(sizeof(struct nouveau_fbcon_par), device);
	if (!info) {
		ret = -ENOMEM;
		goto out_unref;
	}

	par = info->par;
	par->helper.funcs = &nouveau_fbcon_helper_funcs;
	par->helper.dev = dev;
	ret = drm_fb_helper_init_crtc_count(&par->helper, 2, 4);
	if (ret)
		goto out_unref;
	dev_priv->fbdev_info = info;

	strcpy(info->fix.id, "nouveaufb");
	info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
		      FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_IMAGEBLIT;
	info->fbops = &nouveau_fbcon_ops;
	/* Physical address of the scanout surface within the fb BAR. */
	info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset -
			       dev_priv->vm_vram_base;
	info->fix.smem_len = size;

	info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
	info->screen_size = size;

	drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
	drm_fb_helper_fill_var(info, fb, fb_width, fb_height);

	/* FIXME: we really shouldn't expose mmio space at all */
	info->fix.mmio_start = pci_resource_start(dev->pdev, 1);
	info->fix.mmio_len = pci_resource_len(dev->pdev, 1);

	/* Set aperture base/size for vesafb takeover */
#if defined(__i386__) || defined(__x86_64__)
	if (nouveau_fbcon_has_vesafb_or_efifb(dev)) {
		/* Some NVIDIA VBIOS' are stupid and decide to put the
		 * framebuffer in the middle of the PRAMIN BAR for
		 * whatever reason.  We need to know the exact lfb_base
		 * to get vesafb kicked off, and the only reliable way
		 * we have left is to find out lfb_base the same way
		 * vesafb did.
		 */
		info->aperture_base = screen_info.lfb_base;
		info->aperture_size = screen_info.lfb_size;
		if (screen_info.orig_video_isVGA == VIDEO_TYPE_VLFB)
			info->aperture_size *= 65536;
	} else
#endif
	{
		info->aperture_base = info->fix.mmio_start;
		info->aperture_size = info->fix.mmio_len;
	}

	info->pixmap.size = 64*1024;
	info->pixmap.buf_align = 8;
	info->pixmap.access_align = 32;
	info->pixmap.flags = FB_PIXMAP_SYSTEM;
	info->pixmap.scan_align = 1;

	fb->fbdev = info;

	par->nouveau_fb = nouveau_fb;
	par->dev = dev;

	switch (dev_priv->card_type) {
	case NV_50:
		nv50_fbcon_accel_init(info);
		break;
	default:
		nv04_fbcon_accel_init(info);
		break;
	}; /* NOTE(review): stray ';' after switch block */

	nouveau_fbcon_zfill(dev);

	/* To allow resizeing without swapping buffers */
	NV_INFO(dev, "allocated %dx%d fb: 0x%lx, bo %p\n",
		nouveau_fb->base.width,
		nouveau_fb->base.height,
		nvbo->bo.offset, nvbo);

	mutex_unlock(&dev->struct_mutex);
	return 0;

out_unref:
	/* NOTE(review): at this point nvbo is still mapped/pinned and
	 * referenced (and the fb, if created, is not destroyed) -- confirm
	 * cleanup happens elsewhere on this error path. */
	mutex_unlock(&dev->struct_mutex);
out:
	return ret;
}
346
/* Create the 32bpp fbcon framebuffer via the generic single-fb helper,
 * which calls back into nouveau_fbcon_create(). */
int
nouveau_fbcon_probe(struct drm_device *dev)
{
	NV_DEBUG(dev, "\n");

	return drm_fb_helper_single_fb_probe(dev, 32, nouveau_fbcon_create);
}
354
355int
356nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb)
357{
358 struct nouveau_framebuffer *nouveau_fb = nouveau_framebuffer(fb);
359 struct fb_info *info;
360
361 if (!fb)
362 return -EINVAL;
363
364 info = fb->fbdev;
365 if (info) {
366 struct nouveau_fbcon_par *par = info->par;
367
368 unregister_framebuffer(info);
369 nouveau_bo_unmap(nouveau_fb->nvbo);
370 mutex_lock(&dev->struct_mutex);
371 drm_gem_object_unreference(nouveau_fb->nvbo->gem);
372 nouveau_fb->nvbo = NULL;
373 mutex_unlock(&dev->struct_mutex);
374 if (par)
375 drm_fb_helper_free(&par->helper);
376 framebuffer_release(info);
377 }
378
379 return 0;
380}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
new file mode 100644
index 000000000000..8531140fedbc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -0,0 +1,47 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#ifndef __NOUVEAU_FBCON_H__
28#define __NOUVEAU_FBCON_H__
29
30#include "drm_fb_helper.h"
31
/* Per-fbdev private data stored in fb_info->par. */
struct nouveau_fbcon_par {
	struct drm_fb_helper helper;
	struct drm_device *dev;
	struct nouveau_framebuffer *nouveau_fb;	/* framebuffer backing the console */
};
37
38int nouveau_fbcon_probe(struct drm_device *dev);
39int nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb);
40void nouveau_fbcon_restore(void);
41void nouveau_fbcon_zfill(struct drm_device *dev);
42
43int nv04_fbcon_accel_init(struct fb_info *info);
44int nv50_fbcon_accel_init(struct fb_info *info);
45
46#endif /* __NV50_FBCON_H__ */
47
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
new file mode 100644
index 000000000000..0cff7eb3690a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -0,0 +1,262 @@
1/*
2 * Copyright (C) 2007 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm.h"
29
30#include "nouveau_drv.h"
31#include "nouveau_dma.h"
32
/* NV10+ exposes a per-channel refcount the GPU updates directly; older
 * chips rely on the sequence recorded by the IRQ handler. */
#define USE_REFCNT (dev_priv->card_type >= NV_10)

struct nouveau_fence {
	struct nouveau_channel *channel;	/* channel the fence is emitted on */
	struct kref refcount;
	struct list_head entry;		/* link in channel->fence.pending */

	uint32_t sequence;	/* sequence value emitted with this fence */
	bool signalled;		/* set once the GPU has passed the sequence */
};
43
/* Sync objects are passed around as opaque void pointers; recover the
 * typed fence. */
static inline struct nouveau_fence *
nouveau_fence(void *sync_obj)
{
	struct nouveau_fence *fence = sync_obj;

	return fence;
}
49
50static void
51nouveau_fence_del(struct kref *ref)
52{
53 struct nouveau_fence *fence =
54 container_of(ref, struct nouveau_fence, refcount);
55
56 kfree(fence);
57}
58
/*
 * Retire pending fences on @chan up to the sequence the GPU has
 * completed.  On NV10+ the completed sequence is read from channel
 * offset 0x48; on older chips it is the value recorded by the IRQ
 * handler.  Every caller in this file holds chan->fence.lock around
 * this function, which protects the pending list.
 */
void
nouveau_fence_update(struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct list_head *entry, *tmp;
	struct nouveau_fence *fence;
	uint32_t sequence;

	if (USE_REFCNT)
		sequence = nvchan_rd32(chan, 0x48);
	else
		sequence = chan->fence.last_sequence_irq;

	/* Fast path: nothing new has completed since last time. */
	if (chan->fence.sequence_ack == sequence)
		return;
	chan->fence.sequence_ack = sequence;

	list_for_each_safe(entry, tmp, &chan->fence.pending) {
		fence = list_entry(entry, struct nouveau_fence, entry);

		sequence = fence->sequence;
		fence->signalled = true;
		list_del(&fence->entry);
		/* Drop the list's reference taken in nouveau_fence_emit(). */
		kref_put(&fence->refcount, nouveau_fence_del);

		/* Stop after retiring the acknowledged sequence. */
		if (sequence == chan->fence.sequence_ack)
			break;
	}
}
88
89int
90nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence,
91 bool emit)
92{
93 struct nouveau_fence *fence;
94 int ret = 0;
95
96 fence = kzalloc(sizeof(*fence), GFP_KERNEL);
97 if (!fence)
98 return -ENOMEM;
99 kref_init(&fence->refcount);
100 fence->channel = chan;
101
102 if (emit)
103 ret = nouveau_fence_emit(fence);
104
105 if (ret)
106 nouveau_fence_unref((void *)&fence);
107 *pfence = fence;
108 return ret;
109}
110
111struct nouveau_channel *
112nouveau_fence_channel(struct nouveau_fence *fence)
113{
114 return fence ? fence->channel : NULL;
115}
116
/*
 * Assign the next sequence number to @fence, put it on the channel's
 * pending list (taking a reference owned by that list) and write the
 * sequence to the ring.  On NV10+ the sequence goes through method
 * 0x0050 (refcount); older chips use 0x0150.
 */
int
nouveau_fence_emit(struct nouveau_fence *fence)
{
	struct drm_nouveau_private *dev_priv = fence->channel->dev->dev_private;
	struct nouveau_channel *chan = fence->channel;
	unsigned long flags;
	int ret;

	ret = RING_SPACE(chan, 2);
	if (ret)
		return ret;

	/* Sequence counter about to wrap onto an un-acked value: retire
	 * what we can first; wrapping past outstanding fences is fatal. */
	if (unlikely(chan->fence.sequence == chan->fence.sequence_ack - 1)) {
		spin_lock_irqsave(&chan->fence.lock, flags);
		nouveau_fence_update(chan);
		spin_unlock_irqrestore(&chan->fence.lock, flags);

		BUG_ON(chan->fence.sequence ==
		       chan->fence.sequence_ack - 1);
	}

	fence->sequence = ++chan->fence.sequence;

	/* Reference held by the pending list until the fence retires. */
	kref_get(&fence->refcount);
	spin_lock_irqsave(&chan->fence.lock, flags);
	list_add_tail(&fence->entry, &chan->fence.pending);
	spin_unlock_irqrestore(&chan->fence.lock, flags);

	BEGIN_RING(chan, NvSubM2MF, USE_REFCNT ? 0x0050 : 0x0150, 1);
	OUT_RING(chan, fence->sequence);
	FIRE_RING(chan);

	return 0;
}
151
152void
153nouveau_fence_unref(void **sync_obj)
154{
155 struct nouveau_fence *fence = nouveau_fence(*sync_obj);
156
157 if (fence)
158 kref_put(&fence->refcount, nouveau_fence_del);
159 *sync_obj = NULL;
160}
161
162void *
163nouveau_fence_ref(void *sync_obj)
164{
165 struct nouveau_fence *fence = nouveau_fence(sync_obj);
166
167 kref_get(&fence->refcount);
168 return sync_obj;
169}
170
/*
 * Has this fence been passed by the GPU?  Checks the cached flag first,
 * then forces a retire pass under the channel's fence lock and checks
 * again.  @sync_arg is unused.
 */
bool
nouveau_fence_signalled(void *sync_obj, void *sync_arg)
{
	struct nouveau_fence *fence = nouveau_fence(sync_obj);
	struct nouveau_channel *chan = fence->channel;
	unsigned long flags;

	if (fence->signalled)
		return true;

	spin_lock_irqsave(&chan->fence.lock, flags);
	nouveau_fence_update(chan);
	spin_unlock_irqrestore(&chan->fence.lock, flags);
	return fence->signalled;
}
186
/*
 * Wait up to 3 seconds for a fence to signal.  With @lazy the wait
 * sleeps one tick between polls; without it this is a busy poll.  With
 * @intr the wait is interruptible by pending signals.
 *
 * Returns 0 on success, -EBUSY on timeout, -ERESTART when interrupted.
 * NOTE(review): modern kernels would return -ERESTARTSYS here; confirm
 * what the callers of this TTM-era hook expect.
 */
int
nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
{
	unsigned long timeout = jiffies + (3 * DRM_HZ);
	int ret = 0;

	__set_current_state(intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);

	while (1) {
		if (nouveau_fence_signalled(sync_obj, sync_arg))
			break;

		if (time_after_eq(jiffies, timeout)) {
			ret = -EBUSY;
			break;
		}

		if (lazy)
			schedule_timeout(1);

		if (intr && signal_pending(current)) {
			ret = -ERESTART;
			break;
		}
	}

	__set_current_state(TASK_RUNNING);

	return ret;
}
217
/* Sync-object flush hook: nouveau fences need no flushing, so this is
 * a no-op that always succeeds. */
int
nouveau_fence_flush(void *sync_obj, void *sync_arg)
{
	return 0;
}
223
224void
225nouveau_fence_handler(struct drm_device *dev, int channel)
226{
227 struct drm_nouveau_private *dev_priv = dev->dev_private;
228 struct nouveau_channel *chan = NULL;
229
230 if (channel >= 0 && channel < dev_priv->engine.fifo.channels)
231 chan = dev_priv->fifos[channel];
232
233 if (chan) {
234 spin_lock_irq(&chan->fence.lock);
235 nouveau_fence_update(chan);
236 spin_unlock_irq(&chan->fence.lock);
237 }
238}
239
/* Initialise per-channel fence state (pending list + lock). Always 0. */
int
nouveau_fence_init(struct nouveau_channel *chan)
{
	INIT_LIST_HEAD(&chan->fence.pending);
	spin_lock_init(&chan->fence.lock);
	return 0;
}
247
248void
249nouveau_fence_fini(struct nouveau_channel *chan)
250{
251 struct list_head *entry, *tmp;
252 struct nouveau_fence *fence;
253
254 list_for_each_safe(entry, tmp, &chan->fence.pending) {
255 fence = list_entry(entry, struct nouveau_fence, entry);
256
257 fence->signalled = true;
258 list_del(&fence->entry);
259 kref_put(&fence->refcount, nouveau_fence_del);
260 }
261}
262
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
new file mode 100644
index 000000000000..11f831f0ddc5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -0,0 +1,992 @@
1/*
2 * Copyright (C) 2008 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26#include "drmP.h"
27#include "drm.h"
28
29#include "nouveau_drv.h"
30#include "nouveau_drm.h"
31#include "nouveau_dma.h"
32
33#define nouveau_gem_pushbuf_sync(chan) 0
34
/* GEM object-creation hook: no per-object setup is done here; the real
 * work happens in nouveau_gem_new(). */
int
nouveau_gem_object_new(struct drm_gem_object *gem)
{
	return 0;
}
40
41void
42nouveau_gem_object_del(struct drm_gem_object *gem)
43{
44 struct nouveau_bo *nvbo = gem->driver_private;
45 struct ttm_buffer_object *bo = &nvbo->bo;
46
47 if (!nvbo)
48 return;
49 nvbo->gem = NULL;
50
51 if (unlikely(nvbo->cpu_filp))
52 ttm_bo_synccpu_write_release(bo);
53
54 if (unlikely(nvbo->pin_refcnt)) {
55 nvbo->pin_refcnt = 1;
56 nouveau_bo_unpin(nvbo);
57 }
58
59 ttm_bo_unref(&bo);
60}
61
/*
 * Allocate a nouveau buffer object and wrap it in a GEM object.  The
 * GEM object's size is the buffer's actual (page-rounded) size, and the
 * two are linked through gem->driver_private / nvbo->gem.
 */
int
nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
		int size, int align, uint32_t flags, uint32_t tile_mode,
		uint32_t tile_flags, bool no_vm, bool mappable,
		struct nouveau_bo **pnvbo)
{
	struct nouveau_bo *nvbo;
	int ret;

	ret = nouveau_bo_new(dev, chan, size, align, flags, tile_mode,
			     tile_flags, no_vm, mappable, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
	if (!nvbo->gem) {
		/* Drop the buffer again; *pnvbo is cleared by the ref. */
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->bo.persistant_swap_storage = nvbo->gem->filp;
	nvbo->gem->driver_private = nvbo;
	return 0;
}
87
88static int
89nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep)
90{
91 struct nouveau_bo *nvbo = nouveau_gem_object(gem);
92
93 if (nvbo->bo.mem.mem_type == TTM_PL_TT)
94 rep->domain = NOUVEAU_GEM_DOMAIN_GART;
95 else
96 rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
97
98 rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
99 rep->offset = nvbo->bo.offset;
100 rep->map_handle = nvbo->mappable ? nvbo->bo.addr_space_offset : 0;
101 rep->tile_mode = nvbo->tile_mode;
102 rep->tile_flags = nvbo->tile_flags;
103 return 0;
104}
105
106static bool
107nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags) {
108 switch (tile_flags) {
109 case 0x0000:
110 case 0x1800:
111 case 0x2800:
112 case 0x4800:
113 case 0x7000:
114 case 0x7400:
115 case 0x7a00:
116 case 0xe000:
117 break;
118 default:
119 NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags);
120 return false;
121 }
122
123 return true;
124}
125
/*
 * DRM_IOCTL_NOUVEAU_GEM_NEW: allocate a buffer per the request's domain
 * mask and tiling, report its info back, and create a userspace handle
 * for it.  On success the handle keeps the object alive; the local
 * reference is dropped either way before returning.
 */
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	struct nouveau_channel *chan = NULL;
	uint32_t flags = 0;
	int ret = 0;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

	if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
		dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;

	if (req->channel_hint) {
		NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel_hint,
						     file_priv, chan);
	}

	/* Translate GEM domain bits into TTM placement flags; fall back
	 * to system memory when nothing (or CPU) was requested. */
	if (req->info.domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (req->info.domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	if (!flags || req->info.domain & NOUVEAU_GEM_DOMAIN_CPU)
		flags |= TTM_PL_FLAG_SYSTEM;

	if (!nouveau_gem_tile_flags_valid(dev, req->info.tile_flags))
		return -EINVAL;

	ret = nouveau_gem_new(dev, chan, req->info.size, req->align, flags,
			      req->info.tile_mode, req->info.tile_flags, false,
			      (req->info.domain & NOUVEAU_GEM_DOMAIN_MAPPABLE),
			      &nvbo);
	if (ret)
		return ret;

	ret = nouveau_gem_info(nvbo->gem, &req->info);
	if (ret)
		goto out;

	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
out:
	/* Drop the allocation's reference; the handle (if created)
	 * retains its own. */
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_handle_unreference(nvbo->gem);
	mutex_unlock(&dev->struct_mutex);

	if (ret)
		drm_gem_object_unreference(nvbo->gem);
	return ret;
}
178
/*
 * Choose a TTM placement for @gem from the GEM domain masks supplied by
 * userspace.  Writes force a single valid write domain; reads prefer to
 * keep the buffer where it already is, then fall back to VRAM, then
 * GART.  Returns -EINVAL if the masks allow no placement.
 */
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
		       uint32_t write_domains, uint32_t valid_domains)
{
	struct nouveau_bo *nvbo = gem->driver_private;
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint64_t flags;

	if (!valid_domains || (!read_domains && !write_domains))
		return -EINVAL;

	if (write_domains) {
		if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (write_domains & NOUVEAU_GEM_DOMAIN_VRAM))
			flags = TTM_PL_FLAG_VRAM;
		else
		if ((valid_domains & NOUVEAU_GEM_DOMAIN_GART) &&
		    (write_domains & NOUVEAU_GEM_DOMAIN_GART))
			flags = TTM_PL_FLAG_TT;
		else
			return -EINVAL;
	} else {
		/* Read-only: avoid a migration if the current placement
		 * is already acceptable. */
		if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (read_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    bo->mem.mem_type == TTM_PL_VRAM)
			flags = TTM_PL_FLAG_VRAM;
		else
		if ((valid_domains & NOUVEAU_GEM_DOMAIN_GART) &&
		    (read_domains & NOUVEAU_GEM_DOMAIN_GART) &&
		    bo->mem.mem_type == TTM_PL_TT)
			flags = TTM_PL_FLAG_TT;
		else
		if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (read_domains & NOUVEAU_GEM_DOMAIN_VRAM))
			flags = TTM_PL_FLAG_VRAM;
		else
			flags = TTM_PL_FLAG_TT;
	}

	nouveau_bo_placement_set(nvbo, flags);
	return 0;
}
221
/* State carried through a pushbuf validation pass: the fence to attach
 * on completion, and the buffers bucketed by requested placement. */
struct validate_op {
	struct nouveau_fence *fence;
	struct list_head vram_list;
	struct list_head gart_list;
	struct list_head both_list;	/* buffers valid in either placement */
};
228
/*
 * Release one validation bucket: optionally attach @fence as each
 * buffer's new sync object (swapping out and unreferencing the old
 * one), then unlink, unreserve and drop the lookup reference on every
 * buffer.
 */
static void
validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
{
	struct list_head *entry, *tmp;
	struct nouveau_bo *nvbo;

	list_for_each_safe(entry, tmp, list) {
		nvbo = list_entry(entry, struct nouveau_bo, entry);
		if (likely(fence)) {
			struct nouveau_fence *prev_fence;

			/* Swap in the new fence under the bo lock, drop
			 * the old one outside it. */
			spin_lock(&nvbo->bo.lock);
			prev_fence = nvbo->bo.sync_obj;
			nvbo->bo.sync_obj = nouveau_fence_ref(fence);
			spin_unlock(&nvbo->bo.lock);
			nouveau_fence_unref((void *)&prev_fence);
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve(&nvbo->bo);
		/* reference taken by the lookup in validate_init() */
		drm_gem_object_unreference(nvbo->gem);
	}
}
253
254static void
255validate_fini(struct validate_op *op, bool success)
256{
257 struct nouveau_fence *fence = op->fence;
258
259 if (unlikely(!success))
260 op->fence = NULL;
261
262 validate_fini_list(&op->vram_list, op->fence);
263 validate_fini_list(&op->gart_list, op->fence);
264 validate_fini_list(&op->both_list, op->fence);
265 nouveau_fence_unref((void *)&fence);
266}
267
268static int
269validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
270 struct drm_nouveau_gem_pushbuf_bo *pbbo,
271 int nr_buffers, struct validate_op *op)
272{
273 struct drm_device *dev = chan->dev;
274 struct drm_nouveau_private *dev_priv = dev->dev_private;
275 uint32_t sequence;
276 int trycnt = 0;
277 int ret, i;
278
279 sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
280retry:
281 if (++trycnt > 100000) {
282 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
283 return -EINVAL;
284 }
285
286 for (i = 0; i < nr_buffers; i++) {
287 struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
288 struct drm_gem_object *gem;
289 struct nouveau_bo *nvbo;
290
291 gem = drm_gem_object_lookup(dev, file_priv, b->handle);
292 if (!gem) {
293 NV_ERROR(dev, "Unknown handle 0x%08x\n", b->handle);
294 validate_fini(op, NULL);
295 return -EINVAL;
296 }
297 nvbo = gem->driver_private;
298
299 if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
300 NV_ERROR(dev, "multiple instances of buffer %d on "
301 "validation list\n", b->handle);
302 validate_fini(op, NULL);
303 return -EINVAL;
304 }
305
306 ret = ttm_bo_reserve(&nvbo->bo, false, false, true, sequence);
307 if (ret) {
308 validate_fini(op, NULL);
309 if (ret == -EAGAIN)
310 ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
311 drm_gem_object_unreference(gem);
312 if (ret)
313 return ret;
314 goto retry;
315 }
316
317 nvbo->reserved_by = file_priv;
318 nvbo->pbbo_index = i;
319 if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
320 (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
321 list_add_tail(&nvbo->entry, &op->both_list);
322 else
323 if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
324 list_add_tail(&nvbo->entry, &op->vram_list);
325 else
326 if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
327 list_add_tail(&nvbo->entry, &op->gart_list);
328 else {
329 NV_ERROR(dev, "invalid valid domains: 0x%08x\n",
330 b->valid_domains);
331 validate_fini(op, NULL);
332 return -EINVAL;
333 }
334
335 if (unlikely(atomic_read(&nvbo->bo.cpu_writers) > 0)) {
336 validate_fini(op, NULL);
337
338 if (nvbo->cpu_filp == file_priv) {
339 NV_ERROR(dev, "bo %p mapped by process trying "
340 "to validate it!\n", nvbo);
341 return -EINVAL;
342 }
343
344 ret = ttm_bo_wait_cpu(&nvbo->bo, false);
345 if (ret == -ERESTART)
346 ret = -EAGAIN;
347 if (ret)
348 return ret;
349 goto retry;
350 }
351 }
352
353 return 0;
354}
355
/* Validate every bo on @list for use by @chan: wait out cross-channel
 * fences, apply the requested domain, call ttm_bo_validate(), and refresh
 * the userspace copy of any bo whose presumed offset/domain was stale.
 * Returns the number of bos needing relocation, or a negative errno.
 * Buffers remain reserved; the caller releases them via validate_fini().
 */
static int
validate_list(struct nouveau_channel *chan, struct list_head *list,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
{
	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
				(void __force __user *)(uintptr_t)user_pbbo_ptr;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;

	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
		struct nouveau_fence *prev_fence = nvbo->bo.sync_obj;

		/* only wait if the bo was last used by another channel;
		 * same-channel ordering is handled by the ring itself */
		if (prev_fence && nouveau_fence_channel(prev_fence) != chan) {
			spin_lock(&nvbo->bo.lock);
			ret = ttm_bo_wait(&nvbo->bo, false, false, false);
			spin_unlock(&nvbo->bo.lock);
			if (unlikely(ret))
				return ret;
		}

		ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret))
			return ret;

		/* nvbo->channel tells the driver's move hooks which channel
		 * to use for the duration of the validate */
		nvbo->channel = chan;
		ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
				      false, false);
		nvbo->channel = NULL;
		if (unlikely(ret))
			return ret;

		/* userspace's presumed placement still accurate? */
		if (nvbo->bo.offset == b->presumed_offset &&
		    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
		      b->presumed_domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
		     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
		      b->presumed_domain & NOUVEAU_GEM_DOMAIN_GART)))
			continue;

		/* stale: record the new placement and push it back to
		 * userspace so future submits can skip relocation */
		if (nvbo->bo.mem.mem_type == TTM_PL_TT)
			b->presumed_domain = NOUVEAU_GEM_DOMAIN_GART;
		else
			b->presumed_domain = NOUVEAU_GEM_DOMAIN_VRAM;
		b->presumed_offset = nvbo->bo.offset;
		b->presumed_ok = 0;
		relocs++;

		if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index], b, sizeof(*b)))
			return -EFAULT;
	}

	return relocs;
}
411
412static int
413nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
414 struct drm_file *file_priv,
415 struct drm_nouveau_gem_pushbuf_bo *pbbo,
416 uint64_t user_buffers, int nr_buffers,
417 struct validate_op *op, int *apply_relocs)
418{
419 int ret, relocs = 0;
420
421 INIT_LIST_HEAD(&op->vram_list);
422 INIT_LIST_HEAD(&op->gart_list);
423 INIT_LIST_HEAD(&op->both_list);
424
425 ret = nouveau_fence_new(chan, &op->fence, false);
426 if (ret)
427 return ret;
428
429 if (nr_buffers == 0)
430 return 0;
431
432 ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
433 if (unlikely(ret))
434 return ret;
435
436 ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
437 if (unlikely(ret < 0)) {
438 validate_fini(op, NULL);
439 return ret;
440 }
441 relocs += ret;
442
443 ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
444 if (unlikely(ret < 0)) {
445 validate_fini(op, NULL);
446 return ret;
447 }
448 relocs += ret;
449
450 ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
451 if (unlikely(ret < 0)) {
452 validate_fini(op, NULL);
453 return ret;
454 }
455 relocs += ret;
456
457 *apply_relocs = relocs;
458 return 0;
459}
460
461static inline void *
462u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
463{
464 void *mem;
465 void __user *userptr = (void __force __user *)(uintptr_t)user;
466
467 mem = kmalloc(nmemb * size, GFP_KERNEL);
468 if (!mem)
469 return ERR_PTR(-ENOMEM);
470
471 if (DRM_COPY_FROM_USER(mem, userptr, nmemb * size)) {
472 kfree(mem);
473 return ERR_PTR(-EFAULT);
474 }
475
476 return mem;
477}
478
/* Copy the relocation array from userspace and patch each referenced dword
 * of @pushbuf (a kernel mapping of nr_dwords dwords starting at
 * first_dword) with the buffer's real offset.  @is_iomem selects iowrite32
 * vs plain stores for the destination mapping.  Returns 0 or -EINVAL on a
 * reloc that points outside the buffer/bo arrays.
 */
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_channel *chan, int nr_bo,
				struct drm_nouveau_gem_pushbuf_bo *bo,
				int nr_relocs, uint64_t ptr_relocs,
				int nr_dwords, int first_dword,
				uint32_t *pushbuf, bool is_iomem)
{
	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	struct drm_device *dev = chan->dev;
	int ret = 0, i;

	reloc = u_memcpya(ptr_relocs, nr_relocs, sizeof(*reloc));
	if (IS_ERR(reloc))
		return PTR_ERR(reloc);

	for (i = 0; i < nr_relocs; i++) {
		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
		struct drm_nouveau_gem_pushbuf_bo *b;
		uint32_t data;

		/* bounds-check both indices before touching anything */
		if (r->bo_index >= nr_bo || r->reloc_index < first_dword ||
		    r->reloc_index >= first_dword + nr_dwords) {
			NV_ERROR(dev, "Bad relocation %d\n", i);
			NV_ERROR(dev, "  bo: %d max %d\n", r->bo_index, nr_bo);
			NV_ERROR(dev, "  id: %d max %d\n", r->reloc_index, nr_dwords);
			ret = -EINVAL;
			break;
		}

		b = &bo[r->bo_index];
		/* buffer didn't move since userspace built the pushbuf */
		if (b->presumed_ok)
			continue;

		/* LOW/HIGH pick the 32-bit half of the 64-bit address */
		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
			data = b->presumed_offset + r->data;
		else
		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
			data = (b->presumed_offset + r->data) >> 32;
		else
			data = r->data;

		/* OR in the domain-specific value (tor = GART, vor = VRAM) */
		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
			if (b->presumed_domain == NOUVEAU_GEM_DOMAIN_GART)
				data |= r->tor;
			else
				data |= r->vor;
		}

		if (is_iomem)
			iowrite32_native(data, (void __force __iomem *)
						&pushbuf[r->reloc_index]);
		else
			pushbuf[r->reloc_index] = data;
	}

	kfree(reloc);
	return ret;
}
537
/* DRM_IOCTL_NOUVEAU_GEM_PUSHBUF: copy a user-supplied push buffer into the
 * channel's ring.  Validates the buffer list, applies relocations to the
 * kernel copy of the dwords, emits them with OUT_RINGp and fences all
 * buffers.  Runs entirely under dev->struct_mutex.
 */
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_bo *bo = NULL;
	struct nouveau_channel *chan;
	struct validate_op op;
	uint32_t *pushbuf = NULL;
	int ret = 0, do_reloc = 0, i;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);

	/* reject oversized requests before copying anything in */
	if (req->nr_dwords >= chan->dma.max ||
	    req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS ||
	    req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS) {
		NV_ERROR(dev, "Pushbuf config exceeds limits:\n");
		NV_ERROR(dev, "  dwords : %d max %d\n", req->nr_dwords,
			 chan->dma.max - 1);
		NV_ERROR(dev, "  buffers: %d max %d\n", req->nr_buffers,
			 NOUVEAU_GEM_MAX_BUFFERS);
		NV_ERROR(dev, "  relocs : %d max %d\n", req->nr_relocs,
			 NOUVEAU_GEM_MAX_RELOCS);
		return -EINVAL;
	}

	pushbuf = u_memcpya(req->dwords, req->nr_dwords, sizeof(uint32_t));
	if (IS_ERR(pushbuf))
		return PTR_ERR(pushbuf);

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		kfree(pushbuf);
		return PTR_ERR(bo);
	}

	mutex_lock(&dev->struct_mutex);

	/* Validate buffer list */
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
					   req->nr_buffers, &op, &do_reloc);
	if (ret)
		goto out;

	/* Apply any relocations that are required */
	if (do_reloc) {
		/* patch the kernel copy; first_dword is 0 since the whole
		 * buffer was copied in above */
		ret = nouveau_gem_pushbuf_reloc_apply(chan, req->nr_buffers,
						      bo, req->nr_relocs,
						      req->relocs,
						      req->nr_dwords, 0,
						      pushbuf, false);
		if (ret)
			goto out;
	}

	/* Emit push buffer to the hw
	 */
	ret = RING_SPACE(chan, req->nr_dwords);
	if (ret)
		goto out;

	OUT_RINGp(chan, pushbuf, req->nr_dwords);

	ret = nouveau_fence_emit(op.fence);
	if (ret) {
		NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
		/* roll the ring back so the bad dwords never execute */
		WIND_RING(chan);
		goto out;
	}

	/* debug aid: synchronously wait and dump the buffer on failure */
	if (nouveau_gem_pushbuf_sync(chan)) {
		ret = nouveau_fence_wait(op.fence, NULL, false, false);
		if (ret) {
			for (i = 0; i < req->nr_dwords; i++)
				NV_ERROR(dev, "0x%08x\n", pushbuf[i]);
			NV_ERROR(dev, "^^ above push buffer is fail :(\n");
		}
	}

out:
	/* fences (on success) and releases all validated buffers */
	validate_fini(&op, ret == 0);
	mutex_unlock(&dev->struct_mutex);
	kfree(pushbuf);
	kfree(bo);
	return ret;
}
625
626#define PUSHBUF_CAL (dev_priv->card_type >= NV_20)
627
/* DRM_IOCTL_NOUVEAU_GEM_PUSHBUF_CALL: execute a push buffer that lives in
 * a GEM object directly, by emitting a hardware call (NV20+) or jump
 * (older chips) into it, instead of copying the dwords through the ring.
 * Relocations are applied in-place via a kmap of the target bo.  On exit
 * req->suffix0/1 tell userspace what terminator to place at the end of
 * its next buffer.
 */
int
nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
			       struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_gem_pushbuf_call *req = data;
	struct drm_nouveau_gem_pushbuf_bo *bo = NULL;
	struct nouveau_channel *chan;
	struct drm_gem_object *gem;
	struct nouveau_bo *pbbo;
	struct validate_op op;
	int i, ret = 0, do_reloc = 0;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);

	/* handle 0 means "just tell me the current suffix values" */
	if (unlikely(req->handle == 0))
		goto out_next;

	if (req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS ||
	    req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS) {
		NV_ERROR(dev, "Pushbuf config exceeds limits:\n");
		NV_ERROR(dev, "  buffers: %d max %d\n", req->nr_buffers,
			 NOUVEAU_GEM_MAX_BUFFERS);
		NV_ERROR(dev, "  relocs : %d max %d\n", req->nr_relocs,
			 NOUVEAU_GEM_MAX_RELOCS);
		return -EINVAL;
	}

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	mutex_lock(&dev->struct_mutex);

	/* Validate buffer list */
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		NV_ERROR(dev, "validate: %d\n", ret);
		goto out;
	}

	/* Validate DMA push buffer */
	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem) {
		NV_ERROR(dev, "Unknown pb handle 0x%08x\n", req->handle);
		ret = -EINVAL;
		goto out;
	}
	pbbo = nouveau_gem_object(gem);

	ret = ttm_bo_reserve(&pbbo->bo, false, false, true,
			     chan->fence.sequence);
	if (ret) {
		NV_ERROR(dev, "resv pb: %d\n", ret);
		drm_gem_object_unreference(gem);
		goto out;
	}

	/* place the pb in the same memory type as the channel's ring */
	nouveau_bo_placement_set(pbbo, 1 << chan->pushbuf_bo->bo.mem.mem_type);
	ret = ttm_bo_validate(&pbbo->bo, &pbbo->placement, false, false);
	if (ret) {
		NV_ERROR(dev, "validate pb: %d\n", ret);
		ttm_bo_unreserve(&pbbo->bo);
		drm_gem_object_unreference(gem);
		goto out;
	}

	/* from here on validate_fini() releases the pb bo too */
	list_add_tail(&pbbo->entry, &op.both_list);

	/* If presumed return address doesn't match, we need to map the
	 * push buffer and fix it..
	 */
	if (!PUSHBUF_CAL) {
		uint32_t retaddy;

		/* make sure the return-jump target we compute below stays
		 * valid: reserve the ring space for it now */
		if (chan->dma.free < 4 + NOUVEAU_DMA_SKIPS) {
			ret = nouveau_dma_wait(chan, 4 + NOUVEAU_DMA_SKIPS);
			if (ret) {
				NV_ERROR(dev, "jmp_space: %d\n", ret);
				goto out;
			}
		}

		/* 0x20000000 = jump command; low bits = ring offset */
		retaddy  = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
		retaddy |= 0x20000000;
		if (retaddy != req->suffix0) {
			req->suffix0 = retaddy;
			do_reloc = 1;
		}
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		void *pbvirt;
		bool is_iomem;
		ret = ttm_bo_kmap(&pbbo->bo, 0, pbbo->bo.mem.num_pages,
				  &pbbo->kmap);
		if (ret) {
			NV_ERROR(dev, "kmap pb: %d\n", ret);
			goto out;
		}

		pbvirt = ttm_kmap_obj_virtual(&pbbo->kmap, &is_iomem);
		ret = nouveau_gem_pushbuf_reloc_apply(chan, req->nr_buffers, bo,
						      req->nr_relocs,
						      req->relocs,
						      req->nr_dwords,
						      req->offset / 4,
						      pbvirt, is_iomem);

		if (!PUSHBUF_CAL) {
			/* patch the return jump into the pb's last dwords */
			nouveau_bo_wr32(pbbo,
					req->offset / 4 + req->nr_dwords - 2,
					req->suffix0);
		}

		ttm_bo_kunmap(&pbbo->kmap);
		if (ret) {
			NV_ERROR(dev, "reloc apply: %d\n", ret);
			goto out;
		}
	}

	if (PUSHBUF_CAL) {
		/* NV20+: hardware call into the pb; bit 1 = call command */
		ret = RING_SPACE(chan, 2);
		if (ret) {
			NV_ERROR(dev, "cal_space: %d\n", ret);
			goto out;
		}
		OUT_RING(chan, ((pbbo->bo.mem.mm_node->start << PAGE_SHIFT) +
				  req->offset) | 2);
		OUT_RING(chan, 0);
	} else {
		/* older chips: jump into the pb, it jumps back itself */
		ret = RING_SPACE(chan, 2 + NOUVEAU_DMA_SKIPS);
		if (ret) {
			NV_ERROR(dev, "jmp_space: %d\n", ret);
			goto out;
		}
		OUT_RING(chan, ((pbbo->bo.mem.mm_node->start << PAGE_SHIFT) +
				  req->offset) | 0x20000000);
		OUT_RING(chan, 0);

		/* Space the jumps apart with NOPs. */
		for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
			OUT_RING(chan, 0);
	}

	ret = nouveau_fence_emit(op.fence);
	if (ret) {
		NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

out:
	validate_fini(&op, ret == 0);
	mutex_unlock(&dev->struct_mutex);
	kfree(bo);

out_next:
	/* report the terminator userspace must append to its next buffer:
	 * a call-return pair on NV20+, a return jump otherwise */
	if (PUSHBUF_CAL) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			      (chan->pushbuf_base + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	return ret;
}
801
802int
803nouveau_gem_ioctl_pushbuf_call2(struct drm_device *dev, void *data,
804 struct drm_file *file_priv)
805{
806 struct drm_nouveau_private *dev_priv = dev->dev_private;
807 struct drm_nouveau_gem_pushbuf_call *req = data;
808
809 req->vram_available = dev_priv->fb_aper_free;
810 req->gart_available = dev_priv->gart_info.aper_free;
811
812 return nouveau_gem_ioctl_pushbuf_call(dev, data, file_priv);
813}
814
815static inline uint32_t
816domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
817{
818 uint32_t flags = 0;
819
820 if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
821 flags |= TTM_PL_FLAG_VRAM;
822 if (domain & NOUVEAU_GEM_DOMAIN_GART)
823 flags |= TTM_PL_FLAG_TT;
824
825 return flags;
826}
827
/* DRM_IOCTL_NOUVEAU_GEM_PIN: pin a buffer into the requested domain and
 * return its fixed offset.  UMS-only (rejected under KMS) and restricted
 * to privileged callers.
 */
int
nouveau_gem_ioctl_pin(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_nouveau_gem_pin *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	int ret = 0;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		NV_ERROR(dev, "pin only allowed without kernel modesetting\n");
		return -EINVAL;
	}

	if (!DRM_SUSER(DRM_CURPROC))
		return -EPERM;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -EINVAL;
	nvbo = nouveau_gem_object(gem);

	ret = nouveau_bo_pin(nvbo, domain_to_ttm(nvbo, req->domain));
	if (ret)
		goto out;

	/* report where the bo actually ended up */
	req->offset = nvbo->bo.offset;
	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		req->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		req->domain = NOUVEAU_GEM_DOMAIN_VRAM;

out:
	/* drop the lookup reference; the pin keeps the bo alive */
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(gem);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
869
870int
871nouveau_gem_ioctl_unpin(struct drm_device *dev, void *data,
872 struct drm_file *file_priv)
873{
874 struct drm_nouveau_gem_pin *req = data;
875 struct drm_gem_object *gem;
876 int ret;
877
878 NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
879
880 if (drm_core_check_feature(dev, DRIVER_MODESET))
881 return -EINVAL;
882
883 gem = drm_gem_object_lookup(dev, file_priv, req->handle);
884 if (!gem)
885 return -EINVAL;
886
887 ret = nouveau_bo_unpin(nouveau_gem_object(gem));
888
889 mutex_lock(&dev->struct_mutex);
890 drm_gem_object_unreference(gem);
891 mutex_unlock(&dev->struct_mutex);
892
893 return ret;
894}
895
/* DRM_IOCTL_NOUVEAU_GEM_CPU_PREP: prepare a buffer for CPU access.
 * NOBLOCK waits for GPU idle only; otherwise a synccpu write grab is
 * taken and recorded in nvbo->cpu_filp so cpu_fini can release it.
 * -ERESTART from TTM is translated to -EAGAIN for userspace.
 */
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
	int ret = -EINVAL;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return ret;
	nvbo = nouveau_gem_object(gem);

	if (nvbo->cpu_filp) {
		/* we already hold it: nothing to do (ret stays -EINVAL,
		 * matching the original behaviour of this path) */
		if (nvbo->cpu_filp == file_priv)
			goto out;

		/* someone else holds CPU access; wait for them */
		ret = ttm_bo_wait_cpu(&nvbo->bo, no_wait);
		if (ret == -ERESTART)
			ret = -EAGAIN;
		if (ret)
			goto out;
	}

	if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) {
		ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
	} else {
		ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
		if (ret == -ERESTART)
			ret = -EAGAIN;
		else
		if (ret == 0)
			nvbo->cpu_filp = file_priv;
	}

out:
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(gem);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
941
942int
943nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
944 struct drm_file *file_priv)
945{
946 struct drm_nouveau_gem_cpu_prep *req = data;
947 struct drm_gem_object *gem;
948 struct nouveau_bo *nvbo;
949 int ret = -EINVAL;
950
951 NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
952
953 gem = drm_gem_object_lookup(dev, file_priv, req->handle);
954 if (!gem)
955 return ret;
956 nvbo = nouveau_gem_object(gem);
957
958 if (nvbo->cpu_filp != file_priv)
959 goto out;
960 nvbo->cpu_filp = NULL;
961
962 ttm_bo_synccpu_write_release(&nvbo->bo);
963 ret = 0;
964
965out:
966 mutex_lock(&dev->struct_mutex);
967 drm_gem_object_unreference(gem);
968 mutex_unlock(&dev->struct_mutex);
969 return ret;
970}
971
972int
973nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
974 struct drm_file *file_priv)
975{
976 struct drm_nouveau_gem_info *req = data;
977 struct drm_gem_object *gem;
978 int ret;
979
980 NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
981
982 gem = drm_gem_object_lookup(dev, file_priv, req->handle);
983 if (!gem)
984 return -EINVAL;
985
986 ret = nouveau_gem_info(gem, req);
987 mutex_lock(&dev->struct_mutex);
988 drm_gem_object_unreference(gem);
989 mutex_unlock(&dev->struct_mutex);
990 return ret;
991}
992
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c
new file mode 100644
index 000000000000..dc46792a5c96
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.c
@@ -0,0 +1,1080 @@
1/*
2 * Copyright 2006 Dave Airlie
3 * Copyright 2007 Maarten Maathuis
4 * Copyright 2007-2009 Stuart Bennett
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
20 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
21 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_hw.h"
28
29#define CHIPSET_NFORCE 0x01a0
30#define CHIPSET_NFORCE2 0x01f0
31
32/*
33 * misc hw access wrappers/control functions
34 */
35
/* Write @value to VGA sequencer register @index on @head via the PRMVIO
 * index/data register pair. */
void
NVWriteVgaSeq(struct drm_device *dev, int head, uint8_t index, uint8_t value)
{
	NVWritePRMVIO(dev, head, NV_PRMVIO_SRX, index);
	NVWritePRMVIO(dev, head, NV_PRMVIO_SR, value);
}
42
/* Read VGA sequencer register @index on @head via the PRMVIO index/data
 * register pair. */
uint8_t
NVReadVgaSeq(struct drm_device *dev, int head, uint8_t index)
{
	NVWritePRMVIO(dev, head, NV_PRMVIO_SRX, index);
	return NVReadPRMVIO(dev, head, NV_PRMVIO_SR);
}
49
/* Write @value to VGA graphics-controller register @index on @head via
 * the PRMVIO index/data register pair. */
void
NVWriteVgaGr(struct drm_device *dev, int head, uint8_t index, uint8_t value)
{
	NVWritePRMVIO(dev, head, NV_PRMVIO_GRX, index);
	NVWritePRMVIO(dev, head, NV_PRMVIO_GX, value);
}
56
/* Read VGA graphics-controller register @index on @head via the PRMVIO
 * index/data register pair. */
uint8_t
NVReadVgaGr(struct drm_device *dev, int head, uint8_t index)
{
	NVWritePRMVIO(dev, head, NV_PRMVIO_GRX, index);
	return NVReadPRMVIO(dev, head, NV_PRMVIO_GX);
}
63
64/* CR44 takes values 0 (head A), 3 (head B) and 4 (heads tied)
65 * it affects only the 8 bit vga io regs, which we access using mmio at
66 * 0xc{0,2}3c*, 0x60{1,3}3*, and 0x68{1,3}3d*
67 * in general, the set value of cr44 does not matter: reg access works as
68 * expected and values can be set for the appropriate head by using a 0x2000
69 * offset as required
70 * however:
71 * a) pre nv40, the head B range of PRMVIO regs at 0xc23c* was not exposed and
72 * cr44 must be set to 0 or 3 for accessing values on the correct head
73 * through the common 0xc03c* addresses
74 * b) in tied mode (4) head B is programmed to the values set on head A, and
75 * access using the head B addresses can have strange results, ergo we leave
76 * tied mode in init once we know to what cr44 should be restored on exit
77 *
78 * the owner parameter is slightly abused:
79 * 0 and 1 are treated as head values and so the set value is (owner * 3)
80 * other values are treated as literal values to set
81 */
/* Set CR44 (vga io register ownership) -- see the block comment above for
 * the full semantics.  @owner: 0/1 select head A/B (1 is scaled to the
 * hardware value 3); any other value is written literally.
 */
void
NVSetOwner(struct drm_device *dev, int owner)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/* head B is represented by the value 3 in CR44 */
	if (owner == 1)
		owner *= 3;

	if (dev_priv->chipset == 0x11) {
		/* This might seem stupid, but the blob does it and
		 * omitting it often locks the system up.
		 */
		NVReadVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX);
		NVReadVgaCrtc(dev, 1, NV_CIO_SR_LOCK_INDEX);
	}

	/* CR44 is always changed on CRTC0 */
	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_44, owner);

	if (dev_priv->chipset == 0x11) {	/* set me harder */
		NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner);
		NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner);
	}
}
106
107void
108NVBlankScreen(struct drm_device *dev, int head, bool blank)
109{
110 unsigned char seq1;
111
112 if (nv_two_heads(dev))
113 NVSetOwner(dev, head);
114
115 seq1 = NVReadVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX);
116
117 NVVgaSeqReset(dev, head, true);
118 if (blank)
119 NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 | 0x20);
120 else
121 NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 & ~0x20);
122 NVVgaSeqReset(dev, head, false);
123}
124
125/*
126 * PLL setting
127 */
128
/* Return the bit shift into NV_PBUS_POWERCTRL_1 for the power-gating
 * nibble associated with PLL register @reg, or -4 ("no nibble") for
 * chips/registers where no power fiddling is needed.
 */
static int
powerctrl_1_shift(int chip_version, int reg)
{
	int shift = -4;

	if (chip_version < 0x17 || chip_version == 0x1a || chip_version == 0x20)
		return shift;

	/* intentional fallthrough on every case: each PLL register sits one
	 * nibble higher than the next, so the shifts accumulate */
	switch (reg) {
	case NV_RAMDAC_VPLL2:
		shift += 4;
		/* fallthrough */
	case NV_PRAMDAC_VPLL_COEFF:
		shift += 4;
		/* fallthrough */
	case NV_PRAMDAC_MPLL_COEFF:
		shift += 4;
		/* fallthrough */
	case NV_PRAMDAC_NVPLL_COEFF:
		shift += 4;
	}

	/*
	 * the shift for vpll regs is only used for nv3x chips with a single
	 * stage pll
	 */
	if (shift > 4 && (chip_version < 0x32 || chip_version == 0x35 ||
			  chip_version == 0x36 || chip_version >= 0x40))
		shift = -4;

	return shift;
}
158
/* Program a single-register PLL (pre-two-register chips).  The two pll
 * halves (post divider vs N/M) are written in an order that depends on
 * whether we're upclocking or downclocking, with power-gating bits in
 * NV_PBUS_POWERCTRL_1 held asserted across the change where required.
 */
static void
setPLL_single(struct drm_device *dev, uint32_t reg, struct nouveau_pll_vals *pv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int chip_version = dev_priv->vbios->chip_version;
	uint32_t oldpll = NVReadRAMDAC(dev, 0, reg);
	int oldN = (oldpll >> 8) & 0xff, oldM = oldpll & 0xff;
	uint32_t pll = (oldpll & 0xfff80000) | pv->log2P << 16 | pv->NM1;
	uint32_t saved_powerctrl_1 = 0;
	int shift_powerctrl_1 = powerctrl_1_shift(chip_version, reg);

	if (oldpll == pll)
		return;	/* already set */

	if (shift_powerctrl_1 >= 0) {
		/* assert the PLL's power-gating nibble for the duration */
		saved_powerctrl_1 = nvReadMC(dev, NV_PBUS_POWERCTRL_1);
		nvWriteMC(dev, NV_PBUS_POWERCTRL_1,
			(saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) |
			1 << shift_powerctrl_1);
	}

	/* integer-ratio comparison of old vs new VCO frequency decides the
	 * safe write order */
	if (oldM && pv->M1 && (oldN / oldM < pv->N1 / pv->M1))
		/* upclock -- write new post divider first */
		NVWriteRAMDAC(dev, 0, reg, pv->log2P << 16 | (oldpll & 0xffff));
	else
		/* downclock -- write new NM first */
		NVWriteRAMDAC(dev, 0, reg, (oldpll & 0xffff0000) | pv->NM1);

	if (chip_version < 0x17 && chip_version != 0x11)
		/* wait a bit on older chips */
		msleep(64);
	/* readback flushes the previous write before the second half lands */
	NVReadRAMDAC(dev, 0, reg);

	/* then write the other half as well */
	NVWriteRAMDAC(dev, 0, reg, pll);

	if (shift_powerctrl_1 >= 0)
		nvWriteMC(dev, NV_PBUS_POWERCTRL_1, saved_powerctrl_1);
}
198
199static uint32_t
200new_ramdac580(uint32_t reg1, bool ss, uint32_t ramdac580)
201{
202 bool head_a = (reg1 == NV_PRAMDAC_VPLL_COEFF);
203
204 if (ss) /* single stage pll mode */
205 ramdac580 |= head_a ? NV_RAMDAC_580_VPLL1_ACTIVE :
206 NV_RAMDAC_580_VPLL2_ACTIVE;
207 else
208 ramdac580 &= head_a ? ~NV_RAMDAC_580_VPLL1_ACTIVE :
209 ~NV_RAMDAC_580_VPLL2_ACTIVE;
210
211 return ramdac580;
212}
213
/* Program a two-stage PLL whose registers live in RAMDAC space (VPLLs and
 * NV/MPLL on most chips).  Handles several generations' quirks: nv30/35
 * pack the second stage into pll1, nv41+ track single-stage mode in
 * NV_PRAMDAC_580, nv40+ gate the PLL via 0xc040 while reprogramming, and
 * g71-73 need extra magic bits in pll1.
 */
static void
setPLL_double_highregs(struct drm_device *dev, uint32_t reg1,
		       struct nouveau_pll_vals *pv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int chip_version = dev_priv->vbios->chip_version;
	bool nv3035 = chip_version == 0x30 || chip_version == 0x35;
	uint32_t reg2 = reg1 + ((reg1 == NV_RAMDAC_VPLL2) ? 0x5c : 0x70);
	uint32_t oldpll1 = NVReadRAMDAC(dev, 0, reg1);
	uint32_t oldpll2 = !nv3035 ? NVReadRAMDAC(dev, 0, reg2) : 0;
	uint32_t pll1 = (oldpll1 & 0xfff80000) | pv->log2P << 16 | pv->NM1;
	uint32_t pll2 = (oldpll2 & 0x7fff0000) | 1 << 31 | pv->NM2;
	uint32_t oldramdac580 = 0, ramdac580 = 0;
	bool single_stage = !pv->NM2 || pv->N2 == pv->M2;	/* nv41+ only */
	uint32_t saved_powerctrl_1 = 0, savedc040 = 0;
	int shift_powerctrl_1 = powerctrl_1_shift(chip_version, reg1);

	/* model specific additions to generic pll1 and pll2 set up above */
	if (nv3035) {
		pll1 = (pll1 & 0xfcc7ffff) | (pv->N2 & 0x18) << 21 |
		       (pv->N2 & 0x7) << 19 | 8 << 4 | (pv->M2 & 7) << 4;
		pll2 = 0;
	}
	if (chip_version > 0x40 && reg1 >= NV_PRAMDAC_VPLL_COEFF) { /* !nv40 */
		oldramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580);
		ramdac580 = new_ramdac580(reg1, single_stage, oldramdac580);
		if (oldramdac580 != ramdac580)
			oldpll1 = ~0;	/* force mismatch */
		if (single_stage)
			/* magic value used by nvidia in single stage mode */
			pll2 |= 0x011f;
	}
	if (chip_version > 0x70)
		/* magic bits set by the blob (but not the bios) on g71-73 */
		pll1 = (pll1 & 0x7fffffff) | (single_stage ? 0x4 : 0xc) << 28;

	if (oldpll1 == pll1 && oldpll2 == pll2)
		return;	/* already set */

	if (shift_powerctrl_1 >= 0) {
		/* assert the PLL's power-gating nibble while reprogramming */
		saved_powerctrl_1 = nvReadMC(dev, NV_PBUS_POWERCTRL_1);
		nvWriteMC(dev, NV_PBUS_POWERCTRL_1,
			(saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) |
			1 << shift_powerctrl_1);
	}

	if (chip_version >= 0x40) {
		int shift_c040 = 14;

		/* intentional fallthrough: each register claims one more
		 * 2-bit field of 0xc040 */
		switch (reg1) {
		case NV_PRAMDAC_MPLL_COEFF:
			shift_c040 += 2;
			/* fallthrough */
		case NV_PRAMDAC_NVPLL_COEFF:
			shift_c040 += 2;
			/* fallthrough */
		case NV_RAMDAC_VPLL2:
			shift_c040 += 2;
			/* fallthrough */
		case NV_PRAMDAC_VPLL_COEFF:
			shift_c040 += 2;
		}

		savedc040 = nvReadMC(dev, 0xc040);
		if (shift_c040 != 14)
			nvWriteMC(dev, 0xc040, savedc040 & ~(3 << shift_c040));
	}

	if (oldramdac580 != ramdac580)
		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_580, ramdac580);

	/* second stage first, then the stage-1 register that arms it */
	if (!nv3035)
		NVWriteRAMDAC(dev, 0, reg2, pll2);
	NVWriteRAMDAC(dev, 0, reg1, pll1);

	if (shift_powerctrl_1 >= 0)
		nvWriteMC(dev, NV_PBUS_POWERCTRL_1, saved_powerctrl_1);
	if (chip_version >= 0x40)
		nvWriteMC(dev, 0xc040, savedc040);
}
291
/* Program a two-stage PLL whose registers live in PMC space at 0x4000-0x4058
 * (nv4x NVPLL/MPLL).  @NMNMreg holds both N/M pairs; the P register sits
 * 4 bytes below it.  The exact disable/enable choreography was derived
 * from hardware traces -- do not reorder the writes.
 */
static void
setPLL_double_lowregs(struct drm_device *dev, uint32_t NMNMreg,
		      struct nouveau_pll_vals *pv)
{
	/* When setting PLLs, there is a merry game of disabling and enabling
	 * various bits of hardware during the process. This function is a
	 * synthesis of six nv4x traces, nearly each card doing a subtly
	 * different thing. With luck all the necessary bits for each card are
	 * combined herein. Without luck it deviates from each card's formula
	 * so as to not work on any :)
	 */

	uint32_t Preg = NMNMreg - 4;
	bool mpll = Preg == 0x4020;
	uint32_t oldPval = nvReadMC(dev, Preg);
	uint32_t NMNM = pv->NM2 << 16 | pv->NM1;
	uint32_t Pval = (oldPval & (mpll ? ~(0x11 << 16) : ~(1 << 16))) |
			0xc << 28 | pv->log2P << 16;
	uint32_t saved4600 = 0;
	/* some cards have different maskc040s */
	uint32_t maskc040 = ~(3 << 14), savedc040;
	bool single_stage = !pv->NM2 || pv->N2 == pv->M2;

	/* nothing to do if both N/M and the relevant P bits already match */
	if (nvReadMC(dev, NMNMreg) == NMNM && (oldPval & 0xc0070000) == Pval)
		return;

	if (Preg == 0x4000)
		maskc040 = ~0x333;
	if (Preg == 0x4058)
		maskc040 = ~(0xc << 24);

	if (mpll) {
		struct pll_lims pll_lim;
		uint8_t Pval2;

		if (get_pll_limits(dev, Preg, &pll_lim))
			return;

		/* bias and clamp the second post divider from the vbios
		 * pll limits table */
		Pval2 = pv->log2P + pll_lim.log2p_bias;
		if (Pval2 > pll_lim.max_log2p)
			Pval2 = pll_lim.max_log2p;
		Pval |= 1 << 28 | Pval2 << 20;

		saved4600 = nvReadMC(dev, 0x4600);
		nvWriteMC(dev, 0x4600, saved4600 | 8 << 28);
	}
	if (single_stage)
		Pval |= mpll ? 1 << 12 : 1 << 8;

	/* disable the pll, then write P with the disable bit still low */
	nvWriteMC(dev, Preg, oldPval | 1 << 28);
	nvWriteMC(dev, Preg, Pval & ~(4 << 28));
	if (mpll) {
		Pval |= 8 << 20;
		nvWriteMC(dev, 0x4020, Pval & ~(0xc << 28));
		nvWriteMC(dev, 0x4038, Pval & ~(0xc << 28));
	}

	/* gate the consumers in 0xc040 while N/M change */
	savedc040 = nvReadMC(dev, 0xc040);
	nvWriteMC(dev, 0xc040, savedc040 & maskc040);

	nvWriteMC(dev, NMNMreg, NMNM);
	if (NMNMreg == 0x4024)
		nvWriteMC(dev, 0x403c, NMNM);

	nvWriteMC(dev, Preg, Pval);
	if (mpll) {
		Pval &= ~(8 << 20);
		nvWriteMC(dev, 0x4020, Pval);
		nvWriteMC(dev, 0x4038, Pval);
		nvWriteMC(dev, 0x4600, saved4600);
	}

	nvWriteMC(dev, 0xc040, savedc040);

	if (mpll) {
		nvWriteMC(dev, 0x4020, Pval & ~(1 << 28));
		nvWriteMC(dev, 0x4038, Pval & ~(1 << 28));
	}
}
371
372void
373nouveau_hw_setpll(struct drm_device *dev, uint32_t reg1,
374 struct nouveau_pll_vals *pv)
375{
376 struct drm_nouveau_private *dev_priv = dev->dev_private;
377 int cv = dev_priv->vbios->chip_version;
378
379 if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 ||
380 cv >= 0x40) {
381 if (reg1 > 0x405c)
382 setPLL_double_highregs(dev, reg1, pv);
383 else
384 setPLL_double_lowregs(dev, reg1, pv);
385 } else
386 setPLL_single(dev, reg1, pv);
387}
388
389/*
390 * PLL getting
391 */
392
/*
 * Unpack raw PLL coefficient register contents (pll1, and for
 * two-register PLLs pll2) into *pllvals.  To force parsing as single
 * stage (i.e. nv40 vplls) pass pll2 as 0.
 */
static void
nouveau_hw_decode_pll(struct drm_device *dev, uint32_t reg1, uint32_t pll1,
		      uint32_t pll2, struct nouveau_pll_vals *pllvals)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/* log2P is & 0x7 as never more than 7, and nv30/35 only uses 3 bits */
	pllvals->log2P = (pll1 >> 16) & 0x7;
	/* neutral second stage unless decoded otherwise below */
	pllvals->N2 = pllvals->M2 = 1;

	if (reg1 <= 0x405c) {
		/* low-reg PLLs keep their NM coefficients in the second reg */
		pllvals->NM1 = pll2 & 0xffff;
		/* single stage NVPLL and VPLLs use 1 << 8, MPLL uses 1 << 12 */
		if (!(pll1 & 0x1100))
			pllvals->NM2 = pll2 >> 16;
	} else {
		pllvals->NM1 = pll1 & 0xffff;
		if (nv_two_reg_pll(dev) && pll2 & NV31_RAMDAC_ENABLE_VCO2)
			pllvals->NM2 = pll2 & 0xffff;
		else if (dev_priv->chipset == 0x30 || dev_priv->chipset == 0x35) {
			/* NOTE(review): assumes M1 aliases the low bits of
			 * NM1 (union layout in nouveau_pll_vals) — confirm */
			pllvals->M1 &= 0xf; /* only 4 bits */
			if (pll1 & NV30_RAMDAC_ENABLE_VCO2) {
				/* nv30/35 pack the second-stage coefficients
				 * into pll1 itself */
				pllvals->M2 = (pll1 >> 4) & 0x7;
				pllvals->N2 = ((pll1 >> 21) & 0x18) |
					      ((pll1 >> 19) & 0x7);
			}
		}
	}
}
424
425int
426nouveau_hw_get_pllvals(struct drm_device *dev, enum pll_types plltype,
427 struct nouveau_pll_vals *pllvals)
428{
429 struct drm_nouveau_private *dev_priv = dev->dev_private;
430 const uint32_t nv04_regs[MAX_PLL_TYPES] = { NV_PRAMDAC_NVPLL_COEFF,
431 NV_PRAMDAC_MPLL_COEFF,
432 NV_PRAMDAC_VPLL_COEFF,
433 NV_RAMDAC_VPLL2 };
434 const uint32_t nv40_regs[MAX_PLL_TYPES] = { 0x4000,
435 0x4020,
436 NV_PRAMDAC_VPLL_COEFF,
437 NV_RAMDAC_VPLL2 };
438 uint32_t reg1, pll1, pll2 = 0;
439 struct pll_lims pll_lim;
440 int ret;
441
442 if (dev_priv->card_type < NV_40)
443 reg1 = nv04_regs[plltype];
444 else
445 reg1 = nv40_regs[plltype];
446
447 pll1 = nvReadMC(dev, reg1);
448
449 if (reg1 <= 0x405c)
450 pll2 = nvReadMC(dev, reg1 + 4);
451 else if (nv_two_reg_pll(dev)) {
452 uint32_t reg2 = reg1 + (reg1 == NV_RAMDAC_VPLL2 ? 0x5c : 0x70);
453
454 pll2 = nvReadMC(dev, reg2);
455 }
456
457 if (dev_priv->card_type == 0x40 && reg1 >= NV_PRAMDAC_VPLL_COEFF) {
458 uint32_t ramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580);
459
460 /* check whether vpll has been forced into single stage mode */
461 if (reg1 == NV_PRAMDAC_VPLL_COEFF) {
462 if (ramdac580 & NV_RAMDAC_580_VPLL1_ACTIVE)
463 pll2 = 0;
464 } else
465 if (ramdac580 & NV_RAMDAC_580_VPLL2_ACTIVE)
466 pll2 = 0;
467 }
468
469 nouveau_hw_decode_pll(dev, reg1, pll1, pll2, pllvals);
470
471 ret = get_pll_limits(dev, plltype, &pll_lim);
472 if (ret)
473 return ret;
474
475 pllvals->refclk = pll_lim.refclk;
476
477 return 0;
478}
479
480int
481nouveau_hw_pllvals_to_clk(struct nouveau_pll_vals *pv)
482{
483 /* Avoid divide by zero if called at an inappropriate time */
484 if (!pv->M1 || !pv->M2)
485 return 0;
486
487 return pv->N1 * pv->N2 * pv->refclk / (pv->M1 * pv->M2) >> pv->log2P;
488}
489
490int
491nouveau_hw_get_clock(struct drm_device *dev, enum pll_types plltype)
492{
493 struct nouveau_pll_vals pllvals;
494
495 if (plltype == MPLL && (dev->pci_device & 0x0ff0) == CHIPSET_NFORCE) {
496 uint32_t mpllP;
497
498 pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
499 if (!mpllP)
500 mpllP = 4;
501
502 return 400000 / mpllP;
503 } else
504 if (plltype == MPLL && (dev->pci_device & 0xff0) == CHIPSET_NFORCE2) {
505 uint32_t clock;
506
507 pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
508 return clock;
509 }
510
511 nouveau_hw_get_pllvals(dev, plltype, &pllvals);
512
513 return nouveau_hw_pllvals_to_clk(&pllvals);
514}
515
static void
nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head)
{
	/* the vpll on an unused head can come up with a random value, way
	 * beyond the pll limits. for some reason this causes the chip to
	 * lock up when reading the dac palette regs, so set a valid pll here
	 * when such a condition detected. only seen on nv11 to date
	 */

	struct pll_lims pll_lim;
	struct nouveau_pll_vals pv;
	uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF;

	if (get_pll_limits(dev, head ? VPLL2 : VPLL1, &pll_lim))
		return;
	/* can't fail: get_pll_limits() just succeeded for the same plltype */
	nouveau_hw_get_pllvals(dev, head ? VPLL2 : VPLL1, &pv);

	/* current coefficients within the static vco1 limits => leave alone */
	if (pv.M1 >= pll_lim.vco1.min_m && pv.M1 <= pll_lim.vco1.max_m &&
	    pv.N1 >= pll_lim.vco1.min_n && pv.N1 <= pll_lim.vco1.max_n &&
	    pv.log2P <= pll_lim.max_log2p)
		return;

	NV_WARN(dev, "VPLL %d outwith limits, attempting to fix\n", head + 1);

	/* set lowest clock within static limits */
	pv.M1 = pll_lim.vco1.max_m;
	pv.N1 = pll_lim.vco1.min_n;
	pv.log2P = pll_lim.max_usable_log2p;
	nouveau_hw_setpll(dev, pllreg, &pv);
}
546
547/*
548 * vga font save/restore
549 */
550
551static void nouveau_vga_font_io(struct drm_device *dev,
552 void __iomem *iovram,
553 bool save, unsigned plane)
554{
555 struct drm_nouveau_private *dev_priv = dev->dev_private;
556 unsigned i;
557
558 NVWriteVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX, 1 << plane);
559 NVWriteVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX, plane);
560 for (i = 0; i < 16384; i++) {
561 if (save) {
562 dev_priv->saved_vga_font[plane][i] =
563 ioread32_native(iovram + i * 4);
564 } else {
565 iowrite32_native(dev_priv->saved_vga_font[plane][i],
566 iovram + i * 4);
567 }
568 }
569}
570
/*
 * Save (save == true) or restore (save == false) the VGA font data held
 * in the first four VRAM planes.  Only meaningful in text mode; returns
 * early if head 0 is in graphics mode.
 */
void
nouveau_hw_save_vga_fonts(struct drm_device *dev, bool save)
{
	uint8_t misc, gr4, gr5, gr6, seq2, seq4;
	bool graphicsmode;
	unsigned plane;
	void __iomem *iovram;

	if (nv_two_heads(dev))
		NVSetOwner(dev, 0);

	/* attribute regs are only readable with palette access unlocked */
	NVSetEnablePalette(dev, 0, true);
	graphicsmode = NVReadVgaAttr(dev, 0, NV_CIO_AR_MODE_INDEX) & 1;
	NVSetEnablePalette(dev, 0, false);

	if (graphicsmode) /* graphics mode => framebuffer => no need to save */
		return;

	NV_INFO(dev, "%sing VGA fonts\n", save ? "Sav" : "Restor");

	/* map first 64KiB of VRAM, holds VGA fonts etc */
	iovram = ioremap(pci_resource_start(dev->pdev, 1), 65536);
	if (!iovram) {
		NV_ERROR(dev, "Failed to map VRAM, "
					"cannot save/restore VGA fonts.\n");
		return;
	}

	if (nv_two_heads(dev))
		NVBlankScreen(dev, 1, true);
	NVBlankScreen(dev, 0, true);

	/* save control regs */
	misc = NVReadPRMVIO(dev, 0, NV_PRMVIO_MISC__READ);
	seq2 = NVReadVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX);
	seq4 = NVReadVgaSeq(dev, 0, NV_VIO_SR_MEM_MODE_INDEX);
	gr4 = NVReadVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX);
	gr5 = NVReadVgaGr(dev, 0, NV_VIO_GX_MODE_INDEX);
	gr6 = NVReadVgaGr(dev, 0, NV_VIO_GX_MISC_INDEX);

	/* presumably the standard VGA setup for direct per-plane access to
	 * the font planes — confirm against VGA register docs */
	NVWritePRMVIO(dev, 0, NV_PRMVIO_MISC__WRITE, 0x67);
	NVWriteVgaSeq(dev, 0, NV_VIO_SR_MEM_MODE_INDEX, 0x6);
	NVWriteVgaGr(dev, 0, NV_VIO_GX_MODE_INDEX, 0x0);
	NVWriteVgaGr(dev, 0, NV_VIO_GX_MISC_INDEX, 0x5);

	/* store font in planes 0..3 */
	for (plane = 0; plane < 4; plane++)
		nouveau_vga_font_io(dev, iovram, save, plane);

	/* restore control regs */
	NVWritePRMVIO(dev, 0, NV_PRMVIO_MISC__WRITE, misc);
	NVWriteVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX, gr4);
	NVWriteVgaGr(dev, 0, NV_VIO_GX_MODE_INDEX, gr5);
	NVWriteVgaGr(dev, 0, NV_VIO_GX_MISC_INDEX, gr6);
	NVWriteVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX, seq2);
	NVWriteVgaSeq(dev, 0, NV_VIO_SR_MEM_MODE_INDEX, seq4);

	if (nv_two_heads(dev))
		NVBlankScreen(dev, 1, false);
	NVBlankScreen(dev, 0, false);

	iounmap(iovram);
}
634
635/*
636 * mode state save/load
637 */
638
639static void
640rd_cio_state(struct drm_device *dev, int head,
641 struct nv04_crtc_reg *crtcstate, int index)
642{
643 crtcstate->CRTC[index] = NVReadVgaCrtc(dev, head, index);
644}
645
646static void
647wr_cio_state(struct drm_device *dev, int head,
648 struct nv04_crtc_reg *crtcstate, int index)
649{
650 NVWriteVgaCrtc(dev, head, index, crtcstate->CRTC[index]);
651}
652
/*
 * Save the RAMDAC (plus flat panel and TV-out) register state of 'head'
 * into state->crtc_reg[head].  Registers only present on later chips
 * are gated on card_type/chipset checks.
 */
static void
nv_save_state_ramdac(struct drm_device *dev, int head,
		     struct nv04_mode_state *state)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
	int i;

	if (dev_priv->card_type >= NV_10)
		regp->nv10_cursync = NVReadRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC);

	/* decoded vpll coefficients for this head */
	nouveau_hw_get_pllvals(dev, head ? VPLL2 : VPLL1, &regp->pllvals);
	state->pllsel = NVReadRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT);
	if (nv_two_heads(dev))
		state->sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK);
	if (dev_priv->chipset == 0x11)
		regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11);

	regp->ramdac_gen_ctrl = NVReadRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL);

	if (nv_gf4_disp_arch(dev))
		regp->ramdac_630 = NVReadRAMDAC(dev, head, NV_PRAMDAC_630);
	if (dev_priv->chipset >= 0x30)
		regp->ramdac_634 = NVReadRAMDAC(dev, head, NV_PRAMDAC_634);

	/* TV-out timing registers */
	regp->tv_setup = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP);
	regp->tv_vtotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VTOTAL);
	regp->tv_vskew = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VSKEW);
	regp->tv_vsync_delay = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VSYNC_DELAY);
	regp->tv_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HTOTAL);
	regp->tv_hskew = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSKEW);
	regp->tv_hsync_delay = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY);
	regp->tv_hsync_delay2 = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY2);

	/* 7 flat panel vertical regs from +0x0, horizontal ones from +0x20 */
	for (i = 0; i < 7; i++) {
		uint32_t ramdac_reg = NV_PRAMDAC_FP_VDISPLAY_END + (i * 4);
		regp->fp_vert_regs[i] = NVReadRAMDAC(dev, head, ramdac_reg);
		regp->fp_horiz_regs[i] = NVReadRAMDAC(dev, head, ramdac_reg + 0x20);
	}

	if (nv_gf4_disp_arch(dev)) {
		regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_FP_DITHER);
		for (i = 0; i < 3; i++) {
			regp->dither_regs[i] = NVReadRAMDAC(dev, head, NV_PRAMDAC_850 + i * 4);
			regp->dither_regs[i + 3] = NVReadRAMDAC(dev, head, NV_PRAMDAC_85C + i * 4);
		}
	}

	regp->fp_control = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL);
	regp->fp_debug_0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_0);
	if (!nv_gf4_disp_arch(dev) && head == 0) {
		/* early chips don't allow access to PRAMDAC_TMDS_* without
		 * the head A FPCLK on (nv11 even locks up) */
		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_FP_DEBUG_0, regp->fp_debug_0 &
			      ~NV_PRAMDAC_FP_DEBUG_0_PWRDOWN_FPCLK);
	}
	regp->fp_debug_1 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1);
	regp->fp_debug_2 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_2);

	regp->fp_margin_color = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_MARGIN_COLOR);

	if (nv_gf4_disp_arch(dev))
		regp->ramdac_8c0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_8C0);

	if (dev_priv->card_type == NV_40) {
		regp->ramdac_a20 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A20);
		regp->ramdac_a24 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A24);
		regp->ramdac_a34 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A34);

		for (i = 0; i < 38; i++)
			regp->ctv_regs[i] = NVReadRAMDAC(dev, head,
							 NV_PRAMDAC_CTV + 4*i);
	}
}
727
/*
 * Program the RAMDAC (plus flat panel and TV-out) registers of 'head'
 * from the state previously captured by nv_save_state_ramdac().
 */
static void
nv_load_state_ramdac(struct drm_device *dev, int head,
		     struct nv04_mode_state *state)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
	uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF;
	int i;

	if (dev_priv->card_type >= NV_10)
		NVWriteRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC, regp->nv10_cursync);

	/* reprogram this head's vpll from the saved coefficients */
	nouveau_hw_setpll(dev, pllreg, &regp->pllvals);
	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel);
	if (nv_two_heads(dev))
		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, state->sel_clk);
	if (dev_priv->chipset == 0x11)
		NVWriteRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11, regp->dither);

	NVWriteRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL, regp->ramdac_gen_ctrl);

	if (nv_gf4_disp_arch(dev))
		NVWriteRAMDAC(dev, head, NV_PRAMDAC_630, regp->ramdac_630);
	if (dev_priv->chipset >= 0x30)
		NVWriteRAMDAC(dev, head, NV_PRAMDAC_634, regp->ramdac_634);

	/* TV-out timing registers */
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP, regp->tv_setup);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VTOTAL, regp->tv_vtotal);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VSKEW, regp->tv_vskew);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VSYNC_DELAY, regp->tv_vsync_delay);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HTOTAL, regp->tv_htotal);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSKEW, regp->tv_hskew);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY, regp->tv_hsync_delay);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY2, regp->tv_hsync_delay2);

	/* 7 flat panel vertical regs at +0x0, horizontal ones at +0x20 */
	for (i = 0; i < 7; i++) {
		uint32_t ramdac_reg = NV_PRAMDAC_FP_VDISPLAY_END + (i * 4);

		NVWriteRAMDAC(dev, head, ramdac_reg, regp->fp_vert_regs[i]);
		NVWriteRAMDAC(dev, head, ramdac_reg + 0x20, regp->fp_horiz_regs[i]);
	}

	if (nv_gf4_disp_arch(dev)) {
		NVWriteRAMDAC(dev, head, NV_RAMDAC_FP_DITHER, regp->dither);
		for (i = 0; i < 3; i++) {
			NVWriteRAMDAC(dev, head, NV_PRAMDAC_850 + i * 4, regp->dither_regs[i]);
			NVWriteRAMDAC(dev, head, NV_PRAMDAC_85C + i * 4, regp->dither_regs[i + 3]);
		}
	}

	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, regp->fp_control);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_0, regp->fp_debug_0);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1, regp->fp_debug_1);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_2, regp->fp_debug_2);

	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_MARGIN_COLOR, regp->fp_margin_color);

	if (nv_gf4_disp_arch(dev))
		NVWriteRAMDAC(dev, head, NV_PRAMDAC_8C0, regp->ramdac_8c0);

	if (dev_priv->card_type == NV_40) {
		NVWriteRAMDAC(dev, head, NV_PRAMDAC_A20, regp->ramdac_a20);
		NVWriteRAMDAC(dev, head, NV_PRAMDAC_A24, regp->ramdac_a24);
		NVWriteRAMDAC(dev, head, NV_PRAMDAC_A34, regp->ramdac_a34);

		for (i = 0; i < 38; i++)
			NVWriteRAMDAC(dev, head,
				      NV_PRAMDAC_CTV + 4*i, regp->ctv_regs[i]);
	}
}
798
/*
 * Save the standard VGA register state of 'head': misc output, CRTC,
 * attribute, graphics and sequencer controllers.
 */
static void
nv_save_state_vga(struct drm_device *dev, int head,
		  struct nv04_mode_state *state)
{
	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
	int i;

	regp->MiscOutReg = NVReadPRMVIO(dev, head, NV_PRMVIO_MISC__READ);

	/* standard VGA CRTC registers 0x00-0x18 */
	for (i = 0; i < 25; i++)
		rd_cio_state(dev, head, regp, i);

	/* attribute regs are only reachable with palette access unlocked */
	NVSetEnablePalette(dev, head, true);
	for (i = 0; i < 21; i++)
		regp->Attribute[i] = NVReadVgaAttr(dev, head, i);
	NVSetEnablePalette(dev, head, false);

	for (i = 0; i < 9; i++)
		regp->Graphics[i] = NVReadVgaGr(dev, head, i);

	for (i = 0; i < 5; i++)
		regp->Sequencer[i] = NVReadVgaSeq(dev, head, i);
}
822
/*
 * Load the standard VGA register state saved by nv_save_state_vga():
 * misc output, then sequencer, CRTC, graphics and attribute regs.
 */
static void
nv_load_state_vga(struct drm_device *dev, int head,
		  struct nv04_mode_state *state)
{
	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
	int i;

	NVWritePRMVIO(dev, head, NV_PRMVIO_MISC__WRITE, regp->MiscOutReg);

	for (i = 0; i < 5; i++)
		NVWriteVgaSeq(dev, head, i, regp->Sequencer[i]);

	/* release the CRTC base lock around the CRTC register writes */
	nv_lock_vga_crtc_base(dev, head, false);
	for (i = 0; i < 25; i++)
		wr_cio_state(dev, head, regp, i);
	nv_lock_vga_crtc_base(dev, head, true);

	for (i = 0; i < 9; i++)
		NVWriteVgaGr(dev, head, i, regp->Graphics[i]);

	/* attribute regs are only reachable with palette access unlocked */
	NVSetEnablePalette(dev, head, true);
	for (i = 0; i < 21; i++)
		NVWriteVgaAttr(dev, head, i, regp->Attribute[i]);
	NVSetEnablePalette(dev, head, false);
}
848
/*
 * Save the nvidia-extended CRTC state of 'head': the CRE_* indexed
 * registers beyond standard VGA plus the PCRTC MMIO registers.
 * Registers introduced by later generations are gated on card_type.
 */
static void
nv_save_state_ext(struct drm_device *dev, int head,
		  struct nv04_mode_state *state)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
	int i;

	rd_cio_state(dev, head, regp, NV_CIO_CRE_LCD__INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_RPC0_INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_RPC1_INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_LSR_INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_PIXEL_INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_HEB__INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_ENH_INDEX);

	rd_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_21);
	if (dev_priv->card_type >= NV_30)
		rd_cio_state(dev, head, regp, NV_CIO_CRE_47);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_49);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX);

	if (dev_priv->card_type >= NV_10) {
		regp->crtc_830 = NVReadCRTC(dev, head, NV_PCRTC_830);
		regp->crtc_834 = NVReadCRTC(dev, head, NV_PCRTC_834);

		if (dev_priv->card_type >= NV_30)
			regp->gpio_ext = NVReadCRTC(dev, head, NV_PCRTC_GPIO_EXT);

		if (dev_priv->card_type == NV_40)
			regp->crtc_850 = NVReadCRTC(dev, head, NV_PCRTC_850);

		if (nv_two_heads(dev))
			regp->crtc_eng_ctrl = NVReadCRTC(dev, head, NV_PCRTC_ENGINE_CTRL);
		regp->cursor_cfg = NVReadCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG);
	}

	regp->crtc_cfg = NVReadCRTC(dev, head, NV_PCRTC_CONFIG);

	rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX);
	if (dev_priv->card_type >= NV_10) {
		rd_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX);
		rd_cio_state(dev, head, regp, NV_CIO_CRE_CSB);
		rd_cio_state(dev, head, regp, NV_CIO_CRE_4B);
		rd_cio_state(dev, head, regp, NV_CIO_CRE_TVOUT_LATENCY);
	}
	/* NV11 and NV20 don't have this, they stop at 0x52. */
	if (nv_gf4_disp_arch(dev)) {
		rd_cio_state(dev, head, regp, NV_CIO_CRE_53);
		rd_cio_state(dev, head, regp, NV_CIO_CRE_54);

		/* CR58 is a 16-entry bank accessed via the CR57 index */
		for (i = 0; i < 0x10; i++)
			regp->CR58[i] = NVReadVgaCrtc5758(dev, head, i);
		rd_cio_state(dev, head, regp, NV_CIO_CRE_59);
		rd_cio_state(dev, head, regp, NV_CIO_CRE_5B);

		rd_cio_state(dev, head, regp, NV_CIO_CRE_85);
		rd_cio_state(dev, head, regp, NV_CIO_CRE_86);
	}

	regp->fb_start = NVReadCRTC(dev, head, NV_PCRTC_START);
}
917
/*
 * Program the nvidia-extended CRTC state of 'head' saved by
 * nv_save_state_ext().  The write order is deliberate (see the
 * ENGINE_CTRL comment below); do not reorder casually.
 */
static void
nv_load_state_ext(struct drm_device *dev, int head,
		  struct nv04_mode_state *state)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
	uint32_t reg900;
	int i;

	if (dev_priv->card_type >= NV_10) {
		if (nv_two_heads(dev))
			/* setting ENGINE_CTRL (EC) *must* come before
			 * CIO_CRE_LCD, as writing CRE_LCD sets bits 16 & 17 in
			 * EC that should not be overwritten by writing stale EC
			 */
			NVWriteCRTC(dev, head, NV_PCRTC_ENGINE_CTRL, regp->crtc_eng_ctrl);

		/* stop the video overlay and clamp its buffers to the
		 * available framebuffer */
		nvWriteVIDEO(dev, NV_PVIDEO_STOP, 1);
		nvWriteVIDEO(dev, NV_PVIDEO_INTR_EN, 0);
		nvWriteVIDEO(dev, NV_PVIDEO_OFFSET_BUFF(0), 0);
		nvWriteVIDEO(dev, NV_PVIDEO_OFFSET_BUFF(1), 0);
		nvWriteVIDEO(dev, NV_PVIDEO_LIMIT(0), dev_priv->fb_available_size - 1);
		nvWriteVIDEO(dev, NV_PVIDEO_LIMIT(1), dev_priv->fb_available_size - 1);
		nvWriteVIDEO(dev, NV_PVIDEO_UVPLANE_LIMIT(0), dev_priv->fb_available_size - 1);
		nvWriteVIDEO(dev, NV_PVIDEO_UVPLANE_LIMIT(1), dev_priv->fb_available_size - 1);
		nvWriteMC(dev, NV_PBUS_POWERCTRL_2, 0);

		NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg);
		NVWriteCRTC(dev, head, NV_PCRTC_830, regp->crtc_830);
		NVWriteCRTC(dev, head, NV_PCRTC_834, regp->crtc_834);

		if (dev_priv->card_type >= NV_30)
			NVWriteCRTC(dev, head, NV_PCRTC_GPIO_EXT, regp->gpio_ext);

		if (dev_priv->card_type == NV_40) {
			NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850);

			/* mirror the start-address-at-hsync setting into
			 * PRAMDAC_900 bit 16 */
			reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900);
			if (regp->crtc_cfg == NV_PCRTC_CONFIG_START_ADDRESS_HSYNC)
				NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 | 0x10000);
			else
				NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 & ~0x10000);
		}
	}

	NVWriteCRTC(dev, head, NV_PCRTC_CONFIG, regp->crtc_cfg);

	wr_cio_state(dev, head, regp, NV_CIO_CRE_RPC0_INDEX);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_RPC1_INDEX);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_LSR_INDEX);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_PIXEL_INDEX);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_LCD__INDEX);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_HEB__INDEX);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_ENH_INDEX);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
	if (dev_priv->card_type >= NV_30)
		wr_cio_state(dev, head, regp, NV_CIO_CRE_47);

	wr_cio_state(dev, head, regp, NV_CIO_CRE_49);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
	if (dev_priv->card_type == NV_40)
		nv_fix_nv40_hw_cursor(dev, head);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX);

	wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX);
	if (dev_priv->card_type >= NV_10) {
		wr_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX);
		wr_cio_state(dev, head, regp, NV_CIO_CRE_CSB);
		wr_cio_state(dev, head, regp, NV_CIO_CRE_4B);
		wr_cio_state(dev, head, regp, NV_CIO_CRE_TVOUT_LATENCY);
	}
	/* NV11 and NV20 stop at 0x52. */
	if (nv_gf4_disp_arch(dev)) {
		if (dev_priv->card_type == NV_10) {
			/* Not waiting for vertical retrace before modifying
			   CRE_53/CRE_54 causes lockups. */
			nouveau_wait_until(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
			nouveau_wait_until(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0);
		}

		wr_cio_state(dev, head, regp, NV_CIO_CRE_53);
		wr_cio_state(dev, head, regp, NV_CIO_CRE_54);

		/* CR58 is a 16-entry bank accessed via the CR57 index */
		for (i = 0; i < 0x10; i++)
			NVWriteVgaCrtc5758(dev, head, i, regp->CR58[i]);
		wr_cio_state(dev, head, regp, NV_CIO_CRE_59);
		wr_cio_state(dev, head, regp, NV_CIO_CRE_5B);

		wr_cio_state(dev, head, regp, NV_CIO_CRE_85);
		wr_cio_state(dev, head, regp, NV_CIO_CRE_86);
	}

	NVWriteCRTC(dev, head, NV_PCRTC_START, regp->fb_start);

	/* Setting 1 on this value gives you interrupts for every vblank period. */
	NVWriteCRTC(dev, head, NV_PCRTC_INTR_EN_0, 0);
	NVWriteCRTC(dev, head, NV_PCRTC_INTR_0, NV_PCRTC_INTR_0_VBLANK);
}
1020
/*
 * Read back all 768 DAC palette bytes (256 entries x 3 components) of
 * 'head' through the PRMDIO data port, which auto-increments after the
 * read-mode address is set to 0.
 */
static void
nv_save_state_palette(struct drm_device *dev, int head,
		      struct nv04_mode_state *state)
{
	int head_offset = head * NV_PRMDIO_SIZE, i;

	nv_wr08(dev, NV_PRMDIO_PIXEL_MASK + head_offset,
				NV_PRMDIO_PIXEL_MASK_MASK);
	/* start reading from palette entry 0 */
	nv_wr08(dev, NV_PRMDIO_READ_MODE_ADDRESS + head_offset, 0x0);

	for (i = 0; i < 768; i++) {
		state->crtc_reg[head].DAC[i] = nv_rd08(dev,
				NV_PRMDIO_PALETTE_DATA + head_offset);
	}

	NVSetEnablePalette(dev, head, false);
}
1038
/*
 * Write all 768 DAC palette bytes (256 entries x 3 components) of
 * 'head' through the PRMDIO data port, which auto-increments after the
 * write-mode address is set to 0.  Mirror of nv_save_state_palette().
 */
void
nouveau_hw_load_state_palette(struct drm_device *dev, int head,
			      struct nv04_mode_state *state)
{
	int head_offset = head * NV_PRMDIO_SIZE, i;

	nv_wr08(dev, NV_PRMDIO_PIXEL_MASK + head_offset,
				NV_PRMDIO_PIXEL_MASK_MASK);
	/* start writing at palette entry 0 */
	nv_wr08(dev, NV_PRMDIO_WRITE_MODE_ADDRESS + head_offset, 0x0);

	for (i = 0; i < 768; i++) {
		nv_wr08(dev, NV_PRMDIO_PALETTE_DATA + head_offset,
				state->crtc_reg[head].DAC[i]);
	}

	NVSetEnablePalette(dev, head, false);
}
1056
/*
 * Capture the complete mode state of 'head' (ramdac, vga, palette and
 * extended crtc registers) into *state.
 */
void nouveau_hw_save_state(struct drm_device *dev, int head,
			   struct nv04_mode_state *state)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (dev_priv->chipset == 0x11)
		/* NB: no attempt is made to restore the bad pll later on */
		nouveau_hw_fix_bad_vpll(dev, head);
	nv_save_state_ramdac(dev, head, state);
	nv_save_state_vga(dev, head, state);
	nv_save_state_palette(dev, head, state);
	nv_save_state_ext(dev, head, state);
}
1070
/*
 * Program a previously saved mode state into 'head', with VGA
 * protection enabled around the register writes.
 */
void nouveau_hw_load_state(struct drm_device *dev, int head,
			   struct nv04_mode_state *state)
{
	NVVgaProtect(dev, head, true);
	nv_load_state_ramdac(dev, head, state);
	nv_load_state_ext(dev, head, state);
	nouveau_hw_load_state_palette(dev, head, state);
	nv_load_state_vga(dev, head, state);
	NVVgaProtect(dev, head, false);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.h b/drivers/gpu/drm/nouveau/nouveau_hw.h
new file mode 100644
index 000000000000..869130f83602
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.h
@@ -0,0 +1,455 @@
1/*
2 * Copyright 2008 Stuart Bennett
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
18 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
19 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
20 * SOFTWARE.
21 */
22
23#ifndef __NOUVEAU_HW_H__
24#define __NOUVEAU_HW_H__
25
26#include "drmP.h"
27#include "nouveau_drv.h"
28
29#define MASK(field) ( \
30 (0xffffffff >> (31 - ((1 ? field) - (0 ? field)))) << (0 ? field))
31
32#define XLATE(src, srclowbit, outfield) ( \
33 (((src) >> (srclowbit)) << (0 ? outfield)) & MASK(outfield))
34
35void NVWriteVgaSeq(struct drm_device *, int head, uint8_t index, uint8_t value);
36uint8_t NVReadVgaSeq(struct drm_device *, int head, uint8_t index);
37void NVWriteVgaGr(struct drm_device *, int head, uint8_t index, uint8_t value);
38uint8_t NVReadVgaGr(struct drm_device *, int head, uint8_t index);
39void NVSetOwner(struct drm_device *, int owner);
40void NVBlankScreen(struct drm_device *, int head, bool blank);
41void nouveau_hw_setpll(struct drm_device *, uint32_t reg1,
42 struct nouveau_pll_vals *pv);
43int nouveau_hw_get_pllvals(struct drm_device *, enum pll_types plltype,
44 struct nouveau_pll_vals *pllvals);
45int nouveau_hw_pllvals_to_clk(struct nouveau_pll_vals *pllvals);
46int nouveau_hw_get_clock(struct drm_device *, enum pll_types plltype);
47void nouveau_hw_save_vga_fonts(struct drm_device *, bool save);
48void nouveau_hw_save_state(struct drm_device *, int head,
49 struct nv04_mode_state *state);
50void nouveau_hw_load_state(struct drm_device *, int head,
51 struct nv04_mode_state *state);
52void nouveau_hw_load_state_palette(struct drm_device *, int head,
53 struct nv04_mode_state *state);
54
55/* nouveau_calc.c */
56extern void nouveau_calc_arb(struct drm_device *, int vclk, int bpp,
57 int *burst, int *lwm);
58extern int nouveau_calc_pll_mnp(struct drm_device *, struct pll_lims *pll_lim,
59 int clk, struct nouveau_pll_vals *pv);
60
61static inline uint32_t
62nvReadMC(struct drm_device *dev, uint32_t reg)
63{
64 uint32_t val = nv_rd32(dev, reg);
65 NV_REG_DEBUG(MC, dev, "reg %08x val %08x\n", reg, val);
66 return val;
67}
68
69static inline void
70nvWriteMC(struct drm_device *dev, uint32_t reg, uint32_t val)
71{
72 NV_REG_DEBUG(MC, dev, "reg %08x val %08x\n", reg, val);
73 nv_wr32(dev, reg, val);
74}
75
76static inline uint32_t
77nvReadVIDEO(struct drm_device *dev, uint32_t reg)
78{
79 uint32_t val = nv_rd32(dev, reg);
80 NV_REG_DEBUG(VIDEO, dev, "reg %08x val %08x\n", reg, val);
81 return val;
82}
83
84static inline void
85nvWriteVIDEO(struct drm_device *dev, uint32_t reg, uint32_t val)
86{
87 NV_REG_DEBUG(VIDEO, dev, "reg %08x val %08x\n", reg, val);
88 nv_wr32(dev, reg, val);
89}
90
91static inline uint32_t
92nvReadFB(struct drm_device *dev, uint32_t reg)
93{
94 uint32_t val = nv_rd32(dev, reg);
95 NV_REG_DEBUG(FB, dev, "reg %08x val %08x\n", reg, val);
96 return val;
97}
98
99static inline void
100nvWriteFB(struct drm_device *dev, uint32_t reg, uint32_t val)
101{
102 NV_REG_DEBUG(FB, dev, "reg %08x val %08x\n", reg, val);
103 nv_wr32(dev, reg, val);
104}
105
106static inline uint32_t
107nvReadEXTDEV(struct drm_device *dev, uint32_t reg)
108{
109 uint32_t val = nv_rd32(dev, reg);
110 NV_REG_DEBUG(EXTDEV, dev, "reg %08x val %08x\n", reg, val);
111 return val;
112}
113
114static inline void
115nvWriteEXTDEV(struct drm_device *dev, uint32_t reg, uint32_t val)
116{
117 NV_REG_DEBUG(EXTDEV, dev, "reg %08x val %08x\n", reg, val);
118 nv_wr32(dev, reg, val);
119}
120
121static inline uint32_t NVReadCRTC(struct drm_device *dev,
122 int head, uint32_t reg)
123{
124 uint32_t val;
125 if (head)
126 reg += NV_PCRTC0_SIZE;
127 val = nv_rd32(dev, reg);
128 NV_REG_DEBUG(CRTC, dev, "head %d reg %08x val %08x\n", head, reg, val);
129 return val;
130}
131
132static inline void NVWriteCRTC(struct drm_device *dev,
133 int head, uint32_t reg, uint32_t val)
134{
135 if (head)
136 reg += NV_PCRTC0_SIZE;
137 NV_REG_DEBUG(CRTC, dev, "head %d reg %08x val %08x\n", head, reg, val);
138 nv_wr32(dev, reg, val);
139}
140
141static inline uint32_t NVReadRAMDAC(struct drm_device *dev,
142 int head, uint32_t reg)
143{
144 uint32_t val;
145 if (head)
146 reg += NV_PRAMDAC0_SIZE;
147 val = nv_rd32(dev, reg);
148 NV_REG_DEBUG(RAMDAC, dev, "head %d reg %08x val %08x\n",
149 head, reg, val);
150 return val;
151}
152
153static inline void NVWriteRAMDAC(struct drm_device *dev,
154 int head, uint32_t reg, uint32_t val)
155{
156 if (head)
157 reg += NV_PRAMDAC0_SIZE;
158 NV_REG_DEBUG(RAMDAC, dev, "head %d reg %08x val %08x\n",
159 head, reg, val);
160 nv_wr32(dev, reg, val);
161}
162
163static inline uint8_t nv_read_tmds(struct drm_device *dev,
164 int or, int dl, uint8_t address)
165{
166 int ramdac = (or & OUTPUT_C) >> 2;
167
168 NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_CONTROL + dl * 8,
169 NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE | address);
170 return NVReadRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_DATA + dl * 8);
171}
172
173static inline void nv_write_tmds(struct drm_device *dev,
174 int or, int dl, uint8_t address,
175 uint8_t data)
176{
177 int ramdac = (or & OUTPUT_C) >> 2;
178
179 NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_DATA + dl * 8, data);
180 NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_CONTROL + dl * 8, address);
181}
182
183static inline void NVWriteVgaCrtc(struct drm_device *dev,
184 int head, uint8_t index, uint8_t value)
185{
186 NV_REG_DEBUG(VGACRTC, dev, "head %d index 0x%02x data 0x%02x\n",
187 head, index, value);
188 nv_wr08(dev, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
189 nv_wr08(dev, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE, value);
190}
191
192static inline uint8_t NVReadVgaCrtc(struct drm_device *dev,
193 int head, uint8_t index)
194{
195 uint8_t val;
196 nv_wr08(dev, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
197 val = nv_rd08(dev, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE);
198 NV_REG_DEBUG(VGACRTC, dev, "head %d index 0x%02x data 0x%02x\n",
199 head, index, val);
200 return val;
201}
202
203/* CR57 and CR58 are a fun pair of regs. CR57 provides an index (0-0xf) for CR58
204 * I suspect they in fact do nothing, but are merely a way to carry useful
205 * per-head variables around
206 *
207 * Known uses:
208 * CR57 CR58
209 * 0x00 index to the appropriate dcb entry (or 7f for inactive)
210 * 0x02 dcb entry's "or" value (or 00 for inactive)
211 * 0x03 bit0 set for dual link (LVDS, possibly elsewhere too)
212 * 0x08 or 0x09 pxclk in MHz
213 * 0x0f laptop panel info - low nibble for PEXTDEV_BOOT_0 strap
214 * high nibble for xlat strap value
215 */
216
217static inline void
218NVWriteVgaCrtc5758(struct drm_device *dev, int head, uint8_t index, uint8_t value)
219{
220 NVWriteVgaCrtc(dev, head, NV_CIO_CRE_57, index);
221 NVWriteVgaCrtc(dev, head, NV_CIO_CRE_58, value);
222}
223
224static inline uint8_t NVReadVgaCrtc5758(struct drm_device *dev, int head, uint8_t index)
225{
226 NVWriteVgaCrtc(dev, head, NV_CIO_CRE_57, index);
227 return NVReadVgaCrtc(dev, head, NV_CIO_CRE_58);
228}
229
230static inline uint8_t NVReadPRMVIO(struct drm_device *dev,
231 int head, uint32_t reg)
232{
233 struct drm_nouveau_private *dev_priv = dev->dev_private;
234 uint8_t val;
235
236 /* Only NV4x have two pvio ranges; other twoHeads cards MUST call
237 * NVSetOwner for the relevant head to be programmed */
238 if (head && dev_priv->card_type == NV_40)
239 reg += NV_PRMVIO_SIZE;
240
241 val = nv_rd08(dev, reg);
242 NV_REG_DEBUG(RMVIO, dev, "head %d reg %08x val %02x\n", head, reg, val);
243 return val;
244}
245
246static inline void NVWritePRMVIO(struct drm_device *dev,
247 int head, uint32_t reg, uint8_t value)
248{
249 struct drm_nouveau_private *dev_priv = dev->dev_private;
250
251 /* Only NV4x have two pvio ranges; other twoHeads cards MUST call
252 * NVSetOwner for the relevant head to be programmed */
253 if (head && dev_priv->card_type == NV_40)
254 reg += NV_PRMVIO_SIZE;
255
256 NV_REG_DEBUG(RMVIO, dev, "head %d reg %08x val %02x\n",
257 head, reg, value);
258 nv_wr08(dev, reg, value);
259}
260
261static inline void NVSetEnablePalette(struct drm_device *dev, int head, bool enable)
262{
263 nv_rd08(dev, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
264 nv_wr08(dev, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, enable ? 0 : 0x20);
265}
266
267static inline bool NVGetEnablePalette(struct drm_device *dev, int head)
268{
269 nv_rd08(dev, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
270 return !(nv_rd08(dev, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE) & 0x20);
271}
272
273static inline void NVWriteVgaAttr(struct drm_device *dev,
274 int head, uint8_t index, uint8_t value)
275{
276 if (NVGetEnablePalette(dev, head))
277 index &= ~0x20;
278 else
279 index |= 0x20;
280
281 nv_rd08(dev, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
282 NV_REG_DEBUG(VGAATTR, dev, "head %d index 0x%02x data 0x%02x\n",
283 head, index, value);
284 nv_wr08(dev, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index);
285 nv_wr08(dev, NV_PRMCIO_AR__WRITE + head * NV_PRMCIO_SIZE, value);
286}
287
288static inline uint8_t NVReadVgaAttr(struct drm_device *dev,
289 int head, uint8_t index)
290{
291 uint8_t val;
292 if (NVGetEnablePalette(dev, head))
293 index &= ~0x20;
294 else
295 index |= 0x20;
296
297 nv_rd08(dev, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
298 nv_wr08(dev, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index);
299 val = nv_rd08(dev, NV_PRMCIO_AR__READ + head * NV_PRMCIO_SIZE);
300 NV_REG_DEBUG(VGAATTR, dev, "head %d index 0x%02x data 0x%02x\n",
301 head, index, val);
302 return val;
303}
304
305static inline void NVVgaSeqReset(struct drm_device *dev, int head, bool start)
306{
307 NVWriteVgaSeq(dev, head, NV_VIO_SR_RESET_INDEX, start ? 0x1 : 0x3);
308}
309
310static inline void NVVgaProtect(struct drm_device *dev, int head, bool protect)
311{
312 uint8_t seq1 = NVReadVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX);
313
314 if (protect) {
315 NVVgaSeqReset(dev, head, true);
316 NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 | 0x20);
317 } else {
318 /* Reenable sequencer, then turn on screen */
319 NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 & ~0x20); /* reenable display */
320 NVVgaSeqReset(dev, head, false);
321 }
322 NVSetEnablePalette(dev, head, protect);
323}
324
325static inline bool
326nv_heads_tied(struct drm_device *dev)
327{
328 struct drm_nouveau_private *dev_priv = dev->dev_private;
329
330 if (dev_priv->chipset == 0x11)
331 return !!(nvReadMC(dev, NV_PBUS_DEBUG_1) & (1 << 28));
332
333 return NVReadVgaCrtc(dev, 0, NV_CIO_CRE_44) & 0x4;
334}
335
336/* makes cr0-7 on the specified head read-only */
337static inline bool
338nv_lock_vga_crtc_base(struct drm_device *dev, int head, bool lock)
339{
340 uint8_t cr11 = NVReadVgaCrtc(dev, head, NV_CIO_CR_VRE_INDEX);
341 bool waslocked = cr11 & 0x80;
342
343 if (lock)
344 cr11 |= 0x80;
345 else
346 cr11 &= ~0x80;
347 NVWriteVgaCrtc(dev, head, NV_CIO_CR_VRE_INDEX, cr11);
348
349 return waslocked;
350}
351
352static inline void
353nv_lock_vga_crtc_shadow(struct drm_device *dev, int head, int lock)
354{
355 /* shadow lock: connects 0x60?3d? regs to "real" 0x3d? regs
356 * bit7: unlocks HDT, HBS, HBE, HRS, HRE, HEB
357 * bit6: seems to have some effect on CR09 (double scan, VBS_9)
358 * bit5: unlocks HDE
359 * bit4: unlocks VDE
360 * bit3: unlocks VDT, OVL, VRS, ?VRE?, VBS, VBE, LSR, EBR
361 * bit2: same as bit 1 of 0x60?804
362 * bit0: same as bit 0 of 0x60?804
363 */
364
365 uint8_t cr21 = lock;
366
367 if (lock < 0)
368 /* 0xfa is generic "unlock all" mask */
369 cr21 = NVReadVgaCrtc(dev, head, NV_CIO_CRE_21) | 0xfa;
370
371 NVWriteVgaCrtc(dev, head, NV_CIO_CRE_21, cr21);
372}
373
374/* renders the extended crtc regs (cr19+) on all crtcs impervious:
375 * immutable and unreadable
376 */
377static inline bool
378NVLockVgaCrtcs(struct drm_device *dev, bool lock)
379{
380 struct drm_nouveau_private *dev_priv = dev->dev_private;
381 bool waslocked = !NVReadVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX);
382
383 NVWriteVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX,
384 lock ? NV_CIO_SR_LOCK_VALUE : NV_CIO_SR_UNLOCK_RW_VALUE);
385 /* NV11 has independently lockable extended crtcs, except when tied */
386 if (dev_priv->chipset == 0x11 && !nv_heads_tied(dev))
387 NVWriteVgaCrtc(dev, 1, NV_CIO_SR_LOCK_INDEX,
388 lock ? NV_CIO_SR_LOCK_VALUE :
389 NV_CIO_SR_UNLOCK_RW_VALUE);
390
391 return waslocked;
392}
393
394/* nv04 cursor max dimensions of 32x32 (A1R5G5B5) */
395#define NV04_CURSOR_SIZE 32
396/* limit nv10 cursors to 64x64 (ARGB8) (we could go to 64x255) */
397#define NV10_CURSOR_SIZE 64
398
399static inline int nv_cursor_width(struct drm_device *dev)
400{
401 struct drm_nouveau_private *dev_priv = dev->dev_private;
402
403 return dev_priv->card_type >= NV_10 ? NV10_CURSOR_SIZE : NV04_CURSOR_SIZE;
404}
405
406static inline void
407nv_fix_nv40_hw_cursor(struct drm_device *dev, int head)
408{
409 /* on some nv40 (such as the "true" (in the NV_PFB_BOOT_0 sense) nv40,
410 * the gf6800gt) a hardware bug requires a write to PRAMDAC_CURSOR_POS
411 * for changes to the CRTC CURCTL regs to take effect, whether changing
412 * the pixmap location, or just showing/hiding the cursor
413 */
414 uint32_t curpos = NVReadRAMDAC(dev, head, NV_PRAMDAC_CU_START_POS);
415 NVWriteRAMDAC(dev, head, NV_PRAMDAC_CU_START_POS, curpos);
416}
417
418static inline void
419nv_show_cursor(struct drm_device *dev, int head, bool show)
420{
421 struct drm_nouveau_private *dev_priv = dev->dev_private;
422 uint8_t *curctl1 =
423 &dev_priv->mode_reg.crtc_reg[head].CRTC[NV_CIO_CRE_HCUR_ADDR1_INDEX];
424
425 if (show)
426 *curctl1 |= MASK(NV_CIO_CRE_HCUR_ADDR1_ENABLE);
427 else
428 *curctl1 &= ~MASK(NV_CIO_CRE_HCUR_ADDR1_ENABLE);
429 NVWriteVgaCrtc(dev, head, NV_CIO_CRE_HCUR_ADDR1_INDEX, *curctl1);
430
431 if (dev_priv->card_type == NV_40)
432 nv_fix_nv40_hw_cursor(dev, head);
433}
434
435static inline uint32_t
436nv_pitch_align(struct drm_device *dev, uint32_t width, int bpp)
437{
438 struct drm_nouveau_private *dev_priv = dev->dev_private;
439 int mask;
440
441 if (bpp == 15)
442 bpp = 16;
443 if (bpp == 24)
444 bpp = 8;
445
446 /* Alignment requirements taken from the Haiku driver */
447 if (dev_priv->card_type == NV_04)
448 mask = 128 / bpp - 1;
449 else
450 mask = 512 / bpp - 1;
451
452 return (width + mask) & ~mask;
453}
454
455#endif /* __NOUVEAU_HW_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
new file mode 100644
index 000000000000..70e994d28122
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -0,0 +1,269 @@
1/*
2 * Copyright 2009 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_i2c.h"
28#include "nouveau_hw.h"
29
30static void
31nv04_i2c_setscl(void *data, int state)
32{
33 struct nouveau_i2c_chan *i2c = data;
34 struct drm_device *dev = i2c->dev;
35 uint8_t val;
36
37 val = (NVReadVgaCrtc(dev, 0, i2c->wr) & 0xd0) | (state ? 0x20 : 0);
38 NVWriteVgaCrtc(dev, 0, i2c->wr, val | 0x01);
39}
40
41static void
42nv04_i2c_setsda(void *data, int state)
43{
44 struct nouveau_i2c_chan *i2c = data;
45 struct drm_device *dev = i2c->dev;
46 uint8_t val;
47
48 val = (NVReadVgaCrtc(dev, 0, i2c->wr) & 0xe0) | (state ? 0x10 : 0);
49 NVWriteVgaCrtc(dev, 0, i2c->wr, val | 0x01);
50}
51
52static int
53nv04_i2c_getscl(void *data)
54{
55 struct nouveau_i2c_chan *i2c = data;
56 struct drm_device *dev = i2c->dev;
57
58 return !!(NVReadVgaCrtc(dev, 0, i2c->rd) & 4);
59}
60
61static int
62nv04_i2c_getsda(void *data)
63{
64 struct nouveau_i2c_chan *i2c = data;
65 struct drm_device *dev = i2c->dev;
66
67 return !!(NVReadVgaCrtc(dev, 0, i2c->rd) & 8);
68}
69
70static void
71nv4e_i2c_setscl(void *data, int state)
72{
73 struct nouveau_i2c_chan *i2c = data;
74 struct drm_device *dev = i2c->dev;
75 uint8_t val;
76
77 val = (nv_rd32(dev, i2c->wr) & 0xd0) | (state ? 0x20 : 0);
78 nv_wr32(dev, i2c->wr, val | 0x01);
79}
80
81static void
82nv4e_i2c_setsda(void *data, int state)
83{
84 struct nouveau_i2c_chan *i2c = data;
85 struct drm_device *dev = i2c->dev;
86 uint8_t val;
87
88 val = (nv_rd32(dev, i2c->wr) & 0xe0) | (state ? 0x10 : 0);
89 nv_wr32(dev, i2c->wr, val | 0x01);
90}
91
92static int
93nv4e_i2c_getscl(void *data)
94{
95 struct nouveau_i2c_chan *i2c = data;
96 struct drm_device *dev = i2c->dev;
97
98 return !!((nv_rd32(dev, i2c->rd) >> 16) & 4);
99}
100
101static int
102nv4e_i2c_getsda(void *data)
103{
104 struct nouveau_i2c_chan *i2c = data;
105 struct drm_device *dev = i2c->dev;
106
107 return !!((nv_rd32(dev, i2c->rd) >> 16) & 8);
108}
109
110static int
111nv50_i2c_getscl(void *data)
112{
113 struct nouveau_i2c_chan *i2c = data;
114 struct drm_device *dev = i2c->dev;
115
116 return !!(nv_rd32(dev, i2c->rd) & 1);
117}
118
119
120static int
121nv50_i2c_getsda(void *data)
122{
123 struct nouveau_i2c_chan *i2c = data;
124 struct drm_device *dev = i2c->dev;
125
126 return !!(nv_rd32(dev, i2c->rd) & 2);
127}
128
129static void
130nv50_i2c_setscl(void *data, int state)
131{
132 struct nouveau_i2c_chan *i2c = data;
133 struct drm_device *dev = i2c->dev;
134
135 nv_wr32(dev, i2c->wr, 4 | (i2c->data ? 2 : 0) | (state ? 1 : 0));
136}
137
138static void
139nv50_i2c_setsda(void *data, int state)
140{
141 struct nouveau_i2c_chan *i2c = data;
142 struct drm_device *dev = i2c->dev;
143
144 nv_wr32(dev, i2c->wr,
145 (nv_rd32(dev, i2c->rd) & 1) | 4 | (state ? 2 : 0));
146 i2c->data = state;
147}
148
149static const uint32_t nv50_i2c_port[] = {
150 0x00e138, 0x00e150, 0x00e168, 0x00e180,
151 0x00e254, 0x00e274, 0x00e764, 0x00e780,
152 0x00e79c, 0x00e7b8
153};
154#define NV50_I2C_PORTS ARRAY_SIZE(nv50_i2c_port)
155
156int
157nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index)
158{
159 struct drm_nouveau_private *dev_priv = dev->dev_private;
160 struct nouveau_i2c_chan *i2c;
161 int ret;
162
163 if (entry->chan)
164 return -EEXIST;
165
166 if (dev_priv->card_type == NV_50 && entry->read >= NV50_I2C_PORTS) {
167 NV_ERROR(dev, "unknown i2c port %d\n", entry->read);
168 return -EINVAL;
169 }
170
171 i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
172 if (i2c == NULL)
173 return -ENOMEM;
174
175 switch (entry->port_type) {
176 case 0:
177 i2c->algo.bit.setsda = nv04_i2c_setsda;
178 i2c->algo.bit.setscl = nv04_i2c_setscl;
179 i2c->algo.bit.getsda = nv04_i2c_getsda;
180 i2c->algo.bit.getscl = nv04_i2c_getscl;
181 i2c->rd = entry->read;
182 i2c->wr = entry->write;
183 break;
184 case 4:
185 i2c->algo.bit.setsda = nv4e_i2c_setsda;
186 i2c->algo.bit.setscl = nv4e_i2c_setscl;
187 i2c->algo.bit.getsda = nv4e_i2c_getsda;
188 i2c->algo.bit.getscl = nv4e_i2c_getscl;
189 i2c->rd = 0x600800 + entry->read;
190 i2c->wr = 0x600800 + entry->write;
191 break;
192 case 5:
193 i2c->algo.bit.setsda = nv50_i2c_setsda;
194 i2c->algo.bit.setscl = nv50_i2c_setscl;
195 i2c->algo.bit.getsda = nv50_i2c_getsda;
196 i2c->algo.bit.getscl = nv50_i2c_getscl;
197 i2c->rd = nv50_i2c_port[entry->read];
198 i2c->wr = i2c->rd;
199 break;
200 case 6:
201 i2c->rd = entry->read;
202 i2c->wr = entry->write;
203 break;
204 default:
205 NV_ERROR(dev, "DCB I2C port type %d unknown\n",
206 entry->port_type);
207 kfree(i2c);
208 return -EINVAL;
209 }
210
211 snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
212 "nouveau-%s-%d", pci_name(dev->pdev), index);
213 i2c->adapter.owner = THIS_MODULE;
214 i2c->adapter.dev.parent = &dev->pdev->dev;
215 i2c->dev = dev;
216 i2c_set_adapdata(&i2c->adapter, i2c);
217
218 if (entry->port_type < 6) {
219 i2c->adapter.algo_data = &i2c->algo.bit;
220 i2c->algo.bit.udelay = 40;
221 i2c->algo.bit.timeout = usecs_to_jiffies(5000);
222 i2c->algo.bit.data = i2c;
223 ret = i2c_bit_add_bus(&i2c->adapter);
224 } else {
225 i2c->adapter.algo_data = &i2c->algo.dp;
226 i2c->algo.dp.running = false;
227 i2c->algo.dp.address = 0;
228 i2c->algo.dp.aux_ch = nouveau_dp_i2c_aux_ch;
229 ret = i2c_dp_aux_add_bus(&i2c->adapter);
230 }
231
232 if (ret) {
233 NV_ERROR(dev, "Failed to register i2c %d\n", index);
234 kfree(i2c);
235 return ret;
236 }
237
238 entry->chan = i2c;
239 return 0;
240}
241
242void
243nouveau_i2c_fini(struct drm_device *dev, struct dcb_i2c_entry *entry)
244{
245 if (!entry->chan)
246 return;
247
248 i2c_del_adapter(&entry->chan->adapter);
249 kfree(entry->chan);
250 entry->chan = NULL;
251}
252
253struct nouveau_i2c_chan *
254nouveau_i2c_find(struct drm_device *dev, int index)
255{
256 struct drm_nouveau_private *dev_priv = dev->dev_private;
257 struct nvbios *bios = &dev_priv->VBIOS;
258
259 if (index > DCB_MAX_NUM_I2C_ENTRIES)
260 return NULL;
261
262 if (!bios->bdcb.dcb.i2c[index].chan) {
263 if (nouveau_i2c_init(dev, &bios->bdcb.dcb.i2c[index], index))
264 return NULL;
265 }
266
267 return bios->bdcb.dcb.i2c[index].chan;
268}
269
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.h b/drivers/gpu/drm/nouveau/nouveau_i2c.h
new file mode 100644
index 000000000000..c8eaf7a9fcbb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.h
@@ -0,0 +1,52 @@
1/*
2 * Copyright 2009 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef __NOUVEAU_I2C_H__
24#define __NOUVEAU_I2C_H__
25
26#include <linux/i2c.h>
27#include <linux/i2c-id.h>
28#include <linux/i2c-algo-bit.h>
29#include "drm_dp_helper.h"
30
31struct dcb_i2c_entry;
32
33struct nouveau_i2c_chan {
34 struct i2c_adapter adapter;
35 struct drm_device *dev;
36 union {
37 struct i2c_algo_bit_data bit;
38 struct i2c_algo_dp_aux_data dp;
39 } algo;
40 unsigned rd;
41 unsigned wr;
42 unsigned data;
43};
44
45int nouveau_i2c_init(struct drm_device *, struct dcb_i2c_entry *, int index);
46void nouveau_i2c_fini(struct drm_device *, struct dcb_i2c_entry *);
47struct nouveau_i2c_chan *nouveau_i2c_find(struct drm_device *, int index);
48
49int nouveau_dp_i2c_aux_ch(struct i2c_adapter *, int mode, uint8_t write_byte,
50 uint8_t *read_byte);
51
52#endif /* __NOUVEAU_I2C_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
new file mode 100644
index 000000000000..a2c30f4611ba
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
@@ -0,0 +1,72 @@
1/**
2 * \file mga_ioc32.c
3 *
4 * 32-bit ioctl compatibility routines for the MGA DRM.
5 *
6 * \author Dave Airlie <airlied@linux.ie> with code from patches by Egbert Eich
7 *
8 *
9 * Copyright (C) Paul Mackerras 2005
10 * Copyright (C) Egbert Eich 2003,2004
11 * Copyright (C) Dave Airlie 2005
12 * All Rights Reserved.
13 *
14 * Permission is hereby granted, free of charge, to any person obtaining a
15 * copy of this software and associated documentation files (the "Software"),
16 * to deal in the Software without restriction, including without limitation
17 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
18 * and/or sell copies of the Software, and to permit persons to whom the
19 * Software is furnished to do so, subject to the following conditions:
20 *
21 * The above copyright notice and this permission notice (including the next
22 * paragraph) shall be included in all copies or substantial portions of the
23 * Software.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
26 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
27 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
28 * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
29 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
30 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
31 * IN THE SOFTWARE.
32 */
33
34#include <linux/compat.h>
35
36#include "drmP.h"
37#include "drm.h"
38
39#include "nouveau_drv.h"
40
41/**
42 * Called whenever a 32-bit process running under a 64-bit kernel
43 * performs an ioctl on /dev/dri/card<n>.
44 *
45 * \param filp file pointer.
46 * \param cmd command.
47 * \param arg user argument.
48 * \return zero on success or negative number on failure.
49 */
50long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
51 unsigned long arg)
52{
53 unsigned int nr = DRM_IOCTL_NR(cmd);
54 drm_ioctl_compat_t *fn = NULL;
55 int ret;
56
57 if (nr < DRM_COMMAND_BASE)
58 return drm_compat_ioctl(filp, cmd, arg);
59
60#if 0
61 if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
62 fn = nouveau_compat_ioctls[nr - DRM_COMMAND_BASE];
63#endif
64 lock_kernel(); /* XXX for now */
65 if (fn != NULL)
66 ret = (*fn)(filp, cmd, arg);
67 else
68 ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
69 unlock_kernel();
70
71 return ret;
72}
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
new file mode 100644
index 000000000000..370c72c968d1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -0,0 +1,702 @@
1/*
2 * Copyright (C) 2006 Ben Skeggs.
3 *
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sublicense, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial
16 * portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 */
27
28/*
29 * Authors:
30 * Ben Skeggs <darktama@iinet.net.au>
31 */
32
33#include "drmP.h"
34#include "drm.h"
35#include "nouveau_drm.h"
36#include "nouveau_drv.h"
37#include "nouveau_reg.h"
38#include <linux/ratelimit.h>
39
40/* needed for hotplug irq */
41#include "nouveau_connector.h"
42#include "nv50_display.h"
43
/* Runs before the IRQ handler is installed: mask all interrupt sources at
 * the PMC so nothing fires until postinstall, and set up the bottom half
 * used by the NV50 display code. */
void
nouveau_irq_preinstall(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/* Master disable */
	nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);

	if (dev_priv->card_type == NV_50) {
		/* NV50 display interrupts are serviced from a work item. */
		INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
		INIT_LIST_HEAD(&dev_priv->vbl_waiting);
	}
}
57
/* Runs after the IRQ handler is hooked up: flip the PMC master enable so
 * interrupt delivery starts.  Always succeeds. */
int
nouveau_irq_postinstall(struct drm_device *dev)
{
	/* Master enable */
	nv_wr32(dev, NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE);
	return 0;
}
65
/* Mask all interrupts again when the handler is being removed. */
void
nouveau_irq_uninstall(struct drm_device *dev)
{
	/* Master disable */
	nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
}
72
73static int
74nouveau_call_method(struct nouveau_channel *chan, int class, int mthd, int data)
75{
76 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
77 struct nouveau_pgraph_object_method *grm;
78 struct nouveau_pgraph_object_class *grc;
79
80 grc = dev_priv->engine.graph.grclass;
81 while (grc->id) {
82 if (grc->id == class)
83 break;
84 grc++;
85 }
86
87 if (grc->id != class || !grc->methods)
88 return -ENOENT;
89
90 grm = grc->methods;
91 while (grm->id) {
92 if (grm->id == mthd)
93 return grm->exec(chan, class, mthd, data);
94 grm++;
95 }
96
97 return -ENOENT;
98}
99
/* Try to execute what PFIFO flagged as an illegal method as a software
 * method.  addr encodes the subchannel (bits 15:13) and method (bits
 * 12:2).  Returns true if the method was handled in software. */
static bool
nouveau_fifo_swmthd(struct nouveau_channel *chan, uint32_t addr, uint32_t data)
{
	struct drm_device *dev = chan->dev;
	const int subc = (addr >> 13) & 0x7;
	const int mthd = addr & 0x1ffc;

	if (mthd == 0x0000) {
		/* Method 0: bind object — data is the object handle. */
		struct nouveau_gpuobj_ref *ref = NULL;

		if (nouveau_gpuobj_ref_find(chan, data, &ref))
			return false;

		/* Only software objects are handled here. */
		if (ref->gpuobj->engine != NVOBJ_ENGINE_SW)
			return false;

		/* Remember the class bound to this subchannel and clear the
		 * hardware engine nibble for it so later methods on this
		 * subchannel are routed to software too. */
		chan->sw_subchannel[subc] = ref->gpuobj->class;
		nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_rd32(dev,
			NV04_PFIFO_CACHE1_ENGINE) & ~(0xf << subc * 4));
		return true;
	}

	/* hw object */
	if (nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE) & (1 << (subc*4)))
		return false;

	if (nouveau_call_method(chan, chan->sw_subchannel[subc], mthd, data))
		return false;

	return true;
}
131
132static void
133nouveau_fifo_irq_handler(struct drm_device *dev)
134{
135 struct drm_nouveau_private *dev_priv = dev->dev_private;
136 struct nouveau_engine *engine = &dev_priv->engine;
137 uint32_t status, reassign;
138 int cnt = 0;
139
140 reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
141 while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
142 struct nouveau_channel *chan = NULL;
143 uint32_t chid, get;
144
145 nv_wr32(dev, NV03_PFIFO_CACHES, 0);
146
147 chid = engine->fifo.channel_id(dev);
148 if (chid >= 0 && chid < engine->fifo.channels)
149 chan = dev_priv->fifos[chid];
150 get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);
151
152 if (status & NV_PFIFO_INTR_CACHE_ERROR) {
153 uint32_t mthd, data;
154 int ptr;
155
156 /* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
157 * wrapping on my G80 chips, but CACHE1 isn't big
158 * enough for this much data.. Tests show that it
159 * wraps around to the start at GET=0x800.. No clue
160 * as to why..
161 */
162 ptr = (get & 0x7ff) >> 2;
163
164 if (dev_priv->card_type < NV_40) {
165 mthd = nv_rd32(dev,
166 NV04_PFIFO_CACHE1_METHOD(ptr));
167 data = nv_rd32(dev,
168 NV04_PFIFO_CACHE1_DATA(ptr));
169 } else {
170 mthd = nv_rd32(dev,
171 NV40_PFIFO_CACHE1_METHOD(ptr));
172 data = nv_rd32(dev,
173 NV40_PFIFO_CACHE1_DATA(ptr));
174 }
175
176 if (!chan || !nouveau_fifo_swmthd(chan, mthd, data)) {
177 NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
178 "Mthd 0x%04x Data 0x%08x\n",
179 chid, (mthd >> 13) & 7, mthd & 0x1ffc,
180 data);
181 }
182
183 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
184 nv_wr32(dev, NV03_PFIFO_INTR_0,
185 NV_PFIFO_INTR_CACHE_ERROR);
186
187 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
188 nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
189 nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
190 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
191 nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
192 nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
193
194 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
195 nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
196 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
197
198 status &= ~NV_PFIFO_INTR_CACHE_ERROR;
199 }
200
201 if (status & NV_PFIFO_INTR_DMA_PUSHER) {
202 NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d\n", chid);
203
204 status &= ~NV_PFIFO_INTR_DMA_PUSHER;
205 nv_wr32(dev, NV03_PFIFO_INTR_0,
206 NV_PFIFO_INTR_DMA_PUSHER);
207
208 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, 0x00000000);
209 if (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT) != get)
210 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET,
211 get + 4);
212 }
213
214 if (status) {
215 NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
216 status, chid);
217 nv_wr32(dev, NV03_PFIFO_INTR_0, status);
218 status = 0;
219 }
220
221 nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
222 }
223
224 if (status) {
225 NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
226 nv_wr32(dev, 0x2140, 0);
227 nv_wr32(dev, 0x140, 0);
228 }
229
230 nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
231}
232
/* Maps a single status/source bit to its printable name; used by the
 * nstatus/nsource decode tables below. */
struct nouveau_bitfield_names {
	uint32_t mask;
	const char *name;
};
237
/* PGRAPH NSTATUS bit names for pre-NV10 chips. */
static struct nouveau_bitfield_names nstatus_names[] =
{
	{ NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
	{ NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
	{ NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
	{ NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }
};
245
/* PGRAPH NSTATUS bit names for NV10 and later (same flags, shifted
 * bit positions). */
static struct nouveau_bitfield_names nstatus_names_nv10[] =
{
	{ NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
	{ NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
	{ NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
	{ NV10_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }
};
253
/* PGRAPH NSOURCE (error cause) bit names, common to all generations
 * that expose NSOURCE. */
static struct nouveau_bitfield_names nsource_names[] =
{
	{ NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" },
	{ NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" },
	{ NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" },
	{ NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" },
	{ NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" },
	{ NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" },
	{ NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" },
	{ NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" },
	{ NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" },
	{ NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" },
	{ NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" },
	{ NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" },
	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" },
};
276
277static void
278nouveau_print_bitfield_names_(uint32_t value,
279 const struct nouveau_bitfield_names *namelist,
280 const int namelist_len)
281{
282 /*
283 * Caller must have already printed the KERN_* log level for us.
284 * Also the caller is responsible for adding the newline.
285 */
286 int i;
287 for (i = 0; i < namelist_len; ++i) {
288 uint32_t mask = namelist[i].mask;
289 if (value & mask) {
290 printk(" %s", namelist[i].name);
291 value &= ~mask;
292 }
293 }
294 if (value)
295 printk(" (unknown bits 0x%08x)", value);
296}
297#define nouveau_print_bitfield_names(val, namelist) \
298 nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist))
299
300
301static int
302nouveau_graph_chid_from_grctx(struct drm_device *dev)
303{
304 struct drm_nouveau_private *dev_priv = dev->dev_private;
305 uint32_t inst;
306 int i;
307
308 if (dev_priv->card_type < NV_40)
309 return dev_priv->engine.fifo.channels;
310 else
311 if (dev_priv->card_type < NV_50) {
312 inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 4;
313
314 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
315 struct nouveau_channel *chan = dev_priv->fifos[i];
316
317 if (!chan || !chan->ramin_grctx)
318 continue;
319
320 if (inst == chan->ramin_grctx->instance)
321 break;
322 }
323 } else {
324 inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 12;
325
326 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
327 struct nouveau_channel *chan = dev_priv->fifos[i];
328
329 if (!chan || !chan->ramin)
330 continue;
331
332 if (inst == chan->ramin->instance)
333 break;
334 }
335 }
336
337
338 return i;
339}
340
341static int
342nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret)
343{
344 struct drm_nouveau_private *dev_priv = dev->dev_private;
345 struct nouveau_engine *engine = &dev_priv->engine;
346 int channel;
347
348 if (dev_priv->card_type < NV_10)
349 channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0xf;
350 else
351 if (dev_priv->card_type < NV_40)
352 channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
353 else
354 channel = nouveau_graph_chid_from_grctx(dev);
355
356 if (channel >= engine->fifo.channels || !dev_priv->fifos[channel]) {
357 NV_ERROR(dev, "AIII, invalid/inactive channel id %d\n", channel);
358 return -EINVAL;
359 }
360
361 *channel_ret = channel;
362 return 0;
363}
364
/* Snapshot of a PGRAPH trap, filled in by nouveau_graph_trap_info(). */
struct nouveau_pgraph_trap {
	int channel;			/* trapped channel id; -1 if unknown */
	int class;			/* class bound to the subchannel */
	int subc, mthd, size;
	uint32_t data, data2;		/* NOTE(review): data2 is only filled
					 * in on NV10+; garbage before that */
	uint32_t nsource, nstatus;	/* zero on NV50+ (not read there) */
};
372
/* Gather details of the current PGRAPH trap (channel, subchannel, class,
 * method, data) from the chip's trap registers; register locations and
 * field widths vary per generation. */
static void
nouveau_graph_trap_info(struct drm_device *dev,
			struct nouveau_pgraph_trap *trap)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t address;

	trap->nsource = trap->nstatus = 0;
	if (dev_priv->card_type < NV_50) {
		trap->nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
		trap->nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
	}

	/* -1 marks "channel unknown/inactive". */
	if (nouveau_graph_trapped_channel(dev, &trap->channel))
		trap->channel = -1;
	address = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);

	trap->mthd = address & 0x1FFC;
	trap->data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
	if (dev_priv->card_type < NV_10) {
		trap->subc = (address >> 13) & 0x7;
	} else {
		trap->subc = (address >> 16) & 0x7;
		/* NOTE(review): data2 stays uninitialized on pre-NV10. */
		trap->data2 = nv_rd32(dev, NV10_PGRAPH_TRAPPED_DATA_HIGH);
	}

	/* The bound class lives in a per-subchannel register whose address
	 * and width also differ per generation. */
	if (dev_priv->card_type < NV_10)
		trap->class = nv_rd32(dev, 0x400180 + trap->subc*4) & 0xFF;
	else if (dev_priv->card_type < NV_40)
		trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFF;
	else if (dev_priv->card_type < NV_50)
		trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFFF;
	else
		trap->class = nv_rd32(dev, 0x400814);
}
408
/* Log a decoded trap, prefixed with 'id': the nsource/nstatus bit names
 * followed by channel, subchannel, class, method and data.  The first
 * line is built from several printk fragments. */
static void
nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id,
			     struct nouveau_pgraph_trap *trap)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t nsource = trap->nsource, nstatus = trap->nstatus;

	NV_INFO(dev, "%s - nSource:", id);
	nouveau_print_bitfield_names(nsource, nsource_names);
	printk(", nStatus:");
	/* NSTATUS bit layout changed at NV10 — pick the right table. */
	if (dev_priv->card_type < NV_10)
		nouveau_print_bitfield_names(nstatus, nstatus_names);
	else
		nouveau_print_bitfield_names(nstatus, nstatus_names_nv10);
	printk("\n");

	NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x "
		     "Data 0x%08x:0x%08x\n",
		id, trap->channel, trap->subc,
		trap->class, trap->mthd,
		trap->data2, trap->data);
}
431
432static int
433nouveau_pgraph_intr_swmthd(struct drm_device *dev,
434 struct nouveau_pgraph_trap *trap)
435{
436 struct drm_nouveau_private *dev_priv = dev->dev_private;
437
438 if (trap->channel < 0 ||
439 trap->channel >= dev_priv->engine.fifo.channels ||
440 !dev_priv->fifos[trap->channel])
441 return -ENODEV;
442
443 return nouveau_call_method(dev_priv->fifos[trap->channel],
444 trap->class, trap->mthd, trap->data);
445}
446
447static inline void
448nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource)
449{
450 struct nouveau_pgraph_trap trap;
451 int unhandled = 0;
452
453 nouveau_graph_trap_info(dev, &trap);
454
455 if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
456 if (nouveau_pgraph_intr_swmthd(dev, &trap))
457 unhandled = 1;
458 } else {
459 unhandled = 1;
460 }
461
462 if (unhandled)
463 nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap);
464}
465
/* Allow at most 20 PGRAPH error dumps every 3 seconds so a wedged
 * channel can't flood the kernel log. */
static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);

/* Returns nonzero when another message may be printed now. */
static int nouveau_ratelimit(void)
{
	return __ratelimit(&nouveau_ratelimit_state);
}
472
473
474static inline void
475nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
476{
477 struct nouveau_pgraph_trap trap;
478 int unhandled = 0;
479
480 nouveau_graph_trap_info(dev, &trap);
481 trap.nsource = nsource;
482
483 if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
484 if (nouveau_pgraph_intr_swmthd(dev, &trap))
485 unhandled = 1;
486 } else {
487 unhandled = 1;
488 }
489
490 if (unhandled && nouveau_ratelimit())
491 nouveau_graph_dump_trap_info(dev, "PGRAPH_ERROR", &trap);
492}
493
494static inline void
495nouveau_pgraph_intr_context_switch(struct drm_device *dev)
496{
497 struct drm_nouveau_private *dev_priv = dev->dev_private;
498 struct nouveau_engine *engine = &dev_priv->engine;
499 uint32_t chid;
500
501 chid = engine->fifo.channel_id(dev);
502 NV_DEBUG(dev, "PGRAPH context switch interrupt channel %x\n", chid);
503
504 switch (dev_priv->card_type) {
505 case NV_04:
506 nv04_graph_context_switch(dev);
507 break;
508 case NV_10:
509 nv10_graph_context_switch(dev);
510 break;
511 default:
512 NV_ERROR(dev, "Context switch not implemented\n");
513 break;
514 }
515}
516
/* Drain and acknowledge PGRAPH interrupts on pre-NV50 chips: NOTIFY,
 * ERROR and CONTEXT_SWITCH each get their handler, anything left is
 * logged and acked wholesale. */
static void
nouveau_pgraph_irq_handler(struct drm_device *dev)
{
	uint32_t status;

	while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
		uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);

		if (status & NV_PGRAPH_INTR_NOTIFY) {
			nouveau_pgraph_intr_notify(dev, nsource);

			status &= ~NV_PGRAPH_INTR_NOTIFY;
			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY);
		}

		if (status & NV_PGRAPH_INTR_ERROR) {
			nouveau_pgraph_intr_error(dev, nsource);

			status &= ~NV_PGRAPH_INTR_ERROR;
			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_ERROR);
		}

		if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
			nouveau_pgraph_intr_context_switch(dev);

			status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
			nv_wr32(dev, NV03_PGRAPH_INTR,
				NV_PGRAPH_INTR_CONTEXT_SWITCH);
		}

		if (status) {
			NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status);
			nv_wr32(dev, NV03_PGRAPH_INTR, status);
		}

		/* Un-stall the graphics FIFO if trap handling halted it. */
		if ((nv_rd32(dev, NV04_PGRAPH_FIFO) & (1 << 0)) == 0)
			nv_wr32(dev, NV04_PGRAPH_FIFO, 1);
	}

	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
}
558
/* Service PGRAPH interrupts on NV50.  Status bits here don't all match
 * the pre-NV50 NV_PGRAPH_INTR_* layout, hence the raw bit masks. */
static void
nv50_pgraph_irq_handler(struct drm_device *dev)
{
	uint32_t status, nsource;

	status = nv_rd32(dev, NV03_PGRAPH_INTR);
	nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);

	/* NOTIFY */
	if (status & 0x00000001) {
		nouveau_pgraph_intr_notify(dev, nsource);
		status &= ~0x00000001;
		nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
	}

	/* Illegal method — routed through the error path with the
	 * ILLEGAL_MTHD cause forced on. */
	if (status & 0x00000010) {
		nouveau_pgraph_intr_error(dev, nsource |
					  NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD);

		status &= ~0x00000010;
		nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
	}

	/* Context switch: ack and mask the interrupt, toggle 0x400500
	 * around the switch, then let the switch code run. */
	if (status & 0x00001000) {
		nv_wr32(dev, 0x400500, 0x00000000);
		nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
		nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
			NV40_PGRAPH_INTR_EN) & ~NV_PGRAPH_INTR_CONTEXT_SWITCH);
		nv_wr32(dev, 0x400500, 0x00010001);

		nv50_graph_context_switch(dev);

		status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
	}

	/* Data error */
	if (status & 0x00100000) {
		nouveau_pgraph_intr_error(dev, nsource |
					  NV03_PGRAPH_NSOURCE_DATA_ERROR);

		status &= ~0x00100000;
		nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
	}

	/* Protection error: dump the two "magic" register sets.
	 * NOTE(review): 0x4089xx/0x409exx meanings are undocumented;
	 * OR-ing in 0xc0000000 appears to clear/reset the trap state —
	 * confirm against envytools/rnndb before touching this. */
	if (status & 0x00200000) {
		int r;

		nouveau_pgraph_intr_error(dev, nsource |
					  NV03_PGRAPH_NSOURCE_PROTECTION_ERROR);

		NV_ERROR(dev, "magic set 1:\n");
		for (r = 0x408900; r <= 0x408910; r += 4)
			NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
		nv_wr32(dev, 0x408900, nv_rd32(dev, 0x408904) | 0xc0000000);
		for (r = 0x408e08; r <= 0x408e24; r += 4)
			NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
		nv_wr32(dev, 0x408e08, nv_rd32(dev, 0x408e08) | 0xc0000000);

		NV_ERROR(dev, "magic set 2:\n");
		for (r = 0x409900; r <= 0x409910; r += 4)
			NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
		nv_wr32(dev, 0x409900, nv_rd32(dev, 0x409904) | 0xc0000000);
		for (r = 0x409e08; r <= 0x409e24; r += 4)
			NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
		nv_wr32(dev, 0x409e08, nv_rd32(dev, 0x409e08) | 0xc0000000);

		status &= ~0x00200000;
		nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource);
		nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
	}

	if (status) {
		NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status);
		nv_wr32(dev, NV03_PGRAPH_INTR, status);
	}

	{
		/* Restore bits 0 and 16 of 0x400500 if trap handling left
		 * them cleared. */
		const int isb = (1 << 16) | (1 << 0);

		if ((nv_rd32(dev, 0x400500) & isb) != isb)
			nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | isb);
	}

	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
}
642
/* Acknowledge VBLANK on the CRTC(s) indicated by the bitmask:
 * bit 0 = head 0, bit 1 = head 1. */
static void
nouveau_crtc_irq_handler(struct drm_device *dev, int crtc)
{
	if (crtc & 1)
		nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);

	if (crtc & 2)
		nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
}
652
/* Top-level interrupt handler: read PMC_INTR_0 and dispatch to per-unit
 * handlers.  fbcon is temporarily flagged HWACCEL_DISABLED so the console
 * won't touch the acceleration channel from interrupt context. */
irqreturn_t
nouveau_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *)arg;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t status, fbdev_flags = 0;

	status = nv_rd32(dev, NV03_PMC_INTR_0);
	if (!status)
		return IRQ_NONE;	/* not ours (shared IRQ line) */

	if (dev_priv->fbdev_info) {
		fbdev_flags = dev_priv->fbdev_info->flags;
		dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
	}

	if (status & NV_PMC_INTR_0_PFIFO_PENDING) {
		nouveau_fifo_irq_handler(dev);
		status &= ~NV_PMC_INTR_0_PFIFO_PENDING;
	}

	if (status & NV_PMC_INTR_0_PGRAPH_PENDING) {
		if (dev_priv->card_type >= NV_50)
			nv50_pgraph_irq_handler(dev);
		else
			nouveau_pgraph_irq_handler(dev);

		status &= ~NV_PMC_INTR_0_PGRAPH_PENDING;
	}

	if (status & NV_PMC_INTR_0_CRTCn_PENDING) {
		/* bits 25:24 say which CRTC(s) fired */
		nouveau_crtc_irq_handler(dev, (status>>24)&3);
		status &= ~NV_PMC_INTR_0_CRTCn_PENDING;
	}

	if (status & (NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
		      NV_PMC_INTR_0_NV50_I2C_PENDING)) {
		nv50_display_irq_handler(dev);
		status &= ~(NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
			    NV_PMC_INTR_0_NV50_I2C_PENDING);
	}

	if (status)
		NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status);

	/* Restore fbcon acceleration state saved above. */
	if (dev_priv->fbdev_info)
		dev_priv->fbdev_info->flags = fbdev_flags;

	return IRQ_HANDLED;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
new file mode 100644
index 000000000000..02755712ed3d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -0,0 +1,568 @@
1/*
2 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
3 * Copyright 2005 Stephane Marchesin
4 *
5 * The Weather Channel (TM) funded Tungsten Graphics to develop the
6 * initial release of the Radeon 8500 driver under the XFree86 license.
7 * This notice must be preserved.
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26 * DEALINGS IN THE SOFTWARE.
27 *
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 */
31
32
33#include "drmP.h"
34#include "drm.h"
35#include "drm_sarea.h"
36#include "nouveau_drv.h"
37
/* Carve [start, start+size) out of free block p, splitting off leading
 * and trailing remainders as new free blocks, and mark the result owned
 * by file_priv.
 *
 * NOTE(review): if a kmalloc fails we fall through to 'out' and hand out
 * the whole unsplit block — its start may then differ from the aligned
 * start the caller computed.  Confirm callers tolerate this. */
static struct mem_block *
split_block(struct mem_block *p, uint64_t start, uint64_t size,
	    struct drm_file *file_priv)
{
	/* Maybe cut off the start of an existing block */
	if (start > p->start) {
		struct mem_block *newblock =
			kmalloc(sizeof(*newblock), GFP_KERNEL);
		if (!newblock)
			goto out;
		newblock->start = start;
		newblock->size = p->size - (start - p->start);
		newblock->file_priv = NULL;
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size -= newblock->size;
		p = newblock;
	}

	/* Maybe cut off the end of an existing block */
	if (size < p->size) {
		struct mem_block *newblock =
			kmalloc(sizeof(*newblock), GFP_KERNEL);
		if (!newblock)
			goto out;
		newblock->start = start + size;
		newblock->size = p->size - size;
		newblock->file_priv = NULL;
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size = size;
	}

out:
	/* Our block is in the middle */
	p->file_priv = file_priv;
	return p;
}
80
81struct mem_block *
82nouveau_mem_alloc_block(struct mem_block *heap, uint64_t size,
83 int align2, struct drm_file *file_priv, int tail)
84{
85 struct mem_block *p;
86 uint64_t mask = (1 << align2) - 1;
87
88 if (!heap)
89 return NULL;
90
91 if (tail) {
92 list_for_each_prev(p, heap) {
93 uint64_t start = ((p->start + p->size) - size) & ~mask;
94
95 if (p->file_priv == NULL && start >= p->start &&
96 start + size <= p->start + p->size)
97 return split_block(p, start, size, file_priv);
98 }
99 } else {
100 list_for_each(p, heap) {
101 uint64_t start = (p->start + mask) & ~mask;
102
103 if (p->file_priv == NULL &&
104 start + size <= p->start + p->size)
105 return split_block(p, start, size, file_priv);
106 }
107 }
108
109 return NULL;
110}
111
/* Return a block to the heap, coalescing with free neighbours. */
void nouveau_mem_free_block(struct mem_block *p)
{
	p->file_priv = NULL;

	/* Assumes a single contiguous range. Needs a special file_priv in
	 * 'heap' to stop it being subsumed.
	 */
	if (p->next->file_priv == NULL) {
		/* Absorb the following free block into p. */
		struct mem_block *q = p->next;
		p->size += q->size;
		p->next = q->next;
		p->next->prev = p;
		kfree(q);
	}

	if (p->prev->file_priv == NULL) {
		/* Fold p into the preceding free block; p is freed here. */
		struct mem_block *q = p->prev;
		q->size += p->size;
		q->next = p->next;
		q->next->prev = q;
		kfree(p);
	}
}
135
136/* Initialize. How to check for an uninitialized heap?
137 */
138int nouveau_mem_init_heap(struct mem_block **heap, uint64_t start,
139 uint64_t size)
140{
141 struct mem_block *blocks = kmalloc(sizeof(*blocks), GFP_KERNEL);
142
143 if (!blocks)
144 return -ENOMEM;
145
146 *heap = kmalloc(sizeof(**heap), GFP_KERNEL);
147 if (!*heap) {
148 kfree(blocks);
149 return -ENOMEM;
150 }
151
152 blocks->start = start;
153 blocks->size = size;
154 blocks->file_priv = NULL;
155 blocks->next = blocks->prev = *heap;
156
157 memset(*heap, 0, sizeof(**heap));
158 (*heap)->file_priv = (struct drm_file *) -1;
159 (*heap)->next = (*heap)->prev = blocks;
160 return 0;
161}
162
/*
 * Free all blocks associated with the releasing file_priv
 */
void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap)
{
	struct mem_block *p;

	if (!heap || !heap->next)
		return;

	/* Pass 1: mark everything owned by file_priv as free. */
	list_for_each(p, heap) {
		if (p->file_priv == file_priv)
			p->file_priv = NULL;
	}

	/* Assumes a single contiguous range. Needs a special file_priv in
	 * 'heap' to stop it being subsumed.
	 */
	/* Pass 2: merge runs of adjacent free blocks, stopping at the
	 * sentinel head. */
	list_for_each(p, heap) {
		while ((p->file_priv == NULL) &&
				(p->next->file_priv == NULL) &&
				(p->next != heap)) {
			struct mem_block *q = p->next;
			p->size += q->size;
			p->next = q->next;
			p->next->prev = p;
			kfree(q);
		}
	}
}
193
/*
 * NV50 VM helpers
 */

/* Map a physically-linear range into the NV50 virtual address space,
 * writing two 32-bit PTE words per page.  Bit 31 of 'flags' selects the
 * unmap path (PTEs zeroed).  Returns 0, -EINVAL for an address outside
 * both the GART and VRAM windows, or -EBUSY on flush timeout. */
int
nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
			uint32_t flags, uint64_t phys)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj **pgt;
	unsigned psz, pfl, pages;

	/* Pick the page tables covering the range: GART uses 4KiB pages
	 * (psz=12), VRAM uses 64KiB pages (psz=16). */
	if (virt >= dev_priv->vm_gart_base &&
	    (virt + size) < (dev_priv->vm_gart_base + dev_priv->vm_gart_size)) {
		psz = 12;
		pgt = &dev_priv->gart_info.sg_ctxdma;
		pfl = 0x21;	/* low-word PTE flags for GART entries */
		virt -= dev_priv->vm_gart_base;
	} else
	if (virt >= dev_priv->vm_vram_base &&
	    (virt + size) < (dev_priv->vm_vram_base + dev_priv->vm_vram_size)) {
		psz = 16;
		pgt = dev_priv->vm_vram_pt;
		pfl = 0x01;	/* low-word PTE flags for VRAM entries */
		virt -= dev_priv->vm_vram_base;
	} else {
		NV_ERROR(dev, "Invalid address: 0x%16llx-0x%16llx\n",
			 virt, virt + size - 1);
		return -EINVAL;
	}

	pages = size >> psz;

	dev_priv->engine.instmem.prepare_access(dev, true);
	if (flags & 0x80000000) {
		/* Unmap: zero both PTE words for every page. */
		while (pages--) {
			/* Each page table covers a 512MiB (29-bit) slice. */
			struct nouveau_gpuobj *pt = pgt[virt >> 29];
			unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1;

			nv_wo32(dev, pt, pte++, 0x00000000);
			nv_wo32(dev, pt, pte++, 0x00000000);

			virt += (1 << psz);
		}
	} else {
		while (pages--) {
			struct nouveau_gpuobj *pt = pgt[virt >> 29];
			unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1;
			unsigned offset_h = upper_32_bits(phys) & 0xff;
			unsigned offset_l = lower_32_bits(phys);

			nv_wo32(dev, pt, pte++, offset_l | pfl);
			nv_wo32(dev, pt, pte++, offset_h | flags);

			phys += (1 << psz);
			virt += (1 << psz);
		}
	}
	dev_priv->engine.instmem.finish_access(dev);

	/* Flush the VM; poll 0x100c80 bit 0 for completion.
	 * NOTE(review): both flush writes print the same "(2)" timeout
	 * message — likely a copy/paste; meaning of the two magic values
	 * is undocumented here. */
	nv_wr32(dev, 0x100c80, 0x00050001);
	if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
		NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
		NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
		return -EBUSY;
	}

	nv_wr32(dev, 0x100c80, 0x00000001);
	if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
		NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
		NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
		return -EBUSY;
	}

	return 0;
}
269
/* Unmap a VM range: flag bit 31 selects the PTE-zeroing path in
 * bind_linear.  Any flush error is ignored. */
void
nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
{
	nv50_mem_vm_bind_linear(dev, virt, size, 0x80000000, 0);
}
275
276/*
277 * Cleanup everything
278 */
279void nouveau_mem_takedown(struct mem_block **heap)
280{
281 struct mem_block *p;
282
283 if (!*heap)
284 return;
285
286 for (p = (*heap)->next; p != *heap;) {
287 struct mem_block *q = p;
288 p = p->next;
289 kfree(q);
290 }
291
292 kfree(*heap);
293 *heap = NULL;
294}
295
/* Driver-unload teardown of memory management: destroy the TTM memory
 * pools and device, release the TTM globals, drop AGP memory and the
 * acquisition (modesetting path only), and remove the FB MTRR.  Order
 * matters: TTM users must be gone before the backing apertures. */
void nouveau_mem_close(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (dev_priv->ttm.bdev.man[TTM_PL_PRIV0].has_type)
		ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_PRIV0);
	ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);

	ttm_bo_device_release(&dev_priv->ttm.bdev);

	nouveau_ttm_global_release(dev_priv);

	if (drm_core_has_AGP(dev) && dev->agp &&
	    drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct drm_agp_mem *entry, *tempe;

		/* Remove AGP resources, but leave dev->agp
		   intact until drv_cleanup is called. */
		list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
			if (entry->bound)
				drm_unbind_agp(entry->memory);
			drm_free_agp(entry->memory, entry->pages);
			kfree(entry);
		}
		INIT_LIST_HEAD(&dev->agp->memory);

		if (dev->agp->acquired)
			drm_agp_release(dev);

		dev->agp->acquired = 0;
		dev->agp->enabled = 0;
	}

	if (dev_priv->fb_mtrr) {
		drm_mtrr_del(dev_priv->fb_mtrr, drm_get_resource_start(dev, 1),
			     drm_get_resource_len(dev, 1), DRM_MTRR_WC);
		dev_priv->fb_mtrr = 0;
	}
}
335
336/*XXX won't work on BSD because of pci_read_config_dword */
337static uint32_t
338nouveau_mem_fb_amount_igp(struct drm_device *dev)
339{
340 struct drm_nouveau_private *dev_priv = dev->dev_private;
341 struct pci_dev *bridge;
342 uint32_t mem;
343
344 bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
345 if (!bridge) {
346 NV_ERROR(dev, "no bridge device\n");
347 return 0;
348 }
349
350 if (dev_priv->flags&NV_NFORCE) {
351 pci_read_config_dword(bridge, 0x7C, &mem);
352 return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024;
353 } else
354 if (dev_priv->flags&NV_NFORCE2) {
355 pci_read_config_dword(bridge, 0x84, &mem);
356 return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024;
357 }
358
359 NV_ERROR(dev, "impossible!\n");
360 return 0;
361}
362
363/* returns the amount of FB ram in bytes */
364uint64_t nouveau_mem_fb_amount(struct drm_device *dev)
365{
366 struct drm_nouveau_private *dev_priv = dev->dev_private;
367 uint32_t boot0;
368
369 switch (dev_priv->card_type) {
370 case NV_04:
371 boot0 = nv_rd32(dev, NV03_BOOT_0);
372 if (boot0 & 0x00000100)
373 return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024 * 1024;
374
375 switch (boot0 & NV03_BOOT_0_RAM_AMOUNT) {
376 case NV04_BOOT_0_RAM_AMOUNT_32MB:
377 return 32 * 1024 * 1024;
378 case NV04_BOOT_0_RAM_AMOUNT_16MB:
379 return 16 * 1024 * 1024;
380 case NV04_BOOT_0_RAM_AMOUNT_8MB:
381 return 8 * 1024 * 1024;
382 case NV04_BOOT_0_RAM_AMOUNT_4MB:
383 return 4 * 1024 * 1024;
384 }
385 break;
386 case NV_10:
387 case NV_20:
388 case NV_30:
389 case NV_40:
390 case NV_50:
391 default:
392 if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) {
393 return nouveau_mem_fb_amount_igp(dev);
394 } else {
395 uint64_t mem;
396 mem = (nv_rd32(dev, NV04_FIFO_DATA) &
397 NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK) >>
398 NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT;
399 return mem * 1024 * 1024;
400 }
401 break;
402 }
403
404 NV_ERROR(dev,
405 "Unable to detect video ram size. Please report your setup to "
406 DRIVER_EMAIL "\n");
407 return 0;
408}
409
410static void nouveau_mem_reset_agp(struct drm_device *dev)
411{
412 uint32_t saved_pci_nv_1, saved_pci_nv_19, pmc_enable;
413
414 saved_pci_nv_1 = nv_rd32(dev, NV04_PBUS_PCI_NV_1);
415 saved_pci_nv_19 = nv_rd32(dev, NV04_PBUS_PCI_NV_19);
416
417 /* clear busmaster bit */
418 nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~0x4);
419 /* clear SBA and AGP bits */
420 nv_wr32(dev, NV04_PBUS_PCI_NV_19, saved_pci_nv_19 & 0xfffff0ff);
421
422 /* power cycle pgraph, if enabled */
423 pmc_enable = nv_rd32(dev, NV03_PMC_ENABLE);
424 if (pmc_enable & NV_PMC_ENABLE_PGRAPH) {
425 nv_wr32(dev, NV03_PMC_ENABLE,
426 pmc_enable & ~NV_PMC_ENABLE_PGRAPH);
427 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
428 NV_PMC_ENABLE_PGRAPH);
429 }
430
431 /* and restore (gives effect of resetting AGP) */
432 nv_wr32(dev, NV04_PBUS_PCI_NV_19, saved_pci_nv_19);
433 nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1);
434}
435
436int
437nouveau_mem_init_agp(struct drm_device *dev)
438{
439 struct drm_nouveau_private *dev_priv = dev->dev_private;
440 struct drm_agp_info info;
441 struct drm_agp_mode mode;
442 int ret;
443
444 if (nouveau_noagp)
445 return 0;
446
447 nouveau_mem_reset_agp(dev);
448
449 if (!dev->agp->acquired) {
450 ret = drm_agp_acquire(dev);
451 if (ret) {
452 NV_ERROR(dev, "Unable to acquire AGP: %d\n", ret);
453 return ret;
454 }
455 }
456
457 ret = drm_agp_info(dev, &info);
458 if (ret) {
459 NV_ERROR(dev, "Unable to get AGP info: %d\n", ret);
460 return ret;
461 }
462
463 /* see agp.h for the AGPSTAT_* modes available */
464 mode.mode = info.mode;
465 ret = drm_agp_enable(dev, mode);
466 if (ret) {
467 NV_ERROR(dev, "Unable to enable AGP: %d\n", ret);
468 return ret;
469 }
470
471 dev_priv->gart_info.type = NOUVEAU_GART_AGP;
472 dev_priv->gart_info.aper_base = info.aperture_base;
473 dev_priv->gart_info.aper_size = info.aperture_size;
474 return 0;
475}
476
477int
478nouveau_mem_init(struct drm_device *dev)
479{
480 struct drm_nouveau_private *dev_priv = dev->dev_private;
481 struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
482 int ret, dma_bits = 32;
483
484 dev_priv->fb_phys = drm_get_resource_start(dev, 1);
485 dev_priv->gart_info.type = NOUVEAU_GART_NONE;
486
487 if (dev_priv->card_type >= NV_50 &&
488 pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
489 dma_bits = 40;
490
491 ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
492 if (ret) {
493 NV_ERROR(dev, "Error setting DMA mask: %d\n", ret);
494 return ret;
495 }
496
497 ret = nouveau_ttm_global_init(dev_priv);
498 if (ret)
499 return ret;
500
501 ret = ttm_bo_device_init(&dev_priv->ttm.bdev,
502 dev_priv->ttm.bo_global_ref.ref.object,
503 &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
504 dma_bits <= 32 ? true : false);
505 if (ret) {
506 NV_ERROR(dev, "Error initialising bo driver: %d\n", ret);
507 return ret;
508 }
509
510 INIT_LIST_HEAD(&dev_priv->ttm.bo_list);
511 spin_lock_init(&dev_priv->ttm.bo_list_lock);
512
513 dev_priv->fb_available_size = nouveau_mem_fb_amount(dev);
514
515 dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
516 if (dev_priv->fb_mappable_pages > drm_get_resource_len(dev, 1))
517 dev_priv->fb_mappable_pages = drm_get_resource_len(dev, 1);
518 dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
519
520 NV_INFO(dev, "%d MiB VRAM\n", (int)(dev_priv->fb_available_size >> 20));
521
522 /* remove reserved space at end of vram from available amount */
523 dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
524 dev_priv->fb_aper_free = dev_priv->fb_available_size;
525
526 /* mappable vram */
527 ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
528 dev_priv->fb_available_size >> PAGE_SHIFT);
529 if (ret) {
530 NV_ERROR(dev, "Failed VRAM mm init: %d\n", ret);
531 return ret;
532 }
533
534 /* GART */
535#if !defined(__powerpc__) && !defined(__ia64__)
536 if (drm_device_is_agp(dev) && dev->agp) {
537 ret = nouveau_mem_init_agp(dev);
538 if (ret)
539 NV_ERROR(dev, "Error initialising AGP: %d\n", ret);
540 }
541#endif
542
543 if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) {
544 ret = nouveau_sgdma_init(dev);
545 if (ret) {
546 NV_ERROR(dev, "Error initialising PCI(E): %d\n", ret);
547 return ret;
548 }
549 }
550
551 NV_INFO(dev, "%d MiB GART (aperture)\n",
552 (int)(dev_priv->gart_info.aper_size >> 20));
553 dev_priv->gart_info.aper_free = dev_priv->gart_info.aper_size;
554
555 ret = ttm_bo_init_mm(bdev, TTM_PL_TT,
556 dev_priv->gart_info.aper_size >> PAGE_SHIFT);
557 if (ret) {
558 NV_ERROR(dev, "Failed TT mm init: %d\n", ret);
559 return ret;
560 }
561
562 dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1),
563 drm_get_resource_len(dev, 1),
564 DRM_MTRR_WC);
565 return 0;
566}
567
568
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
new file mode 100644
index 000000000000..6c66a34b6345
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -0,0 +1,196 @@
1/*
2 * Copyright (C) 2007 Ben Skeggs.
3 *
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sublicense, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial
16 * portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 */
27
28#include "drmP.h"
29#include "drm.h"
30#include "nouveau_drv.h"
31
/* Allocate, pin and CPU-map the per-channel notifier buffer object, and
 * initialise a heap over it so individual notifiers can be suballocated
 * later by nouveau_notifier_alloc().
 *
 * The BO is created in VRAM or GART depending on the "vram_notify"
 * module option.  Returns 0 on success or a negative errno.
 */
int
nouveau_notifier_init_channel(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct nouveau_bo *ntfy = NULL;
	int ret;

	ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, nouveau_vram_notify ?
			      TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT,
			      0, 0x0000, false, true, &ntfy);
	if (ret)
		return ret;

	/* NOTE(review): the BO is always pinned to VRAM here, even when it
	 * was created with TTM_PL_FLAG_TT above — confirm this is intended. */
	ret = nouveau_bo_pin(ntfy, TTM_PL_FLAG_VRAM);
	if (ret)
		goto out_err;

	ret = nouveau_bo_map(ntfy);
	if (ret)
		goto out_err;

	/* heap manages byte offsets within the notifier BO */
	ret = nouveau_mem_init_heap(&chan->notifier_heap, 0, ntfy->bo.mem.size);
	if (ret)
		goto out_err;

	chan->notifier_bo = ntfy;
out_err:
	/* On failure, drop the GEM reference to tear the BO down.
	 * NOTE(review): the pin/map above are not explicitly undone here —
	 * verify the final unreference handles that cleanup. */
	if (ret) {
		mutex_lock(&dev->struct_mutex);
		drm_gem_object_unreference(ntfy->gem);
		mutex_unlock(&dev->struct_mutex);
	}

	return ret;
}
67
68void
69nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
70{
71 struct drm_device *dev = chan->dev;
72
73 if (!chan->notifier_bo)
74 return;
75
76 nouveau_bo_unmap(chan->notifier_bo);
77 mutex_lock(&dev->struct_mutex);
78 nouveau_bo_unpin(chan->notifier_bo);
79 drm_gem_object_unreference(chan->notifier_bo->gem);
80 mutex_unlock(&dev->struct_mutex);
81 nouveau_mem_takedown(&chan->notifier_heap);
82}
83
84static void
85nouveau_notifier_gpuobj_dtor(struct drm_device *dev,
86 struct nouveau_gpuobj *gpuobj)
87{
88 NV_DEBUG(dev, "\n");
89
90 if (gpuobj->priv)
91 nouveau_mem_free_block(gpuobj->priv);
92}
93
94int
95nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
96 int size, uint32_t *b_offset)
97{
98 struct drm_device *dev = chan->dev;
99 struct drm_nouveau_private *dev_priv = dev->dev_private;
100 struct nouveau_gpuobj *nobj = NULL;
101 struct mem_block *mem;
102 uint32_t offset;
103 int target, ret;
104
105 if (!chan->notifier_heap) {
106 NV_ERROR(dev, "Channel %d doesn't have a notifier heap!\n",
107 chan->id);
108 return -EINVAL;
109 }
110
111 mem = nouveau_mem_alloc_block(chan->notifier_heap, size, 0,
112 (struct drm_file *)-2, 0);
113 if (!mem) {
114 NV_ERROR(dev, "Channel %d notifier block full\n", chan->id);
115 return -ENOMEM;
116 }
117
118 offset = chan->notifier_bo->bo.mem.mm_node->start << PAGE_SHIFT;
119 if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM) {
120 target = NV_DMA_TARGET_VIDMEM;
121 } else
122 if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_TT) {
123 if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA &&
124 dev_priv->card_type < NV_50) {
125 ret = nouveau_sgdma_get_page(dev, offset, &offset);
126 if (ret)
127 return ret;
128 target = NV_DMA_TARGET_PCI;
129 } else {
130 target = NV_DMA_TARGET_AGP;
131 }
132 } else {
133 NV_ERROR(dev, "Bad DMA target, mem_type %d!\n",
134 chan->notifier_bo->bo.mem.mem_type);
135 return -EINVAL;
136 }
137 offset += mem->start;
138
139 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset,
140 mem->size, NV_DMA_ACCESS_RW, target,
141 &nobj);
142 if (ret) {
143 nouveau_mem_free_block(mem);
144 NV_ERROR(dev, "Error creating notifier ctxdma: %d\n", ret);
145 return ret;
146 }
147 nobj->dtor = nouveau_notifier_gpuobj_dtor;
148 nobj->priv = mem;
149
150 ret = nouveau_gpuobj_ref_add(dev, chan, handle, nobj, NULL);
151 if (ret) {
152 nouveau_gpuobj_del(dev, &nobj);
153 nouveau_mem_free_block(mem);
154 NV_ERROR(dev, "Error referencing notifier ctxdma: %d\n", ret);
155 return ret;
156 }
157
158 *b_offset = mem->start;
159 return 0;
160}
161
162int
163nouveau_notifier_offset(struct nouveau_gpuobj *nobj, uint32_t *poffset)
164{
165 if (!nobj || nobj->dtor != nouveau_notifier_gpuobj_dtor)
166 return -EINVAL;
167
168 if (poffset) {
169 struct mem_block *mem = nobj->priv;
170
171 if (*poffset >= mem->size)
172 return false;
173
174 *poffset += mem->start;
175 }
176
177 return 0;
178}
179
180int
181nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,
182 struct drm_file *file_priv)
183{
184 struct drm_nouveau_notifierobj_alloc *na = data;
185 struct nouveau_channel *chan;
186 int ret;
187
188 NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
189 NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(na->channel, file_priv, chan);
190
191 ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset);
192 if (ret)
193 return ret;
194
195 return 0;
196}
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
new file mode 100644
index 000000000000..93379bb81bea
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -0,0 +1,1294 @@
1/*
2 * Copyright (C) 2006 Ben Skeggs.
3 *
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sublicense, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial
16 * portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 */
27
28/*
29 * Authors:
30 * Ben Skeggs <darktama@iinet.net.au>
31 */
32
33#include "drmP.h"
34#include "drm.h"
35#include "nouveau_drv.h"
36#include "nouveau_drm.h"
37
38/* NVidia uses context objects to drive drawing operations.
39
40 Context objects can be selected into 8 subchannels in the FIFO,
41 and then used via DMA command buffers.
42
43 A context object is referenced by a user defined handle (CARD32). The HW
44 looks up graphics objects in a hash table in the instance RAM.
45
46 An entry in the hash table consists of 2 CARD32. The first CARD32 contains
47 the handle, the second one a bitfield, that contains the address of the
48 object in instance RAM.
49
50 The format of the second CARD32 seems to be:
51
52 NV4 to NV30:
53
54 15: 0 instance_addr >> 4
55 17:16 engine (here uses 1 = graphics)
56 28:24 channel id (here uses 0)
57 31 valid (use 1)
58
59 NV40:
60
61 15: 0 instance_addr >> 4 (maybe 19-0)
62 21:20 engine (here uses 1 = graphics)
63 I'm unsure about the other bits, but using 0 seems to work.
64
65 The key into the hash table depends on the object handle and channel id and
66 is given as:
67*/
68static uint32_t
69nouveau_ramht_hash_handle(struct drm_device *dev, int channel, uint32_t handle)
70{
71 struct drm_nouveau_private *dev_priv = dev->dev_private;
72 uint32_t hash = 0;
73 int i;
74
75 NV_DEBUG(dev, "ch%d handle=0x%08x\n", channel, handle);
76
77 for (i = 32; i > 0; i -= dev_priv->ramht_bits) {
78 hash ^= (handle & ((1 << dev_priv->ramht_bits) - 1));
79 handle >>= dev_priv->ramht_bits;
80 }
81
82 if (dev_priv->card_type < NV_50)
83 hash ^= channel << (dev_priv->ramht_bits - 4);
84 hash <<= 3;
85
86 NV_DEBUG(dev, "hash=0x%08x\n", hash);
87 return hash;
88}
89
90static int
91nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht,
92 uint32_t offset)
93{
94 struct drm_nouveau_private *dev_priv = dev->dev_private;
95 uint32_t ctx = nv_ro32(dev, ramht, (offset + 4)/4);
96
97 if (dev_priv->card_type < NV_40)
98 return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0);
99 return (ctx != 0);
100}
101
/* Insert "ref" into the channel's RAMHT so the hardware can resolve
 * ref->handle to the object's instance address.
 *
 * The context-word layout differs per generation (see the comment block
 * above); collisions are resolved by linear probing in 8-byte steps,
 * wrapping at ramht_size.  On success the ref is also appended to
 * chan->ramht_refs.  Returns 0 on success, -EINVAL without a hash table,
 * -ENOMEM when the table is full.
 */
static int
nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	struct nouveau_channel *chan = ref->channel;
	struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL;
	uint32_t ctx, co, ho;

	if (!ramht) {
		NV_ERROR(dev, "No hash table!\n");
		return -EINVAL;
	}

	/* build the generation-specific context word */
	if (dev_priv->card_type < NV_40) {
		ctx = NV_RAMHT_CONTEXT_VALID | (ref->instance >> 4) |
		      (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
		      (ref->gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
	} else
	if (dev_priv->card_type < NV_50) {
		ctx = (ref->instance >> 4) |
		      (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
		      (ref->gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
	} else {
		if (ref->gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
			ctx = (ref->instance << 10) | 2;
		} else {
			ctx = (ref->instance >> 4) |
			      ((ref->gpuobj->engine <<
				NV40_RAMHT_CONTEXT_ENGINE_SHIFT));
		}
	}

	instmem->prepare_access(dev, true);
	co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle);
	do {
		if (!nouveau_ramht_entry_valid(dev, ramht, co)) {
			NV_DEBUG(dev,
				 "insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
				 chan->id, co, ref->handle, ctx);
			/* entry = { handle, context } */
			nv_wo32(dev, ramht, (co + 0)/4, ref->handle);
			nv_wo32(dev, ramht, (co + 4)/4, ctx);

			list_add_tail(&ref->list, &chan->ramht_refs);
			instmem->finish_access(dev);
			return 0;
		}
		NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n",
			 chan->id, co, nv_ro32(dev, ramht, co/4));

		/* linear probe to the next 8-byte slot, wrapping at end */
		co += 8;
		if (co >= dev_priv->ramht_size)
			co = 0;
	} while (co != ho);
	instmem->finish_access(dev);

	NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id);
	return -ENOMEM;
}
161
/* Remove the RAMHT entry for "ref" and unlink it from the channel's
 * ramht_refs list.  Probes from the entry's hash slot, matching on the
 * stored handle; if no matching entry is found the ref is still
 * unlinked (it was added by nouveau_ramht_insert) and an error logged.
 */
static void
nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	struct nouveau_channel *chan = ref->channel;
	struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL;
	uint32_t co, ho;

	if (!ramht) {
		NV_ERROR(dev, "No hash table!\n");
		return;
	}

	instmem->prepare_access(dev, true);
	co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle);
	do {
		if (nouveau_ramht_entry_valid(dev, ramht, co) &&
		    (ref->handle == nv_ro32(dev, ramht, (co/4)))) {
			/* NOTE(review): the insert path reads the context
			 * word at (co + 4)/4; the missing "/4" below looks
			 * like a debug-print bug — confirm. */
			NV_DEBUG(dev,
				 "remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
				 chan->id, co, ref->handle,
				 nv_ro32(dev, ramht, (co + 4)));
			/* clear both words of the entry */
			nv_wo32(dev, ramht, (co + 0)/4, 0x00000000);
			nv_wo32(dev, ramht, (co + 4)/4, 0x00000000);

			list_del(&ref->list);
			instmem->finish_access(dev);
			return;
		}

		/* linear probe, mirroring the insert path */
		co += 8;
		if (co >= dev_priv->ramht_size)
			co = 0;
	} while (co != ho);
	/* not found: unlink anyway so the ref can still be freed */
	list_del(&ref->list);
	instmem->finish_access(dev);

	NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n",
		 chan->id, ref->handle);
}
203
204int
205nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
206 uint32_t size, int align, uint32_t flags,
207 struct nouveau_gpuobj **gpuobj_ret)
208{
209 struct drm_nouveau_private *dev_priv = dev->dev_private;
210 struct nouveau_engine *engine = &dev_priv->engine;
211 struct nouveau_gpuobj *gpuobj;
212 struct mem_block *pramin = NULL;
213 int ret;
214
215 NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
216 chan ? chan->id : -1, size, align, flags);
217
218 if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL)
219 return -EINVAL;
220
221 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
222 if (!gpuobj)
223 return -ENOMEM;
224 NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
225 gpuobj->flags = flags;
226 gpuobj->im_channel = chan;
227
228 list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
229
230 /* Choose between global instmem heap, and per-channel private
231 * instmem heap. On <NV50 allow requests for private instmem
232 * to be satisfied from global heap if no per-channel area
233 * available.
234 */
235 if (chan) {
236 if (chan->ramin_heap) {
237 NV_DEBUG(dev, "private heap\n");
238 pramin = chan->ramin_heap;
239 } else
240 if (dev_priv->card_type < NV_50) {
241 NV_DEBUG(dev, "global heap fallback\n");
242 pramin = dev_priv->ramin_heap;
243 }
244 } else {
245 NV_DEBUG(dev, "global heap\n");
246 pramin = dev_priv->ramin_heap;
247 }
248
249 if (!pramin) {
250 NV_ERROR(dev, "No PRAMIN heap!\n");
251 return -EINVAL;
252 }
253
254 if (!chan) {
255 ret = engine->instmem.populate(dev, gpuobj, &size);
256 if (ret) {
257 nouveau_gpuobj_del(dev, &gpuobj);
258 return ret;
259 }
260 }
261
262 /* Allocate a chunk of the PRAMIN aperture */
263 gpuobj->im_pramin = nouveau_mem_alloc_block(pramin, size,
264 drm_order(align),
265 (struct drm_file *)-2, 0);
266 if (!gpuobj->im_pramin) {
267 nouveau_gpuobj_del(dev, &gpuobj);
268 return -ENOMEM;
269 }
270
271 if (!chan) {
272 ret = engine->instmem.bind(dev, gpuobj);
273 if (ret) {
274 nouveau_gpuobj_del(dev, &gpuobj);
275 return ret;
276 }
277 }
278
279 if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
280 int i;
281
282 engine->instmem.prepare_access(dev, true);
283 for (i = 0; i < gpuobj->im_pramin->size; i += 4)
284 nv_wo32(dev, gpuobj, i/4, 0);
285 engine->instmem.finish_access(dev);
286 }
287
288 *gpuobj_ret = gpuobj;
289 return 0;
290}
291
292int
293nouveau_gpuobj_early_init(struct drm_device *dev)
294{
295 struct drm_nouveau_private *dev_priv = dev->dev_private;
296
297 NV_DEBUG(dev, "\n");
298
299 INIT_LIST_HEAD(&dev_priv->gpuobj_list);
300
301 return 0;
302}
303
304int
305nouveau_gpuobj_init(struct drm_device *dev)
306{
307 struct drm_nouveau_private *dev_priv = dev->dev_private;
308 int ret;
309
310 NV_DEBUG(dev, "\n");
311
312 if (dev_priv->card_type < NV_50) {
313 ret = nouveau_gpuobj_new_fake(dev,
314 dev_priv->ramht_offset, ~0, dev_priv->ramht_size,
315 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ALLOW_NO_REFS,
316 &dev_priv->ramht, NULL);
317 if (ret)
318 return ret;
319 }
320
321 return 0;
322}
323
324void
325nouveau_gpuobj_takedown(struct drm_device *dev)
326{
327 struct drm_nouveau_private *dev_priv = dev->dev_private;
328
329 NV_DEBUG(dev, "\n");
330
331 nouveau_gpuobj_del(dev, &dev_priv->ramht);
332}
333
334void
335nouveau_gpuobj_late_takedown(struct drm_device *dev)
336{
337 struct drm_nouveau_private *dev_priv = dev->dev_private;
338 struct nouveau_gpuobj *gpuobj = NULL;
339 struct list_head *entry, *tmp;
340
341 NV_DEBUG(dev, "\n");
342
343 list_for_each_safe(entry, tmp, &dev_priv->gpuobj_list) {
344 gpuobj = list_entry(entry, struct nouveau_gpuobj, list);
345
346 NV_ERROR(dev, "gpuobj %p still exists at takedown, refs=%d\n",
347 gpuobj, gpuobj->refcount);
348 gpuobj->refcount = 0;
349 nouveau_gpuobj_del(dev, &gpuobj);
350 }
351}
352
/* Destroy a gpuobj: optionally scrub its PRAMIN contents, run the dtor
 * callback, release instmem backing and the PRAMIN block, unlink it from
 * gpuobj_list and free it.  Refuses (-EINVAL) while references remain.
 */
int
nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	struct nouveau_gpuobj *gpuobj;
	int i;

	NV_DEBUG(dev, "gpuobj %p\n", pgpuobj ? *pgpuobj : NULL);

	if (!dev_priv || !pgpuobj || !(*pgpuobj))
		return -EINVAL;
	gpuobj = *pgpuobj;

	if (gpuobj->refcount != 0) {
		NV_ERROR(dev, "gpuobj refcount is %d\n", gpuobj->refcount);
		return -EINVAL;
	}

	/* zero the object's instance memory if requested at creation */
	if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
		engine->instmem.prepare_access(dev, true);
		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
			nv_wo32(dev, gpuobj, i/4, 0);
		engine->instmem.finish_access(dev);
	}

	/* give the creator a chance to release private data (see e.g.
	 * nouveau_notifier_gpuobj_dtor) */
	if (gpuobj->dtor)
		gpuobj->dtor(dev, gpuobj);

	/* fake objects never own real instmem backing */
	if (gpuobj->im_backing && !(gpuobj->flags & NVOBJ_FLAG_FAKE))
		engine->instmem.clear(dev, gpuobj);

	if (gpuobj->im_pramin) {
		/* fake objects carry a kzalloc'd mem_block rather than a
		 * real heap allocation (see nouveau_gpuobj_new_fake) */
		if (gpuobj->flags & NVOBJ_FLAG_FAKE)
			kfree(gpuobj->im_pramin);
		else
			nouveau_mem_free_block(gpuobj->im_pramin);
	}

	list_del(&gpuobj->list);

	*pgpuobj = NULL;
	kfree(gpuobj);
	return 0;
}
398
399static int
400nouveau_gpuobj_instance_get(struct drm_device *dev,
401 struct nouveau_channel *chan,
402 struct nouveau_gpuobj *gpuobj, uint32_t *inst)
403{
404 struct drm_nouveau_private *dev_priv = dev->dev_private;
405 struct nouveau_gpuobj *cpramin;
406
407 /* <NV50 use PRAMIN address everywhere */
408 if (dev_priv->card_type < NV_50) {
409 *inst = gpuobj->im_pramin->start;
410 return 0;
411 }
412
413 if (chan && gpuobj->im_channel != chan) {
414 NV_ERROR(dev, "Channel mismatch: obj %d, ref %d\n",
415 gpuobj->im_channel->id, chan->id);
416 return -EINVAL;
417 }
418
419 /* NV50 channel-local instance */
420 if (chan) {
421 cpramin = chan->ramin->gpuobj;
422 *inst = gpuobj->im_pramin->start - cpramin->im_pramin->start;
423 return 0;
424 }
425
426 /* NV50 global (VRAM) instance */
427 if (!gpuobj->im_channel) {
428 /* ...from global heap */
429 if (!gpuobj->im_backing) {
430 NV_ERROR(dev, "AII, no VRAM backing gpuobj\n");
431 return -EINVAL;
432 }
433 *inst = gpuobj->im_backing_start;
434 return 0;
435 } else {
436 /* ...from local heap */
437 cpramin = gpuobj->im_channel->ramin->gpuobj;
438 *inst = cpramin->im_backing_start +
439 (gpuobj->im_pramin->start - cpramin->im_pramin->start);
440 return 0;
441 }
442
443 return -EINVAL;
444}
445
446int
447nouveau_gpuobj_ref_add(struct drm_device *dev, struct nouveau_channel *chan,
448 uint32_t handle, struct nouveau_gpuobj *gpuobj,
449 struct nouveau_gpuobj_ref **ref_ret)
450{
451 struct drm_nouveau_private *dev_priv = dev->dev_private;
452 struct nouveau_gpuobj_ref *ref;
453 uint32_t instance;
454 int ret;
455
456 NV_DEBUG(dev, "ch%d h=0x%08x gpuobj=%p\n",
457 chan ? chan->id : -1, handle, gpuobj);
458
459 if (!dev_priv || !gpuobj || (ref_ret && *ref_ret != NULL))
460 return -EINVAL;
461
462 if (!chan && !ref_ret)
463 return -EINVAL;
464
465 if (gpuobj->engine == NVOBJ_ENGINE_SW && !gpuobj->im_pramin) {
466 /* sw object */
467 instance = 0x40;
468 } else {
469 ret = nouveau_gpuobj_instance_get(dev, chan, gpuobj, &instance);
470 if (ret)
471 return ret;
472 }
473
474 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
475 if (!ref)
476 return -ENOMEM;
477 INIT_LIST_HEAD(&ref->list);
478 ref->gpuobj = gpuobj;
479 ref->channel = chan;
480 ref->instance = instance;
481
482 if (!ref_ret) {
483 ref->handle = handle;
484
485 ret = nouveau_ramht_insert(dev, ref);
486 if (ret) {
487 kfree(ref);
488 return ret;
489 }
490 } else {
491 ref->handle = ~0;
492 *ref_ret = ref;
493 }
494
495 ref->gpuobj->refcount++;
496 return 0;
497}
498
499int nouveau_gpuobj_ref_del(struct drm_device *dev, struct nouveau_gpuobj_ref **pref)
500{
501 struct nouveau_gpuobj_ref *ref;
502
503 NV_DEBUG(dev, "ref %p\n", pref ? *pref : NULL);
504
505 if (!dev || !pref || *pref == NULL)
506 return -EINVAL;
507 ref = *pref;
508
509 if (ref->handle != ~0)
510 nouveau_ramht_remove(dev, ref);
511
512 if (ref->gpuobj) {
513 ref->gpuobj->refcount--;
514
515 if (ref->gpuobj->refcount == 0) {
516 if (!(ref->gpuobj->flags & NVOBJ_FLAG_ALLOW_NO_REFS))
517 nouveau_gpuobj_del(dev, &ref->gpuobj);
518 }
519 }
520
521 *pref = NULL;
522 kfree(ref);
523 return 0;
524}
525
526int
527nouveau_gpuobj_new_ref(struct drm_device *dev,
528 struct nouveau_channel *oc, struct nouveau_channel *rc,
529 uint32_t handle, uint32_t size, int align,
530 uint32_t flags, struct nouveau_gpuobj_ref **ref)
531{
532 struct nouveau_gpuobj *gpuobj = NULL;
533 int ret;
534
535 ret = nouveau_gpuobj_new(dev, oc, size, align, flags, &gpuobj);
536 if (ret)
537 return ret;
538
539 ret = nouveau_gpuobj_ref_add(dev, rc, handle, gpuobj, ref);
540 if (ret) {
541 nouveau_gpuobj_del(dev, &gpuobj);
542 return ret;
543 }
544
545 return 0;
546}
547
548int
549nouveau_gpuobj_ref_find(struct nouveau_channel *chan, uint32_t handle,
550 struct nouveau_gpuobj_ref **ref_ret)
551{
552 struct nouveau_gpuobj_ref *ref;
553 struct list_head *entry, *tmp;
554
555 list_for_each_safe(entry, tmp, &chan->ramht_refs) {
556 ref = list_entry(entry, struct nouveau_gpuobj_ref, list);
557
558 if (ref->handle == handle) {
559 if (ref_ret)
560 *ref_ret = ref;
561 return 0;
562 }
563 }
564
565 return -EINVAL;
566}
567
568int
569nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset,
570 uint32_t b_offset, uint32_t size,
571 uint32_t flags, struct nouveau_gpuobj **pgpuobj,
572 struct nouveau_gpuobj_ref **pref)
573{
574 struct drm_nouveau_private *dev_priv = dev->dev_private;
575 struct nouveau_gpuobj *gpuobj = NULL;
576 int i;
577
578 NV_DEBUG(dev,
579 "p_offset=0x%08x b_offset=0x%08x size=0x%08x flags=0x%08x\n",
580 p_offset, b_offset, size, flags);
581
582 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
583 if (!gpuobj)
584 return -ENOMEM;
585 NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
586 gpuobj->im_channel = NULL;
587 gpuobj->flags = flags | NVOBJ_FLAG_FAKE;
588
589 list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
590
591 if (p_offset != ~0) {
592 gpuobj->im_pramin = kzalloc(sizeof(struct mem_block),
593 GFP_KERNEL);
594 if (!gpuobj->im_pramin) {
595 nouveau_gpuobj_del(dev, &gpuobj);
596 return -ENOMEM;
597 }
598 gpuobj->im_pramin->start = p_offset;
599 gpuobj->im_pramin->size = size;
600 }
601
602 if (b_offset != ~0) {
603 gpuobj->im_backing = (struct nouveau_bo *)-1;
604 gpuobj->im_backing_start = b_offset;
605 }
606
607 if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
608 dev_priv->engine.instmem.prepare_access(dev, true);
609 for (i = 0; i < gpuobj->im_pramin->size; i += 4)
610 nv_wo32(dev, gpuobj, i/4, 0);
611 dev_priv->engine.instmem.finish_access(dev);
612 }
613
614 if (pref) {
615 i = nouveau_gpuobj_ref_add(dev, NULL, 0, gpuobj, pref);
616 if (i) {
617 nouveau_gpuobj_del(dev, &gpuobj);
618 return i;
619 }
620 }
621
622 if (pgpuobj)
623 *pgpuobj = gpuobj;
624 return 0;
625}
626
627
628static uint32_t
629nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class)
630{
631 struct drm_nouveau_private *dev_priv = dev->dev_private;
632
633 /*XXX: dodgy hack for now */
634 if (dev_priv->card_type >= NV_50)
635 return 24;
636 if (dev_priv->card_type >= NV_40)
637 return 32;
638 return 16;
639}
640
641/*
642 DMA objects are used to reference a piece of memory in the
643 framebuffer, PCI or AGP address space. Each object is 16 bytes big
644 and looks as follows:
645
646 entry[0]
647 11:0 class (seems like I can always use 0 here)
648 12 page table present?
649 13 page entry linear?
650 15:14 access: 0 rw, 1 ro, 2 wo
651 17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP
652 31:20 dma adjust (bits 0-11 of the address)
653 entry[1]
654 dma limit (size of transfer)
655 entry[X]
656 1 0 readonly, 1 readwrite
657 31:12 dma frame address of the page (bits 12-31 of the address)
658 entry[N]
659 page table terminator, same value as the first pte, as does nvidia
660 rivatv uses 0xffffffff
661
662 Non linear page tables need a list of frame addresses afterwards,
663 the rivatv project has some info on this.
664
665 The method below creates a DMA object in instance RAM and returns a handle
666 to it that can be used to set up context objects.
667*/
/* Create a DMA context object (ctxdma) covering [offset, offset+size) in
 * the given target address space, using the entry layout described in
 * the comment block above.  On success *gpuobj holds the new object.
 * Returns 0 on success or a negative errno.
 */
int
nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
		       uint64_t offset, uint64_t size, int access,
		       int target, struct nouveau_gpuobj **gpuobj)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	int ret;

	NV_DEBUG(dev, "ch%d class=0x%04x offset=0x%llx size=0x%llx\n",
		 chan->id, class, offset, size);
	NV_DEBUG(dev, "access=%d target=%d\n", access, target);

	/* AGP offsets are given relative to the aperture base */
	switch (target) {
	case NV_DMA_TARGET_AGP:
		offset += dev_priv->gart_info.aper_base;
		break;
	default:
		break;
	}

	ret = nouveau_gpuobj_new(dev, chan,
				 nouveau_gpuobj_class_instmem_size(dev, class),
				 16, NVOBJ_FLAG_ZERO_ALLOC |
				 NVOBJ_FLAG_ZERO_FREE, gpuobj);
	if (ret) {
		NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
		return ret;
	}

	instmem->prepare_access(dev, true);

	if (dev_priv->card_type < NV_50) {
		uint32_t frame, adjust, pte_flags = 0;

		/* bit 1 of the pte marks the mapping writable */
		if (access != NV_DMA_ACCESS_RO)
			pte_flags |= (1<<1);
		adjust = offset &  0x00000fff;	/* low 12 bits -> dma adjust */
		frame  = offset & ~0x00000fff;	/* page-aligned frame address */

		/* word 0: present | linear | adjust | access | target | class
		 * word 1: dma limit; words 2/3: first pte + terminator */
		nv_wo32(dev, *gpuobj, 0, ((1<<12) | (1<<13) |
				(adjust << 20) |
				 (access << 14) |
				 (target << 16) |
				  class));
		nv_wo32(dev, *gpuobj, 1, size - 1);
		nv_wo32(dev, *gpuobj, 2, frame | pte_flags);
		nv_wo32(dev, *gpuobj, 3, frame | pte_flags);
	} else {
		uint64_t limit = offset + size - 1;
		uint32_t flags0, flags5;

		/* NOTE(review): magic flag values; 0x00190000/0x00010000 for
		 * VRAM, 0x7fc00000/0x00080000 otherwise — presumably from
		 * reverse engineering, semantics not documented here */
		if (target == NV_DMA_TARGET_VIDMEM) {
			flags0 = 0x00190000;
			flags5 = 0x00010000;
		} else {
			flags0 = 0x7fc00000;
			flags5 = 0x00080000;
		}

		/* NV50 layout: 40-bit base/limit split across words 1-3 */
		nv_wo32(dev, *gpuobj, 0, flags0 | class);
		nv_wo32(dev, *gpuobj, 1, lower_32_bits(limit));
		nv_wo32(dev, *gpuobj, 2, lower_32_bits(offset));
		nv_wo32(dev, *gpuobj, 3, ((upper_32_bits(limit) & 0xff) << 24) |
					  (upper_32_bits(offset) & 0xff));
		nv_wo32(dev, *gpuobj, 5, flags5);
	}

	instmem->finish_access(dev);

	(*gpuobj)->engine = NVOBJ_ENGINE_SW;
	(*gpuobj)->class = class;
	return 0;
}
743
/* Create (or reuse) a ctxdma covering a range of the GART aperture.
 *
 * For AGP, and for SGDMA on NV50+, a fresh DMA object is created and
 * *o_ret is set to 0.  For pre-NV50 SGDMA the global sg_ctxdma object is
 * reused and *o_ret carries the offset into it.
 * Returns 0 on success or a negative errno.
 */
int
nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
			    uint64_t offset, uint64_t size, int access,
			    struct nouveau_gpuobj **gpuobj,
			    uint32_t *o_ret)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	if (dev_priv->gart_info.type == NOUVEAU_GART_AGP ||
	    (dev_priv->card_type >= NV_50 &&
	     dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) {
		/* NOTE(review): offset is biased by vm_gart_base here, and
		 * the AGP target additionally adds aper_base inside
		 * nouveau_gpuobj_dma_new — confirm this double bias is
		 * intended for the true-AGP case. */
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     offset + dev_priv->vm_gart_base,
					     size, access, NV_DMA_TARGET_AGP,
					     gpuobj);
		if (o_ret)
			*o_ret = 0;
	} else
	if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
		/* reuse the global sg ctxdma; offsets must fit in 32 bits */
		*gpuobj = dev_priv->gart_info.sg_ctxdma;
		if (offset & ~0xffffffffULL) {
			NV_ERROR(dev, "obj offset exceeds 32-bits\n");
			return -EINVAL;
		}
		if (o_ret)
			*o_ret = (uint32_t)offset;
		ret = (*gpuobj != NULL) ? 0 : -EINVAL;
	} else {
		NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
		return -EINVAL;
	}

	return ret;
}
780
781/* Context objects in the instance RAM have the following structure.
 * On NV40 they are 32 bytes long; on NV30 and earlier, 16 bytes.
783
784 NV4 - NV30:
785
786 entry[0]
787 11:0 class
788 12 chroma key enable
789 13 user clip enable
790 14 swizzle enable
791 17:15 patch config:
792 scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre
793 18 synchronize enable
794 19 endian: 1 big, 0 little
795 21:20 dither mode
796 23 single step enable
797 24 patch status: 0 invalid, 1 valid
798 25 context_surface 0: 1 valid
799 26 context surface 1: 1 valid
800 27 context pattern: 1 valid
801 28 context rop: 1 valid
802 29,30 context beta, beta4
803 entry[1]
804 7:0 mono format
805 15:8 color format
806 31:16 notify instance address
807 entry[2]
808 15:0 dma 0 instance address
809 31:16 dma 1 instance address
810 entry[3]
811 dma method traps
812
813 NV40:
 No idea what the exact format is. Here's what can be deduced:
815
816 entry[0]:
817 11:0 class (maybe uses more bits here?)
818 17 user clip enable
819 21:19 patch config
820 25 patch status valid ?
821 entry[1]:
822 15:0 DMA notifier (maybe 20:0)
823 entry[2]:
824 15:0 DMA 0 instance (maybe 20:0)
825 24 big endian
826 entry[3]:
827 15:0 DMA 1 instance (maybe 20:0)
828 entry[4]:
829 entry[5]:
830 set to 0?
831*/
832int
833nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
834 struct nouveau_gpuobj **gpuobj)
835{
836 struct drm_device *dev = chan->dev;
837 struct drm_nouveau_private *dev_priv = dev->dev_private;
838 int ret;
839
840 NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);
841
842 ret = nouveau_gpuobj_new(dev, chan,
843 nouveau_gpuobj_class_instmem_size(dev, class),
844 16,
845 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
846 gpuobj);
847 if (ret) {
848 NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
849 return ret;
850 }
851
852 dev_priv->engine.instmem.prepare_access(dev, true);
853 if (dev_priv->card_type >= NV_50) {
854 nv_wo32(dev, *gpuobj, 0, class);
855 nv_wo32(dev, *gpuobj, 5, 0x00010000);
856 } else {
857 switch (class) {
858 case NV_CLASS_NULL:
859 nv_wo32(dev, *gpuobj, 0, 0x00001030);
860 nv_wo32(dev, *gpuobj, 1, 0xFFFFFFFF);
861 break;
862 default:
863 if (dev_priv->card_type >= NV_40) {
864 nv_wo32(dev, *gpuobj, 0, class);
865#ifdef __BIG_ENDIAN
866 nv_wo32(dev, *gpuobj, 2, 0x01000000);
867#endif
868 } else {
869#ifdef __BIG_ENDIAN
870 nv_wo32(dev, *gpuobj, 0, class | 0x00080000);
871#else
872 nv_wo32(dev, *gpuobj, 0, class);
873#endif
874 }
875 }
876 }
877 dev_priv->engine.instmem.finish_access(dev);
878
879 (*gpuobj)->engine = NVOBJ_ENGINE_GR;
880 (*gpuobj)->class = class;
881 return 0;
882}
883
884static int
885nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
886 struct nouveau_gpuobj **gpuobj_ret)
887{
888 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
889 struct nouveau_gpuobj *gpuobj;
890
891 if (!chan || !gpuobj_ret || *gpuobj_ret != NULL)
892 return -EINVAL;
893
894 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
895 if (!gpuobj)
896 return -ENOMEM;
897 gpuobj->engine = NVOBJ_ENGINE_SW;
898 gpuobj->class = class;
899
900 list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
901 *gpuobj_ret = gpuobj;
902 return 0;
903}
904
905static int
906nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
907{
908 struct drm_device *dev = chan->dev;
909 struct drm_nouveau_private *dev_priv = dev->dev_private;
910 struct nouveau_gpuobj *pramin = NULL;
911 uint32_t size;
912 uint32_t base;
913 int ret;
914
915 NV_DEBUG(dev, "ch%d\n", chan->id);
916
917 /* Base amount for object storage (4KiB enough?) */
918 size = 0x1000;
919 base = 0;
920
921 /* PGRAPH context */
922
923 if (dev_priv->card_type == NV_50) {
924 /* Various fixed table thingos */
925 size += 0x1400; /* mostly unknown stuff */
926 size += 0x4000; /* vm pd */
927 base = 0x6000;
928 /* RAMHT, not sure about setting size yet, 32KiB to be safe */
929 size += 0x8000;
930 /* RAMFC */
931 size += 0x1000;
932 /* PGRAPH context */
933 size += 0x70000;
934 }
935
936 NV_DEBUG(dev, "ch%d PRAMIN size: 0x%08x bytes, base alloc=0x%08x\n",
937 chan->id, size, base);
938 ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, size, 0x1000, 0,
939 &chan->ramin);
940 if (ret) {
941 NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret);
942 return ret;
943 }
944 pramin = chan->ramin->gpuobj;
945
946 ret = nouveau_mem_init_heap(&chan->ramin_heap,
947 pramin->im_pramin->start + base, size);
948 if (ret) {
949 NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
950 nouveau_gpuobj_ref_del(dev, &chan->ramin);
951 return ret;
952 }
953
954 return 0;
955}
956
/* nouveau_gpuobj_channel_init():
 * Build the per-channel object state: reserve PRAMIN (NV50), create the
 * channel's page directory and hook up the GART/VRAM page tables (NV50+),
 * attach or allocate RAMHT, and create the VRAM and TT DMA context
 * objects referenced under the caller-supplied handles vram_h and tt_h.
 *
 * Returns 0 on success or a negative errno.  Note: objects created in
 * earlier steps are not torn down here on failure; cleanup is left to
 * nouveau_gpuobj_channel_takedown() — TODO confirm callers always do so.
 */
int
nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
			    uint32_t vram_h, uint32_t tt_h)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	struct nouveau_gpuobj *vram = NULL, *tt = NULL;
	int ret, i;

	INIT_LIST_HEAD(&chan->ramht_refs);

	NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);

	/* Reserve a block of PRAMIN for the channel
	 *XXX: maybe on <NV50 too at some point
	 */
	if (0 || dev_priv->card_type == NV_50) {
		ret = nouveau_gpuobj_channel_init_pramin(chan);
		if (ret) {
			NV_ERROR(dev, "init pramin\n");
			return ret;
		}
	}

	/* NV50 VM
	 * - Allocate per-channel page-directory
	 * - Map GART and VRAM into the channel's address space at the
	 * locations determined during init.
	 */
	if (dev_priv->card_type >= NV_50) {
		uint32_t vm_offset, pde;

		instmem->prepare_access(dev, true);

		/* PD location inside the channel's PRAMIN block differs on
		 * original NV50 (chipset 0x5x) vs later chips. */
		vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 0x1400 : 0x200;
		vm_offset += chan->ramin->gpuobj->im_pramin->start;

		ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000,
					      0, &chan->vm_pd, NULL);
		if (ret) {
			instmem->finish_access(dev);
			return ret;
		}
		/* Fill the 0x4000-byte PD with 8-byte entries: low word 0,
		 * high word 0xdeadcafe (presumably an "invalid entry" poison
		 * marker — TODO confirm). */
		for (i = 0; i < 0x4000; i += 8) {
			nv_wo32(dev, chan->vm_pd, (i+0)/4, 0x00000000);
			nv_wo32(dev, chan->vm_pd, (i+4)/4, 0xdeadcafe);
		}

		/* Point the PDE covering the GART window at the shared
		 * SGDMA page table.  Each PDE covers 512MiB and occupies
		 * two 32-bit words. */
		pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 2;
		ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
					     dev_priv->gart_info.sg_ctxdma,
					     &chan->vm_gart_pt);
		if (ret) {
			instmem->finish_access(dev);
			return ret;
		}
		nv_wo32(dev, chan->vm_pd, pde++,
			chan->vm_gart_pt->instance | 0x03);
		nv_wo32(dev, chan->vm_pd, pde++, 0x00000000);

		/* Likewise map every global VRAM page table into this
		 * channel's PD. */
		pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 2;
		for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
			ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
						     dev_priv->vm_vram_pt[i],
						     &chan->vm_vram_pt[i]);
			if (ret) {
				instmem->finish_access(dev);
				return ret;
			}

			nv_wo32(dev, chan->vm_pd, pde++,
				chan->vm_vram_pt[i]->instance | 0x61);
			nv_wo32(dev, chan->vm_pd, pde++, 0x00000000);
		}

		instmem->finish_access(dev);
	}

	/* RAMHT: shared global table pre-NV50, per-channel 32KiB on NV50+. */
	if (dev_priv->card_type < NV_50) {
		ret = nouveau_gpuobj_ref_add(dev, NULL, 0, dev_priv->ramht,
					     &chan->ramht);
		if (ret)
			return ret;
	} else {
		ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0,
					     0x8000, 16,
					     NVOBJ_FLAG_ZERO_ALLOC,
					     &chan->ramht);
		if (ret)
			return ret;
	}

	/* VRAM ctxdma.  On NV50+ this goes through the channel VM (hence
	 * the AGP target and vm_end limit); pre-NV50 it addresses VRAM
	 * directly. */
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->vm_end,
					     NV_DMA_ACCESS_RW,
					     NV_DMA_TARGET_AGP, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	} else {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->fb_available_size,
					     NV_DMA_ACCESS_RW,
					     NV_DMA_TARGET_VIDMEM, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	}

	ret = nouveau_gpuobj_ref_add(dev, chan, vram_h, vram, NULL);
	if (ret) {
		NV_ERROR(dev, "Error referencing VRAM ctxdma: %d\n", ret);
		return ret;
	}

	/* TT memory ctxdma.  On NV50+ the VM-backed VRAM ctxdma already
	 * covers GART too, so it is simply reused. */
	if (dev_priv->card_type >= NV_50) {
		tt = vram;
	} else
	if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
		ret = nouveau_gpuobj_gart_dma_new(chan, 0,
						  dev_priv->gart_info.aper_size,
						  NV_DMA_ACCESS_RW, &tt, NULL);
	} else {
		NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
		ret = -EINVAL;
	}

	if (ret) {
		NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
		return ret;
	}

	ret = nouveau_gpuobj_ref_add(dev, chan, tt_h, tt, NULL);
	if (ret) {
		NV_ERROR(dev, "Error referencing TT ctxdma: %d\n", ret);
		return ret;
	}

	return 0;
}
1104
/* nouveau_gpuobj_channel_takedown():
 * Release everything nouveau_gpuobj_channel_init() created for a
 * channel: all RAMHT object references, the RAMHT itself, the NV50 page
 * directory and page-table refs, and finally the PRAMIN heap and block.
 * Safe to call on a partially-initialised channel.
 */
void
nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_device *dev = chan->dev;
	struct list_head *entry, *tmp;
	struct nouveau_gpuobj_ref *ref;
	int i;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	/* ramht_refs is only initialised by channel_init(); a NULL next
	 * pointer means init never ran, so there is nothing to free. */
	if (!chan->ramht_refs.next)
		return;

	/* _safe variant: ref_del presumably unlinks the entry — TODO
	 * confirm against nouveau_gpuobj_ref_del(). */
	list_for_each_safe(entry, tmp, &chan->ramht_refs) {
		ref = list_entry(entry, struct nouveau_gpuobj_ref, list);

		nouveau_gpuobj_ref_del(dev, &ref);
	}

	nouveau_gpuobj_ref_del(dev, &chan->ramht);

	nouveau_gpuobj_del(dev, &chan->vm_pd);
	nouveau_gpuobj_ref_del(dev, &chan->vm_gart_pt);
	for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
		nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]);

	/* Heap must go before the PRAMIN block that backs it. */
	if (chan->ramin_heap)
		nouveau_mem_takedown(&chan->ramin_heap);
	if (chan->ramin)
		nouveau_gpuobj_ref_del(dev, &chan->ramin);

}
1138
1139int
1140nouveau_gpuobj_suspend(struct drm_device *dev)
1141{
1142 struct drm_nouveau_private *dev_priv = dev->dev_private;
1143 struct nouveau_gpuobj *gpuobj;
1144 int i;
1145
1146 if (dev_priv->card_type < NV_50) {
1147 dev_priv->susres.ramin_copy = vmalloc(dev_priv->ramin_rsvd_vram);
1148 if (!dev_priv->susres.ramin_copy)
1149 return -ENOMEM;
1150
1151 for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
1152 dev_priv->susres.ramin_copy[i/4] = nv_ri32(dev, i);
1153 return 0;
1154 }
1155
1156 list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
1157 if (!gpuobj->im_backing || (gpuobj->flags & NVOBJ_FLAG_FAKE))
1158 continue;
1159
1160 gpuobj->im_backing_suspend = vmalloc(gpuobj->im_pramin->size);
1161 if (!gpuobj->im_backing_suspend) {
1162 nouveau_gpuobj_resume(dev);
1163 return -ENOMEM;
1164 }
1165
1166 dev_priv->engine.instmem.prepare_access(dev, false);
1167 for (i = 0; i < gpuobj->im_pramin->size / 4; i++)
1168 gpuobj->im_backing_suspend[i] = nv_ro32(dev, gpuobj, i);
1169 dev_priv->engine.instmem.finish_access(dev);
1170 }
1171
1172 return 0;
1173}
1174
1175void
1176nouveau_gpuobj_suspend_cleanup(struct drm_device *dev)
1177{
1178 struct drm_nouveau_private *dev_priv = dev->dev_private;
1179 struct nouveau_gpuobj *gpuobj;
1180
1181 if (dev_priv->card_type < NV_50) {
1182 vfree(dev_priv->susres.ramin_copy);
1183 dev_priv->susres.ramin_copy = NULL;
1184 return;
1185 }
1186
1187 list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
1188 if (!gpuobj->im_backing_suspend)
1189 continue;
1190
1191 vfree(gpuobj->im_backing_suspend);
1192 gpuobj->im_backing_suspend = NULL;
1193 }
1194}
1195
1196void
1197nouveau_gpuobj_resume(struct drm_device *dev)
1198{
1199 struct drm_nouveau_private *dev_priv = dev->dev_private;
1200 struct nouveau_gpuobj *gpuobj;
1201 int i;
1202
1203 if (dev_priv->card_type < NV_50) {
1204 for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
1205 nv_wi32(dev, i, dev_priv->susres.ramin_copy[i/4]);
1206 nouveau_gpuobj_suspend_cleanup(dev);
1207 return;
1208 }
1209
1210 list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
1211 if (!gpuobj->im_backing_suspend)
1212 continue;
1213
1214 dev_priv->engine.instmem.prepare_access(dev, true);
1215 for (i = 0; i < gpuobj->im_pramin->size / 4; i++)
1216 nv_wo32(dev, gpuobj, i, gpuobj->im_backing_suspend[i]);
1217 dev_priv->engine.instmem.finish_access(dev);
1218 }
1219
1220 nouveau_gpuobj_suspend_cleanup(dev);
1221}
1222
1223int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
1224 struct drm_file *file_priv)
1225{
1226 struct drm_nouveau_private *dev_priv = dev->dev_private;
1227 struct drm_nouveau_grobj_alloc *init = data;
1228 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
1229 struct nouveau_pgraph_object_class *grc;
1230 struct nouveau_gpuobj *gr = NULL;
1231 struct nouveau_channel *chan;
1232 int ret;
1233
1234 NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
1235 NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan);
1236
1237 if (init->handle == ~0)
1238 return -EINVAL;
1239
1240 grc = pgraph->grclass;
1241 while (grc->id) {
1242 if (grc->id == init->class)
1243 break;
1244 grc++;
1245 }
1246
1247 if (!grc->id) {
1248 NV_ERROR(dev, "Illegal object class: 0x%x\n", init->class);
1249 return -EPERM;
1250 }
1251
1252 if (nouveau_gpuobj_ref_find(chan, init->handle, NULL) == 0)
1253 return -EEXIST;
1254
1255 if (!grc->software)
1256 ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr);
1257 else
1258 ret = nouveau_gpuobj_sw_new(chan, grc->id, &gr);
1259
1260 if (ret) {
1261 NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
1262 ret, init->channel, init->handle);
1263 return ret;
1264 }
1265
1266 ret = nouveau_gpuobj_ref_add(dev, chan, init->handle, gr, NULL);
1267 if (ret) {
1268 NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n",
1269 ret, init->channel, init->handle);
1270 nouveau_gpuobj_del(dev, &gr);
1271 return ret;
1272 }
1273
1274 return 0;
1275}
1276
1277int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
1278 struct drm_file *file_priv)
1279{
1280 struct drm_nouveau_gpuobj_free *objfree = data;
1281 struct nouveau_gpuobj_ref *ref;
1282 struct nouveau_channel *chan;
1283 int ret;
1284
1285 NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
1286 NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan);
1287
1288 ret = nouveau_gpuobj_ref_find(chan, objfree->handle, &ref);
1289 if (ret)
1290 return ret;
1291 nouveau_gpuobj_ref_del(dev, &ref);
1292
1293 return 0;
1294}
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
new file mode 100644
index 000000000000..fa1b0e7165b9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -0,0 +1,836 @@
1
2
3#define NV03_BOOT_0 0x00100000
4# define NV03_BOOT_0_RAM_AMOUNT 0x00000003
5# define NV03_BOOT_0_RAM_AMOUNT_8MB 0x00000000
6# define NV03_BOOT_0_RAM_AMOUNT_2MB 0x00000001
7# define NV03_BOOT_0_RAM_AMOUNT_4MB 0x00000002
8# define NV03_BOOT_0_RAM_AMOUNT_8MB_SDRAM 0x00000003
9# define NV04_BOOT_0_RAM_AMOUNT_32MB 0x00000000
10# define NV04_BOOT_0_RAM_AMOUNT_4MB 0x00000001
11# define NV04_BOOT_0_RAM_AMOUNT_8MB 0x00000002
12# define NV04_BOOT_0_RAM_AMOUNT_16MB 0x00000003
13
14#define NV04_FIFO_DATA 0x0010020c
15# define NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK 0xfff00000
16# define NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT 20
17
18#define NV_RAMIN 0x00700000
19
20#define NV_RAMHT_HANDLE_OFFSET 0
21#define NV_RAMHT_CONTEXT_OFFSET 4
22# define NV_RAMHT_CONTEXT_VALID (1<<31)
23# define NV_RAMHT_CONTEXT_CHANNEL_SHIFT 24
24# define NV_RAMHT_CONTEXT_ENGINE_SHIFT 16
25# define NV_RAMHT_CONTEXT_ENGINE_SOFTWARE 0
26# define NV_RAMHT_CONTEXT_ENGINE_GRAPHICS 1
27# define NV_RAMHT_CONTEXT_INSTANCE_SHIFT 0
28# define NV40_RAMHT_CONTEXT_CHANNEL_SHIFT 23
29# define NV40_RAMHT_CONTEXT_ENGINE_SHIFT 20
30# define NV40_RAMHT_CONTEXT_INSTANCE_SHIFT 0
31
32/* DMA object defines */
33#define NV_DMA_ACCESS_RW 0
34#define NV_DMA_ACCESS_RO 1
35#define NV_DMA_ACCESS_WO 2
36#define NV_DMA_TARGET_VIDMEM 0
37#define NV_DMA_TARGET_PCI 2
38#define NV_DMA_TARGET_AGP 3
39/* The following is not a real value used by the card, it's changed by
40 * nouveau_object_dma_create */
41#define NV_DMA_TARGET_PCI_NONLINEAR 8
42
43/* Some object classes we care about in the drm */
44#define NV_CLASS_DMA_FROM_MEMORY 0x00000002
45#define NV_CLASS_DMA_TO_MEMORY 0x00000003
46#define NV_CLASS_NULL 0x00000030
47#define NV_CLASS_DMA_IN_MEMORY 0x0000003D
48
49#define NV03_USER(i) (0x00800000+(i*NV03_USER_SIZE))
50#define NV03_USER__SIZE 16
51#define NV10_USER__SIZE 32
52#define NV03_USER_SIZE 0x00010000
53#define NV03_USER_DMA_PUT(i) (0x00800040+(i*NV03_USER_SIZE))
54#define NV03_USER_DMA_PUT__SIZE 16
55#define NV10_USER_DMA_PUT__SIZE 32
56#define NV03_USER_DMA_GET(i) (0x00800044+(i*NV03_USER_SIZE))
57#define NV03_USER_DMA_GET__SIZE 16
58#define NV10_USER_DMA_GET__SIZE 32
59#define NV03_USER_REF_CNT(i) (0x00800048+(i*NV03_USER_SIZE))
60#define NV03_USER_REF_CNT__SIZE 16
61#define NV10_USER_REF_CNT__SIZE 32
62
63#define NV40_USER(i) (0x00c00000+(i*NV40_USER_SIZE))
64#define NV40_USER_SIZE 0x00001000
65#define NV40_USER_DMA_PUT(i) (0x00c00040+(i*NV40_USER_SIZE))
66#define NV40_USER_DMA_PUT__SIZE 32
67#define NV40_USER_DMA_GET(i) (0x00c00044+(i*NV40_USER_SIZE))
68#define NV40_USER_DMA_GET__SIZE 32
69#define NV40_USER_REF_CNT(i) (0x00c00048+(i*NV40_USER_SIZE))
70#define NV40_USER_REF_CNT__SIZE 32
71
72#define NV50_USER(i) (0x00c00000+(i*NV50_USER_SIZE))
73#define NV50_USER_SIZE 0x00002000
74#define NV50_USER_DMA_PUT(i) (0x00c00040+(i*NV50_USER_SIZE))
75#define NV50_USER_DMA_PUT__SIZE 128
76#define NV50_USER_DMA_GET(i) (0x00c00044+(i*NV50_USER_SIZE))
77#define NV50_USER_DMA_GET__SIZE 128
78#define NV50_USER_REF_CNT(i) (0x00c00048+(i*NV50_USER_SIZE))
79#define NV50_USER_REF_CNT__SIZE 128
80
81#define NV03_FIFO_SIZE 0x8000UL
82
83#define NV03_PMC_BOOT_0 0x00000000
84#define NV03_PMC_BOOT_1 0x00000004
85#define NV03_PMC_INTR_0 0x00000100
86# define NV_PMC_INTR_0_PFIFO_PENDING (1<<8)
87# define NV_PMC_INTR_0_PGRAPH_PENDING (1<<12)
88# define NV_PMC_INTR_0_NV50_I2C_PENDING (1<<21)
89# define NV_PMC_INTR_0_CRTC0_PENDING (1<<24)
90# define NV_PMC_INTR_0_CRTC1_PENDING (1<<25)
91# define NV_PMC_INTR_0_NV50_DISPLAY_PENDING (1<<26)
92# define NV_PMC_INTR_0_CRTCn_PENDING (3<<24)
93#define NV03_PMC_INTR_EN_0 0x00000140
94# define NV_PMC_INTR_EN_0_MASTER_ENABLE (1<<0)
95#define NV03_PMC_ENABLE 0x00000200
96# define NV_PMC_ENABLE_PFIFO (1<<8)
97# define NV_PMC_ENABLE_PGRAPH (1<<12)
98/* Disabling the below bit breaks newer (G7X only?) mobile chipsets,
99 * the card will hang early on in the X init process.
100 */
101# define NV_PMC_ENABLE_UNK13 (1<<13)
102#define NV40_PMC_BACKLIGHT 0x000015f0
103# define NV40_PMC_BACKLIGHT_MASK 0x001f0000
104#define NV40_PMC_1700 0x00001700
105#define NV40_PMC_1704 0x00001704
106#define NV40_PMC_1708 0x00001708
107#define NV40_PMC_170C 0x0000170C
108
109/* probably PMC ? */
110#define NV50_PUNK_BAR0_PRAMIN 0x00001700
111#define NV50_PUNK_BAR_CFG_BASE 0x00001704
112#define NV50_PUNK_BAR_CFG_BASE_VALID (1<<30)
113#define NV50_PUNK_BAR1_CTXDMA 0x00001708
114#define NV50_PUNK_BAR1_CTXDMA_VALID (1<<31)
115#define NV50_PUNK_BAR3_CTXDMA 0x0000170C
116#define NV50_PUNK_BAR3_CTXDMA_VALID (1<<31)
117#define NV50_PUNK_UNK1710 0x00001710
118
119#define NV04_PBUS_PCI_NV_1 0x00001804
120#define NV04_PBUS_PCI_NV_19 0x0000184C
121#define NV04_PBUS_PCI_NV_20 0x00001850
122# define NV04_PBUS_PCI_NV_20_ROM_SHADOW_DISABLED (0 << 0)
123# define NV04_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED (1 << 0)
124
125#define NV04_PTIMER_INTR_0 0x00009100
126#define NV04_PTIMER_INTR_EN_0 0x00009140
127#define NV04_PTIMER_NUMERATOR 0x00009200
128#define NV04_PTIMER_DENOMINATOR 0x00009210
129#define NV04_PTIMER_TIME_0 0x00009400
130#define NV04_PTIMER_TIME_1 0x00009410
131#define NV04_PTIMER_ALARM_0 0x00009420
132
133#define NV04_PFB_CFG0 0x00100200
134#define NV04_PFB_CFG1 0x00100204
135#define NV40_PFB_020C 0x0010020C
136#define NV10_PFB_TILE(i) (0x00100240 + (i*16))
137#define NV10_PFB_TILE__SIZE 8
138#define NV10_PFB_TLIMIT(i) (0x00100244 + (i*16))
139#define NV10_PFB_TSIZE(i) (0x00100248 + (i*16))
140#define NV10_PFB_TSTATUS(i) (0x0010024C + (i*16))
141#define NV10_PFB_CLOSE_PAGE2 0x0010033C
142#define NV40_PFB_TILE(i) (0x00100600 + (i*16))
143#define NV40_PFB_TILE__SIZE_0 12
144#define NV40_PFB_TILE__SIZE_1 15
145#define NV40_PFB_TLIMIT(i) (0x00100604 + (i*16))
146#define NV40_PFB_TSIZE(i) (0x00100608 + (i*16))
147#define NV40_PFB_TSTATUS(i) (0x0010060C + (i*16))
148#define NV40_PFB_UNK_800 0x00100800
149
150#define NV04_PGRAPH_DEBUG_0 0x00400080
151#define NV04_PGRAPH_DEBUG_1 0x00400084
152#define NV04_PGRAPH_DEBUG_2 0x00400088
153#define NV04_PGRAPH_DEBUG_3 0x0040008c
154#define NV10_PGRAPH_DEBUG_4 0x00400090
155#define NV03_PGRAPH_INTR 0x00400100
156#define NV03_PGRAPH_NSTATUS 0x00400104
157# define NV04_PGRAPH_NSTATUS_STATE_IN_USE (1<<11)
158# define NV04_PGRAPH_NSTATUS_INVALID_STATE (1<<12)
159# define NV04_PGRAPH_NSTATUS_BAD_ARGUMENT (1<<13)
160# define NV04_PGRAPH_NSTATUS_PROTECTION_FAULT (1<<14)
161# define NV10_PGRAPH_NSTATUS_STATE_IN_USE (1<<23)
162# define NV10_PGRAPH_NSTATUS_INVALID_STATE (1<<24)
163# define NV10_PGRAPH_NSTATUS_BAD_ARGUMENT (1<<25)
164# define NV10_PGRAPH_NSTATUS_PROTECTION_FAULT (1<<26)
165#define NV03_PGRAPH_NSOURCE 0x00400108
166# define NV03_PGRAPH_NSOURCE_NOTIFICATION (1<<0)
167# define NV03_PGRAPH_NSOURCE_DATA_ERROR (1<<1)
168# define NV03_PGRAPH_NSOURCE_PROTECTION_ERROR (1<<2)
169# define NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION (1<<3)
170# define NV03_PGRAPH_NSOURCE_LIMIT_COLOR (1<<4)
171# define NV03_PGRAPH_NSOURCE_LIMIT_ZETA (1<<5)
172# define NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD (1<<6)
173# define NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION (1<<7)
174# define NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION (1<<8)
175# define NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION (1<<9)
176# define NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION (1<<10)
177# define NV03_PGRAPH_NSOURCE_STATE_INVALID (1<<11)
178# define NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY (1<<12)
179# define NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE (1<<13)
180# define NV03_PGRAPH_NSOURCE_METHOD_CNT (1<<14)
181# define NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION (1<<15)
182# define NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION (1<<16)
183# define NV03_PGRAPH_NSOURCE_DMA_WIDTH_A (1<<17)
184# define NV03_PGRAPH_NSOURCE_DMA_WIDTH_B (1<<18)
185#define NV03_PGRAPH_INTR_EN 0x00400140
186#define NV40_PGRAPH_INTR_EN 0x0040013C
187# define NV_PGRAPH_INTR_NOTIFY (1<<0)
188# define NV_PGRAPH_INTR_MISSING_HW (1<<4)
189# define NV_PGRAPH_INTR_CONTEXT_SWITCH (1<<12)
190# define NV_PGRAPH_INTR_BUFFER_NOTIFY (1<<16)
191# define NV_PGRAPH_INTR_ERROR (1<<20)
192#define NV10_PGRAPH_CTX_CONTROL 0x00400144
193#define NV10_PGRAPH_CTX_USER 0x00400148
194#define NV10_PGRAPH_CTX_SWITCH1 0x0040014C
195#define NV10_PGRAPH_CTX_SWITCH2 0x00400150
196#define NV10_PGRAPH_CTX_SWITCH3 0x00400154
197#define NV10_PGRAPH_CTX_SWITCH4 0x00400158
198#define NV10_PGRAPH_CTX_SWITCH5 0x0040015C
199#define NV04_PGRAPH_CTX_SWITCH1 0x00400160
200#define NV10_PGRAPH_CTX_CACHE1 0x00400160
201#define NV04_PGRAPH_CTX_SWITCH2 0x00400164
202#define NV04_PGRAPH_CTX_SWITCH3 0x00400168
203#define NV04_PGRAPH_CTX_SWITCH4 0x0040016C
204#define NV04_PGRAPH_CTX_CONTROL 0x00400170
205#define NV04_PGRAPH_CTX_USER 0x00400174
206#define NV04_PGRAPH_CTX_CACHE1 0x00400180
207#define NV10_PGRAPH_CTX_CACHE2 0x00400180
208#define NV03_PGRAPH_CTX_CONTROL 0x00400190
209#define NV03_PGRAPH_CTX_USER 0x00400194
210#define NV04_PGRAPH_CTX_CACHE2 0x004001A0
211#define NV10_PGRAPH_CTX_CACHE3 0x004001A0
212#define NV04_PGRAPH_CTX_CACHE3 0x004001C0
213#define NV10_PGRAPH_CTX_CACHE4 0x004001C0
214#define NV04_PGRAPH_CTX_CACHE4 0x004001E0
215#define NV10_PGRAPH_CTX_CACHE5 0x004001E0
216#define NV40_PGRAPH_CTXCTL_0304 0x00400304
217#define NV40_PGRAPH_CTXCTL_0304_XFER_CTX 0x00000001
218#define NV40_PGRAPH_CTXCTL_UCODE_STAT 0x00400308
219#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_MASK 0xff000000
220#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT 24
221#define NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK 0x00ffffff
222#define NV40_PGRAPH_CTXCTL_0310 0x00400310
223#define NV40_PGRAPH_CTXCTL_0310_XFER_SAVE 0x00000020
224#define NV40_PGRAPH_CTXCTL_0310_XFER_LOAD 0x00000040
225#define NV40_PGRAPH_CTXCTL_030C 0x0040030c
226#define NV40_PGRAPH_CTXCTL_UCODE_INDEX 0x00400324
227#define NV40_PGRAPH_CTXCTL_UCODE_DATA 0x00400328
228#define NV40_PGRAPH_CTXCTL_CUR 0x0040032c
229#define NV40_PGRAPH_CTXCTL_CUR_LOADED 0x01000000
230#define NV40_PGRAPH_CTXCTL_CUR_INSTANCE 0x000FFFFF
231#define NV40_PGRAPH_CTXCTL_NEXT 0x00400330
232#define NV40_PGRAPH_CTXCTL_NEXT_INSTANCE 0x000fffff
233#define NV50_PGRAPH_CTXCTL_CUR 0x0040032c
234#define NV50_PGRAPH_CTXCTL_CUR_LOADED 0x80000000
235#define NV50_PGRAPH_CTXCTL_CUR_INSTANCE 0x00ffffff
236#define NV50_PGRAPH_CTXCTL_NEXT 0x00400330
237#define NV50_PGRAPH_CTXCTL_NEXT_INSTANCE 0x00ffffff
238#define NV03_PGRAPH_ABS_X_RAM 0x00400400
239#define NV03_PGRAPH_ABS_Y_RAM 0x00400480
240#define NV03_PGRAPH_X_MISC 0x00400500
241#define NV03_PGRAPH_Y_MISC 0x00400504
242#define NV04_PGRAPH_VALID1 0x00400508
243#define NV04_PGRAPH_SOURCE_COLOR 0x0040050C
244#define NV04_PGRAPH_MISC24_0 0x00400510
245#define NV03_PGRAPH_XY_LOGIC_MISC0 0x00400514
246#define NV03_PGRAPH_XY_LOGIC_MISC1 0x00400518
247#define NV03_PGRAPH_XY_LOGIC_MISC2 0x0040051C
248#define NV03_PGRAPH_XY_LOGIC_MISC3 0x00400520
249#define NV03_PGRAPH_CLIPX_0 0x00400524
250#define NV03_PGRAPH_CLIPX_1 0x00400528
251#define NV03_PGRAPH_CLIPY_0 0x0040052C
252#define NV03_PGRAPH_CLIPY_1 0x00400530
253#define NV03_PGRAPH_ABS_ICLIP_XMAX 0x00400534
254#define NV03_PGRAPH_ABS_ICLIP_YMAX 0x00400538
255#define NV03_PGRAPH_ABS_UCLIP_XMIN 0x0040053C
256#define NV03_PGRAPH_ABS_UCLIP_YMIN 0x00400540
257#define NV03_PGRAPH_ABS_UCLIP_XMAX 0x00400544
258#define NV03_PGRAPH_ABS_UCLIP_YMAX 0x00400548
259#define NV03_PGRAPH_ABS_UCLIPA_XMIN 0x00400560
260#define NV03_PGRAPH_ABS_UCLIPA_YMIN 0x00400564
261#define NV03_PGRAPH_ABS_UCLIPA_XMAX 0x00400568
262#define NV03_PGRAPH_ABS_UCLIPA_YMAX 0x0040056C
263#define NV04_PGRAPH_MISC24_1 0x00400570
264#define NV04_PGRAPH_MISC24_2 0x00400574
265#define NV04_PGRAPH_VALID2 0x00400578
266#define NV04_PGRAPH_PASSTHRU_0 0x0040057C
267#define NV04_PGRAPH_PASSTHRU_1 0x00400580
268#define NV04_PGRAPH_PASSTHRU_2 0x00400584
269#define NV10_PGRAPH_DIMX_TEXTURE 0x00400588
270#define NV10_PGRAPH_WDIMX_TEXTURE 0x0040058C
271#define NV04_PGRAPH_COMBINE_0_ALPHA 0x00400590
272#define NV04_PGRAPH_COMBINE_0_COLOR 0x00400594
273#define NV04_PGRAPH_COMBINE_1_ALPHA 0x00400598
274#define NV04_PGRAPH_COMBINE_1_COLOR 0x0040059C
275#define NV04_PGRAPH_FORMAT_0 0x004005A8
276#define NV04_PGRAPH_FORMAT_1 0x004005AC
277#define NV04_PGRAPH_FILTER_0 0x004005B0
278#define NV04_PGRAPH_FILTER_1 0x004005B4
279#define NV03_PGRAPH_MONO_COLOR0 0x00400600
280#define NV04_PGRAPH_ROP3 0x00400604
281#define NV04_PGRAPH_BETA_AND 0x00400608
282#define NV04_PGRAPH_BETA_PREMULT 0x0040060C
283#define NV04_PGRAPH_LIMIT_VIOL_PIX 0x00400610
284#define NV04_PGRAPH_FORMATS 0x00400618
285#define NV10_PGRAPH_DEBUG_2 0x00400620
286#define NV04_PGRAPH_BOFFSET0 0x00400640
287#define NV04_PGRAPH_BOFFSET1 0x00400644
288#define NV04_PGRAPH_BOFFSET2 0x00400648
289#define NV04_PGRAPH_BOFFSET3 0x0040064C
290#define NV04_PGRAPH_BOFFSET4 0x00400650
291#define NV04_PGRAPH_BOFFSET5 0x00400654
292#define NV04_PGRAPH_BBASE0 0x00400658
293#define NV04_PGRAPH_BBASE1 0x0040065C
294#define NV04_PGRAPH_BBASE2 0x00400660
295#define NV04_PGRAPH_BBASE3 0x00400664
296#define NV04_PGRAPH_BBASE4 0x00400668
297#define NV04_PGRAPH_BBASE5 0x0040066C
298#define NV04_PGRAPH_BPITCH0 0x00400670
299#define NV04_PGRAPH_BPITCH1 0x00400674
300#define NV04_PGRAPH_BPITCH2 0x00400678
301#define NV04_PGRAPH_BPITCH3 0x0040067C
302#define NV04_PGRAPH_BPITCH4 0x00400680
303#define NV04_PGRAPH_BLIMIT0 0x00400684
304#define NV04_PGRAPH_BLIMIT1 0x00400688
305#define NV04_PGRAPH_BLIMIT2 0x0040068C
306#define NV04_PGRAPH_BLIMIT3 0x00400690
307#define NV04_PGRAPH_BLIMIT4 0x00400694
308#define NV04_PGRAPH_BLIMIT5 0x00400698
309#define NV04_PGRAPH_BSWIZZLE2 0x0040069C
310#define NV04_PGRAPH_BSWIZZLE5 0x004006A0
311#define NV03_PGRAPH_STATUS 0x004006B0
312#define NV04_PGRAPH_STATUS 0x00400700
313#define NV04_PGRAPH_TRAPPED_ADDR 0x00400704
314#define NV04_PGRAPH_TRAPPED_DATA 0x00400708
315#define NV04_PGRAPH_SURFACE 0x0040070C
316#define NV10_PGRAPH_TRAPPED_DATA_HIGH 0x0040070C
317#define NV04_PGRAPH_STATE 0x00400710
318#define NV10_PGRAPH_SURFACE 0x00400710
319#define NV04_PGRAPH_NOTIFY 0x00400714
320#define NV10_PGRAPH_STATE 0x00400714
321#define NV10_PGRAPH_NOTIFY 0x00400718
322
323#define NV04_PGRAPH_FIFO 0x00400720
324
325#define NV04_PGRAPH_BPIXEL 0x00400724
326#define NV10_PGRAPH_RDI_INDEX 0x00400750
327#define NV04_PGRAPH_FFINTFC_ST2 0x00400754
328#define NV10_PGRAPH_RDI_DATA 0x00400754
329#define NV04_PGRAPH_DMA_PITCH 0x00400760
330#define NV10_PGRAPH_FFINTFC_ST2 0x00400764
331#define NV04_PGRAPH_DVD_COLORFMT 0x00400764
332#define NV04_PGRAPH_SCALED_FORMAT 0x00400768
333#define NV10_PGRAPH_DMA_PITCH 0x00400770
334#define NV10_PGRAPH_DVD_COLORFMT 0x00400774
335#define NV10_PGRAPH_SCALED_FORMAT 0x00400778
336#define NV20_PGRAPH_CHANNEL_CTX_TABLE 0x00400780
337#define NV20_PGRAPH_CHANNEL_CTX_POINTER 0x00400784
338#define NV20_PGRAPH_CHANNEL_CTX_XFER 0x00400788
339#define NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD 0x00000001
340#define NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE 0x00000002
341#define NV04_PGRAPH_PATT_COLOR0 0x00400800
342#define NV04_PGRAPH_PATT_COLOR1 0x00400804
343#define NV04_PGRAPH_PATTERN 0x00400808
344#define NV04_PGRAPH_PATTERN_SHAPE 0x00400810
345#define NV04_PGRAPH_CHROMA 0x00400814
346#define NV04_PGRAPH_CONTROL0 0x00400818
347#define NV04_PGRAPH_CONTROL1 0x0040081C
348#define NV04_PGRAPH_CONTROL2 0x00400820
349#define NV04_PGRAPH_BLEND 0x00400824
350#define NV04_PGRAPH_STORED_FMT 0x00400830
351#define NV04_PGRAPH_PATT_COLORRAM 0x00400900
352#define NV40_PGRAPH_TILE0(i) (0x00400900 + (i*16))
353#define NV40_PGRAPH_TLIMIT0(i) (0x00400904 + (i*16))
354#define NV40_PGRAPH_TSIZE0(i) (0x00400908 + (i*16))
355#define NV40_PGRAPH_TSTATUS0(i) (0x0040090C + (i*16))
356#define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16))
357#define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16))
358#define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16))
359#define NV10_PGRAPH_TSTATUS(i) (0x00400B0C + (i*16))
360#define NV04_PGRAPH_U_RAM 0x00400D00
361#define NV47_PGRAPH_TILE0(i) (0x00400D00 + (i*16))
362#define NV47_PGRAPH_TLIMIT0(i) (0x00400D04 + (i*16))
363#define NV47_PGRAPH_TSIZE0(i) (0x00400D08 + (i*16))
364#define NV47_PGRAPH_TSTATUS0(i) (0x00400D0C + (i*16))
365#define NV04_PGRAPH_V_RAM 0x00400D40
366#define NV04_PGRAPH_W_RAM 0x00400D80
367#define NV10_PGRAPH_COMBINER0_IN_ALPHA 0x00400E40
368#define NV10_PGRAPH_COMBINER1_IN_ALPHA 0x00400E44
369#define NV10_PGRAPH_COMBINER0_IN_RGB 0x00400E48
370#define NV10_PGRAPH_COMBINER1_IN_RGB 0x00400E4C
371#define NV10_PGRAPH_COMBINER_COLOR0 0x00400E50
372#define NV10_PGRAPH_COMBINER_COLOR1 0x00400E54
373#define NV10_PGRAPH_COMBINER0_OUT_ALPHA 0x00400E58
374#define NV10_PGRAPH_COMBINER1_OUT_ALPHA 0x00400E5C
375#define NV10_PGRAPH_COMBINER0_OUT_RGB 0x00400E60
376#define NV10_PGRAPH_COMBINER1_OUT_RGB 0x00400E64
377#define NV10_PGRAPH_COMBINER_FINAL0 0x00400E68
378#define NV10_PGRAPH_COMBINER_FINAL1 0x00400E6C
379#define NV10_PGRAPH_WINDOWCLIP_HORIZONTAL 0x00400F00
380#define NV10_PGRAPH_WINDOWCLIP_VERTICAL 0x00400F20
381#define NV10_PGRAPH_XFMODE0 0x00400F40
382#define NV10_PGRAPH_XFMODE1 0x00400F44
383#define NV10_PGRAPH_GLOBALSTATE0 0x00400F48
384#define NV10_PGRAPH_GLOBALSTATE1 0x00400F4C
385#define NV10_PGRAPH_PIPE_ADDRESS 0x00400F50
386#define NV10_PGRAPH_PIPE_DATA 0x00400F54
387#define NV04_PGRAPH_DMA_START_0 0x00401000
388#define NV04_PGRAPH_DMA_START_1 0x00401004
389#define NV04_PGRAPH_DMA_LENGTH 0x00401008
390#define NV04_PGRAPH_DMA_MISC 0x0040100C
391#define NV04_PGRAPH_DMA_DATA_0 0x00401020
392#define NV04_PGRAPH_DMA_DATA_1 0x00401024
393#define NV04_PGRAPH_DMA_RM 0x00401030
394#define NV04_PGRAPH_DMA_A_XLATE_INST 0x00401040
395#define NV04_PGRAPH_DMA_A_CONTROL 0x00401044
396#define NV04_PGRAPH_DMA_A_LIMIT 0x00401048
397#define NV04_PGRAPH_DMA_A_TLB_PTE 0x0040104C
398#define NV04_PGRAPH_DMA_A_TLB_TAG 0x00401050
399#define NV04_PGRAPH_DMA_A_ADJ_OFFSET 0x00401054
400#define NV04_PGRAPH_DMA_A_OFFSET 0x00401058
401#define NV04_PGRAPH_DMA_A_SIZE 0x0040105C
402#define NV04_PGRAPH_DMA_A_Y_SIZE 0x00401060
403#define NV04_PGRAPH_DMA_B_XLATE_INST 0x00401080
404#define NV04_PGRAPH_DMA_B_CONTROL 0x00401084
405#define NV04_PGRAPH_DMA_B_LIMIT 0x00401088
406#define NV04_PGRAPH_DMA_B_TLB_PTE 0x0040108C
407#define NV04_PGRAPH_DMA_B_TLB_TAG 0x00401090
408#define NV04_PGRAPH_DMA_B_ADJ_OFFSET 0x00401094
409#define NV04_PGRAPH_DMA_B_OFFSET 0x00401098
410#define NV04_PGRAPH_DMA_B_SIZE 0x0040109C
411#define NV04_PGRAPH_DMA_B_Y_SIZE 0x004010A0
412#define NV40_PGRAPH_TILE1(i) (0x00406900 + (i*16))
413#define NV40_PGRAPH_TLIMIT1(i) (0x00406904 + (i*16))
414#define NV40_PGRAPH_TSIZE1(i) (0x00406908 + (i*16))
415#define NV40_PGRAPH_TSTATUS1(i) (0x0040690C + (i*16))
416
417
418/* It's a guess that this works on NV03. Confirmed on NV04, though */
419#define NV04_PFIFO_DELAY_0 0x00002040
420#define NV04_PFIFO_DMA_TIMESLICE 0x00002044
421#define NV04_PFIFO_NEXT_CHANNEL 0x00002050
422#define NV03_PFIFO_INTR_0 0x00002100
423#define NV03_PFIFO_INTR_EN_0 0x00002140
424# define NV_PFIFO_INTR_CACHE_ERROR (1<<0)
425# define NV_PFIFO_INTR_RUNOUT (1<<4)
426# define NV_PFIFO_INTR_RUNOUT_OVERFLOW (1<<8)
427# define NV_PFIFO_INTR_DMA_PUSHER (1<<12)
428# define NV_PFIFO_INTR_DMA_PT (1<<16)
429# define NV_PFIFO_INTR_SEMAPHORE (1<<20)
430# define NV_PFIFO_INTR_ACQUIRE_TIMEOUT (1<<24)
431#define NV03_PFIFO_RAMHT 0x00002210
432#define NV03_PFIFO_RAMFC 0x00002214
433#define NV03_PFIFO_RAMRO 0x00002218
434#define NV40_PFIFO_RAMFC 0x00002220
435#define NV03_PFIFO_CACHES 0x00002500
436#define NV04_PFIFO_MODE 0x00002504
437#define NV04_PFIFO_DMA 0x00002508
438#define NV04_PFIFO_SIZE 0x0000250c
439#define NV50_PFIFO_CTX_TABLE(c) (0x2600+(c)*4)
440#define NV50_PFIFO_CTX_TABLE__SIZE 128
441#define NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED (1<<31)
442#define NV50_PFIFO_CTX_TABLE_UNK30_BAD (1<<30)
443#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80 0x0FFFFFFF
444#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84 0x00FFFFFF
445#define NV03_PFIFO_CACHE0_PUSH0 0x00003000
446#define NV03_PFIFO_CACHE0_PULL0 0x00003040
447#define NV04_PFIFO_CACHE0_PULL0 0x00003050
448#define NV04_PFIFO_CACHE0_PULL1 0x00003054
449#define NV03_PFIFO_CACHE1_PUSH0 0x00003200
450#define NV03_PFIFO_CACHE1_PUSH1 0x00003204
451#define NV03_PFIFO_CACHE1_PUSH1_DMA (1<<8)
452#define NV40_PFIFO_CACHE1_PUSH1_DMA (1<<16)
453#define NV03_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000000f
454#define NV10_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000001f
455#define NV50_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000007f
456#define NV03_PFIFO_CACHE1_PUT 0x00003210
457#define NV04_PFIFO_CACHE1_DMA_PUSH 0x00003220
458#define NV04_PFIFO_CACHE1_DMA_FETCH 0x00003224
459# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_8_BYTES 0x00000000
460# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_16_BYTES 0x00000008
461# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_24_BYTES 0x00000010
462# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_32_BYTES 0x00000018
463# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_40_BYTES 0x00000020
464# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_48_BYTES 0x00000028
465# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_56_BYTES 0x00000030
466# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_64_BYTES 0x00000038
467# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_72_BYTES 0x00000040
468# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_80_BYTES 0x00000048
469# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_88_BYTES 0x00000050
470# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_96_BYTES 0x00000058
471# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_104_BYTES 0x00000060
472# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES 0x00000068
473# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_120_BYTES 0x00000070
474# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES 0x00000078
475# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_136_BYTES 0x00000080
476# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_144_BYTES 0x00000088
477# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_152_BYTES 0x00000090
478# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_160_BYTES 0x00000098
479# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_168_BYTES 0x000000A0
480# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_176_BYTES 0x000000A8
481# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_184_BYTES 0x000000B0
482# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_192_BYTES 0x000000B8
483# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_200_BYTES 0x000000C0
484# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_208_BYTES 0x000000C8
485# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_216_BYTES 0x000000D0
486# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_224_BYTES 0x000000D8
487# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_232_BYTES 0x000000E0
488# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_240_BYTES 0x000000E8
489# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_248_BYTES 0x000000F0
490# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_256_BYTES 0x000000F8
491# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE 0x0000E000
492# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_32_BYTES 0x00000000
493# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_64_BYTES 0x00002000
494# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_96_BYTES 0x00004000
495# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES 0x00006000
496# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_160_BYTES 0x00008000
497# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_192_BYTES 0x0000A000
498# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_224_BYTES 0x0000C000
499# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_256_BYTES 0x0000E000
500# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS 0x001F0000
501# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_0 0x00000000
502# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_1 0x00010000
503# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_2 0x00020000
504# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_3 0x00030000
505# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 0x00040000
506# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_5 0x00050000
507# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_6 0x00060000
508# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_7 0x00070000
509# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 0x00080000
510# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_9 0x00090000
511# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_10 0x000A0000
512# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_11 0x000B0000
513# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_12 0x000C0000
514# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_13 0x000D0000
515# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_14 0x000E0000
516# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_15 0x000F0000
517# define NV_PFIFO_CACHE1_ENDIAN 0x80000000
518# define NV_PFIFO_CACHE1_LITTLE_ENDIAN 0x7FFFFFFF
519# define NV_PFIFO_CACHE1_BIG_ENDIAN 0x80000000
520#define NV04_PFIFO_CACHE1_DMA_STATE 0x00003228
521#define NV04_PFIFO_CACHE1_DMA_INSTANCE 0x0000322c
522#define NV04_PFIFO_CACHE1_DMA_CTL 0x00003230
523#define NV04_PFIFO_CACHE1_DMA_PUT 0x00003240
524#define NV04_PFIFO_CACHE1_DMA_GET 0x00003244
525#define NV10_PFIFO_CACHE1_REF_CNT 0x00003248
526#define NV10_PFIFO_CACHE1_DMA_SUBROUTINE 0x0000324C
527#define NV03_PFIFO_CACHE1_PULL0 0x00003240
528#define NV04_PFIFO_CACHE1_PULL0 0x00003250
529#define NV03_PFIFO_CACHE1_PULL1 0x00003250
530#define NV04_PFIFO_CACHE1_PULL1 0x00003254
531#define NV04_PFIFO_CACHE1_HASH 0x00003258
532#define NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT 0x00003260
533#define NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP 0x00003264
534#define NV10_PFIFO_CACHE1_ACQUIRE_VALUE 0x00003268
535#define NV10_PFIFO_CACHE1_SEMAPHORE 0x0000326C
536#define NV03_PFIFO_CACHE1_GET 0x00003270
537#define NV04_PFIFO_CACHE1_ENGINE 0x00003280
538#define NV04_PFIFO_CACHE1_DMA_DCOUNT 0x000032A0
539#define NV40_PFIFO_GRCTX_INSTANCE 0x000032E0
540#define NV40_PFIFO_UNK32E4 0x000032E4
541#define NV04_PFIFO_CACHE1_METHOD(i) (0x00003800+(i*8))
542#define NV04_PFIFO_CACHE1_DATA(i) (0x00003804+(i*8))
543#define NV40_PFIFO_CACHE1_METHOD(i) (0x00090000+(i*8))
544#define NV40_PFIFO_CACHE1_DATA(i) (0x00090004+(i*8))
545
546#define NV_CRTC0_INTSTAT 0x00600100
547#define NV_CRTC0_INTEN 0x00600140
548#define NV_CRTC1_INTSTAT 0x00602100
549#define NV_CRTC1_INTEN 0x00602140
550# define NV_CRTC_INTR_VBLANK (1<<0)
551
552#define NV04_PRAMIN 0x00700000
553
554/* Fifo commands. These are not regs, neither masks */
555#define NV03_FIFO_CMD_JUMP 0x20000000
556#define NV03_FIFO_CMD_JUMP_OFFSET_MASK 0x1ffffffc
557#define NV03_FIFO_CMD_REWIND (NV03_FIFO_CMD_JUMP | (0 & NV03_FIFO_CMD_JUMP_OFFSET_MASK))
558
559/* This is a partial import from rules-ng, a few things may be duplicated.
560 * Eventually we should completely import everything from rules-ng.
561 * For the moment check rules-ng for docs.
562 */
563
564#define NV50_PMC 0x00000000
565#define NV50_PMC__LEN 0x1
566#define NV50_PMC__ESIZE 0x2000
567# define NV50_PMC_BOOT_0 0x00000000
568# define NV50_PMC_BOOT_0_REVISION 0x000000ff
569# define NV50_PMC_BOOT_0_REVISION__SHIFT 0
570# define NV50_PMC_BOOT_0_ARCH 0x0ff00000
571# define NV50_PMC_BOOT_0_ARCH__SHIFT 20
572# define NV50_PMC_INTR_0 0x00000100
573# define NV50_PMC_INTR_0_PFIFO (1<<8)
574# define NV50_PMC_INTR_0_PGRAPH (1<<12)
575# define NV50_PMC_INTR_0_PTIMER (1<<20)
576# define NV50_PMC_INTR_0_HOTPLUG (1<<21)
577# define NV50_PMC_INTR_0_DISPLAY (1<<26)
578# define NV50_PMC_INTR_EN_0 0x00000140
579# define NV50_PMC_INTR_EN_0_MASTER (1<<0)
580# define NV50_PMC_INTR_EN_0_MASTER_DISABLED (0<<0)
581# define NV50_PMC_INTR_EN_0_MASTER_ENABLED (1<<0)
582# define NV50_PMC_ENABLE 0x00000200
583# define NV50_PMC_ENABLE_PFIFO (1<<8)
584# define NV50_PMC_ENABLE_PGRAPH (1<<12)
585
586#define NV50_PCONNECTOR 0x0000e000
587#define NV50_PCONNECTOR__LEN 0x1
588#define NV50_PCONNECTOR__ESIZE 0x1000
589# define NV50_PCONNECTOR_HOTPLUG_INTR 0x0000e050
590# define NV50_PCONNECTOR_HOTPLUG_INTR_PLUG_I2C0 (1<<0)
591# define NV50_PCONNECTOR_HOTPLUG_INTR_PLUG_I2C1 (1<<1)
592# define NV50_PCONNECTOR_HOTPLUG_INTR_PLUG_I2C2 (1<<2)
593# define NV50_PCONNECTOR_HOTPLUG_INTR_PLUG_I2C3 (1<<3)
594# define NV50_PCONNECTOR_HOTPLUG_INTR_UNPLUG_I2C0 (1<<16)
595# define NV50_PCONNECTOR_HOTPLUG_INTR_UNPLUG_I2C1 (1<<17)
596# define NV50_PCONNECTOR_HOTPLUG_INTR_UNPLUG_I2C2 (1<<18)
597# define NV50_PCONNECTOR_HOTPLUG_INTR_UNPLUG_I2C3 (1<<19)
598# define NV50_PCONNECTOR_HOTPLUG_CTRL 0x0000e054
599# define NV50_PCONNECTOR_HOTPLUG_CTRL_PLUG_I2C0 (1<<0)
600# define NV50_PCONNECTOR_HOTPLUG_CTRL_PLUG_I2C1 (1<<1)
601# define NV50_PCONNECTOR_HOTPLUG_CTRL_PLUG_I2C2 (1<<2)
602# define NV50_PCONNECTOR_HOTPLUG_CTRL_PLUG_I2C3 (1<<3)
603# define NV50_PCONNECTOR_HOTPLUG_CTRL_UNPLUG_I2C0 (1<<16)
604# define NV50_PCONNECTOR_HOTPLUG_CTRL_UNPLUG_I2C1 (1<<17)
605# define NV50_PCONNECTOR_HOTPLUG_CTRL_UNPLUG_I2C2 (1<<18)
606# define NV50_PCONNECTOR_HOTPLUG_CTRL_UNPLUG_I2C3 (1<<19)
607# define NV50_PCONNECTOR_HOTPLUG_STATE 0x0000e104
608# define NV50_PCONNECTOR_HOTPLUG_STATE_PIN_CONNECTED_I2C0 (1<<2)
609# define NV50_PCONNECTOR_HOTPLUG_STATE_PIN_CONNECTED_I2C1 (1<<6)
610# define NV50_PCONNECTOR_HOTPLUG_STATE_PIN_CONNECTED_I2C2 (1<<10)
611# define NV50_PCONNECTOR_HOTPLUG_STATE_PIN_CONNECTED_I2C3 (1<<14)
612# define NV50_PCONNECTOR_I2C_PORT_0 0x0000e138
613# define NV50_PCONNECTOR_I2C_PORT_1 0x0000e150
614# define NV50_PCONNECTOR_I2C_PORT_2 0x0000e168
615# define NV50_PCONNECTOR_I2C_PORT_3 0x0000e180
616# define NV50_PCONNECTOR_I2C_PORT_4 0x0000e240
617# define NV50_PCONNECTOR_I2C_PORT_5 0x0000e258
618
619#define NV50_AUXCH_DATA_OUT(i,n) ((n) * 4 + (i) * 0x50 + 0x0000e4c0)
620#define NV50_AUXCH_DATA_OUT__SIZE 4
621#define NV50_AUXCH_DATA_IN(i,n) ((n) * 4 + (i) * 0x50 + 0x0000e4d0)
622#define NV50_AUXCH_DATA_IN__SIZE 4
623#define NV50_AUXCH_ADDR(i) ((i) * 0x50 + 0x0000e4e0)
624#define NV50_AUXCH_CTRL(i) ((i) * 0x50 + 0x0000e4e4)
625#define NV50_AUXCH_CTRL_LINKSTAT 0x01000000
626#define NV50_AUXCH_CTRL_LINKSTAT_NOT_READY 0x00000000
627#define NV50_AUXCH_CTRL_LINKSTAT_READY 0x01000000
628#define NV50_AUXCH_CTRL_LINKEN 0x00100000
629#define NV50_AUXCH_CTRL_LINKEN_DISABLED 0x00000000
630#define NV50_AUXCH_CTRL_LINKEN_ENABLED 0x00100000
631#define NV50_AUXCH_CTRL_EXEC 0x00010000
632#define NV50_AUXCH_CTRL_EXEC_COMPLETE 0x00000000
633#define NV50_AUXCH_CTRL_EXEC_IN_PROCESS 0x00010000
634#define NV50_AUXCH_CTRL_CMD 0x0000f000
635#define NV50_AUXCH_CTRL_CMD_SHIFT 12
636#define NV50_AUXCH_CTRL_LEN 0x0000000f
637#define NV50_AUXCH_CTRL_LEN_SHIFT 0
638#define NV50_AUXCH_STAT(i) ((i) * 0x50 + 0x0000e4e8)
639#define NV50_AUXCH_STAT_STATE 0x10000000
640#define NV50_AUXCH_STAT_STATE_NOT_READY 0x00000000
641#define NV50_AUXCH_STAT_STATE_READY 0x10000000
642#define NV50_AUXCH_STAT_REPLY 0x000f0000
643#define NV50_AUXCH_STAT_REPLY_AUX 0x00030000
644#define NV50_AUXCH_STAT_REPLY_AUX_ACK 0x00000000
645#define NV50_AUXCH_STAT_REPLY_AUX_NACK 0x00010000
646#define NV50_AUXCH_STAT_REPLY_AUX_DEFER 0x00020000
647#define NV50_AUXCH_STAT_REPLY_I2C 0x000c0000
648#define NV50_AUXCH_STAT_REPLY_I2C_ACK 0x00000000
649#define NV50_AUXCH_STAT_REPLY_I2C_NACK 0x00040000
650#define NV50_AUXCH_STAT_REPLY_I2C_DEFER 0x00080000
651#define NV50_AUXCH_STAT_COUNT 0x0000001f
652
653#define NV50_PBUS 0x00088000
654#define NV50_PBUS__LEN 0x1
655#define NV50_PBUS__ESIZE 0x1000
656# define NV50_PBUS_PCI_ID 0x00088000
657# define NV50_PBUS_PCI_ID_VENDOR_ID 0x0000ffff
658# define NV50_PBUS_PCI_ID_VENDOR_ID__SHIFT 0
659# define NV50_PBUS_PCI_ID_DEVICE_ID 0xffff0000
660# define NV50_PBUS_PCI_ID_DEVICE_ID__SHIFT 16
661
662#define NV50_PFB 0x00100000
663#define NV50_PFB__LEN 0x1
664#define NV50_PFB__ESIZE 0x1000
665
666#define NV50_PEXTDEV 0x00101000
667#define NV50_PEXTDEV__LEN 0x1
668#define NV50_PEXTDEV__ESIZE 0x1000
669
670#define NV50_PROM 0x00300000
671#define NV50_PROM__LEN 0x1
672#define NV50_PROM__ESIZE 0x10000
673
674#define NV50_PGRAPH 0x00400000
675#define NV50_PGRAPH__LEN 0x1
676#define NV50_PGRAPH__ESIZE 0x10000
677
678#define NV50_PDISPLAY 0x00610000
679#define NV50_PDISPLAY_OBJECTS 0x00610010
680#define NV50_PDISPLAY_INTR_0 0x00610020
681#define NV50_PDISPLAY_INTR_1 0x00610024
682#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC 0x0000000c
683#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC_SHIFT 2
684#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(n) (1 << ((n) + 2))
685#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC_0 0x00000004
686#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC_1 0x00000008
687#define NV50_PDISPLAY_INTR_1_CLK_UNK10 0x00000010
688#define NV50_PDISPLAY_INTR_1_CLK_UNK20 0x00000020
689#define NV50_PDISPLAY_INTR_1_CLK_UNK40 0x00000040
690#define NV50_PDISPLAY_INTR_EN 0x0061002c
691#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC 0x0000000c
692#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(n) (1 << ((n) + 2))
693#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_0 0x00000004
694#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_1 0x00000008
695#define NV50_PDISPLAY_INTR_EN_CLK_UNK10 0x00000010
696#define NV50_PDISPLAY_INTR_EN_CLK_UNK20 0x00000020
697#define NV50_PDISPLAY_INTR_EN_CLK_UNK40 0x00000040
698#define NV50_PDISPLAY_UNK30_CTRL 0x00610030
699#define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK0 0x00000200
700#define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK1 0x00000400
701#define NV50_PDISPLAY_UNK30_CTRL_PENDING 0x80000000
702#define NV50_PDISPLAY_TRAPPED_ADDR 0x00610080
703#define NV50_PDISPLAY_TRAPPED_DATA 0x00610084
704#define NV50_PDISPLAY_CHANNEL_STAT(i) ((i) * 0x10 + 0x00610200)
705#define NV50_PDISPLAY_CHANNEL_STAT_DMA 0x00000010
706#define NV50_PDISPLAY_CHANNEL_STAT_DMA_DISABLED 0x00000000
707#define NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED 0x00000010
708#define NV50_PDISPLAY_CHANNEL_DMA_CB(i) ((i) * 0x10 + 0x00610204)
709#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION 0x00000002
710#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM 0x00000000
711#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_SYSTEM 0x00000002
712#define NV50_PDISPLAY_CHANNEL_DMA_CB_VALID 0x00000001
713#define NV50_PDISPLAY_CHANNEL_UNK2(i) ((i) * 0x10 + 0x00610208)
714#define NV50_PDISPLAY_CHANNEL_UNK3(i) ((i) * 0x10 + 0x0061020c)
715
716#define NV50_PDISPLAY_CURSOR 0x00610270
717#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i) ((i) * 0x10 + 0x00610270)
718#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_ON 0x00000001
719#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS 0x00030000
720#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE 0x00010000
721
722#define NV50_PDISPLAY_CTRL_STATE 0x00610300
723#define NV50_PDISPLAY_CTRL_STATE_PENDING 0x80000000
724#define NV50_PDISPLAY_CTRL_STATE_METHOD 0x00001ffc
725#define NV50_PDISPLAY_CTRL_STATE_ENABLE 0x00000001
726#define NV50_PDISPLAY_CTRL_VAL 0x00610304
727#define NV50_PDISPLAY_UNK_380 0x00610380
728#define NV50_PDISPLAY_RAM_AMOUNT 0x00610384
729#define NV50_PDISPLAY_UNK_388 0x00610388
730#define NV50_PDISPLAY_UNK_38C 0x0061038c
731
732#define NV50_PDISPLAY_CRTC_P(i, r) ((i) * 0x540 + NV50_PDISPLAY_CRTC_##r)
733#define NV50_PDISPLAY_CRTC_C(i, r) (4 + (i) * 0x540 + NV50_PDISPLAY_CRTC_##r)
734#define NV50_PDISPLAY_CRTC_UNK_0A18 /* mthd 0x0900 */ 0x00610a18
735#define NV50_PDISPLAY_CRTC_CLUT_MODE 0x00610a24
736#define NV50_PDISPLAY_CRTC_INTERLACE 0x00610a48
737#define NV50_PDISPLAY_CRTC_SCALE_CTRL 0x00610a50
738#define NV50_PDISPLAY_CRTC_CURSOR_CTRL 0x00610a58
739#define NV50_PDISPLAY_CRTC_UNK0A78 /* mthd 0x0904 */ 0x00610a78
740#define NV50_PDISPLAY_CRTC_UNK0AB8 0x00610ab8
741#define NV50_PDISPLAY_CRTC_DEPTH 0x00610ac8
742#define NV50_PDISPLAY_CRTC_CLOCK 0x00610ad0
743#define NV50_PDISPLAY_CRTC_COLOR_CTRL 0x00610ae0
744#define NV50_PDISPLAY_CRTC_SYNC_START_TO_BLANK_END 0x00610ae8
745#define NV50_PDISPLAY_CRTC_MODE_UNK1 0x00610af0
746#define NV50_PDISPLAY_CRTC_DISPLAY_TOTAL 0x00610af8
747#define NV50_PDISPLAY_CRTC_SYNC_DURATION 0x00610b00
748#define NV50_PDISPLAY_CRTC_MODE_UNK2 0x00610b08
749#define NV50_PDISPLAY_CRTC_UNK_0B10 /* mthd 0x0828 */ 0x00610b10
750#define NV50_PDISPLAY_CRTC_FB_SIZE 0x00610b18
751#define NV50_PDISPLAY_CRTC_FB_PITCH 0x00610b20
752#define NV50_PDISPLAY_CRTC_FB_PITCH_LINEAR 0x00100000
753#define NV50_PDISPLAY_CRTC_FB_POS 0x00610b28
754#define NV50_PDISPLAY_CRTC_SCALE_CENTER_OFFSET 0x00610b38
755#define NV50_PDISPLAY_CRTC_REAL_RES 0x00610b40
756#define NV50_PDISPLAY_CRTC_SCALE_RES1 0x00610b48
757#define NV50_PDISPLAY_CRTC_SCALE_RES2 0x00610b50
758
759#define NV50_PDISPLAY_DAC_MODE_CTRL_P(i) (0x00610b58 + (i) * 0x8)
760#define NV50_PDISPLAY_DAC_MODE_CTRL_C(i) (0x00610b5c + (i) * 0x8)
761#define NV50_PDISPLAY_SOR_MODE_CTRL_P(i) (0x00610b70 + (i) * 0x8)
762#define NV50_PDISPLAY_SOR_MODE_CTRL_C(i) (0x00610b74 + (i) * 0x8)
763#define NV50_PDISPLAY_DAC_MODE_CTRL2_P(i) (0x00610bdc + (i) * 0x8)
764#define NV50_PDISPLAY_DAC_MODE_CTRL2_C(i) (0x00610be0 + (i) * 0x8)
765
766#define NV90_PDISPLAY_SOR_MODE_CTRL_P(i) (0x00610794 + (i) * 0x8)
767#define NV90_PDISPLAY_SOR_MODE_CTRL_C(i) (0x00610798 + (i) * 0x8)
768#define NV90_PDISPLAY_DAC_MODE_CTRL_P(i) (0x00610b58 + (i) * 0x8)
769#define NV90_PDISPLAY_DAC_MODE_CTRL_C(i) (0x00610b5c + (i) * 0x8)
770#define NV90_PDISPLAY_DAC_MODE_CTRL2_P(i) (0x00610b80 + (i) * 0x8)
771#define NV90_PDISPLAY_DAC_MODE_CTRL2_C(i) (0x00610b84 + (i) * 0x8)
772
773#define NV50_PDISPLAY_CRTC_CLK 0x00614000
774#define NV50_PDISPLAY_CRTC_CLK_CTRL1(i) ((i) * 0x800 + 0x614100)
775#define NV50_PDISPLAY_CRTC_CLK_CTRL1_CONNECTED 0x00000600
776#define NV50_PDISPLAY_CRTC_CLK_VPLL_A(i) ((i) * 0x800 + 0x614104)
777#define NV50_PDISPLAY_CRTC_CLK_VPLL_B(i) ((i) * 0x800 + 0x614108)
778#define NV50_PDISPLAY_CRTC_CLK_CTRL2(i) ((i) * 0x800 + 0x614200)
779
780#define NV50_PDISPLAY_DAC_CLK 0x00614000
781#define NV50_PDISPLAY_DAC_CLK_CTRL2(i) ((i) * 0x800 + 0x614280)
782
783#define NV50_PDISPLAY_SOR_CLK 0x00614000
784#define NV50_PDISPLAY_SOR_CLK_CTRL2(i) ((i) * 0x800 + 0x614300)
785
786#define NV50_PDISPLAY_VGACRTC(r) ((r) + 0x619400)
787
788#define NV50_PDISPLAY_DAC 0x0061a000
789#define NV50_PDISPLAY_DAC_DPMS_CTRL(i) (0x0061a004 + (i) * 0x800)
790#define NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF 0x00000001
791#define NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF 0x00000004
792#define NV50_PDISPLAY_DAC_DPMS_CTRL_BLANKED 0x00000010
793#define NV50_PDISPLAY_DAC_DPMS_CTRL_OFF 0x00000040
794#define NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING 0x80000000
795#define NV50_PDISPLAY_DAC_LOAD_CTRL(i) (0x0061a00c + (i) * 0x800)
796#define NV50_PDISPLAY_DAC_LOAD_CTRL_ACTIVE 0x00100000
797#define NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT 0x38000000
798#define NV50_PDISPLAY_DAC_LOAD_CTRL_DONE 0x80000000
799#define NV50_PDISPLAY_DAC_CLK_CTRL1(i) (0x0061a010 + (i) * 0x800)
800#define NV50_PDISPLAY_DAC_CLK_CTRL1_CONNECTED 0x00000600
801
802#define NV50_PDISPLAY_SOR 0x0061c000
803#define NV50_PDISPLAY_SOR_DPMS_CTRL(i) (0x0061c004 + (i) * 0x800)
804#define NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING 0x80000000
805#define NV50_PDISPLAY_SOR_DPMS_CTRL_ON 0x00000001
806#define NV50_PDISPLAY_SOR_CLK_CTRL1(i) (0x0061c008 + (i) * 0x800)
807#define NV50_PDISPLAY_SOR_CLK_CTRL1_CONNECTED 0x00000600
808#define NV50_PDISPLAY_SOR_DPMS_STATE(i) (0x0061c030 + (i) * 0x800)
809#define NV50_PDISPLAY_SOR_DPMS_STATE_ACTIVE 0x00030000
810#define NV50_PDISPLAY_SOR_DPMS_STATE_BLANKED 0x00080000
811#define NV50_PDISPLAY_SOR_DPMS_STATE_WAIT 0x10000000
812#define NV50_PDISPLAY_SOR_BACKLIGHT 0x0061c084
813#define NV50_PDISPLAY_SOR_BACKLIGHT_ENABLE 0x80000000
814#define NV50_PDISPLAY_SOR_BACKLIGHT_LEVEL 0x00000fff
815#define NV50_SOR_DP_CTRL(i,l) (0x0061c10c + (i) * 0x800 + (l) * 0x80)
816#define NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED 0x00004000
817#define NV50_SOR_DP_CTRL_LANE_MASK 0x001f0000
818#define NV50_SOR_DP_CTRL_LANE_0_ENABLED 0x00010000
819#define NV50_SOR_DP_CTRL_LANE_1_ENABLED 0x00020000
820#define NV50_SOR_DP_CTRL_LANE_2_ENABLED 0x00040000
821#define NV50_SOR_DP_CTRL_LANE_3_ENABLED 0x00080000
822#define NV50_SOR_DP_CTRL_TRAINING_PATTERN 0x0f000000
823#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_DISABLED 0x00000000
824#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_1 0x01000000
825#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_2 0x02000000
826#define NV50_SOR_DP_UNK118(i,l) (0x0061c118 + (i) * 0x800 + (l) * 0x80)
827#define NV50_SOR_DP_UNK120(i,l) (0x0061c120 + (i) * 0x800 + (l) * 0x80)
828#define NV50_SOR_DP_UNK130(i,l) (0x0061c130 + (i) * 0x800 + (l) * 0x80)
829
830#define NV50_PDISPLAY_USER(i) ((i) * 0x1000 + 0x00640000)
831#define NV50_PDISPLAY_USER_PUT(i) ((i) * 0x1000 + 0x00640000)
832#define NV50_PDISPLAY_USER_GET(i) ((i) * 0x1000 + 0x00640004)
833
834#define NV50_PDISPLAY_CURSOR_USER 0x00647000
835#define NV50_PDISPLAY_CURSOR_USER_POS_CTRL(i) ((i) * 0x1000 + 0x00647080)
836#define NV50_PDISPLAY_CURSOR_USER_POS(i) ((i) * 0x1000 + 0x00647084)
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
new file mode 100644
index 000000000000..4c7f1e403e80
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -0,0 +1,321 @@
1#include "drmP.h"
2#include "nouveau_drv.h"
3#include <linux/pagemap.h>
4
5#define NV_CTXDMA_PAGE_SHIFT 12
6#define NV_CTXDMA_PAGE_SIZE (1 << NV_CTXDMA_PAGE_SHIFT)
7#define NV_CTXDMA_PAGE_MASK (NV_CTXDMA_PAGE_SIZE - 1)
8
/*
 * Per-TTM-object state for a scatter/gather DMA backend, created by
 * nouveau_sgdma_init_ttm().
 */
struct nouveau_sgdma_be {
	/* Must remain the first member: the backend callbacks below cast
	 * the ttm_backend pointer straight to nouveau_sgdma_be. */
	struct ttm_backend backend;
	struct drm_device *dev;

	dma_addr_t *pages;	/* PCI bus addresses from pci_map_page() */
	unsigned nr_pages;	/* number of valid entries in pages[] */

	unsigned pte_start;	/* first ctxdma PTE written by bind() */
	bool bound;		/* true while PTEs reference pages[] */
};
19
20static int
21nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
22 struct page **pages, struct page *dummy_read_page)
23{
24 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
25 struct drm_device *dev = nvbe->dev;
26
27 NV_DEBUG(nvbe->dev, "num_pages = %ld\n", num_pages);
28
29 if (nvbe->pages)
30 return -EINVAL;
31
32 nvbe->pages = kmalloc(sizeof(dma_addr_t) * num_pages, GFP_KERNEL);
33 if (!nvbe->pages)
34 return -ENOMEM;
35
36 nvbe->nr_pages = 0;
37 while (num_pages--) {
38 nvbe->pages[nvbe->nr_pages] =
39 pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
40 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
41 if (pci_dma_mapping_error(dev->pdev,
42 nvbe->pages[nvbe->nr_pages])) {
43 be->func->clear(be);
44 return -EFAULT;
45 }
46
47 nvbe->nr_pages++;
48 }
49
50 return 0;
51}
52
53static void
54nouveau_sgdma_clear(struct ttm_backend *be)
55{
56 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
57 struct drm_device *dev = nvbe->dev;
58
59 NV_DEBUG(nvbe->dev, "\n");
60
61 if (nvbe && nvbe->pages) {
62 if (nvbe->bound)
63 be->func->unbind(be);
64
65 while (nvbe->nr_pages--) {
66 pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
67 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
68 }
69 kfree(nvbe->pages);
70 nvbe->pages = NULL;
71 nvbe->nr_pages = 0;
72 }
73}
74
75static inline unsigned
76nouveau_sgdma_pte(struct drm_device *dev, uint64_t offset)
77{
78 struct drm_nouveau_private *dev_priv = dev->dev_private;
79 unsigned pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
80
81 if (dev_priv->card_type < NV_50)
82 return pte + 2;
83
84 return pte << 1;
85}
86
/*
 * TTM backend .bind: write PTEs into the SG ctxdma so the GPU's GART
 * aperture maps the pages recorded by populate().  Each CPU page is split
 * into PAGE_SIZE / NV_CTXDMA_PAGE_SIZE (4 KiB) GPU pages.  On NV50 a
 * flush sequence is then issued through register 0x100c80 (presumably a
 * VM/TLB flush — TODO confirm against rules-ng docs).
 */
static int
nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "pg=0x%lx\n", mem->mm_node->start);

	dev_priv->engine.instmem.prepare_access(nvbe->dev, true);
	/* First PTE for this placement; remembered so unbind() can rewrite
	 * exactly the same range. */
	pte = nouveau_sgdma_pte(nvbe->dev, mem->mm_node->start << PAGE_SHIFT);
	nvbe->pte_start = pte;
	for (i = 0; i < nvbe->nr_pages; i++) {
		dma_addr_t dma_offset = nvbe->pages[i];
		uint32_t offset_l = lower_32_bits(dma_offset);
		uint32_t offset_h = upper_32_bits(dma_offset);

		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
			/* Low bits are PTE flags: |3 on <NV50, |0x21 plus a
			 * second word holding address bits 32-39 on NV50
			 * (valid/present encodings — NOTE(review): exact bit
			 * meanings not derivable from this file). */
			if (dev_priv->card_type < NV_50)
				nv_wo32(dev, gpuobj, pte++, offset_l | 3);
			else {
				nv_wo32(dev, gpuobj, pte++, offset_l | 0x21);
				nv_wo32(dev, gpuobj, pte++, offset_h & 0xff);
			}

			dma_offset += NV_CTXDMA_PAGE_SIZE;
		}
	}
	dev_priv->engine.instmem.finish_access(nvbe->dev);

	if (dev_priv->card_type == NV_50) {
		/* Two-step flush; each write must complete (bit 0 clears)
		 * before proceeding. */
		nv_wr32(dev, 0x100c80, 0x00050001);
		if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
			NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
			NV_ERROR(dev, "0x100c80 = 0x%08x\n",
				 nv_rd32(dev, 0x100c80));
			return -EBUSY;
		}

		nv_wr32(dev, 0x100c80, 0x00000001);
		if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
			NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
			NV_ERROR(dev, "0x100c80 = 0x%08x\n",
				 nv_rd32(dev, 0x100c80));
			return -EBUSY;
		}
	}

	nvbe->bound = true;
	return 0;
}
140
/*
 * TTM backend .unbind: rewrite the PTE range written by bind() (starting
 * at nvbe->pte_start) so every entry points at the shared dummy page,
 * keeping the GART fully mapped rather than leaving stale entries.
 * No-op if not currently bound.  Always returns 0.
 */
static int
nouveau_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "\n");

	if (!nvbe->bound)
		return 0;

	dev_priv->engine.instmem.prepare_access(nvbe->dev, true);
	pte = nvbe->pte_start;
	for (i = 0; i < nvbe->nr_pages; i++) {
		/* Same dummy bus address for every sub-page; PTE flag bits
		 * mirror the encodings used in bind(). */
		dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus;

		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
			if (dev_priv->card_type < NV_50)
				nv_wo32(dev, gpuobj, pte++, dma_offset | 3);
			else {
				nv_wo32(dev, gpuobj, pte++, dma_offset | 0x21);
				nv_wo32(dev, gpuobj, pte++, 0x00000000);
			}

			dma_offset += NV_CTXDMA_PAGE_SIZE;
		}
	}
	dev_priv->engine.instmem.finish_access(nvbe->dev);

	nvbe->bound = false;
	return 0;
}
176
177static void
178nouveau_sgdma_destroy(struct ttm_backend *be)
179{
180 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
181
182 if (be) {
183 NV_DEBUG(nvbe->dev, "\n");
184
185 if (nvbe) {
186 if (nvbe->pages)
187 be->func->clear(be);
188 kfree(nvbe);
189 }
190 }
191}
192
/* TTM backend vtable wiring TTM's generic backend interface to the
 * SG-DMA implementation above. */
static struct ttm_backend_func nouveau_sgdma_backend = {
	.populate = nouveau_sgdma_populate,
	.clear = nouveau_sgdma_clear,
	.bind = nouveau_sgdma_bind,
	.unbind = nouveau_sgdma_unbind,
	.destroy = nouveau_sgdma_destroy
};
200
201struct ttm_backend *
202nouveau_sgdma_init_ttm(struct drm_device *dev)
203{
204 struct drm_nouveau_private *dev_priv = dev->dev_private;
205 struct nouveau_sgdma_be *nvbe;
206
207 if (!dev_priv->gart_info.sg_ctxdma)
208 return NULL;
209
210 nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
211 if (!nvbe)
212 return NULL;
213
214 nvbe->dev = dev;
215
216 nvbe->backend.func = &nouveau_sgdma_backend;
217
218 return &nvbe->backend;
219}
220
221int
222nouveau_sgdma_init(struct drm_device *dev)
223{
224 struct drm_nouveau_private *dev_priv = dev->dev_private;
225 struct nouveau_gpuobj *gpuobj = NULL;
226 uint32_t aper_size, obj_size;
227 int i, ret;
228
229 if (dev_priv->card_type < NV_50) {
230 aper_size = (64 * 1024 * 1024);
231 obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
232 obj_size += 8; /* ctxdma header */
233 } else {
234 /* 1 entire VM page table */
235 aper_size = (512 * 1024 * 1024);
236 obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8;
237 }
238
239 ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
240 NVOBJ_FLAG_ALLOW_NO_REFS |
241 NVOBJ_FLAG_ZERO_ALLOC |
242 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
243 if (ret) {
244 NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
245 return ret;
246 }
247
248 dev_priv->gart_info.sg_dummy_page =
249 alloc_page(GFP_KERNEL|__GFP_DMA32);
250 set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags);
251 dev_priv->gart_info.sg_dummy_bus =
252 pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0,
253 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
254
255 dev_priv->engine.instmem.prepare_access(dev, true);
256 if (dev_priv->card_type < NV_50) {
257 /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
258 * confirmed to work on c51. Perhaps means NV_DMA_TARGET_PCIE
259 * on those cards? */
260 nv_wo32(dev, gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
261 (1 << 12) /* PT present */ |
262 (0 << 13) /* PT *not* linear */ |
263 (NV_DMA_ACCESS_RW << 14) |
264 (NV_DMA_TARGET_PCI << 16));
265 nv_wo32(dev, gpuobj, 1, aper_size - 1);
266 for (i = 2; i < 2 + (aper_size >> 12); i++) {
267 nv_wo32(dev, gpuobj, i,
268 dev_priv->gart_info.sg_dummy_bus | 3);
269 }
270 } else {
271 for (i = 0; i < obj_size; i += 8) {
272 nv_wo32(dev, gpuobj, (i+0)/4,
273 dev_priv->gart_info.sg_dummy_bus | 0x21);
274 nv_wo32(dev, gpuobj, (i+4)/4, 0);
275 }
276 }
277 dev_priv->engine.instmem.finish_access(dev);
278
279 dev_priv->gart_info.type = NOUVEAU_GART_SGDMA;
280 dev_priv->gart_info.aper_base = 0;
281 dev_priv->gart_info.aper_size = aper_size;
282 dev_priv->gart_info.sg_ctxdma = gpuobj;
283 return 0;
284}
285
/*
 * Tear down what nouveau_sgdma_init() created: unmap, unlock, and free
 * the dummy page, then destroy the SG ctxdma object.  Safe to call if
 * init never ran (both teardown paths tolerate absent state).
 */
void
nouveau_sgdma_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (dev_priv->gart_info.sg_dummy_page) {
		/* NOTE(review): init mapped the page with PAGE_SIZE but this
		 * unmaps with NV_CTXDMA_PAGE_SIZE — identical only when
		 * PAGE_SIZE is 4 KiB; verify on large-page configs. */
		pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus,
			       NV_CTXDMA_PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		unlock_page(dev_priv->gart_info.sg_dummy_page);
		__free_page(dev_priv->gart_info.sg_dummy_page);
		dev_priv->gart_info.sg_dummy_page = NULL;
		dev_priv->gart_info.sg_dummy_bus = 0;
	}

	nouveau_gpuobj_del(dev, &dev_priv->gart_info.sg_ctxdma);
}
302
303int
304nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
305{
306 struct drm_nouveau_private *dev_priv = dev->dev_private;
307 struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
308 struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
309 int pte;
310
311 pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
312 if (dev_priv->card_type < NV_50) {
313 instmem->prepare_access(dev, false);
314 *page = nv_ro32(dev, gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK;
315 instmem->finish_access(dev);
316 return 0;
317 }
318
319 NV_ERROR(dev, "Unimplemented on NV50\n");
320 return -EINVAL;
321}
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
new file mode 100644
index 000000000000..2ed41d339f6a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -0,0 +1,811 @@
1/*
2 * Copyright 2005 Stephane Marchesin
3 * Copyright 2008 Stuart Bennett
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE.
24 */
25
26#include <linux/swab.h>
27#include "drmP.h"
28#include "drm.h"
29#include "drm_sarea.h"
30#include "drm_crtc_helper.h"
31#include <linux/vgaarb.h>
32
33#include "nouveau_drv.h"
34#include "nouveau_drm.h"
35#include "nv50_display.h"
36
/* No-op engine hooks, used where a chipset generation needs no work for a
 * particular init/takedown stage (e.g. NV50 PFB init below). */
static int nouveau_stub_init(struct drm_device *dev) { return 0; }
static void nouveau_stub_takedown(struct drm_device *dev) {}
39
40static int nouveau_init_engine_ptrs(struct drm_device *dev)
41{
42 struct drm_nouveau_private *dev_priv = dev->dev_private;
43 struct nouveau_engine *engine = &dev_priv->engine;
44
45 switch (dev_priv->chipset & 0xf0) {
46 case 0x00:
47 engine->instmem.init = nv04_instmem_init;
48 engine->instmem.takedown = nv04_instmem_takedown;
49 engine->instmem.suspend = nv04_instmem_suspend;
50 engine->instmem.resume = nv04_instmem_resume;
51 engine->instmem.populate = nv04_instmem_populate;
52 engine->instmem.clear = nv04_instmem_clear;
53 engine->instmem.bind = nv04_instmem_bind;
54 engine->instmem.unbind = nv04_instmem_unbind;
55 engine->instmem.prepare_access = nv04_instmem_prepare_access;
56 engine->instmem.finish_access = nv04_instmem_finish_access;
57 engine->mc.init = nv04_mc_init;
58 engine->mc.takedown = nv04_mc_takedown;
59 engine->timer.init = nv04_timer_init;
60 engine->timer.read = nv04_timer_read;
61 engine->timer.takedown = nv04_timer_takedown;
62 engine->fb.init = nv04_fb_init;
63 engine->fb.takedown = nv04_fb_takedown;
64 engine->graph.grclass = nv04_graph_grclass;
65 engine->graph.init = nv04_graph_init;
66 engine->graph.takedown = nv04_graph_takedown;
67 engine->graph.fifo_access = nv04_graph_fifo_access;
68 engine->graph.channel = nv04_graph_channel;
69 engine->graph.create_context = nv04_graph_create_context;
70 engine->graph.destroy_context = nv04_graph_destroy_context;
71 engine->graph.load_context = nv04_graph_load_context;
72 engine->graph.unload_context = nv04_graph_unload_context;
73 engine->fifo.channels = 16;
74 engine->fifo.init = nv04_fifo_init;
75 engine->fifo.takedown = nouveau_stub_takedown;
76 engine->fifo.disable = nv04_fifo_disable;
77 engine->fifo.enable = nv04_fifo_enable;
78 engine->fifo.reassign = nv04_fifo_reassign;
79 engine->fifo.channel_id = nv04_fifo_channel_id;
80 engine->fifo.create_context = nv04_fifo_create_context;
81 engine->fifo.destroy_context = nv04_fifo_destroy_context;
82 engine->fifo.load_context = nv04_fifo_load_context;
83 engine->fifo.unload_context = nv04_fifo_unload_context;
84 break;
85 case 0x10:
86 engine->instmem.init = nv04_instmem_init;
87 engine->instmem.takedown = nv04_instmem_takedown;
88 engine->instmem.suspend = nv04_instmem_suspend;
89 engine->instmem.resume = nv04_instmem_resume;
90 engine->instmem.populate = nv04_instmem_populate;
91 engine->instmem.clear = nv04_instmem_clear;
92 engine->instmem.bind = nv04_instmem_bind;
93 engine->instmem.unbind = nv04_instmem_unbind;
94 engine->instmem.prepare_access = nv04_instmem_prepare_access;
95 engine->instmem.finish_access = nv04_instmem_finish_access;
96 engine->mc.init = nv04_mc_init;
97 engine->mc.takedown = nv04_mc_takedown;
98 engine->timer.init = nv04_timer_init;
99 engine->timer.read = nv04_timer_read;
100 engine->timer.takedown = nv04_timer_takedown;
101 engine->fb.init = nv10_fb_init;
102 engine->fb.takedown = nv10_fb_takedown;
103 engine->graph.grclass = nv10_graph_grclass;
104 engine->graph.init = nv10_graph_init;
105 engine->graph.takedown = nv10_graph_takedown;
106 engine->graph.channel = nv10_graph_channel;
107 engine->graph.create_context = nv10_graph_create_context;
108 engine->graph.destroy_context = nv10_graph_destroy_context;
109 engine->graph.fifo_access = nv04_graph_fifo_access;
110 engine->graph.load_context = nv10_graph_load_context;
111 engine->graph.unload_context = nv10_graph_unload_context;
112 engine->fifo.channels = 32;
113 engine->fifo.init = nv10_fifo_init;
114 engine->fifo.takedown = nouveau_stub_takedown;
115 engine->fifo.disable = nv04_fifo_disable;
116 engine->fifo.enable = nv04_fifo_enable;
117 engine->fifo.reassign = nv04_fifo_reassign;
118 engine->fifo.channel_id = nv10_fifo_channel_id;
119 engine->fifo.create_context = nv10_fifo_create_context;
120 engine->fifo.destroy_context = nv10_fifo_destroy_context;
121 engine->fifo.load_context = nv10_fifo_load_context;
122 engine->fifo.unload_context = nv10_fifo_unload_context;
123 break;
124 case 0x20:
125 engine->instmem.init = nv04_instmem_init;
126 engine->instmem.takedown = nv04_instmem_takedown;
127 engine->instmem.suspend = nv04_instmem_suspend;
128 engine->instmem.resume = nv04_instmem_resume;
129 engine->instmem.populate = nv04_instmem_populate;
130 engine->instmem.clear = nv04_instmem_clear;
131 engine->instmem.bind = nv04_instmem_bind;
132 engine->instmem.unbind = nv04_instmem_unbind;
133 engine->instmem.prepare_access = nv04_instmem_prepare_access;
134 engine->instmem.finish_access = nv04_instmem_finish_access;
135 engine->mc.init = nv04_mc_init;
136 engine->mc.takedown = nv04_mc_takedown;
137 engine->timer.init = nv04_timer_init;
138 engine->timer.read = nv04_timer_read;
139 engine->timer.takedown = nv04_timer_takedown;
140 engine->fb.init = nv10_fb_init;
141 engine->fb.takedown = nv10_fb_takedown;
142 engine->graph.grclass = nv20_graph_grclass;
143 engine->graph.init = nv20_graph_init;
144 engine->graph.takedown = nv20_graph_takedown;
145 engine->graph.channel = nv10_graph_channel;
146 engine->graph.create_context = nv20_graph_create_context;
147 engine->graph.destroy_context = nv20_graph_destroy_context;
148 engine->graph.fifo_access = nv04_graph_fifo_access;
149 engine->graph.load_context = nv20_graph_load_context;
150 engine->graph.unload_context = nv20_graph_unload_context;
151 engine->fifo.channels = 32;
152 engine->fifo.init = nv10_fifo_init;
153 engine->fifo.takedown = nouveau_stub_takedown;
154 engine->fifo.disable = nv04_fifo_disable;
155 engine->fifo.enable = nv04_fifo_enable;
156 engine->fifo.reassign = nv04_fifo_reassign;
157 engine->fifo.channel_id = nv10_fifo_channel_id;
158 engine->fifo.create_context = nv10_fifo_create_context;
159 engine->fifo.destroy_context = nv10_fifo_destroy_context;
160 engine->fifo.load_context = nv10_fifo_load_context;
161 engine->fifo.unload_context = nv10_fifo_unload_context;
162 break;
163 case 0x30:
164 engine->instmem.init = nv04_instmem_init;
165 engine->instmem.takedown = nv04_instmem_takedown;
166 engine->instmem.suspend = nv04_instmem_suspend;
167 engine->instmem.resume = nv04_instmem_resume;
168 engine->instmem.populate = nv04_instmem_populate;
169 engine->instmem.clear = nv04_instmem_clear;
170 engine->instmem.bind = nv04_instmem_bind;
171 engine->instmem.unbind = nv04_instmem_unbind;
172 engine->instmem.prepare_access = nv04_instmem_prepare_access;
173 engine->instmem.finish_access = nv04_instmem_finish_access;
174 engine->mc.init = nv04_mc_init;
175 engine->mc.takedown = nv04_mc_takedown;
176 engine->timer.init = nv04_timer_init;
177 engine->timer.read = nv04_timer_read;
178 engine->timer.takedown = nv04_timer_takedown;
179 engine->fb.init = nv10_fb_init;
180 engine->fb.takedown = nv10_fb_takedown;
181 engine->graph.grclass = nv30_graph_grclass;
182 engine->graph.init = nv30_graph_init;
183 engine->graph.takedown = nv20_graph_takedown;
184 engine->graph.fifo_access = nv04_graph_fifo_access;
185 engine->graph.channel = nv10_graph_channel;
186 engine->graph.create_context = nv20_graph_create_context;
187 engine->graph.destroy_context = nv20_graph_destroy_context;
188 engine->graph.load_context = nv20_graph_load_context;
189 engine->graph.unload_context = nv20_graph_unload_context;
190 engine->fifo.channels = 32;
191 engine->fifo.init = nv10_fifo_init;
192 engine->fifo.takedown = nouveau_stub_takedown;
193 engine->fifo.disable = nv04_fifo_disable;
194 engine->fifo.enable = nv04_fifo_enable;
195 engine->fifo.reassign = nv04_fifo_reassign;
196 engine->fifo.channel_id = nv10_fifo_channel_id;
197 engine->fifo.create_context = nv10_fifo_create_context;
198 engine->fifo.destroy_context = nv10_fifo_destroy_context;
199 engine->fifo.load_context = nv10_fifo_load_context;
200 engine->fifo.unload_context = nv10_fifo_unload_context;
201 break;
202 case 0x40:
203 case 0x60:
204 engine->instmem.init = nv04_instmem_init;
205 engine->instmem.takedown = nv04_instmem_takedown;
206 engine->instmem.suspend = nv04_instmem_suspend;
207 engine->instmem.resume = nv04_instmem_resume;
208 engine->instmem.populate = nv04_instmem_populate;
209 engine->instmem.clear = nv04_instmem_clear;
210 engine->instmem.bind = nv04_instmem_bind;
211 engine->instmem.unbind = nv04_instmem_unbind;
212 engine->instmem.prepare_access = nv04_instmem_prepare_access;
213 engine->instmem.finish_access = nv04_instmem_finish_access;
214 engine->mc.init = nv40_mc_init;
215 engine->mc.takedown = nv40_mc_takedown;
216 engine->timer.init = nv04_timer_init;
217 engine->timer.read = nv04_timer_read;
218 engine->timer.takedown = nv04_timer_takedown;
219 engine->fb.init = nv40_fb_init;
220 engine->fb.takedown = nv40_fb_takedown;
221 engine->graph.grclass = nv40_graph_grclass;
222 engine->graph.init = nv40_graph_init;
223 engine->graph.takedown = nv40_graph_takedown;
224 engine->graph.fifo_access = nv04_graph_fifo_access;
225 engine->graph.channel = nv40_graph_channel;
226 engine->graph.create_context = nv40_graph_create_context;
227 engine->graph.destroy_context = nv40_graph_destroy_context;
228 engine->graph.load_context = nv40_graph_load_context;
229 engine->graph.unload_context = nv40_graph_unload_context;
230 engine->fifo.channels = 32;
231 engine->fifo.init = nv40_fifo_init;
232 engine->fifo.takedown = nouveau_stub_takedown;
233 engine->fifo.disable = nv04_fifo_disable;
234 engine->fifo.enable = nv04_fifo_enable;
235 engine->fifo.reassign = nv04_fifo_reassign;
236 engine->fifo.channel_id = nv10_fifo_channel_id;
237 engine->fifo.create_context = nv40_fifo_create_context;
238 engine->fifo.destroy_context = nv40_fifo_destroy_context;
239 engine->fifo.load_context = nv40_fifo_load_context;
240 engine->fifo.unload_context = nv40_fifo_unload_context;
241 break;
242 case 0x50:
243 case 0x80: /* gotta love NVIDIA's consistency.. */
244 case 0x90:
245 case 0xA0:
246 engine->instmem.init = nv50_instmem_init;
247 engine->instmem.takedown = nv50_instmem_takedown;
248 engine->instmem.suspend = nv50_instmem_suspend;
249 engine->instmem.resume = nv50_instmem_resume;
250 engine->instmem.populate = nv50_instmem_populate;
251 engine->instmem.clear = nv50_instmem_clear;
252 engine->instmem.bind = nv50_instmem_bind;
253 engine->instmem.unbind = nv50_instmem_unbind;
254 engine->instmem.prepare_access = nv50_instmem_prepare_access;
255 engine->instmem.finish_access = nv50_instmem_finish_access;
256 engine->mc.init = nv50_mc_init;
257 engine->mc.takedown = nv50_mc_takedown;
258 engine->timer.init = nv04_timer_init;
259 engine->timer.read = nv04_timer_read;
260 engine->timer.takedown = nv04_timer_takedown;
261 engine->fb.init = nouveau_stub_init;
262 engine->fb.takedown = nouveau_stub_takedown;
263 engine->graph.grclass = nv50_graph_grclass;
264 engine->graph.init = nv50_graph_init;
265 engine->graph.takedown = nv50_graph_takedown;
266 engine->graph.fifo_access = nv50_graph_fifo_access;
267 engine->graph.channel = nv50_graph_channel;
268 engine->graph.create_context = nv50_graph_create_context;
269 engine->graph.destroy_context = nv50_graph_destroy_context;
270 engine->graph.load_context = nv50_graph_load_context;
271 engine->graph.unload_context = nv50_graph_unload_context;
272 engine->fifo.channels = 128;
273 engine->fifo.init = nv50_fifo_init;
274 engine->fifo.takedown = nv50_fifo_takedown;
275 engine->fifo.disable = nv04_fifo_disable;
276 engine->fifo.enable = nv04_fifo_enable;
277 engine->fifo.reassign = nv04_fifo_reassign;
278 engine->fifo.channel_id = nv50_fifo_channel_id;
279 engine->fifo.create_context = nv50_fifo_create_context;
280 engine->fifo.destroy_context = nv50_fifo_destroy_context;
281 engine->fifo.load_context = nv50_fifo_load_context;
282 engine->fifo.unload_context = nv50_fifo_unload_context;
283 break;
284 default:
285 NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset);
286 return 1;
287 }
288
289 return 0;
290}
291
292static unsigned int
293nouveau_vga_set_decode(void *priv, bool state)
294{
295 if (state)
296 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
297 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
298 else
299 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
300}
301
/* Bring the whole card up: engine hooks, BIOS tables, instance memory, the
 * memory manager, every hardware engine, IRQs, the kernel channel and its
 * DMA objects, and (under KMS) the display.  Idempotent: returns 0
 * immediately if init already completed.
 *
 * NOTE(review): every early "return ret" below leaves the stages that
 * already succeeded initialised (no unwinding) with init_state stuck at
 * NOUVEAU_CARD_INIT_FAILED — confirm nouveau_card_takedown() copes with a
 * partially initialised card before relying on these paths.
 */
int
nouveau_card_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine;
	struct nouveau_gpuobj *gpuobj;
	int ret;

	NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state);

	if (dev_priv->init_state == NOUVEAU_CARD_INIT_DONE)
		return 0;

	/* Register with the VGA arbiter so legacy decoding can be tracked. */
	vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);

	/* Initialise internal driver API hooks */
	ret = nouveau_init_engine_ptrs(dev);
	if (ret)
		return ret;
	engine = &dev_priv->engine;
	/* Assume failure until the very end; flipped to DONE on success. */
	dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED;

	/* Parse BIOS tables / Run init tables if card not POSTed */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = nouveau_bios_init(dev);
		if (ret)
			return ret;
	}

	ret = nouveau_gpuobj_early_init(dev);
	if (ret)
		return ret;

	/* Initialise instance memory, must happen before mem_init so we
	 * know exactly how much VRAM we're able to use for "normal"
	 * purposes.
	 */
	ret = engine->instmem.init(dev);
	if (ret)
		return ret;

	/* Setup the memory manager */
	ret = nouveau_mem_init(dev);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_init(dev);
	if (ret)
		return ret;

	/* PMC */
	ret = engine->mc.init(dev);
	if (ret)
		return ret;

	/* PTIMER */
	ret = engine->timer.init(dev);
	if (ret)
		return ret;

	/* PFB */
	ret = engine->fb.init(dev);
	if (ret)
		return ret;

	/* PGRAPH */
	ret = engine->graph.init(dev);
	if (ret)
		return ret;

	/* PFIFO */
	ret = engine->fifo.init(dev);
	if (ret)
		return ret;

	/* this call irq_preinstall, register irq handler and
	 * call irq_postinstall
	 */
	ret = drm_irq_install(dev);
	if (ret)
		return ret;

	/* NOTE(review): vblank initialised with 0 CRTCs — presumably the
	 * display code manages vblanks itself; confirm. */
	ret = drm_vblank_init(dev, 0);
	if (ret)
		return ret;

	/* what about PVIDEO/PCRTC/PRAMDAC etc? */

	/* Kernel-owned channel; (struct drm_file *)-2 appears to be a
	 * sentinel marking "no userspace owner" — confirm against
	 * nouveau_channel_alloc(). */
	ret = nouveau_channel_alloc(dev, &dev_priv->channel,
				    (struct drm_file *)-2,
				    NvDmaFB, NvDmaTT);
	if (ret)
		return ret;

	/* DMA object covering all of VRAM, bound into the kernel channel. */
	gpuobj = NULL;
	ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
				     0, nouveau_mem_fb_amount(dev),
				     NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM,
				     &gpuobj);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaVRAM,
				     gpuobj, NULL);
	if (ret) {
		nouveau_gpuobj_del(dev, &gpuobj);
		return ret;
	}

	/* DMA object covering the GART aperture, likewise bound. */
	gpuobj = NULL;
	ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0,
					  dev_priv->gart_info.aper_size,
					  NV_DMA_ACCESS_RW, &gpuobj, NULL);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaGART,
				     gpuobj, NULL);
	if (ret) {
		nouveau_gpuobj_del(dev, &gpuobj);
		return ret;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		if (dev_priv->card_type >= NV_50) {
			ret = nv50_display_create(dev);
			if (ret)
				return ret;
		} else {
			ret = nv04_display_create(dev);
			if (ret)
				return ret;
		}
	}

	/* Backlight failure is non-fatal: log and carry on. */
	ret = nouveau_backlight_init(dev);
	if (ret)
		NV_ERROR(dev, "Error %d registering backlight\n", ret);

	dev_priv->init_state = NOUVEAU_CARD_INIT_DONE;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		drm_helper_initial_config(dev);

	return 0;
}
448
/* Undo nouveau_card_init(), roughly in reverse order of initialisation:
 * channel, engines, TTM/GART, gpuobjs, memory, instmem, IRQs, BIOS state
 * and the VGA-arbiter registration.  No-op if the card is already down.
 * The statement order here is load-bearing; do not reorder casually.
 */
static void nouveau_card_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;

	NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state);

	if (dev_priv->init_state != NOUVEAU_CARD_INIT_DOWN) {
		nouveau_backlight_exit(dev);

		/* Kernel channel must die before the engines it uses. */
		if (dev_priv->channel) {
			nouveau_channel_free(dev_priv->channel);
			dev_priv->channel = NULL;
		}

		engine->fifo.takedown(dev);
		engine->graph.takedown(dev);
		engine->fb.takedown(dev);
		engine->timer.takedown(dev);
		engine->mc.takedown(dev);

		/* Drain the TTM GART region under the struct mutex, then
		 * release the SGDMA backing. */
		mutex_lock(&dev->struct_mutex);
		ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT);
		mutex_unlock(&dev->struct_mutex);
		nouveau_sgdma_takedown(dev);

		nouveau_gpuobj_takedown(dev);
		nouveau_mem_close(dev);
		engine->instmem.takedown(dev);

		/* NOTE(review): IRQs are only uninstalled under KMS, while
		 * nouveau_card_init() installs them unconditionally —
		 * presumably the UMS path uninstalls elsewhere; confirm. */
		if (drm_core_check_feature(dev, DRIVER_MODESET))
			drm_irq_uninstall(dev);

		nouveau_gpuobj_late_takedown(dev);
		nouveau_bios_takedown(dev);

		/* Unregister from the VGA arbiter. */
		vga_client_register(dev->pdev, NULL, NULL, NULL);

		dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN;
	}
}
490
/* here a client dies, release the stuff that was allocated for its
 * file_priv */
void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv)
{
	/* Per-file cleanup is limited to channel state; everything else is
	 * handled by DRM core or GEM teardown. */
	nouveau_channel_cleanup(dev, file_priv);
}
497
/* first module load, setup the mmio/fb mapping */
/* KMS: we need mmio at load time, not when the first drm client opens. */
int nouveau_firstopen(struct drm_device *dev)
{
	/* All real setup now happens in nouveau_load(); this hook is kept
	 * only because the DRM driver structure expects it. */
	return 0;
}
504
/* if we have an OF card, copy vbios to RAMIN */
static void nouveau_OF_copy_vbios_to_ramin(struct drm_device *dev)
{
#if defined(__powerpc__)
	struct device_node *node = pci_device_to_OF_node(dev->pdev);
	const uint32_t *vbios;
	int len, off;

	if (!node) {
		NV_INFO(dev, "Unable to get the OF node\n");
		return;
	}

	vbios = of_get_property(node, "NVDA,BMP", &len);
	if (!vbios) {
		NV_INFO(dev, "Unable to get the OF bios\n");
		return;
	}

	/* Copy the firmware image into RAMIN one 32-bit word at a time. */
	for (off = 0; off < len; off += 4)
		nv_wi32(dev, off, vbios[off / 4]);
	NV_INFO(dev, "OF bios successfully copied (%d bytes)\n", len);
#endif
}
527
/* DRM "load" entry point: allocate driver-private state, map MMIO and
 * RAMIN, detect the chipset generation, and (under KMS) bring the whole
 * card up immediately.
 *
 * NOTE(review): the error paths below leak resources acquired earlier in
 * this function (workqueue, mmio/ramin mappings, dev_priv itself) — the
 * DRM core presumably calls nouveau_unload() on load failure; confirm, and
 * note create_workqueue() failure returns -EINVAL rather than -ENOMEM.
 */
int nouveau_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_nouveau_private *dev_priv;
	uint32_t reg0;
	resource_size_t mmio_start_offs;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (!dev_priv)
		return -ENOMEM;
	dev->dev_private = dev_priv;
	dev_priv->dev = dev;

	dev_priv->flags = flags & NOUVEAU_FLAGS;
	dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN;

	NV_DEBUG(dev, "vendor: 0x%X device: 0x%X class: 0x%X\n",
		 dev->pci_vendor, dev->pci_device, dev->pdev->class);

	/* Probe for ACPI _DSM (hybrid graphics) support. */
	dev_priv->acpi_dsm = nouveau_dsm_probe(dev);

	if (dev_priv->acpi_dsm)
		nouveau_hybrid_setup(dev);

	dev_priv->wq = create_workqueue("nouveau");
	if (!dev_priv->wq)
		return -EINVAL;

	/* resource 0 is mmio regs */
	/* resource 1 is linear FB */
	/* resource 2 is RAMIN (mmio regs + 0x1000000) */
	/* resource 6 is bios */

	/* map the mmio regs */
	mmio_start_offs = pci_resource_start(dev->pdev, 0);
	dev_priv->mmio = ioremap(mmio_start_offs, 0x00800000);
	if (!dev_priv->mmio) {
		NV_ERROR(dev, "Unable to initialize the mmio mapping. "
			 "Please report your setup to " DRIVER_EMAIL "\n");
		return -EINVAL;
	}
	NV_DEBUG(dev, "regs mapped ok at 0x%llx\n",
		 (unsigned long long)mmio_start_offs);

#ifdef __BIG_ENDIAN
	/* Put the card in BE mode if it's not */
	if (nv_rd32(dev, NV03_PMC_BOOT_1))
		nv_wr32(dev, NV03_PMC_BOOT_1, 0x00000001);

	DRM_MEMORYBARRIER();
#endif

	/* Time to determine the card architecture */
	reg0 = nv_rd32(dev, NV03_PMC_BOOT_0);

	/* We're dealing with >=NV10 */
	if ((reg0 & 0x0f000000) > 0) {
		/* Bit 27-20 contain the architecture in hex */
		dev_priv->chipset = (reg0 & 0xff00000) >> 20;
	/* NV04 or NV05 */
	} else if ((reg0 & 0xff00fff0) == 0x20004000) {
		dev_priv->chipset = 0x04;
	} else
		dev_priv->chipset = 0xff;	/* unknown; rejected below */

	/* Map the chipset's upper nibble onto a card generation. */
	switch (dev_priv->chipset & 0xf0) {
	case 0x00:
	case 0x10:
	case 0x20:
	case 0x30:
		dev_priv->card_type = dev_priv->chipset & 0xf0;
		break;
	case 0x40:
	case 0x60:
		dev_priv->card_type = NV_40;
		break;
	case 0x50:
	case 0x80:
	case 0x90:
	case 0xa0:
		dev_priv->card_type = NV_50;
		break;
	default:
		NV_INFO(dev, "Unsupported chipset 0x%08x\n", reg0);
		return -EINVAL;
	}

	NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n",
		dev_priv->card_type, reg0);

	/* map larger RAMIN aperture on NV40 cards */
	dev_priv->ramin = NULL;
	if (dev_priv->card_type >= NV_40) {
		int ramin_bar = 2;
		/* Some boards expose RAMIN on BAR3 instead of BAR2. */
		if (pci_resource_len(dev->pdev, ramin_bar) == 0)
			ramin_bar = 3;

		dev_priv->ramin_size = pci_resource_len(dev->pdev, ramin_bar);
		dev_priv->ramin = ioremap(
				pci_resource_start(dev->pdev, ramin_bar),
				dev_priv->ramin_size);
		if (!dev_priv->ramin) {
			/* Non-fatal: fall back to the BAR0 window below. */
			NV_ERROR(dev, "Failed to init RAMIN mapping, "
				      "limited instance memory available\n");
		}
	}

	/* On older cards (or if the above failed), create a map covering
	 * the BAR0 PRAMIN aperture */
	if (!dev_priv->ramin) {
		dev_priv->ramin_size = 1 * 1024 * 1024;
		dev_priv->ramin = ioremap(mmio_start_offs + NV_RAMIN,
							dev_priv->ramin_size);
		if (!dev_priv->ramin) {
			NV_ERROR(dev, "Failed to map BAR0 PRAMIN.\n");
			return -ENOMEM;
		}
	}

	nouveau_OF_copy_vbios_to_ramin(dev);

	/* Special flags */
	if (dev->pci_device == 0x01a0)
		dev_priv->flags |= NV_NFORCE;
	else if (dev->pci_device == 0x01f0)
		dev_priv->flags |= NV_NFORCE2;

	/* For kernel modesetting, init card now and bring up fbcon */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		int ret = nouveau_card_init(dev);
		if (ret)
			return ret;
	}

	return 0;
}
663
664static void nouveau_close(struct drm_device *dev)
665{
666 struct drm_nouveau_private *dev_priv = dev->dev_private;
667
668 /* In the case of an error dev_priv may not be be allocated yet */
669 if (dev_priv && dev_priv->card_type)
670 nouveau_card_takedown(dev);
671}
672
673/* KMS: we need mmio at load time, not when the first drm client opens. */
674void nouveau_lastclose(struct drm_device *dev)
675{
676 if (drm_core_check_feature(dev, DRIVER_MODESET))
677 return;
678
679 nouveau_close(dev);
680}
681
682int nouveau_unload(struct drm_device *dev)
683{
684 struct drm_nouveau_private *dev_priv = dev->dev_private;
685
686 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
687 if (dev_priv->card_type >= NV_50)
688 nv50_display_destroy(dev);
689 else
690 nv04_display_destroy(dev);
691 nouveau_close(dev);
692 }
693
694 iounmap(dev_priv->mmio);
695 iounmap(dev_priv->ramin);
696
697 kfree(dev_priv);
698 dev->dev_private = NULL;
699 return 0;
700}
701
/* Userspace-triggered card init for the non-KMS path.  Effectively
 * idempotent, since nouveau_card_init() returns 0 immediately once
 * init_state reached NOUVEAU_CARD_INIT_DONE. */
int
nouveau_ioctl_card_init(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return nouveau_card_init(dev);
}
708
709int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
710 struct drm_file *file_priv)
711{
712 struct drm_nouveau_private *dev_priv = dev->dev_private;
713 struct drm_nouveau_getparam *getparam = data;
714
715 NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
716
717 switch (getparam->param) {
718 case NOUVEAU_GETPARAM_CHIPSET_ID:
719 getparam->value = dev_priv->chipset;
720 break;
721 case NOUVEAU_GETPARAM_PCI_VENDOR:
722 getparam->value = dev->pci_vendor;
723 break;
724 case NOUVEAU_GETPARAM_PCI_DEVICE:
725 getparam->value = dev->pci_device;
726 break;
727 case NOUVEAU_GETPARAM_BUS_TYPE:
728 if (drm_device_is_agp(dev))
729 getparam->value = NV_AGP;
730 else if (drm_device_is_pcie(dev))
731 getparam->value = NV_PCIE;
732 else
733 getparam->value = NV_PCI;
734 break;
735 case NOUVEAU_GETPARAM_FB_PHYSICAL:
736 getparam->value = dev_priv->fb_phys;
737 break;
738 case NOUVEAU_GETPARAM_AGP_PHYSICAL:
739 getparam->value = dev_priv->gart_info.aper_base;
740 break;
741 case NOUVEAU_GETPARAM_PCI_PHYSICAL:
742 if (dev->sg) {
743 getparam->value = (unsigned long)dev->sg->virtual;
744 } else {
745 NV_ERROR(dev, "Requested PCIGART address, "
746 "while no PCIGART was created\n");
747 return -EINVAL;
748 }
749 break;
750 case NOUVEAU_GETPARAM_FB_SIZE:
751 getparam->value = dev_priv->fb_available_size;
752 break;
753 case NOUVEAU_GETPARAM_AGP_SIZE:
754 getparam->value = dev_priv->gart_info.aper_size;
755 break;
756 case NOUVEAU_GETPARAM_VM_VRAM_BASE:
757 getparam->value = dev_priv->vm_vram_base;
758 break;
759 default:
760 NV_ERROR(dev, "unknown parameter %lld\n", getparam->param);
761 return -EINVAL;
762 }
763
764 return 0;
765}
766
767int
768nouveau_ioctl_setparam(struct drm_device *dev, void *data,
769 struct drm_file *file_priv)
770{
771 struct drm_nouveau_setparam *setparam = data;
772
773 NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
774
775 switch (setparam->param) {
776 default:
777 NV_ERROR(dev, "unknown parameter %lld\n", setparam->param);
778 return -EINVAL;
779 }
780
781 return 0;
782}
783
784/* Wait until (value(reg) & mask) == val, up until timeout has hit */
785bool nouveau_wait_until(struct drm_device *dev, uint64_t timeout,
786 uint32_t reg, uint32_t mask, uint32_t val)
787{
788 struct drm_nouveau_private *dev_priv = dev->dev_private;
789 struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
790 uint64_t start = ptimer->read(dev);
791
792 do {
793 if ((nv_rd32(dev, reg) & mask) == val)
794 return true;
795 } while (ptimer->read(dev) - start < timeout);
796
797 return false;
798}
799
800/* Waits for PGRAPH to go completely idle */
801bool nouveau_wait_for_idle(struct drm_device *dev)
802{
803 if (!nv_wait(NV04_PGRAPH_STATUS, 0xffffffff, 0x00000000)) {
804 NV_ERROR(dev, "PGRAPH idle timed out with status 0x%08x\n",
805 nv_rd32(dev, NV04_PGRAPH_STATUS));
806 return false;
807 }
808
809 return true;
810}
811
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
new file mode 100644
index 000000000000..187eb84e4da5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -0,0 +1,131 @@
1/*
2 * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
3 * All Rights Reserved.
4 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sub license,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 */
26
27#include "drmP.h"
28
29#include "nouveau_drv.h"
30
/* Copy of TTM's vm_operations with the fault handler swapped for ours, and
 * the pointer to TTM's original ops; both populated lazily on the first
 * nouveau_ttm_mmap() call. */
static struct vm_operations_struct nouveau_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops;
33
34static int
35nouveau_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
36{
37 struct ttm_buffer_object *bo = vma->vm_private_data;
38 int ret;
39
40 if (unlikely(bo == NULL))
41 return VM_FAULT_NOPAGE;
42
43 ret = ttm_vm_ops->fault(vma, vmf);
44 return ret;
45}
46
/* mmap entry point: offsets below DRM_FILE_PAGE_OFFSET go to the legacy
 * drm_mmap() path; everything else is a TTM buffer-object mapping.  After
 * ttm_bo_mmap() installs TTM's vm_ops, we substitute a copy whose fault
 * handler is ours (nouveau_ttm_fault), keeping every other op intact.
 */
int
nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_nouveau_private *dev_priv =
		file_priv->minor->dev->dev_private;
	int ret;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return drm_mmap(filp, vma);

	ret = ttm_bo_mmap(filp, vma, &dev_priv->ttm.bdev);
	if (unlikely(ret != 0))
		return ret;

	/* One-time lazy capture of TTM's vm_ops.  NOTE(review): this check
	 * is not serialised — two first-time mmaps racing here would both
	 * write the statics; harmless only if both write identical values.
	 * Confirm whether a lock (e.g. struct_mutex) is held on this path. */
	if (unlikely(ttm_vm_ops == NULL)) {
		ttm_vm_ops = vma->vm_ops;
		nouveau_ttm_vm_ops = *ttm_vm_ops;
		nouveau_ttm_vm_ops.fault = &nouveau_ttm_fault;
	}

	vma->vm_ops = &nouveau_ttm_vm_ops;
	return 0;
}
71
/* Adapter for TTM's global-reference "init" hook: the real work happens in
 * ttm_mem_global_init() on the referenced object. */
static int
nouveau_ttm_mem_global_init(struct ttm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}
77
/* Adapter for TTM's global-reference "release" hook; mirrors the init
 * adapter above. */
static void
nouveau_ttm_mem_global_release(struct ttm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}
83
84int
85nouveau_ttm_global_init(struct drm_nouveau_private *dev_priv)
86{
87 struct ttm_global_reference *global_ref;
88 int ret;
89
90 global_ref = &dev_priv->ttm.mem_global_ref;
91 global_ref->global_type = TTM_GLOBAL_TTM_MEM;
92 global_ref->size = sizeof(struct ttm_mem_global);
93 global_ref->init = &nouveau_ttm_mem_global_init;
94 global_ref->release = &nouveau_ttm_mem_global_release;
95
96 ret = ttm_global_item_ref(global_ref);
97 if (unlikely(ret != 0)) {
98 DRM_ERROR("Failed setting up TTM memory accounting\n");
99 dev_priv->ttm.mem_global_ref.release = NULL;
100 return ret;
101 }
102
103 dev_priv->ttm.bo_global_ref.mem_glob = global_ref->object;
104 global_ref = &dev_priv->ttm.bo_global_ref.ref;
105 global_ref->global_type = TTM_GLOBAL_TTM_BO;
106 global_ref->size = sizeof(struct ttm_bo_global);
107 global_ref->init = &ttm_bo_global_init;
108 global_ref->release = &ttm_bo_global_release;
109
110 ret = ttm_global_item_ref(global_ref);
111 if (unlikely(ret != 0)) {
112 DRM_ERROR("Failed setting up TTM BO subsystem\n");
113 ttm_global_item_unref(&dev_priv->ttm.mem_global_ref);
114 dev_priv->ttm.mem_global_ref.release = NULL;
115 return ret;
116 }
117
118 return 0;
119}
120
121void
122nouveau_ttm_global_release(struct drm_nouveau_private *dev_priv)
123{
124 if (dev_priv->ttm.mem_global_ref.release == NULL)
125 return;
126
127 ttm_global_item_unref(&dev_priv->ttm.bo_global_ref.ref);
128 ttm_global_item_unref(&dev_priv->ttm.mem_global_ref);
129 dev_priv->ttm.mem_global_ref.release = NULL;
130}
131
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
new file mode 100644
index 000000000000..b91363606055
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -0,0 +1,1002 @@
1/*
2 * Copyright 1993-2003 NVIDIA, Corporation
3 * Copyright 2006 Dave Airlie
4 * Copyright 2007 Maarten Maathuis
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE.
24 */
25
26#include "drmP.h"
27#include "drm_crtc_helper.h"
28
29#include "nouveau_drv.h"
30#include "nouveau_encoder.h"
31#include "nouveau_connector.h"
32#include "nouveau_crtc.h"
33#include "nouveau_fb.h"
34#include "nouveau_hw.h"
35#include "nvreg.h"
36
37static int
38nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
39 struct drm_framebuffer *old_fb);
40
41static void
42crtc_wr_cio_state(struct drm_crtc *crtc, struct nv04_crtc_reg *crtcstate, int index)
43{
44 NVWriteVgaCrtc(crtc->dev, nouveau_crtc(crtc)->index, index,
45 crtcstate->CRTC[index]);
46}
47
/*
 * Program colour-saturation boost ("digital vibrance") for this head.
 *
 * The requested level is cached in nv_crtc->saturation and shadowed in the
 * CRE_CSB register.  On GeForce4-class display architectures a non-zero
 * level is instead expressed as a fixed 0x80 enable value in CRE_CSB plus
 * the level (scaled by 4) in CRE_5B.  Both shadow registers are written
 * straight through to the hardware.
 */
static void nv_crtc_set_digital_vibrance(struct drm_crtc *crtc, int level)
{
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
	struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];

	regp->CRTC[NV_CIO_CRE_CSB] = nv_crtc->saturation = level;
	if (nv_crtc->saturation && nv_gf4_disp_arch(crtc->dev)) {
		/* gf4+: CSB becomes an enable flag, level moves to CRE_5B */
		regp->CRTC[NV_CIO_CRE_CSB] = 0x80;
		regp->CRTC[NV_CIO_CRE_5B] = nv_crtc->saturation << 2;
		crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_5B);
	}
	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_CSB);
}
62
/*
 * Program image sharpening (positive level) / blur (negative level) via
 * PRAMDAC register 0x634.  The signed level is cached in nv_crtc->sharpness;
 * negative values are mapped into the hardware's blur range before writing.
 */
static void nv_crtc_set_image_sharpening(struct drm_crtc *crtc, int level)
{
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
	struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];

	nv_crtc->sharpness = level;
	if (level < 0)	/* blur is in hw range 0x3f -> 0x20 */
		level += 0x40;
	regp->ramdac_634 = level;
	NVWriteRAMDAC(crtc->dev, nv_crtc->index, NV_PRAMDAC_634, regp->ramdac_634);
}
75
76#define PLLSEL_VPLL1_MASK \
77 (NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_VPLL \
78 | NV_PRAMDAC_PLL_COEFF_SELECT_VCLK_RATIO_DB2)
79#define PLLSEL_VPLL2_MASK \
80 (NV_PRAMDAC_PLL_COEFF_SELECT_PLL_SOURCE_VPLL2 \
81 | NV_PRAMDAC_PLL_COEFF_SELECT_VCLK2_RATIO_DB2)
82#define PLLSEL_TV_MASK \
83 (NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK1 \
84 | NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK1 \
85 | NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK2 \
86 | NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK2)
87
88/* NV4x 0x40.. pll notes:
89 * gpu pll: 0x4000 + 0x4004
90 * ?gpu? pll: 0x4008 + 0x400c
91 * vpll1: 0x4010 + 0x4014
92 * vpll2: 0x4018 + 0x401c
93 * mpll: 0x4020 + 0x4024
94 * mpll: 0x4038 + 0x403c
95 *
96 * the first register of each pair has some unknown details:
97 * bits 0-7: redirected values from elsewhere? (similar to PLL_SETUP_CONTROL?)
98 * bits 20-23: (mpll) something to do with post divider?
99 * bits 28-31: related to single stage mode? (bit 8/12)
100 */
101
/*
 * Compute the VPLL coefficients and PLL source-select word for this head.
 *
 * @dot_clock is the adjusted mode's pixel clock (drm_display_mode.clock
 * units).  Results land in the software shadow state (regp->pllvals and
 * state->pllsel); nothing is written to hardware here except indirectly
 * via the cursor offset refresh at the end.  Returns silently if PLL
 * limits cannot be read or no valid coefficients are found.
 */
static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mode *mode, int dot_clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	struct nv04_mode_state *state = &dev_priv->mode_reg;
	struct nv04_crtc_reg *regp = &state->crtc_reg[nv_crtc->index];
	struct nouveau_pll_vals *pv = &regp->pllvals;
	struct pll_lims pll_lim;

	/* head 0 drives VPLL1, head 1 drives VPLL2 */
	if (get_pll_limits(dev, nv_crtc->index ? VPLL2 : VPLL1, &pll_lim))
		return;

	/* NM2 == 0 is used to determine single stage mode on two stage plls */
	pv->NM2 = 0;

	/* for newer nv4x the blob uses only the first stage of the vpll below a
	 * certain clock. for a certain nv4b this is 150MHz. since the max
	 * output frequency of the first stage for this card is 300MHz, it is
	 * assumed the threshold is given by vco1 maxfreq/2
	 */
	/* for early nv4x, specifically nv40 and *some* nv43 (devids 0 and 6,
	 * not 8, others unknown), the blob always uses both plls. no problem
	 * has yet been observed in allowing the use a single stage pll on all
	 * nv43 however. the behaviour of single stage use is untested on nv40
	 */
	if (dev_priv->chipset > 0x40 && dot_clock <= (pll_lim.vco1.maxfreq / 2))
		memset(&pll_lim.vco2, 0, sizeof(pll_lim.vco2));

	if (!nouveau_calc_pll_mnp(dev, &pll_lim, dot_clock, pv))
		return;

	/* keep only the bits belonging to other clock sources */
	state->pllsel &= PLLSEL_VPLL1_MASK | PLLSEL_VPLL2_MASK | PLLSEL_TV_MASK;

	/* The blob uses this always, so let's do the same */
	if (dev_priv->card_type == NV_40)
		state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_USE_VPLL2_TRUE;
	/* again nv40 and some nv43 act more like nv3x as described above */
	if (dev_priv->chipset < 0x41)
		state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_MPLL |
				 NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_NVPLL;
	state->pllsel |= nv_crtc->index ? PLLSEL_VPLL2_MASK : PLLSEL_VPLL1_MASK;

	if (pv->NM2)
		NV_TRACE(dev, "vpll: n1 %d n2 %d m1 %d m2 %d log2p %d\n",
			 pv->N1, pv->N2, pv->M1, pv->M2, pv->log2P);
	else
		NV_TRACE(dev, "vpll: n %d m %d log2p %d\n",
			 pv->N1, pv->M1, pv->log2P);

	/* re-program the cursor base; NOTE(review): presumably needed because
	 * the state computed above can move the cursor's view of vram */
	nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset);
}
154
/*
 * DPMS for a VGA-style head: gate the screen via sequencer clocking
 * (SR1 bit 5), horizontal retrace via CR17 bit 7, and h/v sync via the
 * two top bits of extended register RPC1.  Cached in nv_crtc->last_dpms
 * to skip redundant transitions.
 */
static void
nv_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned char seq1 = 0, crtc17 = 0;
	unsigned char crtc1A;

	NV_TRACE(dev, "Setting dpms mode %d on CRTC %d\n", mode,
		 nv_crtc->index);

	if (nv_crtc->last_dpms == mode) /* Don't do unnecessary mode changes. */
		return;

	nv_crtc->last_dpms = mode;

	if (nv_two_heads(dev))
		NVSetOwner(dev, nv_crtc->index);

	/* nv4ref indicates these two RPC1 bits inhibit h/v sync */
	crtc1A = NVReadVgaCrtc(dev, nv_crtc->index,
			       NV_CIO_CRE_RPC1_INDEX) & ~0xC0;
	switch (mode) {
	case DRM_MODE_DPMS_STANDBY:
		/* Screen: Off; HSync: Off, VSync: On -- Not Supported */
		seq1 = 0x20;
		crtc17 = 0x80;
		crtc1A |= 0x80;
		break;
	case DRM_MODE_DPMS_SUSPEND:
		/* Screen: Off; HSync: On, VSync: Off -- Not Supported */
		seq1 = 0x20;
		crtc17 = 0x80;
		crtc1A |= 0x40;
		break;
	case DRM_MODE_DPMS_OFF:
		/* Screen: Off; HSync: Off, VSync: Off */
		seq1 = 0x20;
		crtc17 = 0x00;
		crtc1A |= 0xC0;
		break;
	case DRM_MODE_DPMS_ON:
	default:
		/* Screen: On; HSync: On, VSync: On */
		seq1 = 0x00;
		crtc17 = 0x80;
		break;
	}

	NVVgaSeqReset(dev, nv_crtc->index, true);
	/* Each head has its own sequencer, so we can turn it off when we want */
	seq1 |= (NVReadVgaSeq(dev, nv_crtc->index, NV_VIO_SR_CLOCK_INDEX) & ~0x20);
	NVWriteVgaSeq(dev, nv_crtc->index, NV_VIO_SR_CLOCK_INDEX, seq1);
	crtc17 |= (NVReadVgaCrtc(dev, nv_crtc->index, NV_CIO_CR_MODE_INDEX) & ~0x80);
	mdelay(10);
	NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CR_MODE_INDEX, crtc17);
	NVVgaSeqReset(dev, nv_crtc->index, false);

	NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RPC1_INDEX, crtc1A);
}
215
/* No per-CRTC mode adjustments are needed on this hardware; accept the
 * requested mode unchanged.
 */
static bool
nv_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode,
		   struct drm_display_mode *adjusted_mode)
{
	return true;
}
222
/*
 * Fill in the shadow state for the standard VGA portion of a modeset:
 * CRTC timing registers, sequencer, graphics controller, attribute
 * controller and the miscellaneous output register.  Nothing is written
 * to hardware here; nouveau_hw_load_state() flushes the shadow later
 * (from nv_crtc_commit()).
 */
static void
nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
	struct drm_framebuffer *fb = crtc->fb;

	/* Calculate our timings; horizontal values are in character
	 * clocks (8 pixels), with the usual VGA register biases. */
	int horizDisplay	= (mode->crtc_hdisplay >> 3)	- 1;
	int horizStart		= (mode->crtc_hsync_start >> 3)	- 1;
	int horizEnd		= (mode->crtc_hsync_end >> 3)	- 1;
	int horizTotal		= (mode->crtc_htotal >> 3)	- 5;
	int horizBlankStart	= (mode->crtc_hdisplay >> 3)	- 1;
	int horizBlankEnd	= (mode->crtc_htotal >> 3)	- 1;
	int vertDisplay		= mode->crtc_vdisplay		- 1;
	int vertStart		= mode->crtc_vsync_start	- 1;
	int vertEnd		= mode->crtc_vsync_end		- 1;
	int vertTotal		= mode->crtc_vtotal		- 2;
	int vertBlankStart	= mode->crtc_vdisplay		- 1;
	int vertBlankEnd	= mode->crtc_vtotal		- 1;

	struct drm_encoder *encoder;
	bool fp_output = false;

	/* Digital flat-panel outputs get different sync/blank placement. */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);

		if (encoder->crtc == crtc &&
		    (nv_encoder->dcb->type == OUTPUT_LVDS ||
		     nv_encoder->dcb->type == OUTPUT_TMDS))
			fp_output = true;
	}

	if (fp_output) {
		vertStart = vertTotal - 3;
		vertEnd = vertTotal - 2;
		vertBlankStart = vertStart;
		horizStart = horizTotal - 5;
		horizEnd = horizTotal - 2;
		horizBlankEnd = horizTotal + 4;
#if 0
		if (dev->overlayAdaptor && dev_priv->card_type >= NV_10)
			/* This reportedly works around some video overlay bandwidth problems */
			horizTotal += 2;
#endif
	}

	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vertTotal |= 1;

#if 0
	ErrorF("horizDisplay: 0x%X \n", horizDisplay);
	ErrorF("horizStart: 0x%X \n", horizStart);
	ErrorF("horizEnd: 0x%X \n", horizEnd);
	ErrorF("horizTotal: 0x%X \n", horizTotal);
	ErrorF("horizBlankStart: 0x%X \n", horizBlankStart);
	ErrorF("horizBlankEnd: 0x%X \n", horizBlankEnd);
	ErrorF("vertDisplay: 0x%X \n", vertDisplay);
	ErrorF("vertStart: 0x%X \n", vertStart);
	ErrorF("vertEnd: 0x%X \n", vertEnd);
	ErrorF("vertTotal: 0x%X \n", vertTotal);
	ErrorF("vertBlankStart: 0x%X \n", vertBlankStart);
	ErrorF("vertBlankEnd: 0x%X \n", vertBlankEnd);
#endif

	/*
	 * compute correct Hsync & Vsync polarity
	 */
	if ((mode->flags & (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC))
	    && (mode->flags & (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))) {
		/* polarity explicitly specified by the mode */
		regp->MiscOutReg = 0x23;
		if (mode->flags & DRM_MODE_FLAG_NHSYNC)
			regp->MiscOutReg |= 0x40;
		if (mode->flags & DRM_MODE_FLAG_NVSYNC)
			regp->MiscOutReg |= 0x80;
	} else {
		/* otherwise pick the traditional VGA polarity by vdisplay */
		int vdisplay = mode->vdisplay;
		if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
			vdisplay *= 2;
		if (mode->vscan > 1)
			vdisplay *= mode->vscan;
		if (vdisplay < 400)
			regp->MiscOutReg = 0xA3;	/* +hsync -vsync */
		else if (vdisplay < 480)
			regp->MiscOutReg = 0x63;	/* -hsync +vsync */
		else if (vdisplay < 768)
			regp->MiscOutReg = 0xE3;	/* -hsync -vsync */
		else
			regp->MiscOutReg = 0x23;	/* +hsync +vsync */
	}

	regp->MiscOutReg |= (mode->clock_index & 0x03) << 2;

	/*
	 * Time Sequencer
	 */
	regp->Sequencer[NV_VIO_SR_RESET_INDEX] = 0x00;
	/* 0x20 disables the sequencer */
	if (mode->flags & DRM_MODE_FLAG_CLKDIV2)
		regp->Sequencer[NV_VIO_SR_CLOCK_INDEX] = 0x29;
	else
		regp->Sequencer[NV_VIO_SR_CLOCK_INDEX] = 0x21;
	regp->Sequencer[NV_VIO_SR_PLANE_MASK_INDEX] = 0x0F;
	regp->Sequencer[NV_VIO_SR_CHAR_MAP_INDEX] = 0x00;
	regp->Sequencer[NV_VIO_SR_MEM_MODE_INDEX] = 0x0E;

	/*
	 * CRTC
	 */
	regp->CRTC[NV_CIO_CR_HDT_INDEX] = horizTotal;
	regp->CRTC[NV_CIO_CR_HDE_INDEX] = horizDisplay;
	regp->CRTC[NV_CIO_CR_HBS_INDEX] = horizBlankStart;
	regp->CRTC[NV_CIO_CR_HBE_INDEX] = (1 << 7) |
					  XLATE(horizBlankEnd, 0, NV_CIO_CR_HBE_4_0);
	regp->CRTC[NV_CIO_CR_HRS_INDEX] = horizStart;
	regp->CRTC[NV_CIO_CR_HRE_INDEX] = XLATE(horizBlankEnd, 5, NV_CIO_CR_HRE_HBE_5) |
					  XLATE(horizEnd, 0, NV_CIO_CR_HRE_4_0);
	regp->CRTC[NV_CIO_CR_VDT_INDEX] = vertTotal;
	regp->CRTC[NV_CIO_CR_OVL_INDEX] = XLATE(vertStart, 9, NV_CIO_CR_OVL_VRS_9) |
					  XLATE(vertDisplay, 9, NV_CIO_CR_OVL_VDE_9) |
					  XLATE(vertTotal, 9, NV_CIO_CR_OVL_VDT_9) |
					  (1 << 4) |
					  XLATE(vertBlankStart, 8, NV_CIO_CR_OVL_VBS_8) |
					  XLATE(vertStart, 8, NV_CIO_CR_OVL_VRS_8) |
					  XLATE(vertDisplay, 8, NV_CIO_CR_OVL_VDE_8) |
					  XLATE(vertTotal, 8, NV_CIO_CR_OVL_VDT_8);
	regp->CRTC[NV_CIO_CR_RSAL_INDEX] = 0x00;
	regp->CRTC[NV_CIO_CR_CELL_HT_INDEX] = ((mode->flags & DRM_MODE_FLAG_DBLSCAN) ? MASK(NV_CIO_CR_CELL_HT_SCANDBL) : 0) |
					      1 << 6 |
					      XLATE(vertBlankStart, 9, NV_CIO_CR_CELL_HT_VBS_9);
	regp->CRTC[NV_CIO_CR_CURS_ST_INDEX] = 0x00;
	regp->CRTC[NV_CIO_CR_CURS_END_INDEX] = 0x00;
	regp->CRTC[NV_CIO_CR_SA_HI_INDEX] = 0x00;
	regp->CRTC[NV_CIO_CR_SA_LO_INDEX] = 0x00;
	regp->CRTC[NV_CIO_CR_TCOFF_HI_INDEX] = 0x00;
	regp->CRTC[NV_CIO_CR_TCOFF_LO_INDEX] = 0x00;
	regp->CRTC[NV_CIO_CR_VRS_INDEX] = vertStart;
	regp->CRTC[NV_CIO_CR_VRE_INDEX] = 1 << 5 | XLATE(vertEnd, 0, NV_CIO_CR_VRE_3_0);
	regp->CRTC[NV_CIO_CR_VDE_INDEX] = vertDisplay;
	/* framebuffer can be larger than crtc scanout area. */
	regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = fb->pitch / 8;
	regp->CRTC[NV_CIO_CR_ULINE_INDEX] = 0x00;
	regp->CRTC[NV_CIO_CR_VBS_INDEX] = vertBlankStart;
	regp->CRTC[NV_CIO_CR_VBE_INDEX] = vertBlankEnd;
	regp->CRTC[NV_CIO_CR_MODE_INDEX] = 0x43;
	regp->CRTC[NV_CIO_CR_LCOMP_INDEX] = 0xff;

	/*
	 * Some extended CRTC registers (they are not saved with the rest of the vga regs).
	 */

	/* framebuffer can be larger than crtc scanout area. */
	regp->CRTC[NV_CIO_CRE_RPC0_INDEX] = XLATE(fb->pitch / 8, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
	regp->CRTC[NV_CIO_CRE_RPC1_INDEX] = mode->crtc_hdisplay < 1280 ?
					    MASK(NV_CIO_CRE_RPC1_LARGE) : 0x00;
	/* overflow bits (bit >8) of the vga timing fields live here */
	regp->CRTC[NV_CIO_CRE_LSR_INDEX] = XLATE(horizBlankEnd, 6, NV_CIO_CRE_LSR_HBE_6) |
					   XLATE(vertBlankStart, 10, NV_CIO_CRE_LSR_VBS_10) |
					   XLATE(vertStart, 10, NV_CIO_CRE_LSR_VRS_10) |
					   XLATE(vertDisplay, 10, NV_CIO_CRE_LSR_VDE_10) |
					   XLATE(vertTotal, 10, NV_CIO_CRE_LSR_VDT_10);
	regp->CRTC[NV_CIO_CRE_HEB__INDEX] = XLATE(horizStart, 8, NV_CIO_CRE_HEB_HRS_8) |
					    XLATE(horizBlankStart, 8, NV_CIO_CRE_HEB_HBS_8) |
					    XLATE(horizDisplay, 8, NV_CIO_CRE_HEB_HDE_8) |
					    XLATE(horizTotal, 8, NV_CIO_CRE_HEB_HDT_8);
	regp->CRTC[NV_CIO_CRE_EBR_INDEX] = XLATE(vertBlankStart, 11, NV_CIO_CRE_EBR_VBS_11) |
					   XLATE(vertStart, 11, NV_CIO_CRE_EBR_VRS_11) |
					   XLATE(vertDisplay, 11, NV_CIO_CRE_EBR_VDE_11) |
					   XLATE(vertTotal, 11, NV_CIO_CRE_EBR_VDT_11);

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		horizTotal = (horizTotal >> 1) & ~1;
		regp->CRTC[NV_CIO_CRE_ILACE__INDEX] = horizTotal;
		regp->CRTC[NV_CIO_CRE_HEB__INDEX] |= XLATE(horizTotal, 8, NV_CIO_CRE_HEB_ILC_8);
	} else
		regp->CRTC[NV_CIO_CRE_ILACE__INDEX] = 0xff;	/* interlace off */

	/*
	 * Graphics Display Controller
	 */
	regp->Graphics[NV_VIO_GX_SR_INDEX] = 0x00;
	regp->Graphics[NV_VIO_GX_SREN_INDEX] = 0x00;
	regp->Graphics[NV_VIO_GX_CCOMP_INDEX] = 0x00;
	regp->Graphics[NV_VIO_GX_ROP_INDEX] = 0x00;
	regp->Graphics[NV_VIO_GX_READ_MAP_INDEX] = 0x00;
	regp->Graphics[NV_VIO_GX_MODE_INDEX] = 0x40;	/* 256 color mode */
	regp->Graphics[NV_VIO_GX_MISC_INDEX] = 0x05;	/* map 64k mem + graphic mode */
	regp->Graphics[NV_VIO_GX_DONT_CARE_INDEX] = 0x0F;
	regp->Graphics[NV_VIO_GX_BIT_MASK_INDEX] = 0xFF;

	regp->Attribute[0] = 0x00;	/* standard colormap translation */
	regp->Attribute[1] = 0x01;
	regp->Attribute[2] = 0x02;
	regp->Attribute[3] = 0x03;
	regp->Attribute[4] = 0x04;
	regp->Attribute[5] = 0x05;
	regp->Attribute[6] = 0x06;
	regp->Attribute[7] = 0x07;
	regp->Attribute[8] = 0x08;
	regp->Attribute[9] = 0x09;
	regp->Attribute[10] = 0x0A;
	regp->Attribute[11] = 0x0B;
	regp->Attribute[12] = 0x0C;
	regp->Attribute[13] = 0x0D;
	regp->Attribute[14] = 0x0E;
	regp->Attribute[15] = 0x0F;
	regp->Attribute[NV_CIO_AR_MODE_INDEX] = 0x01;	/* Enable graphic mode */
	/* Non-vga */
	regp->Attribute[NV_CIO_AR_OSCAN_INDEX] = 0x00;
	regp->Attribute[NV_CIO_AR_PLANE_INDEX] = 0x0F;	/* enable all color planes */
	regp->Attribute[NV_CIO_AR_HPP_INDEX] = 0x00;
	regp->Attribute[NV_CIO_AR_CSEL_INDEX] = 0x00;
}
438
/**
 * Sets up the shadow state for the non-VGA (nvidia-extended CRTC and
 * PRAMDAC) registers for the given mode.
 *
 * The clocks, CRTCs and outputs attached to this CRTC must be off.
 *
 * This shouldn't enable any clocks, CRTCs, or outputs, but they should
 * be easily turned on/off after this.  Only the shadow (dev_priv->mode_reg)
 * is modified; the hardware flush happens later via nouveau_hw_load_state(),
 * with the exception of the vibrance/sharpening helpers called below.
 */
static void
nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode *mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
	struct nv04_crtc_reg *savep = &dev_priv->saved_reg.crtc_reg[nv_crtc->index];
	struct drm_encoder *encoder;
	bool lvds_output = false, tmds_output = false, tv_output = false,
		off_chip_digital = false;

	/* Classify the encoders driven by this crtc; several register
	 * values below depend on the output type. */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
		bool digital = false;

		if (encoder->crtc != crtc)
			continue;

		if (nv_encoder->dcb->type == OUTPUT_LVDS)
			digital = lvds_output = true;
		if (nv_encoder->dcb->type == OUTPUT_TV)
			tv_output = true;
		if (nv_encoder->dcb->type == OUTPUT_TMDS)
			digital = tmds_output = true;
		if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP && digital)
			off_chip_digital = true;
	}

	/* Registers not directly related to the (s)vga mode */

	/* What is the meaning of this register? */
	/* A few popular values are 0x18, 0x1c, 0x38, 0x3c */
	regp->CRTC[NV_CIO_CRE_ENH_INDEX] = savep->CRTC[NV_CIO_CRE_ENH_INDEX] & ~(1<<5);

	regp->crtc_eng_ctrl = 0;
	/* Except for rare conditions I2C is enabled on the primary crtc */
	if (nv_crtc->index == 0)
		regp->crtc_eng_ctrl |= NV_CRTC_FSEL_I2C;
#if 0
	/* Set overlay to desired crtc. */
	if (dev->overlayAdaptor) {
		NVPortPrivPtr pPriv = GET_OVERLAY_PRIVATE(dev);
		if (pPriv->overlayCRTC == nv_crtc->index)
			regp->crtc_eng_ctrl |= NV_CRTC_FSEL_OVERLAY;
	}
#endif

	/* ADDRESS_SPACE_PNVM is the same as setting HCUR_ASI */
	regp->cursor_cfg = NV_PCRTC_CURSOR_CONFIG_CUR_LINES_64 |
			   NV_PCRTC_CURSOR_CONFIG_CUR_PIXELS_64 |
			   NV_PCRTC_CURSOR_CONFIG_ADDRESS_SPACE_PNVM;
	if (dev_priv->chipset >= 0x11)
		regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_CUR_BPP_32;
	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE;

	/* Unblock some timings */
	regp->CRTC[NV_CIO_CRE_53] = 0;
	regp->CRTC[NV_CIO_CRE_54] = 0;

	/* 0x00 is disabled, 0x11 is lvds, 0x22 crt and 0x88 tmds */
	if (lvds_output)
		regp->CRTC[NV_CIO_CRE_SCRATCH3__INDEX] = 0x11;
	else if (tmds_output)
		regp->CRTC[NV_CIO_CRE_SCRATCH3__INDEX] = 0x88;
	else
		regp->CRTC[NV_CIO_CRE_SCRATCH3__INDEX] = 0x22;

	/* These values seem to vary */
	/* This register seems to be used by the bios to make certain decisions on some G70 cards? */
	regp->CRTC[NV_CIO_CRE_SCRATCH4__INDEX] = savep->CRTC[NV_CIO_CRE_SCRATCH4__INDEX];

	nv_crtc_set_digital_vibrance(crtc, nv_crtc->saturation);

	/* probably a scratch reg, but kept for cargo-cult purposes:
	 * bit0: crtc0?, head A
	 * bit6: lvds, head A
	 * bit7: (only in X), head A
	 */
	if (nv_crtc->index == 0)
		regp->CRTC[NV_CIO_CRE_4B] = savep->CRTC[NV_CIO_CRE_4B] | 0x80;

	/* The blob seems to take the current value from crtc 0, add 4 to that
	 * and reuse the old value for crtc 1 */
	regp->CRTC[NV_CIO_CRE_TVOUT_LATENCY] = dev_priv->saved_reg.crtc_reg[0].CRTC[NV_CIO_CRE_TVOUT_LATENCY];
	if (!nv_crtc->index)
		regp->CRTC[NV_CIO_CRE_TVOUT_LATENCY] += 4;

	/* the blob sometimes sets |= 0x10 (which is the same as setting |=
	 * 1 << 30 on 0x60.830), for no apparent reason */
	regp->CRTC[NV_CIO_CRE_59] = off_chip_digital;

	regp->crtc_830 = mode->crtc_vdisplay - 3;
	regp->crtc_834 = mode->crtc_vdisplay - 1;

	if (dev_priv->card_type == NV_40)
		/* This is what the blob does */
		regp->crtc_850 = NVReadCRTC(dev, 0, NV_PCRTC_850);

	if (dev_priv->card_type >= NV_30)
		regp->gpio_ext = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT);

	regp->crtc_cfg = NV_PCRTC_CONFIG_START_ADDRESS_HSYNC;

	/* Some misc regs */
	if (dev_priv->card_type == NV_40) {
		regp->CRTC[NV_CIO_CRE_85] = 0xFF;
		regp->CRTC[NV_CIO_CRE_86] = 0x1;
	}

	/* low 3 bits encode fb bpp: depth 8 -> 1, 15/16 -> 2, 24 -> 3 */
	regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] = (crtc->fb->depth + 1) / 8;
	/* Enable slaved mode (called MODE_TV in nv4ref.h) */
	if (lvds_output || tmds_output || tv_output)
		regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] |= (1 << 7);

	/* Generic PRAMDAC regs */

	if (dev_priv->card_type >= NV_10)
		/* Only bit that bios and blob set. */
		regp->nv10_cursync = (1 << 25);

	regp->ramdac_gen_ctrl = NV_PRAMDAC_GENERAL_CONTROL_BPC_8BITS |
				NV_PRAMDAC_GENERAL_CONTROL_VGA_STATE_SEL |
				NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON;
	if (crtc->fb->depth == 16)
		regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
	if (dev_priv->chipset >= 0x11)
		regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_PIPE_LONG;

	regp->ramdac_630 = 0;	/* turn off green mode (tv test pattern?) */
	regp->tv_setup = 0;

	nv_crtc_set_image_sharpening(crtc, nv_crtc->sharpness);

	/* Some values the blob sets */
	regp->ramdac_8c0 = 0x100;
	regp->ramdac_a20 = 0x0;
	regp->ramdac_a24 = 0xfffff;
	regp->ramdac_a34 = 0x1;
}
588
589/**
590 * Sets up registers for the given mode/adjusted_mode pair.
591 *
592 * The clocks, CRTCs and outputs attached to this CRTC must be off.
593 *
594 * This shouldn't enable any clocks, CRTCs, or outputs, but they should
595 * be easily turned on/off after this.
596 */
597static int
598nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
599 struct drm_display_mode *adjusted_mode,
600 int x, int y, struct drm_framebuffer *old_fb)
601{
602 struct drm_device *dev = crtc->dev;
603 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
604 struct drm_nouveau_private *dev_priv = dev->dev_private;
605
606 NV_DEBUG(dev, "CTRC mode on CRTC %d:\n", nv_crtc->index);
607 drm_mode_debug_printmodeline(adjusted_mode);
608
609 /* unlock must come after turning off FP_TG_CONTROL in output_prepare */
610 nv_lock_vga_crtc_shadow(dev, nv_crtc->index, -1);
611
612 nv_crtc_mode_set_vga(crtc, adjusted_mode);
613 /* calculated in nv04_dfp_prepare, nv40 needs it written before calculating PLLs */
614 if (dev_priv->card_type == NV_40)
615 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, dev_priv->mode_reg.sel_clk);
616 nv_crtc_mode_set_regs(crtc, adjusted_mode);
617 nv_crtc_calc_state_ext(crtc, mode, adjusted_mode->clock);
618 return 0;
619}
620
/*
 * Snapshot the hardware state for this head into dev_priv->saved_reg
 * (for later restore) and seed a few fields of the working mode state
 * from the saved values.
 */
static void nv_crtc_save(struct drm_crtc *crtc)
{
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
	struct nv04_mode_state *state = &dev_priv->mode_reg;
	struct nv04_crtc_reg *crtc_state = &state->crtc_reg[nv_crtc->index];
	struct nv04_mode_state *saved = &dev_priv->saved_reg;
	struct nv04_crtc_reg *crtc_saved = &saved->crtc_reg[nv_crtc->index];

	if (nv_two_heads(crtc->dev))
		NVSetOwner(crtc->dev, nv_crtc->index);

	nouveau_hw_save_state(crtc->dev, nv_crtc->index, saved);

	/* init some state to saved value */
	state->sel_clk = saved->sel_clk & ~(0x5 << 16);
	crtc_state->CRTC[NV_CIO_CRE_LCD__INDEX] = crtc_saved->CRTC[NV_CIO_CRE_LCD__INDEX];
	state->pllsel = saved->pllsel & ~(PLLSEL_VPLL1_MASK | PLLSEL_VPLL2_MASK | PLLSEL_TV_MASK);
	crtc_state->gpio_ext = crtc_saved->gpio_ext;
}
641
/*
 * Restore the hardware state captured by nv_crtc_save(), re-apply the
 * saved shadow-register lock level, and invalidate the cached dpms mode
 * so the next dpms call reprograms the hardware.
 */
static void nv_crtc_restore(struct drm_crtc *crtc)
{
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
	int head = nv_crtc->index;
	uint8_t saved_cr21 = dev_priv->saved_reg.crtc_reg[head].CRTC[NV_CIO_CRE_21];

	if (nv_two_heads(crtc->dev))
		NVSetOwner(crtc->dev, head);

	nouveau_hw_load_state(crtc->dev, head, &dev_priv->saved_reg);
	nv_lock_vga_crtc_shadow(crtc->dev, head, saved_cr21);

	nv_crtc->last_dpms = NV_DPMS_CLEARED;
}
657
/*
 * Helper-framework prepare hook: quiesce the CRTC before a modeset by
 * forcing dpms off, blanking the screen, and switching scanout start
 * addressing to the non-VGA scheme.
 */
static void nv_crtc_prepare(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	struct drm_crtc_helper_funcs *funcs = crtc->helper_private;

	if (nv_two_heads(dev))
		NVSetOwner(dev, nv_crtc->index);

	funcs->dpms(crtc, DRM_MODE_DPMS_OFF);

	NVBlankScreen(dev, nv_crtc->index, true);

	/* Some more preparation. */
	NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_CONFIG, NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA);
	if (dev_priv->card_type == NV_40) {
		/* NOTE(review): purpose of PRAMDAC_900 bit 16 is not documented
		 * here; the blank sequence clears it on NV40 */
		uint32_t reg900 = NVReadRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900);
		NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900, reg900 & ~0x10000);
	}
}
679
/*
 * Helper-framework commit hook: flush the shadow state built during
 * mode_set to the hardware, program the scanout base, enable big-endian
 * LFB swapping where applicable, and turn the CRTC back on.
 */
static void nv_crtc_commit(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_crtc_helper_funcs *funcs = crtc->helper_private;
	struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);

	nouveau_hw_load_state(dev, nv_crtc->index, &dev_priv->mode_reg);
	nv04_crtc_mode_set_base(crtc, crtc->x, crtc->y, NULL);

#ifdef __BIG_ENDIAN
	/* turn on LFB swapping */
	{
		uint8_t tmp = NVReadVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RCR);
		tmp |= MASK(NV_CIO_CRE_RCR_ENDIAN_BIG);
		NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RCR, tmp);
	}
#endif

	funcs->dpms(crtc, DRM_MODE_DPMS_ON);
}
701
702static void nv_crtc_destroy(struct drm_crtc *crtc)
703{
704 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
705
706 NV_DEBUG(crtc->dev, "\n");
707
708 if (!nv_crtc)
709 return;
710
711 drm_crtc_cleanup(crtc);
712
713 nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
714 kfree(nv_crtc);
715}
716
717static void
718nv_crtc_gamma_load(struct drm_crtc *crtc)
719{
720 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
721 struct drm_device *dev = nv_crtc->base.dev;
722 struct drm_nouveau_private *dev_priv = dev->dev_private;
723 struct rgb { uint8_t r, g, b; } __attribute__((packed)) *rgbs;
724 int i;
725
726 rgbs = (struct rgb *)dev_priv->mode_reg.crtc_reg[nv_crtc->index].DAC;
727 for (i = 0; i < 256; i++) {
728 rgbs[i].r = nv_crtc->lut.r[i] >> 8;
729 rgbs[i].g = nv_crtc->lut.g[i] >> 8;
730 rgbs[i].b = nv_crtc->lut.b[i] >> 8;
731 }
732
733 nouveau_hw_load_state_palette(dev, nv_crtc->index, &dev_priv->mode_reg);
734}
735
736static void
737nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t size)
738{
739 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
740 int i;
741
742 if (size != 256)
743 return;
744
745 for (i = 0; i < 256; i++) {
746 nv_crtc->lut.r[i] = r[i];
747 nv_crtc->lut.g[i] = g[i];
748 nv_crtc->lut.b[i] = b[i];
749 }
750
751 /* We need to know the depth before we upload, but it's possible to
752 * get called before a framebuffer is bound. If this is the case,
753 * mark the lut values as dirty by setting depth==0, and it'll be
754 * uploaded on the first mode_set_base()
755 */
756 if (!nv_crtc->base.fb) {
757 nv_crtc->lut.depth = 0;
758 return;
759 }
760
761 nv_crtc_gamma_load(crtc);
762}
763
/*
 * (Re)point the CRTC scanout at offset (x, y) of the currently-bound
 * framebuffer: pin the new fb's bo in VRAM (unpinning the old fb's bo),
 * refresh the LUT if the depth changed, and program pixel format, pitch,
 * start address and memory-arbitration parameters.
 *
 * Returns 0 on success or the error from nouveau_bo_pin().
 */
static int
nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
			struct drm_framebuffer *old_fb)
{
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
	struct drm_framebuffer *drm_fb = nv_crtc->base.fb;
	struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
	int arb_burst, arb_lwm;
	int ret;

	/* scanout memory must stay resident in VRAM */
	ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
	if (ret)
		return ret;

	if (old_fb) {
		struct nouveau_framebuffer *ofb = nouveau_framebuffer(old_fb);
		nouveau_bo_unpin(ofb->nvbo);
	}

	nv_crtc->fb.offset = fb->nvbo->bo.offset;

	/* re-upload the gamma LUT if the fb depth changed (or was dirtied
	 * by an early gamma_set with no fb bound) */
	if (nv_crtc->lut.depth != drm_fb->depth) {
		nv_crtc->lut.depth = drm_fb->depth;
		nv_crtc_gamma_load(crtc);
	}

	/* Update the framebuffer format. */
	regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] &= ~3;
	regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] |= (crtc->fb->depth + 1) / 8;
	regp->ramdac_gen_ctrl &= ~NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
	if (crtc->fb->depth == 16)
		regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_PIXEL_INDEX);
	NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_GENERAL_CONTROL,
		      regp->ramdac_gen_ctrl);

	/* pitch in units of 8 bytes; bits 10:8 overflow into RPC0 */
	regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = drm_fb->pitch >> 3;
	regp->CRTC[NV_CIO_CRE_RPC0_INDEX] =
		XLATE(drm_fb->pitch >> 3, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_RPC0_INDEX);
	crtc_wr_cio_state(crtc, regp, NV_CIO_CR_OFFSET_INDEX);

	/* Update the framebuffer location. */
	regp->fb_start = nv_crtc->fb.offset & ~3;
	regp->fb_start += (y * drm_fb->pitch) + (x * drm_fb->bits_per_pixel / 8);
	NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_START, regp->fb_start);

	/* Update the arbitration parameters. */
	nouveau_calc_arb(dev, crtc->mode.clock, drm_fb->bits_per_pixel,
			 &arb_burst, &arb_lwm);

	regp->CRTC[NV_CIO_CRE_FF_INDEX] = arb_burst;
	regp->CRTC[NV_CIO_CRE_FFLWM__INDEX] = arb_lwm & 0xff;
	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FF_INDEX);
	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FFLWM__INDEX);

	if (dev_priv->card_type >= NV_30) {
		/* high byte of the fifo low-water mark lives in CRE_47 */
		regp->CRTC[NV_CIO_CRE_47] = arb_lwm >> 8;
		crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_47);
	}

	return 0;
}
830
831static void nv04_cursor_upload(struct drm_device *dev, struct nouveau_bo *src,
832 struct nouveau_bo *dst)
833{
834 int width = nv_cursor_width(dev);
835 uint32_t pixel;
836 int i, j;
837
838 for (i = 0; i < width; i++) {
839 for (j = 0; j < width; j++) {
840 pixel = nouveau_bo_rd32(src, i*64 + j);
841
842 nouveau_bo_wr16(dst, i*width + j, (pixel & 0x80000000) >> 16
843 | (pixel & 0xf80000) >> 9
844 | (pixel & 0xf800) >> 6
845 | (pixel & 0xf8) >> 3);
846 }
847 }
848}
849
/*
 * Copy a 64x64 ARGB8888 cursor image from @src to the hardware cursor
 * buffer @dst for NV11+ parts, adjusting alpha to avoid a hardware quirk
 * and byte-swapping on big-endian NV11.
 */
static void nv11_cursor_upload(struct drm_device *dev, struct nouveau_bo *src,
			       struct nouveau_bo *dst)
{
	uint32_t pixel;
	int alpha, i;

	/* nv11+ supports premultiplied (PM), or non-premultiplied (NPM) alpha
	 * cursors (though NPM in combination with fp dithering may not work on
	 * nv11, from "nv" driver history)
	 * NPM mode needs NV_PCRTC_CURSOR_CONFIG_ALPHA_BLEND set and is what the
	 * blob uses, however we get given PM cursors so we use PM mode
	 */
	for (i = 0; i < 64 * 64; i++) {
		pixel = nouveau_bo_rd32(src, i);

		/* hw gets unhappy if alpha <= rgb values. for a PM image "less
		 * than" shouldn't happen; fix "equal to" case by adding one to
		 * alpha channel (slightly inaccurate, but so is attempting to
		 * get back to NPM images, due to limits of integer precision)
		 */
		alpha = pixel >> 24;
		if (alpha > 0 && alpha < 255)
			pixel = (pixel & 0x00ffffff) | ((alpha + 1) << 24);

#ifdef __BIG_ENDIAN
		{
			struct drm_nouveau_private *dev_priv = dev->dev_private;

			/* nv11 needs the cursor words stored little-endian */
			if (dev_priv->chipset == 0x11) {
				pixel = ((pixel & 0x000000ff) << 24) |
					((pixel & 0x0000ff00) << 8) |
					((pixel & 0x00ff0000) >> 8) |
					((pixel & 0xff000000) >> 24);
			}
		}
#endif

		nouveau_bo_wr32(dst, i, pixel);
	}
}
890
/*
 * DRM cursor_set hook: replace (or hide) the hardware cursor image.
 *
 * A zero @buffer_handle hides the cursor.  Otherwise the handle must name
 * a 64x64 GEM object; its contents are converted into the pre-allocated
 * cursor bo (format depends on chipset) and the cursor is shown.
 *
 * Returns 0 on success, -EINVAL for a bad size/handle, or the error from
 * mapping the source bo.  The GEM reference taken by the lookup is always
 * dropped before returning.
 */
static int
nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
		     uint32_t buffer_handle, uint32_t width, uint32_t height)
{
	struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	struct nouveau_bo *cursor = NULL;
	struct drm_gem_object *gem;
	int ret = 0;

	if (width != 64 || height != 64)
		return -EINVAL;

	if (!buffer_handle) {
		nv_crtc->cursor.hide(nv_crtc, true);
		return 0;
	}

	gem = drm_gem_object_lookup(dev, file_priv, buffer_handle);
	if (!gem)
		return -EINVAL;
	cursor = nouveau_gem_object(gem);

	ret = nouveau_bo_map(cursor);
	if (ret)
		goto out;

	/* nv11+ uses a 32bpp cursor; older parts use 1:5:5:5 */
	if (dev_priv->chipset >= 0x11)
		nv11_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo);
	else
		nv04_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo);

	nouveau_bo_unmap(cursor);
	nv_crtc->cursor.offset = nv_crtc->cursor.nvbo->bo.offset;
	nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset);
	nv_crtc->cursor.show(nv_crtc, true);
out:
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(gem);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
934
935static int
936nv04_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
937{
938 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
939
940 nv_crtc->cursor.set_pos(nv_crtc, x, y);
941 return 0;
942}
943
/* CRTC entry points for NV04-era (pre-NV50) display. */
static const struct drm_crtc_funcs nv04_crtc_funcs = {
	.save = nv_crtc_save,
	.restore = nv_crtc_restore,
	.cursor_set = nv04_crtc_cursor_set,
	.cursor_move = nv04_crtc_cursor_move,
	.gamma_set = nv_crtc_gamma_set,
	.set_config = drm_crtc_helper_set_config,
	.destroy = nv_crtc_destroy,
};
953
/* Modeset-helper hooks; the helper framework drives these in the order
 * dpms(off)/prepare -> mode_set -> commit.
 */
static const struct drm_crtc_helper_funcs nv04_crtc_helper_funcs = {
	.dpms = nv_crtc_dpms,
	.prepare = nv_crtc_prepare,
	.commit = nv_crtc_commit,
	.mode_fixup = nv_crtc_mode_fixup,
	.mode_set = nv_crtc_mode_set,
	.mode_set_base = nv04_crtc_mode_set_base,
	.load_lut = nv_crtc_gamma_load,
};
963
/*
 * Allocate and register CRTC @crtc_num: identity gamma LUT, DRM crtc
 * registration, a 64x64x32bpp VRAM cursor bo (pinned and mapped), and the
 * nv04 cursor callbacks.
 *
 * Returns 0 (cursor-bo allocation failure is treated as non-fatal: the
 * bo pointer is cleared and the crtc is still created) or -ENOMEM if the
 * crtc structure itself cannot be allocated.
 */
int
nv04_crtc_create(struct drm_device *dev, int crtc_num)
{
	struct nouveau_crtc *nv_crtc;
	int ret, i;

	nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
	if (!nv_crtc)
		return -ENOMEM;

	/* identity ramp; channels are 16-bit so scale 0..255 up by 256 */
	for (i = 0; i < 256; i++) {
		nv_crtc->lut.r[i] = i << 8;
		nv_crtc->lut.g[i] = i << 8;
		nv_crtc->lut.b[i] = i << 8;
	}
	/* depth 0 marks the LUT dirty; uploaded on first mode_set_base() */
	nv_crtc->lut.depth = 0;

	nv_crtc->index = crtc_num;
	nv_crtc->last_dpms = NV_DPMS_CLEARED;

	drm_crtc_init(dev, &nv_crtc->base, &nv04_crtc_funcs);
	drm_crtc_helper_add(&nv_crtc->base, &nv04_crtc_helper_funcs);
	drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);

	/* 64x64 ARGB8888 cursor image, 256-byte aligned, in VRAM */
	ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
			     0, 0x0000, false, true, &nv_crtc->cursor.nvbo);
	if (!ret) {
		ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
		if (!ret)
			ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
		if (ret)
			nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
	}

	nv04_cursor_init(nv_crtc);

	return 0;
}
1002
diff --git a/drivers/gpu/drm/nouveau/nv04_cursor.c b/drivers/gpu/drm/nouveau/nv04_cursor.c
new file mode 100644
index 000000000000..89a91b9d8b25
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_cursor.c
@@ -0,0 +1,70 @@
1#include "drmP.h"
2#include "drm_mode.h"
3#include "nouveau_reg.h"
4#include "nouveau_drv.h"
5#include "nouveau_crtc.h"
6#include "nouveau_hw.h"
7
8static void
9nv04_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
10{
11 nv_show_cursor(nv_crtc->base.dev, nv_crtc->index, true);
12}
13
14static void
15nv04_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
16{
17 nv_show_cursor(nv_crtc->base.dev, nv_crtc->index, false);
18}
19
20static void
21nv04_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
22{
23 NVWriteRAMDAC(nv_crtc->base.dev, nv_crtc->index,
24 NV_PRAMDAC_CU_START_POS,
25 XLATE(y, 0, NV_PRAMDAC_CU_START_POS_Y) |
26 XLATE(x, 0, NV_PRAMDAC_CU_START_POS_X));
27}
28
29static void
30crtc_wr_cio_state(struct drm_crtc *crtc, struct nv04_crtc_reg *crtcstate, int index)
31{
32 NVWriteVgaCrtc(crtc->dev, nouveau_crtc(crtc)->index, index,
33 crtcstate->CRTC[index]);
34}
35
/*
 * Program the VRAM offset of the cursor image for this head.
 *
 * The offset is split across three HCUR_ADDR CRTC registers (shadowed in
 * the driver's mode_reg state, then flushed with crtc_wr_cio_state).  A
 * doublescan mode additionally sets the cursor-doubling bit.
 */
static void
nv04_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
{
	struct drm_device *dev = nv_crtc->base.dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
	struct drm_crtc *crtc = &nv_crtc->base;

	/* ADDR0 carries the address-select bit plus offset bits from 17 up */
	regp->CRTC[NV_CIO_CRE_HCUR_ADDR0_INDEX] =
		MASK(NV_CIO_CRE_HCUR_ASI) |
		XLATE(offset, 17, NV_CIO_CRE_HCUR_ADDR0_ADR);
	/* ADDR1 carries offset bits from 11 up */
	regp->CRTC[NV_CIO_CRE_HCUR_ADDR1_INDEX] =
		XLATE(offset, 11, NV_CIO_CRE_HCUR_ADDR1_ADR);
	if (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)
		regp->CRTC[NV_CIO_CRE_HCUR_ADDR1_INDEX] |=
			MASK(NV_CIO_CRE_HCUR_ADDR1_CUR_DBL);
	/* ADDR2 holds the top byte of the offset */
	regp->CRTC[NV_CIO_CRE_HCUR_ADDR2_INDEX] = offset >> 24;

	/* flush all three shadow registers to the hardware */
	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
	/* NV40 needs an extra poke for cursor changes to take effect */
	if (dev_priv->card_type == NV_40)
		nv_fix_nv40_hw_cursor(dev, nv_crtc->index);
}
60
61int
62nv04_cursor_init(struct nouveau_crtc *crtc)
63{
64 crtc->cursor.set_offset = nv04_cursor_set_offset;
65 crtc->cursor.set_pos = nv04_cursor_set_pos;
66 crtc->cursor.hide = nv04_cursor_hide;
67 crtc->cursor.show = nv04_cursor_show;
68 return 0;
69}
70
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
new file mode 100644
index 000000000000..a5fa51714e87
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -0,0 +1,528 @@
1/*
2 * Copyright 2003 NVIDIA, Corporation
3 * Copyright 2006 Dave Airlie
4 * Copyright 2007 Maarten Maathuis
5 * Copyright 2007-2009 Stuart Bennett
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
16 * Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
23 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
24 * DEALINGS IN THE SOFTWARE.
25 */
26
27#include "drmP.h"
28#include "drm_crtc_helper.h"
29
30#include "nouveau_drv.h"
31#include "nouveau_encoder.h"
32#include "nouveau_connector.h"
33#include "nouveau_crtc.h"
34#include "nouveau_hw.h"
35#include "nvreg.h"
36
37int nv04_dac_output_offset(struct drm_encoder *encoder)
38{
39 struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
40 int offset = 0;
41
42 if (dcb->or & (8 | OUTPUT_C))
43 offset += 0x68;
44 if (dcb->or & (8 | OUTPUT_B))
45 offset += 0x2000;
46
47 return offset;
48}
49
50/*
51 * arbitrary limit to number of sense oscillations tolerated in one sample
52 * period (observed to be at least 13 in "nvidia")
53 */
54#define MAX_HBLANK_OSC 20
55
56/*
57 * arbitrary limit to number of conflicting sample pairs to tolerate at a
58 * voltage step (observed to be at least 5 in "nvidia")
59 */
60#define MAX_SAMPLE_PAIRS 10
61
/*
 * Take two debounced readings of the DAC load-sense line, one per array
 * slot in sense[].  Each reading is synchronised to hblank: we wait for
 * the CRTC to leave, enter, then leave hblank again before sampling, and
 * re-sample until consecutive values agree (bounded by MAX_HBLANK_OSC).
 *
 * Returns 0 on success, or -EBUSY if the hblank state never toggles
 * within the timeout (i.e. the CRTC is likely inactive).
 */
static int sample_load_twice(struct drm_device *dev, bool sense[2])
{
	int i;

	for (i = 0; i < 2; i++) {
		bool sense_a, sense_b, sense_b_prime;
		int j = 0;

		/*
		 * wait for bit 0 clear -- out of hblank -- (say reg value 0x4),
		 * then wait for transition 0x4->0x5->0x4: enter hblank, leave
		 * hblank again
		 * use a 10ms timeout (guards against crtc being inactive, in
		 * which case blank state would never change)
		 */
		if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR,
					0x00000001, 0x00000000))
			return -EBUSY;
		if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR,
					0x00000001, 0x00000001))
			return -EBUSY;
		if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR,
					0x00000001, 0x00000000))
			return -EBUSY;

		udelay(100);
		/* when level triggers, sense is _LO_ */
		sense_a = nv_rd08(dev, NV_PRMCIO_INP0) & 0x10;

		/* take another reading until it agrees with sense_a... */
		do {
			udelay(100);
			sense_b = nv_rd08(dev, NV_PRMCIO_INP0) & 0x10;
			if (sense_a != sense_b) {
				sense_b_prime =
					nv_rd08(dev, NV_PRMCIO_INP0) & 0x10;
				if (sense_b == sense_b_prime) {
					/* ... unless two consecutive subsequent
					 * samples agree; sense_a is replaced */
					sense_a = sense_b;
					/* force mis-match so we loop */
					sense_b = !sense_a;
				}
			}
		} while ((sense_a != sense_b) && ++j < MAX_HBLANK_OSC);

		if (j == MAX_HBLANK_OSC)
			/* with so much oscillation, default to sense:LO */
			sense[i] = false;
		else
			sense[i] = sense_a;
	}

	return 0;
}
117
/*
 * Legacy (pre-GeForce4 display architecture) analog load detection.
 *
 * Ramps a test colour through the blue palette channel while watching the
 * DAC sense comparator via sample_load_twice().  All VGA/RAMDAC state that
 * is touched is saved first and restored before returning.  If the sense
 * line stays HI until blue reaches 0x18, a monitor is considered attached.
 * Only head A is probed.
 */
static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
						 struct drm_connector *connector)
{
	struct drm_device *dev = encoder->dev;
	uint8_t saved_seq1, saved_pi, saved_rpc1;
	uint8_t saved_palette0[3], saved_palette_mask;
	uint32_t saved_rtest_ctrl, saved_rgen_ctrl;
	int i;
	uint8_t blue;
	bool sense = true;

	/*
	 * for this detection to work, there needs to be a mode set up on the
	 * CRTC. this is presumed to be the case
	 */

	if (nv_two_heads(dev))
		/* only implemented for head A for now */
		NVSetOwner(dev, 0);

	saved_seq1 = NVReadVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX);
	NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1 & ~0x20);

	/* make sure the DAC is powered up for the duration of the probe */
	saved_rtest_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL);
	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL,
		      saved_rtest_ctrl & ~NV_PRAMDAC_TEST_CONTROL_PWRDWN_DAC_OFF);

	msleep(10);

	saved_pi = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX);
	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX,
		       saved_pi & ~(0x80 | MASK(NV_CIO_CRE_PIXEL_FORMAT)));
	saved_rpc1 = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX);
	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1 & ~0xc0);

	/* save the first palette entry, then mask all pixels to index 0 so
	 * the whole screen drives the test colour */
	nv_wr08(dev, NV_PRMDIO_READ_MODE_ADDRESS, 0x0);
	for (i = 0; i < 3; i++)
		saved_palette0[i] = nv_rd08(dev, NV_PRMDIO_PALETTE_DATA);
	saved_palette_mask = nv_rd08(dev, NV_PRMDIO_PIXEL_MASK);
	nv_wr08(dev, NV_PRMDIO_PIXEL_MASK, 0);

	saved_rgen_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL);
	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL,
		      (saved_rgen_ctrl & ~(NV_PRAMDAC_GENERAL_CONTROL_BPC_8BITS |
					   NV_PRAMDAC_GENERAL_CONTROL_TERMINATION_75OHM)) |
		      NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON);

	blue = 8;	/* start of test range */

	do {
		bool sense_pair[2];

		/* program palette entry 0 = (0, 0, blue) */
		nv_wr08(dev, NV_PRMDIO_WRITE_MODE_ADDRESS, 0);
		nv_wr08(dev, NV_PRMDIO_PALETTE_DATA, 0);
		nv_wr08(dev, NV_PRMDIO_PALETTE_DATA, 0);
		/* testing blue won't find monochrome monitors. I don't care */
		nv_wr08(dev, NV_PRMDIO_PALETTE_DATA, blue);

		i = 0;
		/* take sample pairs until both samples in the pair agree */
		do {
			if (sample_load_twice(dev, sense_pair))
				goto out;
		} while ((sense_pair[0] != sense_pair[1]) &&
			 ++i < MAX_SAMPLE_PAIRS);

		if (i == MAX_SAMPLE_PAIRS)
			/* too much oscillation defaults to LO */
			sense = false;
		else
			sense = sense_pair[0];

	/*
	 * if sense goes LO before blue ramps to 0x18, monitor is not connected.
	 * ergo, if blue gets to 0x18, monitor must be connected
	 */
	} while (++blue < 0x18 && sense);

out:
	/* restore everything we touched, roughly in reverse order */
	nv_wr08(dev, NV_PRMDIO_PIXEL_MASK, saved_palette_mask);
	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL, saved_rgen_ctrl);
	nv_wr08(dev, NV_PRMDIO_WRITE_MODE_ADDRESS, 0);
	for (i = 0; i < 3; i++)
		nv_wr08(dev, NV_PRMDIO_PALETTE_DATA, saved_palette0[i]);
	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL, saved_rtest_ctrl);
	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi);
	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1);
	NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1);

	if (blue == 0x18) {
		NV_TRACE(dev, "Load detected on head A\n");
		return connector_status_connected;
	}

	return connector_status_disconnected;
}
214
/*
 * GeForce4+ analog/TV load detection using the RAMDAC test-point logic.
 *
 * Drives a test value onto the DAC while it is temporarily powered up and
 * bound to a head, then reads the sense bits back from TEST_CONTROL (TV
 * detection is delegated to nv17_tv_detect).  All registers and GPIOs that
 * are touched are saved first and restored before returning.
 */
enum drm_connector_status nv17_dac_detect(struct drm_encoder *encoder,
					  struct drm_connector *connector)
{
	struct drm_device *dev = encoder->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
	uint32_t testval, regoffset = nv04_dac_output_offset(encoder);
	uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput,
		saved_rtest_ctrl, saved_gpio0, saved_gpio1, temp, routput;
	int head, present = 0;

#define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20)
	/* pick the test pattern: VBIOS-supplied value if available, else a
	 * sane default for the output type */
	if (dcb->type == OUTPUT_TV) {
		testval = RGB_TEST_DATA(0xa0, 0xa0, 0xa0);

		if (dev_priv->vbios->tvdactestval)
			testval = dev_priv->vbios->tvdactestval;
	} else {
		testval = RGB_TEST_DATA(0x140, 0x140, 0x140); /* 0x94050140 */

		if (dev_priv->vbios->dactestval)
			testval = dev_priv->vbios->dactestval;
	}

	/* power up the DAC under test */
	saved_rtest_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset,
		      saved_rtest_ctrl & ~NV_PRAMDAC_TEST_CONTROL_PWRDWN_DAC_OFF);

	saved_powerctrl_2 = nvReadMC(dev, NV_PBUS_POWERCTRL_2);

	nvWriteMC(dev, NV_PBUS_POWERCTRL_2, saved_powerctrl_2 & 0xd7ffffff);
	if (regoffset == 0x68) {
		saved_powerctrl_4 = nvReadMC(dev, NV_PBUS_POWERCTRL_4);
		nvWriteMC(dev, NV_PBUS_POWERCTRL_4, saved_powerctrl_4 & 0xffffffcf);
	}

	saved_gpio1 = nv17_gpio_get(dev, DCB_GPIO_TVDAC1);
	saved_gpio0 = nv17_gpio_get(dev, DCB_GPIO_TVDAC0);

	/* route the TVDAC GPIOs for TV outputs, VGA otherwise */
	nv17_gpio_set(dev, DCB_GPIO_TVDAC1, dcb->type == OUTPUT_TV);
	nv17_gpio_set(dev, DCB_GPIO_TVDAC0, dcb->type == OUTPUT_TV);

	msleep(4);

	saved_routput = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset);
	head = (saved_routput & 0x100) >> 8;
#if 0
	/* if there's a spare crtc, using it will minimise flicker for the case
	 * where the in-use crtc is in use by an off-chip tmds encoder */
	if (xf86_config->crtc[head]->enabled && !xf86_config->crtc[head ^ 1]->enabled)
		head ^= 1;
#endif
	/* nv driver and nv31 use 0xfffffeee, nv34 and 6600 use 0xfffffece */
	routput = (saved_routput & 0xfffffece) | head << 8;

	if (dev_priv->card_type >= NV_40) {
		if (dcb->type == OUTPUT_TV)
			routput |= 0x1a << 16;
		else
			routput &= ~(0x1a << 16);
	}

	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, routput);
	msleep(1);

	temp = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset);
	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, temp | 1);

	/* inject the test value through the test-point logic */
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TESTPOINT_DATA,
		      NV_PRAMDAC_TESTPOINT_DATA_NOTBLANK | testval);
	temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL,
		      temp | NV_PRAMDAC_TEST_CONTROL_TP_INS_EN_ASSERTED);
	msleep(5);

	temp = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);

	if (dcb->type == OUTPUT_TV)
		present = (nv17_tv_detect(encoder, connector, temp)
			   == connector_status_connected);
	else
		present = temp & NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI;

	/* disable test-point injection again */
	temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL,
		      temp & ~NV_PRAMDAC_TEST_CONTROL_TP_INS_EN_ASSERTED);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TESTPOINT_DATA, 0);

	/* bios does something more complex for restoring, but I think this is good enough */
	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, saved_routput);
	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, saved_rtest_ctrl);
	if (regoffset == 0x68)
		nvWriteMC(dev, NV_PBUS_POWERCTRL_4, saved_powerctrl_4);
	nvWriteMC(dev, NV_PBUS_POWERCTRL_2, saved_powerctrl_2);

	nv17_gpio_set(dev, DCB_GPIO_TVDAC1, saved_gpio1);
	nv17_gpio_set(dev, DCB_GPIO_TVDAC0, saved_gpio0);

	if (present) {
		NV_INFO(dev, "Load detected on output %c\n", '@' + ffs(dcb->or));
		return connector_status_connected;
	}

	return connector_status_disconnected;
}
320
321
322static bool nv04_dac_mode_fixup(struct drm_encoder *encoder,
323 struct drm_display_mode *mode,
324 struct drm_display_mode *adjusted_mode)
325{
326 return true;
327}
328
/*
 * Pre-modeset hook: turn the encoder off, shut down any flat-panel timing
 * generator still active on this head, and clear the shadowed LCD CRTC
 * register so the head comes up in analog mode.
 */
static void nv04_dac_prepare(struct drm_encoder *encoder)
{
	struct drm_encoder_helper_funcs *helper = encoder->helper_private;
	struct drm_device *dev = encoder->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int head = nouveau_crtc(encoder->crtc)->index;
	struct nv04_crtc_reg *crtcstate = dev_priv->mode_reg.crtc_reg;

	helper->dpms(encoder, DRM_MODE_DPMS_OFF);

	nv04_dfp_disable(dev, head);

	/* Some NV4x have unknown values (0x3f, 0x50, 0x54, 0x6b, 0x79, 0x7f)
	 * at LCD__INDEX which we don't alter
	 */
	if (!(crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX] & 0x44))
		crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX] = 0;
}
347
348
349static void nv04_dac_mode_set(struct drm_encoder *encoder,
350 struct drm_display_mode *mode,
351 struct drm_display_mode *adjusted_mode)
352{
353 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
354 struct drm_device *dev = encoder->dev;
355 struct drm_nouveau_private *dev_priv = dev->dev_private;
356 int head = nouveau_crtc(encoder->crtc)->index;
357
358 NV_TRACE(dev, "%s called for encoder %d\n", __func__,
359 nv_encoder->dcb->index);
360
361 if (nv_gf4_disp_arch(dev)) {
362 struct drm_encoder *rebind;
363 uint32_t dac_offset = nv04_dac_output_offset(encoder);
364 uint32_t otherdac;
365
366 /* bit 16-19 are bits that are set on some G70 cards,
367 * but don't seem to have much effect */
368 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + dac_offset,
369 head << 8 | NV_PRAMDAC_DACCLK_SEL_DACCLK);
370 /* force any other vga encoders to bind to the other crtc */
371 list_for_each_entry(rebind, &dev->mode_config.encoder_list, head) {
372 if (rebind == encoder
373 || nouveau_encoder(rebind)->dcb->type != OUTPUT_ANALOG)
374 continue;
375
376 dac_offset = nv04_dac_output_offset(rebind);
377 otherdac = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + dac_offset);
378 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + dac_offset,
379 (otherdac & ~0x0100) | (head ^ 1) << 8);
380 }
381 }
382
383 /* This could use refinement for flatpanels, but it should work this way */
384 if (dev_priv->chipset < 0x44)
385 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000);
386 else
387 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);
388}
389
390static void nv04_dac_commit(struct drm_encoder *encoder)
391{
392 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
393 struct drm_device *dev = encoder->dev;
394 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
395 struct drm_encoder_helper_funcs *helper = encoder->helper_private;
396
397 helper->dpms(encoder, DRM_MODE_DPMS_ON);
398
399 NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
400 drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
401 nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
402}
403
404void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable)
405{
406 struct drm_device *dev = encoder->dev;
407 struct drm_nouveau_private *dev_priv = dev->dev_private;
408 struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
409
410 if (nv_gf4_disp_arch(dev)) {
411 uint32_t *dac_users = &dev_priv->dac_users[ffs(dcb->or) - 1];
412 int dacclk_off = NV_PRAMDAC_DACCLK + nv04_dac_output_offset(encoder);
413 uint32_t dacclk = NVReadRAMDAC(dev, 0, dacclk_off);
414
415 if (enable) {
416 *dac_users |= 1 << dcb->index;
417 NVWriteRAMDAC(dev, 0, dacclk_off, dacclk | NV_PRAMDAC_DACCLK_SEL_DACCLK);
418
419 } else {
420 *dac_users &= ~(1 << dcb->index);
421 if (!*dac_users)
422 NVWriteRAMDAC(dev, 0, dacclk_off,
423 dacclk & ~NV_PRAMDAC_DACCLK_SEL_DACCLK);
424 }
425 }
426}
427
428static void nv04_dac_dpms(struct drm_encoder *encoder, int mode)
429{
430 struct drm_device *dev = encoder->dev;
431 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
432
433 if (nv_encoder->last_dpms == mode)
434 return;
435 nv_encoder->last_dpms = mode;
436
437 NV_INFO(dev, "Setting dpms mode %d on vga encoder (output %d)\n",
438 mode, nv_encoder->dcb->index);
439
440 nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON);
441}
442
443static void nv04_dac_save(struct drm_encoder *encoder)
444{
445 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
446 struct drm_device *dev = encoder->dev;
447
448 if (nv_gf4_disp_arch(dev))
449 nv_encoder->restore.output = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK +
450 nv04_dac_output_offset(encoder));
451}
452
453static void nv04_dac_restore(struct drm_encoder *encoder)
454{
455 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
456 struct drm_device *dev = encoder->dev;
457
458 if (nv_gf4_disp_arch(dev))
459 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + nv04_dac_output_offset(encoder),
460 nv_encoder->restore.output);
461
462 nv_encoder->last_dpms = NV_DPMS_CLEARED;
463}
464
465static void nv04_dac_destroy(struct drm_encoder *encoder)
466{
467 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
468
469 NV_DEBUG(encoder->dev, "\n");
470
471 drm_encoder_cleanup(encoder);
472 kfree(nv_encoder);
473}
474
/* Encoder helpers for pre-GeForce4 hardware: identical to the nv17 set
 * except for the palette-based load-detect implementation. */
static const struct drm_encoder_helper_funcs nv04_dac_helper_funcs = {
	.dpms = nv04_dac_dpms,
	.save = nv04_dac_save,
	.restore = nv04_dac_restore,
	.mode_fixup = nv04_dac_mode_fixup,
	.prepare = nv04_dac_prepare,
	.commit = nv04_dac_commit,
	.mode_set = nv04_dac_mode_set,
	.detect = nv04_dac_detect
};
485
/* Encoder helpers for GeForce4+ display architecture: same as the nv04
 * set except detection uses the RAMDAC test-point logic. */
static const struct drm_encoder_helper_funcs nv17_dac_helper_funcs = {
	.dpms = nv04_dac_dpms,
	.save = nv04_dac_save,
	.restore = nv04_dac_restore,
	.mode_fixup = nv04_dac_mode_fixup,
	.prepare = nv04_dac_prepare,
	.commit = nv04_dac_commit,
	.mode_set = nv04_dac_mode_set,
	.detect = nv17_dac_detect
};
496
/* Core encoder ops; only teardown is needed for analog outputs. */
static const struct drm_encoder_funcs nv04_dac_funcs = {
	.destroy = nv04_dac_destroy,
};
500
501int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry)
502{
503 const struct drm_encoder_helper_funcs *helper;
504 struct drm_encoder *encoder;
505 struct nouveau_encoder *nv_encoder = NULL;
506
507 nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
508 if (!nv_encoder)
509 return -ENOMEM;
510
511 encoder = to_drm_encoder(nv_encoder);
512
513 nv_encoder->dcb = entry;
514 nv_encoder->or = ffs(entry->or) - 1;
515
516 if (nv_gf4_disp_arch(dev))
517 helper = &nv17_dac_helper_funcs;
518 else
519 helper = &nv04_dac_helper_funcs;
520
521 drm_encoder_init(dev, encoder, &nv04_dac_funcs, DRM_MODE_ENCODER_DAC);
522 drm_encoder_helper_add(encoder, helper);
523
524 encoder->possible_crtcs = entry->heads;
525 encoder->possible_clones = 0;
526
527 return 0;
528}
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
new file mode 100644
index 000000000000..e5b33339d595
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -0,0 +1,621 @@
1/*
2 * Copyright 2003 NVIDIA, Corporation
3 * Copyright 2006 Dave Airlie
4 * Copyright 2007 Maarten Maathuis
5 * Copyright 2007-2009 Stuart Bennett
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
16 * Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
23 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
24 * DEALINGS IN THE SOFTWARE.
25 */
26
27#include "drmP.h"
28#include "drm_crtc_helper.h"
29
30#include "nouveau_drv.h"
31#include "nouveau_encoder.h"
32#include "nouveau_connector.h"
33#include "nouveau_crtc.h"
34#include "nouveau_hw.h"
35#include "nvreg.h"
36
37#define FP_TG_CONTROL_ON (NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS | \
38 NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS | \
39 NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS)
40#define FP_TG_CONTROL_OFF (NV_PRAMDAC_FP_TG_CONTROL_DISPEN_DISABLE | \
41 NV_PRAMDAC_FP_TG_CONTROL_HSYNC_DISABLE | \
42 NV_PRAMDAC_FP_TG_CONTROL_VSYNC_DISABLE)
43
44static inline bool is_fpc_off(uint32_t fpc)
45{
46 return ((fpc & (FP_TG_CONTROL_ON | FP_TG_CONTROL_OFF)) ==
47 FP_TG_CONTROL_OFF);
48}
49
/*
 * Return the CRTC head currently bound to this DFP output, determined by
 * reading back TMDS register 0x04 through the output's own ramdac.
 */
int nv04_dfp_get_bound_head(struct drm_device *dev, struct dcb_entry *dcbent)
{
	/* special case of nv_read_tmds to find crtc associated with an output.
	 * this does not give a correct answer for off-chip dvi, but there's no
	 * use for such an answer anyway
	 */
	int ramdac = (dcbent->or & OUTPUT_C) >> 2;

	NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_CONTROL,
		      NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE | 0x4);
	/* bit 3 of TMDS reg 0x04 is the cross-bound flag relative to ramdac */
	return ((NVReadRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_DATA) & 0x8) >> 3) ^ ramdac;
}
62
/*
 * Bind a DFP output to a CRTC head by programming TMDS register 0x04
 * (and its dual-link twin when @dl is set).
 */
void nv04_dfp_bind_head(struct drm_device *dev, struct dcb_entry *dcbent,
			int head, bool dl)
{
	/* The BIOS scripts don't do this for us, sadly
	 * Luckily we do know the values ;-)
	 */

	int ramdac = (dcbent->or & OUTPUT_C) >> 2;
	uint8_t tmds04 = 0x80;

	/* 0x88 selects the head that is NOT this output's own ramdac */
	if (head != ramdac)
		tmds04 = 0x88;

	if (dcbent->type == OUTPUT_LVDS)
		tmds04 |= 0x01;

	nv_write_tmds(dev, dcbent->or, 0, 0x04, tmds04);

	if (dl)	/* dual link */
		nv_write_tmds(dev, dcbent->or, 1, 0x04, tmds04 ^ 0x08);
}
87
/*
 * Turn off the flat-panel timing generator on @head if it is currently
 * enabled, and make sure the shadowed fp_control keeps it off when the
 * saved state is written back later.
 */
void nv04_dfp_disable(struct drm_device *dev, int head)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv04_crtc_reg *crtcstate = dev_priv->mode_reg.crtc_reg;

	if (NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL) &
	    FP_TG_CONTROL_ON) {
		/* digital remnants must be cleaned before new crtc
		 * values programmed. delay is time for the vga stuff
		 * to realise it's in control again
		 */
		NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL,
			      FP_TG_CONTROL_OFF);
		msleep(50);
	}
	/* don't inadvertently turn it on when state written later */
	crtcstate[head].fp_control = FP_TG_CONTROL_OFF;
}
106
/*
 * DPMS helper for digital outputs: track per-CRTC flat-panel users in a
 * bitmask and gate the FP timing generator accordingly.
 *
 * On DPMS_ON, the encoder registers itself on its CRTC and the (possibly
 * previously saved) fp_control value is written back.  On any other mode,
 * the encoder is removed from every CRTC's user mask; a CRTC whose last
 * FP user just left has its fp_control saved and the output cut.
 */
void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	struct nouveau_crtc *nv_crtc;
	uint32_t *fpc;

	if (mode == DRM_MODE_DPMS_ON) {
		nv_crtc = nouveau_crtc(encoder->crtc);
		fpc = &dev_priv->mode_reg.crtc_reg[nv_crtc->index].fp_control;

		if (is_fpc_off(*fpc)) {
			/* using saved value is ok, as (is_digital && dpms_on &&
			 * fp_control==OFF) is (at present) *only* true when
			 * fpc's most recent change was by below "off" code
			 */
			*fpc = nv_crtc->dpms_saved_fp_control;
		}

		nv_crtc->fp_users |= 1 << nouveau_encoder(encoder)->dcb->index;
		NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_FP_TG_CONTROL, *fpc);
	} else {
		/* encoder->crtc may be stale here, so walk every CRTC */
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			nv_crtc = nouveau_crtc(crtc);
			fpc = &dev_priv->mode_reg.crtc_reg[nv_crtc->index].fp_control;

			nv_crtc->fp_users &= ~(1 << nouveau_encoder(encoder)->dcb->index);
			if (!is_fpc_off(*fpc) && !nv_crtc->fp_users) {
				nv_crtc->dpms_saved_fp_control = *fpc;
				/* cut the FP output */
				*fpc &= ~FP_TG_CONTROL_ON;
				*fpc |= FP_TG_CONTROL_OFF;
				NVWriteRAMDAC(dev, nv_crtc->index,
					      NV_PRAMDAC_FP_TG_CONTROL, *fpc);
			}
		}
	}
}
146
147static bool nv04_dfp_mode_fixup(struct drm_encoder *encoder,
148 struct drm_display_mode *mode,
149 struct drm_display_mode *adjusted_mode)
150{
151 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
152 struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder);
153
154 /* For internal panels and gpu scaling on DVI we need the native mode */
155 if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
156 if (!nv_connector->native_mode)
157 return false;
158 nv_encoder->mode = *nv_connector->native_mode;
159 adjusted_mode->clock = nv_connector->native_mode->clock;
160 } else {
161 nv_encoder->mode = *adjusted_mode;
162 }
163
164 return true;
165}
166
/*
 * Update the shadowed SEL_CLK value for an on-chip digital output: bind
 * the head's pixel clock PLL and, for LVDS, carry over the spread-spectrum
 * bit-pair from the saved register state.  Off-chip outputs are untouched.
 */
static void nv04_dfp_prepare_sel_clk(struct drm_device *dev,
				     struct nouveau_encoder *nv_encoder, int head)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv04_mode_state *state = &dev_priv->mode_reg;
	uint32_t bits1618 = nv_encoder->dcb->or & OUTPUT_A ? 0x10000 : 0x40000;

	if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP)
		return;

	/* SEL_CLK is only used on the primary ramdac
	 * It toggles spread spectrum PLL output and sets the bindings of PLLs
	 * to heads on digital outputs
	 */
	if (head)
		state->sel_clk |= bits1618;
	else
		state->sel_clk &= ~bits1618;

	/* nv30:
	 *	bit 0		NVClk spread spectrum on/off
	 *	bit 2		MemClk spread spectrum on/off
	 *	bit 4		PixClk1 spread spectrum on/off toggle
	 *	bit 6		PixClk2 spread spectrum on/off toggle
	 *
	 * nv40 (observations from bios behaviour and mmio traces):
	 *	bits 4&6	as for nv30
	 *	bits 5&7	head dependent as for bits 4&6, but do not appear with 4&6;
	 *			maybe a different spread mode
	 *	bits 8&10	seen on dual-link dvi outputs, purpose unknown (set by POST scripts)
	 * The logic behind turning spread spectrum on/off in the first place,
	 * and which bit-pair to use, is unclear on nv40 (for earlier cards, the fp table
	 * entry has the necessary info)
	 */
	if (nv_encoder->dcb->type == OUTPUT_LVDS && dev_priv->saved_reg.sel_clk & 0xf0) {
		int shift = (dev_priv->saved_reg.sel_clk & 0x50) ? 0 : 1;

		state->sel_clk &= ~0xf0;
		state->sel_clk |= (head ? 0x40 : 0x10) << shift;
	}
}
208
/*
 * Pre-modeset hook for digital outputs: power the encoder down, update the
 * shadowed SEL_CLK binding, and program the shadowed LCD CRTC register so
 * this head drives the panel — stealing the binding from the other head if
 * both would otherwise claim the same off-chip output.
 */
static void nv04_dfp_prepare(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_encoder_helper_funcs *helper = encoder->helper_private;
	struct drm_device *dev = encoder->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int head = nouveau_crtc(encoder->crtc)->index;
	struct nv04_crtc_reg *crtcstate = dev_priv->mode_reg.crtc_reg;
	uint8_t *cr_lcd = &crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX];
	uint8_t *cr_lcd_oth = &crtcstate[head ^ 1].CRTC[NV_CIO_CRE_LCD__INDEX];

	helper->dpms(encoder, DRM_MODE_DPMS_OFF);

	nv04_dfp_prepare_sel_clk(dev, nv_encoder, head);

	/* Some NV4x have unknown values (0x3f, 0x50, 0x54, 0x6b, 0x79, 0x7f)
	 * at LCD__INDEX which we don't alter
	 */
	if (!(*cr_lcd & 0x44)) {
		*cr_lcd = 0x3;

		if (nv_two_heads(dev)) {
			if (nv_encoder->dcb->location == DCB_LOC_ON_CHIP)
				*cr_lcd |= head ? 0x0 : 0x8;
			else {
				*cr_lcd |= (nv_encoder->dcb->or << 4) & 0x30;
				if (nv_encoder->dcb->type == OUTPUT_LVDS)
					*cr_lcd |= 0x30;
				if ((*cr_lcd & 0x30) == (*cr_lcd_oth & 0x30)) {
					/* avoid being connected to both crtcs */
					*cr_lcd_oth &= ~0x30;
					NVWriteVgaCrtc(dev, head ^ 1,
						       NV_CIO_CRE_LCD__INDEX,
						       *cr_lcd_oth);
				}
			}
		}
	}
}
248
249
250static void nv04_dfp_mode_set(struct drm_encoder *encoder,
251 struct drm_display_mode *mode,
252 struct drm_display_mode *adjusted_mode)
253{
254 struct drm_device *dev = encoder->dev;
255 struct drm_nouveau_private *dev_priv = dev->dev_private;
256 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
257 struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
258 struct nv04_crtc_reg *savep = &dev_priv->saved_reg.crtc_reg[nv_crtc->index];
259 struct nouveau_connector *nv_connector = nouveau_crtc_connector_get(nv_crtc);
260 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
261 struct drm_display_mode *output_mode = &nv_encoder->mode;
262 uint32_t mode_ratio, panel_ratio;
263
264 NV_DEBUG(dev, "Output mode on CRTC %d:\n", nv_crtc->index);
265 drm_mode_debug_printmodeline(output_mode);
266
267 /* Initialize the FP registers in this CRTC. */
268 regp->fp_horiz_regs[FP_DISPLAY_END] = output_mode->hdisplay - 1;
269 regp->fp_horiz_regs[FP_TOTAL] = output_mode->htotal - 1;
270 if (!nv_gf4_disp_arch(dev) ||
271 (output_mode->hsync_start - output_mode->hdisplay) >=
272 dev_priv->vbios->digital_min_front_porch)
273 regp->fp_horiz_regs[FP_CRTC] = output_mode->hdisplay;
274 else
275 regp->fp_horiz_regs[FP_CRTC] = output_mode->hsync_start - dev_priv->vbios->digital_min_front_porch - 1;
276 regp->fp_horiz_regs[FP_SYNC_START] = output_mode->hsync_start - 1;
277 regp->fp_horiz_regs[FP_SYNC_END] = output_mode->hsync_end - 1;
278 regp->fp_horiz_regs[FP_VALID_START] = output_mode->hskew;
279 regp->fp_horiz_regs[FP_VALID_END] = output_mode->hdisplay - 1;
280
281 regp->fp_vert_regs[FP_DISPLAY_END] = output_mode->vdisplay - 1;
282 regp->fp_vert_regs[FP_TOTAL] = output_mode->vtotal - 1;
283 regp->fp_vert_regs[FP_CRTC] = output_mode->vtotal - 5 - 1;
284 regp->fp_vert_regs[FP_SYNC_START] = output_mode->vsync_start - 1;
285 regp->fp_vert_regs[FP_SYNC_END] = output_mode->vsync_end - 1;
286 regp->fp_vert_regs[FP_VALID_START] = 0;
287 regp->fp_vert_regs[FP_VALID_END] = output_mode->vdisplay - 1;
288
289 /* bit26: a bit seen on some g7x, no as yet discernable purpose */
290 regp->fp_control = NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS |
291 (savep->fp_control & (1 << 26 | NV_PRAMDAC_FP_TG_CONTROL_READ_PROG));
292 /* Deal with vsync/hsync polarity */
293 /* LVDS screens do set this, but modes with +ve syncs are very rare */
294 if (output_mode->flags & DRM_MODE_FLAG_PVSYNC)
295 regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS;
296 if (output_mode->flags & DRM_MODE_FLAG_PHSYNC)
297 regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS;
298 /* panel scaling first, as native would get set otherwise */
299 if (nv_connector->scaling_mode == DRM_MODE_SCALE_NONE ||
300 nv_connector->scaling_mode == DRM_MODE_SCALE_CENTER) /* panel handles it */
301 regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_CENTER;
302 else if (adjusted_mode->hdisplay == output_mode->hdisplay &&
303 adjusted_mode->vdisplay == output_mode->vdisplay) /* native mode */
304 regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_NATIVE;
305 else /* gpu needs to scale */
306 regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_SCALE;
307 if (nvReadEXTDEV(dev, NV_PEXTDEV_BOOT_0) & NV_PEXTDEV_BOOT_0_STRAP_FP_IFACE_12BIT)
308 regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12;
309 if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP &&
310 output_mode->clock > 165000)
311 regp->fp_control |= (2 << 24);
312 if (nv_encoder->dcb->type == OUTPUT_LVDS) {
313 bool duallink, dummy;
314
315 nouveau_bios_parse_lvds_table(dev, nv_connector->native_mode->
316 clock, &duallink, &dummy);
317 if (duallink)
318 regp->fp_control |= (8 << 28);
319 } else
320 if (output_mode->clock > 165000)
321 regp->fp_control |= (8 << 28);
322
323 regp->fp_debug_0 = NV_PRAMDAC_FP_DEBUG_0_YWEIGHT_ROUND |
324 NV_PRAMDAC_FP_DEBUG_0_XWEIGHT_ROUND |
325 NV_PRAMDAC_FP_DEBUG_0_YINTERP_BILINEAR |
326 NV_PRAMDAC_FP_DEBUG_0_XINTERP_BILINEAR |
327 NV_RAMDAC_FP_DEBUG_0_TMDS_ENABLED |
328 NV_PRAMDAC_FP_DEBUG_0_YSCALE_ENABLE |
329 NV_PRAMDAC_FP_DEBUG_0_XSCALE_ENABLE;
330
331 /* We want automatic scaling */
332 regp->fp_debug_1 = 0;
333 /* This can override HTOTAL and VTOTAL */
334 regp->fp_debug_2 = 0;
335
336 /* Use 20.12 fixed point format to avoid floats */
337 mode_ratio = (1 << 12) * adjusted_mode->hdisplay / adjusted_mode->vdisplay;
338 panel_ratio = (1 << 12) * output_mode->hdisplay / output_mode->vdisplay;
339 /* if ratios are equal, SCALE_ASPECT will automatically (and correctly)
340 * get treated the same as SCALE_FULLSCREEN */
341 if (nv_connector->scaling_mode == DRM_MODE_SCALE_ASPECT &&
342 mode_ratio != panel_ratio) {
343 uint32_t diff, scale;
344 bool divide_by_2 = nv_gf4_disp_arch(dev);
345
346 if (mode_ratio < panel_ratio) {
347 /* vertical needs to expand to glass size (automatic)
348 * horizontal needs to be scaled at vertical scale factor
349 * to maintain aspect */
350
351 scale = (1 << 12) * adjusted_mode->vdisplay / output_mode->vdisplay;
352 regp->fp_debug_1 = NV_PRAMDAC_FP_DEBUG_1_XSCALE_TESTMODE_ENABLE |
353 XLATE(scale, divide_by_2, NV_PRAMDAC_FP_DEBUG_1_XSCALE_VALUE);
354
355 /* restrict area of screen used, horizontally */
356 diff = output_mode->hdisplay -
357 output_mode->vdisplay * mode_ratio / (1 << 12);
358 regp->fp_horiz_regs[FP_VALID_START] += diff / 2;
359 regp->fp_horiz_regs[FP_VALID_END] -= diff / 2;
360 }
361
362 if (mode_ratio > panel_ratio) {
363 /* horizontal needs to expand to glass size (automatic)
364 * vertical needs to be scaled at horizontal scale factor
365 * to maintain aspect */
366
367 scale = (1 << 12) * adjusted_mode->hdisplay / output_mode->hdisplay;
368 regp->fp_debug_1 = NV_PRAMDAC_FP_DEBUG_1_YSCALE_TESTMODE_ENABLE |
369 XLATE(scale, divide_by_2, NV_PRAMDAC_FP_DEBUG_1_YSCALE_VALUE);
370
371 /* restrict area of screen used, vertically */
372 diff = output_mode->vdisplay -
373 (1 << 12) * output_mode->hdisplay / mode_ratio;
374 regp->fp_vert_regs[FP_VALID_START] += diff / 2;
375 regp->fp_vert_regs[FP_VALID_END] -= diff / 2;
376 }
377 }
378
379 /* Output property. */
380 if (nv_connector->use_dithering) {
381 if (dev_priv->chipset == 0x11)
382 regp->dither = savep->dither | 0x00010000;
383 else {
384 int i;
385 regp->dither = savep->dither | 0x00000001;
386 for (i = 0; i < 3; i++) {
387 regp->dither_regs[i] = 0xe4e4e4e4;
388 regp->dither_regs[i + 3] = 0x44444444;
389 }
390 }
391 } else {
392 if (dev_priv->chipset != 0x11) {
393 /* reset them */
394 int i;
395 for (i = 0; i < 3; i++) {
396 regp->dither_regs[i] = savep->dither_regs[i];
397 regp->dither_regs[i + 3] = savep->dither_regs[i + 3];
398 }
399 }
400 regp->dither = savep->dither;
401 }
402
403 regp->fp_margin_color = 0;
404}
405
/* Post-modeset hook for DFP encoders: run the VBIOS script matching the
 * new pixel clock, resync the cached fp_control with whatever the script
 * programmed, then power the output up. */
static void nv04_dfp_commit(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_encoder_helper_funcs *helper = encoder->helper_private;
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct dcb_entry *dcbe = nv_encoder->dcb;
	int head = nouveau_crtc(encoder->crtc)->index;

	NV_TRACE(dev, "%s called for encoder %d\n", __func__, nv_encoder->dcb->index);

	/* Let the VBIOS configure the output for the new pixel clock. */
	if (dcbe->type == OUTPUT_TMDS)
		run_tmds_table(dev, dcbe, head, nv_encoder->mode.clock);
	else if (dcbe->type == OUTPUT_LVDS)
		call_lvds_script(dev, dcbe, head, LVDS_RESET, nv_encoder->mode.clock);

	/* update fp_control state for any changes made by scripts,
	 * so correct value is written at DPMS on */
	dev_priv->mode_reg.crtc_reg[head].fp_control =
		NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL);

	/* This could use refinement for flatpanels, but it should work this way */
	if (dev_priv->chipset < 0x44)
		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000);
	else
		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);

	/* Power the output back up now that it is fully configured. */
	helper->dpms(encoder, DRM_MODE_DPMS_ON);

	NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
		drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
		nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
}
440
441static inline bool is_powersaving_dpms(int mode)
442{
443 return (mode != DRM_MODE_DPMS_ON);
444}
445
/* DPMS hook for LVDS panels.  May run the VBIOS panel power scripts,
 * updates the FP control register, and gates the panel clock source in
 * SEL_CLK when powering down. */
static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_crtc *crtc = encoder->crtc;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	bool was_powersaving = is_powersaving_dpms(nv_encoder->last_dpms);

	if (nv_encoder->last_dpms == mode)
		return;
	nv_encoder->last_dpms = mode;

	NV_INFO(dev, "Setting dpms mode %d on lvds encoder (output %d)\n",
		mode, nv_encoder->dcb->index);

	/* Moving between two powersaving states needs no reprogramming. */
	if (was_powersaving && is_powersaving_dpms(mode))
		return;

	if (nv_encoder->dcb->lvdsconf.use_power_scripts) {
		struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder);

		/* when removing an output, crtc may not be set, but PANEL_OFF
		 * must still be run
		 */
		int head = crtc ? nouveau_crtc(crtc)->index :
			   nv04_dfp_get_bound_head(dev, nv_encoder->dcb);

		if (mode == DRM_MODE_DPMS_ON) {
			if (!nv_connector->native_mode) {
				NV_ERROR(dev, "Not turning on LVDS without native mode\n");
				return;
			}
			call_lvds_script(dev, nv_encoder->dcb, head,
					 LVDS_PANEL_ON, nv_connector->native_mode->clock);
		} else
			/* pxclk of 0 is fine for PANEL_OFF, and for a
			 * disconnected LVDS encoder there is no native_mode
			 */
			call_lvds_script(dev, nv_encoder->dcb, head,
					 LVDS_PANEL_OFF, 0);
	}

	nv04_dfp_update_fp_control(encoder, mode);

	if (mode == DRM_MODE_DPMS_ON)
		/* NOTE(review): crtc is assumed non-NULL on the DPMS_ON path;
		 * the NULL case above arises only when turning panels off. */
		nv04_dfp_prepare_sel_clk(dev, nv_encoder, nouveau_crtc(crtc)->index);
	else {
		/* Gate the flatpanel clock source bits in SEL_CLK. */
		dev_priv->mode_reg.sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK);
		dev_priv->mode_reg.sel_clk &= ~0xf0;
	}
	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, dev_priv->mode_reg.sel_clk);
}
498
499static void nv04_tmds_dpms(struct drm_encoder *encoder, int mode)
500{
501 struct drm_device *dev = encoder->dev;
502 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
503
504 if (nv_encoder->last_dpms == mode)
505 return;
506 nv_encoder->last_dpms = mode;
507
508 NV_INFO(dev, "Setting dpms mode %d on tmds encoder (output %d)\n",
509 mode, nv_encoder->dcb->index);
510
511 nv04_dfp_update_fp_control(encoder, mode);
512}
513
514static void nv04_dfp_save(struct drm_encoder *encoder)
515{
516 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
517 struct drm_device *dev = encoder->dev;
518
519 if (nv_two_heads(dev))
520 nv_encoder->restore.head =
521 nv04_dfp_get_bound_head(dev, nv_encoder->dcb);
522}
523
/* Restore pre-load hardware state for this DFP: re-run the VBIOS panel
 * script (LVDS) or TMDS table on the head the output was bound to at
 * save time, using the pixel clock captured at driver load. */
static void nv04_dfp_restore(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_device *dev = encoder->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int head = nv_encoder->restore.head;

	if (nv_encoder->dcb->type == OUTPUT_LVDS) {
		struct drm_display_mode *native_mode = nouveau_encoder_connector_get(nv_encoder)->native_mode;
		if (native_mode)
			call_lvds_script(dev, nv_encoder->dcb, head, LVDS_PANEL_ON,
					 native_mode->clock);
		else
			NV_ERROR(dev, "Not restoring LVDS without native mode\n");

	} else if (nv_encoder->dcb->type == OUTPUT_TMDS) {
		/* Recover the pixel clock from the PLL values saved at load. */
		int clock = nouveau_hw_pllvals_to_clk
					(&dev_priv->saved_reg.crtc_reg[head].pllvals);

		run_tmds_table(dev, nv_encoder->dcb, head, clock);
	}

	/* Force the next DPMS call to actually reprogram the hardware. */
	nv_encoder->last_dpms = NV_DPMS_CLEARED;
}
548
549static void nv04_dfp_destroy(struct drm_encoder *encoder)
550{
551 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
552
553 NV_DEBUG(encoder->dev, "\n");
554
555 drm_encoder_cleanup(encoder);
556 kfree(nv_encoder);
557}
558
/* Helper vtable for LVDS outputs: shares the generic DFP hooks but uses
 * the LVDS-specific dpms (panel power scripts, SEL_CLK gating). */
static const struct drm_encoder_helper_funcs nv04_lvds_helper_funcs = {
	.dpms = nv04_lvds_dpms,
	.save = nv04_dfp_save,
	.restore = nv04_dfp_restore,
	.mode_fixup = nv04_dfp_mode_fixup,
	.prepare = nv04_dfp_prepare,
	.commit = nv04_dfp_commit,
	.mode_set = nv04_dfp_mode_set,
	.detect = NULL,		/* detection handled at the connector level */
};
569
/* Helper vtable for TMDS outputs: identical to the LVDS one except for
 * the simpler TMDS dpms hook. */
static const struct drm_encoder_helper_funcs nv04_tmds_helper_funcs = {
	.dpms = nv04_tmds_dpms,
	.save = nv04_dfp_save,
	.restore = nv04_dfp_restore,
	.mode_fixup = nv04_dfp_mode_fixup,
	.prepare = nv04_dfp_prepare,
	.commit = nv04_dfp_commit,
	.mode_set = nv04_dfp_mode_set,
	.detect = NULL,		/* detection handled at the connector level */
};
580
/* Encoder funcs shared by TMDS and LVDS; only destruction is needed. */
static const struct drm_encoder_funcs nv04_dfp_funcs = {
	.destroy = nv04_dfp_destroy,
};
584
585int nv04_dfp_create(struct drm_device *dev, struct dcb_entry *entry)
586{
587 const struct drm_encoder_helper_funcs *helper;
588 struct drm_encoder *encoder;
589 struct nouveau_encoder *nv_encoder = NULL;
590 int type;
591
592 switch (entry->type) {
593 case OUTPUT_TMDS:
594 type = DRM_MODE_ENCODER_TMDS;
595 helper = &nv04_tmds_helper_funcs;
596 break;
597 case OUTPUT_LVDS:
598 type = DRM_MODE_ENCODER_LVDS;
599 helper = &nv04_lvds_helper_funcs;
600 break;
601 default:
602 return -EINVAL;
603 }
604
605 nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
606 if (!nv_encoder)
607 return -ENOMEM;
608
609 encoder = to_drm_encoder(nv_encoder);
610
611 nv_encoder->dcb = entry;
612 nv_encoder->or = ffs(entry->or) - 1;
613
614 drm_encoder_init(dev, encoder, &nv04_dfp_funcs, type);
615 drm_encoder_helper_add(encoder, helper);
616
617 encoder->possible_crtcs = entry->heads;
618 encoder->possible_clones = 0;
619
620 return 0;
621}
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
new file mode 100644
index 000000000000..b47c757ff48b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -0,0 +1,288 @@
1/*
2 * Copyright 2009 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "drm.h"
27#include "drm_crtc_helper.h"
28
29#include "nouveau_drv.h"
30#include "nouveau_fb.h"
31#include "nouveau_hw.h"
32#include "nouveau_encoder.h"
33#include "nouveau_connector.h"
34
/* True when more than one encoder type shares a connector: clearing the
 * lowest set bit leaves a non-zero value only if at least two bits are
 * set.  Arguments are parenthesized so expression arguments (e.g.
 * MULTIPLE_ENCODERS(a | b)) expand correctly. */
#define MULTIPLE_ENCODERS(e) ((e) & ((e) - 1))
36
/* Record which CRTC "owned" the outputs before the driver loaded so the
 * original owner can be reinstated on restore.  CR44 holds this on most
 * chips; reading CR44 is broken on NV11, where ownership is inferred from
 * which head has a slaved (non-TV) output. */
static void
nv04_display_store_initial_head_owner(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (dev_priv->chipset != 0x11) {
		dev_priv->crtc_owner = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_44);
		goto ownerknown;
	}

	/* reading CR44 is broken on nv11, so we attempt to infer it */
	if (nvReadMC(dev, NV_PBUS_DEBUG_1) & (1 << 28)) /* heads tied, restore both */
		dev_priv->crtc_owner = 0x4;
	else {
		uint8_t slaved_on_A, slaved_on_B;
		bool tvA = false;
		bool tvB = false;

		NVLockVgaCrtcs(dev, false);

		/* Bit 7 of CRE_PIXEL indicates the head is slaved. */
		slaved_on_B = NVReadVgaCrtc(dev, 1, NV_CIO_CRE_PIXEL_INDEX) &
									0x80;
		if (slaved_on_B)
			tvB = !(NVReadVgaCrtc(dev, 1, NV_CIO_CRE_LCD__INDEX) &
					MASK(NV_CIO_CRE_LCD_LCD_SELECT));

		slaved_on_A = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX) &
									0x80;
		if (slaved_on_A)
			tvA = !(NVReadVgaCrtc(dev, 0, NV_CIO_CRE_LCD__INDEX) &
					MASK(NV_CIO_CRE_LCD_LCD_SELECT));

		NVLockVgaCrtcs(dev, true);

		/* Prefer a head slaved to a panel over one slaved to a TV
		 * encoder; default to head A (owner 0x0) otherwise. */
		if (slaved_on_A && !tvA)
			dev_priv->crtc_owner = 0x0;
		else if (slaved_on_B && !tvB)
			dev_priv->crtc_owner = 0x3;
		else if (slaved_on_A)
			dev_priv->crtc_owner = 0x0;
		else if (slaved_on_B)
			dev_priv->crtc_owner = 0x3;
		else
			dev_priv->crtc_owner = 0x0;
	}

ownerknown:
	NV_INFO(dev, "Initial CRTC_OWNER is %d\n", dev_priv->crtc_owner);

	/* we need to ensure the heads are not tied henceforth, or reading any
	 * 8 bit reg on head B will fail
	 * setting a single arbitrary head solves that */
	NVSetOwner(dev, 0);
}
91
/* Build the nv04 modesetting configuration: record initial head ownership,
 * create CRTCs, one encoder per usable DCB entry and one connector per DCB
 * connector index, then snapshot the pre-load hardware state.
 * Always returns 0. */
int
nv04_display_create(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct parsed_dcb *dcb = dev_priv->vbios->dcb;
	struct drm_encoder *encoder;
	struct drm_crtc *crtc;
	/* Per-connector-index bitmask of encoder (DCB output) types. */
	uint16_t connector[16] = { 0 };
	int i, ret;

	NV_DEBUG(dev, "\n");

	if (nv_two_heads(dev))
		nv04_display_store_initial_head_owner(dev);

	drm_mode_config_init(dev);
	drm_mode_create_scaling_mode_property(dev);
	drm_mode_create_dithering_property(dev);

	dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs;

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	/* NV04 cards have smaller CRTC limits than later chips. */
	switch (dev_priv->card_type) {
	case NV_04:
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
		break;
	default:
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
		break;
	}

	dev->mode_config.fb_base = dev_priv->fb_phys;

	nv04_crtc_create(dev, 0);
	if (nv_two_heads(dev))
		nv04_crtc_create(dev, 1);

	/* First pass: create an encoder for each DCB entry we understand,
	 * tracking which encoder types ended up behind each connector. */
	for (i = 0; i < dcb->entries; i++) {
		struct dcb_entry *dcbent = &dcb->entry[i];

		switch (dcbent->type) {
		case OUTPUT_ANALOG:
			ret = nv04_dac_create(dev, dcbent);
			break;
		case OUTPUT_LVDS:
		case OUTPUT_TMDS:
			ret = nv04_dfp_create(dev, dcbent);
			break;
		case OUTPUT_TV:
			if (dcbent->location == DCB_LOC_ON_CHIP)
				ret = nv17_tv_create(dev, dcbent);
			else
				ret = nv04_tv_create(dev, dcbent);
			break;
		default:
			NV_WARN(dev, "DCB type %d not known\n", dcbent->type);
			continue;
		}

		if (ret)
			continue;

		connector[dcbent->connector] |= (1 << dcbent->type);
	}

	/* Second pass: create one drm_connector per connector index, picking
	 * the connector type from the combination of encoders behind it. */
	for (i = 0; i < dcb->entries; i++) {
		struct dcb_entry *dcbent = &dcb->entry[i];
		uint16_t encoders;
		int type;

		encoders = connector[dcbent->connector];
		if (!(encoders & (1 << dcbent->type)))
			continue;
		/* Zero the mask so later entries on this connector skip. */
		connector[dcbent->connector] = 0;

		switch (dcbent->type) {
		case OUTPUT_ANALOG:
			if (!MULTIPLE_ENCODERS(encoders))
				type = DRM_MODE_CONNECTOR_VGA;
			else
				type = DRM_MODE_CONNECTOR_DVII;
			break;
		case OUTPUT_TMDS:
			if (!MULTIPLE_ENCODERS(encoders))
				type = DRM_MODE_CONNECTOR_DVID;
			else
				type = DRM_MODE_CONNECTOR_DVII;
			break;
		case OUTPUT_LVDS:
			type = DRM_MODE_CONNECTOR_LVDS;
#if 0
			/* don't create i2c adapter when lvds ddc not allowed */
			if (dcbent->lvdsconf.use_straps_for_mode ||
			    dev_priv->vbios->fp_no_ddc)
				i2c_index = 0xf;
#endif
			break;
		case OUTPUT_TV:
			type = DRM_MODE_CONNECTOR_TV;
			break;
		default:
			/* assignment is dead (we skip this entry below),
			 * kept only to silence uninitialized-use warnings */
			type = DRM_MODE_CONNECTOR_Unknown;
			continue;
		}

		nouveau_connector_create(dev, dcbent->connector, type);
	}

	/* Save previous state */
	NVLockVgaCrtcs(dev, false);

	nouveau_hw_save_vga_fonts(dev, 1);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		crtc->funcs->save(crtc);

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct drm_encoder_helper_funcs *func = encoder->helper_private;

		func->save(encoder);
	}

	return 0;
}
219
/* Tear down nv04 modesetting: switch every CRTC off, put the hardware
 * back into its pre-load state (encoders, CRTCs, VGA fonts) and release
 * the mode configuration. */
void
nv04_display_destroy(struct drm_device *dev)
{
	struct drm_encoder *encoder;
	struct drm_crtc *crtc;

	NV_DEBUG(dev, "\n");

	/* Turn every CRTC off. */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		/* An empty mode_set (no mode, no fb) disables the CRTC. */
		struct drm_mode_set modeset = {
			.crtc = crtc,
		};

		crtc->funcs->set_config(&modeset);
	}

	/* Restore state */
	NVLockVgaCrtcs(dev, false);

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct drm_encoder_helper_funcs *func = encoder->helper_private;

		func->restore(encoder);
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		crtc->funcs->restore(crtc);

	/* Second argument 0 = restore the fonts saved at load time. */
	nouveau_hw_save_vga_fonts(dev, 0);

	drm_mode_config_cleanup(dev);
}
253
/* Re-load the hardware state captured before the driver took over, then
 * hand head ownership back to its original owner. */
void
nv04_display_restore(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_encoder *encoder;
	struct drm_crtc *crtc;

	NVLockVgaCrtcs(dev, false);

	/* meh.. modeset apparently doesn't setup all the regs and depends
	 * on pre-existing state, for now load the state of the card *before*
	 * nouveau was loaded, and then do a modeset.
	 *
	 * best thing to do probably is to make save/restore routines not
	 * save/restore "pre-load" state, but more general so we can save
	 * on suspend too.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct drm_encoder_helper_funcs *func = encoder->helper_private;

		func->restore(encoder);
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		crtc->funcs->restore(crtc);

	/* Reinstate the CRTC owner recorded at load time. */
	if (nv_two_heads(dev)) {
		NV_INFO(dev, "Restoring CRTC_OWNER to %d.\n",
			dev_priv->crtc_owner);
		NVSetOwner(dev, dev_priv->crtc_owner);
	}

	NVLockVgaCrtcs(dev, true);
}
288
diff --git a/drivers/gpu/drm/nouveau/nv04_fb.c b/drivers/gpu/drm/nouveau/nv04_fb.c
new file mode 100644
index 000000000000..638cf601c427
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_fb.c
@@ -0,0 +1,21 @@
1#include "drmP.h"
2#include "drm.h"
3#include "nouveau_drv.h"
4#include "nouveau_drm.h"
5
6int
7nv04_fb_init(struct drm_device *dev)
8{
9 /* This is what the DDX did for NV_ARCH_04, but a mmio-trace shows
10 * nvidia reading PFB_CFG_0, then writing back its original value.
11 * (which was 0x701114 in this case)
12 */
13
14 nv_wr32(dev, NV04_PFB_CFG0, 0x1114);
15 return 0;
16}
17
/* Teardown counterpart of nv04_fb_init: init only pokes one config
 * register, so there is deliberately nothing to undo here. */
void
nv04_fb_takedown(struct drm_device *dev)
{
}
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
new file mode 100644
index 000000000000..09a31071ee58
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -0,0 +1,316 @@
1/*
2 * Copyright 2009 Ben Skeggs
3 * Copyright 2008 Stuart Bennett
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_dma.h"
28#include "nouveau_fbcon.h"
29
30static void
31nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
32{
33 struct nouveau_fbcon_par *par = info->par;
34 struct drm_device *dev = par->dev;
35 struct drm_nouveau_private *dev_priv = dev->dev_private;
36 struct nouveau_channel *chan = dev_priv->channel;
37
38 if (info->state != FBINFO_STATE_RUNNING)
39 return;
40
41 if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 4)) {
42 NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
43 info->flags |= FBINFO_HWACCEL_DISABLED;
44 }
45
46 if (info->flags & FBINFO_HWACCEL_DISABLED) {
47 cfb_copyarea(info, region);
48 return;
49 }
50
51 BEGIN_RING(chan, NvSubImageBlit, 0x0300, 3);
52 OUT_RING(chan, (region->sy << 16) | region->sx);
53 OUT_RING(chan, (region->dy << 16) | region->dx);
54 OUT_RING(chan, (region->height << 16) | region->width);
55 FIRE_RING(chan);
56}
57
/* Accelerated fb_fillrect via the GDI rectangle object; falls back to
 * the software cfb_fillrect permanently once the channel looks wedged. */
static void
nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	struct nouveau_fbcon_par *par = info->par;
	struct drm_device *dev = par->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = dev_priv->channel;
	/* Fill colour looked up through the pseudo palette. */
	uint32_t color = ((uint32_t *) info->pseudo_palette)[rect->color];

	if (info->state != FBINFO_STATE_RUNNING)
		return;

	/* Failure to reserve ring space means the GPU is hung; disable
	 * acceleration for good and take the software path below. */
	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 7)) {
		NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
		info->flags |= FBINFO_HWACCEL_DISABLED;
	}

	if (info->flags & FBINFO_HWACCEL_DISABLED) {
		cfb_fillrect(info, rect);
		return;
	}

	/* Select the raster operation: 1 when a ROP is requested, 3 for a
	 * plain copy (presumably ROP_AND vs SRCCOPY — confirm against the
	 * class 0x4a method docs). */
	BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1);
	OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3);
	BEGIN_RING(chan, NvSubGdiRect, 0x03fc, 1);
	OUT_RING(chan, color);
	/* Rectangle: top-left point, then width/height. */
	BEGIN_RING(chan, NvSubGdiRect, 0x0400, 2);
	OUT_RING(chan, (rect->dx << 16) | rect->dy);
	OUT_RING(chan, (rect->width << 16) | rect->height);
	FIRE_RING(chan);
}
89
/* Accelerated fb_imageblit for 1bpp images (font glyphs) via the GDI
 * object's mono-expand method; deeper images go to cfb_imageblit. */
static void
nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
	struct nouveau_fbcon_par *par = info->par;
	struct drm_device *dev = par->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = dev_priv->channel;
	uint32_t fg;		/* resolved foreground colour */
	uint32_t bg;		/* resolved background colour */
	uint32_t dsize;		/* remaining bitmap payload, in 32-bit words */
	uint32_t width;		/* image width rounded up to 32 pixels */
	uint32_t *data = (uint32_t *)image->data;

	if (info->state != FBINFO_STATE_RUNNING)
		return;

	/* Only monochrome (1bpp) images are expanded in hardware. */
	if (image->depth != 1) {
		cfb_imageblit(info, image);
		return;
	}

	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 8)) {
		NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
		info->flags |= FBINFO_HWACCEL_DISABLED;
	}

	if (info->flags & FBINFO_HWACCEL_DISABLED) {
		cfb_imageblit(info, image);
		return;
	}

	/* Source rows are padded to 32-bit boundaries. */
	width = (image->width + 31) & ~31;
	dsize = (width * image->height) >> 5;

	/* Translate fg/bg through the pseudo palette for visuals whose
	 * colour fields are pixel values rather than palette indices. */
	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
		fg = ((uint32_t *) info->pseudo_palette)[image->fg_color];
		bg = ((uint32_t *) info->pseudo_palette)[image->bg_color];
	} else {
		fg = image->fg_color;
		bg = image->bg_color;
	}

	/* Set up the mono-expand blit: clip corners, colours, logical and
	 * padded sizes, and the destination origin. */
	BEGIN_RING(chan, NvSubGdiRect, 0x0be4, 7);
	OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
	OUT_RING(chan, ((image->dy + image->height) << 16) |
			 ((image->dx + image->width) & 0xffff));
	OUT_RING(chan, bg);
	OUT_RING(chan, fg);
	OUT_RING(chan, (image->height << 16) | image->width);
	OUT_RING(chan, (image->height << 16) | width);
	OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));

	/* Stream the bitmap in chunks of at most 128 data words per
	 * method call. */
	while (dsize) {
		int iter_len = dsize > 128 ? 128 : dsize;

		if (RING_SPACE(chan, iter_len + 1)) {
			NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
			info->flags |= FBINFO_HWACCEL_DISABLED;
			cfb_imageblit(info, image);
			return;
		}

		BEGIN_RING(chan, NvSubGdiRect, 0x0c00, iter_len);
		OUT_RINGp(chan, data, iter_len);
		data += iter_len;
		dsize -= iter_len;
	}

	FIRE_RING(chan);
}
161
162static int
163nv04_fbcon_grobj_new(struct drm_device *dev, int class, uint32_t handle)
164{
165 struct drm_nouveau_private *dev_priv = dev->dev_private;
166 struct nouveau_gpuobj *obj = NULL;
167 int ret;
168
169 ret = nouveau_gpuobj_gr_new(dev_priv->channel, class, &obj);
170 if (ret)
171 return ret;
172
173 ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, handle, obj, NULL);
174 if (ret)
175 return ret;
176
177 return 0;
178}
179
/* Set up the 2D acceleration objects and state used by the fbcon hooks:
 * create surface/clip/rop/pattern/rect/blit objects, bind them to
 * subchannels, program their initial state, and install the accelerated
 * fb_ops.  On ring exhaustion fbcon simply stays unaccelerated (still
 * returns 0); object-creation failures return a negative errno. */
int
nv04_fbcon_accel_init(struct fb_info *info)
{
	struct nouveau_fbcon_par *par = info->par;
	struct drm_device *dev = par->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = dev_priv->channel;
	int surface_fmt, pattern_fmt, rect_fmt;
	int ret;

	/* Pick object colour formats matching the framebuffer depth
	 * (raw hardware format codes; see the respective class docs). */
	switch (info->var.bits_per_pixel) {
	case 8:
		surface_fmt = 1;
		pattern_fmt = 3;
		rect_fmt = 3;
		break;
	case 16:
		surface_fmt = 4;
		pattern_fmt = 1;
		rect_fmt = 1;
		break;
	case 32:
		switch (info->var.transp.length) {
		case 0: /* depth 24 */
		case 8: /* depth 32 */
			break;
		default:
			return -EINVAL;
		}

		surface_fmt = 6;
		pattern_fmt = 3;
		rect_fmt = 3;
		break;
	default:
		return -EINVAL;
	}

	/* 2D surface object: class 0x62 on NV10+, 0x42 on older chips. */
	ret = nv04_fbcon_grobj_new(dev, dev_priv->card_type >= NV_10 ?
				   0x0062 : 0x0042, NvCtxSurf2D);
	if (ret)
		return ret;

	ret = nv04_fbcon_grobj_new(dev, 0x0019, NvClipRect);
	if (ret)
		return ret;

	ret = nv04_fbcon_grobj_new(dev, 0x0043, NvRop);
	if (ret)
		return ret;

	ret = nv04_fbcon_grobj_new(dev, 0x0044, NvImagePatt);
	if (ret)
		return ret;

	ret = nv04_fbcon_grobj_new(dev, 0x004a, NvGdiRect);
	if (ret)
		return ret;

	/* Image blit object: class 0x9f on NV10+, 0x5f otherwise. */
	ret = nv04_fbcon_grobj_new(dev, dev_priv->card_type >= NV_10 ?
				   0x009f : 0x005f, NvImageBlit);
	if (ret)
		return ret;

	if (RING_SPACE(chan, 49)) {
		NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
		info->flags |= FBINFO_HWACCEL_DISABLED;
		return 0;
	}

	/* Bind the 2D surface on subchannel 1 and point both source and
	 * destination at the framebuffer DMA object. */
	BEGIN_RING(chan, 1, 0x0000, 1);
	OUT_RING(chan, NvCtxSurf2D);
	BEGIN_RING(chan, 1, 0x0184, 2);
	OUT_RING(chan, NvDmaFB);
	OUT_RING(chan, NvDmaFB);
	BEGIN_RING(chan, 1, 0x0300, 4);
	OUT_RING(chan, surface_fmt);
	OUT_RING(chan, info->fix.line_length | (info->fix.line_length << 16));
	OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
	OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);

	/* ROP object: 0x55 — NOTE(review): presumably the copy rop when
	 * combined with the solid pattern below; confirm vs class 0x43. */
	BEGIN_RING(chan, 1, 0x0000, 1);
	OUT_RING(chan, NvRop);
	BEGIN_RING(chan, 1, 0x0300, 1);
	OUT_RING(chan, 0x55);

	/* Pattern object: solid all-ones monochrome pattern. */
	BEGIN_RING(chan, 1, 0x0000, 1);
	OUT_RING(chan, NvImagePatt);
	BEGIN_RING(chan, 1, 0x0300, 8);
	OUT_RING(chan, pattern_fmt);
#ifdef __BIG_ENDIAN
	OUT_RING(chan, 2);
#else
	OUT_RING(chan, 1);
#endif
	OUT_RING(chan, 0);
	OUT_RING(chan, 1);
	OUT_RING(chan, ~0);
	OUT_RING(chan, ~0);
	OUT_RING(chan, ~0);
	OUT_RING(chan, ~0);

	/* Clip rectangle covering the whole virtual screen. */
	BEGIN_RING(chan, 1, 0x0000, 1);
	OUT_RING(chan, NvClipRect);
	BEGIN_RING(chan, 1, 0x0300, 2);
	OUT_RING(chan, 0);
	OUT_RING(chan, (info->var.yres_virtual << 16) | info->var.xres_virtual);

	/* Bind the blit object to its subchannel and attach the surface. */
	BEGIN_RING(chan, NvSubImageBlit, 0x0000, 1);
	OUT_RING(chan, NvImageBlit);
	BEGIN_RING(chan, NvSubImageBlit, 0x019c, 1);
	OUT_RING(chan, NvCtxSurf2D);
	BEGIN_RING(chan, NvSubImageBlit, 0x02fc, 1);
	OUT_RING(chan, 3);

	/* Bind the GDI rect object and attach surface, pattern and rop. */
	BEGIN_RING(chan, NvSubGdiRect, 0x0000, 1);
	OUT_RING(chan, NvGdiRect);
	BEGIN_RING(chan, NvSubGdiRect, 0x0198, 1);
	OUT_RING(chan, NvCtxSurf2D);
	BEGIN_RING(chan, NvSubGdiRect, 0x0188, 2);
	OUT_RING(chan, NvImagePatt);
	OUT_RING(chan, NvRop);
	BEGIN_RING(chan, NvSubGdiRect, 0x0304, 1);
	OUT_RING(chan, 1);
	BEGIN_RING(chan, NvSubGdiRect, 0x0300, 1);
	OUT_RING(chan, rect_fmt);
	BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1);
	OUT_RING(chan, 3);

	FIRE_RING(chan);

	/* Install the accelerated hooks now that the state is in place. */
	info->fbops->fb_fillrect = nv04_fbcon_fillrect;
	info->fbops->fb_copyarea = nv04_fbcon_copyarea;
	info->fbops->fb_imageblit = nv04_fbcon_imageblit;
	return 0;
}
316
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
new file mode 100644
index 000000000000..0c3cd53c7313
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_fifo.c
@@ -0,0 +1,271 @@
1/*
2 * Copyright (C) 2007 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm.h"
29#include "nouveau_drv.h"
30
31#define NV04_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV04_RAMFC__SIZE))
32#define NV04_RAMFC__SIZE 32
33#define NV04_RAMFC_DMA_PUT 0x00
34#define NV04_RAMFC_DMA_GET 0x04
35#define NV04_RAMFC_DMA_INSTANCE 0x08
36#define NV04_RAMFC_DMA_STATE 0x0C
37#define NV04_RAMFC_DMA_FETCH 0x10
38#define NV04_RAMFC_ENGINE 0x14
39#define NV04_RAMFC_PULL1_ENGINE 0x18
40
41#define RAMFC_WR(offset, val) nv_wo32(dev, chan->ramfc->gpuobj, \
42 NV04_RAMFC_##offset/4, (val))
43#define RAMFC_RD(offset) nv_ro32(dev, chan->ramfc->gpuobj, \
44 NV04_RAMFC_##offset/4)
45
/*
 * Stop PFIFO CACHE1 processing: clear the DMA pusher enable bit,
 * disable the pusher (PUSH0) and the puller (PULL0).
 */
void
nv04_fifo_disable(struct drm_device *dev)
{
	uint32_t tmp;

	/* Clear bit 0 (enable) of the DMA push control. */
	tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH);
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, tmp & ~1);
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
	/* NOTE(review): reads PULL1 but writes PULL0 with the masked
	 * value — looks intentional (matches upstream), but confirm
	 * against the PFIFO register documentation. */
	tmp = nv_rd32(dev, NV03_PFIFO_CACHE1_PULL1);
	nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, tmp & ~1);
}
57
/* Re-enable PFIFO CACHE1 processing (pusher and puller). */
void
nv04_fifo_enable(struct drm_device *dev)
{
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
	nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
}
64
65bool
66nv04_fifo_reassign(struct drm_device *dev, bool enable)
67{
68 uint32_t reassign = nv_rd32(dev, NV03_PFIFO_CACHES);
69
70 nv_wr32(dev, NV03_PFIFO_CACHES, enable ? 1 : 0);
71 return (reassign == 1);
72}
73
/*
 * Return the id of the channel currently resident in PFIFO CACHE1,
 * read from the CHID field of PUSH1.
 */
int
nv04_fifo_channel_id(struct drm_device *dev)
{
	return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
			NV03_PFIFO_CACHE1_PUSH1_CHID_MASK;
}
80
/*
 * Create the PFIFO context for a channel: allocate a fake gpuobj over
 * the channel's fixed 32-byte RAMFC slot (see NV04_RAMFC()), fill in
 * the initial DMA state, and enable DMA mode for the channel.
 *
 * Returns 0 on success, or the negative errno from the gpuobj
 * allocation on failure.
 */
int
nv04_fifo_create_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(chan->id), ~0,
						NV04_RAMFC__SIZE,
						NVOBJ_FLAG_ZERO_ALLOC |
						NVOBJ_FLAG_ZERO_FREE,
						NULL, &chan->ramfc);
	if (ret)
		return ret;

	/* Setup initial state: both DMA pointers start at the channel's
	 * pushbuf base, and the fetch parameters use the 128-byte defaults. */
	dev_priv->engine.instmem.prepare_access(dev, true);
	RAMFC_WR(DMA_PUT, chan->pushbuf_base);
	RAMFC_WR(DMA_GET, chan->pushbuf_base);
	RAMFC_WR(DMA_INSTANCE, chan->pushbuf->instance >> 4);
	RAMFC_WR(DMA_FETCH, (NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
			     NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
			     NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
#ifdef __BIG_ENDIAN
			     NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
			     0));
	dev_priv->engine.instmem.finish_access(dev);

	/* enable the fifo dma operation */
	nv_wr32(dev, NV04_PFIFO_MODE,
		nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
	return 0;
}
115
116void
117nv04_fifo_destroy_context(struct nouveau_channel *chan)
118{
119 struct drm_device *dev = chan->dev;
120
121 nv_wr32(dev, NV04_PFIFO_MODE,
122 nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
123
124 nouveau_gpuobj_ref_del(dev, &chan->ramfc);
125}
126
/*
 * Load channel `chid`'s saved state from its RAMFC entry into the
 * PFIFO CACHE1 registers. The fc+N offsets mirror the NV04_RAMFC_*
 * layout defined above (PUT/GET/INSTANCE/STATE/FETCH/ENGINE/PULL1).
 */
static void
nv04_fifo_do_load_context(struct drm_device *dev, int chid)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t fc = NV04_RAMFC(chid), tmp;

	dev_priv->engine.instmem.prepare_access(dev, false);

	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
	/* The saved INSTANCE word packs instance (low 16) and dcount
	 * (high 16) — see the matching pack in nv04_fifo_unload_context(). */
	tmp = nv_ri32(dev, fc + 8);
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF);
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, tmp >> 16);
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 12));
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, nv_ri32(dev, fc + 16));
	nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 20));
	nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 24));

	dev_priv->engine.instmem.finish_access(dev);

	/* Reset the cache pointers for the freshly loaded context. */
	nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
}
150
/*
 * Make `chan` the channel resident in PFIFO CACHE1: select it in
 * PUSH1 (DMA mode), restore its RAMFC state, and re-enable the DMA
 * pusher. Always returns 0.
 */
int
nv04_fifo_load_context(struct nouveau_channel *chan)
{
	uint32_t tmp;

	nv_wr32(chan->dev, NV03_PFIFO_CACHE1_PUSH1,
			   NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id);
	nv04_fifo_do_load_context(chan->dev, chan->id);
	nv_wr32(chan->dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1);

	/* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */
	tmp = nv_rd32(chan->dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31);
	nv_wr32(chan->dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp);

	return 0;
}
167
/*
 * Save the currently resident channel's CACHE1 state back into its
 * RAMFC entry, then load the "null" context (channel id channels-1)
 * so PFIFO no longer references the saved channel.
 *
 * Returns 0 if nothing was resident or on success, -EINVAL if the
 * resident channel id has no software channel attached.
 */
int
nv04_fifo_unload_context(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_channel *chan = NULL;
	uint32_t tmp;
	int chid;

	chid = pfifo->channel_id(dev);
	if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
		return 0;

	chan = dev_priv->fifos[chid];
	if (!chan) {
		NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
		return -EINVAL;
	}

	/* Mirror of nv04_fifo_do_load_context(): write each CACHE1
	 * register back to its RAMFC slot. */
	dev_priv->engine.instmem.prepare_access(dev, true);
	RAMFC_WR(DMA_PUT, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
	RAMFC_WR(DMA_GET, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
	/* Pack dcount (high 16) with instance (low 16). */
	tmp  = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16;
	tmp |= nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE);
	RAMFC_WR(DMA_INSTANCE, tmp);
	RAMFC_WR(DMA_STATE, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE));
	RAMFC_WR(DMA_FETCH, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH));
	RAMFC_WR(ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
	RAMFC_WR(PULL1_ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));
	dev_priv->engine.instmem.finish_access(dev);

	/* Switch to the reserved null channel. */
	nv04_fifo_do_load_context(dev, pfifo->channels - 1);
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
	return 0;
}
203
/*
 * Pulse PFIFO's PMC enable bit to reset the unit, then program a set
 * of PFIFO registers with their post-reset defaults.
 */
static void
nv04_fifo_init_reset(struct drm_device *dev)
{
	nv_wr32(dev, NV03_PMC_ENABLE,
		nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO);
	nv_wr32(dev, NV03_PMC_ENABLE,
		nv_rd32(dev, NV03_PMC_ENABLE) |  NV_PMC_ENABLE_PFIFO);

	nv_wr32(dev, 0x003224, 0x000f0078);
	nv_wr32(dev, 0x002044, 0x0101ffff);
	nv_wr32(dev, 0x002040, 0x000000ff);
	nv_wr32(dev, 0x002500, 0x00000000);
	nv_wr32(dev, 0x003000, 0x00000000);
	nv_wr32(dev, 0x003050, 0x00000000);
	nv_wr32(dev, 0x003200, 0x00000000);
	nv_wr32(dev, 0x003250, 0x00000000);
	nv_wr32(dev, 0x003220, 0x00000000);

	/* NOTE(review): 0x003250 is written twice (also above) —
	 * presumably harmless; confirm against the PFIFO register docs. */
	nv_wr32(dev, 0x003250, 0x00000000);
	nv_wr32(dev, 0x003270, 0x00000000);
	nv_wr32(dev, 0x003210, 0x00000000);
}
226
227static void
228nv04_fifo_init_ramxx(struct drm_device *dev)
229{
230 struct drm_nouveau_private *dev_priv = dev->dev_private;
231
232 nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
233 ((dev_priv->ramht_bits - 9) << 16) |
234 (dev_priv->ramht_offset >> 8));
235 nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8);
236 nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc_offset >> 8);
237}
238
/* Ack all pending PFIFO interrupts (0x2100) and unmask them all (0x2140). */
static void
nv04_fifo_init_intr(struct drm_device *dev)
{
	nv_wr32(dev, 0x002100, 0xffffffff);
	nv_wr32(dev, 0x002140, 0xffffffff);
}
245
/*
 * Bring up PFIFO: reset the unit, program the RAMIN table pointers,
 * load the null context, enable interrupts and cache processing, and
 * re-enable DMA mode for every already-existing channel.
 * Always returns 0.
 */
int
nv04_fifo_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	int i;

	nv04_fifo_init_reset(dev);
	nv04_fifo_init_ramxx(dev);

	/* Start with the reserved null channel resident. */
	nv04_fifo_do_load_context(dev, pfifo->channels - 1);
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);

	nv04_fifo_init_intr(dev);
	pfifo->enable(dev);

	/* Restore the DMA-mode bit for channels created before init
	 * (e.g. across a resume). */
	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
		if (dev_priv->fifos[i]) {
			uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
			nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
		}
	}

	return 0;
}
271
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
new file mode 100644
index 000000000000..396ee92118f6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
@@ -0,0 +1,579 @@
1/*
2 * Copyright 2007 Stephane Marchesin
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include "drmP.h"
26#include "drm.h"
27#include "nouveau_drm.h"
28#include "nouveau_drv.h"
29
/*
 * PGRAPH registers saved and restored on every NV04 context switch.
 * The array order defines the layout of graph_state::nv04, so it must
 * not be reordered: nv04_graph_load_context()/unload_context() index
 * both arrays in lockstep.
 */
static uint32_t nv04_graph_ctx_regs[] = {
	NV04_PGRAPH_CTX_SWITCH1,
	NV04_PGRAPH_CTX_SWITCH2,
	NV04_PGRAPH_CTX_SWITCH3,
	NV04_PGRAPH_CTX_SWITCH4,
	NV04_PGRAPH_CTX_CACHE1,
	NV04_PGRAPH_CTX_CACHE2,
	NV04_PGRAPH_CTX_CACHE3,
	NV04_PGRAPH_CTX_CACHE4,
	0x00400184,
	0x004001a4,
	0x004001c4,
	0x004001e4,
	0x00400188,
	0x004001a8,
	0x004001c8,
	0x004001e8,
	0x0040018c,
	0x004001ac,
	0x004001cc,
	0x004001ec,
	0x00400190,
	0x004001b0,
	0x004001d0,
	0x004001f0,
	0x00400194,
	0x004001b4,
	0x004001d4,
	0x004001f4,
	0x00400198,
	0x004001b8,
	0x004001d8,
	0x004001f8,
	0x0040019c,
	0x004001bc,
	0x004001dc,
	0x004001fc,
	0x00400174,
	NV04_PGRAPH_DMA_START_0,
	NV04_PGRAPH_DMA_START_1,
	NV04_PGRAPH_DMA_LENGTH,
	NV04_PGRAPH_DMA_MISC,
	NV04_PGRAPH_DMA_PITCH,
	NV04_PGRAPH_BOFFSET0,
	NV04_PGRAPH_BBASE0,
	NV04_PGRAPH_BLIMIT0,
	NV04_PGRAPH_BOFFSET1,
	NV04_PGRAPH_BBASE1,
	NV04_PGRAPH_BLIMIT1,
	NV04_PGRAPH_BOFFSET2,
	NV04_PGRAPH_BBASE2,
	NV04_PGRAPH_BLIMIT2,
	NV04_PGRAPH_BOFFSET3,
	NV04_PGRAPH_BBASE3,
	NV04_PGRAPH_BLIMIT3,
	NV04_PGRAPH_BOFFSET4,
	NV04_PGRAPH_BBASE4,
	NV04_PGRAPH_BLIMIT4,
	NV04_PGRAPH_BOFFSET5,
	NV04_PGRAPH_BBASE5,
	NV04_PGRAPH_BLIMIT5,
	NV04_PGRAPH_BPITCH0,
	NV04_PGRAPH_BPITCH1,
	NV04_PGRAPH_BPITCH2,
	NV04_PGRAPH_BPITCH3,
	NV04_PGRAPH_BPITCH4,
	NV04_PGRAPH_SURFACE,
	NV04_PGRAPH_STATE,
	NV04_PGRAPH_BSWIZZLE2,
	NV04_PGRAPH_BSWIZZLE5,
	NV04_PGRAPH_BPIXEL,
	NV04_PGRAPH_NOTIFY,
	NV04_PGRAPH_PATT_COLOR0,
	NV04_PGRAPH_PATT_COLOR1,
	/* 64-entry pattern colour RAM */
	NV04_PGRAPH_PATT_COLORRAM+0x00,
	NV04_PGRAPH_PATT_COLORRAM+0x01,
	NV04_PGRAPH_PATT_COLORRAM+0x02,
	NV04_PGRAPH_PATT_COLORRAM+0x03,
	NV04_PGRAPH_PATT_COLORRAM+0x04,
	NV04_PGRAPH_PATT_COLORRAM+0x05,
	NV04_PGRAPH_PATT_COLORRAM+0x06,
	NV04_PGRAPH_PATT_COLORRAM+0x07,
	NV04_PGRAPH_PATT_COLORRAM+0x08,
	NV04_PGRAPH_PATT_COLORRAM+0x09,
	NV04_PGRAPH_PATT_COLORRAM+0x0A,
	NV04_PGRAPH_PATT_COLORRAM+0x0B,
	NV04_PGRAPH_PATT_COLORRAM+0x0C,
	NV04_PGRAPH_PATT_COLORRAM+0x0D,
	NV04_PGRAPH_PATT_COLORRAM+0x0E,
	NV04_PGRAPH_PATT_COLORRAM+0x0F,
	NV04_PGRAPH_PATT_COLORRAM+0x10,
	NV04_PGRAPH_PATT_COLORRAM+0x11,
	NV04_PGRAPH_PATT_COLORRAM+0x12,
	NV04_PGRAPH_PATT_COLORRAM+0x13,
	NV04_PGRAPH_PATT_COLORRAM+0x14,
	NV04_PGRAPH_PATT_COLORRAM+0x15,
	NV04_PGRAPH_PATT_COLORRAM+0x16,
	NV04_PGRAPH_PATT_COLORRAM+0x17,
	NV04_PGRAPH_PATT_COLORRAM+0x18,
	NV04_PGRAPH_PATT_COLORRAM+0x19,
	NV04_PGRAPH_PATT_COLORRAM+0x1A,
	NV04_PGRAPH_PATT_COLORRAM+0x1B,
	NV04_PGRAPH_PATT_COLORRAM+0x1C,
	NV04_PGRAPH_PATT_COLORRAM+0x1D,
	NV04_PGRAPH_PATT_COLORRAM+0x1E,
	NV04_PGRAPH_PATT_COLORRAM+0x1F,
	NV04_PGRAPH_PATT_COLORRAM+0x20,
	NV04_PGRAPH_PATT_COLORRAM+0x21,
	NV04_PGRAPH_PATT_COLORRAM+0x22,
	NV04_PGRAPH_PATT_COLORRAM+0x23,
	NV04_PGRAPH_PATT_COLORRAM+0x24,
	NV04_PGRAPH_PATT_COLORRAM+0x25,
	NV04_PGRAPH_PATT_COLORRAM+0x26,
	NV04_PGRAPH_PATT_COLORRAM+0x27,
	NV04_PGRAPH_PATT_COLORRAM+0x28,
	NV04_PGRAPH_PATT_COLORRAM+0x29,
	NV04_PGRAPH_PATT_COLORRAM+0x2A,
	NV04_PGRAPH_PATT_COLORRAM+0x2B,
	NV04_PGRAPH_PATT_COLORRAM+0x2C,
	NV04_PGRAPH_PATT_COLORRAM+0x2D,
	NV04_PGRAPH_PATT_COLORRAM+0x2E,
	NV04_PGRAPH_PATT_COLORRAM+0x2F,
	NV04_PGRAPH_PATT_COLORRAM+0x30,
	NV04_PGRAPH_PATT_COLORRAM+0x31,
	NV04_PGRAPH_PATT_COLORRAM+0x32,
	NV04_PGRAPH_PATT_COLORRAM+0x33,
	NV04_PGRAPH_PATT_COLORRAM+0x34,
	NV04_PGRAPH_PATT_COLORRAM+0x35,
	NV04_PGRAPH_PATT_COLORRAM+0x36,
	NV04_PGRAPH_PATT_COLORRAM+0x37,
	NV04_PGRAPH_PATT_COLORRAM+0x38,
	NV04_PGRAPH_PATT_COLORRAM+0x39,
	NV04_PGRAPH_PATT_COLORRAM+0x3A,
	NV04_PGRAPH_PATT_COLORRAM+0x3B,
	NV04_PGRAPH_PATT_COLORRAM+0x3C,
	NV04_PGRAPH_PATT_COLORRAM+0x3D,
	NV04_PGRAPH_PATT_COLORRAM+0x3E,
	NV04_PGRAPH_PATT_COLORRAM+0x3F,
	NV04_PGRAPH_PATTERN,
	0x0040080c,
	NV04_PGRAPH_PATTERN_SHAPE,
	0x00400600,
	NV04_PGRAPH_ROP3,
	NV04_PGRAPH_CHROMA,
	NV04_PGRAPH_BETA_AND,
	NV04_PGRAPH_BETA_PREMULT,
	NV04_PGRAPH_CONTROL0,
	NV04_PGRAPH_CONTROL1,
	NV04_PGRAPH_CONTROL2,
	NV04_PGRAPH_BLEND,
	NV04_PGRAPH_STORED_FMT,
	NV04_PGRAPH_SOURCE_COLOR,
	0x00400560,
	0x00400568,
	0x00400564,
	0x0040056c,
	0x00400400,
	0x00400480,
	0x00400404,
	0x00400484,
	0x00400408,
	0x00400488,
	0x0040040c,
	0x0040048c,
	0x00400410,
	0x00400490,
	0x00400414,
	0x00400494,
	0x00400418,
	0x00400498,
	0x0040041c,
	0x0040049c,
	0x00400420,
	0x004004a0,
	0x00400424,
	0x004004a4,
	0x00400428,
	0x004004a8,
	0x0040042c,
	0x004004ac,
	0x00400430,
	0x004004b0,
	0x00400434,
	0x004004b4,
	0x00400438,
	0x004004b8,
	0x0040043c,
	0x004004bc,
	0x00400440,
	0x004004c0,
	0x00400444,
	0x004004c4,
	0x00400448,
	0x004004c8,
	0x0040044c,
	0x004004cc,
	0x00400450,
	0x004004d0,
	0x00400454,
	0x004004d4,
	0x00400458,
	0x004004d8,
	0x0040045c,
	0x004004dc,
	0x00400460,
	0x004004e0,
	0x00400464,
	0x004004e4,
	0x00400468,
	0x004004e8,
	0x0040046c,
	0x004004ec,
	0x00400470,
	0x004004f0,
	0x00400474,
	0x004004f4,
	0x00400478,
	0x004004f8,
	0x0040047c,
	0x004004fc,
	0x0040053c,
	0x00400544,
	0x00400540,
	0x00400548,
	0x00400560,
	0x00400568,
	0x00400564,
	0x0040056c,
	0x00400534,
	0x00400538,
	0x00400514,
	0x00400518,
	0x0040051c,
	0x00400520,
	0x00400524,
	0x00400528,
	0x0040052c,
	0x00400530,
	0x00400d00,
	0x00400d40,
	0x00400d80,
	0x00400d04,
	0x00400d44,
	0x00400d84,
	0x00400d08,
	0x00400d48,
	0x00400d88,
	0x00400d0c,
	0x00400d4c,
	0x00400d8c,
	0x00400d10,
	0x00400d50,
	0x00400d90,
	0x00400d14,
	0x00400d54,
	0x00400d94,
	0x00400d18,
	0x00400d58,
	0x00400d98,
	0x00400d1c,
	0x00400d5c,
	0x00400d9c,
	0x00400d20,
	0x00400d60,
	0x00400da0,
	0x00400d24,
	0x00400d64,
	0x00400da4,
	0x00400d28,
	0x00400d68,
	0x00400da8,
	0x00400d2c,
	0x00400d6c,
	0x00400dac,
	0x00400d30,
	0x00400d70,
	0x00400db0,
	0x00400d34,
	0x00400d74,
	0x00400db4,
	0x00400d38,
	0x00400d78,
	0x00400db8,
	0x00400d3c,
	0x00400d7c,
	0x00400dbc,
	0x00400590,
	0x00400594,
	0x00400598,
	0x0040059c,
	0x004005a8,
	0x004005ac,
	0x004005b0,
	0x004005b4,
	0x004005c0,
	0x004005c4,
	0x004005c8,
	0x004005cc,
	0x004005d0,
	0x004005d4,
	0x004005d8,
	0x004005dc,
	0x004005e0,
	NV04_PGRAPH_PASSTHRU_0,
	NV04_PGRAPH_PASSTHRU_1,
	NV04_PGRAPH_PASSTHRU_2,
	NV04_PGRAPH_DVD_COLORFMT,
	NV04_PGRAPH_SCALED_FORMAT,
	NV04_PGRAPH_MISC24_0,
	NV04_PGRAPH_MISC24_1,
	NV04_PGRAPH_MISC24_2,
	0x00400500,
	0x00400504,
	NV04_PGRAPH_VALID1,
	NV04_PGRAPH_VALID2


};
348
/* Per-channel software copy of the PGRAPH registers listed in
 * nv04_graph_ctx_regs[]; indices correspond one-to-one. */
struct graph_state {
	int nv04[ARRAY_SIZE(nv04_graph_ctx_regs)];
};
352
353struct nouveau_channel *
354nv04_graph_channel(struct drm_device *dev)
355{
356 struct drm_nouveau_private *dev_priv = dev->dev_private;
357 int chid = dev_priv->engine.fifo.channels;
358
359 if (nv_rd32(dev, NV04_PGRAPH_CTX_CONTROL) & 0x00010000)
360 chid = nv_rd32(dev, NV04_PGRAPH_CTX_USER) >> 24;
361
362 if (chid >= dev_priv->engine.fifo.channels)
363 return NULL;
364
365 return dev_priv->fifos[chid];
366}
367
/*
 * Switch PGRAPH to the channel currently selected in PFIFO: halt
 * PGRAPH fifo access, save the outgoing context, load the incoming
 * one (if a software channel exists for it), then resume.
 */
void
nv04_graph_context_switch(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_channel *chan = NULL;
	int chid;

	pgraph->fifo_access(dev, false);
	nouveau_wait_for_idle(dev);

	/* If previous context is valid, we need to save it */
	pgraph->unload_context(dev);

	/* Load context for next channel */
	chid = dev_priv->engine.fifo.channel_id(dev);
	chan = dev_priv->fifos[chid];
	if (chan)
		nv04_graph_load_context(chan);

	pgraph->fifo_access(dev, true);
}
390
391int nv04_graph_create_context(struct nouveau_channel *chan)
392{
393 struct graph_state *pgraph_ctx;
394 NV_DEBUG(chan->dev, "nv04_graph_context_create %d\n", chan->id);
395
396 chan->pgraph_ctx = pgraph_ctx = kzalloc(sizeof(*pgraph_ctx),
397 GFP_KERNEL);
398 if (pgraph_ctx == NULL)
399 return -ENOMEM;
400
401 /* dev_priv->fifos[channel].pgraph_ctx_user = channel << 24; */
402 pgraph_ctx->nv04[0] = 0x0001ffff;
403 /* is it really needed ??? */
404#if 0
405 dev_priv->fifos[channel].pgraph_ctx[1] =
406 nv_rd32(dev, NV_PGRAPH_DEBUG_4);
407 dev_priv->fifos[channel].pgraph_ctx[2] =
408 nv_rd32(dev, 0x004006b0);
409#endif
410 return 0;
411}
412
413void nv04_graph_destroy_context(struct nouveau_channel *chan)
414{
415 struct graph_state *pgraph_ctx = chan->pgraph_ctx;
416
417 kfree(pgraph_ctx);
418 chan->pgraph_ctx = NULL;
419}
420
/*
 * Restore the channel's saved PGRAPH register values (in the order
 * defined by nv04_graph_ctx_regs[]), mark a context as loaded in
 * CTX_CONTROL, and set the channel id in CTX_USER. Always returns 0.
 */
int nv04_graph_load_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct graph_state *pgraph_ctx = chan->pgraph_ctx;
	uint32_t tmp;
	int i;

	for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
		nv_wr32(dev, nv04_graph_ctx_regs[i], pgraph_ctx->nv04[i]);

	nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10010100);
	nv_wr32(dev, NV04_PGRAPH_CTX_USER, chan->id << 24);
	/* Preserve only the low 20 bits of FFINTFC_ST2. */
	tmp = nv_rd32(dev, NV04_PGRAPH_FFINTFC_ST2);
	nv_wr32(dev, NV04_PGRAPH_FFINTFC_ST2, tmp & 0x000fffff);
	return 0;
}
437
/*
 * Save the currently resident PGRAPH context (if any) into its
 * channel's graph_state, then mark PGRAPH as holding no context and
 * park CTX_USER on the reserved last channel id. Always returns 0.
 */
int
nv04_graph_unload_context(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_channel *chan = NULL;
	struct graph_state *ctx;
	uint32_t tmp;
	int i;

	chan = pgraph->channel(dev);
	if (!chan)
		return 0;
	ctx = chan->pgraph_ctx;

	/* Capture every context register in nv04_graph_ctx_regs[] order. */
	for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
		ctx->nv04[i] = nv_rd32(dev, nv04_graph_ctx_regs[i]);

	nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10000000);
	tmp  = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
	tmp |= (dev_priv->engine.fifo.channels - 1) << 24;
	nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);
	return 0;
}
462
/*
 * Bring up PGRAPH: pulse its PMC enable bit to reset the unit, enable
 * all PGRAPH interrupts, and program the DEBUG/CTX registers with
 * values derived from tracing the proprietary driver ("blob") and the
 * Haiku driver. Always returns 0.
 */
int nv04_graph_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
			~NV_PMC_ENABLE_PGRAPH);
	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
			 NV_PMC_ENABLE_PGRAPH);

	/* Enable PGRAPH interrupts */
	nv_wr32(dev, NV03_PGRAPH_INTR, 0xFFFFFFFF);
	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);

	nv_wr32(dev, NV04_PGRAPH_VALID1, 0);
	nv_wr32(dev, NV04_PGRAPH_VALID2, 0);
	/* DEBUG0: 0x1231c000 observed from the blob; Haiku uses 0x001
	 * variants (0x000001FF / 0x001FFFFF tried previously). */
	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x1231c000);
	/* DEBUG1: 0x72111100 from the blob (0xf2d91100 also seen). */
	nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x72111100);
	/* DEBUG2: Haiku uses the same value (0x11d5f870 also seen). */
	nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x11d5f071);

	/* DEBUG3: Haiku and blob use 0x10d4 in place of 0xf0d4
	 * (0xfad4ff31 also seen). */
	nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xf0d4ff31);

	nv_wr32(dev, NV04_PGRAPH_STATE , 0xFFFFFFFF);
	nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL , 0x10000100);
	/* Park CTX_USER on an out-of-range channel id. */
	tmp  = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
	tmp |= dev_priv->engine.fifo.channels << 24;
	nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);

	/* These don't belong here, they're part of a per-channel context */
	nv_wr32(dev, NV04_PGRAPH_PATTERN_SHAPE, 0x00000000);
	nv_wr32(dev, NV04_PGRAPH_BETA_AND , 0xFFFFFFFF);

	return 0;
}
506
/* PGRAPH teardown: nothing to undo on NV04. */
void nv04_graph_takedown(struct drm_device *dev)
{
}
510
511void
512nv04_graph_fifo_access(struct drm_device *dev, bool enabled)
513{
514 if (enabled)
515 nv_wr32(dev, NV04_PGRAPH_FIFO,
516 nv_rd32(dev, NV04_PGRAPH_FIFO) | 1);
517 else
518 nv_wr32(dev, NV04_PGRAPH_FIFO,
519 nv_rd32(dev, NV04_PGRAPH_FIFO) & ~1);
520}
521
/*
 * Software method 0x0150 (SET_REF) on the M2MF class: record the
 * sequence number and kick the fence handler for this channel.
 * Always returns 0 (method handled).
 */
static int
nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass,
			int mthd, uint32_t data)
{
	chan->fence.last_sequence_irq = data;
	nouveau_fence_handler(chan->dev, chan->id);
	return 0;
}
530
/*
 * Software method 0x02fc (SET_OPERATION): patch the operation field
 * (bits 17:15) of the trapped object's instance in RAMIN, and mirror
 * the new value into the active CTX_SWITCH1 and per-subchannel cache
 * registers so it takes effect immediately. Always returns 0.
 */
static int
nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
			      int mthd, uint32_t data)
{
	struct drm_device *dev = chan->dev;
	uint32_t instance = nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff;
	int subc = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
	uint32_t tmp;

	/* Replace the 3-bit operation field with the new value. */
	tmp  = nv_ri32(dev, instance);
	tmp &= ~0x00038000;
	tmp |= ((data & 7) << 15);

	nv_wi32(dev, instance, tmp);
	nv_wr32(dev, NV04_PGRAPH_CTX_SWITCH1, tmp);
	nv_wr32(dev, NV04_PGRAPH_CTX_CACHE1 + subc, tmp);
	return 0;
}
549
/* Software methods for the M2MF class (0x0039): SET_REF only. */
static struct nouveau_pgraph_object_method nv04_graph_mthds_m2mf[] = {
	{ 0x0150, nv04_graph_mthd_set_ref },
	{}
};
554
/* Software methods shared by 2D classes that need SET_OPERATION help. */
static struct nouveau_pgraph_object_method nv04_graph_mthds_set_operation[] = {
	{ 0x02fc, nv04_graph_mthd_set_operation },
	{},
};
559
/* Object classes PGRAPH accepts on NV04, each with its optional
 * software-method table (NULL = fully hardware-handled). */
struct nouveau_pgraph_object_class nv04_graph_grclass[] = {
	{ 0x0039, false, nv04_graph_mthds_m2mf },
	{ 0x004a, false, nv04_graph_mthds_set_operation }, /* gdirect */
	{ 0x005f, false, nv04_graph_mthds_set_operation }, /* imageblit */
	{ 0x0061, false, nv04_graph_mthds_set_operation }, /* ifc */
	{ 0x0077, false, nv04_graph_mthds_set_operation }, /* sifm */
	{ 0x0030, false, NULL }, /* null */
	{ 0x0042, false, NULL }, /* surf2d */
	{ 0x0043, false, NULL }, /* rop */
	{ 0x0012, false, NULL }, /* beta1 */
	{ 0x0072, false, NULL }, /* beta4 */
	{ 0x0019, false, NULL }, /* cliprect */
	{ 0x0044, false, NULL }, /* pattern */
	{ 0x0052, false, NULL }, /* swzsurf */
	{ 0x0053, false, NULL }, /* surf3d */
	{ 0x0054, false, NULL }, /* tex_tri */
	{ 0x0055, false, NULL }, /* multitex_tri */
	{}
};
579
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
new file mode 100644
index 000000000000..a20c206625a2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_instmem.c
@@ -0,0 +1,208 @@
1#include "drmP.h"
2#include "drm.h"
3#include "nouveau_drv.h"
4
5/* returns the size of fifo context */
6static int
7nouveau_fifo_ctx_size(struct drm_device *dev)
8{
9 struct drm_nouveau_private *dev_priv = dev->dev_private;
10
11 if (dev_priv->chipset >= 0x40)
12 return 128;
13 else
14 if (dev_priv->chipset >= 0x17)
15 return 64;
16
17 return 32;
18}
19
/*
 * Decide how much vram to reserve for instance memory (RAMIN) based
 * on card generation, record it in dev_priv->ramin_rsvd_vram, then
 * zero the reserved area (excluding the BIOS image in the first
 * 64KiB).
 */
static void
nv04_instmem_determine_amount(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int i;

	/* Figure out how much instance memory we need */
	if (dev_priv->card_type >= NV_40) {
		/* We'll want more instance memory than this on some NV4x cards.
		 * There's a 16MB aperture to play with that maps onto the end
		 * of vram. For now, only reserve a small piece until we know
		 * more about what each chipset requires.
		 */
		switch (dev_priv->chipset & 0xf0) {
		case 0x40:
		case 0x47:
		case 0x49:
		case 0x4b:
			dev_priv->ramin_rsvd_vram = (2 * 1024 * 1024);
			break;
		default:
			dev_priv->ramin_rsvd_vram = (1 * 1024 * 1024);
			break;
		}
	} else {
		/*XXX: what *are* the limits on <NV40 cards?
		 */
		dev_priv->ramin_rsvd_vram = (512 * 1024);
	}
	NV_DEBUG(dev, "RAMIN size: %dKiB\n", dev_priv->ramin_rsvd_vram >> 10);

	/* Clear all of it, except the BIOS image that's in the first 64KiB */
	dev_priv->engine.instmem.prepare_access(dev, true);
	for (i = 64 * 1024; i < dev_priv->ramin_rsvd_vram; i += 4)
		nv_wi32(dev, i, 0x00000000);
	dev_priv->engine.instmem.finish_access(dev);
}
57
/*
 * Lay out the fixed RAMIN tables used by PFIFO: the hash table
 * (RAMHT), runout table (RAMRO) and fifo-context table (RAMFC).
 * Only offsets/sizes are recorded in dev_priv here; the hardware
 * pointers are programmed later by nv04_fifo_init_ramxx().
 */
static void
nv04_instmem_configure_fixed_tables(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;

	/* FIFO hash table (RAMHT)
	 * use 4k hash table at RAMIN+0x10000
	 * TODO: extend the hash table
	 */
	dev_priv->ramht_offset = 0x10000;
	dev_priv->ramht_bits   = 9;
	dev_priv->ramht_size   = (1 << dev_priv->ramht_bits); /* nr entries */
	dev_priv->ramht_size  *= 8; /* 2 32-bit values per entry in RAMHT */
	NV_DEBUG(dev, "RAMHT offset=0x%x, size=%d\n", dev_priv->ramht_offset,
						      dev_priv->ramht_size);

	/* FIFO runout table (RAMRO) - 512k at 0x11200 */
	dev_priv->ramro_offset = 0x11200;
	dev_priv->ramro_size   = 512;
	NV_DEBUG(dev, "RAMRO offset=0x%x, size=%d\n", dev_priv->ramro_offset,
						      dev_priv->ramro_size);

	/* FIFO context table (RAMFC)
	 *   NV40  : Not sure exactly how to position RAMFC on some cards,
	 *           0x30002 seems to position it at RAMIN+0x20000 on these
	 *           cards.  RAMFC is 4kb (32 fifos, 128byte entries).
	 *   Others: Position RAMFC at RAMIN+0x11400
	 */
	dev_priv->ramfc_size = engine->fifo.channels *
						nouveau_fifo_ctx_size(dev);
	switch (dev_priv->card_type) {
	case NV_40:
		dev_priv->ramfc_offset = 0x20000;
		break;
	case NV_30:
	case NV_20:
	case NV_10:
	case NV_04:
	default:
		dev_priv->ramfc_offset = 0x11400;
		break;
	}
	NV_DEBUG(dev, "RAMFC offset=0x%x, size=%d\n", dev_priv->ramfc_offset,
						      dev_priv->ramfc_size);
}
104
/*
 * Initialise instance memory: size RAMIN, lay out the fixed tables,
 * and create a heap over the remaining space for dynamic RAMIN
 * allocations. Returns 0 on success or the heap-init error.
 */
int nv04_instmem_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t offset;
	int ret = 0;

	nv04_instmem_determine_amount(dev);
	nv04_instmem_configure_fixed_tables(dev);

	/* Create a heap to manage RAMIN allocations, we don't allocate
	 * the space that was reserved for RAMHT/FC/RO.
	 */
	offset = dev_priv->ramfc_offset + dev_priv->ramfc_size;

	/* It appears RAMRO (or something?) is controlled by 0x2220/0x2230
	 * on certain NV4x chipsets as well as RAMFC.  When 0x2230 == 0
	 * ("new style" control) the upper 16-bits of 0x2220 points at this
	 * other mysterious table that's clobbering important things.
	 *
	 * We're now pointing this at RAMIN+0x30000 to avoid RAMFC getting
	 * smashed to pieces on us, so reserve 0x30000-0x40000 too..
	 */
	if (dev_priv->card_type >= NV_40) {
		if (offset < 0x40000)
			offset = 0x40000;
	}

	ret = nouveau_mem_init_heap(&dev_priv->ramin_heap,
				    offset, dev_priv->ramin_rsvd_vram - offset);
	if (ret) {
		dev_priv->ramin_heap = NULL;
		NV_ERROR(dev, "Failed to init RAMIN heap\n");
	}

	return ret;
}
141
/* Instmem teardown: nothing to undo on NV04. */
void
nv04_instmem_takedown(struct drm_device *dev)
{
}
146
/*
 * Allocate backing for an instmem object. On NV04-class hardware
 * RAMIN lives directly in vram, so no separate backing is needed;
 * an object that already has backing is an error.
 */
int
nv04_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, uint32_t *sz)
{
	if (gpuobj->im_backing)
		return -EINVAL;

	return 0;
}
155
/*
 * Release an instmem object's backing: unbind it first if still
 * bound, then drop the (always-NULL-on-NV04) backing pointer.
 */
void
nv04_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (gpuobj && gpuobj->im_backing) {
		if (gpuobj->im_bound)
			dev_priv->engine.instmem.unbind(dev, gpuobj);
		gpuobj->im_backing = NULL;
	}
}
167
/*
 * "Bind" an instmem object. RAMIN is always mapped on NV04, so this
 * only validates state and flags the object as bound.
 */
int
nv04_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
{
	if (!gpuobj->im_pramin || gpuobj->im_bound)
		return -EINVAL;

	gpuobj->im_bound = 1;
	return 0;
}
177
178int
179nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
180{
181 if (gpuobj->im_bound == 0)
182 return -EINVAL;
183
184 gpuobj->im_bound = 0;
185 return 0;
186}
187
/* RAMIN is CPU-visible at all times on NV04; no setup required. */
void
nv04_instmem_prepare_access(struct drm_device *dev, bool write)
{
}
192
/* Counterpart of prepare_access(); nothing to flush on NV04. */
void
nv04_instmem_finish_access(struct drm_device *dev)
{
}
197
/* No instmem state needs saving across suspend on NV04. */
int
nv04_instmem_suspend(struct drm_device *dev)
{
	return 0;
}
203
/* No instmem state needs restoring on resume on NV04. */
void
nv04_instmem_resume(struct drm_device *dev)
{
}
208
diff --git a/drivers/gpu/drm/nouveau/nv04_mc.c b/drivers/gpu/drm/nouveau/nv04_mc.c
new file mode 100644
index 000000000000..617ed1e05269
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_mc.c
@@ -0,0 +1,20 @@
1#include "drmP.h"
2#include "drm.h"
3#include "nouveau_drv.h"
4#include "nouveau_drm.h"
5
/*
 * Master-control init: enable every engine in PMC.
 * Always returns 0.
 */
int
nv04_mc_init(struct drm_device *dev)
{
	/* Power up everything, resetting each individual unit will
	 * be done later if needed.
	 */

	nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
	return 0;
}
16
/* PMC teardown: nothing to undo. */
void
nv04_mc_takedown(struct drm_device *dev)
{
}
diff --git a/drivers/gpu/drm/nouveau/nv04_timer.c b/drivers/gpu/drm/nouveau/nv04_timer.c
new file mode 100644
index 000000000000..1d09ddd57399
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_timer.c
@@ -0,0 +1,51 @@
1#include "drmP.h"
2#include "drm.h"
3#include "nouveau_drv.h"
4#include "nouveau_drm.h"
5
/*
 * PTIMER init: mask and ack timer interrupts, and program the
 * numerator/denominator only when they are unset, to avoid breaking
 * LVDS sequencing timings (see comment below). Always returns 0.
 */
int
nv04_timer_init(struct drm_device *dev)
{
	nv_wr32(dev, NV04_PTIMER_INTR_EN_0, 0x00000000);
	nv_wr32(dev, NV04_PTIMER_INTR_0, 0xFFFFFFFF);

	/* Just use the pre-existing values when possible for now; these regs
	 * are not written in nv (driver writer missed a /4 on the address), and
	 * writing 8 and 3 to the correct regs breaks the timings on the LVDS
	 * hardware sequencing microcode.
	 * A correct solution (involving calculations with the GPU PLL) can
	 * be done when kernel modesetting lands
	 */
	if (!nv_rd32(dev, NV04_PTIMER_NUMERATOR) ||
	    !nv_rd32(dev, NV04_PTIMER_DENOMINATOR)) {
		nv_wr32(dev, NV04_PTIMER_NUMERATOR, 0x00000008);
		nv_wr32(dev, NV04_PTIMER_DENOMINATOR, 0x00000003);
	}

	return 0;
}
27
28uint64_t
29nv04_timer_read(struct drm_device *dev)
30{
31 uint32_t low;
32 /* From kmmio dumps on nv28 this looks like how the blob does this.
33 * It reads the high dword twice, before and after.
34 * The only explanation seems to be that the 64-bit timer counter
35 * advances between high and low dword reads and may corrupt the
36 * result. Not confirmed.
37 */
38 uint32_t high2 = nv_rd32(dev, NV04_PTIMER_TIME_1);
39 uint32_t high1;
40 do {
41 high1 = high2;
42 low = nv_rd32(dev, NV04_PTIMER_TIME_0);
43 high2 = nv_rd32(dev, NV04_PTIMER_TIME_1);
44 } while (high1 != high2);
45 return (((uint64_t)high2) << 32) | (uint64_t)low;
46}
47
/* PTIMER teardown: nothing to undo. */
void
nv04_timer_takedown(struct drm_device *dev)
{
}
diff --git a/drivers/gpu/drm/nouveau/nv04_tv.c b/drivers/gpu/drm/nouveau/nv04_tv.c
new file mode 100644
index 000000000000..9c63099e9c42
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_tv.c
@@ -0,0 +1,305 @@
1/*
2 * Copyright (C) 2009 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "nouveau_drv.h"
29#include "nouveau_encoder.h"
30#include "nouveau_connector.h"
31#include "nouveau_crtc.h"
32#include "nouveau_hw.h"
33#include "drm_crtc_helper.h"
34
35#include "i2c/ch7006.h"
36
/* Table of supported external TV encoder chips.  The index into this
 * array is the "type" value returned by nv04_tv_identify().  funcs and
 * hfuncs are filled in at runtime by nv04_tv_create(); params is the
 * chip-specific configuration passed to the slave driver's set_config().
 */
static struct {
	struct i2c_board_info board_info;
	struct drm_encoder_funcs funcs;
	struct drm_encoder_helper_funcs hfuncs;
	void *params;

} nv04_tv_encoder_info[] = {
	{
		/* ch7006 TV encoder at I2C address 0x75. */
		.board_info = { I2C_BOARD_INFO("ch7006", 0x75) },
		.params = &(struct ch7006_encoder_params) {
			CH7006_FORMAT_RGB24m12I, CH7006_CLOCK_MASTER,
			0, 0, 0,
			CH7006_SYNC_SLAVE, CH7006_SYNC_SEPARATED,
			CH7006_POUT_3_3V, CH7006_ACTIVE_HSYNC
		},
	},
};
54
55static bool probe_i2c_addr(struct i2c_adapter *adapter, int addr)
56{
57 struct i2c_msg msg = {
58 .addr = addr,
59 .len = 0,
60 };
61
62 return i2c_transfer(adapter, &msg, 1) == 1;
63}
64
65int nv04_tv_identify(struct drm_device *dev, int i2c_index)
66{
67 struct nouveau_i2c_chan *i2c;
68 bool was_locked;
69 int i, ret;
70
71 NV_TRACE(dev, "Probing TV encoders on I2C bus: %d\n", i2c_index);
72
73 i2c = nouveau_i2c_find(dev, i2c_index);
74 if (!i2c)
75 return -ENODEV;
76
77 was_locked = NVLockVgaCrtcs(dev, false);
78
79 for (i = 0; i < ARRAY_SIZE(nv04_tv_encoder_info); i++) {
80 if (probe_i2c_addr(&i2c->adapter,
81 nv04_tv_encoder_info[i].board_info.addr)) {
82 ret = i;
83 break;
84 }
85 }
86
87 if (i < ARRAY_SIZE(nv04_tv_encoder_info)) {
88 NV_TRACE(dev, "Detected TV encoder: %s\n",
89 nv04_tv_encoder_info[i].board_info.type);
90
91 } else {
92 NV_TRACE(dev, "No TV encoders found.\n");
93 i = -ENODEV;
94 }
95
96 NVLockVgaCrtcs(dev, was_locked);
97 return i;
98}
99
/* PLL_COEFF_SELECT bits that route the TV clocks from each head. */
#define PLLSEL_TV_CRTC1_MASK \
	(NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK1 \
	| NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK1)
#define PLLSEL_TV_CRTC2_MASK \
	(NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK2 \
	| NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK2)

/* Power the TV output up or down: update the TV clock routing in the
 * shadowed PLL_COEFF_SELECT, tweak CR1A on the driving head when turning
 * on, then forward the dpms request to the slave encoder driver.
 */
static void nv04_tv_dpms(struct drm_encoder *encoder, int mode)
{
	struct drm_device *dev = encoder->dev;
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv04_mode_state *state = &dev_priv->mode_reg;
	uint8_t crtc1A;

	NV_INFO(dev, "Setting dpms mode %d on TV encoder (output %d)\n",
		mode, nv_encoder->dcb->index);

	/* Drop the TV clock routing for both heads... */
	state->pllsel &= ~(PLLSEL_TV_CRTC1_MASK | PLLSEL_TV_CRTC2_MASK);

	if (mode == DRM_MODE_DPMS_ON) {
		int head = nouveau_crtc(encoder->crtc)->index;
		crtc1A = NVReadVgaCrtc(dev, head, NV_CIO_CRE_RPC1_INDEX);

		/* ...and re-route them for the head now driving us. */
		state->pllsel |= head ? PLLSEL_TV_CRTC2_MASK :
					PLLSEL_TV_CRTC1_MASK;

		/* Inhibit hsync */
		crtc1A |= 0x80;

		NVWriteVgaCrtc(dev, head, NV_CIO_CRE_RPC1_INDEX, crtc1A);
	}

	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel);

	/* Let the slave encoder driver do its own dpms handling. */
	to_encoder_slave(encoder)->slave_funcs->dpms(encoder, mode);
}
137
/* Attach (bind=true) or detach the TV encoder to the given head: update
 * the head's shadow register state and flush the affected CRTC/RAMDAC
 * registers to the hardware.
 */
static void nv04_tv_bind(struct drm_device *dev, int head, bool bind)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv04_crtc_reg *state = &dev_priv->mode_reg.crtc_reg[head];

	state->tv_setup = 0;

	if (bind) {
		/* Binding also clears the head's LCD routing register. */
		state->CRTC[NV_CIO_CRE_LCD__INDEX] = 0;
		state->CRTC[NV_CIO_CRE_49] |= 0x10;
	} else {
		state->CRTC[NV_CIO_CRE_49] &= ~0x10;
	}

	/* Flush the shadowed values out to the hardware. */
	NVWriteVgaCrtc(dev, head, NV_CIO_CRE_LCD__INDEX,
		       state->CRTC[NV_CIO_CRE_LCD__INDEX]);
	NVWriteVgaCrtc(dev, head, NV_CIO_CRE_49,
		       state->CRTC[NV_CIO_CRE_49]);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP,
		      state->tv_setup);
}
159
160static void nv04_tv_prepare(struct drm_encoder *encoder)
161{
162 struct drm_device *dev = encoder->dev;
163 int head = nouveau_crtc(encoder->crtc)->index;
164 struct drm_encoder_helper_funcs *helper = encoder->helper_private;
165
166 helper->dpms(encoder, DRM_MODE_DPMS_OFF);
167
168 nv04_dfp_disable(dev, head);
169
170 if (nv_two_heads(dev))
171 nv04_tv_bind(dev, head ^ 1, false);
172
173 nv04_tv_bind(dev, head, true);
174}
175
/* Program the head's shadowed TV timing registers from the adjusted mode
 * and forward the mode to the slave encoder driver.
 */
static void nv04_tv_mode_set(struct drm_encoder *encoder,
			     struct drm_display_mode *mode,
			     struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];

	regp->tv_htotal = adjusted_mode->htotal;
	regp->tv_vtotal = adjusted_mode->vtotal;

	/* These delay the TV signals with respect to the VGA port,
	 * they might be useful if we ever allow a CRTC to drive
	 * multiple outputs.
	 */
	regp->tv_hskew = 1;
	regp->tv_hsync_delay = 1;
	regp->tv_hsync_delay2 = 64;
	regp->tv_vskew = 1;
	regp->tv_vsync_delay = 1;

	to_encoder_slave(encoder)->slave_funcs->mode_set(encoder, mode, adjusted_mode);
}
200
/* Post-modeset hook: power the encoder back on and log the routing. */
static void nv04_tv_commit(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_device *dev = encoder->dev;
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	struct drm_encoder_helper_funcs *helper = encoder->helper_private;

	helper->dpms(encoder, DRM_MODE_DPMS_ON);

	/* '@' + ffs(or) maps the lowest set output-resource bit to A, B, ... */
	NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
		drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), nv_crtc->index,
		'@' + ffs(nv_encoder->dcb->or));
}
214
/* Encoder destructor: let the slave driver release its state first, then
 * tear down the DRM encoder and free our wrapper.
 */
static void nv04_tv_destroy(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);

	to_encoder_slave(encoder)->slave_funcs->destroy(encoder);

	drm_encoder_cleanup(encoder);

	kfree(nv_encoder);
}
225
/*
 * Create a DRM encoder for the external TV encoder chip described by the
 * given DCB entry and bind it to the matching I2C slave driver.
 *
 * Returns 0 on success, -ENODEV if no known chip answers on the bus,
 * -ENOMEM on allocation failure, or the slave driver's error code.
 */
int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry)
{
	struct nouveau_encoder *nv_encoder;
	struct drm_encoder *encoder;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct i2c_adapter *adap;
	struct drm_encoder_funcs *funcs = NULL;
	struct drm_encoder_helper_funcs *hfuncs = NULL;
	struct drm_encoder_slave_funcs *sfuncs = NULL;
	int i2c_index = entry->i2c_index;
	int type, ret;
	bool was_locked;

	/* Ensure that we can talk to this encoder */
	type = nv04_tv_identify(dev, i2c_index);
	if (type < 0)
		return type;

	/* Allocate the necessary memory */
	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
	if (!nv_encoder)
		return -ENOMEM;

	/* Initialize the common members */
	encoder = to_drm_encoder(nv_encoder);

	/* The funcs/hfuncs tables live in the per-chip info array and get
	 * their final contents filled in further down.
	 */
	funcs = &nv04_tv_encoder_info[type].funcs;
	hfuncs = &nv04_tv_encoder_info[type].hfuncs;

	drm_encoder_init(dev, encoder, funcs, DRM_MODE_ENCODER_TVDAC);
	drm_encoder_helper_add(encoder, hfuncs);

	encoder->possible_crtcs = entry->heads;
	encoder->possible_clones = 0;

	nv_encoder->dcb = entry;
	nv_encoder->or = ffs(entry->or) - 1;

	/* Run the slave-specific initialization */
	adap = &dev_priv->vbios->dcb->i2c[i2c_index].chan->adapter;

	/* Unlock the CRTC registers while the slave driver probes the chip. */
	was_locked = NVLockVgaCrtcs(dev, false);

	ret = drm_i2c_encoder_init(encoder->dev, to_encoder_slave(encoder), adap,
				   &nv04_tv_encoder_info[type].board_info);

	NVLockVgaCrtcs(dev, was_locked);

	if (ret < 0)
		goto fail;

	/* Fill the function pointers */
	sfuncs = to_encoder_slave(encoder)->slave_funcs;

	*funcs = (struct drm_encoder_funcs) {
		.destroy = nv04_tv_destroy,
	};

	/* dpms/prepare/commit/mode_set are wrapped with our CRTC and PLL
	 * bookkeeping; the remaining hooks go straight to the slave driver.
	 */
	*hfuncs = (struct drm_encoder_helper_funcs) {
		.dpms = nv04_tv_dpms,
		.save = sfuncs->save,
		.restore = sfuncs->restore,
		.mode_fixup = sfuncs->mode_fixup,
		.prepare = nv04_tv_prepare,
		.commit = nv04_tv_commit,
		.mode_set = nv04_tv_mode_set,
		.detect = sfuncs->detect,
	};

	/* Set the slave encoder configuration */
	sfuncs->set_config(encoder, nv04_tv_encoder_info[type].params);

	return 0;

fail:
	drm_encoder_cleanup(encoder);

	kfree(nv_encoder);
	return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nv10_fb.c b/drivers/gpu/drm/nouveau/nv10_fb.c
new file mode 100644
index 000000000000..79e2d104d70a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv10_fb.c
@@ -0,0 +1,24 @@
1#include "drmP.h"
2#include "drm.h"
3#include "nouveau_drv.h"
4#include "nouveau_drm.h"
5
6int
7nv10_fb_init(struct drm_device *dev)
8{
9 uint32_t fb_bar_size;
10 int i;
11
12 fb_bar_size = drm_get_resource_len(dev, 0) - 1;
13 for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
14 nv_wr32(dev, NV10_PFB_TILE(i), 0);
15 nv_wr32(dev, NV10_PFB_TLIMIT(i), fb_bar_size);
16 }
17
18 return 0;
19}
20
void
nv10_fb_takedown(struct drm_device *dev)
{
	/* Nothing to tear down; PFB state is left as-is. */
}
diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c
new file mode 100644
index 000000000000..7aeabf262bc0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv10_fifo.c
@@ -0,0 +1,260 @@
1/*
2 * Copyright (C) 2007 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm.h"
29#include "nouveau_drv.h"
30
/* Byte offset of channel (c)'s fixed RAMFC slot in instance memory. */
#define NV10_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV10_RAMFC__SIZE))
/* RAMFC entries are 64 bytes on >=nv17, 32 bytes on earlier chipsets. */
#define NV10_RAMFC__SIZE ((dev_priv->chipset) >= 0x17 ? 64 : 32)
33
34int
35nv10_fifo_channel_id(struct drm_device *dev)
36{
37 return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
38 NV10_PFIFO_CACHE1_PUSH1_CHID_MASK;
39}
40
/* Allocate and pre-fill the channel's RAMFC slot and switch the channel
 * into DMA mode.  The word offsets used here must match the layout read
 * back by nv10_fifo_do_load_context().
 */
int
nv10_fifo_create_context(struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_device *dev = chan->dev;
	uint32_t fc = NV10_RAMFC(chan->id);
	int ret;

	/* Wrap the fixed RAMFC slot in a (zeroed) fake gpuobj. */
	ret = nouveau_gpuobj_new_fake(dev, NV10_RAMFC(chan->id), ~0,
				      NV10_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC |
				      NVOBJ_FLAG_ZERO_FREE, NULL, &chan->ramfc);
	if (ret)
		return ret;

	/* Fill entries that are seen filled in dumps of nvidia driver just
	 * after channel's is put into DMA mode
	 */
	dev_priv->engine.instmem.prepare_access(dev, true);
	nv_wi32(dev, fc + 0, chan->pushbuf_base);	/* DMA_PUT */
	nv_wi32(dev, fc + 4, chan->pushbuf_base);	/* DMA_GET */
	nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4);	/* DMA_INSTANCE */
	nv_wi32(dev, fc + 20, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
			      NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
			      NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
#ifdef __BIG_ENDIAN
			      NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
			      0);
	dev_priv->engine.instmem.finish_access(dev);

	/* enable the fifo dma operation */
	nv_wr32(dev, NV04_PFIFO_MODE,
		nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
	return 0;
}
76
77void
78nv10_fifo_destroy_context(struct nouveau_channel *chan)
79{
80 struct drm_device *dev = chan->dev;
81
82 nv_wr32(dev, NV04_PFIFO_MODE,
83 nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
84
85 nouveau_gpuobj_ref_del(dev, &chan->ramfc);
86}
87
/* Restore channel (chid)'s PFIFO puller state from its RAMFC slot.  The
 * word offsets define the RAMFC layout shared with create/unload_context;
 * words 32+ only exist on >=nv17 (64-byte RAMFC entries, see
 * NV10_RAMFC__SIZE).
 */
static void
nv10_fifo_do_load_context(struct drm_device *dev, int chid)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t fc = NV10_RAMFC(chid), tmp;

	dev_priv->engine.instmem.prepare_access(dev, false);

	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
	nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8));

	/* Word 12 packs DMA_INSTANCE (low 16 bits) and DMA_DCOUNT (high). */
	tmp = nv_ri32(dev, fc + 12);
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF);
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, tmp >> 16);

	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 16));
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, nv_ri32(dev, fc + 20));
	nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 24));
	nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 28));

	if (dev_priv->chipset < 0x17)
		goto out;

	nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE, nv_ri32(dev, fc + 32));
	tmp = nv_ri32(dev, fc + 36);
	nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, tmp);
	nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT, nv_ri32(dev, fc + 40));
	nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, nv_ri32(dev, fc + 44));
	nv_wr32(dev, NV10_PFIFO_CACHE1_DMA_SUBROUTINE, nv_ri32(dev, fc + 48));

out:
	dev_priv->engine.instmem.finish_access(dev);

	/* Reset the CACHE1 get/put pointers for the incoming channel. */
	nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
}
125
126int
127nv10_fifo_load_context(struct nouveau_channel *chan)
128{
129 struct drm_device *dev = chan->dev;
130 uint32_t tmp;
131
132 nv10_fifo_do_load_context(dev, chan->id);
133
134 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
135 NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id);
136 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1);
137
138 /* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */
139 tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31);
140 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp);
141
142 return 0;
143}
144
/* Save the currently-resident channel's puller state back into its RAMFC
 * slot (inverse of nv10_fifo_do_load_context()), then park CACHE1 on the
 * dummy last channel.
 */
int
nv10_fifo_unload_context(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	uint32_t fc, tmp;
	int chid;

	chid = pfifo->channel_id(dev);
	if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
		return 0;	/* no saveable channel resident */
	fc = NV10_RAMFC(chid);

	dev_priv->engine.instmem.prepare_access(dev, true);

	nv_wi32(dev, fc + 0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
	nv_wi32(dev, fc + 4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
	nv_wi32(dev, fc + 8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT));
	/* Pack DMA_INSTANCE (low 16 bits) and DMA_DCOUNT (high) into word 12. */
	tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE) & 0xFFFF;
	tmp |= (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16);
	nv_wi32(dev, fc + 12, tmp);
	nv_wi32(dev, fc + 16, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE));
	nv_wi32(dev, fc + 20, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH));
	nv_wi32(dev, fc + 24, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
	nv_wi32(dev, fc + 28, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));

	if (dev_priv->chipset < 0x17)
		goto out;

	nv_wi32(dev, fc + 32, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE));
	tmp = nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP);
	nv_wi32(dev, fc + 36, tmp);
	nv_wi32(dev, fc + 40, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT));
	nv_wi32(dev, fc + 44, nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE));
	/* NOTE(review): word 48 is saved from DMA_GET here but restored into
	 * DMA_SUBROUTINE by the load path - looks intentional, but worth
	 * confirming against the hardware docs.
	 */
	nv_wi32(dev, fc + 48, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));

out:
	dev_priv->engine.instmem.finish_access(dev);

	/* Park the puller on the dummy last channel. */
	nv10_fifo_do_load_context(dev, pfifo->channels - 1);
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
	return 0;
}
188
/* Reset PFIFO by pulsing its PMC enable bit, then program an initial set
 * of PFIFO registers.  The raw addresses/values below come from register
 * dumps; their individual meanings are undocumented here.
 */
static void
nv10_fifo_init_reset(struct drm_device *dev)
{
	nv_wr32(dev, NV03_PMC_ENABLE,
		nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO);
	nv_wr32(dev, NV03_PMC_ENABLE,
		nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PFIFO);

	nv_wr32(dev, 0x003224, 0x000f0078);
	nv_wr32(dev, 0x002044, 0x0101ffff);
	nv_wr32(dev, 0x002040, 0x000000ff);
	nv_wr32(dev, 0x002500, 0x00000000);
	nv_wr32(dev, 0x003000, 0x00000000);
	nv_wr32(dev, 0x003050, 0x00000000);

	nv_wr32(dev, 0x003258, 0x00000000);
	nv_wr32(dev, 0x003210, 0x00000000);
	nv_wr32(dev, 0x003270, 0x00000000);
}
208
/* Point PFIFO at the RAMHT, RAMRO and RAMFC areas in instance memory. */
static void
nv10_fifo_init_ramxx(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/* RAMHT: search stride, hash-table size (log2 - 9) and base. */
	nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
				       ((dev_priv->ramht_bits - 9) << 16) |
				       (dev_priv->ramht_offset >> 8));
	nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8);

	if (dev_priv->chipset < 0x17) {
		nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc_offset >> 8);
	} else {
		/* >=nv17 uses 64-byte RAMFC entries (see NV10_RAMFC__SIZE). */
		nv_wr32(dev, NV03_PFIFO_RAMFC, (dev_priv->ramfc_offset >> 8) |
					       (1 << 16) /* 64 Bytes entry*/);
		/* XXX nvidia blob set bit 18, 21,23 for nv20 & nv30 */
	}
}
227
/* Write all-ones to the two PFIFO interrupt registers.
 * NOTE(review): 0x002100/0x002140 are presumably PFIFO INTR (ack all
 * pending) and INTR_EN (unmask all) - confirm against nouveau_reg.h.
 */
static void
nv10_fifo_init_intr(struct drm_device *dev)
{
	nv_wr32(dev, 0x002100, 0xffffffff);
	nv_wr32(dev, 0x002140, 0xffffffff);
}
234
235int
236nv10_fifo_init(struct drm_device *dev)
237{
238 struct drm_nouveau_private *dev_priv = dev->dev_private;
239 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
240 int i;
241
242 nv10_fifo_init_reset(dev);
243 nv10_fifo_init_ramxx(dev);
244
245 nv10_fifo_do_load_context(dev, pfifo->channels - 1);
246 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
247
248 nv10_fifo_init_intr(dev);
249 pfifo->enable(dev);
250 pfifo->reassign(dev, true);
251
252 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
253 if (dev_priv->fifos[i]) {
254 uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
255 nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
256 }
257 }
258
259 return 0;
260}
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c
new file mode 100644
index 000000000000..6bf6804bb0ef
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv10_graph.c
@@ -0,0 +1,892 @@
1/*
2 * Copyright 2007 Matthieu CASTET <castet.matthieu@free.fr>
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include "drmP.h"
26#include "drm.h"
27#include "nouveau_drm.h"
28#include "nouveau_drv.h"
29
#define NV10_FIFO_NUMBER 32

/* Shadow copy of the PGRAPH 3D pipe state.  Each field covers one pipe
 * address range: the name is the starting pipe address, the element
 * count is the range length in bytes divided by 4.  Filled by
 * nv10_graph_save_pipe()/nv10_graph_create_pipe(), written back by
 * nv10_graph_load_pipe().
 */
struct pipe_state {
	uint32_t pipe_0x0000[0x040/4];
	uint32_t pipe_0x0040[0x010/4];
	uint32_t pipe_0x0200[0x0c0/4];
	uint32_t pipe_0x4400[0x080/4];
	uint32_t pipe_0x6400[0x3b0/4];
	uint32_t pipe_0x6800[0x2f0/4];
	uint32_t pipe_0x6c00[0x030/4];
	uint32_t pipe_0x7000[0x130/4];
	uint32_t pipe_0x7400[0x0c0/4];
	uint32_t pipe_0x7800[0x0c0/4];
};
44
/* PGRAPH MMIO registers saved/restored on a software context switch for
 * nv10-nv16.  Where a named entry's comment gives a count, it is the
 * first register of a block and the literal addresses that follow are
 * the remaining registers of that block.
 */
static int nv10_graph_ctx_regs[] = {
	NV10_PGRAPH_CTX_SWITCH1,
	NV10_PGRAPH_CTX_SWITCH2,
	NV10_PGRAPH_CTX_SWITCH3,
	NV10_PGRAPH_CTX_SWITCH4,
	NV10_PGRAPH_CTX_SWITCH5,
	NV10_PGRAPH_CTX_CACHE1,	/* 8 values from 0x400160 to 0x40017c */
	NV10_PGRAPH_CTX_CACHE2,	/* 8 values from 0x400180 to 0x40019c */
	NV10_PGRAPH_CTX_CACHE3,	/* 8 values from 0x4001a0 to 0x4001bc */
	NV10_PGRAPH_CTX_CACHE4,	/* 8 values from 0x4001c0 to 0x4001dc */
	NV10_PGRAPH_CTX_CACHE5,	/* 8 values from 0x4001e0 to 0x4001fc */
	0x00400164,
	0x00400184,
	0x004001a4,
	0x004001c4,
	0x004001e4,
	0x00400168,
	0x00400188,
	0x004001a8,
	0x004001c8,
	0x004001e8,
	0x0040016c,
	0x0040018c,
	0x004001ac,
	0x004001cc,
	0x004001ec,
	0x00400170,
	0x00400190,
	0x004001b0,
	0x004001d0,
	0x004001f0,
	0x00400174,
	0x00400194,
	0x004001b4,
	0x004001d4,
	0x004001f4,
	0x00400178,
	0x00400198,
	0x004001b8,
	0x004001d8,
	0x004001f8,
	0x0040017c,
	0x0040019c,
	0x004001bc,
	0x004001dc,
	0x004001fc,
	NV10_PGRAPH_CTX_USER,
	NV04_PGRAPH_DMA_START_0,
	NV04_PGRAPH_DMA_START_1,
	NV04_PGRAPH_DMA_LENGTH,
	NV04_PGRAPH_DMA_MISC,
	NV10_PGRAPH_DMA_PITCH,
	NV04_PGRAPH_BOFFSET0,
	NV04_PGRAPH_BBASE0,
	NV04_PGRAPH_BLIMIT0,
	NV04_PGRAPH_BOFFSET1,
	NV04_PGRAPH_BBASE1,
	NV04_PGRAPH_BLIMIT1,
	NV04_PGRAPH_BOFFSET2,
	NV04_PGRAPH_BBASE2,
	NV04_PGRAPH_BLIMIT2,
	NV04_PGRAPH_BOFFSET3,
	NV04_PGRAPH_BBASE3,
	NV04_PGRAPH_BLIMIT3,
	NV04_PGRAPH_BOFFSET4,
	NV04_PGRAPH_BBASE4,
	NV04_PGRAPH_BLIMIT4,
	NV04_PGRAPH_BOFFSET5,
	NV04_PGRAPH_BBASE5,
	NV04_PGRAPH_BLIMIT5,
	NV04_PGRAPH_BPITCH0,
	NV04_PGRAPH_BPITCH1,
	NV04_PGRAPH_BPITCH2,
	NV04_PGRAPH_BPITCH3,
	NV04_PGRAPH_BPITCH4,
	NV10_PGRAPH_SURFACE,
	NV10_PGRAPH_STATE,
	NV04_PGRAPH_BSWIZZLE2,
	NV04_PGRAPH_BSWIZZLE5,
	NV04_PGRAPH_BPIXEL,
	NV10_PGRAPH_NOTIFY,
	NV04_PGRAPH_PATT_COLOR0,
	NV04_PGRAPH_PATT_COLOR1,
	NV04_PGRAPH_PATT_COLORRAM,	/* 64 values from 0x400900 to 0x4009fc */
	0x00400904,
	0x00400908,
	0x0040090c,
	0x00400910,
	0x00400914,
	0x00400918,
	0x0040091c,
	0x00400920,
	0x00400924,
	0x00400928,
	0x0040092c,
	0x00400930,
	0x00400934,
	0x00400938,
	0x0040093c,
	0x00400940,
	0x00400944,
	0x00400948,
	0x0040094c,
	0x00400950,
	0x00400954,
	0x00400958,
	0x0040095c,
	0x00400960,
	0x00400964,
	0x00400968,
	0x0040096c,
	0x00400970,
	0x00400974,
	0x00400978,
	0x0040097c,
	0x00400980,
	0x00400984,
	0x00400988,
	0x0040098c,
	0x00400990,
	0x00400994,
	0x00400998,
	0x0040099c,
	0x004009a0,
	0x004009a4,
	0x004009a8,
	0x004009ac,
	0x004009b0,
	0x004009b4,
	0x004009b8,
	0x004009bc,
	0x004009c0,
	0x004009c4,
	0x004009c8,
	0x004009cc,
	0x004009d0,
	0x004009d4,
	0x004009d8,
	0x004009dc,
	0x004009e0,
	0x004009e4,
	0x004009e8,
	0x004009ec,
	0x004009f0,
	0x004009f4,
	0x004009f8,
	0x004009fc,
	NV04_PGRAPH_PATTERN,	/* 2 values from 0x400808 to 0x40080c */
	0x0040080c,
	NV04_PGRAPH_PATTERN_SHAPE,
	NV03_PGRAPH_MONO_COLOR0,
	NV04_PGRAPH_ROP3,
	NV04_PGRAPH_CHROMA,
	NV04_PGRAPH_BETA_AND,
	NV04_PGRAPH_BETA_PREMULT,
	0x00400e70,
	0x00400e74,
	0x00400e78,
	0x00400e7c,
	0x00400e80,
	0x00400e84,
	0x00400e88,
	0x00400e8c,
	0x00400ea0,
	0x00400ea4,
	0x00400ea8,
	0x00400e90,
	0x00400e94,
	0x00400e98,
	0x00400e9c,
	NV10_PGRAPH_WINDOWCLIP_HORIZONTAL,	/* 8 values from 0x400f00-0x400f1c */
	NV10_PGRAPH_WINDOWCLIP_VERTICAL,	/* 8 values from 0x400f20-0x400f3c */
	0x00400f04,
	0x00400f24,
	0x00400f08,
	0x00400f28,
	0x00400f0c,
	0x00400f2c,
	0x00400f10,
	0x00400f30,
	0x00400f14,
	0x00400f34,
	0x00400f18,
	0x00400f38,
	0x00400f1c,
	0x00400f3c,
	NV10_PGRAPH_XFMODE0,
	NV10_PGRAPH_XFMODE1,
	NV10_PGRAPH_GLOBALSTATE0,
	NV10_PGRAPH_GLOBALSTATE1,
	NV04_PGRAPH_STORED_FMT,
	NV04_PGRAPH_SOURCE_COLOR,
	NV03_PGRAPH_ABS_X_RAM,	/* 32 values from 0x400400 to 0x40047c */
	NV03_PGRAPH_ABS_Y_RAM,	/* 32 values from 0x400480 to 0x4004fc */
	0x00400404,
	0x00400484,
	0x00400408,
	0x00400488,
	0x0040040c,
	0x0040048c,
	0x00400410,
	0x00400490,
	0x00400414,
	0x00400494,
	0x00400418,
	0x00400498,
	0x0040041c,
	0x0040049c,
	0x00400420,
	0x004004a0,
	0x00400424,
	0x004004a4,
	0x00400428,
	0x004004a8,
	0x0040042c,
	0x004004ac,
	0x00400430,
	0x004004b0,
	0x00400434,
	0x004004b4,
	0x00400438,
	0x004004b8,
	0x0040043c,
	0x004004bc,
	0x00400440,
	0x004004c0,
	0x00400444,
	0x004004c4,
	0x00400448,
	0x004004c8,
	0x0040044c,
	0x004004cc,
	0x00400450,
	0x004004d0,
	0x00400454,
	0x004004d4,
	0x00400458,
	0x004004d8,
	0x0040045c,
	0x004004dc,
	0x00400460,
	0x004004e0,
	0x00400464,
	0x004004e4,
	0x00400468,
	0x004004e8,
	0x0040046c,
	0x004004ec,
	0x00400470,
	0x004004f0,
	0x00400474,
	0x004004f4,
	0x00400478,
	0x004004f8,
	0x0040047c,
	0x004004fc,
	NV03_PGRAPH_ABS_UCLIP_XMIN,
	NV03_PGRAPH_ABS_UCLIP_XMAX,
	NV03_PGRAPH_ABS_UCLIP_YMIN,
	NV03_PGRAPH_ABS_UCLIP_YMAX,
	0x00400550,
	0x00400558,
	0x00400554,
	0x0040055c,
	NV03_PGRAPH_ABS_UCLIPA_XMIN,
	NV03_PGRAPH_ABS_UCLIPA_XMAX,
	NV03_PGRAPH_ABS_UCLIPA_YMIN,
	NV03_PGRAPH_ABS_UCLIPA_YMAX,
	NV03_PGRAPH_ABS_ICLIP_XMAX,
	NV03_PGRAPH_ABS_ICLIP_YMAX,
	NV03_PGRAPH_XY_LOGIC_MISC0,
	NV03_PGRAPH_XY_LOGIC_MISC1,
	NV03_PGRAPH_XY_LOGIC_MISC2,
	NV03_PGRAPH_XY_LOGIC_MISC3,
	NV03_PGRAPH_CLIPX_0,
	NV03_PGRAPH_CLIPX_1,
	NV03_PGRAPH_CLIPY_0,
	NV03_PGRAPH_CLIPY_1,
	NV10_PGRAPH_COMBINER0_IN_ALPHA,
	NV10_PGRAPH_COMBINER1_IN_ALPHA,
	NV10_PGRAPH_COMBINER0_IN_RGB,
	NV10_PGRAPH_COMBINER1_IN_RGB,
	NV10_PGRAPH_COMBINER_COLOR0,
	NV10_PGRAPH_COMBINER_COLOR1,
	NV10_PGRAPH_COMBINER0_OUT_ALPHA,
	NV10_PGRAPH_COMBINER1_OUT_ALPHA,
	NV10_PGRAPH_COMBINER0_OUT_RGB,
	NV10_PGRAPH_COMBINER1_OUT_RGB,
	NV10_PGRAPH_COMBINER_FINAL0,
	NV10_PGRAPH_COMBINER_FINAL1,
	0x00400e00,
	0x00400e04,
	0x00400e08,
	0x00400e0c,
	0x00400e10,
	0x00400e14,
	0x00400e18,
	0x00400e1c,
	0x00400e20,
	0x00400e24,
	0x00400e28,
	0x00400e2c,
	0x00400e30,
	0x00400e34,
	0x00400e38,
	0x00400e3c,
	NV04_PGRAPH_PASSTHRU_0,
	NV04_PGRAPH_PASSTHRU_1,
	NV04_PGRAPH_PASSTHRU_2,
	NV10_PGRAPH_DIMX_TEXTURE,
	NV10_PGRAPH_WDIMX_TEXTURE,
	NV10_PGRAPH_DVD_COLORFMT,
	NV10_PGRAPH_SCALED_FORMAT,
	NV04_PGRAPH_MISC24_0,
	NV04_PGRAPH_MISC24_1,
	NV04_PGRAPH_MISC24_2,
	NV03_PGRAPH_X_MISC,
	NV03_PGRAPH_Y_MISC,
	NV04_PGRAPH_VALID1,
	NV04_PGRAPH_VALID2,
};
366
/* Additional PGRAPH registers saved/restored only on nv17-class chips
 * (see struct graph_state).
 */
static int nv17_graph_ctx_regs[] = {
	NV10_PGRAPH_DEBUG_4,
	0x004006b0,
	0x00400eac,
	0x00400eb0,
	0x00400eb4,
	0x00400eb8,
	0x00400ebc,
	0x00400ec0,
	0x00400ec4,
	0x00400ec8,
	0x00400ecc,
	0x00400ed0,
	0x00400ed4,
	0x00400ed8,
	0x00400edc,
	0x00400ee0,
	0x00400a00,
	0x00400a04,
};
387
/* Per-channel software PGRAPH context: one saved value per entry of
 * nv10_graph_ctx_regs[] / nv17_graph_ctx_regs[], plus the 3D pipe state.
 */
struct graph_state {
	int nv10[ARRAY_SIZE(nv10_graph_ctx_regs)];
	int nv17[ARRAY_SIZE(nv17_graph_ctx_regs)];
	struct pipe_state pipe_state;
};
393
/* Capture the PGRAPH 3D pipe state into the channel's software context.
 * For each range, the pipe address register is written once and the data
 * port is then read repeatedly to fill the shadow array.
 */
static void nv10_graph_save_pipe(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct graph_state *pgraph_ctx = chan->pgraph_ctx;
	struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state;
	int i;
#define PIPE_SAVE(addr) \
	do { \
		nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr); \
		for (i = 0; i < ARRAY_SIZE(fifo_pipe_state->pipe_##addr); i++) \
			fifo_pipe_state->pipe_##addr[i] = nv_rd32(dev, NV10_PGRAPH_PIPE_DATA); \
	} while (0)

	PIPE_SAVE(0x4400);
	PIPE_SAVE(0x0200);
	PIPE_SAVE(0x6400);
	PIPE_SAVE(0x6800);
	PIPE_SAVE(0x6c00);
	PIPE_SAVE(0x7000);
	PIPE_SAVE(0x7400);
	PIPE_SAVE(0x7800);
	PIPE_SAVE(0x0040);
	PIPE_SAVE(0x0000);

#undef PIPE_SAVE
}
420
/* Write the saved pipe state back into PGRAPH.  XFMODE is forced to a
 * known value and a few pipe locations are primed with constants before
 * the 0x0200 range is restored; the original XFMODE values are then put
 * back and the remaining ranges follow.  The ordering and the idle waits
 * are load-bearing here.
 */
static void nv10_graph_load_pipe(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct graph_state *pgraph_ctx = chan->pgraph_ctx;
	struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state;
	int i;
	uint32_t xfmode0, xfmode1;
#define PIPE_RESTORE(addr) \
	do { \
		nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr); \
		for (i = 0; i < ARRAY_SIZE(fifo_pipe_state->pipe_##addr); i++) \
			nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, fifo_pipe_state->pipe_##addr[i]); \
	} while (0)


	nouveau_wait_for_idle(dev);
	/* XXX check haiku comments */
	xfmode0 = nv_rd32(dev, NV10_PGRAPH_XFMODE0);
	xfmode1 = nv_rd32(dev, NV10_PGRAPH_XFMODE1);
	nv_wr32(dev, NV10_PGRAPH_XFMODE0, 0x10000000);
	nv_wr32(dev, NV10_PGRAPH_XFMODE1, 0x00000000);
	/* 0x3f800000 is 1.0f; presumably priming transform state with
	 * identity-ish defaults - exact meaning unconfirmed.
	 */
	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
	for (i = 0; i < 4; i++)
		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
	for (i = 0; i < 4; i++)
		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);

	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
	for (i = 0; i < 3; i++)
		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);

	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
	for (i = 0; i < 3; i++)
		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);

	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
	nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000008);


	PIPE_RESTORE(0x0200);
	nouveau_wait_for_idle(dev);

	/* restore XFMODE */
	nv_wr32(dev, NV10_PGRAPH_XFMODE0, xfmode0);
	nv_wr32(dev, NV10_PGRAPH_XFMODE1, xfmode1);
	PIPE_RESTORE(0x6400);
	PIPE_RESTORE(0x6800);
	PIPE_RESTORE(0x6c00);
	PIPE_RESTORE(0x7000);
	PIPE_RESTORE(0x7400);
	PIPE_RESTORE(0x7800);
	PIPE_RESTORE(0x4400);
	PIPE_RESTORE(0x0000);
	PIPE_RESTORE(0x0040);
	nouveau_wait_for_idle(dev);

#undef PIPE_RESTORE
}
479
/*
 * Build the default PIPE (3D transform engine) state image for @chan.
 *
 * Nothing is written to the hardware here: the values are staged into
 * chan->pgraph_ctx->pipe_state and uploaded later by
 * nv10_graph_load_pipe().  The individual constants are undocumented
 * hardware defaults (0x3f800000 is IEEE-754 1.0f, 0xbf800000 is -1.0f,
 * 0x3f000000 is 0.5f); the section labels below are the PIPE addresses
 * the arrays are restored to.
 */
static void nv10_graph_create_pipe(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct graph_state *pgraph_ctx = chan->pgraph_ctx;
	struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state;
	uint32_t *fifo_pipe_state_addr;
	int i;
/* Point the write cursor at the start of the pipe_<addr> array. */
#define PIPE_INIT(addr) \
	do { \
		fifo_pipe_state_addr = fifo_pipe_state->pipe_##addr; \
	} while (0)
/* Verify that exactly ARRAY_SIZE(pipe_<addr>) words were written. */
#define PIPE_INIT_END(addr) \
	do { \
		uint32_t *__end_addr = fifo_pipe_state->pipe_##addr + \
				ARRAY_SIZE(fifo_pipe_state->pipe_##addr); \
		if (fifo_pipe_state_addr != __end_addr) \
			NV_ERROR(dev, "incomplete pipe init for 0x%x :  %p/%p\n", \
				addr, fifo_pipe_state_addr, __end_addr); \
	} while (0)
/* Append one word at the cursor and advance it. */
#define NV_WRITE_PIPE_INIT(value) *(fifo_pipe_state_addr++) = value

	/* 0x0200: all zeros */
	PIPE_INIT(0x0200);
	for (i = 0; i < 48; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x0200);

	/* 0x6400: zeros followed by a block of float constants */
	PIPE_INIT(0x6400);
	for (i = 0; i < 211; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x40000000);
	NV_WRITE_PIPE_INIT(0x40000000);
	NV_WRITE_PIPE_INIT(0x40000000);
	NV_WRITE_PIPE_INIT(0x40000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f000000);
	NV_WRITE_PIPE_INIT(0x3f000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	PIPE_INIT_END(0x6400);

	/* 0x6800: zeros with a single 1.0f in the middle */
	PIPE_INIT(0x6800);
	for (i = 0; i < 162; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	for (i = 0; i < 25; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x6800);

	/* 0x6c00: zeros with a single -1.0f at word 4 */
	PIPE_INIT(0x6c00);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0xbf800000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x6c00);

	/* 0x7000: 0x7149f2ca repeated every 4th word, 8 times */
	PIPE_INIT(0x7000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	for (i = 0; i < 35; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x7000);

	/* 0x7400, 0x7800, 0x4400, 0x0000, 0x0040: all zeros */
	PIPE_INIT(0x7400);
	for (i = 0; i < 48; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x7400);

	PIPE_INIT(0x7800);
	for (i = 0; i < 48; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x7800);

	PIPE_INIT(0x4400);
	for (i = 0; i < 32; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x4400);

	PIPE_INIT(0x0000);
	for (i = 0; i < 16; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x0000);

	PIPE_INIT(0x0040);
	for (i = 0; i < 4; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x0040);

#undef PIPE_INIT
#undef PIPE_INIT_END
#undef NV_WRITE_PIPE_INIT
}
634
635static int nv10_graph_ctx_regs_find_offset(struct drm_device *dev, int reg)
636{
637 int i;
638 for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++) {
639 if (nv10_graph_ctx_regs[i] == reg)
640 return i;
641 }
642 NV_ERROR(dev, "unknow offset nv10_ctx_regs %d\n", reg);
643 return -1;
644}
645
646static int nv17_graph_ctx_regs_find_offset(struct drm_device *dev, int reg)
647{
648 int i;
649 for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++) {
650 if (nv17_graph_ctx_regs[i] == reg)
651 return i;
652 }
653 NV_ERROR(dev, "unknow offset nv17_ctx_regs %d\n", reg);
654 return -1;
655}
656
657int nv10_graph_load_context(struct nouveau_channel *chan)
658{
659 struct drm_device *dev = chan->dev;
660 struct drm_nouveau_private *dev_priv = dev->dev_private;
661 struct graph_state *pgraph_ctx = chan->pgraph_ctx;
662 uint32_t tmp;
663 int i;
664
665 for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
666 nv_wr32(dev, nv10_graph_ctx_regs[i], pgraph_ctx->nv10[i]);
667 if (dev_priv->chipset >= 0x17) {
668 for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
669 nv_wr32(dev, nv17_graph_ctx_regs[i],
670 pgraph_ctx->nv17[i]);
671 }
672
673 nv10_graph_load_pipe(chan);
674
675 nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
676 tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER);
677 nv_wr32(dev, NV10_PGRAPH_CTX_USER, (tmp & 0xffffff) | chan->id << 24);
678 tmp = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2);
679 nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, tmp & 0xcfffffff);
680 return 0;
681}
682
683int
684nv10_graph_unload_context(struct drm_device *dev)
685{
686 struct drm_nouveau_private *dev_priv = dev->dev_private;
687 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
688 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
689 struct nouveau_channel *chan;
690 struct graph_state *ctx;
691 uint32_t tmp;
692 int i;
693
694 chan = pgraph->channel(dev);
695 if (!chan)
696 return 0;
697 ctx = chan->pgraph_ctx;
698
699 for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
700 ctx->nv10[i] = nv_rd32(dev, nv10_graph_ctx_regs[i]);
701
702 if (dev_priv->chipset >= 0x17) {
703 for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
704 ctx->nv17[i] = nv_rd32(dev, nv17_graph_ctx_regs[i]);
705 }
706
707 nv10_graph_save_pipe(chan);
708
709 nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
710 tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
711 tmp |= (pfifo->channels - 1) << 24;
712 nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
713 return 0;
714}
715
716void
717nv10_graph_context_switch(struct drm_device *dev)
718{
719 struct drm_nouveau_private *dev_priv = dev->dev_private;
720 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
721 struct nouveau_channel *chan = NULL;
722 int chid;
723
724 pgraph->fifo_access(dev, false);
725 nouveau_wait_for_idle(dev);
726
727 /* If previous context is valid, we need to save it */
728 nv10_graph_unload_context(dev);
729
730 /* Load context for next channel */
731 chid = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
732 chan = dev_priv->fifos[chid];
733 if (chan)
734 nv10_graph_load_context(chan);
735
736 pgraph->fifo_access(dev, true);
737}
738
/*
 * Stage @val into the software context image (not the hardware) at the
 * slot corresponding to @reg.  When @reg is not part of the tracked
 * register set the write is dropped after find_offset() has logged an
 * error.
 *
 * NOTE(review): "offset > 0" also discards a valid match at array
 * index 0; "offset >= 0" looks like the intended test.  Confirm none
 * of the registers written through these macros sits at index 0 of
 * its table before changing it.
 */
#define NV_WRITE_CTX(reg, val) do { \
	int offset = nv10_graph_ctx_regs_find_offset(dev, reg); \
	if (offset > 0) \
		pgraph_ctx->nv10[offset] = val; \
	} while (0)

/* Same as NV_WRITE_CTX, but for the NV17-only register set. */
#define NV17_WRITE_CTX(reg, val) do { \
	int offset = nv17_graph_ctx_regs_find_offset(dev, reg); \
	if (offset > 0) \
		pgraph_ctx->nv17[offset] = val; \
	} while (0)
750
751struct nouveau_channel *
752nv10_graph_channel(struct drm_device *dev)
753{
754 struct drm_nouveau_private *dev_priv = dev->dev_private;
755 int chid = dev_priv->engine.fifo.channels;
756
757 if (nv_rd32(dev, NV10_PGRAPH_CTX_CONTROL) & 0x00010000)
758 chid = nv_rd32(dev, NV10_PGRAPH_CTX_USER) >> 24;
759
760 if (chid >= dev_priv->engine.fifo.channels)
761 return NULL;
762
763 return dev_priv->fifos[chid];
764}
765
/*
 * Allocate and initialize the software PGRAPH context image for @chan.
 *
 * The image is populated with default register values (staged via
 * NV_WRITE_CTX, not written to hardware) and the default PIPE state.
 * Returns 0 on success or -ENOMEM.  The register constants below are
 * undocumented hardware defaults.
 */
int nv10_graph_create_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct graph_state *pgraph_ctx;

	NV_DEBUG(dev, "nv10_graph_context_create %d\n", chan->id);

	chan->pgraph_ctx = pgraph_ctx = kzalloc(sizeof(*pgraph_ctx),
						GFP_KERNEL);
	if (pgraph_ctx == NULL)
		return -ENOMEM;


	NV_WRITE_CTX(0x00400e88, 0x08000000);
	NV_WRITE_CTX(0x00400e9c, 0x4b7fffff);
	NV_WRITE_CTX(NV03_PGRAPH_XY_LOGIC_MISC0, 0x0001ffff);
	NV_WRITE_CTX(0x00400e10, 0x00001000);
	NV_WRITE_CTX(0x00400e14, 0x00001000);
	NV_WRITE_CTX(0x00400e30, 0x00080008);
	NV_WRITE_CTX(0x00400e34, 0x00080008);
	if (dev_priv->chipset >= 0x17) {
		/* is it really needed ??? */
		NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4,
					nv_rd32(dev, NV10_PGRAPH_DEBUG_4));
		NV17_WRITE_CTX(0x004006b0, nv_rd32(dev, 0x004006b0));
		NV17_WRITE_CTX(0x00400eac, 0x0fff0000);
		NV17_WRITE_CTX(0x00400eb0, 0x0fff0000);
		NV17_WRITE_CTX(0x00400ec0, 0x00000080);
		NV17_WRITE_CTX(0x00400ed0, 0x00000080);
	}
	/* Pre-stamp the channel id so the image loads as owned by @chan. */
	NV_WRITE_CTX(NV10_PGRAPH_CTX_USER, chan->id << 24);

	nv10_graph_create_pipe(chan);
	return 0;
}
802
803void nv10_graph_destroy_context(struct nouveau_channel *chan)
804{
805 struct graph_state *pgraph_ctx = chan->pgraph_ctx;
806
807 kfree(pgraph_ctx);
808 chan->pgraph_ctx = NULL;
809}
810
/*
 * Bring up the PGRAPH engine: reset it via PMC, clear pending
 * interrupts, program debug defaults, mirror the PFB tiling setup and
 * leave the engine with no context loaded.  Always returns 0.
 */
int nv10_graph_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t tmp;
	int i;

	/* Pulse the PGRAPH enable bit in PMC to reset the engine. */
	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
			~NV_PMC_ENABLE_PGRAPH);
	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
			 NV_PMC_ENABLE_PGRAPH);

	/* Ack any stale interrupts, then enable all interrupt sources. */
	nv_wr32(dev, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);

	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x00118700);
	/* nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x24E00810); */ /* 0x25f92ad9 */
	nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x25f92ad9);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0x55DE0830 |
				(1<<29) |
				(1<<31));
	if (dev_priv->chipset >= 0x17) {
		nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x1f000000);
		nv_wr32(dev, 0x004006b0, 0x40000020);
	} else
		nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000);

	/* copy tile info from PFB */
	for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
		nv_wr32(dev, NV10_PGRAPH_TILE(i),
					nv_rd32(dev, NV10_PFB_TILE(i)));
		nv_wr32(dev, NV10_PGRAPH_TLIMIT(i),
					nv_rd32(dev, NV10_PFB_TLIMIT(i)));
		nv_wr32(dev, NV10_PGRAPH_TSIZE(i),
					nv_rd32(dev, NV10_PFB_TSIZE(i)));
		nv_wr32(dev, NV10_PGRAPH_TSTATUS(i),
					nv_rd32(dev, NV10_PFB_TSTATUS(i)));
	}

	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH1, 0x00000000);
	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH2, 0x00000000);
	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH3, 0x00000000);
	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH4, 0x00000000);
	nv_wr32(dev, NV10_PGRAPH_STATE      , 0xFFFFFFFF);

	/* Mark the engine as unowned: CTX_USER[31:24] = invalid channel. */
	tmp  = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
	tmp |= (dev_priv->engine.fifo.channels - 1) << 24;
	nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
	nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, 0x08000000);

	return 0;
}
865
/* Nothing to tear down: nv10_graph_init() allocates no resources. */
void nv10_graph_takedown(struct drm_device *dev)
{
}
869
/*
 * Graphics object classes supported on NV10-family hardware.  None of
 * them needs a software method handler (all entries are {class, false,
 * NULL}); the list is terminated by an empty entry.
 */
struct nouveau_pgraph_object_class nv10_graph_grclass[] = {
	{ 0x0030, false, NULL }, /* null */
	{ 0x0039, false, NULL }, /* m2mf */
	{ 0x004a, false, NULL }, /* gdirect */
	{ 0x005f, false, NULL }, /* imageblit */
	{ 0x009f, false, NULL }, /* imageblit (nv12) */
	{ 0x008a, false, NULL }, /* ifc */
	{ 0x0089, false, NULL }, /* sifm */
	{ 0x0062, false, NULL }, /* surf2d */
	{ 0x0043, false, NULL }, /* rop */
	{ 0x0012, false, NULL }, /* beta1 */
	{ 0x0072, false, NULL }, /* beta4 */
	{ 0x0019, false, NULL }, /* cliprect */
	{ 0x0044, false, NULL }, /* pattern */
	{ 0x0052, false, NULL }, /* swzsurf */
	{ 0x0093, false, NULL }, /* surf3d */
	{ 0x0094, false, NULL }, /* tex_tri */
	{ 0x0095, false, NULL }, /* multitex_tri */
	{ 0x0056, false, NULL }, /* celsius (nv10) */
	{ 0x0096, false, NULL }, /* celsius (nv11) */
	{ 0x0099, false, NULL }, /* celsius (nv17) */
	{}
};
diff --git a/drivers/gpu/drm/nouveau/nv17_gpio.c b/drivers/gpu/drm/nouveau/nv17_gpio.c
new file mode 100644
index 000000000000..2e58c331e9b7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv17_gpio.c
@@ -0,0 +1,92 @@
1/*
2 * Copyright (C) 2009 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "nouveau_drv.h"
29#include "nouveau_hw.h"
30
31static bool
32get_gpio_location(struct dcb_gpio_entry *ent, uint32_t *reg, uint32_t *shift,
33 uint32_t *mask)
34{
35 if (ent->line < 2) {
36 *reg = NV_PCRTC_GPIO;
37 *shift = ent->line * 16;
38 *mask = 0x11;
39
40 } else if (ent->line < 10) {
41 *reg = NV_PCRTC_GPIO_EXT;
42 *shift = (ent->line - 2) * 4;
43 *mask = 0x3;
44
45 } else if (ent->line < 14) {
46 *reg = NV_PCRTC_850;
47 *shift = (ent->line - 10) * 4;
48 *mask = 0x3;
49
50 } else {
51 return false;
52 }
53
54 return true;
55}
56
57int
58nv17_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag)
59{
60 struct dcb_gpio_entry *ent = nouveau_bios_gpio_entry(dev, tag);
61 uint32_t reg, shift, mask, value;
62
63 if (!ent)
64 return -ENODEV;
65
66 if (!get_gpio_location(ent, &reg, &shift, &mask))
67 return -ENODEV;
68
69 value = NVReadCRTC(dev, 0, reg) >> shift;
70
71 return (ent->invert ? 1 : 0) ^ (value & 1);
72}
73
74int
75nv17_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state)
76{
77 struct dcb_gpio_entry *ent = nouveau_bios_gpio_entry(dev, tag);
78 uint32_t reg, shift, mask, value;
79
80 if (!ent)
81 return -ENODEV;
82
83 if (!get_gpio_location(ent, &reg, &shift, &mask))
84 return -ENODEV;
85
86 value = ((ent->invert ? 1 : 0) ^ (state ? 1 : 0)) << shift;
87 mask = ~(mask << shift);
88
89 NVWriteCRTC(dev, 0, reg, value | (NVReadCRTC(dev, 0, reg) & mask));
90
91 return 0;
92}
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
new file mode 100644
index 000000000000..46cfd9c60478
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -0,0 +1,681 @@
1/*
2 * Copyright (C) 2009 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm_crtc_helper.h"
29#include "nouveau_drv.h"
30#include "nouveau_encoder.h"
31#include "nouveau_connector.h"
32#include "nouveau_crtc.h"
33#include "nouveau_hw.h"
34#include "nv17_tv.h"
35
36enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder,
37 struct drm_connector *connector,
38 uint32_t pin_mask)
39{
40 struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
41
42 tv_enc->pin_mask = pin_mask >> 28 & 0xe;
43
44 switch (tv_enc->pin_mask) {
45 case 0x2:
46 case 0x4:
47 tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Composite;
48 break;
49 case 0xc:
50 tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SVIDEO;
51 break;
52 case 0xe:
53 if (nouveau_encoder(encoder)->dcb->tvconf.has_component_output)
54 tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Component;
55 else
56 tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SCART;
57 break;
58 default:
59 tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
60 break;
61 }
62
63 drm_connector_property_set_value(connector,
64 encoder->dev->mode_config.tv_subconnector_property,
65 tv_enc->subconnector);
66
67 return tv_enc->subconnector ? connector_status_connected :
68 connector_status_disconnected;
69}
70
/*
 * Candidate resolutions offered for the "HD" (CTV) encoder modes;
 * nv17_tv_get_modes() skips entries larger than the current output
 * mode and generates scaled CVT timings for the rest.
 */
static const struct {
	int hdisplay;
	int vdisplay;
} modes[] = {
	{ 640, 400 },
	{ 640, 480 },
	{ 720, 480 },
	{ 720, 576 },
	{ 800, 600 },
	{ 1024, 768 },
	{ 1280, 720 },
	{ 1280, 1024 },
	{ 1920, 1080 }
};
85
/*
 * Populate @connector's probed-mode list for the current TV norm and
 * return the number of modes added.
 *
 * For plain TV norms every entry of nv17_tv_modes is offered, with the
 * clock derived from the norm's vrefresh.  For CTV ("HD") norms the
 * static modes[] table is filtered against the encoder's output mode
 * and scaled CVT timings are generated.
 *
 * NOTE(review): the return values of drm_mode_duplicate()/
 * drm_cvt_mode() are dereferenced without a NULL check -- an
 * allocation failure here would oops; confirm whether that is
 * acceptable in this call path.
 */
static int nv17_tv_get_modes(struct drm_encoder *encoder,
			     struct drm_connector *connector)
{
	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
	struct drm_display_mode *mode;
	struct drm_display_mode *output_mode;
	int n = 0;
	int i;

	if (tv_norm->kind != CTV_ENC_MODE) {
		struct drm_display_mode *tv_mode;

		for (tv_mode = nv17_tv_modes; tv_mode->hdisplay; tv_mode++) {
			mode = drm_mode_duplicate(encoder->dev, tv_mode);

			/* Clock from refresh rate and totals (kHz). */
			mode->clock = tv_norm->tv_enc_mode.vrefresh *
						mode->htotal / 1000 *
						mode->vtotal / 1000;

			if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
				mode->clock *= 2;

			if (mode->hdisplay == tv_norm->tv_enc_mode.hdisplay &&
			    mode->vdisplay == tv_norm->tv_enc_mode.vdisplay)
				mode->type |= DRM_MODE_TYPE_PREFERRED;

			drm_mode_probed_add(connector, mode);
			n++;
		}
		return n;
	}

	/* tv_norm->kind == CTV_ENC_MODE */
	output_mode = &tv_norm->ctv_enc_mode.mode;
	for (i = 0; i < ARRAY_SIZE(modes); i++) {
		if (modes[i].hdisplay > output_mode->hdisplay ||
		    modes[i].vdisplay > output_mode->vdisplay)
			continue;

		if (modes[i].hdisplay == output_mode->hdisplay &&
		    modes[i].vdisplay == output_mode->vdisplay) {
			mode = drm_mode_duplicate(encoder->dev, output_mode);
			mode->type |= DRM_MODE_TYPE_PREFERRED;
		} else {
			mode = drm_cvt_mode(encoder->dev, modes[i].hdisplay,
					    modes[i].vdisplay, 60, false,
					    output_mode->flags & DRM_MODE_FLAG_INTERLACE,
					    false);
		}

		/* CVT modes are sometimes unsuitable... */
		if (output_mode->hdisplay <= 720
		    || output_mode->hdisplay >= 1920) {
			mode->htotal = output_mode->htotal;
			mode->hsync_start = (mode->hdisplay + (mode->htotal
					     - mode->hdisplay) * 9 / 10) & ~7;
			mode->hsync_end = mode->hsync_start + 8;
		}
		if (output_mode->vdisplay >= 1024) {
			mode->vtotal = output_mode->vtotal;
			mode->vsync_start = output_mode->vsync_start;
			mode->vsync_end = output_mode->vsync_end;
		}

		mode->type |= DRM_MODE_TYPE_DRIVER;
		drm_mode_probed_add(connector, mode);
		n++;
	}
	return n;
}
156
157static int nv17_tv_mode_valid(struct drm_encoder *encoder,
158 struct drm_display_mode *mode)
159{
160 struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
161
162 if (tv_norm->kind == CTV_ENC_MODE) {
163 struct drm_display_mode *output_mode =
164 &tv_norm->ctv_enc_mode.mode;
165
166 if (mode->clock > 400000)
167 return MODE_CLOCK_HIGH;
168
169 if (mode->hdisplay > output_mode->hdisplay ||
170 mode->vdisplay > output_mode->vdisplay)
171 return MODE_BAD;
172
173 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) !=
174 (output_mode->flags & DRM_MODE_FLAG_INTERLACE))
175 return MODE_NO_INTERLACE;
176
177 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
178 return MODE_NO_DBLESCAN;
179
180 } else {
181 const int vsync_tolerance = 600;
182
183 if (mode->clock > 70000)
184 return MODE_CLOCK_HIGH;
185
186 if (abs(drm_mode_vrefresh(mode) * 1000 -
187 tv_norm->tv_enc_mode.vrefresh) > vsync_tolerance)
188 return MODE_VSYNC;
189
190 /* The encoder takes care of the actual interlacing */
191 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
192 return MODE_NO_INTERLACE;
193 }
194
195 return MODE_OK;
196}
197
198static bool nv17_tv_mode_fixup(struct drm_encoder *encoder,
199 struct drm_display_mode *mode,
200 struct drm_display_mode *adjusted_mode)
201{
202 struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
203
204 if (tv_norm->kind == CTV_ENC_MODE)
205 adjusted_mode->clock = tv_norm->ctv_enc_mode.mode.clock;
206 else
207 adjusted_mode->clock = 90000;
208
209 return true;
210}
211
/*
 * Apply DPMS state @mode to the TV encoder: gate the TV-out block via
 * PTV_200 bit 0, the FP encoder (used only in CTV mode), the DAC
 * clock, and the two TVDAC GPIO lines.  No-op when the state is
 * unchanged.
 */
static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
{
	struct drm_device *dev = encoder->dev;
	struct nv17_tv_state *regs = &to_tv_enc(encoder)->state;
	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);

	if (nouveau_encoder(encoder)->last_dpms == mode)
		return;
	nouveau_encoder(encoder)->last_dpms = mode;

	NV_TRACE(dev, "Setting dpms mode %d on TV encoder (output %d)\n",
		 mode, nouveau_encoder(encoder)->dcb->index);

	regs->ptv_200 &= ~1;	/* assume TV-out disabled */

	if (tv_norm->kind == CTV_ENC_MODE) {
		/* CTV goes through the FP encoder; PTV stays off. */
		nv04_dfp_update_fp_control(encoder, mode);

	} else {
		nv04_dfp_update_fp_control(encoder, DRM_MODE_DPMS_OFF);

		if (mode == DRM_MODE_DPMS_ON)
			regs->ptv_200 |= 1;
	}

	nv_load_ptv(dev, regs, 200);

	nv17_gpio_set(dev, DCB_GPIO_TVDAC1, mode == DRM_MODE_DPMS_ON);
	nv17_gpio_set(dev, DCB_GPIO_TVDAC0, mode == DRM_MODE_DPMS_ON);

	nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON);
}
244
/*
 * Prepare the TV encoder for a modeset: power everything down, detach
 * conflicting FP encoders from the target head when the CTV path needs
 * the FP hardware, set up the CRE_LCD routing byte, and program the
 * DACCLK register for TV (0x10) or CTV (0x20) operation.
 */
static void nv17_tv_prepare(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_encoder_helper_funcs *helper = encoder->helper_private;
	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
	int head = nouveau_crtc(encoder->crtc)->index;
	uint8_t *cr_lcd = &dev_priv->mode_reg.crtc_reg[head].CRTC[
							NV_CIO_CRE_LCD__INDEX];
	uint32_t dacclk_off = NV_PRAMDAC_DACCLK +
					nv04_dac_output_offset(encoder);
	uint32_t dacclk;

	helper->dpms(encoder, DRM_MODE_DPMS_OFF);

	nv04_dfp_disable(dev, head);

	/* Unbind any FP encoders from this head if we need the FP
	 * stuff enabled. */
	if (tv_norm->kind == CTV_ENC_MODE) {
		struct drm_encoder *enc;

		list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
			struct dcb_entry *dcb = nouveau_encoder(enc)->dcb;

			if ((dcb->type == OUTPUT_TMDS ||
			     dcb->type == OUTPUT_LVDS) &&
			     !enc->crtc &&
			     nv04_dfp_get_bound_head(dev, dcb) == head) {
				/* Push the idle DFP to the other head. */
				nv04_dfp_bind_head(dev, dcb, head ^ 1,
						   dev_priv->VBIOS.fp.dual_link);
			}
		}

	}

	/* Some NV4x have unknown values (0x3f, 0x50, 0x54, 0x6b, 0x79, 0x7f)
	 * at LCD__INDEX which we don't alter
	 */
	if (!(*cr_lcd & 0x44)) {
		if (tv_norm->kind == CTV_ENC_MODE)
			*cr_lcd = 0x1 | (head ? 0x0 : 0x8);
		else
			*cr_lcd = 0;
	}

	/* Set the DACCLK register */
	dacclk = (NVReadRAMDAC(dev, 0, dacclk_off) & ~0x30) | 0x1;

	if (dev_priv->card_type == NV_40)
		dacclk |= 0x1a << 16;

	if (tv_norm->kind == CTV_ENC_MODE) {
		dacclk |=  0x20;

		if (head)
			dacclk |= 0x100;
		else
			dacclk &= ~0x100;

	} else {
		dacclk |=  0x10;

	}

	NVWriteRAMDAC(dev, 0, dacclk_off, dacclk);
}
312
/*
 * Stage the per-head and per-encoder register state for the new mode.
 *
 * For plain TV norms, the PTV block registers and the 0x40-entry TV
 * encoder table are filled from the norm parameters (with chipset- and
 * resolution-dependent variants).  For CTV ("HD") norms, the PRAMDAC
 * CTV registers and the FP timing/control registers are programmed
 * from the norm's fixed output mode instead.  Values are staged in
 * dev_priv->mode_reg / tv_enc->state; the hardware load happens later.
 */
static void  nv17_tv_mode_set(struct drm_encoder *encoder,
			      struct drm_display_mode *drm_mode,
			      struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int head = nouveau_crtc(encoder->crtc)->index;
	struct nv04_crtc_reg *regs = &dev_priv->mode_reg.crtc_reg[head];
	struct nv17_tv_state *tv_regs = &to_tv_enc(encoder)->state;
	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
	int i;

	regs->CRTC[NV_CIO_CRE_53] = 0x40; /* FP_HTIMING */
	regs->CRTC[NV_CIO_CRE_54] = 0; /* FP_VTIMING */
	regs->ramdac_630 = 0x2; /* turn off green mode (tv test pattern?) */
	regs->tv_setup = 1;
	regs->ramdac_8c0 = 0x0;

	if (tv_norm->kind == TV_ENC_MODE) {
		tv_regs->ptv_200 = 0x13111100;
		if (head)
			tv_regs->ptv_200 |= 0x10;

		tv_regs->ptv_20c = 0x808010;
		tv_regs->ptv_304 = 0x2d00000;
		tv_regs->ptv_600 = 0x0;
		tv_regs->ptv_60c = 0x0;
		tv_regs->ptv_610 = 0x1e00000;

		/* 576-line (PAL-ish) vs 480-line (NTSC-ish) variants. */
		if (tv_norm->tv_enc_mode.vdisplay == 576) {
			tv_regs->ptv_508 = 0x1200000;
			tv_regs->ptv_614 = 0x33;

		} else if (tv_norm->tv_enc_mode.vdisplay == 480) {
			tv_regs->ptv_508 = 0xf00000;
			tv_regs->ptv_614 = 0x13;
		}

		/* NV30+ uses different timing constants than NV1x/NV2x. */
		if (dev_priv->card_type >= NV_30) {
			tv_regs->ptv_500 = 0xe8e0;
			tv_regs->ptv_504 = 0x1710;
			tv_regs->ptv_604 = 0x0;
			tv_regs->ptv_608 = 0x0;
		} else {
			if (tv_norm->tv_enc_mode.vdisplay == 576) {
				tv_regs->ptv_604 = 0x20;
				tv_regs->ptv_608 = 0x10;
				tv_regs->ptv_500 = 0x19710;
				tv_regs->ptv_504 = 0x68f0;

			} else if (tv_norm->tv_enc_mode.vdisplay == 480) {
				tv_regs->ptv_604 = 0x10;
				tv_regs->ptv_608 = 0x20;
				tv_regs->ptv_500 = 0x4b90;
				tv_regs->ptv_504 = 0x1b480;
			}
		}

		/* Copy the norm's TV encoder table verbatim. */
		for (i = 0; i < 0x40; i++)
			tv_regs->tv_enc[i] = tv_norm->tv_enc_mode.tv_enc[i];

	} else {
		struct drm_display_mode *output_mode =
			&tv_norm->ctv_enc_mode.mode;

		/* The registers in PRAMDAC+0xc00 control some timings and CSC
		 * parameters for the CTV encoder (It's only used for "HD" TV
		 * modes, I don't think I have enough working to guess what
		 * they exactly mean...), it's probably connected at the
		 * output of the FP encoder, but it also needs the analog
		 * encoder in its OR enabled and routed to the head it's
		 * using. It's enabled with the DACCLK register, bits [5:4].
		 */
		for (i = 0; i < 38; i++)
			regs->ctv_regs[i] = tv_norm->ctv_enc_mode.ctv_regs[i];

		/* FP horizontal timings from the fixed output mode. */
		regs->fp_horiz_regs[FP_DISPLAY_END] = output_mode->hdisplay - 1;
		regs->fp_horiz_regs[FP_TOTAL] = output_mode->htotal - 1;
		regs->fp_horiz_regs[FP_SYNC_START] =
			output_mode->hsync_start - 1;
		regs->fp_horiz_regs[FP_SYNC_END] = output_mode->hsync_end - 1;
		regs->fp_horiz_regs[FP_CRTC] = output_mode->hdisplay +
			max((output_mode->hdisplay-600)/40 - 1, 1);

		/* FP vertical timings from the fixed output mode. */
		regs->fp_vert_regs[FP_DISPLAY_END] = output_mode->vdisplay - 1;
		regs->fp_vert_regs[FP_TOTAL] = output_mode->vtotal - 1;
		regs->fp_vert_regs[FP_SYNC_START] =
			output_mode->vsync_start - 1;
		regs->fp_vert_regs[FP_SYNC_END] = output_mode->vsync_end - 1;
		regs->fp_vert_regs[FP_CRTC] = output_mode->vdisplay - 1;

		regs->fp_control = NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS |
			NV_PRAMDAC_FP_TG_CONTROL_READ_PROG |
			NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12;

		if (output_mode->flags & DRM_MODE_FLAG_PVSYNC)
			regs->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS;
		if (output_mode->flags & DRM_MODE_FLAG_PHSYNC)
			regs->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS;

		regs->fp_debug_0 = NV_PRAMDAC_FP_DEBUG_0_YWEIGHT_ROUND |
			NV_PRAMDAC_FP_DEBUG_0_XWEIGHT_ROUND |
			NV_PRAMDAC_FP_DEBUG_0_YINTERP_BILINEAR |
			NV_PRAMDAC_FP_DEBUG_0_XINTERP_BILINEAR |
			NV_RAMDAC_FP_DEBUG_0_TMDS_ENABLED |
			NV_PRAMDAC_FP_DEBUG_0_YSCALE_ENABLE |
			NV_PRAMDAC_FP_DEBUG_0_XSCALE_ENABLE;

		regs->fp_debug_2 = 0;

		regs->fp_margin_color = 0x801080;

	}
}
427
/*
 * Finish the modeset: recompute the rescaler (and TV properties for
 * plain TV norms), load the staged encoder state into the hardware,
 * program the DAC test-control register, and switch the encoder on.
 */
static void nv17_tv_commit(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_encoder_helper_funcs *helper = encoder->helper_private;

	if (get_tv_norm(encoder)->kind == TV_ENC_MODE) {
		nv17_tv_update_rescaler(encoder);
		nv17_tv_update_properties(encoder);
	} else {
		nv17_ctv_update_rescaler(encoder);
	}

	nv17_tv_state_load(dev, &to_tv_enc(encoder)->state);

	/* This could use refinement for flatpanels, but it should work */
	if (dev_priv->chipset < 0x44)
		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL +
					nv04_dac_output_offset(encoder),
					0xf0000000);
	else
		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL +
					nv04_dac_output_offset(encoder),
					0x00100000);

	helper->dpms(encoder, DRM_MODE_DPMS_ON);

	NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
		drm_get_connector_name(
			&nouveau_encoder_connector_get(nv_encoder)->base),
		nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
}
462
463static void nv17_tv_save(struct drm_encoder *encoder)
464{
465 struct drm_device *dev = encoder->dev;
466 struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
467
468 nouveau_encoder(encoder)->restore.output =
469 NVReadRAMDAC(dev, 0,
470 NV_PRAMDAC_DACCLK +
471 nv04_dac_output_offset(encoder));
472
473 nv17_tv_state_save(dev, &tv_enc->saved_state);
474
475 tv_enc->state.ptv_200 = tv_enc->saved_state.ptv_200;
476}
477
478static void nv17_tv_restore(struct drm_encoder *encoder)
479{
480 struct drm_device *dev = encoder->dev;
481
482 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK +
483 nv04_dac_output_offset(encoder),
484 nouveau_encoder(encoder)->restore.output);
485
486 nv17_tv_state_load(dev, &to_tv_enc(encoder)->saved_state);
487}
488
/*
 * Create and attach the TV-related DRM properties for @connector.
 *
 * Honors the "nouveau_tv_norm" module parameter as the initial norm
 * (warning on an unrecognized name), limits the norm list to
 * NUM_LD_TV_NORMS when the DCB reports no component output, and
 * attaches the subconnector/norm/flicker/saturation/hue/overscan
 * properties with the encoder's current values.  Always returns 0.
 */
static int nv17_tv_create_resources(struct drm_encoder *encoder,
				    struct drm_connector *connector)
{
	struct drm_device *dev = encoder->dev;
	struct drm_mode_config *conf = &dev->mode_config;
	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
	struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
	int num_tv_norms = dcb->tvconf.has_component_output ? NUM_TV_NORMS :
							NUM_LD_TV_NORMS;
	int i;

	if (nouveau_tv_norm) {
		for (i = 0; i < num_tv_norms; i++) {
			if (!strcmp(nv17_tv_norm_names[i], nouveau_tv_norm)) {
				tv_enc->tv_norm = i;
				break;
			}
		}

		if (i == num_tv_norms)
			NV_WARN(dev, "Invalid TV norm setting \"%s\"\n",
				nouveau_tv_norm);
	}

	drm_mode_create_tv_properties(dev, num_tv_norms, nv17_tv_norm_names);

	drm_connector_attach_property(connector,
					conf->tv_select_subconnector_property,
					tv_enc->select_subconnector);
	drm_connector_attach_property(connector,
					conf->tv_subconnector_property,
					tv_enc->subconnector);
	drm_connector_attach_property(connector,
					conf->tv_mode_property,
					tv_enc->tv_norm);
	drm_connector_attach_property(connector,
					conf->tv_flicker_reduction_property,
					tv_enc->flicker);
	drm_connector_attach_property(connector,
					conf->tv_saturation_property,
					tv_enc->saturation);
	drm_connector_attach_property(connector,
					conf->tv_hue_property,
					tv_enc->hue);
	drm_connector_attach_property(connector,
					conf->tv_overscan_property,
					tv_enc->overscan);

	return 0;
}
539
540static int nv17_tv_set_property(struct drm_encoder *encoder,
541 struct drm_connector *connector,
542 struct drm_property *property,
543 uint64_t val)
544{
545 struct drm_mode_config *conf = &encoder->dev->mode_config;
546 struct drm_crtc *crtc = encoder->crtc;
547 struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
548 struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
549 bool modes_changed = false;
550
551 if (property == conf->tv_overscan_property) {
552 tv_enc->overscan = val;
553 if (encoder->crtc) {
554 if (tv_norm->kind == CTV_ENC_MODE)
555 nv17_ctv_update_rescaler(encoder);
556 else
557 nv17_tv_update_rescaler(encoder);
558 }
559
560 } else if (property == conf->tv_saturation_property) {
561 if (tv_norm->kind != TV_ENC_MODE)
562 return -EINVAL;
563
564 tv_enc->saturation = val;
565 nv17_tv_update_properties(encoder);
566
567 } else if (property == conf->tv_hue_property) {
568 if (tv_norm->kind != TV_ENC_MODE)
569 return -EINVAL;
570
571 tv_enc->hue = val;
572 nv17_tv_update_properties(encoder);
573
574 } else if (property == conf->tv_flicker_reduction_property) {
575 if (tv_norm->kind != TV_ENC_MODE)
576 return -EINVAL;
577
578 tv_enc->flicker = val;
579 if (encoder->crtc)
580 nv17_tv_update_rescaler(encoder);
581
582 } else if (property == conf->tv_mode_property) {
583 if (connector->dpms != DRM_MODE_DPMS_OFF)
584 return -EINVAL;
585
586 tv_enc->tv_norm = val;
587
588 modes_changed = true;
589
590 } else if (property == conf->tv_select_subconnector_property) {
591 if (tv_norm->kind != TV_ENC_MODE)
592 return -EINVAL;
593
594 tv_enc->select_subconnector = val;
595 nv17_tv_update_properties(encoder);
596
597 } else {
598 return -EINVAL;
599 }
600
601 if (modes_changed) {
602 drm_helper_probe_single_connector_modes(connector, 0, 0);
603
604 /* Disable the crtc to ensure a full modeset is
605 * performed whenever it's turned on again. */
606 if (crtc) {
607 struct drm_mode_set modeset = {
608 .crtc = crtc,
609 };
610
611 crtc->funcs->set_config(&modeset);
612 }
613 }
614
615 return 0;
616}
617
618static void nv17_tv_destroy(struct drm_encoder *encoder)
619{
620 struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
621
622 NV_DEBUG(encoder->dev, "\n");
623
624 drm_encoder_cleanup(encoder);
625 kfree(tv_enc);
626}
627
628static struct drm_encoder_helper_funcs nv17_tv_helper_funcs = {
629 .dpms = nv17_tv_dpms,
630 .save = nv17_tv_save,
631 .restore = nv17_tv_restore,
632 .mode_fixup = nv17_tv_mode_fixup,
633 .prepare = nv17_tv_prepare,
634 .commit = nv17_tv_commit,
635 .mode_set = nv17_tv_mode_set,
636 .detect = nv17_dac_detect,
637};
638
639static struct drm_encoder_slave_funcs nv17_tv_slave_funcs = {
640 .get_modes = nv17_tv_get_modes,
641 .mode_valid = nv17_tv_mode_valid,
642 .create_resources = nv17_tv_create_resources,
643 .set_property = nv17_tv_set_property,
644};
645
646static struct drm_encoder_funcs nv17_tv_funcs = {
647 .destroy = nv17_tv_destroy,
648};
649
650int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry)
651{
652 struct drm_encoder *encoder;
653 struct nv17_tv_encoder *tv_enc = NULL;
654
655 tv_enc = kzalloc(sizeof(*tv_enc), GFP_KERNEL);
656 if (!tv_enc)
657 return -ENOMEM;
658
659 tv_enc->overscan = 50;
660 tv_enc->flicker = 50;
661 tv_enc->saturation = 50;
662 tv_enc->hue = 0;
663 tv_enc->tv_norm = TV_NORM_PAL;
664 tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
665 tv_enc->select_subconnector = DRM_MODE_SUBCONNECTOR_Automatic;
666 tv_enc->pin_mask = 0;
667
668 encoder = to_drm_encoder(&tv_enc->base);
669
670 tv_enc->base.dcb = entry;
671 tv_enc->base.or = ffs(entry->or) - 1;
672
673 drm_encoder_init(dev, encoder, &nv17_tv_funcs, DRM_MODE_ENCODER_TVDAC);
674 drm_encoder_helper_add(encoder, &nv17_tv_helper_funcs);
675 to_encoder_slave(encoder)->slave_funcs = &nv17_tv_slave_funcs;
676
677 encoder->possible_crtcs = entry->heads;
678 encoder->possible_clones = 0;
679
680 return 0;
681}
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.h b/drivers/gpu/drm/nouveau/nv17_tv.h
new file mode 100644
index 000000000000..c00977cedabd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv17_tv.h
@@ -0,0 +1,156 @@
1/*
2 * Copyright (C) 2009 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#ifndef __NV17_TV_H__
28#define __NV17_TV_H__
29
30struct nv17_tv_state {
31 uint8_t tv_enc[0x40];
32
33 uint32_t hfilter[4][7];
34 uint32_t hfilter2[4][7];
35 uint32_t vfilter[4][7];
36
37 uint32_t ptv_200;
38 uint32_t ptv_204;
39 uint32_t ptv_208;
40 uint32_t ptv_20c;
41 uint32_t ptv_304;
42 uint32_t ptv_500;
43 uint32_t ptv_504;
44 uint32_t ptv_508;
45 uint32_t ptv_600;
46 uint32_t ptv_604;
47 uint32_t ptv_608;
48 uint32_t ptv_60c;
49 uint32_t ptv_610;
50 uint32_t ptv_614;
51};
52
53enum nv17_tv_norm{
54 TV_NORM_PAL,
55 TV_NORM_PAL_M,
56 TV_NORM_PAL_N,
57 TV_NORM_PAL_NC,
58 TV_NORM_NTSC_M,
59 TV_NORM_NTSC_J,
60 NUM_LD_TV_NORMS,
61 TV_NORM_HD480I = NUM_LD_TV_NORMS,
62 TV_NORM_HD480P,
63 TV_NORM_HD576I,
64 TV_NORM_HD576P,
65 TV_NORM_HD720P,
66 TV_NORM_HD1080I,
67 NUM_TV_NORMS
68};
69
70struct nv17_tv_encoder {
71 struct nouveau_encoder base;
72
73 struct nv17_tv_state state;
74 struct nv17_tv_state saved_state;
75
76 int overscan;
77 int flicker;
78 int saturation;
79 int hue;
80 enum nv17_tv_norm tv_norm;
81 int subconnector;
82 int select_subconnector;
83 uint32_t pin_mask;
84};
85#define to_tv_enc(x) container_of(nouveau_encoder(x), \
86 struct nv17_tv_encoder, base)
87
88extern char *nv17_tv_norm_names[NUM_TV_NORMS];
89
90extern struct nv17_tv_norm_params {
91 enum {
92 TV_ENC_MODE,
93 CTV_ENC_MODE,
94 } kind;
95
96 union {
97 struct {
98 int hdisplay;
99 int vdisplay;
100 int vrefresh; /* mHz */
101
102 uint8_t tv_enc[0x40];
103 } tv_enc_mode;
104
105 struct {
106 struct drm_display_mode mode;
107
108 uint32_t ctv_regs[38];
109 } ctv_enc_mode;
110 };
111
112} nv17_tv_norms[NUM_TV_NORMS];
113#define get_tv_norm(enc) (&nv17_tv_norms[to_tv_enc(enc)->tv_norm])
114
115extern struct drm_display_mode nv17_tv_modes[];
116
117static inline int interpolate(int y0, int y1, int y2, int x)
118{
119 return y1 + (x < 50 ? y1 - y0 : y2 - y1) * (x - 50) / 50;
120}
121
122void nv17_tv_state_save(struct drm_device *dev, struct nv17_tv_state *state);
123void nv17_tv_state_load(struct drm_device *dev, struct nv17_tv_state *state);
124void nv17_tv_update_properties(struct drm_encoder *encoder);
125void nv17_tv_update_rescaler(struct drm_encoder *encoder);
126void nv17_ctv_update_rescaler(struct drm_encoder *encoder);
127
128/* TV hardware access functions */
129
130static inline void nv_write_ptv(struct drm_device *dev, uint32_t reg, uint32_t val)
131{
132 nv_wr32(dev, reg, val);
133}
134
135static inline uint32_t nv_read_ptv(struct drm_device *dev, uint32_t reg)
136{
137 return nv_rd32(dev, reg);
138}
139
140static inline void nv_write_tv_enc(struct drm_device *dev, uint8_t reg, uint8_t val)
141{
142 nv_write_ptv(dev, NV_PTV_TV_INDEX, reg);
143 nv_write_ptv(dev, NV_PTV_TV_DATA, val);
144}
145
146static inline uint8_t nv_read_tv_enc(struct drm_device *dev, uint8_t reg)
147{
148 nv_write_ptv(dev, NV_PTV_TV_INDEX, reg);
149 return nv_read_ptv(dev, NV_PTV_TV_DATA);
150}
151
152#define nv_load_ptv(dev, state, reg) nv_write_ptv(dev, NV_PTV_OFFSET + 0x##reg, state->ptv_##reg)
153#define nv_save_ptv(dev, state, reg) state->ptv_##reg = nv_read_ptv(dev, NV_PTV_OFFSET + 0x##reg)
154#define nv_load_tv_enc(dev, state, reg) nv_write_tv_enc(dev, 0x##reg, state->tv_enc[0x##reg])
155
156#endif
diff --git a/drivers/gpu/drm/nouveau/nv17_tv_modes.c b/drivers/gpu/drm/nouveau/nv17_tv_modes.c
new file mode 100644
index 000000000000..d64683d97e0d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv17_tv_modes.c
@@ -0,0 +1,583 @@
1/*
2 * Copyright (C) 2009 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm_crtc_helper.h"
29#include "nouveau_drv.h"
30#include "nouveau_encoder.h"
31#include "nouveau_crtc.h"
32#include "nouveau_hw.h"
33#include "nv17_tv.h"
34
35char *nv17_tv_norm_names[NUM_TV_NORMS] = {
36 [TV_NORM_PAL] = "PAL",
37 [TV_NORM_PAL_M] = "PAL-M",
38 [TV_NORM_PAL_N] = "PAL-N",
39 [TV_NORM_PAL_NC] = "PAL-Nc",
40 [TV_NORM_NTSC_M] = "NTSC-M",
41 [TV_NORM_NTSC_J] = "NTSC-J",
42 [TV_NORM_HD480I] = "hd480i",
43 [TV_NORM_HD480P] = "hd480p",
44 [TV_NORM_HD576I] = "hd576i",
45 [TV_NORM_HD576P] = "hd576p",
46 [TV_NORM_HD720P] = "hd720p",
47 [TV_NORM_HD1080I] = "hd1080i"
48};
49
50/* TV standard specific parameters */
51
52struct nv17_tv_norm_params nv17_tv_norms[NUM_TV_NORMS] = {
53 [TV_NORM_PAL] = { TV_ENC_MODE, {
54 .tv_enc_mode = { 720, 576, 50000, {
55 0x2a, 0x9, 0x8a, 0xcb, 0x0, 0x0, 0xb, 0x18,
56 0x7e, 0x40, 0x8a, 0x35, 0x27, 0x0, 0x34, 0x3,
57 0x3e, 0x3, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c,
58 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x3,
59 0xd3, 0x4, 0xd4, 0x1, 0x2, 0x0, 0xa, 0x5,
60 0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0,
61 0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b,
62 0xbd, 0x15, 0x5, 0x15, 0x3e, 0x3, 0x0, 0x0
63 } } } },
64
65 [TV_NORM_PAL_M] = { TV_ENC_MODE, {
66 .tv_enc_mode = { 720, 480, 59940, {
67 0x21, 0xe6, 0xef, 0xe3, 0x0, 0x0, 0xb, 0x18,
68 0x7e, 0x44, 0x76, 0x32, 0x25, 0x0, 0x3c, 0x0,
69 0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83,
70 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
71 0xc5, 0x4, 0xc5, 0x1, 0x2, 0x0, 0xa, 0x5,
72 0x0, 0x18, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0,
73 0x0, 0xb4, 0x0, 0x15, 0x40, 0x10, 0x0, 0x9c,
74 0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
75 } } } },
76
77 [TV_NORM_PAL_N] = { TV_ENC_MODE, {
78 .tv_enc_mode = { 720, 576, 50000, {
79 0x2a, 0x9, 0x8a, 0xcb, 0x0, 0x0, 0xb, 0x18,
80 0x7e, 0x40, 0x8a, 0x32, 0x25, 0x0, 0x3c, 0x0,
81 0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c,
82 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
83 0xc5, 0x4, 0xc5, 0x1, 0x2, 0x0, 0xa, 0x5,
84 0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0,
85 0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b,
86 0xbd, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
87 } } } },
88
89 [TV_NORM_PAL_NC] = { TV_ENC_MODE, {
90 .tv_enc_mode = { 720, 576, 50000, {
91 0x21, 0xf6, 0x94, 0x46, 0x0, 0x0, 0xb, 0x18,
92 0x7e, 0x44, 0x8a, 0x35, 0x27, 0x0, 0x34, 0x3,
93 0x3e, 0x3, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c,
94 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x3,
95 0xd3, 0x4, 0xd4, 0x1, 0x2, 0x0, 0xa, 0x5,
96 0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0,
97 0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b,
98 0xbd, 0x15, 0x5, 0x15, 0x3e, 0x3, 0x0, 0x0
99 } } } },
100
101 [TV_NORM_NTSC_M] = { TV_ENC_MODE, {
102 .tv_enc_mode = { 720, 480, 59940, {
103 0x21, 0xf0, 0x7c, 0x1f, 0x0, 0x0, 0xb, 0x18,
104 0x7e, 0x44, 0x76, 0x48, 0x0, 0x0, 0x3c, 0x0,
105 0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83,
106 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
107 0xc5, 0x4, 0xc5, 0x1, 0x2, 0x0, 0xa, 0x5,
108 0x0, 0x16, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0,
109 0x0, 0xb4, 0x0, 0x15, 0x4, 0x10, 0x0, 0x9c,
110 0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
111 } } } },
112
113 [TV_NORM_NTSC_J] = { TV_ENC_MODE, {
114 .tv_enc_mode = { 720, 480, 59940, {
115 0x21, 0xf0, 0x7c, 0x1f, 0x0, 0x0, 0xb, 0x18,
116 0x7e, 0x44, 0x76, 0x48, 0x0, 0x0, 0x32, 0x0,
117 0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83,
118 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
119 0xcf, 0x4, 0xcf, 0x1, 0x2, 0x0, 0xa, 0x5,
120 0x0, 0x16, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0,
121 0x0, 0xb4, 0x0, 0x15, 0x4, 0x10, 0x0, 0xa4,
122 0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
123 } } } },
124
125 [TV_NORM_HD480I] = { TV_ENC_MODE, {
126 .tv_enc_mode = { 720, 480, 59940, {
127 0x21, 0xf0, 0x7c, 0x1f, 0x0, 0x0, 0xb, 0x18,
128 0x7e, 0x44, 0x76, 0x48, 0x0, 0x0, 0x32, 0x0,
129 0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83,
130 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
131 0xcf, 0x4, 0xcf, 0x1, 0x2, 0x0, 0xa, 0x5,
132 0x0, 0x16, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0,
133 0x0, 0xb4, 0x0, 0x15, 0x4, 0x10, 0x0, 0xa4,
134 0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
135 } } } },
136
137 [TV_NORM_HD576I] = { TV_ENC_MODE, {
138 .tv_enc_mode = { 720, 576, 50000, {
139 0x2a, 0x9, 0x8a, 0xcb, 0x0, 0x0, 0xb, 0x18,
140 0x7e, 0x40, 0x8a, 0x35, 0x27, 0x0, 0x34, 0x3,
141 0x3e, 0x3, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c,
142 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x3,
143 0xd3, 0x4, 0xd4, 0x1, 0x2, 0x0, 0xa, 0x5,
144 0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0,
145 0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b,
146 0xbd, 0x15, 0x5, 0x15, 0x3e, 0x3, 0x0, 0x0
147 } } } },
148
149
150 [TV_NORM_HD480P] = { CTV_ENC_MODE, {
151 .ctv_enc_mode = {
152 .mode = { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000,
153 720, 735, 743, 858, 0, 480, 490, 494, 525, 0,
154 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
155 .ctv_regs = { 0x3540000, 0x0, 0x0, 0x314,
156 0x354003a, 0x40000, 0x6f0344, 0x18100000,
157 0x10160004, 0x10060005, 0x1006000c, 0x10060020,
158 0x10060021, 0x140e0022, 0x10060202, 0x1802020a,
159 0x1810020b, 0x10000fff, 0x10000fff, 0x10000fff,
160 0x10000fff, 0x10000fff, 0x10000fff, 0x70,
161 0x3ff0000, 0x57, 0x2e001e, 0x258012c,
162 0xa0aa04ec, 0x30, 0x80960019, 0x12c0300,
163 0x2019, 0x600, 0x32060019, 0x0, 0x0, 0x400
164 } } } },
165
166 [TV_NORM_HD576P] = { CTV_ENC_MODE, {
167 .ctv_enc_mode = {
168 .mode = { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000,
169 720, 730, 738, 864, 0, 576, 581, 585, 625, 0,
170 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
171 .ctv_regs = { 0x3540000, 0x0, 0x0, 0x314,
172 0x354003a, 0x40000, 0x6f0344, 0x18100000,
173 0x10060001, 0x10060009, 0x10060026, 0x10060027,
174 0x140e0028, 0x10060268, 0x1810026d, 0x10000fff,
175 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff,
176 0x10000fff, 0x10000fff, 0x10000fff, 0x69,
177 0x3ff0000, 0x57, 0x2e001e, 0x258012c,
178 0xa0aa04ec, 0x30, 0x80960019, 0x12c0300,
179 0x2019, 0x600, 0x32060019, 0x0, 0x0, 0x400
180 } } } },
181
182 [TV_NORM_HD720P] = { CTV_ENC_MODE, {
183 .ctv_enc_mode = {
184 .mode = { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250,
185 1280, 1349, 1357, 1650, 0, 720, 725, 730, 750, 0,
186 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
187 .ctv_regs = { 0x1260394, 0x0, 0x0, 0x622,
188 0x66b0021, 0x6004a, 0x1210626, 0x8170000,
189 0x70004, 0x70016, 0x70017, 0x40f0018,
190 0x702e8, 0x81702ed, 0xfff, 0xfff,
191 0xfff, 0xfff, 0xfff, 0xfff,
192 0xfff, 0xfff, 0xfff, 0x0,
193 0x2e40001, 0x58, 0x2e001e, 0x258012c,
194 0xa0aa04ec, 0x30, 0x810c0039, 0x12c0300,
195 0xc0002039, 0x600, 0x32060039, 0x0, 0x0, 0x0
196 } } } },
197
198 [TV_NORM_HD1080I] = { CTV_ENC_MODE, {
199 .ctv_enc_mode = {
200 .mode = { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250,
201 1920, 1961, 2049, 2200, 0, 1080, 1084, 1088, 1125, 0,
202 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC
203 | DRM_MODE_FLAG_INTERLACE) },
204 .ctv_regs = { 0xac0420, 0x44c0478, 0x4a4, 0x4fc0868,
205 0x8940028, 0x60054, 0xe80870, 0xbf70000,
206 0xbc70004, 0x70005, 0x70012, 0x70013,
207 0x40f0014, 0x70230, 0xbf70232, 0xbf70233,
208 0x1c70237, 0x70238, 0x70244, 0x70245,
209 0x40f0246, 0x70462, 0x1f70464, 0x0,
210 0x2e40001, 0x58, 0x2e001e, 0x258012c,
211 0xa0aa04ec, 0x30, 0x815f004c, 0x12c0300,
212 0xc000204c, 0x600, 0x3206004c, 0x0, 0x0, 0x0
213 } } } }
214};
215
216/*
217 * The following is some guesswork on how the TV encoder flicker
218 * filter/rescaler works:
219 *
220 * It seems to use some sort of resampling filter, it is controlled
221 * through the registers at NV_PTV_HFILTER and NV_PTV_VFILTER, they
222 * control the horizontal and vertical stage respectively, there is
223 * also NV_PTV_HFILTER2 the blob fills identically to NV_PTV_HFILTER,
224 * but they seem to do nothing. A rough guess might be that they could
225 * be used to independently control the filtering of each interlaced
226 * field, but I don't know how they are enabled. The whole filtering
227 * process seems to be disabled with bits 26:27 of PTV_200, but we
228 * aren't doing that.
229 *
230 * The layout of both register sets is the same:
231 *
232 * A: [BASE+0x18]...[BASE+0x0] [BASE+0x58]..[BASE+0x40]
233 * B: [BASE+0x34]...[BASE+0x1c] [BASE+0x74]..[BASE+0x5c]
234 *
235 * Each coefficient is stored in bits [31],[15:9] in two's complement
236 * format. They seem to be some kind of weights used in a low-pass
237 * filter. Both A and B coefficients are applied to the 14 nearest
238 * samples on each side (Listed from nearest to furthermost. They
239 * roughly cover 2 framebuffer pixels on each side). They are
240 * probably multiplied with some more hardwired weights before being
241 * used: B-coefficients are applied the same on both sides,
242 * A-coefficients are inverted before being applied to the opposite
243 * side.
244 *
245 * After all the hassle, I got the following formula by empirical
246 * means...
247 */
248
249#define calc_overscan(o) interpolate(0x100, 0xe1, 0xc1, o)
250
251#define id1 (1LL << 8)
252#define id2 (1LL << 16)
253#define id3 (1LL << 24)
254#define id4 (1LL << 32)
255#define id5 (1LL << 48)
256
257static struct filter_params{
258 int64_t k1;
259 int64_t ki;
260 int64_t ki2;
261 int64_t ki3;
262 int64_t kr;
263 int64_t kir;
264 int64_t ki2r;
265 int64_t ki3r;
266 int64_t kf;
267 int64_t kif;
268 int64_t ki2f;
269 int64_t ki3f;
270 int64_t krf;
271 int64_t kirf;
272 int64_t ki2rf;
273 int64_t ki3rf;
274} fparams[2][4] = {
275 /* Horizontal filter parameters */
276 {
277 {64.311690 * id5, -39.516924 * id5, 6.586143 * id5, 0.000002 * id5,
278 0.051285 * id4, 26.168746 * id4, -4.361449 * id4, -0.000001 * id4,
279 9.308169 * id3, 78.180965 * id3, -13.030158 * id3, -0.000001 * id3,
280 -8.801540 * id1, -46.572890 * id1, 7.762145 * id1, -0.000000 * id1},
281 {-44.565569 * id5, -68.081246 * id5, 39.812074 * id5, -4.009316 * id5,
282 29.832207 * id4, 50.047322 * id4, -25.380017 * id4, 2.546422 * id4,
283 104.605622 * id3, 141.908641 * id3, -74.322319 * id3, 7.484316 * id3,
284 -37.081621 * id1, -90.397510 * id1, 42.784229 * id1, -4.289952 * id1},
285 {-56.793244 * id5, 31.153584 * id5, -5.192247 * id5, -0.000003 * id5,
286 33.541131 * id4, -34.149302 * id4, 5.691537 * id4, 0.000002 * id4,
287 87.196610 * id3, -88.995169 * id3, 14.832456 * id3, 0.000012 * id3,
288 17.288138 * id1, 71.864786 * id1, -11.977408 * id1, -0.000009 * id1},
289 {51.787796 * id5, 21.211771 * id5, -18.993730 * id5, 1.853310 * id5,
290 -41.470726 * id4, -17.775823 * id4, 13.057821 * id4, -1.15823 * id4,
291 -154.235673 * id3, -44.878641 * id3, 40.656077 * id3, -3.695595 * id3,
292 112.201065 * id1, 39.992155 * id1, -25.155714 * id1, 2.113984 * id1},
293 },
294
295 /* Vertical filter parameters */
296 {
297 {67.601979 * id5, 0.428319 * id5, -0.071318 * id5, -0.000012 * id5,
298 -3.402339 * id4, 0.000209 * id4, -0.000092 * id4, 0.000010 * id4,
299 -9.180996 * id3, 6.111270 * id3, -1.024457 * id3, 0.001043 * id3,
300 6.060315 * id1, -0.017425 * id1, 0.007830 * id1, -0.000869 * id1},
301 {6.755647 * id5, 5.841348 * id5, 1.469734 * id5, -0.149656 * id5,
302 8.293120 * id4, -1.192888 * id4, -0.947652 * id4, 0.094507 * id4,
303 37.526655 * id3, 10.257875 * id3, -10.823275 * id3, 1.081497 * id3,
304 -2.361928 * id1, -2.059432 * id1, 1.840671 * id1, -0.168100 * id1},
305 {-14.780391 * id5, -16.042148 * id5, 2.673692 * id5, -0.000000 * id5,
306 39.541978 * id4, 5.680053 * id4, -0.946676 * id4, 0.000000 * id4,
307 152.994486 * id3, 12.625439 * id3, -2.119579 * id3, 0.002708 * id3,
308 -38.125089 * id1, -0.855880 * id1, 0.155359 * id1, -0.002245 * id1},
309 {-27.476193 * id5, -1.454976 * id5, 1.286557 * id5, 0.025346 * id5,
310 20.687300 * id4, 3.014003 * id4, -0.557786 * id4, -0.01311 * id4,
311 60.008737 * id3, -0.738273 * id3, 5.408217 * id3, -0.796798 * id3,
312 -17.296835 * id1, 4.438577 * id1, -2.809420 * id1, 0.385491 * id1},
313 }
314};
315
316static void tv_setup_filter(struct drm_encoder *encoder)
317{
318 struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
319 struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
320 struct drm_display_mode *mode = &encoder->crtc->mode;
321 uint32_t (*filters[])[4][7] = {&tv_enc->state.hfilter,
322 &tv_enc->state.vfilter};
323 int i, j, k;
324 int32_t overscan = calc_overscan(tv_enc->overscan);
325 int64_t flicker = (tv_enc->flicker - 50) * (id3 / 100);
326 uint64_t rs[] = {mode->hdisplay * id3,
327 mode->vdisplay * id3};
328
329 do_div(rs[0], overscan * tv_norm->tv_enc_mode.hdisplay);
330 do_div(rs[1], overscan * tv_norm->tv_enc_mode.vdisplay);
331
332 for (k = 0; k < 2; k++) {
333 rs[k] = max((int64_t)rs[k], id2);
334
335 for (j = 0; j < 4; j++) {
336 struct filter_params *p = &fparams[k][j];
337
338 for (i = 0; i < 7; i++) {
339 int64_t c = (p->k1 + p->ki*i + p->ki2*i*i + p->ki3*i*i*i)
340 + (p->kr + p->kir*i + p->ki2r*i*i + p->ki3r*i*i*i)*rs[k]
341 + (p->kf + p->kif*i + p->ki2f*i*i + p->ki3f*i*i*i)*flicker
342 + (p->krf + p->kirf*i + p->ki2rf*i*i + p->ki3rf*i*i*i)*flicker*rs[k];
343
344 (*filters[k])[j][i] = (c + id5/2) >> 39 & (0x1 << 31 | 0x7f << 9);
345 }
346 }
347 }
348}
349
350/* Hardware state saving/restoring */
351
352static void tv_save_filter(struct drm_device *dev, uint32_t base, uint32_t regs[4][7])
353{
354 int i, j;
355 uint32_t offsets[] = { base, base + 0x1c, base + 0x40, base + 0x5c };
356
357 for (i = 0; i < 4; i++) {
358 for (j = 0; j < 7; j++)
359 regs[i][j] = nv_read_ptv(dev, offsets[i]+4*j);
360 }
361}
362
363static void tv_load_filter(struct drm_device *dev, uint32_t base, uint32_t regs[4][7])
364{
365 int i, j;
366 uint32_t offsets[] = { base, base + 0x1c, base + 0x40, base + 0x5c };
367
368 for (i = 0; i < 4; i++) {
369 for (j = 0; j < 7; j++)
370 nv_write_ptv(dev, offsets[i]+4*j, regs[i][j]);
371 }
372}
373
374void nv17_tv_state_save(struct drm_device *dev, struct nv17_tv_state *state)
375{
376 int i;
377
378 for (i = 0; i < 0x40; i++)
379 state->tv_enc[i] = nv_read_tv_enc(dev, i);
380
381 tv_save_filter(dev, NV_PTV_HFILTER, state->hfilter);
382 tv_save_filter(dev, NV_PTV_HFILTER2, state->hfilter2);
383 tv_save_filter(dev, NV_PTV_VFILTER, state->vfilter);
384
385 nv_save_ptv(dev, state, 200);
386 nv_save_ptv(dev, state, 204);
387 nv_save_ptv(dev, state, 208);
388 nv_save_ptv(dev, state, 20c);
389 nv_save_ptv(dev, state, 304);
390 nv_save_ptv(dev, state, 500);
391 nv_save_ptv(dev, state, 504);
392 nv_save_ptv(dev, state, 508);
393 nv_save_ptv(dev, state, 600);
394 nv_save_ptv(dev, state, 604);
395 nv_save_ptv(dev, state, 608);
396 nv_save_ptv(dev, state, 60c);
397 nv_save_ptv(dev, state, 610);
398 nv_save_ptv(dev, state, 614);
399}
400
401void nv17_tv_state_load(struct drm_device *dev, struct nv17_tv_state *state)
402{
403 int i;
404
405 for (i = 0; i < 0x40; i++)
406 nv_write_tv_enc(dev, i, state->tv_enc[i]);
407
408 tv_load_filter(dev, NV_PTV_HFILTER, state->hfilter);
409 tv_load_filter(dev, NV_PTV_HFILTER2, state->hfilter2);
410 tv_load_filter(dev, NV_PTV_VFILTER, state->vfilter);
411
412 nv_load_ptv(dev, state, 200);
413 nv_load_ptv(dev, state, 204);
414 nv_load_ptv(dev, state, 208);
415 nv_load_ptv(dev, state, 20c);
416 nv_load_ptv(dev, state, 304);
417 nv_load_ptv(dev, state, 500);
418 nv_load_ptv(dev, state, 504);
419 nv_load_ptv(dev, state, 508);
420 nv_load_ptv(dev, state, 600);
421 nv_load_ptv(dev, state, 604);
422 nv_load_ptv(dev, state, 608);
423 nv_load_ptv(dev, state, 60c);
424 nv_load_ptv(dev, state, 610);
425 nv_load_ptv(dev, state, 614);
426
427 /* This is required for some settings to kick in. */
428 nv_write_tv_enc(dev, 0x3e, 1);
429 nv_write_tv_enc(dev, 0x3e, 0);
430}
431
432/* Timings similar to the ones the blob sets */
433
434struct drm_display_mode nv17_tv_modes[] = {
435 { DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 0,
436 320, 344, 392, 560, 0, 200, 200, 202, 220, 0,
437 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC
438 | DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_CLKDIV2) },
439 { DRM_MODE("320x240", DRM_MODE_TYPE_DRIVER, 0,
440 320, 344, 392, 560, 0, 240, 240, 246, 263, 0,
441 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC
442 | DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_CLKDIV2) },
443 { DRM_MODE("400x300", DRM_MODE_TYPE_DRIVER, 0,
444 400, 432, 496, 640, 0, 300, 300, 303, 314, 0,
445 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC
446 | DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_CLKDIV2) },
447 { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 0,
448 640, 672, 768, 880, 0, 480, 480, 492, 525, 0,
449 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
450 { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 0,
451 720, 752, 872, 960, 0, 480, 480, 493, 525, 0,
452 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
453 { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 0,
454 720, 776, 856, 960, 0, 576, 576, 588, 597, 0,
455 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
456 { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 0,
457 800, 840, 920, 1040, 0, 600, 600, 604, 618, 0,
458 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
459 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 0,
460 1024, 1064, 1200, 1344, 0, 768, 768, 777, 806, 0,
461 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
462 {}
463};
464
465void nv17_tv_update_properties(struct drm_encoder *encoder)
466{
467 struct drm_device *dev = encoder->dev;
468 struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
469 struct nv17_tv_state *regs = &tv_enc->state;
470 struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
471 int subconnector = tv_enc->select_subconnector ?
472 tv_enc->select_subconnector :
473 tv_enc->subconnector;
474
475 switch (subconnector) {
476 case DRM_MODE_SUBCONNECTOR_Composite:
477 {
478 regs->ptv_204 = 0x2;
479
480 /* The composite connector may be found on either pin. */
481 if (tv_enc->pin_mask & 0x4)
482 regs->ptv_204 |= 0x010000;
483 else if (tv_enc->pin_mask & 0x2)
484 regs->ptv_204 |= 0x100000;
485 else
486 regs->ptv_204 |= 0x110000;
487
488 regs->tv_enc[0x7] = 0x10;
489 break;
490 }
491 case DRM_MODE_SUBCONNECTOR_SVIDEO:
492 regs->ptv_204 = 0x11012;
493 regs->tv_enc[0x7] = 0x18;
494 break;
495
496 case DRM_MODE_SUBCONNECTOR_Component:
497 regs->ptv_204 = 0x111333;
498 regs->tv_enc[0x7] = 0x14;
499 break;
500
501 case DRM_MODE_SUBCONNECTOR_SCART:
502 regs->ptv_204 = 0x111012;
503 regs->tv_enc[0x7] = 0x18;
504 break;
505 }
506
507 regs->tv_enc[0x20] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x20], 255,
508 tv_enc->saturation);
509 regs->tv_enc[0x22] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x22], 255,
510 tv_enc->saturation);
511 regs->tv_enc[0x25] = tv_enc->hue * 255 / 100;
512
513 nv_load_ptv(dev, regs, 204);
514 nv_load_tv_enc(dev, regs, 7);
515 nv_load_tv_enc(dev, regs, 20);
516 nv_load_tv_enc(dev, regs, 22);
517 nv_load_tv_enc(dev, regs, 25);
518}
519
520void nv17_tv_update_rescaler(struct drm_encoder *encoder)
521{
522 struct drm_device *dev = encoder->dev;
523 struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
524 struct nv17_tv_state *regs = &tv_enc->state;
525
526 regs->ptv_208 = 0x40 | (calc_overscan(tv_enc->overscan) << 8);
527
528 tv_setup_filter(encoder);
529
530 nv_load_ptv(dev, regs, 208);
531 tv_load_filter(dev, NV_PTV_HFILTER, regs->hfilter);
532 tv_load_filter(dev, NV_PTV_HFILTER2, regs->hfilter2);
533 tv_load_filter(dev, NV_PTV_VFILTER, regs->vfilter);
534}
535
536void nv17_ctv_update_rescaler(struct drm_encoder *encoder)
537{
538 struct drm_device *dev = encoder->dev;
539 struct drm_nouveau_private *dev_priv = dev->dev_private;
540 struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
541 int head = nouveau_crtc(encoder->crtc)->index;
542 struct nv04_crtc_reg *regs = &dev_priv->mode_reg.crtc_reg[head];
543 struct drm_display_mode *crtc_mode = &encoder->crtc->mode;
544 struct drm_display_mode *output_mode = &get_tv_norm(encoder)->ctv_enc_mode.mode;
545 int overscan, hmargin, vmargin, hratio, vratio;
546
547 /* The rescaler doesn't do the right thing for interlaced modes. */
548 if (output_mode->flags & DRM_MODE_FLAG_INTERLACE)
549 overscan = 100;
550 else
551 overscan = tv_enc->overscan;
552
553 hmargin = (output_mode->hdisplay - crtc_mode->hdisplay) / 2;
554 vmargin = (output_mode->vdisplay - crtc_mode->vdisplay) / 2;
555
556 hmargin = interpolate(0, min(hmargin, output_mode->hdisplay/20), hmargin,
557 overscan);
558 vmargin = interpolate(0, min(vmargin, output_mode->vdisplay/20), vmargin,
559 overscan);
560
561 hratio = crtc_mode->hdisplay * 0x800 / (output_mode->hdisplay - 2*hmargin);
562 vratio = crtc_mode->vdisplay * 0x800 / (output_mode->vdisplay - 2*vmargin) & ~3;
563
564 regs->fp_horiz_regs[FP_VALID_START] = hmargin;
565 regs->fp_horiz_regs[FP_VALID_END] = output_mode->hdisplay - hmargin - 1;
566 regs->fp_vert_regs[FP_VALID_START] = vmargin;
567 regs->fp_vert_regs[FP_VALID_END] = output_mode->vdisplay - vmargin - 1;
568
569 regs->fp_debug_1 = NV_PRAMDAC_FP_DEBUG_1_YSCALE_TESTMODE_ENABLE |
570 XLATE(vratio, 0, NV_PRAMDAC_FP_DEBUG_1_YSCALE_VALUE) |
571 NV_PRAMDAC_FP_DEBUG_1_XSCALE_TESTMODE_ENABLE |
572 XLATE(hratio, 0, NV_PRAMDAC_FP_DEBUG_1_XSCALE_VALUE);
573
574 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HVALID_START,
575 regs->fp_horiz_regs[FP_VALID_START]);
576 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HVALID_END,
577 regs->fp_horiz_regs[FP_VALID_END]);
578 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_VVALID_START,
579 regs->fp_vert_regs[FP_VALID_START]);
580 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_VVALID_END,
581 regs->fp_vert_regs[FP_VALID_END]);
582 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1, regs->fp_debug_1);
583}
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c
new file mode 100644
index 000000000000..18ba74f19703
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv20_graph.c
@@ -0,0 +1,780 @@
1#include "drmP.h"
2#include "drm.h"
3#include "nouveau_drv.h"
4#include "nouveau_drm.h"
5
6/*
7 * NV20
8 * -----
9 * There are 3 families :
10 * NV20 is 0x10de:0x020*
11 * NV25/28 is 0x10de:0x025* / 0x10de:0x028*
12 * NV2A is 0x10de:0x02A0
13 *
14 * NV30
15 * -----
16 * There are 3 families :
17 * NV30/31 is 0x10de:0x030* / 0x10de:0x031*
18 * NV34 is 0x10de:0x032*
19 * NV35/36 is 0x10de:0x033* / 0x10de:0x034*
20 *
21 * Not seen in the wild, no dumps (probably NV35) :
22 * NV37 is 0x10de:0x00fc, 0x10de:0x00fd
23 * NV38 is 0x10de:0x0333, 0x10de:0x00fe
24 *
25 */
26
27#define NV20_GRCTX_SIZE (3580*4)
28#define NV25_GRCTX_SIZE (3529*4)
29#define NV2A_GRCTX_SIZE (3500*4)
30
31#define NV30_31_GRCTX_SIZE (24392)
32#define NV34_GRCTX_SIZE (18140)
33#define NV35_36_GRCTX_SIZE (22396)
34
/*
 * Fill in the power-on default NV20 PGRAPH context image.
 *
 * The gpuobj is allocated with NVOBJ_FLAG_ZERO_ALLOC, so only the
 * non-zero words are written here.  All values are magic defaults
 * taken from hardware state dumps -- do not reorder or "simplify".
 * Offsets are byte offsets into the image; nv_wo32() takes a word
 * index, hence the /4.
 */
static void
nv20_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
	int i;	/* byte offset cursor for the ranged writes below */

	nv_wo32(dev, ctx, 0x033c/4, 0xffff0000);
	nv_wo32(dev, ctx, 0x03a0/4, 0x0fff0000);
	nv_wo32(dev, ctx, 0x03a4/4, 0x0fff0000);
	nv_wo32(dev, ctx, 0x047c/4, 0x00000101);
	nv_wo32(dev, ctx, 0x0490/4, 0x00000111);
	nv_wo32(dev, ctx, 0x04a8/4, 0x44400000);
	for (i = 0x04d4; i <= 0x04e0; i += 4)
		nv_wo32(dev, ctx, i/4, 0x00030303);
	for (i = 0x04f4; i <= 0x0500; i += 4)
		nv_wo32(dev, ctx, i/4, 0x00080000);
	for (i = 0x050c; i <= 0x0518; i += 4)
		nv_wo32(dev, ctx, i/4, 0x01012000);
	for (i = 0x051c; i <= 0x0528; i += 4)
		nv_wo32(dev, ctx, i/4, 0x000105b8);
	for (i = 0x052c; i <= 0x0538; i += 4)
		nv_wo32(dev, ctx, i/4, 0x00080008);
	for (i = 0x055c; i <= 0x0598; i += 4)
		nv_wo32(dev, ctx, i/4, 0x07ff0000);
	nv_wo32(dev, ctx, 0x05a4/4, 0x4b7fffff);
	nv_wo32(dev, ctx, 0x05fc/4, 0x00000001);
	nv_wo32(dev, ctx, 0x0604/4, 0x00004000);
	nv_wo32(dev, ctx, 0x0610/4, 0x00000001);
	nv_wo32(dev, ctx, 0x0618/4, 0x00040000);
	nv_wo32(dev, ctx, 0x061c/4, 0x00010000);
	/* The same three-word pattern is replicated every 16 bytes
	 * across this range. */
	for (i = 0x1c1c; i <= 0x248c; i += 16) {
		nv_wo32(dev, ctx, (i + 0)/4, 0x10700ff9);
		nv_wo32(dev, ctx, (i + 4)/4, 0x0436086c);
		nv_wo32(dev, ctx, (i + 8)/4, 0x000c001b);
	}
	/* 0x3f800000/0x3f000000/0x40000000/0xbf800000 are the IEEE-754
	 * encodings of 1.0, 0.5, 2.0 and -1.0. */
	nv_wo32(dev, ctx, 0x281c/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x2830/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x285c/4, 0x40000000);
	nv_wo32(dev, ctx, 0x2860/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x2864/4, 0x3f000000);
	nv_wo32(dev, ctx, 0x286c/4, 0x40000000);
	nv_wo32(dev, ctx, 0x2870/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x2878/4, 0xbf800000);
	nv_wo32(dev, ctx, 0x2880/4, 0xbf800000);
	nv_wo32(dev, ctx, 0x34a4/4, 0x000fe000);
	nv_wo32(dev, ctx, 0x3530/4, 0x000003f8);
	nv_wo32(dev, ctx, 0x3540/4, 0x002fe000);
	for (i = 0x355c; i <= 0x3578; i += 4)
		nv_wo32(dev, ctx, i/4, 0x001c527c);
}
84
/*
 * Fill in the power-on default NV25/NV28 PGRAPH context image.
 * Same scheme as nv20_graph_context_init(): only non-zero words of a
 * zero-allocated object are written, values come from hardware dumps.
 */
static void
nv25_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
	int i;	/* byte offset cursor for the ranged writes below */

	nv_wo32(dev, ctx, 0x035c/4, 0xffff0000);
	nv_wo32(dev, ctx, 0x03c0/4, 0x0fff0000);
	nv_wo32(dev, ctx, 0x03c4/4, 0x0fff0000);
	nv_wo32(dev, ctx, 0x049c/4, 0x00000101);
	nv_wo32(dev, ctx, 0x04b0/4, 0x00000111);
	nv_wo32(dev, ctx, 0x04c8/4, 0x00000080);
	nv_wo32(dev, ctx, 0x04cc/4, 0xffff0000);
	nv_wo32(dev, ctx, 0x04d0/4, 0x00000001);
	nv_wo32(dev, ctx, 0x04e4/4, 0x44400000);
	nv_wo32(dev, ctx, 0x04fc/4, 0x4b800000);
	for (i = 0x0510; i <= 0x051c; i += 4)
		nv_wo32(dev, ctx, i/4, 0x00030303);
	for (i = 0x0530; i <= 0x053c; i += 4)
		nv_wo32(dev, ctx, i/4, 0x00080000);
	for (i = 0x0548; i <= 0x0554; i += 4)
		nv_wo32(dev, ctx, i/4, 0x01012000);
	for (i = 0x0558; i <= 0x0564; i += 4)
		nv_wo32(dev, ctx, i/4, 0x000105b8);
	for (i = 0x0568; i <= 0x0574; i += 4)
		nv_wo32(dev, ctx, i/4, 0x00080008);
	for (i = 0x0598; i <= 0x05d4; i += 4)
		nv_wo32(dev, ctx, i/4, 0x07ff0000);
	nv_wo32(dev, ctx, 0x05e0/4, 0x4b7fffff);
	nv_wo32(dev, ctx, 0x0620/4, 0x00000080);
	nv_wo32(dev, ctx, 0x0624/4, 0x30201000);
	nv_wo32(dev, ctx, 0x0628/4, 0x70605040);
	nv_wo32(dev, ctx, 0x062c/4, 0xb0a09080);
	nv_wo32(dev, ctx, 0x0630/4, 0xf0e0d0c0);
	nv_wo32(dev, ctx, 0x0664/4, 0x00000001);
	nv_wo32(dev, ctx, 0x066c/4, 0x00004000);
	nv_wo32(dev, ctx, 0x0678/4, 0x00000001);
	nv_wo32(dev, ctx, 0x0680/4, 0x00040000);
	nv_wo32(dev, ctx, 0x0684/4, 0x00010000);
	/* Same three-word pattern replicated every 16 bytes. */
	for (i = 0x1b04; i <= 0x2374; i += 16) {
		nv_wo32(dev, ctx, (i + 0)/4, 0x10700ff9);
		nv_wo32(dev, ctx, (i + 4)/4, 0x0436086c);
		nv_wo32(dev, ctx, (i + 8)/4, 0x000c001b);
	}
	/* IEEE-754 float constants: 1.0, 2.0, 0.5, -1.0. */
	nv_wo32(dev, ctx, 0x2704/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x2718/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x2744/4, 0x40000000);
	nv_wo32(dev, ctx, 0x2748/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x274c/4, 0x3f000000);
	nv_wo32(dev, ctx, 0x2754/4, 0x40000000);
	nv_wo32(dev, ctx, 0x2758/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x2760/4, 0xbf800000);
	nv_wo32(dev, ctx, 0x2768/4, 0xbf800000);
	nv_wo32(dev, ctx, 0x308c/4, 0x000fe000);
	nv_wo32(dev, ctx, 0x3108/4, 0x000003f8);
	nv_wo32(dev, ctx, 0x3468/4, 0x002fe000);
	for (i = 0x3484; i <= 0x34a0; i += 4)
		nv_wo32(dev, ctx, i/4, 0x001c527c);
}
143
/*
 * Fill in the power-on default NV2A (original Xbox GPU) PGRAPH context
 * image.  The leading section is identical to the NV20 layout; only the
 * upper offsets differ.  Values come from hardware dumps.
 */
static void
nv2a_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
	int i;	/* byte offset cursor for the ranged writes below */

	nv_wo32(dev, ctx, 0x033c/4, 0xffff0000);
	nv_wo32(dev, ctx, 0x03a0/4, 0x0fff0000);
	nv_wo32(dev, ctx, 0x03a4/4, 0x0fff0000);
	nv_wo32(dev, ctx, 0x047c/4, 0x00000101);
	nv_wo32(dev, ctx, 0x0490/4, 0x00000111);
	nv_wo32(dev, ctx, 0x04a8/4, 0x44400000);
	for (i = 0x04d4; i <= 0x04e0; i += 4)
		nv_wo32(dev, ctx, i/4, 0x00030303);
	for (i = 0x04f4; i <= 0x0500; i += 4)
		nv_wo32(dev, ctx, i/4, 0x00080000);
	for (i = 0x050c; i <= 0x0518; i += 4)
		nv_wo32(dev, ctx, i/4, 0x01012000);
	for (i = 0x051c; i <= 0x0528; i += 4)
		nv_wo32(dev, ctx, i/4, 0x000105b8);
	for (i = 0x052c; i <= 0x0538; i += 4)
		nv_wo32(dev, ctx, i/4, 0x00080008);
	for (i = 0x055c; i <= 0x0598; i += 4)
		nv_wo32(dev, ctx, i/4, 0x07ff0000);
	nv_wo32(dev, ctx, 0x05a4/4, 0x4b7fffff);
	nv_wo32(dev, ctx, 0x05fc/4, 0x00000001);
	nv_wo32(dev, ctx, 0x0604/4, 0x00004000);
	nv_wo32(dev, ctx, 0x0610/4, 0x00000001);
	nv_wo32(dev, ctx, 0x0618/4, 0x00040000);
	nv_wo32(dev, ctx, 0x061c/4, 0x00010000);
	/* Same three-word pattern replicated every 16 bytes; the range
	 * bounds were flagged as unverified by the original author. */
	for (i = 0x1a9c; i <= 0x22fc; i += 16) { /*XXX: check!! */
		nv_wo32(dev, ctx, (i + 0)/4, 0x10700ff9);
		nv_wo32(dev, ctx, (i + 4)/4, 0x0436086c);
		nv_wo32(dev, ctx, (i + 8)/4, 0x000c001b);
	}
	/* IEEE-754 float constants: 1.0, 2.0, 0.5, -1.0. */
	nv_wo32(dev, ctx, 0x269c/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x26b0/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x26dc/4, 0x40000000);
	nv_wo32(dev, ctx, 0x26e0/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x26e4/4, 0x3f000000);
	nv_wo32(dev, ctx, 0x26ec/4, 0x40000000);
	nv_wo32(dev, ctx, 0x26f0/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x26f8/4, 0xbf800000);
	nv_wo32(dev, ctx, 0x2700/4, 0xbf800000);
	nv_wo32(dev, ctx, 0x3024/4, 0x000fe000);
	nv_wo32(dev, ctx, 0x30a0/4, 0x000003f8);
	nv_wo32(dev, ctx, 0x33fc/4, 0x002fe000);
	for (i = 0x341c; i <= 0x3438; i += 4)
		nv_wo32(dev, ctx, i/4, 0x001c527c);
}
193
/*
 * Fill in the power-on default NV30/NV31 PGRAPH context image.
 * Note the ranged loops here use exclusive upper bounds (i < end),
 * unlike the NV2x initialisers above which use inclusive bounds.
 */
static void
nv30_31_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
	int i;	/* byte offset cursor for the ranged writes below */

	nv_wo32(dev, ctx, 0x0410/4, 0x00000101);
	nv_wo32(dev, ctx, 0x0424/4, 0x00000111);
	nv_wo32(dev, ctx, 0x0428/4, 0x00000060);
	nv_wo32(dev, ctx, 0x0444/4, 0x00000080);
	nv_wo32(dev, ctx, 0x0448/4, 0xffff0000);
	nv_wo32(dev, ctx, 0x044c/4, 0x00000001);
	nv_wo32(dev, ctx, 0x0460/4, 0x44400000);
	nv_wo32(dev, ctx, 0x048c/4, 0xffff0000);
	for (i = 0x04e0; i < 0x04e8; i += 4)
		nv_wo32(dev, ctx, i/4, 0x0fff0000);
	nv_wo32(dev, ctx, 0x04ec/4, 0x00011100);
	for (i = 0x0508; i < 0x0548; i += 4)
		nv_wo32(dev, ctx, i/4, 0x07ff0000);
	nv_wo32(dev, ctx, 0x0550/4, 0x4b7fffff);
	nv_wo32(dev, ctx, 0x058c/4, 0x00000080);
	nv_wo32(dev, ctx, 0x0590/4, 0x30201000);
	nv_wo32(dev, ctx, 0x0594/4, 0x70605040);
	nv_wo32(dev, ctx, 0x0598/4, 0xb8a89888);
	nv_wo32(dev, ctx, 0x059c/4, 0xf8e8d8c8);
	nv_wo32(dev, ctx, 0x05b0/4, 0xb0000000);
	for (i = 0x0600; i < 0x0640; i += 4)
		nv_wo32(dev, ctx, i/4, 0x00010588);
	for (i = 0x0640; i < 0x0680; i += 4)
		nv_wo32(dev, ctx, i/4, 0x00030303);
	for (i = 0x06c0; i < 0x0700; i += 4)
		nv_wo32(dev, ctx, i/4, 0x0008aae4);
	for (i = 0x0700; i < 0x0740; i += 4)
		nv_wo32(dev, ctx, i/4, 0x01012000);
	for (i = 0x0740; i < 0x0780; i += 4)
		nv_wo32(dev, ctx, i/4, 0x00080008);
	nv_wo32(dev, ctx, 0x085c/4, 0x00040000);
	nv_wo32(dev, ctx, 0x0860/4, 0x00010000);
	for (i = 0x0864; i < 0x0874; i += 4)
		nv_wo32(dev, ctx, i/4, 0x00040004);
	/* Same three-word pattern replicated every 16 bytes. */
	for (i = 0x1f18; i <= 0x3088 ; i += 16) {
		nv_wo32(dev, ctx, i/4 + 0, 0x10700ff9);
		nv_wo32(dev, ctx, i/4 + 1, 0x0436086c);
		nv_wo32(dev, ctx, i/4 + 2, 0x000c001b);
	}
	for (i = 0x30b8; i < 0x30c8; i += 4)
		nv_wo32(dev, ctx, i/4, 0x0000ffff);
	/* IEEE-754 float constants: 1.0, 2.0, 0.5, -1.0. */
	nv_wo32(dev, ctx, 0x344c/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x3808/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x381c/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x3848/4, 0x40000000);
	nv_wo32(dev, ctx, 0x384c/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x3850/4, 0x3f000000);
	nv_wo32(dev, ctx, 0x3858/4, 0x40000000);
	nv_wo32(dev, ctx, 0x385c/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x3864/4, 0xbf800000);
	nv_wo32(dev, ctx, 0x386c/4, 0xbf800000);
}
251
/*
 * Fill in the power-on default NV34 PGRAPH context image.  Structure
 * parallels nv30_31_graph_context_init(), shifted to NV34 offsets.
 */
static void
nv34_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
	int i;	/* byte offset cursor for the ranged writes below */

	nv_wo32(dev, ctx, 0x040c/4, 0x01000101);
	nv_wo32(dev, ctx, 0x0420/4, 0x00000111);
	nv_wo32(dev, ctx, 0x0424/4, 0x00000060);
	nv_wo32(dev, ctx, 0x0440/4, 0x00000080);
	nv_wo32(dev, ctx, 0x0444/4, 0xffff0000);
	nv_wo32(dev, ctx, 0x0448/4, 0x00000001);
	nv_wo32(dev, ctx, 0x045c/4, 0x44400000);
	nv_wo32(dev, ctx, 0x0480/4, 0xffff0000);
	for (i = 0x04d4; i < 0x04dc; i += 4)
		nv_wo32(dev, ctx, i/4, 0x0fff0000);
	nv_wo32(dev, ctx, 0x04e0/4, 0x00011100);
	for (i = 0x04fc; i < 0x053c; i += 4)
		nv_wo32(dev, ctx, i/4, 0x07ff0000);
	nv_wo32(dev, ctx, 0x0544/4, 0x4b7fffff);
	nv_wo32(dev, ctx, 0x057c/4, 0x00000080);
	nv_wo32(dev, ctx, 0x0580/4, 0x30201000);
	nv_wo32(dev, ctx, 0x0584/4, 0x70605040);
	nv_wo32(dev, ctx, 0x0588/4, 0xb8a89888);
	nv_wo32(dev, ctx, 0x058c/4, 0xf8e8d8c8);
	nv_wo32(dev, ctx, 0x05a0/4, 0xb0000000);
	for (i = 0x05f0; i < 0x0630; i += 4)
		nv_wo32(dev, ctx, i/4, 0x00010588);
	for (i = 0x0630; i < 0x0670; i += 4)
		nv_wo32(dev, ctx, i/4, 0x00030303);
	for (i = 0x06b0; i < 0x06f0; i += 4)
		nv_wo32(dev, ctx, i/4, 0x0008aae4);
	for (i = 0x06f0; i < 0x0730; i += 4)
		nv_wo32(dev, ctx, i/4, 0x01012000);
	for (i = 0x0730; i < 0x0770; i += 4)
		nv_wo32(dev, ctx, i/4, 0x00080008);
	nv_wo32(dev, ctx, 0x0850/4, 0x00040000);
	nv_wo32(dev, ctx, 0x0854/4, 0x00010000);
	for (i = 0x0858; i < 0x0868; i += 4)
		nv_wo32(dev, ctx, i/4, 0x00040004);
	/* Same three-word pattern replicated every 16 bytes. */
	for (i = 0x15ac; i <= 0x271c ; i += 16) {
		nv_wo32(dev, ctx, i/4 + 0, 0x10700ff9);
		nv_wo32(dev, ctx, i/4 + 1, 0x0436086c);
		nv_wo32(dev, ctx, i/4 + 2, 0x000c001b);
	}
	for (i = 0x274c; i < 0x275c; i += 4)
		nv_wo32(dev, ctx, i/4, 0x0000ffff);
	/* IEEE-754 float constants: 1.0, 2.0, 0.5, -1.0. */
	nv_wo32(dev, ctx, 0x2ae0/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x2e9c/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x2eb0/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x2edc/4, 0x40000000);
	nv_wo32(dev, ctx, 0x2ee0/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x2ee4/4, 0x3f000000);
	nv_wo32(dev, ctx, 0x2eec/4, 0x40000000);
	nv_wo32(dev, ctx, 0x2ef0/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x2ef8/4, 0xbf800000);
	nv_wo32(dev, ctx, 0x2f00/4, 0xbf800000);
}
309
/*
 * Fill in the power-on default NV35/NV36 PGRAPH context image.
 * Structure parallels the other NV3x initialisers above.
 */
static void
nv35_36_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
	int i;	/* byte offset cursor for the ranged writes below */

	nv_wo32(dev, ctx, 0x040c/4, 0x00000101);
	nv_wo32(dev, ctx, 0x0420/4, 0x00000111);
	nv_wo32(dev, ctx, 0x0424/4, 0x00000060);
	nv_wo32(dev, ctx, 0x0440/4, 0x00000080);
	nv_wo32(dev, ctx, 0x0444/4, 0xffff0000);
	nv_wo32(dev, ctx, 0x0448/4, 0x00000001);
	nv_wo32(dev, ctx, 0x045c/4, 0x44400000);
	nv_wo32(dev, ctx, 0x0488/4, 0xffff0000);
	for (i = 0x04dc; i < 0x04e4; i += 4)
		nv_wo32(dev, ctx, i/4, 0x0fff0000);
	nv_wo32(dev, ctx, 0x04e8/4, 0x00011100);
	for (i = 0x0504; i < 0x0544; i += 4)
		nv_wo32(dev, ctx, i/4, 0x07ff0000);
	nv_wo32(dev, ctx, 0x054c/4, 0x4b7fffff);
	nv_wo32(dev, ctx, 0x0588/4, 0x00000080);
	nv_wo32(dev, ctx, 0x058c/4, 0x30201000);
	nv_wo32(dev, ctx, 0x0590/4, 0x70605040);
	nv_wo32(dev, ctx, 0x0594/4, 0xb8a89888);
	nv_wo32(dev, ctx, 0x0598/4, 0xf8e8d8c8);
	nv_wo32(dev, ctx, 0x05ac/4, 0xb0000000);
	for (i = 0x0604; i < 0x0644; i += 4)
		nv_wo32(dev, ctx, i/4, 0x00010588);
	for (i = 0x0644; i < 0x0684; i += 4)
		nv_wo32(dev, ctx, i/4, 0x00030303);
	for (i = 0x06c4; i < 0x0704; i += 4)
		nv_wo32(dev, ctx, i/4, 0x0008aae4);
	for (i = 0x0704; i < 0x0744; i += 4)
		nv_wo32(dev, ctx, i/4, 0x01012000);
	for (i = 0x0744; i < 0x0784; i += 4)
		nv_wo32(dev, ctx, i/4, 0x00080008);
	nv_wo32(dev, ctx, 0x0860/4, 0x00040000);
	nv_wo32(dev, ctx, 0x0864/4, 0x00010000);
	for (i = 0x0868; i < 0x0878; i += 4)
		nv_wo32(dev, ctx, i/4, 0x00040004);
	/* Same three-word pattern replicated every 16 bytes. */
	for (i = 0x1f1c; i <= 0x308c ; i += 16) {
		nv_wo32(dev, ctx, i/4 + 0, 0x10700ff9);
		nv_wo32(dev, ctx, i/4 + 1, 0x0436086c);
		nv_wo32(dev, ctx, i/4 + 2, 0x000c001b);
	}
	for (i = 0x30bc; i < 0x30cc; i += 4)
		nv_wo32(dev, ctx, i/4, 0x0000ffff);
	/* IEEE-754 float constants: 1.0, 2.0, 0.5, -1.0. */
	nv_wo32(dev, ctx, 0x3450/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x380c/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x3820/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x384c/4, 0x40000000);
	nv_wo32(dev, ctx, 0x3850/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x3854/4, 0x3f000000);
	nv_wo32(dev, ctx, 0x385c/4, 0x40000000);
	nv_wo32(dev, ctx, 0x3860/4, 0x3f800000);
	nv_wo32(dev, ctx, 0x3868/4, 0xbf800000);
	nv_wo32(dev, ctx, 0x3870/4, 0xbf800000);
}
367
368int
369nv20_graph_create_context(struct nouveau_channel *chan)
370{
371 struct drm_device *dev = chan->dev;
372 struct drm_nouveau_private *dev_priv = dev->dev_private;
373 void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *);
374 unsigned int ctx_size;
375 unsigned int idoffs = 0x28/4;
376 int ret;
377
378 switch (dev_priv->chipset) {
379 case 0x20:
380 ctx_size = NV20_GRCTX_SIZE;
381 ctx_init = nv20_graph_context_init;
382 idoffs = 0;
383 break;
384 case 0x25:
385 case 0x28:
386 ctx_size = NV25_GRCTX_SIZE;
387 ctx_init = nv25_graph_context_init;
388 break;
389 case 0x2a:
390 ctx_size = NV2A_GRCTX_SIZE;
391 ctx_init = nv2a_graph_context_init;
392 idoffs = 0;
393 break;
394 case 0x30:
395 case 0x31:
396 ctx_size = NV30_31_GRCTX_SIZE;
397 ctx_init = nv30_31_graph_context_init;
398 break;
399 case 0x34:
400 ctx_size = NV34_GRCTX_SIZE;
401 ctx_init = nv34_graph_context_init;
402 break;
403 case 0x35:
404 case 0x36:
405 ctx_size = NV35_36_GRCTX_SIZE;
406 ctx_init = nv35_36_graph_context_init;
407 break;
408 default:
409 ctx_size = 0;
410 ctx_init = nv35_36_graph_context_init;
411 NV_ERROR(dev, "Please contact the devs if you want your NV%x"
412 " card to work\n", dev_priv->chipset);
413 return -ENOSYS;
414 break;
415 }
416
417 ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, ctx_size, 16,
418 NVOBJ_FLAG_ZERO_ALLOC,
419 &chan->ramin_grctx);
420 if (ret)
421 return ret;
422
423 /* Initialise default context values */
424 dev_priv->engine.instmem.prepare_access(dev, true);
425 ctx_init(dev, chan->ramin_grctx->gpuobj);
426
427 /* nv20: nv_wo32(dev, chan->ramin_grctx->gpuobj, 10, chan->id<<24); */
428 nv_wo32(dev, chan->ramin_grctx->gpuobj, idoffs,
429 (chan->id << 24) | 0x1); /* CTX_USER */
430
431 nv_wo32(dev, dev_priv->ctx_table->gpuobj, chan->id,
432 chan->ramin_grctx->instance >> 4);
433
434 dev_priv->engine.instmem.finish_access(dev);
435 return 0;
436}
437
438void
439nv20_graph_destroy_context(struct nouveau_channel *chan)
440{
441 struct drm_device *dev = chan->dev;
442 struct drm_nouveau_private *dev_priv = dev->dev_private;
443
444 if (chan->ramin_grctx)
445 nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
446
447 dev_priv->engine.instmem.prepare_access(dev, true);
448 nv_wo32(dev, dev_priv->ctx_table->gpuobj, chan->id, 0);
449 dev_priv->engine.instmem.finish_access(dev);
450}
451
452int
453nv20_graph_load_context(struct nouveau_channel *chan)
454{
455 struct drm_device *dev = chan->dev;
456 uint32_t inst;
457
458 if (!chan->ramin_grctx)
459 return -EINVAL;
460 inst = chan->ramin_grctx->instance >> 4;
461
462 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
463 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER,
464 NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD);
465 nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
466
467 nouveau_wait_for_idle(dev);
468 return 0;
469}
470
/*
 * Save the currently active PGRAPH context back into its channel's
 * context image, then retarget CTX_USER at the last channel id.
 *
 * Returns 0, including when no channel currently owns PGRAPH.
 */
int
nv20_graph_unload_context(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_channel *chan;
	uint32_t inst, tmp;

	chan = pgraph->channel(dev);
	if (!chan)
		return 0;
	/* NOTE(review): unlike nv20_graph_load_context(), ramin_grctx is
	 * dereferenced without a NULL check -- assumes every channel
	 * PGRAPH reports as active has a context image; confirm. */
	inst = chan->ramin_grctx->instance >> 4;	/* 16-byte units */

	/* Point the hardware at the image and trigger a context save. */
	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER,
		     NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE);

	nouveau_wait_for_idle(dev);

	/* Set the channel-id field (bits 31:24) of CTX_USER to the
	 * highest channel id -- presumably a parking/invalid value;
	 * confirm against PGRAPH docs. */
	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
	tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
	tmp |= (pfifo->channels - 1) << 24;
	nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
	return 0;
}
497
498static void
499nv20_graph_rdi(struct drm_device *dev)
500{
501 struct drm_nouveau_private *dev_priv = dev->dev_private;
502 int i, writecount = 32;
503 uint32_t rdi_index = 0x2c80000;
504
505 if (dev_priv->chipset == 0x20) {
506 rdi_index = 0x3d0000;
507 writecount = 15;
508 }
509
510 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, rdi_index);
511 for (i = 0; i < writecount; i++)
512 nv_wr32(dev, NV10_PGRAPH_RDI_DATA, 0);
513
514 nouveau_wait_for_idle(dev);
515}
516
/*
 * Bring up the NV2x PGRAPH engine: reset it via PMC, allocate the
 * global context pointer table on first init, and program debug,
 * tiling and RAM-config state.  The register sequence and values come
 * from hardware traces; statement order matters.
 *
 * Returns 0 on success or a negative errno from gpuobj allocation.
 */
int
nv20_graph_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv =
		(struct drm_nouveau_private *)dev->dev_private;
	uint32_t tmp, vramsz;
	int ret, i;

	/* Pulse PGRAPH through a PMC disable/enable cycle to reset it. */
	nv_wr32(dev, NV03_PMC_ENABLE,
		nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
	nv_wr32(dev, NV03_PMC_ENABLE,
		nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH);

	if (!dev_priv->ctx_table) {
		/* Create Context Pointer Table (one word per channel) */
		dev_priv->ctx_table_size = 32 * 4;
		ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0,
					     dev_priv->ctx_table_size, 16,
					     NVOBJ_FLAG_ZERO_ALLOC,
					     &dev_priv->ctx_table);
		if (ret)
			return ret;
	}

	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE,
		     dev_priv->ctx_table->instance >> 4);

	/* Zero per-chip PGRAPH state through the RDI window. */
	nv20_graph_rdi(dev);

	/* Ack any pending interrupts, then enable all sources. */
	nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);

	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x00118700);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */
	nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000);
	nv_wr32(dev, 0x40009C           , 0x00000040);

	/* NV25+ uses different magic values for these unnamed regs. */
	if (dev_priv->chipset >= 0x25) {
		nv_wr32(dev, 0x400890, 0x00080000);
		nv_wr32(dev, 0x400610, 0x304B1FB6);
		nv_wr32(dev, 0x400B80, 0x18B82880);
		nv_wr32(dev, 0x400B84, 0x44000000);
		nv_wr32(dev, 0x400098, 0x40000080);
		nv_wr32(dev, 0x400B88, 0x000000ff);
	} else {
		nv_wr32(dev, 0x400880, 0x00080000); /* 0x0008c7df */
		nv_wr32(dev, 0x400094, 0x00000005);
		nv_wr32(dev, 0x400B80, 0x45CAA208); /* 0x45eae20e */
		nv_wr32(dev, 0x400B84, 0x24000000);
		nv_wr32(dev, 0x400098, 0x00000040);
		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00038);
		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000030);
		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E10038);
		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000030);
	}

	/* copy tile info from PFB -- each region is mirrored both into
	 * PGRAPH's own tile registers and through the RDI window. */
	for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
		nv_wr32(dev, 0x00400904 + i * 0x10,
				nv_rd32(dev, NV10_PFB_TLIMIT(i)));
			/* which is NV40_PGRAPH_TLIMIT0(i) ?? */
		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + i * 4);
		nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
				nv_rd32(dev, NV10_PFB_TLIMIT(i)));
		nv_wr32(dev, 0x00400908 + i * 0x10,
				nv_rd32(dev, NV10_PFB_TSIZE(i)));
			/* which is NV40_PGRAPH_TSIZE0(i) ?? */
		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + i * 4);
		nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
				nv_rd32(dev, NV10_PFB_TSIZE(i)));
		nv_wr32(dev, 0x00400900 + i * 0x10,
				nv_rd32(dev, NV10_PFB_TILE(i)));
			/* which is NV40_PGRAPH_TILE0(i) ?? */
		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + i * 4);
		nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
				nv_rd32(dev, NV10_PFB_TILE(i)));
	}
	/* Mirror eight more PFB words (0x100300..) the same way. */
	for (i = 0; i < 8; i++) {
		nv_wr32(dev, 0x400980 + i * 4, nv_rd32(dev, 0x100300 + i * 4));
		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0090 + i * 4);
		nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
			nv_rd32(dev, 0x100300 + i * 4));
	}
	nv_wr32(dev, 0x4009a0, nv_rd32(dev, 0x100324));
	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, nv_rd32(dev, 0x100324));

	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
	nv_wr32(dev, NV10_PGRAPH_STATE      , 0xFFFFFFFF);

	tmp = nv_rd32(dev, NV10_PGRAPH_SURFACE) & 0x0007ff00;
	nv_wr32(dev, NV10_PGRAPH_SURFACE, tmp);
	tmp = nv_rd32(dev, NV10_PGRAPH_SURFACE) | 0x00020100;
	nv_wr32(dev, NV10_PGRAPH_SURFACE, tmp);

	/* begin RAM config */
	/* NOTE(review): vramsz already has 1 subtracted here, and another
	 * 1 is subtracted again at 0x400864/0x400868 below -- looks
	 * copied from a hardware trace, but worth confirming. */
	vramsz = drm_get_resource_len(dev, 0) - 1;
	nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
	nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
	nv_wr32(dev, NV10_PGRAPH_RDI_DATA , nv_rd32(dev, NV04_PFB_CFG0));
	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
	nv_wr32(dev, NV10_PGRAPH_RDI_DATA , nv_rd32(dev, NV04_PFB_CFG1));
	nv_wr32(dev, 0x400820, 0);
	nv_wr32(dev, 0x400824, 0);
	nv_wr32(dev, 0x400864, vramsz - 1);
	nv_wr32(dev, 0x400868, vramsz - 1);

	/* interesting.. the below overwrites some of the tile setup above.. */
	nv_wr32(dev, 0x400B20, 0x00000000);
	nv_wr32(dev, 0x400B04, 0xFFFFFFFF);

	/* Open the absolute user clip rectangle fully. */
	nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
	nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
	nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
	nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);

	return 0;
}
638
/*
 * Tear down PGRAPH state shared across channels: releases the global
 * context pointer table allocated by nv20_graph_init()/nv30_graph_init().
 */
void
nv20_graph_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	nouveau_gpuobj_ref_del(dev, &dev_priv->ctx_table);
}
646
/*
 * Bring up the NV3x PGRAPH engine.  Mirrors nv20_graph_init() but
 * with NV3x-specific debug values, and copies tile state directly
 * (no RDI mirroring).  Register values come from hardware traces;
 * statement order matters.
 *
 * Returns 0 on success or a negative errno from gpuobj allocation.
 */
int
nv30_graph_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret, i;

	/* Pulse PGRAPH through a PMC disable/enable cycle to reset it. */
	nv_wr32(dev, NV03_PMC_ENABLE,
		nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
	nv_wr32(dev, NV03_PMC_ENABLE,
		nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH);

	if (!dev_priv->ctx_table) {
		/* Create Context Pointer Table (one word per channel) */
		dev_priv->ctx_table_size = 32 * 4;
		ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0,
					     dev_priv->ctx_table_size, 16,
					     NVOBJ_FLAG_ZERO_ALLOC,
					     &dev_priv->ctx_table);
		if (ret)
			return ret;
	}

	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE,
			dev_priv->ctx_table->instance >> 4);

	/* Ack any pending interrupts, then enable all sources. */
	nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);

	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x401287c0);
	nv_wr32(dev, 0x400890, 0x01b463ff);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xf2de0475);
	nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00008000);
	nv_wr32(dev, NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6);
	nv_wr32(dev, 0x400B80, 0x1003d888);
	nv_wr32(dev, 0x400B84, 0x0c000000);
	nv_wr32(dev, 0x400098, 0x00000000);
	nv_wr32(dev, 0x40009C, 0x0005ad00);
	nv_wr32(dev, 0x400B88, 0x62ff00ff); /* suspiciously like PGRAPH_DEBUG_2 */
	nv_wr32(dev, 0x4000a0, 0x00000000);
	nv_wr32(dev, 0x4000a4, 0x00000008);
	nv_wr32(dev, 0x4008a8, 0xb784a400);
	nv_wr32(dev, 0x400ba0, 0x002f8685);
	nv_wr32(dev, 0x400ba4, 0x00231f3f);
	nv_wr32(dev, 0x4008a4, 0x40000020);

	/* NV34-only RDI pokes. */
	if (dev_priv->chipset == 0x34) {
		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00200201);
		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0008);
		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000008);
		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000032);
		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00004);
		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000002);
	}

	nv_wr32(dev, 0x4000c0, 0x00000016);

	/* copy tile info from PFB */
	for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
		nv_wr32(dev, 0x00400904 + i * 0x10,
			nv_rd32(dev, NV10_PFB_TLIMIT(i)));
			/* which is NV40_PGRAPH_TLIMIT0(i) ?? */
		nv_wr32(dev, 0x00400908 + i * 0x10,
			nv_rd32(dev, NV10_PFB_TSIZE(i)));
			/* which is NV40_PGRAPH_TSIZE0(i) ?? */
		nv_wr32(dev, 0x00400900 + i * 0x10,
			nv_rd32(dev, NV10_PFB_TILE(i)));
			/* which is NV40_PGRAPH_TILE0(i) ?? */
	}

	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
	nv_wr32(dev, NV10_PGRAPH_STATE      , 0xFFFFFFFF);
	nv_wr32(dev, 0x0040075c             , 0x00000001);

	/* begin RAM config */
	/* vramsz = drm_get_resource_len(dev, 0) - 1; */
	nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
	nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
	if (dev_priv->chipset != 0x34) {
		nv_wr32(dev, 0x400750, 0x00EA0000);
		nv_wr32(dev, 0x400754, nv_rd32(dev, NV04_PFB_CFG0));
		nv_wr32(dev, 0x400750, 0x00EA0004);
		nv_wr32(dev, 0x400754, nv_rd32(dev, NV04_PFB_CFG1));
	}

	return 0;
}
737
/*
 * Graphics object classes accepted on NV2x PGRAPH.  Each entry is
 * { class id, flag, handler }; the flag is false and the handler NULL
 * for every entry here -- presumably meaning "handled in hardware,
 * no software methods" (confirm against the struct definition in
 * nouveau_drv.h).  The list ends with a zeroed sentinel.
 */
struct nouveau_pgraph_object_class nv20_graph_grclass[] = {
	{ 0x0030, false, NULL }, /* null */
	{ 0x0039, false, NULL }, /* m2mf */
	{ 0x004a, false, NULL }, /* gdirect */
	{ 0x009f, false, NULL }, /* imageblit (nv12) */
	{ 0x008a, false, NULL }, /* ifc */
	{ 0x0089, false, NULL }, /* sifm */
	{ 0x0062, false, NULL }, /* surf2d */
	{ 0x0043, false, NULL }, /* rop */
	{ 0x0012, false, NULL }, /* beta1 */
	{ 0x0072, false, NULL }, /* beta4 */
	{ 0x0019, false, NULL }, /* cliprect */
	{ 0x0044, false, NULL }, /* pattern */
	{ 0x009e, false, NULL }, /* swzsurf */
	{ 0x0096, false, NULL }, /* celcius */
	{ 0x0097, false, NULL }, /* kelvin (nv20) */
	{ 0x0597, false, NULL }, /* kelvin (nv25) */
	{}
};
757
/*
 * Graphics object classes accepted on NV3x PGRAPH.  Same layout as
 * nv20_graph_grclass above; NV3x adds 0x3xx variants of several 2D
 * classes and the rankine 3D classes.  Terminated by a zeroed
 * sentinel entry.
 */
struct nouveau_pgraph_object_class nv30_graph_grclass[] = {
	{ 0x0030, false, NULL }, /* null */
	{ 0x0039, false, NULL }, /* m2mf */
	{ 0x004a, false, NULL }, /* gdirect */
	{ 0x009f, false, NULL }, /* imageblit (nv12) */
	{ 0x008a, false, NULL }, /* ifc */
	{ 0x038a, false, NULL }, /* ifc (nv30) */
	{ 0x0089, false, NULL }, /* sifm */
	{ 0x0389, false, NULL }, /* sifm (nv30) */
	{ 0x0062, false, NULL }, /* surf2d */
	{ 0x0362, false, NULL }, /* surf2d (nv30) */
	{ 0x0043, false, NULL }, /* rop */
	{ 0x0012, false, NULL }, /* beta1 */
	{ 0x0072, false, NULL }, /* beta4 */
	{ 0x0019, false, NULL }, /* cliprect */
	{ 0x0044, false, NULL }, /* pattern */
	{ 0x039e, false, NULL }, /* swzsurf */
	{ 0x0397, false, NULL }, /* rankine (nv30) */
	{ 0x0497, false, NULL }, /* rankine (nv35) */
	{ 0x0697, false, NULL }, /* rankine (nv34) */
	{}
};
780
diff --git a/drivers/gpu/drm/nouveau/nv40_fb.c b/drivers/gpu/drm/nouveau/nv40_fb.c
new file mode 100644
index 000000000000..ca1d27107a8e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv40_fb.c
@@ -0,0 +1,62 @@
1#include "drmP.h"
2#include "drm.h"
3#include "nouveau_drv.h"
4#include "nouveau_drm.h"
5
6int
7nv40_fb_init(struct drm_device *dev)
8{
9 struct drm_nouveau_private *dev_priv = dev->dev_private;
10 uint32_t fb_bar_size, tmp;
11 int num_tiles;
12 int i;
13
14 /* This is strictly a NV4x register (don't know about NV5x). */
15 /* The blob sets these to all kinds of values, and they mess up our setup. */
16 /* I got value 0x52802 instead. For some cards the blob even sets it back to 0x1. */
17 /* Note: the blob doesn't read this value, so i'm pretty sure this is safe for all cards. */
18 /* Any idea what this is? */
19 nv_wr32(dev, NV40_PFB_UNK_800, 0x1);
20
21 switch (dev_priv->chipset) {
22 case 0x40:
23 case 0x45:
24 tmp = nv_rd32(dev, NV10_PFB_CLOSE_PAGE2);
25 nv_wr32(dev, NV10_PFB_CLOSE_PAGE2, tmp & ~(1 << 15));
26 num_tiles = NV10_PFB_TILE__SIZE;
27 break;
28 case 0x46: /* G72 */
29 case 0x47: /* G70 */
30 case 0x49: /* G71 */
31 case 0x4b: /* G73 */
32 case 0x4c: /* C51 (G7X version) */
33 num_tiles = NV40_PFB_TILE__SIZE_1;
34 break;
35 default:
36 num_tiles = NV40_PFB_TILE__SIZE_0;
37 break;
38 }
39
40 fb_bar_size = drm_get_resource_len(dev, 0) - 1;
41 switch (dev_priv->chipset) {
42 case 0x40:
43 for (i = 0; i < num_tiles; i++) {
44 nv_wr32(dev, NV10_PFB_TILE(i), 0);
45 nv_wr32(dev, NV10_PFB_TLIMIT(i), fb_bar_size);
46 }
47 break;
48 default:
49 for (i = 0; i < num_tiles; i++) {
50 nv_wr32(dev, NV40_PFB_TILE(i), 0);
51 nv_wr32(dev, NV40_PFB_TLIMIT(i), fb_bar_size);
52 }
53 break;
54 }
55
56 return 0;
57}
58
/* NV4x PFB needs no teardown; kept so the engine vtable has a
 * non-NULL takedown hook. */
void
nv40_fb_takedown(struct drm_device *dev)
{
}
diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c
new file mode 100644
index 000000000000..b4f19ccb8b41
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv40_fifo.c
@@ -0,0 +1,314 @@
1/*
2 * Copyright (C) 2007 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "nouveau_drv.h"
29#include "nouveau_drm.h"
30
31#define NV40_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV40_RAMFC__SIZE))
32#define NV40_RAMFC__SIZE 128
33
/* Allocate and seed the channel's RAMFC (per-channel PFIFO state image) and
 * switch the channel into DMA mode.  The word offsets written here mirror
 * the registers restored by nv40_fifo_do_load_context():
 *   +0x00 DMA_PUT, +0x04 DMA_GET, +0x0c DMA_INSTANCE, +0x18 DMA_FETCH,
 *   +0x38 GRCTX_INSTANCE, +0x3c DMA_TIMESLICE.
 */
34int
35nv40_fifo_create_context(struct nouveau_channel *chan)
36{
37	struct drm_device *dev = chan->dev;
38	struct drm_nouveau_private *dev_priv = dev->dev_private;
39	uint32_t fc = NV40_RAMFC(chan->id);
40	int ret;
41
	/* Wrap the fixed per-channel RAMFC slot in a fake gpuobj so generic
	 * gpuobj code can manage (and zero) it. */
42	ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0,
43	    NV40_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC |
44	    NVOBJ_FLAG_ZERO_FREE, NULL, &chan->ramfc);
45	if (ret)
46		return ret;
47
48	dev_priv->engine.instmem.prepare_access(dev, true);
49	nv_wi32(dev, fc + 0, chan->pushbuf_base);
50	nv_wi32(dev, fc + 4, chan->pushbuf_base);
51	nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4);
52	nv_wi32(dev, fc + 24, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
53	    NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
54	    NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
55#ifdef __BIG_ENDIAN
56	    NV_PFIFO_CACHE1_BIG_ENDIAN |
57#endif
58	    0x30000000 /* no idea.. */);
59	nv_wi32(dev, fc + 56, chan->ramin_grctx->instance >> 4);
60	nv_wi32(dev, fc + 60, 0x0001FFFF);
61	dev_priv->engine.instmem.finish_access(dev);
62
63	/* enable the fifo dma operation */
64	nv_wr32(dev, NV04_PFIFO_MODE,
65	    nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
66	return 0;
67}
68
/* Take the channel out of DMA mode, then release its RAMFC object (if any). */
69void
70nv40_fifo_destroy_context(struct nouveau_channel *chan)
71{
72	struct drm_device *dev = chan->dev;
73
74	nv_wr32(dev, NV04_PFIFO_MODE,
75	    nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
76
77	if (chan->ramfc)
78		nouveau_gpuobj_ref_del(dev, &chan->ramfc);
79}
80
/* Restore PFIFO CACHE1 state for channel 'chid' from its RAMFC image.
 * The write sequence follows mmio traces of the binary driver; treat it
 * as order-sensitive.  Finishes by resetting the CACHE1 get/put pointers.
 */
81static void
82nv40_fifo_do_load_context(struct drm_device *dev, int chid)
83{
84	struct drm_nouveau_private *dev_priv = dev->dev_private;
85	uint32_t fc = NV40_RAMFC(chid), tmp, tmp2;
86
87	dev_priv->engine.instmem.prepare_access(dev, false);
88
89	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
90	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
91	nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8));
92	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, nv_ri32(dev, fc + 12));
93	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, nv_ri32(dev, fc + 16));
94	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 20));
95
96	/* No idea what 0x2058 is.. */
	/* RAMFC +24 holds DMA_FETCH plus two extra bits (0x30000000) that
	 * live in register 0x2058; split them back out here. */
97	tmp = nv_ri32(dev, fc + 24);
98	tmp2 = nv_rd32(dev, 0x2058) & 0xFFF;
99	tmp2 |= (tmp & 0x30000000);
100	nv_wr32(dev, 0x2058, tmp2);
101	tmp &= ~0x30000000;
102	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, tmp);
103
104	nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 28));
105	nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 32));
106	nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE, nv_ri32(dev, fc + 36));
107	tmp = nv_ri32(dev, fc + 40);
108	nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, tmp);
109	nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT, nv_ri32(dev, fc + 44));
110	nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, nv_ri32(dev, fc + 48));
111	nv_wr32(dev, NV10_PFIFO_CACHE1_DMA_SUBROUTINE, nv_ri32(dev, fc + 52));
112	nv_wr32(dev, NV40_PFIFO_GRCTX_INSTANCE, nv_ri32(dev, fc + 56));
113
114	/* Don't clobber the TIMEOUT_ENABLED flag when restoring from RAMFC */
115	tmp = nv_rd32(dev, NV04_PFIFO_DMA_TIMESLICE) & ~0x1FFFF;
116	tmp |= nv_ri32(dev, fc + 60) & 0x1FFFF;
117	nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, tmp);
118
119	nv_wr32(dev, 0x32e4, nv_ri32(dev, fc + 64));
120	/* NVIDIA does this next line twice... */
121	nv_wr32(dev, 0x32e8, nv_ri32(dev, fc + 68));
122	nv_wr32(dev, 0x2088, nv_ri32(dev, fc + 76));
123	nv_wr32(dev, 0x3300, nv_ri32(dev, fc + 80));
124
125	dev_priv->engine.instmem.finish_access(dev);
126
127	nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
128	nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
129}
130
/* Make 'chan' the active PFIFO channel: restore its RAMFC state, mark the
 * channel active in DMA mode, and re-arm the pusher.
 */
131int
132nv40_fifo_load_context(struct nouveau_channel *chan)
133{
134	struct drm_device *dev = chan->dev;
135	uint32_t tmp;
136
137	nv40_fifo_do_load_context(dev, chan->id);
138
139	/* Set channel active, and in DMA mode */
140	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
141	    NV40_PFIFO_CACHE1_PUSH1_DMA | chan->id);
142	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1);
143
144	/* Reset DMA_CTL_AT_INFO to INVALID */
145	tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31);
146	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp);
147
148	return 0;
149}
150
/* Save the currently-active channel's CACHE1 state back into its RAMFC,
 * then park PFIFO on the highest channel id (used as a NULL channel).
 * RAMFC word offsets must stay in sync with nv40_fifo_do_load_context().
 * Returns 0 even when no valid channel is active (nothing to save).
 */
151int
152nv40_fifo_unload_context(struct drm_device *dev)
153{
154	struct drm_nouveau_private *dev_priv = dev->dev_private;
155	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
156	uint32_t fc, tmp;
157	int chid;
158
159	chid = pfifo->channel_id(dev);
160	if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
161		return 0;
162	fc = NV40_RAMFC(chid);
163
164	dev_priv->engine.instmem.prepare_access(dev, true);
165	nv_wi32(dev, fc + 0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
166	nv_wi32(dev, fc + 4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
167	nv_wi32(dev, fc + 8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT));
168	nv_wi32(dev, fc + 12, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE));
169	nv_wi32(dev, fc + 16, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT));
170	nv_wi32(dev, fc + 20, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE));
	/* Fold the 0x2058 bits back into the saved DMA_FETCH word; the
	 * inverse split happens in nv40_fifo_do_load_context(). */
171	tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH);
172	tmp |= nv_rd32(dev, 0x2058) & 0x30000000;
173	nv_wi32(dev, fc + 24, tmp);
174	nv_wi32(dev, fc + 28, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
175	nv_wi32(dev, fc + 32, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));
176	nv_wi32(dev, fc + 36, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE));
177	tmp = nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP);
178	nv_wi32(dev, fc + 40, tmp);
179	nv_wi32(dev, fc + 44, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT));
180	nv_wi32(dev, fc + 48, nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE));
181	/* NVIDIA read 0x3228 first, then write DMA_GET here.. maybe something
182	 * more involved depending on the value of 0x3228?
183	 */
184	nv_wi32(dev, fc + 52, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
185	nv_wi32(dev, fc + 56, nv_rd32(dev, NV40_PFIFO_GRCTX_INSTANCE));
186	nv_wi32(dev, fc + 60, nv_rd32(dev, NV04_PFIFO_DMA_TIMESLICE) & 0x1ffff);
187	/* No idea what the below is for exactly, ripped from a mmio-trace */
188	nv_wi32(dev, fc + 64, nv_rd32(dev, NV40_PFIFO_UNK32E4));
189	/* NVIDIA do this next line twice.. bug? */
190	nv_wi32(dev, fc + 68, nv_rd32(dev, 0x32e8));
191	nv_wi32(dev, fc + 76, nv_rd32(dev, 0x2088));
192	nv_wi32(dev, fc + 80, nv_rd32(dev, 0x3300));
193#if 0 /* no real idea which is PUT/GET in UNK_48.. */
194	tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_GET);
195	tmp |= (nv_rd32(dev, NV04_PFIFO_CACHE1_PUT) << 16);
196	nv_wi32(dev, fc + 72, tmp);
197#endif
198	dev_priv->engine.instmem.finish_access(dev);
199
	/* Switch to the NULL channel so nothing runs until the next load. */
200	nv40_fifo_do_load_context(dev, pfifo->channels - 1);
201	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
202	    NV40_PFIFO_CACHE1_PUSH1_DMA | (pfifo->channels - 1));
203	return 0;
204}
205
/* Reset PFIFO via PMC_ENABLE (disable then re-enable), then force a long
 * list of PFIFO registers to known values.  The sequence comes from mmio
 * traces; most raw offsets have unknown meanings -- do not reorder casually.
 */
206static void
207nv40_fifo_init_reset(struct drm_device *dev)
208{
209	int i;
210
211	nv_wr32(dev, NV03_PMC_ENABLE,
212	    nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO);
213	nv_wr32(dev, NV03_PMC_ENABLE,
214	    nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PFIFO);
215
216	nv_wr32(dev, 0x003224, 0x000f0078);
217	nv_wr32(dev, 0x003210, 0x00000000);
218	nv_wr32(dev, 0x003270, 0x00000000);
219	nv_wr32(dev, 0x003240, 0x00000000);
220	nv_wr32(dev, 0x003244, 0x00000000);
221	nv_wr32(dev, 0x003258, 0x00000000);
222	nv_wr32(dev, 0x002504, 0x00000000);
223	for (i = 0; i < 16; i++)
224		nv_wr32(dev, 0x002510 + (i * 4), 0x00000000);
225	nv_wr32(dev, 0x00250c, 0x0000ffff);
226	nv_wr32(dev, 0x002048, 0x00000000);
227	nv_wr32(dev, 0x003228, 0x00000000);
228	nv_wr32(dev, 0x0032e8, 0x00000000);
229	nv_wr32(dev, 0x002410, 0x00000000);
230	nv_wr32(dev, 0x002420, 0x00000000);
231	nv_wr32(dev, 0x002058, 0x00000001);
232	nv_wr32(dev, 0x00221c, 0x00000000);
233	/* something with 0x2084, read/modify/write, no change */
234	nv_wr32(dev, 0x002040, 0x000000ff);
235	nv_wr32(dev, 0x002500, 0x00000000);
236	nv_wr32(dev, 0x003200, 0x00000000);
237
238	nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x2101ffff);
239}
240
/* Point PFIFO at the instance-memory objects it consumes: RAMHT (hash
 * table, with search stride and size encoded alongside the offset), RAMRO
 * (runout) and RAMFC.  Some G7x chipsets need an extra enable at 0x2230
 * and use a fixed RAMFC encoding; the rest derive RAMFC from its offset
 * near the end of VRAM.
 */
241static void
242nv40_fifo_init_ramxx(struct drm_device *dev)
243{
244	struct drm_nouveau_private *dev_priv = dev->dev_private;
245
246	nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
247	    ((dev_priv->ramht_bits - 9) << 16) |
248	    (dev_priv->ramht_offset >> 8));
249	nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8);
250
251	switch (dev_priv->chipset) {
252	case 0x47:
253	case 0x49:
254	case 0x4b:
255		nv_wr32(dev, 0x2230, 1);
256		break;
257	default:
258		break;
259	}
260
261	switch (dev_priv->chipset) {
262	case 0x40:
263	case 0x41:
264	case 0x42:
265	case 0x43:
266	case 0x45:
267	case 0x47:
268	case 0x48:
269	case 0x49:
270	case 0x4b:
271		nv_wr32(dev, NV40_PFIFO_RAMFC, 0x30002);
272		break;
273	default:
274		nv_wr32(dev, 0x2230, 0);
275		nv_wr32(dev, NV40_PFIFO_RAMFC,
276		    ((nouveau_mem_fb_amount(dev) - 512 * 1024 +
277		      dev_priv->ramfc_offset) >> 16) | (3 << 16));
278		break;
279	}
280}
281
/* Write all-ones to 0x2100/0x2140.
 * NOTE(review): these look like PFIFO INTR (ack everything pending) and
 * INTR_EN (unmask all sources) -- confirm against the register database.
 */
282static void
283nv40_fifo_init_intr(struct drm_device *dev)
284{
285	nv_wr32(dev, 0x002100, 0xffffffff);
286	nv_wr32(dev, 0x002140, 0xffffffff);
287}
288
/* Bring up PFIFO: reset it, program RAMHT/RAMRO/RAMFC, park the cache on
 * the NULL channel, enable interrupts and the puller, then re-enable DMA
 * mode for every channel that already exists.
 */
289int
290nv40_fifo_init(struct drm_device *dev)
291{
292	struct drm_nouveau_private *dev_priv = dev->dev_private;
293	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
294	int i;
295
296	nv40_fifo_init_reset(dev);
297	nv40_fifo_init_ramxx(dev);
298
	/* Load the NULL (highest-id) channel so PFIFO has a valid context. */
299	nv40_fifo_do_load_context(dev, pfifo->channels - 1);
300	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
301
302	nv40_fifo_init_intr(dev);
303	pfifo->enable(dev);
304	pfifo->reassign(dev, true);
305
	/* Re-flag pre-existing channels as DMA-mode (e.g. after resume). */
306	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
307		if (dev_priv->fifos[i]) {
308			uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
309			nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
310		}
311	}
312
313	return 0;
314}
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
new file mode 100644
index 000000000000..d3e0a2a6acf8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -0,0 +1,560 @@
1/*
2 * Copyright (C) 2007 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <linux/firmware.h>
28
29#include "drmP.h"
30#include "drm.h"
31#include "nouveau_drv.h"
32
33MODULE_FIRMWARE("nouveau/nv40.ctxprog");
34MODULE_FIRMWARE("nouveau/nv40.ctxvals");
35MODULE_FIRMWARE("nouveau/nv41.ctxprog");
36MODULE_FIRMWARE("nouveau/nv41.ctxvals");
37MODULE_FIRMWARE("nouveau/nv42.ctxprog");
38MODULE_FIRMWARE("nouveau/nv42.ctxvals");
39MODULE_FIRMWARE("nouveau/nv43.ctxprog");
40MODULE_FIRMWARE("nouveau/nv43.ctxvals");
41MODULE_FIRMWARE("nouveau/nv44.ctxprog");
42MODULE_FIRMWARE("nouveau/nv44.ctxvals");
43MODULE_FIRMWARE("nouveau/nv46.ctxprog");
44MODULE_FIRMWARE("nouveau/nv46.ctxvals");
45MODULE_FIRMWARE("nouveau/nv47.ctxprog");
46MODULE_FIRMWARE("nouveau/nv47.ctxvals");
47MODULE_FIRMWARE("nouveau/nv49.ctxprog");
48MODULE_FIRMWARE("nouveau/nv49.ctxvals");
49MODULE_FIRMWARE("nouveau/nv4a.ctxprog");
50MODULE_FIRMWARE("nouveau/nv4a.ctxvals");
51MODULE_FIRMWARE("nouveau/nv4b.ctxprog");
52MODULE_FIRMWARE("nouveau/nv4b.ctxvals");
53MODULE_FIRMWARE("nouveau/nv4c.ctxprog");
54MODULE_FIRMWARE("nouveau/nv4c.ctxvals");
55MODULE_FIRMWARE("nouveau/nv4e.ctxprog");
56MODULE_FIRMWARE("nouveau/nv4e.ctxvals");
57
/* Return the channel whose PGRAPH context is currently loaded, or NULL.
 * Reads CTXCTL_CUR and matches its instance address against each existing
 * channel's grctx object.
 */
58struct nouveau_channel *
59nv40_graph_channel(struct drm_device *dev)
60{
61	struct drm_nouveau_private *dev_priv = dev->dev_private;
62	uint32_t inst;
63	int i;
64
65	inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR);
66	if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED))
67		return NULL;
	/* CTXCTL_CUR stores the instance address shifted right by 4. */
68	inst = (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) << 4;
69
70	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
71		struct nouveau_channel *chan = dev_priv->fifos[i];
72
73		if (chan && chan->ramin_grctx &&
74		    chan->ramin_grctx->instance == inst)
75			return chan;
76	}
77
78	return NULL;
79}
80
/* Allocate a PGRAPH context object in PRAMIN for the channel and seed it
 * with the cached ctxvals.  Returns 0 on success or a negative errno from
 * the gpuobj allocation.
 */
81int
82nv40_graph_create_context(struct nouveau_channel *chan)
83{
84	struct drm_device *dev = chan->dev;
85	struct drm_nouveau_private *dev_priv = dev->dev_private;
86	struct nouveau_gpuobj *ctx;
87	int ret;
88
89	/* Allocate a 175KiB block of PRAMIN to store the context.  This
90	 * is massive overkill for a lot of chipsets, but it should be safe
91	 * until we're able to implement this properly (will happen at more
92	 * or less the same time we're able to write our own context programs.
93	 */
94	ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 175*1024, 16,
95				     NVOBJ_FLAG_ZERO_ALLOC,
96				     &chan->ramin_grctx);
97	if (ret)
98		return ret;
99	ctx = chan->ramin_grctx->gpuobj;
100
101	/* Initialise default context values */
102	dev_priv->engine.instmem.prepare_access(dev, true);
103	nv40_grctx_vals_load(dev, ctx);
	/* First context word points back at the context's own PRAMIN start. */
104	nv_wo32(dev, ctx, 0, ctx->im_pramin->start);
105	dev_priv->engine.instmem.finish_access(dev);
106
107	return 0;
108}
109
/* Release the channel's PGRAPH context object. */
110void
111nv40_graph_destroy_context(struct nouveau_channel *chan)
112{
113	nouveau_gpuobj_ref_del(chan->dev, &chan->ramin_grctx);
114}
115
/* Ask the PGRAPH context-control ucode to transfer channel state between
 * the engine and the context at 'inst' (save != 0 saves out to memory,
 * otherwise loads in), then poll CTXCTL_030C until the transfer finishes.
 * Restores the previous context pointer before returning.
 * Returns 0 on success, -EBUSY if the ucode never signalled completion.
 */
116static int
117nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save)
118{
119	uint32_t old_cp, tv = 1000, tmp;
120	int i;
121
122	old_cp = nv_rd32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER);
123	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
124
125	tmp  = nv_rd32(dev, NV40_PGRAPH_CTXCTL_0310);
126	tmp |= save ? NV40_PGRAPH_CTXCTL_0310_XFER_SAVE :
127		      NV40_PGRAPH_CTXCTL_0310_XFER_LOAD;
128	nv_wr32(dev, NV40_PGRAPH_CTXCTL_0310, tmp);
129
130	tmp  = nv_rd32(dev, NV40_PGRAPH_CTXCTL_0304);
131	tmp |= NV40_PGRAPH_CTXCTL_0304_XFER_CTX;
132	nv_wr32(dev, NV40_PGRAPH_CTXCTL_0304, tmp);
133
134	nouveau_wait_for_idle(dev);
135
	/* Bounded poll (tv iterations) for the ucode to report done. */
136	for (i = 0; i < tv; i++) {
137		if (nv_rd32(dev, NV40_PGRAPH_CTXCTL_030C) == 0)
138			break;
139	}
140
141	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, old_cp);
142
143	if (i == tv) {
144		uint32_t ucstat = nv_rd32(dev, NV40_PGRAPH_CTXCTL_UCODE_STAT);
145		NV_ERROR(dev, "Failed: Instance=0x%08x Save=%d\n", inst, save);
146		NV_ERROR(dev, "IP: 0x%02x, Opcode: 0x%08x\n",
147			 ucstat >> NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT,
148			 ucstat  & NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK);
149		NV_ERROR(dev, "0x40030C = 0x%08x\n",
150			 nv_rd32(dev, NV40_PGRAPH_CTXCTL_030C));
151		return -EBUSY;
152	}
153
154	return 0;
155}
156
/* Restore the context for a specific channel into PGRAPH */
158int
159nv40_graph_load_context(struct nouveau_channel *chan)
160{
161	struct drm_device *dev = chan->dev;
162	uint32_t inst;
163	int ret;
164
165	if (!chan->ramin_grctx)
166		return -EINVAL;
167	inst = chan->ramin_grctx->instance >> 4;
168
169	ret = nv40_graph_transfer_context(dev, inst, 0);
170	if (ret)
171		return ret;
172
173	/* 0x40032C, no idea of its exact function.  Could simply be a
174	 * record of the currently active PGRAPH context.  It's currently
175	 * unknown as to what bit 24 does.  The nv ddx has it set, so we will
176	 * set it here too.
177	 */
178	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
179	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR,
180	    (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) |
181	     NV40_PGRAPH_CTXCTL_CUR_LOADED);
182	/* 0x32E0 records the instance address of the active FIFO's PGRAPH
183	 * context.  If at any time this doesn't match 0x40032C, you will
184	 * receive PGRAPH_INTR_CONTEXT_SWITCH
185	 */
186	nv_wr32(dev, NV40_PFIFO_GRCTX_INSTANCE, inst);
187	return 0;
188}
189
/* Save the currently-loaded PGRAPH context (if any) back to memory and
 * clear the LOADED bit in CTXCTL_CUR.  Returns 0 when nothing was loaded,
 * otherwise the transfer's result.
 */
190int
191nv40_graph_unload_context(struct drm_device *dev)
192{
193	uint32_t inst;
194	int ret;
195
196	inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR);
197	if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED))
198		return 0;
199	inst &= NV40_PGRAPH_CTXCTL_CUR_INSTANCE;
200
201	ret = nv40_graph_transfer_context(dev, inst, 1);
202
	/* Writing back without the LOADED bit marks "no context loaded". */
203	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, inst);
204	return ret;
205}
206
/* On-disk layout of a ctxprog firmware blob: a 7-byte packed header
 * followed by 'length' 32-bit ucode words (nv40_grctx_init() validates
 * length == (file size - 7) / 4).
 */
207struct nouveau_ctxprog {
208	uint32_t signature;	/* 0x5043564e == "NVCP" when read as bytes */
209	uint8_t  version;	/* only version 0 is accepted */
210	uint16_t length;	/* number of 32-bit ucode words in data[] */
211	uint32_t data[];
212} __attribute__ ((packed));
213
/* ctxvals blob: a 9-byte packed header then 'length' 8-byte
 * (offset, value) pairs used to seed a new channel's grctx
 * (nv40_grctx_init() validates length == (file size - 9) / 8).
 */
214struct nouveau_ctxvals {
215	uint32_t signature;	/* 0x5643564e == "NVCV" when read as bytes */
216	uint8_t  version;	/* only version 0 is accepted */
217	uint32_t length;	/* number of (offset, value) entries */
218	struct {
219		uint32_t offset;	/* context word offset */
220		uint32_t value;		/* initial value for that word */
221	} data[];
222} __attribute__ ((packed));
223
224int
225nv40_grctx_init(struct drm_device *dev)
226{
227 struct drm_nouveau_private *dev_priv = dev->dev_private;
228 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
229 const int chipset = dev_priv->chipset;
230 const struct firmware *fw;
231 const struct nouveau_ctxprog *cp;
232 const struct nouveau_ctxvals *cv;
233 char name[32];
234 int ret, i;
235
236 pgraph->accel_blocked = true;
237
238 if (!pgraph->ctxprog) {
239 sprintf(name, "nouveau/nv%02x.ctxprog", chipset);
240 ret = request_firmware(&fw, name, &dev->pdev->dev);
241 if (ret) {
242 NV_ERROR(dev, "No ctxprog for NV%02x\n", chipset);
243 return ret;
244 }
245
246 pgraph->ctxprog = kmalloc(fw->size, GFP_KERNEL);
247 if (!pgraph->ctxprog) {
248 NV_ERROR(dev, "OOM copying ctxprog\n");
249 release_firmware(fw);
250 return -ENOMEM;
251 }
252 memcpy(pgraph->ctxprog, fw->data, fw->size);
253
254 cp = pgraph->ctxprog;
255 if (cp->signature != 0x5043564e || cp->version != 0 ||
256 cp->length != ((fw->size - 7) / 4)) {
257 NV_ERROR(dev, "ctxprog invalid\n");
258 release_firmware(fw);
259 nv40_grctx_fini(dev);
260 return -EINVAL;
261 }
262 release_firmware(fw);
263 }
264
265 if (!pgraph->ctxvals) {
266 sprintf(name, "nouveau/nv%02x.ctxvals", chipset);
267 ret = request_firmware(&fw, name, &dev->pdev->dev);
268 if (ret) {
269 NV_ERROR(dev, "No ctxvals for NV%02x\n", chipset);
270 nv40_grctx_fini(dev);
271 return ret;
272 }
273
274 pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL);
275 if (!pgraph->ctxprog) {
276 NV_ERROR(dev, "OOM copying ctxprog\n");
277 release_firmware(fw);
278 nv40_grctx_fini(dev);
279 return -ENOMEM;
280 }
281 memcpy(pgraph->ctxvals, fw->data, fw->size);
282
283 cv = (void *)pgraph->ctxvals;
284 if (cv->signature != 0x5643564e || cv->version != 0 ||
285 cv->length != ((fw->size - 9) / 8)) {
286 NV_ERROR(dev, "ctxvals invalid\n");
287 release_firmware(fw);
288 nv40_grctx_fini(dev);
289 return -EINVAL;
290 }
291 release_firmware(fw);
292 }
293
294 cp = pgraph->ctxprog;
295
296 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
297 for (i = 0; i < cp->length; i++)
298 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp->data[i]);
299
300 pgraph->accel_blocked = false;
301 return 0;
302}
303
304void
305nv40_grctx_fini(struct drm_device *dev)
306{
307 struct drm_nouveau_private *dev_priv = dev->dev_private;
308 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
309
310 if (pgraph->ctxprog) {
311 kfree(pgraph->ctxprog);
312 pgraph->ctxprog = NULL;
313 }
314
315 if (pgraph->ctxvals) {
316 kfree(pgraph->ctxprog);
317 pgraph->ctxvals = NULL;
318 }
319}
320
/* Write the cached ctxvals (offset, value) pairs into a freshly-allocated
 * channel context object.  Silently does nothing if the ctxvals firmware
 * was never loaded.
 */
321void
322nv40_grctx_vals_load(struct drm_device *dev, struct nouveau_gpuobj *ctx)
323{
324	struct drm_nouveau_private *dev_priv = dev->dev_private;
325	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
326	struct nouveau_ctxvals *cv = pgraph->ctxvals;
327	int i;
328
329	if (!cv)
330		return;
331
332	for (i = 0; i < cv->length; i++)
333		nv_wo32(dev, ctx, cv->data[i].offset, cv->data[i].value);
334}
335
/*
 * G70 0x47
 * G71 0x49
 * NV45 0x48
 * G72[M] 0x46
 * G73 0x4b
 * C51_G7X 0x4c
 * C51 0x4e
 */
/* Bring up PGRAPH: reset via PMC_ENABLE, upload the ctxprog, clear the
 * loaded-context state, program debug/limit registers, mirror PFB tiling
 * into PGRAPH, and finish with per-chipset RAM configuration.  Values and
 * ordering come from mmio traces of the binary driver; treat the raw
 * offsets as order-sensitive.
 */
345int
346nv40_graph_init(struct drm_device *dev)
347{
348	struct drm_nouveau_private *dev_priv =
349	    (struct drm_nouveau_private *)dev->dev_private;
350	uint32_t vramsz, tmp;
351	int i, j;
352
	/* Reset PGRAPH by toggling its PMC enable bit. */
353	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
354			~NV_PMC_ENABLE_PGRAPH);
355	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
356			 NV_PMC_ENABLE_PGRAPH);
357
358	nv40_grctx_init(dev);
359
360	/* No context present currently */
361	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
362
	/* Ack all pending PGRAPH interrupts and enable every source. */
363	nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
364	nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
365
366	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
367	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
368	nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x401287c0);
369	nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xe0de8055);
370	nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00008000);
371	nv_wr32(dev, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);
372
373	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
374	nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF);
375
	/* 0x1540 holds a unit mask; write the index of its lowest set bit
	 * to 0x405000 (purpose of these registers unknown from here). */
376	j = nv_rd32(dev, 0x1540) & 0xff;
377	if (j) {
378		for (i = 0; !(j & 1); j >>= 1, i++)
379			;
380		nv_wr32(dev, 0x405000, i);
381	}
382
383	if (dev_priv->chipset == 0x40) {
384		nv_wr32(dev, 0x4009b0, 0x83280fff);
385		nv_wr32(dev, 0x4009b4, 0x000000a0);
386	} else {
387		nv_wr32(dev, 0x400820, 0x83280eff);
388		nv_wr32(dev, 0x400824, 0x000000a0);
389	}
390
391	switch (dev_priv->chipset) {
392	case 0x40:
393	case 0x45:
394		nv_wr32(dev, 0x4009b8, 0x0078e366);
395		nv_wr32(dev, 0x4009bc, 0x0000014c);
396		break;
397	case 0x41:
398	case 0x42: /* pciid also 0x00Cx */
399	/* case 0x0120: XXX (pciid) */
400		nv_wr32(dev, 0x400828, 0x007596ff);
401		nv_wr32(dev, 0x40082c, 0x00000108);
402		break;
403	case 0x43:
404		nv_wr32(dev, 0x400828, 0x0072cb77);
405		nv_wr32(dev, 0x40082c, 0x00000108);
406		break;
407	case 0x44:
408	case 0x46: /* G72 */
409	case 0x4a:
410	case 0x4c: /* G7x-based C51 */
411	case 0x4e:
412		nv_wr32(dev, 0x400860, 0);
413		nv_wr32(dev, 0x400864, 0);
414		break;
415	case 0x47: /* G70 */
416	case 0x49: /* G71 */
417	case 0x4b: /* G73 */
418		nv_wr32(dev, 0x400828, 0x07830610);
419		nv_wr32(dev, 0x40082c, 0x0000016A);
420		break;
421	default:
422		break;
423	}
424
425	nv_wr32(dev, 0x400b38, 0x2ffff800);
426	nv_wr32(dev, 0x400b3c, 0x00006000);
427
428	/* copy tile info from PFB */
	/* Register layout for the PGRAPH tile mirror differs by family:
	 * NV40 reads the NV10-style PFB, NV44-family lacks the second
	 * (0x4069xx) bank, and G7x uses the NV47 offsets for bank 0. */
429	switch (dev_priv->chipset) {
430	case 0x40: /* vanilla NV40 */
431		for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
432			tmp = nv_rd32(dev, NV10_PFB_TILE(i));
433			nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp);
434			nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp);
435			tmp = nv_rd32(dev, NV10_PFB_TLIMIT(i));
436			nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp);
437			nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp);
438			tmp = nv_rd32(dev, NV10_PFB_TSIZE(i));
439			nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp);
440			nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp);
441			tmp = nv_rd32(dev, NV10_PFB_TSTATUS(i));
442			nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp);
443			nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp);
444		}
445		break;
446	case 0x44:
447	case 0x4a:
448	case 0x4e: /* NV44-based cores don't have 0x406900? */
449		for (i = 0; i < NV40_PFB_TILE__SIZE_0; i++) {
450			tmp = nv_rd32(dev, NV40_PFB_TILE(i));
451			nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp);
452			tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i));
453			nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp);
454			tmp = nv_rd32(dev, NV40_PFB_TSIZE(i));
455			nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp);
456			tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i));
457			nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp);
458		}
459		break;
460	case 0x46:
461	case 0x47:
462	case 0x49:
463	case 0x4b: /* G7X-based cores */
464		for (i = 0; i < NV40_PFB_TILE__SIZE_1; i++) {
465			tmp = nv_rd32(dev, NV40_PFB_TILE(i));
466			nv_wr32(dev, NV47_PGRAPH_TILE0(i), tmp);
467			nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp);
468			tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i));
469			nv_wr32(dev, NV47_PGRAPH_TLIMIT0(i), tmp);
470			nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp);
471			tmp = nv_rd32(dev, NV40_PFB_TSIZE(i));
472			nv_wr32(dev, NV47_PGRAPH_TSIZE0(i), tmp);
473			nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp);
474			tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i));
475			nv_wr32(dev, NV47_PGRAPH_TSTATUS0(i), tmp);
476			nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp);
477		}
478		break;
479	default: /* everything else */
480		for (i = 0; i < NV40_PFB_TILE__SIZE_0; i++) {
481			tmp = nv_rd32(dev, NV40_PFB_TILE(i));
482			nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp);
483			nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp);
484			tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i));
485			nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp);
486			nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp);
487			tmp = nv_rd32(dev, NV40_PFB_TSIZE(i));
488			nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp);
489			nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp);
490			tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i));
491			nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp);
492			nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp);
493		}
494		break;
495	}
496
497	/* begin RAM config */
	/* Mirror the PFB memory configuration into PGRAPH; register offsets
	 * again vary by chipset. */
498	vramsz = drm_get_resource_len(dev, 0) - 1;
499	switch (dev_priv->chipset) {
500	case 0x40:
501		nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
502		nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
503		nv_wr32(dev, 0x4069A4, nv_rd32(dev, NV04_PFB_CFG0));
504		nv_wr32(dev, 0x4069A8, nv_rd32(dev, NV04_PFB_CFG1));
505		nv_wr32(dev, 0x400820, 0);
506		nv_wr32(dev, 0x400824, 0);
507		nv_wr32(dev, 0x400864, vramsz);
508		nv_wr32(dev, 0x400868, vramsz);
509		break;
510	default:
511		switch (dev_priv->chipset) {
512		case 0x46:
513		case 0x47:
514		case 0x49:
515		case 0x4b:
516			nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0));
517			nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1));
518			break;
519		default:
520			nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0));
521			nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1));
522			break;
523		}
524		nv_wr32(dev, 0x4069F0, nv_rd32(dev, NV04_PFB_CFG0));
525		nv_wr32(dev, 0x4069F4, nv_rd32(dev, NV04_PFB_CFG1));
526		nv_wr32(dev, 0x400840, 0);
527		nv_wr32(dev, 0x400844, 0);
528		nv_wr32(dev, 0x4008A0, vramsz);
529		nv_wr32(dev, 0x4008A4, vramsz);
530		break;
531	}
532
533	return 0;
534}
535
/* PGRAPH teardown: nothing to do here; cached ctxprog/ctxvals firmware
 * is released separately via nv40_grctx_fini(). */
536void nv40_graph_takedown(struct drm_device *dev)
537{
538}
539
/* Object classes PGRAPH accepts on NV40-family hardware.  The method
 * handler pointer is NULL for every entry -- presumably none of these
 * classes need software methods on NV40; confirm against the
 * nouveau_pgraph_object_class definition. */
540struct nouveau_pgraph_object_class nv40_graph_grclass[] = {
541	{ 0x0030, false, NULL }, /* null */
542	{ 0x0039, false, NULL }, /* m2mf */
543	{ 0x004a, false, NULL }, /* gdirect */
544	{ 0x009f, false, NULL }, /* imageblit (nv12) */
545	{ 0x008a, false, NULL }, /* ifc */
546	{ 0x0089, false, NULL }, /* sifm */
547	{ 0x3089, false, NULL }, /* sifm (nv40) */
548	{ 0x0062, false, NULL }, /* surf2d */
549	{ 0x3062, false, NULL }, /* surf2d (nv40) */
550	{ 0x0043, false, NULL }, /* rop */
551	{ 0x0012, false, NULL }, /* beta1 */
552	{ 0x0072, false, NULL }, /* beta4 */
553	{ 0x0019, false, NULL }, /* cliprect */
554	{ 0x0044, false, NULL }, /* pattern */
555	{ 0x309e, false, NULL }, /* swzsurf */
556	{ 0x4097, false, NULL }, /* curie (nv40) */
557	{ 0x4497, false, NULL }, /* curie (nv44) */
558	{}
559};
560
diff --git a/drivers/gpu/drm/nouveau/nv40_mc.c b/drivers/gpu/drm/nouveau/nv40_mc.c
new file mode 100644
index 000000000000..2a3495e848e9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv40_mc.c
@@ -0,0 +1,38 @@
1#include "drmP.h"
2#include "drm.h"
3#include "nouveau_drv.h"
4#include "nouveau_drm.h"
5
/* PMC (master control) init: power up every engine; on NV44-family
 * chipsets additionally mirror PFB 0x20C into PMC 0x1700/0x170C and zero
 * 0x1704/0x1708 (purpose of these registers unknown from here).
 */
6int
7nv40_mc_init(struct drm_device *dev)
8{
9	struct drm_nouveau_private *dev_priv = dev->dev_private;
10	uint32_t tmp;
11
12	/* Power up everything, resetting each individual unit will
13	 * be done later if needed.
14	 */
15	nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
16
17	switch (dev_priv->chipset) {
18	case 0x44:
19	case 0x46: /* G72 */
20	case 0x4e:
21	case 0x4c: /* C51_G7X */
22		tmp = nv_rd32(dev, NV40_PFB_020C);
23		nv_wr32(dev, NV40_PMC_1700, tmp);
24		nv_wr32(dev, NV40_PMC_1704, 0);
25		nv_wr32(dev, NV40_PMC_1708, 0);
26		nv_wr32(dev, NV40_PMC_170C, tmp);
27		break;
28	default:
29		break;
30	}
31
32	return 0;
33}
34
/* PMC teardown: nothing to undo -- init only writes registers. */
35void
36nv40_mc_takedown(struct drm_device *dev)
37{
38}
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
new file mode 100644
index 000000000000..f8e28a1e44e7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -0,0 +1,769 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm_mode.h"
29#include "drm_crtc_helper.h"
30
31#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
32#include "nouveau_reg.h"
33#include "nouveau_drv.h"
34#include "nouveau_hw.h"
35#include "nouveau_encoder.h"
36#include "nouveau_crtc.h"
37#include "nouveau_fb.h"
38#include "nouveau_connector.h"
39#include "nv50_display.h"
40
/* Upload this CRTC's cached gamma ramp into its LUT buffer object.
 * Each of the 256 entries is 8 bytes: 16-bit R, G, B words (values
 * shifted down by 2 bits) followed by 2 bytes of padding.
 */
41static void
42nv50_crtc_lut_load(struct drm_crtc *crtc)
43{
44 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
45 void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
46 int i;
47
48 NV_DEBUG(crtc->dev, "\n");
49
50 for (i = 0; i < 256; i++) {
51 writew(nv_crtc->lut.r[i] >> 2, lut + 8*i + 0);
52 writew(nv_crtc->lut.g[i] >> 2, lut + 8*i + 2);
53 writew(nv_crtc->lut.b[i] >> 2, lut + 8*i + 4);
54 }
55
	/* i == 256 after the loop, so this writes the slot just past the
	 * main table — presumably the extra 257th entry used by 30-bit
	 * modes, duplicated from the last value.  TODO confirm vs hw docs. */
56 if (nv_crtc->lut.depth == 30) {
57 writew(nv_crtc->lut.r[i - 1] >> 2, lut + 8*i + 0);
58 writew(nv_crtc->lut.g[i - 1] >> 2, lut + 8*i + 2);
59 writew(nv_crtc->lut.b[i - 1] >> 2, lut + 8*i + 4);
60 }
61}
62
/* Blank or unblank a CRTC through the EVO channel.
 *
 * Blanking detaches the CLUT and framebuffer DMA handles (and hides the
 * cursor); unblanking restores them from nv_crtc->lut / nv_crtc->fb and
 * re-shows the cursor if it was visible.  The EVO update that makes
 * these take effect is NOT fired here — callers do that themselves.
 * Returns 0 or a negative errno if ring space could not be reserved.
 */
63int
64nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
65{
66 struct drm_device *dev = nv_crtc->base.dev;
67 struct drm_nouveau_private *dev_priv = dev->dev_private;
68 struct nouveau_channel *evo = dev_priv->evo;
69 int index = nv_crtc->index, ret;
70
71 NV_DEBUG(dev, "index %d\n", nv_crtc->index);
72 NV_DEBUG(dev, "%s\n", blanked ? "blanked" : "unblanked");
73
	/* NV84+ needs two extra words to also clear/set the CLUT DMA handle. */
74 if (blanked) {
75 nv_crtc->cursor.hide(nv_crtc, false);
76
77 ret = RING_SPACE(evo, dev_priv->chipset != 0x50 ? 7 : 5);
78 if (ret) {
79 NV_ERROR(dev, "no space while blanking crtc\n");
80 return ret;
81 }
82 BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
83 OUT_RING(evo, NV50_EVO_CRTC_CLUT_MODE_BLANK);
84 OUT_RING(evo, 0);
85 if (dev_priv->chipset != 0x50) {
86 BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
87 OUT_RING(evo, NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE);
88 }
89
90 BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
91 OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE);
92 } else {
93 if (nv_crtc->cursor.visible)
94 nv_crtc->cursor.show(nv_crtc, false);
95 else
96 nv_crtc->cursor.hide(nv_crtc, false);
97
98 ret = RING_SPACE(evo, dev_priv->chipset != 0x50 ? 10 : 8);
99 if (ret) {
100 NV_ERROR(dev, "no space while unblanking crtc\n");
101 return ret;
102 }
	/* Re-enable the CLUT (unless 8bpp) and point it at the LUT BO's
	 * VRAM address, expressed in 256-byte units. */
103 BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
104 OUT_RING(evo, nv_crtc->lut.depth == 8 ?
105 NV50_EVO_CRTC_CLUT_MODE_OFF :
106 NV50_EVO_CRTC_CLUT_MODE_ON);
107 OUT_RING(evo, (nv_crtc->lut.nvbo->bo.mem.mm_node->start <<
108 PAGE_SHIFT) >> 8);
109 if (dev_priv->chipset != 0x50) {
110 BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
111 OUT_RING(evo, NvEvoVRAM);
112 }
113
114 BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_OFFSET), 2);
115 OUT_RING(evo, nv_crtc->fb.offset >> 8);
116 OUT_RING(evo, 0);
	/* Select the scanout DMA object matching the fb's tiling layout;
	 * the original NV50 only ever uses the plain VRAM object. */
117 BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
118 if (dev_priv->chipset != 0x50)
119 if (nv_crtc->fb.tile_flags == 0x7a00)
120 OUT_RING(evo, NvEvoFB32);
121 else
122 if (nv_crtc->fb.tile_flags == 0x7000)
123 OUT_RING(evo, NvEvoFB16);
124 else
125 OUT_RING(evo, NvEvoVRAM);
126 else
127 OUT_RING(evo, NvEvoVRAM);
128 }
129
130 nv_crtc->fb.blanked = blanked;
131 return 0;
132}
133
134static int
135nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool on, bool update)
136{
137 struct drm_device *dev = nv_crtc->base.dev;
138 struct drm_nouveau_private *dev_priv = dev->dev_private;
139 struct nouveau_channel *evo = dev_priv->evo;
140 int ret;
141
142 NV_DEBUG(dev, "\n");
143
144 ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
145 if (ret) {
146 NV_ERROR(dev, "no space while setting dither\n");
147 return ret;
148 }
149
150 BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, DITHER_CTRL), 1);
151 if (on)
152 OUT_RING(evo, NV50_EVO_CRTC_DITHER_CTRL_ON);
153 else
154 OUT_RING(evo, NV50_EVO_CRTC_DITHER_CTRL_OFF);
155
156 if (update) {
157 BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
158 OUT_RING(evo, 0);
159 FIRE_RING(evo);
160 }
161
162 return 0;
163}
164
165struct nouveau_connector *
166nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
167{
168 struct drm_device *dev = nv_crtc->base.dev;
169 struct drm_connector *connector;
170 struct drm_crtc *crtc = to_drm_crtc(nv_crtc);
171
172 /* The safest approach is to find an encoder with the right crtc, that
173 * is also linked to a connector. */
174 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
175 if (connector->encoder)
176 if (connector->encoder->crtc == crtc)
177 return nouveau_connector(connector);
178 }
179
180 return NULL;
181}
182
/* Program the CRTC scaler according to @scaling_mode (a DRM_MODE_SCALE_*
 * value).  The output size is derived from the connector's native mode;
 * without a native mode we fall back to no scaling.  When @update is set
 * an EVO update is queued and fired.  Returns 0 or a negative errno.
 */
183static int
184nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, int scaling_mode, bool update)
185{
186 struct nouveau_connector *nv_connector =
187 nouveau_crtc_connector_get(nv_crtc);
188 struct drm_device *dev = nv_crtc->base.dev;
189 struct drm_nouveau_private *dev_priv = dev->dev_private;
190 struct nouveau_channel *evo = dev_priv->evo;
191 struct drm_display_mode *native_mode = NULL;
192 struct drm_display_mode *mode = &nv_crtc->base.mode;
193 uint32_t outX, outY, horiz, vert;
194 int ret;
195
196 NV_DEBUG(dev, "\n");
197
	/* Any mode other than NONE needs a native mode to scale towards. */
198 switch (scaling_mode) {
199 case DRM_MODE_SCALE_NONE:
200 break;
201 default:
202 if (!nv_connector || !nv_connector->native_mode) {
203 NV_ERROR(dev, "No native mode, forcing panel scaling\n");
204 scaling_mode = DRM_MODE_SCALE_NONE;
205 } else {
206 native_mode = nv_connector->native_mode;
207 }
208 break;
209 }
210
	/* Compute the scaled output size; ratios use 13.19 fixed point. */
211 switch (scaling_mode) {
212 case DRM_MODE_SCALE_ASPECT:
	/* Scale by the smaller ratio so the image fits both axes. */
213 horiz = (native_mode->hdisplay << 19) / mode->hdisplay;
214 vert = (native_mode->vdisplay << 19) / mode->vdisplay;
215
216 if (vert > horiz) {
217 outX = (mode->hdisplay * horiz) >> 19;
218 outY = (mode->vdisplay * horiz) >> 19;
219 } else {
220 outX = (mode->hdisplay * vert) >> 19;
221 outY = (mode->vdisplay * vert) >> 19;
222 }
223 break;
224 case DRM_MODE_SCALE_FULLSCREEN:
225 outX = native_mode->hdisplay;
226 outY = native_mode->vdisplay;
227 break;
228 case DRM_MODE_SCALE_CENTER:
229 case DRM_MODE_SCALE_NONE:
230 default:
231 outX = mode->hdisplay;
232 outY = mode->vdisplay;
233 break;
234 }
235
236 ret = RING_SPACE(evo, update ? 7 : 5);
237 if (ret)
238 return ret;
239
240 /* Got a better name for SCALER_ACTIVE? */
241 /* One day i've got to really figure out why this is needed. */
	/* Scaler must also be active for doublescan/interlace, not just
	 * when the output size differs from the mode. */
242 BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CTRL), 1);
243 if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) ||
244 (mode->flags & DRM_MODE_FLAG_INTERLACE) ||
245 mode->hdisplay != outX || mode->vdisplay != outY) {
246 OUT_RING(evo, NV50_EVO_CRTC_SCALE_CTRL_ACTIVE);
247 } else {
248 OUT_RING(evo, NV50_EVO_CRTC_SCALE_CTRL_INACTIVE);
249 }
250
251 BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_RES1), 2);
252 OUT_RING(evo, outY << 16 | outX);
253 OUT_RING(evo, outY << 16 | outX);
254
255 if (update) {
256 BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
257 OUT_RING(evo, 0);
258 FIRE_RING(evo);
259 }
260
261 return 0;
262}
263
/* Program the pixel clock PLL for CRTC @head to @pclk (kHz).
 * PLL limits come from the VBIOS via get_pll_limits(); M/N/P values
 * from nouveau_calc_pll_mnp().  Two-stage PLLs (vco2 present) and
 * single-stage PLLs use different register layouts.  Returns 0 on
 * success or a negative errno from the helpers.
 */
264int
265nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
266{
267 uint32_t pll_reg = NV50_PDISPLAY_CRTC_CLK_CTRL1(head);
268 struct nouveau_pll_vals pll;
269 struct pll_lims limits;
270 uint32_t reg1, reg2;
271 int ret;
272
273 ret = get_pll_limits(dev, pll_reg, &limits);
274 if (ret)
275 return ret;
276
	/* nouveau_calc_pll_mnp() returns <= 0 on failure. */
277 ret = nouveau_calc_pll_mnp(dev, &limits, pclk, &pll);
278 if (ret <= 0)
279 return ret;
280
	/* Preserve the reserved bits of the coefficient registers and only
	 * rewrite the M/N (and P) fields. */
281 if (limits.vco2.maxfreq) {
282 reg1 = nv_rd32(dev, pll_reg + 4) & 0xff00ff00;
283 reg2 = nv_rd32(dev, pll_reg + 8) & 0x8000ff00;
284 nv_wr32(dev, pll_reg, 0x10000611);
285 nv_wr32(dev, pll_reg + 4, reg1 | (pll.M1 << 16) | pll.N1);
286 nv_wr32(dev, pll_reg + 8,
287 reg2 | (pll.log2P << 28) | (pll.M2 << 16) | pll.N2);
288 } else {
289 reg1 = nv_rd32(dev, pll_reg + 4) & 0xffc00000;
290 nv_wr32(dev, pll_reg, 0x50000610);
291 nv_wr32(dev, pll_reg + 4, reg1 |
292 (pll.log2P << 16) | (pll.M1 << 8) | pll.N1);
293 }
294
295 return 0;
296}
297
298static void
299nv50_crtc_destroy(struct drm_crtc *crtc)
300{
301 struct drm_device *dev = crtc->dev;
302 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
303
304 NV_DEBUG(dev, "\n");
305
306 if (!crtc)
307 return;
308
309 drm_crtc_cleanup(&nv_crtc->base);
310
311 nv50_cursor_fini(nv_crtc);
312
313 nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
314 nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
315 kfree(nv_crtc->mode);
316 kfree(nv_crtc);
317}
318
/* drm_crtc_funcs.cursor_set: load a new 64x64 cursor image from the
 * userspace GEM object @buffer_handle, or hide the cursor when the
 * handle is 0.  The image (4096 32-bit words) is copied into the
 * CRTC's private cursor BO.  Returns 0 or a negative errno.
 */
319int
320nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
321 uint32_t buffer_handle, uint32_t width, uint32_t height)
322{
323 struct drm_device *dev = crtc->dev;
324 struct drm_nouveau_private *dev_priv = dev->dev_private;
325 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
326 struct nouveau_bo *cursor = NULL;
327 struct drm_gem_object *gem;
328 int ret = 0, i;
329
	/* Hardware cursor is fixed at 64x64. */
330 if (width != 64 || height != 64)
331 return -EINVAL;
332
333 if (!buffer_handle) {
334 nv_crtc->cursor.hide(nv_crtc, true);
335 return 0;
336 }
337
	/* Lookup takes a reference on the GEM object; dropped below. */
338 gem = drm_gem_object_lookup(dev, file_priv, buffer_handle);
339 if (!gem)
340 return -EINVAL;
341 cursor = nouveau_gem_object(gem);
342
343 ret = nouveau_bo_map(cursor);
344 if (ret)
345 goto out;
346
347 /* The simple will do for now. */
	/* Word-by-word copy of the 64x64x32bpp image into our cursor BO. */
348 for (i = 0; i < 64 * 64; i++)
349 nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, nouveau_bo_rd32(cursor, i));
350
351 nouveau_bo_unmap(cursor);
352
	/* Offset handed to hardware is relative to the VRAM VM base. */
353 nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset -
354 dev_priv->vm_vram_base);
355 nv_crtc->cursor.show(nv_crtc, true);
356
357out:
358 mutex_lock(&dev->struct_mutex);
359 drm_gem_object_unreference(gem);
360 mutex_unlock(&dev->struct_mutex);
361 return ret;
362}
363
364int
365nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
366{
367 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
368
369 nv_crtc->cursor.set_pos(nv_crtc, x, y);
370 return 0;
371}
372
373static void
374nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
375 uint32_t size)
376{
377 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
378 int i;
379
380 if (size != 256)
381 return;
382
383 for (i = 0; i < 256; i++) {
384 nv_crtc->lut.r[i] = r[i];
385 nv_crtc->lut.g[i] = g[i];
386 nv_crtc->lut.b[i] = b[i];
387 }
388
389 /* We need to know the depth before we upload, but it's possible to
390 * get called before a framebuffer is bound. If this is the case,
391 * mark the lut values as dirty by setting depth==0, and it'll be
392 * uploaded on the first mode_set_base()
393 */
394 if (!nv_crtc->base.fb) {
395 nv_crtc->lut.depth = 0;
396 return;
397 }
398
399 nv50_crtc_lut_load(crtc);
400}
401
/* Not implemented on NV50; logs loudly if the core ever calls it. */
402static void
403nv50_crtc_save(struct drm_crtc *crtc)
404{
405 NV_ERROR(crtc->dev, "!!\n");
406}
407
/* Not implemented on NV50; logs loudly if the core ever calls it. */
408static void
409nv50_crtc_restore(struct drm_crtc *crtc)
410{
411 NV_ERROR(crtc->dev, "!!\n");
412}
413
/* Core CRTC entry points; mode configuration is delegated to the
 * generic DRM CRTC helper via .set_config. */
414static const struct drm_crtc_funcs nv50_crtc_funcs = {
415 .save = nv50_crtc_save,
416 .restore = nv50_crtc_restore,
417 .cursor_set = nv50_crtc_cursor_set,
418 .cursor_move = nv50_crtc_cursor_move,
419 .gamma_set = nv50_crtc_gamma_set,
420 .set_config = drm_crtc_helper_set_config,
421 .destroy = nv50_crtc_destroy,
422};
423
/* drm_crtc_helper dpms hook: intentionally a no-op on NV50. */
424static void
425nv50_crtc_dpms(struct drm_crtc *crtc, int mode)
426{
427}
428
429static void
430nv50_crtc_prepare(struct drm_crtc *crtc)
431{
432 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
433 struct drm_device *dev = crtc->dev;
434 struct drm_encoder *encoder;
435
436 NV_DEBUG(dev, "index %d\n", nv_crtc->index);
437
438 /* Disconnect all unused encoders. */
439 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
440 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
441
442 if (drm_helper_encoder_in_use(encoder))
443 continue;
444
445 nv_encoder->disconnect(nv_encoder);
446 }
447
448 nv50_crtc_blank(nv_crtc, true);
449}
450
/* drm_crtc_helper commit hook: unblank this CRTC, blank every CRTC the
 * helper no longer considers in use, then fire a single EVO update to
 * make all queued state take effect.
 */
451static void
452nv50_crtc_commit(struct drm_crtc *crtc)
453{
454 struct drm_crtc *crtc2;
455 struct drm_device *dev = crtc->dev;
456 struct drm_nouveau_private *dev_priv = dev->dev_private;
457 struct nouveau_channel *evo = dev_priv->evo;
458 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
459 int ret;
460
461 NV_DEBUG(dev, "index %d\n", nv_crtc->index);
462
463 nv50_crtc_blank(nv_crtc, false);
464
465 /* Explicitly blank all unused crtc's. */
466 list_for_each_entry(crtc2, &dev->mode_config.crtc_list, head) {
467 if (!drm_helper_crtc_in_use(crtc2))
468 nv50_crtc_blank(nouveau_crtc(crtc2), true);
469 }
470
	/* One update covers both the unblank above and the blanks. */
471 ret = RING_SPACE(evo, 2);
472 if (ret) {
473 NV_ERROR(dev, "no space while committing crtc\n");
474 return;
475 }
476 BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
477 OUT_RING(evo, 0);
478 FIRE_RING(evo);
479}
480
/* drm_crtc_helper mode_fixup hook: no adjustment needed, accept all. */
481static bool
482nv50_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode,
483 struct drm_display_mode *adjusted_mode)
484{
485 return true;
486}
487
/* Point the CRTC's scanout at the currently-bound framebuffer
 * (nv_crtc->base.fb), pinning the new BO and unpinning @old_fb.
 * Programs format/pitch/offset/CLUT state on the EVO channel; when
 * @update is set the change is committed immediately.  Returns 0 or a
 * negative errno.
 */
488static int
489nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, int x, int y,
490 struct drm_framebuffer *old_fb, bool update)
491{
492 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
493 struct drm_device *dev = nv_crtc->base.dev;
494 struct drm_nouveau_private *dev_priv = dev->dev_private;
495 struct nouveau_channel *evo = dev_priv->evo;
496 struct drm_framebuffer *drm_fb = nv_crtc->base.fb;
497 struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
498 int ret, format;
499
500 NV_DEBUG(dev, "index %d\n", nv_crtc->index);
501
	/* Translate DRM fb depth into the EVO framebuffer format code. */
502 switch (drm_fb->depth) {
503 case 8:
504 format = NV50_EVO_CRTC_FB_DEPTH_8;
505 break;
506 case 15:
507 format = NV50_EVO_CRTC_FB_DEPTH_15;
508 break;
509 case 16:
510 format = NV50_EVO_CRTC_FB_DEPTH_16;
511 break;
512 case 24:
513 case 32:
514 format = NV50_EVO_CRTC_FB_DEPTH_24;
515 break;
516 case 30:
517 format = NV50_EVO_CRTC_FB_DEPTH_30;
518 break;
519 default:
520 NV_ERROR(dev, "unknown depth %d\n", drm_fb->depth);
521 return -EINVAL;
522 }
523
	/* Scanout buffer must stay resident in VRAM while displayed. */
524 ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
525 if (ret)
526 return ret;
527
528 if (old_fb) {
529 struct nouveau_framebuffer *ofb = nouveau_framebuffer(old_fb);
530 nouveau_bo_unpin(ofb->nvbo);
531 }
532
533 nv_crtc->fb.offset = fb->nvbo->bo.offset - dev_priv->vm_vram_base;
534 nv_crtc->fb.tile_flags = fb->nvbo->tile_flags;
535 nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
	/* On NV84+, re-select the scanout DMA object to match tiling —
	 * only while unblanked (blank/unblank handles it otherwise). */
536 if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) {
537 ret = RING_SPACE(evo, 2);
538 if (ret)
539 return ret;
540
541 BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1);
542 if (nv_crtc->fb.tile_flags == 0x7a00)
543 OUT_RING(evo, NvEvoFB32);
544 else
545 if (nv_crtc->fb.tile_flags == 0x7000)
546 OUT_RING(evo, NvEvoFB16);
547 else
548 OUT_RING(evo, NvEvoVRAM);
549 }
550
551 ret = RING_SPACE(evo, 12);
552 if (ret)
553 return ret;
554
	/* FB_OFFSET..: offset (256B units), 0, size, pitch/tiling, format. */
555 BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_OFFSET), 5);
556 OUT_RING(evo, nv_crtc->fb.offset >> 8);
557 OUT_RING(evo, 0);
558 OUT_RING(evo, (drm_fb->height << 16) | drm_fb->width);
559 if (!nv_crtc->fb.tile_flags) {
560 OUT_RING(evo, drm_fb->pitch | (1 << 20));
561 } else {
562 OUT_RING(evo, ((drm_fb->pitch / 4) << 4) |
563 fb->nvbo->tile_mode);
564 }
	/* Original NV50 wants the tile flags merged into the format word. */
565 if (dev_priv->chipset == 0x50)
566 OUT_RING(evo, (fb->nvbo->tile_flags << 8) | format);
567 else
568 OUT_RING(evo, format);
569
570 BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLUT_MODE), 1);
571 OUT_RING(evo, fb->base.depth == 8 ?
572 NV50_EVO_CRTC_CLUT_MODE_OFF : NV50_EVO_CRTC_CLUT_MODE_ON);
573
574 BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, COLOR_CTRL), 1);
575 OUT_RING(evo, NV50_EVO_CRTC_COLOR_CTRL_COLOR);
576 BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_POS), 1);
577 OUT_RING(evo, (y << 16) | x);
578
	/* Re-upload the LUT if its cached depth no longer matches (also
	 * covers the dirty depth==0 case set by gamma_set pre-fb). */
579 if (nv_crtc->lut.depth != fb->base.depth) {
580 nv_crtc->lut.depth = fb->base.depth;
581 nv50_crtc_lut_load(crtc);
582 }
583
584 if (update) {
585 ret = RING_SPACE(evo, 2);
586 if (ret)
587 return ret;
588 BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
589 OUT_RING(evo, 0);
590 FIRE_RING(evo);
591 }
592
593 return 0;
594}
595
/* drm_crtc_helper mode_set hook: program CRTC timings from
 * @adjusted_mode, then dithering, scaling and the framebuffer base.
 * Several timing words are unnamed ("unk") — their derivation below is
 * empirical.  Returns 0 or a negative errno.
 */
596static int
597nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
598 struct drm_display_mode *adjusted_mode, int x, int y,
599 struct drm_framebuffer *old_fb)
600{
601 struct drm_device *dev = crtc->dev;
602 struct drm_nouveau_private *dev_priv = dev->dev_private;
603 struct nouveau_channel *evo = dev_priv->evo;
604 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
605 struct nouveau_connector *nv_connector = NULL;
606 uint32_t hsync_dur, vsync_dur, hsync_start_to_end, vsync_start_to_end;
607 uint32_t hunk1, vunk1, vunk2a, vunk2b;
608 int ret;
609
610 /* Find the connector attached to this CRTC */
611 nv_connector = nouveau_crtc_connector_get(nv_crtc);
612
613 *nv_crtc->mode = *adjusted_mode;
614
615 NV_DEBUG(dev, "index %d\n", nv_crtc->index);
616
617 hsync_dur = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
618 vsync_dur = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
619 hsync_start_to_end = adjusted_mode->htotal - adjusted_mode->hsync_start;
620 vsync_start_to_end = adjusted_mode->vtotal - adjusted_mode->vsync_start;
621 /* I can't give this a proper name, anyone else can? */
622 hunk1 = adjusted_mode->htotal -
623 adjusted_mode->hsync_start + adjusted_mode->hdisplay;
624 vunk1 = adjusted_mode->vtotal -
625 adjusted_mode->vsync_start + adjusted_mode->vdisplay;
626 /* Another strange value, this time only for interlaced adjusted_modes. */
627 vunk2a = 2 * adjusted_mode->vtotal -
628 adjusted_mode->vsync_start + adjusted_mode->vdisplay;
629 vunk2b = adjusted_mode->vtotal -
630 adjusted_mode->vsync_start + adjusted_mode->vtotal;
631
	/* Interlaced modes use per-field (half) vertical values. */
632 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
633 vsync_dur /= 2;
634 vsync_start_to_end /= 2;
635 vunk1 /= 2;
636 vunk2a /= 2;
637 vunk2b /= 2;
638 /* magic */
639 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) {
640 vsync_start_to_end -= 1;
641 vunk1 -= 1;
642 vunk2a -= 1;
643 vunk2b -= 1;
644 }
645 }
646
	/* 17 words: both branches of the interlace if/else below emit the
	 * same total number of ring words. */
647 ret = RING_SPACE(evo, 17);
648 if (ret)
649 return ret;
650
651 BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLOCK), 2);
652 OUT_RING(evo, adjusted_mode->clock | 0x800000);
653 OUT_RING(evo, (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 0);
654
655 BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, DISPLAY_START), 5);
656 OUT_RING(evo, 0);
657 OUT_RING(evo, (adjusted_mode->vtotal << 16) | adjusted_mode->htotal);
658 OUT_RING(evo, (vsync_dur - 1) << 16 | (hsync_dur - 1));
659 OUT_RING(evo, (vsync_start_to_end - 1) << 16 |
660 (hsync_start_to_end - 1));
661 OUT_RING(evo, (vunk1 - 1) << 16 | (hunk1 - 1));
662
	/* NOTE(review): the else branch pushes two raw data words with no
	 * BEGIN_RING header — looks suspicious, but ring-word accounting
	 * balances (2 words either way); verify against EVO docs. */
663 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
664 BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, UNK0824), 1);
665 OUT_RING(evo, (vunk2b - 1) << 16 | (vunk2a - 1));
666 } else {
667 OUT_RING(evo, 0);
668 OUT_RING(evo, 0);
669 }
670
671 BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, UNK082C), 1);
672 OUT_RING(evo, 0);
673
674 /* This is the actual resolution of the mode. */
675 BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, REAL_RES), 1);
676 OUT_RING(evo, (mode->vdisplay << 16) | mode->hdisplay);
677 BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CENTER_OFFSET), 1);
678 OUT_RING(evo, NV50_EVO_CRTC_SCALE_CENTER_OFFSET_VAL(0, 0));
679
	/* Dither/scale are queued without an update; the commit hook or
	 * mode_set_base below fires the actual EVO update. */
680 nv_crtc->set_dither(nv_crtc, nv_connector->use_dithering, false);
681 nv_crtc->set_scale(nv_crtc, nv_connector->scaling_mode, false);
682
683 return nv50_crtc_do_mode_set_base(crtc, x, y, old_fb, false);
684}
685
/* drm_crtc_helper mode_set_base hook: re-point scanout at the bound
 * framebuffer and commit (update = true) immediately. */
686static int
687nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
688 struct drm_framebuffer *old_fb)
689{
690 return nv50_crtc_do_mode_set_base(crtc, x, y, old_fb, true);
691}
692
/* Helper-library CRTC hooks used by drm_crtc_helper_set_config(). */
693static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = {
694 .dpms = nv50_crtc_dpms,
695 .prepare = nv50_crtc_prepare,
696 .commit = nv50_crtc_commit,
697 .mode_fixup = nv50_crtc_mode_fixup,
698 .mode_set = nv50_crtc_mode_set,
699 .mode_set_base = nv50_crtc_mode_set_base,
700 .load_lut = nv50_crtc_lut_load,
701};
702
/* Allocate and register CRTC @index: default gamma ramp, a pinned and
 * mapped VRAM BO for the hardware LUT, DRM registration, and a cursor
 * BO plus cursor function pointers.  Returns 0 or a negative errno.
 * A LUT BO failure is fatal; a cursor BO failure is tolerated (the
 * cursor BO pointer is simply left NULL).
 */
703int
704nv50_crtc_create(struct drm_device *dev, int index)
705{
706 struct nouveau_crtc *nv_crtc = NULL;
707 int ret, i;
708
709 NV_DEBUG(dev, "\n");
710
711 nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
712 if (!nv_crtc)
713 return -ENOMEM;
714
715 nv_crtc->mode = kzalloc(sizeof(*nv_crtc->mode), GFP_KERNEL);
716 if (!nv_crtc->mode) {
717 kfree(nv_crtc);
718 return -ENOMEM;
719 }
720
721 /* Default CLUT parameters, will be activated on the hw upon
722 * first mode set.
723 */
724 for (i = 0; i < 256; i++) {
725 nv_crtc->lut.r[i] = i << 8;
726 nv_crtc->lut.g[i] = i << 8;
727 nv_crtc->lut.b[i] = i << 8;
728 }
	/* depth == 0 marks the table as not yet uploaded. */
729 nv_crtc->lut.depth = 0;
730
	/* 4KiB LUT buffer, pinned in VRAM and kept CPU-mapped. */
731 ret = nouveau_bo_new(dev, NULL, 4096, 0x100, TTM_PL_FLAG_VRAM,
732 0, 0x0000, false, true, &nv_crtc->lut.nvbo);
733 if (!ret) {
734 ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
735 if (!ret)
736 ret = nouveau_bo_map(nv_crtc->lut.nvbo);
737 if (ret)
738 nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
739 }
740
741 if (ret) {
742 kfree(nv_crtc->mode);
743 kfree(nv_crtc);
744 return ret;
745 }
746
747 nv_crtc->index = index;
748
749 /* set function pointers */
750 nv_crtc->set_dither = nv50_crtc_set_dither;
751 nv_crtc->set_scale = nv50_crtc_set_scale;
752
753 drm_crtc_init(dev, &nv_crtc->base, &nv50_crtc_funcs);
754 drm_crtc_helper_add(&nv_crtc->base, &nv50_crtc_helper_funcs);
755 drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
756
	/* 64x64x32bpp cursor image buffer.  NOTE(review): failure here is
	 * silently ignored — ret is not checked before returning 0;
	 * presumably cursor paths cope with a NULL BO.  TODO confirm. */
757 ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
758 0, 0x0000, false, true, &nv_crtc->cursor.nvbo);
759 if (!ret) {
760 ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
761 if (!ret)
762 ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
763 if (ret)
764 nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
765 }
766
767 nv50_cursor_init(nv_crtc);
768 return 0;
769}
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c
new file mode 100644
index 000000000000..e2e79a8f220d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_cursor.c
@@ -0,0 +1,156 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm_mode.h"
29
30#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
31#include "nouveau_reg.h"
32#include "nouveau_drv.h"
33#include "nouveau_crtc.h"
34#include "nv50_display.h"
35
/* Make the hardware cursor visible via the EVO channel.  With @update
 * the change is committed (and the visible flag latched) immediately;
 * without it, the caller is expected to fire the update itself.
 */
36static void
37nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
38{
39 struct drm_nouveau_private *dev_priv = nv_crtc->base.dev->dev_private;
40 struct nouveau_channel *evo = dev_priv->evo;
41 struct drm_device *dev = nv_crtc->base.dev;
42 int ret;
43
44 NV_DEBUG(dev, "\n");
45
	/* Already visible and committed: nothing to do. */
46 if (update && nv_crtc->cursor.visible)
47 return;
48
	/* NV84+ needs two extra words for the cursor DMA handle. */
49 ret = RING_SPACE(evo, (dev_priv->chipset != 0x50 ? 5 : 3) + update * 2);
50 if (ret) {
51 NV_ERROR(dev, "no space while unhiding cursor\n");
52 return;
53 }
54
55 if (dev_priv->chipset != 0x50) {
56 BEGIN_RING(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
57 OUT_RING(evo, NvEvoVRAM);
58 }
	/* Cursor image offset is programmed in 256-byte units. */
59 BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
60 OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_SHOW);
61 OUT_RING(evo, nv_crtc->cursor.offset >> 8);
62
63 if (update) {
64 BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
65 OUT_RING(evo, 0);
66 FIRE_RING(evo);
67 nv_crtc->cursor.visible = true;
68 }
69}
70
/* Hide the hardware cursor via the EVO channel; mirror image of
 * nv50_cursor_show().  With @update the change is committed (and the
 * visible flag cleared) immediately.
 */
71static void
72nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
73{
74 struct drm_nouveau_private *dev_priv = nv_crtc->base.dev->dev_private;
75 struct nouveau_channel *evo = dev_priv->evo;
76 struct drm_device *dev = nv_crtc->base.dev;
77 int ret;
78
79 NV_DEBUG(dev, "\n");
80
	/* Already hidden and committed: nothing to do. */
81 if (update && !nv_crtc->cursor.visible)
82 return;
83
	/* NV84+ needs two extra words to also detach the cursor DMA. */
84 ret = RING_SPACE(evo, (dev_priv->chipset != 0x50 ? 5 : 3) + update * 2);
85 if (ret) {
86 NV_ERROR(dev, "no space while hiding cursor\n");
87 return;
88 }
89 BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
90 OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_HIDE);
91 OUT_RING(evo, 0);
92 if (dev_priv->chipset != 0x50) {
93 BEGIN_RING(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
94 OUT_RING(evo, NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE);
95 }
96
97 if (update) {
98 BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
99 OUT_RING(evo, 0);
100 FIRE_RING(evo);
101 nv_crtc->cursor.visible = false;
102 }
103}
104
105static void
106nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
107{
108 struct drm_device *dev = nv_crtc->base.dev;
109
110 nv_wr32(dev, NV50_PDISPLAY_CURSOR_USER_POS(nv_crtc->index),
111 ((y & 0xFFFF) << 16) | (x & 0xFFFF));
112 /* Needed to make the cursor move. */
113 nv_wr32(dev, NV50_PDISPLAY_CURSOR_USER_POS_CTRL(nv_crtc->index), 0);
114}
115
116static void
117nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
118{
119 NV_DEBUG(nv_crtc->base.dev, "\n");
120 if (offset == nv_crtc->cursor.offset)
121 return;
122
123 nv_crtc->cursor.offset = offset;
124 if (nv_crtc->cursor.visible) {
125 nv_crtc->cursor.visible = false;
126 nv_crtc->cursor.show(nv_crtc, true);
127 }
128}
129
130int
131nv50_cursor_init(struct nouveau_crtc *nv_crtc)
132{
133 nv_crtc->cursor.set_offset = nv50_cursor_set_offset;
134 nv_crtc->cursor.set_pos = nv50_cursor_set_pos;
135 nv_crtc->cursor.hide = nv50_cursor_hide;
136 nv_crtc->cursor.show = nv50_cursor_show;
137 return 0;
138}
139
/* Shut the cursor channel down for this CRTC: write 0 to CTRL2 and
 * wait for its STATUS field to clear.  A timeout is logged but
 * otherwise non-fatal.
 */
140void
141nv50_cursor_fini(struct nouveau_crtc *nv_crtc)
142{
143 struct drm_device *dev = nv_crtc->base.dev;
144 int idx = nv_crtc->index;
145
146 NV_DEBUG(dev, "\n");
147
148 nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx), 0);
149 if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx),
150 NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
151 NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n");
152 NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n",
153 nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx)));
154 }
155}
156
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
new file mode 100644
index 000000000000..fb5838e3be24
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_dac.c
@@ -0,0 +1,304 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm_crtc_helper.h"
29
30#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
31#include "nouveau_reg.h"
32#include "nouveau_drv.h"
33#include "nouveau_dma.h"
34#include "nouveau_encoder.h"
35#include "nouveau_connector.h"
36#include "nouveau_crtc.h"
37#include "nv50_display.h"
38
/* Detach a DAC from whatever CRTC is driving it by clearing its EVO
 * MODE_CTRL method.  Best-effort: on ring-space failure we only log.
 */
static void
nv50_dac_disconnect(struct nouveau_encoder *nv_encoder)
{
	struct drm_device *dev = to_drm_encoder(nv_encoder)->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *evo = dev_priv->evo;
	int ret;

	NV_DEBUG(dev, "Disconnecting DAC %d\n", nv_encoder->or);

	ret = RING_SPACE(evo, 2);
	if (ret) {
		NV_ERROR(dev, "no space while disconnecting DAC\n");
		return;
	}
	/* MODE_CTRL = 0 unbinds the output from all CRTCs. */
	BEGIN_RING(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 1);
	OUT_RING(evo, 0);
}
57
/* Sense for an attached analog load: save the DAC's DPMS state, drive
 * a test pattern out of the DAC, sample the load-detect status, then
 * restore the saved state.  Returns connected/disconnected accordingly.
 */
static enum drm_connector_status
nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_device *dev = encoder->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	enum drm_connector_status status = connector_status_disconnected;
	uint32_t dpms_state, load_pattern, load_state;
	int or = nv_encoder->or;

	/* Enable the DAC clock and remember the current DPMS state so it
	 * can be restored after the probe.
	 */
	nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL1(or), 0x00000001);
	dpms_state = nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or));

	nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
		0x00150000 | NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
	if (!nv_wait(NV50_PDISPLAY_DAC_DPMS_CTRL(or),
		     NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
		NV_ERROR(dev, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
		NV_ERROR(dev, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
			 nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or)));
		return status;
	}

	/* Use bios provided value if possible. */
	if (dev_priv->vbios->dactestval) {
		load_pattern = dev_priv->vbios->dactestval;
		NV_DEBUG(dev, "Using bios provided load_pattern of %d\n",
			 load_pattern);
	} else {
		load_pattern = 340;
		NV_DEBUG(dev, "Using default load_pattern of %d\n",
			 load_pattern);
	}

	/* Drive the test pattern, let the sense circuit settle, sample. */
	nv_wr32(dev, NV50_PDISPLAY_DAC_LOAD_CTRL(or),
		NV50_PDISPLAY_DAC_LOAD_CTRL_ACTIVE | load_pattern);
	mdelay(45); /* give it some time to process */
	load_state = nv_rd32(dev, NV50_PDISPLAY_DAC_LOAD_CTRL(or));

	/* Stop driving the pattern and restore the saved DPMS state. */
	nv_wr32(dev, NV50_PDISPLAY_DAC_LOAD_CTRL(or), 0);
	nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or), dpms_state |
		NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);

	if ((load_state & NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT) ==
			  NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT)
		status = connector_status_connected;

	if (status == connector_status_connected)
		NV_DEBUG(dev, "Load was detected on output with or %d\n", or);
	else
		NV_DEBUG(dev, "Load was not detected on output with or %d\n", or);

	return status;
}
112
/* Set the DAC's DPMS power state: wait for any previous state change
 * to complete, compute the new blank/hsync/vsync/off bits, then kick
 * the update via the PENDING bit.
 */
static void
nv50_dac_dpms(struct drm_encoder *encoder, int mode)
{
	struct drm_device *dev = encoder->dev;
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	uint32_t val;
	int or = nv_encoder->or;

	NV_DEBUG(dev, "or %d mode %d\n", or, mode);

	/* wait for it to be done */
	if (!nv_wait(NV50_PDISPLAY_DAC_DPMS_CTRL(or),
		     NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
		NV_ERROR(dev, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
		NV_ERROR(dev, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
			 nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or)));
		return;
	}

	/* Keep everything except the low 7 state bits, which we rebuild. */
	val = nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or)) & ~0x7F;

	/* Anything other than full-on blanks the output. */
	if (mode != DRM_MODE_DPMS_ON)
		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_BLANKED;

	switch (mode) {
	case DRM_MODE_DPMS_STANDBY:
		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF;
		break;
	case DRM_MODE_DPMS_SUSPEND:
		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF;
		break;
	case DRM_MODE_DPMS_OFF:
		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_OFF;
		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF;
		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF;
		break;
	default:
		break;
	}

	nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or), val |
		NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
}
156
/* State save is not implemented for NV50 DACs; log loudly if called. */
static void
nv50_dac_save(struct drm_encoder *encoder)
{
	NV_ERROR(encoder->dev, "!!\n");
}
162
/* State restore is not implemented for NV50 DACs; log loudly if called. */
static void
nv50_dac_restore(struct drm_encoder *encoder)
{
	NV_ERROR(encoder->dev, "!!\n");
}
168
169static bool
170nv50_dac_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
171 struct drm_display_mode *adjusted_mode)
172{
173 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
174 struct nouveau_connector *connector;
175
176 NV_DEBUG(encoder->dev, "or %d\n", nv_encoder->or);
177
178 connector = nouveau_encoder_connector_get(nv_encoder);
179 if (!connector) {
180 NV_ERROR(encoder->dev, "Encoder has no connector\n");
181 return false;
182 }
183
184 if (connector->scaling_mode != DRM_MODE_SCALE_NONE &&
185 connector->native_mode) {
186 int id = adjusted_mode->base.id;
187 *adjusted_mode = *connector->native_mode;
188 adjusted_mode->base.id = id;
189 }
190
191 return true;
192}
193
/* No preparation needed before a DAC mode set; required helper stub. */
static void
nv50_dac_prepare(struct drm_encoder *encoder)
{
}
198
/* No commit-time work needed for DACs; required helper stub. */
static void
nv50_dac_commit(struct drm_encoder *encoder)
{
}
203
/* Program the DAC's EVO MODE_CTRL/MODE_CTRL2 methods for the adjusted
 * mode, after first forcing the DAC fully on.  (The unadjusted "mode"
 * argument is unused; only sync polarity from adjusted_mode matters.)
 */
static void
nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
		  struct drm_display_mode *adjusted_mode)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_device *dev = encoder->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *evo = dev_priv->evo;
	struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
	uint32_t mode_ctl = 0, mode_ctl2 = 0;
	int ret;

	NV_DEBUG(dev, "or %d\n", nv_encoder->or);

	nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON);

	/* Bind the DAC to the CRTC that is driving it. */
	if (crtc->index == 1)
		mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC1;
	else
		mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC0;

	/* Lacking a working tv-out, this is not a 100% sure. */
	if (nv_encoder->dcb->type == OUTPUT_ANALOG)
		mode_ctl |= 0x40;
	else
	if (nv_encoder->dcb->type == OUTPUT_TV)
		mode_ctl |= 0x100;

	/* Negative sync polarities from the adjusted mode. */
	if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
		mode_ctl2 |= NV50_EVO_DAC_MODE_CTRL2_NHSYNC;

	if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
		mode_ctl2 |= NV50_EVO_DAC_MODE_CTRL2_NVSYNC;

	ret = RING_SPACE(evo, 3);
	if (ret) {
		NV_ERROR(dev, "no space while connecting DAC\n");
		return;
	}
	/* MODE_CTRL and MODE_CTRL2 are consecutive methods. */
	BEGIN_RING(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 2);
	OUT_RING(evo, mode_ctl);
	OUT_RING(evo, mode_ctl2);
}
247
/* Modesetting helper vtable for NV50 DAC encoders. */
static const struct drm_encoder_helper_funcs nv50_dac_helper_funcs = {
	.dpms = nv50_dac_dpms,
	.save = nv50_dac_save,
	.restore = nv50_dac_restore,
	.mode_fixup = nv50_dac_mode_fixup,
	.prepare = nv50_dac_prepare,
	.commit = nv50_dac_commit,
	.mode_set = nv50_dac_mode_set,
	.detect = nv50_dac_detect
};
258
259static void
260nv50_dac_destroy(struct drm_encoder *encoder)
261{
262 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
263
264 if (!encoder)
265 return;
266
267 NV_DEBUG(encoder->dev, "\n");
268
269 drm_encoder_cleanup(encoder);
270 kfree(nv_encoder);
271}
272
/* Core encoder vtable; only destruction is needed for DACs. */
static const struct drm_encoder_funcs nv50_dac_encoder_funcs = {
	.destroy = nv50_dac_destroy,
};
276
/* Create and register a DRM encoder for an on-chip DAC described by a
 * DCB table entry.  Returns 0 on success or -ENOMEM.
 */
int
nv50_dac_create(struct drm_device *dev, struct dcb_entry *entry)
{
	struct nouveau_encoder *nv_encoder;
	struct drm_encoder *encoder;

	NV_DEBUG(dev, "\n");
	NV_INFO(dev, "Detected a DAC output\n");

	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
	if (!nv_encoder)
		return -ENOMEM;
	encoder = to_drm_encoder(nv_encoder);

	nv_encoder->dcb = entry;
	/* "or" = output resource index: bit position set in the DCB mask. */
	nv_encoder->or = ffs(entry->or) - 1;

	nv_encoder->disconnect = nv50_dac_disconnect;

	drm_encoder_init(dev, encoder, &nv50_dac_encoder_funcs,
			 DRM_MODE_ENCODER_DAC);
	drm_encoder_helper_add(encoder, &nv50_dac_helper_funcs);

	/* DCB "heads" is already a bitmask of CRTCs this output can use. */
	encoder->possible_crtcs = entry->heads;
	encoder->possible_clones = 0;
	return 0;
}
304
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
new file mode 100644
index 000000000000..12c5ee63495b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -0,0 +1,1015 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "nv50_display.h"
28#include "nouveau_crtc.h"
29#include "nouveau_encoder.h"
30#include "nouveau_connector.h"
31#include "nouveau_fb.h"
32#include "drm_crtc_helper.h"
33
/* Tear down an EVO channel: release its gpuobjs, drop the pushbuf BO,
 * unmap the user control registers and free the structure.  NULL-safe;
 * clears the caller's pointer before teardown.
 */
static void
nv50_evo_channel_del(struct nouveau_channel **pchan)
{
	struct nouveau_channel *chan = *pchan;

	if (!chan)
		return;
	*pchan = NULL;

	nouveau_gpuobj_channel_takedown(chan);
	nouveau_bo_ref(NULL, &chan->pushbuf_bo);

	/* chan->user is only set once ioremap of the EVO regs succeeded. */
	if (chan->user)
		iounmap(chan->user);

	kfree(chan);
}
51
/* Create a 6-word DMA object for the EVO channel and hash it into the
 * channel's RAMHT under "name".  Word 0 packs tile flags, the "magic"
 * flags and the object class; words 1/2 are the limit and base offset.
 * On failure the partially-created object is deleted.
 */
static int
nv50_evo_dmaobj_new(struct nouveau_channel *evo, uint32_t class, uint32_t name,
		    uint32_t tile_flags, uint32_t magic_flags,
		    uint32_t offset, uint32_t limit)
{
	struct drm_nouveau_private *dev_priv = evo->dev->dev_private;
	struct drm_device *dev = evo->dev;
	struct nouveau_gpuobj *obj = NULL;
	int ret;

	ret = nouveau_gpuobj_new(dev, evo, 6*4, 32, 0, &obj);
	if (ret)
		return ret;
	obj->engine = NVOBJ_ENGINE_DISPLAY;

	ret = nouveau_gpuobj_ref_add(dev, evo, name, obj, NULL);
	if (ret) {
		nouveau_gpuobj_del(dev, &obj);
		return ret;
	}

	/* Instance-memory writes must be bracketed by prepare/finish. */
	dev_priv->engine.instmem.prepare_access(dev, true);
	nv_wo32(dev, obj, 0, (tile_flags << 22) | (magic_flags << 16) | class);
	nv_wo32(dev, obj, 1, limit);
	nv_wo32(dev, obj, 2, offset);
	nv_wo32(dev, obj, 3, 0x00000000);
	nv_wo32(dev, obj, 4, 0x00000000);
	nv_wo32(dev, obj, 5, 0x00010000);
	dev_priv->engine.instmem.finish_access(dev);

	return 0;
}
84
/* Allocate and set up the EVO (display) channel: instance memory and
 * its heap, a RAMHT, the framebuffer/VRAM DMA objects, a 4 KiB push
 * buffer in VRAM, and a mapping of the EVO user control registers.
 * Every failure path tears down whatever was built via
 * nv50_evo_channel_del().  Returns 0 or a negative error code.
 */
static int
nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan;
	int ret;

	chan = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;
	*pchan = chan;

	/* id -1: the EVO channel is not a normal PFIFO channel.
	 * NOTE(review): user_get = 4 / user_put = 0 presumably mirror the
	 * EVO GET/PUT register offsets — confirm against NV50_PDISPLAY_USER.
	 */
	chan->id = -1;
	chan->dev = dev;
	chan->user_get = 4;
	chan->user_put = 0;

	INIT_LIST_HEAD(&chan->ramht_refs);

	ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 32768, 0x1000,
				     NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin);
	if (ret) {
		NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
		nv50_evo_channel_del(pchan);
		return ret;
	}

	ret = nouveau_mem_init_heap(&chan->ramin_heap, chan->ramin->gpuobj->
				    im_pramin->start, 32768);
	if (ret) {
		NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
		nv50_evo_channel_del(pchan);
		return ret;
	}

	ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 4096, 16,
				     0, &chan->ramht);
	if (ret) {
		NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
		nv50_evo_channel_del(pchan);
		return ret;
	}

	/* The original NV50 does not get the 16/32-bit FB dma objects. */
	if (dev_priv->chipset != 0x50) {
		ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB16, 0x70, 0x19,
					  0, 0xffffffff);
		if (ret) {
			nv50_evo_channel_del(pchan);
			return ret;
		}


		ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB32, 0x7a, 0x19,
					  0, 0xffffffff);
		if (ret) {
			nv50_evo_channel_del(pchan);
			return ret;
		}
	}

	/* Linear dma object covering all of VRAM. */
	ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoVRAM, 0, 0x19,
				  0, nouveau_mem_fb_amount(dev));
	if (ret) {
		nv50_evo_channel_del(pchan);
		return ret;
	}

	ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
			     false, true, &chan->pushbuf_bo);
	if (ret == 0)
		ret = nouveau_bo_pin(chan->pushbuf_bo, TTM_PL_FLAG_VRAM);
	if (ret) {
		NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret);
		nv50_evo_channel_del(pchan);
		return ret;
	}

	ret = nouveau_bo_map(chan->pushbuf_bo);
	if (ret) {
		NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret);
		nv50_evo_channel_del(pchan);
		return ret;
	}

	/* Map the EVO channel's user-control page (GET/PUT registers). */
	chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
					NV50_PDISPLAY_USER(0), PAGE_SIZE);
	if (!chan->user) {
		NV_ERROR(dev, "Error mapping EVO control regs.\n");
		nv50_evo_channel_del(pchan);
		return -ENOMEM;
	}

	return 0;
}
179
/* Bring up the NV50 display engine: mirror per-output state into the
 * 0x6101xx control area, reset the DACs, program the visible RAM size,
 * un-wedge the EVO channel if stuck, enable both cursor channels,
 * point the hardware at the EVO objects and push buffer, prime the
 * ring with a disconnected initial state, and finally enable the
 * clock-change and hotplug interrupts.
 *
 * Returns 0 on success, -EBUSY if the hardware fails to respond, or a
 * ring-space error from the EVO channel.
 */
int
nv50_display_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
	struct nouveau_channel *evo = dev_priv->evo;
	struct drm_connector *connector;
	uint32_t val, ram_amount, hpd_en[2];
	uint64_t start;
	int ret, i;

	NV_DEBUG(dev, "\n");

	nv_wr32(dev, 0x00610184, nv_rd32(dev, 0x00614004));
	/*
	 * I think the 0x006101XX range is some kind of main control area
	 * that enables things.
	 */
	/* CRTC? */
	for (i = 0; i < 2; i++) {
		val = nv_rd32(dev, 0x00616100 + (i * 0x800));
		nv_wr32(dev, 0x00610190 + (i * 0x10), val);
		val = nv_rd32(dev, 0x00616104 + (i * 0x800));
		nv_wr32(dev, 0x00610194 + (i * 0x10), val);
		val = nv_rd32(dev, 0x00616108 + (i * 0x800));
		nv_wr32(dev, 0x00610198 + (i * 0x10), val);
		val = nv_rd32(dev, 0x0061610c + (i * 0x800));
		nv_wr32(dev, 0x0061019c + (i * 0x10), val);
	}
	/* DAC */
	for (i = 0; i < 3; i++) {
		val = nv_rd32(dev, 0x0061a000 + (i * 0x800));
		nv_wr32(dev, 0x006101d0 + (i * 0x04), val);
	}
	/* SOR */
	for (i = 0; i < 4; i++) {
		val = nv_rd32(dev, 0x0061c000 + (i * 0x800));
		nv_wr32(dev, 0x006101e0 + (i * 0x04), val);
	}
	/* Something not yet in use, tv-out maybe. */
	for (i = 0; i < 3; i++) {
		val = nv_rd32(dev, 0x0061e000 + (i * 0x800));
		nv_wr32(dev, 0x006101f0 + (i * 0x04), val);
	}

	/* Kick each DAC to a known DPMS state and enable its clock. */
	for (i = 0; i < 3; i++) {
		nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(i), 0x00550000 |
			NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
		nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL1(i), 0x00000001);
	}

	/* This used to be in crtc unblank, but seems out of place there. */
	nv_wr32(dev, NV50_PDISPLAY_UNK_380, 0);
	/* RAM is clamped to 256 MiB. */
	ram_amount = nouveau_mem_fb_amount(dev);
	NV_DEBUG(dev, "ram_amount %d\n", ram_amount);
	if (ram_amount > 256*1024*1024)
		ram_amount = 256*1024*1024;
	nv_wr32(dev, NV50_PDISPLAY_RAM_AMOUNT, ram_amount - 1);
	nv_wr32(dev, NV50_PDISPLAY_UNK_388, 0x150000);
	nv_wr32(dev, NV50_PDISPLAY_UNK_38C, 0);

	/* The precise purpose is unknown, i suspect it has something to do
	 * with text mode.
	 */
	if (nv_rd32(dev, NV50_PDISPLAY_INTR_1) & 0x100) {
		nv_wr32(dev, NV50_PDISPLAY_INTR_1, 0x100);
		nv_wr32(dev, 0x006194e8, nv_rd32(dev, 0x006194e8) & ~1);
		if (!nv_wait(0x006194e8, 2, 0)) {
			NV_ERROR(dev, "timeout: (0x6194e8 & 2) != 0\n");
			NV_ERROR(dev, "0x6194e8 = 0x%08x\n",
				 nv_rd32(dev, 0x6194e8));
			return -EBUSY;
		}
	}

	/* taken from nv bug #12637, attempts to un-wedge the hw if it's
	 * stuck in some unspecified state
	 */
	start = ptimer->read(dev);
	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x2b00);
	while ((val = nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0))) & 0x1e0000) {
		if ((val & 0x9f0000) == 0x20000)
			nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
							val | 0x800000);

		if ((val & 0x3f0000) == 0x30000)
			nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
							val | 0x200000);

		/* Give up after one second of wall-clock time. */
		if (ptimer->read(dev) - start > 1000000000ULL) {
			NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) != 0\n");
			NV_ERROR(dev, "0x610200 = 0x%08x\n", val);
			return -EBUSY;
		}
	}

	nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, NV50_PDISPLAY_CTRL_STATE_ENABLE);
	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1000b03);
	if (!nv_wait(NV50_PDISPLAY_CHANNEL_STAT(0), 0x40000000, 0x40000000)) {
		NV_ERROR(dev, "timeout: (0x610200 & 0x40000000) == 0x40000000\n");
		NV_ERROR(dev, "0x610200 = 0x%08x\n",
			  nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)));
		return -EBUSY;
	}

	/* Enable the cursor channel on each head: wait for it to go idle,
	 * switch it on, then wait for it to report active.
	 */
	for (i = 0; i < 2; i++) {
		nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000);
		if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
			     NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
			NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n");
			NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n",
				 nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
			return -EBUSY;
		}

		nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
			NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_ON);
		if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
			     NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS,
			     NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE)) {
			NV_ERROR(dev, "timeout: "
				      "CURSOR_CTRL2_STATUS_ACTIVE(%d)\n", i);
			NV_ERROR(dev, "CURSOR_CTRL2(%d) = 0x%08x\n", i,
				 nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
			return -EBUSY;
		}
	}

	/* Point the display engine at the EVO channel's instance memory. */
	nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->instance >> 8) | 9);

	/* initialise fifo */
	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_DMA_CB(0),
		((evo->pushbuf_bo->bo.mem.mm_node->start << PAGE_SHIFT) >> 8) |
		NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM |
		NV50_PDISPLAY_CHANNEL_DMA_CB_VALID);
	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK2(0), 0x00010000);
	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK3(0), 0x00000002);
	if (!nv_wait(0x610200, 0x80000000, 0x00000000)) {
		NV_ERROR(dev, "timeout: (0x610200 & 0x80000000) == 0\n");
		NV_ERROR(dev, "0x610200 = 0x%08x\n", nv_rd32(dev, 0x610200));
		return -EBUSY;
	}
	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
		(nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)) & ~0x00000003) |
		 NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED);
	nv_wr32(dev, NV50_PDISPLAY_USER_PUT(0), 0);
	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x01000003 |
		NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED);
	nv_wr32(dev, 0x610300, nv_rd32(dev, 0x610300) & ~1);

	/* Initialise the software-side ring bookkeeping for the 4 KiB
	 * push buffer (in 32-bit words, minus two reserved slots).
	 */
	evo->dma.max = (4096/4) - 2;
	evo->dma.put = 0;
	evo->dma.cur = evo->dma.put;
	evo->dma.free = evo->dma.max - evo->dma.cur;

	ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS);
	if (ret)
		return ret;

	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
		OUT_RING(evo, 0);

	/* Prime CRTC 0 with an "everything disconnected" state. */
	ret = RING_SPACE(evo, 11);
	if (ret)
		return ret;
	BEGIN_RING(evo, 0, NV50_EVO_UNK84, 2);
	OUT_RING(evo, NV50_EVO_UNK84_NOTIFY_DISABLED);
	OUT_RING(evo, NV50_EVO_DMA_NOTIFY_HANDLE_NONE);
	BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, FB_DMA), 1);
	OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE);
	BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK0800), 1);
	OUT_RING(evo, 0);
	BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, DISPLAY_START), 1);
	OUT_RING(evo, 0);
	BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK082C), 1);
	OUT_RING(evo, 0);
	FIRE_RING(evo);
	/* Wait for the hardware GET pointer to catch up with our PUT. */
	if (!nv_wait(0x640004, 0xffffffff, evo->dma.put << 2))
		NV_ERROR(dev, "evo pushbuf stalled\n");

	/* enable clock change interrupts. */
	nv_wr32(dev, 0x610028, 0x00010001);
	nv_wr32(dev, NV50_PDISPLAY_INTR_EN, (NV50_PDISPLAY_INTR_EN_CLK_UNK10 |
					     NV50_PDISPLAY_INTR_EN_CLK_UNK20 |
					     NV50_PDISPLAY_INTR_EN_CLK_UNK40));

	/* enable hotplug interrupts */
	hpd_en[0] = hpd_en[1] = 0;
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct nouveau_connector *conn = nouveau_connector(connector);
		struct dcb_gpio_entry *gpio;

		/* Only DVI/DisplayPort connectors get hotplug detection. */
		if (connector->connector_type != DRM_MODE_CONNECTOR_DVII &&
		    connector->connector_type != DRM_MODE_CONNECTOR_DVID &&
		    connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		gpio = nouveau_bios_gpio_entry(dev, conn->dcb->gpio_tag);
		if (!gpio)
			continue;

		/* GPIO lines 0-15 map to hpd_en[0], 16+ to hpd_en[1]. */
		hpd_en[gpio->line >> 4] |= (0x00010001 << (gpio->line & 0xf));
	}

	/* NOTE(review): 0xe054/0xe074 writes presumably ack stale hotplug
	 * state before 0xe050/0xe070 unmask the lines in use — confirm.
	 */
	nv_wr32(dev, 0xe054, 0xffffffff);
	nv_wr32(dev, 0xe050, hpd_en[0]);
	if (dev_priv->chipset >= 0x90) {
		nv_wr32(dev, 0xe074, 0xffffffff);
		nv_wr32(dev, 0xe070, hpd_en[1]);
	}

	return 0;
}
394
/* Shut the display engine down: blank every CRTC, flush one final EVO
 * update, wait out each enabled CRTC's vblank, stop the EVO channel,
 * wait for the SORs to settle, and mask display/hotplug interrupts.
 * Timeouts are logged but do not abort the shutdown; always returns 0.
 */
static int nv50_display_disable(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_crtc *drm_crtc;
	int ret, i;

	NV_DEBUG(dev, "\n");

	list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) {
		struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc);

		nv50_crtc_blank(crtc, true);
	}

	/* Push a final UPDATE so the blanking above takes effect. */
	ret = RING_SPACE(dev_priv->evo, 2);
	if (ret == 0) {
		BEGIN_RING(dev_priv->evo, 0, NV50_EVO_UPDATE, 1);
		OUT_RING(dev_priv->evo, 0);
	}
	FIRE_RING(dev_priv->evo);

	/* Almost like ack'ing a vblank interrupt, maybe in the spirit of
	 * cleaning up?
	 */
	list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) {
		struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc);
		uint32_t mask = NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(crtc->index);

		if (!crtc->base.enabled)
			continue;

		nv_wr32(dev, NV50_PDISPLAY_INTR_1, mask);
		if (!nv_wait(NV50_PDISPLAY_INTR_1, mask, mask)) {
			NV_ERROR(dev, "timeout: (0x610024 & 0x%08x) == "
				      "0x%08x\n", mask, mask);
			NV_ERROR(dev, "0x610024 = 0x%08x\n",
				 nv_rd32(dev, NV50_PDISPLAY_INTR_1));
		}
	}

	/* Stop the EVO channel and wait for it to report stopped. */
	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0);
	nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, 0);
	if (!nv_wait(NV50_PDISPLAY_CHANNEL_STAT(0), 0x1e0000, 0)) {
		NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) == 0\n");
		NV_ERROR(dev, "0x610200 = 0x%08x\n",
			  nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)));
	}

	/* Wait for each SOR's pending DPMS transition to finish. */
	for (i = 0; i < 3; i++) {
		if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_STATE(i),
			     NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
			NV_ERROR(dev, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", i);
			NV_ERROR(dev, "SOR_DPMS_STATE(%d) = 0x%08x\n", i,
				  nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_STATE(i)));
		}
	}

	/* disable interrupts. */
	nv_wr32(dev, NV50_PDISPLAY_INTR_EN, 0x00000000);

	/* disable hotplug interrupts */
	nv_wr32(dev, 0xe054, 0xffffffff);
	nv_wr32(dev, 0xe050, 0x00000000);
	if (dev_priv->chipset >= 0x90) {
		nv_wr32(dev, 0xe074, 0xffffffff);
		nv_wr32(dev, 0xe070, 0x00000000);
	}
	return 0;
}
464
/* Create the NV50 modesetting state: DRM mode config, the EVO channel,
 * two CRTCs, encoders from the VBIOS DCB table, and connectors derived
 * from which encoder types share a DCB connector index.  Finishes by
 * initialising the hardware via nv50_display_init().
 */
int nv50_display_create(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct parsed_dcb *dcb = dev_priv->vbios->dcb;
	/* Per connector-index bitmask of encoder types seen in the DCB. */
	uint32_t connector[16] = {};
	int ret, i;

	NV_DEBUG(dev, "\n");

	/* init basic kernel modesetting */
	drm_mode_config_init(dev);

	/* Initialise some optional connector properties. */
	drm_mode_create_scaling_mode_property(dev);
	drm_mode_create_dithering_property(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs;

	dev->mode_config.max_width = 8192;
	dev->mode_config.max_height = 8192;

	dev->mode_config.fb_base = dev_priv->fb_phys;

	/* Create EVO channel */
	ret = nv50_evo_channel_new(dev, &dev_priv->evo);
	if (ret) {
		NV_ERROR(dev, "Error creating EVO channel: %d\n", ret);
		return ret;
	}

	/* Create CRTC objects */
	for (i = 0; i < 2; i++)
		nv50_crtc_create(dev, i);

	/* We setup the encoders from the BIOS table */
	for (i = 0 ; i < dcb->entries; i++) {
		struct dcb_entry *entry = &dcb->entry[i];

		if (entry->location != DCB_LOC_ON_CHIP) {
			NV_WARN(dev, "Off-chip encoder %d/%d unsupported\n",
				entry->type, ffs(entry->or) - 1);
			continue;
		}

		switch (entry->type) {
		case OUTPUT_TMDS:
		case OUTPUT_LVDS:
		case OUTPUT_DP:
			nv50_sor_create(dev, entry);
			break;
		case OUTPUT_ANALOG:
			nv50_dac_create(dev, entry);
			break;
		default:
			NV_WARN(dev, "DCB encoder %d unknown\n", entry->type);
			continue;
		}

		/* Record which encoder types sit on this connector index. */
		connector[entry->connector] |= (1 << entry->type);
	}

	/* It appears that DCB 3.0+ VBIOS has a connector table, however,
	 * I'm not 100% certain how to decode it correctly yet so just
	 * look at what encoders are present on each connector index and
	 * attempt to derive the connector type from that.
	 */
	for (i = 0 ; i < dcb->entries; i++) {
		struct dcb_entry *entry = &dcb->entry[i];
		uint16_t encoders;
		int type;

		encoders = connector[entry->connector];
		if (!(encoders & (1 << entry->type)))
			continue;
		/* Zero the mask so each connector is created only once. */
		connector[entry->connector] = 0;

		/* DP+anything => DP; TMDS+analog => DVI-I; TMDS => DVI-D. */
		if (encoders & (1 << OUTPUT_DP)) {
			type = DRM_MODE_CONNECTOR_DisplayPort;
		} else if (encoders & (1 << OUTPUT_TMDS)) {
			if (encoders & (1 << OUTPUT_ANALOG))
				type = DRM_MODE_CONNECTOR_DVII;
			else
				type = DRM_MODE_CONNECTOR_DVID;
		} else if (encoders & (1 << OUTPUT_ANALOG)) {
			type = DRM_MODE_CONNECTOR_VGA;
		} else if (encoders & (1 << OUTPUT_LVDS)) {
			type = DRM_MODE_CONNECTOR_LVDS;
		} else {
			type = DRM_MODE_CONNECTOR_Unknown;
		}

		if (type == DRM_MODE_CONNECTOR_Unknown)
			continue;

		nouveau_connector_create(dev, entry->connector, type);
	}

	ret = nv50_display_init(dev);
	if (ret)
		return ret;

	return 0;
}
571
/* Tear down kernel modesetting, disable the display engine, and free
 * the EVO channel.
 *
 * NOTE(review): drm_mode_config_cleanup() runs before
 * nv50_display_disable(); confirm the disable path does not depend on
 * mode objects that cleanup has already destroyed.
 */
int nv50_display_destroy(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	NV_DEBUG(dev, "\n");

	drm_mode_config_cleanup(dev);

	nv50_display_disable(dev);
	nv50_evo_channel_del(&dev_priv->evo);

	return 0;
}
585
586static inline uint32_t
587nv50_display_mode_ctrl(struct drm_device *dev, bool sor, int or)
588{
589 struct drm_nouveau_private *dev_priv = dev->dev_private;
590 uint32_t mc;
591
592 if (sor) {
593 if (dev_priv->chipset < 0x90 ||
594 dev_priv->chipset == 0x92 || dev_priv->chipset == 0xa0)
595 mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_P(or));
596 else
597 mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_P(or));
598 } else {
599 mc = nv_rd32(dev, NV50_PDISPLAY_DAC_MODE_CTRL_P(or));
600 }
601
602 return mc;
603}
604
605static int
606nv50_display_irq_head(struct drm_device *dev, int *phead,
607 struct dcb_entry **pdcbent)
608{
609 struct drm_nouveau_private *dev_priv = dev->dev_private;
610 uint32_t unk30 = nv_rd32(dev, NV50_PDISPLAY_UNK30_CTRL);
611 uint32_t dac = 0, sor = 0;
612 int head, i, or = 0, type = OUTPUT_ANY;
613
614 /* We're assuming that head 0 *or* head 1 will be active here,
615 * and not both. I'm not sure if the hw will even signal both
616 * ever, but it definitely shouldn't for us as we commit each
617 * CRTC separately, and submission will be blocked by the GPU
618 * until we handle each in turn.
619 */
620 NV_DEBUG(dev, "0x610030: 0x%08x\n", unk30);
621 head = ffs((unk30 >> 9) & 3) - 1;
622 if (head < 0)
623 return -EINVAL;
624
625 /* This assumes CRTCs are never bound to multiple encoders, which
626 * should be the case.
627 */
628 for (i = 0; i < 3 && type == OUTPUT_ANY; i++) {
629 uint32_t mc = nv50_display_mode_ctrl(dev, false, i);
630 if (!(mc & (1 << head)))
631 continue;
632
633 switch ((mc >> 8) & 0xf) {
634 case 0: type = OUTPUT_ANALOG; break;
635 case 1: type = OUTPUT_TV; break;
636 default:
637 NV_ERROR(dev, "unknown dac mode_ctrl: 0x%08x\n", dac);
638 return -1;
639 }
640
641 or = i;
642 }
643
644 for (i = 0; i < 4 && type == OUTPUT_ANY; i++) {
645 uint32_t mc = nv50_display_mode_ctrl(dev, true, i);
646 if (!(mc & (1 << head)))
647 continue;
648
649 switch ((mc >> 8) & 0xf) {
650 case 0: type = OUTPUT_LVDS; break;
651 case 1: type = OUTPUT_TMDS; break;
652 case 2: type = OUTPUT_TMDS; break;
653 case 5: type = OUTPUT_TMDS; break;
654 case 8: type = OUTPUT_DP; break;
655 case 9: type = OUTPUT_DP; break;
656 default:
657 NV_ERROR(dev, "unknown sor mode_ctrl: 0x%08x\n", sor);
658 return -1;
659 }
660
661 or = i;
662 }
663
664 NV_DEBUG(dev, "type %d, or %d\n", type, or);
665 if (type == OUTPUT_ANY) {
666 NV_ERROR(dev, "unknown encoder!!\n");
667 return -1;
668 }
669
670 for (i = 0; i < dev_priv->vbios->dcb->entries; i++) {
671 struct dcb_entry *dcbent = &dev_priv->vbios->dcb->entry[i];
672
673 if (dcbent->type != type)
674 continue;
675
676 if (!(dcbent->or & (1 << or)))
677 continue;
678
679 *phead = head;
680 *pdcbent = dcbent;
681 return 0;
682 }
683
684 NV_ERROR(dev, "no DCB entry for %d %d\n", dac != 0, or);
685 return 0;
686}
687
/* Select the VBIOS display-script id for an output.  The base script
 * comes from the output's MODE_CTRL; for LVDS/TMDS, bit 0x0100 flags
 * dual-link and (LVDS only) bit 0x0200 flags a 24-bit interface, from
 * either the BIOS panel data or the pixel clock.  User overrides via
 * the nouveau_uscript_{lvds,tmds} module options win.  DACs always use
 * script 0xff.
 */
static uint32_t
nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent,
			   int pxclk)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvbios *bios = &dev_priv->VBIOS;
	uint32_t mc, script = 0, or;

	or = ffs(dcbent->or) - 1;
	mc = nv50_display_mode_ctrl(dev, dcbent->type != OUTPUT_ANALOG, or);
	switch (dcbent->type) {
	case OUTPUT_LVDS:
		script = (mc >> 8) & 0xf;
		if (bios->pub.fp_no_ddc) {
			/* No DDC: trust the BIOS panel description. */
			if (bios->fp.dual_link)
				script |= 0x0100;
			if (bios->fp.if_is_24bit)
				script |= 0x0200;
		} else {
			/* Derive link count from the pixel clock, and
			 * bit depth from the relevant strap bit.
			 */
			if (pxclk >= bios->fp.duallink_transition_clk) {
				script |= 0x0100;
				if (bios->fp.strapless_is_24bit & 2)
					script |= 0x0200;
			} else
			if (bios->fp.strapless_is_24bit & 1)
				script |= 0x0200;
		}

		if (nouveau_uscript_lvds >= 0) {
			NV_INFO(dev, "override script 0x%04x with 0x%04x "
				      "for output LVDS-%d\n", script,
				      nouveau_uscript_lvds, or);
			script = nouveau_uscript_lvds;
		}
		break;
	case OUTPUT_TMDS:
		script = (mc >> 8) & 0xf;
		/* Above single-link TMDS bandwidth: use dual-link script. */
		if (pxclk >= 165000)
			script |= 0x0100;

		if (nouveau_uscript_tmds >= 0) {
			NV_INFO(dev, "override script 0x%04x with 0x%04x "
				      "for output TMDS-%d\n", script,
				      nouveau_uscript_tmds, or);
			script = nouveau_uscript_tmds;
		}
		break;
	case OUTPUT_DP:
		script = (mc >> 8) & 0xf;
		break;
	case OUTPUT_ANALOG:
		script = 0xff;
		break;
	default:
		NV_ERROR(dev, "modeset on unsupported output type!\n");
		break;
	}

	return script;
}
748
/*
 * Release every channel waiting on a vblank semaphore: write the requested
 * release value into each waiter's notifier BO and unlink it from the
 * vbl_waiting list.
 *
 * NOTE(review): the 'crtc' argument is unused here — all waiters are woken
 * regardless of which CRTC's vblank fired; confirm whether per-CRTC
 * filtering was intended.
 */
static void
nv50_display_vblank_crtc_handler(struct drm_device *dev, int crtc)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan;
	struct list_head *entry, *tmp;

	list_for_each_safe(entry, tmp, &dev_priv->vbl_waiting) {
		chan = list_entry(entry, struct nouveau_channel, nvsw.vbl_wait);

		/* Signal the semaphore the channel is spinning on. */
		nouveau_bo_wr32(chan->notifier_bo, chan->nvsw.vblsem_offset,
						chan->nvsw.vblsem_rval);
		list_del(&chan->nvsw.vbl_wait);
	}
}
764
/*
 * Handle PDISPLAY vblank interrupts: wake waiters for each CRTC whose
 * vblank bit is set, then mask those vblank interrupts off and ack them.
 */
static void
nv50_display_vblank_handler(struct drm_device *dev, uint32_t intr)
{
	/* Only the CRTC vblank bits are relevant here. */
	intr &= NV50_PDISPLAY_INTR_1_VBLANK_CRTC;

	if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_0)
		nv50_display_vblank_crtc_handler(dev, 0);

	if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_1)
		nv50_display_vblank_crtc_handler(dev, 1);

	/* Disable the serviced vblank interrupts, then ack them. */
	nv_wr32(dev, NV50_PDISPLAY_INTR_EN, nv_rd32(dev,
		     NV50_PDISPLAY_INTR_EN) & ~intr);
	nv_wr32(dev, NV50_PDISPLAY_INTR_1, intr);
}
780
/*
 * First stage of the display clock-change interrupt sequence (CLK_UNK10).
 * Clears bit 3 of the unknown register 0x619494 and runs the DCB entry's
 * display script with script id 0 / pclk -1, then acks the interrupt.
 */
static void
nv50_display_unk10_handler(struct drm_device *dev)
{
	struct dcb_entry *dcbent;
	int head, ret;

	ret = nv50_display_irq_head(dev, &head, &dcbent);
	if (ret)
		goto ack;	/* couldn't identify head/encoder, just ack */

	nv_wr32(dev, 0x619494, nv_rd32(dev, 0x619494) & ~8);

	nouveau_bios_run_display_table(dev, dcbent, 0, -1);

ack:
	nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK10);
	nv_wr32(dev, 0x610030, 0x80000000);
}
799
/*
 * Second stage of the clock-change sequence (CLK_UNK20): reprogram the
 * CRTC pixel clock and run the matching VBIOS display script, then update
 * the CRTC/SOR (or DAC) clock-control registers.  The register write order
 * here follows the hardware's expected sequence — do not reorder.
 */
static void
nv50_display_unk20_handler(struct drm_device *dev)
{
	struct dcb_entry *dcbent;
	uint32_t tmp, pclk, script;
	int head, or, ret;

	ret = nv50_display_irq_head(dev, &head, &dcbent);
	if (ret)
		goto ack;
	or = ffs(dcbent->or) - 1;
	/* Pixel clock in KHz, low 22 bits of the CRTC CLOCK register. */
	pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(head, CLOCK)) & 0x3fffff;
	script = nv50_display_script_select(dev, dcbent, pclk);

	NV_DEBUG(dev, "head %d pxclk: %dKHz\n", head, pclk);

	/* Script id -2: pre-clock-change script, not used for DP. */
	if (dcbent->type != OUTPUT_DP)
		nouveau_bios_run_display_table(dev, dcbent, 0, -2);

	nv50_crtc_set_clock(dev, head, pclk);

	nouveau_bios_run_display_table(dev, dcbent, script, pclk);

	/* Clear the low nibble of the CRTC clock control. */
	tmp = nv_rd32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head));
	tmp &= ~0x000000f;
	nv_wr32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head), tmp);

	if (dcbent->type != OUTPUT_ANALOG) {
		/* SOR: set the dual-link bits when the script requested it. */
		tmp = nv_rd32(dev, NV50_PDISPLAY_SOR_CLK_CTRL2(or));
		tmp &= ~0x00000f0f;
		if (script & 0x0100)
			tmp |= 0x00000101;
		nv_wr32(dev, NV50_PDISPLAY_SOR_CLK_CTRL2(or), tmp);
	} else {
		nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL2(or), 0);
	}

ack:
	nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK20);
	nv_wr32(dev, 0x610030, 0x80000000);
}
841
/*
 * Final stage of the clock-change sequence (CLK_UNK40): run the display
 * script with a negated pclk (post-clock-change variant), ack the
 * interrupt and restore bit 3 of 0x619494 cleared by the unk10 stage.
 */
static void
nv50_display_unk40_handler(struct drm_device *dev)
{
	struct dcb_entry *dcbent;
	int head, pclk, script, ret;

	ret = nv50_display_irq_head(dev, &head, &dcbent);
	if (ret)
		goto ack;
	pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(head, CLOCK)) & 0x3fffff;
	script = nv50_display_script_select(dev, dcbent, pclk);

	nouveau_bios_run_display_table(dev, dcbent, script, -pclk);

ack:
	nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK40);
	nv_wr32(dev, 0x610030, 0x80000000);
	nv_wr32(dev, 0x619494, nv_rd32(dev, 0x619494) | 8);
}
861
/*
 * Bottom half (workqueue) for the display clock-change interrupts.  Each
 * loop iteration handles at most one of the CLK_UNK10/20/40 stages, in
 * that priority order, until none remain pending.  Finally re-enable the
 * PMC master interrupt that the top half disabled before queueing us.
 */
void
nv50_display_irq_handler_bh(struct work_struct *work)
{
	struct drm_nouveau_private *dev_priv =
		container_of(work, struct drm_nouveau_private, irq_work);
	struct drm_device *dev = dev_priv->dev;

	for (;;) {
		uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0);
		uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1);

		NV_DEBUG(dev, "PDISPLAY_INTR_BH 0x%08x 0x%08x\n", intr0, intr1);

		if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK10)
			nv50_display_unk10_handler(dev);
		else
		if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK20)
			nv50_display_unk20_handler(dev);
		else
		if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK40)
			nv50_display_unk40_handler(dev);
		else
			break;
	}

	/* Re-enable the PMC master interrupt. */
	nv_wr32(dev, NV03_PMC_INTR_EN_0, 1);
}
889
/*
 * Report an EVO channel error: ack the error interrupt, read and log the
 * trapped method address/data, then clear the trap so the channel can
 * continue.
 */
static void
nv50_display_error_handler(struct drm_device *dev)
{
	uint32_t addr, data;

	nv_wr32(dev, NV50_PDISPLAY_INTR_0, 0x00010000);	/* ack error intr */
	addr = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_ADDR);
	data = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_DATA);

	/* addr: low 12 bits = method, upper fields logged raw for debugging */
	NV_ERROR(dev, "EvoCh %d Mthd 0x%04x Data 0x%08x (0x%04x 0x%02x)\n",
		 0, addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf);

	nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR, 0x90000000);
}
904
/*
 * Handle hotplug interrupts.  The pending-and-enabled HPD bits are split
 * into plug (low 16 bits) and unplug (high 16 bits) masks; chipsets >=
 * 0x90 have a second pair of HPD registers for GPIO lines 16-31.  For each
 * connector whose GPIO line changed, the current GPIO state decides
 * plugged/unplugged, and active DP encoders are switched on/off via their
 * dpms helper.  Finally the HPD status registers are written back to ack.
 */
static void
nv50_display_irq_hotplug(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_connector *connector;
	const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
	uint32_t unplug_mask, plug_mask, change_mask;
	uint32_t hpd0, hpd1 = 0;

	/* pending & enabled hotplug bits */
	hpd0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050);
	if (dev_priv->chipset >= 0x90)
		hpd1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070);

	plug_mask   = (hpd0 & 0x0000ffff) | (hpd1 << 16);
	unplug_mask = (hpd0 >> 16) | (hpd1 & 0xffff0000);
	change_mask = plug_mask | unplug_mask;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct drm_encoder_helper_funcs *helper;
		struct nouveau_connector *nv_connector =
			nouveau_connector(connector);
		struct nouveau_encoder *nv_encoder;
		struct dcb_gpio_entry *gpio;
		uint32_t reg;
		bool plugged;

		if (!nv_connector->dcb)
			continue;

		gpio = nouveau_bios_gpio_entry(dev, nv_connector->dcb->gpio_tag);
		if (!gpio || !(change_mask & (1 << gpio->line)))
			continue;

		/* 8 GPIO lines per state register, 4 bits per line;
		 * bit 2 of the field gives the current line state. */
		reg = nv_rd32(dev, gpio_reg[gpio->line >> 3]);
		plugged = !!(reg & (4 << ((gpio->line & 7) << 2)));
		NV_INFO(dev, "%splugged %s\n", plugged ? "" : "un",
			drm_get_connector_name(connector));

		if (!connector->encoder || !connector->encoder->crtc ||
		    !connector->encoder->crtc->enabled)
			continue;
		nv_encoder = nouveau_encoder(connector->encoder);
		helper = connector->encoder->helper_private;

		/* Only DP links are (re)trained on hotplug here. */
		if (nv_encoder->dcb->type != OUTPUT_DP)
			continue;

		if (plugged)
			helper->dpms(connector->encoder, DRM_MODE_DPMS_ON);
		else
			helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF);
	}

	/* Ack by writing the pending bits back. */
	nv_wr32(dev, 0xe054, nv_rd32(dev, 0xe054));
	if (dev_priv->chipset >= 0x90)
		nv_wr32(dev, 0xe074, nv_rd32(dev, 0xe074));
}
962
/*
 * Top-half PDISPLAY interrupt handler.  Drains hotplug interrupts first,
 * then loops over PDISPLAY_INTR_0/1: EVO errors and vblanks are handled
 * inline; the clock-change interrupts (CLK_UNK10/20/40) are deferred to
 * the workqueue bottom half with the PMC master interrupt disabled until
 * the bh re-enables it.  'delayed' keeps the deferred bits from
 * terminating the loop early while they remain asserted.
 */
void
nv50_display_irq_handler(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t delayed = 0;

	while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG)
		nv50_display_irq_hotplug(dev);

	while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
		uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0);
		uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1);
		uint32_t clock;

		NV_DEBUG(dev, "PDISPLAY_INTR 0x%08x 0x%08x\n", intr0, intr1);

		/* Nothing new pending (ignoring bits handed to the bh). */
		if (!intr0 && !(intr1 & ~delayed))
			break;

		if (intr0 & 0x00010000) {
			nv50_display_error_handler(dev);
			intr0 &= ~0x00010000;
		}

		if (intr1 & NV50_PDISPLAY_INTR_1_VBLANK_CRTC) {
			nv50_display_vblank_handler(dev, intr1);
			intr1 &= ~NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
		}

		clock = (intr1 & (NV50_PDISPLAY_INTR_1_CLK_UNK10 |
				  NV50_PDISPLAY_INTR_1_CLK_UNK20 |
				  NV50_PDISPLAY_INTR_1_CLK_UNK40));
		if (clock) {
			/* Mask PMC interrupts until the bh has run. */
			nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
			if (!work_pending(&dev_priv->irq_work))
				queue_work(dev_priv->wq, &dev_priv->irq_work);
			delayed |= clock;
			intr1 &= ~clock;
		}

		if (intr0) {
			NV_ERROR(dev, "unknown PDISPLAY_INTR_0: 0x%08x\n", intr0);
			nv_wr32(dev, NV50_PDISPLAY_INTR_0, intr0);
		}

		if (intr1) {
			NV_ERROR(dev,
				 "unknown PDISPLAY_INTR_1: 0x%08x\n", intr1);
			nv_wr32(dev, NV50_PDISPLAY_INTR_1, intr1);
		}
	}
}
1015
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
new file mode 100644
index 000000000000..3ae8d0725f63
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -0,0 +1,46 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
#ifndef __NV50_DISPLAY_H__
#define __NV50_DISPLAY_H__

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_reg.h"
#include "nouveau_crtc.h"
#include "nv50_evo.h"

/* Interrupt handling: top half and workqueue bottom half. */
void nv50_display_irq_handler(struct drm_device *dev);
void nv50_display_irq_handler_bh(struct work_struct *work);
/* Display engine bring-up / teardown. */
int nv50_display_init(struct drm_device *dev);
int nv50_display_create(struct drm_device *dev);
int nv50_display_destroy(struct drm_device *dev);
/* CRTC helpers used by the display code. */
int nv50_crtc_blank(struct nouveau_crtc *, bool blank);
int nv50_crtc_set_clock(struct drm_device *, int head, int pclk);

#endif /* __NV50_DISPLAY_H__ */
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.h b/drivers/gpu/drm/nouveau/nv50_evo.h
new file mode 100644
index 000000000000..aae13343bcec
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_evo.h
@@ -0,0 +1,113 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
/* Method/state definitions for the NV50 EVO display command channel. */

/* Core channel methods. */
#define NV50_EVO_UPDATE                            0x00000080
#define NV50_EVO_UNK84                             0x00000084
#define NV50_EVO_UNK84_NOTIFY                      0x40000000
#define NV50_EVO_UNK84_NOTIFY_DISABLED             0x00000000
#define NV50_EVO_UNK84_NOTIFY_ENABLED              0x40000000
#define NV50_EVO_DMA_NOTIFY                        0x00000088
#define NV50_EVO_DMA_NOTIFY_HANDLE                 0xffffffff
#define NV50_EVO_DMA_NOTIFY_HANDLE_NONE            0x00000000
#define NV50_EVO_UNK8C                             0x0000008C

/* Per-DAC methods, 0x80 bytes apart. */
#define NV50_EVO_DAC(n, r)                         ((n) * 0x80 + NV50_EVO_DAC_##r)
#define NV50_EVO_DAC_MODE_CTRL                     0x00000400
#define NV50_EVO_DAC_MODE_CTRL_CRTC0               0x00000001
#define NV50_EVO_DAC_MODE_CTRL_CRTC1               0x00000002
#define NV50_EVO_DAC_MODE_CTRL2                    0x00000404
#define NV50_EVO_DAC_MODE_CTRL2_NHSYNC             0x00000001
#define NV50_EVO_DAC_MODE_CTRL2_NVSYNC             0x00000002

/* Per-SOR methods, 0x40 bytes apart. */
#define NV50_EVO_SOR(n, r)                         ((n) * 0x40 + NV50_EVO_SOR_##r)
#define NV50_EVO_SOR_MODE_CTRL                     0x00000600
#define NV50_EVO_SOR_MODE_CTRL_CRTC0               0x00000001
#define NV50_EVO_SOR_MODE_CTRL_CRTC1               0x00000002
#define NV50_EVO_SOR_MODE_CTRL_TMDS                0x00000100
#define NV50_EVO_SOR_MODE_CTRL_TMDS_DUAL_LINK      0x00000400
#define NV50_EVO_SOR_MODE_CTRL_NHSYNC              0x00001000
#define NV50_EVO_SOR_MODE_CTRL_NVSYNC              0x00002000

/* Per-CRTC methods, 0x400 bytes apart; NV84_ variants are G84+ only. */
#define NV50_EVO_CRTC(n, r)                        ((n) * 0x400 + NV50_EVO_CRTC_##r)
#define NV84_EVO_CRTC(n, r)                        ((n) * 0x400 + NV84_EVO_CRTC_##r)
#define NV50_EVO_CRTC_UNK0800                      0x00000800
#define NV50_EVO_CRTC_CLOCK                        0x00000804
#define NV50_EVO_CRTC_INTERLACE                    0x00000808
#define NV50_EVO_CRTC_DISPLAY_START                0x00000810
#define NV50_EVO_CRTC_DISPLAY_TOTAL                0x00000814
#define NV50_EVO_CRTC_SYNC_DURATION                0x00000818
#define NV50_EVO_CRTC_SYNC_START_TO_BLANK_END      0x0000081c
#define NV50_EVO_CRTC_UNK0820                      0x00000820
#define NV50_EVO_CRTC_UNK0824                      0x00000824
#define NV50_EVO_CRTC_UNK082C                      0x0000082c
#define NV50_EVO_CRTC_CLUT_MODE                    0x00000840
/* You can't have a palette in 8 bit mode (=OFF) */
#define NV50_EVO_CRTC_CLUT_MODE_BLANK              0x00000000
#define NV50_EVO_CRTC_CLUT_MODE_OFF                0x80000000
#define NV50_EVO_CRTC_CLUT_MODE_ON                 0xC0000000
#define NV50_EVO_CRTC_CLUT_OFFSET                  0x00000844
#define NV84_EVO_CRTC_CLUT_DMA                     0x0000085C
#define NV84_EVO_CRTC_CLUT_DMA_HANDLE              0xffffffff
#define NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE         0x00000000
#define NV50_EVO_CRTC_FB_OFFSET                    0x00000860
#define NV50_EVO_CRTC_FB_SIZE                      0x00000868
#define NV50_EVO_CRTC_FB_CONFIG                    0x0000086c
#define NV50_EVO_CRTC_FB_CONFIG_MODE               0x00100000
#define NV50_EVO_CRTC_FB_CONFIG_MODE_TILE          0x00000000
#define NV50_EVO_CRTC_FB_CONFIG_MODE_PITCH         0x00100000
#define NV50_EVO_CRTC_FB_DEPTH                     0x00000870
#define NV50_EVO_CRTC_FB_DEPTH_8                   0x00001e00
#define NV50_EVO_CRTC_FB_DEPTH_15                  0x0000e900
#define NV50_EVO_CRTC_FB_DEPTH_16                  0x0000e800
#define NV50_EVO_CRTC_FB_DEPTH_24                  0x0000cf00
#define NV50_EVO_CRTC_FB_DEPTH_30                  0x0000d100
#define NV50_EVO_CRTC_FB_DMA                       0x00000874
#define NV50_EVO_CRTC_FB_DMA_HANDLE                0xffffffff
#define NV50_EVO_CRTC_FB_DMA_HANDLE_NONE           0x00000000
#define NV50_EVO_CRTC_CURSOR_CTRL                  0x00000880
#define NV50_EVO_CRTC_CURSOR_CTRL_HIDE             0x05000000
#define NV50_EVO_CRTC_CURSOR_CTRL_SHOW             0x85000000
#define NV50_EVO_CRTC_CURSOR_OFFSET                0x00000884
#define NV84_EVO_CRTC_CURSOR_DMA                   0x0000089c
#define NV84_EVO_CRTC_CURSOR_DMA_HANDLE            0xffffffff
#define NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE       0x00000000
#define NV50_EVO_CRTC_DITHER_CTRL                  0x000008a0
#define NV50_EVO_CRTC_DITHER_CTRL_OFF              0x00000000
#define NV50_EVO_CRTC_DITHER_CTRL_ON               0x00000011
#define NV50_EVO_CRTC_SCALE_CTRL                   0x000008a4
#define NV50_EVO_CRTC_SCALE_CTRL_INACTIVE          0x00000000
#define NV50_EVO_CRTC_SCALE_CTRL_ACTIVE            0x00000009
#define NV50_EVO_CRTC_COLOR_CTRL                   0x000008a8
#define NV50_EVO_CRTC_COLOR_CTRL_COLOR             0x00040000
#define NV50_EVO_CRTC_FB_POS                       0x000008c0
#define NV50_EVO_CRTC_REAL_RES                     0x000008c8
#define NV50_EVO_CRTC_SCALE_CENTER_OFFSET          0x000008d4
#define NV50_EVO_CRTC_SCALE_CENTER_OFFSET_VAL(x, y) \
	((((unsigned)y << 16) & 0xFFFF0000) | (((unsigned)x) & 0x0000FFFF))
/* Both of these are needed, otherwise nothing happens. */
#define NV50_EVO_CRTC_SCALE_RES1                   0x000008d8
#define NV50_EVO_CRTC_SCALE_RES2                   0x000008dc
113
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
new file mode 100644
index 000000000000..6bcc6d39e9b0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -0,0 +1,273 @@
1#include "drmP.h"
2#include "nouveau_drv.h"
3#include "nouveau_dma.h"
4#include "nouveau_fbcon.h"
5
/*
 * Accelerated fbcon fillrect via the channel's 2D engine object.  If ring
 * space cannot be obtained the console permanently falls back to the
 * software (cfb_*) path.  For non-COPY rops the 2D operation mode (method
 * 0x02ac) is temporarily switched and restored afterwards.
 */
static void
nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	struct nouveau_fbcon_par *par = info->par;
	struct drm_device *dev = par->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = dev_priv->channel;

	if (info->state != FBINFO_STATE_RUNNING)
		return;

	/* RING_SPACE() != 0 means we failed to get command space. */
	if (!(info->flags & FBINFO_HWACCEL_DISABLED) &&
	     RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11)) {
		NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");

		info->flags |= FBINFO_HWACCEL_DISABLED;
	}

	if (info->flags & FBINFO_HWACCEL_DISABLED) {
		cfb_fillrect(info, rect);
		return;
	}

	if (rect->rop != ROP_COPY) {
		BEGIN_RING(chan, NvSub2D, 0x02ac, 1);
		OUT_RING(chan, 1);
	}
	/* Fill colour, then the destination rectangle corners. */
	BEGIN_RING(chan, NvSub2D, 0x0588, 1);
	OUT_RING(chan, rect->color);
	BEGIN_RING(chan, NvSub2D, 0x0600, 4);
	OUT_RING(chan, rect->dx);
	OUT_RING(chan, rect->dy);
	OUT_RING(chan, rect->dx + rect->width);
	OUT_RING(chan, rect->dy + rect->height);
	if (rect->rop != ROP_COPY) {
		/* restore operation mode set up in accel_init */
		BEGIN_RING(chan, NvSub2D, 0x02ac, 1);
		OUT_RING(chan, 3);
	}
	FIRE_RING(chan);
}
46
/*
 * Accelerated fbcon screen-to-screen copy.  Emits destination rectangle
 * then source position to the 2D engine; falls back to cfb_copyarea()
 * permanently if ring space cannot be obtained.
 */
static void
nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
	struct nouveau_fbcon_par *par = info->par;
	struct drm_device *dev = par->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = dev_priv->channel;

	if (info->state != FBINFO_STATE_RUNNING)
		return;

	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 12)) {
		NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");

		info->flags |= FBINFO_HWACCEL_DISABLED;
	}

	if (info->flags & FBINFO_HWACCEL_DISABLED) {
		cfb_copyarea(info, region);
		return;
	}

	BEGIN_RING(chan, NvSub2D, 0x0110, 1);
	OUT_RING(chan, 0);
	/* destination rectangle */
	BEGIN_RING(chan, NvSub2D, 0x08b0, 4);
	OUT_RING(chan, region->dx);
	OUT_RING(chan, region->dy);
	OUT_RING(chan, region->width);
	OUT_RING(chan, region->height);
	/* source position (32.32 fixed-point style: int part after 0) */
	BEGIN_RING(chan, NvSub2D, 0x08d0, 4);
	OUT_RING(chan, 0);
	OUT_RING(chan, region->sx);
	OUT_RING(chan, 0);
	OUT_RING(chan, region->sy);
	FIRE_RING(chan);
}
83
/*
 * Accelerated fbcon monochrome image blit (glyph drawing).  Only 1-bit
 * deep images are accelerated; anything else, or any ring-space failure,
 * goes through cfb_imageblit().  The bitmap is streamed to the 2D engine
 * in chunks of at most 2047 dwords.
 */
static void
nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
	struct nouveau_fbcon_par *par = info->par;
	struct drm_device *dev = par->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = dev_priv->channel;
	uint32_t width, dwords, *data = (uint32_t *)image->data;
	/* bits above the pixel depth, ORed into palette colours below */
	uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
	uint32_t *palette = info->pseudo_palette;

	if (info->state != FBINFO_STATE_RUNNING)
		return;

	if (image->depth != 1) {
		cfb_imageblit(info, image);
		return;
	}

	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 11)) {
		NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
		info->flags |= FBINFO_HWACCEL_DISABLED;
	}

	if (info->flags & FBINFO_HWACCEL_DISABLED) {
		cfb_imageblit(info, image);
		return;
	}

	/* bitmap rows are padded to 32-pixel multiples */
	width = (image->width + 31) & ~31;
	dwords = (width * image->height) >> 5;

	/* background / foreground colours */
	BEGIN_RING(chan, NvSub2D, 0x0814, 2);
	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
		OUT_RING(chan, palette[image->bg_color] | mask);
		OUT_RING(chan, palette[image->fg_color] | mask);
	} else {
		OUT_RING(chan, image->bg_color);
		OUT_RING(chan, image->fg_color);
	}
	BEGIN_RING(chan, NvSub2D, 0x0838, 2);
	OUT_RING(chan, image->width);
	OUT_RING(chan, image->height);
	BEGIN_RING(chan, NvSub2D, 0x0850, 4);
	OUT_RING(chan, 0);
	OUT_RING(chan, image->dx);
	OUT_RING(chan, 0);
	OUT_RING(chan, image->dy);

	while (dwords) {
		int push = dwords > 2047 ? 2047 : dwords;

		if (RING_SPACE(chan, push + 1)) {
			NV_ERROR(dev,
				 "GPU lockup - switching to software fbcon\n");
			info->flags |= FBINFO_HWACCEL_DISABLED;
			cfb_imageblit(info, image);
			return;
		}

		dwords -= push;

		/* 0x40000000 | 0x0860: presumably the non-incrementing
		 * form of the data method — confirm against 2D class docs */
		BEGIN_RING(chan, NvSub2D, 0x40000860, push);
		OUT_RINGp(chan, data, push);
		data += push;
	}

	FIRE_RING(chan);
}
154
/*
 * Create and initialize the 2D engine object (class 0x502d) used for
 * accelerated fbcon, and point the fbops at the accelerated routines.
 *
 * The surface format is chosen from the framebuffer's bits_per_pixel
 * (and, for 32bpp, the alpha length: depth 24/32 -> 0xe6, depth 30 ->
 * 0xd1).  Both source and destination surfaces are configured to the
 * same framebuffer memory, addressed as smem_start relative to the
 * start of VRAM plus the channel's VRAM virtual base.
 *
 * Returns 0 on success or a negative errno (-EINVAL for unsupported
 * depths, or the failure from object creation / ring space).
 */
int
nv50_fbcon_accel_init(struct fb_info *info)
{
	struct nouveau_fbcon_par *par = info->par;
	struct drm_device *dev = par->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = dev_priv->channel;
	struct nouveau_gpuobj *eng2d = NULL;
	int ret, format;

	switch (info->var.bits_per_pixel) {
	case 8:
		format = 0xf3;
		break;
	case 15:
		format = 0xf8;
		break;
	case 16:
		format = 0xe8;
		break;
	case 32:
		switch (info->var.transp.length) {
		case 0: /* depth 24 */
		case 8: /* depth 32, just use 24.. */
			format = 0xe6;
			break;
		case 2: /* depth 30 */
			format = 0xd1;
			break;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}

	ret = nouveau_gpuobj_gr_new(dev_priv->channel, 0x502d, &eng2d);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, Nv2D, eng2d, NULL);
	if (ret)
		return ret;

	ret = RING_SPACE(chan, 59);
	if (ret) {
		NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
		return ret;
	}

	/* Bind the 2D object and its notifier/VRAM DMA objects. */
	BEGIN_RING(chan, NvSub2D, 0x0000, 1);
	OUT_RING(chan, Nv2D);
	BEGIN_RING(chan, NvSub2D, 0x0180, 4);
	OUT_RING(chan, NvNotify0);
	OUT_RING(chan, chan->vram_handle);
	OUT_RING(chan, chan->vram_handle);
	OUT_RING(chan, chan->vram_handle);
	/* Default state: SRCCOPY-style operation (0x02ac = 3), rop 0x55,
	 * no clipping, identity pattern.  Method numbers are the hw 2D
	 * class's; values below mirror what the blit routines rely on. */
	BEGIN_RING(chan, NvSub2D, 0x0290, 1);
	OUT_RING(chan, 0);
	BEGIN_RING(chan, NvSub2D, 0x0888, 1);
	OUT_RING(chan, 1);
	BEGIN_RING(chan, NvSub2D, 0x02ac, 1);
	OUT_RING(chan, 3);
	BEGIN_RING(chan, NvSub2D, 0x02a0, 1);
	OUT_RING(chan, 0x55);
	BEGIN_RING(chan, NvSub2D, 0x08c0, 4);
	OUT_RING(chan, 0);
	OUT_RING(chan, 1);
	OUT_RING(chan, 0);
	OUT_RING(chan, 1);
	BEGIN_RING(chan, NvSub2D, 0x0580, 2);
	OUT_RING(chan, 4);
	OUT_RING(chan, format);
	BEGIN_RING(chan, NvSub2D, 0x02e8, 2);
	OUT_RING(chan, 2);
	OUT_RING(chan, 1);
	BEGIN_RING(chan, NvSub2D, 0x0804, 1);
	OUT_RING(chan, format);
	BEGIN_RING(chan, NvSub2D, 0x0800, 1);
	OUT_RING(chan, 1);
	BEGIN_RING(chan, NvSub2D, 0x0808, 3);
	OUT_RING(chan, 0);
	OUT_RING(chan, 0);
	OUT_RING(chan, 0);
	BEGIN_RING(chan, NvSub2D, 0x081c, 1);
	OUT_RING(chan, 1);
	BEGIN_RING(chan, NvSub2D, 0x0840, 4);
	OUT_RING(chan, 0);
	OUT_RING(chan, 1);
	OUT_RING(chan, 0);
	OUT_RING(chan, 1);
	/* Destination surface: format, pitch, size, fb offset in VRAM VM. */
	BEGIN_RING(chan, NvSub2D, 0x0200, 2);
	OUT_RING(chan, format);
	OUT_RING(chan, 1);
	BEGIN_RING(chan, NvSub2D, 0x0214, 5);
	OUT_RING(chan, info->fix.line_length);
	OUT_RING(chan, info->var.xres_virtual);
	OUT_RING(chan, info->var.yres_virtual);
	OUT_RING(chan, 0);
	OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys +
			dev_priv->vm_vram_base);
	/* Source surface: identical setup (same framebuffer). */
	BEGIN_RING(chan, NvSub2D, 0x0230, 2);
	OUT_RING(chan, format);
	OUT_RING(chan, 1);
	BEGIN_RING(chan, NvSub2D, 0x0244, 5);
	OUT_RING(chan, info->fix.line_length);
	OUT_RING(chan, info->var.xres_virtual);
	OUT_RING(chan, info->var.yres_virtual);
	OUT_RING(chan, 0);
	OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys +
			dev_priv->vm_vram_base);

	info->fbops->fb_fillrect = nv50_fbcon_fillrect;
	info->fbops->fb_copyarea = nv50_fbcon_copyarea;
	info->fbops->fb_imageblit = nv50_fbcon_imageblit;
	return 0;
}
273
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
new file mode 100644
index 000000000000..77ae1aaa0bce
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -0,0 +1,494 @@
1/*
2 * Copyright (C) 2007 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm.h"
29#include "nouveau_drv.h"
30
/* Per-device PFIFO state: two double-buffered channel-list ("thingo")
 * objects; cur_thingo indexes the buffer to fill on the next rebuild. */
struct nv50_fifo_priv {
	struct nouveau_gpuobj_ref *thingo[2];
	int cur_thingo;
};

/* Original G80 (chipset 0x50-0x5f) differs from G84+ in RAMFC layout. */
#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50)
37
/*
 * Rebuild the PFIFO channel list: write the id of every channel that has a
 * RAMFC into the inactive "thingo" buffer, then point the hardware at it
 * (0x32f4 = instance, 0x32ec = entry count) and kick it via 0x2500.
 */
static void
nv50_fifo_init_thingo(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_fifo_priv *priv = dev_priv->engine.fifo.priv;
	struct nouveau_gpuobj_ref *cur;
	int i, nr;

	NV_DEBUG(dev, "\n");

	/* Flip to the buffer not currently in use by the hardware. */
	cur = priv->thingo[priv->cur_thingo];
	priv->cur_thingo = !priv->cur_thingo;

	/* We never schedule channel 0 or 127 */
	dev_priv->engine.instmem.prepare_access(dev, true);
	for (i = 1, nr = 0; i < 127; i++) {
		if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc)
			nv_wo32(dev, cur->gpuobj, nr++, i);
	}
	dev_priv->engine.instmem.finish_access(dev);

	nv_wr32(dev, 0x32f4, cur->instance >> 12);
	nv_wr32(dev, 0x32ec, nr);
	nv_wr32(dev, 0x2500, 0x101);
}
63
/*
 * Enable a channel in the PFIFO context table.  The RAMFC instance shift
 * differs between G80 (>>12) and G84+ (>>8).  Unless 'nt' ("no thingo")
 * is set, the channel list is rebuilt afterwards.
 *
 * Returns -EINVAL if the channel has no RAMFC, 0 otherwise.
 */
static int
nv50_fifo_channel_enable(struct drm_device *dev, int channel, bool nt)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = dev_priv->fifos[channel];
	uint32_t inst;

	NV_DEBUG(dev, "ch%d\n", channel);

	if (!chan->ramfc)
		return -EINVAL;

	if (IS_G80)
		inst = chan->ramfc->instance >> 12;
	else
		inst = chan->ramfc->instance >> 8;
	nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel),
		     inst | NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED);

	if (!nt)
		nv50_fifo_init_thingo(dev);
	return 0;
}
87
/*
 * Disable a channel: write the chipset-appropriate "no instance" mask into
 * its context-table slot (enable bit clear).  Unless 'nt' is set, rebuild
 * the channel list afterwards.
 */
static void
nv50_fifo_channel_disable(struct drm_device *dev, int channel, bool nt)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t inst;

	NV_DEBUG(dev, "ch%d, nt=%d\n", channel, nt);

	if (IS_G80)
		inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80;
	else
		inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84;
	nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst);

	if (!nt)
		nv50_fifo_init_thingo(dev);
}
105
106static void
107nv50_fifo_init_reset(struct drm_device *dev)
108{
109 uint32_t pmc_e = NV_PMC_ENABLE_PFIFO;
110
111 NV_DEBUG(dev, "\n");
112
113 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e);
114 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | pmc_e);
115}
116
/*
 * Ack any stale PFIFO interrupts and enable all of them.
 */
static void
nv50_fifo_init_intr(struct drm_device *dev)
{
	NV_DEBUG(dev, "\n");

	nv_wr32(dev, NV03_PFIFO_INTR_0, 0xFFFFFFFF);	/* ack everything */
	nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF);	/* enable all */
}
125
/*
 * Populate the whole PFIFO context table: enable slots for channels that
 * exist, disable the rest, then rebuild the channel list once at the end
 * (hence the nt=true on the per-channel calls).
 */
static void
nv50_fifo_init_context_table(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int i;

	NV_DEBUG(dev, "\n");

	for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) {
		if (dev_priv->fifos[i])
			nv50_fifo_channel_enable(dev, i, true);
		else
			nv50_fifo_channel_disable(dev, i, true);
	}

	nv50_fifo_init_thingo(dev);
}
143
/*
 * Write the magic value NVIDIA's driver uses for register 0x250c.
 * Meaning unknown; kept for parity with the binary driver.
 */
static void
nv50_fifo_init_regs__nv(struct drm_device *dev)
{
	NV_DEBUG(dev, "\n");

	nv_wr32(dev, 0x250c, 0x6f3cfc34);
}
151
/*
 * Zero a set of PFIFO state registers and enable the two dummy channels
 * (0 and 127) that nv50_instmem.c sets up.
 */
static void
nv50_fifo_init_regs(struct drm_device *dev)
{
	NV_DEBUG(dev, "\n");

	nv_wr32(dev, 0x2500, 0);
	nv_wr32(dev, 0x3250, 0);
	nv_wr32(dev, 0x3220, 0);
	nv_wr32(dev, 0x3204, 0);
	nv_wr32(dev, 0x3210, 0);
	nv_wr32(dev, 0x3270, 0);

	/* Enable dummy channels setup by nv50_instmem.c */
	nv50_fifo_channel_enable(dev, 0, true);
	nv50_fifo_channel_enable(dev, 127, true);
}
168
/*
 * Initialize (or re-initialize after e.g. suspend) the PFIFO engine.
 * On first call, allocate the private state and both channel-list
 * objects; on subsequent calls reuse them (just toggling cur_thingo so
 * the rebuild starts from the other buffer) and only redo the register
 * bring-up.
 *
 * Returns 0 on success or a negative errno from allocation.
 */
int
nv50_fifo_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_fifo_priv *priv;
	int ret;

	NV_DEBUG(dev, "\n");

	priv = dev_priv->engine.fifo.priv;
	if (priv) {
		priv->cur_thingo = !priv->cur_thingo;
		goto just_reset;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	dev_priv->engine.fifo.priv = priv;

	/* 128 channel-id slots, 4 bytes each, 4KiB-aligned. */
	ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000,
				     NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[0]);
	if (ret) {
		NV_ERROR(dev, "error creating thingo0: %d\n", ret);
		return ret;
	}

	ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000,
				     NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[1]);
	if (ret) {
		NV_ERROR(dev, "error creating thingo1: %d\n", ret);
		return ret;
	}

just_reset:
	nv50_fifo_init_reset(dev);
	nv50_fifo_init_intr(dev);
	nv50_fifo_init_context_table(dev);
	nv50_fifo_init_regs__nv(dev);
	nv50_fifo_init_regs(dev);
	dev_priv->engine.fifo.enable(dev);
	dev_priv->engine.fifo.reassign(dev, true);

	return 0;
}
214
/*
 * Free the PFIFO private state: drop both channel-list objects and the
 * priv struct itself.  Safe to call when init never ran (priv == NULL).
 */
void
nv50_fifo_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_fifo_priv *priv = dev_priv->engine.fifo.priv;

	NV_DEBUG(dev, "\n");

	if (!priv)
		return;

	nouveau_gpuobj_ref_del(dev, &priv->thingo[0]);
	nouveau_gpuobj_ref_del(dev, &priv->thingo[1]);

	dev_priv->engine.fifo.priv = NULL;
	kfree(priv);
}
232
233int
234nv50_fifo_channel_id(struct drm_device *dev)
235{
236 return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
237 NV50_PFIFO_CACHE1_PUSH1_CHID_MASK;
238}
239
/*
 * Create the per-channel PFIFO context (RAMFC + CACHE) and enable the
 * channel.
 *
 * On G80 the RAMFC/cache live at fixed offsets inside the channel's
 * RAMIN block (fake gpuobjs over existing memory); on G84+ they are
 * separately allocated objects, and the channel id / RAMFC instance are
 * additionally written into the first two words of RAMIN.
 *
 * The nv_wo32() writes fill RAMFC fields; 0x08/0x10 get the pushbuf base
 * (presumably DMA PUT/GET — confirm against PFIFO docs), 0x48 the pushbuf
 * instance, 0x80 the RAMHT instance; the remaining values are fixed
 * initial state.
 *
 * Returns 0 on success or a negative errno; on enable failure the RAMFC
 * reference is dropped again.
 */
int
nv50_fifo_create_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramfc = NULL;
	int ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	if (IS_G80) {
		uint32_t ramin_poffset = chan->ramin->gpuobj->im_pramin->start;
		uint32_t ramin_voffset = chan->ramin->gpuobj->im_backing_start;

		ret = nouveau_gpuobj_new_fake(dev, ramin_poffset, ramin_voffset,
					      0x100, NVOBJ_FLAG_ZERO_ALLOC |
					      NVOBJ_FLAG_ZERO_FREE, &ramfc,
					      &chan->ramfc);
		if (ret)
			return ret;

		ret = nouveau_gpuobj_new_fake(dev, ramin_poffset + 0x0400,
					      ramin_voffset + 0x0400, 4096,
					      0, NULL, &chan->cache);
		if (ret)
			return ret;
	} else {
		ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 0x100, 256,
					     NVOBJ_FLAG_ZERO_ALLOC |
					     NVOBJ_FLAG_ZERO_FREE,
					     &chan->ramfc);
		if (ret)
			return ret;
		ramfc = chan->ramfc->gpuobj;

		ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 4096, 256,
					     0, &chan->cache);
		if (ret)
			return ret;
	}

	dev_priv->engine.instmem.prepare_access(dev, true);

	nv_wo32(dev, ramfc, 0x08/4, chan->pushbuf_base);
	nv_wo32(dev, ramfc, 0x10/4, chan->pushbuf_base);
	nv_wo32(dev, ramfc, 0x48/4, chan->pushbuf->instance >> 4);
	nv_wo32(dev, ramfc, 0x80/4, (0xc << 24) | (chan->ramht->instance >> 4));
	nv_wo32(dev, ramfc, 0x3c/4, 0x00086078);
	nv_wo32(dev, ramfc, 0x44/4, 0x2101ffff);
	nv_wo32(dev, ramfc, 0x60/4, 0x7fffffff);
	nv_wo32(dev, ramfc, 0x40/4, 0x00000000);
	nv_wo32(dev, ramfc, 0x7c/4, 0x30000001);
	nv_wo32(dev, ramfc, 0x78/4, 0x00000000);
	nv_wo32(dev, ramfc, 0x4c/4, 0xffffffff);

	if (!IS_G80) {
		/* G84+: channel id and RAMFC instance go into RAMIN,
		 * plus cache/RAMIN instances into RAMFC. */
		nv_wo32(dev, chan->ramin->gpuobj, 0, chan->id);
		nv_wo32(dev, chan->ramin->gpuobj, 1,
						chan->ramfc->instance >> 8);

		nv_wo32(dev, ramfc, 0x88/4, chan->cache->instance >> 10);
		nv_wo32(dev, ramfc, 0x98/4, chan->ramin->instance >> 12);
	}

	dev_priv->engine.instmem.finish_access(dev);

	ret = nv50_fifo_channel_enable(dev, chan->id, false);
	if (ret) {
		NV_ERROR(dev, "error enabling ch%d: %d\n", chan->id, ret);
		nouveau_gpuobj_ref_del(dev, &chan->ramfc);
		return ret;
	}

	return 0;
}
315
/*
 * nv50_fifo_destroy_context - release a channel's RAMFC and method cache,
 * and remove the channel from PFIFO.
 */
316void
317nv50_fifo_destroy_context(struct nouveau_channel *chan)
318{
319 struct drm_device *dev = chan->dev;
320
321 NV_DEBUG(dev, "ch%d\n", chan->id);
322
323 nouveau_gpuobj_ref_del(dev, &chan->ramfc);
324 nouveau_gpuobj_ref_del(dev, &chan->cache);
325
326 nv50_fifo_channel_disable(dev, chan->id, false);
327
328 /* Dummy channel, also used on ch 127 */
329 if (chan->id == 0)
330 nv50_fifo_channel_disable(dev, 127, false);
331}
332
/*
 * nv50_fifo_load_context - load a channel's saved PFIFO state into hardware.
 *
 * Copies the RAMFC image word-by-word into the PFIFO context registers,
 * replays any method/data pairs saved in the channel's cache into CACHE1,
 * and finally activates the channel on PUSH1.  The register offsets here
 * are the exact mirror of nv50_fifo_unload_context() below -- keep the
 * two lists in sync.
 */
333int
334nv50_fifo_load_context(struct nouveau_channel *chan)
335{
336 struct drm_device *dev = chan->dev;
337 struct drm_nouveau_private *dev_priv = dev->dev_private;
338 struct nouveau_gpuobj *ramfc = chan->ramfc->gpuobj;
339 struct nouveau_gpuobj *cache = chan->cache->gpuobj;
340 int ptr, cnt;
341
342 NV_DEBUG(dev, "ch%d\n", chan->id);
343
344 dev_priv->engine.instmem.prepare_access(dev, false);
345
 /* RAMFC image -> PFIFO context registers (one word each). */
346 nv_wr32(dev, 0x3330, nv_ro32(dev, ramfc, 0x00/4));
347 nv_wr32(dev, 0x3334, nv_ro32(dev, ramfc, 0x04/4));
348 nv_wr32(dev, 0x3240, nv_ro32(dev, ramfc, 0x08/4));
349 nv_wr32(dev, 0x3320, nv_ro32(dev, ramfc, 0x0c/4));
350 nv_wr32(dev, 0x3244, nv_ro32(dev, ramfc, 0x10/4));
351 nv_wr32(dev, 0x3328, nv_ro32(dev, ramfc, 0x14/4));
352 nv_wr32(dev, 0x3368, nv_ro32(dev, ramfc, 0x18/4));
353 nv_wr32(dev, 0x336c, nv_ro32(dev, ramfc, 0x1c/4));
354 nv_wr32(dev, 0x3370, nv_ro32(dev, ramfc, 0x20/4));
355 nv_wr32(dev, 0x3374, nv_ro32(dev, ramfc, 0x24/4));
356 nv_wr32(dev, 0x3378, nv_ro32(dev, ramfc, 0x28/4));
357 nv_wr32(dev, 0x337c, nv_ro32(dev, ramfc, 0x2c/4));
358 nv_wr32(dev, 0x3228, nv_ro32(dev, ramfc, 0x30/4));
359 nv_wr32(dev, 0x3364, nv_ro32(dev, ramfc, 0x34/4));
360 nv_wr32(dev, 0x32a0, nv_ro32(dev, ramfc, 0x38/4));
361 nv_wr32(dev, 0x3224, nv_ro32(dev, ramfc, 0x3c/4));
362 nv_wr32(dev, 0x324c, nv_ro32(dev, ramfc, 0x40/4));
363 nv_wr32(dev, 0x2044, nv_ro32(dev, ramfc, 0x44/4));
364 nv_wr32(dev, 0x322c, nv_ro32(dev, ramfc, 0x48/4));
365 nv_wr32(dev, 0x3234, nv_ro32(dev, ramfc, 0x4c/4));
366 nv_wr32(dev, 0x3340, nv_ro32(dev, ramfc, 0x50/4));
367 nv_wr32(dev, 0x3344, nv_ro32(dev, ramfc, 0x54/4));
368 nv_wr32(dev, 0x3280, nv_ro32(dev, ramfc, 0x58/4));
369 nv_wr32(dev, 0x3254, nv_ro32(dev, ramfc, 0x5c/4));
370 nv_wr32(dev, 0x3260, nv_ro32(dev, ramfc, 0x60/4));
371 nv_wr32(dev, 0x3264, nv_ro32(dev, ramfc, 0x64/4));
372 nv_wr32(dev, 0x3268, nv_ro32(dev, ramfc, 0x68/4));
373 nv_wr32(dev, 0x326c, nv_ro32(dev, ramfc, 0x6c/4));
374 nv_wr32(dev, 0x32e4, nv_ro32(dev, ramfc, 0x70/4));
375 nv_wr32(dev, 0x3248, nv_ro32(dev, ramfc, 0x74/4));
376 nv_wr32(dev, 0x2088, nv_ro32(dev, ramfc, 0x78/4));
377 nv_wr32(dev, 0x2058, nv_ro32(dev, ramfc, 0x7c/4));
378 nv_wr32(dev, 0x2210, nv_ro32(dev, ramfc, 0x80/4));
379
 /* Replay the saved method/data pairs into CACHE1; cnt was stored by
  * unload at RAMFC+0x84. */
380 cnt = nv_ro32(dev, ramfc, 0x84/4);
381 for (ptr = 0; ptr < cnt; ptr++) {
382 nv_wr32(dev, NV40_PFIFO_CACHE1_METHOD(ptr),
383 nv_ro32(dev, cache, (ptr * 2) + 0));
384 nv_wr32(dev, NV40_PFIFO_CACHE1_DATA(ptr),
385 nv_ro32(dev, cache, (ptr * 2) + 1));
386 }
387 nv_wr32(dev, 0x3210, cnt << 2);
388 nv_wr32(dev, 0x3270, 0);
389
390 /* guessing that all the 0x34xx regs aren't on NV50 */
391 if (!IS_G80) {
392 nv_wr32(dev, 0x340c, nv_ro32(dev, ramfc, 0x88/4));
393 nv_wr32(dev, 0x3400, nv_ro32(dev, ramfc, 0x8c/4));
394 nv_wr32(dev, 0x3404, nv_ro32(dev, ramfc, 0x90/4));
395 nv_wr32(dev, 0x3408, nv_ro32(dev, ramfc, 0x94/4));
396 nv_wr32(dev, 0x3410, nv_ro32(dev, ramfc, 0x98/4));
397 }
398
399 dev_priv->engine.instmem.finish_access(dev);
400
 /* Reset CACHE1 pointers and make this the active channel; bit 16
  * presumably marks the channel valid -- confirm against rnndb. */
401 nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
402 nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
403 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16));
404 return 0;
405}
406
/*
 * nv50_fifo_unload_context - save the active channel's PFIFO state.
 *
 * Exact inverse of nv50_fifo_load_context(): dumps the PFIFO context
 * registers back into the channel's RAMFC, drains unprocessed CACHE1
 * method/data pairs into the channel's cache object, then parks PFIFO
 * on the dummy channel 127.  Returns 0, or -EINVAL if the active
 * channel id has no software state.
 */
407int
408nv50_fifo_unload_context(struct drm_device *dev)
409{
410 struct drm_nouveau_private *dev_priv = dev->dev_private;
411 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
412 struct nouveau_gpuobj *ramfc, *cache;
413 struct nouveau_channel *chan = NULL;
414 int chid, get, put, ptr;
415
416 NV_DEBUG(dev, "\n");
417
418 chid = pfifo->channel_id(dev);
419 if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
420 return 0;
421
422 chan = dev_priv->fifos[chid];
423 if (!chan) {
424 NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
425 return -EINVAL;
426 }
427 NV_DEBUG(dev, "ch%d\n", chan->id);
428 ramfc = chan->ramfc->gpuobj;
429 cache = chan->cache->gpuobj;
430
431 dev_priv->engine.instmem.prepare_access(dev, true);
432
 /* PFIFO context registers -> RAMFC image (mirror of load). */
433 nv_wo32(dev, ramfc, 0x00/4, nv_rd32(dev, 0x3330));
434 nv_wo32(dev, ramfc, 0x04/4, nv_rd32(dev, 0x3334));
435 nv_wo32(dev, ramfc, 0x08/4, nv_rd32(dev, 0x3240));
436 nv_wo32(dev, ramfc, 0x0c/4, nv_rd32(dev, 0x3320));
437 nv_wo32(dev, ramfc, 0x10/4, nv_rd32(dev, 0x3244));
438 nv_wo32(dev, ramfc, 0x14/4, nv_rd32(dev, 0x3328));
439 nv_wo32(dev, ramfc, 0x18/4, nv_rd32(dev, 0x3368));
440 nv_wo32(dev, ramfc, 0x1c/4, nv_rd32(dev, 0x336c));
441 nv_wo32(dev, ramfc, 0x20/4, nv_rd32(dev, 0x3370));
442 nv_wo32(dev, ramfc, 0x24/4, nv_rd32(dev, 0x3374));
443 nv_wo32(dev, ramfc, 0x28/4, nv_rd32(dev, 0x3378));
444 nv_wo32(dev, ramfc, 0x2c/4, nv_rd32(dev, 0x337c));
445 nv_wo32(dev, ramfc, 0x30/4, nv_rd32(dev, 0x3228));
446 nv_wo32(dev, ramfc, 0x34/4, nv_rd32(dev, 0x3364));
447 nv_wo32(dev, ramfc, 0x38/4, nv_rd32(dev, 0x32a0));
448 nv_wo32(dev, ramfc, 0x3c/4, nv_rd32(dev, 0x3224));
449 nv_wo32(dev, ramfc, 0x40/4, nv_rd32(dev, 0x324c));
450 nv_wo32(dev, ramfc, 0x44/4, nv_rd32(dev, 0x2044));
451 nv_wo32(dev, ramfc, 0x48/4, nv_rd32(dev, 0x322c));
452 nv_wo32(dev, ramfc, 0x4c/4, nv_rd32(dev, 0x3234));
453 nv_wo32(dev, ramfc, 0x50/4, nv_rd32(dev, 0x3340));
454 nv_wo32(dev, ramfc, 0x54/4, nv_rd32(dev, 0x3344));
455 nv_wo32(dev, ramfc, 0x58/4, nv_rd32(dev, 0x3280));
456 nv_wo32(dev, ramfc, 0x5c/4, nv_rd32(dev, 0x3254));
457 nv_wo32(dev, ramfc, 0x60/4, nv_rd32(dev, 0x3260));
458 nv_wo32(dev, ramfc, 0x64/4, nv_rd32(dev, 0x3264));
459 nv_wo32(dev, ramfc, 0x68/4, nv_rd32(dev, 0x3268));
460 nv_wo32(dev, ramfc, 0x6c/4, nv_rd32(dev, 0x326c));
461 nv_wo32(dev, ramfc, 0x70/4, nv_rd32(dev, 0x32e4));
462 nv_wo32(dev, ramfc, 0x74/4, nv_rd32(dev, 0x3248));
463 nv_wo32(dev, ramfc, 0x78/4, nv_rd32(dev, 0x2088));
464 nv_wo32(dev, ramfc, 0x7c/4, nv_rd32(dev, 0x2058));
465 nv_wo32(dev, ramfc, 0x80/4, nv_rd32(dev, 0x2210));
466
 /* Drain unprocessed CACHE1 entries (between GET and PUT) into the
  * channel's cache object; entries are 512 deep, hence the 0x1ff
  * wrap mask. */
467 put = (nv_rd32(dev, NV03_PFIFO_CACHE1_PUT) & 0x7ff) >> 2;
468 get = (nv_rd32(dev, NV03_PFIFO_CACHE1_GET) & 0x7ff) >> 2;
469 ptr = 0;
470 while (put != get) {
471 nv_wo32(dev, cache, ptr++,
472 nv_rd32(dev, NV40_PFIFO_CACHE1_METHOD(get)));
473 nv_wo32(dev, cache, ptr++,
474 nv_rd32(dev, NV40_PFIFO_CACHE1_DATA(get)));
475 get = (get + 1) & 0x1ff;
476 }
477
478 /* guessing that all the 0x34xx regs aren't on NV50 */
479 if (!IS_G80) {
  /* ptr counts words; /2 gives method/data pair count read back by
   * load_context at RAMFC+0x84. */
480 nv_wo32(dev, ramfc, 0x84/4, ptr >> 1);
481 nv_wo32(dev, ramfc, 0x88/4, nv_rd32(dev, 0x340c));
482 nv_wo32(dev, ramfc, 0x8c/4, nv_rd32(dev, 0x3400));
483 nv_wo32(dev, ramfc, 0x90/4, nv_rd32(dev, 0x3404));
484 nv_wo32(dev, ramfc, 0x94/4, nv_rd32(dev, 0x3408));
485 nv_wo32(dev, ramfc, 0x98/4, nv_rd32(dev, 0x3410));
486 }
487
488 dev_priv->engine.instmem.finish_access(dev);
489
490 /*XXX: probably reload ch127 (NULL) state back too */
491 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, 127);
492 return 0;
493}
494
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
new file mode 100644
index 000000000000..177d8229336f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -0,0 +1,385 @@
1/*
2 * Copyright (C) 2007 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm.h"
29#include "nouveau_drv.h"
30
/* Per-chipset context-switch microcode and initial context values,
 * loaded from userspace firmware files at runtime. */
31MODULE_FIRMWARE("nouveau/nv50.ctxprog");
32MODULE_FIRMWARE("nouveau/nv50.ctxvals");
33MODULE_FIRMWARE("nouveau/nv84.ctxprog");
34MODULE_FIRMWARE("nouveau/nv84.ctxvals");
35MODULE_FIRMWARE("nouveau/nv86.ctxprog");
36MODULE_FIRMWARE("nouveau/nv86.ctxvals");
37MODULE_FIRMWARE("nouveau/nv92.ctxprog");
38MODULE_FIRMWARE("nouveau/nv92.ctxvals");
39MODULE_FIRMWARE("nouveau/nv94.ctxprog");
40MODULE_FIRMWARE("nouveau/nv94.ctxvals");
41MODULE_FIRMWARE("nouveau/nv96.ctxprog");
42MODULE_FIRMWARE("nouveau/nv96.ctxvals");
43MODULE_FIRMWARE("nouveau/nv98.ctxprog");
44MODULE_FIRMWARE("nouveau/nv98.ctxvals");
45MODULE_FIRMWARE("nouveau/nva0.ctxprog");
46MODULE_FIRMWARE("nouveau/nva0.ctxvals");
47MODULE_FIRMWARE("nouveau/nva5.ctxprog");
48MODULE_FIRMWARE("nouveau/nva5.ctxvals");
49MODULE_FIRMWARE("nouveau/nva8.ctxprog");
50MODULE_FIRMWARE("nouveau/nva8.ctxvals");
51MODULE_FIRMWARE("nouveau/nvaa.ctxprog");
52MODULE_FIRMWARE("nouveau/nvaa.ctxvals");
53MODULE_FIRMWARE("nouveau/nvac.ctxprog");
54MODULE_FIRMWARE("nouveau/nvac.ctxvals");
55
/* True on original G80 (chipset 0x50-0x5f); later NV50-family chips
 * differ in context-header offset and extra registers.  Relies on a
 * local 'dev_priv' being in scope at each use site. */
56#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50)
57
/*
 * nv50_graph_init_reset - pulse PGRAPH's enable bits in PMC to reset it.
 * Bit 21 is an undocumented extra enable alongside NV_PMC_ENABLE_PGRAPH.
 * Clear-then-set order is the reset pulse; do not reorder.
 */
58static void
59nv50_graph_init_reset(struct drm_device *dev)
60{
61 uint32_t pmc_e = NV_PMC_ENABLE_PGRAPH | (1 << 21);
62
63 NV_DEBUG(dev, "\n");
64
65 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e);
66 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | pmc_e);
67}
68
/*
 * nv50_graph_init_intr - acknowledge any pending PGRAPH interrupts
 * (and the 0x400138 trap status) then unmask all interrupt sources.
 */
69static void
70nv50_graph_init_intr(struct drm_device *dev)
71{
72 NV_DEBUG(dev, "\n");
73
74 nv_wr32(dev, NV03_PGRAPH_INTR, 0xffffffff);
75 nv_wr32(dev, 0x400138, 0xffffffff);
76 nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xffffffff);
77}
78
/*
 * nv50_graph_init_regs__nv - program PGRAPH error-reporting and enable
 * registers with the values NVIDIA's binary driver uses (hence "__nv").
 * 0xc0000000 writes presumably arm per-unit error reporting -- exact
 * semantics are not documented here.
 */
79static void
80nv50_graph_init_regs__nv(struct drm_device *dev)
81{
82 NV_DEBUG(dev, "\n");
83
84 nv_wr32(dev, 0x400804, 0xc0000000);
85 nv_wr32(dev, 0x406800, 0xc0000000);
86 nv_wr32(dev, 0x400c04, 0xc0000000);
87 nv_wr32(dev, 0x401804, 0xc0000000);
88 nv_wr32(dev, 0x405018, 0xc0000000);
89 nv_wr32(dev, 0x402000, 0xc0000000);
90
91 nv_wr32(dev, 0x400108, 0xffffffff);
92
93 nv_wr32(dev, 0x400824, 0x00004000);
94 nv_wr32(dev, 0x400500, 0x00010001);
95}
96
/*
 * nv50_graph_init_regs - enable hardware context switching and set an
 * (undocumented) value in 0x402ca8.
 */
97static void
98nv50_graph_init_regs(struct drm_device *dev)
99{
100 NV_DEBUG(dev, "\n");
101
102 nv_wr32(dev, NV04_PGRAPH_DEBUG_3,
103 (1 << 2) /* HW_CONTEXT_SWITCH_ENABLED */);
104 nv_wr32(dev, 0x402ca8, 0x800);
105}
106
/*
 * nv50_graph_init_ctxctl - upload the context-switch microcode (via the
 * shared nv40 grctx loader) and clear the current-context pointers.
 * Always returns 0.
 */
107static int
108nv50_graph_init_ctxctl(struct drm_device *dev)
109{
110 NV_DEBUG(dev, "\n");
111
112 nv40_grctx_init(dev);
113
114 nv_wr32(dev, 0x400320, 4);
115 nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0);
116 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, 0);
117 return 0;
118}
119
/*
 * nv50_graph_init - bring up the PGRAPH engine.
 *
 * Resets the engine, programs NVIDIA's register defaults and our own,
 * enables interrupts, then initialises context-switch control.
 * Returns 0 on success or a negative errno from ctxctl setup.
 */
int
nv50_graph_init(struct drm_device *dev)
{
	NV_DEBUG(dev, "\n");

	nv50_graph_init_reset(dev);
	nv50_graph_init_regs__nv(dev);
	nv50_graph_init_regs(dev);
	nv50_graph_init_intr(dev);

	return nv50_graph_init_ctxctl(dev);
}
138
/*
 * nv50_graph_takedown - release context microcode/values loaded at init.
 */
139void
140nv50_graph_takedown(struct drm_device *dev)
141{
142 NV_DEBUG(dev, "\n");
143 nv40_grctx_fini(dev);
144}
145
146void
147nv50_graph_fifo_access(struct drm_device *dev, bool enabled)
148{
149 const uint32_t mask = 0x00010001;
150
151 if (enabled)
152 nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | mask);
153 else
154 nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) & ~mask);
155}
156
/*
 * nv50_graph_channel - find the software channel whose context is
 * currently loaded on PGRAPH.
 *
 * Reads the CTXCTL current-context register; if a context is loaded,
 * converts the instance field (stored >>12) back to a RAMIN address and
 * matches it against every known channel.  Returns NULL when no context
 * is loaded or no channel matches.
 */
157struct nouveau_channel *
158nv50_graph_channel(struct drm_device *dev)
159{
160 struct drm_nouveau_private *dev_priv = dev->dev_private;
161 uint32_t inst;
162 int i;
163
164 inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
165 if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
166 return NULL;
167 inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12;
168
169 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
170 struct nouveau_channel *chan = dev_priv->fifos[i];
171
172 if (chan && chan->ramin && chan->ramin->instance == inst)
173 return chan;
174 }
175
176 return NULL;
177}
178
/*
 * nv50_graph_create_context - allocate a channel's PGRAPH context.
 *
 * Allocates a 0x70000-byte grctx buffer, points the channel's RAMIN
 * context header at it, then seeds the buffer with the chipset's
 * default context values and the channel's own RAMIN instance.
 * Returns 0 on success or a negative errno.
 */
179int
180nv50_graph_create_context(struct nouveau_channel *chan)
181{
182 struct drm_device *dev = chan->dev;
183 struct drm_nouveau_private *dev_priv = dev->dev_private;
184 struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
185 struct nouveau_gpuobj *ctx;
186 uint32_t grctx_size = 0x70000;
187 int hdr, ret;
188
189 NV_DEBUG(dev, "ch%d\n", chan->id);
190
191 ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, grctx_size, 0x1000,
192 NVOBJ_FLAG_ZERO_ALLOC |
193 NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx);
194 if (ret)
195 return ret;
196 ctx = chan->ramin_grctx->gpuobj;
197
 /* Context header lives at +0x200 in RAMIN on G80, +0x20 later.
  * Layout: flags, limit (inclusive), base, then three more words. */
198 hdr = IS_G80 ? 0x200 : 0x20;
199 dev_priv->engine.instmem.prepare_access(dev, true);
200 nv_wo32(dev, ramin, (hdr + 0x00)/4, 0x00190002);
201 nv_wo32(dev, ramin, (hdr + 0x04)/4, chan->ramin_grctx->instance +
202 grctx_size - 1);
203 nv_wo32(dev, ramin, (hdr + 0x08)/4, chan->ramin_grctx->instance);
204 nv_wo32(dev, ramin, (hdr + 0x0c)/4, 0);
205 nv_wo32(dev, ramin, (hdr + 0x10)/4, 0);
206 nv_wo32(dev, ramin, (hdr + 0x14)/4, 0x00010000);
207 dev_priv->engine.instmem.finish_access(dev);
208
209 dev_priv->engine.instmem.prepare_access(dev, true);
 /* Fill with default context values, then patch in the channel's
  * RAMIN instance; the zeroed word differs on NVA0-family chips. */
210 nv40_grctx_vals_load(dev, ctx);
211 nv_wo32(dev, ctx, 0x00000/4, chan->ramin->instance >> 12);
212 if ((dev_priv->chipset & 0xf0) == 0xa0)
213 nv_wo32(dev, ctx, 0x00004/4, 0x00000000);
214 else
215 nv_wo32(dev, ctx, 0x0011c/4, 0x00000000);
216 dev_priv->engine.instmem.finish_access(dev);
217
218 return 0;
219}
220
/*
 * nv50_graph_destroy_context - zero the channel's PGRAPH context header
 * in RAMIN (6 words starting at hdr) and release the grctx buffer.
 * No-op if the channel has no RAMIN.
 */
221void
222nv50_graph_destroy_context(struct nouveau_channel *chan)
223{
224 struct drm_device *dev = chan->dev;
225 struct drm_nouveau_private *dev_priv = dev->dev_private;
226 int i, hdr = IS_G80 ? 0x200 : 0x20;
227
228 NV_DEBUG(dev, "ch%d\n", chan->id);
229
230 if (!chan->ramin || !chan->ramin->gpuobj)
231 return;
232
233 dev_priv->engine.instmem.prepare_access(dev, true);
234 for (i = hdr; i < hdr + 24; i += 4)
235 nv_wo32(dev, chan->ramin->gpuobj, i/4, 0);
236 dev_priv->engine.instmem.finish_access(dev);
237
238 nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
239}
240
/*
 * nv50_graph_do_load_context - load the PGRAPH context at RAMIN
 * instance 'inst' (address >> 12) into the engine.
 *
 * Disables FIFO access for the duration, pokes the ctxctl load
 * sequence, then marks the context current in 0x40032c.  The 0x400040
 * write-read-write appears to be a required pulse -- semantics are not
 * documented here.  Always returns 0.
 */
241static int
242nv50_graph_do_load_context(struct drm_device *dev, uint32_t inst)
243{
244 uint32_t fifo = nv_rd32(dev, 0x400500);
245
246 nv_wr32(dev, 0x400500, fifo & ~1);
247 nv_wr32(dev, 0x400784, inst);
248 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x40);
249 nv_wr32(dev, 0x400320, nv_rd32(dev, 0x400320) | 0x11);
250 nv_wr32(dev, 0x400040, 0xffffffff);
 /* Read back to flush the posted write before clearing. */
251 (void)nv_rd32(dev, 0x400040);
252 nv_wr32(dev, 0x400040, 0x00000000);
253 nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 1);
254
 /* Only mark the context current once the engine has gone idle. */
255 if (nouveau_wait_for_idle(dev))
256 nv_wr32(dev, 0x40032c, inst | (1<<31));
257 nv_wr32(dev, 0x400500, fifo);
258
259 return 0;
260}
261
262int
263nv50_graph_load_context(struct nouveau_channel *chan)
264{
265 uint32_t inst = chan->ramin->instance >> 12;
266
267 NV_DEBUG(chan->dev, "ch%d\n", chan->id);
268 return nv50_graph_do_load_context(chan->dev, inst);
269}
270
/*
 * nv50_graph_unload_context - save the currently-loaded PGRAPH context
 * back to its RAMIN buffer.
 *
 * No-op (returns 0) if no context is loaded.  Disables FIFO access,
 * triggers the ctxctl save sequence, waits for idle, then clears the
 * LOADED bit by writing the bare instance back to CTXCTL_CUR.
 */
271int
272nv50_graph_unload_context(struct drm_device *dev)
273{
274 uint32_t inst, fifo = nv_rd32(dev, 0x400500);
275
276 inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
277 if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
278 return 0;
279 inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE;
280
281 nv_wr32(dev, 0x400500, fifo & ~1);
282 nv_wr32(dev, 0x400784, inst);
283 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20);
284 nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01);
285 nouveau_wait_for_idle(dev);
286 nv_wr32(dev, 0x400500, fifo);
287
288 nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst);
289 return 0;
290}
291
/*
 * nv50_graph_context_switch - service a context-switch request: save the
 * current context, load the one the hardware requested via CTXCTL_NEXT,
 * and re-enable the context-switch interrupt.
 */
292void
293nv50_graph_context_switch(struct drm_device *dev)
294{
295 uint32_t inst;
296
297 nv50_graph_unload_context(dev);
298
299 inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_NEXT);
300 inst &= NV50_PGRAPH_CTXCTL_NEXT_INSTANCE;
301 nv50_graph_do_load_context(dev, inst);
302
303 nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
304 NV40_PGRAPH_INTR_EN) | NV_PGRAPH_INTR_CONTEXT_SWITCH);
305}
306
/*
 * nv50_graph_nvsw_dma_vblsem - software-class method 0x018c: bind the
 * notifier object (looked up by handle in 'data') that will back the
 * vblank semaphore.  Resets the offset so 0x0400 must be sent before a
 * release can be queued.
 *
 * Returns -ENOENT if the handle is unknown, -EINVAL if the object is
 * not a valid notifier.
 */
307static int
308nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan, int grclass,
309 int mthd, uint32_t data)
310{
311 struct nouveau_gpuobj_ref *ref = NULL;
312
313 if (nouveau_gpuobj_ref_find(chan, data, &ref))
314 return -ENOENT;
315
316 if (nouveau_notifier_offset(ref->gpuobj, NULL))
317 return -EINVAL;
318
319 chan->nvsw.vblsem = ref->gpuobj;
 /* ~0 marks "offset not yet set"; see vblsem_release's guard. */
320 chan->nvsw.vblsem_offset = ~0;
321 return 0;
322}
323
/*
 * nv50_graph_nvsw_vblsem_offset - software-class method 0x0400: set the
 * byte offset within the bound notifier where the vblank semaphore is
 * written.  Stored in 32-bit words.  Returns -ERANGE if the offset is
 * outside the notifier.
 */
324static int
325nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan, int grclass,
326 int mthd, uint32_t data)
327{
328 if (nouveau_notifier_offset(chan->nvsw.vblsem, &data))
329 return -ERANGE;
330
331 chan->nvsw.vblsem_offset = data >> 2;
332 return 0;
333}
334
/*
 * nv50_graph_nvsw_vblsem_release_val - software-class method 0x0404:
 * record the value to store into the semaphore on release.
 */
335static int
336nv50_graph_nvsw_vblsem_release_val(struct nouveau_channel *chan, int grclass,
337 int mthd, uint32_t data)
338{
339 chan->nvsw.vblsem_rval = data;
340 return 0;
341}
342
/*
 * nv50_graph_nvsw_vblsem_release - software-class method 0x0408: queue
 * a semaphore release on the next vblank of CRTC 'data' (0 or 1).
 *
 * Requires a bound semaphore and offset (methods 0x018c/0x0400 first);
 * returns -EINVAL otherwise.  Enables the CRTC's vblank interrupt if it
 * is not already on, then parks the channel on the vblank wait list.
 */
343static int
344nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan, int grclass,
345 int mthd, uint32_t data)
346{
347 struct drm_device *dev = chan->dev;
348 struct drm_nouveau_private *dev_priv = dev->dev_private;
349
350 if (!chan->nvsw.vblsem || chan->nvsw.vblsem_offset == ~0 || data > 1)
351 return -EINVAL;
352
353 if (!(nv_rd32(dev, NV50_PDISPLAY_INTR_EN) &
354 NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(data))) {
  /* Ack any stale vblank status before unmasking. */
355 nv_wr32(dev, NV50_PDISPLAY_INTR_1,
356 NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(data));
357 nv_wr32(dev, NV50_PDISPLAY_INTR_EN, nv_rd32(dev,
358 NV50_PDISPLAY_INTR_EN) |
359 NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(data));
360 }
361
362 list_add(&chan->nvsw.vbl_wait, &dev_priv->vbl_waiting);
363 return 0;
364}
365
/* Software-object (class 0x506e) method dispatch table; handlers above. */
366static struct nouveau_pgraph_object_method nv50_graph_nvsw_methods[] = {
367 { 0x018c, nv50_graph_nvsw_dma_vblsem },
368 { 0x0400, nv50_graph_nvsw_vblsem_offset },
369 { 0x0404, nv50_graph_nvsw_vblsem_release_val },
370 { 0x0408, nv50_graph_nvsw_vblsem_release },
371 {}
372};
373
/* Object classes accepted on NV50 PGRAPH; only the software class has
 * driver-side method handlers (second field marks software classes). */
374struct nouveau_pgraph_object_class nv50_graph_grclass[] = {
375 { 0x506e, true, nv50_graph_nvsw_methods }, /* nvsw */
376 { 0x0030, false, NULL }, /* null */
377 { 0x5039, false, NULL }, /* m2mf */
378 { 0x502d, false, NULL }, /* 2d */
379 { 0x50c0, false, NULL }, /* compute */
380 { 0x5097, false, NULL }, /* tesla (nv50) */
381 { 0x8297, false, NULL }, /* tesla (nv80/nv90) */
382 { 0x8397, false, NULL }, /* tesla (nva0) */
383 { 0x8597, false, NULL }, /* tesla (nva8) */
384 {}
385};
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
new file mode 100644
index 000000000000..94400f777e7f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -0,0 +1,509 @@
1/*
2 * Copyright (C) 2007 Ben Skeggs.
3 *
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sublicense, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial
16 * portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 */
27
28#include "drmP.h"
29#include "drm.h"
30#include "nouveau_drv.h"
31
/* Private state for the NV50 instance-memory (PRAMIN) engine. */
32struct nv50_instmem_priv {
33 uint32_t save1700[5]; /* 0x1700->0x1710 */
34
 /* Page table mapping PRAMIN into the VM, and the DMA objects
  * backing the PRAMIN (BAR3) and framebuffer (BAR1) apertures. */
35 struct nouveau_gpuobj_ref *pramin_pt;
36 struct nouveau_gpuobj_ref *pramin_bar;
37 struct nouveau_gpuobj_ref *fb_bar;
38
 /* Direction of the last prepare_access() call (true = write). */
39 bool last_access_wr;
40};
41
/* PRAMIN pages are 4 KiB; the page table holds one 8-byte PTE per page. */
42#define NV50_INSTMEM_PAGE_SHIFT 12
43#define NV50_INSTMEM_PAGE_SIZE (1 << NV50_INSTMEM_PAGE_SHIFT)
44#define NV50_INSTMEM_PT_SIZE(a) (((a) >> 12) << 3)
45
46/*NOTE: - Assumes 0x1700 already covers the correct MiB of PRAMIN
47 */
/* Write 32-bit value 'v' at byte offset 'o' into gpuobj 'g' through the
 * BAR0 PRAMIN window.  Uses the object's own backing address when it
 * has one, otherwise its offset within the enclosing channel's RAMIN.
 * Relies on locals 'dev' and 'chan' at the expansion site. */
48#define BAR0_WI32(g, o, v) do { \
49 uint32_t offset; \
50 if ((g)->im_backing) { \
51 offset = (g)->im_backing_start; \
52 } else { \
53 offset = chan->ramin->gpuobj->im_backing_start; \
54 offset += (g)->im_pramin->start; \
55 } \
56 offset += (o); \
57 nv_wr32(dev, NV_RAMIN + (offset & 0xfffff), (v)); \
58} while (0)
59
/*
 * nv50_instmem_init - bring up NV50 instance memory (PRAMIN) and the VM.
 *
 * Reserves the last MiB of VRAM, lays out the GART/VRAM virtual ranges,
 * builds a fake channel 0/127 by hand (the normal channel-creation path
 * needs PRAMIN access, which does not exist yet), constructs the VM page
 * directory and page tables, creates DMA objects for the PRAMIN and FB
 * BARs, points the BAR config registers at them, and finally sanity-
 * checks the mapping by reading the whole fake channel back.
 *
 * NOTE(review): error paths after the fake channel is created return
 * without freeing it directly; cleanup presumably relies on
 * nv50_instmem_takedown() finding it via dev_priv->fifos[0] -- confirm
 * against the caller's unwind path.
 */
60int
61nv50_instmem_init(struct drm_device *dev)
62{
63 struct drm_nouveau_private *dev_priv = dev->dev_private;
64 struct nouveau_channel *chan;
65 uint32_t c_offset, c_size, c_ramfc, c_vmpd, c_base, pt_size;
66 struct nv50_instmem_priv *priv;
67 int ret, i;
68 uint32_t v, save_nv001700;
69
70 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
71 if (!priv)
72 return -ENOMEM;
73 dev_priv->engine.instmem.priv = priv;
74
75 /* Save state, will restore at takedown. */
76 for (i = 0x1700; i <= 0x1710; i += 4)
77 priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i);
78
79 /* Reserve the last MiB of VRAM, we should probably try to avoid
80 * setting up the below tables over the top of the VBIOS image at
81 * some point.
82 */
83 dev_priv->ramin_rsvd_vram = 1 << 20;
84 c_offset = nouveau_mem_fb_amount(dev) - dev_priv->ramin_rsvd_vram;
85 c_size = 128 << 10;
 /* G80 keeps RAMFC at 0 and the page directory at 0x1400; later
  * chips use 0x20 / 0x200. */
86 c_vmpd = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x1400 : 0x200;
87 c_ramfc = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x0 : 0x20;
88 c_base = c_vmpd + 0x4000;
89 pt_size = NV50_INSTMEM_PT_SIZE(dev_priv->ramin_size);
90
91 NV_DEBUG(dev, " Rsvd VRAM base: 0x%08x\n", c_offset);
92 NV_DEBUG(dev, " VBIOS image: 0x%08x\n",
93 (nv_rd32(dev, 0x619f04) & ~0xff) << 8);
94 NV_DEBUG(dev, " Aperture size: %d MiB\n", dev_priv->ramin_size >> 20);
95 NV_DEBUG(dev, " PT size: %d KiB\n", pt_size >> 10);
96
97 /* Determine VM layout, we need to do this first to make sure
98 * we allocate enough memory for all the page tables.
99 */
100 dev_priv->vm_gart_base = roundup(NV50_VM_BLOCK, NV50_VM_BLOCK);
101 dev_priv->vm_gart_size = NV50_VM_BLOCK;
102
103 dev_priv->vm_vram_base = dev_priv->vm_gart_base + dev_priv->vm_gart_size;
104 dev_priv->vm_vram_size = nouveau_mem_fb_amount(dev);
105 if (dev_priv->vm_vram_size > NV50_VM_MAX_VRAM)
106 dev_priv->vm_vram_size = NV50_VM_MAX_VRAM;
107 dev_priv->vm_vram_size = roundup(dev_priv->vm_vram_size, NV50_VM_BLOCK);
108 dev_priv->vm_vram_pt_nr = dev_priv->vm_vram_size / NV50_VM_BLOCK;
109
110 dev_priv->vm_end = dev_priv->vm_vram_base + dev_priv->vm_vram_size;
111
112 NV_DEBUG(dev, "NV50VM: GART 0x%016llx-0x%016llx\n",
113 dev_priv->vm_gart_base,
114 dev_priv->vm_gart_base + dev_priv->vm_gart_size - 1);
115 NV_DEBUG(dev, "NV50VM: VRAM 0x%016llx-0x%016llx\n",
116 dev_priv->vm_vram_base,
117 dev_priv->vm_vram_base + dev_priv->vm_vram_size - 1);
118
 /* Grow the channel block to make room for the VRAM page tables. */
119 c_size += dev_priv->vm_vram_pt_nr * (NV50_VM_BLOCK / 65536 * 8);
120
121 /* Map BAR0 PRAMIN aperture over the memory we want to use */
122 save_nv001700 = nv_rd32(dev, NV50_PUNK_BAR0_PRAMIN);
123 nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, (c_offset >> 16));
124
125 /* Create a fake channel, and use it as our "dummy" channels 0/127.
126 * The main reason for creating a channel is so we can use the gpuobj
127 * code. However, it's probably worth noting that NVIDIA also setup
128 * their channels 0/127 with the same values they configure here.
129 * So, there may be some other reason for doing this.
130 *
131 * Have to create the entire channel manually, as the real channel
132 * creation code assumes we have PRAMIN access, and we don't until
133 * we're done here.
134 */
135 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
136 if (!chan)
137 return -ENOMEM;
138 chan->id = 0;
139 chan->dev = dev;
140 chan->file_priv = (struct drm_file *)-2;
141 dev_priv->fifos[0] = dev_priv->fifos[127] = chan;
142
143 /* Channel's PRAMIN object + heap */
144 ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, c_size, 0,
145 NULL, &chan->ramin);
146 if (ret)
147 return ret;
148
149 if (nouveau_mem_init_heap(&chan->ramin_heap, c_base, c_size - c_base))
150 return -ENOMEM;
151
152 /* RAMFC + zero channel's PRAMIN up to start of VM pagedir */
153 ret = nouveau_gpuobj_new_fake(dev, c_ramfc, c_offset + c_ramfc,
154 0x4000, 0, NULL, &chan->ramfc)
155 if (ret)
156 return ret;
157
158 for (i = 0; i < c_vmpd; i += 4)
159 BAR0_WI32(chan->ramin->gpuobj, i, 0);
160
161 /* VM page directory */
162 ret = nouveau_gpuobj_new_fake(dev, c_vmpd, c_offset + c_vmpd,
163 0x4000, 0, &chan->vm_pd, NULL);
164 if (ret)
165 return ret;
166 for (i = 0; i < 0x4000; i += 8) {
167 BAR0_WI32(chan->vm_pd, i + 0x00, 0x00000000);
168 BAR0_WI32(chan->vm_pd, i + 0x04, 0x00000000);
169 }
170
171 /* PRAMIN page table, cheat and map into VM at 0x0000000000.
172 * We map the entire fake channel into the start of the PRAMIN BAR
173 */
174 ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000,
175 0, &priv->pramin_pt);
176 if (ret)
177 return ret;
178
 /* PTEs: present (|1) pages covering the fake channel, 0x9 (not
  * present) beyond it. */
179 for (i = 0, v = c_offset; i < pt_size; i += 8, v += 0x1000) {
180 if (v < (c_offset + c_size))
181 BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v | 1);
182 else
183 BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000009);
184 BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
185 }
186
187 BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63);
188 BAR0_WI32(chan->vm_pd, 0x04, 0x00000000);
189
190 /* VRAM page table(s), mapped into VM at +1GiB */
191 for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
192 ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0,
193 NV50_VM_BLOCK/65536*8, 0, 0,
194 &chan->vm_vram_pt[i]);
195 if (ret) {
196 NV_ERROR(dev, "Error creating VRAM page tables: %d\n",
197 ret);
198 dev_priv->vm_vram_pt_nr = i;
199 return ret;
200 }
201 dev_priv->vm_vram_pt[i] = chan->vm_vram_pt[i]->gpuobj;
202
203 for (v = 0; v < dev_priv->vm_vram_pt[i]->im_pramin->size;
204 v += 4)
205 BAR0_WI32(dev_priv->vm_vram_pt[i], v, 0);
206
207 BAR0_WI32(chan->vm_pd, 0x10 + (i*8),
208 chan->vm_vram_pt[i]->instance | 0x61);
209 BAR0_WI32(chan->vm_pd, 0x14 + (i*8), 0);
210 }
211
212 /* DMA object for PRAMIN BAR */
213 ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 6*4, 16, 0,
214 &priv->pramin_bar);
215 if (ret)
216 return ret;
217 BAR0_WI32(priv->pramin_bar->gpuobj, 0x00, 0x7fc00000);
218 BAR0_WI32(priv->pramin_bar->gpuobj, 0x04, dev_priv->ramin_size - 1);
219 BAR0_WI32(priv->pramin_bar->gpuobj, 0x08, 0x00000000);
220 BAR0_WI32(priv->pramin_bar->gpuobj, 0x0c, 0x00000000);
221 BAR0_WI32(priv->pramin_bar->gpuobj, 0x10, 0x00000000);
222 BAR0_WI32(priv->pramin_bar->gpuobj, 0x14, 0x00000000);
223
224 /* DMA object for FB BAR */
225 ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 6*4, 16, 0,
226 &priv->fb_bar);
227 if (ret)
228 return ret;
229 BAR0_WI32(priv->fb_bar->gpuobj, 0x00, 0x7fc00000);
230 BAR0_WI32(priv->fb_bar->gpuobj, 0x04, 0x40000000 +
231 drm_get_resource_len(dev, 1) - 1);
232 BAR0_WI32(priv->fb_bar->gpuobj, 0x08, 0x40000000);
233 BAR0_WI32(priv->fb_bar->gpuobj, 0x0c, 0x00000000);
234 BAR0_WI32(priv->fb_bar->gpuobj, 0x10, 0x00000000);
235 BAR0_WI32(priv->fb_bar->gpuobj, 0x14, 0x00000000);
236
237 /* Poke the relevant regs, and pray it works :) */
238 nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12));
239 nv_wr32(dev, NV50_PUNK_UNK1710, 0);
240 nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12) |
241 NV50_PUNK_BAR_CFG_BASE_VALID);
242 nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->instance >> 4) |
243 NV50_PUNK_BAR1_CTXDMA_VALID);
244 nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->instance >> 4) |
245 NV50_PUNK_BAR3_CTXDMA_VALID);
246
247 for (i = 0; i < 8; i++)
248 nv_wr32(dev, 0x1900 + (i*4), 0);
249
250 /* Assume that praying isn't enough, check that we can re-read the
251 * entire fake channel back from the PRAMIN BAR */
252 dev_priv->engine.instmem.prepare_access(dev, false);
253 for (i = 0; i < c_size; i += 4) {
254 if (nv_rd32(dev, NV_RAMIN + i) != nv_ri32(dev, i)) {
255 NV_ERROR(dev, "Error reading back PRAMIN at 0x%08x\n",
256 i);
257 dev_priv->engine.instmem.finish_access(dev);
258 return -EINVAL;
259 }
260 }
261 dev_priv->engine.instmem.finish_access(dev);
262
263 nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, save_nv001700);
264
265 /* Global PRAMIN heap */
266 if (nouveau_mem_init_heap(&dev_priv->ramin_heap,
267 c_size, dev_priv->ramin_size - c_size)) {
268 dev_priv->ramin_heap = NULL;
269 NV_ERROR(dev, "Failed to init RAMIN heap\n");
270 }
271
272 /*XXX: incorrect, but needed to make hash func "work" */
273 dev_priv->ramht_offset = 0x10000;
274 dev_priv->ramht_bits = 9;
275 dev_priv->ramht_size = (1 << dev_priv->ramht_bits);
276 return 0;
277}
278
/*
 * nv50_instmem_takedown - undo nv50_instmem_init(): restore the saved
 * 0x1700-0x1710 register state, release the BAR DMA objects and PRAMIN
 * page table, then dismantle the hand-built dummy channel 0/127.
 * Safe to call if init never completed (priv == NULL, chan == NULL).
 */
279void
280nv50_instmem_takedown(struct drm_device *dev)
281{
282 struct drm_nouveau_private *dev_priv = dev->dev_private;
283 struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
284 struct nouveau_channel *chan = dev_priv->fifos[0];
285 int i;
286
287 NV_DEBUG(dev, "\n");
288
289 if (!priv)
290 return;
291
292 /* Restore state from before init */
293 for (i = 0x1700; i <= 0x1710; i += 4)
294 nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]);
295
296 nouveau_gpuobj_ref_del(dev, &priv->fb_bar);
297 nouveau_gpuobj_ref_del(dev, &priv->pramin_bar);
298 nouveau_gpuobj_ref_del(dev, &priv->pramin_pt);
299
300 /* Destroy dummy channel */
301 if (chan) {
302 for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
303 nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]);
304 dev_priv->vm_vram_pt[i] = NULL;
305 }
306 dev_priv->vm_vram_pt_nr = 0;
307
308 nouveau_gpuobj_del(dev, &chan->vm_pd);
309 nouveau_gpuobj_ref_del(dev, &chan->ramfc);
310 nouveau_gpuobj_ref_del(dev, &chan->ramin);
311 nouveau_mem_takedown(&chan->ramin_heap);
312
313 dev_priv->fifos[0] = dev_priv->fifos[127] = NULL;
314 kfree(chan);
315 }
316
317 dev_priv->engine.instmem.priv = NULL;
318 kfree(priv);
319}
320
/*
 * nv50_instmem_suspend - snapshot the dummy channel's entire PRAMIN
 * contents into a vmalloc'd buffer so it can be restored after resume
 * (VRAM contents are lost across suspend).  Returns -ENOMEM on
 * allocation failure.
 */
321int
322nv50_instmem_suspend(struct drm_device *dev)
323{
324 struct drm_nouveau_private *dev_priv = dev->dev_private;
325 struct nouveau_channel *chan = dev_priv->fifos[0];
326 struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
327 int i;
328
329 ramin->im_backing_suspend = vmalloc(ramin->im_pramin->size);
330 if (!ramin->im_backing_suspend)
331 return -ENOMEM;
332
333 for (i = 0; i < ramin->im_pramin->size; i += 4)
334 ramin->im_backing_suspend[i/4] = nv_ri32(dev, i);
335 return 0;
336}
337
/*
 * nv50_instmem_resume - restore the PRAMIN snapshot taken at suspend and
 * reprogram the BAR configuration registers (same "poke and pray"
 * sequence as nv50_instmem_init()).
 */
338void
339nv50_instmem_resume(struct drm_device *dev)
340{
341 struct drm_nouveau_private *dev_priv = dev->dev_private;
342 struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
343 struct nouveau_channel *chan = dev_priv->fifos[0];
344 struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
345 int i;
346
 /* Point the BAR0 PRAMIN window back at the channel's backing VRAM
  * before writing the snapshot through it. */
347 nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, (ramin->im_backing_start >> 16));
348 for (i = 0; i < ramin->im_pramin->size; i += 4)
349 BAR0_WI32(ramin, i, ramin->im_backing_suspend[i/4]);
350 vfree(ramin->im_backing_suspend);
351 ramin->im_backing_suspend = NULL;
352
353 /* Poke the relevant regs, and pray it works :) */
354 nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12));
355 nv_wr32(dev, NV50_PUNK_UNK1710, 0);
356 nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12) |
357 NV50_PUNK_BAR_CFG_BASE_VALID);
358 nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->instance >> 4) |
359 NV50_PUNK_BAR1_CTXDMA_VALID);
360 nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->instance >> 4) |
361 NV50_PUNK_BAR3_CTXDMA_VALID);
362
363 for (i = 0; i < 8; i++)
364 nv_wr32(dev, 0x1900 + (i*4), 0);
365}
366
367int
368nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
369 uint32_t *sz)
370{
371 int ret;
372
373 if (gpuobj->im_backing)
374 return -EINVAL;
375
376 *sz = (*sz + (NV50_INSTMEM_PAGE_SIZE-1)) & ~(NV50_INSTMEM_PAGE_SIZE-1);
377 if (*sz == 0)
378 return -EINVAL;
379
380 ret = nouveau_bo_new(dev, NULL, *sz, 0, TTM_PL_FLAG_VRAM, 0, 0x0000,
381 true, false, &gpuobj->im_backing);
382 if (ret) {
383 NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret);
384 return ret;
385 }
386
387 ret = nouveau_bo_pin(gpuobj->im_backing, TTM_PL_FLAG_VRAM);
388 if (ret) {
389 NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret);
390 nouveau_bo_ref(NULL, &gpuobj->im_backing);
391 return ret;
392 }
393
394 gpuobj->im_backing_start = gpuobj->im_backing->bo.mem.mm_node->start;
395 gpuobj->im_backing_start <<= PAGE_SHIFT;
396
397 return 0;
398}
399
400void
401nv50_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
402{
403 struct drm_nouveau_private *dev_priv = dev->dev_private;
404
405 if (gpuobj && gpuobj->im_backing) {
406 if (gpuobj->im_bound)
407 dev_priv->engine.instmem.unbind(dev, gpuobj);
408 nouveau_bo_unpin(gpuobj->im_backing);
409 nouveau_bo_ref(NULL, &gpuobj->im_backing);
410 gpuobj->im_backing = NULL;
411 }
412}
413
/* Map a gpuobj's VRAM backing pages into the PRAMIN aperture by filling in
 * the PRAMIN page table, then flush so the mapping takes effect.
 *
 * Returns 0 on success, -EINVAL if the object is not populated / has no
 * PRAMIN allocation / is already bound, or -EBUSY on flush timeout.
 */
int
nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
	uint32_t pte, pte_end, vram;

	if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
		return -EINVAL;

	NV_DEBUG(dev, "st=0x%0llx sz=0x%0llx\n",
		 gpuobj->im_pramin->start, gpuobj->im_pramin->size);

	/* PTE byte offsets: one 8-byte entry per 4KiB PRAMIN page. */
	pte     = (gpuobj->im_pramin->start >> 12) << 3;
	pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
	vram    = gpuobj->im_backing_start;

	NV_DEBUG(dev, "pramin=0x%llx, pte=%d, pte_end=%d\n",
		 gpuobj->im_pramin->start, pte, pte_end);
	NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start);

	dev_priv->engine.instmem.prepare_access(dev, true);
	while (pte < pte_end) {
		/* Low word: page address with bit 0 set -- presumably a
		 * "present" flag (TODO confirm); high word zero. */
		nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, vram | 1);
		nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000);

		pte += 8;
		vram += NV50_INSTMEM_PAGE_SIZE;
	}
	dev_priv->engine.instmem.finish_access(dev);

	/* Two flush handshakes on 0x100c80; each write starts an operation
	 * and bit 0 clears when it completes.  Exact semantics of the two
	 * command values are undocumented here. */
	nv_wr32(dev, 0x100c80, 0x00040001);
	if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
		NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (1)\n");
		NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
		return -EBUSY;
	}

	nv_wr32(dev, 0x100c80, 0x00060001);
	if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
		NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
		NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
		return -EBUSY;
	}

	gpuobj->im_bound = 1;
	return 0;
}
462
463int
464nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
465{
466 struct drm_nouveau_private *dev_priv = dev->dev_private;
467 struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
468 uint32_t pte, pte_end;
469
470 if (gpuobj->im_bound == 0)
471 return -EINVAL;
472
473 pte = (gpuobj->im_pramin->start >> 12) << 3;
474 pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
475
476 dev_priv->engine.instmem.prepare_access(dev, true);
477 while (pte < pte_end) {
478 nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, 0x00000009);
479 nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000);
480 pte += 8;
481 }
482 dev_priv->engine.instmem.finish_access(dev);
483
484 gpuobj->im_bound = 0;
485 return 0;
486}
487
488void
489nv50_instmem_prepare_access(struct drm_device *dev, bool write)
490{
491 struct drm_nouveau_private *dev_priv = dev->dev_private;
492 struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
493
494 priv->last_access_wr = write;
495}
496
497void
498nv50_instmem_finish_access(struct drm_device *dev)
499{
500 struct drm_nouveau_private *dev_priv = dev->dev_private;
501 struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
502
503 if (priv->last_access_wr) {
504 nv_wr32(dev, 0x070000, 0x00000001);
505 if (!nv_wait(0x070000, 0x00000001, 0x00000000))
506 NV_ERROR(dev, "PRAMIN flush timeout\n");
507 }
508}
509
diff --git a/drivers/gpu/drm/nouveau/nv50_mc.c b/drivers/gpu/drm/nouveau/nv50_mc.c
new file mode 100644
index 000000000000..e0a9c3faa202
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_mc.c
@@ -0,0 +1,40 @@
1/*
2 * Copyright (C) 2007 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm.h"
29#include "nouveau_drv.h"
30
/* Initialise the NV50 master control (PMC) engine: enable every engine by
 * setting all bits of NV03_PMC_ENABLE.  Always succeeds. */
int
nv50_mc_init(struct drm_device *dev)
{
	nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
	return 0;
}
37
/* PMC teardown: intentionally a no-op on NV50. */
void nv50_mc_takedown(struct drm_device *dev)
{
}
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
new file mode 100644
index 000000000000..8c280463a664
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -0,0 +1,309 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm_crtc_helper.h"
29
30#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
31#include "nouveau_reg.h"
32#include "nouveau_drv.h"
33#include "nouveau_dma.h"
34#include "nouveau_encoder.h"
35#include "nouveau_connector.h"
36#include "nouveau_crtc.h"
37#include "nv50_display.h"
38
39static void
40nv50_sor_disconnect(struct nouveau_encoder *nv_encoder)
41{
42 struct drm_device *dev = to_drm_encoder(nv_encoder)->dev;
43 struct drm_nouveau_private *dev_priv = dev->dev_private;
44 struct nouveau_channel *evo = dev_priv->evo;
45 int ret;
46
47 NV_DEBUG(dev, "Disconnecting SOR %d\n", nv_encoder->or);
48
49 ret = RING_SPACE(evo, 2);
50 if (ret) {
51 NV_ERROR(dev, "no space while disconnecting SOR\n");
52 return;
53 }
54 BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
55 OUT_RING(evo, 0);
56}
57
/* Run DisplayPort link training for a SOR, bracketed by the VBIOS DP
 * encoder table's script0 (before) and script1 (after) init scripts.
 * Errors are logged but not propagated (void helper called from dpms). */
static void
nv50_sor_dp_link_train(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct bit_displayport_encoder_table *dpe;
	int dpe_headerlen;

	dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
	if (!dpe) {
		NV_ERROR(dev, "SOR-%d: no DP encoder table!\n", nv_encoder->or);
		return;
	}

	/* Script pointers are stored little-endian in the VBIOS image. */
	if (dpe->script0) {
		NV_DEBUG(dev, "SOR-%d: running DP script 0\n", nv_encoder->or);
		nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script0),
					    nv_encoder->dcb);
	}

	if (!nouveau_dp_link_train(encoder))
		NV_ERROR(dev, "SOR-%d: link training failed\n", nv_encoder->or);

	/* script1 runs even if training failed -- presumably intentional
	 * cleanup; TODO confirm. */
	if (dpe->script1) {
		NV_DEBUG(dev, "SOR-%d: running DP script 1\n", nv_encoder->or);
		nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script1),
					    nv_encoder->dcb);
	}
}
87
/* Set a SOR's power state via the PDISPLAY DPMS control register.
 *
 * Waits for any previous DPMS transaction to complete, flips the ON bit
 * according to @mode, latches the change with the PENDING bit, then waits
 * for the state register to settle.  For DP outputs, powering on also
 * (re)trains the link.
 */
static void
nv50_sor_dpms(struct drm_encoder *encoder, int mode)
{
	struct drm_device *dev = encoder->dev;
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	uint32_t val;
	int or = nv_encoder->or;

	NV_DEBUG(dev, "or %d mode %d\n", or, mode);

	/* wait for it to be done */
	if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_CTRL(or),
		     NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING, 0)) {
		/* Timeouts are logged but we press on regardless. */
		NV_ERROR(dev, "timeout: SOR_DPMS_CTRL_PENDING(%d) == 0\n", or);
		NV_ERROR(dev, "SOR_DPMS_CTRL(%d) = 0x%08x\n", or,
			 nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or)));
	}

	val = nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or));

	/* Any mode other than ON (standby/suspend/off) powers the SOR down. */
	if (mode == DRM_MODE_DPMS_ON)
		val |= NV50_PDISPLAY_SOR_DPMS_CTRL_ON;
	else
		val &= ~NV50_PDISPLAY_SOR_DPMS_CTRL_ON;

	nv_wr32(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or), val |
		NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING);
	if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_STATE(or),
		     NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
		NV_ERROR(dev, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", or);
		NV_ERROR(dev, "SOR_DPMS_STATE(%d) = 0x%08x\n", or,
			 nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_STATE(or)));
	}

	if (nv_encoder->dcb->type == OUTPUT_DP && mode == DRM_MODE_DPMS_ON)
		nv50_sor_dp_link_train(encoder);
}
125
/* .save helper hook: unimplemented on NV50; the error print marks any
 * unexpected call path. */
static void
nv50_sor_save(struct drm_encoder *encoder)
{
	NV_ERROR(encoder->dev, "!!\n");
}
131
/* .restore helper hook: unimplemented on NV50; the error print marks any
 * unexpected call path. */
static void
nv50_sor_restore(struct drm_encoder *encoder)
{
	NV_ERROR(encoder->dev, "!!\n");
}
137
138static bool
139nv50_sor_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
140 struct drm_display_mode *adjusted_mode)
141{
142 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
143 struct nouveau_connector *connector;
144
145 NV_DEBUG(encoder->dev, "or %d\n", nv_encoder->or);
146
147 connector = nouveau_encoder_connector_get(nv_encoder);
148 if (!connector) {
149 NV_ERROR(encoder->dev, "Encoder has no connector\n");
150 return false;
151 }
152
153 if (connector->scaling_mode != DRM_MODE_SCALE_NONE &&
154 connector->native_mode) {
155 int id = adjusted_mode->base.id;
156 *adjusted_mode = *connector->native_mode;
157 adjusted_mode->base.id = id;
158 }
159
160 return true;
161}
162
/* .prepare helper hook: nothing to do before mode_set on NV50 SORs. */
static void
nv50_sor_prepare(struct drm_encoder *encoder)
{
}
167
/* .commit helper hook: nothing to do after mode_set on NV50 SORs. */
static void
nv50_sor_commit(struct drm_encoder *encoder)
{
}
172
/* Program a SOR for the given mode: power it on, build the EVO MODE_CTRL
 * word (output type, link selection, CRTC routing, sync polarity) and push
 * it through the EVO channel. */
static void
nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
		  struct drm_display_mode *adjusted_mode)
{
	struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
	struct nouveau_channel *evo = dev_priv->evo;
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_device *dev = encoder->dev;
	struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
	uint32_t mode_ctl = 0;
	int ret;

	NV_DEBUG(dev, "or %d\n", nv_encoder->or);

	nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);

	/* The raw mode_ctl values below are undocumented here; they appear
	 * to select output protocol and sublink (single vs dual-link TMDS
	 * keyed off the 165MHz single-link limit) -- TODO confirm against
	 * EVO documentation. */
	switch (nv_encoder->dcb->type) {
	case OUTPUT_TMDS:
		if (nv_encoder->dcb->sorconf.link & 1) {
			if (adjusted_mode->clock < 165000)
				mode_ctl = 0x0100;
			else
				mode_ctl = 0x0500;
		} else
			mode_ctl = 0x0200;
		break;
	case OUTPUT_DP:
		mode_ctl |= 0x00050000;
		if (nv_encoder->dcb->sorconf.link & 1)
			mode_ctl |= 0x00000800;
		else
			mode_ctl |= 0x00000900;
		break;
	default:
		break;
	}

	/* Route the SOR to whichever head owns this encoder's CRTC. */
	if (crtc->index == 1)
		mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC1;
	else
		mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC0;

	if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
		mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NHSYNC;

	if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
		mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NVSYNC;

	ret = RING_SPACE(evo, 2);
	if (ret) {
		NV_ERROR(dev, "no space while connecting SOR\n");
		return;
	}
	BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
	OUT_RING(evo, mode_ctl);
}
229
/* drm_encoder_helper_funcs vtable for NV50 SOR encoders; .detect is NULL
 * since SOR outputs rely on connector-level detection. */
static const struct drm_encoder_helper_funcs nv50_sor_helper_funcs = {
	.dpms = nv50_sor_dpms,
	.save = nv50_sor_save,
	.restore = nv50_sor_restore,
	.mode_fixup = nv50_sor_mode_fixup,
	.prepare = nv50_sor_prepare,
	.commit = nv50_sor_commit,
	.mode_set = nv50_sor_mode_set,
	.detect = NULL
};
240
241static void
242nv50_sor_destroy(struct drm_encoder *encoder)
243{
244 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
245
246 if (!encoder)
247 return;
248
249 NV_DEBUG(encoder->dev, "\n");
250
251 drm_encoder_cleanup(encoder);
252
253 kfree(nv_encoder);
254}
255
/* drm_encoder_funcs vtable for NV50 SOR encoders; only destruction is
 * needed at this level. */
static const struct drm_encoder_funcs nv50_sor_encoder_funcs = {
	.destroy = nv50_sor_destroy,
};
259
260int
261nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry)
262{
263 struct nouveau_encoder *nv_encoder = NULL;
264 struct drm_encoder *encoder;
265 bool dum;
266 int type;
267
268 NV_DEBUG(dev, "\n");
269
270 switch (entry->type) {
271 case OUTPUT_TMDS:
272 NV_INFO(dev, "Detected a TMDS output\n");
273 type = DRM_MODE_ENCODER_TMDS;
274 break;
275 case OUTPUT_LVDS:
276 NV_INFO(dev, "Detected a LVDS output\n");
277 type = DRM_MODE_ENCODER_LVDS;
278
279 if (nouveau_bios_parse_lvds_table(dev, 0, &dum, &dum)) {
280 NV_ERROR(dev, "Failed parsing LVDS table\n");
281 return -EINVAL;
282 }
283 break;
284 case OUTPUT_DP:
285 NV_INFO(dev, "Detected a DP output\n");
286 type = DRM_MODE_ENCODER_TMDS;
287 break;
288 default:
289 return -EINVAL;
290 }
291
292 nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
293 if (!nv_encoder)
294 return -ENOMEM;
295 encoder = to_drm_encoder(nv_encoder);
296
297 nv_encoder->dcb = entry;
298 nv_encoder->or = ffs(entry->or) - 1;
299
300 nv_encoder->disconnect = nv50_sor_disconnect;
301
302 drm_encoder_init(dev, encoder, &nv50_sor_encoder_funcs, type);
303 drm_encoder_helper_add(encoder, &nv50_sor_helper_funcs);
304
305 encoder->possible_crtcs = entry->heads;
306 encoder->possible_clones = 0;
307
308 return 0;
309}
diff --git a/drivers/gpu/drm/nouveau/nvreg.h b/drivers/gpu/drm/nouveau/nvreg.h
new file mode 100644
index 000000000000..5998c35237b0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvreg.h
@@ -0,0 +1,535 @@
1/* $XConsortium: nvreg.h /main/2 1996/10/28 05:13:41 kaleb $ */
2/*
3 * Copyright 1996-1997 David J. McKay
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * DAVID J. MCKAY BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
19 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
20 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24/* $XFree86: xc/programs/Xserver/hw/xfree86/drivers/nv/nvreg.h,v 1.6 2002/01/25 21:56:06 tsi Exp $ */
25
26#ifndef __NVREG_H_
27#define __NVREG_H_
28
29#define NV_PMC_OFFSET 0x00000000
30#define NV_PMC_SIZE 0x00001000
31
32#define NV_PBUS_OFFSET 0x00001000
33#define NV_PBUS_SIZE 0x00001000
34
35#define NV_PFIFO_OFFSET 0x00002000
36#define NV_PFIFO_SIZE 0x00002000
37
38#define NV_HDIAG_OFFSET 0x00005000
39#define NV_HDIAG_SIZE 0x00001000
40
41#define NV_PRAM_OFFSET 0x00006000
42#define NV_PRAM_SIZE 0x00001000
43
44#define NV_PVIDEO_OFFSET 0x00008000
45#define NV_PVIDEO_SIZE 0x00001000
46
47#define NV_PTIMER_OFFSET 0x00009000
48#define NV_PTIMER_SIZE 0x00001000
49
50#define NV_PPM_OFFSET 0x0000A000
51#define NV_PPM_SIZE 0x00001000
52
53#define NV_PTV_OFFSET 0x0000D000
54#define NV_PTV_SIZE 0x00001000
55
56#define NV_PRMVGA_OFFSET 0x000A0000
57#define NV_PRMVGA_SIZE 0x00020000
58
59#define NV_PRMVIO0_OFFSET 0x000C0000
60#define NV_PRMVIO_SIZE 0x00002000
61#define NV_PRMVIO1_OFFSET 0x000C2000
62
63#define NV_PFB_OFFSET 0x00100000
64#define NV_PFB_SIZE 0x00001000
65
66#define NV_PEXTDEV_OFFSET 0x00101000
67#define NV_PEXTDEV_SIZE 0x00001000
68
69#define NV_PME_OFFSET 0x00200000
70#define NV_PME_SIZE 0x00001000
71
72#define NV_PROM_OFFSET 0x00300000
73#define NV_PROM_SIZE 0x00010000
74
75#define NV_PGRAPH_OFFSET 0x00400000
76#define NV_PGRAPH_SIZE 0x00010000
77
78#define NV_PCRTC0_OFFSET 0x00600000
79#define NV_PCRTC0_SIZE 0x00002000 /* empirical */
80
81#define NV_PRMCIO0_OFFSET 0x00601000
82#define NV_PRMCIO_SIZE 0x00002000
83#define NV_PRMCIO1_OFFSET 0x00603000
84
85#define NV50_DISPLAY_OFFSET 0x00610000
86#define NV50_DISPLAY_SIZE 0x0000FFFF
87
88#define NV_PRAMDAC0_OFFSET 0x00680000
89#define NV_PRAMDAC0_SIZE 0x00002000
90
91#define NV_PRMDIO0_OFFSET 0x00681000
92#define NV_PRMDIO_SIZE 0x00002000
93#define NV_PRMDIO1_OFFSET 0x00683000
94
95#define NV_PRAMIN_OFFSET 0x00700000
96#define NV_PRAMIN_SIZE 0x00100000
97
98#define NV_FIFO_OFFSET 0x00800000
99#define NV_FIFO_SIZE 0x00800000
100
101#define NV_PMC_BOOT_0 0x00000000
102#define NV_PMC_ENABLE 0x00000200
103
104#define NV_VIO_VSE2 0x000003c3
105#define NV_VIO_SRX 0x000003c4
106
107#define NV_CIO_CRX__COLOR 0x000003d4
108#define NV_CIO_CR__COLOR 0x000003d5
109
110#define NV_PBUS_DEBUG_1 0x00001084
111#define NV_PBUS_DEBUG_4 0x00001098
112#define NV_PBUS_DEBUG_DUALHEAD_CTL 0x000010f0
113#define NV_PBUS_POWERCTRL_1 0x00001584
114#define NV_PBUS_POWERCTRL_2 0x00001588
115#define NV_PBUS_POWERCTRL_4 0x00001590
116#define NV_PBUS_PCI_NV_19 0x0000184C
117#define NV_PBUS_PCI_NV_20 0x00001850
118# define NV_PBUS_PCI_NV_20_ROM_SHADOW_DISABLED (0 << 0)
119# define NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED (1 << 0)
120
121#define NV_PFIFO_RAMHT 0x00002210
122
123#define NV_PTV_TV_INDEX 0x0000d220
124#define NV_PTV_TV_DATA 0x0000d224
125#define NV_PTV_HFILTER 0x0000d310
126#define NV_PTV_HFILTER2 0x0000d390
127#define NV_PTV_VFILTER 0x0000d510
128
129#define NV_PRMVIO_MISC__WRITE 0x000c03c2
130#define NV_PRMVIO_SRX 0x000c03c4
131#define NV_PRMVIO_SR 0x000c03c5
132# define NV_VIO_SR_RESET_INDEX 0x00
133# define NV_VIO_SR_CLOCK_INDEX 0x01
134# define NV_VIO_SR_PLANE_MASK_INDEX 0x02
135# define NV_VIO_SR_CHAR_MAP_INDEX 0x03
136# define NV_VIO_SR_MEM_MODE_INDEX 0x04
137#define NV_PRMVIO_MISC__READ 0x000c03cc
138#define NV_PRMVIO_GRX 0x000c03ce
139#define NV_PRMVIO_GX 0x000c03cf
140# define NV_VIO_GX_SR_INDEX 0x00
141# define NV_VIO_GX_SREN_INDEX 0x01
142# define NV_VIO_GX_CCOMP_INDEX 0x02
143# define NV_VIO_GX_ROP_INDEX 0x03
144# define NV_VIO_GX_READ_MAP_INDEX 0x04
145# define NV_VIO_GX_MODE_INDEX 0x05
146# define NV_VIO_GX_MISC_INDEX 0x06
147# define NV_VIO_GX_DONT_CARE_INDEX 0x07
148# define NV_VIO_GX_BIT_MASK_INDEX 0x08
149
150#define NV_PFB_BOOT_0 0x00100000
151#define NV_PFB_CFG0 0x00100200
152#define NV_PFB_CFG1 0x00100204
153#define NV_PFB_CSTATUS 0x0010020C
154#define NV_PFB_REFCTRL 0x00100210
155# define NV_PFB_REFCTRL_VALID_1 (1 << 31)
156#define NV_PFB_PAD 0x0010021C
157# define NV_PFB_PAD_CKE_NORMAL (1 << 0)
158#define NV_PFB_TILE_NV10 0x00100240
159#define NV_PFB_TILE_SIZE_NV10 0x00100244
160#define NV_PFB_REF 0x001002D0
161# define NV_PFB_REF_CMD_REFRESH (1 << 0)
162#define NV_PFB_PRE 0x001002D4
163# define NV_PFB_PRE_CMD_PRECHARGE (1 << 0)
164#define NV_PFB_CLOSE_PAGE2 0x0010033C
165#define NV_PFB_TILE_NV40 0x00100600
166#define NV_PFB_TILE_SIZE_NV40 0x00100604
167
168#define NV_PEXTDEV_BOOT_0 0x00101000
169# define NV_PEXTDEV_BOOT_0_STRAP_FP_IFACE_12BIT (8 << 12)
170#define NV_PEXTDEV_BOOT_3 0x0010100c
171
172#define NV_PCRTC_INTR_0 0x00600100
173# define NV_PCRTC_INTR_0_VBLANK (1 << 0)
174#define NV_PCRTC_INTR_EN_0 0x00600140
175#define NV_PCRTC_START 0x00600800
176#define NV_PCRTC_CONFIG 0x00600804
177# define NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA (1 << 0)
178# define NV_PCRTC_CONFIG_START_ADDRESS_HSYNC (2 << 0)
179#define NV_PCRTC_CURSOR_CONFIG 0x00600810
180# define NV_PCRTC_CURSOR_CONFIG_ENABLE_ENABLE (1 << 0)
181# define NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE (1 << 4)
182# define NV_PCRTC_CURSOR_CONFIG_ADDRESS_SPACE_PNVM (1 << 8)
183# define NV_PCRTC_CURSOR_CONFIG_CUR_BPP_32 (1 << 12)
184# define NV_PCRTC_CURSOR_CONFIG_CUR_PIXELS_64 (1 << 16)
185# define NV_PCRTC_CURSOR_CONFIG_CUR_LINES_32 (2 << 24)
186# define NV_PCRTC_CURSOR_CONFIG_CUR_LINES_64 (4 << 24)
187# define NV_PCRTC_CURSOR_CONFIG_CUR_BLEND_ALPHA (1 << 28)
188
189/* note: PCRTC_GPIO is not available on nv10, and in fact aliases 0x600810 */
190#define NV_PCRTC_GPIO 0x00600818
191#define NV_PCRTC_GPIO_EXT 0x0060081c
192#define NV_PCRTC_830 0x00600830
193#define NV_PCRTC_834 0x00600834
194#define NV_PCRTC_850 0x00600850
195#define NV_PCRTC_ENGINE_CTRL 0x00600860
196# define NV_CRTC_FSEL_I2C (1 << 4)
197# define NV_CRTC_FSEL_OVERLAY (1 << 12)
198
199#define NV_PRMCIO_ARX 0x006013c0
200#define NV_PRMCIO_AR__WRITE 0x006013c0
201#define NV_PRMCIO_AR__READ 0x006013c1
202# define NV_CIO_AR_MODE_INDEX 0x10
203# define NV_CIO_AR_OSCAN_INDEX 0x11
204# define NV_CIO_AR_PLANE_INDEX 0x12
205# define NV_CIO_AR_HPP_INDEX 0x13
206# define NV_CIO_AR_CSEL_INDEX 0x14
207#define NV_PRMCIO_INP0 0x006013c2
208#define NV_PRMCIO_CRX__COLOR 0x006013d4
209#define NV_PRMCIO_CR__COLOR 0x006013d5
210 /* Standard VGA CRTC registers */
211# define NV_CIO_CR_HDT_INDEX 0x00 /* horizontal display total */
212# define NV_CIO_CR_HDE_INDEX 0x01 /* horizontal display end */
213# define NV_CIO_CR_HBS_INDEX 0x02 /* horizontal blanking start */
214# define NV_CIO_CR_HBE_INDEX 0x03 /* horizontal blanking end */
215# define NV_CIO_CR_HBE_4_0 4:0
216# define NV_CIO_CR_HRS_INDEX 0x04 /* horizontal retrace start */
217# define NV_CIO_CR_HRE_INDEX 0x05 /* horizontal retrace end */
218# define NV_CIO_CR_HRE_4_0 4:0
219# define NV_CIO_CR_HRE_HBE_5 7:7
220# define NV_CIO_CR_VDT_INDEX 0x06 /* vertical display total */
221# define NV_CIO_CR_OVL_INDEX 0x07 /* overflow bits */
222# define NV_CIO_CR_OVL_VDT_8 0:0
223# define NV_CIO_CR_OVL_VDE_8 1:1
224# define NV_CIO_CR_OVL_VRS_8 2:2
225# define NV_CIO_CR_OVL_VBS_8 3:3
226# define NV_CIO_CR_OVL_VDT_9 5:5
227# define NV_CIO_CR_OVL_VDE_9 6:6
228# define NV_CIO_CR_OVL_VRS_9 7:7
229# define NV_CIO_CR_RSAL_INDEX 0x08 /* normally "preset row scan" */
230# define NV_CIO_CR_CELL_HT_INDEX 0x09 /* cell height?! normally "max scan line" */
231# define NV_CIO_CR_CELL_HT_VBS_9 5:5
232# define NV_CIO_CR_CELL_HT_SCANDBL 7:7
233# define NV_CIO_CR_CURS_ST_INDEX 0x0a /* cursor start */
234# define NV_CIO_CR_CURS_END_INDEX 0x0b /* cursor end */
235# define NV_CIO_CR_SA_HI_INDEX 0x0c /* screen start address high */
236# define NV_CIO_CR_SA_LO_INDEX 0x0d /* screen start address low */
237# define NV_CIO_CR_TCOFF_HI_INDEX 0x0e /* cursor offset high */
238# define NV_CIO_CR_TCOFF_LO_INDEX 0x0f /* cursor offset low */
239# define NV_CIO_CR_VRS_INDEX 0x10 /* vertical retrace start */
240# define NV_CIO_CR_VRE_INDEX 0x11 /* vertical retrace end */
241# define NV_CIO_CR_VRE_3_0 3:0
242# define NV_CIO_CR_VDE_INDEX 0x12 /* vertical display end */
243# define NV_CIO_CR_OFFSET_INDEX 0x13 /* sets screen pitch */
244# define NV_CIO_CR_ULINE_INDEX 0x14 /* underline location */
245# define NV_CIO_CR_VBS_INDEX 0x15 /* vertical blank start */
246# define NV_CIO_CR_VBE_INDEX 0x16 /* vertical blank end */
247# define NV_CIO_CR_MODE_INDEX 0x17 /* crtc mode control */
248# define NV_CIO_CR_LCOMP_INDEX 0x18 /* line compare */
249 /* Extended VGA CRTC registers */
250# define NV_CIO_CRE_RPC0_INDEX 0x19 /* repaint control 0 */
251# define NV_CIO_CRE_RPC0_OFFSET_10_8 7:5
252# define NV_CIO_CRE_RPC1_INDEX 0x1a /* repaint control 1 */
253# define NV_CIO_CRE_RPC1_LARGE 2:2
254# define NV_CIO_CRE_FF_INDEX 0x1b /* fifo control */
255# define NV_CIO_CRE_ENH_INDEX 0x1c /* enhanced? */
256# define NV_CIO_SR_LOCK_INDEX 0x1f /* crtc lock */
257# define NV_CIO_SR_UNLOCK_RW_VALUE 0x57
258# define NV_CIO_SR_LOCK_VALUE 0x99
259# define NV_CIO_CRE_FFLWM__INDEX 0x20 /* fifo low water mark */
260# define NV_CIO_CRE_21 0x21 /* vga shadow crtc lock */
261# define NV_CIO_CRE_LSR_INDEX 0x25 /* ? */
262# define NV_CIO_CRE_LSR_VDT_10 0:0
263# define NV_CIO_CRE_LSR_VDE_10 1:1
264# define NV_CIO_CRE_LSR_VRS_10 2:2
265# define NV_CIO_CRE_LSR_VBS_10 3:3
266# define NV_CIO_CRE_LSR_HBE_6 4:4
267# define NV_CIO_CR_ARX_INDEX 0x26 /* attribute index -- ro copy of 0x60.3c0 */
268# define NV_CIO_CRE_CHIP_ID_INDEX 0x27 /* chip revision */
269# define NV_CIO_CRE_PIXEL_INDEX 0x28
270# define NV_CIO_CRE_PIXEL_FORMAT 1:0
271# define NV_CIO_CRE_HEB__INDEX 0x2d /* horizontal extra bits? */
272# define NV_CIO_CRE_HEB_HDT_8 0:0
273# define NV_CIO_CRE_HEB_HDE_8 1:1
274# define NV_CIO_CRE_HEB_HBS_8 2:2
275# define NV_CIO_CRE_HEB_HRS_8 3:3
276# define NV_CIO_CRE_HEB_ILC_8 4:4
277# define NV_CIO_CRE_2E 0x2e /* some scratch or dummy reg to force writes to sink in */
278# define NV_CIO_CRE_HCUR_ADDR2_INDEX 0x2f /* cursor */
279# define NV_CIO_CRE_HCUR_ADDR0_INDEX 0x30 /* pixmap */
280# define NV_CIO_CRE_HCUR_ADDR0_ADR 6:0
281# define NV_CIO_CRE_HCUR_ASI 7:7
282# define NV_CIO_CRE_HCUR_ADDR1_INDEX 0x31 /* address */
283# define NV_CIO_CRE_HCUR_ADDR1_ENABLE 0:0
284# define NV_CIO_CRE_HCUR_ADDR1_CUR_DBL 1:1
285# define NV_CIO_CRE_HCUR_ADDR1_ADR 7:2
286# define NV_CIO_CRE_LCD__INDEX 0x33
287# define NV_CIO_CRE_LCD_LCD_SELECT 0:0
288# define NV_CIO_CRE_DDC0_STATUS__INDEX 0x36
289# define NV_CIO_CRE_DDC0_WR__INDEX 0x37
290# define NV_CIO_CRE_ILACE__INDEX 0x39 /* interlace */
291# define NV_CIO_CRE_SCRATCH3__INDEX 0x3b
292# define NV_CIO_CRE_SCRATCH4__INDEX 0x3c
293# define NV_CIO_CRE_DDC_STATUS__INDEX 0x3e
294# define NV_CIO_CRE_DDC_WR__INDEX 0x3f
295# define NV_CIO_CRE_EBR_INDEX 0x41 /* extra bits ? (vertical) */
296# define NV_CIO_CRE_EBR_VDT_11 0:0
297# define NV_CIO_CRE_EBR_VDE_11 2:2
298# define NV_CIO_CRE_EBR_VRS_11 4:4
299# define NV_CIO_CRE_EBR_VBS_11 6:6
300# define NV_CIO_CRE_43 0x43
301# define NV_CIO_CRE_44 0x44 /* head control */
302# define NV_CIO_CRE_CSB 0x45 /* colour saturation boost */
303# define NV_CIO_CRE_RCR 0x46
304# define NV_CIO_CRE_RCR_ENDIAN_BIG 7:7
305# define NV_CIO_CRE_47 0x47 /* extended fifo lwm, used on nv30+ */
306# define NV_CIO_CRE_49 0x49
307# define NV_CIO_CRE_4B 0x4b /* given patterns in 0x[2-3][a-c] regs, probably scratch 6 */
308# define NV_CIO_CRE_TVOUT_LATENCY 0x52
309# define NV_CIO_CRE_53 0x53 /* `fp_htiming' according to Haiku */
310# define NV_CIO_CRE_54 0x54 /* `fp_vtiming' according to Haiku */
311# define NV_CIO_CRE_57 0x57 /* index reg for cr58 */
312# define NV_CIO_CRE_58 0x58 /* data reg for cr57 */
313# define NV_CIO_CRE_59 0x59 /* related to on/off-chip-ness of digital outputs */
314# define NV_CIO_CRE_5B 0x5B /* newer colour saturation reg */
315# define NV_CIO_CRE_85 0x85
316# define NV_CIO_CRE_86 0x86
317#define NV_PRMCIO_INP0__COLOR 0x006013da
318
319#define NV_PRAMDAC_CU_START_POS 0x00680300
320# define NV_PRAMDAC_CU_START_POS_X 15:0
321# define NV_PRAMDAC_CU_START_POS_Y 31:16
322#define NV_RAMDAC_NV10_CURSYNC 0x00680404
323
324#define NV_PRAMDAC_NVPLL_COEFF 0x00680500
325#define NV_PRAMDAC_MPLL_COEFF 0x00680504
326#define NV_PRAMDAC_VPLL_COEFF 0x00680508
327# define NV30_RAMDAC_ENABLE_VCO2 (8 << 4)
328
329#define NV_PRAMDAC_PLL_COEFF_SELECT 0x0068050c
330# define NV_PRAMDAC_PLL_COEFF_SELECT_USE_VPLL2_TRUE (4 << 0)
331# define NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_MPLL (1 << 8)
332# define NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_VPLL (2 << 8)
333# define NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_NVPLL (4 << 8)
334# define NV_PRAMDAC_PLL_COEFF_SELECT_PLL_SOURCE_VPLL2 (8 << 8)
335# define NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK1 (1 << 16)
336# define NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK1 (2 << 16)
337# define NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK2 (4 << 16)
338# define NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK2 (8 << 16)
339# define NV_PRAMDAC_PLL_COEFF_SELECT_TV_CLK_SOURCE_VIP (1 << 20)
340# define NV_PRAMDAC_PLL_COEFF_SELECT_VCLK_RATIO_DB2 (1 << 28)
341# define NV_PRAMDAC_PLL_COEFF_SELECT_VCLK2_RATIO_DB2 (2 << 28)
342
343#define NV_PRAMDAC_PLL_SETUP_CONTROL 0x00680510
344#define NV_RAMDAC_VPLL2 0x00680520
345#define NV_PRAMDAC_SEL_CLK 0x00680524
346#define NV_RAMDAC_DITHER_NV11 0x00680528
347#define NV_PRAMDAC_DACCLK 0x0068052c
348# define NV_PRAMDAC_DACCLK_SEL_DACCLK (1 << 0)
349
350#define NV_RAMDAC_NVPLL_B 0x00680570
351#define NV_RAMDAC_MPLL_B 0x00680574
352#define NV_RAMDAC_VPLL_B 0x00680578
353#define NV_RAMDAC_VPLL2_B 0x0068057c
354# define NV31_RAMDAC_ENABLE_VCO2 (8 << 28)
355#define NV_PRAMDAC_580 0x00680580
356# define NV_RAMDAC_580_VPLL1_ACTIVE (1 << 8)
357# define NV_RAMDAC_580_VPLL2_ACTIVE (1 << 28)
358
359#define NV_PRAMDAC_GENERAL_CONTROL 0x00680600
360# define NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON (3 << 4)
361# define NV_PRAMDAC_GENERAL_CONTROL_VGA_STATE_SEL (1 << 8)
362# define NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL (1 << 12)
363# define NV_PRAMDAC_GENERAL_CONTROL_TERMINATION_75OHM (2 << 16)
364# define NV_PRAMDAC_GENERAL_CONTROL_BPC_8BITS (1 << 20)
365# define NV_PRAMDAC_GENERAL_CONTROL_PIPE_LONG (2 << 28)
366#define NV_PRAMDAC_TEST_CONTROL 0x00680608
367# define NV_PRAMDAC_TEST_CONTROL_TP_INS_EN_ASSERTED (1 << 12)
368# define NV_PRAMDAC_TEST_CONTROL_PWRDWN_DAC_OFF (1 << 16)
369# define NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI (1 << 28)
370#define NV_PRAMDAC_TESTPOINT_DATA 0x00680610
371# define NV_PRAMDAC_TESTPOINT_DATA_NOTBLANK (8 << 28)
372#define NV_PRAMDAC_630 0x00680630
373#define NV_PRAMDAC_634 0x00680634
374
/* TV-out raster timing registers.
 * Note: a second, identical "#define NV_PRAMDAC_TV_SETUP 0x00680700" used to
 * follow this group; it was redundant (same name, same value) and has been
 * dropped. */
#define NV_PRAMDAC_TV_SETUP 0x00680700
#define NV_PRAMDAC_TV_VTOTAL 0x00680720
#define NV_PRAMDAC_TV_VSKEW 0x00680724
#define NV_PRAMDAC_TV_VSYNC_DELAY 0x00680728
#define NV_PRAMDAC_TV_HTOTAL 0x0068072c
#define NV_PRAMDAC_TV_HSKEW 0x00680730
#define NV_PRAMDAC_TV_HSYNC_DELAY 0x00680734
#define NV_PRAMDAC_TV_HSYNC_DELAY2 0x00680738
/* Flat-panel (FP) timing generator: vertical then horizontal raster
 * parameters for the panel scaler/timing block. */
#define NV_PRAMDAC_FP_VDISPLAY_END 0x00680800
#define NV_PRAMDAC_FP_VTOTAL 0x00680804
#define NV_PRAMDAC_FP_VCRTC 0x00680808
#define NV_PRAMDAC_FP_VSYNC_START 0x0068080c
#define NV_PRAMDAC_FP_VSYNC_END 0x00680810
#define NV_PRAMDAC_FP_VVALID_START 0x00680814
#define NV_PRAMDAC_FP_VVALID_END 0x00680818
#define NV_PRAMDAC_FP_HDISPLAY_END 0x00680820
#define NV_PRAMDAC_FP_HTOTAL 0x00680824
#define NV_PRAMDAC_FP_HCRTC 0x00680828
#define NV_PRAMDAC_FP_HSYNC_START 0x0068082c
#define NV_PRAMDAC_FP_HSYNC_END 0x00680830
#define NV_PRAMDAC_FP_HVALID_START 0x00680834
#define NV_PRAMDAC_FP_HVALID_END 0x00680838

#define NV_RAMDAC_FP_DITHER 0x0068083c
/* FP timing-generator control: sync polarity/disable, scaling mode, etc. */
#define NV_PRAMDAC_FP_TG_CONTROL 0x00680848
# define NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS (1 << 0)
# define NV_PRAMDAC_FP_TG_CONTROL_VSYNC_DISABLE (2 << 0)
# define NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS (1 << 4)
# define NV_PRAMDAC_FP_TG_CONTROL_HSYNC_DISABLE (2 << 4)
# define NV_PRAMDAC_FP_TG_CONTROL_MODE_SCALE (0 << 8)
# define NV_PRAMDAC_FP_TG_CONTROL_MODE_CENTER (1 << 8)
# define NV_PRAMDAC_FP_TG_CONTROL_MODE_NATIVE (2 << 8)
# define NV_PRAMDAC_FP_TG_CONTROL_READ_PROG (1 << 20)
# define NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12 (1 << 24)
# define NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS (1 << 28)
# define NV_PRAMDAC_FP_TG_CONTROL_DISPEN_DISABLE (2 << 28)
#define NV_PRAMDAC_FP_MARGIN_COLOR 0x0068084c
#define NV_PRAMDAC_850 0x00680850
#define NV_PRAMDAC_85C 0x0068085c
#define NV_PRAMDAC_FP_DEBUG_0 0x00680880
# define NV_PRAMDAC_FP_DEBUG_0_XSCALE_ENABLE (1 << 0)
# define NV_PRAMDAC_FP_DEBUG_0_YSCALE_ENABLE (1 << 4)
/* This doesn't seem to be essential for tmds, but still often set */
# define NV_RAMDAC_FP_DEBUG_0_TMDS_ENABLED (8 << 4)
# define NV_PRAMDAC_FP_DEBUG_0_XINTERP_BILINEAR (1 << 8)
# define NV_PRAMDAC_FP_DEBUG_0_YINTERP_BILINEAR (1 << 12)
# define NV_PRAMDAC_FP_DEBUG_0_XWEIGHT_ROUND (1 << 20)
# define NV_PRAMDAC_FP_DEBUG_0_YWEIGHT_ROUND (1 << 24)
# define NV_PRAMDAC_FP_DEBUG_0_PWRDOWN_FPCLK (1 << 28)
#define NV_PRAMDAC_FP_DEBUG_1 0x00680884
/* The two *_VALUE defines below use high:low bit-range notation; they are
 * documentation only and not usable directly in C expressions. */
# define NV_PRAMDAC_FP_DEBUG_1_XSCALE_VALUE 11:0
# define NV_PRAMDAC_FP_DEBUG_1_XSCALE_TESTMODE_ENABLE (1 << 12)
# define NV_PRAMDAC_FP_DEBUG_1_YSCALE_VALUE 27:16
# define NV_PRAMDAC_FP_DEBUG_1_YSCALE_TESTMODE_ENABLE (1 << 28)
#define NV_PRAMDAC_FP_DEBUG_2 0x00680888
#define NV_PRAMDAC_FP_DEBUG_3 0x0068088C

/* see NV_PRAMDAC_INDIR_TMDS in rules.xml */
/* Indirect access pair: write the TMDS register index to CONTROL, then
 * read/write its value through DATA. */
#define NV_PRAMDAC_FP_TMDS_CONTROL 0x006808b0
# define NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE (1 << 16)
#define NV_PRAMDAC_FP_TMDS_DATA 0x006808b4

#define NV_PRAMDAC_8C0 0x006808c0
/* Some kind of switch */
#define NV_PRAMDAC_900 0x00680900
#define NV_PRAMDAC_A20 0x00680A20
#define NV_PRAMDAC_A24 0x00680A24
#define NV_PRAMDAC_A34 0x00680A34

#define NV_PRAMDAC_CTV 0x00680c00

/* names fabricated from NV_USER_DAC info */
/* NOTE(review): the low bytes (0x3c6-0x3c9) match the classic VGA DAC I/O
 * ports, so these look like MMIO aliases of the VGA palette registers —
 * confirm against the code using them. */
#define NV_PRMDIO_PIXEL_MASK 0x006813c6
# define NV_PRMDIO_PIXEL_MASK_MASK 0xff
#define NV_PRMDIO_READ_MODE_ADDRESS 0x006813c7
#define NV_PRMDIO_WRITE_MODE_ADDRESS 0x006813c8
#define NV_PRMDIO_PALETTE_DATA 0x006813c9
456
/* PGRAPH register block (0x400xxx). Names with an _NV04 suffix are the
 * NV04-generation location of a register that moved on later chips. */
#define NV_PGRAPH_DEBUG_0 0x00400080
#define NV_PGRAPH_DEBUG_1 0x00400084
#define NV_PGRAPH_DEBUG_2_NV04 0x00400088
#define NV_PGRAPH_DEBUG_2 0x00400620
#define NV_PGRAPH_DEBUG_3 0x0040008c
#define NV_PGRAPH_DEBUG_4 0x00400090
#define NV_PGRAPH_INTR 0x00400100
#define NV_PGRAPH_INTR_EN 0x00400140
#define NV_PGRAPH_CTX_CONTROL 0x00400144
#define NV_PGRAPH_CTX_CONTROL_NV04 0x00400170
/* Absolute user clip rectangle (min/max X/Y). */
#define NV_PGRAPH_ABS_UCLIP_XMIN 0x0040053C
#define NV_PGRAPH_ABS_UCLIP_YMIN 0x00400540
#define NV_PGRAPH_ABS_UCLIP_XMAX 0x00400544
#define NV_PGRAPH_ABS_UCLIP_YMAX 0x00400548
#define NV_PGRAPH_BETA_AND 0x00400608
#define NV_PGRAPH_LIMIT_VIOL_PIX 0x00400610
/* Buffer offset/limit pairs 0-2 (BOFFSETn goes with BLIMITn). */
#define NV_PGRAPH_BOFFSET0 0x00400640
#define NV_PGRAPH_BOFFSET1 0x00400644
#define NV_PGRAPH_BOFFSET2 0x00400648
#define NV_PGRAPH_BLIMIT0 0x00400684
#define NV_PGRAPH_BLIMIT1 0x00400688
#define NV_PGRAPH_BLIMIT2 0x0040068c
#define NV_PGRAPH_STATUS 0x00400700
#define NV_PGRAPH_SURFACE 0x00400710
#define NV_PGRAPH_STATE 0x00400714
#define NV_PGRAPH_FIFO 0x00400720
#define NV_PGRAPH_PATTERN_SHAPE 0x00400810
#define NV_PGRAPH_TILE 0x00400b00
485
/* PVIDEO overlay registers. The (buff)-parameterized macros index one of
 * two overlay buffers via a 4-byte stride — NOTE(review): buffer index
 * assumed to be 0 or 1, per the BUFF0/BUFF1 pairs in the NV04 section
 * below; confirm against callers. */
#define NV_PVIDEO_INTR_EN 0x00008140
#define NV_PVIDEO_BUFFER 0x00008700
#define NV_PVIDEO_STOP 0x00008704
#define NV_PVIDEO_UVPLANE_BASE(buff) (0x00008800+(buff)*4)
#define NV_PVIDEO_UVPLANE_LIMIT(buff) (0x00008808+(buff)*4)
#define NV_PVIDEO_UVPLANE_OFFSET_BUFF(buff) (0x00008820+(buff)*4)
#define NV_PVIDEO_BASE(buff) (0x00008900+(buff)*4)
#define NV_PVIDEO_LIMIT(buff) (0x00008908+(buff)*4)
#define NV_PVIDEO_LUMINANCE(buff) (0x00008910+(buff)*4)
#define NV_PVIDEO_CHROMINANCE(buff) (0x00008918+(buff)*4)
#define NV_PVIDEO_OFFSET_BUFF(buff) (0x00008920+(buff)*4)
#define NV_PVIDEO_SIZE_IN(buff) (0x00008928+(buff)*4)
#define NV_PVIDEO_POINT_IN(buff) (0x00008930+(buff)*4)
/* DS_DX / DT_DY: horizontal/vertical scaling ratios (source step per
 * destination pixel/line). */
#define NV_PVIDEO_DS_DX(buff) (0x00008938+(buff)*4)
#define NV_PVIDEO_DT_DY(buff) (0x00008940+(buff)*4)
#define NV_PVIDEO_POINT_OUT(buff) (0x00008948+(buff)*4)
#define NV_PVIDEO_SIZE_OUT(buff) (0x00008950+(buff)*4)
#define NV_PVIDEO_FORMAT(buff) (0x00008958+(buff)*4)
# define NV_PVIDEO_FORMAT_PLANAR (1 << 0)
# define NV_PVIDEO_FORMAT_COLOR_LE_CR8YB8CB8YA8 (1 << 16)
# define NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY (1 << 20)
# define NV_PVIDEO_FORMAT_MATRIX_ITURBT709 (1 << 24)
#define NV_PVIDEO_COLOR_KEY 0x00008B00
509
/* NV04 overlay defines from VIDIX & Haiku */
/* Older (NV04-generation) overlay unit living in the 0x680xxx range.
 * BUFF0/BUFF1 register triplets interleave: each BUFF1 register sits 4
 * bytes after its BUFF0 counterpart. */
#define NV_PVIDEO_INTR_EN_0 0x00680140
#define NV_PVIDEO_STEP_SIZE 0x00680200
#define NV_PVIDEO_CONTROL_Y 0x00680204
#define NV_PVIDEO_CONTROL_X 0x00680208
#define NV_PVIDEO_BUFF0_START_ADDRESS 0x0068020c
#define NV_PVIDEO_BUFF0_PITCH_LENGTH 0x00680214
#define NV_PVIDEO_BUFF0_OFFSET 0x0068021c
#define NV_PVIDEO_BUFF1_START_ADDRESS 0x00680210
#define NV_PVIDEO_BUFF1_PITCH_LENGTH 0x00680218
#define NV_PVIDEO_BUFF1_OFFSET 0x00680220
/* Internal state machine snapshots — NOTE(review): exact semantics of
 * OE/SU/RM not derivable from this header; see the NV04 overlay users. */
#define NV_PVIDEO_OE_STATE 0x00680224
#define NV_PVIDEO_SU_STATE 0x00680228
#define NV_PVIDEO_RM_STATE 0x0068022c
#define NV_PVIDEO_WINDOW_START 0x00680230
#define NV_PVIDEO_WINDOW_SIZE 0x00680234
#define NV_PVIDEO_FIFO_THRES_SIZE 0x00680238
#define NV_PVIDEO_FIFO_BURST_LENGTH 0x0068023c
#define NV_PVIDEO_KEY 0x00680240
#define NV_PVIDEO_OVERLAY 0x00680244
/* Per-channel color-space conversion offsets and adjust. */
#define NV_PVIDEO_RED_CSC_OFFSET 0x00680280
#define NV_PVIDEO_GREEN_CSC_OFFSET 0x00680284
#define NV_PVIDEO_BLUE_CSC_OFFSET 0x00680288
#define NV_PVIDEO_CSC_ADJUST 0x0068028c
534
535#endif