Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/nouveau/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/nouveau/Makefile | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 366
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.h | 43
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 290
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_calc.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_channel.c | 23
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 54
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_debugfs.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.c | 32
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dp.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.c | 21
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 253
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_encoder.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c | 318
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_grctx.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_hw.c | 45
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_i2c.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_i2c.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_irq.c | 123
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c | 363
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_notifier.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_object.c | 776
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_perf.c | 205
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_pm.c | 518
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_pm.h | 74
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ramht.c | 289
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ramht.h | 55
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_reg.h | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c | 68
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c | 123
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_temp.c | 309
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_volt.c | 212
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_crtc.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_dac.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_dfp.c | 39
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fbcon.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fifo.c | 68
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_instmem.c | 140
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_pm.c | 81
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_tv.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_fifo.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_graph.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_tv.c | 110
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_tv.h | 15
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_tv_modes.c | 48
-rw-r--r--  drivers/gpu/drm/nouveau/nv20_graph.c | 506
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_fifo.c | 20
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_graph.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_grctx.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_crtc.c | 38
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_cursor.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_dac.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 92
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fb.c | 40
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fbcon.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fifo.c | 286
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_graph.c | 51
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_grctx.c | 3305
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_instmem.c | 418
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_pm.c | 131
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_sor.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nva3_pm.c | 95
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fifo.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_instmem.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nvreg.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 34
-rw-r--r--  drivers/gpu/drm/ttm/Makefile | 3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_agp_backend.c | 3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 104
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_manager.c | 148
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 12
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 4
79 files changed, 7079 insertions, 3480 deletions
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index d2d28048efb2..72730e9ca06c 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -10,6 +10,7 @@ config DRM_NOUVEAU
 	select FB
 	select FRAMEBUFFER_CONSOLE if !EMBEDDED
 	select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
+	select ACPI_VIDEO if ACPI
 	help
 	  Choose this option for open-source nVidia support.
 
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index e9b06e4ef2a2..23fa82d667d6 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -9,7 +9,8 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
              nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
              nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
              nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
-             nouveau_dp.o \
+             nouveau_dp.o nouveau_ramht.o \
+             nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o \
              nv04_timer.o \
              nv04_mc.o nv40_mc.o nv50_mc.o \
              nv04_fb.o nv10_fb.o nv30_fb.o nv40_fb.o nv50_fb.o nvc0_fb.o \
@@ -23,7 +24,8 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
              nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
              nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \
              nv10_gpio.o nv50_gpio.o \
-             nv50_calc.o
+             nv50_calc.o \
+             nv04_pm.o nv50_pm.o nva3_pm.o
 
 nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
 nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index c17a055ee3e5..119152606e4c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -292,6 +292,6 @@ nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector)
 	if (ret < 0)
 		return ret;
 
-	nv_connector->edid = edid;
+	nv_connector->edid = kmemdup(edid, EDID_LENGTH, GFP_KERNEL);
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 974b0f8ae048..53f4eba65cb9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -43,9 +43,6 @@
 #define BIOSLOG(sip, fmt, arg...) NV_DEBUG(sip->dev, fmt, ##arg)
 #define LOG_OLD_VALUE(x)
 
-#define ROM16(x) le16_to_cpu(*(uint16_t *)&(x))
-#define ROM32(x) le32_to_cpu(*(uint32_t *)&(x))
-
 struct init_exec {
 	bool execute;
 	bool repeat;
@@ -272,12 +269,6 @@ struct init_tbl_entry {
 	int (*handler)(struct nvbios *, uint16_t, struct init_exec *);
 };
 
-struct bit_entry {
-	uint8_t id[2];
-	uint16_t length;
-	uint16_t offset;
-};
-
 static int parse_init_table(struct nvbios *, unsigned int, struct init_exec *);
 
 #define MACRO_INDEX_SIZE 2
@@ -1231,7 +1222,7 @@ init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
 			return 3;
 		}
 
-		if (cond & 1)
+		if (!(cond & 1))
 			iexec->execute = false;
 	}
 	break;
@@ -4675,6 +4666,92 @@ int run_tmds_table(struct drm_device *dev, struct dcb_entry *dcbent, int head, i
 	return 0;
 }
 
+struct pll_mapping {
+	u8  type;
+	u32 reg;
+};
+
+static struct pll_mapping nv04_pll_mapping[] = {
+	{ PLL_CORE  , NV_PRAMDAC_NVPLL_COEFF },
+	{ PLL_MEMORY, NV_PRAMDAC_MPLL_COEFF },
+	{ PLL_VPLL0 , NV_PRAMDAC_VPLL_COEFF },
+	{ PLL_VPLL1 , NV_RAMDAC_VPLL2 },
+	{}
+};
+
+static struct pll_mapping nv40_pll_mapping[] = {
+	{ PLL_CORE  , 0x004000 },
+	{ PLL_MEMORY, 0x004020 },
+	{ PLL_VPLL0 , NV_PRAMDAC_VPLL_COEFF },
+	{ PLL_VPLL1 , NV_RAMDAC_VPLL2 },
+	{}
+};
+
+static struct pll_mapping nv50_pll_mapping[] = {
+	{ PLL_CORE  , 0x004028 },
+	{ PLL_SHADER, 0x004020 },
+	{ PLL_UNK03 , 0x004000 },
+	{ PLL_MEMORY, 0x004008 },
+	{ PLL_UNK40 , 0x00e810 },
+	{ PLL_UNK41 , 0x00e818 },
+	{ PLL_UNK42 , 0x00e824 },
+	{ PLL_VPLL0 , 0x614100 },
+	{ PLL_VPLL1 , 0x614900 },
+	{}
+};
+
+static struct pll_mapping nv84_pll_mapping[] = {
+	{ PLL_CORE  , 0x004028 },
+	{ PLL_SHADER, 0x004020 },
+	{ PLL_MEMORY, 0x004008 },
+	{ PLL_UNK05 , 0x004030 },
+	{ PLL_UNK41 , 0x00e818 },
+	{ PLL_VPLL0 , 0x614100 },
+	{ PLL_VPLL1 , 0x614900 },
+	{}
+};
+
+u32
+get_pll_register(struct drm_device *dev, enum pll_types type)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nvbios *bios = &dev_priv->vbios;
+	struct pll_mapping *map;
+	int i;
+
+	if (dev_priv->card_type < NV_40)
+		map = nv04_pll_mapping;
+	else
+	if (dev_priv->card_type < NV_50)
+		map = nv40_pll_mapping;
+	else {
+		u8 *plim = &bios->data[bios->pll_limit_tbl_ptr];
+
+		if (plim[0] >= 0x30) {
+			u8 *entry = plim + plim[1];
+			for (i = 0; i < plim[3]; i++, entry += plim[2]) {
+				if (entry[0] == type)
+					return ROM32(entry[3]);
+			}
+
+			return 0;
+		}
+
+		if (dev_priv->chipset == 0x50)
+			map = nv50_pll_mapping;
+		else
+			map = nv84_pll_mapping;
+	}
+
+	while (map->reg) {
+		if (map->type == type)
+			return map->reg;
+		map++;
+	}
+
+	return 0;
+}
+
 int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims *pll_lim)
 {
 	/*
@@ -4750,6 +4827,17 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
 	/* initialize all members to zero */
 	memset(pll_lim, 0, sizeof(struct pll_lims));
 
+	/* if we were passed a type rather than a register, figure
+	 * out the register and store it
+	 */
+	if (limit_match > PLL_MAX)
+		pll_lim->reg = limit_match;
+	else {
+		pll_lim->reg = get_pll_register(dev, limit_match);
+		if (!pll_lim->reg)
+			return -ENOENT;
+	}
+
 	if (pll_lim_ver == 0x10 || pll_lim_ver == 0x11) {
 		uint8_t *pll_rec = &bios->data[bios->pll_limit_tbl_ptr + headerlen + recordlen * pllindex];
 
@@ -4785,7 +4873,6 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
 		pll_lim->max_usable_log2p = 0x6;
 	} else if (pll_lim_ver == 0x20 || pll_lim_ver == 0x21) {
 		uint16_t plloffs = bios->pll_limit_tbl_ptr + headerlen;
-		uint32_t reg = 0; /* default match */
 		uint8_t *pll_rec;
 		int i;
 
@@ -4797,37 +4884,22 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
 			NV_WARN(dev, "Default PLL limit entry has non-zero "
 				"register field\n");
 
-		if (limit_match > MAX_PLL_TYPES)
-			/* we've been passed a reg as the match */
-			reg = limit_match;
-		else /* limit match is a pll type */
-			for (i = 1; i < entries && !reg; i++) {
-				uint32_t cmpreg = ROM32(bios->data[plloffs + recordlen * i]);
-
-				if (limit_match == NVPLL &&
-				    (cmpreg == NV_PRAMDAC_NVPLL_COEFF || cmpreg == 0x4000))
-					reg = cmpreg;
-				if (limit_match == MPLL &&
-				    (cmpreg == NV_PRAMDAC_MPLL_COEFF || cmpreg == 0x4020))
-					reg = cmpreg;
-				if (limit_match == VPLL1 &&
-				    (cmpreg == NV_PRAMDAC_VPLL_COEFF || cmpreg == 0x4010))
-					reg = cmpreg;
-				if (limit_match == VPLL2 &&
-				    (cmpreg == NV_RAMDAC_VPLL2 || cmpreg == 0x4018))
-					reg = cmpreg;
-			}
-
 		for (i = 1; i < entries; i++)
-			if (ROM32(bios->data[plloffs + recordlen * i]) == reg) {
+			if (ROM32(bios->data[plloffs + recordlen * i]) == pll_lim->reg) {
 				pllindex = i;
 				break;
 			}
 
+		if ((dev_priv->card_type >= NV_50) && (pllindex == 0)) {
+			NV_ERROR(dev, "Register 0x%08x not found in PLL "
+				 "limits table", pll_lim->reg);
+			return -ENOENT;
+		}
+
 		pll_rec = &bios->data[plloffs + recordlen * pllindex];
 
 		BIOSLOG(bios, "Loading PLL limits for reg 0x%08x\n",
-			pllindex ? reg : 0);
+			pllindex ? pll_lim->reg : 0);
 
 		/*
 		 * Frequencies are stored in tables in MHz, kHz are more
@@ -4877,8 +4949,8 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
 	if (cv == 0x51 && !pll_lim->refclk) {
 		uint32_t sel_clk = bios_rd32(bios, NV_PRAMDAC_SEL_CLK);
 
-		if (((limit_match == NV_PRAMDAC_VPLL_COEFF || limit_match == VPLL1) && sel_clk & 0x20) ||
-		    ((limit_match == NV_RAMDAC_VPLL2 || limit_match == VPLL2) && sel_clk & 0x80)) {
+		if ((pll_lim->reg == NV_PRAMDAC_VPLL_COEFF && sel_clk & 0x20) ||
+		    (pll_lim->reg == NV_RAMDAC_VPLL2 && sel_clk & 0x80)) {
 			if (bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_CHIP_ID_INDEX) < 0xa3)
 				pll_lim->refclk = 200000;
 			else
@@ -4891,10 +4963,10 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
 		int i;
 
 		BIOSLOG(bios, "Loading PLL limits for register 0x%08x\n",
-			limit_match);
+			pll_lim->reg);
 
 		for (i = 0; i < entries; i++, entry += recordlen) {
-			if (ROM32(entry[3]) == limit_match) {
+			if (ROM32(entry[3]) == pll_lim->reg) {
 				record = &bios->data[ROM16(entry[1])];
 				break;
 			}
@@ -4902,7 +4974,7 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
 
 		if (!record) {
 			NV_ERROR(dev, "Register 0x%08x not found in PLL "
-				 "limits table", limit_match);
+				 "limits table", pll_lim->reg);
 			return -ENOENT;
 		}
 
@@ -4931,10 +5003,10 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
 		int i;
 
 		BIOSLOG(bios, "Loading PLL limits for register 0x%08x\n",
-			limit_match);
+			pll_lim->reg);
 
 		for (i = 0; i < entries; i++, entry += recordlen) {
-			if (ROM32(entry[3]) == limit_match) {
+			if (ROM32(entry[3]) == pll_lim->reg) {
 				record = &bios->data[ROM16(entry[1])];
 				break;
 			}
@@ -4942,7 +5014,7 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
 
 		if (!record) {
 			NV_ERROR(dev, "Register 0x%08x not found in PLL "
-				 "limits table", limit_match);
+				 "limits table", pll_lim->reg);
 			return -ENOENT;
 		}
 
@@ -5293,7 +5365,7 @@ parse_bit_M_tbl_entry(struct drm_device *dev, struct nvbios *bios,
 	if (bitentry->length < 0x5)
 		return 0;
 
-	if (bitentry->id[1] < 2) {
+	if (bitentry->version < 2) {
 		bios->ram_restrict_group_count = bios->data[bitentry->offset + 2];
 		bios->ram_restrict_tbl_ptr = ROM16(bios->data[bitentry->offset + 3]);
 	} else {
@@ -5403,27 +5475,40 @@ struct bit_table {
 
 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
 
+int
+bit_table(struct drm_device *dev, u8 id, struct bit_entry *bit)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nvbios *bios = &dev_priv->vbios;
+	u8 entries, *entry;
+
+	entries = bios->data[bios->offset + 10];
+	entry = &bios->data[bios->offset + 12];
+	while (entries--) {
+		if (entry[0] == id) {
+			bit->id = entry[0];
+			bit->version = entry[1];
+			bit->length = ROM16(entry[2]);
+			bit->offset = ROM16(entry[4]);
+			bit->data = ROMPTR(bios, entry[4]);
+			return 0;
+		}
+
+		entry += bios->data[bios->offset + 9];
+	}
+
+	return -ENOENT;
+}
+
 static int
 parse_bit_table(struct nvbios *bios, const uint16_t bitoffset,
 		struct bit_table *table)
 {
 	struct drm_device *dev = bios->dev;
-	uint8_t maxentries = bios->data[bitoffset + 4];
-	int i, offset;
 	struct bit_entry bitentry;
 
-	for (i = 0, offset = bitoffset + 6; i < maxentries; i++, offset += 6) {
-		bitentry.id[0] = bios->data[offset];
-
-		if (bitentry.id[0] != table->id)
-			continue;
-
-		bitentry.id[1] = bios->data[offset + 1];
-		bitentry.length = ROM16(bios->data[offset + 2]);
-		bitentry.offset = ROM16(bios->data[offset + 4]);
-
+	if (bit_table(dev, table->id, &bitentry) == 0)
 		return table->parse_fn(dev, bios, &bitentry);
-	}
 
 	NV_INFO(dev, "BIT table '%c' not found\n", table->id);
 	return -ENOSYS;
@@ -5683,8 +5768,14 @@ static uint16_t findstr(uint8_t *data, int n, const uint8_t *str, int len)
 static struct dcb_gpio_entry *
 new_gpio_entry(struct nvbios *bios)
 {
+	struct drm_device *dev = bios->dev;
 	struct dcb_gpio_table *gpio = &bios->dcb.gpio;
 
+	if (gpio->entries >= DCB_MAX_NUM_GPIO_ENTRIES) {
+		NV_ERROR(dev, "exceeded maximum number of gpio entries!!\n");
+		return NULL;
+	}
+
 	return &gpio->entry[gpio->entries++];
 }
 
@@ -5706,113 +5797,90 @@ nouveau_bios_gpio_entry(struct drm_device *dev, enum dcb_gpio_tag tag)
 }
 
 static void
-parse_dcb30_gpio_entry(struct nvbios *bios, uint16_t offset)
-{
-	struct dcb_gpio_entry *gpio;
-	uint16_t ent = ROM16(bios->data[offset]);
-	uint8_t line = ent & 0x1f,
-		tag = ent >> 5 & 0x3f,
-		flags = ent >> 11 & 0x1f;
-
-	if (tag == 0x3f)
-		return;
-
-	gpio = new_gpio_entry(bios);
-
-	gpio->tag = tag;
-	gpio->line = line;
-	gpio->invert = flags != 4;
-	gpio->entry = ent;
-}
-
-static void
-parse_dcb40_gpio_entry(struct nvbios *bios, uint16_t offset)
-{
-	uint32_t entry = ROM32(bios->data[offset]);
-	struct dcb_gpio_entry *gpio;
-
-	if ((entry & 0x0000ff00) == 0x0000ff00)
-		return;
-
-	gpio = new_gpio_entry(bios);
-	gpio->tag = (entry & 0x0000ff00) >> 8;
-	gpio->line = (entry & 0x0000001f) >> 0;
-	gpio->state_default = (entry & 0x01000000) >> 24;
-	gpio->state[0] = (entry & 0x18000000) >> 27;
-	gpio->state[1] = (entry & 0x60000000) >> 29;
-	gpio->entry = entry;
-}
-
-static void
 parse_dcb_gpio_table(struct nvbios *bios)
 {
 	struct drm_device *dev = bios->dev;
-	uint16_t gpio_table_ptr = bios->dcb.gpio_table_ptr;
-	uint8_t *gpio_table = &bios->data[gpio_table_ptr];
-	int header_len = gpio_table[1],
-	    entries = gpio_table[2],
-	    entry_len = gpio_table[3];
-	void (*parse_entry)(struct nvbios *, uint16_t) = NULL;
+	struct dcb_gpio_entry *e;
+	u8 headerlen, entries, recordlen;
+	u8 *dcb, *gpio = NULL, *entry;
 	int i;
 
-	if (bios->dcb.version >= 0x40) {
-		if (gpio_table_ptr && entry_len != 4) {
-			NV_WARN(dev, "Invalid DCB GPIO table entry length.\n");
-			return;
-		}
+	dcb = ROMPTR(bios, bios->data[0x36]);
+	if (dcb[0] >= 0x30) {
+		gpio = ROMPTR(bios, dcb[10]);
+		if (!gpio)
+			goto no_table;
 
-		parse_entry = parse_dcb40_gpio_entry;
+		headerlen = gpio[1];
+		entries = gpio[2];
+		recordlen = gpio[3];
+	} else
+	if (dcb[0] >= 0x22 && dcb[-1] >= 0x13) {
+		gpio = ROMPTR(bios, dcb[-15]);
+		if (!gpio)
+			goto no_table;
+
+		headerlen = 3;
+		entries = gpio[2];
+		recordlen = gpio[1];
+	} else
+	if (dcb[0] >= 0x22) {
+		/* No GPIO table present, parse the TVDAC GPIO data. */
+		uint8_t *tvdac_gpio = &dcb[-5];
 
-	} else if (bios->dcb.version >= 0x30) {
-		if (gpio_table_ptr && entry_len != 2) {
-			NV_WARN(dev, "Invalid DCB GPIO table entry length.\n");
-			return;
+		if (tvdac_gpio[0] & 1) {
+			e = new_gpio_entry(bios);
+			e->tag = DCB_GPIO_TVDAC0;
+			e->line = tvdac_gpio[1] >> 4;
+			e->invert = tvdac_gpio[0] & 2;
 		}
 
-		parse_entry = parse_dcb30_gpio_entry;
-
-	} else if (bios->dcb.version >= 0x22) {
-		/*
-		 * DCBs older than v3.0 don't really have a GPIO
-		 * table, instead they keep some GPIO info at fixed
-		 * locations.
-		 */
-		uint16_t dcbptr = ROM16(bios->data[0x36]);
-		uint8_t *tvdac_gpio = &bios->data[dcbptr - 5];
+		goto no_table;
+	} else {
+		NV_DEBUG(dev, "no/unknown gpio table on DCB 0x%02x\n", dcb[0]);
+		goto no_table;
+	}
 
-		if (tvdac_gpio[0] & 1) {
-			struct dcb_gpio_entry *gpio = new_gpio_entry(bios);
+	entry = gpio + headerlen;
+	for (i = 0; i < entries; i++, entry += recordlen) {
+		e = new_gpio_entry(bios);
+		if (!e)
+			break;
 
-			gpio->tag = DCB_GPIO_TVDAC0;
-			gpio->line = tvdac_gpio[1] >> 4;
-			gpio->invert = tvdac_gpio[0] & 2;
-		}
-	} else {
-		/*
-		 * No systematic way to store GPIO info on pre-v2.2
-		 * DCBs, try to match the PCI device IDs.
-		 */
-
-		/* Apple iMac G4 NV18 */
-		if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) {
-			struct dcb_gpio_entry *gpio = new_gpio_entry(bios);
+		if (gpio[0] < 0x40) {
+			e->entry = ROM16(entry[0]);
+			e->tag = (e->entry & 0x07e0) >> 5;
+			if (e->tag == 0x3f) {
+				bios->dcb.gpio.entries--;
+				continue;
+			}
+
+			e->line = (e->entry & 0x001f);
+			e->invert = ((e->entry & 0xf800) >> 11) != 4;
+		} else {
+			e->entry = ROM32(entry[0]);
+			e->tag = (e->entry & 0x0000ff00) >> 8;
+			if (e->tag == 0xff) {
+				bios->dcb.gpio.entries--;
+				continue;
+			}
 
-			gpio->tag = DCB_GPIO_TVDAC0;
-			gpio->line = 4;
+			e->line = (e->entry & 0x0000001f) >> 0;
+			e->state_default = (e->entry & 0x01000000) >> 24;
+			e->state[0] = (e->entry & 0x18000000) >> 27;
+			e->state[1] = (e->entry & 0x60000000) >> 29;
 		}
-
 	}
 
-	if (!gpio_table_ptr)
-		return;
-
-	if (entries > DCB_MAX_NUM_GPIO_ENTRIES) {
-		NV_WARN(dev, "Too many entries in the DCB GPIO table.\n");
-		entries = DCB_MAX_NUM_GPIO_ENTRIES;
+no_table:
+	/* Apple iMac G4 NV18 */
+	if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) {
+		e = new_gpio_entry(bios);
+		if (e) {
+			e->tag = DCB_GPIO_TVDAC0;
+			e->line = 4;
+		}
 	}
-
-	for (i = 0; i < entries; i++)
-		parse_entry(bios, gpio_table_ptr + header_len + entry_len * i);
 }
 
 struct dcb_connector_table_entry *
@@ -6680,6 +6748,8 @@ static int nouveau_parse_vbios_struct(struct drm_device *dev)
 				bit_signature, sizeof(bit_signature));
 	if (offset) {
 		NV_TRACE(dev, "BIT BIOS found\n");
+		bios->type = NVBIOS_BIT;
+		bios->offset = offset;
 		return parse_bit_structure(bios, offset + 6);
 	}
 
@@ -6687,6 +6757,8 @@ static int nouveau_parse_vbios_struct(struct drm_device *dev)
 				bmp_signature, sizeof(bmp_signature));
 	if (offset) {
 		NV_TRACE(dev, "BMP BIOS found\n");
+		bios->type = NVBIOS_BMP;
+		bios->offset = offset;
 		return parse_bmp_structure(dev, bios, offset);
 	}
 
@@ -6806,6 +6878,8 @@ nouveau_bios_init(struct drm_device *dev)
6806 "running VBIOS init tables.\n"); 6878 "running VBIOS init tables.\n");
6807 bios->execute = true; 6879 bios->execute = true;
6808 } 6880 }
6881 if (nouveau_force_post)
6882 bios->execute = true;
6809 6883
6810 ret = nouveau_run_vbios_init(dev); 6884 ret = nouveau_run_vbios_init(dev);
6811 if (ret) 6885 if (ret)
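
Note: the nouveau_bios.c hunks above replace per-caller BIT-table scanning
with the shared bit_table() helper, and PLL type-to-register lookup with
get_pll_register().  A minimal sketch of a bit_table() caller follows; the
table id 'P' and the surrounding error handling are illustrative assumptions,
not part of this patch:

	struct bit_entry P;

	if (bit_table(dev, 'P', &P))
		return -ENOENT;	/* this VBIOS has no such BIT entry */
	/* P.version, P.length, P.offset and P.data now describe the table */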
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index c1de2f3fcb0e..50a648e01c49 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -34,6 +34,20 @@
 
 #define DCB_LOC_ON_CHIP 0
 
+#define ROM16(x) le16_to_cpu(*(uint16_t *)&(x))
+#define ROM32(x) le32_to_cpu(*(uint32_t *)&(x))
+#define ROMPTR(bios, x) (ROM16(x) ? &(bios)->data[ROM16(x)] : NULL)
+
+struct bit_entry {
+	uint8_t  id;
+	uint8_t  version;
+	uint16_t length;
+	uint16_t offset;
+	uint8_t *data;
+};
+
+int bit_table(struct drm_device *, u8 id, struct bit_entry *);
+
 struct dcb_i2c_entry {
 	uint32_t entry;
 	uint8_t port_type;
@@ -170,16 +184,28 @@ enum LVDS_script {
 	LVDS_PANEL_OFF
 };
 
-/* changing these requires matching changes to reg tables in nv_get_clock */
-#define MAX_PLL_TYPES 4
+/* these match types in pll limits table version 0x40,
+ * nouveau uses them on all chipsets internally where a
+ * specific pll needs to be referenced, but the exact
+ * register isn't known.
+ */
 enum pll_types {
-	NVPLL,
-	MPLL,
-	VPLL1,
-	VPLL2
+	PLL_CORE   = 0x01,
+	PLL_SHADER = 0x02,
+	PLL_UNK03  = 0x03,
+	PLL_MEMORY = 0x04,
+	PLL_UNK05  = 0x05,
+	PLL_UNK40  = 0x40,
+	PLL_UNK41  = 0x41,
+	PLL_UNK42  = 0x42,
+	PLL_VPLL0  = 0x80,
+	PLL_VPLL1  = 0x81,
+	PLL_MAX    = 0xff
 };
 
 struct pll_lims {
+	u32 reg;
+
 	struct {
 		int minfreq;
 		int maxfreq;
@@ -212,6 +238,11 @@ struct pll_lims {
 
 struct nvbios {
 	struct drm_device *dev;
+	enum {
+		NVBIOS_BMP,
+		NVBIOS_BIT
+	} type;
+	uint16_t offset;
 
 	uint8_t chip_version;
 
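
Note: ROMPTR, added above, is a small convenience over ROM16: it reads a
little-endian 16-bit offset stored in the BIOS image and returns a pointer
into bios->data, or NULL when the stored offset is zero.  An illustrative
use, mirroring the parse_dcb_gpio_table() rewrite in nouveau_bios.c:

	u8 *dcb = ROMPTR(bios, bios->data[0x36]);	/* DCB table pointer */
	if (!dcb)
		return;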
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index f6f44779d82f..80353e2b8409 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -36,21 +36,6 @@
 #include <linux/log2.h>
 #include <linux/slab.h>
 
-int
-nouveau_bo_sync_gpu(struct nouveau_bo *nvbo, struct nouveau_channel *chan)
-{
-	struct nouveau_fence *prev_fence = nvbo->bo.sync_obj;
-	int ret;
-
-	if (!prev_fence || nouveau_fence_channel(prev_fence) == chan)
-		return 0;
-
-	spin_lock(&nvbo->bo.lock);
-	ret = ttm_bo_wait(&nvbo->bo, false, false, false);
-	spin_unlock(&nvbo->bo.lock);
-	return ret;
-}
-
 static void
 nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
 {
@@ -58,8 +43,6 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
 	struct drm_device *dev = dev_priv->dev;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 
-	ttm_bo_kunmap(&nvbo->kmap);
-
 	if (unlikely(nvbo->gem))
 		DRM_ERROR("bo %p still attached to GEM object\n", bo);
 
@@ -164,8 +147,6 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
 	nouveau_bo_fixup_align(dev, tile_mode, tile_flags, &align, &size);
 	align >>= PAGE_SHIFT;
 
-	nvbo->placement.fpfn = 0;
-	nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0;
 	nouveau_bo_placement_set(nvbo, flags, 0);
 
 	nvbo->channel = chan;
@@ -305,7 +286,8 @@ nouveau_bo_map(struct nouveau_bo *nvbo)
 void
 nouveau_bo_unmap(struct nouveau_bo *nvbo)
 {
-	ttm_bo_kunmap(&nvbo->kmap);
+	if (nvbo)
+		ttm_bo_kunmap(&nvbo->kmap);
 }
 
 u16
@@ -399,14 +381,19 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		man->default_caching = TTM_PL_FLAG_CACHED;
 		break;
 	case TTM_PL_VRAM:
+		man->func = &ttm_bo_manager_func;
 		man->flags = TTM_MEMTYPE_FLAG_FIXED |
 			     TTM_MEMTYPE_FLAG_MAPPABLE;
 		man->available_caching = TTM_PL_FLAG_UNCACHED |
 					 TTM_PL_FLAG_WC;
 		man->default_caching = TTM_PL_FLAG_WC;
-		man->gpu_offset = dev_priv->vm_vram_base;
+		if (dev_priv->card_type == NV_50)
+			man->gpu_offset = 0x40000000;
+		else
+			man->gpu_offset = 0;
 		break;
 	case TTM_PL_TT:
+		man->func = &ttm_bo_manager_func;
 		switch (dev_priv->gart_info.type) {
 		case NOUVEAU_GART_AGP:
 			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
@@ -469,19 +456,26 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
 	if (ret)
 		return ret;
 
-	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
-					evict || (nvbo->channel &&
-						  nvbo->channel != chan),
+	if (nvbo->channel) {
+		ret = nouveau_fence_sync(fence, nvbo->channel);
+		if (ret)
+			goto out;
+	}
+
+	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
 					no_wait_reserve, no_wait_gpu, new_mem);
+out:
 	nouveau_fence_unref((void *)&fence);
 	return ret;
 }
 
 static inline uint32_t
-nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
-		      struct ttm_mem_reg *mem)
+nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
+		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
 {
-	if (chan == nouveau_bdev(nvbo->bo.bdev)->channel) {
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+
+	if (nvbo->no_vm) {
 		if (mem->mem_type == TTM_PL_TT)
 			return NvDmaGART;
 		return NvDmaVRAM;
@@ -493,86 +487,181 @@ nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
 }
 
 static int
-nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
-		     bool no_wait_reserve, bool no_wait_gpu,
-		     struct ttm_mem_reg *new_mem)
+nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
 {
-	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
-	struct ttm_mem_reg *old_mem = &bo->mem;
-	struct nouveau_channel *chan;
-	uint64_t src_offset, dst_offset;
-	uint32_t page_count;
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+	u64 length = (new_mem->num_pages << PAGE_SHIFT);
+	u64 src_offset, dst_offset;
 	int ret;
 
-	chan = nvbo->channel;
-	if (!chan || nvbo->tile_flags || nvbo->no_vm)
-		chan = dev_priv->channel;
-
-	src_offset = old_mem->mm_node->start << PAGE_SHIFT;
-	dst_offset = new_mem->mm_node->start << PAGE_SHIFT;
-	if (chan != dev_priv->channel) {
-		if (old_mem->mem_type == TTM_PL_TT)
-			src_offset += dev_priv->vm_gart_base;
-		else
+	src_offset = old_mem->start << PAGE_SHIFT;
+	dst_offset = new_mem->start << PAGE_SHIFT;
+	if (!nvbo->no_vm) {
+		if (old_mem->mem_type == TTM_PL_VRAM)
 			src_offset += dev_priv->vm_vram_base;
-
-		if (new_mem->mem_type == TTM_PL_TT)
-			dst_offset += dev_priv->vm_gart_base;
 		else
+			src_offset += dev_priv->vm_gart_base;
+
+		if (new_mem->mem_type == TTM_PL_VRAM)
 			dst_offset += dev_priv->vm_vram_base;
+		else
+			dst_offset += dev_priv->vm_gart_base;
 	}
 
 	ret = RING_SPACE(chan, 3);
 	if (ret)
 		return ret;
-	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
-	OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, old_mem));
-	OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, new_mem));
 
-	if (dev_priv->card_type >= NV_50) {
-		ret = RING_SPACE(chan, 4);
+	BEGIN_RING(chan, NvSubM2MF, 0x0184, 2);
+	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
+	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));
+
+	while (length) {
+		u32 amount, stride, height;
+
+		amount = min(length, (u64)(4 * 1024 * 1024));
+		stride = 16 * 4;
+		height = amount / stride;
+
+		if (new_mem->mem_type == TTM_PL_VRAM && nvbo->tile_flags) {
+			ret = RING_SPACE(chan, 8);
+			if (ret)
+				return ret;
+
+			BEGIN_RING(chan, NvSubM2MF, 0x0200, 7);
+			OUT_RING  (chan, 0);
+			OUT_RING  (chan, 0);
+			OUT_RING  (chan, stride);
+			OUT_RING  (chan, height);
+			OUT_RING  (chan, 1);
+			OUT_RING  (chan, 0);
+			OUT_RING  (chan, 0);
+		} else {
+			ret = RING_SPACE(chan, 2);
+			if (ret)
+				return ret;
+
+			BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
+			OUT_RING  (chan, 1);
+		}
+		if (old_mem->mem_type == TTM_PL_VRAM && nvbo->tile_flags) {
+			ret = RING_SPACE(chan, 8);
+			if (ret)
+				return ret;
+
+			BEGIN_RING(chan, NvSubM2MF, 0x021c, 7);
+			OUT_RING  (chan, 0);
+			OUT_RING  (chan, 0);
+			OUT_RING  (chan, stride);
+			OUT_RING  (chan, height);
+			OUT_RING  (chan, 1);
+			OUT_RING  (chan, 0);
+			OUT_RING  (chan, 0);
+		} else {
+			ret = RING_SPACE(chan, 2);
+			if (ret)
+				return ret;
+
+			BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
+			OUT_RING  (chan, 1);
+		}
+
+		ret = RING_SPACE(chan, 14);
 		if (ret)
 			return ret;
-		BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
-		OUT_RING(chan, 1);
-		BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
-		OUT_RING(chan, 1);
+
+		BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
+		OUT_RING  (chan, upper_32_bits(src_offset));
+		OUT_RING  (chan, upper_32_bits(dst_offset));
+		BEGIN_RING(chan, NvSubM2MF, 0x030c, 8);
+		OUT_RING  (chan, lower_32_bits(src_offset));
+		OUT_RING  (chan, lower_32_bits(dst_offset));
+		OUT_RING  (chan, stride);
+		OUT_RING  (chan, stride);
+		OUT_RING  (chan, stride);
+		OUT_RING  (chan, height);
+		OUT_RING  (chan, 0x00000101);
+		OUT_RING  (chan, 0x00000000);
+		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
+		OUT_RING  (chan, 0);
+
+		length -= amount;
+		src_offset += amount;
+		dst_offset += amount;
 	}
 
+	return 0;
+}
+
+static int
+nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+{
+	u32 src_offset = old_mem->start << PAGE_SHIFT;
+	u32 dst_offset = new_mem->start << PAGE_SHIFT;
+	u32 page_count = new_mem->num_pages;
+	int ret;
+
+	ret = RING_SPACE(chan, 3);
+	if (ret)
+		return ret;
+
+	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
+	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
+	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));
+
 	page_count = new_mem->num_pages;
 	while (page_count) {
 		int line_count = (page_count > 2047) ? 2047 : page_count;
 
-		if (dev_priv->card_type >= NV_50) {
-			ret = RING_SPACE(chan, 3);
-			if (ret)
-				return ret;
-			BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
-			OUT_RING(chan, upper_32_bits(src_offset));
-			OUT_RING(chan, upper_32_bits(dst_offset));
-		}
 		ret = RING_SPACE(chan, 11);
 		if (ret)
 			return ret;
+
 		BEGIN_RING(chan, NvSubM2MF,
 			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
-		OUT_RING(chan, lower_32_bits(src_offset));
-		OUT_RING(chan, lower_32_bits(dst_offset));
-		OUT_RING(chan, PAGE_SIZE); /* src_pitch */
-		OUT_RING(chan, PAGE_SIZE); /* dst_pitch */
-		OUT_RING(chan, PAGE_SIZE); /* line_length */
-		OUT_RING(chan, line_count);
-		OUT_RING(chan, (1<<8)|(1<<0));
-		OUT_RING(chan, 0);
+		OUT_RING  (chan, src_offset);
+		OUT_RING  (chan, dst_offset);
+		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
+		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
+		OUT_RING  (chan, PAGE_SIZE); /* line_length */
+		OUT_RING  (chan, line_count);
+		OUT_RING  (chan, 0x00000101);
+		OUT_RING  (chan, 0x00000000);
 		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
-		OUT_RING(chan, 0);
+		OUT_RING  (chan, 0);
 
 		page_count -= line_count;
 		src_offset += (PAGE_SIZE * line_count);
 		dst_offset += (PAGE_SIZE * line_count);
 	}
 
+	return 0;
+}
+
+static int
+nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
+		     bool no_wait_reserve, bool no_wait_gpu,
+		     struct ttm_mem_reg *new_mem)
+{
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+	struct nouveau_channel *chan;
+	int ret;
+
+	chan = nvbo->channel;
+	if (!chan || nvbo->no_vm)
+		chan = dev_priv->channel;
+
+	if (dev_priv->card_type < NV_50)
+		ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
+	else
+		ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
+	if (ret)
+		return ret;
+
 	return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 }
 
@@ -606,12 +695,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
 
 	ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 out:
-	if (tmp_mem.mm_node) {
-		spin_lock(&bo->bdev->glob->lru_lock);
-		drm_mm_put_block(tmp_mem.mm_node);
-		spin_unlock(&bo->bdev->glob->lru_lock);
-	}
-
+	ttm_bo_mem_put(bo, &tmp_mem);
 	return ret;
 }
 
@@ -644,12 +728,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
 		goto out;
 
 out:
-	if (tmp_mem.mm_node) {
-		spin_lock(&bo->bdev->glob->lru_lock);
-		drm_mm_put_block(tmp_mem.mm_node);
-		spin_unlock(&bo->bdev->glob->lru_lock);
-	}
-
+	ttm_bo_mem_put(bo, &tmp_mem);
 	return ret;
 }
 
@@ -669,7 +748,7 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
 		return 0;
 	}
 
-	offset = new_mem->mm_node->start << PAGE_SHIFT;
+	offset = new_mem->start << PAGE_SHIFT;
 
 	if (dev_priv->card_type == NV_50) {
 		ret = nv50_mem_vm_bind_linear(dev,
@@ -719,12 +798,6 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
 	if (ret)
 		return ret;
 
-	/* Software copy if the card isn't up and running yet. */
-	if (!dev_priv->channel) {
-		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
-		goto out;
-	}
-
 	/* Fake bo copy. */
 	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
 		BUG_ON(bo->mem.mm_node != NULL);
@@ -733,6 +806,12 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
 		goto out;
 	}
 
+	/* Software copy if the card isn't up and running yet. */
+	if (!dev_priv->channel) {
+		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+		goto out;
+	}
+
 	/* Hardware assisted copy. */
 	if (new_mem->mem_type == TTM_PL_SYSTEM)
 		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
@@ -783,14 +862,14 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 	case TTM_PL_TT:
 #if __OS_HAS_AGP
 		if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
-			mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+			mem->bus.offset = mem->start << PAGE_SHIFT;
 			mem->bus.base = dev_priv->gart_info.aper_base;
 			mem->bus.is_iomem = true;
 		}
 #endif
 		break;
 	case TTM_PL_VRAM:
-		mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+		mem->bus.offset = mem->start << PAGE_SHIFT;
 		mem->bus.base = pci_resource_start(dev->pdev, 1);
 		mem->bus.is_iomem = true;
 		break;
@@ -808,7 +887,26 @@ nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 static int
 nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
-	return 0;
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+
+	/* as long as the bo isn't in vram, and isn't tiled, we've got
+	 * nothing to do here.
+	 */
+	if (bo->mem.mem_type != TTM_PL_VRAM) {
+		if (dev_priv->card_type < NV_50 || !nvbo->tile_flags)
+			return 0;
+	}
+
+	/* make sure bo is in mappable vram */
+	if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
+		return 0;
+
+
+	nvbo->placement.fpfn = 0;
+	nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
+	nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
+	return ttm_bo_validate(bo, &nvbo->placement, false, true, false);
 }
 
 struct ttm_bo_driver nouveau_bo_driver = {
diff --git a/drivers/gpu/drm/nouveau/nouveau_calc.c b/drivers/gpu/drm/nouveau/nouveau_calc.c
index ca85da784846..dad96cce5e39 100644
--- a/drivers/gpu/drm/nouveau/nouveau_calc.c
+++ b/drivers/gpu/drm/nouveau/nouveau_calc.c
@@ -198,8 +198,8 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nv_fifo_info fifo_data;
 	struct nv_sim_state sim_data;
-	int MClk = nouveau_hw_get_clock(dev, MPLL);
-	int NVClk = nouveau_hw_get_clock(dev, NVPLL);
+	int MClk = nouveau_hw_get_clock(dev, PLL_MEMORY);
+	int NVClk = nouveau_hw_get_clock(dev, PLL_CORE);
 	uint32_t cfg1 = nvReadFB(dev, NV04_PFB_CFG1);
 
 	sim_data.pclk_khz = VClk;
@@ -234,7 +234,7 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
 }
 
 static void
-nv30_update_arb(int *burst, int *lwm)
+nv20_update_arb(int *burst, int *lwm)
 {
 	unsigned int fifo_size, burst_size, graphics_lwm;
 
@@ -251,14 +251,14 @@ nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 
-	if (dev_priv->card_type < NV_30)
+	if (dev_priv->card_type < NV_20)
 		nv04_update_arb(dev, vclk, bpp, burst, lwm);
 	else if ((dev->pci_device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
 		 (dev->pci_device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
 		*burst = 128;
 		*lwm = 0x0480;
 	} else
-		nv30_update_arb(burst, lwm);
+		nv20_update_arb(burst, lwm);
 }
 
 static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 0480f064f2c1..373950e34814 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -48,14 +48,14 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
 					     dev_priv->gart_info.aper_size,
 					     NV_DMA_ACCESS_RO, &pushbuf,
 					     NULL);
-		chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
+		chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
 	} else
 	if (dev_priv->card_type != NV_04) {
 		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
 					     dev_priv->fb_available_size,
 					     NV_DMA_ACCESS_RO,
 					     NV_DMA_TARGET_VIDMEM, &pushbuf);
-		chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
+		chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
 	} else {
 		/* NV04 cmdbuf hack, from original ddx.. not sure of it's
 		 * exact reason for existing :) PCI access to cmdbuf in
@@ -67,17 +67,11 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
 					     dev_priv->fb_available_size,
 					     NV_DMA_ACCESS_RO,
 					     NV_DMA_TARGET_PCI, &pushbuf);
-		chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
-	}
-
-	ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf, &chan->pushbuf);
-	if (ret) {
-		NV_ERROR(dev, "Error referencing pushbuf ctxdma: %d\n", ret);
-		if (pushbuf != dev_priv->gart_info.sg_ctxdma)
-			nouveau_gpuobj_del(dev, &pushbuf);
-		return ret;
+		chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
 	}
 
+	nouveau_gpuobj_ref(pushbuf, &chan->pushbuf);
+	nouveau_gpuobj_ref(NULL, &pushbuf);
 	return 0;
 }
 
@@ -229,7 +223,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
 
 	ret = nouveau_dma_init(chan);
 	if (!ret)
-		ret = nouveau_fence_init(chan);
+		ret = nouveau_fence_channel_init(chan);
 	if (ret) {
 		nouveau_channel_free(chan);
 		return ret;
@@ -276,7 +270,7 @@ nouveau_channel_free(struct nouveau_channel *chan)
 	 * above attempts at idling were OK, but if we failed this'll tell TTM
 	 * we're done with the buffers.
 	 */
-	nouveau_fence_fini(chan);
+	nouveau_fence_channel_fini(chan);
 
 	/* This will prevent pfifo from switching channels. */
 	pfifo->reassign(dev, false);
@@ -308,8 +302,9 @@ nouveau_channel_free(struct nouveau_channel *chan)
 	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 
 	/* Release the channel's resources */
-	nouveau_gpuobj_ref_del(dev, &chan->pushbuf);
+	nouveau_gpuobj_ref(NULL, &chan->pushbuf);
 	if (chan->pushbuf_bo) {
+		nouveau_bo_unmap(chan->pushbuf_bo);
 		nouveau_bo_unpin(chan->pushbuf_bo);
 		nouveau_bo_ref(NULL, &chan->pushbuf_bo);
 	}
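
Note: the pushbuf hunks above move the channel onto the reference-counted
gpuobj API.  A sketch of the pattern, assuming nouveau_gpuobj_ref(obj, &ptr)
stores a new reference to obj in ptr and nouveau_gpuobj_ref(NULL, &ptr)
drops whatever reference ptr currently holds:

	nouveau_gpuobj_ref(pushbuf, &chan->pushbuf);	/* channel takes a ref */
	nouveau_gpuobj_ref(NULL, &pushbuf);		/* drop the local ref */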
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index fc737037f751..0871495096fa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -76,6 +76,22 @@ nouveau_encoder_connector_get(struct nouveau_encoder *encoder)
 	return NULL;
 }
 
+/*TODO: This could use improvement, and learn to handle the fixed
+ *      BIOS tables etc.  It's fine currently, for its only user.
+ */
+int
+nouveau_connector_bpp(struct drm_connector *connector)
+{
+	struct nouveau_connector *nv_connector = nouveau_connector(connector);
+
+	if (nv_connector->edid && nv_connector->edid->revision >= 4) {
+		u8 bpc = ((nv_connector->edid->input & 0x70) >> 3) + 4;
+		if (bpc > 4)
+			return bpc;
+	}
+
+	return 18;
+}
 
 static void
 nouveau_connector_destroy(struct drm_connector *drm_connector)
@@ -130,6 +146,36 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
 	return NULL;
 }
 
+static struct nouveau_encoder *
+nouveau_connector_of_detect(struct drm_connector *connector)
+{
+#ifdef __powerpc__
+	struct drm_device *dev = connector->dev;
+	struct nouveau_connector *nv_connector = nouveau_connector(connector);
+	struct nouveau_encoder *nv_encoder;
+	struct device_node *cn, *dn = pci_device_to_OF_node(dev->pdev);
+
+	if (!dn ||
+	    !((nv_encoder = find_encoder_by_type(connector, OUTPUT_TMDS)) ||
+	      (nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG))))
+		return NULL;
+
+	for_each_child_of_node(dn, cn) {
+		const char *name = of_get_property(cn, "name", NULL);
+		const void *edid = of_get_property(cn, "EDID", NULL);
+		int idx = name ? name[strlen(name) - 1] - 'A' : 0;
+
+		if (nv_encoder->dcb->i2c_index == idx && edid) {
+			nv_connector->edid =
+				kmemdup(edid, EDID_LENGTH, GFP_KERNEL);
+			of_node_put(cn);
+			return nv_encoder;
+		}
+	}
+#endif
+	return NULL;
+}
+
 static void
 nouveau_connector_set_encoder(struct drm_connector *connector,
 			      struct nouveau_encoder *nv_encoder)
@@ -225,6 +271,12 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
 		return connector_status_connected;
 	}
 
+	nv_encoder = nouveau_connector_of_detect(connector);
+	if (nv_encoder) {
+		nouveau_connector_set_encoder(connector, nv_encoder);
+		return connector_status_connected;
+	}
+
 detect_analog:
 	nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG);
 	if (!nv_encoder && !nouveau_tv_disable)
@@ -630,7 +682,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
 		else
 			max_clock = nv_encoder->dp.link_nr * 162000;
 
-		clock *= 3;
+		clock = clock * nouveau_connector_bpp(connector) / 8;
 		break;
 	default:
 		BUG_ON(1);
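
Note: the mode_valid hunk above stops hardcoding 24bpp for DisplayPort
bandwidth checks.  As a worked example with illustrative numbers: a 4-lane
1.62Gbps link gives max_clock = 4 * 162000 kHz of 8-bit symbols; the old
"clock *= 3" limited any mode to 648000 * 8 / 24 = 216 MHz pixel clock,
while an 18bpp panel is now allowed up to 648000 * 8 / 18 = 288 MHz.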
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index 0d2e668ccfe5..c21ed6b16f88 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -55,4 +55,7 @@ nouveau_connector_create(struct drm_device *, int index);
 void
 nouveau_connector_set_polling(struct drm_connector *);
 
+int
+nouveau_connector_bpp(struct drm_connector *);
+
 #endif /* __NOUVEAU_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index 7933de4aff2e..8e1592368cce 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -157,7 +157,23 @@ nouveau_debugfs_vbios_image(struct seq_file *m, void *data)
157 return 0; 157 return 0;
158} 158}
159 159
160static int
161nouveau_debugfs_evict_vram(struct seq_file *m, void *data)
162{
163 struct drm_info_node *node = (struct drm_info_node *) m->private;
164 struct drm_nouveau_private *dev_priv = node->minor->dev->dev_private;
165 int ret;
166
167 ret = ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
168 if (ret)
169 seq_printf(m, "failed: %d", ret);
170 else
171 seq_printf(m, "succeeded\n");
172 return 0;
173}
174
160static struct drm_info_list nouveau_debugfs_list[] = { 175static struct drm_info_list nouveau_debugfs_list[] = {
176 { "evict_vram", nouveau_debugfs_evict_vram, 0, NULL },
161 { "chipset", nouveau_debugfs_chipset_info, 0, NULL }, 177 { "chipset", nouveau_debugfs_chipset_info, 0, NULL },
162 { "memory", nouveau_debugfs_memory_info, 0, NULL }, 178 { "memory", nouveau_debugfs_memory_info, 0, NULL },
163 { "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL }, 179 { "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 2e3c6caa97ee..82581e600dcd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -28,6 +28,7 @@
28#include "drm.h" 28#include "drm.h"
29#include "nouveau_drv.h" 29#include "nouveau_drv.h"
30#include "nouveau_dma.h" 30#include "nouveau_dma.h"
31#include "nouveau_ramht.h"
31 32
32void 33void
33nouveau_dma_pre_init(struct nouveau_channel *chan) 34nouveau_dma_pre_init(struct nouveau_channel *chan)
@@ -58,26 +59,17 @@ nouveau_dma_init(struct nouveau_channel *chan)
58{ 59{
59 struct drm_device *dev = chan->dev; 60 struct drm_device *dev = chan->dev;
60 struct drm_nouveau_private *dev_priv = dev->dev_private; 61 struct drm_nouveau_private *dev_priv = dev->dev_private;
61 struct nouveau_gpuobj *m2mf = NULL; 62 struct nouveau_gpuobj *obj = NULL;
62 struct nouveau_gpuobj *nvsw = NULL;
63 int ret, i; 63 int ret, i;
64 64
65 /* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */ 65 /* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
66 ret = nouveau_gpuobj_gr_new(chan, dev_priv->card_type < NV_50 ? 66 ret = nouveau_gpuobj_gr_new(chan, dev_priv->card_type < NV_50 ?
67 0x0039 : 0x5039, &m2mf); 67 0x0039 : 0x5039, &obj);
68 if (ret) 68 if (ret)
69 return ret; 69 return ret;
70 70
71 ret = nouveau_gpuobj_ref_add(dev, chan, NvM2MF, m2mf, NULL); 71 ret = nouveau_ramht_insert(chan, NvM2MF, obj);
72 if (ret) 72 nouveau_gpuobj_ref(NULL, &obj);
73 return ret;
74
75 /* Create an NV_SW object for various sync purposes */
76 ret = nouveau_gpuobj_sw_new(chan, NV_SW, &nvsw);
77 if (ret)
78 return ret;
79
80 ret = nouveau_gpuobj_ref_add(dev, chan, NvSw, nvsw, NULL);
81 if (ret) 73 if (ret)
82 return ret; 74 return ret;
83 75
@@ -91,11 +83,6 @@ nouveau_dma_init(struct nouveau_channel *chan)
91 if (ret) 83 if (ret)
92 return ret; 84 return ret;
93 85
94 /* Map M2MF notifier object - fbcon. */
95 ret = nouveau_bo_map(chan->notifier_bo);
96 if (ret)
97 return ret;
98
99 /* Insert NOPS for NOUVEAU_DMA_SKIPS */ 86 /* Insert NOPS for NOUVEAU_DMA_SKIPS */
100 ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS); 87 ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
101 if (ret) 88 if (ret)
@@ -113,13 +100,6 @@ nouveau_dma_init(struct nouveau_channel *chan)
113 BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 1); 100 BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 1);
114 OUT_RING(chan, NvNotify0); 101 OUT_RING(chan, NvNotify0);
115 102
116 /* Initialise NV_SW */
117 ret = RING_SPACE(chan, 2);
118 if (ret)
119 return ret;
120 BEGIN_RING(chan, NvSubSw, 0, 1);
121 OUT_RING(chan, NvSw);
122
123 /* Sit back and pray the channel works.. */ 103 /* Sit back and pray the channel works.. */
124 FIRE_RING(chan); 104 FIRE_RING(chan);
125 105
@@ -217,7 +197,7 @@ nv50_dma_push_wait(struct nouveau_channel *chan, int count)
217 197
218 chan->dma.ib_free = get - chan->dma.ib_put; 198 chan->dma.ib_free = get - chan->dma.ib_put;
219 if (chan->dma.ib_free <= 0) 199 if (chan->dma.ib_free <= 0)
220 chan->dma.ib_free += chan->dma.ib_max + 1; 200 chan->dma.ib_free += chan->dma.ib_max;
221 } 201 }
222 202
223 return 0; 203 return 0;
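
Besides moving object binding to the new RAMHT API and relocating the NV_SW setup into the fence code, this hunk fixes an off-by-one in the IB free-space computation. A standalone model of that computation, on the plausible reading that `ib_max` counts the usable slots and one slot stays empty to distinguish a full ring from an empty one:

```c
#include <stdio.h>

/* Distance from the consumer (get) back to the producer (put) in a ring
 * with `max` usable entries; adding max + 1 on wrap, as the old code did,
 * reports one phantom free slot. */
static int ring_free(int get, int put, int max)
{
	int free = get - put;

	if (free <= 0)
		free += max;
	return free;
}

int main(void)
{
	printf("%d\n", ring_free(5, 5, 128));	/* empty ring: 128 free */
	printf("%d\n", ring_free(3, 120, 128));	/* wrapped: 11 free */
	return 0;
}
```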
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 8b05c15866d5..d578c21d3c8d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -72,6 +72,7 @@ enum {
72 NvGdiRect = 0x8000000c, 72 NvGdiRect = 0x8000000c,
73 NvImageBlit = 0x8000000d, 73 NvImageBlit = 0x8000000d,
74 NvSw = 0x8000000e, 74 NvSw = 0x8000000e,
75 NvSema = 0x8000000f,
75 76
76 /* G80+ display objects */ 77 /* G80+ display objects */
77 NvEvoVRAM = 0x01000000, 78 NvEvoVRAM = 0x01000000,
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 8a1b188b4cd1..4562f309ae3d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -317,7 +317,8 @@ train:
317 return false; 317 return false;
318 318
319 config[0] = nv_encoder->dp.link_nr; 319 config[0] = nv_encoder->dp.link_nr;
320 if (nv_encoder->dp.dpcd_version >= 0x11) 320 if (nv_encoder->dp.dpcd_version >= 0x11 &&
321 nv_encoder->dp.enhanced_frame)
321 config[0] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; 322 config[0] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
322 323
323 ret = nouveau_dp_lane_count_set(encoder, config[0]); 324 ret = nouveau_dp_lane_count_set(encoder, config[0]);
@@ -468,10 +469,12 @@ nouveau_dp_detect(struct drm_encoder *encoder)
468 !nv_encoder->dcb->dpconf.link_bw) 469 !nv_encoder->dcb->dpconf.link_bw)
469 nv_encoder->dp.link_bw = DP_LINK_BW_1_62; 470 nv_encoder->dp.link_bw = DP_LINK_BW_1_62;
470 471
471 nv_encoder->dp.link_nr = dpcd[2] & 0xf; 472 nv_encoder->dp.link_nr = dpcd[2] & DP_MAX_LANE_COUNT_MASK;
472 if (nv_encoder->dp.link_nr > nv_encoder->dcb->dpconf.link_nr) 473 if (nv_encoder->dp.link_nr > nv_encoder->dcb->dpconf.link_nr)
473 nv_encoder->dp.link_nr = nv_encoder->dcb->dpconf.link_nr; 474 nv_encoder->dp.link_nr = nv_encoder->dcb->dpconf.link_nr;
474 475
476 nv_encoder->dp.enhanced_frame = (dpcd[2] & DP_ENHANCED_FRAME_CAP);
477
475 return true; 478 return true;
476} 479}
477 480
@@ -524,7 +527,8 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
524 nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x80000000); 527 nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x80000000);
525 nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl); 528 nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl);
526 nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x00010000); 529 nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x00010000);
527 if (!nv_wait(NV50_AUXCH_CTRL(index), 0x00010000, 0x00000000)) { 530 if (!nv_wait(dev, NV50_AUXCH_CTRL(index),
531 0x00010000, 0x00000000)) {
528 NV_ERROR(dev, "expected bit 16 == 0, got 0x%08x\n", 532 NV_ERROR(dev, "expected bit 16 == 0, got 0x%08x\n",
529 nv_rd32(dev, NV50_AUXCH_CTRL(index))); 533 nv_rd32(dev, NV50_AUXCH_CTRL(index)));
530 ret = -EBUSY; 534 ret = -EBUSY;
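
The detect hunk tightens DPCD parsing: lane count is masked with the proper define rather than a bare 0xf, and enhanced framing capability is recorded so the training code above only sets DP_LANE_COUNT_ENHANCED_FRAME_EN when the sink actually advertises it. The bit layout of DPCD byte 2, shown standalone (the defines mirror drm_dp_helper.h):

```c
#include <stdio.h>

#define DP_MAX_LANE_COUNT_MASK	0x1f
#define DP_ENHANCED_FRAME_CAP	0x80

int main(void)
{
	unsigned char dpcd2 = 0x84;	/* 4 lanes, enhanced framing capable */
	int link_nr  = dpcd2 & DP_MAX_LANE_COUNT_MASK;
	int enhanced = !!(dpcd2 & DP_ENHANCED_FRAME_CAP);

	printf("lanes=%d enhanced=%d\n", link_nr, enhanced);
	return 0;
}
```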
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 209912a1b7a5..edc4a9ab28d1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -31,13 +31,14 @@
31#include "nouveau_hw.h" 31#include "nouveau_hw.h"
32#include "nouveau_fb.h" 32#include "nouveau_fb.h"
33#include "nouveau_fbcon.h" 33#include "nouveau_fbcon.h"
34#include "nouveau_pm.h"
34#include "nv50_display.h" 35#include "nv50_display.h"
35 36
36#include "drm_pciids.h" 37#include "drm_pciids.h"
37 38
38MODULE_PARM_DESC(noagp, "Disable AGP"); 39MODULE_PARM_DESC(agpmode, "AGP mode (0 to disable AGP)");
39int nouveau_noagp; 40int nouveau_agpmode = -1;
40module_param_named(noagp, nouveau_noagp, int, 0400); 41module_param_named(agpmode, nouveau_agpmode, int, 0400);
41 42
42MODULE_PARM_DESC(modeset, "Enable kernel modesetting"); 43MODULE_PARM_DESC(modeset, "Enable kernel modesetting");
43static int nouveau_modeset = -1; /* kms */ 44static int nouveau_modeset = -1; /* kms */
@@ -79,6 +80,10 @@ MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
79int nouveau_nofbaccel = 0; 80int nouveau_nofbaccel = 0;
80module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400); 81module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
81 82
83MODULE_PARM_DESC(force_post, "Force POST");
84int nouveau_force_post = 0;
85module_param_named(force_post, nouveau_force_post, int, 0400);
86
82MODULE_PARM_DESC(override_conntype, "Ignore DCB connector type"); 87MODULE_PARM_DESC(override_conntype, "Ignore DCB connector type");
83int nouveau_override_conntype = 0; 88int nouveau_override_conntype = 0;
84module_param_named(override_conntype, nouveau_override_conntype, int, 0400); 89module_param_named(override_conntype, nouveau_override_conntype, int, 0400);
@@ -102,6 +107,14 @@ MODULE_PARM_DESC(reg_debug, "Register access debug bitmask:\n"
102int nouveau_reg_debug; 107int nouveau_reg_debug;
103module_param_named(reg_debug, nouveau_reg_debug, int, 0600); 108module_param_named(reg_debug, nouveau_reg_debug, int, 0600);
104 109
110MODULE_PARM_DESC(perflvl, "Performance level (default: boot)\n");
111char *nouveau_perflvl;
112module_param_named(perflvl, nouveau_perflvl, charp, 0400);
113
114MODULE_PARM_DESC(perflvl_wr, "Allow perflvl changes (warning: dangerous!)\n");
115int nouveau_perflvl_wr;
116module_param_named(perflvl_wr, nouveau_perflvl_wr, int, 0400);
117
105int nouveau_fbpercrtc; 118int nouveau_fbpercrtc;
106#if 0 119#if 0
107module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400); 120module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400);
@@ -271,6 +284,8 @@ nouveau_pci_resume(struct pci_dev *pdev)
271 if (ret) 284 if (ret)
272 return ret; 285 return ret;
273 286
287 nouveau_pm_resume(dev);
288
274 if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) { 289 if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
275 ret = nouveau_mem_init_agp(dev); 290 ret = nouveau_mem_init_agp(dev);
276 if (ret) { 291 if (ret) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index b1be617373b6..3a07e580d27a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -133,22 +133,24 @@ enum nouveau_flags {
133#define NVOBJ_ENGINE_DISPLAY 2 133#define NVOBJ_ENGINE_DISPLAY 2
134#define NVOBJ_ENGINE_INT 0xdeadbeef 134#define NVOBJ_ENGINE_INT 0xdeadbeef
135 135
136#define NVOBJ_FLAG_ALLOW_NO_REFS (1 << 0)
137#define NVOBJ_FLAG_ZERO_ALLOC (1 << 1) 136#define NVOBJ_FLAG_ZERO_ALLOC (1 << 1)
138#define NVOBJ_FLAG_ZERO_FREE (1 << 2) 137#define NVOBJ_FLAG_ZERO_FREE (1 << 2)
139#define NVOBJ_FLAG_FAKE (1 << 3)
140struct nouveau_gpuobj { 138struct nouveau_gpuobj {
139 struct drm_device *dev;
140 struct kref refcount;
141 struct list_head list; 141 struct list_head list;
142 142
143 struct nouveau_channel *im_channel;
144 struct drm_mm_node *im_pramin; 143 struct drm_mm_node *im_pramin;
145 struct nouveau_bo *im_backing; 144 struct nouveau_bo *im_backing;
146 uint32_t im_backing_start;
147 uint32_t *im_backing_suspend; 145 uint32_t *im_backing_suspend;
148 int im_bound; 146 int im_bound;
149 147
150 uint32_t flags; 148 uint32_t flags;
151 int refcount; 149
150 u32 size;
151 u32 pinst;
152 u32 cinst;
153 u64 vinst;
152 154
153 uint32_t engine; 155 uint32_t engine;
154 uint32_t class; 156 uint32_t class;
@@ -157,16 +159,6 @@ struct nouveau_gpuobj {
157 void *priv; 159 void *priv;
158}; 160};
159 161
160struct nouveau_gpuobj_ref {
161 struct list_head list;
162
163 struct nouveau_gpuobj *gpuobj;
164 uint32_t instance;
165
166 struct nouveau_channel *channel;
167 int handle;
168};
169
170struct nouveau_channel { 162struct nouveau_channel {
171 struct drm_device *dev; 163 struct drm_device *dev;
172 int id; 164 int id;
@@ -192,33 +184,32 @@ struct nouveau_channel {
192 } fence; 184 } fence;
193 185
194 /* DMA push buffer */ 186 /* DMA push buffer */
195 struct nouveau_gpuobj_ref *pushbuf; 187 struct nouveau_gpuobj *pushbuf;
196 struct nouveau_bo *pushbuf_bo; 188 struct nouveau_bo *pushbuf_bo;
197 uint32_t pushbuf_base; 189 uint32_t pushbuf_base;
198 190
199 /* Notifier memory */ 191 /* Notifier memory */
200 struct nouveau_bo *notifier_bo; 192 struct nouveau_bo *notifier_bo;
201 struct drm_mm notifier_heap; 193 struct drm_mm notifier_heap;
202 194
203 /* PFIFO context */ 195 /* PFIFO context */
204 struct nouveau_gpuobj_ref *ramfc; 196 struct nouveau_gpuobj *ramfc;
205 struct nouveau_gpuobj_ref *cache; 197 struct nouveau_gpuobj *cache;
206 198
207 /* PGRAPH context */ 199 /* PGRAPH context */
208 /* XXX may be merge 2 pointers as private data ??? */ 200 /* XXX may be merge 2 pointers as private data ??? */
209 struct nouveau_gpuobj_ref *ramin_grctx; 201 struct nouveau_gpuobj *ramin_grctx;
210 void *pgraph_ctx; 202 void *pgraph_ctx;
211 203
212 /* NV50 VM */ 204 /* NV50 VM */
213 struct nouveau_gpuobj *vm_pd; 205 struct nouveau_gpuobj *vm_pd;
214 struct nouveau_gpuobj_ref *vm_gart_pt; 206 struct nouveau_gpuobj *vm_gart_pt;
215 struct nouveau_gpuobj_ref *vm_vram_pt[NV50_VM_VRAM_NR]; 207 struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
216 208
217 /* Objects */ 209 /* Objects */
218 struct nouveau_gpuobj_ref *ramin; /* Private instmem */ 210 struct nouveau_gpuobj *ramin; /* Private instmem */
219 struct drm_mm ramin_heap; /* Private PRAMIN heap */ 211 struct drm_mm ramin_heap; /* Private PRAMIN heap */
220 struct nouveau_gpuobj_ref *ramht; /* Hash table */ 212 struct nouveau_ramht *ramht; /* Hash table */
221 struct list_head ramht_refs; /* Objects referenced by RAMHT */
222 213
223 /* GPU object info for stuff used in-kernel (mm_enabled) */ 214 /* GPU object info for stuff used in-kernel (mm_enabled) */
224 uint32_t m2mf_ntfy; 215 uint32_t m2mf_ntfy;
@@ -296,7 +287,7 @@ struct nouveau_fb_engine {
296struct nouveau_fifo_engine { 287struct nouveau_fifo_engine {
297 int channels; 288 int channels;
298 289
299 struct nouveau_gpuobj_ref *playlist[2]; 290 struct nouveau_gpuobj *playlist[2];
300 int cur_playlist; 291 int cur_playlist;
301 292
302 int (*init)(struct drm_device *); 293 int (*init)(struct drm_device *);
@@ -305,7 +296,6 @@ struct nouveau_fifo_engine {
305 void (*disable)(struct drm_device *); 296 void (*disable)(struct drm_device *);
306 void (*enable)(struct drm_device *); 297 void (*enable)(struct drm_device *);
307 bool (*reassign)(struct drm_device *, bool enable); 298 bool (*reassign)(struct drm_device *, bool enable);
308 bool (*cache_flush)(struct drm_device *dev);
309 bool (*cache_pull)(struct drm_device *dev, bool enable); 299 bool (*cache_pull)(struct drm_device *dev, bool enable);
310 300
311 int (*channel_id)(struct drm_device *); 301 int (*channel_id)(struct drm_device *);
@@ -334,7 +324,7 @@ struct nouveau_pgraph_engine {
334 int grctx_size; 324 int grctx_size;
335 325
336 /* NV2x/NV3x context table (0x400780) */ 326 /* NV2x/NV3x context table (0x400780) */
337 struct nouveau_gpuobj_ref *ctx_table; 327 struct nouveau_gpuobj *ctx_table;
338 328
339 int (*init)(struct drm_device *); 329 int (*init)(struct drm_device *);
340 void (*takedown)(struct drm_device *); 330 void (*takedown)(struct drm_device *);
@@ -369,6 +359,91 @@ struct nouveau_gpio_engine {
369 void (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on); 359 void (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on);
370}; 360};
371 361
362struct nouveau_pm_voltage_level {
363 u8 voltage;
364 u8 vid;
365};
366
367struct nouveau_pm_voltage {
368 bool supported;
369 u8 vid_mask;
370
371 struct nouveau_pm_voltage_level *level;
372 int nr_level;
373};
374
375#define NOUVEAU_PM_MAX_LEVEL 8
376struct nouveau_pm_level {
377 struct device_attribute dev_attr;
378 char name[32];
379 int id;
380
381 u32 core;
382 u32 memory;
383 u32 shader;
384 u32 unk05;
385
386 u8 voltage;
387 u8 fanspeed;
388
389 u16 memscript;
390};
391
392struct nouveau_pm_temp_sensor_constants {
393 u16 offset_constant;
394 s16 offset_mult;
395 u16 offset_div;
396 u16 slope_mult;
397 u16 slope_div;
398};
399
400struct nouveau_pm_threshold_temp {
401 s16 critical;
402 s16 down_clock;
403 s16 fan_boost;
404};
405
406struct nouveau_pm_memtiming {
407 u32 reg_100220;
408 u32 reg_100224;
409 u32 reg_100228;
410 u32 reg_10022c;
411 u32 reg_100230;
412 u32 reg_100234;
413 u32 reg_100238;
414 u32 reg_10023c;
415};
416
417struct nouveau_pm_memtimings {
418 bool supported;
419 struct nouveau_pm_memtiming *timing;
420 int nr_timing;
421};
422
423struct nouveau_pm_engine {
424 struct nouveau_pm_voltage voltage;
425 struct nouveau_pm_level perflvl[NOUVEAU_PM_MAX_LEVEL];
426 int nr_perflvl;
427 struct nouveau_pm_memtimings memtimings;
428 struct nouveau_pm_temp_sensor_constants sensor_constants;
429 struct nouveau_pm_threshold_temp threshold_temp;
430
431 struct nouveau_pm_level boot;
432 struct nouveau_pm_level *cur;
433
434 struct device *hwmon;
435
436 int (*clock_get)(struct drm_device *, u32 id);
437 void *(*clock_pre)(struct drm_device *, struct nouveau_pm_level *,
438 u32 id, int khz);
439 void (*clock_set)(struct drm_device *, void *);
440 int (*voltage_get)(struct drm_device *);
441 int (*voltage_set)(struct drm_device *, int voltage);
442 int (*fanspeed_get)(struct drm_device *);
443 int (*fanspeed_set)(struct drm_device *, int fanspeed);
444 int (*temp_get)(struct drm_device *);
445};
446
372struct nouveau_engine { 447struct nouveau_engine {
373 struct nouveau_instmem_engine instmem; 448 struct nouveau_instmem_engine instmem;
374 struct nouveau_mc_engine mc; 449 struct nouveau_mc_engine mc;
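
The new pm engine combines per-chipset callbacks with tables parsed from the BIOS; the voltage table, for instance, maps a requested voltage to the VID the GPIO code must program. A minimal sketch of that lookup with made-up table data (the shapes mirror struct nouveau_pm_voltage above; units and values are illustrative):

```c
#include <stdio.h>

struct level { unsigned char voltage, vid; };

static int vid_for_voltage(const struct level *l, int n, unsigned char volt)
{
	int i;

	for (i = 0; i < n; i++)
		if (l[i].voltage == volt)
			return l[i].vid;
	return -1;	/* no exact match: caller errors out */
}

int main(void)
{
	const struct level tbl[] = { { 110, 0x0 }, { 120, 0x1 }, { 130, 0x3 } };

	printf("vid=%d\n", vid_for_voltage(tbl, 3, 120));	/* vid=1 */
	return 0;
}
```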
@@ -378,6 +453,7 @@ struct nouveau_engine {
378 struct nouveau_fifo_engine fifo; 453 struct nouveau_fifo_engine fifo;
379 struct nouveau_display_engine display; 454 struct nouveau_display_engine display;
380 struct nouveau_gpio_engine gpio; 455 struct nouveau_gpio_engine gpio;
456 struct nouveau_pm_engine pm;
381}; 457};
382 458
383struct nouveau_pll_vals { 459struct nouveau_pll_vals {
@@ -522,8 +598,14 @@ struct drm_nouveau_private {
522 int flags; 598 int flags;
523 599
524 void __iomem *mmio; 600 void __iomem *mmio;
601
602 spinlock_t ramin_lock;
525 void __iomem *ramin; 603 void __iomem *ramin;
526 uint32_t ramin_size; 604 u32 ramin_size;
605 u32 ramin_base;
606 bool ramin_available;
607 struct drm_mm ramin_heap;
608 struct list_head gpuobj_list;
527 609
528 struct nouveau_bo *vga_ram; 610 struct nouveau_bo *vga_ram;
529 611
@@ -540,6 +622,12 @@ struct drm_nouveau_private {
540 atomic_t validate_sequence; 622 atomic_t validate_sequence;
541 } ttm; 623 } ttm;
542 624
625 struct {
626 spinlock_t lock;
627 struct drm_mm heap;
628 struct nouveau_bo *bo;
629 } fence;
630
543 int fifo_alloc_count; 631 int fifo_alloc_count;
544 struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR]; 632 struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR];
545 633
@@ -550,15 +638,11 @@ struct drm_nouveau_private {
550 spinlock_t context_switch_lock; 638 spinlock_t context_switch_lock;
551 639
552 /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */ 640 /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */
553 struct nouveau_gpuobj *ramht; 641 struct nouveau_ramht *ramht;
642 struct nouveau_gpuobj *ramfc;
643 struct nouveau_gpuobj *ramro;
644
554 uint32_t ramin_rsvd_vram; 645 uint32_t ramin_rsvd_vram;
555 uint32_t ramht_offset;
556 uint32_t ramht_size;
557 uint32_t ramht_bits;
558 uint32_t ramfc_offset;
559 uint32_t ramfc_size;
560 uint32_t ramro_offset;
561 uint32_t ramro_size;
562 646
563 struct { 647 struct {
564 enum { 648 enum {
@@ -576,14 +660,12 @@ struct drm_nouveau_private {
576 } gart_info; 660 } gart_info;
577 661
578 /* nv10-nv40 tiling regions */ 662 /* nv10-nv40 tiling regions */
579 struct { 663 struct nouveau_tile_reg tile[NOUVEAU_MAX_TILE_NR];
580 struct nouveau_tile_reg reg[NOUVEAU_MAX_TILE_NR];
581 spinlock_t lock;
582 } tile;
583 664
584 /* VRAM/fb configuration */ 665 /* VRAM/fb configuration */
585 uint64_t vram_size; 666 uint64_t vram_size;
586 uint64_t vram_sys_base; 667 uint64_t vram_sys_base;
668 u32 vram_rblock_size;
587 669
588 uint64_t fb_phys; 670 uint64_t fb_phys;
589 uint64_t fb_available_size; 671 uint64_t fb_available_size;
@@ -600,10 +682,6 @@ struct drm_nouveau_private {
600 struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR]; 682 struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
601 int vm_vram_pt_nr; 683 int vm_vram_pt_nr;
602 684
603 struct drm_mm ramin_heap;
604
605 struct list_head gpuobj_list;
606
607 struct nvbios vbios; 685 struct nvbios vbios;
608 686
609 struct nv04_mode_state mode_reg; 687 struct nv04_mode_state mode_reg;
@@ -634,6 +712,12 @@ struct drm_nouveau_private {
634}; 712};
635 713
636static inline struct drm_nouveau_private * 714static inline struct drm_nouveau_private *
715nouveau_private(struct drm_device *dev)
716{
717 return dev->dev_private;
718}
719
720static inline struct drm_nouveau_private *
637nouveau_bdev(struct ttm_bo_device *bd) 721nouveau_bdev(struct ttm_bo_device *bd)
638{ 722{
639 return container_of(bd, struct drm_nouveau_private, ttm.bdev); 723 return container_of(bd, struct drm_nouveau_private, ttm.bdev);
@@ -669,7 +753,7 @@ nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
669} while (0) 753} while (0)
670 754
671/* nouveau_drv.c */ 755/* nouveau_drv.c */
672extern int nouveau_noagp; 756extern int nouveau_agpmode;
673extern int nouveau_duallink; 757extern int nouveau_duallink;
674extern int nouveau_uscript_lvds; 758extern int nouveau_uscript_lvds;
675extern int nouveau_uscript_tmds; 759extern int nouveau_uscript_tmds;
@@ -683,7 +767,10 @@ extern char *nouveau_vbios;
683extern int nouveau_ignorelid; 767extern int nouveau_ignorelid;
684extern int nouveau_nofbaccel; 768extern int nouveau_nofbaccel;
685extern int nouveau_noaccel; 769extern int nouveau_noaccel;
770extern int nouveau_force_post;
686extern int nouveau_override_conntype; 771extern int nouveau_override_conntype;
772extern char *nouveau_perflvl;
773extern int nouveau_perflvl_wr;
687 774
688extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state); 775extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
689extern int nouveau_pci_resume(struct pci_dev *pdev); 776extern int nouveau_pci_resume(struct pci_dev *pdev);
@@ -704,8 +791,10 @@ extern bool nouveau_wait_for_idle(struct drm_device *);
704extern int nouveau_card_init(struct drm_device *); 791extern int nouveau_card_init(struct drm_device *);
705 792
706/* nouveau_mem.c */ 793/* nouveau_mem.c */
707extern int nouveau_mem_detect(struct drm_device *dev); 794extern int nouveau_mem_vram_init(struct drm_device *);
708extern int nouveau_mem_init(struct drm_device *); 795extern void nouveau_mem_vram_fini(struct drm_device *);
796extern int nouveau_mem_gart_init(struct drm_device *);
797extern void nouveau_mem_gart_fini(struct drm_device *);
709extern int nouveau_mem_init_agp(struct drm_device *); 798extern int nouveau_mem_init_agp(struct drm_device *);
710extern int nouveau_mem_reset_agp(struct drm_device *); 799extern int nouveau_mem_reset_agp(struct drm_device *);
711extern void nouveau_mem_close(struct drm_device *); 800extern void nouveau_mem_close(struct drm_device *);
@@ -749,7 +838,6 @@ extern void nouveau_channel_free(struct nouveau_channel *);
749extern int nouveau_gpuobj_early_init(struct drm_device *); 838extern int nouveau_gpuobj_early_init(struct drm_device *);
750extern int nouveau_gpuobj_init(struct drm_device *); 839extern int nouveau_gpuobj_init(struct drm_device *);
751extern void nouveau_gpuobj_takedown(struct drm_device *); 840extern void nouveau_gpuobj_takedown(struct drm_device *);
752extern void nouveau_gpuobj_late_takedown(struct drm_device *);
753extern int nouveau_gpuobj_suspend(struct drm_device *dev); 841extern int nouveau_gpuobj_suspend(struct drm_device *dev);
754extern void nouveau_gpuobj_suspend_cleanup(struct drm_device *dev); 842extern void nouveau_gpuobj_suspend_cleanup(struct drm_device *dev);
755extern void nouveau_gpuobj_resume(struct drm_device *dev); 843extern void nouveau_gpuobj_resume(struct drm_device *dev);
@@ -759,24 +847,11 @@ extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *);
759extern int nouveau_gpuobj_new(struct drm_device *, struct nouveau_channel *, 847extern int nouveau_gpuobj_new(struct drm_device *, struct nouveau_channel *,
760 uint32_t size, int align, uint32_t flags, 848 uint32_t size, int align, uint32_t flags,
761 struct nouveau_gpuobj **); 849 struct nouveau_gpuobj **);
762extern int nouveau_gpuobj_del(struct drm_device *, struct nouveau_gpuobj **); 850extern void nouveau_gpuobj_ref(struct nouveau_gpuobj *,
763extern int nouveau_gpuobj_ref_add(struct drm_device *, struct nouveau_channel *, 851 struct nouveau_gpuobj **);
764 uint32_t handle, struct nouveau_gpuobj *, 852extern int nouveau_gpuobj_new_fake(struct drm_device *, u32 pinst, u64 vinst,
765 struct nouveau_gpuobj_ref **); 853 u32 size, u32 flags,
766extern int nouveau_gpuobj_ref_del(struct drm_device *, 854 struct nouveau_gpuobj **);
767 struct nouveau_gpuobj_ref **);
768extern int nouveau_gpuobj_ref_find(struct nouveau_channel *, uint32_t handle,
769 struct nouveau_gpuobj_ref **ref_ret);
770extern int nouveau_gpuobj_new_ref(struct drm_device *,
771 struct nouveau_channel *alloc_chan,
772 struct nouveau_channel *ref_chan,
773 uint32_t handle, uint32_t size, int align,
774 uint32_t flags, struct nouveau_gpuobj_ref **);
775extern int nouveau_gpuobj_new_fake(struct drm_device *,
776 uint32_t p_offset, uint32_t b_offset,
777 uint32_t size, uint32_t flags,
778 struct nouveau_gpuobj **,
779 struct nouveau_gpuobj_ref**);
780extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class, 855extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class,
781 uint64_t offset, uint64_t size, int access, 856 uint64_t offset, uint64_t size, int access,
782 int target, struct nouveau_gpuobj **); 857 int target, struct nouveau_gpuobj **);
@@ -879,6 +954,7 @@ extern struct dcb_gpio_entry *nouveau_bios_gpio_entry(struct drm_device *,
879 enum dcb_gpio_tag); 954 enum dcb_gpio_tag);
880extern struct dcb_connector_table_entry * 955extern struct dcb_connector_table_entry *
881nouveau_bios_connector_entry(struct drm_device *, int index); 956nouveau_bios_connector_entry(struct drm_device *, int index);
957extern u32 get_pll_register(struct drm_device *, enum pll_types);
882extern int get_pll_limits(struct drm_device *, uint32_t limit_match, 958extern int get_pll_limits(struct drm_device *, uint32_t limit_match,
883 struct pll_lims *); 959 struct pll_lims *);
884extern int nouveau_bios_run_display_table(struct drm_device *, 960extern int nouveau_bios_run_display_table(struct drm_device *,
@@ -925,10 +1001,10 @@ extern int nv40_fb_init(struct drm_device *);
925extern void nv40_fb_takedown(struct drm_device *); 1001extern void nv40_fb_takedown(struct drm_device *);
926extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t, 1002extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t,
927 uint32_t, uint32_t); 1003 uint32_t, uint32_t);
928
929/* nv50_fb.c */ 1004/* nv50_fb.c */
930extern int nv50_fb_init(struct drm_device *); 1005extern int nv50_fb_init(struct drm_device *);
931extern void nv50_fb_takedown(struct drm_device *); 1006extern void nv50_fb_takedown(struct drm_device *);
1007extern void nv50_fb_vm_trap(struct drm_device *, int display, const char *);
932 1008
933/* nvc0_fb.c */ 1009/* nvc0_fb.c */
934extern int nvc0_fb_init(struct drm_device *); 1010extern int nvc0_fb_init(struct drm_device *);
@@ -939,7 +1015,6 @@ extern int nv04_fifo_init(struct drm_device *);
939extern void nv04_fifo_disable(struct drm_device *); 1015extern void nv04_fifo_disable(struct drm_device *);
940extern void nv04_fifo_enable(struct drm_device *); 1016extern void nv04_fifo_enable(struct drm_device *);
941extern bool nv04_fifo_reassign(struct drm_device *, bool); 1017extern bool nv04_fifo_reassign(struct drm_device *, bool);
942extern bool nv04_fifo_cache_flush(struct drm_device *);
943extern bool nv04_fifo_cache_pull(struct drm_device *, bool); 1018extern bool nv04_fifo_cache_pull(struct drm_device *, bool);
944extern int nv04_fifo_channel_id(struct drm_device *); 1019extern int nv04_fifo_channel_id(struct drm_device *);
945extern int nv04_fifo_create_context(struct nouveau_channel *); 1020extern int nv04_fifo_create_context(struct nouveau_channel *);
@@ -977,7 +1052,6 @@ extern void nvc0_fifo_takedown(struct drm_device *);
977extern void nvc0_fifo_disable(struct drm_device *); 1052extern void nvc0_fifo_disable(struct drm_device *);
978extern void nvc0_fifo_enable(struct drm_device *); 1053extern void nvc0_fifo_enable(struct drm_device *);
979extern bool nvc0_fifo_reassign(struct drm_device *, bool); 1054extern bool nvc0_fifo_reassign(struct drm_device *, bool);
980extern bool nvc0_fifo_cache_flush(struct drm_device *);
981extern bool nvc0_fifo_cache_pull(struct drm_device *, bool); 1055extern bool nvc0_fifo_cache_pull(struct drm_device *, bool);
982extern int nvc0_fifo_channel_id(struct drm_device *); 1056extern int nvc0_fifo_channel_id(struct drm_device *);
983extern int nvc0_fifo_create_context(struct nouveau_channel *); 1057extern int nvc0_fifo_create_context(struct nouveau_channel *);
@@ -1169,15 +1243,21 @@ extern int nouveau_bo_sync_gpu(struct nouveau_bo *, struct nouveau_channel *);
1169 1243
1170/* nouveau_fence.c */ 1244/* nouveau_fence.c */
1171struct nouveau_fence; 1245struct nouveau_fence;
1172extern int nouveau_fence_init(struct nouveau_channel *); 1246extern int nouveau_fence_init(struct drm_device *);
1173extern void nouveau_fence_fini(struct nouveau_channel *); 1247extern void nouveau_fence_fini(struct drm_device *);
1248extern int nouveau_fence_channel_init(struct nouveau_channel *);
1249extern void nouveau_fence_channel_fini(struct nouveau_channel *);
1174extern void nouveau_fence_update(struct nouveau_channel *); 1250extern void nouveau_fence_update(struct nouveau_channel *);
1175extern int nouveau_fence_new(struct nouveau_channel *, struct nouveau_fence **, 1251extern int nouveau_fence_new(struct nouveau_channel *, struct nouveau_fence **,
1176 bool emit); 1252 bool emit);
1177extern int nouveau_fence_emit(struct nouveau_fence *); 1253extern int nouveau_fence_emit(struct nouveau_fence *);
1254extern void nouveau_fence_work(struct nouveau_fence *fence,
1255 void (*work)(void *priv, bool signalled),
1256 void *priv);
1178struct nouveau_channel *nouveau_fence_channel(struct nouveau_fence *); 1257struct nouveau_channel *nouveau_fence_channel(struct nouveau_fence *);
1179extern bool nouveau_fence_signalled(void *obj, void *arg); 1258extern bool nouveau_fence_signalled(void *obj, void *arg);
1180extern int nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr); 1259extern int nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr);
1260extern int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *);
1181extern int nouveau_fence_flush(void *obj, void *arg); 1261extern int nouveau_fence_flush(void *obj, void *arg);
1182extern void nouveau_fence_unref(void **obj); 1262extern void nouveau_fence_unref(void **obj);
1183extern void *nouveau_fence_ref(void *obj); 1263extern void *nouveau_fence_ref(void *obj);
@@ -1255,12 +1335,11 @@ static inline void nv_wr32(struct drm_device *dev, unsigned reg, u32 val)
1255 iowrite32_native(val, dev_priv->mmio + reg); 1335 iowrite32_native(val, dev_priv->mmio + reg);
1256} 1336}
1257 1337
1258static inline void nv_mask(struct drm_device *dev, u32 reg, u32 mask, u32 val) 1338static inline u32 nv_mask(struct drm_device *dev, u32 reg, u32 mask, u32 val)
1259{ 1339{
1260 u32 tmp = nv_rd32(dev, reg); 1340 u32 tmp = nv_rd32(dev, reg);
1261 tmp &= ~mask; 1341 nv_wr32(dev, reg, (tmp & ~mask) | val);
1262 tmp |= val; 1342 return tmp;
1263 nv_wr32(dev, reg, tmp);
1264} 1343}
1265 1344
1266static inline u8 nv_rd08(struct drm_device *dev, unsigned reg) 1345static inline u8 nv_rd08(struct drm_device *dev, unsigned reg)
@@ -1275,7 +1354,7 @@ static inline void nv_wr08(struct drm_device *dev, unsigned reg, u8 val)
1275 iowrite8(val, dev_priv->mmio + reg); 1354 iowrite8(val, dev_priv->mmio + reg);
1276} 1355}
1277 1356
1278#define nv_wait(reg, mask, val) \ 1357#define nv_wait(dev, reg, mask, val) \
1279 nouveau_wait_until(dev, 2000000000ULL, (reg), (mask), (val)) 1358 nouveau_wait_until(dev, 2000000000ULL, (reg), (mask), (val))
1280 1359
1281/* PRAMIN access */ 1360/* PRAMIN access */
@@ -1292,17 +1371,8 @@ static inline void nv_wi32(struct drm_device *dev, unsigned offset, u32 val)
1292} 1371}
1293 1372
1294/* object access */ 1373/* object access */
1295static inline u32 nv_ro32(struct drm_device *dev, struct nouveau_gpuobj *obj, 1374extern u32 nv_ro32(struct nouveau_gpuobj *, u32 offset);
1296 unsigned index) 1375extern void nv_wo32(struct nouveau_gpuobj *, u32 offset, u32 val);
1297{
1298 return nv_ri32(dev, obj->im_pramin->start + index * 4);
1299}
1300
1301static inline void nv_wo32(struct drm_device *dev, struct nouveau_gpuobj *obj,
1302 unsigned index, u32 val)
1303{
1304 nv_wi32(dev, obj->im_pramin->start + index * 4, val);
1305}
1306 1376
1307/* 1377/*
1308 * Logging 1378 * Logging
@@ -1403,6 +1473,7 @@ nv_match_device(struct drm_device *dev, unsigned device,
1403#define NV_SW_SEMAPHORE_OFFSET 0x00000064 1473#define NV_SW_SEMAPHORE_OFFSET 0x00000064
1404#define NV_SW_SEMAPHORE_ACQUIRE 0x00000068 1474#define NV_SW_SEMAPHORE_ACQUIRE 0x00000068
1405#define NV_SW_SEMAPHORE_RELEASE 0x0000006c 1475#define NV_SW_SEMAPHORE_RELEASE 0x0000006c
1476#define NV_SW_YIELD 0x00000080
1406#define NV_SW_DMA_VBLSEM 0x0000018c 1477#define NV_SW_DMA_VBLSEM 0x0000018c
1407#define NV_SW_VBLSEM_OFFSET 0x00000400 1478#define NV_SW_VBLSEM_OFFSET 0x00000400
1408#define NV_SW_VBLSEM_RELEASE_VALUE 0x00000404 1479#define NV_SW_VBLSEM_RELEASE_VALUE 0x00000404
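
Two interface changes in this header deserve a note: nv_mask() now returns the register's previous value, turning save/modify/restore sequences into two calls, and nv_ro32()/nv_wo32() take the gpuobj plus a byte offset instead of (dev, obj, word index). A standalone model of the new nv_mask contract, with a plain variable standing in for the MMIO register:

```c
#include <stdio.h>

static unsigned int reg;	/* stands in for nv_rd32/nv_wr32 on hardware */

static unsigned int mask_reg(unsigned int mask, unsigned int val)
{
	unsigned int tmp = reg;

	reg = (tmp & ~mask) | val;
	return tmp;		/* pre-write value, new in this patch */
}

int main(void)
{
	unsigned int saved;

	reg = 0xdeadbeef;
	saved = mask_reg(0x0000ffff, 0x1234);		/* returns 0xdeadbeef */
	printf("now=0x%08x saved=0x%08x\n", reg, saved);
	mask_reg(0x0000ffff, saved & 0x0000ffff);	/* restore low bits */
	printf("restored=0x%08x\n", reg);
	return 0;
}
```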
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index 7c82d68bc155..ae69b61d93db 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -55,6 +55,7 @@ struct nouveau_encoder {
55 int dpcd_version; 55 int dpcd_version;
56 int link_nr; 56 int link_nr;
57 int link_bw; 57 int link_bw;
58 bool enhanced_frame;
58 } dp; 59 } dp;
59 }; 60 };
60}; 61};
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 87ac21ec23d2..441b12420bb1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -28,9 +28,11 @@
28#include "drm.h" 28#include "drm.h"
29 29
30#include "nouveau_drv.h" 30#include "nouveau_drv.h"
31#include "nouveau_ramht.h"
31#include "nouveau_dma.h" 32#include "nouveau_dma.h"
32 33
33#define USE_REFCNT (dev_priv->card_type >= NV_10) 34#define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10)
35#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17)
34 36
35struct nouveau_fence { 37struct nouveau_fence {
36 struct nouveau_channel *channel; 38 struct nouveau_channel *channel;
@@ -39,6 +41,15 @@ struct nouveau_fence {
39 41
40 uint32_t sequence; 42 uint32_t sequence;
41 bool signalled; 43 bool signalled;
44
45 void (*work)(void *priv, bool signalled);
46 void *priv;
47};
48
49struct nouveau_semaphore {
50 struct kref ref;
51 struct drm_device *dev;
52 struct drm_mm_node *mem;
42}; 53};
43 54
44static inline struct nouveau_fence * 55static inline struct nouveau_fence *
@@ -59,14 +70,13 @@ nouveau_fence_del(struct kref *ref)
59void 70void
60nouveau_fence_update(struct nouveau_channel *chan) 71nouveau_fence_update(struct nouveau_channel *chan)
61{ 72{
62 struct drm_nouveau_private *dev_priv = chan->dev->dev_private; 73 struct drm_device *dev = chan->dev;
63 struct list_head *entry, *tmp; 74 struct nouveau_fence *tmp, *fence;
64 struct nouveau_fence *fence;
65 uint32_t sequence; 75 uint32_t sequence;
66 76
67 spin_lock(&chan->fence.lock); 77 spin_lock(&chan->fence.lock);
68 78
69 if (USE_REFCNT) 79 if (USE_REFCNT(dev))
70 sequence = nvchan_rd32(chan, 0x48); 80 sequence = nvchan_rd32(chan, 0x48);
71 else 81 else
72 sequence = atomic_read(&chan->fence.last_sequence_irq); 82 sequence = atomic_read(&chan->fence.last_sequence_irq);
@@ -75,12 +85,14 @@ nouveau_fence_update(struct nouveau_channel *chan)
75 goto out; 85 goto out;
76 chan->fence.sequence_ack = sequence; 86 chan->fence.sequence_ack = sequence;
77 87
78 list_for_each_safe(entry, tmp, &chan->fence.pending) { 88 list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
79 fence = list_entry(entry, struct nouveau_fence, entry);
80
81 sequence = fence->sequence; 89 sequence = fence->sequence;
82 fence->signalled = true; 90 fence->signalled = true;
83 list_del(&fence->entry); 91 list_del(&fence->entry);
92
93 if (unlikely(fence->work))
94 fence->work(fence->priv, true);
95
84 kref_put(&fence->refcount, nouveau_fence_del); 96 kref_put(&fence->refcount, nouveau_fence_del);
85 97
86 if (sequence == chan->fence.sequence_ack) 98 if (sequence == chan->fence.sequence_ack)
@@ -121,8 +133,8 @@ nouveau_fence_channel(struct nouveau_fence *fence)
121int 133int
122nouveau_fence_emit(struct nouveau_fence *fence) 134nouveau_fence_emit(struct nouveau_fence *fence)
123{ 135{
124 struct drm_nouveau_private *dev_priv = fence->channel->dev->dev_private;
125 struct nouveau_channel *chan = fence->channel; 136 struct nouveau_channel *chan = fence->channel;
137 struct drm_device *dev = chan->dev;
126 int ret; 138 int ret;
127 139
128 ret = RING_SPACE(chan, 2); 140 ret = RING_SPACE(chan, 2);
@@ -143,7 +155,7 @@ nouveau_fence_emit(struct nouveau_fence *fence)
143 list_add_tail(&fence->entry, &chan->fence.pending); 155 list_add_tail(&fence->entry, &chan->fence.pending);
144 spin_unlock(&chan->fence.lock); 156 spin_unlock(&chan->fence.lock);
145 157
146 BEGIN_RING(chan, NvSubSw, USE_REFCNT ? 0x0050 : 0x0150, 1); 158 BEGIN_RING(chan, NvSubSw, USE_REFCNT(dev) ? 0x0050 : 0x0150, 1);
147 OUT_RING(chan, fence->sequence); 159 OUT_RING(chan, fence->sequence);
148 FIRE_RING(chan); 160 FIRE_RING(chan);
149 161
@@ -151,6 +163,25 @@ nouveau_fence_emit(struct nouveau_fence *fence)
151} 163}
152 164
153void 165void
166nouveau_fence_work(struct nouveau_fence *fence,
167 void (*work)(void *priv, bool signalled),
168 void *priv)
169{
170 BUG_ON(fence->work);
171
172 spin_lock(&fence->channel->fence.lock);
173
174 if (fence->signalled) {
175 work(priv, true);
176 } else {
177 fence->work = work;
178 fence->priv = priv;
179 }
180
181 spin_unlock(&fence->channel->fence.lock);
182}
183
184void
154nouveau_fence_unref(void **sync_obj) 185nouveau_fence_unref(void **sync_obj)
155{ 186{
156 struct nouveau_fence *fence = nouveau_fence(*sync_obj); 187 struct nouveau_fence *fence = nouveau_fence(*sync_obj);
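
nouveau_fence_work() is the hook the semaphore code below relies on: attach a callback that runs when the fence signals (signalled == true) or when the channel is torn down with work still pending (signalled == false), running it immediately if the fence has already signalled. A stripped-down model of that attach-or-run-now logic:

```c
#include <stdio.h>
#include <stdbool.h>

struct fence {
	bool signalled;
	void (*work)(void *priv, bool signalled);
	void *priv;
};

static void release_slot(void *priv, bool signalled)
{
	printf("releasing %s (%s)\n", (const char *)priv,
	       signalled ? "fence signalled" : "channel torn down");
}

static void fence_work(struct fence *f,
		       void (*work)(void *, bool), void *priv)
{
	if (f->signalled) {	/* already done: run right away */
		work(priv, true);
	} else {		/* defer to fence_update / channel_fini */
		f->work = work;
		f->priv = priv;
	}
}

int main(void)
{
	struct fence f = { .signalled = false };

	fence_work(&f, release_slot, (void *)"semaphore slot");
	f.work(f.priv, true);	/* what nouveau_fence_update does on signal */
	return 0;
}
```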
@@ -213,6 +244,162 @@ nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
213 return ret; 244 return ret;
214} 245}
215 246
247static struct nouveau_semaphore *
248alloc_semaphore(struct drm_device *dev)
249{
250 struct drm_nouveau_private *dev_priv = dev->dev_private;
251 struct nouveau_semaphore *sema;
252
253 if (!USE_SEMA(dev))
254 return NULL;
255
256 sema = kmalloc(sizeof(*sema), GFP_KERNEL);
257 if (!sema)
258 goto fail;
259
260 spin_lock(&dev_priv->fence.lock);
261 sema->mem = drm_mm_search_free(&dev_priv->fence.heap, 4, 0, 0);
262 if (sema->mem)
263 sema->mem = drm_mm_get_block(sema->mem, 4, 0);
264 spin_unlock(&dev_priv->fence.lock);
265
266 if (!sema->mem)
267 goto fail;
268
269 kref_init(&sema->ref);
270 sema->dev = dev;
271 nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 0);
272
273 return sema;
274fail:
275 kfree(sema);
276 return NULL;
277}
278
279static void
280free_semaphore(struct kref *ref)
281{
282 struct nouveau_semaphore *sema =
283 container_of(ref, struct nouveau_semaphore, ref);
284 struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
285
286 spin_lock(&dev_priv->fence.lock);
287 drm_mm_put_block(sema->mem);
288 spin_unlock(&dev_priv->fence.lock);
289
290 kfree(sema);
291}
292
293static void
294semaphore_work(void *priv, bool signalled)
295{
296 struct nouveau_semaphore *sema = priv;
297 struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
298
299 if (unlikely(!signalled))
300 nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 1);
301
302 kref_put(&sema->ref, free_semaphore);
303}
304
305static int
306emit_semaphore(struct nouveau_channel *chan, int method,
307 struct nouveau_semaphore *sema)
308{
309 struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
310 struct nouveau_fence *fence;
311 bool smart = (dev_priv->card_type >= NV_50);
312 int ret;
313
314 ret = RING_SPACE(chan, smart ? 8 : 4);
315 if (ret)
316 return ret;
317
318 if (smart) {
319 BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
320 OUT_RING(chan, NvSema);
321 }
322 BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_OFFSET, 1);
323 OUT_RING(chan, sema->mem->start);
324
325 if (smart && method == NV_SW_SEMAPHORE_ACQUIRE) {
326 /*
327 * NV50 tries to be too smart and context-switch
328 * between semaphores instead of doing a "first come,
329 * first served" strategy like previous cards
330 * do.
331 *
332 * That's bad because the ACQUIRE latency can get as
333 * large as the PFIFO context time slice in the
334 * typical DRI2 case where you have several
335 * outstanding semaphores at the same moment.
336 *
337 * If we're going to ACQUIRE, force the card to
338 * context switch before, just in case the matching
339 * RELEASE is already scheduled to be executed in
340 * another channel.
341 */
342 BEGIN_RING(chan, NvSubSw, NV_SW_YIELD, 1);
343 OUT_RING(chan, 0);
344 }
345
346 BEGIN_RING(chan, NvSubSw, method, 1);
347 OUT_RING(chan, 1);
348
349 if (smart && method == NV_SW_SEMAPHORE_RELEASE) {
350 /*
351 * Force the card to context switch, there may be
352 * another channel waiting for the semaphore we just
353 * released.
354 */
355 BEGIN_RING(chan, NvSubSw, NV_SW_YIELD, 1);
356 OUT_RING(chan, 0);
357 }
358
359 /* Delay semaphore destruction until its work is done */
360 ret = nouveau_fence_new(chan, &fence, true);
361 if (ret)
362 return ret;
363
364 kref_get(&sema->ref);
365 nouveau_fence_work(fence, semaphore_work, sema);
366 nouveau_fence_unref((void *)&fence);
367
368 return 0;
369}
370
371int
372nouveau_fence_sync(struct nouveau_fence *fence,
373 struct nouveau_channel *wchan)
374{
375 struct nouveau_channel *chan = nouveau_fence_channel(fence);
376 struct drm_device *dev = wchan->dev;
377 struct nouveau_semaphore *sema;
378 int ret;
379
380 if (likely(!fence || chan == wchan ||
381 nouveau_fence_signalled(fence, NULL)))
382 return 0;
383
384 sema = alloc_semaphore(dev);
385 if (!sema) {
386 /* Early card or broken userspace, fall back to
387 * software sync. */
388 return nouveau_fence_wait(fence, NULL, false, false);
389 }
390
391 /* Make wchan wait until it gets signalled */
392 ret = emit_semaphore(wchan, NV_SW_SEMAPHORE_ACQUIRE, sema);
393 if (ret)
394 goto out;
395
396 /* Signal the semaphore from chan */
397 ret = emit_semaphore(chan, NV_SW_SEMAPHORE_RELEASE, sema);
398out:
399 kref_put(&sema->ref, free_semaphore);
400 return ret;
401}
402
216int 403int
217nouveau_fence_flush(void *sync_obj, void *sync_arg) 404nouveau_fence_flush(void *sync_obj, void *sync_arg)
218{ 405{
@@ -220,26 +407,123 @@ nouveau_fence_flush(void *sync_obj, void *sync_arg)
220} 407}
221 408
222int 409int
223nouveau_fence_init(struct nouveau_channel *chan) 410nouveau_fence_channel_init(struct nouveau_channel *chan)
224{ 411{
412 struct drm_device *dev = chan->dev;
413 struct drm_nouveau_private *dev_priv = dev->dev_private;
414 struct nouveau_gpuobj *obj = NULL;
415 int ret;
416
417 /* Create an NV_SW object for various sync purposes */
418 ret = nouveau_gpuobj_sw_new(chan, NV_SW, &obj);
419 if (ret)
420 return ret;
421
422 ret = nouveau_ramht_insert(chan, NvSw, obj);
423 nouveau_gpuobj_ref(NULL, &obj);
424 if (ret)
425 return ret;
426
427 ret = RING_SPACE(chan, 2);
428 if (ret)
429 return ret;
430 BEGIN_RING(chan, NvSubSw, 0, 1);
431 OUT_RING(chan, NvSw);
432
433 /* Create a DMA object for the shared cross-channel sync area. */
434 if (USE_SEMA(dev)) {
435 struct drm_mm_node *mem = dev_priv->fence.bo->bo.mem.mm_node;
436
437 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
438 mem->start << PAGE_SHIFT,
439 mem->size << PAGE_SHIFT,
440 NV_DMA_ACCESS_RW,
441 NV_DMA_TARGET_VIDMEM, &obj);
442 if (ret)
443 return ret;
444
445 ret = nouveau_ramht_insert(chan, NvSema, obj);
446 nouveau_gpuobj_ref(NULL, &obj);
447 if (ret)
448 return ret;
449
450 ret = RING_SPACE(chan, 2);
451 if (ret)
452 return ret;
453 BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
454 OUT_RING(chan, NvSema);
455 }
456
457 FIRE_RING(chan);
458
225 INIT_LIST_HEAD(&chan->fence.pending); 459 INIT_LIST_HEAD(&chan->fence.pending);
226 spin_lock_init(&chan->fence.lock); 460 spin_lock_init(&chan->fence.lock);
227 atomic_set(&chan->fence.last_sequence_irq, 0); 461 atomic_set(&chan->fence.last_sequence_irq, 0);
462
228 return 0; 463 return 0;
229} 464}
230 465
231void 466void
232nouveau_fence_fini(struct nouveau_channel *chan) 467nouveau_fence_channel_fini(struct nouveau_channel *chan)
233{ 468{
234 struct list_head *entry, *tmp; 469 struct nouveau_fence *tmp, *fence;
235 struct nouveau_fence *fence;
236
237 list_for_each_safe(entry, tmp, &chan->fence.pending) {
238 fence = list_entry(entry, struct nouveau_fence, entry);
239 470
471 list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
240 fence->signalled = true; 472 fence->signalled = true;
241 list_del(&fence->entry); 473 list_del(&fence->entry);
474
475 if (unlikely(fence->work))
476 fence->work(fence->priv, false);
477
242 kref_put(&fence->refcount, nouveau_fence_del); 478 kref_put(&fence->refcount, nouveau_fence_del);
243 } 479 }
244} 480}
245 481
482int
483nouveau_fence_init(struct drm_device *dev)
484{
485 struct drm_nouveau_private *dev_priv = dev->dev_private;
486 int ret;
487
488 /* Create a shared VRAM heap for cross-channel sync. */
489 if (USE_SEMA(dev)) {
490 ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM,
491 0, 0, false, true, &dev_priv->fence.bo);
492 if (ret)
493 return ret;
494
495 ret = nouveau_bo_pin(dev_priv->fence.bo, TTM_PL_FLAG_VRAM);
496 if (ret)
497 goto fail;
498
499 ret = nouveau_bo_map(dev_priv->fence.bo);
500 if (ret)
501 goto fail;
502
503 ret = drm_mm_init(&dev_priv->fence.heap, 0,
504 dev_priv->fence.bo->bo.mem.size);
505 if (ret)
506 goto fail;
507
508 spin_lock_init(&dev_priv->fence.lock);
509 }
510
511 return 0;
512fail:
513 nouveau_bo_unmap(dev_priv->fence.bo);
514 nouveau_bo_ref(NULL, &dev_priv->fence.bo);
515 return ret;
516}
517
518void
519nouveau_fence_fini(struct drm_device *dev)
520{
521 struct drm_nouveau_private *dev_priv = dev->dev_private;
522
523 if (USE_SEMA(dev)) {
524 drm_mm_takedown(&dev_priv->fence.heap);
525 nouveau_bo_unmap(dev_priv->fence.bo);
526 nouveau_bo_unpin(dev_priv->fence.bo);
527 nouveau_bo_ref(NULL, &dev_priv->fence.bo);
528 }
529}
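
Putting the pieces together: nouveau_fence_sync() allocates a 4-byte slot in the shared VRAM buffer, makes the waiting channel ACQUIRE it, makes the signalling channel RELEASE it, and uses a fence plus nouveau_fence_work() to free the slot afterwards (writing 1 on teardown so a stuck waiter is unblocked). A sequential host-side model of the shared-word protocol; on real hardware the ACQUIRE stalls in PFIFO rather than polling:

```c
#include <stdio.h>

static unsigned int sema;	/* the 4-byte slot in dev_priv->fence.bo */

static void release(void) { sema = 1; }		/* NV_SW_SEMAPHORE_RELEASE */
static int acquired(void) { return sema == 1; }	/* NV_SW_SEMAPHORE_ACQUIRE */

int main(void)
{
	sema = 0;			/* alloc_semaphore() zeroes the slot */
	printf("%d\n", acquired());	/* 0: waiting channel stalls here */
	release();			/* signalling channel catches up */
	printf("%d\n", acquired());	/* 1: waiter proceeds */
	return 0;
}
```

The NV_SW_YIELD writes bracketing ACQUIRE/RELEASE on NV50 exist purely to force PFIFO context switches, for the reasons spelled out in the comment inside emit_semaphore().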
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 19620a6709f5..5c4c929d7f74 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -362,7 +362,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
362 list_for_each_entry(nvbo, list, entry) { 362 list_for_each_entry(nvbo, list, entry) {
363 struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index]; 363 struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
364 364
365 ret = nouveau_bo_sync_gpu(nvbo, chan); 365 ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan);
366 if (unlikely(ret)) { 366 if (unlikely(ret)) {
367 NV_ERROR(dev, "fail pre-validate sync\n"); 367 NV_ERROR(dev, "fail pre-validate sync\n");
368 return ret; 368 return ret;
@@ -385,7 +385,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
385 return ret; 385 return ret;
386 } 386 }
387 387
388 ret = nouveau_bo_sync_gpu(nvbo, chan); 388 ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan);
389 if (unlikely(ret)) { 389 if (unlikely(ret)) {
390 NV_ERROR(dev, "fail post-validate sync\n"); 390 NV_ERROR(dev, "fail post-validate sync\n");
391 return ret; 391 return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.h b/drivers/gpu/drm/nouveau/nouveau_grctx.h
index 5d39c4ce8006..4a8ad1307fa4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_grctx.h
+++ b/drivers/gpu/drm/nouveau/nouveau_grctx.h
@@ -126,7 +126,7 @@ gr_def(struct nouveau_grctx *ctx, uint32_t reg, uint32_t val)
126 reg = (reg - 0x00400000) / 4; 126 reg = (reg - 0x00400000) / 4;
127 reg = (reg - ctx->ctxprog_reg) + ctx->ctxvals_base; 127 reg = (reg - ctx->ctxprog_reg) + ctx->ctxvals_base;
128 128
129 nv_wo32(ctx->dev, ctx->data, reg, val); 129 nv_wo32(ctx->data, reg * 4, val);
130} 130}
131#endif 131#endif
132 132
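
The gr_def() fix is a direct consequence of the header change above: nv_wo32() now addresses the object in bytes, so word-index call sites multiply by 4. Modeled standalone, with an array standing in for the object's PRAMIN backing:

```c
#include <stdio.h>
#include <stdint.h>

static uint32_t pramin[64];

static void wo32(uint32_t byte_off, uint32_t val)
{
	pramin[byte_off / 4] = val;	/* new convention: byte offset */
}

int main(void)
{
	uint32_t reg = 5;	/* old: nv_wo32(dev, obj, reg, val) */

	wo32(reg * 4, 0xcafe);	/* new: nv_wo32(obj, reg * 4, val)  */
	printf("word 5 = 0x%x\n", pramin[5]);
	return 0;
}
```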
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c
index 7b613682e400..bed669a54a2d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.c
@@ -305,7 +305,7 @@ setPLL_double_lowregs(struct drm_device *dev, uint32_t NMNMreg,
305 bool mpll = Preg == 0x4020; 305 bool mpll = Preg == 0x4020;
306 uint32_t oldPval = nvReadMC(dev, Preg); 306 uint32_t oldPval = nvReadMC(dev, Preg);
307 uint32_t NMNM = pv->NM2 << 16 | pv->NM1; 307 uint32_t NMNM = pv->NM2 << 16 | pv->NM1;
308 uint32_t Pval = (oldPval & (mpll ? ~(0x11 << 16) : ~(1 << 16))) | 308 uint32_t Pval = (oldPval & (mpll ? ~(0x77 << 16) : ~(7 << 16))) |
309 0xc << 28 | pv->log2P << 16; 309 0xc << 28 | pv->log2P << 16;
310 uint32_t saved4600 = 0; 310 uint32_t saved4600 = 0;
311 /* some cards have different maskc040s */ 311 /* some cards have different maskc040s */
@@ -427,22 +427,12 @@ nouveau_hw_get_pllvals(struct drm_device *dev, enum pll_types plltype,
427 struct nouveau_pll_vals *pllvals) 427 struct nouveau_pll_vals *pllvals)
428{ 428{
429 struct drm_nouveau_private *dev_priv = dev->dev_private; 429 struct drm_nouveau_private *dev_priv = dev->dev_private;
430 const uint32_t nv04_regs[MAX_PLL_TYPES] = { NV_PRAMDAC_NVPLL_COEFF, 430 uint32_t reg1 = get_pll_register(dev, plltype), pll1, pll2 = 0;
431 NV_PRAMDAC_MPLL_COEFF,
432 NV_PRAMDAC_VPLL_COEFF,
433 NV_RAMDAC_VPLL2 };
434 const uint32_t nv40_regs[MAX_PLL_TYPES] = { 0x4000,
435 0x4020,
436 NV_PRAMDAC_VPLL_COEFF,
437 NV_RAMDAC_VPLL2 };
438 uint32_t reg1, pll1, pll2 = 0;
439 struct pll_lims pll_lim; 431 struct pll_lims pll_lim;
440 int ret; 432 int ret;
441 433
442 if (dev_priv->card_type < NV_40) 434 if (reg1 == 0)
443 reg1 = nv04_regs[plltype]; 435 return -ENOENT;
444 else
445 reg1 = nv40_regs[plltype];
446 436
447 pll1 = nvReadMC(dev, reg1); 437 pll1 = nvReadMC(dev, reg1);
448 438
@@ -491,8 +481,10 @@ int
491nouveau_hw_get_clock(struct drm_device *dev, enum pll_types plltype) 481nouveau_hw_get_clock(struct drm_device *dev, enum pll_types plltype)
492{ 482{
493 struct nouveau_pll_vals pllvals; 483 struct nouveau_pll_vals pllvals;
484 int ret;
494 485
495 if (plltype == MPLL && (dev->pci_device & 0x0ff0) == CHIPSET_NFORCE) { 486 if (plltype == PLL_MEMORY &&
487 (dev->pci_device & 0x0ff0) == CHIPSET_NFORCE) {
496 uint32_t mpllP; 488 uint32_t mpllP;
497 489
498 pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP); 490 pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
@@ -501,14 +493,17 @@ nouveau_hw_get_clock(struct drm_device *dev, enum pll_types plltype)
501 493
502 return 400000 / mpllP; 494 return 400000 / mpllP;
503 } else 495 } else
504 if (plltype == MPLL && (dev->pci_device & 0xff0) == CHIPSET_NFORCE2) { 496 if (plltype == PLL_MEMORY &&
497 (dev->pci_device & 0xff0) == CHIPSET_NFORCE2) {
505 uint32_t clock; 498 uint32_t clock;
506 499
507 pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock); 500 pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
508 return clock; 501 return clock;
509 } 502 }
510 503
511 nouveau_hw_get_pllvals(dev, plltype, &pllvals); 504 ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals);
505 if (ret)
506 return ret;
512 507
513 return nouveau_hw_pllvals_to_clk(&pllvals); 508 return nouveau_hw_pllvals_to_clk(&pllvals);
514} 509}
@@ -526,9 +521,9 @@ nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head)
526 struct nouveau_pll_vals pv; 521 struct nouveau_pll_vals pv;
527 uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF; 522 uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF;
528 523
529 if (get_pll_limits(dev, head ? VPLL2 : VPLL1, &pll_lim)) 524 if (get_pll_limits(dev, pllreg, &pll_lim))
530 return; 525 return;
531 nouveau_hw_get_pllvals(dev, head ? VPLL2 : VPLL1, &pv); 526 nouveau_hw_get_pllvals(dev, pllreg, &pv);
532 527
533 if (pv.M1 >= pll_lim.vco1.min_m && pv.M1 <= pll_lim.vco1.max_m && 528 if (pv.M1 >= pll_lim.vco1.min_m && pv.M1 <= pll_lim.vco1.max_m &&
534 pv.N1 >= pll_lim.vco1.min_n && pv.N1 <= pll_lim.vco1.max_n && 529 pv.N1 >= pll_lim.vco1.min_n && pv.N1 <= pll_lim.vco1.max_n &&
@@ -661,7 +656,7 @@ nv_save_state_ramdac(struct drm_device *dev, int head,
661 if (dev_priv->card_type >= NV_10) 656 if (dev_priv->card_type >= NV_10)
662 regp->nv10_cursync = NVReadRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC); 657 regp->nv10_cursync = NVReadRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC);
663 658
664 nouveau_hw_get_pllvals(dev, head ? VPLL2 : VPLL1, &regp->pllvals); 659 nouveau_hw_get_pllvals(dev, head ? PLL_VPLL1 : PLL_VPLL0, &regp->pllvals);
665 state->pllsel = NVReadRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT); 660 state->pllsel = NVReadRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT);
666 if (nv_two_heads(dev)) 661 if (nv_two_heads(dev))
667 state->sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK); 662 state->sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK);
@@ -866,10 +861,11 @@ nv_save_state_ext(struct drm_device *dev, int head,
866 rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX); 861 rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
867 rd_cio_state(dev, head, regp, NV_CIO_CRE_21); 862 rd_cio_state(dev, head, regp, NV_CIO_CRE_21);
868 863
869 if (dev_priv->card_type >= NV_30) { 864 if (dev_priv->card_type >= NV_20)
870 rd_cio_state(dev, head, regp, NV_CIO_CRE_47); 865 rd_cio_state(dev, head, regp, NV_CIO_CRE_47);
866
867 if (dev_priv->card_type >= NV_30)
871 rd_cio_state(dev, head, regp, 0x9f); 868 rd_cio_state(dev, head, regp, 0x9f);
872 }
873 869
874 rd_cio_state(dev, head, regp, NV_CIO_CRE_49); 870 rd_cio_state(dev, head, regp, NV_CIO_CRE_49);
875 rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX); 871 rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
@@ -976,10 +972,11 @@ nv_load_state_ext(struct drm_device *dev, int head,
976 wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX); 972 wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX);
977 wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX); 973 wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
978 974
979 if (dev_priv->card_type >= NV_30) { 975 if (dev_priv->card_type >= NV_20)
980 wr_cio_state(dev, head, regp, NV_CIO_CRE_47); 976 wr_cio_state(dev, head, regp, NV_CIO_CRE_47);
977
978 if (dev_priv->card_type >= NV_30)
981 wr_cio_state(dev, head, regp, 0x9f); 979 wr_cio_state(dev, head, regp, 0x9f);
982 }
983 980
984 wr_cio_state(dev, head, regp, NV_CIO_CRE_49); 981 wr_cio_state(dev, head, regp, NV_CIO_CRE_49);
985 wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX); 982 wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
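
The nouveau_hw.c hunks replace the two per-generation register arrays with get_pll_register(), and teach callers to propagate its failure (0 becomes -ENOENT) instead of indexing a table blindly. A hedged sketch of the lookup shape, using the register values quoted in the removed tables; the real helper also consults the BIOS PLL-limits table, which the static arrays stand in for here:

```c
#include <stdio.h>

enum pll_types { PLL_CORE, PLL_MEMORY, PLL_VPLL0, PLL_VPLL1 };

static unsigned int pll_register(int is_nv40, enum pll_types type)
{
	/* order mirrors the removed nv04_regs/nv40_regs tables */
	static const unsigned int nv04_regs[] =
		{ 0x680500, 0x680504, 0x680508, 0x680520 };
	static const unsigned int nv40_regs[] =
		{ 0x004000, 0x004020, 0x680508, 0x680520 };

	return is_nv40 ? nv40_regs[type] : nv04_regs[type];
}

int main(void)
{
	unsigned int reg = pll_register(1, PLL_MEMORY);

	if (!reg)
		return 1;	/* what the new -ENOENT path guards */
	printf("MPLL coefficients at 0x%06x\n", reg);
	return 0;
}
```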
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index 84614858728b..fdd7e3de79c8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -299,7 +299,10 @@ nouveau_probe_i2c_addr(struct nouveau_i2c_chan *i2c, int addr)
299 299
300int 300int
301nouveau_i2c_identify(struct drm_device *dev, const char *what, 301nouveau_i2c_identify(struct drm_device *dev, const char *what,
302 struct i2c_board_info *info, int index) 302 struct i2c_board_info *info,
303 bool (*match)(struct nouveau_i2c_chan *,
304 struct i2c_board_info *),
305 int index)
303{ 306{
304 struct nouveau_i2c_chan *i2c = nouveau_i2c_find(dev, index); 307 struct nouveau_i2c_chan *i2c = nouveau_i2c_find(dev, index);
305 int i; 308 int i;
@@ -307,7 +310,8 @@ nouveau_i2c_identify(struct drm_device *dev, const char *what,
307 NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, index); 310 NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, index);
308 311
309 for (i = 0; info[i].addr; i++) { 312 for (i = 0; info[i].addr; i++) {
310 if (nouveau_probe_i2c_addr(i2c, info[i].addr)) { 313 if (nouveau_probe_i2c_addr(i2c, info[i].addr) &&
314 (!match || match(i2c, &info[i]))) {
311 NV_INFO(dev, "Detected %s: %s\n", what, info[i].type); 315 NV_INFO(dev, "Detected %s: %s\n", what, info[i].type);
312 return i; 316 return i;
313 } 317 }
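
nouveau_i2c_identify() previously claimed a device as soon as its address ACKed, which misidentifies chips that share an address. The new match callback lets the caller read registers over the bus and veto the hit. A self-contained model of the probe-then-confirm loop; the chip table and the register check are illustrative:

```c
#include <stdio.h>
#include <stdbool.h>

struct i2c_chan { int bus; };
struct board_info { int addr; const char *type; };

static bool probe_addr(struct i2c_chan *c, int addr)
{
	(void)c;
	return addr == 0x2d;	/* stand-in for a zero-length transfer */
}

/* e.g. a sensor driver verifying a vendor/device id register */
static bool match_sensor(struct i2c_chan *c, struct board_info *info)
{
	(void)c;
	return info->addr == 0x2d;	/* stand-in for an smbus read */
}

static int identify(struct i2c_chan *c, struct board_info *info, int n,
		    bool (*match)(struct i2c_chan *, struct board_info *))
{
	int i;

	for (i = 0; i < n; i++)
		if (probe_addr(c, info[i].addr) &&
		    (!match || match(c, &info[i])))
			return i;
	return -1;
}

int main(void)
{
	struct i2c_chan chan = { 0 };
	struct board_info tab[] = { { 0x18, "lm99" }, { 0x2d, "w83781d" } };

	printf("matched index %d\n", identify(&chan, tab, 2, match_sensor));
	return 0;
}
```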
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.h b/drivers/gpu/drm/nouveau/nouveau_i2c.h
index f71cb32f7571..c77a6ba66b7c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.h
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.h
@@ -44,7 +44,10 @@ void nouveau_i2c_fini(struct drm_device *, struct dcb_i2c_entry *);
44struct nouveau_i2c_chan *nouveau_i2c_find(struct drm_device *, int index); 44struct nouveau_i2c_chan *nouveau_i2c_find(struct drm_device *, int index);
45bool nouveau_probe_i2c_addr(struct nouveau_i2c_chan *i2c, int addr); 45bool nouveau_probe_i2c_addr(struct nouveau_i2c_chan *i2c, int addr);
46int nouveau_i2c_identify(struct drm_device *dev, const char *what, 46int nouveau_i2c_identify(struct drm_device *dev, const char *what,
47 struct i2c_board_info *info, int index); 47 struct i2c_board_info *info,
48 bool (*match)(struct nouveau_i2c_chan *,
49 struct i2c_board_info *),
50 int index);
48 51
49extern const struct i2c_algorithm nouveau_dp_i2c_algo; 52extern const struct i2c_algorithm nouveau_dp_i2c_algo;
50 53
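
The nouveau_i2c_identify() change above threads an optional match callback through the probe loop, so a caller can reject a device that ACKs its address but fails a closer inspection (for example, an ID-register read). A minimal usage sketch follows; the callback body, the table contents and every name other than nouveau_i2c_identify() are illustrative, not part of this patch:

static bool
my_sensor_match(struct nouveau_i2c_chan *i2c, struct i2c_board_info *info)
{
	/* hypothetical: read and verify a vendor/device ID register
	 * here; returning true accepts the device */
	return true;
}

static struct i2c_board_info my_sensors[] = {
	{ I2C_BOARD_INFO("w83l785ts", 0x2d) },
	{ }
};

static int
probe_my_sensors(struct drm_device *dev, int i2c_index)
{
	/* passing NULL for the match argument keeps the old
	 * address-ACK-only behaviour */
	return nouveau_i2c_identify(dev, "monitoring device",
				    my_sensors, my_sensor_match, i2c_index);
}
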
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 794b0ee30cf6..6fd51a51c608 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -35,6 +35,7 @@
35#include "nouveau_drm.h" 35#include "nouveau_drm.h"
36#include "nouveau_drv.h" 36#include "nouveau_drv.h"
37#include "nouveau_reg.h" 37#include "nouveau_reg.h"
38#include "nouveau_ramht.h"
38#include <linux/ratelimit.h> 39#include <linux/ratelimit.h>
39 40
40/* needed for hotplug irq */ 41/* needed for hotplug irq */
@@ -106,15 +107,16 @@ nouveau_fifo_swmthd(struct nouveau_channel *chan, uint32_t addr, uint32_t data)
106 const int mthd = addr & 0x1ffc; 107 const int mthd = addr & 0x1ffc;
107 108
108 if (mthd == 0x0000) { 109 if (mthd == 0x0000) {
109 struct nouveau_gpuobj_ref *ref = NULL; 110 struct nouveau_gpuobj *gpuobj;
110 111
111 if (nouveau_gpuobj_ref_find(chan, data, &ref)) 112 gpuobj = nouveau_ramht_find(chan, data);
113 if (!gpuobj)
112 return false; 114 return false;
113 115
114 if (ref->gpuobj->engine != NVOBJ_ENGINE_SW) 116 if (gpuobj->engine != NVOBJ_ENGINE_SW)
115 return false; 117 return false;
116 118
117 chan->sw_subchannel[subc] = ref->gpuobj->class; 119 chan->sw_subchannel[subc] = gpuobj->class;
118 nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_rd32(dev, 120 nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_rd32(dev,
119 NV04_PFIFO_CACHE1_ENGINE) & ~(0xf << subc * 4)); 121 NV04_PFIFO_CACHE1_ENGINE) & ~(0xf << subc * 4));
120 return true; 122 return true;
@@ -200,16 +202,45 @@ nouveau_fifo_irq_handler(struct drm_device *dev)
200 } 202 }
201 203
202 if (status & NV_PFIFO_INTR_DMA_PUSHER) { 204 if (status & NV_PFIFO_INTR_DMA_PUSHER) {
203 NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d\n", chid); 205 u32 get = nv_rd32(dev, 0x003244);
206 u32 put = nv_rd32(dev, 0x003240);
207 u32 push = nv_rd32(dev, 0x003220);
208 u32 state = nv_rd32(dev, 0x003228);
209
210 if (dev_priv->card_type == NV_50) {
211 u32 ho_get = nv_rd32(dev, 0x003328);
212 u32 ho_put = nv_rd32(dev, 0x003320);
213 u32 ib_get = nv_rd32(dev, 0x003334);
214 u32 ib_put = nv_rd32(dev, 0x003330);
215
216 NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
217 "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
218 "State 0x%08x Push 0x%08x\n",
219 chid, ho_get, get, ho_put, put, ib_get, ib_put,
220 state, push);
221
222 /* METHOD_COUNT, in DMA_STATE on earlier chipsets */
223 nv_wr32(dev, 0x003364, 0x00000000);
224 if (get != put || ho_get != ho_put) {
225 nv_wr32(dev, 0x003244, put);
226 nv_wr32(dev, 0x003328, ho_put);
227 } else
228 if (ib_get != ib_put) {
229 nv_wr32(dev, 0x003334, ib_put);
230 }
231 } else {
232 NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
233 "Put 0x%08x State 0x%08x Push 0x%08x\n",
234 chid, get, put, state, push);
204 235
205 status &= ~NV_PFIFO_INTR_DMA_PUSHER; 236 if (get != put)
206 nv_wr32(dev, NV03_PFIFO_INTR_0, 237 nv_wr32(dev, 0x003244, put);
207 NV_PFIFO_INTR_DMA_PUSHER); 238 }
208 239
209 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, 0x00000000); 240 nv_wr32(dev, 0x003228, 0x00000000);
210 if (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT) != get) 241 nv_wr32(dev, 0x003220, 0x00000001);
211 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, 242 nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
212 get + 4); 243 status &= ~NV_PFIFO_INTR_DMA_PUSHER;
213 } 244 }
214 245
215 if (status & NV_PFIFO_INTR_SEMAPHORE) { 246 if (status & NV_PFIFO_INTR_SEMAPHORE) {
@@ -226,6 +257,14 @@ nouveau_fifo_irq_handler(struct drm_device *dev)
226 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1); 257 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
227 } 258 }
228 259
260 if (dev_priv->card_type == NV_50) {
261 if (status & 0x00000010) {
262 nv50_fb_vm_trap(dev, 1, "PFIFO_BAR_FAULT");
263 status &= ~0x00000010;
264 nv_wr32(dev, 0x002100, 0x00000010);
265 }
266 }
267
229 if (status) { 268 if (status) {
230 NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n", 269 NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
231 status, chid); 270 status, chid);
@@ -357,7 +396,7 @@ nouveau_graph_chid_from_grctx(struct drm_device *dev)
357 if (!chan || !chan->ramin_grctx) 396 if (!chan || !chan->ramin_grctx)
358 continue; 397 continue;
359 398
360 if (inst == chan->ramin_grctx->instance) 399 if (inst == chan->ramin_grctx->pinst)
361 break; 400 break;
362 } 401 }
363 } else { 402 } else {
@@ -369,7 +408,7 @@ nouveau_graph_chid_from_grctx(struct drm_device *dev)
369 if (!chan || !chan->ramin) 408 if (!chan || !chan->ramin)
370 continue; 409 continue;
371 410
372 if (inst == chan->ramin->instance) 411 if (inst == chan->ramin->vinst)
373 break; 412 break;
374 } 413 }
375 } 414 }
@@ -605,40 +644,6 @@ nouveau_pgraph_irq_handler(struct drm_device *dev)
605 nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); 644 nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
606} 645}
607 646
608static void
609nv50_pfb_vm_trap(struct drm_device *dev, int display, const char *name)
610{
611 struct drm_nouveau_private *dev_priv = dev->dev_private;
612 uint32_t trap[6];
613 int i, ch;
614 uint32_t idx = nv_rd32(dev, 0x100c90);
615 if (idx & 0x80000000) {
616 idx &= 0xffffff;
617 if (display) {
618 for (i = 0; i < 6; i++) {
619 nv_wr32(dev, 0x100c90, idx | i << 24);
620 trap[i] = nv_rd32(dev, 0x100c94);
621 }
622 for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) {
623 struct nouveau_channel *chan = dev_priv->fifos[ch];
624
625 if (!chan || !chan->ramin)
626 continue;
627
628 if (trap[1] == chan->ramin->instance >> 12)
629 break;
630 }
631 NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x %08x channel %d\n",
632 name, (trap[5]&0x100?"read":"write"),
633 trap[5]&0xff, trap[4]&0xffff,
634 trap[3]&0xffff, trap[0], trap[2], ch);
635 }
636 nv_wr32(dev, 0x100c90, idx | 0x80000000);
637 } else if (display) {
638 NV_INFO(dev, "%s - no VM fault?\n", name);
639 }
640}
641
642static struct nouveau_enum_names nv50_mp_exec_error_names[] = 647static struct nouveau_enum_names nv50_mp_exec_error_names[] =
643{ 648{
644 { 3, "STACK_UNDERFLOW" }, 649 { 3, "STACK_UNDERFLOW" },
@@ -711,7 +716,7 @@ nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
711 tps++; 716 tps++;
712 switch (type) { 717 switch (type) {
713 case 6: /* texture error... unknown for now */ 718 case 6: /* texture error... unknown for now */
714 nv50_pfb_vm_trap(dev, display, name); 719 nv50_fb_vm_trap(dev, display, name);
715 if (display) { 720 if (display) {
716 NV_ERROR(dev, "magic set %d:\n", i); 721 NV_ERROR(dev, "magic set %d:\n", i);
717 for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4) 722 for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
@@ -734,7 +739,7 @@ nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
734 uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14); 739 uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
735 uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18); 740 uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
736 uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c); 741 uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
737 nv50_pfb_vm_trap(dev, display, name); 742 nv50_fb_vm_trap(dev, display, name);
738 /* 2d engine destination */ 743 /* 2d engine destination */
739 if (ustatus & 0x00000010) { 744 if (ustatus & 0x00000010) {
740 if (display) { 745 if (display) {
@@ -817,7 +822,7 @@ nv50_pgraph_trap_handler(struct drm_device *dev)
817 822
818 /* Known to be triggered by screwed up NOTIFY and COND... */ 823 /* Known to be triggered by screwed up NOTIFY and COND... */
819 if (ustatus & 0x00000001) { 824 if (ustatus & 0x00000001) {
820 nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT"); 825 nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT");
821 nv_wr32(dev, 0x400500, 0); 826 nv_wr32(dev, 0x400500, 0);
822 if (nv_rd32(dev, 0x400808) & 0x80000000) { 827 if (nv_rd32(dev, 0x400808) & 0x80000000) {
823 if (display) { 828 if (display) {
@@ -842,7 +847,7 @@ nv50_pgraph_trap_handler(struct drm_device *dev)
842 ustatus &= ~0x00000001; 847 ustatus &= ~0x00000001;
843 } 848 }
844 if (ustatus & 0x00000002) { 849 if (ustatus & 0x00000002) {
845 nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY"); 850 nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY");
846 nv_wr32(dev, 0x400500, 0); 851 nv_wr32(dev, 0x400500, 0);
847 if (nv_rd32(dev, 0x40084c) & 0x80000000) { 852 if (nv_rd32(dev, 0x40084c) & 0x80000000) {
848 if (display) { 853 if (display) {
@@ -884,15 +889,15 @@ nv50_pgraph_trap_handler(struct drm_device *dev)
884 NV_INFO(dev, "PGRAPH_TRAP_M2MF - no ustatus?\n"); 889 NV_INFO(dev, "PGRAPH_TRAP_M2MF - no ustatus?\n");
885 } 890 }
886 if (ustatus & 0x00000001) { 891 if (ustatus & 0x00000001) {
887 nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY"); 892 nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY");
888 ustatus &= ~0x00000001; 893 ustatus &= ~0x00000001;
889 } 894 }
890 if (ustatus & 0x00000002) { 895 if (ustatus & 0x00000002) {
891 nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN"); 896 nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN");
892 ustatus &= ~0x00000002; 897 ustatus &= ~0x00000002;
893 } 898 }
894 if (ustatus & 0x00000004) { 899 if (ustatus & 0x00000004) {
895 nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT"); 900 nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT");
896 ustatus &= ~0x00000004; 901 ustatus &= ~0x00000004;
897 } 902 }
898 NV_INFO (dev, "PGRAPH_TRAP_M2MF - %08x %08x %08x %08x\n", 903 NV_INFO (dev, "PGRAPH_TRAP_M2MF - %08x %08x %08x %08x\n",
@@ -917,7 +922,7 @@ nv50_pgraph_trap_handler(struct drm_device *dev)
917 NV_INFO(dev, "PGRAPH_TRAP_VFETCH - no ustatus?\n"); 922 NV_INFO(dev, "PGRAPH_TRAP_VFETCH - no ustatus?\n");
918 } 923 }
919 if (ustatus & 0x00000001) { 924 if (ustatus & 0x00000001) {
920 nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT"); 925 nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT");
921 NV_INFO (dev, "PGRAPH_TRAP_VFETCH_FAULT - %08x %08x %08x %08x\n", 926 NV_INFO (dev, "PGRAPH_TRAP_VFETCH_FAULT - %08x %08x %08x %08x\n",
922 nv_rd32(dev, 0x400c00), 927 nv_rd32(dev, 0x400c00),
923 nv_rd32(dev, 0x400c08), 928 nv_rd32(dev, 0x400c08),
@@ -939,7 +944,7 @@ nv50_pgraph_trap_handler(struct drm_device *dev)
939 NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - no ustatus?\n"); 944 NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - no ustatus?\n");
940 } 945 }
941 if (ustatus & 0x00000001) { 946 if (ustatus & 0x00000001) {
942 nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT"); 947 nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT");
943 NV_INFO (dev, "PGRAPH_TRAP_STRMOUT_FAULT - %08x %08x %08x %08x\n", 948 NV_INFO (dev, "PGRAPH_TRAP_STRMOUT_FAULT - %08x %08x %08x %08x\n",
944 nv_rd32(dev, 0x401804), 949 nv_rd32(dev, 0x401804),
945 nv_rd32(dev, 0x401808), 950 nv_rd32(dev, 0x401808),
@@ -964,7 +969,7 @@ nv50_pgraph_trap_handler(struct drm_device *dev)
964 NV_INFO(dev, "PGRAPH_TRAP_CCACHE - no ustatus?\n"); 969 NV_INFO(dev, "PGRAPH_TRAP_CCACHE - no ustatus?\n");
965 } 970 }
966 if (ustatus & 0x00000001) { 971 if (ustatus & 0x00000001) {
967 nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT"); 972 nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT");
968 NV_INFO (dev, "PGRAPH_TRAP_CCACHE_FAULT - %08x %08x %08x %08x %08x %08x %08x\n", 973 NV_INFO (dev, "PGRAPH_TRAP_CCACHE_FAULT - %08x %08x %08x %08x %08x %08x %08x\n",
969 nv_rd32(dev, 0x405800), 974 nv_rd32(dev, 0x405800),
970 nv_rd32(dev, 0x405804), 975 nv_rd32(dev, 0x405804),
@@ -986,7 +991,7 @@ nv50_pgraph_trap_handler(struct drm_device *dev)
986 * remaining, so try to handle it anyway. Perhaps related to that 991 * remaining, so try to handle it anyway. Perhaps related to that
987 * unknown DMA slot on tesla? */ 992 * unknown DMA slot on tesla? */
988 if (status & 0x20) { 993 if (status & 0x20) {
989 nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04"); 994 nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04");
990 ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff; 995 ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
991 if (display) 996 if (display)
992 NV_INFO(dev, "PGRAPH_TRAP_UNKC04 - Unhandled ustatus 0x%08x\n", ustatus); 997 NV_INFO(dev, "PGRAPH_TRAP_UNKC04 - Unhandled ustatus 0x%08x\n", ustatus);
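
The rewritten DMA_PUSHER path above no longer nudges GET forward one word at a time; it logs the full pusher state and then jumps GET to PUT (or IB_GET to IB_PUT when only the indirect buffer is stalled) before clearing DMA_STATE and re-enabling the pusher. On NV50 the pushbuffer pointers are 40-bit values split across two registers, which is why the message prints them as a pair. A sketch of how they combine, using the register offsets from this patch:

/* sketch: reassemble the 40-bit NV50 pusher pointers for inspection */
u32 get    = nv_rd32(dev, 0x003244);	/* GET, low 32 bits */
u32 ho_get = nv_rd32(dev, 0x003328);	/* GET, high 8 bits */
u64 dma_get = ((u64)ho_get << 32) | get;

u32 put    = nv_rd32(dev, 0x003240);	/* PUT, low 32 bits */
u32 ho_put = nv_rd32(dev, 0x003320);	/* PUT, high 8 bits */
u64 dma_put = ((u64)ho_put << 32) | put;
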
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 9689d4147686..a163c7c612e7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -35,6 +35,8 @@
35#include "drm_sarea.h" 35#include "drm_sarea.h"
36#include "nouveau_drv.h" 36#include "nouveau_drv.h"
37 37
38#define MIN(a,b) ((a) < (b) ? (a) : (b))
39
38/* 40/*
39 * NV10-NV40 tiling helpers 41 * NV10-NV40 tiling helpers
40 */ 42 */
@@ -47,18 +49,14 @@ nv10_mem_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
47 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 49 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
48 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; 50 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
49 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 51 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
50 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; 52 struct nouveau_tile_reg *tile = &dev_priv->tile[i];
51 53
52 tile->addr = addr; 54 tile->addr = addr;
53 tile->size = size; 55 tile->size = size;
54 tile->used = !!pitch; 56 tile->used = !!pitch;
55 nouveau_fence_unref((void **)&tile->fence); 57 nouveau_fence_unref((void **)&tile->fence);
56 58
57 if (!pfifo->cache_flush(dev))
58 return;
59
60 pfifo->reassign(dev, false); 59 pfifo->reassign(dev, false);
61 pfifo->cache_flush(dev);
62 pfifo->cache_pull(dev, false); 60 pfifo->cache_pull(dev, false);
63 61
64 nouveau_wait_for_idle(dev); 62 nouveau_wait_for_idle(dev);
@@ -76,34 +74,36 @@ nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
76{ 74{
77 struct drm_nouveau_private *dev_priv = dev->dev_private; 75 struct drm_nouveau_private *dev_priv = dev->dev_private;
78 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; 76 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
79 struct nouveau_tile_reg *tile = dev_priv->tile.reg, *found = NULL; 77 struct nouveau_tile_reg *found = NULL;
80 int i; 78 unsigned long i, flags;
81 79
82 spin_lock(&dev_priv->tile.lock); 80 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
83 81
84 for (i = 0; i < pfb->num_tiles; i++) { 82 for (i = 0; i < pfb->num_tiles; i++) {
85 if (tile[i].used) 83 struct nouveau_tile_reg *tile = &dev_priv->tile[i];
84
85 if (tile->used)
86 /* Tile region in use. */ 86 /* Tile region in use. */
87 continue; 87 continue;
88 88
89 if (tile[i].fence && 89 if (tile->fence &&
90 !nouveau_fence_signalled(tile[i].fence, NULL)) 90 !nouveau_fence_signalled(tile->fence, NULL))
91 /* Pending tile region. */ 91 /* Pending tile region. */
92 continue; 92 continue;
93 93
94 if (max(tile[i].addr, addr) < 94 if (max(tile->addr, addr) <
95 min(tile[i].addr + tile[i].size, addr + size)) 95 min(tile->addr + tile->size, addr + size))
96 /* Kill an intersecting tile region. */ 96 /* Kill an intersecting tile region. */
97 nv10_mem_set_region_tiling(dev, i, 0, 0, 0); 97 nv10_mem_set_region_tiling(dev, i, 0, 0, 0);
98 98
99 if (pitch && !found) { 99 if (pitch && !found) {
100 /* Free tile region. */ 100 /* Free tile region. */
101 nv10_mem_set_region_tiling(dev, i, addr, size, pitch); 101 nv10_mem_set_region_tiling(dev, i, addr, size, pitch);
102 found = &tile[i]; 102 found = tile;
103 } 103 }
104 } 104 }
105 105
106 spin_unlock(&dev_priv->tile.lock); 106 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
107 107
108 return found; 108 return found;
109} 109}
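
nv10_mem_set_tiling() above decides whether an existing tile region conflicts with the requested one using the usual half-open interval test: two ranges [a, a+asize) and [b, b+bsize) intersect exactly when max(a, b) < min(a+asize, b+bsize). A standalone restatement (the helper name is illustrative, not from this patch):

static bool ranges_overlap(u32 a, u32 asize, u32 b, u32 bsize)
{
	/* e.g. [0x1000,0x3000) and [0x2000,0x3000): max(0x1000,0x2000)
	 * = 0x2000 < min(0x3000,0x3000) = 0x3000, so they overlap */
	return max(a, b) < min(a + asize, b + bsize);
}
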
@@ -169,8 +169,9 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
169 virt += (end - pte); 169 virt += (end - pte);
170 170
171 while (pte < end) { 171 while (pte < end) {
172 nv_wo32(dev, pgt, pte++, offset_l); 172 nv_wo32(pgt, (pte * 4) + 0, offset_l);
173 nv_wo32(dev, pgt, pte++, offset_h); 173 nv_wo32(pgt, (pte * 4) + 4, offset_h);
174 pte += 2;
174 } 175 }
175 } 176 }
176 } 177 }
@@ -203,8 +204,10 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
203 pages -= (end - pte); 204 pages -= (end - pte);
204 virt += (end - pte) << 15; 205 virt += (end - pte) << 15;
205 206
206 while (pte < end) 207 while (pte < end) {
207 nv_wo32(dev, pgt, pte++, 0); 208 nv_wo32(pgt, (pte * 4), 0);
209 pte++;
210 }
208 } 211 }
209 dev_priv->engine.instmem.flush(dev); 212 dev_priv->engine.instmem.flush(dev);
210 213
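
Both page-table hunks reflect the same interface change: nv_wo32() now takes the gpuobj directly and a byte offset rather than (dev, obj, dword index), so the loops multiply the PTE index by four and step it explicitly. Equivalence sketch (the helper name is illustrative):

static inline void
write_pte_pair(struct nouveau_gpuobj *pgt, u32 pte, u32 lo, u32 hi)
{
	/* old API: nv_wo32(dev, pgt, pte, lo);  pte counted dwords */
	nv_wo32(pgt, (pte * 4) + 0, lo);	/* new API counts bytes */
	nv_wo32(pgt, (pte * 4) + 4, hi);
}
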
@@ -218,7 +221,7 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
218 * Cleanup everything 221 * Cleanup everything
219 */ 222 */
220void 223void
221nouveau_mem_close(struct drm_device *dev) 224nouveau_mem_vram_fini(struct drm_device *dev)
222{ 225{
223 struct drm_nouveau_private *dev_priv = dev->dev_private; 226 struct drm_nouveau_private *dev_priv = dev->dev_private;
224 227
@@ -229,6 +232,19 @@ nouveau_mem_close(struct drm_device *dev)
229 232
230 nouveau_ttm_global_release(dev_priv); 233 nouveau_ttm_global_release(dev_priv);
231 234
235 if (dev_priv->fb_mtrr >= 0) {
236 drm_mtrr_del(dev_priv->fb_mtrr,
237 pci_resource_start(dev->pdev, 1),
238 pci_resource_len(dev->pdev, 1), DRM_MTRR_WC);
239 dev_priv->fb_mtrr = -1;
240 }
241}
242
243void
244nouveau_mem_gart_fini(struct drm_device *dev)
245{
246 nouveau_sgdma_takedown(dev);
247
232 if (drm_core_has_AGP(dev) && dev->agp) { 248 if (drm_core_has_AGP(dev) && dev->agp) {
233 struct drm_agp_mem *entry, *tempe; 249 struct drm_agp_mem *entry, *tempe;
234 250
@@ -248,13 +264,6 @@ nouveau_mem_close(struct drm_device *dev)
248 dev->agp->acquired = 0; 264 dev->agp->acquired = 0;
249 dev->agp->enabled = 0; 265 dev->agp->enabled = 0;
250 } 266 }
251
252 if (dev_priv->fb_mtrr) {
253 drm_mtrr_del(dev_priv->fb_mtrr,
254 pci_resource_start(dev->pdev, 1),
255 pci_resource_len(dev->pdev, 1), DRM_MTRR_WC);
256 dev_priv->fb_mtrr = -1;
257 }
258} 267}
259 268
260static uint32_t 269static uint32_t
@@ -305,8 +314,62 @@ nouveau_mem_detect_nforce(struct drm_device *dev)
305 return 0; 314 return 0;
306} 315}
307 316
308/* returns the amount of FB ram in bytes */ 317static void
309int 318nv50_vram_preinit(struct drm_device *dev)
319{
320 struct drm_nouveau_private *dev_priv = dev->dev_private;
321 int i, parts, colbits, rowbitsa, rowbitsb, banks;
322 u64 rowsize, predicted;
323 u32 r0, r4, rt, ru;
324
325 r0 = nv_rd32(dev, 0x100200);
326 r4 = nv_rd32(dev, 0x100204);
327 rt = nv_rd32(dev, 0x100250);
328 ru = nv_rd32(dev, 0x001540);
329 NV_DEBUG(dev, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
330
331 for (i = 0, parts = 0; i < 8; i++) {
332 if (ru & (0x00010000 << i))
333 parts++;
334 }
335
336 colbits = (r4 & 0x0000f000) >> 12;
337 rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
338 rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
339 banks = ((r4 & 0x01000000) ? 8 : 4);
340
341 rowsize = parts * banks * (1 << colbits) * 8;
342 predicted = rowsize << rowbitsa;
343 if (r0 & 0x00000004)
344 predicted += rowsize << rowbitsb;
345
346 if (predicted != dev_priv->vram_size) {
347 NV_WARN(dev, "memory controller reports %dMiB VRAM\n",
348 (u32)(dev_priv->vram_size >> 20));
349 NV_WARN(dev, "we calculated %dMiB VRAM\n",
350 (u32)(predicted >> 20));
351 }
352
353 dev_priv->vram_rblock_size = rowsize >> 12;
354 if (rt & 1)
355 dev_priv->vram_rblock_size *= 3;
356
357 NV_DEBUG(dev, "rblock %lld bytes\n",
358 (u64)dev_priv->vram_rblock_size << 12);
359}
360
361static void
362nvaa_vram_preinit(struct drm_device *dev)
363{
364 struct drm_nouveau_private *dev_priv = dev->dev_private;
365
366 /* To our knowledge, there's no large-scale reordering of pages
367 * that occurs on IGP chipsets.
368 */
369 dev_priv->vram_rblock_size = 1;
370}
371
372static int
310nouveau_mem_detect(struct drm_device *dev) 373nouveau_mem_detect(struct drm_device *dev)
311{ 374{
312 struct drm_nouveau_private *dev_priv = dev->dev_private; 375 struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -325,9 +388,18 @@ nouveau_mem_detect(struct drm_device *dev)
325 dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA); 388 dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA);
326 dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32; 389 dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
327 dev_priv->vram_size &= 0xffffffff00ll; 390 dev_priv->vram_size &= 0xffffffff00ll;
328 if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) { 391
392 switch (dev_priv->chipset) {
393 case 0xaa:
394 case 0xac:
395 case 0xaf:
329 dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10); 396 dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10);
330 dev_priv->vram_sys_base <<= 12; 397 dev_priv->vram_sys_base <<= 12;
398 nvaa_vram_preinit(dev);
399 break;
400 default:
401 nv50_vram_preinit(dev);
402 break;
331 } 403 }
332 } else { 404 } else {
333 dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20; 405 dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20;
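
nv50_vram_preinit() above predicts the VRAM size from the memory-controller configuration and derives vram_rblock_size, the row granularity later used when placing buffers. A worked example with illustrative values (not taken from any real board):

/*
 * parts    = 4      four partition-enable bits set in 0x001540[23:16]
 * colbits  = 9, banks = 4, rowbitsa = 12      (from 0x100204)
 * rowsize   = parts * banks * (1 << colbits) * 8 = 65536 bytes
 * predicted = rowsize << rowbitsa               = 256 MiB
 * vram_rblock_size = rowsize >> 12              = 16 (4 KiB units),
 *                    tripled if 0x100250 bit 0 is set
 */
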
@@ -345,6 +417,33 @@ nouveau_mem_detect(struct drm_device *dev)
345 return -ENOMEM; 417 return -ENOMEM;
346} 418}
347 419
420#if __OS_HAS_AGP
421static unsigned long
422get_agp_mode(struct drm_device *dev, unsigned long mode)
423{
424 struct drm_nouveau_private *dev_priv = dev->dev_private;
425
426 /*
427 * Fast writes (FW) seem to be broken on nv18; they make the card
428 * lock up randomly.
429 */
430 if (dev_priv->chipset == 0x18)
431 mode &= ~PCI_AGP_COMMAND_FW;
432
433 /*
434 * AGP mode set in the command line.
435 */
436 if (nouveau_agpmode > 0) {
437 bool agpv3 = mode & 0x8;
438 int rate = agpv3 ? nouveau_agpmode / 4 : nouveau_agpmode;
439
440 mode = (mode & ~0x7) | (rate & 0x7);
441 }
442
443 return mode;
444}
445#endif
446
348int 447int
349nouveau_mem_reset_agp(struct drm_device *dev) 448nouveau_mem_reset_agp(struct drm_device *dev)
350{ 449{
@@ -355,7 +454,8 @@ nouveau_mem_reset_agp(struct drm_device *dev)
355 /* First of all, disable fast writes, otherwise if it's 454 /* First of all, disable fast writes, otherwise if it's
356 * already enabled in the AGP bridge and we disable the card's 455 * already enabled in the AGP bridge and we disable the card's
357 * AGP controller we might be locking ourselves out of it. */ 456 * AGP controller we might be locking ourselves out of it. */
358 if (nv_rd32(dev, NV04_PBUS_PCI_NV_19) & PCI_AGP_COMMAND_FW) { 457 if ((nv_rd32(dev, NV04_PBUS_PCI_NV_19) |
458 dev->agp->mode) & PCI_AGP_COMMAND_FW) {
359 struct drm_agp_info info; 459 struct drm_agp_info info;
360 struct drm_agp_mode mode; 460 struct drm_agp_mode mode;
361 461
@@ -363,7 +463,7 @@ nouveau_mem_reset_agp(struct drm_device *dev)
363 if (ret) 463 if (ret)
364 return ret; 464 return ret;
365 465
366 mode.mode = info.mode & ~PCI_AGP_COMMAND_FW; 466 mode.mode = get_agp_mode(dev, info.mode) & ~PCI_AGP_COMMAND_FW;
367 ret = drm_agp_enable(dev, mode); 467 ret = drm_agp_enable(dev, mode);
368 if (ret) 468 if (ret)
369 return ret; 469 return ret;
@@ -418,7 +518,7 @@ nouveau_mem_init_agp(struct drm_device *dev)
418 } 518 }
419 519
420 /* see agp.h for the AGPSTAT_* modes available */ 520 /* see agp.h for the AGPSTAT_* modes available */
421 mode.mode = info.mode; 521 mode.mode = get_agp_mode(dev, info.mode);
422 ret = drm_agp_enable(dev, mode); 522 ret = drm_agp_enable(dev, mode);
423 if (ret) { 523 if (ret) {
424 NV_ERROR(dev, "Unable to enable AGP: %d\n", ret); 524 NV_ERROR(dev, "Unable to enable AGP: %d\n", ret);
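
get_agp_mode() above also centralises the nouveau_agpmode handling: on an AGP 3.0 bridge (mode bit 3 set) the rate field counts in multiples of 4x, so the module parameter is divided by four before being masked in. A worked example (values illustrative):

/*
 * nouveau_agpmode=8 on AGP 3.0: rate = 8 / 4 = 2
 *     mode = (mode & ~0x7) | 2      -> 8x in the v3 encoding
 * nouveau_agpmode=4 on AGP 2.0: the parameter is used as-is
 *     mode = (mode & ~0x7) | 4      -> 4x
 */
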
@@ -433,24 +533,27 @@ nouveau_mem_init_agp(struct drm_device *dev)
433} 533}
434 534
435int 535int
436nouveau_mem_init(struct drm_device *dev) 536nouveau_mem_vram_init(struct drm_device *dev)
437{ 537{
438 struct drm_nouveau_private *dev_priv = dev->dev_private; 538 struct drm_nouveau_private *dev_priv = dev->dev_private;
439 struct ttm_bo_device *bdev = &dev_priv->ttm.bdev; 539 struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
440 int ret, dma_bits = 32; 540 int ret, dma_bits;
441
442 dev_priv->fb_phys = pci_resource_start(dev->pdev, 1);
443 dev_priv->gart_info.type = NOUVEAU_GART_NONE;
444 541
445 if (dev_priv->card_type >= NV_50 && 542 if (dev_priv->card_type >= NV_50 &&
446 pci_dma_supported(dev->pdev, DMA_BIT_MASK(40))) 543 pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
447 dma_bits = 40; 544 dma_bits = 40;
545 else
546 dma_bits = 32;
448 547
449 ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits)); 548 ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
450 if (ret) { 549 if (ret)
451 NV_ERROR(dev, "Error setting DMA mask: %d\n", ret);
452 return ret; 550 return ret;
453 } 551
552 ret = nouveau_mem_detect(dev);
553 if (ret)
554 return ret;
555
556 dev_priv->fb_phys = pci_resource_start(dev->pdev, 1);
454 557
455 ret = nouveau_ttm_global_init(dev_priv); 558 ret = nouveau_ttm_global_init(dev_priv);
456 if (ret) 559 if (ret)
@@ -465,8 +568,6 @@ nouveau_mem_init(struct drm_device *dev)
465 return ret; 568 return ret;
466 } 569 }
467 570
468 spin_lock_init(&dev_priv->tile.lock);
469
470 dev_priv->fb_available_size = dev_priv->vram_size; 571 dev_priv->fb_available_size = dev_priv->vram_size;
471 dev_priv->fb_mappable_pages = dev_priv->fb_available_size; 572 dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
472 if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1)) 573 if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
@@ -474,7 +575,16 @@ nouveau_mem_init(struct drm_device *dev)
474 pci_resource_len(dev->pdev, 1); 575 pci_resource_len(dev->pdev, 1);
475 dev_priv->fb_mappable_pages >>= PAGE_SHIFT; 576 dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
476 577
477 /* remove reserved space at end of vram from available amount */ 578 /* reserve space at end of VRAM for PRAMIN */
579 if (dev_priv->chipset == 0x40 || dev_priv->chipset == 0x47 ||
580 dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b)
581 dev_priv->ramin_rsvd_vram = (2 * 1024 * 1024);
582 else
583 if (dev_priv->card_type >= NV_40)
584 dev_priv->ramin_rsvd_vram = (1 * 1024 * 1024);
585 else
586 dev_priv->ramin_rsvd_vram = (512 * 1024);
587
478 dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram; 588 dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
479 dev_priv->fb_aper_free = dev_priv->fb_available_size; 589 dev_priv->fb_aper_free = dev_priv->fb_available_size;
480 590
@@ -495,9 +605,23 @@ nouveau_mem_init(struct drm_device *dev)
495 nouveau_bo_ref(NULL, &dev_priv->vga_ram); 605 nouveau_bo_ref(NULL, &dev_priv->vga_ram);
496 } 606 }
497 607
498 /* GART */ 608 dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
609 pci_resource_len(dev->pdev, 1),
610 DRM_MTRR_WC);
611 return 0;
612}
613
614int
615nouveau_mem_gart_init(struct drm_device *dev)
616{
617 struct drm_nouveau_private *dev_priv = dev->dev_private;
618 struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
619 int ret;
620
621 dev_priv->gart_info.type = NOUVEAU_GART_NONE;
622
499#if !defined(__powerpc__) && !defined(__ia64__) 623#if !defined(__powerpc__) && !defined(__ia64__)
500 if (drm_device_is_agp(dev) && dev->agp && !nouveau_noagp) { 624 if (drm_device_is_agp(dev) && dev->agp && nouveau_agpmode) {
501 ret = nouveau_mem_init_agp(dev); 625 ret = nouveau_mem_init_agp(dev);
502 if (ret) 626 if (ret)
503 NV_ERROR(dev, "Error initialising AGP: %d\n", ret); 627 NV_ERROR(dev, "Error initialising AGP: %d\n", ret);
@@ -523,11 +647,150 @@ nouveau_mem_init(struct drm_device *dev)
523 return ret; 647 return ret;
524 } 648 }
525 649
526 dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
527 pci_resource_len(dev->pdev, 1),
528 DRM_MTRR_WC);
529
530 return 0; 650 return 0;
531} 651}
532 652
653void
654nouveau_mem_timing_init(struct drm_device *dev)
655{
656 struct drm_nouveau_private *dev_priv = dev->dev_private;
657 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
658 struct nouveau_pm_memtimings *memtimings = &pm->memtimings;
659 struct nvbios *bios = &dev_priv->vbios;
660 struct bit_entry P;
661 u8 tUNK_0, tUNK_1, tUNK_2;
662 u8 tRP; /* Byte 3 */
663 u8 tRAS; /* Byte 5 */
664 u8 tRFC; /* Byte 7 */
665 u8 tRC; /* Byte 9 */
666 u8 tUNK_10, tUNK_11, tUNK_12, tUNK_13, tUNK_14;
667 u8 tUNK_18, tUNK_19, tUNK_20, tUNK_21;
668 u8 *mem = NULL, *entry;
669 int i, recordlen, entries;
670
671 if (bios->type == NVBIOS_BIT) {
672 if (bit_table(dev, 'P', &P))
673 return;
674
675 if (P.version == 1)
676 mem = ROMPTR(bios, P.data[4]);
677 else
678 if (P.version == 2)
679 mem = ROMPTR(bios, P.data[8]);
680 else {
681 NV_WARN(dev, "unknown mem for BIT P %d\n", P.version);
682 }
683 } else {
684 NV_DEBUG(dev, "BMP version too old for memory\n");
685 return;
686 }
687
688 if (!mem) {
689 NV_DEBUG(dev, "memory timing table pointer invalid\n");
690 return;
691 }
533 692
693 if (mem[0] != 0x10) {
694 NV_WARN(dev, "memory timing table 0x%02x unknown\n", mem[0]);
695 return;
696 }
697
698 /* validate record length */
699 entries = mem[2];
700 recordlen = mem[3];
701 if (recordlen < 15) {
702 NV_ERROR(dev, "mem timing table length unknown: %d\n", mem[3]);
703 return;
704 }
705
706 /* parse vbios entries into common format */
707 memtimings->timing =
708 kcalloc(entries, sizeof(*memtimings->timing), GFP_KERNEL);
709 if (!memtimings->timing)
710 return;
711
712 entry = mem + mem[1];
713 for (i = 0; i < entries; i++, entry += recordlen) {
714 struct nouveau_pm_memtiming *timing = &pm->memtimings.timing[i];
715 if (entry[0] == 0)
716 continue;
717
718 tUNK_18 = 1;
719 tUNK_19 = 1;
720 tUNK_20 = 0;
721 tUNK_21 = 0;
722 switch (MIN(recordlen, 21)) {
723 case 21:
724 tUNK_21 = entry[21];
725 case 20:
726 tUNK_20 = entry[20];
727 case 19:
728 tUNK_19 = entry[19];
729 case 18:
730 tUNK_18 = entry[18];
731 default:
732 tUNK_0 = entry[0];
733 tUNK_1 = entry[1];
734 tUNK_2 = entry[2];
735 tRP = entry[3];
736 tRAS = entry[5];
737 tRFC = entry[7];
738 tRC = entry[9];
739 tUNK_10 = entry[10];
740 tUNK_11 = entry[11];
741 tUNK_12 = entry[12];
742 tUNK_13 = entry[13];
743 tUNK_14 = entry[14];
744 break;
745 }
746
747 timing->reg_100220 = (tRC << 24 | tRFC << 16 | tRAS << 8 | tRP);
748
749 /* XXX: I don't trust the -1's and +1's... they must come
750 * from somewhere! */
751 timing->reg_100224 = ((tUNK_0 + tUNK_19 + 1) << 24 |
752 tUNK_18 << 16 |
753 (tUNK_1 + tUNK_19 + 1) << 8 |
754 (tUNK_2 - 1));
755
756 timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10);
757 if (recordlen > 19) {
758 timing->reg_100228 += (tUNK_19 - 1) << 24;
759 } else {
760 timing->reg_100228 += tUNK_12 << 24;
761 }
762
763 /* XXX: reg_10022c */
764
765 timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 |
766 tUNK_13 << 8 | tUNK_13);
767
768 /* XXX: +6? */
769 timing->reg_100234 = (tRAS << 24 | (tUNK_19 + 6) << 8 | tRC);
770 if (tUNK_10 > tUNK_11) {
771 timing->reg_100234 += tUNK_10 << 16;
772 } else {
773 timing->reg_100234 += tUNK_11 << 16;
774 }
775
776 /* XXX: reg_100238, reg_10023c */
777 NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i,
778 timing->reg_100220, timing->reg_100224,
779 timing->reg_100228, timing->reg_10022c);
780 NV_DEBUG(dev, " 230: %08x %08x %08x %08x\n",
781 timing->reg_100230, timing->reg_100234,
782 timing->reg_100238, timing->reg_10023c);
783 }
784
785 memtimings->nr_timing = entries;
786 memtimings->supported = true;
787}
788
789void
790nouveau_mem_timing_fini(struct drm_device *dev)
791{
792 struct drm_nouveau_private *dev_priv = dev->dev_private;
793 struct nouveau_pm_memtimings *mem = &dev_priv->engine.pm.memtimings;
794
795 kfree(mem->timing);
796}
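
Each record nouveau_mem_timing_init() above accepts is condensed into the words the driver will later program into the 0x100220-0x10023c timing registers; reg_100220, for instance, packs four byte-wide timings. A worked packing example with illustrative timing values:

/*
 * tRC = 0x26, tRFC = 0x17, tRAS = 0x14, tRP = 0x0b
 * reg_100220 = 0x26 << 24 | 0x17 << 16 | 0x14 << 8 | 0x0b
 *            = 0x2617140b
 */
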
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 3c9964a8fbad..d670839cb34d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -28,6 +28,7 @@
28#include "drmP.h" 28#include "drmP.h"
29#include "drm.h" 29#include "drm.h"
30#include "nouveau_drv.h" 30#include "nouveau_drv.h"
31#include "nouveau_ramht.h"
31 32
32int 33int
33nouveau_notifier_init_channel(struct nouveau_channel *chan) 34nouveau_notifier_init_channel(struct nouveau_channel *chan)
@@ -113,7 +114,7 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
113 return -ENOMEM; 114 return -ENOMEM;
114 } 115 }
115 116
116 offset = chan->notifier_bo->bo.mem.mm_node->start << PAGE_SHIFT; 117 offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
117 if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM) { 118 if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM) {
118 target = NV_DMA_TARGET_VIDMEM; 119 target = NV_DMA_TARGET_VIDMEM;
119 } else 120 } else
@@ -147,11 +148,11 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
147 nobj->dtor = nouveau_notifier_gpuobj_dtor; 148 nobj->dtor = nouveau_notifier_gpuobj_dtor;
148 nobj->priv = mem; 149 nobj->priv = mem;
149 150
150 ret = nouveau_gpuobj_ref_add(dev, chan, handle, nobj, NULL); 151 ret = nouveau_ramht_insert(chan, handle, nobj);
152 nouveau_gpuobj_ref(NULL, &nobj);
151 if (ret) { 153 if (ret) {
152 nouveau_gpuobj_del(dev, &nobj);
153 drm_mm_put_block(mem); 154 drm_mm_put_block(mem);
154 NV_ERROR(dev, "Error referencing notifier ctxdma: %d\n", ret); 155 NV_ERROR(dev, "Error adding notifier to ramht: %d\n", ret);
155 return ret; 156 return ret;
156 } 157 }
157 158
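
The notifier path above illustrates the ownership rule of the new RAMHT API: nouveau_ramht_insert() takes its own reference on the object when it succeeds, so the creator always drops its local reference immediately afterwards, and a failed insert therefore frees the object. The pattern, restated as a sketch:

ret = nouveau_ramht_insert(chan, handle, nobj);
nouveau_gpuobj_ref(NULL, &nobj);	/* drop the creator's reference */
if (ret)
	return ret;			/* insert failed: object already gone */
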
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index b6bcb254f4ab..896cf8634144 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -34,6 +34,7 @@
34#include "drm.h" 34#include "drm.h"
35#include "nouveau_drv.h" 35#include "nouveau_drv.h"
36#include "nouveau_drm.h" 36#include "nouveau_drm.h"
37#include "nouveau_ramht.h"
37 38
38/* NVidia uses context objects to drive drawing operations. 39/* NVidia uses context objects to drive drawing operations.
39 40
@@ -65,137 +66,6 @@
65 The key into the hash table depends on the object handle and channel id and 66 The key into the hash table depends on the object handle and channel id and
66 is given as: 67 is given as:
67*/ 68*/
68static uint32_t
69nouveau_ramht_hash_handle(struct drm_device *dev, int channel, uint32_t handle)
70{
71 struct drm_nouveau_private *dev_priv = dev->dev_private;
72 uint32_t hash = 0;
73 int i;
74
75 NV_DEBUG(dev, "ch%d handle=0x%08x\n", channel, handle);
76
77 for (i = 32; i > 0; i -= dev_priv->ramht_bits) {
78 hash ^= (handle & ((1 << dev_priv->ramht_bits) - 1));
79 handle >>= dev_priv->ramht_bits;
80 }
81
82 if (dev_priv->card_type < NV_50)
83 hash ^= channel << (dev_priv->ramht_bits - 4);
84 hash <<= 3;
85
86 NV_DEBUG(dev, "hash=0x%08x\n", hash);
87 return hash;
88}
89
90static int
91nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht,
92 uint32_t offset)
93{
94 struct drm_nouveau_private *dev_priv = dev->dev_private;
95 uint32_t ctx = nv_ro32(dev, ramht, (offset + 4)/4);
96
97 if (dev_priv->card_type < NV_40)
98 return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0);
99 return (ctx != 0);
100}
101
102static int
103nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
104{
105 struct drm_nouveau_private *dev_priv = dev->dev_private;
106 struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
107 struct nouveau_channel *chan = ref->channel;
108 struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL;
109 uint32_t ctx, co, ho;
110
111 if (!ramht) {
112 NV_ERROR(dev, "No hash table!\n");
113 return -EINVAL;
114 }
115
116 if (dev_priv->card_type < NV_40) {
117 ctx = NV_RAMHT_CONTEXT_VALID | (ref->instance >> 4) |
118 (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
119 (ref->gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
120 } else
121 if (dev_priv->card_type < NV_50) {
122 ctx = (ref->instance >> 4) |
123 (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
124 (ref->gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
125 } else {
126 if (ref->gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
127 ctx = (ref->instance << 10) | 2;
128 } else {
129 ctx = (ref->instance >> 4) |
130 ((ref->gpuobj->engine <<
131 NV40_RAMHT_CONTEXT_ENGINE_SHIFT));
132 }
133 }
134
135 co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle);
136 do {
137 if (!nouveau_ramht_entry_valid(dev, ramht, co)) {
138 NV_DEBUG(dev,
139 "insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
140 chan->id, co, ref->handle, ctx);
141 nv_wo32(dev, ramht, (co + 0)/4, ref->handle);
142 nv_wo32(dev, ramht, (co + 4)/4, ctx);
143
144 list_add_tail(&ref->list, &chan->ramht_refs);
145 instmem->flush(dev);
146 return 0;
147 }
148 NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n",
149 chan->id, co, nv_ro32(dev, ramht, co/4));
150
151 co += 8;
152 if (co >= dev_priv->ramht_size)
153 co = 0;
154 } while (co != ho);
155
156 NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id);
157 return -ENOMEM;
158}
159
160static void
161nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
162{
163 struct drm_nouveau_private *dev_priv = dev->dev_private;
164 struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
165 struct nouveau_channel *chan = ref->channel;
166 struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL;
167 uint32_t co, ho;
168
169 if (!ramht) {
170 NV_ERROR(dev, "No hash table!\n");
171 return;
172 }
173
174 co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle);
175 do {
176 if (nouveau_ramht_entry_valid(dev, ramht, co) &&
177 (ref->handle == nv_ro32(dev, ramht, (co/4)))) {
178 NV_DEBUG(dev,
179 "remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
180 chan->id, co, ref->handle,
181 nv_ro32(dev, ramht, (co + 4)));
182 nv_wo32(dev, ramht, (co + 0)/4, 0x00000000);
183 nv_wo32(dev, ramht, (co + 4)/4, 0x00000000);
184
185 list_del(&ref->list);
186 instmem->flush(dev);
187 return;
188 }
189
190 co += 8;
191 if (co >= dev_priv->ramht_size)
192 co = 0;
193 } while (co != ho);
194 list_del(&ref->list);
195
196 NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n",
197 chan->id, ref->handle);
198}
199 69
200int 70int
201nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, 71nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
@@ -205,7 +75,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
205 struct drm_nouveau_private *dev_priv = dev->dev_private; 75 struct drm_nouveau_private *dev_priv = dev->dev_private;
206 struct nouveau_engine *engine = &dev_priv->engine; 76 struct nouveau_engine *engine = &dev_priv->engine;
207 struct nouveau_gpuobj *gpuobj; 77 struct nouveau_gpuobj *gpuobj;
208 struct drm_mm *pramin = NULL; 78 struct drm_mm_node *ramin = NULL;
209 int ret; 79 int ret;
210 80
211 NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n", 81 NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
@@ -218,69 +88,102 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
218 if (!gpuobj) 88 if (!gpuobj)
219 return -ENOMEM; 89 return -ENOMEM;
220 NV_DEBUG(dev, "gpuobj %p\n", gpuobj); 90 NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
91 gpuobj->dev = dev;
221 gpuobj->flags = flags; 92 gpuobj->flags = flags;
222 gpuobj->im_channel = chan; 93 kref_init(&gpuobj->refcount);
94 gpuobj->size = size;
223 95
96 spin_lock(&dev_priv->ramin_lock);
224 list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); 97 list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
98 spin_unlock(&dev_priv->ramin_lock);
225 99
226 /* Choose between global instmem heap, and per-channel private
227 * instmem heap. On <NV50 allow requests for private instmem
228 * to be satisfied from global heap if no per-channel area
229 * available.
230 */
231 if (chan) { 100 if (chan) {
232 NV_DEBUG(dev, "channel heap\n"); 101 NV_DEBUG(dev, "channel heap\n");
233 pramin = &chan->ramin_heap; 102
103 ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
104 if (ramin)
105 ramin = drm_mm_get_block(ramin, size, align);
106
107 if (!ramin) {
108 nouveau_gpuobj_ref(NULL, &gpuobj);
109 return -ENOMEM;
110 }
234 } else { 111 } else {
235 NV_DEBUG(dev, "global heap\n"); 112 NV_DEBUG(dev, "global heap\n");
236 pramin = &dev_priv->ramin_heap;
237 113
114 /* allocate backing pages, sets vinst */
238 ret = engine->instmem.populate(dev, gpuobj, &size); 115 ret = engine->instmem.populate(dev, gpuobj, &size);
239 if (ret) { 116 if (ret) {
240 nouveau_gpuobj_del(dev, &gpuobj); 117 nouveau_gpuobj_ref(NULL, &gpuobj);
241 return ret; 118 return ret;
242 } 119 }
243 }
244 120
245 /* Allocate a chunk of the PRAMIN aperture */ 121 /* try and get aperture space */
246 gpuobj->im_pramin = drm_mm_search_free(pramin, size, align, 0); 122 do {
247 if (gpuobj->im_pramin) 123 if (drm_mm_pre_get(&dev_priv->ramin_heap))
248 gpuobj->im_pramin = drm_mm_get_block(gpuobj->im_pramin, size, align); 124 return -ENOMEM;
125
126 spin_lock(&dev_priv->ramin_lock);
127 ramin = drm_mm_search_free(&dev_priv->ramin_heap, size,
128 align, 0);
129 if (ramin == NULL) {
130 spin_unlock(&dev_priv->ramin_lock);
131 nouveau_gpuobj_ref(NULL, &gpuobj);
132 return -ENOMEM;
133 }
249 134
250 if (!gpuobj->im_pramin) { 135 ramin = drm_mm_get_block_atomic(ramin, size, align);
251 nouveau_gpuobj_del(dev, &gpuobj); 136 spin_unlock(&dev_priv->ramin_lock);
252 return -ENOMEM; 137 } while (ramin == NULL);
138
139 /* on nv50 it's ok to fail, we have a fallback path */
140 if (!ramin && dev_priv->card_type < NV_50) {
141 nouveau_gpuobj_ref(NULL, &gpuobj);
142 return -ENOMEM;
143 }
253 } 144 }
254 145
255 if (!chan) { 146 /* if we got a chunk of the aperture, map pages into it */
147 gpuobj->im_pramin = ramin;
148 if (!chan && gpuobj->im_pramin && dev_priv->ramin_available) {
256 ret = engine->instmem.bind(dev, gpuobj); 149 ret = engine->instmem.bind(dev, gpuobj);
257 if (ret) { 150 if (ret) {
258 nouveau_gpuobj_del(dev, &gpuobj); 151 nouveau_gpuobj_ref(NULL, &gpuobj);
259 return ret; 152 return ret;
260 } 153 }
261 } 154 }
262 155
156 /* calculate the various different addresses for the object */
157 if (chan) {
158 gpuobj->pinst = chan->ramin->pinst;
159 if (gpuobj->pinst != ~0)
160 gpuobj->pinst += gpuobj->im_pramin->start;
161
162 if (dev_priv->card_type < NV_50) {
163 gpuobj->cinst = gpuobj->pinst;
164 } else {
165 gpuobj->cinst = gpuobj->im_pramin->start;
166 gpuobj->vinst = gpuobj->im_pramin->start +
167 chan->ramin->vinst;
168 }
169 } else {
170 if (gpuobj->im_pramin)
171 gpuobj->pinst = gpuobj->im_pramin->start;
172 else
173 gpuobj->pinst = ~0;
174 gpuobj->cinst = 0xdeadbeef;
175 }
176
263 if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { 177 if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
264 int i; 178 int i;
265 179
266 for (i = 0; i < gpuobj->im_pramin->size; i += 4) 180 for (i = 0; i < gpuobj->size; i += 4)
267 nv_wo32(dev, gpuobj, i/4, 0); 181 nv_wo32(gpuobj, i, 0);
268 engine->instmem.flush(dev); 182 engine->instmem.flush(dev);
269 } 183 }
270 184
271 *gpuobj_ret = gpuobj;
272 return 0;
273}
274
275int
276nouveau_gpuobj_early_init(struct drm_device *dev)
277{
278 struct drm_nouveau_private *dev_priv = dev->dev_private;
279
280 NV_DEBUG(dev, "\n");
281
282 INIT_LIST_HEAD(&dev_priv->gpuobj_list);
283 185
186 *gpuobj_ret = gpuobj;
284 return 0; 187 return 0;
285} 188}
286 189
@@ -288,18 +191,12 @@ int
288nouveau_gpuobj_init(struct drm_device *dev) 191nouveau_gpuobj_init(struct drm_device *dev)
289{ 192{
290 struct drm_nouveau_private *dev_priv = dev->dev_private; 193 struct drm_nouveau_private *dev_priv = dev->dev_private;
291 int ret;
292 194
293 NV_DEBUG(dev, "\n"); 195 NV_DEBUG(dev, "\n");
294 196
295 if (dev_priv->card_type < NV_50) { 197 INIT_LIST_HEAD(&dev_priv->gpuobj_list);
296 ret = nouveau_gpuobj_new_fake(dev, 198 spin_lock_init(&dev_priv->ramin_lock);
297 dev_priv->ramht_offset, ~0, dev_priv->ramht_size, 199 dev_priv->ramin_base = ~0;
298 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ALLOW_NO_REFS,
299 &dev_priv->ramht, NULL);
300 if (ret)
301 return ret;
302 }
303 200
304 return 0; 201 return 0;
305} 202}
@@ -311,297 +208,89 @@ nouveau_gpuobj_takedown(struct drm_device *dev)
311 208
312 NV_DEBUG(dev, "\n"); 209 NV_DEBUG(dev, "\n");
313 210
314 nouveau_gpuobj_del(dev, &dev_priv->ramht); 211 BUG_ON(!list_empty(&dev_priv->gpuobj_list));
315} 212}
316 213
317void
318nouveau_gpuobj_late_takedown(struct drm_device *dev)
319{
320 struct drm_nouveau_private *dev_priv = dev->dev_private;
321 struct nouveau_gpuobj *gpuobj = NULL;
322 struct list_head *entry, *tmp;
323
324 NV_DEBUG(dev, "\n");
325
326 list_for_each_safe(entry, tmp, &dev_priv->gpuobj_list) {
327 gpuobj = list_entry(entry, struct nouveau_gpuobj, list);
328
329 NV_ERROR(dev, "gpuobj %p still exists at takedown, refs=%d\n",
330 gpuobj, gpuobj->refcount);
331 gpuobj->refcount = 0;
332 nouveau_gpuobj_del(dev, &gpuobj);
333 }
334}
335 214
336int 215static void
337nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj) 216nouveau_gpuobj_del(struct kref *ref)
338{ 217{
218 struct nouveau_gpuobj *gpuobj =
219 container_of(ref, struct nouveau_gpuobj, refcount);
220 struct drm_device *dev = gpuobj->dev;
339 struct drm_nouveau_private *dev_priv = dev->dev_private; 221 struct drm_nouveau_private *dev_priv = dev->dev_private;
340 struct nouveau_engine *engine = &dev_priv->engine; 222 struct nouveau_engine *engine = &dev_priv->engine;
341 struct nouveau_gpuobj *gpuobj;
342 int i; 223 int i;
343 224
344 NV_DEBUG(dev, "gpuobj %p\n", pgpuobj ? *pgpuobj : NULL); 225 NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
345
346 if (!dev_priv || !pgpuobj || !(*pgpuobj))
347 return -EINVAL;
348 gpuobj = *pgpuobj;
349
350 if (gpuobj->refcount != 0) {
351 NV_ERROR(dev, "gpuobj refcount is %d\n", gpuobj->refcount);
352 return -EINVAL;
353 }
354 226
355 if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) { 227 if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
356 for (i = 0; i < gpuobj->im_pramin->size; i += 4) 228 for (i = 0; i < gpuobj->size; i += 4)
357 nv_wo32(dev, gpuobj, i/4, 0); 229 nv_wo32(gpuobj, i, 0);
358 engine->instmem.flush(dev); 230 engine->instmem.flush(dev);
359 } 231 }
360 232
361 if (gpuobj->dtor) 233 if (gpuobj->dtor)
362 gpuobj->dtor(dev, gpuobj); 234 gpuobj->dtor(dev, gpuobj);
363 235
364 if (gpuobj->im_backing && !(gpuobj->flags & NVOBJ_FLAG_FAKE)) 236 if (gpuobj->im_backing)
365 engine->instmem.clear(dev, gpuobj); 237 engine->instmem.clear(dev, gpuobj);
366 238
367 if (gpuobj->im_pramin) { 239 spin_lock(&dev_priv->ramin_lock);
368 if (gpuobj->flags & NVOBJ_FLAG_FAKE) 240 if (gpuobj->im_pramin)
369 kfree(gpuobj->im_pramin); 241 drm_mm_put_block(gpuobj->im_pramin);
370 else
371 drm_mm_put_block(gpuobj->im_pramin);
372 }
373
374 list_del(&gpuobj->list); 242 list_del(&gpuobj->list);
243 spin_unlock(&dev_priv->ramin_lock);
375 244
376 *pgpuobj = NULL;
377 kfree(gpuobj); 245 kfree(gpuobj);
378 return 0;
379} 246}
380 247
381static int 248void
382nouveau_gpuobj_instance_get(struct drm_device *dev, 249nouveau_gpuobj_ref(struct nouveau_gpuobj *ref, struct nouveau_gpuobj **ptr)
383 struct nouveau_channel *chan,
384 struct nouveau_gpuobj *gpuobj, uint32_t *inst)
385{
386 struct drm_nouveau_private *dev_priv = dev->dev_private;
387 struct nouveau_gpuobj *cpramin;
388
389 /* <NV50 use PRAMIN address everywhere */
390 if (dev_priv->card_type < NV_50) {
391 *inst = gpuobj->im_pramin->start;
392 return 0;
393 }
394
395 if (chan && gpuobj->im_channel != chan) {
396 NV_ERROR(dev, "Channel mismatch: obj %d, ref %d\n",
397 gpuobj->im_channel->id, chan->id);
398 return -EINVAL;
399 }
400
401 /* NV50 channel-local instance */
402 if (chan) {
403 cpramin = chan->ramin->gpuobj;
404 *inst = gpuobj->im_pramin->start - cpramin->im_pramin->start;
405 return 0;
406 }
407
408 /* NV50 global (VRAM) instance */
409 if (!gpuobj->im_channel) {
410 /* ...from global heap */
411 if (!gpuobj->im_backing) {
412 NV_ERROR(dev, "AII, no VRAM backing gpuobj\n");
413 return -EINVAL;
414 }
415 *inst = gpuobj->im_backing_start;
416 return 0;
417 } else {
418 /* ...from local heap */
419 cpramin = gpuobj->im_channel->ramin->gpuobj;
420 *inst = cpramin->im_backing_start +
421 (gpuobj->im_pramin->start - cpramin->im_pramin->start);
422 return 0;
423 }
424
425 return -EINVAL;
426}
427
428int
429nouveau_gpuobj_ref_add(struct drm_device *dev, struct nouveau_channel *chan,
430 uint32_t handle, struct nouveau_gpuobj *gpuobj,
431 struct nouveau_gpuobj_ref **ref_ret)
432{
433 struct drm_nouveau_private *dev_priv = dev->dev_private;
434 struct nouveau_gpuobj_ref *ref;
435 uint32_t instance;
436 int ret;
437
438 NV_DEBUG(dev, "ch%d h=0x%08x gpuobj=%p\n",
439 chan ? chan->id : -1, handle, gpuobj);
440
441 if (!dev_priv || !gpuobj || (ref_ret && *ref_ret != NULL))
442 return -EINVAL;
443
444 if (!chan && !ref_ret)
445 return -EINVAL;
446
447 if (gpuobj->engine == NVOBJ_ENGINE_SW && !gpuobj->im_pramin) {
448 /* sw object */
449 instance = 0x40;
450 } else {
451 ret = nouveau_gpuobj_instance_get(dev, chan, gpuobj, &instance);
452 if (ret)
453 return ret;
454 }
455
456 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
457 if (!ref)
458 return -ENOMEM;
459 INIT_LIST_HEAD(&ref->list);
460 ref->gpuobj = gpuobj;
461 ref->channel = chan;
462 ref->instance = instance;
463
464 if (!ref_ret) {
465 ref->handle = handle;
466
467 ret = nouveau_ramht_insert(dev, ref);
468 if (ret) {
469 kfree(ref);
470 return ret;
471 }
472 } else {
473 ref->handle = ~0;
474 *ref_ret = ref;
475 }
476
477 ref->gpuobj->refcount++;
478 return 0;
479}
480
481int nouveau_gpuobj_ref_del(struct drm_device *dev, struct nouveau_gpuobj_ref **pref)
482{
483 struct nouveau_gpuobj_ref *ref;
484
485 NV_DEBUG(dev, "ref %p\n", pref ? *pref : NULL);
486
487 if (!dev || !pref || *pref == NULL)
488 return -EINVAL;
489 ref = *pref;
490
491 if (ref->handle != ~0)
492 nouveau_ramht_remove(dev, ref);
493
494 if (ref->gpuobj) {
495 ref->gpuobj->refcount--;
496
497 if (ref->gpuobj->refcount == 0) {
498 if (!(ref->gpuobj->flags & NVOBJ_FLAG_ALLOW_NO_REFS))
499 nouveau_gpuobj_del(dev, &ref->gpuobj);
500 }
501 }
502
503 *pref = NULL;
504 kfree(ref);
505 return 0;
506}
507
508int
509nouveau_gpuobj_new_ref(struct drm_device *dev,
510 struct nouveau_channel *oc, struct nouveau_channel *rc,
511 uint32_t handle, uint32_t size, int align,
512 uint32_t flags, struct nouveau_gpuobj_ref **ref)
513{
514 struct nouveau_gpuobj *gpuobj = NULL;
515 int ret;
516
517 ret = nouveau_gpuobj_new(dev, oc, size, align, flags, &gpuobj);
518 if (ret)
519 return ret;
520
521 ret = nouveau_gpuobj_ref_add(dev, rc, handle, gpuobj, ref);
522 if (ret) {
523 nouveau_gpuobj_del(dev, &gpuobj);
524 return ret;
525 }
526
527 return 0;
528}
529
530int
531nouveau_gpuobj_ref_find(struct nouveau_channel *chan, uint32_t handle,
532 struct nouveau_gpuobj_ref **ref_ret)
533{ 250{
534 struct nouveau_gpuobj_ref *ref; 251 if (ref)
535 struct list_head *entry, *tmp; 252 kref_get(&ref->refcount);
536
537 list_for_each_safe(entry, tmp, &chan->ramht_refs) {
538 ref = list_entry(entry, struct nouveau_gpuobj_ref, list);
539 253
540 if (ref->handle == handle) { 254 if (*ptr)
541 if (ref_ret) 255 kref_put(&(*ptr)->refcount, nouveau_gpuobj_del);
542 *ref_ret = ref;
543 return 0;
544 }
545 }
546 256
547 return -EINVAL; 257 *ptr = ref;
548} 258}
549 259
550int 260int
551nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset, 261nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,
552 uint32_t b_offset, uint32_t size, 262 u32 size, u32 flags, struct nouveau_gpuobj **pgpuobj)
553 uint32_t flags, struct nouveau_gpuobj **pgpuobj,
554 struct nouveau_gpuobj_ref **pref)
555{ 263{
556 struct drm_nouveau_private *dev_priv = dev->dev_private; 264 struct drm_nouveau_private *dev_priv = dev->dev_private;
557 struct nouveau_gpuobj *gpuobj = NULL; 265 struct nouveau_gpuobj *gpuobj = NULL;
558 int i; 266 int i;
559 267
560 NV_DEBUG(dev, 268 NV_DEBUG(dev,
561 "p_offset=0x%08x b_offset=0x%08x size=0x%08x flags=0x%08x\n", 269 "pinst=0x%08x vinst=0x%010llx size=0x%08x flags=0x%08x\n",
562 p_offset, b_offset, size, flags); 270 pinst, vinst, size, flags);
563 271
564 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); 272 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
565 if (!gpuobj) 273 if (!gpuobj)
566 return -ENOMEM; 274 return -ENOMEM;
567 NV_DEBUG(dev, "gpuobj %p\n", gpuobj); 275 NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
568 gpuobj->im_channel = NULL; 276 gpuobj->dev = dev;
569 gpuobj->flags = flags | NVOBJ_FLAG_FAKE; 277 gpuobj->flags = flags;
570 278 kref_init(&gpuobj->refcount);
571 list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); 279 gpuobj->size = size;
572 280 gpuobj->pinst = pinst;
573 if (p_offset != ~0) { 281 gpuobj->cinst = 0xdeadbeef;
574 gpuobj->im_pramin = kzalloc(sizeof(struct drm_mm_node), 282 gpuobj->vinst = vinst;
575 GFP_KERNEL);
576 if (!gpuobj->im_pramin) {
577 nouveau_gpuobj_del(dev, &gpuobj);
578 return -ENOMEM;
579 }
580 gpuobj->im_pramin->start = p_offset;
581 gpuobj->im_pramin->size = size;
582 }
583
584 if (b_offset != ~0) {
585 gpuobj->im_backing = (struct nouveau_bo *)-1;
586 gpuobj->im_backing_start = b_offset;
587 }
588 283
589 if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { 284 if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
590 for (i = 0; i < gpuobj->im_pramin->size; i += 4) 285 for (i = 0; i < gpuobj->size; i += 4)
591 nv_wo32(dev, gpuobj, i/4, 0); 286 nv_wo32(gpuobj, i, 0);
592 dev_priv->engine.instmem.flush(dev); 287 dev_priv->engine.instmem.flush(dev);
593 } 288 }
594 289
595 if (pref) { 290 spin_lock(&dev_priv->ramin_lock);
596 i = nouveau_gpuobj_ref_add(dev, NULL, 0, gpuobj, pref); 291 list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
597 if (i) { 292 spin_unlock(&dev_priv->ramin_lock);
598 nouveau_gpuobj_del(dev, &gpuobj); 293 *pgpuobj = gpuobj;
599 return i;
600 }
601 }
602
603 if (pgpuobj)
604 *pgpuobj = gpuobj;
605 return 0; 294 return 0;
606} 295}
607 296
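
The object hunk above replaces the hand-rolled refcount and nouveau_gpuobj_ref structures with a kref and a single set-pointer helper. The helper follows the usual "reference the new value, release the old, assign" idiom, so taking, moving and dropping references all collapse into one call. Usage sketch (slot is any struct nouveau_gpuobj * the caller owns):

struct nouveau_gpuobj *slot = NULL;

nouveau_gpuobj_ref(obj, &slot);		/* take a reference, slot = obj */
nouveau_gpuobj_ref(other, &slot);	/* swap: refs other, unrefs obj */
nouveau_gpuobj_ref(NULL, &slot);	/* drop it, slot = NULL; the last
					 * put calls nouveau_gpuobj_del() */
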
@@ -685,14 +374,12 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
685 adjust = offset & 0x00000fff; 374 adjust = offset & 0x00000fff;
686 frame = offset & ~0x00000fff; 375 frame = offset & ~0x00000fff;
687 376
688 nv_wo32(dev, *gpuobj, 0, ((1<<12) | (1<<13) | 377 nv_wo32(*gpuobj, 0, ((1<<12) | (1<<13) | (adjust << 20) |
689 (adjust << 20) | 378 (access << 14) | (target << 16) |
690 (access << 14) | 379 class));
691 (target << 16) | 380 nv_wo32(*gpuobj, 4, size - 1);
692 class)); 381 nv_wo32(*gpuobj, 8, frame | pte_flags);
693 nv_wo32(dev, *gpuobj, 1, size - 1); 382 nv_wo32(*gpuobj, 12, frame | pte_flags);
694 nv_wo32(dev, *gpuobj, 2, frame | pte_flags);
695 nv_wo32(dev, *gpuobj, 3, frame | pte_flags);
696 } else { 383 } else {
697 uint64_t limit = offset + size - 1; 384 uint64_t limit = offset + size - 1;
698 uint32_t flags0, flags5; 385 uint32_t flags0, flags5;
@@ -705,12 +392,12 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
705 flags5 = 0x00080000; 392 flags5 = 0x00080000;
706 } 393 }
707 394
708 nv_wo32(dev, *gpuobj, 0, flags0 | class); 395 nv_wo32(*gpuobj, 0, flags0 | class);
709 nv_wo32(dev, *gpuobj, 1, lower_32_bits(limit)); 396 nv_wo32(*gpuobj, 4, lower_32_bits(limit));
710 nv_wo32(dev, *gpuobj, 2, lower_32_bits(offset)); 397 nv_wo32(*gpuobj, 8, lower_32_bits(offset));
711 nv_wo32(dev, *gpuobj, 3, ((upper_32_bits(limit) & 0xff) << 24) | 398 nv_wo32(*gpuobj, 12, ((upper_32_bits(limit) & 0xff) << 24) |
712 (upper_32_bits(offset) & 0xff)); 399 (upper_32_bits(offset) & 0xff));
713 nv_wo32(dev, *gpuobj, 5, flags5); 400 nv_wo32(*gpuobj, 20, flags5);
714 } 401 }
715 402
716 instmem->flush(dev); 403 instmem->flush(dev);
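
The DMA-object hunk above only switches nv_wo32() to byte offsets; the ctxdma layout itself is unchanged. For reference, the dwords written by the code above:

/*
 * pre-NV50 (byte offset: contents):
 *   0x00: (1<<12) | (1<<13) | adjust << 20 | access << 14
 *         | target << 16 | class
 *   0x04: size - 1                      (limit)
 *   0x08: frame | pte_flags
 *   0x0c: frame | pte_flags
 * NV50 instead writes flags0|class, limit, offset, the upper
 * limit/offset bits, and flags5 at 0x00, 0x04, 0x08, 0x0c, 0x14.
 */
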
@@ -741,7 +428,7 @@ nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
741 *o_ret = 0; 428 *o_ret = 0;
742 } else 429 } else
743 if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) { 430 if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
744 *gpuobj = dev_priv->gart_info.sg_ctxdma; 431 nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, gpuobj);
745 if (offset & ~0xffffffffULL) { 432 if (offset & ~0xffffffffULL) {
746 NV_ERROR(dev, "obj offset exceeds 32-bits\n"); 433 NV_ERROR(dev, "obj offset exceeds 32-bits\n");
747 return -EINVAL; 434 return -EINVAL;
@@ -829,25 +516,25 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
829 } 516 }
830 517
831 if (dev_priv->card_type >= NV_50) { 518 if (dev_priv->card_type >= NV_50) {
832 nv_wo32(dev, *gpuobj, 0, class); 519 nv_wo32(*gpuobj, 0, class);
833 nv_wo32(dev, *gpuobj, 5, 0x00010000); 520 nv_wo32(*gpuobj, 20, 0x00010000);
834 } else { 521 } else {
835 switch (class) { 522 switch (class) {
836 case NV_CLASS_NULL: 523 case NV_CLASS_NULL:
837 nv_wo32(dev, *gpuobj, 0, 0x00001030); 524 nv_wo32(*gpuobj, 0, 0x00001030);
838 nv_wo32(dev, *gpuobj, 1, 0xFFFFFFFF); 525 nv_wo32(*gpuobj, 4, 0xFFFFFFFF);
839 break; 526 break;
840 default: 527 default:
841 if (dev_priv->card_type >= NV_40) { 528 if (dev_priv->card_type >= NV_40) {
842 nv_wo32(dev, *gpuobj, 0, class); 529 nv_wo32(*gpuobj, 0, class);
843#ifdef __BIG_ENDIAN 530#ifdef __BIG_ENDIAN
844 nv_wo32(dev, *gpuobj, 2, 0x01000000); 531 nv_wo32(*gpuobj, 8, 0x01000000);
845#endif 532#endif
846 } else { 533 } else {
847#ifdef __BIG_ENDIAN 534#ifdef __BIG_ENDIAN
848 nv_wo32(dev, *gpuobj, 0, class | 0x00080000); 535 nv_wo32(*gpuobj, 0, class | 0x00080000);
849#else 536#else
850 nv_wo32(dev, *gpuobj, 0, class); 537 nv_wo32(*gpuobj, 0, class);
851#endif 538#endif
852 } 539 }
853 } 540 }
@@ -873,10 +560,15 @@ nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
873 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); 560 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
874 if (!gpuobj) 561 if (!gpuobj)
875 return -ENOMEM; 562 return -ENOMEM;
563 gpuobj->dev = chan->dev;
876 gpuobj->engine = NVOBJ_ENGINE_SW; 564 gpuobj->engine = NVOBJ_ENGINE_SW;
877 gpuobj->class = class; 565 gpuobj->class = class;
566 kref_init(&gpuobj->refcount);
567 gpuobj->cinst = 0x40;
878 568
569 spin_lock(&dev_priv->ramin_lock);
879 list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); 570 list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
571 spin_unlock(&dev_priv->ramin_lock);
880 *gpuobj_ret = gpuobj; 572 *gpuobj_ret = gpuobj;
881 return 0; 573 return 0;
882} 574}
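
nouveau_gpuobj_sw_new() now seeds a kref on each object in place of the old ref-list bookkeeping, and the nouveau_gpuobj_ref(obj, &ptr) calls throughout the rest of the series take and drop those references. A sketch of the get/put pattern such a helper typically wraps, using a userspace stand-in for the kernel's kref; the release hook and type names are illustrative only, and the real helper also handles list and lock bookkeeping:

#include <stdlib.h>

/* minimal userspace stand-in for the kernel's kref */
struct kref { int refcount; };
static void kref_init(struct kref *k) { k->refcount = 1; }
static void kref_get(struct kref *k)  { k->refcount++; }
static void kref_put(struct kref *k, void (*release)(struct kref *))
{
	if (--k->refcount == 0)
		release(k);
}

struct obj { struct kref refcount; };  /* refcount first, as in gpuobj */

static void obj_release(struct kref *k) { free((struct obj *)k); }

/* ref(new, &slot): take a ref on new, drop whatever *slot held */
static void obj_ref(struct obj *new, struct obj **slot)
{
	if (new)
		kref_get(&new->refcount);
	if (*slot)
		kref_put(&(*slot)->refcount, obj_release);
	*slot = new;
}
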
@@ -886,7 +578,6 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
886{ 578{
887 struct drm_device *dev = chan->dev; 579 struct drm_device *dev = chan->dev;
888 struct drm_nouveau_private *dev_priv = dev->dev_private; 580 struct drm_nouveau_private *dev_priv = dev->dev_private;
889 struct nouveau_gpuobj *pramin = NULL;
890 uint32_t size; 581 uint32_t size;
891 uint32_t base; 582 uint32_t base;
892 int ret; 583 int ret;
@@ -911,18 +602,16 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
911 size += 0x1000; 602 size += 0x1000;
912 } 603 }
913 604
914 ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, size, 0x1000, 0, 605 ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
915 &chan->ramin);
916 if (ret) { 606 if (ret) {
917 NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret); 607 NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret);
918 return ret; 608 return ret;
919 } 609 }
920 pramin = chan->ramin->gpuobj;
921 610
922 ret = drm_mm_init(&chan->ramin_heap, pramin->im_pramin->start + base, size); 611 ret = drm_mm_init(&chan->ramin_heap, base, size);
923 if (ret) { 612 if (ret) {
924 NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret); 613 NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
925 nouveau_gpuobj_ref_del(dev, &chan->ramin); 614 nouveau_gpuobj_ref(NULL, &chan->ramin);
926 return ret; 615 return ret;
927 } 616 }
928 617
@@ -939,8 +628,6 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
939 struct nouveau_gpuobj *vram = NULL, *tt = NULL; 628 struct nouveau_gpuobj *vram = NULL, *tt = NULL;
940 int ret, i; 629 int ret, i;
941 630
942 INIT_LIST_HEAD(&chan->ramht_refs);
943
944 NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h); 631 NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
945 632
946 /* Allocate a chunk of memory for per-channel object storage */ 633 /* Allocate a chunk of memory for per-channel object storage */
@@ -956,41 +643,38 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
956 * locations determined during init. 643 * locations determined during init.
957 */ 644 */
958 if (dev_priv->card_type >= NV_50) { 645 if (dev_priv->card_type >= NV_50) {
959 uint32_t vm_offset, pde; 646 u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
647 u64 vm_vinst = chan->ramin->vinst + pgd_offs;
648 u32 vm_pinst = chan->ramin->pinst;
649 u32 pde;
960 650
961 vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 0x1400 : 0x200; 651 if (vm_pinst != ~0)
962 vm_offset += chan->ramin->gpuobj->im_pramin->start; 652 vm_pinst += pgd_offs;
963 653
964 ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000, 654 ret = nouveau_gpuobj_new_fake(dev, vm_pinst, vm_vinst, 0x4000,
965 0, &chan->vm_pd, NULL); 655 0, &chan->vm_pd);
966 if (ret) 656 if (ret)
967 return ret; 657 return ret;
968 for (i = 0; i < 0x4000; i += 8) { 658 for (i = 0; i < 0x4000; i += 8) {
969 nv_wo32(dev, chan->vm_pd, (i+0)/4, 0x00000000); 659 nv_wo32(chan->vm_pd, i + 0, 0x00000000);
970 nv_wo32(dev, chan->vm_pd, (i+4)/4, 0xdeadcafe); 660 nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
971 } 661 }
972 662
973 pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 2; 663 nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma,
974 ret = nouveau_gpuobj_ref_add(dev, NULL, 0, 664 &chan->vm_gart_pt);
975 dev_priv->gart_info.sg_ctxdma, 665 pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 8;
976 &chan->vm_gart_pt); 666 nv_wo32(chan->vm_pd, pde + 0, chan->vm_gart_pt->vinst | 3);
977 if (ret) 667 nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
978 return ret;
979 nv_wo32(dev, chan->vm_pd, pde++,
980 chan->vm_gart_pt->instance | 0x03);
981 nv_wo32(dev, chan->vm_pd, pde++, 0x00000000);
982 668
983 pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 2; 669 pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 8;
984 for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) { 670 for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
985 ret = nouveau_gpuobj_ref_add(dev, NULL, 0, 671 nouveau_gpuobj_ref(dev_priv->vm_vram_pt[i],
986 dev_priv->vm_vram_pt[i], 672 &chan->vm_vram_pt[i]);
987 &chan->vm_vram_pt[i]);
988 if (ret)
989 return ret;
990 673
991 nv_wo32(dev, chan->vm_pd, pde++, 674 nv_wo32(chan->vm_pd, pde + 0,
992 chan->vm_vram_pt[i]->instance | 0x61); 675 chan->vm_vram_pt[i]->vinst | 0x61);
993 nv_wo32(dev, chan->vm_pd, pde++, 0x00000000); 676 nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
677 pde += 8;
994 } 678 }
995 679
996 instmem->flush(dev); 680 instmem->flush(dev);
@@ -998,15 +682,17 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
998 682
999 /* RAMHT */ 683 /* RAMHT */
1000 if (dev_priv->card_type < NV_50) { 684 if (dev_priv->card_type < NV_50) {
1001 ret = nouveau_gpuobj_ref_add(dev, NULL, 0, dev_priv->ramht, 685 nouveau_ramht_ref(dev_priv->ramht, &chan->ramht, NULL);
1002 &chan->ramht); 686 } else {
687 struct nouveau_gpuobj *ramht = NULL;
688
689 ret = nouveau_gpuobj_new(dev, chan, 0x8000, 16,
690 NVOBJ_FLAG_ZERO_ALLOC, &ramht);
1003 if (ret) 691 if (ret)
1004 return ret; 692 return ret;
1005 } else { 693
1006 ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 694 ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
1007 0x8000, 16, 695 nouveau_gpuobj_ref(NULL, &ramht);
1008 NVOBJ_FLAG_ZERO_ALLOC,
1009 &chan->ramht);
1010 if (ret) 696 if (ret)
1011 return ret; 697 return ret;
1012 } 698 }
@@ -1023,24 +709,32 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
1023 } 709 }
1024 } else { 710 } else {
1025 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 711 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
1026 0, dev_priv->fb_available_size, 712 0, dev_priv->fb_available_size,
1027 NV_DMA_ACCESS_RW, 713 NV_DMA_ACCESS_RW,
1028 NV_DMA_TARGET_VIDMEM, &vram); 714 NV_DMA_TARGET_VIDMEM, &vram);
1029 if (ret) { 715 if (ret) {
1030 NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret); 716 NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
1031 return ret; 717 return ret;
1032 } 718 }
1033 } 719 }
1034 720
1035 ret = nouveau_gpuobj_ref_add(dev, chan, vram_h, vram, NULL); 721 ret = nouveau_ramht_insert(chan, vram_h, vram);
722 nouveau_gpuobj_ref(NULL, &vram);
1036 if (ret) { 723 if (ret) {
1037 NV_ERROR(dev, "Error referencing VRAM ctxdma: %d\n", ret); 724 NV_ERROR(dev, "Error adding VRAM ctxdma to RAMHT: %d\n", ret);
1038 return ret; 725 return ret;
1039 } 726 }
1040 727
1041 /* TT memory ctxdma */ 728 /* TT memory ctxdma */
1042 if (dev_priv->card_type >= NV_50) { 729 if (dev_priv->card_type >= NV_50) {
1043 tt = vram; 730 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
731 0, dev_priv->vm_end,
732 NV_DMA_ACCESS_RW,
733 NV_DMA_TARGET_AGP, &tt);
734 if (ret) {
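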
 735 NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
736 return ret;
737 }
1044 } else 738 } else
1045 if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) { 739 if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
1046 ret = nouveau_gpuobj_gart_dma_new(chan, 0, 740 ret = nouveau_gpuobj_gart_dma_new(chan, 0,
@@ -1056,9 +750,10 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
1056 return ret; 750 return ret;
1057 } 751 }
1058 752
1059 ret = nouveau_gpuobj_ref_add(dev, chan, tt_h, tt, NULL); 753 ret = nouveau_ramht_insert(chan, tt_h, tt);
754 nouveau_gpuobj_ref(NULL, &tt);
1060 if (ret) { 755 if (ret) {
1061 NV_ERROR(dev, "Error referencing TT ctxdma: %d\n", ret); 756 NV_ERROR(dev, "Error adding TT ctxdma to RAMHT: %d\n", ret);
1062 return ret; 757 return ret;
1063 } 758 }
1064 759
@@ -1070,33 +765,23 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
1070{ 765{
1071 struct drm_nouveau_private *dev_priv = chan->dev->dev_private; 766 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
1072 struct drm_device *dev = chan->dev; 767 struct drm_device *dev = chan->dev;
1073 struct list_head *entry, *tmp;
1074 struct nouveau_gpuobj_ref *ref;
1075 int i; 768 int i;
1076 769
1077 NV_DEBUG(dev, "ch%d\n", chan->id); 770 NV_DEBUG(dev, "ch%d\n", chan->id);
1078 771
1079 if (!chan->ramht_refs.next) 772 if (!chan->ramht)
1080 return; 773 return;
1081 774
1082 list_for_each_safe(entry, tmp, &chan->ramht_refs) { 775 nouveau_ramht_ref(NULL, &chan->ramht, chan);
1083 ref = list_entry(entry, struct nouveau_gpuobj_ref, list);
1084
1085 nouveau_gpuobj_ref_del(dev, &ref);
1086 }
1087
1088 nouveau_gpuobj_ref_del(dev, &chan->ramht);
1089 776
1090 nouveau_gpuobj_del(dev, &chan->vm_pd); 777 nouveau_gpuobj_ref(NULL, &chan->vm_pd);
1091 nouveau_gpuobj_ref_del(dev, &chan->vm_gart_pt); 778 nouveau_gpuobj_ref(NULL, &chan->vm_gart_pt);
1092 for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) 779 for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
1093 nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]); 780 nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]);
1094 781
1095 if (chan->ramin_heap.free_stack.next) 782 if (chan->ramin_heap.free_stack.next)
1096 drm_mm_takedown(&chan->ramin_heap); 783 drm_mm_takedown(&chan->ramin_heap);
1097 if (chan->ramin) 784 nouveau_gpuobj_ref(NULL, &chan->ramin);
1098 nouveau_gpuobj_ref_del(dev, &chan->ramin);
1099
1100} 785}
1101 786
1102int 787int
@@ -1117,17 +802,17 @@ nouveau_gpuobj_suspend(struct drm_device *dev)
1117 } 802 }
1118 803
1119 list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) { 804 list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
1120 if (!gpuobj->im_backing || (gpuobj->flags & NVOBJ_FLAG_FAKE)) 805 if (!gpuobj->im_backing)
1121 continue; 806 continue;
1122 807
1123 gpuobj->im_backing_suspend = vmalloc(gpuobj->im_pramin->size); 808 gpuobj->im_backing_suspend = vmalloc(gpuobj->size);
1124 if (!gpuobj->im_backing_suspend) { 809 if (!gpuobj->im_backing_suspend) {
1125 nouveau_gpuobj_resume(dev); 810 nouveau_gpuobj_resume(dev);
1126 return -ENOMEM; 811 return -ENOMEM;
1127 } 812 }
1128 813
1129 for (i = 0; i < gpuobj->im_pramin->size / 4; i++) 814 for (i = 0; i < gpuobj->size; i += 4)
1130 gpuobj->im_backing_suspend[i] = nv_ro32(dev, gpuobj, i); 815 gpuobj->im_backing_suspend[i/4] = nv_ro32(gpuobj, i);
1131 } 816 }
1132 817
1133 return 0; 818 return 0;
@@ -1172,8 +857,8 @@ nouveau_gpuobj_resume(struct drm_device *dev)
1172 if (!gpuobj->im_backing_suspend) 857 if (!gpuobj->im_backing_suspend)
1173 continue; 858 continue;
1174 859
1175 for (i = 0; i < gpuobj->im_pramin->size / 4; i++) 860 for (i = 0; i < gpuobj->size; i += 4)
1176 nv_wo32(dev, gpuobj, i, gpuobj->im_backing_suspend[i]); 861 nv_wo32(gpuobj, i, gpuobj->im_backing_suspend[i/4]);
1177 dev_priv->engine.instmem.flush(dev); 862 dev_priv->engine.instmem.flush(dev);
1178 } 863 }
1179 864
@@ -1208,25 +893,24 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
1208 return -EPERM; 893 return -EPERM;
1209 } 894 }
1210 895
1211 if (nouveau_gpuobj_ref_find(chan, init->handle, NULL) == 0) 896 if (nouveau_ramht_find(chan, init->handle))
1212 return -EEXIST; 897 return -EEXIST;
1213 898
1214 if (!grc->software) 899 if (!grc->software)
1215 ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr); 900 ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr);
1216 else 901 else
1217 ret = nouveau_gpuobj_sw_new(chan, grc->id, &gr); 902 ret = nouveau_gpuobj_sw_new(chan, grc->id, &gr);
1218
1219 if (ret) { 903 if (ret) {
1220 NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n", 904 NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
1221 ret, init->channel, init->handle); 905 ret, init->channel, init->handle);
1222 return ret; 906 return ret;
1223 } 907 }
1224 908
1225 ret = nouveau_gpuobj_ref_add(dev, chan, init->handle, gr, NULL); 909 ret = nouveau_ramht_insert(chan, init->handle, gr);
910 nouveau_gpuobj_ref(NULL, &gr);
1226 if (ret) { 911 if (ret) {
1227 NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n", 912 NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n",
1228 ret, init->channel, init->handle); 913 ret, init->channel, init->handle);
1229 nouveau_gpuobj_del(dev, &gr);
1230 return ret; 914 return ret;
1231 } 915 }
1232 916
@@ -1237,16 +921,62 @@ int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
1237 struct drm_file *file_priv) 921 struct drm_file *file_priv)
1238{ 922{
1239 struct drm_nouveau_gpuobj_free *objfree = data; 923 struct drm_nouveau_gpuobj_free *objfree = data;
1240 struct nouveau_gpuobj_ref *ref; 924 struct nouveau_gpuobj *gpuobj;
1241 struct nouveau_channel *chan; 925 struct nouveau_channel *chan;
1242 int ret;
1243 926
1244 NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan); 927 NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan);
1245 928
1246 ret = nouveau_gpuobj_ref_find(chan, objfree->handle, &ref); 929 gpuobj = nouveau_ramht_find(chan, objfree->handle);
1247 if (ret) 930 if (!gpuobj)
1248 return ret; 931 return -ENOENT;
1249 nouveau_gpuobj_ref_del(dev, &ref);
1250 932
933 nouveau_ramht_remove(chan, objfree->handle);
1251 return 0; 934 return 0;
1252} 935}
936
937u32
938nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)
939{
940 struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
941 struct drm_device *dev = gpuobj->dev;
942
943 if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
944 u64 ptr = gpuobj->vinst + offset;
945 u32 base = ptr >> 16;
946 u32 val;
947
948 spin_lock(&dev_priv->ramin_lock);
949 if (dev_priv->ramin_base != base) {
950 dev_priv->ramin_base = base;
951 nv_wr32(dev, 0x001700, dev_priv->ramin_base);
952 }
953 val = nv_rd32(dev, 0x700000 + (ptr & 0xffff));
954 spin_unlock(&dev_priv->ramin_lock);
955 return val;
956 }
957
958 return nv_ri32(dev, gpuobj->pinst + offset);
959}
960
961void
962nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val)
963{
964 struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
965 struct drm_device *dev = gpuobj->dev;
966
967 if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
968 u64 ptr = gpuobj->vinst + offset;
969 u32 base = ptr >> 16;
970
971 spin_lock(&dev_priv->ramin_lock);
972 if (dev_priv->ramin_base != base) {
973 dev_priv->ramin_base = base;
974 nv_wr32(dev, 0x001700, dev_priv->ramin_base);
975 }
976 nv_wr32(dev, 0x700000 + (ptr & 0xffff), val);
977 spin_unlock(&dev_priv->ramin_lock);
978 return;
979 }
980
981 nv_wi32(dev, gpuobj->pinst + offset, val);
982}
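
The nv_ro32()/nv_wo32() bodies above fall back to a sliding 64KiB window whenever the object has no direct PRAMIN mapping (pinst == ~0) or RAMIN is not yet available: register 0x001700 selects which 64KiB-aligned slice of instance memory appears at BAR offset 0x700000, and the low 16 bits of the address index into that window. A sketch of just the window arithmetic, with made-up helper names:

#include <stdint.h>
#include <stdio.h>

static uint32_t window_sel;            /* mirrors register 0x001700 */

static void window_access(uint64_t vinst, uint32_t *sel, uint32_t *off)
{
	*sel = (uint32_t)(vinst >> 16);        /* 64KiB-aligned slice  */
	*off = (uint32_t)(vinst & 0xffff);     /* offset within window */
}

int main(void)
{
	uint32_t off;
	window_access(0x12345678ULL, &window_sel, &off);
	printf("0x001700 <- 0x%08x, access 0x700000 + 0x%04x\n",
	       window_sel, off);
	return 0;
}
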
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c
new file mode 100644
index 000000000000..ac62a1b8c4fc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_perf.c
@@ -0,0 +1,205 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26
27#include "nouveau_drv.h"
28#include "nouveau_pm.h"
29
30static void
31legacy_perf_init(struct drm_device *dev)
32{
33 struct drm_nouveau_private *dev_priv = dev->dev_private;
34 struct nvbios *bios = &dev_priv->vbios;
35 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
36 char *perf, *entry, *bmp = &bios->data[bios->offset];
37 int headerlen, use_straps;
38
39 if (bmp[5] < 0x5 || bmp[6] < 0x14) {
40 NV_DEBUG(dev, "BMP version too old for perf\n");
41 return;
42 }
43
44 perf = ROMPTR(bios, bmp[0x73]);
45 if (!perf) {
46 NV_DEBUG(dev, "No memclock table pointer found.\n");
47 return;
48 }
49
50 switch (perf[0]) {
51 case 0x12:
52 case 0x14:
53 case 0x18:
54 use_straps = 0;
55 headerlen = 1;
56 break;
57 case 0x01:
58 use_straps = perf[1] & 1;
59 headerlen = (use_straps ? 8 : 2);
60 break;
61 default:
62 NV_WARN(dev, "Unknown memclock table version %x.\n", perf[0]);
63 return;
64 }
65
66 entry = perf + headerlen;
67 if (use_straps)
68 entry += (nv_rd32(dev, NV_PEXTDEV_BOOT_0) & 0x3c) >> 1;
69
70 sprintf(pm->perflvl[0].name, "performance_level_0");
71 pm->perflvl[0].memory = ROM16(entry[0]) * 20;
72 pm->nr_perflvl = 1;
73}
74
75void
76nouveau_perf_init(struct drm_device *dev)
77{
78 struct drm_nouveau_private *dev_priv = dev->dev_private;
79 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
80 struct nvbios *bios = &dev_priv->vbios;
81 struct bit_entry P;
82 u8 version, headerlen, recordlen, entries;
83 u8 *perf, *entry;
84 int vid, i;
85
86 if (bios->type == NVBIOS_BIT) {
87 if (bit_table(dev, 'P', &P))
88 return;
89
90 if (P.version != 1 && P.version != 2) {
91 NV_WARN(dev, "unknown perf for BIT P %d\n", P.version);
92 return;
93 }
94
95 perf = ROMPTR(bios, P.data[0]);
96 version = perf[0];
97 headerlen = perf[1];
98 if (version < 0x40) {
99 recordlen = perf[3] + (perf[4] * perf[5]);
100 entries = perf[2];
101 } else {
102 recordlen = perf[2] + (perf[3] * perf[4]);
103 entries = perf[5];
104 }
105 } else {
106 if (bios->data[bios->offset + 6] < 0x25) {
107 legacy_perf_init(dev);
108 return;
109 }
110
111 perf = ROMPTR(bios, bios->data[bios->offset + 0x94]);
112 if (!perf) {
113 NV_DEBUG(dev, "perf table pointer invalid\n");
114 return;
115 }
116
117 version = perf[1];
118 headerlen = perf[0];
119 recordlen = perf[3];
120 entries = perf[2];
121 }
122
123 entry = perf + headerlen;
124 for (i = 0; i < entries; i++) {
125 struct nouveau_pm_level *perflvl = &pm->perflvl[pm->nr_perflvl];
126
127 if (entry[0] == 0xff) {
128 entry += recordlen;
129 continue;
130 }
131
132 switch (version) {
133 case 0x12:
134 case 0x13:
135 case 0x15:
136 perflvl->fanspeed = entry[55];
137 perflvl->voltage = entry[56];
138 perflvl->core = ROM32(entry[1]) * 10;
139 perflvl->memory = ROM32(entry[5]) * 20;
140 break;
141 case 0x21:
142 case 0x23:
143 case 0x24:
144 perflvl->fanspeed = entry[4];
145 perflvl->voltage = entry[5];
146 perflvl->core = ROM16(entry[6]) * 1000;
147
148 if (dev_priv->chipset == 0x49 ||
149 dev_priv->chipset == 0x4b)
150 perflvl->memory = ROM16(entry[11]) * 1000;
151 else
152 perflvl->memory = ROM16(entry[11]) * 2000;
153
154 break;
155 case 0x25:
156 perflvl->fanspeed = entry[4];
157 perflvl->voltage = entry[5];
158 perflvl->core = ROM16(entry[6]) * 1000;
159 perflvl->shader = ROM16(entry[10]) * 1000;
160 perflvl->memory = ROM16(entry[12]) * 1000;
161 break;
162 case 0x30:
163 perflvl->memscript = ROM16(entry[2]);
164 case 0x35:
165 perflvl->fanspeed = entry[6];
166 perflvl->voltage = entry[7];
167 perflvl->core = ROM16(entry[8]) * 1000;
168 perflvl->shader = ROM16(entry[10]) * 1000;
169 perflvl->memory = ROM16(entry[12]) * 1000;
170 /*XXX: confirm on 0x35 */
171 perflvl->unk05 = ROM16(entry[16]) * 1000;
172 break;
173 case 0x40:
174#define subent(n) entry[perf[2] + ((n) * perf[3])]
175 perflvl->fanspeed = 0; /*XXX*/
176 perflvl->voltage = entry[2];
177 perflvl->core = (ROM16(subent(0)) & 0xfff) * 1000;
178 perflvl->shader = (ROM16(subent(1)) & 0xfff) * 1000;
179 perflvl->memory = (ROM16(subent(2)) & 0xfff) * 1000;
180 break;
181 }
182
183 /* make sure vid is valid */
184 if (pm->voltage.supported && perflvl->voltage) {
185 vid = nouveau_volt_vid_lookup(dev, perflvl->voltage);
186 if (vid < 0) {
187 NV_DEBUG(dev, "drop perflvl %d, bad vid\n", i);
188 entry += recordlen;
189 continue;
190 }
191 }
192
193 snprintf(perflvl->name, sizeof(perflvl->name),
194 "performance_level_%d", i);
195 perflvl->id = i;
196 pm->nr_perflvl++;
197
198 entry += recordlen;
199 }
200}
201
202void
203nouveau_perf_fini(struct drm_device *dev)
204{
205}
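
The table walker above pulls multi-byte clock fields out of the VBIOS image with ROM16()/ROM32(), which read little-endian values at the address of the given byte regardless of host endianness. Stand-in readers showing the equivalent arithmetic; the real macros live in nouveau_bios.h, and rom16(entry + 6) below corresponds to the driver's ROM16(entry[6]):

#include <stdint.h>

static inline uint16_t rom16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

static inline uint32_t rom32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

/* e.g. a version 0x25 entry: core clock is a 16-bit MHz field at +6,
 * scaled to kHz exactly as the table walker does */
static uint32_t core_khz(const uint8_t *entry)
{
	return rom16(entry + 6) * 1000;
}
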
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
new file mode 100644
index 000000000000..1c99c55d6d46
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -0,0 +1,518 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26
27#include "nouveau_drv.h"
28#include "nouveau_pm.h"
29
30#include <linux/hwmon.h>
31#include <linux/hwmon-sysfs.h>
32
33static int
34nouveau_pm_clock_set(struct drm_device *dev, struct nouveau_pm_level *perflvl,
35 u8 id, u32 khz)
36{
37 struct drm_nouveau_private *dev_priv = dev->dev_private;
38 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
39 void *pre_state;
40
41 if (khz == 0)
42 return 0;
43
44 pre_state = pm->clock_pre(dev, perflvl, id, khz);
45 if (IS_ERR(pre_state))
46 return PTR_ERR(pre_state);
47
48 if (pre_state)
49 pm->clock_set(dev, pre_state);
50 return 0;
51}
52
53static int
54nouveau_pm_perflvl_set(struct drm_device *dev, struct nouveau_pm_level *perflvl)
55{
56 struct drm_nouveau_private *dev_priv = dev->dev_private;
57 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
58 int ret;
59
60 if (perflvl == pm->cur)
61 return 0;
62
63 if (pm->voltage.supported && pm->voltage_set && perflvl->voltage) {
64 ret = pm->voltage_set(dev, perflvl->voltage);
65 if (ret) {
66 NV_ERROR(dev, "voltage_set %d failed: %d\n",
67 perflvl->voltage, ret);
68 }
69 }
70
71 nouveau_pm_clock_set(dev, perflvl, PLL_CORE, perflvl->core);
72 nouveau_pm_clock_set(dev, perflvl, PLL_SHADER, perflvl->shader);
73 nouveau_pm_clock_set(dev, perflvl, PLL_MEMORY, perflvl->memory);
74 nouveau_pm_clock_set(dev, perflvl, PLL_UNK05, perflvl->unk05);
75
76 pm->cur = perflvl;
77 return 0;
78}
79
80static int
81nouveau_pm_profile_set(struct drm_device *dev, const char *profile)
82{
83 struct drm_nouveau_private *dev_priv = dev->dev_private;
84 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
85 struct nouveau_pm_level *perflvl = NULL;
86
87 /* safety precaution, for now */
88 if (nouveau_perflvl_wr != 7777)
89 return -EPERM;
90
91 if (!pm->clock_set)
92 return -EINVAL;
93
94 if (!strncmp(profile, "boot", 4))
95 perflvl = &pm->boot;
96 else {
97 int pl = simple_strtol(profile, NULL, 10);
98 int i;
99
100 for (i = 0; i < pm->nr_perflvl; i++) {
101 if (pm->perflvl[i].id == pl) {
102 perflvl = &pm->perflvl[i];
103 break;
104 }
105 }
106
107 if (!perflvl)
108 return -EINVAL;
109 }
110
111 NV_INFO(dev, "setting performance level: %s\n", profile);
112 return nouveau_pm_perflvl_set(dev, perflvl);
113}
114
115static int
116nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
117{
118 struct drm_nouveau_private *dev_priv = dev->dev_private;
119 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
120 int ret;
121
122 if (!pm->clock_get)
123 return -EINVAL;
124
125 memset(perflvl, 0, sizeof(*perflvl));
126
127 ret = pm->clock_get(dev, PLL_CORE);
128 if (ret > 0)
129 perflvl->core = ret;
130
131 ret = pm->clock_get(dev, PLL_MEMORY);
132 if (ret > 0)
133 perflvl->memory = ret;
134
135 ret = pm->clock_get(dev, PLL_SHADER);
136 if (ret > 0)
137 perflvl->shader = ret;
138
139 ret = pm->clock_get(dev, PLL_UNK05);
140 if (ret > 0)
141 perflvl->unk05 = ret;
142
143 if (pm->voltage.supported && pm->voltage_get) {
144 ret = pm->voltage_get(dev);
145 if (ret > 0)
146 perflvl->voltage = ret;
147 }
148
149 return 0;
150}
151
152static void
153nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len)
154{
155 char c[16], s[16], v[16], f[16];
156
157 c[0] = '\0';
158 if (perflvl->core)
159 snprintf(c, sizeof(c), " core %dMHz", perflvl->core / 1000);
160
161 s[0] = '\0';
162 if (perflvl->shader)
163 snprintf(s, sizeof(s), " shader %dMHz", perflvl->shader / 1000);
164
165 v[0] = '\0';
166 if (perflvl->voltage)
167 snprintf(v, sizeof(v), " voltage %dmV", perflvl->voltage * 10);
168
169 f[0] = '\0';
170 if (perflvl->fanspeed)
171 snprintf(f, sizeof(f), " fanspeed %d%%", perflvl->fanspeed);
172
173 snprintf(ptr, len, "memory %dMHz%s%s%s%s\n", perflvl->memory / 1000,
174 c, s, v, f);
175}
176
177static ssize_t
178nouveau_pm_get_perflvl_info(struct device *d,
179 struct device_attribute *a, char *buf)
180{
181 struct nouveau_pm_level *perflvl = (struct nouveau_pm_level *)a;
182 char *ptr = buf;
183 int len = PAGE_SIZE;
184
185 snprintf(ptr, len, "%d: ", perflvl->id);
186 ptr += strlen(buf);
187 len -= strlen(buf);
188
189 nouveau_pm_perflvl_info(perflvl, ptr, len);
190 return strlen(buf);
191}
192
193static ssize_t
194nouveau_pm_get_perflvl(struct device *d, struct device_attribute *a, char *buf)
195{
196 struct drm_device *dev = pci_get_drvdata(to_pci_dev(d));
197 struct drm_nouveau_private *dev_priv = dev->dev_private;
198 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
199 struct nouveau_pm_level cur;
200 int len = PAGE_SIZE, ret;
201 char *ptr = buf;
202
203 if (!pm->cur)
204 snprintf(ptr, len, "setting: boot\n");
205 else if (pm->cur == &pm->boot)
206 snprintf(ptr, len, "setting: boot\nc: ");
207 else
208 snprintf(ptr, len, "setting: static %d\nc: ", pm->cur->id);
209 ptr += strlen(buf);
210 len -= strlen(buf);
211
212 ret = nouveau_pm_perflvl_get(dev, &cur);
213 if (ret == 0)
214 nouveau_pm_perflvl_info(&cur, ptr, len);
215 return strlen(buf);
216}
217
218static ssize_t
219nouveau_pm_set_perflvl(struct device *d, struct device_attribute *a,
220 const char *buf, size_t count)
221{
222 struct drm_device *dev = pci_get_drvdata(to_pci_dev(d));
223 int ret;
224
225 ret = nouveau_pm_profile_set(dev, buf);
226 if (ret)
227 return ret;
228 return strlen(buf);
229}
230
231static DEVICE_ATTR(performance_level, S_IRUGO | S_IWUSR,
232 nouveau_pm_get_perflvl, nouveau_pm_set_perflvl);
233
234static int
235nouveau_sysfs_init(struct drm_device *dev)
236{
237 struct drm_nouveau_private *dev_priv = dev->dev_private;
238 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
239 struct device *d = &dev->pdev->dev;
240 int ret, i;
241
242 ret = device_create_file(d, &dev_attr_performance_level);
243 if (ret)
244 return ret;
245
246 for (i = 0; i < pm->nr_perflvl; i++) {
247 struct nouveau_pm_level *perflvl = &pm->perflvl[i];
248
249 perflvl->dev_attr.attr.name = perflvl->name;
250 perflvl->dev_attr.attr.mode = S_IRUGO;
251 perflvl->dev_attr.show = nouveau_pm_get_perflvl_info;
252 perflvl->dev_attr.store = NULL;
253 sysfs_attr_init(&perflvl->dev_attr.attr);
254
255 ret = device_create_file(d, &perflvl->dev_attr);
256 if (ret) {
 257 NV_ERROR(dev, "failed perflvl %d sysfs: %d\n",
 258 perflvl->id, ret);
259 perflvl->dev_attr.attr.name = NULL;
260 nouveau_pm_fini(dev);
261 return ret;
262 }
263 }
264
265 return 0;
266}
267
268static void
269nouveau_sysfs_fini(struct drm_device *dev)
270{
271 struct drm_nouveau_private *dev_priv = dev->dev_private;
272 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
273 struct device *d = &dev->pdev->dev;
274 int i;
275
276 device_remove_file(d, &dev_attr_performance_level);
277 for (i = 0; i < pm->nr_perflvl; i++) {
278 struct nouveau_pm_level *pl = &pm->perflvl[i];
279
280 if (!pl->dev_attr.attr.name)
281 break;
282
283 device_remove_file(d, &pl->dev_attr);
284 }
285}
286
287static ssize_t
288nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf)
289{
290 struct drm_device *dev = dev_get_drvdata(d);
291 struct drm_nouveau_private *dev_priv = dev->dev_private;
292 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
293
294 return snprintf(buf, PAGE_SIZE, "%d\n", pm->temp_get(dev)*1000);
295}
296static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, nouveau_hwmon_show_temp,
297 NULL, 0);
298
299static ssize_t
300nouveau_hwmon_max_temp(struct device *d, struct device_attribute *a, char *buf)
301{
302 struct drm_device *dev = dev_get_drvdata(d);
303 struct drm_nouveau_private *dev_priv = dev->dev_private;
304 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
305 struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp;
306
307 return snprintf(buf, PAGE_SIZE, "%d\n", temp->down_clock*1000);
308}
309static ssize_t
310nouveau_hwmon_set_max_temp(struct device *d, struct device_attribute *a,
311 const char *buf, size_t count)
312{
313 struct drm_device *dev = dev_get_drvdata(d);
314 struct drm_nouveau_private *dev_priv = dev->dev_private;
315 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
316 struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp;
317 long value;
318
319 if (strict_strtol(buf, 10, &value) == -EINVAL)
320 return count;
321
322 temp->down_clock = value/1000;
323
324 nouveau_temp_safety_checks(dev);
325
326 return count;
327}
328static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, nouveau_hwmon_max_temp,
329 nouveau_hwmon_set_max_temp,
330 0);
331
332static ssize_t
333nouveau_hwmon_critical_temp(struct device *d, struct device_attribute *a,
334 char *buf)
335{
336 struct drm_device *dev = dev_get_drvdata(d);
337 struct drm_nouveau_private *dev_priv = dev->dev_private;
338 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
339 struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp;
340
341 return snprintf(buf, PAGE_SIZE, "%d\n", temp->critical*1000);
342}
343static ssize_t
344nouveau_hwmon_set_critical_temp(struct device *d, struct device_attribute *a,
345 const char *buf,
346 size_t count)
347{
348 struct drm_device *dev = dev_get_drvdata(d);
349 struct drm_nouveau_private *dev_priv = dev->dev_private;
350 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
351 struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp;
352 long value;
353
354 if (strict_strtol(buf, 10, &value) == -EINVAL)
355 return count;
356
357 temp->critical = value/1000;
358
359 nouveau_temp_safety_checks(dev);
360
361 return count;
362}
363static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO | S_IWUSR,
364 nouveau_hwmon_critical_temp,
365 nouveau_hwmon_set_critical_temp,
366 0);
367
368static ssize_t nouveau_hwmon_show_name(struct device *dev,
369 struct device_attribute *attr,
370 char *buf)
371{
372 return sprintf(buf, "nouveau\n");
373}
374static SENSOR_DEVICE_ATTR(name, S_IRUGO, nouveau_hwmon_show_name, NULL, 0);
375
376static ssize_t nouveau_hwmon_show_update_rate(struct device *dev,
377 struct device_attribute *attr,
378 char *buf)
379{
380 return sprintf(buf, "1000\n");
381}
382static SENSOR_DEVICE_ATTR(update_rate, S_IRUGO,
383 nouveau_hwmon_show_update_rate,
384 NULL, 0);
385
386static struct attribute *hwmon_attributes[] = {
387 &sensor_dev_attr_temp1_input.dev_attr.attr,
388 &sensor_dev_attr_temp1_max.dev_attr.attr,
389 &sensor_dev_attr_temp1_crit.dev_attr.attr,
390 &sensor_dev_attr_name.dev_attr.attr,
391 &sensor_dev_attr_update_rate.dev_attr.attr,
392 NULL
393};
394
395static const struct attribute_group hwmon_attrgroup = {
396 .attrs = hwmon_attributes,
397};
398
399static int
400nouveau_hwmon_init(struct drm_device *dev)
401{
402 struct drm_nouveau_private *dev_priv = dev->dev_private;
403 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
404 struct device *hwmon_dev;
405 int ret;
406
407 if (!pm->temp_get)
408 return -ENODEV;
409
410 hwmon_dev = hwmon_device_register(&dev->pdev->dev);
411 if (IS_ERR(hwmon_dev)) {
412 ret = PTR_ERR(hwmon_dev);
413 NV_ERROR(dev,
414 "Unable to register hwmon device: %d\n", ret);
415 return ret;
416 }
417 dev_set_drvdata(hwmon_dev, dev);
418 ret = sysfs_create_group(&hwmon_dev->kobj,
419 &hwmon_attrgroup);
420 if (ret) {
421 NV_ERROR(dev,
422 "Unable to create hwmon sysfs file: %d\n", ret);
423 hwmon_device_unregister(hwmon_dev);
424 return ret;
425 }
426
427 pm->hwmon = hwmon_dev;
428
429 return 0;
430}
431
432static void
433nouveau_hwmon_fini(struct drm_device *dev)
434{
435 struct drm_nouveau_private *dev_priv = dev->dev_private;
436 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
437
438 if (pm->hwmon) {
439 sysfs_remove_group(&pm->hwmon->kobj, &hwmon_attrgroup);
440 hwmon_device_unregister(pm->hwmon);
441 }
442}
443
444int
445nouveau_pm_init(struct drm_device *dev)
446{
447 struct drm_nouveau_private *dev_priv = dev->dev_private;
448 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
449 char info[256];
450 int ret, i;
451
452 nouveau_volt_init(dev);
453 nouveau_perf_init(dev);
454 nouveau_temp_init(dev);
455 nouveau_mem_timing_init(dev);
456
457 NV_INFO(dev, "%d available performance level(s)\n", pm->nr_perflvl);
458 for (i = 0; i < pm->nr_perflvl; i++) {
459 nouveau_pm_perflvl_info(&pm->perflvl[i], info, sizeof(info));
460 NV_INFO(dev, "%d: %s", pm->perflvl[i].id, info);
461 }
462
463 /* determine current ("boot") performance level */
464 ret = nouveau_pm_perflvl_get(dev, &pm->boot);
465 if (ret == 0) {
466 pm->cur = &pm->boot;
467
468 nouveau_pm_perflvl_info(&pm->boot, info, sizeof(info));
469 NV_INFO(dev, "c: %s", info);
470 }
471
472 /* switch performance levels now if requested */
473 if (nouveau_perflvl != NULL) {
474 ret = nouveau_pm_profile_set(dev, nouveau_perflvl);
475 if (ret) {
476 NV_ERROR(dev, "error setting perflvl \"%s\": %d\n",
477 nouveau_perflvl, ret);
478 }
479 }
480
481 nouveau_sysfs_init(dev);
482 nouveau_hwmon_init(dev);
483
484 return 0;
485}
486
487void
488nouveau_pm_fini(struct drm_device *dev)
489{
490 struct drm_nouveau_private *dev_priv = dev->dev_private;
491 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
492
493 if (pm->cur != &pm->boot)
494 nouveau_pm_perflvl_set(dev, &pm->boot);
495
496 nouveau_mem_timing_fini(dev);
497 nouveau_temp_fini(dev);
498 nouveau_perf_fini(dev);
499 nouveau_volt_fini(dev);
500
501 nouveau_hwmon_fini(dev);
502 nouveau_sysfs_fini(dev);
503}
504
505void
506nouveau_pm_resume(struct drm_device *dev)
507{
508 struct drm_nouveau_private *dev_priv = dev->dev_private;
509 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
510 struct nouveau_pm_level *perflvl;
511
512 if (pm->cur == &pm->boot)
513 return;
514
515 perflvl = pm->cur;
516 pm->cur = &pm->boot;
517 nouveau_pm_perflvl_set(dev, perflvl);
518}
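
nouveau_pm_get_perflvl_info() above recovers the enclosing nouveau_pm_level by casting the device_attribute pointer, which is only safe while dev_attr remains the structure's first member; container_of() expresses the same lookup without that layout constraint. An illustrative sketch with stand-in types:

#include <stddef.h>

struct fake_attr { const char *name; };

struct fake_level {
	struct fake_attr dev_attr;     /* first member, enabling the cast */
	int id;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static int level_id(struct fake_attr *a)
{
	/* layout-independent form of (struct nouveau_pm_level *)a */
	return container_of(a, struct fake_level, dev_attr)->id;
}
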
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.h b/drivers/gpu/drm/nouveau/nouveau_pm.h
new file mode 100644
index 000000000000..4a9838ddacec
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.h
@@ -0,0 +1,74 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#ifndef __NOUVEAU_PM_H__
26#define __NOUVEAU_PM_H__
27
28/* nouveau_pm.c */
29int nouveau_pm_init(struct drm_device *dev);
30void nouveau_pm_fini(struct drm_device *dev);
31void nouveau_pm_resume(struct drm_device *dev);
32
33/* nouveau_volt.c */
34void nouveau_volt_init(struct drm_device *);
35void nouveau_volt_fini(struct drm_device *);
36int nouveau_volt_vid_lookup(struct drm_device *, int voltage);
37int nouveau_volt_lvl_lookup(struct drm_device *, int vid);
38int nouveau_voltage_gpio_get(struct drm_device *);
39int nouveau_voltage_gpio_set(struct drm_device *, int voltage);
40
41/* nouveau_perf.c */
42void nouveau_perf_init(struct drm_device *);
43void nouveau_perf_fini(struct drm_device *);
44
45/* nouveau_mem.c */
46void nouveau_mem_timing_init(struct drm_device *);
47void nouveau_mem_timing_fini(struct drm_device *);
48
49/* nv04_pm.c */
50int nv04_pm_clock_get(struct drm_device *, u32 id);
51void *nv04_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *,
52 u32 id, int khz);
53void nv04_pm_clock_set(struct drm_device *, void *);
54
55/* nv50_pm.c */
56int nv50_pm_clock_get(struct drm_device *, u32 id);
57void *nv50_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *,
58 u32 id, int khz);
59void nv50_pm_clock_set(struct drm_device *, void *);
60
61/* nva3_pm.c */
62int nva3_pm_clock_get(struct drm_device *, u32 id);
63void *nva3_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *,
64 u32 id, int khz);
65void nva3_pm_clock_set(struct drm_device *, void *);
66
67/* nouveau_temp.c */
68void nouveau_temp_init(struct drm_device *dev);
69void nouveau_temp_fini(struct drm_device *dev);
70void nouveau_temp_safety_checks(struct drm_device *dev);
71int nv40_temp_get(struct drm_device *dev);
72int nv84_temp_get(struct drm_device *dev);
73
74#endif
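
Each chipset family declared above exports the same three-step clock interface: clock_get() reads the current frequency, clock_pre() validates a request and returns opaque per-chipset state (NULL to skip, or an ERR_PTR on failure), and clock_set() commits that state. A hedged userspace sketch of a caller honouring the contract, mirroring nouveau_pm_clock_set(); the stand-ins for ERR_PTR and the state type are made up:

#include <stdio.h>

#define MAX_ERRNO  4095
#define IS_ERR(p)  ((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)
#define PTR_ERR(p) ((long)(p))

struct clk_state { unsigned khz; };    /* opaque per-chipset state */

static struct clk_state *clock_pre(unsigned khz)
{
	static struct clk_state s;
	if (!khz)
		return NULL;           /* khz == 0 means nothing to do */
	s.khz = khz;
	return &s;
}

static void clock_set(struct clk_state *s)
{
	printf("committing %u kHz\n", s->khz);
}

static int set_clock(unsigned khz)
{
	struct clk_state *pre = clock_pre(khz);
	if (IS_ERR(pre))
		return (int)PTR_ERR(pre);
	if (pre)                       /* NULL means skip, not error */
		clock_set(pre);
	return 0;
}
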
diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.c b/drivers/gpu/drm/nouveau/nouveau_ramht.c
new file mode 100644
index 000000000000..7f16697cc96c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_ramht.c
@@ -0,0 +1,289 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26
27#include "nouveau_drv.h"
28#include "nouveau_ramht.h"
29
30static u32
31nouveau_ramht_hash_handle(struct nouveau_channel *chan, u32 handle)
32{
33 struct drm_device *dev = chan->dev;
34 struct drm_nouveau_private *dev_priv = dev->dev_private;
35 struct nouveau_ramht *ramht = chan->ramht;
36 u32 hash = 0;
37 int i;
38
39 NV_DEBUG(dev, "ch%d handle=0x%08x\n", chan->id, handle);
40
41 for (i = 32; i > 0; i -= ramht->bits) {
42 hash ^= (handle & ((1 << ramht->bits) - 1));
43 handle >>= ramht->bits;
44 }
45
46 if (dev_priv->card_type < NV_50)
47 hash ^= chan->id << (ramht->bits - 4);
48 hash <<= 3;
49
50 NV_DEBUG(dev, "hash=0x%08x\n", hash);
51 return hash;
52}
53
54static int
55nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht,
56 u32 offset)
57{
58 struct drm_nouveau_private *dev_priv = dev->dev_private;
59 u32 ctx = nv_ro32(ramht, offset + 4);
60
61 if (dev_priv->card_type < NV_40)
62 return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0);
63 return (ctx != 0);
64}
65
66static int
67nouveau_ramht_entry_same_channel(struct nouveau_channel *chan,
68 struct nouveau_gpuobj *ramht, u32 offset)
69{
70 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
71 u32 ctx = nv_ro32(ramht, offset + 4);
72
73 if (dev_priv->card_type >= NV_50)
74 return true;
75 else if (dev_priv->card_type >= NV_40)
76 return chan->id ==
77 ((ctx >> NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) & 0x1f);
78 else
79 return chan->id ==
80 ((ctx >> NV_RAMHT_CONTEXT_CHANNEL_SHIFT) & 0x1f);
81}
82
83int
84nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle,
85 struct nouveau_gpuobj *gpuobj)
86{
87 struct drm_device *dev = chan->dev;
88 struct drm_nouveau_private *dev_priv = dev->dev_private;
89 struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
90 struct nouveau_ramht_entry *entry;
91 struct nouveau_gpuobj *ramht = chan->ramht->gpuobj;
92 unsigned long flags;
93 u32 ctx, co, ho;
94
95 if (nouveau_ramht_find(chan, handle))
96 return -EEXIST;
97
98 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
99 if (!entry)
100 return -ENOMEM;
101 entry->channel = chan;
102 entry->gpuobj = NULL;
103 entry->handle = handle;
104 nouveau_gpuobj_ref(gpuobj, &entry->gpuobj);
105
106 if (dev_priv->card_type < NV_40) {
107 ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->cinst >> 4) |
108 (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
109 (gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
110 } else
111 if (dev_priv->card_type < NV_50) {
112 ctx = (gpuobj->cinst >> 4) |
113 (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
114 (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
115 } else {
116 if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
117 ctx = (gpuobj->cinst << 10) | 2;
118 } else {
119 ctx = (gpuobj->cinst >> 4) |
120 ((gpuobj->engine <<
121 NV40_RAMHT_CONTEXT_ENGINE_SHIFT));
122 }
123 }
124
125 spin_lock_irqsave(&chan->ramht->lock, flags);
126 list_add(&entry->head, &chan->ramht->entries);
127
128 co = ho = nouveau_ramht_hash_handle(chan, handle);
129 do {
130 if (!nouveau_ramht_entry_valid(dev, ramht, co)) {
131 NV_DEBUG(dev,
132 "insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
133 chan->id, co, handle, ctx);
134 nv_wo32(ramht, co + 0, handle);
135 nv_wo32(ramht, co + 4, ctx);
136
137 spin_unlock_irqrestore(&chan->ramht->lock, flags);
138 instmem->flush(dev);
139 return 0;
140 }
141 NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n",
142 chan->id, co, nv_ro32(ramht, co));
143
144 co += 8;
145 if (co >= ramht->size)
146 co = 0;
147 } while (co != ho);
148
149 NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id);
150 list_del(&entry->head);
151 spin_unlock_irqrestore(&chan->ramht->lock, flags);
152 kfree(entry);
153 return -ENOMEM;
154}
155
156static void
157nouveau_ramht_remove_locked(struct nouveau_channel *chan, u32 handle)
158{
159 struct drm_device *dev = chan->dev;
160 struct drm_nouveau_private *dev_priv = dev->dev_private;
161 struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
162 struct nouveau_gpuobj *ramht = chan->ramht->gpuobj;
163 struct nouveau_ramht_entry *entry, *tmp;
164 u32 co, ho;
165
166 list_for_each_entry_safe(entry, tmp, &chan->ramht->entries, head) {
167 if (entry->channel != chan || entry->handle != handle)
168 continue;
169
170 nouveau_gpuobj_ref(NULL, &entry->gpuobj);
171 list_del(&entry->head);
172 kfree(entry);
173 break;
174 }
175
176 co = ho = nouveau_ramht_hash_handle(chan, handle);
177 do {
178 if (nouveau_ramht_entry_valid(dev, ramht, co) &&
179 nouveau_ramht_entry_same_channel(chan, ramht, co) &&
180 (handle == nv_ro32(ramht, co))) {
181 NV_DEBUG(dev,
182 "remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
183 chan->id, co, handle, nv_ro32(ramht, co + 4));
184 nv_wo32(ramht, co + 0, 0x00000000);
185 nv_wo32(ramht, co + 4, 0x00000000);
186 instmem->flush(dev);
187 return;
188 }
189
190 co += 8;
191 if (co >= ramht->size)
192 co = 0;
193 } while (co != ho);
194
195 NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n",
196 chan->id, handle);
197}
198
199void
200nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle)
201{
202 struct nouveau_ramht *ramht = chan->ramht;
203 unsigned long flags;
204
205 spin_lock_irqsave(&ramht->lock, flags);
206 nouveau_ramht_remove_locked(chan, handle);
207 spin_unlock_irqrestore(&ramht->lock, flags);
208}
209
210struct nouveau_gpuobj *
211nouveau_ramht_find(struct nouveau_channel *chan, u32 handle)
212{
213 struct nouveau_ramht *ramht = chan->ramht;
214 struct nouveau_ramht_entry *entry;
215 struct nouveau_gpuobj *gpuobj = NULL;
216 unsigned long flags;
217
218 if (unlikely(!chan->ramht))
219 return NULL;
220
221 spin_lock_irqsave(&ramht->lock, flags);
222 list_for_each_entry(entry, &chan->ramht->entries, head) {
223 if (entry->channel == chan && entry->handle == handle) {
224 gpuobj = entry->gpuobj;
225 break;
226 }
227 }
228 spin_unlock_irqrestore(&ramht->lock, flags);
229
230 return gpuobj;
231}
232
233int
234nouveau_ramht_new(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
235 struct nouveau_ramht **pramht)
236{
237 struct nouveau_ramht *ramht;
238
239 ramht = kzalloc(sizeof(*ramht), GFP_KERNEL);
240 if (!ramht)
241 return -ENOMEM;
242
243 ramht->dev = dev;
244 kref_init(&ramht->refcount);
245 ramht->bits = drm_order(gpuobj->size / 8);
246 INIT_LIST_HEAD(&ramht->entries);
247 spin_lock_init(&ramht->lock);
248 nouveau_gpuobj_ref(gpuobj, &ramht->gpuobj);
249
250 *pramht = ramht;
251 return 0;
252}
253
254static void
255nouveau_ramht_del(struct kref *ref)
256{
257 struct nouveau_ramht *ramht =
258 container_of(ref, struct nouveau_ramht, refcount);
259
260 nouveau_gpuobj_ref(NULL, &ramht->gpuobj);
261 kfree(ramht);
262}
263
264void
265nouveau_ramht_ref(struct nouveau_ramht *ref, struct nouveau_ramht **ptr,
266 struct nouveau_channel *chan)
267{
268 struct nouveau_ramht_entry *entry, *tmp;
269 struct nouveau_ramht *ramht;
270 unsigned long flags;
271
272 if (ref)
273 kref_get(&ref->refcount);
274
275 ramht = *ptr;
276 if (ramht) {
277 spin_lock_irqsave(&ramht->lock, flags);
278 list_for_each_entry_safe(entry, tmp, &ramht->entries, head) {
279 if (entry->channel != chan)
280 continue;
281
282 nouveau_ramht_remove_locked(chan, entry->handle);
283 }
284 spin_unlock_irqrestore(&ramht->lock, flags);
285
286 kref_put(&ramht->refcount, nouveau_ramht_del);
287 }
288 *ptr = ref;
289}
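
For reference, the hash above folds the handle in ramht->bits-sized chunks, mixes in the channel id on pre-NV50 chips, and scales the result to the byte offset of an 8-byte entry; collisions then probe forward 8 bytes at a time, wrapping at ramht->size. A self-contained replica of the fold, assuming a 4096-byte RAMHT (512 entries, so drm_order(4096 / 8) gives bits == 9):

#include <stdint.h>
#include <stdio.h>

static uint32_t ramht_hash(uint32_t handle, int bits, int chid)
{
	uint32_t hash = 0;
	int i;

	for (i = 32; i > 0; i -= bits) {
		hash ^= handle & ((1 << bits) - 1);
		handle >>= bits;
	}
	hash ^= chid << (bits - 4);    /* pre-NV50 cards only */
	return hash << 3;              /* byte offset of 8-byte entry */
}

int main(void)
{
	printf("0x%08x\n", ramht_hash(0xbeef0201, 9, 1));
	return 0;
}
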
diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.h b/drivers/gpu/drm/nouveau/nouveau_ramht.h
new file mode 100644
index 000000000000..b79cb5e1a8f1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_ramht.h
@@ -0,0 +1,55 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#ifndef __NOUVEAU_RAMHT_H__
26#define __NOUVEAU_RAMHT_H__
27
28struct nouveau_ramht_entry {
29 struct list_head head;
30 struct nouveau_channel *channel;
31 struct nouveau_gpuobj *gpuobj;
32 u32 handle;
33};
34
35struct nouveau_ramht {
36 struct drm_device *dev;
37 struct kref refcount;
38 spinlock_t lock;
39 struct nouveau_gpuobj *gpuobj;
40 struct list_head entries;
41 int bits;
42};
43
44extern int nouveau_ramht_new(struct drm_device *, struct nouveau_gpuobj *,
45 struct nouveau_ramht **);
46extern void nouveau_ramht_ref(struct nouveau_ramht *, struct nouveau_ramht **,
47 struct nouveau_channel *unref_channel);
48
49extern int nouveau_ramht_insert(struct nouveau_channel *, u32 handle,
50 struct nouveau_gpuobj *);
51extern void nouveau_ramht_remove(struct nouveau_channel *, u32 handle);
52extern struct nouveau_gpuobj *
53nouveau_ramht_find(struct nouveau_channel *chan, u32 handle);
54
55#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
index 21a6e453b975..1b42541ca9e5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_reg.h
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -551,6 +551,8 @@
551#define NV10_PFIFO_CACHE1_DMA_SUBROUTINE 0x0000324C 551#define NV10_PFIFO_CACHE1_DMA_SUBROUTINE 0x0000324C
552#define NV03_PFIFO_CACHE1_PULL0 0x00003240 552#define NV03_PFIFO_CACHE1_PULL0 0x00003240
553#define NV04_PFIFO_CACHE1_PULL0 0x00003250 553#define NV04_PFIFO_CACHE1_PULL0 0x00003250
554# define NV04_PFIFO_CACHE1_PULL0_HASH_FAILED 0x00000010
555# define NV04_PFIFO_CACHE1_PULL0_HASH_BUSY 0x00001000
554#define NV03_PFIFO_CACHE1_PULL1 0x00003250 556#define NV03_PFIFO_CACHE1_PULL1 0x00003250
555#define NV04_PFIFO_CACHE1_PULL1 0x00003254 557#define NV04_PFIFO_CACHE1_PULL1 0x00003254
556#define NV04_PFIFO_CACHE1_HASH 0x00003258 558#define NV04_PFIFO_CACHE1_HASH 0x00003258
@@ -785,15 +787,12 @@
785#define NV50_PDISPLAY_DAC_MODE_CTRL_C(i) (0x00610b5c + (i) * 0x8) 787#define NV50_PDISPLAY_DAC_MODE_CTRL_C(i) (0x00610b5c + (i) * 0x8)
786#define NV50_PDISPLAY_SOR_MODE_CTRL_P(i) (0x00610b70 + (i) * 0x8) 788#define NV50_PDISPLAY_SOR_MODE_CTRL_P(i) (0x00610b70 + (i) * 0x8)
787#define NV50_PDISPLAY_SOR_MODE_CTRL_C(i) (0x00610b74 + (i) * 0x8) 789#define NV50_PDISPLAY_SOR_MODE_CTRL_C(i) (0x00610b74 + (i) * 0x8)
790#define NV50_PDISPLAY_EXT_MODE_CTRL_P(i) (0x00610b80 + (i) * 0x8)
791#define NV50_PDISPLAY_EXT_MODE_CTRL_C(i) (0x00610b84 + (i) * 0x8)
788#define NV50_PDISPLAY_DAC_MODE_CTRL2_P(i) (0x00610bdc + (i) * 0x8) 792#define NV50_PDISPLAY_DAC_MODE_CTRL2_P(i) (0x00610bdc + (i) * 0x8)
789#define NV50_PDISPLAY_DAC_MODE_CTRL2_C(i) (0x00610be0 + (i) * 0x8) 793#define NV50_PDISPLAY_DAC_MODE_CTRL2_C(i) (0x00610be0 + (i) * 0x8)
790
791#define NV90_PDISPLAY_SOR_MODE_CTRL_P(i) (0x00610794 + (i) * 0x8) 794#define NV90_PDISPLAY_SOR_MODE_CTRL_P(i) (0x00610794 + (i) * 0x8)
792#define NV90_PDISPLAY_SOR_MODE_CTRL_C(i) (0x00610798 + (i) * 0x8) 795#define NV90_PDISPLAY_SOR_MODE_CTRL_C(i) (0x00610798 + (i) * 0x8)
793#define NV90_PDISPLAY_DAC_MODE_CTRL_P(i) (0x00610b58 + (i) * 0x8)
794#define NV90_PDISPLAY_DAC_MODE_CTRL_C(i) (0x00610b5c + (i) * 0x8)
795#define NV90_PDISPLAY_DAC_MODE_CTRL2_P(i) (0x00610b80 + (i) * 0x8)
796#define NV90_PDISPLAY_DAC_MODE_CTRL2_C(i) (0x00610b84 + (i) * 0x8)
797 796
798#define NV50_PDISPLAY_CRTC_CLK 0x00614000 797#define NV50_PDISPLAY_CRTC_CLK 0x00614000
799#define NV50_PDISPLAY_CRTC_CLK_CTRL1(i) ((i) * 0x800 + 0x614100) 798#define NV50_PDISPLAY_CRTC_CLK_CTRL1(i) ((i) * 0x800 + 0x614100)
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 6b9187d7f67d..288bacac7e5a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -95,9 +95,9 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
95 struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma; 95 struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
96 unsigned i, j, pte; 96 unsigned i, j, pte;
97 97
98 NV_DEBUG(dev, "pg=0x%lx\n", mem->mm_node->start); 98 NV_DEBUG(dev, "pg=0x%lx\n", mem->start);
99 99
100 pte = nouveau_sgdma_pte(nvbe->dev, mem->mm_node->start << PAGE_SHIFT); 100 pte = nouveau_sgdma_pte(nvbe->dev, mem->start << PAGE_SHIFT);
101 nvbe->pte_start = pte; 101 nvbe->pte_start = pte;
102 for (i = 0; i < nvbe->nr_pages; i++) { 102 for (i = 0; i < nvbe->nr_pages; i++) {
103 dma_addr_t dma_offset = nvbe->pages[i]; 103 dma_addr_t dma_offset = nvbe->pages[i];
@@ -105,11 +105,13 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
105 uint32_t offset_h = upper_32_bits(dma_offset); 105 uint32_t offset_h = upper_32_bits(dma_offset);
106 106
107 for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) { 107 for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
108 if (dev_priv->card_type < NV_50) 108 if (dev_priv->card_type < NV_50) {
109 nv_wo32(dev, gpuobj, pte++, offset_l | 3); 109 nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
110 else { 110 pte += 1;
111 nv_wo32(dev, gpuobj, pte++, offset_l | 0x21); 111 } else {
112 nv_wo32(dev, gpuobj, pte++, offset_h & 0xff); 112 nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 0x21);
113 nv_wo32(gpuobj, (pte * 4) + 4, offset_h & 0xff);
114 pte += 2;
113 } 115 }
114 116
115 dma_offset += NV_CTXDMA_PAGE_SIZE; 117 dma_offset += NV_CTXDMA_PAGE_SIZE;
@@ -145,11 +147,13 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
145 dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus; 147 dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus;
146 148
147 for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) { 149 for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
148 if (dev_priv->card_type < NV_50) 150 if (dev_priv->card_type < NV_50) {
149 nv_wo32(dev, gpuobj, pte++, dma_offset | 3); 151 nv_wo32(gpuobj, (pte * 4) + 0, dma_offset | 3);
150 else { 152 pte += 1;
151 nv_wo32(dev, gpuobj, pte++, dma_offset | 0x21); 153 } else {
152 nv_wo32(dev, gpuobj, pte++, 0x00000000); 154 nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
155 nv_wo32(gpuobj, (pte * 4) + 4, 0x00000000);
156 pte += 2;
153 } 157 }
154 158
155 dma_offset += NV_CTXDMA_PAGE_SIZE; 159 dma_offset += NV_CTXDMA_PAGE_SIZE;
@@ -230,7 +234,6 @@ nouveau_sgdma_init(struct drm_device *dev)
230 } 234 }
231 235
232 ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16, 236 ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
233 NVOBJ_FLAG_ALLOW_NO_REFS |
234 NVOBJ_FLAG_ZERO_ALLOC | 237 NVOBJ_FLAG_ZERO_ALLOC |
235 NVOBJ_FLAG_ZERO_FREE, &gpuobj); 238 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
236 if (ret) { 239 if (ret) {
@@ -239,9 +242,9 @@ nouveau_sgdma_init(struct drm_device *dev)
239 } 242 }
240 243
241 dev_priv->gart_info.sg_dummy_page = 244 dev_priv->gart_info.sg_dummy_page =
242 alloc_page(GFP_KERNEL|__GFP_DMA32); 245 alloc_page(GFP_KERNEL|__GFP_DMA32|__GFP_ZERO);
243 if (!dev_priv->gart_info.sg_dummy_page) { 246 if (!dev_priv->gart_info.sg_dummy_page) {
244 nouveau_gpuobj_del(dev, &gpuobj); 247 nouveau_gpuobj_ref(NULL, &gpuobj);
245 return -ENOMEM; 248 return -ENOMEM;
246 } 249 }
247 250
@@ -250,29 +253,34 @@ nouveau_sgdma_init(struct drm_device *dev)
250 pci_map_page(pdev, dev_priv->gart_info.sg_dummy_page, 0, 253 pci_map_page(pdev, dev_priv->gart_info.sg_dummy_page, 0,
251 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 254 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
252 if (pci_dma_mapping_error(pdev, dev_priv->gart_info.sg_dummy_bus)) { 255 if (pci_dma_mapping_error(pdev, dev_priv->gart_info.sg_dummy_bus)) {
253 nouveau_gpuobj_del(dev, &gpuobj); 256 nouveau_gpuobj_ref(NULL, &gpuobj);
254 return -EFAULT; 257 return -EFAULT;
255 } 258 }
256 259
257 if (dev_priv->card_type < NV_50) { 260 if (dev_priv->card_type < NV_50) {
 261 /* special case: allocated from the global instmem heap, so
 262 * cinst is invalid; we use it on all channels though, so
 263 * cinst needs to be valid. Set it the same as pinst.
 264 */
265 gpuobj->cinst = gpuobj->pinst;
266
258 /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and 267 /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
259 * confirmed to work on c51. Perhaps means NV_DMA_TARGET_PCIE 268 * confirmed to work on c51. Perhaps means NV_DMA_TARGET_PCIE
260 * on those cards? */ 269 * on those cards? */
261 nv_wo32(dev, gpuobj, 0, NV_CLASS_DMA_IN_MEMORY | 270 nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
262 (1 << 12) /* PT present */ | 271 (1 << 12) /* PT present */ |
263 (0 << 13) /* PT *not* linear */ | 272 (0 << 13) /* PT *not* linear */ |
264 (NV_DMA_ACCESS_RW << 14) | 273 (NV_DMA_ACCESS_RW << 14) |
265 (NV_DMA_TARGET_PCI << 16)); 274 (NV_DMA_TARGET_PCI << 16));
266 nv_wo32(dev, gpuobj, 1, aper_size - 1); 275 nv_wo32(gpuobj, 4, aper_size - 1);
267 for (i = 2; i < 2 + (aper_size >> 12); i++) { 276 for (i = 2; i < 2 + (aper_size >> 12); i++) {
268 nv_wo32(dev, gpuobj, i, 277 nv_wo32(gpuobj, i * 4,
269 dev_priv->gart_info.sg_dummy_bus | 3); 278 dev_priv->gart_info.sg_dummy_bus | 3);
270 } 279 }
271 } else { 280 } else {
272 for (i = 0; i < obj_size; i += 8) { 281 for (i = 0; i < obj_size; i += 8) {
273 nv_wo32(dev, gpuobj, (i+0)/4, 282 nv_wo32(gpuobj, i + 0, 0x00000000);
274 dev_priv->gart_info.sg_dummy_bus | 0x21); 283 nv_wo32(gpuobj, i + 4, 0x00000000);
275 nv_wo32(dev, gpuobj, (i+4)/4, 0);
276 } 284 }
277 } 285 }
278 dev_priv->engine.instmem.flush(dev); 286 dev_priv->engine.instmem.flush(dev);
@@ -298,7 +306,7 @@ nouveau_sgdma_takedown(struct drm_device *dev)
298 dev_priv->gart_info.sg_dummy_bus = 0; 306 dev_priv->gart_info.sg_dummy_bus = 0;
299 } 307 }
300 308
301 nouveau_gpuobj_del(dev, &dev_priv->gart_info.sg_ctxdma); 309 nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
302} 310}
303 311
304int 312int
@@ -308,9 +316,9 @@ nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
308 struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma; 316 struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
309 int pte; 317 int pte;
310 318
311 pte = (offset >> NV_CTXDMA_PAGE_SHIFT); 319 pte = (offset >> NV_CTXDMA_PAGE_SHIFT) << 2;
312 if (dev_priv->card_type < NV_50) { 320 if (dev_priv->card_type < NV_50) {
313 *page = nv_ro32(dev, gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK; 321 *page = nv_ro32(gpuobj, (pte + 8)) & ~NV_CTXDMA_PAGE_MASK;
314 return 0; 322 return 0;
315 } 323 }
316 324
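A minimal sketch (not part of the patch; the helper name is made up) of the page-table write convention the reworked calls above assume: nv_wo32() now takes a byte offset into the object, so PTE word n lives at n * 4; pre-NV50 uses one 32-bit word per page, NV50 a two-word pair. This is also why nouveau_sgdma_get_page() now shifts the PTE index left by 2.

static void sketch_write_pte(struct nouveau_gpuobj *pgt, u32 pte,
			     u64 addr, bool nv50)
{
	if (!nv50) {
		/* pre-NV50: one word per page; low bits 0x3 = present | RW */
		nv_wo32(pgt, (pte * 4) + 0, lower_32_bits(addr) | 3);
	} else {
		/* NV50: two words per page; 0x21 marks a present system-RAM page */
		nv_wo32(pgt, (pte * 4) + 0, lower_32_bits(addr) | 0x21);
		nv_wo32(pgt, (pte * 4) + 4, upper_32_bits(addr) & 0xff);
	}
}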
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 989322be3728..ed7757f14083 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -35,6 +35,8 @@
35#include "nouveau_drv.h" 35#include "nouveau_drv.h"
36#include "nouveau_drm.h" 36#include "nouveau_drm.h"
37#include "nouveau_fbcon.h" 37#include "nouveau_fbcon.h"
38#include "nouveau_ramht.h"
39#include "nouveau_pm.h"
38#include "nv50_display.h" 40#include "nv50_display.h"
39 41
40static void nouveau_stub_takedown(struct drm_device *dev) {} 42static void nouveau_stub_takedown(struct drm_device *dev) {}
@@ -78,7 +80,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
78 engine->fifo.disable = nv04_fifo_disable; 80 engine->fifo.disable = nv04_fifo_disable;
79 engine->fifo.enable = nv04_fifo_enable; 81 engine->fifo.enable = nv04_fifo_enable;
80 engine->fifo.reassign = nv04_fifo_reassign; 82 engine->fifo.reassign = nv04_fifo_reassign;
81 engine->fifo.cache_flush = nv04_fifo_cache_flush;
82 engine->fifo.cache_pull = nv04_fifo_cache_pull; 83 engine->fifo.cache_pull = nv04_fifo_cache_pull;
83 engine->fifo.channel_id = nv04_fifo_channel_id; 84 engine->fifo.channel_id = nv04_fifo_channel_id;
84 engine->fifo.create_context = nv04_fifo_create_context; 85 engine->fifo.create_context = nv04_fifo_create_context;
@@ -95,6 +96,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
95 engine->gpio.get = NULL; 96 engine->gpio.get = NULL;
96 engine->gpio.set = NULL; 97 engine->gpio.set = NULL;
97 engine->gpio.irq_enable = NULL; 98 engine->gpio.irq_enable = NULL;
99 engine->pm.clock_get = nv04_pm_clock_get;
100 engine->pm.clock_pre = nv04_pm_clock_pre;
101 engine->pm.clock_set = nv04_pm_clock_set;
98 break; 102 break;
99 case 0x10: 103 case 0x10:
100 engine->instmem.init = nv04_instmem_init; 104 engine->instmem.init = nv04_instmem_init;
@@ -130,7 +134,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
130 engine->fifo.disable = nv04_fifo_disable; 134 engine->fifo.disable = nv04_fifo_disable;
131 engine->fifo.enable = nv04_fifo_enable; 135 engine->fifo.enable = nv04_fifo_enable;
132 engine->fifo.reassign = nv04_fifo_reassign; 136 engine->fifo.reassign = nv04_fifo_reassign;
133 engine->fifo.cache_flush = nv04_fifo_cache_flush;
134 engine->fifo.cache_pull = nv04_fifo_cache_pull; 137 engine->fifo.cache_pull = nv04_fifo_cache_pull;
135 engine->fifo.channel_id = nv10_fifo_channel_id; 138 engine->fifo.channel_id = nv10_fifo_channel_id;
136 engine->fifo.create_context = nv10_fifo_create_context; 139 engine->fifo.create_context = nv10_fifo_create_context;
@@ -147,6 +150,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
147 engine->gpio.get = nv10_gpio_get; 150 engine->gpio.get = nv10_gpio_get;
148 engine->gpio.set = nv10_gpio_set; 151 engine->gpio.set = nv10_gpio_set;
149 engine->gpio.irq_enable = NULL; 152 engine->gpio.irq_enable = NULL;
153 engine->pm.clock_get = nv04_pm_clock_get;
154 engine->pm.clock_pre = nv04_pm_clock_pre;
155 engine->pm.clock_set = nv04_pm_clock_set;
150 break; 156 break;
151 case 0x20: 157 case 0x20:
152 engine->instmem.init = nv04_instmem_init; 158 engine->instmem.init = nv04_instmem_init;
@@ -182,7 +188,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
182 engine->fifo.disable = nv04_fifo_disable; 188 engine->fifo.disable = nv04_fifo_disable;
183 engine->fifo.enable = nv04_fifo_enable; 189 engine->fifo.enable = nv04_fifo_enable;
184 engine->fifo.reassign = nv04_fifo_reassign; 190 engine->fifo.reassign = nv04_fifo_reassign;
185 engine->fifo.cache_flush = nv04_fifo_cache_flush;
186 engine->fifo.cache_pull = nv04_fifo_cache_pull; 191 engine->fifo.cache_pull = nv04_fifo_cache_pull;
187 engine->fifo.channel_id = nv10_fifo_channel_id; 192 engine->fifo.channel_id = nv10_fifo_channel_id;
188 engine->fifo.create_context = nv10_fifo_create_context; 193 engine->fifo.create_context = nv10_fifo_create_context;
@@ -199,6 +204,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
199 engine->gpio.get = nv10_gpio_get; 204 engine->gpio.get = nv10_gpio_get;
200 engine->gpio.set = nv10_gpio_set; 205 engine->gpio.set = nv10_gpio_set;
201 engine->gpio.irq_enable = NULL; 206 engine->gpio.irq_enable = NULL;
207 engine->pm.clock_get = nv04_pm_clock_get;
208 engine->pm.clock_pre = nv04_pm_clock_pre;
209 engine->pm.clock_set = nv04_pm_clock_set;
202 break; 210 break;
203 case 0x30: 211 case 0x30:
204 engine->instmem.init = nv04_instmem_init; 212 engine->instmem.init = nv04_instmem_init;
@@ -234,7 +242,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
234 engine->fifo.disable = nv04_fifo_disable; 242 engine->fifo.disable = nv04_fifo_disable;
235 engine->fifo.enable = nv04_fifo_enable; 243 engine->fifo.enable = nv04_fifo_enable;
236 engine->fifo.reassign = nv04_fifo_reassign; 244 engine->fifo.reassign = nv04_fifo_reassign;
237 engine->fifo.cache_flush = nv04_fifo_cache_flush;
238 engine->fifo.cache_pull = nv04_fifo_cache_pull; 245 engine->fifo.cache_pull = nv04_fifo_cache_pull;
239 engine->fifo.channel_id = nv10_fifo_channel_id; 246 engine->fifo.channel_id = nv10_fifo_channel_id;
240 engine->fifo.create_context = nv10_fifo_create_context; 247 engine->fifo.create_context = nv10_fifo_create_context;
@@ -251,6 +258,11 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
251 engine->gpio.get = nv10_gpio_get; 258 engine->gpio.get = nv10_gpio_get;
252 engine->gpio.set = nv10_gpio_set; 259 engine->gpio.set = nv10_gpio_set;
253 engine->gpio.irq_enable = NULL; 260 engine->gpio.irq_enable = NULL;
261 engine->pm.clock_get = nv04_pm_clock_get;
262 engine->pm.clock_pre = nv04_pm_clock_pre;
263 engine->pm.clock_set = nv04_pm_clock_set;
264 engine->pm.voltage_get = nouveau_voltage_gpio_get;
265 engine->pm.voltage_set = nouveau_voltage_gpio_set;
254 break; 266 break;
255 case 0x40: 267 case 0x40:
256 case 0x60: 268 case 0x60:
@@ -287,7 +299,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
287 engine->fifo.disable = nv04_fifo_disable; 299 engine->fifo.disable = nv04_fifo_disable;
288 engine->fifo.enable = nv04_fifo_enable; 300 engine->fifo.enable = nv04_fifo_enable;
289 engine->fifo.reassign = nv04_fifo_reassign; 301 engine->fifo.reassign = nv04_fifo_reassign;
290 engine->fifo.cache_flush = nv04_fifo_cache_flush;
291 engine->fifo.cache_pull = nv04_fifo_cache_pull; 302 engine->fifo.cache_pull = nv04_fifo_cache_pull;
292 engine->fifo.channel_id = nv10_fifo_channel_id; 303 engine->fifo.channel_id = nv10_fifo_channel_id;
293 engine->fifo.create_context = nv40_fifo_create_context; 304 engine->fifo.create_context = nv40_fifo_create_context;
@@ -304,6 +315,12 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
304 engine->gpio.get = nv10_gpio_get; 315 engine->gpio.get = nv10_gpio_get;
305 engine->gpio.set = nv10_gpio_set; 316 engine->gpio.set = nv10_gpio_set;
306 engine->gpio.irq_enable = NULL; 317 engine->gpio.irq_enable = NULL;
318 engine->pm.clock_get = nv04_pm_clock_get;
319 engine->pm.clock_pre = nv04_pm_clock_pre;
320 engine->pm.clock_set = nv04_pm_clock_set;
321 engine->pm.voltage_get = nouveau_voltage_gpio_get;
322 engine->pm.voltage_set = nouveau_voltage_gpio_set;
323 engine->pm.temp_get = nv40_temp_get;
307 break; 324 break;
308 case 0x50: 325 case 0x50:
309 case 0x80: /* gotta love NVIDIA's consistency.. */ 326 case 0x80: /* gotta love NVIDIA's consistency.. */
@@ -358,6 +375,27 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
358 engine->gpio.get = nv50_gpio_get; 375 engine->gpio.get = nv50_gpio_get;
359 engine->gpio.set = nv50_gpio_set; 376 engine->gpio.set = nv50_gpio_set;
360 engine->gpio.irq_enable = nv50_gpio_irq_enable; 377 engine->gpio.irq_enable = nv50_gpio_irq_enable;
378 switch (dev_priv->chipset) {
379 case 0xa3:
380 case 0xa5:
381 case 0xa8:
382 case 0xaf:
383 engine->pm.clock_get = nva3_pm_clock_get;
384 engine->pm.clock_pre = nva3_pm_clock_pre;
385 engine->pm.clock_set = nva3_pm_clock_set;
386 break;
387 default:
388 engine->pm.clock_get = nv50_pm_clock_get;
389 engine->pm.clock_pre = nv50_pm_clock_pre;
390 engine->pm.clock_set = nv50_pm_clock_set;
391 break;
392 }
393 engine->pm.voltage_get = nouveau_voltage_gpio_get;
394 engine->pm.voltage_set = nouveau_voltage_gpio_set;
395 if (dev_priv->chipset >= 0x84)
396 engine->pm.temp_get = nv84_temp_get;
397 else
398 engine->pm.temp_get = nv40_temp_get;
361 break; 399 break;
362 case 0xC0: 400 case 0xC0:
363 engine->instmem.init = nvc0_instmem_init; 401 engine->instmem.init = nvc0_instmem_init;
@@ -437,16 +475,14 @@ static int
437nouveau_card_init_channel(struct drm_device *dev) 475nouveau_card_init_channel(struct drm_device *dev)
438{ 476{
439 struct drm_nouveau_private *dev_priv = dev->dev_private; 477 struct drm_nouveau_private *dev_priv = dev->dev_private;
440 struct nouveau_gpuobj *gpuobj; 478 struct nouveau_gpuobj *gpuobj = NULL;
441 int ret; 479 int ret;
442 480
443 ret = nouveau_channel_alloc(dev, &dev_priv->channel, 481 ret = nouveau_channel_alloc(dev, &dev_priv->channel,
444 (struct drm_file *)-2, 482 (struct drm_file *)-2, NvDmaFB, NvDmaTT);
445 NvDmaFB, NvDmaTT);
446 if (ret) 483 if (ret)
447 return ret; 484 return ret;
448 485
449 gpuobj = NULL;
450 ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY, 486 ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
451 0, dev_priv->vram_size, 487 0, dev_priv->vram_size,
452 NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM, 488 NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM,
@@ -454,26 +490,25 @@ nouveau_card_init_channel(struct drm_device *dev)
454 if (ret) 490 if (ret)
455 goto out_err; 491 goto out_err;
456 492
457 ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaVRAM, 493 ret = nouveau_ramht_insert(dev_priv->channel, NvDmaVRAM, gpuobj);
458 gpuobj, NULL); 494 nouveau_gpuobj_ref(NULL, &gpuobj);
459 if (ret) 495 if (ret)
460 goto out_err; 496 goto out_err;
461 497
462 gpuobj = NULL;
463 ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0, 498 ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0,
464 dev_priv->gart_info.aper_size, 499 dev_priv->gart_info.aper_size,
465 NV_DMA_ACCESS_RW, &gpuobj, NULL); 500 NV_DMA_ACCESS_RW, &gpuobj, NULL);
466 if (ret) 501 if (ret)
467 goto out_err; 502 goto out_err;
468 503
469 ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaGART, 504 ret = nouveau_ramht_insert(dev_priv->channel, NvDmaGART, gpuobj);
470 gpuobj, NULL); 505 nouveau_gpuobj_ref(NULL, &gpuobj);
471 if (ret) 506 if (ret)
472 goto out_err; 507 goto out_err;
473 508
474 return 0; 509 return 0;
510
475out_err: 511out_err:
476 nouveau_gpuobj_del(dev, &gpuobj);
477 nouveau_channel_free(dev_priv->channel); 512 nouveau_channel_free(dev_priv->channel);
478 dev_priv->channel = NULL; 513 dev_priv->channel = NULL;
479 return ret; 514 return ret;
@@ -534,35 +569,28 @@ nouveau_card_init(struct drm_device *dev)
534 if (ret) 569 if (ret)
535 goto out_display_early; 570 goto out_display_early;
536 571
537 ret = nouveau_mem_detect(dev); 572 nouveau_pm_init(dev);
573
574 ret = nouveau_mem_vram_init(dev);
538 if (ret) 575 if (ret)
539 goto out_bios; 576 goto out_bios;
540 577
541 ret = nouveau_gpuobj_early_init(dev); 578 ret = nouveau_gpuobj_init(dev);
542 if (ret) 579 if (ret)
543 goto out_bios; 580 goto out_vram;
544 581
545 /* Initialise instance memory, must happen before mem_init so we
546 * know exactly how much VRAM we're able to use for "normal"
547 * purposes.
548 */
549 ret = engine->instmem.init(dev); 582 ret = engine->instmem.init(dev);
550 if (ret) 583 if (ret)
551 goto out_gpuobj_early; 584 goto out_gpuobj;
552 585
553 /* Setup the memory manager */ 586 ret = nouveau_mem_gart_init(dev);
554 ret = nouveau_mem_init(dev);
555 if (ret) 587 if (ret)
556 goto out_instmem; 588 goto out_instmem;
557 589
558 ret = nouveau_gpuobj_init(dev);
559 if (ret)
560 goto out_mem;
561
562 /* PMC */ 590 /* PMC */
563 ret = engine->mc.init(dev); 591 ret = engine->mc.init(dev);
564 if (ret) 592 if (ret)
565 goto out_gpuobj; 593 goto out_gart;
566 594
567 /* PGPIO */ 595 /* PGPIO */
568 ret = engine->gpio.init(dev); 596 ret = engine->gpio.init(dev);
@@ -611,9 +639,13 @@ nouveau_card_init(struct drm_device *dev)
611 /* what about PVIDEO/PCRTC/PRAMDAC etc? */ 639 /* what about PVIDEO/PCRTC/PRAMDAC etc? */
612 640
613 if (!engine->graph.accel_blocked) { 641 if (!engine->graph.accel_blocked) {
614 ret = nouveau_card_init_channel(dev); 642 ret = nouveau_fence_init(dev);
615 if (ret) 643 if (ret)
616 goto out_irq; 644 goto out_irq;
645
646 ret = nouveau_card_init_channel(dev);
647 if (ret)
648 goto out_fence;
617 } 649 }
618 650
619 ret = nouveau_backlight_init(dev); 651 ret = nouveau_backlight_init(dev);
@@ -624,6 +656,8 @@ nouveau_card_init(struct drm_device *dev)
624 drm_kms_helper_poll_init(dev); 656 drm_kms_helper_poll_init(dev);
625 return 0; 657 return 0;
626 658
659out_fence:
660 nouveau_fence_fini(dev);
627out_irq: 661out_irq:
628 drm_irq_uninstall(dev); 662 drm_irq_uninstall(dev);
629out_display: 663out_display:
@@ -642,16 +676,16 @@ out_gpio:
642 engine->gpio.takedown(dev); 676 engine->gpio.takedown(dev);
643out_mc: 677out_mc:
644 engine->mc.takedown(dev); 678 engine->mc.takedown(dev);
645out_gpuobj: 679out_gart:
646 nouveau_gpuobj_takedown(dev); 680 nouveau_mem_gart_fini(dev);
647out_mem:
648 nouveau_sgdma_takedown(dev);
649 nouveau_mem_close(dev);
650out_instmem: 681out_instmem:
651 engine->instmem.takedown(dev); 682 engine->instmem.takedown(dev);
652out_gpuobj_early: 683out_gpuobj:
653 nouveau_gpuobj_late_takedown(dev); 684 nouveau_gpuobj_takedown(dev);
685out_vram:
686 nouveau_mem_vram_fini(dev);
654out_bios: 687out_bios:
688 nouveau_pm_fini(dev);
655 nouveau_bios_takedown(dev); 689 nouveau_bios_takedown(dev);
656out_display_early: 690out_display_early:
657 engine->display.late_takedown(dev); 691 engine->display.late_takedown(dev);
@@ -667,7 +701,8 @@ static void nouveau_card_takedown(struct drm_device *dev)
667 701
668 nouveau_backlight_exit(dev); 702 nouveau_backlight_exit(dev);
669 703
670 if (dev_priv->channel) { 704 if (!engine->graph.accel_blocked) {
705 nouveau_fence_fini(dev);
671 nouveau_channel_free(dev_priv->channel); 706 nouveau_channel_free(dev_priv->channel);
672 dev_priv->channel = NULL; 707 dev_priv->channel = NULL;
673 } 708 }
@@ -686,15 +721,15 @@ static void nouveau_card_takedown(struct drm_device *dev)
686 ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM); 721 ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
687 ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT); 722 ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT);
688 mutex_unlock(&dev->struct_mutex); 723 mutex_unlock(&dev->struct_mutex);
689 nouveau_sgdma_takedown(dev); 724 nouveau_mem_gart_fini(dev);
690 725
691 nouveau_gpuobj_takedown(dev);
692 nouveau_mem_close(dev);
693 engine->instmem.takedown(dev); 726 engine->instmem.takedown(dev);
727 nouveau_gpuobj_takedown(dev);
728 nouveau_mem_vram_fini(dev);
694 729
695 drm_irq_uninstall(dev); 730 drm_irq_uninstall(dev);
696 731
697 nouveau_gpuobj_late_takedown(dev); 732 nouveau_pm_fini(dev);
698 nouveau_bios_takedown(dev); 733 nouveau_bios_takedown(dev);
699 734
700 vga_client_register(dev->pdev, NULL, NULL, NULL); 735 vga_client_register(dev->pdev, NULL, NULL, NULL);
@@ -1057,7 +1092,7 @@ bool nouveau_wait_until(struct drm_device *dev, uint64_t timeout,
1057/* Waits for PGRAPH to go completely idle */ 1092/* Waits for PGRAPH to go completely idle */
1058bool nouveau_wait_for_idle(struct drm_device *dev) 1093bool nouveau_wait_for_idle(struct drm_device *dev)
1059{ 1094{
1060 if (!nv_wait(NV04_PGRAPH_STATUS, 0xffffffff, 0x00000000)) { 1095 if (!nv_wait(dev, NV04_PGRAPH_STATUS, 0xffffffff, 0x00000000)) {
1061 NV_ERROR(dev, "PGRAPH idle timed out with status 0x%08x\n", 1096 NV_ERROR(dev, "PGRAPH idle timed out with status 0x%08x\n",
1062 nv_rd32(dev, NV04_PGRAPH_STATUS)); 1097 nv_rd32(dev, NV04_PGRAPH_STATUS));
1063 return false; 1098 return false;
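A hedged sketch of how the per-chipset pm hooks wired up above are meant to be called; the helper below is hypothetical, and PLL_CORE is assumed to be the clock id the pm core uses in this series.

static int sketch_get_core_clock(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;

	if (!pm->clock_get)
		return -ENODEV;	/* no pm support wired up for this chipset */

	return pm->clock_get(dev, PLL_CORE);
}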
diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c
new file mode 100644
index 000000000000..16bbbf1eff63
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_temp.c
@@ -0,0 +1,309 @@
1/*
2 * Copyright 2010 PathScale inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Martin Peres
23 */
24
25#include "drmP.h"
26
27#include "nouveau_drv.h"
28#include "nouveau_pm.h"
29
30static void
31nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp)
32{
33 struct drm_nouveau_private *dev_priv = dev->dev_private;
34 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
35 struct nouveau_pm_temp_sensor_constants *sensor = &pm->sensor_constants;
36 struct nouveau_pm_threshold_temp *temps = &pm->threshold_temp;
37 int i, headerlen, recordlen, entries;
38
39 if (!temp) {
40 NV_DEBUG(dev, "temperature table pointer invalid\n");
41 return;
42 }
43
 44 /* Set the default sensor's constants */
45 sensor->offset_constant = 0;
46 sensor->offset_mult = 1;
47 sensor->offset_div = 1;
48 sensor->slope_mult = 1;
49 sensor->slope_div = 1;
50
51 /* Set the default temperature thresholds */
52 temps->critical = 110;
53 temps->down_clock = 100;
54 temps->fan_boost = 90;
55
 56 /* Set the known default values to set up the temperature sensor */
57 if (dev_priv->card_type >= NV_40) {
58 switch (dev_priv->chipset) {
59 case 0x43:
60 sensor->offset_mult = 32060;
61 sensor->offset_div = 1000;
62 sensor->slope_mult = 792;
63 sensor->slope_div = 1000;
64 break;
65
66 case 0x44:
67 case 0x47:
68 case 0x4a:
69 sensor->offset_mult = 27839;
70 sensor->offset_div = 1000;
71 sensor->slope_mult = 780;
72 sensor->slope_div = 1000;
73 break;
74
75 case 0x46:
76 sensor->offset_mult = -24775;
77 sensor->offset_div = 100;
78 sensor->slope_mult = 467;
79 sensor->slope_div = 10000;
80 break;
81
82 case 0x49:
83 sensor->offset_mult = -25051;
84 sensor->offset_div = 100;
85 sensor->slope_mult = 458;
86 sensor->slope_div = 10000;
87 break;
88
89 case 0x4b:
90 sensor->offset_mult = -24088;
91 sensor->offset_div = 100;
92 sensor->slope_mult = 442;
93 sensor->slope_div = 10000;
94 break;
95
96 case 0x50:
97 sensor->offset_mult = -22749;
98 sensor->offset_div = 100;
99 sensor->slope_mult = 431;
100 sensor->slope_div = 10000;
101 break;
102 }
103 }
104
105 headerlen = temp[1];
106 recordlen = temp[2];
107 entries = temp[3];
108 temp = temp + headerlen;
109
110 /* Read the entries from the table */
111 for (i = 0; i < entries; i++) {
112 u16 value = ROM16(temp[1]);
113
114 switch (temp[0]) {
115 case 0x01:
116 if ((value & 0x8f) == 0)
117 sensor->offset_constant = (value >> 9) & 0x7f;
118 break;
119
120 case 0x04:
121 if ((value & 0xf00f) == 0xa000) /* core */
122 temps->critical = (value&0x0ff0) >> 4;
123 break;
124
125 case 0x07:
126 if ((value & 0xf00f) == 0xa000) /* core */
127 temps->down_clock = (value&0x0ff0) >> 4;
128 break;
129
130 case 0x08:
131 if ((value & 0xf00f) == 0xa000) /* core */
132 temps->fan_boost = (value&0x0ff0) >> 4;
133 break;
134
135 case 0x10:
136 sensor->offset_mult = value;
137 break;
138
139 case 0x11:
140 sensor->offset_div = value;
141 break;
142
143 case 0x12:
144 sensor->slope_mult = value;
145 break;
146
147 case 0x13:
148 sensor->slope_div = value;
149 break;
150 }
151 temp += recordlen;
152 }
153
154 nouveau_temp_safety_checks(dev);
155}
156
157static int
158nv40_sensor_setup(struct drm_device *dev)
159{
160 struct drm_nouveau_private *dev_priv = dev->dev_private;
161 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
162 struct nouveau_pm_temp_sensor_constants *sensor = &pm->sensor_constants;
163 u32 offset = sensor->offset_mult / sensor->offset_div;
164 u32 sensor_calibration;
165
166 /* set up the sensors */
167 sensor_calibration = 120 - offset - sensor->offset_constant;
168 sensor_calibration = sensor_calibration * sensor->slope_div /
169 sensor->slope_mult;
170
171 if (dev_priv->chipset >= 0x46)
172 sensor_calibration |= 0x80000000;
173 else
174 sensor_calibration |= 0x10000000;
175
176 nv_wr32(dev, 0x0015b0, sensor_calibration);
177
178 /* Wait for the sensor to update */
179 msleep(5);
180
181 /* read */
182 return nv_rd32(dev, 0x0015b4) & 0x1fff;
183}
184
185int
186nv40_temp_get(struct drm_device *dev)
187{
188 struct drm_nouveau_private *dev_priv = dev->dev_private;
189 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
190 struct nouveau_pm_temp_sensor_constants *sensor = &pm->sensor_constants;
191 int offset = sensor->offset_mult / sensor->offset_div;
192 int core_temp;
193
194 if (dev_priv->chipset >= 0x50) {
195 core_temp = nv_rd32(dev, 0x20008);
196 } else {
197 core_temp = nv_rd32(dev, 0x0015b4) & 0x1fff;
 198 /* Set up the sensor if the temperature is 0 */
199 if (core_temp == 0)
200 core_temp = nv40_sensor_setup(dev);
201 }
202
203 core_temp = core_temp * sensor->slope_mult / sensor->slope_div;
204 core_temp = core_temp + offset + sensor->offset_constant;
205
206 return core_temp;
207}
208
209int
210nv84_temp_get(struct drm_device *dev)
211{
212 return nv_rd32(dev, 0x20400);
213}
214
215void
216nouveau_temp_safety_checks(struct drm_device *dev)
217{
218 struct drm_nouveau_private *dev_priv = dev->dev_private;
219 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
220 struct nouveau_pm_threshold_temp *temps = &pm->threshold_temp;
221
222 if (temps->critical > 120)
223 temps->critical = 120;
224 else if (temps->critical < 80)
225 temps->critical = 80;
226
227 if (temps->down_clock > 110)
228 temps->down_clock = 110;
229 else if (temps->down_clock < 60)
230 temps->down_clock = 60;
231
232 if (temps->fan_boost > 100)
233 temps->fan_boost = 100;
234 else if (temps->fan_boost < 40)
235 temps->fan_boost = 40;
236}
237
238static bool
239probe_monitoring_device(struct nouveau_i2c_chan *i2c,
240 struct i2c_board_info *info)
241{
242 char modalias[16] = "i2c:";
243 struct i2c_client *client;
244
245 strlcat(modalias, info->type, sizeof(modalias));
246 request_module(modalias);
247
248 client = i2c_new_device(&i2c->adapter, info);
249 if (!client)
250 return false;
251
252 if (!client->driver || client->driver->detect(client, info)) {
253 i2c_unregister_device(client);
254 return false;
255 }
256
257 return true;
258}
259
260static void
261nouveau_temp_probe_i2c(struct drm_device *dev)
262{
263 struct drm_nouveau_private *dev_priv = dev->dev_private;
264 struct dcb_table *dcb = &dev_priv->vbios.dcb;
265 struct i2c_board_info info[] = {
266 { I2C_BOARD_INFO("w83l785ts", 0x2d) },
267 { I2C_BOARD_INFO("w83781d", 0x2d) },
268 { I2C_BOARD_INFO("f75375", 0x2e) },
269 { I2C_BOARD_INFO("adt7473", 0x2e) },
270 { I2C_BOARD_INFO("lm99", 0x4c) },
271 { }
272 };
273 int idx = (dcb->version >= 0x40 ?
274 dcb->i2c_default_indices & 0xf : 2);
275
276 nouveau_i2c_identify(dev, "monitoring device", info,
277 probe_monitoring_device, idx);
278}
279
280void
281nouveau_temp_init(struct drm_device *dev)
282{
283 struct drm_nouveau_private *dev_priv = dev->dev_private;
284 struct nvbios *bios = &dev_priv->vbios;
285 struct bit_entry P;
286 u8 *temp = NULL;
287
288 if (bios->type == NVBIOS_BIT) {
289 if (bit_table(dev, 'P', &P))
290 return;
291
292 if (P.version == 1)
293 temp = ROMPTR(bios, P.data[12]);
294 else if (P.version == 2)
295 temp = ROMPTR(bios, P.data[16]);
296 else
297 NV_WARN(dev, "unknown temp for BIT P %d\n", P.version);
298
299 nouveau_temp_vbios_parse(dev, temp);
300 }
301
302 nouveau_temp_probe_i2c(dev);
303}
304
305void
306nouveau_temp_fini(struct drm_device *dev)
307{
308
309}
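A worked example (a sketch, not part of the patch) of the conversion nv40_temp_get() performs, using the chipset 0x43 constants set in nouveau_temp_vbios_parse() above:

static int sketch_nv43_raw_to_celsius(int raw)
{
	const int slope_mult = 792, slope_div = 1000;	/* 0x43 defaults */
	const int offset_mult = 32060, offset_div = 1000;
	const int offset_constant = 0;

	/* same order of operations as nv40_temp_get() */
	return raw * slope_mult / slope_div
	       + offset_mult / offset_div
	       + offset_constant;
}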
diff --git a/drivers/gpu/drm/nouveau/nouveau_volt.c b/drivers/gpu/drm/nouveau/nouveau_volt.c
new file mode 100644
index 000000000000..04fdc00a67d5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_volt.c
@@ -0,0 +1,212 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26
27#include "nouveau_drv.h"
28#include "nouveau_pm.h"
29
30static const enum dcb_gpio_tag vidtag[] = { 0x04, 0x05, 0x06, 0x1a };
 31static int nr_vidtag = ARRAY_SIZE(vidtag);
32
33int
34nouveau_voltage_gpio_get(struct drm_device *dev)
35{
36 struct drm_nouveau_private *dev_priv = dev->dev_private;
37 struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
38 struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
39 u8 vid = 0;
40 int i;
41
42 for (i = 0; i < nr_vidtag; i++) {
43 if (!(volt->vid_mask & (1 << i)))
44 continue;
45
46 vid |= gpio->get(dev, vidtag[i]) << i;
47 }
48
49 return nouveau_volt_lvl_lookup(dev, vid);
50}
51
52int
53nouveau_voltage_gpio_set(struct drm_device *dev, int voltage)
54{
55 struct drm_nouveau_private *dev_priv = dev->dev_private;
56 struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
57 struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
58 int vid, i;
59
60 vid = nouveau_volt_vid_lookup(dev, voltage);
61 if (vid < 0)
62 return vid;
63
64 for (i = 0; i < nr_vidtag; i++) {
65 if (!(volt->vid_mask & (1 << i)))
66 continue;
67
68 gpio->set(dev, vidtag[i], !!(vid & (1 << i)));
69 }
70
71 return 0;
72}
73
74int
75nouveau_volt_vid_lookup(struct drm_device *dev, int voltage)
76{
77 struct drm_nouveau_private *dev_priv = dev->dev_private;
78 struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
79 int i;
80
81 for (i = 0; i < volt->nr_level; i++) {
82 if (volt->level[i].voltage == voltage)
83 return volt->level[i].vid;
84 }
85
86 return -ENOENT;
87}
88
89int
90nouveau_volt_lvl_lookup(struct drm_device *dev, int vid)
91{
92 struct drm_nouveau_private *dev_priv = dev->dev_private;
93 struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
94 int i;
95
96 for (i = 0; i < volt->nr_level; i++) {
97 if (volt->level[i].vid == vid)
98 return volt->level[i].voltage;
99 }
100
101 return -ENOENT;
102}
103
104void
105nouveau_volt_init(struct drm_device *dev)
106{
107 struct drm_nouveau_private *dev_priv = dev->dev_private;
108 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
109 struct nouveau_pm_voltage *voltage = &pm->voltage;
110 struct nvbios *bios = &dev_priv->vbios;
111 struct bit_entry P;
112 u8 *volt = NULL, *entry;
113 int i, headerlen, recordlen, entries, vidmask, vidshift;
114
115 if (bios->type == NVBIOS_BIT) {
116 if (bit_table(dev, 'P', &P))
117 return;
118
119 if (P.version == 1)
120 volt = ROMPTR(bios, P.data[16]);
121 else
122 if (P.version == 2)
123 volt = ROMPTR(bios, P.data[12]);
124 else {
125 NV_WARN(dev, "unknown volt for BIT P %d\n", P.version);
126 }
127 } else {
128 if (bios->data[bios->offset + 6] < 0x27) {
129 NV_DEBUG(dev, "BMP version too old for voltage\n");
130 return;
131 }
132
133 volt = ROMPTR(bios, bios->data[bios->offset + 0x98]);
134 }
135
136 if (!volt) {
137 NV_DEBUG(dev, "voltage table pointer invalid\n");
138 return;
139 }
140
141 switch (volt[0]) {
142 case 0x10:
143 case 0x11:
144 case 0x12:
145 headerlen = 5;
146 recordlen = volt[1];
147 entries = volt[2];
148 vidshift = 0;
149 vidmask = volt[4];
150 break;
151 case 0x20:
152 headerlen = volt[1];
153 recordlen = volt[3];
154 entries = volt[2];
155 vidshift = 0; /* could be vidshift like 0x30? */
156 vidmask = volt[5];
157 break;
158 case 0x30:
159 headerlen = volt[1];
160 recordlen = volt[2];
161 entries = volt[3];
162 vidshift = hweight8(volt[5]);
163 vidmask = volt[4];
164 break;
165 default:
166 NV_WARN(dev, "voltage table 0x%02x unknown\n", volt[0]);
167 return;
168 }
169
170 /* validate vid mask */
171 voltage->vid_mask = vidmask;
172 if (!voltage->vid_mask)
173 return;
174
175 i = 0;
176 while (vidmask) {
 177 if (i >= nr_vidtag) {
178 NV_DEBUG(dev, "vid bit %d unknown\n", i);
179 return;
180 }
181
182 if (!nouveau_bios_gpio_entry(dev, vidtag[i])) {
183 NV_DEBUG(dev, "vid bit %d has no gpio tag\n", i);
184 return;
185 }
186
187 vidmask >>= 1;
188 i++;
189 }
190
191 /* parse vbios entries into common format */
192 voltage->level = kcalloc(entries, sizeof(*voltage->level), GFP_KERNEL);
193 if (!voltage->level)
194 return;
195
196 entry = volt + headerlen;
197 for (i = 0; i < entries; i++, entry += recordlen) {
198 voltage->level[i].voltage = entry[0];
199 voltage->level[i].vid = entry[1] >> vidshift;
200 }
201 voltage->nr_level = entries;
202 voltage->supported = true;
203}
204
205void
206nouveau_volt_fini(struct drm_device *dev)
207{
208 struct drm_nouveau_private *dev_priv = dev->dev_private;
209 struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
210
211 kfree(volt->level);
212}
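To make the GPIO VID encoding concrete, a sketch assuming vid_mask = 0x3, i.e. GPIO tags 0x04 and 0x05 carry bits 0 and 1:

	u8 vid = 0;
	int voltage;

	vid |= gpio->get(dev, 0x04) << 0;	/* vidtag[0] -> bit 0 */
	vid |= gpio->get(dev, 0x05) << 1;	/* vidtag[1] -> bit 1 */

	/* map the composed vid back to a voltage via the VBIOS table */
	voltage = nouveau_volt_lvl_lookup(dev, vid);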
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 497df8765f28..ef480281afec 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -109,7 +109,7 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod
109 struct nouveau_pll_vals *pv = &regp->pllvals; 109 struct nouveau_pll_vals *pv = &regp->pllvals;
110 struct pll_lims pll_lim; 110 struct pll_lims pll_lim;
111 111
112 if (get_pll_limits(dev, nv_crtc->index ? VPLL2 : VPLL1, &pll_lim)) 112 if (get_pll_limits(dev, nv_crtc->index ? PLL_VPLL1 : PLL_VPLL0, &pll_lim))
113 return; 113 return;
114 114
115 /* NM2 == 0 is used to determine single stage mode on two stage plls */ 115 /* NM2 == 0 is used to determine single stage mode on two stage plls */
@@ -718,6 +718,7 @@ static void nv_crtc_destroy(struct drm_crtc *crtc)
718 718
719 drm_crtc_cleanup(crtc); 719 drm_crtc_cleanup(crtc);
720 720
721 nouveau_bo_unmap(nv_crtc->cursor.nvbo);
721 nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); 722 nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
722 kfree(nv_crtc); 723 kfree(nv_crtc);
723} 724}
@@ -826,7 +827,7 @@ nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
826 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FF_INDEX); 827 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FF_INDEX);
827 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FFLWM__INDEX); 828 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FFLWM__INDEX);
828 829
829 if (dev_priv->card_type >= NV_30) { 830 if (dev_priv->card_type >= NV_20) {
830 regp->CRTC[NV_CIO_CRE_47] = arb_lwm >> 8; 831 regp->CRTC[NV_CIO_CRE_47] = arb_lwm >> 8;
831 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_47); 832 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_47);
832 } 833 }
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
index ea3627041ecf..ba6423f2ffcc 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -291,6 +291,8 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
291 msleep(5); 291 msleep(5);
292 292
293 sample = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset); 293 sample = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
294 /* do it again just in case it's a residual current */
295 sample &= NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
294 296
295 temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL); 297 temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL);
296 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL, 298 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL,
@@ -343,22 +345,13 @@ static void nv04_dac_prepare(struct drm_encoder *encoder)
343{ 345{
344 struct drm_encoder_helper_funcs *helper = encoder->helper_private; 346 struct drm_encoder_helper_funcs *helper = encoder->helper_private;
345 struct drm_device *dev = encoder->dev; 347 struct drm_device *dev = encoder->dev;
346 struct drm_nouveau_private *dev_priv = dev->dev_private;
347 int head = nouveau_crtc(encoder->crtc)->index; 348 int head = nouveau_crtc(encoder->crtc)->index;
348 struct nv04_crtc_reg *crtcstate = dev_priv->mode_reg.crtc_reg;
349 349
350 helper->dpms(encoder, DRM_MODE_DPMS_OFF); 350 helper->dpms(encoder, DRM_MODE_DPMS_OFF);
351 351
352 nv04_dfp_disable(dev, head); 352 nv04_dfp_disable(dev, head);
353
354 /* Some NV4x have unknown values (0x3f, 0x50, 0x54, 0x6b, 0x79, 0x7f)
355 * at LCD__INDEX which we don't alter
356 */
357 if (!(crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX] & 0x44))
358 crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX] = 0;
359} 353}
360 354
361
362static void nv04_dac_mode_set(struct drm_encoder *encoder, 355static void nv04_dac_mode_set(struct drm_encoder *encoder,
363 struct drm_display_mode *mode, 356 struct drm_display_mode *mode,
364 struct drm_display_mode *adjusted_mode) 357 struct drm_display_mode *adjusted_mode)
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index 0d3206a7046c..c936403b26e2 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -104,6 +104,8 @@ void nv04_dfp_disable(struct drm_device *dev, int head)
104 } 104 }
105 /* don't inadvertently turn it on when state written later */ 105 /* don't inadvertently turn it on when state written later */
106 crtcstate[head].fp_control = FP_TG_CONTROL_OFF; 106 crtcstate[head].fp_control = FP_TG_CONTROL_OFF;
107 crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX] &=
108 ~NV_CIO_CRE_LCD_ROUTE_MASK;
107} 109}
108 110
109void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode) 111void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode)
@@ -253,26 +255,21 @@ static void nv04_dfp_prepare(struct drm_encoder *encoder)
253 255
254 nv04_dfp_prepare_sel_clk(dev, nv_encoder, head); 256 nv04_dfp_prepare_sel_clk(dev, nv_encoder, head);
255 257
256 /* Some NV4x have unknown values (0x3f, 0x50, 0x54, 0x6b, 0x79, 0x7f) 258 *cr_lcd = (*cr_lcd & ~NV_CIO_CRE_LCD_ROUTE_MASK) | 0x3;
257 * at LCD__INDEX which we don't alter 259
258 */ 260 if (nv_two_heads(dev)) {
259 if (!(*cr_lcd & 0x44)) { 261 if (nv_encoder->dcb->location == DCB_LOC_ON_CHIP)
260 *cr_lcd = 0x3; 262 *cr_lcd |= head ? 0x0 : 0x8;
261 263 else {
262 if (nv_two_heads(dev)) { 264 *cr_lcd |= (nv_encoder->dcb->or << 4) & 0x30;
263 if (nv_encoder->dcb->location == DCB_LOC_ON_CHIP) 265 if (nv_encoder->dcb->type == OUTPUT_LVDS)
264 *cr_lcd |= head ? 0x0 : 0x8; 266 *cr_lcd |= 0x30;
265 else { 267 if ((*cr_lcd & 0x30) == (*cr_lcd_oth & 0x30)) {
266 *cr_lcd |= (nv_encoder->dcb->or << 4) & 0x30; 268 /* avoid being connected to both crtcs */
267 if (nv_encoder->dcb->type == OUTPUT_LVDS) 269 *cr_lcd_oth &= ~0x30;
268 *cr_lcd |= 0x30; 270 NVWriteVgaCrtc(dev, head ^ 1,
269 if ((*cr_lcd & 0x30) == (*cr_lcd_oth & 0x30)) { 271 NV_CIO_CRE_LCD__INDEX,
270 /* avoid being connected to both crtcs */ 272 *cr_lcd_oth);
271 *cr_lcd_oth &= ~0x30;
272 NVWriteVgaCrtc(dev, head ^ 1,
273 NV_CIO_CRE_LCD__INDEX,
274 *cr_lcd_oth);
275 }
276 } 273 }
277 } 274 }
278 } 275 }
@@ -640,7 +637,7 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder)
640 get_tmds_slave(encoder)) 637 get_tmds_slave(encoder))
641 return; 638 return;
642 639
643 type = nouveau_i2c_identify(dev, "TMDS transmitter", info, 2); 640 type = nouveau_i2c_identify(dev, "TMDS transmitter", info, NULL, 2);
644 if (type < 0) 641 if (type < 0)
645 return; 642 return;
646 643
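The net effect of the nv04_dfp_prepare() rework above, as a sketch: instead of skipping the write whenever unknown bits (the 0x44 group) are set, only the routing field is rewritten and the unknown bits are preserved:

	*cr_lcd &= ~NV_CIO_CRE_LCD_ROUTE_MASK;	/* clear routing bits only */
	*cr_lcd |= 0x3;				/* enable, route to this encoder */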
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 1eeac4fae73d..33e4c9388bc1 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -25,6 +25,7 @@
25#include "drmP.h" 25#include "drmP.h"
26#include "nouveau_drv.h" 26#include "nouveau_drv.h"
27#include "nouveau_dma.h" 27#include "nouveau_dma.h"
28#include "nouveau_ramht.h"
28#include "nouveau_fbcon.h" 29#include "nouveau_fbcon.h"
29 30
30void 31void
@@ -169,11 +170,9 @@ nv04_fbcon_grobj_new(struct drm_device *dev, int class, uint32_t handle)
169 if (ret) 170 if (ret)
170 return ret; 171 return ret;
171 172
172 ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, handle, obj, NULL); 173 ret = nouveau_ramht_insert(dev_priv->channel, handle, obj);
173 if (ret) 174 nouveau_gpuobj_ref(NULL, &obj);
174 return ret; 175 return ret;
175
176 return 0;
177} 176}
178 177
179int 178int
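The refcounting idiom introduced in nv04_fbcon_grobj_new() recurs throughout the series: nouveau_ramht_insert() takes its own reference on success, so the caller drops its creation reference unconditionally. In general form (a sketch):

	ret = nouveau_ramht_insert(chan, handle, obj);
	nouveau_gpuobj_ref(NULL, &obj);	/* drop ours; RAMHT holds its own */
	return ret;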
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
index 06cedd99c26a..708293b7ddcd 100644
--- a/drivers/gpu/drm/nouveau/nv04_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv04_fifo.c
@@ -27,8 +27,9 @@
27#include "drmP.h" 27#include "drmP.h"
28#include "drm.h" 28#include "drm.h"
29#include "nouveau_drv.h" 29#include "nouveau_drv.h"
30#include "nouveau_ramht.h"
30 31
31#define NV04_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV04_RAMFC__SIZE)) 32#define NV04_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV04_RAMFC__SIZE))
32#define NV04_RAMFC__SIZE 32 33#define NV04_RAMFC__SIZE 32
33#define NV04_RAMFC_DMA_PUT 0x00 34#define NV04_RAMFC_DMA_PUT 0x00
34#define NV04_RAMFC_DMA_GET 0x04 35#define NV04_RAMFC_DMA_GET 0x04
@@ -38,10 +39,8 @@
38#define NV04_RAMFC_ENGINE 0x14 39#define NV04_RAMFC_ENGINE 0x14
39#define NV04_RAMFC_PULL1_ENGINE 0x18 40#define NV04_RAMFC_PULL1_ENGINE 0x18
40 41
41#define RAMFC_WR(offset, val) nv_wo32(dev, chan->ramfc->gpuobj, \ 42#define RAMFC_WR(offset, val) nv_wo32(chan->ramfc, NV04_RAMFC_##offset, (val))
42 NV04_RAMFC_##offset/4, (val)) 43#define RAMFC_RD(offset) nv_ro32(chan->ramfc, NV04_RAMFC_##offset)
43#define RAMFC_RD(offset) nv_ro32(dev, chan->ramfc->gpuobj, \
44 NV04_RAMFC_##offset/4)
45 44
46void 45void
47nv04_fifo_disable(struct drm_device *dev) 46nv04_fifo_disable(struct drm_device *dev)
@@ -72,37 +71,32 @@ nv04_fifo_reassign(struct drm_device *dev, bool enable)
72} 71}
73 72
74bool 73bool
75nv04_fifo_cache_flush(struct drm_device *dev)
76{
77 struct drm_nouveau_private *dev_priv = dev->dev_private;
78 struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
79 uint64_t start = ptimer->read(dev);
80
81 do {
82 if (nv_rd32(dev, NV03_PFIFO_CACHE1_GET) ==
83 nv_rd32(dev, NV03_PFIFO_CACHE1_PUT))
84 return true;
85
86 } while (ptimer->read(dev) - start < 100000000);
87
88 NV_ERROR(dev, "Timeout flushing the PFIFO cache.\n");
89
90 return false;
91}
92
93bool
94nv04_fifo_cache_pull(struct drm_device *dev, bool enable) 74nv04_fifo_cache_pull(struct drm_device *dev, bool enable)
95{ 75{
96 uint32_t pull = nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0); 76 int pull = nv_mask(dev, NV04_PFIFO_CACHE1_PULL0, 1, enable);
77
78 if (!enable) {
79 /* In some cases the PFIFO puller may be left in an
80 * inconsistent state if you try to stop it when it's
81 * busy translating handles. Sometimes you get a
82 * PFIFO_CACHE_ERROR, sometimes it just fails silently
83 * sending incorrect instance offsets to PGRAPH after
84 * it's started up again. To avoid the latter we
85 * invalidate the most recently calculated instance.
86 */
87 if (!nv_wait(dev, NV04_PFIFO_CACHE1_PULL0,
88 NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0))
89 NV_ERROR(dev, "Timeout idling the PFIFO puller.\n");
90
91 if (nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0) &
92 NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
93 nv_wr32(dev, NV03_PFIFO_INTR_0,
94 NV_PFIFO_INTR_CACHE_ERROR);
97 95
98 if (enable) {
99 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, pull | 1);
100 } else {
101 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, pull & ~1);
102 nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0); 96 nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
103 } 97 }
104 98
105 return !!(pull & 1); 99 return pull & 1;
106} 100}
107 101
108int 102int
@@ -130,7 +124,7 @@ nv04_fifo_create_context(struct nouveau_channel *chan)
130 NV04_RAMFC__SIZE, 124 NV04_RAMFC__SIZE,
131 NVOBJ_FLAG_ZERO_ALLOC | 125 NVOBJ_FLAG_ZERO_ALLOC |
132 NVOBJ_FLAG_ZERO_FREE, 126 NVOBJ_FLAG_ZERO_FREE,
133 NULL, &chan->ramfc); 127 &chan->ramfc);
134 if (ret) 128 if (ret)
135 return ret; 129 return ret;
136 130
@@ -139,7 +133,7 @@ nv04_fifo_create_context(struct nouveau_channel *chan)
139 /* Setup initial state */ 133 /* Setup initial state */
140 RAMFC_WR(DMA_PUT, chan->pushbuf_base); 134 RAMFC_WR(DMA_PUT, chan->pushbuf_base);
141 RAMFC_WR(DMA_GET, chan->pushbuf_base); 135 RAMFC_WR(DMA_GET, chan->pushbuf_base);
142 RAMFC_WR(DMA_INSTANCE, chan->pushbuf->instance >> 4); 136 RAMFC_WR(DMA_INSTANCE, chan->pushbuf->pinst >> 4);
143 RAMFC_WR(DMA_FETCH, (NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | 137 RAMFC_WR(DMA_FETCH, (NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
144 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | 138 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
145 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 | 139 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
@@ -161,7 +155,7 @@ nv04_fifo_destroy_context(struct nouveau_channel *chan)
161 nv_wr32(dev, NV04_PFIFO_MODE, 155 nv_wr32(dev, NV04_PFIFO_MODE,
162 nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id)); 156 nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
163 157
164 nouveau_gpuobj_ref_del(dev, &chan->ramfc); 158 nouveau_gpuobj_ref(NULL, &chan->ramfc);
165} 159}
166 160
167static void 161static void
@@ -264,10 +258,10 @@ nv04_fifo_init_ramxx(struct drm_device *dev)
264 struct drm_nouveau_private *dev_priv = dev->dev_private; 258 struct drm_nouveau_private *dev_priv = dev->dev_private;
265 259
266 nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ | 260 nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
267 ((dev_priv->ramht_bits - 9) << 16) | 261 ((dev_priv->ramht->bits - 9) << 16) |
268 (dev_priv->ramht_offset >> 8)); 262 (dev_priv->ramht->gpuobj->pinst >> 8));
269 nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8); 263 nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
270 nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc_offset >> 8); 264 nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc->pinst >> 8);
271} 265}
272 266
273static void 267static void
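For reference, the nv_mask() read/modify/write helper the new cache_pull() leans on behaves as sketched below (the real definition lives elsewhere in the driver; this matches its use above):

static u32 sketch_nv_mask(struct drm_device *dev, u32 reg, u32 mask, u32 val)
{
	u32 tmp = nv_rd32(dev, reg);

	nv_wr32(dev, reg, (tmp & ~mask) | val);
	return tmp;	/* old value; callers test e.g. "pull & 1" */
}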
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
index 4408232d33f1..0b5ae297abde 100644
--- a/drivers/gpu/drm/nouveau/nv04_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv04_instmem.c
@@ -1,6 +1,7 @@
1#include "drmP.h" 1#include "drmP.h"
2#include "drm.h" 2#include "drm.h"
3#include "nouveau_drv.h" 3#include "nouveau_drv.h"
4#include "nouveau_ramht.h"
4 5
5/* returns the size of fifo context */ 6/* returns the size of fifo context */
6static int 7static int
@@ -17,102 +18,51 @@ nouveau_fifo_ctx_size(struct drm_device *dev)
17 return 32; 18 return 32;
18} 19}
19 20
20static void 21int nv04_instmem_init(struct drm_device *dev)
21nv04_instmem_determine_amount(struct drm_device *dev)
22{ 22{
23 struct drm_nouveau_private *dev_priv = dev->dev_private; 23 struct drm_nouveau_private *dev_priv = dev->dev_private;
24 int i; 24 struct nouveau_gpuobj *ramht = NULL;
25 u32 offset, length;
26 int ret;
25 27
26 /* Figure out how much instance memory we need */ 28 /* RAMIN always available */
27 if (dev_priv->card_type >= NV_40) { 29 dev_priv->ramin_available = true;
28 /* We'll want more instance memory than this on some NV4x cards.
29 * There's a 16MB aperture to play with that maps onto the end
30 * of vram. For now, only reserve a small piece until we know
31 * more about what each chipset requires.
32 */
33 switch (dev_priv->chipset) {
34 case 0x40:
35 case 0x47:
36 case 0x49:
37 case 0x4b:
38 dev_priv->ramin_rsvd_vram = (2 * 1024 * 1024);
39 break;
40 default:
41 dev_priv->ramin_rsvd_vram = (1 * 1024 * 1024);
42 break;
43 }
44 } else {
45 /*XXX: what *are* the limits on <NV40 cards?
46 */
47 dev_priv->ramin_rsvd_vram = (512 * 1024);
48 }
49 NV_DEBUG(dev, "RAMIN size: %dKiB\n", dev_priv->ramin_rsvd_vram >> 10);
50 30
51 /* Clear all of it, except the BIOS image that's in the first 64KiB */ 31 /* Setup shared RAMHT */
52 for (i = 64 * 1024; i < dev_priv->ramin_rsvd_vram; i += 4) 32 ret = nouveau_gpuobj_new_fake(dev, 0x10000, ~0, 4096,
53 nv_wi32(dev, i, 0x00000000); 33 NVOBJ_FLAG_ZERO_ALLOC, &ramht);
54} 34 if (ret)
35 return ret;
55 36
56static void 37 ret = nouveau_ramht_new(dev, ramht, &dev_priv->ramht);
57nv04_instmem_configure_fixed_tables(struct drm_device *dev) 38 nouveau_gpuobj_ref(NULL, &ramht);
58{ 39 if (ret)
59 struct drm_nouveau_private *dev_priv = dev->dev_private; 40 return ret;
60 struct nouveau_engine *engine = &dev_priv->engine;
61 41
62 /* FIFO hash table (RAMHT) 42 /* And RAMRO */
63 * use 4k hash table at RAMIN+0x10000 43 ret = nouveau_gpuobj_new_fake(dev, 0x11200, ~0, 512,
64 * TODO: extend the hash table 44 NVOBJ_FLAG_ZERO_ALLOC, &dev_priv->ramro);
65 */ 45 if (ret)
66 dev_priv->ramht_offset = 0x10000; 46 return ret;
67 dev_priv->ramht_bits = 9; 47
68 dev_priv->ramht_size = (1 << dev_priv->ramht_bits); /* nr entries */ 48 /* And RAMFC */
69 dev_priv->ramht_size *= 8; /* 2 32-bit values per entry in RAMHT */ 49 length = dev_priv->engine.fifo.channels * nouveau_fifo_ctx_size(dev);
70 NV_DEBUG(dev, "RAMHT offset=0x%x, size=%d\n", dev_priv->ramht_offset,
71 dev_priv->ramht_size);
72
73 /* FIFO runout table (RAMRO) - 512k at 0x11200 */
74 dev_priv->ramro_offset = 0x11200;
75 dev_priv->ramro_size = 512;
76 NV_DEBUG(dev, "RAMRO offset=0x%x, size=%d\n", dev_priv->ramro_offset,
77 dev_priv->ramro_size);
78
79 /* FIFO context table (RAMFC)
80 * NV40 : Not sure exactly how to position RAMFC on some cards,
81 * 0x30002 seems to position it at RAMIN+0x20000 on these
82 * cards. RAMFC is 4kb (32 fifos, 128byte entries).
83 * Others: Position RAMFC at RAMIN+0x11400
84 */
85 dev_priv->ramfc_size = engine->fifo.channels *
86 nouveau_fifo_ctx_size(dev);
87 switch (dev_priv->card_type) { 50 switch (dev_priv->card_type) {
88 case NV_40: 51 case NV_40:
89 dev_priv->ramfc_offset = 0x20000; 52 offset = 0x20000;
90 break; 53 break;
91 case NV_30:
92 case NV_20:
93 case NV_10:
94 case NV_04:
95 default: 54 default:
96 dev_priv->ramfc_offset = 0x11400; 55 offset = 0x11400;
97 break; 56 break;
98 } 57 }
99 NV_DEBUG(dev, "RAMFC offset=0x%x, size=%d\n", dev_priv->ramfc_offset,
100 dev_priv->ramfc_size);
101}
102 58
103int nv04_instmem_init(struct drm_device *dev) 59 ret = nouveau_gpuobj_new_fake(dev, offset, ~0, length,
104{ 60 NVOBJ_FLAG_ZERO_ALLOC, &dev_priv->ramfc);
105 struct drm_nouveau_private *dev_priv = dev->dev_private; 61 if (ret)
106 uint32_t offset; 62 return ret;
107 int ret;
108
109 nv04_instmem_determine_amount(dev);
110 nv04_instmem_configure_fixed_tables(dev);
111 63
112 /* Create a heap to manage RAMIN allocations, we don't allocate 64 /* Only allow space after RAMFC to be used for object allocation */
113 * the space that was reserved for RAMHT/FC/RO. 65 offset += length;
114 */
115 offset = dev_priv->ramfc_offset + dev_priv->ramfc_size;
116 66
117 /* It appears RAMRO (or something?) is controlled by 0x2220/0x2230 67 /* It appears RAMRO (or something?) is controlled by 0x2220/0x2230
118 * on certain NV4x chipsets as well as RAMFC. When 0x2230 == 0 68 * on certain NV4x chipsets as well as RAMFC. When 0x2230 == 0
@@ -140,46 +90,34 @@ int nv04_instmem_init(struct drm_device *dev)
140void 90void
141nv04_instmem_takedown(struct drm_device *dev) 91nv04_instmem_takedown(struct drm_device *dev)
142{ 92{
93 struct drm_nouveau_private *dev_priv = dev->dev_private;
94
95 nouveau_ramht_ref(NULL, &dev_priv->ramht, NULL);
96 nouveau_gpuobj_ref(NULL, &dev_priv->ramro);
97 nouveau_gpuobj_ref(NULL, &dev_priv->ramfc);
143} 98}
144 99
145int 100int
146nv04_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, uint32_t *sz) 101nv04_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
102 uint32_t *sz)
147{ 103{
148 if (gpuobj->im_backing)
149 return -EINVAL;
150
151 return 0; 104 return 0;
152} 105}
153 106
154void 107void
155nv04_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) 108nv04_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
156{ 109{
157 struct drm_nouveau_private *dev_priv = dev->dev_private;
158
159 if (gpuobj && gpuobj->im_backing) {
160 if (gpuobj->im_bound)
161 dev_priv->engine.instmem.unbind(dev, gpuobj);
162 gpuobj->im_backing = NULL;
163 }
164} 110}
165 111
166int 112int
167nv04_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) 113nv04_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
168{ 114{
169 if (!gpuobj->im_pramin || gpuobj->im_bound)
170 return -EINVAL;
171
172 gpuobj->im_bound = 1;
173 return 0; 115 return 0;
174} 116}
175 117
176int 118int
177nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) 119nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
178{ 120{
179 if (gpuobj->im_bound == 0)
180 return -EINVAL;
181
182 gpuobj->im_bound = 0;
183 return 0; 121 return 0;
184} 122}
185 123
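The fixed RAMIN layout the rewritten nv04_instmem_init() encodes, summarised from the code above (a sketch of the map, not new behaviour):

/*
 *   0x10000  RAMHT   4096 bytes  (512 entries * 8 bytes, shared hash table)
 *   0x11200  RAMRO    512 bytes  (runout table)
 *   0x11400  RAMFC   channels * ctx_size  (0x20000 instead on NV40)
 *
 * Each region is wrapped by nouveau_gpuobj_new_fake(), so the rest of
 * the driver handles them as ordinary gpuobjs, and the allocation heap
 * starts immediately after RAMFC.
 */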
diff --git a/drivers/gpu/drm/nouveau/nv04_pm.c b/drivers/gpu/drm/nouveau/nv04_pm.c
new file mode 100644
index 000000000000..6a6eb697d38e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_pm.c
@@ -0,0 +1,81 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_hw.h"
28#include "nouveau_pm.h"
29
30struct nv04_pm_state {
31 struct pll_lims pll;
32 struct nouveau_pll_vals calc;
33};
34
35int
36nv04_pm_clock_get(struct drm_device *dev, u32 id)
37{
38 return nouveau_hw_get_clock(dev, id);
39}
40
41void *
42nv04_pm_clock_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl,
43 u32 id, int khz)
44{
45 struct nv04_pm_state *state;
46 int ret;
47
48 state = kzalloc(sizeof(*state), GFP_KERNEL);
49 if (!state)
50 return ERR_PTR(-ENOMEM);
51
52 ret = get_pll_limits(dev, id, &state->pll);
53 if (ret) {
54 kfree(state);
55 return (ret == -ENOENT) ? NULL : ERR_PTR(ret);
56 }
57
58 ret = nouveau_calc_pll_mnp(dev, &state->pll, khz, &state->calc);
59 if (!ret) {
60 kfree(state);
61 return ERR_PTR(-EINVAL);
62 }
63
64 return state;
65}
66
67void
68nv04_pm_clock_set(struct drm_device *dev, void *pre_state)
69{
70 struct drm_nouveau_private *dev_priv = dev->dev_private;
71 struct nv04_pm_state *state = pre_state;
72 u32 reg = state->pll.reg;
73
74 /* thank the insane nouveau_hw_setpll() interface for this */
75 if (dev_priv->card_type >= NV_40)
76 reg += 4;
77
78 nouveau_hw_setpll(dev, reg, &state->calc);
79 kfree(state);
80}
81
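A usage sketch of the two-phase interface above, following clock_pre()'s return contract (ERR_PTR on failure, NULL when the clock has no PLL entry, otherwise a state object that clock_set() consumes); perflvl and new_khz are assumed to come from the caller:

	void *state;

	state = nv04_pm_clock_pre(dev, perflvl, PLL_CORE, new_khz);
	if (IS_ERR(state))
		return PTR_ERR(state);
	if (state)
		nv04_pm_clock_set(dev, state);	/* programs the PLL, frees state */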
diff --git a/drivers/gpu/drm/nouveau/nv04_tv.c b/drivers/gpu/drm/nouveau/nv04_tv.c
index 0b5d012d7c28..3eb605ddfd03 100644
--- a/drivers/gpu/drm/nouveau/nv04_tv.c
+++ b/drivers/gpu/drm/nouveau/nv04_tv.c
@@ -49,8 +49,8 @@ static struct i2c_board_info nv04_tv_encoder_info[] = {
49 49
50int nv04_tv_identify(struct drm_device *dev, int i2c_index) 50int nv04_tv_identify(struct drm_device *dev, int i2c_index)
51{ 51{
52 return nouveau_i2c_identify(dev, "TV encoder", 52 return nouveau_i2c_identify(dev, "TV encoder", nv04_tv_encoder_info,
53 nv04_tv_encoder_info, i2c_index); 53 NULL, i2c_index);
54} 54}
55 55
56 56
@@ -99,12 +99,10 @@ static void nv04_tv_bind(struct drm_device *dev, int head, bool bind)
99 99
100 state->tv_setup = 0; 100 state->tv_setup = 0;
101 101
102 if (bind) { 102 if (bind)
103 state->CRTC[NV_CIO_CRE_LCD__INDEX] = 0;
104 state->CRTC[NV_CIO_CRE_49] |= 0x10; 103 state->CRTC[NV_CIO_CRE_49] |= 0x10;
105 } else { 104 else
106 state->CRTC[NV_CIO_CRE_49] &= ~0x10; 105 state->CRTC[NV_CIO_CRE_49] &= ~0x10;
107 }
108 106
109 NVWriteVgaCrtc(dev, head, NV_CIO_CRE_LCD__INDEX, 107 NVWriteVgaCrtc(dev, head, NV_CIO_CRE_LCD__INDEX,
110 state->CRTC[NV_CIO_CRE_LCD__INDEX]); 108 state->CRTC[NV_CIO_CRE_LCD__INDEX]);
diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c
index 7a4069cf5d0b..f1b03ad58fd5 100644
--- a/drivers/gpu/drm/nouveau/nv10_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv10_fifo.c
@@ -27,8 +27,9 @@
27#include "drmP.h" 27#include "drmP.h"
28#include "drm.h" 28#include "drm.h"
29#include "nouveau_drv.h" 29#include "nouveau_drv.h"
30#include "nouveau_ramht.h"
30 31
31#define NV10_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV10_RAMFC__SIZE)) 32#define NV10_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV10_RAMFC__SIZE))
32#define NV10_RAMFC__SIZE ((dev_priv->chipset) >= 0x17 ? 64 : 32) 33#define NV10_RAMFC__SIZE ((dev_priv->chipset) >= 0x17 ? 64 : 32)
33 34
34int 35int
@@ -48,7 +49,7 @@ nv10_fifo_create_context(struct nouveau_channel *chan)
48 49
49 ret = nouveau_gpuobj_new_fake(dev, NV10_RAMFC(chan->id), ~0, 50 ret = nouveau_gpuobj_new_fake(dev, NV10_RAMFC(chan->id), ~0,
50 NV10_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC | 51 NV10_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC |
51 NVOBJ_FLAG_ZERO_FREE, NULL, &chan->ramfc); 52 NVOBJ_FLAG_ZERO_FREE, &chan->ramfc);
52 if (ret) 53 if (ret)
53 return ret; 54 return ret;
54 55
@@ -57,7 +58,7 @@ nv10_fifo_create_context(struct nouveau_channel *chan)
57 */ 58 */
58 nv_wi32(dev, fc + 0, chan->pushbuf_base); 59 nv_wi32(dev, fc + 0, chan->pushbuf_base);
59 nv_wi32(dev, fc + 4, chan->pushbuf_base); 60 nv_wi32(dev, fc + 4, chan->pushbuf_base);
60 nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4); 61 nv_wi32(dev, fc + 12, chan->pushbuf->pinst >> 4);
61 nv_wi32(dev, fc + 20, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | 62 nv_wi32(dev, fc + 20, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
62 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | 63 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
63 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 | 64 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
@@ -80,7 +81,7 @@ nv10_fifo_destroy_context(struct nouveau_channel *chan)
80 nv_wr32(dev, NV04_PFIFO_MODE, 81 nv_wr32(dev, NV04_PFIFO_MODE,
81 nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id)); 82 nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
82 83
83 nouveau_gpuobj_ref_del(dev, &chan->ramfc); 84 nouveau_gpuobj_ref(NULL, &chan->ramfc);
84} 85}
85 86
86static void 87static void
@@ -202,14 +203,14 @@ nv10_fifo_init_ramxx(struct drm_device *dev)
202 struct drm_nouveau_private *dev_priv = dev->dev_private; 203 struct drm_nouveau_private *dev_priv = dev->dev_private;
203 204
204 nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ | 205 nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
205 ((dev_priv->ramht_bits - 9) << 16) | 206 ((dev_priv->ramht->bits - 9) << 16) |
206 (dev_priv->ramht_offset >> 8)); 207 (dev_priv->ramht->gpuobj->pinst >> 8));
207 nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8); 208 nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
208 209
209 if (dev_priv->chipset < 0x17) { 210 if (dev_priv->chipset < 0x17) {
210 nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc_offset >> 8); 211 nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc->pinst >> 8);
211 } else { 212 } else {
212 nv_wr32(dev, NV03_PFIFO_RAMFC, (dev_priv->ramfc_offset >> 8) | 213 nv_wr32(dev, NV03_PFIFO_RAMFC, (dev_priv->ramfc->pinst >> 8) |
213 (1 << 16) /* 64 Bytes entry*/); 214 (1 << 16) /* 64 Bytes entry*/);
214 /* XXX nvidia blob set bit 18, 21,23 for nv20 & nv30 */ 215 /* XXX nvidia blob set bit 18, 21,23 for nv20 & nv30 */
215 } 216 }
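
The RAMHT/RAMRO/RAMFC hunks above replace the raw dev_priv->ram*_offset fields with gpuobj-backed objects from the new nouveau_ramht.h interface, taking the instance address from ->pinst. The NV03_PFIFO_RAMHT control word itself is unchanged: search depth, log2(entries) - 9, and the table's PRAMIN address >> 8, packed as in this standalone sketch (values made up):

    /*
     * Packs the NV03_PFIFO_RAMHT word the way the writes above do.
     */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t pfifo_ramht_word(uint32_t search, uint32_t bits,
                                     uint32_t pinst)
    {
        return (search << 24) | ((bits - 9) << 16) | (pinst >> 8);
    }

    int main(void)
    {
        /* 0x03 == "search 128"; a 9-bit (512-entry) table at 0x10000 */
        printf("0x%08x\n", pfifo_ramht_word(0x03, 9, 0x10000));
        return 0;
    }
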
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c
index b2f6a57c0cc5..8e68c9731159 100644
--- a/drivers/gpu/drm/nouveau/nv10_graph.c
+++ b/drivers/gpu/drm/nouveau/nv10_graph.c
@@ -803,7 +803,7 @@ nv10_graph_context_switch(struct drm_device *dev)
803 /* Load context for next channel */ 803 /* Load context for next channel */
804 chid = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f; 804 chid = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
805 chan = dev_priv->fifos[chid]; 805 chan = dev_priv->fifos[chid];
806 if (chan) 806 if (chan && chan->pgraph_ctx)
807 nv10_graph_load_context(chan); 807 nv10_graph_load_context(chan);
808 808
809 pgraph->fifo_access(dev, true); 809 pgraph->fifo_access(dev, true);
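
The single-line change above tightens the PGRAPH context switch: a channel can exist before its graphics context has been created, so the load is now skipped unless chan->pgraph_ctx is set. Sketched standalone (types are stand-ins):

    #include <stddef.h>
    #include <stdio.h>

    struct channel {
        void *pgraph_ctx;
    };

    static void load_context(struct channel *chan)
    {
        printf("loading ctx %p\n", chan->pgraph_ctx);
    }

    static void context_switch(struct channel *chan)
    {
        if (chan && chan->pgraph_ctx)   /* the new, stricter test */
            load_context(chan);
    }

    int main(void)
    {
        struct channel half = { .pgraph_ctx = NULL };

        context_switch(NULL);           /* no channel: skipped */
        context_switch(&half);          /* channel without ctx: now skipped */
        return 0;
    }
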
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 13cdc05b7c2d..28119fd19d03 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -193,55 +193,56 @@ nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector)
193 } 193 }
194} 194}
195 195
196static const struct { 196static int nv17_tv_get_ld_modes(struct drm_encoder *encoder,
197 int hdisplay; 197 struct drm_connector *connector)
198 int vdisplay;
199} modes[] = {
200 { 640, 400 },
201 { 640, 480 },
202 { 720, 480 },
203 { 720, 576 },
204 { 800, 600 },
205 { 1024, 768 },
206 { 1280, 720 },
207 { 1280, 1024 },
208 { 1920, 1080 }
209};
210
211static int nv17_tv_get_modes(struct drm_encoder *encoder,
212 struct drm_connector *connector)
213{ 198{
214 struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); 199 struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
215 struct drm_display_mode *mode; 200 struct drm_display_mode *mode, *tv_mode;
216 struct drm_display_mode *output_mode;
217 int n = 0; 201 int n = 0;
218 int i;
219
220 if (tv_norm->kind != CTV_ENC_MODE) {
221 struct drm_display_mode *tv_mode;
222 202
223 for (tv_mode = nv17_tv_modes; tv_mode->hdisplay; tv_mode++) { 203 for (tv_mode = nv17_tv_modes; tv_mode->hdisplay; tv_mode++) {
224 mode = drm_mode_duplicate(encoder->dev, tv_mode); 204 mode = drm_mode_duplicate(encoder->dev, tv_mode);
225 205
226 mode->clock = tv_norm->tv_enc_mode.vrefresh * 206 mode->clock = tv_norm->tv_enc_mode.vrefresh *
227 mode->htotal / 1000 * 207 mode->htotal / 1000 *
228 mode->vtotal / 1000; 208 mode->vtotal / 1000;
229 209
230 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 210 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
231 mode->clock *= 2; 211 mode->clock *= 2;
232 212
233 if (mode->hdisplay == tv_norm->tv_enc_mode.hdisplay && 213 if (mode->hdisplay == tv_norm->tv_enc_mode.hdisplay &&
234 mode->vdisplay == tv_norm->tv_enc_mode.vdisplay) 214 mode->vdisplay == tv_norm->tv_enc_mode.vdisplay)
235 mode->type |= DRM_MODE_TYPE_PREFERRED; 215 mode->type |= DRM_MODE_TYPE_PREFERRED;
236 216
237 drm_mode_probed_add(connector, mode); 217 drm_mode_probed_add(connector, mode);
238 n++; 218 n++;
239 }
240 return n;
241 } 219 }
242 220
243 /* tv_norm->kind == CTV_ENC_MODE */ 221 return n;
244 output_mode = &tv_norm->ctv_enc_mode.mode; 222}
223
224static int nv17_tv_get_hd_modes(struct drm_encoder *encoder,
225 struct drm_connector *connector)
226{
227 struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
228 struct drm_display_mode *output_mode = &tv_norm->ctv_enc_mode.mode;
229 struct drm_display_mode *mode;
230 const struct {
231 int hdisplay;
232 int vdisplay;
233 } modes[] = {
234 { 640, 400 },
235 { 640, 480 },
236 { 720, 480 },
237 { 720, 576 },
238 { 800, 600 },
239 { 1024, 768 },
240 { 1280, 720 },
241 { 1280, 1024 },
242 { 1920, 1080 }
243 };
244 int i, n = 0;
245
245 for (i = 0; i < ARRAY_SIZE(modes); i++) { 246 for (i = 0; i < ARRAY_SIZE(modes); i++) {
246 if (modes[i].hdisplay > output_mode->hdisplay || 247 if (modes[i].hdisplay > output_mode->hdisplay ||
247 modes[i].vdisplay > output_mode->vdisplay) 248 modes[i].vdisplay > output_mode->vdisplay)
@@ -251,11 +252,12 @@ static int nv17_tv_get_modes(struct drm_encoder *encoder,
251 modes[i].vdisplay == output_mode->vdisplay) { 252 modes[i].vdisplay == output_mode->vdisplay) {
252 mode = drm_mode_duplicate(encoder->dev, output_mode); 253 mode = drm_mode_duplicate(encoder->dev, output_mode);
253 mode->type |= DRM_MODE_TYPE_PREFERRED; 254 mode->type |= DRM_MODE_TYPE_PREFERRED;
255
254 } else { 256 } else {
255 mode = drm_cvt_mode(encoder->dev, modes[i].hdisplay, 257 mode = drm_cvt_mode(encoder->dev, modes[i].hdisplay,
256 modes[i].vdisplay, 60, false, 258 modes[i].vdisplay, 60, false,
257 output_mode->flags & DRM_MODE_FLAG_INTERLACE, 259 (output_mode->flags &
258 false); 260 DRM_MODE_FLAG_INTERLACE), false);
259 } 261 }
260 262
261 /* CVT modes are sometimes unsuitable... */ 263 /* CVT modes are sometimes unsuitable... */
@@ -266,6 +268,7 @@ static int nv17_tv_get_modes(struct drm_encoder *encoder,
266 - mode->hdisplay) * 9 / 10) & ~7; 268 - mode->hdisplay) * 9 / 10) & ~7;
267 mode->hsync_end = mode->hsync_start + 8; 269 mode->hsync_end = mode->hsync_start + 8;
268 } 270 }
271
269 if (output_mode->vdisplay >= 1024) { 272 if (output_mode->vdisplay >= 1024) {
270 mode->vtotal = output_mode->vtotal; 273 mode->vtotal = output_mode->vtotal;
271 mode->vsync_start = output_mode->vsync_start; 274 mode->vsync_start = output_mode->vsync_start;
@@ -276,9 +279,21 @@ static int nv17_tv_get_modes(struct drm_encoder *encoder,
276 drm_mode_probed_add(connector, mode); 279 drm_mode_probed_add(connector, mode);
277 n++; 280 n++;
278 } 281 }
282
279 return n; 283 return n;
280} 284}
281 285
286static int nv17_tv_get_modes(struct drm_encoder *encoder,
287 struct drm_connector *connector)
288{
289 struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
290
291 if (tv_norm->kind == CTV_ENC_MODE)
292 return nv17_tv_get_hd_modes(encoder, connector);
293 else
294 return nv17_tv_get_ld_modes(encoder, connector);
295}
296
282static int nv17_tv_mode_valid(struct drm_encoder *encoder, 297static int nv17_tv_mode_valid(struct drm_encoder *encoder,
283 struct drm_display_mode *mode) 298 struct drm_display_mode *mode)
284{ 299{
@@ -408,15 +423,8 @@ static void nv17_tv_prepare(struct drm_encoder *encoder)
408 423
409 } 424 }
410 425
411 /* Some NV4x have unknown values (0x3f, 0x50, 0x54, 0x6b, 0x79, 0x7f) 426 if (tv_norm->kind == CTV_ENC_MODE)
412 * at LCD__INDEX which we don't alter 427 *cr_lcd |= 0x1 | (head ? 0x0 : 0x8);
413 */
414 if (!(*cr_lcd & 0x44)) {
415 if (tv_norm->kind == CTV_ENC_MODE)
416 *cr_lcd = 0x1 | (head ? 0x0 : 0x8);
417 else
418 *cr_lcd = 0;
419 }
420 428
421 /* Set the DACCLK register */ 429 /* Set the DACCLK register */
422 dacclk = (NVReadRAMDAC(dev, 0, dacclk_off) & ~0x30) | 0x1; 430 dacclk = (NVReadRAMDAC(dev, 0, dacclk_off) & ~0x30) | 0x1;
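
This hunk splits the old nv17_tv_get_modes() in two: nv17_tv_get_ld_modes() walks the fixed nv17_tv_modes table for standard-definition norms, nv17_tv_get_hd_modes() generates CVT modes against the CTV norm's output mode, and the original entry point becomes a dispatcher on tv_norm->kind. The shape of the refactor, standalone (stand-in types and return values):

    #include <stdio.h>

    enum norm_kind { TV_ENC_MODE, CTV_ENC_MODE };

    static int get_ld_modes(void) { return 4; }    /* fixed SD mode table */
    static int get_hd_modes(void) { return 9; }    /* CVT-generated modes */

    static int get_modes(enum norm_kind kind)
    {
        if (kind == CTV_ENC_MODE)
            return get_hd_modes();
        else
            return get_ld_modes();
    }

    int main(void)
    {
        printf("%d SD modes, %d HD modes\n",
               get_modes(TV_ENC_MODE), get_modes(CTV_ENC_MODE));
        return 0;
    }
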
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.h b/drivers/gpu/drm/nouveau/nv17_tv.h
index c00977cedabd..6bf03840f9eb 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.h
+++ b/drivers/gpu/drm/nouveau/nv17_tv.h
@@ -127,7 +127,8 @@ void nv17_ctv_update_rescaler(struct drm_encoder *encoder);
127 127
128/* TV hardware access functions */ 128/* TV hardware access functions */
129 129
130static inline void nv_write_ptv(struct drm_device *dev, uint32_t reg, uint32_t val) 130static inline void nv_write_ptv(struct drm_device *dev, uint32_t reg,
131 uint32_t val)
131{ 132{
132 nv_wr32(dev, reg, val); 133 nv_wr32(dev, reg, val);
133} 134}
@@ -137,7 +138,8 @@ static inline uint32_t nv_read_ptv(struct drm_device *dev, uint32_t reg)
137 return nv_rd32(dev, reg); 138 return nv_rd32(dev, reg);
138} 139}
139 140
140static inline void nv_write_tv_enc(struct drm_device *dev, uint8_t reg, uint8_t val) 141static inline void nv_write_tv_enc(struct drm_device *dev, uint8_t reg,
142 uint8_t val)
141{ 143{
142 nv_write_ptv(dev, NV_PTV_TV_INDEX, reg); 144 nv_write_ptv(dev, NV_PTV_TV_INDEX, reg);
143 nv_write_ptv(dev, NV_PTV_TV_DATA, val); 145 nv_write_ptv(dev, NV_PTV_TV_DATA, val);
@@ -149,8 +151,11 @@ static inline uint8_t nv_read_tv_enc(struct drm_device *dev, uint8_t reg)
149 return nv_read_ptv(dev, NV_PTV_TV_DATA); 151 return nv_read_ptv(dev, NV_PTV_TV_DATA);
150} 152}
151 153
152#define nv_load_ptv(dev, state, reg) nv_write_ptv(dev, NV_PTV_OFFSET + 0x##reg, state->ptv_##reg) 154#define nv_load_ptv(dev, state, reg) \
153#define nv_save_ptv(dev, state, reg) state->ptv_##reg = nv_read_ptv(dev, NV_PTV_OFFSET + 0x##reg) 155 nv_write_ptv(dev, NV_PTV_OFFSET + 0x##reg, state->ptv_##reg)
154#define nv_load_tv_enc(dev, state, reg) nv_write_tv_enc(dev, 0x##reg, state->tv_enc[0x##reg]) 156#define nv_save_ptv(dev, state, reg) \
157 state->ptv_##reg = nv_read_ptv(dev, NV_PTV_OFFSET + 0x##reg)
158#define nv_load_tv_enc(dev, state, reg) \
159 nv_write_tv_enc(dev, 0x##reg, state->tv_enc[0x##reg])
155 160
156#endif 161#endif
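
The re-wrapped macros above depend on preprocessor token pasting: the caller passes bare hex digits, and 0x##reg / ptv_##reg build both the register constant and the matching field name from them, so nv_load_ptv(dev, state, 204) touches offset 0x204 and field ptv_204. A standalone mock-up of that mechanism (register file reduced to an array):

    #include <stdint.h>
    #include <stdio.h>

    struct state {
        uint32_t ptv_204;
    };

    static uint32_t regs[0x1000];

    #define load_ptv(state, reg)  (regs[0x##reg] = (state)->ptv_##reg)
    #define save_ptv(state, reg)  ((state)->ptv_##reg = regs[0x##reg])

    int main(void)
    {
        struct state s = { .ptv_204 = 0xdeadbeef };

        load_ptv(&s, 204);      /* writes regs[0x204] from s.ptv_204 */
        s.ptv_204 = 0;
        save_ptv(&s, 204);      /* reads it back into the field */
        printf("0x%08x\n", s.ptv_204);
        return 0;
    }
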
diff --git a/drivers/gpu/drm/nouveau/nv17_tv_modes.c b/drivers/gpu/drm/nouveau/nv17_tv_modes.c
index d64683d97e0d..9d3893c50a41 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv_modes.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv_modes.c
@@ -336,12 +336,17 @@ static void tv_setup_filter(struct drm_encoder *encoder)
336 struct filter_params *p = &fparams[k][j]; 336 struct filter_params *p = &fparams[k][j];
337 337
338 for (i = 0; i < 7; i++) { 338 for (i = 0; i < 7; i++) {
339 int64_t c = (p->k1 + p->ki*i + p->ki2*i*i + p->ki3*i*i*i) 339 int64_t c = (p->k1 + p->ki*i + p->ki2*i*i +
340 + (p->kr + p->kir*i + p->ki2r*i*i + p->ki3r*i*i*i)*rs[k] 340 p->ki3*i*i*i)
341 + (p->kf + p->kif*i + p->ki2f*i*i + p->ki3f*i*i*i)*flicker 341 + (p->kr + p->kir*i + p->ki2r*i*i +
342 + (p->krf + p->kirf*i + p->ki2rf*i*i + p->ki3rf*i*i*i)*flicker*rs[k]; 342 p->ki3r*i*i*i) * rs[k]
343 343 + (p->kf + p->kif*i + p->ki2f*i*i +
344 (*filters[k])[j][i] = (c + id5/2) >> 39 & (0x1 << 31 | 0x7f << 9); 344 p->ki3f*i*i*i) * flicker
345 + (p->krf + p->kirf*i + p->ki2rf*i*i +
346 p->ki3rf*i*i*i) * flicker * rs[k];
347
348 (*filters[k])[j][i] = (c + id5/2) >> 39
349 & (0x1 << 31 | 0x7f << 9);
345 } 350 }
346 } 351 }
347 } 352 }
@@ -349,7 +354,8 @@ static void tv_setup_filter(struct drm_encoder *encoder)
349 354
350/* Hardware state saving/restoring */ 355/* Hardware state saving/restoring */
351 356
352static void tv_save_filter(struct drm_device *dev, uint32_t base, uint32_t regs[4][7]) 357static void tv_save_filter(struct drm_device *dev, uint32_t base,
358 uint32_t regs[4][7])
353{ 359{
354 int i, j; 360 int i, j;
355 uint32_t offsets[] = { base, base + 0x1c, base + 0x40, base + 0x5c }; 361 uint32_t offsets[] = { base, base + 0x1c, base + 0x40, base + 0x5c };
@@ -360,7 +366,8 @@ static void tv_save_filter(struct drm_device *dev, uint32_t base, uint32_t regs[
360 } 366 }
361} 367}
362 368
363static void tv_load_filter(struct drm_device *dev, uint32_t base, uint32_t regs[4][7]) 369static void tv_load_filter(struct drm_device *dev, uint32_t base,
370 uint32_t regs[4][7])
364{ 371{
365 int i, j; 372 int i, j;
366 uint32_t offsets[] = { base, base + 0x1c, base + 0x40, base + 0x5c }; 373 uint32_t offsets[] = { base, base + 0x1c, base + 0x40, base + 0x5c };
@@ -504,10 +511,10 @@ void nv17_tv_update_properties(struct drm_encoder *encoder)
504 break; 511 break;
505 } 512 }
506 513
507 regs->tv_enc[0x20] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x20], 255, 514 regs->tv_enc[0x20] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x20],
508 tv_enc->saturation); 515 255, tv_enc->saturation);
509 regs->tv_enc[0x22] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x22], 255, 516 regs->tv_enc[0x22] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x22],
510 tv_enc->saturation); 517 255, tv_enc->saturation);
511 regs->tv_enc[0x25] = tv_enc->hue * 255 / 100; 518 regs->tv_enc[0x25] = tv_enc->hue * 255 / 100;
512 519
513 nv_load_ptv(dev, regs, 204); 520 nv_load_ptv(dev, regs, 204);
@@ -541,7 +548,8 @@ void nv17_ctv_update_rescaler(struct drm_encoder *encoder)
541 int head = nouveau_crtc(encoder->crtc)->index; 548 int head = nouveau_crtc(encoder->crtc)->index;
542 struct nv04_crtc_reg *regs = &dev_priv->mode_reg.crtc_reg[head]; 549 struct nv04_crtc_reg *regs = &dev_priv->mode_reg.crtc_reg[head];
543 struct drm_display_mode *crtc_mode = &encoder->crtc->mode; 550 struct drm_display_mode *crtc_mode = &encoder->crtc->mode;
544 struct drm_display_mode *output_mode = &get_tv_norm(encoder)->ctv_enc_mode.mode; 551 struct drm_display_mode *output_mode =
552 &get_tv_norm(encoder)->ctv_enc_mode.mode;
545 int overscan, hmargin, vmargin, hratio, vratio; 553 int overscan, hmargin, vmargin, hratio, vratio;
546 554
547 /* The rescaler doesn't do the right thing for interlaced modes. */ 555 /* The rescaler doesn't do the right thing for interlaced modes. */
@@ -553,13 +561,15 @@ void nv17_ctv_update_rescaler(struct drm_encoder *encoder)
553 hmargin = (output_mode->hdisplay - crtc_mode->hdisplay) / 2; 561 hmargin = (output_mode->hdisplay - crtc_mode->hdisplay) / 2;
554 vmargin = (output_mode->vdisplay - crtc_mode->vdisplay) / 2; 562 vmargin = (output_mode->vdisplay - crtc_mode->vdisplay) / 2;
555 563
556 hmargin = interpolate(0, min(hmargin, output_mode->hdisplay/20), hmargin, 564 hmargin = interpolate(0, min(hmargin, output_mode->hdisplay/20),
557 overscan); 565 hmargin, overscan);
558 vmargin = interpolate(0, min(vmargin, output_mode->vdisplay/20), vmargin, 566 vmargin = interpolate(0, min(vmargin, output_mode->vdisplay/20),
559 overscan); 567 vmargin, overscan);
560 568
561 hratio = crtc_mode->hdisplay * 0x800 / (output_mode->hdisplay - 2*hmargin); 569 hratio = crtc_mode->hdisplay * 0x800 /
562 vratio = crtc_mode->vdisplay * 0x800 / (output_mode->vdisplay - 2*vmargin) & ~3; 570 (output_mode->hdisplay - 2*hmargin);
571 vratio = crtc_mode->vdisplay * 0x800 /
572 (output_mode->vdisplay - 2*vmargin) & ~3;
563 573
564 regs->fp_horiz_regs[FP_VALID_START] = hmargin; 574 regs->fp_horiz_regs[FP_VALID_START] = hmargin;
565 regs->fp_horiz_regs[FP_VALID_END] = output_mode->hdisplay - hmargin - 1; 575 regs->fp_horiz_regs[FP_VALID_END] = output_mode->hdisplay - hmargin - 1;
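
The rescaler arithmetic reflowed above works in fixed point where 0x800 (2048) represents a 1:1 ratio: hratio scales the CRTC width against the margin-adjusted output width, and vratio's trailing & ~3 clears the two low bits (presumably an alignment the hardware requires). A worked example with made-up mode sizes:

    #include <stdio.h>

    int main(void)
    {
        int crtc_h = 720, out_h = 800, hmargin = 16;
        int crtc_v = 576, out_v = 600, vmargin = 12;

        /* 0x800 == 1.0; 720 into (800 - 32) active pixels: 1920/2048 */
        int hratio = crtc_h * 0x800 / (out_h - 2 * hmargin);
        /* vertical ratio has its two low bits cleared, as above */
        int vratio = crtc_v * 0x800 / (out_v - 2 * vmargin) & ~3;

        printf("hratio 0x%03x, vratio 0x%03x\n", hratio, vratio);
        return 0;
    }
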
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c
index 17f309b36c91..12ab9cd56eca 100644
--- a/drivers/gpu/drm/nouveau/nv20_graph.c
+++ b/drivers/gpu/drm/nouveau/nv20_graph.c
@@ -37,49 +37,49 @@ nv20_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
37{ 37{
38 int i; 38 int i;
39 39
40 nv_wo32(dev, ctx, 0x033c/4, 0xffff0000); 40 nv_wo32(ctx, 0x033c, 0xffff0000);
41 nv_wo32(dev, ctx, 0x03a0/4, 0x0fff0000); 41 nv_wo32(ctx, 0x03a0, 0x0fff0000);
42 nv_wo32(dev, ctx, 0x03a4/4, 0x0fff0000); 42 nv_wo32(ctx, 0x03a4, 0x0fff0000);
43 nv_wo32(dev, ctx, 0x047c/4, 0x00000101); 43 nv_wo32(ctx, 0x047c, 0x00000101);
44 nv_wo32(dev, ctx, 0x0490/4, 0x00000111); 44 nv_wo32(ctx, 0x0490, 0x00000111);
45 nv_wo32(dev, ctx, 0x04a8/4, 0x44400000); 45 nv_wo32(ctx, 0x04a8, 0x44400000);
46 for (i = 0x04d4; i <= 0x04e0; i += 4) 46 for (i = 0x04d4; i <= 0x04e0; i += 4)
47 nv_wo32(dev, ctx, i/4, 0x00030303); 47 nv_wo32(ctx, i, 0x00030303);
48 for (i = 0x04f4; i <= 0x0500; i += 4) 48 for (i = 0x04f4; i <= 0x0500; i += 4)
49 nv_wo32(dev, ctx, i/4, 0x00080000); 49 nv_wo32(ctx, i, 0x00080000);
50 for (i = 0x050c; i <= 0x0518; i += 4) 50 for (i = 0x050c; i <= 0x0518; i += 4)
51 nv_wo32(dev, ctx, i/4, 0x01012000); 51 nv_wo32(ctx, i, 0x01012000);
52 for (i = 0x051c; i <= 0x0528; i += 4) 52 for (i = 0x051c; i <= 0x0528; i += 4)
53 nv_wo32(dev, ctx, i/4, 0x000105b8); 53 nv_wo32(ctx, i, 0x000105b8);
54 for (i = 0x052c; i <= 0x0538; i += 4) 54 for (i = 0x052c; i <= 0x0538; i += 4)
55 nv_wo32(dev, ctx, i/4, 0x00080008); 55 nv_wo32(ctx, i, 0x00080008);
56 for (i = 0x055c; i <= 0x0598; i += 4) 56 for (i = 0x055c; i <= 0x0598; i += 4)
57 nv_wo32(dev, ctx, i/4, 0x07ff0000); 57 nv_wo32(ctx, i, 0x07ff0000);
58 nv_wo32(dev, ctx, 0x05a4/4, 0x4b7fffff); 58 nv_wo32(ctx, 0x05a4, 0x4b7fffff);
59 nv_wo32(dev, ctx, 0x05fc/4, 0x00000001); 59 nv_wo32(ctx, 0x05fc, 0x00000001);
60 nv_wo32(dev, ctx, 0x0604/4, 0x00004000); 60 nv_wo32(ctx, 0x0604, 0x00004000);
61 nv_wo32(dev, ctx, 0x0610/4, 0x00000001); 61 nv_wo32(ctx, 0x0610, 0x00000001);
62 nv_wo32(dev, ctx, 0x0618/4, 0x00040000); 62 nv_wo32(ctx, 0x0618, 0x00040000);
63 nv_wo32(dev, ctx, 0x061c/4, 0x00010000); 63 nv_wo32(ctx, 0x061c, 0x00010000);
64 for (i = 0x1c1c; i <= 0x248c; i += 16) { 64 for (i = 0x1c1c; i <= 0x248c; i += 16) {
65 nv_wo32(dev, ctx, (i + 0)/4, 0x10700ff9); 65 nv_wo32(ctx, (i + 0), 0x10700ff9);
66 nv_wo32(dev, ctx, (i + 4)/4, 0x0436086c); 66 nv_wo32(ctx, (i + 4), 0x0436086c);
67 nv_wo32(dev, ctx, (i + 8)/4, 0x000c001b); 67 nv_wo32(ctx, (i + 8), 0x000c001b);
68 } 68 }
69 nv_wo32(dev, ctx, 0x281c/4, 0x3f800000); 69 nv_wo32(ctx, 0x281c, 0x3f800000);
70 nv_wo32(dev, ctx, 0x2830/4, 0x3f800000); 70 nv_wo32(ctx, 0x2830, 0x3f800000);
71 nv_wo32(dev, ctx, 0x285c/4, 0x40000000); 71 nv_wo32(ctx, 0x285c, 0x40000000);
72 nv_wo32(dev, ctx, 0x2860/4, 0x3f800000); 72 nv_wo32(ctx, 0x2860, 0x3f800000);
73 nv_wo32(dev, ctx, 0x2864/4, 0x3f000000); 73 nv_wo32(ctx, 0x2864, 0x3f000000);
74 nv_wo32(dev, ctx, 0x286c/4, 0x40000000); 74 nv_wo32(ctx, 0x286c, 0x40000000);
75 nv_wo32(dev, ctx, 0x2870/4, 0x3f800000); 75 nv_wo32(ctx, 0x2870, 0x3f800000);
76 nv_wo32(dev, ctx, 0x2878/4, 0xbf800000); 76 nv_wo32(ctx, 0x2878, 0xbf800000);
77 nv_wo32(dev, ctx, 0x2880/4, 0xbf800000); 77 nv_wo32(ctx, 0x2880, 0xbf800000);
78 nv_wo32(dev, ctx, 0x34a4/4, 0x000fe000); 78 nv_wo32(ctx, 0x34a4, 0x000fe000);
79 nv_wo32(dev, ctx, 0x3530/4, 0x000003f8); 79 nv_wo32(ctx, 0x3530, 0x000003f8);
80 nv_wo32(dev, ctx, 0x3540/4, 0x002fe000); 80 nv_wo32(ctx, 0x3540, 0x002fe000);
81 for (i = 0x355c; i <= 0x3578; i += 4) 81 for (i = 0x355c; i <= 0x3578; i += 4)
82 nv_wo32(dev, ctx, i/4, 0x001c527c); 82 nv_wo32(ctx, i, 0x001c527c);
83} 83}
84 84
85static void 85static void
@@ -87,58 +87,58 @@ nv25_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
87{ 87{
88 int i; 88 int i;
89 89
90 nv_wo32(dev, ctx, 0x035c/4, 0xffff0000); 90 nv_wo32(ctx, 0x035c, 0xffff0000);
91 nv_wo32(dev, ctx, 0x03c0/4, 0x0fff0000); 91 nv_wo32(ctx, 0x03c0, 0x0fff0000);
92 nv_wo32(dev, ctx, 0x03c4/4, 0x0fff0000); 92 nv_wo32(ctx, 0x03c4, 0x0fff0000);
93 nv_wo32(dev, ctx, 0x049c/4, 0x00000101); 93 nv_wo32(ctx, 0x049c, 0x00000101);
94 nv_wo32(dev, ctx, 0x04b0/4, 0x00000111); 94 nv_wo32(ctx, 0x04b0, 0x00000111);
95 nv_wo32(dev, ctx, 0x04c8/4, 0x00000080); 95 nv_wo32(ctx, 0x04c8, 0x00000080);
96 nv_wo32(dev, ctx, 0x04cc/4, 0xffff0000); 96 nv_wo32(ctx, 0x04cc, 0xffff0000);
97 nv_wo32(dev, ctx, 0x04d0/4, 0x00000001); 97 nv_wo32(ctx, 0x04d0, 0x00000001);
98 nv_wo32(dev, ctx, 0x04e4/4, 0x44400000); 98 nv_wo32(ctx, 0x04e4, 0x44400000);
99 nv_wo32(dev, ctx, 0x04fc/4, 0x4b800000); 99 nv_wo32(ctx, 0x04fc, 0x4b800000);
100 for (i = 0x0510; i <= 0x051c; i += 4) 100 for (i = 0x0510; i <= 0x051c; i += 4)
101 nv_wo32(dev, ctx, i/4, 0x00030303); 101 nv_wo32(ctx, i, 0x00030303);
102 for (i = 0x0530; i <= 0x053c; i += 4) 102 for (i = 0x0530; i <= 0x053c; i += 4)
103 nv_wo32(dev, ctx, i/4, 0x00080000); 103 nv_wo32(ctx, i, 0x00080000);
104 for (i = 0x0548; i <= 0x0554; i += 4) 104 for (i = 0x0548; i <= 0x0554; i += 4)
105 nv_wo32(dev, ctx, i/4, 0x01012000); 105 nv_wo32(ctx, i, 0x01012000);
106 for (i = 0x0558; i <= 0x0564; i += 4) 106 for (i = 0x0558; i <= 0x0564; i += 4)
107 nv_wo32(dev, ctx, i/4, 0x000105b8); 107 nv_wo32(ctx, i, 0x000105b8);
108 for (i = 0x0568; i <= 0x0574; i += 4) 108 for (i = 0x0568; i <= 0x0574; i += 4)
109 nv_wo32(dev, ctx, i/4, 0x00080008); 109 nv_wo32(ctx, i, 0x00080008);
110 for (i = 0x0598; i <= 0x05d4; i += 4) 110 for (i = 0x0598; i <= 0x05d4; i += 4)
111 nv_wo32(dev, ctx, i/4, 0x07ff0000); 111 nv_wo32(ctx, i, 0x07ff0000);
112 nv_wo32(dev, ctx, 0x05e0/4, 0x4b7fffff); 112 nv_wo32(ctx, 0x05e0, 0x4b7fffff);
113 nv_wo32(dev, ctx, 0x0620/4, 0x00000080); 113 nv_wo32(ctx, 0x0620, 0x00000080);
114 nv_wo32(dev, ctx, 0x0624/4, 0x30201000); 114 nv_wo32(ctx, 0x0624, 0x30201000);
115 nv_wo32(dev, ctx, 0x0628/4, 0x70605040); 115 nv_wo32(ctx, 0x0628, 0x70605040);
116 nv_wo32(dev, ctx, 0x062c/4, 0xb0a09080); 116 nv_wo32(ctx, 0x062c, 0xb0a09080);
117 nv_wo32(dev, ctx, 0x0630/4, 0xf0e0d0c0); 117 nv_wo32(ctx, 0x0630, 0xf0e0d0c0);
118 nv_wo32(dev, ctx, 0x0664/4, 0x00000001); 118 nv_wo32(ctx, 0x0664, 0x00000001);
119 nv_wo32(dev, ctx, 0x066c/4, 0x00004000); 119 nv_wo32(ctx, 0x066c, 0x00004000);
120 nv_wo32(dev, ctx, 0x0678/4, 0x00000001); 120 nv_wo32(ctx, 0x0678, 0x00000001);
121 nv_wo32(dev, ctx, 0x0680/4, 0x00040000); 121 nv_wo32(ctx, 0x0680, 0x00040000);
122 nv_wo32(dev, ctx, 0x0684/4, 0x00010000); 122 nv_wo32(ctx, 0x0684, 0x00010000);
123 for (i = 0x1b04; i <= 0x2374; i += 16) { 123 for (i = 0x1b04; i <= 0x2374; i += 16) {
124 nv_wo32(dev, ctx, (i + 0)/4, 0x10700ff9); 124 nv_wo32(ctx, (i + 0), 0x10700ff9);
125 nv_wo32(dev, ctx, (i + 4)/4, 0x0436086c); 125 nv_wo32(ctx, (i + 4), 0x0436086c);
126 nv_wo32(dev, ctx, (i + 8)/4, 0x000c001b); 126 nv_wo32(ctx, (i + 8), 0x000c001b);
127 } 127 }
128 nv_wo32(dev, ctx, 0x2704/4, 0x3f800000); 128 nv_wo32(ctx, 0x2704, 0x3f800000);
129 nv_wo32(dev, ctx, 0x2718/4, 0x3f800000); 129 nv_wo32(ctx, 0x2718, 0x3f800000);
130 nv_wo32(dev, ctx, 0x2744/4, 0x40000000); 130 nv_wo32(ctx, 0x2744, 0x40000000);
131 nv_wo32(dev, ctx, 0x2748/4, 0x3f800000); 131 nv_wo32(ctx, 0x2748, 0x3f800000);
132 nv_wo32(dev, ctx, 0x274c/4, 0x3f000000); 132 nv_wo32(ctx, 0x274c, 0x3f000000);
133 nv_wo32(dev, ctx, 0x2754/4, 0x40000000); 133 nv_wo32(ctx, 0x2754, 0x40000000);
134 nv_wo32(dev, ctx, 0x2758/4, 0x3f800000); 134 nv_wo32(ctx, 0x2758, 0x3f800000);
135 nv_wo32(dev, ctx, 0x2760/4, 0xbf800000); 135 nv_wo32(ctx, 0x2760, 0xbf800000);
136 nv_wo32(dev, ctx, 0x2768/4, 0xbf800000); 136 nv_wo32(ctx, 0x2768, 0xbf800000);
137 nv_wo32(dev, ctx, 0x308c/4, 0x000fe000); 137 nv_wo32(ctx, 0x308c, 0x000fe000);
138 nv_wo32(dev, ctx, 0x3108/4, 0x000003f8); 138 nv_wo32(ctx, 0x3108, 0x000003f8);
139 nv_wo32(dev, ctx, 0x3468/4, 0x002fe000); 139 nv_wo32(ctx, 0x3468, 0x002fe000);
140 for (i = 0x3484; i <= 0x34a0; i += 4) 140 for (i = 0x3484; i <= 0x34a0; i += 4)
141 nv_wo32(dev, ctx, i/4, 0x001c527c); 141 nv_wo32(ctx, i, 0x001c527c);
142} 142}
143 143
144static void 144static void
@@ -146,49 +146,49 @@ nv2a_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
146{ 146{
147 int i; 147 int i;
148 148
149 nv_wo32(dev, ctx, 0x033c/4, 0xffff0000); 149 nv_wo32(ctx, 0x033c, 0xffff0000);
150 nv_wo32(dev, ctx, 0x03a0/4, 0x0fff0000); 150 nv_wo32(ctx, 0x03a0, 0x0fff0000);
151 nv_wo32(dev, ctx, 0x03a4/4, 0x0fff0000); 151 nv_wo32(ctx, 0x03a4, 0x0fff0000);
152 nv_wo32(dev, ctx, 0x047c/4, 0x00000101); 152 nv_wo32(ctx, 0x047c, 0x00000101);
153 nv_wo32(dev, ctx, 0x0490/4, 0x00000111); 153 nv_wo32(ctx, 0x0490, 0x00000111);
154 nv_wo32(dev, ctx, 0x04a8/4, 0x44400000); 154 nv_wo32(ctx, 0x04a8, 0x44400000);
155 for (i = 0x04d4; i <= 0x04e0; i += 4) 155 for (i = 0x04d4; i <= 0x04e0; i += 4)
156 nv_wo32(dev, ctx, i/4, 0x00030303); 156 nv_wo32(ctx, i, 0x00030303);
157 for (i = 0x04f4; i <= 0x0500; i += 4) 157 for (i = 0x04f4; i <= 0x0500; i += 4)
158 nv_wo32(dev, ctx, i/4, 0x00080000); 158 nv_wo32(ctx, i, 0x00080000);
159 for (i = 0x050c; i <= 0x0518; i += 4) 159 for (i = 0x050c; i <= 0x0518; i += 4)
160 nv_wo32(dev, ctx, i/4, 0x01012000); 160 nv_wo32(ctx, i, 0x01012000);
161 for (i = 0x051c; i <= 0x0528; i += 4) 161 for (i = 0x051c; i <= 0x0528; i += 4)
162 nv_wo32(dev, ctx, i/4, 0x000105b8); 162 nv_wo32(ctx, i, 0x000105b8);
163 for (i = 0x052c; i <= 0x0538; i += 4) 163 for (i = 0x052c; i <= 0x0538; i += 4)
164 nv_wo32(dev, ctx, i/4, 0x00080008); 164 nv_wo32(ctx, i, 0x00080008);
165 for (i = 0x055c; i <= 0x0598; i += 4) 165 for (i = 0x055c; i <= 0x0598; i += 4)
166 nv_wo32(dev, ctx, i/4, 0x07ff0000); 166 nv_wo32(ctx, i, 0x07ff0000);
167 nv_wo32(dev, ctx, 0x05a4/4, 0x4b7fffff); 167 nv_wo32(ctx, 0x05a4, 0x4b7fffff);
168 nv_wo32(dev, ctx, 0x05fc/4, 0x00000001); 168 nv_wo32(ctx, 0x05fc, 0x00000001);
169 nv_wo32(dev, ctx, 0x0604/4, 0x00004000); 169 nv_wo32(ctx, 0x0604, 0x00004000);
170 nv_wo32(dev, ctx, 0x0610/4, 0x00000001); 170 nv_wo32(ctx, 0x0610, 0x00000001);
171 nv_wo32(dev, ctx, 0x0618/4, 0x00040000); 171 nv_wo32(ctx, 0x0618, 0x00040000);
172 nv_wo32(dev, ctx, 0x061c/4, 0x00010000); 172 nv_wo32(ctx, 0x061c, 0x00010000);
173 for (i = 0x1a9c; i <= 0x22fc; i += 16) { /*XXX: check!! */ 173 for (i = 0x1a9c; i <= 0x22fc; i += 16) { /*XXX: check!! */
174 nv_wo32(dev, ctx, (i + 0)/4, 0x10700ff9); 174 nv_wo32(ctx, (i + 0), 0x10700ff9);
175 nv_wo32(dev, ctx, (i + 4)/4, 0x0436086c); 175 nv_wo32(ctx, (i + 4), 0x0436086c);
176 nv_wo32(dev, ctx, (i + 8)/4, 0x000c001b); 176 nv_wo32(ctx, (i + 8), 0x000c001b);
177 } 177 }
178 nv_wo32(dev, ctx, 0x269c/4, 0x3f800000); 178 nv_wo32(ctx, 0x269c, 0x3f800000);
179 nv_wo32(dev, ctx, 0x26b0/4, 0x3f800000); 179 nv_wo32(ctx, 0x26b0, 0x3f800000);
180 nv_wo32(dev, ctx, 0x26dc/4, 0x40000000); 180 nv_wo32(ctx, 0x26dc, 0x40000000);
181 nv_wo32(dev, ctx, 0x26e0/4, 0x3f800000); 181 nv_wo32(ctx, 0x26e0, 0x3f800000);
182 nv_wo32(dev, ctx, 0x26e4/4, 0x3f000000); 182 nv_wo32(ctx, 0x26e4, 0x3f000000);
183 nv_wo32(dev, ctx, 0x26ec/4, 0x40000000); 183 nv_wo32(ctx, 0x26ec, 0x40000000);
184 nv_wo32(dev, ctx, 0x26f0/4, 0x3f800000); 184 nv_wo32(ctx, 0x26f0, 0x3f800000);
185 nv_wo32(dev, ctx, 0x26f8/4, 0xbf800000); 185 nv_wo32(ctx, 0x26f8, 0xbf800000);
186 nv_wo32(dev, ctx, 0x2700/4, 0xbf800000); 186 nv_wo32(ctx, 0x2700, 0xbf800000);
187 nv_wo32(dev, ctx, 0x3024/4, 0x000fe000); 187 nv_wo32(ctx, 0x3024, 0x000fe000);
188 nv_wo32(dev, ctx, 0x30a0/4, 0x000003f8); 188 nv_wo32(ctx, 0x30a0, 0x000003f8);
189 nv_wo32(dev, ctx, 0x33fc/4, 0x002fe000); 189 nv_wo32(ctx, 0x33fc, 0x002fe000);
190 for (i = 0x341c; i <= 0x3438; i += 4) 190 for (i = 0x341c; i <= 0x3438; i += 4)
191 nv_wo32(dev, ctx, i/4, 0x001c527c); 191 nv_wo32(ctx, i, 0x001c527c);
192} 192}
193 193
194static void 194static void
@@ -196,57 +196,57 @@ nv30_31_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
196{ 196{
197 int i; 197 int i;
198 198
199 nv_wo32(dev, ctx, 0x0410/4, 0x00000101); 199 nv_wo32(ctx, 0x0410, 0x00000101);
200 nv_wo32(dev, ctx, 0x0424/4, 0x00000111); 200 nv_wo32(ctx, 0x0424, 0x00000111);
201 nv_wo32(dev, ctx, 0x0428/4, 0x00000060); 201 nv_wo32(ctx, 0x0428, 0x00000060);
202 nv_wo32(dev, ctx, 0x0444/4, 0x00000080); 202 nv_wo32(ctx, 0x0444, 0x00000080);
203 nv_wo32(dev, ctx, 0x0448/4, 0xffff0000); 203 nv_wo32(ctx, 0x0448, 0xffff0000);
204 nv_wo32(dev, ctx, 0x044c/4, 0x00000001); 204 nv_wo32(ctx, 0x044c, 0x00000001);
205 nv_wo32(dev, ctx, 0x0460/4, 0x44400000); 205 nv_wo32(ctx, 0x0460, 0x44400000);
206 nv_wo32(dev, ctx, 0x048c/4, 0xffff0000); 206 nv_wo32(ctx, 0x048c, 0xffff0000);
207 for (i = 0x04e0; i < 0x04e8; i += 4) 207 for (i = 0x04e0; i < 0x04e8; i += 4)
208 nv_wo32(dev, ctx, i/4, 0x0fff0000); 208 nv_wo32(ctx, i, 0x0fff0000);
209 nv_wo32(dev, ctx, 0x04ec/4, 0x00011100); 209 nv_wo32(ctx, 0x04ec, 0x00011100);
210 for (i = 0x0508; i < 0x0548; i += 4) 210 for (i = 0x0508; i < 0x0548; i += 4)
211 nv_wo32(dev, ctx, i/4, 0x07ff0000); 211 nv_wo32(ctx, i, 0x07ff0000);
212 nv_wo32(dev, ctx, 0x0550/4, 0x4b7fffff); 212 nv_wo32(ctx, 0x0550, 0x4b7fffff);
213 nv_wo32(dev, ctx, 0x058c/4, 0x00000080); 213 nv_wo32(ctx, 0x058c, 0x00000080);
214 nv_wo32(dev, ctx, 0x0590/4, 0x30201000); 214 nv_wo32(ctx, 0x0590, 0x30201000);
215 nv_wo32(dev, ctx, 0x0594/4, 0x70605040); 215 nv_wo32(ctx, 0x0594, 0x70605040);
216 nv_wo32(dev, ctx, 0x0598/4, 0xb8a89888); 216 nv_wo32(ctx, 0x0598, 0xb8a89888);
217 nv_wo32(dev, ctx, 0x059c/4, 0xf8e8d8c8); 217 nv_wo32(ctx, 0x059c, 0xf8e8d8c8);
218 nv_wo32(dev, ctx, 0x05b0/4, 0xb0000000); 218 nv_wo32(ctx, 0x05b0, 0xb0000000);
219 for (i = 0x0600; i < 0x0640; i += 4) 219 for (i = 0x0600; i < 0x0640; i += 4)
220 nv_wo32(dev, ctx, i/4, 0x00010588); 220 nv_wo32(ctx, i, 0x00010588);
221 for (i = 0x0640; i < 0x0680; i += 4) 221 for (i = 0x0640; i < 0x0680; i += 4)
222 nv_wo32(dev, ctx, i/4, 0x00030303); 222 nv_wo32(ctx, i, 0x00030303);
223 for (i = 0x06c0; i < 0x0700; i += 4) 223 for (i = 0x06c0; i < 0x0700; i += 4)
224 nv_wo32(dev, ctx, i/4, 0x0008aae4); 224 nv_wo32(ctx, i, 0x0008aae4);
225 for (i = 0x0700; i < 0x0740; i += 4) 225 for (i = 0x0700; i < 0x0740; i += 4)
226 nv_wo32(dev, ctx, i/4, 0x01012000); 226 nv_wo32(ctx, i, 0x01012000);
227 for (i = 0x0740; i < 0x0780; i += 4) 227 for (i = 0x0740; i < 0x0780; i += 4)
228 nv_wo32(dev, ctx, i/4, 0x00080008); 228 nv_wo32(ctx, i, 0x00080008);
229 nv_wo32(dev, ctx, 0x085c/4, 0x00040000); 229 nv_wo32(ctx, 0x085c, 0x00040000);
230 nv_wo32(dev, ctx, 0x0860/4, 0x00010000); 230 nv_wo32(ctx, 0x0860, 0x00010000);
231 for (i = 0x0864; i < 0x0874; i += 4) 231 for (i = 0x0864; i < 0x0874; i += 4)
232 nv_wo32(dev, ctx, i/4, 0x00040004); 232 nv_wo32(ctx, i, 0x00040004);
233 for (i = 0x1f18; i <= 0x3088 ; i += 16) { 233 for (i = 0x1f18; i <= 0x3088 ; i += 16) {
234 nv_wo32(dev, ctx, i/4 + 0, 0x10700ff9); 234 nv_wo32(ctx, i + 0, 0x10700ff9);
235 nv_wo32(dev, ctx, i/4 + 1, 0x0436086c); 235 nv_wo32(ctx, i + 1, 0x0436086c);
236 nv_wo32(dev, ctx, i/4 + 2, 0x000c001b); 236 nv_wo32(ctx, i + 2, 0x000c001b);
237 } 237 }
238 for (i = 0x30b8; i < 0x30c8; i += 4) 238 for (i = 0x30b8; i < 0x30c8; i += 4)
239 nv_wo32(dev, ctx, i/4, 0x0000ffff); 239 nv_wo32(ctx, i, 0x0000ffff);
240 nv_wo32(dev, ctx, 0x344c/4, 0x3f800000); 240 nv_wo32(ctx, 0x344c, 0x3f800000);
241 nv_wo32(dev, ctx, 0x3808/4, 0x3f800000); 241 nv_wo32(ctx, 0x3808, 0x3f800000);
242 nv_wo32(dev, ctx, 0x381c/4, 0x3f800000); 242 nv_wo32(ctx, 0x381c, 0x3f800000);
243 nv_wo32(dev, ctx, 0x3848/4, 0x40000000); 243 nv_wo32(ctx, 0x3848, 0x40000000);
244 nv_wo32(dev, ctx, 0x384c/4, 0x3f800000); 244 nv_wo32(ctx, 0x384c, 0x3f800000);
245 nv_wo32(dev, ctx, 0x3850/4, 0x3f000000); 245 nv_wo32(ctx, 0x3850, 0x3f000000);
246 nv_wo32(dev, ctx, 0x3858/4, 0x40000000); 246 nv_wo32(ctx, 0x3858, 0x40000000);
247 nv_wo32(dev, ctx, 0x385c/4, 0x3f800000); 247 nv_wo32(ctx, 0x385c, 0x3f800000);
248 nv_wo32(dev, ctx, 0x3864/4, 0xbf800000); 248 nv_wo32(ctx, 0x3864, 0xbf800000);
249 nv_wo32(dev, ctx, 0x386c/4, 0xbf800000); 249 nv_wo32(ctx, 0x386c, 0xbf800000);
250} 250}
251 251
252static void 252static void
@@ -254,57 +254,57 @@ nv34_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
254{ 254{
255 int i; 255 int i;
256 256
257 nv_wo32(dev, ctx, 0x040c/4, 0x01000101); 257 nv_wo32(ctx, 0x040c, 0x01000101);
258 nv_wo32(dev, ctx, 0x0420/4, 0x00000111); 258 nv_wo32(ctx, 0x0420, 0x00000111);
259 nv_wo32(dev, ctx, 0x0424/4, 0x00000060); 259 nv_wo32(ctx, 0x0424, 0x00000060);
260 nv_wo32(dev, ctx, 0x0440/4, 0x00000080); 260 nv_wo32(ctx, 0x0440, 0x00000080);
261 nv_wo32(dev, ctx, 0x0444/4, 0xffff0000); 261 nv_wo32(ctx, 0x0444, 0xffff0000);
262 nv_wo32(dev, ctx, 0x0448/4, 0x00000001); 262 nv_wo32(ctx, 0x0448, 0x00000001);
263 nv_wo32(dev, ctx, 0x045c/4, 0x44400000); 263 nv_wo32(ctx, 0x045c, 0x44400000);
264 nv_wo32(dev, ctx, 0x0480/4, 0xffff0000); 264 nv_wo32(ctx, 0x0480, 0xffff0000);
265 for (i = 0x04d4; i < 0x04dc; i += 4) 265 for (i = 0x04d4; i < 0x04dc; i += 4)
266 nv_wo32(dev, ctx, i/4, 0x0fff0000); 266 nv_wo32(ctx, i, 0x0fff0000);
267 nv_wo32(dev, ctx, 0x04e0/4, 0x00011100); 267 nv_wo32(ctx, 0x04e0, 0x00011100);
268 for (i = 0x04fc; i < 0x053c; i += 4) 268 for (i = 0x04fc; i < 0x053c; i += 4)
269 nv_wo32(dev, ctx, i/4, 0x07ff0000); 269 nv_wo32(ctx, i, 0x07ff0000);
270 nv_wo32(dev, ctx, 0x0544/4, 0x4b7fffff); 270 nv_wo32(ctx, 0x0544, 0x4b7fffff);
271 nv_wo32(dev, ctx, 0x057c/4, 0x00000080); 271 nv_wo32(ctx, 0x057c, 0x00000080);
272 nv_wo32(dev, ctx, 0x0580/4, 0x30201000); 272 nv_wo32(ctx, 0x0580, 0x30201000);
273 nv_wo32(dev, ctx, 0x0584/4, 0x70605040); 273 nv_wo32(ctx, 0x0584, 0x70605040);
274 nv_wo32(dev, ctx, 0x0588/4, 0xb8a89888); 274 nv_wo32(ctx, 0x0588, 0xb8a89888);
275 nv_wo32(dev, ctx, 0x058c/4, 0xf8e8d8c8); 275 nv_wo32(ctx, 0x058c, 0xf8e8d8c8);
276 nv_wo32(dev, ctx, 0x05a0/4, 0xb0000000); 276 nv_wo32(ctx, 0x05a0, 0xb0000000);
277 for (i = 0x05f0; i < 0x0630; i += 4) 277 for (i = 0x05f0; i < 0x0630; i += 4)
278 nv_wo32(dev, ctx, i/4, 0x00010588); 278 nv_wo32(ctx, i, 0x00010588);
279 for (i = 0x0630; i < 0x0670; i += 4) 279 for (i = 0x0630; i < 0x0670; i += 4)
280 nv_wo32(dev, ctx, i/4, 0x00030303); 280 nv_wo32(ctx, i, 0x00030303);
281 for (i = 0x06b0; i < 0x06f0; i += 4) 281 for (i = 0x06b0; i < 0x06f0; i += 4)
282 nv_wo32(dev, ctx, i/4, 0x0008aae4); 282 nv_wo32(ctx, i, 0x0008aae4);
283 for (i = 0x06f0; i < 0x0730; i += 4) 283 for (i = 0x06f0; i < 0x0730; i += 4)
284 nv_wo32(dev, ctx, i/4, 0x01012000); 284 nv_wo32(ctx, i, 0x01012000);
285 for (i = 0x0730; i < 0x0770; i += 4) 285 for (i = 0x0730; i < 0x0770; i += 4)
286 nv_wo32(dev, ctx, i/4, 0x00080008); 286 nv_wo32(ctx, i, 0x00080008);
287 nv_wo32(dev, ctx, 0x0850/4, 0x00040000); 287 nv_wo32(ctx, 0x0850, 0x00040000);
288 nv_wo32(dev, ctx, 0x0854/4, 0x00010000); 288 nv_wo32(ctx, 0x0854, 0x00010000);
289 for (i = 0x0858; i < 0x0868; i += 4) 289 for (i = 0x0858; i < 0x0868; i += 4)
290 nv_wo32(dev, ctx, i/4, 0x00040004); 290 nv_wo32(ctx, i, 0x00040004);
291 for (i = 0x15ac; i <= 0x271c ; i += 16) { 291 for (i = 0x15ac; i <= 0x271c ; i += 16) {
292 nv_wo32(dev, ctx, i/4 + 0, 0x10700ff9); 292 nv_wo32(ctx, i + 0, 0x10700ff9);
293 nv_wo32(dev, ctx, i/4 + 1, 0x0436086c); 293 nv_wo32(ctx, i + 1, 0x0436086c);
294 nv_wo32(dev, ctx, i/4 + 2, 0x000c001b); 294 nv_wo32(ctx, i + 2, 0x000c001b);
295 } 295 }
296 for (i = 0x274c; i < 0x275c; i += 4) 296 for (i = 0x274c; i < 0x275c; i += 4)
297 nv_wo32(dev, ctx, i/4, 0x0000ffff); 297 nv_wo32(ctx, i, 0x0000ffff);
298 nv_wo32(dev, ctx, 0x2ae0/4, 0x3f800000); 298 nv_wo32(ctx, 0x2ae0, 0x3f800000);
299 nv_wo32(dev, ctx, 0x2e9c/4, 0x3f800000); 299 nv_wo32(ctx, 0x2e9c, 0x3f800000);
300 nv_wo32(dev, ctx, 0x2eb0/4, 0x3f800000); 300 nv_wo32(ctx, 0x2eb0, 0x3f800000);
301 nv_wo32(dev, ctx, 0x2edc/4, 0x40000000); 301 nv_wo32(ctx, 0x2edc, 0x40000000);
302 nv_wo32(dev, ctx, 0x2ee0/4, 0x3f800000); 302 nv_wo32(ctx, 0x2ee0, 0x3f800000);
303 nv_wo32(dev, ctx, 0x2ee4/4, 0x3f000000); 303 nv_wo32(ctx, 0x2ee4, 0x3f000000);
304 nv_wo32(dev, ctx, 0x2eec/4, 0x40000000); 304 nv_wo32(ctx, 0x2eec, 0x40000000);
305 nv_wo32(dev, ctx, 0x2ef0/4, 0x3f800000); 305 nv_wo32(ctx, 0x2ef0, 0x3f800000);
306 nv_wo32(dev, ctx, 0x2ef8/4, 0xbf800000); 306 nv_wo32(ctx, 0x2ef8, 0xbf800000);
307 nv_wo32(dev, ctx, 0x2f00/4, 0xbf800000); 307 nv_wo32(ctx, 0x2f00, 0xbf800000);
308} 308}
309 309
310static void 310static void
@@ -312,57 +312,57 @@ nv35_36_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
312{ 312{
313 int i; 313 int i;
314 314
315 nv_wo32(dev, ctx, 0x040c/4, 0x00000101); 315 nv_wo32(ctx, 0x040c, 0x00000101);
316 nv_wo32(dev, ctx, 0x0420/4, 0x00000111); 316 nv_wo32(ctx, 0x0420, 0x00000111);
317 nv_wo32(dev, ctx, 0x0424/4, 0x00000060); 317 nv_wo32(ctx, 0x0424, 0x00000060);
318 nv_wo32(dev, ctx, 0x0440/4, 0x00000080); 318 nv_wo32(ctx, 0x0440, 0x00000080);
319 nv_wo32(dev, ctx, 0x0444/4, 0xffff0000); 319 nv_wo32(ctx, 0x0444, 0xffff0000);
320 nv_wo32(dev, ctx, 0x0448/4, 0x00000001); 320 nv_wo32(ctx, 0x0448, 0x00000001);
321 nv_wo32(dev, ctx, 0x045c/4, 0x44400000); 321 nv_wo32(ctx, 0x045c, 0x44400000);
322 nv_wo32(dev, ctx, 0x0488/4, 0xffff0000); 322 nv_wo32(ctx, 0x0488, 0xffff0000);
323 for (i = 0x04dc; i < 0x04e4; i += 4) 323 for (i = 0x04dc; i < 0x04e4; i += 4)
324 nv_wo32(dev, ctx, i/4, 0x0fff0000); 324 nv_wo32(ctx, i, 0x0fff0000);
325 nv_wo32(dev, ctx, 0x04e8/4, 0x00011100); 325 nv_wo32(ctx, 0x04e8, 0x00011100);
326 for (i = 0x0504; i < 0x0544; i += 4) 326 for (i = 0x0504; i < 0x0544; i += 4)
327 nv_wo32(dev, ctx, i/4, 0x07ff0000); 327 nv_wo32(ctx, i, 0x07ff0000);
328 nv_wo32(dev, ctx, 0x054c/4, 0x4b7fffff); 328 nv_wo32(ctx, 0x054c, 0x4b7fffff);
329 nv_wo32(dev, ctx, 0x0588/4, 0x00000080); 329 nv_wo32(ctx, 0x0588, 0x00000080);
330 nv_wo32(dev, ctx, 0x058c/4, 0x30201000); 330 nv_wo32(ctx, 0x058c, 0x30201000);
331 nv_wo32(dev, ctx, 0x0590/4, 0x70605040); 331 nv_wo32(ctx, 0x0590, 0x70605040);
332 nv_wo32(dev, ctx, 0x0594/4, 0xb8a89888); 332 nv_wo32(ctx, 0x0594, 0xb8a89888);
333 nv_wo32(dev, ctx, 0x0598/4, 0xf8e8d8c8); 333 nv_wo32(ctx, 0x0598, 0xf8e8d8c8);
334 nv_wo32(dev, ctx, 0x05ac/4, 0xb0000000); 334 nv_wo32(ctx, 0x05ac, 0xb0000000);
335 for (i = 0x0604; i < 0x0644; i += 4) 335 for (i = 0x0604; i < 0x0644; i += 4)
336 nv_wo32(dev, ctx, i/4, 0x00010588); 336 nv_wo32(ctx, i, 0x00010588);
337 for (i = 0x0644; i < 0x0684; i += 4) 337 for (i = 0x0644; i < 0x0684; i += 4)
338 nv_wo32(dev, ctx, i/4, 0x00030303); 338 nv_wo32(ctx, i, 0x00030303);
339 for (i = 0x06c4; i < 0x0704; i += 4) 339 for (i = 0x06c4; i < 0x0704; i += 4)
340 nv_wo32(dev, ctx, i/4, 0x0008aae4); 340 nv_wo32(ctx, i, 0x0008aae4);
341 for (i = 0x0704; i < 0x0744; i += 4) 341 for (i = 0x0704; i < 0x0744; i += 4)
342 nv_wo32(dev, ctx, i/4, 0x01012000); 342 nv_wo32(ctx, i, 0x01012000);
343 for (i = 0x0744; i < 0x0784; i += 4) 343 for (i = 0x0744; i < 0x0784; i += 4)
344 nv_wo32(dev, ctx, i/4, 0x00080008); 344 nv_wo32(ctx, i, 0x00080008);
345 nv_wo32(dev, ctx, 0x0860/4, 0x00040000); 345 nv_wo32(ctx, 0x0860, 0x00040000);
346 nv_wo32(dev, ctx, 0x0864/4, 0x00010000); 346 nv_wo32(ctx, 0x0864, 0x00010000);
347 for (i = 0x0868; i < 0x0878; i += 4) 347 for (i = 0x0868; i < 0x0878; i += 4)
348 nv_wo32(dev, ctx, i/4, 0x00040004); 348 nv_wo32(ctx, i, 0x00040004);
349 for (i = 0x1f1c; i <= 0x308c ; i += 16) { 349 for (i = 0x1f1c; i <= 0x308c ; i += 16) {
350 nv_wo32(dev, ctx, i/4 + 0, 0x10700ff9); 350 nv_wo32(ctx, i + 0, 0x10700ff9);
351 nv_wo32(dev, ctx, i/4 + 1, 0x0436086c); 351 nv_wo32(ctx, i + 4, 0x0436086c);
352 nv_wo32(dev, ctx, i/4 + 2, 0x000c001b); 352 nv_wo32(ctx, i + 8, 0x000c001b);
353 } 353 }
354 for (i = 0x30bc; i < 0x30cc; i += 4) 354 for (i = 0x30bc; i < 0x30cc; i += 4)
355 nv_wo32(dev, ctx, i/4, 0x0000ffff); 355 nv_wo32(ctx, i, 0x0000ffff);
356 nv_wo32(dev, ctx, 0x3450/4, 0x3f800000); 356 nv_wo32(ctx, 0x3450, 0x3f800000);
357 nv_wo32(dev, ctx, 0x380c/4, 0x3f800000); 357 nv_wo32(ctx, 0x380c, 0x3f800000);
358 nv_wo32(dev, ctx, 0x3820/4, 0x3f800000); 358 nv_wo32(ctx, 0x3820, 0x3f800000);
359 nv_wo32(dev, ctx, 0x384c/4, 0x40000000); 359 nv_wo32(ctx, 0x384c, 0x40000000);
360 nv_wo32(dev, ctx, 0x3850/4, 0x3f800000); 360 nv_wo32(ctx, 0x3850, 0x3f800000);
361 nv_wo32(dev, ctx, 0x3854/4, 0x3f000000); 361 nv_wo32(ctx, 0x3854, 0x3f000000);
362 nv_wo32(dev, ctx, 0x385c/4, 0x40000000); 362 nv_wo32(ctx, 0x385c, 0x40000000);
363 nv_wo32(dev, ctx, 0x3860/4, 0x3f800000); 363 nv_wo32(ctx, 0x3860, 0x3f800000);
364 nv_wo32(dev, ctx, 0x3868/4, 0xbf800000); 364 nv_wo32(ctx, 0x3868, 0xbf800000);
365 nv_wo32(dev, ctx, 0x3870/4, 0xbf800000); 365 nv_wo32(ctx, 0x3870, 0xbf800000);
366} 366}
367 367
368int 368int
@@ -372,7 +372,7 @@ nv20_graph_create_context(struct nouveau_channel *chan)
372 struct drm_nouveau_private *dev_priv = dev->dev_private; 372 struct drm_nouveau_private *dev_priv = dev->dev_private;
373 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 373 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
374 void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *); 374 void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *);
375 unsigned int idoffs = 0x28/4; 375 unsigned int idoffs = 0x28;
376 int ret; 376 int ret;
377 377
378 switch (dev_priv->chipset) { 378 switch (dev_priv->chipset) {
@@ -403,21 +403,19 @@ nv20_graph_create_context(struct nouveau_channel *chan)
403 BUG_ON(1); 403 BUG_ON(1);
404 } 404 }
405 405
406 ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size, 406 ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 16,
407 16, NVOBJ_FLAG_ZERO_ALLOC, 407 NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin_grctx);
408 &chan->ramin_grctx);
409 if (ret) 408 if (ret)
410 return ret; 409 return ret;
411 410
412 /* Initialise default context values */ 411 /* Initialise default context values */
413 ctx_init(dev, chan->ramin_grctx->gpuobj); 412 ctx_init(dev, chan->ramin_grctx);
414 413
415 /* nv20: nv_wo32(dev, chan->ramin_grctx->gpuobj, 10, chan->id<<24); */ 414 /* nv20: nv_wo32(dev, chan->ramin_grctx->gpuobj, 10, chan->id<<24); */
416 nv_wo32(dev, chan->ramin_grctx->gpuobj, idoffs, 415 nv_wo32(chan->ramin_grctx, idoffs,
417 (chan->id << 24) | 0x1); /* CTX_USER */ 416 (chan->id << 24) | 0x1); /* CTX_USER */
418 417
419 nv_wo32(dev, pgraph->ctx_table->gpuobj, chan->id, 418 nv_wo32(pgraph->ctx_table, chan->id * 4, chan->ramin_grctx->pinst >> 4);
420 chan->ramin_grctx->instance >> 4);
421 return 0; 419 return 0;
422} 420}
423 421
@@ -428,10 +426,8 @@ nv20_graph_destroy_context(struct nouveau_channel *chan)
428 struct drm_nouveau_private *dev_priv = dev->dev_private; 426 struct drm_nouveau_private *dev_priv = dev->dev_private;
429 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 427 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
430 428
431 if (chan->ramin_grctx) 429 nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
432 nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx); 430 nv_wo32(pgraph->ctx_table, chan->id * 4, 0);
433
434 nv_wo32(dev, pgraph->ctx_table->gpuobj, chan->id, 0);
435} 431}
436 432
437int 433int
@@ -442,7 +438,7 @@ nv20_graph_load_context(struct nouveau_channel *chan)
442 438
443 if (!chan->ramin_grctx) 439 if (!chan->ramin_grctx)
444 return -EINVAL; 440 return -EINVAL;
445 inst = chan->ramin_grctx->instance >> 4; 441 inst = chan->ramin_grctx->pinst >> 4;
446 442
447 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); 443 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
448 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER, 444 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER,
@@ -465,7 +461,7 @@ nv20_graph_unload_context(struct drm_device *dev)
465 chan = pgraph->channel(dev); 461 chan = pgraph->channel(dev);
466 if (!chan) 462 if (!chan)
467 return 0; 463 return 0;
468 inst = chan->ramin_grctx->instance >> 4; 464 inst = chan->ramin_grctx->pinst >> 4;
469 465
470 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); 466 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
471 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER, 467 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER,
@@ -552,15 +548,15 @@ nv20_graph_init(struct drm_device *dev)
552 548
553 if (!pgraph->ctx_table) { 549 if (!pgraph->ctx_table) {
554 /* Create Context Pointer Table */ 550 /* Create Context Pointer Table */
555 ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 32 * 4, 16, 551 ret = nouveau_gpuobj_new(dev, NULL, 32 * 4, 16,
556 NVOBJ_FLAG_ZERO_ALLOC, 552 NVOBJ_FLAG_ZERO_ALLOC,
557 &pgraph->ctx_table); 553 &pgraph->ctx_table);
558 if (ret) 554 if (ret)
559 return ret; 555 return ret;
560 } 556 }
561 557
562 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, 558 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE,
563 pgraph->ctx_table->instance >> 4); 559 pgraph->ctx_table->pinst >> 4);
564 560
565 nv20_graph_rdi(dev); 561 nv20_graph_rdi(dev);
566 562
@@ -646,7 +642,7 @@ nv20_graph_takedown(struct drm_device *dev)
646 struct drm_nouveau_private *dev_priv = dev->dev_private; 642 struct drm_nouveau_private *dev_priv = dev->dev_private;
647 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 643 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
648 644
649 nouveau_gpuobj_ref_del(dev, &pgraph->ctx_table); 645 nouveau_gpuobj_ref(NULL, &pgraph->ctx_table);
650} 646}
651 647
652int 648int
@@ -681,15 +677,15 @@ nv30_graph_init(struct drm_device *dev)
681 677
682 if (!pgraph->ctx_table) { 678 if (!pgraph->ctx_table) {
683 /* Create Context Pointer Table */ 679 /* Create Context Pointer Table */
684 ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 32 * 4, 16, 680 ret = nouveau_gpuobj_new(dev, NULL, 32 * 4, 16,
685 NVOBJ_FLAG_ZERO_ALLOC, 681 NVOBJ_FLAG_ZERO_ALLOC,
686 &pgraph->ctx_table); 682 &pgraph->ctx_table);
687 if (ret) 683 if (ret)
688 return ret; 684 return ret;
689 } 685 }
690 686
691 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, 687 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE,
692 pgraph->ctx_table->instance >> 4); 688 pgraph->ctx_table->pinst >> 4);
693 689
694 nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); 690 nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
695 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); 691 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
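
Nearly all of the nv20_graph.c churn above is one interface change: nv_wo32() now takes the gpuobj plus a byte offset instead of dev, object, and a dword index, so nv_wo32(dev, ctx, off/4, v) becomes nv_wo32(ctx, off, v), and the dword after it moves from index + 1 to offset + 4. Note the conversion is not uniform in this file as shown: the nv30/31 and nv34 loops carry i + 1 / i + 2 across literally, while the nv35/36 loop maps the same dword indices to i + 4 / i + 8; with byte addressing, only multiples of four address whole dwords. A standalone model of the equivalence:

    /*
     * wo32_dword() is the old-style accessor (index counts 32-bit
     * words); wo32_byte() is the new style (offset counts bytes).
     */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t pramin[0x1000];

    static void wo32_dword(uint32_t idx, uint32_t val)
    {
        pramin[idx] = val;
    }

    static void wo32_byte(uint32_t off, uint32_t val)
    {
        pramin[off / 4] = val;          /* off must be 4-byte aligned */
    }

    int main(void)
    {
        uint32_t off = 0x1c1c;

        wo32_dword(off / 4 + 1, 0x0436086c);    /* old call site */
        wo32_byte(off + 4, 0x11111111);         /* equivalent new call */
        printf("%s\n", pramin[off / 4 + 1] == 0x11111111 ?
               "same slot" : "different slot");
        return 0;
    }
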
diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c
index 2b67f1835c39..d337b8b28cdd 100644
--- a/drivers/gpu/drm/nouveau/nv40_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv40_fifo.c
@@ -27,8 +27,9 @@
27#include "drmP.h" 27#include "drmP.h"
28#include "nouveau_drv.h" 28#include "nouveau_drv.h"
29#include "nouveau_drm.h" 29#include "nouveau_drm.h"
30#include "nouveau_ramht.h"
30 31
31#define NV40_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV40_RAMFC__SIZE)) 32#define NV40_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV40_RAMFC__SIZE))
32#define NV40_RAMFC__SIZE 128 33#define NV40_RAMFC__SIZE 128
33 34
34int 35int
@@ -42,7 +43,7 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
42 43
43 ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0, 44 ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0,
44 NV40_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC | 45 NV40_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC |
45 NVOBJ_FLAG_ZERO_FREE, NULL, &chan->ramfc); 46 NVOBJ_FLAG_ZERO_FREE, &chan->ramfc);
46 if (ret) 47 if (ret)
47 return ret; 48 return ret;
48 49
@@ -50,7 +51,7 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
50 51
51 nv_wi32(dev, fc + 0, chan->pushbuf_base); 52 nv_wi32(dev, fc + 0, chan->pushbuf_base);
52 nv_wi32(dev, fc + 4, chan->pushbuf_base); 53 nv_wi32(dev, fc + 4, chan->pushbuf_base);
53 nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4); 54 nv_wi32(dev, fc + 12, chan->pushbuf->pinst >> 4);
54 nv_wi32(dev, fc + 24, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | 55 nv_wi32(dev, fc + 24, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
55 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | 56 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
56 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 | 57 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
@@ -58,7 +59,7 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
58 NV_PFIFO_CACHE1_BIG_ENDIAN | 59 NV_PFIFO_CACHE1_BIG_ENDIAN |
59#endif 60#endif
60 0x30000000 /* no idea.. */); 61 0x30000000 /* no idea.. */);
61 nv_wi32(dev, fc + 56, chan->ramin_grctx->instance >> 4); 62 nv_wi32(dev, fc + 56, chan->ramin_grctx->pinst >> 4);
62 nv_wi32(dev, fc + 60, 0x0001FFFF); 63 nv_wi32(dev, fc + 60, 0x0001FFFF);
63 64
64 /* enable the fifo dma operation */ 65 /* enable the fifo dma operation */
@@ -77,8 +78,7 @@ nv40_fifo_destroy_context(struct nouveau_channel *chan)
77 nv_wr32(dev, NV04_PFIFO_MODE, 78 nv_wr32(dev, NV04_PFIFO_MODE,
78 nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id)); 79 nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
79 80
80 if (chan->ramfc) 81 nouveau_gpuobj_ref(NULL, &chan->ramfc);
81 nouveau_gpuobj_ref_del(dev, &chan->ramfc);
82} 82}
83 83
84static void 84static void
@@ -241,9 +241,9 @@ nv40_fifo_init_ramxx(struct drm_device *dev)
241 struct drm_nouveau_private *dev_priv = dev->dev_private; 241 struct drm_nouveau_private *dev_priv = dev->dev_private;
242 242
243 nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ | 243 nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
244 ((dev_priv->ramht_bits - 9) << 16) | 244 ((dev_priv->ramht->bits - 9) << 16) |
245 (dev_priv->ramht_offset >> 8)); 245 (dev_priv->ramht->gpuobj->pinst >> 8));
246 nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8); 246 nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
247 247
248 switch (dev_priv->chipset) { 248 switch (dev_priv->chipset) {
249 case 0x47: 249 case 0x47:
@@ -271,7 +271,7 @@ nv40_fifo_init_ramxx(struct drm_device *dev)
271 nv_wr32(dev, 0x2230, 0); 271 nv_wr32(dev, 0x2230, 0);
272 nv_wr32(dev, NV40_PFIFO_RAMFC, 272 nv_wr32(dev, NV40_PFIFO_RAMFC,
273 ((dev_priv->vram_size - 512 * 1024 + 273 ((dev_priv->vram_size - 512 * 1024 +
274 dev_priv->ramfc_offset) >> 16) | (3 << 16)); 274 dev_priv->ramfc->pinst) >> 16) | (3 << 16));
275 break; 275 break;
276 } 276 }
277} 277}
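
As in the nv10 hunks, nouveau_gpuobj_ref_del(dev, &obj) becomes nouveau_gpuobj_ref(NULL, &obj), and the removed if (chan->ramfc) guard shows the new call is safe on an empty slot: it drops whatever reference the slot holds and clears the pointer. A sketch of that idiom (the helper's internals here are assumed; the call-site contract is what the diff shows):

    #include <stdio.h>
    #include <stdlib.h>

    struct gpuobj {
        int refcount;
    };

    static void gpuobj_ref(struct gpuobj *ref, struct gpuobj **ptr)
    {
        if (ref)
            ref->refcount++;            /* new holder takes a reference */
        if (*ptr && --(*ptr)->refcount == 0) {
            printf("freeing object\n");
            free(*ptr);
        }
        *ptr = ref;                     /* slot is always left consistent */
    }

    int main(void)
    {
        struct gpuobj *ramfc = calloc(1, sizeof(*ramfc));

        if (!ramfc)
            return 1;
        ramfc->refcount = 1;            /* as if returned by a _new() call */
        gpuobj_ref(NULL, &ramfc);       /* drop: frees and NULLs the slot */
        gpuobj_ref(NULL, &ramfc);       /* harmless on an empty slot */
        return 0;
    }
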
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index fd7d2b501316..7ee1b91569b8 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -45,7 +45,7 @@ nv40_graph_channel(struct drm_device *dev)
45 struct nouveau_channel *chan = dev_priv->fifos[i]; 45 struct nouveau_channel *chan = dev_priv->fifos[i];
46 46
47 if (chan && chan->ramin_grctx && 47 if (chan && chan->ramin_grctx &&
48 chan->ramin_grctx->instance == inst) 48 chan->ramin_grctx->pinst == inst)
49 return chan; 49 return chan;
50 } 50 }
51 51
@@ -61,27 +61,25 @@ nv40_graph_create_context(struct nouveau_channel *chan)
61 struct nouveau_grctx ctx = {}; 61 struct nouveau_grctx ctx = {};
62 int ret; 62 int ret;
63 63
64 ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size, 64 ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 16,
65 16, NVOBJ_FLAG_ZERO_ALLOC, 65 NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin_grctx);
66 &chan->ramin_grctx);
67 if (ret) 66 if (ret)
68 return ret; 67 return ret;
69 68
70 /* Initialise default context values */ 69 /* Initialise default context values */
71 ctx.dev = chan->dev; 70 ctx.dev = chan->dev;
72 ctx.mode = NOUVEAU_GRCTX_VALS; 71 ctx.mode = NOUVEAU_GRCTX_VALS;
73 ctx.data = chan->ramin_grctx->gpuobj; 72 ctx.data = chan->ramin_grctx;
74 nv40_grctx_init(&ctx); 73 nv40_grctx_init(&ctx);
75 74
76 nv_wo32(dev, chan->ramin_grctx->gpuobj, 0, 75 nv_wo32(chan->ramin_grctx, 0, chan->ramin_grctx->pinst);
77 chan->ramin_grctx->gpuobj->im_pramin->start);
78 return 0; 76 return 0;
79} 77}
80 78
81void 79void
82nv40_graph_destroy_context(struct nouveau_channel *chan) 80nv40_graph_destroy_context(struct nouveau_channel *chan)
83{ 81{
84 nouveau_gpuobj_ref_del(chan->dev, &chan->ramin_grctx); 82 nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
85} 83}
86 84
87static int 85static int
@@ -135,7 +133,7 @@ nv40_graph_load_context(struct nouveau_channel *chan)
135 133
136 if (!chan->ramin_grctx) 134 if (!chan->ramin_grctx)
137 return -EINVAL; 135 return -EINVAL;
138 inst = chan->ramin_grctx->instance >> 4; 136 inst = chan->ramin_grctx->pinst >> 4;
139 137
140 ret = nv40_graph_transfer_context(dev, inst, 0); 138 ret = nv40_graph_transfer_context(dev, inst, 0);
141 if (ret) 139 if (ret)
diff --git a/drivers/gpu/drm/nouveau/nv40_grctx.c b/drivers/gpu/drm/nouveau/nv40_grctx.c
index 9b5c97469588..ce585093264e 100644
--- a/drivers/gpu/drm/nouveau/nv40_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv40_grctx.c
@@ -596,13 +596,13 @@ nv40_graph_construct_shader(struct nouveau_grctx *ctx)
596 596
597 offset += 0x0280/4; 597 offset += 0x0280/4;
598 for (i = 0; i < 16; i++, offset += 2) 598 for (i = 0; i < 16; i++, offset += 2)
599 nv_wo32(dev, obj, offset, 0x3f800000); 599 nv_wo32(obj, offset * 4, 0x3f800000);
600 600
601 for (vs = 0; vs < vs_nr; vs++, offset += vs_len) { 601 for (vs = 0; vs < vs_nr; vs++, offset += vs_len) {
602 for (i = 0; i < vs_nr_b0 * 6; i += 6) 602 for (i = 0; i < vs_nr_b0 * 6; i += 6)
603 nv_wo32(dev, obj, offset + b0_offset + i, 0x00000001); 603 nv_wo32(obj, (offset + b0_offset + i) * 4, 0x00000001);
604 for (i = 0; i < vs_nr_b1 * 4; i += 4) 604 for (i = 0; i < vs_nr_b1 * 4; i += 4)
605 nv_wo32(dev, obj, offset + b1_offset + i, 0x3f800000); 605 nv_wo32(obj, (offset + b1_offset + i) * 4, 0x3f800000);
606 } 606 }
607} 607}
608 608
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index bfd4ca2fe7ef..3f2fb4ec63ab 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -104,8 +104,7 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
104 OUT_RING(evo, nv_crtc->lut.depth == 8 ? 104 OUT_RING(evo, nv_crtc->lut.depth == 8 ?
105 NV50_EVO_CRTC_CLUT_MODE_OFF : 105 NV50_EVO_CRTC_CLUT_MODE_OFF :
106 NV50_EVO_CRTC_CLUT_MODE_ON); 106 NV50_EVO_CRTC_CLUT_MODE_ON);
107 OUT_RING(evo, (nv_crtc->lut.nvbo->bo.mem.mm_node->start << 107 OUT_RING(evo, (nv_crtc->lut.nvbo->bo.mem.start << PAGE_SHIFT) >> 8);
108 PAGE_SHIFT) >> 8);
109 if (dev_priv->chipset != 0x50) { 108 if (dev_priv->chipset != 0x50) {
110 BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1); 109 BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
111 OUT_RING(evo, NvEvoVRAM); 110 OUT_RING(evo, NvEvoVRAM);
@@ -266,15 +265,10 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
266{ 265{
267 struct drm_nouveau_private *dev_priv = dev->dev_private; 266 struct drm_nouveau_private *dev_priv = dev->dev_private;
268 struct pll_lims pll; 267 struct pll_lims pll;
269 uint32_t reg, reg1, reg2; 268 uint32_t reg1, reg2;
270 int ret, N1, M1, N2, M2, P; 269 int ret, N1, M1, N2, M2, P;
271 270
272 if (dev_priv->chipset < NV_C0) 271 ret = get_pll_limits(dev, PLL_VPLL0 + head, &pll);
273 reg = NV50_PDISPLAY_CRTC_CLK_CTRL1(head);
274 else
275 reg = 0x614140 + (head * 0x800);
276
277 ret = get_pll_limits(dev, reg, &pll);
278 if (ret) 272 if (ret)
279 return ret; 273 return ret;
280 274
@@ -286,11 +280,11 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
286 NV_DEBUG(dev, "pclk %d out %d NM1 %d %d NM2 %d %d P %d\n", 280 NV_DEBUG(dev, "pclk %d out %d NM1 %d %d NM2 %d %d P %d\n",
287 pclk, ret, N1, M1, N2, M2, P); 281 pclk, ret, N1, M1, N2, M2, P);
288 282
289 reg1 = nv_rd32(dev, reg + 4) & 0xff00ff00; 283 reg1 = nv_rd32(dev, pll.reg + 4) & 0xff00ff00;
290 reg2 = nv_rd32(dev, reg + 8) & 0x8000ff00; 284 reg2 = nv_rd32(dev, pll.reg + 8) & 0x8000ff00;
291 nv_wr32(dev, reg, 0x10000611); 285 nv_wr32(dev, pll.reg + 0, 0x10000611);
292 nv_wr32(dev, reg + 4, reg1 | (M1 << 16) | N1); 286 nv_wr32(dev, pll.reg + 4, reg1 | (M1 << 16) | N1);
293 nv_wr32(dev, reg + 8, reg2 | (P << 28) | (M2 << 16) | N2); 287 nv_wr32(dev, pll.reg + 8, reg2 | (P << 28) | (M2 << 16) | N2);
294 } else 288 } else
295 if (dev_priv->chipset < NV_C0) { 289 if (dev_priv->chipset < NV_C0) {
296 ret = nv50_calc_pll2(dev, &pll, pclk, &N1, &N2, &M1, &P); 290 ret = nv50_calc_pll2(dev, &pll, pclk, &N1, &N2, &M1, &P);
@@ -300,10 +294,10 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
 		NV_DEBUG(dev, "pclk %d out %d N %d fN 0x%04x M %d P %d\n",
 			 pclk, ret, N1, N2, M1, P);
 
-		reg1 = nv_rd32(dev, reg + 4) & 0xffc00000;
-		nv_wr32(dev, reg, 0x50000610);
-		nv_wr32(dev, reg + 4, reg1 | (P << 16) | (M1 << 8) | N1);
-		nv_wr32(dev, reg + 8, N2);
+		reg1 = nv_rd32(dev, pll.reg + 4) & 0xffc00000;
+		nv_wr32(dev, pll.reg + 0, 0x50000610);
+		nv_wr32(dev, pll.reg + 4, reg1 | (P << 16) | (M1 << 8) | N1);
+		nv_wr32(dev, pll.reg + 8, N2);
 	} else {
 		ret = nv50_calc_pll2(dev, &pll, pclk, &N1, &N2, &M1, &P);
 		if (ret <= 0)
@@ -312,9 +306,9 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
 		NV_DEBUG(dev, "pclk %d out %d N %d fN 0x%04x M %d P %d\n",
 			 pclk, ret, N1, N2, M1, P);
 
-		nv_mask(dev, reg + 0x0c, 0x00000000, 0x00000100);
-		nv_wr32(dev, reg + 0x04, (P << 16) | (N1 << 8) | M1);
-		nv_wr32(dev, reg + 0x10, N2 << 16);
+		nv_mask(dev, pll.reg + 0x0c, 0x00000000, 0x00000100);
+		nv_wr32(dev, pll.reg + 0x04, (P << 16) | (N1 << 8) | M1);
+		nv_wr32(dev, pll.reg + 0x10, N2 << 16);
 	}
 
 	return 0;
@@ -338,7 +332,9 @@ nv50_crtc_destroy(struct drm_crtc *crtc)
 
 	nv50_cursor_fini(nv_crtc);
 
+	nouveau_bo_unmap(nv_crtc->lut.nvbo);
 	nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
+	nouveau_bo_unmap(nv_crtc->cursor.nvbo);
 	nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
 	kfree(nv_crtc->mode);
 	kfree(nv_crtc);
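[Aside — illustration, not part of the patch: the nv50_crtc.c hunks stop computing the VPLL register address locally; get_pll_limits() is now keyed by a PLL type id (PLL_VPLL0 + head) and reports the register base back in the limits struct. A simplified sketch of that shape — the struct layout, id value, and register addresses here are assumptions, not the driver's actual definitions:]

#include <stdint.h>
#include <stdio.h>

struct pll_lims_sketch {
	uint32_t reg;                   /* register base, filled in by the lookup */
};

enum { SKETCH_PLL_VPLL0 = 0x10 };       /* illustrative id only */

static int get_pll_limits_sketch(int type, struct pll_lims_sketch *pll)
{
	/* a real implementation consults the VBIOS PLL-limits table */
	if (type < SKETCH_PLL_VPLL0 || type > SKETCH_PLL_VPLL0 + 1)
		return -1;
	pll->reg = 0x614100 + (uint32_t)(type - SKETCH_PLL_VPLL0) * 0x800;
	return 0;
}

int main(void)
{
	struct pll_lims_sketch pll;
	int head = 0;

	if (get_pll_limits_sketch(SKETCH_PLL_VPLL0 + head, &pll) == 0)
		printf("VPLL%d registers at 0x%06x\n", head, pll.reg);
	return 0;
}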
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c
index 03ad7ab14f09..1b9ce3021aa3 100644
--- a/drivers/gpu/drm/nouveau/nv50_cursor.c
+++ b/drivers/gpu/drm/nouveau/nv50_cursor.c
@@ -147,7 +147,7 @@ nv50_cursor_fini(struct nouveau_crtc *nv_crtc)
 	NV_DEBUG_KMS(dev, "\n");
 
 	nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx), 0);
-	if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx),
+	if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx),
 		     NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
 		NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n");
 		NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n",
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
index 1bc085962945..875414b09ade 100644
--- a/drivers/gpu/drm/nouveau/nv50_dac.c
+++ b/drivers/gpu/drm/nouveau/nv50_dac.c
@@ -79,7 +79,7 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
 
 	nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
 		0x00150000 | NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
-	if (!nv_wait(NV50_PDISPLAY_DAC_DPMS_CTRL(or),
+	if (!nv_wait(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
 		     NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
 		NV_ERROR(dev, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
 		NV_ERROR(dev, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
@@ -130,7 +130,7 @@ nv50_dac_dpms(struct drm_encoder *encoder, int mode)
 	NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode);
 
 	/* wait for it to be done */
-	if (!nv_wait(NV50_PDISPLAY_DAC_DPMS_CTRL(or),
+	if (!nv_wait(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
 		     NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
 		NV_ERROR(dev, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
 		NV_ERROR(dev, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
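[Aside — illustration, not part of the patch: every nv_wait() call in these files gains an explicit dev argument; functionally it stays a masked poll with timeout. A minimal userspace model of that pattern — the names and the iteration-bound timeout policy are assumptions of this sketch, not the driver's implementation:]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg;               /* stand-in for one MMIO register */

static uint32_t rd32_sketch(uint32_t reg) { (void)reg; return fake_reg; }

static bool nv_wait_sketch(uint32_t reg, uint32_t mask, uint32_t val)
{
	/* the driver bounds this by elapsed time; a loop count keeps it simple */
	for (int i = 0; i < 100000; i++) {
		if ((rd32_sketch(reg) & mask) == val)
			return true;    /* condition met */
	}
	return false;                   /* caller logs a timeout error */
}

int main(void)
{
	fake_reg = 0;                   /* PENDING bit already clear */
	printf("%s\n", nv_wait_sketch(0x61a004, 0x80000000, 0) ? "ok" : "timeout");
	return 0;
}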
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 612fa6d6a0cb..55c9663ef2bf 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -30,8 +30,22 @@
 #include "nouveau_connector.h"
 #include "nouveau_fb.h"
 #include "nouveau_fbcon.h"
+#include "nouveau_ramht.h"
 #include "drm_crtc_helper.h"
 
+static inline int
+nv50_sor_nr(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->chipset  < 0x90 ||
+	    dev_priv->chipset == 0x92 ||
+	    dev_priv->chipset == 0xa0)
+		return 2;
+
+	return 4;
+}
+
 static void
 nv50_evo_channel_del(struct nouveau_channel **pchan)
 {
@@ -42,6 +56,7 @@ nv50_evo_channel_del(struct nouveau_channel **pchan)
 	*pchan = NULL;
 
 	nouveau_gpuobj_channel_takedown(chan);
+	nouveau_bo_unmap(chan->pushbuf_bo);
 	nouveau_bo_ref(NULL, &chan->pushbuf_bo);
 
 	if (chan->user)
@@ -65,23 +80,23 @@ nv50_evo_dmaobj_new(struct nouveau_channel *evo, uint32_t class, uint32_t name,
 		return ret;
 	obj->engine = NVOBJ_ENGINE_DISPLAY;
 
-	ret = nouveau_gpuobj_ref_add(dev, evo, name, obj, NULL);
-	if (ret) {
-		nouveau_gpuobj_del(dev, &obj);
-		return ret;
-	}
-
-	nv_wo32(dev, obj, 0, (tile_flags << 22) | (magic_flags << 16) | class);
-	nv_wo32(dev, obj, 1, limit);
-	nv_wo32(dev, obj, 2, offset);
-	nv_wo32(dev, obj, 3, 0x00000000);
-	nv_wo32(dev, obj, 4, 0x00000000);
+	nv_wo32(obj, 0, (tile_flags << 22) | (magic_flags << 16) | class);
+	nv_wo32(obj, 4, limit);
+	nv_wo32(obj, 8, offset);
+	nv_wo32(obj, 12, 0x00000000);
+	nv_wo32(obj, 16, 0x00000000);
 	if (dev_priv->card_type < NV_C0)
-		nv_wo32(dev, obj, 5, 0x00010000);
+		nv_wo32(obj, 20, 0x00010000);
 	else
-		nv_wo32(dev, obj, 5, 0x00020000);
+		nv_wo32(obj, 20, 0x00020000);
 	dev_priv->engine.instmem.flush(dev);
 
+	ret = nouveau_ramht_insert(evo, name, obj);
+	nouveau_gpuobj_ref(NULL, &obj);
+	if (ret) {
+		return ret;
+	}
+
 	return 0;
 }
 
@@ -89,6 +104,7 @@ static int
 nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj *ramht = NULL;
 	struct nouveau_channel *chan;
 	int ret;
 
@@ -102,32 +118,35 @@ nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan)
 	chan->user_get = 4;
 	chan->user_put = 0;
 
-	INIT_LIST_HEAD(&chan->ramht_refs);
-
-	ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 32768, 0x1000,
-				     NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin);
+	ret = nouveau_gpuobj_new(dev, NULL, 32768, 0x1000,
+				 NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin);
 	if (ret) {
 		NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
 		nv50_evo_channel_del(pchan);
 		return ret;
 	}
 
-	ret = drm_mm_init(&chan->ramin_heap,
-			  chan->ramin->gpuobj->im_pramin->start, 32768);
+	ret = drm_mm_init(&chan->ramin_heap, 0, 32768);
 	if (ret) {
 		NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
 		nv50_evo_channel_del(pchan);
 		return ret;
 	}
 
-	ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 4096, 16,
-				     0, &chan->ramht);
+	ret = nouveau_gpuobj_new(dev, chan, 4096, 16, 0, &ramht);
 	if (ret) {
 		NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
 		nv50_evo_channel_del(pchan);
 		return ret;
 	}
 
+	ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
+	nouveau_gpuobj_ref(NULL, &ramht);
+	if (ret) {
+		nv50_evo_channel_del(pchan);
+		return ret;
+	}
+
 	if (dev_priv->chipset != 0x50) {
 		ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB16, 0x70, 0x19,
 					  0, 0xffffffff);
@@ -227,11 +246,11 @@ nv50_display_init(struct drm_device *dev)
 		nv_wr32(dev, 0x006101d0 + (i * 0x04), val);
 	}
 	/* SOR */
-	for (i = 0; i < 4; i++) {
+	for (i = 0; i < nv50_sor_nr(dev); i++) {
 		val = nv_rd32(dev, 0x0061c000 + (i * 0x800));
 		nv_wr32(dev, 0x006101e0 + (i * 0x04), val);
 	}
-	/* Something not yet in use, tv-out maybe. */
+	/* EXT */
 	for (i = 0; i < 3; i++) {
 		val = nv_rd32(dev, 0x0061e000 + (i * 0x800));
 		nv_wr32(dev, 0x006101f0 + (i * 0x04), val);
@@ -260,7 +279,7 @@ nv50_display_init(struct drm_device *dev)
 	if (nv_rd32(dev, NV50_PDISPLAY_INTR_1) & 0x100) {
 		nv_wr32(dev, NV50_PDISPLAY_INTR_1, 0x100);
 		nv_wr32(dev, 0x006194e8, nv_rd32(dev, 0x006194e8) & ~1);
-		if (!nv_wait(0x006194e8, 2, 0)) {
+		if (!nv_wait(dev, 0x006194e8, 2, 0)) {
 			NV_ERROR(dev, "timeout: (0x6194e8 & 2) != 0\n");
 			NV_ERROR(dev, "0x6194e8 = 0x%08x\n",
 				 nv_rd32(dev, 0x6194e8));
@@ -291,7 +310,8 @@ nv50_display_init(struct drm_device *dev)
 
 	nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, NV50_PDISPLAY_CTRL_STATE_ENABLE);
 	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1000b03);
-	if (!nv_wait(NV50_PDISPLAY_CHANNEL_STAT(0), 0x40000000, 0x40000000)) {
+	if (!nv_wait(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
+		     0x40000000, 0x40000000)) {
 		NV_ERROR(dev, "timeout: (0x610200 & 0x40000000) == 0x40000000\n");
 		NV_ERROR(dev, "0x610200 = 0x%08x\n",
 			 nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)));
@@ -300,7 +320,7 @@ nv50_display_init(struct drm_device *dev)
 
 	for (i = 0; i < 2; i++) {
 		nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000);
-		if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
+		if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
 			     NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
 			NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n");
 			NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n",
@@ -310,7 +330,7 @@ nv50_display_init(struct drm_device *dev)
 
 		nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
 			NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_ON);
-		if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
+		if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
 			     NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS,
 			     NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE)) {
 			NV_ERROR(dev, "timeout: "
@@ -321,16 +341,16 @@ nv50_display_init(struct drm_device *dev)
 		}
 	}
 
-	nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->instance >> 8) | 9);
+	nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9);
 
 	/* initialise fifo */
 	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_DMA_CB(0),
-		((evo->pushbuf_bo->bo.mem.mm_node->start << PAGE_SHIFT) >> 8) |
+		((evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT) >> 8) |
 		NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM |
 		NV50_PDISPLAY_CHANNEL_DMA_CB_VALID);
 	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK2(0), 0x00010000);
 	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK3(0), 0x00000002);
-	if (!nv_wait(0x610200, 0x80000000, 0x00000000)) {
+	if (!nv_wait(dev, 0x610200, 0x80000000, 0x00000000)) {
 		NV_ERROR(dev, "timeout: (0x610200 & 0x80000000) == 0\n");
 		NV_ERROR(dev, "0x610200 = 0x%08x\n", nv_rd32(dev, 0x610200));
 		return -EBUSY;
@@ -370,7 +390,7 @@ nv50_display_init(struct drm_device *dev)
 	BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK082C), 1);
 	OUT_RING(evo, 0);
 	FIRE_RING(evo);
-	if (!nv_wait(0x640004, 0xffffffff, evo->dma.put << 2))
+	if (!nv_wait(dev, 0x640004, 0xffffffff, evo->dma.put << 2))
 		NV_ERROR(dev, "evo pushbuf stalled\n");
 
 	/* enable clock change interrupts. */
@@ -424,7 +444,7 @@ static int nv50_display_disable(struct drm_device *dev)
 			continue;
 
 		nv_wr32(dev, NV50_PDISPLAY_INTR_1, mask);
-		if (!nv_wait(NV50_PDISPLAY_INTR_1, mask, mask)) {
+		if (!nv_wait(dev, NV50_PDISPLAY_INTR_1, mask, mask)) {
 			NV_ERROR(dev, "timeout: (0x610024 & 0x%08x) == "
 				      "0x%08x\n", mask, mask);
 			NV_ERROR(dev, "0x610024 = 0x%08x\n",
@@ -434,14 +454,14 @@ static int nv50_display_disable(struct drm_device *dev)
 
 	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0);
 	nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, 0);
-	if (!nv_wait(NV50_PDISPLAY_CHANNEL_STAT(0), 0x1e0000, 0)) {
+	if (!nv_wait(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1e0000, 0)) {
 		NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) == 0\n");
 		NV_ERROR(dev, "0x610200 = 0x%08x\n",
 			 nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)));
 	}
 
 	for (i = 0; i < 3; i++) {
-		if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_STATE(i),
+		if (!nv_wait(dev, NV50_PDISPLAY_SOR_DPMS_STATE(i),
 			     NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
 			NV_ERROR(dev, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", i);
 			NV_ERROR(dev, "SOR_DPMS_STATE(%d) = 0x%08x\n", i,
@@ -710,7 +730,7 @@ nv50_display_unk10_handler(struct drm_device *dev)
 			or = i;
 	}
 
-	for (i = 0; type == OUTPUT_ANY && i < 4; i++) {
+	for (i = 0; type == OUTPUT_ANY && i < nv50_sor_nr(dev); i++) {
 		if (dev_priv->chipset  < 0x90 ||
 		    dev_priv->chipset == 0x92 ||
 		    dev_priv->chipset == 0xa0)
@@ -841,7 +861,7 @@ nv50_display_unk20_handler(struct drm_device *dev)
 			or = i;
 	}
 
-	for (i = 0; type == OUTPUT_ANY && i < 4; i++) {
+	for (i = 0; type == OUTPUT_ANY && i < nv50_sor_nr(dev); i++) {
 		if (dev_priv->chipset  < 0x90 ||
 		    dev_priv->chipset == 0x92 ||
 		    dev_priv->chipset == 0xa0)
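[Aside — illustration, not part of the patch: the nv50_display.c hunks adopt a consistent ownership pattern: create a gpuobj with one local reference, let nouveau_ramht_insert() take its own reference, then drop the local one with nouveau_gpuobj_ref(NULL, &obj), leaving the hash table as sole owner. A self-contained refcounting model of that pattern; the names here are inventions of the sketch:]

#include <assert.h>
#include <stddef.h>
#include <stdlib.h>

struct obj_sketch { int refcount; };

/* move-style ref helper: take a ref on src, drop the ref held in *dst */
static void ref_sketch(struct obj_sketch *src, struct obj_sketch **dst)
{
	if (src)
		src->refcount++;
	if (*dst && --(*dst)->refcount == 0)
		free(*dst);
	*dst = src;
}

static struct obj_sketch *table_slot;   /* stand-in for the RAMHT entry */

static int ramht_insert_sketch(struct obj_sketch *obj)
{
	ref_sketch(obj, &table_slot);   /* the table holds its own reference */
	return 0;
}

int main(void)
{
	struct obj_sketch *local = calloc(1, sizeof(*local));

	local->refcount = 1;            /* creation reference */
	ramht_insert_sketch(local);     /* refcount -> 2 */
	ref_sketch(NULL, &local);       /* drop creation ref: refcount -> 1 */

	assert(table_slot && table_slot->refcount == 1);
	ref_sketch(NULL, &table_slot);  /* table teardown frees the object */
	return 0;
}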
diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c
index 32611bd30e6d..cd1988b15d2c 100644
--- a/drivers/gpu/drm/nouveau/nv50_fb.c
+++ b/drivers/gpu/drm/nouveau/nv50_fb.c
@@ -20,6 +20,7 @@ nv50_fb_init(struct drm_device *dev)
 	case 0x50:
 		nv_wr32(dev, 0x100c90, 0x0707ff);
 		break;
+	case 0xa3:
 	case 0xa5:
 	case 0xa8:
 		nv_wr32(dev, 0x100c90, 0x0d0fff);
@@ -36,3 +37,42 @@ void
 nv50_fb_takedown(struct drm_device *dev)
 {
 }
+
+void
+nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	u32 trap[6], idx, chinst;
+	int i, ch;
+
+	idx = nv_rd32(dev, 0x100c90);
+	if (!(idx & 0x80000000))
+		return;
+	idx &= 0x00ffffff;
+
+	for (i = 0; i < 6; i++) {
+		nv_wr32(dev, 0x100c90, idx | i << 24);
+		trap[i] = nv_rd32(dev, 0x100c94);
+	}
+	nv_wr32(dev, 0x100c90, idx | 0x80000000);
+
+	if (!display)
+		return;
+
+	chinst = (trap[2] << 16) | trap[1];
+	for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) {
+		struct nouveau_channel *chan = dev_priv->fifos[ch];
+
+		if (!chan || !chan->ramin)
+			continue;
+
+		if (chinst == chan->ramin->vinst >> 12)
+			break;
+	}
+
+	NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x "
+		     "channel %d (0x%08x)\n",
+		name, (trap[5] & 0x100 ? "read" : "write"),
+		trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff,
+		trap[0], ch, chinst);
+}
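[Aside — illustration, not part of the patch: nv50_fb_vm_trap() above reads the six trap words through an index/data register pair — select a slot via the high bits of 0x100c90, read its value from 0x100c94, then write the valid bit back to re-arm the trap. A self-contained model of that access pattern; register layout and values here are illustrative only:]

#include <stdint.h>
#include <stdio.h>

static uint32_t window[6] = { 0xdead0000, 0x1234, 0x5678, 0, 0, 0x100 };
static uint32_t index_reg;              /* stand-in for the index register */

static void wr_index(uint32_t v) { index_reg = v; }
static uint32_t rd_data(void)    { return window[(index_reg >> 24) & 7]; }

int main(void)
{
	uint32_t trap[6];

	for (int i = 0; i < 6; i++) {
		wr_index((uint32_t)i << 24);    /* select slot i */
		trap[i] = rd_data();            /* latch its value */
	}
	/* in the function above, trap[5] bit 8 separates read from write faults */
	printf("%s fault, status %08x\n",
	       (trap[5] & 0x100) ? "read" : "write", trap[0]);
	return 0;
}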
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 6bf025c6fc6f..6dcf048eddbc 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -1,6 +1,7 @@
 #include "drmP.h"
 #include "nouveau_drv.h"
 #include "nouveau_dma.h"
+#include "nouveau_ramht.h"
 #include "nouveau_fbcon.h"
 
 void
@@ -193,7 +194,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
 	if (ret)
 		return ret;
 
-	ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, Nv2D, eng2d, NULL);
+	ret = nouveau_ramht_insert(dev_priv->channel, Nv2D, eng2d);
+	nouveau_gpuobj_ref(NULL, &eng2d);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index fb0281ae8f90..a46a961102f3 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -27,13 +27,14 @@
 #include "drmP.h"
 #include "drm.h"
 #include "nouveau_drv.h"
+#include "nouveau_ramht.h"
 
 static void
 nv50_fifo_playlist_update(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
-	struct nouveau_gpuobj_ref *cur;
+	struct nouveau_gpuobj *cur;
 	int i, nr;
 
 	NV_DEBUG(dev, "\n");
@@ -43,12 +44,14 @@ nv50_fifo_playlist_update(struct drm_device *dev)
 
 	/* We never schedule channel 0 or 127 */
 	for (i = 1, nr = 0; i < 127; i++) {
-		if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc)
-			nv_wo32(dev, cur->gpuobj, nr++, i);
+		if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc) {
+			nv_wo32(cur, (nr * 4), i);
+			nr++;
+		}
 	}
 	dev_priv->engine.instmem.flush(dev);
 
-	nv_wr32(dev, 0x32f4, cur->instance >> 12);
+	nv_wr32(dev, 0x32f4, cur->vinst >> 12);
 	nv_wr32(dev, 0x32ec, nr);
 	nv_wr32(dev, 0x2500, 0x101);
 }
@@ -63,9 +66,9 @@ nv50_fifo_channel_enable(struct drm_device *dev, int channel)
 	NV_DEBUG(dev, "ch%d\n", channel);
 
 	if (dev_priv->chipset == 0x50)
-		inst = chan->ramfc->instance >> 12;
+		inst = chan->ramfc->vinst >> 12;
 	else
-		inst = chan->ramfc->instance >> 8;
+		inst = chan->ramfc->vinst >> 8;
 
 	nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst |
 		NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED);
@@ -163,19 +166,19 @@ nv50_fifo_init(struct drm_device *dev)
 		goto just_reset;
 	}
 
-	ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000,
-				     NVOBJ_FLAG_ZERO_ALLOC,
-				     &pfifo->playlist[0]);
+	ret = nouveau_gpuobj_new(dev, NULL, 128*4, 0x1000,
+				 NVOBJ_FLAG_ZERO_ALLOC,
+				 &pfifo->playlist[0]);
 	if (ret) {
 		NV_ERROR(dev, "error creating playlist 0: %d\n", ret);
 		return ret;
 	}
 
-	ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000,
-				     NVOBJ_FLAG_ZERO_ALLOC,
-				     &pfifo->playlist[1]);
+	ret = nouveau_gpuobj_new(dev, NULL, 128*4, 0x1000,
+				 NVOBJ_FLAG_ZERO_ALLOC,
+				 &pfifo->playlist[1]);
 	if (ret) {
-		nouveau_gpuobj_ref_del(dev, &pfifo->playlist[0]);
+		nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]);
 		NV_ERROR(dev, "error creating playlist 1: %d\n", ret);
 		return ret;
 	}
@@ -203,8 +206,8 @@ nv50_fifo_takedown(struct drm_device *dev)
 	if (!pfifo->playlist[0])
 		return;
 
-	nouveau_gpuobj_ref_del(dev, &pfifo->playlist[0]);
-	nouveau_gpuobj_ref_del(dev, &pfifo->playlist[1]);
+	nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]);
+	nouveau_gpuobj_ref(NULL, &pfifo->playlist[1]);
 }
 
 int
@@ -226,59 +229,54 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
 	NV_DEBUG(dev, "ch%d\n", chan->id);
 
 	if (dev_priv->chipset == 0x50) {
-		uint32_t ramin_poffset = chan->ramin->gpuobj->im_pramin->start;
-		uint32_t ramin_voffset = chan->ramin->gpuobj->im_backing_start;
-
-		ret = nouveau_gpuobj_new_fake(dev, ramin_poffset, ramin_voffset,
-					      0x100, NVOBJ_FLAG_ZERO_ALLOC |
-					      NVOBJ_FLAG_ZERO_FREE, &ramfc,
+		ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst,
+					      chan->ramin->vinst, 0x100,
+					      NVOBJ_FLAG_ZERO_ALLOC |
+					      NVOBJ_FLAG_ZERO_FREE,
 					      &chan->ramfc);
 		if (ret)
 			return ret;
 
-		ret = nouveau_gpuobj_new_fake(dev, ramin_poffset + 0x0400,
-					      ramin_voffset + 0x0400, 4096,
-					      0, NULL, &chan->cache);
+		ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst + 0x0400,
+					      chan->ramin->vinst + 0x0400,
+					      4096, 0, &chan->cache);
 		if (ret)
 			return ret;
 	} else {
-		ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 0x100, 256,
-					     NVOBJ_FLAG_ZERO_ALLOC |
-					     NVOBJ_FLAG_ZERO_FREE,
-					     &chan->ramfc);
+		ret = nouveau_gpuobj_new(dev, chan, 0x100, 256,
+					 NVOBJ_FLAG_ZERO_ALLOC |
+					 NVOBJ_FLAG_ZERO_FREE, &chan->ramfc);
 		if (ret)
 			return ret;
-		ramfc = chan->ramfc->gpuobj;
 
-		ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 4096, 1024,
-					     0, &chan->cache);
+		ret = nouveau_gpuobj_new(dev, chan, 4096, 1024,
+					 0, &chan->cache);
 		if (ret)
 			return ret;
 	}
+	ramfc = chan->ramfc;
 
 	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
 
-	nv_wo32(dev, ramfc, 0x48/4, chan->pushbuf->instance >> 4);
-	nv_wo32(dev, ramfc, 0x80/4, (0 << 27) /* 4KiB */ |
-				    (4 << 24) /* SEARCH_FULL */ |
-				    (chan->ramht->instance >> 4));
-	nv_wo32(dev, ramfc, 0x44/4, 0x2101ffff);
-	nv_wo32(dev, ramfc, 0x60/4, 0x7fffffff);
-	nv_wo32(dev, ramfc, 0x40/4, 0x00000000);
-	nv_wo32(dev, ramfc, 0x7c/4, 0x30000001);
-	nv_wo32(dev, ramfc, 0x78/4, 0x00000000);
-	nv_wo32(dev, ramfc, 0x3c/4, 0x403f6078);
-	nv_wo32(dev, ramfc, 0x50/4, chan->pushbuf_base +
-				    chan->dma.ib_base * 4);
-	nv_wo32(dev, ramfc, 0x54/4, drm_order(chan->dma.ib_max + 1) << 16);
+	nv_wo32(ramfc, 0x48, chan->pushbuf->cinst >> 4);
+	nv_wo32(ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+			     (4 << 24) /* SEARCH_FULL */ |
+			     (chan->ramht->gpuobj->cinst >> 4));
+	nv_wo32(ramfc, 0x44, 0x2101ffff);
+	nv_wo32(ramfc, 0x60, 0x7fffffff);
+	nv_wo32(ramfc, 0x40, 0x00000000);
+	nv_wo32(ramfc, 0x7c, 0x30000001);
+	nv_wo32(ramfc, 0x78, 0x00000000);
+	nv_wo32(ramfc, 0x3c, 0x403f6078);
+	nv_wo32(ramfc, 0x50, chan->pushbuf_base + chan->dma.ib_base * 4);
+	nv_wo32(ramfc, 0x54, drm_order(chan->dma.ib_max + 1) << 16);
 
 	if (dev_priv->chipset != 0x50) {
-		nv_wo32(dev, chan->ramin->gpuobj, 0, chan->id);
-		nv_wo32(dev, chan->ramin->gpuobj, 1,
-			chan->ramfc->instance >> 8);
+		nv_wo32(chan->ramin, 0, chan->id);
+		nv_wo32(chan->ramin, 4, chan->ramfc->vinst >> 8);
 
-		nv_wo32(dev, ramfc, 0x88/4, chan->cache->instance >> 10);
-		nv_wo32(dev, ramfc, 0x98/4, chan->ramin->instance >> 12);
+		nv_wo32(ramfc, 0x88, chan->cache->vinst >> 10);
+		nv_wo32(ramfc, 0x98, chan->ramin->vinst >> 12);
 	}
 
 	dev_priv->engine.instmem.flush(dev);
@@ -293,12 +291,13 @@ void
 nv50_fifo_destroy_context(struct nouveau_channel *chan)
 {
 	struct drm_device *dev = chan->dev;
-	struct nouveau_gpuobj_ref *ramfc = chan->ramfc;
+	struct nouveau_gpuobj *ramfc = NULL;
 
 	NV_DEBUG(dev, "ch%d\n", chan->id);
 
 	/* This will ensure the channel is seen as disabled. */
-	chan->ramfc = NULL;
+	nouveau_gpuobj_ref(chan->ramfc, &ramfc);
+	nouveau_gpuobj_ref(NULL, &chan->ramfc);
 	nv50_fifo_channel_disable(dev, chan->id);
 
 	/* Dummy channel, also used on ch 127 */
@@ -306,8 +305,8 @@ nv50_fifo_destroy_context(struct nouveau_channel *chan)
 		nv50_fifo_channel_disable(dev, 127);
 	nv50_fifo_playlist_update(dev);
 
-	nouveau_gpuobj_ref_del(dev, &ramfc);
-	nouveau_gpuobj_ref_del(dev, &chan->cache);
+	nouveau_gpuobj_ref(NULL, &ramfc);
+	nouveau_gpuobj_ref(NULL, &chan->cache);
 }
 
 int
@@ -315,63 +314,63 @@ nv50_fifo_load_context(struct nouveau_channel *chan)
 {
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *ramfc = chan->ramfc->gpuobj;
-	struct nouveau_gpuobj *cache = chan->cache->gpuobj;
+	struct nouveau_gpuobj *ramfc = chan->ramfc;
+	struct nouveau_gpuobj *cache = chan->cache;
 	int ptr, cnt;
 
 	NV_DEBUG(dev, "ch%d\n", chan->id);
 
-	nv_wr32(dev, 0x3330, nv_ro32(dev, ramfc, 0x00/4));
-	nv_wr32(dev, 0x3334, nv_ro32(dev, ramfc, 0x04/4));
-	nv_wr32(dev, 0x3240, nv_ro32(dev, ramfc, 0x08/4));
-	nv_wr32(dev, 0x3320, nv_ro32(dev, ramfc, 0x0c/4));
-	nv_wr32(dev, 0x3244, nv_ro32(dev, ramfc, 0x10/4));
-	nv_wr32(dev, 0x3328, nv_ro32(dev, ramfc, 0x14/4));
-	nv_wr32(dev, 0x3368, nv_ro32(dev, ramfc, 0x18/4));
-	nv_wr32(dev, 0x336c, nv_ro32(dev, ramfc, 0x1c/4));
-	nv_wr32(dev, 0x3370, nv_ro32(dev, ramfc, 0x20/4));
-	nv_wr32(dev, 0x3374, nv_ro32(dev, ramfc, 0x24/4));
-	nv_wr32(dev, 0x3378, nv_ro32(dev, ramfc, 0x28/4));
-	nv_wr32(dev, 0x337c, nv_ro32(dev, ramfc, 0x2c/4));
-	nv_wr32(dev, 0x3228, nv_ro32(dev, ramfc, 0x30/4));
-	nv_wr32(dev, 0x3364, nv_ro32(dev, ramfc, 0x34/4));
-	nv_wr32(dev, 0x32a0, nv_ro32(dev, ramfc, 0x38/4));
-	nv_wr32(dev, 0x3224, nv_ro32(dev, ramfc, 0x3c/4));
-	nv_wr32(dev, 0x324c, nv_ro32(dev, ramfc, 0x40/4));
-	nv_wr32(dev, 0x2044, nv_ro32(dev, ramfc, 0x44/4));
-	nv_wr32(dev, 0x322c, nv_ro32(dev, ramfc, 0x48/4));
-	nv_wr32(dev, 0x3234, nv_ro32(dev, ramfc, 0x4c/4));
-	nv_wr32(dev, 0x3340, nv_ro32(dev, ramfc, 0x50/4));
-	nv_wr32(dev, 0x3344, nv_ro32(dev, ramfc, 0x54/4));
-	nv_wr32(dev, 0x3280, nv_ro32(dev, ramfc, 0x58/4));
-	nv_wr32(dev, 0x3254, nv_ro32(dev, ramfc, 0x5c/4));
-	nv_wr32(dev, 0x3260, nv_ro32(dev, ramfc, 0x60/4));
-	nv_wr32(dev, 0x3264, nv_ro32(dev, ramfc, 0x64/4));
-	nv_wr32(dev, 0x3268, nv_ro32(dev, ramfc, 0x68/4));
-	nv_wr32(dev, 0x326c, nv_ro32(dev, ramfc, 0x6c/4));
-	nv_wr32(dev, 0x32e4, nv_ro32(dev, ramfc, 0x70/4));
-	nv_wr32(dev, 0x3248, nv_ro32(dev, ramfc, 0x74/4));
-	nv_wr32(dev, 0x2088, nv_ro32(dev, ramfc, 0x78/4));
-	nv_wr32(dev, 0x2058, nv_ro32(dev, ramfc, 0x7c/4));
-	nv_wr32(dev, 0x2210, nv_ro32(dev, ramfc, 0x80/4));
+	nv_wr32(dev, 0x3330, nv_ro32(ramfc, 0x00));
+	nv_wr32(dev, 0x3334, nv_ro32(ramfc, 0x04));
+	nv_wr32(dev, 0x3240, nv_ro32(ramfc, 0x08));
+	nv_wr32(dev, 0x3320, nv_ro32(ramfc, 0x0c));
+	nv_wr32(dev, 0x3244, nv_ro32(ramfc, 0x10));
+	nv_wr32(dev, 0x3328, nv_ro32(ramfc, 0x14));
+	nv_wr32(dev, 0x3368, nv_ro32(ramfc, 0x18));
+	nv_wr32(dev, 0x336c, nv_ro32(ramfc, 0x1c));
+	nv_wr32(dev, 0x3370, nv_ro32(ramfc, 0x20));
+	nv_wr32(dev, 0x3374, nv_ro32(ramfc, 0x24));
+	nv_wr32(dev, 0x3378, nv_ro32(ramfc, 0x28));
+	nv_wr32(dev, 0x337c, nv_ro32(ramfc, 0x2c));
+	nv_wr32(dev, 0x3228, nv_ro32(ramfc, 0x30));
+	nv_wr32(dev, 0x3364, nv_ro32(ramfc, 0x34));
+	nv_wr32(dev, 0x32a0, nv_ro32(ramfc, 0x38));
+	nv_wr32(dev, 0x3224, nv_ro32(ramfc, 0x3c));
+	nv_wr32(dev, 0x324c, nv_ro32(ramfc, 0x40));
+	nv_wr32(dev, 0x2044, nv_ro32(ramfc, 0x44));
+	nv_wr32(dev, 0x322c, nv_ro32(ramfc, 0x48));
+	nv_wr32(dev, 0x3234, nv_ro32(ramfc, 0x4c));
+	nv_wr32(dev, 0x3340, nv_ro32(ramfc, 0x50));
+	nv_wr32(dev, 0x3344, nv_ro32(ramfc, 0x54));
+	nv_wr32(dev, 0x3280, nv_ro32(ramfc, 0x58));
+	nv_wr32(dev, 0x3254, nv_ro32(ramfc, 0x5c));
+	nv_wr32(dev, 0x3260, nv_ro32(ramfc, 0x60));
+	nv_wr32(dev, 0x3264, nv_ro32(ramfc, 0x64));
+	nv_wr32(dev, 0x3268, nv_ro32(ramfc, 0x68));
+	nv_wr32(dev, 0x326c, nv_ro32(ramfc, 0x6c));
+	nv_wr32(dev, 0x32e4, nv_ro32(ramfc, 0x70));
+	nv_wr32(dev, 0x3248, nv_ro32(ramfc, 0x74));
+	nv_wr32(dev, 0x2088, nv_ro32(ramfc, 0x78));
+	nv_wr32(dev, 0x2058, nv_ro32(ramfc, 0x7c));
+	nv_wr32(dev, 0x2210, nv_ro32(ramfc, 0x80));
 
-	cnt = nv_ro32(dev, ramfc, 0x84/4);
+	cnt = nv_ro32(ramfc, 0x84);
 	for (ptr = 0; ptr < cnt; ptr++) {
 		nv_wr32(dev, NV40_PFIFO_CACHE1_METHOD(ptr),
-			nv_ro32(dev, cache, (ptr * 2) + 0));
+			nv_ro32(cache, (ptr * 8) + 0));
 		nv_wr32(dev, NV40_PFIFO_CACHE1_DATA(ptr),
-			nv_ro32(dev, cache, (ptr * 2) + 1));
+			nv_ro32(cache, (ptr * 8) + 4));
 	}
 	nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, cnt << 2);
 	nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
 
 	/* guessing that all the 0x34xx regs aren't on NV50 */
 	if (dev_priv->chipset != 0x50) {
-		nv_wr32(dev, 0x340c, nv_ro32(dev, ramfc, 0x88/4));
-		nv_wr32(dev, 0x3400, nv_ro32(dev, ramfc, 0x8c/4));
-		nv_wr32(dev, 0x3404, nv_ro32(dev, ramfc, 0x90/4));
-		nv_wr32(dev, 0x3408, nv_ro32(dev, ramfc, 0x94/4));
-		nv_wr32(dev, 0x3410, nv_ro32(dev, ramfc, 0x98/4));
+		nv_wr32(dev, 0x340c, nv_ro32(ramfc, 0x88));
+		nv_wr32(dev, 0x3400, nv_ro32(ramfc, 0x8c));
+		nv_wr32(dev, 0x3404, nv_ro32(ramfc, 0x90));
+		nv_wr32(dev, 0x3408, nv_ro32(ramfc, 0x94));
+		nv_wr32(dev, 0x3410, nv_ro32(ramfc, 0x98));
 	}
 
 	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16));
@@ -399,62 +398,63 @@ nv50_fifo_unload_context(struct drm_device *dev)
 		return -EINVAL;
 	}
 	NV_DEBUG(dev, "ch%d\n", chan->id);
-	ramfc = chan->ramfc->gpuobj;
-	cache = chan->cache->gpuobj;
+	ramfc = chan->ramfc;
+	cache = chan->cache;
 
-	nv_wo32(dev, ramfc, 0x00/4, nv_rd32(dev, 0x3330));
-	nv_wo32(dev, ramfc, 0x04/4, nv_rd32(dev, 0x3334));
-	nv_wo32(dev, ramfc, 0x08/4, nv_rd32(dev, 0x3240));
-	nv_wo32(dev, ramfc, 0x0c/4, nv_rd32(dev, 0x3320));
-	nv_wo32(dev, ramfc, 0x10/4, nv_rd32(dev, 0x3244));
-	nv_wo32(dev, ramfc, 0x14/4, nv_rd32(dev, 0x3328));
-	nv_wo32(dev, ramfc, 0x18/4, nv_rd32(dev, 0x3368));
-	nv_wo32(dev, ramfc, 0x1c/4, nv_rd32(dev, 0x336c));
-	nv_wo32(dev, ramfc, 0x20/4, nv_rd32(dev, 0x3370));
-	nv_wo32(dev, ramfc, 0x24/4, nv_rd32(dev, 0x3374));
-	nv_wo32(dev, ramfc, 0x28/4, nv_rd32(dev, 0x3378));
-	nv_wo32(dev, ramfc, 0x2c/4, nv_rd32(dev, 0x337c));
-	nv_wo32(dev, ramfc, 0x30/4, nv_rd32(dev, 0x3228));
-	nv_wo32(dev, ramfc, 0x34/4, nv_rd32(dev, 0x3364));
-	nv_wo32(dev, ramfc, 0x38/4, nv_rd32(dev, 0x32a0));
-	nv_wo32(dev, ramfc, 0x3c/4, nv_rd32(dev, 0x3224));
-	nv_wo32(dev, ramfc, 0x40/4, nv_rd32(dev, 0x324c));
-	nv_wo32(dev, ramfc, 0x44/4, nv_rd32(dev, 0x2044));
-	nv_wo32(dev, ramfc, 0x48/4, nv_rd32(dev, 0x322c));
-	nv_wo32(dev, ramfc, 0x4c/4, nv_rd32(dev, 0x3234));
-	nv_wo32(dev, ramfc, 0x50/4, nv_rd32(dev, 0x3340));
-	nv_wo32(dev, ramfc, 0x54/4, nv_rd32(dev, 0x3344));
-	nv_wo32(dev, ramfc, 0x58/4, nv_rd32(dev, 0x3280));
-	nv_wo32(dev, ramfc, 0x5c/4, nv_rd32(dev, 0x3254));
-	nv_wo32(dev, ramfc, 0x60/4, nv_rd32(dev, 0x3260));
-	nv_wo32(dev, ramfc, 0x64/4, nv_rd32(dev, 0x3264));
-	nv_wo32(dev, ramfc, 0x68/4, nv_rd32(dev, 0x3268));
-	nv_wo32(dev, ramfc, 0x6c/4, nv_rd32(dev, 0x326c));
-	nv_wo32(dev, ramfc, 0x70/4, nv_rd32(dev, 0x32e4));
-	nv_wo32(dev, ramfc, 0x74/4, nv_rd32(dev, 0x3248));
-	nv_wo32(dev, ramfc, 0x78/4, nv_rd32(dev, 0x2088));
-	nv_wo32(dev, ramfc, 0x7c/4, nv_rd32(dev, 0x2058));
-	nv_wo32(dev, ramfc, 0x80/4, nv_rd32(dev, 0x2210));
+	nv_wo32(ramfc, 0x00, nv_rd32(dev, 0x3330));
+	nv_wo32(ramfc, 0x04, nv_rd32(dev, 0x3334));
+	nv_wo32(ramfc, 0x08, nv_rd32(dev, 0x3240));
+	nv_wo32(ramfc, 0x0c, nv_rd32(dev, 0x3320));
+	nv_wo32(ramfc, 0x10, nv_rd32(dev, 0x3244));
+	nv_wo32(ramfc, 0x14, nv_rd32(dev, 0x3328));
+	nv_wo32(ramfc, 0x18, nv_rd32(dev, 0x3368));
+	nv_wo32(ramfc, 0x1c, nv_rd32(dev, 0x336c));
+	nv_wo32(ramfc, 0x20, nv_rd32(dev, 0x3370));
+	nv_wo32(ramfc, 0x24, nv_rd32(dev, 0x3374));
+	nv_wo32(ramfc, 0x28, nv_rd32(dev, 0x3378));
+	nv_wo32(ramfc, 0x2c, nv_rd32(dev, 0x337c));
+	nv_wo32(ramfc, 0x30, nv_rd32(dev, 0x3228));
+	nv_wo32(ramfc, 0x34, nv_rd32(dev, 0x3364));
+	nv_wo32(ramfc, 0x38, nv_rd32(dev, 0x32a0));
+	nv_wo32(ramfc, 0x3c, nv_rd32(dev, 0x3224));
+	nv_wo32(ramfc, 0x40, nv_rd32(dev, 0x324c));
+	nv_wo32(ramfc, 0x44, nv_rd32(dev, 0x2044));
+	nv_wo32(ramfc, 0x48, nv_rd32(dev, 0x322c));
+	nv_wo32(ramfc, 0x4c, nv_rd32(dev, 0x3234));
+	nv_wo32(ramfc, 0x50, nv_rd32(dev, 0x3340));
+	nv_wo32(ramfc, 0x54, nv_rd32(dev, 0x3344));
+	nv_wo32(ramfc, 0x58, nv_rd32(dev, 0x3280));
+	nv_wo32(ramfc, 0x5c, nv_rd32(dev, 0x3254));
+	nv_wo32(ramfc, 0x60, nv_rd32(dev, 0x3260));
+	nv_wo32(ramfc, 0x64, nv_rd32(dev, 0x3264));
+	nv_wo32(ramfc, 0x68, nv_rd32(dev, 0x3268));
+	nv_wo32(ramfc, 0x6c, nv_rd32(dev, 0x326c));
+	nv_wo32(ramfc, 0x70, nv_rd32(dev, 0x32e4));
+	nv_wo32(ramfc, 0x74, nv_rd32(dev, 0x3248));
+	nv_wo32(ramfc, 0x78, nv_rd32(dev, 0x2088));
+	nv_wo32(ramfc, 0x7c, nv_rd32(dev, 0x2058));
+	nv_wo32(ramfc, 0x80, nv_rd32(dev, 0x2210));
 
 	put = (nv_rd32(dev, NV03_PFIFO_CACHE1_PUT) & 0x7ff) >> 2;
 	get = (nv_rd32(dev, NV03_PFIFO_CACHE1_GET) & 0x7ff) >> 2;
 	ptr = 0;
 	while (put != get) {
-		nv_wo32(dev, cache, ptr++,
+		nv_wo32(cache, ptr + 0,
 			nv_rd32(dev, NV40_PFIFO_CACHE1_METHOD(get)));
-		nv_wo32(dev, cache, ptr++,
+		nv_wo32(cache, ptr + 4,
 			nv_rd32(dev, NV40_PFIFO_CACHE1_DATA(get)));
 		get = (get + 1) & 0x1ff;
+		ptr += 8;
 	}
 
 	/* guessing that all the 0x34xx regs aren't on NV50 */
 	if (dev_priv->chipset != 0x50) {
-		nv_wo32(dev, ramfc, 0x84/4, ptr >> 1);
-		nv_wo32(dev, ramfc, 0x88/4, nv_rd32(dev, 0x340c));
-		nv_wo32(dev, ramfc, 0x8c/4, nv_rd32(dev, 0x3400));
-		nv_wo32(dev, ramfc, 0x90/4, nv_rd32(dev, 0x3404));
-		nv_wo32(dev, ramfc, 0x94/4, nv_rd32(dev, 0x3408));
-		nv_wo32(dev, ramfc, 0x98/4, nv_rd32(dev, 0x3410));
+		nv_wo32(ramfc, 0x84, ptr >> 3);
+		nv_wo32(ramfc, 0x88, nv_rd32(dev, 0x340c));
+		nv_wo32(ramfc, 0x8c, nv_rd32(dev, 0x3400));
+		nv_wo32(ramfc, 0x90, nv_rd32(dev, 0x3404));
+		nv_wo32(ramfc, 0x94, nv_rd32(dev, 0x3408));
+		nv_wo32(ramfc, 0x98, nv_rd32(dev, 0x3410));
 	}
 
 	dev_priv->engine.instmem.flush(dev);
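[Aside — illustration, not part of the patch: the two big fifo hunks above also change the saved CACHE1 layout from packed 32-bit words counted in words to one method/data pair per 8 bytes counted in bytes — hence (ptr * 8) + 0 / + 4 on load, ptr += 8 on save, and the entry count stored as ptr >> 3. A minimal byte-addressed model of that layout; only the offsets mirror the patch, the rest is sketch:]

#include <assert.h>
#include <stdint.h>

static uint8_t cache[512 * 8];          /* stand-in for the channel's cache object */

static void wo32(uint32_t byte, uint32_t v)
{
	cache[byte + 0] = (uint8_t)v;
	cache[byte + 1] = (uint8_t)(v >> 8);
	cache[byte + 2] = (uint8_t)(v >> 16);
	cache[byte + 3] = (uint8_t)(v >> 24);
}

static uint32_t ro32(uint32_t byte)
{
	return (uint32_t)cache[byte] | (uint32_t)cache[byte + 1] << 8 |
	       (uint32_t)cache[byte + 2] << 16 | (uint32_t)cache[byte + 3] << 24;
}

int main(void)
{
	uint32_t ptr = 0;

	wo32(ptr + 0, 0x180c);          /* save path: method at +0 */
	wo32(ptr + 4, 0xbeef);          /* data at +4 */
	ptr += 8;                       /* one 8-byte entry consumed */

	assert((ptr >> 3) == 1);        /* entry count, as written to RAMFC+0x84 */
	assert(ro32(0) == 0x180c && ro32(4) == 0xbeef); /* load path reads it back */
	return 0;
}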
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 1413028e1580..cbf5ae2f67d4 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -27,7 +27,7 @@
 #include "drmP.h"
 #include "drm.h"
 #include "nouveau_drv.h"
-
+#include "nouveau_ramht.h"
 #include "nouveau_grctx.h"
 
 static void
@@ -181,7 +181,7 @@ nv50_graph_channel(struct drm_device *dev)
 	/* Be sure we're not in the middle of a context switch or bad things
 	 * will happen, such as unloading the wrong pgraph context.
 	 */
-	if (!nv_wait(0x400300, 0x00000001, 0x00000000))
+	if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000))
 		NV_ERROR(dev, "Ctxprog is still running\n");
 
 	inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
@@ -192,7 +192,7 @@ nv50_graph_channel(struct drm_device *dev)
 	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
 		struct nouveau_channel *chan = dev_priv->fifos[i];
 
-		if (chan && chan->ramin && chan->ramin->instance == inst)
+		if (chan && chan->ramin && chan->ramin->vinst == inst)
 			return chan;
 	}
 
@@ -204,36 +204,34 @@ nv50_graph_create_context(struct nouveau_channel *chan)
 {
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
-	struct nouveau_gpuobj *obj;
+	struct nouveau_gpuobj *ramin = chan->ramin;
 	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	struct nouveau_grctx ctx = {};
 	int hdr, ret;
 
 	NV_DEBUG(dev, "ch%d\n", chan->id);
 
-	ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size,
-				     0x1000, NVOBJ_FLAG_ZERO_ALLOC |
-				     NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx);
+	ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 0x1000,
+				 NVOBJ_FLAG_ZERO_ALLOC |
+				 NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx);
 	if (ret)
 		return ret;
-	obj = chan->ramin_grctx->gpuobj;
 
 	hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
-	nv_wo32(dev, ramin, (hdr + 0x00)/4, 0x00190002);
-	nv_wo32(dev, ramin, (hdr + 0x04)/4, chan->ramin_grctx->instance +
-					   pgraph->grctx_size - 1);
-	nv_wo32(dev, ramin, (hdr + 0x08)/4, chan->ramin_grctx->instance);
-	nv_wo32(dev, ramin, (hdr + 0x0c)/4, 0);
-	nv_wo32(dev, ramin, (hdr + 0x10)/4, 0);
-	nv_wo32(dev, ramin, (hdr + 0x14)/4, 0x00010000);
+	nv_wo32(ramin, hdr + 0x00, 0x00190002);
+	nv_wo32(ramin, hdr + 0x04, chan->ramin_grctx->vinst +
+				   pgraph->grctx_size - 1);
+	nv_wo32(ramin, hdr + 0x08, chan->ramin_grctx->vinst);
+	nv_wo32(ramin, hdr + 0x0c, 0);
+	nv_wo32(ramin, hdr + 0x10, 0);
+	nv_wo32(ramin, hdr + 0x14, 0x00010000);
 
 	ctx.dev = chan->dev;
 	ctx.mode = NOUVEAU_GRCTX_VALS;
-	ctx.data = obj;
+	ctx.data = chan->ramin_grctx;
 	nv50_grctx_init(&ctx);
 
-	nv_wo32(dev, obj, 0x00000/4, chan->ramin->instance >> 12);
+	nv_wo32(chan->ramin_grctx, 0x00000, chan->ramin->vinst >> 12);
 
 	dev_priv->engine.instmem.flush(dev);
 	return 0;
@@ -248,14 +246,14 @@ nv50_graph_destroy_context(struct nouveau_channel *chan)
 
 	NV_DEBUG(dev, "ch%d\n", chan->id);
 
-	if (!chan->ramin || !chan->ramin->gpuobj)
+	if (!chan->ramin)
 		return;
 
 	for (i = hdr; i < hdr + 24; i += 4)
-		nv_wo32(dev, chan->ramin->gpuobj, i/4, 0);
+		nv_wo32(chan->ramin, i, 0);
 	dev_priv->engine.instmem.flush(dev);
 
-	nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
+	nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
 }
 
 static int
@@ -282,7 +280,7 @@ nv50_graph_do_load_context(struct drm_device *dev, uint32_t inst)
 int
 nv50_graph_load_context(struct nouveau_channel *chan)
 {
-	uint32_t inst = chan->ramin->instance >> 12;
+	uint32_t inst = chan->ramin->vinst >> 12;
 
 	NV_DEBUG(chan->dev, "ch%d\n", chan->id);
 	return nv50_graph_do_load_context(chan->dev, inst);
@@ -327,15 +325,16 @@ static int
 nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan, int grclass,
 			   int mthd, uint32_t data)
 {
-	struct nouveau_gpuobj_ref *ref = NULL;
+	struct nouveau_gpuobj *gpuobj;
 
-	if (nouveau_gpuobj_ref_find(chan, data, &ref))
+	gpuobj = nouveau_ramht_find(chan, data);
+	if (!gpuobj)
 		return -ENOENT;
 
-	if (nouveau_notifier_offset(ref->gpuobj, NULL))
+	if (nouveau_notifier_offset(gpuobj, NULL))
 		return -EINVAL;
 
-	chan->nvsw.vblsem = ref->gpuobj;
+	chan->nvsw.vblsem = gpuobj;
 	chan->nvsw.vblsem_offset = ~0;
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c
index 42a8fb20c1e6..336aab2a24a6 100644
--- a/drivers/gpu/drm/nouveau/nv50_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv50_grctx.c
@@ -103,6 +103,9 @@
 #include "nouveau_drv.h"
 #include "nouveau_grctx.h"
 
+#define IS_NVA3F(x) (((x) > 0xa0 && (x) < 0xaa) || (x) == 0xaf)
+#define IS_NVAAF(x) ((x) >= 0xaa && (x) <= 0xac)
+
 /*
  * This code deals with PGRAPH contexts on NV50 family cards. Like NV40, it's
  * the GPU itself that does context-switching, but it needs a special
@@ -182,6 +185,7 @@ nv50_grctx_init(struct nouveau_grctx *ctx)
 	case 0xa8:
 	case 0xaa:
 	case 0xac:
+	case 0xaf:
 		break;
 	default:
 		NV_ERROR(ctx->dev, "I don't know how to make a ctxprog for "
@@ -268,6 +272,9 @@ nv50_grctx_init(struct nouveau_grctx *ctx)
  */
 
 static void
+nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx);
+
+static void
 nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 {
 	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
@@ -286,7 +293,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 		gr_def(ctx, 0x400840, 0xffe806a8);
 	}
 	gr_def(ctx, 0x400844, 0x00000002);
-	if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+	if (IS_NVA3F(dev_priv->chipset))
 		gr_def(ctx, 0x400894, 0x00001000);
 	gr_def(ctx, 0x4008e8, 0x00000003);
 	gr_def(ctx, 0x4008ec, 0x00001000);
@@ -299,13 +306,15 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 
 	if (dev_priv->chipset >= 0xa0)
 		cp_ctx(ctx, 0x400b00, 0x1);
-	if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
+	if (IS_NVA3F(dev_priv->chipset)) {
 		cp_ctx(ctx, 0x400b10, 0x1);
 		gr_def(ctx, 0x400b10, 0x0001629d);
 		cp_ctx(ctx, 0x400b20, 0x1);
 		gr_def(ctx, 0x400b20, 0x0001629d);
 	}
 
+	nv50_graph_construct_mmio_ddata(ctx);
+
 	/* 0C00: VFETCH */
 	cp_ctx(ctx, 0x400c08, 0x2);
 	gr_def(ctx, 0x400c08, 0x0000fe0c);
@@ -314,7 +323,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 	if (dev_priv->chipset < 0xa0) {
 		cp_ctx(ctx, 0x401008, 0x4);
 		gr_def(ctx, 0x401014, 0x00001000);
-	} else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa) {
+	} else if (!IS_NVA3F(dev_priv->chipset)) {
 		cp_ctx(ctx, 0x401008, 0x5);
 		gr_def(ctx, 0x401018, 0x00001000);
 	} else {
@@ -368,10 +377,13 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 	case 0xa3:
 	case 0xa5:
 	case 0xa8:
+	case 0xaf:
 		gr_def(ctx, 0x401c00, 0x142500df);
 		break;
 	}
 
+	/* 2000 */
+
 	/* 2400 */
 	cp_ctx(ctx, 0x402400, 0x1);
 	if (dev_priv->chipset == 0x50)
@@ -380,12 +392,12 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 	cp_ctx(ctx, 0x402408, 0x2);
 	gr_def(ctx, 0x402408, 0x00000600);
 
-	/* 2800 */
+	/* 2800: CSCHED */
 	cp_ctx(ctx, 0x402800, 0x1);
 	if (dev_priv->chipset == 0x50)
 		gr_def(ctx, 0x402800, 0x00000006);
 
-	/* 2C00 */
+	/* 2C00: ZCULL */
 	cp_ctx(ctx, 0x402c08, 0x6);
 	if (dev_priv->chipset != 0x50)
 		gr_def(ctx, 0x402c14, 0x01000000);
@@ -396,23 +408,23 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 	cp_ctx(ctx, 0x402ca0, 0x2);
 	if (dev_priv->chipset < 0xa0)
 		gr_def(ctx, 0x402ca0, 0x00000400);
-	else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa)
+	else if (!IS_NVA3F(dev_priv->chipset))
 		gr_def(ctx, 0x402ca0, 0x00000800);
 	else
 		gr_def(ctx, 0x402ca0, 0x00000400);
 	cp_ctx(ctx, 0x402cac, 0x4);
 
-	/* 3000 */
+	/* 3000: ENG2D */
 	cp_ctx(ctx, 0x403004, 0x1);
 	gr_def(ctx, 0x403004, 0x00000001);
 
-	/* 3404 */
+	/* 3400 */
 	if (dev_priv->chipset >= 0xa0) {
 		cp_ctx(ctx, 0x403404, 0x1);
 		gr_def(ctx, 0x403404, 0x00000001);
 	}
 
-	/* 5000 */
+	/* 5000: CCACHE */
 	cp_ctx(ctx, 0x405000, 0x1);
 	switch (dev_priv->chipset) {
 	case 0x50:
@@ -425,6 +437,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 	case 0xa8:
 	case 0xaa:
 	case 0xac:
+	case 0xaf:
 		gr_def(ctx, 0x405000, 0x000e0080);
 		break;
 	case 0x86:
@@ -441,210 +454,6 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
441 cp_ctx(ctx, 0x405024, 0x1); 454 cp_ctx(ctx, 0x405024, 0x1);
442 cp_ctx(ctx, 0x40502c, 0x1); 455 cp_ctx(ctx, 0x40502c, 0x1);
443 456
444 /* 5400 or maybe 4800 */
445 if (dev_priv->chipset == 0x50) {
446 offset = 0x405400;
447 cp_ctx(ctx, 0x405400, 0xea);
448 } else if (dev_priv->chipset < 0x94) {
449 offset = 0x405400;
450 cp_ctx(ctx, 0x405400, 0xcb);
451 } else if (dev_priv->chipset < 0xa0) {
452 offset = 0x405400;
453 cp_ctx(ctx, 0x405400, 0xcc);
454 } else if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
455 offset = 0x404800;
456 cp_ctx(ctx, 0x404800, 0xda);
457 } else {
458 offset = 0x405400;
459 cp_ctx(ctx, 0x405400, 0xd4);
460 }
461 gr_def(ctx, offset + 0x0c, 0x00000002);
462 gr_def(ctx, offset + 0x10, 0x00000001);
463 if (dev_priv->chipset >= 0x94)
464 offset += 4;
465 gr_def(ctx, offset + 0x1c, 0x00000001);
466 gr_def(ctx, offset + 0x20, 0x00000100);
467 gr_def(ctx, offset + 0x38, 0x00000002);
-	gr_def(ctx, offset + 0x3c, 0x00000001);
-	gr_def(ctx, offset + 0x40, 0x00000001);
-	gr_def(ctx, offset + 0x50, 0x00000001);
-	gr_def(ctx, offset + 0x54, 0x003fffff);
-	gr_def(ctx, offset + 0x58, 0x00001fff);
-	gr_def(ctx, offset + 0x60, 0x00000001);
-	gr_def(ctx, offset + 0x64, 0x00000001);
-	gr_def(ctx, offset + 0x6c, 0x00000001);
-	gr_def(ctx, offset + 0x70, 0x00000001);
-	gr_def(ctx, offset + 0x74, 0x00000001);
-	gr_def(ctx, offset + 0x78, 0x00000004);
-	gr_def(ctx, offset + 0x7c, 0x00000001);
-	if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
-		offset += 4;
-	gr_def(ctx, offset + 0x80, 0x00000001);
-	gr_def(ctx, offset + 0x84, 0x00000001);
-	gr_def(ctx, offset + 0x88, 0x00000007);
-	gr_def(ctx, offset + 0x8c, 0x00000001);
-	gr_def(ctx, offset + 0x90, 0x00000007);
-	gr_def(ctx, offset + 0x94, 0x00000001);
-	gr_def(ctx, offset + 0x98, 0x00000001);
-	gr_def(ctx, offset + 0x9c, 0x00000001);
-	if (dev_priv->chipset == 0x50) {
-		gr_def(ctx, offset + 0xb0, 0x00000001);
-		gr_def(ctx, offset + 0xb4, 0x00000001);
-		gr_def(ctx, offset + 0xbc, 0x00000001);
-		gr_def(ctx, offset + 0xc0, 0x0000000a);
-		gr_def(ctx, offset + 0xd0, 0x00000040);
-		gr_def(ctx, offset + 0xd8, 0x00000002);
-		gr_def(ctx, offset + 0xdc, 0x00000100);
-		gr_def(ctx, offset + 0xe0, 0x00000001);
-		gr_def(ctx, offset + 0xe4, 0x00000100);
-		gr_def(ctx, offset + 0x100, 0x00000001);
-		gr_def(ctx, offset + 0x124, 0x00000004);
-		gr_def(ctx, offset + 0x13c, 0x00000001);
-		gr_def(ctx, offset + 0x140, 0x00000100);
-		gr_def(ctx, offset + 0x148, 0x00000001);
-		gr_def(ctx, offset + 0x154, 0x00000100);
-		gr_def(ctx, offset + 0x158, 0x00000001);
-		gr_def(ctx, offset + 0x15c, 0x00000100);
-		gr_def(ctx, offset + 0x164, 0x00000001);
-		gr_def(ctx, offset + 0x170, 0x00000100);
-		gr_def(ctx, offset + 0x174, 0x00000001);
-		gr_def(ctx, offset + 0x17c, 0x00000001);
-		gr_def(ctx, offset + 0x188, 0x00000002);
-		gr_def(ctx, offset + 0x190, 0x00000001);
-		gr_def(ctx, offset + 0x198, 0x00000001);
-		gr_def(ctx, offset + 0x1ac, 0x00000003);
-		offset += 0xd0;
-	} else {
-		gr_def(ctx, offset + 0xb0, 0x00000001);
-		gr_def(ctx, offset + 0xb4, 0x00000100);
-		gr_def(ctx, offset + 0xbc, 0x00000001);
-		gr_def(ctx, offset + 0xc8, 0x00000100);
-		gr_def(ctx, offset + 0xcc, 0x00000001);
-		gr_def(ctx, offset + 0xd0, 0x00000100);
-		gr_def(ctx, offset + 0xd8, 0x00000001);
-		gr_def(ctx, offset + 0xe4, 0x00000100);
-	}
-	gr_def(ctx, offset + 0xf8, 0x00000004);
-	gr_def(ctx, offset + 0xfc, 0x00000070);
-	gr_def(ctx, offset + 0x100, 0x00000080);
-	if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
-		offset += 4;
-	gr_def(ctx, offset + 0x114, 0x0000000c);
-	if (dev_priv->chipset == 0x50)
-		offset -= 4;
-	gr_def(ctx, offset + 0x11c, 0x00000008);
-	gr_def(ctx, offset + 0x120, 0x00000014);
-	if (dev_priv->chipset == 0x50) {
-		gr_def(ctx, offset + 0x124, 0x00000026);
-		offset -= 0x18;
-	} else {
-		gr_def(ctx, offset + 0x128, 0x00000029);
-		gr_def(ctx, offset + 0x12c, 0x00000027);
-		gr_def(ctx, offset + 0x130, 0x00000026);
-		gr_def(ctx, offset + 0x134, 0x00000008);
-		gr_def(ctx, offset + 0x138, 0x00000004);
-		gr_def(ctx, offset + 0x13c, 0x00000027);
-	}
-	gr_def(ctx, offset + 0x148, 0x00000001);
-	gr_def(ctx, offset + 0x14c, 0x00000002);
-	gr_def(ctx, offset + 0x150, 0x00000003);
-	gr_def(ctx, offset + 0x154, 0x00000004);
-	gr_def(ctx, offset + 0x158, 0x00000005);
-	gr_def(ctx, offset + 0x15c, 0x00000006);
-	gr_def(ctx, offset + 0x160, 0x00000007);
-	gr_def(ctx, offset + 0x164, 0x00000001);
-	gr_def(ctx, offset + 0x1a8, 0x000000cf);
-	if (dev_priv->chipset == 0x50)
-		offset -= 4;
-	gr_def(ctx, offset + 0x1d8, 0x00000080);
-	gr_def(ctx, offset + 0x1dc, 0x00000004);
-	gr_def(ctx, offset + 0x1e0, 0x00000004);
-	if (dev_priv->chipset == 0x50)
-		offset -= 4;
-	else
-		gr_def(ctx, offset + 0x1e4, 0x00000003);
-	if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
-		gr_def(ctx, offset + 0x1ec, 0x00000003);
-		offset += 8;
-	}
-	gr_def(ctx, offset + 0x1e8, 0x00000001);
-	if (dev_priv->chipset == 0x50)
-		offset -= 4;
-	gr_def(ctx, offset + 0x1f4, 0x00000012);
-	gr_def(ctx, offset + 0x1f8, 0x00000010);
-	gr_def(ctx, offset + 0x1fc, 0x0000000c);
-	gr_def(ctx, offset + 0x200, 0x00000001);
-	gr_def(ctx, offset + 0x210, 0x00000004);
-	gr_def(ctx, offset + 0x214, 0x00000002);
-	gr_def(ctx, offset + 0x218, 0x00000004);
-	if (dev_priv->chipset >= 0xa0)
-		offset += 4;
-	gr_def(ctx, offset + 0x224, 0x003fffff);
-	gr_def(ctx, offset + 0x228, 0x00001fff);
-	if (dev_priv->chipset == 0x50)
-		offset -= 0x20;
-	else if (dev_priv->chipset >= 0xa0) {
-		gr_def(ctx, offset + 0x250, 0x00000001);
-		gr_def(ctx, offset + 0x254, 0x00000001);
-		gr_def(ctx, offset + 0x258, 0x00000002);
-		offset += 0x10;
-	}
-	gr_def(ctx, offset + 0x250, 0x00000004);
-	gr_def(ctx, offset + 0x254, 0x00000014);
-	gr_def(ctx, offset + 0x258, 0x00000001);
-	if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
-		offset += 4;
-	gr_def(ctx, offset + 0x264, 0x00000002);
-	if (dev_priv->chipset >= 0xa0)
-		offset += 8;
-	gr_def(ctx, offset + 0x270, 0x00000001);
-	gr_def(ctx, offset + 0x278, 0x00000002);
-	gr_def(ctx, offset + 0x27c, 0x00001000);
-	if (dev_priv->chipset == 0x50)
-		offset -= 0xc;
-	else {
-		gr_def(ctx, offset + 0x280, 0x00000e00);
-		gr_def(ctx, offset + 0x284, 0x00001000);
-		gr_def(ctx, offset + 0x288, 0x00001e00);
-	}
-	gr_def(ctx, offset + 0x290, 0x00000001);
-	gr_def(ctx, offset + 0x294, 0x00000001);
-	gr_def(ctx, offset + 0x298, 0x00000001);
-	gr_def(ctx, offset + 0x29c, 0x00000001);
-	gr_def(ctx, offset + 0x2a0, 0x00000001);
-	gr_def(ctx, offset + 0x2b0, 0x00000200);
-	if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
-		gr_def(ctx, offset + 0x2b4, 0x00000200);
-		offset += 4;
-	}
-	if (dev_priv->chipset < 0xa0) {
-		gr_def(ctx, offset + 0x2b8, 0x00000001);
-		gr_def(ctx, offset + 0x2bc, 0x00000070);
-		gr_def(ctx, offset + 0x2c0, 0x00000080);
-		gr_def(ctx, offset + 0x2cc, 0x00000001);
-		gr_def(ctx, offset + 0x2d0, 0x00000070);
-		gr_def(ctx, offset + 0x2d4, 0x00000080);
-	} else {
-		gr_def(ctx, offset + 0x2b8, 0x00000001);
-		gr_def(ctx, offset + 0x2bc, 0x000000f0);
-		gr_def(ctx, offset + 0x2c0, 0x000000ff);
-		gr_def(ctx, offset + 0x2cc, 0x00000001);
-		gr_def(ctx, offset + 0x2d0, 0x000000f0);
-		gr_def(ctx, offset + 0x2d4, 0x000000ff);
-		gr_def(ctx, offset + 0x2dc, 0x00000009);
-		offset += 4;
-	}
-	gr_def(ctx, offset + 0x2e4, 0x00000001);
-	gr_def(ctx, offset + 0x2e8, 0x000000cf);
-	gr_def(ctx, offset + 0x2f0, 0x00000001);
-	gr_def(ctx, offset + 0x300, 0x000000cf);
-	gr_def(ctx, offset + 0x308, 0x00000002);
-	gr_def(ctx, offset + 0x310, 0x00000001);
-	gr_def(ctx, offset + 0x318, 0x00000001);
-	gr_def(ctx, offset + 0x320, 0x000000cf);
-	gr_def(ctx, offset + 0x324, 0x000000cf);
-	gr_def(ctx, offset + 0x328, 0x00000001);
-
 	/* 6000? */
 	if (dev_priv->chipset == 0x50)
 		cp_ctx(ctx, 0x4063e0, 0x1);
@@ -661,7 +470,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 			gr_def(ctx, 0x406818, 0x00000f80);
 		else
 			gr_def(ctx, 0x406818, 0x00001f80);
-		if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+		if (IS_NVA3F(dev_priv->chipset))
 			gr_def(ctx, 0x40681c, 0x00000030);
 		cp_ctx(ctx, 0x406830, 0x3);
 	}
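
A note on the predicates introduced in these hunks: IS_NVA3F() and IS_NVAAF() replace the open-coded chipset range checks on the removed lines. From the ranges they replace and the new 0xaf case added below, they presumably expand to something close to the following sketch; their actual definitions live in a part of the patch not shown here:

#define IS_NVA3F(x) (((x) > 0xa0 && (x) < 0xaa) || (x) == 0xaf)	/* assumed */
#define IS_NVAAF(x) ((x) >= 0xaa && (x) <= 0xac)			/* assumed */
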
@@ -706,7 +515,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 
 		if (dev_priv->chipset < 0xa0)
 			cp_ctx(ctx, 0x407094 + (i<<8), 1);
-		else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa)
+		else if (!IS_NVA3F(dev_priv->chipset))
 			cp_ctx(ctx, 0x407094 + (i<<8), 3);
 		else {
 			cp_ctx(ctx, 0x407094 + (i<<8), 4);
@@ -799,6 +608,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 		case 0xa8:
 		case 0xaa:
 		case 0xac:
+		case 0xaf:
 			gr_def(ctx, offset + 0x1c, 0x300c0000);
 			break;
 		}
@@ -825,7 +635,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 			gr_def(ctx, base + 0x304, 0x00007070);
 		else if (dev_priv->chipset < 0xa0)
 			gr_def(ctx, base + 0x304, 0x00027070);
-		else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa)
+		else if (!IS_NVA3F(dev_priv->chipset))
 			gr_def(ctx, base + 0x304, 0x01127070);
 		else
 			gr_def(ctx, base + 0x304, 0x05127070);
@@ -849,7 +659,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 		if (dev_priv->chipset < 0xa0) {
 			cp_ctx(ctx, base + 0x340, 9);
 			offset = base + 0x340;
-		} else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) {
+		} else if (!IS_NVA3F(dev_priv->chipset)) {
 			cp_ctx(ctx, base + 0x33c, 0xb);
 			offset = base + 0x344;
 		} else {
@@ -880,7 +690,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 		gr_def(ctx, offset + 0x0, 0x000001f0);
 		gr_def(ctx, offset + 0x4, 0x00000001);
 		gr_def(ctx, offset + 0x8, 0x00000003);
-		if (dev_priv->chipset == 0x50 || dev_priv->chipset >= 0xaa)
+		if (dev_priv->chipset == 0x50 || IS_NVAAF(dev_priv->chipset))
 			gr_def(ctx, offset + 0xc, 0x00008000);
 		gr_def(ctx, offset + 0x14, 0x00039e00);
 		cp_ctx(ctx, offset + 0x1c, 2);
@@ -892,7 +702,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 
 	if (dev_priv->chipset >= 0xa0) {
 		cp_ctx(ctx, base + 0x54c, 2);
-		if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa)
+		if (!IS_NVA3F(dev_priv->chipset))
 			gr_def(ctx, base + 0x54c, 0x003fe006);
 		else
 			gr_def(ctx, base + 0x54c, 0x003fe007);
@@ -948,6 +758,336 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 	}
 }
 
+static void
+dd_emit(struct nouveau_grctx *ctx, int num, uint32_t val) {
+	int i;
+	if (val && ctx->mode == NOUVEAU_GRCTX_VALS)
+		for (i = 0; i < num; i++)
+			nv_wo32(ctx->data, 4 * (ctx->ctxvals_pos + i), val);
+	ctx->ctxvals_pos += num;
+}
+
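
The dd_emit() helper added above doubles as a counter and a writer: unless the generator is running in NOUVEAU_GRCTX_VALS mode (or the value is zero), nothing is written and only ctxvals_pos advances, so sizing the context image is a side effect of simply running the construction routine. A minimal sketch of that use, with the wrapper name and return convention being illustrative only:

/* Hypothetical helper, not from the patch: measure the ddata block by
 * running the emitter in a non-VALS mode, where dd_emit() only counts. */
static uint32_t
ddata_size_bytes(struct nouveau_grctx *ctx)
{
	int base = ctx->ctxvals_pos;
	nv50_graph_construct_mmio_ddata(ctx);	/* advances ctxvals_pos */
	return (ctx->ctxvals_pos - base) * 4;	/* 32-bit slots to bytes */
}
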
+static void
+nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
+{
+	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	int base, num;
+	base = ctx->ctxvals_pos;
+
+	/* tesla state */
+	dd_emit(ctx, 1, 0); /* 00000001 UNK0F90 */
+	dd_emit(ctx, 1, 0); /* 00000001 UNK135C */
+
+	/* SRC_TIC state */
+	dd_emit(ctx, 1, 0); /* 00000007 SRC_TILE_MODE_Z */
+	dd_emit(ctx, 1, 2); /* 00000007 SRC_TILE_MODE_Y */
+	dd_emit(ctx, 1, 1); /* 00000001 SRC_LINEAR #1 */
+	dd_emit(ctx, 1, 0); /* 000000ff SRC_ADDRESS_HIGH */
+	dd_emit(ctx, 1, 0); /* 00000001 SRC_SRGB */
+	if (dev_priv->chipset >= 0x94)
+		dd_emit(ctx, 1, 0); /* 00000003 eng2d UNK0258 */
+	dd_emit(ctx, 1, 1); /* 00000fff SRC_DEPTH */
+	dd_emit(ctx, 1, 0x100); /* 0000ffff SRC_HEIGHT */
+
+	/* turing state */
+	dd_emit(ctx, 1, 0); /* 0000000f TEXTURES_LOG2 */
+	dd_emit(ctx, 1, 0); /* 0000000f SAMPLERS_LOG2 */
+	dd_emit(ctx, 1, 0); /* 000000ff CB_DEF_ADDRESS_HIGH */
+	dd_emit(ctx, 1, 0); /* ffffffff CB_DEF_ADDRESS_LOW */
+	dd_emit(ctx, 1, 0); /* ffffffff SHARED_SIZE */
+	dd_emit(ctx, 1, 2); /* ffffffff REG_MODE */
+	dd_emit(ctx, 1, 1); /* 0000ffff BLOCK_ALLOC_THREADS */
+	dd_emit(ctx, 1, 1); /* 00000001 LANES32 */
+	dd_emit(ctx, 1, 0); /* 000000ff UNK370 */
+	dd_emit(ctx, 1, 0); /* 000000ff USER_PARAM_UNK */
+	dd_emit(ctx, 1, 0); /* 000000ff USER_PARAM_COUNT */
+	dd_emit(ctx, 1, 1); /* 000000ff UNK384 bits 8-15 */
+	dd_emit(ctx, 1, 0x3fffff); /* 003fffff TIC_LIMIT */
+	dd_emit(ctx, 1, 0x1fff); /* 000fffff TSC_LIMIT */
+	dd_emit(ctx, 1, 0); /* 0000ffff CB_ADDR_INDEX */
+	dd_emit(ctx, 1, 1); /* 000007ff BLOCKDIM_X */
+	dd_emit(ctx, 1, 1); /* 000007ff BLOCKDIM_XMY */
+	dd_emit(ctx, 1, 0); /* 00000001 BLOCKDIM_XMY_OVERFLOW */
+	dd_emit(ctx, 1, 1); /* 0003ffff BLOCKDIM_XMYMZ */
+	dd_emit(ctx, 1, 1); /* 000007ff BLOCKDIM_Y */
+	dd_emit(ctx, 1, 1); /* 0000007f BLOCKDIM_Z */
+	dd_emit(ctx, 1, 4); /* 000000ff CP_REG_ALLOC_TEMP */
+	dd_emit(ctx, 1, 1); /* 00000001 BLOCKDIM_DIRTY */
+	if (IS_NVA3F(dev_priv->chipset))
+		dd_emit(ctx, 1, 0); /* 00000003 UNK03E8 */
+	dd_emit(ctx, 1, 1); /* 0000007f BLOCK_ALLOC_HALFWARPS */
+	dd_emit(ctx, 1, 1); /* 00000007 LOCAL_WARPS_NO_CLAMP */
+	dd_emit(ctx, 1, 7); /* 00000007 LOCAL_WARPS_LOG_ALLOC */
+	dd_emit(ctx, 1, 1); /* 00000007 STACK_WARPS_NO_CLAMP */
+	dd_emit(ctx, 1, 7); /* 00000007 STACK_WARPS_LOG_ALLOC */
+	dd_emit(ctx, 1, 1); /* 00001fff BLOCK_ALLOC_REGSLOTS_PACKED */
+	dd_emit(ctx, 1, 1); /* 00001fff BLOCK_ALLOC_REGSLOTS_STRIDED */
+	dd_emit(ctx, 1, 1); /* 000007ff BLOCK_ALLOC_THREADS */
+
+	/* compat 2d state */
+	if (dev_priv->chipset == 0x50) {
+		dd_emit(ctx, 4, 0); /* 0000ffff clip X, Y, W, H */
+
+		dd_emit(ctx, 1, 1); /* ffffffff chroma COLOR_FORMAT */
+
+		dd_emit(ctx, 1, 1); /* ffffffff pattern COLOR_FORMAT */
+		dd_emit(ctx, 1, 0); /* ffffffff pattern SHAPE */
+		dd_emit(ctx, 1, 1); /* ffffffff pattern PATTERN_SELECT */
+
+		dd_emit(ctx, 1, 0xa); /* ffffffff surf2d SRC_FORMAT */
+		dd_emit(ctx, 1, 0); /* ffffffff surf2d DMA_SRC */
+		dd_emit(ctx, 1, 0); /* 000000ff surf2d SRC_ADDRESS_HIGH */
+		dd_emit(ctx, 1, 0); /* ffffffff surf2d SRC_ADDRESS_LOW */
+		dd_emit(ctx, 1, 0x40); /* 0000ffff surf2d SRC_PITCH */
+		dd_emit(ctx, 1, 0); /* 0000000f surf2d SRC_TILE_MODE_Z */
+		dd_emit(ctx, 1, 2); /* 0000000f surf2d SRC_TILE_MODE_Y */
+		dd_emit(ctx, 1, 0x100); /* ffffffff surf2d SRC_HEIGHT */
+		dd_emit(ctx, 1, 1); /* 00000001 surf2d SRC_LINEAR */
+		dd_emit(ctx, 1, 0x100); /* ffffffff surf2d SRC_WIDTH */
+
+		dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_B_X */
+		dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_B_Y */
+		dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_C_X */
+		dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_C_Y */
+		dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_D_X */
+		dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_D_Y */
+		dd_emit(ctx, 1, 1); /* ffffffff gdirect COLOR_FORMAT */
+		dd_emit(ctx, 1, 0); /* ffffffff gdirect OPERATION */
+		dd_emit(ctx, 1, 0); /* 0000ffff gdirect POINT_X */
+		dd_emit(ctx, 1, 0); /* 0000ffff gdirect POINT_Y */
+
+		dd_emit(ctx, 1, 0); /* 0000ffff blit SRC_Y */
+		dd_emit(ctx, 1, 0); /* ffffffff blit OPERATION */
+
+		dd_emit(ctx, 1, 0); /* ffffffff ifc OPERATION */
+
+		dd_emit(ctx, 1, 0); /* ffffffff iifc INDEX_FORMAT */
+		dd_emit(ctx, 1, 0); /* ffffffff iifc LUT_OFFSET */
+		dd_emit(ctx, 1, 4); /* ffffffff iifc COLOR_FORMAT */
+		dd_emit(ctx, 1, 0); /* ffffffff iifc OPERATION */
+	}
+
+	/* m2mf state */
+	dd_emit(ctx, 1, 0); /* ffffffff m2mf LINE_COUNT */
+	dd_emit(ctx, 1, 0); /* ffffffff m2mf LINE_LENGTH_IN */
+	dd_emit(ctx, 2, 0); /* ffffffff m2mf OFFSET_IN, OFFSET_OUT */
+	dd_emit(ctx, 1, 1); /* ffffffff m2mf TILING_DEPTH_OUT */
+	dd_emit(ctx, 1, 0x100); /* ffffffff m2mf TILING_HEIGHT_OUT */
+	dd_emit(ctx, 1, 0); /* ffffffff m2mf TILING_POSITION_OUT_Z */
+	dd_emit(ctx, 1, 1); /* 00000001 m2mf LINEAR_OUT */
+	dd_emit(ctx, 2, 0); /* 0000ffff m2mf TILING_POSITION_OUT_X, Y */
+	dd_emit(ctx, 1, 0x100); /* ffffffff m2mf TILING_PITCH_OUT */
+	dd_emit(ctx, 1, 1); /* ffffffff m2mf TILING_DEPTH_IN */
+	dd_emit(ctx, 1, 0x100); /* ffffffff m2mf TILING_HEIGHT_IN */
+	dd_emit(ctx, 1, 0); /* ffffffff m2mf TILING_POSITION_IN_Z */
+	dd_emit(ctx, 1, 1); /* 00000001 m2mf LINEAR_IN */
+	dd_emit(ctx, 2, 0); /* 0000ffff m2mf TILING_POSITION_IN_X, Y */
+	dd_emit(ctx, 1, 0x100); /* ffffffff m2mf TILING_PITCH_IN */
+
+	/* more compat 2d state */
+	if (dev_priv->chipset == 0x50) {
+		dd_emit(ctx, 1, 1); /* ffffffff line COLOR_FORMAT */
+		dd_emit(ctx, 1, 0); /* ffffffff line OPERATION */
+
+		dd_emit(ctx, 1, 1); /* ffffffff triangle COLOR_FORMAT */
+		dd_emit(ctx, 1, 0); /* ffffffff triangle OPERATION */
+
+		dd_emit(ctx, 1, 0); /* 0000000f sifm TILE_MODE_Z */
+		dd_emit(ctx, 1, 2); /* 0000000f sifm TILE_MODE_Y */
+		dd_emit(ctx, 1, 0); /* 000000ff sifm FORMAT_FILTER */
+		dd_emit(ctx, 1, 1); /* 000000ff sifm FORMAT_ORIGIN */
+		dd_emit(ctx, 1, 0); /* 0000ffff sifm SRC_PITCH */
+		dd_emit(ctx, 1, 1); /* 00000001 sifm SRC_LINEAR */
+		dd_emit(ctx, 1, 0); /* 000000ff sifm SRC_OFFSET_HIGH */
+		dd_emit(ctx, 1, 0); /* ffffffff sifm SRC_OFFSET */
+		dd_emit(ctx, 1, 0); /* 0000ffff sifm SRC_HEIGHT */
+		dd_emit(ctx, 1, 0); /* 0000ffff sifm SRC_WIDTH */
+		dd_emit(ctx, 1, 3); /* ffffffff sifm COLOR_FORMAT */
+		dd_emit(ctx, 1, 0); /* ffffffff sifm OPERATION */
+
+		dd_emit(ctx, 1, 0); /* ffffffff sifc OPERATION */
+	}
+
+	/* tesla state */
+	dd_emit(ctx, 1, 0); /* 0000000f GP_TEXTURES_LOG2 */
+	dd_emit(ctx, 1, 0); /* 0000000f GP_SAMPLERS_LOG2 */
+	dd_emit(ctx, 1, 0); /* 000000ff */
+	dd_emit(ctx, 1, 0); /* ffffffff */
+	dd_emit(ctx, 1, 4); /* 000000ff UNK12B0_0 */
+	dd_emit(ctx, 1, 0x70); /* 000000ff UNK12B0_1 */
+	dd_emit(ctx, 1, 0x80); /* 000000ff UNK12B0_3 */
+	dd_emit(ctx, 1, 0); /* 000000ff UNK12B0_2 */
+	dd_emit(ctx, 1, 0); /* 0000000f FP_TEXTURES_LOG2 */
+	dd_emit(ctx, 1, 0); /* 0000000f FP_SAMPLERS_LOG2 */
+	if (IS_NVA3F(dev_priv->chipset)) {
+		dd_emit(ctx, 1, 0); /* ffffffff */
+		dd_emit(ctx, 1, 0); /* 0000007f MULTISAMPLE_SAMPLES_LOG2 */
+	} else {
+		dd_emit(ctx, 1, 0); /* 0000000f MULTISAMPLE_SAMPLES_LOG2 */
+	}
+	dd_emit(ctx, 1, 0xc); /* 000000ff SEMANTIC_COLOR.BFC0_ID */
+	if (dev_priv->chipset != 0x50)
+		dd_emit(ctx, 1, 0); /* 00000001 SEMANTIC_COLOR.CLMP_EN */
+	dd_emit(ctx, 1, 8); /* 000000ff SEMANTIC_COLOR.COLR_NR */
+	dd_emit(ctx, 1, 0x14); /* 000000ff SEMANTIC_COLOR.FFC0_ID */
+	if (dev_priv->chipset == 0x50) {
+		dd_emit(ctx, 1, 0); /* 000000ff SEMANTIC_LAYER */
+		dd_emit(ctx, 1, 0); /* 00000001 */
+	} else {
+		dd_emit(ctx, 1, 0); /* 00000001 SEMANTIC_PTSZ.ENABLE */
+		dd_emit(ctx, 1, 0x29); /* 000000ff SEMANTIC_PTSZ.PTSZ_ID */
+		dd_emit(ctx, 1, 0x27); /* 000000ff SEMANTIC_PRIM */
+		dd_emit(ctx, 1, 0x26); /* 000000ff SEMANTIC_LAYER */
+		dd_emit(ctx, 1, 8); /* 0000000f SEMANTIC_CLIP.CLIP_HIGH */
+		dd_emit(ctx, 1, 4); /* 000000ff SEMANTIC_CLIP.CLIP_LO */
+		dd_emit(ctx, 1, 0x27); /* 000000ff UNK0FD4 */
+		dd_emit(ctx, 1, 0); /* 00000001 UNK1900 */
+	}
+	dd_emit(ctx, 1, 0); /* 00000007 RT_CONTROL_MAP0 */
+	dd_emit(ctx, 1, 1); /* 00000007 RT_CONTROL_MAP1 */
+	dd_emit(ctx, 1, 2); /* 00000007 RT_CONTROL_MAP2 */
+	dd_emit(ctx, 1, 3); /* 00000007 RT_CONTROL_MAP3 */
+	dd_emit(ctx, 1, 4); /* 00000007 RT_CONTROL_MAP4 */
+	dd_emit(ctx, 1, 5); /* 00000007 RT_CONTROL_MAP5 */
+	dd_emit(ctx, 1, 6); /* 00000007 RT_CONTROL_MAP6 */
+	dd_emit(ctx, 1, 7); /* 00000007 RT_CONTROL_MAP7 */
+	dd_emit(ctx, 1, 1); /* 0000000f RT_CONTROL_COUNT */
+	dd_emit(ctx, 8, 0); /* 00000001 RT_HORIZ_UNK */
+	dd_emit(ctx, 8, 0); /* ffffffff RT_ADDRESS_LOW */
+	dd_emit(ctx, 1, 0xcf); /* 000000ff RT_FORMAT */
+	dd_emit(ctx, 7, 0); /* 000000ff RT_FORMAT */
+	if (dev_priv->chipset != 0x50)
+		dd_emit(ctx, 3, 0); /* 1, 1, 1 */
+	else
+		dd_emit(ctx, 2, 0); /* 1, 1 */
+	dd_emit(ctx, 1, 0); /* ffffffff GP_ENABLE */
+	dd_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT */
+	dd_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */
+	dd_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
+	if (IS_NVA3F(dev_priv->chipset)) {
+		dd_emit(ctx, 1, 3); /* 00000003 */
+		dd_emit(ctx, 1, 0); /* 00000001 UNK1418. Alone. */
+	}
+	if (dev_priv->chipset != 0x50)
+		dd_emit(ctx, 1, 3); /* 00000003 UNK15AC */
+	dd_emit(ctx, 1, 1); /* ffffffff RASTERIZE_ENABLE */
+	dd_emit(ctx, 1, 0); /* 00000001 FP_CONTROL.EXPORTS_Z */
+	if (dev_priv->chipset != 0x50)
+		dd_emit(ctx, 1, 0); /* 00000001 FP_CONTROL.MULTIPLE_RESULTS */
+	dd_emit(ctx, 1, 0x12); /* 000000ff FP_INTERPOLANT_CTRL.COUNT */
+	dd_emit(ctx, 1, 0x10); /* 000000ff FP_INTERPOLANT_CTRL.COUNT_NONFLAT */
+	dd_emit(ctx, 1, 0xc); /* 000000ff FP_INTERPOLANT_CTRL.OFFSET */
+	dd_emit(ctx, 1, 1); /* 00000001 FP_INTERPOLANT_CTRL.UMASK.W */
+	dd_emit(ctx, 1, 0); /* 00000001 FP_INTERPOLANT_CTRL.UMASK.X */
+	dd_emit(ctx, 1, 0); /* 00000001 FP_INTERPOLANT_CTRL.UMASK.Y */
+	dd_emit(ctx, 1, 0); /* 00000001 FP_INTERPOLANT_CTRL.UMASK.Z */
+	dd_emit(ctx, 1, 4); /* 000000ff FP_RESULT_COUNT */
+	dd_emit(ctx, 1, 2); /* ffffffff REG_MODE */
+	dd_emit(ctx, 1, 4); /* 000000ff FP_REG_ALLOC_TEMP */
+	if (dev_priv->chipset >= 0xa0)
+		dd_emit(ctx, 1, 0); /* ffffffff */
+	dd_emit(ctx, 1, 0); /* 00000001 GP_BUILTIN_RESULT_EN.LAYER_IDX */
+	dd_emit(ctx, 1, 0); /* ffffffff STRMOUT_ENABLE */
+	dd_emit(ctx, 1, 0x3fffff); /* 003fffff TIC_LIMIT */
+	dd_emit(ctx, 1, 0x1fff); /* 000fffff TSC_LIMIT */
+	dd_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */
+	if (dev_priv->chipset != 0x50)
+		dd_emit(ctx, 8, 0); /* 00000001 */
+	if (dev_priv->chipset >= 0xa0) {
+		dd_emit(ctx, 1, 1); /* 00000007 VTX_ATTR_DEFINE.COMP */
+		dd_emit(ctx, 1, 1); /* 00000007 VTX_ATTR_DEFINE.SIZE */
+		dd_emit(ctx, 1, 2); /* 00000007 VTX_ATTR_DEFINE.TYPE */
+		dd_emit(ctx, 1, 0); /* 000000ff VTX_ATTR_DEFINE.ATTR */
+	}
+	dd_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
+	dd_emit(ctx, 1, 0x14); /* 0000001f ZETA_FORMAT */
+	dd_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
+	dd_emit(ctx, 1, 0); /* 0000000f VP_TEXTURES_LOG2 */
+	dd_emit(ctx, 1, 0); /* 0000000f VP_SAMPLERS_LOG2 */
+	if (IS_NVA3F(dev_priv->chipset))
+		dd_emit(ctx, 1, 0); /* 00000001 */
+	dd_emit(ctx, 1, 2); /* 00000003 POLYGON_MODE_BACK */
+	if (dev_priv->chipset >= 0xa0)
+		dd_emit(ctx, 1, 0); /* 00000003 VTX_ATTR_DEFINE.SIZE - 1 */
+	dd_emit(ctx, 1, 0); /* 0000ffff CB_ADDR_INDEX */
+	if (dev_priv->chipset >= 0xa0)
+		dd_emit(ctx, 1, 0); /* 00000003 */
+	dd_emit(ctx, 1, 0); /* 00000001 CULL_FACE_ENABLE */
+	dd_emit(ctx, 1, 1); /* 00000003 CULL_FACE */
+	dd_emit(ctx, 1, 0); /* 00000001 FRONT_FACE */
+	dd_emit(ctx, 1, 2); /* 00000003 POLYGON_MODE_FRONT */
+	dd_emit(ctx, 1, 0x1000); /* 00007fff UNK141C */
+	if (dev_priv->chipset != 0x50) {
+		dd_emit(ctx, 1, 0xe00); /* 7fff */
+		dd_emit(ctx, 1, 0x1000); /* 7fff */
+		dd_emit(ctx, 1, 0x1e00); /* 7fff */
+	}
+	dd_emit(ctx, 1, 0); /* 00000001 BEGIN_END_ACTIVE */
+	dd_emit(ctx, 1, 1); /* 00000001 POLYGON_MODE_??? */
+	dd_emit(ctx, 1, 1); /* 000000ff GP_REG_ALLOC_TEMP / 4 rounded up */
+	dd_emit(ctx, 1, 1); /* 000000ff FP_REG_ALLOC_TEMP... without /4? */
+	dd_emit(ctx, 1, 1); /* 000000ff VP_REG_ALLOC_TEMP / 4 rounded up */
+	dd_emit(ctx, 1, 1); /* 00000001 */
+	dd_emit(ctx, 1, 0); /* 00000001 */
+	dd_emit(ctx, 1, 0); /* 00000001 VTX_ATTR_MASK_UNK0 nonempty */
+	dd_emit(ctx, 1, 0); /* 00000001 VTX_ATTR_MASK_UNK1 nonempty */
+	dd_emit(ctx, 1, 0x200); /* 0003ffff GP_VERTEX_OUTPUT_COUNT*GP_REG_ALLOC_RESULT */
+	if (IS_NVA3F(dev_priv->chipset))
+		dd_emit(ctx, 1, 0x200);
+	dd_emit(ctx, 1, 0); /* 00000001 */
+	if (dev_priv->chipset < 0xa0) {
+		dd_emit(ctx, 1, 1); /* 00000001 */
+		dd_emit(ctx, 1, 0x70); /* 000000ff */
+		dd_emit(ctx, 1, 0x80); /* 000000ff */
+		dd_emit(ctx, 1, 0); /* 000000ff */
+		dd_emit(ctx, 1, 0); /* 00000001 */
+		dd_emit(ctx, 1, 1); /* 00000001 */
+		dd_emit(ctx, 1, 0x70); /* 000000ff */
+		dd_emit(ctx, 1, 0x80); /* 000000ff */
+		dd_emit(ctx, 1, 0); /* 000000ff */
+	} else {
+		dd_emit(ctx, 1, 1); /* 00000001 */
+		dd_emit(ctx, 1, 0xf0); /* 000000ff */
+		dd_emit(ctx, 1, 0xff); /* 000000ff */
+		dd_emit(ctx, 1, 0); /* 000000ff */
+		dd_emit(ctx, 1, 0); /* 00000001 */
+		dd_emit(ctx, 1, 1); /* 00000001 */
+		dd_emit(ctx, 1, 0xf0); /* 000000ff */
+		dd_emit(ctx, 1, 0xff); /* 000000ff */
+		dd_emit(ctx, 1, 0); /* 000000ff */
+		dd_emit(ctx, 1, 9); /* 0000003f UNK114C.COMP,SIZE */
+	}
+
+	/* eng2d state */
+	dd_emit(ctx, 1, 0); /* 00000001 eng2d COLOR_KEY_ENABLE */
+	dd_emit(ctx, 1, 0); /* 00000007 eng2d COLOR_KEY_FORMAT */
+	dd_emit(ctx, 1, 1); /* ffffffff eng2d DST_DEPTH */
+	dd_emit(ctx, 1, 0xcf); /* 000000ff eng2d DST_FORMAT */
+	dd_emit(ctx, 1, 0); /* ffffffff eng2d DST_LAYER */
+	dd_emit(ctx, 1, 1); /* 00000001 eng2d DST_LINEAR */
+	dd_emit(ctx, 1, 0); /* 00000007 eng2d PATTERN_COLOR_FORMAT */
+	dd_emit(ctx, 1, 0); /* 00000007 eng2d OPERATION */
+	dd_emit(ctx, 1, 0); /* 00000003 eng2d PATTERN_SELECT */
+	dd_emit(ctx, 1, 0xcf); /* 000000ff eng2d SIFC_FORMAT */
+	dd_emit(ctx, 1, 0); /* 00000001 eng2d SIFC_BITMAP_ENABLE */
+	dd_emit(ctx, 1, 2); /* 00000003 eng2d SIFC_BITMAP_UNK808 */
+	dd_emit(ctx, 1, 0); /* ffffffff eng2d BLIT_DU_DX_FRACT */
+	dd_emit(ctx, 1, 1); /* ffffffff eng2d BLIT_DU_DX_INT */
+	dd_emit(ctx, 1, 0); /* ffffffff eng2d BLIT_DV_DY_FRACT */
+	dd_emit(ctx, 1, 1); /* ffffffff eng2d BLIT_DV_DY_INT */
+	dd_emit(ctx, 1, 0); /* 00000001 eng2d BLIT_CONTROL_FILTER */
+	dd_emit(ctx, 1, 0xcf); /* 000000ff eng2d DRAW_COLOR_FORMAT */
+	dd_emit(ctx, 1, 0xcf); /* 000000ff eng2d SRC_FORMAT */
+	dd_emit(ctx, 1, 1); /* 00000001 eng2d SRC_LINEAR #2 */
+
+	num = ctx->ctxvals_pos - base;
+	ctx->ctxvals_pos = base;
+	if (IS_NVA3F(dev_priv->chipset))
+		cp_ctx(ctx, 0x404800, num);
+	else
+		cp_ctx(ctx, 0x405400, num);
+}
+
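Worth noting in the function tail above: because every dd_emit() advances ctxvals_pos by the number of slots it covers, num ends up being exactly the size of the block, and the rewind lets a single cp_ctx() call associate those num context words with the hardware register window, at 0x404800 on the NVA3F family and 0x405400 on everything else. Condensed, under the assumption that cp_ctx(ctx, reg, count) maps count words starting at reg:

	num = ctx->ctxvals_pos - base;	/* slots emitted since 'base' */
	ctx->ctxvals_pos = base;	/* rewind to the start of the block */
	cp_ctx(ctx, nva3f ? 0x404800 : 0x405400, num);	/* 'nva3f' is an illustrative flag */
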
 /*
  * xfer areas. These are a pain.
  *
@@ -990,28 +1130,33 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
  * without the help of ctxprog.
  */
 
-static inline void
+static void
 xf_emit(struct nouveau_grctx *ctx, int num, uint32_t val) {
 	int i;
 	if (val && ctx->mode == NOUVEAU_GRCTX_VALS)
 		for (i = 0; i < num; i++)
-			nv_wo32(ctx->dev, ctx->data, ctx->ctxvals_pos + (i << 3), val);
+			nv_wo32(ctx->data, 4 * (ctx->ctxvals_pos + (i << 3)), val);
 	ctx->ctxvals_pos += num << 3;
 }
 
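The only difference from dd_emit() is the << 3 stride: an xfer area interleaves its eight strands word by word, so consecutive values of one strand live eight 32-bit words apart, and the strand index (the offset + k seen in the xfer1 code below) picks the column. A sketch of the assumed addressing, with the helper name invented purely for illustration:

/* Hypothetical helper: word offset of element 'idx' within strand 'strand'
 * of an xfer area interleaved as described above. */
static inline int
xfer_slot(int strand, int idx)
{
	return strand + (idx << 3);	/* eight-word stride between elements */
}
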
 /* Gene declarations... */
 
+static void nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx);
 static void nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk1(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk2(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk3(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk4(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk5(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk6(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk7(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk8(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk9(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk10(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_ccache(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_clipid(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_eng2d(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_csched(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_strmout(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_ropm1(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_ropm2(struct nouveau_grctx *ctx);
 static void nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx);
 static void nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx);
 
@@ -1030,102 +1175,32 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
 	if (dev_priv->chipset < 0xa0) {
 		/* Strand 0 */
 		ctx->ctxvals_pos = offset;
-		switch (dev_priv->chipset) {
-		case 0x50:
-			xf_emit(ctx, 0x99, 0);
-			break;
-		case 0x84:
-		case 0x86:
-			xf_emit(ctx, 0x384, 0);
-			break;
-		case 0x92:
-		case 0x94:
-		case 0x96:
-		case 0x98:
-			xf_emit(ctx, 0x380, 0);
-			break;
-		}
-		nv50_graph_construct_gene_m2mf (ctx);
-		switch (dev_priv->chipset) {
-		case 0x50:
-		case 0x84:
-		case 0x86:
-		case 0x98:
-			xf_emit(ctx, 0x4c4, 0);
-			break;
-		case 0x92:
-		case 0x94:
-		case 0x96:
-			xf_emit(ctx, 0x984, 0);
-			break;
-		}
-		nv50_graph_construct_gene_unk5(ctx);
-		if (dev_priv->chipset == 0x50)
-			xf_emit(ctx, 0xa, 0);
-		else
-			xf_emit(ctx, 0xb, 0);
-		nv50_graph_construct_gene_unk4(ctx);
-		nv50_graph_construct_gene_unk3(ctx);
+		nv50_graph_construct_gene_dispatch(ctx);
+		nv50_graph_construct_gene_m2mf(ctx);
+		nv50_graph_construct_gene_unk24xx(ctx);
+		nv50_graph_construct_gene_clipid(ctx);
+		nv50_graph_construct_gene_zcull(ctx);
 		if ((ctx->ctxvals_pos-offset)/8 > size)
 			size = (ctx->ctxvals_pos-offset)/8;
 
 		/* Strand 1 */
 		ctx->ctxvals_pos = offset + 0x1;
-		nv50_graph_construct_gene_unk6(ctx);
-		nv50_graph_construct_gene_unk7(ctx);
-		nv50_graph_construct_gene_unk8(ctx);
-		switch (dev_priv->chipset) {
-		case 0x50:
-		case 0x92:
-			xf_emit(ctx, 0xfb, 0);
-			break;
-		case 0x84:
-			xf_emit(ctx, 0xd3, 0);
-			break;
-		case 0x94:
-		case 0x96:
-			xf_emit(ctx, 0xab, 0);
-			break;
-		case 0x86:
-		case 0x98:
-			xf_emit(ctx, 0x6b, 0);
-			break;
-		}
-		xf_emit(ctx, 2, 0x4e3bfdf);
-		xf_emit(ctx, 4, 0);
-		xf_emit(ctx, 1, 0x0fac6881);
-		xf_emit(ctx, 0xb, 0);
-		xf_emit(ctx, 2, 0x4e3bfdf);
+		nv50_graph_construct_gene_vfetch(ctx);
+		nv50_graph_construct_gene_eng2d(ctx);
+		nv50_graph_construct_gene_csched(ctx);
+		nv50_graph_construct_gene_ropm1(ctx);
+		nv50_graph_construct_gene_ropm2(ctx);
 		if ((ctx->ctxvals_pos-offset)/8 > size)
 			size = (ctx->ctxvals_pos-offset)/8;
 
 		/* Strand 2 */
 		ctx->ctxvals_pos = offset + 0x2;
-		switch (dev_priv->chipset) {
-		case 0x50:
-		case 0x92:
-			xf_emit(ctx, 0xa80, 0);
-			break;
-		case 0x84:
-			xf_emit(ctx, 0xa7e, 0);
-			break;
-		case 0x94:
-		case 0x96:
-			xf_emit(ctx, 0xa7c, 0);
-			break;
-		case 0x86:
-		case 0x98:
-			xf_emit(ctx, 0xa7a, 0);
-			break;
-		}
-		xf_emit(ctx, 1, 0x3fffff);
-		xf_emit(ctx, 2, 0);
-		xf_emit(ctx, 1, 0x1fff);
-		xf_emit(ctx, 0xe, 0);
-		nv50_graph_construct_gene_unk9(ctx);
-		nv50_graph_construct_gene_unk2(ctx);
-		nv50_graph_construct_gene_unk1(ctx);
-		nv50_graph_construct_gene_unk10(ctx);
+		nv50_graph_construct_gene_ccache(ctx);
+		nv50_graph_construct_gene_unk1cxx(ctx);
+		nv50_graph_construct_gene_strmout(ctx);
+		nv50_graph_construct_gene_unk14xx(ctx);
+		nv50_graph_construct_gene_unk10xx(ctx);
+		nv50_graph_construct_gene_unk34xx(ctx);
 		if ((ctx->ctxvals_pos-offset)/8 > size)
 			size = (ctx->ctxvals_pos-offset)/8;
 
@@ -1150,86 +1225,46 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
 	} else {
 		/* Strand 0 */
 		ctx->ctxvals_pos = offset;
-		if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
-			xf_emit(ctx, 0x385, 0);
-		else
-			xf_emit(ctx, 0x384, 0);
+		nv50_graph_construct_gene_dispatch(ctx);
 		nv50_graph_construct_gene_m2mf(ctx);
-		xf_emit(ctx, 0x950, 0);
-		nv50_graph_construct_gene_unk10(ctx);
-		xf_emit(ctx, 1, 0x0fac6881);
-		if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
-			xf_emit(ctx, 1, 1);
-			xf_emit(ctx, 3, 0);
-		}
-		nv50_graph_construct_gene_unk8(ctx);
-		if (dev_priv->chipset == 0xa0)
-			xf_emit(ctx, 0x189, 0);
-		else if (dev_priv->chipset == 0xa3)
-			xf_emit(ctx, 0xd5, 0);
-		else if (dev_priv->chipset == 0xa5)
-			xf_emit(ctx, 0x99, 0);
-		else if (dev_priv->chipset == 0xaa)
-			xf_emit(ctx, 0x65, 0);
-		else
-			xf_emit(ctx, 0x6d, 0);
-		nv50_graph_construct_gene_unk9(ctx);
+		nv50_graph_construct_gene_unk34xx(ctx);
+		nv50_graph_construct_gene_csched(ctx);
+		nv50_graph_construct_gene_unk1cxx(ctx);
+		nv50_graph_construct_gene_strmout(ctx);
 		if ((ctx->ctxvals_pos-offset)/8 > size)
 			size = (ctx->ctxvals_pos-offset)/8;
 
 		/* Strand 1 */
 		ctx->ctxvals_pos = offset + 1;
-		nv50_graph_construct_gene_unk1(ctx);
+		nv50_graph_construct_gene_unk10xx(ctx);
 		if ((ctx->ctxvals_pos-offset)/8 > size)
 			size = (ctx->ctxvals_pos-offset)/8;
 
 		/* Strand 2 */
 		ctx->ctxvals_pos = offset + 2;
-		if (dev_priv->chipset == 0xa0) {
-			nv50_graph_construct_gene_unk2(ctx);
-		}
-		xf_emit(ctx, 0x36, 0);
-		nv50_graph_construct_gene_unk5(ctx);
+		if (dev_priv->chipset == 0xa0)
+			nv50_graph_construct_gene_unk14xx(ctx);
+		nv50_graph_construct_gene_unk24xx(ctx);
 		if ((ctx->ctxvals_pos-offset)/8 > size)
 			size = (ctx->ctxvals_pos-offset)/8;
 
 		/* Strand 3 */
 		ctx->ctxvals_pos = offset + 3;
-		xf_emit(ctx, 1, 0);
-		xf_emit(ctx, 1, 1);
-		nv50_graph_construct_gene_unk6(ctx);
+		nv50_graph_construct_gene_vfetch(ctx);
 		if ((ctx->ctxvals_pos-offset)/8 > size)
 			size = (ctx->ctxvals_pos-offset)/8;
 
 		/* Strand 4 */
 		ctx->ctxvals_pos = offset + 4;
-		if (dev_priv->chipset == 0xa0)
-			xf_emit(ctx, 0xa80, 0);
-		else if (dev_priv->chipset == 0xa3)
-			xf_emit(ctx, 0xa7c, 0);
-		else
-			xf_emit(ctx, 0xa7a, 0);
-		xf_emit(ctx, 1, 0x3fffff);
-		xf_emit(ctx, 2, 0);
-		xf_emit(ctx, 1, 0x1fff);
+		nv50_graph_construct_gene_ccache(ctx);
 		if ((ctx->ctxvals_pos-offset)/8 > size)
 			size = (ctx->ctxvals_pos-offset)/8;
 
 		/* Strand 5 */
 		ctx->ctxvals_pos = offset + 5;
-		xf_emit(ctx, 1, 0);
-		xf_emit(ctx, 1, 0x0fac6881);
-		xf_emit(ctx, 0xb, 0);
-		xf_emit(ctx, 2, 0x4e3bfdf);
-		xf_emit(ctx, 3, 0);
-		if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
-			xf_emit(ctx, 1, 0x11);
-		xf_emit(ctx, 1, 0);
-		xf_emit(ctx, 2, 0x4e3bfdf);
-		xf_emit(ctx, 2, 0);
-		if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
-			xf_emit(ctx, 1, 0x11);
-		xf_emit(ctx, 1, 0);
+		nv50_graph_construct_gene_ropm2(ctx);
+		nv50_graph_construct_gene_ropm1(ctx);
+		/* per-ROP context */
 		for (i = 0; i < 8; i++)
 			if (units & (1<<(i+16)))
 				nv50_graph_construct_gene_ropc(ctx);
@@ -1238,10 +1273,9 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
 
 		/* Strand 6 */
 		ctx->ctxvals_pos = offset + 6;
-		nv50_graph_construct_gene_unk3(ctx);
-		xf_emit(ctx, 0xb, 0);
-		nv50_graph_construct_gene_unk4(ctx);
-		nv50_graph_construct_gene_unk7(ctx);
+		nv50_graph_construct_gene_zcull(ctx);
+		nv50_graph_construct_gene_clipid(ctx);
+		nv50_graph_construct_gene_eng2d(ctx);
 		if (units & (1 << 0))
 			nv50_graph_construct_xfer_tp(ctx);
 		if (units & (1 << 1))
@@ -1269,7 +1303,7 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
 		if (units & (1 << 9))
 			nv50_graph_construct_xfer_tp(ctx);
 	} else {
-		nv50_graph_construct_gene_unk2(ctx);
+		nv50_graph_construct_gene_unk14xx(ctx);
 	}
 	if ((ctx->ctxvals_pos-offset)/8 > size)
 		size = (ctx->ctxvals_pos-offset)/8;
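
The check repeated after every strand above is a running maximum: strand k starts at offset + k and steps by eight words, so (ctxvals_pos - offset)/8 is that strand's length in strand-local slots, and the xfer area must be sized for the longest one. With the kernel's max() macro it reads simply as:

	size = max(size, (ctx->ctxvals_pos - offset) / 8);
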
@@ -1290,9 +1324,70 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
  */
 
 static void
+nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx)
+{
+	/* start of strand 0 */
+	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	/* SEEK */
+	if (dev_priv->chipset == 0x50)
+		xf_emit(ctx, 5, 0);
+	else if (!IS_NVA3F(dev_priv->chipset))
+		xf_emit(ctx, 6, 0);
+	else
+		xf_emit(ctx, 4, 0);
+	/* SEEK */
+	/* the PGRAPH's internal FIFO */
+	if (dev_priv->chipset == 0x50)
+		xf_emit(ctx, 8*3, 0);
+	else
+		xf_emit(ctx, 0x100*3, 0);
+	/* and another bonus slot?!? */
+	xf_emit(ctx, 3, 0);
+	/* and YET ANOTHER bonus slot? */
+	if (IS_NVA3F(dev_priv->chipset))
+		xf_emit(ctx, 3, 0);
+	/* SEEK */
+	/* CTX_SWITCH: caches of gr objects bound to subchannels. 8 values, last used index */
+	xf_emit(ctx, 9, 0);
+	/* SEEK */
+	xf_emit(ctx, 9, 0);
+	/* SEEK */
+	xf_emit(ctx, 9, 0);
+	/* SEEK */
+	xf_emit(ctx, 9, 0);
+	/* SEEK */
+	if (dev_priv->chipset < 0x90)
+		xf_emit(ctx, 4, 0);
+	/* SEEK */
+	xf_emit(ctx, 2, 0);
+	/* SEEK */
+	xf_emit(ctx, 6*2, 0);
+	xf_emit(ctx, 2, 0);
+	/* SEEK */
+	xf_emit(ctx, 2, 0);
+	/* SEEK */
+	xf_emit(ctx, 6*2, 0);
+	xf_emit(ctx, 2, 0);
+	/* SEEK */
+	if (dev_priv->chipset == 0x50)
+		xf_emit(ctx, 0x1c, 0);
+	else if (dev_priv->chipset < 0xa0)
+		xf_emit(ctx, 0x1e, 0);
+	else
+		xf_emit(ctx, 0x22, 0);
+	/* SEEK */
+	xf_emit(ctx, 0x15, 0);
+}
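
One reading of the counts in gene_dispatch, an inference from the code rather than something the patch states: each entry of PGRAPH's internal FIFO appears to take three context words, so NV50 context-switches 8 entries against 0x100 on later chips, plus the "bonus" slots the comments puzzle over:

	int fifo_entries = (dev_priv->chipset == 0x50) ? 8 : 0x100;
	xf_emit(ctx, fifo_entries * 3, 0);	/* apparently three words per entry */
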
+
+static void
 nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx)
 {
-	/* m2mf state */
+	/* Strand 0, right after dispatch */
+	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	int smallm2mf = 0;
+	if (dev_priv->chipset < 0x92 || dev_priv->chipset == 0x98)
+		smallm2mf = 1;
+	/* SEEK */
 	xf_emit (ctx, 1, 0); /* DMA_NOTIFY instance >> 4 */
 	xf_emit (ctx, 1, 0); /* DMA_BUFFER_IN instance >> 4 */
 	xf_emit (ctx, 1, 0); /* DMA_BUFFER_OUT instance >> 4 */
@@ -1319,427 +1414,975 @@ nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx)
 	xf_emit (ctx, 1, 0); /* TILING_POSITION_OUT */
 	xf_emit (ctx, 1, 0); /* OFFSET_IN_HIGH */
 	xf_emit (ctx, 1, 0); /* OFFSET_OUT_HIGH */
+	/* SEEK */
+	if (smallm2mf)
+		xf_emit(ctx, 0x40, 0); /* 20 * ffffffff, 3ffff */
+	else
+		xf_emit(ctx, 0x100, 0); /* 80 * ffffffff, 3ffff */
+	xf_emit(ctx, 4, 0); /* 1f/7f, 0, 1f/7f, 0 [1f for smallm2mf, 7f otherwise] */
+	/* SEEK */
+	if (smallm2mf)
+		xf_emit(ctx, 0x400, 0); /* ffffffff */
+	else
+		xf_emit(ctx, 0x800, 0); /* ffffffff */
+	xf_emit(ctx, 4, 0); /* ff/1ff, 0, 0, 0 [ff for smallm2mf, 1ff otherwise] */
+	/* SEEK */
+	xf_emit(ctx, 0x40, 0); /* 20 * bits ffffffff, 3ffff */
+	xf_emit(ctx, 0x6, 0); /* 1f, 0, 1f, 0, 1f, 0 */
 }
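
The new smallm2mf flag replaces what used to be per-chipset switch statements: chips below 0x92, plus 0x98, keep smaller m2mf staging areas in the context image. The two SEEK-delimited data blocks then size as follows (a condensed restatement of the branches above, with invented variable names):

	int data0_words = smallm2mf ? 0x40 : 0x100;	/* first data block */
	int data1_words = smallm2mf ? 0x400 : 0x800;	/* second data block */
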
 
 static void
-nv50_graph_construct_gene_unk1(struct nouveau_grctx *ctx)
+nv50_graph_construct_gene_ccache(struct nouveau_grctx *ctx)
 {
 	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
-	/* end of area 2 on pre-NVA0, area 1 on NVAx */
-	xf_emit(ctx, 2, 4);
-	xf_emit(ctx, 1, 0);
-	xf_emit(ctx, 1, 0x80);
-	xf_emit(ctx, 1, 4);
-	xf_emit(ctx, 1, 0x80c14);
-	xf_emit(ctx, 1, 0);
-	if (dev_priv->chipset == 0x50)
-		xf_emit(ctx, 1, 0x3ff);
-	else
-		xf_emit(ctx, 1, 0x7ff);
+	xf_emit(ctx, 2, 0); /* RO */
+	xf_emit(ctx, 0x800, 0); /* ffffffff */
 	switch (dev_priv->chipset) {
 	case 0x50:
-	case 0x86:
-	case 0x98:
-	case 0xaa:
-	case 0xac:
-		xf_emit(ctx, 0x542, 0);
+	case 0x92:
+	case 0xa0:
+		xf_emit(ctx, 0x2b, 0);
 		break;
 	case 0x84:
-	case 0x92:
+		xf_emit(ctx, 0x29, 0);
+		break;
 	case 0x94:
 	case 0x96:
-		xf_emit(ctx, 0x942, 0);
-		break;
-	case 0xa0:
 	case 0xa3:
-		xf_emit(ctx, 0x2042, 0);
+		xf_emit(ctx, 0x27, 0);
 		break;
+	case 0x86:
+	case 0x98:
 	case 0xa5:
 	case 0xa8:
-		xf_emit(ctx, 0x842, 0);
+	case 0xaa:
+	case 0xac:
+	case 0xaf:
+		xf_emit(ctx, 0x25, 0);
 		break;
 	}
-	xf_emit(ctx, 2, 4);
-	xf_emit(ctx, 1, 0);
-	xf_emit(ctx, 1, 0x80);
-	xf_emit(ctx, 1, 4);
-	xf_emit(ctx, 1, 1);
-	xf_emit(ctx, 1, 0);
-	xf_emit(ctx, 1, 0x27);
-	xf_emit(ctx, 1, 0);
-	xf_emit(ctx, 1, 0x26);
-	xf_emit(ctx, 3, 0);
+	/* CB bindings, 0x80 of them. first word is address >> 8, second is
+	 * size >> 4 | valid << 24 */
+	xf_emit(ctx, 0x100, 0); /* ffffffff CB_DEF */
+	xf_emit(ctx, 1, 0); /* 0000007f CB_ADDR_BUFFER */
+	xf_emit(ctx, 1, 0); /* 0 */
+	xf_emit(ctx, 0x30, 0); /* ff SET_PROGRAM_CB */
+	xf_emit(ctx, 1, 0); /* 3f last SET_PROGRAM_CB */
+	xf_emit(ctx, 4, 0); /* RO */
+	xf_emit(ctx, 0x100, 0); /* ffffffff */
+	xf_emit(ctx, 8, 0); /* 1f, 0, 0, ... */
+	xf_emit(ctx, 8, 0); /* ffffffff */
+	xf_emit(ctx, 4, 0); /* ffffffff */
+	xf_emit(ctx, 1, 0); /* 3 */
+	xf_emit(ctx, 1, 0); /* ffffffff */
+	xf_emit(ctx, 1, 0); /* 0000ffff DMA_CODE_CB */
+	xf_emit(ctx, 1, 0); /* 0000ffff DMA_TIC */
+	xf_emit(ctx, 1, 0); /* 0000ffff DMA_TSC */
+	xf_emit(ctx, 1, 0); /* 00000001 LINKED_TSC */
+	xf_emit(ctx, 1, 0); /* 000000ff TIC_ADDRESS_HIGH */
+	xf_emit(ctx, 1, 0); /* ffffffff TIC_ADDRESS_LOW */
+	xf_emit(ctx, 1, 0x3fffff); /* 003fffff TIC_LIMIT */
+	xf_emit(ctx, 1, 0); /* 000000ff TSC_ADDRESS_HIGH */
+	xf_emit(ctx, 1, 0); /* ffffffff TSC_ADDRESS_LOW */
+	xf_emit(ctx, 1, 0x1fff); /* 000fffff TSC_LIMIT */
+	xf_emit(ctx, 1, 0); /* 000000ff VP_ADDRESS_HIGH */
+	xf_emit(ctx, 1, 0); /* ffffffff VP_ADDRESS_LOW */
+	xf_emit(ctx, 1, 0); /* 00ffffff VP_START_ID */
+	xf_emit(ctx, 1, 0); /* 000000ff CB_DEF_ADDRESS_HIGH */
+	xf_emit(ctx, 1, 0); /* ffffffff CB_DEF_ADDRESS_LOW */
+	xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+	xf_emit(ctx, 1, 0); /* 000000ff GP_ADDRESS_HIGH */
+	xf_emit(ctx, 1, 0); /* ffffffff GP_ADDRESS_LOW */
+	xf_emit(ctx, 1, 0); /* 00ffffff GP_START_ID */
+	xf_emit(ctx, 1, 0); /* 000000ff FP_ADDRESS_HIGH */
+	xf_emit(ctx, 1, 0); /* ffffffff FP_ADDRESS_LOW */
+	xf_emit(ctx, 1, 0); /* 00ffffff FP_START_ID */
 }
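
The CB-bindings comment in gene_ccache is precise enough to restate as code: 0x80 bindings of two words each, the first holding the address in 256-byte units, the second the size in 16-byte units together with a valid bit. A sketch of that packing (the helper is illustrative, not from the patch):

static void
cb_bind_words(uint64_t addr, uint32_t size, int valid, uint32_t *w0, uint32_t *w1)
{
	*w0 = addr >> 8;			/* address >> 8 */
	*w1 = (size >> 4) | (valid << 24);	/* size >> 4 | valid << 24 */
}
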
 
 static void
-nv50_graph_construct_gene_unk10(struct nouveau_grctx *ctx)
+nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx)
 {
+	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	int i;
 	/* end of area 2 on pre-NVA0, area 1 on NVAx */
-	xf_emit(ctx, 0x10, 0x04000000);
-	xf_emit(ctx, 0x24, 0);
-	xf_emit(ctx, 2, 0x04e3bfdf);
-	xf_emit(ctx, 2, 0);
-	xf_emit(ctx, 1, 0x1fe21);
+	xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
+	xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
+	xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+	xf_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT */
+	xf_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */
+	xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */
+	xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */
+	if (dev_priv->chipset == 0x50)
+		xf_emit(ctx, 1, 0x3ff);
+	else
+		xf_emit(ctx, 1, 0x7ff); /* 000007ff */
+	xf_emit(ctx, 1, 0); /* 111/113 */
+	xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+	for (i = 0; i < 8; i++) {
+		switch (dev_priv->chipset) {
+		case 0x50:
+		case 0x86:
+		case 0x98:
+		case 0xaa:
+		case 0xac:
+			xf_emit(ctx, 0xa0, 0); /* ffffffff */
+			break;
+		case 0x84:
+		case 0x92:
+		case 0x94:
+		case 0x96:
+			xf_emit(ctx, 0x120, 0);
+			break;
+		case 0xa5:
+		case 0xa8:
+			xf_emit(ctx, 0x100, 0); /* ffffffff */
+			break;
+		case 0xa0:
+		case 0xa3:
+		case 0xaf:
+			xf_emit(ctx, 0x400, 0); /* ffffffff */
+			break;
+		}
+		xf_emit(ctx, 4, 0); /* 3f, 0, 0, 0 */
+		xf_emit(ctx, 4, 0); /* ffffffff */
+	}
+	xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
+	xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
+	xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+	xf_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT */
+	xf_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_TEMP */
+	xf_emit(ctx, 1, 1); /* 00000001 RASTERIZE_ENABLE */
+	xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */
+	xf_emit(ctx, 1, 0x27); /* 000000ff UNK0FD4 */
+	xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */
+	xf_emit(ctx, 1, 0x26); /* 000000ff SEMANTIC_LAYER */
+	xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+}
+
+static void
+nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx)
+{
+	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	/* end of area 2 on pre-NVA0, area 1 on NVAx */
+	xf_emit(ctx, 1, 0); /* 00000001 VIEWPORT_CLIP_RECTS_EN */
+	xf_emit(ctx, 1, 0); /* 00000003 VIEWPORT_CLIP_MODE */
+	xf_emit(ctx, 0x10, 0x04000000); /* 07ffffff VIEWPORT_CLIP_HORIZ*8, VIEWPORT_CLIP_VERT*8 */
+	xf_emit(ctx, 1, 0); /* 00000001 POLYGON_STIPPLE_ENABLE */
+	xf_emit(ctx, 0x20, 0); /* ffffffff POLYGON_STIPPLE */
+	xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY */
+	xf_emit(ctx, 1, 0); /* ffff0ff3 */
+	xf_emit(ctx, 1, 0x04e3bfdf); /* ffffffff UNK0D64 */
+	xf_emit(ctx, 1, 0x04e3bfdf); /* ffffffff UNK0DF4 */
+	xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */
+	xf_emit(ctx, 1, 0); /* 00000007 */
+	xf_emit(ctx, 1, 0x1fe21); /* 0001ffff tesla UNK0FAC */
+	if (dev_priv->chipset >= 0xa0)
+		xf_emit(ctx, 1, 0x0fac6881);
+	if (IS_NVA3F(dev_priv->chipset)) {
+		xf_emit(ctx, 1, 1);
+		xf_emit(ctx, 3, 0);
+	}
 }
 
 static void
-nv50_graph_construct_gene_unk2(struct nouveau_grctx *ctx)
+nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx)
 {
 	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
 	/* middle of area 2 on pre-NVA0, beginning of area 2 on NVA0, area 7 on >NVA0 */
 	if (dev_priv->chipset != 0x50) {
-		xf_emit(ctx, 5, 0);
-		xf_emit(ctx, 1, 0x80c14);
-		xf_emit(ctx, 2, 0);
-		xf_emit(ctx, 1, 0x804);
-		xf_emit(ctx, 1, 0);
-		xf_emit(ctx, 2, 4);
-		xf_emit(ctx, 1, 0x8100c12);
+		xf_emit(ctx, 5, 0); /* ffffffff */
+		xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */
+		xf_emit(ctx, 1, 0); /* 00000001 */
+		xf_emit(ctx, 1, 0); /* 000003ff */
+		xf_emit(ctx, 1, 0x804); /* 00000fff SEMANTIC_CLIP */
+		xf_emit(ctx, 1, 0); /* 00000001 */
+		xf_emit(ctx, 2, 4); /* 7f, ff */
+		xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
 	}
-	xf_emit(ctx, 1, 0);
-	xf_emit(ctx, 2, 4);
-	xf_emit(ctx, 1, 0);
-	xf_emit(ctx, 1, 0x10);
-	if (dev_priv->chipset == 0x50)
-		xf_emit(ctx, 3, 0);
-	else
-		xf_emit(ctx, 4, 0);
-	xf_emit(ctx, 1, 0x804);
-	xf_emit(ctx, 1, 1);
-	xf_emit(ctx, 1, 0x1a);
+	xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+	xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
+	xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
+	xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+	xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */
+	xf_emit(ctx, 1, 0); /* 000000ff VP_CLIP_DISTANCE_ENABLE */
 	if (dev_priv->chipset != 0x50)
-		xf_emit(ctx, 1, 0x7f);
-	xf_emit(ctx, 1, 0);
-	xf_emit(ctx, 1, 1);
-	xf_emit(ctx, 1, 0x80c14);
-	xf_emit(ctx, 1, 0);
-	xf_emit(ctx, 1, 0x8100c12);
-	xf_emit(ctx, 2, 4);
-	xf_emit(ctx, 1, 0);
-	xf_emit(ctx, 1, 0x10);
-	xf_emit(ctx, 3, 0);
-	xf_emit(ctx, 1, 1);
-	xf_emit(ctx, 1, 0x8100c12);
-	xf_emit(ctx, 6, 0);
-	if (dev_priv->chipset == 0x50)
-		xf_emit(ctx, 1, 0x3ff);
-	else
-		xf_emit(ctx, 1, 0x7ff);
-	xf_emit(ctx, 1, 0x80c14);
-	xf_emit(ctx, 0x38, 0);
-	xf_emit(ctx, 1, 1);
-	xf_emit(ctx, 2, 0);
-	xf_emit(ctx, 1, 0x10);
-	xf_emit(ctx, 0x38, 0);
-	xf_emit(ctx, 2, 0x88);
-	xf_emit(ctx, 2, 0);
-	xf_emit(ctx, 1, 4);
-	xf_emit(ctx, 0x16, 0);
-	xf_emit(ctx, 1, 0x26);
-	xf_emit(ctx, 2, 0);
-	xf_emit(ctx, 1, 0x3f800000);
-	if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
-		xf_emit(ctx, 4, 0);
-	else
-		xf_emit(ctx, 3, 0);
-	xf_emit(ctx, 1, 0x1a);
-	xf_emit(ctx, 1, 0x10);
+		xf_emit(ctx, 1, 0); /* 3ff */
+	xf_emit(ctx, 1, 0); /* 000000ff tesla UNK1940 */
+	xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0D7C */
+	xf_emit(ctx, 1, 0x804); /* 00000fff SEMANTIC_CLIP */
+	xf_emit(ctx, 1, 1); /* 00000001 VIEWPORT_TRANSFORM_EN */
+	xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
 	if (dev_priv->chipset != 0x50)
-		xf_emit(ctx, 0x28, 0);
+		xf_emit(ctx, 1, 0x7f); /* 000000ff tesla UNK0FFC */
+	xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+	xf_emit(ctx, 1, 1); /* 00000001 SHADE_MODEL */
+	xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */
+	xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */
+	xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
+	xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
+	xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
+	xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+	xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */
+	xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0D7C */
+	xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0F8C */
+	xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+	xf_emit(ctx, 1, 1); /* 00000001 VIEWPORT_TRANSFORM_EN */
+	xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
+	xf_emit(ctx, 4, 0); /* ffffffff NOPERSPECTIVE_BITMAP */
+	xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */
+	xf_emit(ctx, 1, 0); /* 0000000f */
+	if (dev_priv->chipset == 0x50)
+		xf_emit(ctx, 1, 0x3ff); /* 000003ff tesla UNK0D68 */
 	else
-		xf_emit(ctx, 0x25, 0);
-	xf_emit(ctx, 1, 0x52);
-	xf_emit(ctx, 1, 0);
-	xf_emit(ctx, 1, 0x26);
-	xf_emit(ctx, 1, 0);
-	xf_emit(ctx, 2, 4);
-	xf_emit(ctx, 1, 0);
-	xf_emit(ctx, 1, 0x1a);
-	xf_emit(ctx, 2, 0);
-	xf_emit(ctx, 1, 0x00ffff00);
-	xf_emit(ctx, 1, 0);
+		xf_emit(ctx, 1, 0x7ff); /* 000007ff tesla UNK0D68 */
+	xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */
+	xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */
+	xf_emit(ctx, 0x30, 0); /* ffffffff VIEWPORT_SCALE: X0, Y0, Z0, X1, Y1, ... */
+	xf_emit(ctx, 3, 0); /* f, 0, 0 */
+	xf_emit(ctx, 3, 0); /* ffffffff last VIEWPORT_SCALE? */
+	xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+	xf_emit(ctx, 1, 1); /* 00000001 VIEWPORT_TRANSFORM_EN */
+	xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */
+	xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1924 */
+	xf_emit(ctx, 1, 0x10); /* 000000ff VIEW_VOLUME_CLIP_CTRL */
+	xf_emit(ctx, 1, 0); /* 00000001 */
+	xf_emit(ctx, 0x30, 0); /* ffffffff VIEWPORT_TRANSLATE */
+	xf_emit(ctx, 3, 0); /* f, 0, 0 */
+	xf_emit(ctx, 3, 0); /* ffffffff */
+	xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+	xf_emit(ctx, 2, 0x88); /* 000001ff tesla UNK19D8 */
+	xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1924 */
+	xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+	xf_emit(ctx, 1, 4); /* 0000000f CULL_MODE */
+	xf_emit(ctx, 2, 0); /* 07ffffff SCREEN_SCISSOR */
+	xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY */
+	xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */
+	xf_emit(ctx, 0x10, 0); /* 00000001 SCISSOR_ENABLE */
+	xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */
+	xf_emit(ctx, 1, 0x26); /* 000000ff SEMANTIC_LAYER */
+	xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */
+	xf_emit(ctx, 1, 0); /* 0000000f */
+	xf_emit(ctx, 1, 0x3f800000); /* ffffffff LINE_WIDTH */
+	xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
+	xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */
+	xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
+	if (IS_NVA3F(dev_priv->chipset))
+		xf_emit(ctx, 1, 0); /* 00000001 */
+	xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
+	xf_emit(ctx, 1, 0x10); /* 000000ff VIEW_VOLUME_CLIP_CTRL */
+	if (dev_priv->chipset != 0x50) {
+		xf_emit(ctx, 1, 0); /* ffffffff */
+		xf_emit(ctx, 1, 0); /* 00000001 */
+		xf_emit(ctx, 1, 0); /* 000003ff */
+	}
+	xf_emit(ctx, 0x20, 0); /* 10xbits ffffffff, 3fffff. SCISSOR_* */
+	xf_emit(ctx, 1, 0); /* f */
+	xf_emit(ctx, 1, 0); /* 0? */
+	xf_emit(ctx, 1, 0); /* ffffffff */
+	xf_emit(ctx, 1, 0); /* 003fffff */
+	xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+	xf_emit(ctx, 1, 0x52); /* 000001ff SEMANTIC_PTSZ */
+	xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */
+	xf_emit(ctx, 1, 0x26); /* 000000ff SEMANTIC_LAYER */
+	xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */
+	xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
+	xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
+	xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+	xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
+	xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */
+	xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
+	xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */
+	xf_emit(ctx, 1, 0); /* 0000000f */
 }
1462 1697
1463static void 1698static void
1464nv50_graph_construct_gene_unk3(struct nouveau_grctx *ctx) 1699nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx)
1465{ 1700{
1466 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 1701 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
1467 /* end of area 0 on pre-NVA0, beginning of area 6 on NVAx */ 1702 /* end of strand 0 on pre-NVA0, beginning of strand 6 on NVAx */
1468 xf_emit(ctx, 1, 0x3f); 1703 /* SEEK */
1469 xf_emit(ctx, 0xa, 0); 1704 xf_emit(ctx, 1, 0x3f); /* 0000003f UNK1590 */
1470 xf_emit(ctx, 1, 2); 1705 xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
1471 xf_emit(ctx, 2, 0x04000000); 1706 xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
1472 xf_emit(ctx, 8, 0); 1707 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
1473 xf_emit(ctx, 1, 4); 1708 xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */
1474 xf_emit(ctx, 3, 0); 1709 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */
1475 xf_emit(ctx, 1, 4); 1710 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_REF */
1476 if (dev_priv->chipset == 0x50) 1711 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */
1477 xf_emit(ctx, 0x10, 0); 1712 xf_emit(ctx, 3, 0); /* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */
1478 else 1713 xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */
1479 xf_emit(ctx, 0x11, 0); 1714 xf_emit(ctx, 2, 0x04000000); /* 07ffffff tesla UNK0D6C */
1480 xf_emit(ctx, 1, 1); 1715 xf_emit(ctx, 1, 0); /* ffff0ff3 */
1481 xf_emit(ctx, 1, 0x1001); 1716 xf_emit(ctx, 1, 0); /* 00000001 CLIPID_ENABLE */
1482 xf_emit(ctx, 4, 0xffff); 1717 xf_emit(ctx, 2, 0); /* ffffffff DEPTH_BOUNDS */
1483 xf_emit(ctx, 0x20, 0); 1718 xf_emit(ctx, 1, 0); /* 00000001 */
1484 xf_emit(ctx, 0x10, 0x3f800000); 1719 xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */
1485 xf_emit(ctx, 1, 0x10); 1720 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
1486 if (dev_priv->chipset == 0x50) 1721 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
1487 xf_emit(ctx, 1, 0); 1722 xf_emit(ctx, 1, 4); /* 0000000f CULL_MODE */
1488 else 1723 xf_emit(ctx, 1, 0); /* 0000ffff */
1489 xf_emit(ctx, 2, 0); 1724 xf_emit(ctx, 1, 0); /* 00000001 UNK0FB0 */
1490 xf_emit(ctx, 1, 3); 1725 xf_emit(ctx, 1, 0); /* 00000001 POLYGON_STIPPLE_ENABLE */
1491 xf_emit(ctx, 2, 0); 1726 xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */
1727 xf_emit(ctx, 1, 0); /* ffffffff */
1728 xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */
1729 xf_emit(ctx, 1, 0); /* 000000ff CLEAR_STENCIL */
1730 xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */
1731 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */
1732 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_REF */
1733 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */
1734 xf_emit(ctx, 3, 0); /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */
1735 xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
1736 xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */
1737 xf_emit(ctx, 1, 0); /* ffffffff CLEAR_DEPTH */
1738 xf_emit(ctx, 1, 0); /* 00000007 */
1739 if (dev_priv->chipset != 0x50)
1740 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1108 */
1741 xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */
1742 xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
1743 xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
1744 xf_emit(ctx, 1, 0x1001); /* 00001fff ZETA_ARRAY_MODE */
1745 /* SEEK */
1746 xf_emit(ctx, 4, 0xffff); /* 0000ffff MSAA_MASK */
1747 xf_emit(ctx, 0x10, 0); /* 00000001 SCISSOR_ENABLE */
1748 xf_emit(ctx, 0x10, 0); /* ffffffff DEPTH_RANGE_NEAR */
1749 xf_emit(ctx, 0x10, 0x3f800000); /* ffffffff DEPTH_RANGE_FAR */
1750 xf_emit(ctx, 1, 0x10); /* 7f/ff/3ff VIEW_VOLUME_CLIP_CTRL */
1751 xf_emit(ctx, 1, 0); /* 00000001 VIEWPORT_CLIP_RECTS_EN */
1752 xf_emit(ctx, 1, 3); /* 00000003 FP_CTRL_UNK196C */
1753 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1968 */
1754 if (dev_priv->chipset != 0x50)
1755 xf_emit(ctx, 1, 0); /* 0fffffff tesla UNK1104 */
1756 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK151C */
1492} 1757}
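
Nearly every line of these gene functions is an xf_emit() call, so it is worth pinning down what that helper does. Below is a minimal sketch of its semantics, assuming the two-pass scheme the surrounding code implies (one pass only measures the context size, a second fills in default values); the real inline lives in nouveau_grctx.h and differs in detail:

	/* Illustrative sketch only -- NOT the actual nouveau_grctx.h
	 * helper. xf_emit(ctx, num, val) stands for "the next 'num'
	 * context slots default to 'val'": in values mode it writes
	 * them, in sizing mode it merely advances the cursor, so the
	 * same gene code both measures and fills the context image. */
	#include <stdint.h>

	enum grctx_mode { GRCTX_SIZE, GRCTX_VALS };

	struct grctx_sketch {
		enum grctx_mode mode;
		uint32_t *image;	/* NULL during the sizing pass */
		unsigned int pos;	/* cursor, in 32-bit slots */
	};

	static void
	xf_emit_sketch(struct grctx_sketch *ctx, int num, uint32_t val)
	{
		int i;

		/* zero slots are skipped, assuming a pre-zeroed image */
		if (ctx->mode == GRCTX_VALS && val)
			for (i = 0; i < num; i++)
				ctx->image[ctx->pos + i] = val;
		ctx->pos += num;
	}
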
1493 1758
1494static void 1759static void
1495nv50_graph_construct_gene_unk4(struct nouveau_grctx *ctx) 1760nv50_graph_construct_gene_clipid(struct nouveau_grctx *ctx)
1496{ 1761{
1497 /* middle of area 0 on pre-NVA0, middle of area 6 on NVAx */ 1762 /* middle of strand 0 on pre-NVA0 [after 24xx], middle of strand 6 on NVAx */
1498 xf_emit(ctx, 2, 0x04000000); 1763 /* SEEK */
1499 xf_emit(ctx, 1, 0); 1764 xf_emit(ctx, 1, 0); /* 00000007 UNK0FB4 */
1500 xf_emit(ctx, 1, 0x80); 1765 /* SEEK */
1501 xf_emit(ctx, 3, 0); 1766 xf_emit(ctx, 4, 0); /* 07ffffff CLIPID_REGION_HORIZ */
1502 xf_emit(ctx, 1, 0x80); 1767 xf_emit(ctx, 4, 0); /* 07ffffff CLIPID_REGION_VERT */
1503 xf_emit(ctx, 1, 0); 1768 xf_emit(ctx, 2, 0); /* 07ffffff SCREEN_SCISSOR */
1769 xf_emit(ctx, 2, 0x04000000); /* 07ffffff UNK1508 */
1770 xf_emit(ctx, 1, 0); /* 00000001 CLIPID_ENABLE */
1771 xf_emit(ctx, 1, 0x80); /* 00003fff CLIPID_WIDTH */
1772 xf_emit(ctx, 1, 0); /* 000000ff CLIPID_ID */
1773 xf_emit(ctx, 1, 0); /* 000000ff CLIPID_ADDRESS_HIGH */
1774 xf_emit(ctx, 1, 0); /* ffffffff CLIPID_ADDRESS_LOW */
1775 xf_emit(ctx, 1, 0x80); /* 00003fff CLIPID_HEIGHT */
1776 xf_emit(ctx, 1, 0); /* 0000ffff DMA_CLIPID */
1504} 1777}
1505 1778
1506static void 1779static void
1507nv50_graph_construct_gene_unk5(struct nouveau_grctx *ctx) 1780nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx)
1508{ 1781{
1509 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 1782 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
1510 /* middle of area 0 on pre-NVA0 [after m2mf], end of area 2 on NVAx */ 1783 int i;
1511 xf_emit(ctx, 2, 4); 1784 /* middle of strand 0 on pre-NVA0 [after m2mf], end of strand 2 on NVAx */
1512 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) 1785 /* SEEK */
1513 xf_emit(ctx, 0x1c4d, 0); 1786 xf_emit(ctx, 0x33, 0);
1787 /* SEEK */
1788 xf_emit(ctx, 2, 0);
1789 /* SEEK */
1790 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
1791 xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
1792 xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
1793 /* SEEK */
1794 if (IS_NVA3F(dev_priv->chipset)) {
1795 xf_emit(ctx, 4, 0); /* RO */
1796 xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */
1797 xf_emit(ctx, 1, 0); /* 1ff */
1798 xf_emit(ctx, 8, 0); /* 0? */
1799 xf_emit(ctx, 9, 0); /* ffffffff, 7ff */
1800
1801 xf_emit(ctx, 4, 0); /* RO */
1802 xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */
1803 xf_emit(ctx, 1, 0); /* 1ff */
1804 xf_emit(ctx, 8, 0); /* 0? */
1805 xf_emit(ctx, 9, 0); /* ffffffff, 7ff */
1806 }
1514 else 1807 else
1515 xf_emit(ctx, 0x1c4b, 0); 1808 {
1516 xf_emit(ctx, 2, 4); 1809 xf_emit(ctx, 0xc, 0); /* RO */
1517 xf_emit(ctx, 1, 0x8100c12); 1810 /* SEEK */
1811 xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */
1812 xf_emit(ctx, 1, 0); /* 1ff */
1813 xf_emit(ctx, 8, 0); /* 0? */
1814
1815 /* SEEK */
1816 xf_emit(ctx, 0xc, 0); /* RO */
1817 /* SEEK */
1818 xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */
1819 xf_emit(ctx, 1, 0); /* 1ff */
1820 xf_emit(ctx, 8, 0); /* 0? */
1821 }
1822 /* SEEK */
1823 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
1824 xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
1825 xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
1826 xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
1518 if (dev_priv->chipset != 0x50) 1827 if (dev_priv->chipset != 0x50)
1519 xf_emit(ctx, 1, 3); 1828 xf_emit(ctx, 1, 3); /* 00000003 tesla UNK1100 */
1520 xf_emit(ctx, 1, 0); 1829 /* SEEK */
1521 xf_emit(ctx, 1, 0x8100c12); 1830 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
1522 xf_emit(ctx, 1, 0); 1831 xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
1523 xf_emit(ctx, 1, 0x80c14); 1832 xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */
1524 xf_emit(ctx, 1, 1); 1833 xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */
1834 xf_emit(ctx, 1, 1); /* 00000001 */
1835 /* SEEK */
1525 if (dev_priv->chipset >= 0xa0) 1836 if (dev_priv->chipset >= 0xa0)
1526 xf_emit(ctx, 2, 4); 1837 xf_emit(ctx, 2, 4); /* 000000ff */
1527 xf_emit(ctx, 1, 0x80c14); 1838 xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */
1528 xf_emit(ctx, 2, 0); 1839 xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */
1529 xf_emit(ctx, 1, 0x8100c12); 1840 xf_emit(ctx, 1, 0); /* 00000001 POINT_SPRITE_ENABLE */
1530 xf_emit(ctx, 1, 0x27); 1841 xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
1531 xf_emit(ctx, 2, 0); 1842 xf_emit(ctx, 1, 0x27); /* 000000ff SEMANTIC_PRIM_ID */
1532 xf_emit(ctx, 1, 1); 1843 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
1533 xf_emit(ctx, 0x3c1, 0); 1844 xf_emit(ctx, 1, 0); /* 0000000f */
1534 xf_emit(ctx, 1, 1); 1845 xf_emit(ctx, 1, 1); /* 00000001 */
1535 xf_emit(ctx, 0x16, 0); 1846 for (i = 0; i < 10; i++) {
1536 xf_emit(ctx, 1, 0x8100c12); 1847 /* SEEK */
1537 xf_emit(ctx, 1, 0); 1848 xf_emit(ctx, 0x40, 0); /* ffffffff */
1849 xf_emit(ctx, 0x10, 0); /* 3, 0, 0.... */
1850 xf_emit(ctx, 0x10, 0); /* ffffffff */
1851 }
1852 /* SEEK */
1853 xf_emit(ctx, 1, 0); /* 00000001 POINT_SPRITE_CTRL */
1854 xf_emit(ctx, 1, 1); /* 00000001 */
1855 xf_emit(ctx, 1, 0); /* ffffffff */
1856 xf_emit(ctx, 4, 0); /* ffffffff NOPERSPECTIVE_BITMAP */
1857 xf_emit(ctx, 0x10, 0); /* 00ffffff POINT_COORD_REPLACE_MAP */
1858 xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */
1859 xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
1860 if (dev_priv->chipset != 0x50)
1861 xf_emit(ctx, 1, 0); /* 000003ff */
1538} 1862}
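
The patch also swaps the open-coded "chipset > 0xa0 && chipset < 0xaa" tests seen above for an IS_NVA3F() macro (and, further down, IS_NVAAF()). Neither definition is visible in this hunk; reconstructed from the conditions being replaced, and from the way 0xaf is grouped with the NVA3 family elsewhere in the patch, they presumably amount to:

	/* Informed guess, not the file's actual macros. */
	static int is_nva3f(int chipset)	/* NVA3/NVA5/NVA8 + NVAF */
	{
		return (chipset > 0xa0 && chipset < 0xaa) || chipset == 0xaf;
	}

	static int is_nvaaf(int chipset)	/* NVAA/NVAC IGPs */
	{
		return chipset >= 0xaa && chipset <= 0xac;
	}
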
1539 1863
1540static void 1864static void
1541nv50_graph_construct_gene_unk6(struct nouveau_grctx *ctx) 1865nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
1542{ 1866{
1543 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 1867 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
1544 /* beginning of area 1 on pre-NVA0 [after m2mf], area 3 on NVAx */ 1868 int acnt = 0x10, rep, i;
1545 xf_emit(ctx, 4, 0); 1869 /* beginning of strand 1 on pre-NVA0, strand 3 on NVAx */
1546 xf_emit(ctx, 1, 0xf); 1870 if (IS_NVA3F(dev_priv->chipset))
1547 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) 1871 acnt = 0x20;
1548 xf_emit(ctx, 8, 0); 1872 /* SEEK */
1549 else 1873 if (dev_priv->chipset >= 0xa0) {
1550 xf_emit(ctx, 4, 0); 1874 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK13A4 */
1551 xf_emit(ctx, 1, 0x20); 1875 xf_emit(ctx, 1, 1); /* 00000fff tesla UNK1318 */
1552 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) 1876 }
1553 xf_emit(ctx, 0x11, 0); 1877 xf_emit(ctx, 1, 0); /* ffffffff VERTEX_BUFFER_FIRST */
1878 xf_emit(ctx, 1, 0); /* 00000001 PRIMITIVE_RESTART_ENABLE */
1879 xf_emit(ctx, 1, 0); /* 00000001 UNK0DE8 */
1880 xf_emit(ctx, 1, 0); /* ffffffff PRIMITIVE_RESTART_INDEX */
1881 xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */
1882 xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */
 1883 xf_emit(ctx, acnt/8, 0); /* ffffffff VTX_ATTR_MASK_UNK0DD0 */
1884 xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */
1885 xf_emit(ctx, 1, 0x20); /* 0000ffff tesla UNK129C */
1886 xf_emit(ctx, 1, 0); /* 000000ff turing UNK370??? */
1887 xf_emit(ctx, 1, 0); /* 0000ffff turing USER_PARAM_COUNT */
1888 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
1889 /* SEEK */
1890 if (IS_NVA3F(dev_priv->chipset))
1891 xf_emit(ctx, 0xb, 0); /* RO */
1554 else if (dev_priv->chipset >= 0xa0) 1892 else if (dev_priv->chipset >= 0xa0)
1555 xf_emit(ctx, 0xf, 0); 1893 xf_emit(ctx, 0x9, 0); /* RO */
1556 else 1894 else
1557 xf_emit(ctx, 0xe, 0); 1895 xf_emit(ctx, 0x8, 0); /* RO */
1558 xf_emit(ctx, 1, 0x1a); 1896 /* SEEK */
1559 xf_emit(ctx, 0xd, 0); 1897 xf_emit(ctx, 1, 0); /* 00000001 EDGE_FLAG */
1560 xf_emit(ctx, 2, 4); 1898 xf_emit(ctx, 1, 0); /* 00000001 PROVOKING_VERTEX_LAST */
1561 xf_emit(ctx, 1, 0); 1899 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
1562 xf_emit(ctx, 1, 4); 1900 xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
1563 xf_emit(ctx, 1, 8); 1901 /* SEEK */
1564 xf_emit(ctx, 1, 0); 1902 xf_emit(ctx, 0xc, 0); /* RO */
1903 /* SEEK */
1904 xf_emit(ctx, 1, 0); /* 7f/ff */
1905 xf_emit(ctx, 1, 4); /* 7f/ff VP_REG_ALLOC_RESULT */
1906 xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */
1907 xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */
1908 xf_emit(ctx, 1, 4); /* 000001ff UNK1A28 */
1909 xf_emit(ctx, 1, 8); /* 000001ff UNK0DF0 */
1910 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
1565 if (dev_priv->chipset == 0x50) 1911 if (dev_priv->chipset == 0x50)
1566 xf_emit(ctx, 1, 0x3ff); 1912 xf_emit(ctx, 1, 0x3ff); /* 3ff tesla UNK0D68 */
1567 else 1913 else
1568 xf_emit(ctx, 1, 0x7ff); 1914 xf_emit(ctx, 1, 0x7ff); /* 7ff tesla UNK0D68 */
1569 if (dev_priv->chipset == 0xa8) 1915 if (dev_priv->chipset == 0xa8)
1570 xf_emit(ctx, 1, 0x1e00); 1916 xf_emit(ctx, 1, 0x1e00); /* 7fff */
1571 xf_emit(ctx, 0xc, 0); 1917 /* SEEK */
1572 xf_emit(ctx, 1, 0xf); 1918 xf_emit(ctx, 0xc, 0); /* RO or close */
1573 if (dev_priv->chipset == 0x50) 1919 /* SEEK */
1574 xf_emit(ctx, 0x125, 0); 1920 xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */
1575 else if (dev_priv->chipset < 0xa0) 1921 xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */
1576 xf_emit(ctx, 0x126, 0); 1922 xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */
1577 else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa) 1923 if (dev_priv->chipset > 0x50 && dev_priv->chipset < 0xa0)
1578 xf_emit(ctx, 0x124, 0); 1924 xf_emit(ctx, 2, 0); /* ffffffff */
1579 else 1925 else
1580 xf_emit(ctx, 0x1f7, 0); 1926 xf_emit(ctx, 1, 0); /* ffffffff */
1581 xf_emit(ctx, 1, 0xf); 1927 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0FD8 */
1582 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) 1928 /* SEEK */
1583 xf_emit(ctx, 3, 0); 1929 if (IS_NVA3F(dev_priv->chipset)) {
1930 xf_emit(ctx, 0x10, 0); /* 0? */
1931 xf_emit(ctx, 2, 0); /* weird... */
1932 xf_emit(ctx, 2, 0); /* RO */
1933 } else {
1934 xf_emit(ctx, 8, 0); /* 0? */
1935 xf_emit(ctx, 1, 0); /* weird... */
1936 xf_emit(ctx, 2, 0); /* RO */
1937 }
1938 /* SEEK */
1939 xf_emit(ctx, 1, 0); /* ffffffff VB_ELEMENT_BASE */
1940 xf_emit(ctx, 1, 0); /* ffffffff UNK1438 */
1941 xf_emit(ctx, acnt, 0); /* 1 tesla UNK1000 */
1942 if (dev_priv->chipset >= 0xa0)
1943 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1118? */
1944 /* SEEK */
1945 xf_emit(ctx, acnt, 0); /* ffffffff VERTEX_ARRAY_UNK90C */
1946 xf_emit(ctx, 1, 0); /* f/1f */
1947 /* SEEK */
1948 xf_emit(ctx, acnt, 0); /* ffffffff VERTEX_ARRAY_UNK90C */
1949 xf_emit(ctx, 1, 0); /* f/1f */
1950 /* SEEK */
1951 xf_emit(ctx, acnt, 0); /* RO */
1952 xf_emit(ctx, 2, 0); /* RO */
1953 /* SEEK */
1954 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK111C? */
1955 xf_emit(ctx, 1, 0); /* RO */
1956 /* SEEK */
1957 xf_emit(ctx, 1, 0); /* 000000ff UNK15F4_ADDRESS_HIGH */
1958 xf_emit(ctx, 1, 0); /* ffffffff UNK15F4_ADDRESS_LOW */
1959 xf_emit(ctx, 1, 0); /* 000000ff UNK0F84_ADDRESS_HIGH */
1960 xf_emit(ctx, 1, 0); /* ffffffff UNK0F84_ADDRESS_LOW */
1961 /* SEEK */
1962 xf_emit(ctx, acnt, 0); /* 00003fff VERTEX_ARRAY_ATTRIB_OFFSET */
1963 xf_emit(ctx, 3, 0); /* f/1f */
1964 /* SEEK */
1965 xf_emit(ctx, acnt, 0); /* 00000fff VERTEX_ARRAY_STRIDE */
1966 xf_emit(ctx, 3, 0); /* f/1f */
1967 /* SEEK */
1968 xf_emit(ctx, acnt, 0); /* ffffffff VERTEX_ARRAY_LOW */
1969 xf_emit(ctx, 3, 0); /* f/1f */
1970 /* SEEK */
1971 xf_emit(ctx, acnt, 0); /* 000000ff VERTEX_ARRAY_HIGH */
1972 xf_emit(ctx, 3, 0); /* f/1f */
1973 /* SEEK */
1974 xf_emit(ctx, acnt, 0); /* ffffffff VERTEX_LIMIT_LOW */
1975 xf_emit(ctx, 3, 0); /* f/1f */
1976 /* SEEK */
1977 xf_emit(ctx, acnt, 0); /* 000000ff VERTEX_LIMIT_HIGH */
1978 xf_emit(ctx, 3, 0); /* f/1f */
1979 /* SEEK */
1980 if (IS_NVA3F(dev_priv->chipset)) {
1981 xf_emit(ctx, acnt, 0); /* f */
1982 xf_emit(ctx, 3, 0); /* f/1f */
1983 }
1984 /* SEEK */
1985 if (IS_NVA3F(dev_priv->chipset))
1986 xf_emit(ctx, 2, 0); /* RO */
1987 else
1988 xf_emit(ctx, 5, 0); /* RO */
1989 /* SEEK */
1990 xf_emit(ctx, 1, 0); /* ffff DMA_VTXBUF */
1991 /* SEEK */
1992 if (dev_priv->chipset < 0xa0) {
1993 xf_emit(ctx, 0x41, 0); /* RO */
1994 /* SEEK */
1995 xf_emit(ctx, 0x11, 0); /* RO */
1996 } else if (!IS_NVA3F(dev_priv->chipset))
1997 xf_emit(ctx, 0x50, 0); /* RO */
1584 else 1998 else
1585 xf_emit(ctx, 1, 0); 1999 xf_emit(ctx, 0x58, 0); /* RO */
1586 xf_emit(ctx, 1, 1); 2000 /* SEEK */
1587 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) 2001 xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */
1588 xf_emit(ctx, 0xa1, 0); 2002 xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */
2003 xf_emit(ctx, 1, 1); /* 1 UNK0DEC */
2004 /* SEEK */
2005 xf_emit(ctx, acnt*4, 0); /* ffffffff VTX_ATTR */
2006 xf_emit(ctx, 4, 0); /* f/1f, 0, 0, 0 */
2007 /* SEEK */
2008 if (IS_NVA3F(dev_priv->chipset))
2009 xf_emit(ctx, 0x1d, 0); /* RO */
1589 else 2010 else
1590 xf_emit(ctx, 0x5a, 0); 2011 xf_emit(ctx, 0x16, 0); /* RO */
1591 xf_emit(ctx, 1, 0xf); 2012 /* SEEK */
2013 xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */
2014 xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */
2015 /* SEEK */
1592 if (dev_priv->chipset < 0xa0) 2016 if (dev_priv->chipset < 0xa0)
1593 xf_emit(ctx, 0x834, 0); 2017 xf_emit(ctx, 8, 0); /* RO */
1594 else if (dev_priv->chipset == 0xa0) 2018 else if (IS_NVA3F(dev_priv->chipset))
1595 xf_emit(ctx, 0x1873, 0); 2019 xf_emit(ctx, 0xc, 0); /* RO */
1596 else if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) 2020 else
1597 xf_emit(ctx, 0x8ba, 0); 2021 xf_emit(ctx, 7, 0); /* RO */
2022 /* SEEK */
2023 xf_emit(ctx, 0xa, 0); /* RO */
2024 if (dev_priv->chipset == 0xa0)
2025 rep = 0xc;
2026 else
2027 rep = 4;
2028 for (i = 0; i < rep; i++) {
2029 /* SEEK */
2030 if (IS_NVA3F(dev_priv->chipset))
2031 xf_emit(ctx, 0x20, 0); /* ffffffff */
2032 xf_emit(ctx, 0x200, 0); /* ffffffff */
2033 xf_emit(ctx, 4, 0); /* 7f/ff, 0, 0, 0 */
2034 xf_emit(ctx, 4, 0); /* ffffffff */
2035 }
2036 /* SEEK */
2037 xf_emit(ctx, 1, 0); /* 113/111 */
2038 xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */
2039 xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */
2040 xf_emit(ctx, acnt/8, 0); /* ffffffff VTX_ATTR_MASK_UNK0DD0 */
2041 xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */
2042 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
2043 /* SEEK */
2044 if (IS_NVA3F(dev_priv->chipset))
2045 xf_emit(ctx, 7, 0); /* weird... */
1598 else 2046 else
1599 xf_emit(ctx, 0x833, 0); 2047 xf_emit(ctx, 5, 0); /* weird... */
1600 xf_emit(ctx, 1, 0xf);
1601 xf_emit(ctx, 0xf, 0);
1602} 2048}
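
Everything in gene_vfetch is sized off acnt. The sizing rule, restated as a sketch (the 4-bits-per-attribute reading of VP_ATTR_EN is an inference from the acnt/8 word counts, not something the patch states):

	/* 0x10 vertex attributes normally, 0x20 on the NVA3F family;
	 * the VP_ATTR_EN mask then needs one 32-bit word per 8
	 * attributes, i.e. 4 bits per attribute -- hence the
	 * xf_emit(ctx, (acnt/8)-1, 0) runs after the leading 0xf. */
	static int vfetch_attr_count(int chipset)
	{
		int nva3f = (chipset > 0xa0 && chipset < 0xaa) ||
			    chipset == 0xaf;
		return nva3f ? 0x20 : 0x10;
	}

	static int vp_attr_en_words(int chipset)
	{
		return vfetch_attr_count(chipset) / 8;	/* 2 or 4 */
	}
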
1603 2049
1604static void 2050static void
1605nv50_graph_construct_gene_unk7(struct nouveau_grctx *ctx) 2051nv50_graph_construct_gene_eng2d(struct nouveau_grctx *ctx)
1606{ 2052{
1607 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 2053 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
1608 /* middle of area 1 on pre-NVA0 [after m2mf], middle of area 6 on NVAx */ 2054 /* middle of strand 1 on pre-NVA0 [after vfetch], middle of strand 6 on NVAx */
1609 xf_emit(ctx, 2, 0); 2055 /* SEEK */
1610 if (dev_priv->chipset == 0x50) 2056 xf_emit(ctx, 2, 0); /* 0001ffff CLIP_X, CLIP_Y */
1611 xf_emit(ctx, 2, 1); 2057 xf_emit(ctx, 2, 0); /* 0000ffff CLIP_W, CLIP_H */
1612 else 2058 xf_emit(ctx, 1, 0); /* 00000001 CLIP_ENABLE */
1613 xf_emit(ctx, 2, 0); 2059 if (dev_priv->chipset < 0xa0) {
1614 xf_emit(ctx, 1, 0); 2060 /* this is useless on everything but the original NV50,
1615 xf_emit(ctx, 1, 1); 2061 * guess they forgot to nuke it. Or just didn't bother. */
1616 xf_emit(ctx, 2, 0x100); 2062 xf_emit(ctx, 2, 0); /* 0000ffff IFC_CLIP_X, Y */
1617 xf_emit(ctx, 1, 0x11); 2063 xf_emit(ctx, 2, 1); /* 0000ffff IFC_CLIP_W, H */
1618 xf_emit(ctx, 1, 0); 2064 xf_emit(ctx, 1, 0); /* 00000001 IFC_CLIP_ENABLE */
1619 xf_emit(ctx, 1, 8); 2065 }
1620 xf_emit(ctx, 5, 0); 2066 xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */
1621 xf_emit(ctx, 1, 1); 2067 xf_emit(ctx, 1, 0x100); /* 0001ffff DST_WIDTH */
1622 xf_emit(ctx, 1, 0); 2068 xf_emit(ctx, 1, 0x100); /* 0001ffff DST_HEIGHT */
1623 xf_emit(ctx, 3, 1); 2069 xf_emit(ctx, 1, 0x11); /* 3f[NV50]/7f[NV84+] DST_FORMAT */
1624 xf_emit(ctx, 1, 0xcf); 2070 xf_emit(ctx, 1, 0); /* 0001ffff DRAW_POINT_X */
1625 xf_emit(ctx, 1, 2); 2071 xf_emit(ctx, 1, 8); /* 0000000f DRAW_UNK58C */
1626 xf_emit(ctx, 6, 0); 2072 xf_emit(ctx, 1, 0); /* 000fffff SIFC_DST_X_FRACT */
1627 xf_emit(ctx, 1, 1); 2073 xf_emit(ctx, 1, 0); /* 0001ffff SIFC_DST_X_INT */
1628 xf_emit(ctx, 1, 0); 2074 xf_emit(ctx, 1, 0); /* 000fffff SIFC_DST_Y_FRACT */
1629 xf_emit(ctx, 3, 1); 2075 xf_emit(ctx, 1, 0); /* 0001ffff SIFC_DST_Y_INT */
1630 xf_emit(ctx, 4, 0); 2076 xf_emit(ctx, 1, 0); /* 000fffff SIFC_DX_DU_FRACT */
1631 xf_emit(ctx, 1, 4); 2077 xf_emit(ctx, 1, 1); /* 0001ffff SIFC_DX_DU_INT */
1632 xf_emit(ctx, 1, 0); 2078 xf_emit(ctx, 1, 0); /* 000fffff SIFC_DY_DV_FRACT */
1633 xf_emit(ctx, 1, 1); 2079 xf_emit(ctx, 1, 1); /* 0001ffff SIFC_DY_DV_INT */
1634 xf_emit(ctx, 1, 0x15); 2080 xf_emit(ctx, 1, 1); /* 0000ffff SIFC_WIDTH */
1635 xf_emit(ctx, 3, 0); 2081 xf_emit(ctx, 1, 1); /* 0000ffff SIFC_HEIGHT */
1636 xf_emit(ctx, 1, 0x4444480); 2082 xf_emit(ctx, 1, 0xcf); /* 000000ff SIFC_FORMAT */
1637 xf_emit(ctx, 0x37, 0); 2083 xf_emit(ctx, 1, 2); /* 00000003 SIFC_BITMAP_UNK808 */
2084 xf_emit(ctx, 1, 0); /* 00000003 SIFC_BITMAP_LINE_PACK_MODE */
2085 xf_emit(ctx, 1, 0); /* 00000001 SIFC_BITMAP_LSB_FIRST */
2086 xf_emit(ctx, 1, 0); /* 00000001 SIFC_BITMAP_ENABLE */
2087 xf_emit(ctx, 1, 0); /* 0000ffff BLIT_DST_X */
2088 xf_emit(ctx, 1, 0); /* 0000ffff BLIT_DST_Y */
2089 xf_emit(ctx, 1, 0); /* 000fffff BLIT_DU_DX_FRACT */
2090 xf_emit(ctx, 1, 1); /* 0001ffff BLIT_DU_DX_INT */
2091 xf_emit(ctx, 1, 0); /* 000fffff BLIT_DV_DY_FRACT */
2092 xf_emit(ctx, 1, 1); /* 0001ffff BLIT_DV_DY_INT */
2093 xf_emit(ctx, 1, 1); /* 0000ffff BLIT_DST_W */
2094 xf_emit(ctx, 1, 1); /* 0000ffff BLIT_DST_H */
2095 xf_emit(ctx, 1, 0); /* 000fffff BLIT_SRC_X_FRACT */
2096 xf_emit(ctx, 1, 0); /* 0001ffff BLIT_SRC_X_INT */
2097 xf_emit(ctx, 1, 0); /* 000fffff BLIT_SRC_Y_FRACT */
2098 xf_emit(ctx, 1, 0); /* 00000001 UNK888 */
2099 xf_emit(ctx, 1, 4); /* 0000003f UNK884 */
2100 xf_emit(ctx, 1, 0); /* 00000007 UNK880 */
2101 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK0FB8 */
2102 xf_emit(ctx, 1, 0x15); /* 000000ff tesla UNK128C */
2103 xf_emit(ctx, 2, 0); /* 00000007, ffff0ff3 */
2104 xf_emit(ctx, 1, 0); /* 00000001 UNK260 */
2105 xf_emit(ctx, 1, 0x4444480); /* 1fffffff UNK870 */
2106 /* SEEK */
2107 xf_emit(ctx, 0x10, 0);
2108 /* SEEK */
2109 xf_emit(ctx, 0x27, 0);
1638} 2110}
1639 2111
1640static void 2112static void
1641nv50_graph_construct_gene_unk8(struct nouveau_grctx *ctx) 2113nv50_graph_construct_gene_csched(struct nouveau_grctx *ctx)
1642{ 2114{
1643 /* middle of area 1 on pre-NVA0 [after m2mf], middle of area 0 on NVAx */ 2115 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
1644 xf_emit(ctx, 4, 0); 2116 /* middle of strand 1 on pre-NVA0 [after eng2d], middle of strand 0 on NVAx */
1645 xf_emit(ctx, 1, 0x8100c12); 2117 /* SEEK */
1646 xf_emit(ctx, 4, 0); 2118 xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY... what is it doing here??? */
1647 xf_emit(ctx, 1, 0x100); 2119 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1924 */
1648 xf_emit(ctx, 2, 0); 2120 xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */
1649 xf_emit(ctx, 1, 0x10001); 2121 xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
1650 xf_emit(ctx, 1, 0); 2122 xf_emit(ctx, 1, 0); /* 000003ff */
1651 xf_emit(ctx, 1, 0x10001); 2123 /* SEEK */
1652 xf_emit(ctx, 1, 1); 2124 xf_emit(ctx, 1, 0); /* ffffffff turing UNK364 */
1653 xf_emit(ctx, 1, 0x10001); 2125 xf_emit(ctx, 1, 0); /* 0000000f turing UNK36C */
1654 xf_emit(ctx, 1, 1); 2126 xf_emit(ctx, 1, 0); /* 0000ffff USER_PARAM_COUNT */
1655 xf_emit(ctx, 1, 4); 2127 xf_emit(ctx, 1, 0x100); /* 00ffffff turing UNK384 */
1656 xf_emit(ctx, 1, 2); 2128 xf_emit(ctx, 1, 0); /* 0000000f turing UNK2A0 */
2129 xf_emit(ctx, 1, 0); /* 0000ffff GRIDID */
2130 xf_emit(ctx, 1, 0x10001); /* ffffffff GRIDDIM_XY */
2131 xf_emit(ctx, 1, 0); /* ffffffff */
2132 xf_emit(ctx, 1, 0x10001); /* ffffffff BLOCKDIM_XY */
2133 xf_emit(ctx, 1, 1); /* 0000ffff BLOCKDIM_Z */
2134 xf_emit(ctx, 1, 0x10001); /* 00ffffff BLOCK_ALLOC */
2135 xf_emit(ctx, 1, 1); /* 00000001 LANES32 */
2136 xf_emit(ctx, 1, 4); /* 000000ff FP_REG_ALLOC_TEMP */
2137 xf_emit(ctx, 1, 2); /* 00000003 REG_MODE */
2138 /* SEEK */
2139 xf_emit(ctx, 0x40, 0); /* ffffffff USER_PARAM */
2140 switch (dev_priv->chipset) {
2141 case 0x50:
2142 case 0x92:
2143 xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */
2144 xf_emit(ctx, 0x80, 0); /* fff */
2145 xf_emit(ctx, 2, 0); /* ff, fff */
2146 xf_emit(ctx, 0x10*2, 0); /* ffffffff, 1f */
2147 break;
2148 case 0x84:
2149 xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */
2150 xf_emit(ctx, 0x60, 0); /* fff */
2151 xf_emit(ctx, 2, 0); /* ff, fff */
2152 xf_emit(ctx, 0xc*2, 0); /* ffffffff, 1f */
2153 break;
2154 case 0x94:
2155 case 0x96:
2156 xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */
2157 xf_emit(ctx, 0x40, 0); /* fff */
2158 xf_emit(ctx, 2, 0); /* ff, fff */
2159 xf_emit(ctx, 8*2, 0); /* ffffffff, 1f */
2160 break;
2161 case 0x86:
2162 case 0x98:
2163 xf_emit(ctx, 4, 0); /* f, 0, 0, 0 */
2164 xf_emit(ctx, 0x10, 0); /* fff */
2165 xf_emit(ctx, 2, 0); /* ff, fff */
2166 xf_emit(ctx, 2*2, 0); /* ffffffff, 1f */
2167 break;
2168 case 0xa0:
2169 xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */
2170 xf_emit(ctx, 0xf0, 0); /* fff */
2171 xf_emit(ctx, 2, 0); /* ff, fff */
2172 xf_emit(ctx, 0x1e*2, 0); /* ffffffff, 1f */
2173 break;
2174 case 0xa3:
2175 xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */
2176 xf_emit(ctx, 0x60, 0); /* fff */
2177 xf_emit(ctx, 2, 0); /* ff, fff */
2178 xf_emit(ctx, 0xc*2, 0); /* ffffffff, 1f */
2179 break;
2180 case 0xa5:
2181 case 0xaf:
2182 xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */
2183 xf_emit(ctx, 0x30, 0); /* fff */
2184 xf_emit(ctx, 2, 0); /* ff, fff */
2185 xf_emit(ctx, 6*2, 0); /* ffffffff, 1f */
2186 break;
2187 case 0xaa:
2188 xf_emit(ctx, 0x12, 0);
2189 break;
2190 case 0xa8:
2191 case 0xac:
2192 xf_emit(ctx, 4, 0); /* f, 0, 0, 0 */
2193 xf_emit(ctx, 0x10, 0); /* fff */
2194 xf_emit(ctx, 2, 0); /* ff, fff */
2195 xf_emit(ctx, 2*2, 0); /* ffffffff, 1f */
2196 break;
2197 }
2198 xf_emit(ctx, 1, 0); /* 0000000f */
2199 xf_emit(ctx, 1, 0); /* 00000000 */
2200 xf_emit(ctx, 1, 0); /* ffffffff */
2201 xf_emit(ctx, 1, 0); /* 0000001f */
2202 xf_emit(ctx, 4, 0); /* ffffffff */
2203 xf_emit(ctx, 1, 0); /* 00000003 turing UNK35C */
2204 xf_emit(ctx, 1, 0); /* ffffffff */
2205 xf_emit(ctx, 4, 0); /* ffffffff */
2206 xf_emit(ctx, 1, 0); /* 00000003 turing UNK35C */
2207 xf_emit(ctx, 1, 0); /* ffffffff */
2208 xf_emit(ctx, 1, 0); /* 000000ff */
1657} 2209}
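
The per-chipset blocks in the switch above differ only in size: a short head, a run of 0xfff-masked words, a fixed (ff, fff) pair, then a run of (ffffffff, 1f) pairs. Gathered into one table for readability (values copied from the cases verbatim; the 0xfff run is always exactly 8x the trailing pair count, which suggests both scale with some per-chipset unit count -- TPs, presumably -- though the patch itself doesn't say; NVAA instead emits a flat run of 0x12 zeros):

	/* head = leading words (7/f, 0, 0, 0, ...); fff = 0xfff-masked
	 * words; pairs = trailing (ffffffff, 1f) pairs. Copied from the
	 * switch; the TP interpretation is an assumption. */
	struct csched_layout { int chipset, head, fff, pairs; };

	static const struct csched_layout csched_layouts[] = {
		{ 0x50, 8, 0x80, 0x10 }, { 0x92, 8, 0x80, 0x10 },
		{ 0x84, 8, 0x60, 0x0c },
		{ 0x94, 8, 0x40, 0x08 }, { 0x96, 8, 0x40, 0x08 },
		{ 0x86, 4, 0x10, 0x02 }, { 0x98, 4, 0x10, 0x02 },
		{ 0xa0, 8, 0xf0, 0x1e },
		{ 0xa3, 8, 0x60, 0x0c },
		{ 0xa5, 8, 0x30, 0x06 }, { 0xaf, 8, 0x30, 0x06 },
		{ 0xa8, 4, 0x10, 0x02 }, { 0xac, 4, 0x10, 0x02 },
	};
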
1658 2210
1659static void 2211static void
1660nv50_graph_construct_gene_unk9(struct nouveau_grctx *ctx) 2212nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
1661{ 2213{
1662 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 2214 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
1663 /* middle of area 2 on pre-NVA0 [after m2mf], end of area 0 on NVAx */ 2215 xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY */
1664 xf_emit(ctx, 1, 0x3f800000); 2216 xf_emit(ctx, 1, 0x3f800000); /* ffffffff LINE_WIDTH */
1665 xf_emit(ctx, 6, 0); 2217 xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */
1666 xf_emit(ctx, 1, 4); 2218 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1658 */
1667 xf_emit(ctx, 1, 0x1a); 2219 xf_emit(ctx, 1, 0); /* 00000001 POLYGON_SMOOTH_ENABLE */
1668 xf_emit(ctx, 2, 0); 2220 xf_emit(ctx, 3, 0); /* 00000001 POLYGON_OFFSET_*_ENABLE */
1669 xf_emit(ctx, 1, 1); 2221 xf_emit(ctx, 1, 4); /* 0000000f CULL_MODE */
1670 xf_emit(ctx, 0x12, 0); 2222 xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
1671 xf_emit(ctx, 1, 0x00ffff00); 2223 xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
1672 xf_emit(ctx, 6, 0); 2224 xf_emit(ctx, 1, 0); /* 00000001 POINT_SPRITE_ENABLE */
1673 xf_emit(ctx, 1, 0xf); 2225 xf_emit(ctx, 1, 1); /* 00000001 tesla UNK165C */
1674 xf_emit(ctx, 7, 0); 2226 xf_emit(ctx, 0x10, 0); /* 00000001 SCISSOR_ENABLE */
1675 xf_emit(ctx, 1, 0x0fac6881); 2227 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
1676 xf_emit(ctx, 1, 0x11); 2228 xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
1677 xf_emit(ctx, 0xf, 0); 2229 xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */
1678 xf_emit(ctx, 1, 4); 2230 xf_emit(ctx, 1, 0); /* ffffffff POLYGON_OFFSET_UNITS */
1679 xf_emit(ctx, 2, 0); 2231 xf_emit(ctx, 1, 0); /* ffffffff POLYGON_OFFSET_FACTOR */
1680 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) 2232 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1668 */
1681 xf_emit(ctx, 1, 3); 2233 xf_emit(ctx, 2, 0); /* 07ffffff SCREEN_SCISSOR */
2234 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */
2235 xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */
2236 xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */
2237 xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
2238 xf_emit(ctx, 1, 0x11); /* 0000007f RT_FORMAT */
2239 xf_emit(ctx, 7, 0); /* 0000007f RT_FORMAT */
2240 xf_emit(ctx, 8, 0); /* 00000001 RT_HORIZ_LINEAR */
2241 xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */
2242 xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
2243 xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */
2244 if (IS_NVA3F(dev_priv->chipset))
2245 xf_emit(ctx, 1, 3); /* 00000003 UNK16B4 */
1682 else if (dev_priv->chipset >= 0xa0) 2246 else if (dev_priv->chipset >= 0xa0)
1683 xf_emit(ctx, 1, 1); 2247 xf_emit(ctx, 1, 1); /* 00000001 UNK16B4 */
1684 xf_emit(ctx, 2, 0); 2248 xf_emit(ctx, 1, 0); /* 00000003 MULTISAMPLE_CTRL */
1685 xf_emit(ctx, 1, 2); 2249 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0F90 */
1686 xf_emit(ctx, 2, 0x04000000); 2250 xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */
1687 xf_emit(ctx, 3, 0); 2251 xf_emit(ctx, 2, 0x04000000); /* 07ffffff tesla UNK0D6C */
1688 xf_emit(ctx, 1, 5); 2252 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */
1689 xf_emit(ctx, 1, 0x52); 2253 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
1690 if (dev_priv->chipset == 0x50) { 2254 xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */
1691 xf_emit(ctx, 0x13, 0); 2255 xf_emit(ctx, 1, 5); /* 0000000f UNK1408 */
1692 } else { 2256 xf_emit(ctx, 1, 0x52); /* 000001ff SEMANTIC_PTSZ */
1693 xf_emit(ctx, 4, 0); 2257 xf_emit(ctx, 1, 0); /* ffffffff POINT_SIZE */
1694 xf_emit(ctx, 1, 1); 2258 xf_emit(ctx, 1, 0); /* 00000001 */
1695 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) 2259 xf_emit(ctx, 1, 0); /* 00000007 tesla UNK0FB4 */
1696 xf_emit(ctx, 0x11, 0); 2260 if (dev_priv->chipset != 0x50) {
1697 else 2261 xf_emit(ctx, 1, 0); /* 3ff */
1698 xf_emit(ctx, 0x10, 0); 2262 xf_emit(ctx, 1, 1); /* 00000001 tesla UNK1110 */
1699 } 2263 }
1700 xf_emit(ctx, 0x10, 0x3f800000); 2264 if (IS_NVA3F(dev_priv->chipset))
1701 xf_emit(ctx, 1, 0x10); 2265 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1928 */
1702 xf_emit(ctx, 0x26, 0); 2266 xf_emit(ctx, 0x10, 0); /* ffffffff DEPTH_RANGE_NEAR */
1703 xf_emit(ctx, 1, 0x8100c12); 2267 xf_emit(ctx, 0x10, 0x3f800000); /* ffffffff DEPTH_RANGE_FAR */
1704 xf_emit(ctx, 1, 5); 2268 xf_emit(ctx, 1, 0x10); /* 000000ff VIEW_VOLUME_CLIP_CTRL */
1705 xf_emit(ctx, 2, 0); 2269 xf_emit(ctx, 0x20, 0); /* 07ffffff VIEWPORT_HORIZ, then VIEWPORT_VERT. (W&0x3fff)<<13 | (X&0x1fff). */
1706 xf_emit(ctx, 1, 1); 2270 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK187C */
1707 xf_emit(ctx, 1, 0); 2271 xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */
1708 xf_emit(ctx, 4, 0xffff); 2272 xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
2273 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
2274 xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */
2275 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */
2276 xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
2277 xf_emit(ctx, 1, 5); /* 0000000f tesla UNK1220 */
2278 xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
2279 xf_emit(ctx, 1, 0); /* 000000ff tesla UNK1A20 */
2280 xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
2281 xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */
2282 xf_emit(ctx, 4, 0xffff); /* 0000ffff MSAA_MASK */
1709 if (dev_priv->chipset != 0x50) 2283 if (dev_priv->chipset != 0x50)
1710 xf_emit(ctx, 1, 3); 2284 xf_emit(ctx, 1, 3); /* 00000003 tesla UNK1100 */
1711 if (dev_priv->chipset < 0xa0) 2285 if (dev_priv->chipset < 0xa0)
1712 xf_emit(ctx, 0x1f, 0); 2286 xf_emit(ctx, 0x1c, 0); /* RO */
1713 else if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) 2287 else if (IS_NVA3F(dev_priv->chipset))
1714 xf_emit(ctx, 0xc, 0); 2288 xf_emit(ctx, 0x9, 0);
1715 else 2289 xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */
1716 xf_emit(ctx, 3, 0); 2290 xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */
1717 xf_emit(ctx, 1, 0x00ffff00); 2291 xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
1718 xf_emit(ctx, 1, 0x1a); 2292 xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */
2293 xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
2294 xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */
1719 if (dev_priv->chipset != 0x50) { 2295 if (dev_priv->chipset != 0x50) {
1720 xf_emit(ctx, 1, 0); 2296 xf_emit(ctx, 1, 3); /* 00000003 tesla UNK1100 */
1721 xf_emit(ctx, 1, 3); 2297 xf_emit(ctx, 1, 0); /* 3ff */
1722 } 2298 }
2299 /* XXX: the following block could belong either to unk1cxx, or
2300 * to STRMOUT. Rather hard to tell. */
1723 if (dev_priv->chipset < 0xa0) 2301 if (dev_priv->chipset < 0xa0)
1724 xf_emit(ctx, 0x26, 0); 2302 xf_emit(ctx, 0x25, 0);
1725 else 2303 else
1726 xf_emit(ctx, 0x3c, 0); 2304 xf_emit(ctx, 0x3b, 0);
1727 xf_emit(ctx, 1, 0x102); 2305}
1728 xf_emit(ctx, 1, 0); 2306
1729 xf_emit(ctx, 4, 4); 2307static void
1730 if (dev_priv->chipset >= 0xa0) 2308nv50_graph_construct_gene_strmout(struct nouveau_grctx *ctx)
1731 xf_emit(ctx, 8, 0); 2309{
1732 xf_emit(ctx, 2, 4); 2310 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
1733 xf_emit(ctx, 1, 0); 2311 xf_emit(ctx, 1, 0x102); /* 0000ffff STRMOUT_BUFFER_CTRL */
2312 xf_emit(ctx, 1, 0); /* ffffffff STRMOUT_PRIMITIVE_COUNT */
2313 xf_emit(ctx, 4, 4); /* 000000ff STRMOUT_NUM_ATTRIBS */
2314 if (dev_priv->chipset >= 0xa0) {
2315 xf_emit(ctx, 4, 0); /* ffffffff UNK1A8C */
2316 xf_emit(ctx, 4, 0); /* ffffffff UNK1780 */
2317 }
2318 xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
2319 xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
2320 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
1734 if (dev_priv->chipset == 0x50) 2321 if (dev_priv->chipset == 0x50)
1735 xf_emit(ctx, 1, 0x3ff); 2322 xf_emit(ctx, 1, 0x3ff); /* 000003ff tesla UNK0D68 */
1736 else 2323 else
1737 xf_emit(ctx, 1, 0x7ff); 2324 xf_emit(ctx, 1, 0x7ff); /* 000007ff tesla UNK0D68 */
1738 xf_emit(ctx, 1, 0); 2325 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
1739 xf_emit(ctx, 1, 0x102); 2326 /* SEEK */
1740 xf_emit(ctx, 9, 0); 2327 xf_emit(ctx, 1, 0x102); /* 0000ffff STRMOUT_BUFFER_CTRL */
1741 xf_emit(ctx, 4, 4); 2328 xf_emit(ctx, 1, 0); /* ffffffff STRMOUT_PRIMITIVE_COUNT */
1742 xf_emit(ctx, 0x2c, 0); 2329 xf_emit(ctx, 4, 0); /* 000000ff STRMOUT_ADDRESS_HIGH */
2330 xf_emit(ctx, 4, 0); /* ffffffff STRMOUT_ADDRESS_LOW */
2331 xf_emit(ctx, 4, 4); /* 000000ff STRMOUT_NUM_ATTRIBS */
2332 if (dev_priv->chipset >= 0xa0) {
2333 xf_emit(ctx, 4, 0); /* ffffffff UNK1A8C */
2334 xf_emit(ctx, 4, 0); /* ffffffff UNK1780 */
2335 }
2336 xf_emit(ctx, 1, 0); /* 0000ffff DMA_STRMOUT */
2337 xf_emit(ctx, 1, 0); /* 0000ffff DMA_QUERY */
2338 xf_emit(ctx, 1, 0); /* 000000ff QUERY_ADDRESS_HIGH */
2339 xf_emit(ctx, 2, 0); /* ffffffff QUERY_ADDRESS_LOW QUERY_COUNTER */
2340 xf_emit(ctx, 2, 0); /* ffffffff */
2341 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
2342 /* SEEK */
2343 xf_emit(ctx, 0x20, 0); /* ffffffff STRMOUT_MAP */
2344 xf_emit(ctx, 1, 0); /* 0000000f */
2345 xf_emit(ctx, 1, 0); /* 00000000? */
2346 xf_emit(ctx, 2, 0); /* ffffffff */
2347}
2348
2349static void
2350nv50_graph_construct_gene_ropm1(struct nouveau_grctx *ctx)
2351{
2352 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
2353 xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0D64 */
2354 xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0DF4 */
2355 xf_emit(ctx, 1, 0); /* 00000007 */
2356 xf_emit(ctx, 1, 0); /* 000003ff */
2357 if (IS_NVA3F(dev_priv->chipset))
2358 xf_emit(ctx, 1, 0x11); /* 000000ff tesla UNK1968 */
2359 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
2360}
2361
2362static void
2363nv50_graph_construct_gene_ropm2(struct nouveau_grctx *ctx)
2364{
2365 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
2366 /* SEEK */
2367 xf_emit(ctx, 1, 0); /* 0000ffff DMA_QUERY */
2368 xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
2369 xf_emit(ctx, 2, 0); /* ffffffff */
2370 xf_emit(ctx, 1, 0); /* 000000ff QUERY_ADDRESS_HIGH */
2371 xf_emit(ctx, 2, 0); /* ffffffff QUERY_ADDRESS_LOW, COUNTER */
2372 xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */
2373 xf_emit(ctx, 1, 0); /* 7 */
2374 /* SEEK */
2375 xf_emit(ctx, 1, 0); /* 0000ffff DMA_QUERY */
2376 xf_emit(ctx, 1, 0); /* 000000ff QUERY_ADDRESS_HIGH */
2377 xf_emit(ctx, 2, 0); /* ffffffff QUERY_ADDRESS_LOW, COUNTER */
2378 xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0D64 */
2379 xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0DF4 */
2380 xf_emit(ctx, 1, 0); /* 00000001 eng2d UNK260 */
2381 xf_emit(ctx, 1, 0); /* ff/3ff */
2382 xf_emit(ctx, 1, 0); /* 00000007 */
2383 if (IS_NVA3F(dev_priv->chipset))
2384 xf_emit(ctx, 1, 0x11); /* 000000ff tesla UNK1968 */
2385 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
1743} 2386}
1744 2387
1745static void 2388static void
@@ -1749,443 +2392,709 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
1749 int magic2; 2392 int magic2;
1750 if (dev_priv->chipset == 0x50) { 2393 if (dev_priv->chipset == 0x50) {
1751 magic2 = 0x00003e60; 2394 magic2 = 0x00003e60;
1752 } else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) { 2395 } else if (!IS_NVA3F(dev_priv->chipset)) {
1753 magic2 = 0x001ffe67; 2396 magic2 = 0x001ffe67;
1754 } else { 2397 } else {
1755 magic2 = 0x00087e67; 2398 magic2 = 0x00087e67;
1756 } 2399 }
1757 xf_emit(ctx, 8, 0); 2400 xf_emit(ctx, 1, 0); /* f/7 MULTISAMPLE_SAMPLES_LOG2 */
1758 xf_emit(ctx, 1, 2); 2401 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
1759 xf_emit(ctx, 1, 0); 2402 xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */
1760 xf_emit(ctx, 1, magic2); 2403 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */
1761 xf_emit(ctx, 4, 0); 2404 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */
1762 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) 2405 xf_emit(ctx, 3, 0); /* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */
1763 xf_emit(ctx, 1, 1); 2406 xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */
1764 xf_emit(ctx, 7, 0); 2407 xf_emit(ctx, 1, 0); /* ffff0ff3 */
1765 if (dev_priv->chipset >= 0xa0 && dev_priv->chipset < 0xaa) 2408 xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */
1766 xf_emit(ctx, 1, 0x15); 2409 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
1767 xf_emit(ctx, 1, 0); 2410 xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */
1768 xf_emit(ctx, 1, 1); 2411 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
1769 xf_emit(ctx, 1, 0x10); 2412 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
1770 xf_emit(ctx, 2, 0); 2413 if (IS_NVA3F(dev_priv->chipset))
1771 xf_emit(ctx, 1, 1); 2414 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
1772 xf_emit(ctx, 4, 0); 2415 xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */
2416 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */
2417 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */
2418 xf_emit(ctx, 3, 0); /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */
2419 xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
2420 if (dev_priv->chipset >= 0xa0 && !IS_NVAAF(dev_priv->chipset))
2421 xf_emit(ctx, 1, 0x15); /* 000000ff */
2422 xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */
2423 xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */
2424 xf_emit(ctx, 1, 0x10); /* 3ff/ff VIEW_VOLUME_CLIP_CTRL */
2425 xf_emit(ctx, 1, 0); /* ffffffff CLEAR_DEPTH */
2426 xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
2427 xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
2428 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
1773 if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x92 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa0) { 2429 if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x92 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa0) {
1774 xf_emit(ctx, 1, 4); 2430 xf_emit(ctx, 3, 0); /* ff, ffffffff, ffffffff */
1775 xf_emit(ctx, 1, 0x400); 2431 xf_emit(ctx, 1, 4); /* 7 */
1776 xf_emit(ctx, 1, 0x300); 2432 xf_emit(ctx, 1, 0x400); /* fffffff */
1777 xf_emit(ctx, 1, 0x1001); 2433 xf_emit(ctx, 1, 0x300); /* ffff */
2434 xf_emit(ctx, 1, 0x1001); /* 1fff */
1778 if (dev_priv->chipset != 0xa0) { 2435 if (dev_priv->chipset != 0xa0) {
1779 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) 2436 if (IS_NVA3F(dev_priv->chipset))
1780 xf_emit(ctx, 1, 0); 2437 xf_emit(ctx, 1, 0); /* 0000000f UNK15C8 */
1781 else 2438 else
1782 xf_emit(ctx, 1, 0x15); 2439 xf_emit(ctx, 1, 0x15); /* ff */
1783 } 2440 }
1784 xf_emit(ctx, 3, 0);
1785 } 2441 }
1786 xf_emit(ctx, 2, 0); 2442 xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
1787 xf_emit(ctx, 1, 2); 2443 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
1788 xf_emit(ctx, 8, 0); 2444 xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */
1789 xf_emit(ctx, 1, 1); 2445 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */
1790 xf_emit(ctx, 1, 0x10); 2446 xf_emit(ctx, 1, 0); /* ffff0ff3 */
1791 xf_emit(ctx, 1, 0); 2447 xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */
1792 xf_emit(ctx, 1, 1); 2448 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
1793 xf_emit(ctx, 0x13, 0); 2449 xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */
1794 xf_emit(ctx, 1, 0x10); 2450 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
1795 xf_emit(ctx, 0x10, 0); 2451 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
1796 xf_emit(ctx, 0x10, 0x3f800000); 2452 xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */
1797 xf_emit(ctx, 0x19, 0); 2453 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */
1798 xf_emit(ctx, 1, 0x10); 2454 xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
1799 xf_emit(ctx, 1, 0); 2455 xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */
1800 xf_emit(ctx, 1, 0x3f); 2456 xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */
1801 xf_emit(ctx, 6, 0); 2457 xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */
1802 xf_emit(ctx, 1, 1); 2458 xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
1803 xf_emit(ctx, 1, 0); 2459 xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
1804 xf_emit(ctx, 1, 1); 2460 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
1805 xf_emit(ctx, 1, 0); 2461 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
1806 xf_emit(ctx, 1, 1); 2462 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */
2463 xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */
2464 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */
2465 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_REF */
2466 xf_emit(ctx, 2, 0); /* ffffffff DEPTH_BOUNDS */
2467 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
2468 xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */
2469 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
2470 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
2471 xf_emit(ctx, 1, 0); /* 0000000f */
2472 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0FB0 */
2473 xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */
2474 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */
2475 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_REF */
2476 xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
2477 xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */
2478 xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */
2479 xf_emit(ctx, 0x10, 0); /* ffffffff DEPTH_RANGE_NEAR */
2480 xf_emit(ctx, 0x10, 0x3f800000); /* ffffffff DEPTH_RANGE_FAR */
2481 xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
2482 xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
2483 xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */
2484 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */
2485 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_REF */
2486 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */
2487 xf_emit(ctx, 3, 0); /* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */
2488 xf_emit(ctx, 2, 0); /* ffffffff DEPTH_BOUNDS */
2489 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
2490 xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */
2491 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
2492 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
2493 xf_emit(ctx, 1, 0); /* 000000ff CLEAR_STENCIL */
2494 xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */
2495 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */
2496 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_REF */
2497 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */
2498 xf_emit(ctx, 3, 0); /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */
2499 xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
2500 xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */
2501 xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */
2502 xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
2503 xf_emit(ctx, 1, 0x3f); /* 0000003f UNK1590 */
2504 xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
2505 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
2506 xf_emit(ctx, 2, 0); /* ffff0ff3, ffff */
2507 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0FB0 */
2508 xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */
2509 xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */
2510 xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
2511 xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
2512 xf_emit(ctx, 1, 0); /* ffffffff CLEAR_DEPTH */
2513 xf_emit(ctx, 1, 1); /* 00000001 tesla UNK19CC */
1807 if (dev_priv->chipset >= 0xa0) { 2514 if (dev_priv->chipset >= 0xa0) {
1808 xf_emit(ctx, 2, 0); 2515 xf_emit(ctx, 2, 0);
1809 xf_emit(ctx, 1, 0x1001); 2516 xf_emit(ctx, 1, 0x1001);
1810 xf_emit(ctx, 0xb, 0); 2517 xf_emit(ctx, 0xb, 0);
1811 } else { 2518 } else {
1812 xf_emit(ctx, 0xc, 0); 2519 xf_emit(ctx, 1, 0); /* 00000007 */
2520 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
2521 xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
2522 xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */
2523 xf_emit(ctx, 1, 0); /* ffff0ff3 */
1813 } 2524 }
1814 xf_emit(ctx, 1, 0x11); 2525 xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */
1815 xf_emit(ctx, 7, 0); 2526 xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */
1816 xf_emit(ctx, 1, 0xf); 2527 xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */
1817 xf_emit(ctx, 7, 0); 2528 xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */
1818 xf_emit(ctx, 1, 0x11); 2529 xf_emit(ctx, 1, 0x11); /* 3f/7f */
1819 if (dev_priv->chipset == 0x50) 2530 xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
1820 xf_emit(ctx, 4, 0); 2531 if (dev_priv->chipset != 0x50) {
1821 else 2532 xf_emit(ctx, 1, 0); /* 0000000f LOGIC_OP */
1822 xf_emit(ctx, 6, 0); 2533 xf_emit(ctx, 1, 0); /* 000000ff */
1823 xf_emit(ctx, 3, 1); 2534 }
1824 xf_emit(ctx, 1, 2); 2535 xf_emit(ctx, 1, 0); /* 00000007 OPERATION */
1825 xf_emit(ctx, 1, 1); 2536 xf_emit(ctx, 1, 0); /* ff/3ff */
1826 xf_emit(ctx, 1, 2); 2537 xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */
1827 xf_emit(ctx, 1, 1); 2538 xf_emit(ctx, 2, 1); /* 00000007 BLEND_EQUATION_RGB, ALPHA */
1828 xf_emit(ctx, 1, 0); 2539 xf_emit(ctx, 1, 1); /* 00000001 UNK133C */
1829 xf_emit(ctx, 1, magic2); 2540 xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */
1830 xf_emit(ctx, 1, 0); 2541 xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */
1831 xf_emit(ctx, 1, 0x0fac6881); 2542 xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */
1832 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { 2543 xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */
1833 xf_emit(ctx, 1, 0); 2544 xf_emit(ctx, 1, 0); /* 00000001 */
1834 xf_emit(ctx, 0x18, 1); 2545 xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */
1835 xf_emit(ctx, 8, 2); 2546 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
1836 xf_emit(ctx, 8, 1); 2547 xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
1837 xf_emit(ctx, 8, 2); 2548 if (IS_NVA3F(dev_priv->chipset)) {
1838 xf_emit(ctx, 8, 1); 2549 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK12E4 */
1839 xf_emit(ctx, 3, 0); 2550 xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */
1840 xf_emit(ctx, 1, 1); 2551 xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */
1841 xf_emit(ctx, 5, 0); 2552 xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */
1842 xf_emit(ctx, 1, 1); 2553 xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_RGB */
1843 xf_emit(ctx, 0x16, 0); 2554 xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_RGB */
2555 xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_ALPHA */
2556 xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_ALPHA */
2557 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1140 */
2558 xf_emit(ctx, 2, 0); /* 00000001 */
2559 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
2560 xf_emit(ctx, 1, 0); /* 0000000f */
2561 xf_emit(ctx, 1, 0); /* 00000003 */
2562 xf_emit(ctx, 1, 0); /* ffffffff */
2563 xf_emit(ctx, 2, 0); /* 00000001 */
2564 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
2565 xf_emit(ctx, 1, 0); /* 00000001 */
2566 xf_emit(ctx, 1, 0); /* 000003ff */
2567 } else if (dev_priv->chipset >= 0xa0) {
2568 xf_emit(ctx, 2, 0); /* 00000001 */
2569 xf_emit(ctx, 1, 0); /* 00000007 */
2570 xf_emit(ctx, 1, 0); /* 00000003 */
2571 xf_emit(ctx, 1, 0); /* ffffffff */
2572 xf_emit(ctx, 2, 0); /* 00000001 */
1844 } else { 2573 } else {
1845 if (dev_priv->chipset >= 0xa0) 2574 xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
1846 xf_emit(ctx, 0x1b, 0); 2575 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1430 */
1847 else 2576 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
1848 xf_emit(ctx, 0x15, 0);
1849 } 2577 }
1850 xf_emit(ctx, 1, 1); 2578 xf_emit(ctx, 4, 0); /* ffffffff CLEAR_COLOR */
1851 xf_emit(ctx, 1, 2); 2579 xf_emit(ctx, 4, 0); /* ffffffff BLEND_COLOR A R G B */
1852 xf_emit(ctx, 2, 1); 2580 xf_emit(ctx, 1, 0); /* 00000fff eng2d UNK2B0 */
1853 xf_emit(ctx, 1, 2);
1854 xf_emit(ctx, 2, 1);
1855 if (dev_priv->chipset >= 0xa0) 2581 if (dev_priv->chipset >= 0xa0)
1856 xf_emit(ctx, 4, 0); 2582 xf_emit(ctx, 2, 0); /* 00000001 */
1857 else 2583 xf_emit(ctx, 1, 0); /* 000003ff */
1858 xf_emit(ctx, 3, 0); 2584 xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */
1859 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { 2585 xf_emit(ctx, 1, 1); /* 00000001 UNK133C */
1860 xf_emit(ctx, 0x10, 1); 2586 xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */
1861 xf_emit(ctx, 8, 2); 2587 xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */
1862 xf_emit(ctx, 0x10, 1); 2588 xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */
1863 xf_emit(ctx, 8, 2); 2589 xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */
1864 xf_emit(ctx, 8, 1); 2590 xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */
1865 xf_emit(ctx, 3, 0); 2591 xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */
2592 xf_emit(ctx, 1, 0); /* 00000001 UNK19C0 */
2593 xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
2594 xf_emit(ctx, 1, 0); /* 0000000f LOGIC_OP */
2595 if (dev_priv->chipset >= 0xa0)
2596 xf_emit(ctx, 1, 0); /* 00000001 UNK12E4? NVA3+ only? */
2597 if (IS_NVA3F(dev_priv->chipset)) {
2598 xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */
2599 xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */
2600 xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_RGB */
2601 xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_RGB */
2602 xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */
2603 xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_ALPHA */
2604 xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_ALPHA */
2605 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK15C4 */
2606 xf_emit(ctx, 1, 0); /* 00000001 */
2607 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1140 */
1866 } 2608 }
1867 xf_emit(ctx, 1, 0x11); 2609 xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */
1868 xf_emit(ctx, 1, 1); 2610 xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */
1869 xf_emit(ctx, 0x5b, 0); 2611 xf_emit(ctx, 1, 0); /* 00000007 PATTERN_COLOR_FORMAT */
2612 xf_emit(ctx, 2, 0); /* ffffffff PATTERN_MONO_COLOR */
2613 xf_emit(ctx, 1, 0); /* 00000001 PATTERN_MONO_FORMAT */
2614 xf_emit(ctx, 2, 0); /* ffffffff PATTERN_MONO_BITMAP */
2615 xf_emit(ctx, 1, 0); /* 00000003 PATTERN_SELECT */
2616 xf_emit(ctx, 1, 0); /* 000000ff ROP */
2617 xf_emit(ctx, 1, 0); /* ffffffff BETA1 */
2618 xf_emit(ctx, 1, 0); /* ffffffff BETA4 */
2619 xf_emit(ctx, 1, 0); /* 00000007 OPERATION */
2620 xf_emit(ctx, 0x50, 0); /* 10x ffffff, ffffff, ffffff, ffffff, 3 PATTERN */
1870} 2621}
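
gene_ropc and xfer_tprop (below) pick the same magic2 constant with the same three-way test. Restated as a standalone helper for reference (values copied from the selection at the top of the function; the register they land in is only known as "tesla UNK0F78"):

	#include <stdint.h>

	static uint32_t ropc_magic2(int chipset)
	{
		if (chipset == 0x50)
			return 0x00003e60;
		/* the IS_NVA3F family */
		if ((chipset > 0xa0 && chipset < 0xaa) || chipset == 0xaf)
			return 0x00087e67;
		return 0x001ffe67;
	}
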
1871 2622
1872static void 2623static void
1873nv50_graph_construct_xfer_tp_x1(struct nouveau_grctx *ctx) 2624nv50_graph_construct_xfer_unk84xx(struct nouveau_grctx *ctx)
1874{ 2625{
1875 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 2626 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
1876 int magic3; 2627 int magic3;
1877 if (dev_priv->chipset == 0x50) 2628 switch (dev_priv->chipset) {
2629 case 0x50:
1878 magic3 = 0x1000; 2630 magic3 = 0x1000;
1879 else if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa8) 2631 break;
2632 case 0x86:
2633 case 0x98:
2634 case 0xa8:
2635 case 0xaa:
2636 case 0xac:
2637 case 0xaf:
1880 magic3 = 0x1e00; 2638 magic3 = 0x1e00;
1881 else 2639 break;
2640 default:
1882 magic3 = 0; 2641 magic3 = 0;
1883 xf_emit(ctx, 1, 0); 2642 }
1884 xf_emit(ctx, 1, 4); 2643 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
1885 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) 2644 xf_emit(ctx, 1, 4); /* 7f/ff[NVA0+] VP_REG_ALLOC_RESULT */
1886 xf_emit(ctx, 0x24, 0); 2645 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
2646 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
2647 xf_emit(ctx, 1, 0); /* 111/113[NVA0+] */
2648 if (IS_NVA3F(dev_priv->chipset))
2649 xf_emit(ctx, 0x1f, 0); /* ffffffff */
1887 else if (dev_priv->chipset >= 0xa0) 2650 else if (dev_priv->chipset >= 0xa0)
1888 xf_emit(ctx, 0x14, 0); 2651 xf_emit(ctx, 0x0f, 0); /* ffffffff */
1889 else 2652 else
1890 xf_emit(ctx, 0x15, 0); 2653 xf_emit(ctx, 0x10, 0); /* fffffff VP_RESULT_MAP_1 up */
1891 xf_emit(ctx, 2, 4); 2654 xf_emit(ctx, 2, 0); /* f/1f[NVA3], fffffff/ffffffff[NVA0+] */
2655 xf_emit(ctx, 1, 4); /* 7f/ff VP_REG_ALLOC_RESULT */
2656 xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */
1892 if (dev_priv->chipset >= 0xa0) 2657 if (dev_priv->chipset >= 0xa0)
1893 xf_emit(ctx, 1, 0x03020100); 2658 xf_emit(ctx, 1, 0x03020100); /* ffffffff */
1894 else 2659 else
1895 xf_emit(ctx, 1, 0x00608080); 2660 xf_emit(ctx, 1, 0x00608080); /* fffffff VP_RESULT_MAP_0 */
1896 xf_emit(ctx, 4, 0); 2661 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
1897 xf_emit(ctx, 1, 4); 2662 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
1898 xf_emit(ctx, 2, 0); 2663 xf_emit(ctx, 2, 0); /* 111/113, 7f/ff */
1899 xf_emit(ctx, 2, 4); 2664 xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */
1900 xf_emit(ctx, 1, 0x80); 2665 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
2666 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
2667 xf_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */
2668 xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
2669 xf_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT */
1901 if (magic3) 2670 if (magic3)
1902 xf_emit(ctx, 1, magic3); 2671 xf_emit(ctx, 1, magic3); /* 00007fff tesla UNK141C */
1903 xf_emit(ctx, 1, 4); 2672 xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */
1904 xf_emit(ctx, 0x24, 0); 2673 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
1905 xf_emit(ctx, 1, 4); 2674 xf_emit(ctx, 1, 0); /* 111/113 */
1906 xf_emit(ctx, 1, 0x80); 2675 xf_emit(ctx, 0x1f, 0); /* ffffffff GP_RESULT_MAP_1 up */
1907 xf_emit(ctx, 1, 4); 2676 xf_emit(ctx, 1, 0); /* 0000001f */
1908 xf_emit(ctx, 1, 0x03020100); 2677 xf_emit(ctx, 1, 0); /* ffffffff */
1909 xf_emit(ctx, 1, 3); 2678 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
2679 xf_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */
2680 xf_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT */
2681 xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
2682 xf_emit(ctx, 1, 0x03020100); /* ffffffff GP_RESULT_MAP_0 */
2683 xf_emit(ctx, 1, 3); /* 00000003 GP_OUTPUT_PRIMITIVE_TYPE */
1910 if (magic3) 2684 if (magic3)
1911 xf_emit(ctx, 1, magic3); 2685 xf_emit(ctx, 1, magic3); /* 7fff tesla UNK141C */
1912 xf_emit(ctx, 1, 4); 2686 xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */
1913 xf_emit(ctx, 4, 0); 2687 xf_emit(ctx, 1, 0); /* 00000001 PROVOKING_VERTEX_LAST */
1914 xf_emit(ctx, 1, 4); 2688 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
1915 xf_emit(ctx, 1, 3); 2689 xf_emit(ctx, 1, 0); /* 111/113 */
1916 xf_emit(ctx, 3, 0); 2690 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
1917 xf_emit(ctx, 1, 4); 2691 xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
2692 xf_emit(ctx, 1, 3); /* 00000003 GP_OUTPUT_PRIMITIVE_TYPE */
2693 xf_emit(ctx, 1, 0); /* 00000001 PROVOKING_VERTEX_LAST */
2694 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
2695 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK13A0 */
2696 xf_emit(ctx, 1, 4); /* 7f/ff VP_REG_ALLOC_RESULT */
2697 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
2698 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
2699 xf_emit(ctx, 1, 0); /* 111/113 */
1918 if (dev_priv->chipset == 0x94 || dev_priv->chipset == 0x96) 2700 if (dev_priv->chipset == 0x94 || dev_priv->chipset == 0x96)
1919 xf_emit(ctx, 0x1024, 0); 2701 xf_emit(ctx, 0x1020, 0); /* 4 x (0x400 x 0xffffffff, ff, 0, 0, 0, 4 x ffffffff) */
1920 else if (dev_priv->chipset < 0xa0) 2702 else if (dev_priv->chipset < 0xa0)
1921 xf_emit(ctx, 0xa24, 0); 2703 xf_emit(ctx, 0xa20, 0); /* 4 x (0x280 x 0xffffffff, ff, 0, 0, 0, 4 x ffffffff) */
1922 else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa) 2704 else if (!IS_NVA3F(dev_priv->chipset))
1923 xf_emit(ctx, 0x214, 0); 2705 xf_emit(ctx, 0x210, 0); /* ffffffff */
1924 else 2706 else
1925 xf_emit(ctx, 0x414, 0); 2707 xf_emit(ctx, 0x410, 0); /* ffffffff */
1926 xf_emit(ctx, 1, 4); 2708 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
1927 xf_emit(ctx, 1, 3); 2709 xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
1928 xf_emit(ctx, 2, 0); 2710 xf_emit(ctx, 1, 3); /* 00000003 GP_OUTPUT_PRIMITIVE_TYPE */
2711 xf_emit(ctx, 1, 0); /* 00000001 PROVOKING_VERTEX_LAST */
2712 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
1929} 2713}
1930 2714
1931static void 2715static void
1932nv50_graph_construct_xfer_tp_x2(struct nouveau_grctx *ctx) 2716nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
1933{ 2717{
1934 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 2718 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
1935 int magic1, magic2; 2719 int magic1, magic2;
1936 if (dev_priv->chipset == 0x50) { 2720 if (dev_priv->chipset == 0x50) {
1937 magic1 = 0x3ff; 2721 magic1 = 0x3ff;
1938 magic2 = 0x00003e60; 2722 magic2 = 0x00003e60;
1939 } else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) { 2723 } else if (!IS_NVA3F(dev_priv->chipset)) {
1940 magic1 = 0x7ff; 2724 magic1 = 0x7ff;
1941 magic2 = 0x001ffe67; 2725 magic2 = 0x001ffe67;
1942 } else { 2726 } else {
1943 magic1 = 0x7ff; 2727 magic1 = 0x7ff;
1944 magic2 = 0x00087e67; 2728 magic2 = 0x00087e67;
1945 } 2729 }
1946 xf_emit(ctx, 3, 0); 2730 xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */
1947 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) 2731 xf_emit(ctx, 1, 0); /* ffffffff ALPHA_TEST_REF */
1948 xf_emit(ctx, 1, 1); 2732 xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
1949 xf_emit(ctx, 0xc, 0); 2733 if (IS_NVA3F(dev_priv->chipset))
1950 xf_emit(ctx, 1, 0xf); 2734 xf_emit(ctx, 1, 1); /* 0000000f UNK16A0 */
1951 xf_emit(ctx, 0xb, 0); 2735 xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
1952 xf_emit(ctx, 1, 4); 2736 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
1953 xf_emit(ctx, 4, 0xffff); 2737 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */
1954 xf_emit(ctx, 8, 0); 2738 xf_emit(ctx, 3, 0); /* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */
1955 xf_emit(ctx, 1, 1); 2739 xf_emit(ctx, 4, 0); /* ffffffff BLEND_COLOR */
1956 xf_emit(ctx, 3, 0); 2740 xf_emit(ctx, 1, 0); /* 00000001 UNK19C0 */
1957 xf_emit(ctx, 1, 1); 2741 xf_emit(ctx, 1, 0); /* 00000001 UNK0FDC */
1958 xf_emit(ctx, 5, 0); 2742 xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */
1959 xf_emit(ctx, 1, 1); 2743 xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */
1960 xf_emit(ctx, 2, 0); 2744 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
1961 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { 2745 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
1962 xf_emit(ctx, 1, 3); 2746 xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
1963 xf_emit(ctx, 1, 0); 2747 xf_emit(ctx, 1, 0); /* ff[NV50]/3ff[NV84+] */
1964 } else if (dev_priv->chipset >= 0xa0) 2748 xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */
1965 xf_emit(ctx, 1, 1); 2749 xf_emit(ctx, 4, 0xffff); /* 0000ffff MSAA_MASK */
1966 xf_emit(ctx, 0xa, 0); 2750 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */
1967 xf_emit(ctx, 2, 1); 2751 xf_emit(ctx, 3, 0); /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */
1968 xf_emit(ctx, 1, 2); 2752 xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
1969 xf_emit(ctx, 2, 1); 2753 xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */
1970 xf_emit(ctx, 1, 2); 2754 xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY */
1971 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { 2755 xf_emit(ctx, 1, 1); /* 00000001 tesla UNK19CC */
1972 xf_emit(ctx, 1, 0); 2756 xf_emit(ctx, 1, 0); /* 7 */
1973 xf_emit(ctx, 0x18, 1); 2757 xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */
1974 xf_emit(ctx, 8, 2); 2758 xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
1975 xf_emit(ctx, 8, 1); 2759 xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
1976 xf_emit(ctx, 8, 2); 2760 xf_emit(ctx, 1, 0); /* ffffffff COLOR_KEY */
1977 xf_emit(ctx, 8, 1); 2761 xf_emit(ctx, 1, 0); /* 00000001 COLOR_KEY_ENABLE */
1978 xf_emit(ctx, 1, 0); 2762 xf_emit(ctx, 1, 0); /* 00000007 COLOR_KEY_FORMAT */
2763 xf_emit(ctx, 2, 0); /* ffffffff SIFC_BITMAP_COLOR */
2764 xf_emit(ctx, 1, 1); /* 00000001 SIFC_BITMAP_WRITE_BIT0_ENABLE */
2765 xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */
2766 xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
2767 if (IS_NVA3F(dev_priv->chipset)) {
2768 xf_emit(ctx, 1, 3); /* 00000003 tesla UNK16B4 */
2769 xf_emit(ctx, 1, 0); /* 00000003 */
2770 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1298 */
2771 } else if (dev_priv->chipset >= 0xa0) {
2772 xf_emit(ctx, 1, 1); /* 00000001 tesla UNK16B4 */
2773 xf_emit(ctx, 1, 0); /* 00000003 */
2774 } else {
2775 xf_emit(ctx, 1, 0); /* 00000003 MULTISAMPLE_CTRL */
1979 } 2776 }
1980 xf_emit(ctx, 1, 1); 2777 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
1981 xf_emit(ctx, 1, 0); 2778 xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */
1982 xf_emit(ctx, 1, 0x11); 2779 xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */
1983 xf_emit(ctx, 7, 0); 2780 xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */
1984 xf_emit(ctx, 1, 0x0fac6881); 2781 xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */
1985 xf_emit(ctx, 2, 0); 2782 xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */
1986 xf_emit(ctx, 1, 4); 2783 xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */
1987 xf_emit(ctx, 3, 0); 2784 xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */
1988 xf_emit(ctx, 1, 0x11); 2785 if (IS_NVA3F(dev_priv->chipset)) {
1989 xf_emit(ctx, 1, 1); 2786 xf_emit(ctx, 1, 0); /* 00000001 UNK12E4 */
1990 xf_emit(ctx, 1, 0); 2787 xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */
1991 xf_emit(ctx, 3, 0xcf); 2788 xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */
1992 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) 2789 xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */
1993 xf_emit(ctx, 1, 1); 2790 xf_emit(ctx, 8, 2); /* 0000001f IBLEND_SRC_RGB */
1994 xf_emit(ctx, 0xa, 0); 2791 xf_emit(ctx, 8, 1); /* 0000001f IBLEND_DST_RGB */
1995 xf_emit(ctx, 2, 1); 2792 xf_emit(ctx, 8, 2); /* 0000001f IBLEND_SRC_ALPHA */
1996 xf_emit(ctx, 1, 2); 2793 xf_emit(ctx, 8, 1); /* 0000001f IBLEND_DST_ALPHA */
1997 xf_emit(ctx, 2, 1); 2794 xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */
1998 xf_emit(ctx, 1, 2); 2795 }
1999 xf_emit(ctx, 1, 1); 2796 xf_emit(ctx, 1, 1); /* 00000001 UNK133C */
2000 xf_emit(ctx, 1, 0); 2797 xf_emit(ctx, 1, 0); /* ffff0ff3 */
2001 xf_emit(ctx, 8, 1); 2798 xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */
2002 xf_emit(ctx, 1, 0x11); 2799 xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */
2003 xf_emit(ctx, 7, 0); 2800 xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
2004 xf_emit(ctx, 1, 0x0fac6881); 2801 xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
2005 xf_emit(ctx, 1, 0xf); 2802 xf_emit(ctx, 1, 0); /* ff/3ff */
2006 xf_emit(ctx, 7, 0); 2803 xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */
2007 xf_emit(ctx, 1, magic2); 2804 xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */
2008 xf_emit(ctx, 2, 0); 2805 xf_emit(ctx, 1, 0); /* 00000001 FRAMEBUFFER_SRGB */
2009 xf_emit(ctx, 1, 0x11); 2806 xf_emit(ctx, 1, 0); /* 7 */
2010 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) 2807 xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */
2011 xf_emit(ctx, 2, 1); 2808 xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */
2012 else 2809 xf_emit(ctx, 1, 0); /* 00000007 OPERATION */
2013 xf_emit(ctx, 1, 1); 2810 xf_emit(ctx, 1, 0xcf); /* 000000ff SIFC_FORMAT */
2811 xf_emit(ctx, 1, 0xcf); /* 000000ff DRAW_COLOR_FORMAT */
2812 xf_emit(ctx, 1, 0xcf); /* 000000ff SRC_FORMAT */
2813 if (IS_NVA3F(dev_priv->chipset))
2814 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
2815 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
2816 xf_emit(ctx, 1, 0); /* 7/f[NVA3] MULTISAMPLE_SAMPLES_LOG2 */
2817 xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */
2818 xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */
2819 xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */
2820 xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */
2821 xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */
2822 xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */
2823 xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */
2824 xf_emit(ctx, 1, 1); /* 00000001 UNK133C */
2825 xf_emit(ctx, 1, 0); /* ffff0ff3 */
2826 xf_emit(ctx, 8, 1); /* 00000001 UNK19E0 */
2827 xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */
2828 xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */
2829 xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
2830 xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */
2831 xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */
2832 xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */
2833 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
2834 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
2835 xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */
2836 xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */
2837 if (IS_NVA3F(dev_priv->chipset))
2838 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
2014	if (dev_priv->chipset == 0x50) 2839	if (dev_priv->chipset == 0x50)
2015 xf_emit(ctx, 1, 0); 2840 xf_emit(ctx, 1, 0); /* ff */
2016 else 2841 else
2017 xf_emit(ctx, 3, 0); 2842 xf_emit(ctx, 3, 0); /* 1, 7, 3ff */
2018 xf_emit(ctx, 1, 4); 2843 xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */
2019 xf_emit(ctx, 5, 0); 2844 xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */
2020 xf_emit(ctx, 1, 1); 2845 xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
2021 xf_emit(ctx, 4, 0); 2846 xf_emit(ctx, 1, 0); /* 00000007 */
2022 xf_emit(ctx, 1, 0x11); 2847 xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */
2023 xf_emit(ctx, 7, 0); 2848 xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
2024 xf_emit(ctx, 1, 0x0fac6881); 2849 xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
2025 xf_emit(ctx, 3, 0); 2850 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
2026 xf_emit(ctx, 1, 0x11); 2851 xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
2027 xf_emit(ctx, 1, 1); 2852 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
2028 xf_emit(ctx, 1, 0); 2853 xf_emit(ctx, 1, 0); /* ffff0ff3 */
2029 xf_emit(ctx, 1, 1); 2854 xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */
2030 xf_emit(ctx, 1, 0); 2855 xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */
2031 xf_emit(ctx, 1, 1); 2856 xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
2032 xf_emit(ctx, 1, 0); 2857 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
2033 xf_emit(ctx, 1, magic1); 2858 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
2034 xf_emit(ctx, 1, 0); 2859 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
2035 xf_emit(ctx, 1, 1); 2860 xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */
2036 xf_emit(ctx, 1, 0); 2861 xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */
2037 xf_emit(ctx, 1, 1); 2862 xf_emit(ctx, 1, 0); /* 000fffff BLIT_DU_DX_FRACT */
2038 xf_emit(ctx, 2, 0); 2863 xf_emit(ctx, 1, 1); /* 0001ffff BLIT_DU_DX_INT */
2039 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) 2864 xf_emit(ctx, 1, 0); /* 000fffff BLIT_DV_DY_FRACT */
2040 xf_emit(ctx, 1, 1); 2865 xf_emit(ctx, 1, 1); /* 0001ffff BLIT_DV_DY_INT */
2041 xf_emit(ctx, 0x28, 0); 2866 xf_emit(ctx, 1, 0); /* ff/3ff */
2042 xf_emit(ctx, 8, 8); 2867 xf_emit(ctx, 1, magic1); /* 3ff/7ff tesla UNK0D68 */
2043 xf_emit(ctx, 1, 0x11); 2868 xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
2044 xf_emit(ctx, 7, 0); 2869 xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */
2045 xf_emit(ctx, 1, 0x0fac6881); 2870 xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
2046 xf_emit(ctx, 8, 0x400); 2871 xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
2047 xf_emit(ctx, 8, 0x300); 2872 xf_emit(ctx, 1, 0); /* 00000007 */
2048 xf_emit(ctx, 1, 1); 2873 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
2049 xf_emit(ctx, 1, 0xf); 2874 if (IS_NVA3F(dev_priv->chipset))
2050 xf_emit(ctx, 7, 0); 2875 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
2051 xf_emit(ctx, 1, 0x20); 2876 xf_emit(ctx, 8, 0); /* 0000ffff DMA_COLOR */
2052 xf_emit(ctx, 1, 0x11); 2877 xf_emit(ctx, 1, 0); /* 0000ffff DMA_GLOBAL */
2053 xf_emit(ctx, 1, 0x100); 2878 xf_emit(ctx, 1, 0); /* 0000ffff DMA_LOCAL */
2054 xf_emit(ctx, 1, 0); 2879 xf_emit(ctx, 1, 0); /* 0000ffff DMA_STACK */
2055 xf_emit(ctx, 1, 1); 2880 xf_emit(ctx, 1, 0); /* ff/3ff */
2056 xf_emit(ctx, 2, 0); 2881 xf_emit(ctx, 1, 0); /* 0000ffff DMA_DST */
2057 xf_emit(ctx, 1, 0x40); 2882 xf_emit(ctx, 1, 0); /* 7 */
2058 xf_emit(ctx, 1, 0x100); 2883 xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
2059 xf_emit(ctx, 1, 0); 2884 xf_emit(ctx, 1, 0); /* ffff0ff3 */
2060 xf_emit(ctx, 1, 3); 2885 xf_emit(ctx, 8, 0); /* 000000ff RT_ADDRESS_HIGH */
2061 xf_emit(ctx, 4, 0); 2886 xf_emit(ctx, 8, 0); /* ffffffff RT_LAYER_STRIDE */
2062 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) 2887 xf_emit(ctx, 8, 0); /* ffffffff RT_ADDRESS_LOW */
2063 xf_emit(ctx, 1, 1); 2888 xf_emit(ctx, 8, 8); /* 0000007f RT_TILE_MODE */
2064 xf_emit(ctx, 1, magic2); 2889 xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */
2065 xf_emit(ctx, 3, 0); 2890 xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */
2066 xf_emit(ctx, 1, 2); 2891 xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
2067 xf_emit(ctx, 1, 0x0fac6881); 2892 xf_emit(ctx, 8, 0x400); /* 0fffffff RT_HORIZ */
2068 xf_emit(ctx, 9, 0); 2893 xf_emit(ctx, 8, 0x300); /* 0000ffff RT_VERT */
2069 xf_emit(ctx, 1, 1); 2894 xf_emit(ctx, 1, 1); /* 00001fff RT_ARRAY_MODE */
2070 xf_emit(ctx, 4, 0); 2895 xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */
2071 xf_emit(ctx, 1, 4); 2896 xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */
2072 xf_emit(ctx, 1, 0); 2897 xf_emit(ctx, 1, 0x20); /* 00000fff DST_TILE_MODE */
2073 xf_emit(ctx, 1, 1); 2898 xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */
2074 xf_emit(ctx, 1, 0x400); 2899 xf_emit(ctx, 1, 0x100); /* 0001ffff DST_HEIGHT */
2075 xf_emit(ctx, 1, 0x300); 2900 xf_emit(ctx, 1, 0); /* 000007ff DST_LAYER */
2076 xf_emit(ctx, 1, 0x1001); 2901 xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */
2077 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) 2902 xf_emit(ctx, 1, 0); /* ffffffff DST_ADDRESS_LOW */
2078 xf_emit(ctx, 4, 0); 2903 xf_emit(ctx, 1, 0); /* 000000ff DST_ADDRESS_HIGH */
2079 else 2904 xf_emit(ctx, 1, 0x40); /* 0007ffff DST_PITCH */
2080 xf_emit(ctx, 3, 0); 2905 xf_emit(ctx, 1, 0x100); /* 0001ffff DST_WIDTH */
2081 xf_emit(ctx, 1, 0x11); 2906 xf_emit(ctx, 1, 0); /* 0000ffff */
2082 xf_emit(ctx, 7, 0); 2907 xf_emit(ctx, 1, 3); /* 00000003 tesla UNK15AC */
2083 xf_emit(ctx, 1, 0x0fac6881); 2908 xf_emit(ctx, 1, 0); /* ff/3ff */
2084 xf_emit(ctx, 1, 0xf); 2909 xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */
2085 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { 2910 xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */
2086 xf_emit(ctx, 0x15, 0); 2911 xf_emit(ctx, 1, 0); /* 00000007 */
2087 xf_emit(ctx, 1, 1); 2912 if (IS_NVA3F(dev_priv->chipset))
2088 xf_emit(ctx, 3, 0); 2913 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
2089 } else 2914 xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */
2090 xf_emit(ctx, 0x17, 0); 2915 xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
2916 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
2917 xf_emit(ctx, 1, 0); /* ffff0ff3 */
2918 xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */
2919 xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
2920 xf_emit(ctx, 1, 0); /* 0000ffff DMA_ZETA */
2921 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
2922 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
2923 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
2924 xf_emit(ctx, 2, 0); /* ffff, ff/3ff */
2925 xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */
2926 xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
2927 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */
2928 xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */
2929 xf_emit(ctx, 1, 0); /* 00000007 */
2930 xf_emit(ctx, 1, 0); /* ffffffff ZETA_LAYER_STRIDE */
2931 xf_emit(ctx, 1, 0); /* 000000ff ZETA_ADDRESS_HIGH */
2932 xf_emit(ctx, 1, 0); /* ffffffff ZETA_ADDRESS_LOW */
2933 xf_emit(ctx, 1, 4); /* 00000007 ZETA_TILE_MODE */
2934 xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
2935 xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
2936 xf_emit(ctx, 1, 0x400); /* 0fffffff ZETA_HORIZ */
2937 xf_emit(ctx, 1, 0x300); /* 0000ffff ZETA_VERT */
2938 xf_emit(ctx, 1, 0x1001); /* 00001fff ZETA_ARRAY_MODE */
2939 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
2940 xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
2941 if (IS_NVA3F(dev_priv->chipset))
2942 xf_emit(ctx, 1, 0); /* 00000001 */
2943 xf_emit(ctx, 1, 0); /* ffff0ff3 */
2944 xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */
2945 xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */
2946 xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
2947 xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */
2948 xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */
2949 xf_emit(ctx, 1, 0); /* ff/3ff */
2950 xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */
2951 xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */
2952 xf_emit(ctx, 1, 0); /* 00000001 FRAMEBUFFER_SRGB */
2953 xf_emit(ctx, 1, 0); /* 7 */
2954 xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
2955 if (IS_NVA3F(dev_priv->chipset)) {
2956 xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */
2957 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
2958 }
2959 xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
2960 xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */
2961 xf_emit(ctx, 1, 0); /* ffff0ff3 */
2091 if (dev_priv->chipset >= 0xa0) 2962 if (dev_priv->chipset >= 0xa0)
2092 xf_emit(ctx, 1, 0x0fac6881); 2963 xf_emit(ctx, 1, 0x0fac6881); /* fffffff */
2093 xf_emit(ctx, 1, magic2); 2964 xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */
2094 xf_emit(ctx, 3, 0); 2965 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
2095 xf_emit(ctx, 1, 0x11); 2966 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
2096 xf_emit(ctx, 2, 0); 2967 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
2097 xf_emit(ctx, 1, 4); 2968 xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */
2098 xf_emit(ctx, 1, 0); 2969 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0FB0 */
2099 xf_emit(ctx, 2, 1); 2970 xf_emit(ctx, 1, 0); /* ff/3ff */
2100 xf_emit(ctx, 3, 0); 2971 xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */
2101 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) 2972 xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
2102 xf_emit(ctx, 2, 1); 2973 xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */
2103 else 2974 xf_emit(ctx, 1, 1); /* 00000001 tesla UNK19CC */
2104 xf_emit(ctx, 1, 1); 2975 xf_emit(ctx, 1, 0); /* 00000007 */
2105 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) 2976 xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */
2106 xf_emit(ctx, 2, 0); 2977 xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
2107 else if (dev_priv->chipset != 0x50) 2978 xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
2108 xf_emit(ctx, 1, 0); 2979 if (IS_NVA3F(dev_priv->chipset)) {
2980 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
2981 xf_emit(ctx, 1, 0); /* 0000000f tesla UNK15C8 */
2982 }
2983 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
2984 if (dev_priv->chipset >= 0xa0) {
2985 xf_emit(ctx, 3, 0); /* 7/f, 1, ffff0ff3 */
2986 xf_emit(ctx, 1, 0xfac6881); /* fffffff */
2987 xf_emit(ctx, 4, 0); /* 1, 1, 1, 3ff */
2988 xf_emit(ctx, 1, 4); /* 7 */
2989 xf_emit(ctx, 1, 0); /* 1 */
2990 xf_emit(ctx, 2, 1); /* 1 */
2991 xf_emit(ctx, 2, 0); /* 7, f */
2992 xf_emit(ctx, 1, 1); /* 1 */
2993 xf_emit(ctx, 1, 0); /* 7/f */
2994 if (IS_NVA3F(dev_priv->chipset))
2995 xf_emit(ctx, 0x9, 0); /* 1 */
2996 else
2997 xf_emit(ctx, 0x8, 0); /* 1 */
2998 xf_emit(ctx, 1, 0); /* ffff0ff3 */
2999 xf_emit(ctx, 8, 1); /* 1 */
3000 xf_emit(ctx, 1, 0x11); /* 7f */
3001 xf_emit(ctx, 7, 0); /* 7f */
3002 xf_emit(ctx, 1, 0xfac6881); /* fffffff */
3003 xf_emit(ctx, 1, 0xf); /* f */
3004 xf_emit(ctx, 7, 0); /* f */
3005 xf_emit(ctx, 1, 0x11); /* 7f */
3006 xf_emit(ctx, 1, 1); /* 1 */
3007 xf_emit(ctx, 5, 0); /* 1, 7, 3ff, 3, 7 */
3008 if (IS_NVA3F(dev_priv->chipset)) {
3009 xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */
3010 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
3011 }
3012 }
2109} 3013}
2110 3014
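(All of these constructors are built on a single helper, xf_emit(), and the walls of calls above are easier to read with its behaviour in mind. The sketch below paraphrases the helper from nouveau_grctx.h in stand-alone form; the name xf_emit_sketch and the raw-array interface are illustrative only, not the driver's API, and the real helper additionally skips the store for zero values since the image starts out zeroed.)

#include <stdint.h>

/* Behavioural sketch of xf_emit(): the context image interleaves the
 * 8 xfer strands word-by-word, so consecutive values of one strand sit
 * 8 words apart.  Emitting num values stores them at that stride and
 * advances the strand's position -- which is why even the zero-fills
 * matter: they keep the layout in sync with what the hardware expects. */
static void
xf_emit_sketch(uint32_t *ctxvals, int *pos, int num, uint32_t val)
{
	int i;

	for (i = 0; i < num; i++)
		ctxvals[*pos + (i << 3)] = val;	/* stride-8 interleave */
	*pos += num << 3;
}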
2111static void 3015static void
2112nv50_graph_construct_xfer_tp_x3(struct nouveau_grctx *ctx) 3016nv50_graph_construct_xfer_tex(struct nouveau_grctx *ctx)
2113{ 3017{
2114 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 3018 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
2115 xf_emit(ctx, 3, 0); 3019 xf_emit(ctx, 2, 0); /* 1 LINKED_TSC. yes, 2. */
2116 xf_emit(ctx, 1, 1); 3020 if (dev_priv->chipset != 0x50)
2117 xf_emit(ctx, 1, 0); 3021 xf_emit(ctx, 1, 0); /* 3 */
2118 xf_emit(ctx, 1, 1); 3022 xf_emit(ctx, 1, 1); /* 1ffff BLIT_DU_DX_INT */
3023 xf_emit(ctx, 1, 0); /* fffff BLIT_DU_DX_FRACT */
3024 xf_emit(ctx, 1, 1); /* 1ffff BLIT_DV_DY_INT */
3025 xf_emit(ctx, 1, 0); /* fffff BLIT_DV_DY_FRACT */
2119 if (dev_priv->chipset == 0x50) 3026 if (dev_priv->chipset == 0x50)
2120 xf_emit(ctx, 2, 0); 3027 xf_emit(ctx, 1, 0); /* 3 BLIT_CONTROL */
2121 else 3028 else
2122 xf_emit(ctx, 3, 0); 3029 xf_emit(ctx, 2, 0); /* 3ff, 1 */
2123 xf_emit(ctx, 1, 0x2a712488); 3030 xf_emit(ctx, 1, 0x2a712488); /* ffffffff SRC_TIC_0 */
2124 xf_emit(ctx, 1, 0); 3031 xf_emit(ctx, 1, 0); /* ffffffff SRC_TIC_1 */
2125 xf_emit(ctx, 1, 0x4085c000); 3032 xf_emit(ctx, 1, 0x4085c000); /* ffffffff SRC_TIC_2 */
2126 xf_emit(ctx, 1, 0x40); 3033 xf_emit(ctx, 1, 0x40); /* ffffffff SRC_TIC_3 */
2127 xf_emit(ctx, 1, 0x100); 3034 xf_emit(ctx, 1, 0x100); /* ffffffff SRC_TIC_4 */
2128 xf_emit(ctx, 1, 0x10100); 3035 xf_emit(ctx, 1, 0x10100); /* ffffffff SRC_TIC_5 */
2129 xf_emit(ctx, 1, 0x02800000); 3036 xf_emit(ctx, 1, 0x02800000); /* ffffffff SRC_TIC_6 */
3037 xf_emit(ctx, 1, 0); /* ffffffff SRC_TIC_7 */
3038 if (dev_priv->chipset == 0x50) {
3039 xf_emit(ctx, 1, 0); /* 00000001 turing UNK358 */
3040 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34? */
3041 xf_emit(ctx, 1, 0); /* 00000003 turing UNK37C tesla UNK1690 */
3042 xf_emit(ctx, 1, 0); /* 00000003 BLIT_CONTROL */
3043 xf_emit(ctx, 1, 0); /* 00000001 turing UNK32C tesla UNK0F94 */
3044 } else if (!IS_NVAAF(dev_priv->chipset)) {
3045 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34? */
3046 xf_emit(ctx, 1, 0); /* 00000003 */
3047 xf_emit(ctx, 1, 0); /* 000003ff */
3048 xf_emit(ctx, 1, 0); /* 00000003 */
3049 xf_emit(ctx, 1, 0); /* 000003ff */
3050 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1664 / turing UNK03E8 */
3051 xf_emit(ctx, 1, 0); /* 00000003 */
3052 xf_emit(ctx, 1, 0); /* 000003ff */
3053 } else {
3054 xf_emit(ctx, 0x6, 0);
3055 }
3056 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34 */
3057 xf_emit(ctx, 1, 0); /* 0000ffff DMA_TEXTURE */
3058 xf_emit(ctx, 1, 0); /* 0000ffff DMA_SRC */
2130} 3059}
2131 3060
2132static void 3061static void
2133nv50_graph_construct_xfer_tp_x4(struct nouveau_grctx *ctx) 3062nv50_graph_construct_xfer_unk8cxx(struct nouveau_grctx *ctx)
2134{ 3063{
2135 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 3064 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
2136 xf_emit(ctx, 2, 0x04e3bfdf); 3065 xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */
2137 xf_emit(ctx, 1, 1); 3066 xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
2138 xf_emit(ctx, 1, 0); 3067 xf_emit(ctx, 2, 0); /* 7, ffff0ff3 */
2139 xf_emit(ctx, 1, 0x00ffff00); 3068 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
2140 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) 3069 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE */
2141 xf_emit(ctx, 2, 1); 3070 xf_emit(ctx, 1, 0x04e3bfdf); /* ffffffff UNK0D64 */
2142 else 3071 xf_emit(ctx, 1, 0x04e3bfdf); /* ffffffff UNK0DF4 */
2143 xf_emit(ctx, 1, 1); 3072 xf_emit(ctx, 1, 1); /* 00000001 UNK15B4 */
2144 xf_emit(ctx, 2, 0); 3073 xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
2145 xf_emit(ctx, 1, 0x00ffff00); 3074 xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */
2146 xf_emit(ctx, 8, 0); 3075 xf_emit(ctx, 1, 1); /* 00000001 tesla UNK0F98 */
2147 xf_emit(ctx, 1, 1); 3076 if (IS_NVA3F(dev_priv->chipset))
2148 xf_emit(ctx, 1, 0); 3077 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
2149 xf_emit(ctx, 1, 1); 3078 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1668 */
2150 xf_emit(ctx, 1, 0x30201000); 3079 xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
2151 xf_emit(ctx, 1, 0x70605040); 3080 xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */
2152 xf_emit(ctx, 1, 0xb8a89888); 3081 xf_emit(ctx, 1, 0); /* 00000001 POLYGON_SMOOTH_ENABLE */
2153 xf_emit(ctx, 1, 0xf8e8d8c8); 3082 xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */
2154 xf_emit(ctx, 1, 0); 3083 xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
2155 xf_emit(ctx, 1, 0x1a); 3084 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1658 */
2156} 3085 xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */
2157 3086 xf_emit(ctx, 1, 0); /* ffff0ff3 */
2158static void 3087 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
2159nv50_graph_construct_xfer_tp_x5(struct nouveau_grctx *ctx) 3088 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE */
2160{ 3089 xf_emit(ctx, 1, 1); /* 00000001 UNK15B4 */
2161 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 3090 xf_emit(ctx, 1, 0); /* 00000001 POINT_SPRITE_ENABLE */
2162 xf_emit(ctx, 3, 0); 3091 xf_emit(ctx, 1, 1); /* 00000001 tesla UNK165C */
2163 xf_emit(ctx, 1, 0xfac6881); 3092 xf_emit(ctx, 1, 0x30201000); /* ffffffff tesla UNK1670 */
2164 xf_emit(ctx, 4, 0); 3093 xf_emit(ctx, 1, 0x70605040); /* ffffffff tesla UNK1670 */
2165 xf_emit(ctx, 1, 4); 3094 xf_emit(ctx, 1, 0xb8a89888); /* ffffffff tesla UNK1670 */
2166 xf_emit(ctx, 1, 0); 3095 xf_emit(ctx, 1, 0xf8e8d8c8); /* ffffffff tesla UNK1670 */
2167 xf_emit(ctx, 2, 1); 3096 xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */
2168 xf_emit(ctx, 2, 0); 3097 xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
2169 xf_emit(ctx, 1, 1);
2170 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
2171 xf_emit(ctx, 0xb, 0);
2172 else
2173 xf_emit(ctx, 0xa, 0);
2174 xf_emit(ctx, 8, 1);
2175 xf_emit(ctx, 1, 0x11);
2176 xf_emit(ctx, 7, 0);
2177 xf_emit(ctx, 1, 0xfac6881);
2178 xf_emit(ctx, 1, 0xf);
2179 xf_emit(ctx, 7, 0);
2180 xf_emit(ctx, 1, 0x11);
2181 xf_emit(ctx, 1, 1);
2182 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
2183 xf_emit(ctx, 6, 0);
2184 xf_emit(ctx, 1, 1);
2185 xf_emit(ctx, 6, 0);
2186 } else {
2187 xf_emit(ctx, 0xb, 0);
2188 }
2189} 3098}
2190 3099
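(The new IS_NVA3F()/IS_NVAAF() predicates replace the open-coded chipset range checks on the left-hand side. Reconstructed from the conditions they replace — so the exact definitions, which live earlier in nv50_grctx.c, are an assumption here — they amount to:)

/* NVA3/NVA5/NVA8/NVAF family (the old "> 0xa0 && < 0xaa" checks,
 * widened to also catch NVAF), and the NVAA/NVAC IGP pair.
 * Reconstructed for illustration; see the top of nv50_grctx.c
 * for the authoritative macros. */
#define IS_NVA3F(x) (((x) > 0xa0 && (x) < 0xaa) || (x) == 0xaf)
#define IS_NVAAF(x) ((x) >= 0xaa && (x) <= 0xac)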
2191static void 3100static void
@@ -2193,108 +3102,136 @@ nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx)
2193{ 3102{
2194 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 3103 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
2195 if (dev_priv->chipset < 0xa0) { 3104 if (dev_priv->chipset < 0xa0) {
2196 nv50_graph_construct_xfer_tp_x1(ctx); 3105 nv50_graph_construct_xfer_unk84xx(ctx);
2197 nv50_graph_construct_xfer_tp_x2(ctx); 3106 nv50_graph_construct_xfer_tprop(ctx);
2198 nv50_graph_construct_xfer_tp_x3(ctx); 3107 nv50_graph_construct_xfer_tex(ctx);
2199 if (dev_priv->chipset == 0x50) 3108 nv50_graph_construct_xfer_unk8cxx(ctx);
2200 xf_emit(ctx, 0xf, 0);
2201 else
2202 xf_emit(ctx, 0x12, 0);
2203 nv50_graph_construct_xfer_tp_x4(ctx);
2204 } else { 3109 } else {
2205 nv50_graph_construct_xfer_tp_x3(ctx); 3110 nv50_graph_construct_xfer_tex(ctx);
2206 if (dev_priv->chipset < 0xaa) 3111 nv50_graph_construct_xfer_tprop(ctx);
2207 xf_emit(ctx, 0xc, 0); 3112 nv50_graph_construct_xfer_unk8cxx(ctx);
2208 else 3113 nv50_graph_construct_xfer_unk84xx(ctx);
2209 xf_emit(ctx, 0xa, 0);
2210 nv50_graph_construct_xfer_tp_x2(ctx);
2211 nv50_graph_construct_xfer_tp_x5(ctx);
2212 nv50_graph_construct_xfer_tp_x4(ctx);
2213 nv50_graph_construct_xfer_tp_x1(ctx);
2214 } 3114 }
2215} 3115}
2216 3116
2217static void 3117static void
2218nv50_graph_construct_xfer_tp2(struct nouveau_grctx *ctx) 3118nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
2219{ 3119{
2220 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 3120 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
2221 int i, mpcnt; 3121 int i, mpcnt = 2;
2222 if (dev_priv->chipset == 0x98 || dev_priv->chipset == 0xaa) 3122 switch (dev_priv->chipset) {
2223 mpcnt = 1; 3123 case 0x98:
2224 else if (dev_priv->chipset < 0xa0 || dev_priv->chipset >= 0xa8) 3124 case 0xaa:
2225 mpcnt = 2; 3125 mpcnt = 1;
2226 else 3126 break;
2227 mpcnt = 3; 3127 case 0x50:
3128 case 0x84:
3129 case 0x86:
3130 case 0x92:
3131 case 0x94:
3132 case 0x96:
3133 case 0xa8:
3134 case 0xac:
3135 mpcnt = 2;
3136 break;
3137 case 0xa0:
3138 case 0xa3:
3139 case 0xa5:
3140 case 0xaf:
3141 mpcnt = 3;
3142 break;
3143 }
2228 for (i = 0; i < mpcnt; i++) { 3144 for (i = 0; i < mpcnt; i++) {
2229 xf_emit(ctx, 1, 0); 3145 xf_emit(ctx, 1, 0); /* ff */
2230 xf_emit(ctx, 1, 0x80); 3146 xf_emit(ctx, 1, 0x80); /* ffffffff tesla UNK1404 */
2231 xf_emit(ctx, 1, 0x80007004); 3147 xf_emit(ctx, 1, 0x80007004); /* ffffffff tesla UNK12B0 */
2232 xf_emit(ctx, 1, 0x04000400); 3148 xf_emit(ctx, 1, 0x04000400); /* ffffffff */
2233 if (dev_priv->chipset >= 0xa0) 3149 if (dev_priv->chipset >= 0xa0)
2234 xf_emit(ctx, 1, 0xc0); 3150 xf_emit(ctx, 1, 0xc0); /* 00007fff tesla UNK152C */
2235 xf_emit(ctx, 1, 0x1000); 3151 xf_emit(ctx, 1, 0x1000); /* 0000ffff tesla UNK0D60 */
2236 xf_emit(ctx, 2, 0); 3152 xf_emit(ctx, 1, 0); /* ff/3ff */
2237 if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa8) { 3153 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
2238 xf_emit(ctx, 1, 0xe00); 3154 if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x98 || dev_priv->chipset == 0xa8 || IS_NVAAF(dev_priv->chipset)) {
2239 xf_emit(ctx, 1, 0x1e00); 3155 xf_emit(ctx, 1, 0xe00); /* 7fff */
3156 xf_emit(ctx, 1, 0x1e00); /* 7fff */
2240 } 3157 }
2241 xf_emit(ctx, 1, 1); 3158 xf_emit(ctx, 1, 1); /* 000000ff VP_REG_ALLOC_TEMP */
2242 xf_emit(ctx, 2, 0); 3159 xf_emit(ctx, 1, 0); /* 00000001 LINKED_TSC */
3160 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
2243 if (dev_priv->chipset == 0x50) 3161 if (dev_priv->chipset == 0x50)
2244 xf_emit(ctx, 2, 0x1000); 3162 xf_emit(ctx, 2, 0x1000); /* 7fff tesla UNK141C */
2245 xf_emit(ctx, 1, 1); 3163 xf_emit(ctx, 1, 1); /* 000000ff GP_REG_ALLOC_TEMP */
2246 xf_emit(ctx, 1, 0); 3164 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
2247 xf_emit(ctx, 1, 4); 3165 xf_emit(ctx, 1, 4); /* 000000ff FP_REG_ALLOC_TEMP */
2248 xf_emit(ctx, 1, 2); 3166 xf_emit(ctx, 1, 2); /* 00000003 REG_MODE */
2249 if (dev_priv->chipset >= 0xaa) 3167 if (IS_NVAAF(dev_priv->chipset))
2250 xf_emit(ctx, 0xb, 0); 3168 xf_emit(ctx, 0xb, 0); /* RO */
2251 else if (dev_priv->chipset >= 0xa0) 3169 else if (dev_priv->chipset >= 0xa0)
2252 xf_emit(ctx, 0xc, 0); 3170 xf_emit(ctx, 0xc, 0); /* RO */
2253 else 3171 else
2254 xf_emit(ctx, 0xa, 0); 3172 xf_emit(ctx, 0xa, 0); /* RO */
2255 } 3173 }
2256 xf_emit(ctx, 1, 0x08100c12); 3174 xf_emit(ctx, 1, 0x08100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
2257 xf_emit(ctx, 1, 0); 3175 xf_emit(ctx, 1, 0); /* ff/3ff */
2258 if (dev_priv->chipset >= 0xa0) { 3176 if (dev_priv->chipset >= 0xa0) {
2259 xf_emit(ctx, 1, 0x1fe21); 3177 xf_emit(ctx, 1, 0x1fe21); /* 0003ffff tesla UNK0FAC */
2260 } 3178 }
2261 xf_emit(ctx, 5, 0); 3179 xf_emit(ctx, 3, 0); /* 7fff, 0, 0 */
2262 xf_emit(ctx, 4, 0xffff); 3180 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
2263 xf_emit(ctx, 1, 1); 3181 xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
2264 xf_emit(ctx, 2, 0x10001); 3182 xf_emit(ctx, 4, 0xffff); /* 0000ffff MSAA_MASK */
2265 xf_emit(ctx, 1, 1); 3183 xf_emit(ctx, 1, 1); /* 00000001 LANES32 */
2266 xf_emit(ctx, 1, 0); 3184 xf_emit(ctx, 1, 0x10001); /* 00ffffff BLOCK_ALLOC */
2267 xf_emit(ctx, 1, 0x1fe21); 3185 xf_emit(ctx, 1, 0x10001); /* ffffffff BLOCKDIM_XY */
2268 xf_emit(ctx, 1, 0); 3186 xf_emit(ctx, 1, 1); /* 0000ffff BLOCKDIM_Z */
2269 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) 3187 xf_emit(ctx, 1, 0); /* ffffffff SHARED_SIZE */
2270	xf_emit(ctx, 1, 1); 3188	xf_emit(ctx, 1, 0x1fe21); /* 1ffff/3ffff[NVA0+] tesla UNK0FAC */
2271 xf_emit(ctx, 4, 0); 3189 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34 */
2272 xf_emit(ctx, 1, 0x08100c12); 3190 if (IS_NVA3F(dev_priv->chipset))
2273 xf_emit(ctx, 1, 4); 3191 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
2274 xf_emit(ctx, 1, 0); 3192 xf_emit(ctx, 1, 0); /* ff/3ff */
2275 xf_emit(ctx, 1, 2); 3193 xf_emit(ctx, 1, 0); /* 1 LINKED_TSC */
2276 xf_emit(ctx, 1, 0x11); 3194 xf_emit(ctx, 1, 0); /* ff FP_ADDRESS_HIGH */
2277 xf_emit(ctx, 8, 0); 3195 xf_emit(ctx, 1, 0); /* ffffffff FP_ADDRESS_LOW */
2278 xf_emit(ctx, 1, 0xfac6881); 3196 xf_emit(ctx, 1, 0x08100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
2279 xf_emit(ctx, 1, 0); 3197 xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */
2280 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) 3198 xf_emit(ctx, 1, 0); /* 000000ff FRAG_COLOR_CLAMP_EN */
2281 xf_emit(ctx, 1, 3); 3199 xf_emit(ctx, 1, 2); /* 00000003 REG_MODE */
2282 xf_emit(ctx, 3, 0); 3200 xf_emit(ctx, 1, 0x11); /* 0000007f RT_FORMAT */
2283 xf_emit(ctx, 1, 4); 3201 xf_emit(ctx, 7, 0); /* 0000007f RT_FORMAT */
2284 xf_emit(ctx, 9, 0); 3202 xf_emit(ctx, 1, 0); /* 00000007 */
2285 xf_emit(ctx, 1, 2); 3203 xf_emit(ctx, 1, 0xfac6881); /* 0fffffff RT_CONTROL */
2286 xf_emit(ctx, 2, 1); 3204 xf_emit(ctx, 1, 0); /* 00000003 MULTISAMPLE_CTRL */
2287 xf_emit(ctx, 1, 2); 3205 if (IS_NVA3F(dev_priv->chipset))
2288 xf_emit(ctx, 3, 1); 3206 xf_emit(ctx, 1, 3); /* 00000003 tesla UNK16B4 */
2289 xf_emit(ctx, 1, 0); 3207 xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
2290 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { 3208 xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */
2291 xf_emit(ctx, 8, 2); 3209 xf_emit(ctx, 1, 0); /* 00000001 FRAMEBUFFER_SRGB */
2292 xf_emit(ctx, 0x10, 1); 3210 xf_emit(ctx, 1, 4); /* ffffffff tesla UNK1400 */
2293 xf_emit(ctx, 8, 2); 3211 xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */
2294 xf_emit(ctx, 0x18, 1); 3212 xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
2295 xf_emit(ctx, 3, 0); 3213 xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */
3214 xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */
3215 xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */
3216 xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */
3217 xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */
3218 xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */
3219 xf_emit(ctx, 1, 1); /* 00000001 UNK133C */
3220 if (IS_NVA3F(dev_priv->chipset)) {
3221 xf_emit(ctx, 1, 0); /* 00000001 UNK12E4 */
3222 xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_RGB */
3223 xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_RGB */
3224 xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */
3225 xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_ALPHA */
3226 xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_ALPHA */
3227 xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */
3228 xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */
3229 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1928 */
3230 xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */
2296 } 3231 }
2297 xf_emit(ctx, 1, 4); 3232 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0F90 */
3233 xf_emit(ctx, 1, 4); /* 000000ff FP_RESULT_COUNT */
3234 /* XXX: demagic this part some day */
2298 if (dev_priv->chipset == 0x50) 3235 if (dev_priv->chipset == 0x50)
2299 xf_emit(ctx, 0x3a0, 0); 3236 xf_emit(ctx, 0x3a0, 0);
2300 else if (dev_priv->chipset < 0x94) 3237 else if (dev_priv->chipset < 0x94)
@@ -2303,9 +3240,9 @@ nv50_graph_construct_xfer_tp2(struct nouveau_grctx *ctx)
2303 xf_emit(ctx, 0x39f, 0); 3240 xf_emit(ctx, 0x39f, 0);
2304 else 3241 else
2305 xf_emit(ctx, 0x3a3, 0); 3242 xf_emit(ctx, 0x3a3, 0);
2306 xf_emit(ctx, 1, 0x11); 3243 xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */
2307 xf_emit(ctx, 1, 0); 3244 xf_emit(ctx, 1, 0); /* 7 OPERATION */
2308 xf_emit(ctx, 1, 1); 3245 xf_emit(ctx, 1, 1); /* 1 DST_LINEAR */
2309 xf_emit(ctx, 0x2d, 0); 3246 xf_emit(ctx, 0x2d, 0);
2310} 3247}
2311 3248
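(The mpcnt switch above enumerates every supported chipset explicitly; a hypothetical helper — nv50_mp_count() is not a function in the driver, just an illustration — shows the same mapping in isolation:)

/* MPs per TP, per chipset, mirroring the switch in
 * nv50_graph_construct_xfer_mpc().  The default of 2 matches the
 * initializer there and covers the 0x50..0x96 and 0xa8/0xac cases
 * that the switch lists one by one. */
static int nv50_mp_count(int chipset)
{
	switch (chipset) {
	case 0x98:
	case 0xaa:
		return 1;	/* NV98, NVAA */
	case 0xa0:
	case 0xa3:
	case 0xa5:
	case 0xaf:
		return 3;	/* NVA0 and the NVA3+ family */
	default:
		return 2;	/* NV50..NV96, NVA8, NVAC */
	}
}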
@@ -2323,52 +3260,56 @@ nv50_graph_construct_xfer2(struct nouveau_grctx *ctx)
2323 if (dev_priv->chipset < 0xa0) { 3260 if (dev_priv->chipset < 0xa0) {
2324 for (i = 0; i < 8; i++) { 3261 for (i = 0; i < 8; i++) {
2325 ctx->ctxvals_pos = offset + i; 3262 ctx->ctxvals_pos = offset + i;
3263 /* that little bugger belongs to csched. No idea
3264 * what it's doing here. */
2326 if (i == 0) 3265 if (i == 0)
2327 xf_emit(ctx, 1, 0x08100c12); 3266 xf_emit(ctx, 1, 0x08100c12); /* FP_INTERPOLANT_CTRL */
2328 if (units & (1 << i)) 3267 if (units & (1 << i))
2329 nv50_graph_construct_xfer_tp2(ctx); 3268 nv50_graph_construct_xfer_mpc(ctx);
2330 if ((ctx->ctxvals_pos-offset)/8 > size) 3269 if ((ctx->ctxvals_pos-offset)/8 > size)
2331 size = (ctx->ctxvals_pos-offset)/8; 3270 size = (ctx->ctxvals_pos-offset)/8;
2332 } 3271 }
2333 } else { 3272 } else {
2334 /* Strand 0: TPs 0, 1 */ 3273 /* Strand 0: TPs 0, 1 */
2335 ctx->ctxvals_pos = offset; 3274 ctx->ctxvals_pos = offset;
2336 xf_emit(ctx, 1, 0x08100c12); 3275 /* that little bugger belongs to csched. No idea
3276 * what it's doing here. */
3277 xf_emit(ctx, 1, 0x08100c12); /* FP_INTERPOLANT_CTRL */
2337 if (units & (1 << 0)) 3278 if (units & (1 << 0))
2338 nv50_graph_construct_xfer_tp2(ctx); 3279 nv50_graph_construct_xfer_mpc(ctx);
2339 if (units & (1 << 1)) 3280 if (units & (1 << 1))
2340 nv50_graph_construct_xfer_tp2(ctx); 3281 nv50_graph_construct_xfer_mpc(ctx);
2341 if ((ctx->ctxvals_pos-offset)/8 > size) 3282 if ((ctx->ctxvals_pos-offset)/8 > size)
2342 size = (ctx->ctxvals_pos-offset)/8; 3283 size = (ctx->ctxvals_pos-offset)/8;
2343 3284
2344 /* Strand 0: TPs 2, 3 */ 3285 /* Strand 1: TPs 2, 3 */
2345 ctx->ctxvals_pos = offset + 1; 3286 ctx->ctxvals_pos = offset + 1;
2346 if (units & (1 << 2)) 3287 if (units & (1 << 2))
2347 nv50_graph_construct_xfer_tp2(ctx); 3288 nv50_graph_construct_xfer_mpc(ctx);
2348 if (units & (1 << 3)) 3289 if (units & (1 << 3))
2349 nv50_graph_construct_xfer_tp2(ctx); 3290 nv50_graph_construct_xfer_mpc(ctx);
2350 if ((ctx->ctxvals_pos-offset)/8 > size) 3291 if ((ctx->ctxvals_pos-offset)/8 > size)
2351 size = (ctx->ctxvals_pos-offset)/8; 3292 size = (ctx->ctxvals_pos-offset)/8;
2352 3293
2353 /* Strand 0: TPs 4, 5, 6 */ 3294 /* Strand 2: TPs 4, 5, 6 */
2354 ctx->ctxvals_pos = offset + 2; 3295 ctx->ctxvals_pos = offset + 2;
2355 if (units & (1 << 4)) 3296 if (units & (1 << 4))
2356 nv50_graph_construct_xfer_tp2(ctx); 3297 nv50_graph_construct_xfer_mpc(ctx);
2357 if (units & (1 << 5)) 3298 if (units & (1 << 5))
2358 nv50_graph_construct_xfer_tp2(ctx); 3299 nv50_graph_construct_xfer_mpc(ctx);
2359 if (units & (1 << 6)) 3300 if (units & (1 << 6))
2360 nv50_graph_construct_xfer_tp2(ctx); 3301 nv50_graph_construct_xfer_mpc(ctx);
2361 if ((ctx->ctxvals_pos-offset)/8 > size) 3302 if ((ctx->ctxvals_pos-offset)/8 > size)
2362 size = (ctx->ctxvals_pos-offset)/8; 3303 size = (ctx->ctxvals_pos-offset)/8;
2363 3304
2364 /* Strand 0: TPs 7, 8, 9 */ 3305 /* Strand 3: TPs 7, 8, 9 */
2365 ctx->ctxvals_pos = offset + 3; 3306 ctx->ctxvals_pos = offset + 3;
2366 if (units & (1 << 7)) 3307 if (units & (1 << 7))
2367 nv50_graph_construct_xfer_tp2(ctx); 3308 nv50_graph_construct_xfer_mpc(ctx);
2368 if (units & (1 << 8)) 3309 if (units & (1 << 8))
2369 nv50_graph_construct_xfer_tp2(ctx); 3310 nv50_graph_construct_xfer_mpc(ctx);
2370 if (units & (1 << 9)) 3311 if (units & (1 << 9))
2371 nv50_graph_construct_xfer_tp2(ctx); 3312 nv50_graph_construct_xfer_mpc(ctx);
2372 if ((ctx->ctxvals_pos-offset)/8 > size) 3313 if ((ctx->ctxvals_pos-offset)/8 > size)
2373 size = (ctx->ctxvals_pos-offset)/8; 3314 size = (ctx->ctxvals_pos-offset)/8;
2374 } 3315 }
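(On NVA0+ the four strands carry fixed groups of TPs, as the corrected "Strand N" comments above spell out; a hypothetical helper, not present in the driver, that captures the bucketing:)

/* Which NVA0+ strand carries a given TP's MPC state; matches the
 * "Strand N: TPs ..." comments in nv50_graph_construct_xfer2(). */
static int nva0_tp_strand(int tp)
{
	if (tp < 2)
		return 0;	/* TPs 0, 1 */
	if (tp < 4)
		return 1;	/* TPs 2, 3 */
	if (tp < 7)
		return 2;	/* TPs 4, 5, 6 */
	return 3;		/* TPs 7, 8, 9 */
}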
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index 91ef93cf1f35..a53fc974332b 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -32,39 +32,87 @@
32struct nv50_instmem_priv { 32struct nv50_instmem_priv {
33 uint32_t save1700[5]; /* 0x1700->0x1710 */ 33 uint32_t save1700[5]; /* 0x1700->0x1710 */
34 34
35 struct nouveau_gpuobj_ref *pramin_pt; 35 struct nouveau_gpuobj *pramin_pt;
36 struct nouveau_gpuobj_ref *pramin_bar; 36 struct nouveau_gpuobj *pramin_bar;
37 struct nouveau_gpuobj_ref *fb_bar; 37 struct nouveau_gpuobj *fb_bar;
38}; 38};
39 39
40#define NV50_INSTMEM_PAGE_SHIFT 12 40static void
41#define NV50_INSTMEM_PAGE_SIZE (1 << NV50_INSTMEM_PAGE_SHIFT) 41nv50_channel_del(struct nouveau_channel **pchan)
42#define NV50_INSTMEM_PT_SIZE(a) (((a) >> 12) << 3) 42{
43 struct nouveau_channel *chan;
43 44
44/*NOTE: - Assumes 0x1700 already covers the correct MiB of PRAMIN 45 chan = *pchan;
45 */ 46 *pchan = NULL;
46#define BAR0_WI32(g, o, v) do { \ 47 if (!chan)
47 uint32_t offset; \ 48 return;
48 if ((g)->im_backing) { \ 49
49 offset = (g)->im_backing_start; \ 50 nouveau_gpuobj_ref(NULL, &chan->ramfc);
50 } else { \ 51 nouveau_gpuobj_ref(NULL, &chan->vm_pd);
51 offset = chan->ramin->gpuobj->im_backing_start; \ 52 if (chan->ramin_heap.free_stack.next)
52 offset += (g)->im_pramin->start; \ 53 drm_mm_takedown(&chan->ramin_heap);
53 } \ 54 nouveau_gpuobj_ref(NULL, &chan->ramin);
54 offset += (o); \ 55 kfree(chan);
55 nv_wr32(dev, NV_RAMIN + (offset & 0xfffff), (v)); \ 56}
56} while (0) 57
58static int
59nv50_channel_new(struct drm_device *dev, u32 size,
60 struct nouveau_channel **pchan)
61{
62 struct drm_nouveau_private *dev_priv = dev->dev_private;
63 u32 pgd = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
64 u32 fc = (dev_priv->chipset == 0x50) ? 0x0000 : 0x4200;
65 struct nouveau_channel *chan;
66 int ret;
67
68 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
69 if (!chan)
70 return -ENOMEM;
71 chan->dev = dev;
72
73 ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
74 if (ret) {
75 nv50_channel_del(&chan);
76 return ret;
77 }
78
79 ret = drm_mm_init(&chan->ramin_heap, 0x6000, chan->ramin->size);
80 if (ret) {
81 nv50_channel_del(&chan);
82 return ret;
83 }
84
85 ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? ~0 :
86 chan->ramin->pinst + pgd,
87 chan->ramin->vinst + pgd,
88 0x4000, NVOBJ_FLAG_ZERO_ALLOC,
89 &chan->vm_pd);
90 if (ret) {
91 nv50_channel_del(&chan);
92 return ret;
93 }
94
95 ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? ~0 :
96 chan->ramin->pinst + fc,
97 chan->ramin->vinst + fc, 0x100,
98 NVOBJ_FLAG_ZERO_ALLOC, &chan->ramfc);
99 if (ret) {
100 nv50_channel_del(&chan);
101 return ret;
102 }
103
104 *pchan = chan;
105 return 0;
106}
57 107
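(nv50_channel_new() cleans up after itself on every failure path by calling nv50_channel_del() on the half-built channel, so a caller inside this file only needs the pattern below. This is a sketch; the one real caller is nv50_instmem_init() just after.)

/* Caller sketch: on failure chan stays NULL; on success the matching
 * teardown is a single NULL-safe nv50_channel_del(). */
struct nouveau_channel *chan = NULL;
int ret = nv50_channel_new(dev, 128 * 1024, &chan);
if (ret)
	return ret;
/* ... chan->ramin, chan->vm_pd and chan->ramfc are now usable ... */
nv50_channel_del(&chan);	/* also resets chan to NULL */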
58int 108int
59nv50_instmem_init(struct drm_device *dev) 109nv50_instmem_init(struct drm_device *dev)
60{ 110{
61 struct drm_nouveau_private *dev_priv = dev->dev_private; 111 struct drm_nouveau_private *dev_priv = dev->dev_private;
62 struct nouveau_channel *chan;
63 uint32_t c_offset, c_size, c_ramfc, c_vmpd, c_base, pt_size;
64 uint32_t save_nv001700;
65 uint64_t v;
66 struct nv50_instmem_priv *priv; 112 struct nv50_instmem_priv *priv;
113 struct nouveau_channel *chan;
67 int ret, i; 114 int ret, i;
115 u32 tmp;
68 116
69 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 117 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
70 if (!priv) 118 if (!priv)
@@ -75,212 +123,115 @@ nv50_instmem_init(struct drm_device *dev)
75 for (i = 0x1700; i <= 0x1710; i += 4) 123 for (i = 0x1700; i <= 0x1710; i += 4)
76 priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i); 124 priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i);
77 125
78	/* Reserve the last MiB of VRAM; we should probably try to avoid 126	/* Global PRAMIN heap */
79 * setting up the below tables over the top of the VBIOS image at 127 ret = drm_mm_init(&dev_priv->ramin_heap, 0, dev_priv->ramin_size);
80 * some point. 128 if (ret) {
81 */ 129 NV_ERROR(dev, "Failed to init RAMIN heap\n");
82 dev_priv->ramin_rsvd_vram = 1 << 20;
83 c_offset = dev_priv->vram_size - dev_priv->ramin_rsvd_vram;
84 c_size = 128 << 10;
85 c_vmpd = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x1400 : 0x200;
86 c_ramfc = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x0 : 0x20;
87 c_base = c_vmpd + 0x4000;
88 pt_size = NV50_INSTMEM_PT_SIZE(dev_priv->ramin_size);
89
90 NV_DEBUG(dev, " Rsvd VRAM base: 0x%08x\n", c_offset);
91 NV_DEBUG(dev, " VBIOS image: 0x%08x\n",
92 (nv_rd32(dev, 0x619f04) & ~0xff) << 8);
93 NV_DEBUG(dev, " Aperture size: %d MiB\n", dev_priv->ramin_size >> 20);
94 NV_DEBUG(dev, " PT size: %d KiB\n", pt_size >> 10);
95
96 /* Determine VM layout, we need to do this first to make sure
97 * we allocate enough memory for all the page tables.
98 */
99 dev_priv->vm_gart_base = roundup(NV50_VM_BLOCK, NV50_VM_BLOCK);
100 dev_priv->vm_gart_size = NV50_VM_BLOCK;
101
102 dev_priv->vm_vram_base = dev_priv->vm_gart_base + dev_priv->vm_gart_size;
103 dev_priv->vm_vram_size = dev_priv->vram_size;
104 if (dev_priv->vm_vram_size > NV50_VM_MAX_VRAM)
105 dev_priv->vm_vram_size = NV50_VM_MAX_VRAM;
106 dev_priv->vm_vram_size = roundup(dev_priv->vm_vram_size, NV50_VM_BLOCK);
107 dev_priv->vm_vram_pt_nr = dev_priv->vm_vram_size / NV50_VM_BLOCK;
108
109 dev_priv->vm_end = dev_priv->vm_vram_base + dev_priv->vm_vram_size;
110
111 NV_DEBUG(dev, "NV50VM: GART 0x%016llx-0x%016llx\n",
112 dev_priv->vm_gart_base,
113 dev_priv->vm_gart_base + dev_priv->vm_gart_size - 1);
114 NV_DEBUG(dev, "NV50VM: VRAM 0x%016llx-0x%016llx\n",
115 dev_priv->vm_vram_base,
116 dev_priv->vm_vram_base + dev_priv->vm_vram_size - 1);
117
118 c_size += dev_priv->vm_vram_pt_nr * (NV50_VM_BLOCK / 65536 * 8);
119
120 /* Map BAR0 PRAMIN aperture over the memory we want to use */
121 save_nv001700 = nv_rd32(dev, NV50_PUNK_BAR0_PRAMIN);
122 nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, (c_offset >> 16));
123
124 /* Create a fake channel, and use it as our "dummy" channels 0/127.
125 * The main reason for creating a channel is so we can use the gpuobj
126	 * code. However, it's probably worth noting that NVIDIA also set up
127 * their channels 0/127 with the same values they configure here.
128 * So, there may be some other reason for doing this.
129 *
130 * Have to create the entire channel manually, as the real channel
131 * creation code assumes we have PRAMIN access, and we don't until
132 * we're done here.
133 */
134 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
135 if (!chan)
136 return -ENOMEM; 130 return -ENOMEM;
137 chan->id = 0; 131 }
138 chan->dev = dev;
139 chan->file_priv = (struct drm_file *)-2;
140 dev_priv->fifos[0] = dev_priv->fifos[127] = chan;
141
142 INIT_LIST_HEAD(&chan->ramht_refs);
143 132
144 /* Channel's PRAMIN object + heap */ 133 /* we need a channel to plug into the hw to control the BARs */
145 ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, c_size, 0, 134 ret = nv50_channel_new(dev, 128*1024, &dev_priv->fifos[0]);
146 NULL, &chan->ramin);
147 if (ret) 135 if (ret)
148 return ret; 136 return ret;
137 chan = dev_priv->fifos[127] = dev_priv->fifos[0];
149 138
150 if (drm_mm_init(&chan->ramin_heap, c_base, c_size - c_base)) 139 /* allocate page table for PRAMIN BAR */
151 return -ENOMEM; 140 ret = nouveau_gpuobj_new(dev, chan, (dev_priv->ramin_size >> 12) * 8,
152 141 0x1000, NVOBJ_FLAG_ZERO_ALLOC,
153 /* RAMFC + zero channel's PRAMIN up to start of VM pagedir */ 142 &priv->pramin_pt);
154 ret = nouveau_gpuobj_new_fake(dev, c_ramfc, c_offset + c_ramfc,
155 0x4000, 0, NULL, &chan->ramfc);
156 if (ret) 143 if (ret)
157 return ret; 144 return ret;
158 145
159 for (i = 0; i < c_vmpd; i += 4) 146 nv_wo32(chan->vm_pd, 0x0000, priv->pramin_pt->vinst | 0x63);
160 BAR0_WI32(chan->ramin->gpuobj, i, 0); 147 nv_wo32(chan->vm_pd, 0x0004, 0);
161 148
162 /* VM page directory */ 149 /* DMA object for PRAMIN BAR */
163 ret = nouveau_gpuobj_new_fake(dev, c_vmpd, c_offset + c_vmpd, 150 ret = nouveau_gpuobj_new(dev, chan, 6*4, 16, 0, &priv->pramin_bar);
164 0x4000, 0, &chan->vm_pd, NULL);
165 if (ret) 151 if (ret)
166 return ret; 152 return ret;
167 for (i = 0; i < 0x4000; i += 8) { 153 nv_wo32(priv->pramin_bar, 0x00, 0x7fc00000);
168 BAR0_WI32(chan->vm_pd, i + 0x00, 0x00000000); 154 nv_wo32(priv->pramin_bar, 0x04, dev_priv->ramin_size - 1);
169 BAR0_WI32(chan->vm_pd, i + 0x04, 0x00000000); 155 nv_wo32(priv->pramin_bar, 0x08, 0x00000000);
170 } 156 nv_wo32(priv->pramin_bar, 0x0c, 0x00000000);
171 157 nv_wo32(priv->pramin_bar, 0x10, 0x00000000);
172 /* PRAMIN page table, cheat and map into VM at 0x0000000000. 158 nv_wo32(priv->pramin_bar, 0x14, 0x00000000);
173 * We map the entire fake channel into the start of the PRAMIN BAR 159
174 */ 160 /* map channel into PRAMIN, gpuobj didn't do it for us */
175 ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000, 161 ret = nv50_instmem_bind(dev, chan->ramin);
176 0, &priv->pramin_pt);
177 if (ret) 162 if (ret)
178 return ret; 163 return ret;
179 164
180 v = c_offset | 1; 165 /* poke regs... */
181 if (dev_priv->vram_sys_base) { 166 nv_wr32(dev, 0x001704, 0x00000000 | (chan->ramin->vinst >> 12));
182 v += dev_priv->vram_sys_base; 167 nv_wr32(dev, 0x001704, 0x40000000 | (chan->ramin->vinst >> 12));
183 v |= 0x30; 168 nv_wr32(dev, 0x00170c, 0x80000000 | (priv->pramin_bar->cinst >> 4));
184 }
185 169
186 i = 0; 170 tmp = nv_ri32(dev, 0);
187 while (v < dev_priv->vram_sys_base + c_offset + c_size) { 171 nv_wi32(dev, 0, ~tmp);
188 BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, lower_32_bits(v)); 172 if (nv_ri32(dev, 0) != ~tmp) {
189 BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, upper_32_bits(v)); 173 NV_ERROR(dev, "PRAMIN readback failed\n");
190 v += 0x1000; 174 return -EIO;
191 i += 8;
192 } 175 }
176 nv_wi32(dev, 0, tmp);
193 177
194 while (i < pt_size) { 178 dev_priv->ramin_available = true;
195 BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000000);
196 BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
197 i += 8;
198 }
199 179
200 BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63); 180 /* Determine VM layout */
201 BAR0_WI32(chan->vm_pd, 0x04, 0x00000000); 181 dev_priv->vm_gart_base = roundup(NV50_VM_BLOCK, NV50_VM_BLOCK);
182 dev_priv->vm_gart_size = NV50_VM_BLOCK;
183
184 dev_priv->vm_vram_base = dev_priv->vm_gart_base + dev_priv->vm_gart_size;
185 dev_priv->vm_vram_size = dev_priv->vram_size;
186 if (dev_priv->vm_vram_size > NV50_VM_MAX_VRAM)
187 dev_priv->vm_vram_size = NV50_VM_MAX_VRAM;
188 dev_priv->vm_vram_size = roundup(dev_priv->vm_vram_size, NV50_VM_BLOCK);
189 dev_priv->vm_vram_pt_nr = dev_priv->vm_vram_size / NV50_VM_BLOCK;
190
191 dev_priv->vm_end = dev_priv->vm_vram_base + dev_priv->vm_vram_size;
192
193 NV_DEBUG(dev, "NV50VM: GART 0x%016llx-0x%016llx\n",
194 dev_priv->vm_gart_base,
195 dev_priv->vm_gart_base + dev_priv->vm_gart_size - 1);
196 NV_DEBUG(dev, "NV50VM: VRAM 0x%016llx-0x%016llx\n",
197 dev_priv->vm_vram_base,
198 dev_priv->vm_vram_base + dev_priv->vm_vram_size - 1);
202 199
203 /* VRAM page table(s), mapped into VM at +1GiB */ 200 /* VRAM page table(s), mapped into VM at +1GiB */
204 for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) { 201 for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
205 ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 202 ret = nouveau_gpuobj_new(dev, NULL, NV50_VM_BLOCK / 0x10000 * 8,
206 NV50_VM_BLOCK/65536*8, 0, 0, 203 0, NVOBJ_FLAG_ZERO_ALLOC,
207 &chan->vm_vram_pt[i]); 204 &chan->vm_vram_pt[i]);
208 if (ret) { 205 if (ret) {
209 NV_ERROR(dev, "Error creating VRAM page tables: %d\n", 206 NV_ERROR(dev, "Error creating VRAM PGT: %d\n", ret);
210 ret);
211 dev_priv->vm_vram_pt_nr = i; 207 dev_priv->vm_vram_pt_nr = i;
212 return ret; 208 return ret;
213 } 209 }
214 dev_priv->vm_vram_pt[i] = chan->vm_vram_pt[i]->gpuobj; 210 dev_priv->vm_vram_pt[i] = chan->vm_vram_pt[i];
215 211
216 for (v = 0; v < dev_priv->vm_vram_pt[i]->im_pramin->size; 212 nv_wo32(chan->vm_pd, 0x10 + (i*8),
217 v += 4) 213 chan->vm_vram_pt[i]->vinst | 0x61);
218 BAR0_WI32(dev_priv->vm_vram_pt[i], v, 0); 214 nv_wo32(chan->vm_pd, 0x14 + (i*8), 0);
219
220 BAR0_WI32(chan->vm_pd, 0x10 + (i*8),
221 chan->vm_vram_pt[i]->instance | 0x61);
222 BAR0_WI32(chan->vm_pd, 0x14 + (i*8), 0);
223 } 215 }
224 216
225 /* DMA object for PRAMIN BAR */
226 ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 6*4, 16, 0,
227 &priv->pramin_bar);
228 if (ret)
229 return ret;
230 BAR0_WI32(priv->pramin_bar->gpuobj, 0x00, 0x7fc00000);
231 BAR0_WI32(priv->pramin_bar->gpuobj, 0x04, dev_priv->ramin_size - 1);
232 BAR0_WI32(priv->pramin_bar->gpuobj, 0x08, 0x00000000);
233 BAR0_WI32(priv->pramin_bar->gpuobj, 0x0c, 0x00000000);
234 BAR0_WI32(priv->pramin_bar->gpuobj, 0x10, 0x00000000);
235 BAR0_WI32(priv->pramin_bar->gpuobj, 0x14, 0x00000000);
236
237 /* DMA object for FB BAR */ 217 /* DMA object for FB BAR */
238 ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 6*4, 16, 0, 218 ret = nouveau_gpuobj_new(dev, chan, 6*4, 16, 0, &priv->fb_bar);
239 &priv->fb_bar);
240 if (ret) 219 if (ret)
241 return ret; 220 return ret;
242 BAR0_WI32(priv->fb_bar->gpuobj, 0x00, 0x7fc00000); 221 nv_wo32(priv->fb_bar, 0x00, 0x7fc00000);
243 BAR0_WI32(priv->fb_bar->gpuobj, 0x04, 0x40000000 + 222 nv_wo32(priv->fb_bar, 0x04, 0x40000000 +
244 pci_resource_len(dev->pdev, 1) - 1); 223 pci_resource_len(dev->pdev, 1) - 1);
245 BAR0_WI32(priv->fb_bar->gpuobj, 0x08, 0x40000000); 224 nv_wo32(priv->fb_bar, 0x08, 0x40000000);
246 BAR0_WI32(priv->fb_bar->gpuobj, 0x0c, 0x00000000); 225 nv_wo32(priv->fb_bar, 0x0c, 0x00000000);
247 BAR0_WI32(priv->fb_bar->gpuobj, 0x10, 0x00000000); 226 nv_wo32(priv->fb_bar, 0x10, 0x00000000);
248 BAR0_WI32(priv->fb_bar->gpuobj, 0x14, 0x00000000); 227 nv_wo32(priv->fb_bar, 0x14, 0x00000000);
249 228
250 /* Poke the relevant regs, and pray it works :) */ 229 dev_priv->engine.instmem.flush(dev);
251 nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12));
252 nv_wr32(dev, NV50_PUNK_UNK1710, 0);
253 nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12) |
254 NV50_PUNK_BAR_CFG_BASE_VALID);
255 nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->instance >> 4) |
256 NV50_PUNK_BAR1_CTXDMA_VALID);
257 nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->instance >> 4) |
258 NV50_PUNK_BAR3_CTXDMA_VALID);
259 230
231 nv_wr32(dev, 0x001708, 0x80000000 | (priv->fb_bar->cinst >> 4));
260 for (i = 0; i < 8; i++) 232 for (i = 0; i < 8; i++)
261 nv_wr32(dev, 0x1900 + (i*4), 0); 233 nv_wr32(dev, 0x1900 + (i*4), 0);
262 234
263 /* Assume that praying isn't enough, check that we can re-read the
264 * entire fake channel back from the PRAMIN BAR */
265 for (i = 0; i < c_size; i += 4) {
266 if (nv_rd32(dev, NV_RAMIN + i) != nv_ri32(dev, i)) {
267 NV_ERROR(dev, "Error reading back PRAMIN at 0x%08x\n",
268 i);
269 return -EINVAL;
270 }
271 }
272
273 nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, save_nv001700);
274
275 /* Global PRAMIN heap */
276 if (drm_mm_init(&dev_priv->ramin_heap, c_size, dev_priv->ramin_size - c_size)) {
277 NV_ERROR(dev, "Failed to init RAMIN heap\n");
278 }
279
280 /*XXX: incorrect, but needed to make hash func "work" */
281 dev_priv->ramht_offset = 0x10000;
282 dev_priv->ramht_bits = 9;
283 dev_priv->ramht_size = (1 << dev_priv->ramht_bits) * 8;
284 return 0; 235 return 0;
285} 236}
286 237
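(The readback test in the middle of nv50_instmem_init() is worth calling out: it flips the first PRAMIN word through BAR3 and checks that the inversion sticks before declaring ramin_available. Pulled out as a stand-alone helper for illustration — nv50_pramin_probe() is hypothetical, but the logic is verbatim from the hunk above:)

/* Sanity-check that BAR3 accesses really reach PRAMIN: invert the
 * first word, verify the readback, then restore the original value. */
static int nv50_pramin_probe(struct drm_device *dev)
{
	u32 tmp = nv_ri32(dev, 0);

	nv_wi32(dev, 0, ~tmp);
	if (nv_ri32(dev, 0) != ~tmp)
		return -EIO;	/* BAR3 page tables not live yet */
	nv_wi32(dev, 0, tmp);
	return 0;
}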
@@ -297,29 +248,24 @@ nv50_instmem_takedown(struct drm_device *dev)
297 if (!priv) 248 if (!priv)
298 return; 249 return;
299 250
251 dev_priv->ramin_available = false;
252
300 /* Restore state from before init */ 253 /* Restore state from before init */
301 for (i = 0x1700; i <= 0x1710; i += 4) 254 for (i = 0x1700; i <= 0x1710; i += 4)
302 nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]); 255 nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]);
303 256
304 nouveau_gpuobj_ref_del(dev, &priv->fb_bar); 257 nouveau_gpuobj_ref(NULL, &priv->fb_bar);
305 nouveau_gpuobj_ref_del(dev, &priv->pramin_bar); 258 nouveau_gpuobj_ref(NULL, &priv->pramin_bar);
306 nouveau_gpuobj_ref_del(dev, &priv->pramin_pt); 259 nouveau_gpuobj_ref(NULL, &priv->pramin_pt);
307 260
308 /* Destroy dummy channel */ 261 /* Destroy dummy channel */
309 if (chan) { 262 if (chan) {
310 for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) { 263 for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
311 nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]); 264 nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]);
312 dev_priv->vm_vram_pt[i] = NULL;
313 }
314 dev_priv->vm_vram_pt_nr = 0; 265 dev_priv->vm_vram_pt_nr = 0;
315 266
316 nouveau_gpuobj_del(dev, &chan->vm_pd); 267 nv50_channel_del(&dev_priv->fifos[0]);
317 nouveau_gpuobj_ref_del(dev, &chan->ramfc); 268 dev_priv->fifos[127] = NULL;
318 nouveau_gpuobj_ref_del(dev, &chan->ramin);
319 drm_mm_takedown(&chan->ramin_heap);
320
321 dev_priv->fifos[0] = dev_priv->fifos[127] = NULL;
322 kfree(chan);
323 } 269 }
324 270
325 dev_priv->engine.instmem.priv = NULL; 271 dev_priv->engine.instmem.priv = NULL;
@@ -331,14 +277,14 @@ nv50_instmem_suspend(struct drm_device *dev)
331{ 277{
332 struct drm_nouveau_private *dev_priv = dev->dev_private; 278 struct drm_nouveau_private *dev_priv = dev->dev_private;
333 struct nouveau_channel *chan = dev_priv->fifos[0]; 279 struct nouveau_channel *chan = dev_priv->fifos[0];
334 struct nouveau_gpuobj *ramin = chan->ramin->gpuobj; 280 struct nouveau_gpuobj *ramin = chan->ramin;
335 int i; 281 int i;
336 282
337 ramin->im_backing_suspend = vmalloc(ramin->im_pramin->size); 283 ramin->im_backing_suspend = vmalloc(ramin->size);
338 if (!ramin->im_backing_suspend) 284 if (!ramin->im_backing_suspend)
339 return -ENOMEM; 285 return -ENOMEM;
340 286
341 for (i = 0; i < ramin->im_pramin->size; i += 4) 287 for (i = 0; i < ramin->size; i += 4)
342 ramin->im_backing_suspend[i/4] = nv_ri32(dev, i); 288 ramin->im_backing_suspend[i/4] = nv_ri32(dev, i);
343 return 0; 289 return 0;
344} 290}
@@ -349,23 +295,25 @@ nv50_instmem_resume(struct drm_device *dev)
349 struct drm_nouveau_private *dev_priv = dev->dev_private; 295 struct drm_nouveau_private *dev_priv = dev->dev_private;
350 struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; 296 struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
351 struct nouveau_channel *chan = dev_priv->fifos[0]; 297 struct nouveau_channel *chan = dev_priv->fifos[0];
352 struct nouveau_gpuobj *ramin = chan->ramin->gpuobj; 298 struct nouveau_gpuobj *ramin = chan->ramin;
353 int i; 299 int i;
354 300
355 nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, (ramin->im_backing_start >> 16)); 301 dev_priv->ramin_available = false;
356 for (i = 0; i < ramin->im_pramin->size; i += 4) 302 dev_priv->ramin_base = ~0;
357 BAR0_WI32(ramin, i, ramin->im_backing_suspend[i/4]); 303 for (i = 0; i < ramin->size; i += 4)
304 nv_wo32(ramin, i, ramin->im_backing_suspend[i/4]);
305 dev_priv->ramin_available = true;
358 vfree(ramin->im_backing_suspend); 306 vfree(ramin->im_backing_suspend);
359 ramin->im_backing_suspend = NULL; 307 ramin->im_backing_suspend = NULL;
360 308
361 /* Poke the relevant regs, and pray it works :) */ 309 /* Poke the relevant regs, and pray it works :) */
362 nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12)); 310 nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12));
363 nv_wr32(dev, NV50_PUNK_UNK1710, 0); 311 nv_wr32(dev, NV50_PUNK_UNK1710, 0);
364 nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12) | 312 nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12) |
365 NV50_PUNK_BAR_CFG_BASE_VALID); 313 NV50_PUNK_BAR_CFG_BASE_VALID);
366 nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->instance >> 4) | 314 nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->cinst >> 4) |
367 NV50_PUNK_BAR1_CTXDMA_VALID); 315 NV50_PUNK_BAR1_CTXDMA_VALID);
368 nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->instance >> 4) | 316 nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->cinst >> 4) |
369 NV50_PUNK_BAR3_CTXDMA_VALID); 317 NV50_PUNK_BAR3_CTXDMA_VALID);
370 318
371 for (i = 0; i < 8; i++) 319 for (i = 0; i < 8; i++)
@@ -381,7 +329,7 @@ nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
381 if (gpuobj->im_backing) 329 if (gpuobj->im_backing)
382 return -EINVAL; 330 return -EINVAL;
383 331
384 *sz = ALIGN(*sz, NV50_INSTMEM_PAGE_SIZE); 332 *sz = ALIGN(*sz, 4096);
385 if (*sz == 0) 333 if (*sz == 0)
386 return -EINVAL; 334 return -EINVAL;
387 335
@@ -399,9 +347,7 @@ nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
399 return ret; 347 return ret;
400 } 348 }
401 349
402 gpuobj->im_backing_start = gpuobj->im_backing->bo.mem.mm_node->start; 350 gpuobj->vinst = gpuobj->im_backing->bo.mem.start << PAGE_SHIFT;
403 gpuobj->im_backing_start <<= PAGE_SHIFT;
404
405 return 0; 351 return 0;
406} 352}
407 353
@@ -424,7 +370,7 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
424{ 370{
425 struct drm_nouveau_private *dev_priv = dev->dev_private; 371 struct drm_nouveau_private *dev_priv = dev->dev_private;
426 struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; 372 struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
427 struct nouveau_gpuobj *pramin_pt = priv->pramin_pt->gpuobj; 373 struct nouveau_gpuobj *pramin_pt = priv->pramin_pt;
428 uint32_t pte, pte_end; 374 uint32_t pte, pte_end;
429 uint64_t vram; 375 uint64_t vram;
430 376
@@ -436,11 +382,11 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
436 382
437 pte = (gpuobj->im_pramin->start >> 12) << 1; 383 pte = (gpuobj->im_pramin->start >> 12) << 1;
438 pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte; 384 pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
439 vram = gpuobj->im_backing_start; 385 vram = gpuobj->vinst;
440 386
441 NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n", 387 NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n",
442 gpuobj->im_pramin->start, pte, pte_end); 388 gpuobj->im_pramin->start, pte, pte_end);
443 NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start); 389 NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst);
444 390
445 vram |= 1; 391 vram |= 1;
446 if (dev_priv->vram_sys_base) { 392 if (dev_priv->vram_sys_base) {
@@ -449,9 +395,10 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
449 } 395 }
450 396
451 while (pte < pte_end) { 397 while (pte < pte_end) {
452 nv_wo32(dev, pramin_pt, pte++, lower_32_bits(vram)); 398 nv_wo32(pramin_pt, (pte * 4) + 0, lower_32_bits(vram));
453 nv_wo32(dev, pramin_pt, pte++, upper_32_bits(vram)); 399 nv_wo32(pramin_pt, (pte * 4) + 4, upper_32_bits(vram));
454 vram += NV50_INSTMEM_PAGE_SIZE; 400 vram += 0x1000;
401 pte += 2;
455 } 402 }
456 dev_priv->engine.instmem.flush(dev); 403 dev_priv->engine.instmem.flush(dev);
457 404
@@ -472,12 +419,17 @@ nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
472 if (gpuobj->im_bound == 0) 419 if (gpuobj->im_bound == 0)
473 return -EINVAL; 420 return -EINVAL;
474 421
422 /* can happen during late takedown */
423 if (unlikely(!dev_priv->ramin_available))
424 return 0;
425
475 pte = (gpuobj->im_pramin->start >> 12) << 1; 426 pte = (gpuobj->im_pramin->start >> 12) << 1;
476 pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte; 427 pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
477 428
478 while (pte < pte_end) { 429 while (pte < pte_end) {
479 nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000); 430 nv_wo32(priv->pramin_pt, (pte * 4) + 0, 0x00000000);
480 nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000); 431 nv_wo32(priv->pramin_pt, (pte * 4) + 4, 0x00000000);
432 pte += 2;
481 } 433 }
482 dev_priv->engine.instmem.flush(dev); 434 dev_priv->engine.instmem.flush(dev);
483 435
@@ -489,7 +441,7 @@ void
489nv50_instmem_flush(struct drm_device *dev) 441nv50_instmem_flush(struct drm_device *dev)
490{ 442{
491 nv_wr32(dev, 0x00330c, 0x00000001); 443 nv_wr32(dev, 0x00330c, 0x00000001);
492 if (!nv_wait(0x00330c, 0x00000002, 0x00000000)) 444 if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000))
493 NV_ERROR(dev, "PRAMIN flush timeout\n"); 445 NV_ERROR(dev, "PRAMIN flush timeout\n");
494} 446}
495 447
@@ -497,7 +449,7 @@ void
497nv84_instmem_flush(struct drm_device *dev) 449nv84_instmem_flush(struct drm_device *dev)
498{ 450{
499 nv_wr32(dev, 0x070000, 0x00000001); 451 nv_wr32(dev, 0x070000, 0x00000001);
500 if (!nv_wait(0x070000, 0x00000002, 0x00000000)) 452 if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
501 NV_ERROR(dev, "PRAMIN flush timeout\n"); 453 NV_ERROR(dev, "PRAMIN flush timeout\n");
502} 454}
503 455
@@ -505,7 +457,7 @@ void
505nv50_vm_flush(struct drm_device *dev, int engine) 457nv50_vm_flush(struct drm_device *dev, int engine)
506{ 458{
507 nv_wr32(dev, 0x100c80, (engine << 16) | 1); 459 nv_wr32(dev, 0x100c80, (engine << 16) | 1);
508 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) 460 if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
509 NV_ERROR(dev, "vm flush timeout: engine %d\n", engine); 461 NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
510} 462}
511 463
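
[Editor's note] The hunks above change nv_wo32() from a (dev, object, 32-bit word index) triple to an (object, byte offset) pair, which is why the PTE loops now write to (pte * 4) + 0 and (pte * 4) + 4 and step pte by two. A minimal userspace sketch (plain arrays standing in for PRAMIN; not kernel code) showing the two loop shapes produce identical tables:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define NV50_INSTMEM_PAGE 0x1000	/* one instmem page per 64-bit PTE */

	/* old style: nv_wo32(dev, obj, word_index, val) -- index in 32-bit words */
	static void fill_old(uint32_t *pt, uint32_t pte, uint32_t pte_end, uint64_t vram)
	{
		while (pte < pte_end) {
			pt[pte++] = (uint32_t)vram;		/* lower_32_bits(vram) */
			pt[pte++] = (uint32_t)(vram >> 32);	/* upper_32_bits(vram) */
			vram += NV50_INSTMEM_PAGE;
		}
	}

	/* new style: nv_wo32(obj, byte_offset, val) -- offset in bytes */
	static void fill_new(uint32_t *pt, uint32_t pte, uint32_t pte_end, uint64_t vram)
	{
		while (pte < pte_end) {
			pt[((pte * 4) + 0) / 4] = (uint32_t)vram;
			pt[((pte * 4) + 4) / 4] = (uint32_t)(vram >> 32);
			vram += NV50_INSTMEM_PAGE;
			pte += 2;
		}
	}

	int main(void)
	{
		uint32_t a[32] = {0}, b[32] = {0};

		fill_old(a, 0, 16, 0x40000001ULL);	/* low bit set: page present */
		fill_new(b, 0, 16, 0x40000001ULL);
		assert(memcmp(a, b, sizeof(a)) == 0);
		printf("old and new loops build identical PTE tables\n");
		return 0;
	}
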
diff --git a/drivers/gpu/drm/nouveau/nv50_pm.c b/drivers/gpu/drm/nouveau/nv50_pm.c
new file mode 100644
index 000000000000..7dbb305d7e63
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_pm.c
@@ -0,0 +1,131 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_bios.h"
28#include "nouveau_pm.h"
29
30struct nv50_pm_state {
31 struct nouveau_pm_level *perflvl;
32 struct pll_lims pll;
33 enum pll_types type;
34 int N, M, P;
35};
36
37int
38nv50_pm_clock_get(struct drm_device *dev, u32 id)
39{
40 struct pll_lims pll;
41 int P, N, M, ret;
42 u32 reg0, reg1;
43
44 ret = get_pll_limits(dev, id, &pll);
45 if (ret)
46 return ret;
47
48 reg0 = nv_rd32(dev, pll.reg + 0);
49 reg1 = nv_rd32(dev, pll.reg + 4);
50 P = (reg0 & 0x00070000) >> 16;
51 N = (reg1 & 0x0000ff00) >> 8;
52 M = (reg1 & 0x000000ff);
53
54 return ((pll.refclk * N / M) >> P);
55}
56
57void *
58nv50_pm_clock_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl,
59 u32 id, int khz)
60{
61 struct nv50_pm_state *state;
62 int dummy, ret;
63
64 state = kzalloc(sizeof(*state), GFP_KERNEL);
65 if (!state)
66 return ERR_PTR(-ENOMEM);
67 state->type = id;
68 state->perflvl = perflvl;
69
70 ret = get_pll_limits(dev, id, &state->pll);
71 if (ret < 0) {
72 kfree(state);
73 return (ret == -ENOENT) ? NULL : ERR_PTR(ret);
74 }
75
76 ret = nv50_calc_pll(dev, &state->pll, khz, &state->N, &state->M,
77 &dummy, &dummy, &state->P);
78 if (ret < 0) {
79 kfree(state);
80 return ERR_PTR(ret);
81 }
82
83 return state;
84}
85
86void
87nv50_pm_clock_set(struct drm_device *dev, void *pre_state)
88{
89 struct nv50_pm_state *state = pre_state;
90 struct nouveau_pm_level *perflvl = state->perflvl;
91 u32 reg = state->pll.reg, tmp;
92 struct bit_entry BIT_M;
93 u16 script;
94 int N = state->N;
95 int M = state->M;
96 int P = state->P;
97
98 if (state->type == PLL_MEMORY && perflvl->memscript &&
99 bit_table(dev, 'M', &BIT_M) == 0 &&
100 BIT_M.version == 1 && BIT_M.length >= 0x0b) {
101 script = ROM16(BIT_M.data[0x05]);
102 if (script)
103 nouveau_bios_run_init_table(dev, script, NULL);
104 script = ROM16(BIT_M.data[0x07]);
105 if (script)
106 nouveau_bios_run_init_table(dev, script, NULL);
107 script = ROM16(BIT_M.data[0x09]);
108 if (script)
109 nouveau_bios_run_init_table(dev, script, NULL);
110
111 nouveau_bios_run_init_table(dev, perflvl->memscript, NULL);
112 }
113
114 if (state->type == PLL_MEMORY) {
115 nv_wr32(dev, 0x100210, 0);
116 nv_wr32(dev, 0x1002dc, 1);
117 }
118
119 tmp = nv_rd32(dev, reg + 0) & 0xfff8ffff;
120 tmp |= 0x80000000 | (P << 16);
121 nv_wr32(dev, reg + 0, tmp);
122 nv_wr32(dev, reg + 4, (N << 8) | M);
123
124 if (state->type == PLL_MEMORY) {
125 nv_wr32(dev, 0x1002dc, 0);
126 nv_wr32(dev, 0x100210, 0x80000000);
127 }
128
129 kfree(state);
130}
131
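
[Editor's note] For reference, the readback in nv50_pm_clock_get() above decodes a two-register PLL: P (a power-of-two post-divider) from bits 18:16 of the first register, N and M from the second, giving refclk * N / M >> P. A standalone sketch of just that arithmetic, with hypothetical register values and refclk in kHz as get_pll_limits() reports it:

	#include <stdint.h>
	#include <stdio.h>

	/* Decode N/M/P from the two NV50 PLL registers and compute the
	 * output clock; field positions are taken from the code above. */
	static int nv50_pll_khz(uint32_t reg0, uint32_t reg1, int refclk_khz)
	{
		int P = (reg0 & 0x00070000) >> 16;	/* post-divider (shift) */
		int N = (reg1 & 0x0000ff00) >> 8;	/* feedback divider */
		int M = (reg1 & 0x000000ff);		/* reference divider */

		return (refclk_khz * N / M) >> P;
	}

	int main(void)
	{
		/* hypothetical: 27 MHz reference, N=100, M=2, P=1 */
		printf("%d kHz\n", nv50_pll_khz(0x00010000, 0x00006402, 27000));
		/* prints 675000 */
		return 0;
	}
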
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index bcd4cf84a7e6..b4a5ecb199f9 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -92,7 +92,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
92 } 92 }
93 93
94 /* wait for it to be done */ 94 /* wait for it to be done */
95 if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_CTRL(or), 95 if (!nv_wait(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or),
96 NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING, 0)) { 96 NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING, 0)) {
97 NV_ERROR(dev, "timeout: SOR_DPMS_CTRL_PENDING(%d) == 0\n", or); 97 NV_ERROR(dev, "timeout: SOR_DPMS_CTRL_PENDING(%d) == 0\n", or);
98 NV_ERROR(dev, "SOR_DPMS_CTRL(%d) = 0x%08x\n", or, 98 NV_ERROR(dev, "SOR_DPMS_CTRL(%d) = 0x%08x\n", or,
@@ -108,7 +108,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
108 108
109 nv_wr32(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or), val | 109 nv_wr32(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or), val |
110 NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING); 110 NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING);
111 if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_STATE(or), 111 if (!nv_wait(dev, NV50_PDISPLAY_SOR_DPMS_STATE(or),
112 NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) { 112 NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
113 NV_ERROR(dev, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", or); 113 NV_ERROR(dev, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", or);
114 NV_ERROR(dev, "SOR_DPMS_STATE(%d) = 0x%08x\n", or, 114 NV_ERROR(dev, "SOR_DPMS_STATE(%d) = 0x%08x\n", or,
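
[Editor's note] Much of the mechanical churn in this series is nv_wait() gaining an explicit dev argument. As a mental model only (the driver's helper also bounds the poll with a timer-based timeout rather than a retry count), it spins on a register until the masked value matches:

	#include <stdbool.h>
	#include <stdint.h>

	/* Standalone model of an nv_wait()-style poll: spin until
	 * (read(reg) & mask) == val or we run out of attempts.  A real
	 * implementation would delay between reads and use a wall-clock
	 * timeout instead of a fixed retry count. */
	static bool poll_masked(uint32_t (*rd32)(uint32_t reg), uint32_t reg,
				uint32_t mask, uint32_t val, unsigned int tries)
	{
		while (tries--) {
			if ((rd32(reg) & mask) == val)
				return true;
		}
		return false;
	}
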
diff --git a/drivers/gpu/drm/nouveau/nva3_pm.c b/drivers/gpu/drm/nouveau/nva3_pm.c
new file mode 100644
index 000000000000..dbbafed36406
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nva3_pm.c
@@ -0,0 +1,95 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_bios.h"
28#include "nouveau_pm.h"
29
30/*XXX: boards using limits 0x40 need fixing; the register layout
31 * is correct here, but some other funny magic modifies things,
32 * so it's unlikely we'll set/read the correct timings yet.
33 * Working on it...
34 */
35
36struct nva3_pm_state {
37 struct pll_lims pll;
38 int N, M, P;
39};
40
41int
42nva3_pm_clock_get(struct drm_device *dev, u32 id)
43{
44 struct pll_lims pll;
45 int P, N, M, ret;
46 u32 reg;
47
48 ret = get_pll_limits(dev, id, &pll);
49 if (ret)
50 return ret;
51
52 reg = nv_rd32(dev, pll.reg + 4);
53 P = (reg & 0x003f0000) >> 16;
54 N = (reg & 0x0000ff00) >> 8;
55 M = (reg & 0x000000ff);
56 return pll.refclk * N / M / P;
57}
58
59void *
60nva3_pm_clock_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl,
61 u32 id, int khz)
62{
63 struct nva3_pm_state *state;
64 int dummy, ret;
65
66 state = kzalloc(sizeof(*state), GFP_KERNEL);
67 if (!state)
68 return ERR_PTR(-ENOMEM);
69
70 ret = get_pll_limits(dev, id, &state->pll);
71 if (ret < 0) {
72 kfree(state);
73 return (ret == -ENOENT) ? NULL : ERR_PTR(ret);
74 }
75
76 ret = nv50_calc_pll2(dev, &state->pll, khz, &state->N, &dummy,
77 &state->M, &state->P);
78 if (ret < 0) {
79 kfree(state);
80 return ERR_PTR(ret);
81 }
82
83 return state;
84}
85
86void
87nva3_pm_clock_set(struct drm_device *dev, void *pre_state)
88{
89 struct nva3_pm_state *state = pre_state;
90 u32 reg = state->pll.reg;
91
92 nv_wr32(dev, reg + 4, (state->P << 16) | (state->N << 8) | state->M);
93 kfree(state);
94}
95
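
[Editor's note] The NVA3 decode differs from NV50 in two ways visible above: all three dividers come from a single register, and P divides directly instead of acting as a shift. A sketch of that arithmetic with hypothetical values (as the XXX comment warns, boards using limits 0x40 may not decode this way, and a real decode would guard against M or P of zero):

	#include <stdint.h>
	#include <stdio.h>

	static int nva3_pll_khz(uint32_t reg, int refclk_khz)
	{
		int P = (reg & 0x003f0000) >> 16;	/* plain divider, not a shift */
		int N = (reg & 0x0000ff00) >> 8;
		int M = (reg & 0x000000ff);

		return refclk_khz * N / M / P;
	}

	int main(void)
	{
		/* hypothetical: 27 MHz reference, N=100, M=2, P=2 */
		printf("%d kHz\n", nva3_pll_khz(0x00026402, 27000));
		/* prints 675000 */
		return 0;
	}
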
diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c
index d64375871979..890c2b95fbc1 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fifo.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fifo.c
@@ -43,12 +43,6 @@ nvc0_fifo_reassign(struct drm_device *dev, bool enable)
43} 43}
44 44
45bool 45bool
46nvc0_fifo_cache_flush(struct drm_device *dev)
47{
48 return true;
49}
50
51bool
52nvc0_fifo_cache_pull(struct drm_device *dev, bool enable) 46nvc0_fifo_cache_pull(struct drm_device *dev, bool enable)
53{ 47{
54 return false; 48 return false;
diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c
index 6b451f864783..13a0f78a9088 100644
--- a/drivers/gpu/drm/nouveau/nvc0_instmem.c
+++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c
@@ -50,8 +50,7 @@ nvc0_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
50 return ret; 50 return ret;
51 } 51 }
52 52
53 gpuobj->im_backing_start = gpuobj->im_backing->bo.mem.mm_node->start; 53 gpuobj->vinst = gpuobj->im_backing->bo.mem.start << PAGE_SHIFT;
54 gpuobj->im_backing_start <<= PAGE_SHIFT;
55 return 0; 54 return 0;
56} 55}
57 56
@@ -84,11 +83,11 @@ nvc0_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
84 83
85 pte = gpuobj->im_pramin->start >> 12; 84 pte = gpuobj->im_pramin->start >> 12;
86 pte_end = (gpuobj->im_pramin->size >> 12) + pte; 85 pte_end = (gpuobj->im_pramin->size >> 12) + pte;
87 vram = gpuobj->im_backing_start; 86 vram = gpuobj->vinst;
88 87
89 NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n", 88 NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n",
90 gpuobj->im_pramin->start, pte, pte_end); 89 gpuobj->im_pramin->start, pte, pte_end);
91 NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start); 90 NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst);
92 91
93 while (pte < pte_end) { 92 while (pte < pte_end) {
94 nv_wr32(dev, 0x702000 + (pte * 8), (vram >> 8) | 1); 93 nv_wr32(dev, 0x702000 + (pte * 8), (vram >> 8) | 1);
@@ -134,7 +133,7 @@ void
134nvc0_instmem_flush(struct drm_device *dev) 133nvc0_instmem_flush(struct drm_device *dev)
135{ 134{
136 nv_wr32(dev, 0x070000, 1); 135 nv_wr32(dev, 0x070000, 1);
137 if (!nv_wait(0x070000, 0x00000002, 0x00000000)) 136 if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
138 NV_ERROR(dev, "PRAMIN flush timeout\n"); 137 NV_ERROR(dev, "PRAMIN flush timeout\n");
139} 138}
140 139
@@ -221,10 +220,6 @@ nvc0_instmem_init(struct drm_device *dev)
221 return -ENOMEM; 220 return -ENOMEM;
222 } 221 }
223 222
224 /*XXX: incorrect, but needed to make hash func "work" */
225 dev_priv->ramht_offset = 0x10000;
226 dev_priv->ramht_bits = 9;
227 dev_priv->ramht_size = (1 << dev_priv->ramht_bits) * 8;
228 return 0; 223 return 0;
229} 224}
230 225
diff --git a/drivers/gpu/drm/nouveau/nvreg.h b/drivers/gpu/drm/nouveau/nvreg.h
index ad64673ace1f..881f8a585613 100644
--- a/drivers/gpu/drm/nouveau/nvreg.h
+++ b/drivers/gpu/drm/nouveau/nvreg.h
@@ -263,6 +263,7 @@
263# define NV_CIO_CRE_HCUR_ADDR1_ADR 7:2 263# define NV_CIO_CRE_HCUR_ADDR1_ADR 7:2
264# define NV_CIO_CRE_LCD__INDEX 0x33 264# define NV_CIO_CRE_LCD__INDEX 0x33
265# define NV_CIO_CRE_LCD_LCD_SELECT 0:0 265# define NV_CIO_CRE_LCD_LCD_SELECT 0:0
266# define NV_CIO_CRE_LCD_ROUTE_MASK 0x3b
266# define NV_CIO_CRE_DDC0_STATUS__INDEX 0x36 267# define NV_CIO_CRE_DDC0_STATUS__INDEX 0x36
267# define NV_CIO_CRE_DDC0_WR__INDEX 0x37 268# define NV_CIO_CRE_DDC0_WR__INDEX 0x37
268# define NV_CIO_CRE_ILACE__INDEX 0x39 /* interlace */ 269# define NV_CIO_CRE_ILACE__INDEX 0x39 /* interlace */
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 0afd1e62347d..c26106066ec2 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -435,7 +435,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
435 435
436out: 436out:
437 radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch, 437 radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
438 bo->tbo.mem.mm_node->start << PAGE_SHIFT, 438 bo->tbo.mem.start << PAGE_SHIFT,
439 bo->tbo.num_pages << PAGE_SHIFT); 439 bo->tbo.num_pages << PAGE_SHIFT);
440 return 0; 440 return 0;
441} 441}
@@ -532,7 +532,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
532 rdev = rbo->rdev; 532 rdev = rbo->rdev;
533 if (bo->mem.mem_type == TTM_PL_VRAM) { 533 if (bo->mem.mem_type == TTM_PL_VRAM) {
534 size = bo->mem.num_pages << PAGE_SHIFT; 534 size = bo->mem.num_pages << PAGE_SHIFT;
535 offset = bo->mem.mm_node->start << PAGE_SHIFT; 535 offset = bo->mem.start << PAGE_SHIFT;
536 if ((offset + size) > rdev->mc.visible_vram_size) { 536 if ((offset + size) > rdev->mc.visible_vram_size) {
537 /* hurrah, the memory is not visible! */ 537 /* hurrah, the memory is not visible! */
538 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM); 538 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
@@ -540,7 +540,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
540 r = ttm_bo_validate(bo, &rbo->placement, false, true, false); 540 r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
541 if (unlikely(r != 0)) 541 if (unlikely(r != 0))
542 return r; 542 return r;
543 offset = bo->mem.mm_node->start << PAGE_SHIFT; 543 offset = bo->mem.start << PAGE_SHIFT;
544 /* this should not happen */ 544 /* this should not happen */
545 if ((offset + size) > rdev->mc.visible_vram_size) 545 if ((offset + size) > rdev->mc.visible_vram_size)
546 return -EINVAL; 546 return -EINVAL;
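
[Editor's note] With the drm_mm_node hidden behind the new manager, the BO's placement is mirrored in mem->start (in pages), so the visibility test above reduces to a shift and a compare. A standalone sketch of that check:

	#include <stdbool.h>
	#include <stdint.h>

	#define PAGE_SHIFT 12	/* 4 KiB pages, as on x86 */

	/* True when a BO placed at start_page for num_pages lies entirely
	 * inside the CPU-visible part of VRAM. */
	static bool bo_cpu_visible(uint64_t start_page, uint64_t num_pages,
				   uint64_t visible_vram_size)
	{
		uint64_t offset = start_page << PAGE_SHIFT;
		uint64_t size = num_pages << PAGE_SHIFT;

		return offset + size <= visible_vram_size;
	}
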
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 84c53e41a88f..0921910698d4 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -152,6 +152,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
152 man->default_caching = TTM_PL_FLAG_CACHED; 152 man->default_caching = TTM_PL_FLAG_CACHED;
153 break; 153 break;
154 case TTM_PL_TT: 154 case TTM_PL_TT:
155 man->func = &ttm_bo_manager_func;
155 man->gpu_offset = rdev->mc.gtt_start; 156 man->gpu_offset = rdev->mc.gtt_start;
156 man->available_caching = TTM_PL_MASK_CACHING; 157 man->available_caching = TTM_PL_MASK_CACHING;
157 man->default_caching = TTM_PL_FLAG_CACHED; 158 man->default_caching = TTM_PL_FLAG_CACHED;
@@ -173,6 +174,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
173 break; 174 break;
174 case TTM_PL_VRAM: 175 case TTM_PL_VRAM:
175 /* "On-card" video ram */ 176 /* "On-card" video ram */
177 man->func = &ttm_bo_manager_func;
176 man->gpu_offset = rdev->mc.vram_start; 178 man->gpu_offset = rdev->mc.vram_start;
177 man->flags = TTM_MEMTYPE_FLAG_FIXED | 179 man->flags = TTM_MEMTYPE_FLAG_FIXED |
178 TTM_MEMTYPE_FLAG_MAPPABLE; 180 TTM_MEMTYPE_FLAG_MAPPABLE;
@@ -246,8 +248,8 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
246 if (unlikely(r)) { 248 if (unlikely(r)) {
247 return r; 249 return r;
248 } 250 }
249 old_start = old_mem->mm_node->start << PAGE_SHIFT; 251 old_start = old_mem->start << PAGE_SHIFT;
250 new_start = new_mem->mm_node->start << PAGE_SHIFT; 252 new_start = new_mem->start << PAGE_SHIFT;
251 253
252 switch (old_mem->mem_type) { 254 switch (old_mem->mem_type) {
253 case TTM_PL_VRAM: 255 case TTM_PL_VRAM:
@@ -326,14 +328,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
326 } 328 }
327 r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem); 329 r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
328out_cleanup: 330out_cleanup:
329 if (tmp_mem.mm_node) { 331 ttm_bo_mem_put(bo, &tmp_mem);
330 struct ttm_bo_global *glob = rdev->mman.bdev.glob;
331
332 spin_lock(&glob->lru_lock);
333 drm_mm_put_block(tmp_mem.mm_node);
334 spin_unlock(&glob->lru_lock);
335 return r;
336 }
337 return r; 332 return r;
338} 333}
339 334
@@ -372,14 +367,7 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
372 goto out_cleanup; 367 goto out_cleanup;
373 } 368 }
374out_cleanup: 369out_cleanup:
375 if (tmp_mem.mm_node) { 370 ttm_bo_mem_put(bo, &tmp_mem);
376 struct ttm_bo_global *glob = rdev->mman.bdev.glob;
377
378 spin_lock(&glob->lru_lock);
379 drm_mm_put_block(tmp_mem.mm_node);
380 spin_unlock(&glob->lru_lock);
381 return r;
382 }
383 return r; 371 return r;
384} 372}
385 373
@@ -449,14 +437,14 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
449#if __OS_HAS_AGP 437#if __OS_HAS_AGP
450 if (rdev->flags & RADEON_IS_AGP) { 438 if (rdev->flags & RADEON_IS_AGP) {
451 /* RADEON_IS_AGP is set only if AGP is active */ 439 /* RADEON_IS_AGP is set only if AGP is active */
452 mem->bus.offset = mem->mm_node->start << PAGE_SHIFT; 440 mem->bus.offset = mem->start << PAGE_SHIFT;
453 mem->bus.base = rdev->mc.agp_base; 441 mem->bus.base = rdev->mc.agp_base;
454 mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture; 442 mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
455 } 443 }
456#endif 444#endif
457 break; 445 break;
458 case TTM_PL_VRAM: 446 case TTM_PL_VRAM:
459 mem->bus.offset = mem->mm_node->start << PAGE_SHIFT; 447 mem->bus.offset = mem->start << PAGE_SHIFT;
460 /* check if it's visible */ 448 /* check if it's visible */
461 if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size) 449 if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
462 return -EINVAL; 450 return -EINVAL;
@@ -699,7 +687,7 @@ static int radeon_ttm_backend_bind(struct ttm_backend *backend,
699 int r; 687 int r;
700 688
701 gtt = container_of(backend, struct radeon_ttm_backend, backend); 689 gtt = container_of(backend, struct radeon_ttm_backend, backend);
702 gtt->offset = bo_mem->mm_node->start << PAGE_SHIFT; 690 gtt->offset = bo_mem->start << PAGE_SHIFT;
703 if (!gtt->num_pages) { 691 if (!gtt->num_pages) {
704 WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", gtt->num_pages, bo_mem, backend); 692 WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", gtt->num_pages, bo_mem, backend);
705 } 693 }
@@ -798,9 +786,9 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
798 radeon_mem_types_list[i].show = &radeon_mm_dump_table; 786 radeon_mem_types_list[i].show = &radeon_mm_dump_table;
799 radeon_mem_types_list[i].driver_features = 0; 787 radeon_mem_types_list[i].driver_features = 0;
800 if (i == 0) 788 if (i == 0)
801 radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].manager; 789 radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].priv;
802 else 790 else
803 radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].manager; 791 radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].priv;
804 792
805 } 793 }
806 /* Add ttm page pool to debugfs */ 794 /* Add ttm page pool to debugfs */
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index b256d4adfafe..f3cf6f02c997 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -4,6 +4,7 @@
4ccflags-y := -Iinclude/drm 4ccflags-y := -Iinclude/drm
5ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \ 5ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
6 ttm_bo_util.o ttm_bo_vm.o ttm_module.o \ 6 ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
7 ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o 7 ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \
8 ttm_bo_manager.o
8 9
9obj-$(CONFIG_DRM_TTM) += ttm.o 10obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index 4bf69c404491..f999e36f30b4 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -74,6 +74,7 @@ static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
74{ 74{
75 struct ttm_agp_backend *agp_be = 75 struct ttm_agp_backend *agp_be =
76 container_of(backend, struct ttm_agp_backend, backend); 76 container_of(backend, struct ttm_agp_backend, backend);
77 struct drm_mm_node *node = bo_mem->mm_node;
77 struct agp_memory *mem = agp_be->mem; 78 struct agp_memory *mem = agp_be->mem;
78 int cached = (bo_mem->placement & TTM_PL_FLAG_CACHED); 79 int cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
79 int ret; 80 int ret;
@@ -81,7 +82,7 @@ static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
81 mem->is_flushed = 1; 82 mem->is_flushed = 1;
82 mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY; 83 mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
83 84
84 ret = agp_bind_memory(mem, bo_mem->mm_node->start); 85 ret = agp_bind_memory(mem, node->start);
85 if (ret) 86 if (ret)
86 printk(KERN_ERR TTM_PFX "AGP Bind memory failed.\n"); 87 printk(KERN_ERR TTM_PFX "AGP Bind memory failed.\n");
87 88
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index cb4cf7ef4d1e..af7b57a47fbc 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -84,11 +84,8 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
84 man->available_caching); 84 man->available_caching);
85 printk(KERN_ERR TTM_PFX " default_caching: 0x%08X\n", 85 printk(KERN_ERR TTM_PFX " default_caching: 0x%08X\n",
86 man->default_caching); 86 man->default_caching);
87 if (mem_type != TTM_PL_SYSTEM) { 87 if (mem_type != TTM_PL_SYSTEM)
88 spin_lock(&bdev->glob->lru_lock); 88 (*man->func->debug)(man, TTM_PFX);
89 drm_mm_debug_table(&man->manager, TTM_PFX);
90 spin_unlock(&bdev->glob->lru_lock);
91 }
92} 89}
93 90
94static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo, 91static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
@@ -421,7 +418,7 @@ moved:
421 418
422 if (bo->mem.mm_node) { 419 if (bo->mem.mm_node) {
423 spin_lock(&bo->lock); 420 spin_lock(&bo->lock);
424 bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) + 421 bo->offset = (bo->mem.start << PAGE_SHIFT) +
425 bdev->man[bo->mem.mem_type].gpu_offset; 422 bdev->man[bo->mem.mem_type].gpu_offset;
426 bo->cur_placement = bo->mem.placement; 423 bo->cur_placement = bo->mem.placement;
427 spin_unlock(&bo->lock); 424 spin_unlock(&bo->lock);
@@ -475,11 +472,8 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
475 list_del_init(&bo->ddestroy); 472 list_del_init(&bo->ddestroy);
476 ++put_count; 473 ++put_count;
477 } 474 }
478 if (bo->mem.mm_node) {
479 drm_mm_put_block(bo->mem.mm_node);
480 bo->mem.mm_node = NULL;
481 }
482 spin_unlock(&glob->lru_lock); 475 spin_unlock(&glob->lru_lock);
476 ttm_bo_mem_put(bo, &bo->mem);
483 477
484 atomic_set(&bo->reserved, 0); 478 atomic_set(&bo->reserved, 0);
485 479
@@ -621,7 +615,6 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
621 bool no_wait_reserve, bool no_wait_gpu) 615 bool no_wait_reserve, bool no_wait_gpu)
622{ 616{
623 struct ttm_bo_device *bdev = bo->bdev; 617 struct ttm_bo_device *bdev = bo->bdev;
624 struct ttm_bo_global *glob = bo->glob;
625 struct ttm_mem_reg evict_mem; 618 struct ttm_mem_reg evict_mem;
626 struct ttm_placement placement; 619 struct ttm_placement placement;
627 int ret = 0; 620 int ret = 0;
@@ -667,12 +660,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
667 if (ret) { 660 if (ret) {
668 if (ret != -ERESTARTSYS) 661 if (ret != -ERESTARTSYS)
669 printk(KERN_ERR TTM_PFX "Buffer eviction failed\n"); 662 printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
670 spin_lock(&glob->lru_lock); 663 ttm_bo_mem_put(bo, &evict_mem);
671 if (evict_mem.mm_node) {
672 drm_mm_put_block(evict_mem.mm_node);
673 evict_mem.mm_node = NULL;
674 }
675 spin_unlock(&glob->lru_lock);
676 goto out; 664 goto out;
677 } 665 }
678 bo->evicted = true; 666 bo->evicted = true;
@@ -733,41 +721,14 @@ retry:
733 return ret; 721 return ret;
734} 722}
735 723
736static int ttm_bo_man_get_node(struct ttm_buffer_object *bo, 724void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
737 struct ttm_mem_type_manager *man,
738 struct ttm_placement *placement,
739 struct ttm_mem_reg *mem,
740 struct drm_mm_node **node)
741{ 725{
742 struct ttm_bo_global *glob = bo->glob; 726 struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
743 unsigned long lpfn;
744 int ret;
745 727
746 lpfn = placement->lpfn; 728 if (mem->mm_node)
747 if (!lpfn) 729 (*man->func->put_node)(man, mem);
748 lpfn = man->size;
749 *node = NULL;
750 do {
751 ret = drm_mm_pre_get(&man->manager);
752 if (unlikely(ret))
753 return ret;
754
755 spin_lock(&glob->lru_lock);
756 *node = drm_mm_search_free_in_range(&man->manager,
757 mem->num_pages, mem->page_alignment,
758 placement->fpfn, lpfn, 1);
759 if (unlikely(*node == NULL)) {
760 spin_unlock(&glob->lru_lock);
761 return 0;
762 }
763 *node = drm_mm_get_block_atomic_range(*node, mem->num_pages,
764 mem->page_alignment,
765 placement->fpfn,
766 lpfn);
767 spin_unlock(&glob->lru_lock);
768 } while (*node == NULL);
769 return 0;
770} 730}
731EXPORT_SYMBOL(ttm_bo_mem_put);
771 732
772/** 733/**
773 * Repeatedly evict memory from the LRU for @mem_type until we create enough 734 * Repeatedly evict memory from the LRU for @mem_type until we create enough
@@ -784,14 +745,13 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
784 struct ttm_bo_device *bdev = bo->bdev; 745 struct ttm_bo_device *bdev = bo->bdev;
785 struct ttm_bo_global *glob = bdev->glob; 746 struct ttm_bo_global *glob = bdev->glob;
786 struct ttm_mem_type_manager *man = &bdev->man[mem_type]; 747 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
787 struct drm_mm_node *node;
788 int ret; 748 int ret;
789 749
790 do { 750 do {
791 ret = ttm_bo_man_get_node(bo, man, placement, mem, &node); 751 ret = (*man->func->get_node)(man, bo, placement, mem);
792 if (unlikely(ret != 0)) 752 if (unlikely(ret != 0))
793 return ret; 753 return ret;
794 if (node) 754 if (mem->mm_node)
795 break; 755 break;
796 spin_lock(&glob->lru_lock); 756 spin_lock(&glob->lru_lock);
797 if (list_empty(&man->lru)) { 757 if (list_empty(&man->lru)) {
@@ -804,9 +764,8 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
804 if (unlikely(ret != 0)) 764 if (unlikely(ret != 0))
805 return ret; 765 return ret;
806 } while (1); 766 } while (1);
807 if (node == NULL) 767 if (mem->mm_node == NULL)
808 return -ENOMEM; 768 return -ENOMEM;
809 mem->mm_node = node;
810 mem->mem_type = mem_type; 769 mem->mem_type = mem_type;
811 return 0; 770 return 0;
812} 771}
@@ -880,7 +839,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
880 bool type_found = false; 839 bool type_found = false;
881 bool type_ok = false; 840 bool type_ok = false;
882 bool has_erestartsys = false; 841 bool has_erestartsys = false;
883 struct drm_mm_node *node = NULL;
884 int i, ret; 842 int i, ret;
885 843
886 mem->mm_node = NULL; 844 mem->mm_node = NULL;
@@ -914,17 +872,15 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
914 872
915 if (man->has_type && man->use_type) { 873 if (man->has_type && man->use_type) {
916 type_found = true; 874 type_found = true;
917 ret = ttm_bo_man_get_node(bo, man, placement, mem, 875 ret = (*man->func->get_node)(man, bo, placement, mem);
918 &node);
919 if (unlikely(ret)) 876 if (unlikely(ret))
920 return ret; 877 return ret;
921 } 878 }
922 if (node) 879 if (mem->mm_node)
923 break; 880 break;
924 } 881 }
925 882
926 if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) { 883 if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
927 mem->mm_node = node;
928 mem->mem_type = mem_type; 884 mem->mem_type = mem_type;
929 mem->placement = cur_flags; 885 mem->placement = cur_flags;
930 return 0; 886 return 0;
@@ -994,7 +950,6 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
994 bool interruptible, bool no_wait_reserve, 950 bool interruptible, bool no_wait_reserve,
995 bool no_wait_gpu) 951 bool no_wait_gpu)
996{ 952{
997 struct ttm_bo_global *glob = bo->glob;
998 int ret = 0; 953 int ret = 0;
999 struct ttm_mem_reg mem; 954 struct ttm_mem_reg mem;
1000 955
@@ -1022,11 +977,8 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
1022 goto out_unlock; 977 goto out_unlock;
1023 ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu); 978 ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
1024out_unlock: 979out_unlock:
1025 if (ret && mem.mm_node) { 980 if (ret && mem.mm_node)
1026 spin_lock(&glob->lru_lock); 981 ttm_bo_mem_put(bo, &mem);
1027 drm_mm_put_block(mem.mm_node);
1028 spin_unlock(&glob->lru_lock);
1029 }
1030 return ret; 982 return ret;
1031} 983}
1032 984
@@ -1034,11 +986,10 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
1034 struct ttm_mem_reg *mem) 986 struct ttm_mem_reg *mem)
1035{ 987{
1036 int i; 988 int i;
1037 struct drm_mm_node *node = mem->mm_node;
1038 989
1039 if (node && placement->lpfn != 0 && 990 if (mem->mm_node && placement->lpfn != 0 &&
1040 (node->start < placement->fpfn || 991 (mem->start < placement->fpfn ||
1041 node->start + node->size > placement->lpfn)) 992 mem->start + mem->num_pages > placement->lpfn))
1042 return -1; 993 return -1;
1043 994
1044 for (i = 0; i < placement->num_placement; i++) { 995 for (i = 0; i < placement->num_placement; i++) {
@@ -1282,7 +1233,6 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
1282 1233
1283int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) 1234int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1284{ 1235{
1285 struct ttm_bo_global *glob = bdev->glob;
1286 struct ttm_mem_type_manager *man; 1236 struct ttm_mem_type_manager *man;
1287 int ret = -EINVAL; 1237 int ret = -EINVAL;
1288 1238
@@ -1305,13 +1255,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1305 if (mem_type > 0) { 1255 if (mem_type > 0) {
1306 ttm_bo_force_list_clean(bdev, mem_type, false); 1256 ttm_bo_force_list_clean(bdev, mem_type, false);
1307 1257
1308 spin_lock(&glob->lru_lock); 1258 ret = (*man->func->takedown)(man);
1309 if (drm_mm_clean(&man->manager))
1310 drm_mm_takedown(&man->manager);
1311 else
1312 ret = -EBUSY;
1313
1314 spin_unlock(&glob->lru_lock);
1315 } 1259 }
1316 1260
1317 return ret; 1261 return ret;
@@ -1362,6 +1306,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1362 ret = bdev->driver->init_mem_type(bdev, type, man); 1306 ret = bdev->driver->init_mem_type(bdev, type, man);
1363 if (ret) 1307 if (ret)
1364 return ret; 1308 return ret;
1309 man->bdev = bdev;
1365 1310
1366 ret = 0; 1311 ret = 0;
1367 if (type != TTM_PL_SYSTEM) { 1312 if (type != TTM_PL_SYSTEM) {
@@ -1371,7 +1316,8 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1371 type); 1316 type);
1372 return ret; 1317 return ret;
1373 } 1318 }
1374 ret = drm_mm_init(&man->manager, 0, p_size); 1319
1320 ret = (*man->func->init)(man, p_size);
1375 if (ret) 1321 if (ret)
1376 return ret; 1322 return ret;
1377 } 1323 }
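
[Editor's note] The calls above now go through a per-memory-type ops table, man->func. Judging from the call sites here and the positional initializer at the end of ttm_bo_manager.c below, the table has roughly this shape (a sketch for orientation only; the authoritative declaration lives in ttm_bo_driver.h, which this diff does not show):

	struct ttm_mem_type_manager;
	struct ttm_buffer_object;
	struct ttm_placement;
	struct ttm_mem_reg;

	struct ttm_mem_type_manager_func {
		int  (*init)(struct ttm_mem_type_manager *man,
			     unsigned long p_size);
		int  (*takedown)(struct ttm_mem_type_manager *man);
		int  (*get_node)(struct ttm_mem_type_manager *man,
				 struct ttm_buffer_object *bo,
				 struct ttm_placement *placement,
				 struct ttm_mem_reg *mem);
				 /* leaves mem->mm_node NULL when space is exhausted */
		void (*put_node)(struct ttm_mem_type_manager *man,
				 struct ttm_mem_reg *mem);
		void (*debug)(struct ttm_mem_type_manager *man,
			      const char *prefix);
	};
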
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
new file mode 100644
index 000000000000..7410c190c891
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -0,0 +1,148 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
30
31#include "ttm/ttm_module.h"
32#include "ttm/ttm_bo_driver.h"
33#include "ttm/ttm_placement.h"
34#include <linux/jiffies.h>
35#include <linux/slab.h>
36#include <linux/sched.h>
37#include <linux/mm.h>
38#include <linux/file.h>
39#include <linux/module.h>
40
41static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
42 struct ttm_buffer_object *bo,
43 struct ttm_placement *placement,
44 struct ttm_mem_reg *mem)
45{
46 struct ttm_bo_global *glob = man->bdev->glob;
47 struct drm_mm *mm = man->priv;
48 struct drm_mm_node *node = NULL;
49 unsigned long lpfn;
50 int ret;
51
52 lpfn = placement->lpfn;
53 if (!lpfn)
54 lpfn = man->size;
55 do {
56 ret = drm_mm_pre_get(mm);
57 if (unlikely(ret))
58 return ret;
59
60 spin_lock(&glob->lru_lock);
61 node = drm_mm_search_free_in_range(mm,
62 mem->num_pages, mem->page_alignment,
63 placement->fpfn, lpfn, 1);
64 if (unlikely(node == NULL)) {
65 spin_unlock(&glob->lru_lock);
66 return 0;
67 }
68 node = drm_mm_get_block_atomic_range(node, mem->num_pages,
69 mem->page_alignment,
70 placement->fpfn,
71 lpfn);
72 spin_unlock(&glob->lru_lock);
73 } while (node == NULL);
74
75 mem->mm_node = node;
76 mem->start = node->start;
77 return 0;
78}
79
80static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
81 struct ttm_mem_reg *mem)
82{
83 struct ttm_bo_global *glob = man->bdev->glob;
84
85 if (mem->mm_node) {
86 spin_lock(&glob->lru_lock);
87 drm_mm_put_block(mem->mm_node);
88 spin_unlock(&glob->lru_lock);
89 mem->mm_node = NULL;
90 }
91}
92
93static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
94 unsigned long p_size)
95{
96 struct drm_mm *mm;
97 int ret;
98
99 mm = kzalloc(sizeof(*mm), GFP_KERNEL);
100 if (!mm)
101 return -ENOMEM;
102
103 ret = drm_mm_init(mm, 0, p_size);
104 if (ret) {
105 kfree(mm);
106 return ret;
107 }
108
109 man->priv = mm;
110 return 0;
111}
112
113static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
114{
115 struct ttm_bo_global *glob = man->bdev->glob;
116 struct drm_mm *mm = man->priv;
117 int ret = 0;
118
119 spin_lock(&glob->lru_lock);
120 if (drm_mm_clean(mm)) {
121 drm_mm_takedown(mm);
122 kfree(mm);
123 man->priv = NULL;
124 } else
125 ret = -EBUSY;
126 spin_unlock(&glob->lru_lock);
127 return ret;
128}
129
130static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
131 const char *prefix)
132{
133 struct ttm_bo_global *glob = man->bdev->glob;
134 struct drm_mm *mm = man->priv;
135
136 spin_lock(&glob->lru_lock);
137 drm_mm_debug_table(mm, prefix);
138 spin_unlock(&glob->lru_lock);
139}
140
141const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
142 ttm_bo_man_init,
143 ttm_bo_man_takedown,
144 ttm_bo_man_get_node,
145 ttm_bo_man_put_node,
146 ttm_bo_man_debug
147};
148EXPORT_SYMBOL(ttm_bo_manager_func);
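
[Editor's note] Usage-wise, a driver opts in from its init_mem_type() hook by pointing man->func at the generic range manager, exactly as the radeon and vmwgfx hunks in this series do; ttm_bo_init_mm() then calls man->func->init for non-system types. A kernel-context sketch (the function name is illustrative):

	static int example_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
					 struct ttm_mem_type_manager *man)
	{
		switch (type) {
		case TTM_PL_VRAM:
			man->func = &ttm_bo_manager_func;	/* drm_mm-backed ranges */
			man->flags = TTM_MEMTYPE_FLAG_FIXED |
				     TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
			break;
		default:
			return -EINVAL;
		}
		return 0;
	}
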
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 3451a82adba7..ff358ad45aa3 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -39,14 +39,7 @@
39 39
40void ttm_bo_free_old_node(struct ttm_buffer_object *bo) 40void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
41{ 41{
42 struct ttm_mem_reg *old_mem = &bo->mem; 42 ttm_bo_mem_put(bo, &bo->mem);
43
44 if (old_mem->mm_node) {
45 spin_lock(&bo->glob->lru_lock);
46 drm_mm_put_block(old_mem->mm_node);
47 spin_unlock(&bo->glob->lru_lock);
48 }
49 old_mem->mm_node = NULL;
50} 43}
51 44
52int ttm_bo_move_ttm(struct ttm_buffer_object *bo, 45int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
@@ -263,8 +256,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
263 dir = 1; 256 dir = 1;
264 257
265 if ((old_mem->mem_type == new_mem->mem_type) && 258 if ((old_mem->mem_type == new_mem->mem_type) &&
266 (new_mem->mm_node->start < 259 (new_mem->start < old_mem->start + old_mem->size)) {
267 old_mem->mm_node->start + old_mem->mm_node->size)) {
268 dir = -1; 260 dir = -1;
269 add = new_mem->num_pages - 1; 261 add = new_mem->num_pages - 1;
270 } 262 }
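
[Editor's note] The simplified test above is the memmove() problem in page units: when the destination range overlaps the source from above, the copy must run backwards; ttm_bo_move_memcpy() keys that decision off the new start falling below the end of the old range within the same memory type. A standalone model of the dir/add logic:

	#include <stdint.h>
	#include <stdio.h>

	/* Copy num_pages page slots from old_start to new_start within one
	 * array, picking the copy direction so overlap is handled safely. */
	static void copy_pages(uint32_t *base, unsigned long old_start,
			       unsigned long new_start, unsigned long num_pages)
	{
		long dir = 1, add = 0, page;
		unsigned long i;

		if (new_start > old_start && new_start < old_start + num_pages) {
			dir = -1;		/* destination overlaps source from above */
			add = num_pages - 1;	/* so copy the highest page first */
		}
		for (i = 0; i < num_pages; ++i) {
			page = add + dir * (long)i;
			base[new_start + page] = base[old_start + page];
		}
	}

	int main(void)
	{
		uint32_t mem[8] = { 10, 11, 12, 13, 14, 0, 0, 0 };

		copy_pages(mem, 0, 2, 5);	/* overlapping move up by two pages */
		printf("%u %u %u %u %u\n", mem[2], mem[3], mem[4], mem[5], mem[6]);
		/* prints: 10 11 12 13 14 */
		return 0;
	}
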
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index c4f5114aee7c..1b3bd8c6c67e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -147,6 +147,7 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
147 break; 147 break;
148 case TTM_PL_VRAM: 148 case TTM_PL_VRAM:
149 /* "On-card" video ram */ 149 /* "On-card" video ram */
150 man->func = &ttm_bo_manager_func;
150 man->gpu_offset = 0; 151 man->gpu_offset = 0;
151 man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE; 152 man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
152 man->available_caching = TTM_PL_MASK_CACHING; 153 man->available_caching = TTM_PL_MASK_CACHING;
@@ -203,7 +204,7 @@ static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg
203 /* System memory */ 204 /* System memory */
204 return 0; 205 return 0;
205 case TTM_PL_VRAM: 206 case TTM_PL_VRAM:
206 mem->bus.offset = mem->mm_node->start << PAGE_SHIFT; 207 mem->bus.offset = mem->start << PAGE_SHIFT;
207 mem->bus.base = dev_priv->vram_start; 208 mem->bus.base = dev_priv->vram_start;
208 mem->bus.is_iomem = true; 209 mem->bus.is_iomem = true;
209 break; 210 break;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index b27a9f2887d2..e7304188a784 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -612,6 +612,7 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
612{ 612{
613 struct ttm_buffer_object *bo = &vmw_bo->base; 613 struct ttm_buffer_object *bo = &vmw_bo->base;
614 struct ttm_placement ne_placement = vmw_vram_ne_placement; 614 struct ttm_placement ne_placement = vmw_vram_ne_placement;
615 struct drm_mm_node *mm_node;
615 int ret = 0; 616 int ret = 0;
616 617
617 ne_placement.lpfn = bo->num_pages; 618 ne_placement.lpfn = bo->num_pages;
@@ -625,8 +626,9 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
625 if (unlikely(ret != 0)) 626 if (unlikely(ret != 0))
626 goto err_unlock; 627 goto err_unlock;
627 628
629 mm_node = bo->mem.mm_node;
628 if (bo->mem.mem_type == TTM_PL_VRAM && 630 if (bo->mem.mem_type == TTM_PL_VRAM &&
629 bo->mem.mm_node->start < bo->num_pages) 631 mm_node->start < bo->num_pages)
630 (void) ttm_bo_validate(bo, &vmw_sys_placement, false, 632 (void) ttm_bo_validate(bo, &vmw_sys_placement, false,
631 false, false); 633 false, false);
632 634