author	Dave Airlie <airlied@redhat.com>	2017-02-17 02:43:07 -0500
committer	Dave Airlie <airlied@redhat.com>	2017-02-17 02:43:07 -0500
commit	8fd4a62d875945fc8aacdb92fdc90161ec1d9bae (patch)
tree	77e5839a90af73cd9c36d48b7d529018f6b5efcb
parent	9ca70356a9260403c1bda40d942935e55d00c11c (diff)
parent	eb875d87d9ef87cb5565e2e1c4c720e9d4ee591c (diff)
Merge branch 'linux-4.11' of git://github.com/skeggsb/linux into drm-next
- Rework of the secure boot code, in preparation for GP10x secure boot.
- Improvements to channel recovery
- Initial power budget code
- Some preparation for an upcoming MMU rework (probably 4.12)
- Misc other fixes.

* 'linux-4.11' of git://github.com/skeggsb/linux: (88 commits)
  drm/nouveau/tmr: provide backtrace when a timeout is hit
  drm/nouveau/pci/g92: Fix rearm
  drm/nouveau/drm/therm/fan: add a fallback if no fan control is specified in the vbios
  drm/nouveau/hwmon: expose power_max and power_crit
  drm/nouveau/iccsense: Parse max and crit power level
  drm/nouveau/bios/power_budget: Add basic power budget parsing
  drm/nouveau/fifo/gk104-: preempt recovery
  drm/nouveau/fifo/gk104-: trigger mmu fault before attempting engine recovery
  drm/nouveau/fifo/gk104-: ACK SCHED_ERROR before attempting CTXSW_TIMEOUT recovery
  drm/nouveau/fifo/gk104-: directly use new recovery code for ctxsw timeout
  drm/nouveau/fifo/gk104-: directly use new recovery code for mmu faults
  drm/nouveau/fifo/gk104-: reset all engines a killed channel is still active on
  drm/nouveau/fifo/gk104-: refactor recovery code
  drm/nouveau/fifo/gk104-: better detection of chid when parsing engine status
  drm/nouveau/fifo/gk104-: separate out engine status parsing
  drm/nouveau/fifo: add an api for initiating channel recovery
  drm/nouveau/top: add function to translate subdev index to mmu fault id
  drm/nouveau/gr/gf100-: implement chsw_load() method
  drm/nouveau/gr: implement chsw_load() method
  drm/nouveau/core: add engine method to assist in determining chsw direction
  ...
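Most of the churn in the display patches below is one mechanical accessor change: per-device state that DRM code used to reach as drm->device is now reached as drm->client.device, i.e. the nvif device object hangs off the driver's master nouveau_cli rather than sitting directly in struct nouveau_drm. A minimal sketch of the pattern, with the struct layouts abbreviated (illustrative only, not code from this merge):

    /* Illustrative sketch -- abbreviated layouts, not the full definitions. */
    struct nouveau_cli {
            struct nvif_device device;   /* device object now lives on the client */
            /* ... */
    };

    struct nouveau_drm {
            struct nouveau_cli client;   /* master client embedded in drm state */
            /* ... */
    };

    /* Hypothetical helper showing the before/after access path. */
    static bool drm_chipset_is_nv11(struct nouveau_drm *drm)
    {
            /* before: drm->device.info.chipset == 0x11 */
            return drm->client.device.info.chipset == 0x11;
    }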
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv04/arb.c | 6
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv04/crtc.c | 43
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv04/cursor.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv04/dac.c | 18
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv04/dfp.c | 12
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv04/disp.c | 6
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv04/disp.h | 6
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv04/hw.c | 80
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv04/hw.h | 42
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv04/overlay.c | 16
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv04/tvnv04.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv04/tvnv17.c | 16
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv04/tvnv17.h | 4
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvif/cl826e.h | 2
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvif/cl826f.h | 2
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvif/cl906f.h | 3
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvif/cla06f.h | 3
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvif/class.h | 30
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvif/client.h | 3
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvif/driver.h | 6
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvif/if0000.h | 11
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvkm/core/client.h | 20
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvkm/core/device.h | 2
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvkm/core/engine.h | 2
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvkm/core/memory.h | 7
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvkm/core/mm.h | 8
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvkm/core/object.h | 5
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h | 2
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h | 6
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h | 76
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h | 1
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/power_budget.h | 26
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h | 2
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h | 3
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h | 1
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h | 1
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h | 3
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h | 17
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h | 6
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_abi16.c | 6
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_backlight.c | 24
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bios.c | 18
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bo.c | 355
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bo.h | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_chan.c | 30
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_chan.h | 5
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_connector.c | 20
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_debugfs.c | 5
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_display.c | 17
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_drm.c | 174
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_drv.h | 8
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_fbcon.c | 21
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_fence.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_gem.c | 20
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_gem.h | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_hwmon.c | 110
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_led.c | 7
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_nvif.c | 24
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_prime.c | 3
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_sgdma.c | 14
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_ttm.c | 76
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_usif.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_vga.c | 8
-rw-r--r-- drivers/gpu/drm/nouveau/nv04_fbcon.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nv17_fence.c | 8
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_display.c | 34
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_fence.c | 8
-rw-r--r-- drivers/gpu/drm/nouveau/nv84_fence.c | 10
-rw-r--r-- drivers/gpu/drm/nouveau/nvif/Kbuild | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nvif/client.c | 49
-rw-r--r-- drivers/gpu/drm/nouveau/nvif/driver.c | 58
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/Kbuild | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/core/client.c | 170
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/core/engine.c | 8
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/core/ioctl.c | 78
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/core/mm.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/core/object.c | 64
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 3
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c | 7
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c | 7
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c | 42
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c | 19
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c | 75
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c | 8
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c | 10
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c | 266
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c | 19
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c | 3
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h | 7
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c | 20
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c | 12
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c | 307
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h | 3
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c | 12
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c | 14
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c | 12
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c | 14
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c | 12
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/falcon/base.c | 191
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/falcon/priv.h | 8
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/falcon/v1.c | 266
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/bios/power_budget.c | 126
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c | 10
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c | 23
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c | 20
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c | 19
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c | 10
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c | 7
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c | 57
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c | 10
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c | 19
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c | 94
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c | 34
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c | 54
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h | 69
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c | 936
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h | 250
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c | 138
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c | 254
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c | 1391
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h | 43
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c | 126
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h | 151
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c | 158
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h | 199
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c | 22
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c | 14
144 files changed, 4558 insertions(+), 3000 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/dispnv04/arb.c b/drivers/gpu/drm/nouveau/dispnv04/arb.c
index a555681c3096..90075b676256 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/arb.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/arb.c
@@ -198,7 +198,7 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
 		int *burst, int *lwm)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvif_object *device = &nouveau_drm(dev)->device.object;
+	struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
 	struct nv_fifo_info fifo_data;
 	struct nv_sim_state sim_data;
 	int MClk = nouveau_hw_get_clock(dev, PLL_MEMORY);
@@ -227,7 +227,7 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
 		sim_data.mem_page_miss = ((cfg1 >> 4) & 0xf) + ((cfg1 >> 31) & 0x1);
 	}
 
-	if (drm->device.info.family == NV_DEVICE_INFO_V0_TNT)
+	if (drm->client.device.info.family == NV_DEVICE_INFO_V0_TNT)
 		nv04_calc_arb(&fifo_data, &sim_data);
 	else
 		nv10_calc_arb(&fifo_data, &sim_data);
@@ -254,7 +254,7 @@ nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 
-	if (drm->device.info.family < NV_DEVICE_INFO_V0_KELVIN)
+	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_KELVIN)
 		nv04_update_arb(dev, vclk, bpp, burst, lwm);
 	else if ((dev->pdev->device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
 		 (dev->pdev->device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index a72754d73c84..ab7b69c11d40 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -113,8 +113,8 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod
 {
 	struct drm_device *dev = crtc->dev;
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_bios *bios = nvxx_bios(&drm->device);
-	struct nvkm_clk *clk = nvxx_clk(&drm->device);
+	struct nvkm_bios *bios = nvxx_bios(&drm->client.device);
+	struct nvkm_clk *clk = nvxx_clk(&drm->client.device);
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
 	struct nv04_mode_state *state = &nv04_display(dev)->mode_reg;
 	struct nv04_crtc_reg *regp = &state->crtc_reg[nv_crtc->index];
@@ -138,7 +138,7 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod
 	 * has yet been observed in allowing the use a single stage pll on all
 	 * nv43 however. the behaviour of single stage use is untested on nv40
 	 */
-	if (drm->device.info.chipset > 0x40 && dot_clock <= (pll_lim.vco1.max_freq / 2))
+	if (drm->client.device.info.chipset > 0x40 && dot_clock <= (pll_lim.vco1.max_freq / 2))
 		memset(&pll_lim.vco2, 0, sizeof(pll_lim.vco2));
 
 
@@ -148,10 +148,10 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod
 	state->pllsel &= PLLSEL_VPLL1_MASK | PLLSEL_VPLL2_MASK | PLLSEL_TV_MASK;
 
 	/* The blob uses this always, so let's do the same */
-	if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
+	if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE)
 		state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_USE_VPLL2_TRUE;
 	/* again nv40 and some nv43 act more like nv3x as described above */
-	if (drm->device.info.chipset < 0x41)
+	if (drm->client.device.info.chipset < 0x41)
 		state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_MPLL |
 				 NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_NVPLL;
 	state->pllsel |= nv_crtc->index ? PLLSEL_VPLL2_MASK : PLLSEL_VPLL1_MASK;
@@ -270,7 +270,7 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
 	horizEnd = horizTotal - 2;
 	horizBlankEnd = horizTotal + 4;
 #if 0
-	if (dev->overlayAdaptor && drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
+	if (dev->overlayAdaptor && drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
 		/* This reportedly works around some video overlay bandwidth problems */
 		horizTotal += 2;
 #endif
@@ -505,7 +505,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
 	regp->cursor_cfg = NV_PCRTC_CURSOR_CONFIG_CUR_LINES_64 |
 			   NV_PCRTC_CURSOR_CONFIG_CUR_PIXELS_64 |
 			   NV_PCRTC_CURSOR_CONFIG_ADDRESS_SPACE_PNVM;
-	if (drm->device.info.chipset >= 0x11)
+	if (drm->client.device.info.chipset >= 0x11)
 		regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_CUR_BPP_32;
 	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
 		regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE;
@@ -546,26 +546,26 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
 	 * 1 << 30 on 0x60.830), for no apparent reason */
 	regp->CRTC[NV_CIO_CRE_59] = off_chip_digital;
 
-	if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
+	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
 		regp->CRTC[0x9f] = off_chip_digital ? 0x11 : 0x1;
 
 	regp->crtc_830 = mode->crtc_vdisplay - 3;
 	regp->crtc_834 = mode->crtc_vdisplay - 1;
 
-	if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
+	if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE)
 		/* This is what the blob does */
 		regp->crtc_850 = NVReadCRTC(dev, 0, NV_PCRTC_850);
 
-	if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
+	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
 		regp->gpio_ext = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT);
 
-	if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
+	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
 		regp->crtc_cfg = NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC;
 	else
 		regp->crtc_cfg = NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC;
 
 	/* Some misc regs */
-	if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) {
+	if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) {
 		regp->CRTC[NV_CIO_CRE_85] = 0xFF;
 		regp->CRTC[NV_CIO_CRE_86] = 0x1;
 	}
@@ -577,7 +577,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
 
 	/* Generic PRAMDAC regs */
 
-	if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
+	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
 		/* Only bit that bios and blob set. */
 		regp->nv10_cursync = (1 << 25);
 
@@ -586,7 +586,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
 			       NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON;
 	if (fb->format->depth == 16)
 		regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
-	if (drm->device.info.chipset >= 0x11)
+	if (drm->client.device.info.chipset >= 0x11)
 		regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_PIPE_LONG;
 
 	regp->ramdac_630 = 0; /* turn off green mode (tv test pattern?) */
@@ -649,7 +649,7 @@ nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
 
 	nv_crtc_mode_set_vga(crtc, adjusted_mode);
 	/* calculated in nv04_dfp_prepare, nv40 needs it written before calculating PLLs */
-	if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
+	if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE)
 		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, nv04_display(dev)->mode_reg.sel_clk);
 	nv_crtc_mode_set_regs(crtc, adjusted_mode);
 	nv_crtc_calc_state_ext(crtc, mode, adjusted_mode->clock);
@@ -710,7 +710,7 @@ static void nv_crtc_prepare(struct drm_crtc *crtc)
 
 	/* Some more preparation. */
 	NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_CONFIG, NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA);
-	if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) {
+	if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) {
 		uint32_t reg900 = NVReadRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900);
 		NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900, reg900 & ~0x10000);
 	}
@@ -886,7 +886,7 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
 	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FF_INDEX);
 	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FFLWM__INDEX);
 
-	if (drm->device.info.family >= NV_DEVICE_INFO_V0_KELVIN) {
+	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_KELVIN) {
 		regp->CRTC[NV_CIO_CRE_47] = arb_lwm >> 8;
 		crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_47);
 	}
@@ -967,7 +967,7 @@ static void nv11_cursor_upload(struct drm_device *dev, struct nouveau_bo *src,
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 
-	if (drm->device.info.chipset == 0x11) {
+	if (drm->client.device.info.chipset == 0x11) {
 		pixel = ((pixel & 0x000000ff) << 24) |
 			((pixel & 0x0000ff00) << 8) |
 			((pixel & 0x00ff0000) >> 8) |
@@ -1008,7 +1008,7 @@ nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
 	if (ret)
 		goto out;
 
-	if (drm->device.info.chipset >= 0x11)
+	if (drm->client.device.info.chipset >= 0x11)
 		nv11_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo);
 	else
 		nv04_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo);
@@ -1124,8 +1124,9 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
 	drm_crtc_helper_add(&nv_crtc->base, &nv04_crtc_helper_funcs);
 	drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
 
-	ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
-			     0, 0x0000, NULL, NULL, &nv_crtc->cursor.nvbo);
+	ret = nouveau_bo_new(&nouveau_drm(dev)->client, 64*64*4, 0x100,
+			     TTM_PL_FLAG_VRAM, 0, 0x0000, NULL, NULL,
+			     &nv_crtc->cursor.nvbo);
 	if (!ret) {
 		ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM, false);
 		if (!ret) {
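Beyond the accessor rename, the hunk above also shows nouveau_bo_new() taking a struct nouveau_cli * as its first parameter in place of the old struct drm_device *. Inferred from this call site alone (a sketch, not copied from nouveau_bo.h; argument names and exact types are guesses):

    /* Hypothetical prototype reconstructed from the call above. */
    int nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
                       u32 flags, u32 tile_mode, u32 tile_flags,
                       struct sg_table *sg, struct reservation_object *robj,
                       struct nouveau_bo **pnvbo);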
diff --git a/drivers/gpu/drm/nouveau/dispnv04/cursor.c b/drivers/gpu/drm/nouveau/dispnv04/cursor.c
index c83116a308a4..f26e44ea7389 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/cursor.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/cursor.c
@@ -55,7 +55,7 @@ nv04_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
 	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
 	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
 	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
-	if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
+	if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE)
 		nv_fix_nv40_hw_cursor(dev, nv_crtc->index);
 }
 
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c
index b6cc7766e6f7..4feab0a5419d 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dac.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c
@@ -66,7 +66,7 @@ int nv04_dac_output_offset(struct drm_encoder *encoder)
 static int sample_load_twice(struct drm_device *dev, bool sense[2])
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvif_object *device = &drm->device.object;
+	struct nvif_object *device = &drm->client.device.object;
 	int i;
 
 	for (i = 0; i < 2; i++) {
@@ -80,19 +80,19 @@ static int sample_load_twice(struct drm_device *dev, bool sense[2])
 		 * use a 10ms timeout (guards against crtc being inactive, in
 		 * which case blank state would never change)
 		 */
-		if (nvif_msec(&drm->device, 10,
+		if (nvif_msec(&drm->client.device, 10,
 			if (!(nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 1))
 				break;
 		) < 0)
 			return -EBUSY;
 
-		if (nvif_msec(&drm->device, 10,
+		if (nvif_msec(&drm->client.device, 10,
 			if ( (nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 1))
 				break;
 		) < 0)
 			return -EBUSY;
 
-		if (nvif_msec(&drm->device, 10,
+		if (nvif_msec(&drm->client.device, 10,
 			if (!(nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 1))
 				break;
 		) < 0)
@@ -133,7 +133,7 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
 						 struct drm_connector *connector)
 {
 	struct drm_device *dev = encoder->dev;
-	struct nvif_object *device = &nouveau_drm(dev)->device.object;
+	struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode;
 	uint8_t saved_palette0[3], saved_palette_mask;
@@ -236,8 +236,8 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvif_object *device = &nouveau_drm(dev)->device.object;
-	struct nvkm_gpio *gpio = nvxx_gpio(&drm->device);
+	struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
+	struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device);
 	struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
 	uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder);
 	uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput,
@@ -288,7 +288,7 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
 	/* nv driver and nv31 use 0xfffffeee, nv34 and 6600 use 0xfffffece */
 	routput = (saved_routput & 0xfffffece) | head << 8;
 
-	if (drm->device.info.family >= NV_DEVICE_INFO_V0_CURIE) {
+	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CURIE) {
 		if (dcb->type == DCB_OUTPUT_TV)
 			routput |= 0x1a << 16;
 		else
@@ -403,7 +403,7 @@ static void nv04_dac_mode_set(struct drm_encoder *encoder,
 	}
 
 	/* This could use refinement for flatpanels, but it should work this way */
-	if (drm->device.info.chipset < 0x44)
+	if (drm->client.device.info.chipset < 0x44)
 		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000);
 	else
 		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index 2e5bb2afda7c..9805d2cdc1a1 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -281,7 +281,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
 			      struct drm_display_mode *adjusted_mode)
 {
 	struct drm_device *dev = encoder->dev;
-	struct nvif_object *device = &nouveau_drm(dev)->device.object;
+	struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
 	struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
@@ -417,7 +417,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
 	if ((nv_connector->dithering_mode == DITHERING_MODE_ON) ||
 	    (nv_connector->dithering_mode == DITHERING_MODE_AUTO &&
 	     fb->format->depth > connector->display_info.bpc * 3)) {
-		if (drm->device.info.chipset == 0x11)
+		if (drm->client.device.info.chipset == 0x11)
 			regp->dither = savep->dither | 0x00010000;
 		else {
 			int i;
@@ -428,7 +428,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
 			}
 		}
 	} else {
-		if (drm->device.info.chipset != 0x11) {
+		if (drm->client.device.info.chipset != 0x11) {
 			/* reset them */
 			int i;
 			for (i = 0; i < 3; i++) {
@@ -464,7 +464,7 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
 		NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL);
 
 	/* This could use refinement for flatpanels, but it should work this way */
-	if (drm->device.info.chipset < 0x44)
+	if (drm->client.device.info.chipset < 0x44)
 		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000);
 	else
 		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);
@@ -486,7 +486,7 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
 {
 #ifdef __powerpc__
 	struct drm_device *dev = encoder->dev;
-	struct nvif_object *device = &nouveau_drm(dev)->device.object;
+	struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
 
 	/* BIOS scripts usually take care of the backlight, thanks
 	 * Apple for your consistency.
@@ -624,7 +624,7 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder)
 	struct drm_device *dev = encoder->dev;
 	struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
+	struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
 	struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_PRI);
 	struct nvkm_i2c_bus_probe info[] = {
 		{
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 34c0f2f67548..5b9d549aa791 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -35,7 +35,7 @@ int
 nv04_display_create(struct drm_device *dev)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
+	struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
 	struct dcb_table *dcb = &drm->vbios.dcb;
 	struct drm_connector *connector, *ct;
 	struct drm_encoder *encoder;
@@ -48,7 +48,7 @@ nv04_display_create(struct drm_device *dev)
 	if (!disp)
 		return -ENOMEM;
 
-	nvif_object_map(&drm->device.object);
+	nvif_object_map(&drm->client.device.object);
 
 	nouveau_display(dev)->priv = disp;
 	nouveau_display(dev)->dtor = nv04_display_destroy;
@@ -139,7 +139,7 @@ nv04_display_destroy(struct drm_device *dev)
 	nouveau_display(dev)->priv = NULL;
 	kfree(disp);
 
-	nvif_object_unmap(&drm->device.object);
+	nvif_object_unmap(&drm->client.device.object);
 }
 
 int
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index 7030307d2d48..bea4543554ba 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -129,7 +129,7 @@ nv_two_heads(struct drm_device *dev)
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	const int impl = dev->pdev->device & 0x0ff0;
 
-	if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS && impl != 0x0100 &&
+	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS && impl != 0x0100 &&
 	    impl != 0x0150 && impl != 0x01a0 && impl != 0x0200)
 		return true;
 
@@ -148,7 +148,7 @@ nv_two_reg_pll(struct drm_device *dev)
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	const int impl = dev->pdev->device & 0x0ff0;
 
-	if (impl == 0x0310 || impl == 0x0340 || drm->device.info.family >= NV_DEVICE_INFO_V0_CURIE)
+	if (impl == 0x0310 || impl == 0x0340 || drm->client.device.info.family >= NV_DEVICE_INFO_V0_CURIE)
 		return true;
 	return false;
 }
@@ -170,7 +170,7 @@ nouveau_bios_run_init_table(struct drm_device *dev, u16 table,
 			    struct dcb_output *outp, int crtc)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_bios *bios = nvxx_bios(&drm->device);
+	struct nvkm_bios *bios = nvxx_bios(&drm->client.device);
 	struct nvbios_init init = {
 		.subdev = &bios->subdev,
 		.bios = bios,
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
index 74856a8b8f35..48ad4be28867 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
@@ -89,7 +89,7 @@ NVSetOwner(struct drm_device *dev, int owner)
89 if (owner == 1) 89 if (owner == 1)
90 owner *= 3; 90 owner *= 3;
91 91
92 if (drm->device.info.chipset == 0x11) { 92 if (drm->client.device.info.chipset == 0x11) {
93 /* This might seem stupid, but the blob does it and 93 /* This might seem stupid, but the blob does it and
94 * omitting it often locks the system up. 94 * omitting it often locks the system up.
95 */ 95 */
@@ -100,7 +100,7 @@ NVSetOwner(struct drm_device *dev, int owner)
100 /* CR44 is always changed on CRTC0 */ 100 /* CR44 is always changed on CRTC0 */
101 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_44, owner); 101 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_44, owner);
102 102
103 if (drm->device.info.chipset == 0x11) { /* set me harder */ 103 if (drm->client.device.info.chipset == 0x11) { /* set me harder */
104 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner); 104 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner);
105 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner); 105 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner);
106 } 106 }
@@ -149,7 +149,7 @@ nouveau_hw_decode_pll(struct drm_device *dev, uint32_t reg1, uint32_t pll1,
149 pllvals->NM1 = pll1 & 0xffff; 149 pllvals->NM1 = pll1 & 0xffff;
150 if (nv_two_reg_pll(dev) && pll2 & NV31_RAMDAC_ENABLE_VCO2) 150 if (nv_two_reg_pll(dev) && pll2 & NV31_RAMDAC_ENABLE_VCO2)
151 pllvals->NM2 = pll2 & 0xffff; 151 pllvals->NM2 = pll2 & 0xffff;
152 else if (drm->device.info.chipset == 0x30 || drm->device.info.chipset == 0x35) { 152 else if (drm->client.device.info.chipset == 0x30 || drm->client.device.info.chipset == 0x35) {
153 pllvals->M1 &= 0xf; /* only 4 bits */ 153 pllvals->M1 &= 0xf; /* only 4 bits */
154 if (pll1 & NV30_RAMDAC_ENABLE_VCO2) { 154 if (pll1 & NV30_RAMDAC_ENABLE_VCO2) {
155 pllvals->M2 = (pll1 >> 4) & 0x7; 155 pllvals->M2 = (pll1 >> 4) & 0x7;
@@ -165,8 +165,8 @@ nouveau_hw_get_pllvals(struct drm_device *dev, enum nvbios_pll_type plltype,
165 struct nvkm_pll_vals *pllvals) 165 struct nvkm_pll_vals *pllvals)
166{ 166{
167 struct nouveau_drm *drm = nouveau_drm(dev); 167 struct nouveau_drm *drm = nouveau_drm(dev);
168 struct nvif_object *device = &drm->device.object; 168 struct nvif_object *device = &drm->client.device.object;
169 struct nvkm_bios *bios = nvxx_bios(&drm->device); 169 struct nvkm_bios *bios = nvxx_bios(&drm->client.device);
170 uint32_t reg1, pll1, pll2 = 0; 170 uint32_t reg1, pll1, pll2 = 0;
171 struct nvbios_pll pll_lim; 171 struct nvbios_pll pll_lim;
172 int ret; 172 int ret;
@@ -184,7 +184,7 @@ nouveau_hw_get_pllvals(struct drm_device *dev, enum nvbios_pll_type plltype,
184 pll2 = nvif_rd32(device, reg2); 184 pll2 = nvif_rd32(device, reg2);
185 } 185 }
186 186
187 if (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS && reg1 >= NV_PRAMDAC_VPLL_COEFF) { 187 if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS && reg1 >= NV_PRAMDAC_VPLL_COEFF) {
188 uint32_t ramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580); 188 uint32_t ramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580);
189 189
190 /* check whether vpll has been forced into single stage mode */ 190 /* check whether vpll has been forced into single stage mode */
@@ -252,7 +252,7 @@ nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head)
252 */ 252 */
253 253
254 struct nouveau_drm *drm = nouveau_drm(dev); 254 struct nouveau_drm *drm = nouveau_drm(dev);
255 struct nvif_device *device = &drm->device; 255 struct nvif_device *device = &drm->client.device;
256 struct nvkm_clk *clk = nvxx_clk(device); 256 struct nvkm_clk *clk = nvxx_clk(device);
257 struct nvkm_bios *bios = nvxx_bios(device); 257 struct nvkm_bios *bios = nvxx_bios(device);
258 struct nvbios_pll pll_lim; 258 struct nvbios_pll pll_lim;
@@ -391,21 +391,21 @@ nv_save_state_ramdac(struct drm_device *dev, int head,
391 struct nv04_crtc_reg *regp = &state->crtc_reg[head]; 391 struct nv04_crtc_reg *regp = &state->crtc_reg[head];
392 int i; 392 int i;
393 393
394 if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) 394 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
395 regp->nv10_cursync = NVReadRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC); 395 regp->nv10_cursync = NVReadRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC);
396 396
397 nouveau_hw_get_pllvals(dev, head ? PLL_VPLL1 : PLL_VPLL0, &regp->pllvals); 397 nouveau_hw_get_pllvals(dev, head ? PLL_VPLL1 : PLL_VPLL0, &regp->pllvals);
398 state->pllsel = NVReadRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT); 398 state->pllsel = NVReadRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT);
399 if (nv_two_heads(dev)) 399 if (nv_two_heads(dev))
400 state->sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK); 400 state->sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK);
401 if (drm->device.info.chipset == 0x11) 401 if (drm->client.device.info.chipset == 0x11)
402 regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11); 402 regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11);
403 403
404 regp->ramdac_gen_ctrl = NVReadRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL); 404 regp->ramdac_gen_ctrl = NVReadRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL);
405 405
406 if (nv_gf4_disp_arch(dev)) 406 if (nv_gf4_disp_arch(dev))
407 regp->ramdac_630 = NVReadRAMDAC(dev, head, NV_PRAMDAC_630); 407 regp->ramdac_630 = NVReadRAMDAC(dev, head, NV_PRAMDAC_630);
408 if (drm->device.info.chipset >= 0x30) 408 if (drm->client.device.info.chipset >= 0x30)
409 regp->ramdac_634 = NVReadRAMDAC(dev, head, NV_PRAMDAC_634); 409 regp->ramdac_634 = NVReadRAMDAC(dev, head, NV_PRAMDAC_634);
410 410
411 regp->tv_setup = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP); 411 regp->tv_setup = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP);
@@ -447,7 +447,7 @@ nv_save_state_ramdac(struct drm_device *dev, int head,
447 if (nv_gf4_disp_arch(dev)) 447 if (nv_gf4_disp_arch(dev))
448 regp->ramdac_8c0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_8C0); 448 regp->ramdac_8c0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_8C0);
449 449
450 if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) { 450 if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) {
451 regp->ramdac_a20 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A20); 451 regp->ramdac_a20 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A20);
452 regp->ramdac_a24 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A24); 452 regp->ramdac_a24 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A24);
453 regp->ramdac_a34 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A34); 453 regp->ramdac_a34 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A34);
@@ -463,26 +463,26 @@ nv_load_state_ramdac(struct drm_device *dev, int head,
463 struct nv04_mode_state *state) 463 struct nv04_mode_state *state)
464{ 464{
465 struct nouveau_drm *drm = nouveau_drm(dev); 465 struct nouveau_drm *drm = nouveau_drm(dev);
466 struct nvkm_clk *clk = nvxx_clk(&drm->device); 466 struct nvkm_clk *clk = nvxx_clk(&drm->client.device);
467 struct nv04_crtc_reg *regp = &state->crtc_reg[head]; 467 struct nv04_crtc_reg *regp = &state->crtc_reg[head];
468 uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF; 468 uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF;
469 int i; 469 int i;
470 470
471 if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) 471 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
472 NVWriteRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC, regp->nv10_cursync); 472 NVWriteRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC, regp->nv10_cursync);
473 473
474 clk->pll_prog(clk, pllreg, &regp->pllvals); 474 clk->pll_prog(clk, pllreg, &regp->pllvals);
475 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel); 475 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel);
476 if (nv_two_heads(dev)) 476 if (nv_two_heads(dev))
477 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, state->sel_clk); 477 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, state->sel_clk);
478 if (drm->device.info.chipset == 0x11) 478 if (drm->client.device.info.chipset == 0x11)
479 NVWriteRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11, regp->dither); 479 NVWriteRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11, regp->dither);
480 480
481 NVWriteRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL, regp->ramdac_gen_ctrl); 481 NVWriteRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL, regp->ramdac_gen_ctrl);
482 482
483 if (nv_gf4_disp_arch(dev)) 483 if (nv_gf4_disp_arch(dev))
484 NVWriteRAMDAC(dev, head, NV_PRAMDAC_630, regp->ramdac_630); 484 NVWriteRAMDAC(dev, head, NV_PRAMDAC_630, regp->ramdac_630);
485 if (drm->device.info.chipset >= 0x30) 485 if (drm->client.device.info.chipset >= 0x30)
486 NVWriteRAMDAC(dev, head, NV_PRAMDAC_634, regp->ramdac_634); 486 NVWriteRAMDAC(dev, head, NV_PRAMDAC_634, regp->ramdac_634);
487 487
488 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP, regp->tv_setup); 488 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP, regp->tv_setup);
@@ -519,7 +519,7 @@ nv_load_state_ramdac(struct drm_device *dev, int head,
519 if (nv_gf4_disp_arch(dev)) 519 if (nv_gf4_disp_arch(dev))
520 NVWriteRAMDAC(dev, head, NV_PRAMDAC_8C0, regp->ramdac_8c0); 520 NVWriteRAMDAC(dev, head, NV_PRAMDAC_8C0, regp->ramdac_8c0);
521 521
522 if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) { 522 if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) {
523 NVWriteRAMDAC(dev, head, NV_PRAMDAC_A20, regp->ramdac_a20); 523 NVWriteRAMDAC(dev, head, NV_PRAMDAC_A20, regp->ramdac_a20);
524 NVWriteRAMDAC(dev, head, NV_PRAMDAC_A24, regp->ramdac_a24); 524 NVWriteRAMDAC(dev, head, NV_PRAMDAC_A24, regp->ramdac_a24);
525 NVWriteRAMDAC(dev, head, NV_PRAMDAC_A34, regp->ramdac_a34); 525 NVWriteRAMDAC(dev, head, NV_PRAMDAC_A34, regp->ramdac_a34);
@@ -600,10 +600,10 @@ nv_save_state_ext(struct drm_device *dev, int head,
600 rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX); 600 rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
601 rd_cio_state(dev, head, regp, NV_CIO_CRE_21); 601 rd_cio_state(dev, head, regp, NV_CIO_CRE_21);
602 602
603 if (drm->device.info.family >= NV_DEVICE_INFO_V0_KELVIN) 603 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_KELVIN)
604 rd_cio_state(dev, head, regp, NV_CIO_CRE_47); 604 rd_cio_state(dev, head, regp, NV_CIO_CRE_47);
605 605
606 if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE) 606 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
607 rd_cio_state(dev, head, regp, 0x9f); 607 rd_cio_state(dev, head, regp, 0x9f);
608 608
609 rd_cio_state(dev, head, regp, NV_CIO_CRE_49); 609 rd_cio_state(dev, head, regp, NV_CIO_CRE_49);
@@ -612,14 +612,14 @@ nv_save_state_ext(struct drm_device *dev, int head,
612 rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX); 612 rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
613 rd_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX); 613 rd_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX);
614 614
615 if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) { 615 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
616 regp->crtc_830 = NVReadCRTC(dev, head, NV_PCRTC_830); 616 regp->crtc_830 = NVReadCRTC(dev, head, NV_PCRTC_830);
617 regp->crtc_834 = NVReadCRTC(dev, head, NV_PCRTC_834); 617 regp->crtc_834 = NVReadCRTC(dev, head, NV_PCRTC_834);
618 618
619 if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE) 619 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
620 regp->gpio_ext = NVReadCRTC(dev, head, NV_PCRTC_GPIO_EXT); 620 regp->gpio_ext = NVReadCRTC(dev, head, NV_PCRTC_GPIO_EXT);
621 621
622 if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) 622 if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE)
623 regp->crtc_850 = NVReadCRTC(dev, head, NV_PCRTC_850); 623 regp->crtc_850 = NVReadCRTC(dev, head, NV_PCRTC_850);
624 624
625 if (nv_two_heads(dev)) 625 if (nv_two_heads(dev))
@@ -631,7 +631,7 @@ nv_save_state_ext(struct drm_device *dev, int head,
631 631
632 rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX); 632 rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX);
633 rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX); 633 rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX);
634 if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) { 634 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
635 rd_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX); 635 rd_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX);
636 rd_cio_state(dev, head, regp, NV_CIO_CRE_CSB); 636 rd_cio_state(dev, head, regp, NV_CIO_CRE_CSB);
637 rd_cio_state(dev, head, regp, NV_CIO_CRE_4B); 637 rd_cio_state(dev, head, regp, NV_CIO_CRE_4B);
@@ -660,12 +660,12 @@ nv_load_state_ext(struct drm_device *dev, int head,
660 struct nv04_mode_state *state) 660 struct nv04_mode_state *state)
661{ 661{
662 struct nouveau_drm *drm = nouveau_drm(dev); 662 struct nouveau_drm *drm = nouveau_drm(dev);
663 struct nvif_object *device = &drm->device.object; 663 struct nvif_object *device = &drm->client.device.object;
664 struct nv04_crtc_reg *regp = &state->crtc_reg[head]; 664 struct nv04_crtc_reg *regp = &state->crtc_reg[head];
665 uint32_t reg900; 665 uint32_t reg900;
666 int i; 666 int i;
667 667
668 if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) { 668 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
669 if (nv_two_heads(dev)) 669 if (nv_two_heads(dev))
670 /* setting ENGINE_CTRL (EC) *must* come before 670 /* setting ENGINE_CTRL (EC) *must* come before
671 * CIO_CRE_LCD, as writing CRE_LCD sets bits 16 & 17 in 671 * CIO_CRE_LCD, as writing CRE_LCD sets bits 16 & 17 in
@@ -677,20 +677,20 @@ nv_load_state_ext(struct drm_device *dev, int head,
677 nvif_wr32(device, NV_PVIDEO_INTR_EN, 0); 677 nvif_wr32(device, NV_PVIDEO_INTR_EN, 0);
678 nvif_wr32(device, NV_PVIDEO_OFFSET_BUFF(0), 0); 678 nvif_wr32(device, NV_PVIDEO_OFFSET_BUFF(0), 0);
679 nvif_wr32(device, NV_PVIDEO_OFFSET_BUFF(1), 0); 679 nvif_wr32(device, NV_PVIDEO_OFFSET_BUFF(1), 0);
680 nvif_wr32(device, NV_PVIDEO_LIMIT(0), drm->device.info.ram_size - 1); 680 nvif_wr32(device, NV_PVIDEO_LIMIT(0), drm->client.device.info.ram_size - 1);
681 nvif_wr32(device, NV_PVIDEO_LIMIT(1), drm->device.info.ram_size - 1); 681 nvif_wr32(device, NV_PVIDEO_LIMIT(1), drm->client.device.info.ram_size - 1);
682 nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), drm->device.info.ram_size - 1); 682 nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), drm->client.device.info.ram_size - 1);
683 nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), drm->device.info.ram_size - 1); 683 nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), drm->client.device.info.ram_size - 1);
684 nvif_wr32(device, NV_PBUS_POWERCTRL_2, 0); 684 nvif_wr32(device, NV_PBUS_POWERCTRL_2, 0);
685 685
686 NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg); 686 NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg);
687 NVWriteCRTC(dev, head, NV_PCRTC_830, regp->crtc_830); 687 NVWriteCRTC(dev, head, NV_PCRTC_830, regp->crtc_830);
688 NVWriteCRTC(dev, head, NV_PCRTC_834, regp->crtc_834); 688 NVWriteCRTC(dev, head, NV_PCRTC_834, regp->crtc_834);
689 689
690 if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE) 690 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
691 NVWriteCRTC(dev, head, NV_PCRTC_GPIO_EXT, regp->gpio_ext); 691 NVWriteCRTC(dev, head, NV_PCRTC_GPIO_EXT, regp->gpio_ext);
692 692
693 if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) { 693 if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) {
694 NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850); 694 NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850);
695 695
696 reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900); 696 reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900);
@@ -713,23 +713,23 @@ nv_load_state_ext(struct drm_device *dev, int head,
713 wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX); 713 wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX);
714 wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX); 714 wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
715 715
716 if (drm->device.info.family >= NV_DEVICE_INFO_V0_KELVIN) 716 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_KELVIN)
717 wr_cio_state(dev, head, regp, NV_CIO_CRE_47); 717 wr_cio_state(dev, head, regp, NV_CIO_CRE_47);
718 718
719 if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE) 719 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
720 wr_cio_state(dev, head, regp, 0x9f); 720 wr_cio_state(dev, head, regp, 0x9f);
721 721
722 wr_cio_state(dev, head, regp, NV_CIO_CRE_49); 722 wr_cio_state(dev, head, regp, NV_CIO_CRE_49);
723 wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX); 723 wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
724 wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX); 724 wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
725 wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX); 725 wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
726 if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) 726 if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE)
727 nv_fix_nv40_hw_cursor(dev, head); 727 nv_fix_nv40_hw_cursor(dev, head);
728 wr_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX); 728 wr_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX);
729 729
730 wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX); 730 wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX);
731 wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX); 731 wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX);
732 if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) { 732 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
733 wr_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX); 733 wr_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX);
734 wr_cio_state(dev, head, regp, NV_CIO_CRE_CSB); 734 wr_cio_state(dev, head, regp, NV_CIO_CRE_CSB);
735 wr_cio_state(dev, head, regp, NV_CIO_CRE_4B); 735 wr_cio_state(dev, head, regp, NV_CIO_CRE_4B);
@@ -737,14 +737,14 @@ nv_load_state_ext(struct drm_device *dev, int head,
 	}
 	/* NV11 and NV20 stop at 0x52. */
 	if (nv_gf4_disp_arch(dev)) {
-		if (drm->device.info.family < NV_DEVICE_INFO_V0_KELVIN) {
+		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_KELVIN) {
 			/* Not waiting for vertical retrace before modifying
 			   CRE_53/CRE_54 causes lockups. */
-			nvif_msec(&drm->device, 650,
+			nvif_msec(&drm->client.device, 650,
 				if ( (nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 8))
 					break;
 			);
-			nvif_msec(&drm->device, 650,
+			nvif_msec(&drm->client.device, 650,
 				if (!(nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 8))
 					break;
 			);
@@ -770,7 +770,7 @@ static void
 nv_save_state_palette(struct drm_device *dev, int head,
 		      struct nv04_mode_state *state)
 {
-	struct nvif_object *device = &nouveau_drm(dev)->device.object;
+	struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
 	int head_offset = head * NV_PRMDIO_SIZE, i;
 
 	nvif_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset,
@@ -789,7 +789,7 @@ void
 nouveau_hw_load_state_palette(struct drm_device *dev, int head,
 			      struct nv04_mode_state *state)
 {
-	struct nvif_object *device = &nouveau_drm(dev)->device.object;
+	struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
 	int head_offset = head * NV_PRMDIO_SIZE, i;
 
 	nvif_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset,
@@ -809,7 +809,7 @@ void nouveau_hw_save_state(struct drm_device *dev, int head,
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 
-	if (drm->device.info.chipset == 0x11)
+	if (drm->client.device.info.chipset == 0x11)
 		/* NB: no attempt is made to restore the bad pll later on */
 		nouveau_hw_fix_bad_vpll(dev, head);
 	nv_save_state_ramdac(dev, head, state);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.h b/drivers/gpu/drm/nouveau/dispnv04/hw.h
index 3bded60c5596..3a2be47fb4f1 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.h
@@ -60,7 +60,7 @@ extern void nouveau_calc_arb(struct drm_device *, int vclk, int bpp,
 static inline uint32_t NVReadCRTC(struct drm_device *dev,
 					int head, uint32_t reg)
 {
-	struct nvif_object *device = &nouveau_drm(dev)->device.object;
+	struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
 	uint32_t val;
 	if (head)
 		reg += NV_PCRTC0_SIZE;
@@ -71,7 +71,7 @@ static inline uint32_t NVReadCRTC(struct drm_device *dev,
 static inline void NVWriteCRTC(struct drm_device *dev,
 					int head, uint32_t reg, uint32_t val)
 {
-	struct nvif_object *device = &nouveau_drm(dev)->device.object;
+	struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
 	if (head)
 		reg += NV_PCRTC0_SIZE;
 	nvif_wr32(device, reg, val);
@@ -80,7 +80,7 @@ static inline void NVWriteCRTC(struct drm_device *dev,
 static inline uint32_t NVReadRAMDAC(struct drm_device *dev,
 					int head, uint32_t reg)
 {
-	struct nvif_object *device = &nouveau_drm(dev)->device.object;
+	struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
 	uint32_t val;
 	if (head)
 		reg += NV_PRAMDAC0_SIZE;
@@ -91,7 +91,7 @@ static inline uint32_t NVReadRAMDAC(struct drm_device *dev,
 static inline void NVWriteRAMDAC(struct drm_device *dev,
 					int head, uint32_t reg, uint32_t val)
 {
-	struct nvif_object *device = &nouveau_drm(dev)->device.object;
+	struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
 	if (head)
 		reg += NV_PRAMDAC0_SIZE;
 	nvif_wr32(device, reg, val);
@@ -120,7 +120,7 @@ static inline void nv_write_tmds(struct drm_device *dev,
 static inline void NVWriteVgaCrtc(struct drm_device *dev,
 					int head, uint8_t index, uint8_t value)
 {
-	struct nvif_object *device = &nouveau_drm(dev)->device.object;
+	struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
 	nvif_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
 	nvif_wr08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE, value);
 }
@@ -128,7 +128,7 @@ static inline void NVWriteVgaCrtc(struct drm_device *dev,
 static inline uint8_t NVReadVgaCrtc(struct drm_device *dev,
 					int head, uint8_t index)
 {
-	struct nvif_object *device = &nouveau_drm(dev)->device.object;
+	struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
 	uint8_t val;
 	nvif_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
 	val = nvif_rd08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE);
@@ -165,13 +165,13 @@ static inline uint8_t NVReadVgaCrtc5758(struct drm_device *dev, int head, uint8_
 static inline uint8_t NVReadPRMVIO(struct drm_device *dev,
 					int head, uint32_t reg)
 {
-	struct nvif_object *device = &nouveau_drm(dev)->device.object;
+	struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	uint8_t val;
 
 	/* Only NV4x have two pvio ranges; other twoHeads cards MUST call
 	 * NVSetOwner for the relevant head to be programmed */
-	if (head && drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
+	if (head && drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE)
 		reg += NV_PRMVIO_SIZE;
 
 	val = nvif_rd08(device, reg);
@@ -181,12 +181,12 @@ static inline uint8_t NVReadPRMVIO(struct drm_device *dev,
 static inline void NVWritePRMVIO(struct drm_device *dev,
 					int head, uint32_t reg, uint8_t value)
 {
-	struct nvif_object *device = &nouveau_drm(dev)->device.object;
+	struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
 	struct nouveau_drm *drm = nouveau_drm(dev);
 
 	/* Only NV4x have two pvio ranges; other twoHeads cards MUST call
 	 * NVSetOwner for the relevant head to be programmed */
-	if (head && drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
+	if (head && drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE)
 		reg += NV_PRMVIO_SIZE;
 
 	nvif_wr08(device, reg, value);
@@ -194,14 +194,14 @@ static inline void NVWritePRMVIO(struct drm_device *dev,
 
 static inline void NVSetEnablePalette(struct drm_device *dev, int head, bool enable)
 {
-	struct nvif_object *device = &nouveau_drm(dev)->device.object;
+	struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
 	nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
 	nvif_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, enable ? 0 : 0x20);
 }
 
 static inline bool NVGetEnablePalette(struct drm_device *dev, int head)
 {
-	struct nvif_object *device = &nouveau_drm(dev)->device.object;
+	struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
 	nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
 	return !(nvif_rd08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE) & 0x20);
 }
@@ -209,7 +209,7 @@ static inline bool NVGetEnablePalette(struct drm_device *dev, int head)
 static inline void NVWriteVgaAttr(struct drm_device *dev,
 					int head, uint8_t index, uint8_t value)
 {
-	struct nvif_object *device = &nouveau_drm(dev)->device.object;
+	struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
 	if (NVGetEnablePalette(dev, head))
 		index &= ~0x20;
 	else
@@ -223,7 +223,7 @@ static inline void NVWriteVgaAttr(struct drm_device *dev,
 static inline uint8_t NVReadVgaAttr(struct drm_device *dev,
 					int head, uint8_t index)
 {
-	struct nvif_object *device = &nouveau_drm(dev)->device.object;
+	struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
 	uint8_t val;
 	if (NVGetEnablePalette(dev, head))
 		index &= ~0x20;
@@ -259,10 +259,10 @@ static inline void NVVgaProtect(struct drm_device *dev, int head, bool protect)
 static inline bool
 nv_heads_tied(struct drm_device *dev)
 {
-	struct nvif_object *device = &nouveau_drm(dev)->device.object;
+	struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
 	struct nouveau_drm *drm = nouveau_drm(dev);
 
-	if (drm->device.info.chipset == 0x11)
+	if (drm->client.device.info.chipset == 0x11)
 		return !!(nvif_rd32(device, NV_PBUS_DEBUG_1) & (1 << 28));
 
 	return NVReadVgaCrtc(dev, 0, NV_CIO_CRE_44) & 0x4;
@@ -318,7 +318,7 @@ NVLockVgaCrtcs(struct drm_device *dev, bool lock)
 	NVWriteVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX,
 		       lock ? NV_CIO_SR_LOCK_VALUE : NV_CIO_SR_UNLOCK_RW_VALUE);
 	/* NV11 has independently lockable extended crtcs, except when tied */
-	if (drm->device.info.chipset == 0x11 && !nv_heads_tied(dev))
+	if (drm->client.device.info.chipset == 0x11 && !nv_heads_tied(dev))
 		NVWriteVgaCrtc(dev, 1, NV_CIO_SR_LOCK_INDEX,
 			       lock ? NV_CIO_SR_LOCK_VALUE :
 			       NV_CIO_SR_UNLOCK_RW_VALUE);
@@ -335,7 +335,7 @@ static inline int nv_cursor_width(struct drm_device *dev)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 
-	return drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS ? NV10_CURSOR_SIZE : NV04_CURSOR_SIZE;
+	return drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS ? NV10_CURSOR_SIZE : NV04_CURSOR_SIZE;
 }
 
 static inline void
@@ -357,7 +357,7 @@ nv_set_crtc_base(struct drm_device *dev, int head, uint32_t offset)
 
 	NVWriteCRTC(dev, head, NV_PCRTC_START, offset);
 
-	if (drm->device.info.family == NV_DEVICE_INFO_V0_TNT) {
+	if (drm->client.device.info.family == NV_DEVICE_INFO_V0_TNT) {
 		/*
 		 * Hilarious, the 24th bit doesn't want to stick to
 		 * PCRTC_START...
@@ -382,7 +382,7 @@ nv_show_cursor(struct drm_device *dev, int head, bool show)
 	*curctl1 &= ~MASK(NV_CIO_CRE_HCUR_ADDR1_ENABLE);
 	NVWriteVgaCrtc(dev, head, NV_CIO_CRE_HCUR_ADDR1_INDEX, *curctl1);
 
-	if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
+	if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE)
 		nv_fix_nv40_hw_cursor(dev, head);
 }
 
@@ -398,7 +398,7 @@ nv_pitch_align(struct drm_device *dev, uint32_t width, int bpp)
 		bpp = 8;
 
 	/* Alignment requirements taken from the Haiku driver */
-	if (drm->device.info.family == NV_DEVICE_INFO_V0_TNT)
+	if (drm->client.device.info.family == NV_DEVICE_INFO_V0_TNT)
 		mask = 128 / bpp - 1;
 	else
 		mask = 512 / bpp - 1;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
index 6275c270df25..5319f2a7f24d 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -97,7 +97,7 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 		  uint32_t src_w, uint32_t src_h)
 {
 	struct nouveau_drm *drm = nouveau_drm(plane->dev);
-	struct nvif_object *dev = &drm->device.object;
+	struct nvif_object *dev = &drm->client.device.object;
 	struct nouveau_plane *nv_plane =
 		container_of(plane, struct nouveau_plane, base);
 	struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
@@ -119,7 +119,7 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 	if (format > 0xffff)
 		return -ERANGE;
 
-	if (drm->device.info.chipset >= 0x30) {
+	if (drm->client.device.info.chipset >= 0x30) {
 		if (crtc_w < (src_w >> 1) || crtc_h < (src_h >> 1))
 			return -ERANGE;
 	} else {
@@ -174,7 +174,7 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 static int
 nv10_disable_plane(struct drm_plane *plane)
 {
-	struct nvif_object *dev = &nouveau_drm(plane->dev)->device.object;
+	struct nvif_object *dev = &nouveau_drm(plane->dev)->client.device.object;
 	struct nouveau_plane *nv_plane =
 		container_of(plane, struct nouveau_plane, base);
 
@@ -198,7 +198,7 @@ nv_destroy_plane(struct drm_plane *plane)
 static void
 nv10_set_params(struct nouveau_plane *plane)
 {
-	struct nvif_object *dev = &nouveau_drm(plane->base.dev)->device.object;
+	struct nvif_object *dev = &nouveau_drm(plane->base.dev)->client.device.object;
 	u32 luma = (plane->brightness - 512) << 16 | plane->contrast;
 	u32 chroma = ((sin_mul(plane->hue, plane->saturation) & 0xffff) << 16) |
 		(cos_mul(plane->hue, plane->saturation) & 0xffff);
@@ -268,7 +268,7 @@ nv10_overlay_init(struct drm_device *device)
 	if (!plane)
 		return;
 
-	switch (drm->device.info.chipset) {
+	switch (drm->client.device.info.chipset) {
 	case 0x10:
 	case 0x11:
 	case 0x15:
@@ -347,7 +347,7 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 		  uint32_t src_x, uint32_t src_y,
 		  uint32_t src_w, uint32_t src_h)
 {
-	struct nvif_object *dev = &nouveau_drm(plane->dev)->device.object;
+	struct nvif_object *dev = &nouveau_drm(plane->dev)->client.device.object;
 	struct nouveau_plane *nv_plane =
 		container_of(plane, struct nouveau_plane, base);
 	struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
@@ -427,7 +427,7 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 static int
 nv04_disable_plane(struct drm_plane *plane)
 {
-	struct nvif_object *dev = &nouveau_drm(plane->dev)->device.object;
+	struct nvif_object *dev = &nouveau_drm(plane->dev)->client.device.object;
 	struct nouveau_plane *nv_plane =
 		container_of(plane, struct nouveau_plane, base);
 
@@ -495,7 +495,7 @@ err:
 void
 nouveau_overlay_init(struct drm_device *device)
 {
-	struct nvif_device *dev = &nouveau_drm(device)->device;
+	struct nvif_device *dev = &nouveau_drm(device)->client.device;
 	if (dev->info.chipset < 0x10)
 		nv04_overlay_init(device);
 	else if (dev->info.chipset <= 0x40)
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index 477a8d072af4..01664357d3e1 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -54,7 +54,7 @@ static struct nvkm_i2c_bus_probe nv04_tv_encoder_info[] = {
 int nv04_tv_identify(struct drm_device *dev, int i2c_index)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
+	struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
 	struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, i2c_index);
 	if (bus) {
 		return nvkm_i2c_bus_probe(bus, "TV encoder",
@@ -206,7 +206,7 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
 	struct drm_encoder *encoder;
 	struct drm_device *dev = connector->dev;
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
+	struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
 	struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, entry->i2c_index);
 	int type, ret;
 
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index 434d1e29f279..6d99f11fee4e 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -46,7 +46,7 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_gpio *gpio = nvxx_gpio(&drm->device);
+	struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device);
 	uint32_t testval, regoffset = nv04_dac_output_offset(encoder);
 	uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end,
 		fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c;
@@ -130,7 +130,7 @@ static bool
 get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_device *device = nvxx_device(&drm->device);
+	struct nvkm_device *device = nvxx_device(&drm->client.device);
 
 	if (device->quirk && device->quirk->tv_pin_mask) {
 		*pin_mask = device->quirk->tv_pin_mask;
@@ -154,8 +154,8 @@ nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector)
 		return connector_status_disconnected;
 
 	if (reliable) {
-		if (drm->device.info.chipset == 0x42 ||
-		    drm->device.info.chipset == 0x43)
+		if (drm->client.device.info.chipset == 0x42 ||
+		    drm->client.device.info.chipset == 0x43)
 			tv_enc->pin_mask =
 				nv42_tv_sample_load(encoder) >> 28 & 0xe;
 		else
@@ -362,7 +362,7 @@ static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
 {
 	struct drm_device *dev = encoder->dev;
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_gpio *gpio = nvxx_gpio(&drm->device);
+	struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device);
 	struct nv17_tv_state *regs = &to_tv_enc(encoder)->state;
 	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
 
@@ -435,7 +435,7 @@ static void nv17_tv_prepare(struct drm_encoder *encoder)
 	/* Set the DACCLK register */
 	dacclk = (NVReadRAMDAC(dev, 0, dacclk_off) & ~0x30) | 0x1;
 
-	if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
+	if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE)
 		dacclk |= 0x1a << 16;
 
 	if (tv_norm->kind == CTV_ENC_MODE) {
@@ -492,7 +492,7 @@ static void nv17_tv_mode_set(struct drm_encoder *encoder,
 		tv_regs->ptv_614 = 0x13;
 	}
 
-	if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE) {
+	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_RANKINE) {
 		tv_regs->ptv_500 = 0xe8e0;
 		tv_regs->ptv_504 = 0x1710;
 		tv_regs->ptv_604 = 0x0;
@@ -587,7 +587,7 @@ static void nv17_tv_commit(struct drm_encoder *encoder)
 	nv17_tv_state_load(dev, &to_tv_enc(encoder)->state);
 
 	/* This could use refinement for flatpanels, but it should work */
-	if (drm->device.info.chipset < 0x44)
+	if (drm->client.device.info.chipset < 0x44)
 		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL +
 			      nv04_dac_output_offset(encoder),
 			      0xf0000000);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h
index 1b07521cde0d..29773b325bd9 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h
@@ -130,13 +130,13 @@ void nv17_ctv_update_rescaler(struct drm_encoder *encoder);
 static inline void nv_write_ptv(struct drm_device *dev, uint32_t reg,
 				uint32_t val)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_device *device = &nouveau_drm(dev)->client.device;
 	nvif_wr32(&device->object, reg, val);
 }
 
 static inline uint32_t nv_read_ptv(struct drm_device *dev, uint32_t reg)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_device *device = &nouveau_drm(dev)->client.device;
 	return nvif_rd32(&device->object, reg);
 }
 
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl826e.h b/drivers/gpu/drm/nouveau/include/nvif/cl826e.h
index 05e6ef7cd190..91e33db21a2f 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl826e.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl826e.h
@@ -10,5 +10,5 @@ struct g82_channel_dma_v0 {
 	__u64 offset;
 };
 
-#define G82_CHANNEL_DMA_V0_NTFY_UEVENT 0x00
+#define NV826E_V0_NTFY_NON_STALL_INTERRUPT 0x00
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl826f.h b/drivers/gpu/drm/nouveau/include/nvif/cl826f.h
index cecafcb1e954..e34efd4ec537 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl826f.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl826f.h
@@ -11,5 +11,5 @@ struct g82_channel_gpfifo_v0 {
 	__u64 vm;
 };
 
-#define G82_CHANNEL_GPFIFO_V0_NTFY_UEVENT 0x00
+#define NV826F_V0_NTFY_NON_STALL_INTERRUPT 0x00
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl906f.h b/drivers/gpu/drm/nouveau/include/nvif/cl906f.h
index 2caf0838fcfd..a2d5410a491b 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl906f.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl906f.h
@@ -10,5 +10,6 @@ struct fermi_channel_gpfifo_v0 {
 	__u64 vm;
 };
 
-#define FERMI_CHANNEL_GPFIFO_V0_NTFY_UEVENT 0x00
+#define NV906F_V0_NTFY_NON_STALL_INTERRUPT 0x00
+#define NV906F_V0_NTFY_KILLED 0x01
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cla06f.h b/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
index 46301ec018ce..2efa3d048bb9 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
@@ -25,5 +25,6 @@ struct kepler_channel_gpfifo_a_v0 {
 	__u64 vm;
 };
 
-#define NVA06F_V0_NTFY_UEVENT 0x00
+#define NVA06F_V0_NTFY_NON_STALL_INTERRUPT 0x00
+#define NVA06F_V0_NTFY_KILLED 0x01
 #endif
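/*
 * Editor's sketch: the UEVENT defines above were renamed to
 * NON_STALL_INTERRUPT, and gf100+/kepler+ channels gained an NTFY_KILLED
 * notification so the DRM side can learn when recovery has killed a
 * channel.  A client could subscribe roughly like this (the handler name
 * and the chan->kill member are hypothetical; error handling omitted):
 */
static int
example_channel_killed(struct nvif_notify *ntfy)
{
	/* mark the channel dead so later submissions can fail fast */
	return NVIF_NOTIFY_DROP;
}

static int
example_watch_killed(struct nouveau_channel *chan)
{
	return nvif_notify_init(&chan->user, example_channel_killed, true,
				NVA06F_V0_NTFY_KILLED, NULL, 0, 0,
				&chan->kill);
}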
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index 82235f30277c..3a2c0137d4b4 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -2,23 +2,31 @@
 #define __NVIF_CLASS_H__
 
 /* these class numbers are made up by us, and not nvidia-assigned */
-#define NVIF_CLASS_CONTROL /* if0001.h */ -1
-#define NVIF_CLASS_PERFMON /* if0002.h */ -2
-#define NVIF_CLASS_PERFDOM /* if0003.h */ -3
-#define NVIF_CLASS_SW_NV04 /* if0004.h */ -4
-#define NVIF_CLASS_SW_NV10 /* if0005.h */ -5
-#define NVIF_CLASS_SW_NV50 /* if0005.h */ -6
-#define NVIF_CLASS_SW_GF100 /* if0005.h */ -7
+#define NVIF_CLASS_CLIENT /* if0000.h */ -0x00000000
+
+#define NVIF_CLASS_CONTROL /* if0001.h */ -0x00000001
+
+#define NVIF_CLASS_PERFMON /* if0002.h */ -0x00000002
+#define NVIF_CLASS_PERFDOM /* if0003.h */ -0x00000003
+
+#define NVIF_CLASS_SW_NV04 /* if0004.h */ -0x00000004
+#define NVIF_CLASS_SW_NV10 /* if0005.h */ -0x00000005
+#define NVIF_CLASS_SW_NV50 /* if0005.h */ -0x00000006
+#define NVIF_CLASS_SW_GF100 /* if0005.h */ -0x00000007
 
 /* the below match nvidia-assigned (either in hw, or sw) class numbers */
+#define NV_NULL_CLASS 0x00000030
+
 #define NV_DEVICE /* cl0080.h */ 0x00000080
 
 #define NV_DMA_FROM_MEMORY /* cl0002.h */ 0x00000002
 #define NV_DMA_TO_MEMORY /* cl0002.h */ 0x00000003
 #define NV_DMA_IN_MEMORY /* cl0002.h */ 0x0000003d
 
+#define NV50_TWOD 0x0000502d
 #define FERMI_TWOD_A 0x0000902d
 
+#define NV50_MEMORY_TO_MEMORY_FORMAT 0x00005039
 #define FERMI_MEMORY_TO_MEMORY_FORMAT_A 0x00009039
 
 #define KEPLER_INLINE_TO_MEMORY_A 0x0000a040
@@ -99,6 +107,12 @@
 #define GF110_DISP_OVERLAY_CONTROL_DMA /* cl507e.h */ 0x0000907e
 #define GK104_DISP_OVERLAY_CONTROL_DMA /* cl507e.h */ 0x0000917e
 
+#define NV50_TESLA 0x00005097
+#define G82_TESLA 0x00008297
+#define GT200_TESLA 0x00008397
+#define GT214_TESLA 0x00008597
+#define GT21A_TESLA 0x00008697
+
 #define FERMI_A /* cl9097.h */ 0x00009097
 #define FERMI_B /* cl9097.h */ 0x00009197
 #define FERMI_C /* cl9097.h */ 0x00009297
@@ -140,6 +154,8 @@
 
 #define FERMI_DECOMPRESS 0x000090b8
 
+#define NV50_COMPUTE 0x000050c0
+#define GT214_COMPUTE 0x000085c0
 #define FERMI_COMPUTE_A 0x000090c0
 #define FERMI_COMPUTE_B 0x000091c0
 #define KEPLER_COMPUTE_A 0x0000a0c0
diff --git a/drivers/gpu/drm/nouveau/include/nvif/client.h b/drivers/gpu/drm/nouveau/include/nvif/client.h
index 4a7f6f7b836d..b52a8eadce01 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/client.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/client.h
@@ -11,8 +11,7 @@ struct nvif_client {
 	bool super;
 };
 
-int nvif_client_init(const char *drv, const char *name, u64 device,
-		     const char *cfg, const char *dbg,
+int nvif_client_init(struct nvif_client *parent, const char *name, u64 device,
 		     struct nvif_client *);
 void nvif_client_fini(struct nvif_client *);
 int nvif_client_ioctl(struct nvif_client *, void *, u32);
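/*
 * Editor's note: nvif_client_init() now takes a parent client rather than
 * the driver/config/debug strings, which move to nvif_driver_init() (see
 * nvif/driver.h below).  A rough sketch of the resulting call sequence,
 * with hypothetical variable names (config, debug, dev_id):
 */
struct nvif_client master, cli;
int ret;

/* the first client binds the caller to a driver backend... */
ret = nvif_driver_init("drm", config, debug, "master", dev_id, &master);

/* ...subsequent clients are created as children of an existing one */
if (ret == 0)
	ret = nvif_client_init(&master, "worker", dev_id, &cli);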
diff --git a/drivers/gpu/drm/nouveau/include/nvif/driver.h b/drivers/gpu/drm/nouveau/include/nvif/driver.h
index 8bd39e69229c..0c6f48d8140a 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/driver.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/driver.h
@@ -1,5 +1,7 @@
 #ifndef __NVIF_DRIVER_H__
 #define __NVIF_DRIVER_H__
+#include <nvif/os.h>
+struct nvif_client;
 
 struct nvif_driver {
 	const char *name;
@@ -14,9 +16,11 @@ struct nvif_driver {
 	bool keep;
 };
 
+int nvif_driver_init(const char *drv, const char *cfg, const char *dbg,
+		     const char *name, u64 device, struct nvif_client *);
+
 extern const struct nvif_driver nvif_driver_nvkm;
 extern const struct nvif_driver nvif_driver_drm;
 extern const struct nvif_driver nvif_driver_lib;
 extern const struct nvif_driver nvif_driver_null;
-
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0000.h b/drivers/gpu/drm/nouveau/include/nvif/if0000.h
index 85c44e8a1201..c2c0fc41e017 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/if0000.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/if0000.h
@@ -1,9 +1,16 @@
 #ifndef __NVIF_IF0000_H__
 #define __NVIF_IF0000_H__
 
-#define NV_CLIENT_DEVLIST 0x00
+struct nvif_client_v0 {
+	__u8  version;
+	__u8  pad01[7];
+	__u64 device;
+	char  name[32];
+};
+
+#define NVIF_CLIENT_V0_DEVLIST 0x00
 
-struct nv_client_devlist_v0 {
+struct nvif_client_devlist_v0 {
 	__u8  version;
 	__u8  count;
 	__u8  pad02[6];
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
index eaf5905a87a3..e876634da10a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
@@ -1,5 +1,6 @@
 #ifndef __NVKM_CLIENT_H__
 #define __NVKM_CLIENT_H__
+#define nvkm_client(p) container_of((p), struct nvkm_client, object)
 #include <core/object.h>
 
 struct nvkm_client {
@@ -8,9 +9,8 @@ struct nvkm_client {
 	u64 device;
 	u32 debug;
 
-	struct nvkm_client_notify *notify[16];
+	struct nvkm_client_notify *notify[32];
 	struct rb_root objroot;
-	struct rb_root dmaroot;
 
 	bool super;
 	void *data;
@@ -19,15 +19,11 @@ struct nvkm_client {
 	struct nvkm_vm *vm;
 };
 
-bool nvkm_client_insert(struct nvkm_client *, struct nvkm_object *);
-void nvkm_client_remove(struct nvkm_client *, struct nvkm_object *);
-struct nvkm_object *nvkm_client_search(struct nvkm_client *, u64 object);
-
 int nvkm_client_new(const char *name, u64 device, const char *cfg,
-		    const char *dbg, struct nvkm_client **);
-void nvkm_client_del(struct nvkm_client **);
-int nvkm_client_init(struct nvkm_client *);
-int nvkm_client_fini(struct nvkm_client *, bool suspend);
+		    const char *dbg,
+		    int (*)(const void *, u32, const void *, u32),
+		    struct nvkm_client **);
+struct nvkm_client *nvkm_client_search(struct nvkm_client *, u64 handle);
 
 int nvkm_client_notify_new(struct nvkm_object *, struct nvkm_event *,
 			   void *data, u32 size);
@@ -37,8 +33,8 @@ int nvkm_client_notify_put(struct nvkm_client *, int index);
 
 /* logging for client-facing objects */
 #define nvif_printk(o,l,p,f,a...) do { \
-	struct nvkm_object *_object = (o); \
-	struct nvkm_client *_client = _object->client; \
+	const struct nvkm_object *_object = (o); \
+	const struct nvkm_client *_client = _object->client; \
 	if (_client->debug >= NV_DBG_##l) \
 		printk(KERN_##p "nouveau: %s:%08x:%08x: "f, _client->name, \
 		       _object->handle, _object->oclass, ##a); \
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index 6bc712f32c8b..d426b86e2712 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -262,7 +262,7 @@ extern const struct nvkm_sclass nvkm_udevice_sclass;
 
 /* device logging */
 #define nvdev_printk_(d,l,p,f,a...) do { \
-	struct nvkm_device *_device = (d); \
+	const struct nvkm_device *_device = (d); \
 	if (_device->debug >= (l)) \
 		dev_##p(_device->dev, f, ##a); \
 } while(0)
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
index 9ebfd8782366..d4cd2fbfde88 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
@@ -20,6 +20,7 @@ struct nvkm_engine_func {
 	int (*fini)(struct nvkm_engine *, bool suspend);
 	void (*intr)(struct nvkm_engine *);
 	void (*tile)(struct nvkm_engine *, int region, struct nvkm_fb_tile *);
+	bool (*chsw_load)(struct nvkm_engine *);
 
 	struct {
 		int (*sclass)(struct nvkm_oclass *, int index,
@@ -44,4 +45,5 @@ int nvkm_engine_new_(const struct nvkm_engine_func *, struct nvkm_device *,
 struct nvkm_engine *nvkm_engine_ref(struct nvkm_engine *);
 void nvkm_engine_unref(struct nvkm_engine **);
 void nvkm_engine_tile(struct nvkm_engine *, int region);
+bool nvkm_engine_chsw_load(struct nvkm_engine *);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
index 9363b839a9da..33ca6769266a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
@@ -6,9 +6,10 @@ struct nvkm_vma;
 struct nvkm_vm;
 
 enum nvkm_memory_target {
-	NVKM_MEM_TARGET_INST,
-	NVKM_MEM_TARGET_VRAM,
-	NVKM_MEM_TARGET_HOST,
+	NVKM_MEM_TARGET_INST, /* instance memory */
+	NVKM_MEM_TARGET_VRAM, /* video memory */
+	NVKM_MEM_TARGET_HOST, /* coherent system memory */
+	NVKM_MEM_TARGET_NCOH, /* non-coherent system memory */
 };
 
 struct nvkm_memory {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h b/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
index d92fd41e4056..7bd4897a8a2a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
@@ -5,7 +5,7 @@
 struct nvkm_mm_node {
 	struct list_head nl_entry;
 	struct list_head fl_entry;
-	struct list_head rl_entry;
+	struct nvkm_mm_node *next;
 
 #define NVKM_MM_HEAP_ANY 0x00
 	u8 heap;
@@ -38,4 +38,10 @@ int nvkm_mm_tail(struct nvkm_mm *, u8 heap, u8 type, u32 size_max,
 		 u32 size_min, u32 align, struct nvkm_mm_node **);
 void nvkm_mm_free(struct nvkm_mm *, struct nvkm_mm_node **);
 void nvkm_mm_dump(struct nvkm_mm *, const char *);
+
+static inline bool
+nvkm_mm_contiguous(struct nvkm_mm_node *node)
+{
+	return !node->next;
+}
 #endif
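/*
 * Editor's sketch: rl_entry is replaced by a simple singly-linked ->next
 * chain, and nvkm_mm_contiguous() reports whether an allocation is a
 * single node.  Walking a (possibly multi-part) allocation then looks
 * like this (helper name hypothetical; nvkm_mm_node carries u32
 * offset/length fields):
 */
static u32
example_mm_total_length(struct nvkm_mm_node *node)
{
	u32 length = 0;
	while (node) {
		length += node->length;	/* length is in mm block units */
		node = node->next;
	}
	return length;
}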
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/object.h b/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
index dcd048b91fac..96dda350ada3 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
@@ -62,6 +62,11 @@ int nvkm_object_wr32(struct nvkm_object *, u64 addr, u32 data);
 int nvkm_object_bind(struct nvkm_object *, struct nvkm_gpuobj *, int align,
 		     struct nvkm_gpuobj **);
 
+bool nvkm_object_insert(struct nvkm_object *);
+void nvkm_object_remove(struct nvkm_object *);
+struct nvkm_object *nvkm_object_search(struct nvkm_client *, u64 object,
+				       const struct nvkm_object_func *);
+
 struct nvkm_sclass {
 	int minver;
 	int maxver;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
index 57adefa8b08e..ca9ed3d68f44 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
@@ -32,7 +32,7 @@ void nvkm_subdev_intr(struct nvkm_subdev *);
 
 /* subdev logging */
 #define nvkm_printk_(s,l,p,f,a...) do { \
-	struct nvkm_subdev *_subdev = (s); \
+	const struct nvkm_subdev *_subdev = (s); \
 	if (_subdev->debug >= (l)) { \
 		dev_##p(_subdev->device->dev, "%s: "f, \
 			nvkm_subdev_name[_subdev->index], ##a); \
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
index 114bfb737a81..d2a6532ce3b9 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
@@ -12,9 +12,6 @@ struct nvkm_dmaobj {
 	u32 access;
 	u64 start;
 	u64 limit;
-
-	struct rb_node rb;
-	u64 handle; /*XXX HANDLE MERGE */
 };
 
 struct nvkm_dma {
@@ -22,8 +19,7 @@ struct nvkm_dma {
 	struct nvkm_engine engine;
 };
 
-struct nvkm_dmaobj *
-nvkm_dma_search(struct nvkm_dma *, struct nvkm_client *, u64 object);
+struct nvkm_dmaobj *nvkm_dmaobj_search(struct nvkm_client *, u64 object);
 
 int nv04_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
 int nv50_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
index e6baf039c269..7e498e65b1e8 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
@@ -4,13 +4,26 @@
 #include <core/engine.h>
 struct nvkm_fifo_chan;
 
+enum nvkm_falcon_dmaidx {
+	FALCON_DMAIDX_UCODE = 0,
+	FALCON_DMAIDX_VIRT = 1,
+	FALCON_DMAIDX_PHYS_VID = 2,
+	FALCON_DMAIDX_PHYS_SYS_COH = 3,
+	FALCON_DMAIDX_PHYS_SYS_NCOH = 4,
+};
+
 struct nvkm_falcon {
 	const struct nvkm_falcon_func *func;
-	struct nvkm_engine engine;
-
+	const struct nvkm_subdev *owner;
+	const char *name;
 	u32 addr;
-	u8 version;
-	u8 secret;
+
+	struct mutex mutex;
+	const struct nvkm_subdev *user;
+
+	u8 version;
+	u8 secret;
+	bool debug;
 
 	struct nvkm_memory *core;
 	bool external;
@@ -19,15 +32,25 @@ struct nvkm_falcon {
 		u32 limit;
 		u32 *data;
 		u32 size;
+		u8 ports;
 	} code;
 
 	struct {
 		u32 limit;
 		u32 *data;
 		u32 size;
+		u8 ports;
 	} data;
+
+	struct nvkm_engine engine;
 };
 
+int nvkm_falcon_v1_new(struct nvkm_subdev *owner, const char *name, u32 addr,
+		       struct nvkm_falcon **);
+void nvkm_falcon_del(struct nvkm_falcon **);
+int nvkm_falcon_get(struct nvkm_falcon *, const struct nvkm_subdev *);
+void nvkm_falcon_put(struct nvkm_falcon *, const struct nvkm_subdev *);
+
 int nvkm_falcon_new_(const struct nvkm_falcon_func *, struct nvkm_device *,
 		     int index, bool enable, u32 addr, struct nvkm_engine **);
 
@@ -42,6 +65,51 @@ struct nvkm_falcon_func {
 	} data;
 	void (*init)(struct nvkm_falcon *);
 	void (*intr)(struct nvkm_falcon *, struct nvkm_fifo_chan *);
+	void (*load_imem)(struct nvkm_falcon *, void *, u32, u32, u16, u8, bool);
+	void (*load_dmem)(struct nvkm_falcon *, void *, u32, u32, u8);
+	void (*read_dmem)(struct nvkm_falcon *, u32, u32, u8, void *);
+	void (*bind_context)(struct nvkm_falcon *, struct nvkm_gpuobj *);
+	int (*wait_for_halt)(struct nvkm_falcon *, u32);
+	int (*clear_interrupt)(struct nvkm_falcon *, u32);
+	void (*set_start_addr)(struct nvkm_falcon *, u32 start_addr);
+	void (*start)(struct nvkm_falcon *);
+	int (*enable)(struct nvkm_falcon *falcon);
+	void (*disable)(struct nvkm_falcon *falcon);
+
 	struct nvkm_sclass sclass[];
 };
+
+static inline u32
+nvkm_falcon_rd32(struct nvkm_falcon *falcon, u32 addr)
+{
+	return nvkm_rd32(falcon->owner->device, falcon->addr + addr);
+}
+
+static inline void
+nvkm_falcon_wr32(struct nvkm_falcon *falcon, u32 addr, u32 data)
+{
+	nvkm_wr32(falcon->owner->device, falcon->addr + addr, data);
+}
+
+static inline u32
+nvkm_falcon_mask(struct nvkm_falcon *falcon, u32 addr, u32 mask, u32 val)
+{
+	struct nvkm_device *device = falcon->owner->device;
+
+	return nvkm_mask(device, falcon->addr + addr, mask, val);
+}
+
+void nvkm_falcon_load_imem(struct nvkm_falcon *, void *, u32, u32, u16, u8,
+			   bool);
+void nvkm_falcon_load_dmem(struct nvkm_falcon *, void *, u32, u32, u8);
+void nvkm_falcon_read_dmem(struct nvkm_falcon *, u32, u32, u8, void *);
+void nvkm_falcon_bind_context(struct nvkm_falcon *, struct nvkm_gpuobj *);
+void nvkm_falcon_set_start_addr(struct nvkm_falcon *, u32);
+void nvkm_falcon_start(struct nvkm_falcon *);
+int nvkm_falcon_wait_for_halt(struct nvkm_falcon *, u32);
+int nvkm_falcon_clear_interrupt(struct nvkm_falcon *, u32);
+int nvkm_falcon_enable(struct nvkm_falcon *);
+void nvkm_falcon_disable(struct nvkm_falcon *);
+int nvkm_falcon_reset(struct nvkm_falcon *);
+
 #endif
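/*
 * Editor's sketch: the falcon code becomes a shared library object that a
 * subdev acquires before touching it.  Typical use of the new interface
 * might look like the following (start address and timeout are made-up
 * values; error handling trimmed):
 */
static int
example_run_falcon(struct nvkm_falcon *falcon, struct nvkm_subdev *user)
{
	int ret = nvkm_falcon_get(falcon, user);	/* exclusive ownership */
	if (ret)
		return ret;

	nvkm_falcon_set_start_addr(falcon, 0x0);
	nvkm_falcon_start(falcon);
	ret = nvkm_falcon_wait_for_halt(falcon, 100);	/* assumed ms timeout */

	nvkm_falcon_put(falcon, user);			/* release for others */
	return ret;
}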
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
index ed92fec5292c..24efa900d8ca 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
@@ -40,6 +40,7 @@ struct nvkm_fifo {
 
 	struct nvkm_event uevent; /* async user trigger */
 	struct nvkm_event cevent; /* channel creation event */
+	struct nvkm_event kevent; /* channel killed */
 };
 
 void nvkm_fifo_pause(struct nvkm_fifo *, unsigned long *);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/power_budget.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/power_budget.h
new file mode 100644
index 000000000000..f5f4a14c4030
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/power_budget.h
@@ -0,0 +1,26 @@
+#ifndef __NVBIOS_POWER_BUDGET_H__
+#define __NVBIOS_POWER_BUDGET_H__
+
+#include <nvkm/subdev/bios.h>
+
+struct nvbios_power_budget_entry {
+	u32 min_w;
+	u32 avg_w;
+	u32 max_w;
+};
+
+struct nvbios_power_budget {
+	u32 offset;
+	u8 ver;
+	u8 hlen;
+	u8 elen;
+	u8 ecount;
+	u8 cap_entry;
+};
+
+int nvbios_power_budget_header(struct nvkm_bios *,
+			       struct nvbios_power_budget *);
+int nvbios_power_budget_entry(struct nvkm_bios *, struct nvbios_power_budget *,
+			      u8 idx, struct nvbios_power_budget_entry *);
+
+#endif
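/*
 * Editor's sketch: consumers of the new power budget parser read the
 * table header first, then fetch the capping entry it points at.  Based
 * only on the interface declared above (the 0xff "no cap entry" sentinel
 * is an assumption):
 */
static int
example_read_power_cap(struct nvkm_bios *bios,
		       struct nvbios_power_budget_entry *entry)
{
	struct nvbios_power_budget budget;
	int ret;

	ret = nvbios_power_budget_header(bios, &budget);
	if (ret)
		return ret;

	if (budget.cap_entry == 0xff)	/* assumed: table has no cap entry */
		return -ENODEV;

	return nvbios_power_budget_entry(bios, &budget,
					 budget.cap_entry, entry);
}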
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 794e432578b2..0b26a4c860ec 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -29,7 +29,7 @@ struct nvkm_mem {
 	u8 page_shift;
 
 	struct nvkm_mm_node *tag;
-	struct list_head regions;
+	struct nvkm_mm_node *mem;
 	dma_addr_t *pages;
 	u32 memtype;
 	u64 offset;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h
index 3c2ddd975273..b7a9b041e130 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h
@@ -8,6 +8,9 @@ struct nvkm_iccsense {
 	bool data_valid;
 	struct list_head sensors;
 	struct list_head rails;
+
+	u32 power_w_max;
+	u32 power_w_crit;
 };
 
 int gf100_iccsense_new(struct nvkm_device *, int index, struct nvkm_iccsense **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
index 27d25b18d85c..e68ba636741b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
@@ -9,6 +9,7 @@ struct nvkm_mc {
 
 void nvkm_mc_enable(struct nvkm_device *, enum nvkm_devidx);
 void nvkm_mc_disable(struct nvkm_device *, enum nvkm_devidx);
+bool nvkm_mc_enabled(struct nvkm_device *, enum nvkm_devidx);
 void nvkm_mc_reset(struct nvkm_device *, enum nvkm_devidx);
 void nvkm_mc_intr(struct nvkm_device *, bool *handled);
 void nvkm_mc_intr_unarm(struct nvkm_device *);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
index e6523e2cea9f..ac2a695963c1 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
@@ -43,6 +43,7 @@ int nv40_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
 int nv46_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
 int nv4c_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
 int g84_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
+int g92_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
 int g94_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
 int gf100_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
 int gf106_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
index f37538eb1fe5..179b6ed3f595 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
@@ -1,10 +1,12 @@
 #ifndef __NVKM_PMU_H__
 #define __NVKM_PMU_H__
 #include <core/subdev.h>
+#include <engine/falcon.h>
 
 struct nvkm_pmu {
 	const struct nvkm_pmu_func *func;
 	struct nvkm_subdev subdev;
+	struct nvkm_falcon *falcon;
 
 	struct {
 		u32 base;
@@ -35,6 +37,7 @@ int gk110_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
 int gk208_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
 int gk20a_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
 int gm107_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
+int gm20b_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
 int gp100_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
 int gp102_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
 
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h
index b04c38c07761..5dbd8aa4f8c2 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h
@@ -26,7 +26,7 @@
 #include <core/subdev.h>
 
 enum nvkm_secboot_falcon {
 	NVKM_SECBOOT_FALCON_PMU = 0,
 	NVKM_SECBOOT_FALCON_RESERVED = 1,
 	NVKM_SECBOOT_FALCON_FECS = 2,
 	NVKM_SECBOOT_FALCON_GPCCS = 3,
@@ -35,22 +35,23 @@ enum nvkm_secboot_falcon {
 };
 
 /**
- * @base: base IO address of the falcon performing secure boot
- * @irq_mask: IRQ mask of the falcon performing secure boot
- * @enable_mask: enable mask of the falcon performing secure boot
+ * @wpr_set: whether the WPR region is currently set
 */
 struct nvkm_secboot {
 	const struct nvkm_secboot_func *func;
+	struct nvkm_acr *acr;
 	struct nvkm_subdev subdev;
+	struct nvkm_falcon *boot_falcon;
 
-	enum nvkm_devidx devidx;
-	u32 base;
+	u64 wpr_addr;
+	u32 wpr_size;
+
+	bool wpr_set;
 };
 #define nvkm_secboot(p) container_of((p), struct nvkm_secboot, subdev)
 
 bool nvkm_secboot_is_managed(struct nvkm_secboot *, enum nvkm_secboot_falcon);
-int nvkm_secboot_reset(struct nvkm_secboot *, u32 falcon);
-int nvkm_secboot_start(struct nvkm_secboot *, u32 falcon);
+int nvkm_secboot_reset(struct nvkm_secboot *, enum nvkm_secboot_falcon);
 
 int gm200_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
 int gm20b_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
index 82d3e28918fd..6a567fe347b3 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
@@ -48,10 +48,8 @@ void nvkm_timer_alarm_cancel(struct nvkm_timer *, struct nvkm_alarm *);
48 } while (_taken = nvkm_timer_read(_tmr) - _time0, _taken < _nsecs); \ 48 } while (_taken = nvkm_timer_read(_tmr) - _time0, _taken < _nsecs); \
49 \ 49 \
50 if (_taken >= _nsecs) { \ 50 if (_taken >= _nsecs) { \
51 if (_warn) { \ 51 if (_warn) \
52 dev_warn(_device->dev, "timeout at %s:%d/%s()!\n", \ 52 dev_WARN(_device->dev, "timeout\n"); \
53 __FILE__, __LINE__, __func__); \
54 } \
55 _taken = -ETIMEDOUT; \ 53 _taken = -ETIMEDOUT; \
56 } \ 54 } \
57 _taken; \ 55 _taken; \
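
dev_WARN() raises a full WARN() splat -- device name, source location, and a backtrace -- so the hand-rolled dev_warn() carrying __FILE__/__LINE__/__func__ becomes redundant, and timeouts now come with a backtrace for free. The macro body shown is the core of the nvkm_usec()/nvkm_msec() polling helpers, typically used like this (the register and bit below are only examples):

    if (nvkm_msec(device, 2000,
            if (!(nvkm_rd32(device, 0x00b100) & 0x00000100))
                    break;
    ) < 0)
            return -ETIMEDOUT; /* the dev_WARN() above already logged a backtrace */
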
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h
index 71ebbfd4484f..d23209b62c25 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h
@@ -11,6 +11,7 @@ struct nvkm_top {
11u32 nvkm_top_reset(struct nvkm_device *, enum nvkm_devidx); 11u32 nvkm_top_reset(struct nvkm_device *, enum nvkm_devidx);
12u32 nvkm_top_intr(struct nvkm_device *, u32 intr, u64 *subdevs); 12u32 nvkm_top_intr(struct nvkm_device *, u32 intr, u64 *subdevs);
13u32 nvkm_top_intr_mask(struct nvkm_device *, enum nvkm_devidx); 13u32 nvkm_top_intr_mask(struct nvkm_device *, enum nvkm_devidx);
14int nvkm_top_fault_id(struct nvkm_device *, enum nvkm_devidx);
14enum nvkm_devidx nvkm_top_fault(struct nvkm_device *, int fault); 15enum nvkm_devidx nvkm_top_fault(struct nvkm_device *, int fault);
15enum nvkm_devidx nvkm_top_engine(struct nvkm_device *, int, int *runl, int *engn); 16enum nvkm_devidx nvkm_top_engine(struct nvkm_device *, int, int *runl, int *engn);
16 17
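
nvkm_top_fault_id() is the inverse of nvkm_top_fault(): it maps a subdev index to the MMU fault ID the device topology assigns to it, presumably returning a negative value when the subdev has none (hence the int return). A sketch of how a caller needing to fault a hung engine might consume it -- the register write below is an assumption, not code from this patch:

    int id = nvkm_top_fault_id(device, NVKM_ENGINE_GR);
    if (id >= 0)
            nvkm_wr32(device, 0x002a30, 0x00000100 | id); /* assumed fault-trigger reg */
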
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 4df4f6ed4886..f98f800cc011 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -87,7 +87,7 @@ nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret)
87s32 87s32
88nouveau_abi16_swclass(struct nouveau_drm *drm) 88nouveau_abi16_swclass(struct nouveau_drm *drm)
89{ 89{
90 switch (drm->device.info.family) { 90 switch (drm->client.device.info.family) {
91 case NV_DEVICE_INFO_V0_TNT: 91 case NV_DEVICE_INFO_V0_TNT:
92 return NVIF_CLASS_SW_NV04; 92 return NVIF_CLASS_SW_NV04;
93 case NV_DEVICE_INFO_V0_CELSIUS: 93 case NV_DEVICE_INFO_V0_CELSIUS:
@@ -175,7 +175,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
175{ 175{
176 struct nouveau_cli *cli = nouveau_cli(file_priv); 176 struct nouveau_cli *cli = nouveau_cli(file_priv);
177 struct nouveau_drm *drm = nouveau_drm(dev); 177 struct nouveau_drm *drm = nouveau_drm(dev);
178 struct nvif_device *device = &drm->device; 178 struct nvif_device *device = &drm->client.device;
179 struct nvkm_gr *gr = nvxx_gr(device); 179 struct nvkm_gr *gr = nvxx_gr(device);
180 struct drm_nouveau_getparam *getparam = data; 180 struct drm_nouveau_getparam *getparam = data;
181 181
@@ -321,7 +321,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
321 } 321 }
322 322
323 /* Named memory object area */ 323 /* Named memory object area */
324 ret = nouveau_gem_new(dev, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART, 324 ret = nouveau_gem_new(cli, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART,
325 0, 0, &chan->ntfy); 325 0, 0, &chan->ntfy);
326 if (ret == 0) 326 if (ret == 0)
327 ret = nouveau_bo_pin(chan->ntfy, TTM_PL_FLAG_TT, false); 327 ret = nouveau_bo_pin(chan->ntfy, TTM_PL_FLAG_TT, false);
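
The drm->device to drm->client.device churn that starts here and runs through the following files reflects the DRM's nvif_device moving inside its master nouveau_cli; likewise nouveau_gem_new() (and nouveau_bo_new(), further down) now allocates against a client instead of a drm_device. The pattern is mechanical:

    struct nvif_device *device = &drm->client.device;   /* was: &drm->device */

    ret = nouveau_gem_new(cli, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART,
                          0, 0, &chan->ntfy);           /* was: nouveau_gem_new(dev, ...) */
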
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index 8b1ca4add2ed..380f340204e8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -65,7 +65,7 @@ static int
65nv40_get_intensity(struct backlight_device *bd) 65nv40_get_intensity(struct backlight_device *bd)
66{ 66{
67 struct nouveau_drm *drm = bl_get_data(bd); 67 struct nouveau_drm *drm = bl_get_data(bd);
68 struct nvif_object *device = &drm->device.object; 68 struct nvif_object *device = &drm->client.device.object;
69 int val = (nvif_rd32(device, NV40_PMC_BACKLIGHT) & 69 int val = (nvif_rd32(device, NV40_PMC_BACKLIGHT) &
70 NV40_PMC_BACKLIGHT_MASK) >> 16; 70 NV40_PMC_BACKLIGHT_MASK) >> 16;
71 71
@@ -76,7 +76,7 @@ static int
76nv40_set_intensity(struct backlight_device *bd) 76nv40_set_intensity(struct backlight_device *bd)
77{ 77{
78 struct nouveau_drm *drm = bl_get_data(bd); 78 struct nouveau_drm *drm = bl_get_data(bd);
79 struct nvif_object *device = &drm->device.object; 79 struct nvif_object *device = &drm->client.device.object;
80 int val = bd->props.brightness; 80 int val = bd->props.brightness;
81 int reg = nvif_rd32(device, NV40_PMC_BACKLIGHT); 81 int reg = nvif_rd32(device, NV40_PMC_BACKLIGHT);
82 82
@@ -96,7 +96,7 @@ static int
96nv40_backlight_init(struct drm_connector *connector) 96nv40_backlight_init(struct drm_connector *connector)
97{ 97{
98 struct nouveau_drm *drm = nouveau_drm(connector->dev); 98 struct nouveau_drm *drm = nouveau_drm(connector->dev);
99 struct nvif_object *device = &drm->device.object; 99 struct nvif_object *device = &drm->client.device.object;
100 struct backlight_properties props; 100 struct backlight_properties props;
101 struct backlight_device *bd; 101 struct backlight_device *bd;
102 struct backlight_connector bl_connector; 102 struct backlight_connector bl_connector;
@@ -133,7 +133,7 @@ nv50_get_intensity(struct backlight_device *bd)
133{ 133{
134 struct nouveau_encoder *nv_encoder = bl_get_data(bd); 134 struct nouveau_encoder *nv_encoder = bl_get_data(bd);
135 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); 135 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
136 struct nvif_object *device = &drm->device.object; 136 struct nvif_object *device = &drm->client.device.object;
137 int or = nv_encoder->or; 137 int or = nv_encoder->or;
138 u32 div = 1025; 138 u32 div = 1025;
139 u32 val; 139 u32 val;
@@ -148,7 +148,7 @@ nv50_set_intensity(struct backlight_device *bd)
148{ 148{
149 struct nouveau_encoder *nv_encoder = bl_get_data(bd); 149 struct nouveau_encoder *nv_encoder = bl_get_data(bd);
150 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); 150 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
151 struct nvif_object *device = &drm->device.object; 151 struct nvif_object *device = &drm->client.device.object;
152 int or = nv_encoder->or; 152 int or = nv_encoder->or;
153 u32 div = 1025; 153 u32 div = 1025;
154 u32 val = (bd->props.brightness * div) / 100; 154 u32 val = (bd->props.brightness * div) / 100;
@@ -169,7 +169,7 @@ nva3_get_intensity(struct backlight_device *bd)
169{ 169{
170 struct nouveau_encoder *nv_encoder = bl_get_data(bd); 170 struct nouveau_encoder *nv_encoder = bl_get_data(bd);
171 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); 171 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
172 struct nvif_object *device = &drm->device.object; 172 struct nvif_object *device = &drm->client.device.object;
173 int or = nv_encoder->or; 173 int or = nv_encoder->or;
174 u32 div, val; 174 u32 div, val;
175 175
@@ -187,7 +187,7 @@ nva3_set_intensity(struct backlight_device *bd)
187{ 187{
188 struct nouveau_encoder *nv_encoder = bl_get_data(bd); 188 struct nouveau_encoder *nv_encoder = bl_get_data(bd);
189 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); 189 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
190 struct nvif_object *device = &drm->device.object; 190 struct nvif_object *device = &drm->client.device.object;
191 int or = nv_encoder->or; 191 int or = nv_encoder->or;
192 u32 div, val; 192 u32 div, val;
193 193
@@ -213,7 +213,7 @@ static int
213nv50_backlight_init(struct drm_connector *connector) 213nv50_backlight_init(struct drm_connector *connector)
214{ 214{
215 struct nouveau_drm *drm = nouveau_drm(connector->dev); 215 struct nouveau_drm *drm = nouveau_drm(connector->dev);
216 struct nvif_object *device = &drm->device.object; 216 struct nvif_object *device = &drm->client.device.object;
217 struct nouveau_encoder *nv_encoder; 217 struct nouveau_encoder *nv_encoder;
218 struct backlight_properties props; 218 struct backlight_properties props;
219 struct backlight_device *bd; 219 struct backlight_device *bd;
@@ -231,9 +231,9 @@ nv50_backlight_init(struct drm_connector *connector)
231 if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or))) 231 if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or)))
232 return 0; 232 return 0;
233 233
234 if (drm->device.info.chipset <= 0xa0 || 234 if (drm->client.device.info.chipset <= 0xa0 ||
235 drm->device.info.chipset == 0xaa || 235 drm->client.device.info.chipset == 0xaa ||
236 drm->device.info.chipset == 0xac) 236 drm->client.device.info.chipset == 0xac)
237 ops = &nv50_bl_ops; 237 ops = &nv50_bl_ops;
238 else 238 else
239 ops = &nva3_bl_ops; 239 ops = &nva3_bl_ops;
@@ -265,7 +265,7 @@ int
265nouveau_backlight_init(struct drm_device *dev) 265nouveau_backlight_init(struct drm_device *dev)
266{ 266{
267 struct nouveau_drm *drm = nouveau_drm(dev); 267 struct nouveau_drm *drm = nouveau_drm(dev);
268 struct nvif_device *device = &drm->device; 268 struct nvif_device *device = &drm->client.device;
269 struct drm_connector *connector; 269 struct drm_connector *connector;
270 270
271 if (apple_gmux_present()) { 271 if (apple_gmux_present()) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 23ffe8571a99..9a0772ad495a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -215,7 +215,7 @@ int call_lvds_script(struct drm_device *dev, struct dcb_output *dcbent, int head
215 */ 215 */
216 216
217 struct nouveau_drm *drm = nouveau_drm(dev); 217 struct nouveau_drm *drm = nouveau_drm(dev);
218 struct nvif_object *device = &drm->device.object; 218 struct nvif_object *device = &drm->client.device.object;
219 struct nvbios *bios = &drm->vbios; 219 struct nvbios *bios = &drm->vbios;
220 uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer]; 220 uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer];
221 uint32_t sel_clk_binding, sel_clk; 221 uint32_t sel_clk_binding, sel_clk;
@@ -319,7 +319,7 @@ static int
319get_fp_strap(struct drm_device *dev, struct nvbios *bios) 319get_fp_strap(struct drm_device *dev, struct nvbios *bios)
320{ 320{
321 struct nouveau_drm *drm = nouveau_drm(dev); 321 struct nouveau_drm *drm = nouveau_drm(dev);
322 struct nvif_object *device = &drm->device.object; 322 struct nvif_object *device = &drm->client.device.object;
323 323
324 /* 324 /*
325 * The fp strap is normally dictated by the "User Strap" in 325 * The fp strap is normally dictated by the "User Strap" in
@@ -333,10 +333,10 @@ get_fp_strap(struct drm_device *dev, struct nvbios *bios)
333 if (bios->major_version < 5 && bios->data[0x48] & 0x4) 333 if (bios->major_version < 5 && bios->data[0x48] & 0x4)
334 return NVReadVgaCrtc5758(dev, 0, 0xf) & 0xf; 334 return NVReadVgaCrtc5758(dev, 0, 0xf) & 0xf;
335 335
336 if (drm->device.info.family >= NV_DEVICE_INFO_V0_MAXWELL) 336 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_MAXWELL)
337 return nvif_rd32(device, 0x001800) & 0x0000000f; 337 return nvif_rd32(device, 0x001800) & 0x0000000f;
338 else 338 else
339 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) 339 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
340 return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 24) & 0xf; 340 return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 24) & 0xf;
341 else 341 else
342 return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 16) & 0xf; 342 return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 16) & 0xf;
@@ -638,7 +638,7 @@ int run_tmds_table(struct drm_device *dev, struct dcb_output *dcbent, int head,
638 */ 638 */
639 639
640 struct nouveau_drm *drm = nouveau_drm(dev); 640 struct nouveau_drm *drm = nouveau_drm(dev);
641 struct nvif_object *device = &drm->device.object; 641 struct nvif_object *device = &drm->client.device.object;
642 struct nvbios *bios = &drm->vbios; 642 struct nvbios *bios = &drm->vbios;
643 int cv = bios->chip_version; 643 int cv = bios->chip_version;
644 uint16_t clktable = 0, scriptptr; 644 uint16_t clktable = 0, scriptptr;
@@ -1255,7 +1255,7 @@ olddcb_table(struct drm_device *dev)
1255 struct nouveau_drm *drm = nouveau_drm(dev); 1255 struct nouveau_drm *drm = nouveau_drm(dev);
1256 u8 *dcb = NULL; 1256 u8 *dcb = NULL;
1257 1257
1258 if (drm->device.info.family > NV_DEVICE_INFO_V0_TNT) 1258 if (drm->client.device.info.family > NV_DEVICE_INFO_V0_TNT)
1259 dcb = ROMPTR(dev, drm->vbios.data[0x36]); 1259 dcb = ROMPTR(dev, drm->vbios.data[0x36]);
1260 if (!dcb) { 1260 if (!dcb) {
1261 NV_WARN(drm, "No DCB data found in VBIOS\n"); 1261 NV_WARN(drm, "No DCB data found in VBIOS\n");
@@ -1918,7 +1918,7 @@ static int load_nv17_hwsq_ucode_entry(struct drm_device *dev, struct nvbios *bio
1918 */ 1918 */
1919 1919
1920 struct nouveau_drm *drm = nouveau_drm(dev); 1920 struct nouveau_drm *drm = nouveau_drm(dev);
1921 struct nvif_object *device = &drm->device.object; 1921 struct nvif_object *device = &drm->client.device.object;
1922 uint8_t bytes_to_write; 1922 uint8_t bytes_to_write;
1923 uint16_t hwsq_entry_offset; 1923 uint16_t hwsq_entry_offset;
1924 int i; 1924 int i;
@@ -2012,7 +2012,7 @@ uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev)
2012static bool NVInitVBIOS(struct drm_device *dev) 2012static bool NVInitVBIOS(struct drm_device *dev)
2013{ 2013{
2014 struct nouveau_drm *drm = nouveau_drm(dev); 2014 struct nouveau_drm *drm = nouveau_drm(dev);
2015 struct nvkm_bios *bios = nvxx_bios(&drm->device); 2015 struct nvkm_bios *bios = nvxx_bios(&drm->client.device);
2016 struct nvbios *legacy = &drm->vbios; 2016 struct nvbios *legacy = &drm->vbios;
2017 2017
2018 memset(legacy, 0, sizeof(struct nvbios)); 2018 memset(legacy, 0, sizeof(struct nvbios));
@@ -2064,7 +2064,7 @@ nouveau_bios_posted(struct drm_device *dev)
2064 struct nouveau_drm *drm = nouveau_drm(dev); 2064 struct nouveau_drm *drm = nouveau_drm(dev);
2065 unsigned htotal; 2065 unsigned htotal;
2066 2066
2067 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) 2067 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
2068 return true; 2068 return true;
2069 2069
2070 htotal = NVReadVgaCrtc(dev, 0, 0x06); 2070 htotal = NVReadVgaCrtc(dev, 0, 0x06);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 8a528ebe30f3..548f36d33924 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -48,7 +48,7 @@ nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
48{ 48{
49 struct nouveau_drm *drm = nouveau_drm(dev); 49 struct nouveau_drm *drm = nouveau_drm(dev);
50 int i = reg - drm->tile.reg; 50 int i = reg - drm->tile.reg;
51 struct nvkm_device *device = nvxx_device(&drm->device); 51 struct nvkm_device *device = nvxx_device(&drm->client.device);
52 struct nvkm_fb *fb = device->fb; 52 struct nvkm_fb *fb = device->fb;
53 struct nvkm_fb_tile *tile = &fb->tile.region[i]; 53 struct nvkm_fb_tile *tile = &fb->tile.region[i];
54 54
@@ -100,7 +100,7 @@ nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
100 u32 size, u32 pitch, u32 flags) 100 u32 size, u32 pitch, u32 flags)
101{ 101{
102 struct nouveau_drm *drm = nouveau_drm(dev); 102 struct nouveau_drm *drm = nouveau_drm(dev);
103 struct nvkm_fb *fb = nvxx_fb(&drm->device); 103 struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
104 struct nouveau_drm_tile *tile, *found = NULL; 104 struct nouveau_drm_tile *tile, *found = NULL;
105 int i; 105 int i;
106 106
@@ -139,60 +139,62 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
139 kfree(nvbo); 139 kfree(nvbo);
140} 140}
141 141
142static inline u64
143roundup_64(u64 x, u32 y)
144{
145 x += y - 1;
146 do_div(x, y);
147 return x * y;
148}
149
142static void 150static void
143nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags, 151nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
144 int *align, int *size) 152 int *align, u64 *size)
145{ 153{
146 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); 154 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
147 struct nvif_device *device = &drm->device; 155 struct nvif_device *device = &drm->client.device;
148 156
149 if (device->info.family < NV_DEVICE_INFO_V0_TESLA) { 157 if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
150 if (nvbo->tile_mode) { 158 if (nvbo->tile_mode) {
151 if (device->info.chipset >= 0x40) { 159 if (device->info.chipset >= 0x40) {
152 *align = 65536; 160 *align = 65536;
153 *size = roundup(*size, 64 * nvbo->tile_mode); 161 *size = roundup_64(*size, 64 * nvbo->tile_mode);
154 162
155 } else if (device->info.chipset >= 0x30) { 163 } else if (device->info.chipset >= 0x30) {
156 *align = 32768; 164 *align = 32768;
157 *size = roundup(*size, 64 * nvbo->tile_mode); 165 *size = roundup_64(*size, 64 * nvbo->tile_mode);
158 166
159 } else if (device->info.chipset >= 0x20) { 167 } else if (device->info.chipset >= 0x20) {
160 *align = 16384; 168 *align = 16384;
161 *size = roundup(*size, 64 * nvbo->tile_mode); 169 *size = roundup_64(*size, 64 * nvbo->tile_mode);
162 170
163 } else if (device->info.chipset >= 0x10) { 171 } else if (device->info.chipset >= 0x10) {
164 *align = 16384; 172 *align = 16384;
165 *size = roundup(*size, 32 * nvbo->tile_mode); 173 *size = roundup_64(*size, 32 * nvbo->tile_mode);
166 } 174 }
167 } 175 }
168 } else { 176 } else {
169 *size = roundup(*size, (1 << nvbo->page_shift)); 177 *size = roundup_64(*size, (1 << nvbo->page_shift));
170 *align = max((1 << nvbo->page_shift), *align); 178 *align = max((1 << nvbo->page_shift), *align);
171 } 179 }
172 180
173 *size = roundup(*size, PAGE_SIZE); 181 *size = roundup_64(*size, PAGE_SIZE);
174} 182}
175 183
176int 184int
177nouveau_bo_new(struct drm_device *dev, int size, int align, 185nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
178 uint32_t flags, uint32_t tile_mode, uint32_t tile_flags, 186 uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
179 struct sg_table *sg, struct reservation_object *robj, 187 struct sg_table *sg, struct reservation_object *robj,
180 struct nouveau_bo **pnvbo) 188 struct nouveau_bo **pnvbo)
181{ 189{
182 struct nouveau_drm *drm = nouveau_drm(dev); 190 struct nouveau_drm *drm = nouveau_drm(cli->dev);
183 struct nouveau_bo *nvbo; 191 struct nouveau_bo *nvbo;
184 size_t acc_size; 192 size_t acc_size;
185 int ret; 193 int ret;
186 int type = ttm_bo_type_device; 194 int type = ttm_bo_type_device;
187 int lpg_shift = 12;
188 int max_size;
189
190 if (drm->client.vm)
191 lpg_shift = drm->client.vm->mmu->lpg_shift;
192 max_size = INT_MAX & ~((1 << lpg_shift) - 1);
193 195
194 if (size <= 0 || size > max_size) { 196 if (!size) {
195 NV_WARN(drm, "skipped size %x\n", (u32)size); 197 NV_WARN(drm, "skipped size %016llx\n", size);
196 return -EINVAL; 198 return -EINVAL;
197 } 199 }
198 200
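
Two related changes land in this hunk: size widens to u64, so the old INT_MAX-derived max_size clamp disappears and only zero-sized requests are rejected; and roundup() is swapped for the local roundup_64(), since a bare 64-by-32 '/' is not usable in kernel code on every 32-bit architecture. do_div() divides x in place (its return value, the remainder, is discarded here). For reference, the helper computes:

    /* reference-only equivalent of the roundup_64() above */
    static inline u64 roundup_64_ref(u64 x, u32 y)
    {
            return ((x + y - 1) / y) * y;
    }
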
@@ -208,8 +210,9 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
208 nvbo->tile_mode = tile_mode; 210 nvbo->tile_mode = tile_mode;
209 nvbo->tile_flags = tile_flags; 211 nvbo->tile_flags = tile_flags;
210 nvbo->bo.bdev = &drm->ttm.bdev; 212 nvbo->bo.bdev = &drm->ttm.bdev;
213 nvbo->cli = cli;
211 214
212 if (!nvxx_device(&drm->device)->func->cpu_coherent) 215 if (!nvxx_device(&drm->client.device)->func->cpu_coherent)
213 nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED; 216 nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
214 217
215 nvbo->page_shift = 12; 218 nvbo->page_shift = 12;
@@ -255,10 +258,10 @@ static void
255set_placement_range(struct nouveau_bo *nvbo, uint32_t type) 258set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
256{ 259{
257 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); 260 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
258 u32 vram_pages = drm->device.info.ram_size >> PAGE_SHIFT; 261 u32 vram_pages = drm->client.device.info.ram_size >> PAGE_SHIFT;
259 unsigned i, fpfn, lpfn; 262 unsigned i, fpfn, lpfn;
260 263
261 if (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS && 264 if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
262 nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) && 265 nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
263 nvbo->bo.mem.num_pages < vram_pages / 4) { 266 nvbo->bo.mem.num_pages < vram_pages / 4) {
264 /* 267 /*
@@ -316,12 +319,12 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
316 if (ret) 319 if (ret)
317 return ret; 320 return ret;
318 321
319 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA && 322 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
320 memtype == TTM_PL_FLAG_VRAM && contig) { 323 memtype == TTM_PL_FLAG_VRAM && contig) {
321 if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) { 324 if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) {
322 if (bo->mem.mem_type == TTM_PL_VRAM) { 325 if (bo->mem.mem_type == TTM_PL_VRAM) {
323 struct nvkm_mem *mem = bo->mem.mm_node; 326 struct nvkm_mem *mem = bo->mem.mm_node;
324 if (!list_is_singular(&mem->regions)) 327 if (!nvkm_mm_contiguous(mem->mem))
325 evict = true; 328 evict = true;
326 } 329 }
327 nvbo->tile_flags &= ~NOUVEAU_GEM_TILE_NONCONTIG; 330 nvbo->tile_flags &= ~NOUVEAU_GEM_TILE_NONCONTIG;
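
The contiguity test moves off the nvkm_mem region list and onto an nvkm_mm helper applied to the node itself (mem->mem), pushing placement knowledge down into the allocator. The helper is presumably a one-liner along these lines -- an assumption about its shape, not code from this patch:

    /* assumed shape: contiguous iff the allocation is a single, unchained node */
    static inline bool
    nvkm_mm_contiguous(struct nvkm_mm_node *node)
    {
            return !node->next;
    }
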
@@ -443,7 +446,7 @@ void
443nouveau_bo_sync_for_device(struct nouveau_bo *nvbo) 446nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
444{ 447{
445 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); 448 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
446 struct nvkm_device *device = nvxx_device(&drm->device); 449 struct nvkm_device *device = nvxx_device(&drm->client.device);
447 struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm; 450 struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
448 int i; 451 int i;
449 452
@@ -463,7 +466,7 @@ void
463nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo) 466nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
464{ 467{
465 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); 468 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
466 struct nvkm_device *device = nvxx_device(&drm->device); 469 struct nvkm_device *device = nvxx_device(&drm->client.device);
467 struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm; 470 struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
468 int i; 471 int i;
469 472
@@ -579,9 +582,9 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
579 TTM_PL_FLAG_WC; 582 TTM_PL_FLAG_WC;
580 man->default_caching = TTM_PL_FLAG_WC; 583 man->default_caching = TTM_PL_FLAG_WC;
581 584
582 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { 585 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
583 /* Some BARs do not support being ioremapped WC */ 586 /* Some BARs do not support being ioremapped WC */
584 if (nvxx_bar(&drm->device)->iomap_uncached) { 587 if (nvxx_bar(&drm->client.device)->iomap_uncached) {
585 man->available_caching = TTM_PL_FLAG_UNCACHED; 588 man->available_caching = TTM_PL_FLAG_UNCACHED;
586 man->default_caching = TTM_PL_FLAG_UNCACHED; 589 man->default_caching = TTM_PL_FLAG_UNCACHED;
587 } 590 }
@@ -594,7 +597,7 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
594 } 597 }
595 break; 598 break;
596 case TTM_PL_TT: 599 case TTM_PL_TT:
597 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) 600 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
598 man->func = &nouveau_gart_manager; 601 man->func = &nouveau_gart_manager;
599 else 602 else
600 if (!drm->agp.bridge) 603 if (!drm->agp.bridge)
@@ -654,20 +657,20 @@ nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
654 657
655static int 658static int
656nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, 659nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
657 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) 660 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
658{ 661{
659 struct nvkm_mem *node = old_mem->mm_node; 662 struct nvkm_mem *mem = old_reg->mm_node;
660 int ret = RING_SPACE(chan, 10); 663 int ret = RING_SPACE(chan, 10);
661 if (ret == 0) { 664 if (ret == 0) {
662 BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8); 665 BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
663 OUT_RING (chan, upper_32_bits(node->vma[0].offset)); 666 OUT_RING (chan, upper_32_bits(mem->vma[0].offset));
664 OUT_RING (chan, lower_32_bits(node->vma[0].offset)); 667 OUT_RING (chan, lower_32_bits(mem->vma[0].offset));
665 OUT_RING (chan, upper_32_bits(node->vma[1].offset)); 668 OUT_RING (chan, upper_32_bits(mem->vma[1].offset));
666 OUT_RING (chan, lower_32_bits(node->vma[1].offset)); 669 OUT_RING (chan, lower_32_bits(mem->vma[1].offset));
667 OUT_RING (chan, PAGE_SIZE); 670 OUT_RING (chan, PAGE_SIZE);
668 OUT_RING (chan, PAGE_SIZE); 671 OUT_RING (chan, PAGE_SIZE);
669 OUT_RING (chan, PAGE_SIZE); 672 OUT_RING (chan, PAGE_SIZE);
670 OUT_RING (chan, new_mem->num_pages); 673 OUT_RING (chan, new_reg->num_pages);
671 BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386); 674 BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
672 } 675 }
673 return ret; 676 return ret;
@@ -686,15 +689,15 @@ nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
686 689
687static int 690static int
688nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, 691nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
689 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) 692 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
690{ 693{
691 struct nvkm_mem *node = old_mem->mm_node; 694 struct nvkm_mem *mem = old_reg->mm_node;
692 u64 src_offset = node->vma[0].offset; 695 u64 src_offset = mem->vma[0].offset;
693 u64 dst_offset = node->vma[1].offset; 696 u64 dst_offset = mem->vma[1].offset;
694 u32 page_count = new_mem->num_pages; 697 u32 page_count = new_reg->num_pages;
695 int ret; 698 int ret;
696 699
697 page_count = new_mem->num_pages; 700 page_count = new_reg->num_pages;
698 while (page_count) { 701 while (page_count) {
699 int line_count = (page_count > 8191) ? 8191 : page_count; 702 int line_count = (page_count > 8191) ? 8191 : page_count;
700 703
@@ -724,15 +727,15 @@ nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
724 727
725static int 728static int
726nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, 729nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
727 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) 730 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
728{ 731{
729 struct nvkm_mem *node = old_mem->mm_node; 732 struct nvkm_mem *mem = old_reg->mm_node;
730 u64 src_offset = node->vma[0].offset; 733 u64 src_offset = mem->vma[0].offset;
731 u64 dst_offset = node->vma[1].offset; 734 u64 dst_offset = mem->vma[1].offset;
732 u32 page_count = new_mem->num_pages; 735 u32 page_count = new_reg->num_pages;
733 int ret; 736 int ret;
734 737
735 page_count = new_mem->num_pages; 738 page_count = new_reg->num_pages;
736 while (page_count) { 739 while (page_count) {
737 int line_count = (page_count > 2047) ? 2047 : page_count; 740 int line_count = (page_count > 2047) ? 2047 : page_count;
738 741
@@ -763,15 +766,15 @@ nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
763 766
764static int 767static int
765nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, 768nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
766 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) 769 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
767{ 770{
768 struct nvkm_mem *node = old_mem->mm_node; 771 struct nvkm_mem *mem = old_reg->mm_node;
769 u64 src_offset = node->vma[0].offset; 772 u64 src_offset = mem->vma[0].offset;
770 u64 dst_offset = node->vma[1].offset; 773 u64 dst_offset = mem->vma[1].offset;
771 u32 page_count = new_mem->num_pages; 774 u32 page_count = new_reg->num_pages;
772 int ret; 775 int ret;
773 776
774 page_count = new_mem->num_pages; 777 page_count = new_reg->num_pages;
775 while (page_count) { 778 while (page_count) {
776 int line_count = (page_count > 8191) ? 8191 : page_count; 779 int line_count = (page_count > 8191) ? 8191 : page_count;
777 780
@@ -801,35 +804,35 @@ nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
801 804
802static int 805static int
803nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo, 806nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
804 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) 807 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
805{ 808{
806 struct nvkm_mem *node = old_mem->mm_node; 809 struct nvkm_mem *mem = old_reg->mm_node;
807 int ret = RING_SPACE(chan, 7); 810 int ret = RING_SPACE(chan, 7);
808 if (ret == 0) { 811 if (ret == 0) {
809 BEGIN_NV04(chan, NvSubCopy, 0x0320, 6); 812 BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
810 OUT_RING (chan, upper_32_bits(node->vma[0].offset)); 813 OUT_RING (chan, upper_32_bits(mem->vma[0].offset));
811 OUT_RING (chan, lower_32_bits(node->vma[0].offset)); 814 OUT_RING (chan, lower_32_bits(mem->vma[0].offset));
812 OUT_RING (chan, upper_32_bits(node->vma[1].offset)); 815 OUT_RING (chan, upper_32_bits(mem->vma[1].offset));
813 OUT_RING (chan, lower_32_bits(node->vma[1].offset)); 816 OUT_RING (chan, lower_32_bits(mem->vma[1].offset));
814 OUT_RING (chan, 0x00000000 /* COPY */); 817 OUT_RING (chan, 0x00000000 /* COPY */);
815 OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT); 818 OUT_RING (chan, new_reg->num_pages << PAGE_SHIFT);
816 } 819 }
817 return ret; 820 return ret;
818} 821}
819 822
820static int 823static int
821nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo, 824nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
822 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) 825 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
823{ 826{
824 struct nvkm_mem *node = old_mem->mm_node; 827 struct nvkm_mem *mem = old_reg->mm_node;
825 int ret = RING_SPACE(chan, 7); 828 int ret = RING_SPACE(chan, 7);
826 if (ret == 0) { 829 if (ret == 0) {
827 BEGIN_NV04(chan, NvSubCopy, 0x0304, 6); 830 BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
828 OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT); 831 OUT_RING (chan, new_reg->num_pages << PAGE_SHIFT);
829 OUT_RING (chan, upper_32_bits(node->vma[0].offset)); 832 OUT_RING (chan, upper_32_bits(mem->vma[0].offset));
830 OUT_RING (chan, lower_32_bits(node->vma[0].offset)); 833 OUT_RING (chan, lower_32_bits(mem->vma[0].offset));
831 OUT_RING (chan, upper_32_bits(node->vma[1].offset)); 834 OUT_RING (chan, upper_32_bits(mem->vma[1].offset));
832 OUT_RING (chan, lower_32_bits(node->vma[1].offset)); 835 OUT_RING (chan, lower_32_bits(mem->vma[1].offset));
833 OUT_RING (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */); 836 OUT_RING (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
834 } 837 }
835 return ret; 838 return ret;
@@ -853,14 +856,14 @@ nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
853 856
854static int 857static int
855nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, 858nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
856 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) 859 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
857{ 860{
858 struct nvkm_mem *node = old_mem->mm_node; 861 struct nvkm_mem *mem = old_reg->mm_node;
859 u64 length = (new_mem->num_pages << PAGE_SHIFT); 862 u64 length = (new_reg->num_pages << PAGE_SHIFT);
860 u64 src_offset = node->vma[0].offset; 863 u64 src_offset = mem->vma[0].offset;
861 u64 dst_offset = node->vma[1].offset; 864 u64 dst_offset = mem->vma[1].offset;
862 int src_tiled = !!node->memtype; 865 int src_tiled = !!mem->memtype;
863 int dst_tiled = !!((struct nvkm_mem *)new_mem->mm_node)->memtype; 866 int dst_tiled = !!((struct nvkm_mem *)new_reg->mm_node)->memtype;
864 int ret; 867 int ret;
865 868
866 while (length) { 869 while (length) {
@@ -940,20 +943,20 @@ nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
940 943
941static inline uint32_t 944static inline uint32_t
942nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo, 945nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
943 struct nouveau_channel *chan, struct ttm_mem_reg *mem) 946 struct nouveau_channel *chan, struct ttm_mem_reg *reg)
944{ 947{
945 if (mem->mem_type == TTM_PL_TT) 948 if (reg->mem_type == TTM_PL_TT)
946 return NvDmaTT; 949 return NvDmaTT;
947 return chan->vram.handle; 950 return chan->vram.handle;
948} 951}
949 952
950static int 953static int
951nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, 954nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
952 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) 955 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
953{ 956{
954 u32 src_offset = old_mem->start << PAGE_SHIFT; 957 u32 src_offset = old_reg->start << PAGE_SHIFT;
955 u32 dst_offset = new_mem->start << PAGE_SHIFT; 958 u32 dst_offset = new_reg->start << PAGE_SHIFT;
956 u32 page_count = new_mem->num_pages; 959 u32 page_count = new_reg->num_pages;
957 int ret; 960 int ret;
958 961
959 ret = RING_SPACE(chan, 3); 962 ret = RING_SPACE(chan, 3);
@@ -961,10 +964,10 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
961 return ret; 964 return ret;
962 965
963 BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2); 966 BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
964 OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem)); 967 OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_reg));
965 OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem)); 968 OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_reg));
966 969
967 page_count = new_mem->num_pages; 970 page_count = new_reg->num_pages;
968 while (page_count) { 971 while (page_count) {
969 int line_count = (page_count > 2047) ? 2047 : page_count; 972 int line_count = (page_count > 2047) ? 2047 : page_count;
970 973
@@ -995,33 +998,33 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
995 998
996static int 999static int
997nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo, 1000nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
998 struct ttm_mem_reg *mem) 1001 struct ttm_mem_reg *reg)
999{ 1002{
1000 struct nvkm_mem *old_node = bo->mem.mm_node; 1003 struct nvkm_mem *old_mem = bo->mem.mm_node;
1001 struct nvkm_mem *new_node = mem->mm_node; 1004 struct nvkm_mem *new_mem = reg->mm_node;
1002 u64 size = (u64)mem->num_pages << PAGE_SHIFT; 1005 u64 size = (u64)reg->num_pages << PAGE_SHIFT;
1003 int ret; 1006 int ret;
1004 1007
1005 ret = nvkm_vm_get(drm->client.vm, size, old_node->page_shift, 1008 ret = nvkm_vm_get(drm->client.vm, size, old_mem->page_shift,
1006 NV_MEM_ACCESS_RW, &old_node->vma[0]); 1009 NV_MEM_ACCESS_RW, &old_mem->vma[0]);
1007 if (ret) 1010 if (ret)
1008 return ret; 1011 return ret;
1009 1012
1010 ret = nvkm_vm_get(drm->client.vm, size, new_node->page_shift, 1013 ret = nvkm_vm_get(drm->client.vm, size, new_mem->page_shift,
1011 NV_MEM_ACCESS_RW, &old_node->vma[1]); 1014 NV_MEM_ACCESS_RW, &old_mem->vma[1]);
1012 if (ret) { 1015 if (ret) {
1013 nvkm_vm_put(&old_node->vma[0]); 1016 nvkm_vm_put(&old_mem->vma[0]);
1014 return ret; 1017 return ret;
1015 } 1018 }
1016 1019
1017 nvkm_vm_map(&old_node->vma[0], old_node); 1020 nvkm_vm_map(&old_mem->vma[0], old_mem);
1018 nvkm_vm_map(&old_node->vma[1], new_node); 1021 nvkm_vm_map(&old_mem->vma[1], new_mem);
1019 return 0; 1022 return 0;
1020} 1023}
1021 1024
1022static int 1025static int
1023nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, 1026nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
1024 bool no_wait_gpu, struct ttm_mem_reg *new_mem) 1027 bool no_wait_gpu, struct ttm_mem_reg *new_reg)
1025{ 1028{
1026 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); 1029 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1027 struct nouveau_channel *chan = drm->ttm.chan; 1030 struct nouveau_channel *chan = drm->ttm.chan;
@@ -1033,8 +1036,8 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
1033 * old nvkm_mem node, these will get cleaned up after ttm has 1036 * old nvkm_mem node, these will get cleaned up after ttm has
1034 * destroyed the ttm_mem_reg 1037 * destroyed the ttm_mem_reg
1035 */ 1038 */
1036 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { 1039 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
1037 ret = nouveau_bo_move_prep(drm, bo, new_mem); 1040 ret = nouveau_bo_move_prep(drm, bo, new_reg);
1038 if (ret) 1041 if (ret)
1039 return ret; 1042 return ret;
1040 } 1043 }
@@ -1042,14 +1045,14 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
1042 mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING); 1045 mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
1043 ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, intr); 1046 ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, intr);
1044 if (ret == 0) { 1047 if (ret == 0) {
1045 ret = drm->ttm.move(chan, bo, &bo->mem, new_mem); 1048 ret = drm->ttm.move(chan, bo, &bo->mem, new_reg);
1046 if (ret == 0) { 1049 if (ret == 0) {
1047 ret = nouveau_fence_new(chan, false, &fence); 1050 ret = nouveau_fence_new(chan, false, &fence);
1048 if (ret == 0) { 1051 if (ret == 0) {
1049 ret = ttm_bo_move_accel_cleanup(bo, 1052 ret = ttm_bo_move_accel_cleanup(bo,
1050 &fence->base, 1053 &fence->base,
1051 evict, 1054 evict,
1052 new_mem); 1055 new_reg);
1053 nouveau_fence_unref(&fence); 1056 nouveau_fence_unref(&fence);
1054 } 1057 }
1055 } 1058 }
@@ -1124,7 +1127,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
1124 1127
1125static int 1128static int
1126nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, 1129nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
1127 bool no_wait_gpu, struct ttm_mem_reg *new_mem) 1130 bool no_wait_gpu, struct ttm_mem_reg *new_reg)
1128{ 1131{
1129 struct ttm_place placement_memtype = { 1132 struct ttm_place placement_memtype = {
1130 .fpfn = 0, 1133 .fpfn = 0,
@@ -1132,35 +1135,35 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
1132 .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING 1135 .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
1133 }; 1136 };
1134 struct ttm_placement placement; 1137 struct ttm_placement placement;
1135 struct ttm_mem_reg tmp_mem; 1138 struct ttm_mem_reg tmp_reg;
1136 int ret; 1139 int ret;
1137 1140
1138 placement.num_placement = placement.num_busy_placement = 1; 1141 placement.num_placement = placement.num_busy_placement = 1;
1139 placement.placement = placement.busy_placement = &placement_memtype; 1142 placement.placement = placement.busy_placement = &placement_memtype;
1140 1143
1141 tmp_mem = *new_mem; 1144 tmp_reg = *new_reg;
1142 tmp_mem.mm_node = NULL; 1145 tmp_reg.mm_node = NULL;
1143 ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu); 1146 ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, intr, no_wait_gpu);
1144 if (ret) 1147 if (ret)
1145 return ret; 1148 return ret;
1146 1149
1147 ret = ttm_tt_bind(bo->ttm, &tmp_mem); 1150 ret = ttm_tt_bind(bo->ttm, &tmp_reg);
1148 if (ret) 1151 if (ret)
1149 goto out; 1152 goto out;
1150 1153
1151 ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem); 1154 ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_reg);
1152 if (ret) 1155 if (ret)
1153 goto out; 1156 goto out;
1154 1157
1155 ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, new_mem); 1158 ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, new_reg);
1156out: 1159out:
1157 ttm_bo_mem_put(bo, &tmp_mem); 1160 ttm_bo_mem_put(bo, &tmp_reg);
1158 return ret; 1161 return ret;
1159} 1162}
1160 1163
1161static int 1164static int
1162nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, 1165nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
1163 bool no_wait_gpu, struct ttm_mem_reg *new_mem) 1166 bool no_wait_gpu, struct ttm_mem_reg *new_reg)
1164{ 1167{
1165 struct ttm_place placement_memtype = { 1168 struct ttm_place placement_memtype = {
1166 .fpfn = 0, 1169 .fpfn = 0,
@@ -1168,34 +1171,34 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
1168 .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING 1171 .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
1169 }; 1172 };
1170 struct ttm_placement placement; 1173 struct ttm_placement placement;
1171 struct ttm_mem_reg tmp_mem; 1174 struct ttm_mem_reg tmp_reg;
1172 int ret; 1175 int ret;
1173 1176
1174 placement.num_placement = placement.num_busy_placement = 1; 1177 placement.num_placement = placement.num_busy_placement = 1;
1175 placement.placement = placement.busy_placement = &placement_memtype; 1178 placement.placement = placement.busy_placement = &placement_memtype;
1176 1179
1177 tmp_mem = *new_mem; 1180 tmp_reg = *new_reg;
1178 tmp_mem.mm_node = NULL; 1181 tmp_reg.mm_node = NULL;
1179 ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu); 1182 ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, intr, no_wait_gpu);
1180 if (ret) 1183 if (ret)
1181 return ret; 1184 return ret;
1182 1185
1183 ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, &tmp_mem); 1186 ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, &tmp_reg);
1184 if (ret) 1187 if (ret)
1185 goto out; 1188 goto out;
1186 1189
1187 ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem); 1190 ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_reg);
1188 if (ret) 1191 if (ret)
1189 goto out; 1192 goto out;
1190 1193
1191out: 1194out:
1192 ttm_bo_mem_put(bo, &tmp_mem); 1195 ttm_bo_mem_put(bo, &tmp_reg);
1193 return ret; 1196 return ret;
1194} 1197}
1195 1198
1196static void 1199static void
1197nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict, 1200nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
1198 struct ttm_mem_reg *new_mem) 1201 struct ttm_mem_reg *new_reg)
1199{ 1202{
1200 struct nouveau_bo *nvbo = nouveau_bo(bo); 1203 struct nouveau_bo *nvbo = nouveau_bo(bo);
1201 struct nvkm_vma *vma; 1204 struct nvkm_vma *vma;
@@ -1205,10 +1208,10 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
1205 return; 1208 return;
1206 1209
1207 list_for_each_entry(vma, &nvbo->vma_list, head) { 1210 list_for_each_entry(vma, &nvbo->vma_list, head) {
1208 if (new_mem && new_mem->mem_type != TTM_PL_SYSTEM && 1211 if (new_reg && new_reg->mem_type != TTM_PL_SYSTEM &&
1209 (new_mem->mem_type == TTM_PL_VRAM || 1212 (new_reg->mem_type == TTM_PL_VRAM ||
1210 nvbo->page_shift != vma->vm->mmu->lpg_shift)) { 1213 nvbo->page_shift != vma->vm->mmu->lpg_shift)) {
1211 nvkm_vm_map(vma, new_mem->mm_node); 1214 nvkm_vm_map(vma, new_reg->mm_node);
1212 } else { 1215 } else {
1213 WARN_ON(ttm_bo_wait(bo, false, false)); 1216 WARN_ON(ttm_bo_wait(bo, false, false));
1214 nvkm_vm_unmap(vma); 1217 nvkm_vm_unmap(vma);
@@ -1217,20 +1220,20 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
1217} 1220}
1218 1221
1219static int 1222static int
1220nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem, 1223nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_reg,
1221 struct nouveau_drm_tile **new_tile) 1224 struct nouveau_drm_tile **new_tile)
1222{ 1225{
1223 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); 1226 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1224 struct drm_device *dev = drm->dev; 1227 struct drm_device *dev = drm->dev;
1225 struct nouveau_bo *nvbo = nouveau_bo(bo); 1228 struct nouveau_bo *nvbo = nouveau_bo(bo);
1226 u64 offset = new_mem->start << PAGE_SHIFT; 1229 u64 offset = new_reg->start << PAGE_SHIFT;
1227 1230
1228 *new_tile = NULL; 1231 *new_tile = NULL;
1229 if (new_mem->mem_type != TTM_PL_VRAM) 1232 if (new_reg->mem_type != TTM_PL_VRAM)
1230 return 0; 1233 return 0;
1231 1234
1232 if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) { 1235 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
1233 *new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size, 1236 *new_tile = nv10_bo_set_tiling(dev, offset, new_reg->size,
1234 nvbo->tile_mode, 1237 nvbo->tile_mode,
1235 nvbo->tile_flags); 1238 nvbo->tile_flags);
1236 } 1239 }
@@ -1253,11 +1256,11 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
1253 1256
1254static int 1257static int
1255nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, 1258nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
1256 bool no_wait_gpu, struct ttm_mem_reg *new_mem) 1259 bool no_wait_gpu, struct ttm_mem_reg *new_reg)
1257{ 1260{
1258 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); 1261 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1259 struct nouveau_bo *nvbo = nouveau_bo(bo); 1262 struct nouveau_bo *nvbo = nouveau_bo(bo);
1260 struct ttm_mem_reg *old_mem = &bo->mem; 1263 struct ttm_mem_reg *old_reg = &bo->mem;
1261 struct nouveau_drm_tile *new_tile = NULL; 1264 struct nouveau_drm_tile *new_tile = NULL;
1262 int ret = 0; 1265 int ret = 0;
1263 1266
@@ -1268,31 +1271,31 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
1268 if (nvbo->pin_refcnt) 1271 if (nvbo->pin_refcnt)
1269 NV_WARN(drm, "Moving pinned object %p!\n", nvbo); 1272 NV_WARN(drm, "Moving pinned object %p!\n", nvbo);
1270 1273
1271 if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { 1274 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
1272 ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile); 1275 ret = nouveau_bo_vm_bind(bo, new_reg, &new_tile);
1273 if (ret) 1276 if (ret)
1274 return ret; 1277 return ret;
1275 } 1278 }
1276 1279
1277 /* Fake bo copy. */ 1280 /* Fake bo copy. */
1278 if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) { 1281 if (old_reg->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
1279 BUG_ON(bo->mem.mm_node != NULL); 1282 BUG_ON(bo->mem.mm_node != NULL);
1280 bo->mem = *new_mem; 1283 bo->mem = *new_reg;
1281 new_mem->mm_node = NULL; 1284 new_reg->mm_node = NULL;
1282 goto out; 1285 goto out;
1283 } 1286 }
1284 1287
1285 /* Hardware assisted copy. */ 1288 /* Hardware assisted copy. */
1286 if (drm->ttm.move) { 1289 if (drm->ttm.move) {
1287 if (new_mem->mem_type == TTM_PL_SYSTEM) 1290 if (new_reg->mem_type == TTM_PL_SYSTEM)
1288 ret = nouveau_bo_move_flipd(bo, evict, intr, 1291 ret = nouveau_bo_move_flipd(bo, evict, intr,
1289 no_wait_gpu, new_mem); 1292 no_wait_gpu, new_reg);
1290 else if (old_mem->mem_type == TTM_PL_SYSTEM) 1293 else if (old_reg->mem_type == TTM_PL_SYSTEM)
1291 ret = nouveau_bo_move_flips(bo, evict, intr, 1294 ret = nouveau_bo_move_flips(bo, evict, intr,
1292 no_wait_gpu, new_mem); 1295 no_wait_gpu, new_reg);
1293 else 1296 else
1294 ret = nouveau_bo_move_m2mf(bo, evict, intr, 1297 ret = nouveau_bo_move_m2mf(bo, evict, intr,
1295 no_wait_gpu, new_mem); 1298 no_wait_gpu, new_reg);
1296 if (!ret) 1299 if (!ret)
1297 goto out; 1300 goto out;
1298 } 1301 }
@@ -1300,10 +1303,10 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
1300 /* Fallback to software copy. */ 1303 /* Fallback to software copy. */
1301 ret = ttm_bo_wait(bo, intr, no_wait_gpu); 1304 ret = ttm_bo_wait(bo, intr, no_wait_gpu);
1302 if (ret == 0) 1305 if (ret == 0)
1303 ret = ttm_bo_move_memcpy(bo, intr, no_wait_gpu, new_mem); 1306 ret = ttm_bo_move_memcpy(bo, intr, no_wait_gpu, new_reg);
1304 1307
1305out: 1308out:
1306 if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { 1309 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
1307 if (ret) 1310 if (ret)
1308 nouveau_bo_vm_cleanup(bo, NULL, &new_tile); 1311 nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
1309 else 1312 else
@@ -1323,54 +1326,54 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
1323} 1326}
1324 1327
1325static int 1328static int
1326nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) 1329nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
1327{ 1330{
1328 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; 1331 struct ttm_mem_type_manager *man = &bdev->man[reg->mem_type];
1329 struct nouveau_drm *drm = nouveau_bdev(bdev); 1332 struct nouveau_drm *drm = nouveau_bdev(bdev);
1330 struct nvkm_device *device = nvxx_device(&drm->device); 1333 struct nvkm_device *device = nvxx_device(&drm->client.device);
1331 struct nvkm_mem *node = mem->mm_node; 1334 struct nvkm_mem *mem = reg->mm_node;
1332 int ret; 1335 int ret;
1333 1336
1334 mem->bus.addr = NULL; 1337 reg->bus.addr = NULL;
1335 mem->bus.offset = 0; 1338 reg->bus.offset = 0;
1336 mem->bus.size = mem->num_pages << PAGE_SHIFT; 1339 reg->bus.size = reg->num_pages << PAGE_SHIFT;
1337 mem->bus.base = 0; 1340 reg->bus.base = 0;
1338 mem->bus.is_iomem = false; 1341 reg->bus.is_iomem = false;
1339 if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) 1342 if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
1340 return -EINVAL; 1343 return -EINVAL;
1341 switch (mem->mem_type) { 1344 switch (reg->mem_type) {
1342 case TTM_PL_SYSTEM: 1345 case TTM_PL_SYSTEM:
1343 /* System memory */ 1346 /* System memory */
1344 return 0; 1347 return 0;
1345 case TTM_PL_TT: 1348 case TTM_PL_TT:
1346#if IS_ENABLED(CONFIG_AGP) 1349#if IS_ENABLED(CONFIG_AGP)
1347 if (drm->agp.bridge) { 1350 if (drm->agp.bridge) {
1348 mem->bus.offset = mem->start << PAGE_SHIFT; 1351 reg->bus.offset = reg->start << PAGE_SHIFT;
1349 mem->bus.base = drm->agp.base; 1352 reg->bus.base = drm->agp.base;
1350 mem->bus.is_iomem = !drm->agp.cma; 1353 reg->bus.is_iomem = !drm->agp.cma;
1351 } 1354 }
1352#endif 1355#endif
1353 if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA || !node->memtype) 1356 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA || !mem->memtype)
1354 /* untiled */ 1357 /* untiled */
1355 break; 1358 break;
1356 /* fallthrough, tiled memory */ 1359 /* fallthrough, tiled memory */
1357 case TTM_PL_VRAM: 1360 case TTM_PL_VRAM:
1358 mem->bus.offset = mem->start << PAGE_SHIFT; 1361 reg->bus.offset = reg->start << PAGE_SHIFT;
1359 mem->bus.base = device->func->resource_addr(device, 1); 1362 reg->bus.base = device->func->resource_addr(device, 1);
1360 mem->bus.is_iomem = true; 1363 reg->bus.is_iomem = true;
1361 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { 1364 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
1362 struct nvkm_bar *bar = nvxx_bar(&drm->device); 1365 struct nvkm_bar *bar = nvxx_bar(&drm->client.device);
1363 int page_shift = 12; 1366 int page_shift = 12;
1364 if (drm->device.info.family >= NV_DEVICE_INFO_V0_FERMI) 1367 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI)
1365 page_shift = node->page_shift; 1368 page_shift = mem->page_shift;
1366 1369
1367 ret = nvkm_bar_umap(bar, node->size << 12, page_shift, 1370 ret = nvkm_bar_umap(bar, mem->size << 12, page_shift,
1368 &node->bar_vma); 1371 &mem->bar_vma);
1369 if (ret) 1372 if (ret)
1370 return ret; 1373 return ret;
1371 1374
1372 nvkm_vm_map(&node->bar_vma, node); 1375 nvkm_vm_map(&mem->bar_vma, mem);
1373 mem->bus.offset = node->bar_vma.offset; 1376 reg->bus.offset = mem->bar_vma.offset;
1374 } 1377 }
1375 break; 1378 break;
1376 default: 1379 default:
@@ -1380,15 +1383,15 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1380} 1383}
1381 1384
1382static void 1385static void
1383nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) 1386nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
1384{ 1387{
1385 struct nvkm_mem *node = mem->mm_node; 1388 struct nvkm_mem *mem = reg->mm_node;
1386 1389
1387 if (!node->bar_vma.node) 1390 if (!mem->bar_vma.node)
1388 return; 1391 return;
1389 1392
1390 nvkm_vm_unmap(&node->bar_vma); 1393 nvkm_vm_unmap(&mem->bar_vma);
1391 nvkm_vm_put(&node->bar_vma); 1394 nvkm_vm_put(&mem->bar_vma);
1392} 1395}
1393 1396
1394static int 1397static int
@@ -1396,7 +1399,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
1396{ 1399{
1397 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); 1400 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1398 struct nouveau_bo *nvbo = nouveau_bo(bo); 1401 struct nouveau_bo *nvbo = nouveau_bo(bo);
1399 struct nvkm_device *device = nvxx_device(&drm->device); 1402 struct nvkm_device *device = nvxx_device(&drm->client.device);
1400 u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT; 1403 u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
1401 int i, ret; 1404 int i, ret;
1402 1405
@@ -1404,7 +1407,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
1404 * nothing to do here. 1407 * nothing to do here.
1405 */ 1408 */
1406 if (bo->mem.mem_type != TTM_PL_VRAM) { 1409 if (bo->mem.mem_type != TTM_PL_VRAM) {
1407 if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA || 1410 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA ||
1408 !nouveau_bo_tile_layout(nvbo)) 1411 !nouveau_bo_tile_layout(nvbo))
1409 return 0; 1412 return 0;
1410 1413
@@ -1419,7 +1422,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
1419 } 1422 }
1420 1423
1421 /* make sure bo is in mappable vram */ 1424 /* make sure bo is in mappable vram */
1422 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA || 1425 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
1423 bo->mem.start + bo->mem.num_pages < mappable) 1426 bo->mem.start + bo->mem.num_pages < mappable)
1424 return 0; 1427 return 0;
1425 1428
@@ -1461,7 +1464,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
1461 } 1464 }
1462 1465
1463 drm = nouveau_bdev(ttm->bdev); 1466 drm = nouveau_bdev(ttm->bdev);
1464 device = nvxx_device(&drm->device); 1467 device = nvxx_device(&drm->client.device);
1465 dev = drm->dev; 1468 dev = drm->dev;
1466 pdev = device->dev; 1469 pdev = device->dev;
1467 1470
@@ -1518,7 +1521,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
1518 return; 1521 return;
1519 1522
1520 drm = nouveau_bdev(ttm->bdev); 1523 drm = nouveau_bdev(ttm->bdev);
1521 device = nvxx_device(&drm->device); 1524 device = nvxx_device(&drm->client.device);
1522 dev = drm->dev; 1525 dev = drm->dev;
1523 pdev = device->dev; 1526 pdev = device->dev;
1524 1527
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index e42360983229..b06a5385d6dd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -26,6 +26,8 @@ struct nouveau_bo {
26 struct list_head vma_list; 26 struct list_head vma_list;
27 unsigned page_shift; 27 unsigned page_shift;
28 28
29 struct nouveau_cli *cli;
30
29 u32 tile_mode; 31 u32 tile_mode;
30 u32 tile_flags; 32 u32 tile_flags;
31 struct nouveau_drm_tile *tile; 33 struct nouveau_drm_tile *tile;
@@ -69,7 +71,7 @@ nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
69extern struct ttm_bo_driver nouveau_bo_driver; 71extern struct ttm_bo_driver nouveau_bo_driver;
70 72
71void nouveau_bo_move_init(struct nouveau_drm *); 73void nouveau_bo_move_init(struct nouveau_drm *);
72int nouveau_bo_new(struct drm_device *, int size, int align, u32 flags, 74int nouveau_bo_new(struct nouveau_cli *, u64 size, int align, u32 flags,
73 u32 tile_mode, u32 tile_flags, struct sg_table *sg, 75 u32 tile_mode, u32 tile_flags, struct sg_table *sg,
74 struct reservation_object *robj, 76 struct reservation_object *robj,
75 struct nouveau_bo **); 77 struct nouveau_bo **);
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index f9b3c811187e..dbc41fa86ee8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -45,10 +45,20 @@ MODULE_PARM_DESC(vram_pushbuf, "Create DMA push buffers in VRAM");
45int nouveau_vram_pushbuf; 45int nouveau_vram_pushbuf;
46module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400); 46module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
47 47
48static int
49nouveau_channel_killed(struct nvif_notify *ntfy)
50{
51 struct nouveau_channel *chan = container_of(ntfy, typeof(*chan), kill);
52 struct nouveau_cli *cli = (void *)chan->user.client;
53 NV_PRINTK(warn, cli, "channel %d killed!\n", chan->chid);
54 atomic_set(&chan->killed, 1);
55 return NVIF_NOTIFY_DROP;
56}
57
48int 58int
49nouveau_channel_idle(struct nouveau_channel *chan) 59nouveau_channel_idle(struct nouveau_channel *chan)
50{ 60{
51 if (likely(chan && chan->fence)) { 61 if (likely(chan && chan->fence && !atomic_read(&chan->killed))) {
52 struct nouveau_cli *cli = (void *)chan->user.client; 62 struct nouveau_cli *cli = (void *)chan->user.client;
53 struct nouveau_fence *fence = NULL; 63 struct nouveau_fence *fence = NULL;
54 int ret; 64 int ret;
@@ -78,6 +88,7 @@ nouveau_channel_del(struct nouveau_channel **pchan)
78 nvif_object_fini(&chan->nvsw); 88 nvif_object_fini(&chan->nvsw);
79 nvif_object_fini(&chan->gart); 89 nvif_object_fini(&chan->gart);
80 nvif_object_fini(&chan->vram); 90 nvif_object_fini(&chan->vram);
91 nvif_notify_fini(&chan->kill);
81 nvif_object_fini(&chan->user); 92 nvif_object_fini(&chan->user);
82 nvif_object_fini(&chan->push.ctxdma); 93 nvif_object_fini(&chan->push.ctxdma);
83 nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma); 94 nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma);
@@ -107,13 +118,14 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
107 118
108 chan->device = device; 119 chan->device = device;
109 chan->drm = drm; 120 chan->drm = drm;
121 atomic_set(&chan->killed, 0);
110 122
111 /* allocate memory for dma push buffer */ 123 /* allocate memory for dma push buffer */
112 target = TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED; 124 target = TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED;
113 if (nouveau_vram_pushbuf) 125 if (nouveau_vram_pushbuf)
114 target = TTM_PL_FLAG_VRAM; 126 target = TTM_PL_FLAG_VRAM;
115 127
116 ret = nouveau_bo_new(drm->dev, size, 0, target, 0, 0, NULL, NULL, 128 ret = nouveau_bo_new(cli, size, 0, target, 0, 0, NULL, NULL,
117 &chan->push.buffer); 129 &chan->push.buffer);
118 if (ret == 0) { 130 if (ret == 0) {
119 ret = nouveau_bo_pin(chan->push.buffer, target, false); 131 ret = nouveau_bo_pin(chan->push.buffer, target, false);
@@ -301,12 +313,26 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
301{ 313{
302 struct nvif_device *device = chan->device; 314 struct nvif_device *device = chan->device;
303 struct nouveau_cli *cli = (void *)chan->user.client; 315 struct nouveau_cli *cli = (void *)chan->user.client;
316 struct nouveau_drm *drm = chan->drm;
304 struct nvkm_mmu *mmu = nvxx_mmu(device); 317 struct nvkm_mmu *mmu = nvxx_mmu(device);
305 struct nv_dma_v0 args = {}; 318 struct nv_dma_v0 args = {};
306 int ret, i; 319 int ret, i;
307 320
308 nvif_object_map(&chan->user); 321 nvif_object_map(&chan->user);
309 322
323 if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO) {
324 ret = nvif_notify_init(&chan->user, nouveau_channel_killed,
325 true, NV906F_V0_NTFY_KILLED,
326 NULL, 0, 0, &chan->kill);
327 if (ret == 0)
328 ret = nvif_notify_get(&chan->kill);
329 if (ret) {
330 NV_ERROR(drm, "Failed to request channel kill "
331 "notification: %d\n", ret);
332 return ret;
333 }
334 }
335
310 /* allocate dma objects to cover all allowed vram, and gart */ 336 /* allocate dma objects to cover all allowed vram, and gart */
311 if (device->info.family < NV_DEVICE_INFO_V0_FERMI) { 337 if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
312 if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) { 338 if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
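
The kill notifier added here lets the DRM side notice host-killed channels: on FERMI_CHANNEL_GPFIFO and newer classes the channel registers an NV906F_V0_NTFY_KILLED notifier at init time, and when it fires, nouveau_channel_killed() latches chan->killed and returns NVIF_NOTIFY_DROP. Idle waits then bail out early instead of blocking on fences that can no longer signal. A condensed sketch of the guard (hypothetical helper, mirroring the nouveau_channel_idle() test above):

static bool
example_channel_usable(struct nouveau_channel *chan)
{
	/* a killed channel will never retire fences; don't wait on it */
	return chan && chan->fence && !atomic_read(&chan->killed);
}
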
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
index 48062c94f36d..46b947ba1cf4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.h
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.h
@@ -1,7 +1,7 @@
1#ifndef __NOUVEAU_CHAN_H__ 1#ifndef __NOUVEAU_CHAN_H__
2#define __NOUVEAU_CHAN_H__ 2#define __NOUVEAU_CHAN_H__
3
4#include <nvif/object.h> 3#include <nvif/object.h>
4#include <nvif/notify.h>
5struct nvif_device; 5struct nvif_device;
6 6
7struct nouveau_channel { 7struct nouveau_channel {
@@ -38,6 +38,9 @@ struct nouveau_channel {
38 u32 user_put; 38 u32 user_put;
39 39
40 struct nvif_object user; 40 struct nvif_object user;
41
42 struct nvif_notify kill;
43 atomic_t killed;
41}; 44};
42 45
43 46
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 966d20ab4de4..f5add64c093f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -419,7 +419,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
419 struct drm_device *dev = connector->dev; 419 struct drm_device *dev = connector->dev;
420 struct nouveau_connector *nv_connector = nouveau_connector(connector); 420 struct nouveau_connector *nv_connector = nouveau_connector(connector);
421 struct nouveau_drm *drm = nouveau_drm(dev); 421 struct nouveau_drm *drm = nouveau_drm(dev);
422 struct nvkm_gpio *gpio = nvxx_gpio(&drm->device); 422 struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device);
423 struct nouveau_encoder *nv_encoder; 423 struct nouveau_encoder *nv_encoder;
424 struct drm_encoder *encoder; 424 struct drm_encoder *encoder;
425 int i, panel = -ENODEV; 425 int i, panel = -ENODEV;
@@ -521,7 +521,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
521 return; 521 return;
522 nv_connector->detected_encoder = nv_encoder; 522 nv_connector->detected_encoder = nv_encoder;
523 523
524 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { 524 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
525 connector->interlace_allowed = true; 525 connector->interlace_allowed = true;
526 connector->doublescan_allowed = true; 526 connector->doublescan_allowed = true;
527 } else 527 } else
@@ -531,8 +531,8 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
531 connector->interlace_allowed = false; 531 connector->interlace_allowed = false;
532 } else { 532 } else {
533 connector->doublescan_allowed = true; 533 connector->doublescan_allowed = true;
534 if (drm->device.info.family == NV_DEVICE_INFO_V0_KELVIN || 534 if (drm->client.device.info.family == NV_DEVICE_INFO_V0_KELVIN ||
535 (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS && 535 (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
536 (dev->pdev->device & 0x0ff0) != 0x0100 && 536 (dev->pdev->device & 0x0ff0) != 0x0100 &&
537 (dev->pdev->device & 0x0ff0) != 0x0150)) 537 (dev->pdev->device & 0x0ff0) != 0x0150))
538 /* HW is broken */ 538 /* HW is broken */
@@ -984,17 +984,17 @@ get_tmds_link_bandwidth(struct drm_connector *connector, bool hdmi)
984 /* Note: these limits are conservative, some Fermi's 984 /* Note: these limits are conservative, some Fermi's
985 * can do 297 MHz. Unclear how this can be determined. 985 * can do 297 MHz. Unclear how this can be determined.
986 */ 986 */
987 if (drm->device.info.family >= NV_DEVICE_INFO_V0_KEPLER) 987 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_KEPLER)
988 return 297000; 988 return 297000;
989 if (drm->device.info.family >= NV_DEVICE_INFO_V0_FERMI) 989 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI)
990 return 225000; 990 return 225000;
991 } 991 }
992 if (dcb->location != DCB_LOC_ON_CHIP || 992 if (dcb->location != DCB_LOC_ON_CHIP ||
993 drm->device.info.chipset >= 0x46) 993 drm->client.device.info.chipset >= 0x46)
994 return 165000; 994 return 165000;
995 else if (drm->device.info.chipset >= 0x40) 995 else if (drm->client.device.info.chipset >= 0x40)
996 return 155000; 996 return 155000;
997 else if (drm->device.info.chipset >= 0x18) 997 else if (drm->client.device.info.chipset >= 0x18)
998 return 135000; 998 return 135000;
999 else 999 else
1000 return 112000; 1000 return 112000;
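
The TMDS bandwidth ladder itself is unchanged; only the device-info lookups move to drm->client.device. Folding the visible returns into one sketch (the enclosing hdmi check sits just above the visible context, per the hunk header's bool hdmi parameter; values in kHz):

static int
example_tmds_max_khz(struct nouveau_drm *drm, bool hdmi, bool on_chip)
{
	int family  = drm->client.device.info.family;
	int chipset = drm->client.device.info.chipset;

	if (hdmi) {	/* conservative: some Fermis can do 297 MHz */
		if (family >= NV_DEVICE_INFO_V0_KEPLER)
			return 297000;
		if (family >= NV_DEVICE_INFO_V0_FERMI)
			return 225000;
	}
	if (!on_chip || chipset >= 0x46)
		return 165000;
	if (chipset >= 0x40)
		return 155000;
	if (chipset >= 0x18)
		return 135000;
	return 112000;
}
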
@@ -1041,7 +1041,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
1041 clock = clock * (connector->display_info.bpc * 3) / 10; 1041 clock = clock * (connector->display_info.bpc * 3) / 10;
1042 break; 1042 break;
1043 default: 1043 default:
1044 BUG_ON(1); 1044 BUG();
1045 return MODE_BAD; 1045 return MODE_BAD;
1046 } 1046 }
1047 1047
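
A small cleanup rides along in nouveau_connector_mode_valid(): BUG_ON(1) is just a long-hand BUG(), so the unreachable default case now says so directly. Reduction of the change, nothing more:

#include <linux/bug.h>

static void example_unreachable(void)
{
	BUG();	/* was: BUG_ON(1); */
}
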
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index 411c12cdb249..fd64dfdc7d4f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -259,8 +259,9 @@ nouveau_debugfs_init(struct nouveau_drm *drm)
259 if (!drm->debugfs) 259 if (!drm->debugfs)
260 return -ENOMEM; 260 return -ENOMEM;
261 261
262 ret = nvif_object_init(&drm->device.object, 0, NVIF_CLASS_CONTROL, 262 ret = nvif_object_init(&drm->client.device.object, 0,
263 NULL, 0, &drm->debugfs->ctrl); 263 NVIF_CLASS_CONTROL, NULL, 0,
264 &drm->debugfs->ctrl);
264 if (ret) 265 if (ret)
265 return ret; 266 return ret;
266 267
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 6b570079d185..d614af8196b7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -495,7 +495,7 @@ int
495nouveau_display_create(struct drm_device *dev) 495nouveau_display_create(struct drm_device *dev)
496{ 496{
497 struct nouveau_drm *drm = nouveau_drm(dev); 497 struct nouveau_drm *drm = nouveau_drm(dev);
498 struct nvkm_device *device = nvxx_device(&drm->device); 498 struct nvkm_device *device = nvxx_device(&drm->client.device);
499 struct nouveau_display *disp; 499 struct nouveau_display *disp;
500 int ret; 500 int ret;
501 501
@@ -512,15 +512,15 @@ nouveau_display_create(struct drm_device *dev)
512 512
513 dev->mode_config.min_width = 0; 513 dev->mode_config.min_width = 0;
514 dev->mode_config.min_height = 0; 514 dev->mode_config.min_height = 0;
515 if (drm->device.info.family < NV_DEVICE_INFO_V0_CELSIUS) { 515 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_CELSIUS) {
516 dev->mode_config.max_width = 2048; 516 dev->mode_config.max_width = 2048;
517 dev->mode_config.max_height = 2048; 517 dev->mode_config.max_height = 2048;
518 } else 518 } else
519 if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { 519 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
520 dev->mode_config.max_width = 4096; 520 dev->mode_config.max_width = 4096;
521 dev->mode_config.max_height = 4096; 521 dev->mode_config.max_height = 4096;
522 } else 522 } else
523 if (drm->device.info.family < NV_DEVICE_INFO_V0_FERMI) { 523 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI) {
524 dev->mode_config.max_width = 8192; 524 dev->mode_config.max_width = 8192;
525 dev->mode_config.max_height = 8192; 525 dev->mode_config.max_height = 8192;
526 } else { 526 } else {
@@ -531,7 +531,7 @@ nouveau_display_create(struct drm_device *dev)
531 dev->mode_config.preferred_depth = 24; 531 dev->mode_config.preferred_depth = 24;
532 dev->mode_config.prefer_shadow = 1; 532 dev->mode_config.prefer_shadow = 1;
533 533
534 if (drm->device.info.chipset < 0x11) 534 if (drm->client.device.info.chipset < 0x11)
535 dev->mode_config.async_page_flip = false; 535 dev->mode_config.async_page_flip = false;
536 else 536 else
537 dev->mode_config.async_page_flip = true; 537 dev->mode_config.async_page_flip = true;
@@ -558,7 +558,7 @@ nouveau_display_create(struct drm_device *dev)
558 int i; 558 int i;
559 559
560 for (i = 0, ret = -ENODEV; ret && i < ARRAY_SIZE(oclass); i++) { 560 for (i = 0, ret = -ENODEV; ret && i < ARRAY_SIZE(oclass); i++) {
561 ret = nvif_object_init(&drm->device.object, 0, 561 ret = nvif_object_init(&drm->client.device.object, 0,
562 oclass[i], NULL, 0, &disp->disp); 562 oclass[i], NULL, 0, &disp->disp);
563 } 563 }
564 564
@@ -1057,6 +1057,7 @@ int
1057nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev, 1057nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
1058 struct drm_mode_create_dumb *args) 1058 struct drm_mode_create_dumb *args)
1059{ 1059{
1060 struct nouveau_cli *cli = nouveau_cli(file_priv);
1060 struct nouveau_bo *bo; 1061 struct nouveau_bo *bo;
1061 uint32_t domain; 1062 uint32_t domain;
1062 int ret; 1063 int ret;
@@ -1066,12 +1067,12 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
1066 args->size = roundup(args->size, PAGE_SIZE); 1067 args->size = roundup(args->size, PAGE_SIZE);
1067 1068
1068 /* Use VRAM if there is any ; otherwise fallback to system memory */ 1069 /* Use VRAM if there is any ; otherwise fallback to system memory */
1069 if (nouveau_drm(dev)->device.info.ram_size != 0) 1070 if (nouveau_drm(dev)->client.device.info.ram_size != 0)
1070 domain = NOUVEAU_GEM_DOMAIN_VRAM; 1071 domain = NOUVEAU_GEM_DOMAIN_VRAM;
1071 else 1072 else
1072 domain = NOUVEAU_GEM_DOMAIN_GART; 1073 domain = NOUVEAU_GEM_DOMAIN_GART;
1073 1074
1074 ret = nouveau_gem_new(dev, args->size, 0, domain, 0, 0, &bo); 1075 ret = nouveau_gem_new(cli, args->size, 0, domain, 0, 0, &bo);
1075 if (ret) 1076 if (ret)
1076 return ret; 1077 return ret;
1077 1078
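
nouveau_display_dumb_create() now resolves the per-file client once and hands it to the reworked nouveau_gem_new(); the placement logic is untouched, still preferring VRAM and falling back to GART on VRAM-less devices. A minimal sketch of the allocation path (hypothetical wrapper, assuming the nouveau headers):

static int
example_dumb_alloc(struct drm_file *file_priv, struct drm_device *dev,
		   u64 size, struct nouveau_bo **pbo)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	u32 domain = nouveau_drm(dev)->client.device.info.ram_size ?
		     NOUVEAU_GEM_DOMAIN_VRAM : NOUVEAU_GEM_DOMAIN_GART;

	return nouveau_gem_new(cli, size, 0, domain, 0, 0, pbo);
}
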
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index dd7b52ab505a..95529d1f2b5b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -37,6 +37,8 @@
37#include <core/pci.h> 37#include <core/pci.h>
38#include <core/tegra.h> 38#include <core/tegra.h>
39 39
40#include <nvif/driver.h>
41
40#include <nvif/class.h> 42#include <nvif/class.h>
41#include <nvif/cl0002.h> 43#include <nvif/cl0002.h>
42#include <nvif/cla06f.h> 44#include <nvif/cla06f.h>
@@ -109,35 +111,53 @@ nouveau_name(struct drm_device *dev)
109 return nouveau_platform_name(dev->platformdev); 111 return nouveau_platform_name(dev->platformdev);
110} 112}
111 113
114static void
115nouveau_cli_fini(struct nouveau_cli *cli)
116{
117 nvkm_vm_ref(NULL, &nvxx_client(&cli->base)->vm, NULL);
118 usif_client_fini(cli);
119 nvif_device_fini(&cli->device);
120 nvif_client_fini(&cli->base);
121}
122
112static int 123static int
113nouveau_cli_create(struct drm_device *dev, const char *sname, 124nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
114 int size, void **pcli) 125 struct nouveau_cli *cli)
115{ 126{
116 struct nouveau_cli *cli = *pcli = kzalloc(size, GFP_KERNEL); 127 u64 device = nouveau_name(drm->dev);
117 int ret; 128 int ret;
118 if (cli) {
119 snprintf(cli->name, sizeof(cli->name), "%s", sname);
120 cli->dev = dev;
121 129
122 ret = nvif_client_init(NULL, cli->name, nouveau_name(dev), 130 snprintf(cli->name, sizeof(cli->name), "%s", sname);
123 nouveau_config, nouveau_debug, 131 cli->dev = drm->dev;
132 mutex_init(&cli->mutex);
133 usif_client_init(cli);
134
135 if (cli == &drm->client) {
136 ret = nvif_driver_init(NULL, nouveau_config, nouveau_debug,
137 cli->name, device, &cli->base);
138 } else {
139 ret = nvif_client_init(&drm->client.base, cli->name, device,
124 &cli->base); 140 &cli->base);
125 if (ret == 0) {
126 mutex_init(&cli->mutex);
127 usif_client_init(cli);
128 }
129 return ret;
130 } 141 }
131 return -ENOMEM; 142 if (ret) {
132} 143 NV_ERROR(drm, "Client allocation failed: %d\n", ret);
144 goto done;
145 }
133 146
134static void 147 ret = nvif_device_init(&cli->base.object, 0, NV_DEVICE,
135nouveau_cli_destroy(struct nouveau_cli *cli) 148 &(struct nv_device_v0) {
136{ 149 .device = ~0,
137 nvkm_vm_ref(NULL, &nvxx_client(&cli->base)->vm, NULL); 150 }, sizeof(struct nv_device_v0),
138 nvif_client_fini(&cli->base); 151 &cli->device);
139 usif_client_fini(cli); 152 if (ret) {
140 kfree(cli); 153 NV_ERROR(drm, "Device allocation failed: %d\n", ret);
154 goto done;
155 }
156
157done:
158 if (ret)
159 nouveau_cli_fini(cli);
160 return ret;
141} 161}
142 162
143static void 163static void
@@ -161,7 +181,7 @@ nouveau_accel_fini(struct nouveau_drm *drm)
161static void 181static void
162nouveau_accel_init(struct nouveau_drm *drm) 182nouveau_accel_init(struct nouveau_drm *drm)
163{ 183{
164 struct nvif_device *device = &drm->device; 184 struct nvif_device *device = &drm->client.device;
165 struct nvif_sclass *sclass; 185 struct nvif_sclass *sclass;
166 u32 arg0, arg1; 186 u32 arg0, arg1;
167 int ret, i, n; 187 int ret, i, n;
@@ -215,7 +235,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
215 } 235 }
216 236
217 if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) { 237 if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
218 ret = nouveau_channel_new(drm, &drm->device, 238 ret = nouveau_channel_new(drm, &drm->client.device,
219 NVA06F_V0_ENGINE_CE0 | 239 NVA06F_V0_ENGINE_CE0 |
220 NVA06F_V0_ENGINE_CE1, 240 NVA06F_V0_ENGINE_CE1,
221 0, &drm->cechan); 241 0, &drm->cechan);
@@ -228,7 +248,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
228 if (device->info.chipset >= 0xa3 && 248 if (device->info.chipset >= 0xa3 &&
229 device->info.chipset != 0xaa && 249 device->info.chipset != 0xaa &&
230 device->info.chipset != 0xac) { 250 device->info.chipset != 0xac) {
231 ret = nouveau_channel_new(drm, &drm->device, 251 ret = nouveau_channel_new(drm, &drm->client.device,
232 NvDmaFB, NvDmaTT, &drm->cechan); 252 NvDmaFB, NvDmaTT, &drm->cechan);
233 if (ret) 253 if (ret)
234 NV_ERROR(drm, "failed to create ce channel, %d\n", ret); 254 NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
@@ -240,7 +260,8 @@ nouveau_accel_init(struct nouveau_drm *drm)
240 arg1 = NvDmaTT; 260 arg1 = NvDmaTT;
241 } 261 }
242 262
243 ret = nouveau_channel_new(drm, &drm->device, arg0, arg1, &drm->channel); 263 ret = nouveau_channel_new(drm, &drm->client.device,
264 arg0, arg1, &drm->channel);
244 if (ret) { 265 if (ret) {
245 NV_ERROR(drm, "failed to create kernel channel, %d\n", ret); 266 NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);
246 nouveau_accel_fini(drm); 267 nouveau_accel_fini(drm);
@@ -280,8 +301,8 @@ nouveau_accel_init(struct nouveau_drm *drm)
280 } 301 }
281 302
282 if (device->info.family < NV_DEVICE_INFO_V0_FERMI) { 303 if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
283 ret = nvkm_gpuobj_new(nvxx_device(&drm->device), 32, 0, false, 304 ret = nvkm_gpuobj_new(nvxx_device(&drm->client.device), 32, 0,
284 NULL, &drm->notify); 305 false, NULL, &drm->notify);
285 if (ret) { 306 if (ret) {
286 NV_ERROR(drm, "failed to allocate notifier, %d\n", ret); 307 NV_ERROR(drm, "failed to allocate notifier, %d\n", ret);
287 nouveau_accel_fini(drm); 308 nouveau_accel_fini(drm);
@@ -407,12 +428,17 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
407 struct nouveau_drm *drm; 428 struct nouveau_drm *drm;
408 int ret; 429 int ret;
409 430
410 ret = nouveau_cli_create(dev, "DRM", sizeof(*drm), (void **)&drm); 431 if (!(drm = kzalloc(sizeof(*drm), GFP_KERNEL)))
432 return -ENOMEM;
433 dev->dev_private = drm;
434 drm->dev = dev;
435
436 ret = nouveau_cli_init(drm, "DRM", &drm->client);
411 if (ret) 437 if (ret)
412 return ret; 438 return ret;
413 439
414 dev->dev_private = drm; 440 dev->irq_enabled = true;
415 drm->dev = dev; 441
416 nvxx_client(&drm->client.base)->debug = 442 nvxx_client(&drm->client.base)->debug =
417 nvkm_dbgopt(nouveau_debug, "DRM"); 443 nvkm_dbgopt(nouveau_debug, "DRM");
418 444
@@ -421,33 +447,24 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
421 447
422 nouveau_get_hdmi_dev(drm); 448 nouveau_get_hdmi_dev(drm);
423 449
424 ret = nvif_device_init(&drm->client.base.object, 0, NV_DEVICE,
425 &(struct nv_device_v0) {
426 .device = ~0,
427 }, sizeof(struct nv_device_v0),
428 &drm->device);
429 if (ret)
430 goto fail_device;
431
432 dev->irq_enabled = true;
433
434 /* workaround an odd issue on nvc1 by disabling the device's 450 /* workaround an odd issue on nvc1 by disabling the device's
435 * nosnoop capability. hopefully won't cause issues until a 451 * nosnoop capability. hopefully won't cause issues until a
436 * better fix is found - assuming there is one... 452 * better fix is found - assuming there is one...
437 */ 453 */
438 if (drm->device.info.chipset == 0xc1) 454 if (drm->client.device.info.chipset == 0xc1)
439 nvif_mask(&drm->device.object, 0x00088080, 0x00000800, 0x00000000); 455 nvif_mask(&drm->client.device.object, 0x00088080, 0x00000800, 0x00000000);
440 456
441 nouveau_vga_init(drm); 457 nouveau_vga_init(drm);
442 458
443 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { 459 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
444 if (!nvxx_device(&drm->device)->mmu) { 460 if (!nvxx_device(&drm->client.device)->mmu) {
445 ret = -ENOSYS; 461 ret = -ENOSYS;
446 goto fail_device; 462 goto fail_device;
447 } 463 }
448 464
449 ret = nvkm_vm_new(nvxx_device(&drm->device), 0, (1ULL << 40), 465 ret = nvkm_vm_new(nvxx_device(&drm->client.device),
450 0x1000, NULL, &drm->client.vm); 466 0, (1ULL << 40), 0x1000, NULL,
467 &drm->client.vm);
451 if (ret) 468 if (ret)
452 goto fail_device; 469 goto fail_device;
453 470
@@ -497,8 +514,8 @@ fail_bios:
497fail_ttm: 514fail_ttm:
498 nouveau_vga_fini(drm); 515 nouveau_vga_fini(drm);
499fail_device: 516fail_device:
500 nvif_device_fini(&drm->device); 517 nouveau_cli_fini(&drm->client);
501 nouveau_cli_destroy(&drm->client); 518 kfree(drm);
502 return ret; 519 return ret;
503} 520}
504 521
@@ -527,10 +544,10 @@ nouveau_drm_unload(struct drm_device *dev)
527 nouveau_ttm_fini(drm); 544 nouveau_ttm_fini(drm);
528 nouveau_vga_fini(drm); 545 nouveau_vga_fini(drm);
529 546
530 nvif_device_fini(&drm->device);
531 if (drm->hdmi_device) 547 if (drm->hdmi_device)
532 pci_dev_put(drm->hdmi_device); 548 pci_dev_put(drm->hdmi_device);
533 nouveau_cli_destroy(&drm->client); 549 nouveau_cli_fini(&drm->client);
550 kfree(drm);
534} 551}
535 552
536void 553void
@@ -560,7 +577,6 @@ static int
560nouveau_do_suspend(struct drm_device *dev, bool runtime) 577nouveau_do_suspend(struct drm_device *dev, bool runtime)
561{ 578{
562 struct nouveau_drm *drm = nouveau_drm(dev); 579 struct nouveau_drm *drm = nouveau_drm(dev);
563 struct nouveau_cli *cli;
564 int ret; 580 int ret;
565 581
566 nouveau_led_suspend(dev); 582 nouveau_led_suspend(dev);
@@ -590,7 +606,7 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
590 goto fail_display; 606 goto fail_display;
591 } 607 }
592 608
593 NV_INFO(drm, "suspending client object trees...\n"); 609 NV_INFO(drm, "suspending fence...\n");
594 if (drm->fence && nouveau_fence(drm)->suspend) { 610 if (drm->fence && nouveau_fence(drm)->suspend) {
595 if (!nouveau_fence(drm)->suspend(drm)) { 611 if (!nouveau_fence(drm)->suspend(drm)) {
596 ret = -ENOMEM; 612 ret = -ENOMEM;
@@ -598,13 +614,7 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
598 } 614 }
599 } 615 }
600 616
601 list_for_each_entry(cli, &drm->clients, head) { 617 NV_INFO(drm, "suspending object tree...\n");
602 ret = nvif_client_suspend(&cli->base);
603 if (ret)
604 goto fail_client;
605 }
606
607 NV_INFO(drm, "suspending kernel object tree...\n");
608 ret = nvif_client_suspend(&drm->client.base); 618 ret = nvif_client_suspend(&drm->client.base);
609 if (ret) 619 if (ret)
610 goto fail_client; 620 goto fail_client;
@@ -612,10 +622,6 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
612 return 0; 622 return 0;
613 623
614fail_client: 624fail_client:
615 list_for_each_entry_continue_reverse(cli, &drm->clients, head) {
616 nvif_client_resume(&cli->base);
617 }
618
619 if (drm->fence && nouveau_fence(drm)->resume) 625 if (drm->fence && nouveau_fence(drm)->resume)
620 nouveau_fence(drm)->resume(drm); 626 nouveau_fence(drm)->resume(drm);
621 627
@@ -631,19 +637,14 @@ static int
631nouveau_do_resume(struct drm_device *dev, bool runtime) 637nouveau_do_resume(struct drm_device *dev, bool runtime)
632{ 638{
633 struct nouveau_drm *drm = nouveau_drm(dev); 639 struct nouveau_drm *drm = nouveau_drm(dev);
634 struct nouveau_cli *cli;
635 640
636 NV_INFO(drm, "resuming kernel object tree...\n"); 641 NV_INFO(drm, "resuming object tree...\n");
637 nvif_client_resume(&drm->client.base); 642 nvif_client_resume(&drm->client.base);
638 643
639 NV_INFO(drm, "resuming client object trees...\n"); 644 NV_INFO(drm, "resuming fence...\n");
640 if (drm->fence && nouveau_fence(drm)->resume) 645 if (drm->fence && nouveau_fence(drm)->resume)
641 nouveau_fence(drm)->resume(drm); 646 nouveau_fence(drm)->resume(drm);
642 647
643 list_for_each_entry(cli, &drm->clients, head) {
644 nvif_client_resume(&cli->base);
645 }
646
647 nouveau_run_vbios_init(dev); 648 nouveau_run_vbios_init(dev);
648 649
649 if (dev->mode_config.num_crtc) { 650 if (dev->mode_config.num_crtc) {
@@ -758,7 +759,7 @@ nouveau_pmops_runtime_resume(struct device *dev)
758{ 759{
759 struct pci_dev *pdev = to_pci_dev(dev); 760 struct pci_dev *pdev = to_pci_dev(dev);
760 struct drm_device *drm_dev = pci_get_drvdata(pdev); 761 struct drm_device *drm_dev = pci_get_drvdata(pdev);
761 struct nvif_device *device = &nouveau_drm(drm_dev)->device; 762 struct nvif_device *device = &nouveau_drm(drm_dev)->client.device;
762 int ret; 763 int ret;
763 764
764 if (nouveau_runtime_pm == 0) 765 if (nouveau_runtime_pm == 0)
@@ -841,20 +842,20 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
841 get_task_comm(tmpname, current); 842 get_task_comm(tmpname, current);
842 snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid)); 843 snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid));
843 844
844 ret = nouveau_cli_create(dev, name, sizeof(*cli), (void **)&cli); 845 if (!(cli = kzalloc(sizeof(*cli), GFP_KERNEL)))
846 return ret;
845 847
848 ret = nouveau_cli_init(drm, name, cli);
846 if (ret) 849 if (ret)
847 goto out_suspend; 850 goto done;
848 851
849 cli->base.super = false; 852 cli->base.super = false;
850 853
851 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { 854 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
852 ret = nvkm_vm_new(nvxx_device(&drm->device), 0, (1ULL << 40), 855 ret = nvkm_vm_new(nvxx_device(&drm->client.device), 0,
853 0x1000, NULL, &cli->vm); 856 (1ULL << 40), 0x1000, NULL, &cli->vm);
854 if (ret) { 857 if (ret)
855 nouveau_cli_destroy(cli); 858 goto done;
856 goto out_suspend;
857 }
858 859
859 nvxx_client(&cli->base)->vm = cli->vm; 860 nvxx_client(&cli->base)->vm = cli->vm;
860 } 861 }
@@ -865,10 +866,14 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
865 list_add(&cli->head, &drm->clients); 866 list_add(&cli->head, &drm->clients);
866 mutex_unlock(&drm->client.mutex); 867 mutex_unlock(&drm->client.mutex);
867 868
868out_suspend: 869done:
870 if (ret && cli) {
871 nouveau_cli_fini(cli);
872 kfree(cli);
873 }
874
869 pm_runtime_mark_last_busy(dev->dev); 875 pm_runtime_mark_last_busy(dev->dev);
870 pm_runtime_put_autosuspend(dev->dev); 876 pm_runtime_put_autosuspend(dev->dev);
871
872 return ret; 877 return ret;
873} 878}
874 879
@@ -895,7 +900,8 @@ static void
895nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv) 900nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv)
896{ 901{
897 struct nouveau_cli *cli = nouveau_cli(fpriv); 902 struct nouveau_cli *cli = nouveau_cli(fpriv);
898 nouveau_cli_destroy(cli); 903 nouveau_cli_fini(cli);
904 kfree(cli);
899 pm_runtime_mark_last_busy(dev->dev); 905 pm_runtime_mark_last_busy(dev->dev);
900 pm_runtime_put_autosuspend(dev->dev); 906 pm_runtime_put_autosuspend(dev->dev);
901} 907}
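
The nouveau_drm.c changes are the heart of the rework: nouveau_cli_create()/nouveau_cli_destroy() become nouveau_cli_init()/nouveau_cli_fini() operating on caller-allocated storage, the DRM master client is brought up with nvif_driver_init() while per-open clients become its children via nvif_client_init(), and each client owns its own nvif_device. Because file clients now hang off drm->client.base, suspend/resume no longer walks drm->clients; a single nvif_client_suspend(&drm->client.base) covers the whole tree. A condensed sketch of the new bring-up (error unwinding via nouveau_cli_fini() omitted; assumes the statics from this file):

static int
example_cli_bringup(struct nouveau_drm *drm, struct nouveau_cli *cli,
		    const char *sname)
{
	u64 device = nouveau_name(drm->dev);
	int ret;

	snprintf(cli->name, sizeof(cli->name), "%s", sname);

	if (cli == &drm->client)	/* master: owns the driver handle */
		ret = nvif_driver_init(NULL, nouveau_config, nouveau_debug,
				       cli->name, device, &cli->base);
	else				/* per-open: child of the master */
		ret = nvif_client_init(&drm->client.base, cli->name, device,
				       &cli->base);
	if (ret)
		return ret;

	/* every client now carries its own device handle */
	return nvif_device_init(&cli->base.object, 0, NV_DEVICE,
				&(struct nv_device_v0) { .device = ~0 },
				sizeof(struct nv_device_v0), &cli->device);
}
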
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 8d5ed5bfdacb..92c8b20f6229 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -86,14 +86,17 @@ enum nouveau_drm_handle {
86 86
87struct nouveau_cli { 87struct nouveau_cli {
88 struct nvif_client base; 88 struct nvif_client base;
89 struct drm_device *dev;
90 struct mutex mutex;
91
92 struct nvif_device device;
93
89 struct nvkm_vm *vm; /*XXX*/ 94 struct nvkm_vm *vm; /*XXX*/
90 struct list_head head; 95 struct list_head head;
91 struct mutex mutex;
92 void *abi16; 96 void *abi16;
93 struct list_head objects; 97 struct list_head objects;
94 struct list_head notifys; 98 struct list_head notifys;
95 char name[32]; 99 char name[32];
96 struct drm_device *dev;
97}; 100};
98 101
99static inline struct nouveau_cli * 102static inline struct nouveau_cli *
@@ -111,7 +114,6 @@ struct nouveau_drm {
111 struct nouveau_cli client; 114 struct nouveau_cli client;
112 struct drm_device *dev; 115 struct drm_device *dev;
113 116
114 struct nvif_device device;
115 struct list_head clients; 117 struct list_head clients;
116 118
117 struct { 119 struct {
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 971c147a3984..91312953e7eb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -60,7 +60,7 @@ nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
60{ 60{
61 struct nouveau_fbdev *fbcon = info->par; 61 struct nouveau_fbdev *fbcon = info->par;
62 struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev); 62 struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
63 struct nvif_device *device = &drm->device; 63 struct nvif_device *device = &drm->client.device;
64 int ret; 64 int ret;
65 65
66 if (info->state != FBINFO_STATE_RUNNING) 66 if (info->state != FBINFO_STATE_RUNNING)
@@ -92,7 +92,7 @@ nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
92{ 92{
93 struct nouveau_fbdev *fbcon = info->par; 93 struct nouveau_fbdev *fbcon = info->par;
94 struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev); 94 struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
95 struct nvif_device *device = &drm->device; 95 struct nvif_device *device = &drm->client.device;
96 int ret; 96 int ret;
97 97
98 if (info->state != FBINFO_STATE_RUNNING) 98 if (info->state != FBINFO_STATE_RUNNING)
@@ -124,7 +124,7 @@ nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
124{ 124{
125 struct nouveau_fbdev *fbcon = info->par; 125 struct nouveau_fbdev *fbcon = info->par;
126 struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev); 126 struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
127 struct nvif_device *device = &drm->device; 127 struct nvif_device *device = &drm->client.device;
128 int ret; 128 int ret;
129 129
130 if (info->state != FBINFO_STATE_RUNNING) 130 if (info->state != FBINFO_STATE_RUNNING)
@@ -266,10 +266,10 @@ nouveau_fbcon_accel_init(struct drm_device *dev)
266 struct fb_info *info = fbcon->helper.fbdev; 266 struct fb_info *info = fbcon->helper.fbdev;
267 int ret; 267 int ret;
268 268
269 if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) 269 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA)
270 ret = nv04_fbcon_accel_init(info); 270 ret = nv04_fbcon_accel_init(info);
271 else 271 else
272 if (drm->device.info.family < NV_DEVICE_INFO_V0_FERMI) 272 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI)
273 ret = nv50_fbcon_accel_init(info); 273 ret = nv50_fbcon_accel_init(info);
274 else 274 else
275 ret = nvc0_fbcon_accel_init(info); 275 ret = nvc0_fbcon_accel_init(info);
@@ -324,7 +324,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
324 container_of(helper, struct nouveau_fbdev, helper); 324 container_of(helper, struct nouveau_fbdev, helper);
325 struct drm_device *dev = fbcon->helper.dev; 325 struct drm_device *dev = fbcon->helper.dev;
326 struct nouveau_drm *drm = nouveau_drm(dev); 326 struct nouveau_drm *drm = nouveau_drm(dev);
327 struct nvif_device *device = &drm->device; 327 struct nvif_device *device = &drm->client.device;
328 struct fb_info *info; 328 struct fb_info *info;
329 struct nouveau_framebuffer *fb; 329 struct nouveau_framebuffer *fb;
330 struct nouveau_channel *chan; 330 struct nouveau_channel *chan;
@@ -341,8 +341,9 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
341 mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, 341 mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
342 sizes->surface_depth); 342 sizes->surface_depth);
343 343
344 ret = nouveau_gem_new(dev, mode_cmd.pitches[0] * mode_cmd.height, 344 ret = nouveau_gem_new(&drm->client, mode_cmd.pitches[0] *
345 0, NOUVEAU_GEM_DOMAIN_VRAM, 0, 0x0000, &nvbo); 345 mode_cmd.height, 0, NOUVEAU_GEM_DOMAIN_VRAM,
346 0, 0x0000, &nvbo);
346 if (ret) { 347 if (ret) {
347 NV_ERROR(drm, "failed to allocate framebuffer\n"); 348 NV_ERROR(drm, "failed to allocate framebuffer\n");
348 goto out; 349 goto out;
@@ -515,10 +516,10 @@ nouveau_fbcon_init(struct drm_device *dev)
515 if (ret) 516 if (ret)
516 goto fini; 517 goto fini;
517 518
518 if (drm->device.info.ram_size <= 32 * 1024 * 1024) 519 if (drm->client.device.info.ram_size <= 32 * 1024 * 1024)
519 preferred_bpp = 8; 520 preferred_bpp = 8;
520 else 521 else
521 if (drm->device.info.ram_size <= 64 * 1024 * 1024) 522 if (drm->client.device.info.ram_size <= 64 * 1024 * 1024)
522 preferred_bpp = 16; 523 preferred_bpp = 16;
523 else 524 else
524 preferred_bpp = 32; 525 preferred_bpp = 32;
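
Beyond the accessor moves, fbcon's framebuffer BO is now allocated against the kernel's own client (&drm->client) rather than a bare drm_device. The console depth heuristic is unchanged; lifted out for clarity (hypothetical helper):

static int
example_fbcon_bpp(struct nouveau_drm *drm)
{
	u64 ram = drm->client.device.info.ram_size;

	if (ram <= 32 * 1024 * 1024)
		return 8;
	if (ram <= 64 * 1024 * 1024)
		return 16;
	return 32;
}
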
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index a6126c93f215..f3e551f1aa46 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -190,7 +190,7 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
190 return; 190 return;
191 191
192 ret = nvif_notify_init(&chan->user, nouveau_fence_wait_uevent_handler, 192 ret = nvif_notify_init(&chan->user, nouveau_fence_wait_uevent_handler,
193 false, G82_CHANNEL_DMA_V0_NTFY_UEVENT, 193 false, NV826E_V0_NTFY_NON_STALL_INTERRUPT,
194 &(struct nvif_notify_uevent_req) { }, 194 &(struct nvif_notify_uevent_req) { },
195 sizeof(struct nvif_notify_uevent_req), 195 sizeof(struct nvif_notify_uevent_req),
196 sizeof(struct nvif_notify_uevent_rep), 196 sizeof(struct nvif_notify_uevent_rep),
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 201b52b750dd..ca5397beb357 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -175,11 +175,11 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
175} 175}
176 176
177int 177int
178nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain, 178nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
179 uint32_t tile_mode, uint32_t tile_flags, 179 uint32_t tile_mode, uint32_t tile_flags,
180 struct nouveau_bo **pnvbo) 180 struct nouveau_bo **pnvbo)
181{ 181{
182 struct nouveau_drm *drm = nouveau_drm(dev); 182 struct nouveau_drm *drm = nouveau_drm(cli->dev);
183 struct nouveau_bo *nvbo; 183 struct nouveau_bo *nvbo;
184 u32 flags = 0; 184 u32 flags = 0;
185 int ret; 185 int ret;
@@ -194,7 +194,7 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
194 if (domain & NOUVEAU_GEM_DOMAIN_COHERENT) 194 if (domain & NOUVEAU_GEM_DOMAIN_COHERENT)
195 flags |= TTM_PL_FLAG_UNCACHED; 195 flags |= TTM_PL_FLAG_UNCACHED;
196 196
197 ret = nouveau_bo_new(dev, size, align, flags, tile_mode, 197 ret = nouveau_bo_new(cli, size, align, flags, tile_mode,
198 tile_flags, NULL, NULL, pnvbo); 198 tile_flags, NULL, NULL, pnvbo);
199 if (ret) 199 if (ret)
200 return ret; 200 return ret;
@@ -206,12 +206,12 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
206 */ 206 */
207 nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM | 207 nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
208 NOUVEAU_GEM_DOMAIN_GART; 208 NOUVEAU_GEM_DOMAIN_GART;
209 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) 209 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
210 nvbo->valid_domains &= domain; 210 nvbo->valid_domains &= domain;
211 211
212 /* Initialize the embedded gem-object. We return a single gem-reference 212 /* Initialize the embedded gem-object. We return a single gem-reference
213 * to the caller, instead of a normal nouveau_bo ttm reference. */ 213 * to the caller, instead of a normal nouveau_bo ttm reference. */
214 ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size); 214 ret = drm_gem_object_init(drm->dev, &nvbo->gem, nvbo->bo.mem.size);
215 if (ret) { 215 if (ret) {
216 nouveau_bo_ref(NULL, pnvbo); 216 nouveau_bo_ref(NULL, pnvbo);
217 return -ENOMEM; 217 return -ENOMEM;
@@ -257,7 +257,7 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
257{ 257{
258 struct nouveau_drm *drm = nouveau_drm(dev); 258 struct nouveau_drm *drm = nouveau_drm(dev);
259 struct nouveau_cli *cli = nouveau_cli(file_priv); 259 struct nouveau_cli *cli = nouveau_cli(file_priv);
260 struct nvkm_fb *fb = nvxx_fb(&drm->device); 260 struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
261 struct drm_nouveau_gem_new *req = data; 261 struct drm_nouveau_gem_new *req = data;
262 struct nouveau_bo *nvbo = NULL; 262 struct nouveau_bo *nvbo = NULL;
263 int ret = 0; 263 int ret = 0;
@@ -267,7 +267,7 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
267 return -EINVAL; 267 return -EINVAL;
268 } 268 }
269 269
270 ret = nouveau_gem_new(dev, req->info.size, req->align, 270 ret = nouveau_gem_new(cli, req->info.size, req->align,
271 req->info.domain, req->info.tile_mode, 271 req->info.domain, req->info.tile_mode,
272 req->info.tile_flags, &nvbo); 272 req->info.tile_flags, &nvbo);
273 if (ret) 273 if (ret)
@@ -496,7 +496,7 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
496 return ret; 496 return ret;
497 } 497 }
498 498
499 if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { 499 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
500 if (nvbo->bo.offset == b->presumed.offset && 500 if (nvbo->bo.offset == b->presumed.offset &&
501 ((nvbo->bo.mem.mem_type == TTM_PL_VRAM && 501 ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
502 b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) || 502 b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
@@ -767,7 +767,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
767 push[i].length); 767 push[i].length);
768 } 768 }
769 } else 769 } else
770 if (drm->device.info.chipset >= 0x25) { 770 if (drm->client.device.info.chipset >= 0x25) {
771 ret = RING_SPACE(chan, req->nr_push * 2); 771 ret = RING_SPACE(chan, req->nr_push * 2);
772 if (ret) { 772 if (ret) {
773 NV_PRINTK(err, cli, "cal_space: %d\n", ret); 773 NV_PRINTK(err, cli, "cal_space: %d\n", ret);
@@ -840,7 +840,7 @@ out_next:
840 req->suffix0 = 0x00000000; 840 req->suffix0 = 0x00000000;
841 req->suffix1 = 0x00000000; 841 req->suffix1 = 0x00000000;
842 } else 842 } else
843 if (drm->device.info.chipset >= 0x25) { 843 if (drm->client.device.info.chipset >= 0x25) {
844 req->suffix0 = 0x00020000; 844 req->suffix0 = 0x00020000;
845 req->suffix1 = 0x00000000; 845 req->suffix1 = 0x00000000;
846 } else { 846 } else {
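
The nouveau_gem.c hunks mirror the BO rework: nouveau_gem_new() takes the client, derives drm from cli->dev, and initialises the embedded GEM object against drm->dev. The domain handling is unchanged in substance; pre-Tesla buffers keep both VRAM and GART valid, while Tesla and newer restrict to the requested mask. Condensed sketch (assumptions as in the hunk):

static int
example_gem_domains(struct nouveau_drm *drm, struct nouveau_bo *nvbo,
		    u32 requested)
{
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
			      NOUVEAU_GEM_DOMAIN_GART;
	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
		nvbo->valid_domains &= requested;

	return drm_gem_object_init(drm->dev, &nvbo->gem, nvbo->bo.mem.size);
}
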
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h
index 7e32da2e037a..8fa6ed9ddd3a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.h
@@ -16,7 +16,7 @@ nouveau_gem_object(struct drm_gem_object *gem)
16} 16}
17 17
18/* nouveau_gem.c */ 18/* nouveau_gem.c */
19extern int nouveau_gem_new(struct drm_device *, int size, int align, 19extern int nouveau_gem_new(struct nouveau_cli *, u64 size, int align,
20 uint32_t domain, uint32_t tile_mode, 20 uint32_t domain, uint32_t tile_mode,
21 uint32_t tile_flags, struct nouveau_bo **); 21 uint32_t tile_flags, struct nouveau_bo **);
22extern void nouveau_gem_object_del(struct drm_gem_object *); 22extern void nouveau_gem_object_del(struct drm_gem_object *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index 71f764bf4cc6..23b1670c1c2f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -43,7 +43,7 @@ nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf)
43{ 43{
44 struct drm_device *dev = dev_get_drvdata(d); 44 struct drm_device *dev = dev_get_drvdata(d);
45 struct nouveau_drm *drm = nouveau_drm(dev); 45 struct nouveau_drm *drm = nouveau_drm(dev);
46 struct nvkm_therm *therm = nvxx_therm(&drm->device); 46 struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
47 int temp = nvkm_therm_temp_get(therm); 47 int temp = nvkm_therm_temp_get(therm);
48 48
49 if (temp < 0) 49 if (temp < 0)
@@ -69,7 +69,7 @@ nouveau_hwmon_temp1_auto_point1_temp(struct device *d,
69{ 69{
70 struct drm_device *dev = dev_get_drvdata(d); 70 struct drm_device *dev = dev_get_drvdata(d);
71 struct nouveau_drm *drm = nouveau_drm(dev); 71 struct nouveau_drm *drm = nouveau_drm(dev);
72 struct nvkm_therm *therm = nvxx_therm(&drm->device); 72 struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
73 73
74 return snprintf(buf, PAGE_SIZE, "%d\n", 74 return snprintf(buf, PAGE_SIZE, "%d\n",
75 therm->attr_get(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST) * 1000); 75 therm->attr_get(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST) * 1000);
@@ -81,7 +81,7 @@ nouveau_hwmon_set_temp1_auto_point1_temp(struct device *d,
81{ 81{
82 struct drm_device *dev = dev_get_drvdata(d); 82 struct drm_device *dev = dev_get_drvdata(d);
83 struct nouveau_drm *drm = nouveau_drm(dev); 83 struct nouveau_drm *drm = nouveau_drm(dev);
84 struct nvkm_therm *therm = nvxx_therm(&drm->device); 84 struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
85 long value; 85 long value;
86 86
87 if (kstrtol(buf, 10, &value) == -EINVAL) 87 if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -102,7 +102,7 @@ nouveau_hwmon_temp1_auto_point1_temp_hyst(struct device *d,
102{ 102{
103 struct drm_device *dev = dev_get_drvdata(d); 103 struct drm_device *dev = dev_get_drvdata(d);
104 struct nouveau_drm *drm = nouveau_drm(dev); 104 struct nouveau_drm *drm = nouveau_drm(dev);
105 struct nvkm_therm *therm = nvxx_therm(&drm->device); 105 struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
106 106
107 return snprintf(buf, PAGE_SIZE, "%d\n", 107 return snprintf(buf, PAGE_SIZE, "%d\n",
108 therm->attr_get(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST_HYST) * 1000); 108 therm->attr_get(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST_HYST) * 1000);
@@ -114,7 +114,7 @@ nouveau_hwmon_set_temp1_auto_point1_temp_hyst(struct device *d,
114{ 114{
115 struct drm_device *dev = dev_get_drvdata(d); 115 struct drm_device *dev = dev_get_drvdata(d);
116 struct nouveau_drm *drm = nouveau_drm(dev); 116 struct nouveau_drm *drm = nouveau_drm(dev);
117 struct nvkm_therm *therm = nvxx_therm(&drm->device); 117 struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
118 long value; 118 long value;
119 119
120 if (kstrtol(buf, 10, &value) == -EINVAL) 120 if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -134,7 +134,7 @@ nouveau_hwmon_max_temp(struct device *d, struct device_attribute *a, char *buf)
134{ 134{
135 struct drm_device *dev = dev_get_drvdata(d); 135 struct drm_device *dev = dev_get_drvdata(d);
136 struct nouveau_drm *drm = nouveau_drm(dev); 136 struct nouveau_drm *drm = nouveau_drm(dev);
137 struct nvkm_therm *therm = nvxx_therm(&drm->device); 137 struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
138 138
139 return snprintf(buf, PAGE_SIZE, "%d\n", 139 return snprintf(buf, PAGE_SIZE, "%d\n",
140 therm->attr_get(therm, NVKM_THERM_ATTR_THRS_DOWN_CLK) * 1000); 140 therm->attr_get(therm, NVKM_THERM_ATTR_THRS_DOWN_CLK) * 1000);
@@ -145,7 +145,7 @@ nouveau_hwmon_set_max_temp(struct device *d, struct device_attribute *a,
145{ 145{
146 struct drm_device *dev = dev_get_drvdata(d); 146 struct drm_device *dev = dev_get_drvdata(d);
147 struct nouveau_drm *drm = nouveau_drm(dev); 147 struct nouveau_drm *drm = nouveau_drm(dev);
148 struct nvkm_therm *therm = nvxx_therm(&drm->device); 148 struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
149 long value; 149 long value;
150 150
151 if (kstrtol(buf, 10, &value) == -EINVAL) 151 if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -165,7 +165,7 @@ nouveau_hwmon_max_temp_hyst(struct device *d, struct device_attribute *a,
165{ 165{
166 struct drm_device *dev = dev_get_drvdata(d); 166 struct drm_device *dev = dev_get_drvdata(d);
167 struct nouveau_drm *drm = nouveau_drm(dev); 167 struct nouveau_drm *drm = nouveau_drm(dev);
168 struct nvkm_therm *therm = nvxx_therm(&drm->device); 168 struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
169 169
170 return snprintf(buf, PAGE_SIZE, "%d\n", 170 return snprintf(buf, PAGE_SIZE, "%d\n",
171 therm->attr_get(therm, NVKM_THERM_ATTR_THRS_DOWN_CLK_HYST) * 1000); 171 therm->attr_get(therm, NVKM_THERM_ATTR_THRS_DOWN_CLK_HYST) * 1000);
@@ -176,7 +176,7 @@ nouveau_hwmon_set_max_temp_hyst(struct device *d, struct device_attribute *a,
176{ 176{
177 struct drm_device *dev = dev_get_drvdata(d); 177 struct drm_device *dev = dev_get_drvdata(d);
178 struct nouveau_drm *drm = nouveau_drm(dev); 178 struct nouveau_drm *drm = nouveau_drm(dev);
179 struct nvkm_therm *therm = nvxx_therm(&drm->device); 179 struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
180 long value; 180 long value;
181 181
182 if (kstrtol(buf, 10, &value) == -EINVAL) 182 if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -197,7 +197,7 @@ nouveau_hwmon_critical_temp(struct device *d, struct device_attribute *a,
197{ 197{
198 struct drm_device *dev = dev_get_drvdata(d); 198 struct drm_device *dev = dev_get_drvdata(d);
199 struct nouveau_drm *drm = nouveau_drm(dev); 199 struct nouveau_drm *drm = nouveau_drm(dev);
200 struct nvkm_therm *therm = nvxx_therm(&drm->device); 200 struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
201 201
202 return snprintf(buf, PAGE_SIZE, "%d\n", 202 return snprintf(buf, PAGE_SIZE, "%d\n",
203 therm->attr_get(therm, NVKM_THERM_ATTR_THRS_CRITICAL) * 1000); 203 therm->attr_get(therm, NVKM_THERM_ATTR_THRS_CRITICAL) * 1000);
@@ -209,7 +209,7 @@ nouveau_hwmon_set_critical_temp(struct device *d, struct device_attribute *a,
209{ 209{
210 struct drm_device *dev = dev_get_drvdata(d); 210 struct drm_device *dev = dev_get_drvdata(d);
211 struct nouveau_drm *drm = nouveau_drm(dev); 211 struct nouveau_drm *drm = nouveau_drm(dev);
212 struct nvkm_therm *therm = nvxx_therm(&drm->device); 212 struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
213 long value; 213 long value;
214 214
215 if (kstrtol(buf, 10, &value) == -EINVAL) 215 if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -230,7 +230,7 @@ nouveau_hwmon_critical_temp_hyst(struct device *d, struct device_attribute *a,
230{ 230{
231 struct drm_device *dev = dev_get_drvdata(d); 231 struct drm_device *dev = dev_get_drvdata(d);
232 struct nouveau_drm *drm = nouveau_drm(dev); 232 struct nouveau_drm *drm = nouveau_drm(dev);
233 struct nvkm_therm *therm = nvxx_therm(&drm->device); 233 struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
234 234
235 return snprintf(buf, PAGE_SIZE, "%d\n", 235 return snprintf(buf, PAGE_SIZE, "%d\n",
236 therm->attr_get(therm, NVKM_THERM_ATTR_THRS_CRITICAL_HYST) * 1000); 236 therm->attr_get(therm, NVKM_THERM_ATTR_THRS_CRITICAL_HYST) * 1000);
@@ -243,7 +243,7 @@ nouveau_hwmon_set_critical_temp_hyst(struct device *d,
243{ 243{
244 struct drm_device *dev = dev_get_drvdata(d); 244 struct drm_device *dev = dev_get_drvdata(d);
245 struct nouveau_drm *drm = nouveau_drm(dev); 245 struct nouveau_drm *drm = nouveau_drm(dev);
246 struct nvkm_therm *therm = nvxx_therm(&drm->device); 246 struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
247 long value; 247 long value;
248 248
249 if (kstrtol(buf, 10, &value) == -EINVAL) 249 if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -263,7 +263,7 @@ nouveau_hwmon_emergency_temp(struct device *d, struct device_attribute *a,
263{ 263{
264 struct drm_device *dev = dev_get_drvdata(d); 264 struct drm_device *dev = dev_get_drvdata(d);
265 struct nouveau_drm *drm = nouveau_drm(dev); 265 struct nouveau_drm *drm = nouveau_drm(dev);
266 struct nvkm_therm *therm = nvxx_therm(&drm->device); 266 struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
267 267
268 return snprintf(buf, PAGE_SIZE, "%d\n", 268 return snprintf(buf, PAGE_SIZE, "%d\n",
269 therm->attr_get(therm, NVKM_THERM_ATTR_THRS_SHUTDOWN) * 1000); 269 therm->attr_get(therm, NVKM_THERM_ATTR_THRS_SHUTDOWN) * 1000);
@@ -275,7 +275,7 @@ nouveau_hwmon_set_emergency_temp(struct device *d, struct device_attribute *a,
275{ 275{
276 struct drm_device *dev = dev_get_drvdata(d); 276 struct drm_device *dev = dev_get_drvdata(d);
277 struct nouveau_drm *drm = nouveau_drm(dev); 277 struct nouveau_drm *drm = nouveau_drm(dev);
278 struct nvkm_therm *therm = nvxx_therm(&drm->device); 278 struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
279 long value; 279 long value;
280 280
281 if (kstrtol(buf, 10, &value) == -EINVAL) 281 if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -296,7 +296,7 @@ nouveau_hwmon_emergency_temp_hyst(struct device *d, struct device_attribute *a,
296{ 296{
297 struct drm_device *dev = dev_get_drvdata(d); 297 struct drm_device *dev = dev_get_drvdata(d);
298 struct nouveau_drm *drm = nouveau_drm(dev); 298 struct nouveau_drm *drm = nouveau_drm(dev);
299 struct nvkm_therm *therm = nvxx_therm(&drm->device); 299 struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
300 300
301 return snprintf(buf, PAGE_SIZE, "%d\n", 301 return snprintf(buf, PAGE_SIZE, "%d\n",
302 therm->attr_get(therm, NVKM_THERM_ATTR_THRS_SHUTDOWN_HYST) * 1000); 302 therm->attr_get(therm, NVKM_THERM_ATTR_THRS_SHUTDOWN_HYST) * 1000);
@@ -309,7 +309,7 @@ nouveau_hwmon_set_emergency_temp_hyst(struct device *d,
309{ 309{
310 struct drm_device *dev = dev_get_drvdata(d); 310 struct drm_device *dev = dev_get_drvdata(d);
311 struct nouveau_drm *drm = nouveau_drm(dev); 311 struct nouveau_drm *drm = nouveau_drm(dev);
312 struct nvkm_therm *therm = nvxx_therm(&drm->device); 312 struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
313 long value; 313 long value;
314 314
315 if (kstrtol(buf, 10, &value) == -EINVAL) 315 if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -349,7 +349,7 @@ nouveau_hwmon_show_fan1_input(struct device *d, struct device_attribute *attr,
349{ 349{
350 struct drm_device *dev = dev_get_drvdata(d); 350 struct drm_device *dev = dev_get_drvdata(d);
351 struct nouveau_drm *drm = nouveau_drm(dev); 351 struct nouveau_drm *drm = nouveau_drm(dev);
352 struct nvkm_therm *therm = nvxx_therm(&drm->device); 352 struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
353 353
354 return snprintf(buf, PAGE_SIZE, "%d\n", nvkm_therm_fan_sense(therm)); 354 return snprintf(buf, PAGE_SIZE, "%d\n", nvkm_therm_fan_sense(therm));
355} 355}
@@ -362,7 +362,7 @@ nouveau_hwmon_get_pwm1_enable(struct device *d,
362{ 362{
363 struct drm_device *dev = dev_get_drvdata(d); 363 struct drm_device *dev = dev_get_drvdata(d);
364 struct nouveau_drm *drm = nouveau_drm(dev); 364 struct nouveau_drm *drm = nouveau_drm(dev);
365 struct nvkm_therm *therm = nvxx_therm(&drm->device); 365 struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
366 int ret; 366 int ret;
367 367
368 ret = therm->attr_get(therm, NVKM_THERM_ATTR_FAN_MODE); 368 ret = therm->attr_get(therm, NVKM_THERM_ATTR_FAN_MODE);
@@ -378,7 +378,7 @@ nouveau_hwmon_set_pwm1_enable(struct device *d, struct device_attribute *a,
378{ 378{
379 struct drm_device *dev = dev_get_drvdata(d); 379 struct drm_device *dev = dev_get_drvdata(d);
380 struct nouveau_drm *drm = nouveau_drm(dev); 380 struct nouveau_drm *drm = nouveau_drm(dev);
381 struct nvkm_therm *therm = nvxx_therm(&drm->device); 381 struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
382 long value; 382 long value;
383 int ret; 383 int ret;
384 384
@@ -401,7 +401,7 @@ nouveau_hwmon_get_pwm1(struct device *d, struct device_attribute *a, char *buf)
401{ 401{
402 struct drm_device *dev = dev_get_drvdata(d); 402 struct drm_device *dev = dev_get_drvdata(d);
403 struct nouveau_drm *drm = nouveau_drm(dev); 403 struct nouveau_drm *drm = nouveau_drm(dev);
404 struct nvkm_therm *therm = nvxx_therm(&drm->device); 404 struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
405 int ret; 405 int ret;
406 406
407 ret = therm->fan_get(therm); 407 ret = therm->fan_get(therm);
@@ -417,7 +417,7 @@ nouveau_hwmon_set_pwm1(struct device *d, struct device_attribute *a,
417{ 417{
418 struct drm_device *dev = dev_get_drvdata(d); 418 struct drm_device *dev = dev_get_drvdata(d);
419 struct nouveau_drm *drm = nouveau_drm(dev); 419 struct nouveau_drm *drm = nouveau_drm(dev);
420 struct nvkm_therm *therm = nvxx_therm(&drm->device); 420 struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
421 int ret = -ENODEV; 421 int ret = -ENODEV;
422 long value; 422 long value;
423 423
@@ -441,7 +441,7 @@ nouveau_hwmon_get_pwm1_min(struct device *d,
441{ 441{
442 struct drm_device *dev = dev_get_drvdata(d); 442 struct drm_device *dev = dev_get_drvdata(d);
443 struct nouveau_drm *drm = nouveau_drm(dev); 443 struct nouveau_drm *drm = nouveau_drm(dev);
444 struct nvkm_therm *therm = nvxx_therm(&drm->device); 444 struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
445 int ret; 445 int ret;
446 446
447 ret = therm->attr_get(therm, NVKM_THERM_ATTR_FAN_MIN_DUTY); 447 ret = therm->attr_get(therm, NVKM_THERM_ATTR_FAN_MIN_DUTY);
@@ -457,7 +457,7 @@ nouveau_hwmon_set_pwm1_min(struct device *d, struct device_attribute *a,
457{ 457{
458 struct drm_device *dev = dev_get_drvdata(d); 458 struct drm_device *dev = dev_get_drvdata(d);
459 struct nouveau_drm *drm = nouveau_drm(dev); 459 struct nouveau_drm *drm = nouveau_drm(dev);
460 struct nvkm_therm *therm = nvxx_therm(&drm->device); 460 struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
461 long value; 461 long value;
462 int ret; 462 int ret;
463 463
@@ -481,7 +481,7 @@ nouveau_hwmon_get_pwm1_max(struct device *d,
481{ 481{
482 struct drm_device *dev = dev_get_drvdata(d); 482 struct drm_device *dev = dev_get_drvdata(d);
483 struct nouveau_drm *drm = nouveau_drm(dev); 483 struct nouveau_drm *drm = nouveau_drm(dev);
484 struct nvkm_therm *therm = nvxx_therm(&drm->device); 484 struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
485 int ret; 485 int ret;
486 486
487 ret = therm->attr_get(therm, NVKM_THERM_ATTR_FAN_MAX_DUTY); 487 ret = therm->attr_get(therm, NVKM_THERM_ATTR_FAN_MAX_DUTY);
@@ -497,7 +497,7 @@ nouveau_hwmon_set_pwm1_max(struct device *d, struct device_attribute *a,
497{ 497{
498 struct drm_device *dev = dev_get_drvdata(d); 498 struct drm_device *dev = dev_get_drvdata(d);
499 struct nouveau_drm *drm = nouveau_drm(dev); 499 struct nouveau_drm *drm = nouveau_drm(dev);
500 struct nvkm_therm *therm = nvxx_therm(&drm->device); 500 struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
501 long value; 501 long value;
502 int ret; 502 int ret;
503 503
@@ -521,7 +521,7 @@ nouveau_hwmon_get_in0_input(struct device *d,
521{ 521{
522 struct drm_device *dev = dev_get_drvdata(d); 522 struct drm_device *dev = dev_get_drvdata(d);
523 struct nouveau_drm *drm = nouveau_drm(dev); 523 struct nouveau_drm *drm = nouveau_drm(dev);
524 struct nvkm_volt *volt = nvxx_volt(&drm->device); 524 struct nvkm_volt *volt = nvxx_volt(&drm->client.device);
525 int ret; 525 int ret;
526 526
527 ret = nvkm_volt_get(volt); 527 ret = nvkm_volt_get(volt);
@@ -540,7 +540,7 @@ nouveau_hwmon_get_in0_min(struct device *d,
540{ 540{
541 struct drm_device *dev = dev_get_drvdata(d); 541 struct drm_device *dev = dev_get_drvdata(d);
542 struct nouveau_drm *drm = nouveau_drm(dev); 542 struct nouveau_drm *drm = nouveau_drm(dev);
543 struct nvkm_volt *volt = nvxx_volt(&drm->device); 543 struct nvkm_volt *volt = nvxx_volt(&drm->client.device);
544 544
545 if (!volt || !volt->min_uv) 545 if (!volt || !volt->min_uv)
546 return -ENODEV; 546 return -ENODEV;
@@ -557,7 +557,7 @@ nouveau_hwmon_get_in0_max(struct device *d,
557{ 557{
558 struct drm_device *dev = dev_get_drvdata(d); 558 struct drm_device *dev = dev_get_drvdata(d);
559 struct nouveau_drm *drm = nouveau_drm(dev); 559 struct nouveau_drm *drm = nouveau_drm(dev);
560 struct nvkm_volt *volt = nvxx_volt(&drm->device); 560 struct nvkm_volt *volt = nvxx_volt(&drm->client.device);
561 561
562 if (!volt || !volt->max_uv) 562 if (!volt || !volt->max_uv)
563 return -ENODEV; 563 return -ENODEV;
@@ -584,7 +584,7 @@ nouveau_hwmon_get_power1_input(struct device *d, struct device_attribute *a,
584{ 584{
585 struct drm_device *dev = dev_get_drvdata(d); 585 struct drm_device *dev = dev_get_drvdata(d);
586 struct nouveau_drm *drm = nouveau_drm(dev); 586 struct nouveau_drm *drm = nouveau_drm(dev);
587 struct nvkm_iccsense *iccsense = nvxx_iccsense(&drm->device); 587 struct nvkm_iccsense *iccsense = nvxx_iccsense(&drm->client.device);
588 int result = nvkm_iccsense_read_all(iccsense); 588 int result = nvkm_iccsense_read_all(iccsense);
589 589
590 if (result < 0) 590 if (result < 0)
@@ -596,6 +596,32 @@ nouveau_hwmon_get_power1_input(struct device *d, struct device_attribute *a,
 static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO,
                           nouveau_hwmon_get_power1_input, NULL, 0);
 
+static ssize_t
+nouveau_hwmon_get_power1_max(struct device *d, struct device_attribute *a,
+                             char *buf)
+{
+        struct drm_device *dev = dev_get_drvdata(d);
+        struct nouveau_drm *drm = nouveau_drm(dev);
+        struct nvkm_iccsense *iccsense = nvxx_iccsense(&drm->client.device);
+        return sprintf(buf, "%i\n", iccsense->power_w_max);
+}
+
+static SENSOR_DEVICE_ATTR(power1_max, S_IRUGO,
+                          nouveau_hwmon_get_power1_max, NULL, 0);
+
+static ssize_t
+nouveau_hwmon_get_power1_crit(struct device *d, struct device_attribute *a,
+                              char *buf)
+{
+        struct drm_device *dev = dev_get_drvdata(d);
+        struct nouveau_drm *drm = nouveau_drm(dev);
+        struct nvkm_iccsense *iccsense = nvxx_iccsense(&drm->client.device);
+        return sprintf(buf, "%i\n", iccsense->power_w_crit);
+}
+
+static SENSOR_DEVICE_ATTR(power1_crit, S_IRUGO,
+                          nouveau_hwmon_get_power1_crit, NULL, 0);
+
 static struct attribute *hwmon_default_attributes[] = {
         &sensor_dev_attr_name.dev_attr.attr,
         &sensor_dev_attr_update_rate.dev_attr.attr,
@@ -639,6 +665,12 @@ static struct attribute *hwmon_power_attributes[] = {
         NULL
 };
 
+static struct attribute *hwmon_power_caps_attributes[] = {
+        &sensor_dev_attr_power1_max.dev_attr.attr,
+        &sensor_dev_attr_power1_crit.dev_attr.attr,
+        NULL
+};
+
 static const struct attribute_group hwmon_default_attrgroup = {
         .attrs = hwmon_default_attributes,
 };
@@ -657,6 +689,9 @@ static const struct attribute_group hwmon_in0_attrgroup = {
 static const struct attribute_group hwmon_power_attrgroup = {
         .attrs = hwmon_power_attributes,
 };
+static const struct attribute_group hwmon_power_caps_attrgroup = {
+        .attrs = hwmon_power_caps_attributes,
+};
 #endif
 
 int
@@ -664,9 +699,9 @@ nouveau_hwmon_init(struct drm_device *dev)
 {
 #if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
         struct nouveau_drm *drm = nouveau_drm(dev);
-        struct nvkm_therm *therm = nvxx_therm(&drm->device);
-        struct nvkm_volt *volt = nvxx_volt(&drm->device);
-        struct nvkm_iccsense *iccsense = nvxx_iccsense(&drm->device);
+        struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+        struct nvkm_volt *volt = nvxx_volt(&drm->client.device);
+        struct nvkm_iccsense *iccsense = nvxx_iccsense(&drm->client.device);
         struct nouveau_hwmon *hwmon;
         struct device *hwmon_dev;
         int ret = 0;
@@ -728,8 +763,16 @@ nouveau_hwmon_init(struct drm_device *dev)
         if (iccsense && iccsense->data_valid && !list_empty(&iccsense->rails)) {
                 ret = sysfs_create_group(&hwmon_dev->kobj,
                                          &hwmon_power_attrgroup);
+
                 if (ret)
                         goto error;
+
+                if (iccsense->power_w_max && iccsense->power_w_crit) {
+                        ret = sysfs_create_group(&hwmon_dev->kobj,
+                                                 &hwmon_power_caps_attrgroup);
+                        if (ret)
+                                goto error;
+                }
         }
 
         hwmon->hwmon = hwmon_dev;
@@ -759,6 +802,7 @@ nouveau_hwmon_fini(struct drm_device *dev)
                 sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_fan_rpm_attrgroup);
                 sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_in0_attrgroup);
                 sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_power_attrgroup);
+                sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_power_caps_attrgroup);
 
                 hwmon_device_unregister(hwmon->hwmon);
         }
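The new power1_max/power1_crit callbacks follow the usual hwmon sysfs recipe: a show() routine wrapped in SENSOR_DEVICE_ATTR(), gathered into an attribute_group that is registered only when the vbios actually supplied power budget data. A minimal sketch of the same recipe, with illustrative example_* names that are not part of the driver:

#include <linux/device.h>
#include <linux/hwmon-sysfs.h>
#include <linux/sysfs.h>

static int example_power_max_uw = 25000000;  /* stand-in for iccsense->power_w_max */

static ssize_t
example_power1_max(struct device *d, struct device_attribute *a, char *buf)
{
        return sprintf(buf, "%i\n", example_power_max_uw);
}

static SENSOR_DEVICE_ATTR(power1_max, S_IRUGO, example_power1_max, NULL, 0);

static struct attribute *example_power_caps_attrs[] = {
        &sensor_dev_attr_power1_max.dev_attr.attr,
        NULL
};

static const struct attribute_group example_power_caps_group = {
        .attrs = example_power_caps_attrs,
};

/* Registered conditionally, exactly as in the hunk above:
 *      if (iccsense->power_w_max && iccsense->power_w_crit)
 *              ret = sysfs_create_group(&hwmon_dev->kobj,
 *                                       &example_power_caps_group);
 */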
diff --git a/drivers/gpu/drm/nouveau/nouveau_led.c b/drivers/gpu/drm/nouveau/nouveau_led.c
index 3e2f1b6cd4df..2c5e0628da12 100644
--- a/drivers/gpu/drm/nouveau/nouveau_led.c
+++ b/drivers/gpu/drm/nouveau/nouveau_led.c
@@ -38,7 +38,7 @@ nouveau_led_get_brightness(struct led_classdev *led)
 {
         struct drm_device *drm_dev = container_of(led, struct nouveau_led, led)->dev;
         struct nouveau_drm *drm = nouveau_drm(drm_dev);
-        struct nvif_object *device = &drm->device.object;
+        struct nvif_object *device = &drm->client.device.object;
         u32 div, duty;
 
         div = nvif_rd32(device, 0x61c880) & 0x00ffffff;
@@ -55,7 +55,7 @@ nouveau_led_set_brightness(struct led_classdev *led, enum led_brightness value)
 {
         struct drm_device *drm_dev = container_of(led, struct nouveau_led, led)->dev;
         struct nouveau_drm *drm = nouveau_drm(drm_dev);
-        struct nvif_object *device = &drm->device.object;
+        struct nvif_object *device = &drm->client.device.object;
 
         u32 input_clk = 27e6; /* PDISPLAY.SOR[1].PWM is connected to the crystal */
         u32 freq = 100; /* this is what nvidia uses and it should be good-enough */
@@ -78,7 +78,7 @@ int
 nouveau_led_init(struct drm_device *dev)
 {
         struct nouveau_drm *drm = nouveau_drm(dev);
-        struct nvkm_gpio *gpio = nvxx_gpio(&drm->device);
+        struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device);
         struct dcb_gpio_func logo_led;
         int ret;
 
@@ -102,6 +102,7 @@ nouveau_led_init(struct drm_device *dev)
         ret = led_classdev_register(dev->dev, &drm->led->led);
         if (ret) {
                 kfree(drm->led);
+                drm->led = NULL;
                 return ret;
         }
 
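The one-line addition is a use-after-free guard rather than cosmetics: the LED fini and suspend paths use "if (drm->led)" as their only check, so a freed-but-dangling pointer would be dereferenced later. The general shape of the pattern, sketched with a hypothetical helper standing in for led_classdev_register():

static int example_init(struct nouveau_drm *drm)
{
        drm->led = kzalloc(sizeof(*drm->led), GFP_KERNEL);
        if (!drm->led)
                return -ENOMEM;

        if (example_register(drm->led)) {  /* hypothetical registration step */
                kfree(drm->led);
                drm->led = NULL;  /* keep later "if (drm->led)" guards honest */
                return -ENODEV;
        }
        return 0;
}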
diff --git a/drivers/gpu/drm/nouveau/nouveau_nvif.c b/drivers/gpu/drm/nouveau/nouveau_nvif.c
index 15f0925ea13b..b3f29b1ce9ea 100644
--- a/drivers/gpu/drm/nouveau/nouveau_nvif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_nvif.c
@@ -60,20 +60,15 @@ nvkm_client_ioctl(void *priv, bool super, void *data, u32 size, void **hack)
 static int
 nvkm_client_resume(void *priv)
 {
-        return nvkm_client_init(priv);
+        struct nvkm_client *client = priv;
+        return nvkm_object_init(&client->object);
 }
 
 static int
 nvkm_client_suspend(void *priv)
 {
-        return nvkm_client_fini(priv, true);
-}
-
-static void
-nvkm_client_driver_fini(void *priv)
-{
         struct nvkm_client *client = priv;
-        nvkm_client_del(&client);
+        return nvkm_object_fini(&client->object, true);
 }
 
 static int
@@ -108,23 +103,14 @@ static int
 nvkm_client_driver_init(const char *name, u64 device, const char *cfg,
                         const char *dbg, void **ppriv)
 {
-        struct nvkm_client *client;
-        int ret;
-
-        ret = nvkm_client_new(name, device, cfg, dbg, &client);
-        *ppriv = client;
-        if (ret)
-                return ret;
-
-        client->ntfy = nvkm_client_ntfy;
-        return 0;
+        return nvkm_client_new(name, device, cfg, dbg, nvkm_client_ntfy,
+                               (struct nvkm_client **)ppriv);
 }
 
 const struct nvif_driver
 nvif_driver_nvkm = {
         .name = "nvkm",
         .init = nvkm_client_driver_init,
-        .fini = nvkm_client_driver_fini,
         .suspend = nvkm_client_suspend,
         .resume = nvkm_client_resume,
         .ioctl = nvkm_client_ioctl,
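With clients now ordinary nvkm objects, the driver glue shrinks: suspend/resume route through the generic object fini/init, the separate driver fini hook disappears (teardown happens through the object's own lifetime), and the notify callback is handed straight to nvkm_client_new(). A sketch of a caller against the new constructor signature (names here are illustrative):

static int
example_ntfy(const void *header, u32 length, const void *data, u32 size)
{
        return NVKM_NOTIFY_DROP;  /* example policy: discard notifications */
}

static int
example_client_new(void **ppriv)
{
        /* the callback is now a constructor argument instead of being
         * assigned to client->ntfy after the fact */
        return nvkm_client_new("example", 0, NULL, NULL, example_ntfy,
                               (struct nvkm_client **)ppriv);
}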
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index a0a9704cfe2b..1fefc93af1d7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -60,6 +60,7 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
                                              struct dma_buf_attachment *attach,
                                              struct sg_table *sg)
 {
+        struct nouveau_drm *drm = nouveau_drm(dev);
         struct nouveau_bo *nvbo;
         struct reservation_object *robj = attach->dmabuf->resv;
         u32 flags = 0;
@@ -68,7 +69,7 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
                 flags = TTM_PL_FLAG_TT;
 
         ww_mutex_lock(&robj->lock, NULL);
-        ret = nouveau_bo_new(dev, attach->dmabuf->size, 0, flags, 0, 0,
+        ret = nouveau_bo_new(&drm->client, attach->dmabuf->size, 0, flags, 0, 0,
                              sg, robj, &nvbo);
         ww_mutex_unlock(&robj->lock);
         if (ret)
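nouveau_bo_new() now takes the nouveau_cli that owns the buffer instead of the drm_device, part of the groundwork for per-client VMs in the coming MMU rework. Call sites convert mechanically; a sketch of this import path under the new signature (parameters as in the hunk above):

static int
example_import(struct nouveau_drm *drm, struct dma_buf_attachment *attach,
               struct sg_table *sg, struct reservation_object *robj,
               struct nouveau_bo **pnvbo)
{
        u32 flags = TTM_PL_FLAG_TT;

        /* buffers are created against a client, not a device */
        return nouveau_bo_new(&drm->client, attach->dmabuf->size, 0, flags,
                              0, 0, sg, robj, pnvbo);
}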
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index db35ab5883ac..b7ab268f7d6f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -24,10 +24,10 @@ nouveau_sgdma_destroy(struct ttm_tt *ttm)
 }
 
 static int
-nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
+nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *reg)
 {
         struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
-        struct nvkm_mem *node = mem->mm_node;
+        struct nvkm_mem *node = reg->mm_node;
 
         if (ttm->sg) {
                 node->sg = ttm->sg;
@@ -36,7 +36,7 @@ nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
                 node->sg = NULL;
                 node->pages = nvbe->ttm.dma_address;
         }
-        node->size = (mem->num_pages << PAGE_SHIFT) >> 12;
+        node->size = (reg->num_pages << PAGE_SHIFT) >> 12;
 
         nvkm_vm_map(&node->vma[0], node);
         nvbe->node = node;
@@ -58,10 +58,10 @@ static struct ttm_backend_func nv04_sgdma_backend = {
 };
 
 static int
-nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
+nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *reg)
 {
         struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
-        struct nvkm_mem *node = mem->mm_node;
+        struct nvkm_mem *node = reg->mm_node;
 
         /* noop: bound in move_notify() */
         if (ttm->sg) {
@@ -71,7 +71,7 @@ nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
                 node->sg = NULL;
                 node->pages = nvbe->ttm.dma_address;
         }
-        node->size = (mem->num_pages << PAGE_SHIFT) >> 12;
+        node->size = (reg->num_pages << PAGE_SHIFT) >> 12;
         return 0;
 }
 
@@ -100,7 +100,7 @@ nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
         if (!nvbe)
                 return NULL;
 
-        if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA)
+        if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA)
                 nvbe->ttm.ttm.func = &nv04_sgdma_backend;
         else
                 nvbe->ttm.ttm.func = &nv50_sgdma_backend;
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index ec4668a41e01..13e5cc5f07fe 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -36,7 +36,7 @@ static int
 nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
 {
         struct nouveau_drm *drm = nouveau_bdev(man->bdev);
-        struct nvkm_fb *fb = nvxx_fb(&drm->device);
+        struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
         man->priv = fb;
         return 0;
 }
@@ -64,45 +64,45 @@ nvkm_mem_node_cleanup(struct nvkm_mem *node)
 
 static void
 nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
-                         struct ttm_mem_reg *mem)
+                         struct ttm_mem_reg *reg)
 {
         struct nouveau_drm *drm = nouveau_bdev(man->bdev);
-        struct nvkm_ram *ram = nvxx_fb(&drm->device)->ram;
-        nvkm_mem_node_cleanup(mem->mm_node);
-        ram->func->put(ram, (struct nvkm_mem **)&mem->mm_node);
+        struct nvkm_ram *ram = nvxx_fb(&drm->client.device)->ram;
+        nvkm_mem_node_cleanup(reg->mm_node);
+        ram->func->put(ram, (struct nvkm_mem **)&reg->mm_node);
 }
 
 static int
 nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
                          struct ttm_buffer_object *bo,
                          const struct ttm_place *place,
-                         struct ttm_mem_reg *mem)
+                         struct ttm_mem_reg *reg)
 {
         struct nouveau_drm *drm = nouveau_bdev(man->bdev);
-        struct nvkm_ram *ram = nvxx_fb(&drm->device)->ram;
+        struct nvkm_ram *ram = nvxx_fb(&drm->client.device)->ram;
         struct nouveau_bo *nvbo = nouveau_bo(bo);
         struct nvkm_mem *node;
         u32 size_nc = 0;
         int ret;
 
-        if (drm->device.info.ram_size == 0)
+        if (drm->client.device.info.ram_size == 0)
                 return -ENOMEM;
 
         if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
                 size_nc = 1 << nvbo->page_shift;
 
-        ret = ram->func->get(ram, mem->num_pages << PAGE_SHIFT,
-                             mem->page_alignment << PAGE_SHIFT, size_nc,
+        ret = ram->func->get(ram, reg->num_pages << PAGE_SHIFT,
+                             reg->page_alignment << PAGE_SHIFT, size_nc,
                              (nvbo->tile_flags >> 8) & 0x3ff, &node);
         if (ret) {
-                mem->mm_node = NULL;
+                reg->mm_node = NULL;
                 return (ret == -ENOSPC) ? 0 : ret;
         }
 
         node->page_shift = nvbo->page_shift;
 
-        mem->mm_node = node;
-        mem->start = node->offset >> PAGE_SHIFT;
+        reg->mm_node = node;
+        reg->start = node->offset >> PAGE_SHIFT;
         return 0;
 }
 
@@ -127,18 +127,18 @@ nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
 
 static void
 nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
-                         struct ttm_mem_reg *mem)
+                         struct ttm_mem_reg *reg)
 {
-        nvkm_mem_node_cleanup(mem->mm_node);
-        kfree(mem->mm_node);
-        mem->mm_node = NULL;
+        nvkm_mem_node_cleanup(reg->mm_node);
+        kfree(reg->mm_node);
+        reg->mm_node = NULL;
 }
 
 static int
 nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
                          struct ttm_buffer_object *bo,
                          const struct ttm_place *place,
-                         struct ttm_mem_reg *mem)
+                         struct ttm_mem_reg *reg)
 {
         struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
         struct nouveau_bo *nvbo = nouveau_bo(bo);
@@ -150,7 +150,7 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
 
         node->page_shift = 12;
 
-        switch (drm->device.info.family) {
+        switch (drm->client.device.info.family) {
         case NV_DEVICE_INFO_V0_TNT:
         case NV_DEVICE_INFO_V0_CELSIUS:
         case NV_DEVICE_INFO_V0_KELVIN:
@@ -158,7 +158,7 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
         case NV_DEVICE_INFO_V0_CURIE:
                 break;
         case NV_DEVICE_INFO_V0_TESLA:
-                if (drm->device.info.chipset != 0x50)
+                if (drm->client.device.info.chipset != 0x50)
                         node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
                 break;
         case NV_DEVICE_INFO_V0_FERMI:
@@ -169,12 +169,12 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
                 break;
         default:
                 NV_WARN(drm, "%s: unhandled family type %x\n", __func__,
-                        drm->device.info.family);
+                        drm->client.device.info.family);
                 break;
         }
 
-        mem->mm_node = node;
-        mem->start = 0;
+        reg->mm_node = node;
+        reg->start = 0;
         return 0;
 }
 
@@ -197,7 +197,7 @@ static int
 nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
 {
         struct nouveau_drm *drm = nouveau_bdev(man->bdev);
-        struct nvkm_mmu *mmu = nvxx_mmu(&drm->device);
+        struct nvkm_mmu *mmu = nvxx_mmu(&drm->client.device);
         struct nv04_mmu *priv = (void *)mmu;
         struct nvkm_vm *vm = NULL;
         nvkm_vm_ref(priv->vm, &vm, NULL);
@@ -215,20 +215,20 @@ nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
 }
 
 static void
-nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem)
+nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *reg)
 {
-        struct nvkm_mem *node = mem->mm_node;
+        struct nvkm_mem *node = reg->mm_node;
         if (node->vma[0].node)
                 nvkm_vm_put(&node->vma[0]);
-        kfree(mem->mm_node);
-        mem->mm_node = NULL;
+        kfree(reg->mm_node);
+        reg->mm_node = NULL;
 }
 
 static int
 nv04_gart_manager_new(struct ttm_mem_type_manager *man,
                       struct ttm_buffer_object *bo,
                       const struct ttm_place *place,
-                      struct ttm_mem_reg *mem)
+                      struct ttm_mem_reg *reg)
 {
         struct nvkm_mem *node;
         int ret;
@@ -239,15 +239,15 @@ nv04_gart_manager_new(struct ttm_mem_type_manager *man,
 
         node->page_shift = 12;
 
-        ret = nvkm_vm_get(man->priv, mem->num_pages << 12, node->page_shift,
+        ret = nvkm_vm_get(man->priv, reg->num_pages << 12, node->page_shift,
                           NV_MEM_ACCESS_RW, &node->vma[0]);
         if (ret) {
                 kfree(node);
                 return ret;
         }
 
-        mem->mm_node = node;
-        mem->start = node->vma[0].offset >> PAGE_SHIFT;
+        reg->mm_node = node;
+        reg->start = node->vma[0].offset >> PAGE_SHIFT;
         return 0;
 }
 
@@ -339,7 +339,7 @@ nouveau_ttm_global_release(struct nouveau_drm *drm)
 int
 nouveau_ttm_init(struct nouveau_drm *drm)
 {
-        struct nvkm_device *device = nvxx_device(&drm->device);
+        struct nvkm_device *device = nvxx_device(&drm->client.device);
         struct nvkm_pci *pci = device->pci;
         struct drm_device *dev = drm->dev;
         u8 bits;
@@ -352,8 +352,8 @@ nouveau_ttm_init(struct nouveau_drm *drm)
                 drm->agp.cma = pci->agp.cma;
         }
 
-        bits = nvxx_mmu(&drm->device)->dma_bits;
-        if (nvxx_device(&drm->device)->func->pci) {
+        bits = nvxx_mmu(&drm->client.device)->dma_bits;
+        if (nvxx_device(&drm->client.device)->func->pci) {
                 if (drm->agp.bridge)
                         bits = 32;
         } else if (device->func->tegra) {
@@ -396,7 +396,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
         }
 
         /* VRAM init */
-        drm->gem.vram_available = drm->device.info.ram_user;
+        drm->gem.vram_available = drm->client.device.info.ram_user;
 
         arch_io_reserve_memtype_wc(device->func->resource_addr(device, 1),
                                    device->func->resource_size(device, 1));
@@ -413,7 +413,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
 
         /* GART init */
         if (!drm->agp.bridge) {
-                drm->gem.gart_available = nvxx_mmu(&drm->device)->limit;
+                drm->gem.gart_available = nvxx_mmu(&drm->client.device)->limit;
         } else {
                 drm->gem.gart_available = drm->agp.size;
         }
@@ -433,7 +433,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
 void
 nouveau_ttm_fini(struct nouveau_drm *drm)
 {
-        struct nvkm_device *device = nvxx_device(&drm->device);
+        struct nvkm_device *device = nvxx_device(&drm->client.device);
 
         ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
         ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);
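This file, like the hwmon and led hunks above, is dominated by the same mechanical substitution: the nvif_device handle now lives on the DRM's master client, so every nvxx_*() accessor takes &drm->client.device. In sketch form:

static u8
example_dma_bits(struct nouveau_drm *drm)
{
        /* was: nvxx_mmu(&drm->device)->dma_bits */
        return nvxx_mmu(&drm->client.device)->dma_bits;
}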
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c
index 08f9c6fa0f7f..58508b580f08 100644
--- a/drivers/gpu/drm/nouveau/nouveau_usif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_usif.c
@@ -103,7 +103,7 @@ usif_notify(const void *header, u32 length, const void *data, u32 size)
                 }
                 break;
         default:
-                BUG_ON(1);
+                BUG();
                 break;
         }
 
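The BUG_ON(1) cleanups here and in nv50_display.c below are behaviour-neutral; BUG() simply states the intent (an unreachable branch) directly, while BUG_ON() is for asserting a condition. An illustrative, self-contained example:

enum example_kind { EXAMPLE_A, EXAMPLE_B };

static void example(enum example_kind kind, u32 size)
{
        BUG_ON(size > PAGE_SIZE);  /* assert a condition */

        switch (kind) {
        case EXAMPLE_A:
        case EXAMPLE_B:
                break;
        default:
                BUG();  /* unreachable by construction */
        }
}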
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index c6a180a0c284..eef22c6b9665 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -13,13 +13,13 @@ static unsigned int
 nouveau_vga_set_decode(void *priv, bool state)
 {
         struct nouveau_drm *drm = nouveau_drm(priv);
-        struct nvif_object *device = &drm->device.object;
+        struct nvif_object *device = &drm->client.device.object;
 
-        if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE &&
-            drm->device.info.chipset >= 0x4c)
+        if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE &&
+            drm->client.device.info.chipset >= 0x4c)
                 nvif_wr32(device, 0x088060, state);
         else
-        if (drm->device.info.chipset >= 0x40)
+        if (drm->client.device.info.chipset >= 0x40)
                 nvif_wr32(device, 0x088054, state);
         else
                 nvif_wr32(device, 0x001854, state);
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 6a2b187e3c3b..01731dbeb3d8 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -136,7 +136,7 @@ nv04_fbcon_accel_init(struct fb_info *info)
         struct drm_device *dev = nfbdev->helper.dev;
         struct nouveau_drm *drm = nouveau_drm(dev);
         struct nouveau_channel *chan = drm->channel;
-        struct nvif_device *device = &drm->device;
+        struct nvif_device *device = &drm->client.device;
         int surface_fmt, pattern_fmt, rect_fmt;
         int ret;
 
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index 79bc01111351..6477b7069e14 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -76,9 +76,9 @@ nv17_fence_context_new(struct nouveau_channel *chan)
 {
         struct nv10_fence_priv *priv = chan->drm->fence;
         struct nv10_fence_chan *fctx;
-        struct ttm_mem_reg *mem = &priv->bo->bo.mem;
-        u32 start = mem->start * PAGE_SIZE;
-        u32 limit = start + mem->size - 1;
+        struct ttm_mem_reg *reg = &priv->bo->bo.mem;
+        u32 start = reg->start * PAGE_SIZE;
+        u32 limit = start + reg->size - 1;
         int ret = 0;
 
         fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
@@ -129,7 +129,7 @@ nv17_fence_create(struct nouveau_drm *drm)
         priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
         spin_lock_init(&priv->lock);
 
-        ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+        ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
                              0, 0x0000, NULL, NULL, &priv->bo);
         if (!ret) {
                 ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM, false);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 452da483ca01..de5e322b4e14 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -447,18 +447,18 @@ nv50_dmac_ctxdma_new(struct nv50_dmac *dmac, struct nouveau_framebuffer *fb)
         args.base.target = NV_DMA_V0_TARGET_VRAM;
         args.base.access = NV_DMA_V0_ACCESS_RDWR;
         args.base.start = 0;
-        args.base.limit = drm->device.info.ram_user - 1;
+        args.base.limit = drm->client.device.info.ram_user - 1;
 
-        if (drm->device.info.chipset < 0x80) {
+        if (drm->client.device.info.chipset < 0x80) {
                 args.nv50.part = NV50_DMA_V0_PART_256;
                 argc += sizeof(args.nv50);
         } else
-        if (drm->device.info.chipset < 0xc0) {
+        if (drm->client.device.info.chipset < 0xc0) {
                 args.nv50.part = NV50_DMA_V0_PART_256;
                 args.nv50.kind = kind;
                 argc += sizeof(args.nv50);
         } else
-        if (drm->device.info.chipset < 0xd0) {
+        if (drm->client.device.info.chipset < 0xd0) {
                 args.gf100.kind = kind;
                 argc += sizeof(args.gf100);
         } else {
@@ -848,7 +848,7 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw,
         asyw->image.kind = (fb->nvbo->tile_flags & 0x0000ff00) >> 8;
         if (asyw->image.kind) {
                 asyw->image.layout = 0;
-                if (drm->device.info.chipset >= 0xc0)
+                if (drm->client.device.info.chipset >= 0xc0)
                         asyw->image.block = fb->nvbo->tile_mode >> 4;
                 else
                         asyw->image.block = fb->nvbo->tile_mode;
@@ -1397,7 +1397,7 @@ nv50_base_ntfy_wait_begun(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
         struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
         struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
-        if (nvif_msec(&drm->device, 2000ULL,
+        if (nvif_msec(&drm->client.device, 2000ULL,
                 u32 data = nouveau_bo_rd32(disp->sync, asyw->ntfy.offset / 4);
                 if ((data & 0xc0000000) == 0x40000000)
                         break;
@@ -1522,7 +1522,7 @@ nv50_base_new(struct nouveau_drm *drm, struct nv50_head *head,
                 return ret;
         }
 
-        ret = nv50_base_create(&drm->device, disp->disp, base->id,
+        ret = nv50_base_create(&drm->client.device, disp->disp, base->id,
                                disp->sync->bo.offset, &base->chan);
         if (ret)
                 return ret;
@@ -2394,7 +2394,7 @@ static int
 nv50_head_create(struct drm_device *dev, int index)
 {
         struct nouveau_drm *drm = nouveau_drm(dev);
-        struct nvif_device *device = &drm->device;
+        struct nvif_device *device = &drm->client.device;
         struct nv50_disp *disp = nv50_disp(dev);
         struct nv50_head *head;
         struct nv50_base *base;
@@ -2428,7 +2428,7 @@ nv50_head_create(struct drm_device *dev, int index)
         drm_crtc_helper_add(crtc, &nv50_head_help);
         drm_mode_crtc_set_gamma_size(crtc, 256);
 
-        ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM,
+        ret = nouveau_bo_new(&drm->client, 8192, 0x100, TTM_PL_FLAG_VRAM,
                              0, 0x0000, NULL, NULL, &head->base.lut.nvbo);
         if (!ret) {
                 ret = nouveau_bo_pin(head->base.lut.nvbo, TTM_PL_FLAG_VRAM, true);
@@ -2667,7 +2667,7 @@ static int
 nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
 {
         struct nouveau_drm *drm = nouveau_drm(connector->dev);
-        struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
+        struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
         struct nvkm_i2c_bus *bus;
         struct nouveau_encoder *nv_encoder;
         struct drm_encoder *encoder;
@@ -3623,7 +3623,7 @@ nv50_sor_enable(struct drm_encoder *encoder)
                 nv50_audio_enable(encoder, mode);
                 break;
         default:
-                BUG_ON(1);
+                BUG();
                 break;
         }
 
@@ -3657,7 +3657,7 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
 {
         struct nouveau_connector *nv_connector = nouveau_connector(connector);
         struct nouveau_drm *drm = nouveau_drm(connector->dev);
-        struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
+        struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
         struct nouveau_encoder *nv_encoder;
         struct drm_encoder *encoder;
         int type, ret;
@@ -3796,7 +3796,7 @@ nv50_pior_enable(struct drm_encoder *encoder)
                 proto = 0x0;
                 break;
         default:
-                BUG_ON(1);
+                BUG();
                 break;
         }
 
@@ -3842,7 +3842,7 @@ static int
 nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
 {
         struct nouveau_drm *drm = nouveau_drm(connector->dev);
-        struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
+        struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
         struct nvkm_i2c_bus *bus = NULL;
         struct nvkm_i2c_aux *aux = NULL;
         struct i2c_adapter *ddc;
@@ -3915,7 +3915,7 @@ nv50_disp_atomic_commit_core(struct nouveau_drm *drm, u32 interlock)
         evo_data(push, 0x00000000);
         nouveau_bo_wr32(disp->sync, 0, 0x00000000);
         evo_kick(push, core);
-        if (nvif_msec(&drm->device, 2000ULL,
+        if (nvif_msec(&drm->client.device, 2000ULL,
                 if (nouveau_bo_rd32(disp->sync, 0))
                         break;
                 usleep_range(1, 2);
@@ -4427,7 +4427,7 @@ module_param_named(atomic, nouveau_atomic, int, 0400);
 int
 nv50_display_create(struct drm_device *dev)
 {
-        struct nvif_device *device = &nouveau_drm(dev)->device;
+        struct nvif_device *device = &nouveau_drm(dev)->client.device;
         struct nouveau_drm *drm = nouveau_drm(dev);
         struct dcb_table *dcb = &drm->vbios.dcb;
         struct drm_connector *connector, *tmp;
@@ -4451,7 +4451,7 @@ nv50_display_create(struct drm_device *dev)
         dev->driver->driver_features |= DRIVER_ATOMIC;
 
         /* small shared memory area we use for notifiers and semaphores */
-        ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+        ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
                              0, 0x0000, NULL, NULL, &disp->sync);
         if (!ret) {
                 ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM, true);
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index f68c7054fd53..a369d978e267 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -37,9 +37,9 @@ nv50_fence_context_new(struct nouveau_channel *chan)
 {
         struct nv10_fence_priv *priv = chan->drm->fence;
         struct nv10_fence_chan *fctx;
-        struct ttm_mem_reg *mem = &priv->bo->bo.mem;
-        u32 start = mem->start * PAGE_SIZE;
-        u32 limit = start + mem->size - 1;
+        struct ttm_mem_reg *reg = &priv->bo->bo.mem;
+        u32 start = reg->start * PAGE_SIZE;
+        u32 limit = start + reg->size - 1;
         int ret;
 
         fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
@@ -82,7 +82,7 @@ nv50_fence_create(struct nouveau_drm *drm)
         priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
         spin_lock_init(&priv->lock);
 
-        ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+        ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
                              0, 0x0000, NULL, NULL, &priv->bo);
         if (!ret) {
                 ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM, false);
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 52b87ae83e7b..86360fca4b18 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -193,7 +193,7 @@ nv84_fence_destroy(struct nouveau_drm *drm)
 int
 nv84_fence_create(struct nouveau_drm *drm)
 {
-        struct nvkm_fifo *fifo = nvxx_fifo(&drm->device);
+        struct nvkm_fifo *fifo = nvxx_fifo(&drm->client.device);
         struct nv84_fence_priv *priv;
         u32 domain;
         int ret;
@@ -213,14 +213,14 @@ nv84_fence_create(struct nouveau_drm *drm)
         priv->base.uevent = true;
 
         /* Use VRAM if there is any ; otherwise fallback to system memory */
-        domain = drm->device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM :
+        domain = drm->client.device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM :
                          /*
                           * fences created in sysmem must be non-cached or we
                           * will lose CPU/GPU coherency!
                           */
                          TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED;
-        ret = nouveau_bo_new(drm->dev, 16 * priv->base.contexts, 0, domain, 0,
-                             0, NULL, NULL, &priv->bo);
+        ret = nouveau_bo_new(&drm->client, 16 * priv->base.contexts, 0,
+                             domain, 0, 0, NULL, NULL, &priv->bo);
         if (ret == 0) {
                 ret = nouveau_bo_pin(priv->bo, domain, false);
                 if (ret == 0) {
@@ -233,7 +233,7 @@ nv84_fence_create(struct nouveau_drm *drm)
         }
 
         if (ret == 0)
-                ret = nouveau_bo_new(drm->dev, 16 * priv->base.contexts, 0,
+                ret = nouveau_bo_new(&drm->client, 16 * priv->base.contexts, 0,
                                      TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED, 0,
                                      0, NULL, NULL, &priv->bo_gart);
         if (ret == 0) {
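The rewrapped call keeps the placement policy intact: fence buffers go to VRAM when the board has any, otherwise to uncached system memory, since a cached CPU mapping could serve stale fence values. The decision, isolated as a sketch:

static u32
example_fence_domain(struct nouveau_drm *drm)
{
        if (drm->client.device.info.ram_size != 0)
                return TTM_PL_FLAG_VRAM;
        /* no VRAM (e.g. Tegra): sysmem fences must bypass the CPU cache */
        return TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED;
}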
diff --git a/drivers/gpu/drm/nouveau/nvif/Kbuild b/drivers/gpu/drm/nouveau/nvif/Kbuild
index ff8ed3a04d06..067b5e9f5ec1 100644
--- a/drivers/gpu/drm/nouveau/nvif/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvif/Kbuild
@@ -1,4 +1,5 @@
 nvif-y := nvif/object.o
 nvif-y += nvif/client.o
 nvif-y += nvif/device.o
+nvif-y += nvif/driver.o
 nvif-y += nvif/notify.o
diff --git a/drivers/gpu/drm/nouveau/nvif/client.c b/drivers/gpu/drm/nouveau/nvif/client.c
index 29c20dfd894d..12db54965c20 100644
--- a/drivers/gpu/drm/nouveau/nvif/client.c
+++ b/drivers/gpu/drm/nouveau/nvif/client.c
@@ -26,6 +26,9 @@
 #include <nvif/driver.h>
 #include <nvif/ioctl.h>
 
+#include <nvif/class.h>
+#include <nvif/if0000.h>
+
 int
 nvif_client_ioctl(struct nvif_client *client, void *data, u32 size)
 {
@@ -47,37 +50,29 @@ nvif_client_resume(struct nvif_client *client)
 void
 nvif_client_fini(struct nvif_client *client)
 {
+        nvif_object_fini(&client->object);
         if (client->driver) {
-                client->driver->fini(client->object.priv);
+                if (client->driver->fini)
+                        client->driver->fini(client->object.priv);
                 client->driver = NULL;
-                client->object.client = NULL;
-                nvif_object_fini(&client->object);
         }
 }
 
-static const struct nvif_driver *
-nvif_drivers[] = {
-#ifdef __KERNEL__
-        &nvif_driver_nvkm,
-#else
-        &nvif_driver_drm,
-        &nvif_driver_lib,
-        &nvif_driver_null,
-#endif
-        NULL
-};
-
 int
-nvif_client_init(const char *driver, const char *name, u64 device,
-                 const char *cfg, const char *dbg, struct nvif_client *client)
+nvif_client_init(struct nvif_client *parent, const char *name, u64 device,
+                 struct nvif_client *client)
 {
+        struct nvif_client_v0 args = { .device = device };
         struct {
                 struct nvif_ioctl_v0 ioctl;
                 struct nvif_ioctl_nop_v0 nop;
-        } args = {};
-        int ret, i;
+        } nop = {};
+        int ret;
 
-        ret = nvif_object_init(NULL, 0, 0, NULL, 0, &client->object);
+        strncpy(args.name, name, sizeof(args.name));
+        ret = nvif_object_init(parent != client ? &parent->object : NULL,
+                               0, NVIF_CLASS_CLIENT, &args, sizeof(args),
+                               &client->object);
         if (ret)
                 return ret;
 
@@ -85,19 +80,11 @@ nvif_client_init(const char *driver, const char *name, u64 device,
         client->object.handle = ~0;
         client->route = NVIF_IOCTL_V0_ROUTE_NVIF;
         client->super = true;
-
-        for (i = 0, ret = -EINVAL; (client->driver = nvif_drivers[i]); i++) {
-                if (!driver || !strcmp(client->driver->name, driver)) {
-                        ret = client->driver->init(name, device, cfg, dbg,
-                                                   &client->object.priv);
-                        if (!ret || driver)
-                                break;
-                }
-        }
+        client->driver = parent->driver;
 
         if (ret == 0) {
-                ret = nvif_client_ioctl(client, &args, sizeof(args));
-                client->version = args.nop.version;
+                ret = nvif_client_ioctl(client, &nop, sizeof(nop));
+                client->version = nop.nop.version;
         }
 
         if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvif/driver.c b/drivers/gpu/drm/nouveau/nvif/driver.c
new file mode 100644
index 000000000000..701330956e33
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/driver.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include <nvif/driver.h>
+#include <nvif/client.h>
+
+static const struct nvif_driver *
+nvif_driver[] = {
+#ifdef __KERNEL__
+        &nvif_driver_nvkm,
+#else
+        &nvif_driver_drm,
+        &nvif_driver_lib,
+        &nvif_driver_null,
+#endif
+        NULL
+};
+
+int
+nvif_driver_init(const char *drv, const char *cfg, const char *dbg,
+                 const char *name, u64 device, struct nvif_client *client)
+{
+        int ret = -EINVAL, i;
+
+        for (i = 0; (client->driver = nvif_driver[i]); i++) {
+                if (!drv || !strcmp(client->driver->name, drv)) {
+                        ret = client->driver->init(name, device, cfg, dbg,
+                                                   &client->object.priv);
+                        if (ret == 0)
+                                break;
+                        client->driver->fini(client->object.priv);
+                }
+        }
+
+        if (ret == 0)
+                ret = nvif_client_init(client, name, device, client);
+        return ret;
+}
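nvif_driver_init() now owns the backend probing that nvif_client_init() used to do inline: it walks the driver table, keeps the first driver whose init() succeeds (tearing down failed attempts), and then constructs the root client through the regular object path, with the client acting as its own parent. A hypothetical host-side caller might look like:

static int
example_open(struct nvif_client *root)
{
        /* drv == NULL means "accept the first backend that works";
         * cfg/dbg are optional option strings */
        int ret = nvif_driver_init(NULL, NULL, NULL, "example", 0, root);
        if (ret)
                return ret;

        /* further clients can now be created with "root" as parent via
         * nvif_client_init(root, "worker", 0, &worker); */
        return 0;
}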
diff --git a/drivers/gpu/drm/nouveau/nvkm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/Kbuild
index 2832147b676c..e664378f6eda 100644
--- a/drivers/gpu/drm/nouveau/nvkm/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/Kbuild
@@ -1,3 +1,4 @@
 include $(src)/nvkm/core/Kbuild
+include $(src)/nvkm/falcon/Kbuild
 include $(src)/nvkm/subdev/Kbuild
 include $(src)/nvkm/engine/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/client.c b/drivers/gpu/drm/nouveau/nvkm/core/client.c
index e1943910858e..0d3a896892b4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/client.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/client.c
@@ -31,6 +31,43 @@
 #include <nvif/if0000.h>
 #include <nvif/unpack.h>
 
+static int
+nvkm_uclient_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+                 struct nvkm_object **pobject)
+{
+        union {
+                struct nvif_client_v0 v0;
+        } *args = argv;
+        struct nvkm_client *client;
+        int ret = -ENOSYS;
+
+        if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))){
+                args->v0.name[sizeof(args->v0.name) - 1] = 0;
+                ret = nvkm_client_new(args->v0.name, args->v0.device, NULL,
+                                      NULL, oclass->client->ntfy, &client);
+                if (ret)
+                        return ret;
+        } else
+                return ret;
+
+        client->object.client = oclass->client;
+        client->object.handle = oclass->handle;
+        client->object.route = oclass->route;
+        client->object.token = oclass->token;
+        client->object.object = oclass->object;
+        client->debug = oclass->client->debug;
+        *pobject = &client->object;
+        return 0;
+}
+
+const struct nvkm_sclass
+nvkm_uclient_sclass = {
+        .oclass = NVIF_CLASS_CLIENT,
+        .minver = 0,
+        .maxver = 0,
+        .ctor = nvkm_uclient_new,
+};
+
 struct nvkm_client_notify {
         struct nvkm_client *client;
         struct nvkm_notify n;
@@ -138,17 +175,30 @@ nvkm_client_notify_new(struct nvkm_object *object,
         return ret;
 }
 
+static const struct nvkm_object_func nvkm_client;
+struct nvkm_client *
+nvkm_client_search(struct nvkm_client *client, u64 handle)
+{
+        struct nvkm_object *object;
+
+        object = nvkm_object_search(client, handle, &nvkm_client);
+        if (IS_ERR(object))
+                return (void *)object;
+
+        return nvkm_client(object);
+}
+
 static int
-nvkm_client_mthd_devlist(struct nvkm_object *object, void *data, u32 size)
+nvkm_client_mthd_devlist(struct nvkm_client *client, void *data, u32 size)
 {
         union {
-                struct nv_client_devlist_v0 v0;
+                struct nvif_client_devlist_v0 v0;
         } *args = data;
         int ret = -ENOSYS;
 
-        nvif_ioctl(object, "client devlist size %d\n", size);
+        nvif_ioctl(&client->object, "client devlist size %d\n", size);
         if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
-                nvif_ioctl(object, "client devlist vers %d count %d\n",
+                nvif_ioctl(&client->object, "client devlist vers %d count %d\n",
                            args->v0.version, args->v0.count);
                 if (size == sizeof(args->v0.device[0]) * args->v0.count) {
                         ret = nvkm_device_list(args->v0.device, args->v0.count);
@@ -167,9 +217,10 @@ nvkm_client_mthd_devlist(struct nvkm_object *object, void *data, u32 size)
 static int
 nvkm_client_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
 {
+        struct nvkm_client *client = nvkm_client(object);
         switch (mthd) {
-        case NV_CLIENT_DEVLIST:
-                return nvkm_client_mthd_devlist(object, data, size);
+        case NVIF_CLIENT_V0_DEVLIST:
+                return nvkm_client_mthd_devlist(client, data, size);
         default:
                 break;
         }
@@ -190,7 +241,8 @@ nvkm_client_child_get(struct nvkm_object *object, int index,
         const struct nvkm_sclass *sclass;
 
         switch (index) {
-        case 0: sclass = &nvkm_udevice_sclass; break;
+        case 0: sclass = &nvkm_uclient_sclass; break;
+        case 1: sclass = &nvkm_udevice_sclass; break;
         default:
                 return -EINVAL;
         }
@@ -200,110 +252,54 @@ nvkm_client_child_get(struct nvkm_object *object, int index,
         return 0;
 }
 
-static const struct nvkm_object_func
-nvkm_client_object_func = {
-        .mthd = nvkm_client_mthd,
-        .sclass = nvkm_client_child_get,
-};
-
-void
-nvkm_client_remove(struct nvkm_client *client, struct nvkm_object *object)
-{
-        if (!RB_EMPTY_NODE(&object->node))
-                rb_erase(&object->node, &client->objroot);
-}
-
-bool
-nvkm_client_insert(struct nvkm_client *client, struct nvkm_object *object)
-{
-        struct rb_node **ptr = &client->objroot.rb_node;
-        struct rb_node *parent = NULL;
-
-        while (*ptr) {
-                struct nvkm_object *this =
-                        container_of(*ptr, typeof(*this), node);
-                parent = *ptr;
-                if (object->object < this->object)
-                        ptr = &parent->rb_left;
-                else
-                if (object->object > this->object)
-                        ptr = &parent->rb_right;
-                else
-                        return false;
-        }
-
-        rb_link_node(&object->node, parent, ptr);
-        rb_insert_color(&object->node, &client->objroot);
-        return true;
-}
-
-struct nvkm_object *
-nvkm_client_search(struct nvkm_client *client, u64 handle)
-{
-        struct rb_node *node = client->objroot.rb_node;
-        while (node) {
-                struct nvkm_object *object =
-                        container_of(node, typeof(*object), node);
-                if (handle < object->object)
-                        node = node->rb_left;
-                else
-                if (handle > object->object)
-                        node = node->rb_right;
-                else
-                        return object;
-        }
-        return NULL;
-}
-
-int
-nvkm_client_fini(struct nvkm_client *client, bool suspend)
+static int
+nvkm_client_fini(struct nvkm_object *object, bool suspend)
 {
-        struct nvkm_object *object = &client->object;
+        struct nvkm_client *client = nvkm_client(object);
         const char *name[2] = { "fini", "suspend" };
         int i;
         nvif_debug(object, "%s notify\n", name[suspend]);
         for (i = 0; i < ARRAY_SIZE(client->notify); i++)
                 nvkm_client_notify_put(client, i);
-        return nvkm_object_fini(&client->object, suspend);
-}
-
-int
-nvkm_client_init(struct nvkm_client *client)
-{
-        return nvkm_object_init(&client->object);
+        return 0;
 }
 
-void
-nvkm_client_del(struct nvkm_client **pclient)
+static void *
+nvkm_client_dtor(struct nvkm_object *object)
 {
-        struct nvkm_client *client = *pclient;
+        struct nvkm_client *client = nvkm_client(object);
         int i;
-        if (client) {
-                nvkm_client_fini(client, false);
-                for (i = 0; i < ARRAY_SIZE(client->notify); i++)
-                        nvkm_client_notify_del(client, i);
-                nvkm_object_dtor(&client->object);
-                kfree(*pclient);
-                *pclient = NULL;
-        }
+        for (i = 0; i < ARRAY_SIZE(client->notify); i++)
+                nvkm_client_notify_del(client, i);
+        return client;
 }
 
+static const struct nvkm_object_func
+nvkm_client = {
+        .dtor = nvkm_client_dtor,
+        .fini = nvkm_client_fini,
+        .mthd = nvkm_client_mthd,
+        .sclass = nvkm_client_child_get,
+};
+
 int
 nvkm_client_new(const char *name, u64 device, const char *cfg,
-                const char *dbg, struct nvkm_client **pclient)
+                const char *dbg,
+                int (*ntfy)(const void *, u32, const void *, u32),
+                struct nvkm_client **pclient)
 {
-        struct nvkm_oclass oclass = {};
+        struct nvkm_oclass oclass = { .base = nvkm_uclient_sclass };
         struct nvkm_client *client;
 
         if (!(client = *pclient = kzalloc(sizeof(*client), GFP_KERNEL)))
                 return -ENOMEM;
         oclass.client = client;
 
-        nvkm_object_ctor(&nvkm_client_object_func, &oclass, &client->object);
+        nvkm_object_ctor(&nvkm_client, &oclass, &client->object);
         snprintf(client->name, sizeof(client->name), "%s", name);
         client->device = device;
         client->debug = nvkm_dbgopt(dbg, "CLIENT");
         client->objroot = RB_ROOT;
-        client->dmaroot = RB_ROOT;
+        client->ntfy = ntfy;
         return 0;
 }
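The rb-tree helpers removed here do not vanish: per the nvkm_ioctl_new() hunk below, insertion and lookup move to generic nvkm_object_insert()/nvkm_object_search(), and nvkm_client_search() (added earlier in this file) becomes a type-checked wrapper that passes &nvkm_client as a runtime type tag. Sketch of a caller against the new lookup:

static int
example_lookup(struct nvkm_client *client, u64 handle)
{
        struct nvkm_client *found = nvkm_client_search(client, handle);

        if (IS_ERR(found))
                return PTR_ERR(found);
        /* "found" really is a client: the search compared the object's
         * func table against &nvkm_client before returning it */
        return 0;
}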
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/engine.c b/drivers/gpu/drm/nouveau/nvkm/core/engine.c
index ee8e5831fe37..b6c916954a10 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/engine.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/engine.c
@@ -27,6 +27,14 @@
 
 #include <subdev/fb.h>
 
+bool
+nvkm_engine_chsw_load(struct nvkm_engine *engine)
+{
+        if (engine->func->chsw_load)
+                return engine->func->chsw_load(engine);
+        return false;
+}
+
 void
 nvkm_engine_unref(struct nvkm_engine **pengine)
 {
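nvkm_engine_chsw_load() is the base-class half of the new chsw_load() method used by channel recovery: engines able to report an in-flight context switch implement the hook, everyone else inherits the safe "false". An engine would opt in roughly like this (illustrative func table, not from the tree):

static bool
example_engine_chsw_load(struct nvkm_engine *engine)
{
        /* e.g. decode a CTXSW status register and report a pending load */
        return false;
}

static const struct nvkm_engine_func
example_engine = {
        .chsw_load = example_engine_chsw_load,
};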
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
index b0db51847c36..be19bbe56bba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
@@ -29,7 +29,8 @@
 #include <nvif/ioctl.h>
 
 static int
-nvkm_ioctl_nop(struct nvkm_object *object, void *data, u32 size)
+nvkm_ioctl_nop(struct nvkm_client *client,
+               struct nvkm_object *object, void *data, u32 size)
 {
         union {
                 struct nvif_ioctl_nop_v0 v0;
@@ -46,7 +47,8 @@ nvkm_ioctl_nop(struct nvkm_object *object, void *data, u32 size)
 }
 
 static int
-nvkm_ioctl_sclass(struct nvkm_object *object, void *data, u32 size)
+nvkm_ioctl_sclass(struct nvkm_client *client,
+                  struct nvkm_object *object, void *data, u32 size)
 {
         union {
                 struct nvif_ioctl_sclass_v0 v0;
@@ -78,12 +80,12 @@ nvkm_ioctl_sclass(struct nvkm_object *object, void *data, u32 size)
 }
 
 static int
-nvkm_ioctl_new(struct nvkm_object *parent, void *data, u32 size)
+nvkm_ioctl_new(struct nvkm_client *client,
+               struct nvkm_object *parent, void *data, u32 size)
 {
         union {
                 struct nvif_ioctl_new_v0 v0;
         } *args = data;
-        struct nvkm_client *client = parent->client;
         struct nvkm_object *object = NULL;
         struct nvkm_oclass oclass;
         int ret = -ENOSYS, i = 0;
@@ -104,9 +106,11 @@ nvkm_ioctl_new(struct nvkm_object *parent, void *data, u32 size)
 
         do {
                 memset(&oclass, 0x00, sizeof(oclass));
-                oclass.client = client;
                 oclass.handle = args->v0.handle;
+                oclass.route = args->v0.route;
+                oclass.token = args->v0.token;
                 oclass.object = args->v0.object;
+                oclass.client = client;
                 oclass.parent = parent;
                 ret = parent->func->sclass(parent, i++, &oclass);
                 if (ret)
@@ -125,10 +129,7 @@ nvkm_ioctl_new(struct nvkm_object *parent, void *data, u32 size)
         ret = nvkm_object_init(object);
         if (ret == 0) {
                 list_add(&object->head, &parent->tree);
-                object->route = args->v0.route;
-                object->token = args->v0.token;
-                object->object = args->v0.object;
-                if (nvkm_client_insert(client, object)) {
+                if (nvkm_object_insert(object)) {
                         client->data = object;
                         return 0;
                 }
@@ -142,7 +143,8 @@ nvkm_ioctl_new(struct nvkm_object *parent, void *data, u32 size)
 }
 
 static int
-nvkm_ioctl_del(struct nvkm_object *object, void *data, u32 size)
+nvkm_ioctl_del(struct nvkm_client *client,
+               struct nvkm_object *object, void *data, u32 size)
 {
         union {
                 struct nvif_ioctl_del none;
@@ -156,11 +158,12 @@ nvkm_ioctl_del(struct nvkm_object *object, void *data, u32 size)
156 nvkm_object_del(&object); 158 nvkm_object_del(&object);
157 } 159 }
158 160
159 return ret; 161 return ret ? ret : 1;
160} 162}
161 163
162static int 164static int
163nvkm_ioctl_mthd(struct nvkm_object *object, void *data, u32 size) 165nvkm_ioctl_mthd(struct nvkm_client *client,
166 struct nvkm_object *object, void *data, u32 size)
164{ 167{
165 union { 168 union {
166 struct nvif_ioctl_mthd_v0 v0; 169 struct nvif_ioctl_mthd_v0 v0;
@@ -179,7 +182,8 @@ nvkm_ioctl_mthd(struct nvkm_object *object, void *data, u32 size)
179 182
180 183
181static int 184static int
182nvkm_ioctl_rd(struct nvkm_object *object, void *data, u32 size) 185nvkm_ioctl_rd(struct nvkm_client *client,
186 struct nvkm_object *object, void *data, u32 size)
183{ 187{
184 union { 188 union {
185 struct nvif_ioctl_rd_v0 v0; 189 struct nvif_ioctl_rd_v0 v0;
@@ -218,7 +222,8 @@ nvkm_ioctl_rd(struct nvkm_object *object, void *data, u32 size)
218} 222}
219 223
220static int 224static int
221nvkm_ioctl_wr(struct nvkm_object *object, void *data, u32 size) 225nvkm_ioctl_wr(struct nvkm_client *client,
226 struct nvkm_object *object, void *data, u32 size)
222{ 227{
223 union { 228 union {
224 struct nvif_ioctl_wr_v0 v0; 229 struct nvif_ioctl_wr_v0 v0;
@@ -246,7 +251,8 @@ nvkm_ioctl_wr(struct nvkm_object *object, void *data, u32 size)
246} 251}
247 252
248static int 253static int
249nvkm_ioctl_map(struct nvkm_object *object, void *data, u32 size) 254nvkm_ioctl_map(struct nvkm_client *client,
255 struct nvkm_object *object, void *data, u32 size)
250{ 256{
251 union { 257 union {
252 struct nvif_ioctl_map_v0 v0; 258 struct nvif_ioctl_map_v0 v0;
@@ -264,7 +270,8 @@ nvkm_ioctl_map(struct nvkm_object *object, void *data, u32 size)
264} 270}
265 271
266static int 272static int
267nvkm_ioctl_unmap(struct nvkm_object *object, void *data, u32 size) 273nvkm_ioctl_unmap(struct nvkm_client *client,
274 struct nvkm_object *object, void *data, u32 size)
268{ 275{
269 union { 276 union {
270 struct nvif_ioctl_unmap none; 277 struct nvif_ioctl_unmap none;
@@ -280,7 +287,8 @@ nvkm_ioctl_unmap(struct nvkm_object *object, void *data, u32 size)
280} 287}
281 288
282static int 289static int
283nvkm_ioctl_ntfy_new(struct nvkm_object *object, void *data, u32 size) 290nvkm_ioctl_ntfy_new(struct nvkm_client *client,
291 struct nvkm_object *object, void *data, u32 size)
284{ 292{
285 union { 293 union {
286 struct nvif_ioctl_ntfy_new_v0 v0; 294 struct nvif_ioctl_ntfy_new_v0 v0;
@@ -306,9 +314,9 @@ nvkm_ioctl_ntfy_new(struct nvkm_object *object, void *data, u32 size)
306} 314}
307 315
308static int 316static int
309nvkm_ioctl_ntfy_del(struct nvkm_object *object, void *data, u32 size) 317nvkm_ioctl_ntfy_del(struct nvkm_client *client,
318 struct nvkm_object *object, void *data, u32 size)
310{ 319{
311 struct nvkm_client *client = object->client;
312 union { 320 union {
313 struct nvif_ioctl_ntfy_del_v0 v0; 321 struct nvif_ioctl_ntfy_del_v0 v0;
314 } *args = data; 322 } *args = data;
@@ -325,9 +333,9 @@ nvkm_ioctl_ntfy_del(struct nvkm_object *object, void *data, u32 size)
325} 333}
326 334
327static int 335static int
328nvkm_ioctl_ntfy_get(struct nvkm_object *object, void *data, u32 size) 336nvkm_ioctl_ntfy_get(struct nvkm_client *client,
337 struct nvkm_object *object, void *data, u32 size)
329{ 338{
330 struct nvkm_client *client = object->client;
331 union { 339 union {
332 struct nvif_ioctl_ntfy_get_v0 v0; 340 struct nvif_ioctl_ntfy_get_v0 v0;
333 } *args = data; 341 } *args = data;
@@ -344,9 +352,9 @@ nvkm_ioctl_ntfy_get(struct nvkm_object *object, void *data, u32 size)
344} 352}
345 353
346static int 354static int
347nvkm_ioctl_ntfy_put(struct nvkm_object *object, void *data, u32 size) 355nvkm_ioctl_ntfy_put(struct nvkm_client *client,
356 struct nvkm_object *object, void *data, u32 size)
348{ 357{
349 struct nvkm_client *client = object->client;
350 union { 358 union {
351 struct nvif_ioctl_ntfy_put_v0 v0; 359 struct nvif_ioctl_ntfy_put_v0 v0;
352 } *args = data; 360 } *args = data;
@@ -364,7 +372,7 @@ nvkm_ioctl_ntfy_put(struct nvkm_object *object, void *data, u32 size)
364 372
365static struct { 373static struct {
366 int version; 374 int version;
367 int (*func)(struct nvkm_object *, void *, u32); 375 int (*func)(struct nvkm_client *, struct nvkm_object *, void *, u32);
368} 376}
369nvkm_ioctl_v0[] = { 377nvkm_ioctl_v0[] = {
370 { 0x00, nvkm_ioctl_nop }, 378 { 0x00, nvkm_ioctl_nop },
@@ -389,13 +397,10 @@ nvkm_ioctl_path(struct nvkm_client *client, u64 handle, u32 type,
389 struct nvkm_object *object; 397 struct nvkm_object *object;
390 int ret; 398 int ret;
391 399
392 if (handle) 400 object = nvkm_object_search(client, handle, NULL);
393 object = nvkm_client_search(client, handle); 401 if (IS_ERR(object)) {
394 else
395 object = &client->object;
396 if (unlikely(!object)) {
397 nvif_ioctl(&client->object, "object not found\n"); 402 nvif_ioctl(&client->object, "object not found\n");
398 return -ENOENT; 403 return PTR_ERR(object);
399 } 404 }
400 405
401 if (owner != NVIF_IOCTL_V0_OWNER_ANY && owner != object->route) { 406 if (owner != NVIF_IOCTL_V0_OWNER_ANY && owner != object->route) {
@@ -407,7 +412,7 @@ nvkm_ioctl_path(struct nvkm_client *client, u64 handle, u32 type,
407 412
408 if (ret = -EINVAL, type < ARRAY_SIZE(nvkm_ioctl_v0)) { 413 if (ret = -EINVAL, type < ARRAY_SIZE(nvkm_ioctl_v0)) {
409 if (nvkm_ioctl_v0[type].version == 0) 414 if (nvkm_ioctl_v0[type].version == 0)
410 ret = nvkm_ioctl_v0[type].func(object, data, size); 415 ret = nvkm_ioctl_v0[type].func(client, object, data, size);
411 } 416 }
412 417
413 return ret; 418 return ret;
@@ -436,12 +441,13 @@ nvkm_ioctl(struct nvkm_client *client, bool supervisor,
436 &args->v0.route, &args->v0.token); 441 &args->v0.route, &args->v0.token);
437 } 442 }
438 443
439 nvif_ioctl(object, "return %d\n", ret); 444 if (ret != 1) {
440 if (hack) { 445 nvif_ioctl(object, "return %d\n", ret);
441 *hack = client->data; 446 if (hack) {
442 client->data = NULL; 447 *hack = client->data;
448 client->data = NULL;
449 }
443 } 450 }
444 451
445 client->super = false;
446 return ret; 452 return ret;
447} 453}
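Two conventions introduced above are worth noting: every handler now receives the client explicitly instead of digging it out of the object, and nvkm_ioctl_del() returns 1 on success so the caller knows the target object is gone and skips the post-dispatch bookkeeping that would otherwise touch freed state. A small sketch of that "positive return means the target no longer exists" convention (all names below are made up for illustration):

#include <stdio.h>

/* Sketch: a dispatcher whose handlers may destroy the object they are
 * invoked on.  Handlers return <0 on error, 0 on success, and 1 when
 * the object no longer exists and must not be touched afterwards.
 */
typedef int (*ioctl_fn)(int *object);

static int op_query(int *object) { printf("query %d\n", *object); return 0; }
static int op_delete(int *object) { *object = -1; /* "freed" */ return 1; }

static const ioctl_fn ops[] = { op_query, op_delete };

static int dispatch(unsigned type, int *object)
{
	int ret = -22; /* stand-in for -EINVAL */
	if (type < sizeof(ops) / sizeof(ops[0]))
		ret = ops[type](object);
	if (ret != 1) /* object still alive: safe to log it */
		printf("object %d returned %d\n", *object, ret);
	return ret;
}

int main(void)
{
	int object = 7;
	dispatch(0, &object);
	dispatch(1, &object); /* deletes; dispatcher must not reuse it */
	return 0;
}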
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/mm.c b/drivers/gpu/drm/nouveau/nvkm/core/mm.c
index 09a1eee8fd33..fd19d652a7ab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/mm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/mm.c
@@ -147,6 +147,7 @@ nvkm_mm_head(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min,
 		if (!this)
 			return -ENOMEM;
 
+		this->next = NULL;
 		this->type = type;
 		list_del(&this->fl_entry);
 		*pnode = this;
@@ -225,6 +226,7 @@ nvkm_mm_tail(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min,
 		if (!this)
 			return -ENOMEM;
 
+		this->next = NULL;
 		this->type = type;
 		list_del(&this->fl_entry);
 		*pnode = this;
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/object.c b/drivers/gpu/drm/nouveau/nvkm/core/object.c
index 67aa7223dcd7..89d2e9da11c7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/object.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/object.c
@@ -25,6 +25,65 @@
 #include <core/client.h>
 #include <core/engine.h>
 
+struct nvkm_object *
+nvkm_object_search(struct nvkm_client *client, u64 handle,
+		   const struct nvkm_object_func *func)
+{
+	struct nvkm_object *object;
+
+	if (handle) {
+		struct rb_node *node = client->objroot.rb_node;
+		while (node) {
+			object = rb_entry(node, typeof(*object), node);
+			if (handle < object->object)
+				node = node->rb_left;
+			else
+			if (handle > object->object)
+				node = node->rb_right;
+			else
+				goto done;
+		}
+		return ERR_PTR(-ENOENT);
+	} else {
+		object = &client->object;
+	}
+
+done:
+	if (unlikely(func && object->func != func))
+		return ERR_PTR(-EINVAL);
+	return object;
+}
+
+void
+nvkm_object_remove(struct nvkm_object *object)
+{
+	if (!RB_EMPTY_NODE(&object->node))
+		rb_erase(&object->node, &object->client->objroot);
+}
+
+bool
+nvkm_object_insert(struct nvkm_object *object)
+{
+	struct rb_node **ptr = &object->client->objroot.rb_node;
+	struct rb_node *parent = NULL;
+
+	while (*ptr) {
+		struct nvkm_object *this = rb_entry(*ptr, typeof(*this), node);
+		parent = *ptr;
+		if (object->object < this->object)
+			ptr = &parent->rb_left;
+		else
+		if (object->object > this->object)
+			ptr = &parent->rb_right;
+		else
+			return false;
+	}
+
+	rb_link_node(&object->node, parent, ptr);
+	rb_insert_color(&object->node, &object->client->objroot);
+	return true;
+}
+
 int
 nvkm_object_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
 {
@@ -214,7 +273,7 @@ nvkm_object_del(struct nvkm_object **pobject)
 	struct nvkm_object *object = *pobject;
 	if (object && !WARN_ON(!object->func)) {
 		*pobject = nvkm_object_dtor(object);
-		nvkm_client_remove(object->client, object);
+		nvkm_object_remove(object);
 		list_del(&object->head);
 		kfree(*pobject);
 		*pobject = NULL;
@@ -230,6 +289,9 @@ nvkm_object_ctor(const struct nvkm_object_func *func,
 	object->engine = nvkm_engine_ref(oclass->engine);
 	object->oclass = oclass->base.oclass;
 	object->handle = oclass->handle;
+	object->route = oclass->route;
+	object->token = oclass->token;
+	object->object = oclass->object;
 	INIT_LIST_HEAD(&object->head);
 	INIT_LIST_HEAD(&object->tree);
 	RB_CLEAR_NODE(&object->node);
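nvkm_object_search()/nvkm_object_insert() above generalise the old per-client DMA-object tree into one rb-tree over all objects, keyed by the client-supplied 64-bit handle stored in object->object, with an optional func pointer acting as a class filter. The sketch below mirrors the insert/search logic with a plain (unbalanced) binary search tree instead of the kernel's rbtree, purely to show the keying and duplicate-rejection behaviour:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Unbalanced stand-in for the kernel's red-black tree, keyed the same
 * way: by the client-chosen 64-bit object handle.
 */
struct node {
	uint64_t handle;
	struct node *left, *right;
};

/* Mirrors nvkm_object_insert(): returns 0 on duplicate handles. */
static int insert(struct node **root, struct node *obj)
{
	while (*root) {
		if (obj->handle < (*root)->handle)
			root = &(*root)->left;
		else if (obj->handle > (*root)->handle)
			root = &(*root)->right;
		else
			return 0;
	}
	*root = obj;
	return 1;
}

/* Mirrors the lookup loop in nvkm_object_search(). */
static struct node *search(struct node *root, uint64_t handle)
{
	while (root && root->handle != handle)
		root = handle < root->handle ? root->left : root->right;
	return root;
}

int main(void)
{
	struct node a = { 0x100 }, b = { 0x200 }, dup = { 0x100 };
	struct node *root = NULL;

	insert(&root, &a);
	insert(&root, &b);
	printf("dup insert: %d\n", insert(&root, &dup));   /* 0 */
	printf("found 0x200: %p\n", (void *)search(root, 0x200));
	return 0;
}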
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index cceda959b47c..273562dd6bbd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -993,7 +993,7 @@ nv92_chipset = {
 	.mc = g84_mc_new,
 	.mmu = nv50_mmu_new,
 	.mxm = nv50_mxm_new,
-	.pci = g84_pci_new,
+	.pci = g92_pci_new,
 	.therm = g84_therm_new,
 	.timer = nv41_timer_new,
 	.volt = nv40_volt_new,
@@ -2138,6 +2138,7 @@ nv12b_chipset = {
 	.ltc = gm200_ltc_new,
 	.mc = gk20a_mc_new,
 	.mmu = gf100_mmu_new,
+	.pmu = gm20b_pmu_new,
 	.secboot = gm20b_secboot_new,
 	.timer = gk20a_timer_new,
 	.top = gk104_top_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
index 0a1381a84552..070ec5e18fdb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
@@ -137,7 +137,6 @@ nv50_disp_dmac_new_(const struct nv50_disp_dmac_func *func,
 		    const struct nvkm_oclass *oclass,
 		    struct nvkm_object **pobject)
 {
-	struct nvkm_device *device = root->disp->base.engine.subdev.device;
 	struct nvkm_client *client = oclass->client;
 	struct nvkm_dmaobj *dmaobj;
 	struct nv50_disp_dmac *chan;
@@ -153,9 +152,9 @@ nv50_disp_dmac_new_(const struct nv50_disp_dmac_func *func,
 	if (ret)
 		return ret;
 
-	dmaobj = nvkm_dma_search(device->dma, client, push);
-	if (!dmaobj)
-		return -ENOENT;
+	dmaobj = nvkm_dmaobj_search(client, push);
+	if (IS_ERR(dmaobj))
+		return PTR_ERR(dmaobj);
 
 	if (dmaobj->limit - dmaobj->start != 0xfff)
 		return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
index 4510cb6e10a8..627b9ee1ddd2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
@@ -39,13 +39,6 @@ g94_sor_loff(struct nvkm_output_dp *outp)
 }
 
 /*******************************************************************************
- * TMDS/LVDS
- ******************************************************************************/
-static const struct nvkm_output_func
-g94_sor_output_func = {
-};
-
-/*******************************************************************************
  * DisplayPort
 ******************************************************************************/
 u32
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c
index f11ebdd16c77..11b7b8fd5dda 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c
@@ -28,24 +28,6 @@
 
 #include <nvif/class.h>
 
-struct nvkm_dmaobj *
-nvkm_dma_search(struct nvkm_dma *dma, struct nvkm_client *client, u64 object)
-{
-	struct rb_node *node = client->dmaroot.rb_node;
-	while (node) {
-		struct nvkm_dmaobj *dmaobj =
-			container_of(node, typeof(*dmaobj), rb);
-		if (object < dmaobj->handle)
-			node = node->rb_left;
-		else
-		if (object > dmaobj->handle)
-			node = node->rb_right;
-		else
-			return dmaobj;
-	}
-	return NULL;
-}
-
 static int
 nvkm_dma_oclass_new(struct nvkm_device *device,
 		    const struct nvkm_oclass *oclass, void *data, u32 size,
@@ -53,34 +35,12 @@ nvkm_dma_oclass_new(struct nvkm_device *device,
 {
 	struct nvkm_dma *dma = nvkm_dma(oclass->engine);
 	struct nvkm_dmaobj *dmaobj = NULL;
-	struct nvkm_client *client = oclass->client;
-	struct rb_node **ptr = &client->dmaroot.rb_node;
-	struct rb_node *parent = NULL;
 	int ret;
 
 	ret = dma->func->class_new(dma, oclass, data, size, &dmaobj);
 	if (dmaobj)
 		*pobject = &dmaobj->object;
-	if (ret)
-		return ret;
-
-	dmaobj->handle = oclass->object;
-
-	while (*ptr) {
-		struct nvkm_dmaobj *obj = container_of(*ptr, typeof(*obj), rb);
-		parent = *ptr;
-		if (dmaobj->handle < obj->handle)
-			ptr = &parent->rb_left;
-		else
-		if (dmaobj->handle > obj->handle)
-			ptr = &parent->rb_right;
-		else
-			return -EEXIST;
-	}
-
-	rb_link_node(&dmaobj->rb, parent, ptr);
-	rb_insert_color(&dmaobj->rb, &client->dmaroot);
-	return 0;
+	return ret;
 }
 
 static const struct nvkm_device_oclass
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c
index 13c661b1ef14..d20cc0681a88 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c
@@ -31,6 +31,19 @@
 #include <nvif/cl0002.h>
 #include <nvif/unpack.h>
 
+static const struct nvkm_object_func nvkm_dmaobj_func;
+struct nvkm_dmaobj *
+nvkm_dmaobj_search(struct nvkm_client *client, u64 handle)
+{
+	struct nvkm_object *object;
+
+	object = nvkm_object_search(client, handle, &nvkm_dmaobj_func);
+	if (IS_ERR(object))
+		return (void *)object;
+
+	return nvkm_dmaobj(object);
+}
+
 static int
 nvkm_dmaobj_bind(struct nvkm_object *base, struct nvkm_gpuobj *gpuobj,
 		 int align, struct nvkm_gpuobj **pgpuobj)
@@ -42,10 +55,7 @@ nvkm_dmaobj_bind(struct nvkm_object *base, struct nvkm_gpuobj *gpuobj,
 static void *
 nvkm_dmaobj_dtor(struct nvkm_object *base)
 {
-	struct nvkm_dmaobj *dmaobj = nvkm_dmaobj(base);
-	if (!RB_EMPTY_NODE(&dmaobj->rb))
-		rb_erase(&dmaobj->rb, &dmaobj->object.client->dmaroot);
-	return dmaobj;
+	return nvkm_dmaobj(base);
 }
 
 static const struct nvkm_object_func
@@ -74,7 +84,6 @@ nvkm_dmaobj_ctor(const struct nvkm_dmaobj_func *func, struct nvkm_dma *dma,
 	nvkm_object_ctor(&nvkm_dmaobj_func, oclass, &dmaobj->object);
 	dmaobj->func = func;
 	dmaobj->dma = dma;
-	RB_CLEAR_NODE(&dmaobj->rb);
 
 	nvif_ioctl(parent, "create dma size %d\n", *psize);
 	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
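nvkm_dmaobj_search() shows how the generic lookup replaces the dedicated dmaroot tree: the address of the class's nvkm_object_func table doubles as a runtime type tag, so a handle that resolves to a non-DMA object fails the lookup. An illustrative sketch of that tag-by-function-table idea (obj/obj_func are invented names, not driver API):

#include <stdio.h>

/* Using the per-class function table's address as a type tag, the way
 * nvkm_object_search() filters on nvkm_dmaobj_func above.
 */
struct obj_func { const char *name; };
struct obj { const struct obj_func *func; };

static const struct obj_func dma_func = { "dma" };
static const struct obj_func gpu_func = { "gpu" };

/* Returns NULL when the object exists but has the wrong class. */
static struct obj *search_typed(struct obj *o, const struct obj_func *func)
{
	if (func && o->func != func)
		return NULL;
	return o;
}

int main(void)
{
	struct obj d = { &dma_func }, g = { &gpu_func };
	printf("%p %p\n", (void *)search_typed(&d, &dma_func),
			  (void *)search_typed(&g, &dma_func));
	return 0;
}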
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
index 1c9682ae3a6b..660ca7aa95ea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
@@ -32,6 +32,17 @@
 #include <nvif/unpack.h>
 
 void
+nvkm_fifo_recover_chan(struct nvkm_fifo *fifo, int chid)
+{
+	unsigned long flags;
+	if (WARN_ON(!fifo->func->recover_chan))
+		return;
+	spin_lock_irqsave(&fifo->lock, flags);
+	fifo->func->recover_chan(fifo, chid);
+	spin_unlock_irqrestore(&fifo->lock, flags);
+}
+
+void
 nvkm_fifo_pause(struct nvkm_fifo *fifo, unsigned long *flags)
 {
 	return fifo->func->pause(fifo, flags);
@@ -55,19 +66,29 @@ nvkm_fifo_chan_put(struct nvkm_fifo *fifo, unsigned long flags,
 }
 
 struct nvkm_fifo_chan *
-nvkm_fifo_chan_inst(struct nvkm_fifo *fifo, u64 inst, unsigned long *rflags)
+nvkm_fifo_chan_inst_locked(struct nvkm_fifo *fifo, u64 inst)
 {
 	struct nvkm_fifo_chan *chan;
-	unsigned long flags;
-	spin_lock_irqsave(&fifo->lock, flags);
 	list_for_each_entry(chan, &fifo->chan, head) {
 		if (chan->inst->addr == inst) {
 			list_del(&chan->head);
 			list_add(&chan->head, &fifo->chan);
-			*rflags = flags;
 			return chan;
 		}
 	}
+	return NULL;
+}
+
+struct nvkm_fifo_chan *
+nvkm_fifo_chan_inst(struct nvkm_fifo *fifo, u64 inst, unsigned long *rflags)
+{
+	struct nvkm_fifo_chan *chan;
+	unsigned long flags;
+	spin_lock_irqsave(&fifo->lock, flags);
+	if ((chan = nvkm_fifo_chan_inst_locked(fifo, inst))) {
+		*rflags = flags;
+		return chan;
+	}
 	spin_unlock_irqrestore(&fifo->lock, flags);
 	return NULL;
 }
@@ -90,9 +111,34 @@ nvkm_fifo_chan_chid(struct nvkm_fifo *fifo, int chid, unsigned long *rflags)
 	return NULL;
 }
 
+void
+nvkm_fifo_kevent(struct nvkm_fifo *fifo, int chid)
+{
+	nvkm_event_send(&fifo->kevent, 1, chid, NULL, 0);
+}
+
 static int
-nvkm_fifo_event_ctor(struct nvkm_object *object, void *data, u32 size,
-		     struct nvkm_notify *notify)
+nvkm_fifo_kevent_ctor(struct nvkm_object *object, void *data, u32 size,
+		      struct nvkm_notify *notify)
+{
+	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+	if (size == 0) {
+		notify->size = 0;
+		notify->types = 1;
+		notify->index = chan->chid;
+		return 0;
+	}
+	return -ENOSYS;
+}
+
+static const struct nvkm_event_func
+nvkm_fifo_kevent_func = {
+	.ctor = nvkm_fifo_kevent_ctor,
+};
+
+static int
+nvkm_fifo_cevent_ctor(struct nvkm_object *object, void *data, u32 size,
+		      struct nvkm_notify *notify)
 {
 	if (size == 0) {
 		notify->size = 0;
@@ -104,10 +150,16 @@ nvkm_fifo_event_ctor(struct nvkm_object *object, void *data, u32 size,
 }
 
 static const struct nvkm_event_func
-nvkm_fifo_event_func = {
-	.ctor = nvkm_fifo_event_ctor,
+nvkm_fifo_cevent_func = {
+	.ctor = nvkm_fifo_cevent_ctor,
 };
 
+void
+nvkm_fifo_cevent(struct nvkm_fifo *fifo)
+{
+	nvkm_event_send(&fifo->cevent, 1, 0, NULL, 0);
+}
+
 static void
 nvkm_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
 {
@@ -241,6 +293,7 @@ nvkm_fifo_dtor(struct nvkm_engine *engine)
 	void *data = fifo;
 	if (fifo->func->dtor)
 		data = fifo->func->dtor(fifo);
+	nvkm_event_fini(&fifo->kevent);
 	nvkm_event_fini(&fifo->cevent);
 	nvkm_event_fini(&fifo->uevent);
 	return data;
@@ -283,5 +336,9 @@ nvkm_fifo_ctor(const struct nvkm_fifo_func *func, struct nvkm_device *device,
 		return ret;
 	}
 
-	return nvkm_event_init(&nvkm_fifo_event_func, 1, 1, &fifo->cevent);
+	ret = nvkm_event_init(&nvkm_fifo_cevent_func, 1, 1, &fifo->cevent);
+	if (ret)
+		return ret;
+
+	return nvkm_event_init(&nvkm_fifo_kevent_func, 1, nr, &fifo->kevent);
 }
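Splitting nvkm_fifo_chan_inst() into a *_locked() body plus a locking wrapper lets gk104's fault handler, which already holds fifo->lock, reuse the same lookup without deadlocking; note the driver wrapper additionally hands the still-held lock back to the caller through *rflags on success. A simplified pthread sketch of the lock-splitting pattern (without the handed-back lock):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int table[4] = { 10, 20, 30, 40 };

/* Caller must already hold 'lock' (the *_locked variant). */
static int *find_locked(int value)
{
	for (unsigned i = 0; i < 4; i++)
		if (table[i] == value)
			return &table[i];
	return NULL;
}

/* Public wrapper: takes and releases 'lock' around the locked body. */
static int *find(int value)
{
	pthread_mutex_lock(&lock);
	int *p = find_locked(value);
	pthread_mutex_unlock(&lock);
	return p;
}

int main(void)
{
	printf("found: %p\n", (void *)find(30));
	return 0;
}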
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
index dc6d4678f228..fab760ae922f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
@@ -371,9 +371,9 @@ nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *func,
 
 	/* allocate push buffer ctxdma instance */
 	if (push) {
-		dmaobj = nvkm_dma_search(device->dma, oclass->client, push);
-		if (!dmaobj)
-			return -ENOENT;
+		dmaobj = nvkm_dmaobj_search(client, push);
+		if (IS_ERR(dmaobj))
+			return PTR_ERR(dmaobj);
 
 		ret = nvkm_object_bind(&dmaobj->object, chan->inst, -16,
 				       &chan->push);
@@ -410,6 +410,6 @@ nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *func,
 			base + user * chan->chid;
 	chan->size = user;
 
-	nvkm_event_send(&fifo->cevent, 1, 0, NULL, 0);
+	nvkm_fifo_cevent(fifo);
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
index 55dc415c5c08..d8019bdacd61 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
@@ -29,5 +29,5 @@ struct nvkm_fifo_chan_oclass {
 	struct nvkm_sclass base;
 };
 
-int g84_fifo_chan_ntfy(struct nvkm_fifo_chan *, u32, struct nvkm_event **);
+int gf100_fifo_chan_ntfy(struct nvkm_fifo_chan *, u32, struct nvkm_event **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
index 15a992b3580a..61797c4dd07a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
@@ -30,12 +30,12 @@
 
 #include <nvif/cl826e.h>
 
-int
+static int
 g84_fifo_chan_ntfy(struct nvkm_fifo_chan *chan, u32 type,
 		   struct nvkm_event **pevent)
 {
 	switch (type) {
-	case G82_CHANNEL_DMA_V0_NTFY_UEVENT:
+	case NV826E_V0_NTFY_NON_STALL_INTERRUPT:
 		*pevent = &chan->fifo->uevent;
 		return 0;
 	default:
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
index ec68ea9747d5..cd468ab1db12 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
@@ -68,7 +68,14 @@ gf100_fifo_runlist_commit(struct gf100_fifo *fifo)
 	}
 	nvkm_done(cur);
 
-	target = (nvkm_memory_target(cur) == NVKM_MEM_TARGET_HOST) ? 0x3 : 0x0;
+	switch (nvkm_memory_target(cur)) {
+	case NVKM_MEM_TARGET_VRAM: target = 0; break;
+	case NVKM_MEM_TARGET_NCOH: target = 3; break;
+	default:
+		mutex_unlock(&subdev->mutex);
+		WARN_ON(1);
+		return;
+	}
 
 	nvkm_wr32(device, 0x002270, (nvkm_memory_addr(cur) >> 12) |
 				    (target << 28));
@@ -183,6 +190,7 @@ gf100_fifo_recover(struct gf100_fifo *fifo, struct nvkm_engine *engine,
 	if (engine != &fifo->base.engine)
 		fifo->recover.mask |= 1ULL << engine->subdev.index;
 	schedule_work(&fifo->recover.work);
+	nvkm_fifo_kevent(&fifo->base, chid);
 }
 
 static const struct nvkm_enum
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index 38c0910722c0..3a24788c3185 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -27,11 +27,71 @@
 #include <core/client.h>
 #include <core/gpuobj.h>
 #include <subdev/bar.h>
+#include <subdev/timer.h>
 #include <subdev/top.h>
 #include <engine/sw.h>
 
 #include <nvif/class.h>
 
+struct gk104_fifo_engine_status {
+	bool busy;
+	bool faulted;
+	bool chsw;
+	bool save;
+	bool load;
+	struct {
+		bool tsg;
+		u32 id;
+	} prev, next, *chan;
+};
+
+static void
+gk104_fifo_engine_status(struct gk104_fifo *fifo, int engn,
+			 struct gk104_fifo_engine_status *status)
+{
+	struct nvkm_engine *engine = fifo->engine[engn].engine;
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x08));
+
+	status->busy = !!(stat & 0x80000000);
+	status->faulted = !!(stat & 0x40000000);
+	status->next.tsg = !!(stat & 0x10000000);
+	status->next.id = (stat & 0x0fff0000) >> 16;
+	status->chsw = !!(stat & 0x00008000);
+	status->save = !!(stat & 0x00004000);
+	status->load = !!(stat & 0x00002000);
+	status->prev.tsg = !!(stat & 0x00001000);
+	status->prev.id = (stat & 0x00000fff);
+	status->chan = NULL;
+
+	if (status->busy && status->chsw) {
+		if (status->load && status->save) {
+			if (engine && nvkm_engine_chsw_load(engine))
+				status->chan = &status->next;
+			else
+				status->chan = &status->prev;
+		} else
+		if (status->load) {
+			status->chan = &status->next;
+		} else {
+			status->chan = &status->prev;
+		}
+	} else
+	if (status->load) {
+		status->chan = &status->prev;
+	}
+
+	nvkm_debug(subdev, "engine %02d: busy %d faulted %d chsw %d "
+			   "save %d load %d %sid %d%s-> %sid %d%s\n",
+		   engn, status->busy, status->faulted,
+		   status->chsw, status->save, status->load,
+		   status->prev.tsg ? "tsg" : "ch", status->prev.id,
+		   status->chan == &status->prev ? "*" : " ",
+		   status->next.tsg ? "tsg" : "ch", status->next.id,
+		   status->chan == &status->next ? "*" : " ");
+}
+
 static int
 gk104_fifo_class_get(struct nvkm_fifo *base, int index,
 		     const struct nvkm_fifo_chan_oclass **psclass)
@@ -83,10 +143,13 @@ gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl)
 	}
 	nvkm_done(mem);
 
-	if (nvkm_memory_target(mem) == NVKM_MEM_TARGET_VRAM)
-		target = 0;
-	else
-		target = 3;
+	switch (nvkm_memory_target(mem)) {
+	case NVKM_MEM_TARGET_VRAM: target = 0; break;
+	case NVKM_MEM_TARGET_NCOH: target = 3; break;
+	default:
+		WARN_ON(1);
+		return;
+	}
 
 	nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) |
 				    (target << 28));
@@ -149,31 +212,137 @@ gk104_fifo_recover_work(struct work_struct *w)
 	nvkm_mask(device, 0x002630, runm, 0x00000000);
 }
 
+static void gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn);
+
 static void
-gk104_fifo_recover(struct gk104_fifo *fifo, struct nvkm_engine *engine,
-		   struct gk104_fifo_chan *chan)
+gk104_fifo_recover_runl(struct gk104_fifo *fifo, int runl)
 {
 	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	u32 chid = chan->base.chid;
-	int engn;
+	const u32 runm = BIT(runl);
 
-	nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
-		   nvkm_subdev_name[engine->subdev.index], chid);
 	assert_spin_locked(&fifo->base.lock);
+	if (fifo->recover.runm & runm)
+		return;
+	fifo->recover.runm |= runm;
 
-	nvkm_mask(device, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800);
-	list_del_init(&chan->head);
-	chan->killed = true;
+	/* Block runlist to prevent channel assignment(s) from changing. */
+	nvkm_mask(device, 0x002630, runm, runm);
 
-	for (engn = 0; engn < fifo->engine_nr; engn++) {
-		if (fifo->engine[engn].engine == engine) {
-			fifo->recover.engm |= BIT(engn);
+	/* Schedule recovery. */
+	nvkm_warn(subdev, "runlist %d: scheduled for recovery\n", runl);
+	schedule_work(&fifo->recover.work);
+}
+
+static void
+gk104_fifo_recover_chan(struct nvkm_fifo *base, int chid)
+{
+	struct gk104_fifo *fifo = gk104_fifo(base);
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	const u32 stat = nvkm_rd32(device, 0x800004 + (chid * 0x08));
+	const u32 runl = (stat & 0x000f0000) >> 16;
+	const bool used = (stat & 0x00000001);
+	unsigned long engn, engm = fifo->runlist[runl].engm;
+	struct gk104_fifo_chan *chan;
+
+	assert_spin_locked(&fifo->base.lock);
+	if (!used)
+		return;
+
+	/* Lookup SW state for channel, and mark it as dead. */
+	list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
+		if (chan->base.chid == chid) {
+			list_del_init(&chan->head);
+			chan->killed = true;
+			nvkm_fifo_kevent(&fifo->base, chid);
 			break;
 		}
 	}
 
-	fifo->recover.runm |= BIT(chan->runl);
+	/* Disable channel. */
+	nvkm_wr32(device, 0x800004 + (chid * 0x08), stat | 0x00000800);
+	nvkm_warn(subdev, "channel %d: killed\n", chid);
+
+	/* Block channel assignments from changing during recovery. */
+	gk104_fifo_recover_runl(fifo, runl);
+
+	/* Schedule recovery for any engines the channel is on. */
+	for_each_set_bit(engn, &engm, fifo->engine_nr) {
+		struct gk104_fifo_engine_status status;
+		gk104_fifo_engine_status(fifo, engn, &status);
+		if (!status.chan || status.chan->id != chid)
+			continue;
+		gk104_fifo_recover_engn(fifo, engn);
+	}
+}
+
+static void
+gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn)
+{
+	struct nvkm_engine *engine = fifo->engine[engn].engine;
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	const u32 runl = fifo->engine[engn].runl;
+	const u32 engm = BIT(engn);
+	struct gk104_fifo_engine_status status;
+	int mmui = -1;
+
+	assert_spin_locked(&fifo->base.lock);
+	if (fifo->recover.engm & engm)
+		return;
+	fifo->recover.engm |= engm;
+
+	/* Block channel assignments from changing during recovery. */
+	gk104_fifo_recover_runl(fifo, runl);
+
+	/* Determine which channel (if any) is currently on the engine. */
+	gk104_fifo_engine_status(fifo, engn, &status);
+	if (status.chan) {
+		/* The channel is not longer viable, kill it. */
+		gk104_fifo_recover_chan(&fifo->base, status.chan->id);
+	}
+
+	/* Determine MMU fault ID for the engine, if we're not being
+	 * called from the fault handler already.
+	 */
+	if (!status.faulted && engine) {
+		mmui = nvkm_top_fault_id(device, engine->subdev.index);
+		if (mmui < 0) {
+			const struct nvkm_enum *en = fifo->func->fault.engine;
+			for (; en && en->name; en++) {
+				if (en->data2 == engine->subdev.index) {
+					mmui = en->value;
+					break;
+				}
+			}
+		}
+		WARN_ON(mmui < 0);
+	}
+
+	/* Trigger a MMU fault for the engine.
+	 *
+	 * No good idea why this is needed, but nvgpu does something similar,
+	 * and it makes recovery from CTXSW_TIMEOUT a lot more reliable.
+	 */
+	if (mmui >= 0) {
+		nvkm_wr32(device, 0x002a30 + (engn * 0x04), 0x00000100 | mmui);
+
+		/* Wait for fault to trigger. */
+		nvkm_msec(device, 2000,
+			gk104_fifo_engine_status(fifo, engn, &status);
+			if (status.faulted)
+				break;
+		);
+
+		/* Release MMU fault trigger, and ACK the fault. */
+		nvkm_wr32(device, 0x002a30 + (engn * 0x04), 0x00000000);
+		nvkm_wr32(device, 0x00259c, BIT(mmui));
+		nvkm_wr32(device, 0x002100, 0x10000000);
+	}
+
+	/* Schedule recovery. */
+	nvkm_warn(subdev, "engine %d: scheduled for recovery\n", engn);
 	schedule_work(&fifo->recover.work);
 }
 
@@ -211,34 +380,30 @@ static void
 gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
 {
 	struct nvkm_device *device = fifo->base.engine.subdev.device;
-	struct gk104_fifo_chan *chan;
-	unsigned long flags;
+	unsigned long flags, engm = 0;
 	u32 engn;
 
+	/* We need to ACK the SCHED_ERROR here, and prevent it reasserting,
+	 * as MMU_FAULT cannot be triggered while it's pending.
+	 */
 	spin_lock_irqsave(&fifo->base.lock, flags);
+	nvkm_mask(device, 0x002140, 0x00000100, 0x00000000);
+	nvkm_wr32(device, 0x002100, 0x00000100);
+
 	for (engn = 0; engn < fifo->engine_nr; engn++) {
-		struct nvkm_engine *engine = fifo->engine[engn].engine;
-		int runl = fifo->engine[engn].runl;
-		u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x08));
-		u32 busy = (stat & 0x80000000);
-		u32 next = (stat & 0x0fff0000) >> 16;
-		u32 chsw = (stat & 0x00008000);
-		u32 save = (stat & 0x00004000);
-		u32 load = (stat & 0x00002000);
-		u32 prev = (stat & 0x00000fff);
-		u32 chid = load ? next : prev;
-		(void)save;
-
-		if (!busy || !chsw)
+		struct gk104_fifo_engine_status status;
+
+		gk104_fifo_engine_status(fifo, engn, &status);
+		if (!status.busy || !status.chsw)
 			continue;
 
-		list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
-			if (chan->base.chid == chid && engine) {
-				gk104_fifo_recover(fifo, engine, chan);
-				break;
-			}
-		}
+		engm |= BIT(engn);
 	}
+
+	for_each_set_bit(engn, &engm, fifo->engine_nr)
+		gk104_fifo_recover_engn(fifo, engn);
+
+	nvkm_mask(device, 0x002140, 0x00000100, 0x00000100);
 	spin_unlock_irqrestore(&fifo->base.lock, flags);
 }
 
@@ -301,6 +466,7 @@ gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
 	struct nvkm_fifo_chan *chan;
 	unsigned long flags;
 	char gpcid[8] = "", en[16] = "";
+	int engn;
 
 	er = nvkm_enum_find(fifo->func->fault.reason, reason);
 	eu = nvkm_enum_find(fifo->func->fault.engine, unit);
@@ -342,7 +508,8 @@ gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
 		snprintf(en, sizeof(en), "%s", eu->name);
 	}
 
-	chan = nvkm_fifo_chan_inst(&fifo->base, (u64)inst << 12, &flags);
+	spin_lock_irqsave(&fifo->base.lock, flags);
+	chan = nvkm_fifo_chan_inst_locked(&fifo->base, (u64)inst << 12);
 
 	nvkm_error(subdev,
 		   "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
@@ -353,9 +520,23 @@ gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
 		   (u64)inst << 12,
 		   chan ? chan->object.client->name : "unknown");
 
-	if (engine && chan)
-		gk104_fifo_recover(fifo, engine, (void *)chan);
-	nvkm_fifo_chan_put(&fifo->base, flags, &chan);
+
+	/* Kill the channel that caused the fault. */
+	if (chan)
+		gk104_fifo_recover_chan(&fifo->base, chan->chid);
+
+	/* Channel recovery will probably have already done this for the
+	 * correct engine(s), but just in case we can't find the channel
+	 * information...
+	 */
+	for (engn = 0; engn < fifo->engine_nr && engine; engn++) {
+		if (fifo->engine[engn].engine == engine) {
+			gk104_fifo_recover_engn(fifo, engn);
+			break;
+		}
+	}
+
+	spin_unlock_irqrestore(&fifo->base.lock, flags);
 }
 
 static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
@@ -716,6 +897,7 @@ gk104_fifo_ = {
 	.intr = gk104_fifo_intr,
 	.uevent_init = gk104_fifo_uevent_init,
 	.uevent_fini = gk104_fifo_uevent_fini,
+	.recover_chan = gk104_fifo_recover_chan,
 	.class_get = gk104_fifo_class_get,
 };
 
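gk104_fifo_engine_status() above decodes a single engine-status register read into busy/faulted/context-switch state and picks which channel (previous or next) is actually resident. The standalone sketch below decodes a made-up sample word using the same bit positions, so the field layout is easy to check by eye:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Sample value only: busy + chsw + load, next = tsg 1, prev = ch 21. */
	uint32_t stat = 0x9001a015;

	bool busy     = stat & 0x80000000;
	bool faulted  = stat & 0x40000000;
	bool next_tsg = stat & 0x10000000;
	uint32_t next = (stat & 0x0fff0000) >> 16;
	bool chsw     = stat & 0x00008000;
	bool save     = stat & 0x00004000;
	bool load     = stat & 0x00002000;
	bool prev_tsg = stat & 0x00001000;
	uint32_t prev = stat & 0x00000fff;

	printf("busy %d faulted %d chsw %d save %d load %d %s %u -> %s %u\n",
	       busy, faulted, chsw, save, load,
	       prev_tsg ? "tsg" : "ch", prev, next_tsg ? "tsg" : "ch", next);
	return 0;
}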
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
index 12d964260a29..f9e0377d3d24 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
@@ -32,6 +32,23 @@
 #include <nvif/cl906f.h>
 #include <nvif/unpack.h>
 
+int
+gf100_fifo_chan_ntfy(struct nvkm_fifo_chan *chan, u32 type,
+		     struct nvkm_event **pevent)
+{
+	switch (type) {
+	case NV906F_V0_NTFY_NON_STALL_INTERRUPT:
+		*pevent = &chan->fifo->uevent;
+		return 0;
+	case NV906F_V0_NTFY_KILLED:
+		*pevent = &chan->fifo->kevent;
+		return 0;
+	default:
+		break;
+	}
+	return -EINVAL;
+}
+
 static u32
 gf100_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
 {
@@ -184,7 +201,7 @@ gf100_fifo_gpfifo_func = {
 	.dtor = gf100_fifo_gpfifo_dtor,
 	.init = gf100_fifo_gpfifo_init,
 	.fini = gf100_fifo_gpfifo_fini,
-	.ntfy = g84_fifo_chan_ntfy,
+	.ntfy = gf100_fifo_chan_ntfy,
 	.engine_ctor = gf100_fifo_gpfifo_engine_ctor,
 	.engine_dtor = gf100_fifo_gpfifo_engine_dtor,
 	.engine_init = gf100_fifo_gpfifo_engine_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
index a2df4f3e7763..8abf6f8ef445 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
@@ -50,6 +50,7 @@ gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *chan)
 	) < 0) {
 		nvkm_error(subdev, "channel %d [%s] kick timeout\n",
 			   chan->base.chid, client->name);
+		nvkm_fifo_recover_chan(&fifo->base, chan->base.chid);
 		ret = -ETIMEDOUT;
 	}
 	mutex_unlock(&subdev->mutex);
@@ -213,7 +214,7 @@ gk104_fifo_gpfifo_func = {
 	.dtor = gk104_fifo_gpfifo_dtor,
 	.init = gk104_fifo_gpfifo_init,
 	.fini = gk104_fifo_gpfifo_fini,
-	.ntfy = g84_fifo_chan_ntfy,
+	.ntfy = gf100_fifo_chan_ntfy,
 	.engine_ctor = gk104_fifo_gpfifo_engine_ctor,
 	.engine_dtor = gk104_fifo_gpfifo_engine_dtor,
 	.engine_init = gk104_fifo_gpfifo_engine_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
index f6dfb37d9429..f889b13b5e41 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
@@ -6,6 +6,12 @@
 int nvkm_fifo_ctor(const struct nvkm_fifo_func *, struct nvkm_device *,
 		   int index, int nr, struct nvkm_fifo *);
 void nvkm_fifo_uevent(struct nvkm_fifo *);
+void nvkm_fifo_cevent(struct nvkm_fifo *);
+void nvkm_fifo_kevent(struct nvkm_fifo *, int chid);
+void nvkm_fifo_recover_chan(struct nvkm_fifo *, int chid);
+
+struct nvkm_fifo_chan *
+nvkm_fifo_chan_inst_locked(struct nvkm_fifo *, u64 inst);
 
 struct nvkm_fifo_chan_oclass;
 struct nvkm_fifo_func {
@@ -18,6 +24,7 @@ struct nvkm_fifo_func {
 	void (*start)(struct nvkm_fifo *, unsigned long *);
 	void (*uevent_init)(struct nvkm_fifo *);
 	void (*uevent_fini)(struct nvkm_fifo *);
+	void (*recover_chan)(struct nvkm_fifo *, int chid);
 	int (*class_get)(struct nvkm_fifo *, int index,
 			 const struct nvkm_fifo_chan_oclass **);
 	const struct nvkm_fifo_chan_oclass *chan[];
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
index 467065d1b4e6..cd8cf6f7024c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
@@ -25,6 +25,15 @@
 
 #include <engine/fifo.h>
 
+static bool
+nvkm_gr_chsw_load(struct nvkm_engine *engine)
+{
+	struct nvkm_gr *gr = nvkm_gr(engine);
+	if (gr->func->chsw_load)
+		return gr->func->chsw_load(gr);
+	return false;
+}
+
 static void
 nvkm_gr_tile(struct nvkm_engine *engine, int region, struct nvkm_fb_tile *tile)
 {
@@ -106,6 +115,15 @@ nvkm_gr_init(struct nvkm_engine *engine)
 	return gr->func->init(gr);
 }
 
+static int
+nvkm_gr_fini(struct nvkm_engine *engine, bool suspend)
+{
+	struct nvkm_gr *gr = nvkm_gr(engine);
+	if (gr->func->fini)
+		return gr->func->fini(gr, suspend);
+	return 0;
+}
+
 static void *
 nvkm_gr_dtor(struct nvkm_engine *engine)
 {
@@ -120,8 +138,10 @@ nvkm_gr = {
 	.dtor = nvkm_gr_dtor,
 	.oneinit = nvkm_gr_oneinit,
 	.init = nvkm_gr_init,
+	.fini = nvkm_gr_fini,
 	.intr = nvkm_gr_intr,
 	.tile = nvkm_gr_tile,
+	.chsw_load = nvkm_gr_chsw_load,
 	.fifo.cclass = nvkm_gr_cclass_new,
 	.fifo.sclass = nvkm_gr_oclass_get,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c
index ce913300539f..da1ba74682b4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c
@@ -25,6 +25,8 @@
 
 #include <subdev/timer.h>
 
+#include <nvif/class.h>
+
 static const struct nvkm_bitfield nv50_gr_status[] = {
 	{ 0x00000001, "BUSY" }, /* set when any bit is set */
 	{ 0x00000002, "DISPATCH" },
@@ -180,11 +182,11 @@ g84_gr = {
 	.tlb_flush = g84_gr_tlb_flush,
 	.units = nv50_gr_units,
 	.sclass = {
-		{ -1, -1, 0x0030, &nv50_gr_object },
-		{ -1, -1, 0x502d, &nv50_gr_object },
-		{ -1, -1, 0x5039, &nv50_gr_object },
-		{ -1, -1, 0x50c0, &nv50_gr_object },
-		{ -1, -1, 0x8297, &nv50_gr_object },
+		{ -1, -1, NV_NULL_CLASS, &nv50_gr_object },
+		{ -1, -1, NV50_TWOD, &nv50_gr_object },
+		{ -1, -1, NV50_MEMORY_TO_MEMORY_FORMAT, &nv50_gr_object },
+		{ -1, -1, NV50_COMPUTE, &nv50_gr_object },
+		{ -1, -1, G82_TESLA, &nv50_gr_object },
 		{}
 	}
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index f65a5b0a1a4d..f9acb8a944d2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -702,6 +702,22 @@ gf100_gr_pack_mmio[] = {
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
+static bool
+gf100_gr_chsw_load(struct nvkm_gr *base)
+{
+	struct gf100_gr *gr = gf100_gr(base);
+	if (!gr->firmware) {
+		u32 trace = nvkm_rd32(gr->base.engine.subdev.device, 0x40981c);
+		if (trace & 0x00000040)
+			return true;
+	} else {
+		u32 mthd = nvkm_rd32(gr->base.engine.subdev.device, 0x409808);
+		if (mthd & 0x00080000)
+			return true;
+	}
+	return false;
+}
+
 int
 gf100_gr_rops(struct gf100_gr *gr)
 {
@@ -1136,7 +1152,7 @@ gf100_gr_trap_intr(struct gf100_gr *gr)
 	if (trap & 0x00000008) {
 		u32 stat = nvkm_rd32(device, 0x408030);
 
-		nvkm_snprintbf(error, sizeof(error), gf100_m2mf_error,
+		nvkm_snprintbf(error, sizeof(error), gf100_ccache_error,
			       stat & 0x3fffffff);
 		nvkm_error(subdev, "CCACHE %08x [%s]\n", stat, error);
 		nvkm_wr32(device, 0x408030, 0xc0000000);
@@ -1391,26 +1407,11 @@ gf100_gr_intr(struct nvkm_gr *base)
 }
 
 static void
-gf100_gr_init_fw(struct gf100_gr *gr, u32 fuc_base,
+gf100_gr_init_fw(struct nvkm_falcon *falcon,
 		 struct gf100_gr_fuc *code, struct gf100_gr_fuc *data)
 {
-	struct nvkm_device *device = gr->base.engine.subdev.device;
-	int i;
-
-	nvkm_wr32(device, fuc_base + 0x01c0, 0x01000000);
-	for (i = 0; i < data->size / 4; i++)
-		nvkm_wr32(device, fuc_base + 0x01c4, data->data[i]);
-
-	nvkm_wr32(device, fuc_base + 0x0180, 0x01000000);
-	for (i = 0; i < code->size / 4; i++) {
-		if ((i & 0x3f) == 0)
-			nvkm_wr32(device, fuc_base + 0x0188, i >> 6);
-		nvkm_wr32(device, fuc_base + 0x0184, code->data[i]);
-	}
-
-	/* code must be padded to 0x40 words */
-	for (; i & 0x3f; i++)
-		nvkm_wr32(device, fuc_base + 0x0184, 0);
+	nvkm_falcon_load_dmem(falcon, data->data, 0x0, data->size, 0);
+	nvkm_falcon_load_imem(falcon, code->data, 0x0, code->size, 0, 0, false);
 }
 
 static void
@@ -1455,162 +1456,149 @@ gf100_gr_init_csdata(struct gf100_gr *gr,
 	nvkm_wr32(device, falcon + 0x01c4, star + 4);
 }
 
-int
-gf100_gr_init_ctxctl(struct gf100_gr *gr)
+/* Initialize context from an external (secure or not) firmware */
+static int
+gf100_gr_init_ctxctl_ext(struct gf100_gr *gr)
 {
-	const struct gf100_grctx_func *grctx = gr->func->grctx;
 	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
 	struct nvkm_secboot *sb = device->secboot;
-	int i;
 	int ret = 0;
 
-	if (gr->firmware) {
-		/* load fuc microcode */
-		nvkm_mc_unk260(device, 0);
-
-		/* securely-managed falcons must be reset using secure boot */
-		if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_FECS))
-			ret = nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_FECS);
-		else
-			gf100_gr_init_fw(gr, 0x409000, &gr->fuc409c,
-					 &gr->fuc409d);
-		if (ret)
-			return ret;
-
-		if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_GPCCS))
-			ret = nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_GPCCS);
-		else
-			gf100_gr_init_fw(gr, 0x41a000, &gr->fuc41ac,
-					 &gr->fuc41ad);
-		if (ret)
-			return ret;
-
-		nvkm_mc_unk260(device, 1);
-
-		/* start both of them running */
-		nvkm_wr32(device, 0x409840, 0xffffffff);
-		nvkm_wr32(device, 0x41a10c, 0x00000000);
-		nvkm_wr32(device, 0x40910c, 0x00000000);
-
-		if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_GPCCS))
-			nvkm_secboot_start(sb, NVKM_SECBOOT_FALCON_GPCCS);
-		else
-			nvkm_wr32(device, 0x41a100, 0x00000002);
-		if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_FECS))
-			nvkm_secboot_start(sb, NVKM_SECBOOT_FALCON_FECS);
-		else
-			nvkm_wr32(device, 0x409100, 0x00000002);
-		if (nvkm_msec(device, 2000,
-			if (nvkm_rd32(device, 0x409800) & 0x00000001)
-				break;
-		) < 0)
-			return -EBUSY;
-
-		nvkm_wr32(device, 0x409840, 0xffffffff);
-		nvkm_wr32(device, 0x409500, 0x7fffffff);
-		nvkm_wr32(device, 0x409504, 0x00000021);
+	/* load fuc microcode */
+	nvkm_mc_unk260(device, 0);
+
+	/* securely-managed falcons must be reset using secure boot */
+	if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_FECS))
+		ret = nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_FECS);
+	else
+		gf100_gr_init_fw(gr->fecs, &gr->fuc409c, &gr->fuc409d);
+	if (ret)
+		return ret;
+
+	if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_GPCCS))
+		ret = nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_GPCCS);
+	else
+		gf100_gr_init_fw(gr->gpccs, &gr->fuc41ac, &gr->fuc41ad);
+	if (ret)
+		return ret;
+
+	nvkm_mc_unk260(device, 1);
+
+	/* start both of them running */
+	nvkm_wr32(device, 0x409840, 0xffffffff);
+	nvkm_wr32(device, 0x41a10c, 0x00000000);
+	nvkm_wr32(device, 0x40910c, 0x00000000);
+
+	nvkm_falcon_start(gr->gpccs);
+	nvkm_falcon_start(gr->fecs);
+
+	if (nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x409800) & 0x00000001)
+			break;
+	) < 0)
+		return -EBUSY;
+
+	nvkm_wr32(device, 0x409840, 0xffffffff);
+	nvkm_wr32(device, 0x409500, 0x7fffffff);
+	nvkm_wr32(device, 0x409504, 0x00000021);
+
+	nvkm_wr32(device, 0x409840, 0xffffffff);
+	nvkm_wr32(device, 0x409500, 0x00000000);
+	nvkm_wr32(device, 0x409504, 0x00000010);
+	if (nvkm_msec(device, 2000,
+		if ((gr->size = nvkm_rd32(device, 0x409800)))
+			break;
+	) < 0)
+		return -EBUSY;
+
+	nvkm_wr32(device, 0x409840, 0xffffffff);
+	nvkm_wr32(device, 0x409500, 0x00000000);
+	nvkm_wr32(device, 0x409504, 0x00000016);
+	if (nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x409800))
+			break;
+	) < 0)
+		return -EBUSY;
+
+	nvkm_wr32(device, 0x409840, 0xffffffff);
+	nvkm_wr32(device, 0x409500, 0x00000000);
+	nvkm_wr32(device, 0x409504, 0x00000025);
+	if (nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x409800))
+			break;
+	) < 0)
+		return -EBUSY;
 
-		nvkm_wr32(device, 0x409840, 0xffffffff);
-		nvkm_wr32(device, 0x409500, 0x00000000);
-		nvkm_wr32(device, 0x409504, 0x00000010);
+	if (device->chipset >= 0xe0) {
+		nvkm_wr32(device, 0x409800, 0x00000000);
+		nvkm_wr32(device, 0x409500, 0x00000001);
+		nvkm_wr32(device, 0x409504, 0x00000030);
 		if (nvkm_msec(device, 2000,
-			if ((gr->size = nvkm_rd32(device, 0x409800)))
+			if (nvkm_rd32(device, 0x409800))
 				break;
 		) < 0)
 			return -EBUSY;
 
-		nvkm_wr32(device, 0x409840, 0xffffffff);
-		nvkm_wr32(device, 0x409500, 0x00000000);
-		nvkm_wr32(device, 0x409504, 0x00000016);
+		nvkm_wr32(device, 0x409810, 0xb00095c8);
+		nvkm_wr32(device, 0x409800, 0x00000000);
+		nvkm_wr32(device, 0x409500, 0x00000001);
+		nvkm_wr32(device, 0x409504, 0x00000031);
 		if (nvkm_msec(device, 2000,
 			if (nvkm_rd32(device, 0x409800))
 				break;
 		) < 0)
			return -EBUSY;
 
-		nvkm_wr32(device, 0x409840, 0xffffffff);
-		nvkm_wr32(device, 0x409500, 0x00000000);
-		nvkm_wr32(device, 0x409504, 0x00000025);
+		nvkm_wr32(device, 0x409810, 0x00080420);
+		nvkm_wr32(device, 0x409800, 0x00000000);
+		nvkm_wr32(device, 0x409500, 0x00000001);
+		nvkm_wr32(device, 0x409504, 0x00000032);
 		if (nvkm_msec(device, 2000,
 			if (nvkm_rd32(device, 0x409800))
 				break;
 		) < 0)
 			return -EBUSY;
 
-		if (device->chipset >= 0xe0) {
-			nvkm_wr32(device, 0x409800, 0x00000000);
-			nvkm_wr32(device, 0x409500, 0x00000001);
-			nvkm_wr32(device, 0x409504, 0x00000030);
-			if (nvkm_msec(device, 2000,
-				if (nvkm_rd32(device, 0x409800))
-					break;
-			) < 0)
-				return -EBUSY;
-
-			nvkm_wr32(device, 0x409810, 0xb00095c8);
-			nvkm_wr32(device, 0x409800, 0x00000000);
-			nvkm_wr32(device, 0x409500, 0x00000001);
-			nvkm_wr32(device, 0x409504, 0x00000031);
-			if (nvkm_msec(device, 2000,
-				if (nvkm_rd32(device, 0x409800))
-					break;
-			) < 0)
-				return -EBUSY;
-
-			nvkm_wr32(device, 0x409810, 0x00080420);
-			nvkm_wr32(device, 0x409800, 0x00000000);
-			nvkm_wr32(device, 0x409500, 0x00000001);
-			nvkm_wr32(device, 0x409504, 0x00000032);
-			if (nvkm_msec(device, 2000,
-				if (nvkm_rd32(device, 0x409800))
-					break;
-			) < 0)
-				return -EBUSY;
+		nvkm_wr32(device, 0x409614, 0x00000070);
+		nvkm_wr32(device, 0x409614, 0x00000770);
+		nvkm_wr32(device, 0x40802c, 0x00000001);
+	}
 
-			nvkm_wr32(device, 0x409614, 0x00000070);
-			nvkm_wr32(device, 0x409614, 0x00000770);
-			nvkm_wr32(device, 0x40802c, 0x00000001);
-		}
+	if (gr->data == NULL) {
+		int ret = gf100_grctx_generate(gr);
+		if (ret) {
+			nvkm_error(subdev, "failed to construct context\n");
+			return ret;
 		}
+	}
 
-		if (gr->data == NULL) {
-			int ret = gf100_grctx_generate(gr);
-			if (ret) {
-				nvkm_error(subdev, "failed to construct context\n");
-				return ret;
-			}
-		}
+	return 0;
+}
+
+static int
+gf100_gr_init_ctxctl_int(struct gf100_gr *gr)
+{
+	const struct gf100_grctx_func *grctx = gr->func->grctx;
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
 
-		return 0;
-	} else
 	if (!gr->func->fecs.ucode) {
 		return -ENOSYS;
 	}
 
 	/* load HUB microcode */
 	nvkm_mc_unk260(device, 0);
-	nvkm_wr32(device, 0x4091c0, 0x01000000);
-	for (i = 0; i < gr->func->fecs.ucode->data.size / 4; i++)
-		nvkm_wr32(device, 0x4091c4, gr->func->fecs.ucode->data.data[i]);
-
-	nvkm_wr32(device, 0x409180, 0x01000000);
-	for (i = 0; i < gr->func->fecs.ucode->code.size / 4; i++) {
-		if ((i & 0x3f) == 0)
-			nvkm_wr32(device, 0x409188, i >> 6);
-		nvkm_wr32(device, 0x409184, gr->func->fecs.ucode->code.data[i]);
-	}
+	nvkm_falcon_load_dmem(gr->fecs, gr->func->fecs.ucode->data.data, 0x0,
			      gr->func->fecs.ucode->data.size, 0);
+	nvkm_falcon_load_imem(gr->fecs, gr->func->fecs.ucode->code.data, 0x0,
			      gr->func->fecs.ucode->code.size, 0, 0, false);
 
 	/* load GPC microcode */
-	nvkm_wr32(device, 0x41a1c0, 0x01000000);
-	for (i = 0; i < gr->func->gpccs.ucode->data.size / 4; i++)
-		nvkm_wr32(device, 0x41a1c4, gr->func->gpccs.ucode->data.data[i]);
-
-	nvkm_wr32(device, 0x41a180, 0x01000000);
-	for (i = 0; i < gr->func->gpccs.ucode->code.size / 4; i++) {
-		if ((i & 0x3f) == 0)
-			nvkm_wr32(device, 0x41a188, i >> 6);
-		nvkm_wr32(device, 0x41a184, gr->func->gpccs.ucode->code.data[i]);
-	}
+	nvkm_falcon_load_dmem(gr->gpccs, gr->func->gpccs.ucode->data.data, 0x0,
			      gr->func->gpccs.ucode->data.size, 0);
+	nvkm_falcon_load_imem(gr->gpccs, gr->func->gpccs.ucode->code.data, 0x0,
			      gr->func->gpccs.ucode->code.size, 0, 0, false);
 	nvkm_mc_unk260(device, 1);
 
 	/* load register lists */
@@ -1642,6 +1630,19 @@ gf100_gr_init_ctxctl(struct gf100_gr *gr)
 	return 0;
 }
 
+int
+gf100_gr_init_ctxctl(struct gf100_gr *gr)
+{
+	int ret;
+
+	if (gr->firmware)
+		ret = gf100_gr_init_ctxctl_ext(gr);
+	else
+		ret = gf100_gr_init_ctxctl_int(gr);
+
+	return ret;
+}
+
 static int
 gf100_gr_oneinit(struct nvkm_gr *base)
 {
@@ -1711,10 +1712,32 @@ static int
 gf100_gr_init_(struct nvkm_gr *base)
 {
 	struct gf100_gr *gr = gf100_gr(base);
1715 struct nvkm_subdev *subdev = &base->engine.subdev;
1716 u32 ret;
1717
1714 nvkm_pmu_pgob(gr->base.engine.subdev.device->pmu, false); 1718 nvkm_pmu_pgob(gr->base.engine.subdev.device->pmu, false);
1719
1720 ret = nvkm_falcon_get(gr->fecs, subdev);
1721 if (ret)
1722 return ret;
1723
1724 ret = nvkm_falcon_get(gr->gpccs, subdev);
1725 if (ret)
1726 return ret;
1727
1715 return gr->func->init(gr); 1728 return gr->func->init(gr);
1716} 1729}
1717 1730
1731static int
1732gf100_gr_fini_(struct nvkm_gr *base, bool suspend)
1733{
1734 struct gf100_gr *gr = gf100_gr(base);
1735 struct nvkm_subdev *subdev = &gr->base.engine.subdev;
1736 nvkm_falcon_put(gr->gpccs, subdev);
1737 nvkm_falcon_put(gr->fecs, subdev);
1738 return 0;
1739}
1740
1718void 1741void
1719gf100_gr_dtor_fw(struct gf100_gr_fuc *fuc) 1742gf100_gr_dtor_fw(struct gf100_gr_fuc *fuc)
1720{ 1743{
@@ -1737,6 +1760,9 @@ gf100_gr_dtor(struct nvkm_gr *base)
1737 gr->func->dtor(gr); 1760 gr->func->dtor(gr);
1738 kfree(gr->data); 1761 kfree(gr->data);
1739 1762
1763 nvkm_falcon_del(&gr->gpccs);
1764 nvkm_falcon_del(&gr->fecs);
1765
1740 gf100_gr_dtor_fw(&gr->fuc409c); 1766 gf100_gr_dtor_fw(&gr->fuc409c);
1741 gf100_gr_dtor_fw(&gr->fuc409d); 1767 gf100_gr_dtor_fw(&gr->fuc409d);
1742 gf100_gr_dtor_fw(&gr->fuc41ac); 1768 gf100_gr_dtor_fw(&gr->fuc41ac);
@@ -1755,10 +1781,12 @@ gf100_gr_ = {
1755 .dtor = gf100_gr_dtor, 1781 .dtor = gf100_gr_dtor,
1756 .oneinit = gf100_gr_oneinit, 1782 .oneinit = gf100_gr_oneinit,
1757 .init = gf100_gr_init_, 1783 .init = gf100_gr_init_,
1784 .fini = gf100_gr_fini_,
1758 .intr = gf100_gr_intr, 1785 .intr = gf100_gr_intr,
1759 .units = gf100_gr_units, 1786 .units = gf100_gr_units,
1760 .chan_new = gf100_gr_chan_new, 1787 .chan_new = gf100_gr_chan_new,
1761 .object_get = gf100_gr_object_get, 1788 .object_get = gf100_gr_object_get,
1789 .chsw_load = gf100_gr_chsw_load,
1762}; 1790};
1763 1791
1764int 1792int
@@ -1828,6 +1856,7 @@ int
1828gf100_gr_ctor(const struct gf100_gr_func *func, struct nvkm_device *device, 1856gf100_gr_ctor(const struct gf100_gr_func *func, struct nvkm_device *device,
1829 int index, struct gf100_gr *gr) 1857 int index, struct gf100_gr *gr)
1830{ 1858{
1859 struct nvkm_subdev *subdev = &gr->base.engine.subdev;
1831 int ret; 1860 int ret;
1832 1861
1833 gr->func = func; 1862 gr->func = func;
@@ -1840,7 +1869,11 @@ gf100_gr_ctor(const struct gf100_gr_func *func, struct nvkm_device *device,
1840 if (ret) 1869 if (ret)
1841 return ret; 1870 return ret;
1842 1871
1843 return 0; 1872 ret = nvkm_falcon_v1_new(subdev, "FECS", 0x409000, &gr->fecs);
1873 if (ret)
1874 return ret;
1875
1876 return nvkm_falcon_v1_new(subdev, "GPCCS", 0x41a000, &gr->gpccs);
1844} 1877}
1845 1878
1846int 1879int
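
The blocks of register writes in gf100_gr_init_ctxctl_ext() above all repeat one FECS handshake. As a reading aid only (the helper name is invented and not part of this series), the pattern is:

	/* Sketch: the FECS "method" handshake the code above repeats.
	 * 0x409500 carries the argument, 0x409504 the method id, and
	 * 0x409800 acts as a result mailbox, polled for up to 2s. */
	static int
	fecs_method(struct nvkm_device *device, u32 data, u32 mthd)
	{
		nvkm_wr32(device, 0x409840, 0xffffffff);	/* clear status */
		nvkm_wr32(device, 0x409500, data);		/* argument */
		nvkm_wr32(device, 0x409504, mthd);		/* fire method */
		if (nvkm_msec(device, 2000,
			if (nvkm_rd32(device, 0x409800))	/* wait for result */
				break;
		) < 0)
			return -EBUSY;
		return 0;
	}
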
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index 268b8d60ff73..db6ee3b06841 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -29,6 +29,7 @@
 #include <core/gpuobj.h>
 #include <subdev/ltc.h>
 #include <subdev/mmu.h>
+#include <engine/falcon.h>
 
 #define GPC_MAX 32
 #define TPC_MAX_PER_GPC 8
@@ -75,6 +76,8 @@ struct gf100_gr {
 	const struct gf100_gr_func *func;
 	struct nvkm_gr base;
 
+	struct nvkm_falcon *fecs;
+	struct nvkm_falcon *gpccs;
 	struct gf100_gr_fuc fuc409c;
 	struct gf100_gr_fuc fuc409d;
 	struct gf100_gr_fuc fuc41ac;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c
index 2e68919f00b2..c711a55ce392 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c
@@ -23,6 +23,8 @@
  */
 #include "nv50.h"
 
+#include <nvif/class.h>
+
 static const struct nvkm_gr_func
 gt200_gr = {
 	.init = nv50_gr_init,
@@ -31,11 +33,11 @@ gt200_gr = {
 	.tlb_flush = g84_gr_tlb_flush,
 	.units = nv50_gr_units,
 	.sclass = {
-		{ -1, -1, 0x0030, &nv50_gr_object },
-		{ -1, -1, 0x502d, &nv50_gr_object },
-		{ -1, -1, 0x5039, &nv50_gr_object },
-		{ -1, -1, 0x50c0, &nv50_gr_object },
-		{ -1, -1, 0x8397, &nv50_gr_object },
+		{ -1, -1, NV_NULL_CLASS, &nv50_gr_object },
+		{ -1, -1, NV50_TWOD, &nv50_gr_object },
+		{ -1, -1, NV50_MEMORY_TO_MEMORY_FORMAT, &nv50_gr_object },
+		{ -1, -1, NV50_COMPUTE, &nv50_gr_object },
+		{ -1, -1, GT200_TESLA, &nv50_gr_object },
 		{}
 	}
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c
index 2bf7aac360cc..fa103df32ec7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c
@@ -23,6 +23,8 @@
  */
 #include "nv50.h"
 
+#include <nvif/class.h>
+
 static const struct nvkm_gr_func
 gt215_gr = {
 	.init = nv50_gr_init,
@@ -31,12 +33,12 @@ gt215_gr = {
 	.tlb_flush = g84_gr_tlb_flush,
 	.units = nv50_gr_units,
 	.sclass = {
-		{ -1, -1, 0x0030, &nv50_gr_object },
-		{ -1, -1, 0x502d, &nv50_gr_object },
-		{ -1, -1, 0x5039, &nv50_gr_object },
-		{ -1, -1, 0x50c0, &nv50_gr_object },
-		{ -1, -1, 0x8597, &nv50_gr_object },
-		{ -1, -1, 0x85c0, &nv50_gr_object },
+		{ -1, -1, NV_NULL_CLASS, &nv50_gr_object },
+		{ -1, -1, NV50_TWOD, &nv50_gr_object },
+		{ -1, -1, NV50_MEMORY_TO_MEMORY_FORMAT, &nv50_gr_object },
+		{ -1, -1, NV50_COMPUTE, &nv50_gr_object },
+		{ -1, -1, GT214_TESLA, &nv50_gr_object },
+		{ -1, -1, GT214_COMPUTE, &nv50_gr_object },
 		{}
 	}
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c
index 95d5219faf93..eb1a90644752 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c
@@ -23,6 +23,8 @@
  */
 #include "nv50.h"
 
+#include <nvif/class.h>
+
 static const struct nvkm_gr_func
 mcp79_gr = {
 	.init = nv50_gr_init,
@@ -30,11 +32,11 @@ mcp79_gr = {
 	.chan_new = nv50_gr_chan_new,
 	.units = nv50_gr_units,
 	.sclass = {
-		{ -1, -1, 0x0030, &nv50_gr_object },
-		{ -1, -1, 0x502d, &nv50_gr_object },
-		{ -1, -1, 0x5039, &nv50_gr_object },
-		{ -1, -1, 0x50c0, &nv50_gr_object },
-		{ -1, -1, 0x8397, &nv50_gr_object },
+		{ -1, -1, NV_NULL_CLASS, &nv50_gr_object },
+		{ -1, -1, NV50_TWOD, &nv50_gr_object },
+		{ -1, -1, NV50_MEMORY_TO_MEMORY_FORMAT, &nv50_gr_object },
+		{ -1, -1, NV50_COMPUTE, &nv50_gr_object },
+		{ -1, -1, GT200_TESLA, &nv50_gr_object },
 		{}
 	}
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c
index 027b58e5976b..c91eb56e9327 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c
@@ -23,6 +23,8 @@
  */
 #include "nv50.h"
 
+#include <nvif/class.h>
+
 static const struct nvkm_gr_func
 mcp89_gr = {
 	.init = nv50_gr_init,
@@ -31,12 +33,12 @@ mcp89_gr = {
 	.tlb_flush = g84_gr_tlb_flush,
 	.units = nv50_gr_units,
 	.sclass = {
-		{ -1, -1, 0x0030, &nv50_gr_object },
-		{ -1, -1, 0x502d, &nv50_gr_object },
-		{ -1, -1, 0x5039, &nv50_gr_object },
-		{ -1, -1, 0x50c0, &nv50_gr_object },
-		{ -1, -1, 0x85c0, &nv50_gr_object },
-		{ -1, -1, 0x8697, &nv50_gr_object },
+		{ -1, -1, NV_NULL_CLASS, &nv50_gr_object },
+		{ -1, -1, NV50_TWOD, &nv50_gr_object },
+		{ -1, -1, NV50_MEMORY_TO_MEMORY_FORMAT, &nv50_gr_object },
+		{ -1, -1, NV50_COMPUTE, &nv50_gr_object },
+		{ -1, -1, GT214_COMPUTE, &nv50_gr_object },
+		{ -1, -1, GT21A_TESLA, &nv50_gr_object },
 		{}
 	}
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
index fca67de43f2b..df16ffda1749 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
@@ -27,6 +27,8 @@
 #include <core/gpuobj.h>
 #include <engine/fifo.h>
 
+#include <nvif/class.h>
+
 u64
 nv50_gr_units(struct nvkm_gr *gr)
 {
@@ -778,11 +780,11 @@ nv50_gr = {
 	.chan_new = nv50_gr_chan_new,
 	.units = nv50_gr_units,
 	.sclass = {
-		{ -1, -1, 0x0030, &nv50_gr_object },
-		{ -1, -1, 0x502d, &nv50_gr_object },
-		{ -1, -1, 0x5039, &nv50_gr_object },
-		{ -1, -1, 0x5097, &nv50_gr_object },
-		{ -1, -1, 0x50c0, &nv50_gr_object },
+		{ -1, -1, NV_NULL_CLASS, &nv50_gr_object },
+		{ -1, -1, NV50_TWOD, &nv50_gr_object },
+		{ -1, -1, NV50_MEMORY_TO_MEMORY_FORMAT, &nv50_gr_object },
+		{ -1, -1, NV50_TESLA, &nv50_gr_object },
+		{ -1, -1, NV50_COMPUTE, &nv50_gr_object },
 		{}
 	}
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
index d8adcdf6985a..2a52d9f026ec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
@@ -15,6 +15,7 @@ struct nvkm_gr_func {
 	void *(*dtor)(struct nvkm_gr *);
 	int (*oneinit)(struct nvkm_gr *);
 	int (*init)(struct nvkm_gr *);
+	int (*fini)(struct nvkm_gr *, bool);
 	void (*intr)(struct nvkm_gr *);
 	void (*tile)(struct nvkm_gr *, int region, struct nvkm_fb_tile *);
 	int (*tlb_flush)(struct nvkm_gr *);
@@ -24,6 +25,7 @@ struct nvkm_gr_func {
 	/* Returns chipset-specific counts of units packed into an u64.
 	 */
 	u64 (*units)(struct nvkm_gr *);
+	bool (*chsw_load)(struct nvkm_gr *);
 	struct nvkm_sclass sclass[];
 };
 
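
For orientation, a sketch of how the engine core would route the new fini hook; the dispatch in engine/gr/base.c is not part of this hunk, so treat the body below as an assumption about its shape rather than the actual implementation:

	/* Assumed dispatch shape, following the usual nvkm pattern. */
	static int
	nvkm_gr_fini(struct nvkm_engine *engine, bool suspend)
	{
		struct nvkm_gr *gr = nvkm_gr(engine);
		if (gr->func->fini)
			return gr->func->fini(gr, suspend);
		return 0;
	}
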
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild b/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild
new file mode 100644
index 000000000000..584863db9bfc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild
@@ -0,0 +1,2 @@
1nvkm-y += nvkm/falcon/base.o
2nvkm-y += nvkm/falcon/v1.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
new file mode 100644
index 000000000000..4852f313762f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
@@ -0,0 +1,191 @@
1/*
2 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22#include "priv.h"
23
24#include <subdev/mc.h>
25
26void
27nvkm_falcon_load_imem(struct nvkm_falcon *falcon, void *data, u32 start,
28 u32 size, u16 tag, u8 port, bool secure)
29{
30 if (secure && !falcon->secret) {
31 nvkm_warn(falcon->user,
32 "writing with secure tag on a non-secure falcon!\n");
33 return;
34 }
35
36 falcon->func->load_imem(falcon, data, start, size, tag, port,
37 secure);
38}
39
40void
41nvkm_falcon_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start,
42 u32 size, u8 port)
43{
44 falcon->func->load_dmem(falcon, data, start, size, port);
45}
46
47void
48nvkm_falcon_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size, u8 port,
49 void *data)
50{
51 falcon->func->read_dmem(falcon, start, size, port, data);
52}
53
54void
55nvkm_falcon_bind_context(struct nvkm_falcon *falcon, struct nvkm_gpuobj *inst)
56{
57 if (!falcon->func->bind_context) {
58 nvkm_error(falcon->user,
59 "Context binding not supported on this falcon!\n");
60 return;
61 }
62
63 falcon->func->bind_context(falcon, inst);
64}
65
66void
67nvkm_falcon_set_start_addr(struct nvkm_falcon *falcon, u32 start_addr)
68{
69 falcon->func->set_start_addr(falcon, start_addr);
70}
71
72void
73nvkm_falcon_start(struct nvkm_falcon *falcon)
74{
75 falcon->func->start(falcon);
76}
77
78int
79nvkm_falcon_enable(struct nvkm_falcon *falcon)
80{
81 struct nvkm_device *device = falcon->owner->device;
82 enum nvkm_devidx id = falcon->owner->index;
83 int ret;
84
85 nvkm_mc_enable(device, id);
86 ret = falcon->func->enable(falcon);
87 if (ret) {
88 nvkm_mc_disable(device, id);
89 return ret;
90 }
91
92 return 0;
93}
94
95void
96nvkm_falcon_disable(struct nvkm_falcon *falcon)
97{
98 struct nvkm_device *device = falcon->owner->device;
99 enum nvkm_devidx id = falcon->owner->index;
100
101 /* already disabled, return or wait_idle will timeout */
102 if (!nvkm_mc_enabled(device, id))
103 return;
104
105 falcon->func->disable(falcon);
106
107 nvkm_mc_disable(device, id);
108}
109
110int
111nvkm_falcon_reset(struct nvkm_falcon *falcon)
112{
113 nvkm_falcon_disable(falcon);
114 return nvkm_falcon_enable(falcon);
115}
116
117int
118nvkm_falcon_wait_for_halt(struct nvkm_falcon *falcon, u32 ms)
119{
120 return falcon->func->wait_for_halt(falcon, ms);
121}
122
123int
124nvkm_falcon_clear_interrupt(struct nvkm_falcon *falcon, u32 mask)
125{
126 return falcon->func->clear_interrupt(falcon, mask);
127}
128
129void
130nvkm_falcon_put(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
131{
132 mutex_lock(&falcon->mutex);
133 if (falcon->user == user) {
134 nvkm_debug(falcon->user, "released %s falcon\n", falcon->name);
135 falcon->user = NULL;
136 }
137 mutex_unlock(&falcon->mutex);
138}
139
140int
141nvkm_falcon_get(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
142{
143 mutex_lock(&falcon->mutex);
144 if (falcon->user) {
145 nvkm_error(user, "%s falcon already acquired by %s!\n",
146 falcon->name, nvkm_subdev_name[falcon->user->index]);
147 mutex_unlock(&falcon->mutex);
148 return -EBUSY;
149 }
150
151 nvkm_debug(user, "acquired %s falcon\n", falcon->name);
152 falcon->user = user;
153 mutex_unlock(&falcon->mutex);
154 return 0;
155}
156
157void
158nvkm_falcon_ctor(const struct nvkm_falcon_func *func,
159 struct nvkm_subdev *subdev, const char *name, u32 addr,
160 struct nvkm_falcon *falcon)
161{
162 u32 reg;
163
164 falcon->func = func;
165 falcon->owner = subdev;
166 falcon->name = name;
167 falcon->addr = addr;
168 mutex_init(&falcon->mutex);
169
170 reg = nvkm_falcon_rd32(falcon, 0x12c);
171 falcon->version = reg & 0xf;
172 falcon->secret = (reg >> 4) & 0x3;
173 falcon->code.ports = (reg >> 8) & 0xf;
174 falcon->data.ports = (reg >> 12) & 0xf;
175
176 reg = nvkm_falcon_rd32(falcon, 0x108);
177 falcon->code.limit = (reg & 0x1ff) << 8;
178 falcon->data.limit = (reg & 0x3fe00) >> 1;
179
180 reg = nvkm_falcon_rd32(falcon, 0xc08);
181 falcon->debug = (reg >> 20) & 0x1;
182}
183
184void
185nvkm_falcon_del(struct nvkm_falcon **pfalcon)
186{
187 if (*pfalcon) {
188 kfree(*pfalcon);
189 *pfalcon = NULL;
190 }
191}
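
Taken together, the functions above define an acquire/boot/release discipline for a falcon shared between subdevs. A minimal sketch of a client, assuming the ucode already sits in host buffers (everything except the nvkm_falcon_* calls is invented):

	static int
	example_falcon_boot(struct nvkm_falcon *falcon, struct nvkm_subdev *user,
			    void *code, u32 code_size, void *data, u32 data_size)
	{
		int ret = nvkm_falcon_get(falcon, user);	/* exclusive claim */
		if (ret)
			return ret;

		ret = nvkm_falcon_reset(falcon);		/* disable + enable */
		if (ret)
			goto out;

		nvkm_falcon_load_dmem(falcon, data, 0x0, data_size, 0);
		nvkm_falcon_load_imem(falcon, code, 0x0, code_size, 0, 0, false);
		nvkm_falcon_set_start_addr(falcon, 0x0);
		nvkm_falcon_start(falcon);
		ret = nvkm_falcon_wait_for_halt(falcon, 100);	/* ms */
	out:
		nvkm_falcon_put(falcon, user);
		return ret;
	}
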
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h b/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h
new file mode 100644
index 000000000000..97b56f759d0b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h
@@ -0,0 +1,8 @@
1#ifndef __NVKM_FALCON_PRIV_H__
2#define __NVKM_FALCON_PRIV_H__
3#include <engine/falcon.h>
4
5void
6nvkm_falcon_ctor(const struct nvkm_falcon_func *, struct nvkm_subdev *,
7 const char *, u32, struct nvkm_falcon *);
8#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c b/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c
new file mode 100644
index 000000000000..b537f111f39c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c
@@ -0,0 +1,266 @@
1/*
2 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22#include "priv.h"
23
24#include <core/gpuobj.h>
25#include <core/memory.h>
26#include <subdev/timer.h>
27
28static void
29nvkm_falcon_v1_load_imem(struct nvkm_falcon *falcon, void *data, u32 start,
30 u32 size, u16 tag, u8 port, bool secure)
31{
32 u8 rem = size % 4;
33 u32 reg;
34 int i;
35
36 size -= rem;
37
38 reg = start | BIT(24) | (secure ? BIT(28) : 0);
39 nvkm_falcon_wr32(falcon, 0x180 + (port * 16), reg);
40 for (i = 0; i < size / 4; i++) {
41 /* write new tag every 256B */
42 if ((i & 0x3f) == 0)
43 nvkm_falcon_wr32(falcon, 0x188, tag++);
44 nvkm_falcon_wr32(falcon, 0x184, ((u32 *)data)[i]);
45 }
46
47 /*
48 * If size is not a multiple of 4, mask the last word to ensure garbage
49 * does not get written
50 */
51 if (rem) {
52 u32 extra = ((u32 *)data)[i];
53
54 /* write new tag every 256B */
55 if ((i & 0x3f) == 0)
56 nvkm_falcon_wr32(falcon, 0x188, tag++);
57 nvkm_falcon_wr32(falcon, 0x184, extra & (BIT(rem * 8) - 1));
58 ++i;
59 }
60
61 /* code must be padded to 0x40 words */
62 for (; i & 0x3f; i++)
63 nvkm_falcon_wr32(falcon, 0x184, 0);
64}
65
66static void
67nvkm_falcon_v1_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start,
68 u32 size, u8 port)
69{
70 u8 rem = size % 4;
71 int i;
72
73 size -= rem;
74
75 nvkm_falcon_wr32(falcon, 0x1c0 + (port * 16), start | (0x1 << 24));
76 for (i = 0; i < size / 4; i++)
77 nvkm_falcon_wr32(falcon, 0x1c4, ((u32 *)data)[i]);
78
79 /*
80 * If size is not a multiple of 4, mask the last word to ensure garbage
81 * does not get written
82 */
83 if (rem) {
84 u32 extra = ((u32 *)data)[i];
85
86 nvkm_falcon_wr32(falcon, 0x1c4, extra & (BIT(rem * 8) - 1));
87 }
88}
89
90static void
91nvkm_falcon_v1_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size,
92 u8 port, void *data)
93{
94 u8 rem = size % 4;
95 int i;
96
97 size -= rem;
98
99 nvkm_falcon_wr32(falcon, 0x1c0 + (port * 16), start | (0x1 << 25));
100 for (i = 0; i < size / 4; i++)
101 ((u32 *)data)[i] = nvkm_falcon_rd32(falcon, 0x1c4);
102
103 /*
104 * If size is not a multiple of 4, mask the last word to ensure garbage
105 * does not get read
106 */
107 if (rem) {
108 u32 extra = nvkm_falcon_rd32(falcon, 0x1c4);
109
110 for (i = size; i < size + rem; i++) {
111 ((u8 *)data)[i] = (u8)(extra & 0xff);
112 extra >>= 8;
113 }
114 }
115}
116
117static void
118nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_gpuobj *ctx)
119{
120 u32 inst_loc;
121
122 /* disable instance block binding */
123 if (ctx == NULL) {
124 nvkm_falcon_wr32(falcon, 0x10c, 0x0);
125 return;
126 }
127
128 nvkm_falcon_wr32(falcon, 0x10c, 0x1);
129
130 /* setup apertures - virtual */
131 nvkm_falcon_wr32(falcon, 0xe00 + 4 * FALCON_DMAIDX_UCODE, 0x4);
132 nvkm_falcon_wr32(falcon, 0xe00 + 4 * FALCON_DMAIDX_VIRT, 0x0);
133 /* setup apertures - physical */
134 nvkm_falcon_wr32(falcon, 0xe00 + 4 * FALCON_DMAIDX_PHYS_VID, 0x4);
135 nvkm_falcon_wr32(falcon, 0xe00 + 4 * FALCON_DMAIDX_PHYS_SYS_COH, 0x5);
136 nvkm_falcon_wr32(falcon, 0xe00 + 4 * FALCON_DMAIDX_PHYS_SYS_NCOH, 0x6);
137
138 /* Set context */
139 switch (nvkm_memory_target(ctx->memory)) {
140 case NVKM_MEM_TARGET_VRAM: inst_loc = 0; break;
141 case NVKM_MEM_TARGET_NCOH: inst_loc = 3; break;
142 default:
143 WARN_ON(1);
144 return;
145 }
146
147 /* Enable context */
148 nvkm_falcon_mask(falcon, 0x048, 0x1, 0x1);
149 nvkm_falcon_wr32(falcon, 0x480,
150 ((ctx->addr >> 12) & 0xfffffff) |
151 (inst_loc << 28) | (1 << 30));
152}
153
154static void
155nvkm_falcon_v1_set_start_addr(struct nvkm_falcon *falcon, u32 start_addr)
156{
157 nvkm_falcon_wr32(falcon, 0x104, start_addr);
158}
159
160static void
161nvkm_falcon_v1_start(struct nvkm_falcon *falcon)
162{
163 u32 reg = nvkm_falcon_rd32(falcon, 0x100);
164
165 if (reg & BIT(6))
166 nvkm_falcon_wr32(falcon, 0x130, 0x2);
167 else
168 nvkm_falcon_wr32(falcon, 0x100, 0x2);
169}
170
171static int
172nvkm_falcon_v1_wait_for_halt(struct nvkm_falcon *falcon, u32 ms)
173{
174 struct nvkm_device *device = falcon->owner->device;
175 int ret;
176
177 ret = nvkm_wait_msec(device, ms, falcon->addr + 0x100, 0x10, 0x10);
178 if (ret < 0)
179 return ret;
180
181 return 0;
182}
183
184static int
185nvkm_falcon_v1_clear_interrupt(struct nvkm_falcon *falcon, u32 mask)
186{
187 struct nvkm_device *device = falcon->owner->device;
188 int ret;
189
190 /* clear interrupt(s) */
191 nvkm_falcon_mask(falcon, 0x004, mask, mask);
192 /* wait until interrupts are cleared */
193 ret = nvkm_wait_msec(device, 10, falcon->addr + 0x008, mask, 0x0);
194 if (ret < 0)
195 return ret;
196
197 return 0;
198}
199
200static int
201falcon_v1_wait_idle(struct nvkm_falcon *falcon)
202{
203 struct nvkm_device *device = falcon->owner->device;
204 int ret;
205
206 ret = nvkm_wait_msec(device, 10, falcon->addr + 0x04c, 0xffff, 0x0);
207 if (ret < 0)
208 return ret;
209
210 return 0;
211}
212
213static int
214nvkm_falcon_v1_enable(struct nvkm_falcon *falcon)
215{
216 struct nvkm_device *device = falcon->owner->device;
217 int ret;
218
219 ret = nvkm_wait_msec(device, 10, falcon->addr + 0x10c, 0x6, 0x0);
220 if (ret < 0) {
221 nvkm_error(falcon->user, "Falcon mem scrubbing timeout\n");
222 return ret;
223 }
224
225 ret = falcon_v1_wait_idle(falcon);
226 if (ret)
227 return ret;
228
229 /* enable IRQs */
230 nvkm_falcon_wr32(falcon, 0x010, 0xff);
231
232 return 0;
233}
234
235static void
236nvkm_falcon_v1_disable(struct nvkm_falcon *falcon)
237{
238 /* disable IRQs and wait for any previous code to complete */
239 nvkm_falcon_wr32(falcon, 0x014, 0xff);
240 falcon_v1_wait_idle(falcon);
241}
242
243static const struct nvkm_falcon_func
244nvkm_falcon_v1 = {
245 .load_imem = nvkm_falcon_v1_load_imem,
246 .load_dmem = nvkm_falcon_v1_load_dmem,
247 .read_dmem = nvkm_falcon_v1_read_dmem,
248 .bind_context = nvkm_falcon_v1_bind_context,
249 .start = nvkm_falcon_v1_start,
250 .wait_for_halt = nvkm_falcon_v1_wait_for_halt,
251 .clear_interrupt = nvkm_falcon_v1_clear_interrupt,
252 .enable = nvkm_falcon_v1_enable,
253 .disable = nvkm_falcon_v1_disable,
254 .set_start_addr = nvkm_falcon_v1_set_start_addr,
255};
256
257int
258nvkm_falcon_v1_new(struct nvkm_subdev *owner, const char *name, u32 addr,
259 struct nvkm_falcon **pfalcon)
260{
261 struct nvkm_falcon *falcon;
262 if (!(falcon = *pfalcon = kzalloc(sizeof(*falcon), GFP_KERNEL)))
263 return -ENOMEM;
264 nvkm_falcon_ctor(&nvkm_falcon_v1, owner, name, addr, falcon);
265 return 0;
266}
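
Since the load_dmem() and read_dmem() accessors above are symmetric, a round-trip makes a cheap sanity check. A hedged sketch, not from this series; it assumes the falcon was created with nvkm_falcon_v1_new() and is enabled:

	static int
	example_dmem_roundtrip(struct nvkm_falcon *falcon)
	{
		u32 wr = 0xcafe0001, rd = 0;

		nvkm_falcon_load_dmem(falcon, &wr, 0x0, sizeof(wr), 0);	/* port 0 */
		nvkm_falcon_read_dmem(falcon, 0x0, sizeof(rd), 0, &rd);
		return rd == wr ? 0 : -EIO;	/* mismatch: DMEM access broken */
	}
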
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild
index be57220a2e01..6b4f1e06a38f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild
@@ -19,6 +19,7 @@ nvkm-y += nvkm/subdev/bios/pcir.o
 nvkm-y += nvkm/subdev/bios/perf.o
 nvkm-y += nvkm/subdev/bios/pll.o
 nvkm-y += nvkm/subdev/bios/pmu.o
+nvkm-y += nvkm/subdev/bios/power_budget.o
 nvkm-y += nvkm/subdev/bios/ramcfg.o
 nvkm-y += nvkm/subdev/bios/rammap.o
 nvkm-y += nvkm/subdev/bios/shadow.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/power_budget.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/power_budget.c
new file mode 100644
index 000000000000..617bfffce4ad
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/power_budget.c
@@ -0,0 +1,126 @@
1/*
2 * Copyright 2016 Karol Herbst
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Karol Herbst
23 */
24#include <subdev/bios.h>
25#include <subdev/bios/bit.h>
26#include <subdev/bios/power_budget.h>
27
28static u32
29nvbios_power_budget_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt,
30 u8 *len)
31{
32 struct bit_entry bit_P;
33 u32 power_budget;
34
35 if (bit_entry(bios, 'P', &bit_P) || bit_P.version != 2 ||
36 bit_P.length < 0x2c)
37 return 0;
38
39 power_budget = nvbios_rd32(bios, bit_P.offset + 0x2c);
40 if (!power_budget)
41 return 0;
42
43 *ver = nvbios_rd08(bios, power_budget);
44 switch (*ver) {
45 case 0x20:
46 case 0x30:
47 *hdr = nvbios_rd08(bios, power_budget + 0x1);
48 *len = nvbios_rd08(bios, power_budget + 0x2);
49 *cnt = nvbios_rd08(bios, power_budget + 0x3);
50 return power_budget;
51 default:
52 break;
53 }
54
55 return 0;
56}
57
58int
59nvbios_power_budget_header(struct nvkm_bios *bios,
60 struct nvbios_power_budget *budget)
61{
62 struct nvkm_subdev *subdev = &bios->subdev;
63 u8 ver, hdr, cnt, len, cap_entry;
64 u32 header;
65
66 if (!bios || !budget)
67 return -EINVAL;
68
69 header = nvbios_power_budget_table(bios, &ver, &hdr, &cnt, &len);
70 if (!header || !cnt)
71 return -ENODEV;
72
73 switch (ver) {
74 case 0x20:
75 cap_entry = nvbios_rd08(bios, header + 0x9);
76 break;
77 case 0x30:
78 cap_entry = nvbios_rd08(bios, header + 0xa);
79 break;
80 default:
81 cap_entry = 0xff;
82 }
83
84 if (cap_entry >= cnt && cap_entry != 0xff) {
85 nvkm_warn(subdev,
86 "invalid cap_entry in power budget table found\n");
87 budget->cap_entry = 0xff;
88 return -EINVAL;
89 }
90
91 budget->offset = header;
92 budget->ver = ver;
93 budget->hlen = hdr;
94 budget->elen = len;
95 budget->ecount = cnt;
96
97 budget->cap_entry = cap_entry;
98
99 return 0;
100}
101
102int
103nvbios_power_budget_entry(struct nvkm_bios *bios,
104 struct nvbios_power_budget *budget,
105 u8 idx, struct nvbios_power_budget_entry *entry)
106{
107 u32 entry_offset;
108
109 if (!bios || !budget || !budget->offset || idx >= budget->ecount
110 || !entry)
111 return -EINVAL;
112
113 entry_offset = budget->offset + budget->hlen + idx * budget->elen;
114
115 if (budget->ver >= 0x20) {
116 entry->min_w = nvbios_rd32(bios, entry_offset + 0x2);
117 entry->avg_w = nvbios_rd32(bios, entry_offset + 0x6);
118 entry->max_w = nvbios_rd32(bios, entry_offset + 0xa);
119 } else {
120 entry->min_w = 0;
121 entry->max_w = nvbios_rd32(bios, entry_offset + 0x2);
122 entry->avg_w = entry->max_w;
123 }
124
125 return 0;
126}
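
A sketch of how a consumer pulls the capped budget entry, mirroring what iccsense does further down (the helper name is invented):

	static int
	example_power_cap(struct nvkm_bios *bios, u32 *avg_w, u32 *max_w)
	{
		struct nvbios_power_budget budget;
		struct nvbios_power_budget_entry entry;
		int ret;

		ret = nvbios_power_budget_header(bios, &budget);
		if (ret)
			return ret;
		if (budget.cap_entry == 0xff)	/* no capped entry present */
			return -ENODEV;

		ret = nvbios_power_budget_entry(bios, &budget,
						budget.cap_entry, &entry);
		if (ret)
			return ret;

		*avg_w = entry.avg_w;	/* surfaced as hwmon power_max */
		*max_w = entry.max_w;	/* surfaced as hwmon power_crit */
		return 0;
	}
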
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c
index 5841f297973c..da1770e47490 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c
@@ -112,7 +112,7 @@ read_pll_src(struct nv50_clk *clk, u32 base)
 		M = (coef & 0x000000ff) >> 0;
 		break;
 	default:
-		BUG_ON(1);
+		BUG();
 	}
 
 	if (M)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c
index c714b097719c..59362f8dee22 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c
@@ -50,7 +50,7 @@ nv50_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
 	ret = nv04_pll_calc(subdev, &info, freq, &N1, &M1, &N2, &M2, &P);
 	if (!ret) {
 		nvkm_error(subdev, "failed pll calculation\n");
-		return ret;
+		return -EINVAL;
 	}
 
 	switch (info.type) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
index 093223d1df4f..6758da93a3a1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
@@ -445,7 +445,7 @@ gf100_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
 {
 	struct nvkm_ltc *ltc = ram->fb->subdev.device->ltc;
 	struct nvkm_mm *mm = &ram->vram;
-	struct nvkm_mm_node *r;
+	struct nvkm_mm_node **node, *r;
 	struct nvkm_mem *mem;
 	int type = (memtype & 0x0ff);
 	int back = (memtype & 0x800);
@@ -462,7 +462,6 @@ gf100_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
 	if (!mem)
 		return -ENOMEM;
 
-	INIT_LIST_HEAD(&mem->regions);
 	mem->size = size;
 
 	mutex_lock(&ram->fb->subdev.mutex);
@@ -478,6 +477,7 @@ gf100_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
 	}
 	mem->memtype = type;
 
+	node = &mem->mem;
 	do {
 		if (back)
 			ret = nvkm_mm_tail(mm, 0, 1, size, ncmin, align, &r);
@@ -489,13 +489,13 @@ gf100_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
 			return ret;
 		}
 
-		list_add_tail(&r->rl_entry, &mem->regions);
+		*node = r;
+		node = &r->next;
 		size -= r->length;
 	} while (size);
 	mutex_unlock(&ram->fb->subdev.mutex);
 
-	r = list_first_entry(&mem->regions, struct nvkm_mm_node, rl_entry);
-	mem->offset = (u64)r->offset << NVKM_RAM_MM_SHIFT;
+	mem->offset = (u64)mem->mem->offset << NVKM_RAM_MM_SHIFT;
 	*pmem = mem;
 	return 0;
 }
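
The pattern for consumers after this change: nvkm_mem regions form a singly-linked chain hung off mem->mem rather than a list_head. A sketch of a walk (helper name invented):

	static u64
	example_mem_size(struct nvkm_mem *mem)
	{
		struct nvkm_mm_node *r;
		u64 size = 0;

		for (r = mem->mem; r; r = r->next)	/* replaces list_for_each_entry() */
			size += r->length;
		return size;	/* in nvkm_mm block units, see NVKM_RAM_MM_SHIFT */
	}
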
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
index 7904fa41acef..fb8a1239743d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
@@ -989,7 +989,7 @@ gk104_pll_calc_hiclk(int target_khz, int crystal,
 		       int *N1, int *fN1, int *M1, int *P1,
 		       int *N2, int *M2, int *P2)
 {
-	int best_clk = 0, best_err = target_khz, p_ref, n_ref;
+	int best_err = target_khz, p_ref, n_ref;
 	bool upper = false;
 
 	*M1 = 1;
@@ -1010,7 +1010,6 @@ gk104_pll_calc_hiclk(int target_khz, int crystal,
 			/* we found a better combination */
 			if (cur_err < best_err) {
 				best_err = cur_err;
-				best_clk = cur_clk;
 				*N2 = cur_N;
 				*N1 = n_ref;
 				*P1 = p_ref;
@@ -1022,7 +1021,6 @@ gk104_pll_calc_hiclk(int target_khz, int crystal,
 					- target_khz;
 				if (cur_err < best_err) {
 					best_err = cur_err;
-					best_clk = cur_clk;
 					*N2 = cur_N;
 					*N1 = n_ref;
 					*P1 = p_ref;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c
index 0a0e44b75577..017a91de74a0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c
@@ -39,7 +39,7 @@ mcp77_ram_init(struct nvkm_ram *base)
 	u32 flush = ((ram->base.size - (ram->poller_base + 0x40)) >> 5) - 1;
 
 	/* Enable NISO poller for various clients and set their associated
-	 * read address, only for MCP77/78 and MCP79/7A. (fd#25701)
+	 * read address, only for MCP77/78 and MCP79/7A. (fd#27501)
 	 */
 	nvkm_wr32(device, 0x100c18, dniso);
 	nvkm_mask(device, 0x100c14, 0x00000000, 0x00000001);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c
index 87bde8ff2d6b..6549b0588309 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c
@@ -496,15 +496,12 @@ nv50_ram_tidy(struct nvkm_ram *base)
 void
 __nv50_ram_put(struct nvkm_ram *ram, struct nvkm_mem *mem)
 {
-	struct nvkm_mm_node *this;
-
-	while (!list_empty(&mem->regions)) {
-		this = list_first_entry(&mem->regions, typeof(*this), rl_entry);
-
-		list_del(&this->rl_entry);
-		nvkm_mm_free(&ram->vram, &this);
+	struct nvkm_mm_node *next = mem->mem;
+	struct nvkm_mm_node *node;
+	while ((node = next)) {
+		next = node->next;
+		nvkm_mm_free(&ram->vram, &node);
 	}
-
 	nvkm_mm_free(&ram->tags, &mem->tag);
 }
 
@@ -530,7 +527,7 @@ nv50_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
 {
 	struct nvkm_mm *heap = &ram->vram;
 	struct nvkm_mm *tags = &ram->tags;
-	struct nvkm_mm_node *r;
+	struct nvkm_mm_node **node, *r;
 	struct nvkm_mem *mem;
 	int comp = (memtype & 0x300) >> 8;
 	int type = (memtype & 0x07f);
@@ -559,11 +556,11 @@ nv50_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
 		comp = 0;
 	}
 
-	INIT_LIST_HEAD(&mem->regions);
 	mem->memtype = (comp << 7) | type;
 	mem->size = max;
 
 	type = nv50_fb_memtype[type];
+	node = &mem->mem;
 	do {
 		if (back)
 			ret = nvkm_mm_tail(heap, 0, type, max, min, align, &r);
@@ -575,13 +572,13 @@ nv50_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
 			return ret;
 		}
 
-		list_add_tail(&r->rl_entry, &mem->regions);
+		*node = r;
+		node = &r->next;
 		max -= r->length;
 	} while (max);
 	mutex_unlock(&ram->fb->subdev.mutex);
 
-	r = list_first_entry(&mem->regions, struct nvkm_mm_node, rl_entry);
-	mem->offset = (u64)r->offset << NVKM_RAM_MM_SHIFT;
+	mem->offset = (u64)mem->mem->offset << NVKM_RAM_MM_SHIFT;
 	*pmem = mem;
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
index f0af2a381eea..fecfa6afcf54 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
@@ -26,6 +26,7 @@
 #include <subdev/bios.h>
 #include <subdev/bios/extdev.h>
 #include <subdev/bios/iccsense.h>
+#include <subdev/bios/power_budget.h>
 #include <subdev/i2c.h>
 
 static bool
@@ -216,10 +217,25 @@ nvkm_iccsense_oneinit(struct nvkm_subdev *subdev)
 {
 	struct nvkm_iccsense *iccsense = nvkm_iccsense(subdev);
 	struct nvkm_bios *bios = subdev->device->bios;
+	struct nvbios_power_budget budget;
 	struct nvbios_iccsense stbl;
-	int i;
+	int i, ret;
 
-	if (!bios || nvbios_iccsense_parse(bios, &stbl) || !stbl.nr_entry)
+	if (!bios)
+		return 0;
+
+	ret = nvbios_power_budget_header(bios, &budget);
+	if (!ret && budget.cap_entry != 0xff) {
+		struct nvbios_power_budget_entry entry;
+		ret = nvbios_power_budget_entry(bios, &budget,
+						budget.cap_entry, &entry);
+		if (!ret) {
+			iccsense->power_w_max  = entry.avg_w;
+			iccsense->power_w_crit = entry.max_w;
+		}
+	}
+
+	if (nvbios_iccsense_parse(bios, &stbl) || !stbl.nr_entry)
 		return 0;
 
 	iccsense->data_valid = true;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
index a6a7fa0d7679..9dec58ec3d9f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
@@ -116,7 +116,7 @@ struct gk20a_instmem {
 static enum nvkm_memory_target
 gk20a_instobj_target(struct nvkm_memory *memory)
 {
-	return NVKM_MEM_TARGET_HOST;
+	return NVKM_MEM_TARGET_NCOH;
 }
 
 static u64
@@ -305,11 +305,11 @@ gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
 	struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
 	struct gk20a_instmem *imem = node->base.imem;
 	struct device *dev = imem->base.subdev.device->dev;
-	struct nvkm_mm_node *r;
+	struct nvkm_mm_node *r = node->base.mem.mem;
 	unsigned long flags;
 	int i;
 
-	if (unlikely(list_empty(&node->base.mem.regions)))
+	if (unlikely(!r))
 		goto out;
 
 	spin_lock_irqsave(&imem->lock, flags);
@@ -320,9 +320,6 @@ gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
 
 	spin_unlock_irqrestore(&imem->lock, flags);
 
-	r = list_first_entry(&node->base.mem.regions, struct nvkm_mm_node,
-			     rl_entry);
-
 	/* clear IOMMU bit to unmap pages */
 	r->offset &= ~BIT(imem->iommu_bit - imem->iommu_pgshift);
 
@@ -404,10 +401,7 @@ gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
 	node->r.length = (npages << PAGE_SHIFT) >> 12;
 
 	node->base.mem.offset = node->handle;
-
-	INIT_LIST_HEAD(&node->base.mem.regions);
-	list_add_tail(&node->r.rl_entry, &node->base.mem.regions);
-
+	node->base.mem.mem = &node->r;
 	return 0;
 }
 
@@ -484,10 +478,7 @@ gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
 	r->offset |= BIT(imem->iommu_bit - imem->iommu_pgshift);
 
 	node->base.mem.offset = ((u64)r->offset) << imem->iommu_pgshift;
-
-	INIT_LIST_HEAD(&node->base.mem.regions);
-	list_add_tail(&r->rl_entry, &node->base.mem.regions);
-
+	node->base.mem.mem = r;
 	return 0;
 
 release_area:
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
index 6b25e25f9eba..09f669ac6630 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
@@ -161,6 +161,16 @@ nvkm_mc_enable(struct nvkm_device *device, enum nvkm_devidx devidx)
 	}
 }
 
+bool
+nvkm_mc_enabled(struct nvkm_device *device, enum nvkm_devidx devidx)
+{
+	u64 pmc_enable = nvkm_mc_reset_mask(device, false, devidx);
+
+	return (pmc_enable != 0) &&
+	       ((nvkm_rd32(device, 0x000200) & pmc_enable) == pmc_enable);
+}
+
+
 static int
 nvkm_mc_fini(struct nvkm_subdev *subdev, bool suspend)
 {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
index 5df9669ea39c..d06ad2c372bf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
@@ -31,7 +31,7 @@ nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
 {
 	struct nvkm_vm *vm = vma->vm;
 	struct nvkm_mmu *mmu = vm->mmu;
-	struct nvkm_mm_node *r;
+	struct nvkm_mm_node *r = node->mem;
 	int big = vma->node->type != mmu->func->spg_shift;
 	u32 offset = vma->node->offset + (delta >> 12);
 	u32 bits = vma->node->type - 12;
@@ -41,7 +41,7 @@ nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
 	u32 end, len;
 
 	delta = 0;
-	list_for_each_entry(r, &node->regions, rl_entry) {
+	while (r) {
 		u64 phys = (u64)r->offset << 12;
 		u32 num  = r->length >> bits;
 
@@ -65,7 +65,8 @@ nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
 
 			delta += (u64)len << vma->node->type;
 		}
-	}
+		r = r->next;
+	};
 
 	mmu->func->flush(vm);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
index 2a31b7d66a6d..87bf41cef0c6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
@@ -6,6 +6,7 @@ nvkm-y += nvkm/subdev/pci/nv40.o
 nvkm-y += nvkm/subdev/pci/nv46.o
 nvkm-y += nvkm/subdev/pci/nv4c.o
 nvkm-y += nvkm/subdev/pci/g84.o
+nvkm-y += nvkm/subdev/pci/g92.o
 nvkm-y += nvkm/subdev/pci/g94.o
 nvkm-y += nvkm/subdev/pci/gf100.o
 nvkm-y += nvkm/subdev/pci/gf106.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c
new file mode 100644
index 000000000000..48874359d5f6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c
@@ -0,0 +1,57 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "priv.h"
25
26int
27g92_pcie_version_supported(struct nvkm_pci *pci)
28{
29 if ((nvkm_pci_rd32(pci, 0x460) & 0x200) == 0x200)
30 return 2;
31 return 1;
32}
33
34static const struct nvkm_pci_func
35g92_pci_func = {
36 .init = g84_pci_init,
37 .rd32 = nv40_pci_rd32,
38 .wr08 = nv40_pci_wr08,
39 .wr32 = nv40_pci_wr32,
40 .msi_rearm = nv46_pci_msi_rearm,
41
42 .pcie.init = g84_pcie_init,
43 .pcie.set_link = g84_pcie_set_link,
44
45 .pcie.max_speed = g84_pcie_max_speed,
46 .pcie.cur_speed = g84_pcie_cur_speed,
47
48 .pcie.set_version = g84_pcie_set_version,
49 .pcie.version = g84_pcie_version,
50 .pcie.version_supported = g92_pcie_version_supported,
51};
52
53int
54g92_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
55{
56 return nvkm_pci_new_(&g92_pci_func, device, index, ppci);
57}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c
index 43444123bc04..09adb37a5664 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c
@@ -23,14 +23,6 @@
  */
 #include "priv.h"
 
-int
-g94_pcie_version_supported(struct nvkm_pci *pci)
-{
-	if ((nvkm_pci_rd32(pci, 0x460) & 0x200) == 0x200)
-		return 2;
-	return 1;
-}
-
 static const struct nvkm_pci_func
 g94_pci_func = {
 	.init = g84_pci_init,
@@ -47,7 +39,7 @@ g94_pci_func = {
 
 	.pcie.set_version = g84_pcie_set_version,
 	.pcie.version = g84_pcie_version,
-	.pcie.version_supported = g94_pcie_version_supported,
+	.pcie.version_supported = g92_pcie_version_supported,
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c
index e30ea676baf6..00a5e7d3ee9d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c
@@ -92,7 +92,7 @@ gf100_pci_func = {
 
 	.pcie.set_version = gf100_pcie_set_version,
 	.pcie.version = gf100_pcie_version,
-	.pcie.version_supported = g94_pcie_version_supported,
+	.pcie.version_supported = g92_pcie_version_supported,
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c
index c3b798c5c6dd..11bf419afe3f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c
@@ -39,7 +39,7 @@ gf106_pci_func = {
 
 	.pcie.set_version = gf100_pcie_set_version,
 	.pcie.version = gf100_pcie_version,
-	.pcie.version_supported = g94_pcie_version_supported,
+	.pcie.version_supported = g92_pcie_version_supported,
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
index 23de3180aae5..86921ec962d6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
@@ -44,7 +44,7 @@ enum nvkm_pcie_speed g84_pcie_max_speed(struct nvkm_pci *);
 int g84_pcie_init(struct nvkm_pci *);
 int g84_pcie_set_link(struct nvkm_pci *, enum nvkm_pcie_speed, u8);
 
-int g94_pcie_version_supported(struct nvkm_pci *);
+int g92_pcie_version_supported(struct nvkm_pci *);
 
 void gf100_pcie_set_version(struct nvkm_pci *, u8);
 int gf100_pcie_version(struct nvkm_pci *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
index 51fb4bf94a44..ca57c1e491b0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
@@ -8,5 +8,6 @@ nvkm-y += nvkm/subdev/pmu/gk110.o
 nvkm-y += nvkm/subdev/pmu/gk208.o
 nvkm-y += nvkm/subdev/pmu/gk20a.o
 nvkm-y += nvkm/subdev/pmu/gm107.o
+nvkm-y += nvkm/subdev/pmu/gm20b.o
 nvkm-y += nvkm/subdev/pmu/gp100.o
 nvkm-y += nvkm/subdev/pmu/gp102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
index e611ce80f8ef..a73f690eb4b5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
@@ -116,6 +116,8 @@ nvkm_pmu_init(struct nvkm_subdev *subdev)
 static void *
 nvkm_pmu_dtor(struct nvkm_subdev *subdev)
 {
+	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
+	nvkm_falcon_del(&pmu->falcon);
 	return nvkm_pmu(subdev);
 }
 
@@ -129,15 +131,22 @@ nvkm_pmu = {
129}; 131};
130 132
131int 133int
134nvkm_pmu_ctor(const struct nvkm_pmu_func *func, struct nvkm_device *device,
135 int index, struct nvkm_pmu *pmu)
136{
137 nvkm_subdev_ctor(&nvkm_pmu, device, index, &pmu->subdev);
138 pmu->func = func;
139 INIT_WORK(&pmu->recv.work, nvkm_pmu_recv);
140 init_waitqueue_head(&pmu->recv.wait);
141 return nvkm_falcon_v1_new(&pmu->subdev, "PMU", 0x10a000, &pmu->falcon);
142}
143
144int
132nvkm_pmu_new_(const struct nvkm_pmu_func *func, struct nvkm_device *device, 145nvkm_pmu_new_(const struct nvkm_pmu_func *func, struct nvkm_device *device,
133 int index, struct nvkm_pmu **ppmu) 146 int index, struct nvkm_pmu **ppmu)
134{ 147{
135 struct nvkm_pmu *pmu; 148 struct nvkm_pmu *pmu;
136 if (!(pmu = *ppmu = kzalloc(sizeof(*pmu), GFP_KERNEL))) 149 if (!(pmu = *ppmu = kzalloc(sizeof(*pmu), GFP_KERNEL)))
137 return -ENOMEM; 150 return -ENOMEM;
138 nvkm_subdev_ctor(&nvkm_pmu, device, index, &pmu->subdev); 151 return nvkm_pmu_ctor(func, device, index, *ppmu);
139 pmu->func = func;
140 INIT_WORK(&pmu->recv.work, nvkm_pmu_recv);
141 init_waitqueue_head(&pmu->recv.wait);
142 return 0;
143} 152}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
index f996d90c9f0d..9ca0db796cbe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
@@ -19,7 +19,7 @@
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE. 20 * DEALINGS IN THE SOFTWARE.
21 */ 21 */
22#define gk20a_pmu(p) container_of((p), struct gk20a_pmu, base.subdev) 22#define gk20a_pmu(p) container_of((p), struct gk20a_pmu, base)
23#include "priv.h" 23#include "priv.h"
24 24
25#include <subdev/clk.h> 25#include <subdev/clk.h>
@@ -43,9 +43,8 @@ struct gk20a_pmu {
43}; 43};
44 44
45struct gk20a_pmu_dvfs_dev_status { 45struct gk20a_pmu_dvfs_dev_status {
46 unsigned long total; 46 u32 total;
47 unsigned long busy; 47 u32 busy;
48 int cur_state;
49}; 48};
50 49
51static int 50static int
@@ -56,13 +55,12 @@ gk20a_pmu_dvfs_target(struct gk20a_pmu *pmu, int *state)
56 return nvkm_clk_astate(clk, *state, 0, false); 55 return nvkm_clk_astate(clk, *state, 0, false);
57} 56}
58 57
59static int 58static void
60gk20a_pmu_dvfs_get_cur_state(struct gk20a_pmu *pmu, int *state) 59gk20a_pmu_dvfs_get_cur_state(struct gk20a_pmu *pmu, int *state)
61{ 60{
62 struct nvkm_clk *clk = pmu->base.subdev.device->clk; 61 struct nvkm_clk *clk = pmu->base.subdev.device->clk;
63 62
64 *state = clk->pstate; 63 *state = clk->pstate;
65 return 0;
66} 64}
67 65
68static int 66static int
@@ -90,28 +88,26 @@ gk20a_pmu_dvfs_get_target_state(struct gk20a_pmu *pmu,
90 88
91 *state = level; 89 *state = level;
92 90
93 if (level == cur_level) 91 return (level != cur_level);
94 return 0;
95 else
96 return 1;
97} 92}
98 93
99static int 94static void
100gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu *pmu, 95gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu *pmu,
101 struct gk20a_pmu_dvfs_dev_status *status) 96 struct gk20a_pmu_dvfs_dev_status *status)
102{ 97{
103 struct nvkm_device *device = pmu->base.subdev.device; 98 struct nvkm_falcon *falcon = pmu->base.falcon;
104 status->busy = nvkm_rd32(device, 0x10a508 + (BUSY_SLOT * 0x10)); 99
105 status->total= nvkm_rd32(device, 0x10a508 + (CLK_SLOT * 0x10)); 100 status->busy = nvkm_falcon_rd32(falcon, 0x508 + (BUSY_SLOT * 0x10));
106	return 0;	101	status->total = nvkm_falcon_rd32(falcon, 0x508 + (CLK_SLOT * 0x10));
107} 102}
108 103
109static void 104static void
110gk20a_pmu_dvfs_reset_dev_status(struct gk20a_pmu *pmu) 105gk20a_pmu_dvfs_reset_dev_status(struct gk20a_pmu *pmu)
111{ 106{
112 struct nvkm_device *device = pmu->base.subdev.device; 107 struct nvkm_falcon *falcon = pmu->base.falcon;
113 nvkm_wr32(device, 0x10a508 + (BUSY_SLOT * 0x10), 0x80000000); 108
114 nvkm_wr32(device, 0x10a508 + (CLK_SLOT * 0x10), 0x80000000); 109 nvkm_falcon_wr32(falcon, 0x508 + (BUSY_SLOT * 0x10), 0x80000000);
110 nvkm_falcon_wr32(falcon, 0x508 + (CLK_SLOT * 0x10), 0x80000000);
115} 111}
116 112
117static void 113static void
@@ -127,7 +123,7 @@ gk20a_pmu_dvfs_work(struct nvkm_alarm *alarm)
127 struct nvkm_timer *tmr = device->timer; 123 struct nvkm_timer *tmr = device->timer;
128 struct nvkm_volt *volt = device->volt; 124 struct nvkm_volt *volt = device->volt;
129 u32 utilization = 0; 125 u32 utilization = 0;
130 int state, ret; 126 int state;
131 127
132 /* 128 /*
133 * The PMU is initialized before CLK and VOLT, so we have to make sure the 129 * The PMU is initialized before CLK and VOLT, so we have to make sure the
@@ -136,11 +132,7 @@ gk20a_pmu_dvfs_work(struct nvkm_alarm *alarm)
136 if (!clk || !volt) 132 if (!clk || !volt)
137 goto resched; 133 goto resched;
138 134
139 ret = gk20a_pmu_dvfs_get_dev_status(pmu, &status); 135 gk20a_pmu_dvfs_get_dev_status(pmu, &status);
140 if (ret) {
141 nvkm_warn(subdev, "failed to get device status\n");
142 goto resched;
143 }
144 136
145 if (status.total) 137 if (status.total)
146 utilization = div_u64((u64)status.busy * 100, status.total); 138 utilization = div_u64((u64)status.busy * 100, status.total);
@@ -150,11 +142,7 @@ gk20a_pmu_dvfs_work(struct nvkm_alarm *alarm)
150 nvkm_trace(subdev, "utilization = %d %%, avg_load = %d %%\n", 142 nvkm_trace(subdev, "utilization = %d %%, avg_load = %d %%\n",
151 utilization, data->avg_load); 143 utilization, data->avg_load);
152 144
153 ret = gk20a_pmu_dvfs_get_cur_state(pmu, &state); 145 gk20a_pmu_dvfs_get_cur_state(pmu, &state);
154 if (ret) {
155 nvkm_warn(subdev, "failed to get current state\n");
156 goto resched;
157 }
158 146
159 if (gk20a_pmu_dvfs_get_target_state(pmu, &state, data->avg_load)) { 147 if (gk20a_pmu_dvfs_get_target_state(pmu, &state, data->avg_load)) {
160 nvkm_trace(subdev, "set new state to %d\n", state); 148 nvkm_trace(subdev, "set new state to %d\n", state);
@@ -166,32 +154,36 @@ resched:
166 nvkm_timer_alarm(tmr, 100000000, alarm); 154 nvkm_timer_alarm(tmr, 100000000, alarm);
167} 155}
168 156
169static int 157static void
170gk20a_pmu_fini(struct nvkm_subdev *subdev, bool suspend) 158gk20a_pmu_fini(struct nvkm_pmu *pmu)
171{ 159{
172 struct gk20a_pmu *pmu = gk20a_pmu(subdev); 160 struct gk20a_pmu *gpmu = gk20a_pmu(pmu);
173 nvkm_timer_alarm_cancel(subdev->device->timer, &pmu->alarm); 161 nvkm_timer_alarm_cancel(pmu->subdev.device->timer, &gpmu->alarm);
174 return 0;
175}
176 162
177static void * 163 nvkm_falcon_put(pmu->falcon, &pmu->subdev);
178gk20a_pmu_dtor(struct nvkm_subdev *subdev)
179{
180 return gk20a_pmu(subdev);
181} 164}
182 165
183static int 166static int
184gk20a_pmu_init(struct nvkm_subdev *subdev) 167gk20a_pmu_init(struct nvkm_pmu *pmu)
185{ 168{
186 struct gk20a_pmu *pmu = gk20a_pmu(subdev); 169 struct gk20a_pmu *gpmu = gk20a_pmu(pmu);
187 struct nvkm_device *device = pmu->base.subdev.device; 170 struct nvkm_subdev *subdev = &pmu->subdev;
171 struct nvkm_device *device = pmu->subdev.device;
172 struct nvkm_falcon *falcon = pmu->falcon;
173 int ret;
174
175 ret = nvkm_falcon_get(falcon, subdev);
176 if (ret) {
177 nvkm_error(subdev, "cannot acquire %s falcon!\n", falcon->name);
178 return ret;
179 }
188 180
189 /* init pwr perf counter */ 181 /* init pwr perf counter */
190 nvkm_wr32(device, 0x10a504 + (BUSY_SLOT * 0x10), 0x00200001); 182 nvkm_falcon_wr32(falcon, 0x504 + (BUSY_SLOT * 0x10), 0x00200001);
191 nvkm_wr32(device, 0x10a50c + (BUSY_SLOT * 0x10), 0x00000002); 183 nvkm_falcon_wr32(falcon, 0x50c + (BUSY_SLOT * 0x10), 0x00000002);
192 nvkm_wr32(device, 0x10a50c + (CLK_SLOT * 0x10), 0x00000003); 184 nvkm_falcon_wr32(falcon, 0x50c + (CLK_SLOT * 0x10), 0x00000003);
193 185
194 nvkm_timer_alarm(device->timer, 2000000000, &pmu->alarm); 186 nvkm_timer_alarm(device->timer, 2000000000, &gpmu->alarm);
195 return 0; 187 return 0;
196} 188}
197 189
@@ -202,26 +194,26 @@ gk20a_dvfs_data= {
202 .p_smooth = 1, 194 .p_smooth = 1,
203}; 195};
204 196
205static const struct nvkm_subdev_func 197static const struct nvkm_pmu_func
206gk20a_pmu = { 198gk20a_pmu = {
207 .init = gk20a_pmu_init, 199 .init = gk20a_pmu_init,
208 .fini = gk20a_pmu_fini, 200 .fini = gk20a_pmu_fini,
209 .dtor = gk20a_pmu_dtor, 201 .reset = gt215_pmu_reset,
210}; 202};
211 203
212int 204int
213gk20a_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) 205gk20a_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
214{ 206{
215 static const struct nvkm_pmu_func func = {};
216 struct gk20a_pmu *pmu; 207 struct gk20a_pmu *pmu;
217 208
218 if (!(pmu = kzalloc(sizeof(*pmu), GFP_KERNEL))) 209 if (!(pmu = kzalloc(sizeof(*pmu), GFP_KERNEL)))
219 return -ENOMEM; 210 return -ENOMEM;
220 pmu->base.func = &func;
221 *ppmu = &pmu->base; 211 *ppmu = &pmu->base;
222 212
223 nvkm_subdev_ctor(&gk20a_pmu, device, index, &pmu->base.subdev); 213 nvkm_pmu_ctor(&gk20a_pmu, device, index, &pmu->base);
214
224 pmu->data = &gk20a_dvfs_data; 215 pmu->data = &gk20a_dvfs_data;
225 nvkm_alarm_init(&pmu->alarm, gk20a_pmu_dvfs_work); 216 nvkm_alarm_init(&pmu->alarm, gk20a_pmu_dvfs_work);
217
226 return 0; 218 return 0;
227} 219}
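Note the acquire/release discipline this hunk introduces: the PMU falcon must be obtained with nvkm_falcon_get() before its register window is touched, and released with nvkm_falcon_put() when done. A condensed, hedged sketch of that pattern (the example_ function name is made up, and BUSY_SLOT is the slot constant used above):

/* Hedged sketch of the falcon acquire/use/release pattern above. */
static int
example_read_pmu_counter(struct nvkm_pmu *pmu, u32 *val)
{
        struct nvkm_subdev *subdev = &pmu->subdev;
        int ret;

        ret = nvkm_falcon_get(pmu->falcon, subdev);
        if (ret)
                return ret;

        /* the falcon register window is now safe to access */
        *val = nvkm_falcon_rd32(pmu->falcon, 0x508 + (BUSY_SLOT * 0x10));

        nvkm_falcon_put(pmu->falcon, subdev);
        return 0;
}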
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
new file mode 100644
index 000000000000..0b8a1cc4a0ee
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
@@ -0,0 +1,34 @@
1/*
2 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include "priv.h"
24
25static const struct nvkm_pmu_func
26gm20b_pmu = {
27 .reset = gt215_pmu_reset,
28};
29
30int
31gm20b_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
32{
33 return nvkm_pmu_new_(&gm20b_pmu, device, index, ppmu);
34}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
index 2e2179a4ad17..096cba069f72 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
@@ -4,6 +4,8 @@
4#include <subdev/pmu.h> 4#include <subdev/pmu.h>
5#include <subdev/pmu/fuc/os.h> 5#include <subdev/pmu/fuc/os.h>
6 6
7int nvkm_pmu_ctor(const struct nvkm_pmu_func *, struct nvkm_device *,
8 int index, struct nvkm_pmu *);
7int nvkm_pmu_new_(const struct nvkm_pmu_func *, struct nvkm_device *, 9int nvkm_pmu_new_(const struct nvkm_pmu_func *, struct nvkm_device *,
8 int index, struct nvkm_pmu **); 10 int index, struct nvkm_pmu **);
9 11
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild
index b02b868a6589..5076d1500f47 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild
@@ -1,3 +1,7 @@
1nvkm-y += nvkm/subdev/secboot/base.o 1nvkm-y += nvkm/subdev/secboot/base.o
2nvkm-y += nvkm/subdev/secboot/ls_ucode_gr.o
3nvkm-y += nvkm/subdev/secboot/acr.o
4nvkm-y += nvkm/subdev/secboot/acr_r352.o
5nvkm-y += nvkm/subdev/secboot/acr_r361.o
2nvkm-y += nvkm/subdev/secboot/gm200.o 6nvkm-y += nvkm/subdev/secboot/gm200.o
3nvkm-y += nvkm/subdev/secboot/gm20b.o 7nvkm-y += nvkm/subdev/secboot/gm20b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c
new file mode 100644
index 000000000000..75dc06557877
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c
@@ -0,0 +1,54 @@
1/*
2 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include "acr.h"
24
25#include <core/firmware.h>
26
27/**
28 * Convenience function to duplicate a firmware file in memory and check that
29 * it has the required minimum size.
30 */
31void *
32nvkm_acr_load_firmware(const struct nvkm_subdev *subdev, const char *name,
33 size_t min_size)
34{
35 const struct firmware *fw;
36 void *blob;
37 int ret;
38
39 ret = nvkm_firmware_get(subdev->device, name, &fw);
40 if (ret)
41 return ERR_PTR(ret);
42 if (fw->size < min_size) {
43 nvkm_error(subdev, "%s is smaller than expected size %zu\n",
44 name, min_size);
45 nvkm_firmware_put(fw);
46 return ERR_PTR(-EINVAL);
47 }
48 blob = kmemdup(fw->data, fw->size, GFP_KERNEL);
49 nvkm_firmware_put(fw);
50 if (!blob)
51 return ERR_PTR(-ENOMEM);
52
53 return blob;
54}
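A typical caller treats the returned pointer with the usual ERR_PTR conventions and owns the duplicated buffer afterwards. A hedged usage sketch, where the "acr/example" firmware name and the example_ wrapper are made up:

/* Hedged usage sketch; "acr/example" is a placeholder firmware name. */
static int
example_load(const struct nvkm_subdev *subdev)
{
        void *blob;

        blob = nvkm_acr_load_firmware(subdev, "acr/example", sizeof(u32));
        if (IS_ERR(blob))
                return PTR_ERR(blob);

        /* ... parse the duplicated image ... */

        kfree(blob);    /* caller owns the kmemdup'd copy */
        return 0;
}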
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h
new file mode 100644
index 000000000000..97795b342b6f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h
@@ -0,0 +1,69 @@
1/*
2 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22#ifndef __NVKM_SECBOOT_ACR_H__
23#define __NVKM_SECBOOT_ACR_H__
24
25#include "priv.h"
26
27struct nvkm_acr;
28
29/**
30 * struct nvkm_acr_func - properties and functions specific to an ACR
31 *
32 * @load: make the ACR ready to run on the given secboot device
33 * @reset: reset the specified falcon
34 * @start: start the specified falcon (assumed to have been reset)
35 */
36struct nvkm_acr_func {
37 void (*dtor)(struct nvkm_acr *);
38 int (*oneinit)(struct nvkm_acr *, struct nvkm_secboot *);
39 int (*fini)(struct nvkm_acr *, struct nvkm_secboot *, bool);
40 int (*load)(struct nvkm_acr *, struct nvkm_secboot *,
41 struct nvkm_gpuobj *, u64);
42 int (*reset)(struct nvkm_acr *, struct nvkm_secboot *,
43 enum nvkm_secboot_falcon);
44 int (*start)(struct nvkm_acr *, struct nvkm_secboot *,
45 enum nvkm_secboot_falcon);
46};
47
48/**
49 * struct nvkm_acr - instance of an ACR
50 *
51 * @boot_falcon: ID of the falcon that will perform secure boot
52 * @managed_falcons: bitfield of falcons managed by this ACR
53 * @start_address: virtual start address of the HS bootloader
54 */
55struct nvkm_acr {
56 const struct nvkm_acr_func *func;
57 const struct nvkm_subdev *subdev;
58
59 enum nvkm_secboot_falcon boot_falcon;
60 unsigned long managed_falcons;
61 u32 start_address;
62};
63
64void *nvkm_acr_load_firmware(const struct nvkm_subdev *, const char *, size_t);
65
66struct nvkm_acr *acr_r352_new(unsigned long);
67struct nvkm_acr *acr_r361_new(unsigned long);
68
69#endif
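Since managed_falcons is a bitfield indexed by enum nvkm_secboot_falcon, instantiating an ACR for a given set of falcons is a matter of OR-ing BIT() masks together. A hedged sketch, not taken from an actual caller:

/* Hedged sketch, inside an assumed probe path; not from a real caller. */
unsigned long managed = BIT(NVKM_SECBOOT_FALCON_FECS) |
                        BIT(NVKM_SECBOOT_FALCON_GPCCS);
struct nvkm_acr *acr = acr_r352_new(managed);

if (IS_ERR(acr))
        return PTR_ERR(acr);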
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
new file mode 100644
index 000000000000..1aa37ea18580
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
@@ -0,0 +1,936 @@
1/*
2 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include "acr_r352.h"
24
25#include <core/gpuobj.h>
26#include <core/firmware.h>
27#include <engine/falcon.h>
28
29/**
30 * struct hsf_fw_header - HS firmware descriptor
31 * @sig_dbg_offset: offset of the debug signature
32 * @sig_dbg_size: size of the debug signature
33 * @sig_prod_offset: offset of the production signature
34 * @sig_prod_size: size of the production signature
35 * @patch_loc: offset of the offset (sic) of where the signature is
36 * @patch_sig: offset of the offset (sic) to add to sig_*_offset
37 * @hdr_offset: offset of the load header (see struct hs_load_header)
38 * @hdr_size: size of above header
39 *
40 * This structure is embedded in the HS firmware image at
41 * hs_bin_hdr.header_offset.
42 */
43struct hsf_fw_header {
44 u32 sig_dbg_offset;
45 u32 sig_dbg_size;
46 u32 sig_prod_offset;
47 u32 sig_prod_size;
48 u32 patch_loc;
49 u32 patch_sig;
50 u32 hdr_offset;
51 u32 hdr_size;
52};
53
54/**
55 * struct acr_r352_flcn_bl_desc - DMEM bootloader descriptor
56 * @signature: 16B signature for secure code. 0s if no secure code
57 * @ctx_dma: DMA context to be used by BL while loading code/data
58 * @code_dma_base: 256B-aligned Physical FB Address where code is located
59 * (falcon's $xcbase register)
60 * @non_sec_code_off: offset from code_dma_base where the non-secure code is
61 * located. The offset must be multiple of 256 to help perf
62 * @non_sec_code_size: the size of the nonSecure code part.
63 * @sec_code_off: offset from code_dma_base where the secure code is
64 * located. The offset must be multiple of 256 to help perf
65 * @sec_code_size: size of the secure code part. Must be a multiple of
66 * 256 to help perf
67 * @code_entry_point: code entry point which will be invoked by BL after
68 * code is loaded.
69 * @data_dma_base: 256B aligned Physical FB Address where data is located.
70 * (falcon's $xdbase register)
71 * @data_size: size of data block. Should be multiple of 256B
72 *
73 * Structure used by the bootloader to load the rest of the code. This has
74 * to be filled by host and copied into DMEM at offset provided in the
75 * hsflcn_bl_desc.bl_desc_dmem_load_off.
76 */
77struct acr_r352_flcn_bl_desc {
78 u32 reserved[4];
79 u32 signature[4];
80 u32 ctx_dma;
81 u32 code_dma_base;
82 u32 non_sec_code_off;
83 u32 non_sec_code_size;
84 u32 sec_code_off;
85 u32 sec_code_size;
86 u32 code_entry_point;
87 u32 data_dma_base;
88 u32 data_size;
89 u32 code_dma_base1;
90 u32 data_dma_base1;
91};
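The 256-byte alignment is what makes the >> 8 shift in the generator below lossless: the falcon base registers store addresses in 256B units, with the upper bits carried in the *_base1 fields. A worked sketch of the split, assuming an example FB address:

/* Hedged arithmetic sketch with an assumed, 256B-aligned FB address. */
u64 addr = 0x1234567800ULL;          /* example 40-bit FB address */
u64 units = addr >> 8;               /* 0x12345678: address in 256B units */
u32 base = lower_32_bits(units);     /* -> code_dma_base */
u32 base1 = upper_32_bits(units);    /* -> code_dma_base1, 0 here */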
92
93/**
94 * acr_r352_generate_flcn_bl_desc - generate generic BL descriptor for LS image
95 */
96static void
97acr_r352_generate_flcn_bl_desc(const struct nvkm_acr *acr,
98 const struct ls_ucode_img *_img, u64 wpr_addr,
99 void *_desc)
100{
101 struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img);
102 struct acr_r352_flcn_bl_desc *desc = _desc;
103 const struct ls_ucode_img_desc *pdesc = &_img->ucode_desc;
104 u64 base, addr_code, addr_data;
105
106 base = wpr_addr + img->lsb_header.ucode_off + pdesc->app_start_offset;
107 addr_code = (base + pdesc->app_resident_code_offset) >> 8;
108 addr_data = (base + pdesc->app_resident_data_offset) >> 8;
109
110 desc->ctx_dma = FALCON_DMAIDX_UCODE;
111 desc->code_dma_base = lower_32_bits(addr_code);
112 desc->code_dma_base1 = upper_32_bits(addr_code);
113 desc->non_sec_code_off = pdesc->app_resident_code_offset;
114 desc->non_sec_code_size = pdesc->app_resident_code_size;
115 desc->code_entry_point = pdesc->app_imem_entry;
116 desc->data_dma_base = lower_32_bits(addr_data);
117 desc->data_dma_base1 = upper_32_bits(addr_data);
118 desc->data_size = pdesc->app_resident_data_size;
119}
120
121
122/**
123 * struct hsflcn_acr_desc - data section of the HS firmware
124 *
125 * This header is to be copied at the beginning of DMEM by the HS bootloader.
126 *
127 * @signature: signature of ACR ucode
128 * @wpr_region_id: region ID holding the WPR header and its details
129 * @wpr_offset: offset from the WPR region holding the wpr header
130 * @regions: region descriptors
131 * @ucode_blob_size: size of the LS blob
132 * @ucode_blob_base: FB location of the LS blob
133 */
134struct hsflcn_acr_desc {
135 union {
136 u8 reserved_dmem[0x200];
137 u32 signatures[4];
138 } ucode_reserved_space;
139 u32 wpr_region_id;
140 u32 wpr_offset;
141 u32 mmu_mem_range;
142#define FLCN_ACR_MAX_REGIONS 2
143 struct {
144 u32 no_regions;
145 struct {
146 u32 start_addr;
147 u32 end_addr;
148 u32 region_id;
149 u32 read_mask;
150 u32 write_mask;
151 u32 client_mask;
152 } region_props[FLCN_ACR_MAX_REGIONS];
153 } regions;
154 u32 ucode_blob_size;
155 u64 ucode_blob_base __aligned(8);
156 struct {
157 u32 vpr_enabled;
158 u32 vpr_start;
159 u32 vpr_end;
160 u32 hdcp_policies;
161 } vpr_desc;
162};
163
164
165/*
166 * Low-secure blob creation
167 */
168
169/**
170 * acr_r352_ls_ucode_img_load() - create a ls_ucode_img and load it
171 */
172struct ls_ucode_img *
173acr_r352_ls_ucode_img_load(const struct acr_r352 *acr,
174 enum nvkm_secboot_falcon falcon_id)
175{
176 const struct nvkm_subdev *subdev = acr->base.subdev;
177 struct ls_ucode_img_r352 *img;
178 int ret;
179
180 img = kzalloc(sizeof(*img), GFP_KERNEL);
181 if (!img)
182 return ERR_PTR(-ENOMEM);
183
184 img->base.falcon_id = falcon_id;
185
186 ret = acr->func->ls_func[falcon_id]->load(subdev, &img->base);
187
188 if (ret) {
189 kfree(img->base.ucode_data);
190 kfree(img->base.sig);
191 kfree(img);
192 return ERR_PTR(ret);
193 }
194
195 /* Check that the signature size matches our expectations... */
196 if (img->base.sig_size != sizeof(img->lsb_header.signature)) {
197 nvkm_error(subdev, "invalid signature size for %s falcon!\n",
198 nvkm_secboot_falcon_name[falcon_id]);
199 return ERR_PTR(-EINVAL);
200 }
201
202 /* Copy signature to the right place */
203 memcpy(&img->lsb_header.signature, img->base.sig, img->base.sig_size);
204
205 /* not needed? the signature should already have the right value */
206 img->lsb_header.signature.falcon_id = falcon_id;
207
208 return &img->base;
209}
210
211#define LSF_LSB_HEADER_ALIGN 256
212#define LSF_BL_DATA_ALIGN 256
213#define LSF_BL_DATA_SIZE_ALIGN 256
214#define LSF_BL_CODE_SIZE_ALIGN 256
215#define LSF_UCODE_DATA_ALIGN 4096
216
217/**
218 * acr_r352_ls_img_fill_headers - fill the WPR and LSB headers of an image
219 * @acr: ACR to use
220 * @img: image to generate for
221 * @offset: offset in the WPR region where this image starts
222 *
223 * Allocate space in the WPR area from offset and write the WPR and LSB headers
224 * accordingly.
225 *
226 * Return: offset at the end of this image.
227 */
228static u32
229acr_r352_ls_img_fill_headers(struct acr_r352 *acr,
230 struct ls_ucode_img_r352 *img, u32 offset)
231{
232 struct ls_ucode_img *_img = &img->base;
233 struct acr_r352_lsf_wpr_header *whdr = &img->wpr_header;
234 struct acr_r352_lsf_lsb_header *lhdr = &img->lsb_header;
235 struct ls_ucode_img_desc *desc = &_img->ucode_desc;
236 const struct acr_r352_ls_func *func =
237 acr->func->ls_func[_img->falcon_id];
238
239 /* Fill WPR header */
240 whdr->falcon_id = _img->falcon_id;
241 whdr->bootstrap_owner = acr->base.boot_falcon;
242 whdr->status = LSF_IMAGE_STATUS_COPY;
243
244	/* Skip bootstrapping falcons started by someone other than the ACR */
245 if (acr->lazy_bootstrap & BIT(_img->falcon_id))
246 whdr->lazy_bootstrap = 1;
247
248 /* Align, save off, and include an LSB header size */
249 offset = ALIGN(offset, LSF_LSB_HEADER_ALIGN);
250 whdr->lsb_offset = offset;
251 offset += sizeof(*lhdr);
252
253 /*
254 * Align, save off, and include the original (static) ucode
255 * image size
256 */
257 offset = ALIGN(offset, LSF_UCODE_DATA_ALIGN);
258 lhdr->ucode_off = offset;
259 offset += _img->ucode_size;
260
261 /*
262 * For falcons that use a boot loader (BL), we append a loader
263 * desc structure on the end of the ucode image and consider
264 * this the boot loader data. The host will then copy the loader
265 * desc args to this space within the WPR region (before locking
266 * down) and the HS bin will then copy them to DMEM 0 for the
267 * loader.
268 */
269 lhdr->bl_code_size = ALIGN(desc->bootloader_size,
270 LSF_BL_CODE_SIZE_ALIGN);
271 lhdr->ucode_size = ALIGN(desc->app_resident_data_offset,
272 LSF_BL_CODE_SIZE_ALIGN) + lhdr->bl_code_size;
273 lhdr->data_size = ALIGN(desc->app_size, LSF_BL_CODE_SIZE_ALIGN) +
274 lhdr->bl_code_size - lhdr->ucode_size;
275 /*
276	 * Though the BL is located at offset 0 of the image, the VA
277	 * is different to make sure that it doesn't collide with the
278	 * actual OS VA range
279 */
280 lhdr->bl_imem_off = desc->bootloader_imem_offset;
281 lhdr->app_code_off = desc->app_start_offset +
282 desc->app_resident_code_offset;
283 lhdr->app_code_size = desc->app_resident_code_size;
284 lhdr->app_data_off = desc->app_start_offset +
285 desc->app_resident_data_offset;
286 lhdr->app_data_size = desc->app_resident_data_size;
287
288 lhdr->flags = func->lhdr_flags;
289 if (_img->falcon_id == acr->base.boot_falcon)
290 lhdr->flags |= LSF_FLAG_DMACTL_REQ_CTX;
291
292 /* Align and save off BL descriptor size */
293 lhdr->bl_data_size = ALIGN(func->bl_desc_size, LSF_BL_DATA_SIZE_ALIGN);
294
295 /*
296 * Align, save off, and include the additional BL data
297 */
298 offset = ALIGN(offset, LSF_BL_DATA_ALIGN);
299 lhdr->bl_data_off = offset;
300 offset += lhdr->bl_data_size;
301
302 return offset;
303}
304
305/**
306 * acr_r352_ls_fill_headers - fill WPR and LSB headers of all managed images
307 */
308int
309acr_r352_ls_fill_headers(struct acr_r352 *acr, struct list_head *imgs)
310{
311 struct ls_ucode_img_r352 *img;
312 struct list_head *l;
313 u32 count = 0;
314 u32 offset;
315
316 /* Count the number of images to manage */
317 list_for_each(l, imgs)
318 count++;
319
320 /*
321 * Start with an array of WPR headers at the base of the WPR.
322 * The expectation here is that the secure falcon will do a single DMA
323 * read of this array and cache it internally so it's ok to pack these.
324 * Also, we add 1 to the falcon count to indicate the end of the array.
325 */
326 offset = sizeof(img->wpr_header) * (count + 1);
327
328 /*
329 * Walk the managed falcons, accounting for the LSB structs
330 * as well as the ucode images.
331 */
332 list_for_each_entry(img, imgs, base.node) {
333 offset = acr_r352_ls_img_fill_headers(acr, img, offset);
334 }
335
336 return offset;
337}
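Concretely, with two managed falcons the WPR header array occupies three slots (the extra one being the invalid terminator), so the first LSB header can start no earlier than three header sizes into the WPR. A worked sketch of the sizing described above:

/* Hedged sketch of the WPR header array sizing described above. */
u32 count = 2;  /* e.g. FECS + GPCCS */
u32 offset = sizeof(struct acr_r352_lsf_wpr_header) * (count + 1);

/* each image then aligns this up before placing its LSB header */
offset = ALIGN(offset, LSF_LSB_HEADER_ALIGN);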
338
339/**
340 * acr_r352_ls_write_wpr - write the WPR blob contents
341 */
342int
343acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
344 struct nvkm_gpuobj *wpr_blob, u32 wpr_addr)
345{
346 struct ls_ucode_img *_img;
347 u32 pos = 0;
348
349 nvkm_kmap(wpr_blob);
350
351 list_for_each_entry(_img, imgs, node) {
352 struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img);
353 const struct acr_r352_ls_func *ls_func =
354 acr->func->ls_func[_img->falcon_id];
355 u8 gdesc[ls_func->bl_desc_size];
356
357 nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
358 sizeof(img->wpr_header));
359
360 nvkm_gpuobj_memcpy_to(wpr_blob, img->wpr_header.lsb_offset,
361 &img->lsb_header, sizeof(img->lsb_header));
362
363 /* Generate and write BL descriptor */
364 memset(gdesc, 0, ls_func->bl_desc_size);
365 ls_func->generate_bl_desc(&acr->base, _img, wpr_addr, gdesc);
366
367 nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.bl_data_off,
368 gdesc, ls_func->bl_desc_size);
369
370 /* Copy ucode */
371 nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.ucode_off,
372 _img->ucode_data, _img->ucode_size);
373
374 pos += sizeof(img->wpr_header);
375 }
376
377 nvkm_wo32(wpr_blob, pos, NVKM_SECBOOT_FALCON_INVALID);
378
379 nvkm_done(wpr_blob);
380
381 return 0;
382}
383
384/* Both size and address of WPR need to be 128K-aligned */
385#define WPR_ALIGNMENT 0x20000
386/**
387 * acr_r352_prepare_ls_blob() - prepare the LS blob
388 *
389 * For each securely managed falcon, load the FW, signatures and bootloaders and
390 * prepare a ucode blob. Then, compute the offsets in the WPR region for each
391 * blob, and finally write the headers and ucode blobs into a GPU object that
392 * will be copied into the WPR region by the HS firmware.
393 */
394static int
395acr_r352_prepare_ls_blob(struct acr_r352 *acr, u64 wpr_addr, u32 wpr_size)
396{
397 const struct nvkm_subdev *subdev = acr->base.subdev;
398 struct list_head imgs;
399 struct ls_ucode_img *img, *t;
400 unsigned long managed_falcons = acr->base.managed_falcons;
401 int managed_count = 0;
402 u32 image_wpr_size;
403 int falcon_id;
404 int ret;
405
406 INIT_LIST_HEAD(&imgs);
407
408 /* Load all LS blobs */
409 for_each_set_bit(falcon_id, &managed_falcons, NVKM_SECBOOT_FALCON_END) {
410 struct ls_ucode_img *img;
411
412 img = acr->func->ls_ucode_img_load(acr, falcon_id);
413 if (IS_ERR(img)) {
414 ret = PTR_ERR(img);
415 goto cleanup;
416 }
417
418 list_add_tail(&img->node, &imgs);
419 managed_count++;
420 }
421
422 /*
423 * Fill the WPR and LSF headers with the right offsets and compute
424 * required WPR size
425 */
426 image_wpr_size = acr->func->ls_fill_headers(acr, &imgs);
427 image_wpr_size = ALIGN(image_wpr_size, WPR_ALIGNMENT);
428
429 /* Allocate GPU object that will contain the WPR region */
430 ret = nvkm_gpuobj_new(subdev->device, image_wpr_size, WPR_ALIGNMENT,
431 false, NULL, &acr->ls_blob);
432 if (ret)
433 goto cleanup;
434
435 nvkm_debug(subdev, "%d managed LS falcons, WPR size is %d bytes\n",
436 managed_count, image_wpr_size);
437
438 /* If WPR address and size are not fixed, set them to fit the LS blob */
439 if (wpr_size == 0) {
440 wpr_addr = acr->ls_blob->addr;
441 wpr_size = image_wpr_size;
442 /*
443 * But if the WPR region is set by the bootloader, it is illegal for
444 * the HS blob to be larger than this region.
445 */
446 } else if (image_wpr_size > wpr_size) {
447 nvkm_error(subdev, "WPR region too small for FW blob!\n");
448 nvkm_error(subdev, "required: %dB\n", image_wpr_size);
449 nvkm_error(subdev, "available: %dB\n", wpr_size);
450 ret = -ENOSPC;
451 goto cleanup;
452 }
453
454 /* Write LS blob */
455 ret = acr->func->ls_write_wpr(acr, &imgs, acr->ls_blob, wpr_addr);
456 if (ret)
457 nvkm_gpuobj_del(&acr->ls_blob);
458
459cleanup:
460 list_for_each_entry_safe(img, t, &imgs, node) {
461 kfree(img->ucode_data);
462 kfree(img->sig);
463 kfree(img);
464 }
465
466 return ret;
467}
468
469
470
471
472/**
473 * acr_r352_hsf_patch_signature() - patch HS blob with correct signature
474 */
475static void
476acr_r352_hsf_patch_signature(struct nvkm_secboot *sb, void *acr_image)
477{
478 struct fw_bin_header *hsbin_hdr = acr_image;
479 struct hsf_fw_header *fw_hdr = acr_image + hsbin_hdr->header_offset;
480 void *hs_data = acr_image + hsbin_hdr->data_offset;
481 void *sig;
482 u32 sig_size;
483
484 /* Falcon in debug or production mode? */
485 if (sb->boot_falcon->debug) {
486 sig = acr_image + fw_hdr->sig_dbg_offset;
487 sig_size = fw_hdr->sig_dbg_size;
488 } else {
489 sig = acr_image + fw_hdr->sig_prod_offset;
490 sig_size = fw_hdr->sig_prod_size;
491 }
492
493 /* Patch signature */
494 memcpy(hs_data + fw_hdr->patch_loc, sig + fw_hdr->patch_sig, sig_size);
495}
496
497static void
498acr_r352_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb,
499 struct hsflcn_acr_desc *desc)
500{
501 struct nvkm_gpuobj *ls_blob = acr->ls_blob;
502
503 /* WPR region information if WPR is not fixed */
504 if (sb->wpr_size == 0) {
505 u32 wpr_start = ls_blob->addr;
506 u32 wpr_end = wpr_start + ls_blob->size;
507
508 desc->wpr_region_id = 1;
509 desc->regions.no_regions = 2;
510 desc->regions.region_props[0].start_addr = wpr_start >> 8;
511 desc->regions.region_props[0].end_addr = wpr_end >> 8;
512 desc->regions.region_props[0].region_id = 1;
513 desc->regions.region_props[0].read_mask = 0xf;
514 desc->regions.region_props[0].write_mask = 0xc;
515 desc->regions.region_props[0].client_mask = 0x2;
516 } else {
517 desc->ucode_blob_base = ls_blob->addr;
518 desc->ucode_blob_size = ls_blob->size;
519 }
520}
521
522static void
523acr_r352_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
524 u64 offset)
525{
526 struct acr_r352_flcn_bl_desc *bl_desc = _bl_desc;
527 u64 addr_code, addr_data;
528
529 addr_code = offset >> 8;
530 addr_data = (offset + hdr->data_dma_base) >> 8;
531
532 bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
533 bl_desc->code_dma_base = lower_32_bits(addr_code);
534 bl_desc->non_sec_code_off = hdr->non_sec_code_off;
535 bl_desc->non_sec_code_size = hdr->non_sec_code_size;
536 bl_desc->sec_code_off = hdr->app[0].sec_code_off;
537 bl_desc->sec_code_size = hdr->app[0].sec_code_size;
538 bl_desc->code_entry_point = 0;
539 bl_desc->data_dma_base = lower_32_bits(addr_data);
540 bl_desc->data_size = hdr->data_size;
541}
542
543/**
544 * acr_r352_prepare_hs_blob - load and prepare a HS blob and BL descriptor
545 *
546 * @sb: secure boot instance to prepare for
547 * @fw: name of the HS firmware to load
548 * @blob: pointer to gpuobj that will be allocated to receive the HS FW payload
549 * @load_header: bootloader load header to fill for this firmware
550 * @patch: whether we should patch the HS descriptor (only for HS loaders)
551 */
552static int
553acr_r352_prepare_hs_blob(struct acr_r352 *acr, struct nvkm_secboot *sb,
554 const char *fw, struct nvkm_gpuobj **blob,
555 struct hsf_load_header *load_header, bool patch)
556{
557 struct nvkm_subdev *subdev = &sb->subdev;
558 void *acr_image;
559 struct fw_bin_header *hsbin_hdr;
560 struct hsf_fw_header *fw_hdr;
561 struct hsf_load_header *load_hdr;
562 void *acr_data;
563 int ret;
564
565 acr_image = nvkm_acr_load_firmware(subdev, fw, 0);
566 if (IS_ERR(acr_image))
567 return PTR_ERR(acr_image);
568
569 hsbin_hdr = acr_image;
570 fw_hdr = acr_image + hsbin_hdr->header_offset;
571 load_hdr = acr_image + fw_hdr->hdr_offset;
572 acr_data = acr_image + hsbin_hdr->data_offset;
573
574 /* Patch signature */
575 acr_r352_hsf_patch_signature(sb, acr_image);
576
577 /* Patch descriptor with WPR information? */
578 if (patch) {
579 struct hsflcn_acr_desc *desc;
580
581 desc = acr_data + load_hdr->data_dma_base;
582 acr_r352_fixup_hs_desc(acr, sb, desc);
583 }
584
585 if (load_hdr->num_apps > ACR_R352_MAX_APPS) {
586		nvkm_error(subdev, "more apps (%d) than supported (%d)!\n",
587 load_hdr->num_apps, ACR_R352_MAX_APPS);
588 ret = -EINVAL;
589 goto cleanup;
590 }
591 memcpy(load_header, load_hdr, sizeof(*load_header) +
592 (sizeof(load_hdr->app[0]) * load_hdr->num_apps));
593
594 /* Create ACR blob and copy HS data to it */
595 ret = nvkm_gpuobj_new(subdev->device, ALIGN(hsbin_hdr->data_size, 256),
596 0x1000, false, NULL, blob);
597 if (ret)
598 goto cleanup;
599
600 nvkm_kmap(*blob);
601 nvkm_gpuobj_memcpy_to(*blob, 0, acr_data, hsbin_hdr->data_size);
602 nvkm_done(*blob);
603
604cleanup:
605 kfree(acr_image);
606
607 return ret;
608}
609
610static int
611acr_r352_prepare_hsbl_blob(struct acr_r352 *acr)
612{
613 const struct nvkm_subdev *subdev = acr->base.subdev;
614 struct fw_bin_header *hdr;
615 struct fw_bl_desc *hsbl_desc;
616
617 acr->hsbl_blob = nvkm_acr_load_firmware(subdev, "acr/bl", 0);
618 if (IS_ERR(acr->hsbl_blob)) {
619 int ret = PTR_ERR(acr->hsbl_blob);
620
621 acr->hsbl_blob = NULL;
622 return ret;
623 }
624
625 hdr = acr->hsbl_blob;
626 hsbl_desc = acr->hsbl_blob + hdr->header_offset;
627
628 /* virtual start address for boot vector */
629 acr->base.start_address = hsbl_desc->start_tag << 8;
630
631 return 0;
632}
633
634/**
635 * acr_r352_load_blobs - load blobs common to all ACR V1 versions.
636 *
637 * This includes the LS blob, HS ucode loading blob, and HS bootloader.
638 *
639 * The HS ucode unload blob is only used on dGPU if the WPR region is variable.
640 */
641int
642acr_r352_load_blobs(struct acr_r352 *acr, struct nvkm_secboot *sb)
643{
644 int ret;
645
646 /* Firmware already loaded? */
647 if (acr->firmware_ok)
648 return 0;
649
650 /* Load and prepare the managed falcon's firmwares */
651 ret = acr_r352_prepare_ls_blob(acr, sb->wpr_addr, sb->wpr_size);
652 if (ret)
653 return ret;
654
655 /* Load the HS firmware that will load the LS firmwares */
656 if (!acr->load_blob) {
657 ret = acr_r352_prepare_hs_blob(acr, sb, "acr/ucode_load",
658 &acr->load_blob,
659 &acr->load_bl_header, true);
660 if (ret)
661 return ret;
662 }
663
664 /* If the ACR region is dynamically programmed, we need an unload FW */
665 if (sb->wpr_size == 0) {
666 ret = acr_r352_prepare_hs_blob(acr, sb, "acr/ucode_unload",
667 &acr->unload_blob,
668 &acr->unload_bl_header, false);
669 if (ret)
670 return ret;
671 }
672
673 /* Load the HS firmware bootloader */
674 if (!acr->hsbl_blob) {
675 ret = acr_r352_prepare_hsbl_blob(acr);
676 if (ret)
677 return ret;
678 }
679
680 acr->firmware_ok = true;
681 nvkm_debug(&sb->subdev, "LS blob successfully created\n");
682
683 return 0;
684}
685
686/**
687 * acr_r352_load() - prepare HS falcon to run the specified blob, mapped
688 * at GPU address offset.
689 */
690static int
691acr_r352_load(struct nvkm_acr *_acr, struct nvkm_secboot *sb,
692 struct nvkm_gpuobj *blob, u64 offset)
693{
694 struct acr_r352 *acr = acr_r352(_acr);
695 struct nvkm_falcon *falcon = sb->boot_falcon;
696 struct fw_bin_header *hdr = acr->hsbl_blob;
697 struct fw_bl_desc *hsbl_desc = acr->hsbl_blob + hdr->header_offset;
698 void *blob_data = acr->hsbl_blob + hdr->data_offset;
699 void *hsbl_code = blob_data + hsbl_desc->code_off;
700 void *hsbl_data = blob_data + hsbl_desc->data_off;
701 u32 code_size = ALIGN(hsbl_desc->code_size, 256);
702 const struct hsf_load_header *load_hdr;
703 const u32 bl_desc_size = acr->func->hs_bl_desc_size;
704 u8 bl_desc[bl_desc_size];
705
706 /* Find the bootloader descriptor for our blob and copy it */
707 if (blob == acr->load_blob) {
708 load_hdr = &acr->load_bl_header;
709 } else if (blob == acr->unload_blob) {
710 load_hdr = &acr->unload_bl_header;
711 } else {
712 nvkm_error(_acr->subdev, "invalid secure boot blob!\n");
713 return -EINVAL;
714 }
715
716 /*
717 * Copy HS bootloader data
718 */
719 nvkm_falcon_load_dmem(falcon, hsbl_data, 0x0, hsbl_desc->data_size, 0);
720
721 /* Copy HS bootloader code to end of IMEM */
722 nvkm_falcon_load_imem(falcon, hsbl_code, falcon->code.limit - code_size,
723 code_size, hsbl_desc->start_tag, 0, false);
724
725 /* Generate the BL header */
726 memset(bl_desc, 0, bl_desc_size);
727 acr->func->generate_hs_bl_desc(load_hdr, bl_desc, offset);
728
729 /*
730 * Copy HS BL header where the HS descriptor expects it to be
731 */
732 nvkm_falcon_load_dmem(falcon, bl_desc, hsbl_desc->dmem_load_off,
733 bl_desc_size, 0);
734
735 return 0;
736}
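The bootloader code therefore ends up flush against the top of IMEM. A worked sketch of the placement math, assuming an example IMEM size and bootloader size (both values are illustrative, not taken from hardware):

/* Hedged sketch: assumed 64KiB IMEM and a 0x1a7-byte bootloader. */
u32 limit = 0x10000;                  /* assumed falcon->code.limit */
u32 code_size = ALIGN(0x1a7, 256);    /* -> 0x200 */
u32 load_addr = limit - code_size;    /* -> 0xfe00, top of IMEM */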
737
738static int
739acr_r352_shutdown(struct acr_r352 *acr, struct nvkm_secboot *sb)
740{
741 int i;
742
743 /* Run the unload blob to unprotect the WPR region */
744 if (acr->unload_blob && sb->wpr_set) {
745 int ret;
746
747 nvkm_debug(&sb->subdev, "running HS unload blob\n");
748 ret = sb->func->run_blob(sb, acr->unload_blob);
749 if (ret)
750 return ret;
751 nvkm_debug(&sb->subdev, "HS unload blob completed\n");
752 }
753
754 for (i = 0; i < NVKM_SECBOOT_FALCON_END; i++)
755 acr->falcon_state[i] = NON_SECURE;
756
757 sb->wpr_set = false;
758
759 return 0;
760}
761
762static int
763acr_r352_bootstrap(struct acr_r352 *acr, struct nvkm_secboot *sb)
764{
765 int ret;
766
767 if (sb->wpr_set)
768 return 0;
769
770 /* Make sure all blobs are ready */
771 ret = acr_r352_load_blobs(acr, sb);
772 if (ret)
773 return ret;
774
775 nvkm_debug(&sb->subdev, "running HS load blob\n");
776 ret = sb->func->run_blob(sb, acr->load_blob);
777 /* clear halt interrupt */
778 nvkm_falcon_clear_interrupt(sb->boot_falcon, 0x10);
779 if (ret)
780 return ret;
781 nvkm_debug(&sb->subdev, "HS load blob completed\n");
782
783 sb->wpr_set = true;
784
785 return 0;
786}
787
788/*
789 * acr_r352_reset() - execute secure boot from the prepared state
790 *
791 * Load the HS bootloader and ask the falcon to run it. This will in turn
792 * load the HS firmware and run it, so once the falcon stops all the managed
793 * falcons should have their LS firmware loaded and be ready to run.
794 */
795static int
796acr_r352_reset(struct nvkm_acr *_acr, struct nvkm_secboot *sb,
797 enum nvkm_secboot_falcon falcon)
798{
799 struct acr_r352 *acr = acr_r352(_acr);
800 int ret;
801
802 /*
803 * Dummy GM200 implementation: perform secure boot each time we are
804 * called on FECS. Since only FECS and GPCCS are managed and started
805 * together, this ought to be safe.
806 *
807 * Once we have proper PMU firmware and support, this will be changed
808 * to a proper call to the PMU method.
809 */
810 if (falcon != NVKM_SECBOOT_FALCON_FECS)
811 goto end;
812
813 ret = acr_r352_shutdown(acr, sb);
814 if (ret)
815 return ret;
816
817 acr_r352_bootstrap(acr, sb);
818 if (ret)
819 return ret;
820
821end:
822 acr->falcon_state[falcon] = RESET;
823 return 0;
824}
825
826static int
827acr_r352_start(struct nvkm_acr *_acr, struct nvkm_secboot *sb,
828 enum nvkm_secboot_falcon falcon)
829{
830 struct acr_r352 *acr = acr_r352(_acr);
831 const struct nvkm_subdev *subdev = &sb->subdev;
832 int base;
833
834 switch (falcon) {
835 case NVKM_SECBOOT_FALCON_FECS:
836 base = 0x409000;
837 break;
838 case NVKM_SECBOOT_FALCON_GPCCS:
839 base = 0x41a000;
840 break;
841 default:
842 nvkm_error(subdev, "cannot start unhandled falcon!\n");
843 return -EINVAL;
844 }
845
846 nvkm_wr32(subdev->device, base + 0x130, 0x00000002);
847 acr->falcon_state[falcon] = RUNNING;
848
849 return 0;
850}
851
852static int
853acr_r352_fini(struct nvkm_acr *_acr, struct nvkm_secboot *sb, bool suspend)
854{
855 struct acr_r352 *acr = acr_r352(_acr);
856
857 return acr_r352_shutdown(acr, sb);
858}
859
860static void
861acr_r352_dtor(struct nvkm_acr *_acr)
862{
863 struct acr_r352 *acr = acr_r352(_acr);
864
865 nvkm_gpuobj_del(&acr->unload_blob);
866
867 kfree(acr->hsbl_blob);
868 nvkm_gpuobj_del(&acr->load_blob);
869 nvkm_gpuobj_del(&acr->ls_blob);
870
871 kfree(acr);
872}
873
874const struct acr_r352_ls_func
875acr_r352_ls_fecs_func = {
876 .load = acr_ls_ucode_load_fecs,
877 .generate_bl_desc = acr_r352_generate_flcn_bl_desc,
878 .bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
879};
880
881const struct acr_r352_ls_func
882acr_r352_ls_gpccs_func = {
883 .load = acr_ls_ucode_load_gpccs,
884 .generate_bl_desc = acr_r352_generate_flcn_bl_desc,
885 .bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
886 /* GPCCS will be loaded using PRI */
887 .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
888};
889
890const struct acr_r352_func
891acr_r352_func = {
892 .generate_hs_bl_desc = acr_r352_generate_hs_bl_desc,
893 .hs_bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
894 .ls_ucode_img_load = acr_r352_ls_ucode_img_load,
895 .ls_fill_headers = acr_r352_ls_fill_headers,
896 .ls_write_wpr = acr_r352_ls_write_wpr,
897 .ls_func = {
898 [NVKM_SECBOOT_FALCON_FECS] = &acr_r352_ls_fecs_func,
899 [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r352_ls_gpccs_func,
900 },
901};
902
903static const struct nvkm_acr_func
904acr_r352_base_func = {
905 .dtor = acr_r352_dtor,
906 .fini = acr_r352_fini,
907 .load = acr_r352_load,
908 .reset = acr_r352_reset,
909 .start = acr_r352_start,
910};
911
912struct nvkm_acr *
913acr_r352_new_(const struct acr_r352_func *func,
914 enum nvkm_secboot_falcon boot_falcon,
915 unsigned long managed_falcons)
916{
917 struct acr_r352 *acr;
918
919 acr = kzalloc(sizeof(*acr), GFP_KERNEL);
920 if (!acr)
921 return ERR_PTR(-ENOMEM);
922
923 acr->base.boot_falcon = boot_falcon;
924 acr->base.managed_falcons = managed_falcons;
925 acr->base.func = &acr_r352_base_func;
926 acr->func = func;
927
928 return &acr->base;
929}
930
931struct nvkm_acr *
932acr_r352_new(unsigned long managed_falcons)
933{
934 return acr_r352_new_(&acr_r352_func, NVKM_SECBOOT_FALCON_PMU,
935 managed_falcons);
936}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h
new file mode 100644
index 000000000000..ad5923b0fd3c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h
@@ -0,0 +1,250 @@
1/*
2 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22#ifndef __NVKM_SECBOOT_ACR_R352_H__
23#define __NVKM_SECBOOT_ACR_R352_H__
24
25#include "acr.h"
26#include "ls_ucode.h"
27
28struct ls_ucode_img;
29
30#define ACR_R352_MAX_APPS 8
31
32/*
33 *
34 * LS blob structures
35 *
36 */
37
38/**
39 * struct acr_r352_lsf_lsb_header - LS firmware header
40 * @signature: signature to verify the firmware against
41 * @ucode_off: offset of the ucode blob in the WPR region. The ucode
42 * blob contains the bootloader, code and data of the
43 * LS falcon
44 * @ucode_size: size of the ucode blob, including bootloader
45 * @data_size: size of the ucode blob data
46 * @bl_code_size: size of the bootloader code
47 * @bl_imem_off: offset in imem of the bootloader
48 * @bl_data_off: offset of the bootloader data in WPR region
49 * @bl_data_size: size of the bootloader data
50 * @app_code_off: offset of the app code relative to ucode_off
51 * @app_code_size: size of the app code
52 * @app_data_off: offset of the app data relative to ucode_off
53 * @app_data_size: size of the app data
54 * @flags: flags for the secure bootloader
55 *
56 * This structure is written into the WPR region for each managed falcon. Each
57 * instance is referenced by the lsb_offset member of the corresponding
58 * lsf_wpr_header.
59 */
60struct acr_r352_lsf_lsb_header {
61 /**
62 * LS falcon signatures
63 * @prd_keys: signature to use in production mode
64 * @dbg_keys: signature to use in debug mode
65 * @b_prd_present: whether the production key is present
66 * @b_dbg_present: whether the debug key is present
67 * @falcon_id: ID of the falcon the ucode applies to
68 */
69 struct {
70 u8 prd_keys[2][16];
71 u8 dbg_keys[2][16];
72 u32 b_prd_present;
73 u32 b_dbg_present;
74 u32 falcon_id;
75 } signature;
76 u32 ucode_off;
77 u32 ucode_size;
78 u32 data_size;
79 u32 bl_code_size;
80 u32 bl_imem_off;
81 u32 bl_data_off;
82 u32 bl_data_size;
83 u32 app_code_off;
84 u32 app_code_size;
85 u32 app_data_off;
86 u32 app_data_size;
87 u32 flags;
88#define LSF_FLAG_LOAD_CODE_AT_0 1
89#define LSF_FLAG_DMACTL_REQ_CTX 4
90#define LSF_FLAG_FORCE_PRIV_LOAD 8
91};
92
93/**
94 * struct acr_r352_lsf_wpr_header - LS blob WPR Header
95 * @falcon_id: LS falcon ID
96 * @lsb_offset: offset of the lsb_lsf_header in the WPR region
97 * @bootstrap_owner: secure falcon responsible for bootstrapping the LS falcon
98 * @lazy_bootstrap: skip bootstrapping by ACR
99 * @status: bootstrapping status
100 *
101 * An array of these is written at the beginning of the WPR region, one for
102 * each managed falcon. The array is terminated by an instance whose falcon_id
103 * is NVKM_SECBOOT_FALCON_INVALID.
104 */
105struct acr_r352_lsf_wpr_header {
106 u32 falcon_id;
107 u32 lsb_offset;
108 u32 bootstrap_owner;
109 u32 lazy_bootstrap;
110 u32 status;
111#define LSF_IMAGE_STATUS_NONE 0
112#define LSF_IMAGE_STATUS_COPY 1
113#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED 2
114#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED 3
115#define LSF_IMAGE_STATUS_VALIDATION_DONE 4
116#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED 5
117#define LSF_IMAGE_STATUS_BOOTSTRAP_READY 6
118};
119
120/**
121 * struct ls_ucode_img_r352 - ucode image augmented with r352 headers
122 */
123struct ls_ucode_img_r352 {
124 struct ls_ucode_img base;
125
126 struct acr_r352_lsf_wpr_header wpr_header;
127 struct acr_r352_lsf_lsb_header lsb_header;
128};
129#define ls_ucode_img_r352(i) container_of(i, struct ls_ucode_img_r352, base)
130
131
132/*
133 * HS blob structures
134 */
135
136struct hsf_load_header_app {
137 u32 sec_code_off;
138 u32 sec_code_size;
139};
140
141/**
142 * struct hsf_load_header - HS firmware load header
143 */
144struct hsf_load_header {
145 u32 non_sec_code_off;
146 u32 non_sec_code_size;
147 u32 data_dma_base;
148 u32 data_size;
149 u32 num_apps;
150 struct hsf_load_header_app app[0];
151};
152
153/**
154 * struct acr_r352_ls_func - manages a single LS firmware
155 *
156 * @load: load the external firmware into a ls_ucode_img
157 * @generate_bl_desc: function called on a block of bl_desc_size to generate the
158 * proper bootloader descriptor for this LS firmware
159 * @bl_desc_size: size of the bootloader descriptor
160 * @lhdr_flags: LS flags
161 */
162struct acr_r352_ls_func {
163 int (*load)(const struct nvkm_subdev *, struct ls_ucode_img *);
164 void (*generate_bl_desc)(const struct nvkm_acr *,
165 const struct ls_ucode_img *, u64, void *);
166 u32 bl_desc_size;
167 u32 lhdr_flags;
168};
169
170struct acr_r352;
171
172/**
173 * struct acr_r352_func - manages nuances between ACR versions
174 *
175 * @generate_hs_bl_desc: function called on a block of bl_desc_size to generate
176 * the proper HS bootloader descriptor
177 * @hs_bl_desc_size: size of the HS bootloader descriptor
178 */
179struct acr_r352_func {
180 void (*generate_hs_bl_desc)(const struct hsf_load_header *, void *,
181 u64);
182 u32 hs_bl_desc_size;
183
184 struct ls_ucode_img *(*ls_ucode_img_load)(const struct acr_r352 *,
185 enum nvkm_secboot_falcon);
186 int (*ls_fill_headers)(struct acr_r352 *, struct list_head *);
187 int (*ls_write_wpr)(struct acr_r352 *, struct list_head *,
188 struct nvkm_gpuobj *, u32);
189
190 const struct acr_r352_ls_func *ls_func[NVKM_SECBOOT_FALCON_END];
191};
192
193/**
194 * struct acr_r352 - ACR data for driver release 352 (and beyond)
195 */
196struct acr_r352 {
197 struct nvkm_acr base;
198 const struct acr_r352_func *func;
199
200 /*
201 * HS FW - lock WPR region (dGPU only) and load LS FWs
202 * on Tegra the HS FW copies the LS blob into the fixed WPR instead
203 */
204 struct nvkm_gpuobj *load_blob;
205 struct {
206 struct hsf_load_header load_bl_header;
207 struct hsf_load_header_app __load_apps[ACR_R352_MAX_APPS];
208 };
209
210 /* HS FW - unlock WPR region (dGPU only) */
211 struct nvkm_gpuobj *unload_blob;
212 struct {
213 struct hsf_load_header unload_bl_header;
214 struct hsf_load_header_app __unload_apps[ACR_R352_MAX_APPS];
215 };
216
217 /* HS bootloader */
218 void *hsbl_blob;
219
220 /* LS FWs, to be loaded by the HS ACR */
221 struct nvkm_gpuobj *ls_blob;
222
223 /* Firmware already loaded? */
224 bool firmware_ok;
225
226 /* Falcons to lazy-bootstrap */
227 u32 lazy_bootstrap;
228
229 /* To keep track of the state of all managed falcons */
230 enum {
231	/* In non-secure state, no firmware loaded, no privileges */
232 NON_SECURE = 0,
233 /* In low-secure mode and ready to be started */
234 RESET,
235 /* In low-secure mode and running */
236 RUNNING,
237 } falcon_state[NVKM_SECBOOT_FALCON_END];
238};
239#define acr_r352(acr) container_of(acr, struct acr_r352, base)
240
241struct nvkm_acr *acr_r352_new_(const struct acr_r352_func *,
242 enum nvkm_secboot_falcon, unsigned long);
243
244struct ls_ucode_img *acr_r352_ls_ucode_img_load(const struct acr_r352 *,
245 enum nvkm_secboot_falcon);
246int acr_r352_ls_fill_headers(struct acr_r352 *, struct list_head *);
247int acr_r352_ls_write_wpr(struct acr_r352 *, struct list_head *,
248 struct nvkm_gpuobj *, u32);
249
250#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c
new file mode 100644
index 000000000000..f0aff1d98474
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c
@@ -0,0 +1,138 @@
1/*
2 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include "acr_r352.h"
24
25#include <engine/falcon.h>
26
27/**
28 * struct acr_r361_flcn_bl_desc - DMEM bootloader descriptor
29 * @signature: 16B signature for secure code. 0s if no secure code
30 * @ctx_dma: DMA context to be used by BL while loading code/data
31 * @code_dma_base: 256B-aligned Physical FB Address where code is located
32 * (falcon's $xcbase register)
33 * @non_sec_code_off: offset from code_dma_base where the non-secure code is
34 * located. The offset must be multiple of 256 to help perf
35 * @non_sec_code_size: the size of the nonSecure code part.
36 * @sec_code_off: offset from code_dma_base where the secure code is
37 * located. The offset must be multiple of 256 to help perf
38 * @sec_code_size: size of the secure code part. Must be a multiple of
39 * 256 to help perf
40 * @code_entry_point: code entry point which will be invoked by BL after
41 * code is loaded.
42 * @data_dma_base: 256B aligned Physical FB Address where data is located.
43 * (falcon's $xdbase register)
44 * @data_size: size of data block. Should be multiple of 256B
45 *
46 * Structure used by the bootloader to load the rest of the code. This has
47 * to be filled by host and copied into DMEM at offset provided in the
48 * hsflcn_bl_desc.bl_desc_dmem_load_off.
49 */
50struct acr_r361_flcn_bl_desc {
51 u32 reserved[4];
52 u32 signature[4];
53 u32 ctx_dma;
54 struct flcn_u64 code_dma_base;
55 u32 non_sec_code_off;
56 u32 non_sec_code_size;
57 u32 sec_code_off;
58 u32 sec_code_size;
59 u32 code_entry_point;
60 struct flcn_u64 data_dma_base;
61 u32 data_size;
62};
63
64static void
65acr_r361_generate_flcn_bl_desc(const struct nvkm_acr *acr,
66 const struct ls_ucode_img *_img, u64 wpr_addr,
67 void *_desc)
68{
69 struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img);
70 struct acr_r361_flcn_bl_desc *desc = _desc;
71 const struct ls_ucode_img_desc *pdesc = &img->base.ucode_desc;
72 u64 base, addr_code, addr_data;
73
74 base = wpr_addr + img->lsb_header.ucode_off + pdesc->app_start_offset;
75 addr_code = base + pdesc->app_resident_code_offset;
76 addr_data = base + pdesc->app_resident_data_offset;
77
78 desc->ctx_dma = FALCON_DMAIDX_UCODE;
79 desc->code_dma_base = u64_to_flcn64(addr_code);
80 desc->non_sec_code_off = pdesc->app_resident_code_offset;
81 desc->non_sec_code_size = pdesc->app_resident_code_size;
82 desc->code_entry_point = pdesc->app_imem_entry;
83 desc->data_dma_base = u64_to_flcn64(addr_data);
84 desc->data_size = pdesc->app_resident_data_size;
85}
86
87static void
88acr_r361_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
89 u64 offset)
90{
91 struct acr_r361_flcn_bl_desc *bl_desc = _bl_desc;
92
93 bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
94 bl_desc->code_dma_base = u64_to_flcn64(offset);
95 bl_desc->non_sec_code_off = hdr->non_sec_code_off;
96 bl_desc->non_sec_code_size = hdr->non_sec_code_size;
97 bl_desc->sec_code_off = hdr->app[0].sec_code_off;
98 bl_desc->sec_code_size = hdr->app[0].sec_code_size;
99 bl_desc->code_entry_point = 0;
100 bl_desc->data_dma_base = u64_to_flcn64(offset + hdr->data_dma_base);
101 bl_desc->data_size = hdr->data_size;
102}
103
104const struct acr_r352_ls_func
105acr_r361_ls_fecs_func = {
106 .load = acr_ls_ucode_load_fecs,
107 .generate_bl_desc = acr_r361_generate_flcn_bl_desc,
108 .bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
109};
110
111const struct acr_r352_ls_func
112acr_r361_ls_gpccs_func = {
113 .load = acr_ls_ucode_load_gpccs,
114 .generate_bl_desc = acr_r361_generate_flcn_bl_desc,
115 .bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
116 /* GPCCS will be loaded using PRI */
117 .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
118};
119
120const struct acr_r352_func
121acr_r361_func = {
122 .generate_hs_bl_desc = acr_r361_generate_hs_bl_desc,
123 .hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
124 .ls_ucode_img_load = acr_r352_ls_ucode_img_load,
125 .ls_fill_headers = acr_r352_ls_fill_headers,
126 .ls_write_wpr = acr_r352_ls_write_wpr,
127 .ls_func = {
128 [NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func,
129 [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func,
130 },
131};
132
133struct nvkm_acr *
134acr_r361_new(unsigned long managed_falcons)
135{
136 return acr_r352_new_(&acr_r361_func, NVKM_SECBOOT_FALCON_PMU,
137 managed_falcons);
138}
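A usage sketch may help here (hypothetical caller, not part of this patch): a chip wanting FECS and GPCCS managed by the r361 ACR, with the PMU as boot falcon, would construct it roughly as follows.

/* Hypothetical caller sketch; the managed_falcons mask and failure
 * convention depend on the per-chip secboot constructor. */
struct nvkm_acr *acr;

acr = acr_r361_new(BIT(NVKM_SECBOOT_FALCON_FECS) |
                   BIT(NVKM_SECBOOT_FALCON_GPCCS));
if (!acr)
        return -ENOMEM; /* assumed: constructor returns NULL on failure */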
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
index 314be2192b7d..27c9dfffb9a6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
@@ -19,184 +19,108 @@
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE. 20 * DEALINGS IN THE SOFTWARE.
21 */ 21 */
22
23/*
24 * Secure boot is the process by which NVIDIA-signed firmware is loaded into
25 * some of the falcons of a GPU. For production devices this is the only way
26 * for the firmware to access useful (but sensitive) registers.
27 *
28 * A Falcon microprocessor supporting advanced security modes can run in one of
29 * three modes:
30 *
31 * - Non-secure (NS). In this mode, functionality is similar to Falcon
32 * architectures before security modes were introduced (pre-Maxwell), but
33 * capability is restricted. In particular, certain registers may be
34 * inaccessible for reads and/or writes, and physical memory access may be
35 * disabled (on certain Falcon instances). This is the only possible mode that
36 * can be used if you don't have microcode cryptographically signed by NVIDIA.
37 *
38 * - Heavy Secure (HS). In this mode, the microprocessor is a black box - it's
39 * not possible to read or write any Falcon internal state or Falcon registers
40 * from outside the Falcon (for example, from the host system). The only way
41 * to enable this mode is by loading microcode that has been signed by NVIDIA.
42 * (The loading process involves tagging the IMEM block as secure, writing the
43 * signature into a Falcon register, and starting execution. The hardware will
44 * validate the signature, and if valid, grant HS privileges.)
45 *
46 * - Light Secure (LS). In this mode, the microprocessor has more privileges
47 * than NS but fewer than HS. Some of the microprocessor state is visible to
48 * host software to ease debugging. The only way to enable this mode is by HS
49 * microcode enabling LS mode. Some privileges available to HS mode are not
50 * available here. LS mode is introduced in GM20x.
51 *
52 * Secure boot consists of temporarily switching an HS-capable falcon (typically
53 * PMU) into HS mode in order to validate the LS firmwares of managed falcons,
54 * load them, and switch managed falcons into LS mode. Once secure boot
55 * completes, no falcon remains in HS mode.
56 *
57 * Secure boot requires a write-protected memory region (WPR) which can only be
58 * written by the secure falcon. On dGPU, the driver sets up the WPR region in
59 * video memory. On Tegra, it is set up by the bootloader and its location and
60 * size written into memory controller registers.
61 *
62 * The secure boot process takes place as follows:
63 *
64 * 1) An LS blob is constructed that contains all the LS firmwares we want to
65 * load, along with their signatures and bootloaders.
66 *
67 * 2) An HS blob (also called ACR) is created that contains the signed HS
68 * firmware in charge of loading the LS firmwares into their respective
69 * falcons.
70 *
71 * 3) The HS blob is loaded (via its own bootloader) and executed on the
72 * HS-capable falcon. It authenticates itself, switches the secure falcon to
73 * HS mode and sets up the WPR region around the LS blob (dGPU) or copies the
74 * LS blob into the WPR region (Tegra).
75 *
76 * 4) The LS blob is now secure from all external tampering. The HS falcon
77 * checks the signatures of the LS firmwares and, if valid, switches the
78 * managed falcons to LS mode and makes them ready to run the LS firmware.
79 *
80 * 5) The managed falcons remain in LS mode and can be started.
81 *
82 */
83
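Condensed into code, the five steps above amount to roughly the following host-side sequence (a sketch with hypothetical helper names, error handling elided):

/* Sketch of the secure boot sequence; helper names are hypothetical. */
build_ls_blob(&ls_blob);       /* 1) pack LS firmwares, signatures, BLs  */
build_hs_blob(&acr_blob);      /* 2) signed HS ("ACR") firmware          */
run_on_hs_falcon(acr_blob);    /* 3) HS falcon sets up / fills the WPR   */
                               /* 4) HS code verifies LS signatures and  */
                               /*    switches managed falcons to LS mode */
start_managed_falcons();       /* 5) falcons now run their LS firmware   */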
22#include "priv.h" 84#include "priv.h"
85#include "acr.h"
23 86
24#include <subdev/mc.h> 87#include <subdev/mc.h>
25#include <subdev/timer.h> 88#include <subdev/timer.h>
89#include <subdev/pmu.h>
26 90
27static const char * 91const char *
28managed_falcons_names[] = { 92nvkm_secboot_falcon_name[] = {
29 [NVKM_SECBOOT_FALCON_PMU] = "PMU", 93 [NVKM_SECBOOT_FALCON_PMU] = "PMU",
30 [NVKM_SECBOOT_FALCON_RESERVED] = "<reserved>", 94 [NVKM_SECBOOT_FALCON_RESERVED] = "<reserved>",
31 [NVKM_SECBOOT_FALCON_FECS] = "FECS", 95 [NVKM_SECBOOT_FALCON_FECS] = "FECS",
32 [NVKM_SECBOOT_FALCON_GPCCS] = "GPCCS", 96 [NVKM_SECBOOT_FALCON_GPCCS] = "GPCCS",
33 [NVKM_SECBOOT_FALCON_END] = "<invalid>", 97 [NVKM_SECBOOT_FALCON_END] = "<invalid>",
34}; 98};
35
36/*
37 * Helper falcon functions
38 */
39
40static int
41falcon_clear_halt_interrupt(struct nvkm_device *device, u32 base)
42{
43 int ret;
44
45 /* clear halt interrupt */
46 nvkm_mask(device, base + 0x004, 0x10, 0x10);
47 /* wait until halt interrupt is cleared */
48 ret = nvkm_wait_msec(device, 10, base + 0x008, 0x10, 0x0);
49 if (ret < 0)
50 return ret;
51
52 return 0;
53}
54
55static int
56falcon_wait_idle(struct nvkm_device *device, u32 base)
57{
58 int ret;
59
60 ret = nvkm_wait_msec(device, 10, base + 0x04c, 0xffff, 0x0);
61 if (ret < 0)
62 return ret;
63
64 return 0;
65}
66
67static int
68nvkm_secboot_falcon_enable(struct nvkm_secboot *sb)
69{
70 struct nvkm_device *device = sb->subdev.device;
71 int ret;
72
73 /* enable engine */
74 nvkm_mc_enable(device, sb->devidx);
75 ret = nvkm_wait_msec(device, 10, sb->base + 0x10c, 0x6, 0x0);
76 if (ret < 0) {
77 nvkm_error(&sb->subdev, "Falcon mem scrubbing timeout\n");
78 nvkm_mc_disable(device, sb->devidx);
79 return ret;
80 }
81
82 ret = falcon_wait_idle(device, sb->base);
83 if (ret)
84 return ret;
85
86 /* enable IRQs */
87 nvkm_wr32(device, sb->base + 0x010, 0xff);
88 nvkm_mc_intr_mask(device, sb->devidx, true);
89
90 return 0;
91}
92
93static int
94nvkm_secboot_falcon_disable(struct nvkm_secboot *sb)
95{
96 struct nvkm_device *device = sb->subdev.device;
97
98 /* disable IRQs and wait for any previous code to complete */
99 nvkm_mc_intr_mask(device, sb->devidx, false);
100 nvkm_wr32(device, sb->base + 0x014, 0xff);
101
102 falcon_wait_idle(device, sb->base);
103
104 /* disable engine */
105 nvkm_mc_disable(device, sb->devidx);
106
107 return 0;
108}
109
110int
111nvkm_secboot_falcon_reset(struct nvkm_secboot *sb)
112{
113 int ret;
114
115 ret = nvkm_secboot_falcon_disable(sb);
116 if (ret)
117 return ret;
118
119 ret = nvkm_secboot_falcon_enable(sb);
120 if (ret)
121 return ret;
122
123 return 0;
124}
125
126/**
127 * nvkm_secboot_falcon_run - run the falcon that will perform secure boot
128 *
129 * This function is to be called after all chip-specific preparations have
130 * been completed. It will start the falcon to perform secure boot, wait for
131 * it to halt, and report if an error occurred.
132 */
133int
134nvkm_secboot_falcon_run(struct nvkm_secboot *sb)
135{
136 struct nvkm_device *device = sb->subdev.device;
137 int ret;
138
139 /* Start falcon */
140 nvkm_wr32(device, sb->base + 0x100, 0x2);
141
142 /* Wait for falcon halt */
143 ret = nvkm_wait_msec(device, 100, sb->base + 0x100, 0x10, 0x10);
144 if (ret < 0)
145 return ret;
146
147 /* If mailbox register contains an error code, then ACR has failed */
148 ret = nvkm_rd32(device, sb->base + 0x040);
149 if (ret) {
150 nvkm_error(&sb->subdev, "ACR boot failed, ret 0x%08x", ret);
151 falcon_clear_halt_interrupt(device, sb->base);
152 return -EINVAL;
153 }
154
155 return 0;
156}
157
158
159/** 99/**
160 * nvkm_secboot_reset() - reset specified falcon 100 * nvkm_secboot_reset() - reset specified falcon
161 */ 101 */
162int 102int
163nvkm_secboot_reset(struct nvkm_secboot *sb, u32 falcon) 103nvkm_secboot_reset(struct nvkm_secboot *sb, enum nvkm_secboot_falcon falcon)
164{ 104{
165 /* Unmanaged falcon? */ 105 /* Unmanaged falcon? */
166 if (!(BIT(falcon) & sb->func->managed_falcons)) { 106 if (!(BIT(falcon) & sb->acr->managed_falcons)) {
167 nvkm_error(&sb->subdev, "cannot reset unmanaged falcon!\n"); 107 nvkm_error(&sb->subdev, "cannot reset unmanaged falcon!\n");
168 return -EINVAL; 108 return -EINVAL;
169 } 109 }
170 110
171 return sb->func->reset(sb, falcon); 111 return sb->acr->func->reset(sb->acr, sb, falcon);
172}
173
174/**
175 * nvkm_secboot_start() - start specified falcon
176 */
177int
178nvkm_secboot_start(struct nvkm_secboot *sb, u32 falcon)
179{
180 /* Unmanaged falcon? */
181 if (!(BIT(falcon) & sb->func->managed_falcons)) {
182 nvkm_error(&sb->subdev, "cannot start unmanaged falcon!\n");
183 return -EINVAL;
184 }
185
186 return sb->func->start(sb, falcon);
187} 112}
188 113
189/** 114/**
190 * nvkm_secboot_is_managed() - check whether a given falcon is securely-managed 115 * nvkm_secboot_is_managed() - check whether a given falcon is securely-managed
191 */ 116 */
192bool 117bool
193nvkm_secboot_is_managed(struct nvkm_secboot *secboot, 118nvkm_secboot_is_managed(struct nvkm_secboot *sb, enum nvkm_secboot_falcon fid)
194 enum nvkm_secboot_falcon fid)
195{ 119{
196 if (!secboot) 120 if (!sb)
197 return false; 121 return false;
198 122
199 return secboot->func->managed_falcons & BIT(fid); 123 return sb->acr->managed_falcons & BIT(fid);
200} 124}
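Engine code typically consults this predicate before loading falcon ucode directly; a hedged sketch of such a caller (the real ones live in the gr subdev):

/* Hypothetical caller: defer FECS bringup to secure boot when managed. */
if (nvkm_secboot_is_managed(device->secboot, NVKM_SECBOOT_FALCON_FECS)) {
        int ret = nvkm_secboot_reset(device->secboot,
                                     NVKM_SECBOOT_FALCON_FECS);
        if (ret)
                return ret;
}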
201 125
202static int 126static int
@@ -205,9 +129,19 @@ nvkm_secboot_oneinit(struct nvkm_subdev *subdev)
205 struct nvkm_secboot *sb = nvkm_secboot(subdev); 129 struct nvkm_secboot *sb = nvkm_secboot(subdev);
206 int ret = 0; 130 int ret = 0;
207 131
132 switch (sb->acr->boot_falcon) {
133 case NVKM_SECBOOT_FALCON_PMU:
134 sb->boot_falcon = subdev->device->pmu->falcon;
135 break;
136 default:
137 nvkm_error(subdev, "Unmanaged boot falcon %s!\n",
138 nvkm_secboot_falcon_name[sb->acr->boot_falcon]);
139 return -EINVAL;
140 }
141
208 /* Call chip-specific init function */ 142 /* Call chip-specific init function */
209 if (sb->func->init) 143 if (sb->func->oneinit)
210 ret = sb->func->init(sb); 144 ret = sb->func->oneinit(sb);
211 if (ret) { 145 if (ret) {
212 nvkm_error(subdev, "Secure Boot initialization failed: %d\n", 146 nvkm_error(subdev, "Secure Boot initialization failed: %d\n",
213 ret); 147 ret);
@@ -249,7 +183,7 @@ nvkm_secboot = {
249}; 183};
250 184
251int 185int
252nvkm_secboot_ctor(const struct nvkm_secboot_func *func, 186nvkm_secboot_ctor(const struct nvkm_secboot_func *func, struct nvkm_acr *acr,
253 struct nvkm_device *device, int index, 187 struct nvkm_device *device, int index,
254 struct nvkm_secboot *sb) 188 struct nvkm_secboot *sb)
255{ 189{
@@ -257,22 +191,14 @@ nvkm_secboot_ctor(const struct nvkm_secboot_func *func,
257 191
258 nvkm_subdev_ctor(&nvkm_secboot, device, index, &sb->subdev); 192 nvkm_subdev_ctor(&nvkm_secboot, device, index, &sb->subdev);
259 sb->func = func; 193 sb->func = func;
260 194 sb->acr = acr;
261 /* setup the performing falcon's base address and masks */ 195 acr->subdev = &sb->subdev;
262 switch (func->boot_falcon) {
263 case NVKM_SECBOOT_FALCON_PMU:
264 sb->devidx = NVKM_SUBDEV_PMU;
265 sb->base = 0x10a000;
266 break;
267 default:
268 nvkm_error(&sb->subdev, "invalid secure boot falcon\n");
269 return -EINVAL;
270 };
271 196
272 nvkm_debug(&sb->subdev, "securely managed falcons:\n"); 197 nvkm_debug(&sb->subdev, "securely managed falcons:\n");
273 for_each_set_bit(fid, &sb->func->managed_falcons, 198 for_each_set_bit(fid, &sb->acr->managed_falcons,
274 NVKM_SECBOOT_FALCON_END) 199 NVKM_SECBOOT_FALCON_END)
275 nvkm_debug(&sb->subdev, "- %s\n", managed_falcons_names[fid]); 200 nvkm_debug(&sb->subdev, "- %s\n",
201 nvkm_secboot_falcon_name[fid]);
276 202
277 return 0; 203 return 0;
278} 204}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
index ec48e4ace37a..813c4eb0b25f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
@@ -20,1313 +20,84 @@
20 * DEALINGS IN THE SOFTWARE. 20 * DEALINGS IN THE SOFTWARE.
21 */ 21 */
22 22
23/*
24 * Secure boot is the process by which NVIDIA-signed firmware is loaded into
25 * some of the falcons of a GPU. For production devices this is the only way
26 * for the firmware to access useful (but sensitive) registers.
27 *
28 * A Falcon microprocessor supporting advanced security modes can run in one of
29 * three modes:
30 *
31 * - Non-secure (NS). In this mode, functionality is similar to Falcon
32 * architectures before security modes were introduced (pre-Maxwell), but
33 * capability is restricted. In particular, certain registers may be
34 * inaccessible for reads and/or writes, and physical memory access may be
35 * disabled (on certain Falcon instances). This is the only possible mode that
36 * can be used if you don't have microcode cryptographically signed by NVIDIA.
37 *
38 * - Heavy Secure (HS). In this mode, the microprocessor is a black box - it's
39 * not possible to read or write any Falcon internal state or Falcon registers
40 * from outside the Falcon (for example, from the host system). The only way
41 * to enable this mode is by loading microcode that has been signed by NVIDIA.
42 * (The loading process involves tagging the IMEM block as secure, writing the
43 * signature into a Falcon register, and starting execution. The hardware will
44 * validate the signature, and if valid, grant HS privileges.)
45 *
46 * - Light Secure (LS). In this mode, the microprocessor has more privileges
47 * than NS but fewer than HS. Some of the microprocessor state is visible to
48 * host software to ease debugging. The only way to enable this mode is by HS
49 * microcode enabling LS mode. Some privileges available to HS mode are not
50 * available here. LS mode is introduced in GM20x.
51 *
52 * Secure boot consists of temporarily switching an HS-capable falcon (typically
53 * PMU) into HS mode in order to validate the LS firmwares of managed falcons,
54 * load them, and switch managed falcons into LS mode. Once secure boot
55 * completes, no falcon remains in HS mode.
56 *
57 * Secure boot requires a write-protected memory region (WPR) which can only be
58 * written by the secure falcon. On dGPU, the driver sets up the WPR region in
59 * video memory. On Tegra, it is set up by the bootloader and its location and
60 * size written into memory controller registers.
61 *
62 * The secure boot process takes place as follows:
63 *
64 * 1) An LS blob is constructed that contains all the LS firmwares we want to
65 * load, along with their signatures and bootloaders.
66 *
67 * 2) An HS blob (also called ACR) is created that contains the signed HS
68 * firmware in charge of loading the LS firmwares into their respective
69 * falcons.
70 *
71 * 3) The HS blob is loaded (via its own bootloader) and executed on the
72 * HS-capable falcon. It authenticates itself, switches the secure falcon to
73 * HS mode and sets up the WPR region around the LS blob (dGPU) or copies the
74 * LS blob into the WPR region (Tegra).
75 *
76 * 4) The LS blob is now secure from all external tampering. The HS falcon
77 * checks the signatures of the LS firmwares and, if valid, switches the
78 * managed falcons to LS mode and makes them ready to run the LS firmware.
79 *
80 * 5) The managed falcons remain in LS mode and can be started.
81 *
82 */
83 23
84#include "priv.h" 24#include "acr.h"
25#include "gm200.h"
85 26
86#include <core/gpuobj.h> 27#include <core/gpuobj.h>
87#include <core/firmware.h>
88#include <subdev/fb.h> 28#include <subdev/fb.h>
89 29#include <engine/falcon.h>
90enum { 30#include <subdev/mc.h>
91 FALCON_DMAIDX_UCODE = 0,
92 FALCON_DMAIDX_VIRT = 1,
93 FALCON_DMAIDX_PHYS_VID = 2,
94 FALCON_DMAIDX_PHYS_SYS_COH = 3,
95 FALCON_DMAIDX_PHYS_SYS_NCOH = 4,
96};
97
98/**
99 * struct fw_bin_header - header of firmware files
100 * @bin_magic: always 0x3b1d14f0
101 * @bin_ver: version of the bin format
102 * @bin_size: entire image size including this header
103 * @header_offset: offset of the firmware/bootloader header in the file
104 * @data_offset: offset of the firmware/bootloader payload in the file
105 * @data_size: size of the payload
106 *
107 * This header is located at the beginning of the HS firmware and HS bootloader
108 * files, to describe where the headers and data can be found.
109 */
110struct fw_bin_header {
111 u32 bin_magic;
112 u32 bin_ver;
113 u32 bin_size;
114 u32 header_offset;
115 u32 data_offset;
116 u32 data_size;
117};
118
119/**
120 * struct fw_bl_desc - firmware bootloader descriptor
121 * @start_tag: starting tag of bootloader
122 * @dmem_load_off: DMEM offset of flcn_bl_dmem_desc
123 * @code_off: offset of code section
124 * @code_size: size of code section
125 * @data_off: offset of data section
126 * @data_size: size of data section
127 *
128 * This structure is embedded in bootloader firmware files to describe the
129 * IMEM and DMEM layout expected by the bootloader.
130 */
131struct fw_bl_desc {
132 u32 start_tag;
133 u32 dmem_load_off;
134 u32 code_off;
135 u32 code_size;
136 u32 data_off;
137 u32 data_size;
138};
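To make the two headers concrete, this is roughly how a firmware file is walked (the same pattern ls_ucode_img_build() below relies on; fw is an assumed struct firmware pointer):

/* Sketch: locating the BL descriptor and payload in a firmware file. */
struct fw_bin_header *hdr = (void *)fw->data;
struct fw_bl_desc *bl_desc = (void *)fw->data + hdr->header_offset;
void *payload = (void *)fw->data + hdr->data_offset; /* code, then data */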
139
140
141/*
142 *
143 * LS blob structures
144 *
145 */
146
147/**
148 * struct lsf_ucode_desc - LS falcon signatures
149 * @prd_keys: signature to use when the GPU is in production mode
150 * @dbg_keys: signature to use when the GPU is in debug mode
151 * @b_prd_present: whether the production key is present
152 * @b_dbg_present: whether the debug key is present
153 * @falcon_id: ID of the falcon the ucode applies to
154 *
155 * Directly loaded from a signature file.
156 */
157struct lsf_ucode_desc {
158 u8 prd_keys[2][16];
159 u8 dbg_keys[2][16];
160 u32 b_prd_present;
161 u32 b_dbg_present;
162 u32 falcon_id;
163};
164
165/**
166 * struct lsf_lsb_header - LS firmware header
167 * @signature: signature to verify the firmware against
168 * @ucode_off: offset of the ucode blob in the WPR region. The ucode
169 * blob contains the bootloader, code and data of the
170 * LS falcon
171 * @ucode_size: size of the ucode blob, including bootloader
172 * @data_size: size of the ucode blob data
173 * @bl_code_size: size of the bootloader code
174 * @bl_imem_off: offset in imem of the bootloader
175 * @bl_data_off: offset of the bootloader data in WPR region
176 * @bl_data_size: size of the bootloader data
177 * @app_code_off: offset of the app code relative to ucode_off
178 * @app_code_size: size of the app code
179 * @app_data_off: offset of the app data relative to ucode_off
180 * @app_data_size: size of the app data
181 * @flags: flags for the secure bootloader
182 *
183 * This structure is written into the WPR region for each managed falcon. Each
184 * instance is referenced by the lsb_offset member of the corresponding
185 * lsf_wpr_header.
186 */
187struct lsf_lsb_header {
188 struct lsf_ucode_desc signature;
189 u32 ucode_off;
190 u32 ucode_size;
191 u32 data_size;
192 u32 bl_code_size;
193 u32 bl_imem_off;
194 u32 bl_data_off;
195 u32 bl_data_size;
196 u32 app_code_off;
197 u32 app_code_size;
198 u32 app_data_off;
199 u32 app_data_size;
200 u32 flags;
201#define LSF_FLAG_LOAD_CODE_AT_0 1
202#define LSF_FLAG_DMACTL_REQ_CTX 4
203#define LSF_FLAG_FORCE_PRIV_LOAD 8
204};
205
206/**
207 * struct lsf_wpr_header - LS blob WPR Header
208 * @falcon_id: LS falcon ID
209 * @lsb_offset: offset of the lsb_lsf_header in the WPR region
210 * @bootstrap_owner: secure falcon responsible for bootstrapping the LS falcon
211 * @lazy_bootstrap: skip bootstrapping by ACR
212 * @status: bootstrapping status
213 *
214 * An array of these is written at the beginning of the WPR region, one for
215 * each managed falcon. The array is terminated by an instance whose falcon_id
216 * is LSF_FALCON_ID_INVALID.
217 */
218struct lsf_wpr_header {
219 u32 falcon_id;
220 u32 lsb_offset;
221 u32 bootstrap_owner;
222 u32 lazy_bootstrap;
223 u32 status;
224#define LSF_IMAGE_STATUS_NONE 0
225#define LSF_IMAGE_STATUS_COPY 1
226#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED 2
227#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED 3
228#define LSF_IMAGE_STATUS_VALIDATION_DONE 4
229#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED 5
230#define LSF_IMAGE_STATUS_BOOTSTRAP_READY 6
231};
232
233
234/**
235 * struct ls_ucode_img_desc - descriptor of firmware image
236 * @descriptor_size: size of this descriptor
237 * @image_size: size of the whole image
238 * @bootloader_start_offset: start offset of the bootloader in ucode image
239 * @bootloader_size: size of the bootloader
240 * @bootloader_imem_offset: start offset of the bootloader in IMEM
241 * @bootloader_entry_point: entry point of the bootloader in IMEM
242 * @app_start_offset: start offset of the LS firmware
243 * @app_size: size of the LS firmware's code and data
244 * @app_imem_offset: offset of the app in IMEM
245 * @app_imem_entry: entry point of the app in IMEM
246 * @app_dmem_offset: offset of the data in DMEM
247 * @app_resident_code_offset: offset of app code from app_start_offset
248 * @app_resident_code_size: size of the code
249 * @app_resident_data_offset: offset of data from app_start_offset
250 * @app_resident_data_size: size of data
251 *
252 * A firmware image contains the code, data, and bootloader of a given LS
253 * falcon in a single blob. This structure describes where everything is.
254 *
255 * This can be generated from a (bootloader, code, data) set if they have
256 * been loaded separately, or come directly from a file.
257 */
258struct ls_ucode_img_desc {
259 u32 descriptor_size;
260 u32 image_size;
261 u32 tools_version;
262 u32 app_version;
263 char date[64];
264 u32 bootloader_start_offset;
265 u32 bootloader_size;
266 u32 bootloader_imem_offset;
267 u32 bootloader_entry_point;
268 u32 app_start_offset;
269 u32 app_size;
270 u32 app_imem_offset;
271 u32 app_imem_entry;
272 u32 app_dmem_offset;
273 u32 app_resident_code_offset;
274 u32 app_resident_code_size;
275 u32 app_resident_data_offset;
276 u32 app_resident_data_size;
277 u32 nb_overlays;
278 struct {u32 start; u32 size; } load_ovl[64];
279 u32 compressed;
280};
281
282/**
283 * struct ls_ucode_img - temporary storage for loaded LS firmwares
284 * @node: to link within lsf_ucode_mgr
285 * @falcon_id: ID of the falcon this LS firmware is for
286 * @ucode_desc: loaded or generated map of ucode_data
287 * @ucode_header: header of the firmware
288 * @ucode_data: firmware payload (code and data)
289 * @ucode_size: size in bytes of data in ucode_data
290 * @wpr_header: WPR header to be written to the LS blob
291 * @lsb_header: LSB header to be written to the LS blob
292 *
293 * Preparing the WPR LS blob requires information about all the LS firmwares
294 * (size, etc) to be known. This structure contains all the data of one LS
295 * firmware.
296 */
297struct ls_ucode_img {
298 struct list_head node;
299 enum nvkm_secboot_falcon falcon_id;
300
301 struct ls_ucode_img_desc ucode_desc;
302 u32 *ucode_header;
303 u8 *ucode_data;
304 u32 ucode_size;
305
306 struct lsf_wpr_header wpr_header;
307 struct lsf_lsb_header lsb_header;
308};
309
310/**
311 * struct ls_ucode_mgr - manager for all LS falcon firmwares
312 * @count: number of managed LS falcons
313 * @wpr_size: size of the required WPR region in bytes
314 * @img_list: linked list of lsf_ucode_img
315 */
316struct ls_ucode_mgr {
317 u16 count;
318 u32 wpr_size;
319 struct list_head img_list;
320};
321
322
323/*
324 *
325 * HS blob structures
326 *
327 */
328
329/**
330 * struct hsf_fw_header - HS firmware descriptor
331 * @sig_dbg_offset: offset of the debug signature
332 * @sig_dbg_size: size of the debug signature
333 * @sig_prod_offset: offset of the production signature
334 * @sig_prod_size: size of the production signature
335 * @patch_loc: offset of the offset (sic) of where the signature is
336 * @patch_sig: offset of the offset (sic) to add to sig_*_offset
337 * @hdr_offset: offset of the load header (see struct hsf_load_header)
338 * @hdr_size: size of above header
339 *
340 * This structure is embedded in the HS firmware image at
341 * hs_bin_hdr.header_offset.
342 */
343struct hsf_fw_header {
344 u32 sig_dbg_offset;
345 u32 sig_dbg_size;
346 u32 sig_prod_offset;
347 u32 sig_prod_size;
348 u32 patch_loc;
349 u32 patch_sig;
350 u32 hdr_offset;
351 u32 hdr_size;
352};
353
354/**
355 * struct hsf_load_header - HS firmware load header
356 */
357struct hsf_load_header {
358 u32 non_sec_code_off;
359 u32 non_sec_code_size;
360 u32 data_dma_base;
361 u32 data_size;
362 u32 num_apps;
363 struct {
364 u32 sec_code_off;
365 u32 sec_code_size;
366 } app[0];
367};
368
369/**
370 * Convenience function to duplicate a firmware file in memory and check that
371 * it has the required minimum size.
372 */
373static void *
374gm200_secboot_load_firmware(struct nvkm_subdev *subdev, const char *name,
375 size_t min_size)
376{
377 const struct firmware *fw;
378 void *blob;
379 int ret;
380
381 ret = nvkm_firmware_get(subdev->device, name, &fw);
382 if (ret)
383 return ERR_PTR(ret);
384 if (fw->size < min_size) {
385 nvkm_error(subdev, "%s is smaller than expected size %zu\n",
386 name, min_size);
387 nvkm_firmware_put(fw);
388 return ERR_PTR(-EINVAL);
389 }
390 blob = kmemdup(fw->data, fw->size, GFP_KERNEL);
391 nvkm_firmware_put(fw);
392 if (!blob)
393 return ERR_PTR(-ENOMEM);
394
395 return blob;
396}
397
398
399/*
400 * Low-secure blob creation
401 */
402
403#define BL_DESC_BLK_SIZE 256
404/**
405 * Build a ucode image and descriptor from provided bootloader, code and data.
406 *
407 * @bl: bootloader image, including 16-bytes descriptor
408 * @code: LS firmware code segment
409 * @data: LS firmware data segment
410 * @desc: ucode descriptor to be written
411 *
412 * Return: allocated ucode image with corresponding descriptor information. desc
413 * is also updated to contain the right offsets within the returned image.
414 */
415static void *
416ls_ucode_img_build(const struct firmware *bl, const struct firmware *code,
417 const struct firmware *data, struct ls_ucode_img_desc *desc)
418{
419 struct fw_bin_header *bin_hdr = (void *)bl->data;
420 struct fw_bl_desc *bl_desc = (void *)bl->data + bin_hdr->header_offset;
421 void *bl_data = (void *)bl->data + bin_hdr->data_offset;
422 u32 pos = 0;
423 void *image;
424
425 desc->bootloader_start_offset = pos;
426 desc->bootloader_size = ALIGN(bl_desc->code_size, sizeof(u32));
427 desc->bootloader_imem_offset = bl_desc->start_tag * 256;
428 desc->bootloader_entry_point = bl_desc->start_tag * 256;
429
430 pos = ALIGN(pos + desc->bootloader_size, BL_DESC_BLK_SIZE);
431 desc->app_start_offset = pos;
432 desc->app_size = ALIGN(code->size, BL_DESC_BLK_SIZE) +
433 ALIGN(data->size, BL_DESC_BLK_SIZE);
434 desc->app_imem_offset = 0;
435 desc->app_imem_entry = 0;
436 desc->app_dmem_offset = 0;
437 desc->app_resident_code_offset = 0;
438 desc->app_resident_code_size = ALIGN(code->size, BL_DESC_BLK_SIZE);
439
440 pos = ALIGN(pos + desc->app_resident_code_size, BL_DESC_BLK_SIZE);
441 desc->app_resident_data_offset = pos - desc->app_start_offset;
442 desc->app_resident_data_size = ALIGN(data->size, BL_DESC_BLK_SIZE);
443
444 desc->image_size = ALIGN(bl_desc->code_size, BL_DESC_BLK_SIZE) +
445 desc->app_size;
446
447 image = kzalloc(desc->image_size, GFP_KERNEL);
448 if (!image)
449 return ERR_PTR(-ENOMEM);
450
451 memcpy(image + desc->bootloader_start_offset, bl_data,
452 bl_desc->code_size);
453 memcpy(image + desc->app_start_offset, code->data, code->size);
454 memcpy(image + desc->app_start_offset + desc->app_resident_data_offset,
455 data->data, data->size);
456
457 return image;
458}
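As a worked example of the layout arithmetic above, assume (hypothetically) a 0x500-byte bootloader, 0x3000 bytes of code and 0x800 bytes of data, with BL_DESC_BLK_SIZE = 256:

/* Worked example of ls_ucode_img_build() offsets (values hypothetical):
 *   bootloader_size          = ALIGN(0x500, 4)            = 0x500
 *   app_start_offset         = ALIGN(0x500, 256)          = 0x500
 *   app_resident_code_size   = ALIGN(0x3000, 256)         = 0x3000
 *   app_resident_data_offset = ALIGN(0x3500, 256) - 0x500 = 0x3000
 *   app_resident_data_size   = ALIGN(0x800, 256)          = 0x800
 *   image_size = ALIGN(0x500, 256) + (0x3000 + 0x800)     = 0x3d00
 */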
459
460/**
461 * ls_ucode_img_load_generic() - load and prepare an LS ucode image
462 *
463 * Load the LS microcode, bootloader and signature and pack them into a single
464 * blob. Also generate the corresponding ucode descriptor.
465 */
466static int
467ls_ucode_img_load_generic(struct nvkm_subdev *subdev,
468 struct ls_ucode_img *img, const char *falcon_name,
469 const u32 falcon_id)
470{
471 const struct firmware *bl, *code, *data;
472 struct lsf_ucode_desc *lsf_desc;
473 char f[64];
474 int ret;
475
476 img->ucode_header = NULL;
477
478 snprintf(f, sizeof(f), "gr/%s_bl", falcon_name);
479 ret = nvkm_firmware_get(subdev->device, f, &bl);
480 if (ret)
481 goto error;
482
483 snprintf(f, sizeof(f), "gr/%s_inst", falcon_name);
484 ret = nvkm_firmware_get(subdev->device, f, &code);
485 if (ret)
486 goto free_bl;
487
488 snprintf(f, sizeof(f), "gr/%s_data", falcon_name);
489 ret = nvkm_firmware_get(subdev->device, f, &data);
490 if (ret)
491 goto free_inst;
492
493 img->ucode_data = ls_ucode_img_build(bl, code, data,
494 &img->ucode_desc);
495 if (IS_ERR(img->ucode_data)) {
496 ret = PTR_ERR(img->ucode_data);
497 goto free_data;
498 }
499 img->ucode_size = img->ucode_desc.image_size;
500
501 snprintf(f, sizeof(f), "gr/%s_sig", falcon_name);
502 lsf_desc = gm200_secboot_load_firmware(subdev, f, sizeof(*lsf_desc));
503 if (IS_ERR(lsf_desc)) {
504 ret = PTR_ERR(lsf_desc);
505 goto free_image;
506 }
507 /* not needed? the signature should already have the right value */
508 lsf_desc->falcon_id = falcon_id;
509 memcpy(&img->lsb_header.signature, lsf_desc, sizeof(*lsf_desc));
510 img->falcon_id = lsf_desc->falcon_id;
511 kfree(lsf_desc);
512
513 /* success path - only free requested firmware files */
514 goto free_data;
515
516free_image:
517 kfree(img->ucode_data);
518free_data:
519 nvkm_firmware_put(data);
520free_inst:
521 nvkm_firmware_put(code);
522free_bl:
523 nvkm_firmware_put(bl);
524error:
525 return ret;
526}
527
528typedef int (*lsf_load_func)(struct nvkm_subdev *, struct ls_ucode_img *);
529
530static int
531ls_ucode_img_load_fecs(struct nvkm_subdev *subdev, struct ls_ucode_img *img)
532{
533 return ls_ucode_img_load_generic(subdev, img, "fecs",
534 NVKM_SECBOOT_FALCON_FECS);
535}
536
537static int
538ls_ucode_img_load_gpccs(struct nvkm_subdev *subdev, struct ls_ucode_img *img)
539{
540 return ls_ucode_img_load_generic(subdev, img, "gpccs",
541 NVKM_SECBOOT_FALCON_GPCCS);
542}
543
544/**
545 * ls_ucode_img_load() - create a lsf_ucode_img and load it
546 */
547static struct ls_ucode_img *
548ls_ucode_img_load(struct nvkm_subdev *subdev, lsf_load_func load_func)
549{
550 struct ls_ucode_img *img;
551 int ret;
552
553 img = kzalloc(sizeof(*img), GFP_KERNEL);
554 if (!img)
555 return ERR_PTR(-ENOMEM);
556
557 ret = load_func(subdev, img);
558 if (ret) {
559 kfree(img);
560 return ERR_PTR(ret);
561 }
562
563 return img;
564}
565
566static const lsf_load_func lsf_load_funcs[] = {
567 [NVKM_SECBOOT_FALCON_END] = NULL, /* reserve enough space */
568 [NVKM_SECBOOT_FALCON_FECS] = ls_ucode_img_load_fecs,
569 [NVKM_SECBOOT_FALCON_GPCCS] = ls_ucode_img_load_gpccs,
570};
571
572/**
573 * ls_ucode_img_populate_bl_desc() - populate a DMEM BL descriptor for LS image
574 * @img: ucode image to generate against
575 * @desc: descriptor to populate
576 * @sb: secure boot state to use for base addresses
577 *
578 * Populate the DMEM BL descriptor with the information contained in a
579 * ls_ucode_desc.
580 *
581 */
582static void
583ls_ucode_img_populate_bl_desc(struct ls_ucode_img *img, u64 wpr_addr,
584 struct gm200_flcn_bl_desc *desc)
585{
586 struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
587 u64 addr_base;
588
589 addr_base = wpr_addr + img->lsb_header.ucode_off +
590 pdesc->app_start_offset;
591
592 memset(desc, 0, sizeof(*desc));
593 desc->ctx_dma = FALCON_DMAIDX_UCODE;
594 desc->code_dma_base.lo = lower_32_bits(
595 (addr_base + pdesc->app_resident_code_offset));
596 desc->code_dma_base.hi = upper_32_bits(
597 (addr_base + pdesc->app_resident_code_offset));
598 desc->non_sec_code_size = pdesc->app_resident_code_size;
599 desc->data_dma_base.lo = lower_32_bits(
600 (addr_base + pdesc->app_resident_data_offset));
601 desc->data_dma_base.hi = upper_32_bits(
602 (addr_base + pdesc->app_resident_data_offset));
603 desc->data_size = pdesc->app_resident_data_size;
604 desc->code_entry_point = pdesc->app_imem_entry;
605}
606
607#define LSF_LSB_HEADER_ALIGN 256
608#define LSF_BL_DATA_ALIGN 256
609#define LSF_BL_DATA_SIZE_ALIGN 256
610#define LSF_BL_CODE_SIZE_ALIGN 256
611#define LSF_UCODE_DATA_ALIGN 4096
612
613/**
614 * ls_ucode_img_fill_headers - fill the WPR and LSB headers of an image
615 * @gsb: secure boot device used
616 * @img: image to generate for
617 * @offset: offset in the WPR region where this image starts
618 *
619 * Allocate space in the WPR area from offset and write the WPR and LSB headers
620 * accordingly.
621 *
622 * Return: offset at the end of this image.
623 */
624static u32
625ls_ucode_img_fill_headers(struct gm200_secboot *gsb, struct ls_ucode_img *img,
626 u32 offset)
627{
628 struct lsf_wpr_header *whdr = &img->wpr_header;
629 struct lsf_lsb_header *lhdr = &img->lsb_header;
630 struct ls_ucode_img_desc *desc = &img->ucode_desc;
631
632 if (img->ucode_header) {
633 nvkm_fatal(&gsb->base.subdev,
634 "images withough loader are not supported yet!\n");
635 return offset;
636 }
637
638 /* Fill WPR header */
639 whdr->falcon_id = img->falcon_id;
640 whdr->bootstrap_owner = gsb->base.func->boot_falcon;
641 whdr->status = LSF_IMAGE_STATUS_COPY;
642
643 /* Align, save off, and include an LSB header size */
644 offset = ALIGN(offset, LSF_LSB_HEADER_ALIGN);
645 whdr->lsb_offset = offset;
646 offset += sizeof(struct lsf_lsb_header);
647
648 /*
649 * Align, save off, and include the original (static) ucode
650 * image size
651 */
652 offset = ALIGN(offset, LSF_UCODE_DATA_ALIGN);
653 lhdr->ucode_off = offset;
654 offset += img->ucode_size;
655
656 /*
657 * For falcons that use a boot loader (BL), we append a loader
658 * desc structure on the end of the ucode image and consider
659 * this the boot loader data. The host will then copy the loader
660 * desc args to this space within the WPR region (before locking
661 * down) and the HS bin will then copy them to DMEM 0 for the
662 * loader.
663 */
664 lhdr->bl_code_size = ALIGN(desc->bootloader_size,
665 LSF_BL_CODE_SIZE_ALIGN);
666 lhdr->ucode_size = ALIGN(desc->app_resident_data_offset,
667 LSF_BL_CODE_SIZE_ALIGN) + lhdr->bl_code_size;
668 lhdr->data_size = ALIGN(desc->app_size, LSF_BL_CODE_SIZE_ALIGN) +
669 lhdr->bl_code_size - lhdr->ucode_size;
670 /*
671 * Though the BL is located at offset 0 of the image, the VA
672 * is different to make sure that it doesn't collide with the actual
673 * OS VA range
674 */
675 lhdr->bl_imem_off = desc->bootloader_imem_offset;
676 lhdr->app_code_off = desc->app_start_offset +
677 desc->app_resident_code_offset;
678 lhdr->app_code_size = desc->app_resident_code_size;
679 lhdr->app_data_off = desc->app_start_offset +
680 desc->app_resident_data_offset;
681 lhdr->app_data_size = desc->app_resident_data_size;
682
683 lhdr->flags = 0;
684 if (img->falcon_id == gsb->base.func->boot_falcon)
685 lhdr->flags = LSF_FLAG_DMACTL_REQ_CTX;
686
687 /* GPCCS will be loaded using PRI */
688 if (img->falcon_id == NVKM_SECBOOT_FALCON_GPCCS)
689 lhdr->flags |= LSF_FLAG_FORCE_PRIV_LOAD;
690
691 /* Align (size bloat) and save off BL descriptor size */
692 lhdr->bl_data_size = ALIGN(sizeof(struct gm200_flcn_bl_desc),
693 LSF_BL_DATA_SIZE_ALIGN);
694 /*
695 * Align, save off, and include the additional BL data
696 */
697 offset = ALIGN(offset, LSF_BL_DATA_ALIGN);
698 lhdr->bl_data_off = offset;
699 offset += lhdr->bl_data_size;
700
701 return offset;
702}
703
704static void
705ls_ucode_mgr_init(struct ls_ucode_mgr *mgr)
706{
707 memset(mgr, 0, sizeof(*mgr));
708 INIT_LIST_HEAD(&mgr->img_list);
709}
710
711static void
712ls_ucode_mgr_cleanup(struct ls_ucode_mgr *mgr)
713{
714 struct ls_ucode_img *img, *t;
715
716 list_for_each_entry_safe(img, t, &mgr->img_list, node) {
717 kfree(img->ucode_data);
718 kfree(img->ucode_header);
719 kfree(img);
720 }
721}
722
723static void
724ls_ucode_mgr_add_img(struct ls_ucode_mgr *mgr, struct ls_ucode_img *img)
725{
726 mgr->count++;
727 list_add_tail(&img->node, &mgr->img_list);
728}
729
730/**
731 * ls_ucode_mgr_fill_headers - fill WPR and LSB headers of all managed images
732 */
733static void
734ls_ucode_mgr_fill_headers(struct gm200_secboot *gsb, struct ls_ucode_mgr *mgr)
735{
736 struct ls_ucode_img *img;
737 u32 offset;
738
739 /*
740 * Start with an array of WPR headers at the base of the WPR.
741 * The expectation here is that the secure falcon will do a single DMA
742 * read of this array and cache it internally so it's ok to pack these.
743 * Also, we add 1 to the falcon count to indicate the end of the array.
744 */
745 offset = sizeof(struct lsf_wpr_header) * (mgr->count + 1);
746
747 /*
748 * Walk the managed falcons, accounting for the LSB structs
749 * as well as the ucode images.
750 */
751 list_for_each_entry(img, &mgr->img_list, node) {
752 offset = ls_ucode_img_fill_headers(gsb, img, offset);
753 }
754
755 mgr->wpr_size = offset;
756}
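For two managed falcons, the resulting WPR layout looks roughly like this (offsets hypothetical, following the 256/4096-byte alignments defined above; sizeof(struct lsf_wpr_header) is 0x14):

/* Hypothetical WPR layout for FECS + GPCCS:
 *   0x0000: lsf_wpr_header[0] (FECS)
 *   0x0014: lsf_wpr_header[1] (GPCCS)
 *   0x0028: lsf_wpr_header[2] (terminator, invalid falcon_id)
 *   0x0100: FECS lsf_lsb_header   (ALIGN to LSF_LSB_HEADER_ALIGN)
 *   0x1000: FECS ucode image      (ALIGN to LSF_UCODE_DATA_ALIGN)
 *   ...BL data, then the same LSB/ucode/BL sequence for GPCCS
 */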
757
758/**
759 * ls_ucode_mgr_write_wpr - write the WPR blob contents
760 */
761static int
762ls_ucode_mgr_write_wpr(struct gm200_secboot *gsb, struct ls_ucode_mgr *mgr,
763 struct nvkm_gpuobj *wpr_blob)
764{
765 struct ls_ucode_img *img;
766 u32 pos = 0;
767
768 nvkm_kmap(wpr_blob);
769
770 list_for_each_entry(img, &mgr->img_list, node) {
771 nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
772 sizeof(img->wpr_header));
773
774 nvkm_gpuobj_memcpy_to(wpr_blob, img->wpr_header.lsb_offset,
775 &img->lsb_header, sizeof(img->lsb_header));
776
777 /* Generate and write BL descriptor */
778 if (!img->ucode_header) {
779 u8 desc[gsb->func->bl_desc_size];
780 struct gm200_flcn_bl_desc gdesc;
781
782 ls_ucode_img_populate_bl_desc(img, gsb->wpr_addr,
783 &gdesc);
784 gsb->func->fixup_bl_desc(&gdesc, &desc);
785 nvkm_gpuobj_memcpy_to(wpr_blob,
786 img->lsb_header.bl_data_off,
787 &desc, gsb->func->bl_desc_size);
788 }
789
790 /* Copy ucode */
791 nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.ucode_off,
792 img->ucode_data, img->ucode_size);
793
794 pos += sizeof(img->wpr_header);
795 }
796
797 nvkm_wo32(wpr_blob, pos, NVKM_SECBOOT_FALCON_INVALID);
798
799 nvkm_done(wpr_blob);
800
801 return 0;
802}
803
804/* Both size and address of WPR need to be 128K-aligned */
805#define WPR_ALIGNMENT 0x20000
806/**
807 * gm200_secboot_prepare_ls_blob() - prepare the LS blob
808 *
809 * For each securely managed falcon, load the FW, signatures and bootloaders and
810 * prepare a ucode blob. Then, compute the offsets in the WPR region for each
811 * blob, and finally write the headers and ucode blobs into a GPU object that
812 * will be copied into the WPR region by the HS firmware.
813 */
814static int
815gm200_secboot_prepare_ls_blob(struct gm200_secboot *gsb)
816{
817 struct nvkm_secboot *sb = &gsb->base;
818 struct nvkm_device *device = sb->subdev.device;
819 struct ls_ucode_mgr mgr;
820 int falcon_id;
821 int ret;
822
823 ls_ucode_mgr_init(&mgr);
824
825 /* Load all LS blobs */
826 for_each_set_bit(falcon_id, &gsb->base.func->managed_falcons,
827 NVKM_SECBOOT_FALCON_END) {
828 struct ls_ucode_img *img;
829
830 img = ls_ucode_img_load(&sb->subdev, lsf_load_funcs[falcon_id]);
831
832 if (IS_ERR(img)) {
833 ret = PTR_ERR(img);
834 goto cleanup;
835 }
836 ls_ucode_mgr_add_img(&mgr, img);
837 }
838
839 /*
840 * Fill the WPR and LSF headers with the right offsets and compute
841 * required WPR size
842 */
843 ls_ucode_mgr_fill_headers(gsb, &mgr);
844 mgr.wpr_size = ALIGN(mgr.wpr_size, WPR_ALIGNMENT);
845
846 /* Allocate GPU object that will contain the WPR region */
847 ret = nvkm_gpuobj_new(device, mgr.wpr_size, WPR_ALIGNMENT, false, NULL,
848 &gsb->ls_blob);
849 if (ret)
850 goto cleanup;
851
852 nvkm_debug(&sb->subdev, "%d managed LS falcons, WPR size is %d bytes\n",
853 mgr.count, mgr.wpr_size);
854
855 /* If WPR address and size are not fixed, set them to fit the LS blob */
856 if (!gsb->wpr_size) {
857 gsb->wpr_addr = gsb->ls_blob->addr;
858 gsb->wpr_size = gsb->ls_blob->size;
859 }
860
861 /* Write LS blob */
862 ret = ls_ucode_mgr_write_wpr(gsb, &mgr, gsb->ls_blob);
863 if (ret)
864 nvkm_gpuobj_del(&gsb->ls_blob);
865
866cleanup:
867 ls_ucode_mgr_cleanup(&mgr);
868
869 return ret;
870}
871
872/*
873 * High-secure blob creation
874 */
875
876/**
877 * gm200_secboot_hsf_patch_signature() - patch HS blob with correct signature
878 */
879static void
880gm200_secboot_hsf_patch_signature(struct gm200_secboot *gsb, void *acr_image)
881{
882 struct nvkm_secboot *sb = &gsb->base;
883 struct fw_bin_header *hsbin_hdr = acr_image;
884 struct hsf_fw_header *fw_hdr = acr_image + hsbin_hdr->header_offset;
885 void *hs_data = acr_image + hsbin_hdr->data_offset;
886 void *sig;
887 u32 sig_size;
888
889 /* Falcon in debug or production mode? */
890 if ((nvkm_rd32(sb->subdev.device, sb->base + 0xc08) >> 20) & 0x1) {
891 sig = acr_image + fw_hdr->sig_dbg_offset;
892 sig_size = fw_hdr->sig_dbg_size;
893 } else {
894 sig = acr_image + fw_hdr->sig_prod_offset;
895 sig_size = fw_hdr->sig_prod_size;
896 }
897
898 /* Patch signature */
899 memcpy(hs_data + fw_hdr->patch_loc, sig + fw_hdr->patch_sig, sig_size);
900}
901
902/**
903 * gm200_secboot_populate_hsf_bl_desc() - populate BL descriptor for HS image
904 */
905static void
906gm200_secboot_populate_hsf_bl_desc(void *acr_image,
907 struct gm200_flcn_bl_desc *bl_desc)
908{
909 struct fw_bin_header *hsbin_hdr = acr_image;
910 struct hsf_fw_header *fw_hdr = acr_image + hsbin_hdr->header_offset;
911 struct hsf_load_header *load_hdr = acr_image + fw_hdr->hdr_offset;
912
913 /*
914 * Descriptor for the bootloader that will load the ACR image into
915 * IMEM/DMEM memory.
916 */
917 fw_hdr = acr_image + hsbin_hdr->header_offset;
918 load_hdr = acr_image + fw_hdr->hdr_offset;
919 memset(bl_desc, 0, sizeof(*bl_desc));
920 bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
921 bl_desc->non_sec_code_off = load_hdr->non_sec_code_off;
922 bl_desc->non_sec_code_size = load_hdr->non_sec_code_size;
923 bl_desc->sec_code_off = load_hdr->app[0].sec_code_off;
924 bl_desc->sec_code_size = load_hdr->app[0].sec_code_size;
925 bl_desc->code_entry_point = 0;
926 /*
927 * We need to set code_dma_base to the virtual address of the acr_blob,
928 * and add this address to data_dma_base before writing it into DMEM
929 */
930 bl_desc->code_dma_base.lo = 0;
931 bl_desc->data_dma_base.lo = load_hdr->data_dma_base;
932 bl_desc->data_size = load_hdr->data_size;
933}
934
935/**
936 * gm200_secboot_prepare_hs_blob - load and prepare an HS blob and BL descriptor
937 *
938 * @gsb: secure boot instance to prepare for
939 * @fw: name of the HS firmware to load
940 * @blob: pointer to gpuobj that will be allocated to receive the HS FW payload
941 * @bl_desc: pointer to the BL descriptor to write for this firmware
942 * @patch: whether we should patch the HS descriptor (only for HS loaders)
943 */
944static int
945gm200_secboot_prepare_hs_blob(struct gm200_secboot *gsb, const char *fw,
946 struct nvkm_gpuobj **blob,
947 struct gm200_flcn_bl_desc *bl_desc, bool patch)
948{
949 struct nvkm_subdev *subdev = &gsb->base.subdev;
950 void *acr_image;
951 struct fw_bin_header *hsbin_hdr;
952 struct hsf_fw_header *fw_hdr;
953 void *acr_data;
954 struct hsf_load_header *load_hdr;
955 struct hsflcn_acr_desc *desc;
956 int ret;
957
958 acr_image = gm200_secboot_load_firmware(subdev, fw, 0);
959 if (IS_ERR(acr_image))
960 return PTR_ERR(acr_image);
961 hsbin_hdr = acr_image;
962
963 /* Patch signature */
964 gm200_secboot_hsf_patch_signature(gsb, acr_image);
965
966 acr_data = acr_image + hsbin_hdr->data_offset;
967
968 /* Patch descriptor? */
969 if (patch) {
970 fw_hdr = acr_image + hsbin_hdr->header_offset;
971 load_hdr = acr_image + fw_hdr->hdr_offset;
972 desc = acr_data + load_hdr->data_dma_base;
973 gsb->func->fixup_hs_desc(gsb, desc);
974 }
975
976 /* Generate HS BL descriptor */
977 gm200_secboot_populate_hsf_bl_desc(acr_image, bl_desc);
978
979 /* Create ACR blob and copy HS data to it */
980 ret = nvkm_gpuobj_new(subdev->device, ALIGN(hsbin_hdr->data_size, 256),
981 0x1000, false, NULL, blob);
982 if (ret)
983 goto cleanup;
984
985 nvkm_kmap(*blob);
986 nvkm_gpuobj_memcpy_to(*blob, 0, acr_data, hsbin_hdr->data_size);
987 nvkm_done(*blob);
988
989cleanup:
990 kfree(acr_image);
991
992 return ret;
993}
994
995/*
996 * High-secure bootloader blob creation
997 */
998
999static int
1000gm200_secboot_prepare_hsbl_blob(struct gm200_secboot *gsb)
1001{
1002 struct nvkm_subdev *subdev = &gsb->base.subdev;
1003
1004 gsb->hsbl_blob = gm200_secboot_load_firmware(subdev, "acr/bl", 0);
1005 if (IS_ERR(gsb->hsbl_blob)) {
1006 int ret = PTR_ERR(gsb->hsbl_blob);
1007
1008 gsb->hsbl_blob = NULL;
1009 return ret;
1010 }
1011
1012 return 0;
1013}
1014 31
1015/** 32/**
1016 * gm20x_secboot_prepare_blobs - load blobs common to all GM20X GPUs. 33 * gm200_secboot_run_blob() - run the given high-secure blob
1017 * 34 *
1018 * This includes the LS blob, HS ucode loading blob, and HS bootloader.
1019 *
1020 * The HS ucode unload blob is only used on dGPU.
1021 */ 35 */
1022int 36int
1023gm20x_secboot_prepare_blobs(struct gm200_secboot *gsb) 37gm200_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob)
1024{
1025 int ret;
1026
1027 /* Load and prepare the managed falcon's firmwares */
1028 if (!gsb->ls_blob) {
1029 ret = gm200_secboot_prepare_ls_blob(gsb);
1030 if (ret)
1031 return ret;
1032 }
1033
1034 /* Load the HS firmware that will load the LS firmwares */
1035 if (!gsb->acr_load_blob) {
1036 ret = gm200_secboot_prepare_hs_blob(gsb, "acr/ucode_load",
1037 &gsb->acr_load_blob,
1038 &gsb->acr_load_bl_desc, true);
1039 if (ret)
1040 return ret;
1041 }
1042
1043 /* Load the HS firmware bootloader */
1044 if (!gsb->hsbl_blob) {
1045 ret = gm200_secboot_prepare_hsbl_blob(gsb);
1046 if (ret)
1047 return ret;
1048 }
1049
1050 return 0;
1051}
1052
1053static int
1054gm200_secboot_prepare_blobs(struct gm200_secboot *gsb)
1055{
1056 int ret;
1057
1058 ret = gm20x_secboot_prepare_blobs(gsb);
1059 if (ret)
1060 return ret;
1061
1062 /* dGPU only: load the HS firmware that unprotects the WPR region */
1063 if (!gsb->acr_unload_blob) {
1064 ret = gm200_secboot_prepare_hs_blob(gsb, "acr/ucode_unload",
1065 &gsb->acr_unload_blob,
1066 &gsb->acr_unload_bl_desc, false);
1067 if (ret)
1068 return ret;
1069 }
1070
1071 return 0;
1072}
1073
1074static int
1075gm200_secboot_blobs_ready(struct gm200_secboot *gsb)
1076{ 38{
39 struct gm200_secboot *gsb = gm200_secboot(sb);
1077 struct nvkm_subdev *subdev = &gsb->base.subdev; 40 struct nvkm_subdev *subdev = &gsb->base.subdev;
41 struct nvkm_falcon *falcon = gsb->base.boot_falcon;
42 struct nvkm_vma vma;
1078 int ret; 43 int ret;
1079 44
1080 /* firmware already loaded, nothing to do... */ 45 ret = nvkm_falcon_get(falcon, subdev);
1081 if (gsb->firmware_ok)
1082 return 0;
1083
1084 ret = gsb->func->prepare_blobs(gsb);
1085 if (ret) {
1086 nvkm_error(subdev, "failed to load secure firmware\n");
1087 return ret;
1088 }
1089
1090 gsb->firmware_ok = true;
1091
1092 return 0;
1093}
1094
1095
1096/*
1097 * Secure Boot Execution
1098 */
1099
1100/**
1101 * gm200_secboot_load_hs_bl() - load HS bootloader into DMEM and IMEM
1102 */
1103static void
1104gm200_secboot_load_hs_bl(struct gm200_secboot *gsb, void *data, u32 data_size)
1105{
1106 struct nvkm_device *device = gsb->base.subdev.device;
1107 struct fw_bin_header *hdr = gsb->hsbl_blob;
1108 struct fw_bl_desc *hsbl_desc = gsb->hsbl_blob + hdr->header_offset;
1109 void *blob_data = gsb->hsbl_blob + hdr->data_offset;
1110 void *hsbl_code = blob_data + hsbl_desc->code_off;
1111 void *hsbl_data = blob_data + hsbl_desc->data_off;
1112 u32 code_size = ALIGN(hsbl_desc->code_size, 256);
1113 const u32 base = gsb->base.base;
1114 u32 blk;
1115 u32 tag;
1116 int i;
1117
1118 /*
1119 * Copy HS bootloader data
1120 */
1121 nvkm_wr32(device, base + 0x1c0, (0x00000000 | (0x1 << 24)));
1122 for (i = 0; i < hsbl_desc->data_size / 4; i++)
1123 nvkm_wr32(device, base + 0x1c4, ((u32 *)hsbl_data)[i]);
1124
1125 /*
1126 * Copy HS bootloader interface structure where the HS descriptor
1127 * expects it to be
1128 */
1129 nvkm_wr32(device, base + 0x1c0,
1130 (hsbl_desc->dmem_load_off | (0x1 << 24)));
1131 for (i = 0; i < data_size / 4; i++)
1132 nvkm_wr32(device, base + 0x1c4, ((u32 *)data)[i]);
1133
1134 /* Copy HS bootloader code to end of IMEM */
1135 blk = (nvkm_rd32(device, base + 0x108) & 0x1ff) - (code_size >> 8);
1136 tag = hsbl_desc->start_tag;
1137 nvkm_wr32(device, base + 0x180, ((blk & 0xff) << 8) | (0x1 << 24));
1138 for (i = 0; i < code_size / 4; i++) {
1139 /* write new tag every 256B */
1140 if ((i & 0x3f) == 0) {
1141 nvkm_wr32(device, base + 0x188, tag & 0xffff);
1142 tag++;
1143 }
1144 nvkm_wr32(device, base + 0x184, ((u32 *)hsbl_code)[i]);
1145 }
1146 nvkm_wr32(device, base + 0x188, 0);
1147}
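The IMEM copy above re-tags every 64 words (256 bytes); with, say, code_size = 0x800 and start_tag = 0x10 (hypothetical values), the loop behaves as follows:

/* Worked example of the IMEM tagging loop (hypothetical values):
 *   code_size = 0x800 bytes = 8 blocks of 256B = 0x200 words
 *   blk = (IMEM size in blocks) - 8, so the BL lands at the end of IMEM
 *   words 0x00-0x3f get tag 0x10, 0x40-0x7f get tag 0x11, ... up to 0x17
 *   boot vector (virt_addr, set below) = start_tag << 8 = 0x1000
 */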
1148
1149/**
1150 * gm200_secboot_setup_falcon() - set up the secure falcon for secure boot
1151 */
1152static int
1153gm200_secboot_setup_falcon(struct gm200_secboot *gsb)
1154{
1155 struct nvkm_device *device = gsb->base.subdev.device;
1156 struct fw_bin_header *hdr = gsb->hsbl_blob;
1157 struct fw_bl_desc *hsbl_desc = gsb->hsbl_blob + hdr->header_offset;
1158 /* virtual start address for boot vector */
1159 u32 virt_addr = hsbl_desc->start_tag << 8;
1160 const u32 base = gsb->base.base;
1161 const u32 reg_base = base + 0xe00;
1162 u32 inst_loc;
1163 int ret;
1164
1165 ret = nvkm_secboot_falcon_reset(&gsb->base);
1166 if (ret) 46 if (ret)
1167 return ret; 47 return ret;
1168 48
1169 /* setup apertures - virtual */
1170 nvkm_wr32(device, reg_base + 4 * (FALCON_DMAIDX_UCODE), 0x4);
1171 nvkm_wr32(device, reg_base + 4 * (FALCON_DMAIDX_VIRT), 0x0);
1172 /* setup apertures - physical */
1173 nvkm_wr32(device, reg_base + 4 * (FALCON_DMAIDX_PHYS_VID), 0x4);
1174 nvkm_wr32(device, reg_base + 4 * (FALCON_DMAIDX_PHYS_SYS_COH),
1175 0x4 | 0x1);
1176 nvkm_wr32(device, reg_base + 4 * (FALCON_DMAIDX_PHYS_SYS_NCOH),
1177 0x4 | 0x2);
1178
1179 /* Set context */
1180 if (nvkm_memory_target(gsb->inst->memory) == NVKM_MEM_TARGET_VRAM)
1181 inst_loc = 0x0; /* FB */
1182 else
1183 inst_loc = 0x3; /* Non-coherent sysmem */
1184
1185 nvkm_mask(device, base + 0x048, 0x1, 0x1);
1186 nvkm_wr32(device, base + 0x480,
1187 ((gsb->inst->addr >> 12) & 0xfffffff) |
1188 (inst_loc << 28) | (1 << 30));
1189
1190 /* Set boot vector to code's starting virtual address */
1191 nvkm_wr32(device, base + 0x104, virt_addr);
1192
1193 return 0;
1194}
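The context-binding write above packs the instance block pointer, target aperture and a valid bit into a single register; e.g. for an instance block at FB address 0x1f000000 (hypothetical):

/* Worked example of the 0x480 encoding (address hypothetical):
 *   (0x1f000000 >> 12) & 0xfffffff = 0x1f000   (4K-page number)
 *   inst_loc = 0x0 (FB)                        (bits 29:28)
 *   valid    = 1 << 30
 *   value written = 0x1f000 | (0x0 << 28) | (1 << 30) = 0x4001f000
 */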
1195
1196/**
1197 * gm200_secboot_run_hs_blob() - run the given high-secure blob
1198 */
1199static int
1200gm200_secboot_run_hs_blob(struct gm200_secboot *gsb, struct nvkm_gpuobj *blob,
1201 struct gm200_flcn_bl_desc *desc)
1202{
1203 struct nvkm_vma vma;
1204 u64 vma_addr;
1205 const u32 bl_desc_size = gsb->func->bl_desc_size;
1206 u8 bl_desc[bl_desc_size];
1207 int ret;
1208
1209 /* Map the HS firmware so the HS bootloader can see it */ 49 /* Map the HS firmware so the HS bootloader can see it */
1210 ret = nvkm_gpuobj_map(blob, gsb->vm, NV_MEM_ACCESS_RW, &vma); 50 ret = nvkm_gpuobj_map(blob, gsb->vm, NV_MEM_ACCESS_RW, &vma);
1211 if (ret) 51 if (ret) {
52 nvkm_falcon_put(falcon, subdev);
1212 return ret; 53 return ret;
54 }
1213 55
1214 /* Add the mapping address to the DMA bases */ 56 /* Reset and set the falcon up */
1215 vma_addr = flcn64_to_u64(desc->code_dma_base) + vma.offset; 57 ret = nvkm_falcon_reset(falcon);
1216 desc->code_dma_base.lo = lower_32_bits(vma_addr);
1217 desc->code_dma_base.hi = upper_32_bits(vma_addr);
1218 vma_addr = flcn64_to_u64(desc->data_dma_base) + vma.offset;
1219 desc->data_dma_base.lo = lower_32_bits(vma_addr);
1220 desc->data_dma_base.hi = upper_32_bits(vma_addr);
1221
1222 /* Fixup the BL header */
1223 gsb->func->fixup_bl_desc(desc, &bl_desc);
1224
1225 /* Reset the falcon and make it ready to run the HS bootloader */
1226 ret = gm200_secboot_setup_falcon(gsb);
1227 if (ret) 58 if (ret)
1228 goto done; 59 goto end;
60 nvkm_falcon_bind_context(falcon, gsb->inst);
1229 61
1230 /* Load the HS bootloader into the falcon's IMEM/DMEM */ 62 /* Load the HS bootloader into the falcon's IMEM/DMEM */
1231 gm200_secboot_load_hs_bl(gsb, &bl_desc, bl_desc_size); 63 ret = sb->acr->func->load(sb->acr, &gsb->base, blob, vma.offset);
1232
1233 /* Start the HS bootloader */
1234 ret = nvkm_secboot_falcon_run(&gsb->base);
1235 if (ret) 64 if (ret)
1236 goto done; 65 goto end;
1237
1238done:
1239 /* Restore the original DMA addresses */
1240 vma_addr = flcn64_to_u64(desc->code_dma_base) - vma.offset;
1241 desc->code_dma_base.lo = lower_32_bits(vma_addr);
1242 desc->code_dma_base.hi = upper_32_bits(vma_addr);
1243 vma_addr = flcn64_to_u64(desc->data_dma_base) - vma.offset;
1244 desc->data_dma_base.lo = lower_32_bits(vma_addr);
1245 desc->data_dma_base.hi = upper_32_bits(vma_addr);
1246
1247 /* We don't need the ACR firmware anymore */
1248 nvkm_gpuobj_unmap(&vma);
1249 66
1250 return ret; 67 /* Disable interrupts as we will poll for the HALT bit */
1251} 68 nvkm_mc_intr_mask(sb->subdev.device, falcon->owner->index, false);
1252 69
1253/* 70 /* Set default error value in mailbox register */
1254 * gm200_secboot_reset() - execute secure boot from the prepared state 71 nvkm_falcon_wr32(falcon, 0x040, 0xdeada5a5);
1255 *
1256 * Load the HS bootloader and ask the falcon to run it. This will in turn
1257 * load the HS firmware and run it, so once the falcon stops all the managed
1258 * falcons should have their LS firmware loaded and be ready to run.
1259 */
1260int
1261gm200_secboot_reset(struct nvkm_secboot *sb, enum nvkm_secboot_falcon falcon)
1262{
1263 struct gm200_secboot *gsb = gm200_secboot(sb);
1264 int ret;
1265 72
1266 /* Make sure all blobs are ready */ 73 /* Start the HS bootloader */
1267 ret = gm200_secboot_blobs_ready(gsb); 74 nvkm_falcon_set_start_addr(falcon, sb->acr->start_address);
75 nvkm_falcon_start(falcon);
76 ret = nvkm_falcon_wait_for_halt(falcon, 100);
1268 if (ret) 77 if (ret)
1269 return ret;
1270
1271 /*
1272 * Dummy GM200 implementation: perform secure boot each time we are
1273 * called on FECS. Since only FECS and GPCCS are managed and started
1274 * together, this ought to be safe.
1275 *
1276 * Once we have proper PMU firmware and support, this will be changed
1277 * to a proper call to the PMU method.
1278 */
1279 if (falcon != NVKM_SECBOOT_FALCON_FECS)
1280 goto end; 78 goto end;
1281 79
1282 /* If WPR is set and we have an unload blob, run it to unlock WPR */ 80 /* If mailbox register contains an error code, then ACR has failed */
1283 if (gsb->acr_unload_blob && 81 ret = nvkm_falcon_rd32(falcon, 0x040);
1284 gsb->falcon_state[NVKM_SECBOOT_FALCON_FECS] != NON_SECURE) { 82 if (ret) {
1285 ret = gm200_secboot_run_hs_blob(gsb, gsb->acr_unload_blob, 83 nvkm_error(subdev, "ACR boot failed, ret 0x%08x", ret);
1286 &gsb->acr_unload_bl_desc); 84 ret = -EINVAL;
1287 if (ret) 85 goto end;
1288 return ret;
1289 } 86 }
1290 87
1291 /* Reload all managed falcons */
1292 ret = gm200_secboot_run_hs_blob(gsb, gsb->acr_load_blob,
1293 &gsb->acr_load_bl_desc);
1294 if (ret)
1295 return ret;
1296
1297end: 88end:
1298 gsb->falcon_state[falcon] = RESET; 89 /* Reenable interrupts */
1299 return 0; 90 nvkm_mc_intr_mask(sb->subdev.device, falcon->owner->index, true);
1300}
1301 91
1302int 92 /* We don't need the ACR firmware anymore */
1303gm200_secboot_start(struct nvkm_secboot *sb, enum nvkm_secboot_falcon falcon) 93 nvkm_gpuobj_unmap(&vma);
1304{ 94 nvkm_falcon_put(falcon, subdev);
1305 struct gm200_secboot *gsb = gm200_secboot(sb);
1306 int base;
1307
1308 switch (falcon) {
1309 case NVKM_SECBOOT_FALCON_FECS:
1310 base = 0x409000;
1311 break;
1312 case NVKM_SECBOOT_FALCON_GPCCS:
1313 base = 0x41a000;
1314 break;
1315 default:
1316 nvkm_error(&sb->subdev, "cannot start unhandled falcon!\n");
1317 return -EINVAL;
1318 }
1319
1320 nvkm_wr32(sb->subdev.device, base + 0x130, 0x00000002);
1321 gsb->falcon_state[falcon] = RUNNING;
1322 95
1323 return 0; 96 return ret;
1324} 97}
1325 98
1326
1327
1328int 99int
1329gm200_secboot_init(struct nvkm_secboot *sb) 100gm200_secboot_oneinit(struct nvkm_secboot *sb)
1330{ 101{
1331 struct gm200_secboot *gsb = gm200_secboot(sb); 102 struct gm200_secboot *gsb = gm200_secboot(sb);
1332 struct nvkm_device *device = sb->subdev.device; 103 struct nvkm_device *device = sb->subdev.device;
@@ -1361,24 +132,22 @@ gm200_secboot_init(struct nvkm_secboot *sb)
1361 nvkm_wo32(gsb->inst, 0x20c, upper_32_bits(vm_area_len - 1)); 132 nvkm_wo32(gsb->inst, 0x20c, upper_32_bits(vm_area_len - 1));
1362 nvkm_done(gsb->inst); 133 nvkm_done(gsb->inst);
1363 134
135 if (sb->acr->func->oneinit) {
136 ret = sb->acr->func->oneinit(sb->acr, sb);
137 if (ret)
138 return ret;
139 }
140
1364 return 0; 141 return 0;
1365} 142}
1366 143
1367static int 144int
1368gm200_secboot_fini(struct nvkm_secboot *sb, bool suspend) 145gm200_secboot_fini(struct nvkm_secboot *sb, bool suspend)
1369{ 146{
1370 struct gm200_secboot *gsb = gm200_secboot(sb);
1371 int ret = 0; 147 int ret = 0;
1372 int i;
1373 148
1374 /* Run the unload blob to unprotect the WPR region */ 149 if (sb->acr->func->fini)
1375 if (gsb->acr_unload_blob && 150 ret = sb->acr->func->fini(sb->acr, sb, suspend);
1376 gsb->falcon_state[NVKM_SECBOOT_FALCON_FECS] != NON_SECURE)
1377 ret = gm200_secboot_run_hs_blob(gsb, gsb->acr_unload_blob,
1378 &gsb->acr_unload_bl_desc);
1379
1380 for (i = 0; i < NVKM_SECBOOT_FALCON_END; i++)
1381 gsb->falcon_state[i] = NON_SECURE;
1382 151
1383 return ret; 152 return ret;
1384} 153}
@@ -1388,11 +157,7 @@ gm200_secboot_dtor(struct nvkm_secboot *sb)
1388{ 157{
1389 struct gm200_secboot *gsb = gm200_secboot(sb); 158 struct gm200_secboot *gsb = gm200_secboot(sb);
1390 159
1391 nvkm_gpuobj_del(&gsb->acr_unload_blob); 160 sb->acr->func->dtor(sb->acr);
1392
1393 kfree(gsb->hsbl_blob);
1394 nvkm_gpuobj_del(&gsb->acr_load_blob);
1395 nvkm_gpuobj_del(&gsb->ls_blob);
1396 161
1397 nvkm_vm_ref(NULL, &gsb->vm, gsb->pgd); 162 nvkm_vm_ref(NULL, &gsb->vm, gsb->pgd);
1398 nvkm_gpuobj_del(&gsb->pgd); 163 nvkm_gpuobj_del(&gsb->pgd);
@@ -1405,50 +170,9 @@ gm200_secboot_dtor(struct nvkm_secboot *sb)
1405static const struct nvkm_secboot_func 170static const struct nvkm_secboot_func
1406gm200_secboot = { 171gm200_secboot = {
1407 .dtor = gm200_secboot_dtor, 172 .dtor = gm200_secboot_dtor,
1408 .init = gm200_secboot_init, 173 .oneinit = gm200_secboot_oneinit,
1409 .fini = gm200_secboot_fini, 174 .fini = gm200_secboot_fini,
1410 .reset = gm200_secboot_reset, 175 .run_blob = gm200_secboot_run_blob,
1411 .start = gm200_secboot_start,
1412 .managed_falcons = BIT(NVKM_SECBOOT_FALCON_FECS) |
1413 BIT(NVKM_SECBOOT_FALCON_GPCCS),
1414 .boot_falcon = NVKM_SECBOOT_FALCON_PMU,
1415};
1416
1417/**
1418 * gm200_secboot_fixup_bl_desc - just copy the BL descriptor
1419 *
1420 * Use the GM200 descriptor format by default.
1421 */
1422static void
1423gm200_secboot_fixup_bl_desc(const struct gm200_flcn_bl_desc *desc, void *ret)
1424{
1425 memcpy(ret, desc, sizeof(*desc));
1426}
1427
1428static void
1429gm200_secboot_fixup_hs_desc(struct gm200_secboot *gsb,
1430 struct hsflcn_acr_desc *desc)
1431{
1432 desc->ucode_blob_base = gsb->ls_blob->addr;
1433 desc->ucode_blob_size = gsb->ls_blob->size;
1434
1435 desc->wpr_offset = 0;
1436
1437 /* WPR region information for the HS binary to set up */
1438 desc->wpr_region_id = 1;
1439 desc->regions.no_regions = 1;
1440 desc->regions.region_props[0].region_id = 1;
1441 desc->regions.region_props[0].start_addr = gsb->wpr_addr >> 8;
1442 desc->regions.region_props[0].end_addr =
1443 (gsb->wpr_addr + gsb->wpr_size) >> 8;
1444}
1445
1446static const struct gm200_secboot_func
1447gm200_secboot_func = {
1448 .bl_desc_size = sizeof(struct gm200_flcn_bl_desc),
1449 .fixup_bl_desc = gm200_secboot_fixup_bl_desc,
1450 .fixup_hs_desc = gm200_secboot_fixup_hs_desc,
1451 .prepare_blobs = gm200_secboot_prepare_blobs,
1452}; 176};
1453 177
1454int 178int
@@ -1457,6 +181,12 @@ gm200_secboot_new(struct nvkm_device *device, int index,
1457{ 181{
1458 int ret; 182 int ret;
1459 struct gm200_secboot *gsb; 183 struct gm200_secboot *gsb;
184 struct nvkm_acr *acr;
185
186 acr = acr_r361_new(BIT(NVKM_SECBOOT_FALCON_FECS) |
187 BIT(NVKM_SECBOOT_FALCON_GPCCS));
188 if (IS_ERR(acr))
189 return PTR_ERR(acr);
1460 190
1461 gsb = kzalloc(sizeof(*gsb), GFP_KERNEL); 191 gsb = kzalloc(sizeof(*gsb), GFP_KERNEL);
1462 if (!gsb) { 192 if (!gsb) {
@@ -1465,15 +195,14 @@ gm200_secboot_new(struct nvkm_device *device, int index,
1465 } 195 }
1466 *psb = &gsb->base; 196 *psb = &gsb->base;
1467 197
1468 ret = nvkm_secboot_ctor(&gm200_secboot, device, index, &gsb->base); 198 ret = nvkm_secboot_ctor(&gm200_secboot, acr, device, index, &gsb->base);
1469 if (ret) 199 if (ret)
1470 return ret; 200 return ret;
1471 201
1472 gsb->func = &gm200_secboot_func;
1473
1474 return 0; 202 return 0;
1475} 203}
1476 204
205
1477MODULE_FIRMWARE("nvidia/gm200/acr/bl.bin"); 206MODULE_FIRMWARE("nvidia/gm200/acr/bl.bin");
1478MODULE_FIRMWARE("nvidia/gm200/acr/ucode_load.bin"); 207MODULE_FIRMWARE("nvidia/gm200/acr/ucode_load.bin");
1479MODULE_FIRMWARE("nvidia/gm200/acr/ucode_unload.bin"); 208MODULE_FIRMWARE("nvidia/gm200/acr/ucode_unload.bin");
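
The new gm200_secboot_run_blob() above replaces the old descriptor-patching dance with a mailbox handshake: a sentinel is written to the falcon's mailbox register before the HS bootloader starts, and any non-zero value left there after the falcon halts is reported as the ACR error code. A minimal stand-alone C sketch of that handshake; the falcon_* functions are hypothetical stubs, not the nvkm accessors:

    #include <stdint.h>
    #include <stdio.h>

    #define MBOX_SENTINEL 0xdeada5a5u  /* same sentinel value as the patch */

    /* Hypothetical stand-ins for the real nvkm falcon accessors. */
    static uint32_t mbox;
    static void falcon_wr32(uint32_t v) { mbox = v; }
    static uint32_t falcon_rd32(void) { return mbox; }
    static void falcon_run(void) { mbox = 0; }  /* HS code clears it on success */

    int main(void)
    {
        uint32_t err;

        falcon_wr32(MBOX_SENTINEL);  /* assume failure by default */
        falcon_run();                /* start falcon, wait for HALT */
        err = falcon_rd32();         /* non-zero: ACR reported an error */
        if (err) {
            fprintf(stderr, "ACR boot failed, ret 0x%08x\n", err);
            return 1;
        }
        return 0;
    }
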
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h
new file mode 100644
index 000000000000..45adf1a3bc20
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h
@@ -0,0 +1,43 @@
1/*
2 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef __NVKM_SECBOOT_GM200_H__
24#define __NVKM_SECBOOT_GM200_H__
25
26#include "priv.h"
27
28struct gm200_secboot {
29 struct nvkm_secboot base;
30
31 /* Instance block & address space used for HS FW execution */
32 struct nvkm_gpuobj *inst;
33 struct nvkm_gpuobj *pgd;
34 struct nvkm_vm *vm;
35};
36#define gm200_secboot(sb) container_of(sb, struct gm200_secboot, base)
37
38int gm200_secboot_oneinit(struct nvkm_secboot *);
39int gm200_secboot_fini(struct nvkm_secboot *, bool);
40void *gm200_secboot_dtor(struct nvkm_secboot *);
41int gm200_secboot_run_blob(struct nvkm_secboot *, struct nvkm_gpuobj *);
42
43#endif
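
The gm200_secboot(sb) macro above is the usual container_of() downcast from the embedded nvkm_secboot base to the chip-specific wrapper. A self-contained illustration of the pattern, with simplified struct definitions rather than the kernel's:

    #include <stddef.h>
    #include <stdio.h>

    /* Same trick as the kernel's container_of(): recover the wrapper
     * from a pointer to its embedded member. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct secboot { int index; };
    struct gm200_secboot { struct secboot base; int wpr_ok; };

    int main(void)
    {
        struct gm200_secboot gsb = { .base = { .index = 7 }, .wpr_ok = 1 };
        struct secboot *sb = &gsb.base;  /* generic handle passed around */
        struct gm200_secboot *back =
            container_of(sb, struct gm200_secboot, base);
        printf("%d %d\n", back->base.index, back->wpr_ok);  /* prints: 7 1 */
        return 0;
    }
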
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
index d5395ebfe8d3..6707b8edc086 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
@@ -20,103 +20,8 @@
20 * DEALINGS IN THE SOFTWARE. 20 * DEALINGS IN THE SOFTWARE.
21 */ 21 */
22 22
23#include "priv.h" 23#include "acr.h"
24 24#include "gm200.h"
25#include <core/gpuobj.h>
26
27/*
28 * The BL header format used by GM20B's firmware is slightly different
29 * from the one of GM200. Fix the differences here.
30 */
31struct gm20b_flcn_bl_desc {
32 u32 reserved[4];
33 u32 signature[4];
34 u32 ctx_dma;
35 u32 code_dma_base;
36 u32 non_sec_code_off;
37 u32 non_sec_code_size;
38 u32 sec_code_off;
39 u32 sec_code_size;
40 u32 code_entry_point;
41 u32 data_dma_base;
42 u32 data_size;
43};
44
45static int
46gm20b_secboot_prepare_blobs(struct gm200_secboot *gsb)
47{
48 struct nvkm_subdev *subdev = &gsb->base.subdev;
49 int acr_size;
50 int ret;
51
52 ret = gm20x_secboot_prepare_blobs(gsb);
53 if (ret)
54 return ret;
55
56 acr_size = gsb->acr_load_blob->size;
57 /*
58 * On Tegra the WPR region is set by the bootloader. It is illegal for
59 * the HS blob to be larger than this region.
60 */
61 if (acr_size > gsb->wpr_size) {
62 nvkm_error(subdev, "WPR region too small for FW blob!\n");
63 nvkm_error(subdev, "required: %dB\n", acr_size);
64 nvkm_error(subdev, "WPR size: %dB\n", gsb->wpr_size);
65 return -ENOSPC;
66 }
67
68 return 0;
69}
70
71/**
72 * gm20b_secboot_fixup_bl_desc - adapt BL descriptor to format used by GM20B FW
73 *
74 * There is only a slight format difference to handle: DMA addresses are
75 * 32-bit and 256B-aligned.
76 */
77static void
78gm20b_secboot_fixup_bl_desc(const struct gm200_flcn_bl_desc *desc, void *ret)
79{
80 struct gm20b_flcn_bl_desc *gdesc = ret;
81 u64 addr;
82
83 memcpy(gdesc->reserved, desc->reserved, sizeof(gdesc->reserved));
84 memcpy(gdesc->signature, desc->signature, sizeof(gdesc->signature));
85 gdesc->ctx_dma = desc->ctx_dma;
86 addr = desc->code_dma_base.hi;
87 addr <<= 32;
88 addr |= desc->code_dma_base.lo;
89 gdesc->code_dma_base = lower_32_bits(addr >> 8);
90 gdesc->non_sec_code_off = desc->non_sec_code_off;
91 gdesc->non_sec_code_size = desc->non_sec_code_size;
92 gdesc->sec_code_off = desc->sec_code_off;
93 gdesc->sec_code_size = desc->sec_code_size;
94 gdesc->code_entry_point = desc->code_entry_point;
95 addr = desc->data_dma_base.hi;
96 addr <<= 32;
97 addr |= desc->data_dma_base.lo;
98 gdesc->data_dma_base = lower_32_bits(addr >> 8);
99 gdesc->data_size = desc->data_size;
100}
101
102static void
103gm20b_secboot_fixup_hs_desc(struct gm200_secboot *gsb,
104 struct hsflcn_acr_desc *desc)
105{
106 desc->ucode_blob_base = gsb->ls_blob->addr;
107 desc->ucode_blob_size = gsb->ls_blob->size;
108
109 desc->wpr_offset = 0;
110}
111
112static const struct gm200_secboot_func
113gm20b_secboot_func = {
114 .bl_desc_size = sizeof(struct gm20b_flcn_bl_desc),
115 .fixup_bl_desc = gm20b_secboot_fixup_bl_desc,
116 .fixup_hs_desc = gm20b_secboot_fixup_hs_desc,
117 .prepare_blobs = gm20b_secboot_prepare_blobs,
118};
119
120 25
121#ifdef CONFIG_ARCH_TEGRA 26#ifdef CONFIG_ARCH_TEGRA
122#define TEGRA_MC_BASE 0x70019000 27#define TEGRA_MC_BASE 0x70019000
@@ -144,15 +49,15 @@ gm20b_tegra_read_wpr(struct gm200_secboot *gsb)
144 nvkm_error(&sb->subdev, "Cannot map Tegra MC registers\n"); 49 nvkm_error(&sb->subdev, "Cannot map Tegra MC registers\n");
145 return PTR_ERR(mc); 50 return PTR_ERR(mc);
146 } 51 }
147 gsb->wpr_addr = ioread32_native(mc + MC_SECURITY_CARVEOUT2_BOM_0) | 52 sb->wpr_addr = ioread32_native(mc + MC_SECURITY_CARVEOUT2_BOM_0) |
148 ((u64)ioread32_native(mc + MC_SECURITY_CARVEOUT2_BOM_HI_0) << 32); 53 ((u64)ioread32_native(mc + MC_SECURITY_CARVEOUT2_BOM_HI_0) << 32);
149 gsb->wpr_size = ioread32_native(mc + MC_SECURITY_CARVEOUT2_SIZE_128K) 54 sb->wpr_size = ioread32_native(mc + MC_SECURITY_CARVEOUT2_SIZE_128K)
150 << 17; 55 << 17;
151 cfg = ioread32_native(mc + MC_SECURITY_CARVEOUT2_CFG0); 56 cfg = ioread32_native(mc + MC_SECURITY_CARVEOUT2_CFG0);
152 iounmap(mc); 57 iounmap(mc);
153 58
154 /* Check that WPR settings are valid */ 59 /* Check that WPR settings are valid */
155 if (gsb->wpr_size == 0) { 60 if (sb->wpr_size == 0) {
156 nvkm_error(&sb->subdev, "WPR region is empty\n"); 61 nvkm_error(&sb->subdev, "WPR region is empty\n");
157 return -EINVAL; 62 return -EINVAL;
158 } 63 }
@@ -174,7 +79,7 @@ gm20b_tegra_read_wpr(struct gm200_secboot *gsb)
174#endif 79#endif
175 80
176static int 81static int
177gm20b_secboot_init(struct nvkm_secboot *sb) 82gm20b_secboot_oneinit(struct nvkm_secboot *sb)
178{ 83{
179 struct gm200_secboot *gsb = gm200_secboot(sb); 84 struct gm200_secboot *gsb = gm200_secboot(sb);
180 int ret; 85 int ret;
@@ -183,17 +88,15 @@ gm20b_secboot_init(struct nvkm_secboot *sb)
183 if (ret) 88 if (ret)
184 return ret; 89 return ret;
185 90
186 return gm200_secboot_init(sb); 91 return gm200_secboot_oneinit(sb);
187} 92}
188 93
189static const struct nvkm_secboot_func 94static const struct nvkm_secboot_func
190gm20b_secboot = { 95gm20b_secboot = {
191 .dtor = gm200_secboot_dtor, 96 .dtor = gm200_secboot_dtor,
192 .init = gm20b_secboot_init, 97 .oneinit = gm20b_secboot_oneinit,
193 .reset = gm200_secboot_reset, 98 .fini = gm200_secboot_fini,
194 .start = gm200_secboot_start, 99 .run_blob = gm200_secboot_run_blob,
195 .managed_falcons = BIT(NVKM_SECBOOT_FALCON_FECS),
196 .boot_falcon = NVKM_SECBOOT_FALCON_PMU,
197}; 100};
198 101
199int 102int
@@ -202,6 +105,11 @@ gm20b_secboot_new(struct nvkm_device *device, int index,
202{ 105{
203 int ret; 106 int ret;
204 struct gm200_secboot *gsb; 107 struct gm200_secboot *gsb;
108 struct nvkm_acr *acr;
109
110 acr = acr_r352_new(BIT(NVKM_SECBOOT_FALCON_FECS));
111 if (IS_ERR(acr))
112 return PTR_ERR(acr);
205 113
206 gsb = kzalloc(sizeof(*gsb), GFP_KERNEL); 114 gsb = kzalloc(sizeof(*gsb), GFP_KERNEL);
207 if (!gsb) { 115 if (!gsb) {
@@ -210,12 +118,10 @@ gm20b_secboot_new(struct nvkm_device *device, int index,
210 } 118 }
211 *psb = &gsb->base; 119 *psb = &gsb->base;
212 120
213 ret = nvkm_secboot_ctor(&gm20b_secboot, device, index, &gsb->base); 121 ret = nvkm_secboot_ctor(&gm20b_secboot, acr, device, index, &gsb->base);
214 if (ret) 122 if (ret)
215 return ret; 123 return ret;
216 124
217 gsb->func = &gm20b_secboot_func;
218
219 return 0; 125 return 0;
220} 126}
221 127
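
The GM20B bootloader descriptor removed above stored its DMA bases as 32-bit values in 256-byte units, hence the lower_32_bits(addr >> 8) conversion. A quick sketch of that packing and its inverse, using an invented 256B-aligned address:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t addr = 0x1234567800ULL;          /* invented, 256B-aligned */
        uint32_t packed = (uint32_t)(addr >> 8);  /* GM20B: 32-bit, 256B units */
        uint64_t restored = (uint64_t)packed << 8;

        printf("packed 0x%08x restored 0x%llx\n",
               packed, (unsigned long long)restored);
        return 0;
    }
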
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h
new file mode 100644
index 000000000000..00886cee57eb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h
@@ -0,0 +1,151 @@
1/*
2 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef __NVKM_SECBOOT_LS_UCODE_H__
24#define __NVKM_SECBOOT_LS_UCODE_H__
25
26#include <core/os.h>
27#include <core/subdev.h>
28#include <subdev/secboot.h>
29
30
31/**
32 * struct ls_ucode_img_desc - descriptor of firmware image
33 * @descriptor_size: size of this descriptor
34 * @image_size: size of the whole image
35 * @bootloader_start_offset: start offset of the bootloader in ucode image
36 * @bootloader_size: size of the bootloader
37 * @bootloader_imem_offset: start offset of the bootloader in IMEM
38 * @bootloader_entry_point: entry point of the bootloader in IMEM
39 * @app_start_offset: start offset of the LS firmware
40 * @app_size: size of the LS firmware's code and data
41 * @app_imem_offset: offset of the app in IMEM
42 * @app_imem_entry: entry point of the app in IMEM
43 * @app_dmem_offset: offset of the data in DMEM
44 * @app_resident_code_offset: offset of app code from app_start_offset
45 * @app_resident_code_size: size of the code
46 * @app_resident_data_offset: offset of data from app_start_offset
47 * @app_resident_data_size: size of data
48 *
49 * A firmware image contains the code, data, and bootloader of a given LS
50 * falcon in a single blob. This structure describes where everything is.
51 *
52 * This can be generated from a (bootloader, code, data) set if they have
53 * been loaded separately, or come directly from a file.
54 */
55struct ls_ucode_img_desc {
56 u32 descriptor_size;
57 u32 image_size;
58 u32 tools_version;
59 u32 app_version;
60 char date[64];
61 u32 bootloader_start_offset;
62 u32 bootloader_size;
63 u32 bootloader_imem_offset;
64 u32 bootloader_entry_point;
65 u32 app_start_offset;
66 u32 app_size;
67 u32 app_imem_offset;
68 u32 app_imem_entry;
69 u32 app_dmem_offset;
70 u32 app_resident_code_offset;
71 u32 app_resident_code_size;
72 u32 app_resident_data_offset;
73 u32 app_resident_data_size;
74 u32 nb_overlays;
75 struct { u32 start; u32 size; } load_ovl[64];
76 u32 compressed;
77};
78
79/**
80 * struct ls_ucode_img - temporary storage for loaded LS firmwares
81 * @node: to link within lsf_ucode_mgr
82 * @falcon_id: ID of the falcon this LS firmware is for
83 * @ucode_desc: loaded or generated map of ucode_data
84 * @ucode_data: firmware payload (code and data)
85 * @ucode_size: size in bytes of data in ucode_data
86 * @sig: signature for this firmware
87 * @sig_size: size of the signature in bytes
88 *
89 * Preparing the WPR LS blob requires information about all the LS firmwares
90 * (size, etc) to be known. This structure contains all the data of one LS
91 * firmware.
92 */
93struct ls_ucode_img {
94 struct list_head node;
95 enum nvkm_secboot_falcon falcon_id;
96
97 struct ls_ucode_img_desc ucode_desc;
98 u8 *ucode_data;
99 u32 ucode_size;
100
101 u8 *sig;
102 u32 sig_size;
103};
104
105/**
106 * struct fw_bin_header - header of firmware files
107 * @bin_magic: always 0x3b1d14f0
108 * @bin_ver: version of the bin format
109 * @bin_size: entire image size including this header
110 * @header_offset: offset of the firmware/bootloader header in the file
111 * @data_offset: offset of the firmware/bootloader payload in the file
112 * @data_size: size of the payload
113 *
114 * This header is located at the beginning of the HS firmware and HS bootloader
115 * files and describes where the headers and data can be found.
116 */
117struct fw_bin_header {
118 u32 bin_magic;
119 u32 bin_ver;
120 u32 bin_size;
121 u32 header_offset;
122 u32 data_offset;
123 u32 data_size;
124};
125
126/**
127 * struct fw_bl_desc - firmware bootloader descriptor
128 * @start_tag: starting tag of bootloader
129 * @desc_dmem_load_off: DMEM offset of flcn_bl_dmem_desc
130 * @code_off: offset of code section
131 * @code_size: size of code section
132 * @data_off: offset of data section
133 * @data_size: size of data section
134 *
135 * This structure is embedded in bootloader firmware files to describe the
136 * IMEM and DMEM layout expected by the bootloader.
137 */
138struct fw_bl_desc {
139 u32 start_tag;
140 u32 dmem_load_off;
141 u32 code_off;
142 u32 code_size;
143 u32 data_off;
144 u32 data_size;
145};
146
147int acr_ls_ucode_load_fecs(const struct nvkm_subdev *, struct ls_ucode_img *);
148int acr_ls_ucode_load_gpccs(const struct nvkm_subdev *, struct ls_ucode_img *);
149
150
151#endif
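
Per the fw_bin_header comment above, firmware files start with a small table of offsets pointing at the real header and payload. A minimal sketch of walking such a file in memory; the buffer contents and offsets here are invented for the example:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct fw_bin_header {
        uint32_t bin_magic;      /* always 0x3b1d14f0 */
        uint32_t bin_ver;
        uint32_t bin_size;
        uint32_t header_offset;
        uint32_t data_offset;
        uint32_t data_size;
    };

    int main(void)
    {
        /* Fake 64-byte "file"; the layout values are made up. */
        uint8_t file[64] = { 0 };
        struct fw_bin_header hdr = { 0x3b1d14f0, 1, 64, 24, 32, 32 };
        struct fw_bin_header h;

        memcpy(file, &hdr, sizeof(hdr));
        memcpy(&h, file, sizeof(h));  /* read it back as a parser would */
        if (h.bin_magic != 0x3b1d14f0)
            return 1;
        printf("payload header at +%u, %u bytes of data at +%u\n",
               h.header_offset, h.data_size, h.data_offset);
        return 0;
    }
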
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
new file mode 100644
index 000000000000..40a6df77bb8a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
@@ -0,0 +1,158 @@
1/*
2 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23
24#include "ls_ucode.h"
25#include "acr.h"
26
27#include <core/firmware.h>
28
29#define BL_DESC_BLK_SIZE 256
30/**
31 * Build a ucode image and descriptor from provided bootloader, code and data.
32 *
33 * @bl: bootloader image, including 16-byte descriptor
34 * @code: LS firmware code segment
35 * @data: LS firmware data segment
36 * @desc: ucode descriptor to be written
37 *
38 * Return: allocated ucode image with corresponding descriptor information. desc
39 * is also updated to contain the right offsets within the returned image.
40 */
41static void *
42ls_ucode_img_build(const struct firmware *bl, const struct firmware *code,
43 const struct firmware *data, struct ls_ucode_img_desc *desc)
44{
45 struct fw_bin_header *bin_hdr = (void *)bl->data;
46 struct fw_bl_desc *bl_desc = (void *)bl->data + bin_hdr->header_offset;
47 void *bl_data = (void *)bl->data + bin_hdr->data_offset;
48 u32 pos = 0;
49 void *image;
50
51 desc->bootloader_start_offset = pos;
52 desc->bootloader_size = ALIGN(bl_desc->code_size, sizeof(u32));
53 desc->bootloader_imem_offset = bl_desc->start_tag * 256;
54 desc->bootloader_entry_point = bl_desc->start_tag * 256;
55
56 pos = ALIGN(pos + desc->bootloader_size, BL_DESC_BLK_SIZE);
57 desc->app_start_offset = pos;
58 desc->app_size = ALIGN(code->size, BL_DESC_BLK_SIZE) +
59 ALIGN(data->size, BL_DESC_BLK_SIZE);
60 desc->app_imem_offset = 0;
61 desc->app_imem_entry = 0;
62 desc->app_dmem_offset = 0;
63 desc->app_resident_code_offset = 0;
64 desc->app_resident_code_size = ALIGN(code->size, BL_DESC_BLK_SIZE);
65
66 pos = ALIGN(pos + desc->app_resident_code_size, BL_DESC_BLK_SIZE);
67 desc->app_resident_data_offset = pos - desc->app_start_offset;
68 desc->app_resident_data_size = ALIGN(data->size, BL_DESC_BLK_SIZE);
69
70 desc->image_size = ALIGN(bl_desc->code_size, BL_DESC_BLK_SIZE) +
71 desc->app_size;
72
73 image = kzalloc(desc->image_size, GFP_KERNEL);
74 if (!image)
75 return ERR_PTR(-ENOMEM);
76
77 memcpy(image + desc->bootloader_start_offset, bl_data,
78 bl_desc->code_size);
79 memcpy(image + desc->app_start_offset, code->data, code->size);
80 memcpy(image + desc->app_start_offset + desc->app_resident_data_offset,
81 data->data, data->size);
82
83 return image;
84}
85
86/**
87 * ls_ucode_img_load_gr() - load and prepare a LS GR ucode image
88 *
89 * Load the LS microcode, bootloader and signature and pack them into a single
90 * blob. Also generate the corresponding ucode descriptor.
91 */
92static int
93ls_ucode_img_load_gr(const struct nvkm_subdev *subdev, struct ls_ucode_img *img,
94 const char *falcon_name)
95{
96 const struct firmware *bl, *code, *data, *sig;
97 char f[64];
98 int ret;
99
100 snprintf(f, sizeof(f), "gr/%s_bl", falcon_name);
101 ret = nvkm_firmware_get(subdev->device, f, &bl);
102 if (ret)
103 goto error;
104
105 snprintf(f, sizeof(f), "gr/%s_inst", falcon_name);
106 ret = nvkm_firmware_get(subdev->device, f, &code);
107 if (ret)
108 goto free_bl;
109
110 snprintf(f, sizeof(f), "gr/%s_data", falcon_name);
111 ret = nvkm_firmware_get(subdev->device, f, &data);
112 if (ret)
113 goto free_inst;
114
115 snprintf(f, sizeof(f), "gr/%s_sig", falcon_name);
116 ret = nvkm_firmware_get(subdev->device, f, &sig);
117 if (ret)
118 goto free_data;
119 img->sig = kmemdup(sig->data, sig->size, GFP_KERNEL);
120 if (!img->sig) {
121 ret = -ENOMEM;
122 goto free_sig;
123 }
124 img->sig_size = sig->size;
125
126 img->ucode_data = ls_ucode_img_build(bl, code, data,
127 &img->ucode_desc);
128 if (IS_ERR(img->ucode_data)) {
129 ret = PTR_ERR(img->ucode_data);
130 goto free_sig; /* release the sig firmware too */
131 }
132 img->ucode_size = img->ucode_desc.image_size;
133
134free_sig:
135 nvkm_firmware_put(sig);
136free_data:
137 nvkm_firmware_put(data);
138free_inst:
139 nvkm_firmware_put(code);
140free_bl:
141 nvkm_firmware_put(bl);
142error:
143 return ret;
144}
145
146int
147acr_ls_ucode_load_fecs(const struct nvkm_subdev *subdev,
148 struct ls_ucode_img *img)
149{
150 return ls_ucode_img_load_gr(subdev, img, "fecs");
151}
152
153int
154acr_ls_ucode_load_gpccs(const struct nvkm_subdev *subdev,
155 struct ls_ucode_img *img)
156{
157 return ls_ucode_img_load_gr(subdev, img, "gpccs");
158}
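
The offsets that ls_ucode_img_build() records in the descriptor are pure alignment arithmetic on 256-byte blocks. This stand-alone sketch reproduces that math for invented bootloader/code/data sizes, so the resulting layout can be checked by eye:

    #include <stdint.h>
    #include <stdio.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((uint32_t)(a) - 1))
    #define BLK 256u  /* BL_DESC_BLK_SIZE */

    int main(void)
    {
        /* Invented sizes; only the arithmetic mirrors ls_ucode_img_build(). */
        uint32_t bl_size = 1000, code_size = 5000, data_size = 3000;

        uint32_t bl_off   = 0;
        uint32_t app_off  = ALIGN(bl_off + ALIGN(bl_size, 4), BLK);
        uint32_t data_off = ALIGN(code_size, BLK);  /* relative to app_off */
        uint32_t image    = ALIGN(bl_size, BLK) +
                            ALIGN(code_size, BLK) + ALIGN(data_size, BLK);

        printf("bl@%u app@%u data@app+%u image %u\n",
               bl_off, app_off, data_off, image);  /* bl@0 app@1024 ... */
        return 0;
    }
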
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
index a9a8a0e1017e..936a65f5658c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
@@ -27,20 +27,16 @@
27#include <subdev/mmu.h> 27#include <subdev/mmu.h>
28 28
29struct nvkm_secboot_func { 29struct nvkm_secboot_func {
30 int (*init)(struct nvkm_secboot *); 30 int (*oneinit)(struct nvkm_secboot *);
31 int (*fini)(struct nvkm_secboot *, bool suspend); 31 int (*fini)(struct nvkm_secboot *, bool suspend);
32 void *(*dtor)(struct nvkm_secboot *); 32 void *(*dtor)(struct nvkm_secboot *);
33 int (*reset)(struct nvkm_secboot *, enum nvkm_secboot_falcon); 33 int (*run_blob)(struct nvkm_secboot *, struct nvkm_gpuobj *);
34 int (*start)(struct nvkm_secboot *, enum nvkm_secboot_falcon);
35
36 /* ID of the falcon that will perform secure boot */
37 enum nvkm_secboot_falcon boot_falcon;
38 /* Bit-mask of IDs of managed falcons */
39 unsigned long managed_falcons;
40}; 34};
41 35
42int nvkm_secboot_ctor(const struct nvkm_secboot_func *, struct nvkm_device *, 36extern const char *nvkm_secboot_falcon_name[];
43 int index, struct nvkm_secboot *); 37
38int nvkm_secboot_ctor(const struct nvkm_secboot_func *, struct nvkm_acr *,
39 struct nvkm_device *, int, struct nvkm_secboot *);
44int nvkm_secboot_falcon_reset(struct nvkm_secboot *); 40int nvkm_secboot_falcon_reset(struct nvkm_secboot *);
45int nvkm_secboot_falcon_run(struct nvkm_secboot *); 41int nvkm_secboot_falcon_run(struct nvkm_secboot *);
46 42
@@ -48,187 +44,20 @@ struct flcn_u64 {
48 u32 lo; 44 u32 lo;
49 u32 hi; 45 u32 hi;
50}; 46};
47
51static inline u64 flcn64_to_u64(const struct flcn_u64 f) 48static inline u64 flcn64_to_u64(const struct flcn_u64 f)
52{ 49{
53 return ((u64)f.hi) << 32 | f.lo; 50 return ((u64)f.hi) << 32 | f.lo;
54} 51}
55 52
56/** 53static inline struct flcn_u64 u64_to_flcn64(u64 u)
57 * struct gm200_flcn_bl_desc - DMEM bootloader descriptor 54{
58 * @signature: 16B signature for secure code. 0s if no secure code 55 struct flcn_u64 ret;
59 * @ctx_dma: DMA context to be used by BL while loading code/data
60 * @code_dma_base: 256B-aligned Physical FB Address where code is located
61 * (falcon's $xcbase register)
62 * @non_sec_code_off: offset from code_dma_base where the non-secure code is
63 * located. The offset must be a multiple of 256 to help perf
64 * @non_sec_code_size: the size of the non-secure code part.
65 * @sec_code_off: offset from code_dma_base where the secure code is
66 * located. The offset must be a multiple of 256 to help perf
67 * @sec_code_size: the size of the secure code part.
69 * @code_entry_point: code entry point which will be invoked by BL after
70 * code is loaded.
71 * @data_dma_base: 256B aligned Physical FB Address where data is located.
72 * (falcon's $xdbase register)
73 * @data_size: size of data block. Should be a multiple of 256B
74 *
75 * Structure used by the bootloader to load the rest of the code. This has
76 * to be filled by host and copied into DMEM at offset provided in the
77 * hsflcn_bl_desc.bl_desc_dmem_load_off.
78 */
79struct gm200_flcn_bl_desc {
80 u32 reserved[4];
81 u32 signature[4];
82 u32 ctx_dma;
83 struct flcn_u64 code_dma_base;
84 u32 non_sec_code_off;
85 u32 non_sec_code_size;
86 u32 sec_code_off;
87 u32 sec_code_size;
88 u32 code_entry_point;
89 struct flcn_u64 data_dma_base;
90 u32 data_size;
91};
92
93/**
94 * struct hsflcn_acr_desc - data section of the HS firmware
95 *
96 * This header is to be copied to the beginning of DMEM by the HS bootloader.
97 *
98 * @signature: signature of ACR ucode
99 * @wpr_region_id: region ID holding the WPR header and its details
100 * @wpr_offset: offset from the WPR region holding the wpr header
101 * @regions: region descriptors
102 * @ucode_blob_size: size of the LS blob
103 * @ucode_blob_base: FB location of the LS blob
104 */
105struct hsflcn_acr_desc {
106 union {
107 u8 reserved_dmem[0x200];
108 u32 signatures[4];
109 } ucode_reserved_space;
110 u32 wpr_region_id;
111 u32 wpr_offset;
112 u32 mmu_mem_range;
113#define FLCN_ACR_MAX_REGIONS 2
114 struct {
115 u32 no_regions;
116 struct {
117 u32 start_addr;
118 u32 end_addr;
119 u32 region_id;
120 u32 read_mask;
121 u32 write_mask;
122 u32 client_mask;
123 } region_props[FLCN_ACR_MAX_REGIONS];
124 } regions;
125 u32 ucode_blob_size;
126 u64 ucode_blob_base __aligned(8);
127 struct {
128 u32 vpr_enabled;
129 u32 vpr_start;
130 u32 vpr_end;
131 u32 hdcp_policies;
132 } vpr_desc;
133};
134
135/**
136 * Contains the whole secure boot state, allowing secure boot to be performed as needed
137 * @wpr_addr: physical address of the WPR region
138 * @wpr_size: size in bytes of the WPR region
139 * @ls_blob: LS blob of all the LS firmwares, signatures, bootloaders
140 * @ls_blob_size: size of the LS blob
141 * @ls_blob_nb_regions: number of LS firmwares that will be loaded
142 * @acr_blob: HS blob
143 * @acr_blob_vma: mapping of the HS blob into the secure falcon's VM
144 * @acr_bl_desc: bootloader descriptor of the HS blob
145 * @hsbl_blob: HS blob bootloader
146 * @inst: instance block for HS falcon
147 * @pgd: page directory for the HS falcon
148 * @vm: address space used by the HS falcon
149 * @falcon_state: current state of the managed falcons
150 * @firmware_ok: whether the firmware blobs have been created
151 */
152struct gm200_secboot {
153 struct nvkm_secboot base;
154 const struct gm200_secboot_func *func;
155
156 /*
157 * Address and size of the WPR region. On dGPU this will be the
158 * address of the LS blob. On Tegra this is a fixed region set by the
159 * bootloader
160 */
161 u64 wpr_addr;
162 u32 wpr_size;
163
164 /*
165 * HS FW - lock WPR region (dGPU only) and load LS FWs
166 * on Tegra the HS FW copies the LS blob into the fixed WPR instead
167 */
168 struct nvkm_gpuobj *acr_load_blob;
169 struct gm200_flcn_bl_desc acr_load_bl_desc;
170
171 /* HS FW - unlock WPR region (dGPU only) */
172 struct nvkm_gpuobj *acr_unload_blob;
173 struct gm200_flcn_bl_desc acr_unload_bl_desc;
174
175 /* HS bootloader */
176 void *hsbl_blob;
177
178 /* LS FWs, to be loaded by the HS ACR */
179 struct nvkm_gpuobj *ls_blob;
180
181 /* Instance block & address space used for HS FW execution */
182 struct nvkm_gpuobj *inst;
183 struct nvkm_gpuobj *pgd;
184 struct nvkm_vm *vm;
185
186 /* To keep track of the state of all managed falcons */
187 enum {
188 /* In non-secure state, no firmware loaded, no privileges */
189 NON_SECURE = 0,
190 /* In low-secure mode and ready to be started */
191 RESET,
192 /* In low-secure mode and running */
193 RUNNING,
194 } falcon_state[NVKM_SECBOOT_FALCON_END];
195
196 bool firmware_ok;
197};
198#define gm200_secboot(sb) container_of(sb, struct gm200_secboot, base)
199
200/**
201 * Contains functions we wish to abstract between GM200-like implementations
202 * @bl_desc_size: size of the BL descriptor used by this chip.
203 * @fixup_bl_desc: hook that generates the proper BL descriptor format from
204 * the generic GM200 format into a data array of size
205 * bl_desc_size
206 * @fixup_hs_desc: hook that twiddles the HS descriptor before it is used
207 * @prepare_blobs: prepares the various blobs needed for secure booting
208 */
209struct gm200_secboot_func {
210 /*
211 * Size of the bootloader descriptor for this chip. A block of this
212 * size is allocated before booting a falcon and the fixup_bl_desc
213 * callback is called on it
214 */
215 u32 bl_desc_size;
216 void (*fixup_bl_desc)(const struct gm200_flcn_bl_desc *, void *);
217
218 /*
219 * Chip-specific modifications of the HS descriptor can be done here.
220 * On dGPU this is used to fill the information about the WPR region
221 * we want the HS FW to set up.
222 */
223 void (*fixup_hs_desc)(struct gm200_secboot *, struct hsflcn_acr_desc *);
224 int (*prepare_blobs)(struct gm200_secboot *);
225};
226 56
227int gm200_secboot_init(struct nvkm_secboot *); 57 ret.hi = upper_32_bits(u);
228void *gm200_secboot_dtor(struct nvkm_secboot *); 58 ret.lo = lower_32_bits(u);
229int gm200_secboot_reset(struct nvkm_secboot *, u32);
230int gm200_secboot_start(struct nvkm_secboot *, u32);
231 59
232int gm20x_secboot_prepare_blobs(struct gm200_secboot *); 60 return ret;
61}
233 62
234#endif 63#endif
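
The flcn_u64 helpers kept (and extended) above just split a 64-bit address across the lo/hi register pair the falcon expects, and rejoin it. A round-trip check, with upper/lower_32_bits written out since this sketch is not built against kernel headers:

    #include <assert.h>
    #include <stdint.h>

    struct flcn_u64 { uint32_t lo, hi; };

    static uint64_t flcn64_to_u64(struct flcn_u64 f)
    {
        return ((uint64_t)f.hi << 32) | f.lo;
    }

    static struct flcn_u64 u64_to_flcn64(uint64_t u)
    {
        struct flcn_u64 f = { (uint32_t)u, (uint32_t)(u >> 32) };
        return f;
    }

    int main(void)
    {
        uint64_t addr = 0x00000004deadbeefULL;  /* arbitrary test value */
        assert(flcn64_to_u64(u64_to_flcn64(addr)) == addr);
        return 0;
    }
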
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
index 8894fee30cbc..df949fa7d05d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
@@ -64,10 +64,9 @@ nvkm_therm_update_trip(struct nvkm_therm *therm)
64} 64}
65 65
66static int 66static int
67nvkm_therm_update_linear(struct nvkm_therm *therm) 67nvkm_therm_compute_linear_duty(struct nvkm_therm *therm, u8 linear_min_temp,
68 u8 linear_max_temp)
68{ 69{
69 u8 linear_min_temp = therm->fan->bios.linear_min_temp;
70 u8 linear_max_temp = therm->fan->bios.linear_max_temp;
71 u8 temp = therm->func->temp_get(therm); 70 u8 temp = therm->func->temp_get(therm);
72 u16 duty; 71 u16 duty;
73 72
@@ -85,6 +84,21 @@ nvkm_therm_update_linear(struct nvkm_therm *therm)
85 return duty; 84 return duty;
86} 85}
87 86
87static int
88nvkm_therm_update_linear(struct nvkm_therm *therm)
89{
90 u8 min = therm->fan->bios.linear_min_temp;
91 u8 max = therm->fan->bios.linear_max_temp;
92 return nvkm_therm_compute_linear_duty(therm, min, max);
93}
94
95static int
96nvkm_therm_update_linear_fallback(struct nvkm_therm *therm)
97{
98 u8 max = therm->bios_sensor.thrs_fan_boost.temp;
99 return nvkm_therm_compute_linear_duty(therm, 30, max);
100}
101
88static void 102static void
89nvkm_therm_update(struct nvkm_therm *therm, int mode) 103nvkm_therm_update(struct nvkm_therm *therm, int mode)
90{ 104{
@@ -119,6 +133,8 @@ nvkm_therm_update(struct nvkm_therm *therm, int mode)
119 case NVBIOS_THERM_FAN_OTHER: 133 case NVBIOS_THERM_FAN_OTHER:
120 if (therm->cstate) 134 if (therm->cstate)
121 duty = therm->cstate; 135 duty = therm->cstate;
136 else
137 duty = nvkm_therm_update_linear_fallback(therm);
122 poll = false; 138 poll = false;
123 break; 139 break;
124 } 140 }
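
The fallback added above reuses the same linear ramp as the VBIOS-driven path, with a fixed 30C floor and the fan-boost threshold as the ceiling. The core of it is a clamped linear map; this sketch assumes a 0..100% duty range, whereas the real code scales between the VBIOS min and max duty:

    #include <stdio.h>

    /* Clamped linear map from [min_t, max_t] degrees to 0..100% duty.
     * Simplified: nouveau interpolates between VBIOS min/max duty. */
    static int linear_duty(int temp, int min_t, int max_t)
    {
        if (temp <= min_t)
            return 0;
        if (temp >= max_t)
            return 100;
        return (temp - min_t) * 100 / (max_t - min_t);
    }

    int main(void)
    {
        /* Fallback ramp: 30C floor, assumed 90C fan-boost ceiling. */
        int t;

        for (t = 20; t <= 100; t += 20)
            printf("%dC -> %d%%\n", t, linear_duty(t, 30, 90));
        return 0;
    }
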
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c
index fe063d5728e2..67ada1d9a28c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c
@@ -95,6 +95,20 @@ nvkm_top_intr(struct nvkm_device *device, u32 intr, u64 *psubdevs)
95 return intr & ~handled; 95 return intr & ~handled;
96} 96}
97 97
98int
99nvkm_top_fault_id(struct nvkm_device *device, enum nvkm_devidx devidx)
100{
101 struct nvkm_top *top = device->top;
102 struct nvkm_top_device *info;
103
104 list_for_each_entry(info, &top->device, head) {
105 if (info->index == devidx && info->fault >= 0)
106 return info->fault;
107 }
108
109 return -ENOENT;
110}
111
98enum nvkm_devidx 112enum nvkm_devidx
99nvkm_top_fault(struct nvkm_device *device, int fault) 113nvkm_top_fault(struct nvkm_device *device, int fault)
100{ 114{