author    Dave Airlie <airlied@redhat.com>  2017-03-07 21:54:58 -0500
committer Dave Airlie <airlied@redhat.com>  2017-03-07 21:54:58 -0500
commit    6796b129b0e98162a84e0b6322ac28587556d427 (patch)
tree      fbfdc303194fc3f5a643d5606aff00ba2187037f
parent    2e16101780e9cc8c4c68566db002e7513a1530eb (diff)
parent    97e5268d57bb2ec9c82cf8758fa97a2f04ea9d1b (diff)
Merge branch 'linux-4.12' of git://github.com/skeggsb/linux into drm-next
- Re-architecture of the code to handle proprietary fw, more abstracted to
  support the multitude of differences that NVIDIA introduce
- Support in the said code for GP10x ACR and GR fw, giving acceleration
  support \o/
- Fix for GTX 970 GPUs that are in an odd MMU configuration

* 'linux-4.12' of git://github.com/skeggsb/linux: (60 commits)
  drm/nouveau/fb/gf100-: rework ram detection
  drm/nouveau/fb/gm200: split ram implementation from gm107
  drm/nouveau/fb/gf108: split implementation from gf100
  drm/nouveau/fb/gf100-: modify constructors to allow more customisation
  drm/nouveau/kms/nv50: use drm core i2c-over-aux algorithm
  drm/nouveau/i2c/g94-: return REPLY_M value on reads
  drm/nouveau/i2c: modify aux interface to return length actually transferred
  drm/nouveau/gp10x: enable secboot and GR
  drm/nouveau/gr/gp102: initial support
  drm/nouveau/falcon: support for gp10x msgqueue
  drm/nouveau/secboot: add gp102/gp104/gp106/gp107 support
  drm/nouveau/secboot: put HS code loading code into own file
  drm/nouveau/secboot: support for r375 ACR
  drm/nouveau/secboot: support for r367 ACR
  drm/nouveau/secboot: support for r364 ACR
  drm/nouveau/secboot: workaround bug when starting SEC2 firmware
  drm/nouveau/secboot: support standard NVIDIA HS binaries
  drm/nouveau/secboot: support for unload blob bootloader
  drm/nouveau/secboot: let callers interpret return value of blobs
  drm/nouveau/secboot: support for different load and unload falcons
  ...
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/class.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/device.h | 7
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/msgqueue.h | 47
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h | 9
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h | 8
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h | 13
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h | 7
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h | 8
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/subdev.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 21
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c | 98
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c | 29
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c | 66
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c | 59
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gp102.c | 30
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c | 101
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c | 30
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/base.c | 40
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c | 553
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h | 207
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c | 323
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c | 263
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/v1.c | 124
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c | 42
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h | 33
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c | 148
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c | 62
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c | 70
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c | 25
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c | 68
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c | 68
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c | 15
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c | 589
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h | 143
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c | 149
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h | 72
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r364.c | 117
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c | 388
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.h | 35
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c | 165
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c | 23
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c | 251
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.c | 97
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.h | 81
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c | 149
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c | 2
80 files changed, 4468 insertions, 566 deletions
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index 3a2c0137d4b4..d08da82ba7ed 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -125,6 +125,7 @@
 #define MAXWELL_B                                     /* cl9097.h */ 0x0000b197

 #define PASCAL_A                                      /* cl9097.h */ 0x0000c097
+#define PASCAL_B                                      /* cl9097.h */ 0x0000c197

 #define NV74_BSP                                                    0x000074b0

@@ -163,6 +164,7 @@
 #define MAXWELL_COMPUTE_A                                           0x0000b0c0
 #define MAXWELL_COMPUTE_B                                           0x0000b1c0
 #define PASCAL_COMPUTE_A                                            0x0000c0c0
+#define PASCAL_COMPUTE_B                                            0x0000c1c0

 #define NV74_CIPHER                                                 0x000074c1
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index d426b86e2712..bb4c214f1046 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -59,6 +59,7 @@ enum nvkm_devidx {
 	NVKM_ENGINE_NVDEC,
 	NVKM_ENGINE_PM,
 	NVKM_ENGINE_SEC,
+	NVKM_ENGINE_SEC2,
 	NVKM_ENGINE_SW,
 	NVKM_ENGINE_VIC,
 	NVKM_ENGINE_VP,
@@ -155,9 +156,10 @@ struct nvkm_device {
 	struct nvkm_engine *msppp;
 	struct nvkm_engine *msvld;
 	struct nvkm_engine *nvenc[3];
-	struct nvkm_engine *nvdec;
+	struct nvkm_nvdec *nvdec;
 	struct nvkm_pm *pm;
 	struct nvkm_engine *sec;
+	struct nvkm_sec2 *sec2;
 	struct nvkm_sw *sw;
 	struct nvkm_engine *vic;
 	struct nvkm_engine *vp;
@@ -225,9 +227,10 @@ struct nvkm_device_chip {
 	int (*msppp )(struct nvkm_device *, int idx, struct nvkm_engine **);
 	int (*msvld )(struct nvkm_device *, int idx, struct nvkm_engine **);
 	int (*nvenc[3])(struct nvkm_device *, int idx, struct nvkm_engine **);
-	int (*nvdec )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*nvdec )(struct nvkm_device *, int idx, struct nvkm_nvdec **);
 	int (*pm )(struct nvkm_device *, int idx, struct nvkm_pm **);
 	int (*sec )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*sec2 )(struct nvkm_device *, int idx, struct nvkm_sec2 **);
 	int (*sw )(struct nvkm_device *, int idx, struct nvkm_sw **);
 	int (*vic )(struct nvkm_device *, int idx, struct nvkm_engine **);
 	int (*vp )(struct nvkm_device *, int idx, struct nvkm_engine **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/msgqueue.h b/drivers/gpu/drm/nouveau/include/nvkm/core/msgqueue.h
new file mode 100644
index 000000000000..fac0824197f1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/msgqueue.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NVKM_CORE_MSGQUEUE_H
+#define __NVKM_CORE_MSGQUEUE_H
+
+#include <core/os.h>
+
+struct nvkm_falcon;
+struct nvkm_msgqueue;
+enum nvkm_secboot_falcon;
+
+/* Hopefully we will never have firmware arguments larger than that... */
+#define NVKM_MSGQUEUE_CMDLINE_SIZE 0x100
+
+int nvkm_msgqueue_new(u32, struct nvkm_falcon *, struct nvkm_msgqueue **);
+void nvkm_msgqueue_del(struct nvkm_msgqueue **);
+void nvkm_msgqueue_recv(struct nvkm_msgqueue *);
+int nvkm_msgqueue_reinit(struct nvkm_msgqueue *);
+
+/* useful if we run a NVIDIA-signed firmware */
+void nvkm_msgqueue_write_cmdline(struct nvkm_msgqueue *, void *);
+
+/* interface to ACR unit running on falcon (NVIDIA signed firmware) */
+int nvkm_msgqueue_acr_boot_falcon(struct nvkm_msgqueue *,
+				  enum nvkm_secboot_falcon);
+
+#endif
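
As a rough sketch of how these entry points chain together (an editor's illustration, not code from this series; example_sec2_boot and fw_version are hypothetical names), a consumer running NVIDIA-signed firmware might do:

/* Hedged sketch: create a queue matching the firmware's messaging ABI,
 * then ask the ACR unit in that firmware to boot another falcon.  On
 * failure the queue is torn down; nvkm_msgqueue_del() also NULLs the
 * caller's pointer.
 */
static int
example_sec2_boot(struct nvkm_falcon *falcon, u32 fw_version)
{
	struct nvkm_msgqueue *queue;
	int ret;

	ret = nvkm_msgqueue_new(fw_version, falcon, &queue);
	if (ret)
		return ret;

	ret = nvkm_msgqueue_acr_boot_falcon(queue, NVKM_SECBOOT_FALCON_FECS);
	if (ret)
		nvkm_msgqueue_del(&queue);
	return ret;
}

Incoming messages would then be drained with nvkm_msgqueue_recv(), typically from a work item scheduled by the engine's interrupt handler, as the SEC2 code later in this diff does.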
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
index 7e498e65b1e8..e1a854e2ade1 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
@@ -10,6 +10,7 @@ enum nvkm_falcon_dmaidx {
 	FALCON_DMAIDX_PHYS_VID = 2,
 	FALCON_DMAIDX_PHYS_SYS_COH = 3,
 	FALCON_DMAIDX_PHYS_SYS_NCOH = 4,
+	FALCON_SEC2_DMAIDX_UCODE = 6,
 };

 struct nvkm_falcon {
@@ -19,11 +20,13 @@ struct nvkm_falcon {
 	u32 addr;

 	struct mutex mutex;
+	struct mutex dmem_mutex;
 	const struct nvkm_subdev *user;

 	u8 version;
 	u8 secret;
 	bool debug;
+	bool has_emem;

 	struct nvkm_memory *core;
 	bool external;
@@ -45,8 +48,14 @@ struct nvkm_falcon {
 	struct nvkm_engine engine;
 };

+/* This constructor must be called from the owner's oneinit() hook and
+ * *not* its constructor.  This is to ensure that DEVINIT has been
+ * completed, and that the device is correctly enabled before we touch
+ * falcon registers.
+ */
 int nvkm_falcon_v1_new(struct nvkm_subdev *owner, const char *name, u32 addr,
 		       struct nvkm_falcon **);
+
 void nvkm_falcon_del(struct nvkm_falcon **);
 int nvkm_falcon_get(struct nvkm_falcon *, const struct nvkm_subdev *);
 void nvkm_falcon_put(struct nvkm_falcon *, const struct nvkm_subdev *);
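
To make the ownership rules above concrete, here is a minimal sketch (an editor's illustration, assuming a falcon already constructed from its owner's oneinit() hook; example_falcon_run is a hypothetical name) of the acquire/use/release pattern:

static int
example_falcon_run(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
{
	/* exclusive acquire; fails if another subdev holds the falcon */
	int ret = nvkm_falcon_get(falcon, user);
	if (ret)
		return ret;

	/* ... load IMEM/DMEM, start the falcon, wait for completion ... */

	nvkm_falcon_put(falcon, user);	/* release for other users */
	return 0;
}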
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
index 89cf99307828..0a636833e0eb 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
@@ -43,4 +43,5 @@ int gm107_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
 int gm200_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
 int gm20b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
 int gp100_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gp102_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
index 30b76d13fdcb..00b2b227ff41 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
@@ -1,4 +1,12 @@
 #ifndef __NVKM_NVDEC_H__
 #define __NVKM_NVDEC_H__
+#define nvkm_nvdec(p) container_of((p), struct nvkm_nvdec, engine)
 #include <core/engine.h>
+
+struct nvkm_nvdec {
+	struct nvkm_engine engine;
+	struct nvkm_falcon *falcon;
+};
+
+int gp102_nvdec_new(struct nvkm_device *, int, struct nvkm_nvdec **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h
new file mode 100644
index 000000000000..d3db1b1e75c4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h
@@ -0,0 +1,13 @@
+#ifndef __NVKM_SEC2_H__
+#define __NVKM_SEC2_H__
+#include <core/engine.h>
+
+struct nvkm_sec2 {
+	struct nvkm_engine engine;
+	struct nvkm_falcon *falcon;
+	struct nvkm_msgqueue *queue;
+	struct work_struct work;
+};
+
+int gp102_sec2_new(struct nvkm_device *, int, struct nvkm_sec2 **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 0b26a4c860ec..891497a0fe3b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -89,6 +89,7 @@ int gt215_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int mcp77_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int mcp89_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int gf100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gf108_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int gk104_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int gk20a_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int gm107_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
@@ -146,6 +147,12 @@ struct nvkm_ram {
 };

 struct nvkm_ram_func {
+	u64 upper;
+	u32 (*probe_fbp)(const struct nvkm_ram_func *, struct nvkm_device *,
+			 int fbp, int *pltcs);
+	u32 (*probe_fbp_amount)(const struct nvkm_ram_func *, u32 fbpao,
+				struct nvkm_device *, int fbp, int *pltcs);
+	u32 (*probe_fbpa_amount)(struct nvkm_device *, int fbpa);
 	void *(*dtor)(struct nvkm_ram *);
 	int (*init)(struct nvkm_ram *);

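
A sketch of how a RAM constructor might consume the new probe hooks (an editor's illustration; the loop bound fbp_nr and the assumption that probe_fbp() returns a per-FBP amount in MiB are not taken from this patch):

static u64
example_ram_total(const struct nvkm_ram_func *func,
		  struct nvkm_device *device, int fbp_nr)
{
	u64 total = 0;
	int fbp, ltcs;

	for (fbp = 0; fbp < fbp_nr; fbp++) {
		/* per-FBP amount, with that FBP's LTC count as a side effect */
		u32 amount = func->probe_fbp(func, device, fbp, &ltcs);
		total += (u64)amount << 20;	/* assuming MiB units */
	}
	return total;
}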
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
index a63c5ac69f66..ce23cc6c672e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
@@ -64,7 +64,7 @@ void nvkm_i2c_aux_monitor(struct nvkm_i2c_aux *, bool monitor);
 int nvkm_i2c_aux_acquire(struct nvkm_i2c_aux *);
 void nvkm_i2c_aux_release(struct nvkm_i2c_aux *);
 int nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *, bool retry, u8 type,
-		      u32 addr, u8 *data, u8 size);
+		      u32 addr, u8 *data, u8 *size);
 int nvkm_i2c_aux_lnk_ctl(struct nvkm_i2c_aux *, int link_nr, int link_bw,
 			 bool enhanced_framing);

@@ -162,9 +162,11 @@ nvkm_probe_i2c(struct i2c_adapter *adap, u8 addr)
 static inline int
 nvkm_rdaux(struct nvkm_i2c_aux *aux, u32 addr, u8 *data, u8 size)
 {
+	const u8 xfer = size;
 	int ret = nvkm_i2c_aux_acquire(aux);
 	if (ret == 0) {
-		ret = nvkm_i2c_aux_xfer(aux, true, 9, addr, data, size);
+		ret = nvkm_i2c_aux_xfer(aux, true, 9, addr, data, &size);
+		WARN_ON(!ret && size != xfer);
 		nvkm_i2c_aux_release(aux);
 	}
 	return ret;
@@ -175,7 +177,7 @@ nvkm_wraux(struct nvkm_i2c_aux *aux, u32 addr, u8 *data, u8 size)
 {
 	int ret = nvkm_i2c_aux_acquire(aux);
 	if (ret == 0) {
-		ret = nvkm_i2c_aux_xfer(aux, true, 8, addr, data, &size);
 		nvkm_i2c_aux_release(aux);
 	}
 	return ret;
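
With size now passed by pointer, callers can see how many bytes the AUX transaction actually moved. A hedged sketch of a caller that treats short reads as errors (example_read_dpcd is a hypothetical name; the pattern mirrors nvkm_rdaux() above):

static int
example_read_dpcd(struct nvkm_i2c_aux *aux, u32 addr, u8 *buf, u8 want)
{
	u8 size = want;
	int ret = nvkm_i2c_aux_acquire(aux);
	if (ret == 0) {
		ret = nvkm_i2c_aux_xfer(aux, true, 9, addr, buf, &size);
		nvkm_i2c_aux_release(aux);
		if (ret == 0 && size < want)
			ret = -EIO;	/* short transfer */
	}
	return ret;
}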
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
index 179b6ed3f595..e7f04732a425 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
@@ -7,6 +7,7 @@ struct nvkm_pmu {
 	const struct nvkm_pmu_func *func;
 	struct nvkm_subdev subdev;
 	struct nvkm_falcon *falcon;
+	struct nvkm_msgqueue *queue;

 	struct {
 		u32 base;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h
index 5dbd8aa4f8c2..d6a4bdb6573b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h
@@ -30,10 +30,13 @@ enum nvkm_secboot_falcon {
 	NVKM_SECBOOT_FALCON_RESERVED = 1,
 	NVKM_SECBOOT_FALCON_FECS = 2,
 	NVKM_SECBOOT_FALCON_GPCCS = 3,
-	NVKM_SECBOOT_FALCON_END = 4,
+	NVKM_SECBOOT_FALCON_SEC2 = 7,
+	NVKM_SECBOOT_FALCON_END = 8,
 	NVKM_SECBOOT_FALCON_INVALID = 0xffffffff,
 };

+extern const char *nvkm_secboot_falcon_name[];
+
 /**
  * @wpr_set: whether the WPR region is currently set
 */
@@ -42,6 +45,7 @@ struct nvkm_secboot {
 	struct nvkm_acr *acr;
 	struct nvkm_subdev subdev;
 	struct nvkm_falcon *boot_falcon;
+	struct nvkm_falcon *halt_falcon;

 	u64 wpr_addr;
 	u32 wpr_size;
@@ -55,5 +59,6 @@ int nvkm_secboot_reset(struct nvkm_secboot *, enum nvkm_secboot_falcon);

 int gm200_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
 int gm20b_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
+int gp102_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);

 #endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index f5add64c093f..f802bcd94457 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -1147,6 +1147,7 @@ nouveau_connector_aux_xfer(struct drm_dp_aux *obj, struct drm_dp_aux_msg *msg)
 		container_of(obj, typeof(*nv_connector), aux);
 	struct nouveau_encoder *nv_encoder;
 	struct nvkm_i2c_aux *aux;
+	u8 size = msg->size;
 	int ret;

 	nv_encoder = find_encoder(&nv_connector->base, DCB_OUTPUT_DP);
@@ -1162,11 +1163,11 @@ nouveau_connector_aux_xfer(struct drm_dp_aux *obj, struct drm_dp_aux_msg *msg)
 		return ret;

 	ret = nvkm_i2c_aux_xfer(aux, false, msg->request, msg->address,
-				msg->buffer, msg->size);
+				msg->buffer, &size);
 	nvkm_i2c_aux_release(aux);
 	if (ret >= 0) {
 		msg->reply = ret;
-		return msg->size;
+		return size;
 	}

 	return ret;
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 16915c29ec52..7ad1ee580cf0 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -3627,7 +3627,7 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
 		struct nvkm_i2c_aux *aux =
 			nvkm_i2c_aux_find(i2c, dcbe->i2c_index);
 		if (aux) {
-			nv_encoder->i2c = &aux->i2c;
+			nv_encoder->i2c = &nv_connector->aux.ddc;
 			nv_encoder->aux = aux;
 		}

@@ -3777,6 +3777,7 @@ nv50_pior_func = {
 static int
 nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
 {
+	struct nouveau_connector *nv_connector = nouveau_connector(connector);
 	struct nouveau_drm *drm = nouveau_drm(connector->dev);
 	struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
 	struct nvkm_i2c_bus *bus = NULL;
@@ -3794,7 +3795,7 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
 		break;
 	case DCB_OUTPUT_DP:
 		aux = nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_EXT(dcbe->extdev));
-		ddc = aux ? &aux->i2c : NULL;
+		ddc = aux ? &nv_connector->aux.ddc : NULL;
 		type = DRM_MODE_ENCODER_TMDS;
 		break;
 	default:
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
index 19044aba265e..a134d225f958 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
@@ -78,6 +78,7 @@ nvkm_subdev_name[NVKM_SUBDEV_NR] = {
 	[NVKM_ENGINE_NVDEC ] = "nvdec",
 	[NVKM_ENGINE_PM ] = "pm",
 	[NVKM_ENGINE_SEC ] = "sec",
+	[NVKM_ENGINE_SEC2 ] = "sec2",
 	[NVKM_ENGINE_SW ] = "sw",
 	[NVKM_ENGINE_VIC ] = "vic",
 	[NVKM_ENGINE_VP ] = "vp",
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
index c2c8d2ac01b8..78571e8b01c5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
@@ -18,6 +18,7 @@ include $(src)/nvkm/engine/nvenc/Kbuild
 include $(src)/nvkm/engine/nvdec/Kbuild
 include $(src)/nvkm/engine/pm/Kbuild
 include $(src)/nvkm/engine/sec/Kbuild
+include $(src)/nvkm/engine/sec2/Kbuild
 include $(src)/nvkm/engine/sw/Kbuild
 include $(src)/nvkm/engine/vic/Kbuild
 include $(src)/nvkm/engine/vp/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 273562dd6bbd..1076949b802a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -1379,7 +1379,7 @@ nvc1_chipset = {
 	.bus = gf100_bus_new,
 	.clk = gf100_clk_new,
 	.devinit = gf100_devinit_new,
-	.fb = gf100_fb_new,
+	.fb = gf108_fb_new,
 	.fuse = gf100_fuse_new,
 	.gpio = g94_gpio_new,
 	.i2c = g94_i2c_new,
@@ -2200,6 +2200,9 @@ nv132_chipset = {
 	.ltc = gp100_ltc_new,
 	.mc = gp100_mc_new,
 	.mmu = gf100_mmu_new,
+	.secboot = gp102_secboot_new,
+	.sec2 = gp102_sec2_new,
+	.nvdec = gp102_nvdec_new,
 	.pci = gp100_pci_new,
 	.pmu = gp102_pmu_new,
 	.timer = gk20a_timer_new,
@@ -2211,6 +2214,8 @@ nv132_chipset = {
 	.disp = gp102_disp_new,
 	.dma = gf119_dma_new,
 	.fifo = gp100_fifo_new,
+	.gr = gp102_gr_new,
+	.sw = gf100_sw_new,
 };

 static const struct nvkm_device_chip
@@ -2229,6 +2234,9 @@ nv134_chipset = {
 	.ltc = gp100_ltc_new,
 	.mc = gp100_mc_new,
 	.mmu = gf100_mmu_new,
+	.secboot = gp102_secboot_new,
+	.sec2 = gp102_sec2_new,
+	.nvdec = gp102_nvdec_new,
 	.pci = gp100_pci_new,
 	.pmu = gp102_pmu_new,
 	.timer = gk20a_timer_new,
@@ -2240,6 +2248,8 @@ nv134_chipset = {
 	.disp = gp102_disp_new,
 	.dma = gf119_dma_new,
 	.fifo = gp100_fifo_new,
+	.gr = gp102_gr_new,
+	.sw = gf100_sw_new,
 };

 static const struct nvkm_device_chip
@@ -2258,6 +2268,9 @@ nv136_chipset = {
 	.ltc = gp100_ltc_new,
 	.mc = gp100_mc_new,
 	.mmu = gf100_mmu_new,
+	.secboot = gp102_secboot_new,
+	.sec2 = gp102_sec2_new,
+	.nvdec = gp102_nvdec_new,
 	.pci = gp100_pci_new,
 	.pmu = gp102_pmu_new,
 	.timer = gk20a_timer_new,
@@ -2269,6 +2282,8 @@ nv136_chipset = {
 	.disp = gp102_disp_new,
 	.dma = gf119_dma_new,
 	.fifo = gp100_fifo_new,
+	.gr = gp102_gr_new,
+	.sw = gf100_sw_new,
 };

 static int
@@ -2362,9 +2377,10 @@ nvkm_device_engine(struct nvkm_device *device, int index)
 	_(NVENC0 , device->nvenc[0], device->nvenc[0]);
 	_(NVENC1 , device->nvenc[1], device->nvenc[1]);
 	_(NVENC2 , device->nvenc[2], device->nvenc[2]);
-	_(NVDEC , device->nvdec , device->nvdec);
+	_(NVDEC , device->nvdec , &device->nvdec->engine);
 	_(PM , device->pm , &device->pm->engine);
 	_(SEC , device->sec , device->sec);
+	_(SEC2 , device->sec2 , &device->sec2->engine);
 	_(SW , device->sw , &device->sw->engine);
 	_(VIC , device->vic , device->vic);
 	_(VP , device->vp , device->vp);
@@ -2812,6 +2828,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
 	_(NVKM_ENGINE_NVDEC , nvdec);
 	_(NVKM_ENGINE_PM , pm);
 	_(NVKM_ENGINE_SEC , sec);
+	_(NVKM_ENGINE_SEC2 , sec2);
 	_(NVKM_ENGINE_SW , sw);
 	_(NVKM_ENGINE_VIC , vic);
 	_(NVKM_ENGINE_VP , vp);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
index 1a06ac175f55..6c16f3835f44 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
@@ -41,6 +41,7 @@
 #include <engine/nvdec.h>
 #include <engine/pm.h>
 #include <engine/sec.h>
+#include <engine/sec2.h>
 #include <engine/sw.h>
 #include <engine/vic.h>
 #include <engine/vp.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
index f1c494182248..2938ad5aca40 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
@@ -32,6 +32,7 @@ nvkm-y += nvkm/engine/gr/gm107.o
 nvkm-y += nvkm/engine/gr/gm200.o
 nvkm-y += nvkm/engine/gr/gm20b.o
 nvkm-y += nvkm/engine/gr/gp100.o
+nvkm-y += nvkm/engine/gr/gp102.o

 nvkm-y += nvkm/engine/gr/ctxnv40.o
 nvkm-y += nvkm/engine/gr/ctxnv50.o
@@ -50,3 +51,4 @@ nvkm-y += nvkm/engine/gr/ctxgm107.o
 nvkm-y += nvkm/engine/gr/ctxgm200.o
 nvkm-y += nvkm/engine/gr/ctxgm20b.o
 nvkm-y += nvkm/engine/gr/ctxgp100.o
+nvkm-y += nvkm/engine/gr/ctxgp102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index 52048b5a5274..0ae032fa2909 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -102,6 +102,10 @@ void gm200_grctx_generate_405b60(struct gf100_gr *);
 extern const struct gf100_grctx_func gm20b_grctx;

 extern const struct gf100_grctx_func gp100_grctx;
+void gp100_grctx_generate_main(struct gf100_gr *, struct gf100_grctx *);
+void gp100_grctx_generate_pagepool(struct gf100_grctx *);
+
+extern const struct gf100_grctx_func gp102_grctx;

 /* context init value lists */

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
index 3d1ae7ddf7dd..7833bc777a29 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
@@ -29,7 +29,7 @@
  * PGRAPH context implementation
  ******************************************************************************/

-static void
+void
 gp100_grctx_generate_pagepool(struct gf100_grctx *info)
 {
 	const struct gf100_grctx_func *grctx = info->gr->func->grctx;
@@ -123,7 +123,7 @@ gp100_grctx_generate_405b60(struct gf100_gr *gr)
 		nvkm_wr32(device, 0x405ba0 + (i * 4), gpcs[i]);
 }

-static void
+void
 gp100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
new file mode 100644
index 000000000000..ee26d64af73a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "ctxgf100.h"
+
+#include <subdev/fb.h>
+
+/*******************************************************************************
+ * PGRAPH context implementation
+ ******************************************************************************/
+
+static void
+gp102_grctx_generate_attrib(struct gf100_grctx *info)
+{
+	struct gf100_gr *gr = info->gr;
+	const struct gf100_grctx_func *grctx = gr->func->grctx;
+	const u32 alpha = grctx->alpha_nr;
+	const u32 attrib = grctx->attrib_nr;
+	const u32 pertpc = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
+	const u32 size = roundup(gr->tpc_total * pertpc, 0x80);
+	const u32 access = NV_MEM_ACCESS_RW;
+	const int s = 12;
+	const int b = mmio_vram(info, size, (1 << s), access);
+	const int max_batches = 0xffff;
+	u32 ao = 0;
+	u32 bo = ao + grctx->alpha_nr_max * gr->tpc_total;
+	int gpc, ppc, n = 0;
+
+	mmio_refn(info, 0x418810, 0x80000000, s, b);
+	mmio_refn(info, 0x419848, 0x10000000, s, b);
+	mmio_refn(info, 0x419c2c, 0x10000000, s, b);
+	mmio_refn(info, 0x419b00, 0x00000000, s, b);
+	mmio_wr32(info, 0x419b04, 0x80000000 | size >> 7);
+	mmio_wr32(info, 0x405830, attrib);
+	mmio_wr32(info, 0x40585c, alpha);
+	mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches);
+
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++, n++) {
+			const u32 as = alpha * gr->ppc_tpc_nr[gpc][ppc];
+			const u32 bs = attrib * gr->ppc_tpc_nr[gpc][ppc];
+			const u32 u = 0x418ea0 + (n * 0x04);
+			const u32 o = PPC_UNIT(gpc, ppc, 0);
+			const u32 p = GPC_UNIT(gpc, 0xc44 + (ppc * 4));
+			if (!(gr->ppc_mask[gpc] & (1 << ppc)))
+				continue;
+			mmio_wr32(info, o + 0xc0, bs);
+			mmio_wr32(info, p, bs);
+			mmio_wr32(info, o + 0xf4, bo);
+			mmio_wr32(info, o + 0xf0, bs);
+			bo += grctx->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc];
+			mmio_wr32(info, o + 0xe4, as);
+			mmio_wr32(info, o + 0xf8, ao);
+			ao += grctx->alpha_nr_max * gr->ppc_tpc_nr[gpc][ppc];
+			mmio_wr32(info, u, bs);
+		}
+	}
+
+	mmio_wr32(info, 0x4181e4, 0x00000100);
+	mmio_wr32(info, 0x41befc, 0x00000100);
+}
+
+const struct gf100_grctx_func
+gp102_grctx = {
+	.main = gp100_grctx_generate_main,
+	.unkn = gk104_grctx_generate_unkn,
+	.bundle = gm107_grctx_generate_bundle,
+	.bundle_size = 0x3000,
+	.bundle_min_gpm_fifo_depth = 0x180,
+	.bundle_token_limit = 0x900,
+	.pagepool = gp100_grctx_generate_pagepool,
+	.pagepool_size = 0x20000,
+	.attrib = gp102_grctx_generate_attrib,
+	.attrib_nr_max = 0x5d4,
+	.attrib_nr = 0x320,
+	.alpha_nr_max = 0xc00,
+	.alpha_nr = 0x800,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index f9acb8a944d2..a4410ef19db5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1647,8 +1647,18 @@ static int
 gf100_gr_oneinit(struct nvkm_gr *base)
 {
 	struct gf100_gr *gr = gf100_gr(base);
-	struct nvkm_device *device = gr->base.engine.subdev.device;
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
 	int i, j;
+	int ret;
+
+	ret = nvkm_falcon_v1_new(subdev, "FECS", 0x409000, &gr->fecs);
+	if (ret)
+		return ret;
+
+	ret = nvkm_falcon_v1_new(subdev, "GPCCS", 0x41a000, &gr->gpccs);
+	if (ret)
+		return ret;

 	nvkm_pmu_pgob(device->pmu, false);

@@ -1856,24 +1866,13 @@ int
 gf100_gr_ctor(const struct gf100_gr_func *func, struct nvkm_device *device,
 	      int index, struct gf100_gr *gr)
 {
-	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
-	int ret;
-
 	gr->func = func;
 	gr->firmware = nvkm_boolopt(device->cfgopt, "NvGrUseFW",
 				    func->fecs.ucode == NULL);

-	ret = nvkm_gr_ctor(&gf100_gr_, device, index,
+	return nvkm_gr_ctor(&gf100_gr_, device, index,
 			    gr->firmware || func->fecs.ucode != NULL,
 			    &gr->base);
-	if (ret)
-		return ret;
-
-	ret = nvkm_falcon_v1_new(subdev, "FECS", 0x409000, &gr->fecs);
-	if (ret)
-		return ret;
-
-	return nvkm_falcon_v1_new(subdev, "GPCCS", 0x41a000, &gr->gpccs);
 }

 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index db6ee3b06841..1d2101af2a87 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -124,6 +124,7 @@ struct gf100_gr_func {
 	void (*init_gpc_mmu)(struct gf100_gr *);
 	void (*init_rop_active_fbps)(struct gf100_gr *);
 	void (*init_ppc_exceptions)(struct gf100_gr *);
+	void (*init_swdx_pes_mask)(struct gf100_gr *);
 	void (*set_hww_esr_report_mask)(struct gf100_gr *);
 	const struct gf100_gr_pack *mmio;
 	struct {
@@ -150,6 +151,9 @@ int gk20a_gr_init(struct gf100_gr *);
 int gm200_gr_init(struct gf100_gr *);
 int gm200_gr_rops(struct gf100_gr *);

+int gp100_gr_init(struct gf100_gr *);
+void gp100_gr_init_rop_active_fbps(struct gf100_gr *);
+
 #define gf100_gr_chan(p) container_of((p), struct gf100_gr_chan, object)

 struct gf100_gr_chan {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
index 26ad79def0ff..94ed7debb714 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
@@ -30,7 +30,7 @@
  * PGRAPH engine/subdev functions
  ******************************************************************************/

-static void
+void
 gp100_gr_init_rop_active_fbps(struct gf100_gr *gr)
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
@@ -40,7 +40,7 @@ gp100_gr_init_rop_active_fbps(struct gf100_gr *gr)
 	nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
 }

-static int
+int
 gp100_gr_init(struct gf100_gr *gr)
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
@@ -85,6 +85,8 @@ gp100_gr_init(struct gf100_gr *gr)
 	nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804));

 	gr->func->init_rop_active_fbps(gr);
+	if (gr->func->init_swdx_pes_mask)
+		gr->func->init_swdx_pes_mask(gr);

 	nvkm_wr32(device, 0x400500, 0x00010001);
 	nvkm_wr32(device, 0x400100, 0xffffffff);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
new file mode 100644
index 000000000000..1d5117a16299
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "gf100.h"
+#include "ctxgf100.h"
+
+#include <nvif/class.h>
+
+static void
+gp102_gr_init_swdx_pes_mask(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	u32 mask = 0, data, gpc;
+
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		data = nvkm_rd32(device, GPC_UNIT(gpc, 0x0c50)) & 0x0000000f;
+		mask |= data << (gpc * 4);
+	}
+
+	nvkm_wr32(device, 0x4181d0, mask);
+}
+
+static const struct gf100_gr_func
+gp102_gr = {
+	.init = gp100_gr_init,
+	.init_gpc_mmu = gm200_gr_init_gpc_mmu,
+	.init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
+	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
+	.init_swdx_pes_mask = gp102_gr_init_swdx_pes_mask,
+	.rops = gm200_gr_rops,
+	.ppc_nr = 3,
+	.grctx = &gp102_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+		{ -1, -1, PASCAL_B, &gf100_fermi },
+		{ -1, -1, PASCAL_COMPUTE_B },
+		{}
+	}
+};
+
+int
+gp102_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gm200_gr_new_(&gp102_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
index 13b7c71ff900..98477beb823a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
@@ -1 +1,2 @@
-#nvkm-y += nvkm/engine/nvdec/base.o
+nvkm-y += nvkm/engine/nvdec/base.o
+nvkm-y += nvkm/engine/nvdec/gp102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c
new file mode 100644
index 000000000000..4807021fd990
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c
@@ -0,0 +1,59 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22#include "priv.h"
23
24#include <engine/falcon.h>
25
26static int
27nvkm_nvdec_oneinit(struct nvkm_engine *engine)
28{
29 struct nvkm_nvdec *nvdec = nvkm_nvdec(engine);
30 return nvkm_falcon_v1_new(&nvdec->engine.subdev, "NVDEC", 0x84000,
31 &nvdec->falcon);
32}
33
34static void *
35nvkm_nvdec_dtor(struct nvkm_engine *engine)
36{
37 struct nvkm_nvdec *nvdec = nvkm_nvdec(engine);
38 nvkm_falcon_del(&nvdec->falcon);
39 return nvdec;
40}
41
42static const struct nvkm_engine_func
43nvkm_nvdec = {
44 .dtor = nvkm_nvdec_dtor,
45 .oneinit = nvkm_nvdec_oneinit,
46};
47
48int
49nvkm_nvdec_new_(struct nvkm_device *device, int index,
50 struct nvkm_nvdec **pnvdec)
51{
52 struct nvkm_nvdec *nvdec;
53
54 if (!(nvdec = *pnvdec = kzalloc(sizeof(*nvdec), GFP_KERNEL)))
55 return -ENOMEM;
56
57 return nvkm_engine_ctor(&nvkm_nvdec, device, index, true,
58 &nvdec->engine);
59};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gp102.c
new file mode 100644
index 000000000000..fde6328c6d71
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gp102.c
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "priv.h"
+
+int
+gp102_nvdec_new(struct nvkm_device *device, int index,
+		struct nvkm_nvdec **pnvdec)
+{
+	return nvkm_nvdec_new_(device, index, pnvdec);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
new file mode 100644
index 000000000000..353b94f51205
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
@@ -0,0 +1,6 @@
+#ifndef __NVKM_NVDEC_PRIV_H__
+#define __NVKM_NVDEC_PRIV_H__
+#include <engine/nvdec.h>
+
+int nvkm_nvdec_new_(struct nvkm_device *, int, struct nvkm_nvdec **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild
new file mode 100644
index 000000000000..4b17254cfbd0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild
@@ -0,0 +1,2 @@
+nvkm-y += nvkm/engine/sec2/base.o
+nvkm-y += nvkm/engine/sec2/gp102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c
new file mode 100644
index 000000000000..814daf35e21f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/msgqueue.h>
+#include <engine/falcon.h>
+
+static void *
+nvkm_sec2_dtor(struct nvkm_engine *engine)
+{
+	struct nvkm_sec2 *sec2 = nvkm_sec2(engine);
+	nvkm_msgqueue_del(&sec2->queue);
+	nvkm_falcon_del(&sec2->falcon);
+	return sec2;
+}
+
+static void
+nvkm_sec2_intr(struct nvkm_engine *engine)
+{
+	struct nvkm_sec2 *sec2 = nvkm_sec2(engine);
+	struct nvkm_subdev *subdev = &engine->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 disp = nvkm_rd32(device, 0x8701c);
+	u32 intr = nvkm_rd32(device, 0x87008) & disp & ~(disp >> 16);
+
+	if (intr & 0x00000040) {
+		schedule_work(&sec2->work);
+		nvkm_wr32(device, 0x87004, 0x00000040);
+		intr &= ~0x00000040;
+	}
+
+	if (intr) {
+		nvkm_error(subdev, "unhandled intr %08x\n", intr);
+		nvkm_wr32(device, 0x87004, intr);
+
+	}
+}
+
+static void
+nvkm_sec2_recv(struct work_struct *work)
+{
+	struct nvkm_sec2 *sec2 = container_of(work, typeof(*sec2), work);
+	nvkm_msgqueue_recv(sec2->queue);
+}
+
+
+static int
+nvkm_sec2_oneinit(struct nvkm_engine *engine)
+{
+	struct nvkm_sec2 *sec2 = nvkm_sec2(engine);
+	return nvkm_falcon_v1_new(&sec2->engine.subdev, "SEC2", 0x87000,
+				  &sec2->falcon);
+}
+
+static int
+nvkm_sec2_fini(struct nvkm_engine *engine, bool suspend)
+{
+	struct nvkm_sec2 *sec2 = nvkm_sec2(engine);
+	flush_work(&sec2->work);
+	return 0;
+}
+
+static const struct nvkm_engine_func
+nvkm_sec2 = {
+	.dtor = nvkm_sec2_dtor,
+	.oneinit = nvkm_sec2_oneinit,
+	.fini = nvkm_sec2_fini,
+	.intr = nvkm_sec2_intr,
+};
+
+int
+nvkm_sec2_new_(struct nvkm_device *device, int index,
+	       struct nvkm_sec2 **psec2)
+{
+	struct nvkm_sec2 *sec2;
+
+	if (!(sec2 = *psec2 = kzalloc(sizeof(*sec2), GFP_KERNEL)))
+		return -ENOMEM;
+	INIT_WORK(&sec2->work, nvkm_sec2_recv);
+
+	return nvkm_engine_ctor(&nvkm_sec2, device, index, true, &sec2->engine);
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c
new file mode 100644
index 000000000000..9be1524c08f5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "priv.h"
+
+int
+gp102_sec2_new(struct nvkm_device *device, int index,
+	       struct nvkm_sec2 **psec2)
+{
+	return nvkm_sec2_new_(device, index, psec2);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
new file mode 100644
index 000000000000..7ecc9d4724dc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
@@ -0,0 +1,9 @@
1#ifndef __NVKM_SEC2_PRIV_H__
2#define __NVKM_SEC2_PRIV_H__
3#include <engine/sec2.h>
4
5#define nvkm_sec2(p) container_of((p), struct nvkm_sec2, engine)
6
7int nvkm_sec2_new_(struct nvkm_device *, int, struct nvkm_sec2 **);
8
9#endif
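The nvkm_sec2() macro is the usual container_of downcast: every nvkm_engine callback receives a pointer to the embedded base object and recovers the enclosing nvkm_sec2 from it. A self-contained sketch of the idiom (hypothetical structures, userspace-style for brevity):

#include <stddef.h>

#define demo_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct engine { int id; };

struct sec2 {
	int extra;
	struct engine engine;	/* need not be the first member */
};

static struct sec2 *to_sec2(struct engine *e)
{
	return demo_container_of(e, struct sec2, engine);
}

This is also why nvkm_sec2_dtor() can simply return the sec2 pointer: the engine core frees the containing allocation in one go.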
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild b/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild
index 584863db9bfc..2aa040ba39e5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild
@@ -1,2 +1,5 @@
1nvkm-y += nvkm/falcon/base.o 1nvkm-y += nvkm/falcon/base.o
2nvkm-y += nvkm/falcon/v1.o 2nvkm-y += nvkm/falcon/v1.o
3nvkm-y += nvkm/falcon/msgqueue.o
4nvkm-y += nvkm/falcon/msgqueue_0137c63d.o
5nvkm-y += nvkm/falcon/msgqueue_0148cdec.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
index 4852f313762f..1b7f48efd8b1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
@@ -41,14 +41,22 @@ void
41nvkm_falcon_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start, 41nvkm_falcon_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start,
42 u32 size, u8 port) 42 u32 size, u8 port)
43{ 43{
44 mutex_lock(&falcon->dmem_mutex);
45
44 falcon->func->load_dmem(falcon, data, start, size, port); 46 falcon->func->load_dmem(falcon, data, start, size, port);
47
48 mutex_unlock(&falcon->dmem_mutex);
45} 49}
46 50
47void 51void
48nvkm_falcon_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size, u8 port, 52nvkm_falcon_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size, u8 port,
49 void *data) 53 void *data)
50{ 54{
55 mutex_lock(&falcon->dmem_mutex);
56
51 falcon->func->read_dmem(falcon, start, size, port, data); 57 falcon->func->read_dmem(falcon, start, size, port, data);
58
59 mutex_unlock(&falcon->dmem_mutex);
52} 60}
53 61
54void 62void
@@ -129,6 +137,9 @@ nvkm_falcon_clear_interrupt(struct nvkm_falcon *falcon, u32 mask)
129void 137void
130nvkm_falcon_put(struct nvkm_falcon *falcon, const struct nvkm_subdev *user) 138nvkm_falcon_put(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
131{ 139{
140 if (unlikely(!falcon))
141 return;
142
132 mutex_lock(&falcon->mutex); 143 mutex_lock(&falcon->mutex);
133 if (falcon->user == user) { 144 if (falcon->user == user) {
134 nvkm_debug(falcon->user, "released %s falcon\n", falcon->name); 145 nvkm_debug(falcon->user, "released %s falcon\n", falcon->name);
@@ -159,6 +170,7 @@ nvkm_falcon_ctor(const struct nvkm_falcon_func *func,
159 struct nvkm_subdev *subdev, const char *name, u32 addr, 170 struct nvkm_subdev *subdev, const char *name, u32 addr,
160 struct nvkm_falcon *falcon) 171 struct nvkm_falcon *falcon)
161{ 172{
173 u32 debug_reg;
162 u32 reg; 174 u32 reg;
163 175
164 falcon->func = func; 176 falcon->func = func;
@@ -166,6 +178,7 @@ nvkm_falcon_ctor(const struct nvkm_falcon_func *func,
166 falcon->name = name; 178 falcon->name = name;
167 falcon->addr = addr; 179 falcon->addr = addr;
168 mutex_init(&falcon->mutex); 180 mutex_init(&falcon->mutex);
181 mutex_init(&falcon->dmem_mutex);
169 182
170 reg = nvkm_falcon_rd32(falcon, 0x12c); 183 reg = nvkm_falcon_rd32(falcon, 0x12c);
171 falcon->version = reg & 0xf; 184 falcon->version = reg & 0xf;
@@ -177,8 +190,31 @@ nvkm_falcon_ctor(const struct nvkm_falcon_func *func,
177 falcon->code.limit = (reg & 0x1ff) << 8; 190 falcon->code.limit = (reg & 0x1ff) << 8;
178 falcon->data.limit = (reg & 0x3fe00) >> 1; 191 falcon->data.limit = (reg & 0x3fe00) >> 1;
179 192
180 reg = nvkm_falcon_rd32(falcon, 0xc08); 193 switch (subdev->index) {
181 falcon->debug = (reg >> 20) & 0x1; 194 case NVKM_ENGINE_GR:
195 debug_reg = 0x0;
196 break;
197 case NVKM_SUBDEV_PMU:
198 debug_reg = 0xc08;
199 break;
200 case NVKM_ENGINE_NVDEC:
201 debug_reg = 0xd00;
202 break;
203 case NVKM_ENGINE_SEC2:
204 debug_reg = 0x408;
205 falcon->has_emem = true;
206 break;
207 default:
208 nvkm_warn(subdev, "unsupported falcon %s!\n",
209 nvkm_subdev_name[subdev->index]);
210 debug_reg = 0;
211 break;
212 }
213
214 if (debug_reg) {
215 u32 val = nvkm_falcon_rd32(falcon, debug_reg);
216 falcon->debug = (val >> 20) & 0x1;
217 }
182} 218}
183 219
184void 220void
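The new dmem_mutex exists because a DMEM transfer is not one atomic register access: the port's address register is programmed first (with auto-increment enabled) and the payload is then streamed through the data register, so two concurrent transfers on the same port would interleave and corrupt each other. The mutex makes the whole program-address-then-stream sequence a single critical section. A sketch of the unserialized hazard, with hypothetical register helpers (offsets as used by the v1 falcon code below):

#include <stdint.h>
typedef uint32_t u32;

struct demo_falcon { volatile u32 *mmio; };	/* stand-in for nvkm_falcon */

static void demo_wr32(struct demo_falcon *f, u32 addr, u32 data)
{
	f->mmio[addr / 4] = data;	/* hypothetical MMIO write */
}

static void demo_load_dmem(struct demo_falcon *f, const u32 *data,
			   u32 start, u32 words)
{
	u32 i;

	/* program the auto-incrementing write address for port 0... */
	demo_wr32(f, 0x1c0, start | (0x1 << 24));

	/*
	 * ...then stream the payload; without external locking, a second
	 * thread reprogramming 0x1c0 here corrupts both transfers.
	 */
	for (i = 0; i < words; i++)
		demo_wr32(f, 0x1c4, data[i]);
}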
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
new file mode 100644
index 000000000000..a063fb823117
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
@@ -0,0 +1,553 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "msgqueue.h"
25#include <engine/falcon.h>
26
27#include <subdev/secboot.h>
28
29
30#define HDR_SIZE sizeof(struct nvkm_msgqueue_hdr)
31#define QUEUE_ALIGNMENT 4
32/* max size of the messages we can receive */
33#define MSG_BUF_SIZE 128
34
35static int
36msg_queue_open(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue)
37{
38 struct nvkm_falcon *falcon = priv->falcon;
39
40 mutex_lock(&queue->mutex);
41
42 queue->position = nvkm_falcon_rd32(falcon, queue->tail_reg);
43
44 return 0;
45}
46
47static void
48msg_queue_close(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
49 bool commit)
50{
51 struct nvkm_falcon *falcon = priv->falcon;
52
53 if (commit)
54 nvkm_falcon_wr32(falcon, queue->tail_reg, queue->position);
55
56 mutex_unlock(&queue->mutex);
57}
58
59static bool
60msg_queue_empty(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue)
61{
62 struct nvkm_falcon *falcon = priv->falcon;
63 u32 head, tail;
64
65 head = nvkm_falcon_rd32(falcon, queue->head_reg);
66 tail = nvkm_falcon_rd32(falcon, queue->tail_reg);
67
68 return head == tail;
69}
70
71static int
72msg_queue_pop(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
73 void *data, u32 size)
74{
75 struct nvkm_falcon *falcon = priv->falcon;
76 const struct nvkm_subdev *subdev = priv->falcon->owner;
77 u32 head, tail, available;
78
79 head = nvkm_falcon_rd32(falcon, queue->head_reg);
80 /* has the buffer looped? */
81 if (head < queue->position)
82 queue->position = queue->offset;
83
84 tail = queue->position;
85
86 available = head - tail;
87
88 if (available == 0) {
89 nvkm_warn(subdev, "no message data available\n");
90 return 0;
91 }
92
93 if (size > available) {
94 nvkm_warn(subdev, "message data smaller than read request\n");
95 size = available;
96 }
97
98 nvkm_falcon_read_dmem(priv->falcon, tail, size, 0, data);
99 queue->position += ALIGN(size, QUEUE_ALIGNMENT);
100
101 return size;
102}
103
104static int
105msg_queue_read(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
106 struct nvkm_msgqueue_hdr *hdr)
107{
108 const struct nvkm_subdev *subdev = priv->falcon->owner;
109 int err;
110
111 err = msg_queue_open(priv, queue);
112 if (err) {
113 nvkm_error(subdev, "fail to open queue %d\n", queue->index);
114 return err;
115 }
116
117 if (msg_queue_empty(priv, queue)) {
118 err = 0;
119 goto close;
120 }
121
122 err = msg_queue_pop(priv, queue, hdr, HDR_SIZE);
123 if (err >= 0 && err != HDR_SIZE)
124 err = -EINVAL;
125 if (err < 0) {
126 nvkm_error(subdev, "failed to read message header: %d\n", err);
127 goto close;
128 }
129
130 if (hdr->size > MSG_BUF_SIZE) {
131 nvkm_error(subdev, "message too big (%d bytes)\n", hdr->size);
132 err = -ENOSPC;
133 goto close;
134 }
135
136 if (hdr->size > HDR_SIZE) {
137 u32 read_size = hdr->size - HDR_SIZE;
138
139 err = msg_queue_pop(priv, queue, (hdr + 1), read_size);
140 if (err >= 0 && err != read_size)
141 err = -EINVAL;
142 if (err < 0) {
143 nvkm_error(subdev, "failed to read message: %d\n", err);
144 goto close;
145 }
146 }
147
148close:
149 msg_queue_close(priv, queue, (err >= 0));
150
151 return err;
152}
153
154static bool
155cmd_queue_has_room(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
156 u32 size, bool *rewind)
157{
158 struct nvkm_falcon *falcon = priv->falcon;
159 u32 head, tail, free;
160
161 size = ALIGN(size, QUEUE_ALIGNMENT);
162
163 head = nvkm_falcon_rd32(falcon, queue->head_reg);
164 tail = nvkm_falcon_rd32(falcon, queue->tail_reg);
165
166 if (head >= tail) {
167 free = queue->offset + queue->size - head;
168 free -= HDR_SIZE;
169
170 if (size > free) {
171 *rewind = true;
172 head = queue->offset;
173 }
174 }
175
176 if (head < tail)
177 free = tail - head - 1;
178
179 return size <= free;
180}
181
182static int
183cmd_queue_push(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
184 void *data, u32 size)
185{
186 nvkm_falcon_load_dmem(priv->falcon, data, queue->position, size, 0);
187 queue->position += ALIGN(size, QUEUE_ALIGNMENT);
188
189 return 0;
190}
191
192/* REWIND unit is always 0x00 */
193#define MSGQUEUE_UNIT_REWIND 0x00
194
195static void
196cmd_queue_rewind(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue)
197{
198 const struct nvkm_subdev *subdev = priv->falcon->owner;
200 struct nvkm_msgqueue_hdr cmd = {};
200 int err;
201
202 cmd.unit_id = MSGQUEUE_UNIT_REWIND;
203 cmd.size = sizeof(cmd);
204 err = cmd_queue_push(priv, queue, &cmd, cmd.size);
205 if (err)
206 nvkm_error(subdev, "queue %d rewind failed\n", queue->index);
207 else
208 nvkm_debug(subdev, "queue %d rewound\n", queue->index);
209
210 queue->position = queue->offset;
211}
212
213static int
214cmd_queue_open(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
215 u32 size)
216{
217 struct nvkm_falcon *falcon = priv->falcon;
218 const struct nvkm_subdev *subdev = priv->falcon->owner;
219 bool rewind = false;
220
221 mutex_lock(&queue->mutex);
222
223 if (!cmd_queue_has_room(priv, queue, size, &rewind)) {
224 nvkm_error(subdev, "queue full\n");
225 mutex_unlock(&queue->mutex);
226 return -EAGAIN;
227 }
228
229 queue->position = nvkm_falcon_rd32(falcon, queue->head_reg);
230
231 if (rewind)
232 cmd_queue_rewind(priv, queue);
233
234 return 0;
235}
236
237static void
238cmd_queue_close(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
239 bool commit)
240{
241 struct nvkm_falcon *falcon = priv->falcon;
242
243 if (commit)
244 nvkm_falcon_wr32(falcon, queue->head_reg, queue->position);
245
246 mutex_unlock(&queue->mutex);
247}
248
249static int
250cmd_write(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_hdr *cmd,
251 struct nvkm_msgqueue_queue *queue)
252{
253 const struct nvkm_subdev *subdev = priv->falcon->owner;
254 static unsigned long timeout = ~0;
255 unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout);
256 int ret = -EAGAIN;
257 bool commit = true;
258
259 while (ret == -EAGAIN && time_before(jiffies, end_jiffies))
260 ret = cmd_queue_open(priv, queue, cmd->size);
261 if (ret) {
262 nvkm_error(subdev, "pmu_queue_open_write failed\n");
263 return ret;
264 }
265
266 ret = cmd_queue_push(priv, queue, cmd, cmd->size);
267 if (ret) {
268 nvkm_error(subdev, "pmu_queue_push failed\n");
269 commit = false;
270 }
271
272 cmd_queue_close(priv, queue, commit);
273
274 return ret;
275}
276
277static struct nvkm_msgqueue_seq *
278msgqueue_seq_acquire(struct nvkm_msgqueue *priv)
279{
280 const struct nvkm_subdev *subdev = priv->falcon->owner;
281 struct nvkm_msgqueue_seq *seq;
282 u32 index;
283
284 mutex_lock(&priv->seq_lock);
285
286 index = find_first_zero_bit(priv->seq_tbl, NVKM_MSGQUEUE_NUM_SEQUENCES);
287
288 if (index >= NVKM_MSGQUEUE_NUM_SEQUENCES) {
289 nvkm_error(subdev, "no free sequence available\n");
290 mutex_unlock(&priv->seq_lock);
291 return ERR_PTR(-EAGAIN);
292 }
293
294 set_bit(index, priv->seq_tbl);
295
296 mutex_unlock(&priv->seq_lock);
297
298 seq = &priv->seq[index];
299 seq->state = SEQ_STATE_PENDING;
300
301 return seq;
302}
303
304static void
305msgqueue_seq_release(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_seq *seq)
306{
307 /* no need to acquire seq_lock since clear_bit is atomic */
308 seq->state = SEQ_STATE_FREE;
309 seq->callback = NULL;
310 seq->completion = NULL;
311 clear_bit(seq->id, priv->seq_tbl);
312}
313
314/* specifies that we want to know the command status in the answer message */
315#define CMD_FLAGS_STATUS BIT(0)
316/* specifies that we want an interrupt when the answer message is queued */
317#define CMD_FLAGS_INTR BIT(1)
318
319int
320nvkm_msgqueue_post(struct nvkm_msgqueue *priv, enum msgqueue_msg_priority prio,
321 struct nvkm_msgqueue_hdr *cmd, nvkm_msgqueue_callback cb,
322 struct completion *completion, bool wait_init)
323{
324 struct nvkm_msgqueue_seq *seq;
325 struct nvkm_msgqueue_queue *queue;
326 int ret;
327
328 if (wait_init && !wait_for_completion_timeout(&priv->init_done,
329 msecs_to_jiffies(1000)))
330 return -ETIMEDOUT;
331
332 queue = priv->func->cmd_queue(priv, prio);
333 if (IS_ERR(queue))
334 return PTR_ERR(queue);
335
336 seq = msgqueue_seq_acquire(priv);
337 if (IS_ERR(seq))
338 return PTR_ERR(seq);
339
340 cmd->seq_id = seq->id;
341 cmd->ctrl_flags = CMD_FLAGS_STATUS | CMD_FLAGS_INTR;
342
343 seq->callback = cb;
344 seq->state = SEQ_STATE_USED;
345 seq->completion = completion;
346
347 ret = cmd_write(priv, cmd, queue);
348 if (ret) {
349 seq->state = SEQ_STATE_PENDING;
350 msgqueue_seq_release(priv, seq);
351 }
352
353 return ret;
354}
355
356static int
357msgqueue_msg_handle(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_hdr *hdr)
358{
359 const struct nvkm_subdev *subdev = priv->falcon->owner;
360 struct nvkm_msgqueue_seq *seq;
361
362 seq = &priv->seq[hdr->seq_id];
363 if (seq->state != SEQ_STATE_USED && seq->state != SEQ_STATE_CANCELLED) {
364 nvkm_error(subdev, "msg for unknown sequence %d\n", seq->id);
365 return -EINVAL;
366 }
367
368 if (seq->state == SEQ_STATE_USED) {
369 if (seq->callback)
370 seq->callback(priv, hdr);
371 }
372
373 if (seq->completion)
374 complete(seq->completion);
375
376 msgqueue_seq_release(priv, seq);
377
378 return 0;
379}
380
381static int
382msgqueue_handle_init_msg(struct nvkm_msgqueue *priv,
383 struct nvkm_msgqueue_hdr *hdr)
384{
385 struct nvkm_falcon *falcon = priv->falcon;
386 const struct nvkm_subdev *subdev = falcon->owner;
387 u32 tail;
388 u32 tail_reg;
389 int ret;
390
391 /*
392 * Of course the message queue registers vary depending on the falcon
393 * used...
394 */
395 switch (falcon->owner->index) {
396 case NVKM_SUBDEV_PMU:
397 tail_reg = 0x4cc;
398 break;
399 case NVKM_ENGINE_SEC2:
400 tail_reg = 0xa34;
401 break;
402 default:
403 nvkm_error(subdev, "falcon %s unsupported for msgqueue!\n",
404 nvkm_subdev_name[falcon->owner->index]);
405 return -EINVAL;
406 }
407
408 /*
409 * Read the message - queues are not initialized yet so we cannot rely
410 * on msg_queue_read()
411 */
412 tail = nvkm_falcon_rd32(falcon, tail_reg);
413 nvkm_falcon_read_dmem(falcon, tail, HDR_SIZE, 0, hdr);
414
415 if (hdr->size > MSG_BUF_SIZE) {
416 nvkm_error(subdev, "message too big (%d bytes)\n", hdr->size);
417 return -ENOSPC;
418 }
419
420 nvkm_falcon_read_dmem(falcon, tail + HDR_SIZE, hdr->size - HDR_SIZE, 0,
421 (hdr + 1));
422
423 tail += ALIGN(hdr->size, QUEUE_ALIGNMENT);
424 nvkm_falcon_wr32(falcon, tail_reg, tail);
425
426 ret = priv->func->init_func->init_callback(priv, hdr);
427 if (ret)
428 return ret;
429
430 return 0;
431}
432
433void
434nvkm_msgqueue_process_msgs(struct nvkm_msgqueue *priv,
435 struct nvkm_msgqueue_queue *queue)
436{
437 /*
438 * We are invoked from a worker thread, so normally we have plenty of
439 * stack space to work with.
440 */
441 u8 msg_buffer[MSG_BUF_SIZE];
442 struct nvkm_msgqueue_hdr *hdr = (void *)msg_buffer;
443 int ret;
444
445 /* the first message we receive must be the init message */
446 if (!priv->init_msg_received) {
447 ret = msgqueue_handle_init_msg(priv, hdr);
448 if (!ret)
449 priv->init_msg_received = true;
450 } else {
451 while (msg_queue_read(priv, queue, hdr) > 0)
452 msgqueue_msg_handle(priv, hdr);
453 }
454}
455
456void
457nvkm_msgqueue_write_cmdline(struct nvkm_msgqueue *queue, void *buf)
458{
459 if (!queue || !queue->func || !queue->func->init_func)
460 return;
461
462 queue->func->init_func->gen_cmdline(queue, buf);
463}
464
465int
466nvkm_msgqueue_acr_boot_falcon(struct nvkm_msgqueue *queue, enum nvkm_secboot_falcon falcon)
467{
468 if (!queue || !queue->func->acr_func || !queue->func->acr_func->boot_falcon)
469 return -ENODEV;
470
471 return queue->func->acr_func->boot_falcon(queue, falcon);
472}
473
474int
475nvkm_msgqueue_new(u32 version, struct nvkm_falcon *falcon, struct nvkm_msgqueue **queue)
476{
477 const struct nvkm_subdev *subdev = falcon->owner;
478 int ret = -EINVAL;
479
480 switch (version) {
481 case 0x0137c63d:
482 ret = msgqueue_0137c63d_new(falcon, queue);
483 break;
484 case 0x0148cdec:
485 ret = msgqueue_0148cdec_new(falcon, queue);
486 break;
487 default:
488 nvkm_error(subdev, "unhandled firmware version 0x%08x\n",
489 version);
490 break;
491 }
492
493 if (ret == 0) {
494 nvkm_debug(subdev, "firmware version: 0x%08x\n", version);
495 (*queue)->fw_version = version;
496 }
497
498 return ret;
499}
500
501void
502nvkm_msgqueue_del(struct nvkm_msgqueue **queue)
503{
504 if (*queue) {
505 (*queue)->func->dtor(*queue);
506 *queue = NULL;
507 }
508}
509
510void
511nvkm_msgqueue_recv(struct nvkm_msgqueue *queue)
512{
513 if (!queue)
514 return;
515
516 if (!queue->func || !queue->func->recv) {
517 nvkm_warn(queue->falcon->owner,
518 "msgqueue recv function called while no firmware set!\n");
519 return;
520 }
521 queue->func->recv(queue);
522}
523
524int
525nvkm_msgqueue_reinit(struct nvkm_msgqueue *queue)
526{
527 /* firmware not set yet... */
528 if (!queue)
529 return 0;
530
531 queue->init_msg_received = false;
532 reinit_completion(&queue->init_done);
533
534 return 0;
535}
536
537void
538nvkm_msgqueue_ctor(const struct nvkm_msgqueue_func *func,
539 struct nvkm_falcon *falcon,
540 struct nvkm_msgqueue *queue)
541{
542 int i;
543
544 queue->func = func;
545 queue->falcon = falcon;
546 mutex_init(&queue->seq_lock);
547 for (i = 0; i < NVKM_MSGQUEUE_NUM_SEQUENCES; i++)
548 queue->seq[i].id = i;
549
550 init_completion(&queue->init_done);
551
552
553}
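Tying the pieces together: a caller builds a command with struct nvkm_msgqueue_hdr at its head, then posts it with a callback, a completion, or both; the sequence ID that nvkm_msgqueue_post() stamps into the header is what matches the firmware's reply back to the request. A hedged sketch of a synchronous round trip, modeled on the acr_boot_falcon() implementations below; the unit ID, command type and payload here are hypothetical:

struct demo_cmd {
	struct nvkm_msgqueue_hdr hdr;
	u8 cmd_type;
	u32 payload;
};

static int
demo_post_sync(struct nvkm_msgqueue *queue)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct demo_cmd cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.hdr.unit_id = 0x42;		/* hypothetical unit */
	cmd.hdr.size = sizeof(cmd);
	cmd.cmd_type = 0x01;		/* hypothetical command */
	cmd.payload = 0xcafe;

	/* seq_id and ctrl_flags are filled in by nvkm_msgqueue_post() */
	ret = nvkm_msgqueue_post(queue, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr,
				 NULL, &done, true);
	if (ret)
		return ret;

	if (!wait_for_completion_timeout(&done, msecs_to_jiffies(1000)))
		return -ETIMEDOUT;

	return 0;
}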
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h
new file mode 100644
index 000000000000..f37afe963d3e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h
@@ -0,0 +1,207 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef __NVKM_CORE_FALCON_MSGQUEUE_H
25#define __NVKM_CORE_FALCON_MSGQUEUE_H
26
27#include <core/msgqueue.h>
28
29/*
30 * The struct nvkm_msgqueue (named so for lack of a better candidate) manages
31 * a firmware (typically, an NVIDIA-signed firmware) running on a given falcon.
32 *
33 * Such firmwares expect to receive commands (through one or several command
34 * queues) and will reply to such commands by sending messages (using one
35 * message queue).
36 *
37 * Each firmware can support one or several units - ACR for managing secure
38 * falcons, PMU for power management, etc. A unit can be seen as a class to
39 * which commands can be sent.
40 *
41 * One usage example would be to send a command to the SEC falcon to ask it to
42 * reset a secure falcon. The SEC falcon will receive the command, process it,
43 * and send a message to signal success or failure. Only when the corresponding
44 * message is received can the requester assume the request has been processed.
45 *
46 * Since we expect many variations between the firmwares NVIDIA will release
47 * across GPU generations, this library is built in a very modular way. Message
48 * formats and queue details (such as the number of queues and their usage) are
49 * specializations of struct nvkm_msgqueue, while the functions in msgqueue.c
50 * take care of posting commands and processing messages in a fashion that is
51 * universal.
52 *
53 */
54
55enum msgqueue_msg_priority {
56 MSGQUEUE_MSG_PRIORITY_HIGH,
57 MSGQUEUE_MSG_PRIORITY_LOW,
58};
59
60/**
61 * struct nvkm_msgqueue_hdr - header for all commands/messages
62 * @unit_id: id of the firmware unit receiving the command/sending the message
63 * @size: total size of command/message
64 * @ctrl_flags: type of command/message
65 * @seq_id: used to match a message from its corresponding command
66 */
67struct nvkm_msgqueue_hdr {
68 u8 unit_id;
69 u8 size;
70 u8 ctrl_flags;
71 u8 seq_id;
72};
73
74/**
75 * struct nvkm_msgqueue_msg - base message.
76 *
77 * This is just a header and a message (or command) type. Useful when
78 * building command-specific structures.
79 */
80struct nvkm_msgqueue_msg {
81 struct nvkm_msgqueue_hdr hdr;
82 u8 msg_type;
83};
84
85struct nvkm_msgqueue;
86typedef void
87(*nvkm_msgqueue_callback)(struct nvkm_msgqueue *, struct nvkm_msgqueue_hdr *);
88
89/**
90 * struct nvkm_msgqueue_init_func - msgqueue functions related to initialization
91 *
92 * @gen_cmdline: build the commandline into a pre-allocated buffer
93 * @init_callback: called to process the init message
94 */
95struct nvkm_msgqueue_init_func {
96 void (*gen_cmdline)(struct nvkm_msgqueue *, void *);
97 int (*init_callback)(struct nvkm_msgqueue *, struct nvkm_msgqueue_hdr *);
98};
99
100/**
101 * struct nvkm_msgqueue_acr_func - msgqueue functions related to ACR
102 *
103 * @boot_falcon: build and send the command to reset a given falcon
104 */
105struct nvkm_msgqueue_acr_func {
106 int (*boot_falcon)(struct nvkm_msgqueue *, enum nvkm_secboot_falcon);
107};
108
109struct nvkm_msgqueue_func {
110 const struct nvkm_msgqueue_init_func *init_func;
111 const struct nvkm_msgqueue_acr_func *acr_func;
112 void (*dtor)(struct nvkm_msgqueue *);
113 struct nvkm_msgqueue_queue *(*cmd_queue)(struct nvkm_msgqueue *,
114 enum msgqueue_msg_priority);
115 void (*recv)(struct nvkm_msgqueue *queue);
116};
117
118/**
119 * struct nvkm_msgqueue_queue - information about a command or message queue
120 *
121 * The number of queues is firmware-dependent. All queues must have their
122 * information filled by the init message handler.
123 *
124 * @mutex: to be acquired when the queue is being used
125 * @index: physical queue index
126 * @offset: DMEM offset where this queue begins
127 * @size: size allocated to this queue in DMEM (in bytes)
128 * @position: current write position
129 * @head_reg: address of the HEAD register for this queue
130 * @tail_reg: address of the TAIL register for this queue
131 */
132struct nvkm_msgqueue_queue {
133 struct mutex mutex;
134 u32 index;
135 u32 offset;
136 u32 size;
137 u32 position;
138
139 u32 head_reg;
140 u32 tail_reg;
141};
142
143/**
144 * struct nvkm_msgqueue_seq - keep track of ongoing commands
145 *
146 * Every time a command is sent, a sequence is assigned to it so the
147 * corresponding message can be matched. Upon receiving the message, a callback
148 * can be called and/or a completion signaled.
149 *
150 * @id: sequence ID
151 * @state: current state
152 * @callback: callback to call upon receiving matching message
153 * @completion: completion to signal after callback is called
154 */
155struct nvkm_msgqueue_seq {
156 u16 id;
157 enum {
158 SEQ_STATE_FREE = 0,
159 SEQ_STATE_PENDING,
160 SEQ_STATE_USED,
161 SEQ_STATE_CANCELLED
162 } state;
163 nvkm_msgqueue_callback callback;
164 struct completion *completion;
165};
166
167/*
168 * We can have an arbitrary number of sequences, but realistically we will
169 * probably not use that many simultaneously.
170 */
171#define NVKM_MSGQUEUE_NUM_SEQUENCES 16
172
173/**
174 * struct nvkm_msgqueue - manage a command/message based FW on a falcon
175 *
176 * @falcon: falcon to be managed
177 * @func: implementation of the firmware to use
178 * @init_msg_received: whether the init message has already been received
179 * @init_done: whether all init is complete and commands can be processed
180 * @seq_lock: protects seq and seq_tbl
181 * @seq: sequences to match commands and messages
182 * @seq_tbl: bitmap of sequences currently in use
183 */
184struct nvkm_msgqueue {
185 struct nvkm_falcon *falcon;
186 const struct nvkm_msgqueue_func *func;
187 u32 fw_version;
188 bool init_msg_received;
189 struct completion init_done;
190
191 struct mutex seq_lock;
192 struct nvkm_msgqueue_seq seq[NVKM_MSGQUEUE_NUM_SEQUENCES];
193 unsigned long seq_tbl[BITS_TO_LONGS(NVKM_MSGQUEUE_NUM_SEQUENCES)];
194};
195
196void nvkm_msgqueue_ctor(const struct nvkm_msgqueue_func *, struct nvkm_falcon *,
197 struct nvkm_msgqueue *);
198int nvkm_msgqueue_post(struct nvkm_msgqueue *, enum msgqueue_msg_priority,
199 struct nvkm_msgqueue_hdr *, nvkm_msgqueue_callback,
200 struct completion *, bool);
201void nvkm_msgqueue_process_msgs(struct nvkm_msgqueue *,
202 struct nvkm_msgqueue_queue *);
203
204int msgqueue_0137c63d_new(struct nvkm_falcon *, struct nvkm_msgqueue **);
205int msgqueue_0148cdec_new(struct nvkm_falcon *, struct nvkm_msgqueue **);
206
207#endif
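One part of the shared code worth a worked example is cmd_queue_has_room() in msgqueue.c: the command queue is a byte ring spanning [offset, offset + size) in DMEM, HEAD is where the host writes, TAIL is where the firmware reads, and HDR_SIZE bytes are held back at the end of the ring, leaving room for the REWIND header that cmd_queue_rewind() writes at the old head. Plugging in small illustrative numbers (offset = 0x1000, size = 0x100, HDR_SIZE = 4):

/*
 * head = 0x10f8, tail = 0x1010 (head >= tail):
 *   free = 0x1000 + 0x100 - 0x10f8 - 4 = 4 bytes left at the end, so a
 *   16-byte command does not fit; *rewind = true and the check restarts
 *   with head = 0x1000 (the REWIND header is emitted on open).
 *
 * head = 0x1000, tail = 0x1010 (head < tail):
 *   free = 0x1010 - 0x1000 - 1 = 15 bytes; 16 still does not fit, so
 *   cmd_queue_open() returns -EAGAIN and cmd_write() retries until the
 *   firmware advances TAIL.
 */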
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c
new file mode 100644
index 000000000000..bba91207fb18
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c
@@ -0,0 +1,323 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include "msgqueue.h"
24#include <engine/falcon.h>
25#include <subdev/secboot.h>
26
27/* Queues identifiers */
28enum {
29 /* High Priority Command Queue for Host -> PMU communication */
30 MSGQUEUE_0137C63D_COMMAND_QUEUE_HPQ = 0,
31 /* Low Priority Command Queue for Host -> PMU communication */
32 MSGQUEUE_0137C63D_COMMAND_QUEUE_LPQ = 1,
33 /* Message queue for PMU -> Host communication */
34 MSGQUEUE_0137C63D_MESSAGE_QUEUE = 4,
35 MSGQUEUE_0137C63D_NUM_QUEUES = 5,
36};
37
38struct msgqueue_0137c63d {
39 struct nvkm_msgqueue base;
40
41 struct nvkm_msgqueue_queue queue[MSGQUEUE_0137C63D_NUM_QUEUES];
42};
43#define msgqueue_0137c63d(q) \
44 container_of(q, struct msgqueue_0137c63d, base)
45
46static struct nvkm_msgqueue_queue *
47msgqueue_0137c63d_cmd_queue(struct nvkm_msgqueue *queue,
48 enum msgqueue_msg_priority priority)
49{
50 struct msgqueue_0137c63d *priv = msgqueue_0137c63d(queue);
51 const struct nvkm_subdev *subdev = priv->base.falcon->owner;
52
53 switch (priority) {
54 case MSGQUEUE_MSG_PRIORITY_HIGH:
55 return &priv->queue[MSGQUEUE_0137C63D_COMMAND_QUEUE_HPQ];
56 case MSGQUEUE_MSG_PRIORITY_LOW:
57 return &priv->queue[MSGQUEUE_0137C63D_COMMAND_QUEUE_LPQ];
58 default:
59 nvkm_error(subdev, "invalid command queue!\n");
60 return ERR_PTR(-EINVAL);
61 }
62}
63
64static void
65msgqueue_0137c63d_process_msgs(struct nvkm_msgqueue *queue)
66{
67 struct msgqueue_0137c63d *priv = msgqueue_0137c63d(queue);
68 struct nvkm_msgqueue_queue *q_queue =
69 &priv->queue[MSGQUEUE_0137C63D_MESSAGE_QUEUE];
70
71 nvkm_msgqueue_process_msgs(&priv->base, q_queue);
72}
73
74/* Init unit */
75#define MSGQUEUE_0137C63D_UNIT_INIT 0x07
76
77enum {
78 INIT_MSG_INIT = 0x0,
79};
80
81static void
82init_gen_cmdline(struct nvkm_msgqueue *queue, void *buf)
83{
84 struct {
85 u32 reserved;
86 u32 freq_hz;
87 u32 trace_size;
88 u32 trace_dma_base;
89 u16 trace_dma_base1;
90 u8 trace_dma_offset;
91 u32 trace_dma_idx;
92 bool secure_mode;
93 bool raise_priv_sec;
94 struct {
95 u32 dma_base;
96 u16 dma_base1;
97 u8 dma_offset;
98 u16 fb_size;
99 u8 dma_idx;
100 } gc6_ctx;
101 u8 pad;
102 } *args = buf;
103
104 args->secure_mode = true;
105}
106
107/* forward declaration */
108static int acr_init_wpr(struct nvkm_msgqueue *queue);
109
110static int
111init_callback(struct nvkm_msgqueue *_queue, struct nvkm_msgqueue_hdr *hdr)
112{
113 struct msgqueue_0137c63d *priv = msgqueue_0137c63d(_queue);
114 struct {
115 struct nvkm_msgqueue_msg base;
116
117 u8 pad;
118 u16 os_debug_entry_point;
119
120 struct {
121 u16 size;
122 u16 offset;
123 u8 index;
124 u8 pad;
125 } queue_info[MSGQUEUE_0137C63D_NUM_QUEUES];
126
127 u16 sw_managed_area_offset;
128 u16 sw_managed_area_size;
129 } *init = (void *)hdr;
130 const struct nvkm_subdev *subdev = _queue->falcon->owner;
131 int i;
132
133 if (init->base.hdr.unit_id != MSGQUEUE_0137C63D_UNIT_INIT) {
134 nvkm_error(subdev, "expected message from init unit\n");
135 return -EINVAL;
136 }
137
138 if (init->base.msg_type != INIT_MSG_INIT) {
139 nvkm_error(subdev, "expected PMU init msg\n");
140 return -EINVAL;
141 }
142
143 for (i = 0; i < MSGQUEUE_0137C63D_NUM_QUEUES; i++) {
144 struct nvkm_msgqueue_queue *queue = &priv->queue[i];
145
146 mutex_init(&queue->mutex);
147
148 queue->index = init->queue_info[i].index;
149 queue->offset = init->queue_info[i].offset;
150 queue->size = init->queue_info[i].size;
151
152 if (i != MSGQUEUE_0137C63D_MESSAGE_QUEUE) {
153 queue->head_reg = 0x4a0 + (queue->index * 4);
154 queue->tail_reg = 0x4b0 + (queue->index * 4);
155 } else {
156 queue->head_reg = 0x4c8;
157 queue->tail_reg = 0x4cc;
158 }
159
160 nvkm_debug(subdev,
161 "queue %d: index %d, offset 0x%08x, size 0x%08x\n",
162 i, queue->index, queue->offset, queue->size);
163 }
164
165 /* Complete initialization by initializing WPR region */
166 return acr_init_wpr(&priv->base);
167}
168
169static const struct nvkm_msgqueue_init_func
170msgqueue_0137c63d_init_func = {
171 .gen_cmdline = init_gen_cmdline,
172 .init_callback = init_callback,
173};
174
175
176
177/* ACR unit */
178#define MSGQUEUE_0137C63D_UNIT_ACR 0x0a
179
180enum {
181 ACR_CMD_INIT_WPR_REGION = 0x00,
182 ACR_CMD_BOOTSTRAP_FALCON = 0x01,
183};
184
185static void
186acr_init_wpr_callback(struct nvkm_msgqueue *queue,
187 struct nvkm_msgqueue_hdr *hdr)
188{
189 struct {
190 struct nvkm_msgqueue_msg base;
191 u32 error_code;
192 } *msg = (void *)hdr;
193 const struct nvkm_subdev *subdev = queue->falcon->owner;
194
195 if (msg->error_code) {
196 nvkm_error(subdev, "ACR WPR init failure: %d\n",
197 msg->error_code);
198 return;
199 }
200
201 nvkm_debug(subdev, "ACR WPR init complete\n");
202 complete_all(&queue->init_done);
203}
204
205static int
206acr_init_wpr(struct nvkm_msgqueue *queue)
207{
208 /*
209 * region_id: region ID in WPR region
210 * wpr_offset: offset in WPR region
211 */
212 struct {
213 struct nvkm_msgqueue_hdr hdr;
214 u8 cmd_type;
215 u32 region_id;
216 u32 wpr_offset;
217 } cmd;
218 memset(&cmd, 0, sizeof(cmd));
219
220 cmd.hdr.unit_id = MSGQUEUE_0137C63D_UNIT_ACR;
221 cmd.hdr.size = sizeof(cmd);
222 cmd.cmd_type = ACR_CMD_INIT_WPR_REGION;
223 cmd.region_id = 0x01;
224 cmd.wpr_offset = 0x00;
225
226 nvkm_msgqueue_post(queue, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr,
227 acr_init_wpr_callback, NULL, false);
228
229 return 0;
230}
231
232
233static void
234acr_boot_falcon_callback(struct nvkm_msgqueue *priv,
235 struct nvkm_msgqueue_hdr *hdr)
236{
237 struct acr_bootstrap_falcon_msg {
238 struct nvkm_msgqueue_msg base;
239
240 u32 falcon_id;
241 } *msg = (void *)hdr;
242 const struct nvkm_subdev *subdev = priv->falcon->owner;
243 u32 falcon_id = msg->falcon_id;
244
245 if (falcon_id >= NVKM_SECBOOT_FALCON_END) {
246 nvkm_error(subdev, "in bootstrap falcon callback:\n");
247 nvkm_error(subdev, "invalid falcon ID 0x%x\n", falcon_id);
248 return;
249 }
250 nvkm_debug(subdev, "%s booted\n", nvkm_secboot_falcon_name[falcon_id]);
251}
252
253enum {
254 ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES = 0,
255 ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_NO = 1,
256};
257
258static int
259acr_boot_falcon(struct nvkm_msgqueue *priv, enum nvkm_secboot_falcon falcon)
260{
261 DECLARE_COMPLETION_ONSTACK(completed);
262 /*
263 * flags - Flag specifying RESET or no RESET.
264 * falcon id - Falcon id specifying falcon to bootstrap.
265 */
266 struct {
267 struct nvkm_msgqueue_hdr hdr;
268 u8 cmd_type;
269 u32 flags;
270 u32 falcon_id;
271 } cmd;
272
273 memset(&cmd, 0, sizeof(cmd));
274
275 cmd.hdr.unit_id = MSGQUEUE_0137C63D_UNIT_ACR;
276 cmd.hdr.size = sizeof(cmd);
277 cmd.cmd_type = ACR_CMD_BOOTSTRAP_FALCON;
278 cmd.flags = ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
279 cmd.falcon_id = falcon;
280 nvkm_msgqueue_post(priv, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr,
281 acr_boot_falcon_callback, &completed, true);
282
283 if (!wait_for_completion_timeout(&completed, msecs_to_jiffies(1000)))
284 return -ETIMEDOUT;
285
286 return 0;
287}
288
289static const struct nvkm_msgqueue_acr_func
290msgqueue_0137c63d_acr_func = {
291 .boot_falcon = acr_boot_falcon,
292};
293
294static void
295msgqueue_0137c63d_dtor(struct nvkm_msgqueue *queue)
296{
297 kfree(msgqueue_0137c63d(queue));
298}
299
300static const struct nvkm_msgqueue_func
301msgqueue_0137c63d_func = {
302 .init_func = &msgqueue_0137c63d_init_func,
303 .acr_func = &msgqueue_0137c63d_acr_func,
304 .cmd_queue = msgqueue_0137c63d_cmd_queue,
305 .recv = msgqueue_0137c63d_process_msgs,
306 .dtor = msgqueue_0137c63d_dtor,
307};
308
309int
310msgqueue_0137c63d_new(struct nvkm_falcon *falcon, struct nvkm_msgqueue **queue)
311{
312 struct msgqueue_0137c63d *ret;
313
314 ret = kzalloc(sizeof(*ret), GFP_KERNEL);
315 if (!ret)
316 return -ENOMEM;
317
318 *queue = &ret->base;
319
320 nvkm_msgqueue_ctor(&msgqueue_0137c63d_func, falcon, &ret->base);
321
322 return 0;
323}
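From a client's perspective all of this plumbing collapses into a single call: ask the managing firmware to bootstrap a falcon and block until the matching message arrives, or the one-second timeout fires. A usage sketch; the caller and the choice of falcon are illustrative:

/* e.g. from a secboot implementation that owns the PMU's msgqueue */
static int
demo_boot_fecs(struct nvkm_msgqueue *queue)
{
	/* posts ACR_CMD_BOOTSTRAP_FALCON and waits for the reply */
	return nvkm_msgqueue_acr_boot_falcon(queue, NVKM_SECBOOT_FALCON_FECS);
}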
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c
new file mode 100644
index 000000000000..ed5d0da4f4e9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c
@@ -0,0 +1,263 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "msgqueue.h"
25#include <engine/falcon.h>
26#include <subdev/secboot.h>
27
28/*
29 * This firmware runs on the SEC falcon. It only has one command and one
30 * message queue, and uses a different command line and init message.
31 */
32
33enum {
34 MSGQUEUE_0148CDEC_COMMAND_QUEUE = 0,
35 MSGQUEUE_0148CDEC_MESSAGE_QUEUE = 1,
36 MSGQUEUE_0148CDEC_NUM_QUEUES,
37};
38
39struct msgqueue_0148cdec {
40 struct nvkm_msgqueue base;
41
42 struct nvkm_msgqueue_queue queue[MSGQUEUE_0148CDEC_NUM_QUEUES];
43};
44#define msgqueue_0148cdec(q) \
45 container_of(q, struct msgqueue_0148cdec, base)
46
47static struct nvkm_msgqueue_queue *
48msgqueue_0148cdec_cmd_queue(struct nvkm_msgqueue *queue,
49 enum msgqueue_msg_priority priority)
50{
51 struct msgqueue_0148cdec *priv = msgqueue_0148cdec(queue);
52
53 return &priv->queue[MSGQUEUE_0148CDEC_COMMAND_QUEUE];
54}
55
56static void
57msgqueue_0148cdec_process_msgs(struct nvkm_msgqueue *queue)
58{
59 struct msgqueue_0148cdec *priv = msgqueue_0148cdec(queue);
60 struct nvkm_msgqueue_queue *q_queue =
61 &priv->queue[MSGQUEUE_0148CDEC_MESSAGE_QUEUE];
62
63 nvkm_msgqueue_process_msgs(&priv->base, q_queue);
64}
65
66
67/* Init unit */
68#define MSGQUEUE_0148CDEC_UNIT_INIT 0x01
69
70enum {
71 INIT_MSG_INIT = 0x0,
72};
73
74static void
75init_gen_cmdline(struct nvkm_msgqueue *queue, void *buf)
76{
77 struct {
78 u32 freq_hz;
79 u32 falc_trace_size;
80 u32 falc_trace_dma_base;
81 u32 falc_trace_dma_idx;
82 bool secure_mode;
83 } *args = buf;
84
85 args->secure_mode = false;
86}
87
88static int
89init_callback(struct nvkm_msgqueue *_queue, struct nvkm_msgqueue_hdr *hdr)
90{
91 struct msgqueue_0148cdec *priv = msgqueue_0148cdec(_queue);
92 struct {
93 struct nvkm_msgqueue_msg base;
94
95 u8 num_queues;
96 u16 os_debug_entry_point;
97
98 struct {
99 u32 offset;
100 u16 size;
101 u8 index;
102 u8 id;
103 } queue_info[MSGQUEUE_0148CDEC_NUM_QUEUES];
104
105 u16 sw_managed_area_offset;
106 u16 sw_managed_area_size;
107 } *init = (void *)hdr;
108 const struct nvkm_subdev *subdev = _queue->falcon->owner;
109 int i;
110
111 if (init->base.hdr.unit_id != MSGQUEUE_0148CDEC_UNIT_INIT) {
112 nvkm_error(subdev, "expected message from init unit\n");
113 return -EINVAL;
114 }
115
116 if (init->base.msg_type != INIT_MSG_INIT) {
117 nvkm_error(subdev, "expected SEC init msg\n");
118 return -EINVAL;
119 }
120
121 for (i = 0; i < MSGQUEUE_0148CDEC_NUM_QUEUES; i++) {
122 u8 id = init->queue_info[i].id;
123 struct nvkm_msgqueue_queue *queue = &priv->queue[id];
124
125 mutex_init(&queue->mutex);
126
127 queue->index = init->queue_info[i].index;
128 queue->offset = init->queue_info[i].offset;
129 queue->size = init->queue_info[i].size;
130
131 if (id == MSGQUEUE_0148CDEC_MESSAGE_QUEUE) {
132 queue->head_reg = 0xa30 + (queue->index * 8);
133 queue->tail_reg = 0xa34 + (queue->index * 8);
134 } else {
135 queue->head_reg = 0xa00 + (queue->index * 8);
136 queue->tail_reg = 0xa04 + (queue->index * 8);
137 }
138
139 nvkm_debug(subdev,
140 "queue %d: index %d, offset 0x%08x, size 0x%08x\n",
141 id, queue->index, queue->offset, queue->size);
142 }
143
144 complete_all(&_queue->init_done);
145
146 return 0;
147}
148
149static const struct nvkm_msgqueue_init_func
150msgqueue_0148cdec_init_func = {
151 .gen_cmdline = init_gen_cmdline,
152 .init_callback = init_callback,
153};
154
155
156
157/* ACR unit */
158#define MSGQUEUE_0148CDEC_UNIT_ACR 0x08
159
160enum {
161 ACR_CMD_BOOTSTRAP_FALCON = 0x00,
162};
163
164static void
165acr_boot_falcon_callback(struct nvkm_msgqueue *priv,
166 struct nvkm_msgqueue_hdr *hdr)
167{
168 struct acr_bootstrap_falcon_msg {
169 struct nvkm_msgqueue_msg base;
170
171 u32 error_code;
172 u32 falcon_id;
173 } *msg = (void *)hdr;
174 const struct nvkm_subdev *subdev = priv->falcon->owner;
175 u32 falcon_id = msg->falcon_id;
176
177 if (msg->error_code) {
178 nvkm_error(subdev, "in bootstrap falcon callback:\n");
179 nvkm_error(subdev, "expected error code 0x%x\n",
180 msg->error_code);
181 return;
182 }
183
184 if (falcon_id >= NVKM_SECBOOT_FALCON_END) {
185 nvkm_error(subdev, "in bootstrap falcon callback:\n");
186 nvkm_error(subdev, "invalid falcon ID 0x%x\n", falcon_id);
187 return;
188 }
189
190 nvkm_debug(subdev, "%s booted\n", nvkm_secboot_falcon_name[falcon_id]);
191}
192
193enum {
194 ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES = 0,
195 ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_NO = 1,
196};
197
198static int
199acr_boot_falcon(struct nvkm_msgqueue *priv, enum nvkm_secboot_falcon falcon)
200{
201 DECLARE_COMPLETION_ONSTACK(completed);
202 /*
203 * flags - Flag specifying RESET or no RESET.
204 * falcon id - Falcon id specifying falcon to bootstrap.
205 */
206 struct {
207 struct nvkm_msgqueue_hdr hdr;
208 u8 cmd_type;
209 u32 flags;
210 u32 falcon_id;
211 } cmd;
212
213 memset(&cmd, 0, sizeof(cmd));
214
215 cmd.hdr.unit_id = MSGQUEUE_0148CDEC_UNIT_ACR;
216 cmd.hdr.size = sizeof(cmd);
217 cmd.cmd_type = ACR_CMD_BOOTSTRAP_FALCON;
218 cmd.flags = ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
219 cmd.falcon_id = falcon;
220 nvkm_msgqueue_post(priv, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr,
221 acr_boot_falcon_callback, &completed, true);
222
223 if (!wait_for_completion_timeout(&completed, msecs_to_jiffies(1000)))
224 return -ETIMEDOUT;
225
226 return 0;
227}
228
229static const struct nvkm_msgqueue_acr_func
230msgqueue_0148cdec_acr_func = {
231 .boot_falcon = acr_boot_falcon,
232};
233
234static void
235msgqueue_0148cdec_dtor(struct nvkm_msgqueue *queue)
236{
237 kfree(msgqueue_0148cdec(queue));
238}
239
240static const struct nvkm_msgqueue_func
241msgqueue_0148cdec_func = {
242 .init_func = &msgqueue_0148cdec_init_func,
243 .acr_func = &msgqueue_0148cdec_acr_func,
244 .cmd_queue = msgqueue_0148cdec_cmd_queue,
245 .recv = msgqueue_0148cdec_process_msgs,
246 .dtor = msgqueue_0148cdec_dtor,
247};
248
249int
250msgqueue_0148cdec_new(struct nvkm_falcon *falcon, struct nvkm_msgqueue **queue)
251{
252 struct msgqueue_0148cdec *ret;
253
254 ret = kzalloc(sizeof(*ret), GFP_KERNEL);
255 if (!ret)
256 return -ENOMEM;
257
258 *queue = &ret->base;
259
260 nvkm_msgqueue_ctor(&msgqueue_0148cdec_func, falcon, &ret->base);
261
262 return 0;
263}
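Note how little distinguishes the two bindings: queue layout, unit IDs, register offsets and the init/cmdline structures differ, while all posting and receiving logic is shared. Selection happens purely on firmware version in nvkm_msgqueue_new(), and the loader is expected to write the generated command line into the falcon before starting it. A hedged sketch of that bring-up order; the buffer size and DMEM offset are illustrative, not what the secboot code necessarily uses:

static int
demo_start_fw(struct nvkm_falcon *falcon, u32 fw_version,
	      struct nvkm_msgqueue **queue)
{
	u8 cmdline[64] = {};	/* illustrative size */
	int ret;

	/* picks msgqueue_0137c63d or msgqueue_0148cdec by version */
	ret = nvkm_msgqueue_new(fw_version, falcon, queue);
	if (ret)
		return ret;

	nvkm_msgqueue_write_cmdline(*queue, cmdline);
	nvkm_falcon_load_dmem(falcon, cmdline, 0, sizeof(cmdline), 0);

	/* ...start the falcon; the first message it sends is the init msg */
	return 0;
}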
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c b/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c
index b537f111f39c..669c24028470 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c
@@ -40,8 +40,8 @@ nvkm_falcon_v1_load_imem(struct nvkm_falcon *falcon, void *data, u32 start,
40 for (i = 0; i < size / 4; i++) { 40 for (i = 0; i < size / 4; i++) {
41 /* write new tag every 256B */ 41 /* write new tag every 256B */
42 if ((i & 0x3f) == 0) 42 if ((i & 0x3f) == 0)
43 nvkm_falcon_wr32(falcon, 0x188, tag++); 43 nvkm_falcon_wr32(falcon, 0x188 + (port * 16), tag++);
44 nvkm_falcon_wr32(falcon, 0x184, ((u32 *)data)[i]); 44 nvkm_falcon_wr32(falcon, 0x184 + (port * 16), ((u32 *)data)[i]);
45 } 45 }
46 46
47 /* 47 /*
@@ -53,37 +53,98 @@ nvkm_falcon_v1_load_imem(struct nvkm_falcon *falcon, void *data, u32 start,
53 53
54 /* write new tag every 256B */ 54 /* write new tag every 256B */
55 if ((i & 0x3f) == 0) 55 if ((i & 0x3f) == 0)
56 nvkm_falcon_wr32(falcon, 0x188, tag++); 56 nvkm_falcon_wr32(falcon, 0x188 + (port * 16), tag++);
57 nvkm_falcon_wr32(falcon, 0x184, extra & (BIT(rem * 8) - 1)); 57 nvkm_falcon_wr32(falcon, 0x184 + (port * 16),
58 extra & (BIT(rem * 8) - 1));
58 ++i; 59 ++i;
59 } 60 }
60 61
61 /* code must be padded to 0x40 words */ 62 /* code must be padded to 0x40 words */
62 for (; i & 0x3f; i++) 63 for (; i & 0x3f; i++)
63 nvkm_falcon_wr32(falcon, 0x184, 0); 64 nvkm_falcon_wr32(falcon, 0x184 + (port * 16), 0);
64} 65}
65 66
66static void 67static void
68nvkm_falcon_v1_load_emem(struct nvkm_falcon *falcon, void *data, u32 start,
69 u32 size, u8 port)
70{
71 u8 rem = size % 4;
72 int i;
73
74 size -= rem;
75
76 nvkm_falcon_wr32(falcon, 0xac0 + (port * 8), start | (0x1 << 24));
77 for (i = 0; i < size / 4; i++)
78 nvkm_falcon_wr32(falcon, 0xac4 + (port * 8), ((u32 *)data)[i]);
79
80 /*
81 * If size is not a multiple of 4, mask the last word to ensure garbage
82 * does not get written
83 */
84 if (rem) {
85 u32 extra = ((u32 *)data)[i];
86
87 nvkm_falcon_wr32(falcon, 0xac4 + (port * 8),
88 extra & (BIT(rem * 8) - 1));
89 }
90}
91
92static const u32 EMEM_START_ADDR = 0x1000000;
93
94static void
67nvkm_falcon_v1_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start, 95nvkm_falcon_v1_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start,
68 u32 size, u8 port) 96 u32 size, u8 port)
69{ 97{
70 u8 rem = size % 4; 98 u8 rem = size % 4;
71 int i; 99 int i;
72 100
101 if (start >= EMEM_START_ADDR && falcon->has_emem)
102 return nvkm_falcon_v1_load_emem(falcon, data,
103 start - EMEM_START_ADDR, size,
104 port);
105
73 size -= rem; 106 size -= rem;
74 107
75 nvkm_falcon_wr32(falcon, 0x1c0 + (port * 16), start | (0x1 << 24)); 108 nvkm_falcon_wr32(falcon, 0x1c0 + (port * 8), start | (0x1 << 24));
76 for (i = 0; i < size / 4; i++) 109 for (i = 0; i < size / 4; i++)
77 nvkm_falcon_wr32(falcon, 0x1c4, ((u32 *)data)[i]); 110 nvkm_falcon_wr32(falcon, 0x1c4 + (port * 8), ((u32 *)data)[i]);
78 111
79 /* 112 /*
80 * If size is not a multiple of 4, mask the last work to ensure garbage 113 * If size is not a multiple of 4, mask the last word to ensure garbage
81 * does not get read 114 * does not get written
82 */ 115 */
83 if (rem) { 116 if (rem) {
84 u32 extra = ((u32 *)data)[i]; 117 u32 extra = ((u32 *)data)[i];
85 118
86 nvkm_falcon_wr32(falcon, 0x1c4, extra & (BIT(rem * 8) - 1)); 119 nvkm_falcon_wr32(falcon, 0x1c4 + (port * 8),
120 extra & (BIT(rem * 8) - 1));
121 }
122}
123
124static void
125nvkm_falcon_v1_read_emem(struct nvkm_falcon *falcon, u32 start, u32 size,
126 u8 port, void *data)
127{
128 u8 rem = size % 4;
129 int i;
130
131 size -= rem;
132
133 nvkm_falcon_wr32(falcon, 0xac0 + (port * 8), start | (0x1 << 25));
134 for (i = 0; i < size / 4; i++)
135 ((u32 *)data)[i] = nvkm_falcon_rd32(falcon, 0xac4 + (port * 8));
136
137 /*
138 * If size is not a multiple of 4, mask the last word to ensure garbage
139 * does not get read
140 */
141 if (rem) {
142 u32 extra = nvkm_falcon_rd32(falcon, 0xac4 + (port * 8));
143
144 for (i = size; i < size + rem; i++) {
145 ((u8 *)data)[i] = (u8)(extra & 0xff);
146 extra >>= 8;
147 }
87 } 148 }
88} 149}
89 150
@@ -94,18 +155,22 @@ nvkm_falcon_v1_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size,
94 u8 rem = size % 4; 155 u8 rem = size % 4;
95 int i; 156 int i;
96 157
158 if (start >= EMEM_START_ADDR && falcon->has_emem)
159 return nvkm_falcon_v1_read_emem(falcon, start - EMEM_START_ADDR,
160 size, port, data);
161
97 size -= rem; 162 size -= rem;
98 163
99 nvkm_falcon_wr32(falcon, 0x1c0 + (port * 16), start | (0x1 << 25)); 164 nvkm_falcon_wr32(falcon, 0x1c0 + (port * 8), start | (0x1 << 25));
100 for (i = 0; i < size / 4; i++) 165 for (i = 0; i < size / 4; i++)
101 ((u32 *)data)[i] = nvkm_falcon_rd32(falcon, 0x1c4); 166 ((u32 *)data)[i] = nvkm_falcon_rd32(falcon, 0x1c4 + (port * 8));
102 167
103 /* 168 /*
104 * If size is not a multiple of 4, mask the last work to ensure garbage 169 * If size is not a multiple of 4, mask the last word to ensure garbage
105 * does not get read 170 * does not get read
106 */ 171 */
107 if (rem) { 172 if (rem) {
108 u32 extra = nvkm_falcon_rd32(falcon, 0x1c4); 173 u32 extra = nvkm_falcon_rd32(falcon, 0x1c4 + (port * 8));
109 174
110 for (i = size; i < size + rem; i++) { 175 for (i = size; i < size + rem; i++) {
111 ((u8 *)data)[i] = (u8)(extra & 0xff); 176 ((u8 *)data)[i] = (u8)(extra & 0xff);
@@ -118,6 +183,7 @@ static void
118nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_gpuobj *ctx) 183nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_gpuobj *ctx)
119{ 184{
120 u32 inst_loc; 185 u32 inst_loc;
186 u32 fbif;
121 187
122 /* disable instance block binding */ 188 /* disable instance block binding */
123 if (ctx == NULL) { 189 if (ctx == NULL) {
@@ -125,19 +191,34 @@ nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_gpuobj *ctx)
125 return; 191 return;
126 } 192 }
127 193
194 switch (falcon->owner->index) {
195 case NVKM_ENGINE_NVENC0:
196 case NVKM_ENGINE_NVENC1:
197 case NVKM_ENGINE_NVENC2:
198 fbif = 0x800;
199 break;
200 case NVKM_SUBDEV_PMU:
201 fbif = 0xe00;
202 break;
203 default:
204 fbif = 0x600;
205 break;
206 }
207
128 nvkm_falcon_wr32(falcon, 0x10c, 0x1); 208 nvkm_falcon_wr32(falcon, 0x10c, 0x1);
129 209
130 /* setup apertures - virtual */ 210 /* setup apertures - virtual */
131 nvkm_falcon_wr32(falcon, 0xe00 + 4 * FALCON_DMAIDX_UCODE, 0x4); 211 nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_UCODE, 0x4);
132 nvkm_falcon_wr32(falcon, 0xe00 + 4 * FALCON_DMAIDX_VIRT, 0x0); 212 nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_VIRT, 0x0);
133 /* setup apertures - physical */ 213 /* setup apertures - physical */
134 nvkm_falcon_wr32(falcon, 0xe00 + 4 * FALCON_DMAIDX_PHYS_VID, 0x4); 214 nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_PHYS_VID, 0x4);
135 nvkm_falcon_wr32(falcon, 0xe00 + 4 * FALCON_DMAIDX_PHYS_SYS_COH, 0x5); 215 nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_PHYS_SYS_COH, 0x5);
136 nvkm_falcon_wr32(falcon, 0xe00 + 4 * FALCON_DMAIDX_PHYS_SYS_NCOH, 0x6); 216 nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_PHYS_SYS_NCOH, 0x6);
137 217
138 /* Set context */ 218 /* Set context */
139 switch (nvkm_memory_target(ctx->memory)) { 219 switch (nvkm_memory_target(ctx->memory)) {
140 case NVKM_MEM_TARGET_VRAM: inst_loc = 0; break; 220 case NVKM_MEM_TARGET_VRAM: inst_loc = 0; break;
221 case NVKM_MEM_TARGET_HOST: inst_loc = 2; break;
141 case NVKM_MEM_TARGET_NCOH: inst_loc = 3; break; 222 case NVKM_MEM_TARGET_NCOH: inst_loc = 3; break;
142 default: 223 default:
143 WARN_ON(1); 224 WARN_ON(1);
@@ -146,9 +227,12 @@ nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_gpuobj *ctx)
146 227
147 /* Enable context */ 228 /* Enable context */
148 nvkm_falcon_mask(falcon, 0x048, 0x1, 0x1); 229 nvkm_falcon_mask(falcon, 0x048, 0x1, 0x1);
149 nvkm_falcon_wr32(falcon, 0x480, 230 nvkm_falcon_wr32(falcon, 0x054,
150 ((ctx->addr >> 12) & 0xfffffff) | 231 ((ctx->addr >> 12) & 0xfffffff) |
151 (inst_loc << 28) | (1 << 30)); 232 (inst_loc << 28) | (1 << 30));
233
234 nvkm_falcon_mask(falcon, 0x090, 0x10000, 0x10000);
235 nvkm_falcon_mask(falcon, 0x0a4, 0x8, 0x8);
152} 236}
153 237
154static void 238static void
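The EMEM changes deserve a note: on SEC2 the queues visible to the host live in EMEM rather than DMEM, and this patch maps EMEM into the same address space by redirecting any access at or above EMEM_START_ADDR (0x1000000) to the EMEM port registers (0xac0/0xac4 instead of 0x1c0/0x1c4). A small sketch of the routing decision, with a hypothetical helper:

#define DEMO_EMEM_START 0x1000000	/* mirrors EMEM_START_ADDR above */

/* hypothetical helper: split a combined address into (is_emem, offset) */
static bool
demo_route(const struct nvkm_falcon *falcon, u32 addr, u32 *off)
{
	if (falcon->has_emem && addr >= DEMO_EMEM_START) {
		*off = addr - DEMO_EMEM_START;	/* offset inside EMEM */
		return true;
	}
	*off = addr;				/* plain DMEM offset */
	return false;
}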
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
index 63566ba12fbb..1c5e5ba487a8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
@@ -20,6 +20,7 @@ nvkm-y += nvkm/subdev/fb/gt215.o
20nvkm-y += nvkm/subdev/fb/mcp77.o 20nvkm-y += nvkm/subdev/fb/mcp77.o
21nvkm-y += nvkm/subdev/fb/mcp89.o 21nvkm-y += nvkm/subdev/fb/mcp89.o
22nvkm-y += nvkm/subdev/fb/gf100.o 22nvkm-y += nvkm/subdev/fb/gf100.o
23nvkm-y += nvkm/subdev/fb/gf108.o
23nvkm-y += nvkm/subdev/fb/gk104.o 24nvkm-y += nvkm/subdev/fb/gk104.o
24nvkm-y += nvkm/subdev/fb/gk20a.o 25nvkm-y += nvkm/subdev/fb/gk20a.o
25nvkm-y += nvkm/subdev/fb/gm107.o 26nvkm-y += nvkm/subdev/fb/gm107.o
@@ -42,8 +43,10 @@ nvkm-y += nvkm/subdev/fb/ramnv50.o
42nvkm-y += nvkm/subdev/fb/ramgt215.o 43nvkm-y += nvkm/subdev/fb/ramgt215.o
43nvkm-y += nvkm/subdev/fb/rammcp77.o 44nvkm-y += nvkm/subdev/fb/rammcp77.o
44nvkm-y += nvkm/subdev/fb/ramgf100.o 45nvkm-y += nvkm/subdev/fb/ramgf100.o
46nvkm-y += nvkm/subdev/fb/ramgf108.o
45nvkm-y += nvkm/subdev/fb/ramgk104.o 47nvkm-y += nvkm/subdev/fb/ramgk104.o
46nvkm-y += nvkm/subdev/fb/ramgm107.o 48nvkm-y += nvkm/subdev/fb/ramgm107.o
49nvkm-y += nvkm/subdev/fb/ramgm200.o
47nvkm-y += nvkm/subdev/fb/ramgp100.o 50nvkm-y += nvkm/subdev/fb/ramgp100.o
48nvkm-y += nvkm/subdev/fb/sddr2.o 51nvkm-y += nvkm/subdev/fb/sddr2.o
49nvkm-y += nvkm/subdev/fb/sddr3.o 52nvkm-y += nvkm/subdev/fb/sddr3.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c
new file mode 100644
index 000000000000..56af84aa333b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c
@@ -0,0 +1,42 @@
1/*
2 * Copyright 2017 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "gf100.h"
25#include "ram.h"
26
27static const struct nvkm_fb_func
28gf108_fb = {
29 .dtor = gf100_fb_dtor,
30 .oneinit = gf100_fb_oneinit,
31 .init = gf100_fb_init,
32 .init_page = gf100_fb_init_page,
33 .intr = gf100_fb_intr,
34 .ram_new = gf108_ram_new,
35 .memtype_valid = gf100_fb_memtype_valid,
36};
37
38int
39gf108_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
40{
41 return gf100_fb_new_(&gf108_fb, device, index, pfb);
42}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
index fe5886013ac0..d83da5ddbc1e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
@@ -68,7 +68,7 @@ gm200_fb = {
 	.init = gm200_fb_init,
 	.init_page = gm200_fb_init_page,
 	.intr = gf100_fb_intr,
-	.ram_new = gm107_ram_new,
+	.ram_new = gm200_ram_new,
 	.memtype_valid = gf100_fb_memtype_valid,
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
index b60068b7d8f9..fac7e73c3ddf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
@@ -19,13 +19,38 @@ int nv50_ram_get(struct nvkm_ram *, u64, u32, u32, u32, struct nvkm_mem **);
 void nv50_ram_put(struct nvkm_ram *, struct nvkm_mem **);
 void __nv50_ram_put(struct nvkm_ram *, struct nvkm_mem *);
 
+int gf100_ram_new_(const struct nvkm_ram_func *, struct nvkm_fb *,
+		   struct nvkm_ram **);
 int gf100_ram_ctor(const struct nvkm_ram_func *, struct nvkm_fb *,
-		   u32, struct nvkm_ram *);
+		   struct nvkm_ram *);
+u32 gf100_ram_probe_fbp(const struct nvkm_ram_func *,
+			struct nvkm_device *, int, int *);
+u32 gf100_ram_probe_fbp_amount(const struct nvkm_ram_func *, u32,
+			       struct nvkm_device *, int, int *);
+u32 gf100_ram_probe_fbpa_amount(struct nvkm_device *, int);
 int gf100_ram_get(struct nvkm_ram *, u64, u32, u32, u32, struct nvkm_mem **);
 void gf100_ram_put(struct nvkm_ram *, struct nvkm_mem **);
+int gf100_ram_init(struct nvkm_ram *);
+int gf100_ram_calc(struct nvkm_ram *, u32);
+int gf100_ram_prog(struct nvkm_ram *);
+void gf100_ram_tidy(struct nvkm_ram *);
+
+u32 gf108_ram_probe_fbp_amount(const struct nvkm_ram_func *, u32,
+			       struct nvkm_device *, int, int *);
+
+int gk104_ram_new_(const struct nvkm_ram_func *, struct nvkm_fb *,
+		   struct nvkm_ram **);
+void *gk104_ram_dtor(struct nvkm_ram *);
+int gk104_ram_init(struct nvkm_ram *);
+int gk104_ram_calc(struct nvkm_ram *, u32);
+int gk104_ram_prog(struct nvkm_ram *);
+void gk104_ram_tidy(struct nvkm_ram *);
+
+u32 gm107_ram_probe_fbp(const struct nvkm_ram_func *,
+			struct nvkm_device *, int, int *);
 
-int gk104_ram_ctor(struct nvkm_fb *, struct nvkm_ram **, u32);
-int gk104_ram_init(struct nvkm_ram *ram);
+u32 gm200_ram_probe_fbp_amount(const struct nvkm_ram_func *, u32,
+			       struct nvkm_device *, int, int *);
 
 /* RAM type-specific MR calculation routines */
 int nvkm_sddr2_calc(struct nvkm_ram *);
@@ -46,7 +71,9 @@ int nv50_ram_new(struct nvkm_fb *, struct nvkm_ram **);
 int gt215_ram_new(struct nvkm_fb *, struct nvkm_ram **);
 int mcp77_ram_new(struct nvkm_fb *, struct nvkm_ram **);
 int gf100_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int gf108_ram_new(struct nvkm_fb *, struct nvkm_ram **);
 int gk104_ram_new(struct nvkm_fb *, struct nvkm_ram **);
 int gm107_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int gm200_ram_new(struct nvkm_fb *, struct nvkm_ram **);
 int gp100_ram_new(struct nvkm_fb *, struct nvkm_ram **);
 #endif
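
Taken together, the declarations above split VRAM detection into three layered hooks: probe_fbp() reads the chip's FBP floorsweeping register and delegates, probe_fbp_amount() decides which FBPAs feed a given FBP and how many LTCs it keeps, and probe_fbpa_amount() reads the per-FBPA size register. A sketch of how they compose (gxNNN is a hypothetical chip; the hook combinations are ones that appear in the files later in this diff):

static const struct nvkm_ram_func
gxNNN_ram = {
	.upper = 0x1000000000,				/* base of the "upper" VRAM window */
	.probe_fbp = gm107_ram_probe_fbp,		/* which floorsweeping fuse to read */
	.probe_fbp_amount = gm200_ram_probe_fbp_amount,	/* FBP -> FBPA/LTC mapping */
	.probe_fbpa_amount = gf100_ram_probe_fbpa_amount, /* per-FBPA size register */
	.init = gk104_ram_init,
	.get = gf100_ram_get,
	.put = gf100_ram_put,
	/* remaining hooks (calc/prog/tidy/dtor) omitted for brevity */
};
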
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
index 6758da93a3a1..53c32fc694e9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
@@ -124,7 +124,7 @@ gf100_ram_train(struct gf100_ramfuc *fuc, u32 magic)
 	}
 }
 
-static int
+int
 gf100_ram_calc(struct nvkm_ram *base, u32 freq)
 {
 	struct gf100_ram *ram = gf100_ram(base);
@@ -404,7 +404,7 @@ gf100_ram_calc(struct nvkm_ram *base, u32 freq)
 	return 0;
 }
 
-static int
+int
 gf100_ram_prog(struct nvkm_ram *base)
 {
 	struct gf100_ram *ram = gf100_ram(base);
@@ -413,7 +413,7 @@ gf100_ram_prog(struct nvkm_ram *base)
 	return 0;
 }
 
-static void
+void
 gf100_ram_tidy(struct nvkm_ram *base)
 {
 	struct gf100_ram *ram = gf100_ram(base);
@@ -500,7 +500,7 @@ gf100_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
 	return 0;
 }
 
-static int
+int
 gf100_ram_init(struct nvkm_ram *base)
 {
 	static const u8 train0[] = {
@@ -543,77 +543,96 @@ gf100_ram_init(struct nvkm_ram *base)
 	return 0;
 }
 
-static const struct nvkm_ram_func
-gf100_ram_func = {
-	.init = gf100_ram_init,
-	.get = gf100_ram_get,
-	.put = gf100_ram_put,
-	.calc = gf100_ram_calc,
-	.prog = gf100_ram_prog,
-	.tidy = gf100_ram_tidy,
-};
+u32
+gf100_ram_probe_fbpa_amount(struct nvkm_device *device, int fbpa)
+{
+	return nvkm_rd32(device, 0x11020c + (fbpa * 0x1000));
+}
+
+u32
+gf100_ram_probe_fbp_amount(const struct nvkm_ram_func *func, u32 fbpao,
+			   struct nvkm_device *device, int fbp, int *pltcs)
+{
+	if (!(fbpao & BIT(fbp))) {
+		*pltcs = 1;
+		return func->probe_fbpa_amount(device, fbp);
+	}
+	return 0;
+}
+
+u32
+gf100_ram_probe_fbp(const struct nvkm_ram_func *func,
+		    struct nvkm_device *device, int fbp, int *pltcs)
+{
+	u32 fbpao = nvkm_rd32(device, 0x022554);
+	return func->probe_fbp_amount(func, fbpao, device, fbp, pltcs);
+}
 
 int
 gf100_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
-	       u32 maskaddr, struct nvkm_ram *ram)
+	       struct nvkm_ram *ram)
 {
 	struct nvkm_subdev *subdev = &fb->subdev;
 	struct nvkm_device *device = subdev->device;
 	struct nvkm_bios *bios = device->bios;
 	const u32 rsvd_head = ( 256 * 1024); /* vga memory */
 	const u32 rsvd_tail = (1024 * 1024); /* vbios etc */
-	u32 parts = nvkm_rd32(device, 0x022438);
-	u32 pmask = nvkm_rd32(device, maskaddr);
-	u64 bsize = (u64)nvkm_rd32(device, 0x10f20c) << 20;
-	u64 psize, size = 0;
 	enum nvkm_ram_type type = nvkm_fb_bios_memtype(bios);
-	bool uniform = true;
-	int ret, i;
-
-	nvkm_debug(subdev, "100800: %08x\n", nvkm_rd32(device, 0x100800));
-	nvkm_debug(subdev, "parts %08x mask %08x\n", parts, pmask);
-
-	/* read amount of vram attached to each memory controller */
-	for (i = 0; i < parts; i++) {
-		if (pmask & (1 << i))
-			continue;
-
-		psize = (u64)nvkm_rd32(device, 0x11020c + (i * 0x1000)) << 20;
-		if (psize != bsize) {
-			if (psize < bsize)
-				bsize = psize;
-			uniform = false;
+	u32 fbps = nvkm_rd32(device, 0x022438);
+	u64 total = 0, lcomm = ~0, lower, ubase, usize;
+	int ret, fbp, ltcs, ltcn = 0;
+
+	nvkm_debug(subdev, "%d FBP(s)\n", fbps);
+	for (fbp = 0; fbp < fbps; fbp++) {
+		u32 size = func->probe_fbp(func, device, fbp, &ltcs);
+		if (size) {
+			nvkm_debug(subdev, "FBP %d: %4d MiB, %d LTC(s)\n",
+				   fbp, size, ltcs);
+			lcomm = min(lcomm, (u64)(size / ltcs) << 20);
+			total += size << 20;
+			ltcn += ltcs;
+		} else {
+			nvkm_debug(subdev, "FBP %d: disabled\n", fbp);
 		}
-
-		nvkm_debug(subdev, "%d: %d MiB\n", i, (u32)(psize >> 20));
-		size += psize;
 	}
 
-	ret = nvkm_ram_ctor(func, fb, type, size, 0, ram);
+	lower = lcomm * ltcn;
+	ubase = lcomm + func->upper;
+	usize = total - lower;
+
+	nvkm_debug(subdev, "Lower: %4lld MiB @ %010llx\n", lower >> 20, 0ULL);
+	nvkm_debug(subdev, "Upper: %4lld MiB @ %010llx\n", usize >> 20, ubase);
+	nvkm_debug(subdev, "Total: %4lld MiB\n", total >> 20);
+
+	ret = nvkm_ram_ctor(func, fb, type, total, 0, ram);
 	if (ret)
 		return ret;
 
 	nvkm_mm_fini(&ram->vram);
 
-	/* if all controllers have the same amount attached, there's no holes */
-	if (uniform) {
+	/* Some GPUs are in what's known as a "mixed memory" configuration.
+	 *
+	 * This is either where some FBPs have more memory than the others,
+	 * or where LTCs have been disabled on a FBP.
+	 */
+	if (lower != total) {
+		/* The common memory amount is addressed normally. */
 		ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
-				   (size - rsvd_head - rsvd_tail) >>
-				   NVKM_RAM_MM_SHIFT, 1);
+				   (lower - rsvd_head) >> NVKM_RAM_MM_SHIFT, 1);
 		if (ret)
 			return ret;
-	} else {
-		/* otherwise, address lowest common amount from 0GiB */
-		ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
-				   ((bsize * parts) - rsvd_head) >>
-				   NVKM_RAM_MM_SHIFT, 1);
+
+		/* And the rest is much higher in the physical address
+		 * space, and may not be usable for certain operations.
+		 */
+		ret = nvkm_mm_init(&ram->vram, ubase >> NVKM_RAM_MM_SHIFT,
+				   (usize - rsvd_tail) >> NVKM_RAM_MM_SHIFT, 1);
 		if (ret)
 			return ret;
-
-		/* and the rest starting from (8GiB + common_size) */
-		ret = nvkm_mm_init(&ram->vram, (0x0200000000ULL + bsize) >>
-				   NVKM_RAM_MM_SHIFT,
-				   (size - (bsize * parts) - rsvd_tail) >>
+	} else {
+		/* GPUs without mixed-memory are a lot nicer... */
+		ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+				   (total - rsvd_head - rsvd_tail) >>
 				   NVKM_RAM_MM_SHIFT, 1);
 		if (ret)
 			return ret;
@@ -624,7 +643,8 @@ gf100_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
 }
 
 int
-gf100_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
+gf100_ram_new_(const struct nvkm_ram_func *func,
+	       struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
 	struct nvkm_subdev *subdev = &fb->subdev;
 	struct nvkm_bios *bios = subdev->device->bios;
@@ -635,7 +655,7 @@ gf100_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 		return -ENOMEM;
 	*pram = &ram->base;
 
-	ret = gf100_ram_ctor(&gf100_ram_func, fb, 0x022554, &ram->base);
+	ret = gf100_ram_ctor(func, fb, &ram->base);
 	if (ret)
 		return ret;
 
@@ -711,3 +731,23 @@ gf100_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 	ram->fuc.r_0x13d8f4 = ramfuc_reg(0x13d8f4);
 	return 0;
 }
+
+static const struct nvkm_ram_func
+gf100_ram = {
+	.upper = 0x0200000000,
+	.probe_fbp = gf100_ram_probe_fbp,
+	.probe_fbp_amount = gf100_ram_probe_fbp_amount,
+	.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
+	.init = gf100_ram_init,
+	.get = gf100_ram_get,
+	.put = gf100_ram_put,
+	.calc = gf100_ram_calc,
+	.prog = gf100_ram_prog,
+	.tidy = gf100_ram_tidy,
+};
+
+int
+gf100_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
+{
+	return gf100_ram_new_(&gf100_ram, fb, pram);
+}
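
The rewritten gf100_ram_ctor() above derives the whole VRAM layout from two numbers per FBP: its size and its LTC count. A worked example may help; the following is a stand-alone sketch (the board configuration is invented) that reproduces the lower/upper split arithmetic for a gf100-family part with one LTC floorswept:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Invented board: 4 FBPs of 1024 MiB each, but FBP 3 has one of
	 * its two LTCs disabled, halving its per-LTC amount.
	 */
	const uint32_t size_mib[4] = { 1024, 1024, 1024, 1024 };
	const int ltcs[4] = { 2, 2, 2, 1 };
	const uint64_t upper = 0x0200000000ULL;	/* gf100_ram.upper: 8 GiB */
	uint64_t total = 0, lcomm = ~0ULL;
	int fbp, ltcn = 0;

	for (fbp = 0; fbp < 4; fbp++) {
		uint64_t per_ltc = ((uint64_t)size_mib[fbp] << 20) / ltcs[fbp];
		if (per_ltc < lcomm)
			lcomm = per_ltc;	/* smallest per-LTC amount */
		total += (uint64_t)size_mib[fbp] << 20;
		ltcn += ltcs[fbp];
	}

	/* Mirrors the driver: lcomm * ltcn is linearly addressable from 0,
	 * and the remainder reappears at upper + lcomm.
	 */
	printf("lower %4llu MiB @ 0x%010llx\n",
	       (unsigned long long)((lcomm * ltcn) >> 20), 0ULL);
	printf("upper %4llu MiB @ 0x%010llx\n",
	       (unsigned long long)((total - lcomm * ltcn) >> 20),
	       (unsigned long long)(lcomm + upper));
	return 0;
}

This prints "lower 3584 MiB @ 0x0000000000" and "upper 512 MiB @ 0x0220000000": 3.5 GiB usable normally, with the last 0.5 GiB pushed into the high window.
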
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c
new file mode 100644
index 000000000000..985ec64cf369
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "ram.h"
+
+u32
+gf108_ram_probe_fbp_amount(const struct nvkm_ram_func *func, u32 fbpao,
+			   struct nvkm_device *device, int fbp, int *pltcs)
+{
+	u32 fbpt = nvkm_rd32(device, 0x022438);
+	u32 fbpat = nvkm_rd32(device, 0x02243c);
+	u32 fbpas = fbpat / fbpt;
+	u32 fbpa = fbp * fbpas;
+	u32 size = 0;
+	while (fbpas--) {
+		if (!(fbpao & BIT(fbpa)))
+			size += func->probe_fbpa_amount(device, fbpa);
+		fbpa++;
+	}
+	*pltcs = 1;
+	return size;
+}
+
+static const struct nvkm_ram_func
+gf108_ram = {
+	.upper = 0x0200000000,
+	.probe_fbp = gf100_ram_probe_fbp,
+	.probe_fbp_amount = gf108_ram_probe_fbp_amount,
+	.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
+	.init = gf100_ram_init,
+	.get = gf100_ram_get,
+	.put = gf100_ram_put,
+	.calc = gf100_ram_calc,
+	.prog = gf100_ram_prog,
+	.tidy = gf100_ram_tidy,
+};
+
+int
+gf108_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
+{
+	return gf100_ram_new_(&gf108_ram, fb, pram);
+}
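
Unlike gf100, a gf108-style FBP can span several FBPAs: the hook above derives FBPAs-per-FBP by dividing the FBPA count (0x02243c) by the FBP count (0x022438), then sums the sizes of the FBPAs that are not floorswept. A stand-alone sketch with invented register values:

#include <stdio.h>
#include <stdint.h>

/* Stand-in for func->probe_fbpa_amount(): pretend each active FBPA
 * carries 512 MiB.
 */
static uint32_t probe_fbpa_amount(int fbpa) { return 512; }

int main(void)
{
	uint32_t fbpt = 2, fbpat = 4;	/* invented: 2 FBPs, 4 FBPAs */
	uint32_t fbpas = fbpat / fbpt;	/* 2 FBPAs per FBP */
	uint32_t fbpao = 0x4;		/* invented fuse: FBPA 2 disabled */
	int fbp = 1;
	uint32_t fbpa = fbp * fbpas, size = 0, n = fbpas;

	while (n--) {
		if (!(fbpao & (1u << fbpa)))
			size += probe_fbpa_amount(fbpa);
		fbpa++;
	}
	printf("FBP %d: %u MiB\n", fbp, size);	/* FBP 1: 512 MiB */
	return 0;
}

FBP 1 owns FBPAs 2 and 3 here; since FBPA 2 is fused off, only FBPA 3 contributes.
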
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
index fb8a1239743d..f6c00791722c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
@@ -1108,7 +1108,7 @@ gk104_ram_calc_xits(struct gk104_ram *ram, struct nvkm_ram_data *next)
 	return ret;
 }
 
-static int
+int
 gk104_ram_calc(struct nvkm_ram *base, u32 freq)
 {
 	struct gk104_ram *ram = gk104_ram(base);
@@ -1227,7 +1227,7 @@ gk104_ram_prog_0(struct gk104_ram *ram, u32 freq)
 	nvkm_mask(device, 0x10f444, mask, data);
 }
 
-static int
+int
 gk104_ram_prog(struct nvkm_ram *base)
 {
 	struct gk104_ram *ram = gk104_ram(base);
@@ -1247,7 +1247,7 @@ gk104_ram_prog(struct nvkm_ram *base)
 	return (ram->base.next == &ram->base.xition);
 }
 
-static void
+void
 gk104_ram_tidy(struct nvkm_ram *base)
 {
 	struct gk104_ram *ram = gk104_ram(base);
@@ -1509,7 +1509,7 @@ done:
 	return ret;
 }
 
-static void *
+void *
 gk104_ram_dtor(struct nvkm_ram *base)
 {
 	struct gk104_ram *ram = gk104_ram(base);
@@ -1522,31 +1522,14 @@ gk104_ram_dtor(struct nvkm_ram *base)
 	return ram;
 }
 
-static const struct nvkm_ram_func
-gk104_ram_func = {
-	.dtor = gk104_ram_dtor,
-	.init = gk104_ram_init,
-	.get = gf100_ram_get,
-	.put = gf100_ram_put,
-	.calc = gk104_ram_calc,
-	.prog = gk104_ram_prog,
-	.tidy = gk104_ram_tidy,
-};
-
 int
-gk104_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
-{
-	return gk104_ram_ctor(fb, pram, 0x022554);
-}
-
-int
-gk104_ram_ctor(struct nvkm_fb *fb, struct nvkm_ram **pram, u32 maskaddr)
+gk104_ram_new_(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
+	       struct nvkm_ram **pram)
 {
 	struct nvkm_subdev *subdev = &fb->subdev;
 	struct nvkm_device *device = subdev->device;
 	struct nvkm_bios *bios = device->bios;
-	struct nvkm_gpio *gpio = device->gpio;
-	struct dcb_gpio_func func;
+	struct dcb_gpio_func gpio;
 	struct gk104_ram *ram;
 	int ret, i;
 	u8 ramcfg = nvbios_ramcfg_index(subdev);
@@ -1556,7 +1539,7 @@ gk104_ram_ctor(struct nvkm_fb *fb, struct nvkm_ram **pram, u32 maskaddr)
 		return -ENOMEM;
 	*pram = &ram->base;
 
-	ret = gf100_ram_ctor(&gk104_ram_func, fb, maskaddr, &ram->base);
+	ret = gf100_ram_ctor(func, fb, &ram->base);
 	if (ret)
 		return ret;
 
@@ -1614,18 +1597,18 @@ gk104_ram_ctor(struct nvkm_fb *fb, struct nvkm_ram **pram, u32 maskaddr)
 	}
 
 	/* lookup memory voltage gpios */
-	ret = nvkm_gpio_find(gpio, 0, 0x18, DCB_GPIO_UNUSED, &func);
+	ret = nvkm_gpio_find(device->gpio, 0, 0x18, DCB_GPIO_UNUSED, &gpio);
 	if (ret == 0) {
-		ram->fuc.r_gpioMV = ramfuc_reg(0x00d610 + (func.line * 0x04));
-		ram->fuc.r_funcMV[0] = (func.log[0] ^ 2) << 12;
-		ram->fuc.r_funcMV[1] = (func.log[1] ^ 2) << 12;
+		ram->fuc.r_gpioMV = ramfuc_reg(0x00d610 + (gpio.line * 0x04));
+		ram->fuc.r_funcMV[0] = (gpio.log[0] ^ 2) << 12;
+		ram->fuc.r_funcMV[1] = (gpio.log[1] ^ 2) << 12;
 	}
 
-	ret = nvkm_gpio_find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func);
+	ret = nvkm_gpio_find(device->gpio, 0, 0x2e, DCB_GPIO_UNUSED, &gpio);
 	if (ret == 0) {
-		ram->fuc.r_gpio2E = ramfuc_reg(0x00d610 + (func.line * 0x04));
-		ram->fuc.r_func2E[0] = (func.log[0] ^ 2) << 12;
-		ram->fuc.r_func2E[1] = (func.log[1] ^ 2) << 12;
+		ram->fuc.r_gpio2E = ramfuc_reg(0x00d610 + (gpio.line * 0x04));
+		ram->fuc.r_func2E[0] = (gpio.log[0] ^ 2) << 12;
+		ram->fuc.r_func2E[1] = (gpio.log[1] ^ 2) << 12;
 	}
 
 	ram->fuc.r_gpiotrig = ramfuc_reg(0x00d604);
@@ -1717,3 +1700,24 @@ gk104_ram_ctor(struct nvkm_fb *fb, struct nvkm_ram **pram, u32 maskaddr)
 	ram->fuc.r_0x100750 = ramfuc_reg(0x100750);
 	return 0;
 }
+
+static const struct nvkm_ram_func
+gk104_ram = {
+	.upper = 0x0200000000,
+	.probe_fbp = gf100_ram_probe_fbp,
+	.probe_fbp_amount = gf108_ram_probe_fbp_amount,
+	.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
+	.dtor = gk104_ram_dtor,
+	.init = gk104_ram_init,
+	.get = gf100_ram_get,
+	.put = gf100_ram_put,
+	.calc = gk104_ram_calc,
+	.prog = gk104_ram_prog,
+	.tidy = gk104_ram_tidy,
+};
+
+int
+gk104_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
+{
+	return gk104_ram_new_(&gk104_ram, fb, pram);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
index ac862d1d77bd..3f0b56347291 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
@@ -23,8 +23,31 @@
  */
 #include "ram.h"
 
+u32
+gm107_ram_probe_fbp(const struct nvkm_ram_func *func,
+		    struct nvkm_device *device, int fbp, int *pltcs)
+{
+	u32 fbpao = nvkm_rd32(device, 0x021c14);
+	return func->probe_fbp_amount(func, fbpao, device, fbp, pltcs);
+}
+
+static const struct nvkm_ram_func
+gm107_ram = {
+	.upper = 0x1000000000,
+	.probe_fbp = gm107_ram_probe_fbp,
+	.probe_fbp_amount = gf108_ram_probe_fbp_amount,
+	.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
+	.dtor = gk104_ram_dtor,
+	.init = gk104_ram_init,
+	.get = gf100_ram_get,
+	.put = gf100_ram_put,
+	.calc = gk104_ram_calc,
+	.prog = gk104_ram_prog,
+	.tidy = gk104_ram_tidy,
+};
+
 int
 gm107_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
-	return gk104_ram_ctor(fb, pram, 0x021c14);
+	return gk104_ram_new_(&gm107_ram, fb, pram);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c
new file mode 100644
index 000000000000..fd8facf90476
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "ram.h"
+
+u32
+gm200_ram_probe_fbp_amount(const struct nvkm_ram_func *func, u32 fbpao,
+			   struct nvkm_device *device, int fbp, int *pltcs)
+{
+	u32 ltcs = nvkm_rd32(device, 0x022450);
+	u32 fbpas = nvkm_rd32(device, 0x022458);
+	u32 fbpa = fbp * fbpas;
+	u32 size = 0;
+	if (!(nvkm_rd32(device, 0x021d38) & BIT(fbp))) {
+		u32 ltco = nvkm_rd32(device, 0x021d70 + (fbp * 4));
+		u32 ltcm = ~ltco & ((1 << ltcs) - 1);
+
+		while (fbpas--) {
+			if (!(fbpao & (1 << fbpa)))
+				size += func->probe_fbpa_amount(device, fbpa);
+			fbpa++;
+		}
+
+		*pltcs = hweight32(ltcm);
+	}
+	return size;
+}
+
+static const struct nvkm_ram_func
+gm200_ram = {
+	.upper = 0x1000000000,
+	.probe_fbp = gm107_ram_probe_fbp,
+	.probe_fbp_amount = gm200_ram_probe_fbp_amount,
+	.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
+	.dtor = gk104_ram_dtor,
+	.init = gk104_ram_init,
+	.get = gf100_ram_get,
+	.put = gf100_ram_put,
+	.calc = gk104_ram_calc,
+	.prog = gk104_ram_prog,
+	.tidy = gk104_ram_tidy,
+};
+
+int
+gm200_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
+{
+	return gk104_ram_new_(&gm200_ram, fb, pram);
+}
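
gm200 additionally honours per-FBP LTC floorsweeping: 0x021d70 + fbp*4 holds a mask of disabled LTCs (ltco), which is inverted, clamped to the chip's LTC-per-FBP count, and popcounted. A tiny stand-alone sketch with invented values, where __builtin_popcount() plays the role of the kernel's hweight32():

#include <stdio.h>

int main(void)
{
	unsigned ltcs = 2;	/* invented: 2 LTCs per FBP */
	unsigned ltco = 0x1;	/* invented fuse: LTC 0 disabled */
	unsigned ltcm = ~ltco & ((1u << ltcs) - 1);

	printf("%d active LTC(s)\n", __builtin_popcount(ltcm));	/* 1 */
	return 0;
}

A FBP reduced to a single LTC like this is exactly what halves its per-LTC amount in gf100_ram_ctor() and triggers the mixed-memory layout.
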
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
index 405faabe8dcd..cac70047ad5a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
@@ -76,8 +76,18 @@ gp100_ram_init(struct nvkm_ram *ram)
 	return 0;
 }
 
+static u32
+gp100_ram_probe_fbpa(struct nvkm_device *device, int fbpa)
+{
+	return nvkm_rd32(device, 0x90020c + (fbpa * 0x4000));
+}
+
 static const struct nvkm_ram_func
-gp100_ram_func = {
+gp100_ram = {
+	.upper = 0x1000000000,
+	.probe_fbp = gm107_ram_probe_fbp,
+	.probe_fbp_amount = gm200_ram_probe_fbp_amount,
+	.probe_fbpa_amount = gp100_ram_probe_fbpa,
 	.init = gp100_ram_init,
 	.get = gf100_ram_get,
 	.put = gf100_ram_put,
@@ -87,60 +97,10 @@ int
 gp100_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
 	struct nvkm_ram *ram;
-	struct nvkm_subdev *subdev = &fb->subdev;
-	struct nvkm_device *device = subdev->device;
-	enum nvkm_ram_type type = nvkm_fb_bios_memtype(device->bios);
-	const u32 rsvd_head = ( 256 * 1024); /* vga memory */
-	const u32 rsvd_tail = (1024 * 1024); /* vbios etc */
-	u32 fbpa_num = nvkm_rd32(device, 0x02243c), fbpa;
-	u32 fbio_opt = nvkm_rd32(device, 0x021c14);
-	u64 part, size = 0, comm = ~0ULL;
-	bool mixed = false;
-	int ret;
-
-	nvkm_debug(subdev, "02243c: %08x\n", fbpa_num);
-	nvkm_debug(subdev, "021c14: %08x\n", fbio_opt);
-	for (fbpa = 0; fbpa < fbpa_num; fbpa++) {
-		if (!(fbio_opt & (1 << fbpa))) {
-			part = nvkm_rd32(device, 0x90020c + (fbpa * 0x4000));
-			nvkm_debug(subdev, "fbpa %02x: %lld MiB\n", fbpa, part);
-			part = part << 20;
-			if (part != comm) {
-				if (comm != ~0ULL)
-					mixed = true;
-				comm = min(comm, part);
-			}
-			size = size + part;
-		}
-	}
-
-	ret = nvkm_ram_new_(&gp100_ram_func, fb, type, size, 0, &ram);
-	*pram = ram;
-	if (ret)
-		return ret;
 
-	nvkm_mm_fini(&ram->vram);
+	if (!(ram = *pram = kzalloc(sizeof(*ram), GFP_KERNEL)))
+		return -ENOMEM;
 
-	if (mixed) {
-		ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
-				   ((comm * fbpa_num) - rsvd_head) >>
-				   NVKM_RAM_MM_SHIFT, 1);
-		if (ret)
-			return ret;
+	return gf100_ram_ctor(&gp100_ram, fb, ram);
 
-		ret = nvkm_mm_init(&ram->vram, (0x1000000000ULL + comm) >>
-				   NVKM_RAM_MM_SHIFT,
-				   (size - (comm * fbpa_num) - rsvd_tail) >>
-				   NVKM_RAM_MM_SHIFT, 1);
-		if (ret)
-			return ret;
-	} else {
-		ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
-				   (size - rsvd_head - rsvd_tail) >>
-				   NVKM_RAM_MM_SHIFT, 1);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c
index b7b01c3f7037..dd391809fef7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c
@@ -134,7 +134,7 @@ struct anx9805_aux {
 
 static int
 anx9805_aux_xfer(struct nvkm_i2c_aux *base, bool retry,
-		 u8 type, u32 addr, u8 *data, u8 size)
+		 u8 type, u32 addr, u8 *data, u8 *size)
 {
 	struct anx9805_aux *aux = anx9805_aux(base);
 	struct anx9805_pad *pad = aux->pad;
@@ -143,7 +143,7 @@ anx9805_aux_xfer(struct nvkm_i2c_aux *base, bool retry,
 	u8 buf[16] = {};
 	u8 tmp;
 
-	AUX_DBG(&aux->base, "%02x %05x %d", type, addr, size);
+	AUX_DBG(&aux->base, "%02x %05x %d", type, addr, *size);
 
 	tmp = nvkm_rdi2cr(adap, pad->addr, 0x07) & ~0x04;
 	nvkm_wri2cr(adap, pad->addr, 0x07, tmp | 0x04);
@@ -152,12 +152,12 @@ anx9805_aux_xfer(struct nvkm_i2c_aux *base, bool retry,
 
 	nvkm_wri2cr(adap, aux->addr, 0xe4, 0x80);
 	if (!(type & 1)) {
-		memcpy(buf, data, size);
+		memcpy(buf, data, *size);
 		AUX_DBG(&aux->base, "%16ph", buf);
-		for (i = 0; i < size; i++)
+		for (i = 0; i < *size; i++)
 			nvkm_wri2cr(adap, aux->addr, 0xf0 + i, buf[i]);
 	}
-	nvkm_wri2cr(adap, aux->addr, 0xe5, ((size - 1) << 4) | type);
+	nvkm_wri2cr(adap, aux->addr, 0xe5, ((*size - 1) << 4) | type);
 	nvkm_wri2cr(adap, aux->addr, 0xe6, (addr & 0x000ff) >> 0);
 	nvkm_wri2cr(adap, aux->addr, 0xe7, (addr & 0x0ff00) >> 8);
 	nvkm_wri2cr(adap, aux->addr, 0xe8, (addr & 0xf0000) >> 16);
@@ -176,10 +176,10 @@ anx9805_aux_xfer(struct nvkm_i2c_aux *base, bool retry,
 	}
 
 	if (type & 1) {
-		for (i = 0; i < size; i++)
+		for (i = 0; i < *size; i++)
 			buf[i] = nvkm_rdi2cr(adap, aux->addr, 0xf0 + i);
 		AUX_DBG(&aux->base, "%16ph", buf);
-		memcpy(data, buf, size);
+		memcpy(data, buf, *size);
 	}
 
 	ret = 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
index 01d5c5a56e2e..d172e42dd228 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
@@ -51,7 +51,7 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
 		if (mcnt || remaining > 16)
 			cmd |= 4; /* MOT */
 
-		ret = aux->func->xfer(aux, true, cmd, msg->addr, ptr, cnt);
+		ret = aux->func->xfer(aux, true, cmd, msg->addr, ptr, &cnt);
 		if (ret < 0) {
 			nvkm_i2c_aux_release(aux);
 			return ret;
@@ -115,7 +115,7 @@ nvkm_i2c_aux_acquire(struct nvkm_i2c_aux *aux)
 
 int
 nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *aux, bool retry, u8 type,
-		  u32 addr, u8 *data, u8 size)
+		  u32 addr, u8 *data, u8 *size)
 {
 	return aux->func->xfer(aux, retry, type, addr, data, size);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
index fc6b162fa0b1..27a4a39c87f0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
@@ -4,7 +4,7 @@
 
 struct nvkm_i2c_aux_func {
 	int (*xfer)(struct nvkm_i2c_aux *, bool retry, u8 type,
-		    u32 addr, u8 *data, u8 size);
+		    u32 addr, u8 *data, u8 *size);
 	int (*lnk_ctl)(struct nvkm_i2c_aux *, int link_nr, int link_bw,
 		       bool enhanced_framing);
 };
@@ -15,7 +15,7 @@ int nvkm_i2c_aux_new_(const struct nvkm_i2c_aux_func *, struct nvkm_i2c_pad *,
 		      int id, struct nvkm_i2c_aux **);
 void nvkm_i2c_aux_del(struct nvkm_i2c_aux **);
 int nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *, bool retry, u8 type,
-		      u32 addr, u8 *data, u8 size);
+		      u32 addr, u8 *data, u8 *size);
 
 int g94_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **);
 int gm200_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **);
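
The xfer() signature change from "u8 size" to "u8 *size" runs through every AUX implementation in this series: callers pass the requested length in and read the actually-transferred length back out, which the DP AUX protocol allows to be shorter than requested (a short read). A hypothetical caller fragment, assuming an already-acquired channel, an AUX read command value in cmd, and a DPCD address in dpcd_addr:

	/* Sketch only: error handling trimmed, names are illustrative. */
	u8 buf[16];
	u8 size = sizeof(buf);		/* in: bytes requested */
	int ret = nvkm_i2c_aux_xfer(aux, true, cmd, dpcd_addr, buf, &size);
	if (ret < 0)
		return ret;
	/* out: 'size' now holds how many bytes the sink really returned;
	 * the g94/gm200 implementations below fill it from the status
	 * register (stat & 0x1f), so a short reply can be retried instead
	 * of silently treated as complete.
	 */
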
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
index b80236a4eeac..ab8cb196c34e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
@@ -74,7 +74,7 @@ g94_i2c_aux_init(struct g94_i2c_aux *aux)
 
 static int
 g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
-		 u8 type, u32 addr, u8 *data, u8 size)
+		 u8 type, u32 addr, u8 *data, u8 *size)
 {
 	struct g94_i2c_aux *aux = g94_i2c_aux(obj);
 	struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
@@ -83,7 +83,7 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
 	u32 xbuf[4] = {};
 	int ret, i;
 
-	AUX_TRACE(&aux->base, "%d: %08x %d", type, addr, size);
+	AUX_TRACE(&aux->base, "%d: %08x %d", type, addr, *size);
 
 	ret = g94_i2c_aux_init(aux);
 	if (ret < 0)
@@ -97,7 +97,7 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
 	}
 
 	if (!(type & 1)) {
-		memcpy(xbuf, data, size);
+		memcpy(xbuf, data, *size);
 		for (i = 0; i < 16; i += 4) {
 			AUX_TRACE(&aux->base, "wr %08x", xbuf[i / 4]);
 			nvkm_wr32(device, 0x00e4c0 + base + i, xbuf[i / 4]);
@@ -107,7 +107,7 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
 	ctrl = nvkm_rd32(device, 0x00e4e4 + base);
 	ctrl &= ~0x0001f0ff;
 	ctrl |= type << 12;
-	ctrl |= size - 1;
+	ctrl |= *size - 1;
 	nvkm_wr32(device, 0x00e4e0 + base, addr);
 
 	/* (maybe) retry transaction a number of times on failure... */
@@ -151,7 +151,8 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
 			xbuf[i / 4] = nvkm_rd32(device, 0x00e4d0 + base + i);
 			AUX_TRACE(&aux->base, "rd %08x", xbuf[i / 4]);
 		}
-		memcpy(data, xbuf, size);
+		memcpy(data, xbuf, *size);
+		*size = stat & 0x0000001f;
 	}
 
 out:
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
index ed458c7f056b..ee091fa79628 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
@@ -74,7 +74,7 @@ gm200_i2c_aux_init(struct gm200_i2c_aux *aux)
 
 static int
 gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
-		   u8 type, u32 addr, u8 *data, u8 size)
+		   u8 type, u32 addr, u8 *data, u8 *size)
 {
 	struct gm200_i2c_aux *aux = gm200_i2c_aux(obj);
 	struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
@@ -83,7 +83,7 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
 	u32 xbuf[4] = {};
 	int ret, i;
 
-	AUX_TRACE(&aux->base, "%d: %08x %d", type, addr, size);
+	AUX_TRACE(&aux->base, "%d: %08x %d", type, addr, *size);
 
 	ret = gm200_i2c_aux_init(aux);
 	if (ret < 0)
@@ -97,7 +97,7 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
 	}
 
 	if (!(type & 1)) {
-		memcpy(xbuf, data, size);
+		memcpy(xbuf, data, *size);
 		for (i = 0; i < 16; i += 4) {
 			AUX_TRACE(&aux->base, "wr %08x", xbuf[i / 4]);
 			nvkm_wr32(device, 0x00d930 + base + i, xbuf[i / 4]);
@@ -107,7 +107,7 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
 	ctrl = nvkm_rd32(device, 0x00d954 + base);
 	ctrl &= ~0x0001f0ff;
 	ctrl |= type << 12;
-	ctrl |= size - 1;
+	ctrl |= *size - 1;
 	nvkm_wr32(device, 0x00d950 + base, addr);
 
 	/* (maybe) retry transaction a number of times on failure... */
@@ -151,7 +151,8 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
 			xbuf[i / 4] = nvkm_rd32(device, 0x00d940 + base + i);
 			AUX_TRACE(&aux->base, "rd %08x", xbuf[i / 4]);
 		}
-		memcpy(data, xbuf, size);
+		memcpy(data, xbuf, *size);
+		*size = stat & 0x0000001f;
 	}
 
 out:
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
index 2c6b374f1420..d80dbc8f09b2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
@@ -30,7 +30,7 @@ gf100_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
 	u32 addr = nvkm_rd32(device, 0x122120 + (i * 0x0400));
 	u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0400));
 	u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0400));
-	nvkm_error(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
+	nvkm_debug(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
 	nvkm_mask(device, 0x122128 + (i * 0x0400), 0x00000200, 0x00000000);
 }
 
@@ -41,7 +41,7 @@ gf100_ibus_intr_rop(struct nvkm_subdev *ibus, int i)
 	u32 addr = nvkm_rd32(device, 0x124120 + (i * 0x0400));
 	u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0400));
 	u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0400));
-	nvkm_error(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
+	nvkm_debug(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
 	nvkm_mask(device, 0x124128 + (i * 0x0400), 0x00000200, 0x00000000);
 }
 
@@ -52,7 +52,7 @@ gf100_ibus_intr_gpc(struct nvkm_subdev *ibus, int i)
 	u32 addr = nvkm_rd32(device, 0x128120 + (i * 0x0400));
 	u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0400));
 	u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0400));
-	nvkm_error(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
+	nvkm_debug(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
 	nvkm_mask(device, 0x128128 + (i * 0x0400), 0x00000200, 0x00000000);
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
index c673853f3213..9025ed1bd2a9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
@@ -30,7 +30,7 @@ gk104_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
 	u32 addr = nvkm_rd32(device, 0x122120 + (i * 0x0800));
 	u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0800));
 	u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0800));
-	nvkm_error(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
+	nvkm_debug(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
 	nvkm_mask(device, 0x122128 + (i * 0x0800), 0x00000200, 0x00000000);
 }
 
@@ -41,7 +41,7 @@ gk104_ibus_intr_rop(struct nvkm_subdev *ibus, int i)
 	u32 addr = nvkm_rd32(device, 0x124120 + (i * 0x0800));
 	u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0800));
 	u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0800));
-	nvkm_error(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
+	nvkm_debug(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
 	nvkm_mask(device, 0x124128 + (i * 0x0800), 0x00000200, 0x00000000);
 }
 
@@ -52,7 +52,7 @@ gk104_ibus_intr_gpc(struct nvkm_subdev *ibus, int i)
 	u32 addr = nvkm_rd32(device, 0x128120 + (i * 0x0800));
 	u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0800));
 	u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0800));
-	nvkm_error(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
+	nvkm_debug(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
 	nvkm_mask(device, 0x128128 + (i * 0x0800), 0x00000200, 0x00000000);
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
index a73f690eb4b5..3306f9fe7140 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
@@ -23,6 +23,7 @@
  */
 #include "priv.h"
 
+#include <core/msgqueue.h>
 #include <subdev/timer.h>
 
 void
@@ -85,7 +86,8 @@ nvkm_pmu_reset(struct nvkm_pmu *pmu)
 	);
 
 	/* Reset. */
-	pmu->func->reset(pmu);
+	if (pmu->func->reset)
+		pmu->func->reset(pmu);
 
 	/* Wait for IMEM/DMEM scrubbing to be complete. */
 	nvkm_msec(device, 2000,
@@ -113,10 +115,18 @@ nvkm_pmu_init(struct nvkm_subdev *subdev)
 	return ret;
 }
 
+static int
+nvkm_pmu_oneinit(struct nvkm_subdev *subdev)
+{
+	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
+	return nvkm_falcon_v1_new(&pmu->subdev, "PMU", 0x10a000, &pmu->falcon);
+}
+
 static void *
 nvkm_pmu_dtor(struct nvkm_subdev *subdev)
 {
 	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
+	nvkm_msgqueue_del(&pmu->queue);
 	nvkm_falcon_del(&pmu->falcon);
 	return nvkm_pmu(subdev);
 }
@@ -125,6 +135,7 @@ static const struct nvkm_subdev_func
 nvkm_pmu = {
 	.dtor = nvkm_pmu_dtor,
 	.preinit = nvkm_pmu_preinit,
+	.oneinit = nvkm_pmu_oneinit,
 	.init = nvkm_pmu_init,
 	.fini = nvkm_pmu_fini,
 	.intr = nvkm_pmu_intr,
@@ -138,7 +149,7 @@ nvkm_pmu_ctor(const struct nvkm_pmu_func *func, struct nvkm_device *device,
 	pmu->func = func;
 	INIT_WORK(&pmu->recv.work, nvkm_pmu_recv);
 	init_waitqueue_head(&pmu->recv.wait);
-	return nvkm_falcon_v1_new(&pmu->subdev, "PMU", 0x10a000, &pmu->falcon);
+	return 0;
 }
 
 int
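
Two related changes here: falcon construction moves from nvkm_pmu_ctor() to a new oneinit hook, and the reset hook becomes optional so that implementations like gm20b (next file) can omit it. The guarded-hook pattern generalizes; a stand-alone sketch of the idea (names invented, not the driver's types):

#include <stdio.h>

struct pmu_func {
	void (*reset)(void);	/* optional: may be NULL */
};

static void gt215_reset(void) { printf("reset\n"); }

static void pmu_reset(const struct pmu_func *func)
{
	/* The pattern the diff introduces: a NULL pointer in the per-chip
	 * func table means "this implementation doesn't need the step".
	 */
	if (func->reset)
		func->reset();
}

int main(void)
{
	const struct pmu_func gt215 = { .reset = gt215_reset };
	const struct pmu_func gm20b = { 0 };	/* no reset hook */

	pmu_reset(&gt215);	/* prints "reset" */
	pmu_reset(&gm20b);	/* no-op */
	return 0;
}
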
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
index 0b8a1cc4a0ee..48ae02d45656 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
@@ -20,15 +20,30 @@
  * DEALINGS IN THE SOFTWARE.
  */
 
+#include <engine/falcon.h>
+#include <core/msgqueue.h>
 #include "priv.h"
 
+static void
+gm20b_pmu_recv(struct nvkm_pmu *pmu)
+{
+	nvkm_msgqueue_recv(pmu->queue);
+}
+
 static const struct nvkm_pmu_func
 gm20b_pmu = {
-	.reset = gt215_pmu_reset,
+	.intr = gt215_pmu_intr,
+	.recv = gm20b_pmu_recv,
 };
 
 int
 gm20b_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
 {
-	return nvkm_pmu_new_(&gm20b_pmu, device, index, ppmu);
+	int ret;
+
+	ret = nvkm_pmu_new_(&gm20b_pmu, device, index, ppmu);
+	if (ret)
+		return ret;
+
+	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild
index 5076d1500f47..ac7f50ae53c6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild
@@ -1,7 +1,13 @@
 nvkm-y += nvkm/subdev/secboot/base.o
+nvkm-y += nvkm/subdev/secboot/hs_ucode.o
 nvkm-y += nvkm/subdev/secboot/ls_ucode_gr.o
+nvkm-y += nvkm/subdev/secboot/ls_ucode_msgqueue.o
 nvkm-y += nvkm/subdev/secboot/acr.o
 nvkm-y += nvkm/subdev/secboot/acr_r352.o
 nvkm-y += nvkm/subdev/secboot/acr_r361.o
+nvkm-y += nvkm/subdev/secboot/acr_r364.o
+nvkm-y += nvkm/subdev/secboot/acr_r367.o
+nvkm-y += nvkm/subdev/secboot/acr_r375.o
 nvkm-y += nvkm/subdev/secboot/gm200.o
 nvkm-y += nvkm/subdev/secboot/gm20b.o
+nvkm-y += nvkm/subdev/secboot/gp102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h
index 97795b342b6f..93d804652d44 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h
@@ -37,12 +37,10 @@ struct nvkm_acr_func {
 	void (*dtor)(struct nvkm_acr *);
 	int (*oneinit)(struct nvkm_acr *, struct nvkm_secboot *);
 	int (*fini)(struct nvkm_acr *, struct nvkm_secboot *, bool);
-	int (*load)(struct nvkm_acr *, struct nvkm_secboot *,
+	int (*load)(struct nvkm_acr *, struct nvkm_falcon *,
 		    struct nvkm_gpuobj *, u64);
 	int (*reset)(struct nvkm_acr *, struct nvkm_secboot *,
 		     enum nvkm_secboot_falcon);
-	int (*start)(struct nvkm_acr *, struct nvkm_secboot *,
-		     enum nvkm_secboot_falcon);
 };
 
 /**
@@ -50,7 +48,7 @@ struct nvkm_acr_func {
  *
  * @boot_falcon: ID of the falcon that will perform secure boot
  * @managed_falcons: bitfield of falcons managed by this ACR
- * @start_address: virtual start address of the HS bootloader
+ * @optional_falcons: bitfield of falcons we can live without
  */
 struct nvkm_acr {
 	const struct nvkm_acr_func *func;
@@ -58,12 +56,15 @@ struct nvkm_acr {
 
 	enum nvkm_secboot_falcon boot_falcon;
 	unsigned long managed_falcons;
-	u32 start_address;
+	unsigned long optional_falcons;
 };
 
 void *nvkm_acr_load_firmware(const struct nvkm_subdev *, const char *, size_t);
 
 struct nvkm_acr *acr_r352_new(unsigned long);
 struct nvkm_acr *acr_r361_new(unsigned long);
+struct nvkm_acr *acr_r364_new(unsigned long);
+struct nvkm_acr *acr_r367_new(enum nvkm_secboot_falcon, unsigned long);
+struct nvkm_acr *acr_r375_new(enum nvkm_secboot_falcon, unsigned long);
 
 #endif
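
The new optional_falcons bitfield changes the contract: a falcon listed there may fail firmware lookup without aborting secure boot, as the acr_r352.c hunk below implements. An illustrative fragment of the intended use (the ACR flavour and falcon IDs are stand-ins; only the fields are the ones declared above):

	struct nvkm_acr *acr = acr_r352_new(BIT(NVKM_SECBOOT_FALCON_FECS) |
					    BIT(NVKM_SECBOOT_FALCON_GPCCS) |
					    BIT(NVKM_SECBOOT_FALCON_SEC2));
	if (acr) {
		/* SEC2 firmware may legitimately be missing on some boards:
		 * mark it optional instead of failing secure boot outright.
		 */
		acr->optional_falcons = BIT(NVKM_SECBOOT_FALCON_SEC2);
	}
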
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
index 1aa37ea18580..993a38eb3ed5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
@@ -21,35 +21,16 @@
21 */ 21 */
22 22
23#include "acr_r352.h" 23#include "acr_r352.h"
24#include "hs_ucode.h"
24 25
25#include <core/gpuobj.h> 26#include <core/gpuobj.h>
26#include <core/firmware.h> 27#include <core/firmware.h>
27#include <engine/falcon.h> 28#include <engine/falcon.h>
28 29#include <subdev/mc.h>
29/** 30#include <subdev/timer.h>
30 * struct hsf_fw_header - HS firmware descriptor 31#include <subdev/pmu.h>
31 * @sig_dbg_offset: offset of the debug signature 32#include <core/msgqueue.h>
32 * @sig_dbg_size: size of the debug signature 33#include <engine/sec2.h>
33 * @sig_prod_offset: offset of the production signature
34 * @sig_prod_size: size of the production signature
35 * @patch_loc: offset of the offset (sic) of where the signature is
36 * @patch_sig: offset of the offset (sic) to add to sig_*_offset
37 * @hdr_offset: offset of the load header (see struct hs_load_header)
38 * @hdr_size: size of above header
39 *
40 * This structure is embedded in the HS firmware image at
41 * hs_bin_hdr.header_offset.
42 */
43struct hsf_fw_header {
44 u32 sig_dbg_offset;
45 u32 sig_dbg_size;
46 u32 sig_prod_offset;
47 u32 sig_prod_size;
48 u32 patch_loc;
49 u32 patch_sig;
50 u32 hdr_offset;
51 u32 hdr_size;
52};
53 34
54/** 35/**
55 * struct acr_r352_flcn_bl_desc - DMEM bootloader descriptor 36 * struct acr_r352_flcn_bl_desc - DMEM bootloader descriptor
@@ -95,15 +76,14 @@ struct acr_r352_flcn_bl_desc {
95 */ 76 */
96static void 77static void
97acr_r352_generate_flcn_bl_desc(const struct nvkm_acr *acr, 78acr_r352_generate_flcn_bl_desc(const struct nvkm_acr *acr,
98 const struct ls_ucode_img *_img, u64 wpr_addr, 79 const struct ls_ucode_img *img, u64 wpr_addr,
99 void *_desc) 80 void *_desc)
100{ 81{
101 struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img);
102 struct acr_r352_flcn_bl_desc *desc = _desc; 82 struct acr_r352_flcn_bl_desc *desc = _desc;
103 const struct ls_ucode_img_desc *pdesc = &_img->ucode_desc; 83 const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
104 u64 base, addr_code, addr_data; 84 u64 base, addr_code, addr_data;
105 85
106 base = wpr_addr + img->lsb_header.ucode_off + pdesc->app_start_offset; 86 base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
107 addr_code = (base + pdesc->app_resident_code_offset) >> 8; 87 addr_code = (base + pdesc->app_resident_code_offset) >> 8;
108 addr_data = (base + pdesc->app_resident_data_offset) >> 8; 88 addr_data = (base + pdesc->app_resident_data_offset) >> 8;
109 89
@@ -167,6 +147,96 @@ struct hsflcn_acr_desc {
167 */ 147 */
168 148
169/** 149/**
150 * struct acr_r352_lsf_lsb_header - LS firmware header
151 * @signature: signature to verify the firmware against
152 * @ucode_off: offset of the ucode blob in the WPR region. The ucode
153 * blob contains the bootloader, code and data of the
154 * LS falcon
155 * @ucode_size: size of the ucode blob, including bootloader
156 * @data_size: size of the ucode blob data
157 * @bl_code_size: size of the bootloader code
158 * @bl_imem_off: offset in imem of the bootloader
159 * @bl_data_off: offset of the bootloader data in WPR region
160 * @bl_data_size: size of the bootloader data
161 * @app_code_off: offset of the app code relative to ucode_off
162 * @app_code_size: size of the app code
163 * @app_data_off: offset of the app data relative to ucode_off
164 * @app_data_size: size of the app data
165 * @flags: flags for the secure bootloader
166 *
167 * This structure is written into the WPR region for each managed falcon. Each
168 * instance is referenced by the lsb_offset member of the corresponding
169 * lsf_wpr_header.
170 */
171struct acr_r352_lsf_lsb_header {
172 /**
173 * LS falcon signatures
174 * @prd_keys: signature to use in production mode
175 * @dgb_keys: signature to use in debug mode
176 * @b_prd_present: whether the production key is present
177 * @b_dgb_present: whether the debug key is present
178 * @falcon_id: ID of the falcon the ucode applies to
179 */
180 struct {
181 u8 prd_keys[2][16];
182 u8 dbg_keys[2][16];
183 u32 b_prd_present;
184 u32 b_dbg_present;
185 u32 falcon_id;
186 } signature;
187 u32 ucode_off;
188 u32 ucode_size;
189 u32 data_size;
190 u32 bl_code_size;
191 u32 bl_imem_off;
192 u32 bl_data_off;
193 u32 bl_data_size;
194 u32 app_code_off;
195 u32 app_code_size;
196 u32 app_data_off;
197 u32 app_data_size;
198 u32 flags;
199};
200
201/**
202 * struct acr_r352_lsf_wpr_header - LS blob WPR Header
203 * @falcon_id: LS falcon ID
204 * @lsb_offset: offset of the lsb_lsf_header in the WPR region
205 * @bootstrap_owner: secure falcon reponsible for bootstrapping the LS falcon
206 * @lazy_bootstrap: skip bootstrapping by ACR
207 * @status: bootstrapping status
208 *
209 * An array of these is written at the beginning of the WPR region, one for
210 * each managed falcon. The array is terminated by an instance which falcon_id
211 * is LSF_FALCON_ID_INVALID.
212 */
213struct acr_r352_lsf_wpr_header {
214 u32 falcon_id;
215 u32 lsb_offset;
216 u32 bootstrap_owner;
217 u32 lazy_bootstrap;
218 u32 status;
219#define LSF_IMAGE_STATUS_NONE 0
220#define LSF_IMAGE_STATUS_COPY 1
221#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED 2
222#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED 3
223#define LSF_IMAGE_STATUS_VALIDATION_DONE 4
224#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED 5
225#define LSF_IMAGE_STATUS_BOOTSTRAP_READY 6
226};
227
228/**
229 * struct ls_ucode_img_r352 - ucode image augmented with r352 headers
230 */
231struct ls_ucode_img_r352 {
232 struct ls_ucode_img base;
233
234 struct acr_r352_lsf_wpr_header wpr_header;
235 struct acr_r352_lsf_lsb_header lsb_header;
236};
237#define ls_ucode_img_r352(i) container_of(i, struct ls_ucode_img_r352, base)
238
239/**
170 * ls_ucode_img_load() - create a lsf_ucode_img and load it 240 * ls_ucode_img_load() - create a lsf_ucode_img and load it
171 */ 241 */
172struct ls_ucode_img * 242struct ls_ucode_img *
@@ -255,7 +325,7 @@ acr_r352_ls_img_fill_headers(struct acr_r352 *acr,
255 * image size 325 * image size
256 */ 326 */
257 offset = ALIGN(offset, LSF_UCODE_DATA_ALIGN); 327 offset = ALIGN(offset, LSF_UCODE_DATA_ALIGN);
258 lhdr->ucode_off = offset; 328 _img->ucode_off = lhdr->ucode_off = offset;
259 offset += _img->ucode_size; 329 offset += _img->ucode_size;
260 330
261 /* 331 /*
@@ -341,7 +411,7 @@ acr_r352_ls_fill_headers(struct acr_r352 *acr, struct list_head *imgs)
341 */ 411 */
342int 412int
343acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs, 413acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
344 struct nvkm_gpuobj *wpr_blob, u32 wpr_addr) 414 struct nvkm_gpuobj *wpr_blob, u64 wpr_addr)
345{ 415{
346 struct ls_ucode_img *_img; 416 struct ls_ucode_img *_img;
347 u32 pos = 0; 417 u32 pos = 0;
@@ -381,8 +451,8 @@ acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
381 return 0; 451 return 0;
382} 452}
383 453
384/* Both size and address of WPR need to be 128K-aligned */ 454/* Both size and address of WPR need to be 256K-aligned */
385#define WPR_ALIGNMENT 0x20000 455#define WPR_ALIGNMENT 0x40000
386/** 456/**
387 * acr_r352_prepare_ls_blob() - prepare the LS blob 457 * acr_r352_prepare_ls_blob() - prepare the LS blob
388 * 458 *
@@ -399,7 +469,7 @@ acr_r352_prepare_ls_blob(struct acr_r352 *acr, u64 wpr_addr, u32 wpr_size)
399 struct ls_ucode_img *img, *t; 469 struct ls_ucode_img *img, *t;
400 unsigned long managed_falcons = acr->base.managed_falcons; 470 unsigned long managed_falcons = acr->base.managed_falcons;
401 int managed_count = 0; 471 int managed_count = 0;
402 u32 image_wpr_size; 472 u32 image_wpr_size, ls_blob_size;
403 int falcon_id; 473 int falcon_id;
404 int ret; 474 int ret;
405 475
@@ -411,6 +481,12 @@ acr_r352_prepare_ls_blob(struct acr_r352 *acr, u64 wpr_addr, u32 wpr_size)
411 481
412 img = acr->func->ls_ucode_img_load(acr, falcon_id); 482 img = acr->func->ls_ucode_img_load(acr, falcon_id);
413 if (IS_ERR(img)) { 483 if (IS_ERR(img)) {
484 if (acr->base.optional_falcons & BIT(falcon_id)) {
485 managed_falcons &= ~BIT(falcon_id);
486 nvkm_info(subdev, "skipping %s falcon...\n",
487 nvkm_secboot_falcon_name[falcon_id]);
488 continue;
489 }
414 ret = PTR_ERR(img); 490 ret = PTR_ERR(img);
415 goto cleanup; 491 goto cleanup;
416 } 492 }
@@ -419,6 +495,24 @@ acr_r352_prepare_ls_blob(struct acr_r352 *acr, u64 wpr_addr, u32 wpr_size)
419 managed_count++; 495 managed_count++;
420 } 496 }
421 497
498 /* Commit the actual list of falcons we will manage from now on */
499 acr->base.managed_falcons = managed_falcons;
500
501 /*
502 * If the boot falcon has a firmware, let it manage the bootstrap of other
503 * falcons.
504 */
505 if (acr->func->ls_func[acr->base.boot_falcon] &&
506 (managed_falcons & BIT(acr->base.boot_falcon))) {
507 for_each_set_bit(falcon_id, &managed_falcons,
508 NVKM_SECBOOT_FALCON_END) {
509 if (falcon_id == acr->base.boot_falcon)
510 continue;
511
512 acr->lazy_bootstrap |= BIT(falcon_id);
513 }
514 }
515
422 /* 516 /*
423 * Fill the WPR and LSF headers with the right offsets and compute 517 * Fill the WPR and LSF headers with the right offsets and compute
424 * required WPR size 518 * required WPR size
@@ -426,8 +520,17 @@ acr_r352_prepare_ls_blob(struct acr_r352 *acr, u64 wpr_addr, u32 wpr_size)
426 image_wpr_size = acr->func->ls_fill_headers(acr, &imgs); 520 image_wpr_size = acr->func->ls_fill_headers(acr, &imgs);
427 image_wpr_size = ALIGN(image_wpr_size, WPR_ALIGNMENT); 521 image_wpr_size = ALIGN(image_wpr_size, WPR_ALIGNMENT);
428 522
523 ls_blob_size = image_wpr_size;
524
525 /*
526 * If we need a shadow area, allocate twice the size and use the
527 * upper half as WPR
528 */
529 if (wpr_size == 0 && acr->func->shadow_blob)
530 ls_blob_size *= 2;
531
429 /* Allocate GPU object that will contain the WPR region */ 532 /* Allocate GPU object that will contain the WPR region */
430 ret = nvkm_gpuobj_new(subdev->device, image_wpr_size, WPR_ALIGNMENT, 533 ret = nvkm_gpuobj_new(subdev->device, ls_blob_size, WPR_ALIGNMENT,
431 false, NULL, &acr->ls_blob); 534 false, NULL, &acr->ls_blob);
432 if (ret) 535 if (ret)
433 goto cleanup; 536 goto cleanup;
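A worked example of the shadow layout being set up here, under the assumption spelled out in the comment (lower half shadow, upper half WPR): if image_wpr_size is S and the blob lands at address A, the allocation covers [A, A + 2S) and the next hunk points wpr_addr at A + S.

/* illustration of the address math, not driver code */
u64 A = acr->ls_blob->addr;	/* allocation start */
u64 S = image_wpr_size;		/* aligned WPR image size */
/* shadow copy: [A, A + S) */
/* WPR proper: [A + S, A + 2S), i.e. addr + size / 2 as in the next hunk */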
@@ -438,6 +541,9 @@ acr_r352_prepare_ls_blob(struct acr_r352 *acr, u64 wpr_addr, u32 wpr_size)
438 /* If WPR address and size are not fixed, set them to fit the LS blob */ 541 /* If WPR address and size are not fixed, set them to fit the LS blob */
439 if (wpr_size == 0) { 542 if (wpr_size == 0) {
440 wpr_addr = acr->ls_blob->addr; 543 wpr_addr = acr->ls_blob->addr;
544 if (acr->func->shadow_blob)
545 wpr_addr += acr->ls_blob->size / 2;
546
441 wpr_size = image_wpr_size; 547 wpr_size = image_wpr_size;
442 /* 548 /*
443 * But if the WPR region is set by the bootloader, it is illegal for 549 * But if the WPR region is set by the bootloader, it is illegal for
@@ -469,41 +575,17 @@ cleanup:
469 575
470 576
471 577
472/** 578void
473 * acr_r352_hsf_patch_signature() - patch HS blob with correct signature
474 */
475static void
476acr_r352_hsf_patch_signature(struct nvkm_secboot *sb, void *acr_image)
477{
478 struct fw_bin_header *hsbin_hdr = acr_image;
479 struct hsf_fw_header *fw_hdr = acr_image + hsbin_hdr->header_offset;
480 void *hs_data = acr_image + hsbin_hdr->data_offset;
481 void *sig;
482 u32 sig_size;
483
484 /* Falcon in debug or production mode? */
485 if (sb->boot_falcon->debug) {
486 sig = acr_image + fw_hdr->sig_dbg_offset;
487 sig_size = fw_hdr->sig_dbg_size;
488 } else {
489 sig = acr_image + fw_hdr->sig_prod_offset;
490 sig_size = fw_hdr->sig_prod_size;
491 }
492
493 /* Patch signature */
494 memcpy(hs_data + fw_hdr->patch_loc, sig + fw_hdr->patch_sig, sig_size);
495}
496
497static void
498acr_r352_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb, 579acr_r352_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb,
499 struct hsflcn_acr_desc *desc) 580 void *_desc)
500{ 581{
582 struct hsflcn_acr_desc *desc = _desc;
501 struct nvkm_gpuobj *ls_blob = acr->ls_blob; 583 struct nvkm_gpuobj *ls_blob = acr->ls_blob;
502 584
503 /* WPR region information if WPR is not fixed */ 585 /* WPR region information if WPR is not fixed */
504 if (sb->wpr_size == 0) { 586 if (sb->wpr_size == 0) {
505 u32 wpr_start = ls_blob->addr; 587 u64 wpr_start = ls_blob->addr;
506 u32 wpr_end = wpr_start + ls_blob->size; 588 u64 wpr_end = wpr_start + ls_blob->size;
507 589
508 desc->wpr_region_id = 1; 590 desc->wpr_region_id = 1;
509 desc->regions.no_regions = 2; 591 desc->regions.no_regions = 2;
@@ -533,8 +615,8 @@ acr_r352_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
533 bl_desc->code_dma_base = lower_32_bits(addr_code); 615 bl_desc->code_dma_base = lower_32_bits(addr_code);
534 bl_desc->non_sec_code_off = hdr->non_sec_code_off; 616 bl_desc->non_sec_code_off = hdr->non_sec_code_off;
535 bl_desc->non_sec_code_size = hdr->non_sec_code_size; 617 bl_desc->non_sec_code_size = hdr->non_sec_code_size;
536 bl_desc->sec_code_off = hdr->app[0].sec_code_off; 618 bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0);
537 bl_desc->sec_code_size = hdr->app[0].sec_code_size; 619 bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0);
538 bl_desc->code_entry_point = 0; 620 bl_desc->code_entry_point = 0;
539 bl_desc->data_dma_base = lower_32_bits(addr_data); 621 bl_desc->data_dma_base = lower_32_bits(addr_data);
540 bl_desc->data_size = hdr->data_size; 622 bl_desc->data_size = hdr->data_size;
@@ -562,7 +644,7 @@ acr_r352_prepare_hs_blob(struct acr_r352 *acr, struct nvkm_secboot *sb,
562 void *acr_data; 644 void *acr_data;
563 int ret; 645 int ret;
564 646
565 acr_image = nvkm_acr_load_firmware(subdev, fw, 0); 647 acr_image = hs_ucode_load_blob(subdev, sb->boot_falcon, fw);
566 if (IS_ERR(acr_image)) 648 if (IS_ERR(acr_image))
567 return PTR_ERR(acr_image); 649 return PTR_ERR(acr_image);
568 650
@@ -571,15 +653,12 @@ acr_r352_prepare_hs_blob(struct acr_r352 *acr, struct nvkm_secboot *sb,
571 load_hdr = acr_image + fw_hdr->hdr_offset; 653 load_hdr = acr_image + fw_hdr->hdr_offset;
572 acr_data = acr_image + hsbin_hdr->data_offset; 654 acr_data = acr_image + hsbin_hdr->data_offset;
573 655
574 /* Patch signature */
575 acr_r352_hsf_patch_signature(sb, acr_image);
576
577 /* Patch descriptor with WPR information? */ 656 /* Patch descriptor with WPR information? */
578 if (patch) { 657 if (patch) {
579 struct hsflcn_acr_desc *desc; 658 struct hsflcn_acr_desc *desc;
580 659
581 desc = acr_data + load_hdr->data_dma_base; 660 desc = acr_data + load_hdr->data_dma_base;
582 acr_r352_fixup_hs_desc(acr, sb, desc); 661 acr->func->fixup_hs_desc(acr, sb, desc);
583 } 662 }
584 663
585 if (load_hdr->num_apps > ACR_R352_MAX_APPS) { 664 if (load_hdr->num_apps > ACR_R352_MAX_APPS) {
@@ -589,7 +668,7 @@ acr_r352_prepare_hs_blob(struct acr_r352 *acr, struct nvkm_secboot *sb,
589 goto cleanup; 668 goto cleanup;
590 } 669 }
591 memcpy(load_header, load_hdr, sizeof(*load_header) + 670 memcpy(load_header, load_hdr, sizeof(*load_header) +
592 (sizeof(load_hdr->app[0]) * load_hdr->num_apps)); 671 (sizeof(load_hdr->apps[0]) * 2 * load_hdr->num_apps));
593 672
594 /* Create ACR blob and copy HS data to it */ 673 /* Create ACR blob and copy HS data to it */
595 ret = nvkm_gpuobj_new(subdev->device, ALIGN(hsbin_hdr->data_size, 256), 674 ret = nvkm_gpuobj_new(subdev->device, ALIGN(hsbin_hdr->data_size, 256),
@@ -607,30 +686,6 @@ cleanup:
607 return ret; 686 return ret;
608} 687}
609 688
610static int
611acr_r352_prepare_hsbl_blob(struct acr_r352 *acr)
612{
613 const struct nvkm_subdev *subdev = acr->base.subdev;
614 struct fw_bin_header *hdr;
615 struct fw_bl_desc *hsbl_desc;
616
617 acr->hsbl_blob = nvkm_acr_load_firmware(subdev, "acr/bl", 0);
618 if (IS_ERR(acr->hsbl_blob)) {
619 int ret = PTR_ERR(acr->hsbl_blob);
620
621 acr->hsbl_blob = NULL;
622 return ret;
623 }
624
625 hdr = acr->hsbl_blob;
626 hsbl_desc = acr->hsbl_blob + hdr->header_offset;
627
628 /* virtual start address for boot vector */
629 acr->base.start_address = hsbl_desc->start_tag << 8;
630
631 return 0;
632}
633
634/** 689/**
635 * acr_r352_load_blobs - load blobs common to all ACR V1 versions. 690 * acr_r352_load_blobs - load blobs common to all ACR V1 versions.
636 * 691 *
@@ -641,6 +696,7 @@ acr_r352_prepare_hsbl_blob(struct acr_r352 *acr)
641int 696int
642acr_r352_load_blobs(struct acr_r352 *acr, struct nvkm_secboot *sb) 697acr_r352_load_blobs(struct acr_r352 *acr, struct nvkm_secboot *sb)
643{ 698{
699 struct nvkm_subdev *subdev = &sb->subdev;
644 int ret; 700 int ret;
645 701
646 /* Firmware already loaded? */ 702 /* Firmware already loaded? */
@@ -672,9 +728,24 @@ acr_r352_load_blobs(struct acr_r352 *acr, struct nvkm_secboot *sb)
672 728
673 /* Load the HS firmware bootloader */ 729 /* Load the HS firmware bootloader */
674 if (!acr->hsbl_blob) { 730 if (!acr->hsbl_blob) {
675 ret = acr_r352_prepare_hsbl_blob(acr); 731 acr->hsbl_blob = nvkm_acr_load_firmware(subdev, "acr/bl", 0);
676 if (ret) 732 if (IS_ERR(acr->hsbl_blob)) {
733 ret = PTR_ERR(acr->hsbl_blob);
734 acr->hsbl_blob = NULL;
677 return ret; 735 return ret;
736 }
737
738 if (acr->base.boot_falcon != NVKM_SECBOOT_FALCON_PMU) {
739 acr->hsbl_unload_blob = nvkm_acr_load_firmware(subdev,
740 "acr/unload_bl", 0);
741 if (IS_ERR(acr->hsbl_unload_blob)) {
742 ret = PTR_ERR(acr->hsbl_unload_blob);
743 acr->hsbl_unload_blob = NULL;
744 return ret;
745 }
746 } else {
747 acr->hsbl_unload_blob = acr->hsbl_blob;
748 }
678 } 749 }
679 750
680 acr->firmware_ok = true; 751 acr->firmware_ok = true;
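One subtlety worth flagging in the hunk above: when the boot falcon is the PMU, hsbl_unload_blob aliases hsbl_blob rather than owning a second firmware image, so it must only be freed when it was loaded separately, which is what the destructor change later in this patch does. A condensed sketch of the ownership rule:

/* sketch of the invariant established above */
if (acr->base.boot_falcon != NVKM_SECBOOT_FALCON_PMU)
	kfree(acr->hsbl_unload_blob);	/* separate "acr/unload_bl" image */
/* else: hsbl_unload_blob == hsbl_blob, freed once below */
kfree(acr->hsbl_blob);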
@@ -684,35 +755,42 @@ acr_r352_load_blobs(struct acr_r352 *acr, struct nvkm_secboot *sb)
684} 755}
685 756
686/** 757/**
687 * acr_r352_load() - prepare HS falcon to run the specified blob, mapped 758 * acr_r352_load() - prepare HS falcon to run the specified blob, mapped.
688 * at GPU address offset. 759 *
760 * Returns the start address to use, or a negative error value.
689 */ 761 */
690static int 762static int
691acr_r352_load(struct nvkm_acr *_acr, struct nvkm_secboot *sb, 763acr_r352_load(struct nvkm_acr *_acr, struct nvkm_falcon *falcon,
692 struct nvkm_gpuobj *blob, u64 offset) 764 struct nvkm_gpuobj *blob, u64 offset)
693{ 765{
694 struct acr_r352 *acr = acr_r352(_acr); 766 struct acr_r352 *acr = acr_r352(_acr);
695 struct nvkm_falcon *falcon = sb->boot_falcon;
696 struct fw_bin_header *hdr = acr->hsbl_blob;
697 struct fw_bl_desc *hsbl_desc = acr->hsbl_blob + hdr->header_offset;
698 void *blob_data = acr->hsbl_blob + hdr->data_offset;
699 void *hsbl_code = blob_data + hsbl_desc->code_off;
700 void *hsbl_data = blob_data + hsbl_desc->data_off;
701 u32 code_size = ALIGN(hsbl_desc->code_size, 256);
702 const struct hsf_load_header *load_hdr;
703 const u32 bl_desc_size = acr->func->hs_bl_desc_size; 767 const u32 bl_desc_size = acr->func->hs_bl_desc_size;
768 const struct hsf_load_header *load_hdr;
769 struct fw_bin_header *bl_hdr;
770 struct fw_bl_desc *hsbl_desc;
771 void *bl, *blob_data, *hsbl_code, *hsbl_data;
772 u32 code_size;
704 u8 bl_desc[bl_desc_size]; 773 u8 bl_desc[bl_desc_size];
705 774
706 /* Find the bootloader descriptor for our blob and copy it */ 775 /* Find the bootloader descriptor for our blob and copy it */
707 if (blob == acr->load_blob) { 776 if (blob == acr->load_blob) {
708 load_hdr = &acr->load_bl_header; 777 load_hdr = &acr->load_bl_header;
778 bl = acr->hsbl_blob;
709 } else if (blob == acr->unload_blob) { 779 } else if (blob == acr->unload_blob) {
710 load_hdr = &acr->unload_bl_header; 780 load_hdr = &acr->unload_bl_header;
781 bl = acr->hsbl_unload_blob;
711 } else { 782 } else {
712 nvkm_error(_acr->subdev, "invalid secure boot blob!\n"); 783 nvkm_error(_acr->subdev, "invalid secure boot blob!\n");
713 return -EINVAL; 784 return -EINVAL;
714 } 785 }
715 786
787 bl_hdr = bl;
788 hsbl_desc = bl + bl_hdr->header_offset;
789 blob_data = bl + bl_hdr->data_offset;
790 hsbl_code = blob_data + hsbl_desc->code_off;
791 hsbl_data = blob_data + hsbl_desc->data_off;
792 code_size = ALIGN(hsbl_desc->code_size, 256);
793
716 /* 794 /*
717 * Copy HS bootloader data 795 * Copy HS bootloader data
718 */ 796 */
@@ -732,23 +810,32 @@ acr_r352_load(struct nvkm_acr *_acr, struct nvkm_secboot *sb,
732 nvkm_falcon_load_dmem(falcon, bl_desc, hsbl_desc->dmem_load_off, 810 nvkm_falcon_load_dmem(falcon, bl_desc, hsbl_desc->dmem_load_off,
733 bl_desc_size, 0); 811 bl_desc_size, 0);
734 812
735 return 0; 813 return hsbl_desc->start_tag << 8;
736} 814}
737 815
738static int 816static int
739acr_r352_shutdown(struct acr_r352 *acr, struct nvkm_secboot *sb) 817acr_r352_shutdown(struct acr_r352 *acr, struct nvkm_secboot *sb)
740{ 818{
819 struct nvkm_subdev *subdev = &sb->subdev;
741 int i; 820 int i;
742 821
743 /* Run the unload blob to unprotect the WPR region */ 822 /* Run the unload blob to unprotect the WPR region */
744 if (acr->unload_blob && sb->wpr_set) { 823 if (acr->unload_blob && sb->wpr_set) {
745 int ret; 824 int ret;
746 825
747 nvkm_debug(&sb->subdev, "running HS unload blob\n"); 826 nvkm_debug(subdev, "running HS unload blob\n");
748 ret = sb->func->run_blob(sb, acr->unload_blob); 827 ret = sb->func->run_blob(sb, acr->unload_blob, sb->halt_falcon);
749 if (ret) 828 if (ret < 0)
750 return ret; 829 return ret;
751 nvkm_debug(&sb->subdev, "HS unload blob completed\n"); 830 /*
831 * The unload blob may return this error code - it is not an error,
832 * and matches the expected behavior of the RM driver as well
833 */
834 if (ret && ret != 0x1d) {
835 nvkm_error(subdev, "HS unload failed, ret 0x%08x", ret);
836 return -EINVAL;
837 }
838 nvkm_debug(subdev, "HS unload blob completed\n");
752 } 839 }
753 840
754 for (i = 0; i < NVKM_SECBOOT_FALCON_END; i++) 841 for (i = 0; i < NVKM_SECBOOT_FALCON_END; i++)
@@ -759,9 +846,44 @@ acr_r352_shutdown(struct acr_r352 *acr, struct nvkm_secboot *sb)
759 return 0; 846 return 0;
760} 847}
761 848
849/**
850 * Check whether the WPR region has indeed been set by the ACR firmware
851 * and matches where it should be.
852 */
853static bool
854acr_r352_wpr_is_set(const struct acr_r352 *acr, const struct nvkm_secboot *sb)
855{
856 const struct nvkm_subdev *subdev = &sb->subdev;
857 const struct nvkm_device *device = subdev->device;
858 u64 wpr_lo, wpr_hi;
859 u64 wpr_range_lo, wpr_range_hi;
860
861 nvkm_wr32(device, 0x100cd4, 0x2);
862 wpr_lo = (nvkm_rd32(device, 0x100cd4) & ~0xff);
863 wpr_lo <<= 8;
864 nvkm_wr32(device, 0x100cd4, 0x3);
865 wpr_hi = (nvkm_rd32(device, 0x100cd4) & ~0xff);
866 wpr_hi <<= 8;
867
868 if (sb->wpr_size != 0) {
869 wpr_range_lo = sb->wpr_addr;
870 wpr_range_hi = wpr_range_lo + sb->wpr_size;
871 } else {
872 wpr_range_lo = acr->ls_blob->addr;
873 wpr_range_hi = wpr_range_lo + acr->ls_blob->size;
874 }
875
876 return (wpr_lo >= wpr_range_lo && wpr_lo < wpr_range_hi &&
877 wpr_hi > wpr_range_lo && wpr_hi <= wpr_range_hi);
878}
879
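A note on the register access pattern above: 0x100cd4 behaves as an index/data window, where writing 2 or 3 selects the WPR lower/upper bound and the readback carries the bound in 256-byte units above the index byte, hence the mask and shift. A hedged sketch of the decode, factored into a helper:

/* sketch: read one 256B-granular WPR bound through the 0x100cd4 window;
 * index 2 selects the lower bound, 3 the upper bound (as used above) */
static u64
read_wpr_bound(struct nvkm_device *device, u32 index)
{
	nvkm_wr32(device, 0x100cd4, index);
	return (u64)(nvkm_rd32(device, 0x100cd4) & ~0xff) << 8;
}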
762static int 880static int
763acr_r352_bootstrap(struct acr_r352 *acr, struct nvkm_secboot *sb) 881acr_r352_bootstrap(struct acr_r352 *acr, struct nvkm_secboot *sb)
764{ 882{
883 const struct nvkm_subdev *subdev = &sb->subdev;
884 unsigned long managed_falcons = acr->base.managed_falcons;
885 u32 reg;
886 int falcon_id;
765 int ret; 887 int ret;
766 888
767 if (sb->wpr_set) 889 if (sb->wpr_set)
@@ -772,40 +894,95 @@ acr_r352_bootstrap(struct acr_r352 *acr, struct nvkm_secboot *sb)
772 if (ret) 894 if (ret)
773 return ret; 895 return ret;
774 896
775 nvkm_debug(&sb->subdev, "running HS load blob\n"); 897 nvkm_debug(subdev, "running HS load blob\n");
776 ret = sb->func->run_blob(sb, acr->load_blob); 898 ret = sb->func->run_blob(sb, acr->load_blob, sb->boot_falcon);
777 /* clear halt interrupt */ 899 /* clear halt interrupt */
778 nvkm_falcon_clear_interrupt(sb->boot_falcon, 0x10); 900 nvkm_falcon_clear_interrupt(sb->boot_falcon, 0x10);
779 if (ret) 901 sb->wpr_set = acr_r352_wpr_is_set(acr, sb);
902 if (ret < 0) {
780 return ret; 903 return ret;
781 nvkm_debug(&sb->subdev, "HS load blob completed\n"); 904 } else if (ret > 0) {
905 nvkm_error(subdev, "HS load failed, ret 0x%08x", ret);
906 return -EINVAL;
907 }
908 nvkm_debug(subdev, "HS load blob completed\n");
909 /* WPR must be set at this point */
910 if (!sb->wpr_set) {
911 nvkm_error(subdev, "ACR blob completed but WPR not set!\n");
912 return -EINVAL;
913 }
914
915 /* Run LS firmwares post_run hooks */
916 for_each_set_bit(falcon_id, &managed_falcons, NVKM_SECBOOT_FALCON_END) {
917 const struct acr_r352_ls_func *func =
918 acr->func->ls_func[falcon_id];
919
920 if (func->post_run)
921 func->post_run(&acr->base, sb);
922 }
923
924 /* Re-start ourselves if we are managed */
925 if (!nvkm_secboot_is_managed(sb, acr->base.boot_falcon))
926 return 0;
927
928 /* Enable interrupts */
929 nvkm_falcon_wr32(sb->boot_falcon, 0x10, 0xff);
930 nvkm_mc_intr_mask(subdev->device, sb->boot_falcon->owner->index, true);
931
932 /* Start LS firmware on boot falcon */
933 nvkm_falcon_start(sb->boot_falcon);
934
935 /*
936 * There is a bug where the LS firmware sometimes requires being started
937 * twice (this happens only on SEC). Detect and work around that
938 * condition.
939 *
940 * Once started, the falcon will end up in STOPPED condition (bit 5)
941 * if successful, or in HALT condition (bit 4) if not.
942 */
943 nvkm_msec(subdev->device, 1,
944 if ((reg = nvkm_rd32(subdev->device,
945 sb->boot_falcon->addr + 0x100)
946 & 0x30) != 0)
947 break;
948 );
949 if (reg & BIT(4)) {
950 nvkm_debug(subdev, "applying workaround for start bug...");
951 nvkm_falcon_start(sb->boot_falcon);
952 nvkm_msec(subdev->device, 1,
953 if ((reg = nvkm_rd32(subdev->device,
954 sb->boot_falcon->addr + 0x100)
955 & 0x30) != 0)
956 break;
957 );
958 if (reg & BIT(4)) {
959 nvkm_error(subdev, "%s failed to start\n",
960 nvkm_secboot_falcon_name[acr->base.boot_falcon]);
961 return -EINVAL;
962 }
963 }
782 964
783 sb->wpr_set = true; 965 nvkm_debug(subdev, "%s started\n",
966 nvkm_secboot_falcon_name[acr->base.boot_falcon]);
784 967
785 return 0; 968 return 0;
786} 969}
787 970
788/* 971/**
789 * acr_r352_reset() - execute secure boot from the prepared state 972 * acr_r352_reset_nopmu - dummy reset method when no PMU firmware is loaded
790 * 973 *
791 * Load the HS bootloader and ask the falcon to run it. This will in turn 974 * Reset is done by re-executing secure boot from scratch, with lazy bootstrap
792 * load the HS firmware and run it, so once the falcon stops all the managed 975 * disabled. This has the effect of making all managed falcons ready-to-run.
793 * falcons should have their LS firmware loaded and be ready to run.
794 */ 976 */
795static int 977static int
796acr_r352_reset(struct nvkm_acr *_acr, struct nvkm_secboot *sb, 978acr_r352_reset_nopmu(struct acr_r352 *acr, struct nvkm_secboot *sb,
797 enum nvkm_secboot_falcon falcon) 979 enum nvkm_secboot_falcon falcon)
798{ 980{
799 struct acr_r352 *acr = acr_r352(_acr);
800 int ret; 981 int ret;
801 982
802 /* 983 /*
803 * Dummy GM200 implementation: perform secure boot each time we are 984 * Perform secure boot each time we are called on FECS. Since only FECS
804 * called on FECS. Since only FECS and GPCCS are managed and started 985 * and GPCCS are managed and started together, this ought to be safe.
805 * together, this ought to be safe.
806 *
807 * Once we have proper PMU firmware and support, this will be changed
808 * to a proper call to the PMU method.
809 */ 986 */
810 if (falcon != NVKM_SECBOOT_FALCON_FECS) 987 if (falcon != NVKM_SECBOOT_FALCON_FECS)
811 goto end; 988 goto end;
@@ -814,7 +991,7 @@ acr_r352_reset(struct nvkm_acr *_acr, struct nvkm_secboot *sb,
814 if (ret) 991 if (ret)
815 return ret; 992 return ret;
816 993
817 acr_r352_bootstrap(acr, sb); 994 ret = acr_r352_bootstrap(acr, sb);
818 if (ret) 995 if (ret)
819 return ret; 996 return ret;
820 997
@@ -823,28 +1000,57 @@ end:
823 return 0; 1000 return 0;
824} 1001}
825 1002
1003/*
1004 * acr_r352_reset() - execute secure boot from the prepared state
1005 *
1006 * Load the HS bootloader and ask the falcon to run it. This will in turn
1007 * load the HS firmware and run it, so once the falcon stops all the managed
1008 * falcons should have their LS firmware loaded and be ready to run.
1009 */
826static int 1010static int
827acr_r352_start(struct nvkm_acr *_acr, struct nvkm_secboot *sb, 1011acr_r352_reset(struct nvkm_acr *_acr, struct nvkm_secboot *sb,
828 enum nvkm_secboot_falcon falcon) 1012 enum nvkm_secboot_falcon falcon)
829{ 1013{
830 struct acr_r352 *acr = acr_r352(_acr); 1014 struct acr_r352 *acr = acr_r352(_acr);
831 const struct nvkm_subdev *subdev = &sb->subdev; 1015 struct nvkm_msgqueue *queue;
832 int base; 1016 const char *fname = nvkm_secboot_falcon_name[falcon];
1017 bool wpr_already_set = sb->wpr_set;
1018 int ret;
833 1019
834 switch (falcon) { 1020 /* Make sure secure boot is performed */
835 case NVKM_SECBOOT_FALCON_FECS: 1021 ret = acr_r352_bootstrap(acr, sb);
836 base = 0x409000; 1022 if (ret)
1023 return ret;
1024
1025 /* No PMU interface? */
1026 if (!nvkm_secboot_is_managed(sb, _acr->boot_falcon)) {
1027 /* Redo secure boot entirely if it was already done */
1028 if (wpr_already_set)
1029 return acr_r352_reset_nopmu(acr, sb, falcon);
1030 /* Else return the result of the initial invocation */
1031 else
1032 return ret;
1033 }
1034
1035 switch (_acr->boot_falcon) {
1036 case NVKM_SECBOOT_FALCON_PMU:
1037 queue = sb->subdev.device->pmu->queue;
837 break; 1038 break;
838 case NVKM_SECBOOT_FALCON_GPCCS: 1039 case NVKM_SECBOOT_FALCON_SEC2:
839 base = 0x41a000; 1040 queue = sb->subdev.device->sec2->queue;
840 break; 1041 break;
841 default: 1042 default:
842 nvkm_error(subdev, "cannot start unhandled falcon!\n");
843 return -EINVAL; 1043 return -EINVAL;
844 } 1044 }
845 1045
846 nvkm_wr32(subdev->device, base + 0x130, 0x00000002); 1046 /* Otherwise just ask the LS firmware to reset the falcon */
847 acr->falcon_state[falcon] = RUNNING; 1047 nvkm_debug(&sb->subdev, "resetting %s falcon\n", fname);
1048 ret = nvkm_msgqueue_acr_boot_falcon(queue, falcon);
1049 if (ret) {
1050 nvkm_error(&sb->subdev, "cannot boot %s falcon\n", fname);
1051 return ret;
1052 }
1053 nvkm_debug(&sb->subdev, "falcon %s reset\n", fname);
848 1054
849 return 0; 1055 return 0;
850} 1056}
@@ -864,6 +1070,8 @@ acr_r352_dtor(struct nvkm_acr *_acr)
864 1070
865 nvkm_gpuobj_del(&acr->unload_blob); 1071 nvkm_gpuobj_del(&acr->unload_blob);
866 1072
1073 if (_acr->boot_falcon != NVKM_SECBOOT_FALCON_PMU)
1074 kfree(acr->hsbl_unload_blob);
867 kfree(acr->hsbl_blob); 1075 kfree(acr->hsbl_blob);
868 nvkm_gpuobj_del(&acr->load_blob); 1076 nvkm_gpuobj_del(&acr->load_blob);
869 nvkm_gpuobj_del(&acr->ls_blob); 1077 nvkm_gpuobj_del(&acr->ls_blob);
@@ -887,8 +1095,88 @@ acr_r352_ls_gpccs_func = {
887 .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD, 1095 .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
888}; 1096};
889 1097
1098
1099
1100/**
1101 * struct acr_r352_pmu_bl_desc - PMU DMEM bootloader descriptor
1102 * @dma_idx: DMA context to be used by BL while loading code/data
1103 * @code_dma_base: 256B-aligned Physical FB Address where code is located
1104 * @code_size_total: total size of the code part in the ucode
1105 * @code_size_to_load: size of the code part to load in PMU IMEM.
1106 * @code_entry_point: entry point in the code.
1107 * @data_dma_base: Physical FB address where data part of ucode is located
1108 * @data_size: Total size of the data portion.
1109 * @overlay_dma_base: Physical FB address for resident code present in ucode
1110 * @argc: Total number of args
1111 * @argv: offset where args are copied into PMU's DMEM.
1112 *
1113 * Structure used by the PMU bootloader to load the rest of the code
1114 */
1115struct acr_r352_pmu_bl_desc {
1116 u32 dma_idx;
1117 u32 code_dma_base;
1118 u32 code_size_total;
1119 u32 code_size_to_load;
1120 u32 code_entry_point;
1121 u32 data_dma_base;
1122 u32 data_size;
1123 u32 overlay_dma_base;
1124 u32 argc;
1125 u32 argv;
1126 u16 code_dma_base1;
1127 u16 data_dma_base1;
1128 u16 overlay_dma_base1;
1129};
1130
1131/**
1132 * acr_r352_generate_pmu_bl_desc() - populate a DMEM BL descriptor for PMU LS image
1133 *
1134 */
1135static void
1136acr_r352_generate_pmu_bl_desc(const struct nvkm_acr *acr,
1137 const struct ls_ucode_img *img, u64 wpr_addr,
1138 void *_desc)
1139{
1140 const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
1141 const struct nvkm_pmu *pmu = acr->subdev->device->pmu;
1142 struct acr_r352_pmu_bl_desc *desc = _desc;
1143 u64 base;
1144 u64 addr_code;
1145 u64 addr_data;
1146 u32 addr_args;
1147
1148 base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
1149 addr_code = (base + pdesc->app_resident_code_offset) >> 8;
1150 addr_data = (base + pdesc->app_resident_data_offset) >> 8;
1151 addr_args = pmu->falcon->data.limit;
1152 addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;
1153
1154 desc->dma_idx = FALCON_DMAIDX_UCODE;
1155 desc->code_dma_base = lower_32_bits(addr_code);
1156 desc->code_dma_base1 = upper_32_bits(addr_code);
1157 desc->code_size_total = pdesc->app_size;
1158 desc->code_size_to_load = pdesc->app_resident_code_size;
1159 desc->code_entry_point = pdesc->app_imem_entry;
1160 desc->data_dma_base = lower_32_bits(addr_data);
1161 desc->data_dma_base1 = upper_32_bits(addr_data);
1162 desc->data_size = pdesc->app_resident_data_size;
1163 desc->overlay_dma_base = lower_32_bits(addr_code);
1164 desc->overlay_dma_base1 = upper_32_bits(addr_code);
1165 desc->argc = 1;
1166 desc->argv = addr_args;
1167}
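Why the extra *_dma_base1 fields: the addresses are stored pre-shifted right by 8 (256-byte units), so up to 48 significant bits remain, split across the u32 base and the u16 base1. A sketch of the relationship, assuming the firmware reassembles the halves in the obvious way:

/* illustration: split and (presumed) reassembly of a shifted base */
u64 units = addr_code;				/* already >> 8 above */
u32 lo = lower_32_bits(units);			/* -> code_dma_base */
u16 hi = (u16)upper_32_bits(units);		/* -> code_dma_base1 */
u64 byte_addr = (((u64)hi << 32) | lo) << 8;	/* back to a byte address */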
1168
1169static const struct acr_r352_ls_func
1170acr_r352_ls_pmu_func = {
1171 .load = acr_ls_ucode_load_pmu,
1172 .generate_bl_desc = acr_r352_generate_pmu_bl_desc,
1173 .bl_desc_size = sizeof(struct acr_r352_pmu_bl_desc),
1174 .post_run = acr_ls_pmu_post_run,
1175};
1176
890const struct acr_r352_func 1177const struct acr_r352_func
891acr_r352_func = { 1178acr_r352_func = {
1179 .fixup_hs_desc = acr_r352_fixup_hs_desc,
892 .generate_hs_bl_desc = acr_r352_generate_hs_bl_desc, 1180 .generate_hs_bl_desc = acr_r352_generate_hs_bl_desc,
893 .hs_bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc), 1181 .hs_bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
894 .ls_ucode_img_load = acr_r352_ls_ucode_img_load, 1182 .ls_ucode_img_load = acr_r352_ls_ucode_img_load,
@@ -897,6 +1185,7 @@ acr_r352_func = {
897 .ls_func = { 1185 .ls_func = {
898 [NVKM_SECBOOT_FALCON_FECS] = &acr_r352_ls_fecs_func, 1186 [NVKM_SECBOOT_FALCON_FECS] = &acr_r352_ls_fecs_func,
899 [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r352_ls_gpccs_func, 1187 [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r352_ls_gpccs_func,
1188 [NVKM_SECBOOT_FALCON_PMU] = &acr_r352_ls_pmu_func,
900 }, 1189 },
901}; 1190};
902 1191
@@ -906,7 +1195,6 @@ acr_r352_base_func = {
906 .fini = acr_r352_fini, 1195 .fini = acr_r352_fini,
907 .load = acr_r352_load, 1196 .load = acr_r352_load,
908 .reset = acr_r352_reset, 1197 .reset = acr_r352_reset,
909 .start = acr_r352_start,
910}; 1198};
911 1199
912struct nvkm_acr * 1200struct nvkm_acr *
@@ -915,6 +1203,13 @@ acr_r352_new_(const struct acr_r352_func *func,
915 unsigned long managed_falcons) 1203 unsigned long managed_falcons)
916{ 1204{
917 struct acr_r352 *acr; 1205 struct acr_r352 *acr;
1206 int i;
1207
1208 /* Check that all requested falcons are supported */
1209 for_each_set_bit(i, &managed_falcons, NVKM_SECBOOT_FALCON_END) {
1210 if (!func->ls_func[i])
1211 return ERR_PTR(-ENOTSUPP);
1212 }
918 1213
919 acr = kzalloc(sizeof(*acr), GFP_KERNEL); 1214 acr = kzalloc(sizeof(*acr), GFP_KERNEL);
920 if (!acr) 1215 if (!acr)
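managed_falcons is a bitmask indexed by enum nvkm_secboot_falcon, so the check above rejects any requested falcon without an ls_func entry. A hypothetical caller sketch:

/* sketch: construct an ACR managing FECS, GPCCS and the PMU */
unsigned long managed = BIT(NVKM_SECBOOT_FALCON_FECS) |
			BIT(NVKM_SECBOOT_FALCON_GPCCS) |
			BIT(NVKM_SECBOOT_FALCON_PMU);
struct nvkm_acr *acr = acr_r352_new_(&acr_r352_func,
				     NVKM_SECBOOT_FALCON_PMU, managed);
if (IS_ERR(acr))
	return PTR_ERR(acr); /* e.g. -ENOTSUPP for an unsupported falcon */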
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h
index ad5923b0fd3c..6e88520566c9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h
@@ -24,131 +24,27 @@
24 24
25#include "acr.h" 25#include "acr.h"
26#include "ls_ucode.h" 26#include "ls_ucode.h"
27#include "hs_ucode.h"
27 28
28struct ls_ucode_img; 29struct ls_ucode_img;
29 30
30#define ACR_R352_MAX_APPS 8 31#define ACR_R352_MAX_APPS 8
31 32
32/*
33 *
34 * LS blob structures
35 *
36 */
37
38/**
39 * struct acr_r352_lsf_lsb_header - LS firmware header
40 * @signature: signature to verify the firmware against
41 * @ucode_off: offset of the ucode blob in the WPR region. The ucode
42 * blob contains the bootloader, code and data of the
43 * LS falcon
44 * @ucode_size: size of the ucode blob, including bootloader
45 * @data_size: size of the ucode blob data
46 * @bl_code_size: size of the bootloader code
47 * @bl_imem_off: offset in imem of the bootloader
48 * @bl_data_off: offset of the bootloader data in WPR region
49 * @bl_data_size: size of the bootloader data
50 * @app_code_off: offset of the app code relative to ucode_off
51 * @app_code_size: size of the app code
52 * @app_data_off: offset of the app data relative to ucode_off
53 * @app_data_size: size of the app data
54 * @flags: flags for the secure bootloader
55 *
56 * This structure is written into the WPR region for each managed falcon. Each
57 * instance is referenced by the lsb_offset member of the corresponding
58 * lsf_wpr_header.
59 */
60struct acr_r352_lsf_lsb_header {
61 /**
62 * LS falcon signatures
63 * @prd_keys: signature to use in production mode
64 * @dgb_keys: signature to use in debug mode
65 * @b_prd_present: whether the production key is present
66 * @b_dgb_present: whether the debug key is present
67 * @falcon_id: ID of the falcon the ucode applies to
68 */
69 struct {
70 u8 prd_keys[2][16];
71 u8 dbg_keys[2][16];
72 u32 b_prd_present;
73 u32 b_dbg_present;
74 u32 falcon_id;
75 } signature;
76 u32 ucode_off;
77 u32 ucode_size;
78 u32 data_size;
79 u32 bl_code_size;
80 u32 bl_imem_off;
81 u32 bl_data_off;
82 u32 bl_data_size;
83 u32 app_code_off;
84 u32 app_code_size;
85 u32 app_data_off;
86 u32 app_data_size;
87 u32 flags;
88#define LSF_FLAG_LOAD_CODE_AT_0 1 33#define LSF_FLAG_LOAD_CODE_AT_0 1
89#define LSF_FLAG_DMACTL_REQ_CTX 4 34#define LSF_FLAG_DMACTL_REQ_CTX 4
90#define LSF_FLAG_FORCE_PRIV_LOAD 8 35#define LSF_FLAG_FORCE_PRIV_LOAD 8
91};
92
93/**
94 * struct acr_r352_lsf_wpr_header - LS blob WPR Header
95 * @falcon_id: LS falcon ID
96 * @lsb_offset: offset of the lsb_lsf_header in the WPR region
97 * @bootstrap_owner: secure falcon reponsible for bootstrapping the LS falcon
98 * @lazy_bootstrap: skip bootstrapping by ACR
99 * @status: bootstrapping status
100 *
101 * An array of these is written at the beginning of the WPR region, one for
102 * each managed falcon. The array is terminated by an instance which falcon_id
103 * is LSF_FALCON_ID_INVALID.
104 */
105struct acr_r352_lsf_wpr_header {
106 u32 falcon_id;
107 u32 lsb_offset;
108 u32 bootstrap_owner;
109 u32 lazy_bootstrap;
110 u32 status;
111#define LSF_IMAGE_STATUS_NONE 0
112#define LSF_IMAGE_STATUS_COPY 1
113#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED 2
114#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED 3
115#define LSF_IMAGE_STATUS_VALIDATION_DONE 4
116#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED 5
117#define LSF_IMAGE_STATUS_BOOTSTRAP_READY 6
118};
119
120/**
121 * struct ls_ucode_img_r352 - ucode image augmented with r352 headers
122 */
123struct ls_ucode_img_r352 {
124 struct ls_ucode_img base;
125
126 struct acr_r352_lsf_wpr_header wpr_header;
127 struct acr_r352_lsf_lsb_header lsb_header;
128};
129#define ls_ucode_img_r352(i) container_of(i, struct ls_ucode_img_r352, base)
130
131
132/*
133 * HS blob structures
134 */
135 36
136struct hsf_load_header_app { 37static inline u32
137 u32 sec_code_off; 38hsf_load_header_app_off(const struct hsf_load_header *hdr, u32 app)
138 u32 sec_code_size; 39{
139}; 40 return hdr->apps[app];
41}
140 42
141/** 43static inline u32
142 * struct hsf_load_header - HS firmware load header 44hsf_load_header_app_size(const struct hsf_load_header *hdr, u32 app)
143 */ 45{
144struct hsf_load_header { 46 return hdr->apps[hdr->num_apps + app];
145 u32 non_sec_code_off; 47}
146 u32 non_sec_code_size;
147 u32 data_dma_base;
148 u32 data_size;
149 u32 num_apps;
150 struct hsf_load_header_app app[0];
151};
152 48
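The replacement layout flattens the old app[] array of (offset, size) pairs into a single u32 apps[] array holding all num_apps offsets first, then all num_apps sizes, which is what the two accessors above index into. A quick sketch of iterating it, assuming a hdr loaded elsewhere:

/* layout: apps[0..num_apps-1] holds the secure-code offsets,
 * apps[num_apps..2*num_apps-1] the matching sizes */
u32 i;

for (i = 0; i < hdr->num_apps; i++) {
	u32 off  = hsf_load_header_app_off(hdr, i);
	u32 size = hsf_load_header_app_size(hdr, i);
	/* app i's secure code occupies [off, off + size) in the HS blob */
}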
153/** 49/**
154 * struct acr_r352_ls_func - manages a single LS firmware 50 * struct acr_r352_ls_func - manages a single LS firmware
@@ -157,6 +53,7 @@ struct hsf_load_header {
157 * @generate_bl_desc: function called on a block of bl_desc_size to generate the 53 * @generate_bl_desc: function called on a block of bl_desc_size to generate the
158 * proper bootloader descriptor for this LS firmware 54 * proper bootloader descriptor for this LS firmware
159 * @bl_desc_size: size of the bootloader descriptor 55 * @bl_desc_size: size of the bootloader descriptor
56 * @post_run: hook called right after the ACR is executed
160 * @lhdr_flags: LS flags 57 * @lhdr_flags: LS flags
161 */ 58 */
162struct acr_r352_ls_func { 59struct acr_r352_ls_func {
@@ -164,6 +61,7 @@ struct acr_r352_ls_func {
164 void (*generate_bl_desc)(const struct nvkm_acr *, 61 void (*generate_bl_desc)(const struct nvkm_acr *,
165 const struct ls_ucode_img *, u64, void *); 62 const struct ls_ucode_img *, u64, void *);
166 u32 bl_desc_size; 63 u32 bl_desc_size;
64 void (*post_run)(const struct nvkm_acr *, const struct nvkm_secboot *);
167 u32 lhdr_flags; 65 u32 lhdr_flags;
168}; 66};
169 67
@@ -179,13 +77,15 @@ struct acr_r352;
179struct acr_r352_func { 77struct acr_r352_func {
180 void (*generate_hs_bl_desc)(const struct hsf_load_header *, void *, 78 void (*generate_hs_bl_desc)(const struct hsf_load_header *, void *,
181 u64); 79 u64);
80 void (*fixup_hs_desc)(struct acr_r352 *, struct nvkm_secboot *, void *);
182 u32 hs_bl_desc_size; 81 u32 hs_bl_desc_size;
82 bool shadow_blob;
183 83
184 struct ls_ucode_img *(*ls_ucode_img_load)(const struct acr_r352 *, 84 struct ls_ucode_img *(*ls_ucode_img_load)(const struct acr_r352 *,
185 enum nvkm_secboot_falcon); 85 enum nvkm_secboot_falcon);
186 int (*ls_fill_headers)(struct acr_r352 *, struct list_head *); 86 int (*ls_fill_headers)(struct acr_r352 *, struct list_head *);
187 int (*ls_write_wpr)(struct acr_r352 *, struct list_head *, 87 int (*ls_write_wpr)(struct acr_r352 *, struct list_head *,
188 struct nvkm_gpuobj *, u32); 88 struct nvkm_gpuobj *, u64);
189 89
190 const struct acr_r352_ls_func *ls_func[NVKM_SECBOOT_FALCON_END]; 90 const struct acr_r352_ls_func *ls_func[NVKM_SECBOOT_FALCON_END];
191}; 91};
@@ -204,19 +104,22 @@ struct acr_r352 {
204 struct nvkm_gpuobj *load_blob; 104 struct nvkm_gpuobj *load_blob;
205 struct { 105 struct {
206 struct hsf_load_header load_bl_header; 106 struct hsf_load_header load_bl_header;
207 struct hsf_load_header_app __load_apps[ACR_R352_MAX_APPS]; 107 u32 __load_apps[ACR_R352_MAX_APPS * 2];
208 }; 108 };
209 109
210 /* HS FW - unlock WPR region (dGPU only) */ 110 /* HS FW - unlock WPR region (dGPU only) */
211 struct nvkm_gpuobj *unload_blob; 111 struct nvkm_gpuobj *unload_blob;
212 struct { 112 struct {
213 struct hsf_load_header unload_bl_header; 113 struct hsf_load_header unload_bl_header;
214 struct hsf_load_header_app __unload_apps[ACR_R352_MAX_APPS]; 114 u32 __unload_apps[ACR_R352_MAX_APPS * 2];
215 }; 115 };
216 116
217 /* HS bootloader */ 117 /* HS bootloader */
218 void *hsbl_blob; 118 void *hsbl_blob;
219 119
120 /* HS bootloader for unload blob, if using a different falcon */
121 void *hsbl_unload_blob;
122
220 /* LS FWs, to be loaded by the HS ACR */ 123 /* LS FWs, to be loaded by the HS ACR */
221 struct nvkm_gpuobj *ls_blob; 124 struct nvkm_gpuobj *ls_blob;
222 125
@@ -245,6 +148,8 @@ struct ls_ucode_img *acr_r352_ls_ucode_img_load(const struct acr_r352 *,
245 enum nvkm_secboot_falcon); 148 enum nvkm_secboot_falcon);
246int acr_r352_ls_fill_headers(struct acr_r352 *, struct list_head *); 149int acr_r352_ls_fill_headers(struct acr_r352 *, struct list_head *);
247int acr_r352_ls_write_wpr(struct acr_r352 *, struct list_head *, 150int acr_r352_ls_write_wpr(struct acr_r352 *, struct list_head *,
248 struct nvkm_gpuobj *, u32); 151 struct nvkm_gpuobj *, u64);
152
153void acr_r352_fixup_hs_desc(struct acr_r352 *, struct nvkm_secboot *, void *);
249 154
250#endif 155#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c
index f0aff1d98474..14b36ef93628 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c
@@ -20,58 +20,23 @@
20 * DEALINGS IN THE SOFTWARE. 20 * DEALINGS IN THE SOFTWARE.
21 */ 21 */
22 22
23#include "acr_r352.h" 23#include "acr_r361.h"
24 24
25#include <engine/falcon.h> 25#include <engine/falcon.h>
26 26#include <core/msgqueue.h>
27/** 27#include <subdev/pmu.h>
28 * struct acr_r361_flcn_bl_desc - DMEM bootloader descriptor 28#include <engine/sec2.h>
29 * @signature: 16B signature for secure code. 0s if no secure code
30 * @ctx_dma: DMA context to be used by BL while loading code/data
31 * @code_dma_base: 256B-aligned Physical FB Address where code is located
32 * (falcon's $xcbase register)
33 * @non_sec_code_off: offset from code_dma_base where the non-secure code is
34 * located. The offset must be multiple of 256 to help perf
35 * @non_sec_code_size: the size of the nonSecure code part.
36 * @sec_code_off: offset from code_dma_base where the secure code is
37 * located. The offset must be multiple of 256 to help perf
38 * @sec_code_size: offset from code_dma_base where the secure code is
39 * located. The offset must be multiple of 256 to help perf
40 * @code_entry_point: code entry point which will be invoked by BL after
41 * code is loaded.
42 * @data_dma_base: 256B aligned Physical FB Address where data is located.
43 * (falcon's $xdbase register)
44 * @data_size: size of data block. Should be multiple of 256B
45 *
46 * Structure used by the bootloader to load the rest of the code. This has
47 * to be filled by host and copied into DMEM at offset provided in the
48 * hsflcn_bl_desc.bl_desc_dmem_load_off.
49 */
50struct acr_r361_flcn_bl_desc {
51 u32 reserved[4];
52 u32 signature[4];
53 u32 ctx_dma;
54 struct flcn_u64 code_dma_base;
55 u32 non_sec_code_off;
56 u32 non_sec_code_size;
57 u32 sec_code_off;
58 u32 sec_code_size;
59 u32 code_entry_point;
60 struct flcn_u64 data_dma_base;
61 u32 data_size;
62};
63 29
64static void 30static void
65acr_r361_generate_flcn_bl_desc(const struct nvkm_acr *acr, 31acr_r361_generate_flcn_bl_desc(const struct nvkm_acr *acr,
66 const struct ls_ucode_img *_img, u64 wpr_addr, 32 const struct ls_ucode_img *img, u64 wpr_addr,
67 void *_desc) 33 void *_desc)
68{ 34{
69 struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img);
70 struct acr_r361_flcn_bl_desc *desc = _desc; 35 struct acr_r361_flcn_bl_desc *desc = _desc;
71 const struct ls_ucode_img_desc *pdesc = &img->base.ucode_desc; 36 const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
72 u64 base, addr_code, addr_data; 37 u64 base, addr_code, addr_data;
73 38
74 base = wpr_addr + img->lsb_header.ucode_off + pdesc->app_start_offset; 39 base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
75 addr_code = base + pdesc->app_resident_code_offset; 40 addr_code = base + pdesc->app_resident_code_offset;
76 addr_data = base + pdesc->app_resident_data_offset; 41 addr_data = base + pdesc->app_resident_data_offset;
77 42
@@ -84,7 +49,7 @@ acr_r361_generate_flcn_bl_desc(const struct nvkm_acr *acr,
84 desc->data_size = pdesc->app_resident_data_size; 49 desc->data_size = pdesc->app_resident_data_size;
85} 50}
86 51
87static void 52void
88acr_r361_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc, 53acr_r361_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
89 u64 offset) 54 u64 offset)
90{ 55{
@@ -94,8 +59,8 @@ acr_r361_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
94 bl_desc->code_dma_base = u64_to_flcn64(offset); 59 bl_desc->code_dma_base = u64_to_flcn64(offset);
95 bl_desc->non_sec_code_off = hdr->non_sec_code_off; 60 bl_desc->non_sec_code_off = hdr->non_sec_code_off;
96 bl_desc->non_sec_code_size = hdr->non_sec_code_size; 61 bl_desc->non_sec_code_size = hdr->non_sec_code_size;
97 bl_desc->sec_code_off = hdr->app[0].sec_code_off; 62 bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0);
98 bl_desc->sec_code_size = hdr->app[0].sec_code_size; 63 bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0);
99 bl_desc->code_entry_point = 0; 64 bl_desc->code_entry_point = 0;
100 bl_desc->data_dma_base = u64_to_flcn64(offset + hdr->data_dma_base); 65 bl_desc->data_dma_base = u64_to_flcn64(offset + hdr->data_dma_base);
101 bl_desc->data_size = hdr->data_size; 66 bl_desc->data_size = hdr->data_size;
@@ -117,8 +82,100 @@ acr_r361_ls_gpccs_func = {
117 .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD, 82 .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
118}; 83};
119 84
85struct acr_r361_pmu_bl_desc {
86 u32 reserved;
87 u32 dma_idx;
88 struct flcn_u64 code_dma_base;
89 u32 total_code_size;
90 u32 code_size_to_load;
91 u32 code_entry_point;
92 struct flcn_u64 data_dma_base;
93 u32 data_size;
94 struct flcn_u64 overlay_dma_base;
95 u32 argc;
96 u32 argv;
97};
98
99static void
100acr_r361_generate_pmu_bl_desc(const struct nvkm_acr *acr,
101 const struct ls_ucode_img *img, u64 wpr_addr,
102 void *_desc)
103{
104 const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
105 const struct nvkm_pmu *pmu = acr->subdev->device->pmu;
106 struct acr_r361_pmu_bl_desc *desc = _desc;
107 u64 base, addr_code, addr_data;
108 u32 addr_args;
109
110 base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
111 addr_code = base + pdesc->app_resident_code_offset;
112 addr_data = base + pdesc->app_resident_data_offset;
113 addr_args = pmu->falcon->data.limit;
114 addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;
115
116 desc->dma_idx = FALCON_DMAIDX_UCODE;
117 desc->code_dma_base = u64_to_flcn64(addr_code);
118 desc->total_code_size = pdesc->app_size;
119 desc->code_size_to_load = pdesc->app_resident_code_size;
120 desc->code_entry_point = pdesc->app_imem_entry;
121 desc->data_dma_base = u64_to_flcn64(addr_data);
122 desc->data_size = pdesc->app_resident_data_size;
123 desc->overlay_dma_base = u64_to_flcn64(addr_code);
124 desc->argc = 1;
125 desc->argv = addr_args;
126}
127
128const struct acr_r352_ls_func
129acr_r361_ls_pmu_func = {
130 .load = acr_ls_ucode_load_pmu,
131 .generate_bl_desc = acr_r361_generate_pmu_bl_desc,
132 .bl_desc_size = sizeof(struct acr_r361_pmu_bl_desc),
133 .post_run = acr_ls_pmu_post_run,
134};
135
136static void
137acr_r361_generate_sec2_bl_desc(const struct nvkm_acr *acr,
138 const struct ls_ucode_img *img, u64 wpr_addr,
139 void *_desc)
140{
141 const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
142 const struct nvkm_sec2 *sec = acr->subdev->device->sec2;
143 struct acr_r361_pmu_bl_desc *desc = _desc;
144 u64 base, addr_code, addr_data;
145 u32 addr_args;
146
147 base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
148 /* For some reason we should not add app_resident_code_offset here */
149 addr_code = base;
150 addr_data = base + pdesc->app_resident_data_offset;
151 addr_args = sec->falcon->data.limit;
152 addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;
153
154 desc->dma_idx = FALCON_SEC2_DMAIDX_UCODE;
155 desc->code_dma_base = u64_to_flcn64(addr_code);
156 desc->total_code_size = pdesc->app_size;
157 desc->code_size_to_load = pdesc->app_resident_code_size;
158 desc->code_entry_point = pdesc->app_imem_entry;
159 desc->data_dma_base = u64_to_flcn64(addr_data);
160 desc->data_size = pdesc->app_resident_data_size;
161 desc->overlay_dma_base = u64_to_flcn64(addr_code);
162 desc->argc = 1;
163 /* args are stored at the beginning of EMEM */
164 desc->argv = 0x01000000;
165}
166
167const struct acr_r352_ls_func
168acr_r361_ls_sec2_func = {
169 .load = acr_ls_ucode_load_sec2,
170 .generate_bl_desc = acr_r361_generate_sec2_bl_desc,
171 .bl_desc_size = sizeof(struct acr_r361_pmu_bl_desc),
172 .post_run = acr_ls_sec2_post_run,
173};
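The argv of 0x01000000 is worth a gloss: on SEC2 the message-queue arguments live in EMEM rather than at the top of DMEM as on the PMU, and this driver addresses the EMEM aperture starting at that constant. The threshold below is an assumption inferred from the value used above, not something the patch states:

#define SEC2_EMEM_BASE 0x01000000	/* assumed aperture base, per argv above */

/* sketch: classify a SEC2 data address */
static bool
sec2_addr_is_emem(u32 addr)
{
	return addr >= SEC2_EMEM_BASE;
}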
174
175
120const struct acr_r352_func 176const struct acr_r352_func
121acr_r361_func = { 177acr_r361_func = {
178 .fixup_hs_desc = acr_r352_fixup_hs_desc,
122 .generate_hs_bl_desc = acr_r361_generate_hs_bl_desc, 179 .generate_hs_bl_desc = acr_r361_generate_hs_bl_desc,
123 .hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc), 180 .hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
124 .ls_ucode_img_load = acr_r352_ls_ucode_img_load, 181 .ls_ucode_img_load = acr_r352_ls_ucode_img_load,
@@ -127,6 +184,8 @@ acr_r361_func = {
127 .ls_func = { 184 .ls_func = {
128 [NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func, 185 [NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func,
129 [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func, 186 [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func,
187 [NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func,
188 [NVKM_SECBOOT_FALCON_SEC2] = &acr_r361_ls_sec2_func,
130 }, 189 },
131}; 190};
132 191
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h
new file mode 100644
index 000000000000..f9f978daadb9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h
@@ -0,0 +1,72 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef __NVKM_SECBOOT_ACR_R361_H__
24#define __NVKM_SECBOOT_ACR_R361_H__
25
26#include "acr_r352.h"
27
28/**
29 * struct acr_r361_flcn_bl_desc - DMEM bootloader descriptor
30 * @signature: 16B signature for secure code. 0s if no secure code
31 * @ctx_dma: DMA context to be used by BL while loading code/data
32 * @code_dma_base: 256B-aligned Physical FB Address where code is located
33 * (falcon's $xcbase register)
34 * @non_sec_code_off: offset from code_dma_base where the non-secure code is
35 * located. The offset must be multiple of 256 to help perf
36 * @non_sec_code_size: the size of the nonSecure code part.
37 * @sec_code_off: offset from code_dma_base where the secure code is
38 * located. The offset must be multiple of 256 to help perf
39 * @sec_code_size: the size of the secure code part. Should be a
40 * multiple of 256 to help perf
41 * @code_entry_point: code entry point which will be invoked by BL after
42 * code is loaded.
43 * @data_dma_base: 256B aligned Physical FB Address where data is located.
44 * (falcon's $xdbase register)
45 * @data_size: size of data block. Should be multiple of 256B
46 *
47 * Structure used by the bootloader to load the rest of the code. This has
48 * to be filled by host and copied into DMEM at offset provided in the
49 * hsflcn_bl_desc.bl_desc_dmem_load_off.
50 */
51struct acr_r361_flcn_bl_desc {
52 u32 reserved[4];
53 u32 signature[4];
54 u32 ctx_dma;
55 struct flcn_u64 code_dma_base;
56 u32 non_sec_code_off;
57 u32 non_sec_code_size;
58 u32 sec_code_off;
59 u32 sec_code_size;
60 u32 code_entry_point;
61 struct flcn_u64 data_dma_base;
62 u32 data_size;
63};
64
65void acr_r361_generate_hs_bl_desc(const struct hsf_load_header *, void *, u64);
66
67extern const struct acr_r352_ls_func acr_r361_ls_fecs_func;
68extern const struct acr_r352_ls_func acr_r361_ls_gpccs_func;
69extern const struct acr_r352_ls_func acr_r361_ls_pmu_func;
70extern const struct acr_r352_ls_func acr_r361_ls_sec2_func;
71
72#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r364.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r364.c
new file mode 100644
index 000000000000..30cf04109991
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r364.c
@@ -0,0 +1,117 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include "acr_r361.h"
24
25#include <core/gpuobj.h>
26
27/*
28 * r364 ACR: hsflcn_desc structure has changed to introduce the shadow_mem
29 * parameter.
30 */
31
32struct acr_r364_hsflcn_desc {
33 union {
34 u8 reserved_dmem[0x200];
35 u32 signatures[4];
36 } ucode_reserved_space;
37 u32 wpr_region_id;
38 u32 wpr_offset;
39 u32 mmu_memory_range;
40 struct {
41 u32 no_regions;
42 struct {
43 u32 start_addr;
44 u32 end_addr;
45 u32 region_id;
46 u32 read_mask;
47 u32 write_mask;
48 u32 client_mask;
49 u32 shadow_mem_start_addr;
50 } region_props[2];
51 } regions;
52 u32 ucode_blob_size;
53 u64 ucode_blob_base __aligned(8);
54 struct {
55 u32 vpr_enabled;
56 u32 vpr_start;
57 u32 vpr_end;
58 u32 hdcp_policies;
59 } vpr_desc;
60};
61
62static void
63acr_r364_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb,
64 void *_desc)
65{
66 struct acr_r364_hsflcn_desc *desc = _desc;
67 struct nvkm_gpuobj *ls_blob = acr->ls_blob;
68
69 /* WPR region information if WPR is not fixed */
70 if (sb->wpr_size == 0) {
71 u64 wpr_start = ls_blob->addr;
72 u64 wpr_end = ls_blob->addr + ls_blob->size;
73
74 if (acr->func->shadow_blob)
75 wpr_start += ls_blob->size / 2;
76
77 desc->wpr_region_id = 1;
78 desc->regions.no_regions = 2;
79 desc->regions.region_props[0].start_addr = wpr_start >> 8;
80 desc->regions.region_props[0].end_addr = wpr_end >> 8;
81 desc->regions.region_props[0].region_id = 1;
82 desc->regions.region_props[0].read_mask = 0xf;
83 desc->regions.region_props[0].write_mask = 0xc;
84 desc->regions.region_props[0].client_mask = 0x2;
85 if (acr->func->shadow_blob)
86 desc->regions.region_props[0].shadow_mem_start_addr =
87 ls_blob->addr >> 8;
88 else
89 desc->regions.region_props[0].shadow_mem_start_addr = 0;
90 } else {
91 desc->ucode_blob_base = ls_blob->addr;
92 desc->ucode_blob_size = ls_blob->size;
93 }
94}
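On the mask values used by this fixup and its r352 counterpart: read_mask 0xf and write_mask 0xc read naturally as one allow-bit per privilege level 0-3, i.e. readable from every level but writable only from the secure levels 2 and 3. That interpretation is an assumption from NVIDIA's usual conventions, not spelled out in the patch; a sketch of decoding such a mask:

/* sketch: does privilege level `lvl` (0..3) pass the given mask?
 * assumes one allow-bit per level, as described above */
static bool
wpr_level_allowed(u32 mask, unsigned int lvl)
{
	return mask & BIT(lvl);	/* 0xc -> levels 2 and 3 only */
}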
95
96const struct acr_r352_func
97acr_r364_func = {
98 .fixup_hs_desc = acr_r364_fixup_hs_desc,
99 .generate_hs_bl_desc = acr_r361_generate_hs_bl_desc,
100 .hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
101 .ls_ucode_img_load = acr_r352_ls_ucode_img_load,
102 .ls_fill_headers = acr_r352_ls_fill_headers,
103 .ls_write_wpr = acr_r352_ls_write_wpr,
104 .ls_func = {
105 [NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func,
106 [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func,
107 [NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func,
108 },
109};
110
111
112struct nvkm_acr *
113acr_r364_new(unsigned long managed_falcons)
114{
115 return acr_r352_new_(&acr_r364_func, NVKM_SECBOOT_FALCON_PMU,
116 managed_falcons);
117}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c
new file mode 100644
index 000000000000..f860713642f1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c
@@ -0,0 +1,388 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include "acr_r367.h"
24#include "acr_r361.h"
25
26#include <core/gpuobj.h>
27
28/*
29 * r367 ACR: new LS signature format requires a rewrite of LS firmware and
30 * blob creation functions. Also the hsflcn_desc layout has changed slightly.
31 */
32
33#define LSF_LSB_DEPMAP_SIZE 11
34
35/**
36 * struct acr_r367_lsf_lsb_header - LS firmware header
37 *
38 * See also struct acr_r352_lsf_lsb_header for documentation.
39 */
40struct acr_r367_lsf_lsb_header {
41 /**
42 * LS falcon signatures
43 * @prd_keys: signature to use in production mode
44 * @dbg_keys: signature to use in debug mode
45 * @b_prd_present: whether the production key is present
46 * @b_dbg_present: whether the debug key is present
47 * @falcon_id: ID of the falcon the ucode applies to
48 */
49 struct {
50 u8 prd_keys[2][16];
51 u8 dbg_keys[2][16];
52 u32 b_prd_present;
53 u32 b_dbg_present;
54 u32 falcon_id;
55 u32 supports_versioning;
56 u32 version;
57 u32 depmap_count;
58 u8 depmap[LSF_LSB_DEPMAP_SIZE * 2 * 4];
59 u8 kdf[16];
60 } signature;
61 u32 ucode_off;
62 u32 ucode_size;
63 u32 data_size;
64 u32 bl_code_size;
65 u32 bl_imem_off;
66 u32 bl_data_off;
67 u32 bl_data_size;
68 u32 app_code_off;
69 u32 app_code_size;
70 u32 app_data_off;
71 u32 app_data_size;
72 u32 flags;
73};
74
75/**
76 * struct acr_r367_lsf_wpr_header - LS blob WPR Header
77 *
78 * See also struct acr_r352_lsf_wpr_header for documentation.
79 */
80struct acr_r367_lsf_wpr_header {
81 u32 falcon_id;
82 u32 lsb_offset;
83 u32 bootstrap_owner;
84 u32 lazy_bootstrap;
85 u32 bin_version;
86 u32 status;
87#define LSF_IMAGE_STATUS_NONE 0
88#define LSF_IMAGE_STATUS_COPY 1
89#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED 2
90#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED 3
91#define LSF_IMAGE_STATUS_VALIDATION_DONE 4
92#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED 5
93#define LSF_IMAGE_STATUS_BOOTSTRAP_READY 6
94#define LSF_IMAGE_STATUS_REVOCATION_CHECK_FAILED 7
95};
96
97/**
98 * struct ls_ucode_img_r367 - ucode image augmented with r367 headers
99 */
100struct ls_ucode_img_r367 {
101 struct ls_ucode_img base;
102
103 struct acr_r367_lsf_wpr_header wpr_header;
104 struct acr_r367_lsf_lsb_header lsb_header;
105};
106#define ls_ucode_img_r367(i) container_of(i, struct ls_ucode_img_r367, base)
107
108struct ls_ucode_img *
109acr_r367_ls_ucode_img_load(const struct acr_r352 *acr,
110 enum nvkm_secboot_falcon falcon_id)
111{
112 const struct nvkm_subdev *subdev = acr->base.subdev;
113 struct ls_ucode_img_r367 *img;
114 int ret;
115
116 img = kzalloc(sizeof(*img), GFP_KERNEL);
117 if (!img)
118 return ERR_PTR(-ENOMEM);
119
120 img->base.falcon_id = falcon_id;
121
122 ret = acr->func->ls_func[falcon_id]->load(subdev, &img->base);
123 if (ret) {
124 kfree(img->base.ucode_data);
125 kfree(img->base.sig);
126 kfree(img);
127 return ERR_PTR(ret);
128 }
129
130 /* Check that the signature size matches our expectations... */
131 if (img->base.sig_size != sizeof(img->lsb_header.signature)) {
132 nvkm_error(subdev, "invalid signature size for %s falcon!\n",
133 nvkm_secboot_falcon_name[falcon_id]);
 134		kfree(img->base.ucode_data);
 135		kfree(img->base.sig);
 136		kfree(img);
 137		return ERR_PTR(-EINVAL);
135 }
136
137 /* Copy signature to the right place */
138 memcpy(&img->lsb_header.signature, img->base.sig, img->base.sig_size);
139
 140	/* the signature should already have the right falcon_id; set it to be safe */
141 img->lsb_header.signature.falcon_id = falcon_id;
142
143 return &img->base;
144}
145
146#define LSF_LSB_HEADER_ALIGN 256
147#define LSF_BL_DATA_ALIGN 256
148#define LSF_BL_DATA_SIZE_ALIGN 256
149#define LSF_BL_CODE_SIZE_ALIGN 256
150#define LSF_UCODE_DATA_ALIGN 4096
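
All the LSF_*_ALIGN values above are powers of two, and the ALIGN() calls below round offsets up to the next multiple. For reference, a self-contained equivalent of that rounding (mirroring the kernel's ALIGN() for power-of-two alignments):

	#include <assert.h>

	/* round x up to the next multiple of a; a must be a power of two */
	#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		assert(ALIGN_UP(0, 256) == 0);
		assert(ALIGN_UP(1, 256) == 256);
		assert(ALIGN_UP(4097, 4096) == 8192);
		return 0;
	}
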
151
152static u32
153acr_r367_ls_img_fill_headers(struct acr_r352 *acr,
154 struct ls_ucode_img_r367 *img, u32 offset)
155{
156 struct ls_ucode_img *_img = &img->base;
157 struct acr_r367_lsf_wpr_header *whdr = &img->wpr_header;
158 struct acr_r367_lsf_lsb_header *lhdr = &img->lsb_header;
159 struct ls_ucode_img_desc *desc = &_img->ucode_desc;
160 const struct acr_r352_ls_func *func =
161 acr->func->ls_func[_img->falcon_id];
162
163 /* Fill WPR header */
164 whdr->falcon_id = _img->falcon_id;
165 whdr->bootstrap_owner = acr->base.boot_falcon;
166 whdr->bin_version = lhdr->signature.version;
167 whdr->status = LSF_IMAGE_STATUS_COPY;
168
 169	/* Skip bootstrapping falcons started by something other than the ACR */
170 if (acr->lazy_bootstrap & BIT(_img->falcon_id))
171 whdr->lazy_bootstrap = 1;
172
 173	/* Align, save off, and include the LSB header size */
174 offset = ALIGN(offset, LSF_LSB_HEADER_ALIGN);
175 whdr->lsb_offset = offset;
176 offset += sizeof(*lhdr);
177
178 /*
179 * Align, save off, and include the original (static) ucode
180 * image size
181 */
182 offset = ALIGN(offset, LSF_UCODE_DATA_ALIGN);
183 _img->ucode_off = lhdr->ucode_off = offset;
184 offset += _img->ucode_size;
185
186 /*
187 * For falcons that use a boot loader (BL), we append a loader
188 * desc structure on the end of the ucode image and consider
189 * this the boot loader data. The host will then copy the loader
190 * desc args to this space within the WPR region (before locking
191 * down) and the HS bin will then copy them to DMEM 0 for the
192 * loader.
193 */
194 lhdr->bl_code_size = ALIGN(desc->bootloader_size,
195 LSF_BL_CODE_SIZE_ALIGN);
196 lhdr->ucode_size = ALIGN(desc->app_resident_data_offset,
197 LSF_BL_CODE_SIZE_ALIGN) + lhdr->bl_code_size;
198 lhdr->data_size = ALIGN(desc->app_size, LSF_BL_CODE_SIZE_ALIGN) +
199 lhdr->bl_code_size - lhdr->ucode_size;
 200	/*
 201	 * Though the BL is located at offset 0 of the image, its VA is
 202	 * different to ensure it does not collide with the actual OS VA
 203	 * range.
 204	 */
205 lhdr->bl_imem_off = desc->bootloader_imem_offset;
206 lhdr->app_code_off = desc->app_start_offset +
207 desc->app_resident_code_offset;
208 lhdr->app_code_size = desc->app_resident_code_size;
209 lhdr->app_data_off = desc->app_start_offset +
210 desc->app_resident_data_offset;
211 lhdr->app_data_size = desc->app_resident_data_size;
212
213 lhdr->flags = func->lhdr_flags;
214 if (_img->falcon_id == acr->base.boot_falcon)
215 lhdr->flags |= LSF_FLAG_DMACTL_REQ_CTX;
216
217 /* Align and save off BL descriptor size */
218 lhdr->bl_data_size = ALIGN(func->bl_desc_size, LSF_BL_DATA_SIZE_ALIGN);
219
220 /*
221 * Align, save off, and include the additional BL data
222 */
223 offset = ALIGN(offset, LSF_BL_DATA_ALIGN);
224 lhdr->bl_data_off = offset;
225 offset += lhdr->bl_data_size;
226
227 return offset;
228}
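
To make the accounting above concrete, here is a small userspace sketch of the same sequence — align and reserve the LSB header, then the ucode image, then the BL data — using hypothetical sizes rather than real firmware values:

	#include <stdio.h>

	#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

	/* mirrors the offset accounting of acr_r367_ls_img_fill_headers() */
	static unsigned int
	fill_one_image(unsigned int offset, unsigned int lsb_size,
		       unsigned int ucode_size, unsigned int bl_data_size)
	{
		offset = ALIGN_UP(offset, 256);		/* LSF_LSB_HEADER_ALIGN */
		printf("lsb_offset  = 0x%x\n", offset);
		offset += lsb_size;

		offset = ALIGN_UP(offset, 4096);	/* LSF_UCODE_DATA_ALIGN */
		printf("ucode_off   = 0x%x\n", offset);
		offset += ucode_size;

		offset = ALIGN_UP(offset, 256);		/* LSF_BL_DATA_ALIGN */
		printf("bl_data_off = 0x%x\n", offset);
		offset += ALIGN_UP(bl_data_size, 256);	/* LSF_BL_DATA_SIZE_ALIGN */

		return offset;
	}

	int main(void)
	{
		/* hypothetical image: 0x120-byte LSB header, 0x8000 ucode, 0x40 BL desc */
		printf("next free   = 0x%x\n", fill_one_image(0x100, 0x120, 0x8000, 0x40));
		return 0;
	}
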
229
230int
231acr_r367_ls_fill_headers(struct acr_r352 *acr, struct list_head *imgs)
232{
233 struct ls_ucode_img_r367 *img;
234 struct list_head *l;
235 u32 count = 0;
236 u32 offset;
237
238 /* Count the number of images to manage */
239 list_for_each(l, imgs)
240 count++;
241
242 /*
243 * Start with an array of WPR headers at the base of the WPR.
244 * The expectation here is that the secure falcon will do a single DMA
245 * read of this array and cache it internally so it's ok to pack these.
246 * Also, we add 1 to the falcon count to indicate the end of the array.
247 */
248 offset = sizeof(img->wpr_header) * (count + 1);
249
250 /*
251 * Walk the managed falcons, accounting for the LSB structs
252 * as well as the ucode images.
253 */
254 list_for_each_entry(img, imgs, base.node) {
255 offset = acr_r367_ls_img_fill_headers(acr, img, offset);
256 }
257
258 return offset;
259}
260
261int
262acr_r367_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
263 struct nvkm_gpuobj *wpr_blob, u64 wpr_addr)
264{
265 struct ls_ucode_img *_img;
266 u32 pos = 0;
267
268 nvkm_kmap(wpr_blob);
269
270 list_for_each_entry(_img, imgs, node) {
271 struct ls_ucode_img_r367 *img = ls_ucode_img_r367(_img);
272 const struct acr_r352_ls_func *ls_func =
273 acr->func->ls_func[_img->falcon_id];
274 u8 gdesc[ls_func->bl_desc_size];
275
276 nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
277 sizeof(img->wpr_header));
278
279 nvkm_gpuobj_memcpy_to(wpr_blob, img->wpr_header.lsb_offset,
280 &img->lsb_header, sizeof(img->lsb_header));
281
282 /* Generate and write BL descriptor */
283 memset(gdesc, 0, ls_func->bl_desc_size);
284 ls_func->generate_bl_desc(&acr->base, _img, wpr_addr, gdesc);
285
286 nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.bl_data_off,
287 gdesc, ls_func->bl_desc_size);
288
289 /* Copy ucode */
290 nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.ucode_off,
291 _img->ucode_data, _img->ucode_size);
292
293 pos += sizeof(img->wpr_header);
294 }
295
296 nvkm_wo32(wpr_blob, pos, NVKM_SECBOOT_FALCON_INVALID);
297
298 nvkm_done(wpr_blob);
299
300 return 0;
301}
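
The loop above packs one WPR header per image at the base of the blob, then terminates the array with an invalid falcon ID — that terminator is what the secure falcon scans for when it reads the array. A hedged userspace model of the layout (plain memory instead of a GPU object, made-up INVALID value and header fields):

	#include <stdio.h>
	#include <string.h>

	#define FALCON_INVALID 0xffffffffu	/* stand-in for NVKM_SECBOOT_FALCON_INVALID */

	struct wpr_header { unsigned int falcon_id; unsigned int lsb_offset; };

	int main(void)
	{
		unsigned char blob[4096] = { 0 };
		struct wpr_header hdrs[] = { { 0, 0x100 }, { 1, 0x800 } };
		unsigned int pos = 0, end = FALCON_INVALID, i;

		for (i = 0; i < 2; i++) {	/* one header per managed image */
			memcpy(blob + pos, &hdrs[i], sizeof(hdrs[i]));
			pos += sizeof(hdrs[i]);
		}
		/* terminator: first word of the would-be next header */
		memcpy(blob + pos, &end, sizeof(end));

		printf("header array ends at 0x%x\n", pos);
		return 0;
	}
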
302
303struct acr_r367_hsflcn_desc {
304 u8 reserved_dmem[0x200];
305 u32 signatures[4];
306 u32 wpr_region_id;
307 u32 wpr_offset;
308 u32 mmu_memory_range;
309#define FLCN_ACR_MAX_REGIONS 2
310 struct {
311 u32 no_regions;
312 struct {
313 u32 start_addr;
314 u32 end_addr;
315 u32 region_id;
316 u32 read_mask;
317 u32 write_mask;
318 u32 client_mask;
319 u32 shadow_mem_start_addr;
320 } region_props[FLCN_ACR_MAX_REGIONS];
321 } regions;
322 u32 ucode_blob_size;
323 u64 ucode_blob_base __aligned(8);
324 struct {
325 u32 vpr_enabled;
326 u32 vpr_start;
327 u32 vpr_end;
328 u32 hdcp_policies;
329 } vpr_desc;
330};
331
332void
333acr_r367_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb,
334 void *_desc)
335{
336 struct acr_r367_hsflcn_desc *desc = _desc;
337 struct nvkm_gpuobj *ls_blob = acr->ls_blob;
338
339 /* WPR region information if WPR is not fixed */
340 if (sb->wpr_size == 0) {
341 u64 wpr_start = ls_blob->addr;
342 u64 wpr_end = ls_blob->addr + ls_blob->size;
343
344 if (acr->func->shadow_blob)
345 wpr_start += ls_blob->size / 2;
346
347 desc->wpr_region_id = 1;
348 desc->regions.no_regions = 2;
349 desc->regions.region_props[0].start_addr = wpr_start >> 8;
350 desc->regions.region_props[0].end_addr = wpr_end >> 8;
351 desc->regions.region_props[0].region_id = 1;
352 desc->regions.region_props[0].read_mask = 0xf;
353 desc->regions.region_props[0].write_mask = 0xc;
354 desc->regions.region_props[0].client_mask = 0x2;
355 if (acr->func->shadow_blob)
356 desc->regions.region_props[0].shadow_mem_start_addr =
357 ls_blob->addr >> 8;
358 else
359 desc->regions.region_props[0].shadow_mem_start_addr = 0;
360 } else {
361 desc->ucode_blob_base = ls_blob->addr;
362 desc->ucode_blob_size = ls_blob->size;
363 }
364}
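
The start_addr/end_addr fields written above are in 256-byte units, hence the >> 8 shifts, and with a shadow blob the live WPR half sits in the upper half of the LS blob. A short sketch of that address conversion with illustrative values:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long blob_addr = 0x40000000ull, blob_size = 0x200000ull;
		int shadow = 1;	/* acr->func->shadow_blob */

		unsigned long long wpr_start = blob_addr + (shadow ? blob_size / 2 : 0);
		unsigned long long wpr_end = blob_addr + blob_size;

		/* the descriptor stores addresses in 256-byte units */
		printf("start_addr = 0x%llx\n", wpr_start >> 8);
		printf("end_addr   = 0x%llx\n", wpr_end >> 8);
		return 0;
	}
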
365
366const struct acr_r352_func
367acr_r367_func = {
368 .fixup_hs_desc = acr_r367_fixup_hs_desc,
369 .generate_hs_bl_desc = acr_r361_generate_hs_bl_desc,
370 .hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
371 .shadow_blob = true,
372 .ls_ucode_img_load = acr_r367_ls_ucode_img_load,
373 .ls_fill_headers = acr_r367_ls_fill_headers,
374 .ls_write_wpr = acr_r367_ls_write_wpr,
375 .ls_func = {
376 [NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func,
377 [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func,
378 [NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func,
379 [NVKM_SECBOOT_FALCON_SEC2] = &acr_r361_ls_sec2_func,
380 },
381};
382
383struct nvkm_acr *
384acr_r367_new(enum nvkm_secboot_falcon boot_falcon,
385 unsigned long managed_falcons)
386{
387 return acr_r352_new_(&acr_r367_func, boot_falcon, managed_falcons);
388}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.h
new file mode 100644
index 000000000000..ec6a71ca36be
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.h
@@ -0,0 +1,35 @@
1/*
2 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef __NVKM_SECBOOT_ACR_R367_H__
24#define __NVKM_SECBOOT_ACR_R367_H__
25
26#include "acr_r352.h"
27
28void acr_r367_fixup_hs_desc(struct acr_r352 *, struct nvkm_secboot *, void *);
29
30struct ls_ucode_img *acr_r367_ls_ucode_img_load(const struct acr_r352 *,
31 enum nvkm_secboot_falcon);
32int acr_r367_ls_fill_headers(struct acr_r352 *, struct list_head *);
33int acr_r367_ls_write_wpr(struct acr_r352 *, struct list_head *,
34 struct nvkm_gpuobj *, u64);
35#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c
new file mode 100644
index 000000000000..ddb795bb007b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c
@@ -0,0 +1,165 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include "acr_r367.h"
24
25#include <engine/falcon.h>
26#include <core/msgqueue.h>
27#include <subdev/pmu.h>
28
29/*
30 * r375 ACR: similar to r367, but with a unified bootloader descriptor
31 * structure for GR and PMU falcons.
32 */
33
34/* Same as acr_r361_flcn_bl_desc, plus argc/argv */
35struct acr_r375_flcn_bl_desc {
36 u32 reserved[4];
37 u32 signature[4];
38 u32 ctx_dma;
39 struct flcn_u64 code_dma_base;
40 u32 non_sec_code_off;
41 u32 non_sec_code_size;
42 u32 sec_code_off;
43 u32 sec_code_size;
44 u32 code_entry_point;
45 struct flcn_u64 data_dma_base;
46 u32 data_size;
47 u32 argc;
48 u32 argv;
49};
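
code_dma_base and data_dma_base are struct flcn_u64 values. Assuming, as the rest of the driver suggests, that flcn_u64 is a low/high pair of 32-bit words, u64_to_flcn64() amounts to the split below (illustrative helper, not the driver's definition):

	#include <assert.h>

	/* assumed layout: low word first, then high word */
	struct flcn_u64_sketch { unsigned int lo, hi; };

	static struct flcn_u64_sketch split_u64(unsigned long long u)
	{
		struct flcn_u64_sketch r = { (unsigned int)u, (unsigned int)(u >> 32) };
		return r;
	}

	int main(void)
	{
		struct flcn_u64_sketch r = split_u64(0x123456789abcdef0ull);
		assert(r.lo == 0x9abcdef0 && r.hi == 0x12345678);
		return 0;
	}
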
50
51static void
52acr_r375_generate_flcn_bl_desc(const struct nvkm_acr *acr,
53 const struct ls_ucode_img *img, u64 wpr_addr,
54 void *_desc)
55{
56 struct acr_r375_flcn_bl_desc *desc = _desc;
57 const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
58 u64 base, addr_code, addr_data;
59
60 base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
61 addr_code = base + pdesc->app_resident_code_offset;
62 addr_data = base + pdesc->app_resident_data_offset;
63
64 desc->ctx_dma = FALCON_DMAIDX_UCODE;
65 desc->code_dma_base = u64_to_flcn64(addr_code);
66 desc->non_sec_code_off = pdesc->app_resident_code_offset;
67 desc->non_sec_code_size = pdesc->app_resident_code_size;
68 desc->code_entry_point = pdesc->app_imem_entry;
69 desc->data_dma_base = u64_to_flcn64(addr_data);
70 desc->data_size = pdesc->app_resident_data_size;
71}
72
73static void
74acr_r375_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
75 u64 offset)
76{
77 struct acr_r375_flcn_bl_desc *bl_desc = _bl_desc;
78
79 bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
80 bl_desc->non_sec_code_off = hdr->non_sec_code_off;
81 bl_desc->non_sec_code_size = hdr->non_sec_code_size;
82 bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0);
83 bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0);
84 bl_desc->code_entry_point = 0;
85 bl_desc->code_dma_base = u64_to_flcn64(offset);
86 bl_desc->data_dma_base = u64_to_flcn64(offset + hdr->data_dma_base);
87 bl_desc->data_size = hdr->data_size;
88}
89
90const struct acr_r352_ls_func
91acr_r375_ls_fecs_func = {
92 .load = acr_ls_ucode_load_fecs,
93 .generate_bl_desc = acr_r375_generate_flcn_bl_desc,
94 .bl_desc_size = sizeof(struct acr_r375_flcn_bl_desc),
95};
96
97const struct acr_r352_ls_func
98acr_r375_ls_gpccs_func = {
99 .load = acr_ls_ucode_load_gpccs,
100 .generate_bl_desc = acr_r375_generate_flcn_bl_desc,
101 .bl_desc_size = sizeof(struct acr_r375_flcn_bl_desc),
102 /* GPCCS will be loaded using PRI */
103 .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
104};
105
106
107static void
108acr_r375_generate_pmu_bl_desc(const struct nvkm_acr *acr,
109 const struct ls_ucode_img *img, u64 wpr_addr,
110 void *_desc)
111{
112 const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
113 const struct nvkm_pmu *pmu = acr->subdev->device->pmu;
114 struct acr_r375_flcn_bl_desc *desc = _desc;
115 u64 base, addr_code, addr_data;
116 u32 addr_args;
117
118 base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
119 addr_code = base + pdesc->app_resident_code_offset;
120 addr_data = base + pdesc->app_resident_data_offset;
121 addr_args = pmu->falcon->data.limit;
122 addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;
123
124 desc->ctx_dma = FALCON_DMAIDX_UCODE;
125 desc->code_dma_base = u64_to_flcn64(addr_code);
126 desc->non_sec_code_off = pdesc->app_resident_code_offset;
127 desc->non_sec_code_size = pdesc->app_resident_code_size;
128 desc->code_entry_point = pdesc->app_imem_entry;
129 desc->data_dma_base = u64_to_flcn64(addr_data);
130 desc->data_size = pdesc->app_resident_data_size;
131 desc->argc = 1;
132 desc->argv = addr_args;
133}
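
Both generators above derive the falcon's DMA addresses from the WPR base plus the per-image offsets; the PMU variant additionally points argv at the command line placed in the last bytes of DMEM. A compact model of that address arithmetic, with all values hypothetical:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long wpr_addr = 0x40000000ull, ucode_off = 0x2000;
		unsigned int app_start = 0x100, code_off = 0x0, data_off = 0x6000;
		unsigned int dmem_limit = 0x10000, cmdline_size = 0x100;

		unsigned long long base = wpr_addr + ucode_off + app_start;
		printf("code_dma_base = 0x%llx\n", base + code_off);
		printf("data_dma_base = 0x%llx\n", base + data_off);

		/* PMU only: the command line sits at the top of DMEM */
		printf("argv          = 0x%x\n", dmem_limit - cmdline_size);
		return 0;
	}
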
134
135const struct acr_r352_ls_func
136acr_r375_ls_pmu_func = {
137 .load = acr_ls_ucode_load_pmu,
138 .generate_bl_desc = acr_r375_generate_pmu_bl_desc,
139 .bl_desc_size = sizeof(struct acr_r375_flcn_bl_desc),
140 .post_run = acr_ls_pmu_post_run,
141};
142
143
144const struct acr_r352_func
145acr_r375_func = {
146 .fixup_hs_desc = acr_r367_fixup_hs_desc,
147 .generate_hs_bl_desc = acr_r375_generate_hs_bl_desc,
148 .hs_bl_desc_size = sizeof(struct acr_r375_flcn_bl_desc),
149 .shadow_blob = true,
150 .ls_ucode_img_load = acr_r367_ls_ucode_img_load,
151 .ls_fill_headers = acr_r367_ls_fill_headers,
152 .ls_write_wpr = acr_r367_ls_write_wpr,
153 .ls_func = {
154 [NVKM_SECBOOT_FALCON_FECS] = &acr_r375_ls_fecs_func,
155 [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r375_ls_gpccs_func,
156 [NVKM_SECBOOT_FALCON_PMU] = &acr_r375_ls_pmu_func,
157 },
158};
159
160struct nvkm_acr *
161acr_r375_new(enum nvkm_secboot_falcon boot_falcon,
162 unsigned long managed_falcons)
163{
164 return acr_r352_new_(&acr_r375_func, boot_falcon, managed_falcons);
165}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
index 27c9dfffb9a6..5c11e8c50964 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
@@ -87,6 +87,7 @@
87#include <subdev/mc.h> 87#include <subdev/mc.h>
88#include <subdev/timer.h> 88#include <subdev/timer.h>
89#include <subdev/pmu.h> 89#include <subdev/pmu.h>
90#include <engine/sec2.h>
90 91
91const char * 92const char *
92nvkm_secboot_falcon_name[] = { 93nvkm_secboot_falcon_name[] = {
@@ -94,6 +95,7 @@ nvkm_secboot_falcon_name[] = {
94 [NVKM_SECBOOT_FALCON_RESERVED] = "<reserved>", 95 [NVKM_SECBOOT_FALCON_RESERVED] = "<reserved>",
95 [NVKM_SECBOOT_FALCON_FECS] = "FECS", 96 [NVKM_SECBOOT_FALCON_FECS] = "FECS",
96 [NVKM_SECBOOT_FALCON_GPCCS] = "GPCCS", 97 [NVKM_SECBOOT_FALCON_GPCCS] = "GPCCS",
98 [NVKM_SECBOOT_FALCON_SEC2] = "SEC2",
97 [NVKM_SECBOOT_FALCON_END] = "<invalid>", 99 [NVKM_SECBOOT_FALCON_END] = "<invalid>",
98}; 100};
99/** 101/**
@@ -131,13 +133,20 @@ nvkm_secboot_oneinit(struct nvkm_subdev *subdev)
131 133
132 switch (sb->acr->boot_falcon) { 134 switch (sb->acr->boot_falcon) {
133 case NVKM_SECBOOT_FALCON_PMU: 135 case NVKM_SECBOOT_FALCON_PMU:
134 sb->boot_falcon = subdev->device->pmu->falcon; 136 sb->halt_falcon = sb->boot_falcon = subdev->device->pmu->falcon;
137 break;
138 case NVKM_SECBOOT_FALCON_SEC2:
139 /* we must keep SEC2 alive forever since ACR will run on it */
140 nvkm_engine_ref(&subdev->device->sec2->engine);
141 sb->boot_falcon = subdev->device->sec2->falcon;
142 sb->halt_falcon = subdev->device->pmu->falcon;
135 break; 143 break;
136 default: 144 default:
137 nvkm_error(subdev, "Unmanaged boot falcon %s!\n", 145 nvkm_error(subdev, "Unmanaged boot falcon %s!\n",
138 nvkm_secboot_falcon_name[sb->acr->boot_falcon]); 146 nvkm_secboot_falcon_name[sb->acr->boot_falcon]);
139 return -EINVAL; 147 return -EINVAL;
140 } 148 }
149 nvkm_debug(subdev, "using %s falcon for ACR\n", sb->boot_falcon->name);
141 150
142 /* Call chip-specific init function */ 151 /* Call chip-specific init function */
143 if (sb->func->oneinit) 152 if (sb->func->oneinit)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
index 813c4eb0b25f..73ca1203281d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
@@ -34,12 +34,13 @@
34 * 34 *
35 */ 35 */
36int 36int
37gm200_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob) 37gm200_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob,
38 struct nvkm_falcon *falcon)
38{ 39{
39 struct gm200_secboot *gsb = gm200_secboot(sb); 40 struct gm200_secboot *gsb = gm200_secboot(sb);
40 struct nvkm_subdev *subdev = &gsb->base.subdev; 41 struct nvkm_subdev *subdev = &gsb->base.subdev;
41 struct nvkm_falcon *falcon = gsb->base.boot_falcon;
42 struct nvkm_vma vma; 42 struct nvkm_vma vma;
43 u32 start_address;
43 int ret; 44 int ret;
44 45
45 ret = nvkm_falcon_get(falcon, subdev); 46 ret = nvkm_falcon_get(falcon, subdev);
@@ -60,10 +61,12 @@ gm200_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob)
60 nvkm_falcon_bind_context(falcon, gsb->inst); 61 nvkm_falcon_bind_context(falcon, gsb->inst);
61 62
62 /* Load the HS bootloader into the falcon's IMEM/DMEM */ 63 /* Load the HS bootloader into the falcon's IMEM/DMEM */
63 ret = sb->acr->func->load(sb->acr, &gsb->base, blob, vma.offset); 64 ret = sb->acr->func->load(sb->acr, falcon, blob, vma.offset);
64 if (ret) 65 if (ret < 0)
65 goto end; 66 goto end;
66 67
68 start_address = ret;
69
67 /* Disable interrupts as we will poll for the HALT bit */ 70 /* Disable interrupts as we will poll for the HALT bit */
68 nvkm_mc_intr_mask(sb->subdev.device, falcon->owner->index, false); 71 nvkm_mc_intr_mask(sb->subdev.device, falcon->owner->index, false);
69 72
@@ -71,19 +74,17 @@ gm200_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob)
71 nvkm_falcon_wr32(falcon, 0x040, 0xdeada5a5); 74 nvkm_falcon_wr32(falcon, 0x040, 0xdeada5a5);
72 75
73 /* Start the HS bootloader */ 76 /* Start the HS bootloader */
74 nvkm_falcon_set_start_addr(falcon, sb->acr->start_address); 77 nvkm_falcon_set_start_addr(falcon, start_address);
75 nvkm_falcon_start(falcon); 78 nvkm_falcon_start(falcon);
76 ret = nvkm_falcon_wait_for_halt(falcon, 100); 79 ret = nvkm_falcon_wait_for_halt(falcon, 100);
77 if (ret) 80 if (ret)
78 goto end; 81 goto end;
79 82
80 /* If mailbox register contains an error code, then ACR has failed */ 83 /*
84 * The mailbox register contains the (positive) error code - return this
85 * to the caller
86 */
81 ret = nvkm_falcon_rd32(falcon, 0x040); 87 ret = nvkm_falcon_rd32(falcon, 0x040);
82 if (ret) {
83 nvkm_error(subdev, "ACR boot failed, ret 0x%08x", ret);
84 ret = -EINVAL;
85 goto end;
86 }
87 88
88end: 89end:
89 /* Reenable interrupts */ 90 /* Reenable interrupts */
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h
index 45adf1a3bc20..6dc9fc384f24 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h
@@ -38,6 +38,7 @@ struct gm200_secboot {
38int gm200_secboot_oneinit(struct nvkm_secboot *); 38int gm200_secboot_oneinit(struct nvkm_secboot *);
39int gm200_secboot_fini(struct nvkm_secboot *, bool); 39int gm200_secboot_fini(struct nvkm_secboot *, bool);
40void *gm200_secboot_dtor(struct nvkm_secboot *); 40void *gm200_secboot_dtor(struct nvkm_secboot *);
41int gm200_secboot_run_blob(struct nvkm_secboot *, struct nvkm_gpuobj *); 41int gm200_secboot_run_blob(struct nvkm_secboot *, struct nvkm_gpuobj *,
42 struct nvkm_falcon *);
42 43
43#endif 44#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
index 6707b8edc086..29e6f73dfd7e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
@@ -107,9 +107,12 @@ gm20b_secboot_new(struct nvkm_device *device, int index,
107 struct gm200_secboot *gsb; 107 struct gm200_secboot *gsb;
108 struct nvkm_acr *acr; 108 struct nvkm_acr *acr;
109 109
110 acr = acr_r352_new(BIT(NVKM_SECBOOT_FALCON_FECS)); 110 acr = acr_r352_new(BIT(NVKM_SECBOOT_FALCON_FECS) |
111 BIT(NVKM_SECBOOT_FALCON_PMU));
111 if (IS_ERR(acr)) 112 if (IS_ERR(acr))
112 return PTR_ERR(acr); 113 return PTR_ERR(acr);
114 /* Support the initial GM20B firmware release without PMU */
115 acr->optional_falcons = BIT(NVKM_SECBOOT_FALCON_PMU);
113 116
114 gsb = kzalloc(sizeof(*gsb), GFP_KERNEL); 117 gsb = kzalloc(sizeof(*gsb), GFP_KERNEL);
115 if (!gsb) { 118 if (!gsb) {
@@ -137,3 +140,6 @@ MODULE_FIRMWARE("nvidia/gm20b/gr/sw_ctx.bin");
137MODULE_FIRMWARE("nvidia/gm20b/gr/sw_nonctx.bin"); 140MODULE_FIRMWARE("nvidia/gm20b/gr/sw_nonctx.bin");
138MODULE_FIRMWARE("nvidia/gm20b/gr/sw_bundle_init.bin"); 141MODULE_FIRMWARE("nvidia/gm20b/gr/sw_bundle_init.bin");
139MODULE_FIRMWARE("nvidia/gm20b/gr/sw_method_init.bin"); 142MODULE_FIRMWARE("nvidia/gm20b/gr/sw_method_init.bin");
143MODULE_FIRMWARE("nvidia/gm20b/pmu/desc.bin");
144MODULE_FIRMWARE("nvidia/gm20b/pmu/image.bin");
145MODULE_FIRMWARE("nvidia/gm20b/pmu/sig.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
new file mode 100644
index 000000000000..8570c84c8a29
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
@@ -0,0 +1,251 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include "acr.h"
24#include "gm200.h"
25
26#include "ls_ucode.h"
27#include "hs_ucode.h"
28#include <subdev/mc.h>
29#include <subdev/timer.h>
30#include <engine/falcon.h>
31#include <engine/nvdec.h>
32
33static bool
34gp102_secboot_scrub_required(struct nvkm_secboot *sb)
35{
36 struct nvkm_subdev *subdev = &sb->subdev;
37 struct nvkm_device *device = subdev->device;
38 u32 reg;
39
40 nvkm_wr32(device, 0x100cd0, 0x2);
41 reg = nvkm_rd32(device, 0x100cd0);
42
43 return (reg & BIT(4));
44}
45
46static int
47gp102_run_secure_scrub(struct nvkm_secboot *sb)
48{
49 struct nvkm_subdev *subdev = &sb->subdev;
50 struct nvkm_device *device = subdev->device;
51 struct nvkm_engine *engine;
52 struct nvkm_falcon *falcon;
53 void *scrub_image;
54 struct fw_bin_header *hsbin_hdr;
55 struct hsf_fw_header *fw_hdr;
56 struct hsf_load_header *lhdr;
57 void *scrub_data;
58 int ret;
59
60 nvkm_debug(subdev, "running VPR scrubber binary on NVDEC...\n");
61
 62	engine = nvkm_engine_ref(&device->nvdec->engine);
 63	if (IS_ERR(engine))
 64		return PTR_ERR(engine);
64 falcon = device->nvdec->falcon;
65
66 nvkm_falcon_get(falcon, &sb->subdev);
67
 68	scrub_image = hs_ucode_load_blob(subdev, falcon, "nvdec/scrubber");
 69	if (IS_ERR(scrub_image)) {
 70		ret = PTR_ERR(scrub_image);
 71		goto end;
 72	}
71
72 nvkm_falcon_reset(falcon);
73 nvkm_falcon_bind_context(falcon, NULL);
74
75 hsbin_hdr = scrub_image;
76 fw_hdr = scrub_image + hsbin_hdr->header_offset;
77 lhdr = scrub_image + fw_hdr->hdr_offset;
78 scrub_data = scrub_image + hsbin_hdr->data_offset;
79
80 nvkm_falcon_load_imem(falcon, scrub_data, lhdr->non_sec_code_off,
81 lhdr->non_sec_code_size,
82 lhdr->non_sec_code_off >> 8, 0, false);
83 nvkm_falcon_load_imem(falcon, scrub_data + lhdr->apps[0],
84 ALIGN(lhdr->apps[0], 0x100),
85 lhdr->apps[1],
86 lhdr->apps[0] >> 8, 0, true);
87 nvkm_falcon_load_dmem(falcon, scrub_data + lhdr->data_dma_base, 0,
88 lhdr->data_size, 0);
89
90 kfree(scrub_image);
91
92 nvkm_falcon_set_start_addr(falcon, 0x0);
93 nvkm_falcon_start(falcon);
94
95 ret = nvkm_falcon_wait_for_halt(falcon, 500);
96 if (ret < 0) {
97 nvkm_error(subdev, "failed to run VPR scrubber binary!\n");
98 ret = -ETIMEDOUT;
99 goto end;
100 }
101
102 /* put nvdec in clean state - without reset it will remain in HS mode */
103 nvkm_falcon_reset(falcon);
104
105 if (gp102_secboot_scrub_required(sb)) {
106 nvkm_error(subdev, "VPR scrubber binary failed!\n");
107 ret = -EINVAL;
108 goto end;
109 }
110
111 nvkm_debug(subdev, "VPR scrub successfully completed\n");
112
113end:
114 nvkm_falcon_put(falcon, &sb->subdev);
115 nvkm_engine_unref(&engine);
116 return ret;
117}
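
The scrub path locates everything through offsets stored in the blob itself: the bin header points at the firmware header and the data section, and the firmware header points at the load header. A userspace sketch of that pointer chase over a toy blob (minimal stand-in structs carrying only the fields used above):

	#include <stdio.h>
	#include <string.h>

	struct bin_header { unsigned int header_offset, data_offset; };
	struct fw_header { unsigned int hdr_offset; };
	struct load_header { unsigned int code_off, code_size; };

	int main(void)
	{
		unsigned char image[256] = { 0 };
		struct bin_header bin = { 16, 64 };
		struct fw_header fw = { 32 };
		struct load_header load = { 0, 128 };

		/* build a toy blob: bin header at 0, fw header at 16, load header at 32 */
		memcpy(image, &bin, sizeof(bin));
		memcpy(image + 16, &fw, sizeof(fw));
		memcpy(image + 32, &load, sizeof(load));

		/* the same chain gp102_run_secure_scrub() follows */
		struct bin_header *hsbin_hdr = (struct bin_header *)image;
		struct fw_header *fw_hdr = (struct fw_header *)(image + hsbin_hdr->header_offset);
		struct load_header *lhdr = (struct load_header *)(image + fw_hdr->hdr_offset);

		printf("code: %u bytes at data offset 0x%x (data at 0x%x)\n",
		       lhdr->code_size, lhdr->code_off, hsbin_hdr->data_offset);
		return 0;
	}
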
118
119static int
120gp102_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob,
121 struct nvkm_falcon *falcon)
122{
123 int ret;
124
125 /* make sure the VPR region is unlocked */
126 if (gp102_secboot_scrub_required(sb)) {
127 ret = gp102_run_secure_scrub(sb);
128 if (ret)
129 return ret;
130 }
131
132 return gm200_secboot_run_blob(sb, blob, falcon);
133}
134
135static const struct nvkm_secboot_func
136gp102_secboot = {
137 .dtor = gm200_secboot_dtor,
138 .oneinit = gm200_secboot_oneinit,
139 .fini = gm200_secboot_fini,
140 .run_blob = gp102_secboot_run_blob,
141};
142
143int
144gp102_secboot_new(struct nvkm_device *device, int index,
145 struct nvkm_secboot **psb)
146{
147 int ret;
148 struct gm200_secboot *gsb;
149 struct nvkm_acr *acr;
150
151 acr = acr_r367_new(NVKM_SECBOOT_FALCON_SEC2,
152 BIT(NVKM_SECBOOT_FALCON_FECS) |
153 BIT(NVKM_SECBOOT_FALCON_GPCCS) |
154 BIT(NVKM_SECBOOT_FALCON_SEC2));
155 if (IS_ERR(acr))
156 return PTR_ERR(acr);
157
158 gsb = kzalloc(sizeof(*gsb), GFP_KERNEL);
159 if (!gsb) {
 160		*psb = NULL;
161 return -ENOMEM;
162 }
163 *psb = &gsb->base;
164
165 ret = nvkm_secboot_ctor(&gp102_secboot, acr, device, index, &gsb->base);
166 if (ret)
167 return ret;
168
169 return 0;
170}
171
172MODULE_FIRMWARE("nvidia/gp102/acr/bl.bin");
173MODULE_FIRMWARE("nvidia/gp102/acr/unload_bl.bin");
174MODULE_FIRMWARE("nvidia/gp102/acr/ucode_load.bin");
175MODULE_FIRMWARE("nvidia/gp102/acr/ucode_unload.bin");
176MODULE_FIRMWARE("nvidia/gp102/gr/fecs_bl.bin");
177MODULE_FIRMWARE("nvidia/gp102/gr/fecs_inst.bin");
178MODULE_FIRMWARE("nvidia/gp102/gr/fecs_data.bin");
179MODULE_FIRMWARE("nvidia/gp102/gr/fecs_sig.bin");
180MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_bl.bin");
181MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_inst.bin");
182MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_data.bin");
183MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_sig.bin");
184MODULE_FIRMWARE("nvidia/gp102/gr/sw_ctx.bin");
185MODULE_FIRMWARE("nvidia/gp102/gr/sw_nonctx.bin");
186MODULE_FIRMWARE("nvidia/gp102/gr/sw_bundle_init.bin");
187MODULE_FIRMWARE("nvidia/gp102/gr/sw_method_init.bin");
188MODULE_FIRMWARE("nvidia/gp102/nvdec/scrubber.bin");
189MODULE_FIRMWARE("nvidia/gp102/sec2/desc.bin");
190MODULE_FIRMWARE("nvidia/gp102/sec2/image.bin");
191MODULE_FIRMWARE("nvidia/gp102/sec2/sig.bin");
192MODULE_FIRMWARE("nvidia/gp104/acr/bl.bin");
193MODULE_FIRMWARE("nvidia/gp104/acr/unload_bl.bin");
194MODULE_FIRMWARE("nvidia/gp104/acr/ucode_load.bin");
195MODULE_FIRMWARE("nvidia/gp104/acr/ucode_unload.bin");
196MODULE_FIRMWARE("nvidia/gp104/gr/fecs_bl.bin");
197MODULE_FIRMWARE("nvidia/gp104/gr/fecs_inst.bin");
198MODULE_FIRMWARE("nvidia/gp104/gr/fecs_data.bin");
199MODULE_FIRMWARE("nvidia/gp104/gr/fecs_sig.bin");
200MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_bl.bin");
201MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_inst.bin");
202MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_data.bin");
203MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_sig.bin");
204MODULE_FIRMWARE("nvidia/gp104/gr/sw_ctx.bin");
205MODULE_FIRMWARE("nvidia/gp104/gr/sw_nonctx.bin");
206MODULE_FIRMWARE("nvidia/gp104/gr/sw_bundle_init.bin");
207MODULE_FIRMWARE("nvidia/gp104/gr/sw_method_init.bin");
208MODULE_FIRMWARE("nvidia/gp104/nvdec/scrubber.bin");
209MODULE_FIRMWARE("nvidia/gp104/sec2/desc.bin");
210MODULE_FIRMWARE("nvidia/gp104/sec2/image.bin");
211MODULE_FIRMWARE("nvidia/gp104/sec2/sig.bin");
212MODULE_FIRMWARE("nvidia/gp106/acr/bl.bin");
213MODULE_FIRMWARE("nvidia/gp106/acr/unload_bl.bin");
214MODULE_FIRMWARE("nvidia/gp106/acr/ucode_load.bin");
215MODULE_FIRMWARE("nvidia/gp106/acr/ucode_unload.bin");
216MODULE_FIRMWARE("nvidia/gp106/gr/fecs_bl.bin");
217MODULE_FIRMWARE("nvidia/gp106/gr/fecs_inst.bin");
218MODULE_FIRMWARE("nvidia/gp106/gr/fecs_data.bin");
219MODULE_FIRMWARE("nvidia/gp106/gr/fecs_sig.bin");
220MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_bl.bin");
221MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_inst.bin");
222MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_data.bin");
223MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_sig.bin");
224MODULE_FIRMWARE("nvidia/gp106/gr/sw_ctx.bin");
225MODULE_FIRMWARE("nvidia/gp106/gr/sw_nonctx.bin");
226MODULE_FIRMWARE("nvidia/gp106/gr/sw_bundle_init.bin");
227MODULE_FIRMWARE("nvidia/gp106/gr/sw_method_init.bin");
228MODULE_FIRMWARE("nvidia/gp106/nvdec/scrubber.bin");
229MODULE_FIRMWARE("nvidia/gp106/sec2/desc.bin");
230MODULE_FIRMWARE("nvidia/gp106/sec2/image.bin");
231MODULE_FIRMWARE("nvidia/gp106/sec2/sig.bin");
232MODULE_FIRMWARE("nvidia/gp107/acr/bl.bin");
233MODULE_FIRMWARE("nvidia/gp107/acr/unload_bl.bin");
234MODULE_FIRMWARE("nvidia/gp107/acr/ucode_load.bin");
235MODULE_FIRMWARE("nvidia/gp107/acr/ucode_unload.bin");
236MODULE_FIRMWARE("nvidia/gp107/gr/fecs_bl.bin");
237MODULE_FIRMWARE("nvidia/gp107/gr/fecs_inst.bin");
238MODULE_FIRMWARE("nvidia/gp107/gr/fecs_data.bin");
239MODULE_FIRMWARE("nvidia/gp107/gr/fecs_sig.bin");
240MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_bl.bin");
241MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_inst.bin");
242MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_data.bin");
243MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_sig.bin");
244MODULE_FIRMWARE("nvidia/gp107/gr/sw_ctx.bin");
245MODULE_FIRMWARE("nvidia/gp107/gr/sw_nonctx.bin");
246MODULE_FIRMWARE("nvidia/gp107/gr/sw_bundle_init.bin");
247MODULE_FIRMWARE("nvidia/gp107/gr/sw_method_init.bin");
248MODULE_FIRMWARE("nvidia/gp107/nvdec/scrubber.bin");
249MODULE_FIRMWARE("nvidia/gp107/sec2/desc.bin");
250MODULE_FIRMWARE("nvidia/gp107/sec2/image.bin");
251MODULE_FIRMWARE("nvidia/gp107/sec2/sig.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.c
new file mode 100644
index 000000000000..6b33182ddc2f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.c
@@ -0,0 +1,97 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include "hs_ucode.h"
24#include "ls_ucode.h"
25#include "acr.h"
26
27#include <engine/falcon.h>
28
29/**
 30 * hs_ucode_patch_signature() - patch the HS blob with the correct signature
 31 * for the specified falcon.
32 */
33static void
34hs_ucode_patch_signature(const struct nvkm_falcon *falcon, void *acr_image,
35 bool new_format)
36{
37 struct fw_bin_header *hsbin_hdr = acr_image;
38 struct hsf_fw_header *fw_hdr = acr_image + hsbin_hdr->header_offset;
39 void *hs_data = acr_image + hsbin_hdr->data_offset;
40 void *sig;
41 u32 sig_size;
42 u32 patch_loc, patch_sig;
43
44 /*
45 * I had the brilliant idea to "improve" the binary format by
 46 * removing this useless indirection. However, to stay directly
 47 * compatible with NVIDIA's files, let's support both formats.
48 */
49 if (new_format) {
50 patch_loc = fw_hdr->patch_loc;
51 patch_sig = fw_hdr->patch_sig;
52 } else {
53 patch_loc = *(u32 *)(acr_image + fw_hdr->patch_loc);
54 patch_sig = *(u32 *)(acr_image + fw_hdr->patch_sig);
55 }
56
57 /* Falcon in debug or production mode? */
58 if (falcon->debug) {
59 sig = acr_image + fw_hdr->sig_dbg_offset;
60 sig_size = fw_hdr->sig_dbg_size;
61 } else {
62 sig = acr_image + fw_hdr->sig_prod_offset;
63 sig_size = fw_hdr->sig_prod_size;
64 }
65
66 /* Patch signature */
67 memcpy(hs_data + patch_loc, sig + patch_sig, sig_size);
68}
69
70void *
71hs_ucode_load_blob(struct nvkm_subdev *subdev, const struct nvkm_falcon *falcon,
72 const char *fw)
73{
74 void *acr_image;
75 bool new_format;
76
77 acr_image = nvkm_acr_load_firmware(subdev, fw, 0);
78 if (IS_ERR(acr_image))
79 return acr_image;
80
 81	/* detect the format to determine how the signature should be patched */
82 switch (((u32 *)acr_image)[0]) {
83 case 0x3b1d14f0:
84 new_format = true;
85 break;
86 case 0x000010de:
87 new_format = false;
88 break;
89 default:
90 nvkm_error(subdev, "unknown header for HS blob %s\n", fw);
91 return ERR_PTR(-EINVAL);
92 }
93
94 hs_ucode_patch_signature(falcon, acr_image, new_format);
95
96 return acr_image;
97}
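
The loader above distinguishes the two on-disk layouts purely by the blob's first 32-bit word, and the old format adds one level of indirection: patch_loc/patch_sig in the header are offsets to the real offsets. A self-contained sketch of that indirection (toy buffer, hypothetical offsets):

	#include <stdio.h>
	#include <string.h>

	static unsigned int read_u32(const unsigned char *p)
	{
		unsigned int v;
		memcpy(&v, p, sizeof(v));
		return v;
	}

	int main(void)
	{
		unsigned char image[64] = { 0 };
		unsigned int hdr_patch_loc = 8;	/* value stored in the fw header */
		unsigned int real_loc = 40;	/* where the signature really goes */
		int new_format = 0;
		unsigned int patch_loc;

		/* old format: the header field is itself an offset to the offset */
		memcpy(image + hdr_patch_loc, &real_loc, sizeof(real_loc));

		patch_loc = new_format ? hdr_patch_loc : read_u32(image + hdr_patch_loc);
		printf("signature patched at offset %u\n", patch_loc);	/* 40 */
		return 0;
	}
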
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.h
new file mode 100644
index 000000000000..d8cfc6f7752a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.h
@@ -0,0 +1,81 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef __NVKM_SECBOOT_HS_UCODE_H__
24#define __NVKM_SECBOOT_HS_UCODE_H__
25
26#include <core/os.h>
27#include <core/subdev.h>
28
29struct nvkm_falcon;
30
31/**
32 * struct hsf_fw_header - HS firmware descriptor
33 * @sig_dbg_offset: offset of the debug signature
34 * @sig_dbg_size: size of the debug signature
35 * @sig_prod_offset: offset of the production signature
36 * @sig_prod_size: size of the production signature
37 * @patch_loc: offset of the offset (sic) of where the signature is
38 * @patch_sig: offset of the offset (sic) to add to sig_*_offset
39 * @hdr_offset: offset of the load header (see struct hs_load_header)
40 * @hdr_size: size of above header
41 *
42 * This structure is embedded in the HS firmware image at
43 * hs_bin_hdr.header_offset.
44 */
45struct hsf_fw_header {
46 u32 sig_dbg_offset;
47 u32 sig_dbg_size;
48 u32 sig_prod_offset;
49 u32 sig_prod_size;
50 u32 patch_loc;
51 u32 patch_sig;
52 u32 hdr_offset;
53 u32 hdr_size;
54};
55
56/**
57 * struct hsf_load_header - HS firmware load header
58 */
59struct hsf_load_header {
60 u32 non_sec_code_off;
61 u32 non_sec_code_size;
62 u32 data_dma_base;
63 u32 data_size;
64 u32 num_apps;
65 /*
66 * Organized as follows:
67 * - app0_code_off
68 * - app1_code_off
69 * - ...
70 * - appn_code_off
71 * - app0_code_size
72 * - app1_code_size
73 * - ...
74 */
75 u32 apps[0];
76};
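
The apps[] layout documented above — all code offsets first, then all code sizes — is what accessors like hsf_load_header_app_off(), used earlier in this series, have to honour. The driver defines its own helpers elsewhere; a sketch of what they must look like under that layout:

	#include <stdio.h>
	#include <stdlib.h>

	struct load_header_sketch {
		unsigned int num_apps;
		unsigned int apps[];	/* [0..n-1] code offsets, [n..2n-1] code sizes */
	};

	static unsigned int app_off(const struct load_header_sketch *h, unsigned int i)
	{
		return h->apps[i];
	}

	static unsigned int app_size(const struct load_header_sketch *h, unsigned int i)
	{
		return h->apps[h->num_apps + i];
	}

	int main(void)
	{
		struct load_header_sketch *h = malloc(sizeof(*h) + 4 * sizeof(unsigned int));
		h->num_apps = 2;
		h->apps[0] = 0x100; h->apps[1] = 0x900;	/* code offsets */
		h->apps[2] = 0x800; h->apps[3] = 0x200;	/* code sizes */

		printf("app0: 0x%x + 0x%x\n", app_off(h, 0), app_size(h, 0));
		free(h);
		return 0;
	}
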
77
78void *hs_ucode_load_blob(struct nvkm_subdev *, const struct nvkm_falcon *,
79 const char *);
80
81#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h
index 00886cee57eb..4ff9138a2a83 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h
@@ -27,6 +27,7 @@
27#include <core/subdev.h> 27#include <core/subdev.h>
28#include <subdev/secboot.h> 28#include <subdev/secboot.h>
29 29
30struct nvkm_acr;
30 31
31/** 32/**
32 * struct ls_ucode_img_desc - descriptor of firmware image 33 * struct ls_ucode_img_desc - descriptor of firmware image
@@ -83,6 +84,7 @@ struct ls_ucode_img_desc {
83 * @ucode_desc: loaded or generated map of ucode_data 84 * @ucode_desc: loaded or generated map of ucode_data
84 * @ucode_data: firmware payload (code and data) 85 * @ucode_data: firmware payload (code and data)
85 * @ucode_size: size in bytes of data in ucode_data 86 * @ucode_size: size in bytes of data in ucode_data
87 * @ucode_off: offset of the ucode in ucode_data
86 * @sig: signature for this firmware 88 * @sig: signature for this firmware
87 * @sig:size: size of the signature in bytes 89 * @sig:size: size of the signature in bytes
88 * 90 *
@@ -97,6 +99,7 @@ struct ls_ucode_img {
97 struct ls_ucode_img_desc ucode_desc; 99 struct ls_ucode_img_desc ucode_desc;
98 u8 *ucode_data; 100 u8 *ucode_data;
99 u32 ucode_size; 101 u32 ucode_size;
102 u32 ucode_off;
100 103
101 u8 *sig; 104 u8 *sig;
102 u32 sig_size; 105 u32 sig_size;
@@ -146,6 +149,9 @@ struct fw_bl_desc {
146 149
147int acr_ls_ucode_load_fecs(const struct nvkm_subdev *, struct ls_ucode_img *); 150int acr_ls_ucode_load_fecs(const struct nvkm_subdev *, struct ls_ucode_img *);
148int acr_ls_ucode_load_gpccs(const struct nvkm_subdev *, struct ls_ucode_img *); 151int acr_ls_ucode_load_gpccs(const struct nvkm_subdev *, struct ls_ucode_img *);
149 152int acr_ls_ucode_load_pmu(const struct nvkm_subdev *, struct ls_ucode_img *);
153void acr_ls_pmu_post_run(const struct nvkm_acr *, const struct nvkm_secboot *);
154int acr_ls_ucode_load_sec2(const struct nvkm_subdev *, struct ls_ucode_img *);
155void acr_ls_sec2_post_run(const struct nvkm_acr *, const struct nvkm_secboot *);
150 156
151#endif 157#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
new file mode 100644
index 000000000000..ef0b298b70d7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
@@ -0,0 +1,149 @@
1/*
2 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23
24#include "ls_ucode.h"
25#include "acr.h"
26
27#include <core/firmware.h>
28#include <core/msgqueue.h>
29#include <subdev/pmu.h>
30#include <engine/sec2.h>
31
32/**
33 * acr_ls_ucode_load_msgqueue - load and prepare a ucode img for a msgqueue fw
34 *
35 * Load the LS microcode, desc and signature and pack them into a single
36 * blob.
37 */
38static int
39acr_ls_ucode_load_msgqueue(const struct nvkm_subdev *subdev, const char *name,
40 struct ls_ucode_img *img)
41{
42 const struct firmware *image, *desc, *sig;
43 char f[64];
44 int ret;
45
46 snprintf(f, sizeof(f), "%s/image", name);
47 ret = nvkm_firmware_get(subdev->device, f, &image);
48 if (ret)
49 return ret;
50 img->ucode_data = kmemdup(image->data, image->size, GFP_KERNEL);
51 nvkm_firmware_put(image);
52 if (!img->ucode_data)
53 return -ENOMEM;
54
55 snprintf(f, sizeof(f), "%s/desc", name);
56 ret = nvkm_firmware_get(subdev->device, f, &desc);
57 if (ret)
58 return ret;
59 memcpy(&img->ucode_desc, desc->data, sizeof(img->ucode_desc));
 60	img->ucode_size = ALIGN(img->ucode_desc.app_start_offset +
 61				img->ucode_desc.app_size, 256);
61 nvkm_firmware_put(desc);
62
63 snprintf(f, sizeof(f), "%s/sig", name);
64 ret = nvkm_firmware_get(subdev->device, f, &sig);
65 if (ret)
66 return ret;
67 img->sig_size = sig->size;
68 img->sig = kmemdup(sig->data, sig->size, GFP_KERNEL);
69 nvkm_firmware_put(sig);
70 if (!img->sig)
71 return -ENOMEM;
72
73 return 0;
74}
75
76static void
77acr_ls_msgqueue_post_run(struct nvkm_msgqueue *queue,
78 struct nvkm_falcon *falcon, u32 addr_args)
79{
80 u32 cmdline_size = NVKM_MSGQUEUE_CMDLINE_SIZE;
81 u8 buf[cmdline_size];
82
83 memset(buf, 0, cmdline_size);
84 nvkm_msgqueue_write_cmdline(queue, buf);
85 nvkm_falcon_load_dmem(falcon, buf, addr_args, cmdline_size, 0);
86 /* rearm the queue so it will wait for the init message */
87 nvkm_msgqueue_reinit(queue);
88}
89
90int
91acr_ls_ucode_load_pmu(const struct nvkm_subdev *subdev,
92 struct ls_ucode_img *img)
93{
94 struct nvkm_pmu *pmu = subdev->device->pmu;
95 int ret;
96
97 ret = acr_ls_ucode_load_msgqueue(subdev, "pmu", img);
98 if (ret)
99 return ret;
100
101 /* Allocate the PMU queue corresponding to the FW version */
102 ret = nvkm_msgqueue_new(img->ucode_desc.app_version, pmu->falcon,
103 &pmu->queue);
104 if (ret)
105 return ret;
106
107 return 0;
108}
109
110void
111acr_ls_pmu_post_run(const struct nvkm_acr *acr, const struct nvkm_secboot *sb)
112{
113 struct nvkm_device *device = sb->subdev.device;
114 struct nvkm_pmu *pmu = device->pmu;
115 u32 addr_args = pmu->falcon->data.limit - NVKM_MSGQUEUE_CMDLINE_SIZE;
116
117 acr_ls_msgqueue_post_run(pmu->queue, pmu->falcon, addr_args);
118}
119
120int
121acr_ls_ucode_load_sec2(const struct nvkm_subdev *subdev,
122 struct ls_ucode_img *img)
123{
124 struct nvkm_sec2 *sec = subdev->device->sec2;
125 int ret;
126
127 ret = acr_ls_ucode_load_msgqueue(subdev, "sec2", img);
128 if (ret)
129 return ret;
130
 131	/* Allocate the SEC2 queue corresponding to the FW version */
132 ret = nvkm_msgqueue_new(img->ucode_desc.app_version, sec->falcon,
133 &sec->queue);
134 if (ret)
135 return ret;
136
137 return 0;
138}
139
140void
141acr_ls_sec2_post_run(const struct nvkm_acr *acr, const struct nvkm_secboot *sb)
142{
143 struct nvkm_device *device = sb->subdev.device;
144 struct nvkm_sec2 *sec = device->sec2;
 145	/* on SEC2, the arguments are always at the beginning of EMEM */
146 u32 addr_args = 0x01000000;
147
148 acr_ls_msgqueue_post_run(sec->queue, sec->falcon, addr_args);
149}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
index 936a65f5658c..885e919a8720 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
@@ -30,11 +30,10 @@ struct nvkm_secboot_func {
30 int (*oneinit)(struct nvkm_secboot *); 30 int (*oneinit)(struct nvkm_secboot *);
31 int (*fini)(struct nvkm_secboot *, bool suspend); 31 int (*fini)(struct nvkm_secboot *, bool suspend);
32 void *(*dtor)(struct nvkm_secboot *); 32 void *(*dtor)(struct nvkm_secboot *);
33 int (*run_blob)(struct nvkm_secboot *, struct nvkm_gpuobj *); 33 int (*run_blob)(struct nvkm_secboot *, struct nvkm_gpuobj *,
34 struct nvkm_falcon *);
34}; 35};
35 36
36extern const char *nvkm_secboot_falcon_name[];
37
38int nvkm_secboot_ctor(const struct nvkm_secboot_func *, struct nvkm_acr *, 37int nvkm_secboot_ctor(const struct nvkm_secboot_func *, struct nvkm_acr *,
39 struct nvkm_device *, int, struct nvkm_secboot *); 38 struct nvkm_device *, int, struct nvkm_secboot *);
40int nvkm_secboot_falcon_reset(struct nvkm_secboot *); 39int nvkm_secboot_falcon_reset(struct nvkm_secboot *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
index efac3402f9dd..fea4957291da 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
@@ -82,7 +82,7 @@ gk104_top_oneinit(struct nvkm_top *top)
82 case 0x0000000a: A_(MSVLD ); break; 82 case 0x0000000a: A_(MSVLD ); break;
83 case 0x0000000b: A_(MSENC ); break; 83 case 0x0000000b: A_(MSENC ); break;
84 case 0x0000000c: A_(VIC ); break; 84 case 0x0000000c: A_(VIC ); break;
85 case 0x0000000d: A_(SEC ); break; 85 case 0x0000000d: A_(SEC2 ); break;
86 case 0x0000000e: B_(NVENC ); break; 86 case 0x0000000e: B_(NVENC ); break;
87 case 0x0000000f: A_(NVENC1); break; 87 case 0x0000000f: A_(NVENC1); break;
88 case 0x00000010: A_(NVDEC ); break; 88 case 0x00000010: A_(NVDEC ); break;