author     Ben Skeggs <bskeggs@redhat.com>        2015-01-13 23:11:21 -0500
committer  Ben Skeggs <bskeggs@redhat.com>        2015-01-21 21:17:49 -0500
commit     5025407b9862349d17b1dff25737aaef6520a439 (patch)
tree       6d11493365d06c00d79daf5818b42dfa2d421c1e
parent     989aa5b76ad2af7653353cf01bdebec2ba9436aa (diff)
drm/nouveau/core: namespace + nvidia gpu names (no binary change)
The NVKM namespace is being changed from nouveau_ to nvkm_, leaving the
nouveau_ prefix for the DRM part of the driver. This makes it clear which
part of the driver a given symbol belongs to, and is a minor step towards
splitting the DRM driver out so that it can stand on its own (for virt).
Because there's already a large amount of churn here anyway, this is
as good a time as any to also switch to NVIDIA's device and chipset
naming to ease collaboration with them.
A comparison of objdump disassemblies proves no code changes.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
55 files changed, 1240 insertions, 1157 deletions
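The "no binary change" claim rests on how the rename is staged: the new core/os.h (towards the end of this patch) #defines every legacy nouveau_* identifier to its nvkm_* replacement, so code that still uses the old spellings preprocesses to exactly the symbols the renamed headers now declare. A minimal, self-contained sketch of that pattern, using names taken from this patch (the struct body and init function below are stand-ins, not the real definitions):

/* New names, as now declared in the nvkm headers (bodies are stand-ins). */
struct nvkm_client { int debug; };
static inline int nvkm_client_init(struct nvkm_client *c) { c->debug = 0; return 0; }

/* Transitional aliases, in the style of the new core/os.h. */
#define nouveau_client      nvkm_client
#define nouveau_client_init nvkm_client_init

/* An as-yet-unconverted caller, still written against the old names; the
 * macros expand before compilation, so it generates the same code as if it
 * had been rewritten to use nvkm_* directly. */
static inline int old_caller(struct nouveau_client *client)
{
        return nouveau_client_init(client);
}

Because the aliases are plain macros, they can be deleted once the remaining call sites are converted, again without affecting generated code.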
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
index 827c4e972ed3..a35b38244502 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
@@ -1,15 +1,14 @@
-#ifndef __NOUVEAU_CLIENT_H__
-#define __NOUVEAU_CLIENT_H__
-
+#ifndef __NVKM_CLIENT_H__
+#define __NVKM_CLIENT_H__
 #include <core/namedb.h>
 
-struct nouveau_client {
-        struct nouveau_namedb namedb;
-        struct nouveau_handle *root;
-        struct nouveau_object *device;
+struct nvkm_client {
+        struct nvkm_namedb namedb;
+        struct nvkm_handle *root;
+        struct nvkm_object *device;
         char name[32];
         u32 debug;
-        struct nouveau_vm *vm;
+        struct nvkm_vm *vm;
         bool super;
         void *data;
 
@@ -17,7 +16,7 @@ struct nouveau_client {
         struct nvkm_client_notify *notify[16];
 };
 
-static inline struct nouveau_client *
+static inline struct nvkm_client *
 nv_client(void *obj)
 {
 #if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
@@ -27,31 +26,30 @@ nv_client(void *obj)
         return obj;
 }
 
-static inline struct nouveau_client *
-nouveau_client(void *obj)
+static inline struct nvkm_client *
+nvkm_client(void *obj)
 {
-        struct nouveau_object *client = nv_object(obj);
+        struct nvkm_object *client = nv_object(obj);
         while (client && !(nv_iclass(client, NV_CLIENT_CLASS)))
                 client = client->parent;
         return (void *)client;
 }
 
-#define nouveau_client_create(n,c,oc,od,d) \
-        nouveau_client_create_((n), (c), (oc), (od), sizeof(**d), (void **)d)
+#define nvkm_client_create(n,c,oc,od,d) \
+        nvkm_client_create_((n), (c), (oc), (od), sizeof(**d), (void **)d)
 
-int nouveau_client_create_(const char *name, u64 device, const char *cfg,
+int nvkm_client_create_(const char *name, u64 device, const char *cfg,
                 const char *dbg, int, void **);
-#define nouveau_client_destroy(p) \
-        nouveau_namedb_destroy(&(p)->base)
+#define nvkm_client_destroy(p) \
+        nvkm_namedb_destroy(&(p)->base)
 
-int nouveau_client_init(struct nouveau_client *);
-int nouveau_client_fini(struct nouveau_client *, bool suspend);
-const char *nouveau_client_name(void *obj);
+int nvkm_client_init(struct nvkm_client *);
+int nvkm_client_fini(struct nvkm_client *, bool suspend);
+const char *nvkm_client_name(void *obj);
 
-int nvkm_client_notify_new(struct nouveau_object *, struct nvkm_event *,
+int nvkm_client_notify_new(struct nvkm_object *, struct nvkm_event *,
                 void *data, u32 size);
-int nvkm_client_notify_del(struct nouveau_client *, int index);
-int nvkm_client_notify_get(struct nouveau_client *, int index);
-int nvkm_client_notify_put(struct nouveau_client *, int index);
-
+int nvkm_client_notify_del(struct nvkm_client *, int index);
+int nvkm_client_notify_get(struct nvkm_client *, int index);
+int nvkm_client_notify_put(struct nvkm_client *, int index);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/debug.h b/drivers/gpu/drm/nouveau/include/nvkm/core/debug.h
index 8092e2e90323..d07cb860b56c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/debug.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/debug.h
@@ -1,6 +1,5 @@
-#ifndef __NOUVEAU_DEBUG_H__
-#define __NOUVEAU_DEBUG_H__
-
+#ifndef __NVKM_DEBUG_H__
+#define __NVKM_DEBUG_H__
 extern int nv_info_debug_level;
 
 #define NV_DBG_FATAL 0
@@ -16,5 +15,4 @@ extern int nv_info_debug_level;
 #define NV_DBG_INFO_SILENT NV_DBG_DEBUG
 
 #define nv_debug_level(a) nv_info_debug_level = NV_DBG_INFO_##a
-
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index 23d71c58c3f1..9c2f13694a57 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -1,13 +1,10 @@
-#ifndef __NOUVEAU_DEVICE_H__
-#define __NOUVEAU_DEVICE_H__
-
-#include <core/object.h>
-#include <core/subdev.h>
+#ifndef __NVKM_DEVICE_H__
+#define __NVKM_DEVICE_H__
 #include <core/engine.h>
 #include <core/event.h>
 
-struct nouveau_device {
-        struct nouveau_engine engine;
+struct nvkm_device {
+        struct nvkm_engine engine;
         struct list_head head;
 
         struct pci_dev *pdev;
@@ -38,53 +35,53 @@ struct nouveau_device {
         u8 chiprev;
         u32 crystal;
 
-        struct nouveau_oclass *oclass[NVDEV_SUBDEV_NR];
-        struct nouveau_object *subdev[NVDEV_SUBDEV_NR];
+        struct nvkm_oclass *oclass[NVDEV_SUBDEV_NR];
+        struct nvkm_object *subdev[NVDEV_SUBDEV_NR];
 
         struct {
                 struct notifier_block nb;
         } acpi;
 };
 
-int nouveau_device_list(u64 *name, int size);
+struct nvkm_device *nvkm_device_find(u64 name);
+int nvkm_device_list(u64 *name, int size);
 
-struct nouveau_device *nv_device(void *obj);
+struct nvkm_device *nv_device(void *obj);
 
 static inline bool
-nv_device_match(struct nouveau_object *object, u16 dev, u16 ven, u16 sub)
+nv_device_match(struct nvkm_object *object, u16 dev, u16 ven, u16 sub)
 {
-        struct nouveau_device *device = nv_device(object);
+        struct nvkm_device *device = nv_device(object);
         return device->pdev->device == dev &&
                device->pdev->subsystem_vendor == ven &&
                device->pdev->subsystem_device == sub;
 }
 
 static inline bool
-nv_device_is_pci(struct nouveau_device *device)
+nv_device_is_pci(struct nvkm_device *device)
 {
         return device->pdev != NULL;
 }
 
 static inline bool
-nv_device_is_cpu_coherent(struct nouveau_device *device)
+nv_device_is_cpu_coherent(struct nvkm_device *device)
 {
         return (!IS_ENABLED(CONFIG_ARM) && nv_device_is_pci(device));
 }
 
 static inline struct device *
-nv_device_base(struct nouveau_device *device)
+nv_device_base(struct nvkm_device *device)
 {
         return nv_device_is_pci(device) ? &device->pdev->dev :
                 &device->platformdev->dev;
 }
 
 resource_size_t
-nv_device_resource_start(struct nouveau_device *device, unsigned int bar);
+nv_device_resource_start(struct nvkm_device *device, unsigned int bar);
 
 resource_size_t
-nv_device_resource_len(struct nouveau_device *device, unsigned int bar);
+nv_device_resource_len(struct nvkm_device *device, unsigned int bar);
 
 int
-nv_device_get_irq(struct nouveau_device *device, bool stall);
-
+nv_device_get_irq(struct nvkm_device *device, bool stall);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/engctx.h b/drivers/gpu/drm/nouveau/include/nvkm/core/engctx.h
index 9d696e4747e7..1bf2e8eb4268 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/engctx.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/engctx.h
@@ -1,7 +1,5 @@
-#ifndef __NOUVEAU_ENGCTX_H__
-#define __NOUVEAU_ENGCTX_H__
-
-#include <core/object.h>
+#ifndef __NVKM_ENGCTX_H__
+#define __NVKM_ENGCTX_H__
 #include <core/gpuobj.h>
 
 #include <subdev/mmu.h>
@@ -9,15 +7,15 @@
 #define NV_ENGCTX_(eng,var) (NV_ENGCTX_CLASS | ((var) << 8) | (eng))
 #define NV_ENGCTX(name,var) NV_ENGCTX_(NVDEV_ENGINE_##name, (var))
 
-struct nouveau_engctx {
-        struct nouveau_gpuobj gpuobj;
-        struct nouveau_vma vma;
+struct nvkm_engctx {
+        struct nvkm_gpuobj gpuobj;
+        struct nvkm_vma vma;
         struct list_head head;
         unsigned long save;
         u64 addr;
 };
 
-static inline struct nouveau_engctx *
+static inline struct nvkm_engctx *
 nv_engctx(void *obj)
 {
 #if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
@@ -27,28 +25,27 @@ nv_engctx(void *obj)
         return obj;
 }
 
-#define nouveau_engctx_create(p,e,c,g,s,a,f,d) \
-        nouveau_engctx_create_((p), (e), (c), (g), (s), (a), (f), \
+#define nvkm_engctx_create(p,e,c,g,s,a,f,d) \
+        nvkm_engctx_create_((p), (e), (c), (g), (s), (a), (f), \
                 sizeof(**d), (void **)d)
 
-int nouveau_engctx_create_(struct nouveau_object *, struct nouveau_object *,
-                struct nouveau_oclass *, struct nouveau_object *,
+int nvkm_engctx_create_(struct nvkm_object *, struct nvkm_object *,
+                struct nvkm_oclass *, struct nvkm_object *,
                 u32 size, u32 align, u32 flags,
                 int length, void **data);
-void nouveau_engctx_destroy(struct nouveau_engctx *);
-int nouveau_engctx_init(struct nouveau_engctx *);
-int nouveau_engctx_fini(struct nouveau_engctx *, bool suspend);
+void nvkm_engctx_destroy(struct nvkm_engctx *);
+int nvkm_engctx_init(struct nvkm_engctx *);
+int nvkm_engctx_fini(struct nvkm_engctx *, bool suspend);
 
-int _nouveau_engctx_ctor(struct nouveau_object *, struct nouveau_object *,
-                struct nouveau_oclass *, void *, u32,
-                struct nouveau_object **);
-void _nouveau_engctx_dtor(struct nouveau_object *);
-int _nouveau_engctx_init(struct nouveau_object *);
-int _nouveau_engctx_fini(struct nouveau_object *, bool suspend);
-#define _nouveau_engctx_rd32 _nouveau_gpuobj_rd32
-#define _nouveau_engctx_wr32 _nouveau_gpuobj_wr32
+int _nvkm_engctx_ctor(struct nvkm_object *, struct nvkm_object *,
+                struct nvkm_oclass *, void *, u32,
+                struct nvkm_object **);
+void _nvkm_engctx_dtor(struct nvkm_object *);
+int _nvkm_engctx_init(struct nvkm_object *);
+int _nvkm_engctx_fini(struct nvkm_object *, bool suspend);
+#define _nvkm_engctx_rd32 _nvkm_gpuobj_rd32
+#define _nvkm_engctx_wr32 _nvkm_gpuobj_wr32
 
-struct nouveau_object *nouveau_engctx_get(struct nouveau_engine *, u64 addr);
-void nouveau_engctx_put(struct nouveau_object *);
-
+struct nvkm_object *nvkm_engctx_get(struct nvkm_engine *, u64 addr);
+void nvkm_engctx_put(struct nvkm_object *);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
index d7ebd35ef1ad..faf0fd2f0638 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
@@ -1,25 +1,23 @@
-#ifndef __NOUVEAU_ENGINE_H__
-#define __NOUVEAU_ENGINE_H__
-
-#include <core/object.h>
+#ifndef __NVKM_ENGINE_H__
+#define __NVKM_ENGINE_H__
 #include <core/subdev.h>
 
 #define NV_ENGINE_(eng,var) (NV_ENGINE_CLASS | ((var) << 8) | (eng))
 #define NV_ENGINE(name,var) NV_ENGINE_(NVDEV_ENGINE_##name, (var))
 
-struct nouveau_engine {
-        struct nouveau_subdev subdev;
-        struct nouveau_oclass *cclass;
-        struct nouveau_oclass *sclass;
+struct nvkm_engine {
+        struct nvkm_subdev subdev;
+        struct nvkm_oclass *cclass;
+        struct nvkm_oclass *sclass;
 
         struct list_head contexts;
         spinlock_t lock;
 
-        void (*tile_prog)(struct nouveau_engine *, int region);
-        int (*tlb_flush)(struct nouveau_engine *);
+        void (*tile_prog)(struct nvkm_engine *, int region);
+        int (*tlb_flush)(struct nvkm_engine *);
 };
 
-static inline struct nouveau_engine *
+static inline struct nvkm_engine *
 nv_engine(void *obj)
 {
 #if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
@@ -30,30 +28,29 @@ nv_engine(void *obj)
 }
 
 static inline int
-nv_engidx(struct nouveau_engine *engine)
+nv_engidx(struct nvkm_engine *engine)
 {
         return nv_subidx(&engine->subdev);
 }
 
-struct nouveau_engine *nouveau_engine(void *obj, int idx);
+struct nvkm_engine *nvkm_engine(void *obj, int idx);
 
-#define nouveau_engine_create(p,e,c,d,i,f,r) \
-        nouveau_engine_create_((p), (e), (c), (d), (i), (f), \
+#define nvkm_engine_create(p,e,c,d,i,f,r) \
+        nvkm_engine_create_((p), (e), (c), (d), (i), (f), \
                 sizeof(**r),(void **)r)
 
-#define nouveau_engine_destroy(p) \
-        nouveau_subdev_destroy(&(p)->subdev)
-#define nouveau_engine_init(p) \
-        nouveau_subdev_init(&(p)->subdev)
-#define nouveau_engine_fini(p,s) \
-        nouveau_subdev_fini(&(p)->subdev, (s))
+#define nvkm_engine_destroy(p) \
+        nvkm_subdev_destroy(&(p)->subdev)
+#define nvkm_engine_init(p) \
+        nvkm_subdev_init(&(p)->subdev)
+#define nvkm_engine_fini(p,s) \
+        nvkm_subdev_fini(&(p)->subdev, (s))
 
-int nouveau_engine_create_(struct nouveau_object *, struct nouveau_object *,
-                struct nouveau_oclass *, bool, const char *,
+int nvkm_engine_create_(struct nvkm_object *, struct nvkm_object *,
+                struct nvkm_oclass *, bool, const char *,
                 const char *, int, void **);
 
-#define _nouveau_engine_dtor _nouveau_subdev_dtor
-#define _nouveau_engine_init _nouveau_subdev_init
-#define _nouveau_engine_fini _nouveau_subdev_fini
-
+#define _nvkm_engine_dtor _nvkm_subdev_dtor
+#define _nvkm_engine_init _nvkm_subdev_init
+#define _nvkm_engine_fini _nvkm_subdev_fini
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h b/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h
index 4fc62bb8c1f0..e76f76f115e9 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h
@@ -1,24 +1,21 @@
-#ifndef __NOUVEAU_ENUM_H__
-#define __NOUVEAU_ENUM_H__
+#ifndef __NVKM_ENUM_H__
+#define __NVKM_ENUM_H__
+#include <core/os.h>
 
-struct nouveau_enum {
+struct nvkm_enum {
         u32 value;
         const char *name;
         const void *data;
         u32 data2;
 };
 
-const struct nouveau_enum *
-nouveau_enum_find(const struct nouveau_enum *, u32 value);
+const struct nvkm_enum *nvkm_enum_find(const struct nvkm_enum *, u32 value);
+const struct nvkm_enum *nvkm_enum_print(const struct nvkm_enum *, u32 value);
 
-const struct nouveau_enum *
-nouveau_enum_print(const struct nouveau_enum *en, u32 value);
-
-struct nouveau_bitfield {
+struct nvkm_bitfield {
         u32 mask;
         const char *name;
 };
 
-void nouveau_bitfield_print(const struct nouveau_bitfield *, u32 value);
-
+void nvkm_bitfield_print(const struct nvkm_bitfield *, u32 value);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/event.h b/drivers/gpu/drm/nouveau/include/nvkm/core/event.h
index 92876528972f..b98fe2de546a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/event.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/event.h
@@ -1,15 +1,8 @@
 #ifndef __NVKM_EVENT_H__
 #define __NVKM_EVENT_H__
-
-#include <core/notify.h>
-
-struct nvkm_event_func {
-        int (*ctor)(struct nouveau_object *, void *data, u32 size,
-                struct nvkm_notify *);
-        void (*send)(void *data, u32 size, struct nvkm_notify *);
-        void (*init)(struct nvkm_event *, int type, int index);
-        void (*fini)(struct nvkm_event *, int type, int index);
-};
+#include <core/os.h>
+struct nvkm_notify;
+struct nvkm_object;
 
 struct nvkm_event {
         const struct nvkm_event_func *func;
@@ -23,13 +16,19 @@ struct nvkm_event {
         int *refs;
 };
 
-int nvkm_event_init(const struct nvkm_event_func *func,
-                int types_nr, int index_nr,
-                struct nvkm_event *);
+struct nvkm_event_func {
+        int (*ctor)(struct nvkm_object *, void *data, u32 size,
+                struct nvkm_notify *);
+        void (*send)(void *data, u32 size, struct nvkm_notify *);
+        void (*init)(struct nvkm_event *, int type, int index);
+        void (*fini)(struct nvkm_event *, int type, int index);
+};
+
+int nvkm_event_init(const struct nvkm_event_func *func, int types_nr,
+                int index_nr, struct nvkm_event *);
 void nvkm_event_fini(struct nvkm_event *);
 void nvkm_event_get(struct nvkm_event *, u32 types, int index);
 void nvkm_event_put(struct nvkm_event *, u32 types, int index);
 void nvkm_event_send(struct nvkm_event *, u32 types, int index,
                 void *data, u32 size);
-
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h b/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
index c262c2505164..e0187e7abb6e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
@@ -1,30 +1,26 @@
-#ifndef __NOUVEAU_GPUOBJ_H__
-#define __NOUVEAU_GPUOBJ_H__
-
+#ifndef __NVKM_GPUOBJ_H__
+#define __NVKM_GPUOBJ_H__
 #include <core/object.h>
-#include <core/device.h>
-#include <core/parent.h>
 #include <core/mm.h>
-
-struct nouveau_vma;
-struct nouveau_vm;
+struct nvkm_vma;
+struct nvkm_vm;
 
 #define NVOBJ_FLAG_ZERO_ALLOC 0x00000001
 #define NVOBJ_FLAG_ZERO_FREE 0x00000002
 #define NVOBJ_FLAG_HEAP 0x00000004
 
-struct nouveau_gpuobj {
-        struct nouveau_object object;
-        struct nouveau_object *parent;
-        struct nouveau_mm_node *node;
-        struct nouveau_mm heap;
+struct nvkm_gpuobj {
+        struct nvkm_object object;
+        struct nvkm_object *parent;
+        struct nvkm_mm_node *node;
+        struct nvkm_mm heap;
 
         u32 flags;
         u64 addr;
         u32 size;
 };
 
-static inline struct nouveau_gpuobj *
+static inline struct nvkm_gpuobj *
 nv_gpuobj(void *obj)
 {
 #if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
@@ -34,38 +30,35 @@ nv_gpuobj(void *obj)
         return obj;
 }
 
-#define nouveau_gpuobj_create(p,e,c,v,g,s,a,f,d) \
-        nouveau_gpuobj_create_((p), (e), (c), (v), (g), (s), (a), (f), \
+#define nvkm_gpuobj_create(p,e,c,v,g,s,a,f,d) \
+        nvkm_gpuobj_create_((p), (e), (c), (v), (g), (s), (a), (f), \
                 sizeof(**d), (void **)d)
-#define nouveau_gpuobj_init(p) nouveau_object_init(&(p)->object)
-#define nouveau_gpuobj_fini(p,s) nouveau_object_fini(&(p)->object, (s))
-int nouveau_gpuobj_create_(struct nouveau_object *, struct nouveau_object *,
-                struct nouveau_oclass *, u32 pclass,
-                struct nouveau_object *, u32 size, u32 align,
+#define nvkm_gpuobj_init(p) nvkm_object_init(&(p)->object)
+#define nvkm_gpuobj_fini(p,s) nvkm_object_fini(&(p)->object, (s))
+int nvkm_gpuobj_create_(struct nvkm_object *, struct nvkm_object *,
+                struct nvkm_oclass *, u32 pclass,
+                struct nvkm_object *, u32 size, u32 align,
                 u32 flags, int length, void **);
-void nouveau_gpuobj_destroy(struct nouveau_gpuobj *);
+void nvkm_gpuobj_destroy(struct nvkm_gpuobj *);
 
-int nouveau_gpuobj_new(struct nouveau_object *, struct nouveau_object *,
-                u32 size, u32 align, u32 flags,
-                struct nouveau_gpuobj **);
-int nouveau_gpuobj_dup(struct nouveau_object *, struct nouveau_gpuobj *,
-                struct nouveau_gpuobj **);
-
-int nouveau_gpuobj_map(struct nouveau_gpuobj *, u32 acc, struct nouveau_vma *);
-int nouveau_gpuobj_map_vm(struct nouveau_gpuobj *, struct nouveau_vm *,
-                u32 access, struct nouveau_vma *);
-void nouveau_gpuobj_unmap(struct nouveau_vma *);
+int nvkm_gpuobj_new(struct nvkm_object *, struct nvkm_object *, u32 size,
+                u32 align, u32 flags, struct nvkm_gpuobj **);
+int nvkm_gpuobj_dup(struct nvkm_object *, struct nvkm_gpuobj *,
+                struct nvkm_gpuobj **);
+int nvkm_gpuobj_map(struct nvkm_gpuobj *, u32 acc, struct nvkm_vma *);
+int nvkm_gpuobj_map_vm(struct nvkm_gpuobj *, struct nvkm_vm *, u32 access,
+                struct nvkm_vma *);
+void nvkm_gpuobj_unmap(struct nvkm_vma *);
 
 static inline void
-nouveau_gpuobj_ref(struct nouveau_gpuobj *obj, struct nouveau_gpuobj **ref)
+nvkm_gpuobj_ref(struct nvkm_gpuobj *obj, struct nvkm_gpuobj **ref)
 {
-        nouveau_object_ref(&obj->object, (struct nouveau_object **)ref);
+        nvkm_object_ref(&obj->object, (struct nvkm_object **)ref);
 }
 
-void _nouveau_gpuobj_dtor(struct nouveau_object *);
-int _nouveau_gpuobj_init(struct nouveau_object *);
-int _nouveau_gpuobj_fini(struct nouveau_object *, bool);
-u32 _nouveau_gpuobj_rd32(struct nouveau_object *, u64);
-void _nouveau_gpuobj_wr32(struct nouveau_object *, u64, u32);
-
+void _nvkm_gpuobj_dtor(struct nvkm_object *);
+int _nvkm_gpuobj_init(struct nvkm_object *);
+int _nvkm_gpuobj_fini(struct nvkm_object *, bool);
+u32 _nvkm_gpuobj_rd32(struct nvkm_object *, u64);
+void _nvkm_gpuobj_wr32(struct nvkm_object *, u64, u32);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/handle.h b/drivers/gpu/drm/nouveau/include/nvkm/core/handle.h
index d22a59138a9b..67f384d0916c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/handle.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/handle.h
@@ -1,8 +1,10 @@
-#ifndef __NOUVEAU_HANDLE_H__
-#define __NOUVEAU_HANDLE_H__
+#ifndef __NVKM_HANDLE_H__
+#define __NVKM_HANDLE_H__
+#include <core/os.h>
+struct nvkm_object;
 
-struct nouveau_handle {
-        struct nouveau_namedb *namedb;
+struct nvkm_handle {
+        struct nvkm_namedb *namedb;
         struct list_head node;
 
         struct list_head head;
@@ -13,22 +15,20 @@ struct nouveau_handle {
         u8 route;
         u64 token;
 
-        struct nouveau_handle *parent;
-        struct nouveau_object *object;
+        struct nvkm_handle *parent;
+        struct nvkm_object *object;
 };
 
-int nouveau_handle_create(struct nouveau_object *, u32 parent, u32 handle,
-                struct nouveau_object *, struct nouveau_handle **);
-void nouveau_handle_destroy(struct nouveau_handle *);
-int nouveau_handle_init(struct nouveau_handle *);
-int nouveau_handle_fini(struct nouveau_handle *, bool suspend);
+int nvkm_handle_create(struct nvkm_object *, u32 parent, u32 handle,
+                struct nvkm_object *, struct nvkm_handle **);
+void nvkm_handle_destroy(struct nvkm_handle *);
+int nvkm_handle_init(struct nvkm_handle *);
+int nvkm_handle_fini(struct nvkm_handle *, bool suspend);
 
-struct nouveau_object *
-nouveau_handle_ref(struct nouveau_object *, u32 name);
-
-struct nouveau_handle *nouveau_handle_get_class(struct nouveau_object *, u16);
-struct nouveau_handle *nouveau_handle_get_vinst(struct nouveau_object *, u64);
-struct nouveau_handle *nouveau_handle_get_cinst(struct nouveau_object *, u32);
-void nouveau_handle_put(struct nouveau_handle *);
+struct nvkm_object *nvkm_handle_ref(struct nvkm_object *, u32 name);
 
+struct nvkm_handle *nvkm_handle_get_class(struct nvkm_object *, u16);
+struct nvkm_handle *nvkm_handle_get_vinst(struct nvkm_object *, u64);
+struct nvkm_handle *nvkm_handle_get_cinst(struct nvkm_object *, u32);
+void nvkm_handle_put(struct nvkm_handle *);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h b/drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h
index ac7935c2474e..88971eb37afa 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h
@@ -1,6 +1,7 @@
 #ifndef __NVKM_IOCTL_H__
 #define __NVKM_IOCTL_H__
+#include <core/os.h>
+struct nvkm_client;
 
-int nvkm_ioctl(struct nouveau_client *, bool, void *, u32, void **);
-
+int nvkm_ioctl(struct nvkm_client *, bool, void *, u32, void **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h b/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
index bfe6931544fe..096eb1a623ee 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
@@ -1,7 +1,8 @@
-#ifndef __NOUVEAU_MM_H__
-#define __NOUVEAU_MM_H__
+#ifndef __NVKM_MM_H__
+#define __NVKM_MM_H__
+#include <core/os.h>
 
-struct nouveau_mm_node {
+struct nvkm_mm_node {
         struct list_head nl_entry;
         struct list_head fl_entry;
         struct list_head rl_entry;
@@ -15,7 +16,7 @@ struct nouveau_mm_node {
         u32 length;
 };
 
-struct nouveau_mm {
+struct nvkm_mm {
         struct list_head nodes;
         struct list_head free;
 
@@ -24,17 +25,16 @@ struct nouveau_mm {
 };
 
 static inline bool
-nouveau_mm_initialised(struct nouveau_mm *mm)
+nvkm_mm_initialised(struct nvkm_mm *mm)
 {
         return mm->block_size != 0;
 }
 
-int nouveau_mm_init(struct nouveau_mm *, u32 offset, u32 length, u32 block);
-int nouveau_mm_fini(struct nouveau_mm *);
-int nouveau_mm_head(struct nouveau_mm *, u8 heap, u8 type, u32 size_max,
-                u32 size_min, u32 align, struct nouveau_mm_node **);
-int nouveau_mm_tail(struct nouveau_mm *, u8 heap, u8 type, u32 size_max,
-                u32 size_min, u32 align, struct nouveau_mm_node **);
-void nouveau_mm_free(struct nouveau_mm *, struct nouveau_mm_node **);
-
+int nvkm_mm_init(struct nvkm_mm *, u32 offset, u32 length, u32 block);
+int nvkm_mm_fini(struct nvkm_mm *);
+int nvkm_mm_head(struct nvkm_mm *, u8 heap, u8 type, u32 size_max,
+                u32 size_min, u32 align, struct nvkm_mm_node **);
+int nvkm_mm_tail(struct nvkm_mm *, u8 heap, u8 type, u32 size_max,
+                u32 size_min, u32 align, struct nvkm_mm_node **);
+void nvkm_mm_free(struct nvkm_mm *, struct nvkm_mm_node **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/namedb.h b/drivers/gpu/drm/nouveau/include/nvkm/core/namedb.h
index 98e666b1e375..4cfe16fcde9b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/namedb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/namedb.h
@@ -1,17 +1,15 @@
-#ifndef __NOUVEAU_NAMEDB_H__
-#define __NOUVEAU_NAMEDB_H__
-
+#ifndef __NVKM_NAMEDB_H__
+#define __NVKM_NAMEDB_H__
 #include <core/parent.h>
+struct nvkm_handle;
 
-struct nouveau_handle;
-
-struct nouveau_namedb {
-        struct nouveau_parent parent;
+struct nvkm_namedb {
+        struct nvkm_parent parent;
         rwlock_t lock;
         struct list_head list;
 };
 
-static inline struct nouveau_namedb *
+static inline struct nvkm_namedb *
 nv_namedb(void *obj)
 {
 #if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
@@ -21,36 +19,35 @@ nv_namedb(void *obj)
         return obj;
 }
 
-#define nouveau_namedb_create(p,e,c,v,s,m,d) \
-        nouveau_namedb_create_((p), (e), (c), (v), (s), (m), \
+#define nvkm_namedb_create(p,e,c,v,s,m,d) \
+        nvkm_namedb_create_((p), (e), (c), (v), (s), (m), \
                 sizeof(**d), (void **)d)
-#define nouveau_namedb_init(p) \
-        nouveau_parent_init(&(p)->parent)
-#define nouveau_namedb_fini(p,s) \
-        nouveau_parent_fini(&(p)->parent, (s))
-#define nouveau_namedb_destroy(p) \
-        nouveau_parent_destroy(&(p)->parent)
+#define nvkm_namedb_init(p) \
+        nvkm_parent_init(&(p)->parent)
+#define nvkm_namedb_fini(p,s) \
+        nvkm_parent_fini(&(p)->parent, (s))
+#define nvkm_namedb_destroy(p) \
+        nvkm_parent_destroy(&(p)->parent)
 
-int nouveau_namedb_create_(struct nouveau_object *, struct nouveau_object *,
-                struct nouveau_oclass *, u32 pclass,
-                struct nouveau_oclass *, u64 engcls,
+int nvkm_namedb_create_(struct nvkm_object *, struct nvkm_object *,
+                struct nvkm_oclass *, u32 pclass,
+                struct nvkm_oclass *, u64 engcls,
                 int size, void **);
 
-int _nouveau_namedb_ctor(struct nouveau_object *, struct nouveau_object *,
-                struct nouveau_oclass *, void *, u32,
-                struct nouveau_object **);
-#define _nouveau_namedb_dtor _nouveau_parent_dtor
-#define _nouveau_namedb_init _nouveau_parent_init
-#define _nouveau_namedb_fini _nouveau_parent_fini
+int _nvkm_namedb_ctor(struct nvkm_object *, struct nvkm_object *,
+                struct nvkm_oclass *, void *, u32,
+                struct nvkm_object **);
+#define _nvkm_namedb_dtor _nvkm_parent_dtor
+#define _nvkm_namedb_init _nvkm_parent_init
+#define _nvkm_namedb_fini _nvkm_parent_fini
 
-int nouveau_namedb_insert(struct nouveau_namedb *, u32 name,
-                struct nouveau_object *, struct nouveau_handle *);
-void nouveau_namedb_remove(struct nouveau_handle *);
+int nvkm_namedb_insert(struct nvkm_namedb *, u32 name, struct nvkm_object *,
+                struct nvkm_handle *);
+void nvkm_namedb_remove(struct nvkm_handle *);
 
-struct nouveau_handle *nouveau_namedb_get(struct nouveau_namedb *, u32);
-struct nouveau_handle *nouveau_namedb_get_class(struct nouveau_namedb *, u16);
-struct nouveau_handle *nouveau_namedb_get_vinst(struct nouveau_namedb *, u64);
-struct nouveau_handle *nouveau_namedb_get_cinst(struct nouveau_namedb *, u32);
-void nouveau_namedb_put(struct nouveau_handle *);
-
+struct nvkm_handle *nvkm_namedb_get(struct nvkm_namedb *, u32);
+struct nvkm_handle *nvkm_namedb_get_class(struct nvkm_namedb *, u16);
+struct nvkm_handle *nvkm_namedb_get_vinst(struct nvkm_namedb *, u64);
+struct nvkm_handle *nvkm_namedb_get_cinst(struct nvkm_namedb *, u32);
+void nvkm_namedb_put(struct nvkm_handle *);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/notify.h b/drivers/gpu/drm/nouveau/include/nvkm/core/notify.h
index a7c3c5f578cc..753d08c1767b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/notify.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/notify.h
@@ -1,5 +1,7 @@
 #ifndef __NVKM_NOTIFY_H__
 #define __NVKM_NOTIFY_H__
+#include <core/os.h>
+struct nvkm_object;
 
 struct nvkm_notify {
         struct nvkm_event *event;
@@ -25,7 +27,7 @@ struct nvkm_notify {
         const void *data;
 };
 
-int nvkm_notify_init(struct nouveau_object *, struct nvkm_event *,
+int nvkm_notify_init(struct nvkm_object *, struct nvkm_event *,
                 int (*func)(struct nvkm_notify *), bool work,
                 void *data, u32 size, u32 reply,
                 struct nvkm_notify *);
@@ -33,5 +35,4 @@ void nvkm_notify_fini(struct nvkm_notify *);
 void nvkm_notify_get(struct nvkm_notify *);
 void nvkm_notify_put(struct nvkm_notify *);
 void nvkm_notify_send(struct nvkm_notify *, void *data, u32 size);
-
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/object.h b/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
index 64c0038f1971..6e3cd3908400 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
@@ -1,6 +1,5 @@
-#ifndef __NOUVEAU_OBJECT_H__
-#define __NOUVEAU_OBJECT_H__
-
+#ifndef __NVKM_OBJECT_H__
+#define __NVKM_OBJECT_H__
 #include <core/os.h>
 #include <core/printk.h>
 
@@ -14,52 +13,52 @@
 #define NV_ENGCTX_CLASS 0x01000000
 #define NV_OBJECT_CLASS 0x0000ffff
 
-struct nouveau_object {
-        struct nouveau_oclass *oclass;
-        struct nouveau_object *parent;
-        struct nouveau_engine *engine;
+struct nvkm_object {
+        struct nvkm_oclass *oclass;
+        struct nvkm_object *parent;
+        struct nvkm_engine *engine;
         atomic_t refcount;
         atomic_t usecount;
 #if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
-#define NOUVEAU_OBJECT_MAGIC 0x75ef0bad
+#define NVKM_OBJECT_MAGIC 0x75ef0bad
         struct list_head list;
         u32 _magic;
 #endif
 };
 
-static inline struct nouveau_object *
+static inline struct nvkm_object *
 nv_object(void *obj)
 {
 #if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
         if (likely(obj)) {
-                struct nouveau_object *object = obj;
-                if (unlikely(object->_magic != NOUVEAU_OBJECT_MAGIC))
+                struct nvkm_object *object = obj;
+                if (unlikely(object->_magic != NVKM_OBJECT_MAGIC))
                         nv_assert("BAD CAST -> NvObject, invalid magic");
         }
 #endif
         return obj;
 }
 
-#define nouveau_object_create(p,e,c,s,d) \
-        nouveau_object_create_((p), (e), (c), (s), sizeof(**d), (void **)d)
-int nouveau_object_create_(struct nouveau_object *, struct nouveau_object *,
-                struct nouveau_oclass *, u32, int size, void **);
-void nouveau_object_destroy(struct nouveau_object *);
-int nouveau_object_init(struct nouveau_object *);
-int nouveau_object_fini(struct nouveau_object *, bool suspend);
+#define nvkm_object_create(p,e,c,s,d) \
+        nvkm_object_create_((p), (e), (c), (s), sizeof(**d), (void **)d)
+int nvkm_object_create_(struct nvkm_object *, struct nvkm_object *,
+                struct nvkm_oclass *, u32, int size, void **);
+void nvkm_object_destroy(struct nvkm_object *);
+int nvkm_object_init(struct nvkm_object *);
+int nvkm_object_fini(struct nvkm_object *, bool suspend);
 
-int _nouveau_object_ctor(struct nouveau_object *, struct nouveau_object *,
-                struct nouveau_oclass *, void *, u32,
-                struct nouveau_object **);
+int _nvkm_object_ctor(struct nvkm_object *, struct nvkm_object *,
+                struct nvkm_oclass *, void *, u32,
+                struct nvkm_object **);
 
-extern struct nouveau_ofuncs nouveau_object_ofuncs;
+extern struct nvkm_ofuncs nvkm_object_ofuncs;
 
 /* Don't allocate dynamically, because lockdep needs lock_class_keys to be in
  * ".data". */
-struct nouveau_oclass {
+struct nvkm_oclass {
         u32 handle;
-        struct nouveau_ofuncs * const ofuncs;
-        struct nouveau_omthds * const omthds;
+        struct nvkm_ofuncs * const ofuncs;
+        struct nvkm_omthds * const omthds;
         struct lock_class_key lock_class_key;
 };
 
@@ -68,58 +67,57 @@ struct nouveau_oclass {
 #define nv_iclass(o,i) (nv_hclass(o) & (i))
 #define nv_mclass(o) nv_iclass(o, NV_OBJECT_CLASS)
 
-static inline struct nouveau_object *
-nv_pclass(struct nouveau_object *parent, u32 oclass)
+static inline struct nvkm_object *
+nv_pclass(struct nvkm_object *parent, u32 oclass)
 {
         while (parent && !nv_iclass(parent, oclass))
                 parent = parent->parent;
         return parent;
 }
 
-struct nouveau_omthds {
+struct nvkm_omthds {
         u32 start;
         u32 limit;
-        int (*call)(struct nouveau_object *, u32, void *, u32);
+        int (*call)(struct nvkm_object *, u32, void *, u32);
 };
 
 struct nvkm_event;
-struct nouveau_ofuncs {
-        int (*ctor)(struct nouveau_object *, struct nouveau_object *,
-                struct nouveau_oclass *, void *data, u32 size,
-                struct nouveau_object **);
-        void (*dtor)(struct nouveau_object *);
-        int (*init)(struct nouveau_object *);
-        int (*fini)(struct nouveau_object *, bool suspend);
-        int (*mthd)(struct nouveau_object *, u32, void *, u32);
-        int (*ntfy)(struct nouveau_object *, u32, struct nvkm_event **);
-        int (* map)(struct nouveau_object *, u64 *, u32 *);
-        u8 (*rd08)(struct nouveau_object *, u64 offset);
-        u16 (*rd16)(struct nouveau_object *, u64 offset);
-        u32 (*rd32)(struct nouveau_object *, u64 offset);
-        void (*wr08)(struct nouveau_object *, u64 offset, u8 data);
-        void (*wr16)(struct nouveau_object *, u64 offset, u16 data);
-        void (*wr32)(struct nouveau_object *, u64 offset, u32 data);
+struct nvkm_ofuncs {
+        int (*ctor)(struct nvkm_object *, struct nvkm_object *,
+                struct nvkm_oclass *, void *data, u32 size,
+                struct nvkm_object **);
+        void (*dtor)(struct nvkm_object *);
+        int (*init)(struct nvkm_object *);
+        int (*fini)(struct nvkm_object *, bool suspend);
+        int (*mthd)(struct nvkm_object *, u32, void *, u32);
+        int (*ntfy)(struct nvkm_object *, u32, struct nvkm_event **);
+        int (* map)(struct nvkm_object *, u64 *, u32 *);
+        u8 (*rd08)(struct nvkm_object *, u64 offset);
+        u16 (*rd16)(struct nvkm_object *, u64 offset);
+        u32 (*rd32)(struct nvkm_object *, u64 offset);
+        void (*wr08)(struct nvkm_object *, u64 offset, u8 data);
+        void (*wr16)(struct nvkm_object *, u64 offset, u16 data);
+        void (*wr32)(struct nvkm_object *, u64 offset, u32 data);
 };
 
-static inline struct nouveau_ofuncs *
+static inline struct nvkm_ofuncs *
 nv_ofuncs(void *obj)
 {
         return nv_oclass(obj)->ofuncs;
 }
 
-int nouveau_object_ctor(struct nouveau_object *, struct nouveau_object *,
-                struct nouveau_oclass *, void *, u32,
-                struct nouveau_object **);
-void nouveau_object_ref(struct nouveau_object *, struct nouveau_object **);
-int nouveau_object_inc(struct nouveau_object *);
-int nouveau_object_dec(struct nouveau_object *, bool suspend);
-
-void nouveau_object_debug(void);
+int nvkm_object_ctor(struct nvkm_object *, struct nvkm_object *,
+                struct nvkm_oclass *, void *, u32,
+                struct nvkm_object **);
+void nvkm_object_ref(struct nvkm_object *, struct nvkm_object **);
+int nvkm_object_inc(struct nvkm_object *);
+int nvkm_object_dec(struct nvkm_object *, bool suspend);
+void nvkm_object_debug(void);
 
 static inline int
 nv_exec(void *obj, u32 mthd, void *data, u32 size)
 {
-        struct nouveau_omthds *method = nv_oclass(obj)->omthds;
+        struct nvkm_omthds *method = nv_oclass(obj)->omthds;
 
         while (method && method->call) {
                 if (mthd >= method->start && mthd <= method->limit)
@@ -202,5 +200,4 @@ nv_memcmp(void *obj, u32 addr, const char *str, u32 len)
         }
         return 0;
 }
-
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/option.h b/drivers/gpu/drm/nouveau/include/nvkm/core/option.h
index ed055847887e..532bfa8e3f72 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/option.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/option.h
@@ -1,12 +1,10 @@
-#ifndef __NOUVEAU_OPTION_H__
-#define __NOUVEAU_OPTION_H__
-
+#ifndef __NVKM_OPTION_H__
+#define __NVKM_OPTION_H__
 #include <core/os.h>
 
-const char *nouveau_stropt(const char *optstr, const char *opt, int *len);
-bool nouveau_boolopt(const char *optstr, const char *opt, bool value);
-
-int nouveau_dbgopt(const char *optstr, const char *sub);
+const char *nvkm_stropt(const char *optstr, const char *opt, int *len);
+bool nvkm_boolopt(const char *optstr, const char *opt, bool value);
+int nvkm_dbgopt(const char *optstr, const char *sub);
 
 /* compares unterminated string 'str' with zero-terminated string 'cmp' */
 static inline int
@@ -16,5 +14,4 @@ strncasecmpz(const char *str, const char *cmp, size_t len)
                 return len;
         return strncasecmp(str, cmp, len);
 }
-
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/os.h b/drivers/gpu/drm/nouveau/include/nvkm/core/os.h index cd57e238ddd3..0b5a1e4dff48 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/os.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/os.h | |||
@@ -1,4 +1,195 @@ | |||
1 | #ifndef __NVKM_OS_H__ | 1 | #ifndef __NVKM_OS_H__ |
2 | #define __NVKM_OS_H__ | 2 | #define __NVKM_OS_H__ |
3 | #include <nvif/os.h> | 3 | #include <nvif/os.h> |
4 | |||
5 | #define nouveau_client nvkm_client | ||
6 | #define nouveau_client_name nvkm_client_name | ||
7 | #define nouveau_client_create nvkm_client_create | ||
8 | #define nouveau_client_init nvkm_client_init | ||
9 | #define nouveau_client_fini nvkm_client_fini | ||
10 | #define nouveau_engctx nvkm_engctx | ||
11 | #define nouveau_engctx_create nvkm_engctx_create | ||
12 | #define nouveau_engctx_create_ nvkm_engctx_create_ | ||
13 | #define nouveau_engctx_destroy nvkm_engctx_destroy | ||
14 | #define nouveau_engctx_init nvkm_engctx_init | ||
15 | #define nouveau_engctx_fini nvkm_engctx_fini | ||
16 | #define _nouveau_engctx_ctor _nvkm_engctx_ctor | ||
17 | #define _nouveau_engctx_dtor _nvkm_engctx_dtor | ||
18 | #define _nouveau_engctx_init _nvkm_engctx_init | ||
19 | #define _nouveau_engctx_fini _nvkm_engctx_fini | ||
20 | #define _nouveau_engctx_rd32 _nvkm_engctx_rd32 | ||
21 | #define _nouveau_engctx_wr32 _nvkm_engctx_wr32 | ||
22 | #define nouveau_engctx_get nvkm_engctx_get | ||
23 | #define nouveau_engctx_put nvkm_engctx_put | ||
24 | #define nouveau_engine nvkm_engine | ||
25 | #define nouveau_engine_create nvkm_engine_create | ||
26 | #define nouveau_engine_create_ nvkm_engine_create_ | ||
27 | #define nouveau_engine_destroy nvkm_engine_destroy | ||
28 | #define nouveau_engine_init nvkm_engine_init | ||
29 | #define nouveau_engine_fini nvkm_engine_fini | ||
30 | #define _nouveau_engine_ctor _nvkm_engine_ctor | ||
31 | #define _nouveau_engine_dtor _nvkm_engine_dtor | ||
32 | #define _nouveau_engine_init _nvkm_engine_init | ||
33 | #define _nouveau_engine_fini _nvkm_engine_fini | ||
34 | #define nouveau_enum nvkm_enum | ||
35 | #define nouveau_gpuobj nvkm_gpuobj | ||
36 | #define nouveau_gpuobj_create nvkm_gpuobj_create | ||
37 | #define nouveau_gpuobj_destroy nvkm_gpuobj_destroy | ||
38 | #define _nouveau_gpuobj_ctor _nvkm_gpuobj_ctor | ||
39 | #define _nouveau_gpuobj_dtor _nvkm_gpuobj_dtor | ||
40 | #define _nouveau_gpuobj_init _nvkm_gpuobj_init | ||
41 | #define _nouveau_gpuobj_fini _nvkm_gpuobj_fini | ||
42 | #define _nouveau_gpuobj_rd32 _nvkm_gpuobj_rd32 | ||
43 | #define _nouveau_gpuobj_wr32 _nvkm_gpuobj_wr32 | ||
44 | #define nouveau_gpuobj_new nvkm_gpuobj_new | ||
45 | #define nouveau_gpuobj_dup nvkm_gpuobj_dup | ||
46 | #define nouveau_gpuobj_ref nvkm_gpuobj_ref | ||
47 | #define nouveau_gpuobj_map nvkm_gpuobj_map | ||
48 | #define nouveau_gpuobj_map_vm nvkm_gpuobj_map_vm | ||
49 | #define nouveau_gpuobj_unmap nvkm_gpuobj_unmap | ||
50 | #define nouveau_handle nvkm_handle | ||
51 | #define nouveau_handle_ref nvkm_handle_ref | ||
52 | #define nouveau_handle_put nvkm_handle_put | ||
53 | #define nouveau_handle_get_class nvkm_handle_get_class | ||
54 | #define nouveau_handle_get_vinst nvkm_handle_get_vinst | ||
55 | #define nouveau_handle_get_cinst nvkm_handle_get_cinst | ||
56 | #define nouveau_mm nvkm_mm | ||
57 | #define nouveau_mm_node nvkm_mm_node | ||
58 | #define nouveau_mm_init nvkm_mm_init | ||
59 | #define nouveau_mm_fini nvkm_mm_fini | ||
60 | #define nouveau_mm_head nvkm_mm_head | ||
61 | #define nouveau_mm_tail nvkm_mm_tail | ||
62 | #define nouveau_mm_free nvkm_mm_free | ||
63 | #define nouveau_mm_initialised nvkm_mm_initialised | ||
64 | #define nouveau_namedb nvkm_namedb | ||
65 | #define nouveau_namedb_create nvkm_namedb_create | ||
66 | #define nouveau_namedb_create_ nvkm_namedb_create_ | ||
67 | #define nouveau_namedb_destroy nvkm_namedb_destroy | ||
68 | #define nouveau_namedb_init nvkm_namedb_init | ||
69 | #define nouveau_namedb_fini nvkm_namedb_fini | ||
70 | #define _nouveau_namedb_ctor _nvkm_namedb_ctor | ||
71 | #define _nouveau_namedb_dtor _nvkm_namedb_dtor | ||
72 | #define _nouveau_namedb_init _nvkm_namedb_init | ||
73 | #define _nouveau_namedb_fini _nvkm_namedb_fini | ||
74 | #define nouveau_namedb_ref nvkm_namedb_ref | ||
75 | #define nouveau_namedb_put nvkm_namedb_put | ||
76 | #define nouveau_namedb_get nvkm_namedb_get | ||
77 | #define nouveau_namedb_get_class nvkm_namedb_get_class | ||
78 | #define nouveau_namedb_get_vinst nvkm_namedb_get_vinst | ||
79 | #define nouveau_namedb_get_cinst nvkm_namedb_get_cinst | ||
80 | #define nouveau_object_debug nvkm_object_debug | ||
81 | #define nouveau_object nvkm_object | ||
82 | #define nouveau_object_create nvkm_object_create | ||
83 | #define nouveau_object_create_ nvkm_object_create_ | ||
84 | #define nouveau_object_destroy nvkm_object_destroy | ||
85 | #define nouveau_object_init nvkm_object_init | ||
86 | #define nouveau_object_fini nvkm_object_fini | ||
87 | #define _nouveau_object_ctor _nvkm_object_ctor | ||
88 | #define nouveau_object_ctor nvkm_object_ctor | ||
89 | #define nouveau_object_ref nvkm_object_ref | ||
90 | #define nouveau_object_ofuncs nvkm_object_ofuncs | ||
91 | #define nouveau_object_inc nvkm_object_inc | ||
92 | #define nouveau_object_dec nvkm_object_dec | ||
93 | #define nouveau_ofuncs nvkm_ofuncs | ||
94 | #define nouveau_oclass nvkm_oclass | ||
95 | #define nouveau_omthds nvkm_omthds | ||
96 | #define nouveau_parent nvkm_parent | ||
97 | #define nouveau_parent_create nvkm_parent_create | ||
98 | #define nouveau_parent_create_ nvkm_parent_create_ | ||
99 | #define nouveau_parent_destroy nvkm_parent_destroy | ||
100 | #define nouveau_parent_init nvkm_parent_init | ||
101 | #define nouveau_parent_fini nvkm_parent_fini | ||
102 | #define _nouveau_parent_ctor _nvkm_parent_ctor | ||
103 | #define _nouveau_parent_dtor _nvkm_parent_dtor | ||
104 | #define _nouveau_parent_init _nvkm_parent_init | ||
105 | #define _nouveau_parent_fini _nvkm_parent_fini | ||
106 | #define nouveau_printk nvkm_printk | ||
107 | #define nouveau_ramht nvkm_ramht | ||
108 | #define nouveau_ramht_new nvkm_ramht_new | ||
109 | #define nouveau_ramht_ref nvkm_ramht_ref | ||
110 | #define nouveau_ramht_insert nvkm_ramht_insert | ||
111 | #define nouveau_ramht_remove nvkm_ramht_remove | ||
112 | #define nouveau_subdev nvkm_subdev | ||
113 | #define nouveau_subdev_create nvkm_subdev_create | ||
114 | #define nouveau_subdev_create_ nvkm_subdev_create_ | ||
115 | #define nouveau_subdev_destroy nvkm_subdev_destroy | ||
116 | #define nouveau_subdev_init nvkm_subdev_init | ||
117 | #define nouveau_subdev_fini nvkm_subdev_fini | ||
118 | #define _nouveau_subdev_ctor _nvkm_subdev_ctor | ||
119 | #define _nouveau_subdev_dtor _nvkm_subdev_dtor | ||
120 | #define _nouveau_subdev_init _nvkm_subdev_init | ||
121 | #define _nouveau_subdev_fini _nvkm_subdev_fini | ||
122 | #define nouveau_subdev_reset nvkm_subdev_reset | ||
123 | #define nouveau_bitfield nvkm_bitfield | ||
124 | #define nouveau_bitfield_print nvkm_bitfield_print | ||
125 | #define nouveau_enum nvkm_enum | ||
126 | #define nouveau_enum_find nvkm_enum_find | ||
127 | #define nouveau_enum_print nvkm_enum_print | ||
128 | #define nouveau_stropt nvkm_stropt | ||
129 | #define nouveau_boolopt nvkm_boolopt | ||
130 | #define nouveau_dbgopt nvkm_dbgopt | ||
131 | #define nouveau_device nvkm_device | ||
132 | #define nouveau_device_find nvkm_device_find | ||
133 | #define nouveau_device_list nvkm_device_list | ||
134 | #define nouveau_vma nvkm_vma | ||
135 | #define nouveau_vm nvkm_vm | ||
136 | #define nouveau_vm_get nvkm_vm_get | ||
137 | #define nouveau_vm_put nvkm_vm_put | ||
138 | #define nouveau_vm_map nvkm_vm_map | ||
139 | #define nouveau_vm_unmap nvkm_vm_unmap | ||
140 | #define nouveau_instmem nvkm_instmem | ||
141 | #define nouveau_instobj nvkm_instobj | ||
142 | #define nouveau_mem nvkm_mem | ||
143 | #define nouveau_bar nvkm_bar | ||
144 | #define nouveau_falcon nvkm_falcon | ||
145 | #define nouveau_falcon_create nvkm_falcon_create | ||
146 | #define nouveau_falcon_create_ nvkm_falcon_create_ | ||
147 | #define nouveau_falcon_destroy nvkm_falcon_destroy | ||
148 | #define nouveau_falcon_init nvkm_falcon_init | ||
149 | #define nouveau_falcon_fini nvkm_falcon_fini | ||
150 | #define _nouveau_falcon_ctor _nvkm_falcon_ctor | ||
151 | #define _nouveau_falcon_dtor _nvkm_falcon_dtor | ||
152 | #define _nouveau_falcon_init _nvkm_falcon_init | ||
153 | #define _nouveau_falcon_fini _nvkm_falcon_fini | ||
154 | #define _nouveau_falcon_rd32 _nvkm_falcon_rd32 | ||
155 | #define _nouveau_falcon_wr32 _nvkm_falcon_wr32 | ||
156 | #define nouveau_falcon_context nvkm_falcon_context | ||
157 | #define nouveau_falcon_context_create nvkm_falcon_context_create | ||
158 | #define nouveau_falcon_context_create_ nvkm_falcon_context_create_ | ||
159 | #define nouveau_falcon_context_destroy nvkm_falcon_context_destroy | ||
160 | #define nouveau_falcon_context_init nvkm_falcon_context_init | ||
161 | #define nouveau_falcon_context_fini nvkm_falcon_context_fini | ||
162 | #define _nouveau_falcon_context_ctor _nvkm_falcon_context_ctor | ||
163 | #define _nouveau_falcon_context_dtor _nvkm_falcon_context_dtor | ||
164 | #define _nouveau_falcon_context_init _nvkm_falcon_context_init | ||
165 | #define _nouveau_falcon_context_fini _nvkm_falcon_context_fini | ||
166 | #define _nouveau_falcon_context_rd32 _nvkm_falcon_context_rd32 | ||
167 | #define _nouveau_falcon_context_wr32 _nvkm_falcon_context_wr32 | ||
168 | #define nouveau_falcon_intr nvkm_falcon_intr | ||
169 | #define nouveau_xtensa nvkm_xtensa | ||
170 | #define nouveau_xtensa_create nvkm_xtensa_create | ||
171 | #define nouveau_xtensa_create_ nvkm_xtensa_create_ | ||
172 | #define nouveau_xtensa_destroy nvkm_xtensa_destroy | ||
173 | #define nouveau_xtensa_init nvkm_xtensa_init | ||
174 | #define nouveau_xtensa_fini nvkm_xtensa_fini | ||
175 | #define _nouveau_xtensa_ctor _nvkm_xtensa_ctor | ||
176 | #define _nouveau_xtensa_dtor _nvkm_xtensa_dtor | ||
177 | #define _nouveau_xtensa_init _nvkm_xtensa_init | ||
178 | #define _nouveau_xtensa_fini _nvkm_xtensa_fini | ||
179 | #define _nouveau_xtensa_rd32 _nvkm_xtensa_rd32 | ||
180 | #define _nouveau_xtensa_wr32 _nvkm_xtensa_wr32 | ||
181 | #define nouveau_xtensa_context nvkm_xtensa_context | ||
182 | #define nouveau_xtensa_context_create nvkm_xtensa_context_create | ||
183 | #define nouveau_xtensa_context_create_ nvkm_xtensa_context_create_ | ||
184 | #define nouveau_xtensa_context_destroy nvkm_xtensa_context_destroy | ||
185 | #define nouveau_xtensa_context_init nvkm_xtensa_context_init | ||
186 | #define nouveau_xtensa_context_fini nvkm_xtensa_context_fini | ||
187 | #define _nouveau_xtensa_engctx_ctor _nvkm_xtensa_engctx_ctor | ||
188 | #define _nouveau_xtensa_context_dtor _nvkm_xtensa_context_dtor | ||
189 | #define _nouveau_xtensa_context_init _nvkm_xtensa_context_init | ||
190 | #define _nouveau_xtensa_context_fini _nvkm_xtensa_context_fini | ||
191 | #define _nouveau_xtensa_context_rd32 _nvkm_xtensa_context_rd32 | ||
192 | #define _nouveau_xtensa_context_wr32 _nvkm_xtensa_context_wr32 | ||
193 | #define nouveau_xtensa_intr nvkm_xtensa_intr | ||
194 | |||
4 | #endif | 195 | #endif |
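The long run of #defines above appears to alias the old nouveau_* names to their nvkm_* replacements so that not-yet-converted call sites keep compiling unchanged while the rename is rolled out. A minimal, self-contained sketch of that aliasing mechanism follows; the function names in it are invented and it is not part of the patch:

        /* illustration only -- hypothetical names, not from the nouveau tree */
        #include <stdio.h>

        static void nvkm_example_init(void)
        {
                printf("init\n");
        }

        /* analogous to "#define nouveau_object_init nvkm_object_init" above */
        #define nouveau_example_init nvkm_example_init

        int main(void)
        {
                nouveau_example_init();  /* old spelling, resolves to the new symbol */
                return 0;
        }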
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/parent.h b/drivers/gpu/drm/nouveau/include/nvkm/core/parent.h index 4e2345a5cfdb..837e4fe966a5 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/parent.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/parent.h | |||
@@ -1,32 +1,29 @@ | |||
1 | #ifndef __NOUVEAU_PARENT_H__ | 1 | #ifndef __NVKM_PARENT_H__ |
2 | #define __NOUVEAU_PARENT_H__ | 2 | #define __NVKM_PARENT_H__ |
3 | |||
4 | #include <core/device.h> | ||
5 | #include <core/object.h> | 3 | #include <core/object.h> |
6 | 4 | ||
7 | struct nouveau_sclass { | 5 | struct nvkm_sclass { |
8 | struct nouveau_sclass *sclass; | 6 | struct nvkm_sclass *sclass; |
9 | struct nouveau_engine *engine; | 7 | struct nvkm_engine *engine; |
10 | struct nouveau_oclass *oclass; | 8 | struct nvkm_oclass *oclass; |
11 | }; | 9 | }; |
12 | 10 | ||
13 | struct nouveau_parent { | 11 | struct nvkm_parent { |
14 | struct nouveau_object object; | 12 | struct nvkm_object object; |
15 | 13 | ||
16 | struct nouveau_sclass *sclass; | 14 | struct nvkm_sclass *sclass; |
17 | u64 engine; | 15 | u64 engine; |
18 | 16 | ||
19 | int (*context_attach)(struct nouveau_object *, | 17 | int (*context_attach)(struct nvkm_object *, struct nvkm_object *); |
20 | struct nouveau_object *); | 18 | int (*context_detach)(struct nvkm_object *, bool suspend, |
21 | int (*context_detach)(struct nouveau_object *, bool suspend, | 19 | struct nvkm_object *); |
22 | struct nouveau_object *); | ||
23 | 20 | ||
24 | int (*object_attach)(struct nouveau_object *parent, | 21 | int (*object_attach)(struct nvkm_object *parent, |
25 | struct nouveau_object *object, u32 name); | 22 | struct nvkm_object *object, u32 name); |
26 | void (*object_detach)(struct nouveau_object *parent, int cookie); | 23 | void (*object_detach)(struct nvkm_object *parent, int cookie); |
27 | }; | 24 | }; |
28 | 25 | ||
29 | static inline struct nouveau_parent * | 26 | static inline struct nvkm_parent * |
30 | nv_parent(void *obj) | 27 | nv_parent(void *obj) |
31 | { | 28 | { |
32 | #if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA | 29 | #if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA |
@@ -36,27 +33,26 @@ nv_parent(void *obj) | |||
36 | return obj; | 33 | return obj; |
37 | } | 34 | } |
38 | 35 | ||
39 | #define nouveau_parent_create(p,e,c,v,s,m,d) \ | 36 | #define nvkm_parent_create(p,e,c,v,s,m,d) \ |
40 | nouveau_parent_create_((p), (e), (c), (v), (s), (m), \ | 37 | nvkm_parent_create_((p), (e), (c), (v), (s), (m), \ |
41 | sizeof(**d), (void **)d) | 38 | sizeof(**d), (void **)d) |
42 | #define nouveau_parent_init(p) \ | 39 | #define nvkm_parent_init(p) \ |
43 | nouveau_object_init(&(p)->object) | 40 | nvkm_object_init(&(p)->object) |
44 | #define nouveau_parent_fini(p,s) \ | 41 | #define nvkm_parent_fini(p,s) \ |
45 | nouveau_object_fini(&(p)->object, (s)) | 42 | nvkm_object_fini(&(p)->object, (s)) |
46 | 43 | ||
47 | int nouveau_parent_create_(struct nouveau_object *, struct nouveau_object *, | 44 | int nvkm_parent_create_(struct nvkm_object *, struct nvkm_object *, |
48 | struct nouveau_oclass *, u32 pclass, | 45 | struct nvkm_oclass *, u32 pclass, |
49 | struct nouveau_oclass *, u64 engcls, | 46 | struct nvkm_oclass *, u64 engcls, |
50 | int size, void **); | 47 | int size, void **); |
51 | void nouveau_parent_destroy(struct nouveau_parent *); | 48 | void nvkm_parent_destroy(struct nvkm_parent *); |
52 | |||
53 | void _nouveau_parent_dtor(struct nouveau_object *); | ||
54 | #define _nouveau_parent_init nouveau_object_init | ||
55 | #define _nouveau_parent_fini nouveau_object_fini | ||
56 | 49 | ||
57 | int nouveau_parent_sclass(struct nouveau_object *, u16 handle, | 50 | void _nvkm_parent_dtor(struct nvkm_object *); |
58 | struct nouveau_object **pengine, | 51 | #define _nvkm_parent_init nvkm_object_init |
59 | struct nouveau_oclass **poclass); | 52 | #define _nvkm_parent_fini nvkm_object_fini |
60 | int nouveau_parent_lclass(struct nouveau_object *, u32 *, int); | ||
61 | 53 | ||
54 | int nvkm_parent_sclass(struct nvkm_object *, u16 handle, | ||
55 | struct nvkm_object **pengine, | ||
56 | struct nvkm_oclass **poclass); | ||
57 | int nvkm_parent_lclass(struct nvkm_object *, u32 *, int); | ||
62 | #endif | 58 | #endif |
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/printk.h b/drivers/gpu/drm/nouveau/include/nvkm/core/printk.h index 451b6ed20b7e..83648177059f 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/printk.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/printk.h | |||
@@ -1,13 +1,11 @@ | |||
1 | #ifndef __NOUVEAU_PRINTK_H__ | 1 | #ifndef __NVKM_PRINTK_H__ |
2 | #define __NOUVEAU_PRINTK_H__ | 2 | #define __NVKM_PRINTK_H__ |
3 | |||
4 | #include <core/os.h> | 3 | #include <core/os.h> |
5 | #include <core/debug.h> | 4 | #include <core/debug.h> |
6 | 5 | struct nvkm_object; | |
7 | struct nouveau_object; | ||
8 | 6 | ||
9 | void __printf(3, 4) | 7 | void __printf(3, 4) |
10 | nv_printk_(struct nouveau_object *, int, const char *, ...); | 8 | nv_printk_(struct nvkm_object *, int, const char *, ...); |
11 | 9 | ||
12 | #define nv_printk(o,l,f,a...) do { \ | 10 | #define nv_printk(o,l,f,a...) do { \ |
13 | if (NV_DBG_##l <= CONFIG_NOUVEAU_DEBUG) \ | 11 | if (NV_DBG_##l <= CONFIG_NOUVEAU_DEBUG) \ |
@@ -21,12 +19,11 @@ nv_printk_(struct nouveau_object *, int, const char *, ...); | |||
21 | #define nv_debug(o,f,a...) nv_printk((o), DEBUG, f, ##a) | 19 | #define nv_debug(o,f,a...) nv_printk((o), DEBUG, f, ##a) |
22 | #define nv_trace(o,f,a...) nv_printk((o), TRACE, f, ##a) | 20 | #define nv_trace(o,f,a...) nv_printk((o), TRACE, f, ##a) |
23 | #define nv_spam(o,f,a...) nv_printk((o), SPAM, f, ##a) | 21 | #define nv_spam(o,f,a...) nv_printk((o), SPAM, f, ##a) |
24 | #define nv_ioctl(o,f,a...) nv_trace(nouveau_client(o), "ioctl: "f, ##a) | 22 | #define nv_ioctl(o,f,a...) nv_trace(nvkm_client(o), "ioctl: "f, ##a) |
25 | 23 | ||
26 | #define nv_assert(f,a...) do { \ | 24 | #define nv_assert(f,a...) do { \ |
27 | if (NV_DBG_FATAL <= CONFIG_NOUVEAU_DEBUG) \ | 25 | if (NV_DBG_FATAL <= CONFIG_NOUVEAU_DEBUG) \ |
28 | nv_printk_(NULL, NV_DBG_FATAL, f "\n", ##a); \ | 26 | nv_printk_(NULL, NV_DBG_FATAL, f "\n", ##a); \ |
29 | BUG_ON(1); \ | 27 | BUG_ON(1); \ |
30 | } while(0) | 28 | } while(0) |
31 | |||
32 | #endif | 29 | #endif |
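nv_printk() above pastes the level token (NV_DBG_##l) and compares it against CONFIG_NOUVEAU_DEBUG, so messages whose level exceeds the build-time threshold sit behind a constant-false condition the compiler can drop. A standalone sketch of the same token-pasting pattern, with made-up names and threshold, not part of the patch:

        /* illustration only -- hypothetical names and threshold */
        #include <stdio.h>

        #define DBG_ERROR 0
        #define DBG_DEBUG 1
        #define DBG_SPAM  2
        #define LOG_LEVEL DBG_DEBUG     /* stands in for CONFIG_NOUVEAU_DEBUG */

        #define log_printk(l, f, a...) do {                                    \
                if (DBG_##l <= LOG_LEVEL) /* constant condition per call site */ \
                        printf(f, ##a);                                        \
        } while (0)

        int main(void)
        {
                log_printk(ERROR, "kept: %d\n", 1);
                log_printk(SPAM, "never printed at this LOG_LEVEL: %d\n", 2);
                return 0;
        }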
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h b/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h index e51014337ebe..cc132eaa10cc 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h | |||
@@ -1,23 +1,20 @@ | |||
1 | #ifndef __NOUVEAU_RAMHT_H__ | 1 | #ifndef __NVKM_RAMHT_H__ |
2 | #define __NOUVEAU_RAMHT_H__ | 2 | #define __NVKM_RAMHT_H__ |
3 | |||
4 | #include <core/gpuobj.h> | 3 | #include <core/gpuobj.h> |
5 | 4 | ||
6 | struct nouveau_ramht { | 5 | struct nvkm_ramht { |
7 | struct nouveau_gpuobj gpuobj; | 6 | struct nvkm_gpuobj gpuobj; |
8 | int bits; | 7 | int bits; |
9 | }; | 8 | }; |
10 | 9 | ||
11 | int nouveau_ramht_insert(struct nouveau_ramht *, int chid, | 10 | int nvkm_ramht_insert(struct nvkm_ramht *, int chid, u32 handle, u32 context); |
12 | u32 handle, u32 context); | 11 | void nvkm_ramht_remove(struct nvkm_ramht *, int cookie); |
13 | void nouveau_ramht_remove(struct nouveau_ramht *, int cookie); | 12 | int nvkm_ramht_new(struct nvkm_object *, struct nvkm_object *, u32 size, |
14 | int nouveau_ramht_new(struct nouveau_object *, struct nouveau_object *, | 13 | u32 align, struct nvkm_ramht **); |
15 | u32 size, u32 align, struct nouveau_ramht **); | ||
16 | 14 | ||
17 | static inline void | 15 | static inline void |
18 | nouveau_ramht_ref(struct nouveau_ramht *obj, struct nouveau_ramht **ref) | 16 | nvkm_ramht_ref(struct nvkm_ramht *obj, struct nvkm_ramht **ref) |
19 | { | 17 | { |
20 | nouveau_gpuobj_ref(&obj->gpuobj, (struct nouveau_gpuobj **)ref); | 18 | nvkm_gpuobj_ref(&obj->gpuobj, (struct nvkm_gpuobj **)ref); |
21 | } | 19 | } |
22 | |||
23 | #endif | 20 | #endif |
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h index 152c842e1f66..6fdc39116aac 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h | |||
@@ -1,24 +1,23 @@ | |||
1 | #ifndef __NOUVEAU_SUBDEV_H__ | 1 | #ifndef __NVKM_SUBDEV_H__ |
2 | #define __NOUVEAU_SUBDEV_H__ | 2 | #define __NVKM_SUBDEV_H__ |
3 | |||
4 | #include <core/object.h> | 3 | #include <core/object.h> |
5 | #include <core/devidx.h> | 4 | #include <core/devidx.h> |
6 | 5 | ||
7 | #define NV_SUBDEV_(sub,var) (NV_SUBDEV_CLASS | ((var) << 8) | (sub)) | 6 | #define NV_SUBDEV_(sub,var) (NV_SUBDEV_CLASS | ((var) << 8) | (sub)) |
8 | #define NV_SUBDEV(name,var) NV_SUBDEV_(NVDEV_SUBDEV_##name, (var)) | 7 | #define NV_SUBDEV(name,var) NV_SUBDEV_(NVDEV_SUBDEV_##name, (var)) |
9 | 8 | ||
10 | struct nouveau_subdev { | 9 | struct nvkm_subdev { |
11 | struct nouveau_object object; | 10 | struct nvkm_object object; |
12 | struct mutex mutex; | 11 | struct mutex mutex; |
13 | const char *name; | 12 | const char *name; |
14 | void __iomem *mmio; | 13 | void __iomem *mmio; |
15 | u32 debug; | 14 | u32 debug; |
16 | u32 unit; | 15 | u32 unit; |
17 | 16 | ||
18 | void (*intr)(struct nouveau_subdev *); | 17 | void (*intr)(struct nvkm_subdev *); |
19 | }; | 18 | }; |
20 | 19 | ||
21 | static inline struct nouveau_subdev * | 20 | static inline struct nvkm_subdev * |
22 | nv_subdev(void *obj) | 21 | nv_subdev(void *obj) |
23 | { | 22 | { |
24 | #if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA | 23 | #if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA |
@@ -29,29 +28,29 @@ nv_subdev(void *obj) | |||
29 | } | 28 | } |
30 | 29 | ||
31 | static inline int | 30 | static inline int |
32 | nv_subidx(struct nouveau_subdev *subdev) | 31 | nv_subidx(struct nvkm_subdev *subdev) |
33 | { | 32 | { |
34 | return nv_hclass(subdev) & 0xff; | 33 | return nv_hclass(subdev) & 0xff; |
35 | } | 34 | } |
36 | 35 | ||
37 | struct nouveau_subdev *nouveau_subdev(void *obj, int idx); | 36 | struct nvkm_subdev *nvkm_subdev(void *obj, int idx); |
38 | 37 | ||
39 | #define nouveau_subdev_create(p,e,o,v,s,f,d) \ | 38 | #define nvkm_subdev_create(p,e,o,v,s,f,d) \ |
40 | nouveau_subdev_create_((p), (e), (o), (v), (s), (f), \ | 39 | nvkm_subdev_create_((p), (e), (o), (v), (s), (f), \ |
41 | sizeof(**d),(void **)d) | 40 | sizeof(**d),(void **)d) |
42 | 41 | ||
43 | int nouveau_subdev_create_(struct nouveau_object *, struct nouveau_object *, | 42 | int nvkm_subdev_create_(struct nvkm_object *, struct nvkm_object *, |
44 | struct nouveau_oclass *, u32 pclass, | 43 | struct nvkm_oclass *, u32 pclass, |
45 | const char *sname, const char *fname, | 44 | const char *sname, const char *fname, |
46 | int size, void **); | 45 | int size, void **); |
47 | void nouveau_subdev_destroy(struct nouveau_subdev *); | 46 | void nvkm_subdev_destroy(struct nvkm_subdev *); |
48 | int nouveau_subdev_init(struct nouveau_subdev *); | 47 | int nvkm_subdev_init(struct nvkm_subdev *); |
49 | int nouveau_subdev_fini(struct nouveau_subdev *, bool suspend); | 48 | int nvkm_subdev_fini(struct nvkm_subdev *, bool suspend); |
50 | void nouveau_subdev_reset(struct nouveau_object *); | 49 | void nvkm_subdev_reset(struct nvkm_object *); |
51 | 50 | ||
52 | void _nouveau_subdev_dtor(struct nouveau_object *); | 51 | void _nvkm_subdev_dtor(struct nvkm_object *); |
53 | int _nouveau_subdev_init(struct nouveau_object *); | 52 | int _nvkm_subdev_init(struct nvkm_object *); |
54 | int _nouveau_subdev_fini(struct nouveau_object *, bool suspend); | 53 | int _nvkm_subdev_fini(struct nvkm_object *, bool suspend); |
55 | 54 | ||
56 | #define s_printk(s,l,f,a...) do { \ | 55 | #define s_printk(s,l,f,a...) do { \ |
57 | if ((s)->debug >= OS_DBG_##l) { \ | 56 | if ((s)->debug >= OS_DBG_##l) { \ |
@@ -62,7 +61,7 @@ int _nouveau_subdev_fini(struct nouveau_object *, bool suspend); | |||
62 | static inline u8 | 61 | static inline u8 |
63 | nv_rd08(void *obj, u32 addr) | 62 | nv_rd08(void *obj, u32 addr) |
64 | { | 63 | { |
65 | struct nouveau_subdev *subdev = nv_subdev(obj); | 64 | struct nvkm_subdev *subdev = nv_subdev(obj); |
66 | u8 data = ioread8(subdev->mmio + addr); | 65 | u8 data = ioread8(subdev->mmio + addr); |
67 | nv_spam(subdev, "nv_rd08 0x%06x 0x%02x\n", addr, data); | 66 | nv_spam(subdev, "nv_rd08 0x%06x 0x%02x\n", addr, data); |
68 | return data; | 67 | return data; |
@@ -71,7 +70,7 @@ nv_rd08(void *obj, u32 addr) | |||
71 | static inline u16 | 70 | static inline u16 |
72 | nv_rd16(void *obj, u32 addr) | 71 | nv_rd16(void *obj, u32 addr) |
73 | { | 72 | { |
74 | struct nouveau_subdev *subdev = nv_subdev(obj); | 73 | struct nvkm_subdev *subdev = nv_subdev(obj); |
75 | u16 data = ioread16_native(subdev->mmio + addr); | 74 | u16 data = ioread16_native(subdev->mmio + addr); |
76 | nv_spam(subdev, "nv_rd16 0x%06x 0x%04x\n", addr, data); | 75 | nv_spam(subdev, "nv_rd16 0x%06x 0x%04x\n", addr, data); |
77 | return data; | 76 | return data; |
@@ -80,7 +79,7 @@ nv_rd16(void *obj, u32 addr) | |||
80 | static inline u32 | 79 | static inline u32 |
81 | nv_rd32(void *obj, u32 addr) | 80 | nv_rd32(void *obj, u32 addr) |
82 | { | 81 | { |
83 | struct nouveau_subdev *subdev = nv_subdev(obj); | 82 | struct nvkm_subdev *subdev = nv_subdev(obj); |
84 | u32 data = ioread32_native(subdev->mmio + addr); | 83 | u32 data = ioread32_native(subdev->mmio + addr); |
85 | nv_spam(subdev, "nv_rd32 0x%06x 0x%08x\n", addr, data); | 84 | nv_spam(subdev, "nv_rd32 0x%06x 0x%08x\n", addr, data); |
86 | return data; | 85 | return data; |
@@ -89,7 +88,7 @@ nv_rd32(void *obj, u32 addr) | |||
89 | static inline void | 88 | static inline void |
90 | nv_wr08(void *obj, u32 addr, u8 data) | 89 | nv_wr08(void *obj, u32 addr, u8 data) |
91 | { | 90 | { |
92 | struct nouveau_subdev *subdev = nv_subdev(obj); | 91 | struct nvkm_subdev *subdev = nv_subdev(obj); |
93 | nv_spam(subdev, "nv_wr08 0x%06x 0x%02x\n", addr, data); | 92 | nv_spam(subdev, "nv_wr08 0x%06x 0x%02x\n", addr, data); |
94 | iowrite8(data, subdev->mmio + addr); | 93 | iowrite8(data, subdev->mmio + addr); |
95 | } | 94 | } |
@@ -97,7 +96,7 @@ nv_wr08(void *obj, u32 addr, u8 data) | |||
97 | static inline void | 96 | static inline void |
98 | nv_wr16(void *obj, u32 addr, u16 data) | 97 | nv_wr16(void *obj, u32 addr, u16 data) |
99 | { | 98 | { |
100 | struct nouveau_subdev *subdev = nv_subdev(obj); | 99 | struct nvkm_subdev *subdev = nv_subdev(obj); |
101 | nv_spam(subdev, "nv_wr16 0x%06x 0x%04x\n", addr, data); | 100 | nv_spam(subdev, "nv_wr16 0x%06x 0x%04x\n", addr, data); |
102 | iowrite16_native(data, subdev->mmio + addr); | 101 | iowrite16_native(data, subdev->mmio + addr); |
103 | } | 102 | } |
@@ -105,7 +104,7 @@ nv_wr16(void *obj, u32 addr, u16 data) | |||
105 | static inline void | 104 | static inline void |
106 | nv_wr32(void *obj, u32 addr, u32 data) | 105 | nv_wr32(void *obj, u32 addr, u32 data) |
107 | { | 106 | { |
108 | struct nouveau_subdev *subdev = nv_subdev(obj); | 107 | struct nvkm_subdev *subdev = nv_subdev(obj); |
109 | nv_spam(subdev, "nv_wr32 0x%06x 0x%08x\n", addr, data); | 108 | nv_spam(subdev, "nv_wr32 0x%06x 0x%08x\n", addr, data); |
110 | iowrite32_native(data, subdev->mmio + addr); | 109 | iowrite32_native(data, subdev->mmio + addr); |
111 | } | 110 | } |
@@ -117,5 +116,4 @@ nv_mask(void *obj, u32 addr, u32 mask, u32 data) | |||
117 | nv_wr32(obj, addr, (temp & ~mask) | data); | 116 | nv_wr32(obj, addr, (temp & ~mask) | data); |
118 | return temp; | 117 | return temp; |
119 | } | 118 | } |
120 | |||
121 | #endif | 119 | #endif |
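The accessors at the end of subdev.h wrap ioread/iowrite on subdev->mmio, and nv_mask() layers a read-modify-write on top of them, returning the value that was read. A userspace sketch of that read-modify-write shape, with a plain variable in place of the MMIO window; illustration only, not part of the patch:

        #include <inttypes.h>
        #include <stdint.h>
        #include <stdio.h>

        static uint32_t fake_reg = 0x12345678;  /* stands in for an MMIO register */

        static uint32_t reg_rd32(void) { return fake_reg; }
        static void reg_wr32(uint32_t data) { fake_reg = data; }

        /* same shape as nv_mask(): write (old & ~mask) | data, return old */
        static uint32_t reg_mask(uint32_t mask, uint32_t data)
        {
                uint32_t temp = reg_rd32();
                reg_wr32((temp & ~mask) | data);
                return temp;
        }

        int main(void)
        {
                uint32_t old = reg_mask(0x000000ff, 0x000000aa); /* replace the low byte */
                printf("old %08" PRIx32 " new %08" PRIx32 "\n", old, fake_reg);
                return 0;                /* prints: old 12345678 new 123456aa */
        }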
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/device.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/device.h index 672d3c8f4145..5d4805e67e76 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/device.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/device.h | |||
@@ -27,7 +27,4 @@ int nv50_identify(struct nouveau_device *); | |||
27 | int nvc0_identify(struct nouveau_device *); | 27 | int nvc0_identify(struct nouveau_device *); |
28 | int nve0_identify(struct nouveau_device *); | 28 | int nve0_identify(struct nouveau_device *); |
29 | int gm100_identify(struct nouveau_device *); | 29 | int gm100_identify(struct nouveau_device *); |
30 | |||
31 | struct nouveau_device *nouveau_device_find(u64 name); | ||
32 | |||
33 | #endif | 30 | #endif |
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h index 181aa7da524d..bd38cf9130fc 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h | |||
@@ -1,42 +1,41 @@ | |||
1 | #ifndef __NOUVEAU_FALCON_H__ | 1 | #ifndef __NVKM_FALCON_H__ |
2 | #define __NOUVEAU_FALCON_H__ | 2 | #define __NVKM_FALCON_H__ |
3 | |||
4 | #include <core/engine.h> | ||
5 | #include <core/engctx.h> | 3 | #include <core/engctx.h> |
6 | #include <core/gpuobj.h> | ||
7 | 4 | ||
8 | struct nouveau_falcon_chan { | 5 | struct nvkm_falcon_chan { |
9 | struct nouveau_engctx base; | 6 | struct nvkm_engctx base; |
10 | }; | 7 | }; |
11 | 8 | ||
12 | #define nouveau_falcon_context_create(p,e,c,g,s,a,f,d) \ | 9 | #define nvkm_falcon_context_create(p,e,c,g,s,a,f,d) \ |
13 | nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d)) | 10 | nvkm_engctx_create((p), (e), (c), (g), (s), (a), (f), (d)) |
14 | #define nouveau_falcon_context_destroy(d) \ | 11 | #define nvkm_falcon_context_destroy(d) \ |
15 | nouveau_engctx_destroy(&(d)->base) | 12 | nvkm_engctx_destroy(&(d)->base) |
16 | #define nouveau_falcon_context_init(d) \ | 13 | #define nvkm_falcon_context_init(d) \ |
17 | nouveau_engctx_init(&(d)->base) | 14 | nvkm_engctx_init(&(d)->base) |
18 | #define nouveau_falcon_context_fini(d,s) \ | 15 | #define nvkm_falcon_context_fini(d,s) \ |
19 | nouveau_engctx_fini(&(d)->base, (s)) | 16 | nvkm_engctx_fini(&(d)->base, (s)) |
20 | 17 | ||
21 | #define _nouveau_falcon_context_ctor _nouveau_engctx_ctor | 18 | #define _nvkm_falcon_context_ctor _nvkm_engctx_ctor |
22 | #define _nouveau_falcon_context_dtor _nouveau_engctx_dtor | 19 | #define _nvkm_falcon_context_dtor _nvkm_engctx_dtor |
23 | #define _nouveau_falcon_context_init _nouveau_engctx_init | 20 | #define _nvkm_falcon_context_init _nvkm_engctx_init |
24 | #define _nouveau_falcon_context_fini _nouveau_engctx_fini | 21 | #define _nvkm_falcon_context_fini _nvkm_engctx_fini |
25 | #define _nouveau_falcon_context_rd32 _nouveau_engctx_rd32 | 22 | #define _nvkm_falcon_context_rd32 _nvkm_engctx_rd32 |
26 | #define _nouveau_falcon_context_wr32 _nouveau_engctx_wr32 | 23 | #define _nvkm_falcon_context_wr32 _nvkm_engctx_wr32 |
27 | 24 | ||
28 | struct nouveau_falcon_data { | 25 | struct nvkm_falcon_data { |
29 | bool external; | 26 | bool external; |
30 | }; | 27 | }; |
31 | 28 | ||
32 | struct nouveau_falcon { | 29 | #include <core/engine.h> |
33 | struct nouveau_engine base; | 30 | |
31 | struct nvkm_falcon { | ||
32 | struct nvkm_engine base; | ||
34 | 33 | ||
35 | u32 addr; | 34 | u32 addr; |
36 | u8 version; | 35 | u8 version; |
37 | u8 secret; | 36 | u8 secret; |
38 | 37 | ||
39 | struct nouveau_gpuobj *core; | 38 | struct nvkm_gpuobj *core; |
40 | bool external; | 39 | bool external; |
41 | 40 | ||
42 | struct { | 41 | struct { |
@@ -54,30 +53,29 @@ struct nouveau_falcon { | |||
54 | 53 | ||
55 | #define nv_falcon(priv) (&(priv)->base) | 54 | #define nv_falcon(priv) (&(priv)->base) |
56 | 55 | ||
57 | #define nouveau_falcon_create(p,e,c,b,d,i,f,r) \ | 56 | #define nvkm_falcon_create(p,e,c,b,d,i,f,r) \ |
58 | nouveau_falcon_create_((p), (e), (c), (b), (d), (i), (f), \ | 57 | nvkm_falcon_create_((p), (e), (c), (b), (d), (i), (f), \ |
59 | sizeof(**r),(void **)r) | 58 | sizeof(**r),(void **)r) |
60 | #define nouveau_falcon_destroy(p) \ | 59 | #define nvkm_falcon_destroy(p) \ |
61 | nouveau_engine_destroy(&(p)->base) | 60 | nvkm_engine_destroy(&(p)->base) |
62 | #define nouveau_falcon_init(p) ({ \ | 61 | #define nvkm_falcon_init(p) ({ \ |
63 | struct nouveau_falcon *falcon = (p); \ | 62 | struct nvkm_falcon *falcon = (p); \ |
64 | _nouveau_falcon_init(nv_object(falcon)); \ | 63 | _nvkm_falcon_init(nv_object(falcon)); \ |
65 | }) | 64 | }) |
66 | #define nouveau_falcon_fini(p,s) ({ \ | 65 | #define nvkm_falcon_fini(p,s) ({ \ |
67 | struct nouveau_falcon *falcon = (p); \ | 66 | struct nvkm_falcon *falcon = (p); \ |
68 | _nouveau_falcon_fini(nv_object(falcon), (s)); \ | 67 | _nvkm_falcon_fini(nv_object(falcon), (s)); \ |
69 | }) | 68 | }) |
70 | 69 | ||
71 | int nouveau_falcon_create_(struct nouveau_object *, struct nouveau_object *, | 70 | int nvkm_falcon_create_(struct nvkm_object *, struct nvkm_object *, |
72 | struct nouveau_oclass *, u32, bool, const char *, | 71 | struct nvkm_oclass *, u32, bool, const char *, |
73 | const char *, int, void **); | 72 | const char *, int, void **); |
74 | 73 | ||
75 | void nouveau_falcon_intr(struct nouveau_subdev *subdev); | 74 | void nvkm_falcon_intr(struct nvkm_subdev *subdev); |
76 | |||
77 | #define _nouveau_falcon_dtor _nouveau_engine_dtor | ||
78 | int _nouveau_falcon_init(struct nouveau_object *); | ||
79 | int _nouveau_falcon_fini(struct nouveau_object *, bool); | ||
80 | u32 _nouveau_falcon_rd32(struct nouveau_object *, u64); | ||
81 | void _nouveau_falcon_wr32(struct nouveau_object *, u64, u32); | ||
82 | 75 | ||
76 | #define _nvkm_falcon_dtor _nvkm_engine_dtor | ||
77 | int _nvkm_falcon_init(struct nvkm_object *); | ||
78 | int _nvkm_falcon_fini(struct nvkm_object *, bool); | ||
79 | u32 _nvkm_falcon_rd32(struct nvkm_object *, u64); | ||
80 | void _nvkm_falcon_wr32(struct nvkm_object *, u64, u32); | ||
83 | #endif | 81 | #endif |
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h index 306100f31f02..7a216cca2865 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h | |||
@@ -1,38 +1,35 @@ | |||
1 | #ifndef __NOUVEAU_XTENSA_H__ | 1 | #ifndef __NVKM_XTENSA_H__ |
2 | #define __NOUVEAU_XTENSA_H__ | 2 | #define __NVKM_XTENSA_H__ |
3 | |||
4 | #include <core/engine.h> | 3 | #include <core/engine.h> |
5 | #include <core/engctx.h> | 4 | struct nvkm_gpuobj; |
6 | #include <core/gpuobj.h> | ||
7 | 5 | ||
8 | struct nouveau_xtensa { | 6 | struct nvkm_xtensa { |
9 | struct nouveau_engine base; | 7 | struct nvkm_engine base; |
10 | 8 | ||
11 | u32 addr; | 9 | u32 addr; |
12 | struct nouveau_gpuobj *gpu_fw; | 10 | struct nvkm_gpuobj *gpu_fw; |
13 | u32 fifo_val; | 11 | u32 fifo_val; |
14 | u32 unkd28; | 12 | u32 unkd28; |
15 | }; | 13 | }; |
16 | 14 | ||
17 | #define nouveau_xtensa_create(p,e,c,b,d,i,f,r) \ | 15 | #define nvkm_xtensa_create(p,e,c,b,d,i,f,r) \ |
18 | nouveau_xtensa_create_((p), (e), (c), (b), (d), (i), (f), \ | 16 | nvkm_xtensa_create_((p), (e), (c), (b), (d), (i), (f), \ |
19 | sizeof(**r),(void **)r) | 17 | sizeof(**r),(void **)r) |
20 | 18 | ||
21 | int _nouveau_xtensa_engctx_ctor(struct nouveau_object *, | 19 | int _nvkm_xtensa_engctx_ctor(struct nvkm_object *, |
22 | struct nouveau_object *, | 20 | struct nvkm_object *, |
23 | struct nouveau_oclass *, void *, u32, | 21 | struct nvkm_oclass *, void *, u32, |
24 | struct nouveau_object **); | 22 | struct nvkm_object **); |
25 | 23 | ||
26 | void _nouveau_xtensa_intr(struct nouveau_subdev *); | 24 | void _nvkm_xtensa_intr(struct nvkm_subdev *); |
27 | int nouveau_xtensa_create_(struct nouveau_object *, | 25 | int nvkm_xtensa_create_(struct nvkm_object *, |
28 | struct nouveau_object *, | 26 | struct nvkm_object *, |
29 | struct nouveau_oclass *, u32, bool, | 27 | struct nvkm_oclass *, u32, bool, |
30 | const char *, const char *, | 28 | const char *, const char *, |
31 | int, void **); | 29 | int, void **); |
32 | #define _nouveau_xtensa_dtor _nouveau_engine_dtor | 30 | #define _nvkm_xtensa_dtor _nvkm_engine_dtor |
33 | int _nouveau_xtensa_init(struct nouveau_object *); | 31 | int _nvkm_xtensa_init(struct nvkm_object *); |
34 | int _nouveau_xtensa_fini(struct nouveau_object *, bool); | 32 | int _nvkm_xtensa_fini(struct nvkm_object *, bool); |
35 | u32 _nouveau_xtensa_rd32(struct nouveau_object *, u64); | 33 | u32 _nvkm_xtensa_rd32(struct nvkm_object *, u64); |
36 | void _nouveau_xtensa_wr32(struct nouveau_object *, u64, u32); | 34 | void _nvkm_xtensa_wr32(struct nvkm_object *, u64, u32); |
37 | |||
38 | #endif | 35 | #endif |
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h index 4524d4eea2f1..d42a08837d4f 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h | |||
@@ -2,6 +2,7 @@ | |||
2 | #define __NOUVEAU_CLK_H__ | 2 | #define __NOUVEAU_CLK_H__ |
3 | 3 | ||
4 | #include <core/device.h> | 4 | #include <core/device.h> |
5 | #include <core/notify.h> | ||
5 | #include <core/subdev.h> | 6 | #include <core/subdev.h> |
6 | 7 | ||
7 | struct nouveau_pll_vals; | 8 | struct nouveau_pll_vals; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_nvif.c b/drivers/gpu/drm/nouveau/nouveau_nvif.c index 6544b84f0303..7f7bd329425e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_nvif.c +++ b/drivers/gpu/drm/nouveau/nouveau_nvif.c | |||
@@ -70,7 +70,7 @@ nvkm_client_suspend(void *priv) | |||
70 | } | 70 | } |
71 | 71 | ||
72 | static void | 72 | static void |
73 | nvkm_client_fini(void *priv) | 73 | nvkm_client_driver_fini(void *priv) |
74 | { | 74 | { |
75 | struct nouveau_object *client = priv; | 75 | struct nouveau_object *client = priv; |
76 | nouveau_client_fini(nv_client(client), false); | 76 | nouveau_client_fini(nv_client(client), false); |
@@ -107,8 +107,8 @@ nvkm_client_ntfy(const void *header, u32 length, const void *data, u32 size) | |||
107 | } | 107 | } |
108 | 108 | ||
109 | static int | 109 | static int |
110 | nvkm_client_init(const char *name, u64 device, const char *cfg, | 110 | nvkm_client_driver_init(const char *name, u64 device, const char *cfg, |
111 | const char *dbg, void **ppriv) | 111 | const char *dbg, void **ppriv) |
112 | { | 112 | { |
113 | struct nouveau_client *client; | 113 | struct nouveau_client *client; |
114 | int ret; | 114 | int ret; |
@@ -125,8 +125,8 @@ nvkm_client_init(const char *name, u64 device, const char *cfg, | |||
125 | const struct nvif_driver | 125 | const struct nvif_driver |
126 | nvif_driver_nvkm = { | 126 | nvif_driver_nvkm = { |
127 | .name = "nvkm", | 127 | .name = "nvkm", |
128 | .init = nvkm_client_init, | 128 | .init = nvkm_client_driver_init, |
129 | .fini = nvkm_client_fini, | 129 | .fini = nvkm_client_driver_fini, |
130 | .suspend = nvkm_client_suspend, | 130 | .suspend = nvkm_client_suspend, |
131 | .resume = nvkm_client_resume, | 131 | .resume = nvkm_client_resume, |
132 | .ioctl = nvkm_client_ioctl, | 132 | .ioctl = nvkm_client_ioctl, |
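This hunk renames the nvif driver callbacks to nvkm_client_driver_init()/nvkm_client_driver_fini(), presumably so they no longer share a name with the core client functions that become nvkm_client_init()/nvkm_client_fini() in nvkm/core/client.c further down. The callbacks are plugged into nvif_driver_nvkm as function pointers; a tiny standalone sketch of that ops-table shape, with invented names and not part of the patch:

        #include <stdio.h>

        struct example_driver {
                const char *name;
                int  (*init)(const char *name);
                void (*fini)(void *priv);
        };

        static int example_driver_init(const char *name)
        {
                printf("init %s\n", name);
                return 0;
        }

        static void example_driver_fini(void *priv)
        {
                (void)priv;
                printf("fini\n");
        }

        static const struct example_driver example = {
                .name = "example",
                .init = example_driver_init,
                .fini = example_driver_fini,
        };

        int main(void)
        {
                example.init(example.name);
                example.fini(NULL);
                return 0;
        }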
diff --git a/drivers/gpu/drm/nouveau/nvif/notify.c b/drivers/gpu/drm/nouveau/nvif/notify.c index 0898c3155292..8e34748709a0 100644 --- a/drivers/gpu/drm/nouveau/nvif/notify.c +++ b/drivers/gpu/drm/nouveau/nvif/notify.c | |||
@@ -92,7 +92,7 @@ nvif_notify_func(struct nvif_notify *notify, bool keep) | |||
92 | { | 92 | { |
93 | int ret = notify->func(notify); | 93 | int ret = notify->func(notify); |
94 | if (ret == NVIF_NOTIFY_KEEP || | 94 | if (ret == NVIF_NOTIFY_KEEP || |
95 | !test_and_clear_bit(NVKM_NOTIFY_USER, &notify->flags)) { | 95 | !test_and_clear_bit(NVIF_NOTIFY_USER, &notify->flags)) { |

96 | if (!keep) | 96 | if (!keep) |
97 | atomic_dec(¬ify->putcnt); | 97 | atomic_dec(¬ify->putcnt); |
98 | else | 98 | else |
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/client.c b/drivers/gpu/drm/nouveau/nvkm/core/client.c index acff10387846..878a82f8f295 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/client.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/client.c | |||
@@ -21,21 +21,18 @@ | |||
21 | * | 21 | * |
22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
23 | */ | 23 | */ |
24 | |||
25 | #include <core/object.h> | ||
26 | #include <core/client.h> | 24 | #include <core/client.h> |
25 | #include <core/device.h> | ||
27 | #include <core/handle.h> | 26 | #include <core/handle.h> |
27 | #include <core/notify.h> | ||
28 | #include <core/option.h> | 28 | #include <core/option.h> |
29 | #include <nvif/unpack.h> | ||
30 | #include <nvif/class.h> | ||
31 | 29 | ||
32 | #include <nvif/unpack.h> | 30 | #include <nvif/class.h> |
33 | #include <nvif/event.h> | 31 | #include <nvif/event.h> |
34 | 32 | #include <nvif/unpack.h> | |
35 | #include <engine/device.h> | ||
36 | 33 | ||
37 | struct nvkm_client_notify { | 34 | struct nvkm_client_notify { |
38 | struct nouveau_client *client; | 35 | struct nvkm_client *client; |
39 | struct nvkm_notify n; | 36 | struct nvkm_notify n; |
40 | u8 version; | 37 | u8 version; |
41 | u8 size; | 38 | u8 size; |
@@ -48,12 +45,12 @@ static int | |||
48 | nvkm_client_notify(struct nvkm_notify *n) | 45 | nvkm_client_notify(struct nvkm_notify *n) |
49 | { | 46 | { |
50 | struct nvkm_client_notify *notify = container_of(n, typeof(*notify), n); | 47 | struct nvkm_client_notify *notify = container_of(n, typeof(*notify), n); |
51 | struct nouveau_client *client = notify->client; | 48 | struct nvkm_client *client = notify->client; |
52 | return client->ntfy(&notify->rep, notify->size, n->data, n->size); | 49 | return client->ntfy(&notify->rep, notify->size, n->data, n->size); |
53 | } | 50 | } |
54 | 51 | ||
55 | int | 52 | int |
56 | nvkm_client_notify_put(struct nouveau_client *client, int index) | 53 | nvkm_client_notify_put(struct nvkm_client *client, int index) |
57 | { | 54 | { |
58 | if (index < ARRAY_SIZE(client->notify)) { | 55 | if (index < ARRAY_SIZE(client->notify)) { |
59 | if (client->notify[index]) { | 56 | if (client->notify[index]) { |
@@ -65,7 +62,7 @@ nvkm_client_notify_put(struct nouveau_client *client, int index) | |||
65 | } | 62 | } |
66 | 63 | ||
67 | int | 64 | int |
68 | nvkm_client_notify_get(struct nouveau_client *client, int index) | 65 | nvkm_client_notify_get(struct nvkm_client *client, int index) |
69 | { | 66 | { |
70 | if (index < ARRAY_SIZE(client->notify)) { | 67 | if (index < ARRAY_SIZE(client->notify)) { |
71 | if (client->notify[index]) { | 68 | if (client->notify[index]) { |
@@ -77,7 +74,7 @@ nvkm_client_notify_get(struct nouveau_client *client, int index) | |||
77 | } | 74 | } |
78 | 75 | ||
79 | int | 76 | int |
80 | nvkm_client_notify_del(struct nouveau_client *client, int index) | 77 | nvkm_client_notify_del(struct nvkm_client *client, int index) |
81 | { | 78 | { |
82 | if (index < ARRAY_SIZE(client->notify)) { | 79 | if (index < ARRAY_SIZE(client->notify)) { |
83 | if (client->notify[index]) { | 80 | if (client->notify[index]) { |
@@ -91,10 +88,10 @@ nvkm_client_notify_del(struct nouveau_client *client, int index) | |||
91 | } | 88 | } |
92 | 89 | ||
93 | int | 90 | int |
94 | nvkm_client_notify_new(struct nouveau_object *object, | 91 | nvkm_client_notify_new(struct nvkm_object *object, |
95 | struct nvkm_event *event, void *data, u32 size) | 92 | struct nvkm_event *event, void *data, u32 size) |
96 | { | 93 | { |
97 | struct nouveau_client *client = nouveau_client(object); | 94 | struct nvkm_client *client = nvkm_client(object); |
98 | struct nvkm_client_notify *notify; | 95 | struct nvkm_client_notify *notify; |
99 | union { | 96 | union { |
100 | struct nvif_notify_req_v0 v0; | 97 | struct nvif_notify_req_v0 v0; |
@@ -142,7 +139,7 @@ nvkm_client_notify_new(struct nouveau_object *object, | |||
142 | } | 139 | } |
143 | 140 | ||
144 | static int | 141 | static int |
145 | nouveau_client_devlist(struct nouveau_object *object, void *data, u32 size) | 142 | nvkm_client_mthd_devlist(struct nvkm_object *object, void *data, u32 size) |
146 | { | 143 | { |
147 | union { | 144 | union { |
148 | struct nv_client_devlist_v0 v0; | 145 | struct nv_client_devlist_v0 v0; |
@@ -154,8 +151,7 @@ nouveau_client_devlist(struct nouveau_object *object, void *data, u32 size) | |||
154 | nv_ioctl(object, "client devlist vers %d count %d\n", | 151 | nv_ioctl(object, "client devlist vers %d count %d\n", |
155 | args->v0.version, args->v0.count); | 152 | args->v0.version, args->v0.count); |
156 | if (size == sizeof(args->v0.device[0]) * args->v0.count) { | 153 | if (size == sizeof(args->v0.device[0]) * args->v0.count) { |
157 | ret = nouveau_device_list(args->v0.device, | 154 | ret = nvkm_device_list(args->v0.device, args->v0.count); |
158 | args->v0.count); | ||
159 | if (ret >= 0) { | 155 | if (ret >= 0) { |
160 | args->v0.count = ret; | 156 | args->v0.count = ret; |
161 | ret = 0; | 157 | ret = 0; |
@@ -169,12 +165,11 @@ nouveau_client_devlist(struct nouveau_object *object, void *data, u32 size) | |||
169 | } | 165 | } |
170 | 166 | ||
171 | static int | 167 | static int |
172 | nouveau_client_mthd(struct nouveau_object *object, u32 mthd, | 168 | nvkm_client_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size) |
173 | void *data, u32 size) | ||
174 | { | 169 | { |
175 | switch (mthd) { | 170 | switch (mthd) { |
176 | case NV_CLIENT_DEVLIST: | 171 | case NV_CLIENT_DEVLIST: |
177 | return nouveau_client_devlist(object, data, size); | 172 | return nvkm_client_mthd_devlist(object, data, size); |
178 | default: | 173 | default: |
179 | break; | 174 | break; |
180 | } | 175 | } |
@@ -182,71 +177,71 @@ nouveau_client_mthd(struct nouveau_object *object, u32 mthd, | |||
182 | } | 177 | } |
183 | 178 | ||
184 | static void | 179 | static void |
185 | nouveau_client_dtor(struct nouveau_object *object) | 180 | nvkm_client_dtor(struct nvkm_object *object) |
186 | { | 181 | { |
187 | struct nouveau_client *client = (void *)object; | 182 | struct nvkm_client *client = (void *)object; |
188 | int i; | 183 | int i; |
189 | for (i = 0; i < ARRAY_SIZE(client->notify); i++) | 184 | for (i = 0; i < ARRAY_SIZE(client->notify); i++) |
190 | nvkm_client_notify_del(client, i); | 185 | nvkm_client_notify_del(client, i); |
191 | nouveau_object_ref(NULL, &client->device); | 186 | nvkm_object_ref(NULL, &client->device); |
192 | nouveau_handle_destroy(client->root); | 187 | nvkm_handle_destroy(client->root); |
193 | nouveau_namedb_destroy(&client->namedb); | 188 | nvkm_namedb_destroy(&client->namedb); |
194 | } | 189 | } |
195 | 190 | ||
196 | static struct nouveau_oclass | 191 | static struct nvkm_oclass |
197 | nouveau_client_oclass = { | 192 | nvkm_client_oclass = { |
198 | .ofuncs = &(struct nouveau_ofuncs) { | 193 | .ofuncs = &(struct nvkm_ofuncs) { |
199 | .dtor = nouveau_client_dtor, | 194 | .dtor = nvkm_client_dtor, |
200 | .mthd = nouveau_client_mthd, | 195 | .mthd = nvkm_client_mthd, |
201 | }, | 196 | }, |
202 | }; | 197 | }; |
203 | 198 | ||
204 | int | 199 | int |
205 | nouveau_client_create_(const char *name, u64 devname, const char *cfg, | 200 | nvkm_client_create_(const char *name, u64 devname, const char *cfg, |
206 | const char *dbg, int length, void **pobject) | 201 | const char *dbg, int length, void **pobject) |
207 | { | 202 | { |
208 | struct nouveau_object *device; | 203 | struct nvkm_object *device; |
209 | struct nouveau_client *client; | 204 | struct nvkm_client *client; |
210 | int ret; | 205 | int ret; |
211 | 206 | ||
212 | device = (void *)nouveau_device_find(devname); | 207 | device = (void *)nvkm_device_find(devname); |
213 | if (!device) | 208 | if (!device) |
214 | return -ENODEV; | 209 | return -ENODEV; |
215 | 210 | ||
216 | ret = nouveau_namedb_create_(NULL, NULL, &nouveau_client_oclass, | 211 | ret = nvkm_namedb_create_(NULL, NULL, &nvkm_client_oclass, |
217 | NV_CLIENT_CLASS, NULL, | 212 | NV_CLIENT_CLASS, NULL, |
218 | (1ULL << NVDEV_ENGINE_DEVICE), | 213 | (1ULL << NVDEV_ENGINE_DEVICE), |
219 | length, pobject); | 214 | length, pobject); |
220 | client = *pobject; | 215 | client = *pobject; |
221 | if (ret) | 216 | if (ret) |
222 | return ret; | 217 | return ret; |
223 | 218 | ||
224 | ret = nouveau_handle_create(nv_object(client), ~0, ~0, | 219 | ret = nvkm_handle_create(nv_object(client), ~0, ~0, nv_object(client), |
225 | nv_object(client), &client->root); | 220 | &client->root); |
226 | if (ret) | 221 | if (ret) |
227 | return ret; | 222 | return ret; |
228 | 223 | ||
229 | /* prevent init/fini being called, os in in charge of this */ | 224 | /* prevent init/fini being called, os in in charge of this */ |
230 | atomic_set(&nv_object(client)->usecount, 2); | 225 | atomic_set(&nv_object(client)->usecount, 2); |
231 | 226 | ||
232 | nouveau_object_ref(device, &client->device); | 227 | nvkm_object_ref(device, &client->device); |
233 | snprintf(client->name, sizeof(client->name), "%s", name); | 228 | snprintf(client->name, sizeof(client->name), "%s", name); |
234 | client->debug = nouveau_dbgopt(dbg, "CLIENT"); | 229 | client->debug = nvkm_dbgopt(dbg, "CLIENT"); |
235 | return 0; | 230 | return 0; |
236 | } | 231 | } |
237 | 232 | ||
238 | int | 233 | int |
239 | nouveau_client_init(struct nouveau_client *client) | 234 | nvkm_client_init(struct nvkm_client *client) |
240 | { | 235 | { |
241 | int ret; | 236 | int ret; |
242 | nv_debug(client, "init running\n"); | 237 | nv_debug(client, "init running\n"); |
243 | ret = nouveau_handle_init(client->root); | 238 | ret = nvkm_handle_init(client->root); |
244 | nv_debug(client, "init completed with %d\n", ret); | 239 | nv_debug(client, "init completed with %d\n", ret); |
245 | return ret; | 240 | return ret; |
246 | } | 241 | } |
247 | 242 | ||
248 | int | 243 | int |
249 | nouveau_client_fini(struct nouveau_client *client, bool suspend) | 244 | nvkm_client_fini(struct nvkm_client *client, bool suspend) |
250 | { | 245 | { |
251 | const char *name[2] = { "fini", "suspend" }; | 246 | const char *name[2] = { "fini", "suspend" }; |
252 | int ret, i; | 247 | int ret, i; |
@@ -255,16 +250,16 @@ nouveau_client_fini(struct nouveau_client *client, bool suspend) | |||
255 | for (i = 0; i < ARRAY_SIZE(client->notify); i++) | 250 | for (i = 0; i < ARRAY_SIZE(client->notify); i++) |
256 | nvkm_client_notify_put(client, i); | 251 | nvkm_client_notify_put(client, i); |
257 | nv_debug(client, "%s object\n", name[suspend]); | 252 | nv_debug(client, "%s object\n", name[suspend]); |
258 | ret = nouveau_handle_fini(client->root, suspend); | 253 | ret = nvkm_handle_fini(client->root, suspend); |
259 | nv_debug(client, "%s completed with %d\n", name[suspend], ret); | 254 | nv_debug(client, "%s completed with %d\n", name[suspend], ret); |
260 | return ret; | 255 | return ret; |
261 | } | 256 | } |
262 | 257 | ||
263 | const char * | 258 | const char * |
264 | nouveau_client_name(void *obj) | 259 | nvkm_client_name(void *obj) |
265 | { | 260 | { |
266 | const char *client_name = "unknown"; | 261 | const char *client_name = "unknown"; |
267 | struct nouveau_client *client = nouveau_client(obj); | 262 | struct nvkm_client *client = nvkm_client(obj); |
268 | if (client) | 263 | if (client) |
269 | client_name = client->name; | 264 | client_name = client->name; |
270 | return client_name; | 265 | return client_name; |
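The nvkm_client_notify_put()/get()/del() helpers above all follow the same pattern: check the index against the fixed-size notify[] array, check that the slot is populated, and only then act on it, with del() also releasing the entry. A standalone sketch of that bounded slot-array pattern, with invented names and not part of the patch:

        #include <stdio.h>
        #include <stdlib.h>

        #define NSLOTS 16

        static int *slots[NSLOTS];              /* NULL marks a free slot */

        static int slot_add(int value)
        {
                for (int i = 0; i < NSLOTS; i++) {
                        if (!slots[i]) {
                                slots[i] = malloc(sizeof(*slots[i]));
                                if (!slots[i])
                                        return -1;
                                *slots[i] = value;
                                return i;       /* index handed back to the caller */
                        }
                }
                return -1;                      /* table full */
        }

        static int slot_del(int index)
        {
                if (index < 0 || index >= NSLOTS || !slots[index])
                        return -1;              /* out of range or already empty */
                free(slots[index]);
                slots[index] = NULL;
                return 0;
        }

        int main(void)
        {
                int i = slot_add(42);
                printf("added at %d, del %d, del again %d\n",
                       i, slot_del(i), slot_del(i));
                return 0;
        }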
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/engctx.c b/drivers/gpu/drm/nouveau/nvkm/core/engctx.c index 892baa461575..a9df07a90f9b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/engctx.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/engctx.c | |||
@@ -21,21 +21,15 @@ | |||
21 | * | 21 | * |
22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
23 | */ | 23 | */ |
24 | |||
25 | #include <core/object.h> | ||
26 | #include <core/namedb.h> | ||
27 | #include <core/handle.h> | ||
28 | #include <core/client.h> | ||
29 | #include <core/engctx.h> | 24 | #include <core/engctx.h> |
30 | 25 | #include <core/client.h> | |
31 | #include <subdev/mmu.h> | ||
32 | 26 | ||
33 | static inline int | 27 | static inline int |
34 | nouveau_engctx_exists(struct nouveau_object *parent, | 28 | nvkm_engctx_exists(struct nvkm_object *parent, |
35 | struct nouveau_engine *engine, void **pobject) | 29 | struct nvkm_engine *engine, void **pobject) |
36 | { | 30 | { |
37 | struct nouveau_engctx *engctx; | 31 | struct nvkm_engctx *engctx; |
38 | struct nouveau_object *parctx; | 32 | struct nvkm_object *parctx; |
39 | 33 | ||
40 | list_for_each_entry(engctx, &engine->contexts, head) { | 34 | list_for_each_entry(engctx, &engine->contexts, head) { |
41 | parctx = nv_pclass(nv_object(engctx), NV_PARENT_CLASS); | 35 | parctx = nv_pclass(nv_object(engctx), NV_PARENT_CLASS); |
@@ -50,16 +44,13 @@ nouveau_engctx_exists(struct nouveau_object *parent, | |||
50 | } | 44 | } |
51 | 45 | ||
52 | int | 46 | int |
53 | nouveau_engctx_create_(struct nouveau_object *parent, | 47 | nvkm_engctx_create_(struct nvkm_object *parent, struct nvkm_object *engobj, |
54 | struct nouveau_object *engobj, | 48 | struct nvkm_oclass *oclass, struct nvkm_object *pargpu, |
55 | struct nouveau_oclass *oclass, | 49 | u32 size, u32 align, u32 flags, int length, void **pobject) |
56 | struct nouveau_object *pargpu, | ||
57 | u32 size, u32 align, u32 flags, | ||
58 | int length, void **pobject) | ||
59 | { | 50 | { |
60 | struct nouveau_client *client = nouveau_client(parent); | 51 | struct nvkm_client *client = nvkm_client(parent); |
61 | struct nouveau_engine *engine = nv_engine(engobj); | 52 | struct nvkm_engine *engine = nv_engine(engobj); |
62 | struct nouveau_object *engctx; | 53 | struct nvkm_object *engctx; |
63 | unsigned long save; | 54 | unsigned long save; |
64 | int ret; | 55 | int ret; |
65 | 56 | ||
@@ -67,7 +58,7 @@ nouveau_engctx_create_(struct nouveau_object *parent, | |||
67 | * and reference it instead of creating a new one | 58 | * and reference it instead of creating a new one |
68 | */ | 59 | */ |
69 | spin_lock_irqsave(&engine->lock, save); | 60 | spin_lock_irqsave(&engine->lock, save); |
70 | ret = nouveau_engctx_exists(parent, engine, pobject); | 61 | ret = nvkm_engctx_exists(parent, engine, pobject); |
71 | spin_unlock_irqrestore(&engine->lock, save); | 62 | spin_unlock_irqrestore(&engine->lock, save); |
72 | if (ret) | 63 | if (ret) |
73 | return ret; | 64 | return ret; |
@@ -76,13 +67,12 @@ nouveau_engctx_create_(struct nouveau_object *parent, | |||
76 | * objects backed by instance memory | 67 | * objects backed by instance memory |
77 | */ | 68 | */ |
78 | if (size) { | 69 | if (size) { |
79 | ret = nouveau_gpuobj_create_(parent, engobj, oclass, | 70 | ret = nvkm_gpuobj_create_(parent, engobj, oclass, |
80 | NV_ENGCTX_CLASS, | 71 | NV_ENGCTX_CLASS, pargpu, size, |
81 | pargpu, size, align, flags, | 72 | align, flags, length, pobject); |
82 | length, pobject); | ||
83 | } else { | 73 | } else { |
84 | ret = nouveau_object_create_(parent, engobj, oclass, | 74 | ret = nvkm_object_create_(parent, engobj, oclass, |
85 | NV_ENGCTX_CLASS, length, pobject); | 75 | NV_ENGCTX_CLASS, length, pobject); |
86 | } | 76 | } |
87 | 77 | ||
88 | engctx = *pobject; | 78 | engctx = *pobject; |
@@ -94,10 +84,10 @@ nouveau_engctx_create_(struct nouveau_object *parent, | |||
94 | * it's not possible to allocate the object with it held. | 84 | * it's not possible to allocate the object with it held. |
95 | */ | 85 | */ |
96 | spin_lock_irqsave(&engine->lock, save); | 86 | spin_lock_irqsave(&engine->lock, save); |
97 | ret = nouveau_engctx_exists(parent, engine, pobject); | 87 | ret = nvkm_engctx_exists(parent, engine, pobject); |
98 | if (ret) { | 88 | if (ret) { |
99 | spin_unlock_irqrestore(&engine->lock, save); | 89 | spin_unlock_irqrestore(&engine->lock, save); |
100 | nouveau_object_ref(NULL, &engctx); | 90 | nvkm_object_ref(NULL, &engctx); |
101 | return ret; | 91 | return ret; |
102 | } | 92 | } |
103 | 93 | ||
@@ -110,13 +100,13 @@ nouveau_engctx_create_(struct nouveau_object *parent, | |||
110 | } | 100 | } |
111 | 101 | ||
112 | void | 102 | void |
113 | nouveau_engctx_destroy(struct nouveau_engctx *engctx) | 103 | nvkm_engctx_destroy(struct nvkm_engctx *engctx) |
114 | { | 104 | { |
115 | struct nouveau_engine *engine = engctx->gpuobj.object.engine; | 105 | struct nvkm_engine *engine = engctx->gpuobj.object.engine; |
116 | struct nouveau_client *client = nouveau_client(engctx); | 106 | struct nvkm_client *client = nvkm_client(engctx); |
117 | unsigned long save; | 107 | unsigned long save; |
118 | 108 | ||
119 | nouveau_gpuobj_unmap(&engctx->vma); | 109 | nvkm_gpuobj_unmap(&engctx->vma); |
120 | spin_lock_irqsave(&engine->lock, save); | 110 | spin_lock_irqsave(&engine->lock, save); |
121 | list_del(&engctx->head); | 111 | list_del(&engctx->head); |
122 | spin_unlock_irqrestore(&engine->lock, save); | 112 | spin_unlock_irqrestore(&engine->lock, save); |
@@ -125,21 +115,21 @@ nouveau_engctx_destroy(struct nouveau_engctx *engctx) | |||
125 | atomic_dec(&client->vm->engref[nv_engidx(engine)]); | 115 | atomic_dec(&client->vm->engref[nv_engidx(engine)]); |
126 | 116 | ||
127 | if (engctx->gpuobj.size) | 117 | if (engctx->gpuobj.size) |
128 | nouveau_gpuobj_destroy(&engctx->gpuobj); | 118 | nvkm_gpuobj_destroy(&engctx->gpuobj); |
129 | else | 119 | else |
130 | nouveau_object_destroy(&engctx->gpuobj.object); | 120 | nvkm_object_destroy(&engctx->gpuobj.object); |
131 | } | 121 | } |
132 | 122 | ||
133 | int | 123 | int |
134 | nouveau_engctx_init(struct nouveau_engctx *engctx) | 124 | nvkm_engctx_init(struct nvkm_engctx *engctx) |
135 | { | 125 | { |
136 | struct nouveau_object *object = nv_object(engctx); | 126 | struct nvkm_object *object = nv_object(engctx); |
137 | struct nouveau_subdev *subdev = nv_subdev(object->engine); | 127 | struct nvkm_subdev *subdev = nv_subdev(object->engine); |
138 | struct nouveau_object *parent; | 128 | struct nvkm_object *parent; |
139 | struct nouveau_subdev *pardev; | 129 | struct nvkm_subdev *pardev; |
140 | int ret; | 130 | int ret; |
141 | 131 | ||
142 | ret = nouveau_gpuobj_init(&engctx->gpuobj); | 132 | ret = nvkm_gpuobj_init(&engctx->gpuobj); |
143 | if (ret) | 133 | if (ret) |
144 | return ret; | 134 | return ret; |
145 | 135 | ||
@@ -162,12 +152,12 @@ nouveau_engctx_init(struct nouveau_engctx *engctx) | |||
162 | } | 152 | } |
163 | 153 | ||
164 | int | 154 | int |
165 | nouveau_engctx_fini(struct nouveau_engctx *engctx, bool suspend) | 155 | nvkm_engctx_fini(struct nvkm_engctx *engctx, bool suspend) |
166 | { | 156 | { |
167 | struct nouveau_object *object = nv_object(engctx); | 157 | struct nvkm_object *object = nv_object(engctx); |
168 | struct nouveau_subdev *subdev = nv_subdev(object->engine); | 158 | struct nvkm_subdev *subdev = nv_subdev(object->engine); |
169 | struct nouveau_object *parent; | 159 | struct nvkm_object *parent; |
170 | struct nouveau_subdev *pardev; | 160 | struct nvkm_subdev *pardev; |
171 | int ret = 0; | 161 | int ret = 0; |
172 | 162 | ||
173 | parent = nv_pclass(object->parent, NV_PARENT_CLASS); | 163 | parent = nv_pclass(object->parent, NV_PARENT_CLASS); |
@@ -185,47 +175,45 @@ nouveau_engctx_fini(struct nouveau_engctx *engctx, bool suspend) | |||
185 | } | 175 | } |
186 | 176 | ||
187 | nv_debug(parent, "detached %s context\n", subdev->name); | 177 | nv_debug(parent, "detached %s context\n", subdev->name); |
188 | return nouveau_gpuobj_fini(&engctx->gpuobj, suspend); | 178 | return nvkm_gpuobj_fini(&engctx->gpuobj, suspend); |
189 | } | 179 | } |
190 | 180 | ||
191 | int | 181 | int |
192 | _nouveau_engctx_ctor(struct nouveau_object *parent, | 182 | _nvkm_engctx_ctor(struct nvkm_object *parent, struct nvkm_object *engine, |
193 | struct nouveau_object *engine, | 183 | struct nvkm_oclass *oclass, void *data, u32 size, |
194 | struct nouveau_oclass *oclass, void *data, u32 size, | 184 | struct nvkm_object **pobject) |
195 | struct nouveau_object **pobject) | ||
196 | { | 185 | { |
197 | struct nouveau_engctx *engctx; | 186 | struct nvkm_engctx *engctx; |
198 | int ret; | 187 | int ret; |
199 | 188 | ||
200 | ret = nouveau_engctx_create(parent, engine, oclass, NULL, 256, 256, | 189 | ret = nvkm_engctx_create(parent, engine, oclass, NULL, 256, 256, |
201 | NVOBJ_FLAG_ZERO_ALLOC, &engctx); | 190 | NVOBJ_FLAG_ZERO_ALLOC, &engctx); |
202 | *pobject = nv_object(engctx); | 191 | *pobject = nv_object(engctx); |
203 | return ret; | 192 | return ret; |
204 | } | 193 | } |
205 | 194 | ||
206 | void | 195 | void |
207 | _nouveau_engctx_dtor(struct nouveau_object *object) | 196 | _nvkm_engctx_dtor(struct nvkm_object *object) |
208 | { | 197 | { |
209 | nouveau_engctx_destroy(nv_engctx(object)); | 198 | nvkm_engctx_destroy(nv_engctx(object)); |
210 | } | 199 | } |
211 | 200 | ||
212 | int | 201 | int |
213 | _nouveau_engctx_init(struct nouveau_object *object) | 202 | _nvkm_engctx_init(struct nvkm_object *object) |
214 | { | 203 | { |
215 | return nouveau_engctx_init(nv_engctx(object)); | 204 | return nvkm_engctx_init(nv_engctx(object)); |
216 | } | 205 | } |
217 | 206 | ||
218 | |||
219 | int | 207 | int |
220 | _nouveau_engctx_fini(struct nouveau_object *object, bool suspend) | 208 | _nvkm_engctx_fini(struct nvkm_object *object, bool suspend) |
221 | { | 209 | { |
222 | return nouveau_engctx_fini(nv_engctx(object), suspend); | 210 | return nvkm_engctx_fini(nv_engctx(object), suspend); |
223 | } | 211 | } |
224 | 212 | ||
225 | struct nouveau_object * | 213 | struct nvkm_object * |
226 | nouveau_engctx_get(struct nouveau_engine *engine, u64 addr) | 214 | nvkm_engctx_get(struct nvkm_engine *engine, u64 addr) |
227 | { | 215 | { |
228 | struct nouveau_engctx *engctx; | 216 | struct nvkm_engctx *engctx; |
229 | unsigned long flags; | 217 | unsigned long flags; |
230 | 218 | ||
231 | spin_lock_irqsave(&engine->lock, flags); | 219 | spin_lock_irqsave(&engine->lock, flags); |
@@ -240,11 +228,11 @@ nouveau_engctx_get(struct nouveau_engine *engine, u64 addr) | |||
240 | } | 228 | } |
241 | 229 | ||
242 | void | 230 | void |
243 | nouveau_engctx_put(struct nouveau_object *object) | 231 | nvkm_engctx_put(struct nvkm_object *object) |
244 | { | 232 | { |
245 | if (object) { | 233 | if (object) { |
246 | struct nouveau_engine *engine = nv_engine(object->engine); | 234 | struct nvkm_engine *engine = nv_engine(object->engine); |
247 | struct nouveau_engctx *engctx = nv_engctx(object); | 235 | struct nvkm_engctx *engctx = nv_engctx(object); |
248 | spin_unlock_irqrestore(&engine->lock, engctx->save); | 236 | spin_unlock_irqrestore(&engine->lock, engctx->save); |
249 | } | 237 | } |
250 | } | 238 | } |
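nvkm_engctx_create_() above takes the engine lock to see whether a context for this parent already exists, drops it to do the allocation (the in-tree comment notes the object cannot be allocated with the lock held), then re-takes the lock and checks again, dropping the freshly created object if another thread won the race. A userspace sketch of that check/allocate/re-check shape, with a pthread mutex standing in for the spinlock; invented names, not part of the patch:

        #include <pthread.h>
        #include <stdio.h>
        #include <stdlib.h>

        static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
        static void *shared_ctx;                /* created at most once */

        static void *get_or_create_ctx(void)
        {
                void *ctx, *existing;

                /* first look: someone may have created it already */
                pthread_mutex_lock(&lock);
                existing = shared_ctx;
                pthread_mutex_unlock(&lock);
                if (existing)
                        return existing;

                /* allocate with the lock dropped */
                ctx = malloc(64);
                if (!ctx)
                        return NULL;

                /* re-check before publishing; drop ours if we lost the race */
                pthread_mutex_lock(&lock);
                if (shared_ctx) {
                        existing = shared_ctx;
                        pthread_mutex_unlock(&lock);
                        free(ctx);
                        return existing;
                }
                shared_ctx = ctx;
                pthread_mutex_unlock(&lock);
                return ctx;
        }

        int main(void)
        {
                printf("%p %p\n", get_or_create_ctx(), get_or_create_ctx());
                return 0;
        }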
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/engine.c b/drivers/gpu/drm/nouveau/nvkm/core/engine.c index c9414b1532ff..60820173c6aa 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/engine.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/engine.c | |||
@@ -21,42 +21,40 @@ | |||
21 | * | 21 | * |
22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
23 | */ | 23 | */ |
24 | |||
25 | #include <core/device.h> | ||
26 | #include <core/engine.h> | 24 | #include <core/engine.h> |
25 | #include <core/device.h> | ||
27 | #include <core/option.h> | 26 | #include <core/option.h> |
28 | 27 | ||
29 | struct nouveau_engine * | 28 | struct nvkm_engine * |
30 | nouveau_engine(void *obj, int idx) | 29 | nvkm_engine(void *obj, int idx) |
31 | { | 30 | { |
32 | obj = nouveau_subdev(obj, idx); | 31 | obj = nvkm_subdev(obj, idx); |
33 | if (obj && nv_iclass(obj, NV_ENGINE_CLASS)) | 32 | if (obj && nv_iclass(obj, NV_ENGINE_CLASS)) |
34 | return nv_engine(obj); | 33 | return nv_engine(obj); |
35 | return NULL; | 34 | return NULL; |
36 | } | 35 | } |
37 | 36 | ||
38 | int | 37 | int |
39 | nouveau_engine_create_(struct nouveau_object *parent, | 38 | nvkm_engine_create_(struct nvkm_object *parent, struct nvkm_object *engobj, |
40 | struct nouveau_object *engobj, | 39 | struct nvkm_oclass *oclass, bool enable, |
41 | struct nouveau_oclass *oclass, bool enable, | 40 | const char *iname, const char *fname, |
42 | const char *iname, const char *fname, | 41 | int length, void **pobject) |
43 | int length, void **pobject) | ||
44 | { | 42 | { |
45 | struct nouveau_engine *engine; | 43 | struct nvkm_engine *engine; |
46 | int ret; | 44 | int ret; |
47 | 45 | ||
48 | ret = nouveau_subdev_create_(parent, engobj, oclass, NV_ENGINE_CLASS, | 46 | ret = nvkm_subdev_create_(parent, engobj, oclass, NV_ENGINE_CLASS, |
49 | iname, fname, length, pobject); | 47 | iname, fname, length, pobject); |
50 | engine = *pobject; | 48 | engine = *pobject; |
51 | if (ret) | 49 | if (ret) |
52 | return ret; | 50 | return ret; |
53 | 51 | ||
54 | if (parent) { | 52 | if (parent) { |
55 | struct nouveau_device *device = nv_device(parent); | 53 | struct nvkm_device *device = nv_device(parent); |
56 | int engidx = nv_engidx(engine); | 54 | int engidx = nv_engidx(engine); |
57 | 55 | ||
58 | if (device->disable_mask & (1ULL << engidx)) { | 56 | if (device->disable_mask & (1ULL << engidx)) { |
59 | if (!nouveau_boolopt(device->cfgopt, iname, false)) { | 57 | if (!nvkm_boolopt(device->cfgopt, iname, false)) { |
60 | nv_debug(engine, "engine disabled by hw/fw\n"); | 58 | nv_debug(engine, "engine disabled by hw/fw\n"); |
61 | return -ENODEV; | 59 | return -ENODEV; |
62 | } | 60 | } |
@@ -64,7 +62,7 @@ nouveau_engine_create_(struct nouveau_object *parent, | |||
64 | nv_warn(engine, "ignoring hw/fw engine disable\n"); | 62 | nv_warn(engine, "ignoring hw/fw engine disable\n"); |
65 | } | 63 | } |
66 | 64 | ||
67 | if (!nouveau_boolopt(device->cfgopt, iname, enable)) { | 65 | if (!nvkm_boolopt(device->cfgopt, iname, enable)) { |
68 | if (!enable) | 66 | if (!enable) |
69 | nv_warn(engine, "disabled, %s=1 to enable\n", iname); | 67 | nv_warn(engine, "disabled, %s=1 to enable\n", iname); |
70 | return -ENODEV; | 68 | return -ENODEV; |
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/enum.c b/drivers/gpu/drm/nouveau/nvkm/core/enum.c index dd434790ccc4..4f92bfc13d6b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/enum.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/enum.c | |||
@@ -24,12 +24,10 @@ | |||
24 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | 24 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. |
25 | * | 25 | * |
26 | */ | 26 | */ |
27 | |||
28 | #include <core/os.h> | ||
29 | #include <core/enum.h> | 27 | #include <core/enum.h> |
30 | 28 | ||
31 | const struct nouveau_enum * | 29 | const struct nvkm_enum * |
32 | nouveau_enum_find(const struct nouveau_enum *en, u32 value) | 30 | nvkm_enum_find(const struct nvkm_enum *en, u32 value) |
33 | { | 31 | { |
34 | while (en->name) { | 32 | while (en->name) { |
35 | if (en->value == value) | 33 | if (en->value == value) |
@@ -40,10 +38,10 @@ nouveau_enum_find(const struct nouveau_enum *en, u32 value) | |||
40 | return NULL; | 38 | return NULL; |
41 | } | 39 | } |
42 | 40 | ||
43 | const struct nouveau_enum * | 41 | const struct nvkm_enum * |
44 | nouveau_enum_print(const struct nouveau_enum *en, u32 value) | 42 | nvkm_enum_print(const struct nvkm_enum *en, u32 value) |
45 | { | 43 | { |
46 | en = nouveau_enum_find(en, value); | 44 | en = nvkm_enum_find(en, value); |
47 | if (en) | 45 | if (en) |
48 | pr_cont("%s", en->name); | 46 | pr_cont("%s", en->name); |
49 | else | 47 | else |
@@ -52,7 +50,7 @@ nouveau_enum_print(const struct nouveau_enum *en, u32 value) | |||
52 | } | 50 | } |
53 | 51 | ||
54 | void | 52 | void |
55 | nouveau_bitfield_print(const struct nouveau_bitfield *bf, u32 value) | 53 | nvkm_bitfield_print(const struct nvkm_bitfield *bf, u32 value) |
56 | { | 54 | { |
57 | while (bf->name) { | 55 | while (bf->name) { |
58 | if (value & bf->mask) { | 56 | if (value & bf->mask) { |
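Annotation (not part of the patch): nvkm_enum_find() walks a name table until it hits an entry whose name is NULL, so tables must be terminated by an empty entry. A short sketch under that assumption; the table contents and example_decode_intr() are made up, only the helper calls and the .value/.name fields come from the hunk above.

#include <core/enum.h>

/* Illustrative decode table, terminated by a zeroed entry. */
static const struct nvkm_enum example_intr_names[] = {
        { .value = 0x00000001, .name = "FIFO" },
        { .value = 0x00000002, .name = "GRAPH" },
        {}
};

static void
example_decode_intr(u32 intr)
{
        const struct nvkm_enum *en = nvkm_enum_find(example_intr_names, intr);

        if (en)
                pr_info("intr: %s\n", en->name);
        else
                pr_info("intr: unknown 0x%08x\n", intr);
}

nvkm_enum_print() and nvkm_bitfield_print() follow the same table convention, printing either the matched name or the raw value.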
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/event.c b/drivers/gpu/drm/nouveau/nvkm/core/event.c index 760947e380c9..4e8d3fa042df 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/event.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/event.c | |||
@@ -19,9 +19,8 @@ | |||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
20 | * OTHER DEALINGS IN THE SOFTWARE. | 20 | * OTHER DEALINGS IN THE SOFTWARE. |
21 | */ | 21 | */ |
22 | |||
23 | #include <core/object.h> | ||
24 | #include <core/event.h> | 22 | #include <core/event.h> |
23 | #include <core/notify.h> | ||
25 | 24 | ||
26 | void | 25 | void |
27 | nvkm_event_put(struct nvkm_event *event, u32 types, int index) | 26 | nvkm_event_put(struct nvkm_event *event, u32 types, int index) |
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c b/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c index 68a4232d35cc..0c5cb55fc617 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c | |||
@@ -21,8 +21,6 @@ | |||
21 | * | 21 | * |
22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
23 | */ | 23 | */ |
24 | |||
25 | #include <core/object.h> | ||
26 | #include <core/gpuobj.h> | 24 | #include <core/gpuobj.h> |
27 | 25 | ||
28 | #include <subdev/instmem.h> | 26 | #include <subdev/instmem.h> |
@@ -30,7 +28,7 @@ | |||
30 | #include <subdev/mmu.h> | 28 | #include <subdev/mmu.h> |
31 | 29 | ||
32 | void | 30 | void |
33 | nouveau_gpuobj_destroy(struct nouveau_gpuobj *gpuobj) | 31 | nvkm_gpuobj_destroy(struct nvkm_gpuobj *gpuobj) |
34 | { | 32 | { |
35 | int i; | 33 | int i; |
36 | 34 | ||
@@ -39,29 +37,25 @@ nouveau_gpuobj_destroy(struct nouveau_gpuobj *gpuobj) | |||
39 | nv_wo32(gpuobj, i, 0x00000000); | 37 | nv_wo32(gpuobj, i, 0x00000000); |
40 | } | 38 | } |
41 | 39 | ||
42 | if (gpuobj->node) { | 40 | if (gpuobj->node) |
43 | nouveau_mm_free(&nv_gpuobj(gpuobj->parent)->heap, | 41 | nvkm_mm_free(&nv_gpuobj(gpuobj->parent)->heap, &gpuobj->node); |
44 | &gpuobj->node); | ||
45 | } | ||
46 | 42 | ||
47 | if (gpuobj->heap.block_size) | 43 | if (gpuobj->heap.block_size) |
48 | nouveau_mm_fini(&gpuobj->heap); | 44 | nvkm_mm_fini(&gpuobj->heap); |
49 | 45 | ||
50 | nouveau_object_destroy(&gpuobj->object); | 46 | nvkm_object_destroy(&gpuobj->object); |
51 | } | 47 | } |
52 | 48 | ||
53 | int | 49 | int |
54 | nouveau_gpuobj_create_(struct nouveau_object *parent, | 50 | nvkm_gpuobj_create_(struct nvkm_object *parent, struct nvkm_object *engine, |
55 | struct nouveau_object *engine, | 51 | struct nvkm_oclass *oclass, u32 pclass, |
56 | struct nouveau_oclass *oclass, u32 pclass, | 52 | struct nvkm_object *pargpu, u32 size, u32 align, u32 flags, |
57 | struct nouveau_object *pargpu, | 53 | int length, void **pobject) |
58 | u32 size, u32 align, u32 flags, | ||
59 | int length, void **pobject) | ||
60 | { | 54 | { |
61 | struct nouveau_instmem *imem = nouveau_instmem(parent); | 55 | struct nvkm_instmem *imem = nvkm_instmem(parent); |
62 | struct nouveau_bar *bar = nouveau_bar(parent); | 56 | struct nvkm_bar *bar = nvkm_bar(parent); |
63 | struct nouveau_gpuobj *gpuobj; | 57 | struct nvkm_gpuobj *gpuobj; |
64 | struct nouveau_mm *heap = NULL; | 58 | struct nvkm_mm *heap = NULL; |
65 | int ret, i; | 59 | int ret, i; |
66 | u64 addr; | 60 | u64 addr; |
67 | 61 | ||
@@ -92,19 +86,19 @@ nouveau_gpuobj_create_(struct nouveau_object *parent, | |||
92 | size = nv_memobj(pargpu)->size; | 86 | size = nv_memobj(pargpu)->size; |
93 | 87 | ||
94 | if (bar && bar->alloc) { | 88 | if (bar && bar->alloc) { |
95 | struct nouveau_instobj *iobj = (void *)parent; | 89 | struct nvkm_instobj *iobj = (void *)parent; |
96 | struct nouveau_mem **mem = (void *)(iobj + 1); | 90 | struct nvkm_mem **mem = (void *)(iobj + 1); |
97 | struct nouveau_mem *node = *mem; | 91 | struct nvkm_mem *node = *mem; |
98 | if (!bar->alloc(bar, parent, node, &pargpu)) { | 92 | if (!bar->alloc(bar, parent, node, &pargpu)) { |
99 | nouveau_object_ref(NULL, &parent); | 93 | nvkm_object_ref(NULL, &parent); |
100 | parent = pargpu; | 94 | parent = pargpu; |
101 | } | 95 | } |
102 | } | 96 | } |
103 | } | 97 | } |
104 | 98 | ||
105 | ret = nouveau_object_create_(parent, engine, oclass, pclass | | 99 | ret = nvkm_object_create_(parent, engine, oclass, pclass | |
106 | NV_GPUOBJ_CLASS, length, pobject); | 100 | NV_GPUOBJ_CLASS, length, pobject); |
107 | nouveau_object_ref(NULL, &parent); | 101 | nvkm_object_ref(NULL, &parent); |
108 | gpuobj = *pobject; | 102 | gpuobj = *pobject; |
109 | if (ret) | 103 | if (ret) |
110 | return ret; | 104 | return ret; |
@@ -115,8 +109,8 @@ nouveau_gpuobj_create_(struct nouveau_object *parent, | |||
115 | gpuobj->size = size; | 109 | gpuobj->size = size; |
116 | 110 | ||
117 | if (heap) { | 111 | if (heap) { |
118 | ret = nouveau_mm_head(heap, 0, 1, size, size, | 112 | ret = nvkm_mm_head(heap, 0, 1, size, size, max(align, (u32)1), |
119 | max(align, (u32)1), &gpuobj->node); | 113 | &gpuobj->node); |
120 | if (ret) | 114 | if (ret) |
121 | return ret; | 115 | return ret; |
122 | 116 | ||
@@ -124,7 +118,7 @@ nouveau_gpuobj_create_(struct nouveau_object *parent, | |||
124 | } | 118 | } |
125 | 119 | ||
126 | if (gpuobj->flags & NVOBJ_FLAG_HEAP) { | 120 | if (gpuobj->flags & NVOBJ_FLAG_HEAP) { |
127 | ret = nouveau_mm_init(&gpuobj->heap, 0, gpuobj->size, 1); | 121 | ret = nvkm_mm_init(&gpuobj->heap, 0, gpuobj->size, 1); |
128 | if (ret) | 122 | if (ret) |
129 | return ret; | 123 | return ret; |
130 | } | 124 | } |
@@ -137,26 +131,25 @@ nouveau_gpuobj_create_(struct nouveau_object *parent, | |||
137 | return ret; | 131 | return ret; |
138 | } | 132 | } |
139 | 133 | ||
140 | struct nouveau_gpuobj_class { | 134 | struct nvkm_gpuobj_class { |
141 | struct nouveau_object *pargpu; | 135 | struct nvkm_object *pargpu; |
142 | u64 size; | 136 | u64 size; |
143 | u32 align; | 137 | u32 align; |
144 | u32 flags; | 138 | u32 flags; |
145 | }; | 139 | }; |
146 | 140 | ||
147 | static int | 141 | static int |
148 | _nouveau_gpuobj_ctor(struct nouveau_object *parent, | 142 | _nvkm_gpuobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine, |
149 | struct nouveau_object *engine, | 143 | struct nvkm_oclass *oclass, void *data, u32 size, |
150 | struct nouveau_oclass *oclass, void *data, u32 size, | 144 | struct nvkm_object **pobject) |
151 | struct nouveau_object **pobject) | ||
152 | { | 145 | { |
153 | struct nouveau_gpuobj_class *args = data; | 146 | struct nvkm_gpuobj_class *args = data; |
154 | struct nouveau_gpuobj *object; | 147 | struct nvkm_gpuobj *object; |
155 | int ret; | 148 | int ret; |
156 | 149 | ||
157 | ret = nouveau_gpuobj_create(parent, engine, oclass, 0, args->pargpu, | 150 | ret = nvkm_gpuobj_create(parent, engine, oclass, 0, args->pargpu, |
158 | args->size, args->align, args->flags, | 151 | args->size, args->align, args->flags, |
159 | &object); | 152 | &object); |
160 | *pobject = nv_object(object); | 153 | *pobject = nv_object(object); |
161 | if (ret) | 154 | if (ret) |
162 | return ret; | 155 | return ret; |
@@ -165,63 +158,63 @@ _nouveau_gpuobj_ctor(struct nouveau_object *parent, | |||
165 | } | 158 | } |
166 | 159 | ||
167 | void | 160 | void |
168 | _nouveau_gpuobj_dtor(struct nouveau_object *object) | 161 | _nvkm_gpuobj_dtor(struct nvkm_object *object) |
169 | { | 162 | { |
170 | nouveau_gpuobj_destroy(nv_gpuobj(object)); | 163 | nvkm_gpuobj_destroy(nv_gpuobj(object)); |
171 | } | 164 | } |
172 | 165 | ||
173 | int | 166 | int |
174 | _nouveau_gpuobj_init(struct nouveau_object *object) | 167 | _nvkm_gpuobj_init(struct nvkm_object *object) |
175 | { | 168 | { |
176 | return nouveau_gpuobj_init(nv_gpuobj(object)); | 169 | return nvkm_gpuobj_init(nv_gpuobj(object)); |
177 | } | 170 | } |
178 | 171 | ||
179 | int | 172 | int |
180 | _nouveau_gpuobj_fini(struct nouveau_object *object, bool suspend) | 173 | _nvkm_gpuobj_fini(struct nvkm_object *object, bool suspend) |
181 | { | 174 | { |
182 | return nouveau_gpuobj_fini(nv_gpuobj(object), suspend); | 175 | return nvkm_gpuobj_fini(nv_gpuobj(object), suspend); |
183 | } | 176 | } |
184 | 177 | ||
185 | u32 | 178 | u32 |
186 | _nouveau_gpuobj_rd32(struct nouveau_object *object, u64 addr) | 179 | _nvkm_gpuobj_rd32(struct nvkm_object *object, u64 addr) |
187 | { | 180 | { |
188 | struct nouveau_gpuobj *gpuobj = nv_gpuobj(object); | 181 | struct nvkm_gpuobj *gpuobj = nv_gpuobj(object); |
189 | struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent); | 182 | struct nvkm_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent); |
190 | if (gpuobj->node) | 183 | if (gpuobj->node) |
191 | addr += gpuobj->node->offset; | 184 | addr += gpuobj->node->offset; |
192 | return pfuncs->rd32(gpuobj->parent, addr); | 185 | return pfuncs->rd32(gpuobj->parent, addr); |
193 | } | 186 | } |
194 | 187 | ||
195 | void | 188 | void |
196 | _nouveau_gpuobj_wr32(struct nouveau_object *object, u64 addr, u32 data) | 189 | _nvkm_gpuobj_wr32(struct nvkm_object *object, u64 addr, u32 data) |
197 | { | 190 | { |
198 | struct nouveau_gpuobj *gpuobj = nv_gpuobj(object); | 191 | struct nvkm_gpuobj *gpuobj = nv_gpuobj(object); |
199 | struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent); | 192 | struct nvkm_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent); |
200 | if (gpuobj->node) | 193 | if (gpuobj->node) |
201 | addr += gpuobj->node->offset; | 194 | addr += gpuobj->node->offset; |
202 | pfuncs->wr32(gpuobj->parent, addr, data); | 195 | pfuncs->wr32(gpuobj->parent, addr, data); |
203 | } | 196 | } |
204 | 197 | ||
205 | static struct nouveau_oclass | 198 | static struct nvkm_oclass |
206 | _nouveau_gpuobj_oclass = { | 199 | _nvkm_gpuobj_oclass = { |
207 | .handle = 0x00000000, | 200 | .handle = 0x00000000, |
208 | .ofuncs = &(struct nouveau_ofuncs) { | 201 | .ofuncs = &(struct nvkm_ofuncs) { |
209 | .ctor = _nouveau_gpuobj_ctor, | 202 | .ctor = _nvkm_gpuobj_ctor, |
210 | .dtor = _nouveau_gpuobj_dtor, | 203 | .dtor = _nvkm_gpuobj_dtor, |
211 | .init = _nouveau_gpuobj_init, | 204 | .init = _nvkm_gpuobj_init, |
212 | .fini = _nouveau_gpuobj_fini, | 205 | .fini = _nvkm_gpuobj_fini, |
213 | .rd32 = _nouveau_gpuobj_rd32, | 206 | .rd32 = _nvkm_gpuobj_rd32, |
214 | .wr32 = _nouveau_gpuobj_wr32, | 207 | .wr32 = _nvkm_gpuobj_wr32, |
215 | }, | 208 | }, |
216 | }; | 209 | }; |
217 | 210 | ||
218 | int | 211 | int |
219 | nouveau_gpuobj_new(struct nouveau_object *parent, struct nouveau_object *pargpu, | 212 | nvkm_gpuobj_new(struct nvkm_object *parent, struct nvkm_object *pargpu, |
220 | u32 size, u32 align, u32 flags, | 213 | u32 size, u32 align, u32 flags, |
221 | struct nouveau_gpuobj **pgpuobj) | 214 | struct nvkm_gpuobj **pgpuobj) |
222 | { | 215 | { |
223 | struct nouveau_object *engine = parent; | 216 | struct nvkm_object *engine = parent; |
224 | struct nouveau_gpuobj_class args = { | 217 | struct nvkm_gpuobj_class args = { |
225 | .pargpu = pargpu, | 218 | .pargpu = pargpu, |
226 | .size = size, | 219 | .size = size, |
227 | .align = align, | 220 | .align = align, |
@@ -232,22 +225,21 @@ nouveau_gpuobj_new(struct nouveau_object *parent, struct nouveau_object *pargpu, | |||
232 | engine = &engine->engine->subdev.object; | 225 | engine = &engine->engine->subdev.object; |
233 | BUG_ON(engine == NULL); | 226 | BUG_ON(engine == NULL); |
234 | 227 | ||
235 | return nouveau_object_ctor(parent, engine, &_nouveau_gpuobj_oclass, | 228 | return nvkm_object_ctor(parent, engine, &_nvkm_gpuobj_oclass, |
236 | &args, sizeof(args), | 229 | &args, sizeof(args), |
237 | (struct nouveau_object **)pgpuobj); | 230 | (struct nvkm_object **)pgpuobj); |
238 | } | 231 | } |
239 | 232 | ||
240 | int | 233 | int |
241 | nouveau_gpuobj_map(struct nouveau_gpuobj *gpuobj, u32 access, | 234 | nvkm_gpuobj_map(struct nvkm_gpuobj *gpuobj, u32 access, struct nvkm_vma *vma) |
242 | struct nouveau_vma *vma) | ||
243 | { | 235 | { |
244 | struct nouveau_bar *bar = nouveau_bar(gpuobj); | 236 | struct nvkm_bar *bar = nvkm_bar(gpuobj); |
245 | int ret = -EINVAL; | 237 | int ret = -EINVAL; |
246 | 238 | ||
247 | if (bar && bar->umap) { | 239 | if (bar && bar->umap) { |
248 | struct nouveau_instobj *iobj = (void *) | 240 | struct nvkm_instobj *iobj = (void *) |
249 | nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS); | 241 | nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS); |
250 | struct nouveau_mem **mem = (void *)(iobj + 1); | 242 | struct nvkm_mem **mem = (void *)(iobj + 1); |
251 | ret = bar->umap(bar, *mem, access, vma); | 243 | ret = bar->umap(bar, *mem, access, vma); |
252 | } | 244 | } |
253 | 245 | ||
@@ -255,28 +247,28 @@ nouveau_gpuobj_map(struct nouveau_gpuobj *gpuobj, u32 access, | |||
255 | } | 247 | } |
256 | 248 | ||
257 | int | 249 | int |
258 | nouveau_gpuobj_map_vm(struct nouveau_gpuobj *gpuobj, struct nouveau_vm *vm, | 250 | nvkm_gpuobj_map_vm(struct nvkm_gpuobj *gpuobj, struct nvkm_vm *vm, |
259 | u32 access, struct nouveau_vma *vma) | 251 | u32 access, struct nvkm_vma *vma) |
260 | { | 252 | { |
261 | struct nouveau_instobj *iobj = (void *) | 253 | struct nvkm_instobj *iobj = (void *) |
262 | nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS); | 254 | nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS); |
263 | struct nouveau_mem **mem = (void *)(iobj + 1); | 255 | struct nvkm_mem **mem = (void *)(iobj + 1); |
264 | int ret; | 256 | int ret; |
265 | 257 | ||
266 | ret = nouveau_vm_get(vm, gpuobj->size, 12, access, vma); | 258 | ret = nvkm_vm_get(vm, gpuobj->size, 12, access, vma); |
267 | if (ret) | 259 | if (ret) |
268 | return ret; | 260 | return ret; |
269 | 261 | ||
270 | nouveau_vm_map(vma, *mem); | 262 | nvkm_vm_map(vma, *mem); |
271 | return 0; | 263 | return 0; |
272 | } | 264 | } |
273 | 265 | ||
274 | void | 266 | void |
275 | nouveau_gpuobj_unmap(struct nouveau_vma *vma) | 267 | nvkm_gpuobj_unmap(struct nvkm_vma *vma) |
276 | { | 268 | { |
277 | if (vma->node) { | 269 | if (vma->node) { |
278 | nouveau_vm_unmap(vma); | 270 | nvkm_vm_unmap(vma); |
279 | nouveau_vm_put(vma); | 271 | nvkm_vm_put(vma); |
280 | } | 272 | } |
281 | } | 273 | } |
282 | 274 | ||
@@ -286,37 +278,37 @@ nouveau_gpuobj_unmap(struct nouveau_vma *vma) | |||
286 | */ | 278 | */ |
287 | 279 | ||
288 | static void | 280 | static void |
289 | nouveau_gpudup_dtor(struct nouveau_object *object) | 281 | nvkm_gpudup_dtor(struct nvkm_object *object) |
290 | { | 282 | { |
291 | struct nouveau_gpuobj *gpuobj = (void *)object; | 283 | struct nvkm_gpuobj *gpuobj = (void *)object; |
292 | nouveau_object_ref(NULL, &gpuobj->parent); | 284 | nvkm_object_ref(NULL, &gpuobj->parent); |
293 | nouveau_object_destroy(&gpuobj->object); | 285 | nvkm_object_destroy(&gpuobj->object); |
294 | } | 286 | } |
295 | 287 | ||
296 | static struct nouveau_oclass | 288 | static struct nvkm_oclass |
297 | nouveau_gpudup_oclass = { | 289 | nvkm_gpudup_oclass = { |
298 | .handle = NV_GPUOBJ_CLASS, | 290 | .handle = NV_GPUOBJ_CLASS, |
299 | .ofuncs = &(struct nouveau_ofuncs) { | 291 | .ofuncs = &(struct nvkm_ofuncs) { |
300 | .dtor = nouveau_gpudup_dtor, | 292 | .dtor = nvkm_gpudup_dtor, |
301 | .init = nouveau_object_init, | 293 | .init = nvkm_object_init, |
302 | .fini = nouveau_object_fini, | 294 | .fini = nvkm_object_fini, |
303 | }, | 295 | }, |
304 | }; | 296 | }; |
305 | 297 | ||
306 | int | 298 | int |
307 | nouveau_gpuobj_dup(struct nouveau_object *parent, struct nouveau_gpuobj *base, | 299 | nvkm_gpuobj_dup(struct nvkm_object *parent, struct nvkm_gpuobj *base, |
308 | struct nouveau_gpuobj **pgpuobj) | 300 | struct nvkm_gpuobj **pgpuobj) |
309 | { | 301 | { |
310 | struct nouveau_gpuobj *gpuobj; | 302 | struct nvkm_gpuobj *gpuobj; |
311 | int ret; | 303 | int ret; |
312 | 304 | ||
313 | ret = nouveau_object_create(parent, &parent->engine->subdev.object, | 305 | ret = nvkm_object_create(parent, &parent->engine->subdev.object, |
314 | &nouveau_gpudup_oclass, 0, &gpuobj); | 306 | &nvkm_gpudup_oclass, 0, &gpuobj); |
315 | *pgpuobj = gpuobj; | 307 | *pgpuobj = gpuobj; |
316 | if (ret) | 308 | if (ret) |
317 | return ret; | 309 | return ret; |
318 | 310 | ||
319 | nouveau_object_ref(nv_object(base), &gpuobj->parent); | 311 | nvkm_object_ref(nv_object(base), &gpuobj->parent); |
320 | gpuobj->addr = base->addr; | 312 | gpuobj->addr = base->addr; |
321 | gpuobj->size = base->size; | 313 | gpuobj->size = base->size; |
322 | return 0; | 314 | return 0; |
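Annotation (not part of the patch): a rough sketch of how a caller would use the renamed nvkm_gpuobj_new() shown above. The sizes are arbitrary, example_alloc_gpuobj() is hypothetical, and the object is released through nvkm_object_ref() with the same pointer cast nvkm_gpuobj_new() itself uses; the driver also has dedicated gpuobj ref helpers that are not part of this hunk.

#include <core/gpuobj.h>

/*
 * Hypothetical snippet: allocate a 4KiB GPU object inside 'pargpu',
 * zero it with nv_wo32() as nvkm_gpuobj_destroy() does, then drop the
 * caller's reference, which takes the _nvkm_gpuobj_dtor() path above.
 */
static int
example_alloc_gpuobj(struct nvkm_object *parent, struct nvkm_object *pargpu)
{
        struct nvkm_gpuobj *gpuobj;
        int ret, i;

        ret = nvkm_gpuobj_new(parent, pargpu, 0x1000, 0x100, 0, &gpuobj);
        if (ret)
                return ret;

        for (i = 0; i < 0x1000; i += 4)
                nv_wo32(gpuobj, i, 0x00000000);

        nvkm_object_ref(NULL, (struct nvkm_object **)&gpuobj);
        return 0;
}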
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/handle.c b/drivers/gpu/drm/nouveau/nvkm/core/handle.c index 13f816cb08bd..dc7ff10ebe7b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/handle.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/handle.c | |||
@@ -21,31 +21,29 @@ | |||
21 | * | 21 | * |
22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
23 | */ | 23 | */ |
24 | |||
25 | #include <core/object.h> | ||
26 | #include <core/handle.h> | 24 | #include <core/handle.h> |
27 | #include <core/client.h> | 25 | #include <core/client.h> |
28 | 26 | ||
29 | #define hprintk(h,l,f,a...) do { \ | 27 | #define hprintk(h,l,f,a...) do { \ |
30 | struct nouveau_client *c = nouveau_client((h)->object); \ | 28 | struct nvkm_client *c = nvkm_client((h)->object); \ |
31 | struct nouveau_handle *p = (h)->parent; u32 n = p ? p->name : ~0; \ | 29 | struct nvkm_handle *p = (h)->parent; u32 n = p ? p->name : ~0; \ |
32 | nv_printk((c), l, "0x%08x:0x%08x "f, n, (h)->name, ##a); \ | 30 | nv_printk((c), l, "0x%08x:0x%08x "f, n, (h)->name, ##a); \ |
33 | } while(0) | 31 | } while(0) |
34 | 32 | ||
35 | int | 33 | int |
36 | nouveau_handle_init(struct nouveau_handle *handle) | 34 | nvkm_handle_init(struct nvkm_handle *handle) |
37 | { | 35 | { |
38 | struct nouveau_handle *item; | 36 | struct nvkm_handle *item; |
39 | int ret; | 37 | int ret; |
40 | 38 | ||
41 | hprintk(handle, TRACE, "init running\n"); | 39 | hprintk(handle, TRACE, "init running\n"); |
42 | ret = nouveau_object_inc(handle->object); | 40 | ret = nvkm_object_inc(handle->object); |
43 | if (ret) | 41 | if (ret) |
44 | return ret; | 42 | return ret; |
45 | 43 | ||
46 | hprintk(handle, TRACE, "init children\n"); | 44 | hprintk(handle, TRACE, "init children\n"); |
47 | list_for_each_entry(item, &handle->tree, head) { | 45 | list_for_each_entry(item, &handle->tree, head) { |
48 | ret = nouveau_handle_init(item); | 46 | ret = nvkm_handle_init(item); |
49 | if (ret) | 47 | if (ret) |
50 | goto fail; | 48 | goto fail; |
51 | } | 49 | } |
@@ -55,30 +53,30 @@ nouveau_handle_init(struct nouveau_handle *handle) | |||
55 | fail: | 53 | fail: |
56 | hprintk(handle, ERROR, "init failed with %d\n", ret); | 54 | hprintk(handle, ERROR, "init failed with %d\n", ret); |
57 | list_for_each_entry_continue_reverse(item, &handle->tree, head) { | 55 | list_for_each_entry_continue_reverse(item, &handle->tree, head) { |
58 | nouveau_handle_fini(item, false); | 56 | nvkm_handle_fini(item, false); |
59 | } | 57 | } |
60 | 58 | ||
61 | nouveau_object_dec(handle->object, false); | 59 | nvkm_object_dec(handle->object, false); |
62 | return ret; | 60 | return ret; |
63 | } | 61 | } |
64 | 62 | ||
65 | int | 63 | int |
66 | nouveau_handle_fini(struct nouveau_handle *handle, bool suspend) | 64 | nvkm_handle_fini(struct nvkm_handle *handle, bool suspend) |
67 | { | 65 | { |
68 | static char *name[2] = { "fini", "suspend" }; | 66 | static char *name[2] = { "fini", "suspend" }; |
69 | struct nouveau_handle *item; | 67 | struct nvkm_handle *item; |
70 | int ret; | 68 | int ret; |
71 | 69 | ||
72 | hprintk(handle, TRACE, "%s children\n", name[suspend]); | 70 | hprintk(handle, TRACE, "%s children\n", name[suspend]); |
73 | list_for_each_entry(item, &handle->tree, head) { | 71 | list_for_each_entry(item, &handle->tree, head) { |
74 | ret = nouveau_handle_fini(item, suspend); | 72 | ret = nvkm_handle_fini(item, suspend); |
75 | if (ret && suspend) | 73 | if (ret && suspend) |
76 | goto fail; | 74 | goto fail; |
77 | } | 75 | } |
78 | 76 | ||
79 | hprintk(handle, TRACE, "%s running\n", name[suspend]); | 77 | hprintk(handle, TRACE, "%s running\n", name[suspend]); |
80 | if (handle->object) { | 78 | if (handle->object) { |
81 | ret = nouveau_object_dec(handle->object, suspend); | 79 | ret = nvkm_object_dec(handle->object, suspend); |
82 | if (ret && suspend) | 80 | if (ret && suspend) |
83 | goto fail; | 81 | goto fail; |
84 | } | 82 | } |
@@ -88,7 +86,7 @@ nouveau_handle_fini(struct nouveau_handle *handle, bool suspend) | |||
88 | fail: | 86 | fail: |
89 | hprintk(handle, ERROR, "%s failed with %d\n", name[suspend], ret); | 87 | hprintk(handle, ERROR, "%s failed with %d\n", name[suspend], ret); |
90 | list_for_each_entry_continue_reverse(item, &handle->tree, head) { | 88 | list_for_each_entry_continue_reverse(item, &handle->tree, head) { |
91 | int rret = nouveau_handle_init(item); | 89 | int rret = nvkm_handle_init(item); |
92 | if (rret) | 90 | if (rret) |
93 | hprintk(handle, FATAL, "failed to restart, %d\n", rret); | 91 | hprintk(handle, FATAL, "failed to restart, %d\n", rret); |
94 | } | 92 | } |
@@ -97,12 +95,11 @@ fail: | |||
97 | } | 95 | } |
98 | 96 | ||
99 | int | 97 | int |
100 | nouveau_handle_create(struct nouveau_object *parent, u32 _parent, u32 _handle, | 98 | nvkm_handle_create(struct nvkm_object *parent, u32 _parent, u32 _handle, |
101 | struct nouveau_object *object, | 99 | struct nvkm_object *object, struct nvkm_handle **phandle) |
102 | struct nouveau_handle **phandle) | ||
103 | { | 100 | { |
104 | struct nouveau_object *namedb; | 101 | struct nvkm_object *namedb; |
105 | struct nouveau_handle *handle; | 102 | struct nvkm_handle *handle; |
106 | int ret; | 103 | int ret; |
107 | 104 | ||
108 | namedb = parent; | 105 | namedb = parent; |
@@ -118,7 +115,7 @@ nouveau_handle_create(struct nouveau_object *parent, u32 _parent, u32 _handle, | |||
118 | handle->name = _handle; | 115 | handle->name = _handle; |
119 | handle->priv = ~0; | 116 | handle->priv = ~0; |
120 | 117 | ||
121 | ret = nouveau_namedb_insert(nv_namedb(namedb), _handle, object, handle); | 118 | ret = nvkm_namedb_insert(nv_namedb(namedb), _handle, object, handle); |
122 | if (ret) { | 119 | if (ret) { |
123 | kfree(handle); | 120 | kfree(handle); |
124 | return ret; | 121 | return ret; |
@@ -127,7 +124,7 @@ nouveau_handle_create(struct nouveau_object *parent, u32 _parent, u32 _handle, | |||
127 | if (nv_parent(parent)->object_attach) { | 124 | if (nv_parent(parent)->object_attach) { |
128 | ret = nv_parent(parent)->object_attach(parent, object, _handle); | 125 | ret = nv_parent(parent)->object_attach(parent, object, _handle); |
129 | if (ret < 0) { | 126 | if (ret < 0) { |
130 | nouveau_handle_destroy(handle); | 127 | nvkm_handle_destroy(handle); |
131 | return ret; | 128 | return ret; |
132 | } | 129 | } |
133 | 130 | ||
@@ -138,10 +135,10 @@ nouveau_handle_create(struct nouveau_object *parent, u32 _parent, u32 _handle, | |||
138 | while (!nv_iclass(namedb, NV_CLIENT_CLASS)) | 135 | while (!nv_iclass(namedb, NV_CLIENT_CLASS)) |
139 | namedb = namedb->parent; | 136 | namedb = namedb->parent; |
140 | 137 | ||
141 | handle->parent = nouveau_namedb_get(nv_namedb(namedb), _parent); | 138 | handle->parent = nvkm_namedb_get(nv_namedb(namedb), _parent); |
142 | if (handle->parent) { | 139 | if (handle->parent) { |
143 | list_add(&handle->head, &handle->parent->tree); | 140 | list_add(&handle->head, &handle->parent->tree); |
144 | nouveau_namedb_put(handle->parent); | 141 | nvkm_namedb_put(handle->parent); |
145 | } | 142 | } |
146 | } | 143 | } |
147 | 144 | ||
@@ -151,74 +148,74 @@ nouveau_handle_create(struct nouveau_object *parent, u32 _parent, u32 _handle, | |||
151 | } | 148 | } |
152 | 149 | ||
153 | void | 150 | void |
154 | nouveau_handle_destroy(struct nouveau_handle *handle) | 151 | nvkm_handle_destroy(struct nvkm_handle *handle) |
155 | { | 152 | { |
156 | struct nouveau_handle *item, *temp; | 153 | struct nvkm_handle *item, *temp; |
157 | 154 | ||
158 | hprintk(handle, TRACE, "destroy running\n"); | 155 | hprintk(handle, TRACE, "destroy running\n"); |
159 | list_for_each_entry_safe(item, temp, &handle->tree, head) { | 156 | list_for_each_entry_safe(item, temp, &handle->tree, head) { |
160 | nouveau_handle_destroy(item); | 157 | nvkm_handle_destroy(item); |
161 | } | 158 | } |
162 | list_del(&handle->head); | 159 | list_del(&handle->head); |
163 | 160 | ||
164 | if (handle->priv != ~0) { | 161 | if (handle->priv != ~0) { |
165 | struct nouveau_object *parent = handle->parent->object; | 162 | struct nvkm_object *parent = handle->parent->object; |
166 | nv_parent(parent)->object_detach(parent, handle->priv); | 163 | nv_parent(parent)->object_detach(parent, handle->priv); |
167 | } | 164 | } |
168 | 165 | ||
169 | hprintk(handle, TRACE, "destroy completed\n"); | 166 | hprintk(handle, TRACE, "destroy completed\n"); |
170 | nouveau_namedb_remove(handle); | 167 | nvkm_namedb_remove(handle); |
171 | kfree(handle); | 168 | kfree(handle); |
172 | } | 169 | } |
173 | 170 | ||
174 | struct nouveau_object * | 171 | struct nvkm_object * |
175 | nouveau_handle_ref(struct nouveau_object *parent, u32 name) | 172 | nvkm_handle_ref(struct nvkm_object *parent, u32 name) |
176 | { | 173 | { |
177 | struct nouveau_object *object = NULL; | 174 | struct nvkm_object *object = NULL; |
178 | struct nouveau_handle *handle; | 175 | struct nvkm_handle *handle; |
179 | 176 | ||
180 | while (!nv_iclass(parent, NV_NAMEDB_CLASS)) | 177 | while (!nv_iclass(parent, NV_NAMEDB_CLASS)) |
181 | parent = parent->parent; | 178 | parent = parent->parent; |
182 | 179 | ||
183 | handle = nouveau_namedb_get(nv_namedb(parent), name); | 180 | handle = nvkm_namedb_get(nv_namedb(parent), name); |
184 | if (handle) { | 181 | if (handle) { |
185 | nouveau_object_ref(handle->object, &object); | 182 | nvkm_object_ref(handle->object, &object); |
186 | nouveau_namedb_put(handle); | 183 | nvkm_namedb_put(handle); |
187 | } | 184 | } |
188 | 185 | ||
189 | return object; | 186 | return object; |
190 | } | 187 | } |
191 | 188 | ||
192 | struct nouveau_handle * | 189 | struct nvkm_handle * |
193 | nouveau_handle_get_class(struct nouveau_object *engctx, u16 oclass) | 190 | nvkm_handle_get_class(struct nvkm_object *engctx, u16 oclass) |
194 | { | 191 | { |
195 | struct nouveau_namedb *namedb; | 192 | struct nvkm_namedb *namedb; |
196 | if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS))) | 193 | if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS))) |
197 | return nouveau_namedb_get_class(namedb, oclass); | 194 | return nvkm_namedb_get_class(namedb, oclass); |
198 | return NULL; | 195 | return NULL; |
199 | } | 196 | } |
200 | 197 | ||
201 | struct nouveau_handle * | 198 | struct nvkm_handle * |
202 | nouveau_handle_get_vinst(struct nouveau_object *engctx, u64 vinst) | 199 | nvkm_handle_get_vinst(struct nvkm_object *engctx, u64 vinst) |
203 | { | 200 | { |
204 | struct nouveau_namedb *namedb; | 201 | struct nvkm_namedb *namedb; |
205 | if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS))) | 202 | if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS))) |
206 | return nouveau_namedb_get_vinst(namedb, vinst); | 203 | return nvkm_namedb_get_vinst(namedb, vinst); |
207 | return NULL; | 204 | return NULL; |
208 | } | 205 | } |
209 | 206 | ||
210 | struct nouveau_handle * | 207 | struct nvkm_handle * |
211 | nouveau_handle_get_cinst(struct nouveau_object *engctx, u32 cinst) | 208 | nvkm_handle_get_cinst(struct nvkm_object *engctx, u32 cinst) |
212 | { | 209 | { |
213 | struct nouveau_namedb *namedb; | 210 | struct nvkm_namedb *namedb; |
214 | if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS))) | 211 | if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS))) |
215 | return nouveau_namedb_get_cinst(namedb, cinst); | 212 | return nvkm_namedb_get_cinst(namedb, cinst); |
216 | return NULL; | 213 | return NULL; |
217 | } | 214 | } |
218 | 215 | ||
219 | void | 216 | void |
220 | nouveau_handle_put(struct nouveau_handle *handle) | 217 | nvkm_handle_put(struct nvkm_handle *handle) |
221 | { | 218 | { |
222 | if (handle) | 219 | if (handle) |
223 | nouveau_namedb_put(handle); | 220 | nvkm_namedb_put(handle); |
224 | } | 221 | } |
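Annotation (not part of the patch): the handle code above follows a create/init/fini/destroy life cycle, and init failures are unwound with nvkm_handle_destroy(), mirroring what nvkm_ioctl_new() does further down. A hedged sketch of that sequence; example_handle_lifecycle(), parent_name and handle_name are invented, the calls and their order come from this file.

#include <core/handle.h>

/*
 * Hypothetical life cycle: attach 'object' to 'parent' under
 * 'handle_name' (with 'parent_name' naming an existing parent handle),
 * bring it up, then tear it down again.
 */
static int
example_handle_lifecycle(struct nvkm_object *parent, u32 parent_name,
                         u32 handle_name, struct nvkm_object *object)
{
        struct nvkm_handle *handle;
        int ret;

        ret = nvkm_handle_create(parent, parent_name, handle_name,
                                 object, &handle);
        if (ret)
                return ret;

        ret = nvkm_handle_init(handle);
        if (ret) {
                nvkm_handle_destroy(handle);
                return ret;
        }

        /* ... use the object through its handle ... */

        nvkm_handle_fini(handle, false);
        nvkm_handle_destroy(handle);
        return 0;
}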
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c index bdfabb4a8dc9..4459ff5f4cb8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c | |||
@@ -21,23 +21,19 @@ | |||
21 | * | 21 | * |
22 | * Authors: Ben Skeggs <bskeggs@redhat.com> | 22 | * Authors: Ben Skeggs <bskeggs@redhat.com> |
23 | */ | 23 | */ |
24 | 24 | #include <core/ioctl.h> | |
25 | #include <core/object.h> | 25 | #include <core/client.h> |
26 | #include <core/parent.h> | 26 | #include <core/engine.h> |
27 | #include <core/handle.h> | 27 | #include <core/handle.h> |
28 | #include <core/namedb.h> | 28 | #include <core/namedb.h> |
29 | #include <core/client.h> | ||
30 | #include <core/device.h> | ||
31 | #include <core/ioctl.h> | ||
32 | #include <core/event.h> | ||
33 | 29 | ||
34 | #include <nvif/unpack.h> | 30 | #include <nvif/unpack.h> |
35 | #include <nvif/ioctl.h> | 31 | #include <nvif/ioctl.h> |
36 | 32 | ||
37 | static int | 33 | static int |
38 | nvkm_ioctl_nop(struct nouveau_handle *handle, void *data, u32 size) | 34 | nvkm_ioctl_nop(struct nvkm_handle *handle, void *data, u32 size) |
39 | { | 35 | { |
40 | struct nouveau_object *object = handle->object; | 36 | struct nvkm_object *object = handle->object; |
41 | union { | 37 | union { |
42 | struct nvif_ioctl_nop none; | 38 | struct nvif_ioctl_nop none; |
43 | } *args = data; | 39 | } *args = data; |
@@ -52,9 +48,9 @@ nvkm_ioctl_nop(struct nouveau_handle *handle, void *data, u32 size) | |||
52 | } | 48 | } |
53 | 49 | ||
54 | static int | 50 | static int |
55 | nvkm_ioctl_sclass(struct nouveau_handle *handle, void *data, u32 size) | 51 | nvkm_ioctl_sclass(struct nvkm_handle *handle, void *data, u32 size) |
56 | { | 52 | { |
57 | struct nouveau_object *object = handle->object; | 53 | struct nvkm_object *object = handle->object; |
58 | union { | 54 | union { |
59 | struct nvif_ioctl_sclass_v0 v0; | 55 | struct nvif_ioctl_sclass_v0 v0; |
60 | } *args = data; | 56 | } *args = data; |
@@ -70,8 +66,8 @@ nvkm_ioctl_sclass(struct nouveau_handle *handle, void *data, u32 size) | |||
70 | nv_ioctl(object, "sclass vers %d count %d\n", | 66 | nv_ioctl(object, "sclass vers %d count %d\n", |
71 | args->v0.version, args->v0.count); | 67 | args->v0.version, args->v0.count); |
72 | if (size == args->v0.count * sizeof(args->v0.oclass[0])) { | 68 | if (size == args->v0.count * sizeof(args->v0.oclass[0])) { |
73 | ret = nouveau_parent_lclass(object, args->v0.oclass, | 69 | ret = nvkm_parent_lclass(object, args->v0.oclass, |
74 | args->v0.count); | 70 | args->v0.count); |
75 | if (ret >= 0) { | 71 | if (ret >= 0) { |
76 | args->v0.count = ret; | 72 | args->v0.count = ret; |
77 | ret = 0; | 73 | ret = 0; |
@@ -85,17 +81,17 @@ nvkm_ioctl_sclass(struct nouveau_handle *handle, void *data, u32 size) | |||
85 | } | 81 | } |
86 | 82 | ||
87 | static int | 83 | static int |
88 | nvkm_ioctl_new(struct nouveau_handle *handle, void *data, u32 size) | 84 | nvkm_ioctl_new(struct nvkm_handle *handle, void *data, u32 size) |
89 | { | 85 | { |
90 | union { | 86 | union { |
91 | struct nvif_ioctl_new_v0 v0; | 87 | struct nvif_ioctl_new_v0 v0; |
92 | } *args = data; | 88 | } *args = data; |
93 | struct nouveau_client *client = nouveau_client(handle->object); | 89 | struct nvkm_client *client = nvkm_client(handle->object); |
94 | struct nouveau_object *engctx = NULL; | 90 | struct nvkm_object *engctx = NULL; |
95 | struct nouveau_object *object = NULL; | 91 | struct nvkm_object *object = NULL; |
96 | struct nouveau_parent *parent; | 92 | struct nvkm_parent *parent; |
97 | struct nouveau_object *engine; | 93 | struct nvkm_object *engine; |
98 | struct nouveau_oclass *oclass; | 94 | struct nvkm_oclass *oclass; |
99 | u32 _handle, _oclass; | 95 | u32 _handle, _oclass; |
100 | int ret; | 96 | int ret; |
101 | 97 | ||
@@ -108,8 +104,8 @@ nvkm_ioctl_new(struct nouveau_handle *handle, void *data, u32 size) | |||
108 | 104 | ||
109 | nv_ioctl(client, "new vers %d handle %08x class %08x " | 105 | nv_ioctl(client, "new vers %d handle %08x class %08x " |
110 | "route %02x token %llx\n", | 106 | "route %02x token %llx\n", |
111 | args->v0.version, _handle, _oclass, | 107 | args->v0.version, _handle, _oclass, |
112 | args->v0.route, args->v0.token); | 108 | args->v0.route, args->v0.token); |
113 | 109 | ||
114 | if (!nv_iclass(handle->object, NV_PARENT_CLASS)) { | 110 | if (!nv_iclass(handle->object, NV_PARENT_CLASS)) { |
115 | nv_debug(handle->object, "cannot have children (ctor)\n"); | 111 | nv_debug(handle->object, "cannot have children (ctor)\n"); |
@@ -120,7 +116,7 @@ nvkm_ioctl_new(struct nouveau_handle *handle, void *data, u32 size) | |||
120 | parent = nv_parent(handle->object); | 116 | parent = nv_parent(handle->object); |
121 | 117 | ||
122 | /* check that parent supports the requested subclass */ | 118 | /* check that parent supports the requested subclass */ |
123 | ret = nouveau_parent_sclass(&parent->object, _oclass, &engine, &oclass); | 119 | ret = nvkm_parent_sclass(&parent->object, _oclass, &engine, &oclass); |
124 | if (ret) { | 120 | if (ret) { |
125 | nv_debug(parent, "illegal class 0x%04x\n", _oclass); | 121 | nv_debug(parent, "illegal class 0x%04x\n", _oclass); |
126 | goto fail_class; | 122 | goto fail_class; |
@@ -131,7 +127,7 @@ nvkm_ioctl_new(struct nouveau_handle *handle, void *data, u32 size) | |||
131 | * state calculated at init (ie. default context construction) | 127 | * state calculated at init (ie. default context construction) |
132 | */ | 128 | */ |
133 | if (engine) { | 129 | if (engine) { |
134 | ret = nouveau_object_inc(engine); | 130 | ret = nvkm_object_inc(engine); |
135 | if (ret) | 131 | if (ret) |
136 | goto fail_class; | 132 | goto fail_class; |
137 | } | 133 | } |
@@ -140,53 +136,53 @@ nvkm_ioctl_new(struct nouveau_handle *handle, void *data, u32 size) | |||
140 | * between the parent and its children (eg. PGRAPH context) | 136 | * between the parent and its children (eg. PGRAPH context) |
141 | */ | 137 | */ |
142 | if (engine && nv_engine(engine)->cclass) { | 138 | if (engine && nv_engine(engine)->cclass) { |
143 | ret = nouveau_object_ctor(&parent->object, engine, | 139 | ret = nvkm_object_ctor(&parent->object, engine, |
144 | nv_engine(engine)->cclass, | 140 | nv_engine(engine)->cclass, |
145 | data, size, &engctx); | 141 | data, size, &engctx); |
146 | if (ret) | 142 | if (ret) |
147 | goto fail_engctx; | 143 | goto fail_engctx; |
148 | } else { | 144 | } else { |
149 | nouveau_object_ref(&parent->object, &engctx); | 145 | nvkm_object_ref(&parent->object, &engctx); |
150 | } | 146 | } |
151 | 147 | ||
152 | /* finally, create new object and bind it to its handle */ | 148 | /* finally, create new object and bind it to its handle */ |
153 | ret = nouveau_object_ctor(engctx, engine, oclass, data, size, &object); | 149 | ret = nvkm_object_ctor(engctx, engine, oclass, data, size, &object); |
154 | client->data = object; | 150 | client->data = object; |
155 | if (ret) | 151 | if (ret) |
156 | goto fail_ctor; | 152 | goto fail_ctor; |
157 | 153 | ||
158 | ret = nouveau_object_inc(object); | 154 | ret = nvkm_object_inc(object); |
159 | if (ret) | 155 | if (ret) |
160 | goto fail_init; | 156 | goto fail_init; |
161 | 157 | ||
162 | ret = nouveau_handle_create(&parent->object, handle->name, | 158 | ret = nvkm_handle_create(&parent->object, handle->name, |
163 | _handle, object, &handle); | 159 | _handle, object, &handle); |
164 | if (ret) | 160 | if (ret) |
165 | goto fail_handle; | 161 | goto fail_handle; |
166 | 162 | ||
167 | ret = nouveau_handle_init(handle); | 163 | ret = nvkm_handle_init(handle); |
168 | handle->route = args->v0.route; | 164 | handle->route = args->v0.route; |
169 | handle->token = args->v0.token; | 165 | handle->token = args->v0.token; |
170 | if (ret) | 166 | if (ret) |
171 | nouveau_handle_destroy(handle); | 167 | nvkm_handle_destroy(handle); |
172 | 168 | ||
173 | fail_handle: | 169 | fail_handle: |
174 | nouveau_object_dec(object, false); | 170 | nvkm_object_dec(object, false); |
175 | fail_init: | 171 | fail_init: |
176 | nouveau_object_ref(NULL, &object); | 172 | nvkm_object_ref(NULL, &object); |
177 | fail_ctor: | 173 | fail_ctor: |
178 | nouveau_object_ref(NULL, &engctx); | 174 | nvkm_object_ref(NULL, &engctx); |
179 | fail_engctx: | 175 | fail_engctx: |
180 | if (engine) | 176 | if (engine) |
181 | nouveau_object_dec(engine, false); | 177 | nvkm_object_dec(engine, false); |
182 | fail_class: | 178 | fail_class: |
183 | return ret; | 179 | return ret; |
184 | } | 180 | } |
185 | 181 | ||
186 | static int | 182 | static int |
187 | nvkm_ioctl_del(struct nouveau_handle *handle, void *data, u32 size) | 183 | nvkm_ioctl_del(struct nvkm_handle *handle, void *data, u32 size) |
188 | { | 184 | { |
189 | struct nouveau_object *object = handle->object; | 185 | struct nvkm_object *object = handle->object; |
190 | union { | 186 | union { |
191 | struct nvif_ioctl_del none; | 187 | struct nvif_ioctl_del none; |
192 | } *args = data; | 188 | } *args = data; |
@@ -195,18 +191,18 @@ nvkm_ioctl_del(struct nouveau_handle *handle, void *data, u32 size) | |||
195 | nv_ioctl(object, "delete size %d\n", size); | 191 | nv_ioctl(object, "delete size %d\n", size); |
196 | if (nvif_unvers(args->none)) { | 192 | if (nvif_unvers(args->none)) { |
197 | nv_ioctl(object, "delete\n"); | 193 | nv_ioctl(object, "delete\n"); |
198 | nouveau_handle_fini(handle, false); | 194 | nvkm_handle_fini(handle, false); |
199 | nouveau_handle_destroy(handle); | 195 | nvkm_handle_destroy(handle); |
200 | } | 196 | } |
201 | 197 | ||
202 | return ret; | 198 | return ret; |
203 | } | 199 | } |
204 | 200 | ||
205 | static int | 201 | static int |
206 | nvkm_ioctl_mthd(struct nouveau_handle *handle, void *data, u32 size) | 202 | nvkm_ioctl_mthd(struct nvkm_handle *handle, void *data, u32 size) |
207 | { | 203 | { |
208 | struct nouveau_object *object = handle->object; | 204 | struct nvkm_object *object = handle->object; |
209 | struct nouveau_ofuncs *ofuncs = object->oclass->ofuncs; | 205 | struct nvkm_ofuncs *ofuncs = object->oclass->ofuncs; |
210 | union { | 206 | union { |
211 | struct nvif_ioctl_mthd_v0 v0; | 207 | struct nvif_ioctl_mthd_v0 v0; |
212 | } *args = data; | 208 | } *args = data; |
@@ -225,10 +221,10 @@ nvkm_ioctl_mthd(struct nouveau_handle *handle, void *data, u32 size) | |||
225 | 221 | ||
226 | 222 | ||
227 | static int | 223 | static int |
228 | nvkm_ioctl_rd(struct nouveau_handle *handle, void *data, u32 size) | 224 | nvkm_ioctl_rd(struct nvkm_handle *handle, void *data, u32 size) |
229 | { | 225 | { |
230 | struct nouveau_object *object = handle->object; | 226 | struct nvkm_object *object = handle->object; |
231 | struct nouveau_ofuncs *ofuncs = object->oclass->ofuncs; | 227 | struct nvkm_ofuncs *ofuncs = object->oclass->ofuncs; |
232 | union { | 228 | union { |
233 | struct nvif_ioctl_rd_v0 v0; | 229 | struct nvif_ioctl_rd_v0 v0; |
234 | } *args = data; | 230 | } *args = data; |
@@ -237,7 +233,7 @@ nvkm_ioctl_rd(struct nouveau_handle *handle, void *data, u32 size) | |||
237 | nv_ioctl(object, "rd size %d\n", size); | 233 | nv_ioctl(object, "rd size %d\n", size); |
238 | if (nvif_unpack(args->v0, 0, 0, false)) { | 234 | if (nvif_unpack(args->v0, 0, 0, false)) { |
239 | nv_ioctl(object, "rd vers %d size %d addr %016llx\n", | 235 | nv_ioctl(object, "rd vers %d size %d addr %016llx\n", |
240 | args->v0.version, args->v0.size, args->v0.addr); | 236 | args->v0.version, args->v0.size, args->v0.addr); |
241 | switch (args->v0.size) { | 237 | switch (args->v0.size) { |
242 | case 1: | 238 | case 1: |
243 | if (ret = -ENODEV, ofuncs->rd08) { | 239 | if (ret = -ENODEV, ofuncs->rd08) { |
@@ -267,10 +263,10 @@ nvkm_ioctl_rd(struct nouveau_handle *handle, void *data, u32 size) | |||
267 | } | 263 | } |
268 | 264 | ||
269 | static int | 265 | static int |
270 | nvkm_ioctl_wr(struct nouveau_handle *handle, void *data, u32 size) | 266 | nvkm_ioctl_wr(struct nvkm_handle *handle, void *data, u32 size) |
271 | { | 267 | { |
272 | struct nouveau_object *object = handle->object; | 268 | struct nvkm_object *object = handle->object; |
273 | struct nouveau_ofuncs *ofuncs = object->oclass->ofuncs; | 269 | struct nvkm_ofuncs *ofuncs = object->oclass->ofuncs; |
274 | union { | 270 | union { |
275 | struct nvif_ioctl_wr_v0 v0; | 271 | struct nvif_ioctl_wr_v0 v0; |
276 | } *args = data; | 272 | } *args = data; |
@@ -310,10 +306,10 @@ nvkm_ioctl_wr(struct nouveau_handle *handle, void *data, u32 size) | |||
310 | } | 306 | } |
311 | 307 | ||
312 | static int | 308 | static int |
313 | nvkm_ioctl_map(struct nouveau_handle *handle, void *data, u32 size) | 309 | nvkm_ioctl_map(struct nvkm_handle *handle, void *data, u32 size) |
314 | { | 310 | { |
315 | struct nouveau_object *object = handle->object; | 311 | struct nvkm_object *object = handle->object; |
316 | struct nouveau_ofuncs *ofuncs = object->oclass->ofuncs; | 312 | struct nvkm_ofuncs *ofuncs = object->oclass->ofuncs; |
317 | union { | 313 | union { |
318 | struct nvif_ioctl_map_v0 v0; | 314 | struct nvif_ioctl_map_v0 v0; |
319 | } *args = data; | 315 | } *args = data; |
@@ -332,9 +328,9 @@ nvkm_ioctl_map(struct nouveau_handle *handle, void *data, u32 size) | |||
332 | } | 328 | } |
333 | 329 | ||
334 | static int | 330 | static int |
335 | nvkm_ioctl_unmap(struct nouveau_handle *handle, void *data, u32 size) | 331 | nvkm_ioctl_unmap(struct nvkm_handle *handle, void *data, u32 size) |
336 | { | 332 | { |
337 | struct nouveau_object *object = handle->object; | 333 | struct nvkm_object *object = handle->object; |
338 | union { | 334 | union { |
339 | struct nvif_ioctl_unmap none; | 335 | struct nvif_ioctl_unmap none; |
340 | } *args = data; | 336 | } *args = data; |
@@ -349,10 +345,10 @@ nvkm_ioctl_unmap(struct nouveau_handle *handle, void *data, u32 size) | |||
349 | } | 345 | } |
350 | 346 | ||
351 | static int | 347 | static int |
352 | nvkm_ioctl_ntfy_new(struct nouveau_handle *handle, void *data, u32 size) | 348 | nvkm_ioctl_ntfy_new(struct nvkm_handle *handle, void *data, u32 size) |
353 | { | 349 | { |
354 | struct nouveau_object *object = handle->object; | 350 | struct nvkm_object *object = handle->object; |
355 | struct nouveau_ofuncs *ofuncs = object->oclass->ofuncs; | 351 | struct nvkm_ofuncs *ofuncs = object->oclass->ofuncs; |
356 | union { | 352 | union { |
357 | struct nvif_ioctl_ntfy_new_v0 v0; | 353 | struct nvif_ioctl_ntfy_new_v0 v0; |
358 | } *args = data; | 354 | } *args = data; |
@@ -378,10 +374,10 @@ nvkm_ioctl_ntfy_new(struct nouveau_handle *handle, void *data, u32 size) | |||
378 | } | 374 | } |
379 | 375 | ||
380 | static int | 376 | static int |
381 | nvkm_ioctl_ntfy_del(struct nouveau_handle *handle, void *data, u32 size) | 377 | nvkm_ioctl_ntfy_del(struct nvkm_handle *handle, void *data, u32 size) |
382 | { | 378 | { |
383 | struct nouveau_client *client = nouveau_client(handle->object); | 379 | struct nvkm_client *client = nvkm_client(handle->object); |
384 | struct nouveau_object *object = handle->object; | 380 | struct nvkm_object *object = handle->object; |
385 | union { | 381 | union { |
386 | struct nvif_ioctl_ntfy_del_v0 v0; | 382 | struct nvif_ioctl_ntfy_del_v0 v0; |
387 | } *args = data; | 383 | } *args = data; |
@@ -398,10 +394,10 @@ nvkm_ioctl_ntfy_del(struct nouveau_handle *handle, void *data, u32 size) | |||
398 | } | 394 | } |
399 | 395 | ||
400 | static int | 396 | static int |
401 | nvkm_ioctl_ntfy_get(struct nouveau_handle *handle, void *data, u32 size) | 397 | nvkm_ioctl_ntfy_get(struct nvkm_handle *handle, void *data, u32 size) |
402 | { | 398 | { |
403 | struct nouveau_client *client = nouveau_client(handle->object); | 399 | struct nvkm_client *client = nvkm_client(handle->object); |
404 | struct nouveau_object *object = handle->object; | 400 | struct nvkm_object *object = handle->object; |
405 | union { | 401 | union { |
406 | struct nvif_ioctl_ntfy_get_v0 v0; | 402 | struct nvif_ioctl_ntfy_get_v0 v0; |
407 | } *args = data; | 403 | } *args = data; |
@@ -418,10 +414,10 @@ nvkm_ioctl_ntfy_get(struct nouveau_handle *handle, void *data, u32 size) | |||
418 | } | 414 | } |
419 | 415 | ||
420 | static int | 416 | static int |
421 | nvkm_ioctl_ntfy_put(struct nouveau_handle *handle, void *data, u32 size) | 417 | nvkm_ioctl_ntfy_put(struct nvkm_handle *handle, void *data, u32 size) |
422 | { | 418 | { |
423 | struct nouveau_client *client = nouveau_client(handle->object); | 419 | struct nvkm_client *client = nvkm_client(handle->object); |
424 | struct nouveau_object *object = handle->object; | 420 | struct nvkm_object *object = handle->object; |
425 | union { | 421 | union { |
426 | struct nvif_ioctl_ntfy_put_v0 v0; | 422 | struct nvif_ioctl_ntfy_put_v0 v0; |
427 | } *args = data; | 423 | } *args = data; |
@@ -439,7 +435,7 @@ nvkm_ioctl_ntfy_put(struct nouveau_handle *handle, void *data, u32 size) | |||
439 | 435 | ||
440 | static struct { | 436 | static struct { |
441 | int version; | 437 | int version; |
442 | int (*func)(struct nouveau_handle *, void *, u32); | 438 | int (*func)(struct nvkm_handle *, void *, u32); |
443 | } | 439 | } |
444 | nvkm_ioctl_v0[] = { | 440 | nvkm_ioctl_v0[] = { |
445 | { 0x00, nvkm_ioctl_nop }, | 441 | { 0x00, nvkm_ioctl_nop }, |
@@ -458,13 +454,12 @@ nvkm_ioctl_v0[] = { | |||
458 | }; | 454 | }; |
459 | 455 | ||
460 | static int | 456 | static int |
461 | nvkm_ioctl_path(struct nouveau_handle *parent, u32 type, u32 nr, | 457 | nvkm_ioctl_path(struct nvkm_handle *parent, u32 type, u32 nr, u32 *path, |
462 | u32 *path, void *data, u32 size, | 458 | void *data, u32 size, u8 owner, u8 *route, u64 *token) |
463 | u8 owner, u8 *route, u64 *token) | ||
464 | { | 459 | { |
465 | struct nouveau_handle *handle = parent; | 460 | struct nvkm_handle *handle = parent; |
466 | struct nouveau_namedb *namedb; | 461 | struct nvkm_namedb *namedb; |
467 | struct nouveau_object *object; | 462 | struct nvkm_object *object; |
468 | int ret; | 463 | int ret; |
469 | 464 | ||
470 | while ((object = parent->object), nr--) { | 465 | while ((object = parent->object), nr--) { |
@@ -475,16 +470,15 @@ nvkm_ioctl_path(struct nouveau_handle *parent, u32 type, u32 nr, | |||
475 | } | 470 | } |
476 | 471 | ||
477 | if (!(namedb = (void *)nv_pclass(object, NV_NAMEDB_CLASS)) || | 472 | if (!(namedb = (void *)nv_pclass(object, NV_NAMEDB_CLASS)) || |
478 | !(handle = nouveau_namedb_get(namedb, path[nr]))) { | 473 | !(handle = nvkm_namedb_get(namedb, path[nr]))) { |
479 | nv_debug(object, "handle 0x%08x not found\n", path[nr]); | 474 | nv_debug(object, "handle 0x%08x not found\n", path[nr]); |
480 | return -ENOENT; | 475 | return -ENOENT; |
481 | } | 476 | } |
482 | nouveau_namedb_put(handle); | 477 | nvkm_namedb_put(handle); |
483 | parent = handle; | 478 | parent = handle; |
484 | } | 479 | } |
485 | 480 | ||
486 | if (owner != NVIF_IOCTL_V0_OWNER_ANY && | 481 | if (owner != NVIF_IOCTL_V0_OWNER_ANY && owner != handle->route) { |
487 | owner != handle->route) { | ||
488 | nv_ioctl(object, "object route != owner\n"); | 482 | nv_ioctl(object, "object route != owner\n"); |
489 | return -EACCES; | 483 | return -EACCES; |
490 | } | 484 | } |
@@ -492,16 +486,15 @@ nvkm_ioctl_path(struct nouveau_handle *parent, u32 type, u32 nr, | |||
492 | *token = handle->token; | 486 | *token = handle->token; |
493 | 487 | ||
494 | if (ret = -EINVAL, type < ARRAY_SIZE(nvkm_ioctl_v0)) { | 488 | if (ret = -EINVAL, type < ARRAY_SIZE(nvkm_ioctl_v0)) { |
495 | if (nvkm_ioctl_v0[type].version == 0) { | 489 | if (nvkm_ioctl_v0[type].version == 0) |
496 | ret = nvkm_ioctl_v0[type].func(handle, data, size); | 490 | ret = nvkm_ioctl_v0[type].func(handle, data, size); |
497 | } | ||
498 | } | 491 | } |
499 | 492 | ||
500 | return ret; | 493 | return ret; |
501 | } | 494 | } |
502 | 495 | ||
503 | int | 496 | int |
504 | nvkm_ioctl(struct nouveau_client *client, bool supervisor, | 497 | nvkm_ioctl(struct nvkm_client *client, bool supervisor, |
505 | void *data, u32 size, void **hack) | 498 | void *data, u32 size, void **hack) |
506 | { | 499 | { |
507 | union { | 500 | union { |
@@ -519,7 +512,7 @@ nvkm_ioctl(struct nouveau_client *client, bool supervisor, | |||
519 | ret = nvkm_ioctl_path(client->root, args->v0.type, | 512 | ret = nvkm_ioctl_path(client->root, args->v0.type, |
520 | args->v0.path_nr, args->v0.path, | 513 | args->v0.path_nr, args->v0.path, |
521 | data, size, args->v0.owner, | 514 | data, size, args->v0.owner, |
522 | &args->v0.route, &args->v0.token); | 515 | &args->v0.route, &args->v0.token); |
523 | } | 516 | } |
524 | 517 | ||
525 | nv_ioctl(client, "return %d\n", ret); | 518 | nv_ioctl(client, "return %d\n", ret); |
@@ -527,6 +520,7 @@ nvkm_ioctl(struct nouveau_client *client, bool supervisor, | |||
527 | *hack = client->data; | 520 | *hack = client->data; |
528 | client->data = NULL; | 521 | client->data = NULL; |
529 | } | 522 | } |
523 | |||
530 | client->super = false; | 524 | client->super = false; |
531 | return ret; | 525 | return ret; |
532 | } | 526 | } |
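Annotation (not part of the patch): nvkm_ioctl() resolves the handle path and then dispatches on the ioctl type through the nvkm_ioctl_v0[] table, whose entries are functions taking (struct nvkm_handle *, void *, u32). A sketch of the shape a new handler would take; nvkm_ioctl_example() is purely illustrative and does nothing useful.

#include <core/ioctl.h>
#include <core/handle.h>

/*
 * Hypothetical handler matching the nvkm_ioctl_v0[] function-pointer
 * signature above: log the request against the resolved object and
 * reject any payload.
 */
static int
nvkm_ioctl_example(struct nvkm_handle *handle, void *data, u32 size)
{
        struct nvkm_object *object = handle->object;

        nv_ioctl(object, "example size %d\n", size);
        return size ? -EINVAL : 0;
}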
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/mm.c b/drivers/gpu/drm/nouveau/nvkm/core/mm.c index b4f5db66d5b5..7f458dfd5608 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/mm.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/mm.c | |||
@@ -21,39 +21,37 @@ | |||
21 | * | 21 | * |
22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
23 | */ | 23 | */ |
24 | #include <core/mm.h> | ||
24 | 25 | ||
25 | #include "core/os.h" | 26 | #define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \ |
26 | #include "core/mm.h" | 27 | list_entry((root)->nl_entry.dir, struct nvkm_mm_node, nl_entry) |
27 | |||
28 | #define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \ | ||
29 | list_entry((root)->nl_entry.dir, struct nouveau_mm_node, nl_entry) | ||
30 | 28 | ||
31 | static void | 29 | static void |
32 | nouveau_mm_dump(struct nouveau_mm *mm, const char *header) | 30 | nvkm_mm_dump(struct nvkm_mm *mm, const char *header) |
33 | { | 31 | { |
34 | struct nouveau_mm_node *node; | 32 | struct nvkm_mm_node *node; |
35 | 33 | ||
36 | printk(KERN_ERR "nouveau: %s\n", header); | 34 | printk(KERN_ERR "nvkm: %s\n", header); |
37 | printk(KERN_ERR "nouveau: node list:\n"); | 35 | printk(KERN_ERR "nvkm: node list:\n"); |
38 | list_for_each_entry(node, &mm->nodes, nl_entry) { | 36 | list_for_each_entry(node, &mm->nodes, nl_entry) { |
39 | printk(KERN_ERR "nouveau: \t%08x %08x %d\n", | 37 | printk(KERN_ERR "nvkm: \t%08x %08x %d\n", |
40 | node->offset, node->length, node->type); | 38 | node->offset, node->length, node->type); |
41 | } | 39 | } |
42 | printk(KERN_ERR "nouveau: free list:\n"); | 40 | printk(KERN_ERR "nvkm: free list:\n"); |
43 | list_for_each_entry(node, &mm->free, fl_entry) { | 41 | list_for_each_entry(node, &mm->free, fl_entry) { |
44 | printk(KERN_ERR "nouveau: \t%08x %08x %d\n", | 42 | printk(KERN_ERR "nvkm: \t%08x %08x %d\n", |
45 | node->offset, node->length, node->type); | 43 | node->offset, node->length, node->type); |
46 | } | 44 | } |
47 | } | 45 | } |
48 | 46 | ||
49 | void | 47 | void |
50 | nouveau_mm_free(struct nouveau_mm *mm, struct nouveau_mm_node **pthis) | 48 | nvkm_mm_free(struct nvkm_mm *mm, struct nvkm_mm_node **pthis) |
51 | { | 49 | { |
52 | struct nouveau_mm_node *this = *pthis; | 50 | struct nvkm_mm_node *this = *pthis; |
53 | 51 | ||
54 | if (this) { | 52 | if (this) { |
55 | struct nouveau_mm_node *prev = node(this, prev); | 53 | struct nvkm_mm_node *prev = node(this, prev); |
56 | struct nouveau_mm_node *next = node(this, next); | 54 | struct nvkm_mm_node *next = node(this, next); |
57 | 55 | ||
58 | if (prev && prev->type == NVKM_MM_TYPE_NONE) { | 56 | if (prev && prev->type == NVKM_MM_TYPE_NONE) { |
59 | prev->length += this->length; | 57 | prev->length += this->length; |
@@ -84,10 +82,10 @@ nouveau_mm_free(struct nouveau_mm *mm, struct nouveau_mm_node **pthis) | |||
84 | *pthis = NULL; | 82 | *pthis = NULL; |
85 | } | 83 | } |
86 | 84 | ||
87 | static struct nouveau_mm_node * | 85 | static struct nvkm_mm_node * |
88 | region_head(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size) | 86 | region_head(struct nvkm_mm *mm, struct nvkm_mm_node *a, u32 size) |
89 | { | 87 | { |
90 | struct nouveau_mm_node *b; | 88 | struct nvkm_mm_node *b; |
91 | 89 | ||
92 | if (a->length == size) | 90 | if (a->length == size) |
93 | return a; | 91 | return a; |
@@ -105,14 +103,15 @@ region_head(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size) | |||
105 | list_add_tail(&b->nl_entry, &a->nl_entry); | 103 | list_add_tail(&b->nl_entry, &a->nl_entry); |
106 | if (b->type == NVKM_MM_TYPE_NONE) | 104 | if (b->type == NVKM_MM_TYPE_NONE) |
107 | list_add_tail(&b->fl_entry, &a->fl_entry); | 105 | list_add_tail(&b->fl_entry, &a->fl_entry); |
106 | |||
108 | return b; | 107 | return b; |
109 | } | 108 | } |
110 | 109 | ||
111 | int | 110 | int |
112 | nouveau_mm_head(struct nouveau_mm *mm, u8 heap, u8 type, u32 size_max, | 111 | nvkm_mm_head(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min, |
113 | u32 size_min, u32 align, struct nouveau_mm_node **pnode) | 112 | u32 align, struct nvkm_mm_node **pnode) |
114 | { | 113 | { |
115 | struct nouveau_mm_node *prev, *this, *next; | 114 | struct nvkm_mm_node *prev, *this, *next; |
116 | u32 mask = align - 1; | 115 | u32 mask = align - 1; |
117 | u32 splitoff; | 116 | u32 splitoff; |
118 | u32 s, e; | 117 | u32 s, e; |
@@ -157,10 +156,10 @@ nouveau_mm_head(struct nouveau_mm *mm, u8 heap, u8 type, u32 size_max, | |||
157 | return -ENOSPC; | 156 | return -ENOSPC; |
158 | } | 157 | } |
159 | 158 | ||
160 | static struct nouveau_mm_node * | 159 | static struct nvkm_mm_node * |
161 | region_tail(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size) | 160 | region_tail(struct nvkm_mm *mm, struct nvkm_mm_node *a, u32 size) |
162 | { | 161 | { |
163 | struct nouveau_mm_node *b; | 162 | struct nvkm_mm_node *b; |
164 | 163 | ||
165 | if (a->length == size) | 164 | if (a->length == size) |
166 | return a; | 165 | return a; |
@@ -178,14 +177,15 @@ region_tail(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size) | |||
178 | list_add(&b->nl_entry, &a->nl_entry); | 177 | list_add(&b->nl_entry, &a->nl_entry); |
179 | if (b->type == NVKM_MM_TYPE_NONE) | 178 | if (b->type == NVKM_MM_TYPE_NONE) |
180 | list_add(&b->fl_entry, &a->fl_entry); | 179 | list_add(&b->fl_entry, &a->fl_entry); |
180 | |||
181 | return b; | 181 | return b; |
182 | } | 182 | } |
183 | 183 | ||
184 | int | 184 | int |
185 | nouveau_mm_tail(struct nouveau_mm *mm, u8 heap, u8 type, u32 size_max, | 185 | nvkm_mm_tail(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min, |
186 | u32 size_min, u32 align, struct nouveau_mm_node **pnode) | 186 | u32 align, struct nvkm_mm_node **pnode) |
187 | { | 187 | { |
188 | struct nouveau_mm_node *prev, *this, *next; | 188 | struct nvkm_mm_node *prev, *this, *next; |
189 | u32 mask = align - 1; | 189 | u32 mask = align - 1; |
190 | 190 | ||
191 | BUG_ON(type == NVKM_MM_TYPE_NONE || type == NVKM_MM_TYPE_HOLE); | 191 | BUG_ON(type == NVKM_MM_TYPE_NONE || type == NVKM_MM_TYPE_HOLE); |
@@ -235,12 +235,12 @@ nouveau_mm_tail(struct nouveau_mm *mm, u8 heap, u8 type, u32 size_max, | |||
235 | } | 235 | } |
236 | 236 | ||
237 | int | 237 | int |
238 | nouveau_mm_init(struct nouveau_mm *mm, u32 offset, u32 length, u32 block) | 238 | nvkm_mm_init(struct nvkm_mm *mm, u32 offset, u32 length, u32 block) |
239 | { | 239 | { |
240 | struct nouveau_mm_node *node, *prev; | 240 | struct nvkm_mm_node *node, *prev; |
241 | u32 next; | 241 | u32 next; |
242 | 242 | ||
243 | if (nouveau_mm_initialised(mm)) { | 243 | if (nvkm_mm_initialised(mm)) { |
244 | prev = list_last_entry(&mm->nodes, typeof(*node), nl_entry); | 244 | prev = list_last_entry(&mm->nodes, typeof(*node), nl_entry); |
245 | next = prev->offset + prev->length; | 245 | next = prev->offset + prev->length; |
246 | if (next != offset) { | 246 | if (next != offset) { |
@@ -277,18 +277,18 @@ nouveau_mm_init(struct nouveau_mm *mm, u32 offset, u32 length, u32 block) | |||
277 | } | 277 | } |
278 | 278 | ||
279 | int | 279 | int |
280 | nouveau_mm_fini(struct nouveau_mm *mm) | 280 | nvkm_mm_fini(struct nvkm_mm *mm) |
281 | { | 281 | { |
282 | struct nouveau_mm_node *node, *temp; | 282 | struct nvkm_mm_node *node, *temp; |
283 | int nodes = 0; | 283 | int nodes = 0; |
284 | 284 | ||
285 | if (!nouveau_mm_initialised(mm)) | 285 | if (!nvkm_mm_initialised(mm)) |
286 | return 0; | 286 | return 0; |
287 | 287 | ||
288 | list_for_each_entry(node, &mm->nodes, nl_entry) { | 288 | list_for_each_entry(node, &mm->nodes, nl_entry) { |
289 | if (node->type != NVKM_MM_TYPE_HOLE) { | 289 | if (node->type != NVKM_MM_TYPE_HOLE) { |
290 | if (++nodes > mm->heap_nodes) { | 290 | if (++nodes > mm->heap_nodes) { |
291 | nouveau_mm_dump(mm, "mm not clean!"); | 291 | nvkm_mm_dump(mm, "mm not clean!"); |
292 | return -EBUSY; | 292 | return -EBUSY; |
293 | } | 293 | } |
294 | } | 294 | } |
@@ -298,6 +298,7 @@ nouveau_mm_fini(struct nouveau_mm *mm) | |||
298 | list_del(&node->nl_entry); | 298 | list_del(&node->nl_entry); |
299 | kfree(node); | 299 | kfree(node); |
300 | } | 300 | } |
301 | |||
301 | mm->heap_nodes = 0; | 302 | mm->heap_nodes = 0; |
302 | return 0; | 303 | return 0; |
303 | } | 304 | } |
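For reference, a minimal caller-side sketch of the renamed allocator interface, built only from the signatures visible in the hunks above; the <core/mm.h> include path, nvkm_mm_free(), and the heap=0/type=1 values are assumptions, and example_mm_usage() is a hypothetical caller:

#include <core/mm.h>

static int
example_mm_usage(void)
{
	struct nvkm_mm mm;
	struct nvkm_mm_node *node = NULL;
	int ret;

	/* manage 0x4000 units starting at offset 0, minimum block size 1 */
	ret = nvkm_mm_init(&mm, 0, 0x4000, 1);
	if (ret)
		return ret;

	/* take 16 units from the bottom of the range, 1-unit alignment;
	 * heap 0 and type 1 are placeholder values for this sketch */
	ret = nvkm_mm_head(&mm, 0, 1, 16, 16, 1, &node);
	if (ret == 0)
		nvkm_mm_free(&mm, &node);	/* assumed rename of nouveau_mm_free */

	return nvkm_mm_fini(&mm);
}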
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/namedb.c b/drivers/gpu/drm/nouveau/nvkm/core/namedb.c index 0594a599f6fb..6400767c5dba 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/namedb.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/namedb.c | |||
@@ -21,16 +21,14 @@ | |||
21 | * | 21 | * |
22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
23 | */ | 23 | */ |
24 | |||
25 | #include <core/object.h> | ||
26 | #include <core/namedb.h> | 24 | #include <core/namedb.h> |
27 | #include <core/handle.h> | ||
28 | #include <core/gpuobj.h> | 25 | #include <core/gpuobj.h> |
26 | #include <core/handle.h> | ||
29 | 27 | ||
30 | static struct nouveau_handle * | 28 | static struct nvkm_handle * |
31 | nouveau_namedb_lookup(struct nouveau_namedb *namedb, u32 name) | 29 | nvkm_namedb_lookup(struct nvkm_namedb *namedb, u32 name) |
32 | { | 30 | { |
33 | struct nouveau_handle *handle; | 31 | struct nvkm_handle *handle; |
34 | 32 | ||
35 | list_for_each_entry(handle, &namedb->list, node) { | 33 | list_for_each_entry(handle, &namedb->list, node) { |
36 | if (handle->name == name) | 34 | if (handle->name == name) |
@@ -40,10 +38,10 @@ nouveau_namedb_lookup(struct nouveau_namedb *namedb, u32 name) | |||
40 | return NULL; | 38 | return NULL; |
41 | } | 39 | } |
42 | 40 | ||
43 | static struct nouveau_handle * | 41 | static struct nvkm_handle * |
44 | nouveau_namedb_lookup_class(struct nouveau_namedb *namedb, u16 oclass) | 42 | nvkm_namedb_lookup_class(struct nvkm_namedb *namedb, u16 oclass) |
45 | { | 43 | { |
46 | struct nouveau_handle *handle; | 44 | struct nvkm_handle *handle; |
47 | 45 | ||
48 | list_for_each_entry(handle, &namedb->list, node) { | 46 | list_for_each_entry(handle, &namedb->list, node) { |
49 | if (nv_mclass(handle->object) == oclass) | 47 | if (nv_mclass(handle->object) == oclass) |
@@ -53,10 +51,10 @@ nouveau_namedb_lookup_class(struct nouveau_namedb *namedb, u16 oclass) | |||
53 | return NULL; | 51 | return NULL; |
54 | } | 52 | } |
55 | 53 | ||
56 | static struct nouveau_handle * | 54 | static struct nvkm_handle * |
57 | nouveau_namedb_lookup_vinst(struct nouveau_namedb *namedb, u64 vinst) | 55 | nvkm_namedb_lookup_vinst(struct nvkm_namedb *namedb, u64 vinst) |
58 | { | 56 | { |
59 | struct nouveau_handle *handle; | 57 | struct nvkm_handle *handle; |
60 | 58 | ||
61 | list_for_each_entry(handle, &namedb->list, node) { | 59 | list_for_each_entry(handle, &namedb->list, node) { |
62 | if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) { | 60 | if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) { |
@@ -68,10 +66,10 @@ nouveau_namedb_lookup_vinst(struct nouveau_namedb *namedb, u64 vinst) | |||
68 | return NULL; | 66 | return NULL; |
69 | } | 67 | } |
70 | 68 | ||
71 | static struct nouveau_handle * | 69 | static struct nvkm_handle * |
72 | nouveau_namedb_lookup_cinst(struct nouveau_namedb *namedb, u32 cinst) | 70 | nvkm_namedb_lookup_cinst(struct nvkm_namedb *namedb, u32 cinst) |
73 | { | 71 | { |
74 | struct nouveau_handle *handle; | 72 | struct nvkm_handle *handle; |
75 | 73 | ||
76 | list_for_each_entry(handle, &namedb->list, node) { | 74 | list_for_each_entry(handle, &namedb->list, node) { |
77 | if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) { | 75 | if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) { |
@@ -85,14 +83,14 @@ nouveau_namedb_lookup_cinst(struct nouveau_namedb *namedb, u32 cinst) | |||
85 | } | 83 | } |
86 | 84 | ||
87 | int | 85 | int |
88 | nouveau_namedb_insert(struct nouveau_namedb *namedb, u32 name, | 86 | nvkm_namedb_insert(struct nvkm_namedb *namedb, u32 name, |
89 | struct nouveau_object *object, | 87 | struct nvkm_object *object, |
90 | struct nouveau_handle *handle) | 88 | struct nvkm_handle *handle) |
91 | { | 89 | { |
92 | int ret = -EEXIST; | 90 | int ret = -EEXIST; |
93 | write_lock_irq(&namedb->lock); | 91 | write_lock_irq(&namedb->lock); |
94 | if (!nouveau_namedb_lookup(namedb, name)) { | 92 | if (!nvkm_namedb_lookup(namedb, name)) { |
95 | nouveau_object_ref(object, &handle->object); | 93 | nvkm_object_ref(object, &handle->object); |
96 | handle->namedb = namedb; | 94 | handle->namedb = namedb; |
97 | list_add(&handle->node, &namedb->list); | 95 | list_add(&handle->node, &namedb->list); |
98 | ret = 0; | 96 | ret = 0; |
@@ -102,80 +100,79 @@ nouveau_namedb_insert(struct nouveau_namedb *namedb, u32 name, | |||
102 | } | 100 | } |
103 | 101 | ||
104 | void | 102 | void |
105 | nouveau_namedb_remove(struct nouveau_handle *handle) | 103 | nvkm_namedb_remove(struct nvkm_handle *handle) |
106 | { | 104 | { |
107 | struct nouveau_namedb *namedb = handle->namedb; | 105 | struct nvkm_namedb *namedb = handle->namedb; |
108 | struct nouveau_object *object = handle->object; | 106 | struct nvkm_object *object = handle->object; |
109 | write_lock_irq(&namedb->lock); | 107 | write_lock_irq(&namedb->lock); |
110 | list_del(&handle->node); | 108 | list_del(&handle->node); |
111 | write_unlock_irq(&namedb->lock); | 109 | write_unlock_irq(&namedb->lock); |
112 | nouveau_object_ref(NULL, &object); | 110 | nvkm_object_ref(NULL, &object); |
113 | } | 111 | } |
114 | 112 | ||
115 | struct nouveau_handle * | 113 | struct nvkm_handle * |
116 | nouveau_namedb_get(struct nouveau_namedb *namedb, u32 name) | 114 | nvkm_namedb_get(struct nvkm_namedb *namedb, u32 name) |
117 | { | 115 | { |
118 | struct nouveau_handle *handle; | 116 | struct nvkm_handle *handle; |
119 | read_lock(&namedb->lock); | 117 | read_lock(&namedb->lock); |
120 | handle = nouveau_namedb_lookup(namedb, name); | 118 | handle = nvkm_namedb_lookup(namedb, name); |
121 | if (handle == NULL) | 119 | if (handle == NULL) |
122 | read_unlock(&namedb->lock); | 120 | read_unlock(&namedb->lock); |
123 | return handle; | 121 | return handle; |
124 | } | 122 | } |
125 | 123 | ||
126 | struct nouveau_handle * | 124 | struct nvkm_handle * |
127 | nouveau_namedb_get_class(struct nouveau_namedb *namedb, u16 oclass) | 125 | nvkm_namedb_get_class(struct nvkm_namedb *namedb, u16 oclass) |
128 | { | 126 | { |
129 | struct nouveau_handle *handle; | 127 | struct nvkm_handle *handle; |
130 | read_lock(&namedb->lock); | 128 | read_lock(&namedb->lock); |
131 | handle = nouveau_namedb_lookup_class(namedb, oclass); | 129 | handle = nvkm_namedb_lookup_class(namedb, oclass); |
132 | if (handle == NULL) | 130 | if (handle == NULL) |
133 | read_unlock(&namedb->lock); | 131 | read_unlock(&namedb->lock); |
134 | return handle; | 132 | return handle; |
135 | } | 133 | } |
136 | 134 | ||
137 | struct nouveau_handle * | 135 | struct nvkm_handle * |
138 | nouveau_namedb_get_vinst(struct nouveau_namedb *namedb, u64 vinst) | 136 | nvkm_namedb_get_vinst(struct nvkm_namedb *namedb, u64 vinst) |
139 | { | 137 | { |
140 | struct nouveau_handle *handle; | 138 | struct nvkm_handle *handle; |
141 | read_lock(&namedb->lock); | 139 | read_lock(&namedb->lock); |
142 | handle = nouveau_namedb_lookup_vinst(namedb, vinst); | 140 | handle = nvkm_namedb_lookup_vinst(namedb, vinst); |
143 | if (handle == NULL) | 141 | if (handle == NULL) |
144 | read_unlock(&namedb->lock); | 142 | read_unlock(&namedb->lock); |
145 | return handle; | 143 | return handle; |
146 | } | 144 | } |
147 | 145 | ||
148 | struct nouveau_handle * | 146 | struct nvkm_handle * |
149 | nouveau_namedb_get_cinst(struct nouveau_namedb *namedb, u32 cinst) | 147 | nvkm_namedb_get_cinst(struct nvkm_namedb *namedb, u32 cinst) |
150 | { | 148 | { |
151 | struct nouveau_handle *handle; | 149 | struct nvkm_handle *handle; |
152 | read_lock(&namedb->lock); | 150 | read_lock(&namedb->lock); |
153 | handle = nouveau_namedb_lookup_cinst(namedb, cinst); | 151 | handle = nvkm_namedb_lookup_cinst(namedb, cinst); |
154 | if (handle == NULL) | 152 | if (handle == NULL) |
155 | read_unlock(&namedb->lock); | 153 | read_unlock(&namedb->lock); |
156 | return handle; | 154 | return handle; |
157 | } | 155 | } |
158 | 156 | ||
159 | void | 157 | void |
160 | nouveau_namedb_put(struct nouveau_handle *handle) | 158 | nvkm_namedb_put(struct nvkm_handle *handle) |
161 | { | 159 | { |
162 | if (handle) | 160 | if (handle) |
163 | read_unlock(&handle->namedb->lock); | 161 | read_unlock(&handle->namedb->lock); |
164 | } | 162 | } |
165 | 163 | ||
166 | int | 164 | int |
167 | nouveau_namedb_create_(struct nouveau_object *parent, | 165 | nvkm_namedb_create_(struct nvkm_object *parent, struct nvkm_object *engine, |
168 | struct nouveau_object *engine, | 166 | struct nvkm_oclass *oclass, u32 pclass, |
169 | struct nouveau_oclass *oclass, u32 pclass, | 167 | struct nvkm_oclass *sclass, u64 engcls, |
170 | struct nouveau_oclass *sclass, u64 engcls, | 168 | int length, void **pobject) |
171 | int length, void **pobject) | ||
172 | { | 169 | { |
173 | struct nouveau_namedb *namedb; | 170 | struct nvkm_namedb *namedb; |
174 | int ret; | 171 | int ret; |
175 | 172 | ||
176 | ret = nouveau_parent_create_(parent, engine, oclass, pclass | | 173 | ret = nvkm_parent_create_(parent, engine, oclass, pclass | |
177 | NV_NAMEDB_CLASS, sclass, engcls, | 174 | NV_NAMEDB_CLASS, sclass, engcls, |
178 | length, pobject); | 175 | length, pobject); |
179 | namedb = *pobject; | 176 | namedb = *pobject; |
180 | if (ret) | 177 | if (ret) |
181 | return ret; | 178 | return ret; |
@@ -186,15 +183,14 @@ nouveau_namedb_create_(struct nouveau_object *parent, | |||
186 | } | 183 | } |
187 | 184 | ||
188 | int | 185 | int |
189 | _nouveau_namedb_ctor(struct nouveau_object *parent, | 186 | _nvkm_namedb_ctor(struct nvkm_object *parent, struct nvkm_object *engine, |
190 | struct nouveau_object *engine, | 187 | struct nvkm_oclass *oclass, void *data, u32 size, |
191 | struct nouveau_oclass *oclass, void *data, u32 size, | 188 | struct nvkm_object **pobject) |
192 | struct nouveau_object **pobject) | ||
193 | { | 189 | { |
194 | struct nouveau_namedb *object; | 190 | struct nvkm_namedb *object; |
195 | int ret; | 191 | int ret; |
196 | 192 | ||
197 | ret = nouveau_namedb_create(parent, engine, oclass, 0, NULL, 0, &object); | 193 | ret = nvkm_namedb_create(parent, engine, oclass, 0, NULL, 0, &object); |
198 | *pobject = nv_object(object); | 194 | *pobject = nv_object(object); |
199 | if (ret) | 195 | if (ret) |
200 | return ret; | 196 | return ret; |
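A caller-side sketch of the lookup pattern implied above: nvkm_namedb_get() leaves the namedb read lock held whenever it returns a handle, and nvkm_namedb_put() releases it, so the handle may only be dereferenced between the two calls. example_lookup() is hypothetical; nvkm_object_ref() and handle->object are taken from the hunks in this patch:

#include <core/namedb.h>
#include <core/handle.h>

static struct nvkm_object *
example_lookup(struct nvkm_namedb *namedb, u32 name)
{
	struct nvkm_handle *handle;
	struct nvkm_object *object = NULL;

	handle = nvkm_namedb_get(namedb, name);
	if (handle) {
		/* take our own reference before dropping the read lock */
		nvkm_object_ref(handle->object, &object);
		nvkm_namedb_put(handle);
	}
	return object;
}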
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/notify.c b/drivers/gpu/drm/nouveau/nvkm/core/notify.c index 839a32577680..023610d01458 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/notify.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/notify.c | |||
@@ -21,13 +21,8 @@ | |||
21 | * | 21 | * |
22 | * Authors: Ben Skeggs <bskeggs@redhat.com> | 22 | * Authors: Ben Skeggs <bskeggs@redhat.com> |
23 | */ | 23 | */ |
24 | |||
25 | #include <core/client.h> | ||
26 | #include <core/event.h> | ||
27 | #include <core/notify.h> | 24 | #include <core/notify.h> |
28 | 25 | #include <core/event.h> | |
29 | #include <nvif/unpack.h> | ||
30 | #include <nvif/event.h> | ||
31 | 26 | ||
32 | static inline void | 27 | static inline void |
33 | nvkm_notify_put_locked(struct nvkm_notify *notify) | 28 | nvkm_notify_put_locked(struct nvkm_notify *notify) |
@@ -134,7 +129,7 @@ nvkm_notify_fini(struct nvkm_notify *notify) | |||
134 | } | 129 | } |
135 | 130 | ||
136 | int | 131 | int |
137 | nvkm_notify_init(struct nouveau_object *object, struct nvkm_event *event, | 132 | nvkm_notify_init(struct nvkm_object *object, struct nvkm_event *event, |
138 | int (*func)(struct nvkm_notify *), bool work, | 133 | int (*func)(struct nvkm_notify *), bool work, |
139 | void *data, u32 size, u32 reply, | 134 | void *data, u32 size, u32 reply, |
140 | struct nvkm_notify *notify) | 135 | struct nvkm_notify *notify) |
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/object.c b/drivers/gpu/drm/nouveau/nvkm/core/object.c index b160860edf54..979f3627d395 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/object.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/object.c | |||
@@ -21,36 +21,34 @@ | |||
21 | * | 21 | * |
22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
23 | */ | 23 | */ |
24 | |||
25 | #include <core/object.h> | 24 | #include <core/object.h> |
26 | #include <core/engine.h> | 25 | #include <core/engine.h> |
27 | 26 | ||
28 | #ifdef NOUVEAU_OBJECT_MAGIC | 27 | #ifdef NVKM_OBJECT_MAGIC |
29 | static struct list_head _objlist = LIST_HEAD_INIT(_objlist); | 28 | static struct list_head _objlist = LIST_HEAD_INIT(_objlist); |
30 | static DEFINE_SPINLOCK(_objlist_lock); | 29 | static DEFINE_SPINLOCK(_objlist_lock); |
31 | #endif | 30 | #endif |
32 | 31 | ||
33 | int | 32 | int |
34 | nouveau_object_create_(struct nouveau_object *parent, | 33 | nvkm_object_create_(struct nvkm_object *parent, struct nvkm_object *engine, |
35 | struct nouveau_object *engine, | 34 | struct nvkm_oclass *oclass, u32 pclass, |
36 | struct nouveau_oclass *oclass, u32 pclass, | 35 | int size, void **pobject) |
37 | int size, void **pobject) | ||
38 | { | 36 | { |
39 | struct nouveau_object *object; | 37 | struct nvkm_object *object; |
40 | 38 | ||
41 | object = *pobject = kzalloc(size, GFP_KERNEL); | 39 | object = *pobject = kzalloc(size, GFP_KERNEL); |
42 | if (!object) | 40 | if (!object) |
43 | return -ENOMEM; | 41 | return -ENOMEM; |
44 | 42 | ||
45 | nouveau_object_ref(parent, &object->parent); | 43 | nvkm_object_ref(parent, &object->parent); |
46 | nouveau_object_ref(engine, (struct nouveau_object **)&object->engine); | 44 | nvkm_object_ref(engine, (struct nvkm_object **)&object->engine); |
47 | object->oclass = oclass; | 45 | object->oclass = oclass; |
48 | object->oclass->handle |= pclass; | 46 | object->oclass->handle |= pclass; |
49 | atomic_set(&object->refcount, 1); | 47 | atomic_set(&object->refcount, 1); |
50 | atomic_set(&object->usecount, 0); | 48 | atomic_set(&object->usecount, 0); |
51 | 49 | ||
52 | #ifdef NOUVEAU_OBJECT_MAGIC | 50 | #ifdef NVKM_OBJECT_MAGIC |
53 | object->_magic = NOUVEAU_OBJECT_MAGIC; | 51 | object->_magic = NVKM_OBJECT_MAGIC; |
54 | spin_lock(&_objlist_lock); | 52 | spin_lock(&_objlist_lock); |
55 | list_add(&object->list, &_objlist); | 53 | list_add(&object->list, &_objlist); |
56 | spin_unlock(&_objlist_lock); | 54 | spin_unlock(&_objlist_lock); |
@@ -59,57 +57,55 @@ nouveau_object_create_(struct nouveau_object *parent, | |||
59 | } | 57 | } |
60 | 58 | ||
61 | int | 59 | int |
62 | _nouveau_object_ctor(struct nouveau_object *parent, | 60 | _nvkm_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine, |
63 | struct nouveau_object *engine, | 61 | struct nvkm_oclass *oclass, void *data, u32 size, |
64 | struct nouveau_oclass *oclass, void *data, u32 size, | 62 | struct nvkm_object **pobject) |
65 | struct nouveau_object **pobject) | ||
66 | { | 63 | { |
67 | if (size != 0) | 64 | if (size != 0) |
68 | return -ENOSYS; | 65 | return -ENOSYS; |
69 | return nouveau_object_create(parent, engine, oclass, 0, pobject); | 66 | return nvkm_object_create(parent, engine, oclass, 0, pobject); |
70 | } | 67 | } |
71 | 68 | ||
72 | void | 69 | void |
73 | nouveau_object_destroy(struct nouveau_object *object) | 70 | nvkm_object_destroy(struct nvkm_object *object) |
74 | { | 71 | { |
75 | #ifdef NOUVEAU_OBJECT_MAGIC | 72 | #ifdef NVKM_OBJECT_MAGIC |
76 | spin_lock(&_objlist_lock); | 73 | spin_lock(&_objlist_lock); |
77 | list_del(&object->list); | 74 | list_del(&object->list); |
78 | spin_unlock(&_objlist_lock); | 75 | spin_unlock(&_objlist_lock); |
79 | #endif | 76 | #endif |
80 | nouveau_object_ref(NULL, (struct nouveau_object **)&object->engine); | 77 | nvkm_object_ref(NULL, (struct nvkm_object **)&object->engine); |
81 | nouveau_object_ref(NULL, &object->parent); | 78 | nvkm_object_ref(NULL, &object->parent); |
82 | kfree(object); | 79 | kfree(object); |
83 | } | 80 | } |
84 | 81 | ||
85 | int | 82 | int |
86 | nouveau_object_init(struct nouveau_object *object) | 83 | nvkm_object_init(struct nvkm_object *object) |
87 | { | 84 | { |
88 | return 0; | 85 | return 0; |
89 | } | 86 | } |
90 | 87 | ||
91 | int | 88 | int |
92 | nouveau_object_fini(struct nouveau_object *object, bool suspend) | 89 | nvkm_object_fini(struct nvkm_object *object, bool suspend) |
93 | { | 90 | { |
94 | return 0; | 91 | return 0; |
95 | } | 92 | } |
96 | 93 | ||
97 | struct nouveau_ofuncs | 94 | struct nvkm_ofuncs |
98 | nouveau_object_ofuncs = { | 95 | nvkm_object_ofuncs = { |
99 | .ctor = _nouveau_object_ctor, | 96 | .ctor = _nvkm_object_ctor, |
100 | .dtor = nouveau_object_destroy, | 97 | .dtor = nvkm_object_destroy, |
101 | .init = nouveau_object_init, | 98 | .init = nvkm_object_init, |
102 | .fini = nouveau_object_fini, | 99 | .fini = nvkm_object_fini, |
103 | }; | 100 | }; |
104 | 101 | ||
105 | int | 102 | int |
106 | nouveau_object_ctor(struct nouveau_object *parent, | 103 | nvkm_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine, |
107 | struct nouveau_object *engine, | 104 | struct nvkm_oclass *oclass, void *data, u32 size, |
108 | struct nouveau_oclass *oclass, void *data, u32 size, | 105 | struct nvkm_object **pobject) |
109 | struct nouveau_object **pobject) | ||
110 | { | 106 | { |
111 | struct nouveau_ofuncs *ofuncs = oclass->ofuncs; | 107 | struct nvkm_ofuncs *ofuncs = oclass->ofuncs; |
112 | struct nouveau_object *object = NULL; | 108 | struct nvkm_object *object = NULL; |
113 | int ret; | 109 | int ret; |
114 | 110 | ||
115 | ret = ofuncs->ctor(parent, engine, oclass, data, size, &object); | 111 | ret = ofuncs->ctor(parent, engine, oclass, data, size, &object); |
@@ -137,14 +133,14 @@ nouveau_object_ctor(struct nouveau_object *parent, | |||
137 | } | 133 | } |
138 | 134 | ||
139 | static void | 135 | static void |
140 | nouveau_object_dtor(struct nouveau_object *object) | 136 | nvkm_object_dtor(struct nvkm_object *object) |
141 | { | 137 | { |
142 | nv_trace(object, "destroying\n"); | 138 | nv_trace(object, "destroying\n"); |
143 | nv_ofuncs(object)->dtor(object); | 139 | nv_ofuncs(object)->dtor(object); |
144 | } | 140 | } |
145 | 141 | ||
146 | void | 142 | void |
147 | nouveau_object_ref(struct nouveau_object *obj, struct nouveau_object **ref) | 143 | nvkm_object_ref(struct nvkm_object *obj, struct nvkm_object **ref) |
148 | { | 144 | { |
149 | if (obj) { | 145 | if (obj) { |
150 | atomic_inc(&obj->refcount); | 146 | atomic_inc(&obj->refcount); |
@@ -155,14 +151,14 @@ nouveau_object_ref(struct nouveau_object *obj, struct nouveau_object **ref) | |||
155 | int dead = atomic_dec_and_test(&(*ref)->refcount); | 151 | int dead = atomic_dec_and_test(&(*ref)->refcount); |
156 | nv_trace(*ref, "dec() == %d\n", atomic_read(&(*ref)->refcount)); | 152 | nv_trace(*ref, "dec() == %d\n", atomic_read(&(*ref)->refcount)); |
157 | if (dead) | 153 | if (dead) |
158 | nouveau_object_dtor(*ref); | 154 | nvkm_object_dtor(*ref); |
159 | } | 155 | } |
160 | 156 | ||
161 | *ref = obj; | 157 | *ref = obj; |
162 | } | 158 | } |
163 | 159 | ||
164 | int | 160 | int |
165 | nouveau_object_inc(struct nouveau_object *object) | 161 | nvkm_object_inc(struct nvkm_object *object) |
166 | { | 162 | { |
167 | int ref = atomic_add_return(1, &object->usecount); | 163 | int ref = atomic_add_return(1, &object->usecount); |
168 | int ret; | 164 | int ret; |
@@ -173,7 +169,7 @@ nouveau_object_inc(struct nouveau_object *object) | |||
173 | 169 | ||
174 | nv_trace(object, "initialising...\n"); | 170 | nv_trace(object, "initialising...\n"); |
175 | if (object->parent) { | 171 | if (object->parent) { |
176 | ret = nouveau_object_inc(object->parent); | 172 | ret = nvkm_object_inc(object->parent); |
177 | if (ret) { | 173 | if (ret) { |
178 | nv_error(object, "parent failed, %d\n", ret); | 174 | nv_error(object, "parent failed, %d\n", ret); |
179 | goto fail_parent; | 175 | goto fail_parent; |
@@ -182,7 +178,7 @@ nouveau_object_inc(struct nouveau_object *object) | |||
182 | 178 | ||
183 | if (object->engine) { | 179 | if (object->engine) { |
184 | mutex_lock(&nv_subdev(object->engine)->mutex); | 180 | mutex_lock(&nv_subdev(object->engine)->mutex); |
185 | ret = nouveau_object_inc(&object->engine->subdev.object); | 181 | ret = nvkm_object_inc(&object->engine->subdev.object); |
186 | mutex_unlock(&nv_subdev(object->engine)->mutex); | 182 | mutex_unlock(&nv_subdev(object->engine)->mutex); |
187 | if (ret) { | 183 | if (ret) { |
188 | nv_error(object, "engine failed, %d\n", ret); | 184 | nv_error(object, "engine failed, %d\n", ret); |
@@ -203,19 +199,19 @@ nouveau_object_inc(struct nouveau_object *object) | |||
203 | fail_self: | 199 | fail_self: |
204 | if (object->engine) { | 200 | if (object->engine) { |
205 | mutex_lock(&nv_subdev(object->engine)->mutex); | 201 | mutex_lock(&nv_subdev(object->engine)->mutex); |
206 | nouveau_object_dec(&object->engine->subdev.object, false); | 202 | nvkm_object_dec(&object->engine->subdev.object, false); |
207 | mutex_unlock(&nv_subdev(object->engine)->mutex); | 203 | mutex_unlock(&nv_subdev(object->engine)->mutex); |
208 | } | 204 | } |
209 | fail_engine: | 205 | fail_engine: |
210 | if (object->parent) | 206 | if (object->parent) |
211 | nouveau_object_dec(object->parent, false); | 207 | nvkm_object_dec(object->parent, false); |
212 | fail_parent: | 208 | fail_parent: |
213 | atomic_dec(&object->usecount); | 209 | atomic_dec(&object->usecount); |
214 | return ret; | 210 | return ret; |
215 | } | 211 | } |
216 | 212 | ||
217 | static int | 213 | static int |
218 | nouveau_object_decf(struct nouveau_object *object) | 214 | nvkm_object_decf(struct nvkm_object *object) |
219 | { | 215 | { |
220 | int ret; | 216 | int ret; |
221 | 217 | ||
@@ -228,19 +224,19 @@ nouveau_object_decf(struct nouveau_object *object) | |||
228 | 224 | ||
229 | if (object->engine) { | 225 | if (object->engine) { |
230 | mutex_lock(&nv_subdev(object->engine)->mutex); | 226 | mutex_lock(&nv_subdev(object->engine)->mutex); |
231 | nouveau_object_dec(&object->engine->subdev.object, false); | 227 | nvkm_object_dec(&object->engine->subdev.object, false); |
232 | mutex_unlock(&nv_subdev(object->engine)->mutex); | 228 | mutex_unlock(&nv_subdev(object->engine)->mutex); |
233 | } | 229 | } |
234 | 230 | ||
235 | if (object->parent) | 231 | if (object->parent) |
236 | nouveau_object_dec(object->parent, false); | 232 | nvkm_object_dec(object->parent, false); |
237 | 233 | ||
238 | nv_trace(object, "stopped\n"); | 234 | nv_trace(object, "stopped\n"); |
239 | return 0; | 235 | return 0; |
240 | } | 236 | } |
241 | 237 | ||
242 | static int | 238 | static int |
243 | nouveau_object_decs(struct nouveau_object *object) | 239 | nvkm_object_decs(struct nvkm_object *object) |
244 | { | 240 | { |
245 | int ret, rret; | 241 | int ret, rret; |
246 | 242 | ||
@@ -255,7 +251,7 @@ nouveau_object_decs(struct nouveau_object *object) | |||
255 | 251 | ||
256 | if (object->engine) { | 252 | if (object->engine) { |
257 | mutex_lock(&nv_subdev(object->engine)->mutex); | 253 | mutex_lock(&nv_subdev(object->engine)->mutex); |
258 | ret = nouveau_object_dec(&object->engine->subdev.object, true); | 254 | ret = nvkm_object_dec(&object->engine->subdev.object, true); |
259 | mutex_unlock(&nv_subdev(object->engine)->mutex); | 255 | mutex_unlock(&nv_subdev(object->engine)->mutex); |
260 | if (ret) { | 256 | if (ret) { |
261 | nv_warn(object, "engine failed suspend, %d\n", ret); | 257 | nv_warn(object, "engine failed suspend, %d\n", ret); |
@@ -264,7 +260,7 @@ nouveau_object_decs(struct nouveau_object *object) | |||
264 | } | 260 | } |
265 | 261 | ||
266 | if (object->parent) { | 262 | if (object->parent) { |
267 | ret = nouveau_object_dec(object->parent, true); | 263 | ret = nvkm_object_dec(object->parent, true); |
268 | if (ret) { | 264 | if (ret) { |
269 | nv_warn(object, "parent failed suspend, %d\n", ret); | 265 | nv_warn(object, "parent failed suspend, %d\n", ret); |
270 | goto fail_parent; | 266 | goto fail_parent; |
@@ -277,7 +273,7 @@ nouveau_object_decs(struct nouveau_object *object) | |||
277 | fail_parent: | 273 | fail_parent: |
278 | if (object->engine) { | 274 | if (object->engine) { |
279 | mutex_lock(&nv_subdev(object->engine)->mutex); | 275 | mutex_lock(&nv_subdev(object->engine)->mutex); |
280 | rret = nouveau_object_inc(&object->engine->subdev.object); | 276 | rret = nvkm_object_inc(&object->engine->subdev.object); |
281 | mutex_unlock(&nv_subdev(object->engine)->mutex); | 277 | mutex_unlock(&nv_subdev(object->engine)->mutex); |
282 | if (rret) | 278 | if (rret) |
283 | nv_fatal(object, "engine failed to reinit, %d\n", rret); | 279 | nv_fatal(object, "engine failed to reinit, %d\n", rret); |
@@ -292,7 +288,7 @@ fail_engine: | |||
292 | } | 288 | } |
293 | 289 | ||
294 | int | 290 | int |
295 | nouveau_object_dec(struct nouveau_object *object, bool suspend) | 291 | nvkm_object_dec(struct nvkm_object *object, bool suspend) |
296 | { | 292 | { |
297 | int ref = atomic_add_return(-1, &object->usecount); | 293 | int ref = atomic_add_return(-1, &object->usecount); |
298 | int ret; | 294 | int ret; |
@@ -301,9 +297,9 @@ nouveau_object_dec(struct nouveau_object *object, bool suspend) | |||
301 | 297 | ||
302 | if (ref == 0) { | 298 | if (ref == 0) { |
303 | if (suspend) | 299 | if (suspend) |
304 | ret = nouveau_object_decs(object); | 300 | ret = nvkm_object_decs(object); |
305 | else | 301 | else |
306 | ret = nouveau_object_decf(object); | 302 | ret = nvkm_object_decf(object); |
307 | 303 | ||
308 | if (ret) { | 304 | if (ret) { |
309 | atomic_inc(&object->usecount); | 305 | atomic_inc(&object->usecount); |
@@ -315,10 +311,10 @@ nouveau_object_dec(struct nouveau_object *object, bool suspend) | |||
315 | } | 311 | } |
316 | 312 | ||
317 | void | 313 | void |
318 | nouveau_object_debug(void) | 314 | nvkm_object_debug(void) |
319 | { | 315 | { |
320 | #ifdef NOUVEAU_OBJECT_MAGIC | 316 | #ifdef NVKM_OBJECT_MAGIC |
321 | struct nouveau_object *object; | 317 | struct nvkm_object *object; |
322 | if (!list_empty(&_objlist)) { | 318 | if (!list_empty(&_objlist)) { |
323 | nv_fatal(NULL, "*******************************************\n"); | 319 | nv_fatal(NULL, "*******************************************\n"); |
324 | nv_fatal(NULL, "* AIIIII! object(s) still exist!!!\n"); | 320 | nv_fatal(NULL, "* AIIIII! object(s) still exist!!!\n"); |
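The two counters touched in this file serve different roles: refcount, managed by nvkm_object_ref(), controls lifetime (the dtor runs when it drops to zero), while usecount, managed by nvkm_object_inc()/nvkm_object_dec(), drives init/fini and cascades to the parent and engine. A minimal sketch of the calling convention; example_use() is hypothetical:

#include <core/object.h>

static int
example_use(struct nvkm_object *obj)
{
	struct nvkm_object *ref = NULL;
	int ret;

	nvkm_object_ref(obj, &ref);		/* take a lifetime reference */

	ret = nvkm_object_inc(ref);		/* first user triggers init */
	if (ret == 0)
		nvkm_object_dec(ref, false);	/* last user triggers fini (not a suspend) */

	nvkm_object_ref(NULL, &ref);		/* drop the reference */
	return ret;
}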
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/option.c b/drivers/gpu/drm/nouveau/nvkm/core/option.c index 9f6fcc5f66c2..19d153f8c8fd 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/option.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/option.c | |||
@@ -21,12 +21,11 @@ | |||
21 | * | 21 | * |
22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
23 | */ | 23 | */ |
24 | |||
25 | #include <core/option.h> | 24 | #include <core/option.h> |
26 | #include <core/debug.h> | 25 | #include <core/debug.h> |
27 | 26 | ||
28 | const char * | 27 | const char * |
29 | nouveau_stropt(const char *optstr, const char *opt, int *arglen) | 28 | nvkm_stropt(const char *optstr, const char *opt, int *arglen) |
30 | { | 29 | { |
31 | while (optstr && *optstr != '\0') { | 30 | while (optstr && *optstr != '\0') { |
32 | int len = strcspn(optstr, ",="); | 31 | int len = strcspn(optstr, ",="); |
@@ -52,11 +51,11 @@ nouveau_stropt(const char *optstr, const char *opt, int *arglen) | |||
52 | } | 51 | } |
53 | 52 | ||
54 | bool | 53 | bool |
55 | nouveau_boolopt(const char *optstr, const char *opt, bool value) | 54 | nvkm_boolopt(const char *optstr, const char *opt, bool value) |
56 | { | 55 | { |
57 | int arglen; | 56 | int arglen; |
58 | 57 | ||
59 | optstr = nouveau_stropt(optstr, opt, &arglen); | 58 | optstr = nvkm_stropt(optstr, opt, &arglen); |
60 | if (optstr) { | 59 | if (optstr) { |
61 | if (!strncasecmpz(optstr, "0", arglen) || | 60 | if (!strncasecmpz(optstr, "0", arglen) || |
62 | !strncasecmpz(optstr, "no", arglen) || | 61 | !strncasecmpz(optstr, "no", arglen) || |
@@ -75,7 +74,7 @@ nouveau_boolopt(const char *optstr, const char *opt, bool value) | |||
75 | } | 74 | } |
76 | 75 | ||
77 | int | 76 | int |
78 | nouveau_dbgopt(const char *optstr, const char *sub) | 77 | nvkm_dbgopt(const char *optstr, const char *sub) |
79 | { | 78 | { |
80 | int mode = 1, level = CONFIG_NOUVEAU_DEBUG_DEFAULT; | 79 | int mode = 1, level = CONFIG_NOUVEAU_DEBUG_DEFAULT; |
81 | 80 | ||
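A sketch of how the renamed option helpers are called. The option names used here are invented for illustration; that nvkm_stropt() returns a pointer into the original string (not NUL-terminated) with its length in *arglen is inferred from the parsing loop above, and "PBUS" stands in for a subdev name of the kind passed to nvkm_dbgopt() later in this patch:

#include <core/option.h>

static void
example_options(const char *cfg)
{
	const char *arg;
	int arglen;
	bool enabled;

	/* boolean option, defaulting to true when not present in cfg */
	enabled = nvkm_boolopt(cfg, "NvExampleOpt", true);

	/* string option: arg points into cfg and is arglen bytes long */
	arg = nvkm_stropt(cfg, "NvExampleStr", &arglen);
	if (arg)
		pr_info("NvExampleStr=%.*s enabled=%d\n", arglen, arg, enabled);

	/* per-subdev debug level parsed from the config string */
	(void)nvkm_dbgopt(cfg, "PBUS");
}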
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/parent.c b/drivers/gpu/drm/nouveau/nvkm/core/parent.c index 1f28fa12e98f..dd56cd1eeb38 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/parent.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/parent.c | |||
@@ -21,19 +21,18 @@ | |||
21 | * | 21 | * |
22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
23 | */ | 23 | */ |
24 | |||
25 | #include <core/object.h> | ||
26 | #include <core/parent.h> | 24 | #include <core/parent.h> |
27 | #include <core/client.h> | 25 | #include <core/client.h> |
26 | #include <core/engine.h> | ||
28 | 27 | ||
29 | int | 28 | int |
30 | nouveau_parent_sclass(struct nouveau_object *parent, u16 handle, | 29 | nvkm_parent_sclass(struct nvkm_object *parent, u16 handle, |
31 | struct nouveau_object **pengine, | 30 | struct nvkm_object **pengine, |
32 | struct nouveau_oclass **poclass) | 31 | struct nvkm_oclass **poclass) |
33 | { | 32 | { |
34 | struct nouveau_sclass *sclass; | 33 | struct nvkm_sclass *sclass; |
35 | struct nouveau_engine *engine; | 34 | struct nvkm_engine *engine; |
36 | struct nouveau_oclass *oclass; | 35 | struct nvkm_oclass *oclass; |
37 | u64 mask; | 36 | u64 mask; |
38 | 37 | ||
39 | sclass = nv_parent(parent)->sclass; | 38 | sclass = nv_parent(parent)->sclass; |
@@ -54,7 +53,7 @@ nouveau_parent_sclass(struct nouveau_object *parent, u16 handle, | |||
54 | if (nv_iclass(parent, NV_CLIENT_CLASS)) | 53 | if (nv_iclass(parent, NV_CLIENT_CLASS)) |
55 | engine = nv_engine(nv_client(parent)->device); | 54 | engine = nv_engine(nv_client(parent)->device); |
56 | else | 55 | else |
57 | engine = nouveau_engine(parent, i); | 56 | engine = nvkm_engine(parent, i); |
58 | 57 | ||
59 | if (engine) { | 58 | if (engine) { |
60 | oclass = engine->sclass; | 59 | oclass = engine->sclass; |
@@ -75,11 +74,11 @@ nouveau_parent_sclass(struct nouveau_object *parent, u16 handle, | |||
75 | } | 74 | } |
76 | 75 | ||
77 | int | 76 | int |
78 | nouveau_parent_lclass(struct nouveau_object *parent, u32 *lclass, int size) | 77 | nvkm_parent_lclass(struct nvkm_object *parent, u32 *lclass, int size) |
79 | { | 78 | { |
80 | struct nouveau_sclass *sclass; | 79 | struct nvkm_sclass *sclass; |
81 | struct nouveau_engine *engine; | 80 | struct nvkm_engine *engine; |
82 | struct nouveau_oclass *oclass; | 81 | struct nvkm_oclass *oclass; |
83 | int nr = -1, i; | 82 | int nr = -1, i; |
84 | u64 mask; | 83 | u64 mask; |
85 | 84 | ||
@@ -92,7 +91,7 @@ nouveau_parent_lclass(struct nouveau_object *parent, u32 *lclass, int size) | |||
92 | 91 | ||
93 | mask = nv_parent(parent)->engine; | 92 | mask = nv_parent(parent)->engine; |
94 | while (i = __ffs64(mask), mask) { | 93 | while (i = __ffs64(mask), mask) { |
95 | engine = nouveau_engine(parent, i); | 94 | engine = nvkm_engine(parent, i); |
96 | if (engine && (oclass = engine->sclass)) { | 95 | if (engine && (oclass = engine->sclass)) { |
97 | while (oclass->ofuncs) { | 96 | while (oclass->ofuncs) { |
98 | if (++nr < size) | 97 | if (++nr < size) |
@@ -108,18 +107,17 @@ nouveau_parent_lclass(struct nouveau_object *parent, u32 *lclass, int size) | |||
108 | } | 107 | } |
109 | 108 | ||
110 | int | 109 | int |
111 | nouveau_parent_create_(struct nouveau_object *parent, | 110 | nvkm_parent_create_(struct nvkm_object *parent, struct nvkm_object *engine, |
112 | struct nouveau_object *engine, | 111 | struct nvkm_oclass *oclass, u32 pclass, |
113 | struct nouveau_oclass *oclass, u32 pclass, | 112 | struct nvkm_oclass *sclass, u64 engcls, |
114 | struct nouveau_oclass *sclass, u64 engcls, | 113 | int size, void **pobject) |
115 | int size, void **pobject) | ||
116 | { | 114 | { |
117 | struct nouveau_parent *object; | 115 | struct nvkm_parent *object; |
118 | struct nouveau_sclass *nclass; | 116 | struct nvkm_sclass *nclass; |
119 | int ret; | 117 | int ret; |
120 | 118 | ||
121 | ret = nouveau_object_create_(parent, engine, oclass, pclass | | 119 | ret = nvkm_object_create_(parent, engine, oclass, pclass | |
122 | NV_PARENT_CLASS, size, pobject); | 120 | NV_PARENT_CLASS, size, pobject); |
123 | object = *pobject; | 121 | object = *pobject; |
124 | if (ret) | 122 | if (ret) |
125 | return ret; | 123 | return ret; |
@@ -141,21 +139,21 @@ nouveau_parent_create_(struct nouveau_object *parent, | |||
141 | } | 139 | } |
142 | 140 | ||
143 | void | 141 | void |
144 | nouveau_parent_destroy(struct nouveau_parent *parent) | 142 | nvkm_parent_destroy(struct nvkm_parent *parent) |
145 | { | 143 | { |
146 | struct nouveau_sclass *sclass; | 144 | struct nvkm_sclass *sclass; |
147 | 145 | ||
148 | while ((sclass = parent->sclass)) { | 146 | while ((sclass = parent->sclass)) { |
149 | parent->sclass = sclass->sclass; | 147 | parent->sclass = sclass->sclass; |
150 | kfree(sclass); | 148 | kfree(sclass); |
151 | } | 149 | } |
152 | 150 | ||
153 | nouveau_object_destroy(&parent->object); | 151 | nvkm_object_destroy(&parent->object); |
154 | } | 152 | } |
155 | 153 | ||
156 | 154 | ||
157 | void | 155 | void |
158 | _nouveau_parent_dtor(struct nouveau_object *object) | 156 | _nvkm_parent_dtor(struct nvkm_object *object) |
159 | { | 157 | { |
160 | nouveau_parent_destroy(nv_parent(object)); | 158 | nvkm_parent_destroy(nv_parent(object)); |
161 | } | 159 | } |
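nvkm_parent_lclass() above walks both the static sclass list and the per-engine class lists, filling a caller-supplied array; the return value is the total number of classes found, which can exceed the array size (inferred from the nr/size handling above, since the tail of the function is not shown). example_list_classes() is hypothetical:

#include <core/parent.h>

static void
example_list_classes(struct nvkm_object *parent)
{
	u32 lclass[32];
	int nr, i;

	nr = nvkm_parent_lclass(parent, lclass, ARRAY_SIZE(lclass));
	/* only the first min(nr, 32) entries were actually written */
	for (i = 0; i < nr && i < ARRAY_SIZE(lclass); i++)
		pr_info("class 0x%08x\n", lclass[i]);
}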
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/printk.c b/drivers/gpu/drm/nouveau/nvkm/core/printk.c index ed424f99f383..4a220eb91660 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/printk.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/printk.c | |||
@@ -21,16 +21,14 @@ | |||
21 | * | 21 | * |
22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
23 | */ | 23 | */ |
24 | |||
25 | #include <core/object.h> | ||
26 | #include <core/client.h> | ||
27 | #include <core/subdev.h> | ||
28 | #include <core/printk.h> | 24 | #include <core/printk.h> |
25 | #include <core/client.h> | ||
26 | #include <core/device.h> | ||
29 | 27 | ||
30 | int nv_info_debug_level = NV_DBG_INFO_NORMAL; | 28 | int nv_info_debug_level = NV_DBG_INFO_NORMAL; |
31 | 29 | ||
32 | void | 30 | void |
33 | nv_printk_(struct nouveau_object *object, int level, const char *fmt, ...) | 31 | nv_printk_(struct nvkm_object *object, int level, const char *fmt, ...) |
34 | { | 32 | { |
35 | static const char name[] = { '!', 'E', 'W', ' ', 'D', 'T', 'P', 'S' }; | 33 | static const char name[] = { '!', 'E', 'W', ' ', 'D', 'T', 'P', 'S' }; |
36 | const char *pfx; | 34 | const char *pfx; |
@@ -60,8 +58,8 @@ nv_printk_(struct nouveau_object *object, int level, const char *fmt, ...) | |||
60 | } | 58 | } |
61 | 59 | ||
62 | if (object && !nv_iclass(object, NV_CLIENT_CLASS)) { | 60 | if (object && !nv_iclass(object, NV_CLIENT_CLASS)) { |
63 | struct nouveau_object *device; | 61 | struct nvkm_object *device; |
64 | struct nouveau_object *subdev; | 62 | struct nvkm_object *subdev; |
65 | char obuf[64], *ofmt = ""; | 63 | char obuf[64], *ofmt = ""; |
66 | 64 | ||
67 | if (object->engine == NULL) { | 65 | if (object->engine == NULL) { |
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c index 25cd6f29072b..0a382d0975b0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c | |||
@@ -19,14 +19,12 @@ | |||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
20 | * OTHER DEALINGS IN THE SOFTWARE. | 20 | * OTHER DEALINGS IN THE SOFTWARE. |
21 | */ | 21 | */ |
22 | |||
23 | #include <core/object.h> | ||
24 | #include <core/ramht.h> | 22 | #include <core/ramht.h> |
25 | 23 | ||
26 | #include <subdev/bar.h> | 24 | #include <subdev/bar.h> |
27 | 25 | ||
28 | static u32 | 26 | static u32 |
29 | nouveau_ramht_hash(struct nouveau_ramht *ramht, int chid, u32 handle) | 27 | nvkm_ramht_hash(struct nvkm_ramht *ramht, int chid, u32 handle) |
30 | { | 28 | { |
31 | u32 hash = 0; | 29 | u32 hash = 0; |
32 | 30 | ||
@@ -41,13 +39,12 @@ nouveau_ramht_hash(struct nouveau_ramht *ramht, int chid, u32 handle) | |||
41 | } | 39 | } |
42 | 40 | ||
43 | int | 41 | int |
44 | nouveau_ramht_insert(struct nouveau_ramht *ramht, int chid, | 42 | nvkm_ramht_insert(struct nvkm_ramht *ramht, int chid, u32 handle, u32 context) |
45 | u32 handle, u32 context) | ||
46 | { | 43 | { |
47 | struct nouveau_bar *bar = nouveau_bar(ramht); | 44 | struct nvkm_bar *bar = nvkm_bar(ramht); |
48 | u32 co, ho; | 45 | u32 co, ho; |
49 | 46 | ||
50 | co = ho = nouveau_ramht_hash(ramht, chid, handle); | 47 | co = ho = nvkm_ramht_hash(ramht, chid, handle); |
51 | do { | 48 | do { |
52 | if (!nv_ro32(ramht, co + 4)) { | 49 | if (!nv_ro32(ramht, co + 4)) { |
53 | nv_wo32(ramht, co + 0, handle); | 50 | nv_wo32(ramht, co + 0, handle); |
@@ -66,39 +63,39 @@ nouveau_ramht_insert(struct nouveau_ramht *ramht, int chid, | |||
66 | } | 63 | } |
67 | 64 | ||
68 | void | 65 | void |
69 | nouveau_ramht_remove(struct nouveau_ramht *ramht, int cookie) | 66 | nvkm_ramht_remove(struct nvkm_ramht *ramht, int cookie) |
70 | { | 67 | { |
71 | struct nouveau_bar *bar = nouveau_bar(ramht); | 68 | struct nvkm_bar *bar = nvkm_bar(ramht); |
72 | nv_wo32(ramht, cookie + 0, 0x00000000); | 69 | nv_wo32(ramht, cookie + 0, 0x00000000); |
73 | nv_wo32(ramht, cookie + 4, 0x00000000); | 70 | nv_wo32(ramht, cookie + 4, 0x00000000); |
74 | if (bar) | 71 | if (bar) |
75 | bar->flush(bar); | 72 | bar->flush(bar); |
76 | } | 73 | } |
77 | 74 | ||
78 | static struct nouveau_oclass | 75 | static struct nvkm_oclass |
79 | nouveau_ramht_oclass = { | 76 | nvkm_ramht_oclass = { |
80 | .handle = 0x0000abcd, | 77 | .handle = 0x0000abcd, |
81 | .ofuncs = &(struct nouveau_ofuncs) { | 78 | .ofuncs = &(struct nvkm_ofuncs) { |
82 | .ctor = NULL, | 79 | .ctor = NULL, |
83 | .dtor = _nouveau_gpuobj_dtor, | 80 | .dtor = _nvkm_gpuobj_dtor, |
84 | .init = _nouveau_gpuobj_init, | 81 | .init = _nvkm_gpuobj_init, |
85 | .fini = _nouveau_gpuobj_fini, | 82 | .fini = _nvkm_gpuobj_fini, |
86 | .rd32 = _nouveau_gpuobj_rd32, | 83 | .rd32 = _nvkm_gpuobj_rd32, |
87 | .wr32 = _nouveau_gpuobj_wr32, | 84 | .wr32 = _nvkm_gpuobj_wr32, |
88 | }, | 85 | }, |
89 | }; | 86 | }; |
90 | 87 | ||
91 | int | 88 | int |
92 | nouveau_ramht_new(struct nouveau_object *parent, struct nouveau_object *pargpu, | 89 | nvkm_ramht_new(struct nvkm_object *parent, struct nvkm_object *pargpu, |
93 | u32 size, u32 align, struct nouveau_ramht **pramht) | 90 | u32 size, u32 align, struct nvkm_ramht **pramht) |
94 | { | 91 | { |
95 | struct nouveau_ramht *ramht; | 92 | struct nvkm_ramht *ramht; |
96 | int ret; | 93 | int ret; |
97 | 94 | ||
98 | ret = nouveau_gpuobj_create(parent, parent->engine ? | 95 | ret = nvkm_gpuobj_create(parent, parent->engine ? |
99 | &parent->engine->subdev.object : parent, /* <nv50 ramht */ | 96 | &parent->engine->subdev.object : parent, /* <nv50 ramht */ |
100 | &nouveau_ramht_oclass, 0, pargpu, size, | 97 | &nvkm_ramht_oclass, 0, pargpu, size, |
101 | align, NVOBJ_FLAG_ZERO_ALLOC, &ramht); | 98 | align, NVOBJ_FLAG_ZERO_ALLOC, &ramht); |
102 | *pramht = ramht; | 99 | *pramht = ramht; |
103 | if (ret) | 100 | if (ret) |
104 | return ret; | 101 | return ret; |
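A sketch of the RAMHT calling pattern. That nvkm_ramht_insert() returns the written table offset on success and a negative errno when the table is full is inferred from nvkm_ramht_remove() taking that value back as its cookie (the tail of insert is not shown here); example_ramht() and the 0x8000/16 sizing are arbitrary, and teardown of the ramht object is omitted:

#include <core/ramht.h>

static int
example_ramht(struct nvkm_object *parent, struct nvkm_object *pargpu,
	      int chid, u32 handle, u32 context)
{
	struct nvkm_ramht *ramht;
	int ret, cookie;

	/* hash table backed by a zero-filled gpuobj inside pargpu */
	ret = nvkm_ramht_new(parent, pargpu, 0x8000, 16, &ramht);
	if (ret)
		return ret;

	cookie = nvkm_ramht_insert(ramht, chid, handle, context);
	if (cookie >= 0)
		nvkm_ramht_remove(ramht, cookie);

	/* release of the underlying gpuobj is not shown in this sketch */
	return cookie < 0 ? cookie : 0;
}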
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c index cb1c49c2e9d6..c5fb3a793174 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c | |||
@@ -21,16 +21,14 @@ | |||
21 | * | 21 | * |
22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
23 | */ | 23 | */ |
24 | |||
25 | #include <core/object.h> | ||
26 | #include <core/subdev.h> | 24 | #include <core/subdev.h> |
27 | #include <core/device.h> | 25 | #include <core/device.h> |
28 | #include <core/option.h> | 26 | #include <core/option.h> |
29 | 27 | ||
30 | struct nouveau_subdev * | 28 | struct nvkm_subdev * |
31 | nouveau_subdev(void *obj, int idx) | 29 | nvkm_subdev(void *obj, int idx) |
32 | { | 30 | { |
33 | struct nouveau_object *object = nv_object(obj); | 31 | struct nvkm_object *object = nv_object(obj); |
34 | while (object && !nv_iclass(object, NV_SUBDEV_CLASS)) | 32 | while (object && !nv_iclass(object, NV_SUBDEV_CLASS)) |
35 | object = object->parent; | 33 | object = object->parent; |
36 | if (object == NULL || nv_subidx(nv_subdev(object)) != idx) | 34 | if (object == NULL || nv_subidx(nv_subdev(object)) != idx) |
@@ -39,7 +37,7 @@ nouveau_subdev(void *obj, int idx) | |||
39 | } | 37 | } |
40 | 38 | ||
41 | void | 39 | void |
42 | nouveau_subdev_reset(struct nouveau_object *subdev) | 40 | nvkm_subdev_reset(struct nvkm_object *subdev) |
43 | { | 41 | { |
44 | nv_trace(subdev, "resetting...\n"); | 42 | nv_trace(subdev, "resetting...\n"); |
45 | nv_ofuncs(subdev)->fini(subdev, false); | 43 | nv_ofuncs(subdev)->fini(subdev, false); |
@@ -47,65 +45,64 @@ nouveau_subdev_reset(struct nouveau_object *subdev) | |||
47 | } | 45 | } |
48 | 46 | ||
49 | int | 47 | int |
50 | nouveau_subdev_init(struct nouveau_subdev *subdev) | 48 | nvkm_subdev_init(struct nvkm_subdev *subdev) |
51 | { | 49 | { |
52 | int ret = nouveau_object_init(&subdev->object); | 50 | int ret = nvkm_object_init(&subdev->object); |
53 | if (ret) | 51 | if (ret) |
54 | return ret; | 52 | return ret; |
55 | 53 | ||
56 | nouveau_subdev_reset(&subdev->object); | 54 | nvkm_subdev_reset(&subdev->object); |
57 | return 0; | 55 | return 0; |
58 | } | 56 | } |
59 | 57 | ||
60 | int | 58 | int |
61 | _nouveau_subdev_init(struct nouveau_object *object) | 59 | _nvkm_subdev_init(struct nvkm_object *object) |
62 | { | 60 | { |
63 | return nouveau_subdev_init(nv_subdev(object)); | 61 | return nvkm_subdev_init(nv_subdev(object)); |
64 | } | 62 | } |
65 | 63 | ||
66 | int | 64 | int |
67 | nouveau_subdev_fini(struct nouveau_subdev *subdev, bool suspend) | 65 | nvkm_subdev_fini(struct nvkm_subdev *subdev, bool suspend) |
68 | { | 66 | { |
69 | if (subdev->unit) { | 67 | if (subdev->unit) { |
70 | nv_mask(subdev, 0x000200, subdev->unit, 0x00000000); | 68 | nv_mask(subdev, 0x000200, subdev->unit, 0x00000000); |
71 | nv_mask(subdev, 0x000200, subdev->unit, subdev->unit); | 69 | nv_mask(subdev, 0x000200, subdev->unit, subdev->unit); |
72 | } | 70 | } |
73 | 71 | ||
74 | return nouveau_object_fini(&subdev->object, suspend); | 72 | return nvkm_object_fini(&subdev->object, suspend); |
75 | } | 73 | } |
76 | 74 | ||
77 | int | 75 | int |
78 | _nouveau_subdev_fini(struct nouveau_object *object, bool suspend) | 76 | _nvkm_subdev_fini(struct nvkm_object *object, bool suspend) |
79 | { | 77 | { |
80 | return nouveau_subdev_fini(nv_subdev(object), suspend); | 78 | return nvkm_subdev_fini(nv_subdev(object), suspend); |
81 | } | 79 | } |
82 | 80 | ||
83 | void | 81 | void |
84 | nouveau_subdev_destroy(struct nouveau_subdev *subdev) | 82 | nvkm_subdev_destroy(struct nvkm_subdev *subdev) |
85 | { | 83 | { |
86 | int subidx = nv_hclass(subdev) & 0xff; | 84 | int subidx = nv_hclass(subdev) & 0xff; |
87 | nv_device(subdev)->subdev[subidx] = NULL; | 85 | nv_device(subdev)->subdev[subidx] = NULL; |
88 | nouveau_object_destroy(&subdev->object); | 86 | nvkm_object_destroy(&subdev->object); |
89 | } | 87 | } |
90 | 88 | ||
91 | void | 89 | void |
92 | _nouveau_subdev_dtor(struct nouveau_object *object) | 90 | _nvkm_subdev_dtor(struct nvkm_object *object) |
93 | { | 91 | { |
94 | nouveau_subdev_destroy(nv_subdev(object)); | 92 | nvkm_subdev_destroy(nv_subdev(object)); |
95 | } | 93 | } |
96 | 94 | ||
97 | int | 95 | int |
98 | nouveau_subdev_create_(struct nouveau_object *parent, | 96 | nvkm_subdev_create_(struct nvkm_object *parent, struct nvkm_object *engine, |
99 | struct nouveau_object *engine, | 97 | struct nvkm_oclass *oclass, u32 pclass, |
100 | struct nouveau_oclass *oclass, u32 pclass, | 98 | const char *subname, const char *sysname, |
101 | const char *subname, const char *sysname, | 99 | int size, void **pobject) |
102 | int size, void **pobject) | ||
103 | { | 100 | { |
104 | struct nouveau_subdev *subdev; | 101 | struct nvkm_subdev *subdev; |
105 | int ret; | 102 | int ret; |
106 | 103 | ||
107 | ret = nouveau_object_create_(parent, engine, oclass, pclass | | 104 | ret = nvkm_object_create_(parent, engine, oclass, pclass | |
108 | NV_SUBDEV_CLASS, size, pobject); | 105 | NV_SUBDEV_CLASS, size, pobject); |
109 | subdev = *pobject; | 106 | subdev = *pobject; |
110 | if (ret) | 107 | if (ret) |
111 | return ret; | 108 | return ret; |
@@ -114,8 +111,8 @@ nouveau_subdev_create_(struct nouveau_object *parent, | |||
114 | subdev->name = subname; | 111 | subdev->name = subname; |
115 | 112 | ||
116 | if (parent) { | 113 | if (parent) { |
117 | struct nouveau_device *device = nv_device(parent); | 114 | struct nvkm_device *device = nv_device(parent); |
118 | subdev->debug = nouveau_dbgopt(device->dbgopt, subname); | 115 | subdev->debug = nvkm_dbgopt(device->dbgopt, subname); |
119 | subdev->mmio = nv_subdev(device)->mmio; | 116 | subdev->mmio = nv_subdev(device)->mmio; |
120 | } | 117 | } |
121 | 118 | ||
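The init/fini pair above is what gets called around subdev bring-up and suspend: nvkm_subdev_init() runs the base object init and then nvkm_subdev_reset(), while nvkm_subdev_fini() pulses the subdev's unit bit in register 0x000200 (when one is set) before handing off to nvkm_object_fini(). A sketch of a suspend/resume cycle; example_suspend_resume() is hypothetical and the suspend-abort semantics are inferred from the object code earlier in this patch:

#include <core/subdev.h>

static int
example_suspend_resume(struct nvkm_subdev *subdev)
{
	int ret;

	/* suspend=true: the subdev may fail and abort the suspend */
	ret = nvkm_subdev_fini(subdev, true);
	if (ret)
		return ret;

	/* ... system sleeps ... */

	/* re-init, which also performs the reset sequence shown above */
	return nvkm_subdev_init(subdev);
}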
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/nv84.c b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/nv84.c index 1e8e75c0684a..649922d983ac 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/nv84.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/nv84.c | |||
@@ -25,6 +25,8 @@ | |||
25 | #include <engine/xtensa.h> | 25 | #include <engine/xtensa.h> |
26 | #include <engine/bsp.h> | 26 | #include <engine/bsp.h> |
27 | 27 | ||
28 | #include <core/engctx.h> | ||
29 | |||
28 | /******************************************************************************* | 30 | /******************************************************************************* |
29 | * BSP object classes | 31 | * BSP object classes |
30 | ******************************************************************************/ | 32 | ******************************************************************************/ |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c index 60f538976f3b..ae2ad8b4e5df 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | |||
@@ -22,6 +22,7 @@ | |||
22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/notify.h> | ||
25 | #include <core/object.h> | 26 | #include <core/object.h> |
26 | #include <core/device.h> | 27 | #include <core/device.h> |
27 | #include <core/client.h> | 28 | #include <core/client.h> |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h index a1bf359e53bd..4a92d9fa1d78 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h | |||
@@ -1,8 +1,9 @@ | |||
1 | #ifndef __NVKM_DISP_CONN_H__ | 1 | #ifndef __NVKM_DISP_CONN_H__ |
2 | #define __NVKM_DISP_CONN_H__ | 2 | #define __NVKM_DISP_CONN_H__ |
3 | |||
4 | #include "priv.h" | 3 | #include "priv.h" |
5 | 4 | ||
5 | #include <core/notify.h> | ||
6 | |||
6 | struct nvkm_connector { | 7 | struct nvkm_connector { |
7 | struct nouveau_object base; | 8 | struct nouveau_object base; |
8 | struct list_head head; | 9 | struct list_head head; |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h index 1fac367cc867..74ef058ff051 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h | |||
@@ -1,6 +1,7 @@ | |||
1 | #ifndef __NVKM_DISP_OUTP_DP_H__ | 1 | #ifndef __NVKM_DISP_OUTP_DP_H__ |
2 | #define __NVKM_DISP_OUTP_DP_H__ | 2 | #define __NVKM_DISP_OUTP_DP_H__ |
3 | 3 | ||
4 | #include <core/notify.h> | ||
4 | #include <subdev/bios.h> | 5 | #include <subdev/bios.h> |
5 | #include <subdev/bios/dp.h> | 6 | #include <subdev/bios/dp.h> |
6 | 7 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c index 2914646c8709..f6e9ae95c822 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c | |||
@@ -19,14 +19,14 @@ | |||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
20 | * OTHER DEALINGS IN THE SOFTWARE. | 20 | * OTHER DEALINGS IN THE SOFTWARE. |
21 | */ | 21 | */ |
22 | |||
23 | #include <engine/falcon.h> | 22 | #include <engine/falcon.h> |
23 | |||
24 | #include <subdev/timer.h> | 24 | #include <subdev/timer.h> |
25 | 25 | ||
26 | void | 26 | void |
27 | nouveau_falcon_intr(struct nouveau_subdev *subdev) | 27 | nvkm_falcon_intr(struct nvkm_subdev *subdev) |
28 | { | 28 | { |
29 | struct nouveau_falcon *falcon = (void *)subdev; | 29 | struct nvkm_falcon *falcon = (void *)subdev; |
30 | u32 dispatch = nv_ro32(falcon, 0x01c); | 30 | u32 dispatch = nv_ro32(falcon, 0x01c); |
31 | u32 intr = nv_ro32(falcon, 0x008) & dispatch & ~(dispatch >> 16); | 31 | u32 intr = nv_ro32(falcon, 0x008) & dispatch & ~(dispatch >> 16); |
32 | 32 | ||
@@ -43,16 +43,16 @@ nouveau_falcon_intr(struct nouveau_subdev *subdev) | |||
43 | } | 43 | } |
44 | 44 | ||
45 | u32 | 45 | u32 |
46 | _nouveau_falcon_rd32(struct nouveau_object *object, u64 addr) | 46 | _nvkm_falcon_rd32(struct nvkm_object *object, u64 addr) |
47 | { | 47 | { |
48 | struct nouveau_falcon *falcon = (void *)object; | 48 | struct nvkm_falcon *falcon = (void *)object; |
49 | return nv_rd32(falcon, falcon->addr + addr); | 49 | return nv_rd32(falcon, falcon->addr + addr); |
50 | } | 50 | } |
51 | 51 | ||
52 | void | 52 | void |
53 | _nouveau_falcon_wr32(struct nouveau_object *object, u64 addr, u32 data) | 53 | _nvkm_falcon_wr32(struct nvkm_object *object, u64 addr, u32 data) |
54 | { | 54 | { |
55 | struct nouveau_falcon *falcon = (void *)object; | 55 | struct nvkm_falcon *falcon = (void *)object; |
56 | nv_wr32(falcon, falcon->addr + addr, data); | 56 | nv_wr32(falcon, falcon->addr + addr, data); |
57 | } | 57 | } |
58 | 58 | ||
@@ -67,17 +67,17 @@ vmemdup(const void *src, size_t len) | |||
67 | } | 67 | } |
68 | 68 | ||
69 | int | 69 | int |
70 | _nouveau_falcon_init(struct nouveau_object *object) | 70 | _nvkm_falcon_init(struct nvkm_object *object) |
71 | { | 71 | { |
72 | struct nouveau_device *device = nv_device(object); | 72 | struct nvkm_device *device = nv_device(object); |
73 | struct nouveau_falcon *falcon = (void *)object; | 73 | struct nvkm_falcon *falcon = (void *)object; |
74 | const struct firmware *fw; | 74 | const struct firmware *fw; |
75 | char name[32] = "internal"; | 75 | char name[32] = "internal"; |
76 | int ret, i; | 76 | int ret, i; |
77 | u32 caps; | 77 | u32 caps; |
78 | 78 | ||
79 | /* enable engine, and determine its capabilities */ | 79 | /* enable engine, and determine its capabilities */ |
80 | ret = nouveau_engine_init(&falcon->base); | 80 | ret = nvkm_engine_init(&falcon->base); |
81 | if (ret) | 81 | if (ret) |
82 | return ret; | 82 | return ret; |
83 | 83 | ||
@@ -171,9 +171,8 @@ _nouveau_falcon_init(struct nouveau_object *object) | |||
171 | 171 | ||
172 | /* ensure any "self-bootstrapping" firmware image is in vram */ | 172 | /* ensure any "self-bootstrapping" firmware image is in vram */ |
173 | if (!falcon->data.data && !falcon->core) { | 173 | if (!falcon->data.data && !falcon->core) { |
174 | ret = nouveau_gpuobj_new(object->parent, NULL, | 174 | ret = nvkm_gpuobj_new(object->parent, NULL, falcon->code.size, |
175 | falcon->code.size, 256, 0, | 175 | 256, 0, &falcon->core); |
176 | &falcon->core); | ||
177 | if (ret) { | 176 | if (ret) { |
178 | nv_error(falcon, "core allocation failed, %d\n", ret); | 177 | nv_error(falcon, "core allocation failed, %d\n", ret); |
179 | return ret; | 178 | return ret; |
@@ -238,12 +237,12 @@ _nouveau_falcon_init(struct nouveau_object *object) | |||
238 | } | 237 | } |
239 | 238 | ||
240 | int | 239 | int |
241 | _nouveau_falcon_fini(struct nouveau_object *object, bool suspend) | 240 | _nvkm_falcon_fini(struct nvkm_object *object, bool suspend) |
242 | { | 241 | { |
243 | struct nouveau_falcon *falcon = (void *)object; | 242 | struct nvkm_falcon *falcon = (void *)object; |
244 | 243 | ||
245 | if (!suspend) { | 244 | if (!suspend) { |
246 | nouveau_gpuobj_ref(NULL, &falcon->core); | 245 | nvkm_gpuobj_ref(NULL, &falcon->core); |
247 | if (falcon->external) { | 246 | if (falcon->external) { |
248 | vfree(falcon->data.data); | 247 | vfree(falcon->data.data); |
249 | vfree(falcon->code.data); | 248 | vfree(falcon->code.data); |
@@ -254,21 +253,20 @@ _nouveau_falcon_fini(struct nouveau_object *object, bool suspend) | |||
254 | nv_mo32(falcon, 0x048, 0x00000003, 0x00000000); | 253 | nv_mo32(falcon, 0x048, 0x00000003, 0x00000000); |
255 | nv_wo32(falcon, 0x014, 0xffffffff); | 254 | nv_wo32(falcon, 0x014, 0xffffffff); |
256 | 255 | ||
257 | return nouveau_engine_fini(&falcon->base, suspend); | 256 | return nvkm_engine_fini(&falcon->base, suspend); |
258 | } | 257 | } |
259 | 258 | ||
260 | int | 259 | int |
261 | nouveau_falcon_create_(struct nouveau_object *parent, | 260 | nvkm_falcon_create_(struct nvkm_object *parent, struct nvkm_object *engine, |
262 | struct nouveau_object *engine, | 261 | struct nvkm_oclass *oclass, u32 addr, bool enable, |
263 | struct nouveau_oclass *oclass, u32 addr, bool enable, | 262 | const char *iname, const char *fname, |
264 | const char *iname, const char *fname, | 263 | int length, void **pobject) |
265 | int length, void **pobject) | ||
266 | { | 264 | { |
267 | struct nouveau_falcon *falcon; | 265 | struct nvkm_falcon *falcon; |
268 | int ret; | 266 | int ret; |
269 | 267 | ||
270 | ret = nouveau_engine_create_(parent, engine, oclass, enable, iname, | 268 | ret = nvkm_engine_create_(parent, engine, oclass, enable, iname, |
271 | fname, length, pobject); | 269 | fname, length, pobject); |
272 | falcon = *pobject; | 270 | falcon = *pobject; |
273 | if (ret) | 271 | if (ret) |
274 | return ret; | 272 | return ret; |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c index 836c6d32e82e..7e54a521bc05 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c | |||
@@ -23,6 +23,8 @@ | |||
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/client.h> | 25 | #include <core/client.h> |
26 | #include <core/device.h> | ||
27 | #include <core/notify.h> | ||
26 | #include <core/object.h> | 28 | #include <core/object.h> |
27 | #include <core/handle.h> | 29 | #include <core/handle.h> |
28 | #include <core/event.h> | 30 | #include <core/event.h> |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h index eb39af055bbb..a5cdb43524cb 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h | |||
@@ -1,7 +1,7 @@ | |||
1 | #ifndef __NV50_GR_H__ | 1 | #ifndef __NV50_GR_H__ |
2 | #define __NV50_GR_H__ | 2 | #define __NV50_GR_H__ |
3 | struct nvkm_device; | ||
3 | 4 | ||
4 | int nv50_grctx_init(struct nouveau_device *, u32 *size); | 5 | int nv50_grctx_init(struct nouveau_device *, u32 *size); |
5 | void nv50_grctx_fill(struct nouveau_device *, struct nouveau_gpuobj *); | 6 | void nv50_grctx_fill(struct nouveau_device *, struct nouveau_gpuobj *); |
6 | |||
7 | #endif | 7 | #endif |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h index 618e41fa36d0..fa0a236004d6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h | |||
@@ -1,7 +1,7 @@ | |||
1 | #ifndef __NVKM_SW_NV50_H__ | 1 | #ifndef __NVKM_SW_NV50_H__ |
2 | #define __NVKM_SW_NV50_H__ | 2 | #define __NVKM_SW_NV50_H__ |
3 | |||
4 | #include <engine/sw.h> | 3 | #include <engine/sw.h> |
4 | #include <core/notify.h> | ||
5 | 5 | ||
6 | struct nv50_sw_oclass { | 6 | struct nv50_sw_oclass { |
7 | struct nouveau_oclass base; | 7 | struct nouveau_oclass base; |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/vp/nv84.c b/drivers/gpu/drm/nouveau/nvkm/engine/vp/nv84.c index fd6272b8cdb2..9caa037b7a6b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/vp/nv84.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/vp/nv84.c | |||
@@ -25,6 +25,8 @@ | |||
25 | #include <engine/xtensa.h> | 25 | #include <engine/xtensa.h> |
26 | #include <engine/vp.h> | 26 | #include <engine/vp.h> |
27 | 27 | ||
28 | #include <core/engctx.h> | ||
29 | |||
28 | /******************************************************************************* | 30 | /******************************************************************************* |
29 | * VP object classes | 31 | * VP object classes |
30 | ******************************************************************************/ | 32 | ******************************************************************************/ |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c b/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c index 92384759d2f5..3995d2cf0668 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c | |||
@@ -19,43 +19,42 @@ | |||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
20 | * OTHER DEALINGS IN THE SOFTWARE. | 20 | * OTHER DEALINGS IN THE SOFTWARE. |
21 | */ | 21 | */ |
22 | |||
23 | #include <engine/xtensa.h> | 22 | #include <engine/xtensa.h> |
24 | 23 | ||
24 | #include <core/engctx.h> | ||
25 | |||
25 | u32 | 26 | u32 |
26 | _nouveau_xtensa_rd32(struct nouveau_object *object, u64 addr) | 27 | _nvkm_xtensa_rd32(struct nvkm_object *object, u64 addr) |
27 | { | 28 | { |
28 | struct nouveau_xtensa *xtensa = (void *)object; | 29 | struct nvkm_xtensa *xtensa = (void *)object; |
29 | return nv_rd32(xtensa, xtensa->addr + addr); | 30 | return nv_rd32(xtensa, xtensa->addr + addr); |
30 | } | 31 | } |
31 | 32 | ||
32 | void | 33 | void |
33 | _nouveau_xtensa_wr32(struct nouveau_object *object, u64 addr, u32 data) | 34 | _nvkm_xtensa_wr32(struct nvkm_object *object, u64 addr, u32 data) |
34 | { | 35 | { |
35 | struct nouveau_xtensa *xtensa = (void *)object; | 36 | struct nvkm_xtensa *xtensa = (void *)object; |
36 | nv_wr32(xtensa, xtensa->addr + addr, data); | 37 | nv_wr32(xtensa, xtensa->addr + addr, data); |
37 | } | 38 | } |
38 | 39 | ||
39 | int | 40 | int |
40 | _nouveau_xtensa_engctx_ctor(struct nouveau_object *parent, | 41 | _nvkm_xtensa_engctx_ctor(struct nvkm_object *parent, struct nvkm_object *engine, |
41 | struct nouveau_object *engine, | 42 | struct nvkm_oclass *oclass, void *data, u32 size, |
42 | struct nouveau_oclass *oclass, void *data, u32 size, | 43 | struct nvkm_object **pobject) |
43 | struct nouveau_object **pobject) | ||
44 | { | 44 | { |
45 | struct nouveau_engctx *engctx; | 45 | struct nvkm_engctx *engctx; |
46 | int ret; | 46 | int ret; |
47 | 47 | ||
48 | ret = nouveau_engctx_create(parent, engine, oclass, NULL, | 48 | ret = nvkm_engctx_create(parent, engine, oclass, NULL, 0x10000, 0x1000, |
49 | 0x10000, 0x1000, | 49 | NVOBJ_FLAG_ZERO_ALLOC, &engctx); |
50 | NVOBJ_FLAG_ZERO_ALLOC, &engctx); | ||
51 | *pobject = nv_object(engctx); | 50 | *pobject = nv_object(engctx); |
52 | return ret; | 51 | return ret; |
53 | } | 52 | } |
54 | 53 | ||
55 | void | 54 | void |
56 | _nouveau_xtensa_intr(struct nouveau_subdev *subdev) | 55 | _nvkm_xtensa_intr(struct nvkm_subdev *subdev) |
57 | { | 56 | { |
58 | struct nouveau_xtensa *xtensa = (void *)subdev; | 57 | struct nvkm_xtensa *xtensa = (void *)subdev; |
59 | u32 unk104 = nv_ro32(xtensa, 0xd04); | 58 | u32 unk104 = nv_ro32(xtensa, 0xd04); |
60 | u32 intr = nv_ro32(xtensa, 0xc20); | 59 | u32 intr = nv_ro32(xtensa, 0xc20); |
61 | u32 chan = nv_ro32(xtensa, 0xc28); | 60 | u32 chan = nv_ro32(xtensa, 0xc28); |
@@ -72,39 +71,36 @@ _nouveau_xtensa_intr(struct nouveau_subdev *subdev) | |||
72 | } | 71 | } |
73 | 72 | ||
74 | int | 73 | int |
75 | nouveau_xtensa_create_(struct nouveau_object *parent, | 74 | nvkm_xtensa_create_(struct nvkm_object *parent, struct nvkm_object *engine, |
76 | struct nouveau_object *engine, | 75 | struct nvkm_oclass *oclass, u32 addr, bool enable, |
77 | struct nouveau_oclass *oclass, u32 addr, bool enable, | 76 | const char *iname, const char *fname, |
78 | const char *iname, const char *fname, | 77 | int length, void **pobject) |
79 | int length, void **pobject) | ||
80 | { | 78 | { |
81 | struct nouveau_xtensa *xtensa; | 79 | struct nvkm_xtensa *xtensa; |
82 | int ret; | 80 | int ret; |
83 | 81 | ||
84 | ret = nouveau_engine_create_(parent, engine, oclass, enable, iname, | 82 | ret = nvkm_engine_create_(parent, engine, oclass, enable, iname, |
85 | fname, length, pobject); | 83 | fname, length, pobject); |
86 | xtensa = *pobject; | 84 | xtensa = *pobject; |
87 | if (ret) | 85 | if (ret) |
88 | return ret; | 86 | return ret; |
89 | 87 | ||
90 | nv_subdev(xtensa)->intr = _nouveau_xtensa_intr; | 88 | nv_subdev(xtensa)->intr = _nvkm_xtensa_intr; |
91 | |||
92 | xtensa->addr = addr; | 89 | xtensa->addr = addr; |
93 | |||
94 | return 0; | 90 | return 0; |
95 | } | 91 | } |
96 | 92 | ||
97 | int | 93 | int |
98 | _nouveau_xtensa_init(struct nouveau_object *object) | 94 | _nvkm_xtensa_init(struct nvkm_object *object) |
99 | { | 95 | { |
100 | struct nouveau_device *device = nv_device(object); | 96 | struct nvkm_device *device = nv_device(object); |
101 | struct nouveau_xtensa *xtensa = (void *)object; | 97 | struct nvkm_xtensa *xtensa = (void *)object; |
102 | const struct firmware *fw; | 98 | const struct firmware *fw; |
103 | char name[32]; | 99 | char name[32]; |
104 | int i, ret; | 100 | int i, ret; |
105 | u32 tmp; | 101 | u32 tmp; |
106 | 102 | ||
107 | ret = nouveau_engine_init(&xtensa->base); | 103 | ret = nvkm_engine_init(&xtensa->base); |
108 | if (ret) | 104 | if (ret) |
109 | return ret; | 105 | return ret; |
110 | 106 | ||
@@ -124,8 +120,8 @@ _nouveau_xtensa_init(struct nouveau_object *object) | |||
124 | return -EINVAL; | 120 | return -EINVAL; |
125 | } | 121 | } |
126 | 122 | ||
127 | ret = nouveau_gpuobj_new(object, NULL, 0x40000, 0x1000, 0, | 123 | ret = nvkm_gpuobj_new(object, NULL, 0x40000, 0x1000, 0, |
128 | &xtensa->gpu_fw); | 124 | &xtensa->gpu_fw); |
129 | if (ret) { | 125 | if (ret) { |
130 | release_firmware(fw); | 126 | release_firmware(fw); |
131 | return ret; | 127 | return ret; |
@@ -157,20 +153,19 @@ _nouveau_xtensa_init(struct nouveau_object *object) | |||
157 | 153 | ||
158 | nv_wo32(xtensa, 0xc20, 0x3f); /* INTR */ | 154 | nv_wo32(xtensa, 0xc20, 0x3f); /* INTR */ |
159 | nv_wo32(xtensa, 0xd84, 0x3f); /* INTR_EN */ | 155 | nv_wo32(xtensa, 0xd84, 0x3f); /* INTR_EN */ |
160 | |||
161 | return 0; | 156 | return 0; |
162 | } | 157 | } |
163 | 158 | ||
164 | int | 159 | int |
165 | _nouveau_xtensa_fini(struct nouveau_object *object, bool suspend) | 160 | _nvkm_xtensa_fini(struct nvkm_object *object, bool suspend) |
166 | { | 161 | { |
167 | struct nouveau_xtensa *xtensa = (void *)object; | 162 | struct nvkm_xtensa *xtensa = (void *)object; |
168 | 163 | ||
169 | nv_wo32(xtensa, 0xd84, 0); /* INTR_EN */ | 164 | nv_wo32(xtensa, 0xd84, 0); /* INTR_EN */ |
170 | nv_wo32(xtensa, 0xd94, 0); /* FIFO_CTRL */ | 165 | nv_wo32(xtensa, 0xd94, 0); /* FIFO_CTRL */ |
171 | 166 | ||
172 | if (!suspend) | 167 | if (!suspend) |
173 | nouveau_gpuobj_ref(NULL, &xtensa->gpu_fw); | 168 | nvkm_gpuobj_ref(NULL, &xtensa->gpu_fw); |
174 | 169 | ||
175 | return nouveau_engine_fini(&xtensa->base, suspend); | 170 | return nvkm_engine_fini(&xtensa->base, suspend); |
176 | } | 171 | } |
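Note (editor): the xtensa init/fini pair above keeps the same firmware lifecycle through the rename: request the blob, copy it into the freshly allocated 0x40000-byte gpuobj, release the kernel copy, and drop the gpuobj reference on full teardown (the reference is kept across suspend). A stripped-down sketch of that request/release pattern using the standard kernel firmware API; the firmware name and device pointer below are hypothetical, not the driver's real ones.

	/* Illustrative only. request_firmware()/release_firmware() are the
	 * stock kernel interfaces used by the hunk above. */
	#include <linux/firmware.h>
	#include <linux/device.h>

	static int
	example_load_ucode(struct device *dev)
	{
		const struct firmware *fw;
		int ret;

		ret = request_firmware(&fw, "nouveau/example_xuc", dev);
		if (ret)
			return ret;	/* blob missing or load failed */

		/* ... copy fw->data (fw->size bytes) into engine-visible
		 * memory, e.g. the gpuobj allocated in _nvkm_xtensa_init() ... */

		release_firmware(fw);	/* kernel copy no longer needed */
		return 0;
	}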
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c index 7ad99b763f4c..98d79298cb07 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c | |||
@@ -22,6 +22,7 @@ | |||
22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/notify.h> | ||
25 | #include <subdev/bios.h> | 26 | #include <subdev/bios.h> |
26 | #include <subdev/bios/gpio.h> | 27 | #include <subdev/bios/gpio.h> |
27 | 28 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c index d1f06e9aaca2..bd477cd8a919 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c | |||
@@ -22,6 +22,7 @@ | |||
22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/notify.h> | ||
25 | #include <core/option.h> | 26 | #include <core/option.h> |
26 | #include <core/object.h> | 27 | #include <core/object.h> |
27 | #include <core/event.h> | 28 | #include <core/event.h> |