/*
* GK20A Address Spaces
*
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
 */

#include <trace/events/gk20a.h>

#include <nvgpu/kmem.h>
#include <nvgpu/vm.h>

#include "gk20a/gk20a.h"
#include "gk20a/platform_gk20a.h"

/* Dumb allocator: share IDs simply count up and are never reused. */
static int generate_as_share_id(struct gk20a_as *as)
{
gk20a_dbg_fn("");
return ++as->last_share_id;
}

/* Still dumb: IDs are never recycled, so releasing one is a no-op. */
static void release_as_share_id(struct gk20a_as *as, int id)
{
gk20a_dbg_fn("");
return;
}

/* address space interfaces for the gk20a module */
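/*
 * Set up the VM backing an AS share. A big_page_size of zero selects the
 * per-chip default; any other value must be a power of two advertised in
 * gpu_characteristics.available_big_page_sizes.
 */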
static int gk20a_vm_alloc_share(struct gk20a_as_share *as_share,
u32 big_page_size, u32 flags)
{
struct gk20a_as *as = as_share->as;
struct gk20a *g = gk20a_from_as(as);
struct mm_gk20a *mm = &g->mm;
struct vm_gk20a *vm;
char name[32];
const bool userspace_managed =
(flags & NVGPU_GPU_IOCTL_ALLOC_AS_FLAGS_USERSPACE_MANAGED) != 0;
gk20a_dbg_fn("");
if (big_page_size == 0) {
big_page_size = g->ops.mm.get_default_big_page_size();
} else {
if (!is_power_of_2(big_page_size))
return -EINVAL;
if (!(big_page_size & g->gpu_characteristics.available_big_page_sizes))
return -EINVAL;
}
snprintf(name, sizeof(name), "as_%d", as_share->id);
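/*
 * Create the VM spanning the channel user and kernel regions. Big pages
 * are enabled unless disabled platform-wide, and userspace-managed mode
 * is passed through from the allocation flags.
 */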
vm = nvgpu_vm_init(g, big_page_size,
big_page_size << 10,
mm->channel.kernel_size,
mm->channel.user_size + mm->channel.kernel_size,
!mm->disable_bigpage, userspace_managed, name);
if (!vm)
return -ENOMEM;
as_share->vm = vm;
vm->as_share = as_share;
vm->enable_ctag = true;
return 0;
}
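
/*
 * Allocate an AS share: take a reference on the GPU, create the bookkeeping
 * structure and its ID, and build the VM while the GPU is powered via
 * gk20a_busy()/gk20a_idle(). On failure the share is freed and an error
 * returned.
 */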
int gk20a_as_alloc_share(struct gk20a *g,
u32 big_page_size, u32 flags,
struct gk20a_as_share **out)
{
struct gk20a_as_share *as_share;
int err = 0;
gk20a_dbg_fn("");
g = gk20a_get(g);
if (!g)
return -ENODEV;
*out = NULL;
as_share = nvgpu_kzalloc(g, sizeof(*as_share));
if (!as_share)
return -ENOMEM;
as_share->as = &g->as;
as_share->id = generate_as_share_id(as_share->as);
/* gk20a_vm_alloc_share() below sets as_share->vm; keep the GPU powered
 * while it runs. */
err = gk20a_busy(g);
if (err)
goto failed;
err = gk20a_vm_alloc_share(as_share, big_page_size, flags);
gk20a_idle(g);
if (err)
goto failed;
*out = as_share;
return 0;
failed:
nvgpu_kfree(g, as_share);
return err;
}
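
/*
 * Detach the VM from the share and drop the share's reference on it; the
 * VM itself goes away once nvgpu_vm_put() releases the last reference.
 */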
int gk20a_vm_release_share(struct gk20a_as_share *as_share)
{
struct vm_gk20a *vm = as_share->vm;
gk20a_dbg_fn("");
vm->as_share = NULL;
as_share->vm = NULL;
nvgpu_vm_put(vm);
return 0;
}

/*
 * Channels and the device nodes call this to release a share. Once the
 * reference count hits zero the share is deleted.
 */
int gk20a_as_release_share(struct gk20a_as_share *as_share)
{
struct gk20a *g = as_share->vm->mm->g;
int err;
gk20a_dbg_fn("");
err = gk20a_busy(g);
if (err)
goto release_fail;
err = gk20a_vm_release_share(as_share);
gk20a_idle(g);
release_fail:
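/* Even if powering up failed, still drop the ID, the GPU reference and
 * the share itself. */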
release_as_share_id(as_share->as, as_share->id);
gk20a_put(g);
nvgpu_kfree(g, as_share);
return err;
}