tools/perf/scripts/python/call-graph-from-sql.py
#!/usr/bin/python2
# call-graph-from-sql.py: create call-graph from sql database
# Copyright (c) 2014-2017, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
# more details.

# To use this script you will need to have exported data using either the
# export-to-sqlite.py or the export-to-postgresql.py script.  Refer to those
# scripts for details.
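#
# For example, a trace might first be recorded and exported along these
# lines (only a sketch; refer to the export scripts for the exact
# arguments they accept):
#
#	perf record -e intel_pt//u ls
#	perf script -s tools/perf/scripts/python/export-to-sqlite.py pt_example branches calls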
#
# Following on from the example in the export scripts, a
# call-graph can be displayed for the pt_example database like this:
#
#	python tools/perf/scripts/python/call-graph-from-sql.py pt_example
#
# Note that for PostgreSQL, this script supports connecting to remote databases
# by setting hostname, port, username, password, and dbname, e.g.
#
#	python tools/perf/scripts/python/call-graph-from-sql.py "hostname=myhost username=myuser password=mypassword dbname=pt_example"
#
# The result is a GUI window with a tree representing a context-sensitive
# call-graph.  Expanding a couple of levels of the tree and adjusting column
# widths to suit will display something like:
#
#                                         Call Graph: pt_example
# Call Path                          Object      Count   Time(ns)  Time(%)  Branch Count   Branch Count(%)
# v- ls
#     v- 2638:2638
#         v- _start                  ld-2.19.so    1     10074071   100.0         211135            100.0
#           |- unknown               unknown       1        13198     0.1              1              0.0
#           >- _dl_start             ld-2.19.so    1      1400980    13.9          19637              9.3
#           >- _d_linit_internal     ld-2.19.so    1       448152     4.4          11094              5.3
#           v-__libc_start_main@plt  ls            1      8211741    81.5         180397             85.4
#              >- _dl_fixup          ld-2.19.so    1         7607     0.1            108              0.1
#              >- __cxa_atexit       libc-2.19.so  1        11737     0.1             10              0.0
#              >- __libc_csu_init    ls            1        10354     0.1             10              0.0
#              |- _setjmp            libc-2.19.so  1            0     0.0              4              0.0
#              v- main               ls            1      8182043    99.6         180254             99.9
#
# Points to note:
#	The top level is a command name (comm)
#	The next level is a thread (pid:tid)
#	Subsequent levels are functions
#	'Count' is the number of calls
#	'Time' is the elapsed time until the function returns
#	Percentages are relative to the level above
#	'Branch Count' is the total number of branches for that function and all
#       functions that it calls

import sys
from PySide.QtCore import *
from PySide.QtGui import *
from PySide.QtSql import *
from decimal import *

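# The viewer is built from three pieces: TreeItem nodes each hold one row of
# the call-graph and lazily query their children from the database, TreeModel
# adapts the TreeItem tree to Qt's QAbstractItemModel interface, and
# MainWindow displays the model in a QTreeView.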
class TreeItem():

	def __init__(self, db, row, parent_item):
		self.db = db
		self.row = row
		self.parent_item = parent_item
		self.query_done = False
		self.child_count = 0
		self.child_items = []
		self.data = ["", "", "", "", "", "", ""]
		self.comm_id = 0
		self.thread_id = 0
		self.call_path_id = 1
		self.branch_count = 0
		self.time = 0
		if not parent_item:
			self.setUpRoot()

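	# Root node: create one child per command (comm) found in the database.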
	def setUpRoot(self):
		self.query_done = True
		query = QSqlQuery(self.db)
		ret = query.exec_('SELECT id, comm FROM comms')
		if not ret:
			raise Exception("Query failed: " + query.lastError().text())
		while query.next():
			if not query.value(0):
				continue
			child_item = TreeItem(self.db, self.child_count, self)
			self.child_items.append(child_item)
			self.child_count += 1
			child_item.setUpLevel1(query.value(0), query.value(1))

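	# Command (comm) node: create one child per thread of the command.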
	def setUpLevel1(self, comm_id, comm):
		self.query_done = True
		self.comm_id = comm_id
		self.data[0] = comm
		self.child_items = []
		self.child_count = 0
		query = QSqlQuery(self.db)
		ret = query.exec_('SELECT thread_id, ( SELECT pid FROM threads WHERE id = thread_id ), ( SELECT tid FROM threads WHERE id = thread_id ) FROM comm_threads WHERE comm_id = ' + str(comm_id))
		if not ret:
			raise Exception("Query failed: " + query.lastError().text())
		while query.next():
			child_item = TreeItem(self.db, self.child_count, self)
			self.child_items.append(child_item)
			self.child_count += 1
			child_item.setUpLevel2(comm_id, query.value(0), query.value(1), query.value(2))

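	# Thread node: record the ids and label the node "pid:tid".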
	def setUpLevel2(self, comm_id, thread_id, pid, tid):
		self.comm_id = comm_id
		self.thread_id = thread_id
		self.data[0] = str(pid) + ":" + str(tid)

	def getChildItem(self, row):
		return self.child_items[row]

	def getParentItem(self):
		return self.parent_item

	def getRow(self):
		return self.row

	def timePercent(self, b):
		if not self.time:
			return "0.0"
		x = (b * Decimal(100)) / self.time
		return str(x.quantize(Decimal('.1'), rounding=ROUND_HALF_UP))

	def branchPercent(self, b):
		if not self.branch_count:
			return "0.0"
		x = (b * Decimal(100)) / self.branch_count
		return str(x.quantize(Decimal('.1'), rounding=ROUND_HALF_UP))

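	# Append a function node, formatting its columns with percentages taken
	# relative to this (parent) node's time and branch count.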
	def addChild(self, call_path_id, name, dso, count, time, branch_count):
		child_item = TreeItem(self.db, self.child_count, self)
		child_item.comm_id = self.comm_id
		child_item.thread_id = self.thread_id
		child_item.call_path_id = call_path_id
		child_item.branch_count = branch_count
		child_item.time = time
		child_item.data[0] = name
		if dso == "[kernel.kallsyms]":
			dso = "[kernel]"
		child_item.data[1] = dso
		child_item.data[2] = str(count)
		child_item.data[3] = str(time)
		child_item.data[4] = self.timePercent(time)
		child_item.data[5] = str(branch_count)
		child_item.data[6] = self.branchPercent(branch_count)
		self.child_items.append(child_item)
		self.child_count += 1

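	# Populate children: calls below this node's call path are fetched ordered
	# by call_path_id, so consecutive rows for the same call path are merged
	# into a single child with summed count, time and branch count.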
	def selectCalls(self):
		self.query_done = True
		query = QSqlQuery(self.db)
		ret = query.exec_('SELECT id, call_path_id, branch_count, call_time, return_time, '
				  '( SELECT name FROM symbols WHERE id = ( SELECT symbol_id FROM call_paths WHERE id = call_path_id ) ), '
				  '( SELECT short_name FROM dsos WHERE id = ( SELECT dso_id FROM symbols WHERE id = ( SELECT symbol_id FROM call_paths WHERE id = call_path_id ) ) ), '
				  '( SELECT ip FROM call_paths where id = call_path_id ) '
				  'FROM calls WHERE parent_call_path_id = ' + str(self.call_path_id) + ' AND comm_id = ' + str(self.comm_id) + ' AND thread_id = ' + str(self.thread_id) +
				  ' ORDER BY call_path_id')
		if not ret:
			raise Exception("Query failed: " + query.lastError().text())
		last_call_path_id = 0
		name = ""
		dso = ""
		count = 0
		branch_count = 0
		total_branch_count = 0
		time = 0
		total_time = 0
		while query.next():
			if query.value(1) == last_call_path_id:
				count += 1
				branch_count += query.value(2)
				time += query.value(4) - query.value(3)
			else:
				if count:
					self.addChild(last_call_path_id, name, dso, count, time, branch_count)
				last_call_path_id = query.value(1)
				name = query.value(5)
				dso = query.value(6)
				count = 1
				total_branch_count += branch_count
				total_time += time
				branch_count = query.value(2)
				time = query.value(4) - query.value(3)
		if count:
			self.addChild(last_call_path_id, name, dso, count, time, branch_count)
		total_branch_count += branch_count
		total_time += time
		# Top level does not have time or branch count, so fix that here
		if total_branch_count > self.branch_count:
			self.branch_count = total_branch_count
			if self.branch_count:
				for child_item in self.child_items:
					child_item.data[6] = self.branchPercent(child_item.branch_count)
		if total_time > self.time:
			self.time = total_time
			if self.time:
				for child_item in self.child_items:
					child_item.data[4] = self.timePercent(child_item.time)

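	# Children are only queried the first time Qt asks for the row count.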
	def childCount(self):
		if not self.query_done:
			self.selectCalls()
		return self.child_count

	def columnCount(self):
		return 7

	def columnHeader(self, column):
		headers = ["Call Path", "Object", "Count ", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "]
		return headers[column]

	def getData(self, column):
		return self.data[column]

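# Adapts the TreeItem tree to Qt's model/view framework.  Qt requests rows,
# columns and cell data on demand, which keeps the per-node SQL queries lazy.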
class TreeModel(QAbstractItemModel):

	def __init__(self, db, parent=None):
		super(TreeModel, self).__init__(parent)
		self.db = db
		self.root = TreeItem(db, 0, None)

	def columnCount(self, parent):
		return self.root.columnCount()

	def rowCount(self, parent):
		if parent.isValid():
			parent_item = parent.internalPointer()
		else:
			parent_item = self.root
		return parent_item.childCount()

	def headerData(self, section, orientation, role):
		if role == Qt.TextAlignmentRole:
			if section > 1:
				return Qt.AlignRight
		if role != Qt.DisplayRole:
			return None
		if orientation != Qt.Horizontal:
			return None
		return self.root.columnHeader(section)

	def parent(self, child):
		child_item = child.internalPointer()
		if child_item is self.root:
			return QModelIndex()
		parent_item = child_item.getParentItem()
		return self.createIndex(parent_item.getRow(), 0, parent_item)

	def index(self, row, column, parent):
		if parent.isValid():
			parent_item = parent.internalPointer()
		else:
			parent_item = self.root
		child_item = parent_item.getChildItem(row)
		return self.createIndex(row, column, child_item)

	def data(self, index, role):
		if role == Qt.TextAlignmentRole:
			if index.column() > 1:
				return Qt.AlignRight
		if role != Qt.DisplayRole:
			return None
		index_item = index.internalPointer()
		return index_item.getData(index.column())

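# Main window: a QTreeView on the call-graph model for the chosen database.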
class MainWindow(QMainWindow):

	def __init__(self, db, dbname, parent=None):
		super(MainWindow, self).__init__(parent)

		self.setObjectName("MainWindow")
		self.setWindowTitle("Call Graph: " + dbname)
		self.move(100, 100)
		self.resize(800, 600)
		style = self.style()
		icon = style.standardIcon(QStyle.SP_MessageBoxInformation)
		self.setWindowIcon(icon)

		self.model = TreeModel(db)

		self.view = QTreeView()
		self.view.setModel(self.model)

		self.setCentralWidget(self.view)

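# Entry point: the first argument is either an SQLite database file,
# recognised by the "SQLite format 3" magic string at the start of the file,
# or a PostgreSQL connection string / database name.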
if __name__ == '__main__':
	if (len(sys.argv) < 2):
		print >> sys.stderr, "Usage is: call-graph-from-sql.py <database name>"
		raise Exception("Too few arguments")

	dbname = sys.argv[1]

	is_sqlite3 = False
	try:
		f = open(dbname)
		if f.read(15) == "SQLite format 3":
			is_sqlite3 = True
		f.close()
	except:
		pass

	if is_sqlite3:
		db = QSqlDatabase.addDatabase('QSQLITE')
	else:
		db = QSqlDatabase.addDatabase('QPSQL')
		opts = dbname.split()
		for opt in opts:
			if '=' in opt:
				opt = opt.split('=')
				if opt[0] == 'hostname':
					db.setHostName(opt[1])
				elif opt[0] == 'port':
					db.setPort(int(opt[1]))
				elif opt[0] == 'username':
					db.setUserName(opt[1])
				elif opt[0] == 'password':
					db.setPassword(opt[1])
				elif opt[0] == 'dbname':
					dbname = opt[1]
			else:
				dbname = opt

	db.setDatabaseName(dbname)
	if not db.open():
		raise Exception("Failed to open database " + dbname + " error: " + db.lastError().text())

	app = QApplication(sys.argv)
	window = MainWindow(db, dbname)
	window.show()
	err = app.exec_()
	db.close()
	sys.exit(err)