summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/os/linux/comptags.c
blob: ab371972db61efb960bbfa2f8a438cecb410838b (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
/*
* Copyright (c) 2017-2018, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/dma-buf.h>

#include <nvgpu/comptags.h>
#include <nvgpu/gk20a.h>

#include <nvgpu/linux/vm.h>

#include "dmabuf.h"

void gk20a_get_comptags(struct nvgpu_os_buffer *buf,
			struct gk20a_comptags *comptags)
{
	struct gk20a_dmabuf_priv *priv = dma_buf_get_drvdata(buf->dmabuf,
							     buf->dev);

	if (!comptags)
		return;

	if (!priv) {
		memset(comptags, 0, sizeof(*comptags));
		return;
	}

	nvgpu_mutex_acquire(&priv->lock);
	*comptags = priv->comptags;
	nvgpu_mutex_release(&priv->lock);
}

int gk20a_alloc_or_get_comptags(struct gk20a *g,
				struct nvgpu_os_buffer *buf,
				struct gk20a_comptag_allocator *allocator,
				struct gk20a_comptags *comptags)
{
	struct gk20a_dmabuf_priv *priv = dma_buf_get_drvdata(buf->dmabuf,
							     buf->dev);
	u32 offset;
	int err;
	unsigned int ctag_granularity;
	u32 lines;

	if (!priv)
		return -ENOSYS;

	nvgpu_mutex_acquire(&priv->lock);

	if (priv->comptags.allocated) {
		/*
		 * already allocated
		 */
		*comptags = priv->comptags;

		err = 0;
		goto exit_locked;
	}

	ctag_granularity = g->ops.fb.compression_page_size(g);
	lines = DIV_ROUND_UP_ULL(buf->dmabuf->size, ctag_granularity);

	/* 0-sized buffer? Shouldn't occur, but let's check anyways. */
	if (lines < 1) {
		err = -EINVAL;
		goto exit_locked;
	}

	/* store the allocator so we can use it when we free the ctags */
	priv->comptag_allocator = allocator;
	err = gk20a_comptaglines_alloc(allocator, &offset, lines);
	if (!err) {
		priv->comptags.offset = offset;
		priv->comptags.lines = lines;
		priv->comptags.needs_clear = true;
	} else {
		priv->comptags.offset = 0;
		priv->comptags.lines = 0;
		priv->comptags.needs_clear = false;
	}

	/*
	 * We don't report an error here if comptag alloc failed. The
	 * caller will simply fallback to incompressible kinds. It
	 * would not be safe to re-allocate comptags anyways on
	 * successive calls, as that would break map aliasing.
	 */
	err = 0;
	priv->comptags.allocated = true;

	*comptags = priv->comptags;

exit_locked:
	nvgpu_mutex_release(&priv->lock);

	return err;
}

/*
 * Begin a comptags clear for @buf, if one is pending.
 *
 * Returns true when the buffer's comptags still need clearing. In that
 * case priv->lock is intentionally LEFT HELD on return; the caller must
 * perform the clear and then call gk20a_comptags_finish_clear(), which
 * releases the lock. When false is returned (no priv, or no clear
 * needed), the lock is not held.
 */
bool gk20a_comptags_start_clear(struct nvgpu_os_buffer *buf)
{
	struct gk20a_dmabuf_priv *priv = dma_buf_get_drvdata(buf->dmabuf,
							     buf->dev);
	bool clear_started = false;

	if (priv) {
		nvgpu_mutex_acquire(&priv->lock);

		clear_started = priv->comptags.needs_clear;

		/* Only drop the lock when no clear will follow. */
		if (!clear_started)
			nvgpu_mutex_release(&priv->lock);
	}

	return clear_started;
}

/*
 * Finish a comptags clear started by gk20a_comptags_start_clear().
 *
 * Must only be called after start_clear() returned true: it releases
 * priv->lock, which start_clear() left held in that case. If the clear
 * succeeded, the needs_clear flag is dropped; on failure it stays set
 * so a later start_clear() will report the clear as still pending.
 */
void gk20a_comptags_finish_clear(struct nvgpu_os_buffer *buf,
				 bool clear_successful)
{
	struct gk20a_dmabuf_priv *priv = dma_buf_get_drvdata(buf->dmabuf,
							     buf->dev);
	if (priv) {
		if (clear_successful)
			priv->comptags.needs_clear = false;

		/* Releases the lock taken in gk20a_comptags_start_clear(). */
		nvgpu_mutex_release(&priv->lock);
	}
}