author    Greg Kroah-Hartman <gregkh@suse.de> 2011-04-05 00:41:20 -0400
committer Greg Kroah-Hartman <gregkh@suse.de> 2011-04-05 00:41:20 -0400
commit    4dd2b32f3c48112da2ffe55279aedc10c3784f90 (patch)
tree      bcf9cd8019f030cb825bea7fceaed77ccc974a4f
parent    00838d4f507ae73f2b5a260c826f6275bd2d4ba7 (diff)
staging: memrar: remove driver from tree
It's no longer needed at all.

Cc: Ossama Othman <ossama.othman@intel.com>
Cc: Eugene Epshteyn <eugene.epshteyn@intel.com>
Cc: Alan Cox <alan@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
-rw-r--r--  drivers/staging/Kconfig                    |    2
-rw-r--r--  drivers/staging/Makefile                   |    1
-rw-r--r--  drivers/staging/memrar/Kconfig             |   15
-rw-r--r--  drivers/staging/memrar/Makefile            |    2
-rw-r--r--  drivers/staging/memrar/TODO                |   43
-rw-r--r--  drivers/staging/memrar/memrar-abi          |   89
-rw-r--r--  drivers/staging/memrar/memrar.h            |  174
-rw-r--r--  drivers/staging/memrar/memrar_allocator.c  |  432
-rw-r--r--  drivers/staging/memrar/memrar_allocator.h  |  149
-rw-r--r--  drivers/staging/memrar/memrar_handler.c    | 1007
10 files changed, 0 insertions(+), 1914 deletions(-)
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 18b43fcb417..dca4a0bb6ca 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -117,8 +117,6 @@ source "drivers/staging/hv/Kconfig"
 
 source "drivers/staging/vme/Kconfig"
 
-source "drivers/staging/memrar/Kconfig"
-
 source "drivers/staging/sep/Kconfig"
 
 source "drivers/staging/iio/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index cfd13cd55ef..eb93012b6f5 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -40,7 +40,6 @@ obj-$(CONFIG_VT6655)	+= vt6655/
 obj-$(CONFIG_VT6656)		+= vt6656/
 obj-$(CONFIG_HYPERV)		+= hv/
 obj-$(CONFIG_VME_BUS)		+= vme/
-obj-$(CONFIG_MRST_RAR_HANDLER)	+= memrar/
 obj-$(CONFIG_DX_SEP)		+= sep/
 obj-$(CONFIG_IIO)		+= iio/
 obj-$(CONFIG_CS5535_GPIO)	+= cs5535_gpio/
diff --git a/drivers/staging/memrar/Kconfig b/drivers/staging/memrar/Kconfig
deleted file mode 100644
index cbeebc55090..00000000000
--- a/drivers/staging/memrar/Kconfig
+++ /dev/null
@@ -1,15 +0,0 @@
1config MRST_RAR_HANDLER
2 tristate "RAR handler driver for Intel Moorestown platform"
3 depends on RAR_REGISTER
4 ---help---
5 This driver provides a memory management interface to
6 restricted access regions (RAR) available on the Intel
7 Moorestown platform.
8
9 Once locked down, restricted access regions are only
10 accessible by specific hardware on the platform. The x86
11 CPU is typically not one of those devices. As such this
12 driver does not access RAR, and only provides a buffer
13 allocation/bookkeeping mechanism.
14
15 If unsure, say N.
diff --git a/drivers/staging/memrar/Makefile b/drivers/staging/memrar/Makefile
deleted file mode 100644
index a3336c00cc5..00000000000
--- a/drivers/staging/memrar/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
1obj-$(CONFIG_MRST_RAR_HANDLER) += memrar.o
2memrar-y := memrar_allocator.o memrar_handler.o
diff --git a/drivers/staging/memrar/TODO b/drivers/staging/memrar/TODO
deleted file mode 100644
index 435e09ba44c..00000000000
--- a/drivers/staging/memrar/TODO
+++ /dev/null
@@ -1,43 +0,0 @@
1RAR Handler (memrar) Driver TODO Items
2======================================
3
4Maintainer: Eugene Epshteyn <eugene.epshteyn@intel.com>
5
6memrar.h
7--------
81. This header exposes the driver's user space and kernel space
9 interfaces. It should be moved to <linux/rar/memrar.h>, or
10 something along those lines, when this memrar driver is moved out
11 of `staging'.
12 a. It would be ideal if staging/rar_register/rar_register.h was
13 moved to the same directory.
14
15memrar_allocator.[ch]
16---------------------
171. Address potential fragmentation issues with the memrar_allocator.
18
192. Hide struct memrar_allocator details/fields. They need not be
20 exposed to the user.
21 a. Forward declare struct memrar_allocator.
22 b. Move all three struct definitions to `memrar_allocator.c'
23 source file.
24 c. Add a memrar_allocator_largest_free_area() function, or
25 something like that to get access to the value of the struct
26 memrar_allocator "largest_free_area" field. This allows the
27 struct memrar_allocator fields to be completely hidden from
28 the user. The memrar_handler code really only needs this for
29 statistic gathering on-demand.
30 d. Do the same for the "capacity" field as the
31 "largest_free_area" field.
32
333. Move memrar_allocator.* to kernel `lib' directory since it is HW
34 neutral.
35 a. Alternatively, use lib/genalloc.c instead.
36 b. A kernel port of Doug Lea's malloc() implementation may also
37 be an option.
38
39memrar_handler.c
40----------------
411. Split user space interface (ioctl code) from core/kernel code,
42 e.g.:
43 memrar_handler.c -> memrar_core.c, memrar_user.c
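
For item 3a above, a rough sketch of what the same bookkeeping could look
like on top of lib/genalloc.c. This is an editorial illustration, not part
of this patch: rar_base and rar_len stand in for the values the handler
obtains from rar_register, and error handling is abbreviated.

    #include <linux/genalloc.h>
    #include <linux/mm.h>       /* PAGE_SHIFT */
    #include <linux/errno.h>

    static int genalloc_sketch(unsigned long rar_base, size_t rar_len)
    {
            struct gen_pool *pool;
            unsigned long addr;

            /* Page-sized minimum allocation order, no NUMA node preference. */
            pool = gen_pool_create(PAGE_SHIFT, -1);
            if (pool == NULL)
                    return -ENOMEM;
            if (gen_pool_add(pool, rar_base, rar_len, -1)) {
                    gen_pool_destroy(pool);
                    return -ENOMEM;
            }

            addr = gen_pool_alloc(pool, 8192);      /* returns 0 on failure */
            if (addr)
                    gen_pool_free(pool, addr, 8192);

            gen_pool_destroy(pool);
            return 0;
    }

One caveat with this route: gen_pool_free() takes the allocation size,
while memrar_allocator_free() takes only the address, so the handler would
need to keep per-buffer sizes around (which it already does, in struct
RAR_block_info).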
diff --git a/drivers/staging/memrar/memrar-abi b/drivers/staging/memrar/memrar-abi
deleted file mode 100644
index c23fc996a43..00000000000
--- a/drivers/staging/memrar/memrar-abi
+++ /dev/null
@@ -1,89 +0,0 @@
1What: /dev/memrar
2Date: March 2010
3KernelVersion: 2.6.34
4Contact: Eugene Epshteyn <eugene.epshteyn@intel.com>
5Description: The Intel Moorestown Restricted Access Region (RAR)
6 Handler driver exposes an ioctl() based interface that
7 allows a user to reserve and release blocks of RAR
8 memory.
9
10 Note: A sysfs based interface was not appropriate for the
11 RAR handler's usage model.
12
13 =========================================================
14 ioctl() Requests
15 =========================================================
16 RAR_HANDLER_RESERVE
17 -------------------
18 Description: Reserve RAR block.
19 Type: struct RAR_block_info
20 Direction: in/out
21 Errors: EINVAL (invalid RAR type or size)
22 ENOMEM (not enough RAR memory)
23
24 RAR_HANDLER_STAT
25 ----------------
26 Description: Get RAR statistics.
27 Type: struct RAR_stat
28 Direction: in/out
29 Errors: EINVAL (invalid RAR type)
30
31 RAR_HANDLER_RELEASE
32 -------------------
33 Description: Release previously reserved RAR block.
34 Type: 32 bit unsigned integer
35 (e.g. uint32_t), i.e. the RAR "handle".
36 Direction: in
37 Errors: EINVAL (invalid RAR handle)
38
39
40 =========================================================
41 ioctl() Request Parameter Types
42 =========================================================
43 The structures referred to above are defined as
44 follows:
45
46 /**
47 * struct RAR_block_info - user space struct that
48 * describes RAR buffer
49 * @type: Type of RAR memory (e.g.,
50 * RAR_TYPE_VIDEO or RAR_TYPE_AUDIO) [in]
51 * @size: Requested size of a block in bytes to
52 * be reserved in RAR. [in]
53 * @handle: Handle that can be used to refer to
54 * reserved block. [out]
55 *
56 * This is the basic structure exposed to the user
57 * space that describes a given RAR buffer. It is used
58 * as the parameter for the RAR_HANDLER_RESERVE ioctl.
59 * The buffer's underlying bus address is not exposed
60 * to the user. User space code refers to the buffer
61 * entirely by "handle".
62 */
63 struct RAR_block_info {
64 __u32 type;
65 __u32 size;
66 __u32 handle;
67 };
68
69 /**
70 * struct RAR_stat - RAR statistics structure
71 * @type: Type of RAR memory (e.g.,
72 * RAR_TYPE_VIDEO or
73 * RAR_TYPE_AUDIO) [in]
74 * @capacity: Total size of RAR memory
75 * region. [out]
76 * @largest_block_size: Size of the largest reservable
77 * block. [out]
78 *
79 * This structure is used for RAR_HANDLER_STAT ioctl.
80 */
81 struct RAR_stat {
82 __u32 type;
83 __u32 capacity;
84 __u32 largest_block_size;
85 };
86
87 Lastly, the RAR_HANDLER_RELEASE ioctl expects a
88 "handle" to the RAR block of memory. It is a 32 bit
89 unsigned integer.
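
For reference, the reserve/release cycle this ABI describes would look
roughly as follows from user space. This is an editorial sketch, not code
from the removed tree; the "memrar.h" include path and the type value 0
(e.g. RAR_TYPE_VIDEO, defined by the rar_register headers) are assumptions,
and error handling is abbreviated.

    #include <fcntl.h>
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include "memrar.h"     /* RAR_HANDLER_* ioctls, struct RAR_block_info */

    static int rar_reserve_release_example(void)
    {
            struct RAR_block_info info;
            uint32_t handle;
            int fd = open("/dev/memrar", O_RDWR);

            if (fd < 0)
                    return -1;

            info.type = 0;          /* assumed: RAR_TYPE_VIDEO */
            info.size = 64 * 1024;  /* bytes to reserve [in] */
            if (ioctl(fd, RAR_HANDLER_RESERVE, &info) == 0) {
                    handle = info.handle;   /* opaque handle [out] */
                    ioctl(fd, RAR_HANDLER_RELEASE, &handle);
            }
            close(fd);
            return 0;
    }

As the struct documentation above notes, user space only ever sees the
32 bit handle; the buffer's underlying bus address never leaves the kernel.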
diff --git a/drivers/staging/memrar/memrar.h b/drivers/staging/memrar/memrar.h
deleted file mode 100644
index 0feb73b94c9..00000000000
--- a/drivers/staging/memrar/memrar.h
+++ /dev/null
@@ -1,174 +0,0 @@
1/*
2 * RAR Handler (/dev/memrar) internal driver API.
3 * Copyright (C) 2010 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General
7 * Public License as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be
10 * useful, but WITHOUT ANY WARRANTY; without even the implied
11 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
12 * PURPOSE. See the GNU General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the Free
15 * Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
17 * The full GNU General Public License is included in this
18 * distribution in the file called COPYING.
19 */
20
21
22#ifndef _MEMRAR_H
23#define _MEMRAR_H
24
25#include <linux/ioctl.h>
26#include <linux/types.h>
27
28
29/**
30 * struct RAR_stat - RAR statistics structure
31 * @type: Type of RAR memory (e.g., audio vs. video)
32 * @capacity: Total size of RAR memory region.
33 * @largest_block_size: Size of the largest reservable block.
34 *
35 * This structure is used for RAR_HANDLER_STAT ioctl and for the
36 * RAR_get_stat() user space wrapper function.
37 */
38struct RAR_stat {
39 __u32 type;
40 __u32 capacity;
41 __u32 largest_block_size;
42};
43
44
45/**
46 * struct RAR_block_info - user space struct that describes RAR buffer
47 * @type: Type of RAR memory (e.g., audio vs. video)
48 * @size: Requested size of a block to be reserved in RAR.
49 * @handle: Handle that can be used to refer to reserved block.
50 *
51 * This is the basic structure exposed to the user space that
52 * describes a given RAR buffer. The buffer's underlying bus address
53 * is not exposed to the user. User space code refers to the buffer
54 * entirely by "handle".
55 */
56struct RAR_block_info {
57 __u32 type;
58 __u32 size;
59 __u32 handle;
60};
61
62
63#define RAR_IOCTL_BASE 0xE0
64
65/* Reserve RAR block. */
66#define RAR_HANDLER_RESERVE _IOWR(RAR_IOCTL_BASE, 0x00, struct RAR_block_info)
67
68/* Release previously reserved RAR block. */
69#define RAR_HANDLER_RELEASE _IOW(RAR_IOCTL_BASE, 0x01, __u32)
70
71/* Get RAR stats. */
72#define RAR_HANDLER_STAT _IOWR(RAR_IOCTL_BASE, 0x02, struct RAR_stat)
73
74
75#ifdef __KERNEL__
76
77/* -------------------------------------------------------------- */
78/* Kernel Side RAR Handler Interface */
79/* -------------------------------------------------------------- */
80
81/**
82 * struct RAR_buffer - kernel space struct that describes RAR buffer
83 * @info: structure containing base RAR buffer information
84 * @bus_address: buffer bus address
85 *
86 * Structure that contains all information related to a given block of
87 * memory in RAR. It is generally only used when retrieving RAR
88 * related bus addresses.
89 *
90 * Note: This structure is used only by RAR-enabled drivers, and is
91 * not intended to be exposed to the user space.
92 */
93struct RAR_buffer {
94 struct RAR_block_info info;
95 dma_addr_t bus_address;
96};
97
98#if defined(CONFIG_MRST_RAR_HANDLER)
99/**
100 * rar_reserve() - reserve RAR buffers
101 * @buffers: array of RAR_buffers where type and size of buffers to
102 * reserve are passed in, handle and bus address are
103 * passed out
104 * @count: number of RAR_buffers in the "buffers" array
105 *
106 * This function will reserve buffers in the restricted access regions
107 * of given types.
108 *
109 * It returns the number of successfully reserved buffers. Successful
110 * buffer reservations will have the corresponding bus_address field
111 * set to a non-zero value in the given buffers vector.
112 */
113extern size_t rar_reserve(struct RAR_buffer *buffers,
114 size_t count);
115
116/**
117 * rar_release() - release RAR buffers
118 * @buffers: array of RAR_buffers where handles to buffers to be
119 * released are passed in
120 * @count: number of RAR_buffers in the "buffers" array
121 *
122 * This function will release RAR buffers that were retrieved through
123 * a call to rar_reserve() or rar_handle_to_bus() by decrementing the
124 * reference count. The RAR buffer will be reclaimed when the
125 * reference count drops to zero.
126 *
127 * It returns the number of successfully released buffers. Successful
128 * releases will have their handle field set to zero in the given
129 * buffers vector.
130 */
131extern size_t rar_release(struct RAR_buffer *buffers,
132 size_t count);
133
134/**
135 * rar_handle_to_bus() - convert a vector of RAR handles to bus addresses
136 * @buffers: array of RAR_buffers containing handles to be
137 * converted to bus_addresses
138 * @count: number of RAR_buffers in the "buffers" array
139 *
140 * This function will retrieve the RAR buffer bus addresses, type and
141 * size corresponding to the RAR handles provided in the buffers
142 * vector.
143 *
144 * It returns the number of successfully converted buffers. The bus
145 * address will be set to 0 for unrecognized handles.
146 *
147 * The reference count for each corresponding buffer in RAR will be
148 * incremented. Call rar_release() when done with the buffers.
149 */
150extern size_t rar_handle_to_bus(struct RAR_buffer *buffers,
151 size_t count);
152
153#else
154
155static inline size_t rar_reserve(struct RAR_buffer *buffers, size_t count)
156{
157 return 0;
158}
159
160static inline size_t rar_release(struct RAR_buffer *buffers, size_t count)
161{
162 return 0;
163}
164
165static inline size_t rar_handle_to_bus(struct RAR_buffer *buffers,
166 size_t count)
167{
168 return 0;
169}
170
171#endif /* CONFIG_MRST_RAR_HANDLER */
172#endif /* __KERNEL__ */
173
174#endif /* _MEMRAR_H */
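
For reference, a RAR-enabled driver would consume the kernel-side interface
above roughly as follows. This is an editorial sketch, not code from the
removed tree; the type value and buffer size are placeholders, and error
handling is abbreviated.

    #include <linux/mm.h>       /* PAGE_SIZE */
    #include <linux/errno.h>
    #include "memrar.h"

    static int rar_kernel_sketch(void)
    {
            struct RAR_buffer buf = {
                    .info = {
                            .type = 0,              /* assumed: RAR_TYPE_VIDEO */
                            .size = PAGE_SIZE,      /* bytes to reserve */
                    },
            };

            /* rar_reserve() returns the number of buffers it reserved. */
            if (rar_reserve(&buf, 1) != 1)
                    return -ENOMEM; /* buf.bus_address is left at zero */

            /* ... program the authorized device with buf.bus_address ... */

            rar_release(&buf, 1);   /* drop the reference rar_reserve() took */
            return 0;
    }

Note how the stub variants above keep this calling pattern safe even when
CONFIG_MRST_RAR_HANDLER is disabled: rar_reserve() then reserves nothing
and the error path is taken.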
diff --git a/drivers/staging/memrar/memrar_allocator.c b/drivers/staging/memrar/memrar_allocator.c
deleted file mode 100644
index a4f8c5846a0..00000000000
--- a/drivers/staging/memrar/memrar_allocator.c
+++ /dev/null
@@ -1,432 +0,0 @@
1/*
2 * memrar_allocator 1.0: An allocator for Intel RAR.
3 *
4 * Copyright (C) 2010 Intel Corporation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General
8 * Public License as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be
11 * useful, but WITHOUT ANY WARRANTY; without even the implied
12 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
13 * PURPOSE. See the GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public
15 * License along with this program; if not, write to the Free
16 * Software Foundation, Inc., 59 Temple Place - Suite 330,
17 * Boston, MA 02111-1307, USA.
18 * The full GNU General Public License is included in this
19 * distribution in the file called COPYING.
20 *
21 *
22 * ------------------------------------------------------------------
23 *
24 * This simple allocator implementation provides a
25 * malloc()/free()-like interface for reserving space within a
26 * previously reserved block of memory. It is not specific to
27 * any hardware, nor is it coupled with the lower level paging
28 * mechanism.
29 *
30 * The primary goal of this implementation is to provide a means
31 * to partition an arbitrary block of memory without actually
32 * accessing the memory or incurring any hardware side-effects
33 * (e.g. paging). It is, in effect, a bookkeeping mechanism for
34 * buffers.
35 */
36
37
38#include "memrar_allocator.h"
39#include <linux/slab.h>
40#include <linux/bug.h>
41#include <linux/kernel.h>
42
43
44struct memrar_allocator *memrar_create_allocator(unsigned long base,
45 size_t capacity,
46 size_t block_size)
47{
48 struct memrar_allocator *allocator = NULL;
49 struct memrar_address_ranges *first_node = NULL;
50
51 /*
52 * Make sure the base address is aligned on a block_size
53 * boundary.
54 *
55 * @todo Is this necessary?
56 */
57 /* base = ALIGN(base, block_size); */
58
59 /* Validate parameters.
60 *
61 * Make sure we can allocate the entire memory space. Zero
62 * capacity or block size are obviously invalid.
63 */
64 if (base == 0
65 || capacity == 0
66 || block_size == 0
67 || ULONG_MAX - capacity < base
68 || capacity < block_size)
69 return allocator;
70
71 /*
72 * There isn't much point in creating a memory allocator that
73 * is only capable of holding one block but we'll allow it,
74 * and issue a diagnostic.
75 */
76 WARN(capacity < block_size * 2,
77 "memrar: Only one block available to allocator.\n");
78
79 allocator = kmalloc(sizeof(*allocator), GFP_KERNEL);
80
81 if (allocator == NULL)
82 return allocator;
83
84 mutex_init(&allocator->lock);
85 allocator->base = base;
86
87 /* Round the capacity down to a multiple of block_size. */
88 allocator->capacity = (capacity / block_size) * block_size;
89
90 allocator->block_size = block_size;
91
92 allocator->largest_free_area = allocator->capacity;
93
94 /* Initialize the handle and free lists. */
95 INIT_LIST_HEAD(&allocator->allocated_list.list);
96 INIT_LIST_HEAD(&allocator->free_list.list);
97
98 first_node = kmalloc(sizeof(*first_node), GFP_KERNEL);
99 if (first_node == NULL) {
100 kfree(allocator);
101 allocator = NULL;
102 } else {
103 /* Full range of blocks is available. */
104 first_node->range.begin = base;
105 first_node->range.end = base + allocator->capacity;
106 list_add(&first_node->list,
107 &allocator->free_list.list);
108 }
109
110 return allocator;
111}
112
113void memrar_destroy_allocator(struct memrar_allocator *allocator)
114{
115 /*
116 * Assume that the memory allocator lock isn't held at this
117 * point in time. Caller must ensure that.
118 */
119
120 struct memrar_address_ranges *pos = NULL;
121 struct memrar_address_ranges *n = NULL;
122
123 if (allocator == NULL)
124 return;
125
126 mutex_lock(&allocator->lock);
127
128 /* Reclaim free list resources. */
129 list_for_each_entry_safe(pos,
130 n,
131 &allocator->free_list.list,
132 list) {
133 list_del(&pos->list);
134 kfree(pos);
135 }
136
137 mutex_unlock(&allocator->lock);
138
139 kfree(allocator);
140}
141
142unsigned long memrar_allocator_alloc(struct memrar_allocator *allocator,
143 size_t size)
144{
145 struct memrar_address_ranges *pos = NULL;
146
147 size_t num_blocks;
148 unsigned long reserved_bytes;
149
150 /*
151 * Address of allocated buffer. We assume that zero is not a
152 * valid address.
153 */
154 unsigned long addr = 0;
155
156 if (allocator == NULL || size == 0)
157 return addr;
158
159 /* Reserve enough blocks to hold the amount of bytes requested. */
160 num_blocks = DIV_ROUND_UP(size, allocator->block_size);
161
162 reserved_bytes = num_blocks * allocator->block_size;
163
164 mutex_lock(&allocator->lock);
165
166 if (reserved_bytes > allocator->largest_free_area) {
167 mutex_unlock(&allocator->lock);
168 return addr;
169 }
170
171 /*
172 * Iterate through the free list to find a suitably sized
173 * range of free contiguous memory blocks.
174 *
175 * We also take the opportunity to reset the size of the
176 * largest free area size statistic.
177 */
178 list_for_each_entry(pos, &allocator->free_list.list, list) {
179 struct memrar_address_range * const fr = &pos->range;
180 size_t const curr_size = fr->end - fr->begin;
181
182 if (curr_size >= reserved_bytes && addr == 0) {
183 struct memrar_address_range *range = NULL;
184 struct memrar_address_ranges * const new_node =
185 kmalloc(sizeof(*new_node), GFP_KERNEL);
186
187 if (new_node == NULL)
188 break;
189
190 list_add(&new_node->list,
191 &allocator->allocated_list.list);
192
193 /*
194 * Carve out area of memory from end of free
195 * range.
196 */
197 range = &new_node->range;
198 range->end = fr->end;
199 fr->end -= reserved_bytes;
200 range->begin = fr->end;
201 addr = range->begin;
202
203 /*
204 * Check if largest area has decreased in
205 * size. We'll need to continue scanning for
206 * the next largest area if it has.
207 */
208 if (curr_size == allocator->largest_free_area)
209 allocator->largest_free_area -=
210 reserved_bytes;
211 else
212 break;
213 }
214
215 /*
216 * Reset largest free area size statistic as needed,
217 * but only if we've actually allocated memory.
218 */
219 if (addr != 0
220 && curr_size > allocator->largest_free_area) {
221 allocator->largest_free_area = curr_size;
222 break;
223 }
224 }
225
226 mutex_unlock(&allocator->lock);
227
228 return addr;
229}
230
231long memrar_allocator_free(struct memrar_allocator *allocator,
232 unsigned long addr)
233{
234 struct list_head *pos = NULL;
235 struct list_head *tmp = NULL;
236 struct list_head *dst = NULL;
237
238 struct memrar_address_ranges *allocated = NULL;
239 struct memrar_address_range const *handle = NULL;
240
241 unsigned long old_end = 0;
242 unsigned long new_chunk_size = 0;
243
244 if (allocator == NULL)
245 return -EINVAL;
246
247 if (addr == 0)
248 return 0; /* Ignore "free(0)". */
249
250 mutex_lock(&allocator->lock);
251
252 /* Find the corresponding handle. */
253 list_for_each_entry(allocated,
254 &allocator->allocated_list.list,
255 list) {
256 if (allocated->range.begin == addr) {
257 handle = &allocated->range;
258 break;
259 }
260 }
261
262 /* No such buffer created by this allocator. */
263 if (handle == NULL) {
264 mutex_unlock(&allocator->lock);
265 return -EFAULT;
266 }
267
268 /*
269 * Coalesce adjacent chunks of memory if possible.
270 *
271 * @note This isn't full blown coalescing since we're only
272 * coalescing at most three chunks of memory.
273 */
274 list_for_each_safe(pos, tmp, &allocator->free_list.list) {
275 /* @todo O(n) performance. Optimize. */
276
277 struct memrar_address_range * const chunk =
278 &list_entry(pos,
279 struct memrar_address_ranges,
280 list)->range;
281
282 /* Extend size of existing free adjacent chunk. */
283 if (chunk->end == handle->begin) {
284 /*
285 * Chunk "less than" the one we're
286 * freeing is adjacent.
287 *
288 * Before:
289 *
290 * +-----+------+
291 * |chunk|handle|
292 * +-----+------+
293 *
294 * After:
295 *
296 * +------------+
297 * | chunk |
298 * +------------+
299 */
300
301 struct memrar_address_ranges const * const next =
302 list_entry(pos->next,
303 struct memrar_address_ranges,
304 list);
305
306 chunk->end = handle->end;
307
308 /*
309 * Now check if next free chunk is adjacent to
310 * the current extended free chunk.
311 *
312 * Before:
313 *
314 * +------------+----+
315 * | chunk |next|
316 * +------------+----+
317 *
318 * After:
319 *
320 * +-----------------+
321 * | chunk |
322 * +-----------------+
323 */
324 if (!list_is_singular(pos)
325 && chunk->end == next->range.begin) {
326 chunk->end = next->range.end;
327 list_del(pos->next);
328 kfree(next);
329 }
330
331 list_del(&allocated->list);
332
333 new_chunk_size = chunk->end - chunk->begin;
334
335 goto exit_memrar_free;
336
337 } else if (handle->end == chunk->begin) {
338 /*
339 * Chunk "greater than" the one we're
340 * freeing is adjacent.
341 *
342 * +------+-----+
343 * |handle|chunk|
344 * +------+-----+
345 *
346 * After:
347 *
348 * +------------+
349 * | chunk |
350 * +------------+
351 */
352
353 struct memrar_address_ranges const * const prev =
354 list_entry(pos->prev,
355 struct memrar_address_ranges,
356 list);
357
358 chunk->begin = handle->begin;
359
360 /*
361 * Now check if previous free chunk is
362 * adjacent to the current extended free
363 * chunk.
364 *
365 *
366 * Before:
367 *
368 * +----+------------+
369 * |prev| chunk |
370 * +----+------------+
371 *
372 * After:
373 *
374 * +-----------------+
375 * | chunk |
376 * +-----------------+
377 */
378 if (!list_is_singular(pos)
379 && prev->range.end == chunk->begin) {
380 chunk->begin = prev->range.begin;
381 list_del(pos->prev);
382 kfree(prev);
383 }
384
385 list_del(&allocated->list);
386
387 new_chunk_size = chunk->end - chunk->begin;
388
389 goto exit_memrar_free;
390
391 } else if (chunk->end < handle->begin
392 && chunk->end > old_end) {
393 /* Keep track of where the entry could be
394 * potentially moved from the "allocated" list
395 * to the "free" list if coalescing doesn't
396 * occur, making sure the "free" list remains
397 * sorted.
398 */
399 old_end = chunk->end;
400 dst = pos;
401 }
402 }
403
404 /*
405 * Nothing to coalesce.
406 *
407 * Move the entry from the "allocated" list to the "free"
408 * list.
409 */
410 list_move(&allocated->list, dst);
411 new_chunk_size = handle->end - handle->begin;
412 allocated = NULL;
413
414exit_memrar_free:
415
416 if (new_chunk_size > allocator->largest_free_area)
417 allocator->largest_free_area = new_chunk_size;
418
419 mutex_unlock(&allocator->lock);
420
421 kfree(allocated);
422
423 return 0;
424}
425
426
427
428/*
429 Local Variables:
430 c-file-style: "linux"
431 End:
432*/
diff --git a/drivers/staging/memrar/memrar_allocator.h b/drivers/staging/memrar/memrar_allocator.h
deleted file mode 100644
index 0b80dead710..00000000000
--- a/drivers/staging/memrar/memrar_allocator.h
+++ /dev/null
@@ -1,149 +0,0 @@
1/*
2 * Copyright (C) 2010 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General
6 * Public License as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be
9 * useful, but WITHOUT ANY WARRANTY; without even the implied
10 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
11 * PURPOSE. See the GNU General Public License for more details.
12 * You should have received a copy of the GNU General Public
13 * License along with this program; if not, write to the Free
14 * Software Foundation, Inc., 59 Temple Place - Suite 330,
15 * Boston, MA 02111-1307, USA.
16 * The full GNU General Public License is included in this
17 * distribution in the file called COPYING.
18 */
19
20#ifndef MEMRAR_ALLOCATOR_H
21#define MEMRAR_ALLOCATOR_H
22
23
24#include <linux/mutex.h>
25#include <linux/list.h>
26#include <linux/types.h>
27#include <linux/kernel.h>
28
29
30/**
31 * struct memrar_address_range - struct that describes a memory range
32 * @begin: Beginning of available address range.
33 * @end: End of available address range, one past the end,
34 * i.e. [begin, end).
35 */
36struct memrar_address_range {
37/* private: internal use only */
38 unsigned long begin;
39 unsigned long end;
40};
41
42/**
43 * struct memrar_address_ranges - list of areas of memory.
44 * @list: Linked list of address ranges.
45 * @range: Memory address range corresponding to given list node.
46 */
47struct memrar_address_ranges {
48/* private: internal use only */
49 struct list_head list;
50 struct memrar_address_range range;
51};
52
53/**
54 * struct memrar_allocator - encapsulation of the memory allocator state
55 * @lock: Lock used to synchronize access to the memory
56 * allocator state.
57 * @base: Base (start) address of the allocator memory
58 * space.
59 * @capacity: Size of the allocator memory space in bytes.
60 * @block_size: The size in bytes of individual blocks within
61 * the allocator memory space.
62 * @largest_free_area: Largest free area of memory in the allocator
63 * in bytes.
64 * @allocated_list: List of allocated memory block address
65 * ranges.
66 * @free_list: List of free address ranges.
67 *
68 * This structure contains all memory allocator state, including the
69 * base address, capacity, free list, lock, etc.
70 */
71struct memrar_allocator {
72/* private: internal use only */
73 struct mutex lock;
74 unsigned long base;
75 size_t capacity;
76 size_t block_size;
77 size_t largest_free_area;
78 struct memrar_address_ranges allocated_list;
79 struct memrar_address_ranges free_list;
80};
81
82/**
83 * memrar_create_allocator() - create a memory allocator
84 * @base: Address at which the memory allocator begins.
85 * @capacity: Desired size of the memory allocator. This value must
86 * be larger than the block_size, ideally more than twice
87 * as large since there wouldn't be much point in using a
88 * memory allocator otherwise.
89 * @block_size: The size of individual blocks within the memory
90 * allocator. This value must be smaller than the
91 * capacity.
92 *
93 * Create a memory allocator with the given capacity and block size.
94 * The capacity will be reduced to be a multiple of the block size, if
95 * necessary.
96 *
97 * Returns an instance of the memory allocator, if creation succeeds,
98 * otherwise zero if creation fails. Failure may occur if not enough
99 * kernel memory exists to create the memrar_allocator instance
100 * itself, or if the capacity and block_size arguments are not
101 * compatible or make sense.
102 */
103struct memrar_allocator *memrar_create_allocator(unsigned long base,
104 size_t capacity,
105 size_t block_size);
106
107/**
108 * memrar_destroy_allocator() - destroy allocator
109 * @allocator: The allocator being destroyed.
110 *
111 * Reclaim resources held by the memory allocator. The caller must
112 * explicitly free all memory reserved by memrar_allocator_alloc()
113 * prior to calling this function. Otherwise leaks will occur.
114 */
115void memrar_destroy_allocator(struct memrar_allocator *allocator);
116
117/**
118 * memrar_allocator_alloc() - reserve an area of memory of given size
119 * @allocator: The allocator instance being used to reserve buffer.
120 * @size: The size in bytes of the buffer to allocate.
121 *
122 * This function reserves an area of memory managed by the given
123 * allocator. It returns zero if allocation was not possible.
124 * Failure may occur if the allocator no longer has space available.
125 */
126unsigned long memrar_allocator_alloc(struct memrar_allocator *allocator,
127 size_t size);
128
129/**
130 * memrar_allocator_free() - release buffer starting at given address
131 * @allocator: The allocator instance being used to release the buffer.
132 * @address: The address of the buffer being released.
133 *
134 * Release an area of memory starting at the given address. Failure
135 * could occur if the given address is not in the address space
136 * managed by the allocator. Returns zero on success or an errno
137 * (negative value) on failure.
138 */
139long memrar_allocator_free(struct memrar_allocator *allocator,
140 unsigned long address);
141
142#endif /* MEMRAR_ALLOCATOR_H */
143
144
145/*
146 Local Variables:
147 c-file-style: "linux"
148 End:
149*/
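
For reference, the call sequence these kernel-doc comments imply looks
roughly like this. Editorial sketch only; the base address and capacity are
placeholders, and, as memrar_destroy_allocator() requires, every allocation
is freed before the allocator is destroyed.

    #include <linux/mm.h>       /* PAGE_SIZE */
    #include <linux/errno.h>
    #include "memrar_allocator.h"

    static int allocator_sketch(void)
    {
            struct memrar_allocator *a;
            unsigned long addr;

            /* A 4 MiB space carved into page-sized blocks (placeholder base). */
            a = memrar_create_allocator(0x10000000, 4 * 1024 * 1024, PAGE_SIZE);
            if (a == NULL)
                    return -ENOMEM;

            addr = memrar_allocator_alloc(a, 8192); /* rounded up to whole blocks */
            if (addr != 0)
                    memrar_allocator_free(a, addr); /* freeing 0 is a no-op */

            memrar_destroy_allocator(a);
            return 0;
    }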
diff --git a/drivers/staging/memrar/memrar_handler.c b/drivers/staging/memrar/memrar_handler.c
deleted file mode 100644
index cfcaa8e5b8e..00000000000
--- a/drivers/staging/memrar/memrar_handler.c
+++ /dev/null
@@ -1,1007 +0,0 @@
1/*
2 * memrar_handler 1.0: An Intel restricted access region handler device
3 *
4 * Copyright (C) 2010 Intel Corporation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General
8 * Public License as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be
11 * useful, but WITHOUT ANY WARRANTY; without even the implied
12 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
13 * PURPOSE. See the GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public
15 * License along with this program; if not, write to the Free
16 * Software Foundation, Inc., 59 Temple Place - Suite 330,
17 * Boston, MA 02111-1307, USA.
18 * The full GNU General Public License is included in this
19 * distribution in the file called COPYING.
20 *
21 * -------------------------------------------------------------------
22 *
23 * Moorestown restricted access regions (RAR) provide isolated
24 * areas of main memory that are only accessible by authorized
25 * devices.
26 *
27 * The Intel Moorestown RAR handler module exposes a kernel space
28 * RAR memory management mechanism. It is essentially a
29 * RAR-specific allocator.
30 *
31 * Besides providing RAR buffer management, the RAR handler also
32 * behaves in many ways like an OS virtual memory manager. For
33 * example, the RAR "handles" created by the RAR handler are
34 * analogous to user space virtual addresses.
35 *
36 * RAR memory itself is never accessed directly by the RAR
37 * handler.
38 */
39
40#include <linux/miscdevice.h>
41#include <linux/fs.h>
42#include <linux/slab.h>
43#include <linux/kref.h>
44#include <linux/mutex.h>
45#include <linux/kernel.h>
46#include <linux/uaccess.h>
47#include <linux/mm.h>
48#include <linux/ioport.h>
49#include <linux/io.h>
50#include <linux/rar_register.h>
51
52#include "memrar.h"
53#include "memrar_allocator.h"
54
55
56#define MEMRAR_VER "1.0"
57
58/*
59 * Moorestown supports three restricted access regions.
60 *
61 * We only care about the first two, video and audio. The third,
62 * reserved for Chaabi and the P-unit, will be handled by their
63 * respective drivers.
64 */
65#define MRST_NUM_RAR 2
66
67/* ---------------- -------------------- ------------------- */
68
69/**
70 * struct memrar_buffer_info - struct that keeps track of all RAR buffers
71 * @list: Linked list of memrar_buffer_info objects.
72 * @buffer: Core RAR buffer information.
73 * @refcount: Reference count.
74 * @owner: File handle corresponding to process that reserved the
75 * block of memory in RAR. This will be zero for buffers
76 * allocated by other drivers instead of by a user space
77 * process.
78 *
79 * This structure encapsulates a link list of RAR buffers, as well as
80 * other characteristics specific to a given list node, such as the
81 * reference count on the corresponding RAR buffer.
82 */
83struct memrar_buffer_info {
84 struct list_head list;
85 struct RAR_buffer buffer;
86 struct kref refcount;
87 struct file *owner;
88};
89
90/**
91 * struct memrar_rar_info - characteristics of a given RAR
92 * @base: Base bus address of the RAR.
93 * @length: Length of the RAR.
94 * @iobase: Virtual address of RAR mapped into kernel.
95 * @allocator: Allocator associated with the RAR. Note the allocator
96 * "capacity" may be smaller than the RAR length if the
97 * length is not a multiple of the configured allocator
98 * block size.
99 * @buffers: Table that keeps track of all reserved RAR buffers.
100 * @lock: Lock used to synchronize access to RAR-specific data
101 * structures.
102 *
103 * Each RAR has an associated memrar_rar_info structure that describes
104 * where in memory the RAR is located, how large it is, and a list of
105 * reserved RAR buffers inside that RAR. Each RAR also has a mutex
106 * associated with it to reduce lock contention when operations on
107 * multiple RARs are performed in parallel.
108 */
109struct memrar_rar_info {
110 dma_addr_t base;
111 unsigned long length;
112 void __iomem *iobase;
113 struct memrar_allocator *allocator;
114 struct memrar_buffer_info buffers;
115 struct mutex lock;
116 int allocated; /* True if we own this RAR */
117};
118
119/*
120 * Array of RAR characteristics.
121 */
122static struct memrar_rar_info memrars[MRST_NUM_RAR];
123
124/* ---------------- -------------------- ------------------- */
125
126/* Validate RAR type. */
127static inline int memrar_is_valid_rar_type(u32 type)
128{
129 return type == RAR_TYPE_VIDEO || type == RAR_TYPE_AUDIO;
130}
131
132/* Check if an address/handle falls within the given RAR memory range. */
133static inline int memrar_handle_in_range(struct memrar_rar_info *rar,
134 u32 vaddr)
135{
136 unsigned long const iobase = (unsigned long) (rar->iobase);
137 return (vaddr >= iobase && vaddr < iobase + rar->length);
138}
139
140/* Retrieve RAR information associated with the given handle. */
141static struct memrar_rar_info *memrar_get_rar_info(u32 vaddr)
142{
143 int i;
144 for (i = 0; i < MRST_NUM_RAR; ++i) {
145 struct memrar_rar_info * const rar = &memrars[i];
146 if (memrar_handle_in_range(rar, vaddr))
147 return rar;
148 }
149
150 return NULL;
151}
152
153/**
154 * memrar_get_bus_address - handle to bus address
155 *
156 * Retrieve bus address from given handle.
157 *
158 * Returns address corresponding to given handle. Zero if handle is
159 * invalid.
160 */
161static dma_addr_t memrar_get_bus_address(
162 struct memrar_rar_info *rar,
163 u32 vaddr)
164{
165 unsigned long const iobase = (unsigned long) (rar->iobase);
166
167 if (!memrar_handle_in_range(rar, vaddr))
168 return 0;
169
170 /*
171 * An assumption is made that the virtual address offset is
172 * the same as the bus address offset, at least based on the
173 * way this driver is implemented. For example, vaddr + 2 ==
174 * baddr + 2.
175 *
176 * @todo Is that a valid assumption?
177 */
178 return rar->base + (vaddr - iobase);
179}
180
181/**
182 * memrar_get_physical_address - handle to physical address
183 *
184 * Retrieve physical address from given handle.
185 *
186 * Returns address corresponding to given handle. Zero if handle is
187 * invalid.
188 */
189static dma_addr_t memrar_get_physical_address(
190 struct memrar_rar_info *rar,
191 u32 vaddr)
192{
193 /*
194 * @todo This assumes that the bus address and physical
195 * address are the same. That is true for Moorestown
196 * but not necessarily on other platforms. This
197 * deficiency should be addressed at some point.
198 */
199 return memrar_get_bus_address(rar, vaddr);
200}
201
202/**
203 * memrar_release_block_i - release a block to the pool
204 * @ref: kref of block
205 *
206 * Core block release code. A node has hit zero references so can
207 * be released and the lists must be updated.
208 *
209 * Note: This code removes the node from a list. Make sure any list
210 * iteration is performed using list_for_each_safe().
211 */
212static void memrar_release_block_i(struct kref *ref)
213{
214 /*
215 * Last reference is being released. Remove from the table,
216 * and reclaim resources.
217 */
218
219 struct memrar_buffer_info * const node =
220 container_of(ref, struct memrar_buffer_info, refcount);
221
222 struct RAR_block_info * const user_info =
223 &node->buffer.info;
224
225 struct memrar_allocator * const allocator =
226 memrars[user_info->type].allocator;
227
228 list_del(&node->list);
229
230 memrar_allocator_free(allocator, user_info->handle);
231
232 kfree(node);
233}
234
235/**
236 * memrar_init_rar_resources - configure a RAR
237 * @rarnum: rar that has been allocated
238 * @devname: name of our device
239 *
240 * Initialize RAR parameters, such as bus addresses, etc and make
241 * the resource accessible.
242 */
243static int memrar_init_rar_resources(int rarnum, char const *devname)
244{
245 /* ---- Sanity Checks ----
246 * 1. RAR bus addresses in both Lincroft and Langwell RAR
247 * registers should be the same.
248 * a. There's no way we can do this through IA.
249 *
250 * 2. Secure device ID in Langwell RAR registers should be set
251 * appropriately, e.g. only LPE DMA for the audio RAR, and
252 * security for the other Langwell based RAR registers.
253 * a. There's no way we can do this through IA.
254 *
255 * 3. Audio and video RAR registers and RAR access should be
256 * locked down. If not, enable RAR access control. Except
257 * for debugging purposes, there is no reason for them to
258 * be unlocked.
259 * a. We can only do this for the Lincroft (IA) side.
260 *
261 * @todo Should the RAR handler driver even be aware of audio
262 * and video RAR settings?
263 */
264
265 /*
266 * RAR buffer block size.
267 *
268 * We choose it to be the size of a page to simplify the
269 * /dev/memrar mmap() implementation and usage. Otherwise
270 * paging is not involved once an RAR is locked down.
271 */
272 static size_t const RAR_BLOCK_SIZE = PAGE_SIZE;
273
274 dma_addr_t low, high;
275 struct memrar_rar_info * const rar = &memrars[rarnum];
276
277 BUG_ON(MRST_NUM_RAR != ARRAY_SIZE(memrars));
278 BUG_ON(!memrar_is_valid_rar_type(rarnum));
279 BUG_ON(rar->allocated);
280
281 if (rar_get_address(rarnum, &low, &high) != 0)
282 /* No RAR is available. */
283 return -ENODEV;
284
285 if (low == 0 || high == 0) {
286 rar->base = 0;
287 rar->length = 0;
288 rar->iobase = NULL;
289 rar->allocator = NULL;
290 return -ENOSPC;
291 }
292
293 /*
294 * @todo Verify that LNC and LNW RAR register contents
295 * (addresses, security, etc.) are compatible and
296 * consistent.
297 */
298
299 rar->length = high - low + 1;
300
301 /* Claim RAR memory as our own. */
302 if (request_mem_region(low, rar->length, devname) == NULL) {
303 rar->length = 0;
304 pr_err("%s: Unable to claim RAR[%d] memory.\n",
305 devname, rarnum);
306 pr_err("%s: RAR[%d] disabled.\n", devname, rarnum);
307 return -EBUSY;
308 }
309
310 rar->base = low;
311
312 /*
313 * Now map it into the kernel address space.
314 *
315 * Note that the RAR memory may only be accessed by IA
316 * when debugging. Otherwise attempts to access the
317 * RAR memory when it is locked down will result in
318 * behavior similar to writing to /dev/null and
319 * reading from /dev/zero. This behavior is enforced
320 * by the hardware. Even if we don't access the
321 * memory, mapping it into the kernel provides us with
322 * a convenient RAR handle to bus address mapping.
323 */
324 rar->iobase = ioremap_nocache(rar->base, rar->length);
325 if (rar->iobase == NULL) {
326 pr_err("%s: Unable to map RAR memory.\n", devname);
327 release_mem_region(low, rar->length);
328 return -ENOMEM;
329 }
330
331 /* Initialize corresponding memory allocator. */
332 rar->allocator = memrar_create_allocator((unsigned long) rar->iobase,
333 rar->length, RAR_BLOCK_SIZE);
334 if (rar->allocator == NULL) {
335 iounmap(rar->iobase);
336 release_mem_region(low, rar->length);
337 return -ENOMEM;
338 }
339
340 pr_info("%s: RAR[%d] bus address range = [0x%lx, 0x%lx]\n",
341 devname, rarnum, (unsigned long) low, (unsigned long) high);
342
343 pr_info("%s: RAR[%d] size = %zu KiB\n",
344 devname, rarnum, rar->allocator->capacity / 1024);
345
346 rar->allocated = 1;
347 return 0;
348}
349
350/**
351 * memrar_fini_rar_resources - free up RAR resources
352 *
353 * Finalize RAR resources. Free up the resource tables, hand the memory
354 * back to the kernel, unmap the device and release the address space.
355 */
356static void memrar_fini_rar_resources(void)
357{
358 int z;
359 struct memrar_buffer_info *pos;
360 struct memrar_buffer_info *tmp;
361
362 /*
363 * @todo Do we need to hold a lock at this point in time?
364 * (module initialization failure or exit?)
365 */
366
367 for (z = MRST_NUM_RAR; z-- != 0; ) {
368 struct memrar_rar_info * const rar = &memrars[z];
369
370 if (!rar->allocated)
371 continue;
372
373 /* Clean up remaining resources. */
374
375 list_for_each_entry_safe(pos,
376 tmp,
377 &rar->buffers.list,
378 list) {
379 kref_put(&pos->refcount, memrar_release_block_i);
380 }
381
382 memrar_destroy_allocator(rar->allocator);
383 rar->allocator = NULL;
384
385 iounmap(rar->iobase);
386 release_mem_region(rar->base, rar->length);
387
388 rar->iobase = NULL;
389 rar->base = 0;
390 rar->length = 0;
391
392 unregister_rar(z);
393 }
394}
395
396/**
397 * memrar_reserve_block - handle an allocation request
398 * @request: block being requested
399 * @filp: owner it is tied to
400 *
401 * Allocate a block of the requested RAR. If successful return the
402 * request object filled in and zero, if not report an error code
403 */
404
405static long memrar_reserve_block(struct RAR_buffer *request,
406 struct file *filp)
407{
408 struct RAR_block_info * const rinfo = &request->info;
409 struct RAR_buffer *buffer;
410 struct memrar_buffer_info *buffer_info;
411 u32 handle;
412 struct memrar_rar_info *rar = NULL;
413
414 /* Prevent array overflow. */
415 if (!memrar_is_valid_rar_type(rinfo->type))
416 return -EINVAL;
417
418 rar = &memrars[rinfo->type];
419 if (!rar->allocated)
420 return -ENODEV;
421
422 /* Reserve memory in RAR. */
423 handle = memrar_allocator_alloc(rar->allocator, rinfo->size);
424 if (handle == 0)
425 return -ENOMEM;
426
427 buffer_info = kmalloc(sizeof(*buffer_info), GFP_KERNEL);
428
429 if (buffer_info == NULL) {
430 memrar_allocator_free(rar->allocator, handle);
431 return -ENOMEM;
432 }
433
434 buffer = &buffer_info->buffer;
435 buffer->info.type = rinfo->type;
436 buffer->info.size = rinfo->size;
437
438 /* Memory handle corresponding to the bus address. */
439 buffer->info.handle = handle;
440 buffer->bus_address = memrar_get_bus_address(rar, handle);
441
442 /*
443 * Keep track of owner so that we can later cleanup if
444 * necessary.
445 */
446 buffer_info->owner = filp;
447
448 kref_init(&buffer_info->refcount);
449
450 mutex_lock(&rar->lock);
451 list_add(&buffer_info->list, &rar->buffers.list);
452 mutex_unlock(&rar->lock);
453
454 rinfo->handle = buffer->info.handle;
455 request->bus_address = buffer->bus_address;
456
457 return 0;
458}
459
460/**
461 * memrar_release_block - release a RAR block
462 * @addr: address in RAR space
463 *
464 * Release a previously allocated block. Releases act on complete
465 * blocks, partially freeing a block is not supported
466 */
467
468static long memrar_release_block(u32 addr)
469{
470 struct memrar_buffer_info *pos;
471 struct memrar_buffer_info *tmp;
472 struct memrar_rar_info * const rar = memrar_get_rar_info(addr);
473 long result = -EINVAL;
474
475 if (rar == NULL)
476 return -ENOENT;
477
478 mutex_lock(&rar->lock);
479
480 /*
481 * Iterate through the buffer list to find the corresponding
482 * buffer to be released.
483 */
484 list_for_each_entry_safe(pos,
485 tmp,
486 &rar->buffers.list,
487 list) {
488 struct RAR_block_info * const info =
489 &pos->buffer.info;
490
491 /*
492 * Take into account handle offsets that may have been
493 * added to the base handle, such as in the following
494 * scenario:
495 *
496 * u32 handle = base + offset;
497 * rar_handle_to_bus(handle);
498 * rar_release(handle);
499 */
500 if (addr >= info->handle
501 && addr < (info->handle + info->size)
502 && memrar_is_valid_rar_type(info->type)) {
503 kref_put(&pos->refcount, memrar_release_block_i);
504 result = 0;
505 break;
506 }
507 }
508
509 mutex_unlock(&rar->lock);
510
511 return result;
512}
513
514/**
515 * memrar_get_stat - read statistics for a RAR
516 * @r: statistics to be filled in
517 *
518 * Returns the statistics data for the RAR, or an error code if
519 * the request cannot be completed
520 */
521static long memrar_get_stat(struct RAR_stat *r)
522{
523 struct memrar_allocator *allocator;
524
525 if (!memrar_is_valid_rar_type(r->type))
526 return -EINVAL;
527
528 if (!memrars[r->type].allocated)
529 return -ENODEV;
530
531 allocator = memrars[r->type].allocator;
532
533 BUG_ON(allocator == NULL);
534
535 /*
536 * Allocator capacity doesn't change over time. No
537 * need to synchronize.
538 */
539 r->capacity = allocator->capacity;
540
541 mutex_lock(&allocator->lock);
542 r->largest_block_size = allocator->largest_free_area;
543 mutex_unlock(&allocator->lock);
544 return 0;
545}
546
547/**
548 * memrar_ioctl - ioctl callback
549 * @filp: file issuing the request
550 * @cmd: command
551 * @arg: pointer to control information
552 *
553 * Perform one of the ioctls supported by the memrar device
554 */
555
556static long memrar_ioctl(struct file *filp,
557 unsigned int cmd,
558 unsigned long arg)
559{
560 void __user *argp = (void __user *)arg;
561 long result = 0;
562
563 struct RAR_buffer buffer;
564 struct RAR_block_info * const request = &buffer.info;
565 struct RAR_stat rar_info;
566 u32 rar_handle;
567
568 switch (cmd) {
569 case RAR_HANDLER_RESERVE:
570 if (copy_from_user(request,
571 argp,
572 sizeof(*request)))
573 return -EFAULT;
574
575 result = memrar_reserve_block(&buffer, filp);
576 if (result != 0)
577 return result;
578
579 return copy_to_user(argp, request, sizeof(*request)) ? -EFAULT : 0;
580
581 case RAR_HANDLER_RELEASE:
582 if (copy_from_user(&rar_handle,
583 argp,
584 sizeof(rar_handle)))
585 return -EFAULT;
586
587 return memrar_release_block(rar_handle);
588
589 case RAR_HANDLER_STAT:
590 if (copy_from_user(&rar_info,
591 argp,
592 sizeof(rar_info)))
593 return -EFAULT;
594
595 /*
596 * Populate the RAR_stat structure based on the RAR
597 * type given by the user
598 */
599 if (memrar_get_stat(&rar_info) != 0)
600 return -EINVAL;
601
602 /*
603 * @todo Do we need to verify destination pointer
604 * "argp" is non-zero? Is that already done by
605 * copy_to_user()?
606 */
607 return copy_to_user(argp,
608 &rar_info,
609 sizeof(rar_info)) ? -EFAULT : 0;
610
611 default:
612 return -ENOTTY;
613 }
614
615 return 0;
616}
617
618/**
619 * memrar_mmap - mmap helper for debugging
620 * @filp: handle doing the mapping
621 * @vma: memory area
622 *
623 * Support the mmap operation on the RAR space for debugging systems
624 * when the memory is not locked down.
625 */
626
627static int memrar_mmap(struct file *filp, struct vm_area_struct *vma)
628{
629 /*
630 * This mmap() implementation is predominantly useful for
631 * debugging since the CPU will be prevented from accessing
632 * RAR memory by the hardware when RAR is properly locked
633 * down.
634 *
635 * In order for this implementation to be useful RAR memory
636 * must be not be locked down. However, we only want to do
637 * that when debugging. DO NOT leave RAR memory unlocked in a
638 * deployed device that utilizes RAR.
639 */
640
641 size_t const size = vma->vm_end - vma->vm_start;
642
643 /* Users pass the RAR handle as the mmap() offset parameter. */
644 unsigned long const handle = vma->vm_pgoff << PAGE_SHIFT;
645
646 struct memrar_rar_info * const rar = memrar_get_rar_info(handle);
647 unsigned long pfn;
648
649 /* Only allow privileged apps to go poking around this way */
650 if (!capable(CAP_SYS_RAWIO))
651 return -EPERM;
652
653 /* Invalid RAR handle or size passed to mmap(). */
654 if (rar == NULL
655 || handle == 0
656 || size > rar->length - (handle - (unsigned long) rar->iobase))
657 return -EINVAL;
658
659 /*
660 * Retrieve physical address corresponding to the RAR handle,
661 * and convert it to a page frame.
662 */
663 pfn = memrar_get_physical_address(rar, handle) >> PAGE_SHIFT;
664
665
666 pr_debug("memrar: mapping RAR range [0x%lx, 0x%lx) into user space.\n",
667 handle,
668 handle + size);
669
670 /*
671 * Map RAR memory into user space. This is really only useful
672 * for debugging purposes since the memory won't be
673 * accessible, i.e. reads return zero and writes are ignored,
674 * when RAR access control is enabled.
675 */
676 if (remap_pfn_range(vma,
677 vma->vm_start,
678 pfn,
679 size,
680 vma->vm_page_prot))
681 return -EAGAIN;
682
683 /* vma->vm_ops = &memrar_mem_ops; */
684
685 return 0;
686}
687
688/**
689 * memrar_open - device open method
690 * @inode: inode to open
691 * @filp: file handle
692 *
693 * As we support multiple arbitrary opens there is no work to be done
694 * really.
695 */
696
697static int memrar_open(struct inode *inode, struct file *filp)
698{
699 nonseekable_open(inode, filp);
700 return 0;
701}
702
703/**
704 * memrar_release - close method for miscdev
705 * @inode: inode of device
706 * @filp: handle that is going away
707 *
708 * Free up all the regions that belong to this file handle. We use
709 * the handle as a natural Linux style 'lifetime' indicator and to
710 * ensure resources are not leaked when their owner explodes in an
711 * unplanned fashion.
712 */
713
714static int memrar_release(struct inode *inode, struct file *filp)
715{
716 /* Free all regions associated with the given file handle. */
717
718 struct memrar_buffer_info *pos;
719 struct memrar_buffer_info *tmp;
720 int z;
721
722 for (z = 0; z != MRST_NUM_RAR; ++z) {
723 struct memrar_rar_info * const rar = &memrars[z];
724
725 mutex_lock(&rar->lock);
726
727 list_for_each_entry_safe(pos,
728 tmp,
729 &rar->buffers.list,
730 list) {
731 if (filp == pos->owner)
732 kref_put(&pos->refcount,
733 memrar_release_block_i);
734 }
735
736 mutex_unlock(&rar->lock);
737 }
738
739 return 0;
740}
741
742/**
743 * rar_reserve - reserve RAR memory
744 * @buffers: buffers to reserve
745 * @count: number wanted
746 *
747 * Reserve a series of buffers in the RAR space. Returns the number of
748 * buffers successfully allocated
749 */
750
751size_t rar_reserve(struct RAR_buffer *buffers, size_t count)
752{
753 struct RAR_buffer * const end =
754 (buffers == NULL ? buffers : buffers + count);
755 struct RAR_buffer *i;
756
757 size_t reserve_count = 0;
758
759 for (i = buffers; i != end; ++i) {
760 if (memrar_reserve_block(i, NULL) == 0)
761 ++reserve_count;
762 else
763 i->bus_address = 0;
764 }
765
766 return reserve_count;
767}
768EXPORT_SYMBOL(rar_reserve);
769
770/**
771 * rar_release - return RAR buffers
772 * @buffers: buffers to release
773 * @count: number of buffers to release
774 *
775 * Return a set of buffers to the RAR pool
776 */
777
778size_t rar_release(struct RAR_buffer *buffers, size_t count)
779{
780 struct RAR_buffer * const end =
781 (buffers == NULL ? buffers : buffers + count);
782 struct RAR_buffer *i;
783
784 size_t release_count = 0;
785
786 for (i = buffers; i != end; ++i) {
787 u32 * const handle = &i->info.handle;
788 if (memrar_release_block(*handle) == 0) {
789 /*
790 * @todo We assume we should do this each time
791 * the ref count is decremented. Should
792 * we instead only do this when the ref
793 * count has dropped to zero, and the
794 * buffer has been completely
795 * released/unmapped?
796 */
797 *handle = 0;
798 ++release_count;
799 }
800 }
801
802 return release_count;
803}
804EXPORT_SYMBOL(rar_release);
805
806/**
807 * rar_handle_to_bus - RAR to bus address
808 * @buffers: RAR buffer structure
809 * @count: number of buffers to convert
810 *
811 * Turn a list of RAR handle mappings into actual bus addresses. Note
812 * that when the device is locked down the bus addresses in question
813 * are not CPU accessible.
814 */
815
816size_t rar_handle_to_bus(struct RAR_buffer *buffers, size_t count)
817{
818 struct RAR_buffer * const end =
819 (buffers == NULL ? buffers : buffers + count);
820 struct RAR_buffer *i;
821 struct memrar_buffer_info *pos;
822
823 size_t conversion_count = 0;
824
825 /*
826 * Find all bus addresses corresponding to the given handles.
827 *
828 * @todo Not liking this nested loop. Optimize.
829 */
830 for (i = buffers; i != end; ++i) {
831 struct memrar_rar_info * const rar =
832 memrar_get_rar_info(i->info.handle);
833
834 /*
835 * Check if we have a bogus handle, and then continue
836 * with remaining buffers.
837 */
838 if (rar == NULL) {
839 i->bus_address = 0;
840 continue;
841 }
842
843 mutex_lock(&rar->lock);
844
845 list_for_each_entry(pos, &rar->buffers.list, list) {
846 struct RAR_block_info * const user_info =
847 &pos->buffer.info;
848
849 /*
850 * Take into account handle offsets that may
851 * have been added to the base handle, such as
852 * in the following scenario:
853 *
854 * u32 handle = base + offset;
855 * rar_handle_to_bus(handle);
856 */
857
858 if (i->info.handle >= user_info->handle
859 && i->info.handle < (user_info->handle
860 + user_info->size)) {
861 u32 const offset =
862 i->info.handle - user_info->handle;
863
864 i->info.type = user_info->type;
865 i->info.size = user_info->size - offset;
866 i->bus_address =
867 pos->buffer.bus_address
868 + offset;
869
870 /* Increment the reference count. */
871 kref_get(&pos->refcount);
872
873 ++conversion_count;
874 break;
875 } else {
876 i->bus_address = 0;
877 }
878 }
879
880 mutex_unlock(&rar->lock);
881 }
882
883 return conversion_count;
884}
885EXPORT_SYMBOL(rar_handle_to_bus);
886
887static const struct file_operations memrar_fops = {
888 .owner = THIS_MODULE,
889 .unlocked_ioctl = memrar_ioctl,
890 .mmap = memrar_mmap,
891 .open = memrar_open,
892 .release = memrar_release,
893 .llseek = no_llseek,
894};
895
896static struct miscdevice memrar_miscdev = {
897 .minor = MISC_DYNAMIC_MINOR, /* dynamic allocation */
898 .name = "memrar", /* /dev/memrar */
899 .fops = &memrar_fops
900};
901
902static char const banner[] __initdata =
903 KERN_INFO
904 "Intel RAR Handler: " MEMRAR_VER " initialized.\n";
905
906/**
907 * memrar_registration_callback - RAR obtained
908 * @rar: RAR number
909 *
910 * We have been granted ownership of the RAR. Add it to our memory
911 * management tables
912 */
913
914static int memrar_registration_callback(unsigned long rar)
915{
916 /*
917 * We initialize the RAR parameters early on so that we can
918 * discontinue memrar device initialization and registration
919 * if suitably configured RARs are not available.
920 */
921 return memrar_init_rar_resources(rar, memrar_miscdev.name);
922}
923
924/**
925 * memrar_init - initialise RAR support
926 *
927 * Initialise support for RAR handlers. This may get loaded before
928 * the RAR support is activated, but the callbacks on the registration
929 * will handle that situation for us anyway.
930 */
931
932static int __init memrar_init(void)
933{
934 int err;
935 int i;
936
937 printk(banner);
938
939 /*
940 * Some delayed initialization is performed in this driver.
941 * Make sure resources that are used during driver clean-up
942 * (e.g. during driver's release() function) are fully
943 * initialized before first use. This is particularly
944 * important for the case when the delayed initialization
945 * isn't completed, leaving behind a partially initialized
946 * driver.
947 *
948 * Such a scenario can occur when RAR is not available on the
949 * platform, and the driver is release()d.
950 */
951 for (i = 0; i != ARRAY_SIZE(memrars); ++i) {
952 struct memrar_rar_info * const rar = &memrars[i];
953 mutex_init(&rar->lock);
954 INIT_LIST_HEAD(&rar->buffers.list);
955 }
956
957 err = misc_register(&memrar_miscdev);
958 if (err)
959 return err;
960
961 /* Now claim the two RARs we want */
962 err = register_rar(0, memrar_registration_callback, 0);
963 if (err)
964 goto fail;
965
966 err = register_rar(1, memrar_registration_callback, 1);
967 if (err == 0)
968 return 0;
969
970 /* It is possible that rar 0 registered and allocated resources and rar 1
971 then failed, so do a full resource free. */
972 memrar_fini_rar_resources();
973fail:
974 misc_deregister(&memrar_miscdev);
975 return err;
976}
977
978/**
979 * memrar_exit - unregister and unload
980 *
981 * Unregister the device and then unload any mappings and release
982 * the RAR resources
983 */
984
985static void __exit memrar_exit(void)
986{
987 misc_deregister(&memrar_miscdev);
988 memrar_fini_rar_resources();
989}
990
991
992module_init(memrar_init);
993module_exit(memrar_exit);
994
995
996MODULE_AUTHOR("Ossama Othman <ossama.othman@intel.com>");
997MODULE_DESCRIPTION("Intel Restricted Access Region Handler");
998MODULE_LICENSE("GPL");
999MODULE_VERSION(MEMRAR_VER);
1000
1001
1002
1003/*
1004 Local Variables:
1005 c-file-style: "linux"
1006 End:
1007*/