author     Cliff Wickman <cpw@sgi.com>  2007-09-19 01:46:31 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-09-19 14:24:17 -0400
commit     4191ba26dae8338892e73f6e67bd18068b4344e9
tree       150cbb20ba7b2c1e73b378df22c8179425ace848 /drivers/char/mspec.c
parent     49cc886aea1d79cdb0ea409554866238b07fe26f
mspec: handle shrinking virtual memory areas
The shrinking of a virtual memory area that is mmap(2)'d to a memory
special file (device drivers/char/mspec.c) can cause a panic.
If the mapped size of the vma (vm_area_struct) is very large, mspec allocates
a large vma_data structure with vmalloc(). But such a vma can be shrunk by
an munmap(2). The driver uses each vma's current size to deduce whether its
vma_data structure was allocated by kmalloc() or vmalloc().
So if the vma was shrunk it appears to have been allocated by kmalloc(),
and mspec attempts to free it with kfree(). This results in a panic.
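To see the failure mode, here is the old teardown logic in mspec_close(),
condensed from the removed lines in the diff below (the page-freeing loop
is omitted):

    /* Old logic: re-derive the allocation size from the vma's *current* bounds. */
    pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
    vdata_size = sizeof(struct vma_data) + pages * sizeof(long);
    if (vdata_size <= PAGE_SIZE)
            kfree(vdata);   /* wrong if vdata was vmalloc()'d while the vma was large */
    else
            vfree(vdata);

Once munmap(2) has shrunk the vma, vdata_size can fall to PAGE_SIZE or below,
and a vmalloc()'d structure is handed to kfree().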
This patch avoids the panic by recording the type of the allocation, and it
also makes mspec work correctly as munmap(2) calls split the vma into pieces.
All vma's derived from such a split vma share the same vma_data structure that
represents all the pages mapped into this set of vma's. The mspec driver
must be made capable of using the right portion of the structure for each
member vma. In other words, it must index into the array of page addresses
using the portion of the array that represents the current vma. This is
enabled by storing the vma group's vm_start in the vma_data structure.
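Condensed from mspec_close() in the diff below (mspec_nopfn() computes its
index the same way), the per-vma indexing becomes:

    /* Index relative to the original (unsplit) vma base, not the current one. */
    index = (vma->vm_start - vdata->vm_start) >> PAGE_SHIFT;
    last_index = (vma->vm_end - vdata->vm_start) >> PAGE_SHIFT;
    for (; index < last_index; index++) {
            /* operate only on the maddr[] slots owned by this member vma */
    }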
The shared vma_data's are not protected by mm->mmap_sem in the fork() case,
so the reference count is left as atomic_t.
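Teardown therefore frees the shared structure only when the last reference
is dropped, as in mspec_close() below:

    if (!atomic_dec_and_test(&vdata->refcnt))
            return;         /* other vmas still reference this vma_data */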
Signed-off-by: Cliff Wickman <cpw@sgi.com>
Acked-by: Jes Sorensen <jes@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers/char/mspec.c')
-rw-r--r--  drivers/char/mspec.c | 69 ++++++++++++++++++++++++++++++++++++++++++++++++---------------------
1 file changed, 48 insertions(+), 21 deletions(-)
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index c08a4152ee8f..049a46cc9f87 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -67,7 +67,7 @@
 /*
  * Page types allocated by the device.
  */
-enum {
+enum mspec_page_type {
         MSPEC_FETCHOP = 1,
         MSPEC_CACHED,
         MSPEC_UNCACHED
@@ -83,15 +83,25 @@ static int is_sn2;
  * One of these structures is allocated when an mspec region is mmaped. The
  * structure is pointed to by the vma->vm_private_data field in the vma struct.
  * This structure is used to record the addresses of the mspec pages.
+ * This structure is shared by all vma's that are split off from the
+ * original vma when split_vma()'s are done.
+ *
+ * The refcnt is incremented atomically because mm->mmap_sem does not
+ * protect in fork case where multiple tasks share the vma_data.
  */
 struct vma_data {
         atomic_t refcnt;        /* Number of vmas sharing the data. */
-        spinlock_t lock;        /* Serialize access to the vma. */
+        spinlock_t lock;        /* Serialize access to this structure. */
         int count;              /* Number of pages allocated. */
-        int type;               /* Type of pages allocated. */
+        enum mspec_page_type type; /* Type of pages allocated. */
+        int flags;              /* See VMD_xxx below. */
+        unsigned long vm_start; /* Original (unsplit) base. */
+        unsigned long vm_end;   /* Original (unsplit) end. */
         unsigned long maddr[0]; /* Array of MSPEC addresses. */
 };
 
+#define VMD_VMALLOCED 0x1       /* vmalloc'd rather than kmalloc'd */
+
 /* used on shub2 to clear FOP cache in the HUB */
 static unsigned long scratch_page[MAX_NUMNODES];
 #define SH2_AMO_CACHE_ENTRIES   4
@@ -129,8 +139,8 @@ mspec_zero_block(unsigned long addr, int len)
  * mspec_open
  *
  * Called when a device mapping is created by a means other than mmap
- * (via fork, etc.). Increments the reference count on the underlying
- * mspec data so it is not freed prematurely.
+ * (via fork, munmap, etc.). Increments the reference count on the
+ * underlying mspec data so it is not freed prematurely.
  */
 static void
 mspec_open(struct vm_area_struct *vma)
@@ -151,34 +161,44 @@ static void
 mspec_close(struct vm_area_struct *vma)
 {
         struct vma_data *vdata;
-        int i, pages, result, vdata_size;
+        int index, last_index, result;
+        unsigned long my_page;
 
         vdata = vma->vm_private_data;
-        if (!atomic_dec_and_test(&vdata->refcnt))
-                return;
 
-        pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
-        vdata_size = sizeof(struct vma_data) + pages * sizeof(long);
-        for (i = 0; i < pages; i++) {
-                if (vdata->maddr[i] == 0)
+        BUG_ON(vma->vm_start < vdata->vm_start || vma->vm_end > vdata->vm_end);
+
+        spin_lock(&vdata->lock);
+        index = (vma->vm_start - vdata->vm_start) >> PAGE_SHIFT;
+        last_index = (vma->vm_end - vdata->vm_start) >> PAGE_SHIFT;
+        for (; index < last_index; index++) {
+                if (vdata->maddr[index] == 0)
                         continue;
                 /*
                  * Clear the page before sticking it back
                  * into the pool.
                  */
-                result = mspec_zero_block(vdata->maddr[i], PAGE_SIZE);
+                my_page = vdata->maddr[index];
+                vdata->maddr[index] = 0;
+                spin_unlock(&vdata->lock);
+                result = mspec_zero_block(my_page, PAGE_SIZE);
                 if (!result)
-                        uncached_free_page(vdata->maddr[i]);
+                        uncached_free_page(my_page);
                 else
                         printk(KERN_WARNING "mspec_close(): "
                                "failed to zero page %i\n",
                                result);
+                spin_lock(&vdata->lock);
         }
+        spin_unlock(&vdata->lock);
 
-        if (vdata_size <= PAGE_SIZE)
-                kfree(vdata);
-        else
+        if (!atomic_dec_and_test(&vdata->refcnt))
+                return;
+
+        if (vdata->flags & VMD_VMALLOCED)
                 vfree(vdata);
+        else
+                kfree(vdata);
 }
 
 
@@ -195,7 +215,8 @@ mspec_nopfn(struct vm_area_struct *vma, unsigned long address)
         int index;
         struct vma_data *vdata = vma->vm_private_data;
 
-        index = (address - vma->vm_start) >> PAGE_SHIFT;
+        BUG_ON(address < vdata->vm_start || address >= vdata->vm_end);
+        index = (address - vdata->vm_start) >> PAGE_SHIFT;
         maddr = (volatile unsigned long) vdata->maddr[index];
         if (maddr == 0) {
                 maddr = uncached_alloc_page(numa_node_id());
@@ -237,10 +258,11 @@ static struct vm_operations_struct mspec_vm_ops = {
  * underlying pages.
  */
 static int
-mspec_mmap(struct file *file, struct vm_area_struct *vma, int type)
+mspec_mmap(struct file *file, struct vm_area_struct *vma,
+                                        enum mspec_page_type type)
 {
         struct vma_data *vdata;
-        int pages, vdata_size;
+        int pages, vdata_size, flags = 0;
 
         if (vma->vm_pgoff != 0)
                 return -EINVAL;
@@ -255,12 +277,17 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma, int type)
         vdata_size = sizeof(struct vma_data) + pages * sizeof(long);
         if (vdata_size <= PAGE_SIZE)
                 vdata = kmalloc(vdata_size, GFP_KERNEL);
-        else
+        else {
                 vdata = vmalloc(vdata_size);
+                flags = VMD_VMALLOCED;
+        }
         if (!vdata)
                 return -ENOMEM;
         memset(vdata, 0, vdata_size);
 
+        vdata->vm_start = vma->vm_start;
+        vdata->vm_end = vma->vm_end;
+        vdata->flags = flags;
         vdata->type = type;
         spin_lock_init(&vdata->lock);
         vdata->refcnt = ATOMIC_INIT(1);