Diffstat (limited to 'drivers/char/mspec.c')
-rw-r--r--	drivers/char/mspec.c	65
1 file changed, 41 insertions(+), 24 deletions(-)
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index c08a4152ee8f..04ac155d3a07 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -67,7 +67,7 @@
 /*
  * Page types allocated by the device.
  */
-enum {
+enum mspec_page_type {
 	MSPEC_FETCHOP = 1,
 	MSPEC_CACHED,
 	MSPEC_UNCACHED
@@ -83,15 +83,25 @@ static int is_sn2;
  * One of these structures is allocated when an mspec region is mmaped. The
  * structure is pointed to by the vma->vm_private_data field in the vma struct.
  * This structure is used to record the addresses of the mspec pages.
+ * This structure is shared by all vma's that are split off from the
+ * original vma when split_vma()'s are done.
+ *
+ * The refcnt is incremented atomically because mm->mmap_sem does not
+ * protect in fork case where multiple tasks share the vma_data.
  */
 struct vma_data {
 	atomic_t refcnt;	/* Number of vmas sharing the data. */
-	spinlock_t lock;	/* Serialize access to the vma. */
+	spinlock_t lock;	/* Serialize access to this structure. */
 	int count;		/* Number of pages allocated. */
-	int type;		/* Type of pages allocated. */
+	enum mspec_page_type type; /* Type of pages allocated. */
+	int flags;		/* See VMD_xxx below. */
+	unsigned long vm_start;	/* Original (unsplit) base. */
+	unsigned long vm_end;	/* Original (unsplit) end. */
 	unsigned long maddr[0];	/* Array of MSPEC addresses. */
 };
 
+#define VMD_VMALLOCED 0x1	/* vmalloc'd rather than kmalloc'd */
+
 /* used on shub2 to clear FOP cache in the HUB */
 static unsigned long scratch_page[MAX_NUMNODES];
 #define SH2_AMO_CACHE_ENTRIES	4
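
The refcnt comment above relies on the standard vm_operations refcounting idiom: ->open takes a reference for every new vma that points at the shared data, ->close drops one, and the last put frees. A minimal sketch of that idiom follows; the names demo_data, demo_open, and demo_close are hypothetical, not from this patch.

#include <linux/atomic.h>
#include <linux/mm.h>
#include <linux/slab.h>

/* Hypothetical stand-in for vma_data; not from mspec.c. */
struct demo_data {
	atomic_t refcnt;		/* one count per vma sharing this */
	/* ... per-mapping bookkeeping ... */
};

static void demo_open(struct vm_area_struct *vma)
{
	struct demo_data *d = vma->vm_private_data;

	/* fork()/split_vma() handed another vma a pointer to d */
	atomic_inc(&d->refcnt);
}

static void demo_close(struct vm_area_struct *vma)
{
	struct demo_data *d = vma->vm_private_data;

	if (!atomic_dec_and_test(&d->refcnt))
		return;			/* other vmas still reference d */
	kfree(d);			/* last reference: free exactly once */
}

The counter must be atomic because, as the patch comment notes, the parent's mm->mmap_sem does not serialize the fork path against other tasks already sharing the structure.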
@@ -129,8 +139,8 @@ mspec_zero_block(unsigned long addr, int len)
  * mspec_open
  *
  * Called when a device mapping is created by a means other than mmap
- * (via fork, etc.).  Increments the reference count on the underlying
- * mspec data so it is not freed prematurely.
+ * (via fork, munmap, etc.).  Increments the reference count on the
+ * underlying mspec data so it is not freed prematurely.
  */
 static void
 mspec_open(struct vm_area_struct *vma)
@@ -145,43 +155,43 @@ mspec_open(struct vm_area_struct *vma)
  * mspec_close
  *
  * Called when unmapping a device mapping. Frees all mspec pages
- * belonging to the vma.
+ * belonging to all the vma's sharing this vma_data structure.
  */
 static void
 mspec_close(struct vm_area_struct *vma)
 {
 	struct vma_data *vdata;
-	int i, pages, result, vdata_size;
+	int index, last_index;
+	unsigned long my_page;
 
 	vdata = vma->vm_private_data;
+
 	if (!atomic_dec_and_test(&vdata->refcnt))
 		return;
 
-	pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
-	vdata_size = sizeof(struct vma_data) + pages * sizeof(long);
-	for (i = 0; i < pages; i++) {
-		if (vdata->maddr[i] == 0)
+	last_index = (vdata->vm_end - vdata->vm_start) >> PAGE_SHIFT;
+	for (index = 0; index < last_index; index++) {
+		if (vdata->maddr[index] == 0)
 			continue;
 		/*
 		 * Clear the page before sticking it back
 		 * into the pool.
 		 */
-		result = mspec_zero_block(vdata->maddr[i], PAGE_SIZE);
-		if (!result)
-			uncached_free_page(vdata->maddr[i]);
+		my_page = vdata->maddr[index];
+		vdata->maddr[index] = 0;
+		if (!mspec_zero_block(my_page, PAGE_SIZE))
+			uncached_free_page(my_page);
 		else
 			printk(KERN_WARNING "mspec_close(): "
-			       "failed to zero page %i\n",
-			       result);
+			       "failed to zero page %ld\n", my_page);
 	}
 
-	if (vdata_size <= PAGE_SIZE)
-		kfree(vdata);
-	else
+	if (vdata->flags & VMD_VMALLOCED)
 		vfree(vdata);
+	else
+		kfree(vdata);
 }
 
-
 /*
  * mspec_nopfn
  *
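
Note the ordering in the new loop: each address is copied out of maddr[] and the slot zeroed before the page is scrubbed and freed, so nothing walking the shared table can pick up an address that is about to be released. The bare shape of that take-then-clear idiom, as a sketch in which free_entry() is a hypothetical helper standing in for mspec_zero_block() plus uncached_free_page():

static void free_entry(unsigned long entry)
{
	/* hypothetical: scrub and return the page to its allocator */
}

static void release_all(unsigned long *table, int nentries)
{
	unsigned long entry;
	int i;

	for (i = 0; i < nentries; i++) {
		if (table[i] == 0)
			continue;
		entry = table[i];	/* snapshot the live entry */
		table[i] = 0;		/* unpublish it first... */
		free_entry(entry);	/* ...then release the page */
	}
}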
@@ -195,7 +205,8 @@ mspec_nopfn(struct vm_area_struct *vma, unsigned long address)
 	int index;
 	struct vma_data *vdata = vma->vm_private_data;
 
-	index = (address - vma->vm_start) >> PAGE_SHIFT;
+	BUG_ON(address < vdata->vm_start || address >= vdata->vm_end);
+	index = (address - vdata->vm_start) >> PAGE_SHIFT;
 	maddr = (volatile unsigned long) vdata->maddr[index];
 	if (maddr == 0) {
 		maddr = uncached_alloc_page(numa_node_id());
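
Why the base matters: after split_vma(), a fault can arrive through a vma whose vm_start sits above the original mapping base, while maddr[] is still sized and indexed from the original base recorded in vdata. A worked example with made-up addresses, assuming 4 KiB pages (PAGE_SHIFT == 12):

	original mapping : [0x2000000, 0x2004000)  ->  maddr[0..3]
	split-off vma    : [0x2002000, 0x2004000)
	fault address    : 0x2003000

	(0x2003000 - vdata->vm_start) >> PAGE_SHIFT == 3	/* correct slot */
	(0x2003000 - vma->vm_start)   >> PAGE_SHIFT == 1	/* wrong slot */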
@@ -237,10 +248,11 @@ static struct vm_operations_struct mspec_vm_ops = {
  * underlying pages.
  */
 static int
-mspec_mmap(struct file *file, struct vm_area_struct *vma, int type)
+mspec_mmap(struct file *file, struct vm_area_struct *vma,
+					enum mspec_page_type type)
 {
 	struct vma_data *vdata;
-	int pages, vdata_size;
+	int pages, vdata_size, flags = 0;
 
 	if (vma->vm_pgoff != 0)
 		return -EINVAL;
@@ -255,12 +267,17 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma, int type)
 	vdata_size = sizeof(struct vma_data) + pages * sizeof(long);
 	if (vdata_size <= PAGE_SIZE)
 		vdata = kmalloc(vdata_size, GFP_KERNEL);
-	else
+	else {
 		vdata = vmalloc(vdata_size);
+		flags = VMD_VMALLOCED;
+	}
 	if (!vdata)
 		return -ENOMEM;
 	memset(vdata, 0, vdata_size);
 
+	vdata->vm_start = vma->vm_start;
+	vdata->vm_end = vma->vm_end;
+	vdata->flags = flags;
 	vdata->type = type;
 	spin_lock_init(&vdata->lock);
 	vdata->refcnt = ATOMIC_INIT(1);
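
Recording the allocator choice in vdata->flags is what lets mspec_close() pick the matching free routine without re-deriving the size from a vma that may since have shrunk, which is exactly the bug the old kfree/vfree test had. The general pattern, as a minimal sketch with hypothetical names (demo_buf, demo_alloc, demo_free, DEMO_VMALLOCED):

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#define DEMO_VMALLOCED 0x1	/* hypothetical flag, mirrors VMD_VMALLOCED */

struct demo_buf {
	int flags;
	/* ... payload ... */
};

/* Small buffers come from kmalloc(); large ones fall back to vmalloc(). */
static struct demo_buf *demo_alloc(size_t size)
{
	struct demo_buf *b;

	if (size <= PAGE_SIZE)
		b = kmalloc(size, GFP_KERNEL);
	else
		b = vmalloc(size);
	if (!b)
		return NULL;
	memset(b, 0, size);
	b->flags = (size <= PAGE_SIZE) ? 0 : DEMO_VMALLOCED;
	return b;
}

/* The stored flag, not a recomputed size, selects the matching free. */
static void demo_free(struct demo_buf *b)
{
	if (b->flags & DEMO_VMALLOCED)
		vfree(b);
	else
		kfree(b);
}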