author     Martin Schwidefsky <schwidefsky@de.ibm.com>      2014-04-30 10:04:25 -0400
committer  Christian Borntraeger <borntraeger@de.ibm.com>   2014-08-26 04:09:02 -0400
commit     527e30b41d8b86e9ae7f5b740de416958c0e574e
tree       a19053eaa5ab6bb2f705006a0f7b5b90bb4a1a0b   /arch/s390/mm/pgtable.c
parent     6e0a0431bf7d90ed0b8a0a974ad219617a70cc22
KVM: s390/mm: use radix trees for guest to host mappings
Store the target address for the gmap segments in a radix tree instead of using invalid segment table entries. gmap_translate becomes a simple radix_tree_lookup, gmap_fault is split into the address translation with gmap_translate and the part that does the linking of the gmap shadow page table with the process page table.

A second radix tree is used to keep the pointers to the segment table entries for segments that are mapped in the guest address space. On unmap of a segment the pointer is retrieved from the radix tree and is used to carry out the segment invalidation in the gmap shadow page table. As the radix tree can only store one pointer, each host segment may only be mapped to exactly one guest location.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
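As a rough sketch of the lookup scheme the message describes (illustrative only, not part of the patch; it condenses the new __gmap_translate from the diff below, and the function name example_translate is made up for this note), a guest address resolves to a host address with a single radix tree lookup keyed by the guest segment index:

	/* Illustrative only: guest-to-host translation via the guest_to_host radix tree. */
	static unsigned long example_translate(struct gmap *gmap, unsigned long gaddr)
	{
		unsigned long vmaddr;

		/* guest_to_host is keyed by the guest segment index (gaddr >> PMD_SHIFT) */
		vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
							   gaddr >> PMD_SHIFT);
		if (!vmaddr)
			return -EFAULT;	/* no host segment mapped at this guest address */
		/* keep the byte offset within the 1 MB segment */
		return vmaddr | (gaddr & ~PMD_MASK);
	}

The second tree, host_to_guest, is keyed the other way round (vmaddr >> PMD_SHIFT) and stores the pointer to the shadow segment table entry, which is what lets the patch invalidate a guest segment when the corresponding host page table is unlinked.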
Diffstat (limited to 'arch/s390/mm/pgtable.c')
-rw-r--r--   arch/s390/mm/pgtable.c   621
1 file changed, 270 insertions, 351 deletions
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 16ca8617f2e1..74dfd9eaa300 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -158,17 +158,23 @@ struct gmap *gmap_alloc(struct mm_struct *mm)
 	if (!gmap)
 		goto out;
 	INIT_LIST_HEAD(&gmap->crst_list);
+	INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL);
+	INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC);
+	spin_lock_init(&gmap->guest_table_lock);
 	gmap->mm = mm;
 	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
 	if (!page)
 		goto out_free;
+	page->index = 0;
 	list_add(&page->lru, &gmap->crst_list);
 	table = (unsigned long *) page_to_phys(page);
 	crst_table_init(table, _REGION1_ENTRY_EMPTY);
 	gmap->table = table;
 	gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
 		_ASCE_USER_BITS | __pa(table);
+	down_write(&mm->mmap_sem);
 	list_add(&gmap->list, &mm->context.gmap_list);
+	up_write(&mm->mmap_sem);
 	return gmap;
 
 out_free:
@@ -178,27 +184,6 @@ out:
 }
 EXPORT_SYMBOL_GPL(gmap_alloc);
 
-static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
-{
-	struct gmap_pgtable *mp;
-	struct gmap_rmap *rmap;
-	struct page *page;
-
-	if (*table & _SEGMENT_ENTRY_INVALID)
-		return 0;
-	page = pfn_to_page(*table >> PAGE_SHIFT);
-	mp = (struct gmap_pgtable *) page->index;
-	list_for_each_entry(rmap, &mp->mapper, list) {
-		if (rmap->entry != table)
-			continue;
-		list_del(&rmap->list);
-		kfree(rmap);
-		break;
-	}
-	*table = mp->vmaddr | _SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_PROTECT;
-	return 1;
-}
-
 static void gmap_flush_tlb(struct gmap *gmap)
 {
 	if (MACHINE_HAS_IDTE)
@@ -208,6 +193,30 @@ static void gmap_flush_tlb(struct gmap *gmap)
 		__tlb_flush_global();
 }
 
+static void gmap_radix_tree_free(struct radix_tree_root *root)
+{
+	struct radix_tree_iter iter;
+	unsigned long indices[16];
+	unsigned long index;
+	void **slot;
+	int i, nr;
+
+	/* A radix tree is freed by deleting all of its entries */
+	index = 0;
+	do {
+		nr = 0;
+		radix_tree_for_each_slot(slot, root, &iter, index) {
+			indices[nr] = iter.index;
+			if (++nr == 16)
+				break;
+		}
+		for (i = 0; i < nr; i++) {
+			index = indices[i];
+			radix_tree_delete(root, index);
+		}
+	} while (nr > 0);
+}
+
 /**
  * gmap_free - free a guest address space
  * @gmap: pointer to the guest address space structure
@@ -215,9 +224,6 @@ static void gmap_flush_tlb(struct gmap *gmap)
 void gmap_free(struct gmap *gmap)
 {
 	struct page *page, *next;
-	unsigned long *table;
-	int i;
-
 
 	/* Flush tlb. */
 	if (MACHINE_HAS_IDTE)
@@ -227,19 +233,13 @@ void gmap_free(struct gmap *gmap)
 		__tlb_flush_global();
 
 	/* Free all segment & region tables. */
-	down_read(&gmap->mm->mmap_sem);
-	spin_lock(&gmap->mm->page_table_lock);
-	list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
-		table = (unsigned long *) page_to_phys(page);
-		if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
-			/* Remove gmap rmap structures for segment table. */
-			for (i = 0; i < PTRS_PER_PMD; i++, table++)
-				gmap_unlink_segment(gmap, table);
+	list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
 		__free_pages(page, ALLOC_ORDER);
-	}
-	spin_unlock(&gmap->mm->page_table_lock);
-	up_read(&gmap->mm->mmap_sem);
+	gmap_radix_tree_free(&gmap->guest_to_host);
+	gmap_radix_tree_free(&gmap->host_to_guest);
+	down_write(&gmap->mm->mmap_sem);
 	list_del(&gmap->list);
+	up_write(&gmap->mm->mmap_sem);
 	kfree(gmap);
 }
 EXPORT_SYMBOL_GPL(gmap_free);
@@ -267,32 +267,88 @@ EXPORT_SYMBOL_GPL(gmap_disable);
 /*
  * gmap_alloc_table is assumed to be called with mmap_sem held
  */
-static int gmap_alloc_table(struct gmap *gmap,
-			    unsigned long *table, unsigned long init)
-	__releases(&gmap->mm->page_table_lock)
-	__acquires(&gmap->mm->page_table_lock)
+static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
+			    unsigned long init, unsigned long gaddr)
 {
 	struct page *page;
 	unsigned long *new;
 
 	/* since we dont free the gmap table until gmap_free we can unlock */
-	spin_unlock(&gmap->mm->page_table_lock);
 	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
-	spin_lock(&gmap->mm->page_table_lock);
 	if (!page)
 		return -ENOMEM;
 	new = (unsigned long *) page_to_phys(page);
 	crst_table_init(new, init);
+	spin_lock(&gmap->mm->page_table_lock);
 	if (*table & _REGION_ENTRY_INVALID) {
 		list_add(&page->lru, &gmap->crst_list);
 		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
 			(*table & _REGION_ENTRY_TYPE_MASK);
-	} else
+		page->index = gaddr;
+		page = NULL;
+	}
+	spin_unlock(&gmap->mm->page_table_lock);
+	if (page)
 		__free_pages(page, ALLOC_ORDER);
 	return 0;
 }
 
 /**
+ * __gmap_segment_gaddr - find virtual address from segment pointer
+ * @entry: pointer to a segment table entry in the guest address space
+ *
+ * Returns the virtual address in the guest address space for the segment
+ */
+static unsigned long __gmap_segment_gaddr(unsigned long *entry)
+{
+	struct page *page;
+	unsigned long offset;
+
+	offset = (unsigned long) entry / sizeof(unsigned long);
+	offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
+	page = pmd_to_page((pmd_t *) entry);
+	return page->index + offset;
+}
+
+/**
+ * __gmap_unlink_by_vmaddr - unlink a single segment via a host address
+ * @gmap: pointer to the guest address space structure
+ * @vmaddr: address in the host process address space
+ *
+ * Returns 1 if a TLB flush is required
+ */
+static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
+{
+	unsigned long *entry;
+	int flush = 0;
+
+	spin_lock(&gmap->guest_table_lock);
+	entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
+	if (entry) {
+		flush = (*entry != _SEGMENT_ENTRY_INVALID);
+		*entry = _SEGMENT_ENTRY_INVALID;
+	}
+	spin_unlock(&gmap->guest_table_lock);
+	return flush;
+}
+
+/**
+ * __gmap_unmap_by_gaddr - unmap a single segment via a guest address
+ * @gmap: pointer to the guest address space structure
+ * @gaddr: address in the guest address space
+ *
+ * Returns 1 if a TLB flush is required
+ */
+static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
+{
+	unsigned long vmaddr;
+
+	vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
+						   gaddr >> PMD_SHIFT);
+	return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
+}
+
+/**
  * gmap_unmap_segment - unmap segment from the guest address space
  * @gmap: pointer to the guest address space structure
  * @to: address in the guest address space
@@ -302,7 +358,6 @@ static int gmap_alloc_table(struct gmap *gmap,
  */
 int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
 {
-	unsigned long *table;
 	unsigned long off;
 	int flush;
 
@@ -312,31 +367,10 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
 		return -EINVAL;
 
 	flush = 0;
-	down_read(&gmap->mm->mmap_sem);
-	spin_lock(&gmap->mm->page_table_lock);
-	for (off = 0; off < len; off += PMD_SIZE) {
-		/* Walk the guest addr space page table */
-		table = gmap->table + (((to + off) >> 53) & 0x7ff);
-		if (*table & _REGION_ENTRY_INVALID)
-			goto out;
-		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
-		table = table + (((to + off) >> 42) & 0x7ff);
-		if (*table & _REGION_ENTRY_INVALID)
-			goto out;
-		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
-		table = table + (((to + off) >> 31) & 0x7ff);
-		if (*table & _REGION_ENTRY_INVALID)
-			goto out;
-		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
-		table = table + (((to + off) >> 20) & 0x7ff);
-
-		/* Clear segment table entry in guest address space. */
-		flush |= gmap_unlink_segment(gmap, table);
-		*table = _SEGMENT_ENTRY_INVALID;
-	}
-out:
-	spin_unlock(&gmap->mm->page_table_lock);
-	up_read(&gmap->mm->mmap_sem);
+	down_write(&gmap->mm->mmap_sem);
+	for (off = 0; off < len; off += PMD_SIZE)
+		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
+	up_write(&gmap->mm->mmap_sem);
 	if (flush)
 		gmap_flush_tlb(gmap);
 	return 0;
@@ -355,7 +389,6 @@ EXPORT_SYMBOL_GPL(gmap_unmap_segment);
 int gmap_map_segment(struct gmap *gmap, unsigned long from,
 		     unsigned long to, unsigned long len)
 {
-	unsigned long *table;
 	unsigned long off;
 	int flush;
 
@@ -366,66 +399,26 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
 		return -EINVAL;
 
 	flush = 0;
-	down_read(&gmap->mm->mmap_sem);
-	spin_lock(&gmap->mm->page_table_lock);
+	down_write(&gmap->mm->mmap_sem);
 	for (off = 0; off < len; off += PMD_SIZE) {
-		/* Walk the gmap address space page table */
-		table = gmap->table + (((to + off) >> 53) & 0x7ff);
-		if ((*table & _REGION_ENTRY_INVALID) &&
-		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
-			goto out_unmap;
-		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
-		table = table + (((to + off) >> 42) & 0x7ff);
-		if ((*table & _REGION_ENTRY_INVALID) &&
-		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
-			goto out_unmap;
-		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
-		table = table + (((to + off) >> 31) & 0x7ff);
-		if ((*table & _REGION_ENTRY_INVALID) &&
-		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
-			goto out_unmap;
-		table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
-		table = table + (((to + off) >> 20) & 0x7ff);
-
-		/* Store 'from' address in an invalid segment table entry. */
-		flush |= gmap_unlink_segment(gmap, table);
-		*table = (from + off) | (_SEGMENT_ENTRY_INVALID |
-					 _SEGMENT_ENTRY_PROTECT);
+		/* Remove old translation */
+		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
+		/* Store new translation */
+		if (radix_tree_insert(&gmap->guest_to_host,
+				      (to + off) >> PMD_SHIFT,
+				      (void *) from + off))
+			break;
 	}
-	spin_unlock(&gmap->mm->page_table_lock);
-	up_read(&gmap->mm->mmap_sem);
+	up_write(&gmap->mm->mmap_sem);
 	if (flush)
 		gmap_flush_tlb(gmap);
-	return 0;
-
-out_unmap:
-	spin_unlock(&gmap->mm->page_table_lock);
-	up_read(&gmap->mm->mmap_sem);
+	if (off >= len)
+		return 0;
 	gmap_unmap_segment(gmap, to, len);
 	return -ENOMEM;
 }
 EXPORT_SYMBOL_GPL(gmap_map_segment);
 
-static unsigned long *gmap_table_walk(struct gmap *gmap, unsigned long gaddr)
-{
-	unsigned long *table;
-
-	table = gmap->table + ((gaddr >> 53) & 0x7ff);
-	if (unlikely(*table & _REGION_ENTRY_INVALID))
-		return ERR_PTR(-EFAULT);
-	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
-	table = table + ((gaddr >> 42) & 0x7ff);
-	if (unlikely(*table & _REGION_ENTRY_INVALID))
-		return ERR_PTR(-EFAULT);
-	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
-	table = table + ((gaddr >> 31) & 0x7ff);
-	if (unlikely(*table & _REGION_ENTRY_INVALID))
-		return ERR_PTR(-EFAULT);
-	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
-	table = table + ((gaddr >> 20) & 0x7ff);
-	return table;
-}
-
 /**
  * __gmap_translate - translate a guest address to a user space address
  * @gmap: pointer to guest mapping meta data structure
@@ -439,25 +432,11 @@ static unsigned long *gmap_table_walk(struct gmap *gmap, unsigned long gaddr)
  */
 unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
 {
-	unsigned long *segment_ptr, vmaddr, segment;
-	struct gmap_pgtable *mp;
-	struct page *page;
+	unsigned long vmaddr;
 
-	current->thread.gmap_addr = gaddr;
-	segment_ptr = gmap_table_walk(gmap, gaddr);
-	if (IS_ERR(segment_ptr))
-		return PTR_ERR(segment_ptr);
-	/* Convert the gmap address to an mm address. */
-	segment = *segment_ptr;
-	if (!(segment & _SEGMENT_ENTRY_INVALID)) {
-		page = pfn_to_page(segment >> PAGE_SHIFT);
-		mp = (struct gmap_pgtable *) page->index;
-		return mp->vmaddr | (gaddr & ~PMD_MASK);
-	} else if (segment & _SEGMENT_ENTRY_PROTECT) {
-		vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
-		return vmaddr | (gaddr & ~PMD_MASK);
-	}
-	return -EFAULT;
+	vmaddr = (unsigned long)
+		radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
+	return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
 }
 EXPORT_SYMBOL_GPL(__gmap_translate);
 
@@ -481,125 +460,124 @@ unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
 }
 EXPORT_SYMBOL_GPL(gmap_translate);
 
-static int gmap_connect_pgtable(struct gmap *gmap, unsigned long gaddr,
-				unsigned long segment,
-				unsigned long *segment_ptr)
+/**
+ * gmap_unlink - disconnect a page table from the gmap shadow tables
+ * @gmap: pointer to guest mapping meta data structure
+ * @table: pointer to the host page table
+ * @vmaddr: vm address associated with the host page table
+ */
+static void gmap_unlink(struct mm_struct *mm, unsigned long *table,
+			unsigned long vmaddr)
+{
+	struct gmap *gmap;
+	int flush;
+
+	list_for_each_entry(gmap, &mm->context.gmap_list, list) {
+		flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
+		if (flush)
+			gmap_flush_tlb(gmap);
+	}
+}
+
+/**
+ * gmap_link - set up shadow page tables to connect a host to a guest address
+ * @gmap: pointer to guest mapping meta data structure
+ * @gaddr: guest address
+ * @vmaddr: vm address
+ *
+ * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
+ * if the vm address is already mapped to a different guest segment.
+ * The mmap_sem of the mm that belongs to the address space must be held
+ * when this function gets called.
+ */
+int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
 {
-	unsigned long vmaddr;
-	struct vm_area_struct *vma;
-	struct gmap_pgtable *mp;
-	struct gmap_rmap *rmap;
 	struct mm_struct *mm;
-	struct page *page;
+	unsigned long *table;
+	spinlock_t *ptl;
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
+	int rc;
 
-	mm = gmap->mm;
-	vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
-	vma = find_vma(mm, vmaddr);
-	if (!vma || vma->vm_start > vmaddr)
-		return -EFAULT;
-	/* Walk the parent mm page table */
-	pgd = pgd_offset(mm, vmaddr);
-	pud = pud_alloc(mm, pgd, vmaddr);
-	if (!pud)
+	/* Create higher level tables in the gmap page table */
+	table = gmap->table + ((gaddr >> 53) & 0x7ff);
+	if ((*table & _REGION_ENTRY_INVALID) &&
+	    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
+			     gaddr & 0xffe0000000000000))
 		return -ENOMEM;
-	pmd = pmd_alloc(mm, pud, vmaddr);
-	if (!pmd)
+	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+	table = table + ((gaddr >> 42) & 0x7ff);
+	if ((*table & _REGION_ENTRY_INVALID) &&
+	    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
+			     gaddr & 0xfffffc0000000000))
 		return -ENOMEM;
-	if (!pmd_present(*pmd) &&
-	    __pte_alloc(mm, vma, pmd, vmaddr))
+	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+	table = table + ((gaddr >> 31) & 0x7ff);
+	if ((*table & _REGION_ENTRY_INVALID) &&
+	    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
+			     gaddr & 0xffffffff80000000))
 		return -ENOMEM;
+	table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
+	table = table + ((gaddr >> 20) & 0x7ff);
+	/* Walk the parent mm page table */
+	mm = gmap->mm;
+	pgd = pgd_offset(mm, vmaddr);
+	VM_BUG_ON(pgd_none(*pgd));
+	pud = pud_offset(pgd, vmaddr);
+	VM_BUG_ON(pud_none(*pud));
+	pmd = pmd_offset(pud, vmaddr);
+	VM_BUG_ON(pmd_none(*pmd));
 	/* large pmds cannot yet be handled */
 	if (pmd_large(*pmd))
 		return -EFAULT;
-	/* pmd now points to a valid segment table entry. */
-	rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
-	if (!rmap)
-		return -ENOMEM;
 	/* Link gmap segment table entry location to page table. */
-	page = pmd_page(*pmd);
-	mp = (struct gmap_pgtable *) page->index;
-	rmap->gmap = gmap;
-	rmap->entry = segment_ptr;
-	rmap->vmaddr = gaddr & PMD_MASK;
-	spin_lock(&mm->page_table_lock);
-	if (*segment_ptr == segment) {
-		list_add(&rmap->list, &mp->mapper);
-		/* Set gmap segment table entry to page table. */
-		*segment_ptr = pmd_val(*pmd) & PAGE_MASK;
-		rmap = NULL;
-	}
-	spin_unlock(&mm->page_table_lock);
-	kfree(rmap);
-	return 0;
-}
-
-static void gmap_disconnect_pgtable(struct mm_struct *mm, unsigned long *table)
-{
-	struct gmap_rmap *rmap, *next;
-	struct gmap_pgtable *mp;
-	struct page *page;
-	int flush;
-
-	flush = 0;
-	spin_lock(&mm->page_table_lock);
-	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
-	mp = (struct gmap_pgtable *) page->index;
-	list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
-		*rmap->entry = mp->vmaddr | (_SEGMENT_ENTRY_INVALID |
-					     _SEGMENT_ENTRY_PROTECT);
-		list_del(&rmap->list);
-		kfree(rmap);
-		flush = 1;
-	}
-	spin_unlock(&mm->page_table_lock);
-	if (flush)
-		__tlb_flush_global();
+	rc = radix_tree_preload(GFP_KERNEL);
+	if (rc)
+		return rc;
+	ptl = pmd_lock(mm, pmd);
+	spin_lock(&gmap->guest_table_lock);
+	if (*table == _SEGMENT_ENTRY_INVALID) {
+		rc = radix_tree_insert(&gmap->host_to_guest,
+				       vmaddr >> PMD_SHIFT, table);
+		if (!rc)
+			*table = pmd_val(*pmd);
+	} else
+		rc = 0;
+	spin_unlock(&gmap->guest_table_lock);
+	spin_unlock(ptl);
+	radix_tree_preload_end();
+	return rc;
 }
 
-/*
- * this function is assumed to be called with mmap_sem held
+/**
+ * gmap_fault - resolve a fault on a guest address
+ * @gmap: pointer to guest mapping meta data structure
+ * @gaddr: guest address
+ * @fault_flags: flags to pass down to handle_mm_fault()
+ *
+ * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
+ * if the vm address is already mapped to a different guest segment.
  */
-unsigned long __gmap_fault(struct gmap *gmap, unsigned long gaddr)
+int gmap_fault(struct gmap *gmap, unsigned long gaddr,
+	       unsigned int fault_flags)
 {
-	unsigned long *segment_ptr, segment;
-	struct gmap_pgtable *mp;
-	struct page *page;
+	unsigned long vmaddr;
 	int rc;
 
-	current->thread.gmap_addr = gaddr;
-	segment_ptr = gmap_table_walk(gmap, gaddr);
-	if (IS_ERR(segment_ptr))
-		return -EFAULT;
-	/* Convert the gmap address to an mm address. */
-	while (1) {
-		segment = *segment_ptr;
-		if (!(segment & _SEGMENT_ENTRY_INVALID)) {
-			/* Page table is present */
-			page = pfn_to_page(segment >> PAGE_SHIFT);
-			mp = (struct gmap_pgtable *) page->index;
-			return mp->vmaddr | (gaddr & ~PMD_MASK);
-		}
-		if (!(segment & _SEGMENT_ENTRY_PROTECT))
-			/* Nothing mapped in the gmap address space. */
-			break;
-		rc = gmap_connect_pgtable(gmap, gaddr, segment, segment_ptr);
-		if (rc)
-			return rc;
-	}
-	return -EFAULT;
-}
-
-unsigned long gmap_fault(struct gmap *gmap, unsigned long gaddr)
-{
-	unsigned long rc;
-
 	down_read(&gmap->mm->mmap_sem);
-	rc = __gmap_fault(gmap, gaddr);
+	vmaddr = __gmap_translate(gmap, gaddr);
+	if (IS_ERR_VALUE(vmaddr)) {
+		rc = vmaddr;
+		goto out_up;
+	}
+	if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags)) {
+		rc = -EFAULT;
+		goto out_up;
+	}
+	rc = __gmap_link(gmap, gaddr, vmaddr);
+out_up:
 	up_read(&gmap->mm->mmap_sem);
-
 	return rc;
 }
 EXPORT_SYMBOL_GPL(gmap_fault);
@@ -619,17 +597,24 @@ static void gmap_zap_swap_entry(swp_entry_t entry, struct mm_struct *mm)
 		free_swap_and_cache(entry);
 }
 
-/**
- * The mm->mmap_sem lock must be held
+/*
+ * this function is assumed to be called with mmap_sem held
  */
-static void gmap_zap_unused(struct mm_struct *mm, unsigned long vmaddr)
+void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
 {
-	unsigned long ptev, pgstev;
+	unsigned long vmaddr, ptev, pgstev;
+	pte_t *ptep, pte;
 	spinlock_t *ptl;
 	pgste_t pgste;
-	pte_t *ptep, pte;
 
-	ptep = get_locked_pte(mm, vmaddr, &ptl);
+	/* Find the vm address for the guest address */
+	vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
+						   gaddr >> PMD_SHIFT);
+	if (!vmaddr)
+		return;
+	vmaddr |= gaddr & ~PMD_MASK;
+	/* Get pointer to the page table entry */
+	ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
 	if (unlikely(!ptep))
 		return;
 	pte = *ptep;
@@ -641,87 +626,34 @@ static void gmap_zap_unused(struct mm_struct *mm, unsigned long vmaddr)
 	ptev = pte_val(pte);
 	if (((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED) ||
 	    ((pgstev & _PGSTE_GPS_ZERO) && (ptev & _PAGE_INVALID))) {
-		gmap_zap_swap_entry(pte_to_swp_entry(pte), mm);
-		pte_clear(mm, vmaddr, ptep);
+		gmap_zap_swap_entry(pte_to_swp_entry(pte), gmap->mm);
+		pte_clear(gmap->mm, vmaddr, ptep);
 	}
 	pgste_set_unlock(ptep, pgste);
 out_pte:
 	pte_unmap_unlock(*ptep, ptl);
 }
-
-/*
- * this function is assumed to be called with mmap_sem held
- */
-void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
-{
-	unsigned long *table, *segment_ptr;
-	unsigned long segment, vmaddr, pgstev, ptev;
-	struct gmap_pgtable *mp;
-	struct page *page;
-
-	segment_ptr = gmap_table_walk(gmap, gaddr);
-	if (IS_ERR(segment_ptr))
-		return;
-	segment = *segment_ptr;
-	if (segment & _SEGMENT_ENTRY_INVALID)
-		return;
-	page = pfn_to_page(segment >> PAGE_SHIFT);
-	mp = (struct gmap_pgtable *) page->index;
-	vmaddr = mp->vmaddr | (gaddr & ~PMD_MASK);
-	/* Page table is present */
-	table = (unsigned long *)(segment & _SEGMENT_ENTRY_ORIGIN);
-	table = table + ((vmaddr >> 12) & 0xff);
-	pgstev = table[PTRS_PER_PTE];
-	ptev = table[0];
-	/* quick check, checked again with locks held */
-	if (((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED) ||
-	    ((pgstev & _PGSTE_GPS_ZERO) && (ptev & _PAGE_INVALID)))
-		gmap_zap_unused(gmap->mm, vmaddr);
-}
 EXPORT_SYMBOL_GPL(__gmap_zap);
 
 void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
 {
-
-	unsigned long *table, gaddr, size;
+	unsigned long gaddr, vmaddr, size;
 	struct vm_area_struct *vma;
-	struct gmap_pgtable *mp;
-	struct page *page;
 
 	down_read(&gmap->mm->mmap_sem);
-	gaddr = from;
-	while (gaddr < to) {
-		/* Walk the gmap address space page table */
-		table = gmap->table + ((gaddr >> 53) & 0x7ff);
-		if (unlikely(*table & _REGION_ENTRY_INVALID)) {
-			gaddr = (gaddr + PMD_SIZE) & PMD_MASK;
-			continue;
-		}
-		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
-		table = table + ((gaddr >> 42) & 0x7ff);
-		if (unlikely(*table & _REGION_ENTRY_INVALID)) {
-			gaddr = (gaddr + PMD_SIZE) & PMD_MASK;
-			continue;
-		}
-		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
-		table = table + ((gaddr >> 31) & 0x7ff);
-		if (unlikely(*table & _REGION_ENTRY_INVALID)) {
-			gaddr = (gaddr + PMD_SIZE) & PMD_MASK;
-			continue;
-		}
-		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
-		table = table + ((gaddr >> 20) & 0x7ff);
-		if (unlikely(*table & _SEGMENT_ENTRY_INVALID)) {
-			gaddr = (gaddr + PMD_SIZE) & PMD_MASK;
+	for (gaddr = from; gaddr < to;
+	     gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
+		/* Find the vm address for the guest address */
+		vmaddr = (unsigned long)
+			radix_tree_lookup(&gmap->guest_to_host,
+					  gaddr >> PMD_SHIFT);
+		if (!vmaddr)
 			continue;
-		}
-		page = pfn_to_page(*table >> PAGE_SHIFT);
-		mp = (struct gmap_pgtable *) page->index;
-		vma = find_vma(gmap->mm, mp->vmaddr);
+		vmaddr |= gaddr & ~PMD_MASK;
+		/* Find vma in the parent mm */
+		vma = find_vma(gmap->mm, vmaddr);
 		size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
-		zap_page_range(vma, mp->vmaddr | (gaddr & ~PMD_MASK),
-			       size, NULL);
-		gaddr = (gaddr + PMD_SIZE) & PMD_MASK;
+		zap_page_range(vma, vmaddr, size, NULL);
 	}
 	up_read(&gmap->mm->mmap_sem);
 }
@@ -778,7 +710,7 @@ int gmap_ipte_notify(struct gmap *gmap, unsigned long gaddr, unsigned long len)
 	down_read(&gmap->mm->mmap_sem);
 	while (len) {
 		/* Convert gmap address and connect the page tables */
-		addr = __gmap_fault(gmap, gaddr);
+		addr = __gmap_translate(gmap, gaddr);
 		if (IS_ERR_VALUE(addr)) {
 			rc = addr;
 			break;
@@ -788,6 +720,9 @@ int gmap_ipte_notify(struct gmap *gmap, unsigned long gaddr, unsigned long len)
 			rc = -EFAULT;
 			break;
 		}
+		rc = __gmap_link(gmap, gaddr, addr);
+		if (rc)
+			break;
 		/* Walk the process page table, lock and get pte pointer */
 		ptep = get_locked_pte(gmap->mm, addr, &ptl);
 		if (unlikely(!ptep))
@@ -817,23 +752,24 @@ EXPORT_SYMBOL_GPL(gmap_ipte_notify);
  * This function is assumed to be called with the page table lock held
  * for the pte to notify.
  */
-void gmap_do_ipte_notify(struct mm_struct *mm, unsigned long addr, pte_t *pte)
+void gmap_do_ipte_notify(struct mm_struct *mm, unsigned long vmaddr, pte_t *pte)
 {
-	unsigned long segment_offset;
+	unsigned long offset, gaddr;
+	unsigned long *table;
 	struct gmap_notifier *nb;
-	struct gmap_pgtable *mp;
-	struct gmap_rmap *rmap;
-	struct page *page;
+	struct gmap *gmap;
 
-	segment_offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
-	segment_offset = segment_offset * (4096 / sizeof(pte_t));
-	page = pfn_to_page(__pa(pte) >> PAGE_SHIFT);
-	mp = (struct gmap_pgtable *) page->index;
+	offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
+	offset = offset * (4096 / sizeof(pte_t));
 	spin_lock(&gmap_notifier_lock);
-	list_for_each_entry(rmap, &mp->mapper, list) {
+	list_for_each_entry(gmap, &mm->context.gmap_list, list) {
+		table = radix_tree_lookup(&gmap->host_to_guest,
+					  vmaddr >> PMD_SHIFT);
+		if (!table)
+			continue;
+		gaddr = __gmap_segment_gaddr(table) + offset;
 		list_for_each_entry(nb, &gmap_notifier_list, list)
-			nb->notifier_call(rmap->gmap,
-					  rmap->vmaddr + segment_offset);
+			nb->notifier_call(gmap, gaddr);
 	}
 	spin_unlock(&gmap_notifier_lock);
 }
@@ -844,29 +780,18 @@ static inline int page_table_with_pgste(struct page *page)
 	return atomic_read(&page->_mapcount) == 0;
 }
 
-static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
-						    unsigned long vmaddr)
+static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm)
 {
 	struct page *page;
 	unsigned long *table;
-	struct gmap_pgtable *mp;
 
 	page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
 	if (!page)
 		return NULL;
-	mp = kmalloc(sizeof(*mp), GFP_KERNEL|__GFP_REPEAT);
-	if (!mp) {
-		__free_page(page);
-		return NULL;
-	}
 	if (!pgtable_page_ctor(page)) {
-		kfree(mp);
 		__free_page(page);
 		return NULL;
 	}
-	mp->vmaddr = vmaddr & PMD_MASK;
-	INIT_LIST_HEAD(&mp->mapper);
-	page->index = (unsigned long) mp;
 	atomic_set(&page->_mapcount, 0);
 	table = (unsigned long *) page_to_phys(page);
 	clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
@@ -877,14 +802,10 @@ static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
 static inline void page_table_free_pgste(unsigned long *table)
 {
 	struct page *page;
-	struct gmap_pgtable *mp;
 
 	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
-	mp = (struct gmap_pgtable *) page->index;
-	BUG_ON(!list_empty(&mp->mapper));
 	pgtable_page_dtor(page);
 	atomic_set(&page->_mapcount, -1);
-	kfree(mp);
 	__free_page(page);
 }
 
@@ -1041,8 +962,7 @@ static inline int page_table_with_pgste(struct page *page)
 	return 0;
 }
 
-static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
-						    unsigned long vmaddr)
+static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm)
 {
 	return NULL;
 }
@@ -1056,8 +976,8 @@ static inline void page_table_free_pgste(unsigned long *table)
 {
 }
 
-static inline void gmap_disconnect_pgtable(struct mm_struct *mm,
-					   unsigned long *table)
+static inline void gmap_unlink(struct mm_struct *mm, unsigned long *table,
+			       unsigned long vmaddr)
 {
 }
 
@@ -1077,14 +997,14 @@ static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
 /*
  * page table entry allocation/free routines.
  */
-unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
+unsigned long *page_table_alloc(struct mm_struct *mm)
 {
 	unsigned long *uninitialized_var(table);
 	struct page *uninitialized_var(page);
 	unsigned int mask, bit;
 
 	if (mm_has_pgste(mm))
-		return page_table_alloc_pgste(mm, vmaddr);
+		return page_table_alloc_pgste(mm);
 	/* Allocate fragments of a 4K page as 1K/2K page table */
 	spin_lock_bh(&mm->context.list_lock);
 	mask = FRAG_MASK;
@@ -1126,10 +1046,8 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
 	unsigned int bit, mask;
 
 	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
-	if (page_table_with_pgste(page)) {
-		gmap_disconnect_pgtable(mm, table);
+	if (page_table_with_pgste(page))
 		return page_table_free_pgste(table);
-	}
 	/* Free 1K/2K page table fragment of a 4K page */
 	bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
 	spin_lock_bh(&mm->context.list_lock);
@@ -1161,7 +1079,8 @@ static void __page_table_free_rcu(void *table, unsigned bit)
 	}
 }
 
-void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
+void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
+			 unsigned long vmaddr)
 {
 	struct mm_struct *mm;
 	struct page *page;
@@ -1170,7 +1089,7 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
 	mm = tlb->mm;
 	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
 	if (page_table_with_pgste(page)) {
-		gmap_disconnect_pgtable(mm, table);
+		gmap_unlink(mm, table, vmaddr);
 		table = (unsigned long *) (__pa(table) | FRAG_MASK);
 		tlb_remove_table(tlb, table);
 		return;
@@ -1306,7 +1225,7 @@ again:
 		if (page_table_with_pgste(page))
 			continue;
 		/* Allocate new page table with pgstes */
-		new = page_table_alloc_pgste(mm, addr);
+		new = page_table_alloc_pgste(mm);
 		if (!new)
 			return -ENOMEM;
 
@@ -1321,7 +1240,7 @@ again:
 		/* Establish new table */
 		pmd_populate(mm, pmd, (pte_t *) new);
 		/* Free old table with rcu, there might be a walker! */
-		page_table_free_rcu(tlb, table, addr);
+		page_table_free_rcu(tlb, table, addr);
 		new = NULL;
 	}
 	spin_unlock(ptl);