author	Martin Schwidefsky <schwidefsky@de.ibm.com>	2014-07-01 08:36:04 -0400
committer	Christian Borntraeger <borntraeger@de.ibm.com>	2014-08-26 04:09:03 -0400
commit	c6c956b80bdf151cf41d3e7e5c54755d930a212c (patch)
tree	2f105bb983034a098f6f26eb1564397273ba39b6 /arch/s390
parent	527e30b41d8b86e9ae7f5b740de416958c0e574e (diff)
KVM: s390/mm: support gmap page tables with less than 5 levels

Add an addressing limit to the gmap address spaces and only allocate
the page table levels that are needed for the given limit. The limit
is fixed and can not be changed after a gmap has been created.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
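
Illustration (not from this patch): with the new second argument, a caller that
knows the maximum guest address can ask for fewer translation levels;
gmap_alloc() rounds the limit up to the next table boundary and picks the ASCE
type itself, so the caller only supplies an upper bound. The helper below is a
hypothetical sketch of that calling convention:

#include <linux/mm_types.h>	/* struct mm_struct */
#include <asm/pgtable.h>	/* struct gmap, gmap_alloc() on s390 (per this patch) */

/* Illustrative sketch only: choosing a gmap limit for gmap_alloc(). */
static struct gmap *example_create_gmap(struct mm_struct *mm,
					unsigned long guest_mem_size)
{
	unsigned long limit;

	/* -1UL keeps the previous behaviour: a full region-1 (5-level) table */
	limit = guest_mem_size ? guest_mem_size - 1 : -1UL;

	/*
	 * e.g. a 4 GiB guest yields limit = (1UL << 32) - 1, which
	 * gmap_alloc() maps to a region-3 table (3 translation levels).
	 */
	return gmap_alloc(mm, limit);
}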
Diffstat (limited to 'arch/s390')
-rw-r--r--	arch/s390/include/asm/pgtable.h	3
-rw-r--r--	arch/s390/kvm/kvm-s390.c	4
-rw-r--r--	arch/s390/mm/pgtable.c	85
3 files changed, 59 insertions, 33 deletions
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 9bfdbca14f95..7705180e906d 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -808,6 +808,7 @@ struct gmap {
 	spinlock_t guest_table_lock;
 	unsigned long *table;
 	unsigned long asce;
+	unsigned long asce_end;
 	void *private;
 	bool pfault_enabled;
 };
@@ -844,7 +845,7 @@ struct gmap_notifier {
 	void (*notifier_call)(struct gmap *gmap, unsigned long gaddr);
 };
 
-struct gmap *gmap_alloc(struct mm_struct *mm);
+struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit);
 void gmap_free(struct gmap *gmap);
 void gmap_enable(struct gmap *gmap);
 void gmap_disable(struct gmap *gmap);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 543c24baf1eb..82065dc7948d 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -451,7 +451,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	if (type & KVM_VM_S390_UCONTROL) {
 		kvm->arch.gmap = NULL;
 	} else {
-		kvm->arch.gmap = gmap_alloc(current->mm);
+		kvm->arch.gmap = gmap_alloc(current->mm, -1UL);
 		if (!kvm->arch.gmap)
 			goto out_nogmap;
 		kvm->arch.gmap->private = kvm;
@@ -535,7 +535,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
 	kvm_clear_async_pf_completion_queue(vcpu);
 	if (kvm_is_ucontrol(vcpu->kvm)) {
-		vcpu->arch.gmap = gmap_alloc(current->mm);
+		vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
 		if (!vcpu->arch.gmap)
 			return -ENOMEM;
 		vcpu->arch.gmap->private = vcpu->kvm;
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 74dfd9eaa300..665714b08c0d 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -145,15 +145,34 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
 /**
  * gmap_alloc - allocate a guest address space
  * @mm: pointer to the parent mm_struct
+ * @limit: maximum size of the gmap address space
  *
  * Returns a guest address space structure.
  */
-struct gmap *gmap_alloc(struct mm_struct *mm)
+struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit)
 {
 	struct gmap *gmap;
 	struct page *page;
 	unsigned long *table;
-
+	unsigned long etype, atype;
+
+	if (limit < (1UL << 31)) {
+		limit = (1UL << 31) - 1;
+		atype = _ASCE_TYPE_SEGMENT;
+		etype = _SEGMENT_ENTRY_EMPTY;
+	} else if (limit < (1UL << 42)) {
+		limit = (1UL << 42) - 1;
+		atype = _ASCE_TYPE_REGION3;
+		etype = _REGION3_ENTRY_EMPTY;
+	} else if (limit < (1UL << 53)) {
+		limit = (1UL << 53) - 1;
+		atype = _ASCE_TYPE_REGION2;
+		etype = _REGION2_ENTRY_EMPTY;
+	} else {
+		limit = -1UL;
+		atype = _ASCE_TYPE_REGION1;
+		etype = _REGION1_ENTRY_EMPTY;
+	}
 	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
 	if (!gmap)
 		goto out;
@@ -168,10 +187,11 @@ struct gmap *gmap_alloc(struct mm_struct *mm)
 	page->index = 0;
 	list_add(&page->lru, &gmap->crst_list);
 	table = (unsigned long *) page_to_phys(page);
-	crst_table_init(table, _REGION1_ENTRY_EMPTY);
+	crst_table_init(table, etype);
 	gmap->table = table;
-	gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
+	gmap->asce = atype | _ASCE_TABLE_LENGTH |
 		     _ASCE_USER_BITS | __pa(table);
+	gmap->asce_end = limit;
 	down_write(&mm->mmap_sem);
 	list_add(&gmap->list, &mm->context.gmap_list);
 	up_write(&mm->mmap_sem);
@@ -187,8 +207,7 @@ EXPORT_SYMBOL_GPL(gmap_alloc);
 static void gmap_flush_tlb(struct gmap *gmap)
 {
 	if (MACHINE_HAS_IDTE)
-		__tlb_flush_asce(gmap->mm, (unsigned long) gmap->table |
-				 _ASCE_TYPE_REGION1);
+		__tlb_flush_asce(gmap->mm, gmap->asce);
 	else
 		__tlb_flush_global();
 }
@@ -227,8 +246,7 @@ void gmap_free(struct gmap *gmap)
 
 	/* Flush tlb. */
 	if (MACHINE_HAS_IDTE)
-		__tlb_flush_asce(gmap->mm, (unsigned long) gmap->table |
-				 _ASCE_TYPE_REGION1);
+		__tlb_flush_asce(gmap->mm, gmap->asce);
 	else
 		__tlb_flush_global();
 
@@ -394,8 +412,8 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
 
 	if ((from | to | len) & (PMD_SIZE - 1))
 		return -EINVAL;
-	if (len == 0 || from + len > TASK_MAX_SIZE ||
-	    from + len < from || to + len < to)
+	if (len == 0 || from + len < from || to + len < to ||
+	    from + len > TASK_MAX_SIZE || to + len > gmap->asce_end)
 		return -EINVAL;
 
 	flush = 0;
@@ -501,25 +519,32 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
 	int rc;
 
 	/* Create higher level tables in the gmap page table */
-	table = gmap->table + ((gaddr >> 53) & 0x7ff);
-	if ((*table & _REGION_ENTRY_INVALID) &&
-	    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
-			     gaddr & 0xffe0000000000000))
-		return -ENOMEM;
-	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
-	table = table + ((gaddr >> 42) & 0x7ff);
-	if ((*table & _REGION_ENTRY_INVALID) &&
-	    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
-			     gaddr & 0xfffffc0000000000))
-		return -ENOMEM;
-	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
-	table = table + ((gaddr >> 31) & 0x7ff);
-	if ((*table & _REGION_ENTRY_INVALID) &&
-	    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
-			     gaddr & 0xffffffff80000000))
-		return -ENOMEM;
-	table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
-	table = table + ((gaddr >> 20) & 0x7ff);
+	table = gmap->table;
+	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
+		table += (gaddr >> 53) & 0x7ff;
+		if ((*table & _REGION_ENTRY_INVALID) &&
+		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
+				     gaddr & 0xffe0000000000000))
+			return -ENOMEM;
+		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+	}
+	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
+		table += (gaddr >> 42) & 0x7ff;
+		if ((*table & _REGION_ENTRY_INVALID) &&
+		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
+				     gaddr & 0xfffffc0000000000))
+			return -ENOMEM;
+		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+	}
+	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
+		table += (gaddr >> 31) & 0x7ff;
+		if ((*table & _REGION_ENTRY_INVALID) &&
+		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
+				     gaddr & 0xffffffff80000000))
+			return -ENOMEM;
+		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+	}
+	table += (gaddr >> 20) & 0x7ff;
 	/* Walk the parent mm page table */
 	mm = gmap->mm;
 	pgd = pgd_offset(mm, vmaddr);
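
For reference, an illustrative-only sketch (plain userspace C, not kernel code)
of how the shift/mask pairs used in __gmap_link() above split a 64-bit guest
address into the 11-bit table indices of the s390 translation format; the
variable names are assumptions, not taken from the patch:

#include <stdio.h>

int main(void)
{
	/* arbitrary example guest address (assumes 64-bit unsigned long) */
	unsigned long gaddr = 0x0000123456789000UL;

	unsigned long r1x = (gaddr >> 53) & 0x7ff;	/* region-first index  */
	unsigned long r2x = (gaddr >> 42) & 0x7ff;	/* region-second index */
	unsigned long r3x = (gaddr >> 31) & 0x7ff;	/* region-third index  */
	unsigned long sx  = (gaddr >> 20) & 0x7ff;	/* segment index       */

	printf("r1x=%lu r2x=%lu r3x=%lu sx=%lu\n", r1x, r2x, r3x, sx);
	return 0;
}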