Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/appldata/appldata_net_sum.c   3
-rw-r--r--  arch/s390/include/asm/kvm_host.h        5
-rw-r--r--  arch/s390/kernel/entry.S               12
-rw-r--r--  arch/s390/kernel/entry64.S             12
-rw-r--r--  arch/s390/kernel/time.c                18
-rw-r--r--  arch/s390/kvm/intercept.c               2
-rw-r--r--  arch/s390/kvm/kvm-s390.c               64
-rw-r--r--  arch/s390/kvm/kvm-s390.h                2
8 files changed, 58 insertions(+), 60 deletions(-)
diff --git a/arch/s390/appldata/appldata_net_sum.c b/arch/s390/appldata/appldata_net_sum.c
index 9a9586f4103f..f02e89ce4df1 100644
--- a/arch/s390/appldata/appldata_net_sum.c
+++ b/arch/s390/appldata/appldata_net_sum.c
@@ -85,7 +85,8 @@ static void appldata_get_net_sum_data(void *data)
 
 	rcu_read_lock();
 	for_each_netdev_rcu(&init_net, dev) {
-		const struct net_device_stats *stats = dev_get_stats(dev);
+		struct rtnl_link_stats64 temp;
+		const struct net_device_stats *stats = dev_get_stats(dev, &temp);
 
 		rx_packets += stats->rx_packets;
 		tx_packets += stats->tx_packets;
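
The hunk above tracks the dev_get_stats() interface change in which the caller now passes a struct rtnl_link_stats64 buffer that the device statistics code may fill. A minimal sketch of that calling pattern, mirroring the prototype used in the hunk; the helper name and the rx-only accounting are illustrative, not part of the patch:

	/* needs <linux/netdevice.h> for dev_get_stats(), for_each_netdev_rcu(), init_net */
	static unsigned long example_sum_rx_packets(void)
	{
		struct net_device *dev;
		struct rtnl_link_stats64 temp;
		unsigned long rx_packets = 0;

		rcu_read_lock();
		for_each_netdev_rcu(&init_net, dev) {
			/* caller supplies the storage; dev_get_stats() fills it as needed */
			const struct net_device_stats *stats = dev_get_stats(dev, &temp);

			rx_packets += stats->rx_packets;
		}
		rcu_read_unlock();
		return rx_packets;
	}
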
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 27605b62b980..cef7dbf69dfc 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -26,7 +26,7 @@
 
 struct sca_entry {
 	atomic_t scn;
-	__u64	reserved;
+	__u32	reserved;
 	__u64	sda;
 	__u64	reserved2[2];
 } __attribute__((packed));
@@ -41,7 +41,8 @@ struct sca_block {
 } __attribute__((packed));
 
 #define KVM_NR_PAGE_SIZES 2
-#define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + ((x) - 1) * 8)
+#define KVM_HPAGE_GFN_SHIFT(x) (((x) - 1) * 8)
+#define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
 #define KVM_HPAGE_SIZE(x) (1UL << KVM_HPAGE_SHIFT(x))
 #define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1))
 #define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE)
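
For reference, the reworked huge-page macros above expand as follows on s390, assuming PAGE_SHIFT is 12 (4 KiB base pages); the new KVM_HPAGE_GFN_SHIFT() only factors out the guest-frame-number shift, so the resulting sizes are unchanged:

	/* level x = 2, the single huge-page level implied by KVM_NR_PAGE_SIZES == 2 */
	KVM_HPAGE_GFN_SHIFT(2)  == (2 - 1) * 8                ==  8
	KVM_HPAGE_SHIFT(2)      == PAGE_SHIFT + 8             == 20
	KVM_HPAGE_SIZE(2)       == 1UL << 20                  == 1 MiB
	KVM_PAGES_PER_HPAGE(2)  == (1UL << 20) / (1UL << 12)  == 256
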
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index d5e3e6007447..bea9ee37ac9d 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -535,8 +535,16 @@ pgm_no_vtime2:
 	l	%r3,__LC_PGM_ILC	# load program interruption code
 	la	%r8,0x7f
 	nr	%r8,%r3			# clear per-event-bit and ilc
-	be	BASED(pgm_exit)		# only per or per+check ?
-	b	BASED(pgm_do_call)
+	be	BASED(pgm_exit2)	# only per or per+check ?
+	l	%r7,BASED(.Ljump_table)
+	sll	%r8,2
+	l	%r7,0(%r8,%r7)		# load address of handler routine
+	la	%r2,SP_PTREGS(%r15)	# address of register-save area
+	basr	%r14,%r7		# branch to interrupt-handler
+pgm_exit2:
+	TRACE_IRQS_ON
+	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
+	b	BASED(sysc_return)
 
 #
 # it was a single stepped SVC that is causing all the trouble
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index e7192e1cb678..8bccec15ea90 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -544,8 +544,16 @@ pgm_no_vtime2:
 	lgf	%r3,__LC_PGM_ILC	# load program interruption code
 	lghi	%r8,0x7f
 	ngr	%r8,%r3			# clear per-event-bit and ilc
-	je	pgm_exit
-	j	pgm_do_call
+	je	pgm_exit2
+	sll	%r8,3
+	larl	%r1,pgm_check_table
+	lg	%r1,0(%r8,%r1)		# load address of handler routine
+	la	%r2,SP_PTREGS(%r15)	# address of register-save area
+	basr	%r14,%r1		# branch to interrupt-handler
+pgm_exit2:
+	TRACE_IRQS_ON
+	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
+	j	sysc_return
 
 #
 # it was a single stepped SVC that is causing all the trouble
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index a2163c95eb98..15a7536452d5 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -524,8 +524,11 @@ void etr_switch_to_local(void)
 	if (!etr_eacr.sl)
 		return;
 	disable_sync_clock(NULL);
-	set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events);
-	queue_work(time_sync_wq, &etr_work);
+	if (!test_and_set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events)) {
+		etr_eacr.es = etr_eacr.sl = 0;
+		etr_setr(&etr_eacr);
+		queue_work(time_sync_wq, &etr_work);
+	}
 }
 
 /*
@@ -539,8 +542,11 @@ void etr_sync_check(void)
 	if (!etr_eacr.es)
 		return;
 	disable_sync_clock(NULL);
-	set_bit(ETR_EVENT_SYNC_CHECK, &etr_events);
-	queue_work(time_sync_wq, &etr_work);
+	if (!test_and_set_bit(ETR_EVENT_SYNC_CHECK, &etr_events)) {
+		etr_eacr.es = 0;
+		etr_setr(&etr_eacr);
+		queue_work(time_sync_wq, &etr_work);
+	}
 }
 
 /*
@@ -902,7 +908,7 @@ static struct etr_eacr etr_handle_update(struct etr_aib *aib,
 	 * Do not try to get the alternate port aib if the clock
 	 * is not in sync yet.
 	 */
-	if (!check_sync_clock())
+	if (!eacr.es || !check_sync_clock())
 		return eacr;
 
 	/*
@@ -1064,7 +1070,7 @@ static void etr_work_fn(struct work_struct *work)
 	 * If the clock is in sync just update the eacr and return.
 	 * If there is no valid sync port wait for a port update.
 	 */
-	if (check_sync_clock() || sync_port < 0) {
+	if ((eacr.es && check_sync_clock()) || sync_port < 0) {
 		etr_update_eacr(eacr);
 		etr_set_tolec_timeout(now);
 		goto out_unlock;
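
The two etr_*() hunks above replace set_bit() plus an unconditional queue_work() with a test_and_set_bit() guard. Because test_and_set_bit() returns the previous value of the bit, the guarded body runs only for the first caller to raise the event until the bit is cleared again. A self-contained sketch of that guard pattern, with placeholder names for the event bit and work item (they are not the kernel's):

	#include <linux/bitops.h>
	#include <linux/workqueue.h>

	#define EXAMPLE_EVENT_PENDING	0	/* placeholder event bit */

	static unsigned long example_events;
	static struct work_struct example_work;

	static void example_raise_event(void)
	{
		/* Only the first raiser queues the worker; later raisers are
		 * no-ops until the bit is cleared by the handler. */
		if (!test_and_set_bit(EXAMPLE_EVENT_PENDING, &example_events))
			schedule_work(&example_work);
	}
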
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 3ddc30895e31..f7b6df45d8be 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -135,7 +135,7 @@ static int handle_stop(struct kvm_vcpu *vcpu)
 	spin_lock_bh(&vcpu->arch.local_int.lock);
 	if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) {
 		vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP;
-		rc = __kvm_s390_vcpu_store_status(vcpu,
+		rc = kvm_s390_vcpu_store_status(vcpu,
 						KVM_S390_STORE_STATUS_NOADDR);
 		if (rc >= 0)
 			rc = -EOPNOTSUPP;
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index ae3705816878..4fe68650535c 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -207,6 +207,7 @@ out_nokvm:
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
 	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
+	clear_bit(63 - vcpu->vcpu_id, (unsigned long *) &vcpu->kvm->arch.sca->mcn);
 	if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
 	    (__u64) vcpu->arch.sie_block)
 		vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
@@ -296,7 +297,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
 	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
 	set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
-	vcpu->arch.sie_block->ecb = 2;
+	vcpu->arch.sie_block->ecb = 6;
 	vcpu->arch.sie_block->eca = 0xC1002001U;
 	vcpu->arch.sie_block->fac = (int) (long) facilities;
 	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
@@ -329,6 +330,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 	kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
 	vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
 	vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
+	set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
 
 	spin_lock_init(&vcpu->arch.local_int.lock);
 	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
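
The clear_bit()/set_bit() pair added in the two kvm-s390.c hunks above maintains the CPU mask (mcn) in the shared SCA block. The index is written as 63 - id, which appears to translate the architecture's most-significant-bit-first numbering of the 64-bit field into the least-significant-bit-first numbering used by Linux bitops; under that assumption the mapping looks like this:

	/* vcpu_id 0  -> Linux bit 63 -> 0x8000000000000000 (leftmost bit set)
	 * vcpu_id 1  -> Linux bit 62 -> 0x4000000000000000
	 * vcpu_id 63 -> Linux bit  0 -> 0x0000000000000001 */
	set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
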
@@ -363,63 +365,49 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 
 static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
 {
-	vcpu_load(vcpu);
 	kvm_s390_vcpu_initial_reset(vcpu);
-	vcpu_put(vcpu);
 	return 0;
 }
 
 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
-	vcpu_load(vcpu);
 	memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
-	vcpu_put(vcpu);
 	return 0;
 }
 
 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
-	vcpu_load(vcpu);
 	memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
-	vcpu_put(vcpu);
 	return 0;
 }
 
 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 				  struct kvm_sregs *sregs)
 {
-	vcpu_load(vcpu);
 	memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
 	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
-	vcpu_put(vcpu);
 	return 0;
 }
 
 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 				  struct kvm_sregs *sregs)
 {
-	vcpu_load(vcpu);
 	memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
 	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
-	vcpu_put(vcpu);
 	return 0;
 }
 
 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
-	vcpu_load(vcpu);
 	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
 	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
-	vcpu_put(vcpu);
 	return 0;
 }
 
 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
-	vcpu_load(vcpu);
 	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
 	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
-	vcpu_put(vcpu);
 	return 0;
 }
 
@@ -427,14 +415,12 @@ static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
 {
 	int rc = 0;
 
-	vcpu_load(vcpu);
 	if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
 		rc = -EBUSY;
 	else {
 		vcpu->run->psw_mask = psw.mask;
 		vcpu->run->psw_addr = psw.addr;
 	}
-	vcpu_put(vcpu);
 	return rc;
 }
 
@@ -498,8 +484,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	int rc;
 	sigset_t sigsaved;
 
-	vcpu_load(vcpu);
-
 rerun_vcpu:
 	if (vcpu->requests)
 		if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
@@ -568,8 +552,6 @@ rerun_vcpu:
 	if (vcpu->sigset_active)
 		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
 
-	vcpu_put(vcpu);
-
 	vcpu->stat.exit_userspace++;
 	return rc;
 }
@@ -589,7 +571,7 @@ static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
  * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
  * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
  */
-int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
+int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
 {
 	const unsigned char archmode = 1;
 	int prefix;
@@ -651,45 +633,42 @@ int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
 	return 0;
 }
 
-static int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
-{
-	int rc;
-
-	vcpu_load(vcpu);
-	rc = __kvm_s390_vcpu_store_status(vcpu, addr);
-	vcpu_put(vcpu);
-	return rc;
-}
-
 long kvm_arch_vcpu_ioctl(struct file *filp,
 			 unsigned int ioctl, unsigned long arg)
 {
 	struct kvm_vcpu *vcpu = filp->private_data;
 	void __user *argp = (void __user *)arg;
+	long r;
 
 	switch (ioctl) {
 	case KVM_S390_INTERRUPT: {
 		struct kvm_s390_interrupt s390int;
 
+		r = -EFAULT;
 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
-			return -EFAULT;
-		return kvm_s390_inject_vcpu(vcpu, &s390int);
+			break;
+		r = kvm_s390_inject_vcpu(vcpu, &s390int);
+		break;
 	}
 	case KVM_S390_STORE_STATUS:
-		return kvm_s390_vcpu_store_status(vcpu, arg);
+		r = kvm_s390_vcpu_store_status(vcpu, arg);
+		break;
 	case KVM_S390_SET_INITIAL_PSW: {
 		psw_t psw;
 
+		r = -EFAULT;
 		if (copy_from_user(&psw, argp, sizeof(psw)))
-			return -EFAULT;
-		return kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
+			break;
+		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
+		break;
 	}
 	case KVM_S390_INITIAL_RESET:
-		return kvm_arch_vcpu_ioctl_initial_reset(vcpu);
+		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
+		break;
 	default:
-		;
+		r = -EINVAL;
 	}
-	return -EINVAL;
+	return r;
 }
 
 /* Section: memory related */
@@ -744,11 +723,6 @@ void kvm_arch_flush_shadow(struct kvm *kvm)
 {
 }
 
-gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
-{
-	return gfn;
-}
-
 static int __init kvm_s390_init(void)
 {
 	int ret;
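
The kvm_arch_vcpu_ioctl() hunk above replaces per-case early returns with a single accumulated return code r, the usual kernel ioctl shape: set r to the pessimistic error first, break out of the case on failure, and fall through to one return r at the end. A minimal sketch of that pattern, with a made-up command number and payload for illustration:

	/* needs <linux/uaccess.h> for copy_from_user() and <linux/errno.h> */
	static long example_vcpu_ioctl(unsigned int ioctl, void __user *argp)
	{
		long r;

		switch (ioctl) {
		case 0x1234: {			/* placeholder command number */
			int val;

			r = -EFAULT;
			if (copy_from_user(&val, argp, sizeof(val)))
				break;		/* r is already -EFAULT */
			r = 0;			/* command handled */
			break;
		}
		default:
			r = -EINVAL;
		}
		return r;
	}
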
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index cfa9d1777457..a7b7586626db 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -92,7 +92,7 @@ int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
 int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
 
 /* implemented in kvm-s390.c */
-int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu,
+int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu,
 				unsigned long addr);
 /* implemented in diag.c */
 int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);