author     David S. Miller <davem@davemloft.net>   2015-03-09 23:38:02 -0400
committer  David S. Miller <davem@davemloft.net>   2015-03-09 23:38:02 -0400
commit     3cef5c5b0b56f3f90b0e9ff8d3f8dc57f464cc14 (patch)
tree       02e95f15bd8a04071a9a36f534a92a066a8ce66a /arch/s390
parent     8ac467e837a24eb024177b4b01013d8e6764913a (diff)
parent     affb8172de395a6e1db52ed9790ca0456d8c29a9 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
        drivers/net/ethernet/cadence/macb.c

Overlapping changes in macb driver, mostly fixes and cleanups in 'net'
overlapping with the integration of at91_ether into macb in 'net-next'.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/include/asm/kvm_host.h     | 12
-rw-r--r--  arch/s390/include/asm/mmu_context.h  |  2
-rw-r--r--  arch/s390/include/asm/page.h         | 11
-rw-r--r--  arch/s390/kernel/jump_label.c        | 12
-rw-r--r--  arch/s390/kernel/module.c            |  1
-rw-r--r--  arch/s390/kernel/processor.c         |  2
-rw-r--r--  arch/s390/kvm/kvm-s390.c             | 68
-rw-r--r--  arch/s390/kvm/kvm-s390.h             |  3
-rw-r--r--  arch/s390/kvm/priv.c                 |  2
-rw-r--r--  arch/s390/pci/pci.c                  | 28
-rw-r--r--  arch/s390/pci/pci_mmio.c             | 17
11 files changed, 76 insertions, 82 deletions
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index d84559e31f32..f407bbf5ee94 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -515,15 +515,15 @@ struct s390_io_adapter {
 #define S390_ARCH_FAC_MASK_SIZE_U64 \
         (S390_ARCH_FAC_MASK_SIZE_BYTE / sizeof(u64))
 
-struct s390_model_fac {
-        /* facilities used in SIE context */
-        __u64 sie[S390_ARCH_FAC_LIST_SIZE_U64];
-        /* subset enabled by kvm */
-        __u64 kvm[S390_ARCH_FAC_LIST_SIZE_U64];
+struct kvm_s390_fac {
+        /* facility list requested by guest */
+        __u64 list[S390_ARCH_FAC_LIST_SIZE_U64];
+        /* facility mask supported by kvm & hosting machine */
+        __u64 mask[S390_ARCH_FAC_LIST_SIZE_U64];
 };
 
 struct kvm_s390_cpu_model {
-        struct s390_model_fac *fac;
+        struct kvm_s390_fac *fac;
         struct cpuid cpu_id;
         unsigned short ibc;
 };
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index f49b71954654..8fb3802f8fad 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -62,6 +62,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 {
         int cpu = smp_processor_id();
 
+        S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
         if (prev == next)
                 return;
         if (MACHINE_HAS_TLB_LC)
@@ -73,7 +74,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
         atomic_dec(&prev->context.attach_count);
         if (MACHINE_HAS_TLB_LC)
                 cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
-        S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
 }
 
 #define finish_arch_post_lock_switch finish_arch_post_lock_switch
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 7b2ac6e44166..53eacbd4f09b 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -37,16 +37,7 @@ static inline void storage_key_init_range(unsigned long start, unsigned long end
 #endif
 }
 
-static inline void clear_page(void *page)
-{
-        register unsigned long reg1 asm ("1") = 0;
-        register void *reg2 asm ("2") = page;
-        register unsigned long reg3 asm ("3") = 4096;
-        asm volatile(
-                " mvcl 2,0"
-                : "+d" (reg2), "+d" (reg3) : "d" (reg1)
-                : "memory", "cc");
-}
+#define clear_page(page)        memset((page), 0, PAGE_SIZE)
 
 /*
  * copy_page uses the mvcl instruction with 0xb0 padding byte in order to
diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c
index cb2d51e779df..830066f936c8 100644
--- a/arch/s390/kernel/jump_label.c
+++ b/arch/s390/kernel/jump_label.c
@@ -36,16 +36,20 @@ static void jump_label_make_branch(struct jump_entry *entry, struct insn *insn)
         insn->offset = (entry->target - entry->code) >> 1;
 }
 
-static void jump_label_bug(struct jump_entry *entry, struct insn *insn)
+static void jump_label_bug(struct jump_entry *entry, struct insn *expected,
+                           struct insn *new)
 {
         unsigned char *ipc = (unsigned char *)entry->code;
-        unsigned char *ipe = (unsigned char *)insn;
+        unsigned char *ipe = (unsigned char *)expected;
+        unsigned char *ipn = (unsigned char *)new;
 
         pr_emerg("Jump label code mismatch at %pS [%p]\n", ipc, ipc);
         pr_emerg("Found: %02x %02x %02x %02x %02x %02x\n",
                  ipc[0], ipc[1], ipc[2], ipc[3], ipc[4], ipc[5]);
         pr_emerg("Expected: %02x %02x %02x %02x %02x %02x\n",
                  ipe[0], ipe[1], ipe[2], ipe[3], ipe[4], ipe[5]);
+        pr_emerg("New: %02x %02x %02x %02x %02x %02x\n",
+                 ipn[0], ipn[1], ipn[2], ipn[3], ipn[4], ipn[5]);
         panic("Corrupted kernel text");
 }
 
@@ -69,10 +73,10 @@ static void __jump_label_transform(struct jump_entry *entry,
         }
         if (init) {
                 if (memcmp((void *)entry->code, &orignop, sizeof(orignop)))
-                        jump_label_bug(entry, &old);
+                        jump_label_bug(entry, &orignop, &new);
         } else {
                 if (memcmp((void *)entry->code, &old, sizeof(old)))
-                        jump_label_bug(entry, &old);
+                        jump_label_bug(entry, &old, &new);
         }
         probe_kernel_write((void *)entry->code, &new, sizeof(new));
 }
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 36154a2f1814..2ca95862e336 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -436,6 +436,7 @@ int module_finalize(const Elf_Ehdr *hdr,
                     const Elf_Shdr *sechdrs,
                     struct module *me)
 {
+        jump_label_apply_nops(me);
         vfree(me->arch.syminfo);
         me->arch.syminfo = NULL;
         return 0;
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 26108232fcaa..dc488e13b7e3 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -18,7 +18,7 @@
 
 static DEFINE_PER_CPU(struct cpuid, cpu_id);
 
-void cpu_relax(void)
+void notrace cpu_relax(void)
 {
         if (!smp_cpu_mtid && MACHINE_HAS_DIAG44)
                 asm volatile("diag 0,0,0x44");
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 0c3623927563..f6579cfde2df 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -522,7 +522,7 @@ static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
                 memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
                        sizeof(struct cpuid));
                 kvm->arch.model.ibc = proc->ibc;
-                memcpy(kvm->arch.model.fac->kvm, proc->fac_list,
+                memcpy(kvm->arch.model.fac->list, proc->fac_list,
                        S390_ARCH_FAC_LIST_SIZE_BYTE);
         } else
                 ret = -EFAULT;
@@ -556,7 +556,7 @@ static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
         }
         memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
         proc->ibc = kvm->arch.model.ibc;
-        memcpy(&proc->fac_list, kvm->arch.model.fac->kvm, S390_ARCH_FAC_LIST_SIZE_BYTE);
+        memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
         if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
                 ret = -EFAULT;
         kfree(proc);
@@ -576,10 +576,10 @@ static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
         }
         get_cpu_id((struct cpuid *) &mach->cpuid);
         mach->ibc = sclp_get_ibc();
-        memcpy(&mach->fac_mask, kvm_s390_fac_list_mask,
-               kvm_s390_fac_list_mask_size() * sizeof(u64));
+        memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
+               S390_ARCH_FAC_LIST_SIZE_BYTE);
         memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
-               S390_ARCH_FAC_LIST_SIZE_U64);
+               S390_ARCH_FAC_LIST_SIZE_BYTE);
         if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
                 ret = -EFAULT;
         kfree(mach);
@@ -778,15 +778,18 @@ long kvm_arch_vm_ioctl(struct file *filp,
 static int kvm_s390_query_ap_config(u8 *config)
 {
         u32 fcn_code = 0x04000000UL;
-        u32 cc;
+        u32 cc = 0;
 
+        memset(config, 0, 128);
         asm volatile(
                 "lgr 0,%1\n"
                 "lgr 2,%2\n"
                 ".long 0xb2af0000\n"            /* PQAP(QCI) */
-                "ipm %0\n"
+                "0: ipm %0\n"
                 "srl %0,28\n"
-                : "=r" (cc)
+                "1:\n"
+                EX_TABLE(0b, 1b)
+                : "+r" (cc)
                 : "r" (fcn_code), "r" (config)
                 : "cc", "0", "2", "memory"
         );
@@ -839,9 +842,13 @@ static int kvm_s390_crypto_init(struct kvm *kvm)
 
         kvm_s390_set_crycb_format(kvm);
 
-        /* Disable AES/DEA protected key functions by default */
-        kvm->arch.crypto.aes_kw = 0;
-        kvm->arch.crypto.dea_kw = 0;
+        /* Enable AES/DEA protected key functions by default */
+        kvm->arch.crypto.aes_kw = 1;
+        kvm->arch.crypto.dea_kw = 1;
+        get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
+                         sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
+        get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
+                         sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
 
         return 0;
 }
@@ -886,40 +893,29 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
         /*
          * The architectural maximum amount of facilities is 16 kbit. To store
          * this amount, 2 kbyte of memory is required. Thus we need a full
-         * page to hold the active copy (arch.model.fac->sie) and the current
-         * facilities set (arch.model.fac->kvm). Its address size has to be
+         * page to hold the guest facility list (arch.model.fac->list) and the
+         * facility mask (arch.model.fac->mask). Its address size has to be
          * 31 bits and word aligned.
          */
         kvm->arch.model.fac =
-                (struct s390_model_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+                (struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
         if (!kvm->arch.model.fac)
                 goto out_nofac;
 
-        memcpy(kvm->arch.model.fac->kvm, S390_lowcore.stfle_fac_list,
-               S390_ARCH_FAC_LIST_SIZE_U64);
-
-        /*
-         * If this KVM host runs *not* in a LPAR, relax the facility bits
-         * of the kvm facility mask by all missing facilities. This will allow
-         * to determine the right CPU model by means of the remaining facilities.
-         * Live guest migration must prohibit the migration of KVMs running in
-         * a LPAR to non LPAR hosts.
-         */
-        if (!MACHINE_IS_LPAR)
-                for (i = 0; i < kvm_s390_fac_list_mask_size(); i++)
-                        kvm_s390_fac_list_mask[i] &= kvm->arch.model.fac->kvm[i];
-
-        /*
-         * Apply the kvm facility mask to limit the kvm supported/tolerated
-         * facility list.
-         */
+        /* Populate the facility mask initially. */
+        memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
+               S390_ARCH_FAC_LIST_SIZE_BYTE);
         for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
                 if (i < kvm_s390_fac_list_mask_size())
-                        kvm->arch.model.fac->kvm[i] &= kvm_s390_fac_list_mask[i];
+                        kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
                 else
-                        kvm->arch.model.fac->kvm[i] = 0UL;
+                        kvm->arch.model.fac->mask[i] = 0UL;
         }
 
+        /* Populate the facility list initially. */
+        memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
+               S390_ARCH_FAC_LIST_SIZE_BYTE);
+
         kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
         kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff;
 
@@ -1165,8 +1161,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 
         mutex_lock(&vcpu->kvm->lock);
         vcpu->arch.cpu_id = vcpu->kvm->arch.model.cpu_id;
-        memcpy(vcpu->kvm->arch.model.fac->sie, vcpu->kvm->arch.model.fac->kvm,
-               S390_ARCH_FAC_LIST_SIZE_BYTE);
         vcpu->arch.sie_block->ibc = vcpu->kvm->arch.model.ibc;
         mutex_unlock(&vcpu->kvm->lock);
 
@@ -1212,7 +1206,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
                 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
         }
-        vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->sie;
+        vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->list;
 
         spin_lock_init(&vcpu->arch.local_int.lock);
         vcpu->arch.local_int.float_int = &kvm->arch.float_int;
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 985c2114d7ef..c34109aa552d 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -128,7 +128,8 @@ static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc)
 /* test availability of facility in a kvm intance */
 static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr)
 {
-        return __test_facility(nr, kvm->arch.model.fac->kvm);
+        return __test_facility(nr, kvm->arch.model.fac->mask) &&
+                __test_facility(nr, kvm->arch.model.fac->list);
 }
 
 /* are cpu states controlled by user space */
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index bdd9b5b17e03..351116939ea2 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -348,7 +348,7 @@ static int handle_stfl(struct kvm_vcpu *vcpu)
          * We need to shift the lower 32 facility bits (bit 0-31) from a u64
          * into a u32 memory representation. They will remain bits 0-31.
          */
-        fac = *vcpu->kvm->arch.model.fac->sie >> 32;
+        fac = *vcpu->kvm->arch.model.fac->list >> 32;
         rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list),
                             &fac, sizeof(fac));
         if (rc)
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 753a56731951..f0b85443e060 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -287,7 +287,7 @@ void __iomem *pci_iomap_range(struct pci_dev *pdev,
         addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48);
         return (void __iomem *) addr + offset;
 }
-EXPORT_SYMBOL_GPL(pci_iomap_range);
+EXPORT_SYMBOL(pci_iomap_range);
 
 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
 {
@@ -309,7 +309,7 @@ void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
         }
         spin_unlock(&zpci_iomap_lock);
 }
-EXPORT_SYMBOL_GPL(pci_iounmap);
+EXPORT_SYMBOL(pci_iounmap);
 
 static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
                     int size, u32 *val)
@@ -483,9 +483,8 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
         airq_iv_free_bit(zpci_aisb_iv, zdev->aisb);
 }
 
-static void zpci_map_resources(struct zpci_dev *zdev)
+static void zpci_map_resources(struct pci_dev *pdev)
 {
-        struct pci_dev *pdev = zdev->pdev;
         resource_size_t len;
         int i;
 
@@ -499,9 +498,8 @@ static void zpci_map_resources(struct zpci_dev *zdev)
         }
 }
 
-static void zpci_unmap_resources(struct zpci_dev *zdev)
+static void zpci_unmap_resources(struct pci_dev *pdev)
 {
-        struct pci_dev *pdev = zdev->pdev;
         resource_size_t len;
         int i;
 
@@ -651,7 +649,7 @@ int pcibios_add_device(struct pci_dev *pdev)
 
         zdev->pdev = pdev;
         pdev->dev.groups = zpci_attr_groups;
-        zpci_map_resources(zdev);
+        zpci_map_resources(pdev);
 
         for (i = 0; i < PCI_BAR_COUNT; i++) {
                 res = &pdev->resource[i];
@@ -663,6 +661,11 @@ int pcibios_add_device(struct pci_dev *pdev)
         return 0;
 }
 
+void pcibios_release_device(struct pci_dev *pdev)
+{
+        zpci_unmap_resources(pdev);
+}
+
 int pcibios_enable_device(struct pci_dev *pdev, int mask)
 {
         struct zpci_dev *zdev = get_zdev(pdev);
@@ -670,7 +673,6 @@ int pcibios_enable_device(struct pci_dev *pdev, int mask)
         zdev->pdev = pdev;
         zpci_debug_init_device(zdev);
         zpci_fmb_enable_device(zdev);
-        zpci_map_resources(zdev);
 
         return pci_enable_resources(pdev, mask);
 }
@@ -679,7 +681,6 @@ void pcibios_disable_device(struct pci_dev *pdev)
 {
         struct zpci_dev *zdev = get_zdev(pdev);
 
-        zpci_unmap_resources(zdev);
         zpci_fmb_disable_device(zdev);
         zpci_debug_exit_device(zdev);
         zdev->pdev = NULL;
@@ -688,7 +689,8 @@ void pcibios_disable_device(struct pci_dev *pdev)
 #ifdef CONFIG_HIBERNATE_CALLBACKS
 static int zpci_restore(struct device *dev)
 {
-        struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
+        struct pci_dev *pdev = to_pci_dev(dev);
+        struct zpci_dev *zdev = get_zdev(pdev);
         int ret = 0;
 
         if (zdev->state != ZPCI_FN_STATE_ONLINE)
@@ -698,7 +700,7 @@ static int zpci_restore(struct device *dev)
         if (ret)
                 goto out;
 
-        zpci_map_resources(zdev);
+        zpci_map_resources(pdev);
         zpci_register_ioat(zdev, 0, zdev->start_dma + PAGE_OFFSET,
                            zdev->start_dma + zdev->iommu_size - 1,
                            (u64) zdev->dma_table);
@@ -709,12 +711,14 @@ out:
 
 static int zpci_freeze(struct device *dev)
 {
-        struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
+        struct pci_dev *pdev = to_pci_dev(dev);
+        struct zpci_dev *zdev = get_zdev(pdev);
 
         if (zdev->state != ZPCI_FN_STATE_ONLINE)
                 return 0;
 
         zpci_unregister_ioat(zdev, 0);
+        zpci_unmap_resources(pdev);
         return clp_disable_fh(zdev);
 }
 
diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c
index 8aa271b3d1ad..b1bb2b72302c 100644
--- a/arch/s390/pci/pci_mmio.c
+++ b/arch/s390/pci/pci_mmio.c
@@ -64,8 +64,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
         if (copy_from_user(buf, user_buffer, length))
                 goto out;
 
-        memcpy_toio(io_addr, buf, length);
-        ret = 0;
+        ret = zpci_memcpy_toio(io_addr, buf, length);
 out:
         if (buf != local_buf)
                 kfree(buf);
@@ -98,16 +97,16 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
                 goto out;
         io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK));
 
-        ret = -EFAULT;
-        if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE)
+        if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) {
+                ret = -EFAULT;
                 goto out;
-
-        memcpy_fromio(buf, io_addr, length);
-
-        if (copy_to_user(user_buffer, buf, length))
+        }
+        ret = zpci_memcpy_fromio(buf, io_addr, length);
+        if (ret)
                 goto out;
+        if (copy_to_user(user_buffer, buf, length))
+                ret = -EFAULT;
 
-        ret = 0;
 out:
         if (buf != local_buf)
                 kfree(buf);